Luminescence/0000755000176200001440000000000014521216670012670 5ustar liggesusersLuminescence/NAMESPACE0000644000176200001440000002003114521210044014070 0ustar liggesusers# Generated by roxygen2: do not edit by hand S3method("$",RLum.Analysis) S3method("$",RLum.Data.Curve) S3method("$",RLum.Results) S3method("$<-",DRAC.list) S3method("*",RLum.Data.Curve) S3method("+",RLum.Data.Curve) S3method("-",RLum.Data.Curve) S3method("/",RLum.Data.Curve) S3method("[",RLum.Analysis) S3method("[",RLum.Data.Curve) S3method("[",RLum.Data.Image) S3method("[",RLum.Data.Spectrum) S3method("[",RLum.Results) S3method("[<-",DRAC.list) S3method("[<-",RLum.Data.Curve) S3method("[[",RLum.Analysis) S3method("[[",RLum.Results) S3method("[[<-",DRAC.list) S3method(as.data.frame,DRAC.list) S3method(as.data.frame,RLum.Data.Curve) S3method(as.data.frame,RLum.Data.Spectrum) S3method(as.data.frame,Risoe.BINfileData) S3method(as.list,RLum.Analysis) S3method(as.list,RLum.Data.Curve) S3method(as.list,RLum.Data.Image) S3method(as.list,RLum.Results) S3method(as.matrix,RLum.Data.Curve) S3method(as.matrix,RLum.Data.Image) S3method(as.matrix,RLum.Data.Spectrum) S3method(dim,RLum.Data.Curve) S3method(dim,RLum.Data.Spectrum) S3method(hist,RLum.Analysis) S3method(hist,RLum.Data.Curve) S3method(hist,RLum.Data.Image) S3method(hist,RLum.Results) S3method(length,RLum.Analysis) S3method(length,RLum.Data.Curve) S3method(length,RLum.Results) S3method(length,Risoe.BINfileData) S3method(merge,RLum) S3method(names,RLum.Analysis) S3method(names,RLum.Data.Curve) S3method(names,RLum.Data.Image) S3method(names,RLum.Data.Spectrum) S3method(names,RLum.Results) S3method(names,Risoe.BINfileData) S3method(plot,RLum.Analysis) S3method(plot,RLum.Data.Curve) S3method(plot,RLum.Data.Image) S3method(plot,RLum.Data.Spectrum) S3method(plot,RLum.Results) S3method(plot,Risoe.BINfileData) S3method(plot,list) S3method(print,DRAC.highlights) S3method(print,DRAC.list) S3method(rep,RLum) S3method(row.names,RLum.Data.Spectrum) 
S3method(subset,RLum.Analysis) S3method(subset,Risoe.BINfileData) S3method(summary,RLum.Analysis) S3method(summary,RLum.Data.Curve) S3method(summary,RLum.Data.Image) S3method(summary,RLum.Results) S3method(unlist,RLum.Analysis) export(Analyse_SAR.OSLdata) export(CW2pHMi) export(CW2pLM) export(CW2pLMi) export(CW2pPMi) export(PSL2Risoe.BINfileData) export(Risoe.BINfileData2RLum.Analysis) export(Second2Gray) export(analyse_Al2O3C_CrossTalk) export(analyse_Al2O3C_ITC) export(analyse_Al2O3C_Measurement) export(analyse_FadingMeasurement) export(analyse_IRSAR.RF) export(analyse_SAR.CWOSL) export(analyse_SAR.TL) export(analyse_baSAR) export(analyse_pIRIRSequence) export(analyse_portableOSL) export(apply_CosmicRayRemoval) export(apply_EfficiencyCorrection) export(bin.RLum.Data.Curve) export(bin.RLum.Data.Spectrum) export(bin_RLum.Data) export(calc_AliquotSize) export(calc_AverageDose) export(calc_CentralDose) export(calc_CobbleDoseRate) export(calc_CommonDose) export(calc_CosmicDoseRate) export(calc_FadingCorr) export(calc_FastRatio) export(calc_FiniteMixture) export(calc_FuchsLang2001) export(calc_HomogeneityTest) export(calc_Huntley2006) export(calc_IEU) export(calc_Kars2008) export(calc_Lamothe2003) export(calc_MaxDose) export(calc_MinDose) export(calc_OSLLxTxDecomposed) export(calc_OSLLxTxRatio) export(calc_SourceDoseRate) export(calc_Statistics) export(calc_TLLxTxRatio) export(calc_ThermalLifetime) export(calc_WodaFuchs2008) export(calc_gSGC) export(calc_gSGC_feldspar) export(combine_De_Dr) export(convert_Activity2Concentration) export(convert_BIN2CSV) export(convert_Concentration2DoseRate) export(convert_Daybreak2CSV) export(convert_PSL2CSV) export(convert_RLum2Risoe.BINfileData) export(convert_SG2MG) export(convert_Wavelength2Energy) export(convert_XSYG2CSV) export(extract_IrradiationTimes) export(extract_ROI) export(fit_CWCurve) export(fit_EmissionSpectra) export(fit_LMCurve) export(fit_OSLLifeTimes) export(fit_SurfaceExposure) export(fit_ThermalQuenching) 
export(get_Layout) export(get_Quote) export(get_RLum) export(get_Risoe.BINfileData) export(get_rightAnswer) export(github_branches) export(github_commits) export(github_issues) export(install_DevelopmentVersion) export(is.RLum) export(is.RLum.Analysis) export(is.RLum.Data) export(is.RLum.Data.Curve) export(is.RLum.Data.Image) export(is.RLum.Data.Spectrum) export(is.RLum.Results) export(length_RLum) export(merge_RLum) export(merge_RLum.Analysis) export(merge_RLum.Data.Curve) export(merge_RLum.Results) export(merge_Risoe.BINfileData) export(names_RLum) export(plot_AbanicoPlot) export(plot_DRCSummary) export(plot_DRTResults) export(plot_DetPlot) export(plot_FilterCombinations) export(plot_GrowthCurve) export(plot_Histogram) export(plot_KDE) export(plot_NRt) export(plot_OSLAgeSummary) export(plot_RLum) export(plot_RLum.Analysis) export(plot_RLum.Data.Curve) export(plot_RLum.Data.Image) export(plot_RLum.Data.Spectrum) export(plot_RLum.Results) export(plot_ROI) export(plot_RadialPlot) export(plot_Risoe.BINfileData) export(plot_ViolinPlot) export(read_BIN2R) export(read_Daybreak2R) export(read_PSL2R) export(read_RF2R) export(read_SPE2R) export(read_TIFF2R) export(read_XSYG2R) export(replicate_RLum) export(report_RLum) export(sTeve) export(scale_GammaDose) export(set_RLum) export(set_Risoe.BINfileData) export(smooth_RLum) export(structure_RLum) export(subset_SingleGrainData) export(template_DRAC) export(tune_Data) export(use_DRAC) export(verify_SingleGrainData) export(write_R2BIN) export(write_R2TIFF) export(write_RLum2CSV) exportClasses(RLum) exportClasses(RLum.Analysis) exportClasses(RLum.Data) exportClasses(RLum.Data.Curve) exportClasses(RLum.Data.Image) exportClasses(RLum.Data.Spectrum) exportClasses(RLum.Results) exportClasses(Risoe.BINfileData) exportMethods(bin_RLum.Data) exportMethods(get_RLum) exportMethods(get_Risoe.BINfileData) exportMethods(length_RLum) exportMethods(names_RLum) exportMethods(replicate_RLum) exportMethods(set_RLum) 
exportMethods(set_Risoe.BINfileData) exportMethods(show) exportMethods(smooth_RLum) exportMethods(structure_RLum) import(data.table) import(methods) import(utils) importFrom(Rcpp,evalCpp) importFrom(grDevices,adjustcolor) importFrom(grDevices,axisTicks) importFrom(grDevices,colorRampPalette) importFrom(grDevices,dev.off) importFrom(grDevices,gray.colors) importFrom(grDevices,rgb) importFrom(grDevices,topo.colors) importFrom(grDevices,xy.coords) importFrom(graphics,abline) importFrom(graphics,arrows) importFrom(graphics,axTicks) importFrom(graphics,axis) importFrom(graphics,barplot) importFrom(graphics,box) importFrom(graphics,boxplot) importFrom(graphics,close.screen) importFrom(graphics,contour) importFrom(graphics,curve) importFrom(graphics,frame) importFrom(graphics,grconvertX) importFrom(graphics,grconvertY) importFrom(graphics,grid) importFrom(graphics,hist) importFrom(graphics,layout) importFrom(graphics,legend) importFrom(graphics,lines) importFrom(graphics,mtext) importFrom(graphics,par) importFrom(graphics,persp) importFrom(graphics,plot) importFrom(graphics,plot.default) importFrom(graphics,points) importFrom(graphics,polygon) importFrom(graphics,rug) importFrom(graphics,screen) importFrom(graphics,segments) importFrom(graphics,split.screen) importFrom(graphics,text) importFrom(graphics,title) importFrom(httr,GET) importFrom(httr,accept_json) importFrom(httr,content) importFrom(httr,status_code) importFrom(parallel,makeCluster) importFrom(parallel,parLapply) importFrom(parallel,stopCluster) importFrom(stats,approx) importFrom(stats,as.formula) importFrom(stats,coef) importFrom(stats,complete.cases) importFrom(stats,confint) importFrom(stats,density) importFrom(stats,dnorm) importFrom(stats,fitted) importFrom(stats,formula) importFrom(stats,glm) importFrom(stats,lm) importFrom(stats,median) importFrom(stats,na.exclude) importFrom(stats,na.omit) importFrom(stats,nls) importFrom(stats,nls.control) importFrom(stats,pchisq) importFrom(stats,pnorm) 
importFrom(stats,predict) importFrom(stats,qf) importFrom(stats,quantile) importFrom(stats,residuals) importFrom(stats,rnorm) importFrom(stats,runif) importFrom(stats,sd) importFrom(stats,setNames) importFrom(stats,smooth) importFrom(stats,smooth.spline) importFrom(stats,spline) importFrom(stats,t.test) importFrom(stats,uniroot) importFrom(stats,update) importFrom(stats,var) importFrom(stats,weighted.mean) useDynLib(Luminescence, .registration = TRUE) Luminescence/README.md0000644000176200001440000001263314521210045014142 0ustar liggesusers # Luminescence The R package `'Luminescence'` by the R-Luminescence Group provides a collection of various R functions for luminescence dating data analysis. [![Project Status: Active – The project has reached a stable, usable state and is being actively developed.](https://www.repostatus.org/badges/latest/active.svg)](https://www.repostatus.org/#active) [![DOI](https://zenodo.org/badge/23153315.svg)](https://zenodo.org/badge/latestdoi/23153315) [![CRAN](https://www.r-pkg.org/badges/version/Luminescence)](https://cran.r-project.org/package=Luminescence) [![Downloads](https://cranlogs.r-pkg.org/badges/grand-total/Luminescence)](https://www.r-pkg.org/pkg/Luminescence) [![Downloads](https://cranlogs.r-pkg.org/badges/Luminescence)](https://www.r-pkg.org/pkg/Luminescence) [![Downloads](https://cranlogs.r-pkg.org/badges/last-week/Luminescence)](https://www.r-pkg.org/pkg/Luminescence) [![Downloads](https://cranlogs.r-pkg.org/badges/last-day/Luminescence)](https://www.r-pkg.org/pkg/Luminescence) [![R-CMD-check](https://github.com/R-Lum/Luminescence/workflows/GitHub%20Actions%20CI/badge.svg)](https://github.com/R-Lum/Luminescence/actions) [![Coverage Status](https://img.shields.io/codecov/c/github/R-Lum/Luminescence.svg)](https://app.codecov.io/github/R-Lum/Luminescence?branch=master) ## Social media and other resources Visit our [R-Luminescence homepage](https://r-luminescence.org). ## Installation #### i. 
Requirements - *Windows (32/64bit)*: [Rtools](https://cran.r-project.org/bin/windows/Rtools/) (provided by CRAN) - *macOS*: [Xcode](https://developer.apple.com/) (provided by Apple) - *Linux*: [gcc](https://gcc.gnu.org) often comes pre-installed in most distributions. #### ii. Install the package Install any development versions using our [RStudio](https://posit.co) add-in ![](man/figures/README-Screenshot_AddIn.png) ##### The plain **R** way To install the stable version from CRAN, simply run the following from an R console: ``` r install.packages("Luminescence") ``` To install the latest development builds directly from GitHub, run ``` r if(!require("devtools")) install.packages("devtools") devtools::install_github("R-Lum/Luminescence@") ``` ## Contribute The R luminescence project is based on and evolves from ideas, contributions and constructive criticism of its users. Help us to maintain and develop the package, to find bugs and create new functions as well as a user-friendly design. Try or write us an [e-mail](mailto:developers@r-luminescence.org) if anything crosses your mind or if you want your new self-written function to be to implemented. You are kindly invited to bring forward the package with us! ## Note **The package comes without any guarantee!** Please further note that this version is a development version and may change day by day. For stable branches please visit the package on [CRAN Luminescence](https://CRAN.R-project.org/package=Luminescence). ## License This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the [GNU General Public License](https://github.com/R-Lum/Luminescence/blob/master/LICENSE) for more details. ## Funding - 2011-2013: The initial version of the package was developed in the framework of the PhD thesis by Sebastian Kreutzer, while he was funded through the DFG programme “Rekonstruktion der Umweltbedingungen des Spätpleistozäns in Mittelsachsen anhand von Löss-Paläobodensequenzen” ([GEPRIS id: 46526743](https://gepris.dfg.de/gepris/projekt/46526743)) - 2014-2018: Cooperation and personal exchange between the developers is gratefully funded by the DFG in the framework of the program “Scientific Networks”. Project title: “RLum.Network: Ein Wissenschaftsnetzwerk zur Analyse von Lumineszenzdaten mit R” ([GEPRIS id: 250974974](https://gepris.dfg.de/gepris/projekt/250974974)) - 05/2014-12/2019: The work of Sebastian Kreutzer as maintainer of the package was supported by [LabEx LaScArBx (ANR - n. ANR-10-LABX-52)](https://lascarbx.labex.u-bordeaux.fr/en/). - 01/2020-04/2022: Sebastian Kreutzer as maintainer of the package has received funding from the European Union’s Horizon 2020 research and innovation programme under the Marie Skłodowska-Curie grant agreement [No 844457 (CREDit)](https://cordis.europa.eu/project/id/844457), and could continue maintaining the package. - since 03/2023: Sebastian Kreutzer as maintainer of the package receives funding from the DFG Heisenberg programme [No 505822867](https://gepris.dfg.de/gepris/projekt/505822867). - All other authors gratefully received additional funding from various public funding bodies. 
## Related projects - [RLumModel](https://github.com/R-Lum/RLumModel) - [RLumShiny](https://github.com/R-Lum/RLumShiny) - [RLumDocker](https://github.com/R-Lum/RLumDocker) - [BayLum](https://github.com/crp2a/BayLum) - [RCarb](https://github.com/R-Lum/RCarb) - [RLumCarlo](https://github.com/R-Lum/RLumCarlo) Luminescence/data/0000755000176200001440000000000014521207343013576 5ustar liggesusersLuminescence/data/ExampleData.portableOSL.rda0000644000176200001440000006575413240265321020656 0ustar liggesusers7zXZi"6!XS7j fcH=.2sěp3qcelRIY-'%-rRukDT:qzw=fb\Yk?E$I> ,y9Ȁ {ox"D23H^2_ >/6=o55R+g=4A3i C_WBJxĎeQS39ɫF]5C0E$X87@ %ew>:/ ڟ^U5po}1ߺ(lJqTǜl271F+\ߥ,H/!Si4 A_kP, vwʧ^' OאGn }{'݆b`xz(w[saxUP7N06TT?Q2QWzY'gk[^^k}s͕D^̕2'E -@v=7\ C};{T $#b$#$ ]|̫5A ~%fO{X8εY.m ziұa~ztZ*8Kmt:iX<}@n+ۛaQ&͎#;z}pG*RGce(㠰6EC_ʯ- I/ @֐A-m-H pBUF*Cd@{5S@ְ 75ރbW5S"Oy?K+q Șk |U0αP9 / ABH/.ql Eao,%F -cO KǮ? Sk7d6[ְfHe3cg " Rߣ!zZHI`3r?Zu*W81HM\k؅#J2U٩%dk M{ah^}&5iE +uj}O~ *>~(V!0dF=ڧ`ae` mpbR *(}"7bHW/ydⳗX(^ U0CqiÄ& \:"&"(qYuH U|^o<~qї{_'OV׶ |WˊZ8W}ZZ픁Q;$hp\*y7q BY \#$A۬ ~m M8}S$,hBX6+K`}V{_`7`bq ?Tƿ>ȍ-m'K$0(V&-14ۈb1XU 4KvYJ%S'/Ƒ7dJ95͋5n WR\rX:4]sG`9}ǘ xR05FA|:U+\5$i}!)/_녠F3!I}a"o( z_\ "ݒ'][=(ञx]8~D y$(cfYB݂ˍEw`ەȎRSWXvc81/WrLlSɊC%BAN$ \g5A`J#L8|.%@{i_I ye N>8"o`07pw4jh95 тKGL|D,J>Fģz+4zHaNއU0B4n"gy7Jq9ۗj\n3|S΄MF-h1qE`Ŗj ذspJ_&@u&BCX_@Ҭzu-  '@r҅9)~_պ5Hb *WT +ѷU㎜G@pʡ*q菘b>$gT؝!H?x2g(gHs~E}}a-1VYV!e'Dok$;'x`ƺ12i!%^âAwН>WsA2 F/b /L^?23F-C+!v}jW\<OA[i$|i0lB%tK'u?ŞÁQ'^}b&~1HEg&1;ӑy{u/ /¡H1deJ9AP-Qf%%'^ ~P?`e,#@O~ AVAo%VDap-;Xژ2,^~ U%SWױܤd<⮞*mGL8z}=L<3s3+wDFvQmymynpcK4.LA\~B3q5TKDL&O♯}ÀOD<3IIp7oZy}hZC+8>JڔnMT'$+&7`nrE'f5̽<"4ߨΊ?xB귿 ]VKƬ!{Ht@τed\+Ejf+MNѮ8 '^nSA_+s/GWlP\5օRryIޫ NDv9֟%1f^PN"Cv+Kn5:9|ޟn&?Ύ%?{*ԁIp=כ.1 A0Ē둼VM2S[gINۿ1NjlS17;zǤ]Hssⴐg٥zZ6*-#vRCDu1=届T{=&TR]sGމ0 zMBj۷z5-ZsJ̾ C )3t[x,{{FKӫ]j?!xwo{Ϋ81|S[o72"C!?'׭6BiڊXlQ9 `L:5a:4kc(NхinBV;qsO yzĥLZ5'oXU̍`JUEkQxK0vR# DI5[l1tGҐּZL!, -r<;o+qrcMbi6) LQ:?/ce|-4^t2M~ U)D[ `.P`Xw4B`4~cq&,5n3+Z./g$#"zB5WeTc3#}6" PDv 7!ʧ@Ƞ^ {1e<\\4XBY3߱1eCk\' 
)?uNN*4;\{|a#ȿ҃<b,:Fj`]"_9+e6?58joP̾XƛVq3 xu J,=i֗SR cAR߁Ԇ*,}CB~S'THI`OȀxZ8sGq¿z6s9zS%Am3K(KݡNI/-#Rj=E3S2z Nsg(4uFzv6{JE"ǎDPwtd/>5cr~-X} ۈ3Y;e.1 gpYs.}z6~.PVV\OuFs{&VncD\vһ1injNIYmV Ke, A8BNhOk!TAE2L5I N[&\jLvJ a s+Wj2A3x)e)c1MkUd Wmѵ0:):hjOP*m9,\?N,E,Ѩ"M{C^`Q?Z33TY`:;VMqM&sBz'vpFt+mkP$K%N +"chx|rwuͅ#DŽO GXs`i<@ "WOQ%*r_kUdcϋzĦҀD틝N< /oIɉQ@F8U%CGݬ$P^*w?ESD,ض2^gf iQꫤر41CǛ1 ُ U]۬LzDҸlWa.r׭D}]ҝDș^[})BoS7m5̚>X޹wG':fBӀR(ñEʏG_!I1Ҕ9(4ֿz< kPVXXZwnrE vm;uhdl&# ,ϒ-k*)tTp-uCpnZ888Ov8f$zt hUmC3BÁ%k@n9%ϚLCç9ڰ{ Cax۪5?PAeq{ǹU i Y uw"ly{|fj5DI#/_'lDd$( =)}x ~( 8WMv4B'}%aO? Ҡ@7=&JiػަA=܏x4V3Ⅽ1ܻ(@Z=6R[ W}# iw-pL-l!U_*vBa#^ʅ"'@I~O+sѹ WWY'}p x~bw걶t.Z_nUXԻw' 01 er:A9AL%ynu2JߠOƽ2P}ׄ#,g7BژU]Mo/CpjOS0Q #Hq^ 1;pqoBq-V Y4PqB:Qκ :Fe ?;4݉HRO=؉p+ȸׇz/M|fgR.S >C>97w?=Q$&\p]OFVh`!Z0i;A"aBJCAb{M)>3:x~64_ky ȑN:ob^Zyu,~ ?6o\0є'W)b-y%R=eP/_[%*ӫp~8PDG?V欩ÖĊܝl諬x&Y F^}ϡrt1,7 zMvqL JnpO 5~!-m:ZTۢ'$gڽ#yL!h GQKҍ0:ʶO2ݗD3ȧg(l7:=;&ygO ׈+)m{-F)V#Kmo+qvCd`|%%c|TV8M(tHYW;1Y@r5T 7–z&}l+\h8ߎAtdec"ϝRxZPeO!`ifvY̮?ֺaB<+]tPZZ}Gj-t4fnKgD/$0YFGe7~qO?%~fu'  j#܂G∛_:r@mbu~2c~Ht']QkO ;eNrxZWgXvE||[iȴ.DZ'c@ U95D|5cB!4i줆kQL$1K߹G]\:*[z01/ `,7 mMDӄ4NUKfzqJۯ0:6 2DS ٹ[ng,EϤZOq{|ÓTYӱIyEd"oUiI.Ba/@,. FH+JCM(әxmWUv UA+Hx~ JNj 43xO8 b^C S^b}|Nu n b! 
_ )BOv!G'`S6#36R&]ӰFrYڲrrwzWf9'$*`6`nV@^1Q"gF[)@^X1POzM9 cgn./.|I #S]^ƒZIa6uck,+ϣ*'OkUc }0ӯ2sO`Z|f9M W!8gJ"M"ˡ翮7%m#h]{ )& .w'Vv}9oDjFҩ&S9p]=BZ^=N`zq[lDɓnb).DIFtn9E9t:qShh Ͳ~ջf9u\`*$^vjZڒ!8U5=zb fr䋽]^b=.<$KD]3T;3ox34۩:I NJ ^dؿlNg\<89Δ s~00Aoybd';uL=ȴnrk\NTMœj Wڇ)k}P|` C̤A3Pd]32UnMiU2pu*4/_gb 3yu`}TfKî[~o[ڹLrص`F`f]˒L&ɝJ?J6:ENoCVؐqmeb;5L(pVY5ޯ}/Y"0:"Zan̈-,LC؈><8>?*& J Ǖr F-(kYxY, NRS݋~m4~p/U m4;=Y]eѢѣ}*jz~2]PU3az H<p 'QD=Pp% o 5"RYn,eI[i~J*cAc'<zeo`MoXEJޝJM^1drS9-Cz%ʶPzs&[z`1:T{`/s@%]c6sh.Oes& ;=/03⾄M7/Cx2@dsS|ı~NwXq=$2.79<hc0;쬅j\vp??fL4.$(|5vzRGs[#LǽVr䓧Vsзp":F~`V]pKSŽ|17i>wI[J5PԌ2'Z;s,."yT+^tNLA6mzLmL.37aۍXPQ|x/$FAy~ın#75re)(qa՛(ٽb*l߱dv}Z_+:\%XaA V[Rf9mLh7KԦfI+`0-_#} /BWr~{7K$\*1Ef#Gpw.l8;'R-izpN"Mm񣷯ɡV=xwُ2ـks cbTmðidH/wFL3ZPBV2lh2"dԢ5* B,#جLk[\Ott <1NιkP;fNz<}ɪ-AKtp'S'޷AK O~׭3_[qu.[0.rKSKInGa7GR]&OiXCOfN9Q |eI^u0iD$V 7uxo0^eYG9j!W~E&#uSPsƼn*?|ϐ!Mh i7$GqXB.w X}Ut_G K{WE_\ e wO]`LyDÒ͠%**$> vNL6_bK&:Ŝ1{jҕ4_l=N|joRA2oJH3!9BR\J&{[q¹dc~Nqߦan}XK^Ľlnޯ S;Ó;f Fr';{N $p:@R/}CfkyЕZceb3bh߾ 0^(}yMˠF-A&NBK,l6tJc~j%奮^0+xeb"x_+L_d7K w8[1{m/B'lgEZ z^ _mA&1A9>(YӱEMv2R?S?,?: z[Qc>6UDD8dtYpQ hiwP5 {bOөJ%Oxlo"]V&j-lJTɥ Tk^Y^MWw"nDlɂopFk\'7$th'l4wZ>5O"LѺ0~QE)-S_i8VH5;DRU4x`y9X@͋+Lįܐ8(B;4EG|L-W;sZ~I f'1Z-$.E^OUpNo^VJיYSy[2+Y:$kP5JQ]A`f$b ߷D۶(Rկu MPvOiB tz(oTm M3g6z\iBӫ&fmbߋKO Ӊ؂Nz av!inX_Ə]شN֫mr3&\f: _HR@:,%&ld=$> 5g~}i'gNvz]ZGHX @rOvIxQ1hZ0ZAd 8:dX$^V6$yAߢ:5XIF*vXƽhTeT\smyXu5~F6L={cO+'؆",tfU 6(q,|ݺ(F]N?TAi`ѡ %KBANldQ(d >IO@ ^( .c`| FH\yJoOŭe>AKx/i׍Ѭ}(LREoju2(cC-ՁL?&ښ!uIze+JSיwb̪ɅxWMn W-%WQ!SGϻ DD̔.rY*W5y\/ɮd/ UX*;D2e[qO3}_F% sh]<%DSOBUuˍMԭ.c&OZ@x)NċG CA}j?Ec+sNAvLF&`w D%n_? J "%]RT\ʖ3U-Xޫm?YheÂ@=Qbd~^ma4 `66ѻlޱ1OME$Cn9bWU}v$[dGzqgn]$5'MOׯwK.1[T3/?=;ϕ%|kh4OXTzh81.lYڸ,L؋'AwvV;! 
׾RXaۨ/H[&Հbjx_g(SI]M'&h\I55)V kczanD+ߜ\?lߜ me8ُVg6]gcW)9gSf Z&~rPN~R"8wF@Dk}zεTpӍCL)OIGLQt{Pj5`Q u=4F_mɌƆ7j Hbr# N1 uyÜ {,,E:erQTt&Gu-BʱR}yY/luF[D:)|-$Քا;!fj 24X*d+Ї ?FRN}{eوSLrtAKӚJ*T$5Wx$5X!K9Q |qݓE}6'ޤ.ܽc`;.0a~~6߮t[5 QNҙ0; H[>Ri6m]~۪`7]aWwG[5fDL|ghYj@`=Xc_@5i Ru\J?&?/{u^phtot;2hfn#Ȟ9W"IOoٸ P6ݽ]"xf'-ȭZ;U&Z*0 1eL1qfqzK]W\j+’yQry-a 7/"op[^12[{=eAyݛtS?mSWcVhN<DθB&;bSA(cKZ;_@tJH* _/zG~5XlbD9ᜅECkP\&\{tÍͮ 浪2]U=SCa '\){ -!Mn첥b7ߊשU17jGuD68}9o,ؔB?xHy~D8g65{tHm M,*m?Hk;@󥻹զƠ'$l@ZBziroÑN"JN [hu(䎸 En'I/e L Ӎ NY5d30Dл=& {rf_w{+ mh bKN+3]936Vϥc'E2Jv -|y6ifa[bb7:HŠ%,?D<~ fA(N ?+~nTmILD)$ź/9۠zˁ޷ζTEG^Љ2?KL}I]^-]O)/Y,.6*k^ T|AO}9 !` zԟż[1*d߼/+~S#kU]Dsn|\G4;ȋl3XxP[TBx>s?~tF$,|uukQYa@ eeo”# %)>,[~{^mVʭ2ia%!]=sD1UX(_ZY些8Bq1맷]{곂l1kbXXw@}%jF!c)\ CY R+"Wv b>+|F6܄h,iD  323i5:rhT6]\Y^YhzdUnj<%3dDPmGw0XNnZZwwm<9͙]gils 91j\CsqZ\>.f_̺ŚhG .gVksxq'D ǖorOގ/7^tkT+!cU2;?]iyrGgVX&WW g].;tMNiZ c\"R6!@hw-ڄuRX2r0qpJ8So4=#"\Cy)d/ÇigІ]3O|Pt?̬eK}3 @?<-#+e_[i9lWO!0HK<~XP wpPU9ʂޡ2Pgl] 7{!BN'i~H5k#Y.-g,Z8&9ב"§ϕ?* w7ϊc4Wp=0 ֯Y+RqnTF򇏘! 6yw驅H- ]=1-d-SDGɒb5Xh`]ưoC%=.潺 wq}ȇԎYx~hU f0jjg\[2օ]y?idžv@Po0f=`]n ,Vc4yn6׉s!u]#ɑ:b. `HCfz"X3ካ%RMHȉI)\guU*/>-@S1Lp[AjOƪrE, LZ`3g67S B)~CFӪ%ELƄ*|x"\b@4(Ha>ADURs]Z@h Y VnW}ES6 ;\ N80"I#櫤$㎞=yQq\oU j+zrG70q8+FC.r]ׁOyJ;\Nxė1-1 ^k,PLQ:]@wCbU30Y%MȄ 12o\j)Nw 0Ywwu,^.e8)rPAGNPeJf9&֭}## Ǻۑ +cx a* z5vG\N':$i";D +e{]mI><@Ըwef]Qw+vd7⯇;]e7$-j$ԥKIu2B!gz&̂Q^ ҡpDf\*9$uB+2_tPuZS',!8PnxP@ׄjM!RDT3)ʍ_2žP0\csjkkN8\[(  +[17tFWIIh}W. 
G I}uKK n$/ƈv}b;z]Dʹ @X.2jC}`*vZ!UڸBE],U {J <㬅bsg?U^5#9`wDa \Ӵ5.sژ19hq*G%ԜkH*vMR-օc#"T ԷYg=jOz`U"$e|ptO:J`Zϓ%U{1 LP /jwDocn2 >ޗi= 6Hc CBjQhP"SX_j`EHVz!@ߺ* F Lm^'y)\IԬ{!dUq=c@Oz 0CDjHWJUt57ksq+R1بC'!Os.4~ǂ30 `(X%T-f.@NOܨ/s㔢KYh,3h|0Z3QXqj_~l 6*Ս`M;,d*^P4OGbYS@&}40INiBp5LJ]gD=5Xmd nQ_Ap{&9㿻a&\Bz]I" @7H8Rf'f*{sBL'L}ɖW4WS ݢv<{r2ޢɬbٳEq?MI0mt9צ>!q:}Ұ6c>ٛ),6y=`o/g}Lu3tr}}4RyRqgwOPMK3ϙ,x0噾:EÏ:))hN 93-\G{:&g|٬uQ9%kaRq?0OS qGn9XFw-# (BؕDGrUxq4W} ,.)˗VȤy29,iH5n:ֳ^}`q^} c4N/Ա"p!t6[洠,Ƚ’oY(c$0&uu:S6[ȟ#'W3Ul}KЃ ?SRݿ;Fa_t?0 ~z}=ḿ:ʲ-`M/fK+ʣ AR :A9mb lxiĠlSgKiLb5Tu꽯h΃nCcN4s#F\FtTʇТ.FfXmlu~<  >,sm:JG}M ,?N -jZFHGz?PHi|m`W2#vD)=DʟͿ'3bI[Ց]%)DX ,o!"Ol{V^MiJ,޽sWBxBog:Mk6Ҋ=vx)]G=[tI:,|9ّaHr,lI(d"h}FmטkjyKMK{ Vsm aɟ }Cd#<\GãqF ;gD;,yWP!k1~FgCXR 3Pi!S*3(p9.Y@En@>?ab0K zP,>)ݘ-{EcϨըMsfG):epYo[f2Z!Q[$1u6%,J 槆%"g+?ĉ}mv B(l5X?*+WЫՂY}=Tws.,)o5?%dK;7np ד|'sù$^,HʾuÌmy&w!\H8wg"?,NGD6]!V L7f1<*յ]qoϒDCRQBV p jC}Z97.hZj/}5UG_E49K39jO.o`;8=!~tР\Gˎz}F<>}ξHPogXNFa 7GzgK9Sm=}bXǿ*3V'cy$"im9ao5lQ^|< YۂTlLw=JtG4V 4(@TaY⁹k3)_Vߧ$Hu"~w]4OaM'~&F2HINItSP zǑL`~~էZC9f"KOhٴD=S㟯16(y9|9S5XyB @1FݟeDΉ|ĆNN=}jި^1P':R?B`2~Qv @e3W?k;zWUVLDZH_f D9ȱTnsEZ"q8.oYG ([r̺N쪙sE||r´C Ӣn8sbVAyuôdf;yY2laRuָ)E #sfb]+9;>74|>p;lȒ $gWXPqQB<_.uy1$p뗯fl[gt_*U < >eyXaeX.Q7$?KribCu0#9WTh`ÒD\]LU(j.TdylĔJef_DN_j峗 YRcaOzFuu ?vzJJ dJL|Aڧ,fN=\){uXW'E-zl91`漋%ʟy#~Kch!GKJ!% g,k7T8iT-98QMȡ Nߞr'u'oqڙZ`\ `78]sIP@v^+∭,b>f!&y띻a(z!C}1r7N2Yo!0v7ߜ!wW6gc 3 "$)uSrDw!1' @p5pl ڸZi$ÊhA@N&;Fa{P`X{|g7+MpS[]5aU>VVGr6 [WƦϧ v솧ۡnB=ۼhlN O`!yB9#=)pNĪÑwT\{ߐ2= y1Hq'kԬ(4p%cF&hlsVc*$\k# Ů:{1vs~p}Ȫ*ezQ="xsXNTĽ ovWovG8c`U$"E0y+xMl_U$1?5 =k66?2Wn7g5%٤g0*q*{Z'hDA9t>5g/~Wf!;[^!#ZޭO'DE^eW&FwB(=A1w/#`Fd)y@6CUb {%:? 
z^s6Q _LJSX_ 'y#M5r7:lQwBr+j& Б5J_/gyj(2oIܫP+;_&*"Fy+mi=śzYi6WƘ<.?zN5ÎIy"]F5|qX⬣"qr S]}UOY}DS5odV.աbP"&d6:{ IvBB[O%u*c}rv/r\'djǷau{wq{AxqJ"̣NhMN8ig)B.)}Ea6CU_ٙnNYx.cxZw_j|8UZ/̛yg$v0R_c,\]Q8/ bX۔cB/&!Tb>>%eg& Rj.*f@I撧Ib=p|/#)@ % {&(AyYFDf8jIdz@v̾g?#$a߂G2*$;R%N MV=s|}dFG!v5hZ8m>T4ӏ]@R8ĊN(;Rưrt_5yf{1# c> ٨Ih0Į ETEI~*Ь]J_~&.Rq3lju3Ym:UhE)ys.9L*9Ԯ%> 1뻒Fs?~ ^udl1_ ]UQ^!x|okǼp;ea% Wu5 (X`@p $Ljv[/,hqBcQkM7`#O)Bn*3{Uk > ᢳNǖ<orYƫ}Vv#8pc~Tn] EVo*;ɿNKI nrf;f*S[9Vҟcucq%`Fn,lr4l<@A]3 D><3P\8`<@f>hT:ĥ/,$Ďf{s\?Ie4pI!8]Tf`Gi,4: w1^Ez5&{4Dr>b~~nQ%?㣞Hd0a9i` !6(ܵJ!PC~p1 r92g E%w}wOgqgH1G]>O>Yr(+KaV9Հ\G>1F쿯b%sɻ~5م]/WKl9;Т b\g6ryN.e7Q}7DGeDޘ־P}tl!gj5jV.Z.yP4'~(L%˻b}u qywʕ!k;az@iooχ-Pq> (^i \N;SxdItDۡ1i!О*'^?_+SQ2ߝR͸ܠ,;%؛۟ n:Qplk`S1b7/$rm=nD  ps~< +IM)A1 KpͣʌGסNnTњڕ abc#:%^Cd,Jv[s[5mE/ }&hHYt]}k{[%[BJ_| xIpՑ1~B1Ipɛ3mf< ص,zLO`b+~n^<ۇDdſD2V~q,M t0aJ4 [댘JXB8*z=.ƄCs*ɋj?8 6c֛bߪ/$TOl g86$ #$7I 0h"քz> uo>޸o(HhJƑQ99{Vv&'dZyݽܧ \XW:jQeקn4N>~uc}v>_o6a%Pv.N!?.^PxJ0CyDML$b@x a;.Z-lCR~4ZR94V&Kp\{kzAuk9XlET!t]$ڰX k -P5ݲj(>f#ssn]ci7of^b&ItQZ}݈Fk9Q?z,sչʂtc>d:*7EBĘ`4:e@!HZasZr&4l\ph.' 
M:[%.߰98Hh3V {$ u񷉣~4İnٖ6J.ŝ_P衬̄.3}H N%ŧ_!gbxX`9lL$+Ԃۤ&0G`IƸFVl(Y$ 3]ܖ7KG{Vp,]E:DZ%~ULSi3"s&2Fץݦ#ѽKt?%A2O0yQr:7"]3XfɵWK۳{}8?Iu!#ʯ=8K̜<[fN ?/ *pr=7r⧩ܺAAk:| A-uީe OCΘub޵jU H^'tud$ƍ* T }$<̒Ɍs@rQdҥ\_>W*TC1V#OYTn#]v+T>|/%KȘڮ_Tc'613(Na5=72/=~{2)u-JKkؑT{{V f/tL+$bfmz)F/ kzXe孢p#u"(D%& @.Ka Qft &\A{l3+DŽo3,jqɀȩy5Y`ʗ^x΂)V֤iL:|ڵ8.Cmp/ۊ=$ZZ I `b>0 YZLuminescence/data/datalist0000644000176200001440000000203514521210031015312 0ustar liggesusersBaseDataSet.ConversionFactors: BaseDataSet.ConversionFactors BaseDataSet.CosmicDoseRate: values.cosmic.Softcomp values.factor.Altitude values.par.FJH BaseDataSet.FractionalGammaDose: BaseDataSet.FractionalGammaDose BaseDataSet.GrainSizeAttenuation: BaseDataSet.GrainSizeAttenuation ExampleData.Al2O3C: data_CrossTalk data_ITC ExampleData.BINfileData: CWOSL.SAR.Data TL.SAR.Data ExampleData.CobbleData: ExampleData.CobbleData ExampleData.CW_OSL_Curve: CW_Curve.BosWallinga2012 ExampleData.CW_OSL_Curve ExampleData.DeValues: ExampleData.DeValues ExampleData.Fading: ExampleData.Fading ExampleData.FittingLM: values.curve values.curveBG ExampleData.LxTxData: LxTxData ExampleData.LxTxOSLData: Lx.data Tx.data ExampleData.portableOSL: ExampleData.portableOSL ExampleData.RLum.Analysis: IRSAR.RF.Data ExampleData.RLum.Data.Image: ExampleData.RLum.Data.Image ExampleData.ScaleGammaDose: ExampleData.ScaleGammaDose ExampleData.SurfaceExposure: ExampleData.SurfaceExposure ExampleData.TR_OSL: ExampleData.TR_OSL ExampleData.XSYG: OSL.SARMeasurement TL.Spectrum Luminescence/data/BaseDataSet.FractionalGammaDose.rda0000644000176200001440000000272613417222471022256 0ustar liggesusersBZh91AY&SY吷޹PM$4h@ @AMhI&4Ѳ@hh BC4&M44=M='3!144M 2hɡiP64 @fAOQ3Pe zFڍ1!MC@0 d4b ޔ4ii[d?+@`S6uՃ""sPO=!*$4QȲY$h[Y!'ݘ*$aW! 
1a׊ U\F7ÞPHU Sd-ijag ] H`4mAUmُB6j'ЄKRO@%C} >yMy+x~ *!W^M5Ei`1cE&ϡ\}tZ *kn6S- 5 ] BJ#LO`61#d &yUu*>,ekyxn瑲ɇsݟBN3ĺku8YA*L+Mna>OK<+(# gz@3Z$qy[HѤKGPh-=c"fU}> WWRpE"BQ(EqJ1*)@;P ~=|׭op=r0-Q))N{G, )E+gQZ>ٔSȟNp0N@As-%7BUy%}\XfE(9Z WM0&pI~0ft搡LCQ>a#w:;SC(NК lp1  FVPwِZBXGUOa"1DkbPa%Ozm@7x(Fp7[%J8Ą/}^`~ ' PCfWhi: ӴgZRS+Ua0ʊXT6w~ F"Vom5 *7-Rـ  UPX\A˷AxS ىViƈQ6l(gz[7ju &Z% +]0DEQro8WV-6q:Q`jъ ۯM叧R ⣶M'; ->0 YZLuminescence/data/ExampleData.SurfaceExposure.rda0000644000176200001440000002012313240265320021567 0ustar liggesusersy4W[e(SH n)| !T!BBcmz׮i8^vŌW<m9.4Qi*>QqƨzZ yP(~}YIq(n/NOr%*T=_T)X(R2[Fq))nŭ8㤸Eq\T9sY/M[Eq(nŭYMq<Cq<KqGq|GqOq'@qP R Q S0 SZ[Kq"'Bq"Qz[Oz_PO8Q8Q818181@qx6P@q W_ \'x>Nr9["x O,OjK=mDPG.?E` H77/r?TKՌ'eY|ѱd>?*[ +CMCC@^>t]*v9zx(k4똽 Q֣4#7Z!Eg~K{$?"Ye,ڑǖ|pgֽ6Ii(\b,8e e]DFT}~[5oړ$N)]70ݔpP#>׬L^II/!ZI;fh݅d :TmU^gދ5򾼝52| "eb j7F_\voȘNԑ _d^3>szlt?~<?Ix9PR E:aD~IT~]&r `ġee6^ oaY0b,h.c-2`mKi (uͺ6O`T2{&^J00NRuViK(nL/t:{3/ 0XS?Lp.l[n1u06>)7ab?=e;{s]=44=_2@ 3Wk+Y}k `7+z)%ÓTv v/*&OBҬ|sD}ї|A.ϻS5aPin;M%ʋ>/5bMPν{rbF*s$}_%wrnN*C&ժ3~Z!^*quw KMA5udp;g3W˝ S58yuW ׄڠm*Ťb͌lJi嵲ҡъw 1]tDZ2qϸ*j6":q]YTO}}!sլ\ň+?]blBWlKxp9˥]K x!8?a 88ʈX:gVP!QW} 1diu<+P׾˖-Ns;^ue}GJf|'=F[y(y Fq>)|Nx!]Ğ8?]v-xc@V岱U?Ux,`vBٕ+W+׷-AzGc瀦W\(d@:I+D{"7)n;.zGw,XrA {@(/xbz@Q͗ȝ+!ohV1J?-zE)}åݗls V%4'ֻN)][͊:(n-Ďv&]:P-yK²WΙEl9P>t)U\Yz<Xߺg˜d٦_Sѹ}ht(c{ bBf"cmzȶ/_Aw_l*%\P<϶*? 
Weã~3XBRWfhe ,Ӈ+x{BT7PejG޼_[ria:T:|ػ?k<׈=C2n'YaXUt}G0\b#!p&m[v⿱`0"jxGFM7K'&8YfVVC=01xNw_ta9 K{1+ )lbp"`ձWc{wcAm@ [f_q*h̚S+9`xNYg`]\6h_83;8 OqNhZSn6< X)4o:xWaΙWۍerm|uûZHԤ[ Ewd/E2/t4 KEF=1\0 Fp.÷` ނ Fnmܨ`߆ kp--~XmԂQZ0H ƈh,[08C`d ̂!Y0, Fe,8ʼnSIPFHq)N$)N(N6Q&Lq)n3m-RV8i AH߾-If9 ]c~xٍ$4nFn;j]4r&觸o-о s63n()P>ѷ< }"2wL# Vٖg_APke{j-0WG%/8 wSq' pƓ]ݿM0RyLL Tv%eøPŇYϕ0.Q^a\J\ ?z3͡zň&@|c&"ziV@+ {)faq`P>6OM4\Bօrݝa ֒<}5Im䥜Ug/[Fܘ=YEFLOsiRyFwIɯ'noou|4t ~#I ?Z_®)і9 p* /9kTx#ql8oim5v ;Z[i]y_~ǜ$*mpSUYhTUu9s6Yc`W/\SsbLZOq'h) ik?"j领.jxHSq?vre(X"_d4$rrJ~F~\ +nK< fQR38MlFR?2"j/E/A3Ľ0 MK /eI*$i[dg܁'$~M-x"۫UE}{I.t( >.gOՅ'v]&;2n`PyPudJɀ7 ou `3AU{;hSo bn) zw%@?Ɲ|!OT#.\f.*d[@?\PXU"a\roā`kGc_j`kc}j͎ kR]L.s-aqL%J:OU}m ܘ+Y1'2m8ۀ]=Y_XpG/u5֐0 !,Q_\(h噦^ O.qehKj};;ChwLR(`UZ]޳{rQC5߮H"oO"E}t}tG_ƍX 3R~$N~͔(!]VV$ne<'p7HG NH.kcZ8O`%R R^$?>N% yaˎ:j=c}i Mp<.:RZfґ1aTlKBO]sBoH܁ͪED\f/{U!S r{d"v+уȵE;yYҼ}; ҤÛf/^q>س73AV-L+wbxFkaF$$hw kr fVeB~-o(2h?LZϰ.u^@{l!՞)>bQ0 @y}wgGo\( OC/v@ j60@;=Oj,{47Rgl_U1^՝J찟@cwL[}FfQC5_%5*q)'Iͼ$53KFr?YKԒ^}g~)I-/Oتv7dy<|dwLι~9MEc;gd{YD <ۏj\$,Wb3y,:Hd~!{I.JyyYDxc!ꑴfsδo--mPAQڢ!QޑUB>y,"IEW t6}D"ukB^性|VBq YgPbzdn-ym,rO撬 <8vf]: Ώ>ط1l r-:kC+Iҩ FĎ!Fc/j}-P / 4bzh*PKPΎM02:&. L)uallCꗥD8@ha~vUti{' 09K d: Z!BM8 􂋍2_6QӸ߫eYE-Rh]*VWcK=ԛϭz2K=^ z0\H|K52-#*eٙ%# o@t`춟`]ۑc c!9 <0~ZyP3.|g`w @g ,ta!@r9_S}cOzyK򵀮- K%m%ZTfW\VӚ`q+u%`rN'n~8v=᳆uއIdBLOpIƉ:UG* `,N@vc+@ryy*a5"WE5|MT-`mO4Yl@)L|u n4KLp9VuFMN  sns:FLu- ԻԱq@\~ P2++&',Q*<&WfVLM \ohBU32{oijs{AӾdo"}f: p1aM^ 9~^30-R!}B c-,?ϴN%l`c)_N-SXUO;4,nCc?r'9{pM3>>PCGB7ݜ%4z\U_`c5tQC;:w"}=NDd)ĉB*zv!̝/~Ežek' iGNpA$5HҎӐA<殌6]|F8-[lB6g [}!>r#PM"P3ëPX)YqJ¥C}) 5O j}U5qyؾzl } }ZQLo/YYQV8"T?"! :T$|4e0|q=жFk\abyoF^q88ڣÜMm3mݒd)o|t%m%N3aiGـa_¤9WtW;yX`I$wytz%_ ~[gg+eRײ"=U`M+uzzg`Kl$|hj25MR+؋9^v0/mH}7M}'h_!n)Cro{#s/ <ƙ1GN+i/j袆5ԷX$~SqH+$p 5A2vp ~ǢH|ݗJd$bӾli&D'$B6-jy Χ5@6ȧGzVBJ~wa u5"w d_1 cwGxC8>ǴaJzP'Y 4Z/XͿ:?GL>Ү2~}%d(p|aW;=E|û][?B `T0 `p` d(!v;0zE[0Z2OBWX!ο7W_po۔9Z|r@`ү+ ¬JNOSMteg^Y![Z/1\6tl;Zq^ _<=? 
xÃS-JLj 3lsj5 ֕ӎ51ybqP'δ 5{+{tR4QC5_6_-:!}eͯ<rL #BDvo!8'd 黏C?kGQEfw+ ͧz 9 H5shUJQvG[݄⦯2(+?85BVČPvd(\9o+֕zb8sv|Y[MBǴ5Bm7NBҜrm95\/I/Z8+'gM*ۂ p :T{I"|~ c7;#4_PQz2>zX@0lOooPU/:(Moֲ6 @H43g)]7Ý`tAm$;w>(p,|[U "ַ`ӏNJ@hG74v*pCޯJ.Hpr}i\"v4*2&zW?.޲ [Mo~+bB3!@܉$)|%k<2WCX-r?3r0y9[YDLuminescence/data/ExampleData.Al2O3C.rda0000644000176200001440000021124013240265315017375 0ustar liggesusers7zXZi"6!X])TW"nRʟu,=!z%mt=c@ [ t|y*5+-P}Pv|fCFK'<(SHe)D)##NZg7Πz"Wo43k4$vb^gG`˗(£i֎yc-دjIQea;4zX:}/h𔶠I&[0pk6sG8߹ RX(D.ǿ0zbBT]|xjrpXs6ѭ/9nfw|4=Jrz(zŘ~"}n9X%YP~tk ?1S,R_BzEBi"COPFm\VD6iuZU4n%~fªYo#ntEh;vAo|V`I՞]l0T!ƣ4sm岜6tM}fI,) <8{-P yJO;=%K|&c"a#TX1fMBF046񓍱*MmIEۙ}9[Be7}2 C{nA > iyiw[TDIBRRiZ&'n1oHa,&~V)ᦼ)N%ǦlUdPP|.oF(ԪPy$]5Z:3 կ25wNůӽL %!4|3>HL):"gŇi>q/bBvb!W4ʉ}Yn RzYK-#?rȓ'\@[4jK9bЍ [~ρVbGD wy7Q~4%sh[^J}HVl2(R @5S_U.BOv^"f|X3)ʉ~FJ*'PzSI(mHCȆi,-%i7]TnoG>VSčՑk sv!@qW =kzN%E+kL8g"R 43S :|d۝ER2g١#9z Γ'zw|dm{ŧdst}Ͽ&g5N]y=![xњA ЎIŻ)b!]C%D_eyڃwx+viM }=U)SNc t|diCXeVFm|@շy@w\/md]/v ĆL&NͳJ?0P&:"so}+,I/>!0MD8"AnuC=$k_7rdPh~InLuxDEڬK#)=`!LBΐB|K> 9"^Z^K($jt4y9gIn53 8kzq SE!іS86@%7kIʥxA 0=~1>ϚYk$a@;>kך^{ \иs^=ͦTXE#gM+JB>)=J~ {W+֔ü yYOXJ'_ yWJ!˚ tuG:<-$"X!*Y Y/yC: M k9f[F nK -i#kDԦJ>O4OR1K!x89X?]wDMۚ~zGqd]EySJS>݇M.mdGeA)Jw! 
Y]FCBK&tM{wq5W#@+@>xN5IBةʜ{|y+qQ#<+g"^\_>!q%WN(OMd*ǃ) g!>.q"bH7\YMeMN  i^:JK;SDx(g`Y0qT}pSvDxՐߒpT[x]%uD`^A!\͓dC1To}1}_!q-=bǒ*I|d{Z &gNI%p¯ K_X+z[h@5<~fK]#7TOKkܣz=\J/zBQi4i~;u^l qnf,TGO7-Kv/1ўW,e'QWŋ$A"lta-/pu@⵫.Pm#"!=YsLCn=Xe=^M |S XHS`LM#…5j_ߦ4%أ3w0 M41cl߄+ֳe "P;)&s{WkE~i;㻫_>v(a`&vV2jtB)ؠ(+ iגkhA,Q!d7ӐGGE%a5j* j"˷-JþEM]Kqj&IΦqj}lçW9Xah53![jlL ?jTW[ہQ I8Isb0T0,~>^f:BN&_BbL)9^%:P}.R3zz'CNϘa\L[~_*\ߢ 5n(vA>y2FN&O_#;1 yhѮMˣ75 E>(Fɤ# B\B b, f6J)V0׫AfV4wfk]ǧ!6ۭM.gG8ɱ+o=%jY"֗Ug/PonJ h0yĒ&4^Sn?4Nd!k3QC-e#Lw IX-~_\4ڷ4:#,PL:]?Zy8%Lۂ4}6Pr)(*O)%;JL 套vM#[,/*r6Uk13'9p&Wnqf^$gpP,s2Q'X|;\|5z6[?O*[+e _[-'UH^viއ9@Oda}[&6W, ^Njrx0RZG[F CM t7&Y@ځN`RbJ%Ί #!%6IYF6lC&MxFY $ps^KNF+XzTSw#tMO@,opTw̉jX}*oMk+SofSɳ1H}v>TDNɛz{Rsr܃Sx:}%1N]],Դ:wD5ōV8?j^g% z/.)oIIv.KTSp-Jd::mVgQEe[ymmh\1I(c 1΀GǥX&zCv5iȅiEÉd#D'SZ0!t: Rs@ZoѕˍXl̳T RV<U/xy yjr G; lZLprT5p߄: m C]}y<(^ו`}ksz-7h n@k.[aX$?,1BU'2hdS%P`ԾX~)!@ԅ5lG#ZySo۪=a8|$)9!id:#d-ʱA}>u!;roU2.=9/?BɪObj5Qa2ZA9#<:Ix)}X3D{ 7ANFcš ֨H’hI pԅ恞rHC+џxo-8!$Jw&_M[~BTR@sJ6 +1jK[_j_)y+#QCNIu9&0jF6|O&.mTPn}_^v 8ޙ4$5Lr.fރi+=NTaeE"J5oVKh1ѱ\D?/*eDun =9}]ըLՌzyXpPI% ӵ~:ǹtJt2lӀ>m:OeM[?5:"S@%){w/ɸTsF4oS<]$0Nb"y Y j7D/q|^d-N9Jҳ{HXlOՕe jc]+N׋Y}zbj}[ڭwCARdzqIkSXV#qa J[؋J9q pY͞PӲ#yrYDn%4_zl/pyK`(ꡒPh ʣ&r)‰tjk^ 9]/_Wy,G46 aN4Y0͊Y4#_Hz%e58h&}bLFAF荇u˥$~slP+gNNYE0rMa|PMq0.Ųf=;MP) 5,ucqN`IVDͺY9v~Ә6&ya9#˜5悱"gmg;[w_İ"vpoE:ѳcrfcV]x7Ӎ A4&5j8i&6Vz-C 6 Y@If{}vGLee̢kbKhmj'4WVzs1k$ Z+2ÞǨAq0TPwzuU DËN)*j;Bo4کaҳ^s̋ LEn5M.' 
B~s(g ju T0qTN0f[<[[<#uyhly L:h'R w`,v5`!( LX`˸o2O%+g<=1!F ,:!VLq)F[ HsBݏ:3bW*;| "!sȝ4hk gt'Zjk.240 sl:!P[EB lak;%yfd?b'00QĤY*O@DZƜcRR]ҫaHPxv?90@,Z_t".2eS@+S[< C,Bl\|Z5i˅:s>xf9}ɠ}!Ξ'=J9&1M^4tOL&mH 8g5`M?|ӛYCy 2y$JNpƮS]GsBղߗ zSH N:x]w.lݝ:gs2Vrqo2_Ȇ-bt\?׭ԩ?8aU!c|%Eo:WGgER.*[?kj?!81!#/듩 ~@*Jq~ǰ`B{[2.$`R8>w{܂\pؕ3L "Dk\-o1 q[(&B{( HӬja'@dv$A đc1RhkD;8,^UC"B?~^K%>4j7h&4Bͦ£=}:X&/Dg̤U7WH'xq7B^Xn<<%R}z@ M:8>޻9 G..z u6Cv.糨+ž'{2ZTKd=RRmޛ9|eǹ,RjL[laRfv~5i)ښKQ8SUH@8CЈv2TQPe_)8-@E6ag@ ^d19RAX<I)OL%S3Gip߁NTP+~W;D0 o54nѯ&+= ~#&o7X0D:Yʄ&??JkVx{3ĨI/rz}B dd4E6N0H% 䌟Y,KǘEH/[\CPo^~C&,_Nnr1Ҷ"wGϢ*dK~}`jH+.BJ 8 Eи-lv#6/V_݇b*uo@xOPƵYkA*Fq@lc8YC˭dhg ~L#@/ͥpn q% BsMD9{ߋG nJ~1(nؤTQ 5CX4y 7*lMwXygs광βڎڗf/_ X;Jׇ4^fw(5M1[:-.4va7vGy(DXw'[7ADX^@ɀ1}Q.gݦ_^)V^-ue1:T~)k*ȵՋ~lNMُϴa `u ipc [i~kL&w9qzJF5;+XVg8إitu(RhH1MD'<[^%?0av43۫Xi_T`,fGig(_kh`V[51Vy#{*\kyxvN@9yTh^ WE,tt\n;V c׷..+2IWFؐ#έw8kk)ȅqk.mOj܁}xTR3fDcYh?GzLb38QokjWBEI6"{]l'Ic=š':,D06rJȿVw2Xm&i mS`SoQn%ߗTMO$VL?W 9O,yam A̲h#pwߖ^ދ.&ZEU.*ϼ|4Cn3|%piF=:hmjDI};M}`KmVw}2ldٙgo/IVJ!4b'$RTnZGAU&G|8K[7nipf۰t=hq2 M?܄,vu&#]qY[Rf.4-<{$~AwGx)|ꑪc„ qç7R}Jб:gx |iyt~=1"$4%s\$?AWЕ_Mz}aYOF+ 8ijˣV+ȍH_{4rN$5 q|. ,2k mg'Z# };nck8piYنKs:)5aAyr7V!Bh^grhb>siq4&6?Eգ=z #7x@~$ɼXTFų*iة҅>VkE@Yf `2*׮7Z~;L66hL=Y1h`IϖV`Ք!Ct֫ZA&7㛏(MvJ`*K<)Ts{)1 yM"3$ůcz&ԈN[*iO;ϧ1~<(Z̃N]c7Ed{ A^|R8ԝjS5Mɜ.a';:͊XWsẂڄ8RB')pfrI:y~wFܹr^sڃħ!W{bmZ+#Hl/~}^\JoY v,ZkTx9_rMAF !;b"(.owm7bO&lb {ٜBdvl-M'JAFEyX1]Vx9#F4Zly~cw q#Kב8jEH20S_A?ܜas{weG6f3)}4G(C;Ox0MA:? b"yϑ1^mpU:kdg&dTOb +bw&@W$搳qzPjpkYb6 4's<(yv7=[À? )yZ5 W~I9 'lpaF=U\ܩ k溭3̌k'΀5ٹ?@ӥ/H6L#NV{&|"{oQ`Z!]P>u*Mc4wo:Ӝ2/ 1L>o /=սf7a0ɉì. z^ l A# A&+. 
JsrK$szDF2<$(/x!@9 F#G?4ϳ 0?qru `ٻ)pl#pg1W)~bGNV?8\g4Y@{oݿ *8k էAW&k$>ʑ]a8Ƅ d8S|54΋EVMz8sQfc1XF=$r}( 2 J8ävR@%j Rvr/@&?qN~-{t[Ҋr6YւtO3r=OAC ,E% t6M0긧kEn]"f}8ҭz=5Je-hJR9a-ŘИl7ЭT}2VڞmeU7aF~~ǜ~"` )'#EUP~3.1}ۢDB4 `{0m[U*5*}I1٭X߅ j+4X瘐ឿi5m"ygR%%Z.ԉ EH2}@% d^F|dHLAXq߹5)t%/}pkX7`Jp)Io]kˬ5qɛU)8Ӵ^=spuNe{I<*܊( +0{vdbj_8.~m@asE%Bk'!b}ߨλsu 8 ZAd(Ĉ̋ɢ;Ħ1dCX}q1zё1-Mϩ I fUUEFI 562@ELvŮs١`/$48Իii[OO^ {o|Fډoj,7`Yy5E"3S-HOVj=g%L/;ޗ%uJXe.Cp6l]AyWaZ\q@:'2b׺}'{υ"sX&5gc/r(@ܐRf,!mI:oY[u2U42T/z+fkM-&ȹ<6+K*O\2'Qg1n(xީC7~ [Jў҈OKM]ΩW(iT݇q A3 @ٖv`Ӳm r^n*D-)"qN=7: V/pDPτf=pa&Ķ\^9u,` yXmMq Qv9skl=3u~?KR?$Inf|QaTp麛ݏ'$MZtlpG>Z6rĭGRQ#ewk":1 7OHwSvr'&B Yڙ&6U `ǍŸl-V8WRT)=3YcET8=ac`B#\r,o횜x)X`GoX'9M}o)q ϶/TmkE MxHѧ?'>!'MVxJN][U*ҐSv ch&f ,,ž5h%~POH(C[w?.?ېFl˚KuF2ElUQ7>:$MqU Ntu,>#?*aر8 P*@y0 EN+&+Vd`grԫɹ:Z:M K](9BAN+s@Xt\>&@ժIPג4ɯn-=mgŀyNZޢNO_Λ`7q,Ii@ .,aJ3(/ZO4B` e܍79(Xcy'߬j@ ASP)Lxp+\:AZÅ* Z_C" 4$<Fz;-$5M>2BnНuQq_0l 3i8R!JG$U'תJOFԍ8ڏ2&pofY/E\(j& B-EA> *. yq kJ^rvs 7VN5Yڥ' SBikh]lHЁ߾~ /(N!8;ursff~YVsە1=\_SdV^u}K%o~qKYzPײKtPp.N Oi`}W&zM@Q/ ̅vƼl4z: q_"_xv2/']>}A{a*[gRKɧ.)[x[nfK/s<[D ^OIAʂ(ߞSh9BG7pI4ۧi"JUs[o >+M(NY貽Y1ON/zHHc Π=^ge+n @+XnѸ1+bD/1 ƦhM1 ,zHcV̆ ֘ 3e^f[d:mN@}]mnX ws@baC? *PQԱ"!]+Oh8MĮ "+%Sa1z%#is4Ɗbh/i #ܖx U*da4Ýq+J\[;zoXk\fBܓR2M-4Oo "Q#2z0cO ۚp ;ypJ[tVmojMEb)5mcP+ adab֯/e)FZhزw?V}QA7P\#:oBF%N/Z}ys(AH~<|zqR+b'63:J"{ru[S-y>b{ж0եa3)kތ&"9ؒYXq:4~4Z9Z,D:809SCO0H~;V((ȂTM"@{F!C= bW2vQiTKR$ϰ~hWeZ\zs{C?C ib +BW%5i97 `IȕFOMOI=773C aD]BuǠ5`d]b^$i&# ;a8HsYT|%~4ErM13Ub'b4ي 輻! 
]G9_^,S^42 ^#-bwм5)  mkŵW={;ʌu!!'{r+mji|r r7#~*p2kЉ!qÕd P u qF,.vj$kL4KhV_|L|`J|'W2ljS?XBl+5XiiJ<6rڸLX0R05 P%}pV;;hPI$>L &cs[;^6@UC)ȇgx߭t9nF\0:5s.>ݍE^ ,2*3P޷pZ& 92?AGEMf~i7nÏregZ+͘nPgAƔZg/Dvz0n$-:tpH}8%p< {]]/XT[닍L9В%\Mt;)J\Xmܰ\+d vm9FɛOA"bQĐ6h~YXHP5)`}2tFU L3#:I"لPIy"0XD43"E: ,Gq<<RG:ϧUo]!P,7%fo/b\;"-Vִ(!@T6 ,5D?3moݎ/=Tf;4ܵvT;>kl5'2+!w(ն4X j3ˀxVyMJH/a,7}U?K"^b!кWS#sTi0d.9)ka'`"B54hQBk_a5qS~C3Ix/Ji-surqU$ +$9By1)叢Uߥg& NV엟y}ќW5=2K7{L>p0[2dWAsp!eB(8üy s%e'#`@ĉxߩ.j ھi[ߡ`DЪ\`G1ǐ@u5iݵ̀궇A3:5Qfsڧ0ÃE'IkĽ99UgE k9ƻ{uh@Qvu.T,-iə|nqn?>A,e R۾j HBie%4rlǫ@ [y06lf]$ko\"wQ̉ /n_*ųN5WpxLL'6΍q?wpVt`O}^TX`iej(5@a(56\JߡoC21h>⬐P|@K)z]6j#27ZԋqЛVPc Ss(UJl()s.[6/)5(I3Ekt_( ٣ɒ8@H:y,!|IъpWH:T>1&8ar$Ç* }>ч,]^A[>\`-&knxSLI"9F9<%ĸ)$,oJ{;/ݿ" 5/3oG2}z6%1%2, q~(HЙ^`][NUI8,Sc) zAk$bW&WCud{%GIw >Ŭ=KcQI/xgR ]t&h0Q 蠌^ϏA v9DT%FGݯ*+߇ddKſ;/_hk-κnz(H+V7>&֝edK7_iR~v!sdP[<7"pʶ !@?/՛hH]zD"#30"+- $>}( Uzr.d3 -MԟIb8%C 6C k];SʜE=ؤx^:`]l`Y]A{SAl^\ 99s7H{VᬉU$j(Œ@iZ-rΈV{s( a[8w&$@3M/$m :8P<Ѿk.V,vL(n'YJ_g7*r#~CR#ȶ^ k]m| \,Z g7 O%2V0K/!souO{%_)<䩪~lMaԎO2m۸Rd3IG΄چ $bS\^wE@rb'6x4Jd|7w<X[ӍYA qwEӷnwaԈFMhvcRЧ] %LF$$:i(9=  SNWkasd4nBٶ gY膑X< } $IuSv]OG!E}sSZ0]q9 &P(/<e(jU|ƆKu0׸!ݯGԣSB˽:ֲT&͹* SnP'Ьueon7L_#89;, QTWdU (YNPҖb_ (bzʳC>LrD/L÷G vi0-Ԡ %$M=w-T+JrY]M- 7αН_.#@9䃊s->~5(Oa8.cTs[xT*oӾ9X2&2'֛lRyK6] cMqM.&=h#f|XDb/Ӌew0cԴ(3F#h?gJUt#Ad4{O: :PM4 ((1ʲW 0݌!i0 LS<|OdWɰ%"6߾h~ ! 
9)3ôt/Z/8vA:Y6AQ1 B և:/$^GC3# WQ1ZHG3lɪN[9#@ۙ*:9诨c$#vB^2+G [lMd[EC|Ñ([Qv DM<v |3=t5~={):YV[#3ٟ7uIk^kՖ1V[Rx߳$:jI'8ʤOb#U:/S5c<2`B3]*-} n$ mN87ޑQ8;o+:Q˱.X@ֳLuh[@5}O%Ht[^7EEn`c-^@۔k%޴bnWlD2Iy=@zࢬٕ#$zQh}9Plm] &1(B,ArLVkV*0`h*rCі=k> #pP7KoʋoF8 NP멠4;L9 ^ѬQжgj ~QuYwcص$3UXPj:s\":ob=yxI_Mdv4>m6QiAP0:B`qJ s}(*ڊ܇?o:2Zߵ}b1 ʛl } dDGk B ľQf)PUHv7Z` '* z%Ҳ}&av__ wد8I|7.ɟZ*OOiKRbbcf (U@vУCnkkB P 'Wg72KP0|꙽LN{җiO8Lc%s-OxV7'Lp@ H'Ouae=Y9Px-m[W22ÐUfEYXo(ի'05sVOSXsI4 *3fH6|^Kx$I-wBA8ŮIc-̅wkŮR96$b88Ц$mϋ5̸QFKX(5[-aFڋYfMP3w<1GfHSFah܌HͲ[$NE}I)!m}& O5y–kW$K e4rvI\M?)EE`GȈ1\ӊs]ݪp@_AIhaĪ^+ yY;n=~%"c@,KW5|; CV_*B\˜$ځȱ/֠?04SVdqAag(U^%'Zwk߃ľHvY.hY„ˉ"U>R;?h&k*}43> ߣ")9nױYYR nA QTjzqR@Ds!WeG K(WD{)M/#%g/#L$*q=Ŗ{Ř2u\[_ XS<=vǞ4O]OKjۅ)qe[C=TV0o…$ !P<Ӿ)KSU 0":'(AEN][LLl՛s%)915:2ߘ[?,v| tcf+wYP1">[!dbz150(#7ھ! > y1R9 1u!IL|E[GޣSJŕ~_Y秫OKbwp& +ZK0RoFVG4lS|Pqwm6k ~(#; <"8{ԕWn*GJX֟u+/-=8AP,C6>3U2eķI'1mL`jኳ춍|Lƒ1,<%3ٸ ;C1<|Lw![]l5&}c|@rO3F0F ýR(O'w`IX* RjM&#B?AF+̥+ VQ= mJج+"&ypԢGYbg5atJ:9~m߶ e! 6g~r(7PUc&I"53XlM$1hUF|G擝o6+{wʝ0jGo`UT-#l028JaAoMK^j5 MP$]u( 5i{ϛ"I)z#6/'$Y⯆3;9v&_>p?ܼ2 F9pͦ=GȽ0ky9-@IJxǶo~Ǽ EJ40fϨfDMDtྨFRP|03,A hNX?nE(sj=ԋkqpzs#&#a I>59CbCT!{ny{C+75P/Y}w gt}e߻U"̅HO+U|@Dhm"1Z3t{OD68Q[QؕZ(FRrSGr`Dm0Ts)8 X%2 &[i@L%ECXe'Pƌ˦3Tӈ2q2ho n+r)AƵ'C=X$Zmr71(ޡaSed}މz񬤑EpeTPwn:#ԆVx7q@FUyWE?ZL`xNv/PbF4mFCG4S17ri.Ze]Y> f*GlE!I-^D@2.ӛVu L.Οr(Q+d('D C@x;ta#Wӝ 3aXSR-jZo. \gm ?rr+nw'Bvԉ.JXTa8YBx=6z'Z|g y;{3[]XZr/9[J3[SB\v.{F!aaF.&cM$- Շ_ qF>Z%ϧ@L{vIi ,e?plsT ӚCC-ofzz @X/v6/{oRd{h~\ Hцb47n" !(GJ>vI41Q%aW2eH9+9AAٟGd-4S{Ж̼L@F{;8ctAY od2q/y~7>_?"s= s ShA. pP}ӭTI6s>b۽_+4^sϛ^:7Fq,η&3%,Ġ.-tw{~%o9:?2V]W`JDK1:kN)\aӑZv{Z @4Ӆ;&zu. 
a-bMⱭrRȀ|ӟcy ]s`ɯJL̥4=BL?jt  ʼ3&{[2X%Gn"KV ,_.Љ!{BaA~ڎk )V^Za8ap!J=e1"vӡ})rP>^r&2bAWz,K\yJw1_eI̮}-=" p9=U8W9dYHFlU~Lk]7DrS=4`T %!խzCs<Vq B@pE(ͧ hehsSPu&svE@!1é͉V2NgQH;$4m,ѩc1k<mF׼0Ҋ{妐G(B@["8i|70 "+x8dz*isP<5Cb:5x8pI [T#3vɲ*I}eIb2 :#PS1[L^ 6Ơ%~{EAN{2JFhh%9}kp)/kϢ?ӵ;M4`J+a\hK02{9}@QqRC[ۇnd>4˶@4Nw돈Q%ç$m堠cC)3#FjcZ4mq͵jdyE:ABᰉ%x{847Vy[-3.ubdB⭱Mhxyt)`Z:)Or1jzIs^ OTa`P㱜R,ޕpx#{5窗nY[sIn$Iz_r`)0 l̮H޼7yBO,$K40c~.t12*.Cdhmkeݖvz0EDMAN`$|8QQ+w 5p,ӆ@x"MN|J¯,w/[QDS|[y$\>/.Ja1űw_B I@S83QIW7" Ixu1ۂ't4@ ZSeؖh]=sh.!Y5)į4;AӋ BC-q5[[L~h1RNC47[u欺4@N%K\*%Qe7ZC2Cr$g$75_`}'N2.Uj-gJ@-H˓IjܹSEv^F0QC[\@PnC( 0_&m#酥C3m;yR3Uɤ< {|7Qr ^l1 ,@3sТúIߤ{wD))[)juWhC5;U s &>fzpbeXW,3I<s]pu׃|m N+ZՃņ)1扗l#!FfYm-<埪Izef[y2ebːB]D:hiۊs^v 5l8 jޑ%ɜMq]|&;z0Ec1 olcѨdΞo{H ԃ_PN+;w]/S=E/{RfapZu@~s)Sq6tkm,0*N*}[z㮯F T龚 b>r2p(V5OAo\njlZ"{.-sč+a&yMF@eՏ (nXX?+(։p8pf_26bPH =/MԋG_u߹J%u6% (u}!T}B5`8Ӝ=,r /`^p.:}rDn.+kM8WNF )0GV3c4|C~ `^IvT||t6G犓ұEe͸l?RJ-+liLQT靇JңK91=K-|xQ`^7[ n,eי27q] `T˱iZs%|WI)haIn.*6J#} OBA߇ld!Q6:p`HYuvXTM *xB,Lgxy,d}8@gO 3?$Swg{w.ش@v Z$aR tP]GRf`qS/.`O^wfXIe 44\:d~ܚI4/uc:/-JeCUyuք]*`Uya|V[LDJfmen!JT`w'I:PC*1ϓhuęLTn8qʘ'\̾" j&9"()ӵehrcC<ԁ*f!6O`_ ; UW= Љ,vSșmFi4cc"k7uN/P街KWy&w+pvH)m])Pz1h1]XyE,'vFH'.Y}`DKyf3CsNKWY=z_Ř&HAxS:`XMTe./rC'L߁U; oYqqK@~y<2{8b|&7ĮOHG$1ζvXB!vg!}XZ-[Z⨲N78^j6G-VVUߦ,%L Q FECAK2E4`K_EUt9,v:3a9^wBJkH0'O^m,9nϡ F%Ӎ*Mt\ $%T7}-K 5ܻhd0r `@Ý,RwH/W"L?@ ק\TVϯt,bĀg+˪ޛ 6=`N%36z`">2-Q#1 6Ȱ.:(!Ɓ};i ƩP;]"^k}v_+ks3 jW4%w궎:YmL Z@79*"~ӂ@6MZͰ;hbj?C TPHw9J2Nj ^gAOweq>{E4n;Lj2 '&'o1E:&6m}<8hmjFq!%Zr@D铸Ts6 VԹ\AmVq?Z)Z-Ʋ+3@c߾g 2׽o2&}ODIW 2伮e!xb +~\Z~b'!!1Ѧ=!aXFxVeĩ{Yi~nЦ!S,Ӵbq 7kUqvC!p)u_Mw.˳[?&o?@:f=e$]}\3hJ*YY%%Jg^8>zM|S7{U*9U,P0ogga']Չ!fR om6 /T@)/t A9}^ !>4FԤ 겋&Rʶd'#h9z "k0l%aFH&"qY{Q[Cowv#L%RM7.eY|`4LF"x|$M#kM*ƈ[a~3ɉ5MsOH@ dO, Ӣ8^j}7MQW5!EԘ؍2h)uYSȇ La @h߅-y15J`u=gT?Ϭ91x1^_F,_QU-HB9-/ϯ4#pW:/_L^>,BL҇ UʨJ_*lɡ^~VMR6Í\g=2< -uT !ߴzq}D{{ JlMGB1}FH8A ;*u-,|B a:+W'| lk޺KV}ƚ6oFn.exf vb^FDkIC(5HJ08tZ yVE@ 2}<#=ojmK0?W_>eg2T^!wu⚎OI9;G(]ӊW5"W?)fm/d9[h"<Յ4y&<ιi?!K)IGccXaW/G5i-Scm;QD฼%erJP8#o9/4`{3|F31$?&,I}EŤ R)FROH 8 
_.9"״kCkfyYh!OlE3xQ7r)1]bߥTç->Xz~L@6趛xR Y?DaKV) LF5E ?0On=y5 U0g>.Km用GSP@-Owm??\IxUD#Pv[֭'(dF2m _i[߽̃uLMDb X\`K}(9, $uMkٌB50F$.㸼 7`3x'ٚ-6>vDr >X>%yr&zmJ~D8]k܁߃c$bg5F:.gϚ˨V/Ԍq{#|qV-|Ҝ(L%T~\kfF+90M_p˴\RM,`=@y%Cqqlv6.yY.m!ڹφ ܙ%>q[ /-XǨpd Y_DR eW-UjZ`_y]gěIO Gky&b /I'oߛ{"%0R;4+SY8u{'Е]%\P:iR6< j"GC&JhhO%;iQO:h2IjzN;Ii5D/ DO;n\d&>mٹ@iaóW f OjVvMa},@Ɲ{ 7r 6١q\F&qnF ؾAI|Y݄i*hk  RDΧ y^h M#iYjƜ^ٺn͛)ZúoA Lɍrܗ.議"s6$;2 i:#Z9vyw5wbT:`vnƚυ[ޅِaZז)xKs @DZFv~㰯БM-HA9ݢV3)[ nefbʖ% ? )-3ژy&&y9A]h@Z`9H Pb]QOaYۗztv{Z]?ѭygI0JlqIH2/C .#1 (+Z9whpIO|O9NMY!8$:D!v]IOP)\ :BA-ٓ%C)pjsP $ ]v^r2lUj!ڔ(ȣwG{:&Yۙ3/_qg49~AGQ`y0331]BhK2j%Dd~..薶RcRyXJ8wX#9dxJ +$[*$fTiD/VLԝ}%JAɾ[ēImB֔ݝjTn {솷XL,X-̼VgđGiL* Ԫ#RV͑M9僎 ;Ʌb*̀>NѠ =7 GQ:Y?Mח{oʛpuѭ-3b٨եҒa7%ڧ~d:E' F8,7P|w{~t4kãz>;Q(}7ce-?>xysuDa3.’\}/T0aD3O,ʑ>pm_vwM髰0a'n+V-a*CbY`|FԓL?B⢾:]svWw(M&s6P& >drf {WpB fꙛ\f\p.{TH8֞QPHNFژ_hQXʁ ]m0+IJVU弞 M{C"?) !!e r-AYUt6?o0_ p~Iahq6#@ߤ׍ {-s:Q^~/ |JWNjh,E0+ypU"gQ8-ܷݗ(C G<9+YN&L(a\v025rm)#0rVkDoQTqFmN~k/ذ0Yȭ4VѮTÄhFul~ 9$Ñ򡘔7ai]QE 3"'\+Q:J4A#ho.yGTڗIyu |Rv;ն1qBi3_Vj=)6!8v[o$ef^LvZY;@˵0oV,cM`z %P>)HY+l;nFłBGd,_,|d#[pi܀o4鿯˵VGR^*~v$#E&x`qHCP&t.WVheYŮ/o0u t[ D~n &c*EMp; )ZP%'LYUra 6ҿjyD8wx G_}ޓD&[*\jGw{즙9/= q7h+ßES'̵ܶ@HmZatiMeAB-z7YB}"Ej"Pd rww͠NҖ&(>ºyҞ 61e*1]KBMBy[P| 7TeK:;Q g䈖P.x!?Y'O-EoCƜ47FqnBGh?% ͜ HI xޥp&h "bIqpJP`1 [FGɵ?h𭉺гnH[&nP*K#ޛ=}ŨԳGח̟cvF]1" D h >#u:EFN}C)Gy$W@뼶Lᮏ٢LPp.\8w4$ 1<,iˤ YX=}0o Aht3Z,a ãs3V/6?D=]F-,k1"z^N ]vG(NEl4I9RŇ~%?Pъ]RMۇ}:/6\]e*r89bt ]`Y _Tڥ 6fLt>a!(KP쇇1W>,zjP_Lg.4aR!n~8sC m9J4?4-u{Z*:h4o 3 h 3«=rWz[z[O0 DBJ A ^tYRwOn0'vYPJ®> 2ĴV=PZFtת//] E?(UC]dJsWi%JOb@=m$*UB@GriqCgƲ5xxa2WŗuDStY%L+ 3]RLX,ADM2ZA5[m<.f`:,erp~HC`R3rI.GW)$[3%BukC8e*ېLiFqb i<"kAv , YK(߫+bbc4P͵=24~`\DҮQxŊ?grNb)Kȼ@;[[tv3/ 4[B )ƪ ,K Dv uN%a{~`.9-JT =ʳ , cfmfu@w H! s;ݍJ%C+b1x%jAz*u`}t`ˍ1B 4T&MۖbЄ@2` Ր0I O|ɩw nZ/1yqkFcy;BGK*Y`HE`\iLzܞ&4O#D9bIe%-F+qw4%t!0:/z.HpOɅc"πvǺ0Y4`! 
$DC{C>|u@b^P/L.td{MF]"܀puCۨp;K|AmRe}ʆwJI4{a$~WxbTz 䔀yRA7Vjj6UbMgf1/LƱΥlq/gTMcgtW7 ׇj-.;H.V TC|7AFa^Cyc*ǥ ,RF+ٲT֤'7e(QAS];ߩPI5U|ő.5UE1"$b",g{n"{)1k9v*]T|nuX6yъT9(8ŨSJ3gidBcc_`h{IҌU ;%*5FDM YMk#Sf˷1l^}Imw)z$/^c|#DB`O,+ng'lʩTQ[D7S(2y5}ʡ抶o\eAȶQ4&~8ǬZG@Bd?pf!}MUs5fz0Q7o+U·\arh eyd=;Yٰ ɛ}AӰ*=m;a iWNV/PS >@˾` b}AXi?Nu{+qO,AS|z؞_OBQzZ# xZvwcwoPf, #f^ZpJBⓨwaG #s!16ڥ[Hâ4Чh .>7\SgL{BjޠTX^\jH(i Ț1} c!`rd[H1"$%RbD1` xk`axPt7Di@x܃x6 9ީjJ1h3^EnS )by#(J 턻pta<~ 2YI{5XYWUQ]HI$~ [V0 =:`cbEmBE#WWOx-~HI[P$SXUX>jYu1`w6udPql3/*!,vc [VA BTHrҺd}@ؐd4]"H2 s*?&|=Nd0˧D {k9}_tQ+98L~w&w,tnFJÊrжGhmwO2G;CE"eXٖv8@ ֢e/5{dpqIQ,.%S.|)EwWکߛrbɾ8 -G07lW/[+1TiicG`6|1.r' |EވQǑTz@gVzN6T6ԡ֥_{PERa GlT K*wñZRvERX5&ԧF#|< Fᬝf1#K/C/iG킥@ Nز 4_r9Q:'dbkS֭$Vfބ|G9"(U`ձ`^{Qtwv2)2bAu3𫟢:BoܜT=GĊ? c145#?q4%̱R@G4xTWŢf(;4?Qj`M4ht.0ε?x+p&z_sW㊼% 2C<ьr9 ށ'i 3\AV_e6bG\@rFF)u2Ɗ*PѴVLET)&4F6bۨ5+f 0fNwW=˨;[Y8NqtwЧv-Fȓyqp?^fWw{@#Y\TDYBj{?|qódU,#6Z>~+i`0+(Y.by6l%Ϳ֊M5Sw8 J(Mn/zYM%o,ˋ$- <+;M±c/~ 3[ h4!+K5'U>sҥqƗFdg=\[<[+eJG*n 1k dzSmtThs|2}QU]nfVPwaIC\Na? jl18rGlє kSuD.k& q `LJH_*x=&KEWX^vxLIG}-7zWz`\=Tj#o@]4Re\q =UP\/VoIgMSM8A#N:mѴ8Ve>ASy\~u l 6y |ȜXlzaDmVT/ 萦)a}S (qn/*Zr\Fcl}㊾8ZM^RwRavfK/Mn6aZ1IXʵrH4( YaRԉ.wx[Rw%Bz -Т^]HUAx_*p6pIg`?! :q>i r}NȩvRf?YXced÷.}h96z-gw(dnqX/(4CP1"o*71BIX㸶G}mԋE2r@ҩ8N;ntC!<4c ѕ"7D(t2:*NDm#G*F=޺8 ispOk`3b'Y]|;G}q}P4$vlȵ&Gd]g6l n7AGrf-Rup}e(a^/grsilMȜA&$#8o J~!^Uҫ/10lkX_}5ᣓ" X f#7 ҽC(7ANO1[9f%ׄ^ q7H|! \k!رdݲZ(GڙU_C<]8On*[w 7xw$_+BKAn3MPH& 6TtQ@@r䡠).{$i"]pP]ԁK"ͫ Ѣd2YiKB./sĮK-tKQ`eOc"ӛw>6.82 X0K#FWNH4{^9t [nvE;5+QY mr3!a_ŭN_AHӤkjs h@X`:Cw.;zNhJP; UYg׀SI SߗǢoƨ^= %;f ՚1>#eRo !l,yPm4L‘mI̟R?髈kB֝IQ'1:)>mW:1b#'"K^0x/{eSP>Twdޒ8msz~U3twqKN` &D3wxw:_*Ƨ(_QQ'|v N[Unc }CT,ʮK!96;1+B.NJo=6,4hrZfyL:8tF7&bUSzKi"0ݛjԚObV}Lf`j!+QmKp 3I(\E5,1P['wRv|>)rgDK]FY7~m{,D"Y:;L69Jg ZMiScYRSWR'q$o} B5o\}VJAqRܽ0V[v"2`r. 
>o)tA?213^bnVX&lb.'/}__qJްqh/d65?=9 d.;C; ,N}2B" FZb5V;yTEYZ摸vľWKaQX6k>u9̢Wt$ ײx75g2$FNKڈi'UA4$K+p3|CED]a۽ {IW 9lã80(~jkLN'k5Kҫ_ mw u}d 8FY"SĘ}/W.l:Bf 9 ih+r+"Im հ[K&-T];b8'S٭lp(2CN28 .+|i(OBr7heQ{K c4Yz-MbL^&LUחIv?5DUn߳]b_I=6w\g#h58$ *=xʲaH?9(p{!އgtpO `=N:Y}IDG$t=)c*/YvQ؎iI z3 q> w .8]vdrmTC߀x3P3X[rt&3R. #/&t0]w`*W{ Kq7f},t5J"'2[`U1n*mjb/rвGxTK '[i<M{ۥkW?"RdRNC:Fb}9REp83FFу慧Dyvgw*%㮴< (y2Y\%%sG3Ca&xmWU"I"ijQ&;S@%%c-Xx9yBkC9C瞁e?2OZ 1ǘiBE>y\/AqcUe6] (ҋ9ۑCɣں)N92 XuKOY#Tn(k;l %: ^i4p_boCY&l Dr!}s n/IIMf'A[Pâ?OcoP {.=LӮxF&\=Xe!fܨiM &M;nBPC6Yڶ;(!, sfFgO$Ű`f sEpVl}~)2wƲ݌,ZskE #ɹܘy d2?KB"B OEkxiJhslk57! R"%9 0IZ})Ń@g7ֹ%flEva Ϣt}3&?Ejww)3t=aB-q@@}Uc@4ʒ,jhtvRۃmf9M8~5:X٭+Y4HrmXIQ >A%duCɣT&k`0!.5i;2ܬvbg Z0 Eߋu":MLZqC?&0Ja >}wl?,%%Ƽ88lBgjR0tc2Ґ E:R 2~r:!SFad{==~M^.I1zlte׳Nĉ~)Yi?s4G8?~2U/uka. )'A-H,# )zO6{Q"q,RZ[CGͩmB1#D/mz^27 f[^ufI< QB7 Z~3mHو~KZy٤G:(k%&N[0i_ZӠw} jCog i@Nse48:Z5ɦҽ@nFVi]{ ֋˛š _Ljd9-"3ϫ`mӜ\tu=xT$5Y̵VzذS7Ӽ8PfIԡ%pe/t@Uėh?{ IMЊmm[G >לa؀NQLpM"z 1ݏl!k.i062rifjQeC!}RԽQ5f-բ fTNլ0ӝļ.JW=kWCj^>,zcO9oy5D6R^WR*rPe]/䳽e3*:ϐjz(\rfdJN,e\OzL`]"@IzFt)uVޝd;#)y j(់?QVULD7 B;@ӆc;[I*dN@oQ+j-/!3;aƠUs=en'\ 0>]e(lHtbE=+.ISR (ZQ,}[F#ŏhGS;h}6m7Qc$:eQ>czn|$ ؟USUV^t[$D嵲f>5q{qGSaJ8A=@A-hW{4gi)q'7~k0#F~v)l}iّH~y_t+h)6`#BwmYϞ!F⦠TqT}HsfqybaqwY)2J_>PW0;zI')~.7}4sև9g27',u j 8Ze-1qU<2lbG j=vPR̚M/pP=e0s9ik.j`Idr ufe`#KURV+O]bI +D6 ] өD| y{::- sm`bG_ٰ0T39DA ])y%/~c6#2d$t]1 ']Y3U} k#<$⹴_+}a֝Hr0 LmuQ( xH6p#K 2] O9"QӽL`FV9sVSyY4J+.} 5 Vbԣ jPȻ%WFtP'̺>W׌0=!@!ā;o)Y-MԑGP G^U M31]*ݪKkpFfԹyWȎ4mR?{b?K'H =Cr\8ͻKc[:CtTHMԦƓ6b/avW$9 J~ pN: Y 3b^cL $~ͮlͶjb .?*7gm \S/FċV={Cs)62 ݁ Eu&lg 5Y_nP)n1ć։"̶r]Ǹ_im8>2ᒟ<~{IP1&GU',@:RMyA"P v ڝ . $㺲S]BLN3M<]ԉT|W iWOȕ2_3;y JÏ9Bߧ}"{BOf3-} ? 
װJ@[(PqvWtIKiNvcE̕so?',4Dv|ܢ@s[´yteG(xT{feq'DŽه` f!HŴ/{Νw!R1R~?rb;fY/ïuS?^@{7NcvxmVKOINcu5iYw>>zϢ>N@ƻ{x,oTb1sP Gx 6 *ʨ=~*=Z!v^QyM S}HrDf^o$}^ƥ(4!e2/?*.O?ЯO^SKd% |&M*&K(L7ZWM @Oe{ګ^XG<*(&M1̾h1wɰaKgwMv2 ۔'I\7{C +̻t}տF;H]>(޶VD: "X 4*W, oȑKg I}x3 aBj WVvBJ>MvZ1Ig/sQNL +^6ή]ˤΰ @zJq*vlv璗R>5$J0OH̄+;3``e`D:`[zש@(pMt Ȣo>k+O tT$&-{:sI3>Q#ŠC6M5a`j֎"x̰uY %&Z:fpF,X?Fzds=ɲ@.d.hnD\o(3F]7ȲjN%!G2Ieˬ^x3.JJOJa]Hob DʞG*~oesoH=d }$mvx$A\ :mPŀǭ?il*$Hʳ҇9LvnwqDZibI1Drp=ŀBx>_6:RvA&(׳aj9x>\[RHJ|ZCK*WAz'C#Cm O$ZGw%RK`;dF]ǞOjP5F8%}y#T:}=ep[x07 yZɜJɣD;Vx0Gۻ܅xi3GGIʟR^<`^V-7wG5gmtt_{2buwգ~; +;>APo( <`dz| KmJ9.b 2ܾ7dMEQӔZ(ZD,8/=l5Râ,Vږzl{څ3" q^FlڰrA+(.by1;$ PG@AZBu)7ƞ2~?o{tWRs%>{#Tbg,k`'Fv#i辷,:ua`P|`ԫԙM/H$TKőR B,f|Y} ϛ1Q7T6Jx!VZv1DʆA^OG D7G< p[l$cIMʳF޼#gX|n ϫxr[+DMtR,Q򐙂.s?9؀pW&k&$`G҆L7Ū(:"K{e7:tlrbJP[^}7іUА4& ԧ/WJ,͙ b,#CSw9!N-ͧwUj $T:mh SqE__B vuT4Co8~5JX"nwr5"v\nj_s(~R!I6~DwᢵHLS3TKۧYժvFA$%5ʶـFmzl`h[eT[X8Uv b4F7s7WE_T aڞQE)d:d4׌py !m#p  @}| e~@{J[؈@0}h<~VHY\,4 ,:+ ܄bPQ34֊/7^-f xz/j* J( m'"o #W=ēXx#Jr0 (KF?z.Snx_ hؗoGp,ʆc*4L0:>vFs $+VWMQ@M/b WO":P6` mܑ4o8wD.,0bar=ܛKg9?^^ptilerQ׮@L.:X`߹6ovcNkl: <ÐByy('Z֐ʼnY[y3(tP6Kg ͳʭfV_SČ ASC&J+IBM;xa:3`eU0XA@wL Uѝs'45N)sv!r9kq /Fg{WWf%wnh|| ܎ X$dN5t waHZBWTod;>2צc({4@/yStf}'56򧯾5&w)A_sO %('/Q0}q_1)r2?سZA"E+X'fR7CG aK. ^ǔE\,:a%;x̙2KFЯ'+Mo*po톱V=̱&%bd>~Lft3̒!ϊ]VUR͛&P!;wdo_!l{r4XOM$,fp[[c&B>sT(T?]gstI< @e9RT%a"6(E@<{C!+Xq}m2cbafr1!Klx'-9Wz@+g] KZݡJ,:l&(ow쏒iWݳ3~!fs5Y[?T4r.mLTȋ5aݘ7;N0\-HrtÄQ$3+^{/v(L SMt)f7JPԚKG@^9f}%WePm y L<3wTI=WN?K+:*Ѫ tN#Rᤜ#bV(k`O,\[_T)w_`̉0scomOL7jΧjai|E'`YZE,5n@ zSaQrS OJکY߰ O44P8,/)[J-|SY2ػ\bh!"~sU/{q^ z_ ~tޥ&.+X|>*bm$lZ+L6Ȧ Ѹ"ܤ1Z\2:_ƞ=\i) Zy1Ut%. 
_uI̕$Gs'ЫbimK:h#y!~\|u\w©k4/k 3Ef|O_I $GJΊ,* V A_` b ^9/t#4I&,~<^[W$cOq ) b]hBJ3ı[Uȭ1Ãbs|X MP }##L_٬RX.09QS$Zop*;-1@I$RC)6\I_{m>ZDF%&>A, qJx]5_1Y MU{!9}pPֱmD(KB?2lZgm9B3 4.h6[c9v;wNdOeRSκ)T/8}Wː:k5cXe2 dAI@D3)fE2H^P3E"GR~36-K$s|xX8lmZXNq@bw_.G$'^M yʫL#)Gb g[:T4P*ɶ$+eZſ`#̕Ǡȷ$  UTҶlm|8 igCEXGX v5)g]SaY-Ah83K}PrM睃šEΔ  ^fR}g8B]Gh:PM=X(=+or'd[J>ǞxpYЙre60 f"Hpp|,GO(*cʰ@uaL9Aݭ6lXl_jxLs+4^XL*Lވ%ia dw 8z# ZD:RD aZ~](-,=qiS#!cM[~"6یJ./K_e2`мRI&&kVrl+uV Ba!uE4y\ZҩZ倵eG$XtH$17^F`S}} }6„ rJbTvgmDOmC?J]w_ͧuB!he `QĬwxkH#;gp1fn]~+@k)NP Q֯2 F_Ucc jphe|!l ,zoeȦJDN؟j C3.*CLHdz{!_yJrKyAGVBš 44v.%ɀJ+$Dr٧uoI-={Io>0LYJ}`VH< wgQ9)HǸ_GfXy33u!ӗ]: N}EUݳ5c=Yi^1n1a6'o\8Yh{}Jb[;m@:k B+ւC`.0 $'cYxq}1+s\5MA0cANґ[yzlV((ӈ>TdV yŋ& )NQA=^AZ3JVVJ_?^&tnM"BM/*l捼ꪞmoO9^T3NJt=c1Ӱlszoy[+KL[c9t$-#*j}Ԓ< IĹji.vWjeIZS#J=`urw>GT(CT+QѬV;{č{Y-0P]$IN^=ս}}gFVJy/m˶.j2.^؉-) j~%u1J=j3Fo+Tv~ ImݔjZNN`f{B$f>вp(;.~#뚻(mY:Xq@' {r^nW+DۃR~FEY]T%]ӣjB}DU.YQMzFcg9[1fb+ωШ[k0ŅWL'q-D_ވ3;V/^9 ukgt+ڃ0wЋ6!0랖eLٲSie$WDtG `ρ wz Вy?S=Y8 hgT.~5B>"Ϻ\9y̼!UsK?ut L >o3 t&L@ށRw\ժ媦X7@]Rt:Ǝ:,)}Tft ; ԊLk֐w*>U_wӲR_%XŃPP36s)`nnJ>B5ۈXUg/dߚl{nOVŸ٧r :Vk:/\'/oʩL!DcϪvٓ->cb31$փ#vkA,l);G@^t ,J"9Đf 6{z4e=%/8PUm_BqDz9U\]Dar@!(2_PV\1'$;Sm=D>hgߗ[ZmE M]sy-kW4緢qzvKh-*Y* lVY]ӉÄCh7ˊ>\sUS8)Q_\Ss] ^2h"o2{ݚY$U7^Xޯ)'L4Re2 w({lu͆m!?f27<>,i(3z٦:L,(A\|S#*DkT'`'zX$K.ęWl)(?AxdG辩pbɻn<6i=%HI YF^컔 >࡚7A¯ڀ⊍5s`{&bf@vF+TT]pՓdz[J I0 0[h&dG!LL?Vi9l]4^BAGڥLuN[I㖨CxNp٦y N8öׇ{-_|{dky'oH'%R*0tPaxW>gKHW"ng_z1~]3)nU;`͝wր䇏j|d c/ L%{oROI[!PG_nT{'7sSVRGY]iArf ٝN[L W['O>wcK@6Ĭy︅pLaypn˨2~T qS]~,&&J?z`Rl; w9rRs@ǵ@~eʊP_o{3HUoC֝IّQvE.0> *']s0%KLT4CI0&4W@,6H"+gr/QϽިRc7Ct\}iɿ%W_3?O/$4&"<ǝ0'.o ,Y{ Nu"Z>&Ui5`y"fiZ wCvm_uh.MǪEa1 S¥ c2oiQ1Mä5˚m# z*B'-Θrň5-B*@LnHңvU/9,`D#xw| @L K =q/82aёhWP R}T=: *ŬUCR͉wҶ:a؋Q~cVS Es-# lF.Egf7zLSԓ @_W"JGEk{0-,1嫸zs;X6WqKwy/ņ "3D" 5zM6\7/Ѣ%u k8^`F7P+78ZTSytp忿sv]( {:hJOv~. 
jnpڃhF㓃QOԌn}NWeĔ$ghY1bR|@v(b *~*ʩA Y$(+d*>GȜGflgY!Ƅ rzd -XBN"DIU5~-F]QgdK9o8"z6]€C"` 1K a~1,2xRKŪ:xtFU2 F1˃n{-d tTX+6WJjv f)A(J Ŝt>PW 8c?|`<}'1eTӸ{g_"o5ETvBc暄8䷔ Ѓ<3ki{q)Ua.Z zq˟NoN{ G0z|z[0o3ў(i ; (<}1[uah&Be)s,_UY(d'uz9矢eaPݎPm#ÿ=Qw 9膴NTd$JpG,5~׶c$ƜC.[-:*U0[@LM߭ucбx7m5hҽ14)I__C_MzѰh74Iʖ5a8kP%K9U ħg!H[~G: _X7*O.\8/LQ~\f! @cG7Ctg,517g֔~<AGbU.7=~Пi`5B3R69fp2MOdY8sT4;x6?p#>5\T [PuIֱSƀ3}/˶MO'@c1tf# U4gg.sprua-nm Z`G] Īף͋^7oظ=ҷ 󛦕vc.c܀6-ݵ*<gEa |~nU@rlc3ay0Ȅ/`7_hK1̊9)k>WQZ"fOKJƎǧ+Ey`f#JM'c3\u+| QWLop󐝎mQq038pe&+,9@V)^q%bn԰#jѿ;4䆝j썈';/Tkcҷ+Yyxe5I#2"nKVz^I8@ R|/7Rvo~XF X"!V~z<p~j*YҒ(@<9DVFO &7UygH- m`KG|f\ 6XĻ|bXb =] <( ՗g]\O&<-9d MoQ:0jU ͌񛾯9'a?y>mqfH:?B`(zm8Q dgH `aS,gIdW3`-,bz0Yku{A,ŅXC"H6&D1# $'` H:z*`#1ŗ/d"iR4ۘc= wK(DmB NcS.{txWg1sΧ ɭ Rgctɭt -"%bpݠKkܪec][sCP~ D9M3:A 7Hu)"[;Fv:lNگATq!( Ton -}y~i)W)'W) FpBB6I]F' 0"󭥄V)& ɟM dyKԁ'Tlf8"H̯!p >SP]'~C,{Ղmw8=*qM-E ˟ [tr%*^\Q` a/aB̞:5qC˶ՎYG'~ӯ?QA1ϥ>7}g/3[De'fJآayN<^Yi;$\ݗFfT91f3!Y )k Jn`r E{ن!&ρhJ7~d8K3xTףgG?QV1 4h<̑Ӡƾ2/꟣YhYZU>swl,י\n:nACTQ&w%bK Eզ @\bF&*}w v$'PU0UܛdR3  Z.&%)] ^-5`=' zy8H< q{F\g!nJWȪe-B$8VD70~ԵclNefY!.CFrF&/q{*rf8Nzt{E'vop~H]urm]g8ՖQla6R.8I j33\?W_2W$LPg=z.~J] Af }!BDǁ%Odq釧 N{$W۵esz, >q ;\ҾY$'۵*{}#ՏMB/:r<֦C2 o{] 7edNPW >`fI`n6{(͒nђ"AIlB*Xrʂd i~4 ߯7 KLvivƸ^ J; G8ޥ]ZOv'%6&Z$2c %G6 ^6ܰE%o 'rѦ 3엏mca!܉d[OÐ#0fD()g|0 =9oZ=Wm*% --P1"yh&\EEY/' E@TwJ2\LcK1>J`횄`ohζ)|{iOw!^\&4`̡8tSH&Lu; {2 NFЊ_Ķ8q˦M 6)kxDe}La?޼JרoJ_m,r@eY@w]4zs(;]H=6 YzQ 3d !C͵u`SP`DUk.I-ܚ*qs$$]S KatO`Lvc₀ahM((ewcݘXbT2xM>䱟yj}rǶk9 V b t d<GrucΥ驉q2"m%!rm84JsÍac#9J'kF[:}Q]PqSqX؃mppBԀJh]m6tC:tY%/poaUB/^qO'o<ضY@MKN7OxU@z(h59خ'bR,7Y 2萬$uH4aZ h9y}3oF)#M2p06UzV%r0g[TF L1` q(d0%#gQ/r`a@陠q}k뗙8uV}j>|;c2&=LiJh:/BλG?V!^ {ZVm[366vV2c}*H'aӠX(0հBzďYh,;b|} PU&|fں%FS-.^i&#]TkVBp3~xeMmN2mɬa2@-*j<ܾZڛ¨C7h;QiivպbCgSp+a; YD7-efJ<6s_]Ӑ.Õ6>5unTK׃U@ƠFN`C0[s87Gκ 0B, 32'P-igS`:G\k<)6g/խ׿&de>Qe(q٬II߰?.vqˁ=^YU|mt}2bήjn 5YJo؝쨫wx~TՉ@OT1Blbb. 
)X4^6O`.}frON;xlU>)Vu¨S("  BHjգZ!3+Ոm= yV%@We]5(j#.hon)Z-nJIų|e3m0p!aҙ*o<(^'|?<%=m*J)%\%\@Ft1ض1yM؀ #(v5B TA?Q՟ `$oJҚ rJPO@[U&+>q8[X}@3 K9TkWfDv5qɬBwVl¡ -w@-<7ch&^ ?fGBC'6,Ly$43"/XKN.y8Q~ҌpޝG+b_G&Yty')a$AB)CW47~Гy8hK 3&?^b%SZb1)tt״>|HN|}kg_{f $ݒ(7Ȉe{%7'x#wS"hqMC&$k2˨;0Ǐ)SsɉYǠCPg{ 8p1$79AzFlc_- H?[X/͍ӧ6R央6T+(3L61\Uxρz}\-?d^a8%gB0C+rNɩ1M戟b}BE(nIq&R)+ ?6Lr>h&gMAqXSzNs HrJ!sC=E\o~ģrx>L 5GcC*Z~Ұg#l; a(}B{}؏Um3ިV w}?6=x5q# yqh(7VD/#:! ´RRG"lc1_ .˓5#8!R_}V)_O$o~ٷ)  +j.jy꘿OR̥BpPnh1|9bks"ee_TR}Y6Q4-T9υoS= !VzdT2dѵ8vd^Aǎ\rђ,$24Z 4^vo`jNKW8^p fpcDQ1>*_MMԼꉑk+j;˔&I~K"UJ-k;)\*]Nq>Ԫp?K2(Q)*0]_gV1-<%bg&g#ʩk!s0Opg@Q$T OvҸ WT C<~>t>ܡljYLHPpz F$z}dF:8p =~F#̍$$6,5^G 0-}YCZ@hđ s NvW6Q̹3r_kn ^*J~?oHϲ2͆_'tY^vPa0 5gC s#4JA kçqx깐}iUiϕrD^Kq %hdD".vyʏ7qӦIR&8ܝzRZnV,'NIwھB2nQ=i%x':A1Gc>%IjP)+VѨDDO0oU$Tk۱OsWPk]kA$X=29]nuQ̢ʬU#״[JqiyY2 '`I+Y䏡*NםN6 5NenԌ4cz=eՌYm1'߇%KUyHD}Tf,]Z8Ն y#{;\B5m Sokc ۭ~5|헒WL$wc|';p#o/e+Bd{ dx ̞;Slx-!OrB^5<rM'ҵ:Wn{:J/7BlÂbMfI. *e|эqCh%xu?,Zg}*`w$$+sIDtFfWc!l&FFrG5mDEQyqS9#R^"L%>e+O_bQҘdu?!PK?LMMZKxX51Zlg"=v*qww׊%0{Wj{91f'UtrP9S'4KCï̓]='y.B9ՙ#yի; Y% jɮŨA&Erro鿺]S?\܋kMf۩0<5[`j'vbFb/>}^02Yduطȩir )Y(挤{)Ѽ%6!up1*-U*Joam)qnIve!쇃ጞ8o.8\d< @>8wlԇi hK=̀<ܘ@?ߤfҮƳe'cY̿o&$5#?4R'ui"e0[òʶTavJ@T; o(0YldU8Rj윙-İ'A\gHh "j%Ty-R=3=C¾I lP{vh1&Zy  g4;R>虜j5+/΄XzO'=ğwA#Ғڌʋ!wjfa gqdx*~' }aB0[G:и ?IGAn7YM]!BG˞ A*>0 YZLuminescence/data/BaseDataSet.GrainSizeAttenuation.rda0000644000176200001440000000136114062436223022516 0ustar liggesusersuT HTA}kb"&AfebJLLO,ږ+ֺEQWJJ1UDKjKRBBLJJ**sBνgν"U> BKAnA_+Z VQ![A. 
livݮNH+TNF&kzZϚ@p"xi,B%0Mi6 LEq\ n f)d9F]X r +?U43^|ɶJ?"c2;i}P#T+?^t@3y鎼Е+smF_o1jh9/痸x=caPEٸ'Px=:3(zz^ނAEAuFp3n2C}DhaRis`Du hΌc ^$[eYǑt%clnȟ0GCKNM$R'fA;V3v L{ ;1짢̽^/f} \OskF qHz]؉?dm8Ga*,y'[Rcv5ۿFҫ?*-bxd' ~V}d?ZiG5)!ZspRΗȅ26*҆^p:ŜlT_+^G|Ssss_hSաLuminescence/data/ExampleData.BINfileData.rda0000644000176200001440000124116513240265317020530 0ustar liggesusersBZh91AY&SY!">8 *@(P@4 P̀i˥mc0Zi`vU͚wjk]nֶ4&tLeSUU" i{2[I٪t4[]w.۶-d ,CBdk4lV&IڥBKrc"ڻkatT +Vmĥ/3Gn[\vݴvt9dݱEc"F]m1JX6ܛgV;k-skT"v.۵m6huшR `ڶ['XPR6RXjܷQ%hѶfD+@[ m5 .nc:̻c&Tzy8]]Ҵґfڭ)U&lRVι*è'khֺt.٭*6ƁSB7ll$m[cL +%[e(Vآj6j5M,Y5R4l5KjSF(]5it&j A&K# @0ATjWfVd fI$R!kYRSmhiJ(U:ݚfcb"հ3* - @J:FU*%B6E(vaUڭd"u{UV4PLB!B!:jġT(PFT)95"G{uKEET :dj@2+Q 2UC0j2#@ЦN@JR""TJ 4D% D@JT T" )@ ( P +઀ PD*J(d(P@(  h#@ i@4ɉS@Lf&L  0UT#@ L4ЍOiFhi *@B E?U?SjCG&@ 40h LLѠa Fhd0TMM i=SOʟ?T=@2x4Iz4d0 0@#&h 4d`4dbM1 &AiM0cHiOcS=I5&ڙO2M mC?T2fSFMG=FdeS O)HdԊ?JR)JR)JR)JR)JR(J_ 4@4f 4 YGi՟m{{s9s9s9s9s9s9s9s9s9s9s9s9s9s9s9s9s9s9s` 4 {{{{{88889ܥt pʻ|U˿"bqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqy99999999999999999(*qo{!PS~$ @ū/aag'+/1nv%"M ԅړow i"@y\eo;BDtcv}Lӵ~q{r9|n> פDȿ* ]̒hfp,MHe9AF3HLܩ5eMZqd rF OhMu*&J)24Z[)j3q .'5:0L.Mwp}gkKXݦu7ӌ Qw.R> AB"9͛7L(pՂ₂(hH$""4RFc8uQ+Hh!&/b!qI!Q)t*a_. 
uϱE˭Y6 ;\6i@s28m2FA"q$ 'I3$IR) 4L( &Gq]b%u!,ɘ$,H=aGO&'9!ai5RshC'1*XŦjO 9Ba,QJaBv!T3q/ԛ0ɊAev Il|TXYI4~Qܖh=T^@4BʙԘD ,T`~LZ,F%P *S@l&FС iCYⲙ)u)Bq),?d'}c›g%@Y6baXPҗ' JƠ.;@nH+lHII2niK9jgl(X-TMh|#Xɨe "`iZD%\iL[K`Xn{BnOY3\dB ,))'zΥs9Y4ȑ*(Ro\2F+p{)5dqXLZJ](rIBRah_whI\A^ JGSXf 4 DQhpdeg-;^P5"L-?+@ȄP]"r65jχ<1/ݿvwҐ& $M"0Xc(jLsqI*k>QaZP*D ft)#)ؒJh4t<ҳ)ڤoS Y0'N#1Fp2.΀V Ġ QTnA4mHTIMAY Qd{ȶ(ud R꠷Y+9* P9sS B!ov@5馚*X)Gy8f'!ީ*Ȇ~R ~L"Q1ȋbÕ<CGsTCq]jtIJҫZXD:,( <12 846fB`ϯG)bI"T8Cu$Bјʹ]kޠtdԨUcba1Xo&jp&j/ys(WFB,E IK'XeHkzEmzPMcINHPQ`ȡ@9huQT 筴^c*3=X|#5z)WB8VL:X<6YD&F4!eʲ 07XjwZrXhI-2bj-)îH) <\VXLUr>ދMk\WQ1ޙanQ4 Y7olmkݲ{0uLp d$" 5 CB,P;!RgHMbsEʒw5#sCM.2ɤdr0XDeEDGWqr`ZuRVכZHRPʪ(Qy { F.Ƣ-2yXV ĒrTiIaNK@JYQzV2u2RaPMv iB tNRFz7 [>Rc59*8% mj՚kTeIu{,B%AERHM՜C'p0q4a M4ҀFIBS2HyVP(@)8dUtiBh)КF76F{T4ֳ5@$e)D'usM(/ScX/ZB E +`;®ÓA7 B‚bpE9A5cyHd@*pJqhm|<K6!'uY(C9R@i]᱋[3* -'[ NB.աN*7 BhJI42m$ra5(l:AJEp/%v59P42uNStӝnVgaR>(3 =<ڒf95ޜ槼DS|qvp]!.SӮ*m{Sy k)n/ aN9W@-#K :;q 8҂]W=([Lt\ϒX64ʵ O*ẃܸ(N`,uGJQ+XBhdT Ԙ%&C)RdVBY'Ie>UL@s$'lFF^ESɊllץfL* OibA (iAPğ|f:B#B ^,ZU7pRѬyf@ Zjye0!x'JkoeVb̖$ ITuV3V +:YTT`a8eP X߮xatZV7紸k*`(Ԩm%y`##57s<(VNt'J;IL3S߷s&V0Z) xi>iV$z'óO# S^DSK0V@p2C${*NrC)aLPI&YvaIT1ȼtI0 2|&E!&еop+5-&s#Z# lg =UBJq9 jIf"l˳i+Za*R̾7%^V9R:k3`0G59(Ćl3YJbwzot"2pb'H : R.GǝvY3޳B\o+Uia%aa [kޭaYrVAƥ*'DAuVJo7e^:*u&$K4[^,=GߢA(3V$ .Ac,2۱by&zKM&:h"gnvQ_ 0D.مi #M(̠| V#SSrNFioL+4 h B Ƒ tͥԹՈIS,U3 +JІFa$l;2\Cu|uP,bzVU>I⌁4QHxlÂ؍kW@3)WFh;VDtXRFߔ%IL-;sCe=KLA]lqEd5c8UߋRA ckR颍Y}E+<9 c E…4W)iaHX%&7?%zq-!XZXj+ Jj(uP'=SJ֓+ZJo)fLF$€A^7}@Vm\R˝2y.bȁ䦤MWD,c:%Af2׻ mK(P#oZ+bݣ`h+(.EZqMd$N帟|#@u8ǜde7ߡǟ2|zZ _$dNkjȗF@DŊMp0xYIrîMJmaYќr+LqOl*gW\ꮔqszRھ]Ig 7u%oZ&@$(yney IWHPpD FdS\be "[Sd:[& iVaNIC|cj N3[1e;50:!:'eyD*"n@=ZA![4,MnX{ VbQ[W(P HJ1Z:<°E֗ӼH\iQIWfN~e=ZQ$x|L&C)#֧ftR\bT,LAp3̬'px:%EMDgY<=ڊRhT@@MI%RS%dVM1{سZ^|~U|;VBT0@J tz0q$)K<9J{ ]ciKM4z еDFIO)E{Ef)N gc;2S'ІM?kY~Xi$OWomY_zj-LV2Uï2 8ŷlCXrf]Q)P2hȽ53h eG?3y|avDCg72O 'XնkK,/Ӷk)cuAyP`Tz֠c;PuZL!ZqHwqP (4$VOTڽ뵳{dIV]w&YyLN5bk֊Fn?=XVtOlsTZHأ<4ZCwS":T̄9U0" { NVy>|OBqt~:4NE}o7n28]_r$ʯRWco6 5ttEh2ݟ^oʽ 
Ѻx3&z}Bo캪wҵ9(}$hԧ bCΨU KY*O M9ՙBqf03NQHj 2,Ih:9bh}*mz 7 s )ͭ%&%a@)Jm5LGRfo8Ƞ6 ARKIjI2ѽR,sYP#De\/$ `+f<٥-C JZrNm4qD~Fvb[t:LBN WapT;(tA#y5,N0-l1)2եJڮ@@h8xqѱӘ n#ثEX֏EPʄA@DVCetM4md)[}ը9vvU 07^7 ~P&yHYK, @E-#27U~:$Ӆ{{OԸ}1wnl{/DZ^b6 po;%J`j=Ht)4B.2PZ*_o-\7z:G,\'8d~0>}:×[bmeçuf)0߭'qH,8tuH&IѸ2ɋMEF K|[ I=r'ZQtD~= 7rm3d2a߀VZ&2dU}> o||~l ]H d * )(/I׹ra0'v)"?JfDH < ir WKEWk6 ȴQ!X*sG')7Mm}L7םTl4"rvįkEm)am',Ai +{ʢױ-@E K:HQyIQ0wfNpg fb7cP>YcObK_+kN[Ƀde$!Ģa"C^_PLRx]w>qPPR5/V NA*9,aqo|,zg$)tRPe":͞d}בi#3:ׯ-BuF6P7hT6PpSpQN0ˇ xh^s!rq{SS>l|6g5t4^8$WVcD-b°>&}a]8(Jt[7Q=Z-#S0gRSF˶tV%|JZOk$~(f18pg: 8I_@ vZGt+uOP%TT6_aKhLG@sxxe~۩Cߌ7& |w7?mSߘk"hteRef[YҬF+i#}jT“->xj \+Q!#GW`Ebe\A}MRHF2xi^ihlmdb3}+M= +4euelK^3u\[E6V. }[(m5qoQ\D?$N[گXVb ߂|}Qtʑ {yڛ*kO|_S q=}_u 2^j3f 2S66i6D(*>'6ͬ=" tRK[~?3*CkfM9 pc"VPFʀj?r8bوֹ\/ص+h:iZdTxqaJ#\ Ӵ-h^&+V*ֽb@DUӓVj6Zuf `:kǨ6 J*R3(R\\3S4\Txr1 ݑ!d4n[qM "Yz-+!_]r'L(\?ljS<-NfwY .$8Sn*8ا,z([O U fTgbyЛU1㶌J>>>*j۔R;ݼV ! /ՅF:H^+ABs ;Se:(w9t`'Zh-&cd"GC~,sPyw6Xu`9^bFj%8[@˙|64Mk%D~e`{{Vh՘ 8ݞXsn*8xsw9W͌(j?ZhЀdUL1{6+Cm ibɊ@1s*R*9uj䌖9Z6 UY9M.ߦRbӯBfڇ^bN$Яyo# Qq vEtLih?Tj.U Rs8K7=F` ?=Αg[SPZ8ss%Kk2MPnW*sVDGHp5f|lE}sL[NXR{2*5& \PDǻ3i x=k}'$.ǜ?M'UWtU:d!qfsNe*i+kkm%Z/~7AwRŠ~vx'aiSl~' {Ү+ j&m:=iAi&S# Z Шr UVz"7 lTWdD辥m6qL2c):+Ej [+HH8Hr/PtOJ*qwI[ew*7rRSD8>jE:٪@'hdc0%r2ÍzcHi̿-">iwq12D(GexWfҹȑd(i\,St8"2(-5Z y&xŽ0{e4x0CĦb0vȼ˹HRΛ- :W$K [>:;M@HFYm3*NF6+/F@l}'s* Dh 5 Sc͇?ڷ|%| l{╺ta1RAwqւ^FTeo1'miLM;MŨ!prIRUj:bn'j%a03DZ[^Axf齲[x'{\x(_yJt~E/s$n. $}ClN/i)kONjBS 'yV.52EEBRO!`p4*'-->!qOH2ɫq6v-i?%O{ B"H'zCb-840PXK&--kSxU"?28K<.y|bO5R_aVU@ƴp {;"4cWSҨNe[NC_6iN2+.xkwGJ4FqcOYđU wQRRS۞$q ad}eh'ہ`e9ѝ61ߺ\.1$aQtAd 2 ş{5嶇%2LqjA]ɸJvqhVNU`ȌS.hnA`ijpV٘P$¨Z$REutGmA0㍁Bn.?PdCEaqC3x.4Hi9c(`hΞXGTAع4i:(QX._]]+n&f@alٺroLqIIK+:lFjB3L$+D04R(a3` (tG$xK ?FΜf&Jz`(]cvM>.=v+sNO]Ayg3/iޝ`v4wqnv]ݖSt,Ѯs?!Jv-1 zoc+uthc.C_ eX\ ;?d'4 T\8u̕k{vzi}MFQ(4Aͷcѫ]Xm*V&aسݐ:DSLA;!v!7^o35"C\ fAA+^BC:KVEJ9,8hTa֯& _K<7M$%`**Ğ@eﶯt$^ttP,Ԇ̣!" 
߲ gܬʒ .cABȋ@YS26UO?Zl"{+,Pۻ *ib|hG3enP) X_ AKo=XǏr2-y; yolxGé۞5Rco8%k(Y ?kHl ,z0s4ۥdC`V~,y#]\21Kt;7Je33^\6t<=w\\+uO5&  }e%DFlGۦ{Qb̦vBQ'XO$EE1J$AThQ񒈫V-K>۞}zM>X}M+L]9ʭBx]ËM1馚d|eXk""4H՛-`tb6o,,XޠyTw$<z/wVH GzO|;Ug LyՔؠf"5eI\!(AM!Mɼ7S[ 1@J_aQ$xqPC ״_C]WK?uIyW+?P Qqh~Y[lzO2af F0wR %:`{_!HYC$RkWc>iDewV`ǠDZ! + sK{8J_ Um;r]3?uӆfOE"%`dҚqcBb֕{֨߷řaHSHԒ5(I FDW6*YNZO:Mx6YqkVxs6 ŁbGUL*B8lS JʪosMWweX /qGN8+DNޛ-U5dW]:ҙj8&! Kc3rvxsU|{( L3(A2$д̚rɞRR`sdɉ mcD2bWGWz.pRN ( 1jȰZ2Ud m O*wߡػe 'Vd6"@yS+2zӾJ4j ;ScX=hXt$K2xL 2=W^h-I]q ^P/} ٻ4vu^щ1F ٱ5D@4RyYIHGUP}˵,ߜ'Koדٺ(t*)܇- a,4SR领֡ V>>M}^ XZM)ɸ4괇l&FeFT}c:ꥴ'F h>Ex AA2X'ԞR@5 `(ڵ6-EOf5dU̺DVUR,D "0t|+$ D)n+.J3S}猪Ŝv.RZFУGJV:Z,_[PaC91+^C)I!d)4R;N_?5>ipv'`eS.t;.ijoXsNV C@#Cl O{W|rՁ5dKeՌVA7=A{=?^JZֹ1[ǞJ]χvKS Η .7 8ift!2~j-ODҚAaG9؞Ј:lm" %lW6X9J) H#F=R&s`hǴ DŜk}HHw 4ČQC$l3֩B+3E:Cf'慹S/D[&m(tSW8-ԴT)>?B cwk%3 0hY ' {lTQRnvUz'_k/Ռ>7|jTï,+G}Ch r^Ik=DCo#FI3#9pdžH3#-FN7QFs^,RmY%XAon7u"z*ڣJ|d,Nhjp  5a.lVK,L~T10b#A7|ۺ HT!]CNrIoz޻uuXx=o* ,]D\b)Dz' r&o}2kl%!D;R[mFcnO,%J9Sk A81HxRٱm9 m K (kHln3}  /4(Accr0#3>A[@+~fBO##1ِPMeAq-SUdKnD> =@mO?p6@<+ű8\zjXPF! Mp>(U57ygx{gٵjسӋdefklv,ޚ68װe{VƓg塳¯~S)5[gnAˎ ~S+|0>@NV }9dN  {^5[u@Zۊ=s?x8"%%^5ũLU&eKLJt(ЧpĹ_e =-KC4YTTXˆ-7uRĪ=<.N&Z9|5^YNK+oFs43"f9nqOb [ M;iơ4`"嬣HMGMLC{ͥBxGJ0l)A6TmfR3>JvjR=-+uv+\S4,j=+ eV҈ZuЖΑIupd(u$"DAal-zM6%Kޮ hD'fbVkD0%;0Yx̂p@&m־bpá\HOlAE?X(#,CÝbI2 #;j[ l\L :4 *t"Y-bf"B ;Z<i֜~:;).zePRU zFﮆv3骱I0qdeEH=E8ozGDM@T UW2 Wxbl>!= 6&ſ) mc43P:pU;ji;ddg7d}x} YQ\KL)v!zR44sZmT N |fF>d#qx  5%WrɣB?# =ͩ͐'B v>}X(b=L ^5FZxv w Mj!k~yPċu^h$Q`z&f+fMMXDD+ Nw@]e͖즽6V*d iu>>ɜI}ti4Kv+J73:M1 l73nԺ-EƥPeH dP>DBDjn C:Oz>m]^_0?P8{1O*UUws+)йfJuB%k G7!o4lfcQ,NZ`}2DI >_tI *lp* 'Ψ((QD [ ud9&PʔU Ȭ AǙ}L(VmD+Q3 UF@ξnDj_ #N"Jl](iXpA <$F@AK Rvq 5)h={*`n;vDң+^>ҝ֧1؋lrK!W)'OUˉ]-kLbuMg Ca?pTR[xwB:􍲧fkYŎ߁αsEYfdߚ{$997\}p~{4WY5a`XUkptLط4 =qk QiT1cT*H,!L'R|zKtߔKOLOwv=DseX'N0^a#,+,,:<}O';HMD$̵6aA b` ƒs52Y @170˼ie3mFμ={xchW40gw1 6B 4e-▩%$k\,7l ^?# }! 
5\ ҍH$B>Euc»yӚ?V-+qZB\ª *:UWz\S%S g:M҈EnpI͕Jb⛂63ϟ3}|,Xdz`'FZ-l Q*V 5B㭹X_oe,!~il)AGE3k[ۼiͣNph-=~GNk\>g?I1eT[ `Zh{XK@e3#RbXأӰ](‹ 1_eGimB&>V Ъ6h[Mq1;M0_? "Lȯf[5l#5Ј&^F!8E W 5c q;fZg2P1W֫' =fd>K3;EMci>U*u,mTij`va*X1_uֵcWёӓϿiQΞRL]R%KtMb\vk 3>ē wZq)};&~bGffZfj=Fynb67&/E ;^;15"*Ts '0Ɣ躡>tD-Qz>DxhȼN}Nzb5g S.H<,›vGvNTax4d3f$%}[;*7@x}[o9Db0pfH'SJ.S䄙ԁn,mڽTk+>֍t `PL4{挊_ǺtSԮ;|3n[MsG羽.c3H6VK}F[-oxI[lbh<.ճ3q-^N7 H4 SpHfE+udzK <['M942žMOήpν| dR7fN$&2Q6GzQoKu\ͪpҘ@*1BJmD*[@_uW:Bf -c0]{R8 TAp3ŜAm"L 8Ink3<-TE*JǚUXXU=7StB(## 2:63byh]sb͞61GDU 0* FU;)qz%D1t풃c+r}#E^L"؎xF{-5. d֗jF0F" 8o<{0!ŦFFNN)|1]E8 y^Ṷô2-dNZ?̃6z9eۡAۦnCaVJ@fp=ujDm5;b~qeG(їۉmd[x7e)WTxݩ 9^0](ߪJ<ʒđ|Ŵ MD?Hb|NnL}g\`D5y\vb\V*%ix VQyI9ٰB`} x&֌2@':l]m!Ej8}֧jZ` C5[ٻ"hҭ'440}4Bw3cРyo,v~ףy:ݬͫ9'qvhxkhhLt(7DQsT-g pӗ0<õs̰^E;VB FϹPCd&lvD x]D/p7VHgB۬g?]qt980m nCD{>=4j./7^2m 4gaDfGb'y$&ӮqJz2rboT\׮e7W'+vʐ_[ut4()l!M[e͆eaWD<q/hy`D0$Rze>WzNWT<?o "xoKUZ 8Wr&e7Q-AH=7k-U!xkLN#``X7OfP 6%߇N2zU%JJ2/IK0}i*)wYZmBCA~Uu˜ٺ?;f4FQz<꺚#~7lBl%|-5P {^ * aSYköR)wPaᢓ6m7c]gTCgw4='qø~m7jc3ՆE7Eo OL_m!dEnN<G'GδP$/)m-f8n(3y@42Cb Ãi&Xn 3C h0P%[X]o #/yؒy~qk|2iwZk6U@d}'\5z:e:,2sFC;‚Sa/~LݠJKǐLmҌ?> ;G}WG;=#PhA}WpDŽ-)]$OX6ٳ6ZtY ]$8&R}K!]ƃbTkk^c_ [Q2in1 'RMl*6z` ěCM^^bN\AԙOOc$7Y ||| A rU3Fms*r.l2b g$q )Dρюfm$>9"Ͷ(w`b`=> /s^TTxaپs:'!{EѪdw@EݕMbhTC߲H.iUs2ev=u{n` JE099^xDрEßve[ŐށxƊ1z=A4n S?k nE*G^dMㆮZ)`'ˬ-@,|rvPÌZAj)},Ff,wP/L)w,75`)k-KL0YىpPQ$xyF+٬!ȫ8ɥ49%lM *wd !䳉Ma19GmhKjq>\]LJKNHl*f oSē6Y@9[3ttoRyqG5= dzWj])c !+uH<Ȯ)Gt1(l#(z?Hٌ2 Қ@891 emPA58[ &'AnXt4ä ^x-1V QX34NƁUt `Ǐ@p4SPWs]Vhk(@|UtY70V̵7>X"IWTk78xE# Y@%hýNCW l)r<>z;Ti V{IµK!wڬbT00t=zp K˷^I͙dD7E5:KYëk+%31"/pw0ƴYovV^.-O!Su(cL5x6j@-@(!`tg ps+CңcJ0D|kH8ڪs7[l]U5J&:2E}tQ.c;jaC$ / $FJc;9BycܕۚW Hzw- @KpjJtߙ t^2T!nu|q8/r<ROO%WE<ͧ\c0=MzhAlx (P](9o2YRDOcQZ\RJI:Fl3Ōy9p<9_U^5Z9yu|JaT ;TADfδ a0V3r-RnB%AѶ]?U "eHnzX'ffUnb(*qe.iU@ t&fjqY)a,ZMD=Mg\tQ#'I%N_^◥Rp?IwU;Ut9 #9hiY7$!e;^I|2PyWpk9 $^79S$U)HW]lk4ў\55(ʇ%I0J\!.Ȃ30֋3Ib/5 npO6P&*Qp若tx%}>AuY*sl$mF[CC:OaGj6Ig1.9g?Ae~VMmt@PT]fJ s:I);Y S+wїe"WGC@{q/K9:aY4-qn9!! 
]/T ̥O3WG],ݽ:9GaydXZ"CCn3p5f0\)t'~=T@j"0&$PUj.H9% >pɌ5$~LԳ׆etxOie94֩ݕHL棒 q7 "n\޳@ӖUեc lkeʤhUʇb'GNp0+8G-#puӾD1:! (ij뗪>&~8AX=}-m^ Λ.Ґd梴.e546-b1ɂGZ7=LrrXRW2orľNI"5Mcgy)g}+ 12i[07׸M>Y4UCїc =XIvB%vƆ-*Pn9э+-ax˗ěM^W# P-L47|3^PdKh 4j&d/X*x.a֔N8#y8ODZ i_T$g](vzߦK4;%(&*"t$`ƷXEsE84T,!}qpQN.eY~!DCbU8R}_!{~siH=(.c>yAרT<]HQ\eԇ R=}G@3ssw]2#A2 ]~o~ e/>D/3ߔpu ID(PEdjC!i&{$aݵieu2%ǁM5>@`' '{(a24W:Ϋ:@{zZv8ٷؑ}b R 2 z>k [αa0Kr2ymNs xU\+_n(Psfa`fv煠ib!2PlI 暛ަy]~ጛ҄SXYbHz J1O~9r4-\Sh@[290jS[jک`vrri E IgCj `)OS;xtOVɲHy2z)";6'c29ٜT (HB^qA+A푒A0~=_D7I^Y*s?w,*L~[{_~ɹRy]clt}!,sct)mA )%@KRYSHGҥDֵցu]z=bb[y)!i?}"H Ca[uU> Nu"2AyF5wCPoZ_z-\ 34#ĈIAЦ@Ry)apBi8Z{S@p`AJX7ҾM`*w>M衲(&Y&+`>=ls8e2ZUZW&QaSrBp*ޜ_E-aS PȌd5CN@޴Gp/ܓV\NW^_c&.j65Z9 ?_Q*`yʟ h6 2R=3SCC":vapAS>>P{Y>!j$۴( 'Β=} :7%|~}č-S./jB>}~QVA\5+I kw<>LZ(Cv쑧Aܚ q  qmaDgVM CB l ɮeJdma)Î}u"MГ@S$^ N7k;1 xgA,tdĚ"ڋF97tQV,jiCw0Б!ϰS 2tN[]EO+7){XfWpsy-OGϔq fg,B MU߶h-qƦtTЋP"_Y@2/^F[Jo\(f$ځ,ie81d%BJ"aymf˳)y'=n29EŒGݙwC4M mS-7C-/ IvgikOM7үÝx]mNA|94) ?5OKlj8% ^Gn!iٶǠ3&fC[M{GoVԥQǨV%d&19YI8սz' 8,hSuGHkF:i<5@E f[PѦ~ݧ(mޗb<{!-$AIt D2ÐO5#@;n*M i횓h.~bMnE*6rΰ\pL`H3*~Pl݄5Y]ujeJvO4iwP~$ܫY6C2rzG(M`vd#}l%0>DJU 2.LQ!,~5r?:qqֺ̱ km/z/.xܔp1$,ZDag  We)skTVOYB_^ E"N,ZX䕏(]Ko7R$yJf/ɂ CV&^)?'7g;m1-DT O0S,Q'N)d{92 ia6EO>ˉtA蠙dZ l. 
)(~9ƄK)!v1'tZ]L7×" J*C,RAbv1,K`!}* F "V |l.olMľ(p)b2a1 d:WR=imWT":2Jff>qJRvEuWD?y.ս>V;_OdL`v" }3/Tx .}W$=&Hh_:׫ik&7/*Gea.(wKWD·0]&7B^еhH fbSp#,4ƾyO'FnD1p~K'rNj) 4P"0A ށ'Y5dh7jVAknV$mVROAbηEsI7!uGŀs{Utn$}q 8F*O etvsARzҰ @?)<#䡢EpBxdʒjQ38w) ]`7,>!yjd ]khC&6.VWBGn CsLaeր߅2q[Z;P3M8;SՊMaH;5=jLA6g9hUP7J 29X`[eK$nF _v󃤇0i@7@0-+[.2TY 2q(\Do53:WjO92L6#-<4%P dX]F`!4.%|D'uB?n#n#3P+Y,bA{ѷ-V[h{-ױ)E}f1B #q 8W VK @[֏o ;|3ckuʯ_DŚ`YBücenYyyXp~e*9ۧIp8V;(HrGtzrQ5;Z>bͤF~]SHW_y^X=fVYfrc%84S1AԾ4@$dBtert(zN1ªLjwQf)0϶Z Q1Y]c2|QW̏ʭ'H\6BN99̒WHKFoG,HG4 I ݑewWcX50 szJڦ#1b!SBA|׸@Y6{)02~1ssgyF 6^&* I3b v |ynn9< J$Qs2tN []VeTT4ACف)9҅(T`@'(NG}8`T*(ˊ뮨eR20W;;K@j TeZLǪ uX\ؽJKxJyJ'~ӴF%I45S~Dc"DE0 8!á69j[#cs8 PL&OCTQɓ-^s޿:9r٬H $8MZA.[&?wtW1mÎ!ĥ SC-uՄaAӷg^QI7j&Wf ] hM—@䘅u&J`"hmԟ&<ID5!/Q H=gQ'yA1JHc;1ƥ>5omwE5qq3ECBȚAz.5\5ZDǦp^H Z>9UE8R4u!az+MnD&.+k]kY<MЗqBJ+\y@u|U~>$GtpmhwsZdwax:H'3`gK:VO~w󓪅 )8,UVggX>J%1Sa% K&q9zn4yߕF>/v x珒wဵ&ۮ=kPh! -~ 3h?_Ft#pkp <ŷ) j}91t!㦑!ϠN bon얺w4Jy8>R&avr GQ~##d$/Ktzl@eIcPhT!ח` ;Zs^{g)B)^,dw'Ȱ!QE>1GDjjFSk[|3s=+3f~wa%,9bnFNqz/; $x!)*#Rr$?) <Ǵ Tȵ  A&[h=z1"Gn^UTMIl$L1#=M1Ih~"-՚V4X(߶$4"ڣx]TV^ Ұ_J`#¸F3tk;v*3iV%&9N$(͏\  Yi&>vN̂i~C ]ZlfcE%d$|$gV VOYjt9/M,"A#UQ4r4,lI-9 ZKD^<^[vOP=,V1aFNzDBN/r[*-G=In/~r6nq--=kBgx5gȨ:rr%R&X,.! 6WL9)Naew7qP*R7X<@:@gb_/Ԅ/jܵtY1aĒC.Eift 9{ԣu@V=KfDͤ -0=.AM*Ey~fR/Ѥç+8g~]Oy(B+;À1Lѧ;"-L|*Ċi=\"cYи~>.rzGW.gJywU&4e[&+i]+8Rl( 6:A"Q+;=+< ]@ta=~3(0~p:5 exWQB3_czl甓jxBjKh< az1'sdč="ɂasmވ5hLp{-2uWU3R(1 E :*1U-hA;PUl[w2\Ѱ3UdzOჹ3/x>cH$L rϮj0aI'\ \58Z)IUx.QK( w6I=yQD@ #`{N} G:eqUlWH}pM Ьs#Tx! 
g=CO8|iSDBTF.rԜ =G:5dE}ì7rxEwC, @9T.', kqw' ^tHhg2~:S4߁FFDfM>QQwmlBb%!*,l9Pe˼ D8t ތU}k-;#?k]7S(^& LΫ5 IDtnܘ35egM?nћP-BV Ƞo$:4=")mGuċHdC^Z _4Ϙ]Ozp_⟈rي_~֪{"C!ھ e0=j3bٹܻ<]]JՔ19v6hk~t[muϩi-NJ72KE<mkp:βUqƖH\hwږCϹ&Dh1Z'+oF,/0]rȬNH y6gNYBth93X}L` cIz(M.E< 6T3c᜵rZ)`_!=|f48R u(묟W/14%:H3%g=KJGFx0  S8=cRSP3ot۸HbeqN#ߨElBݸ\b ̽tWSR569u^AWst=8tXZFcӘͲX*T.zmuYOBY0'IS}/YB ~ڟ"QεM@zsl7پ}( u5@TRK߬ [/r5گJ*/YjΘYjM.v5O)!5D*OeL9X7켹"Vc7SjsH8@5k>Fx"6eDi{\:8YAenPP͛o KUu=':bc>c{o)WIv%ж)~q~pΏ<0I"-gM<ݏ!&֒@BE@ T]ej9ZU.!kQ .>H醅179_:BmQԔP]I<}hRGzzh͚(k}w!ĥS¤*z mm(s٭b Sȣ,L3yGQm#=g٢'Qkץ-6-w\ykrJ38qتCTħm~f,mq7oV>;DVitC(NdF2dZ;~!-]C#Х+YFv6 |t.c۞7LG`? R2z#Ŋ%u7:-;|3Qͱf NK&~u#6`BSݳ;1wMׂɹPǣKNTg]16*o,6T>Y9zQyV8cE9Eco&F$g4JZ\xl>ǘ20)>uOn aޚD\Ţlkٶ6[JHH |MC}R|[F<"(NL<1nVf0mg)[\$|C O5D0aeQzTcw8@.g< ٶrv(0SeMyuLoܞ)f'$s1 L@Ͱ="D!,-uMVV?}oco(M:pKQfLB?W4z/, U5ߣ Rb~HoL1Efh]tr7Bb7c]_ Tཅ` u,ڙRC-Iұ  6ׁC7 ՊqE rAOH)Z,c<y K=&iCPͶ<уFv R$kF4SfdYUsU cVs=mנߣSV}iƻ>˰w$wblfvB9KDW8<}OQtmyM:] HQ}^}Au\1U& ` EOfDyRCJx)HsWR@n=ToRΚF-x`I;,eqTMvHI=T3{c $e"OWB=؎V}ڴ0G˂_>JIr iDO=n(;/Z,ڞ1?mWW_Ւ^U\v'"P &/q3*5$o`ZiZYH ,9x}\,1إ="r\:LFbXdPF-C>IA[Iʴa<xߤG&17[i`{0*FXOYЉE /E9\*PAcR0۹ZiG E_RygDھo%T [J٤| 4N!{dQ@v⛾KU#A3:m°&\6V vG ksgX . 
wwef{GD§U&tbΗ]uA(p,Ej2ik]:2IW7t_&nD+m7x:%o^y ÔW g,Q|erya^> Dh7sijW2VVf'h-WZs!#x~B7ER #!:S S >^!9f"S.g.[8v_|$ΣkU:ig!đ+ 2w5%@iˠK'F9׌DŽ ]x4.l_%R:%uA= ^IjMևFM C[Z+7 ;J3;^US.ۜ/E(RvHmc-2ҫW6Duz0K` SlA]Y ]ٻSk->e{4rzL=D5v IPxdˀf5$^#ګ yfm(¦ ա-)XsTˍ[,y;jqcSӓAXQHLb@%&qؽr>0^ _kUGȽ&<_`~&mRIkHb ,6=ZMb3Dt 8$Qs0uRQa⏫HL0E.ڹ(osEɶGeIHaOڤܪV NDu еDg SX)33 ]㰜wrZD$X}@``_U'Bu!$5p`B,Y^W ͹] xyml>xdjcC6G3 F7aD>= +OSĵ7l?:<$h3!JbݮI@ ΒQS@vH(6fQA,bZQߙi %7-`90#Ӌo~$'# `lsl0q@v3 <1`Z&?$?d}s;$nf)&zR܀ ME:?bFYac?.7G G~Qt؏+?Ut?9x̭pHXF2x(F*ԿA>awe2-4itA-'pHu N)Lnr[/ah X.l`pKrKg@ 9v%!!:(,3"H8F W6WKy ¨%KA~cӔ,p"N5F'd2alt-/H6qc$lTTW*=q%&H·$H:*xx.~.$ 99,lYAE_=2_GBH$nqs_B$I$,J#`:3KiU^9@ȯbG8wそ̷q~O@~՚[]l9lh@i>+Nɬ¾e8TKR6`DER07 M/R]6H\xBNet|8SMBl5I z#9FJ?;N0A0nE%Jދa\ 9lBJy  X޼sDy?orw:ފ PEXCV)H壻3^f]Q x 1o͞L3![:orb^2nSSlNƐ`sg*c1(4Xb 3XV;J3,B$&kU gvJu-hLEQ8ܿ$7|4Ғ٥pZ߇(E'$ʠJ{˷ R 8Б}ǢOf%kX#􊇔 ^1܀e3kp -&^+.tdݩϦ}d.ZbPl\=LH8B𜀝܂B `#N }/9n-Ǹ~Hm<94LY vvA ڵ.%.$ h"ࢣ"#mgcQ ,a9^<@!A dKr%N.`5"޹( 0PG “A!ooN*'-&`;I6|o{PA3aa!pf`۠/_x/+CTҠb=;RBj{^υiW(\=mRq9R^xUb4\׊[CD x4J%N>#j P a ]V\%Z-8)qvk(XԬvWވ: 3w[1x觤wEˇV_+EBXv*% KEH3yk~Di2XcuYnT8XG ϓrJ;Uۥ;~ڠ$C#0Qxt7 O%pV攝b'f͒ JI|b}q_*3ܵW"|KYܑM־ª+ brPUٲ)2x >bI`~b' ]uM*.f[ſ{_l1CW,K>HѺem}s~,q5$,M#f "4d"ڴSHOY#ucP*A{*}^upA/ : lD d:kc[R wc~$LlMO0bFb bJ}`$*ܺ0]}m@ؚ :v0U5cQ( Q}p@tj'­ih,8SwE#4P-y0P]e?fpd[y8}P <>]# P+ :̈$iE !ꍋLV4L]t*jj͸![KX2vmCdC;wje As٭d3CꚲvQw k+AЦǣ4; ņ>2v]NBEToz~'FlLU22 ~Xt DZ(OS(k6k(J_Kv.λ,U#DMx$r3:+A3Xþyqu=e$쪈{G`|4BL=+D|m5m^nbb3ro4'{떼hU%离˚hs~*.zo48-')A&zGl! +vI-T2<{EѤϦ-|{}5LU5⡐w Q_w<G$"d B\FWPC%51` ltHTPϷ:]kM^<4^Hs>߈fB l4zD&Í;n ף5Peͩ@ ?hU޼S7cըAvU]7W-[a4LYq@(b !񔶊~dP ԫZVN  xȵ$^! &wdrBS'AZ/A%r1y/6B|TgrúK;ڜ+ lҲetEU?,ضQk""ݴ8X}fza%ET)Yݙ%a +ZYѦ/dF:z[}dAK';p4I9Z@M;॒eզxB0wЉ02rj0̠a;-N? ѿO>Q͝`+%r k9JݧJawYE\MUR !k$/tpX'ؒת2`S z1{`和|w,jQI#98(zY)9̴w(C0}dkb+CX$ B`]|PwPB%jߊޚNs`h$j*A=\t< KuQG. 
*We^ӵ%ztD;|˿Zkr 8"^+Uf{OaiAp?F9P5nյ'D%v?й䷕<7nf8У >^/!=Zjވ|:&dNh ҄{RCwRb$v߆ T>񉊷z@u|Wh9vbmz֒jg|pI@cM *IRE- o.B^㴓"3L8dzAaED⎒B*5N :|Ygg8GVF'5ӒL}a #d?\ӅJ1\}bB(zΓ1 c/jzԔߌA`Ab׾5KDK N2Uؽ6`/j)^rol`Z-j{q+G^[-t0ߙD9S@J l(k;*a}AS*āJyz$bIncOBw׶ Ewi#„*q6nUAYue-8 PƵe -sQ*L&EQĖjE| =Q$'Wɔ ##^).UT*r4=ќ<`'5-5x0G!%jU w8Z<ƚ:I$:ہ3iQhB=NN !@˂)ޏY']+cxELWHn`3+FՀuMDIK)"ٜʲ"MFOU`5)e?׍+4"XMAep;+bH@Wwʲ61֭][|̟P@n' ?Uo#-Nhg}q/OL"6\ %8-TgtDr;zg#<\t $":9JM,:jb4ܣaSp-mLMyVN*vjR{QVcalT3w=xl YŨV=Ave7n̑Gp-N@!??7(;m1w+QT|hqT:M9G#8:eHHxeg >4TR#$E4>#m͠G>^2vY<;b=/o9uApzMKA(L'T׭GKk| 0!G{IN6,j~$y$<2ˍj O2/U&QnE~Bljo\hK aw. ڿ;Nhi2Wdvx{R߉O d. zaQ'٢',6O~f&(!ܫ{JvQ~5'"#kWijh]Hyac]TUa;"dK187(&svO47RqseA f>0vrIqay}(q!'F`<SϞ&E#{T ds*Y: nǍE}V+@_d/FcimTAQ5ƧTw_֔+fHxŧ:J991ƛ^ɔQ@R}IM̫X[}8 9-l0OAZq8tnUbonXɫ]i%cѶs9a]r$d?P|('@)}B[|Hݶ%Y!;ѫ֛)c{9iߏ0絸y-)huە[<\TnH #w9حEŔFVMGS4ڏ<Ǣ̭DaUc '!TP'&9uiw|ow&oyn7;CsJw\(өn*DK1BSlGH(@܉Ӑp.*{G+%2-VBTbe뫳ͺMT%Y5m/ 6GoR=% [3+s0wX #;B v>J-GSp<," ^fPnNZG@3/{c_[ Y$;`5 rKf Q"Z% f1)4[uYgjEk^+ ܽ{;'MISH>vvgP=yWOEPNt:W2h&ζV$zkF~P-xH;MxIQ֒[L(=?WJ[a ]&|L LnF@Y QmL +mWA 5:6{5T|jN#1pue+Ot-sLZH&zxH0~=jd ;2 2JiӶwT:qViZ`,˖r~;~-A @Bz#Kuͨ  4j2ZDAt~~?*5$> Z~ g4 L/BN꺁|9r@L;HH`a$`qČz@[0Z`^{~vC8cVg,9#3]YiVMvF Aa 054SФ*b[4Y@}s2̽5I*d eX3=~Zcu^:w >5?z",2`Hg5w* RdF]VE\ .B(Bpqy|[DEٮͤRToC&eZPt|Ff:hnb4{m"%4u3j]i=(꧷;^Mk(jmIԵv&|t\p}dxHjhٮDm6{/\\Ԃ&L'D/iMXWx ; ׄMrM1tTe*ʸ:16bZph#D/K,>CDgaq?@i[0U n"ʤ 7mݫ> F,n|j_hp|u]~`qxQWuA sRגB"\ɧR5"40yb idSq4\s@NjʸF*:B!q1 X\P)&>-H#(P 1W&6y7g;Q$v#kw:"8@4D_4{{+wGGcֶehk'eQAldln 7bzeL:npC w6ż Tt~,f8c (ё:#Cvb+Jٓe$G̻8q2R1 @h?>TPuHPxQJa}a-¨QHqX]0x:kVh`hdTHTHxxbK#)EȎ Z0m ƸUp19̳)ƽvU/Z̎mpx1+O"0t`+ >2Eؙfb١QFmCCQ9?5 F%ږ iY1eH%jV:X&vfGucXBn+I1$`{nCx.qTL`f?"B0SmqgK,w\Bd6HJ 5Fk?B槄{W۸møNIs92P-,ٌ%D1.;2N5 {KZίJ(Pt.fSҾ*;M `4i9P@6Rc+N)`WEczareWeKsa$gu;޻ ZX^\B$|U m"@ 0a1TpqJDe3l,MA#fR'thZnh*qJ@~-8SO rt'Iyf }RHDzȶjq[jZ;V,&rr BL~n6ƫh] |6_9ME.ĖMB4^n؉6ٗ-u HZ6j {U?uڳ.+F"ᢣ1_݂k,"q0落ɻӳVYUP=CX ul|aK`z.@J ƺg±RP%3wkVĘ[Ḡ*ז R"Bs1`39$'ѕ8[ɯ_-yS0w4wwnX#J8f!Kjo}V0zYE5acaô@Q9mP }mGFWR4b:Spo[M42 
6(2`|*k508jNr*Ml)BN֑P"Q"Yr4yina~OC:G?<!hٯ"on9֦%:?T ,VP/iwU,bVǛllrtT[6"qmj9Q'P {H>,P0Alq1ʠ>MCz Æ7%H;_~T3iձ?z.C,v ʞXM*}5Cq KZ1/28Jy춻AOi`uXCnj+ݪ6TB/ɹ~.~!S7<.^ *Jl *|FutrPa!ӁT)Y / sR8nD.%UEpL-J&&QCl!WH2䉓WB'UQuZSA l}zm]gj\aD ulp9FfN"NMZfY$)u^8}nC۠&Mpk! 7&ϊdf AgJf\y-_\%-r~5Oű.Gx,J}Ԛz})/5ZTD}8'^]4+ۯ[Vt^B8.Ast%g p@2l@!_z=[ [Z=:z)x)( nQ늘/WwxQ@,2[[jO7#ԏ'ZDG9U Ђ$4A;۔o}< 0/t>Hb9l^_nN&|-bP`*h]k4N w]lTD96^ da"Ԧ>kQ Pk4B ЌBt Cks6[VΤb:8]zF\9X(@61Ylo9&^/U}Z0 &o_y+LWaLN(B*+jXT P𺳯{ x1ڗU8`ƹࠔR:D5G=Q @Izx-3!N洗 (z'%&/iƅ"BG[d_xVT*Nos=} ڏ`J/NC y(ļ'ᑮ  L$EP0f Cp>)NE ,v8CYO>8ۘGblvrl)Q:3H3)H;t?"&=ژ9,ep Dm/72>WJ66sO#D'e|+^&]D T4]z;>GJ5hu@Mra4މ5ʐs[GAXY*anl0(rDRʧZW%"W}FR`02Yѩ>>kOvL׾_J`r,{.<3p_k_97$l5vF[a BݭHҐmЯ5< +PR3}ySRpcUH4 S3pKL 1ܱ9>xTM)*1sl@&󴭰W**[b$? [F JwʩotLbD`MBє ėGJsYB@kyX fo:$'Dؔ'ֺP+UM{& HhTRA' zS;cB${:CmC. Kqu%%~Ήo?JC!wwufm >ǎJ>{aVa5jȆICY/WҾ qQ .J\BΎpy!h *34?cq;kBUB-79"lY4aq4GsQ$Em"ps6ה{{ _8 j}ePčCvb"sn HyIjqa :ͳmswŻd<^~ޘ aFl=ҡ΢y8ط,5#ф]]ٹ _E0+"W 5G2#oJX54l'*ݘ5BBLPMZYT5 >2k;@$@MnFT-zAkK4pqr:?abw˶))sX %1 LbD=hPV-DF;V\U *;sw(rr3PuJ rasW-HنသîHq[n I32`tyTha1S,jk6o1U]8!7sA1̬,3$DJ 0'6X9X%L%}c ؼ/˃Qyڇ /fgXzw%6f\E)t57c u5wi"zX&`vRJuSK5<]Q5h7]h1B5rڨ(m/KK'OWUC̸QN۬mtthmoٚSp'E({U0eՄ^ .ᰪ;'c=z|k[pP,[.ޗ{w0eV"IN^h}mۖ3Nn>߅M- lCs#:;4ҫn.Zx@._^]^?bU$Xڀ'{Kvkx#PG_qqm2P Wv&.heg^=fMMW㶁JjKzsY7ձ)oeh jBFV:R RSZ(<sGiv3 ҏSn&`wxLډo8ء!+1bΫ cscMZEsG# 'u-b5K6rs9CLvĜGɞm-#]£WUl`iu=<@^̏;l-湶*QuH, f&󊛨Ck[O> ' S!\*C_φȈXQT_00dwqJ ~L{N3-xH̡k*m 1N[l§!m$JT?ayWAzmo_nvLtz]+ϦwTedA$n(6uoNH0 :`Hƥ[fWySqk/YQUn g;8=;LJ` R¡4nsSW jlQ}ܕTD2*}sY]/qS";VXfx`QsvL)2(:ON\%9{qchݜsPl]xqG@E5 % WGR"X_( ^p䌛Δ-v+p^Ri,naIEe`0Aؠ Hwh=lW,,Nw Pz/mi PpYWNx4i OaR^n6 ;"') o\"$w?/ SjAmc>/Fs65O26R Gw^YY9bьzQٳHBYY5FÁ^! 
d=ڪ+)т\=E,ň+Bc'ҧ.-I #)E=5uY# ( du)& r`JVd~8YhUpY^3\a[9j$45Q<\]qn0s"ib-`}0^1bv7Q;7|틯cv|D!&ըELRZ1/*&z8ۣ>+ŖnOd:sjm*"r1аoUhGj3o*c>alIV͔ܥ8 ]uM>d'kl(dY@GB(L3`qa"wM/L;`?r%v-uU?8_B G*5sl,OKO>ZUX!Qp9ˋz8gЏڑS']OjNH;u :쭋E+.aJK=YٳB5]Q,}5xv03!jLyO;6o3tyc/a ѐ+j3į[l0>J8Y -6[clϖ"JPO>g2VT!B't3ꍠ:f 4RY P3bw@S}3[14GYq%Vetp>U8yVWm BJmi6v[8S۷, ϥp (!l' 4"I{ e ޥ iNׄe %NxP(s"6BgnWǭYntU0+JkJ8{ 88) F!I%: '+CPG8M 9cPpJ-=QzYtlMog]fVUI$#dIPV5P ֋9UM-%'XKʡ2ڞdm@&ƶ[ (]59==5CzJ"V?%BJead4=*k6(^9'D2kuǛ<-v3,qHQ*TԺ(w,u3H4H aIu'&=\ʅW$_I2:JF~es< nNi;ѕ_si@4) 2[m]N>3[ $=TVSS\ p:bgnQppxhG:.͓Xm$6ߐU]֛[< I4|β>GeVߨݶq!:)W;1g=n}1DX":TvFq S!9 3d9aL2-8#{)j~H1"@2&9p7gfM9ֹ#zpl868"[P.IQ?me۫pT INk='Q{IPw"vQb )ŨTSqylv4rx,ypS1Kic&Llߙ7ÎꞰ"q_CX?݂!c>Nsf t| UūܘR*Œ95NJ @U"v2yWʺB]nCzv|GLd'.Nm:Fp΀=}mJk+챋ERbWyJrNv_*Q<oPx(H@a[ ^0uy1uN?8 mt>kz/+]IE`BH&N)9ݭDj#k3`R-TEu>ܒ7U~,_ǫB tUgl΍&ƻCf}󺮁D'܎C0CL/l !} HF7)EkPa-inAk]2㆚AZСS05=&1$DE-՘ ƄcǍ.( **X6g,Rӵ7WRYQ`U1VcdVo`:j<:c~7;g( r/HX|G2Eɻ c7 7>5trYFM-e°֔xSA ~β Sn%^ᡞ ؄fhN=S,ZP估M@Y}Y+y!OI^ơ2m"z夥(2L8@g3:W"q8n;:G;SVBi8 R1#WRtՏ虏19=Cfܕst?Yw"J (P/Rat8$1&X*ry(v Ŵ䱝L>W( %~2ҍS^V'\L#z%9=LM(A޺-T@#=8Zq',4".&,s5#I~hзU>H)oWXzkQ Wdz0Άpa' Ӈunpj}LK[t&:!(8T%71Jrj:.+3ZBVmORQ9rLΐ>\ݓfX30 jfSҾ-ر^I02MxҙXq!ƵG>&(c}H?E4la paށjُV. 4br8҂$.B՝B(?INJPmhmzlC#J]x$h?xamL`FH0r͛c@b$r} R0J^{Oy#69tTq%ID^\>oXI5G:LKh3a>z2Z)Ɍ:sЏWVyr ? spZ&B FwhJ},09lzPv~qɋ +stbwTrk $z4&Ss=̈k S"N;>&.:15ŧqOL}"2˛^Nā><U'9 "VN̏ 5 b#yZ*L,E3KqEvCIAϢg0]@EzM|dt-.}88.+ŭŦ{Uaɓ̐Z'MhvxF#_ D,FS]9X=bcpX[Niu,ABTi˃hvi5`!8r7fMS;A0:VyI,RyèR1#E4͇M&QVVCxO`tH嬥BPM "suŽZpFqrsE(H,h [.*$( gi"+{jr`x +{~T5ȬКpslk$CeF!F6.Rx 4v8ȶ"nW)m?X׀X>- MCb#܋#};6|F`C(TN [96OE6U;,(-YҰ(Ն;ɠӎۊm6}xgeBU@ +eO+KSr``Jw?3 <,> IgYB=+֋ֵ:Z\nfL 5*]٬$vx*2|<oVߺh"tlZ\AeH}Lعw J ݶn??*0T2Q`${ciY?ã=(fҿ+EvYt3o#d (Zdw2Ei /4RtELCK=>2e'p}Pul(z5P_44x?&`qvl ~$68%NH/NE%o\ 3x^\ϤӶoH|t}vfx9N U^:`y"+{QZdܗTCLӈ#暭~ =ŽDԂÝ˽@SgHmK=c Z3{G{`/= äɉ9M^Z$,kۇ83cwSK\A_|joYi԰ ޣ$(uIK= *DK)njeY wTZ:[s)Ia(w}R&$po9kkU&[*`fZ*벭Y\ J3[^y9^.jpS4gg7DlAٕw}䩿ʼw 좒S[8 ٳ&ㅨe9 1ҕ>:@r1/$Xđ a~55S0KKRkB0V` $K)=! 
H2P [,[xY1`D|:Ulc\.!XbIE*t:텭,J!WqڷCm+熢U5)'](# N-m&=wB DhfM;ݿ"-p1ޡSH+_gV:K5 `Eq?4*#EC8ho@knڑy]^`]Fy73/e+#K۫ ]U|C`A{QM-8jE%z]r6[^GONoQ > | f*`"WsdX{-QӞb Y-'bjL 9vpD,hmHuSqLd&fWR3L [yMse OhDPH  E+.V5XVdFK}hTMsV\( 7'}Ë}"E1!yZ) ޺BFru#vb4m^uЛ#]>H 2.vǀRjɾ1MbTR_1 ,J]uÛX$ލ_ QdA B52JD 7[\4?Y`{4j4PN% wIkg!eT`jީ] @%)j:s#x| œHv_# @3@@2gSԏª[5޷^>"*MBu7|ḥ,Kwv/Y{Z#BH=ե]`^t amy-c\IG(np@߿B YBwga=fC~;k#C쪷v/OGT8* K3?W?XɷL9BՓZN}[wW0ԡ)^530dMKɵWBqA;ߣQꫥ,rϦC?ր4Ck|nKKݸ/Wy?js+䇦h {Ai2ee3/'z^=@|X8ㄼͿ˥V{Tnm'HOsa;B_L̿+wYqf ̾t 5t>XL"C;ĂrceM z@b\;ș<@GGަx4J[0gdZe1b)!?,"`i7m_kjSJ)̚Q] %r13*T@KX J2A~МXdZmZԽVVɞLE t)A/Wg&H`-yGaԵ.uӧ]}Vǟ滰{߈7 3$"[C@uZ^x%XyJJY;gf,0E6.ЬWe[sNhv?#z%*]/SW%n5\(:4#[`y"9%Oɦ g W'֨/99I*$Hpi]{&vN֫ qDv.P_]YϰEj h`KRgLB+ЉKJe4.rԛivOZď3(sI֚>F6/$s-qawM 0)ٖql-*C!DNpY2&zAM$: U#\"Zy睯Pz鎪>hɄvWYrRN!#Z lܔ@JCU#2*gTJkixR]<b+L% Ei `Ws%~ Qx6_u Cȱ.jw<й! eQi+_,L%%yQ~!/@zB#>06w%(I~Cu.RC;jRzY)&Eݵb@1b8U3Uu*HP)&Oul8tԿ7?=mZb,,VX\,''"Ғ2ګ/3x fVlt.0l_^AP|smezTt_r~jhm)L+p/au+vU}i]HJY߈uwyZb"Ca,i t$)q$ /@4@!Tp/4DHX ؁4-ւ}_c%Ь!,u+%y)p("-[9_MtE%!fdca&ܾ benf*,Nj>7ri1۟GEc]}Kpϧ߳1ax)5h'!3m>KF_r`d}bOb*q43Ԭ|apg=q#Ԯ0^N**X\k g1,2jALb f_ -&.@%)I-n%4B3pnu %7D8"sN22| ~s/$Q7$.lzZS 42we>+"kF 0ww4`8y_c `:x:J¯ 9F kPΕp7M&$_x$Df#~_)mII5[N131zl`)QqJOtw=yDόX5 XFtL =D"ݥm;merɜswO+GGH§G#/ ?+/My)+{Py֋*j+_ep3{Mh ZŬ/bة77MDS7cT{H &)G) Ƒ#leqUPb/&lbU#{,uC~'E } Iams0}Boww*H?Kޅ~͏i4M2nlBvŊH&).p{cFQX|mS$GE{HqQ"S)j#!`-UBA9$%؀Ҥ @Ef>$%.vUL-z6Xakyy贎`/> L(Amה[@bN{RS5R/P1hs9,+{ oŦX^仉eDNHQ1GnIcA;(gn=p&O_(ۀYP=*Ԇ.D8)~W!W\ Rz5,$CW<Ƨ@;zt~$mIzΛ<89T#8VCrn8@?Z`ӢJ+24 z?l,q\ev6+R]* Jp&`N|R?J"\>A`Cg?zJ=;Lv`v! 
d=Q f47B3 8zfttx=kºLl`#n~9̤XbtZ9e*z!ҰeO , nL~))4}v~vccRO`O̹klW~Szmաқچb$XaEKR`0{GVhLwaI0«)Q*,)#:P-CIE +inԲ8̍u$y(}rёKA,u”Qi9WTpK[|⎫W1OAš٠]CLˢyyfd--<ߍM\6uw(G \>90~zm*MDM8g TB$t)hwwdNHE7[c=WԫC6T~NlMj[^A=F7kxapie\`Ҍӳzlʁ ^ j#"V).FXO)£X(RRyg&z>8Px;~;Zߣ>ǖFR ɳT(/D0cJ5t)ubk }!kP{8T _d[.1[BY yt"%,&v.8ǒ"ӽHoa-6" i&N#.TEL-6!P8"Ӯϑ< pvBZ4CO84˻ƟP7:^RoR.ڒUc҅]"n`Շ'܅ѧ.jeT"Z⨮ Q9g~/I^~)Wr0R;5*O`ZѶ.A-Xcƣ{(."Idʒ "AuҤ.1`!uҨl,hZlVŸkLXWڍ |֜"ЦȊ bt2]u4|nir9N186XK4jգi , 6]{QxP82b"rS/iyUB|fҪ{+#'ڊ h8l<ţN.Y| 9TĶ BgaiMS0r@#~nkMՠn`N3j[T7q/7)9Qv3N~Dh 2^нeZmt 0uD2y9ǤCmn} &yZzg/r kIL/}5XdKfXD:EB`IܹZ{+:;vĹM뷯D$ImTdٵ?7Ncn)^AP!iܢ#9~ ׇ ;eVJ&=>+ ntos?}4}`gҝ ĈTXRMoT N )*!Α<9(}+'r5ߌ}IuVw: C p9e3wiM녝V|#e$.;}}68Lq耸~+Nk )k ^YvEzxb9/g (4qS,L`*dU'u˗qՅ20]P]ϓ\R`l4]߼&MJ_Tk_^[u]]O7 qq|@g’ϰx: m:R3W`ڌ N0vTC w*WTc60|Z]>L3p{:h!'8=}񸄲6ӇLW,7QQΒޗVxGln#PQ .H ӽش֌V/ q+~jY\l~5ʨs݇'BCfiW0 u9)9jk^ %EZ<_<2o _E=hnޟӱt1P5({|@8}K*&*~a V=$~KE{/9dc5kSCp/"3- w oxV8%@"q^.DD팹e!UjtQRPtzݽ|3Kx!+tgމk T^uU<+ȹk.0<2)oR* jv.. hq8l 9r&s{ imO,@H^5j)9_H~W0xGz86K3$ ֧{z;7v||yHCMe6qύmfX,13y0CH.LISPg3*NEz1˒ʫ''wK._xP8t)cd_?x`O>UO>&z .5*""95NȲ8MEC%:!뵂2I@/@"EKI4ZR0 {mp ߉Ɓ`. %d.Oe\Eњhi!‰}N`2_vt]e eX v,-_Ȝ@ \Q{0q08 09hug9]3BDD5pެQ2œ-9ة֊9AL)du|bB(kMyKxYdֳi:C^T32rX=Ș4mRWx:SӣKJZ/ATG{]2L# er;麎ཚ`/agPa+ 6y70qX^=ݪߠ h@9z熽.nC}|Čko "Սqv S5sP$g Ôb2=Vfjy[Ϊ'V2'~f}ny#VA~ʝmʄx.6B㦯kq Vֻ]kE]en7^HacC2x:~ظt+ϟ&fbԩ*[Ǥ,vy&Za>}R-<Ԇ!bѫZZb7xm:E7;"i':2:_ ܉i3E7ύH.57`iXd5o |Hnum ~䲹g UB++Q(6Cн3JK8S9[UONݒ/U<-ш:[HʌB Nmg* TP\macA 7X>hm<|>{ۇ~wSljL)[l|2nrC|(Cx.NljȨ,`U?c'!+%b&8`'^6P*;sn(S^#wT9Js Y_Kג"hF=W >x]( l2U(R#b׆"0ž]+8oC @|@>YT:td@4C'~EQ+,{9|~1U{|>W:cVZ~kLү c;? 
xld.w,0@xWiK7Ih7J젨@Jֶ欷;xl1OpKw:>Uߪ1tRAW.@qhRg&> DpB*Sq۾W!@J@ìAĐ uO ^L FjHe[;0d.g5o٬XwX1} ʡCuJ,MTsj,?FBI]B`%hp0I'=V!LUOUȱwc`>dk޸Ap%hy`r3Gk1'ɧ3%ڦ^Xk04  Ӓ]az$jNDDF ž_)&f 2;gD&(;;Vign=@WPv(ijw㈜;&`x_1jYCAD0)ݞ۴M쨱;_Po H>qiy +!Ih0&12ǩUHL8 :T qqHء!j8Uxoz .R1x5[g* t}޽:},3]X ;+CzLVIcL"0ak,U`n3JFH4[tx: ]XNeghں.@Eyr}?m[qi4 zqbYti7ahoL] d47H[ZaWof@{C_^1usI& -p_ǞzfC=_5xm(_՞TrJS=+|ӗcDʭgpdYEyHMoZre`$ 33X9Hb(jGoPq#zױy:J4zz=IYR]]J: ^v/JYS87y_+s[84YR&14͗BO)ѭ "?+L.VClI͉#?;Z&!]]eKhPXɡqDfƠ & לh.R4,Ʀ82iafŚy~# Qh`Rm'!dm׋Xp 7U80ߕTZI/NIQUݮ!񬼜8*—A02܉_yXV<#"D2"b0YO> #҃j:4L&tmt˙Qrt Ve1D :a9m*5ܵdS#M@]"l$ZCǍ[ oъ-hIq.!9UOs9hU (l)0 ~-7|Ҁ)Ut-ߦ!P.7͝hFVw2*tq.([(xiϔ*jqRBGI {:P .fс7\ih2i:RrwD/ofz()[W d*X(j|FsZGebl]yX9emZyTy {\Y'Vx@nQaV&pҩNͦDuYI9R"❺|>^" SfRfms~ Ż"nD~t1~τ!ĢL`;F)jVO;iQR3v(X4˱9 A=A8X`q(Р X7R!wrySf"Bq. ˰SziT.>/R)DYc!t+d_kIu vpsKPk ^JcR&3d~p@7mM3 rj CtdaK3N|ScZQpQIնh!u( JU;8О^kjjV5Ըd3ȴ;ݸ: r+LLJ\vq+t=7;%G{KuTY# ΌjϙW'>09<"( Bic~4>ig)3$`װG 8A;7D5SǸb5(r5*jGMaÊZM;P揄u狹]`Zs<P# ufۨo!DxOI?=:tG&ѥ!jQ]}-ҺO㗩RUj)[%jU[(k̻XiEx5ήGGGjD[zaإCmu|3G2R$~}~=+e/yv5.`!jMk@(ldk;j*0wgRC/#Sg/lyYY OCtf°i=g+,-NHܚ!*-9IdrQ4ɎؤDZU._?}Ne ]$}`[l uLTOJ}tԐny7(>m8W߮!/&\NϘ='|JDkO+k_ 6'Bʢ U3\t- xX A`ER~^#zerDy85;wXj9 RVz8&9K>!3H2\ $zx⊔?)f ][Fbϴf`t3Z\"&;)dKqsWGRiڜ7HH31[̓aqUYj") 0 g2'zNKb쩌a%! F}Et?kp>髷ޖQJM҃9NpBc>Fu(,j ֮WE_>mP/ oJyhϨgx/r%N{i>%@C~ު6BO3pF]GՈf'_!ovI!9.D_ZӤ SW:Hs<fA,+!LTIs@5Er[}ws jKcL%@:IVÜιߋjE=(d5tTI[6_0v%ݿ1EMQ `ܫlf䊭=`h&4qi,MzJV7pl=Bt1ea>%pPaM SǶ "2}6W 4FejF|w&*A+UsBk\tf>^o9)s()aK9~/8q7Qzos,~+"t@`;,q#:V3!2]g~%|MZkΘ!4ǤuAyd pIJҭ];ǩlU鉲M \_??ꈧiI>o(FBxAI1>CIwP0&0R: XYBNtNNٞ?3>s%\լ50lz5QIr7x2dHÌnλ;ҁ[쩨ҫN'LcdvwK&ΈF@ѶIDXcS]Y, ƽ= SAcK4 O ;4f(#E^itӼ2^)׸e$Qhi|A} NŽ4 ܒ*3VPyX6 Q+'l5}1Qˍ6+Cgd)ܝI>l@=7x ,4:dYڟFLGg樀S4 FCz)s1ZyzË!5"m\l~~$yQgQ}nIֹ(k"k=f1@Ȫ^WCyT˖eۿe rj -E"OQЙ̩'WJ}q5o|ӥmko0 uN[6lD{9 1ڗm==]KKe #X[<٭zj ɐ׎}K+Sߋg:' 3t(=5~A(e+O-< UڑG.idN a߅w5 Г.xx`:hh.YN1БrBp] tl-)_5:P'^X*'|^{1\i?-GռjOHvv&MY7anLC1 OH^rqF*ߩ{:C/HRTItVU ];D`{U]Gƫ }IK[= X]nh _9?a 㽸)d}NNij6! 
js{PMmZ"LW=s3 "`Do0 z'lI,t¹f |.R7=t-3.oឯPN2ۺq$8;g\0.=0!q~d!eKS\9̳W@Koz *=muaSEQix<*'BFASԽ]_{&RQ↋,tߚ\}&Ui=.>wSSW \w 񟴚ƑFk?YqSk^tVX6|FG Y\P=ބf{9tw!yOz1$ob9mub3y[-_he=3 ;,Sf]P2*t7AQhyfJ.;=&w"L.65=|[K% P^Qie"ڲx>=|@npa|qۻ\0xTIQVmnmS/~Aӛ +g]ρ޻>\s<Z.U:=_ߤ- ޑScY7y'Nafm/C `t9R(ǃշ]6nPLz*dt(c px ÞRm) 1]ILQ|  "404~ -'kH62paV46[ VsI ܏@0&, "7`!pH3x=yb I7R)?=(^Q :oجxO/CzI2#Vl`r:5gU!3H.6Jd}5D%kAבt\5nTaFWA,=՚'bT\I1tz!;,$R`TXaWO|@*ٰe ̡DgҎ>NhUSUp ^.loIY")sdԭs({ J.`94>:{^#'KVM<pmi@eWZs`I+4 aS)%=%7d+~ BV 6'4-.\o1pnkv2;LxhČ}@o d E: FB}1xɵ7lP0-W2P{ag;SR\Vb53IJ{S;Dy`#F*+ U6"40= n*G&BOc,[x(ގy <MW7,eP5ys'jǀaxX,X]V>5JR vQ$VZN" )vICWm/s [#0DIqf3"`,Mza>N!bLy*Uv&w&s~)Qڢ{$I6 #9d4f= a.>3Ia\kXƅ0[k+,*7>/wŨ!)?geZ @/Cr \O/uۢ4`*;86/^6H9 [V;J&OB pcΒ&T58U<^Pb[&ֺuOQD.Wr5B!J6T6\o &>)I@ځQ:D_Mך}Z#$a)Ψ›6DE^? {-\lVipz7y:`ɕf.hZ,٧=| Hl=V[CLyZ^srr5ڎdk.j:"՚kpyj3YU wƦn9ƚ/ ~퐾?4 am&<߆Z`؁SpUX28<:c8ߊ[g C|Raʼn/jt+5PZț>CKr%L6&/g$Dֳ&u_۶A ]}!"۽z/\YJ;H:i'l(^O I]L٠AKU@T d noZ@Gη%c]⠍ukձ*ɎG+ js`伙Z︌kRQYiDVSjllbiG7ìvˆobUV'۷EזٜIT=uc{G(vB}I~u_ M0<sqkqWHwꗒU~1WZr,Ƿ[>9J@4O-Οu> w3CQҟCh*Ss<#G'Ruuon>]rƓ':aC` ]sU5{T4,Yb%l)cw[7LLnW?[K~h{s/wy{:U T{ylDۑ>&cxm-^C۽g\]ôo<1_oCnVK? Y?jP7h3{SW?'t~kbP\)k Zl&>~Lv΢HXF,), g9B҈4UTEne#aj%)yB,BZ7,.6F+Dw leӣ,/+ത rّ4eM#N ?2E]vjAK5^۟eJVx"22vp0gBse5PJ{4\:I*щ?l֞!C(TrZ0fUҞq:kz42ONnZ6ҍZ 9.PCVjbW+c̙ѿ*[*Z5_}g )aAMZCVrͲ uTY!!l8h-yi\d JfHk(rrFMd֟0Pbreh3SRϑiB C]Xbi R=2Ώ|Uwl 6 P1BԕN \LA젌]S޻'zB*iH}t\M̩A~sK;;k+j'e90u 4TC"ݎJd+\yP]bjM<9+s6`Vz]?z|b:-#,ts \9<i;4c| O7ZVO^J/|8;HNsm:EI&[{lܬJMM P՟E hGzsq@a .c-fqJSSF6s[aK3;l۫;eษ qn?S"DBM*|!Ebw)'%0Q;,]e| #u4UU pu谨@Unw2H_Ib)LWwUKm?0Z'ҭ/< N\7oI$b6a7\8 z{DS dž$ mW·4Ҧ#9(zd-44$g_sѠ>ǫ/4qߣ  f ˓2Xr:Hy,|j+ck.*8^|iTq tOfPIO~ Rq9);z+2)%:dD;}A[*4lQ#vyaXÜB3Dmbhÿ»mʻ`y9i@? W94>TTNR_r{ЎA[m*}9RV9ܛ5"N s"-xd5{û oJU ^l]!0) g V6zXZoa1Zr -ΦףgɂU>sc7d+~Ěd#|yv=Mq*ojAo1z!Dx|%\/DH}OQF OZh0Δ$3yP nY? 
JՇ5=0]C^u{osڟ$2 ԏHWD[RI vJ=9|µӧ!ga[4\6RX0j̧+=ʼn fTqϒ8~|09VJxjMi3I}6Ǽ3w{Ç\~g$[ysw(o2J  hŃYP"&X!>`(M%Kx[IAg2qx;Z]Jb)'uA6m}R\:5Tʌ1'!X1 ]= 3|ÕC؋m9 ׾w kZX~θ›^t}H-Qߐuֿ̽4(W K6ij٢VU q@j4`U#f CBXd?zyMѿd@߿u0kdXr1hm#nY샺5P `h"u`0M9(sZ.SI| ػ]x*@򵧆sHpϐ׷"_,)1R-[-Pރ4I jWoΛ%Gc0\N -R34 -e g۰ou_jWְgQ]08VDԤS"&[_T^|(kaؐ itRFL6]L8<42wr6dJzL'D YwPL1"&ZTFm4*4tl('yPd~޴]ZZ @| ]'N:T2ѥ%x=8ԱȨŞg'9gsEA"=W-\ (5]ş́]_w‰caED)A[L2`΢`5;쪆 Y|ghLr n78Zhu6xG4wl]TO*S\q4^}%KXcQUw هk۪1n ^Q<0<:ѽ$3 Uf]f V Y})<-D:ugN-S3w5j5 qk*CˀIEDkw%ԲU7 {Ul8NlWVV_i=PPlX@,94w^Ynj@_5Vפ,䀦qSE^hlH|M_T=q]3g=qOʗ Y4y\,u:h{P,8p-'ei9CXtKk#$ |ITtPM{b0M䤴6B;f`VUsfå7N(=Ͽˑ9\W7u7A—j֛C٢e]Ҝ3k\K"1ˆ80wL\Y$`o?ԊisYBi #˥"0m)d YIAkAF`EŽȏg4n'"n=Z>[DHj2LIԳ[9io$]ߪ?bU>t }We)(D|04_Āx-5ې$= D 9pC/W${ tinć/ Hؠ #v7Bm&!lSPr0ϩK0@<//sjL(jĪVP<KGffG("%!;lgudܴ@k"e96Bz9)79q*OkCYk@D@LSLE&ʸ%MrEr xR$6s]t%=An T4`dM"ƲZލfBdREe_.mljhMrr{M!Ci1<<ӗUrT cҫ3:vOu\*z\G~u׍bBsTg`L*A=K{;XcD֖%e؄]]Ba:}gubp6)aKbG*.oZiJ>NF`-.jɟhfnV*tkjD{RcYUv\Q֖bRt~ \lfPTB[e$9DLKݢ'#VC6I͞)`L]z# ;7^qHNMiVEjw_$Srq Qv uy)hcoziDI^p-`ebB)E1ł'zNTlaEmhL" y|-ts[ SK*m8>'Dw!Y>Qoq7hAzʷռzBϻǻk]k i$ OhT+]ÄpX@* -Χm86_ԐF@=t-g8`.\hj}}bb[3u@~q|˴1R B!3X)LBnuQE{p5^c1_e} % ꦑO)?V8`\8Bӊ}h8XeqcRa WZntZGk֦eiBi7Aǹ׆q豓 :) >6=_u:xERn>C s)y:-2岐i|m\ژʎNhG,Y\WuLF/j3e<<(=Zݼ,qIIg"ͳ2|Dk8x|}0Ay2([z>o>@wjǷEOT-mڡ(:g0XkǃL<=cͽN {jcCxE5aIGPo6|6 b -QrޥyL}{Bg"T[l8uKhC*n2Ux`w).IEr EjmY]SJͅ%ӆ>CCiyYLyB޾"`P@TBy]5VC,0:0K =^#anc6gR{ `Tk󲨛\7WOO!œ R"O`Js!x%ɨq`Œ!%1DoJvJ௳ +k>b5֋xy{;H77IyElCR"HsI†TߺnѬ[Y!2"ZY@l&Xv. 
#(B[v]BV"`]1 XOD\*I6D5Mx@OQ$o\<%5~B-hh[rNEȦʍMERV0bDS|" *5{EViw+:Z$̴uLD~qJ;'֌ 1Q!q;~<>n;uST u5s+[(B(S&r4Ծa!l:_lNx*-F6sY:@I4m[ƻtq<)AB w[<w2[I;_LG]a`5:9 CCVapy.kJ}"L 6F}%*y|r!VgUoJ=΁p@=8R"ϪZ2X >~}:4T ر:cl^GU G]OBIhVDD]kpQ]p66,4^I+V`U4=WYg<& Vf kŗj;qB+t$;:G+ =~Io'D]E0K:MSwɝYjגјw;rcCZaf4\{.j1%rku܅Ks+z%S7Ŵe܅OE]e\K=jg-t:JKKČ=И v`ٶD<8#UUZQիVbڊϫrhUgxӞpĔ=q.{/kWnvũ+.ᅮ"j=M -dU QRDD_(' L J'?deEʐq?92ё,^ 1`7|LKP22"+saf~:ks2$Dy saϯb7j\]}w/k9#U+oN7˅#2-<]?TL(bԊ(NƯ:D.E vЄd4@~+Aܾ=u8$qPmUixxpZg~ΕDT( c&8Dߌ) @NzH=n@:gL"'RD› ^m6ibv0LODh3 <㗗Wp/5Pyn}vot .6TFMt%;^|Sn5=5 Xp$uH m)XO{==1 )NqL0(UG(Tib'v讆V.h٘,+">l遍HM㈶j(.3 ʅ:%3zvGi LTS5)8[w]E*{:v[T'gHPO稰>d`qK0cX76 v6jixX2.(IߊczC~Q/[υEBB^6Tɯ|t˱.œTpu`{.`mq$?PSxd=n~"> ]'r;tV]x': ڥ{6^ ,ySDG{#};M& G:o 4̚.#xy\ٜWЋ /ꍷUE8p?0yqQ!ur!;:CבE1D.b>nj:dYJx>I9b>JtƣaA~zaŮ^M>\ϮBKm͞Ldh9IFO׃DMiDjk",f^/g|Lt9MĘd:%|0()rԭ'8te7Ik|NP:j6kR8P_%N|^'`&];uv|vYj1Cy7dvW ͐ ̢>ju!B4a}-x8_hs0W$i؟i0J2IU s(U%m^oyReUn魸V [ٝmqGխKASm阃5@0ΖrT͜ ׊Y;CDFTEzH{اsf!D (,cR=G"O#JGSt&gK=9Qѯ!nE}O;h[dXC2|ܽG،<hHk0 2頛/#EBd@ /û(lXa南Ye^#qɥӬWoOu; .몦6*jD\Yaʜ7O,1xRN3yl2csidJ+P7$5=uTV樯kۉ‚]:ռhvG|f˄* ; +g n OU8@Z|K{.pQ2(}p(;lc'nir";(q&ȸE}('a' xJg"ZAD|M6@+Q\j qgy!,VУp$'B1ߋ tnVzmC{/e&0 iޓp+?r5Y2lX37vNenv#fMF]GIßĘ5Pvܯ_}&HW}bzaFT_}Vx;}|Zxl,XmgхoH8^pK܀~4zcҫ!<}2wZǺ5}iI/_>8ܗ ֶ.}Yq`eNmV OjMrKn]0Z^V=|w\9_ >]慘ջYGbÅڔhO*|+&haH. ǯJDD^BZz{zf`0~&(G§mGaA϶{#hx.DG5 ɞQ=V++-fv77W?O*TBDT4Pm"78@)Fs8l?9-T 8A\=q"t&> X?[xXΝ6JfP̫VBMM3%aƹ"qEQ@ d9DӘ>IT:U~u dq .U 9}o,JDq"*]JE.$uNiv"9Kehg X-#-xpJ?{;K~i\Ƕ,dڠpb/`5xT}tDƟHg=_KGȱŰ*q*$)W딒--Z ZT62 \yg.lF[䈑xWdT.}r2N| ƞ5 9ۓazO6_GMˉ\WE ôUrMm+Yh>A-}}kEqGnQƍjcAp&V a QUN4Fmc}>Ϊ/5\-\rAfWX; xdPI=a0_QhLJ}"6h #e B[V"g1i~Q;akE7O/ >kNÛiI$ H}6l[u&Z:È6!P^\gt? ;? 4ǝܓe$L(pO(Mc=T Dic@&0uו`Wв]kdK!fDn:6WQRwu'jEFn#\@Ŕ%6N7h=Wv/J˦OhN1͙)FϰD;zɞ4~d ZN;,>U=>CnS@#mLޗkJE7CNg "aQCxb]t$q D 3ՕEXa?[L}~@ſ؜V/nXgY r!gߩ٣0w8w9W~<ob4ܙ,Ckǽ_Cƣ5UU D)}/B1ه Qh&W TӬi ͰA@Shg]>Ѳ|_x1=GP4xQ(;$ե.&X6O6Mlgnm{. ilvK(ʯQ ݱl))m 8vuFDŘt& K<_ +wR$?r⦩aG27_9"~/}$\@ zPہ8xǹ'Gb-ng\H{*A?j "K=S^+k2Dv`S? 
lV^o91 ޕL+k4u|B J/\$M}++fpqT)ˮNk^6hTTr{wdFVC^;K-3TFuD+Ps6/I]I!Ee u/ǃ,`h/l?$" h>/#ߎr-Nv5Ut*]xG~ZY=Q$pF̭A:﯍~~77%71)xQnU_77s009iNdc3X6,v88L+|c*a4M'(TXh==8oڧj G8@-OP 6X$RO_^!@$+pN1*f\EPEFQ@4pHƉPUS *4sP4@Re)W_ *8g0jk :ٌDeJ![axa e3;&}p5廉#*E6>hAk=0,<՟ܓ` /xvm ^/Dz IZO~JFAY'N{a.G)]VV߯ .L Lxߧ GϑUgChms5+i`}}-.J&I* ({. 8hWpT0m l04ztO' qU<=|ʑDPO>YޕK.瘨0K-+ub&ȶzw"bCK-~<ը~=?8Q "ABW*Ku篜r\Ta^+<.{ޡ+Zeuԩ5ݝ6 zHJl|8ԕbprBAC7e-q݋H~ؾ2p5suq+H3S (z![ǥD.pbX>gsoup5TOTtf@*j4+صGn`eQ94A ̊ЏӛO饞x®݄QݢVO^F=hxn[o^ЬwF-N:kI!x^.um'X\5m;ϧNNxWˊD\Ϡe}N^ XZ6®8PQv[u4UPN]aQ;xuv6]$ęWjPگH^^kLB~i`9]Ȏp#n]+uSNINuһs{5jUZzUJPNU ?!hSO6 :}X,l" & zGݞ;jۘQT]BD4 MR8rpVFDд}IưNȞ!1tUіl^kG>z!eqE< l60q̞R4`-޳s~r%Goes?K{Ԏ!0p̮A j_ƉC׿N$ ZN)cMs2'z}mQu I)t80x9=z3i 4^\Lk<#yAm S sO. p]m˿w@ɷ9ޒUNrtv[^'uozgYNr`~beM5{X562<3Ki8!Wxh_Y|zۦj^)@G?[%GM*sjc=+䵳—_)!~o/S"RN-{F#Jgvfq0y}ZP삻¼[|B8J驁Z-&ǹ{ㆺRv&h~=oլb&(G(~٬6cBtxyV;p>PeL_~־ zAc˒ԃF֬Ė-Ak&<٣KBawFlDے~sE ri *q X{q/ժ}$9eYPMBuC?P(yM7.r쯔@XJB9~;^_W/Ͷ\`r8JStsopeW_9Vhe^15s^gh&F^jQl@cy0pXT݇2 HtHmmOH :YeQ1)¯ߔa)=u|vO |oe?Wբ% MѣQ p*' 0qDZ@`VvG!Ilv_Njw= 4ۛ(gвz <)3˖KHm7KLJ-+Il.jQrpi3ݦfIϔ+{eJzj{sPs#̀g:^ìcSXhP%&o7}+OʨOu%C&- 9HÊW72JiuBֆ0*}' 1@E;0B5*婈Oހv$B AFlxaU/N{mϺzç5pYtv8<3.y-vb;|{ 2lHzZfMC/YJtlWy--gDҧzmB5 zZ|&UrRs=_}/уþ:7u9RXvY,%IU@}ۇzot)}J2p|u54Ϛg$XHMtxϗn-FEoW_#d"D}0Ldm \o{K}AW땀1df!ȁ/@S-p`%jq U_~q{(\m2~+ySEf_B툊UsZ:\?6mr<~=CgI֊Ԃ)E9-a4%y ׵rXB7X"=~REJk:C_6:(q9BPՉiJ vR ޖmO%?ux>7Y6mUfHzS]wm3)'@t>f%+dDd6V2aU-k枷y}dϯlwMiAnPT=h틚 vaM U0N%P?5z1fxD(/q`;ԯ>4 \zEoуe={P9B`e=x=WoT@ώ POltE IR t7<˟|B|y!yU|j~`QӃN;4abo9/%\W54.2ˤVBxơeSڂ _Ɲ]MEa1X8.Â[uz+QU%S,]߯f$4 rϙ8V:)W3߽fA2asH"\k أ9b^t)2AGkykƽ5l{v}V i}KwF4sl W@Dw ʣXER'y-٦)es ZT g vCNY͖G8M77R!T77Q$ݩn7 j4SÊUqTBK=76  >t1X$vPJ.]OKEy۽tݡH:s=<kf?Sp V.gvf73sMLỉ<6uT5Ȧ}g} x>Kӂo);.gw >!V#^Yh'VimkTٌ3 Ŋ(DBL}g°3ٍ?=IHV!Le4Y܌yO1(]~q!$"4m }Nեyb^-8^)r䊒R 'O$h1%uOr~ZoyU7kau,ޜmg?m~&ۃ:g~ EV71ї:Qo&wr W^堖SY~0s1`UG+hz1aAnݑMLO{~9>ʹ x %ci&T9L̼i?yi[.Mse\$ا2PDϖ \K maVa<(NJGٗdTl}u߶dwK:Z):ƇTS–k&z.5U(98uQsN6g=Ct`p͇?NuK;\@#ɷFSޖg83Gr30e[FK',bo]@ BuRΡJK#u!&1heRpаeU槃,*{j89afaNj Sm֚:b (K^ گ:ViQ'8UT. 
<"I]Ʋ@~a;DAVBv%.TΚ3Cb{秠x?@t˧{h3;Wz&\wjYey>͘M:UX`ŪWE8}~$`;RL?}zz&㍱b\z^JVCܐpK'ߎ=]0mYIW[}zeGpƟXǍ"8^^9 JU3jKO>\Cr7Oۢc=6pI{ ~¾[7U:A>9xѣ$XJ56cYVIy fga:Z-)s7/vI6hC5p4]c/X [WӮXE&kaT^R!'4(U]K )b,+q/s*h^yf4:U"Ezx>y;7\' qڏ[y3ӝ-χ˘yxυ2TVP}X.}p^!~8oB5Mȗм.a=2Y-){ᗌwt3)$X ;Fh^c#+S"!LpD M\H1-n[yd6^qӝ,s(U'YK4 /M60 ?M)`V#o=g Q*X%n~erT/ZA A*2U5F.$,Go[=ݝ@,ϵl N|:h&2~ )7m]n|X^twR0,[~ޘOSXQ%!6  =2.{OBɆ[ RT̲>gAmm_ 3{َ- gridmafʻ3+<ǩts)K{ߵyJ1[dfżÎd  |hVWoO~D۹I.@_TNE ׯQG"1~ϵn3MO~ggZcG;>1GVY*vZ9d7 ){gۗY`UaFbb ?!BDn]¼sxqes'B廽Tj1`3YXK;c a&E=d1#-ϧ# c`d^WsʧԳ@b)o^_9"ډmo!QDg2[ut8qW<l}i:hX&{Wwח(H+~߂WSgy#w-=xWh? 64zn>269nύn/G_A/|u5Gߌ-_^lԀ 1,Cf3(]/E}Aͨ;bj{vց{z#z#E/4'Z@CC60z3%sbT"y'{Ѹ,9ZҪθΣ\yUAT<$<:&5IO=2Z1էL\)j7"iz^TF4צ:X{uUoo M+9[lr"̛,\UR\ue7_i#{=!2Ϳ r.1_jcKaV%’ͺцR^\ F#j\Zeg%jw!!7W6gx@1أPI|70X齃zN`sF !tO70$oPίVT}2~2ӳ=.yWe o43<(!sR=]]n(_a&#y,qW~CW8,Y-^f «=M{c /DmJzEƬtE _M0ӕnrb7%~S~i8ָ zO>X$3jEpk[YWw}(}q8lAMuv >y_j?K=UHϴ+~Ab}{C\ιZgWsG8UԮVbcA*^WaatzߴiX~[fu>o}|X{Õ]}tN 51_0ÿO,?Q81"T Bn4IՁ4p* !!]E9i6j1(wpUJ|`m?<ܮ g|5Uzj =*vMVʗB_5Z}%ѿW_=Ϋ+Xfs W9vPPLtͼXs~B՟7`)AGخaZ浖Enz7,#Tbn]=VubJWcطxϔ XoDzC6i,,OqҌ8a[JkD B*T_!w>#:/R|#sx8mLeȜ^1U4:y|tb Y+,xYk5UId:?_r8a,t|u*so]LNbm? 
YU@J졜c<5h:$d.rD FgX^+?N *ћs9AVndF?]irMf+Ous҄n0Nh 6VfUdމ;1mk~\:sϢ#͔2t%3Ttk_5.Ho7˞;WEINbFIHj23y'@3,Exgpݹ=tUAwג6dWYR2>m3ezȫzvyug ˓IWKq{O59Cq .^c®\#mc:XI:Y^K|LJJ+ڊ.Tςl ܧ)GnMl;h i*X{DNhEJ c87R?KĒ6[ ȔgG$+ DrB+zH6C' )B JeX7uEm[ifMo2 g] T]4逴 z3kzcm\"Q]J^#y{||q#O>L[:—eG<*KmS{p7}q.@zJ9«)+ˤb Y^LO <ʲ;]5+,<,mIg[{V:_e4+~ee|/}b8 DŽ~5޿>S`|~<3ΏgFqy^=O[vG>~:{WW|&14GՐyZ}Q =-8X8,,<ʮNƁ l?|YCs8]dry`jsEܐtC=o˗GbgN)tYӴܐ[e|HI7^O2OnoQ"?]v2Q~tpc~_woTߞᙝ59J~p˽S~<}}=&kD/.`~ &Vevn8uk-9, )3XQ@>Me VOuiGϴ3]A(Wvk<;F?j|9wXquS;(:pZ{'_6SfO1p`LiEؘn1)j㗘-;3,-x='(\i|nǓ%׊4S??pNQ oiP O7[Ŷo~Qlm;ۯ`Z$.v9`.HE_efߙ{Z^Mۻͯ_W}zKlxcҪ3?/#,]*ψ|߻ˌ~x|Tzhm_ӭ޹y?Þ 9oe#%Z^Z$"C̎C!߿ؔ~]X+7wO>\dȭ{ Z*uTT?9UsO @U3^;f$-Od\>RWf$ Y̊TUP"\BeF XY"1EQ +#KVJ"oxԁ}5Ct/[PK,ÉA\J)LޏG\˕=ތQݰQ/p .Eւ='x>2i*va>m 6[ ?>~?OU'|w=-s04?C]VN^6?]חY3**Xr3?KO[W6D^{;I癖?O\lwpqZgM0rmg_-Z/;LK7_*ϱmO_o *>;}γQ{xھ쪴~ ^ Y 5|ϳҴߗ&DIQ Ll a,]B3V(lܒsRX9%kӓ]v#uAI}ΒhiGGiMs-he?$5[qpcڵ^Dɻ4qGkP,2-'n0S*NÍlJM۩qcFBy8%.G{$daerJ"f'uL.e?=3j1?[8To4=oRÅqĄ@g,<ķO,)/Yԙz1Wܮt]A1+:KqcVzKҠ=:dxw~9˜1%z0fQSTEXOJrw1gοwn9!;o]s]/M /k]\.x(%Ve,V龔n? HX)aY_ZjbS7\p?pu8UI,Y=y(Vʽ~!p\&8Ps|'V"{rA_ǜrv\g7gAM, F^l73K7~t2zLߥ~/O<=|_9iˇEoR_֨TmƾhQ>ynVh+sT*b3>) ھRxO)Iv;'+bsu+Ժ+u:Uj&ss(\*EMȭYBhpq,3Bd%S fxoVnF4BmrI3B% kP/nEt*$XB T"")n.^CM~h);8eƦj2+>)?x2l~Tge݅]E}v?OiW}b,[+ynǺ !htT?5/AOi.ÕtbتeeD+04n#/cT=!f_`^(4&TxV=#5_{T*9,8Sy ʿivA a2]QX)9 z8C6D!.XXР&֐%UXrUTKllY`T+A9Aykdd4;rZG OpϿ'_q_|;r*9_a's 44(EH~5l XUN􀺲?u/E9/qKW77/ߵu{\+Y ^^?~Mm S%;Շ Koo~?:+~ 濘ՏbBJpV_<:av5Yj`xҞ+GRW )[#S 뺆ǚ.[R6dһ(=ҁu[+jhP ܀υ J OLŦGfU4X JXಂ2tZTG# %KRN\p uHt5IP*\#`U:S!LJ \A`5.sJD1fC+wf<~tr/ [G_ QO%ggŜUUiW|4[B̧kE>>-礷Ms}oWLߥo/s|;>YNRґ7ȶu'z)"h'ue7zzQ:%1k;"j,` !ܩ)Ki,jWkC6ݖ/QGfUnY,"oO+pLj<.6\d:o}ѰGa^7} =cI<`R7'[֌EW\V|WG;sv, ܇Qίo}o5BH17kY#}v3 $2ڀbԡBTŠ2jцĊ0TD`u|u6X:#W80m]n"3AU!hZ]\*ɑ)jdTmGe3%9W8 řuWZתR ۉHor~<~hajf1a;?{N+_fp`Xy(ƽo=/Z(u'r_ì} ϯǷ>K-`w.==hJ XgƪzK&3SRL:˯s&U6D}͜Aݷwd0^srb|MRg@dI4#C*j]o.qKZ2WxoI9y*9O*ס]j R:DYEi̪vRZ6@5J_" -(9Eg=[??/bBa^(+Oz6+SKsZ>wb;eNuU{u4:hk/ y"\j J֬FUzP'U @A^4ou}qkN U۔.}t? 
:y~L} U|@_gc"el=9`9~Z?(ZNfh}mR@fw%y|M?'[BIxׯhfb-KV 2{VW+NJa&a #D!ðjb̆EKLr)b]blDU"DxK3YhX]zƛJ`fEP9vЏLme͙48vPvc36PJJRj +TRMVLާ+oߛۖZNU>H޳%~3c" $*Cڃd^Ńyޓ$2M}x [{Q`X⍛hr?n9U@X?hzsz~^MGa5#/ǡ<lf(uf ,"+% t<ʠ=sC\!^߬]c/rtUy/gW M9`:PX0p": bui<+M V)GNkE^9 u8)ˆPuNC6V_gG5|+s9?7Jp+c݂Ȇa[El((T[(VUu4vt}1iCC$`0WA׆>(RZߏ?uHmo])2}Z[==& K}L;qcğm'E LJINR ]$?_G=D/-| ~飧 mdvn/G ;̫@IO5z8RqMkЁᡚKb=wN%/;ͯK='誹n J:=a ӮADV&m=r>} Jv<xv7~Zxސ![+je!@d$ oSŃki4ܩypCaХ5K&,6Y3f 4exJ#%xJgrx5U6['u~:vB "łXZTQ+Èʢ)&!Pa7͒ IUl9Noce~O^4PcLWG3>|fD^eKW?̠=x,˼L|*ajM h~0|wGJ.ek{iWF'Jo.Pi%^%KJuSFwݙp+![1hr =[#l3ė˕!+*13E.e`;F&TbGeH,'i@B%4l\+.@T-~^P=6n~h{@6J\ F9{bduLS^}1]K J'̷1_NW"l)EIL>s[]Zct櫴[ÔB ,YFSwql%;@ƶ1JĿ-d}wIV -yU7Xr&[1^<':}jAYdk-D,R[JL>ڱp5C!L@  k"dP^LJW۔ر5}.)]<>Y2m_z~WZk~t̕ .j RXcJ-o0nD8LYI35I |`ψ875&\D KaHfI /&X:)fX&lHl^;dO=jX3aPXǬu" :Uj~ǰWW SY ] LS'% =JjhԼHP.M%ۜP~J7k S+KQ!VڶS]j;eේ [Y/gKӛ>Ne%'lP f!񠐩3D;JB(E"I$7 ,Mi)4NquP-lD 0-SUcPxTp$sODz?l~<'3; 0,,VkEr Nf}%, uw I+7گХ[ f",XGX{$s+TZ1u.a T- V:e~3.uAڅu md ~e/ Q*v&*GA(UEGcˊTB43AScDY dtk&=^VH48"H+&uD{5=2QfS0 x[$AEV! qm)FIUpC%AHM%0^#'r`X (rD)܉o@Lh6Rz 1`(f["y.kj(/m(1"@>U9B`s*T"{DdvV6 ,RL4JayZ'BtH'IA7pj*4Qdžn 3GAhl%Z 1d;h }t7^"VA(orˠ23+^gbɭfJI/ujʞ@nP5J ,=%dI"XY&p0ݣ0L0mBnd&iqmN/xP.V:,$ƥ" \jlZq|Vd}R7YS #jK%>\ǻUB"V j?[=x8JC`/սyx >~ȣ(gPa/N})`9^K4|m Hd>tzxs 87Mˆj@p(!c$]Huem>_"Mxjjp$,7Mpgzy$ 2/')D,dixYvS*vń'3?(\&, ~5 M( 3+MS]35TxRi[6NMtb[NJ Zִ*Ά;o6YD+(OAsNr9/1T.X ԓFZRcbK)bs5oM-؀rizPff\g2QY@#<=m!{'v#A C9m@TPP:L Ehb5AlմY#29^T. #&4HlHs%cvjlRwmEIc ֔i"9NiMI WTO E(ڎu3]j&ғkV$>O^MfaT dB_jDv-Rz;d qYE;u J:H6UѰv5JL`M ڝ,a'Z`)Ort 4 6 LEc G?݅I[Y1-Y+"`4A\ExhR_2nTGR20rƣhJPwG~ \ٖQ W͔IF^-PoXjb%:9K i( zmc42{$y } 5 â`͊ VI IK[b ZTYP!,h8aSCa0cD=0z[0vF0T$X|B\{L71̚Db+t(d|UQ l2))E``i" r7|/)(~U{XϰZo<)D? x@-`[IRT;&W i6JbU!r糀t1_X˖g8.d>*m)E >*V?|q"%+>iX(HTScdhYd+ gS<,K>UMZAɠa"B\u:^r̃"πb`ʸ'xk%R޴:32A}\V1ądIN6~ay:heez95 ˖ʟo&U5as@Z :Y. 
M)m%yU@2J᷸10xz`?*.2,GyBk _b`'YP#{S!(Xv٩\VxGu4`K;P)pY-T؄{ɡgZrj+[eu-:G KÂdـU nݞbtnթ%uDKXZn]'; YJi-}5hL%@*F뢘LӌM%EFDxqqU:%.; 1: jZY }+f]w1s{θ[baoD l$a5C,)(5.!RrYKr]5Zqt sD"?vت1 EPM{MjDk?_QiuLVu)R:CAMkʠ5 t(Wc?f|ÊrÒb*qƢ+a/mL f!xgxb-{fy5aw9xs2G97Ż,c(4pz&Z  n\Lv(v/Fe I@h uA{ xljB[>',;u ;M) B!Yb ,jbJ<8 cf>7A jtPir 6Fp.LQE #dBqͩ+e W8M 6zj vqV_Nf['p%W/#xā&b4,_!OuH>TqWsxp \)O 0jG|ܮFLRTj%r5|sVPDęiT/> \8OO[1.V0!). c֛F3[OLer*A/6!$YVʇv>4 *;*hc*KLH;NB*gBouaD(!r摰6pF$·av0 ovzUC~,2a6o('H5@vL Z-l'tebK!8$$d„ՔFOl=+6:W ũ7#}DJy#)hXa)@)L8BC![2Q],B[1tRwKmjstf;0?nk X#OEE.Cٌ5fMؔ%,)l^! 17 A-FVl] 5-@qQIQ5'8S*!ţwkVJPl1܍2L 5tQԞ5c"P"vwJ H=FXiU5C[$b\Za7ā8p;| E΄G'N„ZcX1 tɋ:R7iCZVV+G̜{xcK,F6v#/F{lK $ Z*.)a:d03S<0H KեeVFc,oU..(U8R2XN4]5-|h8dBʮA/p)dB#)a>ZՋXXm΋8B_!XKF2ERNFdB+ɝ,Z#SƋĴǟ5vVF᪇φr쑖eэQ`9bq^򨌉V=R%/ݎ' n{8/"ٴjZlT FJtn3s!qu0 ?҅.FWPBlIZغeZE&Vc1g0H v,TłHc\-qtKTsAz oWr+yh[o1ZQTjzb 5 =6F4Rp;gTzٚnh ޖ|G7U1fZxXRJ0w&΍f dq>E:QYI a*!l'(;9%A5jLbgSzUmPT^|%J;\m&ӯǒY##/((壊jfG~0 '&yCZvqսEކ|?S.YJLp6@Õ ye&IxN5 dfU aZ#IS|Q~h~L2&>[1X(Β N2ǭ >*A̢"%dMDPp)DMj[ƯEK3e؜ɲ0v)W *O"*& }^[KN;9NYKVfj˼\ϦUǀgs!Խzcٛ;;DL185#c<ATkrf(SUux}rILJ)w׎EL,;:!ma-g[Y\`1}W0-֥O,Vp dS̾rS%`8\ X^tox#Qe%\b\L$5qy .k!S)E"Ȟz"})IrcLP.># MmqDxvp1dˋ ah3b Z$|uȇD6|$\X qD&gO-Ղ E^rԕes87s-*n3SUVT76x؎@z"{K00ehŤT'0TZĢ[y,&kL"qv"pSŦ. b\vLRɬxB>5۩ؠ*lw,״ I6:\@z9F:XQSś·Lin^!V`NubZQebV@ă܌& q\hEU-cgč!*Ϋ8 QPdafأfdH|~ ZVyhb*yFTᢸix:0O'`%{U羍~ +¥vdl`5d 8jmSz’n sIT uN)h&ڊ:Gu4{W^^яe0l *Άst^N.Qwɛ By1kC.Jͬ]\ ]-3]Pf:;(aq̃]&Q~jMPc:U(R_i{35H`JcO-3j⮉iBM NwVZέV] 9>桌Xl6}h:.vR%һnF7YŦ;f )Ŝ1I`@|'5ݳ{#6ݹ8*귦d6Ȩ cɰ*,r屹QnD)Y c|yITlz43TpMUkKRsZ16Wʕ oy) iy GwD֗M+ue+ zM zɚɉV20DLsp1cQBrXir6گ?#uTN`M`U3 ZlijW! 
ցS)Nk^ҕ +'z«-}1Cԅ75 gbT\U5e@wbnj5͍EDktZeîՎ  FP,߅CS1ͭ;)#Yl]MJoGhU1R=4T(1݆E_y{fԐ鲵\x9hY_J΍*]׋HRY9֕o9&Ü= 3SSz:- YqNA[Ҁ݆{eouaSW#A>-&7͌A]rU#G0xؑCnFVN5rE.QoqS')sO9NI(!73B1lDI+E"S4TMeiIiIt©e9)G#t\0ɫ0e ":cM,,iv,e#6 yjvWq(F ~G/4TLUku)黧pP%8eSir%gW`Ǜ o2^g0@\@U8Y97"ug,.hF>ٝz; \yZZx{NHO59H|wɯC1vIKS,sGyЫHus|m+B͂S> 4 384÷123(-J<;zhh^:n6z.MLtVeӐӛkT.}h;˃`ql>VʀY5Tr>.:/d6f|MvCܨX+o)[=kŃ3HQqiq10G7;|.f-5ǒϿz a(Rhn1o+{MdVoh!yD;ۗk"A{Q[e܂/Kc;'T.8w?{nz1z72t&>d'FJgeFp-"CW&]k&<{lԡ $V!^t sWBQk#x /Zr7j /wR0n*μ(L%pc&204Vd3+-2?ppնX%) vB wuߕ!*)3`frDO;z\.xSԀeP3Ec.P+N륆o_ZYMЙ=*>5XnZbj0wܼE\Nga~{Ly1aW>lZjh$B-aT^ nxHJ8̙lE½eN9Ukؑieu7+mc'xT{6 1 rҪ;nJ(tb/!ʳVLc)j ָgCtܨS{ ]mUd'V"J^r7>_݋7j#޸}3`x&-ǯF?M- 4&ΕM`ȋװݸ2cp4\Е =Fkqcb vE[Lདྷg 3{ŒwG=;~Nuŕ;M8zwY葭e>/06̙6ә@{zGi8cPq")}, LVSQJz(,[K;)+:1P^賮аƂQ# \UYZ}GqՁ-e=_Wu[kyK<2گc&%KK~`#LdƔ֙@xm7[mc4K2ԾY^3}*8Pub d`!ZD;SLCg nbip|^?xyqiM ]Fo2o?ŒShu4hi z+1LMu_Pȼa,w\lRuH@Ig}BY!O\7+;b׈D=W?| ϥu} \3v%fS9mvP,\lŧssBbwùvkY̮''I:0m7Y.SS6TvFC2[/kc6E꛻C4*-& l` E2( Y5/vp_4DRA?ndԮs/FvOihӀ eXq?!lm#DX29Q)/+5 Cf#6A2`@ $ t.~cYcBK<$ CkL}$;Z A㝫\m8⋉eWj}]EI 2ƂyMy)M3SH%_N*lV 59ޙ<7%Ah3 <$w`kP1v:-v~#b TR*6}f}>2q R09ƽlKK@vD7eF`cr*sd=9L 麀 !}K=7`i_ޣ+r+m/,GӉۑÖsIxБ˪J||0+-}[6vw*{ieFl-fwƺ\\Ġx \?'D;tTgqaKf:S`v pu_yТlWs鷑54W1rƦȤV$ALZ %Ur8~{hh/p>Kn[" aGKYeթoԖ ,-d}?wc Y/XgIrndi#(y7/2!6,ے$%q)4LKS7'F*f%84*m 3#1V0&^͐H%Eu^@_?:gd^u&Ͷ/h3z,/EJDǹ x-ݚJѬǀսqW}t]%O®A ϶!4x4vPx y8mk*ݥ_1yכnq2epDOQf4 I?+_V)4^U2)yx}ruyrC<>"}6dG`M }Pnpfv]^?C &;t7G3_Ng=ߝvb'~cdsLەʳo>\SYK;ԑ8O#_J.7/hÝh:%3ZU.݁)G\q=ӖUcrl2y8TFyZcbXF쳢'?'YNaR%fx꠼7cQ%[׺Oy/g%Ѡ쵶0ݿC-xݯ[^?@"4Tg wt3ɏlu K18@b&6Qk6~ 73@I! OA7D x~gKv%u$ &vsmygmh=MPB"\`)Jx瞄$q9_)G+J0i <=lzlhM"b{\m]]nw/"Ce%aSPsDY#4׶5tt, 3 yot o){^NܷX[7_성H:vx`3~-]n2CZvX5w/1Zs6 &fʝF\fvnw 4cV. 
m{`xgVauy9 &{3e[p=[5W)p]Ip.N"chѸǩ2[V\wl 6kϜ(\\>&- uSv׊4iek0Dd%0')}f[ke|B"8fJpm4mV+t77\4/+g>GbҘN!U+ɇF-:Q35ٍ8Jg1*2mt8cT %R 4rdqMQWGq>|\}:N;] `4g w|{s"W3RʼiY8U6W=o3kfܵZދ΂H*7F,8;^vRm΃&VVO_h}ٗlE9p$G?b~`+3Yn'yKetQ*+t/N7Gvڬ<[Go'뎍& {/}og(jĝX#/.FDx|Nã,i)1FKǺe7QLb؊*K?MNǕ7 &NǩwZ~؁|^\Կ&W*x{^>mMhWWi.C~Ʈℑ?MF"(x0*?^lh~2}NG};H*Ҋ3H*ߜ#}& oalWhN2abezma :'.G/O$~gwr*'ʖkKNoպξ d_`{;$I p\'G/7J>t,>G<7ܰNSx91p.U/wWs;<ۉI]$.qJy:{ow:w!G]/J|5X]'ݑ_zE7uNwؠܭt|-Ϭ3z;+{E0iz%Or;O'ȅ~NDѶ-}.c)d|Nq3K?#sP0grrXZb !j=ikKiv +W,G~Ja?O.BjL5ҟz_Ow_^[Wyq}kuϸ7۝ϭN{/m3-GeYl ~cېM6=A ?ճW3}Q}^p)w'TWǮ' a:>r<Ũ֍}niF@!?#o K_yi>fOU;4-xU8`JPCCaR_K'/J{}, X S9nӒo:i`;+cyyoby 6:zA%I8i|7dzx.;N;d70<k;޷v:MVwŷ8ݓչ(jow ڎPҹ6q=~g&L.7GFGL ._ccgi%g/-r( !ߛ+Uq=/~la<ㇳƿLAl6a 4oMҾ/ R*p+Ke( 7zYeL!I=\1\ʝeg K֡,p?Ca_PHv=}ú<Oܑn¿P 2|ަ}o{?r+>G'&oR{63oAfOeCp ݣwB.X ޔWzH{?gO9, U]jw`PlW{W#Ҽ>T\|?pٝo/q*N]By߇h(>$3z* җlWe|v.?DgZw <{r~B+v4O2^z^^kgW CAuFRnz?svɗ77K}گN76'7{_a{?}n)3-doQ=_͏VT;C$.8vfv\.[:'x6߬/ ?It8)~gV낗]{<.5=ҧS֩6}g-7Yݧ7>=??kv]Y{P~}?+u&d֟5̭o>f߯brw9[韁x5?@$$"I?Џ ~ǵ >WCqiH֢ \'ީ>kUA~L{o|n/aϵ??#(Oi\oJ,ĘKaAh"m ,ޏ_o?1ysKqS;w7s&o8{_^wGuW#^pz&G~]ooGT/uxz;7? 
C/\=?|wS]럖\n^3|_(.iR/͊TΠ̪χyND/}?dV3=uanX._F??>;_ ws^oWoO#v=rut>WbJ?ZWz.<)Yُ7O~kZoޏ;=ng.[Gct?a~/|e˝jk~/j~WyA>@:$U3 ŗ>W>g68 }f]YݧxN7I[N}~Oȶ?{v1~{r{?TE@/Cus^e>|>th܇0~W(:>g|=I"̟9*}@G~:?rP_+H闁)XX|}rEY @(P??/ z~^s|_.~ov7$y8{8}O?7;ߕ|No?,3lz }%O}|?,-N77SUw+e~a}пv "eViMn[o1/t2?|Q_#l<c??{Կwyc~ozߋu/Z~__zf]fvʶR?_&mnv4LW ;~>z|~O~7g‚ lP y#_ ._ OKXன|3;H$ 0Wx`۔Xo(t_?/owUcWp?aΌu~*.{_]_:<-߻c?>'eJ| ?x ~@F~ߟ}ǂAc#rUݝ>>P{/w[YϞ//{vHi|7/>B>F1f]{#LM('dO|zǥ7p|^/y?a*!?j0~)}v^ޕ}nJ7 wŒPWZwSok/x|*o2iotꯇ(S_^n}[>N/4 ƭ?Ӹ|og~?FW-Z`M>wD WHB:V[kyvo;<<|o{R|~gj>8 'x!h8]% 3PzHYQc fKMo3&p}C9+@_uτC;^Uo_^όҫ=!T*ސ=m'² "@Iꄓ'lp2gk'Qk[ + 6G_qG/Q ߍG]OKCwoŁM?ڢ}?/vm_TAH;S@$$}^2ɿeVTc/ℱc}9gԀ\EEx>giOMJtr?E0@1.,Opb,6|6yڟ_ag $H H8XOLϣ/w;Pu7?8$"D!GK󿂟/?B}G~_y^{OoПv"@"@ޙ?!u<8?0m醫^H@H@=9OםK_|?(œ֠~GĢ*/'jv~?WTT?b/3A?}}_ƻ;KoA^__a5?Wb k:AD>?p|ϫ1E<~)XS_;Oz'?+)Ν'|/00@.QPRTP D'?Cr(b/"EUE~~}_W(#E~h"( B("}**H $ ~q;U??+ԯm .6IDTa8p$Ol 9'ޔ}&V2)Ikq)2L!Ue(FXUu4JHAD]d#' GVQ&%!D +A$RvQbq's D BC !Qi'pxJR ihqwv941cr--0I|pƑ$`"D'fh`h`I܉J\aPVC)!C! @$h,҉ RJ"#yW&rVI)JJI$9#نg}_}g~XTJ1(+[& m2$]kRa ,9_\}_k mt 8qkU@"""$P@ TEHETYBFADaDPdEQG:9""" idE @tX"-"," "B22"" "+""$ jW.1DTs e!$1H) R!IHRRD$E$Dd$IId$@AIBE B@.T*P*:*@I` " #HH""*  ȊHH ! $ "T ⨂ES "ȊH_D!D$)i  D$RA$R@PP@ AC䬆7MN 2 (Ȭ) )"H2$HȨ(,!$HI"+"H2(H- #HH(``>-dPB@TdBEDQAPIYDT,H(X Xn/g?7F@$P$D$UD$T$RD$R@7ŭg߽DH+ H*$!" ""(H*Ȣ# ((bL^wgN * " ""$)  B@U]mPDoH! +"R"H,qKiǒ DA$EYDP " Y$$ O՗|~#+(h*9*_$ADD5(k5Tu+5~o~hjb( 6 P+|/W7?[?';csD@d9R!}lZ 9_@ +) <}2Q + #+ uؠp"AmP33R8tq 9-L-#%Q '-?elEBȅx1}>6‚P M n &dˇb &h2f3ٙ_@C0C033{3<~*(ffcED00\0MڵsPLEE :@H8I{ճ&/ׯ^zׯ^t?|'|gUT@(;^}[=^J|S׿$P b;]׍,q@EmiH&NNG (".*"bad b . +yUW4-iAD~gb"+жDooɾs:~莎49Y HX-ceEQ ,Y: B.Ck@o/^<3);|뼧gDUP oۯ_}or"ȼ'3Wkσ)d>n)JЭ/)+}.ҲVZ]JRՕA0"`IHHFDd$R@IHĐBF I$D{2ȡ"ZV!! BB2 Hi$$Ҳc dFBAd@ eg))JQPPHMMQP@@ЄHEH̀@̌DPo_J]9o:G8u]6RJR$ Bh( *j"" BDfM đ FF E$dII BHb@HFI$dXHI $Y "0 FI$ BB@d$Y $IAI $H!BD$RAY$I+&ԅdj`Dh `"*HH`b (DDRRMہSr=7۲)i)"*&jDJf*I}ֵRV]]ě᱒$I$ " $` RIIA$FH I#BBF" )Ȉ&$E3BA1S#%CD$30"""$"2$"3= ̌HQ PHHt$ 12"! 
""P3D3"ЈPLLPA ADĄ PUDTȌ  }o37 ooqt^%u5 22$1#4R4K}u/.e_|e L.@@C3BdA@@ IH@@Dt2%"B"43Cֵ]$](]IukZRo"&w u)KuiKMHIUДĐHmM4MDD PHLt22B%##DĤ[*HHHK$kJHJ%&o5e5lJfJhDjfT/XV})u`a.KIph`*j*&k! ѐR+ukK+t!JֳfP/ip i{$BFF*bF"B*$hD`f0+KK_}j (]@ҍkF_u )HnVmڊ(av!zSMZoKkZ!)*uԥBJRdFBVBVV!+)+JH_ZԒ] RRTլ!}i*PխHDun[" (sNoC88^)uԺkJx"  *)hCRR7)7_Iu닮JPV2\JJun.(Beh]R mڂ*@iX1 Dr##,:"!)h&jF`B""D&@@hFffF @JHd": )"* &DF@ @$"DB2/YZPIJR_)I$iZRRnނ ;?NM6w)wUU'cU"#7`3sPR );D:#@+ ""2#)D4JFGAGW`b4pWuWsC *2JRf9 8 2 #3 "FrXHg Ru$%6!RpgTК@ʲa#V)"M$b"$M "EF ) !dĂ(Ä.!Sarhdd8D! ܌ T$*#G-!TVI,RFP0#g;0)j(ICPKFQHXآ@&T9(1IH  d%#P),F$ i&C D]̎ XF` "QR!)9#(#C+"CC Ie#Ò´J" Ė $b b ΢l X٢LRHB#8cURb0 $B"DnfD"Es B; J@ %"%%DT`8]XAZ:+ɢF"L!&yU$" iHMYJD(lHC93D K!RR2HHIdruxFF(DL&l*fpP"N2F DQUL*0''`Lji(FNn)C$1YA9403I\e%#a* )w[4tMaqA4 P bkN} Y, =~ ^o߀Hmmlm mmmc$ 0u$HZl@"@H %s~ n ")kDAI$XH$I$I$IbeAQ+DdYA"IEdYEd@肤$YEdVEdVEdVEdVEdVEdVEgQZdVAvEYdUVEYdVREI+͈%@ ƻ5O@[}F'>}q; n. ddd+z5LQA J56߯5ߎelfhhL>WzxI7.}}[N#8}y*cŝP%t'KAbZ3xxP^28 C߅;xnEܫ w\b^tn@n yLkؤhp$$ $d0` 0` sx!'vx-kh#iˏ*(XFf3<_frɾ &:D7]\&{[OG+_M{wDL3GxTCyGxt7x|~#teClm:G>>*sv||nK8uMIӣEKe[%mv] / Grnv<Q1Z_{#u-1P-lCQqp0S 012A #vd Hj3)ߝ^c4hѢDHN;'d5 2^ev9}zwu{=VUC8NoO^Vh.]/G!DQ5@:izn#w|j*p.w'w2Tw^`NoMTN^8:8=;\)kg3?mJv]kZp /I 4GM k=^kb(vfedJ"d#dY.gD4T pd3iU%!.>.#DQ p y^K}wB+8@@4:- c+!4ٳf͛4hѣF[-:Px؏r X1:ӳf*h#.Ynw<ۄϫ=fHtCR*g"@D5AXvgNjHպނ`* `8a:NSj$v^}%U##.3E3\<e-2@rW!Lqs3 8L=y*੃!@1\EH:A,t M*ah%[::ڙ35@HD$u7:/hH $5ZZXZZZZZZZZZYʾ bO=?}a J.(8y}>@B#!mdz|/jEJpppײn2r1z=GR7~%l7sq\@11cGA 05삖X8۔K69 DoV _Ǐ 3ļ y^ $r  7vN-%\]eڲdq/[0 2LLr?$.6UQ66D|"s@sS2yʥśs98]3qKk7~`bkuiay~^* ㉉_`Fbjۉ~:*E."g瀣^cxxS/)S0/4:| *[}-^xE4R|\\L6uP0\*y""`8ʘ")Sֲal-!c`vfu2" ?1AY&SY8.F^H@z͡hg˼cj^vl,vCE(UN)t4%V`RډJZ4J ]uїG'U5HM@UjJ%K56BH$uB$.ܩh44U"Sـ飪!lh]DBCAf*AI A@(4E-5"@@@ KJ4 2+JRu)Z 04nU86ƁG@5lha4@ RkdRJ kEJA5M H@mCEPHEJ.vmhPR65@ġFf2hkK`b5=ݙ C%4ց@+#M$mXMh 02цV*()6f kA-5k)ŭiB hZJj̓hZX@:u*WbT-VIR4EP*[jfE!j'HWMfjPQPM)TRT5U(I44)H62 Mu”D*t0Ts"D@%J*j)J֐B+Y Е4@PL)((R$ ];@ʀht4鮔( r4hFA N@   R@P @I  T!D*T$I"DU **E ( @ *~0M0C@h  
hd4&4iM04)``*U?24@M4& A24d4@&E6t".rjSe 9v)GN `Qe@Ȝ-M]ӿV$Ȯ,bQ5צS%h銐AU@O:[7}3֣̬o0 ((eRchܪضea_;,ۇ@.0Z6ċH*$oc%h̔2hISNR-* .bj\F ]f.k$k3dTMi<ZFqtZ r{<- #3DT $Ƹ 5Ʋ|(+';Ո [-/ :SXpR(O1kOK&鑸Z$@0QA=58T@{OKD/POD|@SrW29T7%]Q0!VF BU6IX,2 Sv |RH=$O7 s'! yX*G8(cly΅Sk[sM77稓hh\&1yZ ssOD qCk:I]gc΃ IqLߙEYk҃Մ-d}0mdCQ0Vu󛰇[!H$6Ubwo0D%IrNڦWyܢXP JS5nUi(SJL%rZ@C3Y`<(aKԛ+$v\Ԑ2Uy= c1֞mS薚ԙa}+ > /z Xƌ QH[  - 6*R蓄PBUB -͂ ,H; I+U0$`{9%ZCqݶbڌ(XcaX9]qS=Jȁ60GQ옅ηZMVTV@!ͽ}vO뭁/ +as}Kyy;s#ç] IeԬml3ǩ''6rt@q ܣxHLdKJ@vGBT PXVZo0G˯C|m%$3iS ]xbDLR"Gq`1#GTFcM2c м}f}gV2,u0f?fMX: ݩMZmgpD-F|M@0E>-m9d a(03(Sv) Va8G[^0$B:nwP5!=ٳ-D6F&;Џyk!ٞ _,b`԰G:,IlhPoF0 lk"r$"v) ""݈NMM /`Ӟmm()d +<} ^fJG݉oTvaGb62w乽Myۂ|>qJ~G.)9|v}RiPL)fv{+@5uQ] L`6l-pު[(b%{8dnD2Zm]y{K@GW:ͼ|w=2t܆Y=¾of4p•i KӒ 껠,1^6,aSb}bBR9Yv>c u*.w/mD'jlj8,A3 Xip^}J9ݯ][<-ǎռr\^I'8]i;;K6e0q+;#sӿg{=p\(9c4ۍeq^rD`GÙƂܽx@Xz;?`>LF=3w!GYgNV!(& wb0<--Fv'm*2 fDz2Xu%<͇Ӳ-GDuWtn#:8ʾ/<%m2VI c3%jAUc * *kr 04.X{\?cWb"<^.k!r\z$ro*dѫbvbW8b 'C'mvѧNS^]${lQz~M:9bظ[˾G>ڷ[gl_)1W=kA<_ EC8+ŏ|[oSp m>nu:qۮ3ĢgNb ԉ]GH.%X}95 !@qa ߢJ%h u&iD> й-%$3S1"б}:fe;=5ah#TFe Bޅ-MDn('(D>M>0SA1&k),J> m|gIΔ42Z;r!r|0u׷nչ'76[2v~U h[,Ѭh1= l,fegCfF/YW~G+-Z`[WTV&H/tO[~',rGnޣ!+ࣲ:|ҪMWFl^sTt| P0}h nWCKdF&db(o5~{spϝツ;>ADva vC;LZa$%"P̻-rX2Vf7wcWg~{:ٞh0#aѨy3~qO~<}t=`l2u-j:ۺq``ËaKo7͛$fH?1Azj( bLu>kBMAtGSgA ڶTF=SnBCN7|YjL}%z0lod}qWGJsfL@:]s.MXz([7[)nֻeƺjU.W"ap_5)&={cŸ|x/Y,s (ϱ,3N0Ŭ7[9թA[8BkIaFԒA%H ҪVFekϝԠ X!fM^>5s?n$.n5v ^ WN="yΕ8bUHB-Rzgmvָܝ|CM-6Tk_ܠ 0s. 
xR3i˯$nɝӜok֯ Mծ:]sGDxۆkmd~e"ǣǃYk>uj\r%,;R me~ uACߞY[xe vo7"ȆclO.9윉>9^z_o>^FsY'A )("JzGJ%YgW+P;#gQA>N=rͳ^tEK0.1 X~*[9ű-s+:ckl]BFpў%l/*uS+}'o*lTÊ #9>>c'Ѧ{_Msؼ8_CA󭈎Q0"&DhyFs [9:{8>2m ,\N)ԍޯd*U_#T o+=uZPj#WlT%aDLAR@u;4U$/lVf/5$x@a27H~擨;[ʨ0}sJTJ M{3+9^x\Wj3/0#sk̀V꘎GYW"^j #kutVਡkX|ZMtW폿GvNKgfgy73u[\&MRxo=XmJgJCv AȢ71 dYR϶CeA}(E veRGٙ1!jtܕ.V\#`s_Z3\-}~7wOa&MJᄥ'GC7ڡɈ~e04s`RñpoDD$^9Y\~\,: 2XԬ j9K qG8~L,4{yA՗`ː^;8m:ص"Ex2s&׍BX:I(I 3w&֪9F2A#P{or[؉L ~#}WGZbݐn!LfS*1`$u/rJ_>q=/7zͷ%m_ u˜|m.dn'\ϝۂ~l\G-)],ZJBYpv?YK?Sɯ1Ǧy>2&jR;NMu>{wߍnwƶMc)T~>hCVI|3<)q>|T/ʍ)u c?C$&ӴE8&UÜ|RűqOX0r@\T!PoO7qwbI2B2:F`[Uwc A`"G)2 6TvB=|`y7eJ8`V0>f>e9F"5m}o_D$|,M=|{f rBu99>AlXk?ɈqT|$ki eQܹCgf4\P=B2y\o1U;\kB1rN 0d^Si{M묚cpHGVb#Ed Eq޴ۑq"[BU-HKN삞3swhㄦ~J}Z1W+ %{UqˈGGyYo->d}+LS}}H;}tݱBuqv:_ȈLF!גarthbe6Rdߜي֐ LL4t'%^jTe)BRя081`G9N6%rH r7>V8 Ik S۹@+t,RLlYryR;:(4QTPLZ cRZ\D ̀BY`A4z]v R0΂A4̣l&kPy!{lNTv^ߗZuK|g7FA-C \' Q>b}",X#kgF/iڴwap8 -q6C?i,'9C}GsKb{C8h+O3|b rݵٴ%@)E=))8wu->ɇ;"5/r}>!uK_d>b_ͥ_ vv&u՝Tێ=$$N}Sگ mlB#@. Y2Bc[4"D}Zp-Ƣԗ]m`1i<@5CVB;Ry~޸(еpc\xic-KzuehLg5*9LKsT Ӛf{B\+M$@g/0j j7ރ2`dX_Y׍A yI( EYYh"g4K\DJaH &bJ hC.0 aAGqFAseuȑGbE῝7vBAm?4%=L\K11#ts7paʣjj;qlݹihRGx:G6ع{g8]m{+}sM2zqU[]_',X5ޮ+sN|??/Glz oh[9 "re{bw\+/&OAV܎'ǩT?3ADn*]+"ە(i]\ץşaH)EQ~X1JS,љ乶]Z"wab^h:utR^+Fj0!%5m״{cxDrx8AHFl@2Cۊ TV[X>P0fA0 '8=Rq;TTRzθE:!G-tu;_?S~d*ӿCEVKS[³s 4*ڵw+Ez:^rb_5]c<1zxÈƜևӏx kq,229(;b+jr#v3(WЎ*uyaA)e5 2Tp 7CtlaLh"wjuтsXbA'ل.kO'yUR0` ` ɒ-ՏNI{)g5A&"f?6J0֑ @mg] Kcd~\),AT˫/]@ hg><џqAq` 6^¢GOx$ 4X7 q Ir6#ANb-%Zjh3I|r ~#; W}xT#Eu˕Β0g1cf`! 
عmjӡ}"4DAW],% ;F%az'g[%^FmpE疷T2yN˾@v@‡qln'y4h1ѻ'؂Ҧ !eNto9 O<2ѹ>ӺpS`Þ$ r-CL9h%: pű+/ArulڝraJ8c ɖ4ԟ.Wik{Gq~Jq9ɪy𖤁H!Y׎% 6T"'A'a\9NV fͷn-y̔#H`r$?MIEćU?m9ٻ#YP|7\xjx`r;wf瑪wR<[O6̆s~K$ dJr@ V1Ns=nd}aRe>[9?GEP_ZĒ 4j(at 8\ĘE]*g%RiY|^`vjv `heQ͋ 2#a/v<-b$CM\(LF!˂ P1 mO.\ QϢP0<|>+t+H 0hVJY:@7*P2ۮ9֯;_7*Y%hq9&1v}D b3٫lJ>.fY&6^ӣZlW tlKP=-+bG}<~|b .QkƗjJ,ÞP6J@n?K;s7^⨝N-4rCPS]ZN-9S\]ڛ"yia mAc>¨>xa QM2dxoeܺB7T̟u]B,G[or:rg"`C\7RBz{t$tX=`@QRRD3b8_GY(bZrGp3$ȬdbqQ%'.]!ML J,@0]Mrsx{.1Fh#,e 1H̙ۅ)G@B27Xx"pf$&g< Yt\\oBÃZ(>ibl;Xўd4'2Hj, S7?zh\7;юJVV$I[+;ѿRPg!3 hɄnHrgjϊ2LW=vJiJςI;bt^lqQ1nxڢp*, >Ap@j(s;"iX]e0v4G:aX֬\F*m3nR֢r{HY)r!*[/J*U6vۢ-(%PVحqPDbyT؂C\UoQiğ,ZGA"nV uHVH% `e̙:#BfBH7Vgf鍘\E\P.Bf *m4;LU O^'De7H-%lNd* @Q\ NHUh+92L 𩽖y_Bs,݃8@AH>vP)cFc(^rh6ԜoCE׸xwazvݼ\(nJ ؐoV?d~ncM=tjp+ W0wxXX&DA3CUYn$Dx2hGfF, aa,zԞ6A:=0KlPə)l,UM&b&yOuZ {MZeGKF|=L_bZ,u_@R?,:uK6§x"+k!j9 s- k2 N:b\/u+;qopgD*z-×yQw6k^VLSR(H|Ρr'4K6-[I"I賱"3'=( m6XYLt"ITJV&mXhNk|N^3!Q,/(ǩT1 `?.Vc<ܻ rxz8wZ@JǚFR`d ȴ" 2 `'ԙN]6U5F`(SR*hTٰIвQK 4]cݟ>ܿw5(d8P#0Ht$טϊ4N6e}cyS 4.np,&Lՙ Xo4sFu'r$5oW;)$^w4I-`f"",Ȃ(y8U.c4nrqM^QCUlqDXAځ|,S[MLQ&QKO aƜ[ɋ{̄ro0MrR>h>zf* sX0wfYHPMTPP$֍6{Qpc;` `-9*oRxq 2S0= UTf̈́sQ9!r|A̦Lq)#lcOGA^VM/dzHbĊQH7M<<ؕi(elLMqee ,$cPt`Ǐ@2ʬ Iknj @ wEZ- |b('V>Bgv..%=Kf2 &'\bYr-pg?qϯ<\Ja~tbʋ+0ӏI1wcQ}2pxCV,&BcFl#g^[:/^^']T;2Ep.<✰d5d.\[%)㫋&Oar0 /?ļ:5n(^7M⭈d҇݉DHN;L@xlDzUm "Yq5z%ECҎ3 9h)4T ga*6&BsCyвc*p)ᒬl}/p` LzGbOAdh86Vu,oNvp9\v8` AAc\oKV5/33Da/a,Et!_)ehRq|AAu3ϩ!"7 'Oay.+26)c \@J'qC12~OFFW*iaj a|W(!;-[K tW|& BYHRM8&([ъ],xmBs4>4xbʅ ̳5 lumMAUNBzPd<"y.ScwFϝꈰN[`^lU9r$>k5U$-P&&'8yZBMG^ #؋hW.k& > ZDrbݺ dm {maUbƃ KW w=ud?>ӓUhf<%`_b,Ye^PXAyNF*Blf 2p"]"%.{!oŗH1 @ŃO#o1s>8%{.2U,њ@ XvbN[,Ћ:B$G;_tN N.Y/Y_]N_iM4мħ).{CK`E} 4"*|R.FlNzM骗,w>a4HY?[Z N@;qqW6#q@rI!W:mxHn hIt YѴUՊwb.$&ɏudWt 5_SO(BZ10M w6Q$hbA7?ިڭ-_ O.1JShb0\9+s _ӊnkAHJS0NXN4@:w@XYA8| ˟mtirm}Vy%%mi=v%6k^]$ñ$6*c+5srDVʭX:LQe;dstOA/ 4*@9dYȒ&!X/TH%EjՉ[&/(*1$"}XL0[`8}.g:-v@c%+pص9hM*E(,NmǭiX(YnO;7=Ĩݍrq70lA T_xPE)BԵ .30,Y \o\{-V!!Ö;眆5`$CxfIgP< Ėp;%wqd{5k*Lh)١1g\I6AcQ 
Weg!5lg[s1!T]8hv]P$8Ь؛9egSSkbf̦]\h׾ K:wEu]8.$4[].4(WDE;mA`A Pӿ}|b{[6 h Hd@j/Ӕtt݅3)LCgp&lp1ՇwdN6džpW7w#NEOU(p^i>b/uz,%exSp?5 |Ź` a7iT%I5H,0Z#  /Xk٢#GG{ģg+PӭzQVx@P/#VҹautZt fD5HG8g?n>ࡇDS<@qQfxk$2@n¤}$1evżb%L^X&>qO5X<[v-"zZT7yv`j-׋n2LRnci9t HKL@z䎅ƓE+*f (üal[wʦe*ez).S'yZh@]SI=q4эd` 7`!(_!s)`|sn}p:&)T\"rK޵P6]64JՉ F4ɷS׼y#`.O@ca;tHl蠕GuNhMxu,6_TMRċ\>82HaVfN JI  7NW2^_wڐ>\p v#sǑuetme%q Z-͆V1_a mZ+b 1^W *ex;WK'jX[Y˵~[ƻ|ꆻ{ndzAiIn=w'r:4'G-?DTp^Mܞwy'49]_fږyM؅sF'KיGN^_:a"쁈(izeAWfըndyv'ZTc ݼ[q,/v]b7h80}ohH#;P8 Ƙ_[}>O~"lj޲RjIwJ'][¾U _ ̲nu2ku ` q<5 %ϱ%(#iaRoʍʹ$.iLxdB53Lk 6cI < kװD}6|"E-`=)ۿzfAsBl: !qk3. kA!&"g2 q B^b6<(#lJ/j;KoI6-fN.qXkҺ m  F<;hpIw0̑Uvv0xa{ BGd|آRѬ!4*$ZRQ@+ܚ,#]]^Y!f0EW:(m" &ewbQ=V{O`cziq99rCDהJJѱ *EL9tR5s9l, j旁) EHړ=o#y̠υ"C,ccgUi]Dv :YZL0 BE5[TT^%dRN!.|hlqrT01g\WDJc#[vITл 0 {ZyEcCXEq=(0s"2cv 2(Icԋ95'[Ë/~N`Ċ=Bou!IXl$þ(+l\h[Y`=WF?Dyp5L 1hZ FJDbBhp`f E K/85.@|Ew-@^>FZabLsuՇN5%QY< !rd,.;??:8OMc$+#Sڡlǹ*SJ;{߷,*>fbQXcR asl|lH"kD%|#@>pW,dk=®,8Q^y^hL2,a EB<_T圆&,(g1Ӯ ,/4\P xs8ˆ& -ORj< ʏ_ PBϳZ(VsfȊI{ʂz@s ҉= ~fNc煭un/0d䷚YP *_C8־W}Ig/ Q/YYKEY<=>W GlԔs/1M5f9%^x#1 T#_S+EL[ai48X!BI?F"f34#׈t hPX0{AۛvRNf .Mpub{q/FF(!7 }W;Dmz"H[%kP昖P1 ޒ@(_Ψd/j%sY*龼ԡD2埉)hW5Dg'%ÿjYCgNNȯ5~nۜ1*A{P5M=cH[] 6AJ-~H@F6x6K [܄>l\=uaa|P@NtW dhJyp!do}I, Z7'LOʯ3DY@pؒ`fBr{(frlpM0'|zc=^$G fN`HudMפ`ȧ6 rۦcZ1n2 SBpfS \a%P Qcg cvpt[)OO˻m ]Ъ3F ?dXHݝm%BBdw~R3蔥1Z GPU]ַ7جs&P0Ҕ`#۞읯Jčq ^lO܆UwM{8rs(_lc ESe${L FT-ǂf 8=BL:^8S$+'dQ\h&v1!Kˋ3E&t,8gN+\nWZE^J#Ƶ$ޛ-Xςa$$՘Pqc@漻۴*ٽAv.Y"Y8mo]7[p\-ʝ9E@*` ! FlBqxhW:JYGQFqXOE{r|7I'%ʞ.W -*}OyMTWAX#bYD;*JW@ u.(b4+05i#Yc}FX*p# OdQ6EPyym6]L`}ǻG+ARD}D|̷p`ً:g<6ս*!h1]D#$1 .K y~n1AX9c*a,\epF` (x6ᤩ]oLeHdj\3Q͠FBiV 'tЀ@ۥ"P+W:nfQ)7otBҲ5m۬OMrSȁ,py&pT]9VyQZ\ssI1a M0Sg9-,tրq-blV- 9{u\(E^c&/Tek:sj_|^:ÙyN|P,`ÌIs/.D 혤~ɑ$%D#jft&(]m NZ}oaL2M%n, 6 YjHb)~x!n4PI7\3 A?8t`. 
gMv)jA!`,#pZ6p2VpxI#-լ>Uc53D{@~ĩ.:LΚp`ه6Gcc=ř,N5IN%,].v!• 90`sl?9A+nHE&FQ tKk!~JLYd۟yYJ'=Y2.I48e4rܴCI}H$\Gp>W6f0pd52WYtf9kmBڕ6!D B!9Rx=pȄT\ +>h7wMN:[H:+J9i#/*3YԉCJLYiuV}mAdUYK5ipxYsFH?Ǘ3!zM$'؅5&գL'> d5V@ rF*h2T ##).&="Y61lEfR2hpG痏\ӌafP}ªL2:PZE7)!854rC3&VO'+7v'*!ҫ:91^dL@@RQ2ϥ%|hSȩ؂-eZn*1x/)w-GxC+%B`P]6& *ځ t( my6}m:% :R!\bȄ_:כ'֏8h3H%iM>`s~O)9?F;%,Z28Ef %|A< z砚Xw^̙-1c}5Q>#I$h,R=KX#(~[bD D1IDd΃ejɤL &ԝ!F/~`VN,0B'AؓhC &Q +C=-?f[e:C+uKA"@juwDFo:Pe&w&g&x7Vh "{D=dbMjGA%2([!iwW$ 3r0D0镇~VϏŴ8 &  pNq݀S(&Nӳ1S[05L|E5q<[ݰ:O5L!@)H8\;=^?YUwL65~  fjQS+5GCb>!ZkU-N7%!`2&Z t8d>5\Q( ņ@A. k$ģd@H$neV) >(U=ީa:PX;-yϋ026qab7hVAZEOFCdӇ@51!7OvL֙ƃԂL ILrK8D9vfh%bj!ş& b_muÿvu5h.^c%*\6ZWNsLBٔ}(aFw5}F{9 W{cNfIS:g4mUi1dj͊c''Rq O=&'ʸOl8N 2yeUZpG5KOqkT/QeZԴk`GGM) QYP#Hߓ>|,"ԎePA7Zm`:w[_69n^'l`5D}Dmh] _,`wIpF@54z(N[ ^b2߭#+~L0"IDNy cb|;mV32^Ϲ""5W+Ĥ7)cuxs*&RCQ#zľ B^{+U )m1h'ڐM=C1OKBP#x3 抙JMKPHi#ӺjTjC!YyK] 3=sl`sgP·{`ʐ\.Z1%Ő wgK.I(k.I{f4}ݡAvnSŖ=-]l)JnbM`elRߚWu ݯY0pgLIC+e[};ɻ@m@l35|дc e:msVE,|g,"\ډ3]f8Yb.|ф )Z`ͻACRXaM\@3ξFu[뎍f96#og }m5@9kH%U p#]M91mp"'l8k3F:h\ 6'gAhrѝ[f'38&z-5 9vDS G2B&VT+F{Y USfH ÕE$S/{|=29f %'# 2(4? 
uY_GC6m7 4Co~ 77;4B V8}P!v?n9ah0)ݔRw#&IΛÑSs@6drX9>K04ɯ1p4<)cV4ia 90GژM|Ӗ ı1T Hgs&Ce±Wq _FH{mUP<ګM%$جG=Jb-S(4C Y'A~r ~4T\ɎyjT%H0[4vP Oh2z,msŠw>/kN8kƀbUSEq>l 9"ѯ|>h$HΎ= 8P,c&]v~pQs5ZNhlZC'!?2cR³$TI0ZMfv+ڼ:|SjQe!)(%ӺNsv< n){a4-slɓZ46!mrSP.T YL 4u.n!VCǬ5 \Bh"/s>?8':ZF귩lTDHLQhلҪЌ%XܻQ#l(jVWѵ92ƚՋD=U=oQuz PbTD)A U{jzCQ{X;\-'Blk ӻ>#'b7#ߑ@"})j9SdŮG+P'Yq5gDVĮZ`Dǀ:'_P-Nw09S0p&d|OksծvC薔V֋@'b \(akFY,q-P^ u|<ï[AWSf& zQ /!Gaw ak&"NR7+I0vR9dYF<;LA DahQp&)fJX !١~t)¼6nH)jAT>d]b- bN~3`8M8(JCcn 3x`VX]P":/_Q /jTyF(ʺABaC;FPY!($e#% CLXøꈁUpZi GvpQad =6$h'h':;ꩣJt`9V#kӒ(ʎ`n$S0LiK=)PL+]/ aKc #iGKd1CLP$0TFcX-Dv\wl@D8qv9k}ɉ?.,%]k.껞 ?6C#P} ZI_$c̔{@Zrf̪hw`z:-J yoUAsiN" K"<'Mx1Z.+3EYG*)l% y}1a١cW/KX+_H4i)ƛxs ҺitÝd1Ԋ;e""ϊ77+*D8(KT|߹{ٵs%n!h]8h Jqaĵl*SG; =ㆈ+ƨZړ㖚u( HUo&ȶ72`蛫Bw\"J=א_#6 5$+a,K޸?trCg&ұi5, Xo;% 0u9BC]F?&UԟW^)3)aj*PcC*6aX\cu{[\TxZٛv3P@׬*rcs s/y#Nĭ|dQǾ}+ %ܔDnM;+oÂIyA莀pFvn!y/=vz4)߆b 1aUEܝ*' y!2:Xh0i D w'7~2>*"dk57}_vJ*5%{os;5š$i|ë́'ae- f9_ Zcu" lAqpt_(DxA$}3 w>\[ 8 "ԨedUJbzxNpjd?PJAYLE5i$ hun1tEkKA$3ݸE\aj-|rf]ɐފ4 -$M ksaX f 1C ĪD˺7U8-f.ؕSi=,G{C %ñ+iPƽ+jA}¼]}`#0EPqz<h7+\dG)fC$`hd9 Sf(`U$f(sͦpzg+T+}RM$1I~LXتH?&Ԛ2eHI XE;.bMSIaFM|2ڹ= CC9YMʤ-w1E=vFIL! 
2/労Ƣ[/L"Q|hSClGLܔjDXE:kN`m4Vl(<% 8gc-h@i0?nˊl5jݶ17qNgpPo0xVg`!)]\pJ"~kӔAGQȿo&O>g8]HJR_TW9GVL:hDWͷc4 .™&.QZw\ʯFs< 07 x39jGjC%k]aKl e ~v<]~TQ>SwA=4gGH> ؉sU#Ǫ@Mk6ZAJUj+WzUtJ7 -Ȁec"$6bEWa=w z}^|2AP0mH-$nj8^Wm׸v.t@Vx rHa|`9d X4%P7gRSU)M'VN"HZjl ?D  >:_Zh}N,]"8*qNIo+ /Yxʾo)GQ@ܴ`{[^bzr(h0F+3w2DqgYv&E@,G'J-W&ڭ{7QXd"upNo@Q!1&k-6ܖ޹̇>4E ԡKV0Q9j'Znxn٦C+M:avQ0&Ļtsl&MӰiMKmQ1zeG3 #?( F2vd [}t@V3J1RX~<fi!I`]_R!jjҔ7 4 fo, B'HdI%ep%χ'8\uKԴܐ#=a"Z LVJFm ~҉%PW=TvdS2<|ӎR`aHZ$1(JK28|NYd0 ZfT:Ԧ=9%1z9䚕4(I4Tv-Jw\^fDxD9xx[(-1(Y3W)K9IGIL2  W'0uZn'`D-ڮ;Lj U(8,T#S1ql}]z "-],(Ɋ 3@),3V?`W Z #l@0ʈF$Ĩ=MIp,rN8\ibiK/*!~J vv z&"V'9\ϸYTMD [cI]njqK>qM5mZ$- vzP(& /@DQMD#^gX0Ǎ̈5ߑC' ^ {*ނӇe,vB!-92D/B LŊh,޾vٴ2W|~Vd ijVL(^ SIK4Wb#2҅) m[cOl$h3홓=Xps+ȴV% y6nυ`',3CԦ4.8]7 l["go͔yCՠyvw B|YD.p66@x,|ϚUILV -L^>A:u쭉E`@͵Ћ0l{nն!Xc)ҮG>OAAD,oY6ym\% LC[O,xQ15C³n\!jԋ1!6셎CVLڲ C}/I6IE؈dq*W3"TDc&H 8栻 i;@4ú3HB` B™y33SsM+ÓP#,dō-|[z` Kd7箙|ǀS/G(v0<<%H7w\nvgluRLsM2s5#Oa33M`E9IX5|4L-]*krN&܁81O`HOԠ59em[E/l6u @ф((:3UpAc[j. r,,{^D !44h:,5Q_"'d;e+Gh~]4Om\ED XIV](k-7G=b4juۋ]č{RE$C+ gσh OD)QN%Hl?}3]ݻr$ #K c8Iɓ,FjHS(Vx'<{"hda.~9kص<8"i0Z y9j݌]\۞JiWEc ;L{w@yt^pKD2?%m4+gG+DF] k}Lw H'?AP,TsUJH޴0AnppF~g-Rp@ӵÞpڄRt(k=[˲=~C0Wk$xZTHr+g+Q!~ ώ}0;|DZ_阠o=罻[PRjܴj~4{4´}F ˂#f/`xc O*2"qk).9bHs0O% }UB Z@V&0S2SuVG"\5DhSEH77ƶ K=хVehjFfq睡#q4Lj=pBXK̃KPRtӓV7c%JL8ښD%`Gojc+d2qDPMLťmnt>Jsty[4_nSPc>ӎcn~%]Ddv9yL2q9x1TL@dEZ┦M#+Kh$XUf0k-dbُ-\W>"#(#1zٌ2`fk>Tg$C5C(c:hr+ؙHP,2c ?  僝3G6M?d M,!%ڰ"fo/P`pB{\01`|D`9RFȤύ4;BQ2q:9d ( T9MyQR= vQrs)[Gj%¸@vb)jS1+Nn&F!}wV$scLp$ZQ8U"5Fy11w;YnI/ͪ:@2 ֲ0{Vdd$!|;ܡl/wfk  k\E\ q.˗9GRtu=E<%? 
3!e{z\JlRḤ-$n<#z .cq4euvsFKnxbԄ&e<g\#OD.9Ug{![SU@@,QC?y26؍u+@'>`a[$1W6.q|*2&A% QxFzFdcBkTxSqU-1gWq現WS+^eZ2#V)65Pǜ|'̧5 v*d@'iPT0S=2$M޹"z$@T`a)(x$RnD =493ȋ+Kp]'guhkǘ:UY\1H`7hK%]A}WN}qw2qT:$f]DϖmtCA˰Ķ J&QSЌNrldLZ jiNjҷ28a6+5)-zLO욁VȾ#Cb#V.um4]JOb7iLr0wa[+SRɭb(̸g@ݺ%JLc{p^x)=q2ZǞl 3_#AaAUߦcol"Dz'˲>YPzU[K㑞_YT;aĎsx׸z/=mt/1a7횶QpoE56,8gcPHw*loZ!tsʼ=|evl"C6JՐ4-iq 3<-^kBȡv{Z Lq8Mw&5?jA1up چ< iuiE̹(&E|-ԴzCS."ăcXk&@Y dREK44lG׿w=3xQbA&g_뷵ܺ\{&]0 z°2# ZJ cN~t=f`.94u"26:ҤV\C1!ώYM 朳2Ǭ_ZkĴfrx`) ςcTx w:}甗ņT!>T'r ϝRɷZ&'{&"|. @X1*-U3(&[w|!RVɛL? fHdOؕջ"'_'䱌9 Zp;Y)fGT{ؤ?, HSQ%gr@MSJKն%kR) $&R9YvArT7 j4ai[Kt0a|ސp%"6_[!F`;rbz1NܬOkRNƪʤRLȌ `%k. kqo[q1(e$'"z"c[a~UԶ?M*b™m Yxf[+Qt[n)5J٪t$Z6Ge7RIXָ:- `5#b<,[k&:!p y5:UfLT"BdQ0QxIjX²NtWNWW%ư<`RT.1c:Z^Wr8k #Ȉ#$SZrgTCU}uƽ7 {(V mS7P:zKY#3%y<|rd-Jܬ3ubVfx=[A nbD,e|W=ށ7ܓXr(Am~JzF#g*'!`7ZK>yDgvKe( %(@u2`NthiZ_grX; Xq#;/٤V"G _gqtF^Yc=ɼJhy[݇>WGd! 8@ֿ&1t e(qbM AESUNE B7Qf<V c9 Z;A-aLvB{X]^}J tFN3쟾%Br}]YJU$ދw嵠e#I.1k[}8UAI(e3kv똑JL!r2MaaqAS?60't= /@Ir %,u& UC.1mlv9&љDɲB1K9F.yKXQ7{@qQl$b}l7.?McJI/T[V3@TY 8hA9!ɴJ=VvZ[R7P/Pȭ^5ou|tJ˜`[O!ނ:Y> BjSVⷔ9<J-!e BS,Qج! 
/ziߔQE)Τ['d4aQV 91,9P-k_פ=?4W2æ><%5ڽa|H"#KǴ|e /ZV_#湛T T&0m>MY5f 0Gmc, S檓/GŰ+JW)LY@;SAX1-Q9QiLaBDET)E-"&\6M)!W޹`:7Qrc9Maj*̃&%}8dUqK{l /BI5ܡ]˳ Q?>tsn+yތ3YZ,2W#UdOuԚYZ u+NR⿯z]A閧Û"6OǑV(aPCģ/c lm9:Fv) \g ^ϔ91q,x0)x3O@$׊ 1{нUzȒ8]YK4T"#e= Rc\'Ԗ( 2L)/ ]Vt\.h*gUU=dUBh+QKq$4n}V:M 6:Q+> xY,9' |No_|Pu0 M6X-PQNr=zܴk$ ^Ȧg=Ly$ <é#*ĺb/ z,$ h[2V̂7hB\iwba+UBFr +ZLbԃ?l4s7Yfpɲ9)pZ9nUYCobiq 9KHB%ђ& z ^XL\ 4 Vb(HXXřTMM :Y}I> ./wwnD_@[bk"ΈFn|8xN "݄bδUVL(=" vO#,,ӝc Mq-f\5ogcl"EFg}\%KZ`CU1]4R3Z̜O&hC{SF-" QJgFw7?dԡ8s\C߈^<y’긛'Aɠom"yO{B P:Zh1WCRBٞ>~"+`Ft<` |>2"i3ƅ}."+@)[14(nV!-իaz V 1`T?l%qn92g.-ح+_`msfC@Y >,2M[P:^(4EZ&Gx9m,36 (Qu VMM/-D p.Y w`Y3,.iP@8ZLGsApNTklt If% GΡfΪp$z)̄Ԣøn|p3$BM\2U@)8zQodªG\ɕ*6 x"-t_06( ܾt UJP d_B;įy9r7"Ap!'e"R1~!B r,nm5-oO9??۳bۏh/09qyb,ԻSZve/M:$,~nPI_\|c?6:'md!JžB:擉6o }g㮝cW[udf͵/(zDo:"zkt+H  TCgVJ: S1 hev6NuMg8}rglg"o.= oVvx6cfpk{xϢ̌n$D"rqe;1zoͶÁ%FT$3@%G}IfF/4F .jfp]p NL~un/,N,%1O_JUb< i2EtZ<ץc)!VtxXDƾ40#Yc @P kB@A2sH%GGQLtۥQ+ \CR:)$nAȯ-_Q=a\F UGr!(aQ U I_w<Ԝp(ȠBVr_&Fkmepڡ6:¶Ahd#= hRC._ x܈]gEZ\ƢYm wy0_˷\Hi:p dX8& X/!` 5_c"겨\Tȭl+nڂѼ끅2DVS84š/U}!ź)ӋVAŕ拇#Uj9,Mf%A Rw q%eޡ\=''qk+- 㙼&yk"Gg1n$!:4Kr;J)sM5jQ'qFR0X AX[6"H|PHyP2g ͛幱_e䷻[U2"}(ĺt !d۵s(|fy$24L}":%ݭ3m/Cq C5-)wW*xⲬ68:κk`)ZAx9= #&Yv0?\I%c֪aQ t~zLro- p-rhH+ Y+X4^*_#vFBx!95ly쭽劘 % 5Kj" vϲ:$s1=RKQ(G3S'3Q :D̆!U Gm ^8J0z95mm( $h%Go;.6WXg/3"\6;wp,) [)c%HW~KO^CHeTtqdI A _obsajOUEZ^?4oI>*|7=|i8Gk ̥=cƬ̀ HسTMIG۟%-v!Accn8ŕ##Mw1H&@ qZm]kxad 7p Ng&\])9H{a@p&"[(liA7&8cq8?\bܘǘIsprH-8=SX4Wa߈պK-N/ǒO3 g\EɓV`L|_4DN%}V(_iFU[~ivado[Q\o[1\FۗGRc CļSΓ_8UljΘNH ޤm<,uɡGǰ(;>q3p)+!yB6 |/E#8pp<4FI krɏY>e :h(C}( E.xcj$"H*b w!`{yw@):aK2Lt~!R L=DʓcG) oF\kO( EBȆ*)Foe> 빠y浙]4# U$/IuTM>A9aJ@ԇqIuqOO:R?2gL9Cu|:\(,>}y.]WSupTΪ_&RA֛zW[){܁nIˮhL![J&Ghi֟3W20=wOnl4f ye^lY#2  XH:2oMOdw&G"p 9Gzlٍr DpRj[j0 @ZzD{'_ $.Ug4Z a =mO|2}a<Èe&c B@@|e(ٻ6Q5mF*H(qP;!a]B"{A2qx"B 1~* й<|+.}Tw`Dbpxh:K3:n3{ ک<Ocs5"TI0/m)y|Fz0F/F"tcNԨZN_HHlQ56> ǚX+=},ٛwk]$@I6K.{-钬0 I {P5CCcS̕k"KtKaF\@jJxWdv]Δ|:jۘPHL`lj1H}A5果h.dY65ɤžmJϪ&>A??) 
e$WȆy;C~~Fq#o4a~{7T[ hvS9[FҞ˭Ni`^ ߭Ƚ"9QSGN ^JbKII(7O4#+D ձT']L|6ix 0]2Ya;+ ,!TG ÜHV=6Au/ p`fACe)xJN+rrfd[n E/,f &`"a2Wc*1#"=;.K}b^E-ڹ.^(_e߮CG,hA<5|f Ijƨe5A2ټ[_24; ͜t,Cō(">GLdF;F8*iL#1 Z ^Ir"ÂBP516U$"-`v77PqKS+2/U\z[8RT*1}{:Xeql&)ST$o;({&VfW4j3G}(J{N dHi9hmyna,5x2{ ^c+ɠ/4~>~YI[P 5^`mSd" v Ng P,x-EDH,{UK "QDဥB{k55O?81 ŔJ^A6B9|}"AJ wtu]B9"ul'=m*aH2;$6 NPuYt 1Ϩ^U RP͑cȷ@HB̝A BX1hPh'cChBb^)z^$x5f{Kƛ*b(GF gɬ Gtz|&PxZ1 .bĠ!̃6s*! n h82"I@ ]fo gb|%/V&9?\9-԰]|iG]'SCFNcqM_e )+h d`z<4V\Rрkd!kt)|];@@&1T!`ۚ (Ν1;{,7/6jĀƎx2M5dŸ7'Qu<7$Z&slhVB$ج)_'ZX!^,qv Q\6iB_m wpl)1*E$ȴ!BoI%Ȯ]ضЦ:b4eRdQu.3Og>#"Ɂ QaCdֿk:3fޘ[;g?@oB<#~G>f!7+t;ρݑt!`Oz:pvI`/zE d4kofxlP}M ~6R-XgjAq0QV` 7>50,MA6;VjPSX۲ݞtibZ5=ևR kL1ML,E 52rbiwTl5.@ z l1Og~ ^N 9%F?  Pdhq5K#WⳙnxPDԏ?? p5NB=uaݨv-ۚ4{yq=J$ ⻾ƗVb 'l6!;69CUgy)MAW4] n"۬*Byf 6EUv 4[+qWo%cziAprqP,':b= %2&W?֢\ }KZaj,LE^ 5@?j$i,ػ ֡5/Rj?葏SEM\\:fӑ5 >Kˁ-f.*]z%@iU;aߒYKjxx NKe潴~NM~讆C+NgCr~݈ؗHH0_^s$ ]Vi 4-P[@n$Q6LdTj;~ʁuK2>n_Lh_> ~LU#2];E(`8e>c1ZON ">$P)xޮ.LgrGeEr є@Iϝl@DK TQo5h\]P9 MRH<41T28<}0^DGM;X A$_\;[ӇAl%%*6vWm`[EhXJZ5h$5ِ/?zW/wr@faI"FJ̋wFI^EKυ6O}C,GSO|Avm9Vy/[# yf^Rgn1[j,5`$⏏n [~ m)&2gGzx(U>D.O_XaBo.^ryGI;,e"zCd!i%ΨSa: BQ|)&G{sc~gPd d Q WcƖノsa)8SոVn-LdT?7GDaj#I-b[O ˭AFKR[C[b{w dFSU)MsxEsab><ۀ:US9CW@THBמ A %͑"rxc) fFb(u2ik#Nj6Őb\=kʕۘ-|ZBY|3PbyO  oux( |7t495&e0|BQc#"=0WF7ΉBktNtGhiA"'ѫգ%Xl^glDi+ ́n$=>-Cإ;"E+65GK4h:认%$Њ'"IdyTx9\/H0R0{V}>(3԰)V[72nr;3ZIi,g^փb]Im}̐6RWmJߋzdO3 !RgB M6ZY3=p{MZpPPtUQxNkf^N˿)&Fv Ce6Ņl~D 3bsK~" p6] suoHl% WE-Ӫ4"CkZF]`~O;HA"pnjRwp",Nk>2(9pGxDۻ4iaiTp2ݓiOr&\ ǣ{^sZZgMb.iw֚7%koX!;2 O[=L_uEcs -2GGƶGIGKj4-TG) == o'~uXܴAy='3#0 iSDfjj[O~*Tg/-= "%\Lvc?Ue0$+z~ٛSǛb&\|鱲ʠzxr WZpG5ͧ%3rK5CAnJLHE"Ztrʺ:fd5!]P$(B@)κ%`x2C:$If { eRI~<z:p!y|3}VNJfsou-4нz8I}(xRͨU}Gֵ(0vX}Q-X}XCޚ_{dZef>NbX%"(yxXV 4DLg^l7e^gC2pJʆ&~\+4j>'Q:[_۲jިc\'mM"74<9g 0.3S1ńtW龌ׂU`[˫.]Eu(jޡMlW2"camǬŲzL ,E Ip)?8w ]FeŽ~C x(9-M\Ь:>mwCS Ƽڒ92rT5'2=\[o>SHxu>4qq*/BceW$2udYz Z>|^+vLьYtj*^;d6L'v wo0]lTCf`vKFF? 
YAgU|Q|tML'=ȜE6>- _ |bwHjb4û;=᫽38!!%QF9->YʜO'ΓTӏDkM!p^[8/l`EWV=끾m h7Mr9 9ǔʵpHkt`U^_$-Eg&*mMX6v*3QA{ ' }ŭ aR1UL,д>/c霫x<٦fri* /6y}keˊJ6}KcOTׁ)\gḣ{zWG~Ӝ=0Y@:5VuaI=.лQ Bo-xN7sSE_?3( tEM*ɲra)JCIIrxoA4YF%7=UsJ@BLs^# 9@ޘhfcdNVF-5rK¨Zߩ:.`l-U I@Bg\KZ$*`tTN{oI*限#xK”8>bs)Jw]"wR ''{ِL$筎ӴPs85}P3^֛۞]fԙy5MR1A:zmDe?Ua!㩖dgmr=0eJvh.=(ax3\:gY=^b[KA3mv5{DguJ nIdUD r8%S p',c s`14Ag,CREM$Mt*UO%56m)B%ʜ_F|N+ 2G\f~,0CsC3Ͻpjmh.,Y"쌬 xl@7c]6 ۿJOtӻϭ(шĎ9pbEISǼnGWmu6]v_mJ4g7*ͫJ_blL=W 2L 2MOIw.T{]җAr8 }MP lTկ?X XFd`U*j 2{ʧLxݻ~Q#Ar]m y6aFE ZC#C]tȀ43gvl%)+ıvn)?6^99u3lFKQd÷ U׺d鍸 =^uٚ4! kQi99(Npj0P&JwxKJTqz$ vS0n o7`J']=[c1I ޶БkmEp\w)KE[ c[,, zZ;5Ѭu 2WG5&* YjE'ʗbhu[ \ B0}tH,ϞְpӒDY\-WP KKԸ$b'/Gu\kKs9 s"s jOhǼEpado@k|w$ы"[˥[3iUe/i\4*nAq$*y;Yqg\ 'C=!RF\MT>> 1 3>S!s,k I 1t&?3%O|x/ɮчdy@ !Th3TfIM903U Iv_U4{n0ֈcuCDK\H㦧LU.>hACeT~6jJ6uhuI0]:#*,[N/gGIi6:[oϗ,7iYYQvߺ6`aGV {@>(bP6R Td|=.V-8ۯlDPܺ!]t5d];z,Ǣ2V7)~iߍ&Zpе.7x mÈY?xa(Y7& KM ΃wJFiDD5먔\\e{A| WC(exgXxUNJr%lR>9e ԏ 㺞ﵒ1|7 7 ~7>X}E0(i_y<1oqԣ'»:KDJ ( I#PёM][';:A*jWkHJNNK ;h\rNJ\q_ۭӤ3? "mr>잻DaGK-ՓQՄ:Ɋ~PLosʴ\Jy}#?لK+o6'HRNE|J1C Se0?1UUQWkyˡڐp3-˭QpKnE2p,Y&H}g%m]NrLҳizwΰtS5ۣ@W߲k 9d'4柜z}y(A{}\([|.g ҅w(]"Y_|=%ٛu0lȨ>Jm*@&μznvա?5-=)&gլE||yx:{6 N7{+s;KDZ Z M`894Fz$(c5N>-IpI"~u' V;+= ?{'M11j]!al|+dͱ]D k_ɑ$b @yhd3(xQLHZڊÂDĥkiiBųVȡ 'Aaf~* m.$[y'eT)>sTd~x!RsuvOyKݣq^L;E: G`bq G 0)x8˭&(4K 0_c2Jz{p +VK, >8`Px?XX/Vr -@󛀀]$rkq욚2_TMЯss--wVb3r&YG{ bts֥yg5,NeavVU|UWڅ8c>ˮK S=9DfK;MoC<!Vmԡjgx/&xi t^0n>N@Nj5W9I]D>Iy`}$,ݑ L mR'웭Bv۸ĕs];g1Qoѹ8TMi)MhM=t?U-QC:a bui7c+\bIfۖG,]+*h+6QfmH1YIMN"Q5cnֳׂG$c!m &mV=vx vvg~Lo8l\—"T%n)fwV*>rvE1pg4ΐ71I!'\qj/xÓj۳[)`"S\*]8&>Wn>9;rdJhcڕ.3ax- \P| DHAzeLZZ *nEA8^'E`FkwԉUi"w[ѣVeiHrKu\hP"ȣTU搋JcR8jf&δv@3ߦCKlyҥ[,1 cm#QΟћG|}[ d5k@! 
F3V*@+w< q Ow^ uND1!շ*)9T"r HWzY*~1MNyue*{rⱽ.a1^~ޣ.Ҹ$>s:˾`s%ˍtPѱ1#;ͼRO쒿yaGy\s 9b8dƀ $k*kFQ^qwcQcƣ*SIV6Q[[l[LJ_H95Ń*!whͶn8|EnW$]B\ z>OLj +s#=Ԩ)1ii HjC.JDUH{>Eb^ZN 9,#ayB1|X]%gH$܏[Bs}dI ~0bN"|3yR}EyICbD6/)nL.xdB&l\)jQ})RG4lp؛s>z|Q@f:B&}؞5č|I=F@٬6eCΈԲAs֍ݝXrw7>OiNz,v#tW:L1 5473eϋ\ Q9FɑG^/z!six0wb+.}ܘU䈼 -ՆX4rQuj)s(G1%h|FPd#숨E|ž|zQn"hh(tL,wgz݀@.@A;Vy~鍹7o^WzeQL^`S `Ϝ/%=A3"_ Mx/P xR<:)jUP]. T&L=|75pF)X+DB XmVW*zh+?mwgasmKy'qGJ]S%o0kF r9\GsstaZQ^)a mB4{!:,UҪF0BT[RLq8 pF(cV DdNX Nc$ Ç6{8̻5y l 2 !$~z] Jm ,W(1,h S⯠Z6(4OBXpT'2}KO.4ťڛCA|x^K^>]oKt̋uP& )U7o#,Tnue^ &:ӝ`̲Qr[{X|=$B'# 73?uÏ0{K2og >2H*0!RRR$i֥'Dgex<io*d~`ОTʸHlFWɶvQ1dMtHl3q"k6a"!Ky7Ӆq~ҳ$9/)uVq|0 _+.q|x_op6KQ93EG ${j=0 2 OCRP&[*>! ի#9?i AŐB "U3@"$vmFmGv,^jEY±>a0H kJErf؋p u hX$q$R[)sDAYZ/&Lh0Tub(yaۣ xM?923VaK J%;y͚6ʆc^!\f/`Xτk4ߦ5 F@y6s]KBϢj$J!dEChPW<8Z[<_zi2jl[Deelno"Hq>F߷_a_~P$un9[EZk YpDBK{EPN:[ nspx(nO{6F P0v_-C} hG`kfp78iDQ:ǣ mA_TЅtnw m]h2HETB#3 0rj-4^$g 6h1#Utm AXObWR4c/% y8y$9bfKA撯Q`Y ( HZ- TcB,a(o;W8oNʡȯӧ"gT}$<`BxL~72Ѹ&o#02f,E<*="oNH9)m4o{W2d` ! 6uQ[ *\cBO&Eluu^Hԙ\dψHPe4'!x\^J/`-$L6_;*v7@ۈ s_c7̝IjM2X¢܊GȘv"@X5KLO/=ůͮ?Y()DˡzJb;ϗտLQq@<>B8dS/|TywdN'f&OGJ#rjy.*<ɥ9V ҅:fQRa6cfK~ļ{ߪMg| ,5G͗`1?rTp1k~Y9|l&cTvIecFGC[]s77B[ks1')oech ]14%1͠3y*w&I=D? ]1{Z\+1e"NJQi;KCg'5~2i{pddpŞl%DG $ߗ)-.kc+8CVxf:Ֆܽ]%Ѓ`yIX}Z`eyyKfJVM1-FMݦoPm7(_sT oiKfk٬?O}eߟۊ"9 B t֌FjDcyФRwM\`^P2#$Hg(( N^ TY=h #5:B JdS~Ź*Pc[A,*1fiv 5Z;ٌOT(Wl8UjEg5N#&8xT0YR=r0eYqGg/2",Gia'۝K$uQN807 `(/&΃4ם#zΧKe>Km8\LXBZ E̠] wb{pocc7J# V\,e4kt,,,ohs{ۘZ}wu ]Qd.qm,V̗Hs.,u(vB{js84@Aq_W'{8AeLz4LTʱLr&ˆ #FMl`jxk犃Ӻ ;Z} ;a:||) ` 0($Z^Hp_hbs2pRgL#⸶qfES W.(UcfXBw{`*ps,#*648jIz4_pU]kf}q{ZWdWfFy_\5No068C]tc.>rG@(ә(bO EiN#t9g9v@cϤEaH5fϬ+ڷܙf0 4F%lVΉ%,XT/=uٵc;fNqr׌>[ ]G=K4 s#oǰe֑wu/"`\GYsAyݔ3O|sВ<0;JV"$TD}OKd.[=i3ÊΣqNI.@bj=}WӑuH,Re!$o.ɂu9Ey X\%D]$@wۺ 4* lE?. 
cFm3yIfؼ 7hycvXj!vVf'k;F lCܐXcq˯v$ݞ_l +";8򬊆BM%eh_:vˆxVCIrђ-)vZn2Hۛ3xx94 g> ?o ̥Eynϒ\S 4Xs$бp7սzǦr0k*IfS`~0 {03γ8@-+W*+_+Fp@{?.t4bj`Xb%2sjj$*JZdхg<#I{&xȾ/bW&lAz VqP 4C$Kβ6x &nyc\lX`C,|e/\~1(m`ʣeD*]*uKiqcf3%zd}vLP6"@6n>x|YD:ǂ=`gBc?=cz̄EvXNh-u՞nlm: }N,#5TL:Yu hb f"qY -A2c=͘}Fc28w$2k]a<8& >Ϩ[jgMteE€j9~ʲq1؀Ǜ5>V߱H&VAb{r5BO`' 9eZ[T69/ɱAhAuςhDP`עNI(a/IxPVy{:#ԓWQeGPI8Q ' c )wݻ]tO +Efy|\Xq?c%=F25zș/ $@OưpE*Jb_4[) ּr}cR8/J-Upo\S]7oۻmB4[II7:ha ΃ _" a01v3RtQeau)v)!>?TM,91-w2QfրxY%I:`a(!b0p~Bヹs&MvXp;N6.bp<7j3<7ذ,,$HQᢞ2c6$ d$ϳ*$U)[ԃ%h\Cqtk2n/Au),zlaِ ѱP,c[ĝ9Ăo^V=iݠ*DbbA A M* - {"dHQ-"G.s?ϏZ+(̄ERn5Iw4;|EKB)ؕADI8 C@){BQڡ)]!Hh3` ̉~ĵ1lOz)L5\?E.dt)_*՝"ݒ>+gS`œE j w(ɮ 1t> @禯Y/ (%y>lIGUp}c DžzVSE LNgQzqWBG߾x}@ 9!MϲG@ fvDz`$91?{(zCJQ߆?/lIqlaq|ðxp69{\o۷+(K]!K;Kq>aW(2@l~%O|:y C⢮>g!TMNl]˾5TmP>:ޢ|{ 8v,MP=qڲ"?Fz)Â]]+8jltRZ@UbQ26>pw}nR^&WPe$Ab3p7F o z̅I $R4Ώi蚋Eԋ4w6q]5qLh$]: FXJ| MQwq\ұXA pd Mm  lߖ.SIq1Sf17PqO|~^Qփ5J AsqE]sIr CL^Lk ͇F`ݓ___P4t᝶z;QAKUn!ɞz>1HH[T"R:-3Cɲ3|- 1 %[)_c2֓y Ax[Qo)V ȄB TyEz } $rĆ7 l/]e\VX [ K]@ig] cOpTCNwfsN=~X8U=3vc 0aĠy`kY~~s 0+?ϵw:͂:ntȬ?*xs "-WVFQce}KqP.8ff ]"xnV ]SK5pfG-yQ5 6$Fkh1?483oCw]'Xj3QA1 Xʆ[ը*צgj c#O1LGYm.h~}|녬-m6O)ס09*쉵jo7[dYy]IeRKyNBqעD6?]c2HCu貎 $M;KC%z>;EXKg_9jڛ Լf n+?tLcOA`&i AW2'-%ظv <16]j;V,bͶVUIzyGi/ Hh*!~`W'TK K֢Ŋhͤ5aiAѿ5嫓% GyfΛ)zvD"z`9@Oq:a/DxKvgA[\l o_gU#`#;tjZe/.}?{|?xI7t/X>y'0__V'ǘ|vpBQc!Kb$o2Dc?teX'9F;3FEb]'ӿG=ͅW[sP Z|4G W{Dž;ufHNleSY|4udOTF"5= v hd(i1M{W3-t}{-w߉`vY*(ʤ"MdzmZ54 eF^`,/ ~\4śEc1ZX܀,s6> x.}iLzBw ?;~:i? d݉#@6Mws?0)~(=i} ),͓BH#MG c%В3X]8,gJ .7L>⋚]g-^J5[L9X,R.V gTdaZpďV93 ؓ}}B"L"":ı=qݻ _>QfB.RxCb!yV9Ң4X/r!_ŭjPra?Ef' μ`~Z{xh%fIGWyIJ "#INq/"N݃Qp8XͬxlZgk Xi$dqP 0+A)Lmcه"<9bE. =UjγajX`vQO|M,Yސ40Xcp8g}D:he{xX\)M|a!H} g:8Q)AKI&6Oq'3 ? 
V9pHs?} m@6em$g;# uLZ<5rkC;ۀo隱Y$\0 <|Gi9A'M6B ƠH2|L_Rafe>w%/@BD6U{Vem` *'"O˦]P&B hb +oHYV(4YfhyoO,n a㝞YnK ˲b$݆H;P 2#رc&ٍ Gq<=&]iʕDs<Mʶ6 AxR"{L}lw@0eH Q!txwi #r%zXvawMZa%fb̓dV\7 o%XYfV30h 7ۅ<S[Go>ltŨwcbbh9M1:b-$iX_C-ض hJC"Kn(]|qF0Ni.خf ϒ1_S ^\8_/ʾ`M+ %ݙR`k-T5 =89xO1(QgB7y:S`p@͇=Mo|m2k^W&-l}Huzv_GWLDu^<>t.Hf/=,J!yAl,VZ7ǜFdK6A0'.`3>)!IxrFtUyYνEWYB]py^Zڒ YrD9H=_D,O1n Ѽ99ۗ,ea&2~I>'e~/d<5dD.!IP)Eꂪ"3sxWB9$^szkn~hzB9x5ݬ9ԘTM!|U Q3Kk J Êg}\/,AYG vH8{ KG-ly2i(Fv-BE%xWdx)sx7??HB_DQ{ ,тTR_9((%4eZ<f %Bli)oN>/qʇ?tÕd,}ϣ2$VCUjE6RC7/h3r~1'Gz|JK@P{Ǫd&uϧSB.]w`z9BG 27@`l!Ϋf. jq!st ~ ΂OD_m *4#NzF[/_h~]o=$jߙzϑ|ThE︷{4r2c)؍Cm5ղp02\ C!^ tG"Vvu,]w\,_/mlu 4Kli)YUIQǤ|=&k*0Lp,6F*s럏s˦['UY'?Q>}C*0>7c7ZXOzX&Woa"Q ¿@@(Wt/ꎻhR7YKadSiULF t9ėlm(6BO@#gsbaدVn '/5+G, ?>oY+$\!dեxGx@Iu;?sNZ< VB,uzk2h{dy֐EJ&' pKDw2q3ա%X3,3Zd͌X/eStvMZ0PrQ= WE\lZQj,8Ug4x2!MZ02oܒ?Y/Vv IϞdXW#h&73#2%'ʪ( ZմfWB7sZ(dWE D|Rq[{a"5FɉeGISo{G!!$C1r43v`*9 3:uhȖ?W7sozfmFV}M`Fzdb0Sh &Hv-Gr\R/\hQJRMJKZAmȗip0 Pd}{цvcLJT7`vdݤ8_+͊^z,~شI{h&Rc*J ]Gh[^mMzY?(USRc04 Qbn|5o B%4f6-Y잞 z$:[;O2Q}MzSL3{9aT_O,.MZmTwѻY!*B$Kڞ qPv X8D:gzA̘\aE O)y-z9X馘?9_| >޴ کCO5KANƂ^1ӲӚq`b=IMM-tLiHC:FՀܻ/)x}aEeLYZ_exi<~[9ӺOoZ#E+wҍe6ad]>ڗ&(Ø-Yemui& t<;4W9|YR il L׾HX#TI#~m1݅Sk5AS,(6 Mh/xيR󷱖A 2Å7:. ːa?,i#j읻)\\Qq@QY(<\X'_zI] qo1@p(ΏzkU]M.2qs58s5ccyJRR(74ʆ05N ΥoQxMQm:/ڂ픻K7BAfk`0wDž} 92'F\')׵" vQ4 ޭz9/o|\_-TxA)% QO-IdPF8Ǿ8 E5 ' T1) rݼE!V;NqhB(Z] \%AzlC}6zBiRn 'ܩվD@<%g ɜ5Ӟ{]$FMP׫UIF$Yƣ-1e6Eǁ76%l.]{QN'4cS癨xKۋ Ȣ!w0͠ƽRΨ*T%e{7^kſU^0GWt00[YM Υ0 l.,1עė0fCm~>a^YKa4mIJ-^Om?Vެ EC߾L<5I[F×$J%[uhhU<+RYkB,$D *=-mρ?' 
\XGEHTfʪk<7L6ytfvgvzȵ -YqJϨB{Q C8(늓YOS.r[A_ Bky2{ !*VVy&+Țϰ!PB6yl[c@s6.q,=S ~vUFe]3˞98?G#8v8!maTMAEW o,p34_-n0Z|NR!훃t߂ƠǸ8S~ iO"Ms:Ƙ=:(`獵_Җ%r\_M٥-O ̂w4~WDVCE\55D 3+ ||OR[e:b*`\$AW,<-ĐXo?!`;dOFNM)#mCM}'=1P->/ɱO =vh 5A➖É?n|?$2L+L,qP/;}%YG!^$n m?d{Ϗb$Y55N9op&NP"րچ dYJ`v4ʫ.%h+ T٪' d AF(g\@ t!9@S%꽫?}Ė[h54 {3:,8Eb Az dGدĘ q-w>y9r1(UzR%YStQ aAjhRQ)Ch6C [cXOdBDڊݵs[(?M;Eىl| TPeIWC'a[€DyRhzj Ot<)f"]͏f84+}:xצfqxӾsBDҒF -ODSuGqJU*g)2b=As uqQȦ7>*s0Q:98T-қsD*^ͼ6qɕ<F<$ҐS}϶v 8+ySi{~IL}IcB`T[SGQ+υ?':,ZSC{;Ad,ÐvlHeF7 kԣU?0jM8Gƅø8$8 2a>*G)ֶ90=W/廲tҩLΧ/MԒU7 [^Euf/S7/㷼=O 쟯\Y\A`gT{9h>HJPb;zU=`/-PWj $9[.|jEe' =PsBr]V& Тq`bۼ]ʔ(3έ%O;? l?Ss8>\}5e ٗ+vffrɍm0`b`R/HlWz4ͭ e벱xяUզqk;uF5xacr3,CC,nh'=Jm[FiRߧ 5sd ܏`K/.)Jɢ[=8=7_L[_Uߦ'xPU.>܇x n2wQPŜ>ց,kPğ2Czlm}SKC;򘭇Wإ>FS2Zߥ&;2@ v9ѽ"@傘 EIq4ɞs DH"5~S%OkIQU2kx~gXL┚6>+?a>"N5┚%`vOJV~{f1#1$P# hk HDؔKJ6fh܌$-J윣og$xa+v!W,WÇ$~Ȟ}Yu=ƏStǹ|mhq(]wAbGv!{Fxtsl5p,ʫxޱD=Jr AX $1UQt26ok2Agn+> Z?]uaF$Gp0iL[ۦ8HBtHPGyIQ`up4 ϣZ mkY ir$%l9|bOԸ`R@m|#Oܢp_C5㛪*n[_όT\e5r`DH'sBݵ~] }52ø~)TQ(F9|2!8^3:wϻ h^<QVmkC̋_|@=Pp*Q&H 1}՚`?Ӿ?ep?_p?f/RmS CF͓nE&XkM0?#i 7L[\*s<jA[b-]%&E2| N^`$]恼9P_g Zf |@Es7~K8֛7[G}]et[_C?g]$VPi^|n"d0D9բ=˿J MTgFʌm6ғJdf_d5.W?Q!Nךy@ssmr$l:namy*ot9I%k-8(c1PR^II@CA7k_Z5z]!$ks~yol~x/J%$P`74ifЌU@XœͿ 7MdIIzrnɷZ= 1"X&gP pW F<Gd{wNiw+3“+&x~wC{!v"?@yC[fߠ/mVL.跆s TBtݬƳȴ Tt~gf-ycSHV Z'"'L49ܨ\)IJړt(h)zlUɮm)H$C&Ť9!*6у-0QEAxǼt`~8$%eBD ^*BIMf,iJ܃9ż4C[XVcx\b}\:(H.I ?#Ml2c kBb>7!.뿵uNFؗ1]q'^r1C.]Pa7 IB2(ve  { >'l-J!>ʜ2/L@OP0Hnk*Fl381዁eZ~9ysE!x?.mwОfy |+<ׇk~|0JV:veftsH_c\+:+1IhЭ<(Ɵ6~!~1Oi,"e<B3l=wwy(}'Ka2BA˺M\Oi 7ej"ە Un$7鳃mhq;O "& E(/6-–|轿^NR 4ΠmSlu~-.:Ca<'(Uh#%&L bz[ei*'EBh 0 @72 p9&媄f#laY~Dd]<^  M5Ϭ.3~ $0;dqxFqxeH$9&]@ bɺ1r:ȸ(ʅwMlM/ 3Z_QZ-!srI8ܠ>|̬SvƏD!ŁeaKa SE -mxeů&lMB*p1jS3r4DQ:&Y3:sAF"sS0]r$ q|쭻o[8;mxV LZ$ f:顠!x 16/jڒ'f+}TK|y;];ANqaojI@`J`*Xg&T~ ՎQȰ(d^Z:^FĞTnO;k֑=|7bN:}kV&i\-h|ޡ)}{8,]3 %z l{Vlifk7'w&#r--~ȢlXr2P7 ~JsXDO=)2*wHiL}>}HK`Ga*Y`Vy2&t^޴>yɐ3 HFPCCfY>Gc~`s1c6/u3Yۼw;u+niƺصٵx5ۭVCXR5 M… WZcNi-(LzwV<)У:;Z׳2Vjs>"kU&z%x״E27SYސwӌEs;]>V^R"Ybq$"<۟~=߻qIG֒7{~ZYbeimߔIv 
{o*DOF0=2vg{1]yUuNTں& _E98v^AQ4!<}M08=sj5ʀq$ZY`$O)7SO[' I=c|Cv膵{xQ9Խ*Ɯ2!Xowzu;dQgW{Fq-U$D7yA~C k{Gd9.է4„ a~&)#XiizYKPy6UL 16tpIxͺ r=6WPʀa`D?@CCuwǮ*3\]*q?ԞS h!3Б1/2qDXU򓔓 JTV mA_IBkiFo E2P_hY<-Š7de#0OԎ{ "NvR_"H+KM&*M;n&p\Xmæ`3NH<:"ߜk t~:kaC@GjuO6/.7$5m/32D/Z?\H"f{:-|\s>kH?%RW-|PL%>gP(E}"((g[@Yݹw9/G4.י>,5ńu' Hi!DzM3hoJ_"KS@֜ v 6Yp ZxB#IW(δk_o!jf,nvhkԖDjLkL"2H67Yº.##kS}7̍=|\&\Z%*}\ ]z1o Qt}y"~croowCsnv(1ڱr3J73|)Nqf]10- ;cxAWlʏUrJ ԻCf3k MR޹/ri+MŖc#΋Iv#_է^rTxC Oi}+8,8 3VY M8RȆ\IҌ"/<ʹЕ8~/wt_lz&r( 㻶gEݐ5bp[:Fmj3L]p^i1G1:V"` Eoh} ~SbxkvsJ?$V|G.@]Lyͧ}AFMT!>peaeգXC.|^~E#O8A|_ C>/T޷W-t>Vzq)FXR1Y{EyK*7δVō. P&^5d B 06ZLF'0J R06Ǿ@B y G6 xǼi ME=M1+/{f53I J#8?a\?nK[3m-A[Η>ػmpc}5e##0;iUH f1_\$L#!|.fe6M•d=8ms\j2oCw4SGۉX'r„_'轒cQ\TKJ>PuH0Z`9B%:z# ]c4W\tZi?8$ '^3pG!%^Rbb(||j/)d̜ͦǛ] @/ExʁxUE|ٖg`W$Ue/v,$#c8xz+a`gHm0kNn-Fqj)ZŊ^/(Ա1@+tTtA6IeK=8!.c|, \d c`ǯऎlh3Mu2!2lҧ |9^ێc6-"2Zzy~{CnOvl#mԀ4>O&i:PA~ 6y@U2$rg\dӑ'L)iۏHl>" y=) 0QVw xL8=Q ۿ)]T?OZ]Z%N8!BVO4UϜ2YtiH WXr5hL*jv堄v㢊ZC9kN(!yne"꜑SrWYq@+ ~[FdG_Iaa935b` cI ᳵHh|>2+dλicz8\!{.7eaܠxIc]_q>1)چО~!AV7;gإZq*U}ъݿ+Qu"V&C(g q$WD(7_\k9})BA%ЈkdZݺ+ q] kxkiA3L_wۡjqlĥf,xdW +(=$-y R<Ք&j V\#t_`bIh}9WRR\Rx6!-c"nLvۡ(8Q./I%y 7qj51ycXa[ __[7ix_?ƝЗ"TzT U炂L0:d8$BIj y!WT5qژ){o[F*,Y)=> ~hAZVTm0ScY{xb}1r6œ(,Gɤcp)|iOlb3PDHI ҦqU;?KiGkTa^GW`ɄWvEmdʞjNNW+pZa T#m߻lEls!dkz0br75C"t;I%G$hw>pAdJb\[ޒNszdRiՕXi~%pYI[sp%M t#N64pדMQ5CW o'`I5i#'RMW79A |4%x0E%av\_MXh!-tEǐ8æW 8%Z@H!(Gق#/v/h6So<<3UtGוJ\vD23B1Ϥxe~}³=66}70 3Je}&X?=`)iq׾/.Y tQs59UO^`T~t?;V ?OYF~$h%@@"/lpCy|ASTI_ d,`/VK59(cBE%$5 7I<}b:|f`VgM8Z H`zݷq,!!C֣Q"̷hͅ,& {NC'D(MYe`Mp8LC'ZV_˹뮡6o| 4Ϸu۫qc8ev-^y<]NDI猹ekM"3]E4Dc LP8mrpg_4hsM̔w9y uoI%./WdT`UL}]t&'G@PN34a'7creS|ϧ3euSOG[Qn}0(\Qp˲en yoH b[\Uq \{H΅v*H762#4s8aa`\ u4?%υM3F*t ަ]ϧ,o*c0z=!ETuU/za,utB(I:CAgG{s^ˬ"^ (>q' R;m0+έÍ =HwBҺ-Aܡ (b3 itH66:*睖rqH$U[7hX >UWВ~*ԍhA"8xnzYjrY,Ή҇B# HW'suh%b@X-؁=d}iJ@پKeG%xyz8I dy)$r9[K j0oڎJސpQпq4r}T`kH:ӱһXu}|ϛ׽N`kh@iQ2,'e ⨛2a+(gC5-’+=Ju$:檱+w/oixפtngk$&P9Z3Ƣ#oGоDqһAવԣ5Y0jR:In'uܹ]4y6t^tŅʼnѤeQq2u꥚]2+bG$(Z*fNqކjSz*,umP0Ț(( 
Ia:?Sǥwq\B!'V*7=Wk9:Ve_-ݒHyUƞ6E0[zRR鈳uour=_CILz>=`Wvr}zU|R[wMd{IZKG%=| %tgV e,Ys(1ߙ@B*-WA Cgo3~fQ"u ih9Ȝs偂S?f IEopIqs-peuZ@ >.337O r}z,# =W/_1MA%%-ݼQg</0pcH{&b^gՓ"wL*ltݫpF7ǡ+DQj|.7T=)4!0~ h8ϴ|]JS 猠߅轕B?.!Zd vJ uH~}tV9m9oCnх^Eb]2oB:)2N7 췀xB4~emF$&U`Pjso 8'L51kܫUCV1tXa=}r{Xf^ٯ jg>זx1 Q9scSpC 7lWJd@=A{|MؙY%  #0#~{~8nD}K(Ӄl66v šmFi,TQX B_QxnbXqK-SObϧIJgŤ턆´- ΝA2ήpaCa<P$_2m3ׅ ;hDUlbԇyt=7Q"YVOc[n y%ů,(ӃNJhX1>C\6} 2 #sƎ3?ߣ}}P&.{d30kT!B^ԤPѼtӴ_Ukb++zQUg_M 8A\eK=e@B{uFdk !;Eiojé.I}?>[&3ȧM:6CHy#XE[Z]ꕀ: rm^Tc-FvUpj[L/N `ïYk{Wi?|7N1R%6N%1 +X(=c-IU bbK_AB^/e9H\!j8 2Sj72w/u:idvG$U(ko.` yx#7Nh^!D?ÿ=Rh.mMbes0|R+C ux@ClWGB-}z訪Kt)7zX x K:E!3?ʧGľʨfGR+?"b7U?S ] ΊxPx9!q76.\`tSD\;.Zj*$X,g$NݺlSXAnmQ&ͪOW~l>;PW"p!jjwYd6YBBrgi79̠`"pBL,(Nǔ| k $Qj׶P^mH=#{FM`Tf$5X؞ v勭2?Ƞlbmv/ū =PBaom#bV2_і̟D O'ɣޢUz_*҉5Xe( C֠gvاZ=dl^,юâK2=SKI"Z; 9*Zm] unIw~<6pzށ=%3iïOa3Τ u8>bslUMo)y͝ZgQY'D}ԒgNjz 1i ~ܯ `uW"0VIb9 Fv_e{xwR"P8 ą˧RY52We9D) EE hCF /P5OT#lh~r+M7,CFP¿΃.xV8H06=^G[+Λzcԙ ;-D'p q qqI)j J~+s B^4][.V Bpt[Y 7\q+< 2c2!CV>c| r 츛T r]k%qa/eITmc8! >AN$OX"1h7r\Eۮ7.Y.EJ49$h'dvȳi#8$%:Ąs7NhKṅސn:gYqn=yo/FˊwO2Z8#|JGFXz%dH D+[@3SR4~A=4}UQM^e=T6=/USfhv#gUg<~to($ykZʻk+ղk7O<[~}5F1NUOyQUKcng_vwqE7Ol2xx2r EemPt}lt۸o!L Dcrz;P*%n8T`VW 7p5mOŞߛ|rNo_&ݷh?8UtG+)tu)ݚ:Ԣ; cr>_V?V'&rS3I@b}2}mʮÛIoE˛Wsl{i-klgpPh &alFb!x (i'nqw f(RtJ1C$0F>I^[ ;eq0:1݄.Q|tEO b2^>E7:?Ҕ*n\WF2s[c 3JLឲ8YonrN+Qjw.WvOk'gUK8~Ivl4SIҕh^ %.c!ɂb2h*[/QjIHUw.A{ke IP w)fv &g&u )Ҹ P0>4сsؘ1?=cvGJw jؿ#.H\N&^<*L 0#tIXUyaQa9(j]9F {׽5]~$_~6ilH4F缪SΞS~}{\"m6AU"PAz޹:N9q@o-L _KVۨw3a͵M3uoA9-"BGƞl%Q3BJ/ڱZ,a50)r2|Z)J] ]?ZAӕ)hU=nqnR^ZjG(oY@ /wlMtQTҨ{HÆ?z$] ʆfGCh_EPim<92fēcY(PpYhS{ ]~?R:zo3! 'Lh}0Ԁ3' !,P%LfѦy-1>k@[%o+އMP0cv/1?a[|XN8Ys^s _xjP wOi? 
a(ُ&i,E *nT6r'=_:<_Fh!(t+Z $#[r6ǟ3ӄGvV%$P.ɀ5WGa/IyL<:FY3f[`s!HxH+*We.nݜΈ9T) ?.3ͫsr֚^*E6{{D>,ex;U풼"[@bG4x"ឡ$w_kHiV 8z#i0]u ƿ3*%3=4X-WIEG[ڪ$I.l+&oʾ|LǼ# AIn_|0`e3ǠMk ^ٹdhNK{Qdڏ}T?n3fz'fjwUw\/M6\e[T >D=ga~!=NlQmy&[Uo_X 6l6۔ sƣ3=e&5 +J7fHn  ~dl9Nu۞fbԑ:$a6h> dK[-9t!;vfp(0Hk~Vʿ-uMM8?:lh; o,9b 'LфEt@^i8]bgh9uՒyvo*hHs臍!g.>.zѯ{>8eDݸR/LUfmt y9;QeIf v AO>X5 /!G!&:b>8B(uo;Lhg>&0rк>>8W 8^Hs#LQPYga6rlxdc$ѵ$= 1NEFw;RIxtA I?:'現yIU2}ġn ( 3yhf^zОJ7s70#S(cS}֌k5Y=pπfqq?YێKS]Sci?OBږA$tƼsnJbw1Oou3r(e<Z^}fUX?凔eLb:鏔Jq7 ^YoDŜ}{WnYE6{y@-dXU6 7^~,L*Wm*=*&mD 0MN8 ߛ𢦼4-IN7*|q SYi}uzWћ[ E ݖxDA5 Rkvj7x'r+)"b఼[V[kY |`n=ޅܶ9חm<ƺ{WׯN1? Q%XARL2usWq$&,3*+}V ]<|8&0]/%@uZo{wUu^9.Fw;Jpگӣ8tiVd_QT.ํ2"̨2q-^ nܵ=dÓNT'.},ULG2+w5ǧ VYxB`LUʯU}ռZkG8;}f[I 4~7Ak)k[]*lE\hե88=>qA^d%1,\zC =N[6'u8TujaM T^AZZ w.2tL:煊Jqh>] bi7'U!#Da[rGȯIk:]gQoQ \W3wvRO"KVW _9&ugÜRY@{U⯆h>ݻe~i!bt2+w#TZ N!eavK%6F[V*8va-Z SW ЮWʡp..7DHz{n+C~ɟH!٤ݷ ="j@!/[ [Ӯ W l͞WᔐCS$ҁvX <菄t^뵑;pOzGx?ƀ_l\s>^ $O!KS鉶vܩw|>n"@:HjN"ofyPުc? GoMۢ ӊVzx'ڏ/()nQ73YŖ˗t9J.<[}RSn_*2Kc]7]T"c 2T-3[ GGCe=TlW)@**̜MBj2ݤ|PGQ\};mZJmD\*~QwOEFi śx}t^4#m| Ҩk~Ds@b Mb#S>m&AO#;I~}|xq]tܜzfr U͂v\ ~:C/Ov䨰I-}Ó!od!!d#󢗢 BP:jd6ԙ"abN+=`^+~t($s`4c 6s +EGtqzZKLt#"㤐MiIe_G{'xAmu+&rVYrk/ecW+(}gTPkR]9r5Q(<0m̱6(jv n0ܑ=ܨ+C ?r^QqbQqcT]kW {x%w \| c8rwAYaijT.6Msd yba0dPn̤m[1u#.q:@p֮oǶ ,%dfpqʩ/pQp`!n>|㠋[|ogzTBw\" P?`. HȕE^ҭ}%mSen9 a+魑hY;R=6tN~{㮜TDi1#(9Glφ'o\mp3:4^#2x[LpPݾ8Ț%mÝp6:&aBh1dQ?֒:ob"Og>@T2}sgp~hFGXe &)~tKeioB{uH̤("5.&:,Qf]HmytL,z:"Mir]!ȟ\藸jc<㊧*FDlrzlMǷʯIgH,e$nLNX$.T > !gd$ɃlaNQ5'u>n]KA4eLvlS $xHu!&ZtǑ2w62 ZL@I lfb\ҥ=iMDmuNU[WU12b~v=;*DUdч#wG^'Ea7(REGc"sObG/m{ /.@R'Rǻ~'>%rU\9aySTÌ/? 
Dq4kgO-T?p.a`9!i,6`J.ZPf`u'OEB!C((]}CaP~(j8%.DӦRr!ǎ/|7 peH3ⶖGdjǙRbћʖR+53l*/;k̳0k374d f$jNl6FxYbAp_);z#$Wĕ?"wwUJFhx89ٕFZxIL`}RJqrV{.س|C]MYd,3ɓyeq-U8[Ձ 8 1{%C%omKm̭$St;t /)S$ex!7a':[iOvZLgfbF0}rbT v hq>f\CG8 kv(f 5ux|Sd/֠ Ֆ{o|l]1Umo6WρIA }xzI/cI%ֲ̉S-[aiO]ŕh>{zcn6]^:>Jeq*d7ޢo8ZQ]۠1^S eqJ+qFS{EU/,;q:.&sY" o`FPaDJ"{v}( r׿ .R%R/+wn͘*,MPB[dE|k\G&61>qZ:޹A;芎sMmQtkU$.zv\~J0knw78 LE;GRX GtqSu :)A=Ck} )kw4Km\ Q t"f[fgP6}.,Ҁ ߆]ɦZHhic<諦~ NA;!dL&<.`׏PVBr5< ,>3MkMjBޢ yvy^0S^C8`{=w) 9|N/z8e|~ز8_PPLMIh[GQ j@/q4a$ RJ+1_i@x68BN~͐IreSn{5r.Xl^q|R]r( MzL;LxK)*b۴(.o m /I$׊E2%Wo=xh{Pd4#9fҴb6B!8btV+9}beP@,};68OFeXgTO$C(=NMkŭV#ȍ= "SqH$#U*6dZ>-B(29jʦgJAH9@TLJTieIb[1͸%L90ڋ2#p~.5~.!l㿍\fг 0jn BiudXHuY7h&N6>婥 ݥq~fHBdt@'%obTn/:Pk)/-ݷcϕ˟x"3?.h 5rGKHw ;cK,w›ɾpXǷin67 L]Ha&9_#utsy@r`;Hw@s>IԸrfޜ~*fr78wڌK@e =sS׍KHoVs:K@lj:d'ǖV9,; Tƀ&kgVc`) ckdld ۻa'(q{7S ٝE0:9\%Eq'ѕ3NyG~>lTUU7u>bH5O=piArrw$+H87uۃ|'vWxg//'İL$x\:&,g y³WiA`xW|0GzKaz4ŽL%k{LOp-; 9 O1E_jtUZ{Mq8Tɦil)+InF}=Ս6]j%H@;Ja{}.EG5nR!8=cAUonYa~'߾&Ja^ݩ9: %|?qg7O}^TALSicph%8Nf|[lp!=dn5zQ&[['?{x *S|T/h9MZ0|Y;˽Sl~m%PjͣHn, Vm)밄2 #='QFaǍmΛxK قZOj{:V`wM?Z9D.k' x{'ȯPjOwMA%o{B'v7!c}?b 0][ :Eg pHrv/?K-x2@P͈Ӷ~ TTE Dy3Ȁ3$~_U9Ӷ(rmܘV)qժ11oj9!9Q] Ω0nu niBU054A9$; }nkz$*M؛YI; l…Q5uc AmҎ`u#iI^Gk}NvV14!=`qMLhϣˋW'8Fv3ID<1g%k` 'x1@$= 2z?ۿ}m[@<`^ʭKr)ᗇ4wògHJ9׮7/sP6 JDf%]酜3Y%ک )D}[ŮN8S0oʨj~bE+W9Lm~\S^S~G:>qqAKp`tz]t2wcFmM`K5a!{[ES?>.mN^I/Q)lfWV|" H8A:i:_]FGo$x$&v/o4F!P`Q'q>@fGO9FgMwfhoxnM!SN.\WeKݙFp"ARno).pUM9I)|UgƎ=Y$ "]uo{͛4ӝO': Y+6Oԩ$W*a o{.s?A _UշEOd[>04 X]__ UmQ~g; p?w?<JMnYE63 yϯr)cw} 6f^"o4%RY(>߻o:>3a|| }mٿ;{*Ǟߎ5{E)78󡻁)EmK*q߬??9h,櫠[\kf}WB4:8MF"@ZD0C ay0nPBT _ @O쒡?jTd+`B~/-0 qڏҶ|#OgD΍"(~'|}OlKi˴sXu:j}/IGrɳ,O4?T2񾜰dt \@ {Vs8)5^)CY~+o q)*y#~`{IφYmkO.) 
pĒdU4AΤhv!.z>F7>Ws ?OؠfltV:Sm:ۆhM^FSgOKuVu4u }lO 8uF7V ܡqmN[xl+m':(Rv#H 獫*A}~qiUƾW.$^V{(ଵ\kO£lOZwh1&Է h03hXS3cN6θp_w uNVe`=c ~Ѿr jӵIf'kNAbSc~27@&Ax ̀41O bR 3c/<*r<=-bktK s(W&*5^ r;;e%Vup/W)oĪ{\ZiKP~}纺/yTx@3KX%ycهp4[ct!0sj)~zLj(w҇Aq/8-uMZ߆svj [Dh{jcQk1pVzɎn~3nN`9;[~K|)$AW"L#}ϫy,T֥O>iLzDOӮtL 4 ](g,Y(]92ӢSkzC6.SmT(ypD%#¯].VL(ڈJBR7qNˢȡD qouet.7qXy|GB:A]=M#E nzsO=MLj U;ٷrۂA)f>O~xOZ'F~yx?[V`]'08~LWqˏBY)p\r>٨ v A&b"nI^-8sX+ǯ ;V m˒ ,MSbAtIAf߾α))Grcǧh)Wb+4.oZdtq; PIƧ*I=ܗ89Q"0=VE!hB_(&SV\ZMv IQh0| Ԕf\ QDt<2KU!xfGj&?6=@yzR-hg7'h[^0URԴMNϺ;ކCb?9WbeCC0'kwse@= J}#o~}䋟z\}Ņ"N$oh/l .P'Ԝ'ҟ11ӡD^-jy\;ڗZW5+MlwyQY[~qǾ>&)ߑ|uPӪsy[Q7 aEs J ߴ#^jC"X0Lj;YQ(4$fLYt,dÁg m:Fs\1[졺*#F~lF{:6`u t9O%+/`r0gLC9UZc ־SG} Aj?Wa tZtp"(/'OXOh|7˗R/.okoz+y|s ś3.URj.#)>%>R4͜{)efxNVm(:x߿+I@*v6<': 穀xDU6^L"3E.XGU5]xEPM~(L"]]Pzj삯| '.mh7gfWɒ,'؟dɻ70-EZ0<|,DH̱T@i35xK8W}㿭az=W|*--(&IH{xY~,oڈn@#f4C {q,PU.c$2)HNןAne">PB7v/Za=\4'qv6,0>.| Io}'HF2GX4Kqnu^Lx8äWQn %jCm҈T ʲ`Tmr/{\:)'bh5cQ& dǣK}e'jp]5={RƒǢnA <0i{ce,۴2?Y H!aƽrҰ\/loTGћ6_.~XRd_ A D7iٔyHb}e ?_bt%l>1ax"y|BmDS2_miJ`D+]<\3Hī/Úpdcu ,h=i2}y8t/rCd~2[+g 9(ZC,: TFqZx|Sx7M Lg3b86>,֤e9I$좱 0|'Q\eq$NBl\+˸9DA%JNkK?Q(IoVNj9p{ M 8*v"r[wgP0m4t͞'h-ŐU@n ָ3Ho L 9"L߲T*iqvW&| ^-W/$nTulWD]Bˊ=x9GEXfSuշo sE%pŠ: ՌLFSRvN' ޙfk"_U62e|<*{|F~vܿ\h8Ǡ46H݃pC:3GgG|i P]Gbbg]'еXr^-M= _㙤o_1 eOl?]ky/:ɽy^ĵ;QEB֯j+nBd`ACEТufJcx|+KlBϭm$y>TMoxǡ1dfT y'4}*oOURw(ҨvSX7 ,[CܿNO lzy` Qȁ)[0WeA`&pށ.E#㗽] Ysy ~1^iϕdsFQLDLq.ՇPs‚-3˻__:O1eEQSM~=>O=h9!V 'g{'2wk,| :ʣH:7MҤ޼i,C^tt1eCc̤/*/ǸrlUẼq{+ &0]rMժ?+)waLwlja)GK1wE`8gC|bml>~_K󚉰 MV#% P#"S'Au՘噵˼- TIjCȰ`DDw,T>Gn{֒?OCؤN$Y1`-- Tʋl /u [T2UC_*ޑY,geMȖ;Ԑ.ʚP|/;Ci:r9*' M39se/'% F.޻MU;2F+[dY~b ^PF:#q6π#ncDlnYjqIUͮ1& *)xc䴼9^_{%j2 o[.FͳzOýRW+嘍g mhW[Q6&Z)"J8ȯn@MQ6F"8% p{rKya|eܕ6 m\iLAuRE#lPZ_>FVRM4nZA9=tEfz'iv>KEoۭq:|#;Fفk4pvu#(7102glY=|"y0{H}skE]&<>/L}<ʫXr/*(Ǐyط1o 2ujۨs0.ˡ;]Y?|@AUy1;*UXsqY [JPcxV3%4/+ӹ9xw){-$ r!:%j O(Ԍ[(x$DEv_57Тun\YB=ZiZFyɰ}D! 
]A-dZnMenwU:f-uzhh^|Ddm U+|C#wn# "Ψ_3,̇zAINolae䉌bt[ih{k]D=箷QḰjkn 3R|ⓙ6u|~p.ZA~NUЯj ů̡VnMG+Uj^g+NdqcH_5]ˍF?Y=z7F+ P37~h~{)Z:I/TQf@5d&#E LU|Uq:,!cfhJ^9UoulGBTHL%GT3E].c((bMrR$~jGJ!v,qM^?@Fe?ÂWsϐ).L\ɕGX>1V(}}Sc=z`"U2<{цoW?9j֬ǞTgfCKd@1tF!k2݄Cmٟ/I#)@5Y| 󽜁oaTRm4UwgeaX9^2+֪@/|I{D zepoS0nԤ`L@7+w8t{ E(̴ֿ.y=2a)y(]WgO\$ -?rAؕf2VO ʑxQ˴xl} mw^ᦈ.k5Tg?IowX0#0'ߊ-6Ct!NJ1f۸;{2-F^? 6ninw.NSjO[o˥ Q.#kPeCy Zݜ2j?:> DKDHwb0{_l{B)u)>7*HDpDwݡa)$B/lX 19竩p$;,Y,^SR.,C6Мq( I>yQy ^CDY@®vXXrUWWTN" U֖JG0|=^Vb N\0^c/(ެ~)ǎ6QYl6dnJuo0- U&Xw5| 㵴_֏'C 4%%i)mSbrZ"Le\R~?NxƱ 4 30lS*eWv&w--=3 DzI-=~9Yދ~rO!RRhޙj3J5y{ҟ*2a7,eon4=-:5iY]Qe:'Q.G~1@~&NZ>/_\.^7h 'ytt>]=dT+Ksa^lY (3rILN~R32sXt^=5XIʊ郥Ӕ`(޸}N!gwпͺٕ'Ŏ *Nzw7O\A|[nUu؂M}VMj} p9l|>sιZcZja.yFkl mU3ccëmVW@s$tw`0NMzhd>Ɗ8j~QtUjC5`}LŭE ܲV1!t`P?y*j%jsX3Q{i/ͺ{xI=74˂~w`XjDv +XȇED ,WLEErv @бحVL@&`)A@640<+D/p#Y<#l U+D#g>j5?r>?FL?.tPQx IyMR87ݞȮZbL}ゲSBh[zn}w]$R<9g;Es X?Qe`5|wNU71F[Y( |aS^2Rr+~G$ڮ~j+,fer4Յzب_(sobJ`)h݀WAKzBؖGzyc,'X鴠ts8@??i 7:['~diQ9F^T vSa-vV}e x7n <Ҥ˵\XصtfXf b7}Oߑ,{ Ѥ5VѴ#g9OQ@^GÓ)ql~L-b׵oiUSQ'K5?JM] 8]T:obJuOӴXEDPOCi!sLx )wenw] +D[?拲_ O4__QZvV7:Ht;3;M?DwOٚz)3ثW>&Ugs,0ζRoh/ 4:EYJOG]NKUzXF>BPI*FG} $h?^8H rIP)RW5b}o|hOZʮa;oX֟2VΗJCt Nz[5_?jdɟir/:Enr<'dZ\rMeRկhI2Ǧ~f4o8u]g>/7ו{bXN0|\w/ۿcfIy# ~VW8?q;T=:pƟKHV,E>4bK'6p1 &B~.?Gέbʼny<4RcEs!By1OX!Nۯe vT`(h-Ajg!)G^fV/Wt~Iq~Un ҒtgّMqr"1,_)LXo [c']ݸTQboe)kVk>"5%->|@ t^IĨj!|tDLgU6,mxX[VՓ@rtoW6^J2#KE@Z>e._Rb3d,TOC~tyVin7c IF=ҽ ``|qSKkx#}Ni 8Q1X߇o,\@ U0GO8E{9Zix9l/8M͗zrqI3U J"ǯY_fDHc$DR%*}4|lݕ2eo,W¬f;җSΧ3k tX3u)E0E$θ4dY/;G=Dȱx4R0Rտh$WiݥY`䢉(4\Xr[}Bm1i'yMR[YSX qv[{@l Hf&,ŊN-?7ޛzMЍ7Fmm4aXOR}kǰqt^K 19sǨ>%lhi~d)q@M͉>>SBDzX-A.hiizv.O=+SW`0[\r]۟mD{$d (7IAoz6"Ǿ@mR}ѺF9={1}[4?:8~)޾R{8g<'#Ƀ{-M%{yVxjrP)ߜ!̢w/ʌ*F*rd| 璿+"!I5""JHܳ -o !7v}dW"+z8I̫]*CwV𢧯 }q>DdQ㣻-++-9Ոn=mk2u.z΃~O!)GqJ];7֋|_aIf!.XkR.;jZ"]9YCL* lU,U\ aH,lJռ*K%l$96_ ժOr[>&ǣ06ٛ;亳_* dM6ʒ&5=0Ad~"~#*ney':({rWzCJhIl7f]OHނVDq(ȿ^&nmV-պc=3Rφ)8Zsaо_UE$}ђxX=\Ci-L)-=(E:tyM:/# + Un4N4GRvt͞HΓw9r(gw5DZ&Z4&y8r~jZysa4oOʧL 7l2(#Kf0Aȍѯ*' G`9[[.PCK;)>Oni-p,;PMUݒH<##6+V&+Zy;' YÙ#<}Ϻ.j8 Ë3r@uvΩ 
ìK]N~EUDNe0W{||@{~"3BZqԹF-ԃ|JEbG@ 36Av&?/Hw*m9Bg2}mdpa~jvBL42z@-u ҲAui0š0-\Oe+m oi7 У2̃2b1Eg}æ/ӏP}Q(&ܸ.-`>Ud'$l՟|S=3ױh_snMssܒӭa7+>hpܟ0Os}&_K뿰Py?o>7FtA@VGzΗE@̝ɹF'1^fn{Ô#{i۞iχۉ㐃ʕ}]&euVes<۴@5p.<.s맳\p($+]ߢᤇuPTKOxUZ`_gO&%֙@}ȘB*(y)x.ʜT8GXu $3pxq%E5z&3IdtHe-4cv?->5ioV( ?B1v"$<1|MsڃTUg>;K Oq d@(B̅-(`?)S0%#]4[LE^3< s|0Hq̣ ): X7Vи/z@5{g/t %]= >8B"7]2Js}5x٫H0 CY 0teLmޅK._I V`O*N37fD7e.N~;^w~>8,kb;K,ɘuqa{.7rhƾ>oοe6Os𯒧LT ?N l'{A[{f|_?G/CuBxM?HV=~<`!!eN4bRzG E+|{!n9VO`GA6Aˍ];?孏ۖ&/Ӥ2! Ȃ87Jȝ 'M41К0}&B md6ŀ~3qt<\qܕr7 u\z{JU46Ӯ2i<^wn$wAuNXeS?)dtqYsPc()[yx< u!$x'Q&)/$zt kxaC^TD3o!S֣&(%X&v|YC )RqWI\P(S j4$TK[Ydqbaܡ*\L8fd[pVe9Ar V.w?d,po(_Lly뒇A^ev祐?,uu?ܛZ7p~O틉t &p1 5ROh0X%ܺ=ˊoh!+=5颜8]Z9d~&jƿлrq"h҂RQTLo`=޴1ƻʋT=J!)}t" /tj\r-QS]"N r*(n讛N,dJn{H 7n[㶔Z(~κ3ٵ㽲rU?[%xkᬏ7:){7nY*w\\h!]І  vAr'=o2JDmWV}]dWx8}w+/{xJ`x8"NȻazF:MOC(,gAiy 7/]`kZu^W)0#Bi.s6ﴭfؖvIoj>r1䅚o7-|sm/i5 G* (Z T/^fUA/o7/܀K_ A?Jɂ'!Sq̣Qt(8oL$oRR s'6ksEIݳ8oHuTj\9?DhnO\H :3^3RoBHVaYeiR 'cH EInY l[ADapmZP P xe_gJ\OH wT}yO5T"B렾seӦ?}%d"B:E_i|o"nvG27}L ôO7&eL1 ߑ~t"#@%Wlc\5VC>h :K#AAl~;pr0_qb+#]`"cv`YqV=`zKc5tNjF/aM$+ch d>xb.q߲Pϣ@='Í=&NEϔ,)dS@D} 9o$9$X;q^SIȒ硾?òGzYciknk4u+eY2U",y| x{G6}2oLG+M[o.\~.QƤٖh=ǓHLQ:z{̘綠Z hNk&Ih 8jTՎ#O~B6PA,1e`>9MX_l;cyA6@ދ˾I¡vZb>7Tr'ӊE/&#weZ[k pZ K)țM8mC @-$3YZQF8 +.ਣg)tFV>ATbOC$ʚ[ )5p2G͔*aݜXq=}w0(Ye#UM%`o Ys'xRɜ7~iW^n= >F>>h+z:(D?.,4]VT! IYrZajIn~y&LP7GOU0E?:vZg>oߟ/i¸!\^ ߹$=772X(< ,*6S3fӠq,J#a{i9Y E4(bC<"+2: Is>bpBVksԂkGpyA`ei O6'c+Ť9 \[ˈ6R6rAEey663mf8=[Nb8ziYbU^ըTYW l'Aݠxf$OzCւt?{γM!?W]_ANQhݺhgNR{.r6Q<]"%KɱNպ\~ ŗ4HOtNPl ;uXU-?QšWJt{Q_|5Q4?@\e: Кi{f3?҃sNY7(~ώ+e[l&bpdMDS%(`x3&@e"xKL C *N 5(||tA,hE3e$XNn +;o>ltFSk"<+Xc:˶r*]VV)_KNb#H~qdV,u fAMB@"ń909<ջg,,dF =뾹?Gɖqqqjf0 үdE{4}5t'\MBlˁ!k?.:`qHv/[?Xz/Hࠄ /jR}UGn{Qgi{^+Xb*CCEc,oUxM]sK?2N`7&m8 ؝ LA=C߽H#!䫣[Rw^+EOsr/wI~}kP ~ObxZhm9w6{m[ZePXj KϏXuݶ.n>sm4A EӓV!W=>!~ P76"OXrcϓ屹L@b8]mWVA@g|锤*hW#2"߾b#R~H6GA[{׺QmA=-fk EcqC~P/r<7"c"g4bQs:'o-Y~sq{>o{riK2Lݛ˵F޶邮c5}M+6Y!=yEᅓaG?s|,(P4$Gow9ẚꎊg;qnu𣼰l? 
o|&yݎ>Dˤ;'ArҬ_3N':4oI7Bo f$UuDzW"QkL6oP|M$(#w@WTb;D%+|{e2.EdPc JtCS>=hs'Z\lX2 #xYO_ w'Y>}MXE<g,ʫg@o&>Q(77O7e)7W7gǺUdrdXH4=U/io &8~V)iH c 9/\j a-tƶx]!;w8AΏ/m>m\?j~NI[=. '\n^ã^QB[.>~'+`0T9pRe,vyz2IݛΧ?f;b?qg3յ5<1Cie'Kf+aH+Eb{£40ߺ!!܃]򖎈-LY?OMfؼ(+҆$PҬ)OjPKry?hxPKDA=;{VTx=d"'cm=_cU@釔4Jgs(qB͞bi6I6W48UF>7?1gq@}t@[DzhS'>:d?%=DUh+ZbY+nO>u9n<E_קѓp' !,Mn!~-훁JK%A@0(:Z+?[0L;SܗI3x3Ȃr&}/~kq̶v܊\-jsXxY9CK#r.ǟB^sopKX@zc̺2ODk,w(=i ٘l1U\WSL r$D;et*)j `-RÜnÎ]5N4Q@|8BI#e+P%/zT~PCA8G $:ei \Y}'hX\SD#rp~ mCeOcۃ}Sʟǣlq$]b !܍r}.6þtUB' O=0 hWb ݤAK=[~j=rv Nq?-Yv̍v JGx;ɥ̶zyxǏ[-3xȯ_VTM|n[\Q+QE-7Uc5$h:R̕@p{H~IP- $LQ1pcYӛ&4JЃ@xs̓pvgڤP,l',[xF*Ї`D%q֕0Mn5qoH"{a3L7x_[(/Id5_ 9ir`7s W>nFl#Lyth2an˯ F1a_8y /T8a&Cx%U6[qܩl;U7_: 1ެF4_՛n#C1HOwHB nI_^2` ]D+LA1 zR~q+8zey Q !?qQ$|\&Ζ$WImQͰt!Xqzm%&_guWa`Ũ&ڤK!tZ1 郹&KWZܳ!5>ug7?\{r'\qe5q7S.۸M?'t<3?F7HC$7/ 9S1 k3V 9Pkj bşO%ZqY\ب(@jff,ɌHZq@F?&OgǍI*?ۦW{`°%_B^!m/X_"㘔,ji~N|;G$>MςI`x֯ :ƿRdg${{!K& e#U/L0QT0M /oW7?yx[0ٔ ۆlWKE3fϫq~d[uus>>S)K4uz߀ʃؼ! =G;"?Dy\G%D~;㺈wh9!{RW2h./7]Qb CsRNJDc^6MYIͨOJR 5z}_ʻ_~˅|u@;|5P^ >)3? 
"Wnm},^j:R>[i?{G0p5~ A;0Xɻ>}I0>I5 V%~/Sv;8D*.A&ngҏ7x ."7bӚ*oVs4ouUU_rkMyzIޏ~>Ãj($SHkaG@Bآn^4I?e%ntz #*GtؓᝲS{*a{Q x<)[X{㌼ PO]xa\2{7pW1H/]y 6> Q)bI[50ȮIFUÊ9˪>Y4j)$Te$''0MsRj@>t qR9!hew.` %\-+1yهE+BDI:N@<) aո< `r''K8nGk`ݕ|Չ42i-!kc1vƫQ7qaǔfs2~OpFxaLxW@'?M1_Sた+E-(i e߅1uEEXӨ=/Iy,th&d(f2rO /'TT"ݳn'Mx;Bqi#1k|^J+w _O,ΎJAz rKbi߆<m}Vƫ|;"0E4֪mO(f4!1y-_ [kMK寣<"c}$ZWmts߄Rj^:O'G;O7' E9 l[a lS(gGrACU5 g9 ,rVN6|+w\TnZ+8~qk$ bx;!1ḬF K~Wm%iR__>^q,T0> Y4 Nxx.ݓa0?#ha[w,\[[&~zFRid c~buo\D-#0<_M 0߳N]!Cx l]$Q9@oG)Eza\n=H  ^x}s*J;_{;=j2z>kح\յ\^ž*cnzǯ _4)۹ut>;QO`t6rRmۂZaVg8^х S4FW,&湒64L zN?n?G|W:`IE.w[zSK"{GH*DQkvӫS Ý2Dd w e$H 5H**C蛳ndX2QPdb 2DCTUO~iw:<}gn|qY]g㵱a#G̔(ٹϤk ~OH_Gʀ~_ʟ˂=)Y}۟Rӏ"~}d-,L~8rLv.Ţ $:a~m?_pkKcטhxhgu9St6⑯'\|n A'3ɗha$]um" B7ەʫe:9w ̑ij9g72&ۣ*H^-*nIJ "DQ-VIORb@Ȗ/IL>"#NTfnz_`,jz ipHX}%u~[;6 BU@`yE˷Rc)3г\?H$WŸ#ySŒOʰs!+ɻ\~XKe^F_"QU{t/Ry/!MIY:qi$ 6 圻w8fQ2e q<(G<ķôu'IA&z)Wad8'PD6|V}Ѓ|:tUytmY:D>C.=w^=Ybx44AL=&p|kj};l^[:ZmP?uл7z;x [ ªLSb=a)]w()\pB@W}{(#h)ϑl):υS&q/ 9IŦ-Rɘ]ЮM,Մ9 P+NpK{ydby FO<ZW V )Nԟ(CLJNV848ujYMռu"꣘Qb ApDCv IjzOXl&}F ,!k]oNm[&fZjZBwtU˽yCe%tZeӄZJ<:E"#|d~i?[ЀkpǏzH8$q[ i>)ݳ`&}j 1_XCaƓ>2 A$`ي<é $'TwP@L5;MۄXP򤎋r&OɰJ-;'uK+!=al-pc0nsE, S_qfNX Λz8d/+cd)QddjOwCk f,cK"cZ:pԲ=%-M~zZ.Hu]EEC7o}g-/ĴovvOsӌgk@yn^pև**uXcrٶ-F=z#uJdp-l@!XE);Ix8>Xŗ|b5ee2|w?.2t< .'.plN!{nJU PV$Z4+=Pӏٙ[oĈk蕖HP])m*h$f8u0TPKVEFIĚor=g.<)^)zGnXux~1z{jj/7jlnYYP/s~Pڹԑ+4tu`=G_*g y M; ~j7?N}> X)C?:  !G+{drifᴃI>@_i/ I&mW-'!> -ɱxd`ξVG,a ?, 6撳Rq`qrt"EE6F4ix;8(y2REXo@PAxa[0ae +/.9٥)v'<=MU~phxײ_&̇y2QVuGP͊ 5`hp\iPM໎f "juzs*{OAvipD HC,G9ݨo*1ͦ2eFl<΢ C{=adC Z>P* Q@Y0TLm\oh$K88mȲU> C%<HuMX`Eh )/Lg‹7 o WO7.9WM&h:_cy]{OZ=] sЯ'|6WΠk,wz tmhސ{ lRg}ω2K7 NMQ>ӌçWoe=/Rۗ?$թy4",fRjATT;-g3W+  }f!!`%P/3ǭ֪R^h;wiꍻT&kNՃ(A KoL |\rx3\qgE}oEdzO VvD<}|j4&ZltFݹ(vҐ;|M_vO em%8G\|9&tH@hA<+ V{I& N )GuQԌ-!! 
vO)svc̦ 3 dF(PjmE9?#B6Oꌔx(xZ-IGPk BYkoҿ_Ho&[՚{&6P*P򿒞8q&FާKLO곆~ ?T)W %NY GK1e9b9@zXE AfSA1t֎HerWXڇѠ\K{F4ҟ91 U:Ih3 A<3Xkuhxb"Pk :@ܵvX7fC1Ί1YtEs[Ab;mcs\ijBtMK :V suLIPe-)G{v4y6<8`cQ+8F$Yq.˗~1k,c*dZT>l!Nxk ~_cx\ueK?7PWLjlW\рQ{ƹ}wO޼!eGPSc!LqVDGrIpCקMR1}2k"^0tTdL tړkBD*1A2l(/KתVyZrC!b&KD2"*L1'y*l]k콋Ϧ7-pv>]O FD-۾T1H3)aC#(M=u';Kr8&;bu{\xk4ֵsG;-x۱Pɇ-i[ :2.[bƒg,Y,'2 W#B<31=myiS׍_fªHW/yGmJ&!]RQߊ讉 ȃ,Q B V 6Wݻ,L0qӗ>9P:¢tI5ڵiLsq4FLR9exPmYtqI#c$zj`ݥmD,AwGjgǥO0]|bBvB)^­lTO@ka2ċ%dz(mO ̶,' 'h -"hlQ! 9Za\N iޙ[%kV|0Atx:q. Br栓 H(C>ҩzV?*P=8Gj?OQeCɮg{Y-]c1ntcޥԇVO .~y&/!$w~U~\pT",(`5+ p! z;ʛ҄km8 /7?zvA_&{74PE`#YY3X"ی ^"Eȵ2T#UH܉ڿt܋eʽa-Q[9 ɧIc(H`¸rZɓ f)F;*&>3$4i\ HϞn[,to87>YcىICA1[%BMe%-*cg-d1Tܑ `vѼ+`VjPAφzf2k.|zQM5Q(6Sg4ot6djN n`%T92WAOpSg+V NA"6[{٧W.=/Jd}nps^{Dg#)ZоP㪐<.6BzSchSMrFuBf:u/^) W\u-^8l֣*0G|+AKXW;8\uӖJQfs@!WB 1|x.70 ^*Îⰵ9!4q8QkYy}58cyS=9%9ú?TsI|H$et.p|TN2Qp*4!=zi cxiF^Lh?fU âvIO6;2K0Ae.Y:i"qվT(eSN2"3)|;6YM+v]=7QZF?VEA3MJEZ6j|*h֘7qdb2Pt/M+[ֶ9oZT{1%g ?"Ppa7 Mt_|?*@}g ^^j,T\ʅG5Moi^[zrt>6,VpѝG.?Eʯ%&h]lssmӜVn{\#6\'D‚( ,TYST '-4Ӗ [^RP x$ Ya$*h<W9$O1K‚~܉.M/moZSI47+7+Nq+ [Zv,۳{p:u :Rl&|aom᪁)~N=7]状 6Q&lٺUP[]g C kCZ>aoBB,JVS& BXuTzIYI[0,,"C3.sv%| Sh) *` o_RjʋrSQDŽ As2jէ9Ԥl臹d52ru@gP*f)M4)`Ft K$!\='hʍ]=r0ď%QVSپQ-2+u|3&{:=ukÞMmzbeþG\@QfGFȔ Q'([ GuG;-;ԁ"zKr Tڰ`n he_PuJdP!SYR_3zeےY(uՎXl6142aK8xGH{~xT?wξ/z.D@plCyI A+p3%kgnE9n06:`0efEڰuEwig$tbd*fT.SPٵاt֣ԈqQl62Sclliɱy2o/C$LфRp=ƶN>=bR*(b I96ZTYYEEfUsŭ=&@*VX4 ^q"a=9@N*M&K}0ݬ`|*KiE=x R< ΁5$DHo(ĝt9֠5S9fҊ{>5]˩{^Q;B3c!h[%CɩVHś*(ѣ*AOktF 1!,M!hh;tcFzsA,4_C"haLe$wFI|X@R$0_'klT&i1N΋#H ڳ̍dav:dxt@+ "Sus`+/}B:g>6 H{,R$}u-!T]jLL%C0=t⪍db.U>kt\VߏK~29wS7Ӛә]j8G4'JfPp) "\CjZ&~ qrK"Jq0i ʷF)'4/,'qNjr̒W*i t )HXgdUh^fR-!+2Ў6QYdR+QVjg\6qj\Rj)8AЈ}YJB-WQJPBh[v_D}'څ<&nZdq0--e*il]%b @o s:rbWQC hG=ma +2j9䴕.˩,{.XVДMy%ep!Ȗ*TբNݽ?4ے7#&Aս(dג(881-<%:hrEdD"=oJaB: ߯. 
BƲ#Uq6;pYICIeb {ÝhZmFkȅYvt0F1e ۄ#,m1!+"0X:<\jE;E<#1_h؍g(kYd"TulE*<%EX2Ød*iiU0**`{NIVҳ5dYϪCFTBCTqMDjU!9`(jު76?ӟȀ"v?s05_3p|[_cql}v9߄T)s+_/`77^.5ipo_Zj uex sXnjܘN<%(li`JQs)8i(V4\͍7ΓC `4}N/nHE-,ߪ}m\E隉9)bRI/ k{ zˉL1X!E&F Fs<& uU=" ʚܧ纺äXCJ.dSC!Tؚ0%"86!% >m2>w&tىd 7Pf}vv>Z,ݑn-XGt =TSÜԗ#<--]Pe*\ N r Fl V4C.|[LqmSB&`'Anח֞ɑ>"0;LlogC$ti o #lkXdb!b 5 5g\ .mV^3pX^srVJ6hdRN-Bh-%zY7Pы>4Q,i%&+0ea1xBeę%zVD n9'ddƌ3'u6pn6t="խ/e7.V*m3p7*>$sC D+G𥼯iيj|U\YdzsrbҲ3.oUSȗLb0OHcbSv|'KD>kCtqCV+9r⦽cKT 6 {Z5JV1&u!4Y. "L"`V|3n PmA(afK~h8@(kd]P !hTaԮ=Ԫhi-?*i:rȹEl ssў]%5`4fdN`}yVͤ>e~@n/`՞`ֈZG D vxW_xWgtJmi]Mc*͒zLAi\K1%1}vԪVJ.b J/B[Q¡,Fg5S0^  祽fT]`1I٦0n83`>^qDܶ,{odwa0lc. Q"/#XQ]F>9k_1 >Iq ܸTxB!}JWXԧ~DH5bP3pIW V`:_RJ "J[ U;+;n Зl 3Ʌ.p/qVG Y٘e#ՕN>$VdZg='#Pjw 遭bT9bQN)J%{6G|tߢK0$mn"%H0oV[`0F%0bBpŽْPfDoOM);lk2m[b%!4\z+%zr1.渲lvUfHMH`\EV.)" aQmwe,+@)1h1ڢ $.)ۏkB|?5]\|~.-^n2'Y\oY@7i`h1p"sloaJ$UZlR&F,- Z5 NS^'rgs4<U4M-6m 5rb7V6`2"tW_Kyfm^P M)*L12H~ISTi+p֒N0idRfqФΥ&>̦n+@3`p?k%4e.fkw0&M#HlBQZgXcܻ"n9sk]t5b̐NhIEW o%A(%r[}#+@Hb*XpfKz aMwQhUe| ,Ft]TI"DitDU$G7㎩^%=VNi'^Y"bPBB<[ ҌǑ T%UrY$0H\ q+ (* yhڑE̙h(X˰4ӘIpZ3Z9ٽ9( ɢy5jȰ69xMKH]w*c0Vkde<%ք[Dμ>dخļ;]@aFYi=ovThG)8Y+ 򥳟^5T#AU}8jp󭹿Uz]Z &\)lj O(F7q!c[XhuGBw4 ggW*1_MkCx=*.,)Z-*++#2vLe"Ϣj»j!Ò,U 2jf6И箱D+`xn[6*_sP$ iF3tG\ks( hȍ FHSJq6?& aoZ7kL41g">tZH0D[L.ZQqEFC˱-iep1Îʍ+p+r· ٰOY-QZѴ P$IKӵf|P5Pչ"#I) 7ٴd䖘Ic\͋,eh$4N±Mid'iS, .~yDjpX|GaWÙE)\VT(f bR0CJ{" $T$ F `!cXpS*Q-6hśAI )tʼrU'423鐪b\#{`5.sOA{hHz;2 p.q0qE%!+-xxHa\cH&Fd$F3*q!HNPB#~( RIj*+)Q2"4bBȃ~61޼9H1LҝLYFC;hԵHcU=dhzܱ+ZmR1^C3bs!\.6#ý_=ӌJI*3iʏXKF$#hǃN=F5]vŮvm6Z1[᾽㸑LŘ[RHRMN>*M O.Kcm] i-4 c\ F̴ *ry/vk&Lvqv,iLMPA!þk  a%ˡEř#gۿ! MKvb)Ls_ҫ^ Ŭ]7o.nīԮaNEMgm-U. 
1|0^H+wjRkuKX-C{R-BdIAC^CgUVJþ1jsj7@ou _5cŷ]bd\SpRpZ+q6m S͕WVFC()kE-˦/iHNt֫w\UQJ|=@0G%wfU+~< I2W-0~ ݉xej$9kdBfHN^B(~9 spѿ bgcjH4Sϩ^kYj.a]Pb֖Nq2ح쬔HXv*ѓBSJ!#FF!wwF(*$g Eн.󐸘h&!&eS +!]ag*2Y B*Zo= PRרT,4L Œ-j.V'Ws #q.㖪V\yZ!s*(eDT,9W ZȂ.ff?*QYVm9zP/* yzb{Wּ:yqkſEz%cLAdGMXy 3aAJTz!c"]Nq9Ls.쵬 I5h=A(1 c4E;%DV6Ծ+`ºU]rEL14C VKK#ns:>qM Q]ՊrL1GmdXy;KOy2o}8\xJ&OCF܏ Ȍpi<3WZ]"levVBng1YUÝL +r`xqa#R-Hf#nFeJ!z!ל[ETnň\\!S9bT6H?ڻxu;1᛿rS%Y|},nc۴ρ}BwvjC50i"W*,˅тW6VEV >^ytxrk>2sw8=(Aa0L0_^%1}eEV\KqSڱ*i 5ttV'Y@{"IJU^>R*+{$F5_USU",,Wvߓ~&N={mlW.|VT:?Q3-A XtX\x/# 8 DvV_Bfs&φ9AuT1jgEaU7jaAǒ 0唥x4y]&~?\QXoΣ|ս-KY:4/vRRTo6燿y ͜hd~ `7uޥ#=[lc5S@a& B1W"i#nZZgVWq>$@noW/s yvZ0#Gg)d~g,^BWc]|\gجٛce1NƮt3y(^ $qlEܽbĸ@{K_%26HZ.ی&8r4W9м+eu\U}WLU%5^M"C$Mn̚jX˔PZ/vS_⫆7" Dp š;d]l;ɿ.4 HD"0k c֟\SBfň&.' 0Z >j&Nh{ }o^iuVăJT/ę⅃=g_i62]S_GA**b>/VAP=+X۬770wNLLVs!Ǖ߯W|v=4SFk)S;ULVVuJM>Jyj }[dlоf˔ ~,%Su|sOu0!{8 o&4@WKi9/tqs6g`xVqCYn\7}DsO W%lsгijd>N)$)b$@YiVx3L L%dؑmAH\D*I3uExsyg=e:dy tRnЖWK?fn =1Q37IUK-6CC$z";v!p.pyY/w;1u]1WGvztr(PbveWbbُv4؂{d;g{tu-kyACo.u9Pq6:ϦpqTcW nEWeɃ?&|46"dxNww]}OU:QN+ٳ:rorU>NJ[]xNKd+9>˒]S/3Y6=svjQ90Aa;?׼Y ɘEX7Vz κM-q3>9?VB3biKwpxfl!5.n7GBC-2+]ϨRtVź{ؓ g[鯅 \y=nyZa3 {lY[z/G=uf!^JfiOxU,p8Kzu/ ؋RR76 ^2M'6?%6ß`5UkZo /^ۥ<,dzi%g|p\;gʾ\N+SIV>1QyswRtc19[l_ g#z"ޜWwH vt$a_~\Ѳ3t /p2w;x_-d33\'9xzomd_%՛Ɂc2g*b, ujo#et]m )FLGI{:9s۲/UTy^_LeF;g/8,gs5UW7j}:r !ɿwןgS1E!3z Z_n#yT'qee6l?/dmϵR{W-8zx[#K?V<63 G5)r[).`+;&or8;Ĵ֋g+^J]e3MwF.q\{J:P"[r%);j-n9}>Lo\Ĥss}Quk[f!( mnj+l`:\-[1$דp9/H?q7MxVNjuϟ)CcuWkOڿ3Vkj?)~E::]tϧnč.LEalgHMJefzJn&o0](UesoUNl0u $^c??g :іO Cn}ww]v?&"`ryw=l:i^oBk|\7m-{;p8Q`3.Mh%C2?k=Z[1[-қfj{Jic\/y| ޗrk<-8\m[ޭ[Y 2M8*]pd#kf ?a:pp VXO9Hi3!I: 4v;r;x[~]8;c兦doW6B`C]mϢnuJ9O<oW}Vg%J]aobpzP; IⲚy@#%OW 5X%\wɯ{v ؇ӕ\ cFx3Fr:#I.yFň/:ZqM[]ٽ~׺!/ ͤg7.[ ~y~{hAyԼx0._ӽy'd_N?A#l}¦guzl^N[oO Ig6K@[X?|)ruS7F05f 'd.Z}5ѦAߩo6a 8?{w,+ohbdt*x qp0`[+}s}}22E:Vaa]L4Uy9banf;4<^t_w]Y2hC UKniOz\e'{=8R*kJ*lV}Yq1!< <-$2wxZ#1׶XXC떼/[h2XdD[ɕWħk;K 2szV({B<tbN߼=x9t ˒+D zZڄ"~I涂 d6pą9 ;|m>}1/_V1gZw}fsU>͊ q,HiS4a?['Q>5I>vIw$gZѱέy?=H~GP=/yC}GNnҖuڝp 
Cwpl\~Jԛ3$\HQG.ZM.=1t{ w9fM+Hvc3Oyu|8{.-nĴgV^É!gѬ/+l&,^Dg<~n[߫O5JWp= N'w$>w~Io߅h@[ܾx]_mo9dzE,Cyy/{_"ײWӫܺKsK[">PRkj z=Z}4A[}',q6hZg$ob_=Qt?̏|d4ZvVSsrp6g:k&3BNOw8΅"~>CVdzӯc#Mk>zTYUk?%ow=3#tKn+YVWZ?7COe?zCӗx<cH+`'BgKҩԉOP iS?bӷ}=}Sl#Qx=iw{k]c<ٿ`<{1*ImQJm?_g#w#pu8)b{Xv(y.k 霥3Q&2Ȅ8 3 kS2, L tz yW.gOJ Ιqp2VYΏv}kTq|N5̟b?%o[wc.Mp rvz0kKK [#cum*ym!+jΦr/_ Xz:i{jԡ(ptv?c/<~nC43bAzp /9;;&몁sWzρq379 RAYG=p'$W[ߡx?T[I>tw;{S_H}wVkW 6m,mb=ػWvZ=Z<}tiޓzs:t|]uu8hY|l? 6 5o3HomlY~W1uW|4;o_g/3ϥ!ngz|ngG?ŇFG3XOkEpı+Ա[<1JYdp#{xdU5Σ:~oֺgM\7t]x-eE"u? U(xZuA0}'?볻S|xΓzLcSэK} &LdR.l~Hk^ON|}Z/ o]ng؊'7Dń2}>Eʉp+V+LiZ/̔wc3;:7}$`{|;V1wx+wfKv<^PQSMv La {COWEg]#iķ8/~|E^3raX\ ~v ovx 5?\= LVK~VXnѻՓ̋OKWO_xfs6l^ -XCyqu"2Z2Ќ5co;c&C}L:֧wqщfx2kGr zyǟ_*{|~to'mn/nL{*J< }>LS9ʓ9}hHiڸ+vä7jʽv}w]]V^gxvnd\0'Z$⺁Mϻξ9:0frXyo;;z,wg53աڹ@?S ǫ=.6釰g/_x7bFG_}Q6pгz~7u}%aNs.OJ0r.o\u4^馜>Ln$/oiJ{c|Kom]l{3tl`.wgmޡn~>?7}-/*:oȿהG*q|+_ψ.a,)sNu.x >氋k=sP]}? ;=t5&ϲey}?F;fۨ2oʓ\wZp' |u];'gu{zpTs_K>ˈ?U{n>w Or|_^^ێ&7G{y_|{}o/sn;w;>OWJWG$}^SB˽ vKJ?/̟k׹g| u }܏_UO|^7݇.WC#I;oRs9x!`;qT%S:ܽ{Dz̞udO lrmrpl^_Cօ_wwR$}jku,?o/9ut9{p#;oaAlqo_ܿ|awiv-c *a|3?X`ywgk;3ؼ_"w_'íl/ywawߦ];/P| fϼ^f9!x^IYwbpt$p>~'.fݗο\;GVW4.Kr}K%QZ;5;yg_'e3Aַ>OMυ1m?c~Z)~o-R{;k}N8,َ37ѯh~ ap;x{~gak?jk,Q w&C3U3{;/:[v_ɕ2y<]w5.ѴV3fq/X{p_U!Cly^שw/v5w>, ?Q_5~~'Y.>GYU4ϗ4_OGsix?/~d']]oBW#{ц0}3_GyK'Wއ]__uV>&bw/vO}躟~RUWG6.{=KOGloH}o&阶O_ӿ4>5>_e-Scz_|lZ o+BUYit{Ÿxf;o>܀KMsz?)~IWgo~yӏؓn{w﷝?ܯJicSo{o#bz{Yo߿~wϙIo?qB܏{?W~ױN?=s/>j; }GwSO~8%wn?-B >]Zߩb5OhX?'^#ϼZ\Υ_;M^Kl|O+jvk [:~߶!NOm%XK󻎪q}>o痝Qg{WV>߉D}}8{\/WI>ZFʩ=c#S?~o޾>g4w-}Wv^_;h|~~7{7>os A7[}/=//[A3>򺖿k.}׾%w1}1wOBSUA`]^zq&{LmO*Sw_^'zDg/~?--7πG?Vn&^*gs y{ϳ_Tnp<>_VgBOwu~?}7 _Oشʁ1P?oa}2>7*%fcpR~3? M󸹌-Rr5?WsO}cqv/w׉{JO6tUj&7<'mv?3>o]x gWP\}{4*o[ou>w_u{ݣ/kF,rC;}]AGeƦz;m_bCk|^h7_pn7n*{?{T|xo=z3c/lwN<#}CcXIY~G#{3?{^b›Ps7/ wW;~O 9ߌk `? 
/?4=/c}+?__wֿݧz6|ϯa?~zGo}vy?W:N~wwԑ,7E.]__=3₿F sLc7@>q}.w}wrCz@Z$W'{_{=y_(L{?+TO*_Osr'pOG#pp}LJ7&JgOPp.8+ {,Dio 7o1G׿hɀ|?}Pahg8 } | ~[~>W($OGk~>O>;Gi~%.Π=Wqxy}&l_So_]hG#?_ߏ?蒑?{c_{o/7{Au_{E~ߤ󪪟^>{_Z/?//tkM W}_m'>{ǯκT{+^%?g?T?uK|g񾇱`C?I@#}Am䤏1(㷲 {?uGqsRTRQR;}*CvH??^*U_Q{8 l*Bv`HN0%#xT}F~ WLRc GH/SIR=XBD(~Wf%aX b1 ``1Jc)c0R1&0Tv￴Oxc X1,b aF1C%X–1*1,a\Avھyc0XŌ1aL`1`" &00b%1L`ccSLb* Tcc +5s?H3,bcXı;cc&1A"c1cc`b V1X†1I+6F__D 1CcS ccLaU)P1bc a0I&1 `#6o_+*1 `A)Xc%c) ccc *0qcs $*V0,b!UC)1wb*va!Up;mTcc Bc$cc QT1U1T)e;\~I.cV1! X'f1cbTaliοi?%1110"cc SSF v> b&1x^>ǫ+H"`JV1"9w׎7EIcJ"%LPڻ]D2G,-Jqc0T-m~ZBHǟ BxƷ{)* m5 Urrs΂jVM2%[Ug~ˆQW(u6)7Mxxxz#Š p'=DIhUONg"u'Hw]~AAj{]"*%`˕o_r#in7oHEh:wR ;T(܍sn}nT6WA!XI[O}}ݶ*GE]#ӿR8.7CqNp}*CJ2&c77>*Riy7 T7: M4rys˿] M|m[Ml<NjrqpUHM*:HMڵ7k9Hʬfn/CemZ5ϵHj:7uPխ]U SQN8PgO{4c-9߷ZS,mRsqqVд4ׇ>X{U$[MżR]=*ҭ 8u{٢Q윳Ϸu Firow>CHiZ4gAwTu]^>I*U05~UJFk.kŸr,^)%M8=}҇Jt~oAEiiԁhg)Jg2龹ie.7߽Гpov䉺[[PMyH~O)C88q@ZM4从y9jVw{V[rUnն**L}WU"VuWW_>>8W-{ʅܫ9:>?BK=7#p8_?$ڭ6ߏ)5a6߳gÊ\\L10&edfew۩&6.|N]+%#*YuH[qg*̌\=?⪮*qN7ZI.'hwU't]˸x fL#4{N⻮]kӸFnDvJB3UTv`ZP4M-Z럅U \o\J  5_>uij盤KƀjZןRIj֚q*^WJ>\[Rus5㴫dnǏS>o+pRSǧIiM x?%m~-QN7~TY3&oW?ڣqnl=}x%pW_qTp.oÏxJpW 0㮼7!ob;~_TZ|.'/O0rS4xuHDtN/8qBYEhit t㋌Vl 뿏Mm碣]kbN9k5Diiӯj^9RffnW 89^&Vw5&U|||wOqUqW}N8Jҵkɯg2<K2gK5 #,{jyTfYrskf$ˏkt:CoG^}R˫ѨV^. VIzY30EҮӟ/U֛>HSSVns 5,+.޷ݶf3vFѫ^{UGyPqG}[ 2^2\3Ef9^pHfg];?czQp$tCoWEtdҘs}=^>#t+e~N=+ݽ㫮[eFѲ nttguPVI{EϟBE)@B4J )%"EU$ UTP: P FLIEB @ Bz @> @  %"X[̴Z10M0 M4JxL6M0i b2 d# 4 @LS(̄M&i4  SDE@jE!5U %JF OSi3SzjiP2`hCAMC@ #@h C٨GA (SEu}FWc2u $k[!0K}*jFYWuMzڎpIƖ YJ\iEԪ"=:xŲ0h|. 
1 \kDDA`a qiϏK5%0O0 ZH ˴ZKLjz"'-n 8Q-oY6L•7@ęìUӥC)LRj-2i~ݽO~{$z!Q$,AyčE E!gkkd}>•v5[5GGU MG$#H)'ƶ7@ -Ru&pnb][ih)2W@O)8f<þ6 l#S'n'$KIMLU1Z}Q 0=D'uCFa-JaqY \¼HԊ#e чbKLBmP @RՇĵfqbblf0T9nir6*duS!6Ja%d8+/^s(͝ySMBP41Z0A]$kF)H7s r4Aq*C,nsCu/}W&`qKfDf,lV fQ"5&uA ]xy.""mxs57I$NVtَ.Ae\IH`A"Djzk@Ƌ|J ;ǜA(|9gGyܽ<-i6m y$tN* #"d<ى)K4ff :jߦkbN240f MfQB\!^ ނ9-v̯m%\c}1=0RQ!EJ1"C=Wh<ǂr䴄Jd9ԯ6}s ގǨFR {ـ6zm:6NA$y c h/ [Da 6D"19}\_R42"A.gu0Q "LIv `Z9sk8Q@N<1`F`k!UlHvS8F|4l,rހ*\P6&&A$=!"}Q>uyן${~NH4AꚧEԽbs0.:e%tigCtMwOِ{BxsrwP@8;jnq (G̼4کαJ !hNT]! OqY ABZG yNA^m8aŻ@U dbd #I1~7' Hs8bDŽni$4#&ӜCKZؘlvrq4fq@DLjUZ{pAMU ccBHd_uQlcLyqeRq^! R&F8`%EXiLNhSӌ͂-R{$ |,DL*A(Gt=H @^ʛU'X[Q g㥝QƂX #TIн1.έ"&T4Ó=OsAdqBnw|!/a9>d~|L0gVW) `_lo8/wNwMcL[=cʡ$ qw a^ɚ~"Y wcF"͉9giC￷4Ω¡rVJfqrNuG_+/џqOP!@+RiH=p"3hUZ0 wc8p!!q ًSiLTAF :csگX] ⹗áz@<~5MMVN4R"k bֳAbb@j5"lOg.x|=%'2Om8y9BH*ndT;\:F72"5&YiC> My bsNfH =Anf=ҚeHEQa0RDr6atqwS> =,H#Ԟޡ 3^Z!BJg_n߂=<ύ8x2)d2ӺhaǘKᩈ3[:,ԯ! LYµd:Ub G1 KpluU\1u; vgr*w-ֶyCyu0&tH|ZOPbC9to4紽 {鴙eOH-<"9dN*I[524M!N)@fz}nU[,\+q,]tQh'HML1G3A>| $K9&^!NOM`uFnlQ"swVsMz,|FRV lMewolC֛ H|J? 
B YxB8o$(8pd&a(Rߟ>-DW4E c!lB,֝~9Odyïs6 cJ ruc6""U\A"XӍaؒ+MsApo \6]7;ܬFd EfP[)g$HQmxX " kmxTݑ87s#%7\kq6櫤WQY=g `|;yN>8D[&d.&dOMfFa $q-03M2u DD)Aޫڮw|/?}v< ߘW`[עczD l;jP^UOM cdJcNswvZkYkq99~a73t9<Zf$EueqIau"vx6w|Mw4%/fSƖf="qI‚z8Q-[JGĨ:q^gd?<ݨ 1cMqT%5CB͒j(NןL瞎u=Cު{u.INdtpzU2¾Mr0JgG͢EW" U=O!: eAB4X1gX&{4U1[t+SI6ms/"28*& <=6%T&혆uqu!m(kPT5kŤH k^#Sp vcqǸe`!C %+,߿֋8'RDQYXO3dAb꣛}-ArJj* q5[q;OqIY pEH9M0f7`ro̲3 }ޞhLBvB&piܓ<|kx,<@,ndޅ9U7teF9hF)kC^E3Ƹq<9J !W6"L;\~exIA O)6_n_ZޕF47$kgPj GInH/2 t9)(T$nKo;wugCs}_^9Ҏ\rY&iZ of>,K80 U^dDZ#Va>C.fQ4|Hsׄ{%wUK:;gϬww%4wwa L,0W,ܐ]tlF{ByStTc0&yG-$IGIZ6i]$ҵ73+II0ٮrk!bkc{9M-\>\.@V3@hIp hY(sř3T| >d҅]2H@ǟ$(r|xcm@$CE*n^f 92[8Y:5gY:j5~aW Dp-En"ݎCxPE:YA[2.&1b&DivO|[M6>]mJ@ӨJ$mzYf)QahXUDNs/U$PGčD,șCw\幑Y'Km}qOsLd(c, y+EV$atYy$I5QW!+/8n @̌ l:Gs͒˧QO7gdi<`Ӂ2,;wcA\A*,3p$r6w~|sAzM*B:9sFA3"2ynryږ(9xGнd#w,r+lTP||Q(Dwمr|Ѱs7꿂O~-qg{ͨx;Hr>Җn8>3d&0uU(k05Ć_Jv=4Vћ^Y )v B85XR$fI\2CA~L8W*js߯:MG@q0hi N3У)k]"K'\CXA`,iosg=nJJRQ2W@?%N{ϤR)uNx>ΊGGMXno3Q.π @*mfc.0xӹֳrg @Mo$±J7 Ì,XFI5W,[sLIg8RJ>a<Br -`"+S-.> P4Y*,_Hd3YLaalĆ/B~7Ag .xjGTPbKpM8/ Ϛ7Cj\d:O-֣9 v4#>(J=' yCIMa;YW.g9L*$ rM"ṩ (JVu"*fvo+Y>.MӴ[r8Q $ ykS`"zmJv@eOf{E_5{߭/W~eԲSb]1J_5>Cg}]|wޏ~vu|QM֏{'74ΐh1weGF\D~hxRu3Ɓ;ڮ:X~Ph a3M8 4ܪYA{:G&dbgޘl#5<FyllEacl8G9P[{vH-d,Sv%w6>O(v~5th('SϾw>nf={;grG߹艣? /0/nƓ8LiF&1 #G*p_t}"z>zCWbCX {yۆN+I U8Plviơ8=Y, tfqV{Ddb͇NެqsFL +0bAQEM0ɯ8:Cϖ7h#Og2ENz#bA%8\:eg@NC}IL5Y"0s=9Z*ʶ+qn߉V(79sfQʇUp[{^RB^ɟFd=r*&7Ht^2o05]!ëCskԒ#||Ʌz&h90-rB}yhw/o 'Ƨ ~ #8R[?#H Z؂+a SуQτxe* .J-x9, ,(}V皽RGg8=:T! 
܃=Z,7r#[䙊|]Ӵ"SüEN5jRb;ṝ=L3aMȁf*4utY[)Ns!j_}y0twϫ8թpck%|Є.s'L\!Սej|O'BA-/<29eSȇVigH;RՔRtL%XLki^HM7MŖ&+8VTWWmW,I9_LQ4䮫ɇ ix<9WeU|ge%` IY' tkJb.ay?]m>~[3i%ngIzG-5etns-Z2N5}c9 7,;|0Nn}M"clZɿ`CFeyYV9[%0i9ՎH5uCUd-z@Z+ NV 4#)<ʉ-X0l:Vm)'RKg(:YfCt3[KLZ9z#uIF3ො :FG[Cv5c3֝ lQk9+lr6k4e0x| YE[;z2PgVt1l IrQZ֝2b 6W9kirE4υ*D/^Nd5CWur;#Gs_)\"Z.to7bЦ&[9ˈ-:SR.ve'\&P^z䪑L j&КIhaɤ cs-g(G'E6]aM[.)WEf++!(*#7s^jih{ _]ij$JxI{TbqcL 7ݧ s(W!nhF< ifwl(=2˫ ˽a.|텟,:,sƸrxsm_e~,폢DЉ)*ڼ雳;;oJ15n>%\U)=TB DzY(F2 ' (L> d)EBRU$`hyI!p^0u)I Wj<ӮY/je O7Fwc>4nRizE5D5_Բy8EQDe9휓r,R $_rTKdʓ/D鎙#FqH՞pY::О\F.6(NNuJ??*u{7wu:~##vc7^׆ҴES{˦xǀr;vۦ}im ^=) J\:'~{ޟ{P }'ȧO>zMr4];r-;<BoyWI ;kN]>VNo XQ+. r{Ǿ|ɮlSϮ?wjm~<\FnIxϜ;'z7>>j  z0mSעFk?~o#^~OWG$@ݾ'FhGVU~p~rWm֘eҿ_]s6WǯMuw]S5L~$ߘ1EcR8yFMX::&qbٛTKڳH;Mz :AgޕQdk 勨x!'OCtw4wncKope5Izu_"ϟd|jT6S">=dT$ OU,(n%VeI o^p~y$dz;S;_dݭ7[6yU(bqeȊ%V)Xaf [6W>4 l<:f!)ёDyk/3hמNl *J2q9c˳NPąjJx9`ΑvgT>NsBvє2GӉݬrsٸ5İDK'o C-zQ::|e&Ln}`DBˋ2lz sMѦ E%&̔6d (ORDE;,-W V@06 =C4ÓƢ7pܫ&)3 O ~+F?/!sBK+i;6̴/T B6yEB/olgNUDq:41.}3d1ynAnʩ?k\pCI@%0F9{vR8z&qhW#PNk鱆RqI5&FTB`}+XhgtY7[(gѦ֫^e!Ɇ勼iž Fo"/!ו?}@@.F-Ӧn/Fuإ,aYP[ 1jq T5+Tmѣ*};B~ll^( ө( A)rύZpS<c:g]/YJHQNHpMũ)h3*Ҵf}cvu8rb3prga㌤jaR ^DQ٩޷%S"4t(ZA@lMcKm˪VeuoEXM:0LRu0t'AٌLE! 6K=n7cԐU5B7ӆAz#m_HɴEe 2Rr<-8g{% E57?n][xCFb@؟䏒[}SX fC[C>c7{wjo~pC8[3KSt'^* |m6 }M,d) 1;"]v[@7$||,"r2SxŒ9=2ڎ#~gQ]_wr#w2/6]/3|uW]](Yݢs9i`0+#ҫYm`XFi4D gfvOQqRvbwW&^!;GKG7|jY-$^AUuJA{yZ1 ţsv6GgADgZ8me`ˍb, 2^+;w)DRqֆ62&?h0/.ȸmYuz3ɚ|I懄>4q_~Ci.j8y[ʅC7a>FF$uu0 l矌]]1SN焜fC^zF9lr :ujAB'?\+PAn`-/z|`s)\S#10 A9l}8;?]$Seih<DptI)7Gښ6yuX(" x\HAVcg e[X($D<2kf`_rF%&0P+¦/`KqEV \ɞ5C[qcko`׳ 1u}uש؎{_bvq^x)~sϷmSW 96*Wy "8yu5t ץ"v~{Ovz7,?/II0NVO֟lS?E.{oaƜ5_-~Ӂ: 6[^өBz,Ya>N{)|uOшBH JoNgr;l_3 @wwJmf.жn[yeK[zsUWW#t>IK?Ge+Š=Tt+~~WmGPG zք{$D Ku ~p"yK<8dߧ^S^ix.bĐ t;kj?\HrX^'Q~}?#)"}'^%JT_,3ޝ <$SN]Z=iIkThr;>6r/+;חVdԥn]m]2i%i4 .2ذGyn).OO%X@@D@2'bc _.+6ڠ× -]g"!5EwE# W c/} 1_sԄ^̀/Knz\a >}"mo.2Z{! 
hBMw-w(/k\VKS+fuIu1TUGQ3Jձ'1[`QIU0~,[8MDDDsLLiQϻ5ڴ\gY~RT~L`j#nkVPF|]/7e黷ٕ#[wJGZq'74dk" "y 7TeZ^ D2rrB!?\96s09qTd'%V#K@弼^7!vwSDއ)~TXU}$Gߒy:;ȿ It0)u7 . j@@h@f3R]uk0ey_MV_/ȤnwSvj@G/%P #(G)vu(GUĮ1ݮkê6ޚ""V34xzhz޲15^,}Bxm.Ja[=ߝaDI`;x|z3\2@;&j &6D㱍LͤmwZ D\v99|",4P[M`.Bۇݷvnt橶'l^X?C___OW4=<4\ƪi#LlqėO槻cAEr)*96g^o_QUK<+ 4<:- FMK|O·kƺUh|S! H~̿B[]ՅFj|VJK?Sx#y{uzپO+NZ˟^s&09mM}uWwFU4+'g HB>}K\PU. GW^iJKmWY$7RcP0l 'x_W*7[D{ks6 5I𵵵 gQu?meggXFJuג@9i)h7{u2^鬗3{+8 ,_)9ZQi6`k I['Yom9T_{R\MsDҕxkIT9/#ut\w cK?MqR=[rLwpu:5}|j IS˜yc頦%5AX,9b F `ј9]v~Wz}~_;QPߏ9iJZmο,l^,MghkZ) qnq|Te@82Ikܐ"kKWsdm3y"2yI:hC\l@lp%2@et~ZU}f!D7i19Ӹ i66GڕMx$I-=kɔ"|EvHXXz  L&\bs1Oҏ(lo$O/HC9>jC>C/J}2}_y/@0?)O{ f!$Dqk<[Q "g4` 7_-pH ~fP, /R Kȁ'ϲof5TST!EUS% PF GM @rNJxM T PAB1B y 4I(Z9$11DKJ\cAT--դ4 huEFDP% ̪lekJh(Ehik@h!J'EPjlCAš J)Қ(B Bh)Z$N#JM'hJiVi(QU:2*< - C@P14%4@RQE E"tZttSTB @ģAtH 4 4^mN44P,8E4Г44 !@  H*/K7OgWNSJlJ UJDnGsH&h E cEUAzwNS& DD88:9cpTS^.n/?B w"E|G'u/e?(zFSG??}@~=s@)쎹Wʢ=1@\)n;cP")(PuGGGG9Yk>hhGȀH0 Ǐ %NR")", JeJzRJ/pR0ZkA\ǗTAp˽)̄"q8PS}? 3 a?r 9G?] X y<ǧ}w'vB*S"Ol=V F=J0"&&LNu0ǀDǕ :>:""@C߾@L/} {Yq"us/??G͎6"r # bbbcἈ(!}tǀq=" ՝=?ZṼoT/ۧPN{!>=P<ǃtyo(HS;z<]IGx<{@XJd2vЈwiXy{J/߃|o{W]gO/'%҇~/u컾n`<\HXʽ[- o;w}wVC޻2!{.ǧ.n@Xt\3O}SUCr5py<1?z1l\˙s;f&&%+N]MR"^mLNš4;(CPr=}sҏ~wD1@I>"L鏥 Oi&D!9Zg̍`b *S7u~<KC`dQ1E2Arhh"1p ^.Vf<a/. M?/ }y|L'ZH@LH%b IbYx@"f`msyGsv÷qϪ:s3Yxy|Qy/xq*KZk; UpR;"ȶ* 2 5G>&(ux~$A~c1OZ@S7=z-xZ<7@9=OS>N_|>KQ|@1_N uì(( >'DM#Ts» xD,V@zpyӘa^ɠ_<>Z\q9~C`(y:`V  (J$JRV`%"PD ǩf). cS,O7ݔH "15u3"O[_*D*U /}Ȯ/{ըiDxx|:0i{Ph@ nqL8BqD!:4e H̙iHxF#dDpV<"Po |Iz\Tw=:T3nb 2mY c>c~[]T"@ǫCtss D1<`08k=ofx'N7!ҿw B"#{Z^刘 ͹bQw R*±.}P_?[F0hx@LOY 9^RX -|i Hd5ELNmb&"` 3mb+G|~G p8 \cِ3O_2Bݨ篠0&mciO{7j km" mVb 0g`\^D$Co{U0S!2˯)R@am>odDR~mb!k Z=?PUAJxw? 
$sw F_g0&WҼ@9q$(T{Ɂ0$X~{HHJ<𛄾DL*6=F `6KWl{XV"}1#.鐘 o4HDXXJlD1glRgք(LLKcLL R{8јbs))P )˔BreZ7}aak`uq G1N "=ܾD r[x.~DR,WY&moV!_ ս@n~NꈂCĄ@Fg)$#< BpIF$?G Rs`ր=3`.#QRAz5zT!BF  uL'^ٟP^7΀EB,Ƶ~V הeϧr3mi1r35)syZԾ4 AxB-xkm~_T}?.L2 @H c.bT}&1MIJJ KVz9|ߋ#1tqso,y4oG.p!NLuminescence/data/ExampleData.TR_OSL.rda0000644000176200001440000001243013456140362017517 0ustar liggesusers7zXZi"6!X])TW"nRʟA,ʿBJƼy $ 7ZUH2"F4$^HK#דGAa]\DGF}3}bΏ`y >+x5@8&#oMMs7Bk{@WomXע*zl$ ԢR+CBfIp'VGj4Т] b"Ѿ+`xJ ophȅ~;#`%XK4j"<ꊈY"u$.ZA,$U'c0~Rv!!a'瑒W93_9l]̡ 7g~ $&Tau Ɏe`d,i`W@V̠:|Fv {#4t4qm?"GSsV"A3 fo,:!krTaOeI Y&֦LpM4VY}%ɬ3,k9OfE =J֥񴕵=8otHb^`%xV/;o P̶F/ T?(`XYYؖk}X\X{>hƐ7-EF;_xAx/Ak@ ۺ6Y#b4'PU yX#)-І#yX\OA~ޤ#^0Wf,HiuIC_U/$i z^H6i?)tVs- t0tMV!g𽇤E=q /Az)B@x wxDҋ>68+%%@*.k/>:͓)5 +޾"|r(v^yO%6!Qÿ3#veZ9ׂJ^~=_]e{#W 5,R$G;6Ύ@`ݭ[%sAj?Od8*'4uDI϶9aܧ@i}i: '>m4qWgMG#%!@*Yx'Vǰ.#NKrjG?ARS4wNM='_mRŒC*!Xroy=#1˷$g^dzq jÌ ʗP7Yڷct&2ڣ{2 L{ކo#o-8f3O܌5 M0m>?dȤ X6s,mFކLObAb0". '[0_4/H.9ߨ=Q;n[ )c*8*Z;SG-ʼn.[zRsl䴦ͪ|k]"!rU~dkZ@QMs%xcuL6s: Q%^T޹D r`|n߶}'Y]'6M07̑^IvKodٷL ֖. 1Nu4"J& gF-xTU<{]'g,_b f'E8pa}&sl.72 swk5 pT(RͰK9‹O'J αf շmX/_68tI?7\O"{|mPKV@gl#΀dOE6dZрΦU/JO:풂ׁDZUS=1LS:5IpID֨ 0(Jԁ$/x~:`1B }g.!֨> {c*n6+KGstvYi P̴X G 3p.~.| ,I2)28^q`RɊV{y5r!JkByC\6?GeZWL߹P~#p%V8D\&5.h?iK:n1/q %H=A284LK:|.@% a .wjٷ!K΂~07Nu?:EN0uNl"eR%y{!x8x9?o.f&S.:+QqRue؈ 3=_hHIJvh2 "w{RځpzGW/E5=xdtkT2@ =7I(5vV.Qrp>gԽ C}G4JدѨZ=`V';8WL_e,MX]T̴oj#˱Hz eXoHkZE*mPjD@OD`"YO[A% 1`@S]7o6뫎֬DZx~*u:wU#]ި_6urqv4)_z@d6T~*VqhHG{y*d7y%=+ ԾnPsO i|nzs <~ ]X6"7|QK|JV$rN:kqYtHC (@'C4W#4z2G9m[ ^;7Z0dƤO]?vϩq~ zkP(MYcP}D5ݕc8.wwd~kw>*|z7-| Vŋ@KuQBWeJR>ȼ3Em "q5f]T^JhO3BGFGm9?A#,J+EXr1V jxbf\L<Y ѭ\|je 3QnL ⪮-Ō6_u,H/>gUwE&w. ]8[힘/08^qUMڒ1E[nn]Ÿ5NL0h)9rQ{;*+t"AO9XdUxx5VuQW1qֈfɽbcON7 [t}( Ɇ wDȇХEHCCW2K$$pA82q)Qx cyi¶ Q~%B-<69O+?xp\ϐ~ڇ^ktEU 3Wn_5! 
M碁|JƞAX"8w:}6vrgTq?\pj?zV$]AZr»Q.k} iLHDNCŰ5K M`b/MȌשQUcԾ巈/':,k uQUFF!6QS΃l0 9|zy(C1Z5{@XnnIK#t>oItc/1Z{-+uh46J b/4xGY[мlJnXVa$0+^j 7WMjaU k@yAiˉ i uXYS6bA?Cy4BX%|bk%Jx;wG:9h>Iw~%Xt`G-0j}6 gS&ƔӠ<\|+Ajx޹f}F[*[QC`Ӳrn\DH>E0#ϲ%wɯ`j*N{KQѲ?ݏDbH?rv¢3$UJ -^#@}KBHHb]e@. d԰{15zryAby4Qrd&-bk [NJgn;>QHaO*k *xZű˧1)i'SNVF|rFON\!38ዓh@\G!ިȃh-eõV:*$?J JPݼ: B $Nz -57FOp3 ѭ@c9M,(1vW(䤁7 9CS)؞֗j@O8JN|b6OL\5t?_YR2Bb5n)2<+V#¤BKpPNnâޭ;Zgx:G&&bLܸ9bPL۾;V"l=2O?mJU} jS V_Wn4%1boʛUohe(4?|m@k }!(<]ZBQFHND]fmB$T0)>0 YZLuminescence/data/ExampleData.LxTxData.rda0000644000176200001440000000045513240265317020151 0ustar liggesusers r0b```b`f@& `d`a>!.% I bv0`<r/00p~APdOV/By{8X$&:ylHr4>}e'"[oiy~&]NIK?w~ XX~;m\p$KWAQ 6% d` f`0b74%`OBY\SalP@@\ kQQ~L6$(fdrNb1H W 0,Ҋ),/׃| @3 uLuminescence/data/ExampleData.Fading.rda0000644000176200001440000000533313240265317017650 0ustar liggesusersYgTlHQ,Fi+ذnD$6lAP(zΪ[b% ewأp6Ͻܣ?7yyց BaYħVRa}ԓk|1־ఈ [: Kr$C?'G7{vjh0Mk`-0rm,}W w(M$?-.q6`Y%B=YOjQбVskrZůԍ 1 xڠpu4t:>$ ^Q:?O0];`An7uf`69 1A _F~ )ۮ[ ۗPc(1.%c닗5OHYqtis[O;jo2;t(袝ucԗ}@i < '{ [7j'AjOA%12mj;7{;=e?HS/EhԷ&lovЧXLK ʴg˺ WĮ?Lϣqy<E_Kiߪ98 x6a0b*x&X)\A9}\/(@eܚQ#使j 8bȡD;hLuH Z kzL5Œc.=%YЬ3vɫ d s^ΑhB+S=4ښ2npAfB,V=@I0JB=oNБqr*W89|ByY OVPD}YM.UzyM},%bfAa;Ye®(<͸&(dC=2Aq]慊ȖG^bvHP-ΌsnU#HAU3j?]9o" غx?d+1_tj(7u V̼ h%Э-$>1;Ӽ]r3w2ΗgC?,Wgq@;ޯ4#tp}XIбkn;wd)3 K)̿LU^U>YpF EO%qC8~-AJ꺚"[fː#?rMXmY?N- +8`2#dRw7_@OP.??LIDyg2HȖloHrdذ.Ә<.]z/3J} 'l?Uҕ" cߨq2\n_qv(6$m;,y@δh{5&!mIO?Q׽+FΒM". 
H 3tgWv k7BHǁ382Zұ=4SUZt !}a`*).ⲐfȝJ50AK\n/c2Ft 29D*w D\.%T+ ~u42iȯ0Fi칚Dm<2ddz^=W]~hl$ H$=CW8cl%psJV@&@ o8 ?m6ǹ$WB&ݜYnip#Z;)< B '2+^2g]یn+ςvwރޣcOXm&"%oȎu'6r)5e۳@N̾"WRp{qyԧ Ԍ]a ,r˒s)uWRt@وWg%ȕ6gwr&FnbK}PL z69*7b@ɀ T͟ǁYHdȆO'P1;ȵ~gqiLy$^QJRp0zש1DMэ "8 p20y񣢠l~=^O*+/%[$(okX^j-Ffns-/26d"kL7%פU/=rU?+E x_~ȟY%Df`}U'*4XR:MV"r3G7jiQLw &oo'8S\NOATV,]Yx {Dꔿ/14 #GB>-zk0oFq걭H(s//B"ę=_!_8;]Ͼ߯7u-dIςwWڒ;  1/ 8DFv^pd + Ԓ47G"Q+Y 5b-[3ITbrL3S'GC ƎT@Sg2JUD5X~LZDNF'\+ہgivљ3 BDFyDG<1*W:wXѢ ovZ8C$D| y);j-#Լ:I'uYb<4PD|`7awO gwu"5lƬ <^|W~N1I%Đr_X)e ?lQ>l1vVt;jS|Bi e )T&Eb7:ok\Lsݚ:\A];w3| /fL .H46Pk3T+㗥S l7]uʿw)gfXD)h3:^gsXą:v Pж`IfCWX)>{?8iԨ:~T k[A݃ X繭UhMϑi4KiK"z1V CT[vq8 䂻-@reچfNgvG*`!Ȁbr,X \'E [i ӗSNr1 PIO*q(63+=W8ydHkԧLjCB  Hgvx8 YaZ;"NA'4h}co\t#EI]<\TQ2ek"~\ekSL}"fHi?uF=Z8ɾ w(*1 5ۻgx;Y8*Y 2 tMjq^bcB\4SxrFXF-V'1Z MtSdJ0 yC,pك3 [kd;PBiƆʂas *sq(n_ CJ)uyXO'.ɄJe!)z67т\p=7u'?.^~)(*KX[}DKJó^9? QH`~m>6_hFH`!΢gPl~][j.չAN3zeLx|K%sIr^GDzCڷ"O|T_T]]mMxcOq{[ȰUi*"rWzgVZi#tx=Kpfb,wmSe*w~#!}~f6q%5(W-v/" v'[=TM=H:(ywWpϞ k#6S#r »t&7%3ᦨ(xhozu6iR 9e`ɔ.q5X̤9 uF^Q@x,퀤#%,/ńsũU`$Ң(yz)zt){/v? p0&;n\0k'Cz52U(.D ~ޮ*avhEfWy1g*Fi-sβ O(ٖ.%Pj_!'n='݇Ŧz<S"/>+ x!y5ٗaM(uVII s%s9YدE1ٺe#;&k9Ljƺ\1e-+QEbZd?VN$r[`GqzlyQ ~EKfƃ@9EzQuI^c,(\ɴ~j)ñws䋇g-xgwm #]CnQODnj`bGӐSfxF(Rru 6=jr}wQq @?l@i4~~+ڂS-FQd+8T:&S65קSI1zr T`\2whbUK 0+ʨG%Ɋ׿( c{Quf JnHSK]j!/"`tu%H~bl4c.}$vaA}Ѩȃh;+15"10 YZLuminescence/data/ExampleData.DeValues.rda0000644000176200001440000000156013540751607020173 0ustar liggesusers7zXZi"6!X 3])TW"nRʟ + ,ʿBJy $ ?bz0z^gVh^aOy2.[:#HvGӕ7Rm emzK!8#ҵ~Ɯ.>5CyuR^S\+o8c=G_ɘ}a>FaztkWMu>tӺƐ"$aD3<[٢ӎ"\>0;Sn{  >Iiի.UcژK(6}-{[fs?I+FX>EOj%I0S:4z7 ה77LS _։ - }baեc ?0&5~.E6;+j3-$eL3KA ]-ar_qŸhX󷖄wKi8 3g{/PGu4n{bq) 'S=&ZU5l,{88KpUON~u! -$B Mi{rG2Z-͌=u䶱&{>ыZ_.v&eZw2y܉LcZpebsh*LFQEM ژEݍɆoG$TU|* z4Z6lUX~WZy=l$ˣ=m OkL2xۄ˚E4m3דү~d0ck }`{QhFH&D_@>0 YZLuminescence/data/ExampleData.FittingLM.rda0000644000176200001440000002413413240265317020315 0ustar liggesusers7zXZi"6!Xk(])TW"nRʟu,=!zBtiXP͗/0'&#k| X>: Ea҇)! 
Wq䤿@hN~ʕ5"1 MS8WxhZ7 |y֮6}3 XYF؛h11]#ls*M 6 |r@b8SY?~VS=V)x5T g9X`ug9 *)B;pЋ}#j]gUǢyH\"T'O9qhݫ~,_VfBbҾ 2}gkMsdCp !Qv~T+D~$|{<ʆ) ;vί[ x1@ڕ΀Th "H]4.#gh/ƝM2О^ekCjy:76de%_3DȢ`7,J]|AFKM-}- bUcSXis>!ki,MULb$1Oͱ=US _p ȻX\_д}T`K[۫KKbMz6ruf\*di|Ņ󟅝BkADCBY^_z oӜtߢh~| 7UsQgt>{mj>1'YB'q2f|L+mXkSu8x̝ 9>l6(kڑ.]` G֨#1h# OT4]iKGݾY<B%GQ)Ui7@n ䷴%lu|:{#FKd=vFPy!;qv{&RhbzRE}2,JزPNvhYـ?;x= gOAL'Nֺ0vħQBIw@W-3OMgPp/0U\왾P߫SbwQ%E~i &oɞz:]vyZ%|\~#{q;FˎJ3T3z-c4EҴEU$ojy8653NE]!jXHZ=TaN`Zk=0uiTJ-_͖0l kvSV#<;uFѰ)DsN#8_$H]  HͿ$wb&!O~{}WR+ՠ=b-aXwI,ia^5u&^-+M۞ t9 P W"|8G{ ۞X dT$|~cT*4 ~=pot[,xg荄{vx>O#=' 6EN l/]B^REpZpς<'mcB͉8Jя xgpBig5*낚DJ(^yD^@ Ubou.qXTpKO- 6~;METb 'BL 1)͋y'KT`b5 qӗuK~BOf"ƼY(!hכZՒWAMdA Noj2&4;b05x4xxcWٽS"}Lg ˒(`+\!-6۵8ӸMb5 .]ɵcHNt/#$9O;D C{F{ >g(0~FMkSelRУ>;)A`+RGFuzN \p>p "zo $3~i}Sߡ%=Є1н0#;QGJMAB,RN;Wo ub gD:^F>C3S?j>-O2[Vo * ng|P) Op) 9ޏCiHC7s/O얉yˋoW`P_qTԢ{{83oϲ JiÝ{Dspo g ]j$vJ3pe#RfW!mp<(dY[TN c\]霘Ɛޑ RqMRU:لo"ɝΖ++ |7@cKhP"Wb=P@N{A43-F.Ç&2yzsۘds`8NRG_̀90^dK{,V sΨ0 (фad$_rEyφAfa0u- +2^Ž|V&VFFi4}zx:mY2 ۥ&(:\( 9E+>JCJ붟:8g 듸ݧ?72ˀ}? 
=;a())0uʸGZUN(qlL|nb_ij.!s8{PQ.>MA$v}C3%}NQ[9WIy|61!;he^avQsD[ :L]m,~O<> |4YJߔQN@ *Ry#γAV\|HPt0+p2c$źO:%0`n<**rzBRM6~9|Vs6rE|rߤҪg8G=;r"|b8\ CLY ` XRW"Dg~9(n3]#d7p6E˱rX1zvzOgGbR,0o* @%;&5Ǚk]s>A|]u#zbqP p䞢9tՓ+8ꔩ8't2l]r>w[%u@uqrV7fC 1`%]ն[$_{ -rƜz^n9 \/tzW2BMM4#8N#BbrqJIa?TP(=>0vpb\~`H' yzK]+mzf { &>Q-Uy@g.C>׎Tɽ>M7jdpyq U8=OTL]8(=T K%R-)X cp o1''~PdNj6~ȞAm2*xj^e)*Gd !*KqOffbZ+R MjM3B8 K'thA(SI Ƶ[M5c3PEi>WiL?J6d3?:(~$4~$s娔K `)VawO8Iw R!y/_V&Z ָWݻ* 3PsEQZLl;-G Rn:J-_2 19=YM=,nKynp 6 O<䥌* nQNd6N9|?bB`[%ǤMP0_*?"Xo]~ӄ45uj)rIس\qOZ ";a;TuC8zkf4H[U#HѬ-9b}KV$DBxm HOPpOÙ;SaIՎD+3ɂp$47juR*IG %mVbTرj0Kv]<@\%l(&@O0'C.|}.X2衞eWRr!=%e:N;6}icc0P$=(֔tjXGC+Y!s%%V"Ui*ܣsTW\u:\cu% ӕoiۮaoaEY:|p&x7{A, 4sOEҹbS럕Cl-Mi4+Amxps"II eqOV4IJYe%~O=,t,ɳ]t Aq{s"o4Կw|ץ/;(Ω ̐Sq!Dxnu/pD/L]E09T?gXWB+~-:@Jc5&/C,=)qOZ!78+Cxu\6X ͦXaq[{ )&ڬxnsdg'z)eO~JN$i~'Mo2fIPG[S֯ ̊OAx`CWf $zry!z2T:9]6  &jq#yk*& Ԏz<q]:Z^8Unav$?Ƞ-E%d Fȟ rd" мZ1 : Y c]7;_[@ّ5( Vl6kW"iu8#I1R{\2lE7/|0f>Cƅn,qy4m=mL٤|9h{깡ϯlTPyfZIOC.qsWнk Ra =5\e1+] 7dE\aD R@lDtDl^>NG0K@9qVXsּ:,|f$Y6Z6DO6W(7yjBb~G+djAf[0 7,k"IsѺtIk_T4[]e_BR7M/oiO z)`)2)&EQt-Άx[X&`u %g$TEpSwr&{ otKHًZଥZXf'!82PG +2H5-QG p=YKnѶ7۔˹NppG(%TD}˕`|1c&+[Cfw*qvQJ$oRpH)A1鼭]ޙ Yw_{ܸs%٤QlӚȃA [&*f7L`t61F":߉7-lN faB)XPs!wMNm!ZkfYU(#xġҶ6)!/6Bj,,)kf( &823z/[G 5D?\~e)]'Q[Mz;q(1ͅ X%dD":Ly"e}Xj9LCt>m^T;9~PBuqRUW4IDJ!*1d~x&9, lMO~wk}3>oӭm#eй H/jYMHd(GVqԗ_EH(!#ݔcQf^зK6H8,X 8 zz.?D`1h}AD{Zjr.ֳ3a@wR!6ڇd3e ?DNIA&Sb2\4_nݴ B" 2}5ؽDLըItdJ=K$ ĮxHQ CҾØĂ^O M)[~ARJ^ƺ*?@~m:kutP2,Amf7Oe,ORI0&l鳍 > `K(::)c\!P&m?R4-*}tQU S٨K;B20NDKb t1sEQL f)V-\ͯ=0; #͘O|7_χ0|ȫs\B`)0A0T>6j Ҹ/qI{`a!)] ~@ E͓Ϟ $ _QRLV ik#`z!̍E=B숬[F)Q (gJ+C4wP*=\IzdfH?C0+jTA Cg|^|Sza }rd޶GMY~)&zPt,g3>_?d2Cb\sF>T2acRI'UTVs=?HGTu~d>]'bQ1eb6"Ma3DUc-ۭɩd 9JHO+03"}K~אYR,G0aH#OIi?KG/ ,&̥wG "[ ^mz$)px =C(=0` 5*@}m;b gWF_el Ru(*SrQLP|#GY>I Q Q4LN ]ʱ8!uW͞8|mt'ԷY9u`/ToAj[%^C97`{0 YZLuminescence/data/ExampleData.RLum.Analysis.rda0000644000176200001440000001543413240265317021124 0ustar liggesuserszyXw22d( > *K<w$1L<#2(yD [ks~y뼿z{w|-de%$$$%K1$~$1 s{mEQ+F00,"4*+4?# dSd 
C|o%F;+:"H^fF8x ոv2q d=.Х|aA(9~lW.c 1C wxg2`AB kuMB#Bfaϙj.Ckk'-O `oa<`rc+NY`Yar^k7fh?U61_L*՘xHy,ߡ=js^F܀PAXh&i ogcQrWatF<y]hW`ArRZ{J ,)jٌsZ˵ciiX!;jX73ww vQ{[nQqK) X3|F?a9Gq,=a-gzDءp V+H%aaŅXoQukI6C W.aW.IcePڧXy^\a Ny^kjq°cpUNcؕt̋U<'Cc{εCvs} N|9~.*wBRz/U V^tk…V`]jΖmX] +[Pk귒iQpJSrnRW(7Uf$íh p2mRi|T31k5iM[5k?kQץgva킙=X[,ky]/ ;!knC.f=ǂ/ #*xms>֬zrXے4xZg"?i|&w`xӳl_lTB,?Ǝ%5S40/V$wjT"@9L#aȌ2bx,h$.jzXWtSdS:І@ W8!Pՙ4]>KLCE^~/_Q"PGS'Wqn:;^ZQ A=z:pĈ#x F6eU#`eY#li= y^}!.#d*zl拐a ,r51Bg5v~P8^W*rvb^1לEqS\;XS5a  lq+6g kLOexZ,ke IV]AdlY#G0i*GC\ڥn{*POە|I3G#\(e";Zev" 3Ũ6ŸyD X(p"fgѿ.`DwUMJX/Y)/FDey( q}3"jW'LBf#;͈372Ԟ)"-|i"5y^"~ "EV "hWC8#e6`RiҮc?^ˣh+OA9I9+BܣҤ#@Š S F[H7!Vc}2'[聪wb_ߧ:cF,E|Ci2 z AYeDK$Lzp >zX ˵W(EoOgf$M OC¶:4W J݊9v\QNzXw8<|,u$j]^q b^@SҌGH `iل H,ywF$in׍ٓU6H|n $ITT#i)kx+{_|_u%UȘk\gHZ>$^\ I'5/Ͻ]sU"#6RHnl1d@1vV$\dkR okd8ɩkԍ 9[GɻIɭDdy OאI$]R^:W)rk޸!EuC..Ftq>ߐ09;kBʁZ >ˑrVSRwp)/+N@D}HSppR*ݞTˀeNQHu+ZUj]ʭ7d n~^o|7]+R4/} SZ`}uwYN o͑,Sm[ >aP̤M׬[n6"}ۃ ҽ2렊g&uHR<݆t҇]}RUxEJm(G-^OiX+H?I|,W-ŃDzkUgvwg?9nZR`˪2 pW]@k:k6 fw =IZl}ebkv*;ĺ #^p>1W;A`go>#Al%U`V\.죎_]qC4,ؗ#f}~H؏XEހf`ܲe8R[;"tJ>8j;t a(; ^Μk[$5cNq8#vMIxUWy]pBJ>8;11lxpRc|38Y붗#2p.8;(= Ͽ޾$Ws]P L\́jZӄ(Ԟ>$ ';Ps习((;@A%/l Gn(H tQI-8BR犂t?LyGU`MpDpU˗E#-?=yŸM21דz] 8dw0ZD~ޱ~gF ~iA3~,{P㑟E~6 _Eg+7yژNB =J[!%~ ĞWZOko@}y/Kλ+;A rxtA3/@xR>]:S=<#bw6JJ]6?bDȌvS?)~$orݹy$?&PCfv okŸ{k37JDn;ϐ[KsoZw)r+,l"q ~ȽoJ#Da/$EXJ?ʳtmC?w30k(vJOL:wRz亾}\-IAMdJJfyxd笗 oUH"87Nq')p/EV6K0ODOxX|9" ҿ뉝Oļz+GQ~=/TeBZ|ص0}w܃8S='2 6%_fQyT]S;'?P<+n}x#MeHcHS^iYEmt^m}WtZvAt䳈>+ s%EDeSUD)$N"鏢Ȁ(ʡ]/JE-_Q '&L\ю܃3e $y$Eϫ0pD9+T?HKSzڧDv$" Ƒډ%"O{D+b0J7c:#Gn8TA_X\@dN7tXyAjL:.B'r@$O"A*l;])Nj+a9y)R;{W?iF7߾egG_!gezޢkz LniPH)t&u)d~&z0ԓЂԠR߅Tn!Ɛ|Ja?jaj ttCUPPAżA BGV?zg? 
]EmWB7ΡB |\j"U; T:A?M]sx~$C퐦 R;tI H?_ ʽ,ߢJ%_ SM>CHϛHi&9) <.<'[%@PSɌQyDÆTB*!(#~$S]y&]o#Htԋ0뱠iwryTOr o |H PTʿ䳀?A)xQ?Rx 0~ē||R?O0ğC-(>UJ7%u P"FgO =tjTǔN>AOMNqNK`´:'|wQ=ǁ ]o'qΰ>o jg#?_ψW>7Կ^y4=ӹ&S<,ܘc#d{JZ}h^'{KSyD/^>z t 'xߑ*5{AMzu<+L# U6c.ǿ0H4\ Ccs6dJI\xx'_QڣEm;/G7w%'E;9{ !^3sn`RT_e_d~@_Q+*/_I7LCY﯈zE@OoάY&&Sq% O ]-h(c3E EH%Luminescence/data/BaseDataSet.CosmicDoseRate.rda0000644000176200001440000000154513240265315021256 0ustar liggesusersT}lKQnƖd&'l,y+F Y6ijA|DDcb2Yjel뺱 ALzs+^,q~{t:aT:FŮF%0&NQż}ÚGsbao"}dXaYsOؿ*8XV@Tjg oJPu]:ڤwuȯ'3\ЀSH&\Od'}mf#rB iЁH*"% > M1)z}aHp˟C8vwRoxhߛiK_T2})! M@H\s ۅ[ 5ЖnY#`ϨCU H@p6xޅZ @c e%c3fXWVf&N ?b?Iq_*tl?2Xbx#vD<8h4(4ceݼ`Uz0y@u^Y[tǹ[YiD00mYpLP(lXZJ/96E#GʚV|Bo#z<5̩L~m""wJ|kG,9/G(ҧi+/Ք=h4Ef#3 fcF4xa4#OFdS"C̤GF&9\n=uMVg7 U_vuMnjh9#j*h8O/V]/Vs %*j[٦oއ39!np; y )TY^£8ƺb_\NTBR hYvXE;ٴxy|aIb~z^Luminescence/data/ExampleData.ScaleGammaDose.rda0000644000176200001440000000110613417222471021257 0ustar liggesusersBZh91AY&SY*Qa>įo0C)lclJFILM0b1h4i 10 L&~PPi4 O@hhFhFm @4  4@4CiPgB6  #ڀ( SkPeEE7TSsJ<=r9ڕy ?^ ?Se0 }IPDI m$/[o!Â6c]H iг`J04p @SvI%LU nB"c 0fTDC>&"șF&o,tQlrNK!!Iq w#[Y%x3`;Ew U1!K9YAw HrMtm}+|Ŷ-I%A?AMؽ%QkL#玛|QՔ2DVF&(ߥ§ TJɹ#Q;9S"?OiXKaH"#.p U)Luminescence/data/ExampleData.LxTxOSLData.rda0000644000176200001440000000147613240265317020533 0ustar liggesusersKhAI$5O4}i ZŚ( EHh[UJ=͋I=8ZP¾TFSj"%`HxDžOHGz_?39ǽ4K+ӟ?K+WB/D/e/2zeUЫWErzjҫWG z @㍜D^fz"""ZЋҋҋkJ^6zuA^'.z]uKK?A?$$$$$=MϦgӳ9z=c<(KL.rn)G P]{D ΡN0sJ,>3 Pj}'&nN( N Ϡց)m`6. 0ҜG3fP ^'XUAg-(ny*Z˂8mx`<36<nH:'L&::!#X+OL/uLY6{B~n_Ŭ]01k4j1n6\< Ϊ^In0byV0fΡvKM(Ud̸̙z&o\u};5ޏ~0f?Փsެ1@e'y Luminescence/data/BaseDataSet.ConversionFactors.rda0000644000176200001440000000125014062436223022053 0ustar liggesusers7zXZi"6!X /j])ThnRʠ3$n/XsZTjBrꗝl,!}!3%^Ő3e4({4-XViGOVK̼DmIq#G;GA(G%bPGuxVH|(ݮ¬IhQ eR7)p0 ['U;}Йw͆io 2׃|Ȋ/|cxU?)7QB@llݜ5JnmxB.6En0pR-! 67:-|8i}T6΃ ByH^>0 YZLuminescence/data/ExampleData.XSYG.rda0000644000176200001440000016120013240265320017240 0ustar liggesusers7zXZi"6!XC])TW"nRʟu,=!zKnbry}aM8&İ֤s0ɣHEr]! 
+}/e dmL_Ƅvi(\>p@D4wAS e,|@Ĺ!hQpDC[1!BgJh"CRRd0mNh(d٠#8Z&zdu{aCOo.Qgƛybm1өB&_|L{dۛ*-\Ӕ=:uͮ_2zEZ6V#8虨MzK2Q?TTG"D(b'drooFA"n {GjhG>=qH hm ?(tTy5]ڣ 3' ej>:$m nܻ =榠i ya/,8:_S[xts휉%oi,IH#jQ1,C H ,MmL/8 h%2JxV`ǏyjKa}1= g ;:j%'>Qt"3zO{`sO[uI΋{/*f#X7͆ʿѕ8Fy[ c( 7\d0[U?-o1*.VQS-%ޯ+dwc$zBp&Q- jLYU!5Rֵ٠N uXI'Ay~Lf[&, L(xʯTj#[6g]Us@b s]Q=lnvd%42/_`vxTbNH*ExH Dzm׫,$yh2Zoc|w",Egy{Aq9jbQ\O)QNhE-` ]OҖ>ǑHdᬀ.jx/l2o Wv1_sDdCJŦ"2?xg,OAbELZ;5(oB7W Uj_ndՙ59sLSKT\>L ']<%і d>c35Yӯb_H6 BYi½i |ST+!R`O3ȭbXW$RrN%<ņZҽ@)3EA(Fo[$Q-8l_0Ԁa'qfXWItpr!4- T)ܸZR0ReGB"!O~d]n=R݄'v',2 \`L r\\QMzj#))HoM~^n_dA壘#% ?wj R i`ٵ3-%a{*dB\8P |ƨp!e|an a8Zل [GZ lCv!"no(76˗>W! |^(ZQP Mpz* WBGuc 5w~ Tg. +Lm6a# m./xɥYe@C mJģ/O(D2BٹG68EW#"MystwI@ -E md 3|\楧ʱ_ev{zᚮ>pp![ cDcPbgQEj]$!:$oQ'1hSH;۷?&_2%CpB|T֩>ס9-ѐRE[pUB;nW_?t.EZاaC_c "X оHbTy{VNrx,qِ]Djҿ8IˢI5(C29geXuXpmV^yr02<# jy慁;?))$s6B~*ɉcxWp)Le6( 7́EEke;k ԕc/NkǦL"Fѧ:*Bv##DWK8~.ײA 5Oe,̮odIw/h!L gEWOn6b  E(/㩀TYJ8vx #.P9c@Gg>BtY6EU:zl Y:Dι*+ڥ* ƳEm5M=aw[aոhZo8A̜+O/&klM "w5T3c=,s,Xtj:+ E_1mh8O-)K&v\hKX؏AM&(](EKd ?6s_}HU/hL[wl<#t_v4d"3pB0.%2ȍ ROVa_sy^I&0Y+][r8ڮCP sS]㘟DQ1ҤTرO=quUYd#)dR-K ֱ(G_ZOZg@Z4vO}\6##inR3ΟK4vx?+80wHOv"ySkjum]XvU1~5׆=u7J$ d_CH.~D^]v?PA-fkREX<+b|uq?@w{_QThLޏ |]D&q5 tRAHՅPcm!N0)7m)d:*dbŷ=^C$=)0ajb]l0׳d~&,Щ<{ zq@vE'@9!~ 3~Ep1<%p#@\ƀPΑg"B0X$uvz)c22eK&)G'廖Krn/X_OfkfD/6DfPivRN5#UlӨcjۊ* WFq*zQ )NPWfab*[E۷$XGZ7NJ?}+ÿǜ{[,Wt]a7S#ܫfo(ٹMx44eq +xd/(+V]_@?P>*|xgH1Faew>9ߦηʗ*_`FD !{ď.v9#oUq֕>/=~NZvx0_W.?ȋhP}ui 1<#3ɣx+-ƿRAh=3b4>zY"WK"K\F:N#J"~SW[ԹV.a$<ɆF/g1 E|,iMf*[A%}c?`L+c@).m DKɼu7wTF(z -jCN!AE9kYRӝ鈋;;)[u[j^PS.~ l[9'bGu*YRV&^"Wqs@ҳZtEU)|)/}En^1<5ޱo?EѹȘQq|Pq?֛i)l):n.a8vQzk_(>N 峽XrIVT'18Q;_n%<#jGYt @;cq(J9#iR!k' HB7a B.5`Ѐg.9}1[y?= F7I0pFsXT!eai*Wbؓ;}kdI`nop ȲY6{q_8^y/1(9W\;X/6@Ӎ%a˅ڈ7jrz=&V)*9&#_.hst@8b +e3*MCP@V/ҝyAiĢ^!.7is7iRrw:z&aS/qby;/'ms-Z/O3S·Gq;%8%t @x"[3V<_:dQGIeϐޙIWs$jP)jziޯͧHGeks a8aƠg{bMO8e]")؎DiYs#BqV$,(I#QX"$|R_DJYψ|OӒD,wRO4σFS㚧A_[ͣYՉh}Sot4 bUBU+z4HqG1qfjm_X&2U[rPFx*/ :X*)7 2sSõ͒zR$aZJ|=;~Tz5K "%;PXGQO 3FL=0m׻㡽}.r3"dJ 
^LS^t=5O^^lPXDyS)ԑÛӂ9*O2@HeqTG2I2IIgԨ(t]-L+'A6 wyX@d%%vH{x P|HxK 5ǂ0و˽_woB3W.~8gY՜+$޿wv^̤ٞ$9'!;pͥQNs<I&M/6:O--'q+c/Ӓm ObԤ؜w1+(w' -ϓYmz܂C8Qb?zQ?m+o܅#=|.+ ܧw.(-pNވ@!P͸rD'إG_8,0 |řY iKSnn Ig@ 1:CeBe3@}^LEcEȟ3Y!_@- JZN̑|J'L7;:JO@-6HAP@_̸y~%4KX˳Vj݁3~[ޘ~fhiĝR5 Mt_717Yj)-%ZZAHOKr4uZ [֕>&S?.o^L>y&?*9bEx~øhӉ., ~'`xă\wFq OjHAq.Lj9saK{A"L=x';jM ;莳D9n[d6rRtǨ`9TE?~>֬tL}x;ˎۑ^,0[IԧeBZ xH'&ʛx矂yevmnc~ Bu%*hR_ X< Ϳ+dp ܤ(\Xk`g2rk+A$k b^SJ.ƐlcDs']FZo-^yr!?V_`7|='Hz^1UoQ`/5, ,&KU2+yx vIW!Y@pR `7^kc\vw;*< )1(QcOt>HB 02Pr54`o|/*QxϡnQ9 rX.g b}fhP,͞(]8I.4s磦2"_3L !v؊tvnNB1(:*tD6KxpWlVTY4]$vQŌ arMOO Ԇ:7En7v^cT_SstaW)$TfB]t 6uT) pE.OSdlCRl,X̞&+rtSոj_` =E*s lѯxOB-si8$m՛FC@B>A9"|+ T$o`ĸQ* q~<M$P|ZvZo A旚rE:WGO%Rڤe?>n<~LͨD/e- ^Ӄ?ny|yO&`|Z.!vU;T})`<;,iUDڅwn< s80676&f7&/6Mh ,G rSX2CkAB|!&@R,G9e k$uCx@ZeP.[Bx)8穣pW ekd*a`H&(ndfUǥ|8"LXxcQJt@&̈́xMD 2ސg>;P%J@`H7F WuyT ǀ1PE%m%Y]ƸtY >1g4oK,މOG$5F.A[h[H hUq GX QbK{5!M[7kܼm(,Cįx9,7FZ麠ft\ADRt+'kǤcefDzAq"h,j8^s¾F#B3qh ڤz)4ڀ:pPG\UEZհF72nomK|Lqp\i`Xõ^0ښU/C.fIf3"i-\H7}I8u#hF|op=KnT9M-3-~8.j<1]AXyЩTB:i_ݻ*B'Z3pl iw'd)U?ʫIpCX(iz[KhȀw@Zj)r$&:DrxwMHKUY=>=ezĆ=\ȯG #fOht!H 7?EPрރi,XP\3IݓtQlZlfn@َ%%קm֋{})I tAzoni~)=>"YhJюகҤ mwGpf%~!Ýs5Jj8{=X-M)~@Ĥ|g<Ӳ _k≚ pQF?F똙0([bJVW/ |] ~//ldHxTp\)}kKC=:Hqȟ{+Clgşh97g@P xdW.c[]?Pʨ/?&a2OWF83E\Py`;BME?j^;D6u˱Ò?n-m_oq_yx EӸ>E%hq?2s;VS;ȫ.t=a[)D@ƥZA3Vse հ\)^,4t ejY( otq$vm_)hZU F~IIT&lQ+cJϪW16^cg}Xe ZfLAT`EaQ[+mJl Ng QV8Xw =7K$&?6,gqUX I)"_![k@-Sx'e;+.ɇ-*z@ c+a>J .usbխWJ±r5G9|#I}auQ#)5c}U^}Gµ?蚓Uk'naعiXӠ/ V'_H>-/);J';T1ZϽͩ #su]Xޜ!K}U I_pLJ# \*UFGG073ڊB;"^O|k趨%.%uDu:Fko*P%blvdaѺ@ʿy ~~o. 
V103ەY@a<ʼnGG*>э`l* %9U^RF*1Z6TyOީĵAdLnڣ#͋^W%U6}&ŏ=%^BF:Ox;/GVE$sb.p{8)JI;וk3FHXM5pB# ndFG%h5˪3OM?1Xz k5eM}ɐ }Ȥ7:ѐ{MKs/BWZf:cX1\DF &}ǗL#th,[A`nݍl.5BtY3 䜱,EGr#`O:'U{XI4 *6?K|,2O^dM+-@p~D&1t͛]薳jWa ?y=?G\UT=̬ $|V*U8QehMF[{ T?n7XzJFg{@ "GpL0ICe䨕ӻ_pC^f-[%7,]} ҄p*69wP+JJ3Pȣ|T3bX!XALJahH꽻.S}He(pZW'CohVD:SPO[2vP~8n"xg~y'μrQ1N4_K@S)'l[Pr`hKс®RlE;rb|-BT'?d#LOied\ Jl`V0:B>F$yʌ Me8TKEq45B9c[óJqnR_E>B`R.?= zx ;8m\M÷ B7@(>ʲ39 ^&$Tbm $  ;hͮo)]uP"VheWvϦF}ۉCƱ!R7O<]:GD JLvSvO.>N7vyҭ&`# AM͋(#|9a;̏8C|),8 YaZ/ +2{Ge4o21ٺz VD a<@ʮCjxa_4,y \]io^[%Oq5v'T~ޢ̅Hy>F<"̞@ *(6 swt`-:#mzܫMtsz,ɴGc/²Ck``-\1VbaO_rTx$sJJYZ劦M%CpkP4R:{~%Y=Lnmo=`++1ptYSO摴vF8xvh(C^8+!;n̋y)BdkNhcZx0wG8swoI#czy;V?#/̝c<+>ZHc7b#i*RCQ.j7^^6)Q|J([*a9B}ocUK[V]s.-B*vt[+Ծ31EAF7*I8S$7Diu%%1~16,G`%$\ij7 4ç#}쨐qCA0jm|̈(a,F@p"33YRߤhuPW|h&*ĕ BU]ys'#?-P¶QImP-)99sUk\%"1C--΍XTUN)sg6F}|-P8ےhD%J ^D{8hfN`0!AIf!MdJAݝ>ER_ǀ6c̞Y ma%uVW3)z^"+sȺMPW9fv'.}}S0sD!J-}UQDʚPmLM9L ЙBgw1QHcAauv$1ὁ=J"%Dte! E*7n*%FxwF]ZA};gݣZ 5[#<ǚ*̌v)-;媜Ag+}{ˇ(q@Ʈd qNN>*2!6ƹD7D%2W RA=΃ˏYI83w#f{F9Kq Oq2_{-JM =Xk}cV^R9a]W7FnHc߫! AunugA|\ubE<WӉ_t 1rVj<_3oPaFm gBŏB7JBÒx P["1`W*Q2= *w\a3uTwX.h ]` T%8,^)ƀ`+c0mR.݈)p}nxEn9~ԺzdqQٱ巼1+3P 24b#\> Dp? 
A)mm tt 𨀁!s:0(0%T6VYPQ !!{)"c{ f2,|H8iCI*N%iә(wos%[^=JLJCq,P1 lھ[ ٳ<ܨUP^RM<.|,-M08B܉cHC v^i >ŠMw Z)-?"8B]}q?rMG,9QgǤD~ACMDxPVCj'G ـH,a?Y/5¨)!ςT[[Ǘ_^=wBZm[ T>兮Mσ8i#8p tǥJȩ=X+Qg( J-ޏJRbFڭjgWA1YwӟU]Ch$aDY4ٙ|E)Oo0Nmj4LrRAyұ9qIuJa(!$Q6k/PrGĬl:K;Uq"59L2pA5>źx}HtUCUd=y*!R;iŋfqc;WLsnΛȸPԑʈn Ta`N`#E/TB#GYsqǍ";䞏rKe_iSc~+JJuG +Pl]>RJ2@T_AǷsZEdΔs+XA<17L8y=<'6ue}#Pew\g5iMTvoi6r3oNK5az!/ s4-re(9*]} qL<0hXSQe1a%DWrU^p 2ni[,[;aB;rt  u1˷Nd8ݶvF7EXl-46M}mht[~Zmr^u{p=恀^'XnZq C4R}Bqu:_#^([|祧Qt *K]^1KS_ crh+'/xڸ%@; 6'35XXqz@jLMc"eq=Ok<7 *T~H^77zL96"]w鍳T'访 N1D-K5X>s''O(,7~s7 I-U5` \]wALfE$ ` 0d|n#ꕒ'wB(?fƘm5J{]pM]bͦD|Dt/]J].6INY)r7(2<2RlDJwT'Xiٺl{B`TcXz^.tЍ9g)LL+4bf,䣮u¨588r0M!/[ c}].n5M9CMפ3OoҪ5؊:D {+Ɂ?.J)\Hz)\e 1;YWNJz0/?LMu^5-ߣ{V`O*HP#0_v+I_t/N:ice%OU^M~U(n”UJ,p]qB"KMĹCG}k!%=aA0q+b(z4'H~G`={իu~L`pOKu!>2#[Th9*}9vOᱨ_}u&0WrPP,F,;!UyJ#J)Iμ3 Et2#?z13u 5N\vbr229ƙ-}1dSM8Nn i[n7ʚP2uM*myUMp25.5mV*UL=0iUh%3Ϲ͋^T4 >h-7DmU\ARhxJDLrڑ0HQW -d6l}gխ5V n/WS$/^Ypܢ禃T"auYYWi1ٿ5v -bZ¶Y 5^f 8X}tev8R 6|eVA+x4pG)RJWVb8ZfnsoLsb@%#^ kX Jkk%$Jo1 cX9:wwILOϣO˧wQ.:_ڕ#*ro- O )uiL۪c [6[*ߔ!-ʎ [)&}(N cCŊ쾐C_3MsS402:,,iLW h3do56R1Z@}.?ؾEXyCtzIOrz8:r:c sԴ6{㓇&Lpx_*f* غjEM}րXaHpt)\tnN70d%ւ!]>RŚiӘ~fn~S$Gkf- !՟g@.cƛ0"iP4cA:p@&ԠEH}vxn<\_X_#ZB6τ 4%8&shpbu5B90B2iDWmiQJ*-!~_h".X5Igԏ!TfBOPA%XZ_NE k$ԧO?*ss_@yZT9}p<)j<+sWO#%-uЗzfZn,egͻX'ԍZ4}k‚^;e8?tH%M9Rgu-"|,ʔh%ߵ3:` UqesӪ]i*ߎigK7i`i3' /״Nezr#@%mjõN;4B`9#*4 syMṯNO`UE P ͔M^BOm|0[%t}X@Ty%*r)'m9J0~i j5ho!`y'$!@X>` NUQ9,\V3hPˊì+rp6ɃU#1zo+iW Ecam`ԓg&߰0Ck 揁 |ǿc$bha66ӡ!G-WXM_{-;F2.0r0bCYk@a!XȼVx8e $dztۗ9{d0s0ˑYm{'I0ޞ Ir(!׷Svb0 xꊫK]\4>|7ѤQj"syNxtW9,bM9..n1B"p! 8Y Ll5 s6wLT=j^ˣ7H)d&)N|1s^̇:1H*+M(&|ܡ#ބ^ķ'ky۷A?8 ?JjֿNvvw)-J[hfBKu=3NԊxMOAhf9wr̪kV;8޸pM5~G TΨ76Jݖ _ȅqPh*5z=Jfx{4ZFB)ZsR?0 %V U+Ui6}{*G%H*`v4,ED%_sl 0c8U6%^jSYʖ ,B!=ZhMO9*/)e&?|̇{:g8@AbG l*""G`#>ɤzR{(}`YNZ/hOqorG(_y?#qr{0c2:?"pߐWP@v+W_dE28D&9{AeYm vv) kKg(7FIZ#~#rUr9}Q5%aj! ؆ ppEj 9 s/[.d%-bh~gGb0$Mx掣m>>i~h2+hR1@!Of8!7P, K-GRhL;5%<fI0|,]mWf1탇lxz*pEUZL schȧM0:iJls:I@_D QZ1%&k5zK^z tX3Yd+UzV,Ծ)v"[c=?>9ByۼFb|'&=QF`. /YY^^. 
(v[F:4i2G 6yA!-cjyZg{+F!ĤϘw D2S*=:e"#v}^2S-H!A lAqzRĘlxU`]a7υ{ Sg)+%%r eP_ 5${ZxjUM5*fIgQ,;-[ :D4 ,9} m\g>_{ސP x "U$+N1ϡ} X'%=B)tP^>@@IQ$=7O:zqm(ǛM 3RpAvu]%GTCi * @ifO]g˪\9[F֒h[FxH] Wk))ƥGɻL< w WҴޞ\!8+hBqYMme]dp碷<2ZBݷ  5 vS 3mN)5*[z# ^Qj>~٧R ē[ %~vSL{_H̯T 4F~4xc{/21q% EQ`%Tc3=So37wƽؑG᫻RUT:DJAK-ESWLcY1z^ ;/Z>%ږK+Qr5 4Gռs+[.DF")ۊnd3†"bcևkD23ɰ8iW bU hCkQvAA0/]2;䱖`ô/B *r}/<gQ.ብ|@qs $dHd&1'l j}8TCNT#Y)fX;g5YkB$1@)*WGC"WL~6oMJHѴ0|?w5jErE~ V:9E ьPH6kQǪ HcsЎ 6_v;D}tsA!EH9:r8(+i5a?* phie ػiR}ma/%e5 WiCQN Y? Vi|ȟ$dCaLI-;R0|s ^0vRǦ:җeQ(}Y1^$` FI#s]0g!#Qӵ3yE7WvN4ϡyAG`Hj-[?^.H7ن n %:Ӥ;0\`mlȻnv;E#q>LX#N1%l*.S:MM($*lȼY d9ę=r򄨟Yufȉ>{|(pBEpb 0zlQYDcoy/R$^=&pn!Jޕøyk!OQC^r{e#.X]%ūL^3V=6a״|XK(7LЇdڢʠPqA wfV>̂ч\yaA^iˠվ՘xC*ztk/,bW_-y+X&$-4Ŵ)u _gJrvlKu9Zj ѿ(a:xѳ2$yGe2;pAvuyb;`ʧG,:0jyO?`"ٗg.`\%H`)i\ )16F(bH#ml7E Ԗev-PDz0Uw|A/)VuZFcB4 's%4<{HT BhH e6]ŇuD.RAV\ދ4?D96&7nY* 0IٻM)ŲaLPkwэ/.UiA9Bجє%-K Ƿ qNõœV0eԋڱLU!;(w!6~zˢb̶};6Tޮ٠rӀVUz70 ,,0?~!j"ĊLpd+RCYfp({=A4< +Kd)IMzifUg(LckiC+CЛZ]hFx!HG*呌ٳ*y9ʑG2sqWNxJ A3q'Rd$1DECJUOQ٥޿CS"cI>P7vM@k~B<ɧiRo4JNJdBH[B8AB@.к AM]3QlSdVT;6:bM0|G"=sRi;x25sLr[-m72KhōS_E_%y|J+`%s1eLq>u@/Ɓ:GL;s,8TվqQZC [PJ`i&z7W8+=_ "T9XbTtgXd:3ۋ]%kFTsߤOԏvѩvXijBw gn0Di¡b4nFc38]HγV*%2U~'򲀶vF㖂ۓgF(wxҴ & VҚJ(aX<xn"wXټ=pPh?Hxшg圑J-)#Ia#ulh'ssx j ;S996 گc7ɃNЭa -D|m/-{qv)\dKOLV߽N? $zYˇK"I9= cCuV"b1^]6q rf)@NK0Bhgc7yx7iJiH]O+I %Dxbƶ`Gm0gӜyd|sJ7dNJʆ볔p6QZvU"!xMю+롻֙A| Ͻug a?TܴmҾTYRm߰n{m㑜SpFñXJnEfݖIAq(kʆl|iSUetaG{'Me@DW6kS.st)'m~}f4K-缗@I_)ҲV,EUar]^GQ8/{Q? h8&''rp5hQ)"n'ߤS Ś yQ*#Tbo ő-7}QdEZr"WJDh'5{ٛ+#DZ2{:PA9vF2.?ϵ[#!Cی٩rd`doypfI.-Z NXyGשp@zF47[Р^>O@H[rx휉ܓCՑKDƣMt=[ p YIP?HdcmM} SC#G0qGikd~C24 јC7ND rz<ވdQy'p#X:i=XjVG.Y!(TX`$R9=4 LnQ!.{k-&`{K"fqP,)t|=Y--|V@< +%j~z6C {80jrcv2y g rz8Pf w[ 'Ky'kg;&L0WuV&ژtxg:$h>h%㜡s_NX'01Lי.n3Xξ!x .CV#!, ![vY 0g'łٰ4ޢzg #b \\n>Rr "#KGo}cP&p.,!+=C&%a.6ʮe5۷@X؝$ȴosL1ޥ+,XB\95 %8fYL2|w 2:FρṼpqM b}ԩ[pb=g~cY#&Nڽ. 
:>\GeIi >:N]SPԆySd~>A @cS45>ƪ P-vHawFZը\*3eUuXt1'$B5GLm6N(ȫRv NC2sōV=jrDJJ Tխ-QH̯*Gl/db+fOo u/zӷ>7#w7= +NSp%5pJK:u 0B)>UڶX.*W{UG?A 6+ǽz]sYxͦM 1"Wi15Mfp+>~.=MɫA;}P},hc8obt{MޚD uCȏn,YH 3#8\O 2asa@Nm29.Pzo<MqMN1nnY3O5J&HwdGё&QA6L&h 3TW4<HItBʆy7XYe5p!2>x |[`J32-bI{P>˷Y"?-.҃5S3к3SQ3.pnCpZ%( U%YGlX'I]~d9<][?0F p; p7D^,"!'چ&yDfȟ"׺?G/w8 6ҖPQj+vǖ9{2Ȉ}\9Sѝ±ġW]K ̃-mܺ6E᥋Rг4l h VJ6 UE( #?L#) G`sbF.↢Kط*{)\4Z7l Q9j-"q4*N(ky%lv+@ˮȲaT) ``ܰNڋ+>AQVg B[0)w0 ''6(՝3?6Y˺\oB{WMF9XV5BV2 a)hk N '؞p3 V Xٙ` xTIN~l+ܨ${/U S詫KvCYˢVƯBҠZzM;goph^_n8Eٷ/`&2#5:5f{(WZ Jt"S:Y]IY5$wS\B=]fEΉ ._)R<6%A4V81BGiJxKQl9W$;M'F *}=aRi7d-.EOf6Ӊ}fo=7}U-r=I/(QpS'Nz~FH+p#擈܊'^DTsbXVSg|6j^<*Mʯ&r^MI08rkԣl:Y,nە;E9Gٚ&m$ E/s)ZBf66Utx[źeTqFY`G 4HF сS ׂߊU\URw){oFm ,wfMechl?愯;_Iy|J[`4ْXL"iL^w^ :u9dА5N@Yux"dHs lD sT]ɾ~]W}HN ఏ4 `uȫ5)3C.SBH^V(H=0sPAKyF{ [.%l kM8>@n,"6u[? L+<,x`T&v!zkCW^Јݒ@WosRenY~P_e):KBeX#Ö>"ѻgWY}Kf}Y |X8M'(̸7owpօtBRW 5flqKϒM?^D(Nj(xoe8/`vSU9BT:yf&-e.|-6k.oݕcxhTzhpTqsz(,;{E+D f5M'ҳ:e73)i0],q bO \ST 2Q&WYw|c>qyϟ4 i-:F͛C.'mh5r\s̓cX;RJ;ka#ĠYfn}9fOЄաV}|xi3%p(F"LIN0FQ*[8Y  %"O& }}lfAHt? 
sϞq,HRXկ5"jRPz)a,Ll&AcdLi|ms_ Ɗ3p/[ϫ 6{2?MWwbOKb0j'Vqω3x5$l9/ưF'FZ CԠDv'ngq%%M+evGc2lH{8mTLjd;x8k)]b7bv*{K=E/)kCÃ"$'͛u~S q2]3=u;X=AjX=7+DDtUCifJzE:&{B0rG31a-My _5G *2]8wժtٺߵD&)ڛwB"S9/9# <}(jVE sflTSTĦJqi%sV_q*z| JM86a@ח^}?&'U6cnنufuA <9rfήdR |(!4sd#B"ꈴ{" Pч`7=OΉz5,1ͯQ݄8K_ = v/@Š5ۘ^D?2'je2yw&ڪ~o=`N9klȧE2"rŃO `;⏠kffb `GőY@Pa :];i,DxZ$(Ͼ-wPDd;dLmնeߴ驚X^mQtN2 <ƸHecO,Zn'>6hS,OىIy(:qCawIvd}z5q/24d,Yق@"Flj"L%hz_mJU(k,KA"J$cg5.Y yL j]cZ;x  \u' "Wj1Qdz|^mMrBxSݘPl0yKJo?4SyjO7ۂic߀RF|ZxOX㢲-tIٓ<[b1nr%'dU={f4oj#UAr,c] =[% }z7CdkQ 2'ߍǺ$4wF+B!dBt6*y_:P3:_!^.htn6BZZtkÊǾ葁 ngX.0hF8 +;'>vMN98^kڙo4cp}K^iR'RfضcIp; ƶ 87ɷ194A/Z҈)]Ҡ&VM~/Y]!BK*Rsd}EJ~%&  yԒ,KueFzDz< D_\'mEsA}@m0\˹ Ģ}m2ڸ,91£gw(nWTC;[_["jH^ Quhr0`]( 畇d@:̥,!A@N<&J^v~RîrRKa-h iPauOP'QɱYAE:=c ar,TBUʒWcRv~łU@[BU4,պ8$<.E:MʺNhz0Tnҭ7Gȼ!Y5Mb gl(2 oX{S =dayTCFxl ;B>ھ[4ldtۚDQlXU׋IdE0Bx%T!}/;}H3z=?;Ym'F26>+Ϻ NT5{$m:k0^4hMn+wDDO9:5W[c܂*Nb;$뿼sL0bKx *ӝ|j{WHQ~ŝdO Q^ ޏ[RNf"]jBlDV0K$ns"歇//7Ki 8Jijٜ+;aM5L%4#A_N4m IyƸ?v!|t2b'K}o% -'xmЅ$Qoi0౫y|\U[Ir7K!T[!5}zw36KibpwoJ6E6\`Exxq# q, dоƂ#7쒜,}K~`00hKڠa~^nF Jh'[."Z N#g5J;.Ujj$2 4ckdy`@;IéAE\oT%gJg H٧8'8[Rĩe8|%oCU-7R lSz$.?l'(?/dфpe},*? 3M3\=~q؆.OX}KxMoW&CbH7S|\ۀUXa2X˦$YS^(`+ ^=ߣg`bb7YK0X_#?k{]ƈ$esama/=sM)aԁדƧ?|2y݌5Do| #gNKon6yС=UHMQ yڞEc'u( \evF}tOH(c: L'ɩ'6/g$).++Vz(|еj/[E-Ao ?+f `bJ=qAiYWǷ$;;sEc)hد#L IvB ? `-^K)ɆۨʄWoJ \!7F'N> .i.4K!AXˠlu[`Q .Bɞ&t)d΋1?~+-p]֌2$0 OB.džGֿ=&NQ'vtvjJkJK-&ĔDX]ێ_k֓Xt|QH\ofWV8PCP~CPaabKi=燒ӷ;HXMє1hAH]D'cZ-* ؾ62jJw45b㠢ӳ< &Bxv} gߥ$ (8jn]v7SFB,,VNCGUO M ӳX]9B0%GOc|-Hac3m82[I{_}/#6'Fϻ-PbfFިUCVX͌nC xHfϞF8.' TBH(Lg7}Ck_9i Y,.F~V1Lp^IJߢy%{əTW4h@Ƀ#ۓ! 
(ɹ6ti!1Ϻ2< ;e=]TK^!N<0EJ yhƹ \m) }{NCkEl>ˌ@{0[W_d䓎A/uT?21P2=&U%R@}tvu -.c]GJ0;n+8*㏂!*X@+&_dDIH8kSTИH S|Z]3sHẆD(#ҝQlgž]"'庅 ^׉O뽉K=7_s!oCOuGtrmI+@EM㨿r'Ŋ| &=dw)Q`dkQ2@ !SZACI=w`Œ# K]gL*CKF?%xgq[ ً~RKa ut.碑-WqrNo(ti3kiecwz b15o2ܬOs#s`FŶ^kz6{0%&'% ;u-kSE4t[ZAa`aٜ8ʱ`=Dz'Ҝ*m<~N?jRYXSdE\@򮇵 FItX/x s![8̑bSHRY/ @.4I #@?EDd5b=~AOۣ=uD|{F1;_2E3 0F;^nnV5rAgO] B E."8jüX/O_& *rՅsCR)c+4;=e#fv}2!9 Uu "fQ޼oW3C';~Gc_"zsT1^T!8 [ax4d v:8x a@.N>'8'ZtN#/;oY#'E7@Il?eB%1 Arz"u^'vfd] Mazj'Zu]^KQ MxqPJ f[NhGf^]Goz.'hMM`d_*Ce5n*Dk@D[W׏uAQ/(( x-ZO|6?ӞZDhj\6*0/r&>{4q(I<MuS=;_dmr+lCS@@T kRR΁^}{X o͋5L Gy39m4МTwc@E?묡I*`{!u;q.C׉묡DARi5e"icrP=Y}wտ ԰P -1kc(B%sH;%TRdӳYjVgs"5<^*;b-Dy|xn,r96^p 3{4;4{`^0h~ת].9o |6ybLu +熃u9֔ 1ܭ.^(Z`dW.4Wa0Y˙d"7}3<Ϟ{lN;l{T;O_QB9,ׯ#M0 M; #ٴ#Hy`Sqp7]$t䚝BPWIMEڄͲs㥀~yR#ѫG@WWx1LtNLxd-cнZyx'QX0ɲ0>{)4*f#0HO> Fx.O:^T2"|D{uzD$ksr&Y׽jS[ܒ&3Iz PF4>a s c>`ѧj/HZȻaڠpa3N S;w V) !EnB겙V/P["|>sh@#'Tz b>3"/Cb;ᔷSP bJOȔ+;d|1 _6+]$<(m ]dzϽwփS . |BnB M]^K ߿T_#h7 봄0P+`Gj,a̪\b|4ǜ#gYnvK@W3d`Q0ei0 n= 쐚W8.6aEmRŤ{}OD#:x}'ByzYq^ḃ,i_bFYw_:)gO3^S23"7zJđhvr (b{P 72fx D194GY<ϧGN5,ɋ"UL+0:NR߈nh𸲸iā_<*_Yp1͛@G@Bx"]B$/}{(lnWF bښjay'mϯ ʺp&z P4Sm,Q2Q=sHl5";~Au:Bѳ< #;"#6tv-)1m~lF̝Ʋ+k?Bև mz.Ev#JF}*@ٖ E˝|"ߡe>?'SC}|n*ɠKrW}%ݼfR*<@!B@- M8ouպ1ѤJY<!ObAqbM>ϴ U<<e GĿ={'t /i,5>kN<3#F5gKR^13:'H:lenMYs:M#f q=s?7 -*{2_vfWV_*c)_Lha5Ph@Z`Yޡ)AY%,_GG4ܹx#NI |bxf Q&}wMMIe VB1|Ee 2h8noGޗ^K+TqZK gI{1?RZ99$#d&mg&kֻm IQHOٰEV7͊$Q۞ =xfOTr#wum0.aXB| 8@ov4 maNEWe.z\K)6?go=9$qW)yQJD ۳*vA*wB1yi{Pq0qi"oGoXuG..mw@/iUQbGgsUȼ9Ҫ<8M;bE$NgL-ZB^}Ea~[mQIr !CM",UgL@\spUjao+BL/fG`ːzGWJ&œW~x/>UoZ.X싉T%ͤeud,VTl~b)eRZ5}X.K]ߞui4&"elY1X x |xW׉#r!G ɕiB#pe:mOmk!21֏)U.^ZUHT6tȾtش? lo|Ca;2;@ =זk{[

V^ 36`TcCF#56Y< шsPfB@#Ò:huԼ%$˙w%GsDžզ@HNŽ]TY"Cr|.7ƶr<++μ1.;Mq؅zs 5͘5|zS99# `vvvyd9nMrmuݸ! ́#e>r^h\w5q:" F0Ab!)Hq@ҦXgy-%2]WVŸg" J<3URq,W*x5>1<>3]ؐ~Z箲q*ۂ~_0:Leu6wN9D='%9|QmQJ'y^z f OW79fE+DmhOd38D?޴CzFf@ۯ]P T]*H#VS&X7*$4DwޯA#%7kT`z.tAN^G]/Ԑ|rV$g6+KbR#YtkgsIŊ8LCW՞ fGd0 5 Iq-\#/&zPݻ:![pV脝\N )@%m ⺑'WL ٳOO1Nlo1zw#: @642&#\ )d,^LXjn!14bB*K#q/d8:hTvTua#RawK CQ1m@3 nbHK'Qm9T,vW`Y`\p[. ЌjTϱp̿4a=j+b>V^ WP|\r#^Yudv#V:{"~ql-SIS&@@x+̵o͜g u(8NE>6h:((š.T6b#%8s\yo KݦYe WXf+{QF/:o$wCɩC6_Pm+,[ikcFI}_\_KRUc `4nOCfM hni>*J; JI$s?ź, ؾ|/nH/}?r^5ܘ+yh[%ŹdMŲ_[/QM3' d *VnfK.-, Z[Aj:AjivlH ƉSbZ:KW#SMf^<|uة%TY<癟84R"iq@}"b2LIa-P\`R+u$bDm_x`+H ^(0b.-~?$YxG~U|(=G&_@)'.ڹQ m˾ QNo7$K9ʤfFIԗw3 ΈƅS#%.q;甛r-z5b@8FzWYYJzCj􄃭}ˋ7!Ky< M9r_':6xt$&>B%F!<ԾaL5b}[u=cI׍p<>bQ{K˃n( #rpMn[ÒFeYv\!/<?h_ꘫ& F}McNcyзWgdy'V-n2yfinPsE]Ddސmp.dt؁ʨmfs+@s!,S~MH(v\:|W6b uf{w[/3lzPuIV% c ?/N9vW YCչvI熌_?Fm,"YdS&`bp;4!p$e)V0OZs:[>5;Wm>nKIDpdADU3mٻ8b3 *(&x̭e};m ɧ]v68nLMPuZRQ.ueXU 7Mz=,޴d5!e;U;=#+<>Sl띧Q}M~t~k REW٠7d 9dHꬊzQz'|xɽ0սm gu]lk 7E˸JF&! 9u+p-:?!g&9B9w-#GQ+#eǕJ;P ^|okH+2V₻dCAA#  ɒRNNe^fG3suN%r͡/MQ['X`ҞICYGH?tdp}Mx ΉNK|<ƏV]ǀɽo!3 V,œۈR;mcwrMϧM,H| t_~][J6GA2} yo4Wf.ۡҤH È*;CJOPdC=Av(v10XǞ5c#EMؖrr[B덫iZI-GG(wKyȎm`Wz 0a %8B#;ӻE0?E&.p2)OYo:|hE;b*w/nlk -/V[ QtݙKg7wbKq.x#~T.NE;JC>Fjt{۽ug<=(PBWPDN^_XgHCoofI:2n' o 1 z/8E;`*hYfv ޵Φ_IDu WtD,y@lה 9n{BZEM,unw;e3|!JKXjRo*H^=\QcSo);AJnG=3 "͜4Z&RuY eys|h!WWȎ0-nyX#;y B-()kѽ< :ƌ+1>^`SQ q-Ϟv #+1'ӪA0U(k[riz|eWkYnIE ܝYDUQm Sk\p- TV"NlI_b:@㽾֙V`Wm ީ=q{6O Zj,wv\({;=&!y3k6bV7SH9[D)@NhlrFΧ* fF/z%kPxKȷ>tjzRZoCOZ$ _h=o1O9R]a>8 9F^/=/<YVIIPDuB,)lon$@#3u0"2w^ztWG܈X`K4 $x8BZMJʟD/Բ-oaPװjec~(C;oEw r-qwd{1\b#2ǢQVшk2YhV$lq^1Nr!*O:6ܦ~_۶ J0zMHk!_shFpe Ƃ g f:- ^yӿ5.3X# L wzqfhHGA@[yy8 ˵/MXQ=6 ŭmh"nRSϚDfic6S]+zȮa8Ԣu!#}9 8+-w3ElG]!:,r)k:(oq'ۦ8c8Zd31n# ώg8D,jvέe΁d UK KMe(<A7eS^:,ENé $sIN1ʪsN0ѫTn>!S*ry;Y:Z%uW, 5ýբg0c=b=RxщDᳺj"*UNƀ^!C)ah~ DRH?ҳ{Hfu{:.KMGI)lXĴbqgwMtr.)i3KHeVt|Qs\8-&L!(29e,B >*Ud҇#Ej7Eh QF OiCY.+v 6 Fj6LLȼKs >dWzO]p)5XőXrPLO=X=ϼQøZ(6U,4mF/C~}5^9PC7\R7U[I$n:(lq7(%i,䁒~<".-q7b~a54R9o O;̥q:+I,t#{ɂ?$ 
}z<|5KܙiGsP%&CGA1;2WtC&d0(bCU[Ә_TpYؔfGyܮ -F`ph5Nth\9*{l=' )3֕:82z ȥߧ$*;Linrh?<~JB 9:`@ntB )ϰrbmnjA-ٹ+8e]S>mj` L"X˩Q*ebV+Al0xxb*nuțn#jM+랖c"s+RO<.LZu5hfN_uZP]MG׸2s=;m z ͱ'E~oO"q΃-nLT#:7,4YYg" -޻7}ވ$r>aӨaBHzP[ 4r>syj\}sQdR8==`v%!4ѿPƺK~mI(+8g$ʠCd6L)j&淯Q"+~>Te6[d4 P!6Dc0\pVi7^.$S6d5{ NJß\i/-F=:bԛ7@ʷ*5~0}`o|*CuӞMsȽzCl0 26Q-Q8.m!FwLT@|4oTP=?v<#N_'R}H~ģVl)~!q}vA~@kr+<)5&H5C9WU'NaJr|(w˸aN; ZS=߷66TT~UEӊ6 ޥSy6dM> 5! Dw&[u@F;ہ=ETeσ*Pn那^w[ xČ^L 4L7[Q\k@&ށ- NR=+,fm7Lۉd|;ouU>]͍bv bpo"T @,,}mSz&A \X~Ԭb+?P$a҂BOOU>Rjt;(Xxw[fjN ɟxh4q/g|ĦQt\pbͲ*TH'K ~H |kJsУaЃ7cjlɬK'rHUi}p8HGcϛLi.^A㺃o}~ ~IZKĈ]12#@Al7Һ#{C纁f#LC.޺t&tzz2yKǾgJbV;u%xrҀ&nZX<$vg]b +,JYV3c5B ,D5CJRW9ge 9B "&:*E9ehul~X%Vǖ:, PGT0s%@x Z _/Is>*7^JcNg (F=` &"zށfZtnvn9O?$(ey 27_gah&HXoaEpA%3#+soTp9k_ToX!]m+>\CpRtqMe Olڂ% ,۔mv4;Қm yg)L)O^)DD5)do bE| |^1vwgNJAai z{$>$ o9 E$DCӗCvvyZgJ㗪)Ϭq@}ab5Ur&ǫ2%If 캥`3Ე]R;֥d!&t>YNH(Gk) D=0|yo H >% A#0f Ouу bZ7/0ڐPDmT۵@wLʡ2)M,P@LXEY\g6=E{jN) ֛B@yb4}jRά:JZYaf˷4o53j` H5AVj08$w]8~h*YW`=8V; XiMr[mNC?NqFƸ\o >2, &CAx>8wrc1]q{`2($R^9ͩm10b9X{ҹx]}Nhu0qe_QϸeQ R_Eʥ5T}XjHږ7&Qf@WbU'}<a~1L #'.#[\z侁(Y +[[G].olT5ZL(v{k7%=0eN3QIbBV%$r묶 -s7MXj=$! "EwPBcڞOsIh$F&]U.GЍWJ5s+U$ b3KUSC%(8Ecb (eZKaހ #>K/x7M5c8Ekh"#M@j|}DJzT\Z|0xMˡaUMl4 <)%Օo"d}) $6x [#./Zl0UGfY?>Fg[rG nwˌH݊ej:|O.OGnw3[|쟐&%ԖM 鳽˻d6pGRN3MB &o? $rq)+f),dS N# KT K&c5^ [V@jHdCX;@#[ne64}#yb0-̳L7%H_uq]'TbA;{4bPx֎ *(p4 ü. 
ъj0':)ot'JЅIn"Nsv8EsORsPNeڔIoS5*~؊VN4lm0c qo™^íy層qлGY֕Z&u|MfkcɪfG;WN3VˏM<^OMy4X,͌L>m7y(ncg76 $8?OP~3Fr4 TYYe a}!puXM6`5J~ߨĞWT}PNKG9A*YwPf_P \;hpVV HֿŌ`w>93XcJ+01f(cl| fCQu4-E u@WZqG;#@NAbHbsA`Uf0Բ6;JF4Fx,k/IkRW /"FR0@EB%;ۓZdD8Hv,{P8Dvp$g+o3@$hw B=+ -oZ!F:E૝ 1LS1Uz@Z;z.Pq*adQ$.1J,%x|2_呉M)4ňѵ*".W3OɚX@7eݢ%`#OMpoUx{cQc2@5(`  ά4ycR0`>ٍfߓ\FG(N41sFۦ\wאY*bXǑ U=hܪescyc{Blxvin͏@o?z~f0$":#}'-ݎ )"#/@9t"6m,K4^:s|b OZ)sdWjW N)wtp *x Qf'#[#{hU2h薐{ EvQ;M mN~S@5TPY* oFr'ñV$[Su*e9tP~PU# {H٢-Ea2dVK y?j^}b56tnz]iG [Vt;*>=ymA\; [`ne -X ʏPԂm^3J_qXzI`(2} vX\k{AA49%''mrp32!o"R#TTȲYau5F[HK;Yn_v޽٠JC╇9٣B8MW m 6WCFZ#p6eZ1OcspY% s{ӡqb^ zac~|I"fNd .e>cP{ji@lXďR6+]tnL} IJQTDnJ{z "oC%Iݡ 7ALX̒@{;Qe`A ܶ}(Z'Iݥ1E6jeu_XGn͞dE07BIb54c-2_+u>~|܍f^TA DXCLlΨNc?#o \*s"](Bdm#?=$3EaqJinP䚱[Lvi"B- Rq OPVwӘwB"0!h.F,Grzj(W-$3'ilFa 8m۞/e^1@g86b-/s[%ʨGr%Ԣ93 Q>76´{"pO QED rouKt[}Ss :fENN%%|ʒ~MD̗4ZBpx("[R":Q׿U 8 :۹v>~gMadLid0H6lviL31l34@ԉ$k#FwipYWS;l>jmҦz-,З 2ȋv@:Sw|O.؍xc1 t.+wjN‚/3Cq-,=qfc;QpdӿZ1v+iOQc5 c:rѴup7mnB&DkgwPpI%+ih@i0i&=ݢBܒ;pvRZ`Q>v$Q\7&߳7A0H_e6@V=1&)v-S7C#}^[8Shc7z<PX ~a.#2C 4FH{GxpkY5)H:e)efbgu؝sp b$`UH`"Ioc$,]aCa=РoC|voY5op{]y.) b&XQsh>6wf uf_jOE)8O4<];`9 OuyWc+s[C)`Xq<_#EQX*RAI.d[)A,+I6ߵa2ç`z}Q O!ѲN^aD~mrTEkПVEsǣJςi&e+`9RIȢSՒYÌ%=+BY D ]b1'4I >6qR,?~# ׆+J5]s YUƓ][b,)m&cuC:{IUXivʿʱ&G4LE}uSQpE*T}kO ˾-_3$0R lݵMPP};[8T^%Ѹ]p{C3(=/oc&l8M܋:O)n,cTNlڴLbTŃEЇ?+#}C}OօgYOS'"I蕱.l[e3e~Ѽ;^L:r-d9ʸ=!KPy <ϒݏVp]074ȹD6X@ -k)&P[Ha۽9u:G^2t6G2. 
9${Uv`)*R['ێ獂Y#ud56B,Rq[2."\ ݒ&{׆UꇺSPֻQk<ݒ$yƛà{3z xnB96V"b ΐ~{%Դ*I02]\whc7K^=!A2{_Bt; 靘u>a5B.rO g(-lWPOؐL*+%M;ʂ϶h©ML^) -Z)L`ŻkvÇT\CO~}<\ 44άOS~?7?"j]Aj4j9M#Nm"C; \<+i=ZL{Ctw zVkb#I%O}: 4SҴEM1o}#I0H7㌿Ldy$E S!CäפL1ku"HLӹ'Z6gԟYU m>El\JFJ{p\߀ K􎖊I,–͡\fU5m\w(3X@|~W<'_-ؙ|s K2ȇn]}7RKɸ%5yZy'ekD1eHzBawrƋJ0 YZLuminescence/data/ExampleData.RLum.Data.Image.rda0000644000176200001440000037730414171333122021234 0ustar liggesusers7zXZi"6!X])ThnRʠ3$RKLɄCbN~z/ H~_0"0=*?EA(.-0'Hv6 %?ɨ3aD8y0a73 㞞6}yI .#ABbj#-K/dKGA>P%HQ8VU [?-RoI`M;Z7wW.6ki1!F=:f}abG{0 k mVs,B-&x3O]X{GcySxAZ4q؁e9hЍ7H* MS;`5ĮMw1W4C${6xj[1V<:)ڔZ. 2#(gYd :/YЏݗQ[6Mm }R! r?'/NDOW @vE4hLB1D]eG-ם_ƒ*xG^@-]>Lb(@J2?o.S㳛2oԺW<§CS꺗*/bdSnCoiJ= cNx ~fl/R(Z$_W꞊L6K[ofzcDiTz=RgI %-XUDAaz!:rHk}j s(L^ʕeBE}Jb3{/UގUg? "O|e]W|-|8(2 n59r;w/O=$7D"gղ{)Сf1W/@~^y rpdG[kx/SsJ?[4g8ivIdtIJq##!Q%Y0Ž["`Q ),*pDds7qS=yi AgwDSE9^G›%`L_$JVoD&;\˜{䝐$:‚Kr< Py;p2;8:ip )Xz]+dٛ@q J=7;Ea^s׿#D%&CIߏpЄ9pq~sMfڳWZFIzlv,m mI]~zA: ;(=몍[0T5 \j1 "9;Oe9v[#C>q8B5J '56|q4)kC"Q6`E MGOJK ${y+:|\15*Q+t )Rg:C{o2$̃iFv4$'jӹmx]qbF "|7%OI-_!5 y;2pE:^0rf*{ +{8(by|NkէmIX1X pRs, !wϸW7"qQwQ ?DbUG7K^`'ߍhXTA%H(ioY˄ؼW[PI0;L?'Y ɅC(Puu)* Pvhxl9Q $@uss-FFa牠`G<^_C )t45,O(oߩ,xj s1s9U+ y-?!*}OwzD]&ЙS`8jd@ wmHiaя❻ <&]Nt:xz`ve+ERaKQf^Z6e$ 6~17c[;" Bi< Y^h:,䨫ACPl1^EJAο+ +*jd8+asꮌuG]ʅ$&D3SImC{ñ$DmM=Hᄍe>3&"`ox6dw3T?~,ղ,x_7l2Ad}mҳ%W^_AD[b!LsvArx S}hR[x|$!"_HPX%,*CLyo99WKCbJQ\')D"Bj}K${S?1tZN?B0I$#Q|t \KSW7Ͽ77`NxryQ2rEh''CbHLdP8fx@Hed (|KVW/N9l.L(v;fjgB_O1یv?͵Cr+ MeenǬ_ޡr}k@:$5`f6lrѩ4yF@ɠ&74+E9 f:Z%jaD":P<@9zl0pr/Pqx#U+ ӰHh|7K_tfTh&{ED `+{S i=L-UWI 3Jͥq4W{B n2h|f{{})iԁBD 1>kXA󤂢1e (WIJ=cQ4ƌCz<,+X,4v˰~Ml??_NĨwA;rb8_ ](΂>6좺eʮL}{Ɲ{D)6Ulbb{=|! 
"2t׮FTt$6WZYdoGJrNF}!Hj \qqFG[% 5 K"v!yA\ԅ=),}3a]U $yӃ2@2ݻ1VƆŏ!b.hsug\2n$@DV^(cL~oAI)@aoNΚmH[NFuu; @h\@rk-, !k2jkKl7OÖ& 3S%x%.s$@Na€8@ I6&MW G`eeai闬c #9))$*;wxr\ $De/^zN ~!ܣ=5JDz§y.̞Az1jÞS (|6 ?uW n[ ܻXU7d9 5;$~X޽q7daOҪ'UBuQGԊbHIH%Q'\!簜e"uw^,ͳ{I;U:N~o%p1/,f \(}Vv5Sg+dLAr#\N- Dmka *jjHeɸGwq4DVݛ.z&dݲh]r@Pj[Y#@ 92 h9R rK(B^ ߵ6=]4/*aO ~iG%6~".Tk86RW$.2Mf.E}<$uMWo,ǀ t[}93T*Ka "b;. Un-MA,Rԝ:xrLC.UpRl66A8!ٲ + n&g 4\rޒ=0seEhd#,0ec!bVc8bgb]f$r4.8+f1w69p~ JM}-cB͆28zn¿:L¶Y7*z ֨@:TXrx4+]U!en۟Ll4$[;\q?,󘇢,tI!OR; ýk92^L"샍kU#f芴s ~C,M*NmեLw宕5??kE*\:Ʈ(1Agk[PZ@.:"a{iE2ԩD]C&x*$VXq*/*%۞Tm"d4hA_`ڟ/hp`DJ!$6:pt̎[kiuQ:0ly|  Y ړ[ڣny z"|uޕyjbR[Cp5G, h;:1Pѷ$ > G":7b>dl'TdPoJמoOMx>*֑ :YB"jǞo9 c*Qިi;Q{9l?n)>'ãAi6(у 7W^ܗXM4̿V]AMS0bB9,]E16q묭\Τ0*FʺI׶oHzL 6e2bDqc? [NI n.`䯷O@[lP-a\R:&:A.\ q[ljW㸽;M7g^x4!ՉXFMΗ7(ѢT~SzG`2(1Sb O(O[hNA+x0cNՄ49_[8(C> YWx_?s<lDq3ؼ'wx@#ş]z)-Η"wcOL?=IPko<-K>.Wܩ *]Rd6Ź>D! znIh؏w-?VzA4ġGoVP/=LC?JY9?MDř2+LZ *Ii6@/qg_F˹ @?!u:؈ 'xSjNx? \;՝& SGUxO -U>8Y|ʼuղ@ )[^T8@Ri]py[q`U ^}P/>r&0=Ôv ; -'NzGv)oSj6l\7ʇd%A7fJ R6`Q B0T"Th<gBWpJ%'2kخ;fYY\M-r0Oh_e` Hmӿ`"HM;0jw?nVEw z# G;>h,wz7_'_瀔N:Sih%N+BlG}1f b(D&,me>2o 3Q2gN$UUvz_+e4IFgt>e)<h Þ/%y6Vi Ϙjp l+^H4Es\1SKރ35Sw4&hd(w4;g67Yan7\D$t 0MqIa;yHſ5G{TwDKXW4݉#@<˥5_b28V—}qJ"i`9gUW~u&#b?]& $yl /fן&K#C+ 4ܼk0Z!br%esfsE#BSm6\\-8肼eDz83_`>N tb9+[I-S-FvV(7WK ,|pX3+nB5= dfŨvkԵs*[#RS@'~ъqthOuU?M2Uf&{Ut+tW^:6+TPE"d%eEÂY`vqqr#:HëwL 59e8{H:ɩ#dڴ\@On.yJ?D\3}Gr!}2Rî]xԽ8(1l^ /Zzέ1j>XKE)$d׾0I#`=RDʊMkqa7U?ٴ"90^`Pe؛ 獾9whƥey74 *ܧzk}̨ҵ:uPq;.uѷyvÉZ'|Փ("1o7{ XNQ*3ڑԳ 4oˁaKc;`HX3bE| pfk0V{NEe_:bc&!zbF?8k1vWLkň!|짺ߙ-JFprڗFM+[Y79%>bPmnozD`.@xQ~gP54e'Ф[(DTĎdZ n>$0m=O*F_u ܩӀގ}"Y8oQ?u]r@cK3R&4~BNqӮ`̞F=T9Vbēza 8bzW!tc] qà rwFC;OE2tK╊Kr OdpL8fw[tf7T9v t݊dfx xqZ*(epBPAVp"gFϡԊ7 dKyy>Ti*V%͡|oTwLx, FiCFE%݊Pٝu<;mTOM?=޳wt ]v,A =#'SheZhY~#+h_u{?yA˜5EƸ}?+b|cݞŒ-&CbԊ}棂|xbDwE&"ʸ'#u$z [}v.[>;6PAh?L%MZ yCw:5&gkC[UFyK+[ p$lQ܏Wu2 AK>Cu\3~i樕6VPB񏌕oX:d3K4?%!os|`6ybAL_\hj8z0ʭ<9t7v= Z.}y= )q61;' 04WetueõVD>6UKToGjV7 $z҂diGgG\W7.LF|1f@*nW̵q?GF6` }t(`/-g\m1 s6w Nߪ 
DGQjjw7RafOza_"o$ɿMR|S0*L؍]J~ojF{bF8썸mq\/Ou8*Q!{j^u)+ >'{S8?g"[;|934"*p| D¨Z!щϞ|*۩$9ڞRx'fM6`wO.Yyt2酤B{74$̼^%߉=}:\w<[xX|+hI> IW_.igvyjoO-z!\TG*TA**`KHxy) Q"Pp'iy+Jp˿ʼ8CxaҊ&E~$zwIUNT}ۉPwK1usLfH@77l(k-1Ȗ3 V wNN87f'_c V%9hElC8¦kF'>##UPF8]Gbℜs73.je0n!{'am|Q`CʮefY3VCA:[P3ky1́?mTc_! )Pfj3wW+kM4-elTXQscv^\ ҂%̂rÐw'垃7ve G?'/4b,敞ސohD fyR(/}Pii +A3qq KOy?(|F"C"Vb_d>iD˭$SYe7g~&Zĺ&{x4nF%HN4jƄL_b t*X-B[J!;*P_N'QJOp[K6OEh.(Jm՘m(EuG#%]T~ Xu|]@U?gĬmMMT*~MYa6wnF'Z7'@ydeL*ghسH3F?}DÑ@_7/^$zN%p VRFCIvPW%mݼ/cIE.px7Ÿ4l{/wᄍ :H}\=bd& 2Y '(j%5GZ3 XX[@"g21f{ `u0zwLRi; d(ync(b#] =g|o#Y2 uN|dǮp$?; N,D<~YBإny8 ~r$dn3Լ۽ PwLUXLHQgd%S_pwlƩq=&XF13u+ h]y_/Ԯ`Be`ܩEfHk`G̰`MN)CCn~h݋QE>}RgZ)~aڳ=FofOdx5{t x~qC\0׭'ex`!n&])45r~t'Ρ/=@"Իs"-Ůr:15,EM830nд ƦɞVU衜c;lDZHWNnvbhLYif_b/Oj.z(  E:^-[.'ǎ.G:MŐSeL*'6&0ی=ݘWeLp1OjhϬKUcpc8J l1e]d5neJI#ѫϳ )*=˟]BK~ݯ#X>[ڇDyN ִ-,3HO*e6.ǜ M_WQgAMOAKB$4W8pQՊ7h|J^=Q,jQAA{[Ti%ǝ;ꐒ׷'(e=?!Ksj5YBjZy:t(:R#DEk^ |7S$KmDy|l{})>dfJ)ף㏍} o/豳a-dsI7ٵrj7ZyOO$"&7̎7hл *H&_d0S2ڻtA i~gGf[QY0]!`( B2X" C`@n{L2#9vu(˨e~HzbThk3/z[;6߁qgZ{YOFW6P$̅hwGxFD&Ba3Mfmn匟"=1IڻQ:_}*.F)BXdW og 1'H,Hi @tOl\pzAa$L0wET2.u^Ty $[9恆."vH;&#`tX`~ʊ\1c8.s 8iv avzZ GZrZC|7w/w 6w pc/pFMy&qv5P!J>9B<{bq/j\J1gR#T|lR ދPQT DYs>`S wnsʾSO&/ܯ2,hhRo_B1.j XlQZʉWe`Ap-1R, 3D` jHNp W!ХX8]ũ2AV?- J'-xqߊKd XWdcc?M { B&sEPgm>*8{9Ǧ? '?=3&iqmʧӬx /OoRbU,_DY05:IT,p{BjPcL}NֽE 2犩8GMO۳/.d4ewQgn]a1q;Z?aeAx2A* S1Yd7y%3`YDLQX7S |Ͱ0C2#֘Wime: \?]UpC`)h~e]@:&[YM뢳I4 RUAӖtY-&{;Us>픈UQfFB +}l{eRu;:Կ~`C0 E逴HZpF(UJ*06J\RHHCޅRMhhRSmtԓm[5C<ֶ5\-IQ@D*' t !sSр]!y^Phgƌ-| 0-p:6M[F X!2Vy-G߶zYW,`RN떍UČ,YE/W I^Qùwc@AjARgHjsW_}WeV2ЁV=o Mnx1bCM |*y5G9vj~@D1eNPTEQ$)kǍMtD5F\8QQFEM;ޓzAtRM홴|b(lb_=+?NJoD!u2o?f;iu1SDg^ԟjRn|tW۱ >;Uӂ*ފx[v0AKݡZ墨i͋ ]ts_/ڔ #( JoG) ~@q|njM'MBbO[IZwr. u&#~{13sAsf)#g}mD{ItY7ӟ hlWbhy$tŐ78ONqo#VόY|~gOiki,I o[MGmgZT,l BXĬ:];xs.. 
[Ŵ1/za2"NO5Q 7z{訾;Xeh|a~&`0󬷐:{j}~߇65h ]*xjz XThY9;MBd$1Ux\:,"ݡ oa[M,gM~$C=2F{QzpBb4wY8Wܞj$d__m3{&(_&,}hH/&d~5uՆ/ə)í1HR[{vNI"#3ګB}DT4(AF7IܨvSCr oq\ Fq^ÈKtGt}F,_o5mDnȳh6XQsa˷rco9CUڣF҆2*+żt!݋r6Wic]6'+H?*q\h6P[QX?7$^`=?5c^ղLV2!S Pz^.Y([r,k6XXOG^U ?wQgeX[9776=z#YPWx>d'O SȮӇ#487*s {+{ςI)8[-"㟅 1x JQֶK[}[cV,ò=E+3)plX܌WŹ 8|:?xmT8i `졮teǿsHQ^T,`\MLAvڹr/8s*^yvE~F%?TBsm]b8`x|̨q?cOt΅Yh'zsK,13'I±\D^Oypp#6%P6 W=iT#84ZJ9D+˹+/!E ѻ9U ,{[pwhaHPcy?8$gUAߡjg\mfumR >ڐm'&r5,7Bwg_IA=s%ʷs=^e3^&:*KN?W*6\V+Xԏ` 3 Hk ]NPRD r[ WDzdd` Ivqn;Crz==5 !lpa" ?Iv0&oeWngU6sRcuCٴ.vc^MH d\hfunq ڪg^Z_#en_V 3Ջy6笔i#>})?Y 83jѪ?9 Y:I@;O)prRŠ}z;9NytL~S:k݊Шa٢ ܤ.Ľl;*LXI*k6EŸêRE+2o_Ž\TK0CUV\ѲvÍNAV`&0,r;264t.W2h `a Ό+8>{rRVY\}.hK bmnqV (,sf~)ǞbqC)i5 .,l0Pa^h;{tjlD$8<&6zX!ǩNʎdkWI)2f2@UqVxfߎ썸vkW&2mGC>mF0pO+w [} &/ƍ۷e*Z3h,=xG_F[O28S Jg\J@ɭw2-+:e_Ϻ#fpF87s#;b0V|Z+4Js}8˹'&}**DunJGn\3}yP$eiy< a]X垰.b۸ IqgЛ' R(4T$T`#Ī}IךؒfPC%qPȠ\-F8c  {S@Ls&f@T#yg#L7ΑHX_G-Qډ>v \<C50-͵I?BU-o yI\sƒX p_v eđ`Љ~6=ɍ.A͒[d3Ljh>̲k>Z)7{ezHh`|92 jEK~l1T) Tp!k̘{fD.=Q.Z65CZ+w n6ˎ^-fCXtł.d.#&ۺE@rkΟғ(H%?־"@w [kKɋpl[=֘LXs҄Ƙon>tsh48Fp*!01u Cn%H$g=ut٠⡩+w9i4Zi4u P,ߌ"k{-]XZF1?=^\4 ybVIwШaIAm/9|U] o 7U_8>K As}3Ŕ杏,S8^e 's/? ni[ELxU43U}5f&[g(|>EG+i[EӬz{1^DAQDlQan&U 4=!,X?)43^*ǁ3S$*yˤۢxBl 7 yDbвcӭ|*̡rs!p[kB9UX1Ȼ9t}=qv,7([ cu%V]mbI/bgL%nڋ]ELzdOXR{ѠPCJ`ON^՟M] chl8B2 "-HR8 \d +?Zof| gS`.O?3ǁ=t$fcI1m6 ! k{d\j{(eƶNcTX&gր2n;HPCG>sP2 O.knszP 8X[2`Ϙ_%G[bE9hBffΈ3$ ֹ ]C5[g U\*ITfeƐC8sQ,u{W7*ݳbs&tUHzgw>JPJ)2B=슚oqPEV\S +F;ۭ/@ԂNV=lTL`-1$aMT'2!4 7~Ƚ}=`],6ԲwXF!dQmq'P$Ӥ=!2K)<`XAu(يە0[YHI<{Q)ĊjKRމ`bv̷وNO+-.oJ/9Z5oxWUZ3oA)8+00Ϝm;Gcx:݁!o>*=rQ$ h,*Xȏ@"ѓojHdA*1Wp$T)}")y oC8²$5fȆ+6 (1:o tR1LĐhd<1Yg G4f, \_Qpy)q ,5G~_}9H_lv]aM/uiP|s^'zn<`}P.be]gu&+T!;5v H<:<+KJ{@ rNߣXǧ]h:ngnfǾ2֝)qE[_/e# G[`Eʻ nl}z-9EwixhؕxQ~p[W$0߈ִ*Da^vsy:N!!p,ޏ 4Ջw[$(Ab&J\4/=Ʀwޕ3@W ˴9X Y|ϴar s-,,Uk;lFH1б}5eeF Agr3Rs+w ;,c26чOs ߘnj` vh^'W3M3ۭ#Il>\2N6:4ڡ 85 s32ׂ5 3c~? 
V7,8vSKIVN g+C.!mSǫ '=m(}.g첽jT =>SYFO6}<']{XZ1;F=1[tI=EqdE.#E%E UZq'}sS`^I8%,,VDu$} (נzwփU_\x*3t|3IM0]?{^X'JPmՏK`Jumk3-q2¸gY‟xcBЖ=a><߉p|7y S}gQ8d'[gwv#Х[&>lqU}S@%cp 3ˍEcT,@%@C澎 0Qe7GiĐ cRl_ Ȏ̵zFؠD!mƒ(boG9YȘnS>5 Ч{n*Bo8fDp4Pu.[txMׇ,Kw- '%o@;]H$ؾag]ΊW#{5B7R*ھ;QlcHo$SV)DFHEX!i2\#ajSgfݳN'l5QY, +6=XOS(4 PQÖfqj-x(xV5[n˘q ˑN/#0WsZUPՓ,_VL|}c>47Q_5 Yz~Iř?+C'N0,:~O;)FoZY/L:#3zeNhqб*p[~qqP/29iBPsRPHʸ .vTxh#-֗BJ~~:^%Stx*Bgh$ɼ8 \WZw:O"UN==]o<e@}ä:m̻μ{ ߖ,V O\a .ˎ**&SrIIC@9 ѽG,۽R77)߲62`a+5yKejߺ'h{}a[5@ړza*cI_8s|m]b-Ez"3Uta'U:UHU]00~uHP2I_ijuh ?+ ~3 K 6=&*̡̕/&Rrc튍o[Ssaפ_ZSaVOpYQ;sz.$"ctk;ӽ2.8}q[%!5S=xa̝}8nWdE1[{jz8%3'7(?-|PP8Qoַkѱ3Monwm7H&TήmG4DrW?b7;DePG/Dվ#K`<}SaVQ͐챰6wkf#mDOBѶvaȡ#<H x<Ț8i~6jVkD<8Y(8C5:TPĎ¹{º *jƪmri \c!SUA;_3.Y遒x4&Xxӗu&abvSՆ-Cy$v3cIa̮بmbz2x*"ª^4`5" Vj7. . Jd#dG|<,|!Ac{= }qs_{iXzZ."_w]<b]iMt{_!f#{a3}H޳þqm\SU HvT*!._"S› լ= /?{z8Kʄ "HT~3u}7 a# va VFb֝f҈1]0*+#d`V$b b ǰ#bZZ4j@<=Y.ݶ^]gdOT8x8{J6] ?iۥd0yG "] d|^so{y. ?Hoo[qkJ{qhdۇhf(N4:X>Zc 6/\)KYjy0Z)uHF LQB,-֝k잙`2xhd*CyÖd$J2x`2^RQK=F_MX`7Yoມ}*s"dBKZ11Xih"6t;qx߆WS"r );ݳA5G^ i7ݛGW;"]:rN0}0Z3}_[@h },^MV[ZK:yP0v1tWͧU zE> l㻛g Lj`pK١7@joNPbJ*QP\@9I"ningKWt,^l T] RRnm4Mh,AC`DϺDxO4IvoSQc]xz F} C#r ,)D]O~ILl6w ѕ;J(, [-UMqf U/(AɦG@k~ZBL=O뻎e'yqYh&c kP A~~0sB҆;i{-|yQCRFe{IG!%-_XN%r먄D@dx58Y-0ֻMw܉`@>"JZ^ڀ+)ʫs#*[6] ~y%nd@y V0dİ ~LGK5$qS_"bv״,s7k*-6cM9i(sR/Ƌ /Xp$t s:~bw*Yn:O- *8 %O]",50 #i'5)&HgE;?iPğbiewdDm@:bvִrXA&ȆLeѫzX8\dq0,ihbbx#4s81YzCԤCMdXblFn\tB%}jy 6ʤ.+ R"օr,5),)͠"ev jhpflRFox_2+115 kMl"Nj4Fbo^dd$>dweiVvy}Ԑl\œK*7`B'$O%qQ,B$+Pp`D/8YvK`PZ4EfاXQFԺQ!1S袤Wa(^C ڞov3L&wS fR) a6he"e[_BG2a F: OM 4ogn$࿼׉\A$_-_xeT UgŸߛf yΎњ_C#l48VDbeV-k/엱ܢ ¶񲤤UA%弢g[]؛3rQC{tI"?f v;QSFFe׎!;N Fr; fx]j B9X" &A%]`;wY.dž.![ Ҹիa >l/>J#Q0j*Z#՛5k8+#:] g3PK JaA%;TKfk7/%o'u둘DzkhRϘ#R>GICI#O4~yDIJ"SSp)Dƾlmɰ arBvXWSI8Z'fjhVIN5u?A@ ^uKixaRUrݿt:)5dLB7ќIPtm G#u\Ƃy`Hr\ L᛬8@!{XW\#"یIh1局 5TI{/sI,AfDz!Hv0a bXg cN@$RMwxX7P=ZH({Z!9W`/"G :֚GJ[Q b&-v!ڍN`UT?RMWe;yO9 YLM+d8J@/ dK=Xa2S(q3Ï)\h5Ǯq[+ug99vGRx*"8LE$g]PG3#OJa@x^&y.eTyˆHCg ޳)v&PCrv0%BF'ńMRNb E䑄ad2(ƜIRVS0*WtƜGf?p 
"EKwxPH0sb*sV'P/)'f][K Ҕ#~}Z"P,H/Ĺ7L\r%,m+,͵V Yヒ7,ݺqk"d;%if 5PI@ײ{І eYp@GdncF_;1Jo_.km2<è(?so~>YI9{ظ%?qa%9z6XMNi̴Vuʔ֑Hz<=Ǵr\GRdOQY/FwPжif;UqHmk>j7{Z 9k-IJt}n*X/K.W6ܛGF# l"eE/͔ˉ(km/Po ǃj@p3C]4νš jQ xEaOȎc::~3:lh.Hѫ.WZb0 ZsjWi)ʐ5kPGEڪԃ~z*|4cA(Qeqs=wwF h_8 'Ur@E:ޥ1y2ڶim ރBAC]-|& 'J2] s1v*1˳o T4mVQGrzҶ9$fYY9KY9kUjU"6~Zp(3%0o cg /L)FKRLq]=h˞[)һ[s6Ϙlc?Qn_mU&3T.%7pq<>p*rQL=lu%([>*ݰ,ʜ9!|jk B(lE6EaF5,Ž^`A;w0q9/JB |dZ\ ]q2[3fWbӎ*h&C7UXq{ Ȝy4٢;HmnĖlK?&r>-%2e'a)]L# IY:sY+Yp-H@ІH 0D`3e/;o;#QuR@5xpJ}&ql%VFϴJVx̳jڵ{UGfzjHƅ4B\rEku8ܡc Art)9%u5{c]C;R/s o_(Exk"ZBE|*uᬑ .ebx.a֧n3z:&#^I|rxK GHrX:5>#qja$q;ͬ] Ti1 33 na/u>2|E;U5b< \\%bxA>+2dUΞ>7/bE%綎N;` 1'm&R/'<)uQ|hC6BO*r8<+ޖبǷdkv4BgSV,sr@^Q'=Y8ue8v{bJPAJO)d54SP[E ,i8sq9G]4?1xArv&@hѽ@#7Lu#ɠt@.$#]=']d`0"}L}\f7C}-ٹ޲CNheM[VQH~NLM!+vk8Z,Md: Y{B\v+@ 3nR)j>/zY pcshD?BhB&Eɧ& ɂeS=܋Z~SU9ضN8(THv銿vk_B[U&ujM3Ս`Cl~gH_苗g̑êqzp:eDc䮱IIl"Jה z`"*aÒ+Ҫ=}j:Ðx]oXMAjĕb^U3+{Ũ V V`7"{li/'vcGvdWo}+wPnX7|ny|ءʧB>D L8\|e Qō+s}JK֧2zFųĐyz] yBjl. c{?Yb2.@GX2)vċW&l"AiIp\FɫfhBKsj2B~r~~8[>?1{ @\] ۡ٦LvWqaۘ)X,$('| >B&=M3l͒Hs6zawt_fNV>_\&nuS~3%~~=Q )Y rU4$2}<&$_ht qݧvf̚*3JWs>ڶ 9165lBjf$Z#N-䓅fo%|&_}9VsJ@ll#T\`3q*(Ce9xk=. &*Y INțt*f={'mBs{o0;@;*z ZR C$B9^70tV mYMuۢ|W}HmZSXP PU7!h1أqbKy[Ugvé1/RS4Fr-H>SEtuh&_!L8Ĥv_Qocj޽$%h$!`tVk]M._78mqTD?d;RIvp4̈́s>c9[L?IJI5fH搝kIy5Nw밲HhN'LC$_$ w|?ö B[nXPNIw VutM.Rksvk~!)$k (ns Vd Du*waF#xL~b^słD5Xb6j1`N7&E391[k*\ObXVc .Is֦VctpD&WiQc\]Kei8Ɇ^{ׄ{nٮuSc0_̛CdjmAcQ/˚epm"s?Jwn<>>H*^HrZ.)$. <{xz1Gv hHL. T㾷؂5Ed՟xے|TD؄|j6/(k8VcJ]j7\ cǤtd{Ez_LՎ.W%(%qBЌ(XťkIa@=Pӫ% YeY:}Z;6tHMd lYÀYqDY`Gwz+iJ9io|x6Re'4=Vk>xN,UaE>H$=Ha\r{̶UؤZ>gç# 9IoA"5. 
7b2Oc'v|[-k }06_oH10SR;YD'ZXx 9 P&T۠y2vI$ͥfi'@WtP a| .53t^ŁIww҆$l-|Z]> idX= RR􋋱 7!JG7AC_h+$/@ԃVjŷ BP6V*P,.fe~OPFkX~K1zm ii!ܖ% l lW@d&9Ao<֠B˿@;Eaqf&8$ĈtHn p 6Lnv-' kve9H/PyPzOi{DX5DiOC6.; An{\2A ,1ULܥ{4%K>;,s6~Eyu5@Q8YUYvor!]^ïI)j=5LlqwF3݊_I#/oySӅL@p`%1f MM, ".F)'8e6V*2jTU'fd\ b)*<BMA$r繨\E 0$ Xd_)wCQqz,2Bi;Jm/Fk\wl;]j(窮D+Ihc+.YPKD,VHp4c`2K_[v8,uy0WLip qǿBF*?WP-ͪEtC?Z B>dlwTrO%qɅ䌑uw^쬶2+&(=}FG3ϓK9MBv i␇zwUz}׫pLӮEE0 & :6(@FG8k'KVĒ-=B#cWʨ1){ d$<>o|*۩d՘}㳿U?,^F)8='J:eE?p}kg;+t8NEYnPx1$kTڨ'f~EG<,5E룱2s  K) e Y&kgAxd;۬i._4Bss-da6Z8u4꼅#V]wiYŏh˝OȿcGծ,>[:niG\ݛb I@B,vu!4${VI+'*=.?V|³ZB$;VwYU vS)y)N+EFrІˏ\p)Ji 1ޖֿ_SǗ؄B{lv'+fΞ lB^ eVty?87kٽ@?'qrPlC!f_kܷx,+dGerBl 5L7FmKl_&J˳@'QHc_znl JiI4lO$7(ﺥ]ND~fv=aEէӑlwgrۜ\5;|lf| EuWGAHm͏h:af$'aڗ)nE+}w)>@SAs<`x3Ȕ,!׉/<wm" w2"ٝm.1;?4ZGqGg)8SnMcNkeo 䆡P:Z! _*OҀ&$J5ҁHri@3CsE4d+33L-e1XPK2RڻOOeڡ/㻀Ii/%Ⱦ<̷S`x*>Uˆ:@ZɁ+ Hc`K[CFH?'c9TKOoɏ[$?|yF?@U-VoQFB\DjpJd019*HF$*$.3A7BT_80g@4ˉB[@%u =鱜KZHqY/M Zrt!oP35"?f8V޾-BA;zݩ&06}]P 2dZ*Q<0pVXKeԏpapδCd,*R-38S 4iǨ*x #y?x}azr˸jVqx?o~.(|.[.#*ܘ(Ȓ =)"[K/}Q3+sQnZA X* 9rPҚsW+ )k*8U>V]?w)2O=Sy7-*n?ځOQ3j ` %ح5zV>?l]~w*y?agW0j۳3-<~sӸ: ԧLF8:>Sp˻+xolXG5o8P%)kˬ3=uvlr4:{!Կ)CYf ?f&䩮28#KR]M:T{5gn>kF'U@[iH_Y9 :&2'R8`1 w;' O g4 ϕ>NȨeyoE2H E"ug߭7}ʛky8%RU`|#?YQ *)̪A2EM8y *|s2; wb{*H9??t\riqf %Kыˣt 5͢(*hFw$ʵ%RtӳIGrmHڛ: &V/`A̍?Nk>{AHHu>`io [' Ե8uekQ22A/:dׂ@GP,h?ɘp,ZwIyvSso#ɼɨj!GBrzm,4)o%@LL*mVCE`l~ܱ>BRh>x?3;ytt{".] :}|\aQ iqLNHyr\Y  >V*EAhm) SvѺy )N1G|'*s$y7!<zrݵ 6 #p\-KqU%{%%VUXL>2ڊAp wNk pkp mcET,*pyg $7DoբD&K u(ѷIp 0p2v<U65 c|GZ5?NH`{ <ͷk*ҔX<BY$epk;'ϓ6(Y0h&qKņ64v:j;;_/*FcW2$jQ 67\4Y5/U8RI+.!9\PH^h%/)kL+ AbYnP]hk­Buymʁ @*$s, }"XC ^` ғSvỖl*b--|8LF[DiB9("E[F>2xC } J+V^1z+}ypOj+ 5etDϥ:7zo>7%<{Id#Q8[gѿʓˆ*2 SH9Ue8ߢ`=AH*KݿKV/ͣj ޟo5 *Tr %2}0A@76Z%~cp_zUW v?ʣJ9=P ۭjIwۜ$Atԭg:ڦrG]J hnrN+_tVz51~^BA{xtHR0K 3.IiֱB$r a*55G lf  ˺w/Ғb۔>rk.(4 it[=e_lkC B+"N"c[jLF׷#d^nV+3UbzK]_jxO:/_V}Gu&i\=SnO 4# l5oY$qgb"Q۞iugٍx d8ã7!y;}F#XÒl{+[lJ6B;7: =>[>Zif+_6t/a5 ځ~D.yӵ~ G9@[0^slX"}e< pg&@m̠F)!45Mb}gbQ 8L.BKt K+O*! 
O@kȿӭjEZ4:5>IAFaVq>uN $f)mv=)_\n'ϥ W^cK۴KmƧƕv'1 N{L ZwLR9 m/̷b3앚_Zcytӹ T -=9ۑŚ18Cp{>YmP_w>u%ĆԫE-&[405(i)zH-3DZ{~J_gA%IÙ\h@9jAZXH0 jkg)Nu%a%bO`2UpMkI [&[U 3%+gw{Y䃇(gMEm?\vHu'T{ggNv~E? 5`"ŠVkW%3xzJ3X$:T_ 锌$88B ! Z;.f}!TF)(v;by,-^"(ħGBOuQhv8XuO/mqdɂ^¤by}\Ygy Z:lf"@coq! O|- M-hC(7>LaeeAXSL 俍*M=<\$оKJ|1z8+B^,"GV֭9c&2}t_WJ`A+?뒁?lp쉬ߤ /^E?`p!SidZrGk ۹%I~ԬSņ<2}HCn3/H3Y)l戀6=jS/ CXJn|P"(z] !}',.DឃƯ ?[)lD^lă gzx)-S>*A'tYl>2"*S •,B4rɣҬԔf3_T3E¼Jaei q߶VMቓ̰G)7_\}!XEuƉ_; ӪU$'2VM(<"Zǜ~J=wE*N fN9;JVOHT:@z:6M.SjANUZd<ݮJc* zgZI:ů sϊ4>gx躅]?$)=-(ܬ߄~L GY==~WN0q -iUqL8{X-fW58})=c $д]D+z.8 WX9wGu;uÍ'T4[?oQl` u̼_󿰀Vj~p| \elaƕAS&eTɄvWˆ2K1/,G85 ٔJ8(Υ NW!ѩ/nŝ;G?BB[, ~{A3OR-V8F6 šxjQP찃Ч9Ř>5>- qһ+AO{(U !Kcu5RY@/KrRTF`˜s&]}f;l/p-..!#`nU=2cԲq}@vdG{L4iHѡK)HBBa U0beUЧ5u0x:Ye cev74˨+UW!>u~xX(MI)~Oc,3soVȔ&nfF\ny fNgs:(k; jWZNa: &_.A:6U>ZҶ5m$`dΞI!g#^cc/_ [d)f{Lj]](X,l+kow"]##?L7BP0æF%'yλO<-7srF0i.ΡlԯEa p@1pz1#zG(ڬZ4VΣ8a!Fw}KYy^z6Y< ־MOybREY_8S/rv=TxrQgL G`\4dc JbQt\I*Ru8dv(Mj 30 6-s-o5W31kRe^ &`Q e|z( 8쥎h$􇚼*l-#@7,j&VH :I\ss~>GFjT}BpPq19ZUTLvt}biz{E|w'OgR(߂^C]03)Cٷbِ[Bj` t7$ԥ*l@YVј2g.Y/U qyW)|3X.20:؜ϲTj>Vď^/=zc#xlqt6MWM/-@R$.&ݖbbnN|GmsxM#r! 
,m3"k׺0 &QsE:E?'?}XBSf/sM<,x[3 P'Nb8}>\X.+)_ab>j%fGp-oYadxDjBg'j4Lstx֛;aL:F Uzr3/$;Y$o4HRx]3= ݁0$Kwg3Nfؿj9MbmU܌(nY߽cT$ y-KH>A>(.q [ miyx?ذ,Pc98J mk< ^UuT`*r.[-±W8vH\y.Ɂ)nRARGRCzh¸+sZI^y*I֋I `V Y<Aϕ z~/Sxȇ_HrhZߚRnEJ&' .*v3; j;;0P tޟ"Y叧<hOya{^Qj>ߤ(FF=;Z 7wRv1 x{Fe.&~ajI+#wW6Cz˽$F/%s97C`͟K[t'eXJ}>nu9"Eb4.'^Ѐ]Rʬ/xl/‘JBU&>= 75 DdZp*=hgR8"YIxu; hZLg$,>11`.]?GMZW1wy6Q臰w #q*=,I^3Nx} 3iO P՝Ouu(,=yAHe4-w!낄ۿO7A=޵̤rq؛ЪՕM@u('ke8ό_JJځ:ve:g%ődVGa7_t^ȧTASg㝪QN }6qUgVvwU춼3)ߴ%K9|Fña#;+w5 8aKM O~\ު~Fq /$t,?p E Sa`&19X /٪t~>/,^@V~fQj7%pc-)/v/ naυ䫶MYJzO9-={&fMvRZP6־,mw^:I/ n1~sePטn,7j]xK\LGxrz;-I61ⴲ,pplD\d,?Vc\ׯg~e<t'ߵZDd;W@_泝p& n򬶰!h?řlN$9jA3Q JA8̕x\oocHypM&rhMx\v<`Ry^r _^V1W *=ɲ6[3Lp$Sn_s3G+hDXW *KUv@z:*dwX?d]WɊFͷ2:IW $J8#kGQu7N 4;mJ n:oX?>4/>2sv'UUN>dGDiW+{Xפ: bq9pO{MJv&EWC*錤J!,I:$@p0Ϥ9 ]}^ǐy9LH(MZmS՗ˊe%Y?vc) ":a=%/favv>7"=<` CBmyFe,dL6*207h} f\k_H=wʞ (YeD*ACLKnλմ/ J)-Z!:;zoxbqWg(ăFW%,/k,bщLvh{udL]Mh D-6RR5OTx)"*A$Hm˼ڑJ:0ךv}Abqp pIUrZcE 2k$ ǎ̘lfX5)L 0FCL 51^LV%+?:t*\Vu5q[A"Zxpzpe=]6qgy_\ *ϒ=g9^Nzz"; (١jKm'(pAܱSOd?/ǚokjwD?@B5ɯSS9=kxy@EIXPL4W5[|zxZߓp}'5,*Қ uG ݏ k킙fC~MޕlgXNJB`m WE/BC@PC|P jO!JV>I u.>#misYTͼQsEzb%ȽXm?nMo]NCe10W^d"}s۽MEb8ytEzhI=}dtP1 *$+dP陪 n8"#0&8 :Օ1fE;TuBɾз/etsۘ 83Zz FyZ=lbXs4؎m^Zʕ+Tw]"cX)m4-#3Q Y1Ŷ, #0tp.hIϋ@Ex&wM~/63%_}\07'*]]Rf!.uffΝKc&q{KoF| Y9R(Z _~c$^RrI[B=\œf g3hwaSc露mD <@C,L}N5V;:Md{>Eqgz&-ֺ#n-#_B>t*;lS< Z/PHk#,*(oJ',% "Y!TnAL{dѮo/ҋ""rŝFM@dGbeZ4Ǡns8!3r#67VnHh= 9*!!U~~qݳ&p,ZX)UKD3Dn">yv4JLBiqj` Pݲ`F<= B.ՒҤBœ*,).3fQI=53s\#if_dvїVQbN5iVtBF $2* b}~ e9ER gvR[A&;+ fҧTLxۣƏV~+8Dz ANhU+s]W[$tXlҏ:, xwfSX@<w%&T)n3K/buޣ (iTS|3`L ;Y+u*}ҮЯv${Eq;SQYÇPQw^2yP{pb`#"In 8}WKdgP#QscHP1Z ctR5NY(ąؿb+qlI&L mNrF֫@πvJ)-7r3. 
70l uLCN%E$`ω?6n.V͊1 8bj{i*5W9i*k1O=Դ~LroW\C 3 A X)϶WܚZ ≳V)J6 xaSvOzHiK;jt^w]HTs3-:QV-)yt0$;^ q{HKE+`;춸b2&Li& %|BIyo%ALNJELH3 h7ĺ\Pn$GqaqmN͖-Fh}K>&.?O'yFC3{p<GX?Ru%L>`"0}6"QWkx\(t"ӭ麔to_^]>s!d;ݢ&+ɪBjijdeD-$h HLEAd&0-FIT!)+R"vĿ@)F:N; $ .o,b}!2zwm3$B2|R PkǕ D-𧌚O&ba.ՓwMoA52*-6!m;la= \^P ƻ^\?"P 턤Ҏen17Rn,WdwyhI rH(D#{;H|z]j kE'{*uya ,8q,;  tvf($nVx=}"˒d_V,s{"T^.ёETx~9ez}k55!OldЈ47r~>ΊJ`nmV_stXGz*i4#v ᗶ;cBRJϓBFL7ZHlYKfemT,_~#4}@Ww E#(BT~I exP|,a3G'ks't;hU(raZ nBGQǑ&@HzΜ~вH;c,pM=FllC_zۢ:߾rjzhsq K~ɂ؃H)ojk fH1TbYhmF,Ge2Àa .;I RfS\n{0j:K])Z*녜XN]nM,/cO鶆;/r†6ܞo?) 'sCFF8"nb>@f,7L[|T/˩+ 'j (8t 4rD^<ݩ $sl]]7-zρ,d yw&6$A3䛫C|(ty ުLu_q>B/ulk[Zŀw|\(Z -{[&5 t )|_XBq[r+|Pl#"*kw|cܣ7P|yz}N靽}!J2n#[̮U!"צI:͗Nu9|VK3mxP75+tМ}H+7 iVG"({ؐM -eH/nI*5M]MiQ^ 14'eU`Kn$oň\v #ZhFZFӈGŭ?N+NtZ=l^×8/%l(; ?NpH+S7(*NbΥjMK;[N!l9̃^ 5[AA.F+PS=p]=)Wlɰ')]Z-VhRY7[֯pc:? 'U@'g,0HkzRpTRXVɄNc.:)Z~T^tk@`gn׀RiW근F_ž,{RϪmknW#;o s^,0gcQAS<ƙ^a@:Q&[+>#(bz,g{W\ģQi`)G:^^Tj$;U>ay_YDY-g74#"q$=?2ҿzV5,f@u[`flF͍RE1+^׷V8emj;.SmrosSk8>P1SoVqjWz.0I4DI"5<~'4)h|:|q2IzM@ѝ$ %s(a(5{wp$y\ȹ)iBIHA2(,w!Z.+v**NPz"E>`-ĸXst ]XMLlNC}T6IL{HGS3  tO~Z-RsOlk3g.)&ժ| ΔNV;B>*nJnxS̪v m?C*q 06f^]H+iS'I yk԰= @nU (#D7p]+)f .cDebTCnFh/ÂPܤ;e'@w4O oD蛗7w׊%Hta2z< 0To2dҁ&P3"k8CxbpNDMOz[ GbHR] gY_6pt4Po.] 
y1h@B\]L#֒}4R u72#&*'Yyr/ @wg<]RCM_:$IxԌUY(kG:٥Dis(e2 F=:!} !Jw]GSVt28*~:d}<ޛc1_z$OѓDmIX+|?!CzG/f;O/y7^+k;u5*ؽZsϭ Yycđ#&=_s(i+6[М(erZ~l|(n m/ *V#]5 b#4,m>9C!|;r-(Vpe%%(Z!|nE}RYIaܙ8t+ J%ީ _7 `?ˑʫ;m`$~R[#PȽz8MT@K}{tc0$4L)?RLGhe'@Z>OΌ>7fq9 ĄO.0?+7ET(kj &WOʎ}.w0ъeTvYA@?GNNU豣@uIp 'L ~AJESӵ?Q àm+Go&\\/wni*f9o/0KX+lC1(kY1סߔoOH󶧆sG~APH+XKg䋁ʂ紌rDRϧ9c{; (1?nޮKD#ߓ61r͏M.Q|w9e<#L0Fz)6.S[F"%MCc0fb>_~ݎR `q}޿宭‘1/ɝf$=l:8`bVPm u:5JQAGDD!ObZq4n7|VV byp]2rc;U@o9\pPR!ߠ_<,9)/d>qx !H>2]3?a~Mz O3UD\n M3!܀f'LEUFRX"wsRrld3\ڸ8EsQӺI=w(mR3ZzJ_-BҎ\TP3@z:{xdT:yݙ6!AHv(7C?[K?'2vX'LUYBߛVjAԑFmA+p'}PHx,l-/=́a-NfR}}aə^6qn !IY'=3oͦ?'t`0Ԣc []鸴^ڦ"+h  )΅}V{ڊ)7I|U)yIfcm&EYN癗msQI:YL䔍,% }@HӞPH<`1eTxl+$k3otiCs9/{-܊Fu[7c0+vo^S=Yk̪  (Eǖ;@6bϬtx݊f<D8 mwܡWyp:{_jIBL1ZS(K$`yלT ǜmjw x: !K7R`00Rn &f0W;ץЀW]3ưApڝ g(D-3QsX0j539K,Bd[؄'S`Ţߣ6@qJO*lb?`>aw(y)C$ϋk4,vAvяrfݫ``ґ+L6mZkZCp1/0d펍QX(*`zt;?M,ZC d?iVt m_W:_pKqJx Z6_u o$Q*]׉][ǡW7a0kD.bD37pS87w1U,L<0VRθ-X[rgzE(&AAOnkx*昔 J+ #Q+D휎7u~scXl$t%Lhp[7"38wxfz&ϑ[X%cl'| qĘOܖ'LNq9s\w:͞qo`3eP IlY1G-)S[:ʾӸ,̏?:`"N=WN>Ay:/qK4\쏩jIML^0q3!t]$}e1uc~GmLmes2|!JӞ+̀pwyQ@B^~ yNHD+OS_Eg.E!};(Bre %r0#44Ϣi 9/CNp^V40 jbY<ڔ=0փj zLQLٯ4ޑ' ]t{6weWgŋZS쐟==ݓ{\hG:U0ۯ6F  gD4@EZelKWPS&L-K+@Xg^=| $o@YV?֖`sKX7dr e?J;C1YQ<Ŕ o5'eG;O>r}jT7M3m}bC3ХrVem>DySBw&Q޷k(j"HXQOO?ȉg}pJʯInUtTbtcq\cP(r~x)-V` /ʬ&0/"Z$RlHxTx}Q\aFyYjcO^ JohlrCKGHv&Hs20 8Goi~>Uݬa DqH1?B%P2:gO2fjq'W2QLSeff]>'͓Ƀ*maM(tG*xI!OcuCjԛ'uBQćWhH(R}jK&p86,.vž.jsL].jidI6L3HZNķy%,qHy8(s!y*WEd#% ._8q\'*BFS %ߔ8QѵYɣUoF{rM$ 7@^I[y5q8 ôSab~ R[ʾ17yBEZp{uJ$U"`:Mվ* mfעUFP 3ٓ‹Gm#2x0z _5+%P"R B5+yznO-9E,LPZ0|ٌfPj<{{2$A,43'?*<y׹/3s鵮Ԇv<aGSq/mqAL&tZkF%Q;e k#^G,'`4 ??)Dzٓbz" Cҏ߽n(t9A~'VKUc-HEV)OSOCdlu]xQs{j.aͬn&ރx%m3T4i4]1 H æ7=bqǫ·*0j 6BLiḑҬeTo0 lO{!<.i+5 O& `Ӎam6-ڒ@ŴIQ,B̈́lr ȢLsTЩk~'6sS;E,Jޓge!}yuŨǮwp l Fn<ԩ[ jɜ~h|˒S7/Q7UUkp6yf zZUeM5ƗE7|Yr:Pds18̄9I*#:o 7{Q h؛z{'gP^Ax~:7vxK~&<`e- س-p&N=U "x.\g2ii)IG[jIN(XcsC'YNR}h-Ex$춽'߇BRP,٪yyե:nd GPē6Jq#/ER xfpհ"qUDfm8FXuj PlRqU߰ 5܉pل!oWR 4W%T>楮s n >\U-c>c4Y0WNx"ʲg]0Ù$Za8˖%rPJZNbctSL&%9V 
Yy*]NP[Z~.Еۻ49E2Fv+hK7Ӳ>U1ѮuZfo"!`1xGaAеwls-4aM_GGjs}W][y:1fcɐD0լT "KF)IhSӞ#j5r2OM0L0% T#i JC1ܨQl9\^`Y9VUSA}*oN3SVڂe׾AT_·gpfկ^߀(>ȁ8XZQ$-+x~Il}Sk ax S {ڷK3ΖG%vYT-cYfK&M'"8 ڢ1UF됢kC 2r3.y JTX{<NQ4-M{j~w̹J اkK` =-R; +FEzg8fԗa0 1\G>nGCs[bysދ'9D[5)mac1Tu@{ /i#>ܞOO̓!xcUQ+=#>FZuݸI9<7a9_D\/j׶J5-9 3Rd礪t;m2{ Cqt/q.`3˺hKek5_~ͭ|/gtuw"vyC`,ƹGoEuIN@C9z0?5nB (,[nmNC<P RWwڮB+ViqJS`Fi[ʍC:>"Mx+WzX#kUh5Dq>B&4GejI\|\|~^hrRm3s9ؗGLwA_b?& 3B]`T63吖4K*ԭNi dyWN~%P%um\ MOVY0X255'? xݔ=MݞC$@r님"w=b.D+/jϬoH##C%f#aޥXo9K̻td6,O?G8VW˿A>aWk)*8K]K ̒ツ<&Yu" pm!ueku&E:a!rFYK))1/:RK.;ƛ}f-O rCemVX_dx4Ly;C-)v#lwc\Vjԕʙf  2)-YULJT/A";%$;/غEx(5PgƆ2ۘz]WFBs}Bx|T0ϼUP}]~EElHE~a49.G+1xQWvk^״A~i"&TŒ1\;d̥ʱ$]KW ((G~bOdv-on5J[,j1QWrHe]ftY%0&K[i;kr~2 OBGc T.]_h~hО"K&3\ҕJpI qЧciXRlf dȴ*+5Gڎ]:SmC2''V弔?N簕`%Gz{{}d{sTY*8^U)[>y ȝJ-ďfZkuvLG8^aWōVԞ]^2w֦B!C&7K\ua=6md]_eRI eҦ"x[XF'hlx#=Z4 -hB^ 8a,cc> 2GffR=ۥZ3Xw~ 4R#Bp"H dތOC;;˱l]NG^> o(K5v F5wi Æ(lUI?I7rHPAcrGЗ_ <ݚ81ZGY*{]nC֌⿉~v`z>XnIº ʖ]ƅsAh/t:|-E#g~ ?(Rb/`dB/Hn&P Ib~ I4mwI:b=E`P(-!,h;gaT|颷{ˆ"=|M/AlZWىW(x#g4VO__I= & cPY(TK'e~y {+*!LnGL#ܞKqGLm"84ج@$a:W3Ʌ0џ ^70esϾ+mZ@hjvꩨ\Ȃn2_/["oKْZ$t$/[c | 2XpkWLN2WQ7]:jS̑L7p 忁Uo l!g,<1KН:IOp@ܬqA§qtQ"H饓50۹[**'nӱjڥz3b*UW/0aw18[?׮V0 S;|QD.6a?3 iQm>>\ D.I~YJgÎX[]Õ@9T5`2?A6ҚZ?"e=؃\/HFt{YtY*hz@?RJwۍb9߃=_`Ś,`whvь' 76hl{fu^GqM:@tl>1ѕA恭={`dPđ~e7GSȾІOs@+; L,{%Oon~ &$[*2+n6I +«jKy&~ɠdĮ#YpobQۤY q=Y:e1;ZHH-4;)p< yCm1"k9uh#bXfn~XP'rWgQ獅Z>!еWxPls)"I4z5l &/þ\ݐ-,mH:}[%kcEl9|c'dcpD, 2)(}ZϸHR#wHXDCbrOK Di^gv* -\HJjўN{ f><-kwp&Lt u<.,GAKd#=m 4 Ҡ`gK,kBƽ֛]"jXhE瞸z1`R{hC-JKС8[pӗ#Iwl;G,Mc,i)'gGVwκWq>zSEiE8&i#wH4 Ѵ,PM3-b &?:v``ǜ8Q ZUjW?r <y K;5DἱMiEE@`*d 5G!{ {*-`]p:qG]<E9E~s;$H42q6u#@ҭi<0jQuU4=ӫW?<À'd*Cƺ[0peGisB6F}4T12bze cd`3:Gn w-"NN߃_DLi=T,{]q7hX+K1 %+U=2~QLov_f 0!{Э&N .]*dM]y63NihH]W!?x(Σe+ PN)nX_x44})`#51q#bJVJ N6hb:lRk cC$ޒF SCfNgj׬ 6d}^S λwj(HKNgɎa!7>h6UoT{o+i+;y8kjyCHy׎(70q@~[H}QJ%Rdyk?=p,pƠY遶RdOւO4̶͖W9 Z 3AOxo{mIA!Z$V,),>pca5"(p/o&GMnU>Hr%KF.9یCW#(52? 3D2]ҵ!2![ᓅ00!H6<9s{_ߣ7Y?Ö9+0?Kэ5P&! 
^'C$"'h-u]SO4=7p@f6R*M05[i!;vR@x0Ut QM%g ɩDs?Clz(::Њ̖:mM\Gw &XSo0|k ]AEezI%=NwS"߹02U1k,vt;m~|~! $!;O04BcP<1opw?Zy5-PA1L v5|}oj0yA4WYl|L f5U}lyZ4[ #'L'usز&}f.&7#PmP5 jGr*"}=  PXLT B4n>*яt6eXS`TQe6$ :C矵3 _%g IVNdd $F;' <ۨB ¿4^A$yDm[sVwщՌE Aʚh wc rRXkcd֔)ASM I&w }!ATchO*f P@K2\NZ(zqRYeN tI1-#Jڤ>i)窾N tS 4F¢ʼ0>'}*Jr)$wᆧ+i#> .ruvV#@2G%=[!^9(!T/RhM.`.Ptfms[T'~.}WSEK=[M+azmO02P#NYu2}ozaZN0#I v׀~C5O 6)j|9&>+˹NzT[yb]O;ըLqRB'6L3ow>-tmOV(ڲa Oޓӄo҂"0zc$-reMIhSw^2 h@p +4~MW%!+ǠlzawL?BN16MNMCg[|}JEGڤEGӔI/]:Y"z;ֈlXm LߋZB%hთw.B 򉩄ߴp2Gcd:~HC0zt/T~4lA!"HAN|1Ra<^}M*_̼nyʄ;ڋ\G]Z_= y@?? 61yقO*L8B߁z:nvmJnWcO|3hG&a>}-|FծE<}qynFzRׯަT\ͫEm+ ه_f"㰩NBh&.\z k7%6RU5(!S[f$1M+HEGмk.y _!:{@BTӰRkt(E!"r !p=/n|E&I;oQ^Īl8m02XGQLJZ?Η*j-3R!bO]wdao q6oz%,WgTIf_u w;Xk]3_OwᶖP9m\"9JB*]콏vpm %*o%p3 B"ؚ\$>wYFx3l&=FJ#LbjeTB̉-:f6%#\8%Yi* }sGo82 7 q.t. K菮 "TAomEQnd+?"m#-Cl @T7pl(a* 6kQMȌLmP>tS3=$.pu@2eMW^A)Hd#-G $44 q}\6_?9Ϭ﫲N ܝ9Fu(`0XyUgv!CK@2UGkf}O \8o2Yv*9cbMgK.af50eqi ᦇ/ܜT)Z(5t&M/d7cOar;0Ҧΐڶ \H &]04 Ô:۫(lOvsRJ L],*I4/LB㟥a $=\\kDjd%}>}(BWP[u9G~#$o@;H{-A}*ݏϣU7NAj5ZF1Չ:sdJޮOqu_rSv-YN*;ŸutG޽wSn11)[ 8f h{0z괴-*)'vpq %QPhdۋ: ?j}[Je 2dA?bnΖJ̭[Mx@uvY@q6/_ u[)c gǘX3o2@ ~~> 䎄7{C𩽩5'DHCz](_z7QtѾ<08]9oBܴj6ĔA/jF bﺒAطMIW+&HAw(7\vϝ3( =]4g~?\?y^$Nm2±MI j|=d>n2az|]T@:TdEq!)C6|c;os9.MT#k&4^%l0N0XSZag.j7s:ګg'K!ڎ>?b濥V B{'sk|[*Q^C4-}WʂgBT*77)PC QTXzsXI/Aqg'{V gw֞b="Lw3Er 2#Αa>v|:߈}Wh+O?wɻq$$'l;Y*koұ F~3\{dF{. 
>GNhN/zjNtBo[u<~.=uaÕ&.^D]]'ud@9qF_d] [懡'5^@Qp%WDrg21 Tvv٦1eyp7sXm֟MCFfd5zɑkc~oV l=nAǬ⅔?X#/cBmk?(Pb$s.2$%c>wy ~N~XIfhh(;63[Fn(&ӪsP@hrr D(?̢5Cդ*\^Q_DV[͕=o`Żǩ_mVRg+d*(Y.ĖNNa,XȲ=hc T-\l`I% NU@vU妩@7my ׂUw᭲|2WAMq%[=;0V5>ٲPz%Kxe\4f}&QR`y  Cn Iy5^<ƭ_[k'ib];x %CJ[)h䮩Y@XKOb!yce]chOt޴i򟃑,ĥYE:m"Y+?'OvAO}~D\kGڳLvS#&.?;\neڻK?Bvrvn(tz.&ʹ{ATP-g4ҿ/-H355^pt宒ӠQJ} B=1E[I$.%ι9lk|.D{ `z_W\-5Ŭ'k;U?ad4)}) $d?tW->[<;2i׺l8,VC~3A<暐׷vG j9Q*1*OpGj;> (W޾V DLEy"O켛,2/Ϫ 4^b3<qJ.pvA۟='_QS#H [;ˬٔز<m9[dR<HfظZ7ZU58+:{mK^[0Lpq*HTLSH#d6;⫂[M`k#4vPWG?=Q8߻]/2Xj5CW~`ۀ3nU8gIuhnR܎<쒞̙+&鋕rofjǎ'HX}?z-pn1MTy=&CT5B2:#.n[GWO.d6F <7$-~ʂӰL=1 Ot/cBǽ`q.Y6(ƯuD[{2kJ9+FSo lzSg|6o.ᘬRH+D `*|^4V v_[XoJs',ؙd~! (U]}XG z}1A+bskPۖW+IJTŭPśpWf>},ӂRv[iRWeǝO>B>YTAHC? ;g9 $3=H?W)3xf/=0N^5M~`΅<CΔ~n*2fZb]eu3b0=.U97p[mmf~^vy_K*lHVkAZw7E,.M/c꿫ADbirF?&, 0/Z#=_LgCzgN@s(MT=Ġu%P~ ۥaLEJD[Y??+Q6\Pڙ8w*T)!Y)a<$vvnAipSh`3Ga{H)crڮ?z8jjeÍaQ(pk7K"fl_X4$9]5nV`O (p$cTz2ë\F,)#,I GDx@em-~~HZ5N~(>?F΂NuAO7LIW QoM8'y & $ oPRp!3L]K<{Y(v_[y8 7n9v{D+kE3k8zMۣD3@eCmCSQv޳ٲGȭ;t+~H\4v M*kyJ,48>ߝj%61/@9CcF6iLٹtL+~'>Q+HqTK3&\,bI$E/'flK nD_V:!]>]:[IK96Ţ=;e0H;[{Fơ]:KwɠχJvg j0{sq#6#=ʖ$3\FGD'j*fK8~ JZFX#-τ ,׏ml3ir,u?cX MN_]<<X<)n6NxBc!5YGV _7|!TJkCn[= 4')Jϑ*l7!vc:p%7l>>4X!gM!W8-gL\yc`ΧsAFn<sNo nLrmP%A ZfuvV ;K諂)p|UiB5A,+ YoƲ;}̣0*Z;41=_ Ԯ66X.\ iInU*脻̋> fFMv ak@`Ҋ]-4b==Q%;UYaEo%U\AUcH4r,pY,Ne>`;_ Nh0SQF4׋>Ǧ:.1/r(n"c+4G 7PowxNdY =а҃qG1YY_wԟyI-J |)@ 2,M$X:8.8p>[1[-_z5u*~/CqdOQf{ *^, Q~fܑo92سʭTGu/Ûw, DFջ9)5ګ]R#5KRp>_iR}4dӻiY TS q@ZF% BMB斬7f#xN'On Ŧf>1 I}yCUE&OqBRI;;0w2rMd g<tI",˱W)GN}+(I&R,uJtq'{mO%bױ'g{EDlSm@I .,r(]4u>$x97(,TsmUn=i{ 근g PŰoZޅ}&-7[&NAÿbDW0w}ƷQ7#y w#a҂n.>yC6$2 YwQPY2A74E c20qauxyRWS^ޫsrdE(9FFݞ'C1HW7ɼf$PzDVT˰ ǯJĆ3L?%e0DŽ ) ·.birGIM`㠟- ׊؁~_?v؆,(k鈟,Pn C^33KFv`fflv[|Rwy=HT.L9TQ2:|e@iў^;SW 8a$Zz>tH1>9!,GsQ/$|y` N}h #ƆH5 0.]K 鶱(ႚ[6;tY: L:4Xko[WHdf%\LKiiu>ڴZ2'TFdI^pU4g!0YYY!州,L6lwk8o%JX:ƫ̺g N5W#~U'X_̩@,:Y\ #))S@PFZ"掄z'L& Vm*JHЂM8i|6KZg$ Z6~H{lC0aj{ü$u$?A=9B[~Ըaebٍ _.Iu(Nju$3jT;ZzSYHr vTޠ׃z}@d5ǒ+ۯAruK(921N@~n!qQy;t0镸pr MMTs.pCB@7/\!ONF$s ++`,솰 3a?9+3H_ZHd P:tRgBۏ 
*S6GZw+u {K(?^:ׅ(* KgO+'=#v\lר6r*.Y Nۓ߰-8t]m}_{-8z^L -0Hp)sAp&)aLm{T +sgA"cQ+aӐ! _̭^, 5a(r7:-5aȮ}n&DҔn0r/p,nQe83mGm -ׅ}_5~x`*iNS$zjf.{1¯6,ŗh._W<.CqM*s_.ˌ+z Lͽ0Hf⫉-ɥAML8rg`tQS;#N`{aV=S7Qqvk#&)g;6҈~qAbg C+ѻ]G\CMFfCekvbi7Wg.4)IBo;l+vQ*RG@L_qWb.t0jmhכ@嵜 ϟ1/ "5=0<߂@\SlhS,kV 3||Mt֥cM_V\NFxW QHOʜ3'p6ÙAN?0rG+"627 Zׁ:pX$mMӚѮ˳ZܒoAO$.,pمˎ0s?RElߒB7nN;Yޢu)PQB{Xh]+lpMisP⻜{\Ƌ%>S_]L;I#X}@g2H5y*G%8"ݵ:9٠" 3@Hš{3VUE,P3}]xۓPzVWތ/x(BF:6^WM&,}B)0˃u,*|Kvў&[ɱECmLg&jnpF0qV8%=j ! Q~: P5P7ߝM\q/{,S72tZ J-]l,̀: ͡ČSK* R`HC*> LbU;K'[u95ہp0H/;HijwmCNQS+?Ly06Ngy,IܺS .v4!P~2v5b{y4 2 Y,.ܥ+xzSZ5m1RXF(~<((L@L?++&ɍX"O4^К4X\I͹r5p""#2i|,a|N˴ݡMjOK`K._B~*jJ\ebijT,7ǀkW*V\gn"sƮ(5O8d/si+~24B?m9PS7=3P u}JpYCrfbQX1nU CwE$$<8au]*0ⓀEd9y{5LWgG[#T/&9]IR"m,jQ)`}&#?ҼGJLA+ī^ a@| agL>Kʌ|UBB<\=yH9*o^zP7')4eIfo);J>dt!5v^_b `l,s=~c(lq6&;ڵTΎ$xZiM VK)@uS5|eIU: 4](Ԇ1 Kc\m{^uvEH@@Ձ3VWfޞ<ׂ]p2DHTBி:}Z 5zrDLXdbTU\e @"fećVc].8s T?ŽUЬ,R8Vdz$h'maݩ/UUAgl+Tsk/"Uv E%@. mz~roGͥ >AX~JTIh'ʶ\<13Z{^9?U=$9} $9Og$ z&f/¹A#l)8/W21XEX? 'uؓb(W^So{7Lܽ!Eim庶ܪm>h-!;A ɷ=q/岔n//*Ljqj'Py%LyzqA,os"Ta%(TY-zT'C).U̚i[,U:~yzhv bȠK^1NߗAJT:Ш:b21cm( ĄF_Dқ6]NRA2&ܙ+ ~M@A9,r<;"Iг%iWɠp|fYu]}8oE^OfCE*]hp՗m r א )](e8Mo!9a!*]ai:$ƨ{ٶW6J}vބ|TTeMsji)ݯ0(OK]x9)fP`:AGXW;EyHP0 f̯29#mi#KOGu c1]1R,|WUmŸ=WŪ&@^Z+mv m(' dfA)@EKP5+jb!)hJ,I 8oUvcvd[/|yvڑ1i@880s fL7*7W#dq5 #' b0@ZkT(Ha\"/iGXOYP2X!٧4'p[?J ٍr;a?Ay$/`υƜGN wFg/z؃s=QLDvw?5}c=x%[I#" >6Avx\"=B2b(;*RUص SӨ *ָ0[:OR{&R7an+؜g5B셽cM6+i_ِaHP^jxgV4AJ/"< zQY#-DH@lġGJCRLtMgMbg>Y:Ay}7(>К0PY_w;CceLZZwثh,Ql3i|"V;41*Mј6&:qNWlŻO~H`z|"<܆~<+Pҟ Gin[w-{xq$F+=)jR?"۾ q*6-}ee ,@efݧ<ㆵ)I\-"(VQ>!< >'Dgb/?{VѧS 72bV`&46>2 ٸF ] g^fG[hV\Qis9)m*;4W7t5 g?@ODB`W{i u1ףoxR-% fӢaJM aN"NCp/uķ":=`!ƅS WIM{r)s(BE9#܃I_}-EQ6eM6D%PC]d g3><gddrbP7tl#S(*H^@B-޿]_3n]#uGRx?qrBk|{rkL/X|HSF$:4e1®I*(&  $yᮄ7#iV7HfMfkρT~C~T(F֜pؽ ϝ?k鿇щ\:9ވZ$Jq>K"@n$<ƵCJ*w EI ?CX3f|^x_G& bgk(chUGmSGRHJ.F{a뻎![St]ADm+ds2p?U&~Co(f<ԌꓔMon$<*)͉ש9t\"c*ߗEEtJ| گV 'h@[s+mGFOB ?;v%m|g`kL]N/b0@}1e+awA qʡкX0~*z/T~Bb Lp,;kZ"Ŷ7JXwoz;[:uo!HY։/+kCLL JXs$J@Bi!&J =8h@\3lqÆBN~YE`xiT 
$)j.uYFc/A4&W\֕vk9ZeM\Zn欬%q[J[7i ̱DH/ns\YT me~Zck4|J4Wdn+]m qKZY,!tc6xSHD9 \iɅ#!;)/Vlw-FV&f@=K*WZ%c/P7[b=c&>\vjOɮk´NWh#2K3K/7"[H]i=Ƶed?T|wvB kܤ:ЕH2q\TZf%.1@ 8xEWZ$d4d:<:Bp)~>ay[js]`6 lQA,<DEXi/`7ڼ?KDF/(MMd';o%@iH B;PiU9(}9\1 |+3ʤ vzl4Iء@?%UܢRP%cOYe=epLH-7[#~]v!f~,$zjR2-|{Zd=HAӚjo=TxIڷ Ly!{uRo5X&WU!WF9P, DΧL>e%] UVҙ ҝLOJ&a12}i|;<BԮǦizK6o4+-4O1}՝jh^z_8&EO-IŪ*L鄼iŽvr0.  v tH+ Heԇx*V$`Dǁ7 -%>>d^MO.uO\Uv3Ioׁ^Q8 B H1TְZJ?NpF OSąV]*GS! vhe«hٿ]Cɱp S kTKy6׷qq_>7lyN܅Ҿ£W$-VPDZ |/wQ+}ׁd3CH BA 9@n4H9L 놅/"z${Fb,GcbBN,Ņ5GgYDMfP!4WW޸zk1 ; Z dQ>x7A rb ]+#34i_n48fSN:$)H!,{/ZHk]o6afQ+E}̔; ']$IS +1?qAJTRf5vw Rqd<ָ;C Zܽԥ Ψn'  Ā,$҉Y!Q)H/I=evi4-hG>mnyŚ\{hPT1Tt Tj]l\ *d76C|K =5|ɇ9ͼO24ƑgG1eq5пLbCȞ:50;#dߑ؝f9s?=+[;0=/\uc*3?-H⸢7q:xդ -iЅ .²z >:2ud79]P~Qlrj&;R|^m"bpkcVvTܜ5SMt1XP O&?T^Pґ9o׺ =##yx5uO6؍ֆ6[8@ܱ 7RjJж[aP Mh2\Y=j?༹E7mk5a3o߅']B`9 v;3?وDP;RN߫%)#1ʮZLע1Vca0s<]nF[=UF2HT A>#v# bE=hOPʮ3DS=Z_"`9@I]QE|P XeIRaiR9Ef'wGoty(b4ڞ o 4HZ}vpG[Gy[/\ k/ƯYgCƛmHځJGx!>Y<.f@^r= O!NZŲEhp睺k]ROnR|yjŽ4=I Jǒ h`P?5^M&b9ViI)լD$3d2 "/`~9q.QpdfZ4˯4,!*z4p^gQw,PS ߈s2dblSum]S;k~d5s/_y, y,؞pf5"k3ѕg͕_$tsn-BK ,ڬM[dNGZhc}O};!r4p3˔XE?V~K?r,a?a @_8K(\0Mݟ0>-m7W \ ,f7[Iz"7)gi}疼鏇4Obr l[FfMH{:FP@. lԷ;jȠJ^3ñ 8ifK{2PȂ}Cn@Gc',GI$6vqz.+] k{J/)R;UL։O})fw8xvv_aDmNչ5?r;!Фgs 1+e M%D8Mܶʮ(&[!{t]jKTtaK-s_3XGo/?L34K]-O (RpQc?UE^:I5'Z\ڐS$ܮF01O"ڰYk]z>m0y3z_ÛV7[kpk!nl1XoSSy!MRc,F&zٲ;DG.3^ )_RyعX 4C&>.:S쬏,Xfg-/%l׳ڇ\'>nfq=qPFFюaU_TI NFՀdE8[=񧂙mh1Ҳc GR}k{?NWz&GX7ԚUGPvkwhk6?lF(=dŅ 2gBĺm`Q#s;IUac3?"^svvU.&$OSO硵$|B.%<4pt+}I3Y7zVBUňM(zK$ ᢲ= z1F=!W[ u:dAV&j5z9>W^QP&V%*./y"0 oʘ*ߐntsٜoMP<[O(W땁sb[rEt Μٷuery{7oC!rr~. 
wlۂ~x]c&;f>kRfEhoEv˳L4e|IPcC&/\ өluW8a uoIe[u',fDݚۿ]#`s?$8}gtDŽm,C0o fQWu9BU#]sT@$P""S1)bxaݫ'y􍓌7 L8Bfjsc) .ߣUm쿩#a՜Ϝl^d!A舊~c 8n푳i}a6Q44h3®U'> ?ߒ듪[iYF<`{H2HPCt= VSvmLvyѱAS Fy%|;{6dr6!LW5 "KN1mw|=^VG!ṫgyL*p~9Z&F(ćѿ\`F#0ӗe Xa+fw).Ek2:םmS<xƪ}#ݷat@_aG Hzع5e||mmΒMthпh ͫ].&ךȩMoG4!ֳ+9 w/DlrVKz}-'BcPOzi Wb@2Z_wL  K]BDa8or*Xpw=ŧ*'B a~񩳗郯Yۑ b@z9~5XP@K KNdCF.NOVfiF&^}@2J۶(S@p1<]#C[B[noylwdr6/"lN2䔭}aeh N5`t8;!x@R ԗR#5iA 6nB\Eˆ\@q2qF6sdv[siC96WM-8hǮ/){*;`I\=J'x_8辞":m!t0\i[]Q0 !ږN+G $.,OH(3*߿ػ*4dHc1B%ɣO.W}{m'r#FBZͶ1 qV_6n_@ӹ8h+wn"Cs:rPZlF ,b0B ݓh6erNWO2>]J_:Cz8*mvH/q|-M ӏӾQl;A}ʚu^ ʝR3)TynP!=ЎT~4[wLG%%W/nL@CVǎK?FfꏬRGy#yJv+Ijm d&6(+"/2;Oc^.]E,UDZ9USN1R.!u앪2&n*k9O#t򨚨EH6sܪ 7 -gks,f&~\֣N V t疣DArw;B#OܹF;#y>:{,.Ǹ?څ%2,RRɆ$9e"]o#}0nx 3E6(7&[0qՌưg\e? :όo=&IOG![t= Xr\Iz$JAi%(=c_O#hKnjWDh7&^s̒@ՏP ۠rϖ 93+y=0MJE `.q+ 7 " 8hx: 4ei|GSQvǙhIB/,st*` jGqM@uD8a{ϱ\DNY -"i3 2;O oyfӀJHJc<>0l5 5"۝FZ憩6Q) ̫@w2&i^]Ah10?gz ]Y6t7j{!L ":*AL^~ fd7Tg/kɪ4g^CNAi7P[6C<9VjB ?=7b37"JEOn2y5fB6-;ÕǃQ+L2 RdGް`IeB$: yRZLZdD*/oUJjݗ%LsP.ym @P<0@=l2FųQ{]x+" ;xh_}n <)ƱW&|0"H`dm}}̦b&rz@hה7 |9n.V*j{.ٞSEES&0I$Y6l|Y@/NK}^ƶ,$s|p-^,f|ˤ$f0(!1ژd~82$ދ*Xǟ](H+oPG< dG8"MMU`^aΨ.van |tn؆u՗L0g<)&Xˮ8. {S7Pay)5LUdb pvjL=N7(Dǜδ!m-hSj6Nm+v1:]+Dq,L;ұ0Dz7S *Z8NI!u,Et:2'{O\[KV{FxKNǼ9 -56#RZwۓ| W,Jj9gQ趱rLDT*QMe+J04z`ˆ*=4{ʪ%Ey@vYAvL:^Q^~slzhkvb4V$Αn*r9g~CවKN&h± yRc܇ur͛.ƕ~&*'/0 |pP|d?<匶 ט`CSʀMWn_n0EE&F_8:? 
Irz`:ӄ":L *gLgBicYt^S@uK?#q{G'$}X*/Bgo@2Yx"-Hn1Idn)*4.Dk+w1cct7 4=e WM/Ǔ)Ӊ[z#Ԗ`篙ˆZ%o՗g|WěxxCՔNE7Z#0j ?M^CeYSkbˈM" @pn]_9զ$x:u%mPww9~^Dx2}Xpv:Veĩop7Ho/0>6衸]@eIdzvJw1rLZjەm `]If怍4u*ViTw+ƻQӨfXjM-;X)>7݃Τk.8^ d :JW=FE/)CʷB$o ;+-5Ǣ)ȽEv6Ru`Ԝ 7T"TGkV֩¶aIޖU4(䪐rM Rlw'6P-l-LؔMg"AvefEA6IShq)?]j純ͳ %lGZwJ2ݦK-4_ *X7n~6\:O東߈=,ȩjPEDw庰p!P&:0."ǧcA6!^WkUhh*VuU Vbг|qG(Ou /PJd"Ͽ 07>7`RBµ1[*f;R3d0Cڼ8RMz  jBP+"ؓA<:|gG."x[a!8/6N+:w1-SS)kfrzR.~ -)CDCr~%EwG'HBr &)7:gr݃D*GRamޟ!YR!B!{|WbdqpǙoa~E wlf]1%7Db^#>٬s77*P$6v.t']iaNGޫ‰lj[۴)]9{Imү, 5k&r^RsMX*{af,\10ڦߧ#L:-)ᷯөX1؂NCuE|'.1G~N{AoxTC!;\{J9{v= F5cHq1 Ҟ90na %>c5 1,ejQ/;ze&j4߳3 AC%ܑわ@\o+ +'Bp+zӀne5ދ!2Rel3]%7iNWӯF@kgs r}R-sSK蜎9-&FobfcFM-y_yԡqT``vFJJRmKN#~6^<\+ =f[^C^8v|w8kxJ`sQz$R!DݚX4*:'pʫD?`6eΨH_50dlgk5Yw׭S`[% ;;ĤU =k'*0V DǞrLƹ=Ucs-R囥_H(a:\<(_7ō0Oh65]kc#}JYJ ׼ A "3J~I{~EWD8SQPN.r&Aue5Eɗ@pLA'MbXzKl:3H|Hn/IjmJh<֮srA.ͬ!sdE >]Ub8Ԑr\?2q|#_EmXJ#Z46N2ijK`>m0$-*%JW2zc1Q1ֶm)IRiϚt[p]e j aS2@s ʨ DXE0‚+Vh"3np^][v?P f9Hb:'=3*"a~SVG|bFo9#oW3"BJ)6z^B%ۊϔ(3@RSi# *MM]H93z; L%zJz=_vo5 K7{e:wut.+r-;гLo[M秙 K'A hFU%-N\V( &^D#5n5%n 5SU԰{pK6{0Eӳ͡ 56Q ߮ant ՞C$/z? n>.ѿdC1^sB{c+Wmn? sv BHF)2l#߫ӆꄺqz*@*~K5) ܇dG/>8~ ɵp]Enu|H.W^Lэ 4sg ݍ77¿@ ȴ@JM/Fe}cn@ES**a ّK?1h lp-]&LS⮶.uڬ L|(o?m7Ao@gd% uݦnrXݞꤳgSD|cGSl8b])FvQ+)c77[^^A(=' 77ZkY/Id/~K664A"-Uo$}մ_#KC,A ^ް NSDĹI[w>.PF6H̐n5R8x&ŰoX/zL4G&v+^E~yt{jBemѴ~s1dR5q>W: <8_*g{ʬd3MQa($s򏴽 la(EqJ}<*9 \3[p({(.5WDɨBtK + =;%Kf (:nG%bE{JC$$uD)};4  h,tFt>sdT-1߈7۪v?z9mĵvll~^@66K6PT\emˬ6'd H <*4"&Jz=G,眹 ΧxOSXp2y99N>3:h k@04?䍬9s￐WDtEu=Pu@zn' J6vٽlE /OrpB`(YyX~1ì=q7Foo"Š};%g [}BnJPJT>/>H55F@%˜ >ާ\NJ1>j7jS7!R^Mױj#6 T>^cP4Q# *#XbwXRߝ㓉iJsݶDW>sy~CBĺ(Gj:ђ{sE1ASmbx>1IoYE @GٝF}N23VSLԐ`c@هy2|b4'7-Ѡ?IJU'U- h㎚ MUڋ: "TdV cbgSO G:FvV۝H˙69Ƶ}^EE?!2oUW0+L]Ȭ:G>p770ï]k{%E?yqE#&*ľz%I , Ϝ "\q-'U-Aeq73$t9OWA9üyϪ #荊xB^ti)?>Q /S3z-U/YK; [c_.LG.CF QSYF -?$^FuzDlŽOQ8rʈ1azA 353o{@\rВ>7Dsۼd}d@w;v_lƟB[Zs_cXRSG!^@]k>͑*lwI3۾bC+2j# 5| 5#1?[emĉn4HBC'0X;P;.گb6 E4PwoE_˥pN)2G3e;hCb}sg#I?f` S=!a ݏSe >)I| *8ʖ ie|/ Y'Kcsl { c!s8ffǏ:J4o[wXUV.j*<\YfIJN.) 
5YDC8d /*S0NMb*' 67gOme)cΫ󄉨i/2mzME_iIc'8v7* Ǩr7vמjC!S~j2*Wk$9 W,@2nYƗ ي*?ExU@d6zP Tݞ'lE2qcq] ` O/t ?))O[ZgMxy4oȹx؟M̀~"6H|Lto/{Ou\|OsHh*3^prܴ Ed~,a LmLbe 8qkO|Z;>#*末8߯`,66+?1{KVfm{ :DJl ܶpLj [s Mh!LB>1<^@&ѷ?L%|AL<1`U*r-Y 3 =]yq~SLmRt {lq}TȣW'5{u~ N% =R}:m?X CUs/n( oHهC'g;z+I蔶:}Skx8u5z^;\-(C~>>/QQQ9)|'癫zgJ#)s?eY1XΜeԔvs6HZuK~Ge6\Bs`biQ'3O=+柧TVZ1ZNd{ L /=.@z2$qX&P.0^Tddb%=If!Uf4X"pE"D4+? AO6&2z?Y߁H6 4~?)_氉hOTTS>`!!.1]j M /dRNKp^>>5 %˽>?W zzEcfԮ⤎]ct1hLKZEpUOrf'OQxMniN\f '6w|Ml7ٷ#_:%_ nv߮zh'AܯPc&^ߞf~[i2iz[_'$XY5=/Trׇc2;AƏ>Tx8vC? #f6 BH50Ĵ:+[-/iMlWdŦ@+;sf/lEtpoXeiCk@{}!׹]l4 KH6hQ `$dq>2#~qـ!Nju*/}9p?ҕ&펧N!ey,AkKٸ,VE5thJy>PKFMhhqvO>}l&pb%R O`H`xwD׌,YqAcb,9;*`:^|˜:Ϸs|;d:R$ ;Hldz|EI RD`DEsN}E`?ۂ7e\q.SȊd \kS?:V.G" yM޸f|u;5PQ2$xYD,S͞#95gRPCvxSĊZ:zG BD>^g6A].W ; A@RAyGOG`Mv \#,PhnckگMV Bsfc$hH)F- D B(Cwz(Ɩ;O0CV[?.X6f M=D{ N[*F+e~ӁBEWDN-!1Jq!q'ovSRrSˊRG2d V !|aB26{?E*fPDG3p-dN,<1א?-\G!S7`mx̹ &#n4&\mR?W`ՁBY sut+@ZUzCn(!)Wm+ĐP^R(|{јZp qY8ɞvɎe&zpCiarX0 x õmlH\΁R`ThpH[Y U\&A =qʪzQd?elf87u 9s!iH}eQOױ*5v~9p 0ñ\}iQwn+ҋJWBkXjKge 'D.ixxѾ-mDF 3Q0ޖ7;<>ض%vg]eK v(?ؔ m{Z>ɐ&YH o:TKEFpUI"dDŽO2*h{`MXkG|Gt" j8*7D1INdPs [b#tDaJ8%CvaZpgfAacM!m6l6þ )Y AYfONiֱJg0EXH`{dw}NK9LAJ4}4UtG>"\O9|~yPOUȇ@0,eW\Qo)wL l߶h|cի뙇|o>ɒ$0`˴3j'l 2Fj 5EfVmVllk~)4hOiߵi6N)P;>pt[(v֒pl(<={@ǐOPiQSoM94y}PW+E[@P8MSmޫ(6tE*u ><о3>uKHIibܥ?Jc?LC"C/:m<&^K ak60v-b˻g"Ik9L5S$hS:<|]D!L\c.O {ӭ5{Q 2&Ę/3M RdCRX@!b(VS#JธmǩP꼓B7>ÏAUNH7NJ!ۨF zt)d/]]ꢧib[RΙŇ#9w `%LH+RasGb@꧜//@uoՓa'6~/4`޵=B]nbuj@֊ڈ;BXߦ&qpK΅4+J>߀K`8lϳV` dѾ#h4`^bCCP J] ^͛|Aoϧ^h7]I^LmhqobJӶ;Ǻ*:ǣk'ȵ]1}6k GsC%0v;?IX Wa'G_vaTiB25Ն񅂼^\=FsI1".P=$5;֕*4SU4 aL><,@<Qۑ(afEd:."]i9yg z0G1&bE.uނ(:c:20Q}W9/V6҉fNtڨaWxT3B.l;Ewt@ʋT1g}yIT'`sm$3*g|!cZXQ)%[6Cmk&y~r6 W!v%S/mЕ;?񙝛_'h$RttB"OIO=K3@_A &=\pVjfg0*#g4ĺ_ H sY}}ȃVY[+G/Iѧp,mǤ(-74O2.t[$q^A^hjdBe5y^"#\Wzʆykby3[žJNˏ6z 63T/JGq*Jب( wаw-2ԣ*ln\{/gK^ %6JƖzY _AzQ!XNA)U$u,+04)SxMVPxo<|0SvXS=ߘ$ LZ d+`Bׇ6Oe(B!`yKrg ؓx~.Z&R-D4o'V2o)`9o1 6PM.lq%"hEJ/uЏHGQ:BޱN?)%bcI[YzCX?VKDwGW01Ņw7PvpZ9L8V6DV:= $"\c{nw2Sb&T0AўT1'xZ7VJ!'Qx< "2hGd1enNWz{q ic)? 
}0 .eSk/B/B.տh YW@ű8,Șwth 1{|(k2*$n°髟n>EN 0Cw) }5Aj(+U\*ۦpepAV{y=QL*c҉7*\?ғ h!XJZ%cn6 BOK;j|4-=s]%۳Jj6GN"˿b||K@ͧEjb= 瑊XKZa SJ9*턔2'1{_ت-!?4A6j$YWii24ŠmjjUt b@2EͶ VYiIZS*0Dr#^Ӆ菔"E8oC@de6"gj@+- o[kDUY۸78䒍B=B5K/Qnz]y˟84e(,c>*O)b ྫ8up.Jt7ywMSPF^Fvt# Yph;8Advg??fFP~o'gePb.afS U JLal+ԳNA8`VCbjIM 8L "dd0M}?XۗޟU97 bbț]+d]r# V~{b8*'WYMƐ$UZXӸ~㩤%x<,w6hIE#G k2A8 {atfQR#UHP Ww;K[nJ[y\Vgcʚ@N7W%"Nտ]D17V#˥..U3%gߏX.2!*xۢh-zѽ'?묉ׅ O<Ƣ}i%!WB`;[W[[}Y'cg nu0s4' SI6qyx|v&IqF0xT vR PHg!)QG]+' Н|4i@ K)%ܮ V5\Mjm>{Vb~a/|SUrH_ELsQ-5Gm=}/-?|?  z>>e~m~"`<tuZSCBTl(qõ.7ՕY Ti®15YhcQCAk0Mf4HeYDC25`+_364k!Qé0 Ae7ٸ6)ڐ'dŚ`V ;6 . ޲Yc2[ݮxsf5 JWIEe O@[?0p+=y0G _۩x% {- Bp'-/~Z]Uo=i Qeb'T޳6cʁ"dkvH+xowvuK}3mh:yAg]-j:ў_  ךͿ=T(**ʬCy1J|(Tq@*>5:)+9DZe.0V^ h){eTYP}kO QfC2v=*:dJ2xG*9 D ˂RcfU #<|Q=[Zu{SVqw%,E9=P.ȥZ֕@N 3qGwļ R (F>:ȹdMy[/Xߟ8 5YEh 뜗jcz&Gs=5c!쾡-` bLTKA sΪ (pMF \T񕙤L?$z )RMx;Z["Z4!@&qʑhV|57nr shؼdj~c/C mWH7byvg+^jmP1tU^C/ڲ؞nV-㨣̲ O=+BȀ5t ~{ z4P($4ʸ7ٝ+]Fj7Ƈ66R|} h=hϖa7 RB)>ě׉a@؎:Fj=XI% y ~  ɱ팧o5EͶOcm^d8qKh'F&j&EJNՆzGMX 6mqMwKQ$d|w?cfC dDJCBC%ǤT@z#|`mZ$Wִ;mIAU >Qx zpc\Yȱi&ܸJNt/ dZk=P@,#CĬC}"µk E*̬qqQkEK(M)hI ]\,y /mp^$= Lj)ydB>Vp? dՔ`rmM쟦vΗɁR9L*3m%7Ei~.̻tdtW#,a -ϺÆF/ 'CU` M?|nK2 1) mcDc;=9a9> Zpؘ''Zr~x"j>bNy7t(k_= 1W&ʂ#NfmfI!!,T1$f@2ɉnjc3(~^=*VZfQޘC_/n;>4PK-Ω֨$d&h{n9n +zY~z/5ЃN+Sic5H 7ze^*WEե*GMI"qV/Mw=zUYn[}ٔ˿e9[$kdgJqS0Q,ga϶Q%:*R_xY8-ˆ2to pA`? }0'^O};^R(TsUc? L۠3!uEB}::{l_5 \)9.-'o>}^FtDgKb+ 9 >A[jP:Z&N6 ;eT1},ى/$uQpX̔v[iMT(=Lbܪ W"|\>Y X_]>J5\9B?z(IyhPMQ(tlPS]$>\?jˮQ,eI3rŚ k!]a!sK.<^+018S 7WӦo凐#X|7]V~ԏKdNz)A0_e>tQeht@3(UDTIlPؽk O+g묗j 4Ȧ #. 
^Qfgi]Z쐖WSmT5Yš 鰂5|V2*PC|*?nX[0F[s;K4izJ }ֽ|v"sLo?@YO*hs>\j ->E qR1M롚dvR<~~A~'#&]^yWJ2O{: !";ё!q^ʜғdTbn!TcRC+៺am􄄣͵v/jb#pEMAu fh kV\ɿ[۽aI Ljn)}S-;0z=RamgWH= ۇO\[|6Οf,O3Mj:OM%1@HrVJjQ$ӟp;[|>L R%-KSs N^-sz^6HtKt@ϗ%22 \#𓴦r_#C>*/6'5N}~Qu+ԽODqJ9]|zz\NOz C A,+hXU3ajOshG{Y3|֒JƅT~cf&P*R+xlp\}.nṒfV}\zۨB6Eo@p㺑1v˶=vM/H (/{.(IFwSH~n}s7MZ;?yjv;N6#K36!l*Dbhkj*r$h"⶜㰆ivH;vKA.w${!( `kuKҾGLtīq`;:ࡻ ڛxת=3;LI^+2n۳*y=F#ujnSoKR ?Y ˽[|XWW-ߣsrQ L] ˦4CMlw9TlXvEPo6fƦN`Me{ͱG[NG 8|Hsl$}9+>Tz`sSu&WlRM %cvb/I8}?xHctD.%XJ%AA/1M%N40f^4x}lo]ٷĩD&MI2ȝzќCȶ1~9]o ;!V4"Sc(^P>Hخ1~NǺVa1(}~'~lW|d,$C]UJDu #a w[->cAoxXur49NH_C9KCβm&.@~*;O?~G`jB#S1^ ke=m۸X9]@ C$cLQt`w3Tךzp-۩}vmnǐ O7Kq=T_A'jcc$ԔtU=Tl~,V`W;T+C.(;tNՠfԚ^\6cQFL̷#NʂnI/ ە9g[UajVM~+B]2:-I?>>]"@K RCN:#)Be@̎v[1ZΖrNx.hi9eeC'uZ^;t{!8L:~[4'Y}!6[,~B8Rl;qF29A2`qkdȃtDIHhgE /98dZRNr: 7t룏ZOd:KѵQ@@?g*ɬAf2n=[}j/+%~CϐT Ԕ {gu)gԆ>ʩ#4p&x˔? \ara[,&e/^H;yGwa%D-u425SLe2t35%r1o!v67#_R/,`i@.V'a ~7aq&" e9ylQ JQU#Sb'\K\=&:Q;AiBuZ[r׶*߄-?nv?ΞfSTa\:iwo6Q̂fXZ|pH/f`kN}4w~&t~uAKSpxWbl0SlK˗9>m1VIc9$|ۦV72aizjു4x )5>P9ig1õStH#Rtp*aMО)xK_` Lܢy #;kVϤr/TǤUփZ D>Ga`Sʎ}hE]{Ew|ſ%,Ϙ=@/BLl5fV\M=&dD4B#\=dT@(.u[< h`rof8hijo Y 41B ǣ*q)OK2re.d-w~ZZIii)$g8C #(\i>ۣT" .Q -TǾ.iS4~Ȣ=$'N jb R{7 Yӡ)S92O]lMQ/ yog2lO,//E9y{G%?M$3ں\܅ZVML'QUƶs ^ C6jEH0٦ ч"KA91W}pRQDss39OUAob聿aDxhYr(\rű|O #{د:%)WG5*rΜ+W,gDQ_F.r"h{(Y]\Lr U#F;TKqcD׶JoҪS<֛u+͔ا@̠YL照d eÔ¯ɽ6e]9u.=yE T1[(eۨ_u %RSO%=r!Lcud,37y3Ag/ |Onx˽tV wskI^eLU%-fJodIQ7 Or!È͍ - +L˱%5"@GODb67Tp?"wY)K8PNS\O.7Kޤ]']>ψ$U:rt'~Vչ3MI~MW:U^r YlSvYOt,q^Nu@qi7NUggl 6zv>~jbaاcB8:Փ_b~E]d$sslݳ@0LSՒQ]c8:] !k'NsоNZ%ϧZA/]2ǁbݽ/p-v]9)NJ`g=,cI^5-CoF8lI۟B> :3yY=̫Z) t`*Wl޾tT׳)-߇3@&Ush=-}SKoއM.vyfi)o~S2mkqAJ+f,Md%E ]imVQ!7Y!lCzL *Vn>OK1]E`2"G7N+ٔSB0 `%Ϩڄʀ^9"xuӑY͹|LOݟÁ#2hTRdPՀR}~L dyR&O=_܃6KMxHGgl/"3_H`R4)00~S5CdN +ћr38 Hv=M 7\h, +;r)|l`5t=Hn7X-fKFSkբMCL5vI̽ ;yE_mVt@\ئ ʱ~0]d \ 0sodkٛoI]0k1%A4nAr z$a ӥ})&-fTL]dYӰja ؊bt\l>G>&r0p#?7mvEȩmxe. 
X&3ϗV5J%ea7Xb0ыT9zƾD/-҇r>$C~C*"S^Z/GPY<gDSp6& yrB۲q˨%J@_GPCRʾUQ.Nc{ih.HH^ d& TYTwZ6S8XWe;rmq:ӎ*X =T`[߰m/-:[eTA)rkj,FDPOl2h.,KbNJXh^Z_#=/5NX$9g\LY(6sq&~O7^T1l&@ 巐 ~-HC * R&5A$F-=oIJDɛ"UUCh,T_&JOsSv9 0gԲ +Ey&pt ʴ׮Sاu_Y*g-[W`cN l$`ڔ ,>Q$9iUj i>@W] 84`i#ca%{fF0ZK P;S!=c ~'#c)c.yM6ic [qd.&|ɀjTz Gj2uU1APџ) THwa E8ЦTZtF+Lf ˁZA>/5@;8*@`OT=a0 &껋2haBj2iG'/Vufs&3!E kb"@'\k滺E Z[&&D K`9qDWj;,Y fx|I )x.~NǼu,867KBoM7Bցt!G]eHsSo:8VӿKޓ z#[*A"5<|LCͤhV˛ۜ^oKOP׮f@N ©xujG 3IFz@! մ) &C{z)~*=b _YsoB1.Vr`-^K[w>k}0$_"HTdXu Nt!Kݴf)Sm#O7G8e>iJܒ{ǨM|s/ X_fԜzߊ"F2B+Z8JrГLB,';vZ7nR{fȊGEffz+ YW%~SA:Ӕ+)g,vΡ^Lg[evq+bh44ZU}GdLY%~$zg)}x%\֔9hJ~!BZؓTGEOw%ܺnWw2~ Be-'i?I䭲K=l@,H7J̠@KWter5ͽKʊߨ^d|YZy9&Hop*n8]~:^yR=IOA=9tڗhc2$pX\ eE>OR'[3zO1 `X״Tsk4cv}IYA3 `v@ş0lܰq:Ag |9= 'w5ƅIEf) 8?p`(qX\@yq%ysV߁J|z'#I֓nx z`\C"QEsgf$V,|:Sp3Tb߹AbpVӷΟsO03414=BX4&|L˨8.1Y"V}op]̠YtRQj4SWt4LsI*/r e8]>^}IOc=D":H7-搚hc‹$7Z0?=)/ٿ ?)Ag("ۼ~xEwi-fc˨Ͼ2P9<`v]KS +Ƞѵck[kdLavy]*V<NQ]!G&m}n^rC{\ՎaH#c WEKtk6 Q_?z5E_N%iKvhUǟjzP| ckGz@2=j~U+a9yuio-"#r,BG2% V\QRSvl`L0k0h{}SP=fܯ@*Ѿ)amMv~oz\Mm u$àB^ }H`(@x9O=>Pqv_ͯt)a\ҫ ' B%T<5NLWJFqcqC $-zs!R٤ 3D5|ؘ,QMScP;YXr=Iıf,9qy@pKQ䄑|%!pQm4CEpIgwȚܟʭ& >=z5+P*TGe6Xݳ15 ,ͦņ񷰾y'&#[(ޫRz>- m(칷p|}KC >ڥ6lXC ;́7[*BèB>gаO}8'IyGS c/G^,!H 8 ,۷Da#EC)m5VNi0BW_A+ p74A~ g@1xGSJCu VTjj69,')1s Ł&q}䝏l,/iZ Z-<ǡJU^9Wq6;SZإ47\}ѡ]Mk`7(EW D 5O+QL[pJe$V/^#6Nڜ/eKG;L!给zIF8qá[lu}܈4tt@UGifHJ.[)Gk,շ%l&s_ cq341T> &`9s;x11^^rl3tz^7q~u m=0qHQC߂>RHf tȆpكA}):]L^[XQ3R5Kv7`<&sbPW:"qҜ>z^sȪ }}74ZTnK}ʧl(/m6CG 5ʾ3Pepm41>hkGGBPOxTWI'tmdwI^pmv88 a,yn_E3"IrHLd0Ȫz{Aɹl] ;,bX:A^rov!=`D߭QnkF閪eތq3[uc<_1r%;L[ 0j35W]ԗS)lc)_1qc0yEJu.:gqPAݹV!4xޒ/g먩E2]RE09t/JsvGjו۹@&+ 廲ffr"8 Oܶ4[/ũ:LilZk4̄5NZ2|鞗`wkZrAC7] ϲk~]q_O:Ž!% ݣ2̒طݣAQ-`'^~/t4tvu6& b 1is UK }g 0 YZLuminescence/man/0000755000176200001440000000000014521210067013435 5ustar liggesusersLuminescence/man/calc_CentralDose.Rd0000644000176200001440000001175014521210045017111 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calc_CentralDose.R \name{calc_CentralDose} 
\alias{calc_CentralDose} \title{Apply the central age model (CAM) after Galbraith et al. (1999) to a given De distribution} \usage{ calc_CentralDose(data, sigmab, log = TRUE, na.rm = FALSE, plot = TRUE, ...) } \arguments{ \item{data}{\linkS4class{RLum.Results} or \link{data.frame} (\strong{required}): for \link{data.frame}: two columns with De \code{(data[,1])} and De error \code{(data[,2])}} \item{sigmab}{\link{numeric} (\emph{with default}): additional spread in De values. This value represents the expected overdispersion in the data should the sample be well-bleached (Cunningham & Walling 2012, p. 100). \strong{NOTE}: For the logged model (\code{log = TRUE}) this value must be a fraction, e.g. 0.2 (= 20 \\%). If the un-logged model is used (\code{log = FALSE}), sigmab must be provided in the same absolute units of the De values (seconds or Gray).} \item{log}{\link{logical} (\emph{with default}): fit the (un-)logged central age model to De data} \item{na.rm}{\link{logical} (\emph{with default}): strip \code{NA} values before the computation proceeds} \item{plot}{\link{logical} (\emph{with default}): plot output} \item{...}{further arguments (\code{trace}, \code{verbose}).} } \value{ Returns a plot (\emph{optional}) and terminal output. In addition an \linkS4class{RLum.Results} object is returned containing the following elements: \item{.$summary}{\link{data.frame} summary of all relevant model results.} \item{.$data}{\link{data.frame} original input data} \item{.$args}{\link{list} used arguments} \item{.$call}{\link{call} the function call} \item{.$profile}{\link{data.frame} the log likelihood profile for sigma} The output should be accessed using the function \link{get_RLum} } \description{ This function calculates the central dose and dispersion of the De distribution, their standard errors and the profile log likelihood function for sigma. } \details{ This function uses the equations of Galbraith & Roberts (2012). 
The parameters \code{delta} and \code{sigma} are estimated by numerically solving eq. 15 and 16. Their standard errors are approximated using eq. 17. In addition, the profile log-likelihood function for \code{sigma} is calculated using eq. 18 and presented as a plot. Numerical values of the maximum likelihood approach are \strong{only} presented in the plot and \strong{not} in the console. A detailed explanation on maximum likelihood estimation can be found in the appendix of Galbraith & Laslett (1993, 468-470) and Galbraith & Roberts (2012, 15) } \section{Function version}{ 1.4.1 } \examples{ ##load example data data(ExampleData.DeValues, envir = environment()) ##apply the central dose model calc_CentralDose(ExampleData.DeValues$CA1) } \section{How to cite}{ Burow, C., 2023. calc_CentralDose(): Apply the central age model (CAM) after Galbraith et al. (1999) to a given De distribution. Function version 1.4.1. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Galbraith, R.F. & Laslett, G.M., 1993. Statistical models for mixed fission track ages. Nuclear Tracks Radiation Measurements 4, 459-470. Galbraith, R.F., Roberts, R.G., Laslett, G.M., Yoshida, H. & Olley, J.M., 1999. Optical dating of single grains of quartz from Jinmium rock shelter, northern Australia. Part I: experimental design and statistical models. Archaeometry 41, 339-364. Galbraith, R.F. & Roberts, R.G., 2012. Statistical aspects of equivalent dose and error calculation and display in OSL dating: An overview and some recommendations. Quaternary Geochronology 11, 1-27. \strong{Further reading} Arnold, L.J. & Roberts, R.G., 2009. 
Stochastic modelling of multi-grain equivalent dose (De) distributions: Implications for OSL dating of sediment mixtures. Quaternary Geochronology 4, 204-230. Bailey, R.M. & Arnold, L.J., 2006. Statistical modelling of single grain quartz De distributions and an assessment of procedures for estimating burial dose. Quaternary Science Reviews 25, 2475-2502. Cunningham, A.C. & Wallinga, J., 2012. Realizing the potential of fluvial archives using robust OSL chronologies. Quaternary Geochronology 12, 98-106. Rodnight, H., Duller, G.A.T., Wintle, A.G. & Tooth, S., 2006. Assessing the reproducibility and accuracy of optical dating of fluvial deposits. Quaternary Geochronology, 1 109-120. Rodnight, H., 2008. How many equivalent dose values are needed to obtain a reproducible distribution?. Ancient TL 26, 3-10. } \seealso{ \link{plot}, \link{calc_CommonDose}, \link{calc_FiniteMixture}, \link{calc_FuchsLang2001}, \link{calc_MinDose} } \author{ Christoph Burow, University of Cologne (Germany) \cr Based on a rewritten S script of Rex Galbraith, 2010 , RLum Developer Team} Luminescence/man/calc_IEU.Rd0000644000176200001440000000621314521210045015326 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calc_IEU.R \name{calc_IEU} \alias{calc_IEU} \title{Apply the internal-external-uncertainty (IEU) model after Thomsen et al. (2007) to a given De distribution} \usage{ calc_IEU(data, a, b, interval, decimal.point = 2, plot = TRUE, ...) } \arguments{ \item{data}{\linkS4class{RLum.Results} or \link{data.frame} (\strong{required}): for \link{data.frame}: two columns with De \code{(data[,1])} and De error \code{(values[,2])}} \item{a}{\link{numeric} (\strong{required}): slope} \item{b}{\link{numeric} (\strong{required}): intercept} \item{interval}{\link{numeric} (\strong{required}): fixed interval (e.g. 
5 Gy) used for iteration of \code{Dbar}, from the mean to Lowest.De used to create Graph.IEU \verb{[Dbar.Fixed vs Z]}} \item{decimal.point}{\link{numeric} (\emph{with default}): number of decimal points for rounding calculations (e.g. 2)} \item{plot}{\link{logical} (\emph{with default}): plot output} \item{...}{further arguments (\verb{trace, verbose}).} } \value{ Returns a plot (\emph{optional}) and terminal output. In addition an \linkS4class{RLum.Results} object is returned containing the following elements: \item{.$summary}{\link{data.frame} summary of all relevant model results.} \item{.$data}{\link{data.frame} original input data} \item{.$args}{\link{list} used arguments} \item{.$call}{\link{call} the function call} \item{.$tables}{\link{list} a list of data frames containing all calculation tables} The output should be accessed using the function \link{get_RLum}. } \description{ Function to calculate the IEU De for a De data set. } \details{ This function uses the equations of Thomsen et al. (2007). The parameters a and b are estimated from dose-recovery experiments. } \section{Function version}{ 0.1.1 } \examples{ ## load data data(ExampleData.DeValues, envir = environment()) ## apply the IEU model ieu <- calc_IEU(ExampleData.DeValues$CA1, a = 0.2, b = 1.9, interval = 1) } \section{How to cite}{ Smedley, R.K., 2023. calc_IEU(): Apply the internal-external-uncertainty (IEU) model after Thomsen et al. (2007) to a given De distribution. Function version 0.1.1. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Smedley, R.K., 2015. A new R function for the Internal External Uncertainty (IEU) model. Ancient TL 33, 16-21. Thomsen, K.J., Murray, A.S., Boetter-Jensen, L. 
& Kinahan, J., 2007. Determination of burial dose in incompletely bleached fluvial samples using single grains of quartz. Radiation Measurements 42, 370-379. } \seealso{ \link{plot}, \link{calc_CommonDose}, \link{calc_CentralDose}, \link{calc_FiniteMixture}, \link{calc_FuchsLang2001}, \link{calc_MinDose} } \author{ Rachel Smedley, Geography & Earth Sciences, Aberystwyth University (United Kingdom) \cr Based on an excel spreadsheet and accompanying macro written by Kristina Thomsen. , RLum Developer Team} Luminescence/man/fit_SurfaceExposure.Rd0000644000176200001440000002102614521210045017706 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fit_SurfaceExposure.R \name{fit_SurfaceExposure} \alias{fit_SurfaceExposure} \title{Nonlinear Least Squares Fit for OSL surface exposure data} \usage{ fit_SurfaceExposure( data, sigmaphi = NULL, mu = NULL, age = NULL, Ddot = NULL, D0 = NULL, weights = FALSE, plot = TRUE, legend = TRUE, error_bars = TRUE, coord_flip = FALSE, ... ) } \arguments{ \item{data}{\link{data.frame} or \link{list} (\strong{required}): Measured OSL surface exposure data with the following structure: \if{html}{\out{

}}\preformatted{ (optional) | depth (a.u.)| intensity | error | | [ ,1] | [ ,2] | [ ,3] | |-------------|-----------|-------| [1, ]| ~~~~ | ~~~~ | ~~~~ | [2, ]| ~~~~ | ~~~~ | ~~~~ | ... | ... | ... | ... | [x, ]| ~~~~ | ~~~~ | ~~~~ | }\if{html}{\out{
}} Alternatively, a \link{list} of \code{data.frames} can be provided, where each \code{data.frame} has the same structure as shown above, with the exception that they must \strong{not} include the optional error column. Providing a \link{list} as input automatically activates the global fitting procedure (see details).} \item{sigmaphi}{\link{numeric} (\emph{optional}): A numeric value for \code{sigmaphi}, i.e. the charge detrapping rate. Example: \code{sigmaphi = 5e-10}} \item{mu}{\link{numeric} (\emph{optional}): A numeric value for mu, i.e. the light attenuation coefficient. Example: \code{mu = 0.9}} \item{age}{\link{numeric} (\emph{optional}): The age (a) of the sample, if known. If \code{data} is a \link{list} of \emph{x} samples, then \code{age} must be a numeric vector of length \emph{x}. Example: \code{age = 10000}, or \code{age = c(1e4, 1e5, 1e6)}.} \item{Ddot}{\link{numeric} (\emph{optional}): A numeric value for the environmental dose rate (Gy/ka). For this argument to be considered a value for \code{D0} must also be provided; otherwise it will be ignored.} \item{D0}{\link{numeric} (\emph{optional}): A numeric value for the characteristic saturation dose (Gy). For this argument to be considered a value for \code{Ddot} must also be provided; otherwise it will be ignored.} \item{weights}{\link{logical} (\emph{optional}): If \code{TRUE} the fit will be weighted by the inverse square of the error. Requires \code{data} to be a \link{data.frame} with three columns.} \item{plot}{\link{logical} (\emph{optional}): Show or hide the plot.} \item{legend}{\link{logical} (\emph{optional}): Show or hide the equation inside the plot.} \item{error_bars}{\link{logical} (\emph{optional}): Show or hide error bars (only applies if errors were provided).} \item{coord_flip}{\link{logical} (\emph{optional}): Flip the coordinate system.} \item{...}{Further parameters passed to \link{plot}. 
Custom parameters include: \itemize{ \item \code{verbose} (\link{logical}): show or hide console output \item \code{line_col}: Colour of the fitted line \item \code{line_lty}: Type of the fitted line (see \code{lty} in \code{?par}) \item \code{line_lwd}: Line width of the fitted line (see \code{lwd} in \code{?par}) }} } \value{ Function returns results numerically and graphically: -----------------------------------\cr \verb{[ NUMERICAL OUTPUT ]}\cr -----------------------------------\cr \strong{\code{RLum.Results}}-object \strong{slot:} \strong{\verb{@data}} \tabular{lll}{ \strong{Element} \tab \strong{Type} \tab \strong{Description}\cr \verb{$summary} \tab \code{data.frame} \tab summary of the fitting results \cr \verb{$data} \tab \code{data.frame} \tab the original input data \cr \verb{$fit} \tab \code{nls} \tab the fitting object produced by \link[minpack.lm:nlsLM]{minpack.lm::nlsLM} \cr \verb{$args} \tab \code{character} \tab arguments of the call \cr \verb{$call} \tab \code{call} \tab the original function call \cr } \strong{slot:} \strong{\verb{@info}} Currently unused. ------------------------\cr \verb{[ PLOT OUTPUT ]}\cr ------------------------\cr A scatter plot of the provided depth-intensity OSL surface exposure data with the fitted model. } \description{ This function determines the (weighted) least-squares estimates of the parameters of either equation 1 in \emph{Sohbati et al. (2012a)} or equation 12 in \emph{Sohbati et al. (2012b)} for a given OSL surface exposure data set (\strong{BETA}). } \details{ \strong{Weighted fitting} If \code{weights = TRUE} the function will use the inverse square of the error (\eqn{1/\sigma^2}) as weights during fitting using \link[minpack.lm:nlsLM]{minpack.lm::nlsLM}. Naturally, for this to take effect individual errors must be provided in the third column of the \code{data.frame} for \code{data}. 
Weighted fitting is \strong{not} supported if \code{data} is a list of multiple \code{data.frame}s, i.e., it is not available for global fitting. \strong{Dose rate} If any of the arguments \code{Ddot} or \code{D0} is at its default value (\code{NULL}), this function will fit equation 1 in Sohbati et al. (2012a) to the data. If the effect of dose rate (i.e., signal saturation) needs to be considered, numeric values for the dose rate (\code{Ddot}) (in Gy/ka) and the characteristic saturation dose (\code{D0}) (in Gy) must be provided. The function will then fit equation 12 in Sohbati et al. (2012b) to the data. \strong{NOTE}: Currently, this function does \strong{not} consider the variability of the dose rate with sample depth (\code{x})! In the original equation the dose rate \code{D} is an arbitrary function of \code{x} (term \code{D(x)}), but here \code{D} is assumed constant. \strong{Global fitting} If \code{data} is \link{list} of multiple \code{data.frame}s, each representing a separate sample, the function automatically performs a global fit to the data. This may be useful to better constrain the parameters \code{sigmaphi} or \code{mu} and \strong{requires} that known ages for each sample is provided (e.g., \code{age = c(100, 1000)} if \code{data} is a list with two samples). } \note{ \strong{This function has BETA status. 
If possible, results should be} \strong{cross-checked.} } \section{Function version}{ 0.1.0 } \examples{ ## Load example data data("ExampleData.SurfaceExposure") ## Example 1 - Single sample # Known parameters: 10000 a, mu = 0.9, sigmaphi = 5e-10 sample_1 <- ExampleData.SurfaceExposure$sample_1 head(sample_1) results <- fit_SurfaceExposure( data = sample_1, mu = 0.9, sigmaphi = 5e-10) get_RLum(results) ## Example 2 - Single sample and considering dose rate # Known parameters: 10000 a, mu = 0.9, sigmaphi = 5e-10, # dose rate = 2.5 Gy/ka, D0 = 40 Gy sample_2 <- ExampleData.SurfaceExposure$sample_2 head(sample_2) results <- fit_SurfaceExposure( data = sample_2, mu = 0.9, sigmaphi = 5e-10, Ddot = 2.5, D0 = 40) get_RLum(results) ## Example 3 - Multiple samples (global fit) to better constrain 'mu' # Known parameters: ages = 1e3, 1e4, 1e5, 1e6 a, mu = 0.9, sigmaphi = 5e-10 set_1 <- ExampleData.SurfaceExposure$set_1 str(set_1, max.level = 2) results <- fit_SurfaceExposure( data = set_1, age = c(1e3, 1e4, 1e5, 1e6), sigmaphi = 5e-10) get_RLum(results) ## Example 4 - Multiple samples (global fit) and considering dose rate # Known parameters: ages = 1e2, 1e3, 1e4, 1e5, 1e6 a, mu = 0.9, sigmaphi = 5e-10, # dose rate = 1.0 Ga/ka, D0 = 40 Gy set_2 <- ExampleData.SurfaceExposure$set_2 str(set_2, max.level = 2) results <- fit_SurfaceExposure( data = set_2, age = c(1e2, 1e3, 1e4, 1e5, 1e6), sigmaphi = 5e-10, Ddot = 1, D0 = 40) get_RLum(results) } \section{How to cite}{ Burow, C., 2023. fit_SurfaceExposure(): Nonlinear Least Squares Fit for OSL surface exposure data. Function version 0.1.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. 
https://CRAN.R-project.org/package=Luminescence } \references{ Sohbati, R., Murray, A.S., Chapot, M.S., Jain, M., Pederson, J., 2012a. Optically stimulated luminescence (OSL) as a chronometer for surface exposure dating. Journal of Geophysical Research 117, B09202. doi: \doi{10.1029/2012JB009383} Sohbati, R., Jain, M., Murray, A.S., 2012b. Surface exposure dating of non-terrestrial bodies using optically stimulated luminescence: A new method. Icarus 221, 160-166. } \seealso{ \link{ExampleData.SurfaceExposure}, \link[minpack.lm:nlsLM]{minpack.lm::nlsLM} } \author{ Christoph Burow, University of Cologne (Germany) , RLum Developer Team} \keyword{datagen} Luminescence/man/plot_DetPlot.Rd0000644000176200001440000001414614521210045016337 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot_DetPlot.R \name{plot_DetPlot} \alias{plot_DetPlot} \title{Create De(t) plot} \usage{ plot_DetPlot( object, signal.integral.min, signal.integral.max, background.integral.min, background.integral.max, method = "shift", signal_integral.seq = NULL, analyse_function = "analyse_SAR.CWOSL", analyse_function.control = list(), n.channels = NULL, show_ShineDownCurve = TRUE, respect_RC.Status = FALSE, verbose = TRUE, ... ) } \arguments{ \item{object}{\linkS4class{RLum.Analysis} (\strong{required}): input object containing data for analysis} \item{signal.integral.min}{\link{integer} (\strong{required}): lower bound of the signal integral.} \item{signal.integral.max}{\link{integer} (\strong{required}): upper bound of the signal integral.} \item{background.integral.min}{\link{integer} (\strong{required}): lower bound of the background integral.} \item{background.integral.max}{\link{integer} (\strong{required}): upper bound of the background integral.} \item{method}{\link{character} (\emph{with default}): method applied for constructing the De(t) plot. 
\itemize{ \item \code{shift} (\emph{the default}): the chosen signal integral is shifted along the shine down curve, \item \code{expansion}: the chosen signal integral is expanded each time by its length }} \item{signal_integral.seq}{\link{numeric} (\emph{optional}): argument to provide a custom signal integral sequence for constructing the De(t) plot} \item{analyse_function}{\link{character} (\emph{with default}): name of the analyse function to be called. Supported functions are: \link{analyse_SAR.CWOSL}, \link{analyse_pIRIRSequence}} \item{analyse_function.control}{\link{list} (\emph{optional}): selected arguments to be passed to the supported analyse functions (\link{analyse_SAR.CWOSL}, \link{analyse_pIRIRSequence})} \item{n.channels}{\link{integer} (\emph{optional}): number of channels used for the De(t) plot. If nothing is provided all De-values are calculated and plotted until the start of the background integral.} \item{show_ShineDownCurve}{\link{logical} (\emph{with default}): enables or disables shine down curve in the plot output} \item{respect_RC.Status}{\link{logical} (\emph{with default}): remove De-values with 'FAILED' RC.Status from the plot (cf. \link{analyse_SAR.CWOSL} and \link{analyse_pIRIRSequence})} \item{verbose}{\link{logical} (\emph{with default}): enables or disables terminal feedback} \item{...}{further arguments and graphical parameters passed to \link{plot.default}, \link{analyse_SAR.CWOSL} and \link{analyse_pIRIRSequence} (see details for further information).
Plot control parameters are: \code{ylim}, \code{xlim}, \code{ylab}, \code{xlab}, \code{main}, \code{pch}, \code{mtext}, \code{cex}, \code{legend}, \code{legend.text}, \code{legend.pos}} } \value{ A plot and an \linkS4class{RLum.Results} object with the produced De values \verb{@data}: \tabular{lll}{ \strong{Object} \tab \strong{Type} \tab \strong{Description}\cr De.values \tab \code{data.frame} \tab table with De values \cr signal_integral.seq \tab \code{numeric} \tab integral sequence used for the calculation } \verb{@info}: \tabular{lll}{ \strong{Object} \tab \strong{Type} \tab \strong{Description}\cr call \tab \code{call} \tab the original function call } } \description{ Plots the equivalent dose (De) in dependency of the chosen signal integral (cf. Bailey et al., 2003). The function is simply passing several arguments to the function \link{plot} and the used analysis functions and runs it in a loop. Example: \code{legend.pos} for legend position, \code{legend} for legend text. } \details{ \strong{method} The original method presented by Bailey et al., 2003 shifted the signal integrals and slightly extended them accounting for changes in the counting statistics. Example: \code{c(1:3, 3:5, 5:7)}. However, here also another method is provided allowing to expand the signal integral by consecutively expanding the integral by its chosen length. Example: \code{c(1:3, 1:5, 1:7)} Note that in both cases the integral limits overlap. The finally applied limits are part of the function output. \strong{analyse_function.control} The argument \code{analyse_function.control} currently supports the following arguments \code{sequence.structure}, \code{dose.points}, \code{mtext.outer}, \code{fit.method}, \code{fit.force_through_origin}, \code{plot}, \code{plot.single} } \note{ The entire analysis is based on the used analysis functions, namely \link{analyse_SAR.CWOSL} and \link{analyse_pIRIRSequence}.
However, the integrity checks of this function are not as thorough as in these functions themselves. This means that every sequence should be checked carefully before running long calculations using several hundreds of channels. } \section{Function version}{ 0.1.5 } \examples{ \dontrun{ ##load data ##ExampleData.BINfileData contains two BINfileData objects ##CWOSL.SAR.Data and TL.SAR.Data data(ExampleData.BINfileData, envir = environment()) ##transform the values from the first position in a RLum.Analysis object object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos=1) plot_DetPlot( object, signal.integral.min = 1, signal.integral.max = 3, background.integral.min = 900, background.integral.max = 1000, n.channels = 5) } } \section{How to cite}{ Kreutzer, S., 2023. plot_DetPlot(): Create De(t) plot. Function version 0.1.5. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Bailey, R.M., Singarayer, J.S., Ward, S., Stokes, S., 2003. Identification of partial resetting using De as a function of illumination time. Radiation Measurements 37, 511-518. doi:10.1016/S1350-4487(03)00063-5 } \seealso{ \link{plot}, \link{analyse_SAR.CWOSL}, \link{analyse_pIRIRSequence} } \author{ Sebastian Kreutzer, Institute of Geography, Ruprecht-Karl University of Heidelberg (Germany) , RLum Developer Team} Luminescence/man/plot_RLum.Rd0000644000176200001440000000565014521210045015643 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot_RLum.R \name{plot_RLum} \alias{plot_RLum} \title{General plot function for RLum S4 class objects} \usage{ plot_RLum(object, ...)
} \arguments{ \item{object}{\linkS4class{RLum} (\strong{required}): S4 object of class \code{RLum}. Optional a \link{list} containing objects of class \linkS4class{RLum} can be provided. In this case the function tries to plot every object in this list according to its \code{RLum} class. Non-RLum objects are removed.} \item{...}{further arguments and graphical parameters that will be passed to the specific plot functions. The only argument that is supported directly is \code{main} (setting the plot title). In contrast to the normal behaviour \code{main} can be here provided as \link{list} and the arguments in the list will dispatched to the plots if the \code{object} is of type \code{list} as well.} } \value{ Returns a plot. } \description{ Function calls object specific plot functions for RLum S4 class objects. } \details{ The function provides a generalised access point for plotting specific \linkS4class{RLum} objects.\cr Depending on the input object, the corresponding plot function will be selected. Allowed arguments can be found in the documentations of each plot function. \tabular{lll}{ \strong{object} \tab \tab \strong{corresponding plot function} \cr \linkS4class{RLum.Data.Curve} \tab : \tab \link{plot_RLum.Data.Curve} \cr \linkS4class{RLum.Data.Spectrum} \tab : \tab \link{plot_RLum.Data.Spectrum}\cr \linkS4class{RLum.Data.Image} \tab : \tab \link{plot_RLum.Data.Image}\cr \linkS4class{RLum.Analysis} \tab : \tab \link{plot_RLum.Analysis}\cr \linkS4class{RLum.Results} \tab : \tab \link{plot_RLum.Results} } } \note{ The provided plot output depends on the input object. 
} \section{Function version}{ 0.4.4 } \examples{ #load Example data data(ExampleData.CW_OSL_Curve, envir = environment()) #transform data.frame to RLum.Data.Curve object temp <- as(ExampleData.CW_OSL_Curve, "RLum.Data.Curve") #plot RLum object plot_RLum(temp) } \seealso{ \link{plot_RLum.Data.Curve}, \linkS4class{RLum.Data.Curve}, \link{plot_RLum.Data.Spectrum}, \linkS4class{RLum.Data.Spectrum}, \link{plot_RLum.Data.Image}, \linkS4class{RLum.Data.Image}, \link{plot_RLum.Analysis}, \linkS4class{RLum.Analysis}, \link{plot_RLum.Results}, \linkS4class{RLum.Results} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. plot_RLum(): General plot function for RLum S4 class objects. Function version 0.4.4. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \keyword{dplot} Luminescence/man/plot_RLum.Analysis.Rd0000644000176200001440000001212214521210045017415 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot_RLum.Analysis.R \name{plot_RLum.Analysis} \alias{plot_RLum.Analysis} \title{Plot function for an RLum.Analysis S4 class object} \usage{ plot_RLum.Analysis( object, subset = NULL, nrows, ncols, abline = NULL, combine = FALSE, records_max = NULL, curve.transformation, plot.single = FALSE, ... ) } \arguments{ \item{object}{\linkS4class{RLum.Analysis} (\strong{required}): S4 object of class \code{RLum.Analysis}} \item{subset}{named \link{list} (\emph{optional}): subsets elements for plotting. 
The arguments in the named \link{list} will be directly passed to the function \link{get_RLum} (e.g., \code{subset = list(curveType = "measured")})} \item{nrows}{\link{integer} (\emph{optional}): sets number of rows for plot output, if nothing is set the function tries to find a value.} \item{ncols}{\link{integer} (\emph{optional}): sets number of columns for plot output, if nothing is set the function tries to find a value.} \item{abline}{\link{list} (\emph{optional}): allows to add ab-lines to the plot. Arguments are provided in a list and will be forwarded to the function \link{abline}, e.g., \code{list(v = c(10, 100))} adds two vertical lines at 10 and 100 to all plots. In contrast \verb{list(v = c(10), v = c(100))} adds a vertical line at 10 to the first and a vertical line at 100 to the 2nd plot.} \item{combine}{\link{logical} (\emph{with default}): allows to combine all \linkS4class{RLum.Data.Curve} objects in one single plot.} \item{records_max}{\link{numeric} (\emph{optional}): limits number of records shown if \code{combine = TRUE}. Shown are always the first and the last curve, the remaining curves to be shown are distributed evenly; this may result in fewer curves plotted than specified.} \item{curve.transformation}{\link{character} (\emph{optional}): allows transforming CW-OSL and CW-IRSL curves to pseudo-LM curves via transformation functions. Allowed values are: \code{CW2pLM}, \code{CW2pLMi}, \code{CW2pHMi} and \code{CW2pPMi}. See details.} \item{plot.single}{\link{logical} (\emph{with default}): global par settings are considered, normally this should end in one plot per page} \item{...}{further arguments and graphical parameters will be passed to the \code{plot} function. Supported arguments: \code{main}, \code{mtext}, \code{log}, \code{lwd}, \code{lty} \code{type}, \code{pch}, \code{col}, \code{norm} (see \link{plot_RLum.Data.Curve}), \code{xlim},\code{ylim}, \code{xlab}, \code{ylab}, ...
and for \code{combine = TRUE} also: \code{sub_title}, \code{legend}, \code{legend.text}, \code{legend.pos} (typical plus 'outside'), \code{legend.col}, \code{smooth}. All arguments can be provided as \code{vector} or \code{list} to gain full control of all plot settings.} } \value{ Returns multiple plots. } \description{ The function provides a standardised plot output for curve data of an RLum.Analysis S4 class object } \details{ The function produces a multiple plot output. A file output is recommended (e.g., \link{pdf}). \strong{curve.transformation} This argument allows transforming continuous wave (CW) curves to pseudo (linear) modulated curves. For the transformation, the functions of the package are used. Currently, it is not possible to pass further arguments to the transformation functions. The argument works only for \code{ltype} \code{OSL} and \code{IRSL}. Please note: The curve transformation within this function works roughly, i.e. every IRSL or OSL curve is transformed, without considering whether it is measured with the PMT or not! However, for a fast look it might be helpful. } \note{ Not all arguments available for \link{plot} will be passed and they partly do not behave in the way you might expect them to work. This function was designed to serve as an overview plot, if you want to have more control, extract the objects and plot them individually.
} \section{Function version}{ 0.3.14 } \examples{ ##load data data(ExampleData.BINfileData, envir = environment()) ##convert values for position 1 temp <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos=1) ##(1) plot (combine) TL curves in one plot plot_RLum.Analysis( temp, subset = list(recordType = "TL"), combine = TRUE, norm = TRUE, abline = list(v = c(110)) ) ##(2) same as example (1) but using ## the argument smooth = TRUE plot_RLum.Analysis( temp, subset = list(recordType = "TL"), combine = TRUE, norm = TRUE, smooth = TRUE, abline = list(v = c(110)) ) } \seealso{ \link{plot}, \link{plot_RLum}, \link{plot_RLum.Data.Curve} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. plot_RLum.Analysis(): Plot function for an RLum.Analysis S4 class object. Function version 0.3.14. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \keyword{aplot} Luminescence/man/sTeve.Rd0000644000176200001440000000266714521210045015021 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/zzz.R \name{sTeve} \alias{sTeve} \title{sTeve - sophisticated tool for efficient data validation and evaluation} \usage{ sTeve(n_frames = 10, t_animation = 2, n.tree = 7, type) } \arguments{ \item{n_frames}{\link{integer} (\emph{with default}): n frames} \item{t_animation}{\link{integer} (\emph{with default}): t animation} \item{n.tree}{\link{integer} (\emph{with default}): how many trees do you want to cut?} \item{type}{\link{integer} (\emph{optional}): Make a decision: 1, 2 or 3} } \value{ Validates your data. 
} \description{ This function provides a sophisticated routine for comprehensive luminescence dating data analysis. } \details{ This amazing sophisticated function validates your data seriously. } \note{ This function should not be taken too seriously. } \examples{ ##no example available } \seealso{ \link{plot_KDE} } \author{ R Luminescence Team, 2012-2046 , RLum Developer Team} \section{How to cite}{ NA, NA, , , 2023. sTeve(): sTeve - sophisticated tool for efficient data validation and evaluation. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \keyword{manip} Luminescence/man/RLum.Data.Spectrum-class.Rd0000644000176200001440000001342114521210045020354 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RLum.Data.Spectrum-class.R \docType{class} \name{RLum.Data.Spectrum-class} \alias{RLum.Data.Spectrum-class} \alias{show,RLum.Data.Spectrum-method} \alias{set_RLum,RLum.Data.Spectrum-method} \alias{get_RLum,RLum.Data.Spectrum-method} \alias{names_RLum,RLum.Data.Spectrum-method} \alias{bin_RLum.Data,RLum.Data.Spectrum-method} \title{Class \code{"RLum.Data.Spectrum"}} \usage{ \S4method{show}{RLum.Data.Spectrum}(object) \S4method{set_RLum}{RLum.Data.Spectrum}( class, originator, .uid, .pid, recordType = "Spectrum", curveType = NA_character_, data = matrix(), info = list() ) \S4method{get_RLum}{RLum.Data.Spectrum}(object, info.object) \S4method{names_RLum}{RLum.Data.Spectrum}(object) \S4method{bin_RLum.Data}{RLum.Data.Spectrum}(object, bin_size.col = 1, bin_size.row = 1) } \arguments{ \item{object}{\code{\link{get_RLum}}, \code{\link{names_RLum}} (\strong{required}): an object of class \linkS4class{RLum.Data.Spectrum}} 
\item{class}{\code{\link{set_RLum}}; \link{character} (\emph{automatic}): name of the \code{RLum} class to create.} \item{originator}{\link{character} (\emph{automatic}): contains the name of the calling function (the function that produces this object); can be set manually.} \item{.uid}{\code{\link{set_RLum}}; \link{character} (\emph{automatic}): sets a unique ID for this object using the internal C++ function \code{create_UID}.} \item{.pid}{\code{\link{set_RLum}}; \link{character} (\emph{with default}): option to provide a parent id for nesting at will.} \item{recordType}{\code{\link{set_RLum}}; \link{character}: record type (e.g. "OSL")} \item{curveType}{\code{\link{set_RLum}}; \link{character}: curve type (e.g. "predefined" or "measured")} \item{data}{\code{\link{set_RLum}}; \link{matrix}: raw curve data. If data is of type \code{RLum.Data.Spectrum}, this can be used to re-construct the object. If the object is reconstructed, \code{.uid}, \code{.pid} and \code{originator} are always taken from the input object} \item{info}{\code{\link{set_RLum}} \link{list}: info elements} \item{info.object}{\code{\link{get_RLum}}; \link{character} (\emph{optional}): the name of the info object to be called} \item{bin_size.col}{\link{integer} (\emph{with default}): set number of channels used for each bin, e.g. \code{bin_size.col = 2} means that two channels are binned. Note: The function does not check the input, very large values mean a full column binning (a single sum)} \item{bin_size.row}{\link{integer} (\emph{with default}): set number of channels used for each bin, e.g. \code{bin_size.row = 2} means that two channels are binned. Note: The function does not check the input, very large values mean a full row binning (a single sum)} } \value{ \strong{\verb{[set_RLum]}} An object from the class \code{RLum.Data.Spectrum} \strong{\verb{[get_RLum]}} \enumerate{ \item A \link{matrix} with the spectrum values or \item only the info object if \code{info.object} was set.
} \strong{\verb{[names_RLum]}} The names of the info objects \strong{\verb{[bin_RLum.Data]}} Same object as input, after applying the binning. } \description{ Class for representing luminescence spectra data (TL/OSL/RF). } \section{Methods (by generic)}{ \itemize{ \item \code{show(RLum.Data.Spectrum)}: Show structure of \code{RLum.Data.Spectrum} object \item \code{set_RLum(RLum.Data.Spectrum)}: Construction method for RLum.Data.Spectrum object. The slot info is optional and predefined as empty list by default \item \code{get_RLum(RLum.Data.Spectrum)}: Accessor method for RLum.Data.Spectrum object. The argument info.object is optional to directly access the info elements. If no info element name is provided, the raw curve data (matrix) will be returned \item \code{names_RLum(RLum.Data.Spectrum)}: Returns the names of the info elements coming along with this curve object \item \code{bin_RLum.Data(RLum.Data.Spectrum)}: Allows binning of RLum.Data.Spectrum data. Count values and values on the x-axis are summed-up; for wavelength/energy values the mean is calculated. }} \section{Slots}{ \describe{ \item{\code{recordType}}{Object of class \link{character} containing the type of the curve (e.g. "TL" or "OSL")} \item{\code{curveType}}{Object of class \link{character} containing curve type, allowed values are measured or predefined} \item{\code{data}}{Object of class \link{matrix} containing spectrum (count) values. Row labels indicate wavelength/pixel values, column labels are temperature or time values.} \item{\code{info}}{Object of class \link{list} containing further meta information objects} }} \note{ The class should only contain data for a single spectrum data set. For additional elements the slot \code{info} can be used. Objects from this class are automatically created by, e.g., \link{read_XSYG2R} } \section{Objects from the Class}{ Objects can be created by calls of the form \code{set_RLum("RLum.Data.Spectrum", ...)}.
} \section{Class version}{ 0.5.2 } \examples{ showClass("RLum.Data.Spectrum") ##show example data data(ExampleData.XSYG, envir = environment()) TL.Spectrum ##show data matrix get_RLum(TL.Spectrum) ##plot spectrum \dontrun{ plot_RLum(TL.Spectrum) } } \seealso{ \linkS4class{RLum}, \linkS4class{RLum.Data}, \link{plot_RLum} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. RLum.Data.Spectrum-class(): Class 'RLum.Data.Spectrum'. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \keyword{classes} \keyword{internal} Luminescence/man/Risoe.BINfileData2RLum.Analysis.Rd0000644000176200001440000000764614521210045021523 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Risoe.BINfileData2RLum.Analysis.R \name{Risoe.BINfileData2RLum.Analysis} \alias{Risoe.BINfileData2RLum.Analysis} \title{Convert Risoe.BINfileData object to an RLum.Analysis object} \usage{ Risoe.BINfileData2RLum.Analysis( object, pos = NULL, grain = NULL, run = NULL, set = NULL, ltype = NULL, dtype = NULL, protocol = "unknown", keep.empty = TRUE, txtProgressBar = FALSE ) } \arguments{ \item{object}{\linkS4class{Risoe.BINfileData} (\strong{required}): \code{Risoe.BINfileData} object} \item{pos}{\link{numeric} (\emph{optional}): position number of the \code{Risoe.BINfileData} object for which the curves are stored in the \code{RLum.Analysis} object. If \code{length(position)>1} a list of \code{RLum.Analysis} objects is returned. If nothing is provided every position will be converted. 
If the position is not valid \code{NA} is returned.} \item{grain}{\link{vector}, \link{numeric} (\emph{optional}): grain number from the measurement to limit the converted data set (e.g., \code{grain = c(1:48)}). Please be aware that this option may lead to unwanted effects, as the output is strictly limited to the chosen grain number for all position numbers} \item{run}{\link{vector}, \link{numeric} (\emph{optional}): run number from the measurement to limit the converted data set (e.g., \code{run = c(1:48)}).} \item{set}{\link{vector}, \link{numeric} (\emph{optional}): set number from the measurement to limit the converted data set (e.g., \code{set = c(1:48)}).} \item{ltype}{\link{vector}, \link{character} (\emph{optional}): curve type to limit the converted data. Commonly allowed values are: \code{IRSL}, \code{OSL}, \code{TL}, \code{RIR}, \code{RBR} and \code{USER} (see also \linkS4class{Risoe.BINfileData})} \item{dtype}{\link{vector}, \link{character} (\emph{optional}): data type to limit the converted data. Commonly allowed values are listed in \linkS4class{Risoe.BINfileData}} \item{protocol}{\link{character} (\emph{optional}): sets protocol type for analysis object. Value may be used by subsequent analysis functions.} \item{keep.empty}{\link{logical} (\emph{with default}): If \code{TRUE} (default) an \code{RLum.Analysis} object is returned even if it does not contain any records. Set to \code{FALSE} to discard all empty objects.} \item{txtProgressBar}{\link{logical} (\emph{with default}): enables or disables \link{txtProgressBar}.} } \value{ Returns an \linkS4class{RLum.Analysis} object. } \description{ Converts values from one specific position of a Risoe.BINfileData S4-class object to an RLum.Analysis object. } \details{ The \linkS4class{RLum.Analysis} object requires a set of curves for specific further protocol analyses. 
However, the \linkS4class{Risoe.BINfileData} usually contains a set of curves for different aliquots and different protocol types that may be mixed up. Therefore, a conversion is needed. } \note{ The \code{protocol} argument of the \linkS4class{RLum.Analysis} object is set to 'unknown' if not stated otherwise. } \section{Function version}{ 0.4.2 } \examples{ ##load data data(ExampleData.BINfileData, envir = environment()) ##convert values for position 1 Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos = 1) } \seealso{ \linkS4class{Risoe.BINfileData}, \linkS4class{RLum.Analysis}, \link{read_BIN2R} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. Risoe.BINfileData2RLum.Analysis(): Convert Risoe.BINfileData object to an RLum.Analysis object. Function version 0.4.2. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \keyword{manip} Luminescence/man/get_RLum.Rd0000644000176200001440000000574214521210045015446 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_RLum.R \name{get_RLum} \alias{get_RLum} \alias{get_RLum,list-method} \alias{get_RLum,NULL-method} \title{General accessors function for RLum S4 class objects} \usage{ get_RLum(object, ...) \S4method{get_RLum}{list}(object, class = NULL, null.rm = FALSE, ...) \S4method{get_RLum}{`NULL`}(object, ...) } \arguments{ \item{object}{\linkS4class{RLum} (\strong{required}): S4 object of class \code{RLum} or an object of type \link{list} containing only objects of type \linkS4class{RLum}} \item{...}{further arguments that will be passed to the object specific methods. 
For further details on the supported arguments please see the class documentation: \linkS4class{RLum.Data.Curve}, \linkS4class{RLum.Data.Spectrum}, \linkS4class{RLum.Data.Image}, \linkS4class{RLum.Analysis} and \linkS4class{RLum.Results}} \item{class}{\link{character} (\emph{optional}): allows to define the class that gets selected if applied to a list, e.g., if a list consists of different type of RLum-class objects, this arguments allows to make selection. If nothing is provided, all RLum-objects are treated.} \item{null.rm}{\link{logical} (\emph{with default}): option to get rid of empty and NULL objects} } \value{ Return is the same as input objects as provided in the list. } \description{ Function calls object-specific get functions for RLum S4 class objects. } \details{ The function provides a generalised access point for specific \linkS4class{RLum} objects.\cr Depending on the input object, the corresponding get function will be selected. Allowed arguments can be found in the documentations of the corresponding \linkS4class{RLum} class. } \section{Functions}{ \itemize{ \item \code{get_RLum(list)}: Returns a list of \linkS4class{RLum} objects that had been passed to \link{get_RLum} \item \code{get_RLum(`NULL`)}: Returns NULL }} \section{Function version}{ 0.3.3 } \examples{ ##Example based using data and from the calc_CentralDose() function ##load example data data(ExampleData.DeValues, envir = environment()) ##apply the central dose model 1st time temp1 <- calc_CentralDose(ExampleData.DeValues$CA1) ##get results and store them in a new object temp.get <- get_RLum(object = temp1) } \seealso{ \linkS4class{RLum.Data.Curve}, \linkS4class{RLum.Data.Image}, \linkS4class{RLum.Data.Spectrum}, \linkS4class{RLum.Analysis}, \linkS4class{RLum.Results} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. get_RLum(): General accessors function for RLum S4 class objects. 
Function version 0.3.3. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \keyword{utilities} Luminescence/man/scale_GammaDose.Rd0000644000176200001440000002625014521210045016731 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/scale_GammaDose.R \name{scale_GammaDose} \alias{scale_GammaDose} \title{Calculate the gamma dose deposited within a sample taking layer-to-layer variations in radioactivity into account (according to Aitken, 1985)} \usage{ scale_GammaDose( data, conversion_factors = c("Cresswelletal2018", "Guerinetal2011", "AdamiecAitken1998", "Liritzisetal2013")[1], fractional_gamma_dose = c("Aitken1985")[1], verbose = TRUE, plot = TRUE, plot_single = TRUE, ... ) } \arguments{ \item{data}{\link{data.frame} (\strong{required}): A table containing all relevant information for each individual layer. The table must have the following named columns: \itemize{ \item \code{id} (\link{character}): an arbitrary id or name of each layer \item \code{thickness} (\link{numeric}): vertical extent of each layer in cm \item \code{sample_offset} (\link{logical}): distance of the sample in cm, \strong{measured from the BOTTOM OF THE TARGET LAYER}. Except for the target layer all values must be \code{NA}. 
\item \code{K} (\link{numeric}): K nuclide content in \% \item \code{K_se} (\link{numeric}): error on the K content \item \code{Th} (\link{numeric}): Th nuclide content in ppm \item \code{Th_se} (\link{numeric}): error on the Th content \item \code{U} (\link{numeric}): U nuclide content in ppm \item \code{U_se} (\link{numeric}): error on the U content \item \code{water_content} (\link{numeric}): water content of each layer in \% \item \code{water_content_se} (\link{numeric}): error on the water content \item \code{density} (\link{numeric}): bulk density of each layer in g/cm^-3 }} \item{conversion_factors}{\link{character} (\emph{optional}): The conversion factors used to calculate the dose rate from sediment nuclide contents. Valid options are: \itemize{ \item \code{"Cresswelletal2018"} (default) \item \code{"Liritzisetal2013"} \item \code{"Guerinetal2011"} \item \code{"AdamiecAitken1998"} }} \item{fractional_gamma_dose}{\link{character} (\emph{optional}): Factors to scale gamma dose rate values. Valid options are: \itemize{ \item \code{"Aitken1985"} (default): Table H1 in the appendix }} \item{verbose}{\link{logical} (\emph{optional}): Show or hide console output (defaults to \code{TRUE}).} \item{plot}{\link{logical} (\emph{optional}): Show or hide the plot (defaults to \code{TRUE}).} \item{plot_single}{\link{logical} (\emph{optional}): Show all plots in one panel (defaults to \code{TRUE}).} \item{...}{Further parameters passed to \link{barplot}.} } \value{ After performing the calculations the user is provided with different outputs. \enumerate{ \item The total gamma dose rate received by the sample (+/- uncertainties) as a print in the console. \item A plot showing the sediment sequence, the user input sample information and the contribution to total gamma dose rate. \item RLum Results. If the user wishes to save these results, writing a script to run the function and to save the results would look like this: } \if{html}{\out{
}}\preformatted{mydata <- read.table("c:/path/to/input/file.txt") results <- scale_GammaDose(mydata) table <- get_RLum(results) write.csv(table, "c:/path/to/results.csv") }\if{html}{\out{
}} -----------------------------------\cr \verb{[ NUMERICAL OUTPUT ]}\cr -----------------------------------\cr \strong{\code{RLum.Results}}-object \strong{slot:} \strong{\verb{@data}} \tabular{lll}{ \strong{Element} \tab \strong{Type} \tab \strong{Description}\cr \verb{$summary} \tab \code{data.frame} \tab summary of the model results \cr \verb{$data} \tab \code{data.frame} \tab the original input data \cr \verb{$dose_rates} \tab \code{list} \tab two \code{data.frames} for the scaled and infinite matrix dose rates \cr \verb{$tables} \tab \code{list} \tab several \code{data.frames} containing intermediate results \cr \verb{$args} \tab \code{character} \tab arguments of the call \cr \verb{$call} \tab \code{call} \tab the original function call \cr } \strong{slot:} \strong{\verb{@info}} Currently unused. ------------------------\cr \verb{[ PLOT OUTPUT ]}\cr ------------------------\cr Three plots are produced: \itemize{ \item A visualisation of the provided sediment layer structure to quickly assess whether the data was provided and interpreted correctly. \item A scatter plot of the nuclide contents per layer (K, Th, U) as well as the water content. This may help to correlate the dose rate contribution of specific layers to the layer of interest. \item A barplot visualising the contribution of each layer to the total dose rate received by the sample in the target layer. } } \description{ This function calculates the gamma dose deposited in a luminescence sample taking into account layer-to-layer variations in sediment radioactivity . The function scales user inputs of uranium, thorium and potassium based on input parameters for sediment density, water content and given layer thicknesses and distances to the sample. } \details{ \strong{User Input} To calculate the gamma dose which is deposited in a sample, the user needs to provide information on those samples influencing the luminescence sample. 
As a rule of thumb, all sediment layers within at least a 30 cm radius of the luminescence sample should be taken into account when calculating the gamma dose rate.
The options are: \itemize{ \item \code{"Cresswelletal2018"} (Cresswell et al., 2018) \item \code{"Liritzisetal2013"} (Liritzis et al., 2013) \item \code{"Guerinetal2011"} (Guerin et al., 2011) \item \code{"AdamiecAitken1998"} (Adamiec and Aitken, 1998) } \strong{Water content} The water content provided by the user should be calculated according to: \deqn{ ( Wet weight [g] - Dry weight [g] ) / Dry weight [g] * 100 } \strong{Calculations} After converting the radionuclide concentrations into dose rates, the function will scale the dose rates based on the thickness of the layers, the distances to the sample, the water content and the density of the sediment. The calculations are based on Aitken (1985, Appendix H). As an example (equivalent to Aitken, 1985), assuming three layers of sediment, where \strong{L} is inert and positioned in between the infinite thick and equally active layers \strong{A} and \strong{B}, the dose in \strong{L} and \strong{B} due to \strong{A} is given by \deqn{ {1-f(x)}D_A } Where \code{x} is the distance into the inert medium, so \code{f(x)} is the weighted average fractional dose at \code{x} and \code{D_A} denotes that the dose is delivered by \strong{A}. \code{f(x)} is derived from table H1 (Aitken, 1985), when setting \code{z = x}. Consequently, the dose in \strong{A} and \strong{L} due to \strong{B} is given by \deqn{ {1 - f(t-x)}D_B } Here \code{t} is the thickness of \strong{L} and the other parameters are denoted as above, just for the dose being delivered by B. \code{f(t-x)} is derived from table H1 (Aitken, 1985), when setting \code{z} equal to \code{t-x}. Following this, the dose in \strong{L} delivered by \strong{A} and \strong{B} is given by \deqn{ {2 - f(x) - f(t-x)}D_{AB} } Since \strong{A} and \strong{B} are equally active \verb{D_\{AB\} = D_A = D_B}. The function uses the value of the fractional dose rate at the layer boundary to start the calculation for the next layer. 
This way, the function is able to scale the gamma dose rate accurately for distant layers when the density and water content is not constant for the entire section. } \note{ \strong{This function has BETA status. If possible, results should be} \strong{cross-checked.} } \section{Function version}{ 0.1.2 } \section{Acknowledgements}{ We thank Dr Ian Bailiff for the provision of an excel spreadsheet, which has been very helpful when writing this function. } \examples{ # Load example data data("ExampleData.ScaleGammaDose", envir = environment()) x <- ExampleData.ScaleGammaDose # Scale gamma dose rate results <- scale_GammaDose(data = x, conversion_factors = "Cresswelletal2018", fractional_gamma_dose = "Aitken1985", verbose = TRUE, plot = TRUE) get_RLum(results) } \section{How to cite}{ Riedesel, S., Autzen, M., Burow, C., 2023. scale_GammaDose(): Calculate the gamma dose deposited within a sample taking layer-to-layer variations in radioactivity into account (according to Aitken, 1985). Function version 0.1.2. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Aitken, M.J., 1985. Thermoluminescence Dating. Academic Press, London. Adamiec, G., Aitken, M.J., 1998. Dose-rate conversion factors: update. Ancient TL 16, 37-46. Cresswell., A.J., Carter, J., Sanderson, D.C.W., 2018. Dose rate conversion parameters: Assessment of nuclear data. Radiation Measurements 120, 195-201. Guerin, G., Mercier, N., Adamiec, G., 2011. Dose-rate conversion factors: update. Ancient TL, 29, 5-8. Liritzis, I., Stamoulis, K., Papachristodoulou, C., Ioannides, K., 2013. A re-evaluation of radiation dose-rate conversion factors. Mediterranean Archaeology and Archaeometry 13, 1-15. 
} \seealso{ \link{ExampleData.ScaleGammaDose}, \link{BaseDataSet.ConversionFactors}, \link{approx}, \link{barplot} } \author{ Svenja Riedesel, Aberystwyth University (United Kingdom) \cr Martin Autzen, DTU NUTECH Center for Nuclear Technologies (Denmark) \cr Christoph Burow, University of Cologne (Germany) \cr Based on an excel spreadsheet and accompanying macro written by Ian Bailiff. , RLum Developer Team} \keyword{datagen} Luminescence/man/ExampleData.FittingLM.Rd0000644000176200001440000000213014521210044017734 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Luminescence-package.R \name{ExampleData.FittingLM} \alias{ExampleData.FittingLM} \alias{values.curve} \alias{values.curveBG} \title{Example data for fit_LMCurve() in the package Luminescence} \format{ Two objects (data.frames) with two columns (time and counts). } \source{ \tabular{ll}{ Lab: \tab Luminescence Laboratory Bayreuth\cr Lab-Code: \tab BT900\cr Location: \tab Norway\cr Material: \tab Beach deposit, coarse grain quartz measured on aluminium discs on a Risø TL/OSL DA-15 reader\cr } } \description{ Linearly modulated (LM) measurement data from a quartz sample from Norway including background measurement. Measurements carried out in the luminescence laboratory at the University of Bayreuth. } \examples{ ##show LM data data(ExampleData.FittingLM, envir = environment()) plot(values.curve,log="x") } \references{ Fuchs, M., Kreutzer, S., Fischer, M., Sauer, D., Soerensen, R., 2012. OSL and IRSL dating of raised beach sand deposits along the south-eastern coast of Norway. Quaternary Geochronology, 10, 195-200. 
} Luminescence/man/get_Risoe.BINfileData.Rd0000644000176200001440000000315514521210045017705 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_Risoe.BINfileData.R \name{get_Risoe.BINfileData} \alias{get_Risoe.BINfileData} \title{General accessor function for RLum S4 class objects} \usage{ get_Risoe.BINfileData(object, ...) } \arguments{ \item{object}{\linkS4class{Risoe.BINfileData} (\strong{required}): S4 object of class \code{RLum}} \item{...}{further arguments that one might want to pass to the specific get function} } \value{ Return is the same as input objects as provided in the list } \description{ Function calls object-specific get functions for RisoeBINfileData S4 class objects. } \details{ The function provides a generalised access point for specific \linkS4class{Risoe.BINfileData} objects. \cr Depending on the input object, the corresponding get function will be selected. Allowed arguments can be found in the documentations of the corresponding \linkS4class{Risoe.BINfileData} class. } \section{Function version}{ 0.1.0 } \seealso{ \linkS4class{Risoe.BINfileData} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. get_Risoe.BINfileData(): General accessor function for RLum S4 class objects. Function version 0.1.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. 
https://CRAN.R-project.org/package=Luminescence } \keyword{utilities} Luminescence/man/GitHub-API.Rd0000644000176200001440000000602514521210045015514 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/github.R \name{GitHub-API} \alias{GitHub-API} \alias{github_commits} \alias{github_branches} \alias{github_issues} \title{GitHub API} \usage{ github_commits(user = "r-lum", repo = "luminescence", branch = "master", n = 5) github_branches(user = "r-lum", repo = "luminescence") github_issues(user = "r-lum", repo = "luminescence", verbose = TRUE) } \arguments{ \item{user}{\link{character} (\emph{with default}): GitHub user name (defaults to \code{'r-lum'}).} \item{repo}{\link{character} (\emph{with default}): name of a GitHub repository (defaults to \code{'luminescence'}).} \item{branch}{\link{character} (\emph{with default}): branch of a GitHub repository (defaults to \code{'master'}).} \item{n}{\link{integer} (\emph{with default}): number of commits returned (defaults to 5).} \item{verbose}{\link{logical} (\emph{with default}): print the output to the console (defaults to \code{TRUE}).} } \value{ \code{github_commits}: \link{data.frame} with columns: \tabular{ll}{ \verb{[ ,1]} \tab SHA \cr \verb{[ ,2]} \tab AUTHOR \cr \verb{[ ,3]} \tab DATE \cr \verb{[ ,4]} \tab MESSAGE \cr } \code{github_branches}: \link{data.frame} with columns: \tabular{ll}{ \verb{[ ,1]} \tab BRANCH \cr \verb{[ ,2]} \tab SHA \cr \verb{[ ,3]} \tab INSTALL \cr } \code{github_commits}: Nested \link{list} with \code{n} elements. Each commit element is a list with elements: \tabular{ll}{ \verb{[[1]]} \tab NUMBER \cr \verb{[[2]]} \tab TITLE \cr \verb{[[3]]} \tab BODY \cr \verb{[[4]]} \tab CREATED \cr \verb{[[5]]} \tab UPDATED \cr \verb{[[6]]} \tab CREATOR \cr \verb{[[7]]} \tab URL \cr \verb{[[8]]} \tab STATUS \cr } } \description{ R Interface to the GitHub API v3. } \details{ These functions can be used to query a specific repository hosted on GitHub. 
\cr \code{github_commits} lists the most recent \code{n} commits of a specific branch of a repository. \code{github_branches} can be used to list all current branches of a repository and returns the corresponding SHA hash as well as an installation command to install the branch in R via the 'devtools' package. \code{github_issues} lists all open issues for a repository in valid YAML. } \section{Function version}{ 0.1.0 } \examples{ \dontrun{ github_branches(user = "r-lum", repo = "luminescence") github_issues(user = "r-lum", repo = "luminescence") github_commits(user = "r-lum", repo = "luminescence", branch = "master", n = 10) } } \section{How to cite}{ Burow, C., 2023. GitHub-API(): GitHub API. Function version 0.1.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ GitHub Developer API v3. \url{https://docs.github.com/v3/}, last accessed: 10/01/2017. } \author{ Christoph Burow, University of Cologne (Germany) , RLum Developer Team} Luminescence/man/ExampleData.XSYG.Rd0000644000176200001440000000607314521210044016703 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Luminescence-package.R \name{ExampleData.XSYG} \alias{ExampleData.XSYG} \alias{OSL.SARMeasurement} \alias{TL.Spectrum} \title{Example data for a SAR OSL measurement and a TL spectrum using a lexsyg reader} \format{ \code{OSL.SARMeasurement}: SAR OSL measurement data The data contain two elements: (a) \verb{$Sequence.Header} is a \link{data.frame} with metadata from the measurement,(b) \code{Sequence.Object} contains an \linkS4class{RLum.Analysis} object for further analysis. 
\code{TL.Spectrum}: TL spectrum data \linkS4class{RLum.Data.Spectrum} object for further analysis. The spectrum was cleaned from cosmic-rays using the function \code{apply_CosmicRayRemoval}. Note that no quantum efficiency calibration was performed. } \source{ \strong{OSL.SARMeasurement} \tabular{ll}{ Lab: \tab Luminescence Laboratory Giessen\cr Lab-Code: \tab no code\cr Location: \tab not specified\cr Material: \tab Coarse grain quartz on steel cups on lexsyg research reader\cr Reference: \tab unpublished } \strong{TL.Spectrum} \tabular{ll}{ Lab: \tab Luminescence Laboratory Giessen\cr Lab-Code: \tab BT753\cr Location: \tab Dolni Vestonice/Czech Republic\cr Material: \tab Fine grain polymineral on steel cups on lexsyg research reader\cr Reference: \tab Fuchs et al., 2013 \cr Spectrum: \tab Integration time 19 s, channel time 20 s\cr Heating: \tab 1 K/s, up to 500 deg. C } } \description{ Example data from a SAR OSL measurement and a TL spectrum for package Luminescence imported from a Freiberg Instruments XSYG file using the function \link{read_XSYG2R}. } \section{Version}{ 0.1 } \examples{ ##show data data(ExampleData.XSYG, envir = environment()) ## ========================================= ##(1) OSL.SARMeasurement OSL.SARMeasurement ##show $Sequence.Object OSL.SARMeasurement$Sequence.Object ##grep OSL curves and plot the first curve OSLcurve <- get_RLum(OSL.SARMeasurement$Sequence.Object, recordType="OSL")[[1]] plot_RLum(OSLcurve) ## ========================================= ##(2) TL.Spectrum TL.Spectrum ##plot simple spectrum (2D) plot_RLum.Data.Spectrum(TL.Spectrum, plot.type="contour", xlim = c(310,750), ylim = c(0,300), bin.rows=10, bin.cols = 1) ##plot 3d spectrum (uncomment for usage) # plot_RLum.Data.Spectrum(TL.Spectrum, plot.type="persp", # xlim = c(310,750), ylim = c(0,300), bin.rows=10, # bin.cols = 1) } \references{ Unpublished data measured to serve as example data for that package. 
Location origin of sample BT753 is given here: Fuchs, M., Kreutzer, S., Rousseau, D.D., Antoine, P., Hatte, C., Lagroix, F., Moine, O., Gauthier, C., Svoboda, J., Lisa, L., 2013. The loess sequence of Dolni Vestonice, Czech Republic: A new OSL-based chronology of the Last Climatic Cycle. Boreas, 42, 664--677. } \seealso{ \link{read_XSYG2R}, \linkS4class{RLum.Analysis}, \linkS4class{RLum.Data.Spectrum}, \link{plot_RLum}, \link{plot_RLum.Analysis}, \link{plot_RLum.Data.Spectrum} } \keyword{datasets} Luminescence/man/RLum.Data.Image-class.Rd0000644000176200001440000001112714521210045017575 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RLum.Data.Image-class.R \docType{class} \name{RLum.Data.Image-class} \alias{RLum.Data.Image-class} \alias{show,RLum.Data.Image-method} \alias{set_RLum,RLum.Data.Image-method} \alias{get_RLum,RLum.Data.Image-method} \alias{names_RLum,RLum.Data.Image-method} \title{Class \code{"RLum.Data.Image"}} \usage{ \S4method{show}{RLum.Data.Image}(object) \S4method{set_RLum}{RLum.Data.Image}( class, originator, .uid, .pid, recordType = "Image", curveType = NA_character_, data = array(), info = list() ) \S4method{get_RLum}{RLum.Data.Image}(object, info.object) \S4method{names_RLum}{RLum.Data.Image}(object) } \arguments{ \item{object}{\code{\link{get_RLum}}, \code{\link{names_RLum}} (\strong{required}): an object of class \linkS4class{RLum.Data.Image}} \item{class}{\code{\link{set_RLum}}; \link{character}: name of the \code{RLum} class to create} \item{originator}{\code{\link{set_RLum}}; \link{character} (\emph{automatic}): contains the name of the calling function (the function that produces this object); can be set manually.} \item{.uid}{\code{\link{set_RLum}}; \link{character} (\emph{automatic}): sets an unique ID for this object using the internal C++ function \code{create_UID}.} \item{.pid}{\code{\link{set_RLum}}; \link{character} (\emph{with default}): option to provide a parent id for nesting at 
will.} \item{recordType}{\code{\link{set_RLum}}; \link{character}: record type (e.g. "OSL")} \item{curveType}{\code{\link{set_RLum}}; \link{character}: curve type (e.g. "predefined" or "measured")} \item{data}{\code{\link{set_RLum}}; \link{matrix}: raw curve data. If data is of type \code{RLum.Data.Image} this can be used to re-construct the object, i.e. modified parameters except \code{.uid} and \code{.pid}. The rest will be subject to copy and paste unless provided.} \item{info}{\code{\link{set_RLum}}; \link{list}: info elements} \item{info.object}{\code{\link{get_RLum}}; \link{character}: name of the info object to returned} } \value{ \strong{\code{set_RLum}} Returns an object from class \code{RLum.Data.Image} \strong{\code{get_RLum}} \enumerate{ \item Returns the data object (\link{array}) \item only the info object if \code{info.object} was set. } \strong{\code{names_RLum}} Returns the names of the info elements } \description{ Class for representing luminescence image data (TL/OSL/RF). Such data are for example produced by the function \link{read_SPE2R} } \section{Methods (by generic)}{ \itemize{ \item \code{show(RLum.Data.Image)}: Show structure of \code{RLum.Data.Image} object \item \code{set_RLum(RLum.Data.Image)}: Construction method for RLum.Data.Image object. The slot info is optional and predefined as empty list by default. \item \code{get_RLum(RLum.Data.Image)}: Accessor method for \code{RLum.Data.Image} object. The argument \code{info.object} is optional to directly access the info elements. If no info element name is provided, the raw image data (\code{array}) will be returned. \item \code{names_RLum(RLum.Data.Image)}: Returns the names info elements coming along with this curve object }} \section{Slots}{ \describe{ \item{\code{recordType}}{Object of class \link{character} containing the type of the curve (e.g. 
"OSL image", "TL image")} \item{\code{curveType}}{Object of class \link{character} containing curve type, allowed values are measured or predefined} \item{\code{data}}{Object of class \link{array} containing image data.} \item{\code{info}}{Object of class \link{list} containing further meta information objects} }} \note{ The class should only contain data for a set of images. For additional elements the slot \code{info} can be used. } \section{Objects from the class}{ Objects can be created by calls of the form \code{set_RLum("RLum.Data.Image", ...)}. } \section{Class version}{ 0.5.1 } \examples{ showClass("RLum.Data.Image") ##create empty RLum.Data.Image object set_RLum(class = "RLum.Data.Image") } \seealso{ \linkS4class{RLum}, \linkS4class{RLum.Data}, \link{plot_RLum}, \link{read_SPE2R}, \link{read_TIFF2R} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. RLum.Data.Image-class(): Class 'RLum.Data.Image'. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \keyword{classes} \keyword{internal} Luminescence/man/plot_ROI.Rd0000644000176200001440000000702714521210045015415 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot_ROI.R \name{plot_ROI} \alias{plot_ROI} \title{Create Regions of Interest (ROI) Graphic} \usage{ plot_ROI( object, exclude_ROI = c(1), dist_thre = -Inf, dim.CCD = NULL, bg_image = NULL, plot = TRUE, ... ) } \arguments{ \item{object}{\linkS4class{RLum.Analysis}, \linkS4class{RLum.Results} or a \link{list} of such objects (\strong{required}): data input. 
Please note that to avoid function errors, only input created by the functions \link{read_RF2R} or \link{extract_ROI} is accepted} \item{exclude_ROI}{\link{numeric} (\emph{with default}): option to remove particular ROIs from the analysis. Those ROIs are plotted but not coloured and not taken into account in distance analysis. \code{NULL} excludes nothing.} \item{dist_thre}{\link{numeric} (\emph{optional}): euclidean distance threshold in pixel distance. All ROI for which the euclidean distance is smaller are marked. This helps to identify ROIs that might be affected by signal cross-talk. Note: the distance is calculated from the centre of an ROI, e.g., the threshold should include consider the ROIs or grain radius.} \item{dim.CCD}{\link{numeric} (\emph{optional}): metric x and y for the recorded (chip) surface in µm. For instance \code{c(8192,8192)}, if set additional x and y-axes are shown} \item{bg_image}{\linkS4class{RLum.Data.Image} (\emph{optional}): background image object please note that the dimensions are not checked.} \item{plot}{\link{logical} (\emph{with default}): enable or disable plot output to use the function only to extract the ROI data} \item{...}{further parameters to manipulate the plot. On top of all arguments of \link[graphics:plot.default]{graphics::plot.default} the following arguments are supported: \code{lwd.ROI}, \code{lty.ROI}, \code{col.ROI}, \code{col.pixel}, \code{text.labels}, \code{text.offset}, \code{grid} (\code{TRUE/FALSE}), \code{legend} (\code{TRUE/FALSE}), \code{legend.text}, \code{legend.pos}} } \value{ An ROI plot and an \linkS4class{RLum.Results} object with a matrix containing the extracted ROI data and a object produced by \link[stats:dist]{stats::dist} containing the euclidean distance between the ROIs. } \description{ Create ROI graphic with data extracted from the data imported via \link{read_RF2R}. 
This function is used internally by \link{analyse_IRSAR.RF} but might be of use to work with reduced data from spatially resolved measurements. The plot dimensions mimic the original image dimensions } \section{Function version}{ 0.2.0 } \examples{ ## simple example file <- system.file("extdata", "RF_file.rf", package = "Luminescence") temp <- read_RF2R(file) plot_ROI(temp) ## in combination with extract_ROI() m <- matrix(runif(100,0,255), ncol = 10, nrow = 10) roi <- matrix(c(2.,4,2,5,6,7,3,1,1), ncol = 3) t <- extract_ROI(object = m, roi = roi) plot_ROI(t, bg_image = m) } \seealso{ \link{read_RF2R}, \link{analyse_IRSAR.RF} } \author{ Sebastian Kreutzer, Department of Geography & Earth Sciences, Aberystwyth University (United Kingdom) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. plot_ROI(): Create Regions of Interest (ROI) Graphic. Function version 0.2.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \keyword{datagen} \keyword{plot} Luminescence/man/calc_ThermalLifetime.Rd0000644000176200001440000001302514521210045017756 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calc_ThermalLifetime.R \name{calc_ThermalLifetime} \alias{calc_ThermalLifetime} \title{Calculates the Thermal Lifetime using the Arrhenius equation} \usage{ calc_ThermalLifetime( E, s, T = 20, output_unit = "Ma", profiling = FALSE, profiling_config = NULL, verbose = TRUE, plot = TRUE, ... 
) } \arguments{ \item{E}{\link{numeric} (\strong{required}): vector of trap depths in eV, if \code{profiling = TRUE} only the first two elements are considered} \item{s}{\link{numeric} (\strong{required}): vector of frequency factor in 1/s, if \code{profiling = TRUE} only the first two elements are considered} \item{T}{\link{numeric} (\emph{with default}): temperature in deg. C for which the lifetime(s) will be calculated. A vector can be provided.} \item{output_unit}{\link{character} (\emph{with default}): output unit of the calculated lifetimes, accepted entries are: \code{"Ma"}, \code{"ka"}, \code{"a"}, \code{"d"}, \code{"h"}, \code{"min"}, \code{"s"}} \item{profiling}{\link{logical} (\emph{with default}): this option allows to estimate uncertainties based on given E and s parameters and their corresponding standard error (cf. details and examples section)} \item{profiling_config}{\link{list} (\emph{optional}): allows to set configuration parameters used for the profiling (and only have an effect here). Supported parameters are: \itemize{ \item \code{n} (number of MC runs), \item \code{E.distribution} (distribution used for the re-sampling for E) and \item \code{s.distribution} (distribution used for the re-sampling for s). } Currently only the normal distribution is supported (e.g., \code{profiling_config = list(E.distribution = "norm")}} \item{verbose}{\link{logical}: enables/disables verbose mode} \item{plot}{\link{logical}: enables/disables output plot, currently only in combination with \code{profiling = TRUE}.} \item{...}{further arguments that can be passed in combination with the plot output. Standard plot parameters are supported (\link{plot.default})} } \value{ A \linkS4class{RLum.Results} object is returned a along with a plot (for \code{profiling = TRUE}). 
The output object contain the following slots: \strong{\verb{@data}} \tabular{lll}{ \strong{Object} \tab \strong{Type} \tab \strong{Description} \cr \code{lifetimes} \tab \link{array} or \link{numeric} \tab calculated lifetimes \cr \code{profiling_matrix} \tab \link{matrix} \tab profiling matrix used for the MC runs } \strong{\verb{@info}} \tabular{lll}{ \strong{Object} \tab \strong{Type} \tab \strong{Description} \cr \code{call} \tab \code{call} \tab the original function call } } \description{ The function calculates the thermal lifetime of charges for given E (in eV), s (in 1/s) and T (in deg. C.) parameters. The function can be used in two operational modes: } \details{ \strong{Mode 1 \code{(profiling = FALSE)}} An arbitrary set of input parameters (E, s, T) can be provided and the function calculates the thermal lifetimes using the Arrhenius equation for all possible combinations of these input parameters. An array with 3-dimensions is returned that can be used for further analyses or graphical output (see example 1) \strong{Mode 2 \code{(profiling = TRUE)}} This mode tries to profile the variation of the thermal lifetime for a chosen temperature by accounting for the provided E and s parameters and their corresponding standard errors, e.g., \code{E = c(1.600, 0.001)} The calculation based on a Monte Carlo simulation, where values are sampled from a normal distribution (for E and s). \strong{Used equation (Arrhenius equation)} \deqn{\tau = 1/s exp(E/kT)} where: \eqn{\tau} in s as the mean time an electron spends in the trap for a given \eqn{T}, \eqn{E} trap depth in eV, \eqn{s} the frequency factor in 1/s, \eqn{T} the temperature in K and \eqn{k} the Boltzmann constant in eV/K (cf. Furetta, 2010). } \note{ The profiling is currently based on re-sampling from a normal distribution, this distribution assumption might be, however, not valid for given E and s parameters. 
} \section{Function version}{ 0.1.0 } \examples{ ##EXAMPLE 1 ##calculation for two trap-depths with similar frequency factor for different temperatures E <- c(1.66, 1.70) s <- 1e+13 T <- 10:20 temp <- calc_ThermalLifetime( E = E, s = s, T = T, output_unit = "Ma" ) contour(x = E, y = T, z = temp$lifetimes[1,,], ylab = "Temperature [\u00B0C]", xlab = "Trap depth [eV]", main = "Thermal Lifetime Contour Plot" ) mtext(side = 3, "(values quoted in Ma)") ##EXAMPLE 2 ##profiling of thermal life time for E and s and their standard error E <- c(1.600, 0.003) s <- c(1e+13,1e+011) T <- 20 calc_ThermalLifetime( E = E, s = s, T = T, profiling = TRUE, output_unit = "Ma" ) } \section{How to cite}{ Kreutzer, S., 2023. calc_ThermalLifetime(): Calculates the Thermal Lifetime using the Arrhenius equation. Function version 0.1.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Furetta, C., 2010. Handbook of Thermoluminescence, Second Edition. World Scientific. 
} \seealso{ \link[graphics:matplot]{graphics::matplot}, \link[stats:Normal]{stats::rnorm}, \link{get_RLum} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \keyword{datagen} Luminescence/man/RLum.Analysis-class.Rd0000644000176200001440000002075014521210045017470 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RLum.Analysis-class.R \docType{class} \name{RLum.Analysis-class} \alias{RLum.Analysis-class} \alias{show,RLum.Analysis-method} \alias{set_RLum,RLum.Analysis-method} \alias{get_RLum,RLum.Analysis-method} \alias{structure_RLum,RLum.Analysis-method} \alias{length_RLum,RLum.Analysis-method} \alias{names_RLum,RLum.Analysis-method} \alias{smooth_RLum,RLum.Analysis-method} \title{Class \code{"RLum.Analysis"}} \usage{ \S4method{show}{RLum.Analysis}(object) \S4method{set_RLum}{RLum.Analysis}( class, originator, .uid, .pid, protocol = NA_character_, records = list(), info = list() ) \S4method{get_RLum}{RLum.Analysis}( object, record.id = NULL, recordType = NULL, curveType = NULL, RLum.type = NULL, protocol = "UNKNOWN", get.index = NULL, drop = TRUE, recursive = TRUE, info.object = NULL, subset = NULL, env = parent.frame(2) ) \S4method{structure_RLum}{RLum.Analysis}(object, fullExtent = FALSE) \S4method{length_RLum}{RLum.Analysis}(object) \S4method{names_RLum}{RLum.Analysis}(object) \S4method{smooth_RLum}{RLum.Analysis}(object, ...) 
} \arguments{ \item{object}{\code{\link{get_RLum}}: \code{\link{names_RLum}}, \code{\link{length_RLum}}, \code{\link{structure_RLum}} (\strong{required}): an object of class \linkS4class{RLum.Analysis}} \item{class}{\code{\link{set_RLum}} \link{character} (\strong{required}): name of the \code{RLum} class to be created} \item{originator}{\code{\link{set_RLum}} \link{character} (\emph{automatic}): contains the name of the calling function (the function that produces this object); can be set manually.} \item{.uid}{\code{\link{set_RLum}} \link{character} (\emph{automatic}): sets an unique ID for this object using the internal C++ function \code{create_UID}.} \item{.pid}{\code{\link{set_RLum}} \link{character} (\emph{with default}): option to provide a parent id for nesting at will.} \item{protocol}{\code{\link{set_RLum}} \link{character} (\emph{optional}): sets protocol type for analysis object. Value may be used by subsequent analysis functions.} \item{records}{\code{\link{set_RLum}} \link{list} (\strong{required}): list of \linkS4class{RLum.Analysis} objects} \item{info}{\code{\link{set_RLum}} \link{list} (\emph{optional}): a list containing additional info data for the object \strong{\code{set_RLum}}: Returns an \linkS4class{RLum.Analysis} object.} \item{record.id}{\code{\link{get_RLum}}: \link{numeric} or \link{logical} (\emph{optional}): IDs of specific records. If of type \code{logical} the entire id range is assumed and \code{TRUE} and \code{FALSE} indicates the selection.} \item{recordType}{\code{\link{get_RLum}}: \link{character} (\emph{optional}): record type (e.g., "OSL"). Can be also a vector, for multiple matching, e.g., \code{recordType = c("OSL", "IRSL")}} \item{curveType}{\code{\link{get_RLum}}: \link{character} (\emph{optional}): curve type (e.g. "predefined" or "measured")} \item{RLum.type}{\code{\link{get_RLum}}: \link{character} (\emph{optional}): RLum object type. 
Defaults to "RLum.Data.Curve" and "RLum.Data.Spectrum".} \item{get.index}{\code{\link{get_RLum}}: \link{logical} (\emph{optional}): return a numeric vector with the index of each element in the RLum.Analysis object.} \item{drop}{\code{\link{get_RLum}}: \link{logical} (\emph{with default}): coerce to the next possible layer (which are \code{RLum.Data}-objects), \code{drop = FALSE} keeps the original \code{RLum.Analysis}} \item{recursive}{\code{\link{get_RLum}}: \link{logical} (\emph{with default}): if \code{TRUE} (the default) and the result of the \code{get_RLum()} request is a single object this object will be unlisted, means only the object itself and no list containing exactly one object is returned. Mostly this makes things easier, however, if this method is used within a loop this might be undesired.} \item{info.object}{\code{\link{get_RLum}}: \link{character} (\emph{optional}): name of the wanted info element} \item{subset}{\code{\link{get_RLum}}: \link{expression} (\emph{optional}): logical expression indicating elements or rows to keep: missing values are taken as false. This argument takes precedence over all other arguments, meaning they are not considered when subsetting the object.} \item{env}{\code{\link{get_RLum}}: \link{environment} (\emph{with default}): An environment passed to \link{eval} as the enclosure. This argument is only relevant when subsetting the object and should not be used manually.} \item{fullExtent}{\link{structure_RLum}; \link{logical} (\emph{with default}): extents the returned \code{data.frame} to its full extent, i.e. all info elements are part of the return as well. 
The default value is \code{FALSE} as the data frame might become rather big.} \item{...}{further arguments passed to underlying methods} } \value{ \strong{\code{get_RLum}}: Returns: \enumerate{ \item \link{list} of \linkS4class{RLum.Data} objects or \item Single \linkS4class{RLum.Data} object, if only one object is contained and \code{recursive = FALSE} or \item \linkS4class{RLum.Analysis} objects for \code{drop = FALSE} } \strong{\code{structure_RLum}}: Returns \linkS4class{data.frame} showing the structure. \strong{\code{length_RLum}} Returns the number records in this object. \strong{\code{names_RLum}} Returns the names of the record types (\code{recordType}) in this object. \strong{\code{smooth_RLum}} Same object as input, after smoothing } \description{ Object class to represent analysis data for protocol analysis, i.e. all curves, spectra etc. from one measurements. Objects from this class are produced, by e.g. \link{read_XSYG2R}, \link{read_Daybreak2R} } \section{Methods (by generic)}{ \itemize{ \item \code{show(RLum.Analysis)}: Show structure of \code{RLum.Analysis} object \item \code{set_RLum(RLum.Analysis)}: Construction method for \linkS4class{RLum.Analysis} objects. \item \code{get_RLum(RLum.Analysis)}: Accessor method for RLum.Analysis object. The slots record.id, \verb{@recordType}, \verb{@curveType} and \verb{@RLum.type} are optional to allow for records limited by their id (list index number), their record type (e.g. \code{recordType = "OSL"}) or object type. Example: curve type (e.g. \code{curveType = "predefined"} or \code{curveType ="measured"}) The selection of a specific RLum.type object superimposes the default selection. Currently supported objects are: RLum.Data.Curve and RLum.Data.Spectrum \item \code{structure_RLum(RLum.Analysis)}: Method to show the structure of an \linkS4class{RLum.Analysis} object. \item \code{length_RLum(RLum.Analysis)}: Returns the length of the object, i.e., number of stored records. 
\item \code{names_RLum(RLum.Analysis)}: Returns the names of the \linkS4class{RLum.Data} objects objects (same as shown with the show method) \item \code{smooth_RLum(RLum.Analysis)}: Smoothing of \code{RLum.Data} objects contained in this \code{RLum.Analysis} object \link[zoo:rollmean]{zoo::rollmean} or \link[zoo:rollmean]{zoo::rollmedian}. In particular the internal function \code{.smoothing} is used. }} \section{Slots}{ \describe{ \item{\code{protocol}}{Object of class \link{character} describing the applied measurement protocol} \item{\code{records}}{Object of class \link{list} containing objects of class \linkS4class{RLum.Data}} }} \note{ The method \link{structure_RLum} is currently just available for objects containing \linkS4class{RLum.Data.Curve}. } \section{Objects from the Class}{ Objects can be created by calls of the form \code{set_RLum("RLum.Analysis", ...)}. } \section{Class version}{ 0.4.16 } \examples{ showClass("RLum.Analysis") ##set empty object set_RLum(class = "RLum.Analysis") ###use example data ##load data data(ExampleData.RLum.Analysis, envir = environment()) ##show curves in object get_RLum(IRSAR.RF.Data) ##show only the first object, but by keeping the object get_RLum(IRSAR.RF.Data, record.id = 1, drop = FALSE) } \seealso{ \link{Risoe.BINfileData2RLum.Analysis}, \linkS4class{Risoe.BINfileData}, \linkS4class{RLum} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. RLum.Analysis-class(): Class 'RLum.Analysis'. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. 
https://CRAN.R-project.org/package=Luminescence } \keyword{classes} \keyword{internal} \keyword{methods} Luminescence/man/read_Daybreak2R.Rd0000644000176200001440000000517714521210045016653 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/read_Daybreak2R.R \name{read_Daybreak2R} \alias{read_Daybreak2R} \title{Import measurement data produced by a Daybreak TL/OSL reader into R} \usage{ read_Daybreak2R(file, raw = FALSE, verbose = TRUE, txtProgressBar = TRUE) } \arguments{ \item{file}{\link{character} or \link{list} (\strong{required}): path and file name of the file to be imported. Alternatively a list of file names can be provided or just the path a folder containing measurement data. Please note that the specific, common, file extension (txt) is likely leading to function failures during import when just a path is provided.} \item{raw}{\link{logical} (\emph{with default}): if the input is a DAT-file (binary) a \link[data.table:data.table]{data.table::data.table} instead of the \linkS4class{RLum.Analysis} object can be returned for debugging purposes.} \item{verbose}{\link{logical} (\emph{with default}): enables or disables terminal feedback} \item{txtProgressBar}{\link{logical} (\emph{with default}): enables or disables \link{txtProgressBar}.} } \value{ A list of \linkS4class{RLum.Analysis} objects (each per position) is provided. } \description{ Import a TXT-file (ASCII file) or a DAT-file (binary file) produced by a Daybreak reader into R. The import of the DAT-files is limited to the file format described for the software TLAPLLIC v.3.2 used for a Daybreak, model 1100. } \note{ \strong{\verb{[BETA VERSION]}} This function still needs to be tested properly. In particular the function has underwent only very rough rests using a few files. 
} \section{Function version}{ 0.3.2 } \examples{ \dontrun{ file <- system.file("extdata/Daybreak_TestFile.txt", package = "Luminescence") temp <- read_Daybreak2R(file) } } \seealso{ \linkS4class{RLum.Analysis}, \linkS4class{RLum.Data.Curve}, \link[data.table:data.table]{data.table::data.table} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)\cr Antoine Zink, C2RMF, Palais du Louvre, Paris (France) The ASCII-file import is based on a suggestion by Willian Amidon and Andrew Louis Gorin , RLum Developer Team} \section{How to cite}{ Kreutzer, S., Zink, A., 2023. read_Daybreak2R(): Import measurement data produced by a Daybreak TL/OSL reader into R. Function version 0.3.2. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \keyword{IO} Luminescence/man/ExampleData.portableOSL.Rd0000644000176200001440000000150014521210044020265 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Luminescence-package.R \docType{data} \name{ExampleData.portableOSL} \alias{ExampleData.portableOSL} \title{Example portable OSL curve data for the package Luminescence} \source{ \strong{ExampleData.portableOSL} \tabular{ll}{ Lab: \tab Cologne Luminescence Laboratory\cr Lab-Code: \tab \code{none} \cr Location: \tab Nievenheim/Germany\cr Material: \tab Fine grain quartz \cr Reference: \tab unpublished data } } \description{ A \code{list} of \linkS4class{RLum.Analysis} objects, each containing the same number of \linkS4class{RLum.Data.Curve} objects representing individual OSL, IRSL and dark count measurements of a sample. 
} \examples{ data(ExampleData.portableOSL, envir = environment()) plot_RLum(ExampleData.portableOSL) } \keyword{datasets} Luminescence/man/calc_Statistics.Rd0000644000176200001440000000635214521210045017042 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calc_Statistics.R \name{calc_Statistics} \alias{calc_Statistics} \title{Function to calculate statistic measures} \usage{ calc_Statistics( data, weight.calc = "square", digits = NULL, n.MCM = NULL, na.rm = TRUE ) } \arguments{ \item{data}{\link{data.frame} or \linkS4class{RLum.Results} object (\strong{required}): for \link{data.frame} two columns: De (\code{data[,1]}) and De error (\code{data[,2]}). To plot several data sets in one plot the data sets must be provided as \code{list}, e.g. \code{list(data.1, data.2)}.} \item{weight.calc}{\link{character}: type of weight calculation. One out of \code{"reciprocal"} (weight is 1/error), \code{"square"} (weight is 1/error^2). Default is \code{"square"}.} \item{digits}{\link{integer} (\emph{with default}): round numbers to the specified digits. If digits is set to \code{NULL} nothing is rounded.} \item{n.MCM}{\link{numeric} (\emph{with default}): number of samples drawn for Monte Carlo-based statistics. \code{NULL} (the default) disables MC runs.} \item{na.rm}{\link{logical} (\emph{with default}): indicating whether \code{NA} values should be stripped before the computation proceeds.} } \value{ Returns a list with weighted and unweighted statistic measures. } \description{ This function calculates a number of descriptive statistics for estimates with a given standard error (SE), most fundamentally using error-weighted approaches. } \details{ The option to use Monte Carlo Methods (\code{n.MCM}) allows calculating all descriptive statistics based on random values. The distribution of these random values is based on the Normal distribution with \code{De} values as means and \code{De_error} values as one standard deviation. 
Increasing the number of MCM-samples linearly increases computation time. On a Lenovo X230 machine evaluation of 25 Aliquots with n.MCM = 1000 takes 0.01 s, with n = 100000, ca. 1.65 s. It might be useful to work with logarithms of these values. See Dietze et al. (2016, Quaternary Geochronology) and the function \link{plot_AbanicoPlot} for details. } \section{Function version}{ 0.1.7 } \examples{ ## load example data data(ExampleData.DeValues, envir = environment()) ## show a rough plot of the data to illustrate the non-normal distribution plot_KDE(ExampleData.DeValues$BT998) ## calculate statistics and show output str(calc_Statistics(ExampleData.DeValues$BT998)) \dontrun{ ## now the same for 10000 normal distributed random numbers with equal errors x <- as.data.frame(cbind(rnorm(n = 10^5, mean = 0, sd = 1), rep(0.001, 10^5))) ## note the congruent results for weighted and unweighted measures str(calc_Statistics(x)) } } \author{ Michael Dietze, GFZ Potsdam (Germany) , RLum Developer Team} \section{How to cite}{ Dietze, M., 2023. calc_Statistics(): Function to calculate statistic measures. Function version 0.1.7. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \keyword{datagen} Luminescence/man/calc_AverageDose.Rd0000644000176200001440000001320114521210045017064 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calc_AverageDose.R \name{calc_AverageDose} \alias{calc_AverageDose} \title{Calculate the Average Dose and the dose rate dispersion} \usage{ calc_AverageDose( data, sigma_m = NULL, Nb_BE = 500, na.rm = TRUE, plot = TRUE, verbose = TRUE, ... 
) } \arguments{ \item{data}{\linkS4class{RLum.Results} or \link{data.frame} (\strong{required}): for \link{data.frame}: two columns with \code{De} \code{(data[,1])} and \verb{De error} \code{(values[,2])}} \item{sigma_m}{\link{numeric} (\strong{required}): the overdispersion resulting from a dose recovery experiment, i.e. when all grains have received the same dose. Indeed in such a case, any overdispersion (i.e. dispersion on top of analytical uncertainties) is, by definition, an unrecognised measurement uncertainty.} \item{Nb_BE}{\link{integer} (\emph{with default}): sample size used for the bootstrapping} \item{na.rm}{\link{logical} (\emph{with default}): exclude NA values from the data set prior to any further operation.} \item{plot}{\link{logical} (\emph{with default}): enables/disables plot output} \item{verbose}{\link{logical} (\emph{with default}): enables/disables terminal output} \item{...}{further arguments that can be passed to \link[graphics:hist]{graphics::hist}. As three plots are returned all arguments need to be provided as \link{list}, e.g., \code{main = list("Plot 1", "Plot 2", "Plot 3")}. Note: not all arguments of \code{hist} are supported, but the output of \code{hist} is returned and can be used of own plots. \cr Further supported arguments: \code{mtext} (\link{character}), \code{rug} (\code{TRUE/FALSE}).} } \value{ The function returns numerical output and an (\emph{optional}) plot. -----------------------------------\cr \verb{[ NUMERICAL OUTPUT ]} \cr -----------------------------------\cr \strong{\code{RLum.Results}}-object\cr \strong{slot:} \strong{\verb{@data}} \cr \verb{[.. 
$summary : data.frame]}\cr \tabular{lll}{ \strong{Column} \tab \strong{Type} \tab \strong{Description}\cr AVERAGE_DOSE \tab \link{numeric} \tab the obtained average dose\cr AVERAGE_DOSE.SE \tab \link{numeric} \tab the average dose error \cr SIGMA_D \tab \link{numeric}\tab sigma \cr SIGMA_D.SE \tab \link{numeric}\tab standard error of the sigma \cr IC_AVERAGE_DOSE.LEVEL \tab \link{character}\tab confidence level average dose\cr IC_AVERAGE_DOSE.LOWER \tab \link{character}\tab lower quantile of average dose \cr IC_AVERAGE_DOSE.UPPER \tab \link{character}\tab upper quantile of average dose\cr IC_SIGMA_D.LEVEL \tab \link{integer}\tab confidence level sigma\cr IC_SIGMA_D.LOWER \tab \link{character}\tab lower sigma quantile\cr IC_SIGMA_D.UPPER \tab \link{character}\tab upper sigma quantile\cr L_MAX \tab \link{character}\tab maximum likelihood value } \verb{[.. $dstar : matrix]} \cr Matrix with bootstrap values\cr \verb{[.. $hist : list]}\cr Object as produced by the function histogram ------------------------\cr \verb{[ PLOT OUTPUT ]}\cr ------------------------\cr The function returns two different plot panels. (1) An abanico plot with the dose values (2) A histogram panel comprising 3 histograms with the equivalent dose and the bootstrapped average dose and the sigma values. } \description{ This functions calculates the Average Dose and their extrinsic dispersion and estimates the standard errors by bootstrapping based on the Average Dose Model by Guerin et al., 2017 } \details{ \strong{\code{sigma_m}}\cr The program requires the input of a known value of \code{sigma_m}, which corresponds to the intrinsic overdispersion, as determined by a dose recovery experiment. Then the dispersion in doses (\code{sigma_d}) will be that over and above \code{sigma_m} (and individual uncertainties \code{sigma_wi}). } \note{ This function has beta status! 
} \section{Function version}{ 0.1.5 } \examples{ ##Example 01 using package example data ##load example data data(ExampleData.DeValues, envir = environment()) ##calculate Average dose ##(use only the first 56 values here) AD <- calc_AverageDose(ExampleData.DeValues$CA1[1:56,], sigma_m = 0.1) ##plot De and set Average dose as central value plot_AbanicoPlot( data = ExampleData.DeValues$CA1[1:56,], z.0 = AD$summary$AVERAGE_DOSE) } \section{How to cite}{ Christophe, C., Philippe, A., Kreutzer, S., 2023. calc_AverageDose(): Calculate the Average Dose and the dose rate dispersion. Function version 0.1.5. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Guerin, G., Christophe, C., Philippe, A., Murray, A.S., Thomsen, K.J., Tribolo, C., Urbanova, P., Jain, M., Guibert, P., Mercier, N., Kreutzer, S., Lahaye, C., 2017. Absorbed dose, equivalent dose, measured dose rates, and implications for OSL age estimates: Introducing the Average Dose Model. Quaternary Geochronology 1-32. doi:10.1016/j.quageo.2017.04.002 \strong{Further reading}\cr Efron, B., Tibshirani, R., 1986. Bootstrap Methods for Standard Errors, Confidence Intervals, and Other Measures of Statistical Accuracy. Statistical Science 1, 54-75. 
} \seealso{ \link{read.table}, \link[graphics:hist]{graphics::hist} } \author{ Claire Christophe, IRAMAT-CRP2A, Université de Nantes (France), Anne Philippe, Université de Nantes, (France), Guillaume Guérin, IRAMAT-CRP2A, Université Bordeaux Montaigne, (France), Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \keyword{datagen} Luminescence/man/methods_RLum.Rd0000644000176200001440000001761214521210044016330 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods_RLum.R \name{methods_RLum} \alias{methods_RLum} \alias{plot.list} \alias{plot.RLum.Results} \alias{plot.RLum.Analysis} \alias{plot.RLum.Data.Curve} \alias{plot.RLum.Data.Spectrum} \alias{plot.RLum.Data.Image} \alias{plot.Risoe.BINfileData} \alias{hist.RLum.Results} \alias{hist.RLum.Data.Image} \alias{hist.RLum.Data.Curve} \alias{hist.RLum.Analysis} \alias{summary.RLum.Results} \alias{summary.RLum.Analysis} \alias{summary.RLum.Data.Image} \alias{summary.RLum.Data.Curve} \alias{subset.Risoe.BINfileData} \alias{subset.RLum.Analysis} \alias{bin.RLum.Data.Curve} \alias{bin.RLum.Data.Spectrum} \alias{length.RLum.Results} \alias{length.RLum.Analysis} \alias{length.RLum.Data.Curve} \alias{length.Risoe.BINfileData} \alias{dim.RLum.Data.Curve} \alias{dim.RLum.Data.Spectrum} \alias{rep.RLum} \alias{names.RLum.Data.Curve} \alias{names.RLum.Data.Spectrum} \alias{names.RLum.Data.Image} \alias{names.RLum.Analysis} \alias{names.RLum.Results} \alias{names.Risoe.BINfileData} \alias{row.names.RLum.Data.Spectrum} \alias{as.data.frame.RLum.Data.Curve} \alias{as.data.frame.RLum.Data.Spectrum} \alias{as.data.frame.Risoe.BINfileData} \alias{as.list.RLum.Results} \alias{as.list.RLum.Data.Curve} \alias{as.list.RLum.Data.Image} \alias{as.list.RLum.Analysis} \alias{as.matrix.RLum.Data.Curve} \alias{as.matrix.RLum.Data.Spectrum} \alias{as.matrix.RLum.Data.Image} \alias{is.RLum} \alias{is.RLum.Data} \alias{is.RLum.Data.Curve} 
\alias{is.RLum.Data.Spectrum} \alias{is.RLum.Data.Image} \alias{is.RLum.Analysis} \alias{is.RLum.Results} \alias{merge.RLum} \alias{unlist.RLum.Analysis} \alias{+.RLum.Data.Curve} \alias{-.RLum.Data.Curve} \alias{*.RLum.Data.Curve} \alias{/.RLum.Data.Curve} \alias{[.RLum.Data.Curve} \alias{[.RLum.Data.Spectrum} \alias{[.RLum.Data.Image} \alias{[.RLum.Analysis} \alias{[.RLum.Results} \alias{[<-.RLum.Data.Curve} \alias{[[.RLum.Analysis} \alias{[[.RLum.Results} \alias{$.RLum.Data.Curve} \alias{$.RLum.Analysis} \alias{$.RLum.Results} \title{methods_RLum} \usage{ \method{plot}{list}(x, y, ...) \method{plot}{RLum.Results}(x, y, ...) \method{plot}{RLum.Analysis}(x, y, ...) \method{plot}{RLum.Data.Curve}(x, y, ...) \method{plot}{RLum.Data.Spectrum}(x, y, ...) \method{plot}{RLum.Data.Image}(x, y, ...) \method{plot}{Risoe.BINfileData}(x, y, ...) \method{hist}{RLum.Results}(x, ...) \method{hist}{RLum.Data.Image}(x, ...) \method{hist}{RLum.Data.Curve}(x, ...) \method{hist}{RLum.Analysis}(x, ...) \method{summary}{RLum.Results}(object, ...) \method{summary}{RLum.Analysis}(object, ...) \method{summary}{RLum.Data.Image}(object, ...) \method{summary}{RLum.Data.Curve}(object, ...) \method{subset}{Risoe.BINfileData}(x, subset, records.rm = TRUE, ...) \method{subset}{RLum.Analysis}(x, subset = NULL, ...) bin.RLum.Data.Curve(x, bin_size = 2, ...) bin.RLum.Data.Spectrum(x, bin_size.row = 1, bin_size.col = 1, ...) \method{length}{RLum.Results}(x, ...) \method{length}{RLum.Analysis}(x, ...) \method{length}{RLum.Data.Curve}(x, ...) \method{length}{Risoe.BINfileData}(x, ...) \method{dim}{RLum.Data.Curve}(x) \method{dim}{RLum.Data.Spectrum}(x) \method{rep}{RLum}(x, ...) \method{names}{RLum.Data.Curve}(x, ...) \method{names}{RLum.Data.Spectrum}(x, ...) \method{names}{RLum.Data.Image}(x, ...) \method{names}{RLum.Analysis}(x, ...) \method{names}{RLum.Results}(x, ...) \method{names}{Risoe.BINfileData}(x) \method{row.names}{RLum.Data.Spectrum}(x, ...) 
\method{as.data.frame}{RLum.Data.Curve}(x, row.names = NULL, optional = FALSE, ...) \method{as.data.frame}{RLum.Data.Spectrum}(x, row.names = NULL, optional = FALSE, ...) \method{as.data.frame}{Risoe.BINfileData}(x, row.names = NULL, optional = FALSE, ...) \method{as.list}{RLum.Results}(x, ...) \method{as.list}{RLum.Data.Curve}(x, ...) \method{as.list}{RLum.Data.Image}(x, ...) \method{as.list}{RLum.Analysis}(x, ...) \method{as.matrix}{RLum.Data.Curve}(x, ...) \method{as.matrix}{RLum.Data.Spectrum}(x, ...) \method{as.matrix}{RLum.Data.Image}(x, ...) is.RLum(x, ...) is.RLum.Data(x, ...) is.RLum.Data.Curve(x, ...) is.RLum.Data.Spectrum(x, ...) is.RLum.Data.Image(x, ...) is.RLum.Analysis(x, ...) is.RLum.Results(x, ...) \method{merge}{RLum}(x, y, ...) \method{unlist}{RLum.Analysis}(x, recursive = TRUE, ...) \method{+}{RLum.Data.Curve}(x, y) \method{-}{RLum.Data.Curve}(x, y) \method{*}{RLum.Data.Curve}(x, y) \method{/}{RLum.Data.Curve}(x, y) \method{[}{RLum.Data.Curve}(x, y, z, drop = TRUE) \method{[}{RLum.Data.Spectrum}(x, y, z, drop = TRUE) \method{[}{RLum.Data.Image}(x, y, z, drop = TRUE) \method{[}{RLum.Analysis}(x, i, drop = FALSE) \method{[}{RLum.Results}(x, i, drop = TRUE) \method{[}{RLum.Data.Curve}(x, i, j) <- value \method{[[}{RLum.Analysis}(x, i) \method{[[}{RLum.Results}(x, i) \method{$}{RLum.Data.Curve}(x, i) \method{$}{RLum.Analysis}(x, i) \method{$}{RLum.Results}(x, i) } \arguments{ \item{x}{\linkS4class{RLum} or \linkS4class{Risoe.BINfileData} (\strong{required}): input object} \item{y}{\link{integer} (\emph{optional}): the row index of the matrix, data.frame} \item{...}{further arguments that can be passed to the method} \item{object}{\linkS4class{RLum} (\strong{required}): input object} \item{subset}{\verb{[subset]} \link{expression} (\strong{required}): logical expression indicating elements or rows to keep, this function works in \linkS4class{Risoe.BINfileData} objects like \link{subset.data.frame}, but takes care of the object structure. 
Works also on \linkS4class{RLum.Analysis} objects.} \item{records.rm}{\link{subset} \link{logical} (\emph{with default}): remove records from data set, can be disabled, to just set the column \code{SET} to \code{TRUE} or \code{FALSE}} \item{row.names}{\link{logical} (\emph{with default}): enables or disables row names (\code{as.data.frame})} \item{optional}{\link{logical} (\emph{with default}): logical. If TRUE, setting row names and converting column names (to syntactic names: see make.names) is optional (see \link[base:as.data.frame]{base::as.data.frame})} \item{recursive}{\link{logical} (\emph{with default}): enables or disables further sub-setting (\code{unlist})} \item{z}{\link{integer} (\emph{optional}): the column index of the matrix, data.frame} \item{drop}{\link{logical} (\emph{with default}): keep object structure or drop it} \item{i}{\link{character} (\emph{optional}): name of the wanted record type or data object or row in the \code{RLum.Data.Curve} object} \item{j}{\link{integer} (\emph{optional}): column of the data matrix in the \code{RLum.Data.Curve} object} \item{value}{\link{numeric} \strong{(required)}: numeric value which replace the value in the \code{RLum.Data.Curve} object} } \description{ Methods for S3-generics implemented for the package 'Luminescence'. This document summarises all implemented S3-generics. The name of the function is given before the first dot, after the dot the name of the object that is supported by this method is given, e.g. \code{plot.RLum.Data.Curve} can be called by \code{plot(object, ...)}, where \code{object} is the \code{RLum.Data.Curve} object. } \details{ The term S3-generics sounds complicated, however, it just means that something has been implemented in the package to increase the usability for users new in R and who are not familiar with the underlying \code{RLum}-object structure of the package. 
The practical outcome is that operations and functions presented in standard books on R can be used without knowing the specifics of the R package \code{'Luminescence'}. For examples see the example section. } \note{ \code{methods_RLum} are not really new functions, everything given here are mostly just surrogates for existing functions in the package. } \examples{ ##load example data data(ExampleData.RLum.Analysis, envir = environment()) ##combine curve is various ways curve1 <- IRSAR.RF.Data[[1]] curve2 <- IRSAR.RF.Data[[1]] curve1 + curve2 curve1 - curve2 curve1 / curve2 curve1 * curve2 ##`$` access curves IRSAR.RF.Data$RF } \keyword{internal} Luminescence/man/calc_TLLxTxRatio.Rd0000644000176200001440000000733014521210045017043 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calc_TLLxTxRatio.R \name{calc_TLLxTxRatio} \alias{calc_TLLxTxRatio} \title{Calculate the Lx/Tx ratio for a given set of TL curves -beta version-} \usage{ calc_TLLxTxRatio( Lx.data.signal, Lx.data.background = NULL, Tx.data.signal, Tx.data.background = NULL, signal.integral.min, signal.integral.max ) } \arguments{ \item{Lx.data.signal}{\linkS4class{RLum.Data.Curve} or \link{data.frame} (\strong{required}): TL data (x = temperature, y = counts) (TL signal)} \item{Lx.data.background}{\linkS4class{RLum.Data.Curve} or \link{data.frame} (\emph{optional}): TL data (x = temperature, y = counts). If no data are provided no background subtraction is performed.} \item{Tx.data.signal}{\linkS4class{RLum.Data.Curve} or \link{data.frame} (\strong{required}): TL data (x = temperature, y = counts) (TL test signal)} \item{Tx.data.background}{\linkS4class{RLum.Data.Curve} or \link{data.frame} (\emph{optional}): TL data (x = temperature, y = counts). If no data are provided no background subtraction is performed.} \item{signal.integral.min}{\link{integer} (\strong{required}): channel number for the lower signal integral bound (e.g. 
\code{signal.integral.min = 100})} \item{signal.integral.max}{\link{integer} (\strong{required}): channel number for the upper signal integral bound (e.g. \code{signal.integral.max = 200})} } \value{ Returns an S4 object of type \linkS4class{RLum.Results}. Slot \code{data} contains a \link{list} with the following structure: \if{html}{\out{
}}\preformatted{$ LxTx.table .. $ LnLx .. $ LnLx.BG .. $ TnTx .. $ TnTx.BG .. $ Net_LnLx .. $ Net_LnLx.Error }\if{html}{\out{
}} } \description{ Calculate Lx/Tx ratio for a given set of TL curves. } \details{ \strong{Uncertainty estimation} The standard errors are calculated using the following generalised equation: \deqn{SE_{signal} = abs(Signal_{net} * BG_f /BG_{signal})} where \eqn{BG_f} is a term estimated by calculating the standard deviation of the sum of the \eqn{L_x} background counts and the sum of the \eqn{T_x} background counts. However, if both signals are similar the error becomes zero. } \note{ \strong{This function has still BETA status!} Please further note that a similar background for both curves results in a zero error and is therefore set to \code{NA}. } \section{Function version}{ 0.3.3 } \examples{ ##load package example data data(ExampleData.BINfileData, envir = environment()) ##convert Risoe.BINfileData into a curve object temp <- Risoe.BINfileData2RLum.Analysis(TL.SAR.Data, pos = 3) Lx.data.signal <- get_RLum(temp, record.id=1) Lx.data.background <- get_RLum(temp, record.id=2) Tx.data.signal <- get_RLum(temp, record.id=3) Tx.data.background <- get_RLum(temp, record.id=4) signal.integral.min <- 210 signal.integral.max <- 230 output <- calc_TLLxTxRatio( Lx.data.signal, Lx.data.background, Tx.data.signal, Tx.data.background, signal.integral.min, signal.integral.max) get_RLum(output) } \seealso{ \linkS4class{RLum.Results}, \link{analyse_SAR.TL} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) \cr Christoph Schmidt, University of Bayreuth (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., Schmidt, C., 2023. calc_TLLxTxRatio(): Calculate the Lx/Tx ratio for a given set of TL curves -beta version-. Function version 0.3.3. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. 
https://CRAN.R-project.org/package=Luminescence } \keyword{datagen} Luminescence/man/analyse_IRSAR.RF.Rd0000644000176200001440000004765314521210044016640 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/analyse_IRSAR.RF.R \name{analyse_IRSAR.RF} \alias{analyse_IRSAR.RF} \title{Analyse IRSAR RF measurements} \usage{ analyse_IRSAR.RF( object, sequence_structure = c("NATURAL", "REGENERATED"), RF_nat.lim = NULL, RF_reg.lim = NULL, method = "FIT", method.control = NULL, test_parameters = NULL, n.MC = 10, txtProgressBar = TRUE, plot = TRUE, plot_reduced = FALSE, ... ) } \arguments{ \item{object}{\linkS4class{RLum.Analysis} or a \link{list} of \linkS4class{RLum.Analysis}-objects (\strong{required}): input object containing data for protocol analysis. The function expects to find at least two curves in the \linkS4class{RLum.Analysis} object: (1) \code{RF_nat}, (2) \code{RF_reg}. If a \code{list} is provided as input all other parameters can be provided as \code{list} as well to gain full control.} \item{sequence_structure}{\link{vector} \link{character} (\emph{with default}): specifies the general sequence structure. Allowed steps are \code{NATURAL}, \code{REGENERATED}. In addition any other character is allowed in the sequence structure; such curves will be ignored during the analysis.} \item{RF_nat.lim}{\link{vector} (\emph{with default}): set minimum and maximum channel range for natural signal fitting and sliding. If only one value is provided this will be treated as minimum value and the maximum limit will be added automatically.} \item{RF_reg.lim}{\link{vector} (\emph{with default}): set minimum and maximum channel range for regenerated signal fitting and sliding. If only one value is provided this will be treated as minimum value and the maximum limit will be added automatically.} \item{method}{\link{character} (\emph{with default}): setting method applied for the data analysis. 
Possible options are \code{"FIT"} or \code{"SLIDE"}.} \item{method.control}{\link{list} (\emph{optional}): parameters to control the method, that can be passed to the chosen method. These are for (1) \code{method = "FIT"}: \code{'trace'}, \code{'maxiter'}, \code{'warnOnly'}, \code{'minFactor'} and for (2) \code{method = "SLIDE"}: \code{'correct_onset'}, \code{'show_density'}, \code{'show_fit'}, \code{'trace'}. See details.} \item{test_parameters}{\link{list} (\emph{with default}): set test parameters. Supported parameters are: \code{curves_ratio}, \code{residuals_slope} (only for \code{method = "SLIDE"}), \code{curves_bounds}, \code{dynamic_ratio}, \code{lambda}, \code{beta} and \code{delta.phi}. All input: \link{numeric} values, \code{NA} and \code{NULL} (s. Details) (see Details for further information)} \item{n.MC}{\link{numeric} (\emph{with default}): set number of Monte Carlo runs for start parameter estimation (\code{method = "FIT"}) or error estimation (\code{method = "SLIDE"}). This value can be set to \code{NULL} to skip the MC runs. Note: Large values will significantly increase the computation time} \item{txtProgressBar}{\link{logical} (\emph{with default}): enables \code{TRUE} or disables \code{FALSE} the progression bar during MC runs} \item{plot}{\link{logical} (\emph{with default}): plot output (\code{TRUE} or \code{FALSE})} \item{plot_reduced}{\link{logical} (\emph{optional}): provides a reduced plot output if enabled to allow common R plot combinations, e.g., \code{par(mfrow(...))}. If \code{TRUE} no residual plot is returned; it has no effect if \code{plot = FALSE}} \item{...}{further arguments that will be passed to the plot output. 
Currently supported arguments are \code{main}, \code{xlab}, \code{ylab}, \code{xlim}, \code{ylim}, \code{log}, \code{legend} (\code{TRUE/FALSE}), \code{legend.pos}, \code{legend.text} (passes argument to x,y in \link[graphics:legend]{graphics::legend}), \code{xaxt}} } \value{ The function returns numerical output and an (\emph{optional}) plot. -----------------------------------\cr \verb{[ NUMERICAL OUTPUT ]}\cr -----------------------------------\cr \strong{\code{RLum.Results}}-object \strong{slot:} \strong{\verb{@data}} \verb{[.. $data : data.frame]} \tabular{lll}{ \strong{Column} \tab \strong{Type} \tab \strong{Description}\cr \code{DE} \tab \code{numeric} \tab the obtained equivalent dose\cr \code{DE.ERROR} \tab \code{numeric} \tab (only \code{method = "SLIDE"}) standard deviation obtained from MC runs \cr \code{DE.LOWER} \tab \code{numeric}\tab 2.5\% quantile for De values obtained by MC runs \cr \code{DE.UPPER} \tab \code{numeric}\tab 97.5\% quantile for De values obtained by MC runs \cr \code{DE.STATUS} \tab \code{character}\tab test parameter status\cr \code{RF_NAT.LIM} \tab \code{character}\tab used \code{RF_nat} curve limits \cr \code{RF_REG.LIM} \tab \code{character}\tab used \code{RF_reg} curve limits\cr \code{POSITION} \tab \code{integer}\tab (\emph{optional}) position of the curves\cr \code{DATE} \tab \code{character}\tab (\emph{optional}) measurement date\cr \code{SEQUENCE_NAME} \tab \code{character}\tab (\emph{optional}) sequence name\cr \code{UID} \tab \code{character}\tab unique data set ID } \verb{[.. $De.MC : numeric]} A \code{numeric} vector with all the De values obtained by the MC runs. \verb{[.. 
$test_parameters : data.frame]} \tabular{lll}{ \strong{Column} \tab \strong{Type} \tab \strong{Description}\cr \code{POSITION} \tab \code{numeric} \tab aliquot position \cr \code{PARAMETER} \tab \code{character} \tab test parameter name \cr \code{THRESHOLD} \tab \code{numeric} \tab set test parameter threshold value \cr \code{VALUE} \tab \code{numeric} \tab the calculated test parameter value (to be compared with the threshold)\cr \code{STATUS} \tab \code{character} \tab test parameter status either \code{"OK"} or \code{"FAILED"} \cr \code{SEQUENCE_NAME} \tab \code{character} \tab name of the sequence, so far available \cr \code{UID} \tab \code{character}\tab unique data set ID } \verb{[.. $fit : data.frame]} An \link{nls} object produced by the fitting. \verb{[.. $slide : list]} A \link{list} with data produced during the sliding. Some elements are previously reported with the summary object data. List elements are: \tabular{lll}{ \strong{Element} \tab \strong{Type} \tab \strong{Description}\cr \code{De} \tab \code{numeric} \tab the final De obtained with the sliding approach \cr \code{De.MC} \tab \code{numeric} \tab all De values obtained by the MC runs \cr \code{residuals} \tab \code{numeric} \tab the obtained residuals for each channel of the curve \cr \code{trend.fit} \tab \code{lm} \tab fitting results produced by the fitting of the residuals \cr \code{RF_nat.slid} \tab \code{matrix} \tab the slid \code{RF_nat} curve \cr \code{t_n.id} \tab \code{numeric} \tab the index of the t_n offset \cr \code{I_n} \tab \code{numeric} \tab the vertical intensity offset if a vertical slide was applied \cr \code{algorithm_error} \tab \code{numeric} \tab the vertical sliding suffers from a systematic effect induced by the used algorithm. The returned value is the standard deviation of all obtained De values while expanding the vertical sliding range. 
It can be added as a systematic error to the final De error, if desired.\cr \code{vslide_range} \tab \code{numeric} \tab the range used for the vertical sliding \cr \code{squared_residuals} \tab \code{numeric} \tab the squared residuals (horizontal sliding) } \strong{slot:} \strong{\verb{@info}} The original function call (\link[methods:LanguageClasses]{methods::language}-object) The output (\code{data}) should be accessed using the function \link{get_RLum} ------------------------\cr \verb{[ PLOT OUTPUT ]}\cr ------------------------\cr The slid IR-RF curves with the finally obtained De } \description{ Function to analyse IRSAR RF measurements on K-feldspar samples, performed using the protocol according to Erfurt et al. (2003) and beyond. } \details{ The function performs an IRSAR analysis described for K-feldspar samples by Erfurt et al. (2003) assuming a negligible sensitivity change of the RF signal. \strong{General Sequence Structure} (according to Erfurt et al., 2003) \enumerate{ \item Measuring IR-RF intensity of the natural dose for a few seconds (\eqn{RF_{nat}}) \item Bleach the samples under solar conditions for at least 30 min without changing the geometry \item Waiting for at least one hour \item Regeneration of the IR-RF signal to at least the natural level (measuring \eqn{RF_{reg}}) \item Fitting data with a stretched exponential function \item Calculate the palaeodose \eqn{D_{e}} using the parameters from the fitting } Actually two methods are supported to obtain the \eqn{D_{e}}: \code{method = "FIT"} and \code{method = "SLIDE"}: \strong{\code{method = "FIT"}} The principle is described above and follows the original suggestions by Erfurt et al., 2003. For the fitting the mean count value of the \code{RF_nat} curve is used. Function used for the fitting (according to Erfurt et al. 
(2003)): \deqn{\phi(D) = \phi_{0}-\Delta\phi(1-exp(-\lambda*D))^\beta} with \eqn{\phi(D)} the dose dependent IR-RF flux, \eqn{\phi_{0}} the initial IR-RF flux, \eqn{\Delta\phi} the dose dependent change of the IR-RF flux, \eqn{\lambda} the exponential parameter, \eqn{D} the dose and \eqn{\beta} the dispersive factor. To obtain the palaeodose \eqn{D_{e}} the function is changed to: \deqn{D_{e} = ln(-(\phi(D) - \phi_{0})/(-\lambda*\phi)^{1/\beta}+1)/-\lambda} The fitting is done using the \code{port} algorithm of the \link{nls} function. \strong{\code{method = "SLIDE"}} For this method, the natural curve is slid along the x-axis until congruence with the regenerated curve is reached. Instead of fitting this allows working with the original data without the need for any physical model. This approach was introduced for RF curves by Buylaert et al., 2012 and Lapp et al., 2012. Here the sliding is done by searching for the minimum of the squared residuals. For the mathematical details of the implementation see Frouin et al., 2017 \strong{\code{method.control}} To keep the generic argument list as clear as possible, arguments to control the methods for De estimation are all pre set with meaningful default parameters and can be handled using the argument \code{method.control} only, e.g., \code{method.control = list(trace = TRUE)}. 
Supported arguments are: \tabular{lll}{ \strong{ARGUMENT} \tab \strong{METHOD} \tab \strong{DESCRIPTION}\cr \code{trace} \tab \code{FIT}, \code{SLIDE} \tab as in \link{nls}; shows sum of squared residuals\cr \code{trace_vslide} \tab \code{SLIDE} \tab \link{logical} argument to enable or disable the tracing of the vertical sliding\cr \code{maxiter} \tab \code{FIT} \tab as in \link{nls}\cr \code{warnOnly} \tab \code{FIT} \tab as in \link{nls}\cr \code{minFactor} \tab \code{FIT} \tab as in \link{nls}\cr \code{correct_onset} \tab \code{SLIDE} \tab The logical argument shifts the curves along the x-axis by the first channel, as light is expected in the first channel. The default value is \code{TRUE}.\cr \code{show_density} \tab \code{SLIDE} \tab \link{logical} (\emph{with default}) enables or disables KDE plots for MC run results. If the distribution is too narrow nothing is shown.\cr \code{show_fit} \tab \code{SLIDE} \tab \link{logical} (\emph{with default}) enables or disables the plot of the fitted curve routinely obtained during the evaluation.\cr \code{n.MC} \tab \code{SLIDE} \tab \link{integer} (\emph{with default}): This controls the number of MC runs within the sliding (assessing the possible minimum values). The default \code{n.MC = 1000}. Note: This parameter is not the same as controlled by the function argument \code{n.MC}. \cr \code{vslide_range} \tab \code{SLIDE} \tab \link{logical} or \link{numeric} or \link{character} (\emph{with default}): This argument sets the boundaries for a vertical curve sliding. The argument expects a vector with an absolute minimum and a maximum (e.g., \code{c(-1000,1000)}). Alternatively the values \code{NULL} and \code{'auto'} are allowed. The automatic mode detects the reasonable vertical sliding range (\strong{recommended}). \code{NULL} applies no vertical sliding. 
The default is \code{NULL}.\cr \code{cores} \tab \code{SLIDE} \tab \code{number} or \code{character} (\emph{with default}): set number of cores to be allocated for a parallel processing of the Monte-Carlo runs. The default value is \code{NULL} (single thread), the recommended value is \code{'auto'}. An optional number (e.g., \code{cores} = 8) assigns a value manually. } \strong{Error estimation} For \strong{\code{method = "FIT"}} the asymmetric error range is obtained by using the 2.5 \% (lower) and the 97.5 \% (upper) quantiles of the \eqn{RF_{nat}} curve for calculating the \eqn{D_{e}} error range. For \strong{\code{method = "SLIDE"}} the error is obtained by bootstrapping the residuals of the slid curve to construct new natural curves for a Monte Carlo simulation. The error is returned in two ways: (a) the standard deviation of the herewith obtained \eqn{D_{e}} from the MC runs and (b) the confidence interval using the 2.5 \% (lower) and the 97.5 \% (upper) quantiles. The results of the MC runs are returned with the function output. \strong{Test parameters} The argument \code{test_parameters} allows to pass some thresholds for several test parameters, which will be evaluated during the function run. If a threshold is set and it is exceeded the test parameter status will be set to \code{"FAILED"}. Intentionally this parameter is not termed \code{'rejection criteria'} as not all test parameters are evaluated for both methods and some parameters are calculated but not evaluated by default. Common for all parameters are the allowed argument options \code{NA} and \code{NULL}. If the parameter is set to \code{NA} the value is calculated but the result will not be evaluated, meaning it has no effect on the status (\code{"OK"} or \code{"FAILED"}) of the parameter. Setting the parameter to \code{NULL} disables the parameter entirely and the parameter will also be removed from the function output. 
This might be useful in cases where a particular parameter asks for long computation times. Currently supported parameters are: \code{curves_ratio} \link{numeric} (default: \code{1.001}): The ratio of \eqn{RF_{nat}} over \eqn{RF_{reg}} in the range of \eqn{RF_{nat}} is calculated and should not exceed the threshold value. \code{intersection_ratio} \link{numeric} (default: \code{NA}): Calculated as absolute difference from 1 of the ratio of the integral of the normalised RF-curves. This value indicates intersection of the RF-curves and should be close to 0 if the curves have a similar shape. For this calculation first the corresponding time-count pair value on the RF_reg curve is obtained using the maximum count value of the \code{RF_nat} curve and only this segment (fitting to the \code{RF_nat} curve) on the RF_reg curve is taken for further calculating this ratio. If nothing is found at all, \code{Inf} is returned. \code{residuals_slope} \link{numeric} (default: \code{NA}; only for \code{method = "SLIDE"}): A linear function is fitted on the residuals after sliding. The corresponding slope can be used to discard values as a high (positive, negative) slope may indicate that both curves are fundamentally different and the method cannot be applied at all. Per default the value of this parameter is calculated but not evaluated. \code{curves_bounds} \link{numeric} (default: \eqn{max(RF_{reg_counts})}): This measure uses the maximum time (x) value of the regenerated curve. The maximum time (x) value of the natural curve cannot be larger than this value. However, although this is not recommended the value can be changed or disabled. \code{dynamic_ratio} \link{numeric} (default: \code{NA}): The dynamic ratio of the regenerated curve is calculated as ratio of the minimum and maximum count values. \code{lambda}, \code{beta} and \code{delta.phi} \link{numeric} (default: \code{NA}; \code{method = "SLIDE"}): The stretched exponential function suggested by Erfurt et al. 
(2003) describing the decay of the RF signal, comprises several parameters that might be useful to evaluate the shape of the curves. For \code{method = "FIT"} this parameter is obtained during the fitting, for \code{method = "SLIDE"} a rather rough estimation is made using the function \link[minpack.lm:nlsLM]{minpack.lm::nlsLM} and the equation given above. Note: As this procedure requests more computation time, setting of one of these three parameters to \code{NULL} also prevents a calculation of the remaining two. } \note{ This function assumes that there is no sensitivity change during the measurements (natural vs. regenerated signal), which is in contrast to the findings by Buylaert et al. (2012). } \section{Function version}{ 0.7.8 } \examples{ ##load data data(ExampleData.RLum.Analysis, envir = environment()) ##(1) perform analysis using the method 'FIT' results <- analyse_IRSAR.RF(object = IRSAR.RF.Data) ##show De results and test paramter results get_RLum(results, data.object = "data") get_RLum(results, data.object = "test_parameters") ##(2) perform analysis using the method 'SLIDE' results <- analyse_IRSAR.RF(object = IRSAR.RF.Data, method = "SLIDE", n.MC = 1) \dontrun{ ##(3) perform analysis using the method 'SLIDE' and method control option ## 'trace results <- analyse_IRSAR.RF( object = IRSAR.RF.Data, method = "SLIDE", method.control = list(trace = TRUE)) } } \section{How to cite}{ Kreutzer, S., 2023. analyse_IRSAR.RF(): Analyse IRSAR RF measurements. Function version 0.7.8. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Buylaert, J.P., Jain, M., Murray, A.S., Thomsen, K.J., Lapp, T., 2012. 
IR-RF dating of sand-sized K-feldspar extracts: A test of accuracy. Radiation Measurements 44 (5-6), 560-565. doi: 10.1016/j.radmeas.2012.06.021 Erfurt, G., Krbetschek, M.R., 2003. IRSAR - A single-aliquot regenerative-dose dating protocol applied to the infrared radiofluorescence (IR-RF) of coarse- grain K-feldspar. Ancient TL 21, 35-42. Erfurt, G., 2003. Infrared luminescence of Pb+ centres in potassium-rich feldspars. physica status solidi (a) 200, 429-438. Erfurt, G., Krbetschek, M.R., 2003. Studies on the physics of the infrared radioluminescence of potassium feldspar and on the methodology of its application to sediment dating. Radiation Measurements 37, 505-510. Erfurt, G., Krbetschek, M.R., Bortolot, V.J., Preusser, F., 2003. A fully automated multi-spectral radioluminescence reading system for geochronometry and dosimetry. Nuclear Instruments and Methods in Physics Research Section B: Beam Interactions with Materials and Atoms 207, 487-499. Frouin, M., Huot, S., Kreutzer, S., Lahaye, C., Lamothe, M., Philippe, A., Mercier, N., 2017. An improved radiofluorescence single-aliquot regenerative dose protocol for K-feldspars. Quaternary Geochronology 38, 13-24. doi:10.1016/j.quageo.2016.11.004 Lapp, T., Jain, M., Thomsen, K.J., Murray, A.S., Buylaert, J.P., 2012. New luminescence measurement facilities in retrospective dosimetry. Radiation Measurements 47, 803-808. doi:10.1016/j.radmeas.2012.02.006 Trautmann, T., 2000. A study of radioluminescence kinetics of natural feldspar dosimeters: experiments and simulations. Journal of Physics D: Applied Physics 33, 2304-2310. Trautmann, T., Krbetschek, M.R., Dietrich, A., Stolz, W., 1998. Investigations of feldspar radioluminescence: potential for a new dating technique. Radiation Measurements 29, 421-425. Trautmann, T., Krbetschek, M.R., Dietrich, A., Stolz, W., 1999. Feldspar radioluminescence: a new dating method and its physical background. Journal of Luminescence 85, 45-58. 
Trautmann, T., Krbetschek, M.R., Stolz, W., 2000. A systematic study of the radioluminescence properties of single feldspar grains. Radiation Measurements 32, 685-690. } \seealso{ \linkS4class{RLum.Analysis}, \linkS4class{RLum.Results}, \link{get_RLum}, \link{nls}, \link[minpack.lm:nlsLM]{minpack.lm::nlsLM}, \code{parallel::mclapply} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \keyword{datagen} Luminescence/man/calc_FiniteMixture.Rd0000644000176200001440000002102014521210045017471 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calc_FiniteMixture.R \name{calc_FiniteMixture} \alias{calc_FiniteMixture} \title{Apply the finite mixture model (FMM) after Galbraith (2005) to a given De distribution} \usage{ calc_FiniteMixture( data, sigmab, n.components, grain.probability = FALSE, dose.scale, pdf.weight = TRUE, pdf.sigma = "sigmab", pdf.colors = "gray", pdf.scale, plot.proportions = TRUE, plot = TRUE, ... ) } \arguments{ \item{data}{\linkS4class{RLum.Results} or \link{data.frame} (\strong{required}): for \link{data.frame}: two columns with De \code{(data[,1])} and De error \code{(values[,2])}} \item{sigmab}{\link{numeric} (\strong{required}): spread in De values given as a fraction (e.g. 0.2). This value represents the expected overdispersion in the data should the sample be well-bleached (Cunningham & Wallinga 2012, p. 100).} \item{n.components}{\link{numeric} (\strong{required}): number of components to be fitted. If a vector is provided (e.g. \code{c(2:8)}) the finite mixtures for 2, 3 ... 
8 components are calculated and a plot and a statistical evaluation of the model performance (BIC score and maximum log-likelihood) is provided.} \item{grain.probability}{\link{logical} (\emph{with default}): prints the estimated probabilities of which component each grain is in} \item{dose.scale}{\link{numeric}: manually set the scaling of the y-axis of the first plot with a vector in the form of \code{c(min, max)}} \item{pdf.weight}{\link{logical} (\emph{with default}): weight the probability density functions by the components proportion (applies only when a vector is provided for \code{n.components})} \item{pdf.sigma}{\link{character} (\emph{with default}): if \code{"sigmab"} the components normal distributions are plotted with a common standard deviation (i.e. \code{sigmab}) as assumed by the FFM. Alternatively, \code{"se"} takes the standard error of each component for the sigma parameter of the normal distribution} \item{pdf.colors}{\link{character} (\emph{with default}): colour coding of the components in the the plot. Possible options are \code{"gray"}, \code{"colors"} and \code{"none"}} \item{pdf.scale}{\link{numeric}: manually set the max density value for proper scaling of the x-axis of the first plot} \item{plot.proportions}{\link{logical} (\emph{with default}): plot \link[graphics:barplot]{graphics::barplot} showing the proportions of components if \code{n.components} a vector with a length > 1 (e.g., \code{n.components = c(2:3)})} \item{plot}{\link{logical} (\emph{with default}): plot output} \item{...}{further arguments to pass. See details for their usage.} } \value{ Returns a plot (\emph{optional}) and terminal output. 
In addition an \linkS4class{RLum.Results} object is returned containing the following elements: \item{.$summary}{\link{data.frame} summary of all relevant model results.} \item{.$data}{\link{data.frame} original input data} \item{.$args}{\link{list} used arguments} \item{.$call}{\link{call} the function call} \item{.$mle}{ covariance matrices of the log likelihoods} \item{.$BIC}{ BIC score} \item{.$llik}{ maximum log likelihood} \item{.$grain.probability}{ probabilities of a grain belonging to a component} \item{.$components}{\link{matrix} estimates of the de, de error and proportion for each component} \item{.$single.comp}{\link{data.frame} single component FFM estimate} If a vector for \code{n.components} is provided (e.g. \code{c(2:8)}), \code{mle} and \code{grain.probability} are lists containing matrices of the results for each iteration of the model. The output should be accessed using the function \link{get_RLum} } \description{ This function fits a k-component mixture to a De distribution with differing known standard errors. Parameters (doses and mixing proportions) are estimated by maximum likelihood assuming that the log dose estimates are from a mixture of normal distributions. } \details{ This model uses the maximum likelihood and Bayesian Information Criterion (BIC) approaches. Indications of overfitting are: \itemize{ \item increasing BIC \item repeated dose estimates \item covariance matrix not positive definite \item covariance matrix produces \code{NaN} \item convergence problems } \strong{Plot} If a vector (\code{c(k.min:k.max)}) is provided for \code{n.components} a plot is generated showing the k components equivalent doses as normal distributions. By default \code{pdf.weight} is set to \code{FALSE}, so that the area under each normal distribution is always 1. If \code{TRUE}, the probability density functions are weighted by the components proportion for each iteration of k components, so the sum of areas of each component equals 1. 
While the density values are on the same scale when no weights are used, the y-axis are individually scaled if the probability density are weighted by the components proportion.\cr The standard deviation (sigma) of the normal distributions is by default determined by a common \code{sigmab} (see \code{pdf.sigma}). For \code{pdf.sigma = "se"} the standard error of each component is taken instead.\cr The stacked \link[graphics:barplot]{graphics::barplot} shows the proportion of each component (in per cent) calculated by the FFM. The last plot shows the achieved BIC scores and maximum log-likelihood estimates for each iteration of k. } \section{Function version}{ 0.4.2 } \examples{ ## load example data data(ExampleData.DeValues, envir = environment()) ## (1) apply the finite mixture model ## NOTE: the data set is not suitable for the finite mixture model, ## which is why a very small sigmab is necessary calc_FiniteMixture(ExampleData.DeValues$CA1, sigmab = 0.2, n.components = 2, grain.probability = TRUE) ## (2) repeat the finite mixture model for 2, 3 and 4 maximum number of fitted ## components and save results ## NOTE: The following example is computationally intensive. Please un-comment ## the following lines to make the example work. FMM<- calc_FiniteMixture(ExampleData.DeValues$CA1, sigmab = 0.2, n.components = c(2:4), pdf.weight = TRUE, dose.scale = c(0, 100)) ## show structure of the results FMM ## show the results on equivalent dose, standard error and proportion of ## fitted components get_RLum(object = FMM, data.object = "components") } \section{How to cite}{ Burow, C., 2023. calc_FiniteMixture(): Apply the finite mixture model (FMM) after Galbraith (2005) to a given De distribution. Function version 0.4.2. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. 
Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Galbraith, R.F. & Green, P.F., 1990. Estimating the component ages in a finite mixture. Nuclear Tracks and Radiation Measurements 17, 197-206. Galbraith, R.F. & Laslett, G.M., 1993. Statistical models for mixed fission track ages. Nuclear Tracks Radiation Measurements 4, 459-470. Galbraith, R.F. & Roberts, R.G., 2012. Statistical aspects of equivalent dose and error calculation and display in OSL dating: An overview and some recommendations. Quaternary Geochronology 11, 1-27. Roberts, R.G., Galbraith, R.F., Yoshida, H., Laslett, G.M. & Olley, J.M., 2000. Distinguishing dose populations in sediment mixtures: a test of single-grain optical dating procedures using mixtures of laboratory-dosed quartz. Radiation Measurements 32, 459-465. Galbraith, R.F., 2005. Statistics for Fission Track Analysis, Chapman & Hall/CRC, Boca Raton. \strong{Further reading} Arnold, L.J. & Roberts, R.G., 2009. Stochastic modelling of multi-grain equivalent dose (De) distributions: Implications for OSL dating of sediment mixtures. Quaternary Geochronology 4, 204-230. Cunningham, A.C. & Wallinga, J., 2012. Realizing the potential of fluvial archives using robust OSL chronologies. Quaternary Geochronology 12, 98-106. Rodnight, H., Duller, G.A.T., Wintle, A.G. & Tooth, S., 2006. Assessing the reproducibility and accuracy of optical dating of fluvial deposits. Quaternary Geochronology 1, 109-120. Rodnight, H. 2008. How many equivalent dose values are needed to obtain a reproducible distribution?. Ancient TL 26, 3-10. } \seealso{ \link{calc_CentralDose}, \link{calc_CommonDose}, \link{calc_FuchsLang2001}, \link{calc_MinDose} } \author{ Christoph Burow, University of Cologne (Germany) \cr Based on a rewritten S script of Rex Galbraith, 2006. 
, RLum Developer Team} Luminescence/man/bin_RLum.Data.Rd0000644000176200001440000000412514521210045016301 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bin_RLum.Data.R \name{bin_RLum.Data} \alias{bin_RLum.Data} \title{Channel binning - method dispatcher} \usage{ bin_RLum.Data(object, ...) } \arguments{ \item{object}{\linkS4class{RLum.Data} (\strong{required}): S4 object of class \code{RLum.Data}} \item{...}{further arguments passed to the specific class method} } \value{ An object of the same type as the input object is provided } \description{ Function calls the object-specific bin functions for RLum.Data S4 class objects. } \details{ The function provides a generalised access point for specific \linkS4class{RLum.Data} objects. \cr Depending on the input object, the corresponding function will be selected. Allowed arguments can be found in the documentations of the corresponding \linkS4class{RLum.Data} class. } \note{ Currently only \code{RLum.Data} objects of class \linkS4class{RLum.Data.Curve} and \linkS4class{RLum.Data.Spectrum} are supported! } \section{Function version}{ 0.2.0 } \examples{ ##load example data data(ExampleData.CW_OSL_Curve, envir = environment()) ##create RLum.Data.Curve object from this example curve <- set_RLum( class = "RLum.Data.Curve", recordType = "OSL", data = as.matrix(ExampleData.CW_OSL_Curve) ) ##plot data without and with 2 and 4 channel binning plot_RLum(curve) plot_RLum(bin_RLum.Data(curve, bin_size = 2)) plot_RLum(bin_RLum.Data(curve, bin_size = 4)) } \seealso{ \linkS4class{RLum.Data.Curve}, \linkS4class{RLum.Data.Spectrum} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. bin_RLum.Data(): Channel binning - method dispatcher. Function version 0.2.0. 
In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \keyword{utilities} Luminescence/man/convert_RLum2Risoe.BINfileData.Rd0000644000176200001440000000425214521210045021467 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/convert_RLum2Risoe.BINfileData.R \name{convert_RLum2Risoe.BINfileData} \alias{convert_RLum2Risoe.BINfileData} \title{Converts RLum.Analysis-objects and RLum.Data.Curve-objects to RLum2Risoe.BINfileData-objects} \usage{ convert_RLum2Risoe.BINfileData(object, keep.position.number = FALSE) } \arguments{ \item{object}{\linkS4class{RLum.Analysis} or \linkS4class{RLum.Data.Curve} (\strong{required}): input object to be converted} \item{keep.position.number}{\link{logical} (with default): keeps the original position number or re-calculate the numbers to avoid doubling} } \value{ The function returns a \linkS4class{Risoe.BINfileData} object. } \description{ The functions converts \linkS4class{RLum.Analysis} and \linkS4class{RLum.Data.Curve} objects and a \link{list} of those to \linkS4class{Risoe.BINfileData} objects. The function intends to provide a minimum of compatibility between both formats. The created \linkS4class{RLum.Analysis} object can be later exported to a BIN-file using the function \link{write_R2BIN}. } \note{ The conversion can be never perfect. The \code{RLum} objects may contain information which are not part of the \linkS4class{Risoe.BINfileData} definition. 
} \section{Function version}{ 0.1.3 } \examples{ ##simple conversion using the example dataset data(ExampleData.RLum.Analysis, envir = environment()) convert_RLum2Risoe.BINfileData(IRSAR.RF.Data) } \seealso{ \linkS4class{RLum.Analysis}, \linkS4class{RLum.Data.Curve}, \link{write_R2BIN} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. convert_RLum2Risoe.BINfileData(): Converts RLum.Analysis-objects and RLum.Data.Curve-objects to RLum2Risoe.BINfileData-objects. Function version 0.1.3. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \keyword{IO} Luminescence/man/write_R2BIN.Rd0000644000176200001440000000763614521210045015762 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/write_R2BIN.R \name{write_R2BIN} \alias{write_R2BIN} \title{Export Risoe.BINfileData into Risø BIN/BINX-file} \usage{ write_R2BIN( object, file, version, compatibility.mode = FALSE, txtProgressBar = TRUE ) } \arguments{ \item{object}{\linkS4class{Risoe.BINfileData} (\strong{required}): input object to be stored in a bin file.} \item{file}{\link{character} (\strong{required}): file name and path of the output file \itemize{ \item \verb{[WIN]}: \code{write_R2BIN(object, "C:/Desktop/test.bin")} \item \verb{[MAC/LINUX]}: \code{write_R2BIN("/User/test/Desktop/test.bin")} }} \item{version}{\link{character} (\emph{optional}): version number for the output file. If no value is provided the highest version number from the \linkS4class{Risoe.BINfileData} is taken automatically. 
\strong{Note:} This argument can be used to convert BIN-file versions.} \item{compatibility.mode}{\link{logical} (\emph{with default}): this option recalculates the position values if necessary and sets the max. value to 48. The old position number is appended as comment (e.g., 'OP: 70'). This option accounts for potential compatibility problems with the Analyst software. It further limits the maximum number of points per curve to 9,999. If a curve contains more data the curve data get binned using the smallest possible bin width.} \item{txtProgressBar}{\link{logical} (\emph{with default}): enables or disables \link{txtProgressBar}.} } \value{ Write a binary file. } \description{ Exports a \code{Risoe.BINfileData} object in a \verb{*.bin} or \verb{*.binx} file that can be opened by the Analyst software or other Risø software. } \details{ The structure of the exported binary data follows the data structure published in the Appendices of the \emph{Analyst} manual p. 42. If \code{LTYPE}, \code{DTYPE} and \code{LIGHTSOURCE} are not of type \link{character}, no transformation into numeric values is done. } \note{ The function just roughly checks the data structures. The validity of the output data depends on the user. The validity of the file path is not further checked. BIN-file conversions using the argument \code{version} may be a lossy conversion, depending on the chosen input and output data (e.g., conversion from version 08 to 07 to 06 to 05 to 04 or 03). \strong{Warning} Although the coding was done carefully it seems that the BIN/BINX-files produced by Risø DA 15/20 TL/OSL readers slightly differ on the byte level. No obvious differences are observed in the METADATA, however, the BIN/BINX-file may not be fully compatible, at least not similar to the ones directly produced by the Risø readers! ROI definitions (introduced in BIN-file version 8) are not supported! They are furthermore ignored by the function \link{read_BIN2R}. 
} \section{Function version}{ 0.5.2 } \examples{ ##load exampled dataset file <- system.file("extdata/BINfile_V8.binx", package = "Luminescence") temp <- read_BIN2R(file) ##create temporary file path ##(for usage replace by own path) temp_file <- tempfile(pattern = "output", fileext = ".binx") ##export to temporary file path write_R2BIN(temp, file = temp_file) } \section{How to cite}{ Kreutzer, S., 2023. write_R2BIN(): Export Risoe.BINfileData into Risø BIN/BINX-file. Function version 0.5.2. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ DTU Nutech, 2016. The Sequence Editor, Users Manual, February, 2016. \url{https://www.fysik.dtu.dk} } \seealso{ \link{read_BIN2R}, \linkS4class{Risoe.BINfileData}, \link{writeBin} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \keyword{IO} Luminescence/man/merge_RLum.Rd0000644000176200001440000000503114521210045015755 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merge_RLum.R \name{merge_RLum} \alias{merge_RLum} \title{General merge function for RLum S4 class objects} \usage{ merge_RLum(objects, ...) } \arguments{ \item{objects}{\link{list} of \linkS4class{RLum} (\strong{required}): list of S4 object of class \code{RLum}} \item{...}{further arguments that one might want to pass to the specific merge function} } \value{ Return is the same as input objects as provided in the list. } \description{ Function calls object-specific merge functions for RLum S4 class objects. } \details{ The function provides a generalised access point for merge specific \linkS4class{RLum} objects. 
Depending on the input object, the corresponding merge function will be selected. Allowed arguments can be found in the documentations of each merge function. Empty list elements (\code{NULL}) are automatically removed from the input \code{list}. \tabular{lll}{ \strong{object} \tab \tab \strong{corresponding merge function} \cr \linkS4class{RLum.Data.Curve} \tab : \tab \code{merge_RLum.Data.Curve} \cr \linkS4class{RLum.Analysis} \tab : \tab \code{merge_RLum.Analysis} \cr \linkS4class{RLum.Results} \tab : \tab \code{merge_RLum.Results} } } \note{ So far not for every \code{RLum} object a merging function exists. } \section{Function version}{ 0.1.3 } \examples{ ##Example based using data and from the calc_CentralDose() function ##load example data data(ExampleData.DeValues, envir = environment()) ##apply the central dose model 1st time temp1 <- calc_CentralDose(ExampleData.DeValues$CA1) ##apply the central dose model 2nd time temp2 <- calc_CentralDose(ExampleData.DeValues$CA1) ##merge the results and store them in a new object temp.merged <- get_RLum(merge_RLum(objects = list(temp1, temp2))) } \seealso{ \linkS4class{RLum.Data.Curve}, \linkS4class{RLum.Data.Image}, \linkS4class{RLum.Data.Spectrum}, \linkS4class{RLum.Analysis}, \linkS4class{RLum.Results} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. merge_RLum(): General merge function for RLum S4 class objects. Function version 0.1.3. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. 
https://CRAN.R-project.org/package=Luminescence } \keyword{utilities} Luminescence/man/plot_RLum.Data.Image.Rd0000644000176200001440000000665314521210045017540 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot_RLum.Data.Image.R \name{plot_RLum.Data.Image} \alias{plot_RLum.Data.Image} \title{Plot function for an \code{RLum.Data.Image} S4 class object} \usage{ plot_RLum.Data.Image( object, frames = NULL, par.local = TRUE, plot.type = "plot.raster", ... ) } \arguments{ \item{object}{\linkS4class{RLum.Data.Image} (\strong{required}): S4 object of class \code{RLum.Data.Image}} \item{frames}{\link{numeric} (\emph{optional}): sets the frames to be set, by default all frames are plotted. Can be sequence of numbers, as long as the frame number is valid.} \item{par.local}{\link{logical} (\emph{with default}): use local graphical parameters for plotting, e.g. the plot is shown in one column and one row. If \code{par.local = FALSE} global parameters are inherited.} \item{plot.type}{\link{character} (\emph{with default}): plot types. Supported types are \code{plot.raster}, \code{contour}} \item{...}{further arguments and graphical parameters that will be passed to the specific plot functions. Standard supported parameters are \code{xlim}, \code{ylim}, \code{zlim}, \code{xlab}, \code{ylab}, \code{main}, \code{legend} (\code{TRUE} or \code{FALSE}), \code{col}, \code{cex}, \code{axes} (\code{TRUE} or \code{FALSE}), \code{zlim_image} (adjust the z-scale over different images), \code{stretch}} } \value{ Returns a plot } \description{ The function provides very basic plot functionality for image data of an \linkS4class{RLum.Data.Image} object. For more sophisticated plotting it is recommended to use other very powerful packages for image processing. \strong{Details on the plot functions} Supported plot types: \strong{\code{plot.type = "plot.raster"}} Uses the standard plot function of R \link[graphics:image]{graphics::image}. 
If wanted, the image is enhanced, using the argument \code{stretch}. Possible values are \code{hist}, \code{lin}, and \code{NULL}. The latter does nothing. The argument \code{useRaster = TRUE} is used by default, but can be set to \code{FALSE}. \strong{\code{plot.type = "contour"}} This uses the function \link[graphics:contour]{graphics::contour} } \note{ The axes limitations (\code{xlim}, \code{ylim}, \code{zlim}) work directly on the object, so that regardless of the chosen limits the image parameters can be adjusted for best visibility. However, in particular for z-scale limitations this is not always wanted, please use \code{zlim_image} to maintain a particular value range over a series of images. } \section{Function version}{ 0.2.1 } \examples{ ##load data data(ExampleData.RLum.Data.Image, envir = environment()) ##plot data plot_RLum.Data.Image(ExampleData.RLum.Data.Image) } \seealso{ \linkS4class{RLum.Data.Image}, \link{plot}, \link{plot_RLum}, \link[graphics:image]{graphics::image}, \link[graphics:contour]{graphics::contour} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. plot_RLum.Data.Image(): Plot function for an RLum.Data.Image S4 class object. Function version 0.2.1. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. 
https://CRAN.R-project.org/package=Luminescence } \keyword{aplot} Luminescence/man/write_RLum2CSV.Rd0000644000176200001440000000735514521210045016461 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/write_RLum2CSV.R \name{write_RLum2CSV} \alias{write_RLum2CSV} \title{Export RLum-objects to CSV} \usage{ write_RLum2CSV( object, path = NULL, prefix = "", export = TRUE, compact = TRUE, ... ) } \arguments{ \item{object}{\linkS4class{RLum} or a \link{list} of \code{RLum} objects (\strong{required}): objects to be written. Can be a \link{data.frame} if needed internally.} \item{path}{\link{character} (\emph{optional}): character string naming folder for the output to be written. If nothing is provided \code{path} will be set to the working directory. \strong{Note:} this argument is ignored if the the argument \code{export} is set to \code{FALSE}.} \item{prefix}{\link{character} (\emph{with default}): optional prefix to name the files. This prefix is valid for all written files} \item{export}{\link{logical} (\emph{with default}): enable or disable the file export. If set to \code{FALSE} nothing is written to the file connection, but a list comprising objects of type \link{data.frame} and \link{matrix} is returned instead} \item{compact}{\link{logical} (\emph{with default}): if \code{TRUE} (the default) the output will be more simple but less comprehensive, means not all elements in the objects will be fully broken down. This is in particular useful for writing \code{RLum.Results} objects to CSV-files, such objects can be rather complex and not all information are needed in a CSV-file or can be meaningful translated to it.} \item{...}{further arguments that will be passed to the function \link[utils:write.table]{utils::write.table}. 
All arguments except the argument \code{file} are supported} } \value{ The function returns either a CSV-file (or many of them) or for the option \code{export == FALSE} a list comprising objects of type \link{data.frame} and \link{matrix} } \description{ This function exports \linkS4class{RLum}-objects to CSV-files using the R function \link[utils:write.table]{utils::write.table}. All \linkS4class{RLum}-objects are supported, but the export is lossy, i.e. the pure numerical values are exported only. Information that cannot be coerced to a \link{data.frame} or a \link{matrix} are discarded as well as metadata. } \details{ However, in combination with the implemented import functions, nearly every supported import data format can be exported to CSV-files, this gives a great deal of freedom in terms of compatibility with other tools. \strong{Input is a list of objects} If the input is a \link{list} of objects all explicit function arguments can be provided as \link{list}. } \section{Function version}{ 0.2.2 } \examples{ ##transform values to a list (and do not write) data(ExampleData.BINfileData, envir = environment()) object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data)[[1]] write_RLum2CSV(object, export = FALSE) \dontrun{ ##create temporary filepath ##(for usage replace by own path) temp_file <- tempfile(pattern = "output", fileext = ".csv") ##write CSV-file to working directory write_RLum2CSV(temp_file) } } \seealso{ \linkS4class{RLum.Analysis}, \linkS4class{RLum.Data}, \linkS4class{RLum.Results}, \link[utils:write.table]{utils::write.table} } \author{ Sebastian Kreutzer, Geography & Earth Science, Aberystwyth University (United Kingdom) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. write_RLum2CSV(): Export RLum-objects to CSV. Function version 0.2.2. 
In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \keyword{IO} Luminescence/man/use_DRAC.Rd0000644000176200001440000001103114521210045015301 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/use_DRAC.R \name{use_DRAC} \alias{use_DRAC} \title{Use DRAC to calculate dose rate data} \usage{ use_DRAC(file, name, print_references = TRUE, citation_style = "text", ...) } \arguments{ \item{file}{\link{character} (\strong{required}): spreadsheet to be passed to the DRAC website for calculation. Can also be a DRAC template object obtained from \code{template_DRAC()}.} \item{name}{\link{character} (\emph{with default}): Optional user name submitted to DRAC. If omitted, a random name will be generated} \item{print_references}{(\emph{with default}): Print all references used in the input data table to the console.} \item{citation_style}{(\emph{with default}): If \code{print_references = TRUE} this argument determines the output style of the used references. Valid options are \code{"Bibtex"}, \code{"citation"}, \code{"html"}, \code{"latex"} or \code{"R"}. Default is \code{"text"}.} \item{...}{Further arguments. 
\itemize{ \item \code{url} \link{character}: provide an alternative URL to DRAC \item \code{verbose} \link{logical}: show or hide console output }} } \value{ Returns an \linkS4class{RLum.Results} object containing the following elements: \item{DRAC}{\link{list}: a named list containing the following elements in slot \verb{@data}: \tabular{lll}{ \verb{$highlights} \tab \link{data.frame} \tab summary of 25 most important input/output fields \cr \verb{$header} \tab \link{character} \tab HTTP header from the DRAC server response \cr \verb{$labels} \tab \link{data.frame} \tab descriptive headers of all input/output fields \cr \verb{$content} \tab \link{data.frame} \tab complete DRAC input/output table \cr \verb{$input} \tab \link{data.frame} \tab DRAC input table \cr \verb{$output} \tab \link{data.frame} \tab DRAC output table \cr \code{references}\tab \link{list} \tab A list of bib entries of used references \cr } } \item{data}{\link{character} or \link{list} path to the input spreadsheet or a DRAC template} \item{call}{\link{call} the function call} \item{args}{\link{list} used arguments} The output should be accessed using the function \link{get_RLum}. } \description{ The function provides an interface from R to DRAC. An R-object or a pre-formatted XLS/XLSX file is passed to the DRAC website and the results are re-imported into R. 
} \section{Function version}{ 0.14 } \examples{ ## (1) Method using the DRAC spreadsheet file <- "/PATH/TO/DRAC_Input_Template.csv" # send the actual IO template spreadsheet to DRAC \dontrun{ use_DRAC(file = file) } ## (2) Method using an R template object # Create a template input <- template_DRAC(preset = "DRAC-example_quartz") # Fill the template with values input$`Project ID` <- "DRAC-Example" input$`Sample ID` <- "Quartz" input$`Conversion factors` <- "AdamiecAitken1998" input$`External U (ppm)` <- 3.4 input$`errExternal U (ppm)` <- 0.51 input$`External Th (ppm)` <- 14.47 input$`errExternal Th (ppm)` <- 1.69 input$`External K (\%)` <- 1.2 input$`errExternal K (\%)` <- 0.14 input$`Calculate external Rb from K conc?` <- "N" input$`Calculate internal Rb from K conc?` <- "N" input$`Scale gammadoserate at shallow depths?` <- "N" input$`Grain size min (microns)` <- 90 input$`Grain size max (microns)` <- 125 input$`Water content ((wet weight - dry weight)/dry weight) \%` <- 5 input$`errWater content \%` <- 2 input$`Depth (m)` <- 2.2 input$`errDepth (m)` <- 0.22 input$`Overburden density (g cm-3)` <- 1.8 input$`errOverburden density (g cm-3)` <- 0.1 input$`Latitude (decimal degrees)` <- 30.0000 input$`Longitude (decimal degrees)` <- 70.0000 input$`Altitude (m)` <- 150 input$`De (Gy)` <- 20 input$`errDe (Gy)` <- 0.2 # use DRAC \dontrun{ output <- use_DRAC(input) } } \section{How to cite}{ Kreutzer, S., Dietze, M., Burow, C., 2023. use_DRAC(): Use DRAC to calculate dose rate data. Function version 0.14. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Durcan, J.A., King, G.E., Duller, G.A.T., 2015. DRAC: Dose Rate and Age Calculator for trapped charge dating. 
Quaternary Geochronology 28, 54-61. doi:10.1016/j.quageo.2015.03.012 } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)\cr Michael Dietze, GFZ Potsdam (Germany)\cr Christoph Burow, University of Cologne (Germany) , RLum Developer Team} Luminescence/man/plot_RLum.Results.Rd0000644000176200001440000000422514521210045017300 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot_RLum.Results.R \name{plot_RLum.Results} \alias{plot_RLum.Results} \title{Plot function for an RLum.Results S4 class object} \usage{ plot_RLum.Results(object, single = TRUE, ...) } \arguments{ \item{object}{\linkS4class{RLum.Results} (\strong{required}): S4 object of class \code{RLum.Results}} \item{single}{\link{logical} (\emph{with default}): single plot output (\code{TRUE/FALSE}) to allow for plotting the results in as few plot windows as possible.} \item{...}{further arguments and graphical parameters will be passed to the \code{plot} function.} } \value{ Returns multiple plots. } \description{ The function provides a standardised plot output for data of an RLum.Results S4 class object } \details{ The function produces a multiple plot output. A file output is recommended (e.g., \link{pdf}). } \note{ Not all arguments available for \link{plot} will be passed! Only plotting of \code{RLum.Results} objects is supported. 
} \section{Function version}{ 0.2.1 } \examples{ ###load data data(ExampleData.DeValues, envir = environment()) # apply the un-logged minimum age model mam <- calc_MinDose(data = ExampleData.DeValues$CA1, sigmab = 0.2, log = TRUE, plot = FALSE) ##plot plot_RLum.Results(mam) # estimate the number of grains on an aliquot grains<- calc_AliquotSize(grain.size = c(100,150), sample.diameter = 1, plot = FALSE, MC.iter = 100) ##plot plot_RLum.Results(grains) } \seealso{ \link{plot}, \link{plot_RLum} } \author{ Christoph Burow, University of Cologne (Germany) \cr Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Burow, C., Kreutzer, S., 2023. plot_RLum.Results(): Plot function for an RLum.Results S4 class object. Function version 0.2.1. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \keyword{aplot} Luminescence/man/plot_KDE.Rd0000644000176200001440000001606214521210045015366 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot_KDE.R \name{plot_KDE} \alias{plot_KDE} \title{Plot kernel density estimate with statistics} \usage{ plot_KDE( data, na.rm = TRUE, values.cumulative = TRUE, order = TRUE, boxplot = TRUE, rug = TRUE, summary, summary.pos, summary.method = "MCM", bw = "nrd0", output = TRUE, ... ) } \arguments{ \item{data}{\link{data.frame} or \linkS4class{RLum.Results} object (\strong{required}): for \code{data.frame}: two columns: De (\code{values[,1]}) and De error (\code{values[,2]}). For plotting multiple data sets, these must be provided as \code{list} (e.g. 
\code{list(dataset1, dataset2)}).} \item{na.rm}{\link{logical} (\emph{with default}): exclude NA values from the data set prior to any further operation.} \item{values.cumulative}{\link{logical} (\emph{with default}): show cumulative individual data.} \item{order}{\link{logical}: Order data in ascending order.} \item{boxplot}{\link{logical} (\emph{with default}): optionally show a boxplot (depicting median as thick central line, first and third quartile as box limits, whiskers denoting +/- 1.5 interquartile ranges and dots further outliers).} \item{rug}{\link{logical} (\emph{with default}): optionally add rug.} \item{summary}{\link{character} (\emph{optional}): add statistic measures of centrality and dispersion to the plot. Can be one or more of several keywords. See details for available keywords.} \item{summary.pos}{\link{numeric} or \link{character} (\emph{with default}): optional position coordinates or keyword (e.g. \code{"topright"}) for the statistical summary. Alternatively, the keyword \code{"sub"} may be specified to place the summary below the plot header. However, this latter option in only possible if \code{mtext} is not used. In case of coordinate specification, y-coordinate refers to the right y-axis.} \item{summary.method}{\link{character} (\emph{with default}): keyword indicating the method used to calculate the statistic summary. One out of \code{"unweighted"}, \code{"weighted"} and \code{"MCM"}. See \link{calc_Statistics} for details.} \item{bw}{\link{character} (\emph{with default}): bin-width, chose a numeric value for manual setting.} \item{output}{\link{logical}: Optional output of numerical plot parameters. These can be useful to reproduce similar plots. Default is \code{TRUE}.} \item{...}{further arguments and graphical parameters passed to \link{plot}.} } \description{ Plot a kernel density estimate of measurement values in combination with the actual values and associated error bars in ascending order. 
If enabled, the boxplot will show the usual distribution parameters (median as bold line, box delimited by the first and third quartile, whiskers defined by the extremes and outliers shown as points) and also the mean and standard deviation as pale bold line and pale polygon, respectively. } \details{ The function allows passing several plot arguments, such as \code{main}, \code{xlab}, \code{cex}. However, as the figure is an overlay of two separate plots, \code{ylim} must be specified in the order: c(ymin_axis1, ymax_axis1, ymin_axis2, ymax_axis2) when using the cumulative values plot option. See examples for some further explanations. For details on the calculation of the bin-width (parameter \code{bw}) see \link{density}. A statistic summary, i.e. a collection of statistic measures of centrality and dispersion (and further measures) can be added by specifying one or more of the following keywords: \itemize{ \item \code{"n"} (number of samples) \item \code{"mean"} (mean De value) \item \code{"median"} (median of the De values) \item \code{"sd.rel"} (relative standard deviation in percent) \item \code{"sd.abs"} (absolute standard deviation) \item \code{"se.rel"} (relative standard error) \item \code{"se.abs"} (absolute standard error) \item \code{"in.2s"} (percent of samples in 2-sigma range) \item \code{"kurtosis"} (kurtosis) \item \code{"skewness"} (skewness) } \strong{Note} that the input data for the statistic summary is sent to the function \code{calc_Statistics()} depending on the log-option for the z-scale. If \code{"log.z = TRUE"}, the summary is based on the logarithms of the input data. If \code{"log.z = FALSE"} the linearly scaled data is used. \strong{Note} as well, that \code{"calc_Statistics()"} calculates these statistic measures in three different ways: \code{unweighted}, \code{weighted} and \code{MCM-based} (i.e., based on Monte Carlo Methods). By default, the MCM-based version is used. 
If you wish to use another method, indicate this with the appropriate keyword using the argument \code{summary.method}. } \note{ The plot output is no 'probability density' plot (cf. the discussion of Berger and Galbraith in Ancient TL; see references)! } \section{Function version}{ 3.6.0 } \examples{ ## read example data set data(ExampleData.DeValues, envir = environment()) ExampleData.DeValues <- Second2Gray(ExampleData.DeValues$BT998, c(0.0438,0.0019)) ## create plot straightforward plot_KDE(data = ExampleData.DeValues) ## create plot with logarithmic x-axis plot_KDE(data = ExampleData.DeValues, log = "x") ## create plot with user-defined labels and axes limits plot_KDE(data = ExampleData.DeValues, main = "Dose distribution", xlab = "Dose (s)", ylab = c("KDE estimate", "Cumulative dose value"), xlim = c(100, 250), ylim = c(0, 0.08, 0, 30)) ## create plot with boxplot option plot_KDE(data = ExampleData.DeValues, boxplot = TRUE) ## create plot with statistical summary below header plot_KDE(data = ExampleData.DeValues, summary = c("n", "median", "skewness", "in.2s")) ## create plot with statistical summary as legend plot_KDE(data = ExampleData.DeValues, summary = c("n", "mean", "sd.rel", "se.abs"), summary.pos = "topleft") ## split data set into sub-groups, one is manipulated, and merge again data.1 <- ExampleData.DeValues[1:15,] data.2 <- ExampleData.DeValues[16:25,] * 1.3 data.3 <- list(data.1, data.2) ## create plot with two subsets straightforward plot_KDE(data = data.3) ## create plot with two subsets and summary legend at user coordinates plot_KDE(data = data.3, summary = c("n", "median", "skewness"), summary.pos = c(110, 0.07), col = c("blue", "orange")) ## example of how to use the numerical output of the function ## return plot output to draw a thicker KDE line KDE_out <- plot_KDE(data = ExampleData.DeValues, output = TRUE) } \seealso{ \link{density}, \link{plot} } \author{ Michael Dietze, GFZ Potsdam (Germany)\cr Geography & Earth Sciences, Aberystwyth 
University (United Kingdom) , RLum Developer Team} \section{How to cite}{ Dietze, M., 2023. plot_KDE(): Plot kernel density estimate with statistics. Function version 3.6.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } Luminescence/man/RLum-class.Rd0000644000176200001440000000500614521210045015703 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RLum-class.R \docType{class} \name{RLum-class} \alias{RLum-class} \alias{replicate_RLum,RLum-method} \title{Class \code{"RLum"}} \usage{ \S4method{replicate_RLum}{RLum}(object, times = NULL) } \arguments{ \item{object}{\linkS4class{RLum} (\strong{required}): an object of class \linkS4class{RLum}} \item{times}{\link{integer} (\emph{optional}): number for times each element is repeated element} } \description{ Abstract class for data in the package Luminescence Subclasses are: } \details{ \strong{RLum-class}\cr |\cr |----\linkS4class{RLum.Data}\cr |----|-- \linkS4class{RLum.Data.Curve}\cr |----|-- \linkS4class{RLum.Data.Spectrum}\cr |----|-- \linkS4class{RLum.Data.Image}\cr |----\linkS4class{RLum.Analysis}\cr |----\linkS4class{RLum.Results} } \section{Methods (by generic)}{ \itemize{ \item \code{replicate_RLum(RLum)}: Replication method RLum-objects }} \section{Slots}{ \describe{ \item{\code{originator}}{Object of class \link{character} containing the name of the producing function for the object. Set automatically by using the function \link{set_RLum}.} \item{\code{info}}{Object of class \link{list} for additional information on the object itself} \item{\code{.uid}}{Object of class \link{character} for a unique object identifier. 
This id is usually calculated using the internal function \code{create_UID()} if the function \link{set_RLum} is called.} \item{\code{.pid}}{Object of class \link{character} for a parent id. This allows nesting RLum-objects at will. The parent id can be the uid of another object.} }} \note{ \code{RLum} is a virtual class. } \section{Objects from the Class}{ A virtual Class: No objects can be created from it. } \section{Class version}{ 0.4.0 } \examples{ showClass("RLum") } \seealso{ \linkS4class{RLum.Data}, \linkS4class{RLum.Data.Curve}, \linkS4class{RLum.Data.Spectrum}, \linkS4class{RLum.Data.Image}, \linkS4class{RLum.Analysis}, \linkS4class{RLum.Results}, \link{methods_RLum} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. RLum-class(): Class 'RLum'. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \keyword{classes} Luminescence/man/calc_CobbleDoseRate.Rd0000644000176200001440000001157114521210045017524 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calc_CobbleDoseRate.R \name{calc_CobbleDoseRate} \alias{calc_CobbleDoseRate} \title{Calculate dose rate of slices in a spherical cobble} \usage{ calc_CobbleDoseRate(input, conversion = "Guerinetal2011") } \arguments{ \item{input}{\link{data.frame} (\strong{required}): A table containing all relevant information for each individual layer. For the table layout see details.} \item{conversion}{Which dose rate conversion factors to use. 
For accepted values see \link{BaseDataSet.ConversionFactors}} } \value{ The function returns an \linkS4class{RLum.Results} object for which the first element is a \link{matrix} (\code{DataIndividual}) that gives the dose rate results for each slice for each decay chain individually, for both, the cobble dose rate and the sediment dose rate. The second element is also a \link{matrix} (\code{DataComponent}) that gives the total beta and gamma-dose rates for the cobble and the adjacent sediment for each slice of the cobble. } \description{ Calculates the dose rate profile through the cobble based on Riedesel and Autzen (2020). Corrects the beta dose rate in the cobble for the grain size following results of Guérin et al. (2012). Sediment beta and gamma dose rates are corrected for the water content of the sediment using the correction factors of Aitken (1985). Water content in the cobble is assumed to be 0. } \details{ \strong{The input table layout} \tabular{lll}{ COLUMN \tab DATA TYPE \tab DESCRIPTION\cr \code{Distance} \tab \code{numeric} \tab distance from the surface of the cobble to the top of each rock slice in mm. 
The distance for each slice will be listed in this column\cr \code{DistanceError} \tab \code{numeric} \tab Error on the distance in mm\cr \code{Thickness} \tab \code{numeric} \tab Thickness of each slice in mm\cr \code{TicknessError} \tab \code{numeric} \tab uncertainty of the thickness in mm.\cr \code{Mineral} \tab \code{character} \tab \code{'FS'} for feldspar, \code{'Q'} for quartz, depending which mineral in the cobble is used for dating\cr \code{Cobble_K} \tab \code{numeric} \tab K nuclide content in \% of the bulk cobble\cr \code{Cobble_K_SE} \tab \code{numeric} \tab error on K nuclide content in \% of the bulk cobble\cr \code{Cobble_Th} \tab \code{numeric} \tab Th nuclide content in ppm of the bulk cobble\cr \code{Cobble_Th_SE} \tab \code{numeric} \tab error on Th nuclide content in ppm of the bulk cobble\cr \code{Cobble_U} \tab \code{numeric} \tab U nuclide content in ppm of the bulk cobble\cr \code{CobbleU_SE} \tab \code{numeric} \tab error on U nuclide content in ppm of the bulk cobble\cr \code{GrainSize} \tab \code{numeric} \tab average grain size in µm of the grains used for dating\cr \code{Density} \tab \code{numeric} \tab Density of the cobble. 
Default is 2.7 g cm^-3\cr \code{CobbleDiameter} \tab \code{numeric} \tab Diameter of the cobble in cm.\cr \code{Sed_K} \tab \code{numeric} \tab K nuclide content in \% of the sediment matrix\cr \code{Sed_K_SE} \tab \code{numeric} \tab error on K nuclide content in \% of the sediment matrix\cr \code{Sed_Th} \tab \code{numeric} \tab Th nuclide content in ppm of the sediment matrix\cr \code{Sed_Th_SE} \tab \code{numeric} \tab error on Th nuclide content in ppm of the sediment matrix\cr \code{Sed_U} \tab \code{numeric} \tab U nuclide content in ppm of the sediment matrix\cr \code{Sed_U_SE} \tab \code{numeric} \tab error on U nuclide content in ppm of the sediment matrix\cr \code{GrainSize} \tab \code{numeric} \tab average grain size of the sediment matrix\cr \code{WaterContent} \tab \code{numeric} \tab mean water content of the sediment matrix in \%\cr \code{WaterContent_SE} \tab \code{numeric} \tab relative error on water content } \strong{Water content} The water content provided by the user should be calculated according to: \deqn{(Wet_weight - Dry_weight) / Dry_weight * 100} } \section{Function version}{ 0.1.0 } \examples{ ## load example data data("ExampleData.CobbleData", envir = environment()) ## run function calc_CobbleDoseRate(ExampleData.CobbleData) } \section{How to cite}{ Riedesel, S., Autzen, M., 2023. calc_CobbleDoseRate(): Calculate dose rate of slices in a spherical cobble. Function version 0.1.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Riedesel, S., Autzen, M., 2020. Beta and gamma dose rate attenuation in rocks and sediment. Radiation Measurements 133, 106295. 
} \seealso{ \link{convert_Concentration2DoseRate} } \author{ Svenja Riedesel, Aberystwyth University (United Kingdom) \cr Martin Autzen, DTU NUTECH Center for Nuclear Technologies (Denmark) , RLum Developer Team} \keyword{datagen} Luminescence/man/analyse_portableOSL.Rd0000644000176200001440000001415014521210044017622 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/analyse_portableOSL.R \name{analyse_portableOSL} \alias{analyse_portableOSL} \title{Analyse portable CW-OSL measurements} \usage{ analyse_portableOSL( object, signal.integral = NULL, invert = FALSE, normalise = FALSE, mode = "profile", coord = NULL, plot = TRUE, ... ) } \arguments{ \item{object}{\linkS4class{RLum.Analysis} (\strong{required}): \linkS4class{RLum.Analysis} object produced by \link{read_PSL2R}. The input can be a \link{list} of such objects, in such case each input is treated as a separate sample and the results are merged.} \item{signal.integral}{\link{numeric} (\strong{required}): A vector of two values specifying the lower and upper channel used to calculate the OSL/IRSL signal. Can be provided in form of \code{c(1, 5)} or \code{1:5}.} \item{invert}{\link{logical} (\emph{with default}): \code{TRUE} flip the plot the data in reverse order.} \item{normalise}{\link{logical} (\emph{with default}): \code{TRUE} to normalise the OSL/IRSL signals to the \emph{mean} of all corresponding data curves.} \item{mode}{\link{character} (\emph{with default}): defines the analysis mode, allowed are \code{"profile"} (the default) and \code{"surface"} for surface interpolation. If you select something else, nothing will be plotted (similar to \code{plot = FALSE}).} \item{coord}{\link{list} \link{matrix} (\emph{optional}): a list or matrix of the same length as number of samples measured with coordinates for the sampling positions. Coordinates are expected to be provided in meter (unit: m). 
Expected are x and y coordinates, e.g., \verb{coord = list(samp1 = c(0.1, 0.2))}. If you have not measured x coordinates, they should be set to 0.
For calculating the depletion ratios the cumulative signal of the last n channels (same number of channels as specified by \code{signal.integral}) is divided by cumulative signal of the first n channels (\code{signal.integral}). \strong{Note: The function assumes the following sequence pattern: \verb{DARK COUNT}, \code{IRSL}, \verb{DARK COUNT}, \code{BSL}, \verb{DARK COUNT}. If you have written a different sequence, the analysis function will (likely) not work!}. \strong{Signal processing} The function processes the signals as follows: \code{BSL} and \code{IRSL} signals are extracted using the chosen signal integral, dark counts are taken in full. \strong{Working with coordinates} Usually samples are taken from a profile with a certain stratigraphy. In the past the function calculated an index. With this newer version, you have two option of passing on xy-coordinates to the function: \itemize{ \item (1) Add coordinates to the sample name during measurement. The form is rather strict and has to follow the scheme \verb{_x:|y:}. Example: \code{sample_x:0.2|y:0.4}. \item (2) Alternatively, you can provide a \link{list} or \link{matrix} with the sample coordinates. Example: \code{coord = list(c(0.2, 1), c(0.3,1.2))} } Please note that the unit is meter (m) and the function expects always xy-coordinates. The latter one is useful for surface interpolations. If you have measured a profile where the x-coordinates to not measure, x-coordinates should be 0. 
} \section{Function version}{ 0.1.0 } \examples{ ## example profile plot # (1) load example data set data("ExampleData.portableOSL", envir = environment()) # (2) merge and plot all RLum.Analysis objects merged <- merge_RLum(ExampleData.portableOSL) plot_RLum( object = merged, combine = TRUE, records_max = 5, legend.pos = "outside") merged # (3) analyse and plot results <- analyse_portableOSL( merged, signal.integral = 1:5, invert = FALSE, normalise = TRUE) get_RLum(results) } \seealso{ \linkS4class{RLum.Analysis}, \linkS4class{RLum.Data.Curve}, \link{read_PSL2R} } \author{ Christoph Burow, University of Cologne (Germany), Sebastian Kreutzer, Institute of Geography, Ruprecht-Karl University of Heidelberg, Germany , RLum Developer Team} \section{How to cite}{ Burow, C., Kreutzer, S., 2023. analyse_portableOSL(): Analyse portable CW-OSL measurements. Function version 0.1.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \keyword{datagen} \keyword{plot} Luminescence/man/analyse_Al2O3C_CrossTalk.Rd0000644000176200001440000000705114521210044020346 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/analyse_Al2O3C_CrossTalk.R \name{analyse_Al2O3C_CrossTalk} \alias{analyse_Al2O3C_CrossTalk} \title{Al2O3:C Reader Cross Talk Analysis} \usage{ analyse_Al2O3C_CrossTalk( object, signal_integral = NULL, dose_points = c(0, 4), recordType = c("OSL (UVVIS)"), irradiation_time_correction = NULL, method_control = NULL, plot = TRUE, ... ) } \arguments{ \item{object}{\linkS4class{RLum.Analysis} \strong{(required)}: measurement input} \item{signal_integral}{\link{numeric} (\emph{optional}): signal integral, used for the signal and the background. 
If nothing is provided the full range is used} \item{dose_points}{\link{numeric} (\emph{with default}): vector with dose points, if dose points are repeated, only the general pattern needs to be provided. Default values follow the suggestions made by Kreutzer et al., 2018} \item{recordType}{\link{character} (\emph{with default}): input curve selection, which is passed to function \link{get_RLum}. To deactivate the automatic selection set the argument to \code{NULL}} \item{irradiation_time_correction}{\link{numeric} or \linkS4class{RLum.Results} (\emph{optional}): information on the used irradiation time correction obtained by another experiments.} \item{method_control}{\link{list} (\emph{optional}): optional parameters to control the calculation. See details for further explanations} \item{plot}{\link{logical} (\emph{with default}): enable/disable plot output} \item{...}{further arguments that can be passed to the plot output} } \value{ Function returns results numerically and graphically: -----------------------------------\cr \verb{[ NUMERICAL OUTPUT ]}\cr -----------------------------------\cr \strong{\code{RLum.Results}}-object \strong{slot:} \strong{\verb{@data}} \tabular{lll}{ \strong{Element} \tab \strong{Type} \tab \strong{Description}\cr \verb{$data} \tab \code{data.frame} \tab summed apparent dose table \cr \verb{$data_full} \tab \code{data.frame} \tab full apparent dose table \cr \verb{$fit} \tab \code{lm} \tab the linear model obtained from fitting \cr \verb{$col.seq} \tab \code{numeric} \tab the used colour vector \cr } \strong{slot:} \strong{\verb{@info}} The original function call ------------------------\cr \verb{[ PLOT OUTPUT ]}\cr ------------------------\cr \itemize{ \item An overview of the obtained apparent dose values } } \description{ The function provides the analysis of cross-talk measurements on a FI lexsyg SMART reader using Al2O3:C chips } \section{Function version}{ 0.1.3 } \examples{ ##load data data(ExampleData.Al2O3C, envir = 
environment()) ##run analysis analyse_Al2O3C_CrossTalk(data_CrossTalk) } \section{How to cite}{ Kreutzer, S., 2023. analyse_Al2O3C_CrossTalk(): Al2O3:C Reader Cross Talk Analysis. Function version 0.1.3. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Kreutzer, S., Martin, L., Guérin, G., Tribolo, C., Selva, P., Mercier, N., 2018. Environmental Dose Rate Determination Using a Passive Dosimeter: Techniques and Workflow for alpha-Al2O3:C Chips. Geochronometria 45, 56-67. doi: 10.1515/geochr-2015-0086 } \seealso{ \link{analyse_Al2O3C_ITC} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \keyword{datagen} Luminescence/man/ExampleData.ScaleGammaDose.Rd0000644000176200001440000000122114521210044020704 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Luminescence-package.R \name{ExampleData.ScaleGammaDose} \alias{ExampleData.ScaleGammaDose} \title{Example data for scale_GammaDose()} \format{ A \code{\link{data.frame}}. Please see \code{?scale_GammaDose()} for a detailed description of its structure. } \description{ An example data set for the function \code{scale_GammaDose()} containing layer specific information to scale the gamma dose rate considering variations in soil radioactivity. 
} \section{Version}{ 0.1 } \examples{ ## Load data data("ExampleData.ScaleGammaDose", envir = environment()) } \keyword{datasets} Luminescence/man/plot_GrowthCurve.Rd0000644000176200001440000003156214521210045017244 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot_GrowthCurve.R \name{plot_GrowthCurve} \alias{plot_GrowthCurve} \title{Fit and plot a dose-response curve for luminescence data (Lx/Tx against dose)} \usage{ plot_GrowthCurve( sample, na.rm = TRUE, mode = "interpolation", fit.method = "EXP", fit.force_through_origin = FALSE, fit.weights = TRUE, fit.includingRepeatedRegPoints = TRUE, fit.NumberRegPoints = NULL, fit.NumberRegPointsReal = NULL, fit.bounds = TRUE, NumberIterations.MC = 100, output.plot = TRUE, output.plotExtended = TRUE, output.plotExtended.single = FALSE, cex.global = 1, txtProgressBar = TRUE, verbose = TRUE, ... ) } \arguments{ \item{sample}{\link{data.frame} (\strong{required}): data frame with three columns for \code{x = Dose},\code{y = LxTx},\code{z = LxTx.Error}, \code{y1 = TnTx}. The column for the test dose response is optional, but requires \code{'TnTx'} as column name if used. For exponential fits at least three dose points (including the natural) should be provided.} \item{na.rm}{\link{logical} (\emph{with default}): excludes \code{NA} values from the data set prior to any further operations. This argument is defunct and will be removed in a future version!} \item{mode}{\link{character} (\emph{with default}): selects calculation mode of the function. \itemize{ \item \code{"interpolation"} (default) calculates the De by interpolation, \item \code{"extrapolation"} calculates the equivalent dose by extrapolation (useful for MAAD measurements) and \item \code{"alternate"} calculates no equivalent dose and just fits the data points. 
} Please note that for option \code{"regenerative"} the first point is considered as natural dose} \item{fit.method}{\link{character} (\emph{with default}): function used for fitting. Possible options are: \itemize{ \item \code{LIN}, \item \code{QDR}, \item \code{EXP}, \item \verb{EXP OR LIN}, \item \code{EXP+LIN}, \item \code{EXP+EXP}, \item \code{GOK}, \item \code{LambertW} } See details.} \item{fit.force_through_origin}{\link{logical} (\emph{with default}) allow to force the fitted function through the origin. For \code{method = "EXP+EXP"} the function will be fixed through the origin in either case, so this option will have no effect.} \item{fit.weights}{\link{logical} (\emph{with default}): option whether the fitting is done with or without weights. See details.} \item{fit.includingRepeatedRegPoints}{\link{logical} (\emph{with default}): includes repeated points for fitting (\code{TRUE}/\code{FALSE}).} \item{fit.NumberRegPoints}{\link{integer} (\emph{optional}): set number of regeneration points manually. By default the number of all (!) regeneration points is used automatically.} \item{fit.NumberRegPointsReal}{\link{integer} (\emph{optional}): if the number of regeneration points is provided manually, the value of the real, regeneration points = all points (repeated points) including reg 0, has to be inserted.} \item{fit.bounds}{\link{logical} (\emph{with default}): set lower fit bounds for all fitting parameters to 0. Limited for the use with the fit methods \code{EXP}, \code{EXP+LIN}, \verb{EXP OR LIN}, \code{GOK}, \code{LambertW} Argument to be inserted for experimental application only!} \item{NumberIterations.MC}{\link{integer} (\emph{with default}): number of Monte Carlo simulations for error estimation. 
See details.} \item{output.plot}{\link{logical} (\emph{with default}): plot output (\code{TRUE/FALSE}).} \item{output.plotExtended}{\link{logical} (\emph{with default}): If' \code{TRUE}, 3 plots on one plot area are provided: \enumerate{ \item growth curve, \item histogram from Monte Carlo error simulation and \item a test dose response plot. } If \code{FALSE}, just the growth curve will be plotted. \strong{Requires:} \code{output.plot = TRUE}.} \item{output.plotExtended.single}{\link{logical} (\emph{with default}): single plot output (\code{TRUE/FALSE}) to allow for plotting the results in single plot windows. Requires \code{output.plot = TRUE} and \code{output.plotExtended = TRUE}.} \item{cex.global}{\link{numeric} (\emph{with default}): global scaling factor.} \item{txtProgressBar}{\link{logical} (\emph{with default}): enables or disables \code{txtProgressBar}. If \code{verbose = FALSE} also no \code{txtProgressBar} is shown.} \item{verbose}{\link{logical} (\emph{with default}): enables or disables terminal feedback.} \item{...}{Further arguments and graphical parameters to be passed. Note: Standard arguments will only be passed to the growth curve plot. Supported: \code{xlim}, \code{ylim}, \code{main}, \code{xlab}, \code{ylab}} } \value{ Along with a plot (so far wanted) an \code{RLum.Results} object is returned containing, the slot \code{data} contains the following elements: \tabular{lll}{ \strong{DATA.OBJECT} \tab \strong{TYPE} \tab \strong{DESCRIPTION} \cr \code{..$De} : \tab \code{data.frame} \tab Table with De values \cr \code{..$De.MC} : \tab \code{numeric} \tab Table with De values from MC runs \cr \code{..$Fit} : \tab \link{nls} or \link{lm} \tab object from the fitting for \code{EXP}, \code{EXP+LIN} and \code{EXP+EXP}. 
In case of a resulting linear fit when using \code{LIN}, \code{QDR} or \verb{EXP OR LIN} \cr \code{..$Formula} : \tab \link{expression} \tab Fitting formula as R expression \cr \code{..$call} : \tab \code{call} \tab The original function call\cr } } \description{ A dose-response curve is produced for luminescence measurements using a regenerative or additive protocol. The function supports interpolation and extrapolation to calculate the equivalent dose. } \details{ \strong{Fitting methods} For all options (except for the \code{LIN}, \code{QDR} and the \verb{EXP OR LIN}), the \link[minpack.lm:nlsLM]{minpack.lm::nlsLM} function with the \code{LM} (Levenberg-Marquardt algorithm) algorithm is used. Note: For historical reasons for the Monte Carlo simulations partly the function \link{nls} using the \code{port} algorithm. The solution is found by transforming the function or using \link{uniroot}. \code{LIN}: fits a linear function to the data using \link{lm}: \deqn{y = mx + n} \code{QDR}: fits a linear function to the data using \link{lm}: \deqn{y = a + bx + cx^2} \code{EXP}: tries to fit a function of the form \deqn{y = a(1 - exp(-\frac{(x+c)}{b}))} Parameters b and c are approximated by a linear fit using \link{lm}. Note: b = D0 \verb{EXP OR LIN}: works for some cases where an \code{EXP} fit fails. If the \code{EXP} fit fails, a \code{LIN} fit is done instead. \code{EXP+LIN}: tries to fit an exponential plus linear function of the form: \deqn{y = a(1-exp(-\frac{x+c}{b}) + (gx))} The \eqn{D_e} is calculated by iteration. \strong{Note:} In the context of luminescence dating, this function has no physical meaning. Therefore, no D0 value is returned. \code{EXP+EXP}: tries to fit a double exponential function of the form \deqn{y = (a_1 (1-exp(-\frac{x}{b_1}))) + (a_2 (1 - exp(-\frac{x}{b_2})))} This fitting procedure is not robust against wrong start parameters and should be further improved. 
\code{GOK}: tries to fit the general-order kinetics function after Guralnik et al. (2015) of the form of \deqn{y = a (d - (1 + (\frac{1}{b}) x c)^{(-1/c)})} where \strong{c > 0} is a kinetic order modifier (not to be confused with \strong{c} in \code{EXP} or \code{EXP+LIN}!). \code{LambertW}: tries to fit a dose-response curve based on the Lambert W function according to Pagonis et al. (2020). The function has the form \deqn{y ~ (1 + (W((R - 1) * exp(R - 1 - ((x + D_{int}) / D_{c}))) / (1 - R))) * N} with \eqn{W} the Lambert W function, calculated using the package \link[lamW:lamW]{lamW::lambertW0}, \eqn{R} the dimensionless retrapping ratio, \eqn{N} the total concentration of trappings states in cm^-3 and \eqn{D_{c} = N/R} a constant. \eqn{D_{int}} is the offset on the x-axis. Please not that finding the root in \code{mode = "extrapolation"} is a non-easy task due to the shape of the function and the results might be unexpected. \strong{Fit weighting} If the option \code{fit.weights = TRUE} is chosen, weights are calculated using provided signal errors (Lx/Tx error): \deqn{fit.weights = \frac{\frac{1}{error}}{\Sigma{\frac{1}{error}}}} \strong{Error estimation using Monte Carlo simulation} Error estimation is done using a parametric bootstrapping approach. A set of \code{Lx/Tx} values is constructed by randomly drawing curve data sampled from normal distributions. The normal distribution is defined by the input values (\code{mean = value}, \code{sd = value.error}). Then, a dose-response curve fit is attempted for each dataset resulting in a new distribution of single \code{De} values. The standard deviation of this distribution is becomes then the error of the \code{De}. With increasing iterations, the error value becomes more stable. However, naturally the error will not decrease with more MC runs. Alternatively, the function returns highest probability density interval estimates as output, users may find more useful under certain circumstances. 
\strong{Note:} It may take some calculation time with increasing MC runs, especially for the composed functions (\code{EXP+LIN} and \code{EXP+EXP}).\cr Each error estimation is done with the function of the chosen fitting method. \strong{Subtitle information} To avoid plotting the subtitle information, provide an empty user \code{mtext} \code{mtext = ""}. To plot any other subtitle text, use \code{mtext}. } \section{Function version}{ 1.11.10 } \examples{ ##(1) plot growth curve for a dummy data.set and show De value data(ExampleData.LxTxData, envir = environment()) temp <- plot_GrowthCurve(LxTxData) get_RLum(temp) ##(1b) horizontal plot arrangement layout(mat = matrix(c(1,1,2,3), ncol = 2)) plot_GrowthCurve(LxTxData, output.plotExtended.single = TRUE) ##(1c) to access the fitting value try get_RLum(temp, data.object = "Fit") ##(2) plot the growth curve only - uncomment to use ##pdf(file = "~/Desktop/Growth_Curve_Dummy.pdf", paper = "special") plot_GrowthCurve(LxTxData) ##dev.off() ##(3) plot growth curve with pdf output - uncomment to use, single output ##pdf(file = "~/Desktop/Growth_Curve_Dummy.pdf", paper = "special") plot_GrowthCurve(LxTxData, output.plotExtended.single = TRUE) ##dev.off() ##(4) plot resulting function for given intervall x x <- seq(1,10000, by = 100) plot( x = x, y = eval(temp$Formula), type = "l" ) ##(5) plot using the 'extrapolation' mode LxTxData[1,2:3] <- c(0.5, 0.001) print(plot_GrowthCurve(LxTxData,mode = "extrapolation")) ##(6) plot using the 'alternate' mode LxTxData[1,2:3] <- c(0.5, 0.001) print(plot_GrowthCurve(LxTxData,mode = "alternate")) ##(7) import and fit test data set by Berger & Huntley 1989 QNL84_2_unbleached <- read.table(system.file("extdata/QNL84_2_unbleached.txt", package = "Luminescence")) results <- plot_GrowthCurve( QNL84_2_unbleached, mode = "extrapolation", plot = FALSE, verbose = FALSE) #calculate confidence interval for the parameters #as alternative error estimation confint(results$Fit, level = 0.68) \dontrun{ 
QNL84_2_bleached <- read.table(system.file("extdata/QNL84_2_bleached.txt", package = "Luminescence")) STRB87_1_unbleached <- read.table(system.file("extdata/STRB87_1_unbleached.txt", package = "Luminescence")) STRB87_1_bleached <- read.table(system.file("extdata/STRB87_1_bleached.txt", package = "Luminescence")) print( plot_GrowthCurve( QNL84_2_bleached, mode = "alternate", plot = FALSE, verbose = FALSE)$Fit) print( plot_GrowthCurve( STRB87_1_unbleached, mode = "alternate", plot = FALSE, verbose = FALSE)$Fit) print( plot_GrowthCurve( STRB87_1_bleached, mode = "alternate", plot = FALSE, verbose = FALSE)$Fit) } } \section{How to cite}{ Kreutzer, S., Dietze, M., 2023. plot_GrowthCurve(): Fit and plot a dose-response curve for luminescence data (Lx/Tx against dose). Function version 1.11.10. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Berger, G.W., Huntley, D.J., 1989. Test data for exponential fits. Ancient TL 7, 43-46. Guralnik, B., Li, B., Jain, M., Chen, R., Paris, R.B., Murray, A.S., Li, S.-H., Pagonis, P., Herman, F., 2015. Radiation-induced growth and isothermal decay of infrared-stimulated luminescence from feldspar. Radiation Measurements 81, 224-231. Pagonis, V., Kitis, G., Chen, R., 2020. A new analytical equation for the dose response of dosimetric materials, based on the Lambert W function. Journal of Luminescence 225, 117333. 
\doi{10.1016/j.jlumin.2020.117333} } \seealso{ \link{nls}, \linkS4class{RLum.Results}, \link{get_RLum}, \link[minpack.lm:nlsLM]{minpack.lm::nlsLM}, \link{lm}, \link{uniroot}, \link[lamW:lamW]{lamW::lambertW0} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)\cr Michael Dietze, GFZ Potsdam (Germany) , RLum Developer Team} Luminescence/man/convert_Daybreak2CSV.Rd0000644000176200001440000000376114521210045017647 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/convert_Daybreak2CSV.R \name{convert_Daybreak2CSV} \alias{convert_Daybreak2CSV} \title{Export measurement data produced by a Daybreak luminescence reader to CSV-files} \usage{ convert_Daybreak2CSV(file, ...) } \arguments{ \item{file}{\link{character} (\strong{required}): name of the Daybreak-file (TXT-file, DAT-file) to be converted to CSV-files} \item{...}{further arguments that will be passed to the function \link{read_Daybreak2R} and \link{write_RLum2CSV}} } \value{ The function returns either a CSV-file (or many of them) or for the option \code{export = FALSE} a list comprising objects of type \link{data.frame} and \link{matrix} } \description{ This function is a wrapper function around the functions \link{read_Daybreak2R} and \link{write_RLum2CSV} and it imports an Daybreak-file (TXT-file, DAT-file) and directly exports its content to CSV-files. If nothing is set for the argument \code{path} (\link{write_RLum2CSV}) the input folder will become the output folder. 
} \section{Function version}{ 0.1.0 } \examples{ \dontrun{ ##select your BIN-file file <- file.choose() ##convert convert_Daybreak2CSV(file) } } \seealso{ \linkS4class{RLum.Analysis}, \linkS4class{RLum.Data}, \linkS4class{RLum.Results}, \link[utils:write.table]{utils::write.table}, \link{write_RLum2CSV}, \link{read_Daybreak2R} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. convert_Daybreak2CSV(): Export measurement data produced by a Daybreak luminescence reader to CSV-files. Function version 0.1.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \keyword{IO} Luminescence/man/analyse_pIRIRSequence.Rd0000644000176200001440000002012014521210044020044 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/analyse_pIRIRSequence.R \name{analyse_pIRIRSequence} \alias{analyse_pIRIRSequence} \title{Analyse post-IR IRSL measurement sequences} \usage{ analyse_pIRIRSequence( object, signal.integral.min, signal.integral.max, background.integral.min, background.integral.max, dose.points = NULL, sequence.structure = c("TL", "IR50", "pIRIR225"), plot = TRUE, plot.single = FALSE, ... ) } \arguments{ \item{object}{\linkS4class{RLum.Analysis} or \link{list} of \linkS4class{RLum.Analysis} objects (\strong{required}): input object containing data for analysis. If a \link{list} is provided the functions tries to iterate over the list.} \item{signal.integral.min}{\link{integer} (\strong{required}): lower bound of the signal integral. 
Provide this value as vector for different integration limits for the different IRSL curves.} \item{signal.integral.max}{\link{integer} (\strong{required}): upper bound of the signal integral. Provide this value as vector for different integration limits for the different IRSL curves.} \item{background.integral.min}{\link{integer} (\strong{required}): lower bound of the background integral. Provide this value as vector for different integration limits for the different IRSL curves.} \item{background.integral.max}{\link{integer} (\strong{required}): upper bound of the background integral. Provide this value as vector for different integration limits for the different IRSL curves.} \item{dose.points}{\link{numeric} (\emph{optional}): a numeric vector containing the dose points values. Using this argument overwrites dose point values in the signal curves.} \item{sequence.structure}{\link{vector} \link{character} (\emph{with default}): specifies the general sequence structure. Allowed values are \code{"TL"} and any \code{"IR"} combination (e.g., \code{"IR50"},\code{"pIRIR225"}). Additionally a parameter \code{"EXCLUDE"} is allowed to exclude curves from the analysis (Note: If a preheat without PMT measurement is used, i.e. preheat as none TL, remove the TL step.)} \item{plot}{\link{logical} (\emph{with default}): enables or disables plot output.} \item{plot.single}{\link{logical} (\emph{with default}): single plot output (\code{TRUE/FALSE}) to allow for plotting the results in single plot windows. Requires \code{plot = TRUE}.} \item{...}{further arguments that will be passed to the function \link{analyse_SAR.CWOSL} and \link{plot_GrowthCurve}. Furthermore, the arguments \code{main} (headers), \code{log} (IRSL curves), \code{cex} (control the size) and \code{mtext.outer} (additional text on the plot area) can be passed to influence the plotting. 
If the input is list, \code{main} can be passed as \link{vector} or \link{list}.} } \value{ Plots (\emph{optional}) and an \linkS4class{RLum.Results} object is returned containing the following elements: \tabular{lll}{ \strong{DATA.OBJECT} \tab \strong{TYPE} \tab \strong{DESCRIPTION} \cr \code{..$data} : \tab \code{data.frame} \tab Table with De values \cr \code{..$LnLxTnTx.table} : \tab \code{data.frame} \tab with the \code{LnLxTnTx} values \cr \code{..$rejection.criteria} : \tab \link{data.frame} \tab rejection criteria \cr \code{..$Formula} : \tab \link{list} \tab Function used for fitting of the dose response curve \cr \code{..$call} : \tab \link{call} \tab the original function call } The output should be accessed using the function \link{get_RLum}. } \description{ The function performs an analysis of post-IR IRSL sequences including curve fitting on \linkS4class{RLum.Analysis} objects. } \details{ To allow post-IR IRSL protocol (Thomsen et al., 2008) measurement analyses this function has been written as extended wrapper function for the function \link{analyse_SAR.CWOSL}, facilitating an entire sequence analysis in one run. With this, its functionality is strictly limited by the functionality of the function \link{analyse_SAR.CWOSL}. \strong{Defining the sequence structure} The argument \code{sequence.structure} expects a shortened pattern of your sequence structure and was mainly introduced to ease the use of the function. For example: If your measurement data contains the following curves: \code{TL}, \code{IRSL}, \code{IRSL}, \code{TL}, \code{IRSL}, \code{IRSL}, the sequence pattern in \code{sequence.structure} becomes \code{c('TL', 'IRSL', 'IRSL')}. The second part of your sequence for one cycle should be similar and can be discarded. If this is not the case (e.g., additional hotbleach) such curves have to be removed before using the function. 
\strong{If the input is a \code{list}} If the input is a list of RLum.Analysis-objects, every argument can be provided as list to allow for different sets of parameters for every single input element. For further information see \link{analyse_SAR.CWOSL}. } \note{ Best graphical output can be achieved by using the function \code{pdf} with the following options: \code{pdf(file = "", height = 15, width = 15)} } \section{Function version}{ 0.2.4 } \examples{ ### NOTE: For this example existing example data are used. These data are non pIRIR data. ### ##(1) Compile example data set based on existing example data (SAR quartz measurement) ##(a) Load example data data(ExampleData.BINfileData, envir = environment()) ##(b) Transform the values from the first position in a RLum.Analysis object object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos=1) ##(c) Grep curves and exclude the last two (one TL and one IRSL) object <- get_RLum(object, record.id = c(-29,-30)) ##(d) Define new sequence structure and set new RLum.Analysis object sequence.structure <- c(1,2,2,3,4,4) sequence.structure <- as.vector(sapply(seq(0,length(object)-1,by = 4), function(x){sequence.structure + x})) object <- sapply(1:length(sequence.structure), function(x){ object[[sequence.structure[x]]] }) object <- set_RLum(class = "RLum.Analysis", records = object, protocol = "pIRIR") ##(2) Perform pIRIR analysis (for this example with quartz OSL data!) ## Note: output as single plots to avoid problems with this example results <- analyse_pIRIRSequence(object, signal.integral.min = 1, signal.integral.max = 2, background.integral.min = 900, background.integral.max = 1000, fit.method = "EXP", sequence.structure = c("TL", "pseudoIRSL1", "pseudoIRSL2"), main = "Pseudo pIRIR data set based on quartz OSL", plot.single = TRUE) ##(3) Perform pIRIR analysis (for this example with quartz OSL data!) 
## Alternative for PDF output, uncomment and complete for usage \dontrun{ tempfile <- tempfile(fileext = ".pdf") pdf(file = tempfile, height = 15, width = 15) results <- analyse_pIRIRSequence(object, signal.integral.min = 1, signal.integral.max = 2, background.integral.min = 900, background.integral.max = 1000, fit.method = "EXP", main = "Pseudo pIRIR data set based on quartz OSL") dev.off() } } \section{How to cite}{ Kreutzer, S., 2023. analyse_pIRIRSequence(): Analyse post-IR IRSL measurement sequences. Function version 0.2.4. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Murray, A.S., Wintle, A.G., 2000. Luminescence dating of quartz using an improved single-aliquot regenerative-dose protocol. Radiation Measurements 32, 57-73. \doi{10.1016/S1350-4487(99)00253-X} Thomsen, K.J., Murray, A.S., Jain, M., Boetter-Jensen, L., 2008. Laboratory fading rates of various luminescence signals from feldspar-rich sediment extracts. Radiation Measurements 43, 1474-1486. 
\doi{10.1016/j.radmeas.2008.06.002} } \seealso{ \link{analyse_SAR.CWOSL}, \link{calc_OSLLxTxRatio}, \link{plot_GrowthCurve}, \linkS4class{RLum.Analysis}, \linkS4class{RLum.Results} \link{get_RLum} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \keyword{datagen} \keyword{plot} Luminescence/man/ExampleData.RLum.Analysis.Rd0000644000176200001440000000250014521210044020541 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Luminescence-package.R \name{ExampleData.RLum.Analysis} \alias{ExampleData.RLum.Analysis} \alias{IRSAR.RF.Data} \title{Example data as \linkS4class{RLum.Analysis} objects} \format{ \code{IRSAR.RF.Data}: IRSAR.RF.Data on coarse grain feldspar Each object contains data needed for the given protocol analysis. } \source{ \strong{IRSAR.RF.Data} These data were kindly provided by Tobias Lauer and Matthias Krbetschek. \tabular{ll}{ Lab: \tab Luminescence Laboratory TU Bergakademie Freiberg\cr Lab-Code: \tab ZEU/SA1\cr Location: \tab Zeuchfeld (Zeuchfeld Sandur; Saxony-Anhalt/Germany)\cr Material: \tab K-feldspar (130-200 \eqn{\mu}m)\cr Reference: \tab Kreutzer et al. (2014) } } \description{ Collection of different \linkS4class{RLum.Analysis} objects for protocol analysis. } \section{Version}{ 0.1 } \examples{ ##load data data(ExampleData.RLum.Analysis, envir = environment()) ##plot data plot_RLum(IRSAR.RF.Data) } \references{ \strong{IRSAR.RF.Data} Kreutzer, S., Lauer, T., Meszner, S., Krbetschek, M.R., Faust, D., Fuchs, M., 2014. Chronology of the Quaternary profile Zeuchfeld in Saxony-Anhalt / Germany - a preliminary luminescence dating study. Zeitschrift fuer Geomorphologie 58, 5-26. 
doi: 10.1127/0372-8854/2012/S-00112 } \keyword{datasets} Luminescence/man/fit_EmissionSpectra.Rd0000644000176200001440000001671514521210045017704 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fit_EmissionSpectra.R \name{fit_EmissionSpectra} \alias{fit_EmissionSpectra} \title{Luminescence Emission Spectra Deconvolution} \usage{ fit_EmissionSpectra( object, frame = NULL, n_components = NULL, start_parameters = NULL, sub_negative = 0, input_scale = NULL, method_control = list(), verbose = TRUE, plot = TRUE, ... ) } \arguments{ \item{object}{\linkS4class{RLum.Data.Spectrum}, \link{matrix} (\strong{required}): input object. Please note that an energy spectrum is expected} \item{frame}{\link{numeric} (\emph{optional}): defines the frame to be analysed} \item{n_components}{\link{numeric} (\emph{optional}): allows to set the aimed number of components. However, it defines rather a maximum than a minimum. Can be combined with other parameters.} \item{start_parameters}{\link{numeric} (\emph{optional}): allows to provide own start parameters for a semi-automated procedure. Parameters need to be provided in eV. Every value provided replaces a value from the automated peak finding algorithm (in ascending order).} \item{sub_negative}{\link{numeric} (\emph{with default}): substitute negative values in the input object by the number provided here (default: \code{0}). Can be set to \code{NULL}, i.e. negative values are kept.} \item{input_scale}{\link{character} (\emph{optional}): defines whether your x-values define wavelength or energy values. For the analysis an energy scale is expected, allowed values are \code{'wavelength'} and \code{'energy'}.
If nothing (\code{NULL}) is defined, the function tries to understand the input automatically.} \item{method_control}{\link{list} (\emph{optional}): options to control the fit method, see details} \item{verbose}{\link{logical} (\emph{with default}): enable/disable verbose mode} \item{plot}{\link{logical} (\emph{with default}): enable/disable plot output} \item{...}{further arguments to be passed to control the plot output (supported: \code{main}, \code{xlab}, \code{ylab}, \code{xlim}, \code{ylim}, \code{log}, \code{mtext}, \code{legend} (\code{TRUE} or \code{FALSE}), \code{legend.text}, \code{legend.pos})} } \value{ -----------------------------------\cr \verb{[ NUMERICAL OUTPUT ]}\cr -----------------------------------\cr \strong{\code{RLum.Results}}-object \strong{slot:} \strong{\verb{@data}} \tabular{lll}{ \strong{Element} \tab \strong{Type} \tab \strong{Description}\cr \verb{$data} \tab \code{matrix} \tab the final fit matrix \cr \verb{$fit} \tab \code{nls} \tab the fit object returned by \link[minpack.lm:nls.lm]{minpack.lm::nls.lm} \cr \verb{$fit_info} \tab \code{list} \tab a few additional parameters that can be used to assess the quality of the fit } \strong{slot:} \strong{\verb{@info}} The original function call ---------------------------------\cr \verb{[ TERMINAL OUTPUT ]} \cr ---------------------------------\cr The terminal output provides brief information on the deconvolution process and the obtained results. Terminal output is only shown if the argument \code{verbose = TRUE}. ---------------------------\cr \verb{[ PLOT OUTPUT ]} \cr ---------------------------\cr The function returns a plot showing the raw signal with the detected components. If the fitting failed, a basic plot is returned showing the raw data and indicating the peaks detected for the start parameter estimation. The grey band in the residual plot indicates the 10\% deviation from 0 (means no residual).
} \description{ Luminescence spectra deconvolution on \linkS4class{RLum.Data.Spectrum} and \link{matrix} objects on an \strong{energy scale}. The function is optimised for emission spectra typically obtained in the context of TL, OSL and RF measurements detected between 200 and 1000 nm. The function is not prepared to deconvolve TL curves (counts against temperature; no wavelength scale). If you are interested in such analysis, please check, e.g., the package \code{'tgcd'}. } \details{ \strong{Used equation} The emission spectra (on an energy scale) can be best described as the sum of multiple Gaussian components: \deqn{ y = \Sigma C_{i} * 1/(\sigma_{i} * \sqrt(2 * \pi)) * exp(-1/2 * ((x - \mu_{i})/\sigma_{i})^2) } with the parameters \eqn{\sigma} (peak width) and \eqn{\mu} (peak centre) and \eqn{C} (scaling factor). \strong{Start parameter estimation and fitting algorithm} The spectrum deconvolution consists of the following steps: \enumerate{ \item Peak finding \cr \item Start parameter estimation \cr \item Fitting via \link[minpack.lm:nls.lm]{minpack.lm::nls.lm}\cr } The peak finding is realised by an approach (re-)suggested by Petr Pikal via the R-help mailing list (\verb{https://stat.ethz.ch/pipermail/r-help/2005-November/thread.html}) in November 2005. This goes back to an even earlier discussion in 2001 based on Prof Brian Ripley's idea. It smartly uses the functions \link[stats:embed]{stats::embed} and \link{max.col} to identify peak positions. For the use in this context, the algorithm has been further modified to scale on the input data resolution (cf. source code).\cr The start parameter estimation uses random sampling from a range of meaningful parameters and repeats the fitting until 1000 successful fits have been produced or the set \code{max.runs} value is exceeded. Currently the best fit is the one with the lowest number for squared residuals, but other parameters are returned as well.
If a series of curves needs to be analysed, it is recommended to make a few trial runs, then fix the number of components and run at least 10,000 iterations (parameter \code{method_control = list(max.runs = 10000)}). \strong{Supported \code{method_control} settings} \tabular{llll}{ \strong{Parameter} \tab \strong{Type} \tab \strong{Default} \tab \strong{Description}\cr \code{max.runs} \tab \link{integer} \tab \code{10000} \tab maximum allowed search iterations; if exceeded the search stops \cr \code{graining} \tab \link{numeric} \tab \code{15} \tab gives control over how coarse or fine the spectrum is split into search intervals for the peak finding algorithm \cr \code{norm} \tab \link{logical} \tab \code{TRUE} \tab normalises data to the highest count value before fitting \cr \code{trace} \tab \link{logical} \tab \code{FALSE} \tab enables/disables the tracing of the minimisation routine } } \section{Function version}{ 0.1.1 } \examples{ ##load example data data(ExampleData.XSYG, envir = environment()) ##subtract background TL.Spectrum@data <- TL.Spectrum@data[] - TL.Spectrum@data[,15] results <- fit_EmissionSpectra( object = TL.Spectrum, frame = 5, method_control = list(max.runs = 10) ) ##deconvolution of a TL spectrum \dontrun{ ##load example data ##replace 0 values results <- fit_EmissionSpectra( object = TL.Spectrum, frame = 5, main = "TL spectrum" ) } } \seealso{ \linkS4class{RLum.Data.Spectrum}, \linkS4class{RLum.Results}, \link{plot_RLum}, \link{convert_Wavelength2Energy}, \link[minpack.lm:nls.lm]{minpack.lm::nls.lm} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. fit_EmissionSpectra(): Luminescence Emission Spectra Deconvolution. Function version 0.1.1. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023.
Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \keyword{datagen} Luminescence/man/calc_OSLLxTxDecomposed.Rd0000644000176200001440000000635214521210045020170 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calc_OSLLxTxDecomposed.R \name{calc_OSLLxTxDecomposed} \alias{calc_OSLLxTxDecomposed} \title{Calculate Lx/Tx ratio for decomposed CW-OSL signal components} \usage{ calc_OSLLxTxDecomposed( Lx.data, Tx.data = NULL, OSL.component = 1L, sig0 = 0, digits = NULL ) } \arguments{ \item{Lx.data}{\link{data.frame} (\strong{required}): Component table created by \verb{[OSLdecomposition::RLum.OSL_decomposition]} and per default located at \code{object@records[[...]]@info$COMPONENTS}.The value of \verb{$n[OSL.component]} is set as \code{LnLx}. The value of \verb{$n.error[OSL.component]} is set as \code{LnLx.error}} \item{Tx.data}{\link{data.frame} (\emph{optional}): Component table created by \verb{[OSLdecomposition::RLum.OSL_decomposition]} and per default located at \code{object@records[[...]]@info$COMPONENTS}. The value of \verb{$n[OSL.component]} is set as \code{TnTx}. The value of \verb{$n.error[OSL.component]} is set as \code{TnTx.error}} \item{OSL.component}{\link{integer} or \link{character} (\emph{optional}): a single index or a name describing which OSL signal component shall be evaluated. This argument can either be the name of the OSL component assigned by \verb{[OSLdecomposition::RLum.OSL_global_fitting]} or the index of component. Then \code{'1'} selects the fastest decaying component, \code{'2'} the second fastest and so on. 
If not defined, the fastest decaying component is selected.} \item{sig0}{\link{numeric} (\emph{with default}): allows adding an extra error component to the final \code{Lx/Tx} error value (e.g., instrumental error).} \item{digits}{\link{integer} (\emph{with default}): round numbers to the specified digits. If digits is set to \code{NULL} nothing is rounded.} } \value{ Returns an S4 object of type \linkS4class{RLum.Results}. Slot \code{data} contains a \link{list} with the following structure: \strong{@data} \if{html}{\out{
}}\preformatted{$LxTx.table (data.frame) .. $ LnLx .. $ TnTx .. $ Net_LnLx .. $ Net_LnLx.Error .. $ Net_TnTx .. $ Net_TnTx.Error .. $ LxTx .. $ LxTx.relError .. $ LxTx.Error }\if{html}{\out{
}} } \description{ Calculate \code{Lx/Tx} ratios from a given set of decomposed CW-OSL curves decomposed by \verb{[OSLdecomposition::RLum.OSL_decomposition]} } \section{Function version}{ 0.1.0 } \section{How to cite}{ Mittelstrass, D., 2023. calc_OSLLxTxDecomposed(): Calculate Lx/Tx ratio for decomposed CW-OSL signal components. Function version 0.1.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Mittelstrass D., Schmidt C., Beyer J., Straessner A., 2019. Automated identification and separation of quartz CW-OSL signal components with R. talk presented at DLED 2019, Bingen, Germany \url{http://luminescence.de/OSLdecomp_talk.pdf}\cr } \seealso{ \linkS4class{RLum.Data.Curve}, \link{plot_GrowthCurve}, \link{analyse_SAR.CWOSL} } \author{ Dirk Mittelstrass , RLum Developer Team} \keyword{datagen} Luminescence/man/Analyse_SAR.OSLdata.Rd0000644000176200001440000001527114521210044017314 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Analyse_SAR.OSLdata.R \name{Analyse_SAR.OSLdata} \alias{Analyse_SAR.OSLdata} \title{Analyse SAR CW-OSL measurements.} \usage{ Analyse_SAR.OSLdata( input.data, signal.integral, background.integral, position, run, set, dtype, keep.SEL = FALSE, info.measurement = "unkown measurement", output.plot = FALSE, output.plot.single = FALSE, cex.global = 1, ... ) } \arguments{ \item{input.data}{\linkS4class{Risoe.BINfileData} (\strong{required}): input data from a Risø BIN file, produced by the function \link{read_BIN2R}.} \item{signal.integral}{\link{vector} (\strong{required}): channels used for the signal integral, e.g. 
\code{signal.integral=c(1:2)}} \item{background.integral}{\link{vector} (\strong{required}): channels used for the background integral, e.g. \code{background.integral=c(85:100)}} \item{position}{\link{vector} (\emph{optional}): reader positions that want to be analysed (e.g. \code{position=c(1:48)}). Empty positions are automatically omitted. If no value is given all positions are analysed by default.} \item{run}{\link{vector} (\emph{optional}): range of runs used for the analysis. If no value is given the range of the runs in the sequence is deduced from the \code{Risoe.BINfileData} object.} \item{set}{\link{vector} (\emph{optional}): range of sets used for the analysis. If no value is given the range of the sets in the sequence is deduced from the \code{Risoe.BINfileData} object.} \item{dtype}{\link{character} (\emph{optional}): allows to further limit the curves by their data type (\code{DTYPE}), e.g., \code{dtype = c("Natural", "Dose")} limits the curves to these two data types. By default all values are allowed. See \linkS4class{Risoe.BINfileData} for allowed data types.} \item{keep.SEL}{\link{logical} (default): option allowing to use the \code{SEL} element of the \linkS4class{Risoe.BINfileData} manually. \strong{NOTE:} In this case any limitation provided by \code{run}, \code{set} and \code{dtype} are ignored!} \item{info.measurement}{\link{character} (\emph{with default}): option to provide information about the measurement on the plot output (e.g. name of the BIN or BINX file).} \item{output.plot}{\link{logical} (\emph{with default}): plot output (\code{TRUE/FALSE})} \item{output.plot.single}{\link{logical} (\emph{with default}): single plot output (\code{TRUE/FALSE}) to allow for plotting the results in single plot windows.
Requires \code{output.plot = TRUE}.} \item{cex.global}{\link{numeric} (\emph{with default}): global scaling factor.} \item{...}{further arguments that will be passed to the function \link{calc_OSLLxTxRatio} (supported: \code{background.count.distribution}, \code{sigmab}, \code{sig0}; e.g., for instrumental error) and can be used to adjust the plot. Supported" \code{mtext}, \code{log}} } \value{ A plot (\emph{optional}) and \link{list} is returned containing the following elements: \item{LnLxTnTx}{\link{data.frame} of all calculated Lx/Tx values including signal, background counts and the dose points.} \item{RejectionCriteria}{\link{data.frame} with values that might by used as rejection criteria. NA is produced if no R0 dose point exists.} \item{SARParameters}{\link{data.frame} of additional measurement parameters obtained from the BIN file, e.g. preheat or read temperature (not valid for all types of measurements).} } \description{ The function analyses SAR CW-OSL curve data and provides a summary of the measured data for every position. The output of the function is optimised for SAR OSL measurements on quartz. } \details{ The function works only for standard SAR protocol measurements introduced by Murray and Wintle (2000) with CW-OSL curves. For the calculation of the Lx/Tx value the function \link{calc_OSLLxTxRatio} is used. \strong{Provided rejection criteria} \verb{[recyling ratio]}: calculated for every repeated regeneration dose point. \verb{[recuperation]}: recuperation rate calculated by comparing the \code{Lx/Tx} values of the zero regeneration point with the \code{Ln/Tn} value (the \code{Lx/Tx} ratio of the natural signal). For methodological background see Aitken and Smith (1988) \verb{[IRSL/BOSL]}: the integrated counts (\code{signal.integral}) of an IRSL curve are compared to the integrated counts of the first regenerated dose point. It is assumed that IRSL curves got the same dose as the first regenerated dose point. 
\strong{Note:} This is not the IR depletion ratio described by Duller (2003). } \note{ Rejection criteria are calculated but not considered during the analysis to discard values. \strong{The analysis of IRSL data is not directly supported}. You may want to consider using the functions \link{analyse_SAR.CWOSL} or \link{analyse_pIRIRSequence} instead. \strong{The development of this function will not be continued. We recommend to use the function \link{analyse_SAR.CWOSL} instead.} } \section{Function version}{ 0.2.17 } \examples{ ##load data data(ExampleData.BINfileData, envir = environment()) ##analyse data output <- Analyse_SAR.OSLdata(input.data = CWOSL.SAR.Data, signal.integral = c(1:5), background.integral = c(900:1000), position = c(1:1), output.plot = TRUE) ##combine results relevant for further analysis output.SAR <- data.frame(Dose = output$LnLxTnTx[[1]]$Dose, LxTx = output$LnLxTnTx[[1]]$LxTx, LxTx.Error = output$LnLxTnTx[[1]]$LxTx.Error) output.SAR } \section{How to cite}{ Kreutzer, S., Fuchs, M.C., 2023. Analyse_SAR.OSLdata(): Analyse SAR CW-OSL measurements.. Function version 0.2.17. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Aitken, M.J. and Smith, B.W., 1988. Optical dating: recuperation after bleaching. Quaternary Science Reviews 7, 387-393. Duller, G., 2003. Distinguishing quartz and feldspar in single grain luminescence measurements. Radiation Measurements, 37 (2), 161-165. Murray, A.S. and Wintle, A.G., 2000. Luminescence dating of quartz using an improved single-aliquot regenerative-dose protocol. Radiation Measurements 32, 57-73.
} \seealso{ \link{calc_OSLLxTxRatio}, \linkS4class{Risoe.BINfileData}, \link{read_BIN2R}, \link{plot_GrowthCurve} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)\cr Margret C. Fuchs, HZDR, Freiberg (Germany) , RLum Developer Team} \keyword{datagen} \keyword{dplot} Luminescence/man/verify_SingleGrainData.Rd0000644000176200001440000001315014521210045020300 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/verify_SingleGrainData.R \name{verify_SingleGrainData} \alias{verify_SingleGrainData} \title{Verify single grain data sets and check for invalid grains, i.e. zero-light level grains} \usage{ verify_SingleGrainData( object, threshold = 10, cleanup = FALSE, cleanup_level = "aliquot", verbose = TRUE, plot = FALSE ) } \arguments{ \item{object}{\linkS4class{Risoe.BINfileData} or \linkS4class{RLum.Analysis} (\strong{required}): input object. The function also accepts a list with objects of allowed type.} \item{threshold}{\link{numeric} (\emph{with default}): numeric threshold value for the allowed difference between the \code{mean} and the \code{var} of the count values (see details)} \item{cleanup}{\link{logical} (\emph{with default}): if set to \code{TRUE} curves identified as zero light level curves are automatically removed. Output is an object as same type as the input, i.e. either \linkS4class{Risoe.BINfileData} or \linkS4class{RLum.Analysis}} \item{cleanup_level}{\link{character} (\emph{with default}): selects the level for the cleanup of the input data sets. Two options are allowed: \code{"curve"} or \code{"aliquot"}: \itemize{ \item If \code{"curve"} is selected every single curve marked as \code{invalid} is removed. \item If \code{"aliquot"} is selected, curves of one aliquot (grain or disc) can be marked as invalid, but will not be removed. An aliquot will be only removed if all curves of this aliquot are marked as invalid. 
}} \item{verbose}{\link{logical} (\emph{with default}): enables or disables the terminal feedback} \item{plot}{\link{logical} (\emph{with default}): enables or disables the graphical feedback} } \value{ The function returns -----------------------------------\cr \verb{[ NUMERICAL OUTPUT ]}\cr -----------------------------------\cr \strong{\code{RLum.Results}}-object \strong{slot:} \strong{\verb{@data}} \tabular{lll}{ \strong{Element} \tab \strong{Type} \tab \strong{Description}\cr \verb{$unique_pairs} \tab \code{data.frame} \tab the unique position and grain pairs \cr \verb{$selection_id} \tab \code{numeric} \tab the selection as record ID \cr \verb{$selection_full} \tab \code{data.frame} \tab implemented models used in the baSAR-model core \cr } \strong{slot:} \strong{\verb{@info}} The original function call \strong{Output variation} For \code{cleanup = TRUE} the same object as the input is returned, but cleaned up (invalid curves were removed). This means: Either an \linkS4class{Risoe.BINfileData} or an \linkS4class{RLum.Analysis} object is returned in such cases. An \linkS4class{Risoe.BINfileData} object can be exported to a BIN-file by using the function \link{write_R2BIN}. } \description{ This function tries to identify automatically zero-light level curves (grains) from single grain data measurements. } \details{ \strong{How does the method work?} The function compares the expected values (\eqn{E(X)}) and the variance (\eqn{Var(X)}) of the count values for each curve. Assuming that the background roughly follows a Poisson distribution the absolute difference of both values should be zero or at least around zero as \deqn{E(x) = Var(x) = \lambda} Thus the function checks for: \deqn{abs(E(x) - Var(x)) >= \Theta} With \eqn{\Theta} an arbitrary, user defined, threshold. Values above the threshold indicate curves comprising a signal.
Note: the absolute difference of \eqn{E(X)} and \eqn{Var(x)} instead of the ratio was chosen as both terms can become 0 which would result in 0 or \code{Inf}, if the ratio is calculated. } \note{ This function can work with \linkS4class{Risoe.BINfileData} objects or \linkS4class{RLum.Analysis} objects (or a list of it). However, the function is highly optimised for \linkS4class{Risoe.BINfileData} objects as it make sense to remove identify invalid grains before the conversion to an \linkS4class{RLum.Analysis} object. The function checking for invalid curves works rather robust and it is likely that Reg0 curves within a SAR cycle are removed as well. Therefore it is strongly recommended to use the argument \code{cleanup = TRUE} carefully. } \section{Function version}{ 0.2.2 } \examples{ ##01 - basic example I ##just show how to apply the function data(ExampleData.XSYG, envir = environment()) ##verify and get data.frame out of it verify_SingleGrainData(OSL.SARMeasurement$Sequence.Object)$selection_full ##02 - basic example II data(ExampleData.BINfileData, envir = environment()) id <- verify_SingleGrainData(object = CWOSL.SAR.Data, cleanup_level = "aliquot")$selection_id \dontrun{ ##03 - advanced example I ##importing and exporting a BIN-file ##select and import file file <- file.choose() object <- read_BIN2R(file) ##remove invalid aliquots(!) object <- verify_SingleGrainData(object, cleanup = TRUE) ##export to new BIN-file write_R2BIN(object, paste0(dirname(file),"/", basename(file), "_CLEANED.BIN")) } } \seealso{ \linkS4class{Risoe.BINfileData}, \linkS4class{RLum.Analysis}, \link{write_R2BIN}, \link{read_BIN2R} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. verify_SingleGrainData(): Verify single grain data sets and check for invalid grains, i.e. zero-light level grains. Function version 0.2.2. 
In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \keyword{datagen} \keyword{manip} Luminescence/man/length_RLum.Rd0000644000176200001440000000305414521210045016142 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/length_RLum.R \name{length_RLum} \alias{length_RLum} \title{General accessor function for RLum S4 class objects} \usage{ length_RLum(object) } \arguments{ \item{object}{\linkS4class{RLum} (\strong{required}): S4 object of class \code{RLum}} } \value{ Return is the same as input objects as provided in the list. } \description{ Function calls object-specific get functions for RLum S4 class objects. } \details{ The function provides a generalised access point for specific \linkS4class{RLum} objects.\cr Depending on the input object, the corresponding get function will be selected. Allowed arguments can be found in the documentations of the corresponding \linkS4class{RLum} class. } \section{Function version}{ 0.1.0 } \seealso{ \linkS4class{RLum.Data.Curve}, \linkS4class{RLum.Data.Image}, \linkS4class{RLum.Data.Spectrum}, \linkS4class{RLum.Analysis}, \linkS4class{RLum.Results} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. length_RLum(): General accessor function for RLum S4 class objects. Function version 0.1.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23.
https://CRAN.R-project.org/package=Luminescence } \keyword{utilities} Luminescence/man/set_Risoe.BINfileData.Rd0000644000176200001440000000303514521210045017716 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/set_Risoe.BINfileData.R \name{set_Risoe.BINfileData} \alias{set_Risoe.BINfileData} \title{General accessor function for RLum S4 class objects} \usage{ set_Risoe.BINfileData( METADATA = data.frame(), DATA = list(), .RESERVED = list() ) } \arguments{ \item{METADATA}{x} \item{DATA}{x} \item{.RESERVED}{x} } \value{ Return is the same as input objects as provided in the list. } \description{ Function calls object-specific get functions for RisoeBINfileData S4 class objects. } \details{ The function provides a generalised access point for specific \linkS4class{Risoe.BINfileData} objects.\cr Depending on the input object, the corresponding get function will be selected. Allowed arguments can be found in the documentations of the corresponding \linkS4class{Risoe.BINfileData} class. } \section{Function version}{ 0.1 } \seealso{ \linkS4class{Risoe.BINfileData} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. set_Risoe.BINfileData(): General accessor function for RLum S4 class objects. Function version 0.1. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. 
https://CRAN.R-project.org/package=Luminescence } \keyword{utilities} Luminescence/man/ExampleData.Fading.Rd0000644000176200001440000000641314521210044017277 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Luminescence-package.R \name{ExampleData.Fading} \alias{ExampleData.Fading} \title{Example data for feldspar fading measurements} \format{ A \link{list} with two elements, each containing a further \link{list} of \link{data.frame}s containing the data on the fading and equivalent dose measurements: \describe{ \verb{$fading.data}: A named \link{list} of \link{data.frame}s, each having three named columns (\verb{LxTx, LxTx.error, timeSinceIrradiation}).\cr \code{..$IR50}: Fading data of the IR50 signal.\cr \code{..$IR100}: Fading data of the IR100 signal.\cr \code{..$IR150}: Fading data of the IR150 signal.\cr \code{..$IR225}: Fading data of the IR225 signal.\cr \verb{$equivalentDose.data}: A named of \link{data.frame}s, each having three named columns (\verb{dose, LxTx, LxTx.error}).\cr \code{..$IR50}: Equivalent dose measurement data of the IR50 signal.\cr \code{..$IR100}: Equivalent dose measurement data of the IR100 signal.\cr \code{..$IR150}: Equivalent dose measurement data of the IR150 signal.\cr \code{..$IR225}: Equivalent dose measurement data of the IR225 signal.\cr } } \source{ These data were kindly provided by Georgina E. King. Detailed information on the sample UNIL/NB123 can be found in the reference given below. The raw data can be found in the accompanying supplementary information. } \description{ Example data set for fading measurements of the IR50, IR100, IR150 and IR225 feldspar signals of sample UNIL/NB123. It further contains regular equivalent dose measurement data of the same sample, which can be used to apply a fading correction to. 
} \examples{ ## Load example data data("ExampleData.Fading", envir = environment()) ## Get fading measurement data of the IR50 signal IR50_fading <- ExampleData.Fading$fading.data$IR50 head(IR50_fading) ## Determine g-value and rho' for the IR50 signal IR50_fading.res <- analyse_FadingMeasurement(IR50_fading) ## Show g-value and rho' results gval <- get_RLum(IR50_fading.res) rhop <- get_RLum(IR50_fading.res, "rho_prime") gval rhop ## Get LxTx values of the IR50 DE measurement IR50_De.LxTx <- ExampleData.Fading$equivalentDose.data$IR50 ## Calculate the De of the IR50 signal IR50_De <- plot_GrowthCurve(IR50_De.LxTx, mode = "interpolation", fit.method = "EXP") ## Extract the calculated De and its error IR50_De.res <- get_RLum(IR50_De) De <- c(IR50_De.res$De, IR50_De.res$De.Error) ## Apply fading correction (age conversion greatly simplified) IR50_Age <- De / 7.00 IR50_Age.corr <- calc_FadingCorr(IR50_Age, g_value = IR50_fading.res) } \references{ King, G.E., Herman, F., Lambert, R., Valla, P.G., Guralnik, B., 2016. Multi-OSL-thermochronometry of feldspar. Quaternary Geochronology 33, 76-87. doi:10.1016/j.quageo.2016.01.004 \strong{Details} \tabular{ll}{ Lab: \tab University of Lausanne \cr Lab-Code: \tab UNIL/NB123 \cr Location: \tab Namche Barwa (eastern Himalayas)\cr Material: \tab Coarse grained (180-212 microns) potassium feldspar \cr Units: \tab Values are given in seconds \cr Lab Dose Rate: \tab Dose rate of the beta-source at measurement ca. 
0.1335 +/- 0.004 Gy/s \cr Environmental Dose Rate: \tab 7.00 +/- 0.92 Gy/ka (includes internal dose rate) } } \keyword{datasets} Luminescence/man/report_RLum.Rd0000644000176200001440000001732114521210045016176 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/report_RLum.R \name{report_RLum} \alias{report_RLum} \title{Create a HTML-report for (RLum) objects} \usage{ report_RLum( object, file = tempfile(), title = "RLum.Report", compact = TRUE, timestamp = TRUE, show_report = TRUE, launch.browser = FALSE, css.file = NULL, quiet = TRUE, clean = TRUE, ... ) } \arguments{ \item{object}{(\strong{required}): The object to be reported on, preferably of any \code{RLum}-class.} \item{file}{\link{character} (\emph{with default}): A character string naming the output file. If no filename is provided a temporary file is created.} \item{title}{\link{character} (\emph{with default}): A character string specifying the title of the document.} \item{compact}{\link{logical} (\emph{with default}): When \code{TRUE} the following report components are hidden: \verb{@.pid}, \verb{@.uid}, \code{'Object structure'}, \code{'Session Info'} and only the first and last 5 rows of long matrices and data frames are shown. 
See details.} \item{timestamp}{\link{logical} (\emph{with default}): \code{TRUE} to add a timestamp to the filename (suffix).} \item{show_report}{\link{logical} (\emph{with default}): If set to \code{TRUE} the function tries to display the report output in the local viewer, e.g., within \emph{RStudio} after rendering.} \item{launch.browser}{\link{logical} (\emph{with default}): \code{TRUE} to open the HTML file in the system's default web browser after it has been rendered.} \item{css.file}{\link{character} (\emph{optional}): Path to a CSS file to change the default styling of the HTML document.} \item{quiet}{\link{logical} (\emph{with default}): \code{TRUE} to suppress printing of the pandoc command line.} \item{clean}{\link{logical} (\emph{with default}): \code{TRUE} to clean intermediate files created during rendering.} \item{...}{further arguments passed to or from other methods and to control the document's structure (see details).} } \value{ Writes a HTML and .Rds file. } \description{ Create a HTML-report for (RLum) objects } \details{ This function creates a HTML-report for a given object, listing its complete structure and content. The object itself is saved as a serialised .Rds file. The report file serves both as a convenient way of browsing through objects with complex data structures as well as a mean of properly documenting and saving objects. The HTML report is created with \link[rmarkdown:render]{rmarkdown::render} and has the following structure: \tabular{ll}{ \strong{Section} \tab \strong{Description} \cr \code{Header} \tab A summary of general characteristics of the object \cr \verb{Object content} \tab A comprehensive list of the complete structure and content of the provided object. 
\cr \verb{Object structure} \tab Summary of the objects structure given as a table \cr \code{File} \tab Information on the saved RDS file \cr \verb{Session Info} \tab Captured output from \code{sessionInfo()} \cr \code{Plots} \tab (\emph{optional}) For \code{RLum-class} objects a variable number of plots \cr } The structure of the report can be controlled individually by providing one or more of the following arguments (all \code{logical}): \tabular{ll}{ \strong{Argument} \tab \strong{Description} \cr \code{header} \tab Hide or show general information on the object \cr \code{main} \tab Hide or show the object's content \cr \code{structure} \tab Hide or show object's structure \cr \code{rds} \tab Hide or show information on the saved RDS file \cr \code{session} \tab Hide or show the session info \cr \code{plot} \tab Hide or show the plots (depending on object) \cr } Note that these arguments have higher precedence than \code{compact}. Further options that can be provided via the \code{...} argument: \tabular{ll}{ \strong{Argument} \tab \strong{Description} \cr \code{short_table} \tab If \code{TRUE} only show the first and last 5 rows of long tables. \cr \code{theme} \tab Specifies the Bootstrap theme to use for the report. Valid themes include \code{"default"}, \code{"cerulean"}, \code{"journal"}, \code{"flatly"}, \code{"readable"}, \code{"spacelab"}, \code{"united"}, \code{"cosmo"}, \code{"lumen"}, \code{"paper"}, \code{"sandstone"}, \code{"simplex"}, and \code{"yeti"}. \cr \code{highlight} \tab Specifies the syntax highlighting style. Supported styles include \code{"default"}, \code{"tango"}, \code{"pygments"}, \code{"kate"}, \code{"monochrome"}, \code{"espresso"}, \code{"zenburn"}, \code{"haddock"}, and \code{"textmate"}. 
\cr \code{css} \tab \code{TRUE} or \code{FALSE} to enable/disable custom CSS styling \cr } The following arguments can be used to customise the report via CSS (Cascading Style Sheets): \tabular{ll}{ \strong{Argument} \tab \strong{Description} \cr \code{font_family} \tab Define the font family of the HTML document (default: \code{"arial"}) \cr \code{headings_size} \tab Size of the \verb{

} to \verb{

} tags used to define HTML headings (default: 166\%). \cr \code{content_color} \tab Colour of the object's content (default: #a72925). \cr } Note that these arguments must all be of class \link{character} and follow standard CSS syntax. For exhaustive CSS styling you can provide a custom CSS file for argument \code{css.file}. CSS styling can be turned of using \code{css = FALSE}. } \note{ This function requires the R packages 'rmarkdown', 'pander' and 'rstudioapi'. } \section{Function version}{ 0.1.4 } \examples{ \dontrun{ ## Example: RLum.Results ---- # load example data data("ExampleData.DeValues") # apply the MAM-3 age model and save results mam <- calc_MinDose(ExampleData.DeValues$CA1, sigmab = 0.2) # create the HTML report report_RLum(object = mam, file = "~/CA1_MAM.Rmd", timestamp = FALSE, title = "MAM-3 for sample CA1") # when creating a report the input file is automatically saved to a # .Rds file (see saveRDS()). mam_report <- readRDS("~/CA1_MAM.Rds") all.equal(mam, mam_report) ## Example: Temporary file & Viewer/Browser ---- # (a) # Specifying a filename is not necessarily required. If no filename is provided, # the report is rendered in a temporary file. If you use the RStudio IDE, the # temporary report is shown in the interactive Viewer pane. report_RLum(object = mam) # (b) # Additionally, you can view the HTML report in your system's default web browser. 
report_RLum(object = mam, launch.browser = TRUE) ## Example: RLum.Analysis ---- data("ExampleData.RLum.Analysis") # create the HTML report (note that specifying a file # extension is not necessary) report_RLum(object = IRSAR.RF.Data, file = "~/IRSAR_RF") ## Example: RLum.Data.Curve ---- data.curve <- get_RLum(IRSAR.RF.Data)[[1]] # create the HTML report report_RLum(object = data.curve, file = "~/Data_Curve") ## Example: Any other object ---- x <- list(x = 1:10, y = runif(10, -5, 5), z = data.frame(a = LETTERS[1:20], b = dnorm(0:9)), NA) report_RLum(object = x, file = "~/arbitray_list") } } \seealso{ \link[rmarkdown:render]{rmarkdown::render}, \link[pander:pander_return]{pander::pander_return}, \link[pander:openFileInOS]{pander::openFileInOS}, \link[rstudioapi:viewer]{rstudioapi::viewer}, \link{browseURL} } \author{ Christoph Burow, University of Cologne (Germany), Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) \cr , RLum Developer Team} \section{How to cite}{ Burow, C., Kreutzer, S., 2023. report_RLum(): Create a HTML-report for (RLum) objects. Function version 0.1.4. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } Luminescence/man/ExampleData.DeValues.Rd0000644000176200001440000000440214521210044017613 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Luminescence-package.R \name{ExampleData.DeValues} \alias{ExampleData.DeValues} \title{Example De data sets for the package Luminescence} \format{ A \link{list} with two elements, each containing a two column \link{data.frame}: \describe{ \verb{$BT998}: De and De error values for a fine grain quartz sample from a loess section in Rottewitz. 
\verb{$CA1}: Single grain De and De error values for a coarse grain quartz sample from a fluvial deposit in the rock shelter of Cueva Anton } } \description{ Equivalent dose (De) values measured for a fine grain quartz sample from a loess section in Rottewitz (Saxony/Germany) and for a coarse grain quartz sample from a fluvial deposit in the rock shelter of Cueva Anton (Murcia/Spain). } \examples{ ##(1) plot values as histogram data(ExampleData.DeValues, envir = environment()) plot_Histogram(ExampleData.DeValues$BT998, xlab = "De [s]") ##(2) plot values as histogram (with second to gray conversion) data(ExampleData.DeValues, envir = environment()) De.values <- Second2Gray(ExampleData.DeValues$BT998, dose.rate = c(0.0438, 0.0019)) plot_Histogram(De.values, xlab = "De [Gy]") } \references{ \strong{BT998} Unpublished data \strong{CA1} Burow, C., Kehl, M., Hilgers, A., Weniger, G.-C., Angelucci, D., Villaverde, V., Zapata, J. and Zilhao, J. (2015). Luminescence dating of fluvial deposits in the rock shelter of Cueva Anton, Spain. Geochronometria 52, 107-125. \strong{BT998} \tabular{ll}{ Lab: \tab Luminescence Laboratory Bayreuth\cr Lab-Code: \tab BT998\cr Location: \tab Rottewitz (Saxony/Germany)\cr Material: \tab Fine grain quartz measured on aluminium discs on a Risø TL/OSL DA-15 reader\cr Units: \tab Values are given in seconds \cr Dose Rate: \tab Dose rate of the beta-source at measurement ca. 
0.0438 Gy/s +/- 0.0019 Gy/s\cr Measurement Date: \tab 2012-01-27 } \strong{CA1} \tabular{ll}{ Lab: \tab Cologne Luminescence Laboratory (CLL)\cr Lab-Code: \tab C-L2941\cr Location: \tab Cueva Anton (Murcia/Spain)\cr Material: \tab Coarse grain quartz (200-250 microns) measured on single grain discs on a Risoe TL/OSL DA-20 reader\cr Units: \tab Values are given in Gray \cr Measurement Date: \tab 2012 } } \keyword{datasets} Luminescence/man/ExampleData.LxTxOSLData.Rd0000644000176200001440000000115714521210044020156 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Luminescence-package.R \name{ExampleData.LxTxOSLData} \alias{ExampleData.LxTxOSLData} \alias{Lx.data} \alias{Tx.data} \title{Example Lx and Tx curve data from an artificial OSL measurement} \format{ Two \code{\link{data.frame}}s containing time and count values. } \source{ Arbitrary OSL measurement. } \description{ \code{Lx} and \code{Tx} data of continuous wave (CW-) OSL signal curves. 
} \examples{ ##load data data(ExampleData.LxTxOSLData, envir = environment()) ##plot data plot(Lx.data) plot(Tx.data) } \references{ unpublished data } Luminescence/man/read_TIFF2R.Rd0000644000176200001440000000237114521210045015652 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/read_TIFF2R.R \name{read_TIFF2R} \alias{read_TIFF2R} \title{Import TIFF Image Data into R} \usage{ read_TIFF2R(file) } \arguments{ \item{file}{\link{character} (\strong{required}): file name} } \value{ \linkS4class{RLum.Data.Image} object } \description{ Simple wrapper around \link[tiff:readTIFF]{tiff::readTIFF} to import TIFF images and TIFF image stacks to be further processed within the package \code{'Luminescence'} } \section{Function version}{ 0.1.1 } \examples{ \dontrun{ file <- file.choose() image <- read_TIFF2R(file) } } \seealso{ \link[tiff:readTIFF]{tiff::readTIFF}, \linkS4class{RLum.Data.Image} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. read_TIFF2R(): Import TIFF Image Data into R. Function version 0.1.1. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. 
https://CRAN.R-project.org/package=Luminescence } \keyword{IO} Luminescence/man/BaseDataSet.ConversionFactors.Rd0000644000176200001440000000313614521210044021510 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Luminescence-package.R \name{BaseDataSet.ConversionFactors} \alias{BaseDataSet.ConversionFactors} \title{Base data set of dose-rate conversion factors} \format{ A \code{\link{list}} with three elements with dose-rate conversion factors sorted by article and radiation type (alpha, beta, gamma): \tabular{ll}{ \code{AdamiecAitken1998}: \tab Conversion factors from Tables 5 and 6 \cr \code{Cresswelletal2018}: \tab Conversion factors from Tables 5 and 6 \cr \code{Guerinetal2011}: \tab Conversion factors from Tables 1, 2 and 3 \cr \code{Liritzisetal2013}: \tab Conversion factors from Tables 1, 2 and 3 \cr } } \source{ All gamma conversion factors were carefully read from the tables given in the references above. } \description{ Collection of published dose-rate conversion factors to convert concentrations of radioactive isotopes to dose rate values. } \section{Version}{ 0.2.0 } \examples{ ## Load data data("BaseDataSet.ConversionFactors", envir = environment()) } \references{ Adamiec, G., Aitken, M.J., 1998. Dose-rate conversion factors: update. Ancient TL 16, 37-46. Cresswell., A.J., Carter, J., Sanderson, D.C.W., 2018. Dose rate conversion parameters: Assessment of nuclear data. Radiation Measurements 120, 195-201. Guerin, G., Mercier, N., Adamiec, G., 2011. Dose-rate conversion factors: update. Ancient TL, 29, 5-8. Liritzis, I., Stamoulis, K., Papachristodoulou, C., Ioannides, K., 2013. A re-evaluation of radiation dose-rate conversion factors. Mediterranean Archaeology and Archaeometry 13, 1-15. 
} \keyword{datasets} Luminescence/man/ExampleData.LxTxData.Rd0000644000176200001440000000143514521210044017577 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Luminescence-package.R \name{ExampleData.LxTxData} \alias{ExampleData.LxTxData} \alias{LxTxData} \title{Example Lx/Tx data from CW-OSL SAR measurement} \format{ A \code{\link{data.frame}} with 4 columns (Dose, LxTx, LxTx.Error, TnTx). } \source{ \tabular{ll}{ Lab: \tab Luminescence Laboratory Bayreuth\cr Lab-Code: \tab BT607\cr Location: \tab Ostrau (Saxony-Anhalt/Germany)\cr Material: \tab Middle grain (38-63 \eqn{\mu}m) quartz measured on a Risoe TL/OSL DA-15 reader. } } \description{ LxTx data from a SAR measurement for the package Luminescence. } \examples{ ## plot Lx/Tx data vs dose [s] data(ExampleData.LxTxData, envir = environment()) plot(LxTxData$Dose,LxTxData$LxTx) } \references{ unpublished data } Luminescence/man/RLum.Data-class.Rd0000644000176200001440000000127114521210044016552 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RLum.Data-class.R \docType{class} \name{RLum.Data-class} \alias{RLum.Data-class} \title{Class \code{"RLum.Data"}} \description{ Generalized virtual data class for luminescence data. } \note{ Just a virtual class. } \section{Objects from the Class}{ A virtual Class: No objects can be created from it. 
} \section{Class version}{ 0.2.1 } \examples{ showClass("RLum.Data") } \seealso{ \linkS4class{RLum}, \linkS4class{RLum.Data.Curve}, \linkS4class{RLum.Data.Spectrum} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \keyword{classes} \keyword{internal} Luminescence/man/plot_RadialPlot.Rd0000644000176200001440000002507614521210045017023 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot_RadialPlot.R \name{plot_RadialPlot} \alias{plot_RadialPlot} \title{Function to create a Radial Plot} \usage{ plot_RadialPlot( data, na.rm = TRUE, log.z = TRUE, central.value, centrality = "mean.weighted", mtext, summary, summary.pos, legend, legend.pos, stats, rug = FALSE, plot.ratio, bar.col, y.ticks = TRUE, grid.col, line, line.col, line.label, output = FALSE, ... ) } \arguments{ \item{data}{\link{data.frame} or \linkS4class{RLum.Results} object (\strong{required}): for \code{data.frame} two columns: De (\code{data[,1]}) and De error (\code{data[,2]}). To plot several data sets in one plot, the data sets must be provided as \code{list}, e.g. \code{list(data.1, data.2)}.} \item{na.rm}{\link{logical} (\emph{with default}): excludes \code{NA} values from the data set prior to any further operations.} \item{log.z}{\link{logical} (\emph{with default}): Option to display the z-axis in logarithmic scale. Default is \code{TRUE}.} \item{central.value}{\link{numeric}: User-defined central value, primarily used for horizontal centring of the z-axis.} \item{centrality}{\link{character} or \link{numeric} (\emph{with default}): measure of centrality, used for automatically centring the plot and drawing the central line. Can either be one out of \itemize{ \item \code{"mean"}, \item \code{"median"}, \item \code{"mean.weighted"} and \item \code{"median.weighted"} or a \item numeric value used for the standardisation. 
}} \item{mtext}{\link{character}: additional text below the plot title.} \item{summary}{\link{character} (\emph{optional}): add statistic measures of centrality and dispersion to the plot. Can be one or more of several keywords. See details for available keywords.} \item{summary.pos}{\link{numeric} or \link{character} (\emph{with default}): optional position coordinates or keyword (e.g. \code{"topright"}) for the statistical summary. Alternatively, the keyword \code{"sub"} may be specified to place the summary below the plot header. However, this latter option is only possible if \code{mtext} is not used.} \item{legend}{\link{character} vector (\emph{optional}): legend content to be added to the plot.} \item{legend.pos}{\link{numeric} or \link{character} (with default): optional position coordinates or keyword (e.g. \code{"topright"}) for the legend to be plotted.} \item{stats}{\link{character}: additional labels of statistically important values in the plot. One or more out of the following: \itemize{ \item \code{"min"}, \item \code{"max"}, \item \code{"median"}. }} \item{rug}{\link{logical}: Option to add a rug to the z-scale, to indicate the location of individual values} \item{plot.ratio}{\link{numeric}: User-defined plot area ratio (i.e. curvature of the z-axis). If omitted, the default value (\code{4.5/5.5}) is used and modified automatically to optimise the z-axis curvature. The parameter should be decreased when data points are plotted outside the z-axis or when the z-axis gets too elliptic.} \item{bar.col}{\link{character} or \link{numeric} (\emph{with default}): colour of the bar showing the 2-sigma range around the central value. To disable the bar, use \code{"none"}. Default is \code{"grey"}.} \item{y.ticks}{\link{logical}: Option to hide y-axis labels. Useful for data with small scatter.} \item{grid.col}{\link{character} or \link{numeric} (\emph{with default}): colour of the grid lines (originating at \verb{[0,0]} and stretching to the z-scale). 
To disable grid lines, use \code{"none"}. Default is \code{"grey"}.} \item{line}{\link{numeric}: numeric values of the additional lines to be added.} \item{line.col}{\link{character} or \link{numeric}: colour of the additional lines.} \item{line.label}{\link{character}: labels for the additional lines.} \item{output}{\link{logical}: Optional output of numerical plot parameters. These can be useful to reproduce similar plots. Default is \code{FALSE}.} \item{...}{Further plot arguments to pass. \code{xlab} must be a vector of length 2, specifying the upper and lower x-axes labels.} } \value{ Returns a plot object. } \description{ A Galbraith's radial plot is produced on a logarithmic or a linear scale. } \details{ Details and the theoretical background of the radial plot are given in the cited literature. This function is based on an S script of Rex Galbraith. To reduce the manual adjustments, the function has been rewritten. Thanks to Rex Galbraith for useful comments on this function. \cr Plotting can be disabled by adding the argument \code{plot = "FALSE"}, e.g. to return only numeric plot output. Earlier versions of the Radial Plot in this package had the 2-sigma-bar drawn onto the z-axis. However, this might have caused misunderstanding in that the 2-sigma range may also refer to the z-scale, which it does not! Rather it applies only to the x-y-coordinate system (standardised error vs. precision). A spread in doses or ages must be drawn as lines originating at zero precision (x0) and zero standardised estimate (y0). Such a range may be drawn by adding lines to the radial plot ( \code{line}, \code{line.col}, \code{line.label}, cf. examples). A statistic summary, i.e. 
a collection of statistic measures of centrality and dispersion (and further measures) can be added by specifying one or more of the following keywords: \itemize{ \item \code{"n"} (number of samples), \item \code{"mean"} (mean De value), \item \code{"mean.weighted"} (error-weighted mean), \item \code{"median"} (median of the De values), \item \code{"sdrel"} (relative standard deviation in percent), \item \code{"sdrel.weighted"} (error-weighted relative standard deviation in percent), \item \code{"sdabs"} (absolute standard deviation), \item \code{"sdabs.weighted"} (error-weighted absolute standard deviation), \item \code{"serel"} (relative standard error), \item \code{"serel.weighted"} (error-weighted relative standard error), \item \code{"seabs"} (absolute standard error), \item \code{"seabs.weighted"} (error-weighted absolute standard error), \item \code{"in.2s"} (percent of samples in 2-sigma range), \item \code{"kurtosis"} (kurtosis) and \item \code{"skewness"} (skewness). } } \section{Function version}{ 0.5.9 } \examples{ ## load example data data(ExampleData.DeValues, envir = environment()) ExampleData.DeValues <- Second2Gray( ExampleData.DeValues$BT998, c(0.0438,0.0019)) ## plot the example data straightforward plot_RadialPlot(data = ExampleData.DeValues) ## now with linear z-scale plot_RadialPlot( data = ExampleData.DeValues, log.z = FALSE) ## now with output of the plot parameters plot1 <- plot_RadialPlot( data = ExampleData.DeValues, log.z = FALSE, output = TRUE) plot1 plot1$zlim ## now with adjusted z-scale limits plot_RadialPlot( data = ExampleData.DeValues, log.z = FALSE, zlim = c(100, 200)) ## now the two plots with serious but seasonally changing fun #plot_RadialPlot(data = data.3, fun = TRUE) ## now with user-defined central value, in log-scale again plot_RadialPlot( data = ExampleData.DeValues, central.value = 150) ## now with a rug, indicating individual De values at the z-scale plot_RadialPlot( data = ExampleData.DeValues, rug = TRUE) ## now with 
legend, colour, different points and smaller scale plot_RadialPlot( data = ExampleData.DeValues, legend.text = "Sample 1", col = "tomato4", bar.col = "peachpuff", pch = "R", cex = 0.8) ## now without 2-sigma bar, y-axis, grid lines and central value line plot_RadialPlot( data = ExampleData.DeValues, bar.col = "none", grid.col = "none", y.ticks = FALSE, lwd = 0) ## now with user-defined axes labels plot_RadialPlot( data = ExampleData.DeValues, xlab = c("Data error (\%)", "Data precision"), ylab = "Scatter", zlab = "Equivalent dose [Gy]") ## now with minimum, maximum and median value indicated plot_RadialPlot( data = ExampleData.DeValues, central.value = 150, stats = c("min", "max", "median")) ## now with a brief statistical summary plot_RadialPlot( data = ExampleData.DeValues, summary = c("n", "in.2s")) ## now with another statistical summary as subheader plot_RadialPlot( data = ExampleData.DeValues, summary = c("mean.weighted", "median"), summary.pos = "sub") ## now the data set is split into sub-groups, one is manipulated data.1 <- ExampleData.DeValues[1:15,] data.2 <- ExampleData.DeValues[16:25,] * 1.3 ## now a common dataset is created from the two subgroups data.3 <- list(data.1, data.2) ## now the two data sets are plotted in one plot plot_RadialPlot(data = data.3) ## now with some graphical modification plot_RadialPlot( data = data.3, col = c("darkblue", "darkgreen"), bar.col = c("lightblue", "lightgreen"), pch = c(2, 6), summary = c("n", "in.2s"), summary.pos = "sub", legend = c("Sample 1", "Sample 2")) } \section{How to cite}{ Dietze, M., Kreutzer, S., 2023. plot_RadialPlot(): Function to create a Radial Plot. Function version 0.5.9. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. 
https://CRAN.R-project.org/package=Luminescence } \references{ Galbraith, R.F., 1988. Graphical Display of Estimates Having Differing Standard Errors. Technometrics, 30 (3), 271-281. Galbraith, R.F., 1990. The radial plot: Graphical assessment of spread in ages. International Journal of Radiation Applications and Instrumentation. Part D. Nuclear Tracks and Radiation Measurements, 17 (3), 207-214. Galbraith, R. & Green, P., 1990. Estimating the component ages in a finite mixture. International Journal of Radiation Applications and Instrumentation. Part D. Nuclear Tracks and Radiation Measurements, 17 (3) 197-206. Galbraith, R.F. & Laslett, G.M., 1993. Statistical models for mixed fission track ages. Nuclear Tracks And Radiation Measurements, 21 (4), 459-470. Galbraith, R.F., 1994. Some Applications of Radial Plots. Journal of the American Statistical Association, 89 (428), 1232-1242. Galbraith, R.F., 2010. On plotting OSL equivalent doses. Ancient TL, 28 (1), 1-10. Galbraith, R.F. & Roberts, R.G., 2012. Statistical aspects of equivalent dose and error calculation and display in OSL dating: An overview and some recommendations. Quaternary Geochronology, 11, 1-27. } \seealso{ \link{plot}, \link{plot_KDE}, \link{plot_Histogram}, \link{plot_AbanicoPlot} } \author{ Michael Dietze, GFZ Potsdam (Germany)\cr Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)\cr Based on a rewritten S script of Rex Galbraith, 2010 , RLum Developer Team} Luminescence/man/set_RLum.Rd0000644000176200001440000000533714521210045015462 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/set_RLum.R \name{set_RLum} \alias{set_RLum} \title{General set function for RLum S4 class objects} \usage{ set_RLum(class, originator, .uid = create_UID(), .pid = NA_character_, ...) 
} \arguments{ \item{class}{\linkS4class{RLum} (\strong{required}): name of the S4 class to create} \item{originator}{\link{character} (\emph{automatic}): contains the name of the calling function (the function that produces this object); can be set manually.} \item{.uid}{\link{character} (\emph{automatic}): sets an unique ID for this object using the internal C++ function \code{create_UID}.} \item{.pid}{\link{character} (\emph{with default}): option to provide a parent id for nesting at will.} \item{...}{further arguments that one might want to pass to the specific set method} } \value{ Returns an object of the specified class. } \description{ Function calls object-specific set functions for RLum S4 class objects. } \details{ The function provides a generalised access point for specific \linkS4class{RLum} objects.\cr Depending on the given class, the corresponding method to create an object from this class will be selected. Allowed additional arguments can be found in the documentations of the corresponding \linkS4class{RLum} class: \itemize{ \item \linkS4class{RLum.Data.Curve}, \item \linkS4class{RLum.Data.Image}, \item \linkS4class{RLum.Data.Spectrum}, \item \linkS4class{RLum.Analysis}, \item \linkS4class{RLum.Results} } } \section{Function version}{ 0.3.0 } \examples{ ##produce empty objects from each class set_RLum(class = "RLum.Data.Curve") set_RLum(class = "RLum.Data.Spectrum") set_RLum(class = "RLum.Data.Spectrum") set_RLum(class = "RLum.Analysis") set_RLum(class = "RLum.Results") ##produce a curve object with arbitrary curve values object <- set_RLum( class = "RLum.Data.Curve", curveType = "arbitrary", recordType = "OSL", data = matrix(c(1:100,exp(-c(1:100))),ncol = 2)) ##plot this curve object plot_RLum(object) } \seealso{ \linkS4class{RLum.Data.Curve}, \linkS4class{RLum.Data.Image}, \linkS4class{RLum.Data.Spectrum}, \linkS4class{RLum.Analysis}, \linkS4class{RLum.Results} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University 
(Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. set_RLum(): General set function for RLum S4 class objects. Function version 0.3.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \keyword{utilities} Luminescence/man/read_PSL2R.Rd0000644000176200001440000000614314521210045015561 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/read_PSL2R.R \name{read_PSL2R} \alias{read_PSL2R} \title{Import PSL files to R} \usage{ read_PSL2R( file, drop_bg = FALSE, as_decay_curve = TRUE, smooth = FALSE, merge = FALSE, ... ) } \arguments{ \item{file}{\link{character} (\strong{required}): path and file name of the PSL file. If input is a \code{vector} it should comprise only \code{character}s representing valid paths and PSL file names. Alternatively the input character can be just a directory (path). In this case the the function tries to detect and import all PSL files found in the directory.} \item{drop_bg}{\link{logical} (\emph{with default}): \code{TRUE} to automatically remove all non-OSL/IRSL curves.} \item{as_decay_curve}{\link{logical} (\emph{with default}): Portable OSL Reader curves are often given as cumulative light sum curves. Use \code{TRUE} (default) to convert the curves to the more usual decay form.} \item{smooth}{\link{logical} (\emph{with default}): \code{TRUE} to apply Tukey's Running Median Smoothing for OSL and IRSL decay curves. Smoothing is encouraged if you see random signal drops within the decay curves related to hardware errors.} \item{merge}{\link{logical} (\emph{with default}): \code{TRUE} to merge all \code{RLum.Analysis} objects. 
Only applicable if multiple files are imported.} \item{...}{currently not used.} } \value{ Returns an S4 \linkS4class{RLum.Analysis} object containing \linkS4class{RLum.Data.Curve} objects for each curve. } \description{ Imports PSL files produced by a SUERC portable OSL reader into R \strong{(BETA)}. } \details{ This function provides an import routine for the SUERC portable OSL Reader PSL format. PSL files are just plain text and can be viewed with any text editor. Due to the formatting of PSL files this import function relies heavily on regular expression to find and extract all relevant information. See \strong{note}. } \note{ Because this function relies heavily on regular expressions to parse PSL files it is currently only in beta status. If the routine fails to import a specific PSL file please report to \verb{} so the function can be updated. } \section{Function version}{ 0.0.2 } \examples{ # (1) Import PSL file to R file <- system.file("extdata", "DorNie_0016.psl", package = "Luminescence") psl <- read_PSL2R(file, drop_bg = FALSE, as_decay_curve = TRUE, smooth = TRUE, merge = FALSE) print(str(psl, max.level = 3)) plot(psl, combine = TRUE) } \seealso{ \linkS4class{RLum.Analysis}, \linkS4class{RLum.Data.Curve}, \linkS4class{RLum.Data.Curve} } \author{ Christoph Burow, University of Cologne (Germany) , RLum Developer Team} \section{How to cite}{ Burow, C., 2023. read_PSL2R(): Import PSL files to R. Function version 0.0.2. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. 
https://CRAN.R-project.org/package=Luminescence } \keyword{IO} Luminescence/man/BaseDataSet.CosmicDoseRate.Rd0000644000176200001440000000602114521210044020701 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Luminescence-package.R \name{BaseDataSet.CosmicDoseRate} \alias{BaseDataSet.CosmicDoseRate} \alias{values.cosmic.Softcomp} \alias{values.factor.Altitude} \alias{values.par.FJH} \title{Base data set for cosmic dose rate calculation} \format{ \tabular{ll}{ \code{values.cosmic.Softcomp}: \tab data frame containing cosmic dose rates for shallow depths (< 167 g cm^-2) obtained using the "AGE" program by Rainer Gruen (cf. Gruen 2009). These data essentially reproduce the graph shown in Fig. 1 of Prescott & Hutton (1988). \cr \code{values.factor.Altitude}: \tab data frame containing altitude factors for adjusting geomagnetic field-change factors. Values were read from Fig. 1 in Prescott & Hutton (1994). \cr \code{values.par.FJH}: \tab data frame containing values for parameters F, J and H (read from Fig. 2 in Prescott & Hutton 1994) used in the expression \cr } \deqn{Dc = D0*(F+J*exp((altitude/1000)/H))} } \source{ The following data were carefully read from figures in mentioned sources and used for fitting procedures. The derived expressions are used in the function \code{calc_CosmicDoseRate}. 
\strong{values.cosmic.Softcomp} \tabular{ll}{ Program: \tab "AGE"\cr Reference: \tab Gruen (2009) \cr Fit: \tab Polynomials in the form of } For depths between 40-167 g cm^-2: \deqn{y = 2*10^-6*x^2-0.0008*x+0.2535} (For depths <40 g cm^-2) \deqn{y = -6*10^-8*x^3+2*10^-5*x^2-0.0025*x+0.2969} \strong{\code{values.factor.Altitude}} \tabular{ll}{ Reference: \tab Prescott & Hutton (1994) \cr Page: \tab 499 \cr Figure: \tab 1 \cr Fit: \tab 2-degree polynomial in the form of } \deqn{y = -0.026*x^2 + 0.6628*x + 1.0435} \strong{\code{values.par.FJH}} \tabular{ll}{ Reference: \tab Prescott & Hutton (1994) \cr Page: \tab 500 \cr Figure: \tab 2 \cr Fits: \tab 3-degree polynomials and linear fits } F (non-linear part, \eqn{\lambda} < 36.5 deg.): \deqn{y = -7*10^-7*x^3-8*10^-5*x^2-0.0009*x+0.3988} F (linear part, \eqn{\lambda} > 36.5 deg.): \deqn{y = -0.0001*x + 0.2347} J (non-linear part, \eqn{\lambda} < 34 deg.): \deqn{y = 5*10^-6*x^3-5*10^-5*x^2+0.0026*x+0.5177} J (linear part, \eqn{\lambda} > 34 deg.): \deqn{y = 0.0005*x + 0.7388} H (non-linear part, \eqn{\lambda} < 36 deg.): \deqn{y = -3*10^-6*x^3-5*10^-5*x^2-0.0031*x+4.398} H (linear part, \eqn{\lambda} > 36 deg.): \deqn{y = 0.0002*x + 4.0914} } \description{ Collection of data from various sources needed for cosmic dose rate calculation } \section{Version}{ 0.1 } \examples{ ##load data data(BaseDataSet.CosmicDoseRate) } \references{ Gruen, R., 2009. The "AGE" program for the calculation of luminescence age estimates. Ancient TL, 27, pp. 45-46. Prescott, J.R., Hutton, J.T., 1988. Cosmic ray and gamma ray dosimetry for TL and ESR. Nuclear Tracks and Radiation Measurements, 14, pp. 223-227. Prescott, J.R., Hutton, J.T., 1994. Cosmic ray contributions to dose rates for luminescence and ESR dating: large depths and long-term time variations. Radiation Measurements, 23, pp. 497-500. 
} \keyword{datasets} Luminescence/man/calc_gSGC.Rd0000644000176200001440000000712514521210045015472 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calc_gSGC.R \name{calc_gSGC} \alias{calc_gSGC} \title{Calculate De value based on the gSGC by Li et al., 2015} \usage{ calc_gSGC( data, gSGC.type = "0-250", gSGC.parameters, n.MC = 100, verbose = TRUE, plot = TRUE, ... ) } \arguments{ \item{data}{\link{data.frame} (\strong{required}): input data of providing the following columns: \code{LnTn}, \code{LnTn.error}, \code{Lr1Tr1}, \code{Lr1Tr1.error}, \code{Dr1} \strong{Note:} column names are not required. The function expect the input data in the given order} \item{gSGC.type}{\link{character} (\emph{with default}): define the function parameters that should be used for the iteration procedure: Li et al., 2015 (Table 2) presented function parameters for two dose ranges: \code{"0-450"} and \code{"0-250"}} \item{gSGC.parameters}{\link{list} (\emph{optional}): option to provide own function parameters used for fitting as named list. Nomenclature follows Li et al., 2015, i.e. \code{list(A,A.error,D0,D0.error,c,c.error,Y0,Y0.error,range)}, range requires a vector for the range the function is considered as valid, e.g. \code{range = c(0,250)}\cr Using this option overwrites the default parameter list of the gSGC, meaning the argument \code{gSGC.type} will be without effect} \item{n.MC}{\link{integer} (\emph{with default}): number of Monte Carlo simulation runs for error estimation, see details.} \item{verbose}{\link{logical}: enable or disable terminal output} \item{plot}{\link{logical}: enable or disable graphical feedback as plot} \item{...}{parameters will be passed to the plot output} } \value{ Returns an S4 object of type \linkS4class{RLum.Results}. \strong{\verb{@data}}\cr \verb{$ De.value} (\link{data.frame}) \cr \code{.. $ De} \cr \code{.. $ De.error} \cr \code{.. 
$ Eta} \cr \verb{$ De.MC} (\link{list}) contains the matrices from the error estimation.\cr \verb{$ uniroot} (\link{list}) contains the \link{uniroot} outputs of the De estimations\cr \strong{\verb{@info}}\cr `$ call`` (\link{call}) the original function call } \description{ Function returns De value and De value error using the global standardised growth curve (gSGC) assumption proposed by Li et al., 2015 for OSL dating of sedimentary quartz } \details{ The error of the De value is determined using a Monte Carlo simulation approach. Solving of the equation is realised using \link{uniroot}. Large values for \code{n.MC} will significantly increase the computation time. } \section{Function version}{ 0.1.1 } \examples{ results <- calc_gSGC(data = data.frame( LnTn = 2.361, LnTn.error = 0.087, Lr1Tr1 = 2.744, Lr1Tr1.error = 0.091, Dr1 = 34.4)) get_RLum(results, data.object = "De") } \section{How to cite}{ Kreutzer, S., 2023. calc_gSGC(): Calculate De value based on the gSGC by Li et al., 2015. Function version 0.1.1. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Li, B., Roberts, R.G., Jacobs, Z., Li, S.-H., 2015. Potential of establishing a 'global standardised growth curve' (gSGC) for optical dating of quartz from sediments. Quaternary Geochronology 27, 94-104. 
doi:10.1016/j.quageo.2015.02.011 } \seealso{ \linkS4class{RLum.Results}, \link{get_RLum}, \link{uniroot} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \keyword{datagen} Luminescence/man/get_Layout.Rd0000644000176200001440000000430614521210045016037 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_Layout.R \name{get_Layout} \alias{get_Layout} \title{Collection of layout definitions} \usage{ get_Layout(layout) } \arguments{ \item{layout}{\link{character} or \link{list} object (\strong{required}): name of the layout definition to be returned. If name is provided the respective definition is returned. One of the following supported layout definitions is possible: \code{"default"}, \code{"journal.1"}, \code{"small"}, \code{"empty"}. User-specific layout definitions must be provided as a list object of predefined structure, see details.} } \value{ A list object with layout definitions for plot functions. } \description{ This helper function returns a list with layout definitions for homogeneous plotting. } \details{ The easiest way to create a user-specific layout definition is perhaps to create either an empty or a default layout object and fill/modify the definitions (\code{user.layout <- get_Layout(data = "empty")}). 
} \section{Function version}{ 0.1 } \examples{ ## read example data set data(ExampleData.DeValues, envir = environment()) ## show structure of the default layout definition layout.default <- get_Layout(layout = "default") str(layout.default) ## show colour definitions for Abanico plot, only layout.default$abanico$colour ## set Abanico plot title colour to orange layout.default$abanico$colour$main <- "orange" ## create Abanico plot with modofied layout definition plot_AbanicoPlot(data = ExampleData.DeValues, layout = layout.default) ## create Abanico plot with predefined layout "journal" plot_AbanicoPlot(data = ExampleData.DeValues, layout = "journal") } \author{ Michael Dietze, GFZ Potsdam (Germany) , RLum Developer Team} \section{How to cite}{ Dietze, M., 2023. get_Layout(): Collection of layout definitions. Function version 0.1. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } Luminescence/man/BaseDataSet.FractionalGammaDose.Rd0000644000176200001440000000161514521210044021701 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Luminescence-package.R \name{BaseDataSet.FractionalGammaDose} \alias{BaseDataSet.FractionalGammaDose} \title{Base data set of fractional gamma-dose values} \format{ A \code{\link{list}} with fractional gamma dose-rate values sorted by article: \tabular{ll}{ \code{Aitken1985}: \tab Fractional gamma-dose values from table H.1 } } \source{ Fractional gamma dose values were carefully read from the tables given in the references above. } \description{ Collection of (un-)published fractional gamma dose-rate values to scale the gamma-dose rate considering layer-to-layer variations in soil radioactivity. 
} \section{Version}{ 0.1 } \examples{ ## Load data data("BaseDataSet.FractionalGammaDose", envir = environment()) } \references{ Aitken, M.J., 1985. Thermoluminescence Dating. Academic Press, London. } \keyword{datasets} Luminescence/man/convert_Wavelength2Energy.Rd0000644000176200001440000001166514521210045021031 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/convert_Wavelength2Energy.R \name{convert_Wavelength2Energy} \alias{convert_Wavelength2Energy} \title{Emission Spectra Conversion from Wavelength to Energy Scales (Jacobian Conversion)} \usage{ convert_Wavelength2Energy(object, digits = 3L, order = FALSE) } \arguments{ \item{object}{\linkS4class{RLum.Data.Spectrum}, \link{data.frame}, \link{matrix} (\strong{required}): input object to be converted. If the input is not an \linkS4class{RLum.Data.Spectrum}, the first column is always treated as the wavelength column. The function supports a list of allowed input objects.} \item{digits}{\link{integer} (\emph{with default}): set the number of digits on the returned energy axis} \item{order}{\link{logical} (\emph{with default}): enables/disables sorting of the values in ascending energy order. After the conversion the longest wavelength has the lowest energy value and the shortest wavelength the highest. While this is correct, some R functions expect increasing x-values.} } \value{ The same object class as provided as input is returned. } \description{ The function provides a convenient and fast way to convert emission spectra wavelength to energy scales. The function works on \linkS4class{RLum.Data.Spectrum}, \link{data.frame} and \link{matrix} and a \link{list} of such objects. The function was written to smooth the workflow while analysing emission spectra data. This is in particular useful if you want to further treat your data and apply, e.g., a signal deconvolution. 
} \details{ The intensity of the spectrum is re-calculated using the following approach to recalculate wavelength and corresponding intensity values (e.g., Appendix 4 in Blasse and Grabmeier, 1994; Mooney and Kambhampati, 2013): \deqn{\phi_{E} = \phi_{\lambda} * \lambda^2 / (hc)} with \eqn{\phi_{E}} the intensity per interval of energy \eqn{E} (1/eV), \eqn{\phi_{\lambda}} the intensity per interval of wavelength \eqn{\lambda} (1/nm) and \eqn{h} (eV * s) the Planck constant and \eqn{c} (nm/s) the velocity of light. For transforming the wavelength axis (x-values) the equation as follow is used \deqn{E = hc/\lambda} } \note{ This conversion works solely for emission spectra. In case of absorption spectra only the x-axis has to be converted. } \section{Function version}{ 0.1.1 } \examples{ ##=====================## ##(1) Literature example after Mooney et al. (2013) ##(1.1) create matrix m <- matrix( data = c(seq(400, 800, 50), rep(1, 9)), ncol = 2) ##(1.2) set plot function to reproduce the ##literature figure p <- function(m) { plot(x = m[, 1], y = m[, 2]) polygon( x = c(m[, 1], rev(m[, 1])), y = c(m[, 2], rep(0, nrow(m)))) for (i in 1:nrow(m)) { lines(x = rep(m[i, 1], 2), y = c(0, m[i, 2])) } } ##(1.3) plot curves par(mfrow = c(1,2)) p(m) p(convert_Wavelength2Energy(m)) ##=====================## ##(2) Another example using density curves ##create dataset xy <- density( c(rnorm(n = 100, mean = 500, sd = 20), rnorm(n = 100, mean = 800, sd = 20))) xy <- data.frame(xy$x, xy$y) ##plot par(mfrow = c(1,2)) plot( xy, type = "l", xlim = c(150, 1000), xlab = "Wavelength [nm]", ylab = "Luminescence [a.u.]" ) plot( convert_Wavelength2Energy(xy), xy$y, type = "l", xlim = c(1.23, 8.3), xlab = "Energy [eV]", ylab = "Luminescence [a.u.]" ) } \section{How to cite}{ Kreutzer, S., 2023. convert_Wavelength2Energy(): Emission Spectra Conversion from Wavelength to Energy Scales (Jacobian Conversion). Function version 0.1.1. 
In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Blasse, G., Grabmaier, B.C., 1994. Luminescent Materials. Springer. Mooney, J., Kambhampati, P., 2013. Get the Basics Right: Jacobian Conversion of Wavelength and Energy Scales for Quantitative Analysis of Emission Spectra. J. Phys. Chem. Lett. 4, 3316–3318. \doi{10.1021/jz401508t} Mooney, J., Kambhampati, P., 2013. Correction to “Get the Basics Right: Jacobian Conversion of Wavelength and Energy Scales for Quantitative Analysis of Emission Spectra.” J. Phys. Chem. Lett. 4, 3316–3318. \doi{10.1021/jz401508t} \strong{Further reading} Angulo, G., Grampp, G., Rosspeintner, A., 2006. Recalling the appropriate representation of electronic spectra. Spectrochimica Acta Part A: Molecular and Biomolecular Spectroscopy 65, 727–731. \doi{10.1016/j.saa.2006.01.007} Wang, Y., Townsend, P.D., 2013. Potential problems in collection and data processing of luminescence signals. Journal of Luminescence 142, 202–211. 
\doi{10.1016/j.jlumin.2013.03.052} } \seealso{ \linkS4class{RLum.Data.Spectrum}, \link{plot_RLum} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \keyword{IO} Luminescence/man/ExampleData.BINfileData.Rd0000644000176200001440000000376714521210044020162 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Luminescence-package.R \docType{data} \name{ExampleData.BINfileData} \alias{ExampleData.BINfileData} \alias{CWOSL.SAR.Data} \alias{TL.SAR.Data} \title{Example data from a SAR OSL and SAR TL measurement for the package Luminescence} \format{ \code{CWOSL.SAR.Data}: SAR OSL measurement data \code{TL.SAR.Data}: SAR TL measurement data Each class object contains two slots: (a) \code{METADATA} is a \link{data.frame} with all metadata stored in the BIN file of the measurements and (b) \code{DATA} contains a list of vectors of the measured data (usually count values). } \source{ \strong{CWOSL.SAR.Data} \tabular{ll}{ Lab: \tab Luminescence Laboratory Bayreuth \cr Lab-Code: \tab BT607 \cr Location: \tab Saxony/Germany \cr Material: \tab Middle grain quartz measured on aluminium cups on a Risø TL/OSL DA-15 reader\cr Reference: \tab unpublished } \strong{TL.SAR.Data} \tabular{ll}{ Lab: \tab Luminescence Laboratory of Cologne\cr Lab-Code: \tab LP1_5\cr Location: \tab Spain\cr Material: \tab Flint \cr Setup: \tab Risoe TL/OSL DA-20 reader (Filter: Semrock Brightline, HC475/50, N2, unpolished steel discs) \cr Reference: \tab unpublished \cr Remarks: \tab dataset limited to one position } } \description{ Example data from a SAR OSL and TL measurement for package Luminescence directly extracted from a Risoe BIN-file and provided in an object of type \linkS4class{Risoe.BINfileData} } \note{ Please note that this example data cannot be exported to a BIN-file using the function \code{writeR2BIN} as it was generated and implemented in the package long time ago. 
In the meantime the BIN-file format changed. } \section{Version}{ 0.1 } \examples{ ## show first 5 elements of the METADATA and DATA elements in the terminal data(ExampleData.BINfileData, envir = environment()) CWOSL.SAR.Data@METADATA[1:5,] CWOSL.SAR.Data@DATA[1:5] } \references{ \strong{CWOSL.SAR.Data}: unpublished data \strong{TL.SAR.Data}: unpublished data } \keyword{datasets} Luminescence/man/apply_CosmicRayRemoval.Rd0000644000176200001440000001151514521210044020346 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/apply_CosmicRayRemoval.R \name{apply_CosmicRayRemoval} \alias{apply_CosmicRayRemoval} \title{Function to remove cosmic rays from an RLum.Data.Spectrum S4 class object} \usage{ apply_CosmicRayRemoval( object, method = "smooth", method.Pych.smoothing = 2, method.Pych.threshold_factor = 3, MARGIN = 2, verbose = FALSE, plot = FALSE, ... ) } \arguments{ \item{object}{\linkS4class{RLum.Data.Spectrum} or \linkS4class{RLum.Analysis} (\strong{required}): input object to be treated. This can be also provided as \link{list}. If an \linkS4class{RLum.Analysis} object is provided, only the \linkS4class{RLum.Data.Spectrum} objects are treated. Please note: this mixing of objects do not work for a list of \code{RLum.Data} objects.} \item{method}{\link{character} (\emph{with default}): Defines method that is applied for cosmic ray removal. Allowed methods are \code{smooth}, the default, (\link{smooth}), \code{smooth.spline} (\link{smooth.spline}) and \code{Pych}. See details for further information.} \item{method.Pych.smoothing}{\link{integer} (\emph{with default}): Smoothing parameter for cosmic ray removal according to Pych (2003). The value defines how many neighbouring values in each frame are used for smoothing (e.g., \code{2} means that the two previous and two following values are used).} \item{method.Pych.threshold_factor}{\link{numeric} (\emph{with default}): Threshold for zero-bins in the histogram. 
Small values mean that more peaks are removed, but signal might be also affected by this removal.} \item{MARGIN}{\link{integer} (\emph{with default}): on which part the function cosmic ray removal should be applied on: \itemize{ \item 1 = along the time axis (line by line), \item 2 = along the wavelength axis (column by column). } \strong{Note:} This argument currently only affects the methods \code{smooth} and \code{smooth.spline}} \item{verbose}{\link{logical} (\emph{with default}): Option to suppress terminal output.,} \item{plot}{\link{logical} (\emph{with default}): If \code{TRUE} the histograms used for the cosmic-ray removal are returned as plot including the used threshold. Note: A separate plot is returned for each frame! Currently only for \code{method = "Pych"} a graphical output is provided.} \item{...}{further arguments and graphical parameters that will be passed to the \link{smooth} function.} } \value{ Returns same object as input. } \description{ The function provides several methods for cosmic-ray removal and spectrum smoothing \linkS4class{RLum.Data.Spectrum} objects and such objects embedded in \link{list} or \linkS4class{RLum.Analysis} objects. } \details{ \strong{\code{method = "Pych"}} This method applies the cosmic-ray removal algorithm described by Pych (2003). Some aspects that are different to the publication: \itemize{ \item For interpolation between neighbouring values the median and not the mean is used. \item The number of breaks to construct the histogram is set to: \code{length(number.of.input.values)/2} } For further details see references below. \strong{\code{method = "smooth"}} Method uses the function \link{smooth} to remove cosmic rays. Arguments that can be passed are: \code{kind}, \code{twiceit} \strong{\code{method = "smooth.spline"}} Method uses the function \link{smooth.spline} to remove cosmic rays. 
Arguments that can be passed are: \code{spar} \strong{How to combine methods?} Different methods can be combined by applying the method repeatedly to the dataset (see example). } \section{Function version}{ 0.3.0 } \examples{ ##(1) - use with your own data and combine (uncomment for usage) ## run two times the default method and smooth with another method ## your.spectrum <- apply_CosmicRayRemoval(your.spectrum, method = "Pych") ## your.spectrum <- apply_CosmicRayRemoval(your.spectrum, method = "Pych") ## your.spectrum <- apply_CosmicRayRemoval(your.spectrum, method = "smooth") } \section{How to cite}{ Kreutzer, S., 2023. apply_CosmicRayRemoval(): Function to remove cosmic rays from an RLum.Data.Spectrum S4 class object. Function version 0.3.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Pych, W., 2004. A Fast Algorithm for Cosmic-Ray Removal from Single Images. The Astronomical Society of the Pacific 116 (816), 148-153. 
\doi{10.1086/381786} } \seealso{ \linkS4class{RLum.Data.Spectrum}, \linkS4class{RLum.Analysis}, \link{smooth}, \link{smooth.spline}, \link{apply_CosmicRayRemoval} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \keyword{manip} Luminescence/man/analyse_baSAR.Rd0000644000176200001440000005264514521210044016377 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/analyse_baSAR.R \name{analyse_baSAR} \alias{analyse_baSAR} \title{Bayesian models (baSAR) applied on luminescence data} \usage{ analyse_baSAR( object, XLS_file = NULL, aliquot_range = NULL, source_doserate = NULL, signal.integral, signal.integral.Tx = NULL, background.integral, background.integral.Tx = NULL, irradiation_times = NULL, sigmab = 0, sig0 = 0.025, distribution = "cauchy", baSAR_model = NULL, n.MCMC = 1e+05, fit.method = "EXP", fit.force_through_origin = TRUE, fit.includingRepeatedRegPoints = TRUE, method_control = list(), digits = 3L, distribution_plot = "kde", plot = TRUE, plot_reduced = TRUE, plot.single = FALSE, verbose = TRUE, ... ) } \arguments{ \item{object}{\linkS4class{Risoe.BINfileData}, \linkS4class{RLum.Results}, \link{list} of \linkS4class{RLum.Analysis}, \link{character} or \link{list} (\strong{required}): input object used for the Bayesian analysis. If a \code{character} is provided the function assumes a file connection and tries to import a BIN/BINX-file using the provided path. If a \code{list} is provided the list can only contain either \code{Risoe.BINfileData} objects or \code{character}s providing a file connection. Mixing of both types is not allowed. If an \linkS4class{RLum.Results} is provided the function directly starts with the Bayesian Analysis (see details)} \item{XLS_file}{\link{character} (\emph{optional}): XLS_file with data for the analysis. 
This file must contain 3 columns: the name of the file, the disc position and the grain position (the last being 0 for multi-grain measurements).\cr Alternatively a \code{data.frame} of similar structure can be provided.} \item{aliquot_range}{\link{numeric} (\emph{optional}): allows to limit the range of the aliquots used for the analysis. This argument has only an effect if the argument \code{XLS_file} is used or the input is the previous output (i.e. is \linkS4class{RLum.Results}). In this case the new selection will add the aliquots to the removed aliquots table.} \item{source_doserate}{\link{numeric} \strong{(required)}: source dose rate of beta-source used for the measurement and its uncertainty in Gy/s, e.g., \code{source_doserate = c(0.12, 0.04)}. Parameter can be provided as \code{list}, for the case that more than one BIN-file is provided, e.g., \code{source_doserate = list(c(0.04, 0.004), c(0.05, 0.004))}.} \item{signal.integral}{\link{vector} (\strong{required}): vector with the limits for the signal integral used for the calculation, e.g., \code{signal.integral = c(1:5)}. Ignored if \code{object} is an \linkS4class{RLum.Results} object. The parameter can be provided as \code{list}, see \code{source_doserate}.} \item{signal.integral.Tx}{\link{vector} (\emph{optional}): vector with the limits for the signal integral for the Tx curve. I f nothing is provided the value from \code{signal.integral} is used and it is ignored if \code{object} is an \linkS4class{RLum.Results} object. The parameter can be provided as \code{list}, see \code{source_doserate}.} \item{background.integral}{\link{vector} (\strong{required}): vector with the bounds for the background integral. Ignored if \code{object} is an \linkS4class{RLum.Results} object. The parameter can be provided as \code{list}, see \code{source_doserate}.} \item{background.integral.Tx}{\link{vector} (\emph{optional}): vector with the limits for the background integral for the Tx curve. 
If nothing is provided the value from \code{background.integral} is used. Ignored if \code{object} is an \linkS4class{RLum.Results} object. The parameter can be provided as \code{list}, see \code{source_doserate}.} \item{irradiation_times}{\link{numeric} (\emph{optional}): if set this vector replaces all irradiation times for one aliquot and one cycle (Lx and Tx curves) and recycles it for all others cycles and aliquots. Please note that if this argument is used, for every(!) single curve in the dataset an irradiation time needs to be set.} \item{sigmab}{\link{numeric} (\emph{with default}): option to set a manual value for the overdispersion (for \code{LnTx} and \code{TnTx}), used for the \code{Lx}/\code{Tx} error calculation. The value should be provided as absolute squared count values, cf. \link{calc_OSLLxTxRatio}. The parameter can be provided as \code{list}, see \code{source_doserate}.} \item{sig0}{\link{numeric} (\emph{with default}): allow adding an extra component of error to the final Lx/Tx error value (e.g., instrumental error, see details is \link{calc_OSLLxTxRatio}). The parameter can be provided as \code{list}, see \code{source_doserate}.} \item{distribution}{\link{character} (\emph{with default}): type of distribution that is used during Bayesian calculations for determining the Central dose and overdispersion values. Allowed inputs are \code{"cauchy"}, \code{"normal"} and \code{"log_normal"}.} \item{baSAR_model}{\link{character} (\emph{optional}): option to provide an own modified or new model for the Bayesian calculation (see details). 
If an own model is provided the argument \code{distribution} is ignored and set to \code{'user_defined'}} \item{n.MCMC}{\link{integer} (\emph{with default}): number of iterations for the Markov chain Monte Carlo (MCMC) simulations} \item{fit.method}{\link{character} (\emph{with default}): equation used for the fitting of the dose-response curve using the function \link{plot_GrowthCurve} and then for the Bayesian modelling. Here supported methods: \code{EXP}, \code{EXP+LIN} and \code{LIN}} \item{fit.force_through_origin}{\link{logical} (\emph{with default}): force fitting through origin} \item{fit.includingRepeatedRegPoints}{\link{logical} (\emph{with default}): includes the recycling point (assumed to be measured during the last cycle)} \item{method_control}{\link{list} (\emph{optional}): named list of control parameters that can be directly passed to the Bayesian analysis, e.g., \code{method_control = list(n.chains = 4)}. See details for further information} \item{digits}{\link{integer} (\emph{with default}): round output to the number of given digits} \item{distribution_plot}{\link{character} (\emph{with default}): sets the final distribution plot that shows equivalent doses obtained using the frequentist approach and sets in the central dose as comparison obtained using baSAR. Allowed input is \code{'abanico'} or \code{'kde'}. 
If set to \code{NULL} nothing is plotted.} \item{plot}{\link{logical} (\emph{with default}): enables or disables plot output} \item{plot_reduced}{\link{logical} (\emph{with default}): enables or disables the advanced plot output} \item{plot.single}{\link{logical} (\emph{with default}): enables or disables single plots or plots arranged by \code{analyse_baSAR}} \item{verbose}{\link{logical} (\emph{with default}): enables or disables verbose mode} \item{...}{parameters that can be passed to the function \link{calc_OSLLxTxRatio} (almost full support), \link[readxl:read_excel]{readxl::read_excel} (full support), \link{read_BIN2R} (\code{n.records}, \code{position}, \code{duplicated.rm}), see details.} } \value{ Function returns results numerically and graphically: -----------------------------------\cr \verb{[ NUMERICAL OUTPUT ]}\cr -----------------------------------\cr \strong{\code{RLum.Results}}-object \strong{slot:} \strong{\verb{@data}} \tabular{lll}{ \strong{Element} \tab \strong{Type} \tab \strong{Description}\cr \verb{$summary} \tab \code{data.frame} \tab statistical summary, including the central dose \cr \verb{$mcmc} \tab \code{mcmc} \tab \link[coda:mcmc.list]{coda::mcmc.list} object including raw output \cr \verb{$models} \tab \code{character} \tab implemented models used in the baSAR-model core \cr \verb{$input_object} \tab \code{data.frame} \tab summarising table (same format as the XLS-file) including, e.g., Lx/Tx values\cr \verb{$removed_aliquots} \tab \code{data.frame} \tab table with removed aliquots (e.g., \code{NaN}, or \code{Inf} \code{Lx}/\code{Tx} values). 
If nothing was removed \code{NULL} is returned } \strong{slot:} \strong{\verb{@info}} The original function call ------------------------\cr \verb{[ PLOT OUTPUT ]}\cr ------------------------\cr \itemize{ \item (A) Ln/Tn curves with set integration limits, \item (B) trace plots are returned by the baSAR-model, showing the convergence of the parameters (trace) and the resulting kernel density plots. If \code{plot_reduced = FALSE} for every(!) dose a trace and a density plot is returned (this may take a long time), \item (C) dose plots showing the dose for every aliquot as boxplots with the HPD marked within. If boxes are coloured 'orange' or 'red' the aliquot itself should be checked, \item (D) the dose response curves resulting from the monitoring of the Bayesian modelling are provided along with the Lx/Tx values and the HPD. Note: The number of curves displayed is limited to 1000 (random choice) for performance reasons, \item (E) the final plot is the De distribution as calculated using the conventional (frequentist) approach and the central dose with the HPDs marked within. This figure is only provided for a comparison, no further statistical conclusion should be drawn from it. } \strong{Please note: If distribution was set to \code{log_normal} the central dose is given as geometric mean!} } \description{ This function allows the application of Bayesian models on luminescence data, measured with the single-aliquot regenerative-dose (SAR, Murray and Wintle, 2000) protocol. In particular, it follows the idea proposed by Combès et al., 2015 of using a hierarchical model for estimating a central equivalent dose from a set of luminescence measurements. This function is (I) the adoption of this approach for the R environment and (II) an extension and a technical refinement of the published code. 
} \details{ Internally the function consists of two parts: (I) The Bayesian core for the Bayesian calculations and applying the hierarchical model and (II) a data pre-processing part. The Bayesian core can be run independently, if the input data are sufficient (see below). The data pre-processing part was implemented to simplify the analysis for the user as all needed data pre-processing is done by the function, i.e. in theory it is enough to provide a BIN/BINX-file with the SAR measurement data. For the Bayesian analysis for each aliquot the following information are needed from the SAR analysis. \code{LxTx}, the \code{LxTx} error and the dose values for all regeneration points. \strong{How the systematic error contribution is calculated?} Standard errors (so far) provided with the source dose rate are considered as systematic uncertainties and added to final central dose by: \deqn{systematic.error = 1/n \sum SE(source.doserate)} \deqn{SE(central.dose.final) = \sqrt{SE(central.dose)^2 + systematic.error^2}} Please note that this approach is rather rough and can only be valid if the source dose rate errors, in case different readers had been used, are similar. In cases where more than one source dose rate is provided a warning is given. \strong{Input / output scenarios} Various inputs are allowed for this function. Unfortunately this makes the function handling rather complex, but at the same time very powerful. Available scenarios: \strong{(1) - \code{object} is BIN-file or link to a BIN-file} Finally it does not matter how the information of the BIN/BINX file are provided. The function supports \strong{(a)} either a path to a file or directory or a \code{list} of file names or paths or \strong{(b)} a \linkS4class{Risoe.BINfileData} object or a list of these objects. The latter one can be produced by using the function \link{read_BIN2R}, but this function is called automatically if only a file name and/or a path is provided. 
In both cases it will become the data that can be used for the analysis. \verb{[XLS_file = NULL]} If no XLS file (or data frame with the same format) is provided the function runs an automatic process that consists of the following steps: \enumerate{ \item Select all valid aliquots using the function \link{verify_SingleGrainData} \item Calculate \code{Lx/Tx} values using the function \link{calc_OSLLxTxRatio} \item Calculate De values using the function \link{plot_GrowthCurve} } These processed data are subsequently used for the Bayesian analysis \verb{[XLS_file != NULL]} If an XLS-file is provided or a \code{data.frame} providing similar information the pre-processing consists of the following steps: \enumerate{ \item Calculate \code{Lx/Tx} values using the function \link{calc_OSLLxTxRatio} \item Calculate De values using the function \link{plot_GrowthCurve} } This means, the XLS file should contain a selection of the BIN-file names and the aliquots selected for the further analysis. This allows a manual selection of input data, as the automatic selection by \link{verify_SingleGrainData} might not be totally sufficient. \strong{(2) - \code{object} \verb{RLum.Results object}} If an \linkS4class{RLum.Results} object is provided as input and(!) this object was previously created by the function \code{analyse_baSAR()} itself, the pre-processing part is skipped and the function starts directly the Bayesian analysis. This option is very powerful as it allows to change parameters for the Bayesian analysis without the need to repeat the data pre-processing. If furthermore the argument \code{aliquot_range} is set, aliquots can be manually excluded based on previous runs. 
\strong{\code{method_control}} These are arguments that can be passed directly to the Bayesian calculation core, supported arguments are: \tabular{lll}{ \strong{Parameter} \tab \strong{Type} \tab \strong{Description}\cr \code{lower_centralD} \tab \link{numeric} \tab sets the lower bound for the expected De range. Change it only if you know what you are doing!\cr \code{upper_centralD} \tab \link{numeric} \tab sets the upper bound for the expected De range. Change it only if you know what you are doing!\cr \code{n.chains} \tab \link{integer} \tab sets number of parallel chains for the model (default = 3) (cf. \link[rjags:jags.model]{rjags::jags.model})\cr \code{inits} \tab \link{list} \tab option to set initialisation values (cf. \link[rjags:jags.model]{rjags::jags.model}) \cr \code{thin} \tab \link{numeric} \tab thinning interval for monitoring the Bayesian process (cf. \link[rjags:jags.model]{rjags::jags.model})\cr \code{variable.names} \tab \link{character} \tab set the variables to be monitored during the MCMC run, default: \code{'central_D'}, \code{'sigma_D'}, \code{'D'}, \code{'Q'}, \code{'a'}, \code{'b'}, \code{'c'}, \code{'g'}. Note: only variables present in the model can be monitored. } \strong{User defined models}\cr The function provides the option to modify and to define own models that can be used for the Bayesian calculation. In the case the user wants to modify a model, a new model can be piped into the function via the argument \code{baSAR_model} as \code{character}. The model has to be provided in the JAGS dialect of the BUGS language (cf. \link[rjags:jags.model]{rjags::jags.model}) and parameter names given with the pre-defined names have to be respected, otherwise the function will break. \strong{FAQ} Q: How can I set the seed for the random number generator (RNG)? A: Use the argument \code{method_control}, e.g., for three MCMC chains (as it is the default): \if{html}{\out{
}}\preformatted{method_control = list( inits = list( list(.RNG.name = "base::Wichmann-Hill", .RNG.seed = 1), list(.RNG.name = "base::Wichmann-Hill", .RNG.seed = 2), list(.RNG.name = "base::Wichmann-Hill", .RNG.seed = 3) )) }\if{html}{\out{
 }} This sets a reproducible set for every chain separately.\cr Q: How can I modify the output plots? A: You can't, but you can use the function output to create your own, modified plots. Q: Can I change the boundaries for the central_D? A: Yes, we made it possible, but we DO NOT recommend it, unless you know what you are doing!\cr Example: \verb{method_control = list(lower_centralD = 10)} Q: The lines in the baSAR-model appear to be in a wrong logical order?\cr A: This is correct and allowed (cf. JAGS manual) \strong{Additional arguments supported via the \code{...} argument} This list summarizes the additional arguments that can be passed to the internally used functions. \tabular{llll}{ \strong{Supported argument} \tab \strong{Corresponding function} \tab \strong{Default} \tab \strong{Short description}\cr \code{threshold} \tab \link{verify_SingleGrainData} \tab \code{30} \tab change rejection threshold for curve selection \cr \code{sheet} \tab \link[readxl:read_excel]{readxl::read_excel} \tab \code{1} \tab select XLS-sheet for import\cr \code{col_names} \tab \link[readxl:read_excel]{readxl::read_excel} \tab \code{TRUE} \tab first row in XLS-file is header\cr \code{col_types} \tab \link[readxl:read_excel]{readxl::read_excel} \tab \code{NULL} \tab limit import to specific columns\cr \code{skip} \tab \link[readxl:read_excel]{readxl::read_excel} \tab \code{0} \tab number of rows to be skipped during import\cr \code{n.records} \tab \link{read_BIN2R} \tab \code{NULL} \tab limit records during BIN-file import\cr \code{duplicated.rm} \tab \link{read_BIN2R} \tab \code{TRUE} \tab remove duplicated records in the BIN-file\cr \code{pattern} \tab \link{read_BIN2R} \tab \code{TRUE} \tab select BIN-file by name pattern\cr \code{position} \tab \link{read_BIN2R} \tab \code{NULL} \tab limit import to a specific position\cr \code{background.count.distribution} \tab \link{calc_OSLLxTxRatio} \tab \code{"non-poisson"} \tab set assumed count distribution\cr \code{fit.weights} \tab 
\link{plot_GrowthCurve} \tab \code{TRUE} \tab enables / disables fit weights\cr \code{fit.bounds} \tab \link{plot_GrowthCurve} \tab \code{TRUE} \tab enables / disables fit bounds\cr \code{NumberIterations.MC} \tab \link{plot_GrowthCurve} \tab \code{100} \tab number of MC runs for error calculation\cr \code{output.plot} \tab \link{plot_GrowthCurve} \tab \code{TRUE} \tab enables / disables dose response curve plot\cr \code{output.plotExtended} \tab \link{plot_GrowthCurve} \tab \code{TRUE} \tab enables / disables extended dose response curve plot\cr } } \note{ \strong{If you provide more than one BIN-file}, it is \strong{strongly} recommended to provide a \code{list} with the same number of elements for the following parameters: \code{source_doserate}, \code{signal.integral}, \code{signal.integral.Tx}, \code{background.integral}, \code{background.integral.Tx}, \code{sigmab}, \code{sig0}. Example for two BIN-files: \code{source_doserate = list(c(0.04, 0.006), c(0.05, 0.006))} \strong{The function is currently limited to work with standard Risoe BIN-files only!} } \section{Function version}{ 0.1.33 } \examples{ ##(1) load package test data set data(ExampleData.BINfileData, envir = environment()) ##(2) selecting relevant curves, and limit dataset CWOSL.SAR.Data <- subset( CWOSL.SAR.Data, subset = POSITION\%in\%c(1:3) & LTYPE == "OSL") \dontrun{ ##(3) run analysis ##please not that the here selected parameters are ##choosen for performance, not for reliability results <- analyse_baSAR( object = CWOSL.SAR.Data, source_doserate = c(0.04, 0.001), signal.integral = c(1:2), background.integral = c(80:100), fit.method = "LIN", plot = FALSE, n.MCMC = 200 ) print(results) ##XLS_file template ##copy and paste this the code below in the terminal ##you can further use the function write.csv() to export the example XLS_file <- structure( list( BIN_FILE = NA_character_, DISC = NA_real_, GRAIN = NA_real_), .Names = c("BIN_FILE", "DISC", "GRAIN"), class = "data.frame", row.names = 1L ) 
} } \section{How to cite}{ Mercier, N., Kreutzer, S., 2023. analyse_baSAR(): Bayesian models (baSAR) applied on luminescence data. Function version 0.1.33. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Combès, B., Philippe, A., Lanos, P., Mercier, N., Tribolo, C., Guerin, G., Guibert, P., Lahaye, C., 2015. A Bayesian central equivalent dose model for optically stimulated luminescence dating. Quaternary Geochronology 28, 62-70. doi:10.1016/j.quageo.2015.04.001 Mercier, N., Kreutzer, S., Christophe, C., Guerin, G., Guibert, P., Lahaye, C., Lanos, P., Philippe, A., Tribolo, C., 2016. Bayesian statistics in luminescence dating: The 'baSAR'-model and its implementation in the R package 'Luminescence'. Ancient TL 34, 14-21. \strong{Further reading} Gelman, A., Carlin, J.B., Stern, H.S., Dunson, D.B., Vehtari, A., Rubin, D.B., 2013. Bayesian Data Analysis, Third Edition. CRC Press. Murray, A.S., Wintle, A.G., 2000. Luminescence dating of quartz using an improved single-aliquot regenerative-dose protocol. Radiation Measurements 32, 57-73. doi:10.1016/S1350-4487(99)00253-X Plummer, M., 2017. JAGS Version 4.3.0 user manual. 
\verb{https://sourceforge.net/projects/mcmc-jags/files/Manuals/4.x/jags_user_manual.pdf/download} } \seealso{ \link{read_BIN2R}, \link{calc_OSLLxTxRatio}, \link{plot_GrowthCurve}, \link[readxl:read_excel]{readxl::read_excel}, \link{verify_SingleGrainData}, \link[rjags:jags.model]{rjags::jags.model}, \link[rjags:coda.samples]{rjags::coda.samples}, \link{boxplot.default} } \author{ Norbert Mercier, IRAMAT-CRP2A, Université Bordeaux Montaigne (France) \cr Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) \cr The underlying Bayesian model based on a contribution by Combès et al., 2015. , RLum Developer Team} \keyword{datagen} Luminescence/man/calc_Kars2008.Rd0000644000176200001440000001031114521210045016110 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calc_Kars2008.R \name{calc_Kars2008} \alias{calc_Kars2008} \title{Apply the Kars et al. (2008) model (deprecated)} \usage{ calc_Kars2008(fit.method = "EXP", ...) } \arguments{ \item{fit.method}{\link{character} (\emph{with default}): Fit function of the dose response curve. Can either be \code{EXP} (the default) or \code{GOK}. Note that \code{EXP} (single saturating exponential) is the original function the model after Huntley (2006) and Kars et al. (2008) was designed to use. The use of a general-order kinetics function (\code{GOK}) is an experimental adaption of the model and should only be used with great care.} \item{...}{Parameters passed to \link{calc_Huntley2006}.} } \value{ An \linkS4class{RLum.Results} object is returned: } \description{ A function to calculate the expected sample specific fraction of saturation following Kars et al. (2008) and Huntley (2006). This function is deprecated and will eventually be removed. Please use \code{calc_Huntley2006()} instead. } \details{ This function applies the approach described in Kars et al. 
(2008), developed from the model of Huntley (2006) to calculate the expected sample specific fraction of saturation of a feldspar and also to calculate fading corrected age using this model. \eqn{\rho}' (\code{rhop}), the density of recombination centres, is a crucial parameter of this model and must be determined separately from a fading measurement. The function \link{analyse_FadingMeasurement} can be used to calculate the sample specific \eqn{\rho}' value. } \note{ \strong{This function is deprecated and will eventually be removed from the package.} \strong{Please use the function \code{\link[=calc_Huntley2006]{calc_Huntley2006()}} instead} \strong{(use \code{fit.method = "EXP"} to apply the model after Kars et al., 2008).} } \section{Function version}{ 0.4.0 } \examples{ ## Load example data (sample UNIL/NB123, see ?ExampleData.Fading) data("ExampleData.Fading", envir = environment()) ## (1) Set all relevant parameters # a. fading measurement data (IR50) fading_data <- ExampleData.Fading$fading.data$IR50 # b. Dose response curve data data <- ExampleData.Fading$equivalentDose.data$IR50 ## (2) Define required function parameters ddot <- c(7.00, 0.004) readerDdot <- c(0.134, 0.0067) # Analyse fading measurement and get an estimate of rho'. # Note that the RLum.Results object can be directly used for further processing. # The number of MC runs is reduced for this example rhop <- analyse_FadingMeasurement(fading_data, plot = TRUE, verbose = FALSE, n.MC = 10) ## (3) Apply the Kars et al. (2008) model to the data kars <- suppressWarnings( calc_Kars2008(data = data, rhop = rhop, ddot = ddot, readerDdot = readerDdot, n.MC = 25) ) } \section{How to cite}{ King, G.E., Burow, C., 2023. calc_Kars2008(): Apply the Kars et al. (2008) model (deprecated). Function version 0.4.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. 
Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Kars, R.H., Wallinga, J., Cohen, K.M., 2008. A new approach towards anomalous fading correction for feldspar IRSL dating-tests on samples in field saturation. Radiation Measurements 43, 786-790. doi:10.1016/j.radmeas.2008.01.021 Huntley, D.J., 2006. An explanation of the power-law decay of luminescence. Journal of Physics: Condensed Matter 18, 1359-1365. doi:10.1088/0953-8984/18/4/020 King, G.E., Herman, F., Lambert, R., Valla, P.G., Guralnik, B., 2016. Multi-OSL-thermochronometry of feldspar. Quaternary Geochronology 33, 76-87. doi:10.1016/j.quageo.2016.01.004 \strong{Further reading} Morthekai, P., Jain, M., Cunha, P.P., Azevedo, J.M., Singhvi, A.K., 2011. An attempt to correct for the fading in million year old basaltic rocks. Geochronometria 38(3), 223-230. } \author{ Georgina E. King, University of Bern (Switzerland) \cr Christoph Burow, University of Cologne (Germany) , RLum Developer Team} \keyword{datagen} Luminescence/man/read_RF2R.Rd0000644000176200001440000000371014521210045015427 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/read_RF2R.R \name{read_RF2R} \alias{read_RF2R} \title{Import RF-files to R} \usage{ read_RF2R(file) } \arguments{ \item{file}{\link{character} (\strong{required}): path and file name of the RF file. Alternatively a list of file names can be provided.} } \value{ Returns an S4 \linkS4class{RLum.Analysis} object containing \linkS4class{RLum.Data.Curve} objects for each curve. } \description{ Import files produced by the IR-RF 'ImageJ' macro (\code{SR-RF.ijm}; Mittelstraß and Kreutzer, 2021) into R and create a list of \linkS4class{RLum.Analysis} objects } \details{ The results of spatially resolved IR-RF data are summarised in so-called RF-files ((Mittelstraß and Kreutzer, 2021). 
This functions provides an easy import to process the data seamlessly with the R package 'Luminescence'. The output of the function can be passed to the function \link{analyse_IRSAR.RF} } \section{Function version}{ 0.1.0 } \examples{ ##Import file <- system.file("extdata", "RF_file.rf", package = "Luminescence") temp <- read_RF2R(file) } \section{How to cite}{ Kreutzer, S., 2023. read_RF2R(): Import RF-files to R. Function version 0.1.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Mittelstraß, D., Kreutzer, S., 2021. Spatially resolved infrared radiofluorescence: single-grain K-feldspar dating using CCD imaging. Geochronology 3, 299–319. \doi{10.5194/gchron-3-299-2021} } \seealso{ \linkS4class{RLum.Analysis}, \linkS4class{RLum.Data.Curve}, \link{analyse_IRSAR.RF} } \author{ Sebastian Kreutzer, Geography & Earth Science, Aberystwyth University (United Kingdom) , RLum Developer Team} \keyword{IO} Luminescence/man/write_R2TIFF.Rd0000644000176200001440000000405414521210045016071 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/write_R2TIFF.R \name{write_R2TIFF} \alias{write_R2TIFF} \title{Export RLum.Data.Image and RLum.Data.Spectrum objects to TIFF Images} \usage{ write_R2TIFF(object, file = tempfile(), norm = 65535, ...) } \arguments{ \item{object}{\linkS4class{RLum.Data.Image} or \linkS4class{RLum.Data.Spectrum} object (\strong{required}): input object, can be a \link{list} of such objects} \item{file}{\link{character} (\strong{required}): the file name and path} \item{norm}{\link{numeric} (\emph{with default}): normalisation values. 
Values in TIFF files must range between 0-1, however, usually in imaging applications the pixel values are real integer count values. The normalisation to the highest 16-bit integer value (2^16 - 1) ensures that the numerical values are retained in the exported image. If \code{1} nothing is normalised.} \item{...}{further arguments to be passed to \link[tiff:writeTIFF]{tiff::writeTIFF}.} } \value{ A TIFF file } \description{ Simple wrapper around \link[tiff:writeTIFF]{tiff::writeTIFF} to export suitable RLum-class objects to TIFF images. Per default 16-bit TIFF files are exported. } \section{Function version}{ 0.1.0 } \examples{ data(ExampleData.RLum.Data.Image, envir = environment()) write_R2TIFF(ExampleData.RLum.Data.Image, file = tempfile()) } \seealso{ \link[tiff:writeTIFF]{tiff::writeTIFF}, \linkS4class{RLum.Data.Image}, \linkS4class{RLum.Data.Spectrum} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. write_R2TIFF(): Export RLum.Data.Image and RLum.Data.Spectrum objects to TIFF Images. Function version 0.1.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. 
https://CRAN.R-project.org/package=Luminescence } \keyword{IO} Luminescence/man/extract_IrradiationTimes.Rd0000644000176200001440000001522514521210045020726 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/extract_IrradiationTimes.R \name{extract_IrradiationTimes} \alias{extract_IrradiationTimes} \title{Extract Irradiation Times from an XSYG-file} \usage{ extract_IrradiationTimes( object, file.BINX, recordType = c("irradiation (NA)", "IRSL (UVVIS)", "OSL (UVVIS)", "TL (UVVIS)"), compatibility.mode = TRUE, txtProgressBar = TRUE ) } \arguments{ \item{object}{\link{character}, \linkS4class{RLum.Analysis} or \link{list} (\strong{required}): path and file name of the XSYG file or an \linkS4class{RLum.Analysis} produced by the function \link{read_XSYG2R}; alternatively a \code{list} of \linkS4class{RLum.Analysis} can be provided. \strong{Note}: If an \linkS4class{RLum.Analysis} is used, any input for the arguments \code{file.BINX} and \code{recordType} will be ignored!} \item{file.BINX}{\link{character} (\emph{optional}): path and file name of an existing BINX-file. If a file name is provided the file will be updated with the information from the XSYG file in the same folder as the original BINX-file. \strong{Note:} The XSYG and the BINX-file have to be originate from the same measurement!} \item{recordType}{\link{character} (\emph{with default}): select relevant curves types from the XSYG file or \linkS4class{RLum.Analysis} object. As the XSYG-file format comprises much more information than usually needed for routine data analysis and allowed in the BINX-file format, only the relevant curves are selected by using the function \link{get_RLum}. The argument \code{recordType} works as described for this function. \strong{Note:} A wrong selection will causes a function error. 
Please change this argument only if you have reasons to do so.} \item{compatibility.mode}{\link{logical} (\emph{with default}): this option is parsed only if a BIN/BINX file is produced and it will reset all position values to a max. value of 48, cf.\link{write_R2BIN}} \item{txtProgressBar}{\link{logical} (\emph{with default}): enables \code{TRUE} or disables \code{FALSE} the progression bars during import and export} } \value{ An \linkS4class{RLum.Results} object is returned with the following structure: \if{html}{\out{
}}\preformatted{.. $irr.times (data.frame) }\if{html}{\out{
 }} If a BINX-file path and name is set, the output will be additionally transferred into a new BINX-file with the function name as suffix. For the output the path of the input BINX-file itself is used. Note that this will not work if the input object is a file path to an XSYG-file, instead of a link to only one file. In this case the argument input for \code{file.BINX} is ignored. In the self call mode (input is a \code{list} of \linkS4class{RLum.Analysis} objects) a list of \linkS4class{RLum.Results} is returned. } \description{ Extracts irradiation times, dose and times since last irradiation, from a Freiberg Instruments XSYG-file. This information can be further used to update an existing BINX-file. } \details{ The function was written to compensate for missing information in the BINX-file output of Freiberg Instruments lexsyg readers. As all information is available within the XSYG-file anyway, this information can be extracted and used for further analysis and/or stored in a new BINX-file, which can be further used by other software, e.g., Analyst (Geoff Duller). Typical application example: g-value estimation from fading measurements using the Analyst or any other self written script. Besides some simple data transformation steps the function applies the functions \link{read_XSYG2R}, \link{read_BIN2R}, \link{write_R2BIN} for data import and export. } \note{ The function can also be used to extract irradiation times from \linkS4class{RLum.Analysis} objects previously imported via \link{read_BIN2R} (\code{fastForward = TRUE}) or in combination with \link{Risoe.BINfileData2RLum.Analysis}. Unfortunately the timestamp might not be very precise (or even invalid), but it allows to essentially treat different formats in a similar manner. The produced output object still contains the irradiation steps to keep the output transparent. 
However, for the BINX-file export these steps are removed as the BINX-file format description does not allow irradiations as separate sequence steps. \strong{BINX-file 'Time Since Irradiation' value differs from the table output?} The way the value 'Time Since Irradiation' is defined differs. In the BINX-file the 'Time Since Irradiation' is calculated as the 'Time Since Irradiation' plus the 'Irradiation Time'. The table output returns only the real 'Time Since Irradiation', i.e. time between the end of the irradiation and the next step. \strong{Negative values for \code{TIMESINCELAS.STEP}?} Yes, this is possible and no bug, as in the XSYG-file multiple curves are stored for one step. Example: a TL step may comprise three curves: \itemize{ \item (a) counts vs. time, \item (b) measured temperature vs. time and \item (c) predefined temperature vs. time. } Three curves, but they all belong to one TL measurement step, but with regard to the time stamps this could produce negative values as the important function (\link{read_XSYG2R}) does not change the order of entries for one step towards a correct time order. } \section{Function version}{ 0.3.3 } \examples{ ## (1) - example for your own data ## ## set files and run function # # file.XSYG <- file.choose() # file.BINX <- file.choose() # # output <- extract_IrradiationTimes(file.XSYG = file.XSYG, file.BINX = file.BINX) # get_RLum(output) # ## export results additionally to a CSV.file in the same directory as the XSYG-file # write.table(x = get_RLum(output), # file = paste0(file.BINX,"_extract_IrradiationTimes.csv"), # sep = ";", # row.names = FALSE) } \section{How to cite}{ Kreutzer, S., 2023. extract_IrradiationTimes(): Extract Irradiation Times from an XSYG-file. Function version 0.3.3. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. 
Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Duller, G.A.T., 2015. The Analyst software package for luminescence data: overview and recent improvements. Ancient TL 33, 35-42. } \seealso{ \linkS4class{RLum.Analysis}, \linkS4class{RLum.Results}, \linkS4class{Risoe.BINfileData}, \link{read_XSYG2R}, \link{read_BIN2R}, \link{write_R2BIN} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \keyword{IO} \keyword{manip} Luminescence/man/calc_MinDose.Rd0000644000176200001440000003606014521210045016245 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calc_MinDose.R \name{calc_MinDose} \alias{calc_MinDose} \title{Apply the (un-)logged minimum age model (MAM) after Galbraith et al. (1999) to a given De distribution} \usage{ calc_MinDose( data, sigmab, log = TRUE, par = 3, bootstrap = FALSE, init.values, level = 0.95, log.output = FALSE, plot = TRUE, multicore = FALSE, ... ) } \arguments{ \item{data}{\linkS4class{RLum.Results} or \link{data.frame} (\strong{required}): for \link{data.frame}: two columns with De \code{(data[ ,1])} and De error \code{(data[ ,2])}.} \item{sigmab}{\link{numeric} (\strong{required}): additional spread in De values. This value represents the expected overdispersion in the data should the sample be well-bleached (Cunningham & Walling 2012, p. 100). \strong{NOTE}: For the logged model (\code{log = TRUE}) this value must be a fraction, e.g. 0.2 (= 20 \\%). If the un-logged model is used (\code{log = FALSE}), sigmab must be provided in the same absolute units of the De values (seconds or Gray). See details.} \item{log}{\link{logical} (\emph{with default}): fit the (un-)logged minimum dose model to De data.} \item{par}{\link{numeric} (\emph{with default}): apply the 3- or 4-parameter minimum age model (\code{par=3} or \code{par=4}). 
The MAM-3 is used by default.} \item{bootstrap}{\link{logical} (\emph{with default}): apply the recycled bootstrap approach of Cunningham & Wallinga (2012).} \item{init.values}{\link{numeric} (\emph{optional}): a named list with starting values for gamma, sigma, p0 and mu (e.g. \code{list(gamma=100, sigma=1.5, p0=0.1, mu=100)}). If no values are provided, the function tries to estimate reasonable values from the data. \strong{NOTE} that the initial values must always be given in the absolute units. If the logged model is applied (\code{log = TRUE}), the provided \code{init.values} are automatically log transformed.} \item{level}{\link{numeric} (\emph{with default}): the confidence level required (defaults to 0.95).} \item{log.output}{\link{logical} (\emph{with default}): If \code{TRUE} the console output will also show the logged values of the final parameter estimates and confidence intervals (only applicable if \code{log = TRUE}).} \item{plot}{\link{logical} (\emph{with default}): plot output (\code{TRUE}/\code{FALSE})} \item{multicore}{\link{logical} (\emph{with default}): enable parallel computation of the bootstrap by creating a multicore SNOW cluster. Depending on the number of available logical CPU cores this may drastically reduce the computation time. Note that this option is highly experimental and may not work on all machines. (\code{TRUE}/\code{FALSE})} \item{...}{(\emph{optional}) further arguments for bootstrapping (\verb{bs.M, bs.N, bs.h, sigmab.sd}). See details for their usage. Further arguments are \itemize{ \item \code{verbose} to de-/activate console output (logical), \item \code{debug} for extended console output (logical) and \item \code{cores} (integer) to manually specify the number of cores to be used when \code{multicore=TRUE}. }} } \value{ Returns a plot (\emph{optional}) and terminal output. 
In addition an \linkS4class{RLum.Results} object is returned containing the following elements: \item{.$summary}{\link{data.frame} summary of all relevant model results.} \item{.$data}{\link{data.frame} original input data} \item{args}{\link{list} used arguments} \item{call}{\link{call} the function call} \item{.$mle}{\link{mle2} object containing the maximum log likelihood functions for all parameters} \item{BIC}{\link{numeric} BIC score} \item{.$confint}{\link{data.frame} confidence intervals for all parameters} \item{.$profile}{\link{profile.mle2} the log likelihood profiles} \item{.$bootstrap}{\link{list} bootstrap results} The output should be accessed using the function \link{get_RLum} } \description{ Function to fit the (un-)logged three or four parameter minimum dose model (MAM-3/4) to De data. } \details{ \strong{Parameters} This model has four parameters: \tabular{rl}{ \code{gamma}: \tab minimum dose on the log scale \cr \code{mu}: \tab mean of the non-truncated normal distribution \cr \code{sigma}: \tab spread in ages above the minimum \cr \code{p0}: \tab proportion of grains at gamma \cr } If \code{par=3} (default) the 3-parameter minimum age model is applied, where \code{gamma=mu}. For \code{par=4} the 4-parameter model is applied instead. \strong{(Un-)logged model} In the original version of the minimum dose model, the basic data are the natural logarithms of the De estimates and relative standard errors of the De estimates. The value for \code{sigmab} must be provided as a ratio (e.g, 0.2 for 20 \\%). This model will be applied if \code{log = TRUE}. If \code{log=FALSE}, the modified un-logged model will be applied instead. This has essentially the same form as the original version. \code{gamma} and \code{sigma} are in Gy and \code{gamma} becomes the minimum true dose in the population. \strong{Note} that the un-logged model requires \code{sigmab} to be in the same absolute unit as the provided De values (seconds or Gray). 
While the original (logged) version of the minimum dose model may be appropriate for most samples (i.e. De distributions), the modified (un-logged) version is specially designed for modern-age and young samples containing negative, zero or near-zero De estimates (Arnold et al. 2009, p. 323). \strong{Initial values & boundaries} The log likelihood calculations use the \link{nlminb} function for box-constrained optimisation using PORT routines. Accordingly, initial values for the four parameters can be specified via \code{init.values}. If no values are provided for \code{init.values} reasonable starting values are estimated from the input data. If the final estimates of \emph{gamma}, \emph{mu}, \emph{sigma} and \emph{p0} are totally off target, consider providing custom starting values via \code{init.values}. In contrast to previous versions of this function the boundaries for the individual model parameters are no longer required to be explicitly specified. If you want to override the default boundary values use the arguments \code{gamma.lower}, \code{gamma.upper}, \code{sigma.lower}, \code{sigma.upper}, \code{p0.lower}, \code{p0.upper}, \code{mu.lower} and \code{mu.upper}. \strong{Bootstrap} When \code{bootstrap=TRUE} the function applies the bootstrapping method as described in Cunningham & Wallinga (2012). By default, the minimum age model produces 1000 first level and 3000 second level bootstrap replicates (actually, the number of second level bootstrap replicates is three times the number of first level replicates unless specified otherwise). The uncertainty on sigmab is 0.04 by default. These values can be changed by using the arguments \code{bs.M} (first level replicates), \code{bs.N} (second level replicates) and \code{sigmab.sd} (error on sigmab). With \code{bs.h} the bandwidth of the kernel density estimate can be specified. 
By default, \code{h} is calculated as \deqn{h = (2*\sigma_{DE})/\sqrt{n}} \strong{Multicore support} This function supports parallel computing and can be activated by \code{multicore=TRUE}. By default, the number of available logical CPU cores is determined automatically, but can be changed with \code{cores}. The multicore support is only available when \code{bootstrap=TRUE} and spawns \code{n} R instances for each core to get MAM estimates for each of the N and M bootstrap replicates. Note that this option is highly experimental and may or may not work for your machine. Also the performance gain increases for larger number of bootstrap replicates. Also note that with each additional core and hence R instance and depending on the number of bootstrap replicates the memory usage can significantly increase. Make sure that memory is always available, otherwise there will be a massive performance hit. \strong{Likelihood profiles} The likelihood profiles are generated and plotted by the \code{bbmle} package. The profile likelihood plots look different to ordinary profile likelihood as "\verb{[...]} the plot method for likelihood profiles displays the square root of the the deviance difference (twice the difference in negative log-likelihood from the best fit), so it will be V-shaped for cases where the quadratic approximation works well \verb{[...]}." (Bolker 2016). For more details on the profile likelihood calculations and plots please see the vignettes of the \code{bbmle} package (also available here: \url{https://CRAN.R-project.org/package=bbmle}). } \note{ The default starting values for \emph{gamma}, \emph{mu}, \emph{sigma} and \emph{p0} may only be appropriate for some De data sets and may need to be changed for other data. This is especially true when the un-logged version is applied. \cr Also note that all R warning messages are suppressed when running this function. 
If the results seem odd consider re-running the model with \code{debug=TRUE} which provides extended console output and forwards all internal warning messages. } \section{Function version}{ 0.4.4 } \examples{ ## Load example data data(ExampleData.DeValues, envir = environment()) # (1) Apply the minimum age model with minimum required parameters. # By default, this will apply the un-logged 3-parameter MAM. calc_MinDose(data = ExampleData.DeValues$CA1, sigmab = 0.1) \dontrun{ # (2) Re-run the model, but save results to a variable and turn # plotting of the log-likelihood profiles off. mam <- calc_MinDose(data = ExampleData.DeValues$CA1, sigmab = 0.1, plot = FALSE) # Show structure of the RLum.Results object mam # Show summary table that contains the most relevant results res <- get_RLum(mam, "summary") res # Plot the log likelihood profiles retroactively, because before # we set plot = FALSE plot_RLum(mam) # Plot the dose distribution in an abanico plot and draw a line # at the minimum dose estimate plot_AbanicoPlot(data = ExampleData.DeValues$CA1, main = "3-parameter Minimum Age Model", line = mam,polygon.col = "none", hist = TRUE, rug = TRUE, summary = c("n", "mean", "mean.weighted", "median", "in.ci"), centrality = res$de, line.col = "red", grid.col = "none", line.label = paste0(round(res$de, 1), "\U00B1", round(res$de_err, 1), " Gy"), bw = 0.1, ylim = c(-25, 18), summary.pos = "topleft", mtext = bquote("Parameters: " ~ sigma[b] == .(get_RLum(mam, "args")$sigmab) ~ ", " ~ gamma == .(round(log(res$de), 1)) ~ ", " ~ sigma == .(round(res$sig, 1)) ~ ", " ~ rho == .(round(res$p0, 2)))) # (3) Run the minimum age model with bootstrap # NOTE: Bootstrapping is computationally intensive # (3.1) run the minimum age model with default values for bootstrapping calc_MinDose(data = ExampleData.DeValues$CA1, sigmab = 0.15, bootstrap = TRUE) # (3.2) Bootstrap control parameters mam <- calc_MinDose(data = ExampleData.DeValues$CA1, sigmab = 0.15, bootstrap = TRUE, bs.M = 300, bs.N = 
500, bs.h = 4, sigmab.sd = 0.06, plot = FALSE) # Plot the results plot_RLum(mam) # save bootstrap results in a separate variable bs <- get_RLum(mam, "bootstrap") # show structure of the bootstrap results str(bs, max.level = 2, give.attr = FALSE) # print summary of minimum dose and likelihood pairs summary(bs$pairs$gamma) # Show polynomial fits of the bootstrap pairs bs$poly.fits$poly.three # Plot various statistics of the fit using the generic plot() function par(mfcol=c(2,2)) plot(bs$poly.fits$poly.three, ask = FALSE) # Show the fitted values of the polynomials summary(bs$poly.fits$poly.three$fitted.values) } } \section{How to cite}{ Burow, C., 2023. calc_MinDose(): Apply the (un-)logged minimum age model (MAM) after Galbraith et al. (1999) to a given De distribution. Function version 0.4.4. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Arnold, L.J., Roberts, R.G., Galbraith, R.F. & DeLong, S.B., 2009. A revised burial dose estimation procedure for optical dating of young and modern-age sediments. Quaternary Geochronology 4, 306-325. Galbraith, R.F. & Laslett, G.M., 1993. Statistical models for mixed fission track ages. Nuclear Tracks Radiation Measurements 4, 459-470. Galbraith, R.F., Roberts, R.G., Laslett, G.M., Yoshida, H. & Olley, J.M., 1999. Optical dating of single grains of quartz from Jinmium rock shelter, northern Australia. Part I: experimental design and statistical models. Archaeometry 41, 339-364. Galbraith, R.F., 2005. Statistics for Fission Track Analysis, Chapman & Hall/CRC, Boca Raton. Galbraith, R.F. & Roberts, R.G., 2012. Statistical aspects of equivalent dose and error calculation and display in OSL dating: An overview and some recommendations. 
Quaternary Geochronology 11, 1-27. Olley, J.M., Roberts, R.G., Yoshida, H., Bowler, J.M., 2006. Single-grain optical dating of grave-infill associated with human burials at Lake Mungo, Australia. Quaternary Science Reviews 25, 2469-2474. \strong{Further reading} Arnold, L.J. & Roberts, R.G., 2009. Stochastic modelling of multi-grain equivalent dose (De) distributions: Implications for OSL dating of sediment mixtures. Quaternary Geochronology 4, 204-230. Bolker, B., 2016. Maximum likelihood estimation analysis with the bbmle package. In: Bolker, B., R Development Core Team, 2016. bbmle: Tools for General Maximum Likelihood Estimation. R package version 1.0.18. \url{https://CRAN.R-project.org/package=bbmle} Bailey, R.M. & Arnold, L.J., 2006. Statistical modelling of single grain quartz De distributions and an assessment of procedures for estimating burial dose. Quaternary Science Reviews 25, 2475-2502. Cunningham, A.C. & Wallinga, J., 2012. Realizing the potential of fluvial archives using robust OSL chronologies. Quaternary Geochronology 12, 98-106. Rodnight, H., Duller, G.A.T., Wintle, A.G. & Tooth, S., 2006. Assessing the reproducibility and accuracy of optical dating of fluvial deposits. Quaternary Geochronology 1, 109-120. Rodnight, H., 2008. How many equivalent dose values are needed to obtain a reproducible distribution?. Ancient TL 26, 3-10. } \seealso{ \link{calc_CentralDose}, \link{calc_CommonDose}, \link{calc_FiniteMixture}, \link{calc_FuchsLang2001}, \link{calc_MaxDose} } \author{ Christoph Burow, University of Cologne (Germany) \cr Based on a rewritten S script of Rex Galbraith, 2010 \cr The bootstrap approach is based on a rewritten MATLAB script of Alastair Cunningham. \cr Alastair Cunningham is thanked for his help in implementing and cross-checking the code. 
, RLum Developer Team} Luminescence/man/plot_RLum.Data.Curve.Rd0000644000176200001440000000604214521210045017572 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot_RLum.Data.Curve.R \name{plot_RLum.Data.Curve} \alias{plot_RLum.Data.Curve} \title{Plot function for an RLum.Data.Curve S4 class object} \usage{ plot_RLum.Data.Curve( object, par.local = TRUE, norm = FALSE, smooth = FALSE, ... ) } \arguments{ \item{object}{\linkS4class{RLum.Data.Curve} (\strong{required}): S4 object of class \code{RLum.Data.Curve}} \item{par.local}{\link{logical} (\emph{with default}): use local graphical parameters for plotting, e.g. the plot is shown in one column and one row. If \code{par.local = FALSE}, global parameters are inherited.} \item{norm}{\link{logical} \link{character} (\emph{with default}): allows curve normalisation to the highest count value ('default'). Alternatively, the function offers the modes \code{"max"}, \code{"min"} and \code{"huot"} for a background corrected normalisation, see details.} \item{smooth}{\link{logical} (\emph{with default}): provides an automatic curve smoothing based on \link[zoo:rollmean]{zoo::rollmean}} \item{...}{further arguments and graphical parameters that will be passed to the \code{plot} function} } \value{ Returns a plot. } \description{ The function provides a standardised plot output for curve data of an \code{RLum.Data.Curve} S4-class object. } \details{ Only single curve data can be plotted with this function. Arguments according to \link{plot}. 
\strong{Curve normalisation} The argument \code{norm} normalises all count values, to date the following options are supported: \code{norm = TRUE} or \code{norm = "max"}: Curve values are normalised to the highest count value in the curve \code{norm = "last"}: Curve values are normalised to the last count value (this can be useful in particular for radiofluorescence curves) \code{norm = "huot"}: Curve values are normalised as suggested by Sébastien Huot via GitHub: \deqn{ y = (observed - median(background)) / (max(observed) - median(background)) } The background of the curve is defined as the last 20 \% of the count values of a curve. } \note{ Not all arguments of \link{plot} will be passed! } \section{Function version}{ 0.2.6 } \examples{ ##plot curve data #load Example data data(ExampleData.CW_OSL_Curve, envir = environment()) #transform data.frame to RLum.Data.Curve object temp <- as(ExampleData.CW_OSL_Curve, "RLum.Data.Curve") #plot RLum.Data.Curve object plot_RLum.Data.Curve(temp) } \seealso{ \link{plot}, \link{plot_RLum} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. plot_RLum.Data.Curve(): Plot function for an RLum.Data.Curve S4 class object. Function version 0.2.6. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. 
https://CRAN.R-project.org/package=Luminescence } \keyword{aplot} Luminescence/man/calc_FuchsLang2001.Rd0000644000176200001440000000733014521210045017062 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calc_FuchsLang2001.R \name{calc_FuchsLang2001} \alias{calc_FuchsLang2001} \title{Apply the model after Fuchs & Lang (2001) to a given De distribution.} \usage{ calc_FuchsLang2001(data, cvThreshold = 5, startDeValue = 1, plot = TRUE, ...) } \arguments{ \item{data}{\linkS4class{RLum.Results} or \link{data.frame} (\strong{required}): for \link{data.frame}: two columns with De \code{(data[,1])} and De error \code{(data[,2])}} \item{cvThreshold}{\link{numeric} (\emph{with default}): coefficient of variation in percent, as threshold for the method, e.g. \code{cvThreshold = 3}. See details.} \item{startDeValue}{\link{numeric} (\emph{with default}): number of the first aliquot that is used for the calculations} \item{plot}{\link{logical} (\emph{with default}): plot output \code{TRUE}/\code{FALSE}} \item{...}{further arguments and graphical parameters passed to \link{plot}} } \value{ Returns a plot (\emph{optional}) and terminal output. In addition an \linkS4class{RLum.Results} object is returned containing the following elements: \item{summary}{\link{data.frame} summary of all relevant model results.} \item{data}{\link{data.frame} original input data} \item{args}{\link{list} used arguments} \item{call}{\link{call} the function call} \item{usedDeValues}{\link{data.frame} containing the used values for the calculation} } \description{ This function applies the method according to Fuchs & Lang (2001) for heterogeneously bleached samples with a given coefficient of variation threshold. } \details{ \strong{Used values} If the coefficient of variation (\code{c[v]}) of the first two values is larger than the threshold \code{c[v_threshold]}, the first value is skipped. 
Use the \code{startDeValue} argument to define a start value for calculation (e.g. 2nd or 3rd value). \strong{Basic steps of the approach} \enumerate{ \item Estimate natural relative variation of the sample using a dose recovery test \item Sort the input values in ascending order \item Calculate a running mean, starting with the lowermost two values and add values iteratively. \item Stop if the calculated \code{c[v]} exceeds the specified \code{cvThreshold} } } \note{ Please consider the requirements and the constraints of this method (see Fuchs & Lang, 2001) } \section{Function version}{ 0.4.1 } \examples{ ## load example data data(ExampleData.DeValues, envir = environment()) ## calculate De according to Fuchs & Lang (2001) temp<- calc_FuchsLang2001(ExampleData.DeValues$BT998, cvThreshold = 5) } \section{How to cite}{ Kreutzer, S., Burow, C., 2023. calc_FuchsLang2001(): Apply the model after Fuchs & Lang (2001) to a given De distribution. Function version 0.4.1. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Fuchs, M. & Lang, A., 2001. OSL dating of coarse-grain fluvial quartz using single-aliquot protocols on sediments from NE Peloponnese, Greece. In: Quaternary Science Reviews 20, 783-787. Fuchs, M. & Wagner, G.A., 2003. Recognition of insufficient bleaching by small aliquots of quartz for reconstructing soil erosion in Greece. Quaternary Science Reviews 22, 1161-1167. 
} \seealso{ \link{plot}, \link{calc_MinDose}, \link{calc_FiniteMixture}, \link{calc_CentralDose}, \link{calc_CommonDose}, \linkS4class{RLum.Results} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) \cr Christoph Burow, University of Cologne (Germany) , RLum Developer Team} \keyword{dplot} Luminescence/man/analyse_SAR.CWOSL.Rd0000644000176200001440000003204014521210044016745 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/analyse_SAR.CWOSL.R \name{analyse_SAR.CWOSL} \alias{analyse_SAR.CWOSL} \title{Analyse SAR CW-OSL measurements} \usage{ analyse_SAR.CWOSL( object, signal.integral.min = NA, signal.integral.max = NA, background.integral.min = NA, background.integral.max = NA, OSL.component = NULL, rejection.criteria = list(), dose.points = NULL, mtext.outer = "", plot = TRUE, plot_onePage = FALSE, plot.single = FALSE, onlyLxTxTable = FALSE, ... ) } \arguments{ \item{object}{\linkS4class{RLum.Analysis} (\strong{required}): input object containing data for analysis, alternatively a \link{list} of \linkS4class{RLum.Analysis} objects can be provided. The object should contain \strong{only} curves considered part of the SAR protocol (see Details.)} \item{signal.integral.min}{\link{integer} (\strong{required}): lower bound of the signal integral. Can be a \link{list} of \link{integer}s, if \code{object} is of type \link{list}. If the input is vector (e.g., \code{c(1,2)}) the 2nd value will be interpreted as the minimum signal integral for the \code{Tx} curve. Can be set to \code{NA}, in this case no integrals are taken into account.} \item{signal.integral.max}{\link{integer} (\strong{required}): upper bound of the signal integral. Can be a \link{list} of \link{integer}s, if \code{object} is of type \link{list}. If the input is vector (e.g., \code{c(1,2)}) the 2nd value will be interpreted as the maximum signal integral for the \code{Tx} curve. 
Can be set to \code{NA}, in this case no integrals are taken into account.} \item{background.integral.min}{\link{integer} (\strong{required}): lower bound of the background integral. Can be a \link{list} of \link{integer}s, if \code{object} is of type \link{list}. If the input is a vector (e.g., \code{c(1,2)}) the 2nd value will be interpreted as the minimum background integral for the \code{Tx} curve. Can be set to \code{NA}, in this case no integrals are taken into account.} \item{background.integral.max}{\link{integer} (\strong{required}): upper bound of the background integral. Can be a \link{list} of \link{integer}s, if \code{object} is of type \link{list}. If the input is a vector (e.g., \code{c(1,2)}) the 2nd value will be interpreted as the maximum background integral for the \code{Tx} curve. Can be set to \code{NA}, in this case no integrals are taken into account.} \item{OSL.component}{\link{character} or \link{integer} (\emph{optional}): a single index or a \link{character} defining the signal component to be evaluated. It requires that the object was processed by \verb{[OSLdecomposition::RLum.OSL_decomposition]}. This argument can either be the name of the OSL component assigned by \verb{[OSLdecomposition::RLum.OSL_global_fitting]} or the index in the descending order of decay rates. Then \code{"1"} selects the fastest decaying component, \code{"2"} the second fastest and so on. Can be a \link{list} of \link{integer}s or strings (or mixed). If object is a \link{list} and this parameter is provided as \link{list} it alternates over the elements (aliquots) of the object list, e.g., \code{list(1,2)} processes the first aliquot with component \code{1} and the second aliquot with component \code{2}. \code{NULL} does not process any component.} \item{rejection.criteria}{\link{list} (\emph{with default}): provide a \emph{named} list and set rejection criteria in \strong{percentage} for further calculation. 
Can be a \link{list} in a \link{list}, if \code{object} is of type \link{list}. Note: If an \emph{unnamed} \link{list} is provided the new settings are ignored! Allowed arguments are \code{recycling.ratio}, \code{recuperation.rate}, \code{palaeodose.error}, \code{testdose.error} and \code{exceed.max.regpoint = TRUE/FALSE}. Example: \code{rejection.criteria = list(recycling.ratio = 10)}. Per default all numerical values are set to 10, \code{exceed.max.regpoint = TRUE}. Every criterion can be set to \code{NA}. In this case the values are calculated, but not considered, i.e. the RC.Status always becomes \code{'OK'}} \item{dose.points}{\link{numeric} (\emph{optional}): a numeric vector containing the dose points values. Using this argument overwrites dose point values extracted from other data. Can be a \link{list} of \link{numeric} vectors, if \code{object} is of type \link{list}} \item{mtext.outer}{\link{character} (\emph{optional}): option to provide an outer margin \code{mtext}. Can be a \link{list} of \link{character}s, if \code{object} is of type \link{list}} \item{plot}{\link{logical} (\emph{with default}): enables or disables plot output.} \item{plot_onePage}{\link{logical} (\emph{with default}): enables or disables one-page plot output} \item{plot.single}{\link{logical} (\emph{with default}) or \link{numeric} (\emph{optional}): single plot output (\code{TRUE/FALSE}) to allow for plotting the results in single plot windows. If a \link{numeric} vector is provided the plots can be selected individually, i.e. \code{plot.single = c(1,2,3,4)} will plot the TL and Lx, Tx curves but not the legend (5) or the growth curve (6), (7) and (8) belong to rejection criteria plots. Requires \code{plot = TRUE}.} \item{onlyLxTxTable}{\link{logical} (with default): If \code{TRUE} the dose response curve fitting and plotting is skipped. 
This allows one to obtain the \code{Lx/Tx} table for large datasets without the need for a curve fitting.} \item{...}{further arguments that will be passed to the function \link{plot_GrowthCurve} or \link{calc_OSLLxTxRatio} (supported: \code{background.count.distribution}, \code{sigmab}, \code{sig0}). \strong{Please note} that if you consider to use the early light subtraction method you should provide your own \code{sigmab} value!} } \value{ A plot (\emph{optional}) and an \linkS4class{RLum.Results} object is returned containing the following elements: \item{data}{\link{data.frame} containing De-values, De-error and further parameters} \item{LnLxTnTx.values}{\link{data.frame} of all calculated Lx/Tx values including signal, background counts and the dose points} \item{rejection.criteria}{\link{data.frame} with values that might be used as rejection criteria. \code{NA} is produced if no R0 dose point exists.} \item{Formula}{\link{formula} formula that has been used for the growth curve fitting} The output should be accessed using the function \link{get_RLum}. } \description{ The function performs a SAR CW-OSL analysis on an \linkS4class{RLum.Analysis} object including growth curve fitting. } \details{ The function performs an analysis for standard SAR protocol measurements introduced by Murray and Wintle (2000) with CW-OSL curves. For the calculation of the \code{Lx/Tx} value the function \link{calc_OSLLxTxRatio} is used. For \strong{changing the way the Lx/Tx error is calculated} use the argument \code{background.count.distribution} and \code{sigmab}, which will be passed to the function \link{calc_OSLLxTxRatio}. \strong{What is part of a SAR sequence?} The function is rather picky when it comes down to accepted curve input (OSL,IRSL,...) and structure. A SAR sequence is basically a set of \eqn{L_{x}/T_{x}} curves. Hence, every 2nd curve is considered a shine-down curve related to the test dose. 
It also means that the number of curves for \eqn{L_{x}} has to be equal to the number of \eqn{T_{x}} curves, and that hot-bleach curves \strong{do not} belong into a SAR sequence; at least not for the analysis. Other curves allowed and processed are preheat curves, or preheat curves measured as TL, and irradiation curves. The latter indicates the duration of the irradiation, the dose and test dose points, e.g., as part of XSYG files. \strong{Argument \code{object} is of type \code{list}} If the argument \code{object} is of type \link{list} containing \strong{only} \linkS4class{RLum.Analysis} objects, the function re-calls itself as often as elements are in the list. This is useful if an entire measurement is to be analysed without writing separate for-loops. To gain full control of the parameters (e.g., \code{dose.points}) for every aliquot (corresponding to one \linkS4class{RLum.Analysis} object in the list), in this case the arguments can be provided as \link{list}. This \code{list} should be of similar length as the \code{list} provided with the argument \code{object}, otherwise the function will create an own list of the requested length. Function output will be just one single \linkS4class{RLum.Results} object. Please be careful when using this option. It may allow a fast and efficient data analysis, but the function may also break with an unclear error message, due to wrong input data. \strong{Working with IRSL data} The function was originally designed to work just for 'OSL' curves, following the principles of the SAR protocol. An IRSL measurement protocol may follow this procedure, e.g., post-IR IRSL protocol (Thomsen et al., 2008). 
Therefore this function has been enhanced to work with IRSL data, however, the function is only capable of analysing curves that follow the SAR protocol structure, i.e., to analyse a post-IR IRSL protocol, curve data have to be pre-selected by the user to fit the standards of the SAR protocol, i.e., Lx,Tx,Lx,Tx and so on. Example: Imagine the measurement contains \code{pIRIR50} and \code{pIRIR225} IRSL curves. Only one curve type can be analysed at the same time: The \code{pIRIR50} curves or the \code{pIRIR225} curves. \strong{Supported rejection criteria} \verb{[recycling.ratio]}: calculated for every repeated regeneration dose point. \verb{[recuperation.rate]}: recuperation rate calculated by comparing the \code{Lx/Tx} values of the zero regeneration point with the \code{Ln/Tn} value (the \code{Lx/Tx} ratio of the natural signal). For methodological background see Aitken and Smith (1988). \verb{[testdose.error]}: set the allowed error for the test dose, which per default should not exceed 10\%. The test dose error is calculated as \code{Tx_net.error/Tx_net}. The calculation of the \eqn{T_{n}} error is detailed in \link{calc_OSLLxTxRatio}. \verb{[palaeodose.error]}: set the allowed error for the De value, which per default should not exceed 10\%. \strong{Irradiation times} The function makes two attempts to extract irradiation data (dose points) automatically from the input object, if the argument \code{dose.points} was not set (aka set to \code{NULL}). \enumerate{ \item It searches in every curve for an info object called \code{IRR_TIME}. If this was set, any value set here is taken as dose point. \item If the object contains curves of type \code{irradiation}, the function tries to use this information to assign these values to the curves. However, the function does \strong{not} overwrite values preset in \code{IRR_TIME}. 
} } \note{ This function must not be mixed up with the function \link{Analyse_SAR.OSLdata}, which works with \linkS4class{Risoe.BINfileData} objects. \strong{The function currently does support only 'OSL', 'IRSL' and 'POSL' data!} } \section{Function version}{ 0.9.14 } \examples{ ##load data ##ExampleData.BINfileData contains two BINfileData objects ##CWOSL.SAR.Data and TL.SAR.Data data(ExampleData.BINfileData, envir = environment()) ##transform the values from the first position in a RLum.Analysis object object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos=1) ##perform SAR analysis and set rejection criteria results <- analyse_SAR.CWOSL( object = object, signal.integral.min = 1, signal.integral.max = 2, background.integral.min = 900, background.integral.max = 1000, log = "x", fit.method = "EXP", rejection.criteria = list( recycling.ratio = 10, recuperation.rate = 10, testdose.error = 10, palaeodose.error = 10, exceed.max.regpoint = TRUE) ) ##show De results get_RLum(results) ##show LnTnLxTx table get_RLum(results, data.object = "LnLxTnTx.table") } \section{How to cite}{ Kreutzer, S., 2023. analyse_SAR.CWOSL(): Analyse SAR CW-OSL measurements. Function version 0.9.14. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Aitken, M.J. and Smith, B.W., 1988. Optical dating: recuperation after bleaching. Quaternary Science Reviews 7, 387-393. Duller, G., 2003. Distinguishing quartz and feldspar in single grain luminescence measurements. Radiation Measurements, 37 (2), 161-165. Murray, A.S. and Wintle, A.G., 2000. Luminescence dating of quartz using an improved single-aliquot regenerative-dose protocol. Radiation Measurements 32, 57-73. 
Thomsen, K.J., Murray, A.S., Jain, M., Boetter-Jensen, L., 2008. Laboratory fading rates of various luminescence signals from feldspar-rich sediment extracts. Radiation Measurements 43, 1474-1486. doi:10.1016/j.radmeas.2008.06.002 } \seealso{ \link{calc_OSLLxTxRatio}, \link{plot_GrowthCurve}, \linkS4class{RLum.Analysis}, \linkS4class{RLum.Results}, \link{get_RLum} } \author{ Sebastian Kreutzer, Geography & Earth Sciences, Aberystwyth University (United Kingdom) , RLum Developer Team} \keyword{datagen} \keyword{plot} Luminescence/man/fit_OSLLifeTimes.Rd0000644000176200001440000002100314521210045017015 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fit_OSLLifeTimes.R \name{fit_OSLLifeTimes} \alias{fit_OSLLifeTimes} \title{Fitting and Deconvolution of OSL Lifetime Components} \usage{ fit_OSLLifeTimes( object, tp = 0, signal_range = NULL, n.components = NULL, method_control = list(), plot = TRUE, plot_simple = FALSE, verbose = TRUE, ... ) } \arguments{ \item{object}{\linkS4class{RLum.Data.Curve}, \linkS4class{RLum.Analysis}, \link{data.frame} or \link{matrix} \strong{(required)}: Input object containing the data to be analysed. All objects can be provided also as list for an automated processing. Please note: \code{NA} values are automatically removed and the dataset should comprise at least 5 data points.} \item{tp}{\link{numeric} (\emph{with default}): option to account for the stimulation pulse width. For off-time measurements the default value is 0. \code{tp} has the same unit as the measurement data, e.g., µs. Please set this parameter carefully, if it all, otherwise you may heavily bias your fit results.} \item{signal_range}{\link{numeric} (\emph{optional}): allows to set a channel range, by default all channels are used, e.g. 
\code{signal_range = c(2,100)} considers only channels 2 to 100 and \code{signal_range = c(2)} considers only channels from channel 2 onwards.} \item{n.components}{\link{numeric} (\emph{optional}): Fix the number of components. If set the algorithm will try to fit the number of predefined components. If nothing is set, the algorithm will try to find the best number of components.} \item{method_control}{\link{list} (\emph{optional}): Named list to allow a finer control of the fitting process. See details for allowed options.} \item{plot}{\link{logical} (\emph{with default}): Enable/disable plot output} \item{plot_simple}{\link{logical} (\emph{with default}): Enable/disable reduced plot output. If \code{TRUE}, no residual plot is shown, however, plot output can be combined using the standard R layout options, such as \code{par(mfrow = c(2,2))}.} \item{verbose}{\link{logical} (\emph{with default}): Enable/disable terminal feedback} \item{...}{parameters passed to \link{plot.default} to control the plot output, supported are: \code{main}, \code{xlab}, \code{ylab}, \code{log}, \code{xlim}, \code{ylim}, \code{col}, \code{lty}, \code{legend.pos}, \code{legend.text}. 
If the input object is of type \linkS4class{RLum.Analysis} these arguments can be provided as a \link{list}.} } \value{ -----------------------------------\cr \verb{[ NUMERICAL OUTPUT ]}\cr -----------------------------------\cr \strong{\code{RLum.Results}}-object \strong{slot:} \strong{\verb{@data}} \tabular{lll}{ \strong{Element} \tab \strong{Type} \tab \strong{Description}\cr \verb{$data} \tab \code{matrix} \tab the final fit matrix \cr \verb{$start_matrix} \tab \code{matrix} \tab the start matrix used for the fitting \cr \verb{$total_counts} \tab \code{integer} \tab Photon count sum \cr \verb{$fit} \tab \code{nls} \tab the fit object returned by \link[minpack.lm:nls.lm]{minpack.lm::nls.lm} \cr } \strong{slot:} \strong{\verb{@info}} The original function call ------------------------\cr \verb{[ TERMINAL OUTPUT ]}\cr ------------------------\cr Terminal output is only shown if the argument \code{verbose = TRUE}. \emph{(1) Start parameter and component adaption}\cr Trace of the parameter adaptation process \emph{(2) Fitting results (sorted by ascending tau)}\cr The fitting results sorted by ascending tau value. Please note that if you access the \code{nls} fitting object, the values are not sorted. \emph{(3) Further information}\cr \itemize{ \item The photon count sum \item Durbin-Watson residual statistic to assess whether the residuals are correlated, ideally the residuals should be not correlated at all. Rough measures are: \cr D = 0: the residuals are systematically correlated \cr D = 2: the residuals are randomly distributed \cr D = 4: the residuals are systematically anti-correlated\cr } You should be suspicious if D differs largely from 2. ------------------------\cr \verb{[ PLOT OUTPUT ]}\cr ------------------------\cr A plot showing the original data and the fit so far possible. The lower plot shows the residuals of the fit. 
} \description{ Fitting and Deconvolution of OSL Lifetime Components } \details{ The function intends to provide easy access to pulsed optically stimulated luminescence (POSL) data, in order to determine signal lifetimes. The fitting is currently optimised to work with the off-time flank of POSL measurements only. For the signal deconvolution, a differential evolution optimisation is combined with nonlinear least-square fitting following the approach by Bluszcz & Adamiec (2006). \strong{Component deconvolution algorithm} The component deconvolution consists of two steps: (1) Adaptation phase In the adaptation phase the function tries to figure out the optimal and statistically justified number of signal components following roughly the approach suggested by Bluszcz & Adamiec (2006). In contrast to their work, for the optimisation by differential evolution here the package \code{'DEoptim'} is used. The function to be optimized has the form: \deqn{\chi^2 = \sum(w * (n_i/c - \sum(A_i * exp(-x/(tau_i + t_p))))^2)} with \eqn{w = 1} for unweighted regression analysis (\code{method_control = list(weights = FALSE)}) or \eqn{w = c^2/n_i} for weighted regression analysis. The default value is \code{TRUE}. \deqn{F = (\Delta\chi^2 / 2) / (\chi^2/(N - 2*m - 2))} (2) Final fitting \strong{\code{method_control}} \tabular{lll}{ \strong{Parameter} \tab \strong{Type} \tab \strong{Description}\cr \code{p} \tab \link{numeric} \tab controls the probability for the F statistic reference values. For a significance level of 5 \% a value of 0.95 (the default) should be added, for 1 \%, a value of 0.99 is sufficient: 1 > p > 0 (cf. \link[stats:Fdist]{stats::FDist})\cr \code{seed} \tab \link{numeric} \tab set the seed for the random number generator, provide a value here to get reproducible results \cr \code{DEoptim.trace} \tab \link{logical} \tab enables/disables the tracing of the differential evolution (cf. 
\link[DEoptim:DEoptim.control]{DEoptim::DEoptim.control}) \cr \code{DEoptim.itermax} \tab \link{logical} \tab controls the number of the allowed generations (cf. \link[DEoptim:DEoptim.control]{DEoptim::DEoptim.control}) \cr \code{weights} \tab \link{logical} \tab enables/disables the weighting for the start parameter estimation and fitting (see equations above). The default values is \code{TRUE} \cr \code{nlsLM.trace} \tab \link{logical} \tab enables/disables trace mode for the nls fitting (\link[minpack.lm:nlsLM]{minpack.lm::nlsLM}), can be used to identify convergence problems, default is \code{FALSE} \cr \code{nlsLM.upper} \tab \link{logical} \tab enables/disables upper parameter boundary, default is \code{TRUE} \cr \code{nlsLM.lower} \tab \link{logical} \tab enables/disables lower parameter boundary, default is \code{TRUE} } } \section{Function version}{ 0.1.5 } \examples{ ##load example data data(ExampleData.TR_OSL, envir = environment()) ##fit lifetimes (short run) fit_OSLLifeTimes( object = ExampleData.TR_OSL, n.components = 1) ##long example \dontrun{ fit_OSLLifeTimes( object = ExampleData.TR_OSL) } } \section{How to cite}{ Kreutzer, S., Schmidt, C., 2023. fit_OSLLifeTimes(): Fitting and Deconvolution of OSL Lifetime Components. Function version 0.1.5. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Bluszcz, A., Adamiec, G., 2006. Application of differential evolution to fitting OSL decay curves. Radiation Measurements 41, 886-891. \doi{10.1016/j.radmeas.2006.05.016}\cr Durbin, J., Watson, G.S., 1950. Testing for Serial Correlation in Least Squares Regression: I. Biometrika 37, 409-21. doi:10.2307/2332391 \strong{Further reading} Hughes, I., Hase, T., 2010. 
Measurements and Their Uncertainties. Oxford University Press. Storn, R., Price, K., 1997. Differential Evolution – A Simple and Efficient Heuristic for Global Optimization over Continuous Spaces. Journal of Global Optimization 11, 341–359. } \seealso{ \link[minpack.lm:nls.lm]{minpack.lm::nls.lm}, \link[DEoptim:DEoptim]{DEoptim::DEoptim} } \author{ Sebastian Kreutzer, Geography & Earth Sciences, Aberystwyth University, Christoph Schmidt, University of Bayreuth (Germany) , RLum Developer Team} Luminescence/man/get_Quote.Rd0000644000176200001440000000265614521210045015665 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_Quote.R \name{get_Quote} \alias{get_Quote} \title{Function to return essential quotes} \usage{ get_Quote(ID, separated = FALSE) } \arguments{ \item{ID}{\link{character} (\emph{optional}): quote ID to be returned.} \item{separated}{\link{logical} (\emph{with default}): return result in separated form.} } \value{ Returns a character with quote and respective (false) author. } \description{ This function returns one of the collected essential quotes in the growing library. If called without any parameters, a random quote is returned. } \section{Function version}{ 0.1.5 } \examples{ ## ask for an arbitrary quote get_Quote() } \author{ Quote credits: Michael Dietze, GFZ Potsdam (Germany), Sebastian Kreutzer, Geography & Earth Science, Aberystwyth University (United Kingdom), Dirk Mittelstraß, TU Dresden (Germany), Jakob Wallinga (Wageningen University, Netherlands) , RLum Developer Team} \section{How to cite}{ Dietze, M., Kreutzer, S., 2023. get_Quote(): Function to return essential quotes. Function version 0.1.5. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. 
https://CRAN.R-project.org/package=Luminescence } Luminescence/man/plot_NRt.Rd0000644000176200001440000001076714521210045015474 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot_NRt.R \name{plot_NRt} \alias{plot_NRt} \title{Visualise natural/regenerated signal ratios} \usage{ plot_NRt( data, log = FALSE, smooth = c("none", "spline", "rmean"), k = 3, legend = TRUE, legend.pos = "topright", ... ) } \arguments{ \item{data}{\link{list}, \link{data.frame}, \link{matrix} or \linkS4class{RLum.Analysis} (\strong{required}): X,Y data of measured values (time and counts). See details on individual data structure.} \item{log}{\link{character} (\emph{optional}): logarithmic axes (\code{c("x", "y", "xy")}).} \item{smooth}{\link{character} (\emph{optional}): apply data smoothing. Use \code{"rmean"} to calculate the rolling where \code{k} determines the width of the rolling window (see \link{rollmean}). \code{"spline"} applies a smoothing spline to each curve (see \link{smooth.spline})} \item{k}{\link{integer} (\emph{with default}): integer width of the rolling window.} \item{legend}{\link{logical} (\emph{with default}): show or hide the plot legend.} \item{legend.pos}{\link{character} (\emph{with default}): keyword specifying the position of the legend (see \link{legend}).} \item{...}{further parameters passed to \link{plot} (also see \link{par}).} } \value{ Returns a plot and \linkS4class{RLum.Analysis} object. } \description{ This function creates a Natural/Regenerated signal vs. time (NR(t)) plot as shown in Steffen et al. 2009 } \details{ This function accepts the individual curve data in many different formats. If \code{data} is a \code{list}, each element of the list must contain a two column \code{data.frame} or \code{matrix} containing the \code{XY} data of the curves (time and counts). Alternatively, the elements can be objects of class \linkS4class{RLum.Data.Curve}. 
Input values can also be provided as a \code{data.frame} or \code{matrix} where the first column contains the time values and each following column contains the counts of each curve. } \examples{ ## load example data data("ExampleData.BINfileData", envir = environment()) ## EXAMPLE 1 ## convert Risoe.BINfileData object to RLum.Analysis object data <- Risoe.BINfileData2RLum.Analysis(object = CWOSL.SAR.Data, pos = 8, ltype = "OSL") ## extract all OSL curves allCurves <- get_RLum(data) ## keep only the natural and regenerated signal curves pos <- seq(1, 9, 2) curves <- allCurves[pos] ## plot a standard NR(t) plot plot_NRt(curves) ## re-plot with rolling mean data smoothing plot_NRt(curves, smooth = "rmean", k = 10) ## re-plot with a logarithmic x-axis plot_NRt(curves, log = "x", smooth = "rmean", k = 5) ## re-plot with custom axes ranges plot_NRt(curves, smooth = "rmean", k = 5, xlim = c(0.1, 5), ylim = c(0.4, 1.6), legend.pos = "bottomleft") ## re-plot with smoothing spline on log scale plot_NRt(curves, smooth = "spline", log = "x", legend.pos = "top") ## EXAMPLE 2 # you may also use this function to check whether all # TD curves follow the same shape (making it a TnTx(t) plot). posTD <- seq(2, 14, 2) curves <- allCurves[posTD] plot_NRt(curves, main = "TnTx(t) Plot", smooth = "rmean", k = 20, ylab = "TD natural / TD regenerated", xlim = c(0, 20), legend = FALSE) ## EXAMPLE 3 # extract data from all positions data <- lapply(1:24, FUN = function(pos) { Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos = pos, ltype = "OSL") }) # get individual curve data from each aliquot aliquot <- lapply(data, get_RLum) # set graphical parameters par(mfrow = c(2, 2)) # create NR(t) plots for all aliquots for (i in 1:length(aliquot)) { plot_NRt(aliquot[[i]][pos], main = paste0("Aliquot #", i), smooth = "rmean", k = 20, xlim = c(0, 10), cex = 0.6, legend.pos = "bottomleft") } # reset graphical parameters par(mfrow = c(1, 1)) } \section{How to cite}{ Burow, C., 2023. 
plot_NRt(): Visualise natural/regenerated signal ratios. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Steffen, D., Preusser, F., Schlunegger, F., 2009. OSL quartz underestimation due to unstable signal components. Quaternary Geochronology, 4, 353-362. } \seealso{ \link{plot} } \author{ Christoph Burow, University of Cologne (Germany) , RLum Developer Team} Luminescence/man/calc_MaxDose.Rd0000644000176200001440000001341514521210045016246 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calc_MaxDose.R \name{calc_MaxDose} \alias{calc_MaxDose} \title{Apply the maximum age model to a given De distribution} \usage{ calc_MaxDose( data, sigmab, log = TRUE, par = 3, bootstrap = FALSE, init.values, plot = TRUE, ... ) } \arguments{ \item{data}{\linkS4class{RLum.Results} or \link{data.frame} (\strong{required}): for \link{data.frame}: two columns with De \code{(data[ ,1])} and De error \code{(data[ ,2])}.} \item{sigmab}{\link{numeric} (\strong{required}): additional spread in De values. This value represents the expected overdispersion in the data should the sample be well-bleached (Cunningham & Walling 2012, p. 100). \strong{NOTE}: For the logged model (\code{log = TRUE}) this value must be a fraction, e.g. 0.2 (= 20 \\%). If the un-logged model is used (\code{log = FALSE}), sigmab must be provided in the same absolute units of the De values (seconds or Gray). 
See details (\link{calc_MinDose}).} \item{log}{\link{logical} (\emph{with default}): fit the (un-)logged three parameter minimum dose model to De data} \item{par}{\link{numeric} (\emph{with default}): apply the 3- or 4-parameter minimum age model (\code{par=3} or \code{par=4}).} \item{bootstrap}{\link{logical} (\emph{with default}): apply the recycled bootstrap approach of Cunningham & Wallinga (2012).} \item{init.values}{\link{numeric} (\emph{with default}): starting values for gamma, sigma, p0 and mu. Custom values need to be provided in a vector of length three in the form of \code{c(gamma, sigma, p0)}.} \item{plot}{\link{logical} (\emph{with default}): plot output (\code{TRUE}/\code{FALSE})} \item{...}{further arguments for bootstrapping (\verb{bs.M, bs.N, bs.h, sigmab.sd}). See details for their usage.} } \value{ Please see \link{calc_MinDose}. } \description{ Function to fit the maximum age model to De data. This is a wrapper function that calls \link{calc_MinDose} and applies a similar approach as described in Olley et al. (2006). } \details{ \strong{Data transformation} To estimate the maximum dose population and its standard error, the three parameter minimum age model of Galbraith et al. (1999) is adapted. The measured De values are transformed as follows: \enumerate{ \item convert De values to natural logs \item multiply the logged data to create a mirror image of the De distribution \item shift De values along x-axis by the smallest x-value found to obtain only positive values \item combine in quadrature the measurement error associated with each De value with a relative error specified by \code{sigmab} \item apply the MAM to these data } When all calculations are done the results are then converted as follows \enumerate{ \item subtract the x-offset \item multiply the natural logs by -1 \item take the exponent to obtain the maximum dose estimate in Gy } \strong{Further documentation} Please see \link{calc_MinDose}. 
} \section{Function version}{ 0.3.1 } \examples{ ## load example data data(ExampleData.DeValues, envir = environment()) # apply the maximum dose model calc_MaxDose(ExampleData.DeValues$CA1, sigmab = 0.2, par = 3) } \section{How to cite}{ Burow, C., 2023. calc_MaxDose(): Apply the maximum age model to a given De distribution. Function version 0.3.1. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Arnold, L.J., Roberts, R.G., Galbraith, R.F. & DeLong, S.B., 2009. A revised burial dose estimation procedure for optical dating of young and modern-age sediments. Quaternary Geochronology 4, 306-325. Galbraith, R.F. & Laslett, G.M., 1993. Statistical models for mixed fission track ages. Nuclear Tracks Radiation Measurements 4, 459-470. Galbraith, R.F., Roberts, R.G., Laslett, G.M., Yoshida, H. & Olley, J.M., 1999. Optical dating of single grains of quartz from Jinmium rock shelter, northern Australia. Part I: experimental design and statistical models. Archaeometry 41, 339-364. Galbraith, R.F., 2005. Statistics for Fission Track Analysis, Chapman & Hall/CRC, Boca Raton. Galbraith, R.F. & Roberts, R.G., 2012. Statistical aspects of equivalent dose and error calculation and display in OSL dating: An overview and some recommendations. Quaternary Geochronology 11, 1-27. Olley, J.M., Roberts, R.G., Yoshida, H., Bowler, J.M., 2006. Single-grain optical dating of grave-infill associated with human burials at Lake Mungo, Australia. Quaternary Science Reviews 25, 2469-2474 \strong{Further reading} Arnold, L.J. & Roberts, R.G., 2009. Stochastic modelling of multi-grain equivalent dose (De) distributions: Implications for OSL dating of sediment mixtures. 
Quaternary Geochronology 4, 204-230. Bailey, R.M. & Arnold, L.J., 2006. Statistical modelling of single grain quartz De distributions and an assessment of procedures for estimating burial dose. Quaternary Science Reviews 25, 2475-2502. Cunningham, A.C. & Wallinga, J., 2012. Realizing the potential of fluvial archives using robust OSL chronologies. Quaternary Geochronology 12, 98-106. Rodnight, H., Duller, G.A.T., Wintle, A.G. & Tooth, S., 2006. Assessing the reproducibility and accuracy of optical dating of fluvial deposits. Quaternary Geochronology 1, 109-120. Rodnight, H., 2008. How many equivalent dose values are needed to obtain a reproducible distribution?. Ancient TL 26, 3-10. } \seealso{ \link{calc_CentralDose}, \link{calc_CommonDose}, \link{calc_FiniteMixture}, \link{calc_FuchsLang2001}, \link{calc_MinDose} } \author{ Christoph Burow, University of Cologne (Germany) \cr Based on a rewritten S script of Rex Galbraith, 2010 , RLum Developer Team} Luminescence/man/template_DRAC.Rd0000644000176200001440000000707714521210045016337 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/template_DRAC.R \name{template_DRAC} \alias{template_DRAC} \title{Create a DRAC input data template (v1.2)} \usage{ template_DRAC(nrow = 1L, preset = NULL, notification = TRUE) } \arguments{ \item{nrow}{\link{integer} (\emph{with default}): specifies the number of rows of the template (i.e., the number of data sets you want to submit).} \item{preset}{\link{character} (\emph{optional}): By default, all values of the template are set to \code{NA}, which means that the user needs to fill in \strong{all} data first before submitting to DRAC using \code{use_DRAC()}. To reduce the number of values that need to be provided, \code{preset} can be used to create a template with at least a minimum of reasonable preset values. 
\code{preset} can be one of the following: \itemize{ \item \code{quartz_coarse} \item \code{quartz_fine} \item \code{feldspar_coarse} \item \code{polymineral_fine} \item \code{DRAC-example_quartz} \item \code{DRAC-example_feldspar} \item \code{DRAC-example_polymineral} } Note that the last three options can be used to produce a template with values directly taken from the official DRAC input \code{.csv} file.} \item{notification}{\link{logical} (\emph{with default}): show or hide the notification} } \value{ A list. } \description{ This function returns a DRAC input template (v1.2) to be used in conjunction with the \link{use_DRAC} function } \examples{ # create a new DRAC input input input <- template_DRAC(preset = "DRAC-example_quartz") # show content of the input print(input) print(input$`Project ID`) print(input[[4]]) ## Example: DRAC Quartz example # note that you only have to assign new values where they # are different to the default values input$`Project ID` <- "DRAC-Example" input$`Sample ID` <- "Quartz" input$`Conversion factors` <- "AdamiecAitken1998" input$`External U (ppm)` <- 3.4 input$`errExternal U (ppm)` <- 0.51 input$`External Th (ppm)` <- 14.47 input$`errExternal Th (ppm)` <- 1.69 input$`External K (\%)` <- 1.2 input$`errExternal K (\%)` <- 0.14 input$`Calculate external Rb from K conc?` <- "N" input$`Calculate internal Rb from K conc?` <- "N" input$`Scale gammadoserate at shallow depths?` <- "N" input$`Grain size min (microns)` <- 90 input$`Grain size max (microns)` <- 125 input$`Water content ((wet weight - dry weight)/dry weight) \%` <- 5 input$`errWater content \%` <- 2 input$`Depth (m)` <- 2.2 input$`errDepth (m)` <- 0.22 input$`Overburden density (g cm-3)` <- 1.8 input$`errOverburden density (g cm-3)` <- 0.1 input$`Latitude (decimal degrees)` <- 30.0000 input$`Longitude (decimal degrees)` <- 70.0000 input$`Altitude (m)` <- 150 input$`De (Gy)` <- 20 input$`errDe (Gy)` <- 0.2 # use DRAC \dontrun{ output <- use_DRAC(input) } } \section{How to 
cite}{ Burow, C., Kreutzer, S., 2023. template_DRAC(): Create a DRAC input data template (v1.2). In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Durcan, J.A., King, G.E., Duller, G.A.T., 2015. DRAC: Dose Rate and Age Calculator for trapped charge dating. Quaternary Geochronology 28, 54-61. doi:10.1016/j.quageo.2015.03.012 } \seealso{ \link{as.data.frame}, \link{list} } \author{ Christoph Burow, University of Cologne (Germany), Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} Luminescence/man/structure_RLum.Rd0000644000176200001440000000404014521210045016715 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/structure_RLum.R \name{structure_RLum} \alias{structure_RLum} \alias{structure_RLum,list-method} \title{General structure function for RLum S4 class objects} \usage{ structure_RLum(object, ...) \S4method{structure_RLum}{list}(object, ...) } \arguments{ \item{object}{\linkS4class{RLum} (\strong{required}): S4 object of class \code{RLum}} \item{...}{further arguments that one might want to pass to the specific structure method} } \value{ Returns a \link{data.frame} with structure of the object. } \description{ Function calls object-specific get functions for RLum S4 class objects. } \details{ The function provides a generalised access point for specific \linkS4class{RLum} objects.\cr Depending on the input object, the corresponding structure function will be selected. Allowed arguments can be found in the documentations of the corresponding \linkS4class{RLum} class. 
} \section{Functions}{ \itemize{ \item \code{structure_RLum(list)}: Returns a list of \linkS4class{RLum} objects that had been passed to \link{structure_RLum} }} \section{Function version}{ 0.2.0 } \examples{ ##load example data data(ExampleData.XSYG, envir = environment()) ##show structure structure_RLum(OSL.SARMeasurement$Sequence.Object) } \seealso{ \linkS4class{RLum.Data.Curve}, \linkS4class{RLum.Data.Image}, \linkS4class{RLum.Data.Spectrum}, \linkS4class{RLum.Analysis}, \linkS4class{RLum.Results} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. structure_RLum(): General structure function for RLum S4 class objects. Function version 0.2.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \keyword{utilities} Luminescence/man/apply_EfficiencyCorrection.Rd0000644000176200001440000000514614521210045021227 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/apply_EfficiencyCorrection.R \name{apply_EfficiencyCorrection} \alias{apply_EfficiencyCorrection} \title{Function to apply spectral efficiency correction to RLum.Data.Spectrum S4 class objects} \usage{ apply_EfficiencyCorrection(object, spectral.efficiency) } \arguments{ \item{object}{\linkS4class{RLum.Data.Spectrum} or \linkS4class{RLum.Analysis} (\strong{required}): S4 object of class \code{RLum.Data.Spectrum}, \code{RLum.Analysis}or a \link{list} of such objects. 
Other objects in the list are skipped.} \item{spectral.efficiency}{\link{data.frame} (\strong{required}): Data set containing wavelengths (x-column) and relative spectral response values (y-column) (values between 0 and 1). The provided data will be used to correct all spectra if \code{object} is a \link{list}} } \value{ Returns same object as provided as input } \description{ The function allows spectral efficiency corrections for RLum.Data.Spectrum S4 class objects } \details{ The efficiency correction is based on a spectral response dataset provided by the user. Usually the data set for the quantum efficiency is of lower resolution and values are interpolated for the required spectral resolution using the function \link[stats:approxfun]{stats::approx}. If the energy calibration differs for both data sets, \code{NA} values are produced that will be removed from the matrix. } \note{ Please note that the spectral efficiency data from the camera alone may not sufficiently correct for spectral efficiency of the entire optical system (e.g., spectrometer, camera ...). } \section{Function version}{ 0.2.0 } \examples{ ##(1) - use with your own data (uncomment for usage) ## spectral.efficiency <- read.csv("your data") ## ## your.spectrum <- apply_EfficiencyCorrection(your.spectrum, ) } \seealso{ \linkS4class{RLum.Data.Spectrum}, \linkS4class{RLum.Analysis} } \author{ Sebastian Kreutzer, IRAMAT-CRP2A, UMR 5060, CNRS-Université Bordeaux Montaigne (France)\cr Johannes Friedrich, University of Bayreuth (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., Friedrich, J., 2023. 
Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \keyword{manip} Luminescence/man/tune_Data.Rd0000644000176200001440000000354514521210045015633 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tune_Data.R \name{tune_Data} \alias{tune_Data} \title{Tune data for experimental purpose} \usage{ tune_Data(data, decrease.error = 0, increase.data = 0) } \arguments{ \item{data}{\link{data.frame} (\strong{required}): input values, structure: data (\code{values[,1]}) and data error (\code{values [,2]}) are required} \item{decrease.error}{\link{numeric}: factor by which the error is decreased, ranges between 0 and 1.} \item{increase.data}{\link{numeric}: factor by which the data is increased, ranges between 0 and \code{Inf}.} } \value{ Returns a \link{data.frame} with tuned values. } \description{ The error can be reduced and sample size increased for specific purpose. } \note{ You should not use this function to improve your poor data set! } \section{Function version}{ 0.5.0 } \examples{ ## load example data set data(ExampleData.DeValues, envir = environment()) x <- ExampleData.DeValues$CA1 ## plot original data plot_AbanicoPlot(data = x, summary = c("n", "mean")) ## decrease error by 10 \% plot_AbanicoPlot(data = tune_Data(x, decrease.error = 0.1), summary = c("n", "mean")) ## increase sample size by 200 \% #plot_AbanicoPlot(data = tune_Data(x, increase.data = 2) , # summary = c("n", "mean")) } \author{ Michael Dietze, GFZ Potsdam (Germany) , RLum Developer Team} \section{How to cite}{ Dietze, M., 2023. tune_Data(): Tune data for experimental purpose. Function version 0.5.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. 
R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \keyword{manip} Luminescence/man/replicate_RLum.Rd0000644000176200001440000000240114521210045016624 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/replicate_RLum.R \name{replicate_RLum} \alias{replicate_RLum} \title{General replication function for RLum S4 class objects} \usage{ replicate_RLum(object, times = NULL) } \arguments{ \item{object}{\linkS4class{RLum} (\strong{required}): an \linkS4class{RLum} object} \item{times}{\link{integer} (\emph{optional}): number for times each element is repeated element} } \value{ Returns a \link{list} of the object to be repeated } \description{ Function replicates RLum S4 class objects and returns a list for this objects } \section{Function version}{ 0.1.0 } \seealso{ \linkS4class{RLum} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. replicate_RLum(): General replication function for RLum S4 class objects. Function version 0.1.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \keyword{utilities} Luminescence/man/smooth_RLum.Rd0000644000176200001440000000436114521210045016174 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/smooth_RLum.R \name{smooth_RLum} \alias{smooth_RLum} \alias{smooth_RLum,list-method} \title{Smoothing of data} \usage{ smooth_RLum(object, ...) \S4method{smooth_RLum}{list}(object, ...) 
} \arguments{ \item{object}{\linkS4class{RLum} (\strong{required}): S4 object of class \code{RLum}} \item{...}{further arguments passed to the specific class method} } \value{ An object of the same type as the input object is provided } \description{ Function calls the object-specific smooth functions for provided RLum S4-class objects. } \details{ The function provides a generalised access point for specific \linkS4class{RLum} objects.\cr Depending on the input object, the corresponding function will be selected. Allowed arguments can be found in the documentations of the corresponding \linkS4class{RLum} class. The smoothing is based on an internal function called \code{.smoothing}. } \section{Functions}{ \itemize{ \item \code{smooth_RLum(list)}: Returns a list of \linkS4class{RLum} objects that had been passed to \link{smooth_RLum} }} \note{ Currently only \code{RLum} objects of class \code{RLum.Data.Curve} and \code{RLum.Analysis} (with curve data) are supported! } \section{Function version}{ 0.1.0 } \examples{ ##load example data data(ExampleData.CW_OSL_Curve, envir = environment()) ##create RLum.Data.Curve object from this example curve <- set_RLum( class = "RLum.Data.Curve", recordType = "OSL", data = as.matrix(ExampleData.CW_OSL_Curve) ) ##plot data without and with smoothing plot_RLum(curve) plot_RLum(smooth_RLum(curve)) } \seealso{ \linkS4class{RLum.Data.Curve}, \linkS4class{RLum.Analysis} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. smooth_RLum(): Smoothing of data. Function version 0.1.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. 
https://CRAN.R-project.org/package=Luminescence } \keyword{utilities} Luminescence/man/install_DevelopmentVersion.Rd0000644000176200001440000000335014521210044021276 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/install_DevelopmentVersion.R \name{install_DevelopmentVersion} \alias{install_DevelopmentVersion} \title{Attempts to install the development version of the 'Luminescence' package} \usage{ install_DevelopmentVersion(force_install = FALSE) } \arguments{ \item{force_install}{\link{logical} (\emph{optional}): If \code{FALSE} (the default) the function produces and prints the required code to the console for the user to run manually afterwards. When \code{TRUE} and all requirements are fulfilled (see details) this function attempts to install the package itself.} } \value{ This function requires user input at the command prompt to choose the desired development branch to be installed. The required R code to install the package is then printed to the console. } \description{ This function is a convenient method for installing the development version of the R package 'Luminescence' directly from GitHub. } \details{ This function uses \link[=GitHub-API]{Luminescence::github_branches} to check which development branches of the R package 'Luminescence' are currently available on GitHub. The user is then prompted to choose one of the branches to be installed. It further checks whether the R package 'devtools' is currently installed and available on the system. Finally, it prints R code to the console that the user can copy and paste to the R console in order to install the desired development version of the package. If \code{force_install=TRUE} the functions checks if 'devtools' is available and then attempts to install the chosen development branch via \link[devtools:remote-reexports]{devtools::remote-reexports}. 
} \examples{ \dontrun{ install_DevelopmentVersion() } } Luminescence/man/plot_FilterCombinations.Rd0000644000176200001440000001435314521210045020557 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot_FilterCombinations.R \name{plot_FilterCombinations} \alias{plot_FilterCombinations} \title{Plot filter combinations along with the (optional) net transmission window} \usage{ plot_FilterCombinations( filters, wavelength_range = 200:1000, show_net_transmission = TRUE, interactive = FALSE, plot = TRUE, ... ) } \arguments{ \item{filters}{\link{list} (\strong{required}): a named list of filter data for each filter to be shown. The filter data itself should be either provided as \link{data.frame} or \link{matrix}. (for more options s. Details)} \item{wavelength_range}{\link{numeric} (\emph{with default}): wavelength range used for the interpolation} \item{show_net_transmission}{\link{logical} (\emph{with default}): show net transmission window as polygon.} \item{interactive}{\link{logical} (\emph{with default}): enable/disable interactive plot} \item{plot}{\link{logical} (\emph{with default}): enables or disables the plot output} \item{...}{further arguments that can be passed to control the plot output. Supported are \code{main}, \code{xlab}, \code{ylab}, \code{xlim}, \code{ylim}, \code{type}, \code{lty}, \code{lwd}. For non common plotting parameters see the details section.} } \value{ Returns an S4 object of type \linkS4class{RLum.Results}. 
\strong{@data} \tabular{lll}{ \strong{\code{Object}} \tab \strong{\code{Type}} \strong{\code{Description}} \cr \code{net_transmission_window} \tab \code{matrix} \tab the resulting net transmission window \cr \code{OD_total} \tab \code{matrix} \tab the total optical density\cr \code{filter_matrix} \tab \code{matrix} \tab the filter matrix used for plotting } \strong{@info} \tabular{lll}{ \strong{Object} \tab \strong{Type} \strong{Description} \cr \code{call} \tab \link{call} \tab the original function call } } \description{ The function allows to plot transmission windows for different filters. Missing data for specific wavelengths are automatically interpolated for the given filter data using the function \link{approx}. With that a standardised output is reached and a net transmission window can be shown. } \details{ \strong{Calculations} \strong{Net transmission window} The net transmission window of two filters is approximated by \deqn{T_{final} = T_{1} * T_{2}} \strong{Optical density} \deqn{OD = -log10(T)} \strong{Total optical density} \deqn{OD_{total} = OD_{1} + OD_{2}} Please consider using own calculations for more precise values. \strong{How to provide input data?} \emph{CASE 1} The function expects that all filter values are either of type \code{matrix} or \code{data.frame} with two columns. The first columns contains the wavelength, the second the relative transmission (but not in percentage, i.e. the maximum transmission can be only become 1). In this case only the transmission window is show as provided. Changes in filter thickness and reflection factor are not considered. \emph{CASE 2} The filter data itself are provided as list element containing a \code{matrix} or \code{data.frame} and additional information on the thickness of the filter, e.g., \code{list(filter1 = list(filter_matrix, d = 2))}. 
The given filter data are always considered as standard input and the filter thickness value is taken into account by \deqn{Transmission = Transmission^(d)} with d given in the same dimension as the original filter data. \emph{CASE 3} Same as CASE 2 but additionally a reflection factor P is provided, e.g., \code{list(filter1 = list(filter_matrix, d = 2, P = 0.9))}. The final transmission becomes: \deqn{Transmission = Transmission^(d) * P} \strong{Advanced plotting parameters} The following further non-common plotting parameters can be passed to the function: \tabular{lll}{ \strong{\code{Argument}} \tab \strong{\code{Datatype}} \tab \strong{\code{Description}}\cr \code{legend} \tab \code{logical} \tab enable/disable legend \cr \code{legend.pos} \tab \code{character} \tab change legend position (\link[graphics:legend]{graphics::legend}) \cr \code{legend.text} \tab \code{character} \tab same as the argument \code{legend} in (\link[graphics:legend]{graphics::legend}) \cr \code{net_transmission.col} \tab \code{col} \tab colour of net transmission window polygon \cr \code{net_transmission.col_lines} \tab \code{col} \tab colour of net transmission window polygon lines \cr \code{net_transmission.density} \tab \code{numeric} \tab specify line density in the transmission polygon \cr \code{grid} \tab \code{list} \tab full list of arguments that can be passed to the function \link[graphics:grid]{graphics::grid} } For further modifications standard additional R plot functions are recommend, e.g., the legend can be fully customised by disabling the standard legend and use the function \link[graphics:legend]{graphics::legend} instead. 
} \section{Function version}{ 0.3.2 } \examples{ ## (For legal reasons no real filter data are provided) ## Create filter sets filter1 <- density(rnorm(100, mean = 450, sd = 20)) filter1 <- matrix(c(filter1$x, filter1$y/max(filter1$y)), ncol = 2) filter2 <- matrix(c(200:799,rep(c(0,0.8,0),each = 200)), ncol = 2) ## Example 1 (standard) plot_FilterCombinations(filters = list(filter1, filter2)) ## Example 2 (with d and P value and name for filter 2) results <- plot_FilterCombinations( filters = list(filter_1 = filter1, Rectangle = list(filter2, d = 2, P = 0.6))) results ## Example 3 show optical density plot(results$OD_total) \dontrun{ ##Example 4 ##show the filters using the interactive mode plot_FilterCombinations(filters = list(filter1, filter2), interactive = TRUE) } } \seealso{ \linkS4class{RLum.Results}, \link{approx} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. plot_FilterCombinations(): Plot filter combinations along with the (optional) net transmission window. Function version 0.3.2. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. 
https://CRAN.R-project.org/package=Luminescence } \keyword{aplot} \keyword{datagen} Luminescence/man/figures/0000755000176200001440000000000014264017373015112 5ustar liggesusersLuminescence/man/figures/README-Screenshot_AddIn.png0000644000176200001440000003037314036021206021720 0ustar liggesusersPNG  IHDRbN2V iCCPICC ProfileHTSY{/zW5t`#$B !QQEDQp,`aq **%=s}}7a@ |=1q @: sb曦SZ yN rGNG: eE!Z ʧ;=S0ÿOD# %@yFPtQ6sx|#Pva'8( ==cQ6L'4,W3L'rߑ&}6:(B0tG.5#@YqOs/r"ϸY氼fY>,ܳ<13baR}~ZpT?)Dw,'|=YY̭慒0iIBi颹جw#jITS:W4/ >+F,CtB"@2>D  1`/" @?z<ĉ&<39B7YpGoQ"g-`X[0u'g~wѯ2p(BܹKS}EV%¬ԱX@@  ) oBNbrFIG;Y :PVT`?G1N2zm!` 9:9dtixE :k;90\]~twreָ>uqt{n~Ǹjv/׫ȫ[;һקgwo/o]:ͬg@  xh( l = fǡ,-]\YEX^Xg8-|E%# #%QQKƣKcƬˋmEǒ6K Yf,{ӖY!x<6>:PV5LNe{w_r8eDIII\gvHkry+'&/eoxjHjmdZtZS:!=>_ʿ+0 33wd EhU.;@KVUǕQ+ggrs6>=3bɋ 53:Ffw*j[ {!xGu?u~|bF_۾|{4>9)` YVAZw@}5If3O4 'a @e:gept#DIV3Zyb?NNSWɯbО9է)5PWW_%G qiTXtXML:com.adobe.xmp 271 98 ]$NIDATx] |Lm"KE$A!4hQ>$hM)}xO=KKiZKKӧ<6(% KIH"̤{L&3I$i"|>3s9[sӤPQOG4"='p8y#yT 6^#΃@ΣZJwpwՂWpjyp6bg* m9pw'cf RqfB%^3Bdz8@G,:5XZm(`gct2GOL]<G"PM瑇] c좏0[5.Sg*},*5odR27!.oЉI+qPdrmߗH7.q@#`{AUB۶mWDcmOxjqyͩ%]0J${x?Zt ٿnZeA`܋+i @tU}QWp)w檴,87n܀um5Ǘq3Mwϝ/h1[MB RXԐKv2ij10Wf $ uJ<Į;{X >hP)X[s@S[e'ףKGܭImƬ*532a@{vP-a >= oMjG룿j-+`V9{?5K>;<6~1Gsr⭶jcxrueʻ|UOխKւ]v8r%KmG#P&MX+E+vD![ttrǧ1D"26vcvd+XN2s+!U@ Fbe!GUdXȽiLڰ2CZBe4R<#i*84ĜGAuW$<9 cyR%4.$Aܸ r8`>Ar Wp|cxG#аw%ĤBrߘ3̼Ĥˍn1G#@X{Zܸb+s8ۣŗRGh/·#hDTs=F7#0vh)|(`#nu6mGҵ{a%wj=y+JJtl66ArUI=ZVnUVoQPnq4hT{0`זE3ږ3XWXAK@f 9hM6!Ewis'b!N^L@֭\sqhp)oުha![d2[T"z2׿GqϞǟ4gxӸHS[HfhWe,0-MC(xOa)@1GjWȌ`$G7Ggy=mĬق2L#V$&`xhR|d O=bƫqt\u A3ހ7SU!GΝpG*cT9GIv~|^G׌D$h>ȨDj".B͢ Ԕ `]8 i|@EmSܿ1uNܲǐYԠ}+Lf5ʅ(XŃK;ۥDfw{ӗ{y, sxn;#b 1^y(#ŝ`o3\݅4AW[bkLG_nwCuy2!G OwV%`}ә=чΞK(U+_L|ꎜ]u Iƾ GmHBFM*I-_Gxm+l3rH DE%"j`v9 Fu,GQO|h:?UAd*M2ZRR %dSFC5p LeuR+bzï'X7\It;\#qFۨYfm~4h'Bwhz} .rځTc"ѣ/|}C'G0%N"͊ɒ˻_`[eZGcW#mVVVȹ#DZ1߸֞G8x% _ ; QF7B~mf+$hf*M?xwj-ӷC7r!Jaӊ(ڃY2 0 r`(-(E< e`/-^5OIX6i(bκ}Z|'c_2S _:{(Z| W:歌@&1s!bNcWVCߵ6[9.9".*KB4[1i{VEJ#^,w ;@+&+A]UYnjJ #B0dRұVy o#.?A[10Yq0 <~݃]r=ODضMJ8xEc"5[SZ8qVcִ{ ;k(I 
bve,[ֆf"2iSbtG)dN4߹?ŋ 6?+Pq<҉ 7&F0.{F3z#%m<⿂ѥb‰ ?&^CYL:Fkv!=hlƻ5^)8k$ [г29X4zM+<| R4چ]mL/ǐ~0z8tmږѸr\BmC{40~=L/Ƿ4DWBt{"0? IEfUQt9?Pyҽ# @?QWsAf>7g;ؽ \ˆACMV!!ruVc3޶^Zy4MAVne+ [\1(U8C 6?t菷׋|h/,rB簙OG%o܈߮$lDΊagl W)pO`jq.- NacЏAZ0 ?8 @U X>xt-au_W<=:tc;"C7k7UY G3udOXX.L5 [&aHg͎,ޟF O#}Tn# ]  U)ʴ.tI̝P꫇ Q"@ut~v&س‡ (W,05lI5&eqm(1m{zOc ׆ݤ?|+O"5B䜧"N5 )&W@iblwm*~aܑˣ!)u9W1t6#p@Xl0sbsvtƹbEk<;8,I웲0>#C1ѵmX'&W*<= 8}mݽӒ\7lqhJw d6kzum.TRй<džs9#0v^p &h N(9g|(zv kݵ92"+^L`HL!LwאBxB§g#‚ch(w$A=S[_h$Bj`N@J5]^q-ȡSt|s~) k>N݅COmlY$PWj,R YDFh8܂تmuY5KcΫziEYp[!_ɐ[+n8 떝ѣ4Ȩ.s!ӉݚE#h6-Uuk,`.fe8t rWp.޼)HLQ@'~J,\^ …,*lJ؎ ()O2<:Bc3#yxt5`eOmQyg2)PRzhD;=M]3\ه|FRd&c/L^ɹ(Q"+w)"q4.Oy\l,'_/]˷ Qr/]:#.ij)Ϗx 4|>{gUl?|&Bݰs`ܬq_2Rxy܏Y {dXC} ;q47~zwtNUϒSv |1tp0<֮oE:#ok^n1qi(^'D Je3gȒ#.;9?Ezjŏ߾.K1l3ml]P(4gߪ}+Eg6}д'^#>x1噾YyUhEymrc|DfN{PLTϼ  4)F'_IQx+$ٱlڛY"Jk5f'<)_-:`*>US׏~[}#dZZ9%Sz忓1N2u oI\Ny$A!X]uau ZLUg6O4KͰNe:+p-hkIWq#ݖ6AOJL[jT 1Hi;; ;[qIHO Bbݧս~'Bxdjg3yDDŽ#)= qc)FLBÈ\6_A(DB8yړ~--HHq3"yg=:~ gHAۥ=xI9h?uтCS},Q;8' aSijFpC;W"vQ| ZalLs^j ?vYH_ Z|5WE d*>{ %LMg;uԓ/iZ@9= |]8y)h@G  {\Tl7M-!)W2 $V48 s B$#K"ޣQ)rC%V=weڭ "oLjxGtE7 񛮦Be3i&B3C~, 5 ^DL3KfV gH"HCZj ,P^&Ljt LT]W3a5lHA9KYҔ%3(y)IRj\=,aqR:9\7k0 Kז@]!f[0k7=#cO** i'^`VpSIL<%\ļz^slRB6NluK~6prYja)꨸=@X*@nn NЖ 5O͢=Tyk\c*iTl .9JgTɞ&0B`\?#̲vr}8S0|hK}xX RL_Logo_NEW ctiCCPsRGB IEC61966-2.1(u+Q?fBrML-n-5Sj?wK˭rč_*J)rᚸAՖ9=99k8dAdZhZ,l8 KT`pqŌ7VsZJ\W,lCYg4MmlvjVᢵj;y(9*}3zk$ڢ01gxڶէ[9$mˢU۸TV_;:&t$K]n^۸ݼjxFV;QtMsp/)X^lDUmlDp0]w#(穴:{6e_jr?;"}37C 1[P=v07fNvo{1>^s/i DyTNRv_QjƵdΌt{nɪ;Xơg;JS*ITwR6Yd3'dĭ}\DZsMRlFOP6_xH|\.X~ WmTӼ!ĵ:Ie ڨo$à]uMWRRΒ7T95]aizד:ej؈`?;0؀iUff/LbωZ+Pٽ4!%&-+&`̜̀:ƍ#NMl߾|:kjB9ciN Ff55,y{|+fOOǎuMOuүUThNWmΩ5RTR >[Om>?86M6TRT0[Ɔr[o⭄p0ƩSY|`-h:Ee:S?Rl6hhޫNG5>ǎ|F 5BYT|&f5)Tm|M=-=!b[Ya7eU|6lc_Z-De 6c7j.ɢul9xBKGݖofO -֎(NRC0y(""T/p~3|(YE]=lMӮH ef5 xd:㉋Sc.lܸ_nbyC_W뜚OjlT/jͬ0 QVZg6z~*V95Ҡ b2O5DmldH0'!N#-+/Lbױt-(j^rmYkU`X0n`fLKts |8 Z³Qv|T2[a(sTOzeΌo{nᩮv]|z6( 蜚jr;$vj[FE}1{2R|݇qj>tmgtN}2[}QǪ 1K;pӧ` 5"V Vofo bmBY[{wih,Z܄;v7pRK~,6+Xf5y@YXZ5ctV܄ʊ*Vڷ|.ʪ4m2W<ʯ b &a̼kGy*5dQ5-r:l}3 (Xf ʄ=jc 
{G.4K?I~ _MHolFT>k;fMO$6VF B|E2ťZ.qǮsjɂufg7mSѯ C𸲲J.OfՖovZ*qvT b6QHSnm(( w|>&)-U(ߣvE]s#)Xf5弿L$Ɍ)chrBxi|8yNʴvᚑ`-pcnК93'R܄j'kx6 ʵ\b2M:+-Xil6Ut#4!2`pF8H" ݦi; wʂeXl2JmlXP=N#.Xˁ4M96MjW,x 1N.ۅ%\S!w*<,6(Xf5֪ۂ93%^܄uUUN\evQRi{-([uNM5,jtSwdȾ2O% 1lwTi.ݦX=x`-n߀;ƆbяIXط ܫoc}13¿<k9xn:f^kc9WY3kz"Z\*`.+.G7~[0E2yŚ:{FoVk 2Bm|B&̞H>j}nv)-)>lٲKXWlThkYҟsj52`-֑(m´;1!q u<*i~pZɹȼIlOI^ xaٵZkzjcM&ŴG߰f߮]L.nՊM6$kCpZs5՝K7p%-(SDi՚ v_R5%sfS8M?{ o]ٵ+_Wi[UNV5m>Q|vh V].S݅J6`đMYek?꟞ck⪂b/^φGvmNUUkUrLx+ϋMy)uEflڴ09Ge1wd^2Vב*Xf5e'G@Yw`֌DZP}in?i_<` K P\m!>Y"MgQOr;͏Pj6vc=mRpQiԄϋ,V:fάVGv#d8 _4 ?Dc4ڄ~dq0lh/|YuÁ}+Es!y@4{UVTQpbJ()*wE) Ho(,4FX""B st}®;A_ 3;3qH KIM;G Q\RƉSYP@yU*'j hդ1-: -+umeqqx7ط}|TُV& r<ɩ3٤%M ;ӤI#|}8cmqV;Ya/\K˹{`nM/3\XLM~vȿPLROeq*EQa HgJ8̶GH9uSLB:C L`֝#5/k7aۯ\fDrWJJٱ#-I;S-r]Cl.SP甗WkQLa ʝn_fhʹ۽ ?[ty|w9s;X:eURuKߞit̙l.6~' ,r g/x:!|CXb&$3YYx/+ ;6 !0`u >qOWocڝ4m$7&gs̚2*%%˒It*B,C ?=r&7*v>L&/SSϱ|$!wpQT8=6/ڽLf̸o4ݺvXFF%HIt*B 0!q v[@``͚5֣ǣrs Xd#"O!t};h?w:;~Ma=JBghrV;cH$@xDGn-_Ytn=7X` N,^佞NEfh8ѳ=yHZ VeU&$$,1t"2B[8yQh׻#e=cHnZ>3|$J:: @81@ʪɿ}Ϲ\bqBdhjX9⥺+5mK{+}^{R !td#a|8OZrwbT86iiYZEF>9[ѱCk:u#&oMy:=!TJA2g:\Ν@`ֱh߂yEΩ,bJOR/-ѾusڷmEJqn鴄ЍZs۷#}vT u}m6Z6en:-Z4t:B{ QYi9{weyV| ߕ[f!⑂UQQEJJգrC }Srؽnјu֡h׮# /X_[XcG0`}f'[0~u 0ڳ#w6Nxżdh**,孏^:L>Z4%!!B+C cgb-%v[xkO#:ʘ\eeWw?1i]{4!+3K9)&,85>_KkҺ%I><Ђ@mXego"b}'M{Q]<ӑ'PIcܶCЂus2SQǣ.[JˍndhL:?WQQEV7UU:#[}vm!}B,&/CT].taN'Ajk78B`SCtw˵ cَK c`R~Wߐ8L|Pq{ Vئ^)Ђƣ,Rʫziv;?nŌitR{ Vn[ݧzt*]Z|LD-y驇 ļE9ɹy o;HkN Ügra~^e\d&aa]; ]/Mk\8wg껥rD{>BDS\:OݻW_{sT{FXGOu4(T uXXAAA]cθB+>g)*}'1Ƿ5wJXeBch) ^{k1'Of^{ ;<;Qaa4mk=oS#0xV6͹ܛUpl6OSbÈENa5;>4q='`2tÄSyuK%3 _[uX3'2[ IDAT_ZFjN_-VXa(kiNfMt_m?@ffۮ/DCfJ ~)>Κ {ʹBL"US mۚ{ н}kUN's?~;SvBgkٳ={suҖ5ߤe-7VCAC5_gBpÇz! [IM=wҲxxٵrhΠv}P֏_ق$PЂU]d~K|v~[wmcoq۵TZKo.$+=eB44ӧ)""8Yd/Sߴ1AA8].>ou[Kn $4sϾ[u@FZBЂuD][^==##GO0W'y㩇8{lNL&7[}م<'n1 Z2.ܾm?[6.@llSZ7QV|FGDpۮ]%}*}#h( -X"ȹ\>bH4kmNHmDDr؁n~MV;?ϫ3!D n~VEs$O$ꛋ?8sʘ+..LE:wi4XmeW_gh2M"(fy^2.`я-m9|8Epp 4ڭIaYyo o}H!+((t/aʮaAA ّIG\CGNЮU3C2;rr\|aXj.t_~Ajj& m[xZ2;M42$' fϓ}-*2iΈ[H5HkNPP]z{BBKzlG\1#+6~aɹB55O}v<*[. 
=k~x'r\E ۙ^< }}Gp,5='N{4yycJ{Gѧ{ۈj'+ʲu;)W gK7 ݙGfaXn&Gs.,6쾵9~.YB1 ߅zyڐGK7ק#%el,E؏&W8;x sYr #<ظnq-6;Y8p:w',( f撐jҹsg[lj*%%nư= {A!'ZRSrsO>DPPKos&4 Ky4A\*,u;=rq\={gї6б}k:u&+)ηLPPݏ\WZݦNMyE+7{]NL:nNtjߚ[Ӵi,>Ђy0;ϕUuХ 5ypF"qУy_\B׼ mtoNm -X]|Z~?* _CIXP-[ϪL{z4_ZF~jS3<ZzlOF>``LƝ5غ)Y>;V|LCpز`>""Gg[dS8RRX(]jjoJQ1{h !eb=үgгml7KO&8c keҝE[!HE)|3g!3+ڴClbcӳGv—xlc2) F/ KpH?=t[oZݼG;Alccw-!N}:?;u""jކ[o˫ﯼzBüp-Ļor rUTz:%!E`M1.7_ @X—ڹGtN`H.t@#j8ڽPCp]wY<)T{`e!++B/' Y)&?c!b!{vS!pۥ+22r#J]Z:wl@l?l%+ gIC Vhͱ39z􌑷:4>ģSyy% ۉn≉"4.]p?oxyБoNZ {ygv9HItBC.ydyKY<* auwT^LFWBmۖm뛻3ajM\8Uwk d]L: /p2qyU "KgF&5#߯u2RSq9N&EK+(+ iƭhj&7 ;we}ke†0{pySU;Oh(VEcёi PbNo] HrquD0|x/[zp?/m_0] a l22YPVVѣ9Oii%>a0aa{Ҳ0kC|d=usŨ}`JZrr xwהUUy[ѕ2`{j6[tsמFt.ѕ2 ggd}tվE3] %il׷sO+Vlv;J0`u@OFWBx-}:s\6WnFFWBxeA<\>j+ Vo#i:ķ 8ӟ1uTFWBxI{|Ht8Z??eh*X2s -Xy55Nl]2³ -Xǎgt*ޝrKWt'(AHһ!] YYDFL$0wzet%Z1Z4G8*V +!UG2KgJ`h!{Gt^>WrP:Faׂ6;}G m&DE$E5dt%0`9v%$=Shdt%0`=@oDFWBxC ֨Q}5)2»>UǖjHCFWJ ԩ,U1u"Nf {o -፼ψ i䦌j&+!W?F`l2{:٣֩墸 s8sndzQ^֭Ѻu:?3ΚXdӧ1&9dt%GB f j78OFWBx7+X4LFWBx?,XҲ/.B1GJU`_}g27Z֯{K{u('icZhdt%o0`9].D NRѕЂռYctJ3QaķnF[ ٸOFWBC V~ׯ-^(2·x[+RS3yhXT]uѕʂUYQM%e)wJ=5GBFWJ=y>[,+Q\VEVv ?WNfSnr")/z: ^4i4<=Jՠ mn}$LN>/UUTg$+!|[ VYEE1a*+!|[ VTd8 ͚5:BFWB>C{2thOwޢ^23{[!+!ܯALvN\iژ`7&+!O7\.+NuqC܌sHy* p bt=PFWBs\~VfQHXVUżUIںS2`@75'SMNND9{ޛ FFWBOiI9-Km^Wmss/ӿckvzi]eUUf_ ,0έ׫pJ¢BI7_~yxߟ[ IˮwonLTRY/]S/t zJN%<8Nu.] 
_tL?<4}:i'N~ʊ*>[_/[rՀa]7۾T{1{)99j E%"8.[\Wdt%|]{xw覯r֭s8ieU@0 X8Xa]pm}}T,XJ^r KW ?d9z s97?SX6x ۖ9ZLz-Vp77ڌEF0sHFG/o_f qcEbe켫Yz޵cG3 5^TPX?v[f}>\ub ~R]̙9Ԇ֪%Pdba˴i۶ƏTW9ҵ+4==9vCUjj f5x* ݿ3IF]R_hh(OZ'hd9WUݦy_.o^6b Ue%NL&F̋BBxFVo?HK˥v[MGlFO\_0{x[K9xMS^Vƾ_߾UTWU{3O3*1Pfm\*rn[%&+k;4&fDFyBҳw?/0(OIIEIe_r {Jk5qKlB66*,cB-7;xj @NN%jmyݦ݈[ b ^OhքSݧ P|` ׏_~=zP^^ɲ[b^*UPXwXkubm< o'tkϬl1eW V|K֓UP%,FS+ fu2jcÃw@ %8ĭ- 322rpA2qmq*Xpg*Psxb70dpw Vqq ld펃Tk/pmx]lF.3ƓR܄Vj'kaW۸T7(TuNM^[0[m3Lԓ(Ju§:t%%<xnӴr^_0[#PnQ(4) 6gp#iZ«vE}3ӟ,3xP׶YfOGuMU|yiwm):6>U0[([4?^>k;fMOlrAr~̅Gcovuy;,XW-֮(ޭ66$ ۇe}# ?9!ĉ]r_^rmff .XW-;P|g#=,^+?O#y1-4.}Yݦq[E0[ρj;6gtzg!ܦ/Y~7i6,Sأsj7 enpPlMFCi3!trGt 4p=4zub66,(c2a# wt6.Xz-}(_wm6f~[j(ƷI1)[ KYtkvƩ{xa95 +@ 3c<ڵ=7!|f}rM/S%뜚i 3X83%*ZuFg8iy6zs;TlCi6QHfK,++y ײXJߗv[yYk0 e6M<ǎ|F 5ِ-jѓz0[}Pǩ ѷp="m>~+=L̫޲ET$LŰad~ˏ2:vh=EM9\ ,kkY Rm|V̞1T/^wUhjم2OU,k 󪘀3s8KI>lg/hZd~躏vSdXjc#C4̝!0H|ݹsy| ='4mP++풾5Rtp}Ʒn)c0WZRg˒j~Eˁ96M ķ`l6BPxfϜ@\x o˂/PPZ_;5:`Irx66^L4a''3|p-iyZwvfRlCYGmltXF66˽ļEIl=tRKrm*,7cE@^M=-=U,Tb-ܸrm(Cy;sj;`l6E6~hώ<4--T`&rmt&rӤlM\iO kO jcCk&M`k`Ne1wR2k /^4 `y67`đhpױ~4O96Mp `yb ~P}rk̙9U/jW`Za9tNMԓ,/`XcQ/~ҹSo&`tn̜:&M#=wq>Z|MOH;gI"fui(Fs_` %H|k{r9^um΋e.o^6b U|III9ndT4 wm:&n,/eX#'.O0{x[螛7s inƥ2M/lNԄ`y9e5Mml!>y4 '%%%}AK=ޓv%G-([F2aLH/|rr (5=Uait8R|b ~FfM=5}:螛'Wlش m4kP'=sjM` e7_Oҭ=f$Ҳ̼`dK\$J;Ji-R|b펲jckD&MAXf2wZffk /BiBa뛙0,?`XD)\24 g#9W\Xٸvv[ CIf5xeG'vi݂93ӹssUUNogYNJofIIDAT*5lCYSԄH3f%#4ە#&m>vc ,2O?`):eH;v w[Ӫ32rpA4-Z=_v .Lڪm܈Ð *..cҍ|8H픾 o!0[('W߻]g'!8]_mRwߠSm95e`5 f5jcL&chСS|(|-3v·Hj[6 amCm o';;y 8% x xa]Ԝ9Rm>sWzm5aqQU\YY%̗Q+v#Zo-(<^>k;fMOllm>.$'g\(.Ւq7*YpI-֮(kޭ66$ ۇe}# 'Ne?%K [B%aX@i66&"w`~L_ħ֑v_Yݦ.`-`[;6Ov޺Rm4Q),,Q+q';~,y*Q)Xk?etRa;6M\A m>L̷ 'vi),b G٭i &.e*YD KhbX۠<=24͕v,qSpA7hϽ ܞKRM3[,@l n;fhbH1[Q`7Oݶڣ !K3`!ߒ%R>#urIENDB`Luminescence/man/extdata.Rd0000644000176200001440000000452114521210044015353 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Luminescence-package.R \name{extdata} \alias{extdata} \title{Collection of External Data} \description{ 
Description and listing of data provided in the folder \code{data/extdata} } \details{ The \strong{R} package \code{Luminescence} includes a number of raw data files, which are mostly used in the example sections of appropriate functions. They are also used internally for testing corresponding functions using the \code{testthat} package (see files in \verb{tests/testthat/}) to ensure their operational reliability. \strong{Accessibility} If the \strong{R} package \code{Luminescence} is installed correctly the preferred way to access and use these data from within \strong{R} is as follows: \code{system.file("extdata/", package = "Luminescence")} \strong{Individual file descriptions} \emph{>>Daybreak_TestFile.DAT/.txt<<} \strong{Type:} raw measurement data \cr \strong{Device:} Daybreak OSL/TL reader\cr \strong{Measurement date:} unknown\cr \strong{Location:} unknown\cr \strong{Provided by:} unknown\cr \strong{Related R function(s):} \code{read_Daybreak2R()}\cr \strong{Reference:} unknown \emph{>>DorNie_0016.psl<<} \strong{Type:} raw measurement data \cr \strong{Device:} SUERC portable OSL reader \cr \strong{Measurement date:} 19/05/2016 \cr \strong{Location:} Dormagen-Nievenheim, Germany \cr \strong{Provided by:} Christoph Burow (University of Cologne) \cr \strong{Related R function(s):} \code{read_PSL2R()} \cr \strong{Reference:} unpublished \cr \strong{Additional information:} Sample measured at an archaeological site near \cr Dormagen-Nievenheim (Germany) during a practical course on Luminescence dating in 2016. \cr \emph{>>QNL84_2_bleached.txt}, \emph{QNL84_2_unbleached.txt<<} \strong{Type:} Test data for exponential fits \cr \strong{Reference:} Berger, G.W., Huntley, D.J., 1989. Test data for exponential fits. Ancient TL 7, 43-46. \cr \emph{>>STRB87_1_bleached.txt}, \emph{STRB87_1_unbleached.txt<<} \strong{Type:} Test data for exponential fits \cr \strong{Reference:} Berger, G.W., Huntley, D.J., 1989. Test data for exponential fits. Ancient TL 7, 43-46. 
\emph{>>XSYG_file.xsyg} \strong{Type:} XSYG-file stump \cr **Info: ** XSYG-file with some basic curves to test functions \cr \strong{Reference:} no reference available } \keyword{datasets} Luminescence/man/plot_DRCSummary.Rd0000644000176200001440000000755614521210045016761 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot_DRCSummary.R \name{plot_DRCSummary} \alias{plot_DRCSummary} \title{Create a Dose-Response Curve Summary Plot} \usage{ plot_DRCSummary( object, source_dose_rate = NULL, sel_curves = NULL, show_dose_points = FALSE, show_natural = FALSE, n = 51L, ... ) } \arguments{ \item{object}{\linkS4class{RLum.Results} object (\strong{required}): input object created by the function \link{analyse_SAR.CWOSL}. The input object can be provided as \link{list}.} \item{source_dose_rate}{\link{numeric} (\emph{optional}): allows to modify the axis and show values in Gy, instead seconds. Only a single numerical values is allowed.} \item{sel_curves}{\link{numeric} (optional): id of the curves to be plotting in its occurring order. A sequence can be provided for selecting, e.g., only every 2nd curve from the input object} \item{show_dose_points}{\link{logical} (with default): enable or disable plot of dose points in the graph} \item{show_natural}{\link{logical} (with default): enable or disable the plot of the natural \code{Lx/Tx} values} \item{n}{\link{integer} (with default): the number of x-values used to evaluate one curve object. Large numbers slow down the plotting process and are usually not needed} \item{...}{Further arguments and graphical parameters to be passed. 
In particular: \code{main}, \code{xlab}, \code{ylab}, \code{xlim}, \code{ylim}, \code{lty}, \code{lwd}, \code{pch}, \code{col.pch}, \code{col.lty}, \code{mtext}} } \value{ An \linkS4class{RLum.Results} object is returned: Slot: \strong{@data}\cr \tabular{lll}{ \strong{OBJECT} \tab \strong{TYPE} \tab \strong{COMMENT}\cr \code{results} \tab \link{data.frame} \tab with dose and LxTx values \cr \code{data} \tab \linkS4class{RLum.Results} \tab original input data \cr } Slot: \strong{@info}\cr \tabular{lll}{ \strong{OBJECT} \tab \strong{TYPE} \tab \strong{COMMENT} \cr \code{call} \tab \code{call} \tab the original function call \cr \code{args} \tab \code{list} \tab arguments of the original function call \cr } \emph{Note: If the input object is a \link{list} a list of \linkS4class{RLum.Results} objects is returned.} } \description{ While analysing OSL SAR or pIRIR-data the view on the data is limited usually to one dose-response curve (DRC) at the time for one aliquot. This function overcomes this limitation by plotting all DRC from an \linkS4class{RLum.Results} object created by the function \link{analyse_SAR.CWOSL} in one single plot. } \details{ If you want plot your DRC on an energy scale (dose in Gy), you can either use the option \code{source_dose_rate} provided below or your can SAR analysis with the dose points in Gy (better axis scaling). 
} \section{Function version}{ 0.2.3 } \examples{ #load data example data data(ExampleData.BINfileData, envir = environment()) #transform the values from the first position in a RLum.Analysis object object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos=1) results <- analyse_SAR.CWOSL( object = object, signal.integral.min = 1, signal.integral.max = 2, background.integral.min = 900, background.integral.max = 1000, plot = FALSE ) ##plot only DRC plot_DRCSummary(results) } \seealso{ \linkS4class{RLum.Results}, \link{analyse_SAR.CWOSL} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) \cr Christoph Burow, University of Cologne (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., Burow, C., 2023. plot_DRCSummary(): Create a Dose-Response Curve Summary Plot. Function version 0.2.3. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } Luminescence/man/convert_PSL2CSV.Rd0000644000176200001440000000515614521210045016563 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/convert_PSL2CSV.R \name{convert_PSL2CSV} \alias{convert_PSL2CSV} \title{Export PSL-file(s) to CSV-files} \usage{ convert_PSL2CSV(file, extract_raw_data = FALSE, single_table = FALSE, ...) } \arguments{ \item{file}{\link{character} (\strong{required}): name of the PSL-file to be converted to CSV-files} \item{extract_raw_data}{\link{logical} (\emph{with default}): enable/disable raw data extraction. 
The PSL files imported into R contain an element \verb{$raw_data}, which provides a few more information (e.g., count errors), sometimes it makes sense to use this data of the more compact standard values created by \link{read_PSL2R}} \item{single_table}{\link{logical} (\emph{with default}): enable/disable the creation of single table with n rows and n columns, instead of separate \link{data.frame} objects. Each curve will be represented by two columns for time and counts} \item{...}{further arguments that will be passed to the function \link{read_PSL2R} and \link{write_RLum2CSV}} } \value{ The function returns either a CSV-file (or many of them) or for the option \code{export = FALSE} a list comprising objects of type \link{data.frame} and \link{matrix} } \description{ This function is a wrapper function around the functions \link{read_PSL2R} and \link{write_RLum2CSV} and it imports an PSL-file (SUERC portable OSL reader file format) and directly exports its content to CSV-files. If nothing is set for the argument \code{path} (\link{write_RLum2CSV}) the input folder will become the output folder. } \section{Function version}{ 0.1.2 } \examples{ ## export into single data.frame file <- system.file("extdata/DorNie_0016.psl", package="Luminescence") convert_PSL2CSV(file, export = FALSE, single_table = TRUE) \dontrun{ ##select your BIN-file file <- file.choose() ##convert convert_PSL2CSV(file) } } \seealso{ \linkS4class{RLum.Analysis}, \linkS4class{RLum.Data}, \linkS4class{RLum.Results}, \link[utils:write.table]{utils::write.table}, \link{write_RLum2CSV}, \link{read_PSL2R} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. convert_PSL2CSV(): Export PSL-file(s) to CSV-files. Function version 0.1.2. 
In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \keyword{IO} Luminescence/man/ExampleData.SurfaceExposure.Rd0000644000176200001440000000773414521210044021241 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Luminescence-package.R \name{ExampleData.SurfaceExposure} \alias{ExampleData.SurfaceExposure} \title{Example OSL surface exposure dating data} \format{ A \link{list} with 4 elements: \tabular{ll}{ \strong{Element} \tab \strong{Content} \cr \verb{$sample_1} \tab A \link{data.frame} with 3 columns (depth, intensity, error) \cr \verb{$sample_2} \tab A \link{data.frame} with 3 columns (depth, intensity, error) \cr \verb{$set_1} \tab A \link{list} of 4 \link{data.frame}s, each representing a sample with different ages \cr \verb{$set_2} \tab A \link{list} of 5 \link{data.frame}s, each representing a sample with different ages \cr } } \source{ See examples for the code used to create the data sets. } \description{ A set of synthetic OSL surface exposure dating data to demonstrate the \link{fit_SurfaceExposure} functionality. See examples to reproduce the data interactively. 
} \details{ \strong{\verb{$sample_1}} \tabular{ccc}{ \strong{mu} \tab \strong{\code{sigmaphi}} \tab \strong{age} \cr 0.9 \tab 5e-10 \tab 10000 \cr } \strong{\verb{$sample_2}} \tabular{ccccc}{ \strong{mu} \tab \strong{\code{sigmaphi}} \tab \strong{age} \tab \strong{Dose rate} \tab \strong{D0} \cr 0.9 \tab 5e-10 \tab 10000 \tab 2.5 \tab 40 \cr } \strong{\verb{$set_1}} \tabular{ccc}{ \strong{mu} \tab \strong{\code{sigmaphi}} \tab \strong{ages} \cr 0.9 \tab 5e-10 \tab 1e3, 1e4, 1e5, 1e6 \cr } \strong{\verb{$set_2}} \tabular{ccccc}{ \strong{mu} \tab \strong{\code{sigmaphi}} \tab \strong{ages} \tab \strong{Dose rate} \tab \strong{D0} \cr 0.9 \tab 5e-10 \tab 1e2, 1e3, 1e4, 1e5, 1e6 \tab 1.0 \tab 40 \cr } } \examples{ ## ExampleData.SurfaceExposure$sample_1 sigmaphi <- 5e-10 age <- 10000 mu <- 0.9 x <- seq(0, 10, 0.1) fun <- exp(-sigmaphi * age * 365.25*24*3600 * exp(-mu * x)) set.seed(666) synth_1 <- data.frame(depth = x, intensity = jitter(fun, 1, 0.1), error = runif(length(x), 0.01, 0.2)) ## VALIDATE sample_1 fit_SurfaceExposure(synth_1, mu = mu, sigmaphi = sigmaphi) ## ExampleData.SurfaceExposure$sample_2 sigmaphi <- 5e-10 age <- 10000 mu <- 0.9 x <- seq(0, 10, 0.1) Ddot <- 2.5 / 1000 / 365.25 / 24 / 60 / 60 # 2.5 Gy/ka in Seconds D0 <- 40 fun <- (sigmaphi * exp(-mu * x) * exp(-(age * 365.25*24*3600) * (sigmaphi * exp(-mu * x) + Ddot/D0)) + Ddot/D0) / (sigmaphi * exp(-mu * x) + Ddot/D0) set.seed(666) synth_2 <- data.frame(depth = x, intensity = jitter(fun, 1, 0.1), error = runif(length(x), 0.01, 0.2)) ## VALIDATE sample_2 fit_SurfaceExposure(synth_2, mu = mu, sigmaphi = sigmaphi, Ddot = 2.5, D0 = D0) ## ExampleData.SurfaceExposure$set_1 sigmaphi <- 5e-10 mu <- 0.9 x <- seq(0, 15, 0.2) age <- c(1e3, 1e4, 1e5, 1e6) set.seed(666) synth_3 <- vector("list", length = length(age)) for (i in 1:length(age)) { fun <- exp(-sigmaphi * age[i] * 365.25*24*3600 * exp(-mu * x)) synth_3[[i]] <- data.frame(depth = x, intensity = jitter(fun, 1, 0.05)) } ## VALIDATE set_1 
fit_SurfaceExposure(synth_3, age = age, sigmaphi = sigmaphi) ## ExampleData.SurfaceExposure$set_2 sigmaphi <- 5e-10 mu <- 0.9 x <- seq(0, 15, 0.2) age <- c(1e2, 1e3, 1e4, 1e5, 1e6) Ddot <- 1.0 / 1000 / 365.25 / 24 / 60 / 60 # 2.0 Gy/ka in Seconds D0 <- 40 set.seed(666) synth_4 <- vector("list", length = length(age)) for (i in 1:length(age)) { fun <- (sigmaphi * exp(-mu * x) * exp(-(age[i] * 365.25*24*3600) * (sigmaphi * exp(-mu * x) + Ddot/D0)) + Ddot/D0) / (sigmaphi * exp(-mu * x) + Ddot/D0) synth_4[[i]] <- data.frame(depth = x, intensity = jitter(fun, 1, 0.05)) } ## VALIDATE set_2 fit_SurfaceExposure(synth_4, age = age, sigmaphi = sigmaphi, D0 = D0, Ddot = 1.0) \dontrun{ ExampleData.SurfaceExposure <- list( sample_1 = synth_1, sample_2 = synth_2, set_1 = synth_3, set_2 = synth_4 ) } } \references{ Unpublished synthetic data } \keyword{datasets} Luminescence/man/as.Rd0000644000176200001440000000342114521210044014322 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RLum.Analysis-class.R, % R/RLum.Data.Curve-class.R, R/RLum.Data.Image-class.R, % R/RLum.Data.Spectrum-class.R, R/RLum.Results-class.R \name{as} \alias{as} \title{as() - RLum-object coercion} \arguments{ \item{from}{\linkS4class{RLum}, \link{list}, \link{data.frame}, \link{matrix} (\strong{required}): object to be coerced from} \item{to}{\link{character} (\strong{required}): class name to be coerced to} } \description{ for \verb{[RLum.Analysis-class]} for \verb{[RLum.Data.Curve-class]} for \verb{[RLum.Data.Image-class]} for \verb{[RLum.Data.Spectrum-class]} for \verb{[RLum.Results-class]} } \details{ \strong{\linkS4class{RLum.Analysis}} \tabular{ll}{ \strong{from} \tab \strong{to}\cr \code{list} \tab \code{list}\cr } Given that the \link{list} consists of \linkS4class{RLum.Analysis} objects. 
\strong{\linkS4class{RLum.Data.Curve}} \tabular{ll}{ \strong{from} \tab \strong{to}\cr \code{list} \tab \code{list} \cr \code{data.frame} \tab \code{data.frame}\cr \code{matrix} \tab \code{matrix} } \strong{\linkS4class{RLum.Data.Image}} \tabular{ll}{ \strong{from} \tab \strong{to}\cr \code{data.frame} \tab \code{data.frame}\cr \code{matrix} \tab \code{matrix} } \strong{\linkS4class{RLum.Data.Spectrum}} \tabular{ll}{ \strong{from} \tab \strong{to}\cr \code{data.frame} \tab \code{data.frame}\cr \code{matrix} \tab \code{matrix} } \strong{\linkS4class{RLum.Results}} \tabular{ll}{ \strong{from} \tab \strong{to}\cr \code{list} \tab \code{list}\cr } Given that the \link{list} consists of \linkS4class{RLum.Results} objects. } \note{ Due to the complex structure of the \code{RLum} objects themselves, coercion to standard R data structures will always be lossy! } \seealso{ \link[methods:as]{methods::as} } Luminescence/man/subset_SingleGrainData.Rd0000644000176200001440000000346514521210045020311 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/subset_SingleGrainData.R \name{subset_SingleGrainData} \alias{subset_SingleGrainData} \title{Simple Subsetting of Single Grain Data from Risø BIN/BINX files} \usage{ subset_SingleGrainData(object, selection) } \arguments{ \item{object}{\linkS4class{Risoe.BINfileData} (\strong{required}): input object with the data to subset} \item{selection}{\link{data.frame} (\strong{required}): selection table with two columns for position (1st column) and grain (2nd column) (column names do not matter)} } \value{ A subset \linkS4class{Risoe.BINfileData} object } \description{ Most measured single grains do not exhibit light and it usually makes sense to subset single grain datasets using a table of position and grain pairs } \section{Function version}{ 0.1.0 } \examples{ ## load example data data(ExampleData.BINfileData, envir = environment()) ## set POSITION/GRAIN pair dataset selection <- 
data.frame(POSITION = c(1,5,7), GRAIN = c(0,0,0)) ##subset subset_SingleGrainData(object = CWOSL.SAR.Data, selection = selection) } \seealso{ \linkS4class{Risoe.BINfileData}, \link{read_BIN2R}, \link{verify_SingleGrainData} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. subset_SingleGrainData(): Simple Subsetting of Single Grain Data from Risø BIN/BINX files. Function version 0.1.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \keyword{datagen} \keyword{manip} Luminescence/man/read_XSYG2R.Rd0000644000176200001440000001553614521210045015723 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/read_XSYG2R.R \name{read_XSYG2R} \alias{read_XSYG2R} \title{Import XSYG files to R} \usage{ read_XSYG2R( file, recalculate.TL.curves = TRUE, fastForward = FALSE, import = TRUE, pattern = ".xsyg", verbose = TRUE, txtProgressBar = TRUE ) } \arguments{ \item{file}{\link{character} or \link{list} (\strong{required}): path and file name of the XSYG file. If input is a \code{list} it should comprise only \code{character}s representing each valid path and XSYG-file names. Alternatively the input character can be just a directory (path), in this case the function tries to detect and import all XSYG-files found in the directory.} \item{recalculate.TL.curves}{\link{logical} (\emph{with default}): if set to \code{TRUE}, TL curves are returned as temperature against count values (see details for more information) Note: The option overwrites the time vs. count TL curve. Select \code{FALSE} to import the raw data delivered by the lexsyg. 
Works for TL curves and spectra.} \item{fastForward}{\link{logical} (\emph{with default}): if \code{TRUE} for a more efficient data processing only a list of \code{RLum.Analysis} objects is returned.} \item{import}{\link{logical} (\emph{with default}): if set to \code{FALSE}, only the XSYG file structure is shown.} \item{pattern}{\link{regex} (\emph{with default}): optional regular expression if \code{file} is a link to a folder, to select just specific XSYG-files} \item{verbose}{\link{logical} (\emph{with default}): enable or disable verbose mode. If verbose is \code{FALSE} the \code{txtProgressBar} is also switched off} \item{txtProgressBar}{\link{logical} (\emph{with default}): enables \code{TRUE} or disables \code{FALSE} the progression bar during import} } \value{ \strong{Using the option \code{import = FALSE}} A list consisting of two elements is shown: \itemize{ \item \link{data.frame} with information on file. \item \link{data.frame} with information on the sequences stored in the XSYG file. } \strong{Using the option \code{import = TRUE} (default)} A list is provided, the list elements contain: \item{Sequence.Header}{\link{data.frame} with information on the sequence.} \item{Sequence.Object}{\linkS4class{RLum.Analysis} containing the curves.} } \description{ Imports XSYG-files produced by a Freiberg Instruments lexsyg reader into R. } \details{ \strong{How does the import function work?} The function uses the \link{xml} package to parse the file structure. Each sequence is subsequently translated into an \linkS4class{RLum.Analysis} object. \strong{General structure XSYG format} \if{html}{\out{
}}\preformatted{ x0 , y0 ; x1 , y1 ; x2 , y2 ; x3 , y3 }\if{html}{\out{
}} So far, each XSYG file can only contain one \verb{}, but multiple sequences. Each record may comprise several curves. \strong{TL curve recalculation} On the FI lexsyg device TL curves are recorded as time against count values. Temperature values are monitored on the heating plate and stored in a separate curve (time vs. temperature). If the option \code{recalculate.TL.curves = TRUE} is chosen, the time values for each TL curve are replaced by temperature values. Practically, this means combining two matrices (Time vs. Counts and Time vs. Temperature) with different row numbers by their time values. Three cases are considered: \enumerate{ \item HE: Heating element \item PMT: Photomultiplier tube \item Interpolation is done using the function \link{approx} } CASE (1): \code{nrow(matrix(PMT))} > \code{nrow(matrix(HE))} Missing temperature values from the heating element are calculated using time values from the PMT measurement. CASE (2): \code{nrow(matrix(PMT))} < \code{nrow(matrix(HE))} Missing count values from the PMT are calculated using time values from the heating element measurement. CASE (3): \code{nrow(matrix(PMT))} == \code{nrow(matrix(HE))} A new matrix is produced using temperature values from the heating element and count values from the PMT. \strong{Note:} Please note that due to the recalculation of the temperature values based on values delivered by the heating element, it may happen that multiple count values exists for each temperature value and temperature values may also decrease during heating, not only increase. \strong{Advanced file import} To allow for a more efficient usage of the function, instead of single path to a file just a directory can be passed as input. In this particular case the function tries to extract all XSYG-files found in the directory and import them all. Using this option internally the function constructs as list of the XSYG-files found in the directory. 
Please note no recursive detection is supported as this may lead to endless loops. } \note{ This function is a beta version as the XSYG file format is not yet fully specified. Thus, further file operations (merge, export, write) should be done using the functions provided with the package \link{xml}. \strong{So far, no image data import is provided!} \cr Corresponding values in the XSXG file are skipped. } \section{Function version}{ 0.6.8 } \examples{ ##(1) import XSYG file to R (uncomment for usage) #FILE <- file.choose() #temp <- read_XSYG2R(FILE) ##(2) additional examples for pure XML import using the package XML ## (uncomment for usage) ##import entire XML file #FILE <- file.choose() #temp <- XML::xmlRoot(XML::xmlTreeParse(FILE)) ##search for specific subnodes with curves containing 'OSL' #getNodeSet(temp, "//Sample/Sequence/Record[@recordType = 'OSL']/Curve") ##(2) How to extract single curves ... after import data(ExampleData.XSYG, envir = environment()) ##grep one OSL curves and plot the first curve OSLcurve <- get_RLum(OSL.SARMeasurement$Sequence.Object, recordType="OSL")[[1]] ##(3) How to see the structure of an object? structure_RLum(OSL.SARMeasurement$Sequence.Object) } \section{How to cite}{ Kreutzer, S., 2023. read_XSYG2R(): Import XSYG files to R. Function version 0.6.8. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Grehl, S., Kreutzer, S., Hoehne, M., 2013. Documentation of the XSYG file format. Unpublished Technical Note. 
Freiberg, Germany \strong{Further reading} XML: \url{https://en.wikipedia.org/wiki/XML} } \seealso{ \link{xml}, \linkS4class{RLum.Analysis}, \linkS4class{RLum.Data.Curve}, \link{approx} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \keyword{IO} Luminescence/man/calc_OSLLxTxRatio.Rd0000644000176200001440000001671714521210045017172 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calc_OSLLxTxRatio.R \name{calc_OSLLxTxRatio} \alias{calc_OSLLxTxRatio} \title{Calculate \code{Lx/Tx} ratio for CW-OSL curves} \usage{ calc_OSLLxTxRatio( Lx.data, Tx.data = NULL, signal.integral, signal.integral.Tx = NULL, background.integral, background.integral.Tx = NULL, background.count.distribution = "non-poisson", use_previousBG = FALSE, sigmab = NULL, sig0 = 0, digits = NULL ) } \arguments{ \item{Lx.data}{\linkS4class{RLum.Data.Curve} or \link{data.frame} (\strong{required}): requires a CW-OSL shine down curve (x = time, y = counts)} \item{Tx.data}{\linkS4class{RLum.Data.Curve} or \link{data.frame} (\emph{optional}): requires a CW-OSL shine down curve (x = time, y = counts). If no input is given the \code{Tx.data} will be treated as \code{NA} and no \code{Lx/Tx} ratio is calculated.} \item{signal.integral}{\link{numeric} (\strong{required}): vector with the limits for the signal integral. Can be set to \code{NA}, then no integrals are considered and all other integrals are set to \code{NA} as well.} \item{signal.integral.Tx}{\link{numeric} (\emph{optional}): vector with the limits for the signal integral for the \code{Tx}-curve. If nothing is provided the value from \code{signal.integral} is used.} \item{background.integral}{\link{numeric} (\strong{required}): vector with the bounds for the background integral. 
Can be set to \code{NA}, then no integrals are considered and all other integrals are set to \code{NA} as well.} \item{background.integral.Tx}{\link{numeric} (\emph{optional}): vector with the limits for the background integral for the \code{Tx} curve. If nothing is provided the value from \code{background.integral} is used.} \item{background.count.distribution}{\link{character} (\emph{with default}): sets the count distribution assumed for the error calculation. Possible arguments \code{poisson} or \code{non-poisson}. See details for further information} \item{use_previousBG}{\link{logical} (\emph{with default}): If set to \code{TRUE} the background of the \code{Lx}-signal is subtracted also from the \code{Tx}-signal. Please note that in this case separate signal integral limits for the \code{Tx}-signal are not allowed and will be reset.} \item{sigmab}{\link{numeric} (\emph{optional}): option to set a manual value for the overdispersion (for \code{LnTx} and \code{TnTx}), used for the \code{Lx/Tx} error calculation. The value should be provided as absolute squared count values, e.g. \code{sigmab = c(300,300)}. \strong{Note:} If only one value is provided this value is taken for both (\code{LnTx} and \code{TnTx}) signals.} \item{sig0}{\link{numeric} (\emph{with default}): allow adding an extra component of error to the final \code{Lx/Tx} error value (e.g., instrumental error, see details).} \item{digits}{\link{integer} (\emph{with default}): round numbers to the specified digits. If digits is set to \code{NULL} nothing is rounded.} } \value{ Returns an S4 object of type \linkS4class{RLum.Results}. Slot \code{data} contains a \link{list} with the following structure: \strong{@data} \if{html}{\out{
}}\preformatted{$LxTx.table (data.frame) .. $ LnLx .. $ LnLx.BG .. $ TnTx .. $ TnTx.BG .. $ Net_LnLx .. $ Net_LnLx.Error .. $ Net_TnTx .. $ Net_TnTx.Error .. $ LxTx .. $ LxTx.Error $ calc.parameters (list) .. $ sigmab.LnTx .. $ sigmab.TnTx .. $ k }\if{html}{\out{
}} \strong{@info} \if{html}{\out{
}}\preformatted{$ call (original function call) }\if{html}{\out{
}} } \description{ Calculate \code{Lx/Tx} ratios from a given set of CW-OSL curves assuming late light background subtraction. } \details{ The integrity of the chosen values for the signal and background integral is checked by the function; the signal integral limits have to be lower than the background integral limits. If a \link{vector} is given as input instead of a \link{data.frame}, an artificial \link{data.frame} is produced. The error calculation is done according to Galbraith (2002). \strong{Please note:} In cases where the calculation results in \code{NaN} values (for example due to zero-signal, and therefore a division of 0 by 0), these \code{NaN} values are replaced by 0. \strong{\code{sigmab}} The default value of \code{sigmab} is calculated assuming the background is constant and \strong{would not} applicable when the background varies as, e.g., as observed for the early light subtraction method. \strong{sig0} This argument allows to add an extra component of error to the final \code{Lx/Tx} error value. The input will be treated as factor that is multiplied with the already calculated \code{LxTx} and the result is add up by: \deqn{se(LxTx) = \sqrt(se(LxTx)^2 + (LxTx * sig0)^2)} \strong{background.count.distribution} This argument allows selecting the distribution assumption that is used for the error calculation. According to Galbraith (2002, 2014) the background counts may be overdispersed (i.e. do not follow a Poisson distribution, which is assumed for the photomultiplier counts). In that case (might be the normal case) it has to be accounted for the overdispersion by estimating \eqn{\sigma^2} (i.e. the overdispersion value). 
Therefore the relative standard error is calculated as: \itemize{ \item \code{poisson} \deqn{rse(\mu_{S}) \approx \sqrt(Y_{0} + Y_{1}/k^2)/Y_{0} - Y_{1}/k} \item \code{non-poisson} \deqn{rse(\mu_{S}) \approx \sqrt(Y_{0} + Y_{1}/k^2 + \sigma^2(1+1/k))/Y_{0} - Y_{1}/k} } \strong{Please note} that when using the early background subtraction method in combination with the 'non-poisson' distribution argument, the corresponding \code{Lx/Tx} error may considerably increase due to a high \code{sigmab} value. Please check whether this is valid for your data set and if necessary consider to provide an own \code{sigmab} value using the corresponding argument \code{sigmab}. } \note{ The results of this function have been cross-checked with the Analyst (version 3.24b). Access to the results object via \link{get_RLum}. \strong{Caution:} If you are using early light subtraction (EBG), please either provide your own \code{sigmab} value or use \code{background.count.distribution = "poisson"}. } \section{Function version}{ 0.8.0 } \examples{ ##load data data(ExampleData.LxTxOSLData, envir = environment()) ##calculate Lx/Tx ratio results <- calc_OSLLxTxRatio( Lx.data = Lx.data, Tx.data = Tx.data, signal.integral = c(1:2), background.integral = c(85:100)) ##get results object get_RLum(results) } \section{How to cite}{ Kreutzer, S., 2023. calc_OSLLxTxRatio(): Calculate Lx/Tx ratio for CW-OSL curves. Function version 0.8.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Duller, G., 2018. Analyst v4.57 - User Manual. \verb{https://users.aber.ac.uk/ggd}\cr Galbraith, R.F., 2002. A note on the variance of a background-corrected OSL count. Ancient TL, 20 (2), 49-51. Galbraith, R.F., 2014. 
A further note on the variance of a background-corrected OSL count. Ancient TL, 31 (2), 1-3. } \seealso{ \linkS4class{RLum.Data.Curve}, \link{Analyse_SAR.OSLdata}, \link{plot_GrowthCurve}, \link{analyse_SAR.CWOSL} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \keyword{datagen} Luminescence/man/analyse_Al2O3C_Measurement.Rd0000644000176200001440000001515314521210044020730 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/analyse_Al2O3C_Measurement.R \name{analyse_Al2O3C_Measurement} \alias{analyse_Al2O3C_Measurement} \title{Al2O3:C Passive Dosimeter Measurement Analysis} \usage{ analyse_Al2O3C_Measurement( object, signal_integral = NULL, dose_points = c(0, 4), recordType = c("OSL (UVVIS)", "TL (UVVIS)"), calculate_TL_dose = FALSE, irradiation_time_correction = NULL, cross_talk_correction = NULL, travel_dosimeter = NULL, test_parameters = NULL, verbose = TRUE, plot = TRUE, ... ) } \arguments{ \item{object}{\linkS4class{RLum.Analysis} (\strong{required}): measurement input} \item{signal_integral}{\link{numeric} (\emph{optional}): signal integral, used for the signal and the background. Example: \code{c(1:10)} for the first 10 channels. If nothing is provided the full range is used} \item{dose_points}{\link{numeric} (\emph{with default}): vector with dose points, if dose points are repeated, only the general pattern needs to be provided. Default values follow the suggestions made by Kreutzer et al., 2018} \item{recordType}{\link{character} (\emph{with default}): input curve selection, which is passed to function \link{get_RLum}. To deactivate the automatic selection set the argument to \code{NULL}} \item{calculate_TL_dose}{\link{logical} (\emph{with default}): Enables/disables experimental dose estimation based on the TL curves. 
Taken is the ratio of the peak sums of each curve +/- 5 channels.} \item{irradiation_time_correction}{\link{numeric} or \linkS4class{RLum.Results} (\emph{optional}): information on the used irradiation time correction obtained from other experiments. If a \code{numeric} is provided it has to be of length two: mean, standard error} \item{cross_talk_correction}{\link{numeric} or \linkS4class{RLum.Results} (\emph{optional}): information on the used cross-talk correction obtained from other experiments. If a \code{numeric} vector is provided it has to be of length three: mean, 2.5 \% quantile, 97.5 \% quantile.} \item{travel_dosimeter}{\link{numeric} (\emph{optional}): specify the position of the travel dosimeter (so far measured at the same time). The dose of the travel dosimeter will be subtracted from all other values.} \item{test_parameters}{\link{list} (\emph{with default}): set test parameters. Supported parameters are: \code{TL_peak_shift} All input: \link{numeric} values, \code{NA} and \code{NULL} (see 
Details)} \item{verbose}{\link{logical} (\emph{with default}): enable/disable verbose mode} \item{plot}{\link{logical} (\emph{with default}): enable/disable plot output, if \code{object} is of type \link{list}, a \link{numeric} vector can be provided to limit the plot output to certain aliquots} \item{...}{further arguments that can be passed to the plot output, supported are \code{norm}, \code{main}, \code{mtext}, \code{title} (for self-call mode to specify, e.g., sample names)} } \value{ Function returns results numerically and graphically: -----------------------------------\cr \verb{[ NUMERICAL OUTPUT ]}\cr -----------------------------------\cr \strong{\code{RLum.Results}}-object \strong{slot:} \strong{\verb{@data}} \tabular{lll}{ \strong{Element} \tab \strong{Type} \tab \strong{Description}\cr \verb{$data} \tab \code{data.frame} \tab the estimated equivalent dose \cr \verb{$data_table} \tab \code{data.frame} \tab full dose and signal table \cr \code{test_parameters} \tab \code{data.frame} \tab results with test parameters \cr \code{data_TDcorrected} \tab \code{data.frame} \tab travel dosimeter corrected results (only if TD was provided)\cr } \emph{Note: If correction the irradiation time and the cross-talk correction method is used, the De values in the table \code{data} table are already corrected, i.e. if you want to get an uncorrected value, you can use the column \code{CT_CORRECTION} remove the correction} \strong{slot:} \strong{\verb{@info}} The original function call ------------------------\cr \verb{[ PLOT OUTPUT ]}\cr ------------------------\cr \itemize{ \item OSL and TL curves, combined on two plots. } } \description{ The function provides the analysis routines for measurements on a FI lexsyg SMART reader using Al2O3:C chips according to Kreutzer et al., 2018 } \details{ \strong{Working with a travel dosimeter} The function allows to define particular aliquots as travel dosimeters. 
For example: \code{travel_dosimeter = c(1,3,5)} sets aliquots 1, 3 and 5 as travel dosimeters. These dose values of this dosimeters are combined and automatically subtracted from the obtained dose values of the other dosimeters. \strong{Calculate TL dose} The argument \code{calculate_TL_dose} provides the possibility to experimentally calculate a TL-dose, i.e. an apparent dose value derived from the TL curve ratio. However, it should be noted that this value is only a fall back in case something went wrong during the measurement of the optical stimulation. The TL derived dose value is corrected for cross-talk and for the irradiation time, but not considered if a travel dosimeter is defined. Calculating the palaeodose is possible without \strong{any TL} curve in the sequence! \strong{Test parameters} \code{TL_peak_shift} \link{numeric} (default: \code{15}): Checks whether the TL peak shift is bigger > 15 K, indicating a problem with the thermal contact of the chip. \code{stimulation_power} \link{numeric} (default: \code{0.05}): So far available, information on the delivered optical stimulation are compared. Compared are the information from the first curves with all others. If the ratio differs more from unity than the defined by the threshold, a warning is returned. } \section{Function version}{ 0.2.6 } \examples{ ##load data data(ExampleData.Al2O3C, envir = environment()) ##run analysis analyse_Al2O3C_Measurement(data_CrossTalk) } \section{How to cite}{ Kreutzer, S., 2023. analyse_Al2O3C_Measurement(): Al2O3:C Passive Dosimeter Measurement Analysis. Function version 0.2.6. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. 
https://CRAN.R-project.org/package=Luminescence } \references{ Kreutzer, S., Martin, L., Guérin, G., Tribolo, C., Selva, P., Mercier, N., 2018. Environmental Dose Rate Determination Using a Passive Dosimeter: Techniques and Workflow for alpha-Al2O3:C Chips. Geochronometria 45, 56-67. } \seealso{ \link{analyse_Al2O3C_ITC} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \keyword{datagen} Luminescence/man/analyse_SAR.TL.Rd0000644000176200001440000001213414521210044016377 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/analyse_SAR.TL.R \name{analyse_SAR.TL} \alias{analyse_SAR.TL} \title{Analyse SAR TL measurements} \usage{ analyse_SAR.TL( object, object.background, signal.integral.min, signal.integral.max, integral_input = "channel", sequence.structure = c("PREHEAT", "SIGNAL", "BACKGROUND"), rejection.criteria = list(recycling.ratio = 10, recuperation.rate = 10), dose.points, log = "", ... ) } \arguments{ \item{object}{\linkS4class{RLum.Analysis} or a \link{list} of such objects (\strong{required}) : input object containing data for analysis} \item{object.background}{currently not used} \item{signal.integral.min}{\link{integer} (\strong{required}): requires the channel number for the lower signal integral bound (e.g. \code{signal.integral.min = 100})} \item{signal.integral.max}{\link{integer} (\strong{required}): requires the channel number for the upper signal integral bound (e.g. \code{signal.integral.max = 200})} \item{integral_input}{\link{character} (\emph{with default}): defines the input for the the arguments \code{signal.integral.min} and \code{signal.integral.max}. These limits can be either provided \code{'channel'} number (the default) or \code{'temperature'}. 
If \code{'temperature'} is chosen the best matching channel is selected.} \item{sequence.structure}{\link{vector} \link{character} (\emph{with default}): specifies the general sequence structure. Three steps are allowed (\code{"PREHEAT"}, \code{"SIGNAL"}, \code{"BACKGROUND"}), in addition a parameter \code{"EXCLUDE"}. This allows excluding TL curves which are not relevant for the protocol analysis. (\strong{Note:} None TL are removed by default)} \item{rejection.criteria}{\link{list} (\emph{with default}): list containing rejection criteria in percentage for the calculation.} \item{dose.points}{\link{numeric} (\emph{optional}): option set dose points manually} \item{log}{\link{character} (\emph{with default}): a character string which contains \code{"x"} if the x-axis is to be logarithmic, \code{"y"} if the y axis is to be logarithmic and \code{"xy"} or \code{"yx"} if both axes are to be logarithmic. See \link{plot.default}).} \item{...}{further arguments that will be passed to the function \link{plot_GrowthCurve}} } \value{ A plot (\emph{optional}) and an \linkS4class{RLum.Results} object is returned containing the following elements: \item{De.values}{\link{data.frame} containing De-values and further parameters} \item{LnLxTnTx.values}{\link{data.frame} of all calculated \code{Lx/Tx} values including signal, background counts and the dose points.} \item{rejection.criteria}{\link{data.frame} with values that might by used as rejection criteria. NA is produced if no R0 dose point exists.} \strong{note:} the output should be accessed using the function \link{get_RLum} } \description{ The function performs a SAR TL analysis on a \linkS4class{RLum.Analysis} object including growth curve fitting. } \details{ This function performs a SAR TL analysis on a set of curves. The SAR procedure in general is given by Murray and Wintle (2000). For the calculation of the \code{Lx/Tx} value the function \link{calc_TLLxTxRatio} is used. 
\strong{Provided rejection criteria} \verb{[recycling.ratio]}: calculated for every repeated regeneration dose point. \verb{[recuperation.rate]}: recuperation rate calculated by comparing the \code{Lx/Tx} values of the zero regeneration point with the \code{Ln/Tn} value (the \code{Lx/Tx} ratio of the natural signal). For methodological background see Aitken and Smith (1988) } \note{ \strong{THIS IS A BETA VERSION} Non-TL curves will be removed from the input object without further warning. } \section{Function version}{ 0.3.0 } \examples{ ##load data data(ExampleData.BINfileData, envir = environment()) ##transform the values from the first position in a RLum.Analysis object object <- Risoe.BINfileData2RLum.Analysis(TL.SAR.Data, pos=3) ##perform analysis analyse_SAR.TL( object = object, signal.integral.min = 210, signal.integral.max = 220, fit.method = "EXP OR LIN", sequence.structure = c("SIGNAL", "BACKGROUND")) } \section{How to cite}{ Kreutzer, S., 2023. analyse_SAR.TL(): Analyse SAR TL measurements. Function version 0.3.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Aitken, M.J. and Smith, B.W., 1988. Optical dating: recuperation after bleaching. Quaternary Science Reviews 7, 387-393. Murray, A.S. and Wintle, A.G., 2000. Luminescence dating of quartz using an improved single-aliquot regenerative-dose protocol. Radiation Measurements 32, 57-73. 
} \seealso{ \link{calc_TLLxTxRatio}, \link{plot_GrowthCurve}, \linkS4class{RLum.Analysis}, \linkS4class{RLum.Results}, \link{get_RLum} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \keyword{datagen} \keyword{plot} Luminescence/man/BaseDataSet.GrainSizeAttenuation.Rd0000644000176200001440000000166214521210044022152 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Luminescence-package.R \name{BaseDataSet.GrainSizeAttenuation} \alias{BaseDataSet.GrainSizeAttenuation} \title{Base dataset for grain size attenuation data by Guérin et al. (2012)} \source{ Guérin, G., Mercier, N., Nathan, R., Adamiec, G., Lefrais, Y., 2012. On the use of the infinite matrix assumption and associated concepts: A critical review. Radiation Measurements, 47, 778-785. } \description{ Grain size correction data for beta-dose rates published by Guérin et al. (2012). #' @format A \code{\link{data.frame}} seven columns and sixteen rows. Column headers are \code{GrainSize}, \code{Q_K}, \code{FS_K}, \code{Q_Th}, \code{FS_Th}, \code{Q_U}, \code{FS_U}. Grain sizes are quoted in µm (e.g., 20, 40, 60 etc.) } \section{Version}{ 0.1.0 } \examples{ ## load data data("BaseDataSet.GrainSizeAttenuation", envir = environment()) } \keyword{datasets} Luminescence/man/analyse_Al2O3C_ITC.Rd0000644000176200001440000001156614521210044017066 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/analyse_Al2O3C_ITC.R \name{analyse_Al2O3C_ITC} \alias{analyse_Al2O3C_ITC} \title{Al2O3 Irradiation Time Correction Analysis} \usage{ analyse_Al2O3C_ITC( object, signal_integral = NULL, dose_points = c(2, 4, 8, 12, 16), recordType = c("OSL (UVVIS)"), method_control = NULL, verbose = TRUE, plot = TRUE, ... ) } \arguments{ \item{object}{\linkS4class{RLum.Analysis} or \link{list} \strong{(required)}: results obtained from the measurement. 
Alternatively a list of \linkS4class{RLum.Analysis} objects can be provided to allow an automatic analysis} \item{signal_integral}{\link{numeric} (\emph{optional}): signal integral, used for the signal and the background. If nothing is provided the full range is used. Argument can be provided as \link{list}.} \item{dose_points}{\link{numeric} (\emph{with default}): vector with dose points, if dose points are repeated, only the general pattern needs to be provided. Default values follow the suggestions made by Kreutzer et al., 2018. Argument can be provided as \link{list}.} \item{recordType}{\link{character} (\emph{with default}): input curve selection, which is passed to function \link{get_RLum}. To deactivate the automatic selection set the argument to \code{NULL}} \item{method_control}{\link{list} (\emph{optional}): optional parameters to control the calculation. See details for further explanations} \item{verbose}{\link{logical} (\emph{with default}): enable/disable verbose mode} \item{plot}{\link{logical} (\emph{with default}): enable/disable plot output} \item{...}{further arguments that can be passed to the plot output} } \value{ Function returns results numerically and graphically: -----------------------------------\cr \verb{[ NUMERICAL OUTPUT ]}\cr -----------------------------------\cr \strong{\code{RLum.Results}}-object \strong{slot:} \strong{\verb{@data}} \tabular{lll}{ \strong{Element} \tab \strong{Type} \tab \strong{Description}\cr \verb{$data} \tab \code{data.frame} \tab correction value and error \cr \verb{$table} \tab \code{data.frame} \tab table used for plotting \cr \verb{$table_mean} \tab \code{data.frame} \tab table used for fitting \cr \verb{$fit} \tab \code{lm} or \code{nls} \tab the fitting as returned by the function \link{plot_GrowthCurve} } \strong{slot:} \strong{\verb{@info}} The original function call ------------------------\cr \verb{[ PLOT OUTPUT ]}\cr ------------------------\cr \itemize{ \item A dose response curve with the marked 
correction values } } \description{ The function provides a very particular analysis to correct the irradiation time while irradiating Al2O3:C chips in a luminescence reader. } \details{ Background: Due to their high dose sensitivity Al2O3:C chips are usually irradiated for only a very short duration or under the closed beta-source within a luminescence reader. However, due to its high dose sensitivity, during the movement towards the beta-source, the pellet already receives a non-negligible dose. Based on measurements following a protocol suggested by Kreutzer et al., 2018, a dose response curve is constructed and the intersection (absolute value) with the time axis is taken as real irradiation time. \strong{\code{method_control}} To keep the generic argument list as clear as possible, arguments to allow a deeper control of the method are all preset with meaningful default parameters and can be handled using the argument \code{method_control} only, e.g., \code{method_control = list(fit.method = "LIN")}. Supported arguments are: \tabular{lll}{ \strong{ARGUMENT} \tab \strong{FUNCTION} \tab \strong{DESCRIPTION}\cr \code{mode} \tab \code{plot_GrowthCurve} \tab as in \link{plot_GrowthCurve}; sets the mode used for fitting\cr \code{fit.method} \tab \code{plot_GrowthCurve} \tab as in \link{plot_GrowthCurve}; sets the function applied for fitting\cr } } \section{Function version}{ 0.1.1 } \examples{ ##load data data(ExampleData.Al2O3C, envir = environment()) ##run analysis analyse_Al2O3C_ITC(data_ITC) } \section{How to cite}{ Kreutzer, S., 2023. analyse_Al2O3C_ITC(): Al2O3 Irradiation Time Correction Analysis. Function version 0.1.1. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23.
https://CRAN.R-project.org/package=Luminescence } \references{ Kreutzer, S., Martin, L., Guérin, G., Tribolo, C., Selva, P., Mercier, N., 2018. Environmental Dose Rate Determination Using a Passive Dosimeter: Techniques and Workflow for alpha-Al2O3:C Chips. Geochronometria 45, 56-67. doi: 10.1515/geochr-2015-0086 } \seealso{ \link{plot_GrowthCurve} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \keyword{datagen} Luminescence/man/convert_SG2MG.Rd0000644000176200001440000000400014521210045016271 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/convert_SG2MG.R \name{convert_SG2MG} \alias{convert_SG2MG} \title{Converts Single-Grain Data to Multiple-Grain Data} \usage{ convert_SG2MG(object, write_file = FALSE, ...) } \arguments{ \item{object}{\linkS4class{Risoe.BINfileData} \link{character} (\strong{required}): \linkS4class{Risoe.BINfileData} object or BIN/BINX-file name} \item{write_file}{\link{logical} (\emph{with default}): if the input was a path to a file, the output can be written to a file if \code{TRUE}. The multiple grain file will be written into the same folder and with extension \code{-SG} to the file name.} \item{...}{further arguments passed down to \link{read_BIN2R} if input is file path} } \value{ \linkS4class{Risoe.BINfileData} object and if \code{write_file = TRUE} and the input was a file path, a file is written to origin folder. } \description{ Conversion of single-grain data to multiple-grain data by adding signals from grains belonging to one disc (unique pairs of position, set and run). 
} \section{Function version}{ 0.1.0 } \examples{ ## simple run ## (please note that the example is not using SG data) data(ExampleData.BINfileData, envir = environment()) convert_SG2MG(CWOSL.SAR.Data) } \seealso{ \linkS4class{Risoe.BINfileData}, \link{read_BIN2R}, \link{write_R2BIN} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany), Norbert Mercier, IRAMAT-CRP2A, UMR 5060, CNRS-Université Bordeaux Montaigne (France); , RLum Developer Team} \section{How to cite}{ Kreutzer, S., Mercier, N., 2023. convert_SG2MG(): Converts Single-Grain Data to Multiple-Grain Data. Function version 0.1.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \keyword{IO} Luminescence/man/CW2pLMi.Rd0000644000176200001440000001241614521210045015101 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/CW2pLMi.R \name{CW2pLMi} \alias{CW2pLMi} \title{Transform a CW-OSL curve into a pLM-OSL curve via interpolation under linear modulation conditions} \usage{ CW2pLMi(values, P) } \arguments{ \item{values}{\linkS4class{RLum.Data.Curve} or \link{data.frame} (\strong{required}): \linkS4class{RLum.Data.Curve} or \code{data.frame} with measured curve data of type stimulation time (t) (\code{values[,1]}) and measured counts (cts) (\code{values[,2]})} \item{P}{\link{vector} (\emph{optional}): stimulation time in seconds. If no value is given the optimal value is estimated automatically (see details). Greater values of P produce more points in the rising tail of the curve.} } \value{ The function returns the same data type as the input data type with the transformed curve values.
\strong{\code{RLum.Data.Curve}} \tabular{rl}{ \verb{$CW2pLMi.x.t} \tab: transformed time values \cr \verb{$CW2pLMi.method} \tab: used method for the production of the new data points } } \description{ Transforms a conventionally measured continuous-wave (CW) OSL-curve into a pseudo linearly modulated (pLM) curve under linear modulation conditions using the interpolation procedure described by Bos & Wallinga (2012). } \details{ The complete procedure of the transformation is given in Bos & Wallinga (2012). The input \code{data.frame} consists of two columns: time (t) and count values (CW(t)) \strong{Nomenclature} \itemize{ \item P = stimulation time (s) \item 1/P = stimulation rate (1/s) } \strong{Internal transformation steps} (1) log(CW-OSL) values (2) Calculate t' which is the transformed time: \deqn{t' = 1/2*1/P*t^2} (3) Interpolate CW(t'), i.e. use the log(CW(t)) to obtain the count values for the transformed time (t'). Values beyond \code{min(t)} and \code{max(t)} produce \code{NA} values. (4) Select all values for t' < \code{min(t)}, i.e. values beyond the time resolution of t. Select the first two values of the transformed data set which contain no \code{NA} values and use these values for a linear fit using \link{lm}. (5) Extrapolate values for t' < \code{min(t)} based on the previously obtained fit parameters. (6) Transform values using \deqn{pLM(t) = t/P*CW(t')} (7) Combine values and truncate all values for t' > \code{max(t)} \strong{NOTE:} The number of values for t' < \code{min(t)} depends on the stimulation period (P) and therefore on the stimulation rate 1/P. To avoid the production of too many artificial data at the rising tail of the determined pLM curves it is recommended to use the automatic estimation routine for \code{P}, i.e. provide no own value for \code{P}. } \note{ According to Bos & Wallinga (2012) the number of extrapolated points should be limited to avoid artificial intensity data.
If \code{P} is provided manually and more than two points are extrapolated, a warning message is returned. } \section{Function version}{ 0.3.1 } \examples{ ##(1) ##load CW-OSL curve data data(ExampleData.CW_OSL_Curve, envir = environment()) ##transform values values.transformed <- CW2pLMi(ExampleData.CW_OSL_Curve) ##plot plot(values.transformed$x, values.transformed$y.t, log = "x") ##(2) - produce Fig. 4 from Bos & Wallinga (2012) ##load data data(ExampleData.CW_OSL_Curve, envir = environment()) values <- CW_Curve.BosWallinga2012 ##open plot area plot(NA, NA, xlim = c(0.001,10), ylim = c(0,8000), ylab = "pseudo OSL (cts/0.01 s)", xlab = "t [s]", log = "x", main = "Fig. 4 - Bos & Wallinga (2012)") values.t <- CW2pLMi(values, P = 1/20) lines(values[1:length(values.t[,1]),1],CW2pLMi(values, P = 1/20)[,2], col = "red", lwd = 1.3) text(0.03,4500,"LM", col = "red", cex = .8) values.t <- CW2pHMi(values, delta = 40) lines(values[1:length(values.t[,1]),1],CW2pHMi(values, delta = 40)[,2], col = "black", lwd = 1.3) text(0.005,3000,"HM", cex =.8) values.t <- CW2pPMi(values, P = 1/10) lines(values[1:length(values.t[,1]),1], CW2pPMi(values, P = 1/10)[,2], col = "blue", lwd = 1.3) text(0.5,6500,"PM", col = "blue", cex = .8) } \section{How to cite}{ Kreutzer, S., 2023. CW2pLMi(): Transform a CW-OSL curve into a pLM-OSL curve via interpolation under linear modulation conditions. Function version 0.3.1. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Bos, A.J.J. & Wallinga, J., 2012. How to visualize quartz OSL signal components. Radiation Measurements, 47, 752-758. \strong{Further Reading} Bulur, E., 1996. 
An Alternative Technique For Optically Stimulated Luminescence (OSL) Experiment. Radiation Measurements, 26, 701-709. Bulur, E., 2000. A simple transformation for converting CW-OSL curves to LM-OSL curves. Radiation Measurements, 32, 141-145. } \seealso{ \link{CW2pLM}, \link{CW2pHMi}, \link{CW2pPMi}, \link{fit_LMCurve}, \linkS4class{RLum.Data.Curve} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) Based on comments and suggestions from:\cr Adrie J.J. Bos, Delft University of Technology, The Netherlands , RLum Developer Team} \keyword{manip} Luminescence/man/ExampleData.CW_OSL_Curve.Rd0000644000176200001440000000305414521210044020277 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Luminescence-package.R \docType{data} \name{ExampleData.CW_OSL_Curve} \alias{ExampleData.CW_OSL_Curve} \alias{CW_Curve.BosWallinga2012} \title{Example CW-OSL curve data for the package Luminescence} \format{ Data frame with 1000 observations on the following 2 variables: \describe{ \item{list("x")}{a numeric vector, time} \item{list("y")}{a numeric vector, counts} } } \source{ \strong{ExampleData.CW_OSL_Curve} \tabular{ll}{ Lab: \tab Luminescence Laboratory Bayreuth\cr Lab-Code: \tab BT607\cr Location: \tab Saxony/Germany\cr Material: \tab Middle grain quartz measured on aluminium cups on a Risø TL/OSL DA-15 reader.\cr Reference: \tab unpublished data } \strong{CW_Curve.BosWallinga2012} \tabular{ll}{ Lab: \tab Netherlands Centre for Luminescence Dating (NCL)\cr Lab-Code: \tab NCL-2108077\cr Location: \tab Guadalentin Basin, Spain\cr Material: \tab Coarse grain quartz\cr Reference: \tab Bos & Wallinga (2012) and Baartman et al. (2011) } } \description{ \code{data.frame} containing CW-OSL curve data (time, counts) } \examples{ data(ExampleData.CW_OSL_Curve, envir = environment()) plot(ExampleData.CW_OSL_Curve) } \references{ Baartman, J.E.M., Veldkamp, A., Schoorl, J.M., Wallinga, J., Cammeraat, L.H., 2011. 
Unravelling Late Pleistocene and Holocene landscape dynamics: The Upper Guadalentin Basin, SE Spain. Geomorphology, 125, 172-185. Bos, A.J.J. & Wallinga, J., 2012. How to visualize quartz OSL signal components. Radiation Measurements, 47, 752-758. } \keyword{datasets} Luminescence/man/calc_FastRatio.Rd0000644000176200001440000001202414521210045016575 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calc_FastRatio.R \name{calc_FastRatio} \alias{calc_FastRatio} \title{Calculate the Fast Ratio for CW-OSL curves} \usage{ calc_FastRatio( object, stimulation.power = 30.6, wavelength = 470, sigmaF = 2.6e-17, sigmaM = 4.28e-18, Ch_L1 = 1, Ch_L2 = NULL, Ch_L3 = NULL, x = 1, x2 = 0.1, dead.channels = c(0, 0), fitCW.sigma = FALSE, fitCW.curve = FALSE, plot = TRUE, ... ) } \arguments{ \item{object}{\linkS4class{RLum.Analysis}, \linkS4class{RLum.Data.Curve} or \link{data.frame} (\strong{required}): x, y data of measured values (time and counts).} \item{stimulation.power}{\link{numeric} (\emph{with default}): Stimulation power in mW/cm^2} \item{wavelength}{\link{numeric} (\emph{with default}): Stimulation wavelength in nm} \item{sigmaF}{\link{numeric} (\emph{with default}): Photoionisation cross-section (cm^2) of the fast component. Default value after Durcan & Duller (2011).} \item{sigmaM}{\link{numeric} (\emph{with default}): Photoionisation cross-section (cm^2) of the medium component. Default value after Durcan & Duller (2011).} \item{Ch_L1}{\link{numeric} (\emph{with default}): An integer specifying the channel for L1.} \item{Ch_L2}{\link{numeric} (\emph{optional}): An integer specifying the channel for L2.} \item{Ch_L3}{\link{numeric} (\emph{optional}): A vector of length 2 with integer values specifying the start and end channels for L3 (e.g., \code{c(40, 50)}).} \item{x}{\link{numeric} (\emph{with default}): \\% of signal remaining from the fast component. 
Used to define the location of L2 and L3 (start).} \item{x2}{\link{numeric} (\emph{with default}): \\% of signal remaining from the medium component. Used to define the location of L3 (end).} \item{dead.channels}{\link{numeric} (\emph{with default}): Vector of length 2 in the form of \code{c(x, y)}. Channels that do not contain OSL data, i.e. at the start or end of measurement.} \item{fitCW.sigma}{\link{logical} (\emph{optional}): fit CW-OSL curve using \link{fit_CWCurve} to calculate \code{sigmaF} and \code{sigmaM} (\strong{experimental}).} \item{fitCW.curve}{\link{logical} (\emph{optional}): fit CW-OSL curve using \link{fit_CWCurve} and derive the counts of L2 and L3 from the fitted OSL curve (\strong{experimental}).} \item{plot}{\link{logical} (\emph{with default}): plot output (\code{TRUE}/\code{FALSE})} \item{...}{available options: \code{verbose} (\link{logical}). Further arguments passed to \link{fit_CWCurve}.} } \value{ Returns a plot (\emph{optional}) and an S4 object of type \linkS4class{RLum.Results}. The slot \code{data} contains a \link{list} with the following elements: \item{summary}{\link{data.frame} summary of all relevant results} \item{data}{the original input data} \item{fit}{\linkS4class{RLum.Results} object if either \code{fitCW.sigma} or \code{fitCW.curve} is \code{TRUE}} \item{args}{\link{list} of used arguments} \item{call}{\verb{[call]} the function call} } \description{ Function to calculate the fast ratio of quartz CW-OSL single grain or single aliquot curves after Durcan & Duller (2011). } \details{ This function follows the equations of Durcan & Duller (2011). The energy required to reduce the fast and medium quartz OSL components to \code{x} and \code{x2} \\% respectively using eq. 3 to determine channels L2 and L3 (start and end). The fast ratio is then calculated from: \eqn{(L1-L3)/(L2-L3)}. 
} \section{Function version}{ 0.1.1 } \examples{ # load example CW-OSL curve data("ExampleData.CW_OSL_Curve") # calculate the fast ratio w/o further adjustments res <- calc_FastRatio(ExampleData.CW_OSL_Curve) # show the summary table get_RLum(res) } \section{How to cite}{ King, G.E., Durcan, J., Burow, C., 2023. calc_FastRatio(): Calculate the Fast Ratio for CW-OSL curves. Function version 0.1.1. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Durcan, J.A. & Duller, G.A.T., 2011. The fast ratio: A rapid measure for testing the dominance of the fast component in the initial OSL signal from quartz. Radiation Measurements 46, 1065-1072. Madsen, A.T., Duller, G.A.T., Donnelly, J.P., Roberts, H.M. & Wintle, A.G., 2009. A chronology of hurricane landfalls at Little Sippewissett Marsh, Massachusetts, USA, using optical dating. Geomorphology 109, 36-45. \strong{Further reading} Steffen, D., Preusser, F. & Schlunegger, 2009. OSL quartz age underestimation due to unstable signal components. Quaternary Geochronology 4, 353-362. } \seealso{ \link{fit_CWCurve}, \link{get_RLum}, \linkS4class{RLum.Analysis}, \linkS4class{RLum.Results}, \linkS4class{RLum.Data.Curve} } \author{ Georgina E. King, University of Bern (Switzerland) \cr Julie A. 
Durcan, University of Oxford (United Kingdom) \cr Christoph Burow, University of Cologne (Germany) , RLum Developer Team} Luminescence/man/fit_CWCurve.Rd0000644000176200001440000002040414521210045016100 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fit_CWCurve.R \name{fit_CWCurve} \alias{fit_CWCurve} \title{Nonlinear Least Squares Fit for CW-OSL curves -beta version-} \usage{ fit_CWCurve( values, n.components.max, fit.failure_threshold = 5, fit.method = "port", fit.trace = FALSE, fit.calcError = FALSE, LED.power = 36, LED.wavelength = 470, cex.global = 0.6, sample_code = "Default", output.path, output.terminal = TRUE, output.terminalAdvanced = TRUE, plot = TRUE, ... ) } \arguments{ \item{values}{\linkS4class{RLum.Data.Curve} or \link{data.frame} (\strong{required}): x, y data of measured values (time and counts). See examples.} \item{n.components.max}{\link{vector} (\emph{optional}): maximum number of components that are to be used for fitting. The upper limit is 7.} \item{fit.failure_threshold}{\link{vector} (\emph{with default}): limits the failed fitting attempts.} \item{fit.method}{\link{character} (\emph{with default}): select fit method, allowed values: \code{'port'} and \code{'LM'}. \code{'port'} uses the 'port' routine from the function \link{nls} \code{'LM'} utilises the function \code{nlsLM} from the package \code{minpack.lm} and with that the Levenberg-Marquardt algorithm.} \item{fit.trace}{\link{logical} (\emph{with default}): traces the fitting process on the terminal.} \item{fit.calcError}{\link{logical} (\emph{with default}): calculate 1-sigma error range of components using \link[stats:confint]{stats::confint}} \item{LED.power}{\link{numeric} (\emph{with default}): LED power (max.) used for intensity ramping in mW/cm^2. 
\strong{Note:} The value is used for the calculation of the absolute photoionisation cross section.} \item{LED.wavelength}{\link{numeric} (\emph{with default}): LED wavelength used for stimulation in nm. \strong{Note:} The value is used for the calculation of the absolute photoionisation cross section.} \item{cex.global}{\link{numeric} (\emph{with default}): global scaling factor.} \item{sample_code}{\link{character} (\emph{optional}): sample code used for the plot and the optional output table (\code{mtext}).} \item{output.path}{\link{character} (\emph{optional}): output path for table output containing the results of the fit. The file name is set automatically. If the file already exists in the directory, the values are appended.} \item{output.terminal}{\link{logical} (\emph{with default}): terminal output with fitting results.} \item{output.terminalAdvanced}{\link{logical} (\emph{with default}): enhanced terminal output. Requires \code{output.terminal = TRUE}. If \code{output.terminal = FALSE} no advanced output is possible.} \item{plot}{\link{logical} (\emph{with default}): returns a plot of the fitted curves.} \item{...}{further arguments and graphical parameters passed to \link{plot}.} } \value{ \strong{plot (\emph{optional})} the fitted CW-OSL curves are returned as plot. \strong{table (\emph{optional})} an output table (\verb{*.csv}) with parameters of the fitted components is provided if the \code{output.path} is set. \strong{RLum.Results} Beside the plot and table output options, an \linkS4class{RLum.Results} object is returned. \code{fit}: an \code{nls} object (\verb{$fit}) for which generic R functions are provided, e.g. \link{summary}, \link[stats:confint]{stats::confint}, \link{profile}. For more details, see \link{nls}. 
\code{output.table}: a \link{data.frame} containing the summarised parameters including the error \code{component.contribution.matrix}: \link{matrix} containing the values for the component to sum contribution plot (\verb{$component.contribution.matrix}). Matrix structure:\cr Column 1 and 2: time and \code{rev(time)} values \cr Additional columns are used for the components, two for each component, containing I0 and n0. The last columns \code{cont.} provide information on the relative component contribution for each time interval including the row sum for this values. \strong{object} beside the plot and table output options, an \linkS4class{RLum.Results} object is returned. \code{fit}: an \code{nls} object (\verb{$fit}) for which generic R functions are provided, e.g. \link{summary}, \link{confint}, \link{profile}. For more details, see \link{nls}. \code{output.table}: a \link{data.frame} containing the summarised parameters including the error\cr \code{component.contribution.matrix}: \link{matrix} containing the values for the component to sum contribution plot (\verb{$component.contribution.matrix}).\cr Matrix structure:\cr Column 1 and 2: time and \code{rev(time)} values\cr Additional columns are used for the components, two for each component, containing I0 and n0. The last columns \code{cont.} provide information on the relative component contribution for each time interval including the row sum for this values. } \description{ The function determines the weighted least-squares estimates of the component parameters of a CW-OSL signal for a given maximum number of components and returns various component parameters. The fitting procedure uses the \link{nls} function with the \code{port} algorithm. 
} \details{ \strong{Fitting function} The function for the CW-OSL fitting has the general form: \deqn{y = I0_{1}*\lambda_{1}*exp(-\lambda_1*x) + ,\ldots, + I0_{i}*\lambda_{i}*exp(-\lambda_i*x) } where \eqn{0 < i < 8} and \eqn{\lambda} is the decay constant \cr and \eqn{I0} the initial number of trapped electrons. \emph{(for the used equation cf. Boetter-Jensen et al., 2003, Eq. 2.31)} \strong{Start values} Start values are estimated automatically by fitting a linear function to the logarithmized input data set. Currently, there is no option to manually provide start parameters. \strong{Goodness of fit} The goodness of the fit is given as pseudoR^2 value (pseudo coefficient of determination). According to Lave (1970), the value is calculated as: \deqn{pseudoR^2 = 1 - RSS/TSS} where \eqn{RSS = Residual~Sum~of~Squares} \cr and \eqn{TSS = Total~Sum~of~Squares} \strong{Error of fitted component parameters} The 1-sigma error for the components is calculated using the function \link[stats:confint]{stats::confint}. Due to considerable calculation time, this option is deactivated by default. In addition, the error for the components can be estimated by using internal R functions like \link{summary}. See the \link{nls} help page for more information. \emph{For details on the nonlinear regression in R, see Ritz & Streibig (2008).} } \note{ \strong{Beta version - This function has not been properly tested yet and} \strong{should therefore not be used for publication purposes!} The pseudo-R^2 may not be the best parameter to describe the goodness of the fit. The trade off between the \code{n.components} and the pseudo-R^2 value is currently not considered. The function \strong{does not} ensure that the fitting procedure has reached a global minimum rather than a local minimum! 
} \section{Function version}{ 0.5.2 } \examples{ ##load data data(ExampleData.CW_OSL_Curve, envir = environment()) ##fit data fit <- fit_CWCurve(values = ExampleData.CW_OSL_Curve, main = "CW Curve Fit", n.components.max = 4, log = "x") } \section{How to cite}{ Kreutzer, S., 2023. fit_CWCurve(): Nonlinear Least Squares Fit for CW-OSL curves -beta version-. Function version 0.5.2. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Boetter-Jensen, L., McKeever, S.W.S., Wintle, A.G., 2003. Optically Stimulated Luminescence Dosimetry. Elsevier Science B.V. Lave, C.A.T., 1970. The Demand for Urban Mass Transportation. The Review of Economics and Statistics, 52 (3), 320-323. Ritz, C. & Streibig, J.C., 2008. Nonlinear Regression with R. In: R. Gentleman, K. Hornik, G. Parmigiani, eds., Springer, p. 150. } \seealso{ \link{fit_LMCurve}, \link{plot},\link{nls}, \linkS4class{RLum.Data.Curve}, \linkS4class{RLum.Results}, \link{get_RLum}, \link[minpack.lm:nlsLM]{minpack.lm::nlsLM} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \keyword{dplot} \keyword{models} Luminescence/man/calc_CosmicDoseRate.Rd0000644000176200001440000002377114521210045017560 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calc_CosmicDoseRate.R \name{calc_CosmicDoseRate} \alias{calc_CosmicDoseRate} \title{Calculate the cosmic dose rate} \usage{ calc_CosmicDoseRate( depth, density, latitude, longitude, altitude, corr.fieldChanges = FALSE, est.age = NA, half.depth = FALSE, error = 10, ... ) } \arguments{ \item{depth}{\link{numeric} (\strong{required}): depth of overburden (m). 
For more than one absorber use \cr \code{c(depth_1, depth_2, ..., depth_n)}} \item{density}{\link{numeric} (\strong{required}): average overburden density (g/cm^3). For more than one absorber use \cr \code{c(density_1, density_2, ..., density_n)}} \item{latitude}{\link{numeric} (\strong{required}): latitude (decimal degree), N positive} \item{longitude}{\link{numeric} (\strong{required}): longitude (decimal degree), E positive} \item{altitude}{\link{numeric} (\strong{required}): altitude (m above sea-level)} \item{corr.fieldChanges}{\link{logical} (\emph{with default}): correct for geomagnetic field changes after Prescott & Hutton (1994). Apply only when justified by the data.} \item{est.age}{\link{numeric} (\emph{with default}): estimated age range (ka) for geomagnetic field change correction (0-80 ka allowed)} \item{half.depth}{\link{logical} (\emph{with default}): How to overcome with varying overburden thickness. If \code{TRUE} only half the depth is used for calculation. Apply only when justified, i.e. when a constant sedimentation rate can safely be assumed.} \item{error}{\link{numeric} (\emph{with default}): general error (percentage) to be implemented on corrected cosmic dose rate estimate} \item{...}{further arguments (\code{verbose} to disable/enable console output).} } \value{ Returns a terminal output. In addition an \linkS4class{RLum.Results}-object is returned containing the following element: \item{summary}{\link{data.frame} summary of all relevant calculation results.} \item{args}{\link{list} used arguments} \item{call}{\link{call} the function call} The output should be accessed using the function \link{get_RLum} } \description{ This function calculates the cosmic dose rate taking into account the soft- and hard-component of the cosmic ray flux and allows corrections for geomagnetic latitude, altitude above sea-level and geomagnetic field changes. 
} \details{ This function calculates the total cosmic dose rate considering both the soft- and hard-component of the cosmic ray flux. \strong{Internal calculation steps} (1) Calculate total depth of all absorber in hg/cm^2 (1 hg/cm^2 = 100 g/cm^2) \deqn{absorber = depth_1*density_1 + depth_2*density_2 + ... + depth_n*density_n} (2) If \code{half.depth = TRUE} \deqn{absorber = absorber/2} (3) Calculate cosmic dose rate at sea-level and 55 deg. latitude a) If absorber is > 167 g/cm^2 (only hard-component; Allkofer et al. 1975): apply equation given by Prescott & Hutton (1994) (c.f. Barbouti & Rastin 1983) \deqn{D0 = C/(((absorber+d)^\alpha+a)*(absorber+H))*exp(-B*absorber)} b) If absorber is < 167 g/cm^2 (soft- and hard-component): derive D0 from Fig. 1 in Prescott & Hutton (1988). (4) Calculate geomagnetic latitude (Prescott & Stephan 1982, Prescott & Hutton 1994) \deqn{\lambda = arcsin(0.203*cos(latitude)*cos(longitude-291)+0.979* sin(latitude))} (5) Apply correction for geomagnetic latitude and altitude above sea-level. Values for F, J and H were read from Fig. 3 shown in Prescott & Stephan (1982) and fitted with 3-degree polynomials for lambda < 35 degree and a linear fit for lambda > 35 degree. \deqn{Dc = D0*(F+J*exp((altitude/1000)/H))} (6) Optional: Apply correction for geomagnetic field changes in the last 0-80 ka (Prescott & Hutton 1994). Correction and altitude factors are given in Table 1 and Fig. 1 in Prescott & Hutton (1994). Values for altitude factor were fitted with a 2-degree polynomial. The altitude factor is operated on the decimal part of the correction factor. \deqn{Dc' = Dc*correctionFactor} \strong{Usage of \code{depth} and \code{density}} (1) If only one value for depth and density is provided, the cosmic dose rate is calculated for exactly one sample and one absorber as overburden (i.e. \code{depth*density}). (2) In some cases it might be useful to calculate the cosmic dose rate for a sample that is overlain by more than one absorber, e.g.
in a profile with soil layers of different thickness and a distinct difference in density. This can be calculated by providing a matching number of values for \code{depth} and \code{density} (e.g. \verb{depth = c(1, 2), density = c(1.7, 2.4)}) (3) Another possibility is to calculate the cosmic dose rate for more than one sample of the same profile. This is done by providing more than one values for \code{depth} and only one for \code{density}. For example, \code{depth = c(1, 2, 3)} and \code{density = 1.7} will calculate the cosmic dose rate for three samples in 1, 2 and 3 m depth in a sediment of density 1.7 g/cm^3. } \note{ Despite its universal use the equation to calculate the cosmic dose rate provided by Prescott & Hutton (1994) is falsely stated to be valid from the surface to 10^4 hg/cm^2 of standard rock. The original expression by Barbouti & Rastin (1983) only considers the muon flux (i.e. hard-component) and is by their own definition only valid for depths between 10-10^4 hg/cm^2. Thus, for near-surface samples (i.e. for depths < 167 g/cm^2) the equation of Prescott & Hutton (1994) underestimates the total cosmic dose rate, as it neglects the influence of the soft-component of the cosmic ray flux. For samples at zero depth and at sea-level the underestimation can be as large as ~0.1 Gy/ka. In a previous article, Prescott & Hutton (1988) give another approximation of Barbouti & Rastin's equation in the form of \deqn{D = 0.21*exp(-0.070*absorber+0.0005*absorber^2)} which is valid for depths between 150-5000 g/cm^2. For shallower depths (< 150 g/cm^2) they provided a graph (Fig. 1) from which the dose rate can be read. As a result, this function employs the equation of Prescott & Hutton (1994) only for depths > 167 g/cm^2, i.e. only for the hard-component of the cosmic ray flux. 
Cosmic dose rate values for depths < 167 g/cm^2 were obtained from the "AGE" program (Gruen 2009) and fitted with a 6-degree polynomial curve (and hence reproduces the graph shown in Prescott & Hutton 1988). However, these values assume an average overburden density of 2 g/cm^3. It is currently not possible to obtain more precise cosmic dose rate values for near-surface samples as there is no equation known to the author of this function at the time of writing. } \section{Function version}{ 0.5.2 } \examples{ ##(1) calculate cosmic dose rate (one absorber) calc_CosmicDoseRate(depth = 2.78, density = 1.7, latitude = 38.06451, longitude = 1.49646, altitude = 364, error = 10) ##(2a) calculate cosmic dose rate (two absorber) calc_CosmicDoseRate(depth = c(5.0, 2.78), density = c(2.65, 1.7), latitude = 38.06451, longitude = 1.49646, altitude = 364, error = 10) ##(2b) calculate cosmic dose rate (two absorber) and ##correct for geomagnetic field changes calc_CosmicDoseRate(depth = c(5.0, 2.78), density = c(2.65, 1.7), latitude = 12.04332, longitude = 4.43243, altitude = 364, corr.fieldChanges = TRUE, est.age = 67, error = 15) ##(3) calculate cosmic dose rate and export results to .csv file #calculate cosmic dose rate and save to variable results<- calc_CosmicDoseRate(depth = 2.78, density = 1.7, latitude = 38.06451, longitude = 1.49646, altitude = 364, error = 10) # the results can be accessed by get_RLum(results, "summary") #export results to .csv file - uncomment for usage #write.csv(results, file = "c:/users/public/results.csv") ##(4) calculate cosmic dose rate for 6 samples from the same profile ## and save to .csv file #calculate cosmic dose rate and save to variable results<- calc_CosmicDoseRate(depth = c(0.1, 0.5 , 2.1, 2.7, 4.2, 6.3), density = 1.7, latitude = 38.06451, longitude = 1.49646, altitude = 364, error = 10) #export results to .csv file - uncomment for usage #write.csv(results, file = "c:/users/public/results_profile.csv") } \section{How to cite}{ Burow, 
C., 2023. calc_CosmicDoseRate(): Calculate the cosmic dose rate. Function version 0.5.2. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Allkofer, O.C., Carstensen, K., Dau, W.D., Jokisch, H., 1975. Letter to the editor. The absolute cosmic ray flux at sea level. Journal of Physics G: Nuclear and Particle Physics 1, L51-L52. Barbouti, A.I., Rastin, B.C., 1983. A study of the absolute intensity of muons at sea level and under various thicknesses of absorber. Journal of Physics G: Nuclear and Particle Physics 9, 1577-1595. Crookes, J.N., Rastin, B.C., 1972. An investigation of the absolute intensity of muons at sea-level. Nuclear Physics B 39, 493-508. Gruen, R., 2009. The "AGE" program for the calculation of luminescence age estimates. Ancient TL 27, 45-46. Prescott, J.R., Hutton, J.T., 1988. Cosmic ray and gamma ray dosimetry for TL and ESR. Nuclear Tracks and Radiation Measurements 14, 223-227. Prescott, J.R., Hutton, J.T., 1994. Cosmic ray contributions to dose rates for luminescence and ESR dating: large depths and long-term time variations. Radiation Measurements 23, 497-500. Prescott, J.R., Stephan, L.G., 1982. The contribution of cosmic radiation to the environmental dose for thermoluminescence dating. Latitude, altitude and depth dependences. PACT 6, 17-25. 
} \seealso{ \link{BaseDataSet.CosmicDoseRate} } \author{ Christoph Burow, University of Cologne (Germany) , RLum Developer Team} Luminescence/man/convert_BIN2CSV.Rd0000644000176200001440000000375514521210045016540 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/convert_BIN2CSV.R \name{convert_BIN2CSV} \alias{convert_BIN2CSV} \title{Export Risoe BIN-file(s) to CSV-files} \usage{ convert_BIN2CSV(file, ...) } \arguments{ \item{file}{\link{character} (\strong{required}): name of the BIN-file to be converted to CSV-files} \item{...}{further arguments that will be passed to the function \link{read_BIN2R} and \link{write_RLum2CSV}} } \value{ The function returns either a CSV-file (or many of them) or for the option \code{export == FALSE} a list comprising objects of type \link{data.frame} and \link{matrix} } \description{ This function is a wrapper function around the functions \link{read_BIN2R} and \link{write_RLum2CSV} and it imports a Risoe BIN-file and directly exports its content to CSV-files. If nothing is set for the argument \code{path} (\link{write_RLum2CSV}) the input folder will become the output folder. } \section{Function version}{ 0.1.0 } \examples{ ##transform Risoe.BINfileData values to a list data(ExampleData.BINfileData, envir = environment()) convert_BIN2CSV(subset(CWOSL.SAR.Data, POSITION == 1), export = FALSE) \dontrun{ ##select your BIN-file file <- file.choose() ##convert convert_BIN2CSV(file) } } \seealso{ \linkS4class{RLum.Analysis}, \linkS4class{RLum.Data}, \linkS4class{RLum.Results}, \link[utils:write.table]{utils::write.table}, \link{write_RLum2CSV}, \link{read_BIN2R} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. convert_BIN2CSV(): Export Risoe BIN-file(s) to CSV-files. Function version 0.1.0. 
In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \keyword{IO} Luminescence/man/PSL2Risoe.BINfileData.Rd0000644000176200001440000000406614521210045017511 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/PSL2Risoe.BINfileData.R \name{PSL2Risoe.BINfileData} \alias{PSL2Risoe.BINfileData} \title{Convert portable OSL data to an Risoe.BINfileData object} \usage{ PSL2Risoe.BINfileData(object, ...) } \arguments{ \item{object}{\linkS4class{RLum.Analysis} (\strong{required}): \code{RLum.Analysis} object produced by \link{read_PSL2R}} \item{...}{currently not used.} } \value{ Returns an S4 \linkS4class{Risoe.BINfileData} object that can be used to write a BIN file using \link{write_R2BIN}. } \description{ Converts an \code{RLum.Analysis} object produced by the function \code{read_PSL2R()} to an \code{Risoe.BINfileData} object \strong{(BETA)}. } \details{ This function converts an \linkS4class{RLum.Analysis} object that was produced by the \link{read_PSL2R} function to an \linkS4class{Risoe.BINfileData}. The \code{Risoe.BINfileData} can be used to write a Risoe BIN file via \link{write_R2BIN}. 
} \section{Function version}{ 0.0.1 } \examples{ # (1) load and plot example data set data("ExampleData.portableOSL", envir = environment()) plot_RLum(ExampleData.portableOSL) # (2) merge all RLum.Analysis objects into one merged <- merge_RLum(ExampleData.portableOSL) merged # (3) convert to RisoeBINfile object bin <- PSL2Risoe.BINfileData(merged) bin # (4) write Risoe BIN file \dontrun{ write_R2BIN(bin, "~/portableOSL.binx") } } \seealso{ \linkS4class{RLum.Analysis}, \linkS4class{RLum.Data.Curve}, \linkS4class{Risoe.BINfileData} } \author{ Christoph Burow, University of Cologne (Germany) , RLum Developer Team} \section{How to cite}{ Burow, C., 2023. PSL2Risoe.BINfileData(): Convert portable OSL data to an Risoe.BINfileData object. Function version 0.0.1. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \keyword{IO} Luminescence/man/calc_SourceDoseRate.Rd0000644000176200001440000001416214521210045017575 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calc_SourceDoseRate.R \name{calc_SourceDoseRate} \alias{calc_SourceDoseRate} \title{Calculation of the source dose rate via the date of measurement} \usage{ calc_SourceDoseRate( measurement.date = Sys.Date(), calib.date, calib.dose.rate, calib.error, source.type = "Sr-90", dose.rate.unit = "Gy/s", predict = NULL ) } \arguments{ \item{measurement.date}{\link{character} or \link{Date} (with default): Date of measurement in \code{"YYYY-MM-DD"}. If no value is provided, the date will be set to today. 
The argument can be provided as vector.} \item{calib.date}{\link{character} or \link{Date} (\strong{required}): date of source calibration in \code{"YYYY-MM-DD"}} \item{calib.dose.rate}{\link{numeric} (\strong{required}): dose rate at date of calibration in Gy/s or Gy/min} \item{calib.error}{\link{numeric} (\strong{required}): error of dose rate at date of calibration Gy/s or Gy/min} \item{source.type}{\link{character} (\emph{with default}): specify irradiation source (\code{Sr-90}, \code{Co-60}, \code{Cs-137}, \code{Am-214}), see details for further information} \item{dose.rate.unit}{\link{character} (\emph{with default}): specify dose rate unit for input (\code{Gy/min} or \code{Gy/s}), the output is given in Gy/s as valid for the function \link{Second2Gray}} \item{predict}{\link{integer} (\emph{with default}): option allowing to predict the dose rate of the source over time in days set by the provided value. Starting date is the value set with \code{measurement.date}, e.g., \code{calc_SourceDoseRate(..., predict = 100)} calculates the source dose rate for the next 100 days.} } \value{ Returns an S4 object of type \linkS4class{RLum.Results}. Slot \code{data} contains a \link{list} with the following structure: \if{html}{\out{
}}\preformatted{$ dose.rate (data.frame) .. $ dose.rate .. $ dose.rate.error .. $ date (corresponding measurement date) $ parameters (list) .. $ source.type .. $ halflife .. $ dose.rate.unit $ call (the original function call) }\if{html}{\out{
}} The output should be accessed using the function \link{get_RLum}.\cr A plot method of the output is provided via \link{plot_RLum} } \description{ Calculating the dose rate of the irradiation source via the date of measurement based on: source calibration date, source dose rate, dose rate error. The function returns a data.frame that provides the input argument dose_rate for the function \link{Second2Gray}. } \details{ Calculation of the source dose rate based on the time elapsed since the last calibration of the irradiation source. Decay parameters assume a Sr-90 beta source. \deqn{dose.rate = D0 * exp(-log(2) / T.1/2 * t)} \cr with: D0 <- calibration dose rate T.1/2 <- half-life of the source nuclide (here in days) t <- time since source calibration (in days) log(2) / T.1/2 equals the decay constant lambda Information on the date of measurements may be taken from the data's original .BIN file (using e.g., \code{BINfile <- read_BIN2R()} and the slot \code{BINfile@METADATA$DATE}) \strong{Allowed source types and related values} \tabular{rllll}{ \strong{#} \tab \strong{Source type} \tab \strong{T.1/2} \tab \strong{Reference} \cr \verb{[1]} \tab Sr-90 \tab 28.90 y \tab NNDC, Brookhaven National Laboratory \cr \verb{[2]} \tab Am-214 \tab 432.6 y \tab NNDC, Brookhaven National Laboratory \cr \verb{[3]} \tab Co-60 \tab 5.274 y \tab NNDC, Brookhaven National Laboratory \cr \verb{[4]} \tab Cs-137 \tab 30.08 y \tab NNDC, Brookhaven National Laboratory} } \note{ Please be careful when using the option \code{predict}, especially when a multiple set for \code{measurement.date} and \code{calib.date} is provided. For the source dose rate prediction, the function takes the last value of \code{measurement.date} and predicts from it the source dose rate for the number of days requested, i.e. the (multiple) original input will be replaced. However, the function does not change entries for the calibration dates, but mixes them up. 
Therefore, it is not recommended to use this option when multiple calibration dates (\code{calib.date}) are provided. } \section{Function version}{ 0.3.2 } \examples{ ##(1) Simple function usage ##Basic calculation of the dose rate for a specific date dose.rate <- calc_SourceDoseRate(measurement.date = "2012-01-27", calib.date = "2014-12-19", calib.dose.rate = 0.0438, calib.error = 0.0019) ##show results get_RLum(dose.rate) ##(2) Usage in combination with another function (e.g., Second2Gray() ) ## load example data data(ExampleData.DeValues, envir = environment()) ## use the calculated variable dose.rate as input argument ## to convert De(s) to De(Gy) Second2Gray(ExampleData.DeValues$BT998, dose.rate) ##(3) source rate prediction and plotting dose.rate <- calc_SourceDoseRate(measurement.date = "2012-01-27", calib.date = "2014-12-19", calib.dose.rate = 0.0438, calib.error = 0.0019, predict = 1000) plot_RLum(dose.rate) ##(4) export output to a LaTeX table (example using the package 'xtable') \dontrun{ xtable::xtable(get_RLum(dose.rate)) } } \section{How to cite}{ Fuchs, M.C., Kreutzer, S., 2023. calc_SourceDoseRate(): Calculation of the source dose rate via the date of measurement. Function version 0.3.2. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ NNDC, Brookhaven National Laboratory \verb{http://www.nndc.bnl.gov/} } \seealso{ \link{Second2Gray}, \link{get_RLum}, \link{plot_RLum} } \author{ Margret C. 
Fuchs, HZDR, Helmholtz-Institute Freiberg for Resource Technology (Germany) \cr Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \keyword{manip} Luminescence/man/Risoe.BINfileData-class.Rd0000644000176200001440000003247614521210045020161 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Risoe.BINfileData-class.R \docType{class} \name{Risoe.BINfileData-class} \alias{Risoe.BINfileData-class} \alias{show,Risoe.BINfileData-method} \alias{set_Risoe.BINfileData,ANY-method} \alias{get_Risoe.BINfileData,Risoe.BINfileData-method} \title{Class \code{"Risoe.BINfileData"}} \usage{ \S4method{show}{Risoe.BINfileData}(object) \S4method{set_Risoe.BINfileData}{ANY}( METADATA = data.frame(), DATA = list(), .RESERVED = list() ) \S4method{get_Risoe.BINfileData}{Risoe.BINfileData}(object, ...) } \arguments{ \item{object}{an object of class \linkS4class{Risoe.BINfileData}} \item{METADATA}{Object of class "data.frame" containing the meta information for each curve.} \item{DATA}{Object of class "list" containing numeric vector with count data.} \item{.RESERVED}{Object of class "list" containing list of undocumented raw values for internal use only.} \item{...}{other arguments that might be passed} } \description{ S4 class object for luminescence data in R. The object is produced as output of the function \link{read_BIN2R}. } \section{Methods (by generic)}{ \itemize{ \item \code{show(Risoe.BINfileData)}: Show structure of RLum and Risoe.BINfile class objects \item \code{set_Risoe.BINfileData(ANY)}: The Risoe.BINfileData is normally produced as output of the function read_BIN2R. This construction method is intended for internal usage only. \item \code{get_Risoe.BINfileData(Risoe.BINfileData)}: Formal get-method for Risoe.BINfileData object. It does not allow accessing the object directly, it is just showing a terminal message. 
}} \section{Slots}{ \describe{ \item{\code{METADATA}}{Object of class "data.frame" containing the meta information for each curve.} \item{\code{DATA}}{Object of class "list" containing numeric vector with count data.} \item{\code{.RESERVED}}{Object of class "list" containing list of undocumented raw values for internal use only.} }} \note{ \strong{Internal METADATA - object structure} This structure is compatible with BIN/BINX-files version 03-08, however, it does not follow (in its sequential arrangement) the manual provided by the manufacturer, but an own structure accounting for the different versions. \tabular{rllll}{ \strong{#} \tab \strong{Name} \tab \strong{Data Type} \tab \strong{V} \tab \strong{Description} \cr \verb{[,1]} \tab \code{ID} \tab \code{numeric} \tab RLum \tab Unique record ID (same ID as in slot \code{DATA})\cr \verb{[,2]} \tab \code{SEL} \tab \code{logic} \tab RLum \tab Record selection, not part official BIN-format, triggered by TAG\cr \verb{[,3]} \tab \code{VERSION} \tab \code{raw} \tab 03-08 \tab BIN-file version number \cr \verb{[,4]} \tab \code{LENGTH} \tab \code{integer} \tab 03-08 \tab Length of this record\cr \verb{[,5]} \tab \code{PREVIOUS} \tab \code{integer} \tab 03-08 \tab Length of previous record\cr \verb{[,6]} \tab \code{NPOINTS} \tab \code{integer} \tab 03-08 \tab Number of data points in the record\cr \verb{[,7]} \tab \code{RECTYPE} \tab \code{integer} \tab 08 \tab Record type \cr \verb{[,8]} \tab \code{RUN} \tab \code{integer} \tab 03-08 \tab Run number\cr \verb{[,9]} \tab \code{SET} \tab \code{integer} \tab 03-08 \tab Set number\cr \verb{[,10]} \tab \code{POSITION} \tab \code{integer} \tab 03-08 \tab Position number\cr \verb{[,11]} \tab \code{GRAIN} \tab \code{integer} \tab 03-04 \tab Grain number\cr \verb{[,12]} \tab \code{GRAINNUMBER} \tab \code{integer} \tab 05-08 \tab Grain number\cr \verb{[,13]} \tab \code{CURVENO} \tab \code{integer} \tab 05-08 \tab Curve number\cr \verb{[,14]} \tab \code{XCOORD} \tab \code{integer} 
\tab 03-08 \tab X position of a single grain\cr \verb{[,15]} \tab \code{YCOORD} \tab \code{integer} \tab 03-08 \tab Y position of a single grain\cr \verb{[,16]} \tab \code{SAMPLE} \tab \code{factor} \tab 03-08 \tab Sample name\cr \verb{[,17]} \tab \code{COMMENT} \tab \code{factor} \tab 03-08 \tab Comment name\cr \verb{[,18]} \tab \code{SYSTEMID} \tab \code{integer} \tab 03-08 \tab Risø system id\cr \verb{[,19]} \tab \code{FNAME} \tab \code{factor} \tab 05-08 \tab File name (\emph{.bin/}.binx)\cr \verb{[,20]} \tab \code{USER} \tab \code{factor} \tab 03-08 \tab User name\cr \verb{[,21]} \tab \code{TIME} \tab \code{character} \tab 03-08 \tab Data collection time (\code{hh-mm-ss})\cr \verb{[,22]} \tab \code{DATE} \tab \code{factor} \tab 03-08 \tab Data collection date (\code{ddmmyy})\cr \verb{[,23]} \tab \code{DTYPE} \tab \code{character} \tab 03-08 \tab Data type\cr \verb{[,24]} \tab \code{BL_TIME} \tab \code{numeric} \tab 03-08 \tab Bleaching time\cr \verb{[,25]} \tab \code{BL_UNIT} \tab \code{integer} \tab 03-08 \tab Bleaching unit (mJ, J, s, min, h)\cr \verb{[,26]} \tab \code{NORM1} \tab \code{numeric} \tab 03-08 \tab Normalisation factor (1)\cr \verb{[,27]} \tab \code{NORM2} \tab \code{numeric} \tab 03-08 \tab Normalisation factor (2)\cr \verb{[,28]} \tab \code{NORM3} \tab \code{numeric} \tab 03-08 \tab Normalisation factor (3)\cr \verb{[,29]} \tab \code{BG} \tab \code{numeric} \tab 03-08 \tab Background level\cr \verb{[,30]} \tab \code{SHIFT} \tab \code{integer} \tab 03-08 \tab Number of channels to shift data\cr \verb{[,31]} \tab \code{TAG} \tab \code{integer} \tab 03-08 \tab Tag, triggers \code{SEL}\cr \verb{[,32]} \tab \code{LTYPE} \tab \code{character} \tab 03-08 \tab Luminescence type\cr \verb{[,33]} \tab \code{LIGHTSOURCE} \tab \code{character} \tab 03-08 \tab Light source\cr \verb{[,34]} \tab \code{LPOWER} \tab \code{numeric} \tab 03-08 \tab Optical stimulation power\cr \verb{[,35]} \tab \code{LIGHTPOWER} \tab \code{numeric} \tab 05-08 \tab Optical 
stimulation power\cr \verb{[,36]} \tab \code{LOW} \tab \code{numeric} \tab 03-08 \tab Low (temperature, time, wavelength)\cr \verb{[,37]} \tab \code{HIGH} \tab \code{numeric} \tab 03-08 \tab High (temperature, time, wavelength)\cr \verb{[,38]} \tab \code{RATE} \tab \code{numeric} \tab 03-08 \tab Rate (heating rate, scan rate)\cr \verb{[,39]} \tab \code{TEMPERATURE} \tab \code{integer} \tab 03-08 \tab Sample temperature\cr \verb{[,40]} \tab \code{MEASTEMP} \tab \code{integer} \tab 05-08 \tab Measured temperature\cr \verb{[,41]} \tab \code{AN_TEMP} \tab \code{numeric} \tab 03-08 \tab Annealing temperature\cr \verb{[,42]} \tab \code{AN_TIME} \tab \code{numeric} \tab 03-08 \tab Annealing time\cr \verb{[,43]} \tab \code{TOLDELAY} \tab \code{integer} \tab 03-08 \tab TOL 'delay' channels\cr \verb{[,44]} \tab \code{TOLON} \tab \code{integer} \tab 03-08 \tab TOL 'on' channels\cr \verb{[,45]} \tab \code{TOLOFF} \tab \code{integer} \tab 03-08 \tab TOL 'off' channels\cr \verb{[,46]} \tab \code{IRR_TIME} \tab \code{numeric} \tab 03-08 \tab Irradiation time\cr \verb{[,47]} \tab \code{IRR_TYPE} \tab \code{integer} \tab 03-08 \tab Irradiation type (alpha, beta or gamma)\cr \verb{[,48]} \tab \code{IRR_UNIT} \tab \code{integer} \tab 03-04 \tab Irradiation unit (Gy, rad, s, min, h)\cr \verb{[,49]} \tab \code{IRR_DOSERATE} \tab \code{numeric} \tab 05-08 \tab Irradiation dose rate (Gy/s)\cr \verb{[,50]} \tab \code{IRR_DOSERATEERR} \tab \code{numeric} \tab 06-08 \tab Irradiation dose rate error (Gy/s)\cr \verb{[,51]} \tab \code{TIMESINCEIRR} \tab \code{integer} \tab 05-08 \tab Time since irradiation (s)\cr \verb{[,52]} \tab \code{TIMETICK} \tab \code{numeric} \tab 05-08 \tab Time tick for pulsing (s)\cr \verb{[,53]} \tab \code{ONTIME} \tab \code{integer} \tab 05-08 \tab On-time for pulsing (in time ticks)\cr \verb{[,54]} \tab \code{OFFTIME} \tab \code{integer} \tab 03 \tab Off-time for pulsed stimulation (in s) \cr \verb{[,55]} \tab \code{STIMPERIOD} \tab \code{integer} \tab 05-08 \tab 
Stimulation period (on+off in time ticks)\cr \verb{[,56]} \tab \code{GATE_ENABLED} \tab \code{raw} \tab 05-08 \tab PMT signal gating enabled\cr \verb{[,57]} \tab \code{ENABLE_FLAGS} \tab \code{raw} \tab 05-08 \tab PMT signal gating enabled\cr \verb{[,58]} \tab \code{GATE_START} \tab \code{integer} \tab 05-08 \tab Start gating (in time ticks)\cr \verb{[,59]} \tab \code{GATE_STOP} \tab \code{integer} \tab 05-08 \tab Stop gating (in time ticks), \code{'Gateend'} for version 04, here only GATE_STOP is used\cr \verb{[,60]} \tab \code{PTENABLED} \tab \code{raw} \tab 05-08 \tab Photon time enabled\cr \verb{[,61]} \tab \code{DTENABLED} \tab \code{raw} \tab 05-08 \tab PMT dead time correction enabled\cr \verb{[,62]} \tab \code{DEADTIME} \tab \code{numeric} \tab 05-08 \tab PMT dead time (s)\cr \verb{[,63]} \tab \code{MAXLPOWER} \tab \code{numeric} \tab 05-08 \tab Stimulation power to 100 percent (mW/cm^2)\cr \verb{[,64]} \tab \code{XRF_ACQTIME} \tab \code{numeric} \tab 05-08 \tab XRF acquisition time (s)\cr \verb{[,65]} \tab \code{XRF_HV} \tab \code{numeric} \tab 05-08 \tab XRF X-ray high voltage (V)\cr \verb{[,66]} \tab \code{XRF_CURR} \tab \code{integer} \tab 05-08 \tab XRF X-ray current (µA)\cr \verb{[,67]} \tab \code{XRF_DEADTIMEF} \tab \code{numeric} \tab 05-08 \tab XRF dead time fraction\cr \verb{[,68]} \tab \code{DETECTOR_ID} \tab \code{raw} \tab 07-08 \tab Detector ID\cr \verb{[,69]} \tab \code{LOWERFILTER_ID} \tab \code{integer} \tab 07-08 \tab Lower filter ID in reader\cr \verb{[,70]} \tab \code{UPPERFILTER_ID} \tab \code{integer} \tab 07-08 \tab Upper filter ID in reader\cr \verb{[,71]} \tab \code{ENOISEFACTOR} \tab \code{numeric} \tab 07-08 \tab Excess noise filter, usage unknown \cr \verb{[,72]} \tab \code{MARKPOS_X1} \tab \code{numeric} \tab 08 \tab Coordinates marker position 1 \cr \verb{[,73]} \tab \code{MARKPOS_Y1} \tab \code{numeric} \tab 08 \tab Coordinates marker position 1 \cr \verb{[,74]} \tab \code{MARKPOS_X2} \tab \code{numeric} \tab 08 \tab 
Coordinates marker position 2 \cr \verb{[,75]} \tab \code{MARKPOS_Y2} \tab \code{numeric} \tab 08 \tab Coordinates marker position 2 \cr \verb{[,76]} \tab \code{MARKPOS_X3} \tab \code{numeric} \tab 08 \tab Coordinates marker position 3 \cr \verb{[,77]} \tab \code{MARKPOS_Y3} \tab \code{numeric} \tab 08 \tab Coordinates marker position 3 \cr \verb{[,78]} \tab \code{EXTR_START} \tab \code{numeric} \tab 08 \tab usage unknown \cr \verb{[,79]} \tab \code{EXTR_END} \tab \code{numeric} \tab 08 \tab usage unknown\cr \verb{[,80]} \tab \code{SEQUENCE} \tab \code{character} \tab 03-04 \tab Sequence name } V = BIN-file version (RLum means that it does not depend on a specific BIN version) Note that the \code{Risoe.BINfileData} object combines all values from different versions from the BIN-file, reserved bits are skipped, however, the function \link{write_R2BIN} reset arbitrary reserved bits. Invalid values for a specific version are set to \code{NA}. Furthermore, the internal R data types do not necessarily match the required data types for the BIN-file data import! 
Data types are converted during data import.\cr \strong{LTYPE} values \tabular{rll}{ VALUE \tab TYPE \tab DESCRIPTION \cr \verb{[0]} \tab \code{TL} \tab: Thermoluminescence \cr \verb{[1]} \tab \code{OSL} \tab: Optically stimulated luminescence \cr \verb{[2]} \tab \code{IRSL} \tab: Infrared stimulated luminescence \cr \verb{[3]} \tab \code{M-IR} \tab: Infrared monochromator scan\cr \verb{[4]} \tab \code{M-VIS} \tab: Visible monochromator scan\cr \verb{[5]} \tab \code{TOL} \tab: Thermo-optical luminescence \cr \verb{[6]} \tab \code{TRPOSL} \tab: Time Resolved Pulsed OSL\cr \verb{[7]} \tab \code{RIR} \tab: Ramped IRSL\cr \verb{[8]} \tab \code{RBR} \tab: Ramped (Blue) LEDs\cr \verb{[9]} \tab \code{USER} \tab: User defined\cr \verb{[10]} \tab \code{POSL} \tab: Pulsed OSL \cr \verb{[11]} \tab \code{SGOSL} \tab: Single Grain OSL\cr \verb{[12]} \tab \code{RL} \tab: Radio Luminescence \cr \verb{[13]} \tab \code{XRF} \tab: X-ray Fluorescence } \strong{DTYPE} values \tabular{rl}{ VALUE \tab DESCRIPTION \cr \verb{[0]} \tab Natural \cr \verb{[1]} \tab N+dose \cr \verb{[2]} \tab Bleach \cr \verb{[3]} \tab Bleach+dose \cr \verb{[4]} \tab Natural (Bleach) \cr \verb{[5]} \tab N+dose (Bleach) \cr \verb{[6]} \tab Dose \cr \verb{[7]} \tab Background } \strong{LIGHTSOURCE} values \tabular{rl}{ VALUE \tab DESCRIPTION \cr \verb{[0]} \tab None \cr \verb{[1]} \tab Lamp \cr \verb{[2]} \tab IR diodes/IR Laser \cr \verb{[3]} \tab Calibration LED \cr \verb{[4]} \tab Blue Diodes \cr \verb{[5]} \tab White light \cr \verb{[6]} \tab Green laser (single grain) \cr \verb{[7]} \tab IR laser (single grain) } (information on the BIN/BINX file format are kindly provided by Risø, DTU Nutech) } \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("Risoe.BINfileData", ...)}. } \section{Function version}{ 0.4.0 } \examples{ showClass("Risoe.BINfileData") } \section{How to cite}{ Kreutzer, S., 2023. Risoe.BINfileData-class(): Class 'Risoe.BINfileData'. 
Function version 0.4.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Risø DTU, 2013. The Sequence Editor User Manual - Feb 2013 and Risø DTU, 2016. The Sequence Editor User Manual - February 2016 \url{https://www.fysik.dtu.dk} } \seealso{ \link{plot_Risoe.BINfileData}, \link{read_BIN2R}, \link{write_R2BIN}, \link{merge_Risoe.BINfileData}, \link{Risoe.BINfileData2RLum.Analysis} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)\cr based on information provided by Torben Lapp and Karsten Bracht Nielsen (Risø DTU, Denmark) , RLum Developer Team} \keyword{classes} \keyword{internal} Luminescence/man/CW2pHMi.Rd0000644000176200001440000001554714521210045015105 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/CW2pHMi.R \name{CW2pHMi} \alias{CW2pHMi} \title{Transform a CW-OSL curve into a pHM-OSL curve via interpolation under hyperbolic modulation conditions} \usage{ CW2pHMi(values, delta) } \arguments{ \item{values}{\linkS4class{RLum.Data.Curve} or \link{data.frame} (\strong{required}): \linkS4class{RLum.Data.Curve} or \link{data.frame} with measured curve data of type stimulation time (t) (\code{values[,1]}) and measured counts (cts) (\code{values[,2]}).} \item{delta}{\link{vector} (\emph{optional}): stimulation rate parameter, if no value is given, the optimal value is estimated automatically (see details). Smaller values of delta produce more points in the rising tail of the curve.} } \value{ The function returns the same data type as the input data type with the transformed curve values. 
\strong{\code{RLum.Data.Curve}} \tabular{ll}{ \verb{$CW2pHMi.x.t} \tab: transformed time values \cr \verb{$CW2pHMi.method} \tab: used method for the production of the new data points } \strong{\code{data.frame}} \tabular{ll}{ \verb{$x} \tab: time\cr \verb{$y.t} \tab: transformed count values\cr \verb{$x.t} \tab: transformed time values \cr \verb{$method} \tab: used method for the production of the new data points } } \description{ This function transforms a conventionally measured continuous-wave (CW) OSL-curve to a pseudo hyperbolic modulated (pHM) curve under hyperbolic modulation conditions using the interpolation procedure described by Bos & Wallinga (2012). } \details{ The complete procedure of the transformation is described in Bos & Wallinga (2012). The input \code{data.frame} consists of two columns: time (t) and count values (CW(t)) \strong{Internal transformation steps} (1) log(CW-OSL) values (2) Calculate t' which is the transformed time: \deqn{t' = t-(1/\delta)*log(1+\delta*t)} (3) Interpolate CW(t'), i.e. use the log(CW(t)) to obtain the count values for the transformed time (t'). Values beyond \code{min(t)} and \code{max(t)} produce \code{NA} values. (4) Select all values for t' < \code{min(t)}, i.e. values beyond the time resolution of t. Select the first two values of the transformed data set which contain no \code{NA} values and use these values for a linear fit using \link{lm}. (5) Extrapolate values for t' < \code{min(t)} based on the previously obtained fit parameters. (6) Transform values using \deqn{pHM(t) = (\delta*t/(1+\delta*t))*c*CW(t')} \deqn{c = (1+\delta*P)/\delta*P} \deqn{P = length(stimulation~period)} (7) Combine all values and truncate all values for t' > \code{max(t)} \strong{NOTE:} The number of values for t' < \code{min(t)} depends on the stimulation rate parameter \code{delta}. 
To avoid the production of too many artificial data at the raising tail of the determined pHM curve, it is recommended to use the automatic estimation routine for \code{delta}, i.e. provide no value for \code{delta}. } \note{ According to Bos & Wallinga (2012), the number of extrapolated points should be limited to avoid artificial intensity data. If \code{delta} is provided manually and more than two points are extrapolated, a warning message is returned. The function \link{approx} may produce some \code{Inf} and \code{NaN} data. The function tries to manually interpolate these values by calculating the \code{mean} using the adjacent channels. If two invalid values are succeeding, the values are removed and no further interpolation is attempted. In every case a warning message is shown. } \section{Function version}{ 0.2.2 } \examples{ ##(1) - simple transformation ##load CW-OSL curve data data(ExampleData.CW_OSL_Curve, envir = environment()) ##transform values values.transformed<-CW2pHMi(ExampleData.CW_OSL_Curve) ##plot plot(values.transformed$x, values.transformed$y.t, log = "x") ##(2) - load CW-OSL curve from BIN-file and plot transformed values ##load BINfile #BINfileData<-readBIN2R("[path to BIN-file]") data(ExampleData.BINfileData, envir = environment()) ##grep first CW-OSL curve from ALQ 1 curve.ID<-CWOSL.SAR.Data@METADATA[CWOSL.SAR.Data@METADATA[,"LTYPE"]=="OSL" & CWOSL.SAR.Data@METADATA[,"POSITION"]==1 ,"ID"] curve.HIGH<-CWOSL.SAR.Data@METADATA[CWOSL.SAR.Data@METADATA[,"ID"]==curve.ID[1] ,"HIGH"] curve.NPOINTS<-CWOSL.SAR.Data@METADATA[CWOSL.SAR.Data@METADATA[,"ID"]==curve.ID[1] ,"NPOINTS"] ##combine curve to data set curve<-data.frame(x = seq(curve.HIGH/curve.NPOINTS,curve.HIGH, by = curve.HIGH/curve.NPOINTS), y=unlist(CWOSL.SAR.Data@DATA[curve.ID[1]])) ##transform values curve.transformed <- CW2pHMi(curve) ##plot curve plot(curve.transformed$x, curve.transformed$y.t, log = "x") ##(3) - produce Fig. 
4 from Bos & Wallinga (2012) ##load data data(ExampleData.CW_OSL_Curve, envir = environment()) values <- CW_Curve.BosWallinga2012 ##open plot area plot(NA, NA, xlim=c(0.001,10), ylim=c(0,8000), ylab="pseudo OSL (cts/0.01 s)", xlab="t [s]", log="x", main="Fig. 4 - Bos & Wallinga (2012)") values.t<-CW2pLMi(values, P=1/20) lines(values[1:length(values.t[,1]),1],CW2pLMi(values, P=1/20)[,2], col="red" ,lwd=1.3) text(0.03,4500,"LM", col="red" ,cex=.8) values.t<-CW2pHMi(values, delta=40) lines(values[1:length(values.t[,1]),1],CW2pHMi(values, delta=40)[,2], col="black", lwd=1.3) text(0.005,3000,"HM", cex=.8) values.t<-CW2pPMi(values, P=1/10) lines(values[1:length(values.t[,1]),1],CW2pPMi(values, P=1/10)[,2], col="blue", lwd=1.3) text(0.5,6500,"PM", col="blue" ,cex=.8) } \section{How to cite}{ Kreutzer, S., 2023. CW2pHMi(): Transform a CW-OSL curve into a pHM-OSL curve via interpolation under hyperbolic modulation conditions. Function version 0.2.2. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Bos, A.J.J. & Wallinga, J., 2012. How to visualize quartz OSL signal components. Radiation Measurements, 47, 752-758.\cr \strong{Further Reading} Bulur, E., 1996. An Alternative Technique For Optically Stimulated Luminescence (OSL) Experiment. Radiation Measurements, 26, 701-709. Bulur, E., 2000. A simple transformation for converting CW-OSL curves to LM-OSL curves. Radiation Measurements, 32, 141-145. } \seealso{ \link{CW2pLM}, \link{CW2pLMi}, \link{CW2pPMi}, \link{fit_LMCurve}, \link{lm}, \linkS4class{RLum.Data.Curve} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)\cr Based on comments and suggestions from:\cr Adrie J.J. 
Bos, Delft University of Technology, The Netherlands , RLum Developer Team} \keyword{manip} Luminescence/man/plot_Risoe.BINfileData.Rd0000644000176200001440000001065714521210045020111 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot_Risoe.BINfileData.R \name{plot_Risoe.BINfileData} \alias{plot_Risoe.BINfileData} \title{Plot single luminescence curves from a BIN file object} \usage{ plot_Risoe.BINfileData( BINfileData, position, run, set, sorter = "POSITION", ltype = c("IRSL", "OSL", "TL", "RIR", "RBR", "RL"), curve.transformation, dose_rate, temp.lab, cex.global = 1, ... ) } \arguments{ \item{BINfileData}{\linkS4class{Risoe.BINfileData} (\strong{required}): requires an S4 object returned by the \link{read_BIN2R} function.} \item{position}{\link{vector} (\emph{optional}): option to limit the plotted curves by position (e.g. \code{position = 1}, \code{position = c(1,3,5)}).} \item{run}{\link{vector} (\emph{optional}): option to limit the plotted curves by run (e.g., \code{run = 1}, \code{run = c(1,3,5)}).} \item{set}{\link{vector} (\emph{optional}): option to limit the plotted curves by set (e.g., \code{set = 1}, \code{set = c(1,3,5)}).} \item{sorter}{\link{character} (\emph{with default}): the plot output can be ordered by "POSITION","SET" or "RUN". POSITION, SET and RUN are options defined in the Risoe Sequence Editor.} \item{ltype}{\link{character} (\emph{with default}): option to limit the plotted curves by the type of luminescence stimulation. Allowed values: \code{"IRSL"}, \code{"OSL"},\code{"TL"}, \code{"RIR"}, \code{"RBR"} (corresponds to LM-OSL), \code{"RL"}. All type of curves are plotted by default.} \item{curve.transformation}{\link{character} (\emph{optional}): allows transforming CW-OSL and CW-IRSL curves to pseudo-LM curves via transformation functions. Allowed values are: \code{CW2pLM}, \code{CW2pLMi}, \code{CW2pHMi} and \code{CW2pPMi}. 
See details.} \item{dose_rate}{\link{numeric} (\emph{optional}): dose rate of the irradiation source at the measurement date. If set, the given irradiation dose will be shown in Gy. See details.} \item{temp.lab}{\link{character} (\emph{optional}): option to allow for different temperature units. If no value is set deg. C is chosen.} \item{cex.global}{\link{numeric} (\emph{with default}): global scaling factor.} \item{...}{further undocumented plot arguments.} } \value{ Returns a plot. } \description{ Plots single luminescence curves from an object returned by the \link{read_BIN2R} function. } \details{ \strong{Nomenclature} See \linkS4class{Risoe.BINfileData} \strong{curve.transformation} This argument allows transforming continuous wave (CW) curves to pseudo (linear) modulated curves. For the transformation, the functions of the package are used. Currently, it is not possible to pass further arguments to the transformation functions. The argument works only for \code{ltype} \code{OSL} and \code{IRSL}. \strong{Irradiation time} Plotting the irradiation time (s) or the given dose (Gy) requires that the variable \code{IRR_TIME} has been set within the BIN-file. This is normally done by using the 'Run Info' option within the Sequence Editor or by editing in R. } \note{ The function has been successfully tested for the Sequence Editor file output version 3 and 4. } \section{Function version}{ 0.4.1 } \examples{ ##load data data(ExampleData.BINfileData, envir = environment()) ##plot all curves from the first position to the desktop #pdf(file = "~/Desktop/CurveOutput.pdf", paper = "a4", height = 11, onefile = TRUE) ##example - load from *.bin file #BINfile<- file.choose() #BINfileData<-read_BIN2R(BINfile) #par(mfrow = c(4,3), oma = c(0.5,1,0.5,1)) #plot_Risoe.BINfileData(CWOSL.SAR.Data,position = 1) #mtext(side = 4, BINfile, outer = TRUE, col = "blue", cex = .7) #dev.off() } \section{How to cite}{ Kreutzer, S., Dietze, M., 2023. 
plot_Risoe.BINfileData(): Plot single luminescence curves from a BIN file object. Function version 0.4.1. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Duller, G., 2007. Analyst. pp. 1-45. } \seealso{ \linkS4class{Risoe.BINfileData},\link{read_BIN2R}, \link{CW2pLM}, \link{CW2pLMi}, \link{CW2pPMi}, \link{CW2pHMi} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)\cr Michael Dietze, GFZ Potsdam (Germany) , RLum Developer Team} \keyword{dplot} Luminescence/man/calc_AliquotSize.Rd0000644000176200001440000001537414521210045017165 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calc_AliquotSize.R \name{calc_AliquotSize} \alias{calc_AliquotSize} \title{Estimate the amount of grains on an aliquot} \usage{ calc_AliquotSize( grain.size, sample.diameter, packing.density = 0.65, MC = TRUE, grains.counted, plot = TRUE, ... ) } \arguments{ \item{grain.size}{\link{numeric} (\strong{required}): mean grain size (microns) or a range of grain sizes from which the mean grain size is computed (e.g. \code{c(100,200)}).} \item{sample.diameter}{\link{numeric} (\strong{required}): diameter (mm) of the targeted area on the sample carrier.} \item{packing.density}{\link{numeric} (\emph{with default}): empirical value for mean packing density. \cr If \code{packing.density = "Inf"} a hexagonal structure on an infinite plane with a packing density of \eqn{0.906\ldots} is assumed.} \item{MC}{\link{logical} (\emph{optional}): if \code{TRUE} the function performs a Monte Carlo simulation for estimating the amount of grains on the sample carrier and assumes random errors in grain size distribution and packing density. 
Requires a vector with min and max grain size for \code{grain.size}. For more information see details.} \item{grains.counted}{\link{numeric} (\emph{optional}): grains counted on a sample carrier. If a non-zero positive integer is provided this function will calculate the packing density of the aliquot. If more than one value is provided the mean packing density and its standard deviation is calculated. Note that this overrides \code{packing.density}.} \item{plot}{\link{logical} (\emph{with default}): plot output (\code{TRUE}/\code{FALSE})} \item{...}{further arguments to pass (\verb{main, xlab, MC.iter}).} } \value{ Returns a terminal output. In addition an \linkS4class{RLum.Results} object is returned containing the following element: \item{.$summary}{\link{data.frame} summary of all relevant calculation results.} \item{.$args}{\link{list} used arguments} \item{.$call}{\link{call} the function call} \item{.$MC}{\link{list} results of the Monte Carlo simulation} The output should be accessed using the function \link{get_RLum}. } \description{ Estimate the number of grains on an aliquot. Alternatively, the packing density of an aliquot is computed. } \details{ This function can be used to either estimate the number of grains on an aliquot or to compute the packing density depending on the arguments provided. The following function is used to estimate the number of grains \code{n}: \deqn{n = (\pi*x^2)/(\pi*y^2)*d} where \code{x} is the radius of the aliquot (mm), \code{y} is the mean radius of the mineral grains (microns) and \code{d} is the packing density (value between 0 and 1). \strong{Packing density} The default value for \code{packing.density} is 0.65, which is the mean of empirical values determined by Heer et al. (2012) and unpublished data from the Cologne luminescence laboratory. If \code{packing.density = "Inf"} a maximum density of \eqn{\pi/\sqrt12 = 0.9068\ldots} is used.
However, note that this value is not appropriate as the standard preparation procedure of aliquots resembles a PECC (\emph{"Packing Equal Circles in a Circle"}) problem where the maximum packing density is asymptotic to about 0.87. \strong{Monte Carlo simulation} The number of grains on an aliquot can be estimated by Monte Carlo simulation when setting \code{MC = TRUE}. Each of the parameters necessary to calculate \code{n} (\code{x}, \code{y}, \code{d}) are assumed to be normally distributed with means \eqn{\mu_x, \mu_y, \mu_d} and standard deviations \eqn{\sigma_x, \sigma_y, \sigma_d}. For the mean grain size random samples are taken first from \eqn{N(\mu_y, \sigma_y)}, where \eqn{\mu_y = mean.grain.size} and \eqn{\sigma_y = (max.grain.size-min.grain.size)/4} so that 95\\% of all grains are within the provided grain size range. This effectively takes into account that after sieving the sample there is still a small chance of having grains smaller or larger than the used mesh sizes. For each random sample the mean grain size is calculated, from which random subsamples are drawn for the Monte Carlo simulation. The packing density is assumed to be normally distributed with an empirically determined \eqn{\mu = 0.65} (or provided value) and \eqn{\sigma = 0.18}. The normal distribution is truncated at \code{d = 0.87} as this is approximately the maximum packing density that can be achieved in a PECC problem. The sample diameter has \eqn{\mu = sample.diameter} and \eqn{\sigma = 0.2} to take into account variations in sample disc preparation (i.e. applying silicon spray to the disc). A lower truncation point at \code{x = 0.5} is used, which assumes that aliquots with sample diameters smaller than 0.5 mm are discarded. Likewise, the normal distribution is truncated at 9.8 mm, which is the diameter of the sample disc. For each random sample drawn from the normal distributions the amount of grains on the aliquot is calculated.
By default, \code{10^5} iterations are used, but can be reduced/increased with \code{MC.iter} (see \code{...}). The results are visualised in a bar- and boxplot together with a statistical summary. } \section{Function version}{ 0.31 } \examples{ ## Estimate the amount of grains on a small aliquot calc_AliquotSize(grain.size = c(100,150), sample.diameter = 1, MC.iter = 100) ## Calculate the mean packing density of large aliquots calc_AliquotSize(grain.size = c(100,200), sample.diameter = 8, grains.counted = c(2525,2312,2880), MC.iter = 100) } \section{How to cite}{ Burow, C., 2023. calc_AliquotSize(): Estimate the amount of grains on an aliquot. Function version 0.31. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Duller, G.A.T., 2008. Single-grain optical dating of Quaternary sediments: why aliquot size matters in luminescence dating. Boreas 37, 589-612. Heer, A.J., Adamiec, G., Moska, P., 2012. How many grains are there on a single aliquot?. Ancient TL 30, 9-16. \strong{Further reading} Chang, H.-C., Wang, L.-C., 2010. A simple proof of Thue's Theorem on Circle Packing. \url{https://arxiv.org/pdf/1009.4322v1.pdf}, 2013-09-13. Graham, R.L., Lubachevsky, B.D., Nurmela, K.J., Oestergard, P.R.J., 1998. Dense packings of congruent circles in a circle. Discrete Mathematics 181, 139-154. Huang, W., Ye, T., 2011. Global optimization method for finding dense packings of equal circles in a circle. European Journal of Operational Research 210, 474-481. 
} \author{ Christoph Burow, University of Cologne (Germany) , RLum Developer Team} Luminescence/man/calc_Lamothe2003.Rd0000644000176200001440000001615214521210045016605 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calc_Lamothe2003.R \name{calc_Lamothe2003} \alias{calc_Lamothe2003} \title{Apply fading correction after Lamothe et al., 2003} \usage{ calc_Lamothe2003( object, dose_rate.envir, dose_rate.source, g_value, tc = NULL, tc.g_value = tc, verbose = TRUE, plot = TRUE, ... ) } \arguments{ \item{object}{\linkS4class{RLum.Results} \link{data.frame} (\strong{required}): Input data for applying the fading correction. Allowed are (1) \link{data.frame} with three columns (\code{dose}, \code{LxTx}, \verb{LxTx error}; see details), (2) \linkS4class{RLum.Results} object created by the function \link{analyse_SAR.CWOSL} or \link{analyse_pIRIRSequence}} \item{dose_rate.envir}{\link{numeric} vector of length 2 (\strong{required}): Environmental dose rate in mGy/a} \item{dose_rate.source}{\link{numeric} vector of length 2 (\strong{required}): Irradiation source dose rate in Gy/s, which is, according to Lamothe et al. (2003), De/t*.} \item{g_value}{\link{numeric} vector of length 2 (\strong{required}): g_value in \\%/decade \emph{recalculated at the moment} the equivalent dose was calculated, i.e. \code{tc} is either similar for the \emph{g}-value measurement \strong{and} the De measurement or needs to be recalculated (cf. \link{calc_FadingCorr}). Inserting a normalised g-value, e.g., normalised to 2-days, will lead to wrong results} \item{tc}{\link{numeric} (optional): time in seconds between the \strong{end} of the irradiation and the prompt measurement used in the equivalent dose estimation (cf. Huntley & Lamothe 2001).
If set to \code{NULL} it is assumed that \code{tc} is similar for the equivalent dose estimation and the \emph{g}-value estimation} \item{tc.g_value}{\link{numeric} (with default): the time in seconds between irradiation and the prompt measurement estimating the \emph{g}-value. If the \emph{g}-value was normalised to, e.g., 2 days, this time in seconds (i.e., \code{172800}) should be entered here along with the time used for the equivalent dose estimation. If nothing is provided the time is set to \code{tc}, which is the usual case for \emph{g}-values obtained using the SAR method and \emph{g}-values that had not been normalised to 2 days. Note: If this value is not \code{NULL} the function expects a \link{numeric} value for \code{tc}.} \item{verbose}{\link{logical} (with default): Enables/disables terminal verbose mode} \item{plot}{\link{logical} (with default): Enables/disables plot output} \item{...}{further arguments passed to the function \link{plot_GrowthCurve}} } \value{ The function returns a graphical output produced by the function \link{plot_GrowthCurve} and an \linkS4class{RLum.Results}. -----------------------------------\cr \verb{[ NUMERICAL OUTPUT ]}\cr -----------------------------------\cr \strong{\code{RLum.Results}}-object \strong{slot:} \strong{\verb{@data}} \tabular{lll}{ \strong{Element} \tab \strong{Type} \tab \strong{Description}\cr \verb{$data} \tab \code{data.frame} \tab the fading corrected values \cr \verb{$fit} \tab \code{nls} \tab the object returned by the dose response curve fitting \cr } \strong{slot:} \strong{\verb{@info}} The original function call } \description{ This function applies the fading correction for the prediction of long-term fading as suggested by Lamothe et al., 2003. The function basically adjusts the \eqn{L_n/T_n} values and fits a new dose-response curve using the function \link{plot_GrowthCurve}.
} \details{ \strong{Format of \code{object} if \code{data.frame}} If \code{object} is of type \link{data.frame}, all input values must be of type \link{numeric}. Dose values are expected in seconds (s), not Gray (Gy). No \code{NA} values are allowed and the value for the natural dose (first row) should be \code{0}. Example for three dose points, column names are arbitrary: \if{html}{\out{
}}\preformatted{ object <- data.frame( dose = c(0,25,50), LxTx = c(4.2, 2.5, 5.0), LxTx_error = c(0.2, 0.1, 0.2)) }\if{html}{\out{
}} \strong{Note on the g-value and \code{tc}} Users new to R and fading measurements are often confused about what to enter for \code{tc} and why it may differ from \code{tc.g_value}. The \code{tc} value is, by convention (Huntley & Lamothe 2001), the time elapsed between the end of the irradiation and the prompt measurement. Usually there is no reason for having a \code{tc} value different for the equivalent dose measurement and the \emph{g}-value measurement, except if different equipment was used. However, if, for instance, the \emph{g}-value measurement sequence was analysed with the \emph{Analyst} (Duller 2015) and the \verb{'Luminescence} is used to correct for fading, there is a high chance that the value returned by the \emph{Analyst} comes normalised to 2-days; even the \code{tc} values of the measurement were identical. In such cases, the fading correction cannot be correct until the \code{tc.g_value} was manually set to 2-days (\code{172800} s) because the function will internally recalculate values to an identical \code{tc} value. } \section{Function version}{ 0.1.0 } \examples{ ##load data ##ExampleData.BINfileData contains two BINfileData objects ##CWOSL.SAR.Data and TL.SAR.Data data(ExampleData.BINfileData, envir = environment()) ##transform the values from the first position in a RLum.Analysis object object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos=1) ##perform SAR analysis and set rejection criteria results <- analyse_SAR.CWOSL( object = object, signal.integral.min = 1, signal.integral.max = 2, background.integral.min = 900, background.integral.max = 1000, verbose = FALSE, plot = FALSE, onlyLxTxTable = TRUE ) ##run fading correction results_corr <- calc_Lamothe2003( object = results, dose_rate.envir = c(1.676 , 0.180), dose_rate.source = c(0.184, 0.003), g_value = c(2.36, 0.6), plot = TRUE, fit.method = "EXP") } \section{How to cite}{ Kreutzer, S., Mercier, N., 2023. 
calc_Lamothe2003(): Apply fading correction after Lamothe et al., 2003. Function version 0.1.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Huntley, D.J., Lamothe, M., 2001. Ubiquity of anomalous fading in K-feldspars and the measurement and correction for it in optical dating. Canadian Journal of Earth Sciences 38, 1093-1106. Duller, G.A.T., 2015. The Analyst software package for luminescence data: overview and recent improvements. Ancient TL 33, 35–42. Lamothe, M., Auclair, M., Hamzaoui, C., Huot, S., 2003. Towards a prediction of long-term anomalous fading of feldspar IRSL. Radiation Measurements 37, 493-498. } \seealso{ \link{plot_GrowthCurve}, \link{calc_FadingCorr}, \link{analyse_SAR.CWOSL}, \link{analyse_pIRIRSequence} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany), Norbert Mercier, IRAMAT-CRP2A, Université Bordeaux Montaigne (France) , RLum Developer Team} \keyword{datagen} Luminescence/man/RLum.Data.Curve-class.Rd0000644000176200001440000001643314521210045017644 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RLum.Data.Curve-class.R \docType{class} \name{RLum.Data.Curve-class} \alias{RLum.Data.Curve-class} \alias{show,RLum.Data.Curve-method} \alias{set_RLum,RLum.Data.Curve-method} \alias{get_RLum,RLum.Data.Curve-method} \alias{length_RLum,RLum.Data.Curve-method} \alias{names_RLum,RLum.Data.Curve-method} \alias{bin_RLum.Data,RLum.Data.Curve-method} \alias{smooth_RLum,RLum.Data.Curve-method} \title{Class \code{"RLum.Data.Curve"}} \usage{ \S4method{show}{RLum.Data.Curve}(object) \S4method{set_RLum}{RLum.Data.Curve}( class, originator, .uid, .pid, recordType = 
NA_character_, curveType = NA_character_, data = matrix(0, ncol = 2), info = list() ) \S4method{get_RLum}{RLum.Data.Curve}(object, info.object = NULL) \S4method{length_RLum}{RLum.Data.Curve}(object) \S4method{names_RLum}{RLum.Data.Curve}(object) \S4method{bin_RLum.Data}{RLum.Data.Curve}(object, bin_size = 2) \S4method{smooth_RLum}{RLum.Data.Curve}(object, k = NULL, fill = NA, align = "right", method = "mean") } \arguments{ \item{object}{\code{\link{get_RLum}}, \code{\link{length_RLum}}, \code{\link{names_RLum}} (\strong{required}): an object of class \linkS4class{RLum.Data.Curve}} \item{class}{\code{\link{set_RLum}}; \link{character} (\strong{required}): name of the \code{RLum} class to create} \item{originator}{\code{\link{set_RLum}}; \link{character} (\emph{automatic}): contains the name of the calling function (the function that produces this object); can be set manually.} \item{.uid}{\code{\link{set_RLum}}; \link{character} (\emph{automatic}): sets an unique ID for this object using the internal C++ function \code{create_UID}.} \item{.pid}{\code{\link{set_RLum}}; \link{character} (\emph{with default}): option to provide a parent id for nesting at will.} \item{recordType}{\code{\link{set_RLum}}; \link{character} (\emph{optional}): record type (e.g., "OSL")} \item{curveType}{\code{\link{set_RLum}}; \link{character} (\emph{optional}): curve type (e.g., "predefined" or "measured")} \item{data}{\code{\link{set_RLum}}; \link{matrix} (\strong{required}): raw curve data. If \code{data} itself is a \code{RLum.Data.Curve}-object this can be used to re-construct the object (s. details), i.e. modified parameters except \code{.uid}, \code{.pid} and \code{originator}. 
The rest will be subject to copy and paste unless provided.} \item{info}{\code{\link{set_RLum}}; \link{list} (\emph{optional}): info elements} \item{info.object}{\code{\link{get_RLum}} \link{character} (\emph{optional}): name of the wanted info element} \item{bin_size}{\link{integer} (\emph{with default}): set number of channels used for each bin, e.g. \code{bin_size = 2} means that two channels are binned.} \item{k}{\code{\link{smooth_RLum}}; \link{integer} (\emph{with default}): window for the rolling mean; must be odd for \code{rollmedian}. If nothing is set k is set automatically} \item{fill}{\code{\link{smooth_RLum}}; \link{numeric} (\emph{with default}): a vector defining the left and the right hand data} \item{align}{\code{\link{smooth_RLum}}; \link{character} (\emph{with default}): specifying whether the index of the result should be left- or right-aligned or centred (default) compared to the rolling window of observations, allowed \code{"right"}, \code{"center"} and \code{"left"}} \item{method}{\code{\link{smooth_RLum}}; \link{character} (\emph{with default}): defines which method should be applied for the smoothing: \code{"mean"} or \code{"median"}} } \value{ \strong{\code{set_RLum}} Returns an \linkS4class{RLum.Data.Curve} object. \strong{\code{get_RLum}} \enumerate{ \item A \link{matrix} with the curve values or \item only the info object if \code{info.object} was set. } \strong{\code{length_RLum}} Number of channels in the curve (row number of the matrix) \strong{\code{names_RLum}} Names of the info elements (slot \code{info}) \strong{\code{bin_RLum.Data}} Same object as input, after applying the binning. \strong{\code{smooth_RLum}} Same object as input, after smoothing } \description{ Class for representing luminescence curve data. } \section{Methods (by generic)}{ \itemize{ \item \code{show(RLum.Data.Curve)}: Show structure of \code{RLum.Data.Curve} object \item \code{set_RLum(RLum.Data.Curve)}: Construction method for RLum.Data.Curve object.
The slot info is optional and predefined as empty list by default. \item \code{get_RLum(RLum.Data.Curve)}: Accessor method for RLum.Data.Curve object. The argument info.object is optional to directly access the info elements. If no info element name is provided, the raw curve data (matrix) will be returned. \item \code{length_RLum(RLum.Data.Curve)}: Returns the length of the curve object, which is the maximum of the value time/temperature of the curve (corresponding to the stimulation length) \item \code{names_RLum(RLum.Data.Curve)}: Returns the names info elements coming along with this curve object \item \code{bin_RLum.Data(RLum.Data.Curve)}: Allows binning of specific objects \item \code{smooth_RLum(RLum.Data.Curve)}: Smoothing of RLum.Data.Curve objects using the function \link[zoo:rollmean]{zoo::rollmean} or \link[zoo:rollmean]{zoo::rollmedian}. In particular the internal function \code{.smoothing} is used. }} \section{Slots}{ \describe{ \item{\code{recordType}}{Object of class "character" containing the type of the curve (e.g. "TL" or "OSL")} \item{\code{curveType}}{Object of class "character" containing curve type, allowed values are measured or predefined} \item{\code{data}}{Object of class \link{matrix} containing curve x and y data. 'data' can also be of type \code{RLum.Data.Curve} to change object values without deconstructing the object. For example: \if{html}{\out{
}}\preformatted{set_RLum(class = 'RLum.Data.Curve', data = Your.RLum.Data.Curve, recordType = 'never seen before') }\if{html}{\out{
}} would just change the \code{recordType}. Missing arguments the value is taken from the input object in 'data' (which is already an RLum.Data.Curve object in this example)} }} \note{ The class should only contain data for a single curve. For additional elements the slot \code{info} can be used (e.g. providing additional heating ramp curve). Objects from the class \code{RLum.Data.Curve} are produced by other functions (partly within \linkS4class{RLum.Analysis} objects), namely: \link{Risoe.BINfileData2RLum.Analysis}, \link{read_XSYG2R} } \section{Create objects from this Class}{ Objects can be created by calls of the form \code{set_RLum(class = "RLum.Data.Curve", ...)}. } \section{Class version}{ 0.5.1 } \examples{ showClass("RLum.Data.Curve") ##set empty curve object set_RLum(class = "RLum.Data.Curve") } \seealso{ \linkS4class{RLum}, \linkS4class{RLum.Data}, \link{plot_RLum}, \link{merge_RLum} } \author{ Sebastian Kreutzer, IRAMAT-CRP2A, UMR 5060, CNRS - Université Bordeaux Montaigne (France) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. RLum.Data.Curve-class(): Class 'RLum.Data.Curve'. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. 
https://CRAN.R-project.org/package=Luminescence } \keyword{classes} \keyword{internal} Luminescence/man/convert_Activity2Concentration.Rd0000644000176200001440000001036414521210045022071 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/convert_Activity2Concentration.R \name{convert_Activity2Concentration} \alias{convert_Activity2Concentration} \title{Convert Nuclide Activities to Concentrations and Vice Versa} \usage{ convert_Activity2Concentration(data, input_unit = "Bq/kg", verbose = TRUE) } \arguments{ \item{data}{\link{data.frame} \strong{(required)}: provide dose rate data (activity or concentration) in three columns. The first column indicates the nuclide, the 2nd column measured value and in the 3rd column its error value. Allowed nuclide data are \code{'U-238'}, \code{'Th-232'} and \code{'K-40'}. See examples for an example.} \item{input_unit}{\link{character} (\emph{with default}): specify unit of input data given in the dose rate data frame, choose between 'Bq/kg' and 'ppm/\%' the default is 'Bq/kg'} \item{verbose}{\link{logical} (\emph{with default}): enable or disable verbose mode} } \description{ The function performs the conversion of the specific activities into concentrations and vice versa for the radioelements U, Th, and K to harmonise the measurement unit with the required data input unit of potential analytical tools for, e.g. dose rate calculation or related functions such as \link{use_DRAC}. } \details{ The conversion from nuclide activity of a sample to nuclide concentration is performed using conversion factors that are based on the mass-related specific activity of the respective nuclide. 
Constants used in this function were obtained from \verb{https://physics.nist.gov/cuu/Constants/} all atomic weights and composition values from \verb{https://www.nist.gov/pml/atomic-weights-and-isotopic-compositions-relative-atomic-masses} and the nuclide data from \verb{https://www.iaea.org/resources/databases/livechart-of-nuclides-advanced-version} The factors can be calculated using the equation: \deqn{ A = N_A \frac{N_{abund}}{N_{mol.mass}} ln(2) / N.half.life } to convert in ppm we further use: \deqn{ f = A / 10^6 } where: \itemize{ \item \code{N_A} - Avogadro constant in 1/mol \item \code{A} - specific activity of the nuclide in Bq/kg \item \code{N.abund} - relative natural abundance of the isotope \item \code{N.mol.mass} molar mass in kg/mol \item \code{N.half.life} half-life of the nuclide in s } example for calculating the activity of the radionuclide U-238: \itemize{ \item \code{N_A} = 6.02214076e+23 (1/mol) \item \code{T_0.5} = 1.41e+17 (s) \item \code{m_U_238} = 0.23802891 (kg/mol) \item \code{U_abund} = 0.992745 (unitless) } \deqn{A_{U} = N_{A} * U_{abund} / m_{U_238} * ln(2) / T_{1/2} = 2347046} (Bq/kg) \deqn{f.U = A_{U} / 10^6} } \note{ Although written otherwise for historical reasons. Input values must be element values. For instance, if a value is provided for U-238 the function assumes that this value represents the sum (activity or concentration) of U-238, U-235 and U-234. In other words, 1 ppm of U means that this is the composition of 0.992 parts of U-238, 0.000054 parts of U-234, and 0.00072 parts of U-235. } \section{Function version}{ 0.1.1 } \examples{ ##construct data.frame data <- data.frame( NUCLIDES = c("U-238", "Th-232", "K-40"), VALUE = c(40,80,100), VALUE_ERROR = c(4,8,10), stringsAsFactors = FALSE) ##perform analysis convert_Activity2Concentration(data) } \section{How to cite}{ Fuchs, M.C., 2023. convert_Activity2Concentration(): Convert Nuclide Activities to Concentrations and Vice Versa. Function version 0.1.1. 
In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Debertin, K., Helmer, R.G., 1988. Gamma- and X-ray Spectrometry with Semiconductor Detectors, Elsevier Science Publishers, p.283 Wiechen, A., Ruehle, H., Vogl, K., 2013. Bestimmung der massebezogenen Aktivitaet von Radionukliden. AEQUIVAL/MASSAKT, ISSN 1865-8725, \url{https://www.bmuv.de/fileadmin/Daten_BMU/Download_PDF/Strahlenschutz/aequival-massakt_v2013-07_bf.pdf} } \author{ Margret C. Fuchs, Helmholtz-Institute Freiberg for Resource Technology (Germany) , RLum Developer Team} \keyword{IO} Luminescence/man/calc_WodaFuchs2008.Rd0000644000176200001440000000511014521210045017074 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calc_WodaFuchs2008.R \name{calc_WodaFuchs2008} \alias{calc_WodaFuchs2008} \title{Obtain the equivalent dose using the approach by Woda and Fuchs 2008} \usage{ calc_WodaFuchs2008(data, breaks = NULL, plot = TRUE, ...) } \arguments{ \item{data}{\link{data.frame} or \linkS4class{RLum.Results} object (\strong{required}): for \link{data.frame}: two columns: De (\code{values[,1]}) and De error (\code{values[,2]}). For plotting multiple data sets, these must be provided as \code{list} (e.g. \code{list(dataset1, dataset2)}).} \item{breaks}{\link{numeric}: Either number or locations of breaks. See \verb{[hist]} for details. 
If missing, the number of breaks will be estimated based on the bin width (as function of median error).} \item{plot}{\link{logical} (\emph{with default}): enable plot output.} \item{...}{Further plot arguments passed to the function.} } \description{ The function generates a histogram-like reorganisation of the data, to assess counts per bin. The log-transformed counts per bin are used to calculate the second derivative of the data (i.e., the curvature of the curve) and to find the central value of the bin hosting the distribution maximum. A normal distribution model is fitted to the counts per bin data to estimate the dose distribution parameters. The uncertainty of the model is estimated based on all input equivalent doses smaller that of the modelled central value. } \section{Function version}{ 0.2.0 } \examples{ ## read example data set data(ExampleData.DeValues, envir = environment()) results <- calc_WodaFuchs2008( data = ExampleData.DeValues$CA1, xlab = expression(paste(D[e], " [Gy]")) ) } \section{How to cite}{ Kreutzer, S., Dietze, M., 2023. calc_WodaFuchs2008(): Obtain the equivalent dose using the approach by Woda and Fuchs 2008. Function version 0.2.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Woda, C., Fuchs, M., 2008. On the applicability of the leading edge method to obtain equivalent doses in OSL dating and dosimetry. Radiation Measurements 43, 26-37. 
} \seealso{ \link{calc_FuchsLang2001}, \link{calc_CentralDose} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany),\cr Michael Dietze, GFZ Potsdam (Germany) , RLum Developer Team} Luminescence/man/merge_RLum.Data.Curve.Rd0000644000176200001440000001031314521210045017707 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merge_RLum.Data.Curve.R \name{merge_RLum.Data.Curve} \alias{merge_RLum.Data.Curve} \title{Merge function for RLum.Data.Curve S4 class objects} \usage{ merge_RLum.Data.Curve(object, merge.method = "mean", method.info) } \arguments{ \item{object}{\link{list} of \linkS4class{RLum.Data.Curve} (\strong{required}): list of S4 objects of class \code{RLum.Curve}.} \item{merge.method}{\link{character} (\strong{required}): method for combining of the objects, e.g. \code{'mean'}, \code{'sum'}, see details for further information and allowed methods. Note: Elements in slot info will be taken from the first curve in the list.} \item{method.info}{\link{numeric} (\emph{optional}): allows to specify how info elements of the input objects are combined, e.g. \code{1} means that just the elements from the first object are kept, \code{2} keeps only the info elements from the 2 object etc. If nothing is provided all elements are combined.} } \value{ Returns an \linkS4class{RLum.Data.Curve} object. } \description{ Function allows merging of RLum.Data.Curve objects in different ways } \details{ This function simply allowing to merge \linkS4class{RLum.Data.Curve} objects without touching the objects itself. Merging is always applied on the 2nd column of the data matrix of the object. \strong{Supported merge operations are \linkS4class{RLum.Data.Curve}} \code{"sum"} All count values will be summed up using the function \link{rowSums}. \code{"mean"} The mean over the count values is calculated using the function \link{rowMeans}. 
\code{"median"} The median over the count values is calculated using the function \link[matrixStats:rowMedians]{matrixStats::rowMedians}. \code{"sd"} The standard deviation over the count values is calculated using the function \link[matrixStats:rowSds]{matrixStats::rowSds}. \code{"var"} The variance over the count values is calculated using the function \link[matrixStats:rowVars]{matrixStats::rowVars}. \code{"min"} The min values from the count values is chosen using the function \link[matrixStats:rowRanges]{matrixStats::rowMins}. \code{"max"} The max values from the count values is chosen using the function \link[matrixStats:rowRanges]{matrixStats::rowMaxs}. \code{"append"} Appends count values of all curves to one combined data curve. The channel width is automatically re-calculated, but requires a constant channel width of the original data. \code{"-"} The row sums of the last objects are subtracted from the first object. \code{"*"} The row sums of the last objects are multiplied with the first object. \code{"/"} Values of the first object are divided by row sums of the last objects. } \note{ The information from the slot \code{recordType} is taken from the first \linkS4class{RLum.Data.Curve} object in the input list. The slot 'curveType' is filled with the name \code{merged}. 
} \section{S3-generic support}{ This function is fully operational via S3-generics: \code{+}, \code{-}, \code{/}, \code{*}, \code{merge} } \section{Function version}{ 0.2.1 } \examples{ ##load example data data(ExampleData.XSYG, envir = environment()) ##grep first and 3rd TL curves TL.curves <- get_RLum(OSL.SARMeasurement$Sequence.Object, recordType = "TL (UVVIS)") TL.curve.1 <- TL.curves[[1]] TL.curve.3 <- TL.curves[[3]] ##plot single curves plot_RLum(TL.curve.1) plot_RLum(TL.curve.3) ##divide the 3rd curve by the 1st and plot TL.curve.merged <- merge_RLum.Data.Curve(list(TL.curve.3, TL.curve.1), merge.method = "/") plot_RLum(TL.curve.merged) } \seealso{ \link{merge_RLum}, \linkS4class{RLum.Data.Curve} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. merge_RLum.Data.Curve(): Merge function for RLum.Data.Curve S4 class objects. Function version 0.2.1. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. 
https://CRAN.R-project.org/package=Luminescence } \keyword{internal} \keyword{utilities} Luminescence/man/calc_gSGC_feldspar.Rd0000644000176200001440000000673214521210045017355 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calc_gSGC_feldspar.R \name{calc_gSGC_feldspar} \alias{calc_gSGC_feldspar} \title{Calculate Global Standardised Growth Curve (gSGC) for Feldspar MET-pIRIR} \usage{ calc_gSGC_feldspar( data, gSGC.type = "50LxTx", gSGC.parameters, n.MC = 100, plot = FALSE ) } \arguments{ \item{data}{\link{data.frame} (\strong{required}): data frame with five columns per sample \code{c("LnTn", "LnTn.error", "Lr1Tr1", "Lr1Tr1.error","Dr1")}} \item{gSGC.type}{\link{character} (\emph{with default}): growth curve type to be selected according to Table 3 in Li et al. (2015). Allowed options are \code{"50LxTx"}, \code{"50Lx"}, \code{"50Tx"}, \code{"100LxTx"}, \code{"100Lx"}, \code{"100Tx"}, \code{"150LxTx"}, \code{"150Lx"}, \code{"150Tx"}, \code{"200LxTx"}, \code{"200Lx"}, \code{"200Tx"}, \code{"250LxTx"}, \code{"250Lx"}, \code{"250Tx"}} \item{gSGC.parameters}{\link{data.frame} (\emph{optional}): an own parameter set for the gSGC with the following columns \code{y1}, \code{y1_err}, \code{D1} \code{D1_err}, \code{y2}, \code{y2_err}, \code{y0}, \code{y0_err}.} \item{n.MC}{\link{numeric} (\emph{with default}): number of Monte-Carlo runs for the error calculation} \item{plot}{\link{logical} (\emph{with default}): enables/disables the control plot output} } \value{ Returns an S4 object of type \linkS4class{RLum.Results}. \strong{\verb{@data}}\cr \verb{$ df} (\link{data.frame}) \cr \code{.. $DE} the calculated equivalent dose\cr \code{.. $DE.ERROR} error on the equivalent dose, which is the standard deviation of the MC runs\cr \code{.. $HPD95_LOWER} lower boundary of the highest probability density (95\%)\cr \code{.. 
$HPD95_UPPER} upper boundary of the highest probability density (95\%)\cr \verb{$ m.MC} (\link{list}) numeric vector with results from the MC runs.\cr \strong{\verb{@info}}\cr \verb{$ call} (\link{call}) the original function call } \description{ Implementation of the gSGC approach for feldspar MET-pIRIR by Li et al. (2015) } \details{ ##TODO } \section{Function version}{ 0.1.0 } \examples{ ##test on a generated random sample n_samples <- 10 data <- data.frame( LnTn = rnorm(n=n_samples, mean=1.0, sd=0.02), LnTn.error = rnorm(n=n_samples, mean=0.05, sd=0.002), Lr1Tr1 = rnorm(n=n_samples, mean=1.0, sd=0.02), Lr1Tr1.error = rnorm(n=n_samples, mean=0.05, sd=0.002), Dr1 = rep(100,n_samples)) results <- calc_gSGC_feldspar( data = data, gSGC.type = "50LxTx", plot = FALSE) plot_AbanicoPlot(results) } \section{How to cite}{ Gray, H.J., Kreutzer, S., 2023. calc_gSGC_feldspar(): Calculate Global Standardised Growth Curve (gSGC) for Feldspar MET-pIRIR. Function version 0.1.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Li, B., Roberts, R.G., Jacobs, Z., Li, S.-H., Guo, Y.-J., 2015. Construction of a “global standardised growth curve” (gSGC) for infrared stimulated luminescence dating of K-feldspar. Quaternary Geochronology 27, 119–130. 
\doi{10.1016/j.quageo.2015.02.010} } \seealso{ \linkS4class{RLum.Results}, \link{get_RLum}, \link{uniroot}, \link{calc_gSGC} } \author{ Harrison Gray, USGS (United States), Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \keyword{datagen} Luminescence/man/plot_RLum.Data.Spectrum.Rd0000644000176200001440000002477714521210045020327 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot_RLum.Data.Spectrum.R \name{plot_RLum.Data.Spectrum} \alias{plot_RLum.Data.Spectrum} \title{Plot function for an RLum.Data.Spectrum S4 class object} \usage{ plot_RLum.Data.Spectrum( object, par.local = TRUE, plot.type = "contour", optical.wavelength.colours = TRUE, bg.spectrum = NULL, bg.channels = NULL, bin.rows = 1, bin.cols = 1, norm = NULL, rug = TRUE, limit_counts = NULL, xaxis.energy = FALSE, legend.text, plot = TRUE, ... ) } \arguments{ \item{object}{\linkS4class{RLum.Data.Spectrum} or \link{matrix} (\strong{required}): S4 object of class \code{RLum.Data.Spectrum} or a \code{matrix} containing count values of the spectrum.\cr Please note that in case of a matrix row names and col names are set automatically if not provided.} \item{par.local}{\link{logical} (\emph{with default}): use local graphical parameters for plotting, e.g. the plot is shown in one column and one row. If \code{par.local = FALSE} global parameters are inherited.} \item{plot.type}{\link{character} (\emph{with default}): plot type, for 3D-plot use \code{persp}, or \code{interactive}, for a 2D-plot \code{image}, \code{contour}, \code{single} or \code{multiple.lines} (along the time or temperature axis) or \code{transect} (along the wavelength axis) \cr} \item{optical.wavelength.colours}{\link{logical} (\emph{with default}): use optical wavelength colour palette. Note: For this, the spectrum range is limited: \code{c(350,750)}. Own colours can be set with the argument \code{col}. 
If you provide already binned spectra, the colour assignment is likely to be wrong, since the colour gradients are calculated using the bin number.} \item{bg.spectrum}{\linkS4class{RLum.Data.Spectrum} or \link{matrix} (\emph{optional}): Spectrum used for the background subtraction. By definition, the background spectrum should have been measured with the same setting as the signal spectrum. If a spectrum is provided, the argument \code{bg.channels} works only on the provided background spectrum.} \item{bg.channels}{\link{vector} (\emph{optional}): defines channel for background subtraction If a vector is provided the mean of the channels is used for subtraction. If a spectrum is provided via \code{bg.spectrum}, this argument only works on the \code{bg.spectrum}. \strong{Note:} Background subtraction is applied prior to channel binning} \item{bin.rows}{\link{integer} (\emph{with default}): allow summing-up wavelength channels (horizontal binning), e.g. \code{bin.rows = 2} two channels are summed up. Binning is applied after the background subtraction.} \item{bin.cols}{\link{integer} (\emph{with default}): allow summing-up channel counts (vertical binning) for plotting, e.g. \code{bin.cols = 2} two channels are summed up. Binning is applied after the background subtraction.} \item{norm}{\link{character} (\emph{optional}): Normalise data to the maximum (\code{norm = "max"}) or minimum (\code{norm = "min"}) count values. The normalisation is applied after the binning.} \item{rug}{\link{logical} (\emph{with default}): enables or disables colour rug. Currently only implemented for plot type \code{multiple.lines} and \code{single}} \item{limit_counts}{\link{numeric} (\emph{optional}): value to limit all count values to this value, i.e. all count values above this threshold will be replaced by this threshold. This is helpful especially in case of TL-spectra.} \item{xaxis.energy}{\link{logical} (\emph{with default}): enables or disables energy instead of wavelength axis. 
For the conversion the function \link{convert_Wavelength2Energy} is used. \strong{Note:} This option means not only simply redrawing the axis, instead the spectrum in terms of intensity is recalculated, s. details.} \item{legend.text}{\link{character} (\emph{with default}): possibility to provide own legend text. This argument is only considered for plot types providing a legend, e.g. \code{plot.type="transect"}} \item{plot}{\link{logical} (\emph{with default}): enables/disables plot output. If the plot output is disabled, the \link{matrix} used for the plotting and the calculated colour values (as attributes) are returned. This way, the (binned, transformed etc.) output can be used in other functions and packages, such as plotting with the package \code{'plot3D'}} \item{...}{further arguments and graphical parameters that will be passed to the \code{plot} function.} } \value{ Returns a plot and the transformed \code{matrix} used for plotting with some useful attributes such as the \code{colour} and \code{pmat} (the transpose matrix from \link[graphics:persp]{graphics::persp}) } \description{ The function provides a standardised plot output for spectrum data of an \linkS4class{RLum.Data.Spectrum} class object. The purpose of this function is to provide easy and straight-forward spectra plotting, not provide a full customised access to all plot parameters. If this is wanted, standard R plot functionality should be used instead. \strong{Matrix structure} \cr (cf. 
\linkS4class{RLum.Data.Spectrum}) \itemize{ \item \code{rows} (x-values): wavelengths/channels (\code{xlim}, \code{xlab}) \item \code{columns} (y-values): time/temperature (\code{ylim}, \code{ylab}) \item \code{cells} (z-values): count values (\code{zlim}, \code{zlab}) } \emph{Note: This nomenclature is valid for all plot types of this function!} \strong{Nomenclature for value limiting} \itemize{ \item \code{xlim}: Limits values along the wavelength axis \item \code{ylim}: Limits values along the time/temperature axis \item \code{zlim}: Limits values along the count value axis } \strong{Details on the plot functions} Spectrum is visualised as 3D or 2D plot. Both plot types are based on internal R plot functions. \strong{\code{plot.type = "persp"}} Arguments that will be passed to \link[graphics:persp]{graphics::persp}: \itemize{ \item \code{shade}: default is \code{0.4} \item \code{phi}: default is \code{15} \item \code{theta}: default is \code{-30} \item \code{expand}: default is \code{1} \item \code{axes}: default is \code{TRUE} \item \code{box}: default is \code{TRUE}; accepts \code{"alternate"} for a custom plot design \item \code{ticktype}: default is \code{detailed}, \code{r}: default is \code{10} } \strong{Note:} Further parameters can be adjusted via \code{par}. For example to set the background transparent and reduce the thickness of the lines use: \code{par(bg = NA, lwd = 0.7)} previous the function call. \strong{\code{plot.type = "single"}} Per frame a single curve is returned. Frames are time or temperature steps. -\code{frames}: pick the frames to be plotted (depends on the binning!). Check without this setting before plotting. \strong{\code{plot.type = "multiple.lines"}} All frames plotted in one frame. -\code{frames}: pick the frames to be plotted (depends on the binning!). Check without this setting before plotting. 
\strong{\code{plot.type = "image"} or \code{plot.type = "contour"}} These plot types use the R functions \link[graphics:image]{graphics::image} or \link[graphics:contour]{graphics::contour}. The advantage is that many plots can be arranged conveniently using standard R plot functionality. If \code{plot.type = "image"} a contour is added by default, which can be disabled using the argument \code{contour = FALSE} to add own contour lines of choice. \strong{\code{plot.type = "transect"}} Depending on the selected wavelength/channel range a transect over the time/temperature (y-axis) will be plotted along the wavelength/channels (x-axis). If the range contains more than one channel, values (z-values) are summed up. To select a transect use the \code{xlim} argument, e.g. \code{xlim = c(300,310)} plot along the summed up count values of channel 300 to 310. \strong{Further arguments that will be passed (depending on the plot type)} \code{xlab}, \code{ylab}, \code{zlab}, \code{xlim}, \code{ylim}, \code{box}, \code{zlim}, \code{main}, \code{mtext}, \code{pch}, \code{type} (\code{"single"}, \code{"multiple.lines"}, \code{"interactive"}), \code{col}, \code{border}, \code{lwd}, \code{bty}, \code{showscale} (\code{"interactive"}, \code{"image"}) \code{contour}, \code{contour.col} (\code{"image"}) } \note{ Not all additional arguments (\code{...}) will be passed similarly! 
} \section{Function version}{ 0.6.8 } \examples{ ##load example data data(ExampleData.XSYG, envir = environment()) ##(1)plot simple spectrum (2D) - image plot_RLum.Data.Spectrum( TL.Spectrum, plot.type="image", xlim = c(310,750), ylim = c(0,300), bin.rows=10, bin.cols = 1) ##(2) plot spectrum (3D) plot_RLum.Data.Spectrum( TL.Spectrum, plot.type="persp", xlim = c(310,750), ylim = c(0,100), bin.rows=10, bin.cols = 1) ##(3) plot spectrum on energy axis ##please note the background subtraction plot_RLum.Data.Spectrum(TL.Spectrum, plot.type="persp", ylim = c(0,200), bin.rows=10, bg.channels = 10, bin.cols = 1, xaxis.energy = TRUE) ##(4) plot multiple lines (2D) - multiple.lines (with ylim) plot_RLum.Data.Spectrum( TL.Spectrum, plot.type="multiple.lines", xlim = c(310,750), ylim = c(0,100), bin.rows=10, bin.cols = 1) \dontrun{ ##(4) interactive plot using the package plotly ("surface") plot_RLum.Data.Spectrum(TL.Spectrum, plot.type="interactive", xlim = c(310,750), ylim = c(0,300), bin.rows=10, bin.cols = 1) ##(5) interactive plot using the package plotly ("contour") plot_RLum.Data.Spectrum(TL.Spectrum, plot.type="interactive", xlim = c(310,750), ylim = c(0,300), bin.rows=10, bin.cols = 1, type = "contour", showscale = TRUE) ##(6) interactive plot using the package plotly ("heatmap") plot_RLum.Data.Spectrum(TL.Spectrum, plot.type="interactive", xlim = c(310,750), ylim = c(0,300), bin.rows=10, bin.cols = 1, type = "heatmap", showscale = TRUE) } } \seealso{ \linkS4class{RLum.Data.Spectrum}, \link{convert_Wavelength2Energy}, \link{plot}, \link{plot_RLum}, \link[graphics:persp]{graphics::persp}, \link[plotly:plot_ly]{plotly::plot_ly}, \link[graphics:contour]{graphics::contour}, \link[graphics:image]{graphics::image} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. plot_RLum.Data.Spectrum(): Plot function for an RLum.Data.Spectrum S4 class object. Function version 0.6.8. 
In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \keyword{aplot} Luminescence/man/plot_Histogram.Rd0000644000176200001440000001277314521210045016725 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot_Histogram.R \name{plot_Histogram} \alias{plot_Histogram} \title{Plot a histogram with separate error plot} \usage{ plot_Histogram( data, na.rm = TRUE, mtext, cex.global, se, rug, normal_curve, summary, summary.pos, colour, interactive = FALSE, ... ) } \arguments{ \item{data}{\link{data.frame} or \linkS4class{RLum.Results} object (\strong{required}): for \code{data.frame}: two columns: De (\code{data[,1]}) and De error (\code{data[,2]})} \item{na.rm}{\link{logical} (\emph{with default}): excludes \code{NA} values from the data set prior to any further operations.} \item{mtext}{\link{character} (\emph{optional}): further sample information (\link{mtext}).} \item{cex.global}{\link{numeric} (\emph{with default}): global scaling factor.} \item{se}{\link{logical} (\emph{optional}): plots standard error points over the histogram, default is \code{FALSE}.} \item{rug}{\link{logical} (\emph{optional}): adds rugs to the histogram, default is \code{TRUE}.} \item{normal_curve}{\link{logical} (\emph{with default}): adds a normal curve to the histogram. Mean and standard deviation are calculated from the input data. More see details section.} \item{summary}{\link{character} (\emph{optional}): add statistic measures of centrality and dispersion to the plot. Can be one or more of several keywords. 
See details for available keywords.} \item{summary.pos}{\link{numeric} or \link{character} (\emph{with default}): optional position coordinates or keyword (e.g. \code{"topright"}) for the statistical summary. Alternatively, the keyword \code{"sub"} may be specified to place the summary below the plot header. However, this latter option in only possible if \code{mtext} is not used. In case of coordinate specification, y-coordinate refers to the right y-axis.} \item{colour}{\link{numeric} or \link{character} (\emph{with default}): optional vector of length 4 which specifies the colours of the following plot items in exactly this order: histogram bars, rug lines, normal distribution curve and standard error points (e.g., \code{c("grey", "black", "red", "grey")}).} \item{interactive}{\link{logical} (\emph{with default}): create an interactive histogram plot (requires the 'plotly' package)} \item{...}{further arguments and graphical parameters passed to \link{plot} or \link{hist}. If y-axis labels are provided, these must be specified as a vector of length 2 since the plot features two axes (e.g. \code{ylab = c("axis label 1", "axis label 2")}). Y-axes limits (\code{ylim}) must be provided as vector of length four, with the first two elements specifying the left axes limits and the latter two elements giving the right axis limits.} } \description{ Function plots a predefined histogram with an accompanying error plot as suggested by Rex Galbraith at the UK LED in Oxford 2010. } \details{ If the normal curve is added, the y-axis in the histogram will show the probability density. A statistic summary, i.e. 
a collection of statistic measures of centrality and dispersion (and further measures) can be added by specifying one or more of the following keywords: \itemize{ \item \code{"n"} (number of samples), \item \code{"mean"} (mean De value), \item \code{"mean.weighted"} (error-weighted mean), \item \code{"median"} (median of the De values), \item \code{"sdrel"} (relative standard deviation in percent), \item \code{"sdrel.weighted"} (error-weighted relative standard deviation in percent), \item \code{"sdabs"} (absolute standard deviation), \item \code{"sdabs.weighted"} (error-weighted absolute standard deviation), \item \code{"serel"} (relative standard error), \item \code{"serel.weighted"} (error-weighted relative standard error), \item \code{"seabs"} (absolute standard error), \item \code{"seabs.weighted"} (error-weighted absolute standard error), \item \code{"kurtosis"} (kurtosis) and \item \code{"skewness"} (skewness). } } \note{ The input data is not restricted to a special type. } \section{Function version}{ 0.4.5 } \examples{ ## load data data(ExampleData.DeValues, envir = environment()) ExampleData.DeValues <- Second2Gray(ExampleData.DeValues$BT998, dose.rate = c(0.0438,0.0019)) ## plot histogram the easiest way plot_Histogram(ExampleData.DeValues) ## plot histogram with some more modifications plot_Histogram(ExampleData.DeValues, rug = TRUE, normal_curve = TRUE, cex.global = 0.9, pch = 2, colour = c("grey", "black", "blue", "green"), summary = c("n", "mean", "sdrel"), summary.pos = "topleft", main = "Histogram of De-values", mtext = "Example data set", ylab = c(expression(paste(D[e], " distribution")), "Standard error"), xlim = c(100, 250), ylim = c(0, 0.1, 5, 20)) } \seealso{ \link{hist}, \link{plot} } \author{ Michael Dietze, GFZ Potsdam (Germany)\cr Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Dietze, M., Kreutzer, S., 2023. 
plot_Histogram(): Plot a histogram with separate error plot. Function version 0.4.5. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } Luminescence/man/merge_RLum.Results.Rd0000644000176200001440000000267114521210045017424 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merge_RLum.Results.R \name{merge_RLum.Results} \alias{merge_RLum.Results} \title{Merge function for RLum.Results S4-class objects} \usage{ merge_RLum.Results(objects) } \arguments{ \item{objects}{\link{list} (\strong{required}): a list of \linkS4class{RLum.Results} objects} } \description{ Function merges objects of class \linkS4class{RLum.Results}. The slots in the objects are combined depending on the object type, e.g., for \link{data.frame} and \link{matrix} rows are appended. } \details{ Elements are appended where possible and attributes are preserved if not of similar name as the default attributes of, e.g., a \link{data.frame} } \note{ The \code{originator} is taken from the first element and not reset to \code{merge_RLum} } \section{Function version}{ 0.2.1 } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. merge_RLum.Results(): Merge function for RLum.Results S4-class objects. Function version 0.2.1. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. 
https://CRAN.R-project.org/package=Luminescence } \keyword{internal} Luminescence/man/convert_Concentration2DoseRate.Rd0000644000176200001440000001101214521210045021772 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/convert_Concentration2DoseRate.R \name{convert_Concentration2DoseRate} \alias{convert_Concentration2DoseRate} \title{Dose-rate conversion function} \usage{ convert_Concentration2DoseRate(input, conversion = "Guerinetal2011") } \arguments{ \item{input}{\link{data.frame} (\emph{optional}): a table containing all relevant information for each individual layer if nothing is provided, the function returns a template \link{data.frame} Please note that only one dataset per input is supported!} \item{conversion}{\link{character} (\emph{with default}): which dose rate conversion factors to use, defaults uses Guérin et al. (2011). For accepted values see \link{BaseDataSet.ConversionFactors}} } \value{ The function returns an \linkS4class{RLum.Results} object for which the first element is \link{matrix} with the converted values. If no input is provided, the function returns a template \link{data.frame} that can be used as input. } \description{ This function converts radionuclide concentrations (K in \%, Th and U in ppm) into dose rates (Gy/ka). Beta-dose rates are also attenuated for the grain size. Beta and gamma-dose rates are corrected for the water content. This function converts concentrations into dose rates (Gy/ka) and corrects for grain size attenuation and water content Dose rate conversion factors can be chosen from Adamiec and Aitken (1998), Guerin et al. (2011), Liritzis et al. (2013) and Cresswell et al. (2018). Default is Guerin et al. (2011). Grain size correction for beta dose rates is achieved using the correction factors published by Guérin et al. (2012). Water content correction is based on factors provided by Aitken (1985), with the factor for beta dose rate being 1.25 and for gamma 1.14. 
} \details{ \strong{The input data} \tabular{lll}{ COLUMN \tab DATA TYPE \tab DESCRIPTION\cr \code{Mineral} \tab \code{character} \tab \code{'FS'} for feldspar, \code{'Q'} for quartz\cr \code{K} \tab \code{numeric} \tab K nuclide content in \%\cr \code{K_SE} \tab \code{numeric} \tab error on K nuclide content in \%\cr \code{Th} \tab \code{numeric} \tab Th nuclide content in ppm\cr \code{Th_SE} \tab \code{numeric} \tab error on Th nuclide content in ppm\cr \code{U} \tab \code{numeric} \tab U nuclide content in ppm\cr \code{U_SE} \tab \code{numeric} \tab error on U nuclide content in ppm\cr \code{GrainSize} \tab \code{numeric} \tab average grain size in µm\cr \code{WaterContent} \tab \code{numeric} \tab mean water content in \%\cr \code{WaterContent_SE} \tab \code{numeric} \tab relative error on water content } \strong{Water content} The water content provided by the user should be calculated according to: \deqn{(Wet_weight - Dry_weight) / Dry_weight * 100} The unit for the weight is gram (g). } \section{Function version}{ 0.1.0 } \examples{ ## create input template input <- convert_Concentration2DoseRate() ## fill input input$Mineral <- "FS" input$K <- 2.13 input$K_SE <- 0.07 input$Th <- 9.76 input$Th_SE <- 0.32 input$U <- 2.24 input$U_SE <- 0.12 input$GrainSize <- 200 input$WaterContent <- 30 input$WaterContent_SE <- 5 ## convert convert_Concentration2DoseRate(input) } \section{How to cite}{ Riedesel, S., Autzen, M., 2023. convert_Concentration2DoseRate(): Dose-rate conversion function. Function version 0.1.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Adamiec, G., Aitken, M.J., 1998. Dose-rate conversion factors: update. Ancient TL 16, 37-46. 
Cresswell., A.J., Carter, J., Sanderson, D.C.W., 2018. Dose rate conversion parameters: Assessment of nuclear data. Radiation Measurements 120, 195-201. Guerin, G., Mercier, N., Adamiec, G., 2011. Dose-rate conversion factors: update. Ancient TL, 29, 5-8. Guerin, G., Mercier, N., Nathan, R., Adamiec, G., Lefrais, Y., 2012. On the use of the infinite matrix assumption and associated concepts: A critical review. Radiation Measurements, 47, 778-785. Liritzis, I., Stamoulis, K., Papachristodoulou, C., Ioannides, K., 2013. A re-evaluation of radiation dose-rate conversion factors. Mediterranean Archaeology and Archaeometry 13, 1-15. } \author{ Svenja Riedesel, Aberystwyth University (United Kingdom) \cr Martin Autzen, DTU NUTECH Center for Nuclear Technologies (Denmark) , RLum Developer Team} \keyword{datagen} Luminescence/man/get_rightAnswer.Rd0000644000176200001440000000174414521210045017062 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_rightAnswer.R \name{get_rightAnswer} \alias{get_rightAnswer} \title{Function to get the right answer} \usage{ get_rightAnswer(...) } \arguments{ \item{...}{you can pass an infinite number of further arguments} } \value{ Returns the right answer } \description{ This function returns just the right answer } \section{Function version}{ 0.1.0 } \examples{ ## you really want to know? get_rightAnswer() } \author{ inspired by R.G. , RLum Developer Team} \section{How to cite}{ NA, NA, , , 2023. get_rightAnswer(): Function to get the right answer. Function version 0.1.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. 
https://CRAN.R-project.org/package=Luminescence } Luminescence/man/fit_LMCurve.Rd0000644000176200001440000002525514521210045016110 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fit_LMCurve.R \name{fit_LMCurve} \alias{fit_LMCurve} \title{Nonlinear Least Squares Fit for LM-OSL curves} \usage{ fit_LMCurve( values, values.bg, n.components = 3, start_values, input.dataType = "LM", fit.method = "port", sample_code = "", sample_ID = "", LED.power = 36, LED.wavelength = 470, fit.trace = FALSE, fit.advanced = FALSE, fit.calcError = FALSE, bg.subtraction = "polynomial", verbose = TRUE, plot = TRUE, plot.BG = FALSE, ... ) } \arguments{ \item{values}{\linkS4class{RLum.Data.Curve} or \link{data.frame} (\strong{required}): x,y data of measured values (time and counts). See examples.} \item{values.bg}{\linkS4class{RLum.Data.Curve} or \link{data.frame} (\emph{optional}): x,y data of measured values (time and counts) for background subtraction.} \item{n.components}{\link{integer} (\emph{with default}): fixed number of components that are to be recognised during fitting (min = 1, max = 7).} \item{start_values}{\link{data.frame} (\emph{optional}): start parameters for \code{lm} and \code{xm} data for the fit. If no start values are given, an automatic start value estimation is attempted (see details).} \item{input.dataType}{\link{character} (\emph{with default}): alter the plot output depending on the input data: \code{"LM"} or \code{"pLM"} (pseudo-LM). See: \link{CW2pLM}} \item{fit.method}{\link{character} (\emph{with default}): select fit method, allowed values: \code{'port'} and \code{'LM'}. 
\code{'port'} uses the 'port' routine from the function \link{nls} \code{'LM'} utilises the function \code{nlsLM} from the package \code{minpack.lm} and with that the Levenberg-Marquardt algorithm.} \item{sample_code}{\link{character} (\emph{optional}): sample code used for the plot and the optional output table (mtext).} \item{sample_ID}{\link{character} (\emph{optional}): additional identifier used as column header for the table output.} \item{LED.power}{\link{numeric} (\emph{with default}): LED power (max.) used for intensity ramping in mW/cm^2. \strong{Note:} This value is used for the calculation of the absolute photoionisation cross section.} \item{LED.wavelength}{\link{numeric} (\emph{with default}): LED wavelength in nm used for stimulation. \strong{Note:} This value is used for the calculation of the absolute photoionisation cross section.} \item{fit.trace}{\link{logical} (\emph{with default}): traces the fitting process on the terminal.} \item{fit.advanced}{\link{logical} (\emph{with default}): enables advanced fitting attempt for automatic start parameter recognition. Works only if no start parameters are provided. \strong{Note:} It may take a while and it is not compatible with \code{fit.method = "LM"}.} \item{fit.calcError}{\link{logical} (\emph{with default}): calculate 1-sigma error range of components using \link[stats:confint]{stats::confint}.} \item{bg.subtraction}{\link{character} (\emph{with default}): specifies method for background subtraction (\code{polynomial}, \code{linear}, \code{channel}, see Details). 
\strong{Note:} requires input for \code{values.bg}.} \item{verbose}{\link{logical} (\emph{with default}): terminal output with fitting results.} \item{plot}{\link{logical} (\emph{with default}): returns a plot of the fitted curves.} \item{plot.BG}{\link{logical} (\emph{with default}): returns a plot of the background values with the fit used for the background subtraction.} \item{...}{Further arguments that may be passed to the plot output, e.g. \code{xlab}, \code{xlab}, \code{main}, \code{log}.} } \value{ Various types of plots are returned. For details see above. Furthermore an \code{RLum.Results} object is returned with the following structure: \strong{\verb{@data:}} \code{.. $data} : \link{data.frame} with fitting results\cr \code{.. $fit} : nls (\link{nls} object)\cr \code{.. $component_matrix} : \link{matrix} with numerical xy-values of the single fitted components with the resolution of the input data \code{.. $component.contribution.matrix} : \link{list} component distribution matrix \strong{\verb{info:}} \code{.. $call} : \link{call} the original function call Matrix structure for the distribution matrix: Column 1 and 2: time and \code{rev(time)} values\cr Additional columns are used for the components, two for each component, containing I0 and n0. The last columns \code{cont.} provide information on the relative component contribution for each time interval including the row sum for this values. } \description{ The function determines weighted nonlinear least-squares estimates of the component parameters of an LM-OSL curve (Bulur 1996) for a given number of components and returns various component parameters. The fitting procedure uses the function \link{nls} with the \code{port} algorithm. 
} \details{ \strong{Fitting function} The function for the fitting has the general form: \deqn{y = (exp(0.5)*Im_1*x/xm_1)*exp(-x^2/(2*xm_1^2)) + ,\ldots, + exp(0.5)*Im_i*x/xm_i)*exp(-x^2/(2*xm_i^2))} where \eqn{1 < i < 8} This function and the equations for the conversion to b (detrapping probability) and n0 (proportional to initially trapped charge) have been taken from Kitis et al. (2008): \deqn{xm_i=\sqrt{max(t)/b_i}} \deqn{Im_i=exp(-0.5)n0/xm_i} \strong{Background subtraction} Three methods for background subtraction are provided for a given background signal (\code{values.bg}). \itemize{ \item \code{polynomial}: default method. A polynomial function is fitted using \link{glm} and the resulting function is used for background subtraction: \deqn{y = a*x^4 + b*x^3 + c*x^2 + d*x + e} \item \code{linear}: a linear function is fitted using \link{glm} and the resulting function is used for background subtraction: \deqn{y = a*x + b} \item \code{channel}: the measured background signal is subtracted channel wise from the measured signal. } \strong{Start values} The choice of the initial parameters for the \code{nls}-fitting is a crucial point and the fitting procedure may mainly fail due to ill chosen start parameters. Here, three options are provided: \strong{(a)} If no start values (\code{start_values}) are provided by the user, a cheap guess is made by using the detrapping values found by Jain et al. (2003) for quartz for a maximum of 7 components. Based on these values, the pseudo start parameters \code{xm} and \code{Im} are recalculated for the given data set. In all cases, the fitting starts with the ultra-fast component and (depending on \code{n.components}) steps through the following values. If no fit could be achieved, an error plot (for \code{plot = TRUE}) with the pseudo curve (based on the pseudo start parameters) is provided. This may give the opportunity to identify appropriate start parameters visually. 
\strong{(b)} If start values are provided, the function works like a simple \link{nls} fitting approach. \strong{(c)} If no start parameters are provided and the option \code{fit.advanced = TRUE} is chosen, an advanced start parameter estimation is applied using a stochastic attempt. Therefore, the recalculated start parameters \strong{(a)} are used to construct a normal distribution. The start parameters are then sampled randomly from this distribution. A maximum of 100 attempts will be made. \strong{Note:} This process may be time consuming. \strong{Goodness of fit} The goodness of the fit is given by a pseudo-R^2 value (pseudo coefficient of determination). According to Lave (1970), the value is calculated as: \deqn{pseudoR^2 = 1 - RSS/TSS} where \eqn{RSS = Residual~Sum~of~Squares} and \eqn{TSS = Total~Sum~of~Squares} \strong{Error of fitted component parameters} The 1-sigma error for the components is calculated using the function \link[stats:confint]{stats::confint}. Due to considerable calculation time, this option is deactivated by default. In addition, the error for the components can be estimated by using internal R functions like \link{summary}. See the \link{nls} help page for more information. \emph{For more details on the nonlinear regression in R, see Ritz & Streibig (2008).} } \note{ The pseudo-R^2 may not be the best parameter to describe the goodness of the fit. The trade off between the \code{n.components} and the pseudo-R^2 value currently remains unconsidered. The function \strong{does not} ensure that the fitting procedure has reached a global minimum rather than a local minimum! In any case of doubt, the use of manual start values is highly recommended. 
} \section{Function version}{ 0.3.4 } \examples{ ##(1) fit LM data without background subtraction data(ExampleData.FittingLM, envir = environment()) fit_LMCurve(values = values.curve, n.components = 3, log = "x") ##(2) fit LM data with background subtraction and export as JPEG ## -alter file path for your preferred system ##jpeg(file = "~/Desktop/Fit_Output\\%03d.jpg", quality = 100, ## height = 3000, width = 3000, res = 300) data(ExampleData.FittingLM, envir = environment()) fit_LMCurve(values = values.curve, values.bg = values.curveBG, n.components = 2, log = "x", plot.BG = TRUE) ##dev.off() ##(3) fit LM data with manual start parameters data(ExampleData.FittingLM, envir = environment()) fit_LMCurve(values = values.curve, values.bg = values.curveBG, n.components = 3, log = "x", start_values = data.frame(Im = c(170,25,400), xm = c(56,200,1500))) } \section{How to cite}{ Kreutzer, S., 2023. fit_LMCurve(): Nonlinear Least Squares Fit for LM-OSL curves. Function version 0.3.4. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Bulur, E., 1996. An Alternative Technique For Optically Stimulated Luminescence (OSL) Experiment. Radiation Measurements, 26, 5, 701-709. Jain, M., Murray, A.S., Boetter-Jensen, L., 2003. Characterisation of blue-light stimulated luminescence components in different quartz samples: implications for dose measurement. Radiation Measurements, 37 (4-5), 441-449. Kitis, G. & Pagonis, V., 2008. Computerized curve deconvolution analysis for LM-OSL. Radiation Measurements, 43, 737-741. Lave, C.A.T., 1970. The Demand for Urban Mass Transportation. The Review of Economics and Statistics, 52 (3), 320-323. Ritz, C. & Streibig, J.C., 2008. 
Nonlinear Regression with R. R. Gentleman, K. Hornik, & G. Parmigiani, eds., Springer, p. 150. } \seealso{ \link{fit_CWCurve}, \link{plot}, \link{nls}, \link[minpack.lm:nlsLM]{minpack.lm::nlsLM}, \link{get_RLum} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \keyword{dplot} \keyword{models} Luminescence/man/calc_FadingCorr.Rd0000644000176200001440000001703714521210045016730 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calc_FadingCorr.R \name{calc_FadingCorr} \alias{calc_FadingCorr} \title{Apply a fading correction according to Huntley & Lamothe (2001) for a given g-value and a given tc} \usage{ calc_FadingCorr( age.faded, g_value, tc = NULL, tc.g_value = tc, n.MC = 10000, seed = NULL, interval = c(0.01, 500), txtProgressBar = TRUE, verbose = TRUE ) } \arguments{ \item{age.faded}{\link{numeric} \link{vector} (\strong{required}): uncorrected age with error in ka (see example)} \item{g_value}{\link{vector} (\strong{required}): g-value and error obtained from separate fading measurements (see example). Alternatively an \linkS4class{RLum.Results} object can be provided produced by the function \link{analyse_FadingMeasurement}, in this case \code{tc} is set automatically} \item{tc}{\link{numeric} (\strong{required}): time in seconds between irradiation and the prompt measurement (cf. Huntley & Lamothe 2001). Argument will be ignored if \code{g_value} was an \linkS4class{RLum.Results} object} \item{tc.g_value}{\link{numeric} (\emph{with default}): the time in seconds between irradiation and the prompt measurement used for estimating the g-value. If the g-value was normalised to, e.g., 2 days, this time in seconds (i.e., 172800) should be given here. 
If nothing is provided the time is set to tc, which is the usual case for g-values obtained using the SAR method and g-values that have not been normalised to 2 days.
If the g-value was normalised to a distinct time or evaluated with a different tc value (e.g., external irradiation), also the tc value for the g-value needs to be provided (argument \code{tc.g_value} and then the g-value is recalculated to tc of the measurement used for estimating the age applying the following equation: \deqn{\kappa_{tc} = \kappa_{tc.g} / (1 - \kappa_{tc.g} * log(tc/tc.g))} where \deqn{\kappa_{tc.g} = g / 100 / log(10)} with \code{log} the natural logarithm. The error of the fading-corrected age is determined using a Monte Carlo simulation approach. Solving of the equation is realised using \link{uniroot}. Large values for \code{n.MC} will significantly increase the computation time.\cr \strong{\code{n.MC = 'auto'}} The error estimation based on a stochastic process, i.e. for a small number of MC runs the calculated error varies considerably every time the function is called, even with the same input values. The argument option \code{n.MC = 'auto'} tries to find a stable value for the standard error, i.e. the standard deviation of values calculated during the MC runs (\code{age.corr.MC}), within a given precision (2 digits) by increasing the number of MC runs stepwise and calculating the corresponding error. If the determined error does not differ from the 9 values calculated previously within a precision of (here) 3 digits the calculation is stopped as it is assumed that the error is stable. Please note that (a) the duration depends on the input values as well as on the provided computation resources and it may take a while, (b) the length (size) of the output vector \code{age.corr.MC}, where all the single values produced during the MC runs are stored, equals the number of MC runs (here termed observations). To avoid an endless loop the calculation is stopped if the number of observations exceeds 10^7. This limitation can be overwritten by setting the number of MC runs manually, e.g. \code{n.MC = 10000001}. 
Note: For this case the function is not checking whether the calculated error is stable.\cr \strong{\code{seed}} This option allows to recreate previously calculated results by setting the seed for the R random number generator (see \link{set.seed} for details). This option should not be mixed up with the option \strong{\code{n.MC = 'auto'}}. The results may appear similar, but they are not comparable!\cr \strong{FAQ}\cr Q: Which tc value is expected?\cr A: tc is the time in seconds between irradiation and the prompt measurement applied during your De measurement. However, this tc might differ from the tc used for estimating the g-value. In the case of an SAR measurement tc should be similar, however, if it differs, you have to provide this tc value (the one used for estimating the g-value) using the argument \code{tc.g_value}.\cr } \note{ Special thanks to Sébastien Huot for his support and clarification via e-mail. } \section{Function version}{ 0.4.3 } \examples{ ##run the examples given in the appendix of Huntley and Lamothe, 2001 ##(1) faded age: 100 a results <- calc_FadingCorr( age.faded = c(0.1,0), g_value = c(5.0, 1.0), tc = 2592000, tc.g_value = 172800, n.MC = 100) ##(2) faded age: 1 ka results <- calc_FadingCorr( age.faded = c(1,0), g_value = c(5.0, 1.0), tc = 2592000, tc.g_value = 172800, n.MC = 100) ##(3) faded age: 10.0 ka results <- calc_FadingCorr( age.faded = c(10,0), g_value = c(5.0, 1.0), tc = 2592000, tc.g_value = 172800, n.MC = 100) ##access the last output get_RLum(results) } \section{How to cite}{ Kreutzer, S., 2023. calc_FadingCorr(): Apply a fading correction according to Huntley & Lamothe (2001) for a given g-value and a given tc. Function version 0.4.3. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. 
https://CRAN.R-project.org/package=Luminescence } \references{ Huntley, D.J., Lamothe, M., 2001. Ubiquity of anomalous fading in K-feldspars and the measurement and correction for it in optical dating. Canadian Journal of Earth Sciences, 38, 1093-1106. } \seealso{ \linkS4class{RLum.Results}, \link{analyse_FadingMeasurement}, \link{get_RLum}, \link{uniroot} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \keyword{datagen} Luminescence/man/calc_CommonDose.Rd0000644000176200001440000001100014521210045016735 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calc_CommonDose.R \name{calc_CommonDose} \alias{calc_CommonDose} \title{Apply the (un-)logged common age model after Galbraith et al. (1999) to a given De distribution} \usage{ calc_CommonDose(data, sigmab, log = TRUE, ...) } \arguments{ \item{data}{\linkS4class{RLum.Results} or \link{data.frame} (\strong{required}): for \link{data.frame}: two columns with De \code{(data[,1])} and De error \code{(data[,2])}} \item{sigmab}{\link{numeric} (\emph{with default}): additional spread in De values. This value represents the expected overdispersion in the data should the sample be well-bleached (Cunningham & Walling 2012, p. 100). \strong{NOTE}: For the logged model (\code{log = TRUE}) this value must be a fraction, e.g. 0.2 (= 20 \\%). If the un-logged model is used (\code{log = FALSE}), sigmab must be provided in the same absolute units of the De values (seconds or Gray).} \item{log}{\link{logical} (\emph{with default}): fit the (un-)logged central age model to De data} \item{...}{currently not used.} } \value{ Returns a terminal output. 
In addition an \linkS4class{RLum.Results} object is returned containing the following element: \item{.$summary}{\link{data.frame} summary of all relevant model results.} \item{.$data}{\link{data.frame} original input data} \item{.$args}{\link{list} used arguments} \item{.$call}{\link{call} the function call} The output should be accessed using the function \link{get_RLum} } \description{ Function to calculate the common dose of a De distribution. } \details{ \strong{(Un-)logged model} When \code{log = TRUE} this function calculates the weighted mean of logarithmic De values. Each of the estimates is weighted by the inverse square of its relative standard error. The weighted mean is then transformed back to the dose scale (Galbraith & Roberts 2012, p. 14). The log transformation is not applicable if the De estimates are close to zero or negative. In this case the un-logged model can be applied instead (\code{log = FALSE}). The weighted mean is then calculated using the un-logged estimates of De and their absolute standard error (Galbraith & Roberts 2012, p. 14). } \section{Function version}{ 0.1.1 } \examples{ ## load example data data(ExampleData.DeValues, envir = environment()) ## apply the common dose model calc_CommonDose(ExampleData.DeValues$CA1) } \section{How to cite}{ Burow, C., 2023. calc_CommonDose(): Apply the (un-)logged common age model after Galbraith et al. (1999) to a given De distribution. Function version 0.1.1. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Galbraith, R.F. & Laslett, G.M., 1993. Statistical models for mixed fission track ages. Nuclear Tracks Radiation Measurements 4, 459-470. 
Galbraith, R.F., Roberts, R.G., Laslett, G.M., Yoshida, H. & Olley, J.M., 1999. Optical dating of single grains of quartz from Jinmium rock shelter, northern Australia. Part I: experimental design and statistical models. Archaeometry 41, 339-364. Galbraith, R.F. & Roberts, R.G., 2012. Statistical aspects of equivalent dose and error calculation and display in OSL dating: An overview and some recommendations. Quaternary Geochronology 11, 1-27. \strong{Further reading} Arnold, L.J. & Roberts, R.G., 2009. Stochastic modelling of multi-grain equivalent dose (De) distributions: Implications for OSL dating of sediment mixtures. Quaternary Geochronology 4, 204-230. Bailey, R.M. & Arnold, L.J., 2006. Statistical modelling of single grain quartz De distributions and an assessment of procedures for estimating burial dose. Quaternary Science Reviews 25, 2475-2502. Cunningham, A.C. & Wallinga, J., 2012. Realizing the potential of fluvial archives using robust OSL chronologies. Quaternary Geochronology 12, 98-106. Rodnight, H., Duller, G.A.T., Wintle, A.G. & Tooth, S., 2006. Assessing the reproducibility and accuracy of optical dating of fluvial deposits. Quaternary Geochronology, 1 109-120. Rodnight, H., 2008. How many equivalent dose values are needed to obtain a reproducible distribution?. Ancient TL 26, 3-10. 
} \seealso{ \link{calc_CentralDose}, \link{calc_FiniteMixture}, \link{calc_FuchsLang2001}, \link{calc_MinDose} } \author{ Christoph Burow, University of Cologne (Germany) , RLum Developer Team} Luminescence/man/analyse_FadingMeasurement.Rd0000644000176200001440000002345014521210044021035 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/analyse_FadingMeasurement.R \name{analyse_FadingMeasurement} \alias{analyse_FadingMeasurement} \title{Analyse fading measurements and returns the fading rate per decade (g-value)} \usage{ analyse_FadingMeasurement( object, structure = c("Lx", "Tx"), signal.integral, background.integral, t_star = "half", n.MC = 100, verbose = TRUE, plot = TRUE, plot.single = FALSE, ... ) } \arguments{ \item{object}{\linkS4class{RLum.Analysis} (\strong{required}): input object with the measurement data. Alternatively, a \link{list} containing \linkS4class{RLum.Analysis} objects or a \link{data.frame} with three columns (x = LxTx, y = LxTx error, z = time since irradiation) can be provided. Can also be a wide table, i.e. a \link{data.frame} with a number of columns divisible by 3 and where each triplet has the before mentioned column structure. \strong{Please note: The input object should solely consists of the curve needed for the data analysis, i.e. only IRSL curves representing Lx (and Tx)} If data from multiple aliquots are provided please \strong{see the details below} with regard to Lx/Tx normalisation. \strong{The function assumes that all your measurements are related to one (comparable) sample. If you to treat independent samples, you have use this function in a loop.}} \item{structure}{\link{character} (\emph{with default}): sets the structure of the measurement data. Allowed are \code{'Lx'} or \code{c('Lx','Tx')}. Other input is ignored} \item{signal.integral}{\link{vector} (\strong{required}): vector with channels for the signal integral (e.g., \code{c(1:10)}). 
Not required if a \code{data.frame} with \code{LxTx} values is provided.} \item{background.integral}{\link{vector} (\strong{required}): vector with channels for the background integral (e.g., \code{c(90:100)}). Not required if a \code{data.frame} with \code{LxTx} values is provided.} \item{t_star}{\link{character} (\emph{with default}): method for calculating the time elapsed since irradiation if input is \strong{not} a \code{data.frame}. Options are: \code{'half'} (the default), \verb{'half_complex}, which uses the long equation in Auclair et al. 2003, and and \code{'end'}, which takes the time between irradiation and the measurement step. Alternatively, \code{t_star} can be a function with one parameter which works on \code{t1}. For more information see details. \cr \emph{\code{t_star} has no effect if the input is a \link{data.frame}, because this input comes without irradiation times.}} \item{n.MC}{\link{integer} (\emph{with default}): number for Monte Carlo runs for the error estimation} \item{verbose}{\link{logical} (\emph{with default}): enables/disables verbose mode} \item{plot}{\link{logical} (\emph{with default}): enables/disables plot output} \item{plot.single}{\link{logical} (\emph{with default}): enables/disables single plot mode, i.e. one plot window per plot. Alternatively a vector specifying the plot to be drawn, e.g., \code{plot.single = c(3,4)} draws only the last two plots} \item{...}{(\emph{optional}) further arguments that can be passed to internally used functions. Supported arguments: \code{xlab}, \code{log}, \code{mtext} and \code{xlim} for the two first curve plots, and \code{ylim} for the fading curve plot. 
For further plot customization please use the numerical output of the functions for own plots.} } \value{ An \linkS4class{RLum.Results} object is returned: Slot: \strong{@data} \tabular{lll}{ \strong{OBJECT} \tab \strong{TYPE} \tab \strong{COMMENT}\cr \code{fading_results} \tab \code{data.frame} \tab results of the fading measurement in a table \cr \code{fit} \tab \code{lm} \tab object returned by the used linear fitting function \link[stats:lm]{stats::lm}\cr \code{rho_prime} \tab \code{data.frame} \tab results of rho' estimation after Kars et al. (2008) \cr \code{LxTx_table} \tab \code{data.frame} \tab Lx/Tx table, if curve data had been provided \cr \code{irr.times} \tab \code{integer} \tab vector with the irradiation times in seconds \cr } Slot: \strong{@info} \tabular{lll}{ \strong{OBJECT} \tab \code{TYPE} \tab \code{COMMENT}\cr \code{call} \tab \code{call} \tab the original function call\cr } } \description{ The function analysis fading measurements and returns a fading rate including an error estimation. The function is not limited to standard fading measurements, as can be seen, e.g., Huntley and Lamothe (2001). Additionally, the density of recombination centres (rho') is estimated after Kars et al. (2008). } \details{ All provided output corresponds to the \eqn{tc} value obtained by this analysis. Additionally in the output object the g-value normalised to 2-days is provided. The output of this function can be passed to the function \link{calc_FadingCorr}. \strong{Fitting and error estimation} For the fitting the function \link[stats:lm]{stats::lm} is used without applying weights. For the error estimation all input values, except \code{tc}, as the precision can be considered as sufficiently high enough with regard to the underlying problem, are sampled assuming a normal distribution for each value with the value as the mean and the provided uncertainty as standard deviation. 
\strong{The options for \code{t_star}} \itemize{ \item \code{t_star = "half"} (the default) The calculation follows the simplified version in Auclair et al. (2003), which reads \deqn{t_{star} := t_1 + (t_2 - t_1)/2} \item \code{t_star = "half_complex"} This option applies the complex function shown in Auclair et al. (2003), which is derived from Aitken (1985) appendix F, equations 9 and 11. It reads \deqn{t_{star} = t0 * 10^[(t_2 log(t_2/t_0) - t_1 log(t_1/t_0) - 0.43(t_2 - t_1))/(t_2 - t_1)]} where 0.43 = \eqn{1/ln(10)}. t0, which is an arbitrary constant, is set to 1. Please note that the equation in Auclair et al. (2003) is incorrect insofar that it reads \eqn{10exp(...)}, where the base should be 10 and not the Euler's number. Here we use the correct version (base 10). \item \code{t_star = "end"} This option uses the simplest possible form for \code{t_star} which is the time since irradiation without taking into account any addition parameter and it equals t1 in Auclair et al. (2003) \item \verb{t_star = } This last option allows you to provide an R function object that works on t1 and gives you all possible freedom. For instance, you may want to define the following function \code{fun <- function(x) {x^2}}, this would square all values of t1, because internally it calls \code{fun(t1)}. The name of the function does not matter. } \strong{Density of recombination centres} The density of recombination centres, expressed by the dimensionless variable rho', is estimated by fitting equation 5 in Kars et al. 2008 to the data. For the fitting the function \link[stats:nls]{stats::nls} is used without applying weights. For the error estimation the same procedure as for the g-value is applied (see above). \strong{Multiple aliquots & Lx/Tx normalisation} Be aware that this function will always normalise all \code{Lx/Tx} values by the \code{Lx/Tx} value of the prompt measurement of the first aliquot. 
This implicitly assumes that there are no systematic inter-aliquot variations in the \code{Lx/Tx} values. If deemed necessary to normalise the \code{Lx/Tx} values of each aliquot by its individual prompt measurement please do so \strong{before} running \link{analyse_FadingMeasurement} and provide the already normalised values for \code{object} instead. \strong{Shine-down curve plots} Please note that the shine-down curve plots are for information only. As such not all pause steps are plotted to avoid graphically overloaded plots. However, \emph{all} pause times are taken into consideration for the analysis. } \section{Function version}{ 0.1.21 } \examples{ ## load example data (sample UNIL/NB123, see ?ExampleData.Fading) data("ExampleData.Fading", envir = environment()) ##(1) get fading measurement data (here a three column data.frame) fading_data <- ExampleData.Fading$fading.data$IR50 ##(2) run analysis g_value <- analyse_FadingMeasurement( fading_data, plot = TRUE, verbose = TRUE, n.MC = 10) ##(3) this can be further used in the function ## to correct the age according to Huntley & Lamothe, 2001 results <- calc_FadingCorr( age.faded = c(100,2), g_value = g_value, n.MC = 10) } \section{How to cite}{ Kreutzer, S., Burow, C., 2023. analyse_FadingMeasurement(): Analyse fading measurements and returns the fading rate per decade (g-value). Function version 0.1.21. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Aitken, M.J., 1985. Thermoluminescence dating, Studies in archaeological science. Academic Press, London, Orlando. Auclair, M., Lamothe, M., Huot, S., 2003. Measurement of anomalous fading for feldspar IRSL using SAR. Radiation Measurements 37, 487-492. 
\doi{10.1016/S1350-4487(03)00018-0} Huntley, D.J., Lamothe, M., 2001. Ubiquity of anomalous fading in K-feldspars and the measurement and correction for it in optical dating. Canadian Journal of Earth Sciences 38, 1093-1106. doi: \code{10.1139/cjes-38-7-1093} Kars, R.H., Wallinga, J., Cohen, K.M., 2008. A new approach towards anomalous fading correction for feldspar IRSL dating-tests on samples in field saturation. Radiation Measurements 43, 786-790. \doi{10.1016/j.radmeas.2008.01.021} } \seealso{ \link{calc_OSLLxTxRatio}, \link{read_BIN2R}, \link{read_XSYG2R}, \link{extract_IrradiationTimes}, \link{calc_FadingCorr} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) \cr Christoph Burow, University of Cologne (Germany) , RLum Developer Team} \keyword{datagen} Luminescence/man/calc_Huntley2006.Rd0000644000176200001440000003253314521210045016650 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calc_Huntley2006.R \name{calc_Huntley2006} \alias{calc_Huntley2006} \title{Apply the Huntley (2006) model} \usage{ calc_Huntley2006( data, LnTn = NULL, rhop, ddot, readerDdot, normalise = TRUE, fit.method = c("EXP", "GOK"), lower.bounds = c(-Inf, -Inf, -Inf, -Inf), summary = TRUE, plot = TRUE, ... ) } \arguments{ \item{data}{\link{data.frame} (\strong{required}): A \code{data.frame} with one of the following structures: \itemize{ \item A \strong{three column} data frame with numeric values on a) dose (s), b) \code{LxTx} and c) \code{LxTx} error. \item If a \strong{two column} data frame is provided it is automatically assumed that errors on \code{LxTx} are missing. A third column will be attached with an arbitrary 5 \\% error on the provided \code{LxTx} values. \item Can also be a \strong{wide table}, i.e. a \link{data.frame} with a number of columns divisible by 3 and where each triplet has the aforementioned column structure. } \if{html}{\out{
}}\preformatted{ (optional) | dose (s)| LxTx | LxTx error | | [ ,1] | [ ,2]| [ ,3] | |---------|------|------------| [1, ]| 0 | LnTn | LnTn error | (optional, see arg 'LnTn') [2, ]| R1 | L1T1 | L1T1 error | ... | ... | ... | ... | [x, ]| Rx | LxTx | LxTx error | }\if{html}{\out{
}} \strong{NOTE:} The function assumes the first row of the function to be the \code{Ln/Tn}-value. If you want to provide more than one \code{Ln/Tn}-value consider using the argument \code{LnTn}.} \item{LnTn}{\link{data.frame} (\strong{optional}): This argument should \strong{only} be used to provide more than one \code{Ln/Tn}-value. It assumes a two column data frame with the following structure: \if{html}{\out{
}}\preformatted{ | LnTn | LnTn error | | [ ,1] | [ ,2] | |--------|--------------| [1, ]| LnTn_1 | LnTn_1 error | [2, ]| LnTn_2 | LnTn_2 error | ... | ... | ... | [x, ]| LnTn_x | LnTn_x error | }\if{html}{\out{
}} The function will calculate a \strong{mean} \code{Ln/Tn}-value and uses either the standard deviation or the highest individual error, whichever is larger. If another mean value (e.g. a weighted mean or median) or error is preferred, this value must be calculated beforehand and used in the first row in the data frame for argument \code{data}. \strong{NOTE:} If you provide \code{LnTn}-values with this argument the data frame for the \code{data}-argument \strong{must not} contain any \code{LnTn}-values!} \item{rhop}{\link{numeric} (\strong{required}): The density of recombination centres (\eqn{\rho}') and its error (see Huntley 2006), given as numeric vector of length two. Note that \eqn{\rho}' must \strong{not} be provided as the common logarithm. Example: \code{rhop = c(2.92e-06, 4.93e-07)}.} \item{ddot}{\link{numeric} (\strong{required}): Environmental dose rate and its error, given as a numeric vector of length two. Expected unit: Gy/ka. Example: \code{ddot = c(3.7, 0.4)}.} \item{readerDdot}{\link{numeric} (\strong{required}): Dose rate of the irradiation source of the OSL reader and its error, given as a numeric vector of length two. Expected unit: Gy/s. Example: \code{readerDdot = c(0.08, 0.01)}.} \item{normalise}{\link{logical} (\emph{with default}): If \code{TRUE} (the default) all measured and computed LxTx values are normalised by the pre-exponential factor A (see details).} \item{fit.method}{\link{character} (\emph{with default}): Fit function of the dose response curve. Can either be \code{EXP} (the default) or \code{GOK}. Note that \code{EXP} (single saturating exponential) is the original function the model after Huntley (2006) and Kars et al. (2008) was designed to use. The use of a general-order kinetics function (\code{GOK}) is an experimental adaption of the model and should be used with great care.} \item{lower.bounds}{\link{numeric} (\emph{with default}): Only applicable for \code{fit.method = 'GOK'}. 
A vector of length 4 that contains the lower bound values for fitting the general-order kinetics function using \link[minpack.lm:nlsLM]{minpack.lm::nlsLM}. In most cases, the default values (\code{c(-Inf, -Inf, -Inf, -Inf)}) are appropriate for finding a best fit, but sometimes it may be useful to restrict the lower bounds to e.g. \code{c(0, 0, 0, 0)}. The values of the vector are for parameters \code{a}, \code{D0}, \code{c} and \code{d} in that particular order (see details in \link{plot_GrowthCurve}).
\cr \code{fits} \tab \code{list} \tab A \code{list} of \code{nls} objects produced by \link[minpack.lm:nlsLM]{minpack.lm::nlsLM} when fitting the dose response curves \cr } Slot: \strong{@info}\cr \tabular{lll}{ \strong{OBJECT} \tab \strong{TYPE} \tab \strong{COMMENT} \cr \code{call} \tab \code{call} \tab the original function call \cr \code{args} \tab \code{list} \tab arguments of the original function call \cr } } \description{ A function to calculate the expected sample specific fraction of saturation based on the model of Huntley (2006) using the approach as implemented in Kars et al. (2008) or Guralnik et al. (2015). } \details{ This function applies the approach described in Kars et al. (2008) or Guralnik et al. (2015), which are both developed from the model of Huntley (2006) to calculate the expected sample specific fraction of saturation of a feldspar and also to calculate fading corrected age using this model. \eqn{\rho}' (\code{rhop}), the density of recombination centres, is a crucial parameter of this model and must be determined separately from a fading measurement. The function \link{analyse_FadingMeasurement} can be used to calculate the sample specific \eqn{\rho}' value. \strong{Kars et al. (2008) - Single saturating exponential} To apply the approach after Kars et al. (2008) use \code{fit.method = "EXP"}. Firstly, the unfaded \eqn{D_0} value is determined through applying equation 5 of Kars et al. (2008) to the measured \eqn{\frac{L_x}{T_x}} data as a function of irradiation time, and fitting the data with a single saturating exponential of the form: \deqn{LxTx(t^*) = A \phi(t^*) \{1 - exp(-\frac{t^*}{D_0}))\}} where \deqn{\phi(t^*) = exp(-\rho' ln(1.8 \tilde{s} t^*)^3)} after King et al. (2016) where \eqn{A} is a pre-exponential factor, \eqn{t^*} (s) is the irradiation time, starting at the mid-point of irradiation (Auclair et al. 2003) and \eqn{\tilde{s}} (\eqn{3\times10^{15}} s\eqn{^{-1}}) is the athermal frequency factor after Huntley (2006). 
\cr Using fit parameters \eqn{A} and \eqn{D_0}, the function then computes a natural dose response curve using the environmental dose rate, \eqn{\dot{D}} (Gy/s) and equations \verb{[1]} and \verb{[2]}. Computed \eqn{\frac{L_x}{T_x}} values are then fitted using the \link{plot_GrowthCurve} function and the laboratory measured LnTn can then be interpolated onto this curve to determine the fading corrected \eqn{D_e} value, from which the fading corrected age is calculated. \strong{Guralnik et al. (2015) - General-order kinetics} To apply the approach after Guralnik et al. (2015) use \code{fit.method = "GOK"}. The approach of Guralnik et al. (2015) is very similar to that of Kars et al. (2008), but instead of using a single saturating exponential the model fits a general-order kinetics function of the form: \deqn{LxTx(t^*) = A \phi (t^*)(1 - (1 + (\frac{1}{D_0}) t^* c)^{-1/c})} where \eqn{A}, \eqn{\phi}, \eqn{t^*} and \eqn{D_0} are the same as above and \eqn{c} is a dimensionless kinetic order modifier (cf. equation 10 in Guralnik et al., 2015). \strong{Level of saturation} The \link{calc_Huntley2006} function also calculates the level of saturation (\eqn{\frac{n}{N}}) and the field saturation (i.e. athermal steady state, (n/N)_SS) value for the sample under investigation using the sample specific \eqn{\rho}', unfaded \eqn{D_0} and \eqn{\dot{D}} values, following the approach of Kars et al. (2008). \strong{Uncertainties} Uncertainties are reported at \eqn{1\sigma} and are assumed to be normally distributed and are estimated using Monte-Carlo re-sampling (\code{n.MC = 1000}) of \eqn{\rho}' and \eqn{\frac{L_x}{T_x}} during dose response curve fitting, and of \eqn{\rho}' in the derivation of (\eqn{n/N}) and (n/N)_SS. 
\strong{Age calculated from 2D0 of the simulated natural DRC} In addition to the age calculated from the equivalent dose derived from \code{Ln/Tn} projected on the simulated natural dose response curve (DRC), this function also calculates an age from twice the characteristic saturation dose (\code{D0}) of the simulated natural DRC. This can be a useful information for (over)saturated samples (i.e., no intersect of \code{Ln/Tn} on the natural DRC) to obtain at least a "minimum age" estimate of the sample. In the console output this value is denoted by \emph{"Age @2D0 (ka):"}. } \note{ This function has BETA status, in particular for the GOK implementation. Please verify your results carefully } \section{Function version}{ 0.4.2 } \examples{ ## Load example data (sample UNIL/NB123, see ?ExampleData.Fading) data("ExampleData.Fading", envir = environment()) ## (1) Set all relevant parameters # a. fading measurement data (IR50) fading_data <- ExampleData.Fading$fading.data$IR50 # b. Dose response curve data data <- ExampleData.Fading$equivalentDose.data$IR50 ## (2) Define required function parameters ddot <- c(7.00, 0.004) readerDdot <- c(0.134, 0.0067) # Analyse fading measurement and get an estimate of rho'. # Note that the RLum.Results object can be directly used for further processing. # The number of MC runs is reduced for this example rhop <- analyse_FadingMeasurement(fading_data, plot = TRUE, verbose = FALSE, n.MC = 10) ## (3) Apply the Kars et al. (2008) model to the data kars <- calc_Huntley2006( data = data, rhop = rhop, ddot = ddot, readerDdot = readerDdot, n.MC = 25) \dontrun{ # You can also provide LnTn values separately via the 'LnTn' argument. # Note, however, that the data frame for 'data' must then NOT contain # a LnTn value. See argument descriptions! 
LnTn <- data.frame(LnTn = c(1.84833, 2.24833), LnTn.error = c(0.17, 0.22)) LxTx <- data[2:nrow(data), ] kars <- calc_Huntley2006(data = LxTx, LnTn = LnTn, rhop = rhop, ddot = ddot, readerDdot = readerDdot, n.MC = 25) } } \section{How to cite}{ King, G.E., Burow, C., Kreutzer, S., 2023. calc_Huntley2006(): Apply the Huntley (2006) model. Function version 0.4.2. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Kars, R.H., Wallinga, J., Cohen, K.M., 2008. A new approach towards anomalous fading correction for feldspar IRSL dating-tests on samples in field saturation. Radiation Measurements 43, 786-790. doi:10.1016/j.radmeas.2008.01.021 Guralnik, B., Li, B., Jain, M., Chen, R., Paris, R.B., Murray, A.S., Li, S.-H., Pagonis, P., Herman, F., 2015. Radiation-induced growth and isothermal decay of infrared-stimulated luminescence from feldspar. Radiation Measurements 81, 224-231. Huntley, D.J., 2006. An explanation of the power-law decay of luminescence. Journal of Physics: Condensed Matter 18, 1359-1365. doi:10.1088/0953-8984/18/4/020 King, G.E., Herman, F., Lambert, R., Valla, P.G., Guralnik, B., 2016. Multi-OSL-thermochronometry of feldspar. Quaternary Geochronology 33, 76-87. doi:10.1016/j.quageo.2016.01.004 \strong{Further reading} Morthekai, P., Jain, M., Cunha, P.P., Azevedo, J.M., Singhvi, A.K., 2011. An attempt to correct for the fading in million year old basaltic rocks. Geochronometria 38(3), 223-230. } \author{ Georgina E. 
King, University of Lausanne (Switzerland) \cr Christoph Burow, University of Cologne (Germany) \cr Sebastian Kreutzer, Ruprecht-Karl University of Heidelberg (Germany) , RLum Developer Team} \keyword{datagen} Luminescence/man/merge_Risoe.BINfileData.Rd0000644000176200001440000000705214521210045020225 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merge_Risoe.BINfileData.R \name{merge_Risoe.BINfileData} \alias{merge_Risoe.BINfileData} \title{Merge Risoe.BINfileData objects or Risoe BIN-files} \usage{ merge_Risoe.BINfileData( input.objects, output.file, keep.position.number = FALSE, position.number.append.gap = 0 ) } \arguments{ \item{input.objects}{\link{character} with \linkS4class{Risoe.BINfileData} objects (\strong{required}): Character vector with path and files names (e.g. \code{input.objects = c("path/file1.bin", "path/file2.bin")} or \linkS4class{Risoe.BINfileData} objects (e.g. \code{input.objects = c(object1, object2)}). Alternatively a \code{list} is supported.} \item{output.file}{\link{character} (\emph{optional}): File output path and name. If no value is given, a \linkS4class{Risoe.BINfileData} is returned instead of a file.} \item{keep.position.number}{\link{logical} (\emph{with default}): Allows keeping the original position numbers of the input objects. Otherwise the position numbers are recalculated.} \item{position.number.append.gap}{\link{integer} (\emph{with default}): Set the position number gap between merged BIN-file sets, if the option \code{keep.position.number = FALSE} is used. See details for further information.} } \value{ Returns a \code{file} or a \linkS4class{Risoe.BINfileData} object. } \description{ Function allows merging Risoe BIN/BINX files or \code{Risoe.BINfileData} objects. } \details{ The function allows merging different measurements to one file or one object. The record IDs are recalculated for the new object. Other values are kept for each object. 
The number of input objects is not limited. \code{position.number.append.gap} option If the option \code{keep.position.number = FALSE} is used, the position numbers of the new data set are recalculated by adding the highest position number of the previous data set to the each position number of the next data set. For example: The highest position number is 48, then this number will be added to all other position numbers of the next data set (e.g. 1 + 48 = 49) However, there might be cases where an additional addend (summand) is needed before the next position starts. Example: \itemize{ \item Position number set (A): \verb{1,3,5,7} \item Position number set (B): \verb{1,3,5,7} } With no additional summand the new position numbers would be: \verb{1,3,5,7,8,9,10,11}. That might be unwanted. Using the argument \code{position.number.append.gap = 1} it will become: \verb{1,3,5,7,9,11,13,15,17}. } \note{ The validity of the output objects is not further checked. } \section{Function version}{ 0.2.8 } \examples{ ##merge two objects data(ExampleData.BINfileData, envir = environment()) object1 <- CWOSL.SAR.Data object2 <- CWOSL.SAR.Data object.new <- merge_Risoe.BINfileData(c(object1, object2)) } \section{How to cite}{ Kreutzer, S., 2023. merge_Risoe.BINfileData(): Merge Risoe.BINfileData objects or Risoe BIN-files. Function version 0.2.8. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Duller, G., 2007. Analyst. 
} \seealso{ \linkS4class{Risoe.BINfileData}, \link{read_BIN2R}, \link{write_R2BIN} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \keyword{IO} \keyword{manip} Luminescence/man/RLum.Results-class.Rd0000644000176200001440000001234314521210045017345 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RLum.Results-class.R \docType{class} \name{RLum.Results-class} \alias{RLum.Results-class} \alias{show,RLum.Results-method} \alias{set_RLum,RLum.Results-method} \alias{get_RLum,RLum.Results-method} \alias{length_RLum,RLum.Results-method} \alias{names_RLum,RLum.Results-method} \title{Class \code{"RLum.Results"}} \usage{ \S4method{show}{RLum.Results}(object) \S4method{set_RLum}{RLum.Results}(class, originator, .uid, .pid, data = list(), info = list()) \S4method{get_RLum}{RLum.Results}(object, data.object, info.object = NULL, drop = TRUE) \S4method{length_RLum}{RLum.Results}(object) \S4method{names_RLum}{RLum.Results}(object) } \arguments{ \item{object}{\code{\link{get_RLum}}; \linkS4class{RLum.Results} (\strong{required}): an object of class \linkS4class{RLum.Results} to be evaluated} \item{class}{\code{\link{set_RLum}}; \link{character} \strong{(required)}: name of the \code{RLum} class to create} \item{originator}{\code{\link{set_RLum}}; \link{character} (\emph{automatic}): contains the name of the calling function (the function that produces this object); can be set manually.} \item{.uid}{\code{\link{set_RLum}}; \link{character} (\emph{automatic}): sets an unique ID for this object using the internal C++ function \code{create_UID}.} \item{.pid}{\code{\link{set_RLum}}; \link{character} (\emph{with default}): option to provide a parent id for nesting at will.} \item{data}{\code{\link{set_RLum}}; \link{list} (\emph{optional}): a list containing the data to be stored in the object} \item{info}{\code{\link{set_RLum}}; \link{list} (\emph{optional}): a list containing 
additional info data for the object} \item{data.object}{\code{\link{get_RLum}}; \link{character} or \link{numeric}: name or index of the data slot to be returned} \item{info.object}{\code{\link{get_RLum}}; \link{character} (\emph{optional}): name of the wanted info element} \item{drop}{\code{\link{get_RLum}}; \link{logical} (\emph{with default}): coerce to the next possible layer (which are data objects, \code{drop = FALSE} keeps the original \code{RLum.Results}} } \value{ \strong{\code{set_RLum}}: Returns an object from the class \linkS4class{RLum.Results} \strong{\code{get_RLum}}: Returns: \enumerate{ \item Data object from the specified slot \item \link{list} of data objects from the slots if 'data.object' is vector or \item an \linkS4class{RLum.Results} for \code{drop = FALSE}. } \strong{\code{length_RLum}} Returns the number of data elements in the \code{RLum.Results} object. \strong{\code{names_RLum}} Returns the names of the data elements in the object. } \description{ Object class contains results data from functions (e.g., \link{analyse_SAR.CWOSL}). } \section{Methods (by generic)}{ \itemize{ \item \code{show(RLum.Results)}: Show structure of \code{RLum.Results} object \item \code{set_RLum(RLum.Results)}: Construction method for an RLum.Results object. \item \code{get_RLum(RLum.Results)}: Accessor method for RLum.Results object. The argument data.object allows directly accessing objects delivered within the slot data. The default return object depends on the object originator (e.g., \code{fit_LMCurve}). If nothing is specified always the first \code{data.object} will be returned. Note: Detailed specification should be made in combination with the originator slot in the receiving function if results are pipped. 
\item \code{length_RLum(RLum.Results)}: Returns the length of the object, i.e., number of stored data.objects \item \code{names_RLum(RLum.Results)}: Returns the names data.objects }} \section{Slots}{ \describe{ \item{\code{data}}{Object of class \link{list} containing output data} }} \note{ The class is intended to store results from functions to be used by other functions. The data in the object should always be accessed by the method \code{get_RLum}. } \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("RLum.Results", ...)}. } \section{Class version}{ 0.5.2 } \examples{ showClass("RLum.Results") ##create an empty object from this class set_RLum(class = "RLum.Results") ##use another function to show how it works ##Basic calculation of the dose rate for a specific date dose.rate <- calc_SourceDoseRate( measurement.date = "2012-01-27", calib.date = "2014-12-19", calib.dose.rate = 0.0438, calib.error = 0.0019) ##show object dose.rate ##get results get_RLum(dose.rate) ##get parameters used for the calcualtion from the same object get_RLum(dose.rate, data.object = "parameters") ##alternatively objects can be accessed using S3 generics, such as dose.rate$parameters } \seealso{ \linkS4class{RLum}, \link{plot_RLum}, \link{merge_RLum} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. RLum.Results-class(): Class 'RLum.Results'. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. 
https://CRAN.R-project.org/package=Luminescence } \keyword{classes} \keyword{internal} \keyword{methods} Luminescence/man/read_BIN2R.Rd0000644000176200001440000001411214521210045015526 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/read_BIN2R.R \name{read_BIN2R} \alias{read_BIN2R} \title{Import Risø BIN/BINX-files into R} \usage{ read_BIN2R( file, show.raw.values = FALSE, position = NULL, n.records = NULL, zero_data.rm = TRUE, duplicated.rm = FALSE, fastForward = FALSE, show.record.number = FALSE, txtProgressBar = TRUE, forced.VersionNumber = NULL, ignore.RECTYPE = FALSE, pattern = NULL, verbose = TRUE, ... ) } \arguments{ \item{file}{\link{character} or \link{list} (\strong{required}): path and file name of the BIN/BINX file (URLs are supported). If input is a \code{list} it should comprise only \code{character}s representing each valid path and BIN/BINX-file names. Alternatively the input character can be just a directory (path), in this case the the function tries to detect and import all BIN/BINX files found in the directory.} \item{show.raw.values}{\link{logical} (\emph{with default}): shows raw values from BIN-file for \code{LTYPE}, \code{DTYPE} and \code{LIGHTSOURCE} without translation in characters. Can be provided as \code{list} if \code{file} is a \code{list}.} \item{position}{\link{numeric} (\emph{optional}): imports only the selected position. Note: the import performance will not benefit by any selection made here. Can be provided as \code{list} if \code{file} is a \code{list}.} \item{n.records}{\link{raw} (\emph{optional}): limits the number of imported records. Can be used in combination with \code{show.record.number} for debugging purposes, e.g. corrupt BIN-files. Can be provided as \code{list} if \code{file} is a \code{list}.} \item{zero_data.rm}{\link{logical} (\emph{with default}): remove erroneous data with no count values. 
As such data are usually not needed for the subsequent data analysis they will be removed by default. Can be provided as \code{list} if \code{file} is a \code{list}.} \item{duplicated.rm}{\link{logical} (\emph{with default}): remove duplicated entries if \code{TRUE}. This may happen due to an erroneous produced BIN/BINX-file. This option compares only predecessor and successor. Can be provided as \code{list} if \code{file} is a \code{list}.} \item{fastForward}{\link{logical} (\emph{with default}): if \code{TRUE} for a more efficient data processing only a list of \code{RLum.Analysis} objects is returned instead of a \linkS4class{Risoe.BINfileData} object. Can be provided as \code{list} if \code{file} is a \code{list}.} \item{show.record.number}{\link{logical} (\emph{with default}): shows record number of the imported record, for debugging usage only. Can be provided as \code{list} if \code{file} is a \code{list}.} \item{txtProgressBar}{\link{logical} (\emph{with default}): enables or disables \link{txtProgressBar}.} \item{forced.VersionNumber}{\link{integer} (\emph{optional}): allows to cheat the version number check in the function by own values for cases where the BIN-file version is not supported. Can be provided as \code{list} if \code{file} is a \code{list}. \strong{Note:} The usage is at own risk, only supported BIN-file versions have been tested.} \item{ignore.RECTYPE}{\link{logical} (\emph{with default}): this argument allows to ignore values in the byte 'RECTYPE' (BIN-file version 08), in case there are not documented or faulty set. In this case the corrupted records are skipped.} \item{pattern}{\link{character} (\emph{optional}): argument that is used if only a path is provided. 
The argument will than be passed to the function \link{list.files} used internally to construct a \code{list} of wanted files} \item{verbose}{\link{logical} (\emph{with default}): enables or disables verbose mode} \item{...}{further arguments that will be passed to the function \link{Risoe.BINfileData2RLum.Analysis}. Please note that any matching argument automatically sets \code{fastForward = TRUE}} } \value{ Returns an S4 \linkS4class{Risoe.BINfileData} object containing two slots: \item{METADATA}{A \link{data.frame} containing all variables stored in the BIN-file.} \item{DATA}{A \link{list} containing a numeric \link{vector} of the measured data. The ID corresponds to the record ID in METADATA.} If \code{fastForward = TRUE} a list of \linkS4class{RLum.Analysis} object is returned. The internal coercing is done using the function \link{Risoe.BINfileData2RLum.Analysis} } \description{ Import a *.bin or a *.binx file produced by a Risø DA15 and DA20 TL/OSL reader into R. } \details{ The binary data file is parsed byte by byte following the data structure published in the Appendices of the Analyst manual p. 42. For the general BIN/BINX-file structure, the reader is referred to the Risø website: \url{https://www.fysik.dtu.dk} } \note{ The function works for BIN/BINX-format versions 03, 04, 05, 06, 07 and 08. The version number depends on the used Sequence Editor. \strong{ROI data sets introduced with BIN-file version 8 are not supported and skipped during import.} } \section{Function version}{ 0.16.7 } \examples{ file <- system.file("extdata/BINfile_V8.binx", package = "Luminescence") temp <- read_BIN2R(file) temp } \section{How to cite}{ Kreutzer, S., Fuchs, M.C., 2023. read_BIN2R(): Import Risø BIN/BINX-files into R. Function version 0.16.7. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. 
Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ DTU Nutech, 2016. The Sequence Editor, Users Manual, February, 2016. \url{https://www.fysik.dtu.dk} } \seealso{ \link{write_R2BIN}, \linkS4class{Risoe.BINfileData}, \link[base:readBin]{base::readBin}, \link{merge_Risoe.BINfileData}, \linkS4class{RLum.Analysis} \link[utils:txtProgressBar]{utils::txtProgressBar}, \link{list.files} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)\cr Margret C. Fuchs, HZDR Freiberg, (Germany) \cr based on information provided by Torben Lapp and Karsten Bracht Nielsen (Risø DTU, Denmark) , RLum Developer Team} \keyword{IO} Luminescence/man/ExampleData.TR_OSL.Rd0000644000176200001440000000162414521210044017150 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Luminescence-package.R \name{ExampleData.TR_OSL} \alias{ExampleData.TR_OSL} \title{Example TR-OSL data} \format{ One \linkS4class{RLum.Data.Curve} dataset imported using the function \link{read_XSYG2R} \describe{ \code{ExampleData.TR_OSL}: A single \linkS4class{RLum.Data.Curve} object with the TR-OSL data } } \description{ Single TR-OSL curve obtained by Schmidt et al. (under review) for quartz sample BT729 (origin: Trebgast Valley, Germany, quartz, 90-200 µm, unpublished data). } \examples{ ##(1) curves data(ExampleData.TR_OSL, envir = environment()) plot_RLum(ExampleData.TR_OSL) } \references{ Schmidt, C., Simmank, O., Kreutzer, S., under review. Time-Resolved Optically Stimulated Luminescence of Quartz in the Nanosecond Time Domain. 
Journal of Luminescence, 1-90 } \seealso{ \link{fit_OSLLifeTimes} } \keyword{datasets} Luminescence/man/plot_AbanicoPlot.Rd0000644000176200001440000004431114521210045017154 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot_AbanicoPlot.R \name{plot_AbanicoPlot} \alias{plot_AbanicoPlot} \title{Function to create an Abanico Plot.} \usage{ plot_AbanicoPlot( data, na.rm = TRUE, log.z = TRUE, z.0 = "mean.weighted", dispersion = "qr", plot.ratio = 0.75, rotate = FALSE, mtext, summary, summary.pos, summary.method = "MCM", legend, legend.pos, stats, rug = FALSE, kde = TRUE, hist = FALSE, dots = FALSE, boxplot = FALSE, y.axis = TRUE, error.bars = FALSE, bar, bar.col, polygon.col, line, line.col, line.lty, line.label, grid.col, frame = 1, bw = "SJ", interactive = FALSE, ... ) } \arguments{ \item{data}{\link{data.frame} or \linkS4class{RLum.Results} object (\strong{required}): for \code{data.frame} two columns: De (\code{data[,1]}) and De error (\code{data[,2]}). To plot several data sets in one plot the data sets must be provided as \code{list}, e.g. \code{list(data.1, data.2)}.} \item{na.rm}{\link{logical} (\emph{with default}): exclude NA values from the data set prior to any further operations.} \item{log.z}{\link{logical} (\emph{with default}): Option to display the z-axis in logarithmic scale. Default is \code{TRUE}.} \item{z.0}{\link{character} or \link{numeric}: User-defined central value, used for centring of data. One out of \code{"mean"}, \code{"mean.weighted"} and \code{"median"} or a numeric value (not its logarithm). Default is \code{"mean.weighted"}.} \item{dispersion}{\link{character} (\emph{with default}): measure of dispersion, used for drawing the scatter polygon. One out of \itemize{ \item \code{"qr"} (quartile range), \item \code{"pnn"} (symmetric percentile range with \code{nn} the lower percentile, e.g. 
\code{"p05"} depicting the range between 5 and 95 \%),
}} \item{rug}{\link{logical}: Option to add a rug to the KDE part, to indicate the location of individual values.} \item{kde}{\link{logical}: Option to add a KDE plot to the dispersion part, default is \code{TRUE}.} \item{hist}{\link{logical}: Option to add a histogram to the dispersion part. Only meaningful when not more than one data set is plotted.} \item{dots}{\link{logical}: Option to add a dot plot to the dispersion part. If number of dots exceeds space in the dispersion part, a square indicates this.} \item{boxplot}{\link{logical}: Option to add a boxplot to the dispersion part, default is \code{FALSE}.} \item{y.axis}{\link{logical}: Option to hide standard y-axis labels and show 0 only. Useful for data with small scatter. If you want to suppress the y-axis entirely please use \code{yaxt == 'n'} (the standard \link[graphics:par]{graphics::par} setting) instead.} \item{error.bars}{\link{logical}: Option to show De-errors as error bars on De-points. Useful in combination with \verb{y.axis = FALSE, bar.col = "none"}.} \item{bar}{\link{numeric} (\emph{with default}): option to add one or more dispersion bars (i.e., bar showing the 2-sigma range) centred at the defined values. By default a bar is drawn according to \code{"z.0"}. To omit the bar set \code{"bar = FALSE"}.} \item{bar.col}{\link{character} or \link{numeric} (\emph{with default}): colour of the dispersion bar. Default is \code{"grey60"}.} \item{polygon.col}{\link{character} or \link{numeric} (\emph{with default}): colour of the polygon showing the data scatter. Sometimes this polygon may be omitted for clarity. To disable it use \code{FALSE} or \code{polygon = FALSE}. 
Default is \code{"grey80"}.} \item{line}{\link{numeric}: numeric values of the additional lines to be added.} \item{line.col}{\link{character} or \link{numeric}: colour of the additional lines.} \item{line.lty}{\link{integer}: line type of additional lines} \item{line.label}{\link{character}: labels for the additional lines.} \item{grid.col}{\link{character} or \link{numeric} (\emph{with default}): colour of the grid lines (originating at \verb{[0,0]} and stretching to the z-scale). To disable grid lines use \code{FALSE}. Default is \code{"grey"}.} \item{frame}{\link{numeric} (\emph{with default}): option to modify the plot frame type. Can be one out of \itemize{ \item \code{0} (no frame), \item \code{1} (frame originates at 0,0 and runs along min/max isochrons), \item \code{2} (frame embraces the 2-sigma bar), \item \code{3} (frame embraces the entire plot as a rectangle). } Default is \code{1}.} \item{bw}{\link{character} (\emph{with default}): bin-width for KDE, choose a numeric value for manual setting.} \item{interactive}{\link{logical} (\emph{with default}): create an interactive abanico plot (requires the \code{'plotly'} package)} \item{...}{Further plot arguments to pass (see \link[graphics:plot.default]{graphics::plot.default}). Supported are: \code{main}, \code{sub}, \code{ylab}, \code{xlab}, \code{zlab}, \code{zlim}, \code{ylim}, \code{cex}, \code{lty}, \code{lwd}, \code{pch}, \code{col}, \code{tck}, \code{tcl}, \code{at}, \code{breaks}. \code{xlab} must be a vector of length two, specifying the upper and lower x-axes labels.} } \value{ returns a plot object and, optionally, a list with plot calculus data. } \description{ A plot is produced which allows comprehensive presentation of data precision and its dispersion around a central value as well as illustration of a kernel density estimate, histogram and/or dot plot of the dose values. 
} \details{ The Abanico Plot is a combination of the classic Radial Plot (\code{plot_RadialPlot}) and a kernel density estimate plot (e.g \code{plot_KDE}). It allows straightforward visualisation of data precision, error scatter around a user-defined central value and the combined distribution of the values, on the actual scale of the measured data (e.g. seconds, equivalent dose, years). The principle of the plot is shown in Galbraith & Green (1990). The function authors are thankful for the thought provoking figure in this article. The semi circle (z-axis) of the classic Radial Plot is bent to a straight line here, which actually is the basis for combining this polar (radial) part of the plot with any other Cartesian visualisation method (KDE, histogram, PDF and so on). Note that the plot allows displaying two measures of distribution. One is the 2-sigma bar, which illustrates the spread in value errors, and the other is the polygon, which stretches over both parts of the Abanico Plot (polar and Cartesian) and illustrates the actual spread in the values themselves. Since the 2-sigma-bar is a polygon, it can be (and is) filled with shaded lines. To change density (lines per inch, default is 15) and angle (default is 45 degrees) of the shading lines, specify these parameters. See \code{?polygon()} for further help. The Abanico Plot supports other than the weighted mean as measure of centrality. When it is obvious that the data is not (log-)normally distributed, the mean (weighted or not) cannot be a valid measure of centrality and hence central dose. Accordingly, the median and the weighted median can be chosen as well to represent a proper measure of centrality (e.g. \code{centrality = "median.weighted"}). Also user-defined numeric values (e.g. from the central age model) can be used if this appears appropriate. The proportion of the polar part and the cartesian part of the Abanico Plot can be modified for display reasons (\code{plot.ratio = 0.75}). 
By default, the polar part spreads over 75 \\% and leaves 25 \\% for the part that shows the KDE graph. A statistic summary, i.e. a collection of statistic measures of centrality and dispersion (and further measures) can be added by specifying one or more of the following keywords: \itemize{ \item \code{"n"} (number of samples) \item \code{"mean"} (mean De value) \item \code{"median"} (median of the De values) \item \code{"sd.rel"} (relative standard deviation in percent) \item \code{"sd.abs"} (absolute standard deviation) \item \code{"se.rel"} (relative standard error) \item \code{"se.abs"} (absolute standard error) \item \code{"in.2s"} (percent of samples in 2-sigma range) \item \code{"kurtosis"} (kurtosis) \item \code{"skewness"} (skewness) } \strong{Note} that the input data for the statistic summary is sent to the function \code{calc_Statistics()} depending on the log-option for the z-scale. If \code{"log.z = TRUE"}, the summary is based on the logarithms of the input data. If \code{"log.z = FALSE"} the linearly scaled data is used. \strong{Note} as well, that \code{"calc_Statistics()"} calculates these statistic measures in three different ways: \code{unweighted}, \code{weighted} and \code{MCM-based} (i.e., based on Monte Carlo Methods). By default, the MCM-based version is used. If you wish to use another method, indicate this with the appropriate keyword using the argument \code{summary.method}. The optional parameter \code{layout} allows to modify the entire plot more sophisticated. Each element of the plot can be addressed and its properties can be defined. This includes font type, size and decoration, colours and sizes of all plot items. To infer the definition of a specific layout style cf. \code{get_Layout()} or type e.g., for the layout type \code{"journal"} \code{get_Layout("journal")}. A layout type can be modified by the user by assigning new values to the list object. 
It is possible for the z-scale to specify where ticks are to be drawn by using the parameter \code{at}, e.g. \code{at = seq(80, 200, 20)}, cf. function documentation of \code{axis}. Specifying tick positions manually overrides a \code{zlim}-definition. } \section{Function version}{ 0.1.17 } \examples{ ## load example data and recalculate to Gray data(ExampleData.DeValues, envir = environment()) ExampleData.DeValues <- ExampleData.DeValues$CA1 ## plot the example data straightforward plot_AbanicoPlot(data = ExampleData.DeValues) ## now with linear z-scale plot_AbanicoPlot(data = ExampleData.DeValues, log.z = FALSE) ## now with output of the plot parameters plot1 <- plot_AbanicoPlot(data = ExampleData.DeValues, output = TRUE) str(plot1) plot1$zlim ## now with adjusted z-scale limits plot_AbanicoPlot(data = ExampleData.DeValues, zlim = c(10, 200)) ## now with adjusted x-scale limits plot_AbanicoPlot(data = ExampleData.DeValues, xlim = c(0, 20)) ## now with rug to indicate individual values in KDE part plot_AbanicoPlot(data = ExampleData.DeValues, rug = TRUE) ## now with a smaller bandwidth for the KDE plot plot_AbanicoPlot(data = ExampleData.DeValues, bw = 0.04) ## now with a histogram instead of the KDE plot plot_AbanicoPlot(data = ExampleData.DeValues, hist = TRUE, kde = FALSE) ## now with a KDE plot and histogram with manual number of bins plot_AbanicoPlot(data = ExampleData.DeValues, hist = TRUE, breaks = 20) ## now with a KDE plot and a dot plot plot_AbanicoPlot(data = ExampleData.DeValues, dots = TRUE) ## now with user-defined plot ratio plot_AbanicoPlot(data = ExampleData.DeValues, plot.ratio = 0.5) ## now with user-defined central value plot_AbanicoPlot(data = ExampleData.DeValues, z.0 = 70) ## now with median as central value plot_AbanicoPlot(data = ExampleData.DeValues, z.0 = "median") ## now with the 17-83 percentile range as definition of scatter plot_AbanicoPlot(data = ExampleData.DeValues, z.0 = "median", dispersion = "p17") ## now with user-defined 
green line for central age model
pch = c(2, 6), angle = c(30, 50), summary = c("n", "in.2s", "median")) ## create Abanico plot with predefined layout definition plot_AbanicoPlot(data = ExampleData.DeValues, layout = "journal") ## now with predefined layout definition and further modifications plot_AbanicoPlot( data = data.3, z.0 = "median", layout = "journal", col = c("steelblue4", "orange4"), bar.col = adjustcolor(c("steelblue3", "orange3"), alpha.f = 0.5), polygon.col = c("steelblue3", "orange3")) ## for further information on layout definitions see documentation ## of function get_Layout() ## now with manually added plot content ## create empty plot with numeric output AP <- plot_AbanicoPlot(data = ExampleData.DeValues, pch = NA, output = TRUE) ## identify data in 2 sigma range in_2sigma <- AP$data[[1]]$data.in.2s ## restore function-internal plot parameters par(AP$par) ## add points inside 2-sigma range points(x = AP$data[[1]]$precision[in_2sigma], y = AP$data[[1]]$std.estimate.plot[in_2sigma], pch = 16) ## add points outside 2-sigma range points(x = AP$data[[1]]$precision[!in_2sigma], y = AP$data[[1]]$std.estimate.plot[!in_2sigma], pch = 1) } \section{How to cite}{ Dietze, M., Kreutzer, S., 2023. plot_AbanicoPlot(): Function to create an Abanico Plot.. Function version 0.1.17. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Galbraith, R. & Green, P., 1990. Estimating the component ages in a finite mixture. International Journal of Radiation Applications and Instrumentation. Part D. Nuclear Tracks and Radiation Measurements, 17 (3), 197-206. Dietze, M., Kreutzer, S., Burow, C., Fuchs, M.C., Fischer, M., Schmidt, C., 2015. 
The abanico plot: visualising chronometric data with individual standard errors. Quaternary Geochronology. doi:10.1016/j.quageo.2015.09.003 } \seealso{ \link{plot_RadialPlot}, \link{plot_KDE}, \link{plot_Histogram}, \link{plot_ViolinPlot} } \author{ Michael Dietze, GFZ Potsdam (Germany)\cr Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)\cr Inspired by a plot introduced by Galbraith & Green (1990) , RLum Developer Team} Luminescence/man/read_SPE2R.Rd0000644000176200001440000001016514521210045015551 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/read_SPE2R.R \name{read_SPE2R} \alias{read_SPE2R} \title{Import Princeton Instruments (TM) SPE-file into R} \usage{ read_SPE2R( file, output.object = "RLum.Data.Image", frame.range, txtProgressBar = TRUE, verbose = TRUE ) } \arguments{ \item{file}{\link{character} (\strong{required}): SPE-file name (including path), e.g. \itemize{ \item \verb{[WIN]}: \code{read_SPE2R("C:/Desktop/test.spe")} \item \verb{[MAC/LINUX]}: \code{readSPER("/User/test/Desktop/test.spe")}. Additionally internet connections are supported. }} \item{output.object}{\link{character} (\emph{with default}): set \code{RLum} output object. Allowed types are \code{"RLum.Data.Spectrum"}, \code{"RLum.Data.Image"} or \code{"matrix"}} \item{frame.range}{\link{vector} (\emph{optional}): limit frame range, e.g. select first 100 frames by \code{frame.range = c(1,100)}} \item{txtProgressBar}{\link{logical} (\emph{with default}): enables or disables \link{txtProgressBar}.} \item{verbose}{\link{logical} (\emph{with default}): enables or disables verbose mode} } \value{ Depending on the chosen option the functions returns three different type of objects: \code{output.object} \code{RLum.Data.Spectrum} An object of type \linkS4class{RLum.Data.Spectrum} is returned. Row sums are used to integrate all counts over one channel. 
\code{RLum.Data.Image} An object of type \linkS4class{RLum.Data.Image} is returned. Due to performance reasons the import is aborted for files containing more than 100 frames. This limitation can be overwritten manually by using the argument \code{frame.range}. \code{matrix} Returns a matrix of the form: Rows = Channels, columns = Frames. For the transformation the function \link{get_RLum} is used, meaning that the same results can be obtained by using the function \link{get_RLum} on an \code{RLum.Data.Spectrum} or \code{RLum.Data.Image} object. } \description{ Function imports Princeton Instruments (TM) SPE-files into R environment and provides \linkS4class{RLum.Data.Image} objects as output. } \details{ Function provides an R only import routine for the Princeton Instruments SPE format. Import functionality is based on the file format description provided by Princeton Instruments and a MatLab script written by Carl Hall (s. references). } \note{ \strong{The function does not test whether the input data are spectra or pictures for spatial resolved analysis!} The function has been successfully tested for SPE format versions 2.x. \emph{Currently not all information provided by the SPE format are supported.} } \section{Function version}{ 0.1.4 } \examples{ ## to run examples uncomment lines and run the code ##(1) Import data as RLum.Data.Spectrum object #file <- file.choose() #temp <- read_SPE2R(file) #temp ##(2) Import data as RLum.Data.Image object #file <- file.choose() #temp <- read_SPE2R(file, output.object = "RLum.Data.Image") #temp ##(3) Import data as matrix object #file <- file.choose() #temp <- read_SPE2R(file, output.object = "matrix") #temp ##(4) Export raw data to csv, if temp is a RLum.Data.Spectrum object # write.table(x = get_RLum(temp), # file = "[your path and filename]", # sep = ";", row.names = FALSE) } \section{How to cite}{ Kreutzer, S., 2023. read_SPE2R(): Import Princeton Instruments (TM) SPE-file into R. Function version 0.1.4. 
In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Princeton Instruments, 2014. Princeton Instruments SPE 3.0 File Format Specification, Version 1.A (for document URL please use an internet search machine) Hall, C., 2012: readSPE.m. \verb{https://www.mathworks.com/matlabcentral/fileexchange/35940-readspe} } \seealso{ \link{readBin}, \linkS4class{RLum.Data.Spectrum} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \keyword{IO} Luminescence/man/calc_HomogeneityTest.Rd0000644000176200001440000000445614521210045020042 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calc_HomogeneityTest.R \name{calc_HomogeneityTest} \alias{calc_HomogeneityTest} \title{Apply a simple homogeneity test after Galbraith (2003)} \usage{ calc_HomogeneityTest(data, log = TRUE, ...) } \arguments{ \item{data}{\linkS4class{RLum.Results} or \link{data.frame} (\strong{required}): for \link{data.frame}: two columns with De \code{(data[,1])} and De error \code{(values[,2])}} \item{log}{\link{logical} (\emph{with default}): perform the homogeneity test with (un-)logged data} \item{...}{further arguments (for internal compatibility only).} } \value{ Returns a terminal output. 
In addition an \linkS4class{RLum.Results}-object is returned containing the following elements: \item{summary}{\link{data.frame} summary of all relevant model results.} \item{data}{\link{data.frame} original input data} \item{args}{\link{list} used arguments} \item{call}{\link{call} the function call} The output should be accessed using the function \link{get_RLum} } \description{ A simple homogeneity test for De estimates } \details{ For details see Galbraith (2003). } \section{Function version}{ 0.3.0 } \examples{ ## load example data data(ExampleData.DeValues, envir = environment()) ## apply the homogeneity test calc_HomogeneityTest(ExampleData.DeValues$BT998) ## using the data presented by Galbraith (2003) df <- data.frame( x = c(30.1, 53.8, 54.3, 29.0, 47.6, 44.2, 43.1), y = c(4.8, 7.1, 6.8, 4.3, 5.2, 5.9, 3.0)) calc_HomogeneityTest(df) } \section{How to cite}{ Burow, C., Kreutzer, S., 2023. calc_HomogeneityTest(): Apply a simple homogeneity test after Galbraith (2003). Function version 0.3.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Galbraith, R.F., 2003. A simple homogeneity test for estimates of dose obtained using OSL. Ancient TL 21, 75-77. } \seealso{ \link{pchisq} } \author{ Christoph Burow, University of Cologne (Germany), Sebastian Kreutzer, IRAMAT-CRP2A, Université Bordeaux Montaigne (France) , RLum Developer Team} Luminescence/man/convert_XSYG2CSV.Rd0000644000176200001440000000373514521210045016720 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/convert_XSYG2CSV.R \name{convert_XSYG2CSV} \alias{convert_XSYG2CSV} \title{Export XSYG-file(s) to CSV-files} \usage{ convert_XSYG2CSV(file, ...) 
} \arguments{ \item{file}{\link{character} (\strong{required}): name of the XSYG-file to be converted to CSV-files} \item{...}{further arguments that will be passed to the function \link{read_XSYG2R} and \link{write_RLum2CSV}} } \value{ The function returns either a CSV-file (or many of them) or for the option \code{export = FALSE} a list comprising objects of type \link{data.frame} and \link{matrix} } \description{ This function is a wrapper function around the functions \link{read_XSYG2R} and \link{write_RLum2CSV} and it imports an XSYG-file and directly exports its content to CSV-files. If nothing is set for the argument \code{path} (\link{write_RLum2CSV}) the input folder will become the output folder. } \section{Function version}{ 0.1.0 } \examples{ ##transform XSYG-file values to a list data(ExampleData.XSYG, envir = environment()) convert_XSYG2CSV(OSL.SARMeasurement$Sequence.Object[1:10], export = FALSE) \dontrun{ ##select your BIN-file file <- file.choose() ##convert convert_XSYG2CSV(file) } } \seealso{ \linkS4class{RLum.Analysis}, \linkS4class{RLum.Data}, \linkS4class{RLum.Results}, \link[utils:write.table]{utils::write.table}, \link{write_RLum2CSV}, \link{read_XSYG2R} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. convert_XSYG2CSV(): Export XSYG-file(s) to CSV-files. Function version 0.1.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. 
https://CRAN.R-project.org/package=Luminescence } \keyword{IO} Luminescence/man/plot_OSLAgeSummary.Rd0000644000176200001440000000442014521210045017406 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot_OSLAgeSummary.R \name{plot_OSLAgeSummary} \alias{plot_OSLAgeSummary} \title{Plot Posterior OSL-Age Summary} \usage{ plot_OSLAgeSummary(object, level = 0.95, digits = 1L, verbose = TRUE, ...) } \arguments{ \item{object}{\linkS4class{RLum.Results}, \link{numeric} (\strong{required}): an object produced by \link{combine_De_Dr}. Alternatively, a \link{numeric} vector of a parameter from an MCMC process} \item{level}{\link{numeric} (\emph{with default}): probability of shown credible interval} \item{digits}{\link{integer} (\emph{with default}): number of digits considered for the calculation} \item{verbose}{\link{logical} (\emph{with default}): enable/disable additional terminal output} \item{...}{further arguments to modify the plot, supported: \code{xlim}, \code{ylim}, \code{xlab}, \code{ylab}, \code{main}, \code{lwd}, \code{lty}, \code{col}, \code{polygon_col}, \code{polygon_density}, \code{rug}} } \value{ A posterior distribution plot and an \linkS4class{RLum.Results} object with the credible interval. 
} \description{ A graphical summary of the statistical inference of an OSL age } \details{ The function is called automatically by \link{combine_De_Dr} } \section{Function version}{ 0.1.0 } \examples{ ##generate random data set.seed(1234) object <- rnorm(1000, 100, 10) plot_OSLAgeSummary(object) } \seealso{ \link{combine_De_Dr}, \link{plot.default}, \link[rjags:rjags-package]{rjags::rjags} } \author{ Anne Philippe, Université de Nantes (France), Jean-Michel Galharret, Université de Nantes (France), Norbert Mercier, IRAMAT-CRP2A, Université Bordeaux Montaigne (France), Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Philippe, A., Galharret, J., Mercier, N., Kreutzer, S., 2023. plot_OSLAgeSummary(): Plot Posterior OSL-Age Summary. Function version 0.1.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \keyword{dplot} \keyword{hplot} Luminescence/man/extract_ROI.Rd0000644000176200001440000000611014521210045016101 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/extract_ROI.R \name{extract_ROI} \alias{extract_ROI} \title{Extract Pixel Values through Circular Region-of-Interests (ROI) from an Image} \usage{ extract_ROI(object, roi, roi_summary = "mean", plot = FALSE) } \arguments{ \item{object}{\linkS4class{RLum.Data.Image}, \link{array} or \link{matrix} (\strong{required}): input image data} \item{roi}{\link{matrix} (\strong{required}): matrix with three columns containing the centre coordinates of the ROI (first two columns) and the diameter of the circular ROI. 
All numbers must be of type \link{integer}
} \section{Function version}{ 0.1.0 } \examples{ m <- matrix(runif(100,0,255), ncol = 10, nrow = 10) roi <- matrix(c(2.,4,2,5,6,7,3,1,1), ncol = 3) extract_ROI(object = m, roi = roi, plot = TRUE) } \seealso{ \linkS4class{RLum.Data.Image} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. extract_ROI(): Extract Pixel Values through Circular Region-of-Interests (ROI) from an Image. Function version 0.1.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \keyword{manip} Luminescence/man/combine_De_Dr.Rd0000644000176200001440000002123314521210045016372 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/combine_De_Dr.R \name{combine_De_Dr} \alias{combine_De_Dr} \title{Combine Dose Rate and Equivalent Dose Distribution} \usage{ combine_De_Dr( De, s, Dr, int_OD, Age_range = c(1, 300), outlier_threshold = 0.05, outlier_method = "default", outlier_analysis_plot = FALSE, method_control = list(), par_local = TRUE, verbose = TRUE, plot = TRUE, ... ) } \arguments{ \item{De}{\link{numeric} (\strong{required}): a equivalent dose sample} \item{s}{\link{numeric} (\strong{required}): a vector of measurement errors on the equivalent dose} \item{Dr}{\link{numeric} (\strong{required}): a dose rate sample} \item{int_OD}{\link{numeric} (\strong{required}): the intrinsic overdispersion, typically the standard deviation characterizing a dose-recovery test distribution} \item{Age_range}{\link{numeric} (\emph{with default}): the age range to be investigated by the algorithm, the larger the value the more iterations are needed and the longer it takes. 
Should not be set too narrow, cut the algorithm some slack.} \item{outlier_threshold}{\link{numeric} (\emph{with default}): the required significance level used for the outlier detection. If set to \code{1}, no outliers are removed. If \code{outlier_method = "RousseeuwCroux1993"}, the median distance is used as outlier threshold. Please see details for further information.} \item{outlier_method}{\link{character} (\emph{with default}): select the outlier detection method, either \code{"default"} or \code{"RousseeuwCroux1993"}. See details for further information.} \item{outlier_analysis_plot}{\link{logical} (\emph{with default}): enables/disables the outlier analysis plot. Note: the outlier analysis will happen with or without plot output} \item{method_control}{\link{list} (\emph{with default}): named \link{list} of further parameters passed down to the \link[rjags:rjags-package]{rjags::rjags} modelling} \item{par_local}{\link{logical} (\emph{with default}): if set to \code{TRUE} the function uses its own \link[graphics:par]{graphics::par} settings (which will end in two plots next to each other)} \item{verbose}{\link{logical} (\emph{with default}): enable/disable terminal feedback} \item{plot}{\link{logical} (\emph{with default}): enable/disable plot output} \item{...}{a few further arguments to fine-tune the plot output such as \code{cdf_ADr_quantiles} (\code{TRUE}/\code{FALSE}), \code{legend.pos}, \code{legend} (\code{TRUE}/\code{FALSE})} } \value{ The function returns a plot if \code{plot = TRUE} and an \linkS4class{RLum.Results} object with the following slots: \verb{@data}\cr \code{.. $Ages}: a \link{numeric} vector with the modelled ages to be further analysed or visualised\cr \code{.. $Ages_stats}: a \link{data.frame} with sum HPD, CI 68\% and CI 95\% for the ages \cr \code{.. $outliers_index}: the index with the detected outliers\cr \code{.. $cdf_ADr_mean} : empirical cumulative density distribution A * Dr (mean)\cr \code{.. 
$cdf_ADr_quantiles} : empirical cumulative density distribution A * Dr (quantiles .025,.975)\cr \code{.. $cdf_De_no_outlier} : empirical cumulative density distribution of the De with no outliers\cr \code{.. $cdf_De_initial} : empirical cumulative density distribution of the initial De\cr \code{.. $mcmc_IAM} : the MCMC list of the Individual Age Model, only if \code{method_control = list(return_mcmc = TRUE)}, otherwise \code{NULL}\cr \code{.. $mcmc_BCAM} : the MCMC list of the Bayesian Central Age Model, only if \code{method_control = list(return_mcmc = TRUE)}, otherwise \code{NULL}\cr
Note: \code{method_control} expects a \strong{named} list of parameters \tabular{llll}{ \strong{PARAMETER} \tab \strong{TYPE} \tab \strong{DEFAULT} \tab \strong{REMARKS} \cr \code{variable.names_IAM} \tab \link{character} \tab \code{c('A', 'a', 'sig_a')} \tab variables names to be monitored in the modelling process using the internal function \code{.calc_IndividualAgeModel()}\cr \code{variable.names_BCAM} \tab \link{character} \tab \code{c('A', 'D_e')} \tab variables names to be monitored in the modelling process using the internal function \code{.calc_BayesianCentraAgelModel()}\cr \code{n.chains} \tab \link{integer} \tab \code{4} \tab number of MCMC chains\cr \code{n.adapt} \tab \link{integer} \tab \code{1000} \tab number of iterations for the adaptation\cr \code{n.iter} \tab \link{integer} \tab \code{5000} \tab number of iterations to monitor cf. \link[rjags:coda.samples]{rjags::coda.samples}\cr \code{thin} \tab \link{numeric} \tab \code{1} \tab thinning interval for the monitoring cf. \link[rjags:coda.samples]{rjags::coda.samples}\cr \code{diag} \tab \link{logical} \tab \code{FALSE} \tab additional terminal convergence diagnostic. \code{FALSE} if \code{verbose = FALSE}\cr \code{progress.bar} \tab \link{logical} \tab \code{FALSE} \tab enable/disable progress bar. \code{FALSE} if \code{verbose = FALSE}\cr \code{quiet} \tab \link{logical} \tab \code{TRUE} \tab silence terminal output. 
Set to \code{TRUE} if \code{verbose = FALSE}\cr \code{return_mcmc}\tab \link{logical} \tab \code{FALSE} \tab return additional MCMC diagnostic information\cr } } \section{Function version}{ 0.1.0 } \examples{ ## set parameters Dr <- stats::rlnorm (1000, 0, 0.3) De <- 50*sample(Dr, 50, replace = TRUE) s <- stats::rnorm(50, 10, 2) ## run modelling ## note: modify parameters for more realistic results \dontrun{ results <- combine_De_Dr( Dr = Dr, int_OD = 0.1, De, s, Age_range = c(0,100), method_control = list( n.iter = 100, n.chains = 1)) ## show models used writeLines(results@info$model_IAM) writeLines(results@info$model_BCAM) } } \section{How to cite}{ Philippe, A., Galharret, J., Mercier, N., Kreutzer, S., 2023. combine_De_Dr(): Combine Dose Rate and Equivalent Dose Distribution. Function version 0.1.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Mercier, N., Galharret, J.-M., Tribolo, C., Kreutzer, S., Philippe, A., preprint. Luminescence age calculation through Bayesian convolution of equivalent dose and dose-rate distributions: the De_Dr model. Geochronology, 1-22. Galharret, J-M., Philippe, A., Mercier, N., preprint. Detection of outliers with a Bayesian hierarchical model: application to the single-grain luminescence dating method. Electronic Journal of Applied Statistics \strong{Further reading} Rousseeuw, P.J., Croux, C., 1993. Alternatives to the median absolute deviation. Journal of the American Statistical Association 88, 1273–1283. \doi{10.2307/2291267} Rousseeuw, P.J., Debruyne, M., Engelen, S., Hubert, M., 2006. Robustness and outlier detection in chemometrics. Critical Reviews in Analytical Chemistry 36, 221–242. 
\doi{10.1080/10408340600969403} } \seealso{ \link{plot_OSLAgeSummary}, \link[rjags:rjags-package]{rjags::rjags}, \link{mclust-package} } \author{ Anne Philippe, Université de Nantes (France), Jean-Michel Galharret, Université de Nantes (France), Norbert Mercier, IRAMAT-CRP2A, Université Bordeaux Montaigne (France), Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \keyword{datagen} \keyword{distribution} \keyword{dplot} Luminescence/man/plot_ViolinPlot.Rd0000644000176200001440000001043414521210045017057 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot_ViolinPlot.R \name{plot_ViolinPlot} \alias{plot_ViolinPlot} \title{Create a violin plot} \usage{ plot_ViolinPlot( data, boxplot = TRUE, rug = TRUE, summary = NULL, summary.pos = "sub", na.rm = TRUE, ... ) } \arguments{ \item{data}{\link{numeric} or \linkS4class{RLum.Results} (\strong{required}): input data for plotting. Alternatively a \link{data.frame} or a \link{matrix} can be provided, but only the first column will be considered by the function} \item{boxplot}{\link{logical} (\emph{with default}): enable or disable boxplot} \item{rug}{\link{logical} (\emph{with default}): enable or disable rug} \item{summary}{\link{character} (\emph{optional}): add statistic measures of centrality and dispersion to the plot. Can be one or more of several keywords. See details for available keywords.} \item{summary.pos}{\link{numeric} or \link{character} (\emph{with default}): optional position keywords (cf. \link{legend}) for the statistical summary. Alternatively, the keyword \code{"sub"} may be specified to place the summary below the plot header. 
However, this latter option is only possible if \code{mtext} is not used.} \item{na.rm}{\link{logical} (\emph{with default}): exclude NA values from the data set prior to any further operations.} \item{...}{further arguments and graphical parameters passed to \link{plot.default}, \link[stats:density]{stats::density} and \link{boxplot}. See details for further information} } \description{ Draws a kernel density plot in combination with a boxplot in its middle. The shape of the violin is constructed using a mirrored density curve. This plot is especially designed for cases where the individual errors are zero or too small to be visualised. The idea for this plot is based on the 'volcano plot' in the ggplot2 package by Hadley Wickham and Winston Chang. The general idea for the violin plot seems to have been introduced by Hintze and Nelson (1998). The function is passing several arguments to the function \link{plot}, \link[stats:density]{stats::density}, \link[graphics:boxplot]{graphics::boxplot}: Supported arguments are: \code{xlim}, \code{main}, \code{xlab}, \code{ylab}, \code{col.violin}, \code{col.boxplot}, \code{mtext}, \code{cex}, \code{mtext} \strong{\verb{Valid summary keywords}} \code{'n'}, \code{'mean'}, \code{'median'}, \code{'sd.abs'}, \code{'sd.rel'}, \code{'se.abs'}, \code{'se.rel'}, \code{'skewness'}, \code{'kurtosis'} } \note{ Although the code for this function was developed independently and just the idea for the plot was based on the 'ggplot2' package plot type 'volcano', it should be mentioned that, beyond this, two other R packages exist providing a possibility to produce this kind of plot, namely: \code{'vioplot'} and \code{'violinmplot'} (see references for details). 
} \section{Function version}{ 0.1.4 } \examples{ ## read example data set data(ExampleData.DeValues, envir = environment()) ExampleData.DeValues <- Second2Gray(ExampleData.DeValues$BT998, c(0.0438,0.0019)) ## create plot straightforward plot_ViolinPlot(data = ExampleData.DeValues) } \section{How to cite}{ Kreutzer, S., 2023. plot_ViolinPlot(): Create a violin plot. Function version 0.1.4. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Daniel Adler (2005). vioplot: A violin plot is a combination of a box plot and a kernel density plot. R package version 0.2 http://CRAN.R-project.org/package=vioplot Hintze, J.L., Nelson, R.D., 1998. A Box Plot-Density Trace Synergism. The American Statistician 52, 181-184. Raphael W. Majeed (2012). violinmplot: Combination of violin plot with mean and standard deviation. R package version 0.2.1. http://CRAN.R-project.org/package=violinmplot Wickham, H. (2009). ggplot2: elegant graphics for data analysis. Springer New York. 
} \seealso{ \link[stats:density]{stats::density}, \link{plot}, \link{boxplot}, \link{rug}, \link{calc_Statistics} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} Luminescence/man/Luminescence-package.Rd0000644000176200001440000001032314521210044017721 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Luminescence-package.R \docType{package} \name{Luminescence-package} \alias{Luminescence-package} \alias{Luminescence} \title{Comprehensive Luminescence Dating Data Analysis\cr \if{html}{ \figure{Luminescence_logo.png}{options: width="75" alt="r-luminescence.org"} }} \description{ A collection of various R functions for the purpose of luminescence dating data analysis. This includes, amongst others, data import, export, application of age models, curve deconvolution, sequence analysis and plotting of equivalent dose distributions. } \details{ \strong{Supervisor of the initial version in 2012} Markus Fuchs, Justus-Liebig-University Giessen, Germany \strong{Support contact} \itemize{ \item \email{developers@r-luminescence.org} \item \url{https://github.com/R-Lum/Luminescence/discussions} } \strong{Bug reporting} \itemize{ \item \email{developers@r-luminescence.org} or \item \url{https://github.com/R-Lum/Luminescence/issues} } \strong{Project website} \itemize{ \item \url{https://r-luminescence.org} } \strong{Project source code repository} \itemize{ \item \url{https://github.com/R-Lum/Luminescence} } \strong{Related package projects} \itemize{ \item \url{https://cran.r-project.org/package=RLumShiny} \item \url{https://cran.r-project.org/package=RLumModel} \item \url{https://cran.r-project.org/package=RLumCarlo} } \strong{Package maintainer} Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany),\cr \email{sebastian.kreutzer@aber.ac.uk} \strong{Funding} 2011-2013: The initial version of the package was developed, while Sebastian Kreutzer was 
funded through the DFG programme "Rekonstruktion der Umweltbedingungen des Spätpleistozäns in Mittelsachsen anhand von Löss-Paläobodensequenzen" (DFG id: 46526743) 2014-2018: Cooperation and personal exchange between the developers is gratefully funded by the DFG (SCHM 3051/3-1) in the framework of the program "Scientific Networks". Project title: "RLum.Network: Ein Wissenschaftsnetzwerk zur Analyse von Lumineszenzdaten mit R" (2014-2018) 05/2014-12/2019: The work of Sebastian Kreutzer as maintainer of the package was supported by LabEx LaScArBx (ANR - n. ANR-10-LABX-52). 01/2020-04/2022: Sebastian Kreutzer as maintainer of the package has received funding from the European Union’s Horizon 2020 research and innovation programme under the Marie Skłodowska-Curie grant agreement No 844457 (CREDit), and could continue maintaining the package. } \references{ Dietze, M., Kreutzer, S., Fuchs, M.C., Burow, C., Fischer, M., Schmidt, C., 2013. A practical guide to the R package Luminescence. Ancient TL, 31 (1), 11-18. Dietze, M., Kreutzer, S., Burow, C., Fuchs, M.C., Fischer, M., Schmidt, C., 2016. The abanico plot: visualising chronometric data with individual standard errors. Quaternary Geochronology 31, 1-7. https://doi.org/10.1016/j.quageo.2015.09.003 Fuchs, M.C., Kreutzer, S., Burow, C., Dietze, M., Fischer, M., Schmidt, C., Fuchs, M., 2015. Data processing in luminescence dating analysis: An exemplary workflow using the R package 'Luminescence'. Quaternary International, 362,8-13. https://doi.org/10.1016/j.quaint.2014.06.034 Kreutzer, S., Schmidt, C., Fuchs, M.C., Dietze, M., Fischer, M., Fuchs, M., 2012. Introducing an R package for luminescence dating analysis. Ancient TL, 30 (1), 1-8. Mercier, N., Kreutzer, S., Christophe, C., Guérin, G., Guibert, P., Lahaye, C., Lanos, P., Philippe, A., Tribolo, C., 2016. Bayesian statistics in luminescence dating: The 'baSAR'-model and its implementation in the R package ’Luminescence’. Ancient TL 34 (2), 14-21. 
Mercier, N., Galharret, J.-M., Tribolo, C., Kreutzer, S., Philippe, A., 2022. Luminescence age calculation through Bayesian convolution of equivalent dose and dose-rate distributions: the De_Dr model. Geochronology 4, 297–310. https://doi.org/10.5194/gchron-4-297-2022 Smedley, R.K., 2015. A new R function for the Internal External Uncertainty (IEU) model. Ancient TL, 33 (1), 16-21. King, E.G., Burow, C., Roberts, H., Pearce, N.J.G., 2018. Age determination using feldspar: evaluating fading-correction model performance. Radiation Measurements 119, 58-73. https://doi.org/10.1016/j.radmeas.2018.07.013 } \keyword{package} Luminescence/man/names_RLum.Rd0000644000176200001440000000331714521210045015766 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/names_RLum.R \name{names_RLum} \alias{names_RLum} \alias{names_RLum,list-method} \title{S4-names function for RLum S4 class objects} \usage{ names_RLum(object) \S4method{names_RLum}{list}(object) } \arguments{ \item{object}{\linkS4class{RLum} (\strong{required}): S4 object of class \code{RLum}} } \value{ Returns a \link{character} } \description{ Function calls object-specific names functions for RLum S4 class objects. } \details{ The function provides a generalised access point for specific \linkS4class{RLum} objects.\cr Depending on the input object, the corresponding 'names' function will be selected. Allowed arguments can be found in the documentations of the corresponding \linkS4class{RLum} class. 
} \section{Functions}{ \itemize{ \item \code{names_RLum(list)}: Returns a list of \linkS4class{RLum} objects that had been passed to \link{names_RLum} }} \section{Function version}{ 0.1.0 } \seealso{ \linkS4class{RLum.Data.Curve}, \linkS4class{RLum.Data.Image}, \linkS4class{RLum.Data.Spectrum}, \linkS4class{RLum.Analysis}, \linkS4class{RLum.Results} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. names_RLum(): S4-names function for RLum S4 class objects. Function version 0.1.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \keyword{utilities} Luminescence/man/ExampleData.Al2O3C.Rd0000644000176200001440000000333114521210044017026 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Luminescence-package.R \name{ExampleData.Al2O3C} \alias{ExampleData.Al2O3C} \alias{data_CrossTalk} \alias{data_ITC} \title{Example Al2O3:C Measurement Data} \format{ Two datasets comprising \linkS4class{RLum.Analysis} data imported using the function \link{read_XSYG2R} \describe{ \code{data_ITC}: Measurement data to determine the irradiation time correction, the data can be analysed with the function \link{analyse_Al2O3C_ITC} \code{data_CrossTalk}: Measurement data obtained while estimating the irradiation cross-talk of the reader used for the experiments. The data can be analysed either with the function \link{analyse_Al2O3C_CrossTalk} or \link{analyse_Al2O3C_Measurement} } } \description{ Measurement data obtained from measuring Al2O3:C chips at the IRAMAT-CRP2A, Université Bordeaux Montaigne in 2017 on a Freiberg Instruments lexsyg SMART reader. 
The example data are used in particular to allow testing of the functions developed in the framework of the work by Kreutzer et al., 2018. } \note{ From both datasets unneeded curves have been removed and the number of aliquots has been reduced to a required minimum to keep the file size small, but still being able to run the corresponding functions. } \examples{ ##(1) curves data(ExampleData.Al2O3C, envir = environment()) plot_RLum(data_ITC[1:2]) } \references{ Kreutzer, S., Martin, L., Guérin, G., Tribolo, C., Selva, P., Mercier, N., 2018. Environmental Dose Rate Determination Using a Passive Dosimeter: Techniques and Workflow for alpha-Al2O3:C Chips. Geochronometria 45, 56–67. } \seealso{ \link{analyse_Al2O3C_ITC}, \link{analyse_Al2O3C_CrossTalk}, \link{analyse_Al2O3C_Measurement} } \keyword{datasets} Luminescence/man/ExampleData.RLum.Data.Image.Rd0000644000176200001440000000203214521210044020650 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Luminescence-package.R \name{ExampleData.RLum.Data.Image} \alias{ExampleData.RLum.Data.Image} \title{Example data as \linkS4class{RLum.Data.Image} objects} \format{ Object of class \linkS4class{RLum.Data.Image} } \source{ \strong{ExampleData.RLum.Data.Image} These data were kindly provided by Regina DeWitt. \tabular{ll}{ Lab.: \tab Department of Physics, East-Carolina University, NC, USA\cr Lab-Code: \tab - \cr Location: \tab - \cr Material: \tab - \cr Reference: \tab - \cr } Image data is a measurement of fluorescent ceiling lights with a cooled Princeton Instruments (TM) camera fitted on a Risø DA-20 TL/OSL reader. } \description{ Measurement of a Princeton Instruments camera imported with the function \link{read_SPE2R} to R to produce an \linkS4class{RLum.Data.Image} object. 
} \section{Version}{ 0.1 } \examples{ ##load data data(ExampleData.RLum.Data.Image, envir = environment()) ##plot data plot_RLum(ExampleData.RLum.Data.Image) } \keyword{datasets} Luminescence/man/CW2pPMi.Rd0000644000176200001440000001305614521210045015106 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/CW2pPMi.R \name{CW2pPMi} \alias{CW2pPMi} \title{Transform a CW-OSL curve into a pPM-OSL curve via interpolation under parabolic modulation conditions} \usage{ CW2pPMi(values, P) } \arguments{ \item{values}{\linkS4class{RLum.Data.Curve} or \link{data.frame} (\strong{required}): \linkS4class{RLum.Data.Curve} or \code{data.frame} with measured curve data of type stimulation time (t) (\code{values[,1]}) and measured counts (cts) (\code{values[,2]})} \item{P}{\link{vector} (\emph{optional}): stimulation period in seconds. If no value is given, the optimal value is estimated automatically (see details). Greater values of P produce more points in the rising tail of the curve.} } \value{ The function returns the same data type as the input data type with the transformed curve values. \code{RLum.Data.Curve} \tabular{rl}{ \verb{$CW2pPMi.x.t} \tab: transformed time values \cr \verb{$CW2pPMi.method} \tab: used method for the production of the new data points } \code{data.frame} \tabular{rl}{ \verb{$x} \tab: time\cr \verb{$y.t} \tab: transformed count values\cr \verb{$x.t} \tab: transformed time values \cr \verb{$method} \tab: used method for the production of the new data points } } \description{ Transforms a conventionally measured continuous-wave (CW) OSL-curve into a pseudo parabolic modulated (pPM) curve under parabolic modulation conditions using the interpolation procedure described by Bos & Wallinga (2012). } \details{ The complete procedure of the transformation is given in Bos & Wallinga (2012). 
The input \code{data.frame} consists of two columns: time (t) and count values (CW(t)) \strong{Nomenclature} \itemize{ \item P = stimulation time (s) \item 1/P = stimulation rate (1/s) } \strong{Internal transformation steps} (1) log(CW-OSL) values (2) Calculate t' which is the transformed time: \deqn{t' = (1/3)*(1/P^2)t^3} (3) Interpolate CW(t'), i.e. use the log(CW(t)) to obtain the count values for the transformed time (t'). Values beyond \code{min(t)} and \code{max(t)} produce \code{NA} values. (4) Select all values for t' < \code{min(t)}, i.e. values beyond the time resolution of t. Select the first two values of the transformed data set which contain no \code{NA} values and use these values for a linear fit using \link{lm}. (5) Extrapolate values for t' < \code{min(t)} based on the previously obtained fit parameters. The extrapolation is limited to two values. Other values at the beginning of the transformed curve are set to 0. (6) Transform values using \deqn{pLM(t) = t^2/P^2*CW(t')} (7) Combine all values and truncate all values for t' > \code{max(t)} \strong{NOTE:} The number of values for t' < \code{min(t)} depends on the stimulation period \code{P}. To avoid the production of too many artificial data at the raising tail of the determined pPM curve, it is recommended to use the automatic estimation routine for \code{P}, i.e. provide no value for \code{P}. } \note{ According to Bos & Wallinga (2012), the number of extrapolated points should be limited to avoid artificial intensity data. If \code{P} is provided manually, not more than two points are extrapolated. } \section{Function version}{ 0.2.1 } \examples{ ##(1) ##load CW-OSL curve data data(ExampleData.CW_OSL_Curve, envir = environment()) ##transform values values.transformed <- CW2pPMi(ExampleData.CW_OSL_Curve) ##plot plot(values.transformed$x,values.transformed$y.t, log = "x") ##(2) - produce Fig. 
4 from Bos & Wallinga (2012) ##load data data(ExampleData.CW_OSL_Curve, envir = environment()) values <- CW_Curve.BosWallinga2012 ##open plot area plot(NA, NA, xlim = c(0.001,10), ylim = c(0,8000), ylab = "pseudo OSL (cts/0.01 s)", xlab = "t [s]", log = "x", main = "Fig. 4 - Bos & Wallinga (2012)") values.t <- CW2pLMi(values, P = 1/20) lines(values[1:length(values.t[,1]),1],CW2pLMi(values, P = 1/20)[,2], col = "red",lwd = 1.3) text(0.03,4500,"LM", col = "red", cex = .8) values.t <- CW2pHMi(values, delta = 40) lines(values[1:length(values.t[,1]),1], CW2pHMi(values, delta = 40)[,2], col = "black", lwd = 1.3) text(0.005,3000,"HM", cex = .8) values.t <- CW2pPMi(values, P = 1/10) lines(values[1:length(values.t[,1]),1], CW2pPMi(values, P = 1/10)[,2], col = "blue", lwd = 1.3) text(0.5,6500,"PM", col = "blue", cex = .8) } \section{How to cite}{ Kreutzer, S., 2023. CW2pPMi(): Transform a CW-OSL curve into a pPM-OSL curve via interpolation under parabolic modulation conditions. Function version 0.2.1. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Bos, A.J.J. & Wallinga, J., 2012. How to visualize quartz OSL signal components. Radiation Measurements, 47, 752-758. \strong{Further Reading} Bulur, E., 1996. An Alternative Technique For Optically Stimulated Luminescence (OSL) Experiment. Radiation Measurements, 26, 701-709. Bulur, E., 2000. A simple transformation for converting CW-OSL curves to LM-OSL curves. Radiation Measurements, 32, 141-145. 
} \seealso{ \link{CW2pLM}, \link{CW2pLMi}, \link{CW2pHMi}, \link{fit_LMCurve}, \linkS4class{RLum.Data.Curve} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) Based on comments and suggestions from:\cr Adrie J.J. Bos, Delft University of Technology, The Netherlands , RLum Developer Team} \keyword{manip} Luminescence/man/CW2pLM.Rd0000644000176200001440000000610514521210045014726 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/CW2pLM.R \name{CW2pLM} \alias{CW2pLM} \title{Transform a CW-OSL curve into a pLM-OSL curve} \usage{ CW2pLM(values) } \arguments{ \item{values}{\linkS4class{RLum.Data.Curve} or \link{data.frame} (\strong{required}): \code{RLum.Data.Curve} data object. Alternatively, a \code{data.frame} of the measured curve data of type stimulation time (t) (\code{values[,1]}) and measured counts (cts) (\code{values[,2]}) can be provided.} } \value{ The function returns the same data type as the input data type with the transformed curve values (\link{data.frame} or \linkS4class{RLum.Data.Curve}). } \description{ Transforms a conventionally measured continuous-wave (CW) curve into a pseudo linearly modulated (pLM) curve using the equations given in Bulur (2000). } \details{ According to Bulur (2000) the curve data are transformed by introducing two new parameters \code{P} (stimulation period) and \code{u} (transformed time): \deqn{P=2*max(t)} \deqn{u=\sqrt{(2*t*P)}} The new count values are then calculated by \deqn{ctsNEW = cts(u/P)} and the returned \code{data.frame} is produced by: \code{data.frame(u,ctsNEW)} The output of the function can be further used for LM-OSL fitting. } \note{ The transformation is recommended for curves recorded with a channel resolution of at least 0.05 s/channel. 
} \section{Function version}{ 0.4.1 } \examples{ ##read curve from CWOSL.SAR.Data transform curve and plot values data(ExampleData.BINfileData, envir = environment()) ##read id for the 1st OSL curve id.OSL <- CWOSL.SAR.Data@METADATA[CWOSL.SAR.Data@METADATA[,"LTYPE"] == "OSL","ID"] ##produce x and y (time and count data for the data set) x<-seq(CWOSL.SAR.Data@METADATA[id.OSL[1],"HIGH"]/CWOSL.SAR.Data@METADATA[id.OSL[1],"NPOINTS"], CWOSL.SAR.Data@METADATA[id.OSL[1],"HIGH"], by = CWOSL.SAR.Data@METADATA[id.OSL[1],"HIGH"]/CWOSL.SAR.Data@METADATA[id.OSL[1],"NPOINTS"]) y <- unlist(CWOSL.SAR.Data@DATA[id.OSL[1]]) values <- data.frame(x,y) ##transform values values.transformed <- CW2pLM(values) ##plot plot(values.transformed) } \section{How to cite}{ Kreutzer, S., 2023. CW2pLM(): Transform a CW-OSL curve into a pLM-OSL curve. Function version 0.4.1. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Bulur, E., 2000. A simple transformation for converting CW-OSL curves to LM-OSL curves. Radiation Measurements, 32, 141-145. \strong{Further Reading} Bulur, E., 1996. An Alternative Technique For Optically Stimulated Luminescence (OSL) Experiment. Radiation Measurements, 26, 701-709. 
} \seealso{ \link{CW2pHMi}, \link{CW2pLMi}, \link{CW2pPMi}, \link{fit_LMCurve}, \link{lm}, \linkS4class{RLum.Data.Curve} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \keyword{manip} Luminescence/man/ExampleData.CobbleData.Rd0000644000176200001440000000120514521210044020061 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Luminescence-package.R \name{ExampleData.CobbleData} \alias{ExampleData.CobbleData} \title{Example data for calc_CobbleDoseRate()} \format{ A \code{\link{data.frame}}. Please see \link{calc_CobbleDoseRate} for detailed information on the structure of the \link{data.frame}. } \description{ An example data set for the function \link{calc_CobbleDoseRate} containing layer specific information for the cobble to be used in the function. } \section{Version}{ 0.1.0 } \examples{ ## Load data data("ExampleData.CobbleData", envir = environment()) } \keyword{datasets} Luminescence/man/merge_RLum.Analysis.Rd0000644000176200001440000000470114521210045017542 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/merge_RLum.Analysis.R \name{merge_RLum.Analysis} \alias{merge_RLum.Analysis} \title{Merge function for RLum.Analysis S4 class objects} \usage{ merge_RLum.Analysis(objects) } \arguments{ \item{objects}{\link{list} of \linkS4class{RLum.Analysis} (\strong{required}): list of S4 objects of class \code{RLum.Analysis}. Furthermore other objects of class \linkS4class{RLum} can be added, see details.} } \value{ Return an \linkS4class{RLum.Analysis} object. } \description{ Function allows merging of RLum.Analysis objects and adding of allowed objects to an RLum.Analysis. } \details{ This function simply allowing to merge \linkS4class{RLum.Analysis} objects. Additionally other \linkS4class{RLum} objects can be added to an existing \linkS4class{RLum.Analysis} object. 
Supported objects to be added are: \linkS4class{RLum.Data.Curve}, \linkS4class{RLum.Data.Spectrum} and \linkS4class{RLum.Data.Image}. The order in the new \linkS4class{RLum.Analysis} object is the object order provided with the input list. } \note{ The information for the slot 'protocol' is taken from the first \linkS4class{RLum.Analysis} object in the input list. Therefore at least one object of type \linkS4class{RLum.Analysis} has to be provided. } \section{Function version}{ 0.2.0 } \examples{ ##merge different RLum objects from the example data data(ExampleData.RLum.Analysis, envir = environment()) data(ExampleData.BINfileData, envir = environment()) object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos=1) curve <- get_RLum(object)[[2]] temp.merged <- merge_RLum.Analysis(list(curve, IRSAR.RF.Data, IRSAR.RF.Data)) } \seealso{ \link{merge_RLum}, \linkS4class{RLum.Analysis}, \linkS4class{RLum.Data.Curve}, \linkS4class{RLum.Data.Spectrum}, \linkS4class{RLum.Data.Image}, \linkS4class{RLum} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} \section{How to cite}{ Kreutzer, S., 2023. merge_RLum.Analysis(): Merge function for RLum.Analysis S4 class objects. Function version 0.2.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. 
https://CRAN.R-project.org/package=Luminescence } \keyword{internal} \keyword{utilities} Luminescence/man/Second2Gray.Rd0000644000176200001440000001035214521210045016041 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Second2Gray.R \name{Second2Gray} \alias{Second2Gray} \title{Converting equivalent dose values from seconds (s) to Gray (Gy)} \usage{ Second2Gray(data, dose.rate, error.propagation = "omit") } \arguments{ \item{data}{\link{data.frame} (\strong{required}): input values, structure: data (\code{values[,1]}) and data error (\code{values [,2]}) are required} \item{dose.rate}{\linkS4class{RLum.Results}, \link{data.frame} or \link{numeric} (\strong{required}): \code{RLum.Results} needs to be originated from the function \link{calc_SourceDoseRate}, for \code{vector} dose rate in Gy/s and dose rate error in Gy/s} \item{error.propagation}{\link{character} (\emph{with default}): error propagation method used for error calculation (\code{omit}, \code{gaussian} or \code{absolute}), see details for further information} } \value{ Returns a \link{data.frame} with converted values. } \description{ Conversion of absorbed radiation dose in seconds (s) to the SI unit Gray (Gy) including error propagation. Normally used for equivalent dose data. } \details{ Calculation of De values from seconds (s) to Gray (Gy) \deqn{De [Gy] = De [s] * Dose Rate [Gy/s])} Provided calculation error propagation methods for error calculation (with \code{'se'} as the standard error and \code{'DR'} of the dose rate of the beta-source): \strong{(1) \code{omit}} (default) \deqn{se(De) [Gy] = se(De) [s] * DR [Gy/s]} In this case the standard error of the dose rate of the beta-source is treated as systematic (i.e. non-random), it error propagation is omitted. However, the error must be considered during calculation of the final age. (cf. Aitken, 1985, pp. 242). 
This approach can be seen as method (2) (gaussian) for the case that the (random) standard error of the beta-source calibration is 0. Which particular method is requested depends on the situation and cannot be prescriptive. \strong{(2) \code{gaussian}} error propagation \deqn{se(De) [Gy] = \sqrt((DR [Gy/s] * se(De) [s])^2 + (De [s] * se(DR) [Gy/s])^2)} Applicable under the assumption that errors of \code{De} and \code{se} are uncorrelated. \strong{(3) \code{absolute}} error propagation \deqn{se(De) [Gy] = abs(DR [Gy/s] * se(De) [s]) + abs(De [s] * se(DR) [Gy/s])} Applicable under the assumption that errors of \code{De} and \code{se} are correlated. } \note{ If no or a wrong error propagation method is given, the execution of the function is stopped. Furthermore, if a \code{data.frame} is provided for the dose rate values, it has to be of the same length as the data frame provided with the argument \code{data} } \section{Function version}{ 0.6.0 } \examples{ ##(A) for known source dose rate at date of measurement ## - load De data from the example data help file data(ExampleData.DeValues, envir = environment()) ## - convert De(s) to De(Gy) Second2Gray(ExampleData.DeValues$BT998, c(0.0438,0.0019)) ##(B) for source dose rate calibration data ## - calculate source dose rate first dose.rate <- calc_SourceDoseRate(measurement.date = "2012-01-27", calib.date = "2014-12-19", calib.dose.rate = 0.0438, calib.error = 0.0019) # read example data data(ExampleData.DeValues, envir = environment()) # apply dose.rate to convert De(s) to De(Gy) Second2Gray(ExampleData.DeValues$BT998, dose.rate) } \section{How to cite}{ Kreutzer, S., Dietze, M., Fuchs, M.C., 2023. Second2Gray(): Converting equivalent dose values from seconds (s) to Gray (Gy). Function version 0.6.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. 
Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Aitken, M.J., 1985. Thermoluminescence dating. Academic Press. } \seealso{ \link{calc_SourceDoseRate} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)\cr Michael Dietze, GFZ Potsdam (Germany)\cr Margret C. Fuchs, HZDR, Helmholtz-Institute Freiberg for Resource Technology (Germany) , RLum Developer Team} \keyword{manip} Luminescence/man/plot_DRTResults.Rd0000644000176200001440000001613414521210045016776 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot_DRTResults.R \name{plot_DRTResults} \alias{plot_DRTResults} \title{Visualise dose recovery test results} \usage{ plot_DRTResults( values, given.dose = NULL, error.range = 10, preheat, boxplot = FALSE, mtext, summary, summary.pos, legend, legend.pos, par.local = TRUE, na.rm = FALSE, ... ) } \arguments{ \item{values}{\linkS4class{RLum.Results} or \link{data.frame} (\strong{required}): input values containing at least De and De error. To plot more than one data set in one figure, a \code{list} of the individual data sets must be provided (e.g. \code{list(dataset.1, dataset.2)}).} \item{given.dose}{\link{numeric} (\emph{optional}): given dose used for the dose recovery test to normalise data. If only one given dose is provided this given dose is valid for all input data sets (i.e., \code{values} is a list). Otherwise a given dose for each input data set has to be provided (e.g., \code{given.dose = c(100,200)}). If \code{given.dose} in \code{NULL} the values are plotted without normalisation (might be useful for preheat plateau tests). \strong{Note:} Unit has to be the same as from the input values (e.g., Seconds or Gray).} \item{error.range}{\link{numeric}: symmetric error range in percent will be shown as dashed lines in the plot. 
Set \code{error.range} to 0 to void plotting of error ranges.} \item{preheat}{\link{numeric}: optional vector of preheat temperatures to be used for grouping the De values. If specified, the temperatures are assigned to the x-axis.} \item{boxplot}{\link{logical}: optionally plot values, that are grouped by preheat temperature as boxplots. Only possible when \code{preheat} vector is specified.} \item{mtext}{\link{character}: additional text below the plot title.} \item{summary}{\link{character} (\emph{optional}): adds numerical output to the plot. Can be one or more out of: \itemize{ \item \code{"n"} (number of samples), \item \code{"mean"} (mean De value), \item \code{"weighted$mean"} (error-weighted mean), \item \code{"median"} (median of the De values), \item \code{"sd.rel"} (relative standard deviation in percent), \item \code{"sd.abs"} (absolute standard deviation), \item \code{"se.rel"} (relative standard error) and \item \code{"se.abs"} (absolute standard error) } and all other measures returned by the function \link{calc_Statistics}.} \item{summary.pos}{\link{numeric} or \link{character} (\emph{with default}): optional position coordinates or keyword (e.g. \code{"topright"}) for the statistical summary. Alternatively, the keyword \code{"sub"} may be specified to place the summary below the plot header. However, this latter option in only possible if \code{mtext} is not used.} \item{legend}{\link{character} vector (\emph{optional}): legend content to be added to the plot.} \item{legend.pos}{\link{numeric} or \link{character} (\emph{with default}): optional position coordinates or keyword (e.g. \code{"topright"}) for the legend to be plotted.} \item{par.local}{\link{logical} (\emph{with default}): use local graphical parameters for plotting, e.g. the plot is shown in one column and one row. If \code{par.local = FALSE}, global parameters are inherited, i.e. 
parameters provided via \code{par()} work} \item{na.rm}{\link{logical}: indicating whether \code{NA} values are removed before plotting from the input data set} \item{...}{further arguments and graphical parameters passed to \link{plot}, supported are: \code{xlab}, \code{ylab}, \code{xlim}, \code{ylim}, \code{main}, \code{cex}, \code{las} and `pch``} } \value{ A plot is returned. } \description{ The function provides a standardised plot output for dose recovery test measurements. } \details{ Procedure to test the accuracy of a measurement protocol to reliably determine the dose of a specific sample. Here, the natural signal is erased and a known laboratory dose administered which is treated as unknown. Then the De measurement is carried out and the degree of congruence between administered and recovered dose is a measure of the protocol's accuracy for this sample.\cr In the plot the normalised De is shown on the y-axis, i.e. obtained De/Given Dose. } \note{ Further data and plot arguments can be added by using the appropriate R commands. 
} \section{Function version}{ 0.1.14 } \examples{ ## read example data set and misapply them for this plot type data(ExampleData.DeValues, envir = environment()) ## plot values plot_DRTResults( values = ExampleData.DeValues$BT998[7:11,], given.dose = 2800, mtext = "Example data") ## plot values with legend plot_DRTResults( values = ExampleData.DeValues$BT998[7:11,], given.dose = 2800, legend = "Test data set") ## create and plot two subsets with randomised values x.1 <- ExampleData.DeValues$BT998[7:11,] x.2 <- ExampleData.DeValues$BT998[7:11,] * c(runif(5, 0.9, 1.1), 1) plot_DRTResults( values = list(x.1, x.2), given.dose = 2800) ## some more user-defined plot parameters plot_DRTResults( values = list(x.1, x.2), given.dose = 2800, pch = c(2, 5), col = c("orange", "blue"), xlim = c(0, 8), ylim = c(0.85, 1.15), xlab = "Sample aliquot") ## plot the data with user-defined statistical measures as legend plot_DRTResults( values = list(x.1, x.2), given.dose = 2800, summary = c("n", "weighted$mean", "sd.abs")) ## plot the data with user-defined statistical measures as sub-header plot_DRTResults( values = list(x.1, x.2), given.dose = 2800, summary = c("n", "weighted$mean", "sd.abs"), summary.pos = "sub") ## plot the data grouped by preheat temperatures plot_DRTResults( values = ExampleData.DeValues$BT998[7:11,], given.dose = 2800, preheat = c(200, 200, 200, 240, 240)) ## read example data set and misapply them for this plot type data(ExampleData.DeValues, envir = environment()) ## plot values plot_DRTResults( values = ExampleData.DeValues$BT998[7:11,], given.dose = 2800, mtext = "Example data") ## plot two data sets grouped by preheat temperatures plot_DRTResults( values = list(x.1, x.2), given.dose = 2800, preheat = c(200, 200, 200, 240, 240)) ## plot the data grouped by preheat temperatures as boxplots plot_DRTResults( values = ExampleData.DeValues$BT998[7:11,], given.dose = 2800, preheat = c(200, 200, 200, 240, 240), boxplot = TRUE) } \section{How to cite}{ Kreutzer, S., 
Dietze, M., 2023. plot_DRTResults(): Visualise dose recovery test results. Function version 0.1.14. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Wintle, A.G., Murray, A.S., 2006. A review of quartz optically stimulated luminescence characteristics and their relevance in single-aliquot regeneration dating protocols. Radiation Measurements, 41, 369-391. } \seealso{ \link{plot} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)\cr Michael Dietze, GFZ Potsdam (Germany) , RLum Developer Team} \keyword{dplot} Luminescence/man/fit_ThermalQuenching.Rd0000644000176200001440000001271614521210045020027 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fit_ThermalQuenching.R \name{fit_ThermalQuenching} \alias{fit_ThermalQuenching} \title{Fitting Thermal Quenching Data} \usage{ fit_ThermalQuenching( data, start_param = list(), method_control = list(), n.MC = 100, verbose = TRUE, plot = TRUE, ... ) } \arguments{ \item{data}{\link{data.frame} (\strong{required}): input data with three columns, the first column contains temperature values in deg. C, columns 2 and 3 the dependent values with its error} \item{start_param}{\link{list} (optional): option to provide own start parameters for the fitting, see details} \item{method_control}{\link{list} (optional): further options to fine tune the fitting, see details for further information} \item{n.MC}{\link{numeric} (\emph{with default}): number of Monte Carlo runs for the error estimation. 
If \code{n.MC} is \code{NULL} or \verb{<=1}, the error estimation is skipped} \item{verbose}{\link{logical} (\emph{with default}): enables/disables terminal output} \item{plot}{\link{logical} (\emph{with default}): enables/disables plot output} \item{...}{further arguments that can be passed to control the plotting, support are \code{main}, \code{pch}, \code{col_fit}, \code{col_points}, \code{lty}, \code{lwd}, \code{xlab}, \code{ylab}, \code{xlim}, \code{ylim}, \code{xaxt}} } \value{ The function returns numerical output and an (\emph{optional}) plot. -----------------------------------\cr \verb{[ NUMERICAL OUTPUT ]}\cr -----------------------------------\cr \strong{\code{RLum.Results}}-object \strong{slot:} \strong{\verb{@data}} \verb{[.. $data : data.frame]}\cr A table with all fitting parameters and the number of Monte Carlo runs used for the error estimation. \verb{[.. $fit : nls object]} \cr The nls \link[stats:nls]{stats::nls} object returned by the function \link[minpack.lm:nlsLM]{minpack.lm::nlsLM}. This object can be further passed to other functions supporting an nls object (cf. details section in \link[stats:nls]{stats::nls}) \strong{slot:} \strong{\verb{@info}} \verb{[.. $call : call]}\cr The original function call. -----------------------------------\cr \verb{[ GAPHICAL OUTPUT ]}\cr -----------------------------------\cr Plotted are temperature against the signal and their uncertainties. The fit is shown as dashed-line (can be modified). Please note that for the fitting the absolute temperature values are used but are re-calculated to deg. C for the plot. } \description{ Applying a nls-fitting to thermal quenching data. } \details{ \strong{Used equation}\cr The equation used for the fitting is \deqn{y = (A / (1 + C * (exp(-W / (k * x))))) + c} \emph{W} is the energy depth in eV and \emph{C} is dimensionless constant. \emph{A} and \emph{c} are used to adjust the curve for the given signal. 
\emph{k} is the Boltzmann in eV/K and \emph{x} is the absolute temperature in K. \strong{Error estimation}\cr The error estimation is done be varying the input parameters using the given uncertainties in a Monte Carlo simulation. Errors are assumed to follow a normal distribution. \strong{\code{start_param}} \cr The function allows the injection of own start parameters via the argument \code{start_param}. The parameters needs to be provided as names list. The names are the parameters to be optimised. Examples: \code{start_param = list(A = 1, C = 1e+5, W = 0.5, c = 0)} \strong{\code{method_control}} \cr The following arguments can be provided via \code{method_control}. Please note that arguments provided via \code{method_control} are not further tested, i.e., if the function crashes your input was probably wrong. \tabular{lll}{ \strong{ARGUMENT} \tab \strong{TYPE} \tab \strong{DESCRIPTION}\cr \code{upper} \tab named \link{vector} \tab sets upper fitting boundaries, if provided boundaries for all arguments are required, e.g., \code{c(A = 0, C = 0, W = 0, c = 0)} \cr \code{lower} \tab names \link{vector} \tab sets lower fitting boundaries (see \code{upper} for details) \cr \code{trace} \tab \link{logical} \tab enables/disables progression trace for \link[minpack.lm:nlsLM]{minpack.lm::nlsLM}\cr \code{weights} \tab \link{numeric} \tab option to provide own weights for the fitting, the length of this vector needs to be equal to the number for rows of the input \code{data.frame}. If set to \code{NULL} no weights are applied. The weights are defined by the third column of the input \code{data.frame}. 
} } \section{Function version}{ 0.1.0 } \examples{ ##create short example dataset data <- data.frame( T = c(25, 40, 50, 60, 70, 80, 90, 100, 110), V = c(0.06, 0.058, 0.052, 0.051, 0.041, 0.034, 0.035, 0.033, 0.032), V_X = c(0.012, 0.009, 0.008, 0.008, 0.007, 0.006, 0.005, 0.005, 0.004)) ##fit fit_ThermalQuenching( data = data, n.MC = NULL) } \section{How to cite}{ Kreutzer, S., 2023. fit_ThermalQuenching(): Fitting Thermal Quenching Data. Function version 0.1.0. In: Kreutzer, S., Burow, C., Dietze, M., Fuchs, M.C., Schmidt, C., Fischer, M., Friedrich, J., Mercier, N., Philippe, A., Riedesel, S., Autzen, M., Mittelstrass, D., Gray, H.J., Galharret, J., 2023. Luminescence: Comprehensive Luminescence Dating Data Analysis. R package version 0.9.23. https://CRAN.R-project.org/package=Luminescence } \references{ Wintle, A.G., 1975. Thermal Quenching of Thermoluminescence in Quartz. Geophys. J. R. astr. Soc. 41, 107–113. } \seealso{ \link[minpack.lm:nlsLM]{minpack.lm::nlsLM} } \author{ Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) , RLum Developer Team} Luminescence/DESCRIPTION0000644000176200001440000002037414521216670014404 0ustar liggesusersPackage: Luminescence Type: Package Title: Comprehensive Luminescence Dating Data Analysis Version: 0.9.23 Date: 2023-11-03 Author: Sebastian Kreutzer [aut, trl, cre, dtc] (), Christoph Burow [aut, trl, dtc] (), Michael Dietze [aut] (), Margret C. Fuchs [aut] (), Christoph Schmidt [aut] (), Manfred Fischer [aut, trl], Johannes Friedrich [aut] (), Norbert Mercier [aut] (), Rachel K. Smedley [ctb] (), Claire Christophe [ctb], Antoine Zink [ctb] (), Julie Durcan [ctb] (), Georgina E. King [ctb, dtc] (), Anne Philippe [aut] (), Guillaume Guerin [ctb] (), Svenja Riedesel [aut] (), Martin Autzen [aut] (), Pierre Guibert [ctb] (), Dirk Mittelstrass [aut] (), Harrison J. 
Gray [aut] (), Jean-Michel Galharret [aut] (), Markus Fuchs [ths] () Authors@R: c( person("Sebastian", "Kreutzer", role = c("aut", "trl", "cre", "dtc"), email = "sebastian.kreutzer@uni-heidelberg.de", comment = c(ORCID = "0000-0002-0734-2199")), person("Christoph", "Burow", role = c("aut", "trl", "dtc"), comment = c(ORCID = "0000-0002-5023-4046")), person("Michael", "Dietze", role = c("aut"), comment = c(ORCID = "0000-0001-6063-1726")), person("Margret C.", "Fuchs", role = c("aut"), comment = c(ORCID = "0000-0001-7210-1132")), person("Christoph", "Schmidt", role = c("aut"), comment = c(ORCID = "0000-0002-2309-3209")), person("Manfred", "Fischer", role = c("aut", "trl")), person("Johannes", "Friedrich", role = c("aut"), comment = c(ORCID = "0000-0002-0805-9547")), person("Norbert", "Mercier", role = c("aut"), comment = c(ORCID = "0000-0002-6375-9108")), person("Rachel K.", "Smedley", role = c("ctb"), comment = c(ORCID = "0000-0001-7773-5193")), person("Claire", "Christophe", role = c("ctb")), person("Antoine", "Zink", role = c("ctb"), comment = c(ORCID = "0000-0001-7146-1101")), person("Julie", "Durcan", role = c("ctb"), comment = c(ORCID = "0000-0001-8724-8022")), person("Georgina E.", "King", role = c("ctb", "dtc"), comment = c(ORCID = "0000-0003-1059-8192")), person("Anne", "Philippe", role = c("aut"), comment = c(ORCID = "0000-0002-5331-5087")), person("Guillaume", "Guerin", role = c("ctb"), comment = c(ORCID = "0000-0001-6298-5579")), person("Svenja", "Riedesel", role = c("aut"), comment = c(ORCID = "0000-0003-2936-8776")), person("Martin", "Autzen", role = c("aut"), comment = c(ORCID = "0000-0001-6249-426X")), person("Pierre", "Guibert", role = c("ctb"), comment = c(ORCID = "0000-0001-8969-8684")), person("Dirk", "Mittelstrass", role = c("aut"), comment = c(ORCID = "0000-0002-9567-8791")), person("Harrison J.", "Gray", role = c("aut"), comment = c(ORCID = "0000-0002-4555-7473")), person("Jean-Michel", "Galharret", role = c("aut"), comment = c(ORCID = 
"0000-0003-2219-8727")), person("Markus", "Fuchs", role = c("ths"), comment = c(ORCID = "0000-0003-4669-6528"))) Maintainer: Sebastian Kreutzer Description: A collection of various R functions for the purpose of Luminescence dating data analysis. This includes, amongst others, data import, export, application of age models, curve deconvolution, sequence analysis and plotting of equivalent dose distributions. Contact: Package Developers License: GPL-3 BugReports: https://github.com/R-Lum/Luminescence/issues Depends: R (>= 4.2), utils LinkingTo: Rcpp (>= 1.0.11), RcppArmadillo (>= 0.12.6.6.0) Imports: bbmle (>= 1.0.25), data.table (>= 1.14.8), DEoptim (>= 2.2-8), httr (>= 1.4.7), interp (>= 1.1-4), lamW (>= 2.2.1), matrixStats (>= 1.0.0), methods, minpack.lm (>= 1.2-4), mclust (>= 6.0.0), readxl (>= 1.4.3), Rcpp (>= 1.0.11), shape (>= 1.4.6), parallel, XML (>= 3.99-0.15), zoo (>= 1.8-12) Suggests: spelling (>= 2.2.1), plotly (>= 4.10.2), rmarkdown (>= 2.25), rstudioapi (>= 0.15.0), rjags (>= 4-14), coda (>= 0.19-4), pander (>= 0.6.5), testthat (>= 3.2.0), tiff (>= 0.1-11), devtools (>= 2.4.5), R.rsp (>= 0.45.0) VignetteBuilder: R.rsp URL: https://CRAN.R-project.org/package=Luminescence Encoding: UTF-8 Language: en-US Collate: 'Analyse_SAR.OSLdata.R' 'CW2pHMi.R' 'CW2pLM.R' 'CW2pLMi.R' 'CW2pPMi.R' 'Luminescence-package.R' 'PSL2Risoe.BINfileData.R' 'RcppExports.R' 'replicate_RLum.R' 'RLum-class.R' 'smooth_RLum.R' 'names_RLum.R' 'structure_RLum.R' 'length_RLum.R' 'set_RLum.R' 'get_RLum.R' 'RLum.Analysis-class.R' 'RLum.Data-class.R' 'bin_RLum.Data.R' 'RLum.Data.Curve-class.R' 'RLum.Data.Image-class.R' 'RLum.Data.Spectrum-class.R' 'RLum.Results-class.R' 'set_Risoe.BINfileData.R' 'get_Risoe.BINfileData.R' 'Risoe.BINfileData-class.R' 'Risoe.BINfileData2RLum.Analysis.R' 'Risoe.BINfileData2RLum.Data.Curve.R' 'Second2Gray.R' 'addins_RLum.R' 'analyse_Al2O3C_CrossTalk.R' 'analyse_Al2O3C_ITC.R' 'analyse_Al2O3C_Measurement.R' 'analyse_FadingMeasurement.R' 'analyse_IRSAR.RF.R' 
'analyse_SAR.CWOSL.R' 'analyse_SAR.TL.R' 'analyse_baSAR.R' 'analyse_pIRIRSequence.R' 'analyse_portableOSL.R' 'apply_CosmicRayRemoval.R' 'apply_EfficiencyCorrection.R' 'calc_AliquotSize.R' 'calc_AverageDose.R' 'calc_CentralDose.R' 'calc_CobbleDoseRate.R' 'calc_CommonDose.R' 'calc_CosmicDoseRate.R' 'calc_FadingCorr.R' 'calc_FastRatio.R' 'calc_FiniteMixture.R' 'calc_FuchsLang2001.R' 'calc_HomogeneityTest.R' 'calc_Huntley2006.R' 'calc_IEU.R' 'calc_Kars2008.R' 'calc_Lamothe2003.R' 'calc_MaxDose.R' 'calc_MinDose.R' 'calc_OSLLxTxDecomposed.R' 'calc_OSLLxTxRatio.R' 'calc_SourceDoseRate.R' 'calc_Statistics.R' 'calc_TLLxTxRatio.R' 'calc_ThermalLifetime.R' 'calc_WodaFuchs2008.R' 'calc_gSGC.R' 'calc_gSGC_feldspar.R' 'combine_De_Dr.R' 'convert_Activity2Concentration.R' 'convert_BIN2CSV.R' 'convert_Concentration2DoseRate.R' 'convert_Daybreak2CSV.R' 'convert_PSL2CSV.R' 'convert_RLum2Risoe.BINfileData.R' 'convert_SG2MG.R' 'convert_Wavelength2Energy.R' 'convert_XSYG2CSV.R' 'extract_IrradiationTimes.R' 'extract_ROI.R' 'fit_CWCurve.R' 'fit_EmissionSpectra.R' 'fit_LMCurve.R' 'fit_OSLLifeTimes.R' 'fit_SurfaceExposure.R' 'fit_ThermalQuenching.R' 'get_Layout.R' 'get_Quote.R' 'get_rightAnswer.R' 'github.R' 'install_DevelopmentVersion.R' 'internal_as.latex.table.R' 'internals_RLum.R' 'merge_RLum.Analysis.R' 'merge_RLum.Data.Curve.R' 'merge_RLum.R' 'merge_RLum.Results.R' 'merge_Risoe.BINfileData.R' 'methods_DRAC.R' 'methods_RLum.R' 'plot_AbanicoPlot.R' 'plot_DRCSummary.R' 'plot_DRTResults.R' 'plot_DetPlot.R' 'plot_FilterCombinations.R' 'plot_GrowthCurve.R' 'plot_Histogram.R' 'plot_KDE.R' 'plot_NRt.R' 'plot_OSLAgeSummary.R' 'plot_RLum.Analysis.R' 'plot_RLum.Data.Curve.R' 'plot_RLum.Data.Image.R' 'plot_RLum.Data.Spectrum.R' 'plot_RLum.R' 'plot_RLum.Results.R' 'plot_ROI.R' 'plot_RadialPlot.R' 'plot_Risoe.BINfileData.R' 'plot_ViolinPlot.R' 'read_BIN2R.R' 'read_Daybreak2R.R' 'read_PSL2R.R' 'read_RF2R.R' 'read_SPE2R.R' 'read_TIFF2R.R' 'read_XSYG2R.R' 'report_RLum.R' 'scale_GammaDose.R' 
'subset_SingleGrainData.R' 'template_DRAC.R' 'tune_Data.R' 'use_DRAC.R' 'utils_DRAC.R' 'verify_SingleGrainData.R' 'write_R2BIN.R' 'write_R2TIFF.R' 'write_RLum2CSV.R' 'zzz.R' RoxygenNote: 7.2.3 NeedsCompilation: yes Packaged: 2023-11-03 15:22:31 UTC; kreutzer Repository: CRAN Date/Publication: 2023-11-03 16:20:08 UTC Luminescence/build/0000755000176200001440000000000014521210067013761 5ustar liggesusersLuminescence/build/vignette.rds0000644000176200001440000000040314521210067016315 0ustar liggesusersePMo0 Mi`4 îq@O@h@ a,5(*IaGKvl~W1` hZ=Ԁq=˝v8K70J\Tę\@t4{JyA4e\Qx( d"}!ɴvz>jp|L_jjp%ڟp:iSkn`E?غcoZBS‰xc8b/r Luminescence/build/partial.rdb0000644000176200001440000000007414521210052016101 0ustar liggesusersb```b`a 00 FN ͚Z d@$$7Luminescence/tests/0000755000176200001440000000000014521210067014024 5ustar liggesusersLuminescence/tests/spelling.R0000644000176200001440000000022214521207352015763 0ustar liggesusersif (requireNamespace('spelling', quietly = TRUE)) spelling::spell_check_test( vignettes = TRUE, error = FALSE, skip_on_cran = TRUE) Luminescence/tests/testthat/0000755000176200001440000000000014521216670015672 5ustar liggesusersLuminescence/tests/testthat/test_calc_Statistics.R0000644000176200001440000000632114062436223022170 0ustar liggesusers## load example data data(ExampleData.DeValues, envir = environment()) ## calculate statistics and show output set.seed(1) temp <- calc_Statistics(ExampleData.DeValues$BT998, n.MCM = 1000) temp_alt1 <- calc_Statistics(ExampleData.DeValues$BT998, n.MCM = 1000, digits = 2) temp_alt2 <- calc_Statistics(ExampleData.DeValues$BT998, n.MCM = 1000, digits = NULL) temp_RLum <- set_RLum(class = "RLum.Results", data = list(data = ExampleData.DeValues$BT998)) test_that("check class and length of output", { testthat::skip_on_cran() local_edition(3) expect_type(temp, "list") expect_equal(length(temp), 3) }) test_that("Test certain input scenarios", { expect_is(calc_Statistics(temp_RLum), "list") df <- ExampleData.DeValues$BT998 df[,2] <- 0 
expect_warning(calc_Statistics(df)) df <- ExampleData.DeValues$BT998 expect_silent(calc_Statistics(df, weight.calc = "reciprocal")) }) test_that("check error messages", { testthat::skip_on_cran() local_edition(3) df <- ExampleData.DeValues$BT998 expect_error(calc_Statistics(data = matrix(0,2)), regexp = "[calc_Statistics()] Input data is neither of type 'data.frame' nor 'RLum.Results'", fixed = TRUE) expect_error(calc_Statistics(data = df, weight.calc = "test")) }) test_that("check weighted values from output", { testthat::skip_on_cran() local_edition(3) expect_equal(temp$weighted$n, 25) expect_equal(sum(unlist(temp_alt1)),24535.72) expect_equal(sum(unlist(temp_alt2)),24534.1) expect_equal(round(temp$weighted$mean, digits = 3), 2896.036) expect_equal(round(temp$weighted$median, digits = 2), 2884.46) expect_equal(round(temp$weighted$sd.abs, digits = 4), 240.2228) expect_equal(round(temp$weighted$sd.rel, digits = 6), 8.294885) expect_equal(round(temp$weighted$se.abs, digits = 5), 48.04457) expect_equal(round(temp$weighted$se.rel, digits = 6), 1.658977) expect_equal(round(temp$weighted$skewness, digits = 6), 1.342018) expect_equal(round(temp$weighted$kurtosis, digits = 6), 4.387913) }) test_that("check unweighted values from output", { testthat::skip_on_cran() expect_equal(temp$weighted$n, 25) expect_equal(round(temp$unweighted$mean, digits = 3), 2950.818) expect_equal(round(temp$unweighted$median, digits = 2), 2884.46) expect_equal(round(temp$unweighted$sd.abs, digits = 4), 281.6433) expect_equal(round(temp$unweighted$sd.rel, digits = 6), 9.544584) expect_equal(round(temp$unweighted$se.abs, digits = 5), 56.32866) expect_equal(round(temp$unweighted$se.rel, digits = 6), 1.908917) expect_equal(round(temp$unweighted$skewness, digits = 6), 1.342018) expect_equal(round(temp$unweighted$kurtosis, digits = 6), 4.387913) }) test_that("check MCM values from output", { expect_equal(temp$MCM$n, 25) expect_equal(round(temp$MCM$mean, digits = 3), 2950.992) 
expect_equal(round(temp$MCM$median, digits = 3), 2885.622) expect_equal(round(temp$MCM$sd.abs, digits = 4), 295.0737) expect_equal(round(temp$MCM$sd.rel, digits = 6), 9.999137) expect_equal(round(temp$MCM$se.abs, digits = 5), 59.01474) expect_equal(round(temp$MCM$se.rel, digits = 6), 1.999827) expect_equal(round(temp$MCM$skewness, digits = 3), 1286.082) expect_equal(round(temp$MCM$kurtosis, digits = 3), 4757.097) }) Luminescence/tests/testthat/test_calc_IEU.R0000755000176200001440000000310214062436223020455 0ustar liggesusersdata(ExampleData.DeValues, envir = environment()) temp <- calc_IEU(ExampleData.DeValues$CA1, a = 0.2, b = 1.9, interval = 1, verbose = FALSE, plot = FALSE) test_that("Test general behaviour", { testthat::skip_on_cran() local_edition(3) data(ExampleData.DeValues, envir = environment()) ##standard expect_silent(calc_IEU( ExampleData.DeValues$CA1, a = 0.2, b = 1.9, interval = 1, verbose = FALSE, plot =FALSE )) ##enable plot expect_message(calc_IEU( ExampleData.DeValues$CA1, a = 0.2, b = 1.9, interval = 1, trace = TRUE, verbose = TRUE, plot = TRUE )) ##verbose without setting expect_message(calc_IEU( ExampleData.DeValues$CA1, a = 0.2, b = 1.9, interval = 1, plot = FALSE )) ##cause stop expect_error(calc_IEU( "ExampleData.DeValues$CA1", a = 0.2, b = 1.9, interval = 1, plot = FALSE )) ##provide RLum.Results ##cause stop expect_silent(calc_IEU( set_RLum(class = "RLum.Results", data = list(test = ExampleData.DeValues$CA1)), a = 0.2, b = 1.9, interval = 1, verbose = FALSE, plot = FALSE )) }) test_that("check class and length of output", { testthat::skip_on_cran() local_edition(3) expect_s4_class(temp, "RLum.Results") expect_equal(length(temp), 5) }) test_that("check values from output example", { testthat::skip_on_cran() local_edition(3) results <- get_RLum(temp) expect_equal(results$de, 46.67) expect_equal(results$de_err, 2.55) expect_equal(results$n, 24) }) 
Luminescence/tests/testthat/test_RLum.Data.Spectrum.R0000644000176200001440000000310314062436223022377 0ustar liggesuserstest_that("check class", { testthat::skip_on_cran() local_edition(3) ##set empty spectrum object and show it expect_output(show(set_RLum(class = "RLum.Data.Spectrum"))) ##check replacements object <- set_RLum(class = "RLum.Data.Spectrum") expect_s4_class(set_RLum(class = "RLum.Data.Spectrum", data = object), class = "RLum.Data.Spectrum") ##check get_RLum object <- set_RLum(class = "RLum.Data.Spectrum", data = object, info = list(a = "test")) expect_error(get_RLum(object, info.object = "est"), regexp = "Invalid element name. Valid names are: a") expect_error(get_RLum(object, info.object = 1L), "'info.object' has to be a character!") expect_type(get_RLum(object, info.object = "a"), "character") ##test method names expect_type(names(object), "character") ##test bin_RLum() expect_error(bin_RLum.Data(object, bin_size.col = "test"), "'bin_size.row' and 'bin_size.col' must be of class 'numeric'!") object@data <- matrix(data = rep(1:20, each = 10), ncol = 20) rownames(object@data) <- 1:10 colnames(object@data) <- 1:20 expect_s4_class(object = bin_RLum.Data(object, bin_size.row = 2), "RLum.Data.Spectrum") expect_s4_class(object = bin_RLum.Data(object, bin_size.row = 1, bin_size.col = 2), "RLum.Data.Spectrum") ##check conversions expect_s4_class(as(object = data.frame(x = 1:10), Class = "RLum.Data.Spectrum"), "RLum.Data.Spectrum") expect_s3_class(as(set_RLum("RLum.Data.Spectrum"), "data.frame"), "data.frame") expect_s4_class(as(object = matrix(1:10,ncol = 2), Class = "RLum.Data.Spectrum"), "RLum.Data.Spectrum") }) Luminescence/tests/testthat/test_plot_RLum.Data.Image.R0000644000176200001440000000207214212442347022662 0ustar liggesuserstest_that("Test image plotting", { testthat::skip_on_cran() local_edition(3) ## create dataset to test image <- as(array(rnorm(1000), dim = c(10,10,10)), "RLum.Data.Image") ## crash function ---- ### wrong input ----- 
expect_error(plot_RLum.Data.Image("image"), "\\[plot_RLum.Data.Image\\(\\)\\] Input object is not of type RLum.Data.Image.") expect_error(plot_RLum.Data.Image(image, plot.type = "error"), "\\[plot_RLum.Data.Image\\(\\)\\] Unknown plot type.") ## plot.raster --- expect_silent(plot_RLum.Data.Image(image, plot.type = "plot.raster")) expect_silent(plot_RLum.Data.Image(image, plot.type = "plot.raster", stretch = NULL)) ## check global z-scale expect_silent(plot_RLum.Data.Image(image, plot.type = "plot.raster", stretch = NULL, zlim_image = c(0,1))) ## contour --- expect_silent(plot_RLum.Data.Image(image, plot.type = "contour", stretch = NULL)) }) Luminescence/tests/testthat/test_calc_CobbeDoseRate.R0000644000176200001440000000076014062436223022500 0ustar liggesuserstest_that("basic checks", { testthat::skip_on_cran() local_edition(3) ## simple run with example data data("ExampleData.CobbleData", envir = environment()) expect_s4_class(calc_CobbleDoseRate(ExampleData.CobbleData), "RLum.Results") ## break the function df <- ExampleData.CobbleData df$Distance[[14]] <- 50000 expect_error(calc_CobbleDoseRate(df), "Slices outside of cobble. 
Please check your distances and make sure they are in mm and diameter is in cm!") }) Luminescence/tests/testthat/test_analyse_pIRIRSequence.R0000644000176200001440000000321514062436223023205 0ustar liggesusersset.seed(1) data(ExampleData.BINfileData, envir = environment()) object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos = 1) object <- get_RLum(object, record.id = c(-29, -30)) sequence.structure <- c(1, 2, 2, 3, 4, 4) sequence.structure <- as.vector(sapply(seq(0, length(object) - 1, by = 4), function(x) { sequence.structure + x })) object <- sapply(1:length(sequence.structure), function(x) { object[[sequence.structure[x]]] }) object <- set_RLum(class = "RLum.Analysis", records = object, protocol = "pIRIR") results <- analyse_pIRIRSequence( object, signal.integral.min = 1, signal.integral.max = 2, background.integral.min = 900, background.integral.max = 1000, fit.method = "EXP", sequence.structure = c("TL", "pseudoIRSL1", "pseudoIRSL2"), main = "Pseudo pIRIR data set based on quartz OSL", plot = FALSE, plot.single = TRUE, verbose = FALSE ) test_that("check class and length of output", { testthat::skip_on_cran() local_edition(3) expect_s4_class(results, "RLum.Results") expect_equal(length(results), 4) expect_s3_class(results$LnLxTnTx.table, "data.frame") expect_s3_class(results$rejection.criteria, "data.frame") }) test_that("check output", { testthat::skip_on_cran() local_edition(3) ##fix for different R versions if(R.version$major == "3" && as.numeric(R.version$minor) < 6){ expect_equal(round(sum(results$data[1:2, 1:4]), 0),7583) }else{ expect_equal(round(sum(results$data[1:2, 1:4]), 0),7584) } expect_equal(round(sum(results$rejection.criteria$Value), 2),3338.69) }) Luminescence/tests/testthat/test_plot_DRCSummary.R0000644000176200001440000000352014504255743022105 0ustar liggesuserstest_that("Test certain input scenarios", { testthat::skip_on_cran() local_edition(3) ##function stop expect_error(plot_DRCSummary("test"), regexp = "The input is not of 
class 'RLum.Results'") expect_error(plot_DRCSummary(set_RLum("RLum.Results")), regexp = "'object' was created by none supported function, cf. manual for allowed originators") }) test_that("Test plotting", { testthat::skip_on_cran() local_edition(3) #load data example data data(ExampleData.BINfileData, envir = environment()) #transform the values from the first position in a RLum.Analysis object object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos=1) results <- analyse_SAR.CWOSL( object = object, signal.integral.min = 1, signal.integral.max = 2, background.integral.min = 900, background.integral.max = 1000, plot = FALSE, verbose = FALSE ) ## create LambertW DRC results_LamW <- analyse_SAR.CWOSL( object = object, fit.method = "LambertW", signal.integral.min = 1, signal.integral.max = 2, background.integral.min = 900, background.integral.max = 1000, NumberIterations.MC = 2, plot = FALSE, verbose = FALSE ) ##simple expect_silent(plot_DRCSummary(results)) ##simple with graphical arguments expect_silent(plot_DRCSummary(results, col.lty = "red")) ##simple with LambertW expect_silent(plot_DRCSummary(results_LamW)) ##plus points expect_silent(plot_DRCSummary(results, show_dose_points = TRUE, show_natural = TRUE)) ##expect warning expect_warning(plot_DRCSummary(results, show_dose_points = TRUE, show_natural = TRUE, sel_curves = 1000)) ## different fit ##error err <- merge_RLum(list(results, results)) err@data$data$Fit[2] <- "err" expect_error(plot_DRCSummary(err), regexp = "\\[plot\\_DRCSummary\\(\\)\\] I can only.*") }) Luminescence/tests/testthat/test_structure_RLum.R0000644000176200001440000000057114062436223022054 0ustar liggesuserstest_that("Test whether the function works", { testthat::skip_on_cran() local_edition(3) data(ExampleData.RLum.Analysis, envir = environment()) expect_silent(structure_RLum(IRSAR.RF.Data)) expect_s3_class(structure_RLum(IRSAR.RF.Data), "data.frame") ##test a list of such elements 
expect_type(structure_RLum(list(IRSAR.RF.Data,IRSAR.RF.Data, "a")), "list") }) Luminescence/tests/testthat/test_merge_RLum.R0000644000176200001440000000163314464125673021125 0ustar liggesuserstest_that("Merge tests", { testthat::skip_on_cran() local_edition(3) ##load data data(ExampleData.RLum.Analysis, envir = environment()) ## set objects o1 <- IRSAR.RF.Data o2 <- IRSAR.RF.Data ## simple test expect_s4_class(merge_RLum(list(o1,o2)), "RLum.Analysis") ## with null objects expect_s4_class(merge_RLum(list(o1,o2, NULL)), "RLum.Analysis") ## with unwanted objects expect_error(merge_RLum(list(o1,o2, "test")), regexp = "\\[merge\\_RLum\\(\\)\\]: At least element \\#3 is not of class 'RLum' or a derivative class\\!") ## single object expect_s4_class( merge_RLum(list(o1)), "RLum.Analysis") ## zero objects produces warnings expect_warning( merge_RLum(list(NULL)), regexp = "\\[merge\\_RLum\\(\\)\\] Nothing was merged as the .+") ## crash wit non-list expect_error(merge_RLum("errr"), "\\[merge\\_RLum\\(\\)\\] argument 'objects' .*") }) Luminescence/tests/testthat/test_read_PSL2R.R0000644000176200001440000000072514062436223020713 0ustar liggesuserstest_that("Test functionality", { testthat::skip_on_cran() local_edition(3) ## default values expect_s4_class(read_PSL2R( file = system.file("extdata/DorNie_0016.psl", package = "Luminescence") ), "RLum.Analysis") ## custom values (all inverted) expect_s4_class(read_PSL2R( file = system.file("extdata/DorNie_0016.psl", package = "Luminescence"), drop_bg = TRUE, as_decay_curve = FALSE, smooth = TRUE, merge = TRUE ), "RLum.Analysis") }) Luminescence/tests/testthat/test_fit_EmissionSpectra.R0000644000176200001440000000340014145666016023030 0ustar liggesuserstest_that("standard check", { testthat::skip_on_cran() local_edition(3) ##load example data data(ExampleData.XSYG, envir = environment()) ##subtract background TL.Spectrum@data <- TL.Spectrum@data[] - TL.Spectrum@data[,15] # break function ----------- ## unwanted list element in list 
--------- expect_error(fit_EmissionSpectra(list(TL.Spectrum, "fail")), "\\[fit\\_EmissionSpectra\\(\\)\\] List elements of different class detected!") ## wrong frame range ------- expect_error(fit_EmissionSpectra(TL.Spectrum, frame = 1000), "\\[fit\\_EmissionSpectra\\(\\)\\] 'frame' invalid. Allowed range min: 1 and max: 24") ## wrong graining argument ------- expect_error(fit_EmissionSpectra(TL.Spectrum, frame = 5, method_control = list(graining = 10000)), "\\[fit\\_EmissionSpectra\\(\\)\\] method\\_control\\$graining cannot be larger than available channels \\(1024\\)!") ## for matrix input ------- expect_error(fit_EmissionSpectra("fail"), "\\[fit\\_EmissionSpectra\\(\\)\\] Input not supported, please read the manual!") # plain run ------- # #somewhat the plotting does not work for Github Actions results <- expect_s4_class(fit_EmissionSpectra( object = TL.Spectrum, frame = 5, main = "TL spectrum", n_components = 3, plot = FALSE, start_parameters = c(2.17), method_control = list(max.runs = 100)), "RLum.Results") # silent mode ------- expect_silent(fit_EmissionSpectra( object = TL.Spectrum, frame = 5, main = "TL spectrum", plot = FALSE, verbose = FALSE, method_control = list(max.runs = 10))) # regression test ---- expect_length(results, 3) expect_s3_class(results$fit[[1]], "nls") expect_type(results$data, "double") }) Luminescence/tests/testthat/test_fit_OSLLifeTimes.R0000644000176200001440000000302614062436223022154 0ustar liggesusers##load example data data(ExampleData.TR_OSL, envir = environment()) temp_list <- list(ExampleData.TR_OSL, ExampleData.TR_OSL) temp_analysis <- set_RLum("RLum.Analysis", records = temp_list) test_that("standard check", { testthat::skip_on_cran() local_edition(3) ##trigger errors expect_null(fit_OSLLifeTimes(object = "test")) ## Test different inputs ##simple run set.seed(1) expect_s4_class(object = fit_OSLLifeTimes( object = ExampleData.TR_OSL, plot = FALSE, n.components = 1), class = "RLum.Results") ##simple list 
expect_s4_class(object = fit_OSLLifeTimes( object = temp_list, plot = FALSE, n.components = 1), class = "RLum.Results") ##simple RLum.Analysis expect_s4_class(object = fit_OSLLifeTimes( object = temp_analysis, verbose = FALSE, plot = FALSE, n.components = 1), class = "RLum.Results") ##test arguments ##simple run expect_s4_class(object = fit_OSLLifeTimes( object = ExampleData.TR_OSL, method_control = list(seed = 1, weights = FALSE), plot = FALSE, n.components = 1), class = "RLum.Results") ##test options expect_s4_class(object = fit_OSLLifeTimes( object = ExampleData.TR_OSL, verbose = FALSE, plot = FALSE, n.components = 1), class = "RLum.Results") ##warning for log expect_warning(object = fit_OSLLifeTimes( object = ExampleData.TR_OSL, verbose = FALSE, plot = TRUE, plot_simple = TRUE, log = "x", n.components = 1), regexp = "log-scale requires x-values > 0, set min xlim to 0.01!") }) Luminescence/tests/testthat/test_plot_DetPlot.R0000644000176200001440000000324014464125673021474 0ustar liggesuserstest_that("plot_DetPlot", { testthat::skip_on_cran() local_edition(3) ##ExampleData.BINfileData contains two BINfileData objects ##CWOSL.SAR.Data and TL.SAR.Data data(ExampleData.BINfileData, envir = environment()) ##transform the values from the first position in a RLum.Analysis object object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos=1) ## trigger stop expect_error( plot_DetPlot(object = "error"), regexp = "\\[plot_DetPlot\\(\\)\\] input must be an RLum\\.Analysis object\\!") ## simple run with default results <- expect_s4_class(plot_DetPlot( object, method = "shift", signal.integral.min = 1, signal.integral.max = 3, background.integral.min = 900, background.integral.max = 1000, analyse_function.control = list( fit.method = "LIN"), n.channels = 2), "RLum.Results") ## simple run with default results <- expect_s4_class(plot_DetPlot( object, method = "expansion", signal.integral.min = 1, signal.integral.max = 3, background.integral.min = 900, background.integral.max 
= 1000, analyse_function.control = list( fit.method = "LIN"), n.channels = 2), "RLum.Results") ## try with NA values object@records[[2]][,2] <- 1 object@records[[4]][,2] <- 1 object@records[[6]][,2] <- 1 object@records[[8]][,2] <- 1 results <- expect_s4_class(suppressWarnings(plot_DetPlot( object, method = "expansion", signal.integral.min = 1, signal.integral.max = 3, background.integral.min = 900, background.integral.max = 1000, analyse_function.control = list( fit.method = "EXP"), n.channels = 1)), "RLum.Results") }) Luminescence/tests/testthat/test_calc_CentralDose.R0000644000176200001440000000303614145666016022250 0ustar liggesusersdata(ExampleData.DeValues, envir = environment()) temp <- calc_CentralDose( ExampleData.DeValues$CA1, plot = FALSE, verbose = FALSE) temp_NA <- data.frame(rnorm(10)+5, rnorm(10)+5) temp_NA[1,1] <- NA test_that("errors and warnings function", { testthat::skip_on_cran() local_edition(3) expect_error(calc_CentralDose(data = "error"), "'data' has to be of type 'data.frame' or 'RLum.Results'!") expect_error(calc_CentralDose(temp, sigmab = 10), "sigmab needs to be given as a fraction between 0 and 1") expect_s4_class(calc_CentralDose(temp_NA), "RLum.Results") expect_warning(calc_CentralDose(temp_NA, na.rm = TRUE)) expect_error(calc_CentralDose(data.frame()), "should have at least two columns and two rows!") }) test_that("standard and output", { testthat::skip_on_cran() local_edition(3) expect_equal(is(temp), c("RLum.Results", "RLum")) expect_equal(length(temp), 4) ##log and trace expect_s4_class(calc_CentralDose(ExampleData.DeValues$CA1, log = FALSE, trace = TRUE), "RLum.Results") }) test_that("check summary output", { testthat::skip_on_cran() local_edition(3) results <- get_RLum(temp) expect_equal(round(results$de, digits = 5), 65.70929) expect_equal(round(results$de_err, digits = 6), 3.053443) expect_equal(round(results$OD, digits = 5), 22.79495) expect_equal(round(results$OD_err, digits = 6), 2.272736) expect_equal(round(results$rel_OD, 
digits = 5), 34.69061) expect_equal(round(results$rel_OD_err, digits = 6), 3.458774) expect_equal(round(results$Lmax, digits = 5), 31.85046) }) Luminescence/tests/testthat/test_calc_MaxDose.R0000755000176200001440000000206714062436223021404 0ustar liggesusersdata(ExampleData.DeValues, envir = environment()) temp <- calc_MaxDose(ExampleData.DeValues$CA1, sigmab = 0.2, par = 3, plot = FALSE, verbose = FALSE) test_that("check class and length of output", { testthat::skip_on_cran() local_edition(3) expect_s4_class(temp, "RLum.Results") expect_equal(length(temp), 9) }) test_that("check values from output example", { testthat::skip_on_cran() local_edition(3) results <- get_RLum(temp) expect_equal(round(results$de, digits = 2), 76.58) expect_equal(round(results$de_err, digits = 2), 7.57) expect_equal(results$ci_level, 0.95) expect_equal(round(results$ci_lower, digits = 2), 69.65) expect_equal(round(results$ci_upper, digits = 2), 99.33) expect_equal(results$par, 3) expect_equal(round(results$sig, digits = 2), 1.71) expect_equal(round(results$p0, digits = 2), 0.65) expect_equal(results$mu, NA) expect_equal(round(results$Lmax, digits = 2), -19.79) expect_equal(round(results$BIC, digits = 2), 58.87) }) Luminescence/tests/testthat/test_calc_CosmicDoseRate.R0000644000176200001440000000346014062436223022703 0ustar liggesuserstemp <- calc_CosmicDoseRate(depth = 2.78, density = 1.7, latitude = 38.06451, longitude = 1.49646, altitude = 364, error = 10) test_that("check class and length of output", { testthat::skip_on_cran() local_edition(3) expect_s4_class(temp, "RLum.Results") expect_equal(length(temp), 3) }) test_that("check values from output example 1", { testthat::skip_on_cran() local_edition(3) results <- get_RLum(temp) expect_equal(results$depth, 2.78) expect_equal(results$density, 1.7) expect_equal(results$latitude, 38.06451) expect_equal(results$longitude, 1.49646) expect_equal(results$altitude, 364) expect_equal(round(results$total_absorber.gcm2, digits = 0), 473) 
expect_equal(round(results$d0, digits = 3), 0.152) expect_equal(round(results$geom_lat, digits = 1), 41.1) expect_equal(round(results$dc, digits = 3), 0.161) }) test_that("check values from output example 2b", { testthat::skip_on_cran() local_edition(3) temp <- calc_CosmicDoseRate(depth = c(5.0, 2.78), density = c(2.65, 1.7), latitude = 12.04332, longitude = 4.43243, altitude = 364, corr.fieldChanges = TRUE, est.age = 67, error = 15) results <- get_RLum(temp) expect_equal(results$depth.1, 5) expect_equal(results$depth.2, 2.78) expect_equal(results$density.1, 2.65) expect_equal(results$density.2, 1.7) expect_equal(results$latitude, 12.04332) expect_equal(results$longitude, 4.43243) expect_equal(results$altitude, 364) expect_equal(round(results$total_absorber.gcm2, digits = 0), 1798) expect_equal(round(results$d0, digits = 4), 0.0705) expect_equal(round(results$geom_lat, digits = 1), 15.1) expect_equal(round(results$dc, digits = 3), 0.072) }) Luminescence/tests/testthat/test_names_RLum.R0000644000176200001440000000054414062436223021117 0ustar liggesuserstest_that("Test whether function works", { testthat::skip_on_cran() local_edition(3) data(ExampleData.RLum.Analysis, envir = environment()) expect_silent(names_RLum(IRSAR.RF.Data)) expect_type(names_RLum(IRSAR.RF.Data), "character") ##test a list of such elements expect_type(names_RLum(list(IRSAR.RF.Data,IRSAR.RF.Data, "a")), "list") }) Luminescence/tests/testthat/test_fit_ThermalQuenching.R0000644000176200001440000000323314062436223023153 0ustar liggesusers##create example data data <- data.frame( T = c(25, 40, 50, 60, 70, 80, 90, 100, 110), V = c(0.06, 0.058, 0.052, 0.051, 0.041, 0.034, 0.035, 0.033, 0.032), V_X = c(0.012, 0.009, 0.008, 0.008, 0.007, 0.006, 0.005, 0.005, 0.004)) data_list <- list(data, data) data_NA <- data data_NA[1,] <- NA test_that("standard check", { testthat::skip_on_cran() local_edition(3) ##trigger errors expect_error(fit_ThermalQuenching(data = "test")) ##simple run with error 
expect_error(fit_ThermalQuenching( data = data[,1:2], n.MC = NULL), regexp = "'data' is empty or has less than three columns!") ##simple run with warning expect_warning(fit_ThermalQuenching( data = cbind(data,data), n.MC = NULL), regexp = "data' has more than 3 columns, taking only the first three!") ##simple run with warning NA expect_warning(fit_ThermalQuenching( data = data_NA, n.MC = NULL), regexp = "NA values in 'data' automatically removed!") ##simple run expect_s4_class(fit_ThermalQuenching( data = data, n.MC = NULL), class = "RLum.Results") ##simple run with fitting error expect_null(fit_ThermalQuenching( data = data.frame(T = 1:10, V = 1:10, V_X = 1:10), n.MC = NULL)) # ##switch off weights expect_s4_class(fit_ThermalQuenching( data = data, method_control = list(weights = NULL), n.MC = NULL), class = "RLum.Results") ##simple list run expect_s4_class(fit_ThermalQuenching( data = data_list, n.MC = NULL), class = "RLum.Results") ##simple run without plot etc expect_s4_class(fit_ThermalQuenching( data = data, verbose = FALSE, plot = TRUE, n.MC = 10), class = "RLum.Results") }) Luminescence/tests/testthat/test_analyse_FadingMeasurement.R0000644000176200001440000000623414145666016024200 0ustar liggesuserstest_that("general test", { testthat::skip_on_cran() local_edition(3) ## load example data (sample UNIL/NB123, see ?ExampleData.Fading) data("ExampleData.Fading", envir = environment()) ##(1) get fading measurement data (here a three column data.frame) fading_data <- ExampleData.Fading$fading.data$IR50 ##break function expect_error(analyse_FadingMeasurement(object = "test"), regexp = "'object' needs to be of type 'RLum.Analysis' or a 'list' of such objects!") ## run routine analysis expect_s4_class(analyse_FadingMeasurement( fading_data, plot = TRUE, verbose = TRUE, n.MC = 10), class = "RLum.Results") ##no plot not verbose expect_s4_class(analyse_FadingMeasurement( fading_data, plot = FALSE, verbose = FALSE, n.MC = 10), class = "RLum.Results") ## test merging of 
objects if combined in a list ## this crashed before and was fixed expect_s4_class(merge_RLum( list(analyse_FadingMeasurement( fading_data[1,], plot = FALSE, verbose = FALSE, n.MC = 10), analyse_FadingMeasurement( fading_data[1:10,], plot = FALSE, verbose = FALSE, n.MC = 10))), "RLum.Results") }) test_that("test XSYG file fading data", { testthat::skip_on_cran() local_edition(3) # Create artificial object ------------------------------------------------ l <- list() time <- 0 for(x in runif(3, 120,130)) { ## set irr irr <- set_RLum( "RLum.Data.Curve", data = matrix(c(1:x, rep(1,x)), ncol = 2), originator = "read_XSYG2R", recordType = "irradiation (NA)", curveType = "simulated", info = list( startDate = format(Sys.time() + time, "%Y%m%d%H%M%S"), position = 1) ) ## set lum lum <- set_RLum( "RLum.Data.Curve", data = matrix(c(1:40, exp(-c(1:40)/ x * 10)), ncol = 2), originator = "read_XSYG2R", recordType = "IRSL", curveType = "measured", info = list( startDate = format(Sys.time() + time + x + 30, "%Y%m%d%H%M%S"), position = 1) ) time <- time + x + 60 l <- c(l, irr, lum) } ## generate object object <- set_RLum("RLum.Analysis", records = l, originator = "read_XSYG2R") # Test -------------------------------------------------------------------- expect_s4_class(analyse_FadingMeasurement( object, signal.integral = 1:2, background.integral = 10:40, structure = "Lx" ), "RLum.Results") ## check various for t_star ## stop t_star expect_error(analyse_FadingMeasurement( object, signal.integral = 1:2, t_star = "error", background.integral = 10:40, structure = "Lx" ), "\\[analyse_FadingMeasurement\\(\\)\\] Invalid input for t_star.") expect_s4_class(analyse_FadingMeasurement( object, signal.integral = 1:2, t_star = "half_complex", background.integral = 10:40, structure = "Lx", plot = FALSE ), "RLum.Results") expect_s4_class(analyse_FadingMeasurement( object, signal.integral = 1:2, t_star = "end", background.integral = 10:40, structure = "Lx", plot = FALSE ), "RLum.Results") }) 
Luminescence/tests/testthat/test_calc_gSGC.R0000644000176200001440000000256214062436223020624 0ustar liggesusersset.seed(seed = 1) temp <- calc_gSGC(data = data.frame( LnTn = 2.361, LnTn.error = 0.087, Lr1Tr1 = 2.744, Lr1Tr1.error = 0.091, Dr1 = 34.4), plot = FALSE, verbose = FALSE ) test_that("plot and verbose and so", { testthat::skip_on_cran() local_edition(3) expect_s4_class(calc_gSGC(data = data.frame( LnTn = 2.361, LnTn.error = 0.087, Lr1Tr1 = 2.744, Lr1Tr1.error = 0.091, Dr1 = 34.4), plot = TRUE, verbose = TRUE ), "RLum.Results") }) test_that("test errors", { testthat::skip_on_cran() local_edition(3) expect_error(calc_gSGC(data = NA)) expect_error(calc_gSGC(data = data.frame( LnTn = 2.361, LnTn.error = 0.087, Lr1Tr1 = 2.744, Lr1Tr1.error = 0.091, Dr1 = 34.4), gSGC.type = 1, plot = FALSE, verbose = FALSE)) expect_error(calc_gSGC(data = data.frame(a = 1, b = 1, c = 1, d = 1, e = 1, f = 1))) }) test_that("check class and length of output", { testthat::skip_on_cran() expect_is(temp, class = "RLum.Results", info = NULL, label = NULL) expect_is(temp$De, class = "data.frame", info = NULL, label = NULL) expect_is(temp$De.MC, class = "list", info = NULL, label = NULL) expect_equal(length(temp), 3) }) test_that("check values from output example", { testthat::skip_on_cran() expect_equal(round(sum(temp$De), digits = 2), 30.39) expect_equal(round(sum(temp$De.MC[[1]]), 0), 10848) }) Luminescence/tests/testthat/test_calc_OSLLxTxRatio.R0000755000176200001440000002044214062436223022315 0ustar liggesusers##preloads data(ExampleData.LxTxOSLData, envir = environment()) temp <- calc_OSLLxTxRatio( Lx.data = Lx.data, Tx.data = Tx.data, signal.integral = c(1:2), background.integral = c(85:100)) test_that("check class and length of output", { testthat::skip_on_cran() local_edition(3) expect_equal(is(temp), c("RLum.Results", "RLum")) expect_equal(length(temp), 2) }) test_that("test arguments", { testthat::skip_on_cran() local_edition(3) ##digits expect_silent(calc_OSLLxTxRatio( 
Lx.data, Tx.data, signal.integral = c(1:2), background.integral = c(85:100), digits = 1)) ##sigmab expect_silent(calc_OSLLxTxRatio( Lx.data, Tx.data, signal.integral = c(1:2), background.integral = c(85:100), sigmab = c(1000,100) )) ##poisson expect_silent(calc_OSLLxTxRatio( Lx.data, Tx.data, signal.integral = c(1:2), background.integral = c(85:100), background.count.distribution = "poisson" )) }) test_that("test input", { testthat::skip_on_cran() local_edition(3) ##RLum.Curve expect_silent(calc_OSLLxTxRatio( set_RLum(class = "RLum.Data.Curve", data = as.matrix(Lx.data)), set_RLum(class = "RLum.Data.Curve", data = as.matrix(Tx.data)), signal.integral = c(1:2), background.integral = c(70:100))) ##matrix expect_silent(calc_OSLLxTxRatio( as.matrix(Lx.data), as.matrix(Tx.data), signal.integral = c(1:2), background.integral = c(70:100))) ##numeric expect_silent(calc_OSLLxTxRatio( as.numeric(Lx.data[,2]), as.numeric(Tx.data[,2]), signal.integral = c(1:2), background.integral = c(70:100))) ##RLum.Curve expect_silent(calc_OSLLxTxRatio( set_RLum(class = "RLum.Data.Curve", data = as.matrix(Lx.data)), Tx.data = NULL, signal.integral = c(1:2), background.integral = c(70:100))) ##matrix expect_silent(calc_OSLLxTxRatio( as.matrix(Lx.data), Tx.data = NULL, signal.integral = c(1:2), background.integral = c(70:100))) ##numeric expect_silent(calc_OSLLxTxRatio( as.numeric(Lx.data[,2]), Tx.data = NULL, signal.integral = c(1:2), background.integral = c(70:100))) }) test_that("force function break", { testthat::skip_on_cran() local_edition(3) expect_error(calc_OSLLxTxRatio( Lx.data[1:10,], Tx.data, signal.integral = c(1:2), background.integral = c(85:100) ), "Channel numbers of Lx and Tx data differ!") expect_error(calc_OSLLxTxRatio( "Lx.data", Tx.data, signal.integral = c(1:2), background.integral = c(85:100) ), "Data type of Lx and Tx data differs!") expect_error(calc_OSLLxTxRatio( "Lx.data", "Tx.data", signal.integral = c(1:2), background.integral = c(85:100) ), "Data type error! 
Required types are data.frame or numeric vector.") expect_error(calc_OSLLxTxRatio( Lx.data, Tx.data, signal.integral = c(1:2000), background.integral = c(85:100) ), "signal.integral is not valid!") expect_error(calc_OSLLxTxRatio( Lx.data, Tx.data, signal.integral = c(1:90), background.integral = c(85:100) ), "Overlapping of 'signal.integral' and 'background.integral' is not permitted!") expect_error(calc_OSLLxTxRatio( Lx.data, Tx.data, signal.integral = c(1:10), signal.integral.Tx = c(1:90), background.integral = c(85:100), background.integral.Tx = c(85:100) ), "Overlapping of 'signal.integral.Tx' and 'background.integral.Tx' is not permitted!") expect_error(calc_OSLLxTxRatio( Lx.data, Tx.data, signal.integral = c(1:20), background.integral = c(85:1000) ), "background.integral is not valid! Max: 100") expect_error(calc_OSLLxTxRatio( Lx.data, Tx.data, signal.integral = c(1:10), signal.integral.Tx = c(1:10), background.integral = c(85:100), background.integral.Tx = c(85:10000) ), "background.integral.Tx is not valid! 
Max: 100") expect_error(calc_OSLLxTxRatio( Lx.data, Tx.data, signal.integral = c(1:10), signal.integral.Tx = c(1:1000), background.integral = c(85:100), background.integral.Tx = c(85:100) ), "signal.integral.Tx is not valid!") expect_error(calc_OSLLxTxRatio( Lx.data, Tx.data, signal.integral = c(1:20), signal.integral.Tx = c(1:20), background.integral = 80:100, background.integral.Tx = NULL ), "You have to provide both: signal.integral.Tx and background.integral.Tx!") expect_error(calc_OSLLxTxRatio( Lx.data, Tx.data, signal.integral = c(1:20), background.integral = 80:100, sigmab = "test" ), "'sigmab' has to be of type numeric.") expect_error(calc_OSLLxTxRatio( Lx.data, Tx.data, signal.integral = c(1:20), background.integral = 80:100, sigmab = 1:100 ), "Maximum allowed vector length for 'sigmab' is 2.") }) test_that("create warnings", { testthat::skip_on_cran() local_edition(3) expect_warning(calc_OSLLxTxRatio( Lx.data, Tx.data, signal.integral = c(1:20), signal.integral.Tx = c(1:20), background.integral = 80:100, background.integral.Tx = 60:100 ), "Number of background channels for Lx < 25; error estimation might not be reliable!") expect_warning(calc_OSLLxTxRatio( Lx.data, Tx.data, signal.integral = c(1:20), signal.integral.Tx = c(1:20), background.integral = 60:100, background.integral.Tx = 80:100 ), "Number of background channels for Tx < 25; error estimation might not be reliable!",) expect_warning(calc_OSLLxTxRatio( Lx.data, Tx.data, signal.integral = c(1:20), background.integral = 60:100, background.count.distribution = "hallo" ), "Unknown method for background.count.distribution. A non-poisson distribution is assumed!") expect_warning(calc_OSLLxTxRatio( Lx.data, Tx.data, signal.integral = c(1:20), signal.integral.Tx = c(2:20), background.integral = 60:100, background.integral.Tx = 40:100, use_previousBG = TRUE ), "For option use_previousBG = TRUE independent Lx and Tx integral limits are not allowed. 
Integral limits of Lx used for Tx.") }) test_that("check weird circumstances", { testthat::skip_on_cran() local_edition(3) ##(1) - Lx curve 0 expect_type(calc_OSLLxTxRatio( data.frame(Lx.data[,1],0), Tx.data, signal.integral = c(1:2), background.integral = c(85:100) )$LxTx.table, type = "list") ##(2) - Tx curve 0 expect_type(calc_OSLLxTxRatio( Lx.data, data.frame(Tx.data[,1],0), signal.integral = c(1:2), background.integral = c(85:100) )$LxTx.table, type = "list") ##(3) - Lx and Tx curve 0 expect_type(calc_OSLLxTxRatio( data.frame(Lx.data[,1],0), data.frame(Tx.data[,1],0), signal.integral = c(1:2), background.integral = c(85:100) )$LxTx.table, type = "list") ##(4) - Lx < 0 expect_type(calc_OSLLxTxRatio( data.frame(Lx.data[,1],-1000), data.frame(Tx.data[,1],0), signal.integral = c(1:2), background.integral = c(85:100) )$LxTx.table, type = "list") ##(5) - Tx < 0 expect_type(calc_OSLLxTxRatio( Lx.data, data.frame(Lx.data[,1],-1000), signal.integral = c(1:2), background.integral = c(85:100) )$LxTx.table, type = "list") ##(6) - Lx & Tx < 0 expect_type(calc_OSLLxTxRatio( data.frame(Lx.data[,1],-1000), data.frame(Tx.data[,1],-1000), signal.integral = c(1:2), background.integral = c(85:100) )$LxTx.table, type = "list") }) test_that("check values from output example", { testthat::skip_on_cran() local_edition(3) results <- get_RLum(temp) expect_equal(results$LnLx, 81709) expect_equal(results$LnLx.BG, 530) expect_equal(results$TnTx, 7403) expect_equal(results$TnTx.BG, 513) expect_equal(results$Net_LnLx, 81179) expect_equal(round(results$Net_LnLx.Error, digits = 4), 286.5461) expect_equal(results$Net_TnTx, 6890) expect_equal(round(results$Net_TnTx.Error, digits = 5), 88.53581) expect_equal(round(results$LxTx, digits = 5), 11.78215) expect_equal(round(results$LxTx.Error, digits = 7), 0.1570077) }) test_that("test NA mode with no signal integrals", { testthat::skip_on_cran() local_edition(3) data(ExampleData.LxTxOSLData, envir = environment()) temp <- 
expect_s4_class(calc_OSLLxTxRatio( Lx.data = Lx.data, Tx.data = Tx.data, signal.integral = NA, background.integral = NA), "RLum.Results") expect_equal(round(sum(temp$LxTx.table[1,]),0), 391926) }) Luminescence/tests/testthat/test_analyse_SARCWOSL.R0000644000176200001440000002243714404674556022050 0ustar liggesusers##prepare test file for regression test set.seed(1) data(ExampleData.BINfileData, envir = environment()) object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos = 1:2) results <- analyse_SAR.CWOSL( object = object[[1]], signal.integral.min = 1, signal.integral.max = 2, background.integral.min = 900, background.integral.max = 1000, plot = FALSE, verbose = FALSE ) ##generate different datasets removing TL curves object_CH_TL <- get_RLum(object, record.id = -seq(1,30,4), drop = FALSE) object_NO_TL <- get_RLum(object, record.id = -seq(1,30,2), drop = FALSE) test_that("tests class elements", { testthat::skip_on_cran() local_edition(3) expect_s4_class(results, "RLum.Results") expect_equal(length(results), 4) expect_s3_class(results$data, "data.frame") expect_s3_class(results$LnLxTnTx.table, "data.frame") expect_s3_class(results$rejection.criteria, "data.frame") expect_type(results$Formula, "expression") }) test_that("regression tests De values", { testthat::skip_on_cran() local_edition(3) ##fix for different R versions if(R.version$major == "3" && as.numeric(R.version$minor) < 6){ expect_equal(object = round(sum(results$data[1:2]), digits = 0), 1716) }else{ expect_equal(object = round(sum(results$data[1:2]), digits = 0), 1716) } }) test_that("regression test LxTx table", { testthat::skip_on_cran() local_edition(3) expect_equal(object = round(sum(results$LnLxTnTx.table$LxTx), digits = 5), 20.92051) expect_equal(object = round(sum(results$LnLxTnTx.table$LxTx.Error), digits = 2), 0.34) }) test_that("regression test - check rejection criteria", { testthat::skip_on_cran() local_edition(3) ##fix for different R versions if(R.version$major == "3" && 
as.numeric(R.version$minor) < 6){ expect_equal(object = round(sum(results$rejection.criteria$Value), digits = 0), 1669) }else{ expect_equal(object = round(sum(results$rejection.criteria$Value), digits = 0), 1669) } }) test_that("simple run", { testthat::skip_on_cran() local_edition(3) ##verbose and plot off expect_s4_class( analyse_SAR.CWOSL( object = object[1:2], signal.integral.min = 1, signal.integral.max = 2, background.integral.min = 900, background.integral.max = 1000, fit.method = "LIN", plot = FALSE, verbose = FALSE ), class = "RLum.Results" ) local_edition(3) ##signal integral set to NA expect_warning( analyse_SAR.CWOSL( object = object[1], signal.integral.min = NA, signal.integral.max = NA, background.integral.min = NA, background.integral.max = NA, fit.method = "EXP", plot = FALSE, verbose = FALSE, fit.weights = FALSE ), "\\[analyse_SAR.CWOSL\\(\\)\\] No signal or background integral applied, because they were set to NA\\!") expect_s4_class( suppressWarnings(analyse_SAR.CWOSL( object = object[1], signal.integral.min = NA, signal.integral.max = NA, background.integral.min = NA, background.integral.max = NA, fit.method = "EXP", plot = FALSE, verbose = FALSE )), class = "RLum.Results" ) ##verbose and plot on ##full dataset expect_s4_class( analyse_SAR.CWOSL( object = object[[1]], signal.integral.min = 1, signal.integral.max = 2, background.integral.min = 900, background.integral.max = 1000, fit.method = "LIN", log = "x", ), class = "RLum.Results" ) ##only CH TL expect_s4_class( analyse_SAR.CWOSL( object = object_CH_TL[[1]], signal.integral.min = 1, signal.integral.max = 2, background.integral.min = 900, background.integral.max = 1000, fit.method = "LIN", log = "x", plot_onePage = TRUE ), class = "RLum.Results" ) ##no TL expect_s4_class( analyse_SAR.CWOSL( object = object_NO_TL[[1]], signal.integral.min = 1, signal.integral.max = 2, background.integral.min = 900, background.integral.max = 1000, fit.method = "LIN", log = "x", plot_onePage = TRUE ), class = 
"RLum.Results" ) ##plot single expect_s4_class( analyse_SAR.CWOSL( object = object[[1]], signal.integral.min = 1, signal.integral.max = 2, background.integral.min = 900, background.integral.max = 1000, fit.method = "EXP", plot = TRUE, plot.single = TRUE ), class = "RLum.Results" ) ##check rejection criteria expect_s4_class( analyse_SAR.CWOSL( object = object[[1]], signal.integral.min = 1, signal.integral.max = 2, background.integral.min = 900, background.integral.max = 1000, fit.method = "LIN", rejection.criteria= list( recycling.ratio = NA, recuperation.rate = 1, palaeodose.error = 1, testdose.error = 1, test = "new", exceed.max.regpoint = FALSE), plot = TRUE, ), class = "RLum.Results" ) # Trigger stops ----------------------------------------------------------- ##trigger stops for parameters ##object expect_error(analyse_SAR.CWOSL( object = "fail", background.integral.min = 900, fit.method = "LIN", plot = FALSE, verbose = FALSE ), regexp = "Input object is not of type 'RLum.Analysis'!") ##check stop for OSL.components ... 
failing expect_null(analyse_SAR.CWOSL( object = object[[1]], signal.integral.min = 1, signal.integral.max = 2, background.integral.min = 900, background.integral.max = 1000, dose.points = c(0,1,2), fit.method = "LIN", OSL.component = 1, plot = FALSE, verbose = FALSE )) expect_error(analyse_SAR.CWOSL( object = object[[1]], signal.integral.min = 1, signal.integral.max = 2, background.integral.min = 900, background.integral.max = 1000, dose.points = c(0,1,2), fit.method = "LIN", plot = FALSE, verbose = FALSE ), regexp = "length 'dose.points' differs from number of curves") expect_null(analyse_SAR.CWOSL( object = set_RLum("RLum.Analysis", records = list(set_RLum("RLum.Data.Curve", recordType = "false"))), signal.integral.min = 1, signal.integral.max = 2, background.integral.min = 800, background.integral.max = 900, fit.method = "LIN", plot = FALSE, verbose = FALSE )) ##check background integral expect_warning(analyse_SAR.CWOSL( object = object[[1]], signal.integral.min = 1, signal.integral.max = 2, background.integral.min = 800, background.integral.max = 9900, fit.method = "LIN", plot = FALSE, verbose = FALSE ), regexp = "Background integral out of bounds") local_edition(3) }) test_that("advance tests run", { testthat::skip_on_cran() local_edition(3) ##this tests basically checks the parameter expansion and make ##sure everything is evaluated properly # signal.integral.min <- 1 # signal.integral.max <- 2 ##test with variables for signal integral # expect_s4_class( # analyse_SAR.CWOSL( # object = object[1:2], # signal.integral.min = signal.integral.min, # signal.integral.max = signal.integral.max, # background.integral.min = 900, # background.integral.max = 1000, # fit.method = "LIN", # rejection.criteria = list( # recycling.ratio = NA, # recuperation.rate = 1, # palaeodose.error = 1, # testdose.error = 1, # test = "new", # exceed.max.regpoint = FALSE), # plot = FALSE, # verbose = FALSE # ), # class = "RLum.Results" # ) ##test rejection criteria is a list without(!) 
names, ##this should basically lead to no fail test_failed <- analyse_SAR.CWOSL( object = object[1], signal.integral.min = 1, signal.integral.max = 2, background.integral.min = 200, background.integral.max = 1000, fit.method = "LIN", rejection.criteria = list(recycling.ratio = 0), plot = FALSE, verbose = FALSE) expect_equal(object = test_failed$data$RC.Status, "FAILED") ##the same test but without a named list >>> OK test_ok <- analyse_SAR.CWOSL( object = object[1], signal.integral.min = 1, signal.integral.max = 2, background.integral.min = 200, background.integral.max = 1000, fit.method = "LIN", rejection.criteria = list(1), plot = FALSE, verbose = FALSE) expect_equal(object = test_ok$data$RC.Status, "OK") ##test multi parameter settings expect_s4_class( analyse_SAR.CWOSL( object = object[1:2], signal.integral.min = 1, signal.integral.max = list(10,20), background.integral.min = 900, background.integral.max = 1000, fit.method = "LIN", plot = FALSE, verbose = FALSE ), class = "RLum.Results" ) ##test rejection criteria list in list + test unknown argument expect_s4_class( analyse_SAR.CWOSL( object = object[1:2], signal.integral.min = 1, signal.integral.max = list(10,20), background.integral.min = 900, background.integral.max = 1000, rejection.criteria = list(list(recycling.ratio = 0)), fit.method = "LIN", unknown_argument = "hallo", plot = TRUE, verbose = FALSE ), class = "RLum.Results" ) }) Luminescence/tests/testthat/test_read_XSYG2R.R0000644000176200001440000000267314062436223021053 0ustar liggesuserstest_that("test import of XSYG files", { testthat::skip_on_cran() local_edition(3) ##force error expect_type(read_XSYG2R("https://raw.githubusercontent.com/R-Lum/rxylib/master/inst/extg", fastForward = TRUE), type = "NULL") expect_type(read_XSYG2R("/Test", fastForward = TRUE), type = "NULL") ##successful import expect_type(read_XSYG2R("https://raw.githubusercontent.com/R-Lum/rxylib/master/inst/extdata/TLSpectrum.xsyg", import = FALSE), type = "list") 
expect_s3_class(read_XSYG2R("https://raw.githubusercontent.com/R-Lum/rxylib/master/inst/extdata/TLSpectrum.xsyg", fastForward = TRUE, import = FALSE), class = "data.frame") expect_silent(read_XSYG2R("https://raw.githubusercontent.com/R-Lum/rxylib/master/inst/extdata/TLSpectrum.xsyg", verbose = FALSE)) expect_silent(read_XSYG2R("https://raw.githubusercontent.com/R-Lum/rxylib/master/inst/extdata/TLSpectrum.xsyg", verbose = FALSE, recalculate.TL.curves = FALSE)) expect_silent(read_XSYG2R("https://raw.githubusercontent.com/R-Lum/rxylib/master/inst/extdata/TLSpectrum.xsyg", verbose = FALSE, pattern = "xsyg",recalculate.TL.curves = FALSE)) expect_type(read_XSYG2R("https://raw.githubusercontent.com/R-Lum/rxylib/master/inst/extdata/TLSpectrum.xsyg", fastForward = FALSE), type = "list") results <- expect_type(read_XSYG2R("https://raw.githubusercontent.com/R-Lum/rxylib/master/inst/extdata/TLSpectrum.xsyg", fastForward = TRUE), type = "list") expect_output(print(results)) }) Luminescence/tests/testthat/test_plot_ROI.R0000644000176200001440000000250414212442347020543 0ustar liggesuserstest_that("Complete test", { testthat::skip_on_cran() local_edition(3) ##create suitable dataset file <- system.file("extdata", "RF_file.rf", package = "Luminescence") temp <- read_RF2R(file) ##crash function expect_error(plot_ROI(object = "stop"), regexp = "\\[plot\\_ROI\\(\\)\\] Input for 'object' not supported, please check documentation!") ##test standard cases expect_silent(plot_ROI(temp)) expect_silent(plot_ROI(temp, grid = TRUE)) expect_silent(plot_ROI(temp, dim.CCD = c(8192,8192))) expect_silent(plot_ROI(temp, dist_thre = 20)) expect_silent(plot_ROI(temp, exclude_ROI = NULL)) ##test non-list case expect_silent(plot_ROI(temp[[1]])) expect_silent(plot_ROI(temp[[1]], exclude_ROI = NULL)) ##output only case expect_s4_class(plot_ROI(temp, plot = FALSE), class = "RLum.Results") ## test combination with extract_ROI() m <- matrix(runif(100,0,255), ncol = 10, nrow = 10) roi <- 
matrix(c(2.,4,2,5,6,7,3,1,1), ncol = 3) t <- extract_ROI(object = m, roi = roi) expect_s4_class(plot_ROI(t, bg_image = m, exclude_ROI = NULL), "RLum.Results") ## trigger warning expect_warning(plot_ROI(t, bg_image = "stop", exclude_ROI = NULL), "\\[plot\\_ROI\\(\\)] 'bg\\_image' is not of type RLum.Data.Image and cannot be converted into such; background image plot skipped!") }) Luminescence/tests/testthat/test_github.R0000644000176200001440000000251014062436223020332 0ustar liggesusers## NOTE: # Unauthenticated requests to the GiHub APIv3 are limited to 60 requests per hour # (associated with the originating request). Exceeding the rate limit results in a # 403 Forbidden reply. Since CIs make multiple requests when testing the rate limit # is easily reached. We check whether we either get a valid response, or at least # a 403 response. test_that("Check github_commits()", { testthat::skip_on_cran() local_edition(3) response <- tryCatch(github_commits(), error = function(e) return(e)) if (inherits(response, "error")){ expect_output(print(response), regexp = "status code 403") } else { expect_s3_class(response, "data.frame") } rm(response) }) test_that("Check github_branches()", { testthat::skip_on_cran() local_edition(3) response <- tryCatch(github_branches(), error = function(e) return(e)) if (inherits(response, "error")) { expect_output(print(response), regexp = "status code 403") }else { expect_s3_class(response, "data.frame") } rm(response) }) test_that("Check github_issues()", { testthat::skip_on_cran() local_edition(3) response <- tryCatch(github_issues(), error = function(e) return(e)) if (inherits(response, "error")){ expect_output(print(response), regexp = "status code 403") }else{ expect_type(response, "list") } rm(response) }) Luminescence/tests/testthat/test_RisoeBINfileData-class.R0000644000176200001440000000114614062436223023223 0ustar liggesuserstest_that("Check the example and the numerical values", { testthat::skip_on_cran() local_edition(3) 
##construct empty object temp <- set_Risoe.BINfileData(METADATA = data.frame(), DATA = list(), .RESERVED = list()) ##get function and check whether we get NULL expect_null(get_Risoe.BINfileData(temp)) ##check object expect_s4_class(temp, class = "Risoe.BINfileData") expect_output(show(temp)) ##show method data(ExampleData.BINfileData, envir = environment()) expect_output(show(CWOSL.SAR.Data)) ##as.data.frame expect_s3_class(as.data.frame(CWOSL.SAR.Data), "data.frame") }) Luminescence/tests/testthat/test_write_RLum2CSV.R0000644000176200001440000000222314464125673021612 0ustar liggesuserstest_that("test errors and general export function", { testthat::skip_on_cran() local_edition(3) ##test error expect_error(write_RLum2CSV(object = "", export = FALSE), regexp = "[write_RLum2CSV()] Object needs to be a member of the object class RLum!", fixed = TRUE) ##test export data("ExampleData.portableOSL", envir = environment()) expect_type(write_RLum2CSV(ExampleData.portableOSL, export = FALSE), "list") ##test RLum.Results objects ## load example data data(ExampleData.DeValues, envir = environment()) results <- calc_CommonDose(ExampleData.DeValues$CA1) ##using option compact expect_warning(write_RLum2CSV(object = results,export = FALSE), regexp = "elements could not be converted to a CSV-structure!") ##using option compact = FALSE expect_warning(write_RLum2CSV(object = results,export = FALSE, compact = TRUE), regexp = "elements could not be converted to a CSV-structure!") ##real export expect_warning( write_RLum2CSV(object = results, path = tempdir(), compact = TRUE), regexp = "elements could not be converted to a CSV-structure!") }) Luminescence/tests/testthat/test_merge_RLumDataCurve.R0000644000176200001440000000241714264017373022720 0ustar liggesuserstest_that("Merge tests", { testthat::skip_on_cran() local_edition(3) ##load example data data(ExampleData.XSYG, envir = environment()) TL.curves <- get_RLum(OSL.SARMeasurement$Sequence.Object, recordType = "TL (UVVIS)") 
TL.curve.1 <- TL.curves[[1]] TL.curve.3 <- TL.curves[[3]] TL.curve.3_short <- TL.curves[[3]] TL.curve.3_short@data <- TL.curve.3_short@data[1:(nrow(TL.curve.3_short@data) - 1),] ##check for error expect_error(merge_RLum.Data.Curve("", merge.method = "/")) ## check warning for different curve lengths expect_warning(merge_RLum.Data.Curve(list(TL.curve.1, TL.curve.3_short), merge.method = "mean")) ##check error for different resolution TL.curve.3_short@data <- TL.curve.3_short@data[-2,] expect_error(merge_RLum.Data.Curve(list(TL.curve.1, TL.curve.3_short), merge.method = "mean"), "\\[merge\\_RLum.Data.Curve\\(\\)\\] The objects do not seem to have the same channel resolution!") ##check various operations expect_s4_class(TL.curve.1 + TL.curve.3, "RLum.Data.Curve") expect_s4_class(TL.curve.1 - TL.curve.3, "RLum.Data.Curve") expect_s4_class(suppressWarnings(TL.curve.3 / TL.curve.1), "RLum.Data.Curve") expect_warning(TL.curve.3 / TL.curve.1) expect_s4_class(TL.curve.1 * TL.curve.3, "RLum.Data.Curve") }) Luminescence/tests/testthat/test_report_RLum.R0000644000176200001440000000120314062436223021320 0ustar liggesuserstest_that("Test Simple RLum Report", { testthat::skip_on_cran() local_edition(3) ## the test fails on AppVeyor for no obvious reason on the windows ## platform ... attempts to reproduce this failure failed. 
So ## we skip this platform for the test testthat::skip_on_os("windows") ### load example data data("ExampleData.DeValues") temp <- calc_CommonDose(ExampleData.DeValues$CA1) # create the standard HTML report testthat::expect_null(report_RLum(object = temp, timestamp = FALSE, show_report = FALSE)) testthat::expect_null(report_RLum(object = temp, timestamp = FALSE, show_report = FALSE, compact = FALSE)) }) Luminescence/tests/testthat/test_calc_FuchsLang2001.R0000755000176200001440000000216414062436223022217 0ustar liggesuserstest_that("check class and length of output", { testthat::skip_on_cran() local_edition(3) ##load example data data(ExampleData.DeValues, envir = environment()) ##break function expect_error(calc_FuchsLang2001( data = "ExampleData.DeValues$BT998", cvThreshold = 5, plot = FALSE, verbose = FALSE), "\\[calc_FuchsLang2001\\(\\)\\] 'data' has to be of type 'data.frame' or 'RLum.Results'\\!") ##the simple and silent run temp <- expect_s4_class( calc_FuchsLang2001( data = ExampleData.DeValues$BT998, cvThreshold = 5, plot = FALSE, verbose = FALSE), "RLum.Results") ##regression tests expect_equal(length(temp), 4) expect_equal(get_RLum(temp)$de, 2866.11) expect_equal(get_RLum(temp)$de_err, 157.35) expect_equal(get_RLum(temp)$de_weighted, 2846.66) expect_equal(get_RLum(temp)$de_weighted_err, 20.58) expect_equal(get_RLum(temp)$n.usedDeValues, 22) ##the check output output <- expect_s4_class( calc_FuchsLang2001( data = ExampleData.DeValues$BT998, cvThreshold = 5, plot = TRUE, verbose = TRUE ), "RLum.Results") }) Luminescence/tests/testthat/test_plot_GrowthCurve.R0000644000176200001440000003160514521207352022373 0ustar liggesuserstest_that("plot_GrowthCurve", { testthat::skip_on_cran() local_edition(3) ## load data data(ExampleData.LxTxData, envir = environment()) ##fit.method expect_error( object = plot_GrowthCurve(LxTxData, fit.method = "FAIL"), regexp = "\\[plot\\_GrowthCurve\\(\\)\\] Fit method not supported, supported.+") ## input object expect_error( 
object = plot_GrowthCurve("test"), regexp = "\\[plot\\_GrowthCurve\\(\\)\\] Argument 'sample' needs to be of type 'data.frame'\\!") ## shorten dataframe expect_error( object = plot_GrowthCurve(LxTxData[1:2,]), regexp = "\\[plot\\_GrowthCurve\\(\\)\\] At least two regeneration points are required!") ## wrong argument for mode expect_error( object = plot_GrowthCurve(LxTxData, mode = "fail"), regexp = "\\[plot\\_GrowthCurve\\(\\)\\] Unknown input for argument 'mode'") # Weird LxTx values -------------------------------------------------------- ##set LxTx LxTx <- structure(list( Dose = c(0, 250, 500, 750, 1000, 1500, 0, 500, 500), LxTx = c(1, Inf, 0, -Inf, Inf, 0, Inf, -0.25, 2), LxTx.Error = c(1.58133646008685, Inf, 0, Inf, Inf, 0, Inf, 1.41146256149428, 3.16267292017369)), class = "data.frame", row.names = c(NA, -9L)) ##fit expect_warning(Luminescence:::.warningCatcher( plot_GrowthCurve( sample = LxTx[,c("Dose", "LxTx", "LxTx.Error")], output.plot = FALSE))) ##all points have the same dose ... error but NULL data(ExampleData.LxTxData, envir = environment()) tmp_LxTx <- LxTxData tmp_LxTx$Dose <- 10 expect_null( object = plot_GrowthCurve(tmp_LxTx)) ## check input objects ... matrix expect_s4_class( object = plot_GrowthCurve(as.matrix(LxTxData), output.plot = FALSE), class = "RLum.Results") ## check input objects ... 
list expect_s4_class( object = plot_GrowthCurve(as.list(LxTxData), output.plot = FALSE), class = "RLum.Results") ## test case for only two columns expect_s4_class( object = suppressWarnings(plot_GrowthCurve(LxTxData[,1:2], output.plot = FALSE)), class = "RLum.Results") ## test case with all NA tmp_LxTx <- LxTxData tmp_LxTx$LxTx <- NA expect_null( object = suppressWarnings(plot_GrowthCurve(tmp_LxTx, output.plot = FALSE))) ## test defunct expect_error( object = plot_GrowthCurve(LxTxData[,1:2], output.plot = FALSE, na.rm = FALSE)) ## do not include reg point expect_s4_class( object = plot_GrowthCurve( sample = LxTxData, output.plot = FALSE, fit.includingRepeatedRegPoints = FALSE), class = "RLum.Results") # Check output for regression --------------------------------------------- set.seed(1) data(ExampleData.LxTxData, envir = environment()) temp_EXP <- plot_GrowthCurve( LxTxData, fit.method = "EXP", output.plot = FALSE, verbose = FALSE, NumberIterations.MC = 10 ) temp_LIN <- plot_GrowthCurve( LxTxData, fit.method = "LIN", output.plot = FALSE, verbose = FALSE, NumberIterations.MC = 10 ) temp_LIN <- plot_GrowthCurve( LxTxData, fit.method = "LIN", mode = "extrapolation", fit.force_through_origin = TRUE, output.plot = FALSE, verbose = FALSE, NumberIterations.MC = 10 ) temp_EXPLIN <- plot_GrowthCurve( LxTxData, fit.method = "EXP+LIN", output.plot = FALSE, verbose = FALSE, NumberIterations.MC = 10 ) temp_EXPEXP <- plot_GrowthCurve( LxTxData, fit.method = "EXP+EXP", output.plot = FALSE, verbose = FALSE, NumberIterations.MC = 10 ) temp_QDR <- plot_GrowthCurve( LxTxData, fit.method = "QDR", output.plot = FALSE, verbose = FALSE, NumberIterations.MC = 10 ) temp_QDR <- plot_GrowthCurve( LxTxData, fit.method = "QDR", output.plot = FALSE, mode = "extrapolation", fit.force_through_origin = TRUE, verbose = FALSE, NumberIterations.MC = 10 ) temp_GOK <- plot_GrowthCurve( LxTxData, fit.method = "GOK", output.plot = FALSE, verbose = FALSE, NumberIterations.MC = 10 ) temp_LambertW <- 
plot_GrowthCurve( LxTxData, fit.method = "LambertW", output.plot = FALSE, verbose = FALSE, NumberIterations.MC = 10 ) expect_s4_class(temp_EXP, class = "RLum.Results") expect_s3_class(temp_EXP$Fit, class = "nls") expect_s4_class(temp_LIN, class = "RLum.Results") expect_s3_class(temp_LIN$Fit, class = "lm") expect_s4_class(temp_EXPLIN, class = "RLum.Results") expect_s3_class(temp_EXPLIN$Fit, class = "nls") expect_s4_class(temp_EXPEXP, class = "RLum.Results") expect_s3_class(temp_EXPEXP$Fit, class = "nls") expect_s4_class(temp_QDR, class = "RLum.Results") expect_s3_class(temp_QDR$Fit, class = "lm") expect_s4_class(temp_GOK, class = "RLum.Results") expect_s3_class(temp_GOK$Fit, class = "nls") expect_s4_class(temp_LambertW, class = "RLum.Results") expect_s3_class(temp_LambertW$Fit, class = "nls") # Check more output ------------------------------------------------------- data(ExampleData.LxTxData, envir = environment()) set.seed(1) temp_EXP <- plot_GrowthCurve( LxTxData, fit.method = "EXP", output.plot = FALSE, verbose = FALSE, NumberIterations.MC = 10 ) temp_LIN <- plot_GrowthCurve( LxTxData, fit.method = "LIN", output.plot = FALSE, verbose = FALSE, NumberIterations.MC = 10 ) temp_EXPLIN <- plot_GrowthCurve( LxTxData, fit.method = "EXP+LIN", output.plot = FALSE, verbose = FALSE, NumberIterations.MC = 10 ) temp_EXPEXP <- plot_GrowthCurve( LxTxData, fit.method = "EXP+EXP", output.plot = FALSE, verbose = FALSE, NumberIterations.MC = 10 ) temp_QDR <- plot_GrowthCurve( LxTxData, fit.method = "QDR", output.plot = FALSE, verbose = FALSE, NumberIterations.MC = 10 ) temp_GOK <- plot_GrowthCurve( LxTxData, fit.method = "GOK", output.plot = FALSE, verbose = FALSE, NumberIterations.MC = 10) ## force through the origin temp_LxTx <-LxTxData temp_LxTx$LxTx[[7]] <- 1 expect_s4_class(plot_GrowthCurve( temp_LxTx, fit.method = "GOK", output.plot = FALSE, verbose = FALSE, NumberIterations.MC = 10, fit.force_through_origin = TRUE ), "RLum.Results") temp_LambertW <- plot_GrowthCurve( 
LxTxData, fit.method = "LambertW", output.plot = FALSE, verbose = FALSE, NumberIterations.MC = 10 ) expect_equal(round(temp_EXP$De[[1]], digits = 2), 1737.88) ##fix for different R versions if(R.version$major == "3" && as.numeric(R.version$minor) < 6){ expect_equal(round(sum(temp_EXP$De.MC, na.rm = TRUE), digits = 0), 17441) }else{ expect_equal(round(sum(temp_EXP$De.MC, na.rm = TRUE), digits = 0), 17562) } expect_equal(round(temp_LIN$De[[1]], digits = 2), 1811.33) ##fix for different R versions if(R.version$major == "3" && as.numeric(R.version$minor) < 6){ expect_equal(round(sum(temp_LIN$De.MC, na.rm = TRUE), digits = 0),18238) }else{ expect_equal(round(sum(temp_LIN$De.MC, na.rm = TRUE), digits = 0),18398) } expect_equal(round(temp_EXPLIN$De[[1]], digits = 2), 1791.53) ##fix for different R versions if(R.version$major == "3" && as.numeric(R.version$minor) < 6){ expect_equal(round(sum(temp_EXPLIN$De.MC, na.rm = TRUE), digits = 0),17474) }else{ expect_equal(round(sum(temp_EXPLIN$De.MC, na.rm = TRUE), digits = 0),18045) } expect_equal(round(temp_EXPEXP$De[[1]], digits = 2), 1787.15) ##fix for different R versions if(R.version$major == "3" && as.numeric(R.version$minor) < 6){ expect_equal(round(sum(temp_EXPEXP$De.MC, na.rm = TRUE), digits = 0), 7316) }else{ expect_equal(round(sum(temp_EXPEXP$De.MC, na.rm = TRUE), digits = 0), 7303, tolerance = 10) } expect_equal(round(temp_QDR$De[[1]], digits = 2), 1666.2) ##fix for different R versions if (R.version$major == "3" && as.numeric(R.version$minor) < 6){ expect_equal(round(sum(temp_QDR$De.MC, na.rm = TRUE), digits = 0), 14937) }else{ expect_equal(round(sum(temp_QDR$De.MC, na.rm = TRUE), digits = 0), 16476) } expect_equal(round(temp_GOK$De[[1]], digits = 0), 1786) ##fix for different R versions if (R.version$major > "3"){ if(any(grepl("aarch64", sessionInfo()$platform))) { expect_equal(round(sum(temp_GOK$De.MC, na.rm = TRUE), digits = 1), 17796, tolerance = 0.001) } else { expect_equal(round(sum(temp_GOK$De.MC, na.rm = 
TRUE), digits = 1), 17828.9, tolerance = 0.1) } } expect_equal(round(temp_LambertW$De[[1]], digits = 2), 1784.78) expect_equal(round(sum(temp_LambertW$De.MC, na.rm = TRUE), digits = 0), 17422) # Check extrapolation ----------------------------------------------------- ## load data data(ExampleData.LxTxData, envir = environment()) set.seed(1) LxTxData[1,2:3] <- c(0.5, 0.001) LIN <- expect_s4_class( plot_GrowthCurve(LxTxData,mode = "extrapolation", fit.method = "LIN"), "RLum.Results") EXP <- expect_s4_class( plot_GrowthCurve(LxTxData,mode = "extrapolation", fit.method = "EXP"), "RLum.Results") EXPLIN <- expect_s4_class( suppressWarnings( plot_GrowthCurve( LxTxData,mode = "extrapolation", fit.method = "EXP+LIN")), "RLum.Results") GOK <- expect_s4_class( plot_GrowthCurve(LxTxData,mode = "extrapolation", fit.method = "GOK"), "RLum.Results") LambertW <- expect_s4_class( plot_GrowthCurve(LxTxData,mode = "extrapolation", fit.method = "LambertW"), "RLum.Results") expect_equal(round(LIN$De$De,0), 165) expect_equal(round(EXP$De$De,0), 110) expect_equal(round(LambertW$De$De,0), 114) #it fails on some unix platforms for unknown reason. 
#expect_equivalent(round(EXPLIN$De$De,0), 110) # Check alternate --------------------------------------------------------- ## load data data(ExampleData.LxTxData, envir = environment()) set.seed(1) LxTxData[1,2:3] <- c(0.5, 0.001) ##LIN expect_s4_class( object = plot_GrowthCurve(LxTxData,mode = "alternate", fit.method = "LIN", output.plot = FALSE), "RLum.Results") ## EXP EXP <- expect_s4_class( object = plot_GrowthCurve(LxTxData,mode = "alternate", fit.method = "EXP", output.plot = FALSE), "RLum.Results") ## EXP+LIN EXPLIN <- expect_s4_class( object = suppressWarnings( plot_GrowthCurve(LxTxData,mode = "alternate", fit.method = "EXP+LIN", output.plot = FALSE, verbose = FALSE)), "RLum.Results") ## GOK expect_s4_class( object = plot_GrowthCurve( LxTxData, mode = "alternate", fit.method = "GOK", output.plot = FALSE, verbose = FALSE ), "RLum.Results" ) ## LambertW expect_s4_class( object = plot_GrowthCurve( LxTxData, mode = "alternate", fit.method = "LambertW", output.plot = FALSE, verbose = FALSE ), "RLum.Results" ) ## trigger LambertW related warning for ## extrapolation mode tmp <- structure(list( dose = c( 0, 1388.88888888889, 2777.77777777778, 4166.66666666667, 8333.33333333333, 16666.6666666667, 33333.3333333333, 0, 1388.88888888889, 2777.77777777778, 4166.66666666667, 8333.33333333333, 16666.6666666667, 33333.3333333333, 0, 1388.88888888889, 2777.77777777778, 4166.66666666667, 8333.33333333333, 16666.6666666667, 33333.3333333333 ), LxTx = c( 1.54252220145258, 4.43951568403849, 6.23268064543138, 7.84372723139206, 12.1816246695694, 16.220421545207, 19.9805214420208, 1.5693958789807, 4.01446969642433, 6.50442121919275, 8.13912565845306, 11.2791435536017, 14.2739718127927, 17.7646886436743, 1.55083317135234, 4.10327222363961, 6.1705969614814, 8.30005789933367, 12.7612004529065, 14.807776070804, 17.1563663039162 ), LxTx_X = c( 0.130074482379272, 2.59694106608334, 4.46970034588506, 3.0630786645803, 0.744512263874143, 6.0383153231303, 0.785060450424326, 3.16210365279, 
0.0425273193228004, 2.9667194222907, 0.187174353876429, 4.29989597009486, 4.19802308979151, 2.77791088935002, 0.248412040945932, 0.626745230335262, 3.80396486752602, 16.1846310553925, 4.14921514089229, 1.40190110413806, 7.74406545663656 ) ), class = "data.frame", row.names = c(NA, -21L)) expect_warning(plot_GrowthCurve( tmp, mode = "extrapolation", fit.method = "LambertW", output.plot = FALSE, verbose = FALSE), regexp = "\\[plot\\_GrowthCurve\\(\\)\\] Standard root estimation using stats\\:\\:uniroot\\(\\).+") }) Luminescence/tests/testthat/test_RLum.Analysis-class.R0000644000176200001440000000440314062436223022617 0ustar liggesuserstest_that("Check the example and the numerical values", { testthat::skip_on_cran() local_edition(3) ##load example data data("ExampleData.RLum.Analysis") obj <- IRSAR.RF.Data ## set_RLum() ##construct empty object tmp <- set_RLum( "RLum.Analysis", protocol = "testthat", records = lapply(1:20, function(x) { set_RLum( "RLum.Data.Curve", recordType = "test", data = matrix(1:10, ncol = 2), info = list(el = as.character(x)) ) }), info = list(el = "test") ) ##overwrite object expect_s4_class(set_RLum("RLum.Analysis", records = tmp), "RLum.Analysis") ## as() expect_type(as(tmp, "list"), "list") expect_s4_class(as(list(), "RLum.Analysis"), "RLum.Analysis") ## output expect_output(print(as(list(), "RLum.Analysis")), regexp = "This is an empty object") expect_s4_class( set_RLum( "RLum.Analysis", protocol = "testthat", records = set_RLum( "RLum.Analysis", protocol = "nest", records = list(matrix(1:10, ncol = 2)) ), info = list(el = "test") ), "RLum.Analysis" ) ## get_RLum expect_length(get_RLum(obj, subset = (recordType == "RF")), 2) expect_null(get_RLum(obj, subset = (recordType == ""))) expect_length(get_RLum(tmp, subset = (el == "2")), 1) expect_s4_class(get_RLum(tmp, subset = (el == "2")), "RLum.Analysis") expect_type(get_RLum(tmp, info.object = "el"), "character") expect_warning(get_RLum(tmp, info.object = "missing"), regexp = "Invalid 
info.object name") expect_error(get_RLum(tmp, record.id = "character"), "'record.id' has to be of type 'numeric' or 'logical'!") expect_error(get_RLum(tmp, recordType = 1L), "'recordType' has to be of type 'character'!") expect_error(get_RLum(tmp, curveType = 1L), "'curveType' has to be of type 'character'!") expect_error(get_RLum(tmp, RLum.type = 1L), "'RLum.type' has to be of type 'character'!") expect_error(get_RLum(tmp, get.index = "a"), "'get.index' has to be of type 'logical'!") expect_null(suppressWarnings(get_RLum(set_RLum("RLum.Analysis"), info = "test"))) ##structure RLum expect_error( structure_RLum(set_RLum("RLum.Analysis", records = list(set_RLum("RLum.Data.Image")))), "Only 'RLum.Data.Curve' objects are allowed!") }) Luminescence/tests/testthat/test_convert_XYSG2CSV.R0000644000176200001440000000164514062436223022050 0ustar liggesuserstest_that("test convert functions", { testthat::skip_on_cran() local_edition(3) ##test for errors expect_error(convert_BIN2CSV(file = "", export = FALSE), regexp = "[read_BIN2R()] File does not exist!", fixed = TRUE) expect_error(convert_Daybreak2CSV(file = "", export = FALSE), regexp = "[read_Daybreak2R()] file name does not seem to exist.", fixed = TRUE) #expect_error(convert_PSL2CSV(file = "", export = FALSE)) expect_error(suppressWarnings(convert_XSYG2CSV(file = "", export = FALSE))) ##test conversion itself ##BIN2CSV data(ExampleData.BINfileData, envir = environment()) expect_type(convert_BIN2CSV(subset(CWOSL.SAR.Data, POSITION == 1), export = FALSE), "list") ##XSYG2CSV data(ExampleData.XSYG, envir = environment()) expect_type(convert_XSYG2CSV(OSL.SARMeasurement$Sequence.Object[1:10], export = FALSE), "list") }) Luminescence/tests/testthat/test_read_RF2R.R0000644000176200001440000000116514062436223020563 0ustar liggesuserstest_that("Test functionality", { testthat::skip_on_cran() local_edition(3) ##load file path file <- system.file("extdata", "RF_file.rf", package = "Luminescence") ##crash function 
expect_error(read_RF2R("file"), regexp = "File 'file' does not exist!") expect_error(read_RF2R(2), regexp = "'file' needs to be of type character!") ##simple import expect_type(read_RF2R(file), type = "list") ##import list expect_type(read_RF2R(list(file, "test")), type = "list") ##import false list expect_warning(read_RF2R(c(file, file)), regexp = "'file' has a length > 1. Only the first element was taken!") }) Luminescence/tests/testthat/test_calc_SourceDoseRate.R0000755000176200001440000000332514062436223022731 0ustar liggesuserstemp <- calc_SourceDoseRate(measurement.date = "2012-01-27", calib.date = "2014-12-19", calib.dose.rate = 0.0438, calib.error = 0.0019) test_that("General tests", { testthat::skip_on_cran() local_edition(3) ##simple run expect_silent(calc_SourceDoseRate( calib.date = "2014-12-19", calib.dose.rate = 0.0438, calib.error = 0.0019 )) ##simple run expect_silent(calc_SourceDoseRate( measurement.date = "2018-01-02", calib.date = "2014-12-19", calib.dose.rate = 0.0438, calib.error = 0.0019 )) ##simple run predict expect_silent(calc_SourceDoseRate( measurement.date = "2018-01-02", calib.date = "2014-12-19", calib.dose.rate = 0.0438, calib.error = 0.0019, predict = 10 )) ##Gy/min as unit expect_silent(calc_SourceDoseRate( measurement.date = "2018-01-02", calib.date = "2014-12-19", calib.dose.rate = 0.0438, calib.error = 0.0019, dose.rate.unit = "Gy/min" )) ##cause stop expect_error(calc_SourceDoseRate( measurement.date = "2018-01-02", calib.date = "2014-12-19", calib.dose.rate = 0.0438, calib.error = 0.0019, source.type = "SK" )) }) test_that("check class and length of output", { testthat::skip_on_cran() local_edition(3) expect_equal(is(temp), c("RLum.Results", "RLum")) expect_equal(length(temp), 3) }) test_that("check values from output example 1", { testthat::skip_on_cran() local_edition(3) results <- get_RLum(temp) expect_equal(round(results$dose.rate, digits = 8), 0.04695031) expect_equal(round(results$dose.rate.error, digits = 9), 
0.002036657) expect_equal(results$date, as.Date("2012-01-27")) }) Luminescence/tests/testthat/test_Analyse_SAROSLdata.R0000644000176200001440000000322214166604717022374 0ustar liggesuserstest_that("full example test", { testthat::skip_on_cran() local_edition(3) data(ExampleData.BINfileData, envir = environment()) output <- Analyse_SAR.OSLdata(input.data = CWOSL.SAR.Data, signal.integral = c(1:5), background.integral = c(900:1000), position = c(1:1), output.plot = FALSE) ##checks expect_type(output, "list") expect_length(output, 3) ## errors expect_error({ Analyse_SAR.OSLdata() }, regexp = "No input data given") expect_error({ Analyse_SAR.OSLdata(input.data = CWOSL.SAR.Data) }, regexp = "No signal integral is given") expect_error({ Analyse_SAR.OSLdata(input.data = CWOSL.SAR.Data, signal.integral = 1:3) }, regexp = "No background integral is given") expect_error({ Analyse_SAR.OSLdata(input.data = subset(CWOSL.SAR.Data, LTYPE == "IRSL"), signal.integral = 1:3, background.integral = 200:250) }, regexp = "No 'OSL' curves found") ## should work expect_type(Analyse_SAR.OSLdata(input.data = CWOSL.SAR.Data, signal.integral = 1:3, background.integral = 200:250, position = 1, background.count.distribution = "non-poisson", sigmab = 0.1, output.plot = TRUE), "list") tmp <- subset(CWOSL.SAR.Data, LTYPE == "OSL" & POSITION == 1 & ID <= 457) expect_type( Analyse_SAR.OSLdata(tmp, 1:3, 200:250, output.plot = TRUE, output.plot.single = TRUE), "list") }) Luminescence/tests/testthat/test_scale_GammaDose.R0000644000176200001440000001137214062436223022062 0ustar liggesusersdata("ExampleData.ScaleGammaDose", envir = environment()) d <- ExampleData.ScaleGammaDose ## Conversion factors: Liritzisetal2013 results <- scale_GammaDose(data = d, conversion_factors = "Liritzisetal2013", fractional_gamma_dose = "Aitken1985", plot = FALSE, verbose = FALSE) test_that("check class and length of output", { testthat::skip_on_cran() local_edition(3) expect_equal(is(results), c("RLum.Results", "RLum")) 
expect_equal(length(results), 6) expect_equal(is(results$summary)[1], "data.frame") }) test_that("check values from output example", { testthat::skip_on_cran() local_edition(3) expect_equal(formatC(results$summary$dose_rate_total, 4), "0.9242") expect_equal(formatC(results$summary$dose_rate_total_err, 4), "0.2131") }) ## Conversion factors: Guerinetal2011 results <- scale_GammaDose(data = d, conversion_factors = "Guerinetal2011", fractional_gamma_dose = "Aitken1985", plot = FALSE, verbose = FALSE) test_that("check class and length of output", { testthat::skip_on_cran() local_edition(3) expect_equal(is(results), c("RLum.Results", "RLum")) expect_equal(length(results), 6) expect_equal(is(results$summary)[1], "data.frame") }) test_that("check values from output example", { testthat::skip_on_cran() local_edition(3) expect_equal(formatC(results$summary$dose_rate_total, 4), "0.9214") expect_equal(formatC(results$summary$dose_rate_total_err, 4), "0.2124") }) ## Conversion factors: Guerinetal2011 results <- scale_GammaDose(data = d, conversion_factors = "AdamiecAitken1998", fractional_gamma_dose = "Aitken1985", plot = FALSE, verbose = FALSE) test_that("check class and length of output", { testthat::skip_on_cran() local_edition(3) expect_equal(is(results), c("RLum.Results", "RLum")) expect_equal(length(results), 6) expect_equal(is(results$summary)[1], "data.frame") }) test_that("check values from output example", { testthat::skip_on_cran() local_edition(3) expect_equal(formatC(results$summary$dose_rate_total, 4), "0.9123") expect_equal(formatC(results$summary$dose_rate_total_err, 4), "0.2097") }) ## CONSOLE & PLOT OUTPUT test_that("console & plot", { expect_output({ scale_GammaDose(d, plot = TRUE, verbose = TRUE) scale_GammaDose(d, plot = TRUE, plot_single = FALSE, verbose = TRUE) }) }) ## WARNINGS & FAILURES test_that("check input data", { testthat::skip_on_cran() local_edition(3) expect_error( scale_GammaDose(NA, plot = FALSE, verbose = TRUE), "must be a data frame" ) 
expect_error( scale_GammaDose(d[ ,1:10], plot = FALSE, verbose = TRUE), "must have 12 columns" ) expect_warning({ tmp <- d colnames(tmp) <- letters[1:ncol(tmp)] scale_GammaDose(tmp, plot = FALSE, verbose = TRUE) }, "Unexpected column names" ) expect_error({ tmp <- d tmp$sample_offset <- NA scale_GammaDose(tmp, plot = FALSE, verbose = TRUE) }, "Only one layer must be contain a numeric value in column 'sample_offset'" ) expect_error({ tmp <- d tmp$sample_offset[5] <- "target" scale_GammaDose(tmp, plot = FALSE, verbose = TRUE) }, "Non-numeric value in the the row of the target layer." ) expect_error({ tmp <- d tmp$sample_offset[5] <- -1 scale_GammaDose(tmp, plot = FALSE, verbose = TRUE) }, "The numeric value in 'sample_offset' must be positive." ) expect_error({ tmp <- d tmp$sample_offset[5] <- 20 scale_GammaDose(tmp, plot = FALSE, verbose = TRUE) }, "Impossible! Sample offset larger than the target-layer's thickness!" ) expect_error({ scale_GammaDose(d, conversion_factors = c("a", "b"), plot = FALSE, verbose = TRUE) }, "must be an object of length 1 and of class 'character'." ) expect_error({ scale_GammaDose(d, conversion_factors = 1, plot = FALSE, verbose = TRUE) }, "must be an object of length 1 and of class 'character'." ) expect_error({ scale_GammaDose(d, conversion_factors = "HansGuenter2020", plot = FALSE, verbose = TRUE) }, "Invalid 'conversion_factors'. Valid options:" ) expect_error({ scale_GammaDose(d, fractional_gamma_dose = c("a", "b"), plot = FALSE, verbose = TRUE) }, "must be an object of length 1 and of class 'character'." ) expect_error({ scale_GammaDose(d, fractional_gamma_dose = 1, plot = FALSE, verbose = TRUE) }, "must be an object of length 1 and of class 'character'." ) expect_error({ scale_GammaDose(d, fractional_gamma_dose = "momgetthecameraiamontheinternet1995", plot = FALSE, verbose = TRUE) }, "Invalid 'fractional_gamma_dose'. 
Valid options:" ) }) Luminescence/tests/testthat/test_RLum.R0000644000176200001440000000071414212442347017734 0ustar liggesuserstest_that("check class", { testthat::skip_on_cran() local_edition(3) object <- set_RLum(class = "RLum.Data.Curve") expect_length(rep(object, 10), 10) ## data(ExampleData.DeValues, envir = environment()) temp <- calc_FuchsLang2001(ExampleData.DeValues$BT998, cvThreshold = 5, plot = FALSE) expect_output(show(temp)) ## test coercion expect_s4_class(as(as(object = temp, Class = "list"), "RLum.Results"), "RLum.Results") }) Luminescence/tests/testthat/test_read_SPE2R.R0000644000176200001440000000103314166622517020705 0ustar liggesuserstest_that("Test general functionality", { testthat::skip_on_cran() local_edition(3) ##crash function expect_null(read_SPE2R(file = "text")) ## default values expect_s4_class( read_SPE2R( "https://github.com/R-Lum/Luminescence/blob/master/tests/testdata/SPEfile.SPE?raw=true" ), "RLum.Data.Image" ) ##test verbose expect_s4_class( read_SPE2R( "https://github.com/R-Lum/Luminescence/blob/master/tests/testdata/SPEfile.SPE?raw=true", verbose = FALSE ), "RLum.Data.Image" ) }) Luminescence/tests/testthat/test_extract_IrradiationTimes.R0000644000176200001440000000156414464125673024073 0ustar liggesuserstest_that("Test the extraction of irradiation times", { testthat::skip_on_cran() local_edition(3) ##set file file <- system.file("extdata/XSYG_file.xsyg", package="Luminescence") ## break function expect_error(extract_IrradiationTimes("fail"), "\\[extract_IrradiationTimes\\(\\)\\] Wrong XSYG file name or file does not exsits!") ##general test results <- expect_s4_class(extract_IrradiationTimes(object = file, txtProgressBar = FALSE), "RLum.Results") ##check whether it makes sense expect_equal(sum(results$irr.times$IRR_TIME), 80) ## apply the function to something previoulsy imported via read_BIN2R file <- system.file("extdata/BINfile_V8.binx", package = "Luminescence") temp <- read_BIN2R(file, fastForward = TRUE) temp <- 
expect_s4_class(extract_IrradiationTimes(temp)[[1]], "RLum.Results") expect_type(temp$irr.times$START, "double") }) Luminescence/tests/testthat/test_convert_RLum2Risoe.BINfileData.R0000644000176200001440000000123514062436223024617 0ustar liggesuserstest_that("test for errors", { testthat::skip_on_cran() local_edition(3) expect_error(convert_RLum2Risoe.BINfileData(object = NA)) }) test_that("functionality", { testthat::skip_on_cran() local_edition(3) ##load example data data(ExampleData.RLum.Analysis, envir = environment()) ##provide RLum.Analysis expect_s4_class(convert_RLum2Risoe.BINfileData(IRSAR.RF.Data), "Risoe.BINfileData") ##provide RLum.Data.Curve expect_s4_class(convert_RLum2Risoe.BINfileData(IRSAR.RF.Data@records[[1]]), "Risoe.BINfileData") ##provide list expect_s4_class(convert_RLum2Risoe.BINfileData(list(IRSAR.RF.Data,IRSAR.RF.Data)), "Risoe.BINfileData") }) Luminescence/tests/testthat/test_plot_RadialPlot.R0000644000176200001440000000335214367174003022151 0ustar liggesuserstest_that("dedicated test for the radialplot", { testthat::skip_on_cran() local_edition(3) ##distribution plots set.seed(12310) x <- rnorm(30,5,0.5) y <- x * runif(30, 0.05, 0.10) df <- data.frame(x,y) ## standard data ## simple test expect_silent( plot_RadialPlot( data = df, centrality = 5)) ## standard data with two datasets ## simple test expect_silent( plot_RadialPlot( data = list(df, df), centrality = c(5,5))) ## simple test - unlogged expect_silent( plot_RadialPlot( data = df, centrality = 5, log.z = FALSE)) ## simple test - unlogged with statistics expect_silent( plot_RadialPlot( data = df, summary = c( "n", "mean", "median", "mean.weighted", "median.weighted", "kdemax", "sdabs", "sdrel", "seabs", "serel", "skewness", "kurtosis", "in.2s", "sdabs.weighted", "sdrel.weighted", "seabs.weighted", "serel.weighted"), log.z = FALSE)) ## simple test - unlogged expect_silent( plot_RadialPlot( data = df, centrality = -1, log.z = FALSE)) # Messages, Warnings, and Errors 
----------------------------------------- ## trigger message expect_message( plot_RadialPlot( data = data.frame(x = df$x, y = rep(0.0001, nrow(df))), centrality = -1, log.z = FALSE), regexp = "Attention.*") ## trigger warning expect_warning(plot_RadialPlot( data = df, #centrality = , central.value = -1, log.z = FALSE), "\\[plot\\_RadialPlot\\(\\)\\] z-scale touches.*" ) ## trigger stop expect_error( plot_RadialPlot( data = df, centrality = "error"), "\\[plot\\_RadialPlot\\(\\)\\] Measure of centrality not supported\\!") }) Luminescence/tests/testthat/test_convert_PSL2CSV.R0000644000176200001440000000334114464125673021721 0ustar liggesuserstest_that("General test", { testthat::skip_on_cran() local_edition(3) ##get file file <- system.file("extdata/DorNie_0016.psl", package = "Luminescence") ##stop expect_error(convert_PSL2CSV()) ##the case where we have an object of type RLum expect_type(convert_PSL2CSV(read_PSL2R(file), export = FALSE), "list") ##export FALSE expect_type(convert_PSL2CSV(file, export = FALSE), "list") ##write to temp expect_silent(convert_PSL2CSV(file, export = TRUE, path = tempdir())) ##test single_table argument expect_type(convert_PSL2CSV(file, export = FALSE, single_table = TRUE), "list") ##test raw data extraction ## simple raw data extraction t <- expect_type(convert_PSL2CSV(file, export = FALSE, extract_raw_data = TRUE), "list") expect_length(t, 5) ## raw data extraction with single_table t <- expect_type(convert_PSL2CSV(file, export = FALSE, extract_raw_data = TRUE, single_table = TRUE), "list") expect_length(t, 1) expect_equal(nrow(t[[1]]), 100) ## test with files export tmp_path <- tempdir() expect_silent(convert_PSL2CSV(file, path = tmp_path, extract_raw_data = TRUE, single_table = TRUE, col.names = TRUE)) ## test with col.names df <- read.table(file = rev(list.files(path = tmp_path, pattern = ".csv", full.names = TRUE))[1], sep = ";", header = TRUE) expect_type(colnames(df), "character") expect_true(grepl(pattern = "USER", 
colnames(df)[1])) ## test without column names expect_silent(convert_PSL2CSV(file, path = tmp_path, extract_raw_data = TRUE, single_table = TRUE, col.names = FALSE)) df <- read.table(file = list.files(path = tmp_path, pattern = ".csv", full.names = TRUE)[1], sep = ";", header = TRUE) expect_false(grepl(pattern = "USER", colnames(df)[1])) }) Luminescence/tests/testthat/test_PSL2RisoeBINfiledata.R0000644000176200001440000000046514062436223022624 0ustar liggesuserstest_that("simple test", { testthat::skip_on_cran() local_edition(3) data("ExampleData.portableOSL", envir = environment()) merged <- merge_RLum(ExampleData.portableOSL) bin <- PSL2Risoe.BINfileData(merged) ##checks expect_s4_class(bin, "Risoe.BINfileData") expect_equal(length(bin), 70) }) Luminescence/tests/testthat/test_plot_RLum.Data.Curve.R0000644000176200001440000000220614062436223022722 0ustar liggesuserstest_that("Test the basic plot functionality", { testthat::skip_on_cran() local_edition(3) ## create dataset #load Example data data(ExampleData.CW_OSL_Curve, envir = environment()) temp <- as(ExampleData.CW_OSL_Curve, "RLum.Data.Curve") temp_NA <- temp temp_NA@data[] <- suppressWarnings(NA_real_) ## break function expect_error(plot_RLum.Data.Curve("temp"), regexp = "Input object is not of type RLum.Data.Curve") ## trigger warning expect_warning(plot_RLum.Data.Curve(temp_NA), regexp = "Curve contains only NA-values, nothing plotted.") expect_warning(plot_RLum.Data.Curve(set_RLum("RLum.Data.Curve"), norm = TRUE), "Normalisation led to Inf or NaN values. 
Values replaced by 0") ## run function with various conditions expect_silent(plot_RLum.Data.Curve(temp)) expect_silent(plot_RLum.Data.Curve(temp, norm = TRUE)) expect_silent(plot_RLum.Data.Curve(temp, norm = "max")) expect_silent(plot_RLum.Data.Curve(temp, norm = "min")) expect_silent(plot_RLum.Data.Curve(temp, norm = "huot")) expect_silent(plot_RLum.Data.Curve(temp, smooth = TRUE)) expect_silent(plot_RLum.Data.Curve(temp, par.local = FALSE)) }) Luminescence/tests/testthat/test_analyse_SARTL.R0000644000176200001440000000104614062436223021454 0ustar liggesusers##Full check test_that("Test examples", { skip_on_cran() local_edition(3) ##load data data(ExampleData.BINfileData, envir = environment()) ##transform the values from the first position in a RLum.Analysis object object <- Risoe.BINfileData2RLum.Analysis(TL.SAR.Data, pos=3) ##perform analysis expect_s4_class( analyse_SAR.TL( object, signal.integral.min = 210, signal.integral.max = 220, fit.method = "EXP OR LIN", sequence.structure = c("SIGNAL", "BACKGROUND") ), "RLum.Results" ) }) Luminescence/tests/testthat/test_calc_gSGC_feldspar.R0000644000176200001440000000320514062436223022477 0ustar liggesuserstest_that("test errors", { testthat::skip_on_cran() local_edition(3) ##crash function ##no data.frame expect_error(calc_gSGC_feldspar( data = "data", gSGC.type = "50LxTx", plot = FALSE), "\\[calc_gSGC_feldspar\\(\\)\\] 'data' needs to be of type data.frame.") ##no character expect_error(calc_gSGC_feldspar( data = data.frame(), gSGC.type = 1, plot = FALSE), "\\[calc_gSGC_feldspar\\(\\)\\] 'gSGC.type' needs to be of type character.") ## input is somewhat not what we expect for gSGC expect_error( calc_gSGC_feldspar( data = data.frame(a = 1, b = 1, c = 1, d = 1, e = 1), gSGC.type = "wrong", plot = FALSE ), "\\[calc_gSGC_feldspar\\(\\)\\] 'gSGC.type' needs to be one of the accepted values" ) ##finally run with plot output #test on a generated random sample set.seed(1234) n_samples <- 2 data <- data.frame( LnTn = 
rnorm(n=n_samples, mean=1.0, sd=0.02), LnTn.error = rnorm(n=n_samples, mean=0.05, sd=0.002), Lr1Tr1 = rnorm(n=n_samples, mean=1.0, sd=0.02), Lr1Tr1.error = rnorm(n=n_samples, mean=0.05, sd=0.002), Dr1 = rep(100,n_samples)) results <- expect_s4_class(calc_gSGC_feldspar( data = data, gSGC.type = "50LxTx", plot = TRUE), "RLum.Results") ##test own curve parameters results <- expect_s4_class(calc_gSGC_feldspar( data = data, gSGC.parameters = data.frame( y1 = 0.6, y1_err = 0.2, D1 = 250, D1_err = 50, y2 = 0.90, y2_err = 0.10, y0 = 0.001, y0_err = 0.0001 )), "RLum.Results") ##regression tests expect_true(all(is.na(unlist(results$m.MC)))) }) Luminescence/tests/testthat/test_methods_DRAC.R0000644000176200001440000000342014062436223021305 0ustar liggesusers##Full check test_that("methods_DRAC", { testthat::skip_on_cran() local_edition(3) input <- template_DRAC() ## print expect_output(print(input, blueprint = TRUE)) expect_output(print(input, blueprint = FALSE)) ## as.data.frame expect_s3_class(as.data.frame(input), "data.frame") expect_s3_class(as.data.frame(input), "DRAC.data.frame") ## [[<- expect_warning({ input <- template_DRAC() input[[1]] <- 1i }, regexp = "cannot use objects of class") expect_warning({ input <- template_DRAC() input[[1]] <- c(1, 2) }, regexp = "Input must be of length") expect_warning({ input <- template_DRAC() input[[5]] <- "1" }, regexp = "Input must be of class numeric") expect_warning({ input <- template_DRAC() input[[5]] <- "X" Luminescence:::.warningCatcher(input[[5]] <- "abc") }, regexp = "Cannot coerce < abc > to a numeric value") expect_warning({ input <- template_DRAC(nrow = 2) input[[5]] <- c("X", 1) Luminescence:::.warningCatcher(input[[5]] <- c("X", "abc")) }, regexp = "Cannot coerce < abc > to a numeric value") expect_warning({ input <- template_DRAC() input[[5]] <- 1L input[[5]] <- "abc" }, regexp = "Input must be of class integer") expect_warning({ input <- template_DRAC() input[[13]] <- "abc" }, regexp = "Invalid option") 
expect_warning({ input <- template_DRAC() input[[13]] <- 1 }, regexp = "Input must be of class character") ## [<- expect_identical( object = template_DRAC(), expected = { input <- template_DRAC() input[1] <- NA_character_ input }) ## $<- expect_identical( object = template_DRAC(), expected = { input <- template_DRAC() input$`Project ID` <- NA_character_ input }) }) Luminescence/tests/testthat/test_apply_CosmicRayRemoval.R0000644000176200001440000000321014062436223023472 0ustar liggesuserstest_that("check function", { testthat::skip_on_cran() local_edition(3) ##load data data(ExampleData.XSYG, envir = environment()) ##crash the function expect_error( apply_CosmicRayRemoval("error"), regexp = "An object of class 'character' is not supported as input; please read the manual!") ##run basic tests expect_silent(apply_CosmicRayRemoval(TL.Spectrum, method = "Pych")) expect_silent(apply_CosmicRayRemoval(TL.Spectrum, method = "smooth")) expect_silent(apply_CosmicRayRemoval(TL.Spectrum, method = "smooth", MARGIN = 1)) expect_output(apply_CosmicRayRemoval(TL.Spectrum, method = "Pych", MARGIN = 2, verbose = TRUE, plot = TRUE)) expect_silent(apply_CosmicRayRemoval(TL.Spectrum, method = "Pych", method.Pych.smoothing = 2, method.Pych.threshold_factor = 2)) ##construct objects for different tests RLum_list <- list(TL.Spectrum) RLum.Analysis <- set_RLum("RLum.Analysis", records = RLum_list) RLum.Analysis_list <- list(RLum.Analysis) RLum_list_mixed <- list(TL.Spectrum, set_RLum("RLum.Data.Curve")) RLum.Analysis_mixed <- set_RLum("RLum.Analysis", records = RLum_list_mixed ) RLum.Analysis_mixed_list <- list(RLum.Analysis_mixed) ##run tests expect_type(apply_CosmicRayRemoval(RLum_list),"list") expect_s4_class(apply_CosmicRayRemoval(RLum.Analysis), class = "RLum.Analysis") expect_type(apply_CosmicRayRemoval(RLum.Analysis_list), "list") expect_error(apply_CosmicRayRemoval(RLum_list_mixed)) expect_s4_class(apply_CosmicRayRemoval(RLum.Analysis_mixed), class = "RLum.Analysis") 
expect_type(apply_CosmicRayRemoval(RLum.Analysis_mixed_list), "list") }) Luminescence/tests/testthat/test_apply_EfficiencyCorrection.R0000644000176200001440000000313214062436223024352 0ustar liggesuserstest_that("check function", { testthat::skip_on_cran() local_edition(3) ##load data data(ExampleData.XSYG, envir = environment()) ##create efficiency data eff_data <- data.frame(WAVELENGTH = 1:1000, runif(1000)) ##break function expect_error(apply_EfficiencyCorrection(object = "ERROR"), regexp = "Input object is not of type RLum.Data.Spectrum") expect_error(apply_EfficiencyCorrection(object = TL.Spectrum, spectral.efficiency = "ERROR"), regexp = "'spectral.efficiency' is not of type data.frame") eff_data_false <- eff_data eff_data_false[1,2] <- 2 expect_error(apply_EfficiencyCorrection( object = TL.Spectrum, spectral.efficiency = eff_data_false), regexp = "Relative quantum efficiency values > 1 are not allowed.") ##run tests expect_s4_class(apply_EfficiencyCorrection(TL.Spectrum,spectral.efficiency = eff_data), "RLum.Data.Spectrum") ##run list test expect_warning( apply_EfficiencyCorrection(list(a = "test", TL.Spectrum), spectral.efficiency = eff_data), regexp = "Skipping character object in input list.") ##run test with RLum.Analysis objects expect_s4_class( apply_EfficiencyCorrection(set_RLum("RLum.Analysis", records = list(TL.Spectrum)), spectral.efficiency = eff_data), "RLum.Analysis") ##run test with everything combined input <- list(a = "test", TL.Spectrum,set_RLum("RLum.Analysis", records = list(TL.Spectrum))) expect_warning(apply_EfficiencyCorrection(input, eff_data), "Skipping character object in input list") }) Luminescence/tests/testthat/test_merge_RLumResults.R0000644000176200001440000000140014212442347022466 0ustar liggesuserstest_that("Merge RLum.Results", { testthat::skip_on_cran() local_edition(3) ## check whether arguments are retained a <- array(runif(300, 0,255), c(10,10,3)) roi <- matrix(c(2.,4,2,5,6,7,3,1,1), ncol = 3) t <- 
expect_s4_class(merge_RLum.Results(lapply(list(roi, roi, roi), function(x) extract_ROI(a, x))), "RLum.Results") expect_length(names(attributes(t@data$roi_summary)), 4) ## check standard without attributes to make sure that ## standard attributes become not removed ##load example data data(ExampleData.DeValues, envir = environment()) ##apply the central dose model c <- calc_CentralDose(ExampleData.DeValues$CA1, plot = FALSE) a <- merge_RLum.Results(list(c,c)) expect_s3_class(a@data$summary, "data.frame") }) Luminescence/tests/testthat/test_read_BIN2R.R0000644000176200001440000000645314367174003020674 0ustar liggesuserstest_that("test the import of various BIN-file versions", { testthat::skip_on_cran() local_edition(3) ##test for various errors expect_error(read_BIN2R(file = ""), "[read_BIN2R()] File does not exist!", fixed = TRUE) ##this test need an internet connect ... test for it if(!httr::http_error("https://github.com/R-Lum/Luminescence/tree/master/tests/testdata")){ ##try to import every format by using the files on GitHub ##V3 expect_s4_class( suppressWarnings(read_BIN2R(file = "https://github.com/R-Lum/Luminescence/raw/master/tests/testdata/BINfile_V3.bin", txtProgressBar = FALSE)), class = "Risoe.BINfileData") ##V4 expect_s4_class( suppressWarnings(read_BIN2R(file = "https://github.com/R-Lum/Luminescence/raw/master/tests/testdata/BINfile_V4.bin", txtProgressBar = FALSE)), class = "Risoe.BINfileData") ##V5 expect_s4_class( suppressWarnings(read_BIN2R(file = "https://github.com/R-Lum/Luminescence/raw/master/tests/testdata/BINfile_V5.binx", txtProgressBar = FALSE)), class = "Risoe.BINfileData") ##V6 expect_s4_class( suppressWarnings(read_BIN2R(file = "https://github.com/R-Lum/Luminescence/raw/master/tests/testdata/BINfile_V6.binx", txtProgressBar = FALSE)), class = "Risoe.BINfileData") ##V6 - show method expect_output( suppressWarnings(read_BIN2R(file = "https://github.com/R-Lum/Luminescence/raw/master/tests/testdata/BINfile_V6.binx", txtProgressBar = FALSE)) 
) ##V7 expect_s4_class( suppressWarnings(read_BIN2R(file = "https://github.com/R-Lum/Luminescence/raw/master/tests/testdata/BINfile_V7.binx", txtProgressBar = FALSE)), class = "Risoe.BINfileData") ##V8 expect_s4_class( suppressWarnings(read_BIN2R(file = "https://github.com/R-Lum/Luminescence/raw/master/tests/testdata/BINfile_V8.binx", txtProgressBar = FALSE)), class = "Risoe.BINfileData") ##V8 - as part of the package expect_s4_class( read_BIN2R(system.file("extdata/BINfile_V8.binx", package = "Luminescence"), txtProgressBar = FALSE), class = "Risoe.BINfileData") ##V8 - as part of the package ... with arguments expect_type( read_BIN2R( file = system.file("extdata/BINfile_V8.binx", package = "Luminescence"), txtProgressBar = FALSE, position = 1, fastForward = TRUE), "list") ##V8 - as part of the package ... with arguments expect_type( read_BIN2R(system.file("extdata/BINfile_V8.binx", package = "Luminescence"), txtProgressBar = FALSE, fastForward = TRUE), "list") ##test further options ##n.records and fastForward expect_type( suppressWarnings(read_BIN2R(file = "https://github.com/R-Lum/Luminescence/raw/master/tests/testdata/BINfile_V4.bin", txtProgressBar = FALSE, n.records = 1, fastForward = TRUE, verbose = FALSE)), "list") } ## check for broken files t <- tempfile(pattern = "zero", fileext = ".binx") write(raw(), t) expect_error(read_BIN2R(t), regexp = "\\[read\\_BIN2R\\(\\)\\] Found BIN\\/BINX-format version \\(..\\) is not supported or the BIN/BINX-file is broken! 
Supported version numbers are: 03, 04, 05, 06, 07, 08.") file.remove(t) }) Luminescence/tests/testthat/test_calc_WodaFuchs2008.R0000644000176200001440000000110714062436223022230 0ustar liggesuserstest_that("Test general functionality", { testthat::skip_on_cran() local_edition(3) ##load example data ## read example data set data(ExampleData.DeValues, envir = environment()) ##test arguments expect_s4_class(suppressWarnings(calc_WodaFuchs2008(data = ExampleData.DeValues$CA1)), "RLum.Results") ##test arguments expect_s4_class(suppressWarnings( calc_WodaFuchs2008(data = ExampleData.DeValues$CA1, plot = FALSE)), "RLum.Results") ##test arguments expect_s4_class(calc_WodaFuchs2008(data = ExampleData.DeValues$CA1, breaks = 20), "RLum.Results") }) Luminescence/tests/testthat/test_zzz.R0000644000176200001440000000164114062436223017711 0ustar liggesuserstest_that("Test zzz functions ... they should still work", { testthat::skip_on_cran() local_edition(3) ##get right answer expect_equal(get_rightAnswer(), 46) expect_equal(get_rightAnswer("test"), 46) ##get quote expect_silent(get_Quote()) expect_silent(get_Quote(ID = 1)) expect_silent(get_Quote(ID = 10, separated = TRUE)) expect_silent(get_Quote(ID = 1e06)) ##tune data expect_warning(tune_Data(1:10)) expect_warning(tune_Data(data.frame(runif(n = 10, 8,12),runif(n = 10, 0.1,0.3) ), decrease.error = TRUE)) expect_warning(tune_Data(data.frame(runif(n = 10, 8,12),runif(n = 10, 0.1,0.3) ), increase.data = TRUE)) ##sTeve ## read example data set data(ExampleData.DeValues, envir = environment()) ExampleData.DeValues <- Second2Gray(ExampleData.DeValues$BT998, c(0.0438,0.0019)) ## create plot straightforward expect_silent(plot_KDE(data = ExampleData.DeValues, fun = TRUE)) }) Luminescence/tests/testthat/test_calc_CommonDose.R0000644000176200001440000000115214062436223022076 0ustar liggesusersdata(ExampleData.DeValues, envir = environment()) temp <- calc_CommonDose(ExampleData.DeValues$CA1, plot = FALSE, verbose = FALSE) test_that("check 
class and length of output", { testthat::skip_on_cran() local_edition(3) expect_s4_class(temp, "RLum.Results") expect_equal(length(temp), 4) }) test_that("check values from output", { testthat::skip_on_cran() local_edition(3) results <- get_RLum(temp) expect_equal(round(results$de, digits = 5), 62.15999) expect_equal(round(results$de_err, digits = 7), 0.7815117) expect_true(temp@data$args$log) expect_equal(temp@data$args$sigmab, 0) }) Luminescence/tests/testthat/test_calc_AliquotSize.R0000644000176200001440000000473214062436223022313 0ustar liggesusersset.seed(1) temp <- calc_AliquotSize( grain.size = c(100,150), sample.diameter = 1, MC.iter = 100, plot = FALSE, verbose = FALSE) test_that("consistency checks", { testthat::skip_on_cran() local_edition(3) expect_error(calc_AliquotSize(grain.size = 1:3)) expect_error(calc_AliquotSize(grain.size = 100, packing.density = 2)) expect_error(calc_AliquotSize(grain.size = 100, packing.density = 1, sample.diameter = -1)) expect_error(calc_AliquotSize(grain.size = 100, sample.diameter = 9.8, MC = TRUE)) expect_output(calc_AliquotSize(grain.size = 100, packing.density = 1, sample.diameter = 9.8, grains.counted = 30, MC = TRUE), regexp = "Monte Carlo simulation is only available for estimating the amount of grains on the sample disc.") expect_s4_class( calc_AliquotSize( grain.size = 100, packing.density = "inf", sample.diameter = 9.8, MC = FALSE), "RLum.Results") expect_s4_class( calc_AliquotSize( grain.size = c(100, 150), grains.counted = 1000, sample.diameter = 9.8, MC = FALSE), "RLum.Results") expect_s4_class( suppressWarnings(calc_AliquotSize( grain.size = c(100, 150), grains.counted = c(1000, 1100, 900), sample.diameter = 10, MC = FALSE)), "RLum.Results") }) test_that("check class and length of output", { testthat::skip_on_cran() local_edition(3) expect_equal(is(temp), c("RLum.Results", "RLum")) expect_equal(length(temp), 2) expect_s3_class(temp$summary, "data.frame") expect_type(temp$MC, "list") }) test_that("check summary 
output", { testthat::skip_on_cran() local_edition(3) result <- get_RLum(temp) expect_equal(result$grain.size, 125) expect_equal(result$sample.diameter, 1) expect_equal(result$packing.density, 0.65) expect_equal(result$n.grains, 42) expect_equal(result$grains.counted, NA) }) test_that("check MC run", { testthat::skip_on_cran() local_edition(3) expect_equal(round(temp$MC$statistics$n), 100) expect_equal(round(temp$MC$statistics$mean), 43) expect_equal(round(temp$MC$statistics$median), 39) ##fix for different R versions if(R.version$major == "3" && as.numeric(R.version$minor) < 6){ expect_equal(round(temp$MC$statistics$sd.abs), 20) }else{ expect_equal(round(temp$MC$statistics$sd.abs), 19) } expect_equal(round(temp$MC$statistics$sd.rel), 45) expect_equal(round(temp$MC$statistics$se.abs), 2) expect_equal(round(temp$MC$statistics$se.rel), 5) expect_length(temp$MC$kde$x, 10000) expect_length(temp$MC$kde$y, 10000) }) Luminescence/tests/testthat/test_methods_S3.R0000644000176200001440000000127414062436223021066 0ustar liggesuserstest_that("Test various S3 methods", { testthat::skip_on_cran() local_edition(3) ## create test data data(ExampleData.CW_OSL_Curve, envir = environment()) data(ExampleData.BINfileData, envir = environment()) data(ExampleData.DeValues, envir = environment()) data(ExampleData.DeValues, envir = environment()) temp <- as(ExampleData.CW_OSL_Curve, "RLum.Data.Curve") dose.rate <- calc_SourceDoseRate( measurement.date = "2012-01-27", calib.date = "2014-12-19", calib.dose.rate = 0.0438, calib.error = 0.0019) ## plotting ---- expect_silent(plot(list(temp, temp))) expect_silent(plot(subset(CWOSL.SAR.Data, ID == 1))) expect_silent(hist(dose.rate)) }) Luminescence/tests/testthat/test_get_RLum.R0000644000176200001440000000273214171333122020567 0ustar liggesusersdata(ExampleData.DeValues, envir = environment()) temp <- calc_CentralDose(ExampleData.DeValues$CA1, plot = FALSE, verbose = FALSE) temp_RLumDataCurve <- set_RLum(class = "RLum.Data.Curve") 
temp_RLumDataImage <- set_RLum(class = "RLum.Data.Image") temp_RLumDataSpectrum <- set_RLum(class = "RLum.Data.Spectrum") temp_RLumAnalysis <- set_RLum(class = "RLum.Analysis") temp_RLumResults <- set_RLum(class = "RLum.Results") test_that("check class and length of output", { testthat::skip_on_cran() local_edition(3) expect_s3_class(get_RLum(temp), class = "data.frame") expect_type(get_RLum(temp, data.object = "args"), "list") ##test objects expect_type(get_RLum(temp_RLumDataCurve), "double") expect_type(get_RLum(temp_RLumDataImage), "logical") expect_type(get_RLum(temp_RLumDataSpectrum), "logical") expect_null(suppressWarnings(get_RLum(temp_RLumAnalysis))) expect_null(get_RLum(temp_RLumResults)) }) test_that("check get_RLum on a list and NULL", { testthat::skip_on_cran() local_edition(3) object <- set_RLum(class = "RLum.Analysis", records = rep(set_RLum(class = "RLum.Data.Curve"), 10)) expect_warning(get_RLum(object, recordType = "test")) expect_null(get_RLum(NULL), "NULL") ##check class argument a <- list(set_RLum("RLum.Results"), set_RLum("RLum.Analysis", records = list(set_RLum("RLum.Data.Curve")))) expect_type(get_RLum(a, class = "test", drop = FALSE), "list") expect_type(get_RLum(a, class = "RLum.Results", drop = FALSE), "list") }) Luminescence/tests/testthat/test_plot_RLum.Data.Spectrum.R0000644000176200001440000001440414264017373023450 0ustar liggesuserstest_that("test pure success of the plotting without warning or error", { testthat::skip_on_cran() local_edition(3) ##RLum.Data.Spectrum ------- data(ExampleData.XSYG, envir = environment()) m <- TL.Spectrum@data bg.spectrum <- set_RLum(class = "RLum.Data.Spectrum", data = TL.Spectrum@data[,15:16, drop = FALSE]) ##crash the function with wrong input expect_error(plot_RLum.Data.Spectrum(object = "test"), regexp = "Input object neither of class 'RLum.Data.Spectrum' nor 'matrix'.") ##try a matrix as input expect_message(plot_RLum.Data.Spectrum(object = m), regexp = "Input has been converted to a 
RLum.Data.Spectrum object using set_RLum()") ##remove rownames and column names rownames(m) <- NULL colnames(m) <- NULL expect_message(plot_RLum.Data.Spectrum(object = m), regexp = "Input has been converted to a RLum.Data.Spectrum object using set_RLum()") ##standard plot with some settings expect_silent(plot( TL.Spectrum, plot.type = "contour", main = "Test", xlab = "test", ylab = "test", mtext = "test", cex = 1.2, pch = 2, lwd = 2, bty = "n", sub = "est" )) ##no plot expect_type(plot( TL.Spectrum, plot.type = "contour", main = "Test", xlab = "test", ylab = "test", mtext = "test", cex = 1.2, pch = 2, lwd = 2, bty = "n", plot = FALSE, ), "double") ##persp plot expect_silent(suppressWarnings( plot_RLum.Data.Spectrum( TL.Spectrum, plot.type = "persp", xlim = c(310, 750), ylim = c(0, 100), bin.rows = 10, bin.cols = 2, zlab = "test") )) ##test background subtraction expect_warning(plot_RLum.Data.Spectrum( TL.Spectrum, plot.type = "persp", xlim = c(310, 750), ylim = c(0, 300), bg.spectrum = bg.spectrum, bin.rows = 10, bin.cols = 1 ), "\\[plot_RLum.Data.Spectrum\\(\\)\\] 6 channel\\(s\\) removed due to row \\(wavelength\\) binning.") ## check output and limit counts expect_type(suppressWarnings(plot_RLum.Data.Spectrum( TL.Spectrum, plot.type = "persp", xlim = c(310, 750), limit_counts = 10000, bg.spectrum = bg.spectrum, bin.rows = 10, bin.cols = 1 )), "double") ## check our axes expect_type(suppressWarnings(plot_RLum.Data.Spectrum( TL.Spectrum, plot.type = "persp", xlim = c(310, 750), limit_counts = 10000, bg.spectrum = bg.spectrum, bin.rows = 10, box = "alternate", bin.cols = 1 )), "double") ##test energy axis expect_silent(plot_RLum.Data.Spectrum( TL.Spectrum, plot.type = "multiple.lines", xlim = c(1.4, 4), ylim = c(0, 300), bg.spectrum = bg.spectrum, bg.channels = 2, bin.cols = 1, xaxis.energy = TRUE )) ## plot: multiple.lines --------- expect_silent(suppressWarnings( plot_RLum.Data.Spectrum( TL.Spectrum, plot.type = "multiple.lines", xlim = c(310, 750), ylim = c(0, 
100), bin.rows = 10, bin.cols = 1 ) )) expect_silent(suppressWarnings( plot_RLum.Data.Spectrum( TL.Spectrum, plot.type = "multiple.lines", xlim = c(310, 750), frames = c(1,3), ylim = c(0, 100), bin.rows = 10, bin.cols = 1 ) )) ## plot: image ------------ ### plot_image: standard ------- expect_silent(suppressWarnings( plot_RLum.Data.Spectrum( TL.Spectrum, plot.type = "image", xlim = c(310, 750), ylim = c(0, 300), bin.rows = 10, bin.cols = 1 ) )) ### plot_image: no contour ------- expect_silent(suppressWarnings( plot_RLum.Data.Spectrum( TL.Spectrum, plot.type = "image", xlim = c(310, 750), ylim = c(0, 300), bin.rows = 10, bin.cols = 1, contour = FALSE ))) ## plot: transect ------------ expect_silent(suppressWarnings( plot_RLum.Data.Spectrum( TL.Spectrum, plot.type = "transect", xlim = c(310, 750), ylim = c(0, 300), bin.rows = 10, bin.cols = 1, contour = FALSE))) ## plot: single ------------ expect_silent(suppressWarnings( plot_RLum.Data.Spectrum( TL.Spectrum, plot.type = "single", xlim = c(310, 750), ylim = c(0, 300), bin.rows = 10, bin.cols = 6, contour = FALSE))) ## test frames expect_silent(suppressWarnings( plot_RLum.Data.Spectrum( TL.Spectrum, plot.type = "single", xlim = c(310, 750), frames = 1, ylim = c(0, 300), bin.rows = 10, bin.cols = 6, contour = FALSE))) ### plot_image: colour changes ------- expect_silent(suppressWarnings( plot_RLum.Data.Spectrum( TL.Spectrum, plot.type = "image", xlim = c(310, 750), ylim = c(0, 300), bin.rows = 10, bin.cols = 1, col = grDevices::hcl.colors(20), contour.col = "yellow" ) )) ## plot: interactive ------------ expect_silent(suppressWarnings( plot_RLum.Data.Spectrum( TL.Spectrum, plot.type = "interactive", xlim = c(310, 750), ylim = c(0, 300), bin.rows = 10, bin.cols = 1 ) )) ## plot: interactive heatmap -------- expect_silent(suppressWarnings( plot_RLum.Data.Spectrum( TL.Spectrum, plot.type = "interactive", xlim = c(310, 750), ylim = c(0, 300), bin.rows = 10, bin.cols = 1, type = "heatmap", showscale = TRUE ) )) 
##interactive contour expect_silent(suppressWarnings( plot_RLum.Data.Spectrum( TL.Spectrum, plot.type = "interactive", xlim = c(310, 750), ylim = c(0, 300), bin.rows = 10, bin.cols = 1, type = "contour", showscale = TRUE ) )) ##create more error expect_error(plot( TL.Spectrum, plot.type = "contour", xlim = c(310, 750), ylim = c(0, 300), bin.cols = 0 )) }) Luminescence/tests/testthat/test_convert_Concentration2DoseRate.R0000644000176200001440000000253014062436223025131 0ustar liggesuserstest_that("basic checks", { testthat::skip_on_cran() local_edition(3) ## template template <- expect_s3_class(convert_Concentration2DoseRate(), "data.frame") ## break function expect_error(convert_Concentration2DoseRate(input = "fail"), regexp = "input must be of type 'data.frame or 'matrix'") expect_error(convert_Concentration2DoseRate(input = data.frame(x = 1, y = 2)), regexp = "number of rows/columns in input does not match the requirements. See manual!") expect_error( convert_Concentration2DoseRate(suppressMessages(convert_Concentration2DoseRate()), conversion = "fail"), regexp = "You have not entered a valid conversion. 
Please check your spelling and consult the documentation!") template[[1]] <- "fail" expect_error(convert_Concentration2DoseRate(template), regexp = "As mineral only 'FS' or 'Q' is supported!") ## run function ## for FS df <- data.frame( Mineral = "FS", K = 2.13, K_SE = 0.07, Th = 9.76, Th_SE = 0.32, U = 2.24, U_SE = 0.12, GrainSize = 200, WaterContent = 30, WaterContent_SE = 5 ) expect_s4_class(object = convert_Concentration2DoseRate(df), class = "RLum.Results") ## for Q df$Mineral <- "Q" expect_s4_class(object = convert_Concentration2DoseRate(df), class = "RLum.Results") }) Luminescence/tests/testthat/test_bin_RLumData.R0000644000176200001440000000115314062436223021353 0ustar liggesusersdata(ExampleData.CW_OSL_Curve, envir = environment()) curve <- set_RLum( class = "RLum.Data.Curve", recordType = "OSL", data = as.matrix(ExampleData.CW_OSL_Curve) ) test_that("check class and length of output", { testthat::skip_on_cran() local_edition(3) expect_s4_class(bin_RLum.Data(curve), class = "RLum.Data.Curve") expect_length(bin_RLum.Data(curve)[,1], 500) }) test_that("check values from output example", { testthat::skip_on_cran() local_edition(3) expect_equal(sum(bin_RLum.Data(curve)[,2]), 119200) expect_equal(sum(bin_RLum.Data(curve, bin = 5)[1,2]), 41146) }) Luminescence/tests/testthat/test_plot_Functions.R0000644000176200001440000001214614504256042022064 0ustar liggesuserstest_that("test pure success of the plotting without warning or error", { testthat::skip_on_cran() local_edition(3) ##distribution plots data(ExampleData.DeValues, envir = environment()) ExampleData.DeValues <- ExampleData.DeValues$CA1 expect_silent(plot_RadialPlot(ExampleData.DeValues)) expect_silent(plot_KDE(ExampleData.DeValues)) expect_silent(plot_Histogram(ExampleData.DeValues)) expect_silent(plot_ViolinPlot(ExampleData.DeValues)) ##plot NRT data("ExampleData.BINfileData", envir = environment()) data <- Risoe.BINfileData2RLum.Analysis(object = CWOSL.SAR.Data, pos = 8, ltype = "OSL") allCurves <- 
get_RLum(data) pos <- seq(1, 9, 2) curves <- allCurves[pos] expect_silent(plot_NRt(curves)) ##filter combinations filter1 <- density(rnorm(100, mean = 450, sd = 20)) filter1 <- matrix(c(filter1$x, filter1$y/max(filter1$y)), ncol = 2) filter2 <- matrix(c(200:799,rep(c(0,0.8,0),each = 200)), ncol = 2) expect_silent(plot_FilterCombinations(filters = list(filter1, filter2))) ##plot_Det data(ExampleData.BINfileData, envir = environment()) object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos=1) expect_s4_class( plot_DetPlot( object, signal.integral.min = 1, signal.integral.max = 3, background.integral.min = 900, background.integral.max = 1000, n.channels = 5, ), "RLum.Results" ) ##plot DRT data(ExampleData.DeValues, envir = environment()) expect_silent(plot_DRTResults(values = ExampleData.DeValues$BT998[7:11,], given.dose = 2800, mtext = "Example data")) ##plot RisoeBINFileData data(ExampleData.BINfileData, envir = environment()) expect_silent(plot_Risoe.BINfileData(CWOSL.SAR.Data,position = 1)) ##various RLum plots ##RLum.Data.Curve data(ExampleData.CW_OSL_Curve, envir = environment()) temp <- as(ExampleData.CW_OSL_Curve, "RLum.Data.Curve") expect_silent(plot(temp)) ##RLum.Data.Spectrum ------- data(ExampleData.XSYG, envir = environment()) expect_silent(plot(TL.Spectrum, plot.type="contour", xlim = c(310,750), ylim = c(0,300))) expect_silent(suppressWarnings(plot_RLum.Data.Spectrum(TL.Spectrum, plot.type="persp", xlim = c(310,750), ylim = c(0,100), bin.rows=10, bin.cols = 1))) expect_silent(suppressWarnings(plot_RLum.Data.Spectrum(TL.Spectrum, plot.type="multiple.lines", xlim = c(310,750), ylim = c(0,100), bin.rows=10, bin.cols = 1))) expect_silent(suppressWarnings(plot_RLum.Data.Spectrum(TL.Spectrum, plot.type="interactive", xlim = c(310,750), ylim = c(0,300), bin.rows=10, bin.cols = 1))) expect_silent(suppressWarnings(plot_RLum.Data.Spectrum(TL.Spectrum, plot.type="interactive", xlim = c(310,750), ylim = c(0,300), bin.rows=10, bin.cols = 1, type = "heatmap", 
showscale = TRUE))) expect_silent(suppressWarnings(plot_RLum.Data.Spectrum(TL.Spectrum, plot.type="interactive", xlim = c(310,750), ylim = c(0,300), bin.rows=10, bin.cols = 1, type = "contour", showscale = TRUE))) expect_error(plot(TL.Spectrum, plot.type="contour", xlim = c(310,750), ylim = c(0,300), bin.cols = 0)) ##RLum.Analysis data(ExampleData.BINfileData, envir = environment()) temp <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos=1) expect_silent(plot( temp, subset = list(recordType = "TL"), combine = TRUE, norm = TRUE, abline = list(v = c(110)) )) ##RLum.Results grains<- calc_AliquotSize(grain.size = c(100,150), sample.diameter = 1, plot = FALSE, MC.iter = 100) expect_silent(plot_RLum.Results(grains)) ##special plot RLum.Reuslts data(ExampleData.DeValues, envir = environment()) mam <- calc_MinDose(data = ExampleData.DeValues$CA1, sigmab = 0.2, log = TRUE, plot = FALSE) expect_silent(plot_RLum(mam)) cdm <- calc_CentralDose(ExampleData.DeValues$CA1) expect_silent(plot_RLum(cdm)) FMM <- calc_FiniteMixture(ExampleData.DeValues$CA1, sigmab = 0.2, n.components = c(2:4), pdf.weight = TRUE, dose.scale = c(0, 100)) plot_RLum(FMM) }) test_that("test for return values, if any", { testthat::skip_on_cran() local_edition(3) data(ExampleData.DeValues, envir = environment()) output <- plot_AbanicoPlot(ExampleData.DeValues, output = TRUE) expect_type(output, "list") expect_length(output, 10) }) Luminescence/tests/testthat/test_analyse_portableOSL.R0000644000176200001440000001200214521207352022746 0ustar liggesuserstest_that("check class and length of output", { testthat::skip_on_cran() local_edition(3) ## generate test data set for profile data("ExampleData.portableOSL", envir = environment()) merged <- surface <- merge_RLum(ExampleData.portableOSL) ## generate dataset for surface surface@records <- lapply(surface@records, function(x){ x@info$settings$Sample <- paste0("Test_x:", runif(1), "|y:", runif(1)) x }) ## standard run profile results <- expect_s4_class( 
analyse_portableOSL( merged, signal.integral = 1:5, invert = FALSE, mode = "profile", normalise = TRUE, plot = TRUE ), "RLum.Results") ## check standard with coordinates coord <- as.matrix(results$summary[,c("COORD_X", "COORD_Y")]) ## standard run profile no plot even with plot activated results <- expect_s4_class( analyse_portableOSL( merged, signal.integral = 1:5, invert = FALSE, normalise = TRUE, mode = NULL, coord = coord, plot = TRUE ), "RLum.Results") ## verify output expect_equal(length(results), 3) expect_s3_class(results$summary, "data.frame") expect_s4_class(results$data, "RLum.Analysis") ## standard surface results <- expect_s4_class( analyse_portableOSL( surface, signal.integral = 1:5, invert = FALSE, mode = "surface", normalise = TRUE, plot = TRUE ), "RLum.Results") ## surface with options set.seed(1234) results <- expect_s4_class( analyse_portableOSL( surface, signal.integral = 1:5, invert = TRUE, mode = "surface", xlim = c(0.1, 0.4), ylim = c(0.1, 0.4), zlim = c(0.1, 2), col_ramp = "red", surface_values = c("BSL", "IRSL"), normalise = TRUE, plot = TRUE ), "RLum.Results") ## check list input expect_s4_class( suppressWarnings(analyse_portableOSL(ExampleData.portableOSL)), "RLum.Results") ## check additional argument sample expect_s4_class(analyse_portableOSL( merged, signal.integral = 1:5, invert = FALSE, normalise = TRUE, ylim = c(1,2), zlim = list(BSL = c(0,1.1), IRSL = c(0,1)), plot = TRUE, sample = "test" ), "RLum.Results") ## trigger stops ## Only RLum.Analysis expect_error(analyse_portableOSL("error"), regexp = "\\[analyse\\_portableOSL\\(\\)\\] Only objects of class.+") ## Only RLum.Data.Curves tmp <- merged tmp@records <- list(tmp@records, "error") expect_error(analyse_portableOSL(tmp), regexp = "\\[analyse\\_portableOSL\\(\\)\\] The 'RLum.Analysis' object must contain only.+") ## Check originator tmp <- merged tmp@records[[1]]@originator <- "error" expect_error(analyse_portableOSL(tmp), regexp = "\\[analyse\\_portableOSL\\(\\)\\] Only objects 
originating from .+") ## Sequence pattern tmp <- merged tmp@records <- tmp@records[-1] expect_error( object = analyse_portableOSL(tmp), regexp = "\\[analyse\\_portableOSL\\(\\)\\] Sequence pattern not supported.+") ## coordinates not list or matrix expect_error( analyse_portableOSL( surface, signal.integral = 1:5, invert = FALSE, mode = "surface", coord = "error", normalise = TRUE, plot = FALSE), regexp = "\\[analyse\\_portableOSL\\(\\)\\] Argument 'coord' needs to be a.+") ## coordinates are not of the correct size expect_error( analyse_portableOSL( surface, signal.integral = 1:5, invert = FALSE, mode = "surface", coord = coord[1:2,], normalise = TRUE, plot = FALSE), regexp = "\\[analyse\\_portableOSL\\(\\)\\] Number of coordinates differ from the number.+") ## trigger warning expect_warning( analyse_portableOSL( merged, signal.integral = 1:5, invert = FALSE, normalise = TRUE, mode = "surface", surface_value = c("BSL"), plot = TRUE, sample = "test"), regexp = "\\[analyse\\_portableOSL\\(\\)\\] Surface interpolation failed, this.+") expect_warning( analyse_portableOSL( merged, signal.integral = 1:5, invert = FALSE, normalise = TRUE, mode = "profile", zlim = c(1,2), plot = TRUE, sample = "test"), regexp = "\\[analyse\\_portableOSL\\(\\)\\] In profile mode, zlim.+") }) test_that("check output", { testthat::skip_on_cran() local_edition(3) data("ExampleData.portableOSL", envir = environment()) merged <- merge_RLum(ExampleData.portableOSL) results <- analyse_portableOSL( merged, signal.integral = 1:5, invert = FALSE, normalise = TRUE, plot = FALSE ) expect_equal(round(sum(results$summary[,c(-1, -2, -10,-11)]), digits = 2), 175.44) }) Luminescence/tests/testthat/test_analyse_Al2O3C_Measurement.R0000644000176200001440000000160714264017373024067 0ustar liggesusers##Full check test_that("analyse_Al2O3C_Measurements", { skip_on_cran() local_edition(3) ##load data data(ExampleData.Al2O3C, envir = environment()) ##00 - cause function breaks 
expect_error(analyse_Al2O3C_Measurement()) expect_error(analyse_Al2O3C_Measurement(object = "test")) expect_warning(Luminescence:::.warningCatcher( analyse_Al2O3C_Measurement(object = data_CrossTalk, signal_integral = 1000))) ##run analysis expect_s4_class(suppressWarnings(analyse_Al2O3C_Measurement(data_CrossTalk)), "RLum.Results") expect_s4_class(suppressWarnings(analyse_Al2O3C_Measurement(data_CrossTalk, calculate_TL_dose = TRUE)), "RLum.Results") ##run test without TL curves temp <- get_RLum(data_CrossTalk, recordType = "OSL", drop = FALSE) expect_s4_class(suppressWarnings(analyse_Al2O3C_Measurement(temp)), "RLum.Results") }) Luminescence/tests/testthat/test_replicate_RLum.R0000644000176200001440000000045114062436223021761 0ustar liggesuserstest_that("Test replication of RLum-objects", { testthat::skip_on_cran() local_edition(3) data(ExampleData.RLum.Analysis, envir = environment()) expect_silent(results <- rep(IRSAR.RF.Data[[1]], 5)) expect_silent(rep(IRSAR.RF.Data[[1]])) ##check expect_equal(length(results),5) }) Luminescence/tests/testthat/test_plot_OSLAgeSummary.R0000644000176200001440000000167514145501521022545 0ustar liggesuserstest_that("Basic test", { testthat::skip_on_cran() local_edition(3) ##cause error expect_error(plot_OSLAgeSummary("error"), "\\[plot_OSLAgeSummary\\(\\)\\] class character not supported as input for object!") ##simple run with example data set.seed(1234) object <- rnorm(1000, 100, 10) ##run as numeric results <- expect_s4_class(plot_OSLAgeSummary(object), "RLum.Results") ##run from S4-class object1 <- set_RLum("RLum.Results", data = list(A = object), originator = ".calc_BayesianCentralAgeModel") object2 <- set_RLum("RLum.Results", data = list(A = object), originator = ".calc_IndividualAgeModel") expect_s4_class(plot_OSLAgeSummary(object1), "RLum.Results") expect_s4_class(plot_OSLAgeSummary(object2), "RLum.Results") ##run with no output expect_silent(plot_OSLAgeSummary(object, verbose = FALSE)) ##check the results 
expect_length(results, 3) }) Luminescence/tests/testthat/test_fit_LMCurve.R0000644000176200001440000000471614367174003021244 0ustar liggesusers## Test 1 with NLS data(ExampleData.FittingLM, envir = environment()) fit <- fit_LMCurve(values = values.curve, values.bg = values.curveBG, n.components = 3, log = "x", start_values = data.frame(Im = c(170,25,400), xm = c(56,200,1500)), plot = FALSE) test_that("crashs and warnings function", { testthat::skip_on_cran() local_edition(3) ## wrong input type expect_error(object = fit_LMCurve(values = "error"), regexp = "\\[fit\\_LMCurve\\(\\)\\] 'values' has to be of type 'data.frame' or 'RLum.Data.Curve'!") ## not RBR expect_error(fit_LMCurve(values = set_RLum("RLum.Data.Curve", recordType = "OSL")), regexp = "\\[fit\\_LMCurve\\(\\)\\] recordType should be .+") ## warning for failed confint ...skip on windows because with R >= 4.2 is does not fail anymore if (!grepl(pattern = "mingw", sessionInfo()$platform) && !grepl(pattern = "linux", sessionInfo()$platform)) expect_warning(fit_LMCurve(values = values.curve, fit.calcError = TRUE)) }) test_that("check class and length of output", { testthat::skip_on_cran() local_edition(3) expect_s4_class(fit, "RLum.Results") expect_equal(length(fit), 4) expect_type(fit$component_matrix, "double") expect_equal(nrow(fit$component_matrix), 4000) }) test_that("check values from output example", { testthat::skip_on_cran() local_edition(3) expect_equal(fit$data$n.components, 3) expect_equal(round(fit$data$Im1, digits = 0), 169) expect_equal(round(fit$data$xm1, digits = 0), 49) expect_equal(round(fit$data$b1, digits = 0), 2) expect_equal(round(fit$data$`pseudo-R^2`, digits = 0), 1) }) ## Test 2 with LM data(ExampleData.FittingLM, envir = environment()) fit <- fit_LMCurve(values = values.curve, values.bg = values.curveBG, n.components = 3, log = "x", fit.method = "LM", plot = FALSE) test_that("check class and length of output", { testthat::skip_on_cran() local_edition(3) expect_s4_class(fit, 
"RLum.Results") expect_equal(length(fit), 4) }) test_that("check values from output example", { testthat::skip_on_cran() local_edition(3) expect_equal(fit$data$n.components, 3) expect_equal(round(fit$data$Im1, digits = 0), 169) expect_equal(round(fit$data$xm1, digits = 0), 49) expect_equal(round(fit$data$b1, digits = 0), 2) expect_equal(round(fit$data$`pseudo-R^2`, digits = 0), 1) }) Luminescence/tests/testthat/test_verify_SingleGrainData.R0000644000176200001440000000147114145666016023444 0ustar liggesuserstest_that("Various function test", { testthat::skip_on_cran() local_edition(3) data(ExampleData.XSYG, envir = environment()) object <- get_RLum( OSL.SARMeasurement$Sequence.Object, recordType = "OSL (UVVIS)", drop = FALSE) ##initial expect_warning(verify_SingleGrainData(object)) output <- suppressWarnings(verify_SingleGrainData(object)) ##return value expect_s4_class(output, "RLum.Results") expect_s3_class(output$selection_full, "data.frame") ##check options expect_silent(suppressWarnings(verify_SingleGrainData(object, plot = TRUE))) expect_silent(suppressWarnings(verify_SingleGrainData(object, threshold = 100))) expect_silent(suppressWarnings(verify_SingleGrainData(object, verbose = FALSE))) expect_silent(suppressWarnings(verify_SingleGrainData(object, cleanup = TRUE))) }) Luminescence/tests/testthat/test_Second2Gray.R0000644000176200001440000000135414062436223021175 0ustar liggesusersdata(ExampleData.DeValues, envir = environment()) results <- Second2Gray(ExampleData.DeValues$BT998, c(0.2,0.01)) results_alt1 <- Second2Gray(ExampleData.DeValues$BT998, c(0.2,0.01), error.propagation = "gaussian") results_alt2 <- Second2Gray(ExampleData.DeValues$BT998, c(0.2,0.01), error.propagation = "absolute") test_that("check class and length of output", { testthat::skip_on_cran() local_edition(3) expect_s3_class(results, class = "data.frame") }) test_that("check values from output example", { testthat::skip_on_cran() local_edition(3) expect_equal(sum(results[[1]]), 14754.09) 
expect_equal(sum(results[[2]]), 507.692) expect_equal(sum(results_alt1[[2]]), 895.911) expect_equal(sum(results_alt2[[2]]), 1245.398) }) Luminescence/tests/testthat/test_analyse_baSAR.R0000644000176200001440000000250114062436223021514 0ustar liggesusers##Full check test_that("Full check of analyse_baSAR function", { skip_on_cran() local_edition(3) set.seed(1) ##(1) load package test data set data(ExampleData.BINfileData, envir = environment()) ##(2) selecting relevant curves, and limit dataset CWOSL.SAR.Data <- subset(CWOSL.SAR.Data, subset = POSITION %in% c(1:3) & LTYPE == "OSL") ##(3) run analysis ##please not that the here selected parameters are ##chosen for performance, not for reliability results <- suppressWarnings(analyse_baSAR( object = CWOSL.SAR.Data, source_doserate = c(0.04, 0.001), signal.integral = c(1:2), background.integral = c(80:100), fit.method = "EXP", method_control = list(inits = list( list(.RNG.name = "base::Wichmann-Hill", .RNG.seed = 1), list(.RNG.name = "base::Wichmann-Hill", .RNG.seed = 2), list(.RNG.name = "base::Wichmann-Hill", .RNG.seed = 3) )), plot = TRUE, verbose = TRUE, n.MCMC = 100, txtProgressBar = TRUE )) expect_s4_class(results, class = "RLum.Results") expect_s3_class(results$summary, "data.frame") expect_s3_class(results$mcmc, "mcmc.list") expect_type(results$models, "list") expect_type(round(sum(results$summary[, c(6:9)]), 2),type = "double") }) Luminescence/tests/testthat/test_internals.R0000644000176200001440000001111614521207352021050 0ustar liggesuserstest_that("Test internals", { testthat::skip_on_cran() local_edition(3) # .expand_parameters() ------------------------------------------------------ ##create empty function ... 
reminder ##this is an internal function, the first object is always discarded, it ##might be a list of RLum.Analysis objects is might be super large f <- function(object, a, b = 1, c = list(), d = NULL) { Luminescence:::.expand_parameters(len = 3) } ##test some functions ##missing arguments must be identified expect_error(f(), "Argument missing; with no default!") ##check whether the objects are properly recycled expect_type(f(object, a = 1), "list") expect_length(f(object, a = 1, c = list(a = 1, b = 2, c = 3))$c, 3) # .calc_HPDI() ------------------------------------------------------------ set.seed(1234) test <- expect_type(Luminescence:::.calc_HPDI(rnorm(100), plot = TRUE), "double") expect_equal(round(sum(test),2), 0.20, tolerance = 1) ##create a case where the value cannot be calculated expect_type(.calc_HPDI(rlnorm(n = 100, meanlog = 10, sdlog = 100)), type = "logical") # .warningCatcher() --------------------------------------------------------------------------- expect_warning(Luminescence:::.warningCatcher(for(i in 1:5) warning("test")), regexp = "\\(1\\) test\\: This warning occurred 5 times\\!") # .smoothing ---------------------------------------------------------------------------------- expect_silent(Luminescence:::.smoothing(runif(100), k = 5, method = "median")) expect_error(Luminescence:::.smoothing(runif(100), method = "test")) # fancy_scientific ()-------------------------------------------------------------------------- plot(seq(1e10, 1e20, length.out = 10),1:10, xaxt = "n") expect_silent(axis(1, at = axTicks(1),labels = Luminescence:::fancy_scientific(axTicks(1)))) # .add_fancy_log_axis() ----------------------------------------------------- y <- c(0.1, 0.001, 0.0001) plot(1:length(y), y, yaxt = "n", log = "y") expect_silent(Luminescence:::.add_fancy_log_axis(side = 2, las = 1)) # .create_StatisticalSummaryText() ------------------------------------------------------------ expect_silent(Luminescence:::.create_StatisticalSummaryText()) 
expect_type( Luminescence:::.create_StatisticalSummaryText( calc_Statistics(data.frame(1:10,1:10)), keywords = "mean"), "character") # .unlist_RLum() ------------------------------------------------------------------------------ expect_length(Luminescence:::.unlist_RLum(list(a = list(b = list(c = list(d = 1, e = 2))))), 2) # .rm_nonRLum() ----------------------------------------------------------- expect_type( Luminescence:::.rm_nonRLum(c(list(set_RLum("RLum.Analysis"), set_RLum("RLum.Analysis")), 2)), "list") expect_type( Luminescence:::.rm_nonRLum( c(list(set_RLum("RLum.Analysis"), set_RLum("RLum.Analysis")), 2), class = "RLum.Analysis"), "list") # .matrix_binning() --------------------------------------------------------------------------- m <- matrix(data = c(rep(1:20, each = 20)), ncol = 20, nrow = 20) rownames(m) <- 1:nrow(m) colnames(m) <- 1:ncol(m) ##crash the function expect_error(Luminescence:::.matrix_binning("none matrix"), regexp = "Input is not of class 'matrix'!") ##test operation modes and arguments expect_type(Luminescence:::.matrix_binning(m, bin_size = 4, bin_col = FALSE), "integer") expect_type(Luminescence:::.matrix_binning(m, bin_size = 4, bin_col = TRUE), "integer") ##test row / column renaming options expect_type(Luminescence:::.matrix_binning(m, bin_size = 2, bin_col = FALSE, names = "groups"), "integer") expect_type(Luminescence:::.matrix_binning(m, bin_size = 2, bin_col = FALSE, names = "mean"), "integer") expect_type(Luminescence:::.matrix_binning(m, bin_size = 2, bin_col = FALSE, names = "sum"), "integer") expect_type(Luminescence:::.matrix_binning(m, bin_size = 2, bin_col = FALSE, names = c("test1", "test2")), "integer") ##clean-up rm(m) ## C++ code ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ ## # src_create_RLumDataCurve_matrix ------------------------------------------------------------- ##RLum.Data.Curve() ... 
test src_create_RLumDataCurve_matrix() expect_output( Luminescence:::src_create_RLumDataCurve_matrix( DATA = 1:100, VERSION = 4, NPOINTS = 100, LTYPE = "TL", LOW = 0, HIGH = 500, AN_TEMP = 0, TOLDELAY = 0, TOLON = 0, TOLOFF = 0 ) ) }) Luminescence/tests/testthat/test_extract_ROI.R0000644000176200001440000000524114212442347021240 0ustar liggesuserstest_that("extract_ROI", { testthat::skip_on_cran() local_edition(3) ## generate random data m <- matrix(runif(100,0,255), ncol = 10, nrow = 10) set.seed(12245) a <- array(runif(300, 0,255), c(10,10,3)) RLum <- set_RLum("RLum.Data.Image", data = a) RLum_list <- list(RLum, RLum) roi <- matrix(c(2.,4,2,5,6,7,3,1,1), ncol = 3) ## crash the function expect_error(extract_ROI(object = "error", roi), "\\[extract_ROI\\(\\)\\] Input for argument 'object' not supported\\!") expect_error(extract_ROI(object = m, "error"), "\\[extract_ROI\\(\\)\\] Please check the format of roi, it looks wrong\\!") expect_error(extract_ROI(object = m, matrix()), "\\[extract_ROI\\(\\)\\] Please check the format of roi, it looks wrong\\!") expect_error(extract_ROI(object = m, matrix(ncol = 3, nrow = 0)), "\\[extract_ROI\\(\\)\\] Please check the format of roi, it looks wrong\\!") ## run function for all supported input objects ## matrix expect_s4_class(extract_ROI(object = m, roi), "RLum.Results") ## array expect_s4_class(extract_ROI(object = a, roi), "RLum.Results") ## RLum.Data.Image expect_s4_class(extract_ROI(object = RLum, roi), "RLum.Results") ## list results <- expect_s4_class(extract_ROI(object = RLum_list, roi), "RLum.Results") ## regression test if it fails, we have to amend the documentation expect_length(results@data$roi_signals, 6) expect_length(results@data, 3) ## with plot output expect_silent(extract_ROI(object = RLum, roi, plot = TRUE)) expect_silent(extract_ROI(object = RLum_list, roi, plot = TRUE)) ## test with package example dataset data(ExampleData.RLum.Data.Image, envir = environment()) roi <- 
matrix(c(200,400,200,40,60,80,10,10,10), ncol = 3) expect_s4_class(extract_ROI(object = ExampleData.RLum.Data.Image, roi), "RLum.Results") ## test ROI summary options roi <- matrix(c(2.,4,2,5,6,7,3,1,1), ncol = 3) t_mean <- expect_type(extract_ROI(object = RLum, roi, roi_summary = "mean")@data$roi_summary, "double") expect_equal(sum(t_mean),1124, tolerance = 0.001) t_median <- expect_type(extract_ROI(object = RLum, roi, roi_summary = "median")@data$roi_summary, "double") expect_equal(sum(t_median),1104, tolerance = 0.001) t_sd <- expect_type(extract_ROI(object = RLum, roi, roi_summary = "sd")@data$roi_summary, "double") expect_equal(sum(t_sd),730, tolerance = 0.001) t_sum <- expect_type(extract_ROI(object = RLum, roi, roi_summary = "sum")@data$roi_summary, "double") expect_equal(sum(t_sum), 8117, tolerance = 0.001) ## crash expect_error(extract_ROI(object = RLum, roi, roi_summary = "error"), "\\[extract\\_ROI\\(\\)\\] roi\\_summary method not supported, check manual!") }) Luminescence/tests/testthat/test_analyse_IRSARRF.R0000644000176200001440000000521314062436223021677 0ustar liggesuserstest_that("check class and length of output", { testthat::skip_on_cran() local_edition(3) set.seed(1) data(ExampleData.RLum.Analysis, envir = environment()) results_fit <- analyse_IRSAR.RF(object = IRSAR.RF.Data, plot = TRUE, method = "FIT") results_slide <- suppressWarnings( analyse_IRSAR.RF(object = IRSAR.RF.Data, plot = TRUE, method = "SLIDE", n.MC = NULL)) results_slide_alt <- analyse_IRSAR.RF( object = IRSAR.RF.Data, plot = FALSE, method = "SLIDE", n.MC = 10, method.control = list(vslide_range = 'auto', trace_vslide = TRUE), txtProgressBar = FALSE ) expect_equal(is(results_fit), c("RLum.Results", "RLum")) expect_equal(length(results_fit), 5) expect_equal(length(results_slide), 5) expect_s3_class(results_fit$fit, class = "nls") expect_s3_class(results_slide$fit, class = "nls") expect_length(results_slide$slide, 10) expect_equal(results_fit$data$DE, 623.25) 
expect_equal(results_fit$data$DE.LOWER, 600.63) expect_equal(results_slide$data$DE, 610.17) expect_equal(round(results_slide_alt$data$DE, digits = 0), 384) }) test_that("test controlled chrash conditions", { testthat::skip_on_cran() local_edition(3) ##the sliding range should not exceed a certain value ... test it data(ExampleData.RLum.Analysis, envir = environment()) expect_error( analyse_IRSAR.RF( object = IRSAR.RF.Data, plot = FALSE, method = "SLIDE", n.MC = 2, method.control = list(vslide_range = c(0,1e+07)), txtProgressBar = FALSE ), regexp = "[:::src_analyse_IRSAR_SRS()] 'vslide_range' exceeded maximum size (1e+07)!", fixed = TRUE) }) test_that("test support for IR-RF data", { testthat::skip_on_cran() local_edition(3) ## get needed data file <- system.file("extdata", "RF_file.rf", package = "Luminescence") temp <- read_RF2R(file) expect_s4_class( suppressWarnings( analyse_IRSAR.RF(object = temp[1:3], method = "SLIDE", plot_reduced = TRUE, n.MC = 1)), "RLum.Results") }) test_that("test edge cases", { testthat::skip_on_cran() local_edition(3) data(ExampleData.RLum.Analysis, envir = environment()) RF_nat <- RF_reg <- IRSAR.RF.Data[[2]] RF_reg@data[,2] <- runif(length(RF_reg@data[,2]), 0.007557956, 0.05377426 ) RF_nat@data[,2] <- runif(length(RF_nat@data[,2]), 65.4, 76.7) RF_nat@data <- RF_nat@data[1:50,] expect_s4_class(analyse_IRSAR.RF( set_RLum("RLum.Analysis", records = list(RF_nat, RF_reg)), method = "SLIDE", method.control = list(vslide_range = 'auto', correct_onset = FALSE), RF_nat.lim = 2, RF_reg.lim = 2, plot = FALSE, txtProgressBar = FALSE ), "RLum.Results") }) Luminescence/tests/testthat/test_as_latex_table.R0000644000176200001440000000030114062436223022013 0ustar liggesuserstest_that("Check github_commits()", { testthat::skip_on_cran() local_edition(3) df <- data.frame(x = "test", y = 1:10) expect_output(Luminescence:::.as.latex.table.data.frame(df)) }) 
Luminescence/tests/testthat/test_convert_Wavelength2Energy.R0000644000176200001440000000433014062436223024152 0ustar liggesuserstest_that("test convert functions", { testthat::skip_on_cran() local_edition(3) # Set up test scenario ------------------------------------------------------------------------ #create artifical dataset according to Mooney et al. (2013) lambda <- seq(400,800,50) data <- matrix(data = rep(1, 2 * length(lambda)), ncol = 2) rownames(data) <- lambda colnames(data) <- 1:ncol(data) ##set plot function p <- function(m) { plot(x = m[, 1], y = m[, 2]) polygon(x = c(m[, 1], rev(m[, 1])), y = c(m[, 2], rep(0, nrow(m)))) for (i in 1:nrow(m)) { lines(x = rep(m[i, 1], 2), y = c(0, m[i, 2])) } } # Test ---------------------------------------------------------------------------------------- ##crash function expect_error(convert_Wavelength2Energy("test"), regexp = "Class 'character' not supported as input!") ##test all three allowed input objects expect_type(convert_Wavelength2Energy(data), "double") expect_s3_class(convert_Wavelength2Energy(as.data.frame(data)), class = "data.frame") object <- set_RLum(class = "RLum.Data.Spectrum", data = data[,1,drop = FALSE]) expect_s4_class(convert_Wavelength2Energy(object), class = "RLum.Data.Spectrum") ##test the list option expect_type(convert_Wavelength2Energy(list(data, as.data.frame(data), object)), "list") ##test order argument expect_type(convert_Wavelength2Energy(data, order = TRUE), "double") ##test special treatment of RLum.Data.Spectrum objects object@info[["curveDescripter"]] <- "energy" expect_message(convert_Wavelength2Energy(object), regexp = "Your object has already an energy scale, nothing done!") ##Code below just a cross check if wanted ##matrix # m <- cbind(as.numeric(rownames(data)), data) # par(mfrow = c(1,2)) # p(m) # p(convert_Wavelength2Energy(m)) # # ##df # df <- as.data.frame(cbind(as.numeric(rownames(data)), data)) # p(df) # p(convert_Wavelength2Energy(df)) # # ##RLum.Data.Spectrum # object 
<- set_RLum(class = "RLum.Data.Spectrum", data = data[,1,drop = FALSE]) # par(mfrow = c(1,2)) # plot_RLum.Data.Spectrum(object, plot.type = "single", par.local = FALSE) # plot_RLum.Data.Spectrum(convert_Wavelength2Energy(object), plot.type = "single", par.local = FALSE) }) Luminescence/tests/testthat/test_write_R2TIFF.R0000644000176200001440000000160714212442347021225 0ustar liggesuserstest_that("Test general functionality", { testthat::skip_on_cran() local_edition(3) ## load example data data(ExampleData.RLum.Data.Image, envir = environment()) data(ExampleData.XSYG, envir = environment()) ##crash function expect_error(write_R2TIFF(object = "test"), "\\[write\\_R2TIFF\\(\\)\\] Only RLum.Data.Image and RLum.Data.Spectrum objects are supported!") expect_error(write_R2TIFF(object = ExampleData.RLum.Data.Image, file = "error/error"), "\\[write\\_R2TIFF\\(\\)\\] Path does not exist!") ## export RLum.Data.Image expect_null(write_R2TIFF(ExampleData.RLum.Data.Image, file = tempfile(fileext = "tiff"))) ## export RLum.Data.Spectrum expect_null(write_R2TIFF(TL.Spectrum, file = tempfile(fileext = "tiff"))) ## a list expect_null(write_R2TIFF(list(ExampleData.RLum.Data.Image, TL.Spectrum), file = tempfile(fileext = "tiff"))) }) Luminescence/tests/testthat/test_plot_RLum.R0000644000176200001440000000161614212442347020774 0ustar liggesuserstest_that("test_plot_RLum", { testthat::skip_on_cran() local_edition(3) ## create dataset to test image <- as(array(rnorm(1000), dim = c(10,10,10)), "RLum.Data.Image") expect_silent(plot_RLum(image)) ## check list with different dispatched arguments image_short <- as(array(rnorm(100), dim = c(10, 10, 1)), "RLum.Data.Image") expect_silent(plot_RLum(list(image_short, image_short), main = list("test1", "test2"), mtext = "test")) ## trigger error expect_error(plot_RLum("error"), "\\[plot_RLum\\(\\)\\] Sorry, I don't know what to do for object of type 'character'.") ## test list of RLum.Analysis l <- list(set_RLum( class = "RLum.Analysis", records = 
list( set_RLum("RLum.Data.Curve", data = matrix(1:10, ncol = 2)), set_RLum("RLum.Data.Curve", data = matrix(1:20, ncol = 2))))) expect_silent(plot_RLum(l, main = list("test", "test2"), mtext = "test")) }) Luminescence/tests/testthat/test_CW2pX.R0000644000176200001440000000526214062436223017762 0ustar liggesusers##load data data(ExampleData.CW_OSL_Curve, envir = environment()) values <- CW_Curve.BosWallinga2012 test_that("Check the example and the numerical values", { testthat::skip_on_cran() local_edition(3) values_pLM <- CW2pLM(values) values_pLMi <- CW2pLMi(values, P = 1/20) values_pLMi_alt <- CW2pLMi(values) values_pHMi <- suppressWarnings(CW2pHMi(values, delta = 40)) values_pHMi_alt <- suppressWarnings(CW2pHMi(values)) values_pHMi_alt1 <- suppressWarnings(CW2pHMi(values, delta = 2)) values_pPMi <- suppressWarnings(CW2pPMi(values, P = 1/10)) ##check conversion sum values expect_equal(round(sum(values_pLM), digits = 0),90089) expect_equal(round(sum(values_pLMi[,1:2]), digits = 0),197522) expect_equal(round(sum(values_pLMi_alt[,1:2]), digits = 0),197522) expect_equal(round(sum(values_pHMi[,1:2]), digits = 0),217431) expect_equal(round(sum(values_pHMi_alt[,1:2]), digits = 0),217519) expect_equal(round(sum(values_pHMi_alt1[,1:2]), digits = 0), 221083) expect_equal(round(sum(values_pPMi[,1:2]), digits = 0),196150) }) test_that("Test RLum.Types", { testthat::skip_on_cran() local_edition(3) ##load CW-OSL curve data data(ExampleData.CW_OSL_Curve, envir = environment()) object <- set_RLum( class = "RLum.Data.Curve", data = as.matrix(ExampleData.CW_OSL_Curve), curveType = "measured", recordType = "OSL" ) ##transform values expect_s4_class(CW2pLM(object), class = "RLum.Data.Curve") expect_s4_class(CW2pLMi(object), class = "RLum.Data.Curve") expect_s4_class(CW2pHMi(object), class = "RLum.Data.Curve") expect_s4_class(suppressWarnings(CW2pPMi(object)), class = "RLum.Data.Curve") ##test error handling expect_error(CW2pLMi(values, P = 0), regexp = "[CW2pLMi] P has to be > 0!", 
fixed = TRUE) expect_warning(CW2pLMi(values, P = 10)) expect_error(object = CW2pLM(values = matrix(0, 2))) expect_error(object = CW2pLMi(values = matrix(0, 2))) expect_error(object = CW2pHMi(values = matrix(0, 2))) expect_error(object = CW2pPMi(values = matrix(0, 2))) object@recordType <- "RF" expect_error(CW2pLM(values = object), "recordType RF is not allowed for the transformation") expect_error(object = CW2pLMi(values = object), regexp = "[CW2pLMi()] recordType RF is not allowed for the transformation!", fixed = TRUE) expect_error(object = CW2pHMi(values = object), regexp = "[CW2pHMi()] recordType RF is not allowed for the transformation!", fixed = TRUE) expect_error(object = CW2pPMi(values = object), regexp = "[CW2pPMi()] recordType RF is not allowed for the transformation!", fixed = TRUE) }) Luminescence/tests/testthat/test_analyse_Al2O3C_ITC.R0000644000176200001440000000122114502613217022203 0ustar liggesusers##Full check test_that("Full check", { skip_on_cran() local_edition(3) ##check stops ##RLum-object expect_error(object = analyse_Al2O3_ITC(object = "test")) ##input curve type a <- set_RLum(class = "RLum.Data.Curve", recordType = "OSL", data = matrix(1:20, ncol = 2)) b <- set_RLum(class = "RLum.Data.Curve", recordType = "TL") object <- set_RLum(class = "RLum.Analysis", records = list(a,b)) expect_error(object = analyse_Al2O3_ITC(object)) ##check with example data ##load data data(ExampleData.Al2O3C, envir = environment()) ##run analysis expect_s4_class(analyse_Al2O3C_ITC(data_ITC), "RLum.Results") }) Luminescence/tests/testthat/test_calc_FastRatio.R0000644000176200001440000000267014062436223021735 0ustar liggesusersdata("ExampleData.CW_OSL_Curve") temp <- calc_FastRatio(ExampleData.CW_OSL_Curve, plot = FALSE, verbose = FALSE) test_that("check class and length of output", { testthat::skip_on_cran() local_edition(3) expect_s4_class(temp, "RLum.Results") expect_equal(length(temp), 5) }) test_that("check values from output", { testthat::skip_on_cran() 
local_edition(3) results <- get_RLum(temp) expect_equal(round(results$fast.ratio, digits = 3), 405.122) expect_equal(round(results$fast.ratio.se, digits = 4), 119.7442) expect_equal(round(results$fast.ratio.rse, digits = 5), 29.55756) expect_equal(results$channels, 1000) expect_equal(round(results$channel.width, digits = 2), 0.04) expect_equal(results$dead.channels.start, 0) expect_equal(results$dead.channels.end, 0) expect_equal(results$sigmaF, 2.6e-17) expect_equal(results$sigmaM, 4.28e-18) expect_equal(results$stimulation.power, 30.6) expect_equal(results$wavelength, 470) expect_equal(results$t_L1, 0) expect_equal(round(results$t_L2, digits = 6), 2.446413) expect_equal(round(results$t_L3_start, digits = 5), 14.86139) expect_equal(round(results$t_L3_end, digits = 5), 22.29208) expect_equal(results$Ch_L1, 1) expect_equal(results$Ch_L2, 62) expect_equal(results$Ch_L3_start, 373) expect_equal(results$Ch_L3_end, 558) expect_equal(results$Cts_L1, 11111) expect_equal(results$Cts_L2, 65) expect_equal(round(results$Cts_L3, digits = 5), 37.66667) }) Luminescence/tests/testthat/test_calc_TLLxTxRatio.R0000644000176200001440000000666114521207352022202 0ustar liggesuserstest_that("calc_TLLxTxRatio", { testthat::skip_on_cran() local_edition(3) ##load package example data data(ExampleData.BINfileData, envir = environment()) ##convert Risoe.BINfileData into a curve object temp <- Risoe.BINfileData2RLum.Analysis(TL.SAR.Data, pos = 3) Lx.data.signal <- get_RLum(temp, record.id = 1) Lx.data.background <- get_RLum(temp, record.id = 2) Tx.data.signal <- get_RLum(temp, record.id = 3) Tx.data.background <- get_RLum(temp, record.id = 4) signal.integral.min <- 210 signal.integral.max <- 230 ## break the function ## different data types expect_error(calc_TLLxTxRatio( Lx.data.signal, Lx.data.background, Tx.data.signal = as.data.frame(Tx.data.signal), Tx.data.background, signal.integral.min, signal.integral.max), regexp = "\\[calc\\_TLLxTxRatio\\(\\)\\] Data types of Lx and Tx data 
differ.+") ## different data types expect_error(calc_TLLxTxRatio( Lx.data.signal, Lx.data.background, Tx.data.signal = as.data.frame(Tx.data.signal), Tx.data.background, signal.integral.min, signal.integral.max), regexp = "\\[calc\\_TLLxTxRatio\\(\\)\\] Data types of Lx and Tx data differ.+") ## check for allowed data types expect_error(calc_TLLxTxRatio( Lx.data.signal = as.matrix(Tx.data.signal), Lx.data.background, Tx.data.signal = as.matrix(Tx.data.signal), Tx.data.background, signal.integral.min, signal.integral.max), regexp = "\\[calc\\_TLLxTxRatio\\(\\)\\] Input data type for not allowed.+") ## check for different channel numbers expect_error(calc_TLLxTxRatio( Lx.data.signal = as.data.frame(Tx.data.signal)[1:10,], Lx.data.background, Tx.data.signal = as.data.frame(Tx.data.signal), Tx.data.background, signal.integral.min, signal.integral.max), regexp = "\\[calc\\_TLLxTxRatio\\(\\)\\] Channel numbers differ for Lx and Tx data.+") ## use invalid signal integral expect_error(calc_TLLxTxRatio( Lx.data.signal, Lx.data.background, Tx.data.signal, Tx.data.background, signal.integral.min = 10, signal.integral.max = 1000), regexp = "\\[calc\\_TLLxTxRatio\\(\\)\\] signal.integral is not valid.+") ## trigger warning expect_warning(calc_TLLxTxRatio( Lx.data.signal, Lx.data.background, Tx.data.signal, Tx.data.background = Lx.data.background, signal.integral.min, signal.integral.max), regexp = "\\[calc\\_TLLxTxRatio\\(\\)\\] The background signals for Lx and Tx appear to be similar.+") ## run function without error temp <- expect_s4_class(calc_TLLxTxRatio( Lx.data.signal, Lx.data.background, Tx.data.signal, Tx.data.background, signal.integral.min, signal.integral.max), class = "RLum.Results") ## check lenght expect_equal(length(temp), 1) ## extract elements results <- get_RLum(temp) expect_equal(length(results), 10) expect_equal(results$LnLx, 257042) expect_equal(results$LnLx.BG, 4068) expect_equal(results$TnTx, 82298) expect_equal(results$TnTx.BG, 2943) 
expect_equal(results$net_LnLx, 252974) expect_equal(round(results$net_LnLx.Error, digits = 2), 49468.92) expect_equal(results$net_TnTx, 79355) expect_equal(round(results$net_TnTx.Error,2), 21449.72) expect_equal(round(results$LxTx, digits = 6), 3.187877) expect_equal(round(results$LxTx.Error, digits = 6), 1.485073) }) Luminescence/tests/testthat/test_calc_Huntley2006.R0000644000176200001440000000714414521207352022001 0ustar liggesusersset.seed(1) data("ExampleData.Fading", envir = environment()) fading_data <- ExampleData.Fading$fading.data$IR50 data <- ExampleData.Fading$equivalentDose.data$IR50 ddot <- c(7.00, 0.004) readerDdot <- c(0.134, 0.0067) rhop <- analyse_FadingMeasurement(fading_data, plot = FALSE, verbose = FALSE, n.MC = 10) huntley <- calc_Huntley2006( data = data, rhop = rhop, ddot = ddot, readerDdot = readerDdot, n.MC = 50, plot = FALSE, verbose = FALSE ) test_that("check class and length of output", { testthat::skip_on_cran() local_edition(3) ##rhop expect_s4_class(rhop, class = "RLum.Results") expect_s3_class(rhop$fading_results, "data.frame") expect_s3_class(rhop$fit, "lm") expect_s3_class(rhop$rho_prime, "data.frame") ##kars expect_s4_class(huntley, class = "RLum.Results") expect_s3_class(huntley$results, class = "data.frame") expect_s3_class(huntley$data, class = "data.frame") expect_type(huntley$Ln, "double") expect_type(huntley$fits, "list") }) test_that("check values from analyse_FadingMeasurement()", { expect_equal(round(sum(rhop$fading_results[,1:9]),0),415) expect_equal(round(sum(rhop$rho_prime),5),2e-05) expect_equal(round(sum(rhop$irr.times)), 2673108) }) test_that("check values from calc_Huntley2008()", { testthat::skip_on_cran() ##fix for different R versions if(R.version$major == "3" && as.numeric(R.version$minor) < 6){ expect_equal(round(huntley$results$Sim_Age, 1), 41.3) expect_equal(round(huntley$results$Sim_Age_2D0, 0), 164) expect_equal(round(sum(huntley$Ln),4), 0.1585) }else{ expect_equal(round(huntley$results$Sim_Age, 1), 42.3) 
expect_equal(round(huntley$results$Sim_Age_2D0, 0), 163) expect_equal(round(sum(huntley$Ln),4), 0.1621) } expect_equal(round(sum(huntley$data),0), 191530) expect_equal(round(sum(residuals(huntley$fits$simulated)),1), 0.3) expect_equal(round(sum(residuals(huntley$fits$measured)),4), 0.1894) expect_equal(round(sum(residuals(huntley$fits$unfaded)),4), 1.6293) ## COMPARE calc_Kars2008 (deprecated) vs. re-named calc_Huntley2006 test_that("compare deprecated calc_Kars2008 and calc_Huntley2006", { testthat::skip_on_cran() expect_identical({ set.seed(1) calc_Huntley2006( data = data, rhop = rhop, ddot = ddot, readerDdot = readerDdot, n.MC = 50, fit.method = "EXP", plot = FALSE, verbose = FALSE)$results }, { set.seed(1) suppressWarnings( calc_Kars2008( data = data, rhop = rhop, ddot = ddot, readerDdot = readerDdot, n.MC = 50, fit.method = "EXP", plot = FALSE, verbose = FALSE)$results ) })#EndOf::expect_identical expect_identical({ set.seed(1) calc_Huntley2006( data = data, rhop = rhop, ddot = ddot, readerDdot = readerDdot, n.MC = 500, fit.method = "GOK", plot = FALSE, verbose = FALSE)$results }, { set.seed(1) suppressWarnings( calc_Kars2008( data = data, rhop = rhop, ddot = ddot, readerDdot = readerDdot, n.MC = 500, fit.method = "GOK", plot = FALSE, verbose = FALSE)$results ) })#EndOf::expect_identical ## check extrapolation set.seed(1) expect_s4_class( object = suppressWarnings( calc_Kars2008( data = data, rhop = rhop, ddot = ddot, readerDdot = readerDdot, n.MC = 500, fit.method = "GOK", mode = "extrapolation", plot = FALSE, verbose = FALSE)), class = "RLum.Results") }) }) Luminescence/tests/testthat/test_calc_ThermalLifetime.R0000644000176200001440000000623314062436223023113 0ustar liggesusers##EXAMPLE 1 ##calculation for two trap-depths with similar frequency factor for different temperatures set.seed(1) temp <- calc_ThermalLifetime( E = c(1.66, 1.70), s = 1e+13, T = 10:20, output_unit = "Ma", verbose = FALSE ) ##EXAMPLE 2 ##profiling of thermal life time for E and s and 
their standard error temp2 <- calc_ThermalLifetime( E = c(1.600, 0.003), s = c(1e+13,1e+011), T = 20, profiling = TRUE, output_unit = "Ma", verbose = FALSE, plot = FALSE ) test_that("check class and length of output example 1", { testthat::skip_on_cran() local_edition(3) expect_s4_class(temp, "RLum.Results") expect_equal(length(temp), 2) }) # test_that("check values from output example 1", { testthat::skip_on_cran() local_edition(3) expect_type(temp$lifetimes, "double") expect_equal(dim(temp$lifetimes), c(1, 2, 11)) ##check results for 10 °C results <- lapply(1:length(10:20), function(x){ temp$lifetimes[,,x] }) expect_equal(round(results[[1]], digits = 3), c("1.66" = 1115.541, "1.7" = 5747.042)) expect_equal(round(results[[2]], digits = 4), c("1.66" = 878.0196, "1.7" = 4497.3585)) expect_equal(round(results[[3]], digits = 4), c("1.66" = 692.2329, "1.7" = 3525.4738)) expect_equal(round(results[[4]], digits = 4), c("1.66" = 546.6658, "1.7" = 2768.3216)) expect_equal(round(results[[5]], digits = 4), c("1.66" = 432.4199, "1.7" = 2177.4436)) expect_equal(round(results[[6]], digits = 4), c("1.66" = 342.6069, "1.7" = 1715.5406)) expect_equal(round(results[[7]], digits = 4), c("1.66" = 271.8854, "1.7" = 1353.8523)) expect_equal(round(results[[8]], digits = 4), c("1.66" = 216.1065, "1.7" = 1070.1642)) expect_equal(round(results[[9]], digits = 4), c("1.66" = 172.0421, "1.7" = 847.2879)) expect_equal(round(results[[10]], digits = 4), c("1.66" = 137.1765, "1.7" = 671.9020)) expect_equal(round(results[[11]], digits = 4), c("1.66" = 109.5458, "1.7" = 533.6641)) }) test_that("check class and length of output example 2", { testthat::skip_on_cran() expect_s4_class(temp2, c("RLum.Results")) testthat::expect_equal(length(temp2), 2) }) test_that("check values from output example 2", { testthat::skip_on_cran() testthat::expect_is(temp2$lifetimes, class = c("numeric", "vector")) testthat::expect_equal(length(temp2$lifetimes), 1000) testthat::expect_equal(dim(temp2$profiling_matrix), 
c(1000, 4)) }) test_that("check arguments", { testthat::skip_on_cran() ##missing E and/or s expect_error(calc_ThermalLifetime()) ##profiling settings expect_warning( calc_ThermalLifetime(E = 1.4, s = 1e05, profiling_config = list(n = 10))) expect_error(calc_ThermalLifetime( E = 1.4, s = 1e05, profiling = TRUE, profiling_config = list(E.distribution = "test") )) expect_error(suppressWarnings(calc_ThermalLifetime( E = 1.4, s = 1e05, profiling = TRUE, profiling_config = list(s.distribution = "test")) )) ##output expect_warning(calc_ThermalLifetime(E = 1.4, s = 1e05, output_unit = "test")) expect_output(calc_ThermalLifetime(E = 1.4, s = 1e05, verbose = TRUE)) expect_output(calc_ThermalLifetime(E = c(1.4, 0.001), s = c(1e05,1e03), plot = TRUE, profiling = TRUE)) }) Luminescence/tests/testthat/test_use_DRAC.R0000644000176200001440000000335114321331311020427 0ustar liggesusers##Full check test_that("Test DRAC", { testthat::skip_on_cran() local_edition(3) ##use manual example ##create template input <- template_DRAC(preset = "DRAC-example_quartz") ##test expect_s3_class(input, "DRAC.list") ##fill (which also tests the methods) input$`Project ID` <- "DRAC-Example" input$`Sample ID` <- "Quartz" input$`Conversion factors` <- "AdamiecAitken1998" input$`External U (ppm)` <- 3.4 input$`errExternal U (ppm)` <- 0.51 input$`External Th (ppm)` <- 14.47 input$`errExternal Th (ppm)` <- 1.69 input$`External K (%)` <- 1.2 input$`errExternal K (%)` <- 0.14 input$`Calculate external Rb from K conc?` <- "Y" input$`Calculate internal Rb from K conc?` <- "Y" input$`Scale gammadoserate at shallow depths?` <- "Y" input$`Grain size min (microns)` <- 90 input$`Grain size max (microns)` <- 125 input$`Water content ((wet weight - dry weight)/dry weight) %` <- 5 input$`errWater content %` <- 2 input$`Depth (m)` <- 2.2 input$`errDepth (m)` <- 0.22 input$`Overburden density (g cm-3)` <- 1.8 input$`errOverburden density (g cm-3)` <- 0.1 input$`Latitude (decimal degrees)` <- 30.0000 input$`Longitude 
(decimal degrees)` <- 70.0000 input$`Altitude (m)` <- 150 input$`De (Gy)` <- 20 input$`errDe (Gy)` <- 0.2 ##run DRAC output <- expect_s4_class(use_DRAC(input), "RLum.Results") ## print method for DRAC.highlights expect_output(print(output$DRAC$highlights), regexp = "TO:GP = errAge") ## crash function ## wrong file name expect_error(use_DRAC("error"), "\\[use_DRAC\\(\\)\\] It seems that the file doesn't exist!") ## exceed allowed limit input <- suppressWarnings(template_DRAC(preset = "DRAC-example_quartz", nrow = 5001)) expect_error(use_DRAC(input), "The limit of allowed datasets is 5000!") }) Luminescence/tests/testthat/test_plot_AbanicoPlot.R0000644000176200001440000001446414101766753022324 0ustar liggesuserstest_that("Test examples from the example page", { testthat::skip_on_cran() local_edition(3) ## load example data and recalculate to Gray data(ExampleData.DeValues, envir = environment()) ExampleData.DeValues <- ExampleData.DeValues$CA1 ## plot the example data straightforward expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues)) ## now with linear z-scale expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues, log.z = FALSE)) ## now with output of the plot parameters expect_type(plot_AbanicoPlot(data = ExampleData.DeValues, output = TRUE), "list") ## now with adjusted z-scale limits expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues, zlim = c(10, 200))) ## now with adjusted x-scale limits expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues, xlim = c(0, 20))) ## now with rug to indicate individual values in KDE part expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues, rug = TRUE)) ## now with a smaller bandwidth for the KDE plot expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues, bw = 0.04)) ## now with a histogram instead of the KDE plot expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues, hist = TRUE, kde = FALSE)) ## now with a KDE plot and histogram with manual number of bins 
expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues, hist = TRUE, breaks = 20)) ## now with a KDE plot and a dot plot expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues, dots = TRUE)) ## now with user-defined plot ratio expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues, plot.ratio = 0.5)) ## now with user-defined central value expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues, z.0 = 70)) ## now with median as central value expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues, z.0 = "median")) ## now with the 17-83 percentile range as definition of scatter expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues, z.0 = "median", dispersion = "p17")) ## now with user-defined green line for minimum age model CAM <- calc_CentralDose(ExampleData.DeValues, plot = FALSE) expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues, line = CAM, line.col = "darkgreen", line.label = "CAM")) ## now create plot with legend, colour, different points and smaller scale expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues, legend = "Sample 1", col = "tomato4", bar.col = "peachpuff", pch = "R", cex = 0.8)) ## now without 2-sigma bar, polygon, grid lines and central value line expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues, bar.col = FALSE, polygon.col = FALSE, grid.col = FALSE, y.axis = FALSE, lwd = 0)) ## now with direct display of De errors, without 2-sigma bar expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues, bar.col = FALSE, ylab = "", y.axis = FALSE, error.bars = TRUE)) ## now with user-defined axes labels expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues, xlab = c("Data error (%)", "Data precision"), ylab = "Scatter", zlab = "Equivalent dose [Gy]")) ## now with minimum, maximum and median value indicated expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues, stats = c("min", "max", "median"))) ## now with a brief statistical summary as subheader expect_silent(plot_AbanicoPlot(data = 
ExampleData.DeValues, summary = c("n", "in.2s"))) ## now with another statistical summary expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues, summary = c("mean.weighted", "median"), summary.pos = "topleft")) ## now a plot with two 2-sigma bars for one data set expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues, bar = c(30, 100))) ## now the data set is split into sub-groups, one is manipulated data.1 <- ExampleData.DeValues[1:30,] data.2 <- ExampleData.DeValues[31:62,] * 1.3 data.3 <- list(data.1, data.2) ## now the two data sets are plotted in one plot expect_silent(plot_AbanicoPlot(data = data.3)) ## now with some graphical modification expect_silent(plot_AbanicoPlot(data = data.3, z.0 = "median", col = c("steelblue4", "orange4"), bar.col = c("steelblue3", "orange3"), polygon.col = c("steelblue1", "orange1"), pch = c(2, 6), angle = c(30, 50), summary = c("n", "in.2s", "median"))) ## create Abanico plot with predefined layout definition expect_silent(plot_AbanicoPlot(data = ExampleData.DeValues, layout = "journal")) ## now with predefined layout definition and further modifications expect_silent(plot_AbanicoPlot(data = data.3, z.0 = "median", layout = "journal", col = c("steelblue4", "orange4"), bar.col = adjustcolor(c("steelblue3", "orange3"), alpha.f = 0.5), polygon.col = c("steelblue3", "orange3"))) ## for further information on layout definitions see documentation ## of function get_Layout() ## now with manually added plot content ## create empty plot with numeric output expect_type(plot_AbanicoPlot(data = ExampleData.DeValues, pch = NA, output = TRUE), "list") }) test_that("Cause full function stop", { testthat::skip_on_cran() local_edition(3) ##wrong input data expect_error(plot_AbanicoPlot(data = "Michael"), regexp = "Input data format is neither 'data.frame' nor 'RLum.Results'") }) Luminescence/tests/testthat/test_calc_Lamothe2003.R0000644000176200001440000000657014062436223021742 0ustar liggesuserstest_that("Force function to break", { 
testthat::skip_on_cran() local_edition(3) ##argument check ##object expect_error(calc_Lamothe2003(), regexp = "Input for 'object' missing but required!") ##dose_rate.envir expect_error(calc_Lamothe2003(object = NULL), regexp = "Input for 'dose_rate.envir' missing but required!") expect_error(calc_Lamothe2003(object = NULL, dose_rate.envir = 1, dose_rate.source = 1, g_value = 1), regexp = "Input for 'dose_rate.envir' is not of type 'numeric' and/or of length < 2!") ##dose_rate.source expect_error(calc_Lamothe2003(object = NULL, dose_rate.envir = NULL), regexp = "Input for 'dose_rate.source' missing but required!") expect_error(calc_Lamothe2003(object = NULL, dose_rate.envir = c(1,1), dose_rate.source = 1, g_value = 1), regexp = "Input for 'dose_rate.source' is not of type 'numeric' and/or of length < 2!") ##check warnings expect_s4_class( suppressWarnings(calc_Lamothe2003( object = data.frame( x = c(0,10,20), y = c(1.4,0.7,2.3), z = c(0.01,0.02, 0.03)), dose_rate.envir = c(1,2,3), dose_rate.source = c(1,2,3), g_value = c(1,1))), "RLum.Results") ##g_value expect_error( calc_Lamothe2003( object = NULL, dose_rate.envir = NULL, dose_rate.source = NULL ), regexp = "Input for 'g_value' missing but required!" ) ##object expect_error(suppressWarnings( calc_Lamothe2003( object = NULL, dose_rate.envir = c(1, 2, 3), dose_rate.source = c(1, 2, 3), g_value = NULL ), regexp = "Unsupported data type for 'object'" )) expect_error(suppressWarnings( calc_Lamothe2003( object = set_RLum("RLum.Results"), dose_rate.envir = c(1, 2, 3), dose_rate.source = c(1, 2, 3), g_value = NULL ) )) ##tc expect_error( suppressWarnings(calc_Lamothe2003( object = set_RLum("RLum.Results"), dose_rate.envir = c(1, 2, 3), dose_rate.source = c(1, 2, 3), g_value = c(1, 1), tc.g_value = 1000 ), regexp = "If you set 'tc.g_value' you have to provide a value for 'tc' too!" 
)) }) test_that("Test the function itself", { testthat::skip_on_cran() local_edition(3) ##This is based on the package example ##load data ##ExampleData.BINfileData contains two BINfileData objects ##CWOSL.SAR.Data and TL.SAR.Data data(ExampleData.BINfileData, envir = environment()) ##transform the values from the first position in a RLum.Analysis object object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos=1) ##perform SAR analysis and set rejection criteria results <- analyse_SAR.CWOSL( object = object, signal.integral.min = 1, signal.integral.max = 2, background.integral.min = 900, background.integral.max = 1000, verbose = FALSE, plot = FALSE, onlyLxTxTable = TRUE ) ##run fading correction expect_s4_class(calc_Lamothe2003( object = results, dose_rate.envir = c(1.676 , 0.180), dose_rate.source = c(0.184, 0.003), g_value = c(2.36, 0.6), plot = TRUE, fit.method = "EXP"), class = "RLum.Results") ##run fading correction expect_s4_class(calc_Lamothe2003( object = results, dose_rate.envir = c(1.676 , 0.180), dose_rate.source = c(0.184, 0.003), g_value = c(2.36, 0.6), tc = 1000, tc.g_value = 1200, plot = TRUE, fit.method = "EXP"), class = "RLum.Results") }) Luminescence/tests/testthat/test_calc_FiniteMixture.R0000755000176200001440000000166514145666016022652 0ustar liggesuserstest_that("check class and length of output", { testthat::skip_on_cran() local_edition(3) ## load example data data(ExampleData.DeValues, envir = environment()) ## simple run temp <- expect_s4_class(calc_FiniteMixture( ExampleData.DeValues$CA1, sigmab = 0.2, n.components = 2, grain.probability = TRUE, verbose = TRUE), "RLum.Results") ## check length of output expect_equal(length(temp), 10) ## check for numerical regression results <- get_RLum(temp) expect_equal(results$de[1], 31.5299) expect_equal(results$de[2], 72.0333) expect_equal(results$de_err[1], 3.6387) expect_equal(results$de_err[2], 2.4082) expect_equal(results$proportion[1], 0.1096) expect_equal(results$proportion[2], 0.8904) ## 
test plot expect_s4_class(calc_FiniteMixture( ExampleData.DeValues$CA1, sigmab = 0.2, n.components = 2:3, grain.probability = TRUE, verbose = FALSE), "RLum.Results") }) Luminescence/tests/testthat/test_calc_MinDose.R0000755000176200001440000000207114062436223021375 0ustar liggesusersdata(ExampleData.DeValues, envir = environment()) temp <- calc_MinDose(data = ExampleData.DeValues$CA1, sigmab = 0.1, verbose = FALSE, plot = FALSE) test_that("check class and length of output", { testthat::skip_on_cran() local_edition(3) expect_s4_class(temp, "RLum.Results") expect_equal(length(temp), 9) }) test_that("check values from output example", { testthat::skip_on_cran() local_edition(3) results <- get_RLum(temp) expect_equal(round(results$de, digits = 5), 34.31834) expect_equal(round(results$de_err, digits = 6), 2.550964) expect_equal(results$ci_level, 0.95) expect_equal(round(results$ci_lower, digits = 5), 29.37526) expect_equal(round(results$ci_upper, digits = 5), 39.37503) expect_equal(results$par, 3) expect_equal(round(results$sig, digits = 2), 2.07) expect_equal(round(results$p0, digits = 8), 0.01053938) expect_equal(results$mu, NA) expect_equal(round(results$Lmax, digits = 5), -43.57969) expect_equal(round(results$BIC, digits = 4), 106.4405) }) Luminescence/tests/testthat/test_fit_SurfaceExposure.R0000644000176200001440000000735114062436223023045 0ustar liggesusersdata("ExampleData.SurfaceExposure", envir = environment()) d1 <- ExampleData.SurfaceExposure$sample_1 d2 <- ExampleData.SurfaceExposure$sample_2 d3 <- ExampleData.SurfaceExposure$set_1 d4 <- ExampleData.SurfaceExposure$set_2 ## Example data 1 fit <- fit_SurfaceExposure(data = d1, sigmaphi = 5e-10, mu = 0.9, plot = FALSE, verbose = FALSE) test_that("check class and length of output", { testthat::skip_on_cran() local_edition(3) expect_equal(is(fit), c("RLum.Results", "RLum")) expect_equal(length(fit), 5) expect_equal(is(fit$fit), "nls") }) test_that("check values from output example", { testthat::skip_on_cran() 
local_edition(3) expect_equal(round(fit$summary$age), 9893) expect_equal(round(fit$summary$age_error), 369) }) # Sub-test - weighted fitting fit <- fit_SurfaceExposure(data = d1, sigmaphi = 5e-10, mu = 0.9, weights = TRUE, plot = FALSE, verbose = FALSE) test_that("check values from output example", { testthat::skip_on_cran() local_edition(3) expect_equal(round(fit$summary$age), 9624) expect_equal(round(fit$summary$age_error), 273) }) ## Example data 2 fit <- fit_SurfaceExposure(data = d2, age = 1e4, sigmaphi = 5e-10, Ddot = 2.5, D0 = 40, plot = FALSE, verbose = FALSE) test_that("check class and length of output", { testthat::skip_on_cran() local_edition(3) expect_equal(is(fit), c("RLum.Results", "RLum")) expect_equal(length(fit), 5) expect_equal(is(fit$fit), "nls") }) test_that("check values from output example", { testthat::skip_on_cran() local_edition(3) expect_equal(round(fit$summary$mu, 3), 0.904) expect_equal(round(fit$summary$mu_error, 3), 0.007) }) ## Example data 3 fit <- fit_SurfaceExposure(data = d3, age = c(1e3, 1e4, 1e5, 1e6), sigmaphi = 5e-10, plot = FALSE, verbose = FALSE) test_that("check class and length of output", { testthat::skip_on_cran() local_edition(3) expect_equal(is(fit), c("RLum.Results", "RLum")) expect_equal(nrow(fit$summary), 4) expect_equal(length(fit), 5) expect_equal(is(fit$fit), "nls") }) test_that("check values from output example", { testthat::skip_on_cran() local_edition(3) expect_equal(round(unique(fit$summary$mu), 3), 0.901) expect_equal(round(unique(fit$summary$mu_error), 3), 0.002) }) ## Example data 4 fit <- fit_SurfaceExposure(data = d4, age = c(1e2, 1e3, 1e4, 1e5, 1e6), sigmaphi = 5e-10, Ddot = 1.0, D0 = 40, plot = FALSE, verbose = FALSE) test_that("check class and length of output", { testthat::skip_on_cran() expect_equal(is(fit), c("RLum.Results", "RLum")) expect_equal(nrow(fit$summary), 5) expect_equal(length(fit), 5) expect_equal(is(fit$fit), "nls") }) test_that("check values from output example", { 
testthat::skip_on_cran() expect_equal(round(unique(fit$summary$mu), 3), 0.899) expect_equal(round(unique(fit$summary$mu_error), 3), 0.002) }) #### WARNINGS & FAILURES test_that("not enough parameters provided", { testthat::skip_on_cran() expect_message( fit_SurfaceExposure(data = d1, plot = FALSE, verbose = TRUE), "Unable to fit the data" ) expect_equal( is(fit_SurfaceExposure(data = d2, plot = FALSE, verbose = FALSE)$fit), "simpleError" ) expect_warning( fit_SurfaceExposure(data = d4, age = c(1e2, 1e3, 1e4, 1e5, 1e6), sigmaphi = 5e-10, Ddot = 1.0, D0 = 40, weights = TRUE, plot = FALSE, verbose = TRUE), "is not supported when" ) expect_error( fit_SurfaceExposure(data = d4, age = 1e4, sigmaphi = 5e-10, Ddot = 1.0, D0 = 40, plot = FALSE, verbose = FALSE), "'age' must be of the same length" ) }) Luminescence/tests/testthat/test_RLum.Data.Image.R0000644000176200001440000000375614171333122021630 0ustar liggesuserstest_that("check class ", { testthat::skip_on_cran() local_edition(3) ##load example data data(ExampleData.RLum.Data.Image, envir = environment()) ##set-method ##set empty class expect_s4_class(set_RLum(class = "RLum.Data.Image"), "RLum.Data.Image") ##overwrite only data expect_s4_class(set_RLum(class = "RLum.Data.Image", data = set_RLum("RLum.Data.Image")), "RLum.Data.Image") ##show-method ##show example data expect_output(show(ExampleData.RLum.Data.Image)) ##get-method expect_error(get_RLum(ExampleData.RLum.Data.Image, info.object = 1), regexp = "'info.object' has to be a character!") expect_error(get_RLum(ExampleData.RLum.Data.Image, info.object = "unknown")) expect_type(get_RLum(ExampleData.RLum.Data.Image, info.object = "NumFrames"), "integer") ##names expect_type(names_RLum(ExampleData.RLum.Data.Image), "character") ##conversions ##from matrix and to matrix expect_s4_class(as(matrix(1:10, ncol = 2), "RLum.Data.Image"), "RLum.Data.Image") expect_type(as(ExampleData.RLum.Data.Image, "matrix"), "integer") ##from data.frame and to data.frame df <- 
as.data.frame(as(ExampleData.RLum.Data.Image, "matrix")) expect_s4_class(as(df, "RLum.Data.Image"), "RLum.Data.Image") expect_s3_class(as(ExampleData.RLum.Data.Image, "data.frame"), "data.frame") ## to and from array expect_type(as(ExampleData.RLum.Data.Image, "array"), "integer") from_array <- expect_s4_class(as(array(1,dim = c(10,10,2)), "RLum.Data.Image"), "RLum.Data.Image") ## to and from list expect_s4_class(as(list(matrix(1, nrow = 10, ncol = 5), matrix(1, nrow = 10, ncol = 5)), "RLum.Data.Image"), "RLum.Data.Image") expect_type(as(ExampleData.RLum.Data.Image, "list"), "list") ## check edge cases expect_error(as(from_array, "matrix"), "No viable coercion to matrix, object contains multiple frames. Please convert to array instead.") expect_error(as(from_array, "data.frame"), "No viable coercion to data.frame, object contains multiple frames.") }) Luminescence/tests/testthat/test_calc_AverageDose.R0000644000176200001440000000127014062436223022221 0ustar liggesusersdata(ExampleData.DeValues, envir = environment()) temp <- calc_AverageDose(ExampleData.DeValues$CA1[1:56,], sigma_m = 0.1, plot = FALSE, verbose = FALSE) test_that("check class and length of output", { testthat::skip_on_cran() local_edition(3) expect_s4_class(temp, "RLum.Results") expect_equal(length(temp), 3) }) test_that("check summary output", { testthat::skip_on_cran() local_edition(3) results <- get_RLum(temp) expect_equal(round(results$AVERAGE_DOSE, digits = 4), 65.3597) expect_equal(round(results$SIGMA_D, digits = 4), 0.3092) expect_equal(round(results$L_MAX, digits = 5), -19.25096) }) Luminescence/tests/testthat/test_read_TIFF2R.R0000644000176200001440000000062514166622453021013 0ustar liggesuserstest_that("Test general functionality", { testthat::skip_on_cran() local_edition(3) ##crash function expect_error(object = read_TIFF2R(file = "text"), regexp = "\\[read_TIFF2R\\(\\)\\] File does not exist or is not readable!") ## test import file <- system.file("extdata", "TIFFfile.tif", package = 
"Luminescence") expect_s4_class(read_TIFF2R(file), "RLum.Data.Image") }) Luminescence/tests/testthat/test_subset_RLum.R0000644000176200001440000000212414062436223021315 0ustar liggesusers# RLum.Analysis ----------------------------------------------------------- test_that("subset RLum.Analysis", { testthat::skip_on_cran() local_edition(3) data("ExampleData.RLum.Analysis") temp <- IRSAR.RF.Data ## subset.RLum.Analysis() - S3 method ### empty call expect_s4_class(subset(temp), "RLum.Analysis") expect_length(subset(temp), length(temp)) expect_identical(subset(temp)[[1]], temp[[1]]) ### errors expect_error(subset(temp, LTYPE == "RF"), regexp = "Valid terms are") expect_null(subset(temp, recordType == "xx")) ### valid expect_s4_class(subset(temp, recordType == "RF"), class = "RLum.Analysis") expect_s4_class(subset(temp, recordType == "RF")[[1]], class = "RLum.Data.Curve") expect_length(subset(temp, recordType == "RF"), n = length(temp)) ## get_RLum(, subset = ()) expect_s4_class(get_RLum(temp, subset = recordType == "RF"), class = "RLum.Analysis") expect_s4_class(get_RLum(temp, subset = recordType == "RF")[[1]], class = "RLum.Data.Curve") expect_length(get_RLum(temp, subset = recordType == "RF"), n = length(temp)) }) Luminescence/tests/testthat/test_merge_RisoeBINfileData.R0000644000176200001440000000132314464125673023306 0ustar liggesusers##Full check test_that("Test merging", { testthat::skip_on_cran() local_edition(3) ##expect error expect_message(merge_Risoe.BINfileData(input.objects = "data"), regexp = "\\[merge\\_Risoe\\.BINfileData\\(\\)\\] Nothing done.+") expect_error(merge_Risoe.BINfileData(input.objects = c("data", "data2"))) expect_error(merge_Risoe.BINfileData(input.objects = list("data", "data2")), regexp = "[merge_Risoe.BINfileData()] Input list does not contain Risoe.BINfileData objects!", fixed = TRUE) ##expect success data(ExampleData.BINfileData, envir = environment()) object1 <- CWOSL.SAR.Data object2 <- CWOSL.SAR.Data 
expect_s4_class(merge_Risoe.BINfileData(c(object1, object2)), "Risoe.BINfileData") }) Luminescence/tests/testthat/test_calc_HomogeneityTest.R0000755000176200001440000000145414062436223023172 0ustar liggesusers##use the data example given by Galbraith (2003) df <- data.frame( x = c(30.1, 53.8, 54.3, 29.0, 47.6, 44.2, 43.1), y = c(4.8, 7.1, 6.8, 4.3, 5.2, 5.9, 3.0)) temp <- calc_HomogeneityTest(df) test_that("check class and length of output", { testthat::skip_on_cran() local_edition(3) expect_s4_class(temp, "RLum.Results") expect_equal(length(temp), 3) }) test_that("check values from output example", { testthat::skip_on_cran() local_edition(3) results <- get_RLum(temp) ##test the normal expect_equal(results$n, 7) expect_equal(round(results$g.value, 4), 19.2505) expect_equal(results$df, 6) expect_equal(round(results$P.value,3), 0.004) ##test the unlogged version temp <- calc_HomogeneityTest(df, log = FALSE)$summary expect_equal(round(temp$P.value,3),0.001) }) Luminescence/tests/testthat/test_analyse_Al2O3C_CrossTalk.R0000644000176200001440000000036414062436223023501 0ustar liggesusers##Full check test_that("Full check", { skip_on_cran() local_edition(3) ##load data data(ExampleData.Al2O3C, envir = environment()) ##run analysis expect_s4_class(analyse_Al2O3C_CrossTalk(data_CrossTalk), "RLum.Results") }) Luminescence/tests/testthat/test_template_DRAC.R0000644000176200001440000000403314171333122021451 0ustar liggesusers##Full check test_that("Check template creation ", { testthat::skip_on_cran() local_edition(3) ## test output class expect_s3_class(template_DRAC(), "DRAC.list") expect_s3_class(template_DRAC(notification = FALSE), "DRAC.list") expect_s3_class(template_DRAC(nrow = 10, notification = FALSE), "DRAC.list") ## test presets expect_identical(as.numeric(template_DRAC(notification = FALSE, preset = "quartz_coarse")$`a-value`), 0.035) expect_identical(as.numeric(template_DRAC(notification = FALSE, preset = "quartz_fine")$`a-value`), 0.035) 
expect_identical(as.numeric(template_DRAC(notification = FALSE, preset = "feldspar_coarse")$`a-value`), 0.08) expect_identical(as.numeric(template_DRAC(notification = FALSE, preset = "polymineral_fine")$`a-value`), 0.08) expect_identical(as.numeric(template_DRAC(notification = FALSE, preset = "DRAC-example_quartz")$`De (Gy)`), 20) expect_identical(as.numeric(template_DRAC(notification = FALSE, preset = "DRAC-example_feldspar")$`De (Gy)`), 15) expect_identical(as.numeric(template_DRAC(notification = FALSE, preset = "DRAC-example_polymineral")$`De (Gy)`), 204.47) expect_true( do.call(all.equal, as.list(template_DRAC(nrow = 2, notification = FALSE, preset = "DRAC-example_quartz")$`De (Gy)`)) ) ## misc tests expect_true(all(is.na(template_DRAC(notification = FALSE)))) expect_true(!all(is.na(template_DRAC(preset = "DRAC-example_polymineral", notification = FALSE)))) expect_equal(length(template_DRAC(notification = FALSE)), 53) expect_equal(length(template_DRAC(nrow = 10, notification = FALSE)[[1]]), 10) expect_s3_class(template_DRAC(nrow = -1, notification = FALSE), "DRAC.list") ## expect failure expect_warning(template_DRAC(nrow = 5001, notification = FALSE), regexp = "\\[template_DRAC\\(\\)\\] More than 5000 datasets might not be supported!") expect_error(template_DRAC(preset = "this_one_does_not_exist")) expect_error(template_DRAC(preset = c("this_one_does_not_exist", "this_one_neither"))) expect_error(template_DRAC(preset = 999)) }) Luminescence/tests/testthat/test_smooth_RLum.R0000644000176200001440000000202614062436223021322 0ustar liggesusersdata(ExampleData.CW_OSL_Curve, envir = environment()) temp <- set_RLum( class = "RLum.Data.Curve", recordType = "OSL", data = as.matrix(ExampleData.CW_OSL_Curve) ) ##create RLum.Analysis object temp_analysis <- set_RLum("RLum.Analysis", records = list(temp, temp)) test_that("check class and length of output", { testthat::skip_on_cran() local_edition(3) ##standard tests expect_s4_class(temp, class = "RLum.Data.Curve") 
expect_s4_class(smooth_RLum(temp), class = "RLum.Data.Curve") ##test on a list ##RLum list expect_type(smooth_RLum(list(temp, temp)), "list") ##normal list expect_type(smooth_RLum(list(a = 1, b = 2)), "list") ##test on an RLum.Analysis-object expect_s4_class(smooth_RLum(temp_analysis), "RLum.Analysis") }) test_that("check values from output example", { testthat::skip_on_cran() local_edition(3) expect_equal(round(mean(smooth_RLum(temp, k = 5)[,2], na.rm = TRUE), 0), 100) expect_equal(round(mean(smooth_RLum(temp, k = 10)[,2], na.rm = TRUE), 0), 85) }) Luminescence/tests/testthat/test_plot_RLum.Analysis.R0000644000176200001440000000426114145666016022563 0ustar liggesuserstest_that("Test the basic plot functionality", { testthat::skip_on_cran() local_edition(3) ##create dataset ##load data data(ExampleData.BINfileData, envir = environment()) ##convert values for position 1 temp <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos=1) ## trigger warning expect_silent(suppressWarnings(plot_RLum.Analysis( set_RLum("RLum.Analysis", records = list( set_RLum("RLum.Data.Curve", recordType = "OSL"), set_RLum("RLum.Data.Curve", recordType = "OSL") )), norm = TRUE, combine = TRUE))) ##Basic plot expect_silent(plot_RLum.Analysis( temp, subset = list(recordType = "TL"), combine = TRUE, norm = TRUE, abline = list(v = c(110)) )) ## test norm = "max" expect_silent(plot_RLum.Analysis( temp, subset = list(recordType = "TL"), combine = TRUE, norm = "max", abline = list(v = c(110)) )) ## test norm = "min" expect_silent(plot_RLum.Analysis( temp, subset = list(recordType = "OSL"), combine = TRUE, norm = "last", abline = list(v = c(110)) )) ## test norm = "huot expect_silent(plot_RLum.Analysis( temp, subset = list(recordType = "OSL"), combine = TRUE, norm = "huot", abline = list(v = c(110)) )) ## test records_max expect_silent(plot_RLum.Analysis( temp, subset = list(recordType = "TL"), combine = TRUE, norm = TRUE, sub_title = "(5 K/s)", records_max = 5, abline = list(v = c(110)) )) ##test 
arguments ##ylim expect_silent(plot_RLum.Analysis( temp, subset = list(recordType = "TL"), combine = FALSE, norm = TRUE, ylim = c(1,100), xlim = c(1,100), abline = list(v = c(110)) )) ##test arguments ##ylim - warning #TODO expect_warning(Luminescence:::.warningCatcher(plot_RLum.Analysis( temp, subset = list(recordType = "TL"), combine = FALSE, #norm = TRUE, ylim = c(1,200), xlim = c(1,100), abline = list(v = c(110)) ))) ##test arguments #ylim - warning expect_warning(Luminescence:::.warningCatcher(plot_RLum.Analysis( temp, subset = list(recordType = "TL"), combine = FALSE, norm = TRUE, log = "y" ))) }) Luminescence/tests/testthat/test_convert_Activity2Concentration.R0000644000176200001440000000154714171333122025221 0ustar liggesuserstest_that("check class and length of output", { testthat::skip_on_cran() local_edition(3) data <- data.frame( NUCLIDES = c("U-238", "Th-232", "K-40"), VALUE = c(40,80,100), VALUE_ERROR = c(4,8,10), stringsAsFactors = FALSE) results <- expect_s4_class(convert_Activity2Concentration(data), c("RLum.Results")) expect_s4_class(convert_Activity2Concentration(data, verbose = FALSE), c("RLum.Results")) expect_equal(length(convert_Activity2Concentration(data)), 1) expect_error(convert_Activity2Concentration()) expect_error(convert_Activity2Concentration(data = data.frame(a = 1, b = 2))) ## this test should flag if constants were changed, so that this is ## not forgotten in the NEWS expect_equal(round(sum(results$data$`CONC. (ppm/%)`),5), 23.20909) expect_equal(round(sum(results$data$`CONC. 
ERROR (ppm/%)`),5), 2.32091) }) Luminescence/tests/testthat/test_calc_FadingCorr.R0000644000176200001440000000345314062436223022057 0ustar liggesusersset.seed(1) temp <- calc_FadingCorr( age.faded = c(0.1,0), g_value = c(5.0, 1.0), tc = 2592000, tc.g_value = 172800, n.MC = 100, verbose = FALSE) test_that("check class and length of output", { testthat::skip_on_cran() local_edition(3) ##trigger some errors expect_error(calc_FadingCorr(age.faded = "test", g_value = "test"), "\\[calc_FadingCorr\\(\\)\\] 'tc' needs to be set!") expect_error( calc_FadingCorr(age.faded = "test", g_value = "test", tc = 200), "\\[calc\\_FadingCorr\\(\\)\\] 'age.faded', 'g_value' and 'tc' need be of type numeric\\!") ##check message expect_message(calc_FadingCorr( age.faded = c(6.404856, 0.51), g_value = c(17.5,1.42), tc = 462, n.MC = 100), "\\[calc_FadingCorr\\(\\)\\] No solution found, return NULL. This usually happens for very large, unrealistic g-values") expect_s4_class(temp, "RLum.Results") expect_equal(length(temp), 2) ##check the verbose mode expect_s4_class(calc_FadingCorr( age.faded = c(0.1,0), g_value = c(5.0, 1.0), tc = 2592000, tc.g_value = 172800, n.MC = 1, verbose = TRUE), class = "RLum.Results") }) test_that("check values from output example 1", { testthat::skip_on_cran() local_edition(3) results <- get_RLum(temp) expect_equal(results$AGE, 0.1169) expect_equal(results$AGE.ERROR, 0.0035) expect_equal(results$AGE_FADED, 0.1) expect_equal(results$AGE_FADED.ERROR, 0) expect_equal(results$G_VALUE, 5.312393) expect_equal(round(results$G_VALUE.ERROR, 5), 1.01190) expect_equal(round(results$KAPPA, 3), 0.0230) expect_equal(round(results$KAPPA.ERROR,3), 0.004) expect_equal(results$TC, 8.213721e-05) expect_equal(results$TC.G_VALUE, 5.475814e-06) expect_equal(results$n.MC, 100) expect_equal(results$OBSERVATIONS, 100) expect_equal(results$SEED, NA) }) Luminescence/tests/testthat/test_write_R2BIN.R0000644000176200001440000000340514062436223021102 0ustar liggesuserstest_that("write to empty 
connection", { testthat::skip_on_cran() local_edition(3) #Unit test for write_BIN2R() function #create data file data(ExampleData.BINfileData, envir = environment()) ##empty RisoeBINfileData object empty <- set_Risoe.BINfileData() ##replace the raw by numeric CWOSL.SAR.Data@METADATA$VERSION <- as.numeric(CWOSL.SAR.Data@METADATA$VERSION) CWOSL.SAR.Data@METADATA[] <- lapply(CWOSL.SAR.Data@METADATA, function(x){ if(is.factor(x)){ as.character(x) }else{ x } }) ##combing with existing BIN-file object new <- as.data.frame( data.table::rbindlist(l = list(empty@METADATA,CWOSL.SAR.Data@METADATA),fill = TRUE), stringsAsFactors = FALSE) ##new object new <- set_Risoe.BINfileData(METADATA = new, DATA = CWOSL.SAR.Data@DATA) ##replace NA values new@METADATA[is.na(new@METADATA)] <- 0 ##replace RECTYPE new@METADATA$RECTYPE <- 1 ##reduce files size considerably down to two records new <- subset(new, ID == 1:2) ##create files path <- tempfile() write_R2BIN(object = new, file = paste0(path, "BINfile_V3.bin"), version = "03") write_R2BIN(object = new, file = paste0(path, "BINfile_V4.bin"), version = "04") write_R2BIN(object = new, file = paste0(path, "BINfile_V65binx"), version = "05") write_R2BIN(object = new, file = paste0(path, "BINfile_V6.binx"), version = "06") write_R2BIN(object = new, file = paste0(path, "BINfile_V7.binx"), version = "07") write_R2BIN(object = new, file = paste0(path, "BINfile_V8.binx"), version = "08") ##catch errors expect_error(write_R2BIN(object = "a", file = ""), "[write_R2BIN()] Input object is not of type Risoe.BINfileData!", fixed = TRUE) expect_error(suppressWarnings(write_R2BIN(object = set_Risoe.BINfileData(), file = ""))) }) Luminescence/tests/testthat/test_fit_CWCurve.R0000644000176200001440000000257114521207352021236 0ustar liggesuserstest_that("check class and length of output", { testthat::skip_on_cran() local_edition(3) data(ExampleData.CW_OSL_Curve, envir = environment()) fit <- fit_CWCurve(values = ExampleData.CW_OSL_Curve, main = "CW Curve 
Fit", n.components.max = 4, log = "x", plot = FALSE) expect_s4_class(fit, "RLum.Results") expect_equal(length(fit), 3) }) test_that("check values from output example", { testthat::skip_on_cran() local_edition(3) data(ExampleData.CW_OSL_Curve, envir = environment()) fit <- fit_CWCurve(values = ExampleData.CW_OSL_Curve, main = "CW Curve Fit", n.components.max = 4, log = "x", plot = FALSE) t <- sessionInfo() #if(grepl(pattern = "apple", x = t$R.version$platform)) { expect_equal(fit$data$n.components, 3, tolerance = 1) expect_equal(round(fit$data$I01, digits = 0), 2388, tolerance = 1) expect_equal(round(fit$data$lambda1, digits = 1), 4.6, tolerance = 1) expect_equal(round(fit$data$`pseudo-R^2`, digits = 0), 1) # } else { # expect_equal(fit$data$n.components, 2) # expect_equal(round(fit$data$I01, digits = 0), 3286) # expect_equal(round(fit$data$lambda1, digits = 1), 3.8) # expect_equal(round(fit$data$`pseudo-R^2`, digits = 0), 1) # # } }) Luminescence/tests/testthat/test_subset_SingleGrainData.R0000644000176200001440000000137314464125673023451 0ustar liggesuserstest_that("Check subset_SingleGrain", { testthat::skip_on_cran() local_edition(3) ## get example ready data(ExampleData.BINfileData, envir = environment()) ## set POSITION/GRAIN pair dataset selection <- data.frame(POSITION = c(1,5,7), GRAIN = c(0,0,0)) ## crash function expect_error( object = subset_SingleGrainData("error"), regexp = "\\[subset\\_SingleGrainData\\(\\)\\] Only Risoe.BINfileData-class objects are allowed as input!") ## standard run expect_s4_class(subset_SingleGrainData(object = CWOSL.SAR.Data, selection = selection), "Risoe.BINfileData") ## try something different for the input expect_s4_class(subset_SingleGrainData(object = CWOSL.SAR.Data, selection = as.matrix(selection)), "Risoe.BINfileData") }) Luminescence/tests/testthat/test_combine_De_Dr.R0000644000176200001440000000303114367174003021523 0ustar liggesuserstest_that("Test combine_De_Dr", { testthat::skip_on_cran() local_edition(3) ## simple 
test using the example ## set parameters Dr <- stats::rlnorm(1000, 0, 0.3) De <- 50*sample(Dr, 50, replace = TRUE) s <- stats::rnorm(50, 10, 2) ## set seed set.seed(1276) ## break function expect_error(combine_De_Dr( Dr = Dr, int_OD = 0.1, De, s[-1]), "\\[combine_De_Dr\\(\\)\\] \\'De\\' and \\'s\\' are not of similar length!") ## simple run with standard settings results <- expect_s4_class(combine_De_Dr( Dr = Dr, int_OD = 0.1, De, s, outlier_analysis_plot = TRUE, Age_range = c(0, 100), cdf_ADr_quantiles = FALSE, legend.pos = "topright", legend = TRUE, method_control = list(n.iter = 100, n.chains = 1)), "RLum.Results") ## check whether mcmc is NULL expect_null(results$mcmc_IAM) expect_null(results$mcmc_BCAM) ## run the same with different par settings par(mfrow = c(2,2)) results <- expect_s4_class(combine_De_Dr( Dr = Dr, int_OD = 0.1, De, s, outlier_analysis_plot = TRUE, par_local = FALSE, Age_range = c(0, 100), method_control = list( n.iter = 100, n.chains = 1, return_mcmc = TRUE )), "RLum.Results") ## check the length of the output expect_length(results, 9) ## check whether we have the MCMC plots expect_s3_class(results$mcmc_IAM, "mcmc.list") expect_s3_class(results$mcmc_BCAM, "mcmc.list") ## try to plot the results again plot_OSLAgeSummary(results) }) Luminescence/tests/testthat/test_RLum.Data.Curve.R0000644000176200001440000000174514062436223021673 0ustar liggesuserstest_that("check class", { testthat::skip_on_cran() local_edition(3) ##set empty curve object and show it expect_output(show(set_RLum(class = "RLum.Data.Curve"))) ##check replacements fo object <- set_RLum(class = "RLum.Data.Curve") expect_s4_class(set_RLum(class = "RLum.Data.Curve", data = object), class = "RLum.Data.Curve") ##check get_RLum object <- set_RLum(class = "RLum.Data.Curve", data = object, info = list(a = "test")) expect_warning(get_RLum(object, info.object = "est"), regexp = "Invalid info.object name") ##test names expect_type(names(object), "character") ##test bin 
expect_warning(bin_RLum.Data(object, bin_size = -2), "Argument 'bin_size' invalid, nothing was done!") ##check conversions expect_s4_class(as(object = list(1:10), Class = "RLum.Data.Curve"), "RLum.Data.Curve") expect_type(as(object = object, Class = "list"), "list") expect_s4_class(as(object = matrix(1:10,ncol = 2), Class = "RLum.Data.Curve"), "RLum.Data.Curve") }) Luminescence/tests/testthat/test_convert_SG2MG.R0000644000176200001440000000176214145666016021446 0ustar liggesuserstest_that("test conversion from single grain data to multiple grain data", { testthat::skip_on_cran() local_edition(3) ## load example dataset data(ExampleData.BINfileData, envir = environment()) test_file_MG <- test_file_SG <- CWOSL.SAR.Data test_file_SG@METADATA$GRAIN <- 1 ## test pass through for pure multiple grain data expect_s4_class(convert_SG2MG(test_file_MG), "Risoe.BINfileData") ## test with pseudo single grain data expect_s4_class(convert_SG2MG(test_file_SG), "Risoe.BINfileData") ## test write option ## create environment dir <- tempdir() tmp <- paste0(dir, "/test.bin") write_file_test <- write_R2BIN( read_BIN2R(file = "https://github.com/R-Lum/Luminescence/raw/master/tests/testdata/BINfile_V4.bin"), tmp) expect_s4_class(convert_SG2MG(tmp, write_file = TRUE, txtProgressBar = FALSE), "Risoe.BINfileData") ##clear temp folder otherwise we have a problem with the CRAN check file.remove(list.files(dir,pattern = ".bin", full.names = TRUE)) }) Luminescence/tests/testthat/test_read_Daybreak2R.R0000644000176200001440000000160214062436223021772 0ustar liggesuserstest_that("Test functionality", { testthat::skip_on_cran() local_edition(3) ##TXT ##basic import options expect_type(read_Daybreak2R( file = system.file("extdata/Daybreak_TestFile.txt", package = "Luminescence") ), "list") ##verbose off expect_type(read_Daybreak2R( file = system.file("extdata/Daybreak_TestFile.txt", package = "Luminescence"), verbose = FALSE ), "list") ##txtProgressbar off expect_type(read_Daybreak2R( file = 
system.file("extdata/Daybreak_TestFile.txt", package = "Luminescence"), txtProgressBar = FALSE ), "list") ##DAT ##basic import options expect_type(read_Daybreak2R( file = system.file("extdata/Daybreak_TestFile.DAT", package = "Luminescence") ), "list") ##test silence expect_silent(read_Daybreak2R( file = system.file("extdata/Daybreak_TestFile.DAT", package = "Luminescence"), verbose = FALSE)) }) Luminescence/tests/testthat.R0000644000176200001440000000010413755775676016040 0ustar liggesuserslibrary(testthat) library(Luminescence) test_check("Luminescence") Luminescence/src/0000755000176200001440000000000014521210067013451 5ustar liggesusersLuminescence/src/create_UID.cpp0000644000176200001440000000225114212110522016111 0ustar liggesusers// +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ // Title: create_UID() // Author: Sebastian Kreutzer, Geography & Earth Science, Aberystwyth University (United Kingdom) // Contact: sebastian.kreutzer@aber.ac.uk // Version: 0.1.0 [2016-01-26] // Purpose: The purpose of this function is to create a unique ID for RLum // objects based on the system time and a random number. 
// +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #include #include using namespace Rcpp; // [[Rcpp::export("create_UID")]] CharacterVector create_UID() { //define variables CharacterVector random; time_t rawtime; struct tm * timeinfo; char timestamp [80]; //set date + timestamp (code snippet taken from C++ reference page) time (&rawtime); timeinfo = localtime (&rawtime); strftime (timestamp,80,"%Y-%m-%d-%I:%M.",timeinfo); //get time information and add a random number //according to the CRAN policy the standard C-function, rand(), even sufficient here, is not allowed random = runif(1); //combine and return results return timestamp + Rcpp::as(random); } Luminescence/src/src_analyse_IRSARRF_SRS.cpp0000644000176200001440000001474614062436223020417 0ustar liggesusers// +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ // Title: src_analyse_IRSARRF_SRS() // Author: Sebastian Kreutzer, Geography & Earth Science,Aberystwyth University (United Kingdom) // Contact: sebastian.kreutzer@aber.ac.uk // Version: 0.4.0 [2020-08-17] // Purpose: // // Function calculates the squared residuals for the R function analyse_IRSAR.RF() // including MC runs for the obtained minimum. 
The function allows a horizontal and // a vertical sliding of the curve // +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #include #include // [[Rcpp::depends(RcppArmadillo)]] using namespace Rcpp; // [[Rcpp::export("src_analyse_IRSARRF_SRS")]] RcppExport SEXP analyse_IRSARRF_SRS(arma::vec values_regenerated_limited, arma::vec values_natural_limited, arma::vec vslide_range, int n_MC, bool trace = false ){ //check for the vslide_range() if(vslide_range.size() > 1e+07){ stop("[:::src_analyse_IRSAR_SRS()] 'vslide_range' exceeded maximum size (1e+07)!"); } //pre-define variables arma::vec residuals(values_natural_limited.size()); arma::vec results(values_regenerated_limited.size() - values_natural_limited.size()); arma::vec results_vector_min_MC(n_MC); //variables for the algorithm int v_length; int v_index; arma::vec v_leftright(2); //the virtual vector arma::vec t_leftright(2); //the test points arma::vec c_leftright(2); //the calculation //(1) calculate sum of the squared residuals // this will be used to find the best fit of the curves (which is the minimum) //initialise values v_length = vslide_range.size(); v_index = 0; v_leftright[0] = 0; v_leftright[1] = vslide_range.size() - 1; if(v_length == 1){ t_leftright[0] = 0; t_leftright[1] = 0; }else{ t_leftright[0] = v_length/3; t_leftright[1] = 2 * v_length/3; } //***TRACE**** if(trace == true){ Rcout << "\n\n [:::src_analyse_IRSAR_SRS()]"; Rcout << "\n\n--- Inititalisation --- \n "; Rcout << "\n >> v_leftright: " << v_leftright; Rcout << "\n >> t_leftright: " << t_leftright; Rcout << "\n\n --- Optimisation --- \n "; Rcout << "\n ---------------------------------------------------------------------------------------------------------"; Rcout << "\n v_length \t\t v_leftright \t\t c_leftright \t\t\t\t absolute offset"; Rcout << "\n ---------------------------------------------------------------------------------------------------------"; } //start loop do { for (int 
t=0;t(t_leftright.size()); t++){ //HORIZONTAL SLIDING CORE -------------------------------------------------------------(start) //slide the curves against each other for (int i=0; i(results.size()); ++i){ //calculate squared residuals along one curve for (int j=0; j(values_natural_limited.size()); ++j){ residuals[j] = pow((values_regenerated_limited[j+i] - (values_natural_limited[j] + vslide_range[t_leftright[t]])),2); } //sum results and fill the results vector results[i] = sum(residuals); } //HORIZONTAL SLIDING CORE ---------------------------------------------------------------(end) c_leftright[t] = min(results); } //compare results and re-initialise variables if(c_leftright[0] < c_leftright[1]){ v_index = v_leftright[0]; //set index to left test index //update vector window (the left remains the same) v_leftright[1] = t_leftright[1]; //update window length v_length = v_leftright[1] - v_leftright[0]; }else if (c_leftright[0] > c_leftright[1]){ v_index = v_leftright[1]; //set index to right test index //update vector window (the right remains the same this time) v_leftright[0] = t_leftright[0]; //update window length v_length = v_leftright[1] - v_leftright[0]; }else{ v_length = 1; } //update test point index t_leftright[0] = v_leftright[0] + v_length/3; t_leftright[1] = v_leftright[0] + (2 * (v_length/3)); //***TRACE**** if(trace == true){ Rcout << "\n " << v_length << " \t\t\t " << v_leftright << " \t\t " << c_leftright << " \t\t\t " << vslide_range[v_index]; } } while (v_length > 1); //***TRACE**** if(trace == true){ Rcout << "\n ---------------------------------------------------------------------------------------------------------"; Rcout << "\n >> SRS minimum: \t\t " << c_leftright[0]; Rcout << "\n >> Vertical offset index: \t " << v_index + 1; Rcout << "\n >> Vertical offset absolute: \t " << vslide_range[v_index] << "\n\n"; } //(2) error calculation //use this values to bootstrap and find minimum values and to account for the variation //that may result 
from this method itself (the minimum lays within a valley of minima) // //using the obtained sliding vector and the function RcppArmadillo::sample() (which equals the //function sample() in R, but faster) //http://gallery.rcpp.org/articles/using-the-Rcpp-based-sample-implementation //this follows the way described in Frouin et al., 2017 ... still ... for (int i=0; i(results_vector_min_MC.size()); ++i){ results_vector_min_MC[i] = min( RcppArmadillo::sample( results, results.size(), TRUE, NumericVector::create() ) ); } //build list with four elements //sliding_vector: the original results_vector (this can be used to reproduced the results in R) //sliding_vector_min_index: the index of the minimum, it is later also calculated in R, however, sometimes we may need it directly //sliding_vector_min_MC: minimum values based on bootstrapping //vslide_index: this is the index where the minimum was identified for the vertical sliding //vslide_minimum: return the identified minimum value, this helps to re-run the function, as the //algorithm might got trapped in the local minimum List results_list; results_list["sliding_vector"] = results; results_list["sliding_vector_min_index"] = (int)results.index_min() + 1; results_list["sliding_vector_min_MC"] = results_vector_min_MC; results_list["vslide_index"] = v_index + 1; results_list["vslide_minimum"] = c_leftright[0]; //left and right should be similar return results_list; } Luminescence/src/src_get_XSYG_curve_values.cpp0000644000176200001440000000230114212110530021223 0ustar liggesusers// +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ // Title: src_get_XSYG_curve_values() // Author: Sebastian Kreutzer, Geography & Earth Science,Aberystwyth University (United Kingdom) // Contact: sebastian.kreutzer@aber.ac.uk // Version: 0.1.0 [2017-07-07] // Usage: used within the function read_XSYG2R() to extract curve values more efficiently // 
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #include #include #include #include using namespace Rcpp; // [[Rcpp::export]] NumericMatrix src_get_XSYG_curve_values(std::string s) { //00: count pairs int pairs = std::count(s.begin(), s.end(), ';') + 1; //01: replace all ; by , std::replace(s.begin(), s.end(), ';', ','); //02: set needed matrix NumericMatrix m(pairs, 2); //03: set variables std::istringstream ss(s); std::string value; int i = 0; int sw = 0; //04: loop over string and convert to double while (std::getline(ss, value, ',')) { if (sw % 2 == 0){ m(i,0) = atof(value.c_str()); }else{ m(i,1) = atof(value.c_str()); i++; } sw++; } return m; } Luminescence/src/Makevars0000644000176200001440000000005714367174003015156 0ustar liggesusersPKG_CPPFLAGS=-I../inst/include CXX_STD = CXX17 Luminescence/src/Makevars.win0000644000176200001440000000005714367174003015752 0ustar liggesusersPKG_CPPFLAGS=-I../inst/include CXX_STD = CXX17 Luminescence/src/src_create_RLumDataCurve_matrix.cpp0000644000176200001440000000703614212110543022412 0ustar liggesusers// +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ // Title: src_create_RLumDataCurve_matrix() // Author: Sebastian Kreutzer, Geography & Earth Science,Aberystwyth University (United Kingdom) // Contact: sebastian.kreutzer@aber.ac.uk // Version: 0.1.3 [2019-09-19] // Purpose: Function to create the RLum.Data.Curve() matrix ... faster than in R itself // - Mainly used by the function Risoe.BINfileData2RLum.Data.Curve() // +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #include using namespace Rcpp; // ----------------------------------------------------------------------------------------------- // Define own function to create a sequence for the x-axis // .. but we do not export them to avoid side effects, as this function is not the same as the // .. base R function seq() // .. 
no export NumericVector seq_RLum(double from, double to, double length_out) { //calculate by double by = (to - from) / length_out; //set sequence vector and so set the first channel NumericVector sequence(static_cast(length_out), (from + by)); //loop and create sequence for (int i=1; i < static_cast(length_out); i++) sequence[i] = sequence[i-1] + by; return sequence; } // ----------------------------------------------------------------------------------------------- // The function we want to export // [[Rcpp::export("src_create_RLumDataCurve_matrix")]] NumericMatrix create_RLumDataCurve_matrix( NumericVector DATA, double VERSION, int NPOINTS, String LTYPE, double LOW, double HIGH, double AN_TEMP, int TOLDELAY, int TOLON, int TOLOFF ){ //generate X vectors if(NPOINTS > 0){ //set needed vectors and predefine matrix NumericVector X(NPOINTS); NumericMatrix curve_matrix(NPOINTS,2); //fill x column for the case we have a TL curve if(LTYPE == "TL" && VERSION >= 4.0){ //provide a fallback for non-conform BIN/BINX-files, otherwise the //the TL curves are wrong withouth having a reason. if((TOLON == 0) & (TOLOFF == 0) & (TOLDELAY == 0)){ Rcout << "[src_create_RLumDataCurve_matrix()] BIN/BINX-file non-conform. 
TL curve may be wrong!\n"; TOLOFF = NPOINTS; } //the heating curve consists of three vectors that needed to //be combined // //(A) - the start ramping NumericVector heat_ramp_start = seq_RLum(LOW,AN_TEMP,static_cast(TOLDELAY)); // //(B) - the plateau //B is simply TOLON // //(C) - the end ramping NumericVector heat_ramp_end = seq_RLum(AN_TEMP, HIGH, static_cast(TOLOFF)); //set index counters int c = 0; //fill vector for temperature for(int i = 0; i < X.length(); i++){ if(i < heat_ramp_start.length()){ X[i] = heat_ramp_start[i]; }else if(i >= heat_ramp_start.length() && i < heat_ramp_start.length() + static_cast(TOLON)){ X[i] = AN_TEMP; }else if(i >= heat_ramp_start.length() + TOLON){ X[i] = heat_ramp_end[c]; c++; } } }else{ X = seq_RLum(LOW, HIGH, static_cast(NPOINTS)); } //set final matrix curve_matrix.column(0) = X; curve_matrix.column(1) = DATA; return(curve_matrix); }else{ //set final matrix for the case NPOINTS <= 0 //fill this with NA values NumericMatrix curve_matrix(1,2); curve_matrix(0,0) = NumericVector::get_na(); curve_matrix(0,1) = NumericVector::get_na(); return(curve_matrix); } } Luminescence/src/RcppExports.cpp0000644000176200001440000000773414521210033016452 0ustar liggesusers// Generated by using Rcpp::compileAttributes() -> do not edit by hand // Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 #include #include using namespace Rcpp; #ifdef RCPP_USE_GLOBAL_ROSTREAM Rcpp::Rostream& Rcpp::Rcout = Rcpp::Rcpp_cout_get(); Rcpp::Rostream& Rcpp::Rcerr = Rcpp::Rcpp_cerr_get(); #endif // create_UID CharacterVector create_UID(); RcppExport SEXP _Luminescence_create_UID() { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; rcpp_result_gen = Rcpp::wrap(create_UID()); return rcpp_result_gen; END_RCPP } // analyse_IRSARRF_SRS RcppExport SEXP analyse_IRSARRF_SRS(arma::vec values_regenerated_limited, arma::vec values_natural_limited, arma::vec vslide_range, int n_MC, bool trace); RcppExport SEXP 
_Luminescence_analyse_IRSARRF_SRS(SEXP values_regenerated_limitedSEXP, SEXP values_natural_limitedSEXP, SEXP vslide_rangeSEXP, SEXP n_MCSEXP, SEXP traceSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< arma::vec >::type values_regenerated_limited(values_regenerated_limitedSEXP); Rcpp::traits::input_parameter< arma::vec >::type values_natural_limited(values_natural_limitedSEXP); Rcpp::traits::input_parameter< arma::vec >::type vslide_range(vslide_rangeSEXP); Rcpp::traits::input_parameter< int >::type n_MC(n_MCSEXP); Rcpp::traits::input_parameter< bool >::type trace(traceSEXP); rcpp_result_gen = Rcpp::wrap(analyse_IRSARRF_SRS(values_regenerated_limited, values_natural_limited, vslide_range, n_MC, trace)); return rcpp_result_gen; END_RCPP } // create_RLumDataCurve_matrix NumericMatrix create_RLumDataCurve_matrix(NumericVector DATA, double VERSION, int NPOINTS, String LTYPE, double LOW, double HIGH, double AN_TEMP, int TOLDELAY, int TOLON, int TOLOFF); RcppExport SEXP _Luminescence_create_RLumDataCurve_matrix(SEXP DATASEXP, SEXP VERSIONSEXP, SEXP NPOINTSSEXP, SEXP LTYPESEXP, SEXP LOWSEXP, SEXP HIGHSEXP, SEXP AN_TEMPSEXP, SEXP TOLDELAYSEXP, SEXP TOLONSEXP, SEXP TOLOFFSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericVector >::type DATA(DATASEXP); Rcpp::traits::input_parameter< double >::type VERSION(VERSIONSEXP); Rcpp::traits::input_parameter< int >::type NPOINTS(NPOINTSSEXP); Rcpp::traits::input_parameter< String >::type LTYPE(LTYPESEXP); Rcpp::traits::input_parameter< double >::type LOW(LOWSEXP); Rcpp::traits::input_parameter< double >::type HIGH(HIGHSEXP); Rcpp::traits::input_parameter< double >::type AN_TEMP(AN_TEMPSEXP); Rcpp::traits::input_parameter< int >::type TOLDELAY(TOLDELAYSEXP); Rcpp::traits::input_parameter< int >::type TOLON(TOLONSEXP); Rcpp::traits::input_parameter< int >::type TOLOFF(TOLOFFSEXP); rcpp_result_gen = 
Rcpp::wrap(create_RLumDataCurve_matrix(DATA, VERSION, NPOINTS, LTYPE, LOW, HIGH, AN_TEMP, TOLDELAY, TOLON, TOLOFF)); return rcpp_result_gen; END_RCPP } // src_get_XSYG_curve_values NumericMatrix src_get_XSYG_curve_values(std::string s); RcppExport SEXP _Luminescence_src_get_XSYG_curve_values(SEXP sSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< std::string >::type s(sSEXP); rcpp_result_gen = Rcpp::wrap(src_get_XSYG_curve_values(s)); return rcpp_result_gen; END_RCPP } static const R_CallMethodDef CallEntries[] = { {"_Luminescence_create_UID", (DL_FUNC) &_Luminescence_create_UID, 0}, {"_Luminescence_analyse_IRSARRF_SRS", (DL_FUNC) &_Luminescence_analyse_IRSARRF_SRS, 5}, {"_Luminescence_create_RLumDataCurve_matrix", (DL_FUNC) &_Luminescence_create_RLumDataCurve_matrix, 10}, {"_Luminescence_src_get_XSYG_curve_values", (DL_FUNC) &_Luminescence_src_get_XSYG_curve_values, 1}, {NULL, NULL, 0} }; RcppExport void R_init_Luminescence(DllInfo *dll) { R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); R_useDynamicSymbols(dll, FALSE); } Luminescence/vignettes/0000755000176200001440000000000014521210067014672 5ustar liggesusersLuminescence/vignettes/S4classObjects.pdf.asis0000644000176200001440000000030613231137116021150 0ustar liggesusers%\VignetteIndexEntry{S4-class Object Structure in 'Luminescence'} %\VignetteEngine{R.rsp::asis} %\VignetteKeyword{PDF} %\VignetteKeyword{HTML} %\VignetteKeyword{vignette} %\VignetteKeyword{package} Luminescence/R/0000755000176200001440000000000014521210067013063 5ustar liggesusersLuminescence/R/plot_RadialPlot.R0000644000176200001440000015434314367174002016317 0ustar liggesusers#' @title Function to create a Radial Plot #' #' @description A Galbraith's radial plot is produced on a logarithmic or a linear scale. #' #' @details Details and the theoretical background of the radial plot are given in the #' cited literature. This function is based on an S script of Rex Galbraith. 
To #' reduce the manual adjustments, the function has been rewritten. Thanks to #' Rex Galbraith for useful comments on this function. \cr #' Plotting can be disabled by adding the argument `plot = "FALSE"`, e.g. #' to return only numeric plot output. #' #' Earlier versions of the Radial Plot in this package had the 2-sigma-bar #' drawn onto the z-axis. However, this might have caused misunderstanding in #' that the 2-sigma range may also refer to the z-scale, which it does not! #' Rather it applies only to the x-y-coordinate system (standardised error vs. #' precision). A spread in doses or ages must be drawn as lines originating at #' zero precision (x0) and zero standardised estimate (y0). Such a range may be #' drawn by adding lines to the radial plot ( `line`, `line.col`, #' `line.label`, cf. examples). #' #' A statistic summary, i.e. a collection of statistic measures of #' centrality and dispersion (and further measures) can be added by specifying #' one or more of the following keywords: #' - `"n"` (number of samples), #' - `"mean"` (mean De value), #' - `"mean.weighted"` (error-weighted mean), #' - `"median"` (median of the De values), #' - `"sdrel"` (relative standard deviation in percent), #' - `"sdrel.weighted"` (error-weighted relative standard deviation in percent), #' - `"sdabs"` (absolute standard deviation), #' - `"sdabs.weighted"` (error-weighted absolute standard deviation), #' - `"serel"` (relative standard error), #' - `"serel.weighted"` (error-weighted relative standard error), #' - `"seabs"` (absolute standard error), #' - `"seabs.weighted"` (error-weighted absolute standard error), #' - `"in.2s"` (percent of samples in 2-sigma range), #' - `"kurtosis"` (kurtosis) and #' - `"skewness"` (skewness). #' #' @param data [data.frame] or [RLum.Results-class] object (**required**): #' for `data.frame` two columns: De (`data[,1]`) and De error (`data[,2]`). #' To plot several data sets in one plot, the data sets must be provided as #' `list`, e.g. 
`list(data.1, data.2)`. #' #' @param na.rm [logical] (*with default*): #' excludes `NA` values from the data set prior to any further operations. #' #' @param log.z [logical] (*with default*): #' Option to display the z-axis in logarithmic scale. Default is `TRUE`. #' #' @param central.value [numeric]: #' User-defined central value, primarily used for horizontal centring #' of the z-axis. #' #' @param centrality [character] or [numeric] (*with default*): #' measure of centrality, used for automatically centring the plot and drawing #' the central line. Can either be one out of #' - `"mean"`, #' - `"median"`, #' - `"mean.weighted"` and #' - `"median.weighted"` or a #' - numeric value used for the standardisation. #' #' @param mtext [character]: #' additional text below the plot title. #' #' @param summary [character] (*optional*): #' add statistic measures of centrality and dispersion to the plot. #' Can be one or more of several keywords. See details for available keywords. #' #' @param summary.pos [numeric] or [character] (*with default*): #' optional position coordinates or keyword (e.g. `"topright"`) #' for the statistical summary. Alternatively, the keyword `"sub"` may be #' specified to place the summary below the plot header. However, this latter #' option is only possible if `mtext` is not used. #' #' @param legend [character] vector (*optional*): #' legend content to be added to the plot. #' #' @param legend.pos [numeric] or [character] (with #' default): optional position coordinates or keyword (e.g. `"topright"`) #' for the legend to be plotted. #' #' @param stats [character]: additional labels of statistically #' important values in the plot. One or more out of the following: #' - `"min"`, #' - `"max"`, #' - `"median"`. #' #' @param rug [logical]: #' Option to add a rug to the z-scale, to indicate the location of individual values #' #' @param plot.ratio [numeric]: #' User-defined plot area ratio (i.e. curvature of the z-axis). 
If omitted, #' the default value (`4.5/5.5`) is used and modified automatically to optimise #' the z-axis curvature. The parameter should be decreased when data points #' are plotted outside the z-axis or when the z-axis gets too elliptic. #' #' @param bar.col [character] or [numeric] (*with default*): #' colour of the bar showing the 2-sigma range around the central #' value. To disable the bar, use `"none"`. Default is `"grey"`. #' #' @param y.ticks [logical]: #' Option to hide y-axis labels. Useful for data with small scatter. #' #' @param grid.col [character] or [numeric] (*with default*): #' colour of the grid lines (originating at `[0,0]` and stretching to #' the z-scale). To disable grid lines, use `"none"`. Default is `"grey"`. #' #' @param line [numeric]: #' numeric values of the additional lines to be added. #' #' @param line.col [character] or [numeric]: #' colour of the additional lines. #' #' @param line.label [character]: #' labels for the additional lines. #' #' @param output [logical]: #' Optional output of numerical plot parameters. These can be useful to #' reproduce similar plots. Default is `FALSE`. #' #' @param ... Further plot arguments to pass. `xlab` must be a vector of #' length 2, specifying the upper and lower x-axes labels. #' #' @return Returns a plot object. #' #' @section Function version: 0.5.9 #' #' @author #' Michael Dietze, GFZ Potsdam (Germany)\cr #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)\cr #' Based on a rewritten S script of Rex Galbraith, 2010 #' #' @seealso [plot], [plot_KDE], [plot_Histogram], [plot_AbanicoPlot] #' #' @references #' Galbraith, R.F., 1988. Graphical Display of Estimates Having #' Differing Standard Errors. Technometrics, 30 (3), 271-281. #' #' Galbraith, R.F., 1990. The radial plot: Graphical assessment of spread in #' ages. International Journal of Radiation Applications and Instrumentation. #' Part D. Nuclear Tracks and Radiation Measurements, 17 (3), 207-214. 
#' #' Galbraith, R. & Green, P., 1990. Estimating the component ages in a finite #' mixture. International Journal of Radiation Applications and #' Instrumentation. Part D. Nuclear Tracks and Radiation Measurements, 17 (3) #' 197-206. #' #' Galbraith, R.F. & Laslett, G.M., 1993. Statistical models for mixed fission #' track ages. Nuclear Tracks And Radiation Measurements, 21 (4), 459-470. #' #' Galbraith, R.F., 1994. Some Applications of Radial Plots. Journal of the #' American Statistical Association, 89 (428), 1232-1242. #' #' Galbraith, R.F., 2010. On plotting OSL equivalent doses. Ancient TL, 28 (1), #' 1-10. #' #' Galbraith, R.F. & Roberts, R.G., 2012. Statistical aspects of equivalent #' dose and error calculation and display in OSL dating: An overview and some #' recommendations. Quaternary Geochronology, 11, 1-27. #' #' @examples #' #' ## load example data #' data(ExampleData.DeValues, envir = environment()) #' ExampleData.DeValues <- Second2Gray( #' ExampleData.DeValues$BT998, c(0.0438,0.0019)) #' #' ## plot the example data straightforward #' plot_RadialPlot(data = ExampleData.DeValues) #' #' ## now with linear z-scale #' plot_RadialPlot( #' data = ExampleData.DeValues, #' log.z = FALSE) #' #' ## now with output of the plot parameters #' plot1 <- plot_RadialPlot( #' data = ExampleData.DeValues, #' log.z = FALSE, #' output = TRUE) #' plot1 #' plot1$zlim #' #' ## now with adjusted z-scale limits #' plot_RadialPlot( #' data = ExampleData.DeValues, #' log.z = FALSE, #' zlim = c(100, 200)) #' #' ## now the two plots with serious but seasonally changing fun #' #plot_RadialPlot(data = data.3, fun = TRUE) #' #' ## now with user-defined central value, in log-scale again #' plot_RadialPlot( #' data = ExampleData.DeValues, #' central.value = 150) #' #' ## now with a rug, indicating individual De values at the z-scale #' plot_RadialPlot( #' data = ExampleData.DeValues, #' rug = TRUE) #' #' ## now with legend, colour, different points and smaller scale #' 
plot_RadialPlot( #' data = ExampleData.DeValues, #' legend.text = "Sample 1", #' col = "tomato4", #' bar.col = "peachpuff", #' pch = "R", #' cex = 0.8) #' #' ## now without 2-sigma bar, y-axis, grid lines and central value line #' plot_RadialPlot( #' data = ExampleData.DeValues, #' bar.col = "none", #' grid.col = "none", #' y.ticks = FALSE, #' lwd = 0) #' #' ## now with user-defined axes labels #' plot_RadialPlot( #' data = ExampleData.DeValues, #' xlab = c("Data error (%)", "Data precision"), #' ylab = "Scatter", #' zlab = "Equivalent dose [Gy]") #' #' ## now with minimum, maximum and median value indicated #' plot_RadialPlot( #' data = ExampleData.DeValues, #' central.value = 150, #' stats = c("min", "max", "median")) #' #' ## now with a brief statistical summary #' plot_RadialPlot( #' data = ExampleData.DeValues, #' summary = c("n", "in.2s")) #' #' ## now with another statistical summary as subheader #' plot_RadialPlot( #' data = ExampleData.DeValues, #' summary = c("mean.weighted", "median"), #' summary.pos = "sub") #' #' ## now the data set is split into sub-groups, one is manipulated #' data.1 <- ExampleData.DeValues[1:15,] #' data.2 <- ExampleData.DeValues[16:25,] * 1.3 #' #' ## now a common dataset is created from the two subgroups #' data.3 <- list(data.1, data.2) #' #' ## now the two data sets are plotted in one plot #' plot_RadialPlot(data = data.3) #' #' ## now with some graphical modification #' plot_RadialPlot( #' data = data.3, #' col = c("darkblue", "darkgreen"), #' bar.col = c("lightblue", "lightgreen"), #' pch = c(2, 6), #' summary = c("n", "in.2s"), #' summary.pos = "sub", #' legend = c("Sample 1", "Sample 2")) #' #' @md #' @export plot_RadialPlot <- function( data, na.rm = TRUE, log.z = TRUE, central.value, centrality = "mean.weighted", mtext, summary, summary.pos, legend, legend.pos, stats, rug = FALSE, plot.ratio, bar.col, y.ticks = TRUE, grid.col, line, line.col, line.label, output = FALSE, ... 
) { ## Homogenise input data format if(is(data, "list") == FALSE) {data <- list(data)} ## Check input data for(i in 1:length(data)) { if(is(data[[i]], "RLum.Results") == FALSE & is(data[[i]], "data.frame") == FALSE) { stop(paste("[plot_RadialPlot] Error: Input data format is neither", "'data.frame' nor 'RLum.Results'"), call. = FALSE) } else { if(is(data[[i]], "RLum.Results") == TRUE) { data[[i]] <- get_RLum(data[[i]], "data") } ##use only the first two columns data[[i]] <- data[[i]][,1:2] } } ## check data and parameter consistency-------------------------------------- if(missing(stats) == TRUE) {stats <- numeric(0)} if(missing(summary) == TRUE) { summary <- c("n", "in.2s") } if(missing(summary.pos) == TRUE) { summary.pos <- "sub" } if(missing(bar.col) == TRUE) { bar.col <- rep("grey80", length(data)) } if(missing(grid.col) == TRUE) { grid.col <- rep("grey70", length(data)) } if(missing(summary) == TRUE) { summary <- NULL } if(missing(summary.pos) == TRUE) { summary.pos <- "topleft" } if(missing(mtext) == TRUE) { mtext <- "" } ## check z-axis log-option for grouped data sets if(is(data, "list") == TRUE & length(data) > 1 & log.z == FALSE) { warning(paste("Option 'log.z' is not set to 'TRUE' altough more than one", "data set (group) is provided.")) } ## optionally, remove NA-values if(na.rm == TRUE) { for(i in 1:length(data)) { data[[i]] <- na.exclude(data[[i]]) } } ## create preliminary global data set De.global <- data[[1]][,1] if(length(data) > 1) { for(i in 2:length(data)) { De.global <- c(De.global, data[[i]][,1]) } } ## calculate major preliminary tick values and tick difference extraArgs <- list(...) 
if("zlim" %in% names(extraArgs)) { limits.z <- extraArgs$zlim } else { z.span <- (mean(De.global) * 0.5) / (sd(De.global) * 100) z.span <- ifelse(z.span > 1, 0.9, z.span) limits.z <- c((ifelse(test = min(De.global) <= 0, yes = 1.1, no = 0.9) - z.span) * min(De.global), (1.1 + z.span) * max(De.global)) } ## calculate correction dose to shift negative values if(min(De.global) < 0 && log.z) { if("zlim" %in% names(extraArgs)) { De.add <- abs(extraArgs$zlim[1]) } else { ## estimate delta De to add to all data De.add <- min(10^ceiling(log10(abs(De.global))) * 10) ## optionally readjust delta De for extreme values if(De.add <= abs(min(De.global))) { De.add <- De.add * 10 } } } else { De.add <- 0 } ticks <- round(pretty(limits.z, n = 5), 3) De.delta <- ticks[2] - ticks[1] ## optionally add correction dose to data set and adjust error if(log.z) { for(i in 1:length(data)) data[[i]][,1] <- data[[i]][,1] + De.add De.global <- De.global + De.add } ## calculate major preliminary tick values and tick difference extraArgs <- list(...) 
if("zlim" %in% names(extraArgs)) { limits.z <- extraArgs$zlim } else { z.span <- (mean(De.global) * 0.5) / (sd(De.global) * 100) z.span <- ifelse(z.span > 1, 0.9, z.span) limits.z <- c((ifelse(min(De.global) <= 0, 1.1, 0.9) - z.span) * min(De.global), (1.1 + z.span) * max(De.global)) } ticks <- round(pretty(limits.z, n = 5), 3) De.delta <- ticks[2] - ticks[1] ## calculate and append statistical measures -------------------------------- ## z-values based on log-option z <- lapply(1:length(data), function(x){ if(log.z == TRUE) {log(data[[x]][,1])} else {data[[x]][,1]}}) if(is(z, "list") == FALSE) {z <- list(z)} data <- lapply(1:length(data), function(x) { cbind(data[[x]], z[[x]])}) rm(z) ## calculate se-values based on log-option se <- lapply(1:length(data), function(x, De.add){ if(log.z == TRUE) { if(De.add != 0) { data[[x]][,2] <- data[[x]][,2] / (data[[x]][,1] + De.add) } else { data[[x]][,2] / data[[x]][,1] } } else { data[[x]][,2] }}, De.add = De.add) if(is(se, "list") == FALSE) {se <- list(se)} data <- lapply(1:length(data), function(x) { cbind(data[[x]], se[[x]])}) rm(se) ## calculate central values if(centrality[1] == "mean") { z.central <- lapply(1:length(data), function(x){ rep(mean(data[[x]][,3], na.rm = TRUE), length(data[[x]][,3]))}) } else if(centrality[1] == "median") { z.central <- lapply(1:length(data), function(x){ rep(median(data[[x]][,3], na.rm = TRUE), length(data[[x]][,3]))}) } else if(centrality[1] == "mean.weighted") { z.central <- lapply(1:length(data), function(x){ sum(data[[x]][,3] / data[[x]][,4]^2) / sum(1 / data[[x]][,4]^2)}) } else if(centrality[1] == "median.weighted") { ## define function after isotone::weighted.median median.w <- function (y, w) { ox <- order(y) y <- y[ox] w <- w[ox] k <- 1 low <- cumsum(c(0, w)) up <- sum(w) - low df <- low - up repeat { if (df[k] < 0) k <- k + 1 else if (df[k] == 0) return((w[k] * y[k] + w[k - 1] * y[k - 1]) / (w[k] + w[k - 1])) else return(y[k - 1]) } } z.central <- lapply(1:length(data), 
function(x){ rep(median.w(y = data[[x]][,3], w = data[[x]][,4]), length(data[[x]][,3]))}) } else if(is.numeric(centrality) & length(centrality) == length(data)) { z.central.raw <- if(log.z == TRUE) { log(centrality + De.add) } else { centrality + De.add } z.central <- lapply(1:length(data), function(x){ rep(z.central.raw[x], length(data[[x]][,3]))}) } else if(is.numeric(centrality) == TRUE & length(centrality) > length(data)) { z.central <- lapply(1:length(data), function(x){ rep(median(data[[x]][,3], na.rm = TRUE), length(data[[x]][,3]))}) } else { stop("[plot_RadialPlot()] Measure of centrality not supported!", call. = FALSE) } data <- lapply(1:length(data), function(x) { cbind(data[[x]], z.central[[x]])}) rm(z.central) ## calculate precision precision <- lapply(1:length(data), function(x){ 1 / data[[x]][,4]}) if(is(precision, "list") == FALSE) {precision <- list(precision)} data <- lapply(1:length(data), function(x) { cbind(data[[x]], precision[[x]])}) rm(precision) ## calculate standard estimate std.estimate <- lapply(1:length(data), function(x){ (data[[x]][,3] - data[[x]][,5]) / data[[x]][,4]}) if(is(std.estimate, "list") == FALSE) {std.estimate <- list(std.estimate)} data <- lapply(1:length(data), function(x) { cbind(data[[x]], std.estimate[[x]])}) ## append empty standard estimate for plotting data <- lapply(1:length(data), function(x) { cbind(data[[x]], std.estimate[[x]])}) rm(std.estimate) ## generate global data set data.global <- cbind(data[[1]], rep(x = 1, times = nrow(data[[1]]))) colnames(data.global) <- rep("", 9) if(length(data) > 1) { for(i in 2:length(data)) { data.add <- cbind(data[[i]], rep(x = i, times = nrow(data[[i]]))) colnames(data.add) <- rep("", 9) data.global <- rbind(data.global, data.add) } } ## create column names colnames(data.global) <- c( "De", "error", "z", "se", "z.central", "precision", "std.estimate", "std.estimate.plot") ## calculate global central value if(centrality[1] == "mean") { z.central.global <- mean(data.global[,3], 
na.rm = TRUE) } else if(centrality[1] == "median") { z.central.global <- median(data.global[,3], na.rm = TRUE) } else if(centrality[1] == "mean.weighted") { z.central.global <- sum(data.global[,3] / data.global[,4]^2) / sum(1 / data.global[,4]^2) } else if(centrality[1] == "median.weighted") { ## define function after isotone::weighted.mean median.w <- function (y, w) { ox <- order(y) y <- y[ox] w <- w[ox] k <- 1 low <- cumsum(c(0, w)) up <- sum(w) - low df <- low - up repeat { if (df[k] < 0) k <- k + 1 else if (df[k] == 0) return((w[k] * y[k] + w[k - 1] * y[k - 1])/(w[k] + w[k - 1])) else return(y[k - 1]) } } z.central.global <- median.w(y = data.global[,3], w = data.global[,4]) } else if(is.numeric(centrality) == TRUE & length(centrality == length(data))) { z.central.global <- mean(data.global[,3], na.rm = TRUE) } ## optionally adjust central value by user-defined value if(missing(central.value) == FALSE) { # ## adjust central value for De.add central.value <- central.value + De.add z.central.global <- ifelse(log.z == TRUE, log(central.value), central.value) } ## create column names for(i in 1:length(data)) { colnames(data[[i]]) <- c("De", "error", "z", "se", "z.central", "precision", "std.estimate", "std.estimate.plot") } ## re-calculate standardised estimate for plotting for(i in 1:length(data)) { data[[i]][,8] <- (data[[i]][,3] - z.central.global) / data[[i]][,4] } data.global.plot <- data[[1]][,8] if(length(data) > 1) { for(i in 2:length(data)) { data.global.plot <- c(data.global.plot, data[[i]][,8]) } } data.global[,8] <- data.global.plot ## print warning for too small scatter if(max(abs(1 / data.global[6])) < 0.02) { small.sigma <- TRUE message(paste("Attention, small standardised estimate scatter.", "Toggle off y.ticks?")) } ## read out additional arguments--------------------------------------------- extraArgs <- list(...) 
main <- if("main" %in% names(extraArgs)) {extraArgs$main} else {expression(paste(D[e], " distribution"))} sub <- if("sub" %in% names(extraArgs)) {extraArgs$sub} else {""} if("xlab" %in% names(extraArgs)) { if(length(extraArgs$xlab) != 2) { stop("Argmuent xlab is not of length 2!") } else {xlab <- extraArgs$xlab} } else { xlab <- c(if(log.z == TRUE) { "Relative standard error (%)" } else { "Standard error" }, "Precision") } ylab <- if("ylab" %in% names(extraArgs)) { extraArgs$ylab } else { "Standardised estimate" } zlab <- if("zlab" %in% names(extraArgs)) { extraArgs$zlab } else { expression(paste(D[e], " [Gy]")) } if("zlim" %in% names(extraArgs)) { limits.z <- extraArgs$zlim } else { z.span <- (mean(data.global[,1]) * 0.5) / (sd(data.global[,1]) * 100) z.span <- ifelse(z.span > 1, 0.9, z.span) limits.z <- c((0.9 - z.span) * min(data.global[[1]]), (1.1 + z.span) * max(data.global[[1]])) } if("xlim" %in% names(extraArgs)) { limits.x <- extraArgs$xlim } else { limits.x <- c(0, max(data.global[,6])) } if(limits.x[1] != 0) { limits.x[1] <- 0 warning("Lower x-axis limit not set to zero, issue corrected!") } if("ylim" %in% names(extraArgs)) { limits.y <- extraArgs$ylim } else { y.span <- (mean(data.global[,1]) * 10) / (sd(data.global[,1]) * 100) y.span <- ifelse(y.span > 1, 0.98, y.span) limits.y <- c(-(1 + y.span) * max(abs(data.global[,7])), (0.8 + y.span) * max(abs(data.global[,7]))) } cex <- if("cex" %in% names(extraArgs)) { extraArgs$cex } else { 1 } lty <- if("lty" %in% names(extraArgs)) { extraArgs$lty } else { rep(2, length(data)) } lwd <- if("lwd" %in% names(extraArgs)) { extraArgs$lwd } else { rep(1, length(data)) } pch <- if("pch" %in% names(extraArgs)) { extraArgs$pch } else { rep(1, length(data)) } col <- if("col" %in% names(extraArgs)) { extraArgs$col } else { 1:length(data) } tck <- if("tck" %in% names(extraArgs)) { extraArgs$tck } else { NA } tcl <- if("tcl" %in% names(extraArgs)) { extraArgs$tcl } else { -0.5 } show <- if("show" %in% names(extraArgs)) 
{extraArgs$show} else {TRUE} if(show != TRUE) {show <- FALSE} fun <- if("fun" %in% names(extraArgs)) { extraArgs$fun } else { FALSE } ## define auxiliary plot parameters ----------------------------------------- ## optionally adjust plot ratio if(missing(plot.ratio)) { if(log.z) { plot.ratio <- 1 / (1 * ((max(data.global[,6]) - min(data.global[,6])) / (max(data.global[,7]) - min(data.global[,7])))) } else { plot.ratio <- 4.5 / 5.5 } } ##limit plot ratio plot.ratio <- min(c(1e+06, plot.ratio)) ## calculate conversion factor for plot coordinates f <- (max(data.global[,6]) - min(data.global[,6])) / (max(data.global[,7]) - min(data.global[,7])) * plot.ratio ## calculate major and minor z-tick values tick.values.major <- signif(c(limits.z, pretty(limits.z, n = 5))) tick.values.minor <- signif(pretty(limits.z, n = 25), 3) tick.values.major <- tick.values.major[tick.values.major >= min(tick.values.minor)] tick.values.major <- tick.values.major[tick.values.major <= max(tick.values.minor)] tick.values.major <- tick.values.major[tick.values.major >= limits.z[1]] tick.values.major <- tick.values.major[tick.values.major <= limits.z[2]] tick.values.minor <- tick.values.minor[tick.values.minor >= limits.z[1]] tick.values.minor <- tick.values.minor[tick.values.minor <= limits.z[2]] if(log.z == TRUE) { tick.values.major <- log(tick.values.major) tick.values.minor <- log(tick.values.minor) } ## calculate z-axis radius r.x <- limits.x[2] / max(data.global[,6]) + 0.05 r <- max(sqrt((data.global[,6])^2+(data.global[,7] * f)^2)) * r.x ## calculate major z-tick coordinates tick.x1.major <- r / sqrt(1 + f^2 * ( tick.values.major - z.central.global)^2) tick.y1.major <- (tick.values.major - z.central.global) * tick.x1.major tick.x2.major <- (1 + 0.015 * cex) * r / sqrt( 1 + f^2 * (tick.values.major - z.central.global)^2) tick.y2.major <- (tick.values.major - z.central.global) * tick.x2.major ticks.major <- cbind(0, tick.x1.major, tick.x2.major, tick.y1.major, tick.y2.major) ## calculate 
minor z-tick coordinates tick.x1.minor <- r / sqrt(1 + f^2 * ( tick.values.minor - z.central.global)^2) tick.y1.minor <- (tick.values.minor - z.central.global) * tick.x1.minor tick.x2.minor <- (1 + 0.007 * cex) * r / sqrt( 1 + f^2 * (tick.values.minor - z.central.global)^2) tick.y2.minor <- (tick.values.minor - z.central.global) * tick.x2.minor ticks.minor <- cbind(tick.x1.minor, tick.x2.minor, tick.y1.minor, tick.y2.minor) ## calculate z-label positions label.x <- 1.03 * r / sqrt(1 + f^2 * (tick.values.major - z.central.global)^2) label.y <- (tick.values.major - z.central.global) * tick.x2.major ## create z-axes labels if(log.z) { label.z.text <- signif(exp(tick.values.major), 3) } else { label.z.text <- signif(tick.values.major, 3) } ## subtract De.add from label values if(De.add != 0) label.z.text <- label.z.text - De.add labels <- cbind(label.x, label.y, label.z.text) ## calculate coordinates for 2-sigma-polygon overlay polygons <- matrix(nrow = length(data), ncol = 8) for(i in 1:length(data)) { polygons[i,1:4] <- c(limits.x[1], limits.x[1], max(data.global[,6]), max(data.global[,6])) polygons[i,5:8] <- c(-2, 2, (data[[i]][1,5] - z.central.global) * polygons[i,3] + 2, (data[[i]][1,5] - z.central.global) * polygons[i,4] - 2) } ## calculate node coordinates for semi-circle user.limits <- if(log.z) log(limits.z) else limits.z ellipse.values <- seq( from = min(c(tick.values.major, tick.values.minor, user.limits[1])), to = max(c(tick.values.major,tick.values.minor, user.limits[2])), length.out = 500) ellipse.x <- r / sqrt(1 + f^2 * (ellipse.values - z.central.global)^2) ellipse.y <- (ellipse.values - z.central.global) * ellipse.x ellipse <- cbind(ellipse.x, ellipse.y) ellipse.lims <- rbind(range(ellipse[,1]), range(ellipse[,2])) ## check if z-axis overlaps with 2s-polygon polygon_y_max <- max(polygons[,7]) polygon_y_min <- min(polygons[,7]) z_2s_upper <- ellipse.x[abs(ellipse.y - polygon_y_max) == min(abs(ellipse.y - polygon_y_max))] z_2s_lower <- 
ellipse.x[abs(ellipse.y - polygon_y_min) == min(abs(ellipse.y - polygon_y_min))] if(max(polygons[,3]) >= z_2s_upper | max(polygons[,3]) >= z_2s_lower) { warning("[plot_RadialPlot()] z-scale touches 2s-polygon. Decrease plot ratio.", call. = FALSE) } ## calculate statistical labels if(length(stats == 1)) {stats <- rep(stats, 2)} stats.data <- matrix(nrow = 3, ncol = 3) data.stats <- as.numeric(data.global[,1]) if("min" %in% stats == TRUE) { stats.data[1, 3] <- data.stats[data.stats == min(data.stats)][1] stats.data[1, 1] <- data.global[data.stats == stats.data[1, 3], 6][1] stats.data[1, 2] <- data.global[data.stats == stats.data[1, 3], 8][1] } if("max" %in% stats == TRUE) { stats.data[2, 3] <- data.stats[data.stats == max(data.stats)][1] stats.data[2, 1] <- data.global[data.stats == stats.data[2, 3], 6][1] stats.data[2, 2] <- data.global[data.stats == stats.data[2, 3], 8][1] } if("median" %in% stats == TRUE) { stats.data[3, 3] <- data.stats[data.stats == quantile(data.stats, 0.5, type = 3)] stats.data[3, 1] <- data.global[data.stats == stats.data[3, 3], 6][1] stats.data[3, 2] <- data.global[data.stats == stats.data[3, 3], 8][1] } ## recalculate axes limits if necessary limits.z.x <- range(ellipse[,1]) limits.z.y <- range(ellipse[,2]) if(!("ylim" %in% names(extraArgs))) { if(limits.z.y[1] < 0.66 * limits.y[1]) { limits.y[1] <- 1.8 * limits.z.y[1] } if(limits.z.y[2] > 0.77 * limits.y[2]) { limits.y[2] <- 1.3 * limits.z.y[2] } } if(!("xlim" %in% names(extraArgs))) { if(limits.z.x[2] > 1.1 * limits.x[2]) { limits.x[2] <- limits.z.x[2] } } ## calculate and paste statistical summary De.stats <- matrix(nrow = length(data), ncol = 18) colnames(De.stats) <- c("n", "mean", "mean.weighted", "median", "median.weighted", "kde.max", "sd.abs", "sd.rel", "se.abs", "se.rel", "q25", "q75", "skewness", "kurtosis", "sd.abs.weighted", "sd.rel.weighted", "se.abs.weighted", "se.rel.weighted") for(i in 1:length(data)) { data_to_stats <- data[[i]][,1:2] ## remove added De if(log.z) 
data_to_stats$De <- data_to_stats$De - De.add statistics <- calc_Statistics(data = data_to_stats) De.stats[i,1] <- statistics$weighted$n De.stats[i,2] <- statistics$unweighted$mean De.stats[i,3] <- statistics$weighted$mean De.stats[i,4] <- statistics$unweighted$median De.stats[i,5] <- statistics$unweighted$median De.stats[i,7] <- statistics$unweighted$sd.abs De.stats[i,8] <- statistics$unweighted$sd.rel De.stats[i,9] <- statistics$unweighted$se.abs De.stats[i,10] <- statistics$weighted$se.rel De.stats[i,11] <- quantile(data[[i]][,1], 0.25) De.stats[i,12] <- quantile(data[[i]][,1], 0.75) De.stats[i,13] <- statistics$unweighted$skewness De.stats[i,14] <- statistics$unweighted$kurtosis De.stats[i,15] <- statistics$weighted$sd.abs De.stats[i,16] <- statistics$weighted$sd.rel De.stats[i,17] <- statistics$weighted$se.abs De.stats[i,18] <- statistics$weighted$se.rel ## kdemax - here a little doubled as it appears below again De.density <- try(density(x = data[[i]][,1], kernel = "gaussian", from = limits.z[1], to = limits.z[2]), silent = TRUE) if(!inherits(De.density, "try-error")) { De.stats[i,6] <- NA } else { De.stats[i,6] <- De.density$x[which.max(De.density$y)] } } label.text = list(NA) if(summary.pos[1] != "sub") { n.rows <- length(summary) for(i in 1:length(data)) { stops <- paste(rep("\n", (i - 1) * n.rows), collapse = "") summary.text <- character(0) for(j in 1:length(summary)) { summary.text <- c(summary.text, paste( "", ifelse("n" %in% summary[j] == TRUE, paste("n = ", De.stats[i,1], "\n", sep = ""), ""), ifelse("mean" %in% summary[j] == TRUE, paste("mean = ", round(De.stats[i,2], 2), "\n", sep = ""), ""), ifelse("mean.weighted" %in% summary[j] == TRUE, paste("weighted mean = ", round(De.stats[i,3], 2), "\n", sep = ""), ""), ifelse("median" %in% summary[j] == TRUE, paste("median = ", round(De.stats[i,4], 2), "\n", sep = ""), ""), ifelse("median.weighted" %in% summary[j] == TRUE, paste("weighted median = ", round(De.stats[i,5], 2), "\n", sep = ""), ""), 
ifelse("kdemax" %in% summary[j] == TRUE, paste("kdemax = ", round(De.stats[i,6], 2), " \n ", sep = ""), ""), ifelse("sdabs" %in% summary[j] == TRUE, paste("sd = ", round(De.stats[i,7], 2), "\n", sep = ""), ""), ifelse("sdrel" %in% summary[j] == TRUE, paste("rel. sd = ", round(De.stats[i,8], 2), " %", "\n", sep = ""), ""), ifelse("seabs" %in% summary[j] == TRUE, paste("se = ", round(De.stats[i,9], 2), "\n", sep = ""), ""), ifelse("serel" %in% summary[j] == TRUE, paste("rel. se = ", round(De.stats[i,10], 2), " %", "\n", sep = ""), ""), ifelse("skewness" %in% summary[j] == TRUE, paste("skewness = ", round(De.stats[i,13], 2), "\n", sep = ""), ""), ifelse("kurtosis" %in% summary[j] == TRUE, paste("kurtosis = ", round(De.stats[i,14], 2), "\n", sep = ""), ""), ifelse("in.2s" %in% summary[j] == TRUE, paste("in 2 sigma = ", round(sum(data[[i]][,7] > -2 & data[[i]][,7] < 2) / nrow(data[[i]]) * 100 , 1), " %", sep = ""), ""), ifelse("sdabs.weighted" %in% summary[j] == TRUE, paste("abs. weighted sd = ", round(De.stats[i,15], 2), "\n", sep = ""), ""), ifelse("sdrel.weighted" %in% summary[j] == TRUE, paste("rel. weighted sd = ", round(De.stats[i,16], 2), "\n", sep = ""), ""), ifelse("seabs.weighted" %in% summary[j] == TRUE, paste("abs. weighted se = ", round(De.stats[i,17], 2), "\n", sep = ""), ""), ifelse("serel.weighted" %in% summary[j] == TRUE, paste("rel. 
weighted se = ", round(De.stats[i,18], 2), "\n", sep = ""), ""), sep = "")) } summary.text <- paste(summary.text, collapse = "") label.text[[length(label.text) + 1]] <- paste(stops, summary.text, stops, sep = "") } } else { for(i in 1:length(data)) { summary.text <- character(0) for(j in 1:length(summary)) { summary.text <- c(summary.text, ifelse("n" %in% summary[j] == TRUE, paste("n = ", De.stats[i,1], " | ", sep = ""), ""), ifelse("mean" %in% summary[j] == TRUE, paste("mean = ", round(De.stats[i,2], 2), " | ", sep = ""), ""), ifelse("mean.weighted" %in% summary[j] == TRUE, paste("weighted mean = ", round(De.stats[i,3], 2), " | ", sep = ""), ""), ifelse("median" %in% summary[j] == TRUE, paste("median = ", round(De.stats[i,4], 2), " | ", sep = ""), ""), ifelse("median.weighted" %in% summary[j] == TRUE, paste("weighted median = ", round(De.stats[i,5], 2), " | ", sep = ""), ""), ifelse("kdemax" %in% summary[j] == TRUE, paste("kdemax = ", round(De.stats[i,6], 2), " | ", sep = ""), ""), ifelse("sdrel" %in% summary[j] == TRUE, paste("rel. sd = ", round(De.stats[i,8], 2), " %", " | ", sep = ""), ""), ifelse("sdabs" %in% summary[j] == TRUE, paste("abs. sd = ", round(De.stats[i,7], 2), " | ", sep = ""), ""), ifelse("serel" %in% summary[j] == TRUE, paste("rel. se = ", round(De.stats[i,10], 2), " %", " | ", sep = ""), ""), ifelse("seabs" %in% summary[j] == TRUE, paste("abs. se = ", round(De.stats[i,9], 2), " | ", sep = ""), ""), ifelse("skewness" %in% summary[j] == TRUE, paste("skewness = ", round(De.stats[i,13], 2), " | ", sep = ""), ""), ifelse("kurtosis" %in% summary[j] == TRUE, paste("kurtosis = ", round(De.stats[i,14], 2), " | ", sep = ""), ""), ifelse("in.2s" %in% summary[j] == TRUE, paste("in 2 sigma = ", round(sum(data[[i]][,7] > -2 & data[[i]][,7] < 2) / nrow(data[[i]]) * 100 , 1), " % ", sep = ""), ""), ifelse("sdabs.weighted" %in% summary[j] == TRUE, paste("abs. 
weighted sd = ", round(De.stats[i,15], 2), " %", " | ", sep = ""), ""), ifelse("sdrel.weighted" %in% summary[j] == TRUE, paste("rel. weighted sd = ", round(De.stats[i,16], 2), " %", " | ", sep = ""), ""), ifelse("seabs.weighted" %in% summary[j] == TRUE, paste("abs. weighted se = ", round(De.stats[i,17], 2), " %", " | ", sep = ""), ""), ifelse("serel.weighted" %in% summary[j] == TRUE, paste("rel. weighted se = ", round(De.stats[i,18], 2), " %", " | ", sep = ""), "") ) } summary.text <- paste(summary.text, collapse = "") label.text[[length(label.text) + 1]] <- paste( " ", summary.text, sep = "") } ## remove outer vertical lines from string for(i in 2:length(label.text)) { label.text[[i]] <- substr(x = label.text[[i]], start = 3, stop = nchar(label.text[[i]]) - 3) } } ## remove dummy list element label.text[[1]] <- NULL ## convert keywords into summary placement coordinates if(missing(summary.pos) == TRUE) { summary.pos <- c(limits.x[1], limits.y[2]) summary.adj <- c(0, 1) } else if(length(summary.pos) == 2) { summary.pos <- summary.pos summary.adj <- c(0, 1) } else if(summary.pos[1] == "topleft") { summary.pos <- c(limits.x[1], limits.y[2]) summary.adj <- c(0, 1) } else if(summary.pos[1] == "top") { summary.pos <- c(mean(limits.x), limits.y[2]) summary.adj <- c(0.5, 1) } else if(summary.pos[1] == "topright") { summary.pos <- c(limits.x[2], limits.y[2]) summary.adj <- c(1, 1) } else if(summary.pos[1] == "left") { summary.pos <- c(limits.x[1], mean(limits.y)) summary.adj <- c(0, 0.5) } else if(summary.pos[1] == "center") { summary.pos <- c(mean(limits.x), mean(limits.y)) summary.adj <- c(0.5, 0.5) } else if(summary.pos[1] == "right") { summary.pos <- c(limits.x[2], mean(limits.y)) summary.adj <- c(1, 0.5) }else if(summary.pos[1] == "bottomleft") { summary.pos <- c(limits.x[1], limits.y[1]) summary.adj <- c(0, 0) } else if(summary.pos[1] == "bottom") { summary.pos <- c(mean(limits.x), limits.y[1]) summary.adj <- c(0.5, 0) } else if(summary.pos[1] == "bottomright") { 
summary.pos <- c(limits.x[2], limits.y[1]) summary.adj <- c(1, 0) } ## convert keywords into legend placement coordinates if(missing(legend.pos) == TRUE) { legend.pos <- c(limits.x[1], limits.y[2]) legend.adj <- c(0, 1) } else if(length(legend.pos) == 2) { legend.pos <- legend.pos legend.adj <- c(0, 1) } else if(legend.pos[1] == "topleft") { legend.pos <- c(limits.x[1], limits.y[2]) legend.adj <- c(0, 1) } else if(legend.pos[1] == "top") { legend.pos <- c(mean(limits.x), limits.y[2]) legend.adj <- c(0.5, 1) } else if(legend.pos[1] == "topright") { legend.pos <- c(limits.x[2], limits.y[2]) legend.adj <- c(1, 1) } else if(legend.pos[1] == "left") { legend.pos <- c(limits.x[1], mean(limits.y)) legend.adj <- c(0, 0.5) } else if(legend.pos[1] == "center") { legend.pos <- c(mean(limits.x), mean(limits.y)) legend.adj <- c(0.5, 0.5) } else if(legend.pos[1] == "right") { legend.pos <- c(limits.x[2], mean(limits.y)) legend.adj <- c(1, 0.5) } else if(legend.pos[1] == "bottomleft") { legend.pos <- c(limits.x[1], limits.y[1]) legend.adj <- c(0, 0) } else if(legend.pos[1] == "bottom") { legend.pos <- c(mean(limits.x), limits.y[1]) legend.adj <- c(0.5, 0) } else if(legend.pos[1] == "bottomright") { legend.pos <- c(limits.x[2], limits.y[1]) legend.adj <- c(1, 0) } ## calculate line coordinates and further parameters if(!missing(line)) { #line = line + De.add if(log.z == TRUE) {line <- log(line)} line.coords <- list(NA) for(i in 1:length(line)) { line.x <- c(limits.x[1], r / sqrt(1 + f^2 * (line[i] - z.central.global)^2)) line.y <- c(0, (line[i] - z.central.global) * line.x[2]) line.coords[[length(line.coords) + 1]] <- rbind(line.x, line.y) } line.coords[1] <- NULL if(missing(line.col) == TRUE) { line.col <- seq(from = 1, to = length(line.coords)) } if(missing(line.label) == TRUE) { line.label <- rep("", length(line.coords)) } } ## calculate rug coordinates if(missing(rug) == FALSE) { if(log.z == TRUE) { rug.values <- log(De.global) } else { rug.values <- De.global } rug.coords <- 
list(NA) for(i in 1:length(rug.values)) { rug.x <- c(r / sqrt(1 + f^2 * (rug.values[i] - z.central.global)^2) * 0.988, r / sqrt(1 + f^2 * (rug.values[i] - z.central.global)^2) * 0.995) rug.y <- c((rug.values[i] - z.central.global) * rug.x[1], (rug.values[i] - z.central.global) * rug.x[2]) rug.coords[[length(rug.coords) + 1]] <- rbind(rug.x, rug.y) } rug.coords[1] <- NULL } ## Generate plot ------------------------------------------------------------ ## check if plotting is enabled if(show) { ## determine number of subheader lines to shift the plot if(length(summary) > 0 & summary.pos[1] == "sub") { shift.lines <- length(data) + 1 } else {shift.lines <- 1} ## setup plot area default <- par(mar = c(4, 4, shift.lines + 1.5, 7), xpd = TRUE, cex = cex) ## reset on exit on.exit(par(default)) ## create empty plot plot(NA, xlim = limits.x, ylim = limits.y, main = "", sub = sub, xlab = "", ylab = "", xaxs = "i", yaxs = "i", frame.plot = FALSE, axes = FALSE) ## add y-axis label mtext(side = 2, line = 2.5, at = 0, adj = 0.5, cex = cex, text = ylab) ## calculate upper x-axis label values label.x.upper <- if(log.z == TRUE) { as.character(round(1/axTicks(side = 1)[-1] * 100, 1)) } else { as.character(round(1/axTicks(side = 1)[-1], 1)) } ## optionally, plot 2-sigma-bar if(bar.col[1] != "none") { for(i in 1:length(data)) { polygon(x = polygons[i,1:4], y = polygons[i,5:8], lty = "blank", col = bar.col[i]) } } ## optionally, add grid lines if(grid.col[1] != "none") { for(i in 1:length(tick.x1.major)) { lines(x = c(limits.x[1], tick.x1.major[i]), y = c(0, tick.y1.major[i]), col = grid.col) } } ## optionally, plot central value lines if(lwd[1] > 0 & lty[1] > 0) { for(i in 1:length(data)) { x2 <- r / sqrt(1 + f^2 * ( data[[i]][1,5] - z.central.global)^2) y2 <- (data[[i]][1,5] - z.central.global) * x2 lines(x = c(limits.x[1], x2), y = c(0, y2), lty = lty[i], lwd = lwd[i], col = col[i]) } } ## optionally add further lines if(missing(line) == FALSE) { for(i in 1:length(line)) { lines(x = 
line.coords[[i]][1,], y = line.coords[[i]][2,], col = line.col[i]) text(x = line.coords[[i]][1,2], y = line.coords[[i]][2,2] + par()$cxy[2] * 0.3, labels = line.label[i], pos = 2, col = line.col[i], cex = cex * 0.9) } } ## overplot unwanted parts polygon(x = c(ellipse[,1], limits.x[2] * 2, limits.x[2] * 2), y = c(ellipse[,2], max(ellipse[,2]), min(ellipse[,2])), col = "white", lty = 0) ## add plot title title(main = main, line = shift.lines, font = 2) ## plot lower x-axis (precision) x.axis.ticks <- axTicks(side = 1) x.axis.ticks <- x.axis.ticks[c(TRUE, x.axis.ticks <= limits.x[2])] x.axis.ticks <- x.axis.ticks[x.axis.ticks <= limits.x[2]] ## axis with lables and ticks axis(side = 1, at = x.axis.ticks, lwd = 1, xlab = "") ## extend axis line to right side of the plot lines(x = c(max(x.axis.ticks, na.rm = TRUE), limits.x[2]), y = c(limits.y[1], limits.y[1])) ## draw closing tick on right hand side axis(side = 1, tcl = 0.5, lwd = 0, lwd.ticks = 1, at = limits.x[2], labels = FALSE) axis(side = 1, tcl = -0.5, lwd = 0, lwd.ticks = 1, at = limits.x[2], labels = FALSE) ## add upper axis label mtext(text = xlab[1], at = (limits.x[1] + limits.x[2]) / 2, side = 1, line = -3.5, cex = cex) ## add lower axis label mtext(text = xlab[2], at = (limits.x[1] + limits.x[2]) / 2, side = 1, line = 2.5, cex = cex) ## plot upper x-axis axis(side = 1, tcl = 0.5, lwd = 0, lwd.ticks = 1, at = x.axis.ticks[-1], labels = FALSE) ## remove first tick label (infinity) label.x.upper <- label.x.upper[1:(length(x.axis.ticks) - 1)] ## add tick labels axis(side = 1, lwd = 0, labels = label.x.upper, at = x.axis.ticks[-1], line = -3) ## plot minor z-ticks for(i in 1:length(tick.values.minor)) { lines(x = c(tick.x1.minor[i], tick.x2.minor[i]), y = c(tick.y1.minor[i], tick.y2.minor[i])) } ## plot major z-ticks for(i in 1:length(tick.values.major)) { lines(x = c(tick.x1.major[i], tick.x2.major[i]), y = c(tick.y1.major[i], tick.y2.major[i])) } ## plot z-axis lines(ellipse) ## plot z-values text(x = 
label.x, y = label.y, label = label.z.text, 0) ## plot z-label mtext(side = 4, at = 0, line = 5, las = 3, adj = 0.5, cex = cex, text = zlab) ## optionally add rug if(rug == TRUE) { for(i in 1:length(rug.coords)) { lines(x = rug.coords[[i]][1,], y = rug.coords[[i]][2,], col = col[data.global[i,9]]) } } ## plot values for(i in 1:length(data)) { points(data[[i]][,6][data[[i]][,6] <= limits.x[2]], data[[i]][,8][data[[i]][,6] <= limits.x[2]], col = col[i], pch = pch[i]) } ## optionally add min, max, median sample text if(length(stats) > 0) { text(x = stats.data[,1], y = stats.data[,2], labels = round(stats.data[,3], 1), pos = 2, cex = 0.85) } ## optionally add legend content if(missing(legend) == FALSE) { legend(x = legend.pos[1], y = 0.8 * legend.pos[2], xjust = legend.adj[1], yjust = legend.adj[2], legend = legend, pch = pch, col = col, text.col = col, cex = 0.8 * cex, bty = "n") } ## plot y-axis if(y.ticks == TRUE) { char.height <- par()$cxy[2] tick.space <- axisTicks(usr = limits.y, log = FALSE) tick.space <- (max(tick.space) - min(tick.space)) / length(tick.space) if(tick.space < char.height * 1.5) { axis(side = 2, at = c(-2, 2), labels = c("", ""), las = 1) axis(side = 2, at = 0, tcl = 0, labels = paste("\u00B1", "2"), las = 1) } else { axis(side = 2, at = seq(-2, 2, by = 2), las = 2) } } else { axis(side = 2, at = 0) } ## optionally add subheader text mtext(side = 3, line = shift.lines - 2, text = mtext, cex = 0.8 * cex) ## add summary content for(i in 1:length(data)) { if(summary.pos[1] != "sub") { text(x = summary.pos[1], y = 0.8 * summary.pos[2], adj = summary.adj, labels = label.text[[i]], cex = 0.8 * cex, col = col[i]) } else { if(mtext == "") { mtext(side = 3, line = shift.lines - 1 - i, text = label.text[[i]], col = col[i], cex = 0.8 * cex) } } } ##FUN by R Luminescence Team if(fun==TRUE){sTeve()} } if(output) { return(list(data = data, data.global = data.global, xlim = limits.x, ylim = limits.y, zlim = limits.z, r = r, plot.ratio = plot.ratio, ticks.major 
= ticks.major, ticks.minor = ticks.minor, labels = labels, polygons = polygons, ellipse.lims = ellipse.lims)) } } Luminescence/R/get_Layout.R0000644000176200001440000006176414236146743015354 0ustar liggesusers#' Collection of layout definitions #' #' This helper function returns a list with layout definitions for homogeneous #' plotting. #' #' The easiest way to create a user-specific layout definition is perhaps to #' create either an empty or a default layout object and fill/modify the #' definitions (`user.layout <- get_Layout(data = "empty")`). #' #' @param layout [character] or [list] object (**required**): #' name of the layout definition to be returned. If name is provided the #' respective definition is returned. One of the following #' supported layout definitions is possible: `"default"`, #' `"journal.1"`, `"small"`, `"empty"`. #' #' User-specific layout definitions must be provided as a list object of #' predefined structure, see details. #' #' @return A list object with layout definitions for plot functions. 
#' #' @section Function version: 0.1 #' #' @author Michael Dietze, GFZ Potsdam (Germany) #' #' @examples #' #' ## read example data set #' data(ExampleData.DeValues, envir = environment()) #' #' ## show structure of the default layout definition #' layout.default <- get_Layout(layout = "default") #' str(layout.default) #' #' ## show colour definitions for Abanico plot, only #' layout.default$abanico$colour #' #' ## set Abanico plot title colour to orange #' layout.default$abanico$colour$main <- "orange" #' #' ## create Abanico plot with modofied layout definition #' plot_AbanicoPlot(data = ExampleData.DeValues, #' layout = layout.default) #' #' ## create Abanico plot with predefined layout "journal" #' plot_AbanicoPlot(data = ExampleData.DeValues, #' layout = "journal") #' #' @md #' @export get_Layout <- function( layout ) { ## pre-defined layout selections if(is.character(layout) == TRUE & length(layout) == 1) { if(layout == "empty") { layout = list( ## empty Abanico plot ------------------------------------------------- abanico = list( font.type = list( main = character(1), xlab1 = character(1), xlab2 = character(1), ylab = character(1), zlab = character(1), xtck1 = character(1), xtck2 = character(1), xtck3 = character(1), ytck = character(1), ztck = character(1), mtext = character(1), summary = character(1), # optionally vector stats = character(1), # optionally vector legend = character(1) # optionally vector ), font.size = list( main = numeric(1), xlab1 = numeric(1), xlab2 = numeric(1), xlab3 = numeric(1), ylab = numeric(1), zlab = numeric(1), xtck1 = numeric(1), xtck2 = numeric(1), xtck3 = numeric(1), ytck = numeric(1), ztck = numeric(1), mtext = numeric(1), summary = numeric(1), # optionally vector stats = numeric(1), # optionally vector legend = numeric(1) # optionally vector ), font.deco = list( main = character(1), xlab1 = character(1), xlab2 = character(1), xlab3 = character(1), ylab = character(1), zlab = character(1), xtck1 = character(1), xtck2 = 
character(1), xtck3 = character(1), ytck = character(1), ztck = character(1), mtext = character(1), summary = character(1), # optionally vector stats = character(1), # optionally vector legend = character(1) # optionally vector ), colour = list( main = numeric(1), # plot title colour xlab1 = numeric(1), # left x-axis label colour xlab2 = numeric(1), # right x-axis label colour xlab3 = numeric(1), # right x-axis label colour ylab = numeric(1), # y-axis label colour zlab = numeric(1), # z-axis label colour xtck1 = numeric(1), # left x-axis tick colour xtck2 = numeric(1), # right x-axis tick colour xtck3 = numeric(1), # right x-axis tick colour ytck = numeric(1), # y-axis tick colour ztck = numeric(1), # z-axis tick colour mtext = numeric(1), # subheader text colour summary = numeric(1), # statistic summary colour stats = numeric(1), # value statistics colour legend = numeric(1), # legend colour centrality = numeric(1), # Centrality line colour value.dot = numeric(1), # De value dot colour value.bar = numeric(1), # De value error bar colour value.rug = numeric(1), # De value rug colour poly.line = numeric(1), # polygon line colour poly.fill = numeric(1), # polygon fill colour bar.line = numeric(1), # polygon line colour bar.fill = numeric(1), # polygon fill colour kde.line = numeric(1), kde.fill = numeric(1), grid.major = numeric(1), grid.minor = numeric(1), border = numeric(1), background = numeric(1)), dimension = list( figure.width = numeric(1), # figure width in mm figure.height = numeric(1), # figure height in mm margin = numeric(4), # margin sizes in mm main.line = numeric(1), # line height in % xlab1.line = numeric(1), # line height in % xlab2.line = numeric(1), # line height in % xlab3.line = numeric(1), # line height in % ylab.line = numeric(1), # line height in % zlab.line = numeric(1), # line height in % xtck1.line = numeric(1), # line height in % xtck2.line = numeric(1), # line height in % xtck3.line = numeric(1), # line height in % ytck.line = numeric(1), 
# line height in % ztck.line = numeric(1), # line height in % xtcl1 = numeric(1), # tick length in % xtcl2 = numeric(1), # tick length in % xtcl3 = numeric(1), # tick length in % ytcl = numeric(1), # tick length in % ztcl = numeric(1), # tick length in % rugl = numeric(1), # rug length in % mtext = numeric(1), # line height in % summary.line = numeric(1) # line height in % )), ## empty KDE plot ----------------------------------------------------- kde = list( font.type = list( main = character(1), xlab = character(1), ylab1 = character(1), ylab2 = character(1), xtck = character(1), ytck1 = character(1), ytck2 = character(1), stats = character(1), # optionally vector legend = character(1) # optionally vector ), font.size = list( main = numeric(1), xlab = numeric(1), ylab1 = numeric(1), ylab2 = numeric(1), xtck = numeric(1), ytck1 = numeric(1), ytck2 = numeric(1), stats = numeric(1), # optionally vector legend = numeric(1) # optionally vector ), font.deco = list( main = character(1), xlab = character(1), ylab1 = character(1), ylab2 = character(1), xtck = character(1), ytck1 = character(1), ytck2 = character(1), stats = character(1), # optionally vector legend = character(1) # optionally vector ), colour = list( main = numeric(1), # plot title colour xlab = numeric(1), # x-axis label colour ylab1 = numeric(1), # primary y-axis label colour ylab2 = numeric(1), # secondary y-axis label colour xtck = numeric(1), # x-axis tick colour ytck1 = numeric(1), # primary y-axis tick colour ytck2 = numeric(1), # secondary y-axis tick colour box = numeric(1), # plot frame box line colour mtext = numeric(1), # subheader text colour stats = numeric(1), # statistic summary colour kde.line = numeric(1), # KDE line colour kde.fill = numeric(1), # KDE fill colour value.dot = numeric(1), # De value dot colour value.bar = numeric(1), # De value error bar colour value.rug = numeric(1), # De value rug colour boxplot.line = numeric(1), # boxplot line colour boxplot.fill = numeric(1), # 
boxplot fill colour mean.line = numeric(1), # mean line colour sd.bar = numeric(1), # sd-line colour background = numeric(1)), # background colour dimension = list( figure.width = numeric(1), # figure width in mm figure.height = numeric(1), # figure height in mm margin = numeric(4), # margin sizes in mm main.line = numeric(1), # line height in % xlab.line = numeric(1), # line height in % ylab1.line = numeric(1), # line height in % ylab2.line = numeric(1), # line height in % xtck.line = numeric(1), # line height in % ytck1.line = numeric(1), # line height in % ytck2.line = numeric(1), # line height in % xtcl = numeric(1), # tick length in % ytcl1 = numeric(1), # tick length in % ytcl2 = numeric(1), # tick length in % stats.line = numeric(1) # line height in % ) ) ) } else if(layout == "default") { layout = list( ## default Abanico plot ----------------------------------------------- abanico = list( font.type = list( main = "", xlab1 = "", xlab2 = "", ylab = "", zlab = "", xtck1 = "", xtck2 = "", xtck3 = "", ytck = "", ztck = "", mtext = "", summary = "", # optionally vector stats = "", # optionally vector legend = "" # optionally vector ), font.size = list( main = 12, xlab1 = 12, xlab2 = 12, xlab3 = 12, ylab = 12, zlab = 12, xtck1 = 12, xtck2 = 12, xtck3 = 12, ytck = 12, ztck = 12, mtext = 10, summary = 10, # optionally vector stats = 10, # optionally vector legend = 10 # optionally vector ), font.deco = list( main = "bold", xlab1 = "normal", xlab2 = "normal", xlab3 = "normal", ylab = "normal", zlab = "normal", xtck1 = "normal", xtck2 = "normal", xtck3 = "normal", ytck = "normal", ztck = "normal", mtext = "normal", summary = "normal", # optionally vector stats = "normal", # optionally vector legend = "normal" # optionally vector ), colour = list( main = 1, # plot title colour xlab1 = 1, # left x-axis label colour xlab2 = 1, # right x-axis label colour xlab3 = 1, # right x-axis label colour ylab = 1, # y-axis label colour zlab = 1, # z-axis label colour xtck1 = 1, # 
left x-axis tick colour xtck2 = 1, # right x-axis tick colour xtck3 = 1, # right x-axis tick colour ytck = 1, # y-axis tick colour ztck = 1, # z-axis tick colour mtext = 1, # subheader text colour summary = 1, # statistic summary colour stats = 1, # value statistics colour legend = 1, # legend colour centrality = 1, # Centrality line colour value.dot = 1, # De value dot colour value.bar = 1, # De value error bar colour value.rug = 1, # De value rug colour poly.line = NA, # polygon line colour poly.fill = adjustcolor("grey75", alpha.f = 0.6), # polygon fill colour bar.line = NA, # polygon line colour bar.fill = "grey60", # bar fill colour kde.line = 1, kde.fill = NA, grid.major = "grey80", grid.minor = "none", border = 1, background = NA), dimension = list( figure.width = "auto", # figure width in mm figure.height = "auto", # figure height in mm margin = c(10, 10, 10, 10), # margin sizes in mm main.line = 100, # line height in % xlab1.line = 90, # line height in % xlab2.line = 90, # line height in % xlab3.line = 90, # line height in % ylab.line = 100, # line height in % zlab.line = 70, # line height in % xtck1.line = 100, # line height in % xtck2.line = 100, # line height in % xtck3.line = 100, # line height in % ytck.line = 100, # line height in % ztck.line = 100, # line height in % xtcl1 = 100, # tick length in % xtcl2 = 100, # tick length in % xtcl3 = 100, # tick length in % ytcl = 100, # tick length in % ztcl = 100, # tick length in % rugl = 100, # rug length in % mtext = 100, # line height in % summary.line = 100 # line height in % )), ## default KDE plot --------------------------------------------------- kde = list( font.type = list( main = "", xlab = "", ylab1 = "", ylab2 = "", xtck = "", ytck1 = "", ytck2 = "", stats = "", # optionally vector legend = "" # optionally vector ), font.size = list( main = 14, xlab = 12, ylab1 = 12, ylab2 = 12, xtck = 12, ytck1 = 12, ytck2 = 12, stats = 12, # optionally vector legend = 12 # optionally vector ), font.deco = list( 
main = "bold", xlab = "normal", ylab1 = "normal", ylab2 = "normal", xtck = "normal", ytck1 = "normal", ytck2 = "normal", stats = "normal", # optionally vector legend = "normal" # optionally vector ), colour = list( main = 1, # plot title colour xlab = 1, # x-axis label colour ylab1 = 1, # primary y-axis label colour ylab2 = 1, # secondary y-axis label colour xtck = 1, # x-axis tick colour ytck1 = 1, # primary y-axis tick colour ytck2 = 1, # secondary y-axis tick colour box = 1, # plot frame box line colour mtext = 2, # subheader text colour stats = 1, # statistic summary colour kde.line = 1, # KDE line colour kde.fill = NULL, # KDE fill colour value.dot = 1, # De value dot colour value.bar = 1, # De value error bar colour value.rug = 1, # De value rug colour boxplot.line = 1, # boxplot line colour boxplot.fill = NULL, # boxplot fill colour mean.point = 1, # mean line colour sd.line = 1, # sd bar colour background = NULL), # background colour dimension = list( figure.width = "auto", # figure width in mm figure.height = "auto", # figure height in mm margin = c(10, 10, 10, 10), # margin sizes in mm main.line = 100, # line height in % xlab.line = 100, # line height in % ylab1.line = 100, # line height in % ylab2.line = 100, # line height in % xtck.line = 100, # line height in % ytck1.line = 100, # line height in % ytck2.line = 100, # line height in % xtcl = 100, # tick length in % ytcl1 = 100, # tick length in % ytcl2 = 100, # tick length in % stats.line = 100 # line height in % ) ) ) } else if(layout == "journal") { layout = list( ## journal Abanico plot ----------------------------------------------- abanico = list( font.type = list( main = "", xlab1 = "", xlab2 = "", ylab = "", zlab = "", xtck1 = "", xtck2 = "", xtck3 = "", ytck = "", ztck = "", mtext = "", summary = "", # optionally vector stats = "", # optionally vector legend = "" # optionally vector ), font.size = list( main = 8, xlab1 = 7, xlab2 = 7, xlab3 = 7, ylab = 7, zlab = 7, xtck1 = 7, xtck2 = 7, xtck3 = 
7, ytck = 7, ztck = 7, mtext = 6, summary = 6, # optionally vector stats = 6, # optionally vector legend = 6 # optionally vector ), font.deco = list( main = "bold", xlab1 = "normal", xlab2 = "normal", xlab3 = "normal", ylab = "normal", zlab = "normal", xtck1 = "normal", xtck2 = "normal", xtck3 = "normal", ytck = "normal", ztck = "normal", mtext = "normal", summary = "normal", # optionally vector stats = "normal", # optionally vector legend = "normal" # optionally vector ), colour = list( main = 1, # plot title colour xlab1 = 1, # left x-axis label colour xlab2 = 1, # right x-axis label colour xlab3 = 1, # right x-axis label colour ylab = 1, # y-axis label colour zlab = 1, # z-axis label colour xtck1 = 1, # left x-axis tick colour xtck2 = 1, # right x-axis tick colour xtck3 = 1, # right x-axis tick colour ytck = 1, # y-axis tick colour ztck = 1, # z-axis tick colour mtext = 1, # subheader text colour summary = 1, # statistic summary colour stats = 1, # value statistics colour legend = 1, # legend colour centrality = 1, # Centrality line colour value.dot = 1, # De value dot colour value.bar = 1, # De value error bar colour value.rug = 1, # De value rug colour poly.line = NA, # polygon line colour poly.fill = adjustcolor("grey75", alpha.f = 0.6), # polygon fill colour bar.line = NA, # polygon line colour bar.fill = "grey60", # bar fill colour kde.line = 1, kde.fill = NA, grid.major = "grey80", grid.minor = "none", border = 1, background = NA), dimension = list( figure.width = 100, # figure width in mm figure.height = 100, # figure height in mm margin = c(10, 10, 10, 10), # margin sizes in mm main.line = 70, # line height in % xlab1.line = 30, # line height in % xlab2.line = 65, # line height in % xlab3.line = 30, # line height in % ylab.line = 30, # line height in % zlab.line = 40, # line height in % xtck1.line = 50, # line height in % xtck2.line = 50, # line height in % xtck3.line = 50, # line height in % ytck.line = 70, # line height in % ztck.line = 70, # line 
height in % xtcl1 = 50, # tick length in % xtcl2 = 50, # tick length in % xtcl3 = 50, # tick length in % ytcl = 50, # tick length in % ztcl = 70, # tick length in % rugl = 70, # rug length in % mtext = 100, # line height in % summary.line = 70, # line height in % pch = 50 # point size in % )), ## journal KDE plot --------------------------------------------------- kde = list( font.type = list( main = "", xlab = "", ylab1 = "", ylab2 = "", xtck = "", ytck1 = "", ytck2 = "", stats = "", # optionally vector legend = "" # optionally vector ), font.size = list( main = 8, xlab = 7, ylab1 = 7, ylab2 = 7, xtck = 7, ytck1 = 7, ytck2 = 7, stats = 7, legend = 7 ), font.deco = list( main = "bold", xlab = "normal", ylab1 = "normal", ylab2 = "normal", xtck = "normal", ytck1 = "normal", ytck2 = "normal", stats = "normal", # optionally vector legend = "normal" # optionally vector ), colour = list( main = 1, # plot title colour xlab = 1, # x-axis label colour ylab1 = 1, # primary y-axis label colour ylab2 = 1, # secondary y-axis label colour xtck = 1, # x-axis tick colour ytck1 = 1, # primary y-axis tick colour ytck2 = 1, # secondary y-axis tick colour box = 1, # plot frame box line colour mtext = 1, # subheader text colour stats = "#2062B3", # statistic summary colour kde.line = "#2062B3", # KDE line colour kde.fill = NULL, # KDE fill colour value.dot = 1, # De value dot colour value.bar = 1, # De value error bar colour value.rug = 1, # De value rug colour boxplot.line = 1, # boxplot line colour boxplot.fill = NULL, # boxplot fill colour mean.line = adjustcolor(col = 1, alpha.f = 0.4), # mean line colour sd.bar = adjustcolor(col = 1, alpha.f = 0.4), # sd bar colour background = NULL), dimension = list( figure.width = 80, # figure width in mm figure.height = 80, # figure height in mm margin = c(10, 10, 10, 10), # margin sizes in mm main.line = 70, # line height in % xlab.line = 30, # line height in % ylab1.line = 40, # line height in % ylab2.line = 30, # line height in % xtck.line 
= 50, # line height in % ytck1.line = 65, # line height in % ytck2.line = 50, # line height in % xtcl = 50, # tick length in % ytcl1 = 20, # tick length in % ytcl2 = 50, # tick length in % stats.line = 70 # line height in % ) ) ) } else { warning("Layout definition not supported! Default layout is used.") layout <- get_Layout(layout = "default") } } else if(is.list(layout) == TRUE) { ## user-specific layout definition assignment layout <- layout } ## return layout parameters return(layout) } Luminescence/R/utils_DRAC.R0000644000176200001440000001762514236146743015166 0ustar liggesusers## FUNCTIONS ------------------------------------------------------------------- # subset the DRAC reference list # 'x' is the input table from use_DRAC() get_DRAC_references <- function(x) { refs <- DRAC_refs() refs_names <- names(refs) used <- list(refs = NULL, desc = NULL) # TI:4 - Conversion factors ref_tmp <- unique(x$`TI:4`) for (i in 1:length(ref_tmp)) { if (ref_tmp[i] == "X") next used$refs <- c(used$refs, refs[refs_names %in% ref_tmp[i]]) used$desc <- c(used$desc, "Conversion factors") } # TI:13 - External Rubidium ref_tmp <- unique(x$`TI:13`) if (any(ref_tmp == "Y")) { used$refs <- c(used$refs, refs["Mejdahl1987"]) used$desc <- c(used$desc, "External rubidium") } # TI:22 - Internal Rubidium ref_tmp <- unique(x$`TI:22`) if (any(ref_tmp == "Y")) { used$refs <- c(used$refs, refs["Mejdahl1987"]) used$desc <- c(used$desc, "Internal rubidium") } # TI:31 - Gamma dose rate scaling ref_tmp <- unique(x$`TI:31`) if (any(ref_tmp == "Y")) { used$refs <- c(used$refs, refs["Aitken1985"]) used$desc <- c(used$desc, "Gamma dose rate scaling") } # TI:34 - alpha grain size attenuation ref_tmp <- unique(x$`TI:34`) for (i in 1:length(ref_tmp)) { if (ref_tmp[i] == "X") next used$refs <- c(used$refs, refs[refs_names %in% ref_tmp[i]]) used$desc <- c(used$desc, "Alpha grain size attenuation factors") } # TI:35 - Beta grain size attenuation ref_tmp <- unique(x$`TI:35`) for (i in 1:length(ref_tmp)) { 
if (ref_tmp[i] == "X") next used$refs <- c(used$refs, refs[refs_names %in% ref_tmp[i]]) used$desc <- c(used$desc, "Beta grain size attenuation factors") } # TI:38 - beta etch attenuation factor ref_tmp <- unique(x$`TI:38`) for (i in 1:length(ref_tmp)) { if (ref_tmp[i] == "X") next used$refs <- c(used$refs, refs[refs_names %in% ref_tmp[i]]) used$desc <- c(used$desc, "Beta etch attenuation factor") } # TI:50 - Cosmic dose rate ref_tmp <- unique(x$`TI:50`) if (any(ref_tmp == "X")) { used$refs <- c(used$refs, refs[c("PrescottHutton1994", "PrescottStephan1982")]) used$desc <- c(used$desc, c("Cosmic dose rate", "Cosmic dose rate")) } return(used) } ## REFERENCE LIST -------------------------------------------------------------- DRAC_refs <- function() { list( Aitken1985 = bibentry( bibtype = "Book", author = person("M.J.", "Aitken"), title = "Thermoluminescence Dating", year = "1985", publisher = "Academic Press", adress = "London" ), AitkenXie1990 = bibentry( bibtype = "Article", author = c( person("M.J.", "Aitken"), person("J.", "Xie") ), title = "Moisture correction for annual gamma dose", year = "1990", journal = "Ancient TL", volume = "8", pages = "6-9" ), AdamiecAitken1998 = bibentry( bibtype = "Article", author = c( person("G.", "Adamiec"), person("M.J.", "Aitken") ), title = "Dose-rate conversion factors: update", year = "1998", journal = "Ancient TL", volume = "16", pages = "37-46" ), Guerinetal2011 = bibentry( bibtype = "Article", author = c( person("G.", "Guerin"), person("N.", "Mercier"), person("G.", "Adamiec") ), title = "Dose-rate conversion factors: update", year = "2011", journal = "Ancient TL", volume = "29", pages = "5-8" ), Liritzisetal2013 = bibentry( bibtype = "Article", author = c( person("I.", "Liritzis"), person("K.", "Stamoulis"), person("C.", "Papachristodoulou"), person("K.", "Ioannides") ), title = "A re-evaluation of radiation dose-rate conversion factors. 
", year = "2013", journal = "Mediterranean Archaeology and Archaeometry", volume = "13", pages = "1-15" ), Bell1979 = bibentry( bibtype = "Article", author = c( person("W.T.", "Bell") ), title = "Attenuation factors for the absorbed radiation dose in quartz inclusions for thermoluminescence dating", year = "1979", journal = "Ancient TL", volume = "8", pages = "1-12" ), Bell1980 = bibentry( bibtype = "Article", author = c( person("W.T.", "Bell") ), title = "Alpha attenuation in Quartz grains for Thermoluminescence Dating", year = "1980", journal = "Ancient TL", volume = "12", pages = "4-8" ), Brennanetal1991 = bibentry( bibtype = "Article", author = c( person("B.J.", "Brennan"), person("R.G.", "Lyons"), person("S.W.", "Phillips") ), title = "Attenuation of alpha particle track dose for spherical grains", year = "1991", journal = "International Journal of Radiation Applications and Instrumentation. Part D. Nuclear Tracks and Radiation Measurements", volume = "18", pages = "249-253" ), Mejdahl1979 = bibentry( bibtype = "Article", author = c( person("V.", "Mejdahl") ), title = "Thermoluminescence Dating: Beta-Dose Attenuation in Quartz Grains", year = "1979", journal = "Archaeometry", volume = "21", pages = "61-72" ), Mejdahl1987 = bibentry( bibtype = "Article", author = c( person("V.", "Mejdahl") ), title = "Internal radioactivity in quartz and feldspar grains", year = "1987", journal = "Ancient TL", volume = "5", pages = "10-17" ), Brennan2003 = bibentry( bibtype = "Article", author = c( person("B.J.", "Brennan") ), title = "Beta doses to spherical grains", year = "2003", journal = "Radiation Measurements", volume = "37", pages = "299-303" ), `Guerinetal2012-Q` = bibentry( bibtype = "Article", author = c( person("G.", "Guerin"), person("N.", "Mercier"), person("R.", "Nathan"), person("G.", "Adamiec"), person("Y.", "Lefrais") ), title = "On the use of the infinite matrix assumption and associated concepts: A critical review", year = "2012", journal = "Radiation 
Measurements", volume = "47", pages = "778-785" ), `Guerinetal2012-F` = bibentry( bibtype = "Article", author = c( person("G.", "Guerin"), person("N.", "Mercier"), person("R.", "Nathan"), person("G.", "Adamiec"), person("Y.", "Lefrais") ), title = "On the use of the infinite matrix assumption and associated concepts: A critical review", year = "2012", journal = "Radiation Measurements", volume = "47", pages = "778-785" ), PrescottHutton1994 = bibentry( bibtype = "Article", author = c( person("J.R.", "Prescott"), person("J.T.", "Hutton") ), title = "Cosmic ray contributions to dose rates for luminescence and ESR dating: Large depths and long-term time variations", year = "1994", journal = "Radiation Measurements", volume = "23", pages = "497-500" ), PrescottStephan1982 = bibentry( bibtype = "Article", author = c( person("J.R.", "Prescott"), person("L.G.", "Stephan") ), title = "The contribution of cosmic radiation to the environmental dose for thermoluminescence dating", year = "1982", journal = "PACT", volume = "6", pages = "17-25" ), Readhead2002 = bibentry( bibtype = "Article", author = c( person("M.L.", "ReadHead") ), title = "Absorbed dose fraction for 87Rb beta particles", year = "2002", journal = "Ancient TL", volume = "20", pages = "25-29" ) ) }Luminescence/R/plot_OSLAgeSummary.R0000644000176200001440000001157314264017373016714 0ustar liggesusers#'@title Plot Posterior OSL-Age Summary #' #'@description A graphical summary of the statistical inference of an OSL age #' #'@details The function is called automatically by [combine_De_Dr] #' #'@param object [RLum.Results-class], [numeric] (**required**): an object produced #' by [combine_De_Dr]. 
Alternatively, a [numeric] vector of a parameter from an MCMC process #' #'@param level [numeric] (*with default*): probability of shown credible interval #' #'@param digits [integer] (*with default*): number of digits considered for the calculation #' #'@param verbose [logical] (*with default*): enable/disable additional terminal output #' #'@param ... further arguments to modify the plot, supported: `xlim`, `ylim`, `xlab`, `ylab`, #' `main`, `lwd`, `lty`, `col`, `polygon_col`, `polygon_density`, `rug` #' #'@return A posterior distribution plot and an [RLum.Results-class] #' object with the credible interval. #' #'@author Anne Philippe, Université de Nantes (France), #' Jean-Michel Galharret, Université de Nantes (France), #' Norbert Mercier, IRAMAT-CRP2A, Université Bordeaux Montaigne (France), #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #'@section Function version: 0.1.0 #' #'@seealso [combine_De_Dr], [plot.default], [rjags::rjags] #' #'@keywords hplot dplot #' #'@examples #'##generate random data #'set.seed(1234) #'object <- rnorm(1000, 100, 10) #'plot_OSLAgeSummary(object) #' #'@md #'@export plot_OSLAgeSummary <- function( object, level = 0.95, digits = 1L, verbose = TRUE, ... ){ # Integrity tests --------------------------------------------------------- if(is(object, "RLum.Results") && object@originator %in% c(".calc_BayesianCentralAgeModel", ".calc_IndividualAgeModel")) object <- get_RLum(object, data.object = "A") if(is(object, "RLum.Results") && object@originator == "combine_De_Dr") object <- get_RLum(object, data.object = "Ages") if(!is(object, "numeric")) { stop(paste0("[plot_OSLAgeSummary()] class ", class(object)[1], " not supported as input for object!"),call. 
= FALSE) } ## A should be a matrix A <- as.matrix(object, ncol = 1) # Run calculations -------------------------------------------------------- ## use our internal function instead of Archaeophase to avoid the decency hell CI <- round(.calc_HPDI(A, prob = level[1]), digits[1]) Bayes_est_mean <- round(mean(A), digits = digits) Bayes_est_sd <- round(sd(A), digits = digits) # Terminal output --------------------------------------------------------- if(verbose){ cat("\n[plot_OSLAgeSummary()]\n") cat(paste0(" Credible Interval (", level * 100 ),"%): ",paste(CI[1,], collapse = " : "), "\n") cat(paste0(" Bayes estimate (posterior mean \u00b1 sd): ", Bayes_est_mean[1], " \u00b1 ", Bayes_est_sd[1]),"\n") } # Plot output ------------------------------------------------------------- density_A <- density(A) plot_settings <- modifyList(x = list( xlim = range(A), ylim = range(density_A$y), main = "Posterior distr. of A", xlab = "Age [ka]", ylab = "Density", lwd = 1, lty = 1, col = "black", polygon_col = rgb(1,0,0,0.3), polygon_density = 20, rug = FALSE ), val = list(...)) plot( x = density_A$x, y = density_A$y, xlim = plot_settings$xlim, ylim = plot_settings$ylim * 1.07, xlab = plot_settings$xlab, ylab = plot_settings$ylab, main = plot_settings$main, type = "l", lwd = plot_settings$lwd, lty = plot_settings$lty, col = plot_settings$col ) ## add lines on the top for the CI lines(x = c(CI[1,]), y = rep(par()$usr[4] * 0.92, 2)) lines(x = rep(CI[1,1], 2), y = c(par()$usr[4] * 0.91, par()$usr[4] * 0.92)) lines(x = rep(CI[1,2], 2), y = c(par()$usr[4] * 0.91, par()$usr[4] * 0.92)) ## add polygon fill polygon( x = c(density_A$x, rev(density_A$x)), y = c(density_A$y, rep(0, length(density_A$y))), col = plot_settings$polygon_col, lty = 0, density = NULL ) ## add CI xy_id <- density_A$x >= CI[1,1] & density_A$x <= CI[1,2] polygon( x = c(density_A$x[xy_id], rev(density_A$x[xy_id])), y = c(density_A$y[xy_id], rep(0, length(density_A$y[xy_id]))), col = "black", lwd = 0.5, border = TRUE, 
density = plot_settings$polygon_density ) ##add rug if(plot_settings$rug) rug(A) ## add text text(x = density_A$x[xy_id][1], y = density_A$y[xy_id][2], CI[1,1], pos = 2, cex = 0.6) text(x = max(density_A$x[xy_id]), y = rev(density_A$y[xy_id])[1], CI[1,2], pos = 4, cex = 0.6) text( x = median(density_A$x[xy_id]), y = par()$usr[4] * 0.91, labels = paste0("CI: ", level[1] * 100, "%"), pos = 3, cex = 0.6 ) # Return ------------------------------------------------------------------ return(set_RLum("RLum.Results", data = list( Estimate = Bayes_est_mean, Credible_Interval = CI, level = level), info = list(call = sys.call()))) } Luminescence/R/get_Risoe.BINfileData.R0000644000176200001440000000211114264017373017173 0ustar liggesusers#' General accessor function for RLum S4 class objects #' #' Function calls object-specific get functions for RisoeBINfileData S4 class objects. #' #' The function provides a generalised access point for specific #' [Risoe.BINfileData-class] objects. \cr #' Depending on the input object, the corresponding get function will be selected. #' Allowed arguments can be found in the documentations of the corresponding #' [Risoe.BINfileData-class] class. #' #' @param object [Risoe.BINfileData-class] (**required**): #' S4 object of class `RLum` #' #' @param ... further arguments that one might want to pass to the specific #' get function #' #' @return Return is the same as input objects as provided in the list #' #' @section Function version: 0.1.0 #' #' @author #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #' @seealso [Risoe.BINfileData-class] #' #' @keywords utilities #' #' @md #' @export setGeneric( name = "get_Risoe.BINfileData", def = function(object, ...) 
{ standardGeneric("get_Risoe.BINfileData") }, package = "Luminescence" ) Luminescence/R/calc_Statistics.R0000644000176200001440000001711414236146743016342 0ustar liggesusers#' Function to calculate statistic measures #' #' This function calculates a number of descriptive statistics for estimates #' with a given standard error (SE), most fundamentally using error-weighted approaches. #' #' The option to use Monte Carlo Methods (`n.MCM`) allows calculating #' all descriptive statistics based on random values. The distribution of these #' random values is based on the Normal distribution with `De` values as #' means and `De_error` values as one standard deviation. Increasing the #' number of MCM-samples linearly increases computation time. On a Lenovo X230 #' machine evaluation of 25 Aliquots with n.MCM = 1000 takes 0.01 s, with #' n = 100000, ca. 1.65 s. It might be useful to work with logarithms of these #' values. See Dietze et al. (2016, Quaternary Geochronology) and the function #' [plot_AbanicoPlot] for details. #' #' @param data [data.frame] or [RLum.Results-class] object (**required**): #' for [data.frame] two columns: De (`data[,1]`) and De error (`data[,2]`). #' To plot several data sets in one plot the data sets must be provided #' as `list`, e.g. `list(data.1, data.2)`. #' #' @param weight.calc [character]: #' type of weight calculation. One out of `"reciprocal"` (weight is 1/error), #' `"square"` (weight is 1/error^2). Default is `"square"`. #' #' @param digits [integer] (*with default*): #' round numbers to the specified digits. #' If digits is set to `NULL` nothing is rounded. #' #' @param n.MCM [numeric] (*with default*): #' number of samples drawn for Monte Carlo-based statistics. #' `NULL` (the default) disables MC runs. #' #' @param na.rm [logical] (*with default*): #' indicating whether `NA` values should be stripped before the computation proceeds. #' #' @return Returns a list with weighted and unweighted statistic measures. 
#' #' @section Function version: 0.1.7 #' #' @keywords datagen #' #' @author Michael Dietze, GFZ Potsdam (Germany) #' #' @examples #' #' ## load example data #' data(ExampleData.DeValues, envir = environment()) #' #' ## show a rough plot of the data to illustrate the non-normal distribution #' plot_KDE(ExampleData.DeValues$BT998) #' #' ## calculate statistics and show output #' str(calc_Statistics(ExampleData.DeValues$BT998)) #' #' \dontrun{ #' ## now the same for 10000 normal distributed random numbers with equal errors #' x <- as.data.frame(cbind(rnorm(n = 10^5, mean = 0, sd = 1), #' rep(0.001, 10^5))) #' #' ## note the congruent results for weighted and unweighted measures #' str(calc_Statistics(x)) #' } #' #' @md #' @export calc_Statistics <- function( data, weight.calc = "square", digits = NULL, n.MCM = NULL, na.rm = TRUE ) { ## Check input data if(is(data, "RLum.Results") == FALSE & is(data, "data.frame") == FALSE) { stop("[calc_Statistics()] Input data is neither of type 'data.frame' nor 'RLum.Results'", call. = FALSE) } else { if(is(data, "RLum.Results")) { data <- get_RLum(data, "data")[,1:2] } } ##strip na values if(na.rm){ data <- na.exclude(data) } ## handle error-free data sets if(ncol(data) == 1) { data <- cbind(data, rep(NA, length(data))) } ## replace Na values in error by 0 data[is.na(data[,2]),2] <- 0 if(sum(data[,2]) == 0) { warning("[calc_Statistics()] All errors are NA or zero! Automatically set to 10^-9!", call. = FALSE) data[,2] <- rep(x = 10^-9, length(data[,2])) } if(weight.calc == "reciprocal") { S.weights <- 1 / data[,2] } else if(weight.calc == "square") { S.weights <- 1 / data[,2]^2 } else { stop ("[calc_Statistics()] Weight calculation type not supported!", call. 
= FALSE) } S.weights <- S.weights / sum(S.weights) ## create MCM data if (is.null(n.MCM)) { data.MCM <- cbind(data[, 1]) } else { data.MCM <- matrix(data = rnorm( n = n.MCM * nrow(data), mean = data[, 1], sd = data[, 2] ), ncol = n.MCM) } ## calculate n S.n <- nrow(data) ## calculate mean S.mean <- mean(x = data[,1], na.rm = na.rm) S.wg.mean <- weighted.mean(x = data[,1], w = S.weights, n.rm = na.rm) S.m.mean <- mean(x = data.MCM, na.rm = na.rm) ## calculate median S.median <- median(x = data[,1], na.rm = na.rm) S.wg.median <- S.median S.m.median <- median(x = data.MCM, na.rm = na.rm) ## calculate absolute standard deviation S.sd.abs <- sd(x = data[,1], na.rm = na.rm) S.wg.sd.abs <- sqrt(sum(S.weights * (data[,1] - S.wg.mean)^2) / (((S.n - 1) * sum(S.weights)) / S.n)) S.m.sd.abs <- sd(x = data.MCM, na.rm = na.rm) ## calculate relative standard deviation S.sd.rel <- S.sd.abs / S.mean * 100 S.wg.sd.rel <- S.wg.sd.abs / S.wg.mean * 100 S.m.sd.rel <- S.m.sd.abs / S.m.mean * 100 ## calculate absolute standard error of the mean S.se.abs <- S.sd.abs / sqrt(S.n) S.wg.se.abs <- S.wg.sd.abs / sqrt(S.n) S.m.se.abs <- S.m.sd.abs / sqrt(S.n) ## calculate relative standard error of the mean S.se.rel <- S.se.abs / S.mean * 100 S.wg.se.rel <- S.wg.se.abs / S.wg.mean * 100 S.m.se.rel <- S.m.se.abs / S.m.mean * 100 ## calculate skewness S.skewness <- 1 / S.n * sum(((data[,1] - S.mean) / S.sd.abs)^3) S.m.skewness <- 1 / S.n * sum(((data.MCM - S.m.mean) / S.m.sd.abs)^3) ## calculate kurtosis S.kurtosis <- 1 / S.n * sum(((data[,1] - S.mean) / S.sd.abs)^4) S.m.kurtosis <- 1 / S.n * sum(((data.MCM - S.m.mean) / S.m.sd.abs)^4) ## create list objects of calculation output S.weighted <- list(n = S.n, mean = S.wg.mean, median = S.wg.median, sd.abs = S.wg.sd.abs, sd.rel = S.wg.sd.rel, se.abs = S.wg.se.abs, se.rel = S.wg.se.rel, skewness = S.skewness, kurtosis = S.kurtosis) if(!is.null(digits)) { S.weighted <- sapply(names(S.weighted), simplify = FALSE, USE.NAMES = TRUE, function(x) { 
round(S.weighted[[x]], digits = digits)}) } S.unweighted <- list(n = S.n, mean = S.mean, median = S.median, sd.abs = S.sd.abs, sd.rel = S.sd.rel, se.abs = S.se.abs, se.rel = S.se.rel, skewness = S.skewness, kurtosis = S.kurtosis) if(!is.null(digits)){ S.unweighted <- sapply(names(S.unweighted), simplify = FALSE, USE.NAMES = TRUE, function(x) { round(S.unweighted [[x]], digits = digits)}) } S.MCM <- list(n = S.n, mean = S.m.mean, median = S.m.median, sd.abs = S.m.sd.abs, sd.rel = S.m.sd.rel, se.abs = S.m.se.abs, se.rel = S.m.se.rel, skewness = S.m.skewness, kurtosis = S.m.kurtosis) if(!is.null(digits)){ S.MCM <- sapply(names(S.MCM), simplify = FALSE, USE.NAMES = TRUE, function(x) { round(S.MCM [[x]], digits = digits)}) } list(weighted = S.weighted, unweighted = S.unweighted, MCM = S.MCM) } Luminescence/R/PSL2Risoe.BINfileData.R0000644000176200001440000001737014236146743017015 0ustar liggesusers#' Convert portable OSL data to an Risoe.BINfileData object #' #' Converts an `RLum.Analysis` object produced by the function `read_PSL2R()` to #' an `Risoe.BINfileData` object **(BETA)**. #' #' This function converts an [RLum.Analysis-class] object that was produced #' by the [read_PSL2R] function to an [Risoe.BINfileData-class]. #' The `Risoe.BINfileData` can be used to write a Risoe BIN file via #' [write_R2BIN]. #' #' @param object [RLum.Analysis-class] (**required**): #' `RLum.Analysis` object produced by [read_PSL2R] #' #' @param ... currently not used. #' #' @return #' Returns an S4 [Risoe.BINfileData-class] object that can be used to write a #' BIN file using [write_R2BIN]. 
#' #' @seealso [RLum.Analysis-class], [RLum.Data.Curve-class], #' [Risoe.BINfileData-class] #' #' @author #' Christoph Burow, University of Cologne (Germany) #' #' @section Function version: 0.0.1 #' #' @keywords IO #' #' @examples #' #' # (1) load and plot example data set #' data("ExampleData.portableOSL", envir = environment()) #' plot_RLum(ExampleData.portableOSL) #' #' # (2) merge all RLum.Analysis objects into one #' merged <- merge_RLum(ExampleData.portableOSL) #' merged #' #' # (3) convert to RisoeBINfile object #' bin <- PSL2Risoe.BINfileData(merged) #' bin #' #' # (4) write Risoe BIN file #' \dontrun{ #' write_R2BIN(bin, "~/portableOSL.binx") #' } #' #' @md #' @export PSL2Risoe.BINfileData <- function(object, ...) { ## INTEGRITY CHECKS ---- if (!inherits(object, "RLum.Analysis")) stop("Only objects of class 'RLum.Analysis' are allowed.", call. = FALSE) if (!all(sapply(object, class) == "RLum.Data.Curve")) stop("The 'RLum.Analysis' object must only contain objects of class 'RLum.Data.Curve'.", call. = FALSE) if (!all(sapply(object, function(x) x@originator) == "read_PSL2R")) stop("Only objects originating from 'read_PSL2R()' are allowed.", call. 
= FALSE) ## EXTRACT CURVE INFORMATION ---- curves <- get_RLum(object) ## COLLECT META INFORMATION ---- META <- do.call(rbind, lapply(curves, function(x) { NPOINTS <- as.integer(x@info$settings$stimulation_time) LTYPE <- x@info$settings$stimulation_unit COMMENT <- x@info$settings$measurement HIGH <- x@info$settings$stimulation_time DATE <- format(x@info$settings$Date, format = "%d%m%y") TIME <- x@info$settings$Time if (nchar(TIME) < 8) TIME <- paste0("0", TIME) SAMPLE <- x@info$settings$Sample FNAME <- x@info$settings$Filename SEQUENCE <- strtrim(paste(x@info$settings$Run_Name, x@info$settings$Sample_no), 8) return(data.frame(NPOINTS = NPOINTS, LTYPE = LTYPE, COMMENT = COMMENT, HIGH = HIGH, DATE = DATE, TIME = TIME, SAMPLE = SAMPLE, FNAME = FNAME, SEQUENCE = SEQUENCE)) })) ## SAVE DATA ---- DATA <- lapply(curves, function(x) { as.integer(x@data[ ,2]) }) # SAVE METADATA ---- METADATA <- data.frame(ID = seq(1, length(curves), 1), SEL = rep(TRUE, length(curves)), VERSION = rep(7, length(curves)), LENGTH = 447 + 4 * META$NPOINTS, PREVIOUS = 447 + 4 * META$NPOINTS, NPOINTS = META$NPOINTS, RUN = seq(1, length(curves), 1), SET = rep(1, length(curves)), POSITION = rep(1, length(curves)), GRAIN = rep(0, length(curves)), GRAINNUMBER = rep(0, length(curves)), CURVENO = rep(0, length(curves)), XCOORD = rep(0, length(curves)), YCOORD = rep(0, length(curves)), SAMPLE = META$SAMPLE, COMMENT = META$COMMENT, SYSTEMID = rep(0, length(curves)), FNAME = META$FNAME, USER = rep("RLum", length(curves)), TIME = META$TIME, DATE = META$DATE, DTYPE = rep("Natural", length(curves)), BL_TIME = rep(0, length(curves)), BL_UNIT = rep(0, length(curves)), NORM1 = rep(0, length(curves)), NORM2 = rep(0, length(curves)), NORM3 = rep(0, length(curves)), BG = rep(0, length(curves)), SHIFT = rep(0, length(curves)), TAG = rep(1, length(curves)), LTYPE = META$LTYPE, LIGHTSOURCE = rep("None", length(curves)), LPOWER = rep(100, length(curves)), LIGHTPOWER = rep(100, length(curves)), LOW = rep(0, 
length(curves)), HIGH = META$HIGH, RATE = rep(0, length(curves)), TEMPERATURE = rep(0, length(curves)), MEASTEMP = rep(0, length(curves)), AN_TEMP = rep(0, length(curves)), AN_TIME = rep(0, length(curves)), TOLDELAY = rep(0, length(curves)), TOLON = rep(0, length(curves)), TOLOFF = rep(0, length(curves)), IRR_TIME = rep(0, length(curves)), IRR_TYPE = rep(0L, length(curves)), IRR_UNIT = rep(0, length(curves)), IRR_DOSERATE = rep(0, length(curves)), IRR_DOSERATEERR = rep(0, length(curves)), TIMESINCEIRR = rep(-1, length(curves)), TIMETICK = rep(1e-07, length(curves)), ONTIME = rep(0, length(curves)), OFFTIME = rep(NA, length(curves)), STIMPERIOD = rep(0, length(curves)), GATE_ENABLED = rep(0, length(curves)), ENABLE_FLAGS = rep(0, length(curves)), GATE_START = rep(0, length(curves)), GATE_STOP = rep(0, length(curves)), PTENABLED = rep(0, length(curves)), DTENABLED = rep(0, length(curves)), DEADTIME = rep(0, length(curves)), MAXLPOWER = rep(0, length(curves)), XRF_ACQTIME = rep(0, length(curves)), XRF_HV = rep(0, length(curves)), XRF_CURR = rep(0, length(curves)), XRF_DEADTIMEF = rep(0, length(curves)), SEQUENCE = META$SEQUENCE, DETECTOR_ID = rep(NA, length(curves)), LOWERFILTER_ID = rep(NA, length(curves)), UPPERFILTER_ID = rep(NA, length(curves)), ENOISEFACTOR = rep(NA, length(curves)), MARKPOS_X1 = rep(0, length(curves)), MARKPOS_Y1 = rep(0, length(curves)), MARKPOS_X2 = rep(0, length(curves)), MARKPOS_Y2 = rep(0, length(curves)), MARKPOS_X3 = rep(0, length(curves)), MARKPOS_Y3 = rep(0, length(curves)), EXTR_START = rep(0, length(curves)), EXTR_END = rep(0, length(curves)), RECTYPE = rep(0, length(curves))) ## CREATE Risoe.BINfileData OBJECT ---- bin <- set_Risoe.BINfileData(METADATA = METADATA, DATA = DATA, .RESERVED = list()) ## RETURN VALUE ---- return(bin) } Luminescence/R/verify_SingleGrainData.R0000644000176200001440000003443114521207343017576 0ustar liggesusers#' Verify single grain data sets and check for invalid grains, i.e. 
#' zero-light level grains #' #' This function tries to identify automatically zero-light level curves (grains) #' from single grain data measurements. #' #' **How does the method work?** #' #' The function compares the expected values (\eqn{E(X)}) and the variance #' (\eqn{Var(X)}) of the count values for each curve. Assuming that the #' background roughly follows a Poisson distribution the absolute difference #' of both values should be zero or at least around zero as #' #' \deqn{E(x) = Var(x) = \lambda} #' #' Thus the function checks for: #' #' \deqn{abs(E(x) - Var(x)) >= \Theta} #' #' With \eqn{\Theta} an arbitrary, user defined, threshold. Values above the #' threshold indicating curves comprising a signal. #' #' Note: the absolute difference of \eqn{E(X)} and \eqn{Var(x)} instead of the #' ratio was chosen as both terms can become 0 which would result in 0 or `Inf`, #' if the ratio is calculated. #' #' @param object [Risoe.BINfileData-class] or [RLum.Analysis-class] (**required**): #' input object. The function also accepts a list with objects of allowed type. #' #' @param threshold [numeric] (*with default*): #' numeric threshold value for the allowed difference between the `mean` and #' the `var` of the count values (see details) #' #' @param cleanup [logical] (*with default*): #' if set to `TRUE` curves identified as zero light level curves are #' automatically removed. Output is an object as same type as the input, i.e. #' either [Risoe.BINfileData-class] or [RLum.Analysis-class] #' #' @param cleanup_level [character] (*with default*): #' selects the level for the cleanup of the input data sets. #' Two options are allowed: `"curve"` or `"aliquot"`: #' #' - If `"curve"` is selected every single curve marked as `invalid` is removed. #' - If `"aliquot"` is selected, curves of one aliquot (grain or disc) can be #' marked as invalid, but will not be removed. An aliquot will be only removed #' if all curves of this aliquot are marked as invalid. 
#' #' @param verbose [logical] (*with default*): #' enables or disables the terminal feedback #' #' @param plot [logical] (*with default*): #' enables or disables the graphical feedback #' #' @return #' The function returns #' #' -----------------------------------\cr #' `[ NUMERICAL OUTPUT ]`\cr #' -----------------------------------\cr #' #' **`RLum.Results`**-object #' #' **slot:****`@data`** #' #' \tabular{lll}{ #' **Element** \tab **Type** \tab **Description**\cr #' `$unique_pairs` \tab `data.frame` \tab the unique position and grain pairs \cr #' `$selection_id` \tab `numeric` \tab the selection as record ID \cr #' `$selection_full` \tab `data.frame` \tab implemented models used in the baSAR-model core \cr #' } #' #' **slot:****`@info`** #' #' The original function call #' #' **Output variation** #' #' For `cleanup = TRUE` the same object as the input is returned, but cleaned up #' (invalid curves were removed). This means: Either an [Risoe.BINfileData-class] #' or an [RLum.Analysis-class] object is returned in such cases. #' An [Risoe.BINfileData-class] object can be exported to a BIN-file by #' using the function [write_R2BIN]. #' #' @note #' This function can work with [Risoe.BINfileData-class] objects or #' [RLum.Analysis-class] objects (or a list of it). However, the function is #' highly optimised for [Risoe.BINfileData-class] objects as it make sense to #' remove identify invalid grains before the conversion to an #' [RLum.Analysis-class] object. #' #' The function checking for invalid curves works rather robust and it is likely #' that Reg0 curves within a SAR cycle are removed as well. Therefore it is #' strongly recommended to use the argument `cleanup = TRUE` carefully. 
#' #' @section Function version: 0.2.2 #' #' #' @author #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #' #' @seealso [Risoe.BINfileData-class], [RLum.Analysis-class], [write_R2BIN], #' [read_BIN2R] #' #' @keywords manip datagen #' #' @examples #' #' ##01 - basic example I #' ##just show how to apply the function #' data(ExampleData.XSYG, envir = environment()) #' #' ##verify and get data.frame out of it #' verify_SingleGrainData(OSL.SARMeasurement$Sequence.Object)$selection_full #' #' ##02 - basic example II #' data(ExampleData.BINfileData, envir = environment()) #' id <- verify_SingleGrainData(object = CWOSL.SAR.Data, #' cleanup_level = "aliquot")$selection_id #' #' \dontrun{ #' ##03 - advanced example I #' ##importing and exporting a BIN-file #' #' ##select and import file #' file <- file.choose() #' object <- read_BIN2R(file) #' #' ##remove invalid aliquots(!) #' object <- verify_SingleGrainData(object, cleanup = TRUE) #' #' ##export to new BIN-file #' write_R2BIN(object, paste0(dirname(file),"/", basename(file), "_CLEANED.BIN")) #' } #' #' @md #' @export verify_SingleGrainData <- function( object, threshold = 10, cleanup = FALSE, cleanup_level = 'aliquot', verbose = TRUE, plot = FALSE ){ ##three types of input are allowed: ##(1) RisoeBINfileData ##(2) RLum.Analysis ##(3) List of RLum.Analysis # Self Call ----------------------------------------------------------------------------------- if(is(object, "list")){ results <- .warningCatcher(lapply(1:length(object), function(x) { verify_SingleGrainData( object = object[[x]], threshold = threshold, cleanup = cleanup, cleanup_level = cleanup_level, verbose = verbose, plot = plot ) })) ##account for cleanup if(cleanup){ return(results) }else{ return(merge_RLum(results)) } } ##++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ ##RisoeBINfileData if(is(object, "Risoe.BINfileData")){ ##run test on DATA slot ##MEAN + SD temp.results_matrix <- 
lapply(X = object@DATA, FUN = function(x){ c(mean(x), var(x)) }) temp.results_matrix <- do.call(rbind, temp.results_matrix) ##DIFF temp.results_matrix_RATIO <- temp.results_matrix[,2]/temp.results_matrix[,1] ##SEL temp.results_matrix_VALID <- temp.results_matrix_RATIO > threshold ##combine everything to in a data.frame selection <- data.frame( POSITION = object@METADATA$POSITION, GRAIN = object@METADATA$GRAIN, MEAN = temp.results_matrix[, 1], VAR = temp.results_matrix[, 2], RATIO = temp.results_matrix_RATIO, THRESHOLD = rep_len(threshold, length(object@DATA)), VALID = temp.results_matrix_VALID ) ##get unique pairs for POSITION and GRAIN for VALID == TRUE unique_pairs <- unique( selection[selection[["VALID"]], c("POSITION", "GRAIN")]) if(cleanup_level == "aliquot"){ selection_id <- sort(unlist(lapply(1:nrow(unique_pairs), function(x) { which( .subset2(selection, 1) == .subset2(unique_pairs, 1)[x] & .subset2(selection, 2) == .subset2(unique_pairs, 2)[x] ) }))) }else{ ##reduce data to TRUE selection selection_id <- which(selection[["VALID"]]) } ##select output on the chosen input if(cleanup){ ##selected wanted elements object@DATA <- object@DATA[selection_id] object@METADATA <- object@METADATA[selection_id,] object@METADATA$ID <- 1:length(object@DATA) ##print message selection_id <- paste(selection_id, collapse = ", ") if(verbose){ cat(paste0("\n[verify_SingleGrainData()] Risoe.BINfileData object reduced to records: \n", selection_id)) cat("\n\n[verify_SingleGrainData()] Risoe.BINfileData object record index reset.") } ##return return_object <- object }else{ return_object <- set_RLum( class = "RLum.Results", data = list( unique_pairs = unique_pairs, selection_id = selection_id, selection_full = selection), info = list(call = sys.call()) ) } ##++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ ##RLum.Analysis and list with RLum.Analysis objects ## ... 
and yes it make sense not to mix that up with the code above }else if(is(object,"RLum.Analysis")){ ##first extract all count values from all curves object_list <- lapply(get_RLum(object), function(x){ ##yes, would work differently, but it is faster x@data[,2] }) ##MEAN + SD temp.results_matrix <- lapply(X = object_list, FUN = function(x){ c(mean(x), var(x)) }) temp.results_matrix <- do.call(rbind, temp.results_matrix) ##DIFF temp.results_matrix_RATIO <- temp.results_matrix[,2]/temp.results_matrix[,1] ##SEL temp.results_matrix_VALID <- temp.results_matrix_RATIO > threshold ##get structure for the RLum.Analysis object temp_structure <- structure_RLum(object, fullExtent = TRUE) ##now we have two cases, depending on where measurement is coming from if (object@originator == "Risoe.BINfileData2RLum.Analysis") { ##combine everything to in a data.frame selection <- data.frame( POSITION = temp_structure$info.POSITION, GRAIN = temp_structure$info.GRAIN, MEAN = temp.results_matrix[, 1], VAR = temp.results_matrix[, 2], RATIO = temp.results_matrix_RATIO, THRESHOLD = rep_len(threshold, length(object_list)), VALID = temp.results_matrix_VALID ) ##get unique pairs for POSITION and GRAIN for VALID == TRUE unique_pairs <- unique( selection[selection[["VALID"]], c("POSITION", "GRAIN")]) } else if (object@originator == "read_XSYG2R") { ##combine everything to in a data.frame selection <- data.frame( POSITION = if(any(grepl(pattern = "position", names(temp_structure)))){ temp_structure$info.position}else{ NA }, GRAIN = NA, MEAN = temp.results_matrix[, 1], VAR = temp.results_matrix[, 2], RATIO = temp.results_matrix_RATIO, THRESHOLD = rep_len(threshold, length(object_list)), VALID = temp.results_matrix_VALID ) ##get unique pairs for POSITION for VALID == TRUE unique_pairs <- unique( selection[["POSITION"]][selection[["VALID"]]]) } else{ stop("[verify_SingleGrainData()] I don't know what to do object 'originator' not supported!", call. 
= FALSE) } ##set up cleanup if(cleanup_level == "aliquot") { if (object@originator == "read_XSYG2R") { if(!is.na(unique_pairs)){ selection_id <- sort(unlist(lapply(1:nrow(unique_pairs), function(x) { which(.subset2(selection, 1) == .subset2(unique_pairs, 1)[x]) }))) }else{ selection_id <- NA } } else if (object@originator == "Risoe.BINfileData2RLum.Analysis") { selection_id <- sort(unlist(lapply(1:nrow(unique_pairs), function(x) { which( .subset2(selection, 1) == .subset2(unique_pairs, 1)[x] & .subset2(selection, 2) == .subset2(unique_pairs, 2)[x] ) }))) } ##make sure that we do not break subsequent code if(length(selection_id) == 0) selection_id <- NA } else{ ##reduce data to TRUE selection selection_id <- which(selection[["VALID"]]) } ##return value ##select output on the chosen input if(cleanup && !any(is.na(selection_id))){ ##print message if(verbose){ selection_id_text <- paste(selection_id, collapse = ", ") cat(paste0("\n[verify_SingleGrainData()] RLum.Analysis object reduced to records: ", selection_id_text)) } ##selected wanted elements if (length(selection_id) == 0) { object <- set_RLum( class = "RLum.Analysis", originator = object@originator, protocol = object@protocol, records = list(), info = list( unique_pairs = unique_pairs, selection_id = selection_id, selection_full = selection) ) } else{ object <- set_RLum( class = "RLum.Analysis", records = get_RLum(object, record.id = selection_id, drop = FALSE), info = list( unique_pairs = unique_pairs, selection_id = selection_id, selection_full = selection) ) } ##return return_object <- object }else{ if(any(is.na(selection_id))){ warning("[verify_SingleGrainData()] selection_id is NA, nothing removed, everything selected!", call. 
= FALSE) } return_object <- set_RLum( class = "RLum.Results", data = list( unique_pairs = unique_pairs, selection_id = selection_id, selection_full = selection), info = list(call = sys.call()) ) } }else{ stop(paste0("[verify_SingleGrainData()] Input type '", is(object)[1], "' is not allowed for this function!"), call. = FALSE) } # Plot ---------------------------------------------------------------------------------------- if(plot){ ##plot area plot( NA, NA, xlim = c(1,nrow(selection)), ylim = range(selection[["RATIO"]]), log = "y", xlab = "Record index", ylab = "Calculated ratio [a.u.]", main = "Record selection" ) ##plot points above the threshold points(x = which(selection[["VALID"]]), y = selection[["RATIO"]][selection[["VALID"]]], pch = 20, col = "darkgreen") points(x = which(!selection[["VALID"]]), y = selection[["RATIO"]][!selection[["VALID"]]], pch = 20, col = rgb(0,0,0,0.5)) abline(h = threshold, col = "red", lty = 1, lwd = 2) mtext( side = 3, text = paste0( "(total: ", nrow(selection), " | valid: ", length(which(selection[["VALID"]])), " | invalid: ", length(which(!selection[["VALID"]])), ")"), cex = 0.9 * par()$cex) } # Return -------------------------------------------------------------------------------------- return(return_object) } Luminescence/R/CW2pHMi.R0000644000176200001440000002622614264017373014400 0ustar liggesusers#' Transform a CW-OSL curve into a pHM-OSL curve via interpolation under #' hyperbolic modulation conditions #' #' This function transforms a conventionally measured continuous-wave (CW) #' OSL-curve to a pseudo hyperbolic modulated (pHM) curve under hyperbolic #' modulation conditions using the interpolation procedure described by Bos & #' Wallinga (2012). #' #' The complete procedure of the transformation is described in Bos & Wallinga #' (2012). 
The input `data.frame` consists of two columns: time (t) and #' count values (CW(t)) #' #' **Internal transformation steps** #' #' (1) log(CW-OSL) values #' #' (2) #' Calculate t' which is the transformed time: #' \deqn{t' = t-(1/\delta)*log(1+\delta*t)} #' #' (3) #' Interpolate CW(t'), i.e. use the log(CW(t)) to obtain the count values #' for the transformed time (t'). Values beyond `min(t)` and `max(t)` #' produce `NA` values. #' #' (4) #' Select all values for t' < `min(t)`, i.e. values beyond the time #' resolution of t. Select the first two values of the transformed data set #' which contain no `NA` values and use these values for a linear fit #' using [lm]. #' #' (5) #' Extrapolate values for t' < `min(t)` based on the previously #' obtained fit parameters. #' #' (6) #' Transform values using #' \deqn{pHM(t) = (\delta*t/(1+\delta*t))*c*CW(t')} #' \deqn{c = (1+\delta*P)/\delta*P} #' \deqn{P = length(stimulation~period)} #' #' (7) Combine all values and truncate all values for t' > `max(t)` #' #' #' **NOTE:** #' The number of values for t' < `min(t)` depends on the stimulation rate #' parameter `delta`. To avoid the production of too many artificial data #' at the raising tail of the determined pHM curve, it is recommended to use #' the automatic estimation routine for `delta`, i.e. provide no value for #' `delta`. #' #' @param values [RLum.Data.Curve-class] or [data.frame] (**required**): #' [RLum.Data.Curve-class] or [data.frame] with measured curve data of type #' stimulation time (t) (`values[,1]`) and measured counts (cts) (`values[,2]`). #' #' @param delta [vector] (*optional*): #' stimulation rate parameter, if no value is given, the optimal value is #' estimated automatically (see details). Smaller values of delta produce more #' points in the rising tail of #' the curve. #' #' @return #' The function returns the same data type as the input data type with #' the transformed curve values. 
#' #' #' **`RLum.Data.Curve`** #' #' \tabular{ll}{ #' `$CW2pHMi.x.t` \tab: transformed time values \cr #' `$CW2pHMi.method` \tab: used method for the production of the new data points #' } #' #' **`data.frame`** #' #' \tabular{ll}{ #' `$x` \tab: time\cr #' `$y.t` \tab: transformed count values\cr #' `$x.t` \tab: transformed time values \cr #' `$method` \tab: used method for the production of the new data points #' } #' #' @note #' According to Bos & Wallinga (2012), the number of extrapolated points #' should be limited to avoid artificial intensity data. If `delta` is #' provided manually and more than two points are extrapolated, a warning #' message is returned. #' #' The function [approx] may produce some `Inf` and `NaN` data. #' The function tries to manually interpolate these values by calculating #' the `mean` using the adjacent channels. If two invalid values are succeeding, #' the values are removed and no further interpolation is attempted. #' In every case a warning message is shown. #' #' @section Function version: 0.2.2 #' #' @author #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)\cr #' Based on comments and suggestions from:\cr #' Adrie J.J. Bos, Delft University of Technology, The Netherlands #' #' @seealso [CW2pLM], [CW2pLMi], [CW2pPMi], [fit_LMCurve], [lm], #' [RLum.Data.Curve-class] #' #' @references #' Bos, A.J.J. & Wallinga, J., 2012. How to visualize quartz OSL #' signal components. Radiation Measurements, 47, 752-758.\cr #' #' **Further Reading** #' #' Bulur, E., 1996. An Alternative Technique For #' Optically Stimulated Luminescence (OSL) Experiment. Radiation Measurements, #' 26, 701-709. #' #' Bulur, E., 2000. A simple transformation for converting CW-OSL curves to #' LM-OSL curves. Radiation Measurements, 32, 141-145. 
#' #' @keywords manip #' #' @examples #' #' ##(1) - simple transformation #' #' ##load CW-OSL curve data #' data(ExampleData.CW_OSL_Curve, envir = environment()) #' #' ##transform values #' values.transformed<-CW2pHMi(ExampleData.CW_OSL_Curve) #' #' ##plot #' plot(values.transformed$x, values.transformed$y.t, log = "x") #' #' ##(2) - load CW-OSL curve from BIN-file and plot transformed values #' #' ##load BINfile #' #BINfileData<-readBIN2R("[path to BIN-file]") #' data(ExampleData.BINfileData, envir = environment()) #' #' ##grep first CW-OSL curve from ALQ 1 #' curve.ID<-CWOSL.SAR.Data@@METADATA[CWOSL.SAR.Data@@METADATA[,"LTYPE"]=="OSL" & #' CWOSL.SAR.Data@@METADATA[,"POSITION"]==1 #' ,"ID"] #' #' curve.HIGH<-CWOSL.SAR.Data@@METADATA[CWOSL.SAR.Data@@METADATA[,"ID"]==curve.ID[1] #' ,"HIGH"] #' #' curve.NPOINTS<-CWOSL.SAR.Data@@METADATA[CWOSL.SAR.Data@@METADATA[,"ID"]==curve.ID[1] #' ,"NPOINTS"] #' #' ##combine curve to data set #' #' curve<-data.frame(x = seq(curve.HIGH/curve.NPOINTS,curve.HIGH, #' by = curve.HIGH/curve.NPOINTS), #' y=unlist(CWOSL.SAR.Data@@DATA[curve.ID[1]])) #' #' #' ##transform values #' #' curve.transformed <- CW2pHMi(curve) #' #' ##plot curve #' plot(curve.transformed$x, curve.transformed$y.t, log = "x") #' #' #' ##(3) - produce Fig. 4 from Bos & Wallinga (2012) #' #' ##load data #' data(ExampleData.CW_OSL_Curve, envir = environment()) #' values <- CW_Curve.BosWallinga2012 #' #' ##open plot area #' plot(NA, NA, #' xlim=c(0.001,10), #' ylim=c(0,8000), #' ylab="pseudo OSL (cts/0.01 s)", #' xlab="t [s]", #' log="x", #' main="Fig. 
4 - Bos & Wallinga (2012)") #' #' values.t<-CW2pLMi(values, P=1/20) #' lines(values[1:length(values.t[,1]),1],CW2pLMi(values, P=1/20)[,2], #' col="red" ,lwd=1.3) #' text(0.03,4500,"LM", col="red" ,cex=.8) #' #' values.t<-CW2pHMi(values, delta=40) #' lines(values[1:length(values.t[,1]),1],CW2pHMi(values, delta=40)[,2], #' col="black", lwd=1.3) #' text(0.005,3000,"HM", cex=.8) #' #' values.t<-CW2pPMi(values, P=1/10) #' lines(values[1:length(values.t[,1]),1],CW2pPMi(values, P=1/10)[,2], #' col="blue", lwd=1.3) #' text(0.5,6500,"PM", col="blue" ,cex=.8) #' #' @md #' @export CW2pHMi<- function( values, delta ){ ##(1) data.frame or RLum.Data.Curve object? if(is(values, "data.frame") == FALSE & is(values, "RLum.Data.Curve") == FALSE){ stop("[CW2pHMi()] 'values' object has to be of type 'data.frame' or 'RLum.Data.Curve'!", call. = FALSE) } ##(2) if the input object is an 'RLum.Data.Curve' object check for allowed curves if(is(values, "RLum.Data.Curve") == TRUE){ if(!grepl("OSL", values@recordType) & !grepl("IRSL", values@recordType)){ stop(paste("[CW2pHMi()] recordType ",values@recordType, " is not allowed for the transformation!", sep=""), call. 
= FALSE)

    }else{
      temp.values <- as(values, "data.frame")
    }

  }else{
    temp.values <- values
  }

  # (1) Transform values ------------------------------------------------------

  ##log transformation of the CW-OSL count values
  CW_OSL.log <- log(temp.values[,2])

  ##time transformation t >> t'
  t <- temp.values[,1]

  ##set delta
  ##if no value for delta is given, select a delta value that produces a
  ##maximum of two extrapolation points (cf. Bos & Wallinga 2012)
  if(missing(delta) == TRUE){
    i <- 10
    delta <- i
    t.transformed <- t - (1/delta) * log(1 + delta * t)

    ##NOTE(review): in the received source the comparison operators of this
    ##condition were stripped (the span '< min(t)]) >' was likely eaten as
    ##markup); reconstructed so that no more than two transformed time values
    ##fall below the time resolution min(t)
    while(length(t.transformed[t.transformed < min(t)]) > 2){
      delta <- i
      t.transformed <- t - (1/delta) * log(1 + delta * t)
      i <- i + 10
    }

  }else{
    t.transformed <- t - (1/delta) * log(1 + delta * t)
  }

  # (2) Interpolation ---------------------------------------------------------

  ##interpolate values, values beyond the range return NA values
  CW_OSL.interpolated <- approx(t, CW_OSL.log, xout = t.transformed, rule = 1)

  ##combine t.transformed and CW_OSL.interpolated in a data.frame
  temp <- data.frame(x = t.transformed, y = unlist(CW_OSL.interpolated$y))

  ##Problem: in some cases the interpolation algorithm is not working properly
  ##and Inf or NaN values are returned

  ##fetch row number of the invalid values
  invalid_values.id <- c(which(is.infinite(temp[,2]) | is.nan(temp[,2])))

  if(length(invalid_values.id) > 0){
    warning(paste(length(invalid_values.id)," values have been found and replaced the mean of the nearest values."
    ))
  }

  ##interpolate between the lower and the upper value
  invalid_values.interpolated <- sapply(1:length(invalid_values.id),
    function(x) {
      mean(c(temp[invalid_values.id[x] - 1, 2], temp[invalid_values.id[x] + 1, 2]))
    }
  )

  ##replace invalid values in data.frame with newly interpolated values
  if(length(invalid_values.id) > 0){
    temp[invalid_values.id, 2] <- invalid_values.interpolated
  }

  # (3) Extrapolate first values of the curve ---------------------------------

  ##(a) - find index of first rows which contain NA values (needed for extrapolation)
  temp.sel.id <- min(which(is.na(temp[,2]) == FALSE))

  ##(b) - fit linear function
  fit.lm <- lm(y ~ x, data.frame(x = t[1:2], y = CW_OSL.log[1:2]))

  ##select values to extrapolate and predict (extrapolate) values based on the fitted function
  x.i <- data.frame(x = temp[1:(min(temp.sel.id) - 1), 1])
  y.i <- predict(fit.lm, x.i)

  ##replace NA values by extrapolated values
  temp[1:length(y.i), 2] <- y.i

  ##set method values
  temp.method <- c(rep("extrapolation", length(y.i)),
                   rep("interpolation", (length(temp[,2]) - length(y.i))))

  ##print a warning message for more than two extrapolation points
  if(length(y.i) > 2){
    warning("t' is beyond the time resolution and more than two data points have been extrapolated!")
  }

  # (4) Convert, transform and combine values ---------------------------------

  ##unlog CW-OSL count values, i.e.
log(CW) >> CW CW_OSL<-exp(temp$y) ##set values for c and P ##P is the stimulation period P<-max(temp.values[,1]) ##c is a dimensionless constant c<-(1+(delta*P))/(delta*P) ##transform CW-OSL values to pLM-OSL values pHM<-((delta*t)/(1+(delta*t)))*c*CW_OSL ##combine all values and exclude NA values temp.values <- data.frame(x=t,y.t=pHM,x.t=t.transformed,method=temp.method) temp.values <- na.exclude(temp.values) # (5) Return values --------------------------------------------------------- ##returns the same data type as the input if(is(values, "data.frame") == TRUE){ values <- temp.values return(values) }else{ ##add old info elements to new info elements temp.info <- c(values@info, CW2pHMi.x.t = list(temp.values$x.t), CW2pHMi.method = list(temp.values$method)) newRLumDataCurves.CW2pHMi <- set_RLum( class = "RLum.Data.Curve", recordType = values@recordType, data = as.matrix(temp.values[,1:2]), info = temp.info) return(newRLumDataCurves.CW2pHMi) } } Luminescence/R/calc_Huntley2006.R0000644000176200001440000011543314521207352016142 0ustar liggesusers#' @title Apply the Huntley (2006) model #' #' @description #' A function to calculate the expected sample specific fraction of saturation #' based on the model of Huntley (2006) using the approach as implemented #' in Kars et al. (2008) or Guralnik et al. (2015). #' #' @details #' #' This function applies the approach described in Kars et al. (2008) or Guralnik et al. (2015), #' which are both developed from the model of Huntley (2006) to calculate the expected sample #' specific fraction of saturation of a feldspar and also to calculate fading #' corrected age using this model. \eqn{\rho}' (`rhop`), the density of recombination #' centres, is a crucial parameter of this model and must be determined #' separately from a fading measurement. The function [analyse_FadingMeasurement] #' can be used to calculate the sample specific \eqn{\rho}' value. #' #' **Kars et al. 
(2008) - Single saturating exponential** #' #' To apply the approach after Kars et al. (2008) use `fit.method = "EXP"`. #' #' Firstly, the unfaded \eqn{D_0} value is determined through applying equation 5 of #' Kars et al. (2008) to the measured \eqn{\frac{L_x}{T_x}} data as a function of irradiation #' time, and fitting the data with a single saturating exponential of the form: #' #' \deqn{LxTx(t^*) = A \phi(t^*) \{1 - exp(-\frac{t^*}{D_0}))\}} #' #' where #' #' \deqn{\phi(t^*) = exp(-\rho' ln(1.8 \tilde{s} t^*)^3)} #' #' after King et al. (2016) where \eqn{A} is a pre-exponential factor, #' \eqn{t^*} (s) is the irradiation time, starting at the mid-point of #' irradiation (Auclair et al. 2003) and \eqn{\tilde{s}} (\eqn{3\times10^{15}} s\eqn{^{-1}}) is the athermal frequency factor after Huntley (2006). \cr #' #' Using fit parameters \eqn{A} and \eqn{D_0}, the function then computes a natural dose #' response curve using the environmental dose rate, \eqn{\dot{D}} (Gy/s) and equations #' `[1]` and `[2]`. Computed \eqn{\frac{L_x}{T_x}} values are then fitted using the #' [plot_GrowthCurve] function and the laboratory measured LnTn can then #' be interpolated onto this curve to determine the fading corrected #' \eqn{D_e} value, from which the fading corrected age is calculated. #' #' **Guralnik et al. (2015) - General-order kinetics** #' #' To apply the approach after Guralnik et al. (2015) use `fit.method = "GOK"`. #' #' The approach of Guralnik et al. (2015) is very similar to that of #' Kars et al. (2008), but instead of using a single saturating exponential #' the model fits a general-order kinetics function of the form: #' #' \deqn{LxTx(t^*) = A \phi (t^*)(1 - (1 + (\frac{1}{D_0}) t^* c)^{-1/c})} #' #' where \eqn{A}, \eqn{\phi}, \eqn{t^*} and \eqn{D_0} are the same as above and \eqn{c} is a #' dimensionless kinetic order modifier (cf. equation 10 in #' Guralnik et al., 2015). 
#' #' **Level of saturation** #' #' The [calc_Huntley2006] function also calculates the level of saturation (\eqn{\frac{n}{N}}) #' and the field saturation (i.e. athermal steady state, (n/N)_SS) value for #' the sample under investigation using the sample specific \eqn{\rho}', #' unfaded \eqn{D_0} and \eqn{\dot{D}} values, following the approach of Kars et al. (2008). #' #' **Uncertainties** #' #' Uncertainties are reported at \eqn{1\sigma} and are assumed to be normally #' distributed and are estimated using Monte-Carlo re-sampling (`n.MC = 1000`) #' of \eqn{\rho}' and \eqn{\frac{L_x}{T_x}} during dose response curve fitting, and of \eqn{\rho}' #' in the derivation of (\eqn{n/N}) and (n/N)_SS. #' #' **Age calculated from 2D0 of the simulated natural DRC** #' #' In addition to the age calculated from the equivalent dose derived from #' `Ln/Tn` projected on the simulated natural dose response curve (DRC), this function #' also calculates an age from twice the characteristic saturation dose (`D0`) #' of the simulated natural DRC. This can be a useful information for #' (over)saturated samples (i.e., no intersect of `Ln/Tn` on the natural DRC) #' to obtain at least a "minimum age" estimate of the sample. In the console #' output this value is denoted by *"Age @2D0 (ka):"*. #' #' @param data [data.frame] (**required**): #' A `data.frame` with one of the following structures: #' - A **three column** data frame with numeric values on a) dose (s), b) `LxTx` and #' c) `LxTx` error. #' - If a **two column** data frame is provided it is automatically #' assumed that errors on `LxTx` are missing. A third column will be attached #' with an arbitrary 5 \% error on the provided `LxTx` values. #' - Can also be a **wide table**, i.e. a [data.frame] with a number of columns divisible by 3 #' and where each triplet has the aforementioned column structure. 
#' #' ``` #' (optional) #' | dose (s)| LxTx | LxTx error | #' | [ ,1] | [ ,2]| [ ,3] | #' |---------|------|------------| #' [1, ]| 0 | LnTn | LnTn error | (optional, see arg 'LnTn') #' [2, ]| R1 | L1T1 | L1T1 error | #' ... | ... | ... | ... | #' [x, ]| Rx | LxTx | LxTx error | #' #' ``` #' **NOTE:** The function assumes the first row of the function to be the #' `Ln/Tn`-value. If you want to provide more than one `Ln/Tn`-value consider #' using the argument `LnTn`. #' #' @param LnTn [data.frame] (**optional**): #' This argument should **only** be used to provide more than one `Ln/Tn`-value. #' It assumes a two column data frame with the following structure: #' #' ``` #' | LnTn | LnTn error | #' | [ ,1] | [ ,2] | #' |--------|--------------| #' [1, ]| LnTn_1 | LnTn_1 error | #' [2, ]| LnTn_2 | LnTn_2 error | #' ... | ... | ... | #' [x, ]| LnTn_x | LnTn_x error | #' ``` #' #' The function will calculate a **mean** `Ln/Tn`-value and uses either the #' standard deviation or the highest individual error, whichever is larger. If #' another mean value (e.g. a weighted mean or median) or error is preferred, #' this value must be calculated beforehand and used in the first row in the #' data frame for argument `data`. #' #' **NOTE:** If you provide `LnTn`-values with this argument the data frame #' for the `data`-argument **must not** contain any `LnTn`-values! #' #' @param rhop [numeric] (**required**): #' The density of recombination centres (\eqn{\rho}') and its error (see Huntley 2006), #' given as numeric vector of length two. Note that \eqn{\rho}' must **not** be #' provided as the common logarithm. Example: `rhop = c(2.92e-06, 4.93e-07)`. #' #' @param ddot [numeric] (**required**): #' Environmental dose rate and its error, given as a numeric vector of length two. #' Expected unit: Gy/ka. Example: `ddot = c(3.7, 0.4)`. 
#' #' @param readerDdot [numeric] (**required**): #' Dose rate of the irradiation source of the OSL reader and its error, #' given as a numeric vector of length two. #' Expected unit: Gy/s. Example: `readerDdot = c(0.08, 0.01)`. #' #' @param fit.method [character] (*with default*): #' Fit function of the dose response curve. Can either be `EXP` (the default) #' or `GOK`. Note that `EXP` (single saturating exponential) is the original #' function the model after Huntley (2006) and Kars et al. (2008) was #' designed to use. The use of a general-order kinetics function (`GOK`) #' is an experimental adaption of the model and should be used #' with great care. #' #' @param lower.bounds [numeric] (*with default*): #' Only applicable for `fit.method = 'GOK'`. A vector of length 4 that #' contains the lower bound values for fitting the general-order kinetics #' function using [minpack.lm::nlsLM]. In most cases, the default values #' (c(`-Inf, -Inf, -Inf, -Inf`)) are appropriate for finding a best fit, but #' sometimes it may be useful to restrict the lower bounds to e.g. #' c(`0, 0, 0, 0`). The values of the vector are for parameters #' `a`, `D0`, `c` and `d` in that particular order (see details in #' [Luminescence::plot_GrowthCurve]). #' #' @param normalise [logical] (*with default*): #' If `TRUE` (the default) all measured and computed LxTx values are #' normalised by the pre-exponential factor A (see details). #' #' @param summary [logical] (*with default*): #' If `TRUE` (the default) various parameters provided by the user #' and calculated by the model are added as text on the right-hand side of the #' plot. #' #' @param plot [logical] (*with default*): #' enables/disables plot output. #' #' @param ... #' Further parameters: #' - `verbose` [logical]: Show or hide console output #' - `n.MC` [numeric]: Number of Monte Carlo iterations (default = `100000`). #' **Note** that it is generally advised to have a large number of Monte Carlo #' iterations for the results to converge. 
Decreasing the number of iterations #' will often result in unstable estimates. #' #' All other arguments are passed to [plot] and [plot_GrowthCurve]. #' #' @return An [RLum.Results-class] object is returned: #' #' Slot: **@data**\cr #' #' \tabular{lll}{ #' **OBJECT** \tab **TYPE** \tab **COMMENT**\cr #' `results` \tab [data.frame] \tab results of the of Kars et al. 2008 model \cr #' `data` \tab [data.frame] \tab original input data \cr #' `Ln` \tab [numeric] \tab Ln and its error \cr #' `LxTx_tables` \tab `list` \tab A `list` of `data.frames` containing data on dose, #' LxTx and LxTx error for each of the dose response curves. #' Note that these **do not** contain the natural Ln signal, which is provided separately. \cr #' `fits` \tab `list` \tab A `list` of `nls` objects produced by [minpack.lm::nlsLM] when fitting the dose response curves \cr #' } #' #' Slot: **@info**\cr #' #' \tabular{lll}{ #' **OBJECT** \tab **TYPE** \tab **COMMENT** \cr #' `call` \tab `call` \tab the original function call \cr #' `args` \tab `list` \tab arguments of the original function call \cr #' } #' #' @section Function version: 0.4.2 #' #' @author #' Georgina E. King, University of Lausanne (Switzerland) \cr #' Christoph Burow, University of Cologne (Germany) \cr #' Sebastian Kreutzer, Ruprecht-Karl University of Heidelberg (Germany) #' #' @keywords datagen #' #' @note This function has BETA status, in particular for the GOK implementation. Please verify #' your results carefully #' #' @references #' #' Kars, R.H., Wallinga, J., Cohen, K.M., 2008. A new approach towards anomalous fading correction for feldspar #' IRSL dating-tests on samples in field saturation. Radiation Measurements 43, 786-790. doi:10.1016/j.radmeas.2008.01.021 #' #' Guralnik, B., Li, B., Jain, M., Chen, R., Paris, R.B., Murray, A.S., Li, S.-H., Pagonis, P., #' Herman, F., 2015. Radiation-induced growth and isothermal decay of infrared-stimulated luminescence #' from feldspar. Radiation Measurements 81, 224-231. 
#' #' Huntley, D.J., 2006. An explanation of the power-law decay of luminescence. #' Journal of Physics: Condensed Matter 18, 1359-1365. doi:10.1088/0953-8984/18/4/020 #' #' King, G.E., Herman, F., Lambert, R., Valla, P.G., Guralnik, B., 2016. #' Multi-OSL-thermochronometry of feldspar. Quaternary Geochronology 33, 76-87. doi:10.1016/j.quageo.2016.01.004 #' #' #' **Further reading** #' #' Morthekai, P., Jain, M., Cunha, P.P., Azevedo, J.M., Singhvi, A.K., 2011. An attempt to correct #' for the fading in million year old basaltic rocks. Geochronometria 38(3), 223-230. #' #' @examples #' #' ## Load example data (sample UNIL/NB123, see ?ExampleData.Fading) #' data("ExampleData.Fading", envir = environment()) #' #' ## (1) Set all relevant parameters #' # a. fading measurement data (IR50) #' fading_data <- ExampleData.Fading$fading.data$IR50 #' #' # b. Dose response curve data #' data <- ExampleData.Fading$equivalentDose.data$IR50 #' #' ## (2) Define required function parameters #' ddot <- c(7.00, 0.004) #' readerDdot <- c(0.134, 0.0067) #' #' # Analyse fading measurement and get an estimate of rho'. #' # Note that the RLum.Results object can be directly used for further processing. #' # The number of MC runs is reduced for this example #' rhop <- analyse_FadingMeasurement(fading_data, plot = TRUE, verbose = FALSE, n.MC = 10) #' #' ## (3) Apply the Kars et al. (2008) model to the data #' kars <- calc_Huntley2006( #' data = data, #' rhop = rhop, #' ddot = ddot, #' readerDdot = readerDdot, #' n.MC = 25) #' #' #' \dontrun{ #' # You can also provide LnTn values separately via the 'LnTn' argument. #' # Note, however, that the data frame for 'data' must then NOT contain #' # a LnTn value. See argument descriptions! 
#' LnTn <- data.frame(LnTn = c(1.84833, 2.24833), #' LnTn.error = c(0.17, 0.22)) #' #' LxTx <- data[2:nrow(data), ] #' #' kars <- calc_Huntley2006(data = LxTx, #' LnTn = LnTn, #' rhop = rhop, #' ddot = ddot, #' readerDdot = readerDdot, #' n.MC = 25) #' } #' @md #' @export calc_Huntley2006 <- function( data, LnTn = NULL, rhop, ddot, readerDdot, normalise = TRUE, fit.method = c("EXP", "GOK"), lower.bounds = c(-Inf, -Inf, -Inf, -Inf), summary = TRUE, plot = TRUE, ... ){ ## Validate Input ------------------------------------------------------------ ## Check fit method if (!fit.method[1] %in% c("EXP", "GOK")) stop("[calc_Huntley2006] Invalid fit option ('", fit.method[1], "'). Only 'EXP' and 'GOK' allowed for argument 'fit.method'.", call. = FALSE) ## Check length of lower.bounds if (fit.method[1] == "GOK" && length(lower.bounds) != 4) stop("[calc_Huntley2006] Argument 'lower.bounds' must be of length 3 exactly.", call. = FALSE) ## Check 'data' # must be a data frame if (is.data.frame(data)) { if (ncol(data) == 2) { warning("[calc_Huntley2006] 'data' only had two columns. We assumed that", " the errors on LxTx were missing and automatically added a", " 5 % error.\n Please provide a data frame with three columns", " if you wish to use actually measured LxTx errors.", call. = FALSE) data[ ,3] <- data[ ,2] * 0.05 } # Check if 'LnTn' is used and overwrite 'data' if (!is.null(LnTn)) { if (!is.data.frame(LnTn)) stop("Value for 'LnTn' must be a data frame!", call. = FALSE) if (ncol(LnTn) != 2) stop("Data frame for 'LnTn' must have two columns!", call. = FALSE) if (ncol(data) > 3) stop("Argument 'LnTn' requires the data frame 'data' to have 2 or 3 columns only!", call. 
= FALSE) # case 1: only one LnTn value if (nrow(LnTn) == 1) { LnTn <- setNames(cbind(0, LnTn), names(data)) data <- rbind(LnTn, data) # case 2: >1 LnTn value } else { LnTn_mean <- mean(LnTn[ ,1]) LnTn_sd <- sd(LnTn[ ,1]) LnTn_error <- max(LnTn_sd, LnTn[ ,2]) LnTn <- setNames(data.frame(0, LnTn_mean, LnTn_error), names(data)) data <- rbind(LnTn, data) } } # check number of columns if (ncol(data) %% 3 != 0) { stop("[calc_Huntley2006] the number of columns in 'data' must be a multiple of 3.", call. = FALSE) } else { # extract all LxTx values data_tmp <- do.call(rbind, lapply(seq(1, ncol(data), 3), function(col) { setNames(data[2:nrow(data), col:c(col+2)], c("dose", "LxTx", "LxTxError")) }) ) # extract the LnTn values (assumed to be the first row) and calculate the column mean LnTn_tmp <- do.call(rbind, lapply(seq(1, ncol(data), 3), function(col) { setNames(data[1, col:c(col+2)], c("dose", "LxTx", "LxTxError")) }) ) # check whether the standard deviation of LnTn estimates or the largest # individual error is highest, and take the larger one LnTn_error_tmp <- max(c(sd(LnTn_tmp[ ,2]), mean(LnTn_tmp[ ,3])), na.rm = TRUE) LnTn_tmp <- colMeans(LnTn_tmp) # re-bind the data frame data <- rbind(LnTn_tmp, data_tmp) data[1, 3] <- LnTn_error_tmp data <- data[complete.cases(data), ] } } else { stop("\n[calc_Huntley2006()] 'data' must be a data frame.", call. = FALSE) } ## Check 'rhop' # check if numeric if (is.numeric(rhop)) { ### TODO: can be of length 2 if error if (length(rhop) != 2) stop("\n[calc_Huntley2006()] 'rhop' must be a vector of length two.", call. = FALSE) # alternatively, and RLum.Results object produced by analyse_FadingMeasurement() # can be provided } else if (inherits(rhop, "RLum.Results")) { if (rhop@originator == "analyse_FadingMeasurement") rhop <- c(rhop@data$rho_prime$MEAN, rhop@data$rho_prime$SD) else stop("\n[calc_Huntley2006] Only an 'RLum.Results' object produced by", " 'analyse_FadingMeasurement()' is allowed as input for 'rhop'.", call. 
= FALSE) } # check if 'rhop' is actually a positive value if (any(is.na(rhop)) || !rhop[1] > 0 || any(is.infinite(rhop))) { stop("\n[calc_Huntley2006] 'rhop' must be a positive number. Provided value", " was: ", signif(rhop[1], 3), " \u2213 " , signif(rhop[2], 3), call. = FALSE) } ## Check ddot & readerDdot # check if numeric if (any(sapply(list(ddot, readerDdot), is.numeric) == FALSE)) stop("\n[calc_Huntley2006] 'ddot' and 'readerDdot' must be numeric values.", call. = FALSE) # check if length == 2 if (any(sapply(list(ddot, readerDdot), function(x) length(x) == 2) == FALSE)) stop("\n[calc_Huntley2006] 'ddot' and 'readerDdot' must be of length 2.", call. = FALSE) ## Settings ------------------------------------------------------------------ settings <- modifyList( list( verbose = TRUE, n.MC = 100000), list(...)) ## Define Constants ---------------------------------------------------------- kb <- 8.617343 * 1e-5 alpha <- 1 Hs <- 3e15 # s value after Huntley (2006) Ma <- 1e6 * 365.25 * 24 * 3600 #in seconds ka <- Ma / 1000 #in seconds ## Define Functions ---------------------------------------------------------- # fit data using using Eq 5. from Kars et al (2008) employing # theta after King et al. 
(2016) theta <- function(t, r) { res <- exp(-r * log(1.8 * Hs * (0.5 * t))^3) res[!is.finite(res)] <- 0 return(res) } ## Preprocessing ------------------------------------------------------------- readerDdot.error <- readerDdot[2] readerDdot <- readerDdot[1] ddot.error <- ddot[2] ddot <- ddot[1] colnames(data) <- c("dose", "LxTx", "LxTx.Error") dosetime <- data[["dose"]][2:nrow(data)] LxTx.measured <- data[["LxTx"]][2:nrow(data)] LxTx.measured.error <- data[["LxTx.Error"]][2:nrow(data)] #Keep LnTn separate for derivation of measured fraction of saturation Ln <- data[["LxTx"]][1] Ln.error <- data[["LxTx.Error"]][1] ## (1) MEASURED ---------------------------------------------------- if (settings$verbose) cat("\n") data.tmp <- data data.tmp[ ,1] <- data.tmp[ ,1] * readerDdot GC.settings <- list( sample = data.tmp, mode = "interpolation", fit.method = fit.method[1], fit.bounds = TRUE, output.plot = plot, main = "Measured dose response curve", xlab = "Dose (Gy)", verbose = FALSE) GC.settings <- modifyList(GC.settings, list(...)) GC.settings$verbose <- FALSE GC.measured <- try(do.call(plot_GrowthCurve, GC.settings)) if (inherits(GC.measured, "try-error")) stop("\n[calc_Huntley2006()] Unable to fit growth curve to measured data", call. 
= FALSE) # extract results and calculate age GC.results <- get_RLum(GC.measured) fit_measured <- GC.measured@data$Fit De.measured <- GC.results$De De.measured.error <- GC.results$De.Error D0.measured <- GC.results$D01 D0.measured.error <- GC.results$D01.ERROR Age.measured <- De.measured/ ddot Age.measured.error <- Age.measured * sqrt( (De.measured.error / De.measured)^2 + (readerDdot.error / readerDdot)^2 + (ddot.error / ddot)^2) ## (2) SIMULATED ----------------------------------------------------- # create MC samples rhop_MC <- rnorm(n = settings$n.MC, mean = rhop[1], sd = rhop[2]) ## do the fitting fitcoef <- do.call(rbind, sapply(rhop_MC, function(rhop_i) { if (fit.method[1] == "EXP") { fit_sim <- try({ minpack.lm::nlsLM( LxTx.measured ~ a * theta(dosetime, rhop_i) * (1 - exp(-(dosetime + c)/ D0)), start = list( a = coef(fit_measured)[["a"]], c = coef(fit_measured)[["c"]], D0 = D0.measured / readerDdot), lower = lower.bounds[1:3], control = list(maxiter = settings$maxiter)) }, silent = TRUE) } else if (fit.method[1] == "GOK") { fit_sim <- try({ minpack.lm::nlsLM( LxTx.measured ~ a * theta(dosetime, rhop_i) * (d-(1+(1/D0)*dosetime*c)^(-1/c)), start = list( a = coef(fit_measured)[["a"]], D0 = D0.measured / readerDdot, c = coef(fit_measured)[["c"]], d = coef(fit_measured)[["d"]]), lower = lower.bounds, control = list(maxiter = settings$maxiter))}, silent = TRUE) } if (!inherits(fit_sim, "try-error")) coefs <- coef(fit_sim) else coefs <- c(NA, NA, NA, NA) return(coefs) }, simplify = FALSE)) # final fit for export # fit_simulated <- minpack.lm::nlsLM(LxTx.measured ~ a * theta(dosetime, rhop[1]) * (1 - exp(-dosetime / D0)), # start = list(a = max(LxTx.measured), D0 = D0.measured / readerDdot)) # scaling factor A <- mean(fitcoef[, "a"], na.rm = TRUE) A.error <- sd(fitcoef[ ,"a"], na.rm = TRUE) # calculate measured fraction of saturation nN <- Ln / A nN.error <- nN * sqrt( (Ln.error / Ln)^2 + (A.error / A)^2) # compute a natural dose response curve following the 
assumptions of # Morthekai et al. 2011, Geochronometria # natdosetime <- seq(0, 1e14, length.out = settings$n.MC) # natdosetimeGray <- natdosetime * ddot / ka # calculate D0 dose in seconds computedD0 <- (fitcoef[ ,"D0"] * readerDdot) / (ddot / ka) # Legacy code: # This is an older approximation to calculate the natural dose response curve, # which sometimes tended to slightly underestimate nN_ss. This is now replaced # with the newer approach below. # compute natural dose response curve # LxTx.sim <- A * theta(natdosetime, rhop[1]) * (1 - exp(-natdosetime / mean(computedD0, na.rm = TRUE) )) # warning("LxTx Curve: ", round(max(LxTx.sim) / A, 3), call. = FALSE) # compute natural dose response curve ddots <- ddot / ka natdosetimeGray <- c(0, exp(seq(1, log(max(data[ ,1]) * 2), length.out = 999))) natdosetime <- natdosetimeGray rprime <- seq(0.01, 5, length.out = 500) pr <- 3 * rprime^2 * exp(-rprime^3) # Huntley 2006, eq. 3 K <- Hs * exp(-rhop[1]^-(1/3) * rprime) TermA <- matrix(NA, nrow = length(rprime), ncol = length(natdosetime)) UFD0 <- mean(fitcoef[ ,"D0"], na.rm = TRUE) * readerDdot if(fit.method[1] == "EXP") c_exp <- mean(fitcoef[ ,"c"], na.rm = TRUE) if (fit.method[1] == "GOK") { c_gok <- mean(fitcoef[ ,"c"], na.rm = TRUE) d_gok <- mean(fitcoef[ ,"d"], na.rm = TRUE) } for (j in 1:length(natdosetime)) { for (k in 1:length(rprime)) { if (fit.method[1] == "EXP") { TermA[k,j] <- A * pr[k] * ((ddots / UFD0) / (ddots / UFD0 + K[k]) * (1 - exp(-(natdosetime[j] + c_exp) * (1 / UFD0 + K[k]/ddots)))) } else if (fit.method[1] == "GOK") { TermA[k,j] <- A * pr[k] * (ddots / UFD0) / (ddots / UFD0 + K[k]) * (d_gok-(1+(1/UFD0 + K[k]/ddots) * natdosetime[j] * c_gok)^(-1/c_gok)) } }} LxTx.sim <- colSums(TermA) / sum(pr) # warning("LxTx Curve (new): ", round(max(LxTx.sim) / A, 3), call. 
= FALSE) # calculate Age positive <- which(diff(LxTx.sim) > 0) data.unfaded <- data.frame( dose = c(0, natdosetimeGray[positive]), LxTx = c(Ln, LxTx.sim[positive]), LxTx.error = c(Ln.error, LxTx.sim[positive] * A.error/A)) data.unfaded$LxTx.error[2] <- 0.0001 GC.settings <- list( sample = data.unfaded, mode = "interpolation", fit.method = fit.method[1], fit.bounds = TRUE, output.plot = plot, verbose = FALSE, main = "Simulated dose response curve", xlab = "Dose (Gy)") GC.settings <- modifyList(GC.settings, list(...)) GC.settings$verbose <- FALSE suppressWarnings( GC.simulated <- try(do.call(plot_GrowthCurve, GC.settings)) ) if (!inherits(GC.simulated, "try-error")) { GC.simulated.results <- get_RLum(GC.simulated) fit_simulated <- get_RLum(GC.simulated, "Fit") De.sim <- GC.simulated.results$De De.error.sim <- GC.simulated.results$De.Error # derive simulated D0 D0.sim.Gy <- GC.simulated.results$D01 D0.sim.Gy.error <- GC.simulated.results$D01.ERROR Age.sim <- De.sim / ddot Age.sim.error <- Age.sim * sqrt( ( De.error.sim/ De.sim)^2 + (readerDdot.error / readerDdot)^2 + (ddot.error / ddot)^2) Age.sim.2D0 <- 2 * D0.sim.Gy / ddot Age.sim.2D0.error <- Age.sim.2D0 * sqrt( ( D0.sim.Gy.error / D0.sim.Gy)^2 + (readerDdot.error / readerDdot)^2 + (ddot.error / ddot)^2) } else { De.sim <- De.error.sim <- Age.sim <- Age.sim.error <- fit_simulated <- D0.sim.Gy <- D0.sim.Gy.error <- NA Age.sim.2D0 <- Age.sim.2D0.error <- NA } if (Ln > max(LxTx.sim) * 1.1) warning("[calc_Huntley2006()] Ln is >10 % larger than the maximum computed LxTx value.", " The De and age should be regarded as infinite estimates.", call. 
= FALSE) # Estimate nN_(steady state) by Monte Carlo Simulation ddot_MC <- rnorm(n = settings$n.MC, mean = ddot, sd = ddot.error) UFD0_MC <- rnorm(n = settings$n.MC, mean = D0.sim.Gy, sd = D0.sim.Gy.error) nN_SS_MC <- mapply(function(rhop_i, ddot_i, UFD0_i) { rprime <- seq(0.01, 5, length.out = settings$n.MC) rho <- 3 * alpha^3 * rhop_i / (4 * pi) r <- rprime / (4 * pi * rho / 3)^(1 / 3) pr <- 3 * rprime^2 * exp(-rprime^3) tau <- ((1 / Hs) * exp(1)^(alpha * r)) / ka Ls <- 1 / (1 + UFD0_i / (ddot_i * tau)) Lstrap <- (pr * Ls) / sum(pr) # field saturation nN_SS_i <- sum(Lstrap) return(nN_SS_i) }, rhop_MC, ddot_MC, UFD0_MC, SIMPLIFY = TRUE) nN_SS <- suppressWarnings(exp(mean(log(nN_SS_MC), na.rm = TRUE))) nN_SS.error <- suppressWarnings(nN_SS * abs(sd(log(nN_SS_MC), na.rm = TRUE) / mean(log(nN_SS_MC), na.rm = TRUE))) ## legacy code for debugging purposes ## nN_SS is often lognormally distributed, so we now take the mean and sd ## of the log values. # warning(mean(nN_SS_MC, na.rm = TRUE)) # warning(sd(nN_SS_MC, na.rm = TRUE)) ## (3) UNFADED --------------------------------------------------------------- LxTx.unfaded <- LxTx.measured / theta(dosetime, rhop[1]) LxTx.unfaded[is.nan((LxTx.unfaded))] <- 0 LxTx.unfaded[is.infinite(LxTx.unfaded)] <- 0 dosetimeGray <- dosetime * readerDdot if (fit.method[1] == "EXP") { fit_unfaded <- minpack.lm::nlsLM( LxTx.unfaded ~ a * (1 - exp(-(dosetimeGray + c) / D0)), start = list( a = coef(fit_simulated)[["a"]], c = coef(fit_simulated)[["c"]], D0 = D0.measured / readerDdot), control = list(maxiter = settings$maxiter)) } else if (fit.method[1] == "GOK") { fit_unfaded <- minpack.lm::nlsLM( LxTx.unfaded ~ a * (d-(1+(1/D0)*dosetimeGray*c)^(-1/c)), start = list( a = coef(fit_simulated)[["a"]], D0 = coef(fit_simulated)[["b"]] / readerDdot, c = coef(fit_simulated)[["c"]], d = coef(fit_simulated)[["d"]]), lower = lower.bounds, control = list(maxiter = settings$maxiter)) } D0.unfaded <- coef(fit_unfaded)[["D0"]] D0.error.unfaded <- 
summary(fit_unfaded)$coefficients["D0", "Std. Error"] ## Create LxTx tables -------------------------------------------------------- # normalise by A (saturation point of the un-faded curve) if (normalise) { LxTx.measured.relErr <- (LxTx.measured.error / LxTx.measured) LxTx.measured <- LxTx.measured / A LxTx.measured.error <- LxTx.measured * LxTx.measured.relErr LxTx.sim <- LxTx.sim / A LxTx.unfaded <- LxTx.unfaded / A Ln.relErr <- Ln.error / Ln Ln <- Ln / A Ln.error <- Ln * Ln.relErr } # combine all computed LxTx values LxTx_measured <- data.frame( dose = dosetimeGray, LxTx = LxTx.measured, LxTx.Error = LxTx.measured.error) LxTx_simulated <- data.frame( dose = natdosetimeGray, LxTx = LxTx.sim, LxTx.Error = LxTx.sim * A.error / A) LxTx_unfaded <- data.frame( dose = dosetimeGray, LxTx = LxTx.unfaded, LxTx.Error = LxTx.unfaded * A.error / A) ## Plot settings ------------------------------------------------------------- plot.settings <- modifyList(list( main = "Dose response curves", xlab = "Dose (Gy)", ylab = ifelse(normalise, "normalised LxTx (a.u.)", "LxTx (a.u.)") ), list(...)) ## Plotting ------------------------------------------------------------------ if (plot) { ### par settings --------- # set plot parameters par.old.full <- par(no.readonly = TRUE) # set graphical parameters par(mfrow = c(1,1), mar = c(4.5, 4, 4, 4), cex = 0.8) if (summary) par(oma = c(0, 3, 0, 9)) else par(oma = c(0, 9, 0, 9)) # Find a good estimate of the x-axis limits if(GC.settings$mode == "extrapolation") { dosetimeGray <- c(-De.measured - De.measured.error, dosetimeGray) De.measured <- -De.measured } xlim <- range(pretty(dosetimeGray)) if (!is.na(De.sim) & De.sim > xlim[2]) xlim <- range(pretty(c(min(dosetimeGray), De.sim))) # Create figure after Kars et al. 
(2008) contrasting the dose response curves ## open plot window ------------ plot(dosetimeGray[dosetimeGray >= 0], LxTx_measured$LxTx, main = plot.settings$main, xlab = plot.settings$xlab, ylab = plot.settings$ylab, pch = 16, ylim = c(0, max(do.call(rbind, list(LxTx_measured, LxTx_unfaded))[["LxTx"]])), xlim = xlim ) ##add ablines for extrapolation if(GC.settings$mode == "extrapolation") abline(v = 0, h = 0, col = "gray") # LxTx error bars segments(x0 = dosetimeGray[dosetimeGray >= 0], y0 = LxTx_measured$LxTx + LxTx_measured$LxTx.Error, x1 = dosetimeGray[dosetimeGray >= 0], y1 = LxTx_measured$LxTx - LxTx_measured$LxTx.Error, col = "black") # re-calculate the measured dose response curve in Gray xRange <- range(pretty(dosetimeGray)) xNew <- seq(xRange[1], xRange[2], length.out = 200) yNew <- predict(GC.measured@data$Fit, list(x = xNew)) if (normalise) yNew <- yNew / A ## add measured curve ------- lines(xNew, yNew, col = "black") # add error polygon polygon(x = c(natdosetimeGray, rev(natdosetimeGray)), y = c(LxTx_simulated$LxTx + LxTx_simulated$LxTx.Error, rev(LxTx_simulated$LxTx - LxTx_simulated$LxTx.Error)), col = adjustcolor("grey", alpha.f = 0.5), border = NA) ## add simulated LxTx values points( x = natdosetimeGray, y = LxTx_simulated$LxTx, type = "l", lty = 2) # Ln and DE as points points(x = if(GC.settings$mode == "extrapolation") rep(De.measured, 2) else c(0, De.measured), y = if(GC.settings$mode == "extrapolation") c(0,0) else c(Ln, Ln), col = "red", pch = c(1, 16)) # Ln error bar segments(x0 = 0, y0 = Ln - Ln.error, x1 = 0, y1 = Ln + Ln.error, col = "red") # Ln as a horizontal line lines(x = if(GC.settings$mode == "extrapolation") c(0, min(c(De.measured, De.sim), na.rm = TRUE)) else c(0, max(c(De.measured, De.sim), na.rm = TRUE)), y = c(Ln, Ln), col = "black", lty = 3) # vertical line of measured DE lines(x = c(De.measured, De.measured), y = c(0, Ln), col = "black", lty = 3) # add legends legend("bottomright", legend = c( "Unfaded DRC", "Measured DRC", 
"Simulated natural DRC"), lty = c(5, 1, 2), bty = "n", cex = 0.8) # add vertical line of simulated De if (!is.na(De.sim)) { lines(x = if(GC.settings$mode == "extrapolation") c(-De.sim, -De.sim) else c(De.sim, De.sim), y = c(0, Ln), col = "black", lty = 3) points(x = if(GC.settings$mode == "extrapolation") -De.sim else De.sim, y = if(GC.settings$mode == "extrapolation") 0 else Ln, col = "red" , pch = 16) } else { lines(x = c(De.measured, xlim[2]), y = c(Ln, Ln), col = "black", lty = 3) } # add unfaded DRC xRange <- range(pretty(dosetimeGray)) xNew <- seq(xRange[1], xRange[2], length.out = 200) yNew <- predict(fit_unfaded, list(dosetimeGray = xNew)) if (normalise) yNew <- yNew / A lines(xNew, yNew, col = "black", lty = 5) points(x = dosetimeGray[dosetimeGray >= 0], y = LxTx_unfaded$LxTx, col = "black") # LxTx error bars segments( x0 = dosetimeGray[dosetimeGray >= 0], y0 = LxTx_unfaded$LxTx + LxTx_unfaded$LxTx.Error, x1 = dosetimeGray[dosetimeGray >= 0], y1 = LxTx_unfaded$LxTx - LxTx_unfaded$LxTx.Error, col = "black") # add text if (summary) { # define labels as expressions labels.text <- list( bquote(dot(D) == .(format(ddot, digits = 2, nsmall = 2)) %+-% .(round(as.numeric(format(ddot.error, digits = 3, nsmall = 3)), 3)) ~ frac(Gy, ka)), bquote(dot(D)["Reader"] == .(format(readerDdot, digits = 2, nsmall = 2)) %+-% .(round(as.numeric(format(readerDdot.error, digits = 3, nsmall = 3)), 3)) ~ frac(Gy, s)), bquote(log[10]~(rho~"'") == .(format(log10(rhop[1]), digits = 2, nsmall = 2)) %+-% .(round(as.numeric(format(rhop[2] / (rhop[1] * log(10, base = exp(1))), digits = 2, nsmall = 2)), 2)) ), bquote(bgroup("(", frac(n, N), ")") == .(format(nN, digits = 2, nsmall = 2)) %+-% .(round(as.numeric(format(nN.error, digits = 2, nsmall = 2)), 2)) ), bquote(bgroup("(", frac(n, N), ")")[SS] == .(format(nN_SS, digits = 2, nsmall = 2)) %+-% .(round(as.numeric(format(nN_SS.error, digits = 2, nsmall = 2)), 2)) ), bquote(D["E,sim"] == .(format(De.sim, digits = 1, nsmall = 0)) %+-% 
.(format(De.error.sim, digits = 1, nsmall = 0)) ~ Gy), bquote(D["0,sim"] == .(format(D0.sim.Gy, digits = 1, nsmall = 0)) %+-% .(format(D0.sim.Gy.error, digits = 1, nsmall = 0)) ~ Gy), bquote(Age["sim"] == .(format(Age.sim, digits = 1, nsmall = 0)) %+-% .(format(Age.sim.error, digits = 1, nsmall = 0)) ~ ka) ) # each of the labels is positioned at 1/10 of the available y-axis space ypos <- seq(range(axTicks(2))[2], range(axTicks(2))[1], length.out = 10)[1:length(labels.text)] # allow overprinting par(xpd = NA) # add labels iteratively mapply(function(label, pos) { text(x = max(axTicks(1)) * 1.05, y = pos, labels = label, pos = 4) }, labels.text, ypos) } # recover plot parameters on.exit(par(par.old.full)) } ## Results ------------------------------------------------------------------- results <- set_RLum( class = "RLum.Results", data = list( results = data.frame( "nN" = nN, "nN.error" = nN.error, "nN_SS" = nN_SS, "nN_SS.error" = nN_SS.error, "Meas_De" = abs(De.measured), "Meas_De.error" = De.measured.error, "Meas_D0" = D0.measured, "Meas_D0.error" = D0.measured.error, "Meas_Age" = Age.measured, "Meas_Age.error" = Age.measured.error, "Sim_De" = De.sim, "Sim_De.error" = De.error.sim, "Sim_D0" = D0.sim.Gy, "Sim_D0.error" = D0.sim.Gy.error, "Sim_Age" = Age.sim, "Sim_Age.error" = Age.sim.error, "Sim_Age_2D0" = Age.sim.2D0, "Sim_Age_2D0.error" = Age.sim.2D0.error, "Unfaded_D0" = D0.unfaded, "Unfaded_D0.error" = D0.error.unfaded, row.names = NULL), data = data, Ln = c(Ln, Ln.error), LxTx_tables = list( simulated = LxTx_simulated, measured = LxTx_measured, unfaded = LxTx_unfaded), fits = list( simulated = fit_simulated, measured = fit_measured, unfaded = fit_unfaded ) ), info = list( call = sys.call(), args = as.list(sys.call())[-1]) ) ## Console output ------------------------------------------------------------ if (settings$verbose) { cat("\n[calc_Huntley2006()]\n") cat("\n -------------------------------") cat("\n (n/N) [-]:\t", round(results@data$results$nN, 2), 
"\u00b1", round(results@data$results$nN.error, 2)) cat("\n (n/N)_SS [-]:\t", round(results@data$results$nN_SS, 2),"\u00b1", round(results@data$results$nN_SS.error, 2)) cat("\n\n ---------- Measured -----------") cat("\n DE [Gy]:\t", round(results@data$results$Meas_De, 2), "\u00b1", round(results@data$results$Meas_De.error, 2)) cat("\n D0 [Gy]:\t", round(results@data$results$Meas_D0, 2), "\u00b1", round(results@data$results$Meas_D0.error, 2)) if (fit.method[1] == "GOK") { cat("\n c [-]:\t\t", round(summary(fit_measured)$coefficients["c", "Estimate"], 2), "\u00b1", round(summary(fit_measured)$coefficients["c", "Std. Error"], 2)) } cat("\n Age [ka]:\t", round(results@data$results$Meas_Age, 2), "\u00b1", round(results@data$results$Meas_Age.error, 2)) cat("\n\n ---------- Un-faded -----------") cat("\n D0 [Gy]:\t", round(results@data$results$Unfaded_D0, 2), "\u00b1", round(results@data$results$Unfaded_D0.error, 2)) if (fit.method[1] == "GOK") { cat("\n c [-]:\t\t", round(summary(fit_unfaded)$coefficients["c", "Estimate"], 2), "\u00b1", round(summary(fit_unfaded)$coefficients["c", "Std. Error"], 2)) } cat("\n\n ---------- Simulated ----------") cat("\n DE [Gy]:\t", round(results@data$results$Sim_De, 2), "\u00b1", round(results@data$results$Sim_De.error, 2)) cat("\n D0 [Gy]:\t", round(results@data$results$Sim_D0, 2), "\u00b1", round(results@data$results$Sim_D0.error, 2)) if (fit.method[1] == "GOK") { cat("\n c [-]:\t\t", round(summary(fit_simulated)$coefficients["c", "Estimate"], 2), "\u00b1", round(summary(fit_simulated)$coefficients["c", "Std. 
Error"], 2)) } cat("\n Age [ka]:\t", round(results@data$results$Sim_Age, 2), "\u00b1", round(results@data$results$Sim_Age.error, 2)) cat("\n Age @2D0 [ka]:\t", round(results@data$results$Sim_Age_2D0, 2), "\u00b1", round(results@data$results$Sim_Age_2D0.error, 2)) cat("\n -------------------------------\n\n") } ## Return value -------------------------------------------------------------- return(results) } Luminescence/R/analyse_baSAR.R0000644000176200001440000026066714464125673015711 0ustar liggesusers#' Bayesian models (baSAR) applied on luminescence data #' #' This function allows the application of Bayesian models on luminescence data, measured #' with the single-aliquot regenerative-dose (SAR, Murray and Wintle, 2000) protocol. In particular, #' it follows the idea proposed by Combès et al., 2015 of using an hierarchical model for estimating #' a central equivalent dose from a set of luminescence measurements. This function is (I) the adoption #' of this approach for the R environment and (II) an extension and a technical refinement of the #' published code. #' #' Internally the function consists of two parts: (I) The Bayesian core for the Bayesian calculations #' and applying the hierarchical model and (II) a data pre-processing part. The Bayesian core can be run #' independently, if the input data are sufficient (see below). The data pre-processing part was #' implemented to simplify the analysis for the user as all needed data pre-processing is done #' by the function, i.e. in theory it is enough to provide a BIN/BINX-file with the SAR measurement #' data. For the Bayesian analysis for each aliquot the following information are needed from the SAR analysis. #' `LxTx`, the `LxTx` error and the dose values for all regeneration points. 
#' #' **How the systematic error contribution is calculated?** #' #' Standard errors (so far) provided with the source dose rate are considered as systematic uncertainties #' and added to final central dose by: #' #' \deqn{systematic.error = 1/n \sum SE(source.doserate)} #' #' \deqn{SE(central.dose.final) = \sqrt{SE(central.dose)^2 + systematic.error^2}} #' #' Please note that this approach is rather rough and can only be valid if the source dose rate #' errors, in case different readers had been used, are similar. In cases where more than #' one source dose rate is provided a warning is given. #' #' **Input / output scenarios** #' #' Various inputs are allowed for this function. Unfortunately this makes the function handling rather #' complex, but at the same time very powerful. Available scenarios: #' #' **(1) - `object` is BIN-file or link to a BIN-file** #' #' Finally it does not matter how the information of the BIN/BINX file are provided. The function #' supports **(a)** either a path to a file or directory or a `list` of file names or paths or #' **(b)** a [Risoe.BINfileData-class] object or a list of these objects. The latter one can #' be produced by using the function [read_BIN2R], but this function is called automatically #' if only a file name and/or a path is provided. In both cases it will become the data that can be #' used for the analysis. #' #' `[XLS_file = NULL]` #' #' If no XLS file (or data frame with the same format) is provided the functions runs an automatic process that #' consists of the following steps: #' #' 1. Select all valid aliquots using the function [verify_SingleGrainData] #' 2. Calculate `Lx/Tx` values using the function [calc_OSLLxTxRatio] #' 3. 
Calculate De values using the function [plot_GrowthCurve]
#'
#' These processed data are subsequently used for the Bayesian analysis
#'
#' `[XLS_file != NULL]`
#'
#' If an XLS-file is provided or a `data.frame` providing similar information the pre-processing
#' consists of the following steps:
#'
#' 1. Calculate `Lx/Tx` values using the function [calc_OSLLxTxRatio]
#' 2. Calculate De values using the function [plot_GrowthCurve]
#'
#' This means that the XLS file should contain a selection of the BIN-file names and the aliquots selected
#' for the further analysis. This allows a manual selection of input data, as the automatic selection
#' by [verify_SingleGrainData] might not be entirely sufficient.
#'
#'
#' **(2) - `object` `RLum.Results object`**
#'
#' If an [RLum.Results-class] object is provided as input and(!) this object was
#' previously created by the function `analyse_baSAR()` itself, the pre-processing part
#' is skipped and the function directly starts the Bayesian analysis. This option is very powerful
#' as it allows changing parameters for the Bayesian analysis without the need to repeat
#' the data pre-processing. If furthermore the argument `aliquot_range` is set, aliquots
#' can be manually excluded based on previous runs.
#'
#' **`method_control`**
#'
#' These are arguments that can be passed directly to the Bayesian calculation core, supported arguments
#' are:
#'
#' \tabular{lll}{
#' **Parameter** \tab **Type** \tab **Description**\cr
#' `lower_centralD` \tab [numeric] \tab sets the lower bound for the expected De range. Change it only if you know what you are doing!\cr
#' `upper_centralD` \tab [numeric] \tab sets the upper bound for the expected De range. Change it only if you know what you are doing!\cr
#' `n.chains` \tab [integer] \tab sets number of parallel chains for the model (default = 3) (cf. [rjags::jags.model])\cr
#' `inits` \tab [list] \tab option to set initialisation values (cf. 
[rjags::jags.model]) \cr #' `thin` \tab [numeric] \tab thinning interval for monitoring the Bayesian process (cf. [rjags::jags.model])\cr #' `variable.names` \tab [character] \tab set the variables to be monitored during the MCMC run, default: #' `'central_D'`, `'sigma_D'`, `'D'`, `'Q'`, `'a'`, `'b'`, `'c'`, `'g'`. #' Note: only variables present in the model can be monitored. #' } #' #' **User defined models**\cr #' #' The function provides the option to modify and to define own models that can be used for #' the Bayesian calculation. In the case the user wants to modify a model, a new model #' can be piped into the function via the argument `baSAR_model` as `character`. #' The model has to be provided in the JAGS dialect of the BUGS language (cf. [rjags::jags.model]) #' and parameter names given with the pre-defined names have to be respected, otherwise the function #' will break. #' #' **FAQ** #' #' Q: How can I set the seed for the random number generator (RNG)? #' #' A: Use the argument `method_control`, e.g., for three MCMC chains #' (as it is the default): #' #' ``` #' method_control = list( #' inits = list( #' list(.RNG.name = "base::Wichmann-Hill", .RNG.seed = 1), #' list(.RNG.name = "base::Wichmann-Hill", .RNG.seed = 2), #' list(.RNG.name = "base::Wichmann-Hill", .RNG.seed = 3) #' )) #' ``` #' #' This sets a reproducible set for every chain separately.\cr #' #' Q: How can I modify the output plots? #' #' A: You can't, but you can use the function output to create own, modified plots. #' #' #' Q: Can I change the boundaries for the central_D? #' #' A: Yes, we made it possible, but we DO NOT recommend it, except you know what you are doing!\cr #' Example: `method_control = list(lower_centralD = 10))` #' #' Q: The lines in the baSAR-model appear to be in a wrong logical order?\cr #' #' A: This is correct and allowed (cf. 
JAGS manual) #' #' #' **Additional arguments support via the `...` argument** #' #' This list summarizes the additional arguments that can be passed to the internally used #' functions. #' #' \tabular{llll}{ #' **Supported argument** \tab **Corresponding function** \tab **Default** \tab **Short description **\cr #' `threshold` \tab [verify_SingleGrainData] \tab `30` \tab change rejection threshold for curve selection \cr #' `sheet` \tab [readxl::read_excel] \tab `1` \tab select XLS-sheet for import\cr #' `col_names` \tab [readxl::read_excel] \tab `TRUE` \tab first row in XLS-file is header\cr #' `col_types` \tab [readxl::read_excel] \tab `NULL` \tab limit import to specific columns\cr #' `skip` \tab [readxl::read_excel] \tab `0` \tab number of rows to be skipped during import\cr #' `n.records` \tab [read_BIN2R] \tab `NULL` \tab limit records during BIN-file import\cr #' `duplicated.rm` \tab [read_BIN2R] \tab `TRUE` \tab remove duplicated records in the BIN-file\cr #' `pattern` \tab [read_BIN2R] \tab `TRUE` \tab select BIN-file by name pattern\cr #' `position` \tab [read_BIN2R] \tab `NULL` \tab limit import to a specific position\cr #' `background.count.distribution` \tab [calc_OSLLxTxRatio] \tab `"non-poisson"` \tab set assumed count distribution\cr #' `fit.weights` \tab [plot_GrowthCurve] \tab `TRUE` \tab enables / disables fit weights\cr #' `fit.bounds` \tab [plot_GrowthCurve] \tab `TRUE` \tab enables / disables fit bounds\cr #' `NumberIterations.MC` \tab [plot_GrowthCurve] \tab `100` \tab number of MC runs for error calculation\cr #' `output.plot` \tab [plot_GrowthCurve] \tab `TRUE` \tab enables / disables dose response curve plot\cr #' `output.plotExtended` \tab [plot_GrowthCurve] \tab `TRUE` \tab enables / disables extended dose response curve plot\cr #' } #' #' #' @param object [Risoe.BINfileData-class], [RLum.Results-class], [list] of [RLum.Analysis-class], #' [character] or [list] (**required**): #' input object used for the Bayesian analysis. 
If a `character` is provided the function #' assumes a file connection and tries to import a BIN/BINX-file using the provided path. If a `list` is #' provided the list can only contain either `Risoe.BINfileData` objects or `character`s #' providing a file connection. Mixing of both types is not allowed. If an [RLum.Results-class] #' is provided the function directly starts with the Bayesian Analysis (see details) #' #' @param XLS_file [character] (*optional*): #' XLS_file with data for the analysis. This file must contain 3 columns: #' the name of the file, the disc position and the grain position #' (the last being 0 for multi-grain measurements).\cr #' Alternatively a `data.frame` of similar structure can be provided. #' #' @param aliquot_range [numeric] (*optional*): #' allows to limit the range of the aliquots used for the analysis. #' This argument has only an effect if the argument `XLS_file` is used or #' the input is the previous output (i.e. is [RLum.Results-class]). In this case the #' new selection will add the aliquots to the removed aliquots table. #' #' @param source_doserate [numeric] **(required)**: #' source dose rate of beta-source used for the measurement and its uncertainty #' in Gy/s, e.g., `source_doserate = c(0.12, 0.04)`. Parameter can be provided #' as `list`, for the case that more than one BIN-file is provided, e.g., #' `source_doserate = list(c(0.04, 0.004), c(0.05, 0.004))`. #' #' @param signal.integral [vector] (**required**): #' vector with the limits for the signal integral used for the calculation, #' e.g., `signal.integral = c(1:5)`. Ignored if `object` is an [RLum.Results-class] object. #' The parameter can be provided as `list`, see `source_doserate`. #' #' @param signal.integral.Tx [vector] (*optional*): #' vector with the limits for the signal integral for the Tx curve. I #' f nothing is provided the value from `signal.integral` is used and it is ignored #' if `object` is an [RLum.Results-class] object. 
#' The parameter can be provided as `list`, see `source_doserate`. #' #' @param background.integral [vector] (**required**): #' vector with the bounds for the background integral. #' Ignored if `object` is an [RLum.Results-class] object. #' The parameter can be provided as `list`, see `source_doserate`. #' #' @param background.integral.Tx [vector] (*optional*): #' vector with the limits for the background integral for the Tx curve. #' If nothing is provided the value from `background.integral` is used. #' Ignored if `object` is an [RLum.Results-class] object. #' The parameter can be provided as `list`, see `source_doserate`. #' #' @param irradiation_times [numeric] (*optional*): if set this vector replaces all irradiation #' times for one aliquot and one cycle (Lx and Tx curves) and recycles it for all others cycles and aliquots. #' Please note that if this argument is used, for every(!) single curve #' in the dataset an irradiation time needs to be set. #' #' @param sigmab [numeric] (*with default*): #' option to set a manual value for the overdispersion (for `LnTx` and `TnTx`), #' used for the `Lx`/`Tx` error calculation. The value should be provided as #' absolute squared count values, cf. [calc_OSLLxTxRatio]. #' The parameter can be provided as `list`, see `source_doserate`. #' #' @param sig0 [numeric] (*with default*): #' allow adding an extra component of error to the final Lx/Tx error value #' (e.g., instrumental error, see details is [calc_OSLLxTxRatio]). #' The parameter can be provided as `list`, see `source_doserate`. #' #' @param distribution [character] (*with default*): #' type of distribution that is used during Bayesian calculations for #' determining the Central dose and overdispersion values. #' Allowed inputs are `"cauchy"`, `"normal"` and `"log_normal"`. #' #' @param baSAR_model [character] (*optional*): #' option to provide an own modified or new model for the Bayesian calculation #' (see details). 
If an own model is provided the argument `distribution` is #' ignored and set to `'user_defined'` #' #' @param n.MCMC [integer] (*with default*): #' number of iterations for the Markov chain Monte Carlo (MCMC) simulations #' #' @param fit.method [character] (*with default*): #' equation used for the fitting of the dose-response curve using the function #' [plot_GrowthCurve] and then for the Bayesian modelling. Here supported methods: `EXP`, `EXP+LIN` and `LIN` #' #' @param fit.force_through_origin [logical] (*with default*): #' force fitting through origin #' #' @param fit.includingRepeatedRegPoints [logical] (*with default*): #' includes the recycling point (assumed to be measured during the last cycle) #' #' @param method_control [list] (*optional*): #' named list of control parameters that can be directly #' passed to the Bayesian analysis, e.g., `method_control = list(n.chains = 4)`. #' See details for further information #' #' @param digits [integer] (*with default*): #' round output to the number of given digits #' #' @param distribution_plot [character] (*with default*): sets the final distribution plot that #' shows equivalent doses obtained using the frequentist approach and sets in the central dose #' as comparison obtained using baSAR. Allowed input is `'abanico'` or `'kde'`. If set to `NULL` nothing is plotted. #' #' @param plot [logical] (*with default*): #' enables or disables plot output #' #' @param plot_reduced [logical] (*with default*): #' enables or disables the advanced plot output #' #' @param plot.single [logical] (*with default*): #' enables or disables single plots or plots arranged by `analyse_baSAR` #' #' @param verbose [logical] (*with default*): #' enables or disables verbose mode #' #' @param ... parameters that can be passed to the function [calc_OSLLxTxRatio] #' (almost full support), [readxl::read_excel] (full support), [read_BIN2R] (`n.records`, #' `position`, `duplicated.rm`), see details. 
#' #' #' @return Function returns results numerically and graphically: #' #' -----------------------------------\cr #' `[ NUMERICAL OUTPUT ]`\cr #' -----------------------------------\cr #' #' **`RLum.Results`**-object #' #' **slot:** **`@data`** #' #' \tabular{lll}{ #' **Element** \tab **Type** \tab **Description**\cr #' `$summary` \tab `data.frame` \tab statistical summary, including the central dose \cr #' `$mcmc` \tab `mcmc` \tab [coda::mcmc.list] object including raw output \cr #' `$models` \tab `character` \tab implemented models used in the baSAR-model core \cr #' `$input_object` \tab `data.frame` \tab summarising table (same format as the XLS-file) including, e.g., Lx/Tx values\cr #' `$removed_aliquots` \tab `data.frame` \tab table with removed aliquots (e.g., `NaN`, or `Inf` `Lx`/`Tx` values). If nothing was removed `NULL` is returned #' } #' #'**slot:** **`@info`** #' #' The original function call #' #' ------------------------\cr #' `[ PLOT OUTPUT ]`\cr #' ------------------------\cr #' #' - (A) Ln/Tn curves with set integration limits, #' - (B) trace plots are returned by the baSAR-model, showing the convergence of the parameters (trace) #' and the resulting kernel density plots. If `plot_reduced = FALSE` for every(!) dose a trace and #' a density plot is returned (this may take a long time), #' - (C) dose plots showing the dose for every aliquot as boxplots and the marked #' HPD in within. If boxes are coloured 'orange' or 'red' the aliquot itself should be checked, #' - (D) the dose response curve resulting from the monitoring of the Bayesian modelling are #' provided along with the Lx/Tx values and the HPD. Note: The amount for curves displayed #' is limited to 1000 (random choice) for performance reasons, #' - (E) the final plot is the De distribution as calculated using the conventional (frequentist) approach #' and the central dose with the HPDs marked within. 
This figure is only provided for a comparison, #' no further statistical conclusion should be drawn from it. #' #' #' **Please note: If distribution was set to `log_normal` the central dose is given as geometric mean!** #' #' #' @section Function version: 0.1.33 #' #' @author #' Norbert Mercier, IRAMAT-CRP2A, Université Bordeaux Montaigne (France) \cr #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) \cr #' The underlying Bayesian model based on a contribution by Combès et al., 2015. #' #' @seealso [read_BIN2R], [calc_OSLLxTxRatio], [plot_GrowthCurve], #' [readxl::read_excel], [verify_SingleGrainData], #' [rjags::jags.model], [rjags::coda.samples], [boxplot.default] #' #' #' @references #' #' Combès, B., Philippe, A., Lanos, P., Mercier, N., Tribolo, C., Guerin, G., Guibert, P., Lahaye, C., 2015. #' A Bayesian central equivalent dose model for optically stimulated luminescence dating. #' Quaternary Geochronology 28, 62-70. doi:10.1016/j.quageo.2015.04.001 #' #' Mercier, N., Kreutzer, S., Christophe, C., Guerin, G., Guibert, P., Lahaye, C., Lanos, P., Philippe, A., #' Tribolo, C., 2016. Bayesian statistics in luminescence dating: The 'baSAR'-model and its implementation #' in the R package 'Luminescence'. Ancient TL 34, 14-21. #' #' **Further reading** #' #' Gelman, A., Carlin, J.B., Stern, H.S., Dunson, D.B., Vehtari, A., Rubin, D.B., 2013. #' Bayesian Data Analysis, Third Edition. CRC Press. #' #' Murray, A.S., Wintle, A.G., 2000. Luminescence dating of quartz using an improved single-aliquot #' regenerative-dose protocol. Radiation Measurements 32, 57-73. doi:10.1016/S1350-4487(99)00253-X #' #' Plummer, M., 2017. JAGS Version 4.3.0 user manual. 
`https://sourceforge.net/projects/mcmc-jags/files/Manuals/4.x/jags_user_manual.pdf/download` #' #' @note #' **If you provide more than one BIN-file**, it is **strongly** recommended to provide #' a `list` with the same number of elements for the following parameters: #' #' `source_doserate`, `signal.integral`, `signal.integral.Tx`, `background.integral`, #' `background.integral.Tx`, `sigmab`, `sig0`. #' #' Example for two BIN-files: `source_doserate = list(c(0.04, 0.006), c(0.05, 0.006))` #' #' **The function is currently limited to work with standard Risoe BIN-files only!** #' #' @keywords datagen #' #' @examples #' #' ##(1) load package test data set #' data(ExampleData.BINfileData, envir = environment()) #' #' ##(2) selecting relevant curves, and limit dataset #' CWOSL.SAR.Data <- subset( #' CWOSL.SAR.Data, #' subset = POSITION%in%c(1:3) & LTYPE == "OSL") #' #' \dontrun{ #' ##(3) run analysis #' ##please not that the here selected parameters are #' ##choosen for performance, not for reliability #' results <- analyse_baSAR( #' object = CWOSL.SAR.Data, #' source_doserate = c(0.04, 0.001), #' signal.integral = c(1:2), #' background.integral = c(80:100), #' fit.method = "LIN", #' plot = FALSE, #' n.MCMC = 200 #' #' ) #' #' print(results) #' #' #' ##XLS_file template #' ##copy and paste this the code below in the terminal #' ##you can further use the function write.csv() to export the example #' #' XLS_file <- #' structure( #' list( #' BIN_FILE = NA_character_, #' DISC = NA_real_, #' GRAIN = NA_real_), #' .Names = c("BIN_FILE", "DISC", "GRAIN"), #' class = "data.frame", #' row.names = 1L #' ) #' #' } #' #' @md #' @export analyse_baSAR <- function( object, XLS_file = NULL, aliquot_range = NULL, source_doserate = NULL, signal.integral, signal.integral.Tx = NULL, background.integral, background.integral.Tx = NULL, irradiation_times = NULL, sigmab = 0, sig0 = 0.025, distribution = "cauchy", baSAR_model = NULL, n.MCMC = 100000, fit.method = "EXP", fit.force_through_origin 
= TRUE, fit.includingRepeatedRegPoints = TRUE, method_control = list(), digits = 3L, distribution_plot = "kde", plot = TRUE, plot_reduced = TRUE, plot.single = FALSE, verbose = TRUE, ... ){ ##//////////////////////////////////////////////////////////////////////////////////////////////// ##FUNCTION TO BE CALLED to RUN the Bayesian Model ##//////////////////////////////////////////////////////////////////////////////////////////////// ##START .baSAR_function <- function(Nb_aliquots, distribution, data.Dose, data.Lum, data.sLum, fit.method, n.MCMC, fit.force_through_origin, fit.includingRepeatedRegPoints, method_control, baSAR_model, verbose) { ##lower and uppder De, grep from method_control ... for sure we find it here, ##as it was set before the function call lower_centralD <- method_control[["lower_centralD"]] upper_centralD <- method_control[["upper_centralD"]] ##number of MCMC n.chains <- if (is.null(method_control[["n.chains"]])) { 3 } else{ method_control[["n.chains"]] } ##inits inits <- if (is.null(method_control[["inits"]])) { NULL } else{ method_control[["inits"]] } ##thin thin <- if (is.null(method_control[["thin"]])) { if(n.MCMC >= 1e+05){ thin <- n.MCMC/1e+05 * 250 }else{ thin <- 10 } } else{ method_control[["thin"]] } ##variable.names variable.names <- if (is.null(method_control[["variable.names"]])) { c('central_D', 'sigma_D', 'D', 'Q', 'a', 'b', 'c', 'g') } else{ method_control[["variable.names"]] } #check whether this makes sense at all, just a direty and quick test stopifnot(lower_centralD >= 0) Limited_cycles <- vector() if (fit.method == "EXP") {ExpoGC <- 1 ; LinGC <- 0 } if (fit.method == "LIN") {ExpoGC <- 0 ; LinGC <- 1 } if (fit.method == "EXP+LIN") {ExpoGC <- 1 ; LinGC <- 1 } if (fit.force_through_origin == TRUE) {GC_Origin <- 1} else {GC_Origin <- 0} ##Include or exclude repeated dose points if (fit.includingRepeatedRegPoints) { for (i in 1:Nb_aliquots) { Limited_cycles[i] <- length(stats::na.exclude(data.Dose[,i])) } }else{ for (i in 
1:Nb_aliquots) { temp.logic <- !duplicated(data.Dose[,i], incomparables=c(0)) # logical excluding 0 m <- length(which(!temp.logic)) data.Dose[,i] <- c(data.Dose[,i][temp.logic], rep(NA, m)) data.Lum[,i] <- c(data.Lum[,i][temp.logic], rep(NA, m)) data.sLum[,i] <- c(data.sLum[,i][temp.logic], rep(NA, m)) rm(m) rm(temp.logic) } for (i in 1:Nb_aliquots) { Limited_cycles[i] <- length(data.Dose[, i]) - length(which(is.na(data.Dose[, i]))) } } ##check and correct for distribution name if(!is.null(baSAR_model)){ if(distribution != "user_defined"){ distribution <- "user_defined" warning("[analyse_baSAR()] 'distribution' set to 'user_defined'.", call. = FALSE) } } # Bayesian Models ---------------------------------------------------------------------------- # INFO: > # > sometimes lines apear to be in a wrong logical order, however, this is allowed in the # > model definition since: # > "The data block is not limited to logical relations, but may also include stochastic relations." # > (Plummer, 2017. JAGS Version 4.3.0 user manual, p. 
9) baSAR_model <- list( cauchy = "model { central_D ~ dunif(lower_centralD,upper_centralD) precision_D ~ dt(0, pow(0.16*central_D, -2), 1)T(0, ) sigma_D <- 1/sqrt(precision_D) for (i in 1:Nb_aliquots) { a[i] ~ dnorm(6.5 , 1/(9.2^2) ) T(0, ) b[i] ~ dnorm(50 , 1/(1000^2) ) T(0, ) c[i] ~ dnorm(1.002 , 1/(0.9^2) ) T(0, ) g[i] ~ dnorm(0.5 , 1/(2.5^2) ) I(-a[i], ) sigma_f[i] ~ dexp (20) D[i] ~ dt ( central_D , precision_D, 1) # Cauchy distribution S_y[1,i] <- 1/(sLum[1,i]^2 + sigma_f[i]^2) Lum[1,i] ~ dnorm ( Q[1,i] , S_y[1,i]) Q[1,i] <- GC_Origin * g[i] + LinGC * (c[i] * D[i] ) + ExpoGC * (a[i] * (1 - exp (-D[i] /b[i])) ) for (m in 2:Limited_cycles[i]) { S_y[m,i] <- 1/(sLum[m,i]^2 + sigma_f[i]^2) Lum[m,i] ~ dnorm( Q[m,i] , S_y[m,i] ) Q[m,i] <- GC_Origin * g[i] + LinGC * (c[i] * Dose[m,i]) + ExpoGC * (a[i] * (1 - exp (-Dose[m,i]/b[i])) ) } } }", normal = "model { central_D ~ dunif(lower_centralD,upper_centralD) sigma_D ~ dunif(0.01, 1 * central_D) for (i in 1:Nb_aliquots) { a[i] ~ dnorm(6.5 , 1/(9.2^2) ) T(0, ) b[i] ~ dnorm(50 , 1/(1000^2) ) T(0, ) c[i] ~ dnorm(1.002 , 1/(0.9^2) ) T(0, ) g[i] ~ dnorm(0.5 , 1/(2.5^2) ) I(-a[i], ) sigma_f[i] ~ dexp (20) D[i] ~ dnorm ( central_D , 1/(sigma_D^2) ) # Normal distribution S_y[1,i] <- 1/(sLum[1,i]^2 + sigma_f[i]^2) Lum[1,i] ~ dnorm ( Q[1,i] , S_y[1,i]) Q[1,i] <- GC_Origin * g[i] + LinGC * (c[i] * D[i] ) + ExpoGC * (a[i] * (1 - exp (-D[i] /b[i])) ) for (m in 2:Limited_cycles[i]) { S_y[m,i] <- 1/(sLum[m,i]^2 + sigma_f[i]^2) Lum[m,i] ~ dnorm( Q[m,i] , S_y[m,i] ) Q[m,i] <- GC_Origin * g[i] + LinGC * (c[i] * Dose[m,i]) + ExpoGC * (a[i] * (1 - exp (-Dose[m,i]/b[i])) ) } } }", log_normal = "model { central_D ~ dunif(lower_centralD,upper_centralD) log_central_D <- log(central_D) - 0.5 * l_sigma_D^2 l_sigma_D ~ dunif(0.01, 1 * log(central_D)) sigma_D <- sqrt((exp(l_sigma_D^2) -1) * exp( 2*log_central_D + l_sigma_D^2) ) for (i in 1:Nb_aliquots) { a[i] ~ dnorm(6.5 , 1/(9.2^2) ) T(0, ) b[i] ~ dnorm(50 , 1/(1000^2) ) T(0, ) c[i] ~ dnorm(1.002 
, 1/(0.9^2) ) T(0, ) g[i] ~ dnorm(0.5 , 1/(2.5^2) ) I(-a[i], ) sigma_f[i] ~ dexp (20) log_D[i] ~ dnorm ( log_central_D , 1/(l_sigma_D^2) ) # Log-Normal distribution D[i] <- exp(log_D[i]) S_y[1,i] <- 1/(sLum[1,i]^2 + sigma_f[i]^2) Lum[1,i] ~ dnorm ( Q[1,i] , S_y[1,i]) Q[1,i] <- GC_Origin * g[i] + LinGC * (c[i] * D[i] ) + ExpoGC * (a[i] * (1 - exp (-D[i] /b[i])) ) for (m in 2:Limited_cycles[i]) { S_y[m,i] <- 1/(sLum[m,i]^2 + sigma_f[i]^2) Lum[m,i] ~ dnorm( Q[m,i] , S_y[m,i] ) Q[m,i] <- GC_Origin * g[i] + LinGC * (c[i] * Dose[m,i]) + ExpoGC * (a[i] * (1 - exp (-Dose[m,i]/b[i])) ) } } }", user_defined = baSAR_model ) ##check whether the input for distribution was sufficient if(!any(distribution%in%names(baSAR_model))){ stop(paste0("[analyse_baSAR()] No model is pre-defined for the requested distribution. Please select ", paste(rev(names(baSAR_model))[-1], collapse = ", ")), " or define an own model using the argument 'baSAR_model'!", call. = FALSE) }else{ if(is.null(baSAR_model)){ stop("[analyse_baSAR()] You have specified a 'user_defined' distribution, but you have not provided a model via 'baSAR_model'!", call. = FALSE) } } ### Bayesian inputs data_Liste <- list( 'Dose' = data.Dose, 'Lum' = data.Lum, 'sLum' = data.sLum, 'LinGC' = LinGC, 'ExpoGC' = ExpoGC, 'GC_Origin' = GC_Origin, 'Limited_cycles' = Limited_cycles, 'lower_centralD' = lower_centralD, 'upper_centralD' = upper_centralD, 'Nb_aliquots' = Nb_aliquots ) if(verbose){ cat("\n[analyse_baSAR()] ---- baSAR-model ---- \n") cat("\n++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n") cat("[analyse_baSAR()] Bayesian analysis in progress ...\n") message(paste(".. >> bounds set to: lower_centralD =", lower_centralD, "| upper_centralD =", upper_centralD)) } Nb_Iterations <- n.MCMC if (verbose) { message(paste0( ".. 
>> calculation will be done assuming a '", distribution, "' distribution\n" )) } ##set model jagsfit <- rjags::jags.model( file = textConnection(baSAR_model[[distribution]]), data = data_Liste, inits = inits, n.chains = n.chains, n.adapt = Nb_Iterations, quiet = if(verbose){FALSE}else{TRUE} ) ##update jags model (it is a S3-method) update( object = jagsfit, n.iter = Nb_Iterations, progress.bar = if(verbose){"text"}else{NULL} ) ##get data ... full and reduced, the reduced one to limit the plot output sampling <- rjags::coda.samples( model = jagsfit, variable.names = variable.names, n.iter = Nb_Iterations, thin = thin ) ##this we need for the output of the terminal ##Why sampling reduced? Because the summary() method produces a considerable overhead while ##running over all the variables sampling_reduced <- rjags::coda.samples( model = jagsfit, variable.names = c('central_D', 'sigma_D'), n.iter = Nb_Iterations, thin = thin ) pt_zero <- 0 nb_decal <- 2 pt_zero <- Nb_aliquots ##standard error and mean output.mean <- round(summary(sampling_reduced)[[1]][c("central_D", "sigma_D"), 1:2], digits) ##calculate geometric mean for the case that the distribution is log-normal if(distribution == "log_normal"){ temp.vector <- unlist(lapply(sampling_reduced, function(x){as.vector(x[,1])})) gm <- round(exp(sum(log(temp.vector))/length(temp.vector)),digits) rm(temp.vector) }else{ gm <- NULL } ##quantiles ##68% + 95% output.quantiles <- round(summary(sampling_reduced, quantiles = c(0.025, 0.16, 0.84, 0.975))[[2]][c("central_D", "sigma_D"), 1:4], digits) #### output data.frame with results baSAR.output <- data.frame( DISTRIBUTION = distribution, NB_ALIQUOTS = Nb_aliquots, N.CHAINS = n.chains, N.MCMC = n.MCMC, FIT_METHOD = fit.method, CENTRAL = if(is.null(gm)){output.mean[1,1]}else{gm}, CENTRAL.SD = output.mean[1,2], SIGMA = output.mean[2,1], SIGMA.SD = output.mean[2,2], CENTRAL_Q_.16 = output.quantiles[1,2], CENTRAL_Q_.84 = output.quantiles[1,3], SIGMA_Q_.16 = output.quantiles[2,2], 
SIGMA_Q_.84 = output.quantiles[2,3], CENTRAL_Q_.025 = output.quantiles[1,1], CENTRAL_Q_.975 = output.quantiles[1,4], SIGMA_Q_.025 = output.quantiles[2,1], SIGMA_Q_.975 = output.quantiles[2,4] ) return( baSAR.output = list( baSAR.output_summary = baSAR.output, baSAR.output_mcmc = sampling, models = list( cauchy = baSAR_model[["cauchy"]], normal = baSAR_model[["normal"]], log_normal = baSAR_model[["log_normal"]], user_defined = baSAR_model[["user_defined"]] ) ) ) } ##END ##//////////////////////////////////////////////////////////////////////////////////////////////// # Integrity tests ----------------------------------------------------------------------------- ##check whether rjags is available ##code snippet taken from ##http://r-pkgs.had.co.nz/description.html if (!requireNamespace("rjags", quietly = TRUE)) { stop("[analyse_baSAR()] To use this function you have to first install the package 'rjags'.", call. = FALSE) } if (!requireNamespace("coda", quietly = TRUE)) { stop("[analyse_baSAR()] To use this function you have to first install the package 'coda'.", call. = FALSE) } #capture additional piped arguments additional_arguments <- list( ##verify_SingleGrainData threshold = 30, ##calc_OSLLxTxRatio() background.count.distribution = "non-poisson", ##readxl::read_excel() sheet = 1, col_names = TRUE, col_types = NULL, skip = 0, ##read_BIN2R() n.records = NULL, duplicated.rm = TRUE, position = NULL, pattern = NULL, ##plot_GrowthCurve() fit.weights = TRUE, fit.bounds = TRUE, NumberIterations.MC = 100, output.plot = if(plot){TRUE}else{FALSE}, output.plotExtended = if(plot){TRUE}else{FALSE} ) #modify this list on purpose additional_arguments <- modifyList(x = additional_arguments, val = list(...)) ##set function arguments function_arguments <- NULL ##SET fit.method if (fit.method != "EXP" & fit.method != "EXP+LIN" & fit.method != "LIN"){ stop("[analyse_baSAR()] Unsupported fitting method. Supported: 'EXP', 'EXP+LIN' and 'LIN'", call. 
= FALSE) } # Set input ----------------------------------------------------------------------------------- ##if the input is alreayd of type RLum.Results, use the input and do not run ##all pre-calculations again if(is(object, "RLum.Results")){ if(object@originator == "analyse_baSAR"){ ##We want to use previous function arguments and recycle them ##(1) get information you need as input from the RLum.Results object function_arguments <- as.list(object@info$call) ##(2) overwrite by current provided arguments ##by using a new argument we have the choise which argument is allowed for ##changes function_arguments.new <- modifyList(x = function_arguments, val = as.list(match.call())) ##get maximum cycles max_cycles <- max(object$input_object[["CYCLES_NB"]]) ##set Nb_aliquots Nb_aliquots <- nrow(object$input_object) ##return NULL if not a minium of three aliquots are used for the calculation if(Nb_aliquots < 2){ try(stop("[analyse_baSAR()] number of aliquots < 3, this makes no sense, NULL returned!", call. = FALSE)) return(NULL) } ##set variables ##Why is.null() ... it prevents that in case of a function crash is nothing is provided ... 
##set changeable function arguments ##distribution if(!is.null(function_arguments.new$distribution)){ distribution <- function_arguments.new$distribution } ##n.MCMC if(!is.null(function_arguments.new$n.MCMC)){ n.MCMC <- function_arguments.new$n.MCMC } ##fit.method if(!is.null(function_arguments.new$fit.method)){ fit.method <- function_arguments.new$fit.method } ## fit.force_through_origin if(!is.null(function_arguments.new$fit.force_through_origin)){ fit.force_through_origin <- function_arguments.new$fit.force_through_origin } ##fit.includingRepeatedRegPoints if(!is.null(function_arguments.new$fit.includingRepeatedRegPoints)){ fit.includingRepeatedRegPoints <- function_arguments.new$fit.includingRepeatedRegPoints } ##source_doserate if(length(as.list(match.call())$source_doserate) > 0){ warning("[analyse_baSAR()] Argument 'source_doserate' is ignored in this modus, as it was alreay set.", call. = FALSE) } ##aliquot_range if(!is.null(function_arguments.new$aliquot_range)){ aliquot_range <- eval(function_arguments.new$aliquot_range) } ##method_control if(!is.null(function_arguments.new$method_control)){ method_control <- eval(function_arguments.new$method_control) } ##baSAR_model if(!is.null(function_arguments.new$baSAR_model)){ baSAR_model <- eval(function_arguments.new$baSAR_model) } ##plot if(!is.null(function_arguments.new$plot)){ plot <- function_arguments.new$plot } ##verbose if(!is.null(function_arguments.new$verbose)){ verbose <- function_arguments.new$verbose } ##limit according to aliquot_range ##TODO Take care of the case that this was provided, otherwise more and more is removed! if (!is.null(aliquot_range)) { if (max(aliquot_range) <= nrow(object$input_object)) { input_object <- object$input_object[aliquot_range, ] ##update list of removed aliquots removed_aliquots <-rbind(object$removed_aliquots, object$input_object[-aliquot_range,]) ##correct Nb_aliquots Nb_aliquots <- nrow(input_object) } else{ try(stop("[analyse_basAR()] aliquot_range out of bounds! 
Input ignored!", call. = FALSE)) ##reset aliquot range aliquot_range <- NULL ##take entire object input_object <- object$input_object ##set removed aliquots removed_aliquots <- object$removed_aliquots } } else{ ##set the normal case input_object <- object$input_object ##set removed aliquots removed_aliquots <- object$removed_aliquots } ##set non function arguments Doses <- t(input_object[,9:(8 + max_cycles)]) LxTx <- t(input_object[,(9 + max_cycles):(8 + 2 * max_cycles)]) LxTx.error <- t(input_object[,(9 + 2 * max_cycles):(8 + 3 * max_cycles)]) rm(max_cycles) }else{ stop("[analyse_baSAR()] 'object' is of type 'RLum.Results', but has not been produced by analyse_baSAR()!", call. = FALSE) } }else{ if(verbose){ cat("\n[analyse_baSAR()] ---- PRE-PROCESSING ----\n") } ##Supported input types are: ## (1) BIN-file ## .. list ## .. character ## (2) RisoeBINfileData object ## .. list ## .. S4 ## (3) RLum.Analyis objects ## .. list ## .. S4 ##In case an RLum.Analysis object is provided we try an ugly conversion only if(inherits(object, "list") && all(vapply(object, function(x){inherits(x, "RLum.Analysis")}, logical(1)))){ if(verbose) cat("[analyse_baSAR()] List of RLum.Analysis-objects detected .. ") ##stop for only one element if(length(object) < 2) stop("[analyse_baSAR()] At least two aliquots are needed for the calculation!", call. = FALSE) ##set number of objects if(inherits(object, "list")){ n_objects <- length(object) }else{ n_objects <- 1 } ##extract wanted curves if(verbose) cat("\n\t\t .. extract 'OSL (UVVIS)' and 'irradiation (NA)'") object <- get_RLum(object, recordType = c("OSL (UVVIS)", "irradiation (NA)"), drop = FALSE) ##extract irradiation times if(is.null(irradiation_times)){ if(verbose) cat("\n\t\t .. extract irradiation times") irradiation_times <- extract_IrradiationTimes(object[[1]])$irr.times$IRR_TIME } ##run conversion if(verbose) cat("\n\t\t .. 
run conversion") object <- try(convert_RLum2Risoe.BINfileData(object), silent = TRUE) ##create fallback if(inherits(object, "try-error")){ stop("[analyse_baSAR()] Object conversion failed. Return NULL!", call. = FALSE) return(NULL) } ##assign irradiation times if(is.null(irradiation_times)){ if(verbose) cat("\n\t\t .. set irradiation times") object@METADATA[["IRR_TIME"]] <- rep(irradiation_times,n_objects) } ##remove none-OSL curves if(verbose && !all("OSL" %in% object@METADATA[["LTYPE"]])){ cat("\n\t\t .. remove non-OSL curves") rm_id <- which(object@METADATA[["LTYPE"]] != "OSL") object@METADATA <- object@METADATA[-rm_id,] object@DATA[rm_id] <- NULL ##reset index object@METADATA[["ID"]] <- 1:length(object@METADATA[["ID"]]) ##delete objects rm(rm_id) } } if (is(object, "Risoe.BINfileData")) { fileBIN.list <- list(object) } else if (is(object, "list")) { ##check what the list containes ... object_type <- unique(unlist(lapply( 1:length(object), FUN = function(x) { is(object[[x]])[1] } ))) if (length(object_type) == 1) { if (object_type == "Risoe.BINfileData") { fileBIN.list <- object } else if (object_type == "character") { fileBIN.list <- read_BIN2R( file = object, position = additional_arguments$position, duplicated.rm = additional_arguments$duplicated.rm, n.records = additional_arguments$n.records, pattern = additional_arguments$pattern, verbose = verbose ) } else{ stop( "[analyse_baSAR()] data type in the input list provided for 'object' is not supported!", call. = FALSE ) } } else{ stop("[analyse_baSAR()] 'object' only accepts a list with objects of similar type!", call. = FALSE) } } else if (is(object, "character")) { fileBIN.list <- list( read_BIN2R( file = object, position = additional_arguments$position, duplicated.rm = additional_arguments$duplicated.rm, n.records = additional_arguments$n.records, verbose = verbose ) ) } else{ stop( paste0( "[analyse_baSAR()] '", is(object)[1], "' as input is not supported. Check manual for allowed input objects." ), call. 
= FALSE ) } ##Problem ... the user might have made a pre-selection in the Analyst software, if this the ##we respect this selection if(!all(unlist(lapply(fileBIN.list, FUN = function(x){(x@METADATA[["SEL"]])})))){ fileBIN.list <- lapply(fileBIN.list, function(x){ ##reduce data x@DATA <- x@DATA[x@METADATA[["SEL"]]] x@METADATA <- x@METADATA[x@METADATA[["SEL"]], ] ##reset index x@METADATA[["ID"]] <- 1:nrow(x@METADATA) return(x) }) if(verbose){ cat("\n[analyse_baSAR()] Record pre-selection in BIN-file detected >> record reduced to selection") } } # Declare variables --------------------------------------------------------------------------- Dose <- list() LxTx <- list() sLxTx <- list() Disc <- list() Grain <- list() Disc_Grain.list <- list() Nb_aliquots <- 0 previous.Nb_aliquots <- 0 object.file_name <- list() Mono_grain <- TRUE Limited_cycles <- vector() ##set information for (i in 1 : length(fileBIN.list)) { Disc[[i]] <- list() Grain[[i]] <- list() ##get BIN-file name object.file_name[[i]] <- unique(fileBIN.list[[i]]@METADATA[["FNAME"]]) } ##check for duplicated entries; remove them as they would cause a function crash if(any(duplicated(unlist(object.file_name)))){ ##provide messages if(verbose){ message(paste0( "[analyse_baSAR()] '", paste( object.file_name[which(duplicated(unlist(object.file_name)))], collapse = ", ", "' is a duplicate and therefore removed from the input!" ) )) } warning(paste0( "[analyse_baSAR()] '", paste( object.file_name[which(duplicated(unlist(object.file_name)))], collapse = ", ", "' is a duplicate and therefore removed from the input!" 
) )) ##remove entry Disc[which(duplicated(unlist(object.file_name)))] <- NULL Grain[which(duplicated(unlist(object.file_name)))] <- NULL fileBIN.list[which(duplicated(unlist(object.file_name)))] <- NULL object.file_name[which(duplicated(unlist(object.file_name)))] <- NULL } # Expand parameter list ----------------------------------------------------------------------- ##test_parameter = source_doserate if(!is.null(source_doserate)){ if(is(source_doserate, "list")){ source_doserate <- rep(source_doserate, length = length(fileBIN.list)) }else{ source_doserate <- rep(list(source_doserate), length = length(fileBIN.list)) } }else{ stop("[analyse_baSAR()] 'source_doserate' is missing, but required as the current implementation expects dose values in Gy!", call. = FALSE) } ##sigmab if(is(sigmab, "list")){ sigmab <- rep(sigmab, length = length(fileBIN.list)) }else{ sigmab <- rep(list(sigmab), length = length(fileBIN.list)) } ##sig0 if(is(sig0, "list")){ sig0 <- rep(sig0, length = length(fileBIN.list)) }else{ sig0 <- rep(list(sig0), length = length(fileBIN.list)) } ##test_parameter = signal.integral if(is(signal.integral, "list")){ signal.integral <- rep(signal.integral, length = length(fileBIN.list)) }else{ signal.integral <- rep(list(signal.integral), length = length(fileBIN.list)) } ##test_parameter = signal.integral.Tx if (!is.null(signal.integral.Tx)) { if (is(signal.integral.Tx, "list")) { signal.integral.Tx <- rep(signal.integral.Tx, length = length(fileBIN.list)) } else{ signal.integral.Tx <- rep(list(signal.integral.Tx), length = length(fileBIN.list)) } } ##test_parameter = background.integral if(is(background.integral, "list")){ background.integral <- rep(background.integral, length = length(fileBIN.list)) }else{ background.integral <- rep(list(background.integral), length = length(fileBIN.list)) } ##test_parameter = background.integral if(is(background.integral, "list")){ background.integral <- rep(background.integral, length = length(fileBIN.list)) }else{ 
background.integral <- rep(list(background.integral), length = length(fileBIN.list)) } ##test_parameter = background.integral.Tx if (!is.null(background.integral.Tx)) { if (is(background.integral.Tx, "list")) { background.integral.Tx <- rep(background.integral.Tx, length = length(fileBIN.list)) } else{ background.integral.Tx <- rep(list(background.integral.Tx), length = length(fileBIN.list)) } } # Read EXCEL sheet ---------------------------------------------------------------------------- if(is.null(XLS_file)){ ##select aliquots giving light only, this function accepts also a list as input if(verbose){ cat("\n[analyse_baSAR()] No XLS-file provided, running automatic grain selection ...") } for (k in 1:length(fileBIN.list)) { ##if the uses provides only multiple grain data (GRAIN == 0), the verification ##here makes not really sense and should be skipped if(length(unique(fileBIN.list[[k]]@METADATA[["GRAIN"]])) > 1){ aliquot_selection <- verify_SingleGrainData( object = fileBIN.list[[k]], cleanup_level = "aliquot", threshold = additional_arguments$threshold, cleanup = FALSE ) ##remove grain position 0 (this are usually TL measurements on the cup or we are talking about multipe aliquot) if (sum(aliquot_selection$unique_pairs[["GRAIN"]] == 0, na.rm = TRUE) > 0) { warning( paste( "[analyse_baSAR()] Automatic grain selection:", sum(aliquot_selection$unique_pairs[["GRAIN"]] == 0, na.rm = TRUE), "curve(s) with grain index 0 had been removed from the dataset." ), call. = FALSE ) } datalu <- aliquot_selection$unique_pairs[!aliquot_selection$unique_pairs[["GRAIN"]] == 0,] if(nrow(datalu) == 0){ try(stop("[analyse_baSAR()] Sorry, nothing was left after the automatic grain selection! NULL returned!", call. = FALSE)) return(NULL) } }else{ warning("[analyse_baSAR()] Only multiple grain data provided, automatic selection skipped!", call. 
= FALSE) datalu <- unique(fileBIN.list[[k]]@METADATA[, c("POSITION", "GRAIN")]) ##set mono grain to FALSE Mono_grain <- FALSE aliquot_selection <- NA } ##get number of aliquots (one aliquot has a position and a grain number) Nb_aliquots <- nrow(datalu) ##write information in variables Disc[[k]] <- datalu[["POSITION"]] Grain[[k]] <- datalu[["GRAIN"]] ##free memory rm(datalu, aliquot_selection) } rm(k) } else if (is(XLS_file, "data.frame") || is(XLS_file, "character")) { ##load file if we have an XLS file if (is(XLS_file, "character")) { ##test for valid file if(!file.exists(XLS_file)){ stop("[analyse_baSAR()] XLS_file does not exist!", call. = FALSE) } ##import Excel sheet datalu <- as.data.frame(readxl::read_excel( path = XLS_file, sheet = additional_arguments$sheet, col_names = additional_arguments$col_names, col_types = additional_arguments$col_types, skip = additional_arguments$skip ), stringsAsFactors = FALSE) ###check whether data format is somehow odd, check only the first three columns if(!all(grepl(colnames(datalu), pattern = " ")[1:3])){ stop("[analyse_baSAR()] One of the first three columns in your XLS_file has no column header. Your XLS_file requires at least three columns for 'BIN_file', 'DISC' and 'GRAIN'", call. = FALSE) } ##get rid of empty rows if the BIN_FILE name column is empty datalu <- datalu[!is.na(datalu[[1]]), ] } else{ datalu <- XLS_file ##check number of number of columns in data.frame if(ncol(datalu) < 3){ stop("[analyse_baSAR()] The data.frame provided via XLS_file should consist of at least three columns (see manual)!", call. = FALSE) } ##problem: the first column should be of type character, the others are ##of type numeric, unfortunately it is too risky to rely on the user, we do the ##proper conversion by ourself ... 
datalu[[1]] <- as.character(datalu[[1]]) datalu[[2]] <- as.numeric(datalu[[2]]) datalu[[3]] <- as.numeric(datalu[[3]]) } ##limit aliquot range if (!is.null(aliquot_range)) { datalu <- datalu[aliquot_range,] } Nb_ali <- 0 k <- NULL for (nn in 1:length((datalu[, 1]))) { if (!is.na(datalu[nn, 1])) { ##check whether one file fits if (any(grepl( pattern = strsplit( x = basename(datalu[nn, 1]), split = ".", fixed = TRUE )[[1]][1], x = unlist(object.file_name) ))) { k <- grep(pattern = strsplit( x = basename(datalu[nn, 1]), split = ".", fixed = TRUE )[[1]][1], x = unlist(object.file_name)) nj <- length(Disc[[k]]) + 1 Disc[[k]][nj] <- as.numeric(datalu[nn, 2]) Grain[[k]][nj] <- as.numeric(datalu[nn, 3]) Nb_ali <- Nb_ali + 1 if (is.na(Grain[[k]][nj]) || Grain[[k]][nj] == 0) { Mono_grain <- FALSE } }else{ warning( paste0("[analyse_baSAR] '", (datalu[nn, 1]), "' not recognized or not loaded; skipped!"), call. = FALSE ) } } else{ if (Nb_ali == 0) { stop("[analyse_baSAR()] Nb. discs/grains = 0 !", call. = FALSE) } break() } } ##if k is NULL it means it was not set so far, so there was ##no corresponding BIN-file found if(is.null(k)){ stop("[analyse_baSAR()] BIN-file names in XLS-file do not fit to the loaded BIN-files!", call. = FALSE) } } else{ stop("[analyse_baSAR()] input type for 'XLS_file' not supported!", call. = FALSE) } ###################################### loops on files_number for (k in 1:length(fileBIN.list)) { Disc_Grain.list[[k]] <- list() # data.file number n_aliquots_k <- length((Disc[[k]])) if(n_aliquots_k == 0){ fileBIN.list[[k]] <- NULL if(verbose){ message(paste("[analyse_baSAR()] No data has been seletecd from BIN-file", k, ">> BIN-file removed from input!")) } warning(paste("[analyse_baSAR()] No data has been seletecd from BIN-file", k, ">> BIN-file removed from input!"), call. 
= FALSE) next() } for (d in 1:n_aliquots_k) { dd <- as.integer(unlist(Disc[[k]][d])) Disc_Grain.list[[k]][[dd]] <- list() # data.file number , disc_number } for (d in 1:n_aliquots_k) { dd <- as.integer(unlist(Disc[[k]][d])) if (Mono_grain == FALSE) { gg <- 1 } if (Mono_grain == TRUE) { gg <- as.integer(unlist(Grain[[k]][d]))} Disc_Grain.list[[k]][[dd]][[gg]] <- list() # data.file number , disc_number, grain_number for (z in 1:6) { Disc_Grain.list[[k]][[dd]][[gg]][[z]] <- list() # 1 = index numbers, 2 = irradiation doses, 3 = LxTx , 4 = sLxTx, 5 = N d'aliquot, 6 = De +- D0 +- (4 values) } } } if(verbose){ cat("\n[analyse_baSAR()] Preliminary analysis in progress ... ") cat("\n[analyse_baSAR()] Hang on, this may take a while ... \n") } for (k in 1:length(fileBIN.list)) { n_index.vector <- vector("numeric") measured_discs.vector <- vector("numeric") measured_grains.vector <- vector("numeric") measured_grains.vector_list <- vector("numeric") irrad_time.vector <- vector("numeric") disc_pos <- vector("numeric") grain_pos <- vector("numeric") ### METADATA length_BIN <- length(fileBIN.list[[k]]) n_index.vector <- fileBIN.list[[k]]@METADATA[["ID"]][1:length_BIN] # curves indexes vector measured_discs.vector <- fileBIN.list[[k]]@METADATA[["POSITION"]][1:length_BIN] # measured discs vector measured_grains.vector <- fileBIN.list[[k]]@METADATA[["GRAIN"]][1:length_BIN] # measured grains vector if(is.null(irradiation_times)){ irrad_time.vector <- fileBIN.list[[k]]@METADATA[["IRR_TIME"]][1:length_BIN] # irradiation durations vector }else{ irrad_time.vector <- rep(irradiation_times,n_objects) } ##if all irradiation times are 0 we should stop here if (length(unique(irrad_time.vector)) == 1) { try(stop( "[analyse_baSAR()] It appears the the irradiation times are all the same. Analysis stopped and NULL returned!", call. 
= FALSE )) return(NULL) } disc_pos <- as.integer(unlist(Disc[[k]])) grain_pos <- as.integer(unlist(Grain[[k]])) ### Automatic Filling - Disc_Grain.list for (i in 1: length(Disc[[k]])) { disc_selected <- as.integer(Disc[[k]][i]) if (Mono_grain == TRUE) {grain_selected <- as.integer(Grain[[k]][i])} else { grain_selected <-0} ##hard break if the disc number or grain number does not fit ##disc (position) disc_logic <- (disc_selected == measured_discs.vector) if (!any(disc_logic)) { try(stop( paste0( "[analyse_baSAR()] In BIN-file '", unique(fileBIN.list[[k]]@METADATA[["FNAME"]]), "' position number ", disc_selected, " does not exist! NULL returned!" ), call. = FALSE )) return(NULL) } ##grain grain_logic <- (grain_selected == measured_grains.vector) if (!any(grain_logic)) { try(stop( paste0( "[analyse_baSAR()] In BIN-file '", unique(fileBIN.list[[k]]@METADATA[["FNAME"]]), "' grain number ", grain_selected, " does not exist! NULL returned!" ), call. = FALSE )) return(NULL) } ##if the test passed, compile index list index_liste <- n_index.vector[disc_logic & grain_logic] if (Mono_grain == FALSE) {grain_selected <-1} for (kn in 1: length(index_liste)) { t <- index_liste[kn] ##check if the source_doserate is NULL or not if(!is.null(unlist(source_doserate))){ dose.value <- irrad_time.vector[t] * unlist(source_doserate[[k]][1]) }else{ dose.value <- irrad_time.vector[t] } s <- 1 + length( Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[1]] ) Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[1]][s] <- n_index.vector[t] # indexes if ( s%%2 == 1) { Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[2]][as.integer(1+s/2)] <- dose.value } # irradiation doses } } } ###################### Data associated with a single Disc/Grain max_cycles <- 0 count <- 1 calc_OSLLxTxRatio_warning <- list() for (k in 1:length(fileBIN.list)) { if (Mono_grain == TRUE) (max.grains <- 100) else (max.grains <- 1) ##plot Ln and Tn curves if wanted ##we want to plot the Ln and Tn 
curves to get a better feeling ##The approach here is rather rough coded, but it works if (plot) { curve_index <- vapply((1:length(Disc[[k]])), function(i) { disc_selected <- as.integer(Disc[[k]][i]) if (Mono_grain == TRUE) { grain_selected <- as.integer(Grain[[k]][i]) } else { grain_selected <- 1 } Ln_index <- as.numeric(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[1]][1]) Tn_index <- as.numeric(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[1]][2]) return(c(Ln_index, Tn_index)) }, FUN.VALUE = vector(mode = "numeric", length = 2)) ##set matrix for Ln values Ln_matrix <- cbind(1:length(fileBIN.list[[k]]@DATA[[curve_index[1, 1]]]), matrix(unlist(fileBIN.list[[k]]@DATA[curve_index[1, ]]), ncol = ncol(curve_index))) Tn_matrix <- cbind(1:length(fileBIN.list[[k]]@DATA[[curve_index[2, 1]]]), matrix(unlist(fileBIN.list[[k]]@DATA[curve_index[2, ]]), ncol = ncol(curve_index))) ##open plot are if(!plot.single){ par.default <- par()$mfrow par(mfrow = c(1, 2)) } ##get natural curve and combine them in matrix graphics::matplot( x = Ln_matrix[, 1], y = Ln_matrix[, -1], col = rgb(0, 0, 0, 0.3), ylab = "Luminescence [a.u.]", xlab = "Channel", main = expression(paste(L[n], " - curves")), type = "l" ) ##add integration limits abline(v = range(signal.integral[[k]]), lty = 2, col = "green") abline(v = range(background.integral[[k]]), lty = 2, col = "red") mtext(paste0("ALQ: ",count, ":", count + ncol(curve_index))) graphics::matplot( x = Tn_matrix[, 1], y = Tn_matrix[, -1], col = rgb(0, 0, 0, 0.3), ylab = "Luminescence [a.u.]", xlab = "Channel", main = expression(paste(T[n], " - curves")), type = "l" ) ##add integration limits depending on the choosen value if(is.null(signal.integral.Tx[[k]])){ abline(v = range(signal.integral[[k]]), lty = 2, col = "green") }else{ abline(v = range(signal.integral.Tx[[k]]), lty = 2, col = "green") } if(is.null(background.integral.Tx[[k]])){ abline(v = range(background.integral[[k]]), lty = 2, col = "red") }else{ abline(v = 
range(background.integral.Tx[[k]]), lty = 2, col = "red") } mtext(paste0("ALQ: ",count, ":", count + ncol(curve_index))) ##reset par if(!plot.single){ par(mfrow = par.default) } ##remove some variables rm(curve_index, Ln_matrix, Tn_matrix) } for (i in 1:length(Disc[[k]])) { disc_selected <- as.integer(Disc[[k]][i]) if (Mono_grain == TRUE) { grain_selected <- as.integer(Grain[[k]][i]) } else { grain_selected <- 1 } # Data for the selected Disc-Grain for (nb_index in 1:((length(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[1]]))/2 )) { index1 <- as.numeric(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[1]][2*nb_index-1]) index2 <- as.numeric(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[1]][2*nb_index]) Lx.data <- data.frame(seq(1:length( fileBIN.list[[k]]@DATA[[index1]])), fileBIN.list[[k]]@DATA[[index1]]) Tx.data <- data.frame(seq(1:length( fileBIN.list[[k]]@DATA[[index2]])), fileBIN.list[[k]]@DATA[[index2]]) ## call calc_OSLLxTxRatio() ## we run this function with a warnings catcher to reduce the load of warnings for the user temp_LxTx <- withCallingHandlers( calc_OSLLxTxRatio( Lx.data = Lx.data, Tx.data = Tx.data, signal.integral = signal.integral[[k]], signal.integral.Tx = signal.integral.Tx[[k]], background.integral = background.integral[[k]], background.integral.Tx = background.integral.Tx[[k]], background.count.distribution = additional_arguments$background.count.distribution, sigmab = sigmab[[k]], sig0 = sig0[[k]] ), warning = function(c) { calc_OSLLxTxRatio_warning[[i]] <<- c invokeRestart("muffleWarning") } ) ##get LxTx table LxTx.table <- temp_LxTx$LxTx.table Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[3]][nb_index] <- LxTx.table[[9]] Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[4]][nb_index] <- LxTx.table[[10]] Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[5]][nb_index] <- LxTx.table[[7]] ##free memory rm(LxTx.table) rm(temp_LxTx) } # Fitting Growth curve and Plot sample_dose <- 
unlist(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[2]]) sample_LxTx <- unlist(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[3]]) sample_sLxTx <- unlist(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[4]]) TnTx <- unlist(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[5]]) ##create needed data.frame (this way to make sure that rows are doubled if something is missing) selected_sample <- as.data.frame(cbind(sample_dose, sample_LxTx, sample_sLxTx, TnTx)) print(additional_arguments) ##call plot_GrowthCurve() to get De and De value fitcurve <- suppressWarnings(plot_GrowthCurve( sample = selected_sample, na.rm = TRUE, fit.method = fit.method, fit.force_through_origin = fit.force_through_origin, fit.weights = additional_arguments$fit.weights, fit.includingRepeatedRegPoints = fit.includingRepeatedRegPoints, fit.bounds = additional_arguments$fit.bounds, NumberIterations.MC = additional_arguments$NumberIterations.MC, output.plot = additional_arguments$output.plot, output.plotExtended = additional_arguments$output.plotExtended, txtProgressBar = FALSE, verbose = verbose, main = paste0("ALQ: ", count," | POS: ", Disc[[k]][i], " | GRAIN: ", Grain[[k]][i]) )) ##get data.frame with De values if(!is.null(fitcurve)){ fitcurve_De <- get_RLum(fitcurve, data.object = "De") Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[6]][1] <- fitcurve_De[["De"]] Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[6]][2] <- fitcurve_De[["De.Error"]] Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[6]][3] <- fitcurve_De[["D01"]] Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[6]][4] <- fitcurve_De[["D01.ERROR"]] }else{ ##we have to do this, otherwise the grains will be sorted out Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[6]][1:4] <- NA } Limited_cycles[previous.Nb_aliquots + i] <- length(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[2]]) if 
(length(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[2]]) > max_cycles) { max_cycles <- length(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[2]]) } previous.Nb_aliquots <- length(stats::na.exclude(Limited_cycles)) # Total count of aliquots count <- count + 1 } } ## END of loop on BIN files rm(count) ##evaluate warnings from calc_OSLLxTxRatio() if(length(calc_OSLLxTxRatio_warning)>0){ w_table <- table(unlist(calc_OSLLxTxRatio_warning)) w_table_names <- names(w_table) for(w in 1:length(w_table)){ warning(paste(w_table_names[w], "This warning occurred", w_table[w], "times!"), call. = FALSE) } rm(w_table) rm(w_table_names) } rm(calc_OSLLxTxRatio_warning) Nb_aliquots <- previous.Nb_aliquots ##create results matrix OUTPUT_results <- matrix(nrow = Nb_aliquots, ncol = (8 + 3 * max_cycles), byrow = TRUE) ## set column name (this makes it much easier to debug) colnames(OUTPUT_results) <- c( "INDEX_BINfile", "DISC", "GRAIN", "DE", "DE.SD", "D0", "D0.SD", "CYCLES_NB", paste0("DOSE_", 1:max_cycles), paste0("LxTx_", 1:max_cycles), paste0("LxTx_", 1:max_cycles, ".SD") ) comptage <- 0 for (k in 1:length(fileBIN.list)) { for (i in 1:length(Disc[[k]])) { disc_selected <- as.numeric(Disc[[k]][i]) if (Mono_grain == TRUE) { grain_selected <- as.numeric(Grain[[k]][i]) } else { grain_selected <- 1 } comptage <- comptage + 1 OUTPUT_results[comptage, 1] <- k OUTPUT_results[comptage, 2] <- as.numeric(disc_selected) if (Mono_grain == TRUE) { OUTPUT_results[comptage, 3] <- grain_selected } else { OUTPUT_results[comptage, 3] <- 0 } if (length(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[6]]) != 0) { ##DE OUTPUT_results[comptage, 4] <- as.numeric(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[6]][1]) ##DE.SD OUTPUT_results[comptage, 5] <- as.numeric(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[6]][2]) ##D0 OUTPUT_results[comptage, 6] <- as.numeric(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[6]][3]) ##D0.SD 
OUTPUT_results[comptage, 7] <- as.numeric(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[6]][4]) ##CYCLES_NB OUTPUT_results[comptage, 8] <- length(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[2]]) ##auxillary variable llong <- length(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[2]]) ##Dose OUTPUT_results[comptage, 9:(8 + llong)] <- as.numeric(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[2]]) ##LxTx values OUTPUT_results[comptage, (9 + max_cycles):(8 + max_cycles + llong)] <- as.numeric(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[3]]) ##LxTx SD values OUTPUT_results[comptage, (9 + 2*max_cycles):(8 + 2*max_cycles + llong)] <- as.numeric(Disc_Grain.list[[k]][[disc_selected]][[grain_selected]][[4]]) } } } ##Clean matrix and remove all unwanted entries ##remove all NA columns, means all NA columns in POSITION and DISC ##this NA values are no calculation artefacts, but coming from the data processing and have ##no further value OUTPUT_results <- OUTPUT_results[!is.na(OUTPUT_results[,2]),] ##clean up NaN values in the LxTx and corresponding error values ##the transposition of the matrix may increase the performance for very large matricies OUTPUT_results_reduced <- t(OUTPUT_results) selection <- vapply(X = 1:ncol(OUTPUT_results_reduced), FUN = function(x){ !any(is.nan(OUTPUT_results_reduced[9:(8+3*max_cycles), x]) | is.infinite(OUTPUT_results_reduced[9:(8+3*max_cycles), x])) }, FUN.VALUE = vector(mode = "logical", length = 1)) removed_aliquots <- t(OUTPUT_results_reduced[,!selection]) OUTPUT_results_reduced <- t(OUTPUT_results_reduced[,selection]) ##finally, check for difference in the number of dose points ... they should be the same if(length(unique(OUTPUT_results_reduced[,"CYCLES_NB"])) > 1){ warning("[analyse_baSAR()] The number of dose points differs across your data set. Check your data!", call. 
= FALSE) } ##correct number of aliquots if necessary if(Nb_aliquots > nrow(OUTPUT_results_reduced)) { Nb_aliquots <- nrow(OUTPUT_results_reduced) warning( paste0( "[analyse_baSAR()] 'Nb_aliquots' corrected due to NaN or Inf values in Lx and/or Tx to ", Nb_aliquots, ". You might want to check 'removed_aliquots' in the function output."), call. = FALSE) } ##Prepare for Bayesian analysis Doses <- t(OUTPUT_results_reduced[,9:(8 + max_cycles)]) LxTx <- t(OUTPUT_results_reduced[, (9 + max_cycles):(8 + 2 * max_cycles)]) LxTx.error <- t(OUTPUT_results_reduced[, (9 + 2 * max_cycles):(8 + 3 * max_cycles)]) ##prepare data frame for output that can used as input input_object <- data.frame( BIN_FILE = unlist(object.file_name)[OUTPUT_results_reduced[[1]]], OUTPUT_results_reduced[, -1], stringsAsFactors = FALSE ) ##prepare data frame for output that shows rejected aliquots if (length(removed_aliquots) > 0) { removed_aliquots <- as.data.frame(removed_aliquots, stringsAsFactors = FALSE) removed_aliquots <- cbind(BIN_FILE = unlist(object.file_name)[removed_aliquots[[1]]], removed_aliquots[, -1]) }else{ removed_aliquots <- NULL } } # Call baSAR-function ------------------------------------------------------------------------- ##check for the central_D bound settings ##Why do we use 0 and 1000: Combes et al., 2015 wrote ## that "We set the bounds for the prior on the central dose D, Dmin = 0 Gy and ## Dmax = 1000 Gy, to cover the likely range of possible values for D. ##check if something is set in method control, if not, set it if (is.null(method_control[["upper_centralD"]])) { method_control <- c(method_control, upper_centralD = 1000) }else{ if(distribution == "normal" | distribution == "cauchy" | distribution == "log_normal"){ warning("[analyse_baSAR()] You have modified the upper central_D boundary, while applying a predefined model. This is possible but not recommended!", call. 
= FALSE) } } ##we do the same for the lower_centralD, just to have everthing in one place if (is.null(method_control[["lower_centralD"]])) { method_control <- c(method_control, lower_centralD = 0) }else{ if(distribution == "normal" | distribution == "cauchy" | distribution == "log_normal"){ warning("[analyse_baSAR()] You have modified the lower central_D boundary while applying a predefined model. This is possible but not recommended!", call. = FALSE) } } if(min(input_object[["DE"]][input_object[["DE"]] > 0], na.rm = TRUE) < method_control$lower_centralD | max(input_object[["DE"]], na.rm = TRUE) > method_control$upper_centralD){ warning("[analyse_baSAR()] Your set lower_centralD and/or upper_centralD value seem to do not fit to your input data. This may indicate a wronlgy set 'source_doserate'.", call. = FALSE) } ##>> try here is much better, as the user might run a very long preprocessing and do not ##want to fail here results <- try(.baSAR_function( Nb_aliquots = Nb_aliquots, distribution = distribution, data.Dose = Doses, data.Lum = LxTx, data.sLum = LxTx.error, fit.method = fit.method, n.MCMC = n.MCMC, fit.force_through_origin = fit.force_through_origin, fit.includingRepeatedRegPoints = fit.includingRepeatedRegPoints, method_control = method_control, baSAR_model = baSAR_model, verbose = verbose )) ##check whether this became NULL if(!is(results, "try-error")){ ##how do we add the systematic error? 
##(1) source_doserate is a list, not a vector, but the user can ##provide many source dose rates and he can provide only a single vector (no error) if(!is.null(unlist(source_doserate)) || !is.null(function_arguments$source_doserate)){ ##if it comes from the previous call, it is, unfortunately not that simple if(!is.null(function_arguments$source_doserate)){ source_doserate <- eval(function_arguments$source_doserate) if(!is(source_doserate, "list")){ source_doserate <- list(source_doserate) } } systematic_error <- unlist(lapply(source_doserate, function(x){ if(length(x) == 2) { x[2] } else{ NULL } })) }else{ systematic_error <- 0 } ##state are warning for very different errors if(mean(systematic_error) != systematic_error[1]){ warning("[analyse_baSAR()] Provided source dose rate errors differ. The mean was taken, but the calculated systematic error might be not valid!", .call = FALSE) } ##add to the final de DE_FINAL.ERROR <- sqrt(results[[1]][["CENTRAL.SD"]]^2 + mean(systematic_error)^2) ##consider the case that we get NA and this might be confusing if(is.na(DE_FINAL.ERROR)){ DE_FINAL.ERROR <- results[[1]][["CENTRAL.SD"]] } ##combine results[[1]] <- cbind(results[[1]], DE_FINAL = results[[1]][["CENTRAL"]], DE_FINAL.ERROR = DE_FINAL.ERROR) }else{ results <- NULL verbose <- FALSE plot <- FALSE } # Terminal output ----------------------------------------------------------------------------- if(verbose){ cat("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\n") cat("\n[analyse_baSAR()] ---- RESULTS ---- \n") cat("------------------------------------------------------------------\n") cat(paste0("Used distribution:\t\t", results[[1]][["DISTRIBUTION"]],"\n")) if(!is.null(removed_aliquots)){ if(!is.null(aliquot_range)){ cat(paste0("Number of aliquots used:\t", results[[1]][["NB_ALIQUOTS"]],"/", results[[1]][["NB_ALIQUOTS"]] + nrow(removed_aliquots), " (manually removed: " ,length(aliquot_range),")\n")) }else{ cat(paste0("Number of aliquots used:\t", 
results[[1]][["NB_ALIQUOTS"]],"/", results[[1]][["NB_ALIQUOTS"]] + nrow(removed_aliquots),"\n")) } }else{ cat(paste0("Number of aliquots used:\t", results[[1]][["NB_ALIQUOTS"]],"/", results[[1]][["NB_ALIQUOTS"]],"\n")) } if(!is.null(baSAR_model)){ cat(paste0("Considered fitting method:\t", results[[1]][["FIT_METHOD"]]," (user defined)\n")) }else{ cat(paste0("Considered fitting method:\t", results[[1]][["FIT_METHOD"]],"\n")) } cat(paste0("Number of independent chains:\t", results[[1]][["N.CHAINS"]],"\n")) cat(paste0("Number MCMC iterations/chain:\t", results[[1]][["N.MCMC"]],"\n")) cat("------------------------------------------------------------------\n") if(distribution == "log_normal"){ cat("\t\t\t\tmean*\tsd\tHPD\n") }else{ cat("\t\t\t\tmean\tsd\tHPD\n") } cat(paste0(">> Central dose:\t\t", results[[1]][["CENTRAL"]],"\t", results[[1]][["CENTRAL.SD"]],"\t", "[", results[[1]][["CENTRAL_Q_.16"]]," ; ", results[[1]][["CENTRAL_Q_.84"]], "]**\t")) cat(paste0("\n\t\t\t\t\t\t[", results[[1]][["CENTRAL_Q_.025"]]," ; ", results[[1]][["CENTRAL_Q_.975"]],"]***")) cat(paste0("\n>> sigma_D:\t\t\t", results[[1]][["SIGMA"]],"\t", results[[1]][["SIGMA.SD"]], "\t", "[",results[[1]][["SIGMA_Q_.16"]]," ; ", results[[1]][["SIGMA_Q_.84"]], "]**\t")) cat(paste0("\n\t\t\t\t\t\t[",results[[1]][["SIGMA_Q_.025"]]," ; ", results[[1]][["SIGMA_Q_.975"]], "]***")) cat(paste0("\n>> Final central De:\t\t", results[[1]][["DE_FINAL"]],"\t", round(results[[1]][["DE_FINAL.ERROR"]], digits = digits), "\t", " - \t -")) cat("\n------------------------------------------------------------------\n") cat( paste("(systematic error contribution to final De:", format((1-results[[1]][["CENTRAL.SD"]]/results[[1]][["DE_FINAL.ERROR"]])*100, scientific = TRUE), "%)\n") ) if(distribution == "log_normal"){ cat("* mean of the central dose is the geometric mean\n") } cat("** 68 % level | *** 95 % level\n") } # Plotting ------------------------------------------------------------------------------------ if(plot){ 
##get colours from the package Luminescence col <- get("col", pos = .LuminescenceEnv) ##get list of variable names (we need them later) varnames <- coda::varnames(results[[2]]) ##//////////////////////////////////////////////////////////////////////////////////////////// ##TRACE AND DENSITY PLOT ####////////////////////////////////////////////////////////////////////////////////////////// if(plot_reduced){ plot_check <- try(plot(results[[2]][,c("central_D","sigma_D"),drop = FALSE]), silent = TRUE) ##show error if(is(plot_check, "try-error")){ stop("[analyse_baSAR()] Plots for 'central_D' and 'sigma_D' could not be produced. You are probably monitoring the wrong variables!", .call = FALSE) } }else{ try(plot(results[[2]])) } ##//////////////////////////////////////////////////////////////////////////////////////////// ##TRUE DOSE PLOT AND DECISION MAKER ####////////////////////////////////////////////////////////////////////////////////////////// if (!plot.single) { par(mfrow = c(2, 2)) } ##get list with D values ##get list out of it plot_matrix <- as.matrix(results[[2]][,grep(x = varnames, pattern = "D[", fixed = TRUE)]) aliquot_quantiles <- t(matrixStats::colQuantiles(x = plot_matrix, probs = c(0.25,0.75))) ##define boxplot colours ... 
we have red and orange box.col <- vapply(1:ncol(aliquot_quantiles), function(x){ if(aliquot_quantiles[2,x] < results[[1]][,c("CENTRAL_Q_.025")] | aliquot_quantiles[1,x] > results[[1]][,c("CENTRAL_Q_.975")] ){ col[2] }else if(aliquot_quantiles[2,x] < results[[1]][,c("CENTRAL_Q_.16")] | aliquot_quantiles[1,x] > results[[1]][,c("CENTRAL_Q_.84")]){ "orange" }else{ "white" } }, FUN.VALUE = vector(mode = "character", length = 1)) ##to assure a minium of quality not more then 15 boxes a plotted in each plot i <- 1 while(i < ncol(plot_matrix)){ step <- if((i + 14) > ncol(plot_matrix)){ncol(plot_matrix)}else{i + 14} plot_check <- try(boxplot( x = plot_matrix[,i:step], use.cols = TRUE, horizontal = TRUE, outline = TRUE, col = box.col[i:step], xlab = if(is.null(unlist(source_doserate))){"Dose [s]"}else{"Dose [Gy]"}, ylab = "Aliquot index", yaxt = "n", xlim = c(1,19), main = paste0("Individual Doses | ALQ: ", i,":",step) )) if(!is(plot_check, "try-error")){ if(step == ncol(plot_matrix)){ axis(side = 2, at = 1:15, labels = as.character(c(i:step, rep(" ", length = 15 - length(i:step)))), cex.axis = 0.8 ) }else{ axis(side = 2, at = 1:15, labels = as.character(i:step), cex.axis = 0.8) } ##add HPD with text ##HPD - 68% lines( x = c( results[[1]][, c("CENTRAL_Q_.16")], results[[1]][, c("CENTRAL_Q_.16")], results[[1]][, c("CENTRAL_Q_.84")], results[[1]][, c("CENTRAL_Q_.84")]), y = c(par()$usr[3], 16, 16, par()$usr[3]), lty = 3, col = col[3], lwd = 1.5 ) text( x = results[[1]][, c("CENTRAL")], y = 16, labels = "68 %", pos = 3, col = col[3], cex = 0.9 * par()$cex ) ##HPD - 98 %% lines( x = c( results[[1]][, c("CENTRAL_Q_.025")], results[[1]][, c("CENTRAL_Q_.025")], results[[1]][, c("CENTRAL_Q_.975")], results[[1]][, c("CENTRAL_Q_.975")]), y = c(par()$usr[3], 17.5, 17.5, par()$usr[3]), lty = 3, col = col[2], lwd = 1.5 ) text( x = results[[1]][, c("CENTRAL")], y = 17.5, labels = "95 %", pos = 3, col = col[2], cex = 0.9 * par()$cex) } ##update counter i <- i + 15 } rm(plot_matrix) 
if(!plot.single){ par(mfrow = c(1,2)) on.exit(par(mfrow = c(1,1), bg = "white", xpd = FALSE)) } ##//////////////////////////////////////////////////////////////////////////////////////////// ##DOSE RESPONSE CURVES AND Lx/Tx VALUES ####////////////////////////////////////////////////////////////////////////////////////////// ##define selection vector selection <- c("a[", "b[", "c[", "g[", "Q[1,") ##get list out of it list_selection <- lapply(X = selection, FUN = function(x){ unlist(results[[2]][,grep(x = varnames, pattern = x, fixed = TRUE)]) }) ##create matrix plot_matrix <- t(do.call(what = "cbind", args = list_selection)) ##free memory rm(list_selection) ##make selection according to the model for the curve plotting if (fit.method == "EXP") {ExpoGC <- 1 ; LinGC <- 0 } if (fit.method == "LIN") {ExpoGC <- 0 ; LinGC <- 1 } if (fit.method == "EXP+LIN") {ExpoGC <- 1 ; LinGC <- 1 } if (fit.force_through_origin) {GC_Origin <- 0} else {GC_Origin <- 1} ##add choise for own provided model if(!is.null(baSAR_model)){ fit.method_plot <- paste(fit.method, "(user defined)") }else{ fit.method_plot <- fit.method } ##open plot area ##for the xlim and ylim we have to identify the proper ranges based on the input xlim <- c(0, max(input_object[,grep(x = colnames(input_object), pattern = "DOSE")], na.rm = TRUE)*1.1) ylim <- c( min(input_object[,grep(x = colnames(input_object), pattern = "LxTx")], na.rm = TRUE), max(input_object[,grep(x = colnames(input_object), pattern = "LxTx")], na.rm = TRUE)*1.1) ##check for position of the legend ... 
we can do better if(results[[1]][["CENTRAL_Q_.975"]] < max(xlim)/2){ legend_pos <- "topright" }else{ legend_pos <- "topleft" } ##set plot area plot_check <- try(plot( NA, NA, ylim = ylim, xlim = xlim, ylab = expression(paste(L[x] / T[x])), xlab = if(is.null(unlist(source_doserate))){"Dose [s]"}else{"Dose [Gy]"}, main = "baSAR Dose Response Curves" )) if (!is(plot_check, "try-error")) { ##add mtext mtext(side = 3, text = paste("Fit:", fit.method_plot)) ##check whether we have all data we need (might be not the case of the user ##selects own variables) if (ncol(plot_matrix) != 0) { ##plot individual dose response curves x <- NA for (i in seq(1, ncol(plot_matrix), length.out = 1000)) { curve( GC_Origin * plot_matrix[4, i] + LinGC * (plot_matrix[3, i] * x) + ExpoGC * (plot_matrix[1, i] * (1 - exp ( -x / plot_matrix[2, i] ))), add = TRUE, col = rgb(0, 0, 0, .1) ) } }else{ try(stop("[analyse_baSAR()] Wrong 'variable.names' monitored, dose responses curves could not be plotted!", call. = FALSE)) } ##add dose points n.col <- length(input_object[, grep(x = colnames(input_object), pattern = "DOSE")]) ##add rug with natural Lx/Tx rug(side = 2, x = input_object[[9 + n.col]]) ##plot Lx/Tx values .. without errors ... 
this is enough here for (i in 2:length(input_object[, grep(x = colnames(input_object), pattern = "DOSE")])) { ##add error bars segments( x0 = input_object[[8 + i]], x1 = input_object[[8 + i]], y0 = input_object[[8 + n.col + i]] - input_object[[8 + 2 * n.col + i]], y1 = input_object[[8 + n.col + i]] + input_object[[8 + 2 * n.col + i]], col = "grey" ) ##add points in the top of it points( x = input_object[[8 + i]], y = input_object[[8 + n.col + i]], pch = 21, col = col[11], bg = "grey" ) } ##add ablines abline( v = results[[1]][, c("CENTRAL_Q_.16", "CENTRAL_Q_.84")], lty = 3, col = col[3], lwd = 1.2 ) abline(v = results[[1]][, c("CENTRAL_Q_.025", "CENTRAL_Q_.975")], lty = 2, col = col[2]) ##add legend1 legend( legend_pos, bty = "n", horiz = FALSE, lty = c(3, 2), col = c(col[3], col[2]), legend = c("HPD - 68 %", "HPD - 95 %") ) ##add legend2 legend( "bottomright", bty = "n", horiz = FALSE, pch = 21, col = col[11], bg = "grey", legend = "measured dose points" ) } ##remove object, it might be rather big rm(plot_matrix) ##03 Abanico Plot if(distribution_plot == "abanico"){ plot_check <- plot_AbanicoPlot( data = input_object[, c("DE", "DE.SD")], zlab = if(is.null(unlist(source_doserate))){expression(paste(D[e], " [s]"))}else{expression(paste(D[e], " [Gy]"))}, log.z = if (distribution != "log_normal") { FALSE } else{ TRUE }, z.0 = results[[1]]$CENTRAL, y.axis = FALSE, polygon.col = FALSE, line = results[[1]][,c( "CENTRAL_Q_.16", "CENTRAL_Q_.84", "CENTRAL_Q_.025", "CENTRAL_Q_.975")], line.col = c(col[3], col[3], col[2], col[2]), line.lty = c(3,3,2,2), output = TRUE, mtext = paste0( nrow(input_object) - length(which(is.na(input_object[, c("DE", "DE.SD")]))), "/", nrow(input_object), " plotted (removed are NA values)" ) ) if (!is.null(plot_check)) { legend( "topleft", legend = c("Central dose", "HPD - 68%", "HPD - 95 %"), lty = c(2, 3, 2), col = c("black", col[3], col[2]), bty = "n", cex = par()$cex * 0.8 ) } }else{ plot_check <- NULL } ##In case the Abanico plot will not 
work because of negative values ##provide a KDE if(is.null(plot_check) && distribution_plot == "kde"){ plot_check <- try(suppressWarnings(plot_KDE( data = input_object[, c("DE", "DE.SD")], xlab = if(is.null(unlist(source_doserate))){expression(paste(D[e], " [s]"))}else{expression(paste(D[e], " [Gy]"))}, mtext = paste0( nrow(input_object) - length(which(is.na(input_object[, c("DE", "DE.SD")]))), "/", nrow(input_object), " (removed are NA values)" ) ))) if(!is(plot_check, "try-error")) { abline(v = results[[1]]$CENTRAL, lty = 2) abline( v = results[[1]][, c("CENTRAL_Q_.16", "CENTRAL_Q_.84")], lty = 3, col = col[3], lwd = 1.2 ) abline(v = results[[1]][, c("CENTRAL_Q_.025", "CENTRAL_Q_.975")], lty = 2, col = col[2]) ##check for position of the legend if(results[[1]][["CENTRAL_Q_.975"]] < max(xlim)/2){ legend_pos <- "right" }else{ legend_pos <- "topleft" } legend( legend_pos, legend = c("Central dose", "HPD - 68%", "HPD - 95 %"), lty = c(2, 3, 2), col = c("black", col[3], col[2]), bty = "n", cex = par()$cex * 0.8 ) } } } # Return -------------------------------------------------------------------------------------- return(set_RLum( class = "RLum.Results", data = list( summary = results[[1]], mcmc = results[[2]], models = results[[3]], input_object = input_object, removed_aliquots = removed_aliquots ), info = list(call = sys.call()) )) } Luminescence/R/calc_OSLLxTxRatio.R0000644000176200001440000004520014464125673016463 0ustar liggesusers#' @title Calculate `Lx/Tx` ratio for CW-OSL curves #' #' @description #' Calculate `Lx/Tx` ratios from a given set of CW-OSL curves assuming late light #' background subtraction. #' #' @details #' The integrity of the chosen values for the signal and background integral is #' checked by the function; the signal integral limits have to be lower than #' the background integral limits. If a [vector] is given as input instead #' of a [data.frame], an artificial [data.frame] is produced. 
The #' error calculation is done according to Galbraith (2002). #' #' **Please note:** In cases where the calculation results in `NaN` values (for #' example due to zero-signal, and therefore a division of 0 by 0), these `NaN` #' values are replaced by 0. #' #' **`sigmab`** #' #' The default value of `sigmab` is calculated assuming the background is #' constant and **would not** be applicable when the background varies, #' e.g., as observed for the early light subtraction method. #' #' **sig0** #' #' This argument allows adding an extra component of error to the final `Lx/Tx` #' error value. The input will be treated as a factor that is multiplied with #' the already calculated `LxTx` and the result is combined in quadrature as: #' #' \deqn{se(LxTx) = \sqrt(se(LxTx)^2 + (LxTx * sig0)^2)} #' #' #' **background.count.distribution** #' #' This argument allows selecting the distribution assumption that is used for #' the error calculation. According to Galbraith (2002, 2014) the background #' counts may be overdispersed (i.e. do not follow a Poisson distribution, #' which is assumed for the photomultiplier counts). In that case (might be the #' normal case) the overdispersion has to be accounted for by estimating #' \eqn{\sigma^2} (i.e. the overdispersion value). Therefore the relative #' standard error is calculated as: #' #' - `poisson` #' \deqn{rse(\mu_{S}) \approx \sqrt(Y_{0} + Y_{1}/k^2)/Y_{0} - Y_{1}/k} #' #' - `non-poisson` #' \deqn{rse(\mu_{S}) \approx \sqrt(Y_{0} + Y_{1}/k^2 + \sigma^2(1+1/k))/Y_{0} - Y_{1}/k} #' #' **Please note** that when using the early background subtraction method in #' combination with the 'non-poisson' distribution argument, the corresponding `Lx/Tx` error #' may considerably increase due to a high `sigmab` value. #' Please check whether this is valid for your data set and if necessary #' consider providing your own `sigmab` value using the corresponding argument `sigmab`. 
#' #' @param Lx.data [RLum.Data.Curve-class] or [data.frame] (**required**): #' requires a CW-OSL shine down curve (x = time, y = counts) #' #' @param Tx.data [RLum.Data.Curve-class] or [data.frame] (*optional*): #' requires a CW-OSL shine down curve (x = time, y = counts). If no #' input is given the `Tx.data` will be treated as `NA` and no `Lx/Tx` ratio #' is calculated. #' #' @param signal.integral [numeric] (**required**): vector with the limits for the signal integral. #' Can be set to `NA`, then no integrals are considered and all other integrals are set to `NA` as well. #' #' @param signal.integral.Tx [numeric] (*optional*): #' vector with the limits for the signal integral for the `Tx`-curve. If nothing is provided the #' value from `signal.integral` is used. #' #' @param background.integral [numeric] (**required**): #' vector with the bounds for the background integral. #' Can be set to `NA`, then no integrals are considered and all other integrals are set to `NA` as well. #' #' @param background.integral.Tx [numeric] (*optional*): #' vector with the limits for the background integral for the `Tx` curve. #' If nothing is provided the value from `background.integral` is used. #' #' @param background.count.distribution [character] (*with default*): #' sets the count distribution assumed for the error calculation. #' Possible arguments `poisson` or `non-poisson`. See details for further information. #' #' @param use_previousBG [logical] (*with default*): #' If set to `TRUE` the background of the `Lx`-signal is subtracted also #' from the `Tx`-signal. Please note that in this case separate #' signal integral limits for the `Tx`-signal are not allowed and will be reset. #' #' @param sigmab [numeric] (*optional*): #' option to set a manual value for the overdispersion (for `LnTx` and `TnTx`), #' used for the `Lx/Tx` error calculation. The value should be provided as #' absolute squared count values, e.g. `sigmab = c(300,300)`. 
#' **Note:** If only one value is provided this value is taken for both (`LnTx` and `TnTx`) signals. #' #' @param sig0 [numeric] (*with default*): #' allow adding an extra component of error to the final `Lx/Tx` error value #' (e.g., instrumental error, see details). #' #' @param digits [integer] (*with default*): #' round numbers to the specified digits. #' If digits is set to `NULL` nothing is rounded. #' #' @return #' Returns an S4 object of type [RLum.Results-class]. #' #' Slot `data` contains a [list] with the following structure: #' #' **@data** #' ``` #' $LxTx.table (data.frame) #' .. $ LnLx #' .. $ LnLx.BG #' .. $ TnTx #' .. $ TnTx.BG #' .. $ Net_LnLx #' .. $ Net_LnLx.Error #' .. $ Net_TnTx #' .. $ Net_TnTx.Error #' .. $ LxTx #' .. $ LxTx.Error #' $ calc.parameters (list) #' .. $ sigmab.LnTx #' .. $ sigmab.TnTx #' .. $ k #' ``` #' #' **@info** #' ``` #' $ call (original function call) #' ``` #' #' @note #' The results of this function have been cross-checked with the Analyst #' (version 3.24b). Access to the results object via [get_RLum]. #' #' **Caution:** If you are using early light subtraction (EBG), please either provide your #' own `sigmab` value or use `background.count.distribution = "poisson"`. #' #' @section Function version: 0.8.0 #' #' @author #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #' @seealso [RLum.Data.Curve-class], [Analyse_SAR.OSLdata], [plot_GrowthCurve], #' [analyse_SAR.CWOSL] #' #' @references Duller, G., 2018. Analyst v4.57 - User Manual. #' `https://users.aber.ac.uk/ggd`\cr #' #' Galbraith, R.F., 2002. A note on the variance of a background-corrected OSL #' count. Ancient TL, 20 (2), 49-51. #' #' Galbraith, R.F., 2014. A further note on the variance of a #' background-corrected OSL count. Ancient TL, 31 (2), 1-3. 
#'
#' @keywords datagen
#'
#' @examples
#'
#' ##load data
#' data(ExampleData.LxTxOSLData, envir = environment())
#'
#' ##calculate Lx/Tx ratio
#' results <- calc_OSLLxTxRatio(
#'   Lx.data = Lx.data,
#'   Tx.data = Tx.data,
#'   signal.integral = c(1:2),
#'   background.integral = c(85:100))
#'
#' ##get results object
#' get_RLum(results)
#'
#' @md
#' @export
calc_OSLLxTxRatio <- function(
  Lx.data,
  Tx.data = NULL,
  signal.integral,
  signal.integral.Tx = NULL,
  background.integral,
  background.integral.Tx = NULL,
  background.count.distribution = "non-poisson",
  use_previousBG = FALSE,
  sigmab = NULL,
  sig0 = 0,
  digits = NULL
){

  ## Computes the background-corrected Lx/Tx ratio and its standard error
  ## following Galbraith (2002) and Galbraith (2014, pers. comm./Ancient TL).

  # Test input data ---------------------------------------------------------
  ##(1) - integrity checks
  if(!is.null(Tx.data)){

    ##(a) - check data type
    ## Lx and Tx must be of the same class so that both take the same
    ## conversion path below
    if(is(Lx.data)[1]!=is(Tx.data)[1]){
      stop("[calc_OSLLxTxRatio()] Data type of Lx and Tx data differs!", call. = FALSE)
    }

    ##(b) - test if data.type is valid in general
    if(is(Lx.data)[1] == "RLum.Data.Curve"){
      Lx.data <- as(Lx.data, "data.frame")
      Tx.data <- as(Tx.data, "data.frame")
    }else{
      ##go further
      if((is(Lx.data)[1] != "data.frame" &
          is(Lx.data)[1] != "numeric") &
          is(Lx.data)[1] != "matrix"){
        stop("[calc_OSLLxTxRatio()] Data type error! Required types are data.frame or numeric vector.", call. = FALSE)
      }
    }

    ##(c) - convert vector to data.frame if necessary
    ## first column becomes a synthetic channel index, second column the counts
    if(is(Lx.data)[1] != "data.frame" &
       is(Lx.data)[1] != "matrix"){
      Lx.data <- data.frame(x=1:length(Lx.data),y=Lx.data)
      Tx.data <- data.frame(x=1:length(Tx.data),y=Tx.data)
    }

    ##(d) - check if Lx and Tx curves have the same channel length
    if(length(Lx.data[,2]) != length(Tx.data[,2])){
      stop("[calc_OSLLxTxRatio()] Channel numbers of Lx and Tx data differ!")}

  }else{
    ## no Tx data provided: use an NA placeholder so downstream code
    ## (which always indexes Tx.data) keeps working
    Tx.data <- data.frame(x = NA,y = NA)

    ##support RLum.objects
    if(is(Lx.data)[1] == "RLum.Data.Curve"){
      Lx.data <- as(Lx.data, "data.frame")
    }

    ##check for matrix
    if(is(Lx.data)[1] == "matrix"){
      Lx.data <- as.data.frame(Lx.data)
    }

    ##now it should be a data.frame, if not, try to produce one
    if(is(Lx.data)[1]!="data.frame") {
      Lx.data <- data.frame(x = 1:length(Lx.data),y = Lx.data)
    }

  }#endif::missing Tx.data

  # Alternate mode ----------------------------------------------------------
  ## if any integral limit is NA, skip the background correction entirely and
  ## return plain channel sums with zero errors (early-return shortcut)
  if(any(is.na(c(signal.integral, background.integral)))){
    signal.integral <- background.integral <- NA
    LnLx <- sum(Lx.data[,2])
    TnTx <- sum(Tx.data[,2])
    LnLxTnTx <- data.frame(
      LnLx = LnLx,
      LnLx.BG = 0,
      TnTx = TnTx,
      TnTx.BG = 0,
      Net_LnLx = LnLx,
      Net_LnLx.Error = 0,
      Net_TnTx = TnTx,
      Net_TnTx.Error = 0,
      LxTx = LnLx/TnTx,
      LxTx.Error = 0)

    return(set_RLum(
      class = "RLum.Results",
      data = list(
        LxTx.table = LnLxTnTx,
        calc.parameters = NULL,
        info = list(call = sys.call())
      )))
  }

  # Continue checks ---------------------------------------------------------
  ##(e) - check if signal integral is valid
  if(min(signal.integral) < 1 | max(signal.integral>length(Lx.data[,2]))){
    stop("[calc_OSLLxTxRatio()] signal.integral is not valid!", call. = FALSE)}

  ##(f) - check if background integral is valid
  if(min(background.integral)<1 | max(background.integral>length(Lx.data[,2]))){
    stop(paste("[calc_OSLLxTxRatio()] background.integral is not valid! Max: ",length(Lx.data[,2]),sep=""), call. = FALSE)}

  ##(g) - check if signal and background integral overlapping
  if(min(background.integral)<=max(signal.integral)){
    stop("[calc_OSLLxTxRatio()] Overlapping of 'signal.integral' and 'background.integral' is not permitted!", call. = FALSE)}

  ##(h) - similar procedure for the Tx limits
  if(all(c(!is.null(signal.integral.Tx),!is.null(background.integral.Tx)))){
    ## use_previousBG forces identical integral limits for Lx and Tx,
    ## overriding any user-supplied Tx limits (with a warning)
    if(use_previousBG){
      warning("[calc_OSLLxTxRatio()] For option use_previousBG = TRUE independent Lx and Tx integral limits are not allowed. Integral limits of Lx used for Tx.", call. = FALSE)
      signal.integral.Tx <- signal.integral
      background.integral.Tx <- background.integral
    }

    if(min(signal.integral.Tx) < 1 | max(signal.integral.Tx>length(Tx.data[,2]))){
      stop("[calc_OSLLxTxRatio()] signal.integral.Tx is not valid!", call. = FALSE)}

    if(min(background.integral.Tx)<1 | max(background.integral.Tx>length(Tx.data[,2]))){
      stop(paste("[calc_OSLLxTxRatio()] background.integral.Tx is not valid! Max: ",length(Tx.data[,2]),sep=""), call. = FALSE)}

    if(min(background.integral.Tx)<=max(signal.integral.Tx)){
      stop("[calc_OSLLxTxRatio()] Overlapping of 'signal.integral.Tx' and 'background.integral.Tx' is not permitted!", call. = FALSE)}

  }else if(!all(c(is.null(signal.integral.Tx),is.null(background.integral.Tx)))){
    ## providing only one of the two Tx limits is ambiguous -> error
    stop("[calc_OSLLxTxRatio()] You have to provide both: signal.integral.Tx and background.integral.Tx!", call. = FALSE)
  }else{
    ## neither provided: fall back to the Lx integral limits
    signal.integral.Tx <- signal.integral
    background.integral.Tx <- background.integral
  }

  ##check sigmab
  ## sigmab may be a scalar (applied to both signals) or a length-2 vector
  ## (LnTx, TnTx); longer vectors are rejected
  if (!is.null(sigmab)) {
    if (!is(sigmab, "numeric")) {
      stop("[calc_OSLLxTxRatio()] 'sigmab' has to be of type numeric.", call. = FALSE)
    }
    if (length(sigmab) > 2) {
      stop("[calc_OSLLxTxRatio()] Maximum allowed vector length for 'sigmab' is 2.", call. = FALSE)
    }
  }

  ##--------------------------------------------------------------------------##
  ##(2) - read data and produce background subtracted values

  ## calculate k value - express the background as multiple value from the number
  ## of signal integral channels, however, it can be < 1 also
  n <- length(signal.integral)
  m <- length(background.integral)
  k <- m/n

  n.Tx <- length(signal.integral.Tx)

  ##use previous BG and account for the option to set different integral limits
  if(use_previousBG){
    m.Tx <- m
  }else{
    m.Tx <- length(background.integral.Tx)
  }

  k.Tx <- m.Tx/n.Tx

  ##LnLx (comments are corresponding variables to Galbraith, 2002)
  Lx.curve <- Lx.data[,2]
  Lx.signal <- sum(Lx.curve[signal.integral])          #Y.0
  Lx.background <- sum(Lx.curve[background.integral])  #Y.1
  ## scale background sum down to the width of the signal integral
  Lx.background <- Lx.background*1/k                   #mu.B
  LnLx <- Lx.signal - Lx.background

  ##TnTx
  ## NOTE(review): ifelse() is vectorised here; it copies Tx counts element-wise
  ## and yields NA where the placeholder (x = NA) Tx.data was set above
  Tx.curve <- ifelse(is.na(Tx.data[,1])==FALSE, Tx.data[,2], NA)
  Tx.signal <- sum(Tx.curve[signal.integral.Tx])

  ##use previous BG
  if(use_previousBG){
    Tx.background <- Lx.background
  }else{
    Tx.background <- sum(Tx.curve[background.integral.Tx])*1/k.Tx
  }

  TnTx <- (Tx.signal-Tx.background)

  ##--------------------------------------------------------------------------##
  ##(3)
  ## calculate Lx/Tx Errors according Galbraith (2002) and the personal
  ## communication of Galbraith (2014) via e-mail
  ## Nomenclature as stated in the articles

  ##(a)
  ## set Y.0 (sum OSL signal including the background) and
  ## Y.1 (total counts over m later channels)
  Y.0 <- Lx.signal
  Y.0_TnTx <- Tx.signal
  Y.1 <- sum(Lx.curve[background.integral])
  Y.1_TnTx <- sum(Tx.curve[background.integral.Tx])

  ##(b) estimate overdispersion (here called sigmab), see equation (4) in
  ## Galbraith (2002), Galbraith (2014)
  ## If else condition for the case that k < 2
  if(round(k,digits = 1) >= 2 & ((min(background.integral) + length(signal.integral)*(2+1)) <= length(Lx.curve))){

    ##(b)(1)(1)
    ## note that m = n*k = multiple of background.integral from signal.integral
    ## split the background region into signal-integral-sized windows and sum each
    Y.i <- vapply(0:round(k,digits=0), function(i){
      sum(Lx.curve[
        (min(background.integral)+length(signal.integral)*i):
          (min(background.integral)+length(signal.integral)+length(signal.integral)*i)])
    }, FUN.VALUE = vector(mode = "numeric", length = 1L))

    Y.i <- na.exclude(Y.i)
    sigmab.LnLx <- abs(var(Y.i) - mean(Y.i)) ##sigmab is denoted as sigma^2 = s.Y^2-Y.mean
    ##therefore here absolute values are given

  }else{
    ## provide warning if m is < 25, as suggested by Rex Galbraith
    ## low number of degree of freedom
    if (m < 25) {
      warning(
        "[calc_OSLLxTxRatio()] Number of background channels for Lx < 25; error estimation might not be reliable!",
        call. = FALSE)
    }

    ## fallback estimator based on per-channel variance of the background region
    sigmab.LnLx <- abs((var(Lx.curve[background.integral]) -
                          mean(Lx.curve[background.integral])) * n)
  }

  ## same overdispersion estimation for the Tx curve
  if (round(k.Tx, digits = 1) >= 2 &
      ((min(background.integral.Tx) + length(signal.integral.Tx) * (2 + 1)) <= length(Tx.curve))) {
    ##(b)(1)(1)
    ## note that m.Tx = n.Tx*k.Tx = multiple of background.integral.Tx from signal.integral.Tx
    ## also for the TnTx signal
    Y.i_TnTx <- vapply(0:round(k.Tx, digits = 0), function(i) {
      sum(Tx.curve[(min(background.integral.Tx) + length(signal.integral.Tx) * i):(
        min(background.integral.Tx) + length(signal.integral.Tx) + length(signal.integral.Tx) * i
      )])
    }, FUN.VALUE = vector(mode = "numeric", length = 1L))

    Y.i_TnTx <- na.exclude(Y.i_TnTx)
    sigmab.TnTx <- abs(var(Y.i_TnTx) - mean(Y.i_TnTx))

  } else{
    ## provide warning if m is < 25, as suggested by Rex Galbraith
    ## low number of degree of freedom
    if (m.Tx < 25 && use_previousBG == FALSE) {
      warning("[calc_OSLLxTxRatio()] Number of background channels for Tx < 25; error estimation might not be reliable!",
              call. = FALSE)
    }

    sigmab.TnTx <- abs((var(Tx.curve[background.integral.Tx]) -
                          mean(Tx.curve[background.integral.Tx])) * n.Tx)
  }

  ##account for a manually set sigmab value
  ## a user-supplied sigmab always overrides the estimates computed above;
  ## scalar input is applied to both signals
  if (!is.null(sigmab)) {
    if (length(sigmab) == 2) {
      sigmab.LnLx <- sigmab[1]
      sigmab.TnTx <- sigmab[2]
    }else{
      sigmab.LnLx <- sigmab[1]
      sigmab.TnTx <- sigmab[1]
    }
  }

  ##(c)
  ## Calculate relative error of the background subtracted signal
  ## according to Galbraith (2002), equation (6) with changes
  ## from Galbraith (2014), equation 6
  ## Discussion with Rex Galbraith via e-mail (2014-02-27):
  ## Equation 6 is appropriate to be implemented as standard

  if(background.count.distribution == "poisson"){
    ##(c.1) estimate relative standard error for assuming a poisson distribution
    LnLx.relError <- sqrt((Y.0 + Y.1/k^2))/(Y.0-Y.1/k) ## rse(mu.s)
    TnTx.relError <- sqrt((Y.0_TnTx + Y.1_TnTx/k^2))/(Y.0_TnTx-Y.1_TnTx/k)
  }else{
    ##(c.2) estimate relative standard error for a non-poisson distribution
    if(background.count.distribution != "non-poisson"){
      warning("Unknown method for background.count.distribution. A non-poisson distribution is assumed!", call. = FALSE)}

    ## non-poisson variant adds the overdispersion term sigmab*(1 + 1/k)
    LnLx.relError <- sqrt(Y.0 + Y.1/k^2 + sigmab.LnLx*(1+1/k))/
      (Y.0 - Y.1/k)
    TnTx.relError <- sqrt(Y.0_TnTx + Y.1_TnTx/k^2 + sigmab.TnTx*(1+1/k))/
      (Y.0_TnTx - Y.1_TnTx/k)
  }

  ##(d)
  ##calculate absolute standard error
  LnLx.Error <- abs(LnLx*LnLx.relError)
  TnTx.Error <- abs(TnTx*TnTx.relError)

  ##we do not want to have NaN values, as they are mathematically correct, but make
  ##no sense and would result in aliquots that become rejected later
  if(is.nan(LnLx.Error)) LnLx.Error <- 0
  if(is.nan(TnTx.Error)) TnTx.Error <- 0

  ##combine results
  LnLxTnTx <- cbind(
    Lx.signal,
    Lx.background,
    Tx.signal,
    Tx.background,
    LnLx,
    LnLx.Error,
    TnTx,
    TnTx.Error
  )

  ##--------------------------------------------------------------------------##
  ##(4) Calculate LxTx error according Galbraith (2014)

  #transform results in a data.frame
  LnLxTnTx <- as.data.frame((LnLxTnTx))

  #add col names
  colnames(LnLxTnTx)<-c("LnLx", "LnLx.BG",
                        "TnTx", "TnTx.BG",
                        "Net_LnLx", "Net_LnLx.Error",
                        "Net_TnTx", "Net_TnTx.Error")

  ##calculate Ln/Tx
  LxTx <- LnLxTnTx$Net_LnLx/LnLxTnTx$Net_TnTx

  ##set NaN
  if(is.nan(LxTx)) LxTx <- 0

  ##calculate Ln/Tx error
  ## relative errors combined in quadrature
  LxTx.relError <- sqrt(LnLx.relError^2 + TnTx.relError^2)
  LxTx.Error <- abs(LxTx * LxTx.relError)

  ##set NaN
  if(is.nan(LxTx.Error)) LxTx.Error <- 0

  ##add an extra component of error
  ## sig0 is an additional systematic/instrumental error component (see roxygen docs)
  LxTx.Error <- sqrt(LxTx.Error^2 + (sig0 * LxTx)^2)

  ##return combined values
  temp <- cbind(LnLxTnTx,LxTx,LxTx.Error)

  ##apply digits if wanted
  ## the result table has exactly one row, hence temp[1,]
  if(!is.null(digits)){
    temp[1,] <- round(temp[1,], digits = digits)
  }

  calc.parameters <- list(
    sigmab.LnLx = sigmab.LnLx,
    sigmab.TnTx = sigmab.TnTx,
    k = k)

  ##set results object
  return(set_RLum(
    class = "RLum.Results",
    data = list(
      LxTx.table = temp,
      calc.parameters = calc.parameters),
    info = list(call = sys.call())
  ))
}
Luminescence/R/template_DRAC.R0000644000176200001440000006617214264017373015637 0ustar liggesusers#' Create a DRAC input data template (v1.2)
#'
#' This function returns a DRAC input template (v1.2) to be used in conjunction
#' with the
[use_DRAC] function
#'
#' @param nrow [integer] (*with default*):
#' specifies the number of rows of the template (i.e., the number of data
#' sets you want to submit).
#'
#' @param preset [character] (*optional*):
#' By default, all values of the template are set to `NA`, which means that
#' the user needs to fill in **all** data first before submitting to DRAC
#' using `use_DRAC()`. To reduce the number of values that need to be
#' provided, `preset` can be used to create a template with at least
#' a minimum of reasonable preset values.
#'
#' `preset` can be one of the following:
#' - `quartz_coarse`
#' - `quartz_fine`
#' - `feldspar_coarse`
#' - `polymineral_fine`
#' - `DRAC-example_quartz`
#' - `DRAC-example_feldspar`
#' - `DRAC-example_polymineral`
#'
#' Note that the last three options can be used to produce a template
#' with values directly taken from the official DRAC input `.csv` file.
#'
#' @param notification [logical] (*with default*):
#' show or hide the notification
#'
#' @return A list.
#'
#' @author
#' Christoph Burow, University of Cologne (Germany), Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#' @references
#' Durcan, J.A., King, G.E., Duller, G.A.T., 2015. DRAC: Dose Rate and Age Calculator for trapped charge dating.
#' Quaternary Geochronology 28, 54-61. doi:10.1016/j.quageo.2015.03.012
#'
#' @seealso [as.data.frame], [list]
#'
#' @examples
#'
#' # create a new DRAC input template
#' input <- template_DRAC(preset = "DRAC-example_quartz")
#'
#' # show content of the input
#' print(input)
#' print(input$`Project ID`)
#' print(input[[4]])
#'
#'
#' ## Example: DRAC Quartz example
#' # note that you only have to assign new values where they
#' # are different to the default values
#' input$`Project ID` <- "DRAC-Example"
#' input$`Sample ID` <- "Quartz"
#' input$`Conversion factors` <- "AdamiecAitken1998"
#' input$`External U (ppm)` <- 3.4
#' input$`errExternal U (ppm)` <- 0.51
#' input$`External Th (ppm)` <- 14.47
#' input$`errExternal Th (ppm)` <- 1.69
#' input$`External K (%)` <- 1.2
#' input$`errExternal K (%)` <- 0.14
#' input$`Calculate external Rb from K conc?` <- "N"
#' input$`Calculate internal Rb from K conc?` <- "N"
#' input$`Scale gammadoserate at shallow depths?` <- "N"
#' input$`Grain size min (microns)` <- 90
#' input$`Grain size max (microns)` <- 125
#' input$`Water content ((wet weight - dry weight)/dry weight) %` <- 5
#' input$`errWater content %` <- 2
#' input$`Depth (m)` <- 2.2
#' input$`errDepth (m)` <- 0.22
#' input$`Overburden density (g cm-3)` <- 1.8
#' input$`errOverburden density (g cm-3)` <- 0.1
#' input$`Latitude (decimal degrees)` <- 30.0000
#' input$`Longitude (decimal degrees)` <- 70.0000
#' input$`Altitude (m)` <- 150
#' input$`De (Gy)` <- 20
#' input$`errDe (Gy)` <- 0.2
#'
#' # use DRAC
#' \dontrun{
#' output <- use_DRAC(input)
#' }
#'
#' @md
#' @export
template_DRAC <- function(
  nrow = 1L,
  preset = NULL,
  notification = TRUE
){

  ## TODO:
  # 1 - allow mineral specific presets; new argument 'mineral'
  # 2 - add option to return the DRAC example data set

  ## correct incoming to prevent negative values
  nrow <- max(1, nrow[1])

  ## throw warning
  if (nrow > 5000)
    warning("[template_DRAC()] More than 5000 datasets might not be supported!", call. = FALSE)

  ## PRESETS ----
  valid_presets <- c("quartz_coarse", "quartz_fine", "feldspar_coarse", "polymineral_fine",
                     "DRAC-example_quartz", "DRAC-example_feldspar", "DRAC-example_polymineral")

  if (!is.null(preset)) {
    if (length(preset) != 1 || !is.character(preset))
      stop("\n[template_DRAC()]: Argument 'preset' must be a 'character' of length 1.",
           call. = FALSE)
    ## FIX: error message said "use on of the following" -> "use one of the following"
    if (!preset %in% valid_presets)
      stop("\n[template_DRAC()]: Invalid preset. Please use one of the following: ",
           paste(valid_presets, collapse = ", "), call. = FALSE)
  }

  ## LEGAL NOTICE ----
  ## NOTE(review): the notice below states DRAC version 1.1 while the roxygen
  ## title announces a v1.2 template -- verify which version is correct before release
  messages <- list("\n",
                   "\t-------------------- IMPORTANT NOTE ------------------------\n",
                   "\t This function returns a DRAC input template to be used in ",
                   "\t conjunction with the use_DRAC() function.  \n",
                   "\t The template was reproduced with great care, but we do not",
                   "\t take any responsibility and we are not liable for any ",
                   "\t mistakes or unforeseen misbehaviour.",
                   "\t Note that this template is only compatible with DRAC",
                   "\t version 1.1. Before using this template make sure that",
                   "\t this is the correct version, otherwise expect unspecified",
                   "\t errors.\n",
                   "\t Please ensure you cite the use of DRAC in your work,",
                   "\t published or otherwise. Please cite the website name and",
                   "\t version (e.g. DRAC v1.1) and the accompanying journal",
                   "\t article:",
                   "\t Durcan, J.A., King, G.E., Duller, G.A.T., 2015.",
                   "\t DRAC: Dose rate and age calculation for trapped charge",
                   "\t dating. Quaternary Geochronology 28, 54-61. \n",
                   "\t Set 'notification = FALSE' to hide this message. \n",
                   "\t-------------------- IMPORTANT NOTE ------------------------",
                   "\n")

  if (notification) lapply(messages, message)

  # CREATE TEMPLATE ----
  ## Each field is a vector of length 'nrow' carrying DRAC metadata as
  ## attributes: 'required' (mandatory input), 'allowsX' (accepts the DRAC
  ## placeholder 'X'), 'key' (DRAC column id TI:1..TI:53) and 'description'.
  template <- list(
    `Project ID` = structure(rep(NA_character_, nrow), required = TRUE, allowsX = FALSE, key = "TI:1",
                             description = "Inputs can be alphabetic, numeric or selected symbols (/ - () [] _). Spaces are not permitted."), #

    `Sample ID` = structure(rep(NA_character_, nrow), required = TRUE, allowsX = FALSE, key = "TI:2",
                            description = "Inputs can be alphabetic, numeric or selected symbols (/ - () [] _). Spaces are not permitted."), #

    `Mineral` = structure(factor(rep(NA_character_, nrow), c("Q", "F", "PM")), required = TRUE, allowsX = FALSE, key = "TI:3",
                          description = "The mineral used for dating: quartz, feldspar or polymineral. Input must be 'Q', 'F' or 'PM'."), #

    `Conversion factors` = structure(factor(rep(NA_character_, nrow), c("AdamiecAitken1998", "Guerinetal2011", "Liritzisetal2013", "X")), required = FALSE, allowsX = TRUE, key = "TI:4",
                                     description = "The conversion factors required to calculate dose rates from radionuclide concentrations. Users have the option of datasets from Adamiec and Aitken (1998), Guerin et al. (2011) or Liritzis et al. (2013). Input must be 'AdamiecAitken1998', 'Guerinetal2011', 'Liritzisetal2013' or 'X' if conversion factors are not required."), #

    `External U (ppm)` = structure(rep(NA_real_, nrow), required = FALSE, allowsX = TRUE, key = "TI:5",
                                   description = "Radionuclide concentrations in parts per million for Uranium, Thorium and Rubidium and % for Potassium. Inputs must be 0 or positive and should not be left blank."), #

    `errExternal U (ppm)` = structure(rep(NA_real_, nrow), required = FALSE, allowsX = TRUE, key = "TI:6",
                                      description = "Radionuclide concentrations in parts per million for Uranium, Thorium and Rubidium and % for Potassium. Inputs must be 0 or positive and should not be left blank."), #

    `External Th (ppm)` = structure(rep(NA_real_, nrow), required = FALSE, allowsX = TRUE, key = "TI:7",
                                    description = "Radionuclide concentrations in parts per million for Uranium, Thorium and Rubidium and % for Potassium. Inputs must be 0 or positive and should not be left blank."), #

    `errExternal Th (ppm)` = structure(rep(NA_real_, nrow), required = FALSE, allowsX = TRUE, key = "TI:8",
                                       description = "Radionuclide concentrations in parts per million for Uranium, Thorium and Rubidium and % for Potassium. Inputs must be 0 or positive and should not be left blank."), #

    `External K (%)` = structure(rep(NA_real_, nrow), required = FALSE, allowsX = TRUE, key = "TI:9",
                                 description = "Radionuclide concentrations in parts per million for Uranium, Thorium and Rubidium and % for Potassium. Inputs must be 0 or positive and should not be left blank."), #

    `errExternal K (%)` = structure(rep(NA_real_, nrow), required = FALSE, allowsX = TRUE, key = "TI:10",
                                    description = "Radionuclide concentrations in parts per million for Uranium, Thorium and Rubidium and % for Potassium. Inputs must be 0 or positive and should not be left blank."), #

    `External Rb (ppm)` = structure(rep(NA_real_, nrow), required = FALSE, allowsX = TRUE, key = "TI:11",
                                    description = "Radionuclide concentrations in parts per million for Uranium, Thorium and Rubidium and % for Potassium. Inputs must be 0 or positive and should not be left blank."), #

    `errExternal Rb (ppm)` = structure(rep(NA_real_, nrow), required = FALSE, allowsX = TRUE, key = "TI:12",
                                       description = "Radionuclide concentrations in parts per million for Uranium, Thorium and Rubidium and % for Potassium. Inputs must be 0 or positive and should not be left blank."), #

    `Calculate external Rb from K conc?` = structure(factor(rep(NA_character_, nrow), c("Y", "N")), required = FALSE, allowsX = FALSE, key = "TI:13",
                                                     description = "Option to calculate a Rubidium concentration from Potassium, using the 270:1 ratio suggested by Mejdahl (1987). Input should be yes 'Y' or no 'N'."), #

    `Internal U (ppm)` = structure(rep(NA_real_, nrow), required = FALSE, allowsX = TRUE, key = "TI:14",
                                   description = "Internal radionuclide concentrations in parts per million for Uranium, Thorium and Rubidium and % for Potassium. Inputs must be 0 or positive and should not be left blank."), #

    `errInternal U (ppm)` = structure(rep(NA_real_, nrow), required = FALSE, allowsX = TRUE, key = "TI:15",
                                      description = "Internal radionuclide concentrations in parts per million for Uranium, Thorium and Rubidium and % for Potassium. Inputs must be 0 or positive and should not be left blank."), #

    `Internal Th (ppm)` = structure(rep(NA_real_, nrow), required = FALSE, allowsX = TRUE, key = "TI:16",
                                    description = "Internal radionuclide concentrations in parts per million for Uranium, Thorium and Rubidium and % for Potassium. Inputs must be 0 or positive and should not be left blank."), #

    `errInternal Th (ppm)` = structure(rep(NA_real_, nrow), required = FALSE, allowsX = TRUE, key = "TI:17",
                                       description = "Internal radionuclide concentrations in parts per million for Uranium, Thorium and Rubidium and % for Potassium. Inputs must be 0 or positive and should not be left blank."), #

    `Internal K (%)` = structure(rep(NA_real_, nrow), required = FALSE, allowsX = TRUE, key = "TI:18",
                                 description = "Internal radionuclide concentrations in parts per million for Uranium, Thorium and Rubidium and % for Potassium. Inputs must be 0 or positive and should not be left blank."), #

    `errInternal K (%)` = structure(rep(NA_real_, nrow), required = FALSE, allowsX = TRUE, key = "TI:19",
                                    description = "Internal radionuclide concentrations in parts per million for Uranium, Thorium and Rubidium and % for Potassium. Inputs must be 0 or positive and should not be left blank."), #

    `Rb (ppm)` = structure(rep(NA_real_, nrow), required = FALSE, allowsX = TRUE, key = "TI:20",
                           description = "Internal radionuclide concentrations in parts per million for Uranium, Thorium and Rubidium and % for Potassium. Inputs must be 0 or positive and should not be left blank."), #

    `errRb (ppm)` = structure(rep(NA_real_, nrow), required = FALSE, allowsX = TRUE, key = "TI:21",
                              description = "Internal radionuclide concentrations in parts per million for Uranium, Thorium and Rubidium and % for Potassium. Inputs must be 0 or positive and should not be left blank."), #

    `Calculate internal Rb from K conc?` = structure(factor(rep(NA_character_, nrow), c("Y", "N", "X")), required = FALSE, allowsX = TRUE, key = "TI:22",
                                                     description = "Option to calculate an internal Rubidium concentration from Potassium, using the 270:1 ratio suggested by Mejdahl (1987). Input should be yes 'Y' or no 'N'."), #

    `User external alphadoserate (Gy.ka-1)` = structure(rep(NA_real_, nrow), required = FALSE, allowsX = TRUE, key = "TI:23",
                                                        description = "Users may input directly measured values for external alpha, beta and gamma dose rates (in Gy.ka-1). Any positive inputs in these fields will override dose rates calculated from radionuclide concentrations. Inputs should be 0 or positive and should not be left blank"), #

    `errUser external alphadoserate (Gy.ka-1)` = structure(rep(NA_real_, nrow), required = FALSE, allowsX = TRUE, key = "TI:24",
                                                           description = "Users may input directly measured values for external alpha, beta and gamma dose rates (in Gy.ka-1). Any positive inputs in these fields will override dose rates calculated from radionuclide concentrations. Inputs should be 0 or positive and should not be left blank"), #

    `User external betadoserate (Gy.ka-1)` = structure(rep(NA_real_, nrow), required = FALSE, allowsX = TRUE, key = "TI:25",
                                                       description = "Users may input directly measured values for external alpha, beta and gamma dose rates (in Gy.ka-1). Any positive inputs in these fields will override dose rates calculated from radionuclide concentrations. Inputs should be 0 or positive and should not be left blank"), #

    `errUser external betadoserate (Gy.ka-1)` = structure(rep(NA_real_, nrow), required = FALSE, allowsX = TRUE, key = "TI:26",
                                                          description = "Users may input directly measured values for external alpha, beta and gamma dose rates (in Gy.ka-1). Any positive inputs in these fields will override dose rates calculated from radionuclide concentrations. Inputs should be 0 or positive and should not be left blank"), #

    `User external gamma doserate (Gy.ka-1)` = structure(rep(NA_real_, nrow), required = FALSE, allowsX = TRUE, key = "TI:27",
                                                         description = "Users may input directly measured values for external alpha, beta and gamma dose rates (in Gy.ka-1). Any positive inputs in these fields will override dose rates calculated from radionuclide concentrations. Inputs should be 0 or positive and should not be left blank"), #

    `errUser external gammadoserate (Gy.ka-1)` = structure(rep(NA_real_, nrow), required = FALSE, allowsX = TRUE, key = "TI:28",
                                                           description = "Users may input directly measured values for external alpha, beta and gamma dose rates (in Gy.ka-1). Any positive inputs in these fields will override dose rates calculated from radionuclide concentrations. Inputs should be 0 or positive and should not be left blank"), #

    `User internal doserate (Gy.ka-1)` = structure(rep(NA_real_, nrow), required = FALSE, allowsX = TRUE, key = "TI:29",
                                                   description = "Users may input an internal dose rate (either alpha, beta or the sum of the two; in Gy.ka-1). DRAC will assume that this value has already been corrected for attenuation. Inputs in this field will override dose rates calculated from radionuclide concentrations. Inputs should be 0 or positive and not left blank."), #

    `errUser internal doserate (Gy.ka-1)` = structure(rep(NA_real_, nrow), required = FALSE, allowsX = TRUE, key = "TI:30",
                                                      description = "Users may input an internal dose rate (either alpha, beta or the sum of the two; in Gy.ka-1). DRAC will assume that this value has already been corrected for attenuation. Inputs in this field will override dose rates calculated from radionuclide concentrations. Inputs should be 0 or positive and not left blank."), #

    `Scale gammadoserate at shallow depths?` = structure(factor(rep(NA_character_, nrow), c("Y", "N")), required = FALSE, allowsX = FALSE, key = "TI:31",
                                                         description = "Users may choose to scale gamma dose rates for samples taken within 0.3 m of the ground surface. The scaling factors of Aitken (1985) are used. Input should be yes 'Y' or no 'N'."), #

    `Grain size min (microns)` = structure(rep(NA_real_, nrow), required = TRUE, allowsX = FALSE, key = "TI:32",
                                           description = "The grain size range analysed. DRAC can be used for the grain size ranges between 1 and 1000 microns. Inputs should range between 1 and 1000 and not be left blank."), #

    `Grain size max (microns)` = structure(rep(NA_real_, nrow), required = TRUE, allowsX = FALSE, key = "TI:33",
                                           description = "The grain size range analysed. DRAC can be used for the grain size ranges between 1 and 1000 microns. Inputs should range between 1 and 1000 and not be left blank."), #

    `alpha-Grain size attenuation` = structure(factor(rep(NA_character_, nrow), c("Bell1980", "Brennanetal1991")), required = TRUE, allowsX = FALSE, key = "TI:34",
                                               description = "The grain size attenuation factors for the alpha dose rate. Users have the option of datasets from Bell (1980) and Brennan et al. (1991). Input must be 'Bell1980' or 'Brennanetal1991'."), #

    `beta-Grain size attenuation ` = structure(factor(rep(NA_character_, nrow), c("Mejdahl1979", "Brennan2003", "Guerinetal2012-Q", "Guerinetal2012-F")), required = TRUE, allowsX = FALSE, key = "TI:35",
                                               description = "The grain size attenuation factors for the beta dose rate. Users have the option of datasets from Mejdahl (1979), Brennan (2003) and Guerin et al. (2012) for quartz or feldspar. Input must be 'Mejdahl1979', 'Brennan2003', 'Guerinetal2012-Q' or 'Guerinetal2012-F' ."), #

    `Etch depth min (microns)` = structure(rep(NA_real_, nrow), required = TRUE, allowsX = FALSE, key = "TI:36",
                                           description = "The user defined etch depth range (microns). Inputs should range between 0 and 30 and not be left blank."), #

    `Etch depth max (microns)` = structure(rep(NA_real_, nrow), required = TRUE, allowsX = FALSE, key = "TI:37",
                                           description = "The user defined etch depth range (microns). Inputs should range between 0 and 30 and not be left blank."), #

    `beta-Etch depth attenuation factor` = structure(factor(rep(NA_character_, nrow), c("Bell1979", "Brennan2003", "X")), required = FALSE, allowsX = TRUE, key = "TI:38",
                                                     description = "The etch depth attenuation factors for the beta dose rate. Users have the option of datasets from Bell (1979) and Brennan (2003). Input must be 'Bell1979' or 'Brennan2003'. Note: only the dataset of Bell (1980) is provided for attenuation of the alpha dose rate by etching."), #

    `a-value` = structure(rep(NA_real_, nrow), required = FALSE, allowsX = TRUE, key = "TI:39",
                          description = "Alpha track efficiency value and uncertainty defined by the user. Inputs should be 0 or positive and not left blank."), #

    `erra-value` = structure(rep(NA_real_, nrow), required = TRUE, allowsX = TRUE, key = "TI:40",
                             description = "Alpha track efficiency value and uncertainty defined by the user. Inputs should be 0 or positive and not left blank."), #

    `Water content ((wet weight - dry weight)/dry weight) %` = structure(rep(NA_real_, nrow), required = TRUE, allowsX = FALSE, key = "TI:41",
                                                                         description = "Sediment water content (%) over the burial period. Inputs should be 0 or positive and not be left blank."), #

    `errWater content %` = structure(rep(NA_real_, nrow), required = FALSE, allowsX = FALSE, key = "TI:42",
                                     description = "Sediment water content (%) over the burial period. Inputs should be 0 or positive and not be left blank."), #

    `Depth (m)` = structure(rep(NA_character_, nrow), required = FALSE, allowsX = TRUE, key = "TI:43",
                            description = "Depth and uncertainty from which sample was extracted beneath the ground surface. Inputs should be 0 or positive and not left blank."), #

    `errDepth (m)` = structure(rep(NA_character_, nrow), required = FALSE, allowsX = TRUE, key = "TI:44",
                               description = "Depth and uncertainty from which sample was extracted beneath the ground surface. Inputs should be 0 or positive and not left blank."), #

    `Overburden density (g cm-3)` = structure(rep(NA_real_, nrow), required = TRUE, allowsX = FALSE, key = "TI:45",
                                              description = "Density of the overlying sediment matrix from which the sample was taken. Inputs should be 0 or positive and not be left blank. The scaling calculation will use the overburden density and uncertainty provided."), #

    `errOverburden density (g cm-3)` = structure(rep(NA_real_, nrow), required = TRUE, allowsX = FALSE, key = "TI:46",
                                                 description = "Density of the overlying sediment matrix from which the sample was taken. Inputs should be 0 or positive and not be left blank. The scaling calculation will use the overburden density and uncertainty provided."), #

    `Latitude (decimal degrees)` = structure(rep(NA_character_, nrow), required = FALSE, allowsX = TRUE, key = "TI:47",
                                             description = "Latitude and longitude of sample location (in degree decimals). Positive values should be used for northern latitudes and eastern longitudes and negative values for southern latitudes and western longitudes. Inputs should range from -90 to 90 degrees for latitudes and -180 to 180 degrees for longitude."), #

    `Longitude (decimal degrees)` = structure(rep(NA_character_, nrow), required = FALSE, allowsX = TRUE, key = "TI:48",
                                              description = "Latitude and longitude of sample location (in degree decimals). Positive values should be used for northern latitudes and eastern longitudes and negative values for southern latitudes and western longitudes. Inputs should range from -90 to 90 degrees for latitudes and -180 to 180 degrees for longitude."), #

    `Altitude (m)` = structure(rep(NA_character_, nrow), required = FALSE, allowsX = TRUE, key = "TI:49",
                               description = "Altitude of sample location in metres above sea level. Input should be less than 5000 and not left blank."), #

    `User cosmicdoserate (Gy.ka-1)` = structure(rep(NA_character_, nrow), required = FALSE, allowsX = TRUE, key = "TI:50",
                                                description = "Users may input a cosmic dose rate (in Gy.ka-1). Inputs in these fields will override the DRAC calculated cosmic dose rate. Inputs should be positive or 'X' if not required, and not left blank."), #

    `errUser cosmicdoserate (Gy.ka-1)` = structure(rep(NA_character_, nrow), required = FALSE, allowsX = TRUE, key = "TI:51",
                                                   description = "Users may input a cosmic dose rate (in Gy.ka-1). Inputs in these fields will override the DRAC calculated cosmic dose rate. Inputs should be positive or 'X' if not required, and not left blank."), #

    `De (Gy)` = structure(rep(NA_character_, nrow), required = FALSE, allowsX = TRUE, key = "TI:52",
                          description = "Sample De and uncertainty (in Gy). Inputs should be positive or 'X' if not required, and not left blank."), #

    `errDe (Gy)` = structure(rep(NA_character_, nrow), required = FALSE, allowsX = TRUE, key = "TI:53",
                             description = "Sample De and uncertainty (in Gy). Inputs should be positive or 'X' if not required, and not left blank.") #
  )

  ## RETURN VALUE ----
  # add an additional DRAC class so we can define our own S3 method for as.data.frame
  class(template) <- c("DRAC.list", "list")

  # set preset
  if (!is.null(preset))
    template <- .preset_DRAC(template, preset)

  invisible(template)
}

## Internal helper: fill all 53 fields of a DRAC template with the values of
## the requested preset, recycling each preset value over every row.
## 'x' is the template list, 'preset' a name validated in template_DRAC().
.preset_DRAC <- function(x, preset) {

  preset_list <- list(
    ## DRAC COLUMNS (TI:xx) --- TI:1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53
    "quartz_coarse" = list("RLum_preset", "quartz_coarse", "Q", "Guerinetal2011", "X", "X", "X", "X", "X", "X", "X", "X", "N", "X", "X", "X", "X", "X", "X", "X", "X", "X", "X", "X", "X", "X", "X", "X", "X", "X", "Y", 100, 200, "Brennanetal1991", "Guerinetal2012-Q", 20, 5, "Bell1979", 0.035, 0.01, 0, 0, 0, 0, 1.8, 0.1, "X", "X", 0, "X", "X", "X", "X"),
    "quartz_fine" = list("RLum_preset", "quartz_fine", "Q", "Guerinetal2011", "X", "X", "X", "X", "X", "X", "X", "X", "N", "X", "X", "X", "X", "X", "X", "X", "X", "X", "X", "X", "X", "X", "X", "X", "X", "X", "Y", 4, 11, "Brennanetal1991", "Guerinetal2012-Q", 0, 0, "Bell1979", 0.035, 0.01, 0, 0, 0, 0, 1.8, 0.1, "X", "X", 0, "X", "X", "X", "X"),
    "feldspar_coarse" = list("RLum_preset", "feldspar_coarse", "F", "Guerinetal2011", "X", "X", "X", "X", "X", "X", "X", "X", "Y", "X", "X", "X", "X", 12.5, 0.5, "X", "X", "X", "X", "X", "X", "X", "X", "X", "X", "X", "Y", 100, 200, "Brennanetal1991", "Guerinetal2012-F", 0, 0, "Bell1979", 0.08, 0.01, 0, 0, 0, 0, 1.8, 0.1, "X", "X", 0, "X", "X", "X", "X"),
    "polymineral_fine" = list("RLum_preset", "polymineral_fine", "PM", "Guerinetal2011", "X", "X", "X", "X", "X", "X", "X", "X", "Y", "X", "X", "X", "X", 12.5, 0.5, "X", "X", "X", "X", "X", "X", "X", "X", "X", "X", "X", "Y", 4, 11, "Brennanetal1991", "Guerinetal2012-F", 0, 0, "Bell1979", 0.08, 0.01, 0, 0, 0, 0, 1.8, 0.1, "X", "X", 0, "X", "X", "X", "X"),
    "DRAC-example_quartz" = list("DRAC-example", "Quartz", "Q", "Guerinetal2011", 3.4, 0.51, 14.47, 1.69, 1.2, 0.14, 0, 0, "N", "X", "X", "X", "X", "X", "X", "X", "X", "X", "X", "X", "X", "X", "X", "X", "X", "X", "N", 90, 125, "Brennanetal1991", "Guerinetal2012-Q", 8, 10, "Bell1979", 0, 0, 5, 2, 2.22, 0.05, 1.8, 0.1, 30, 70, 150, "X", "X", 20, 0.2),
    "DRAC-example_feldspar" = list("DRAC-example", "Feldspar", "F", "AdamiecAitken1998", 2, 0.2, 8, 0.4, 1.75, 0.05, 0, 0, "Y", "X", "X", "X", "X", 12.5, 0.5, "X", "X", "N", "X", "X", "X", "X", "X", "X", "X", "X", "Y", 180, 212, "Bell1980", "Mejdahl1979", 0, 0, "Bell1979", 0.15, 0.05, 10, 3, 0.15, 0.02, 1.8, 0.1, 60, 100, 200, "X", "X", 15, 1.5),
    "DRAC-example_polymineral" = list("DRAC-example", "Polymineral", "PM", "AdamiecAitken1998", 4, 0.4, 12, 0.12, 0.83, 0.08, 0, 0, "Y", "X", "X", "X", "X", 12.5, 0.5, "X", "X", "N", "X", "X", 2.5, 0.15, "X", "X", "X", "X", "Y", 4, 11, "Bell1980", "Mejdahl1979", 0, 0, "Bell1979", 0.086, 0.0038, 10, 5, 0.2, 0.02, 1.8, 0.1, 46, 118, 200, 0.2, 0.1, 204.47, 2.69)
  )

  ## number of rows in the template (all fields have the same length)
  n <- length(x[[1]])

  ## recycle each preset value over all rows; note this replaces the
  ## structure()/factor() vectors (and their attributes) with plain values
  for (i in 1:length(x))
    x[[i]] <- rep(preset_list[[preset]][[i]], n)

  return(x)
}
Luminescence/R/get_Quote.R0000644000176200001440000001176614236146743015161 0ustar liggesusers#' Function to return essential quotes
#'
#' This function returns one of the collected essential quotes in the
#' growing library. If called without any parameters, a random quote is
#' returned.
#'
#' @param ID [character] (*optional*): quote ID to be returned.
#'
#' @param separated [logical] (*with default*): return result in separated form.
#'
#' @return Returns a character with quote and respective (false) author.
#' #' @section Function version: 0.1.5 #' #' @author Quote credits: Michael Dietze, GFZ Potsdam (Germany), Sebastian Kreutzer, Geography & Earth Science, Aberystwyth University (United Kingdom), Dirk Mittelstraß, TU Dresden (Germany), Jakob Wallinga (Wageningen University, Netherlands) #' #' @examples #' #' ## ask for an arbitrary quote #' get_Quote() #' #' @md #' @export get_Quote <- function( ID, separated = FALSE ) { ## definition of the ever growing quote data set quotes <- rbind( c("Anonymous student hotel employee", "Let me double check this."), c("The ordinary reviewer", "I love it when a plan comes together."), c("A tunnelling electron", "God does not play dice."), c("Goldfinger", "You cannot get this machine better and cheaper than from us."), c("A PhD supervisor", "Live long and in prosper."), c("A PhD supervisor", "You are not depressive, you simply have a crappy life."), c("A trapped charge", "I want to break free."), c("The R-package Luminescence manual", "Call unto me, and I will answer thee, and will shew thee great things, and difficult, which thou knowest not."), c("A stimulated feldspar grain", "I'm so excited and I just can't hide it."), c("The true age", "How many roads..."), c("The undecided OSL component", "Should I stay or should I go?"), c("A fluvially transported quartz grain at night", "Always look at the bright side of life."), c("An arctic sediment outcrop", "Marmor, Stein und Eisen bricht..."), c("A common luminescence reader customer", "If anything can go wrong, it will."), c("A blue LED to a trapped electron", "Resistance is futile."), c("A trapped electron to a yellow LED", "Well, that's all you've got?"), c("A weathering rock", "Who wants to live forever?"), c("A new pIRIR derivative", "20,000 miles below the sea."), c("Robert Oppenheimer", "I want this thing to work by just pressing one button."), c("An arbitrary member of the CRAN team", "No shirt, no shoes, no service!"), c("Rubber mallet to steel cylinder", "Let's rock and 
roll."), c("A data import function", "Better late than never."), c("A luminescence lab staff member to its customer", "Tell me the age, I tell you the price."), c("The NSA", "O'zapft is."), c("The natural dose", "You only live once."), c("A Windows user", "An apple a day keeps the doctor away."), c("The authors of sTeve", "We love to entertain you."), c("Any arbitrary independent OSL device manufacturer", "Sure it will work, it was me who built it!"), c("Response to the reviewer", "You are right, it was just a guess."), c("An aliquot disc", "The answer [...] is: 48"), c("Push Pin", "Made of used sample carriers"), c("A motivated R-Team member", "We are doing this not just for statistical reasons, there is real science behind it!"), c("An unbiased reviewer", "The data is too poor to be published in QG, try a higher ranked journal."), c("R Team member, asked about statistical details", "No idea, I'm just here for the visualisation."), c("An arbitrary unexperienced RLum-user", "Little by little, the bird builds its nest."), c("The answer to life, the universe and everything", "get_rightAnswer()"), c("Der Tatortreiniger", "Dreck ist nur Materie am falschen Ort."), c("Die Ex vom Tatortreiniger", "Das Ziel ist im Weg."), c("Bright grain to dim grains", "I'm so shiny!"), c("Fast component to slow component", "Life is short!"), c("Fast component to slow component", "What are you waiting for?"), c("Violet photon to deep trap electron", "Today I'm kicking you out of your comfort zone!"), c("Deep trap electron to infrared photon", "Don't bother me, I need to rest."), c("A single grain", "I feel so lonley."), c("Luminescence data to Bayesian process", "Don't you ever touch me again."), c("Quartz grain to heating plate", "Go ahead, I need a phase change."), c("Photon to electron", "I am in charge!"), c("You cannot spell 'data analysis' without 'daily satan'"), c("Bright grain to dim grain", "Don't you get it?") ) ## Check input data if(missing(ID) == TRUE) { ID <- sample(x = 
seq(from = 1, to = nrow(quotes)), size = 1) } ## check for correct ID and generate qoute if(length(ID) < 1 | ID > nrow(quotes)) { quote.out <- "Sorry, but this was an impossible task!" } else { ## generate qoute(s) if(separated == FALSE) { quote.out <- paste(quotes[ID,1], ": '", quotes[ID,2], "'", sep = "") } else { quote.out <- quotes[ID,] } } ## return quotes return(quote.out) } Luminescence/R/analyse_Al2O3C_Measurement.R0000644000176200001440000005663214264017373020237 0ustar liggesusers#' @title Al2O3:C Passive Dosimeter Measurement Analysis #' #' @description The function provides the analysis routines for measurements on a #' FI lexsyg SMART reader using Al2O3:C chips according to Kreutzer et al., 2018 #' #' @details #' #' **Working with a travel dosimeter** #' #' The function allows to define particular aliquots as travel dosimeters. For example: #' `travel_dosimeter = c(1,3,5)` sets aliquots 1, 3 and 5 as travel dosimeters. These dose values #' of this dosimeters are combined and automatically subtracted from the obtained dose values #' of the other dosimeters. #' #' **Calculate TL dose** #' #' The argument `calculate_TL_dose` provides the possibility to experimentally calculate a TL-dose, #' i.e. an apparent dose value derived from the TL curve ratio. However, it should be noted that #' this value is only a fall back in case something went wrong during the measurement of the optical #' stimulation. The TL derived dose value is corrected for cross-talk and for the irradiation time, #' but not considered if a travel dosimeter is defined. #' #' Calculating the palaeodose is possible without **any TL** curve in the sequence! #' #' **Test parameters** #' #' `TL_peak_shift` [numeric] (default: `15`): #' #' Checks whether the TL peak shift is bigger > 15 K, indicating a problem with the #' thermal contact of the chip. #' #' `stimulation_power` [numeric] (default: `0.05`): #' #' So far available, information on the delivered optical stimulation are compared. 
Compared are #' the information from the first curves with all others. If the ratio differs more from #' unity than the defined by the threshold, a warning is returned. #' #' @param object [RLum.Analysis-class] (**required**): measurement input #' #' @param signal_integral [numeric] (*optional*): signal integral, used for the signal #' and the background. Example: `c(1:10)` for the first 10 channels. #' If nothing is provided the full range is used #' #' @param dose_points [numeric] (*with default*): #' vector with dose points, if dose points are repeated, only the general #' pattern needs to be provided. Default values follow the suggestions #' made by Kreutzer et al., 2018 #' #' @param recordType [character] (*with default*): input curve selection, which is passed to #' function [get_RLum]. To deactivate the automatic selection set the argument to `NULL` #' #' @param irradiation_time_correction [numeric] or [RLum.Results-class] (*optional*): #' information on the used irradiation time correction obtained by another experiments. #' I a `numeric` is provided it has to be of length two: mean, standard error #' #' @param calculate_TL_dose [logical] (*with default*): Enables/disables experimental dose estimation #' based on the TL curves. Taken is the ratio of the peak sums of each curves +/- 5 channels. #' #' @param cross_talk_correction [numeric] or [RLum.Results-class] (*optional*): #' information on the used irradiation time correction obtained by another experiments. #' If a `numeric` vector is provided it has to be of length three: #' mean, 2.5 % quantile, 97.5 % quantile. #' #' @param travel_dosimeter [numeric] (*optional*): specify the position of the travel dosimeter #' (so far measured a the same time). The dose of travel dosimeter will be subtracted from all #' other values. #' #' @param test_parameters [list] (*with default*): #' set test parameters. Supported parameters are: `TL_peak_shift` All input: [numeric] #' values, `NA` and `NULL` (s. 
Details) #' #' @param verbose [logical] (*with default*): #' enable/disable verbose mode #' #' @param plot [logical] (*with default*): enable/disable plot output, if `object` is of type [list], #' a [numeric] vector can be provided to limit the plot output to certain aliquots #' #' @param ... further arguments that can be passed to the plot output, supported are `norm`, `main`, `mtext`, #' `title` (for self-call mode to specify, e.g., sample names) #' #' @return Function returns results numerically and graphically: #' #' -----------------------------------\cr #' `[ NUMERICAL OUTPUT ]`\cr #' -----------------------------------\cr #' #' **`RLum.Results`**-object #' #' **slot:** **`@data`** #' #' \tabular{lll}{ #' **Element** \tab **Type** \tab **Description**\cr #' `$data` \tab `data.frame` \tab the estimated equivalent dose \cr #' `$data_table` \tab `data.frame` \tab full dose and signal table \cr #' `test_parameters` \tab `data.frame` \tab results with test parameters \cr #' `data_TDcorrected` \tab `data.frame` \tab travel dosimeter corrected results (only if TD was provided)\cr #' } #' #' *Note: If correction the irradiation time and the cross-talk correction method is used, the De #' values in the table `data` table are already corrected, i.e. if you want to get an uncorrected value, #' you can use the column `CT_CORRECTION` remove the correction* #' #'**slot:** **`@info`** #' #' The original function call #' #' ------------------------\cr #' `[ PLOT OUTPUT ]`\cr #' ------------------------\cr #' #' - OSL and TL curves, combined on two plots. #' #' #' @section Function version: 0.2.6 #' #' @author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #' @seealso [analyse_Al2O3C_ITC] #' #' @references #' #' Kreutzer, S., Martin, L., Guérin, G., Tribolo, C., Selva, P., Mercier, N., 2018. Environmental Dose Rate #' Determination Using a Passive Dosimeter: Techniques and Workflow for alpha-Al2O3:C Chips. #' Geochronometria 45, 56-67. 
#' #' @keywords datagen #' #' @examples #' ##load data #' data(ExampleData.Al2O3C, envir = environment()) #' #' ##run analysis #' analyse_Al2O3C_Measurement(data_CrossTalk) #' #' @md #' @export analyse_Al2O3C_Measurement <- function( object, signal_integral = NULL, dose_points = c(0,4), recordType = c("OSL (UVVIS)", "TL (UVVIS)"), calculate_TL_dose = FALSE, irradiation_time_correction = NULL, cross_talk_correction = NULL, travel_dosimeter = NULL, test_parameters = NULL, verbose = TRUE, plot = TRUE, ... ){ # Self call ----------------------------------------------------------------------------------- if(is(object, "list")){ if(!all(unlist(lapply(object, function(x){is(x, "RLum.Analysis")})))){ stop("[analyse_Al2O3C_Measurement()] The elements in 'object' are not all of type 'RLum.Analsyis'", call. = FALSE) } ##expand input arguments if(!is.null(signal_integral)){ signal_integral <- rep(list(signal_integral), length = length(object)) } ##dose points if(is(dose_points, "list")){ dose.points <- rep(dose_points, length = length(object)) }else{ dose_points <- rep(list(dose_points), length = length(object)) } ##irradiation time correction if(is(irradiation_time_correction, "list")){ irradiation_time_correction <- rep(irradiation_time_correction, length = length(object)) }else{ irradiation_time_correction <- rep(list(irradiation_time_correction), length = length(object)) } ##cross talk correction if(is( cross_talk_correction, "list")){ cross_talk_correction <- rep( cross_talk_correction, length = length(object)) }else{ cross_talk_correction <- rep(list( cross_talk_correction), length = length(object)) } ##test_parameters if(is(test_parameters[[1]], "list")){ test_parameters <- rep(test_parameters, length = length(object)) }else{ test_parameters <- rep(list(test_parameters), length = length(object)) } ##verbose ##plot if(is(plot, "logical")){ plot <- rep(x = plot, length(object)) }else{ plot <- 1:length(object)%in%plot } ##run analyis results <- lapply(1:length(object), 
function(x) { temp <- analyse_Al2O3C_Measurement( object = object[[x]], signal_integral = signal_integral[[x]], dose_points = dose_points[[x]], irradiation_time_correction = irradiation_time_correction[[x]], cross_talk_correction = cross_talk_correction[[x]], test_parameters = test_parameters[[x]], calculate_TL_dose = calculate_TL_dose, verbose = verbose, plot = plot[x], ... ) ##adjusting the terminal output, to avoid confusions if(verbose) cat(" ... (#",x, " | ALQ POS: ", temp$data$POSITION,")\n", sep = "") ##add running number to the plot, but only of we had a plot here... if(plot[x]){ title(main = paste0(list(...)$title[x], " ","#", x), adj = 1, line = 3) } return(temp) }) ##merge results results <- merge_RLum(results) ##correct sys.call, otherwise it gets a little bit strange ##why this is not implemented in the merge_RLum() method ... because here it would be wrong! results@info[names(results@info) == "call"] <- NULL results@info$call <- sys.call() ##travel dosimeter ##check for travel dosimeter and subtract the values so far this is meaningful at all if(!is.null(travel_dosimeter)){ ##check data type if(!is(travel_dosimeter, "numeric")) stop("[analyse_Al2O3C_Measurement()] Input for `travel_dosimeter` is not numeric!", call. = FALSE) ##check whether everything is subtracted from everything ... you never know, users do weird stuff if(length(travel_dosimeter) == nrow(results$data)) try(stop("[analyse_Al2O3C_Measurement()] You specified every position as travel dosimeter, nothing corrected!", call. = FALSE)) ##check if the position is valid if(!any(travel_dosimeter%in%results$data$POSITION)) try(stop("[analyse_Al2O3C_Measurement()] Invalid position in 'travel_dosimeter', nothing corrected!", call. 
= FALSE)) ##correct for the travel dosimeter calculating the weighted mean and the sd (as new error) ##if only one value is given just take it if(length(travel_dosimeter) == 1 && nrow(results$data[travel_dosimeter==results$data$POSITION,c(1,2)]) == 1){ correction <- as.numeric(results$data[travel_dosimeter==results$data$POSITION,c(1,2)]) }else{ temp.correction <- results$data[results$data$POSITION%in%travel_dosimeter,c(1,2)] correction <- c( stats::weighted.mean( x = temp.correction[[1]], w = if(all(temp.correction[[2]]==0)){rep(1, length(temp.correction[[2]]))} else {temp.correction[[2]]}), sd(temp.correction[,1])) rm(temp.correction) } ##subtract all the values, in a new data frame, we do not touch the original data data_TDcorrected <- data.frame( DE = results@data$data[!results$data$POSITION%in%travel_dosimeter,1] - correction[1], DE_ERROR = sqrt(results@data$data[!results$data$POSITION%in%travel_dosimeter,2]^2 + correction[2]^2), POSITION = results@data$data[!results$data$POSITION%in%travel_dosimeter, "POSITION"] ) ##however, we set information on the travel dosimeter in the corresponding column results@data$data$TRAVEL_DOSIMETER <- results$data$POSITION%in%travel_dosimeter ##attach the new element to the results output results@data <- c(results@data, list(data_TDcorrected = data_TDcorrected)) ##return message if(verbose) cat("\n ...+ travel dosimeter correction applied.\n ...+ results stored in object $data_TDcorrected.\n\n") } ##end travel dosimeter ##return results return(results) } # Integrity check --------------------------------------------------------------------------- ##TODO ... 
do more, push harder ##Add sufficient unit tests # Preparation --------------------------------------------------------------------------------- ##select curves based on the recordType selection; if not NULL if(!is.null(recordType)){ object_raw <- object object <- get_RLum(object, recordType = recordType, drop = FALSE) } ##set signal integral if(is.null(signal_integral)){ signal_integral <- c(1:nrow(object[[1]][])) }else{ ##check whether the input is valid, otherwise make it valid if(min(signal_integral) < 1 | max(signal_integral) > nrow(object[[1]][])){ signal_integral <- c(1:nrow(object[[1]][])) warning( paste0( "[analyse_Al2O3C_Measurement()] Input for 'signal_integral' corrected to 1:", nrow(object[[1]][]) ), call. = FALSE ) } } ## Set Irradiation Time Correction --------------- if (!is.null(irradiation_time_correction)) { if (is(irradiation_time_correction, "RLum.Results")) { if (irradiation_time_correction@originator == "analyse_Al2O3C_ITC") { irradiation_time_correction <- get_RLum(irradiation_time_correction) ##consider the case for more than one observation ... if(nrow(irradiation_time_correction)>1){ irradiation_time_correction <- c(mean(irradiation_time_correction[[1]]), sd(irradiation_time_correction[[1]])) }else{ irradiation_time_correction <- c(irradiation_time_correction[[1]], irradiation_time_correction[[2]]) } } else{ stop( "[analyse_Al2O3C_Measurement()] The object provided for the argument 'irradiation_time_correction' was created by an unsupported function!", call. = FALSE ) } } } ## Set Cross Talk Correction --------------- ##check wehther the information on the position was stored in the input ##object if(!is.null(get_RLum(object = object[[1]], info.object = "position"))){ POSITION <- get_RLum(object = object[[1]], info.object = "position") }else{ message("[analyse_Al2O3_Measurement()] Aliquot position number was not found. 
No cross talk correction was applied!") cross_talk_correction <- c(0,0,0) POSITION <- NA } if(is.null(cross_talk_correction)){ cross_talk_correction <- c(0,0,0) }else{ ##check whether the input is of type RLum.Results and check orignator if (is(cross_talk_correction, "RLum.Results") && cross_talk_correction@originator == "analyse_Al2O3C_CrossTalk") { ##grep cross talk correction and calculate values for ##this particular carousel position cross_talk_correction <- as.numeric(predict(cross_talk_correction$fit, newdata = data.frame(x = POSITION), interval = "confidence")) }else{ stop( "[analyse_Al2O3C_Measurement()] The object provided for the argument 'cross_talk_correction' was created by an unsupported function or has a wrong originator!", call. = FALSE ) } } # Calculation --------------------------------------------------------------------------------- ##we have two dose points, and one background curve, we do know only the 2nd dose ##set test parameters test_parameters.default <- list( TL_peak_shift = 15, stimulation_power = 0.05 ) ##modify default values by given input if(!is.null(test_parameters)){ test_parameters <- modifyList(test_parameters.default, test_parameters) ##remove NULL elements from list test_parameters <- test_parameters[!sapply(test_parameters, is.null)] }else{ test_parameters <- test_parameters.default } ##calculate integrated light values NATURAL <- sum(get_RLum(object, recordType = "OSL")[[1]]@data[signal_integral, 2]) REGENERATED <- sum(get_RLum(object, recordType = "OSL")[[2]]@data[signal_integral, 2]) BACKGROUND <- sum(get_RLum(object, recordType = "OSL")[[3]]@data[signal_integral, 2]) ##do the same for the TL if (calculate_TL_dose[1] && any(grepl("TL", names(object)))){ NATURAL_TL <- try(sum( object@records[[2]]@data[ (which.max(object@records[[2]]@data[,2])-5):(which.max(object@records[[2]]@data[,2])+5),2]), silent = TRUE) REGENERATED_TL <- try(sum( object@records[[4]]@data[ 
(which.max(object@records[[4]]@data[,2])-5):(which.max(object@records[[4]]@data[,2])+5),2]), silent = TRUE) ##catch errors if the integration fails if(inherits(NATURAL_TL, "try-error")){ NATURAL_TL <- NA warning("[analyse_Al2O3_Measurement()] Natural TL signal out of bounds, NA returned!", call. = FALSE, immediate. = TRUE) } if(inherits(REGENERATED_TL, "try-error")){ REGENERATED_TL <- NA warning("[analyse_Al2O3_Measurement()] Regenerated TL signal out of bounds, NA returned!", call. = FALSE, immediate. = TRUE) } }else{ NATURAL_TL <- NA REGENERATED_TL <- NA } ##combine into data.frame temp_df <- data.frame( POSITION = POSITION, DOSE = if(!is.null(irradiation_time_correction)){ dose_points + irradiation_time_correction[1] }else{ dose_points }, DOSE_ERROR = if(!is.null(irradiation_time_correction)){ dose_points * irradiation_time_correction[2]/irradiation_time_correction[1] }else{ 0 }, STEP = c("NATURAL", "REGENERATED"), INTEGRAL = c(NATURAL, REGENERATED), BACKGROUND = c(BACKGROUND, BACKGROUND), NET_INTEGRAL = c(NATURAL - BACKGROUND, REGENERATED - BACKGROUND), NATURAL_TL = NATURAL_TL, REGENERATED_TL = REGENERATED_TL, row.names = NULL ) ##0 dose points should not be biased by the correction .. 
##Note: it does not mean that 0 s beneath the source has a dose of 0, however, in the certain ##case aliquot was never moved under the source id_zero <- which(dose_points == 0) temp_df$DOSE[id_zero] <- 0 temp_df$DOSE_ERROR[id_zero] <- 0 ##calculate DE by using the irradiation time correction AND the cross talk correction ##(1) sample dose point values with irradiation time corrections (random) if(!is.null(irradiation_time_correction)){ DOSE_MC <- rnorm(1000, mean = temp_df$DOSE[2], sd = temp_df$DOSE_ERROR[2]) }else{ DOSE_MC <- temp_df$DOSE[2] } ##(2) random sampling from cross-irradiation CT <- runif(1000, min = cross_talk_correction[2], max = cross_talk_correction[3]) ##(3) signal ratio INTEGRAL_RATIO <- temp_df$NET_INTEGRAL[1]/temp_df$NET_INTEGRAL[2] ##(4) calculate DE temp_DE <- (DOSE_MC * INTEGRAL_RATIO) ##(5) substract cross-talk value from DE temp_DE <- temp_DE - CT ##(5.1) calculate TL based DE ##calculate a dose based on TL ##Note: we use irradiation time correction and CT correction based on GSL measurements if(calculate_TL_dose){ TL_Ratio <- NATURAL_TL/REGENERATED_TL temp_TL_DE <- (DOSE_MC * TL_Ratio) - CT TL_DE <- mean(temp_TL_DE) TL_DE.ERROR <- sd(temp_TL_DE) }else{ TL_DE <- NA TL_DE.ERROR <- NA } ##(6) create final data.frame data <- data.frame( DE = mean(temp_DE), DE_ERROR = sd(temp_DE), POSITION, INTEGRAL_RATIO, TRAVEL_DOSIMETER = NA, CT_CORRECTION = cross_talk_correction[1], CT_CORRECTION_Q2.5 = cross_talk_correction[2], CT_CORRECTION_Q97.5 = cross_talk_correction[3], TL_DE = TL_DE, TL_DE.ERROR = TL_DE.ERROR, row.names = NULL ) ##calculate test parameters ##TL_peak_shift ##check TL peak positions, if it differers more than the threshold, return a message ##can be done better, but should be enough here. 
if (any("TL_peak_shift"%in%names(test_parameters)) && any(grepl("TL", names(object)))){ ##calculate value TP_TL_peak_shift.value <- abs((object[[2]][which.max(object[[2]][,2]),1] - object[[4]][which.max(object[[4]][,2]),1])) ##compare TP_TL_peak_shift.status <- TP_TL_peak_shift.value > test_parameters$TL_peak_shift ##return warning if(TP_TL_peak_shift.status) warning("TL peak shift detected for aliquot position ",POSITION, "! Check curves!", call. = FALSE) ##set data.frame TP_TL_peak_shift <- data.frame( CRITERIA = "TL_peak_shift", THRESHOLD = test_parameters$TL_peak_shift, VALUE = TP_TL_peak_shift.value, STATUS = TP_TL_peak_shift.status, stringsAsFactors = FALSE) }else{ TP_TL_peak_shift <- data.frame(stringsAsFactors = FALSE) } ##stimulation_power if(any("stimulation_power"%in%names(test_parameters))){ ##get curves ids holding the information on the stimulation power temp_curves_OSL <- get_RLum(object_raw, recordType = "OSL", curveType = "measured") temp_curves_OSL <- lapply(temp_curves_OSL, function(o){ if("stimulator"%in%names(o@info)){ if(grepl(o@info$stimulator, pattern = "LED", fixed = TRUE)){ return(o) } } return(NULL) }) ##remove NULL temp_curves_OSL <- temp_curves_OSL[!sapply(temp_curves_OSL, is.null)] ##check whether something is left if(length(temp_curves_OSL) < 2){ TP_stimulation_power.value <- NA TP_stimulation_power.status <- FALSE }else{ ##calculate sum of the power TP_stimulation_power.value <- vapply(temp_curves_OSL, function(x){ sum(x@data[,2]) }, numeric(1)) ##estimate a theoretical value based on the first value ... it does not ##matter which value is correct or not TP_stimulation_power.value <- abs(1 - sum(TP_stimulation_power.value)/(TP_stimulation_power.value[1] * length(TP_stimulation_power.value))) TP_stimulation_power.status <- TP_stimulation_power.value > test_parameters$stimulation_power if(TP_stimulation_power.status) warning("Stimulation power was not stable for ALQ ",POSITION, "! Results are likely to be wrong!", call. 
= FALSE) } ##remove object rm(temp_curves_OSL) ##set data.frame TP_stimulation_power <- data.frame( CRITERIA = "stimulation_power", THRESHOLD = test_parameters$stimulation_power, VALUE = TP_stimulation_power.value, STATUS = TP_stimulation_power.status, stringsAsFactors = FALSE) }else{ TP_stimulation_power <- data.frame(stringsAsFactors = FALSE) } ##compile all test parameter df df_test_parameters <- rbind( TP_TL_peak_shift, TP_stimulation_power) # Terminal output ----------------------------------------------------------------------------- if(verbose){ cat(" [analyse_Al2O3_Measurement()] #",POSITION, " ", "DE: ", round(data$DE, 2), " \u00B1 ", round(data$DE_ERROR,2), "\n", sep = "") } # Plotting ------------------------------------------------------------------------------------ ##enable or disable plot ... we cannot put the condition higher, because we here ##calculate something we are going to need later if (plot) { ##get plot settings par.default <- par()$mfrow on.exit(par(mfrow = par.default)) ##settings plot_settings <- list( main = c(paste("ALQ POS:", POSITION, "| OSL"), paste("ALQ POS:", POSITION, "| TL")), norm = TRUE, mtext = "" ) ##modify on request plot_settings <- modifyList(x = plot_settings, val = list(...),) ##plot curves if(any(grepl("TL", names(object)))) par(mfrow = c(1,2)) plot_RLum( object, plot.single = TRUE, combine = TRUE, mtext = list(paste0("DE: ", round(data$DE,2), " \u00b1 ", round(data$DE_ERROR,2)), ""), xlab = list("Simulation [s]", "Temperature [\u00B0C]"), legend.text = list(list("#1 NAT", "#3 REG", "#5 BG"), list("#2 NAT", "#4 REG")), legend.pos = list("topright", "topleft"), main = as.list(plot_settings$main), norm = plot_settings$norm ) } # Output -------------------------------------------------------------------------------------- UID <- create_UID() output <- set_RLum( class = "RLum.Results", data = list( data = cbind(data, UID), data_table = cbind(temp_df, UID), test_parameters = cbind(df_test_parameters, UID) ), info = list( 
call = sys.call() ) ) } Luminescence/R/read_BIN2R.R0000644000176200001440000013275114367174002015034 0ustar liggesusers#' Import Risø BIN/BINX-files into R #' #' Import a *.bin or a *.binx file produced by a Risø DA15 and DA20 TL/OSL #' reader into R. #' #' The binary data file is parsed byte by byte following the data structure #' published in the Appendices of the Analyst manual p. 42. #' #' For the general BIN/BINX-file structure, the reader is referred to the #' Risø website: [https://www.fysik.dtu.dk]() #' #' @param file [character] or [list] (**required**): path and file name of the #' BIN/BINX file (URLs are supported). If input is a `list` it should comprise #' only `character`s representing each valid path and BIN/BINX-file names. #' Alternatively the input character can be just a directory (path), in this case the #' the function tries to detect and import all BIN/BINX files found in the directory. #' #' @param show.raw.values [logical] (*with default*): #' shows raw values from BIN-file for `LTYPE`, `DTYPE` and `LIGHTSOURCE` without #' translation in characters. Can be provided as `list` if `file` is a `list`. #' #' @param n.records [raw] (*optional*): #' limits the number of imported records. Can be used in combination with #' `show.record.number` for debugging purposes, e.g. corrupt BIN-files. #' Can be provided as `list` if `file` is a `list`. #' #' @param zero_data.rm [logical] (*with default*): #' remove erroneous data with no count values. As such data are usually not #' needed for the subsequent data analysis they will be removed by default. #' Can be provided as `list` if `file` is a `list`. #' #' @param duplicated.rm [logical] (*with default*): #' remove duplicated entries if `TRUE`. This may happen due to an erroneous #' produced BIN/BINX-file. This option compares only predecessor and successor. #' Can be provided as `list` if `file` is a `list`. #' #' @param position [numeric] (*optional*): #' imports only the selected position. 
Note: the import performance will not #' benefit by any selection made here. #' Can be provided as `list` if `file` is a `list`. #' #' @param fastForward [logical] (*with default*): #' if `TRUE` for a more efficient data processing only a list of `RLum.Analysis` #' objects is returned instead of a [Risoe.BINfileData-class] object. #' Can be provided as `list` if `file` is a `list`. #' #' @param show.record.number [logical] (*with default*): #' shows record number of the imported record, for debugging usage only. #' Can be provided as `list` if `file` is a `list`. #' #' @param txtProgressBar [logical] (*with default*): #' enables or disables [txtProgressBar]. #' #' @param forced.VersionNumber [integer] (*optional*): #' allows to cheat the version number check in the function by own values for #' cases where the BIN-file version is not supported. #' Can be provided as `list` if `file` is a `list`. #' #' **Note:** The usage is at own risk, only supported BIN-file versions have been tested. #' #' @param ignore.RECTYPE [logical] (*with default*): #' this argument allows to ignore values in the byte 'RECTYPE' (BIN-file version 08), #' in case there are not documented or faulty set. In this case the corrupted records are skipped. #' #' @param pattern [character] (*optional*): #' argument that is used if only a path is provided. The argument will than be #' passed to the function [list.files] used internally to construct a `list` #' of wanted files #' #' @param verbose [logical] (*with default*): #' enables or disables verbose mode #' #' @param ... further arguments that will be passed to the function #' [Risoe.BINfileData2RLum.Analysis]. Please note that any matching argument #' automatically sets `fastForward = TRUE` #' #' @return #' Returns an S4 [Risoe.BINfileData-class] object containing two #' slots: #' #' \item{METADATA}{A [data.frame] containing all variables stored in the BIN-file.} #' \item{DATA}{A [list] containing a numeric [vector] of the measured data. 
#' The ID corresponds to the record ID in METADATA.} #' #' If `fastForward = TRUE` a list of [RLum.Analysis-class] object is returned. The #' internal coercing is done using the function [Risoe.BINfileData2RLum.Analysis] #' #' @note #' The function works for BIN/BINX-format versions 03, 04, 05, 06, 07 and 08. The #' version number depends on the used Sequence Editor. #' #' **ROI data sets introduced with BIN-file version 8 are not supported and skipped during import.** #' #' @section Function version: 0.16.7 #' #' #' @author #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)\cr #' Margret C. Fuchs, HZDR Freiberg, (Germany) \cr #' based on information provided by Torben Lapp and Karsten Bracht Nielsen (Risø DTU, Denmark) #' #' #' @seealso [write_R2BIN], [Risoe.BINfileData-class], #' [base::readBin], [merge_Risoe.BINfileData], [RLum.Analysis-class] #' [utils::txtProgressBar], [list.files] #' #' #'@references #'DTU Nutech, 2016. The Sequence Editor, Users Manual, February, 2016. #'[https://www.fysik.dtu.dk]() #' #' #'@keywords IO #' #'@examples #' #'file <- system.file("extdata/BINfile_V8.binx", package = "Luminescence") #'temp <- read_BIN2R(file) #'temp #' #' @md #' @export read_BIN2R <- function( file, show.raw.values = FALSE, position = NULL, n.records = NULL, zero_data.rm = TRUE, duplicated.rm = FALSE, fastForward = FALSE, show.record.number = FALSE, txtProgressBar = TRUE, forced.VersionNumber = NULL, ignore.RECTYPE = FALSE, pattern = NULL, verbose = TRUE, ... 
){ # Self Call ----------------------------------------------------------------------------------- # Option (a): Input is a list, every element in the list will be treated as file connection # with that many file can be read in at the same time # Option (b): The input is just a path, the function tries to grep ALL BIN/BINX files in the # directory and import them, if this is detected, we proceed as list if (is.character(file)) { if (is.null(pattern)) { ##If this is not really a path we skip this here if (all(dir.exists(file)) & length(dir(file)) > 0) { if (verbose) { cat( "[read_BIN2R()] Directory detected, trying to extract '*.bin'/'*.binx' files ...\n" ) } ##get files file <- as.list(list.files( path = file, recursive = FALSE, pattern = "\\.bin*", full.names = TRUE, ignore.case = TRUE)) } }else if(dir.exists(file)){ file <- as.list(list.files(file, pattern = pattern, full.names = TRUE, recursive = TRUE)) } } if (is.list(file)) { ##extend list of parameters ##position position <- if(is.list(position)){ rep(position, length = length(file)) }else{ rep(list(position), length = length(file)) } ##n.records n.records <- if(is.list(n.records)){ rep(n.records, length = length(file)) }else{ rep(list(n.records), length = length(file)) } ##zero_data.rm zero_data.rm<- if(is.list(zero_data.rm)){ rep(zero_data.rm, length = length(file)) }else{ rep(list(zero_data.rm), length = length(file)) } ##duplicated.rm duplicated.rm <- if(is.list(duplicated.rm)){ rep(duplicated.rm, length = length(file)) }else{ rep(list(duplicated.rm), length = length(file)) } ## show.raw.values show.raw.values <- if(is.list(show.raw.values)){ rep( show.raw.values, length = length(file)) }else{ rep(list( show.raw.values), length = length(file)) } ## show.record.number show.record.number <- if(is.list(show.record.number)){ rep(show.record.number, length = length(file)) }else{ rep(list(show.record.number), length = length(file)) } ##forced.VersionNumber forced.VersionNumber <- 
if(is.list(forced.VersionNumber)){ rep(forced.VersionNumber, length = length(file)) }else{ rep(list(forced.VersionNumber), length = length(file)) } temp.return <- lapply(1:length(file), function(x) { temp <- read_BIN2R( file = file[[x]], fastForward = fastForward, position = position[[x]], n.records = n.records[[x]], duplicated.rm = duplicated.rm[[x]], show.raw.values = show.raw.values[[x]], show.record.number = show.record.number[[x]], txtProgressBar = txtProgressBar, forced.VersionNumber = forced.VersionNumber[[x]], verbose = verbose, ... ) }) ##return if (fastForward) { return(unlist(temp.return, recursive = FALSE)) }else{ return(temp.return) } } # Config -------------------------------------------------------------------------------------- ##set file_link for internet downloads file_link <- NULL on_exit <- function(){ ##unlink internet connection if(!is.null(file_link)){ unlink(file_link) } ##close connection if(exists("con") && !is.null(con)){ close(con) } } on.exit(expr = on_exit()) # Integrity checks ------------------------------------------------------ ##check if file exists if(!file.exists(file)){ ##check whether the file as an URL if(grepl(pattern = "http", x = file, fixed = TRUE)){ if(verbose){ cat("[read_BIN2R()] URL detected, checking connection ... ") } ##check URL if(!httr::http_error(file)){ if(verbose) cat("OK") ##download file file_link <- tempfile("read_BIN2R_FILE") utils::download.file( url = file, destfile = file_link, quiet = if(verbose) FALSE else TRUE, mode = "wb", cacheOK = FALSE) }else{ cat("FAILED") con <- NULL stop("[read_BIN2R()] File does not exist!", call. = FALSE) } }else{ con <- NULL stop("[read_BIN2R()] File does not exist!", call. 
= FALSE) } } ##check if file is a BIN or BINX file if(!(TRUE%in%(c("BIN", "BINX", "bin", "binx")%in%sub(pattern = "%20", replacement = "", x = tail( unlist(strsplit(file, split = "\\.")), n = 1), fixed = TRUE)))){ try( stop( paste0("[read_BIN2R()] '", file,"' is not a file or not of type 'BIN' or 'BINX'! Skipped!"), call. = FALSE)) con <- NULL return(NULL) } ##set correct file name of file_link was set if(!is.null(file_link)) file <- file_link # Config ------------------------------------------------------------------ ##set supported BIN format version VERSION.supported <- as.raw(c(03, 04, 05, 06, 07, 08)) # Short file parsing to get number of records ------------------------------------------------- #open connection con <- file(file, "rb") ##get information about file size file.size <- file.info(file) ##skip if zero-byte if(file.size$size == 0){ message(paste0("[read_BIN2R()] ", basename(file)," is a zero-byte file, skipped!")) return(NULL) } ##read data up to the end of con ##set ID temp.ID <- 0 ##start for BIN-file check up while(length(temp.VERSION<-readBin(con, what="raw", 1, size=1, endian="little"))>0) { ##force version number if(!is.null(forced.VersionNumber)){ temp.VERSION <- as.raw(forced.VersionNumber) } ##stop input if wrong VERSION if((temp.VERSION%in%VERSION.supported) == FALSE){ if(temp.ID > 0){ if(is.null(n.records)){ warning(paste0("[read_BIN2R()] BIN-file appears to be corrupt. Import limited to the first ", temp.ID," record(s)."), call. = FALSE) }else{ warning(paste0("[read_BIN2R()] BIN-file appears to be corrupt. 'n.records' reset to ", temp.ID,"."), call. = FALSE) } ##set or reset n.records n.records <- temp.ID break() }else{ ##show error message error.text <- paste("[read_BIN2R()] Found BIN/BINX-format version (",temp.VERSION,") is not supported or the BIN/BINX-file is broken! Supported version numbers are: ",paste(VERSION.supported,collapse=", "),".",sep="") ##show error stop(error.text, call. 
= FALSE) } } #empty byte position EMPTY<-readBin(con, what = "raw", 1, size = 1, endian = "little") if(temp.VERSION == 06 | temp.VERSION == 07 | temp.VERSION == 08){ ##GET record LENGTH temp.LENGTH <- readBin(con, what="int", 1, size=4, endian="little") STEPPING <- readBin(con, what="raw", temp.LENGTH-6, size=1, endian="little") }else{ ##GET record LENGTH temp.LENGTH <- readBin(con, what="int", 1, size=2, endian="little") STEPPING <- readBin(con, what="raw", temp.LENGTH-4, size=1, endian="little") } temp.ID<-temp.ID+1 if(!is.null(n.records) && temp.ID == n.records){ break() } } ##set n.records if(is.null(n.records)){ n.records <- temp.ID } rm(temp.ID) close(con) ##we have to close the connection here # Set Lookup tables -------------------------------------------------------------------------- ##LTYPE LTYPE.lookup <- c( "0" = "TL", "1" = "OSL", "2" = "IRSL", "3" = "M-IR", "4" = "M-VIS", "5" = "TOL", "6" = "TRPOSL", "7" = "RIR", "8" = "RBR", "9" = "USER", "10" = "POSL", "11" = "SGOSL", "12" = "RL", "13" = "XRF" ) ##DTYPE DTYPE.lookup <- c( "0" = "Natural", "1" = "N+dose", "2" = "Bleach", "3" = "Bleach+dose", "4" = "Natural (Bleach)", "5" = "N+dose (Bleach)", "6" = "Dose", "7" = "Background" ) ##LIGHTSOURCE LIGHTSOURCE.lookup <- c( "0" = "None", "1" = "Lamp", "2" = "IR diodes/IR Laser", "3" = "Calibration LED", "4" = "Blue Diodes", "5" = "White light", "6" = "Green laser (single grain)", "7" = "IR laser (single grain)" ) ##PRESET VALUES temp.CURVENO <- NA temp.FNAME <- NA temp.MEASTEMP <- NA temp.IRR_UNIT <- NA temp.IRR_DOSERATE <- NA temp.IRR_DOSERATEERR <- NA temp.TIMESINCEIRR <- NA temp.TIMETICK <- NA temp.ONTIME <- NA temp.OFFTIME <- NA temp.STIMPERIOD <- NA temp.GATE_ENABLED <- raw(length = 1) temp.ENABLE_FLAGS <- raw(length = 1) temp.GATE_START <- NA temp.GATE_STOP <- NA temp.GATE_END <- NA temp.PTENABLED <- raw(length = 1) temp.DTENABLED <- raw(length = 1) temp.DEADTIME <- NA temp.MAXLPOWER <- NA temp.XRF_ACQTIME <- NA temp.XRF_HV <- NA temp.XRF_CURR <- NA 
temp.XRF_DEADTIMEF <- NA temp.DETECTOR_ID <- NA temp.LOWERFILTER_ID <- NA temp.UPPERFILTER_ID <- NA temp.ENOISEFACTOR <- NA temp.SEQUENCE <- NA temp.GRAIN <- NA temp.GRAINNUMBER <- NA temp.LIGHTPOWER <- NA temp.LPOWER <- NA temp.RECTYPE <- 0 temp.MARKPOS_X1 <- NA temp.MARKPOS_Y1 <- NA temp.MARKPOS_X2 <- NA temp.MARKPOS_Y2 <- NA temp.MARKPOS_X3 <- NA temp.MARKPOS_Y3 <- NA temp.EXTR_START <- NA temp.EXTR_END <- NA ##SET length of entire record n.length <- n.records ##initialise data.frame results.METADATA <- data.table::data.table( ##1 to 7 ID = integer(length = n.length), SEL = logical(length = n.length), VERSION = numeric(length = n.length), LENGTH = integer(length = n.length), PREVIOUS = integer(length = n.length), NPOINTS = integer(length = n.length), RECTYPE = integer(length = n.length), #8 to 17 RUN = integer(length = n.length), SET = integer(length = n.length), POSITION = integer(length = n.length), GRAIN = integer(length = n.length), GRAINNUMBER = integer(length = n.length), CURVENO = integer(length = n.length), XCOORD = integer(length = n.length), YCOORD = integer(length = n.length), SAMPLE = character(length = n.length), COMMENT = character(length = n.length), #18 to 22 SYSTEMID = integer(length = n.length), FNAME = character(length = n.length), USER = character(length = n.length), TIME = character(length = n.length), DATE = character(length = n.length), ##23 to 31 DTYPE = character(length = n.length), BL_TIME = numeric(length = n.length), BL_UNIT = integer(length = n.length), NORM1 = numeric(length = n.length), NORM2 = numeric(length = n.length), NORM3 = numeric(length = n.length), BG = numeric(length = n.length), SHIFT = integer(length = n.length), TAG = integer(length = n.length), ##32 to 67 LTYPE = character(length = n.length), LIGHTSOURCE = character(length = n.length), LPOWER = numeric(length = n.length), LIGHTPOWER = numeric(length = n.length), LOW = numeric(length = n.length), HIGH = numeric(length = n.length), RATE = numeric(length = n.length), 
TEMPERATURE = numeric(length = n.length), MEASTEMP = numeric(length = n.length), AN_TEMP = numeric(length = n.length), AN_TIME = numeric(length = n.length), TOLDELAY = integer(length = n.length), TOLON = integer(length = n.length), TOLOFF = integer(length = n.length), IRR_TIME = numeric(length = n.length), IRR_TYPE = integer(length = n.length), IRR_UNIT = integer(length = n.length), IRR_DOSERATE = numeric(length = n.length), IRR_DOSERATEERR = numeric(length = n.length), TIMESINCEIRR = numeric(length = n.length), TIMETICK = numeric(length = n.length), ONTIME = numeric(length = n.length), OFFTIME = numeric(length = n.length), STIMPERIOD = integer(length = n.length), GATE_ENABLED = numeric(length = n.length), ENABLE_FLAGS = numeric(length = n.length), GATE_START = numeric(length = n.length), GATE_STOP = numeric(length = n.length), PTENABLED = numeric(length = n.length), DTENABLED = numeric(length = n.length), DEADTIME = numeric(length = n.length), MAXLPOWER = numeric(length = n.length), XRF_ACQTIME = numeric(length = n.length), XRF_HV = numeric(length = n.length), XRF_CURR = numeric(length = n.length), XRF_DEADTIMEF = numeric(length = n.length), #68 to 79 DETECTOR_ID = integer(length = n.length), LOWERFILTER_ID = integer(length = n.length), UPPERFILTER_ID = integer(length = n.length), ENOISEFACTOR = numeric(length = n.length), MARKPOS_X1 = numeric(length = n.length), MARKPOS_Y1 = numeric(length = n.length), MARKPOS_X2 = numeric(length = n.length), MARKPOS_Y2 = numeric(length = n.length), MARKPOS_X3 = numeric(length = n.length), MARKPOS_Y3 = numeric(length = n.length), EXTR_START = numeric(length = n.length), EXTR_END = numeric(length = n.length), ##80 SEQUENCE = character(length = n.length) ) #end set data table #set variable for DPOINTS handling results.DATA <- list() ##set list for RESERVED values results.RESERVED <- rep(list(list()), n.length) # Open Connection --------------------------------------------------------- ##show warning if version number check has been 
cheated if(!is.null(forced.VersionNumber)){ warning("Argument 'forced.VersionNumber' has been used. BIN-file version might be not supported!") } #open connection con <- file(file, "rb") ##get information about file size file.size<-file.info(file) ##output if(verbose){cat(paste("\n[read_BIN2R()]\n\t >> ",file,sep=""), fill=TRUE)} ##set progress bar if(txtProgressBar & verbose){ pb <- txtProgressBar(min=0 ,max = file.size$size, char="=", style=3) } ##read data up to the end of con ##set ID temp.ID <- 0 # LOOP -------------------------------------------------------------------- ##start loop for import BIN data while(length(temp.VERSION <- readBin(con, what="raw", 1, size=1, endian="little"))>0) { ##force version number if(!is.null(forced.VersionNumber)){ temp.VERSION <- as.raw(forced.VersionNumber) } ##stop input if wrong VERSION if((temp.VERSION%in%VERSION.supported) == FALSE){ ##show error message error.text <- paste("[read_BIN2R()] BIN-format version (",temp.VERSION,") of this file is currently not supported! Supported version numbers are: ",paste(VERSION.supported,collapse=", "),".",sep="") stop(error.text, call. 
= FALSE) } ##print record ID for debugging purposes if(verbose){ if(show.record.number == TRUE){ cat(temp.ID,",", sep = "") if(temp.ID%%10==0){ cat("\n") } } } #empty byte position EMPTY <- readBin(con, what="raw", 1, size=1, endian="little") # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # BINX FORMAT SUPPORT ----------------------------------------------------- if(temp.VERSION == 05 | temp.VERSION == 06 | temp.VERSION == 07 | temp.VERSION == 08){ ##(1) Header size and structure ##LENGTH, PREVIOUS, NPOINTS, LTYPE temp <- readBin(con, what="int", 3, size=4, endian="little") temp.LENGTH <- temp[1] temp.PREVIOUS <- temp[2] temp.NPOINTS <- temp[3] #for temp.VERSION == 08 #RECTYPE if(temp.VERSION == 08){ temp.RECTYPE <- readBin(con, what="int", 1, size=1, endian="little", signed = FALSE) if(temp.RECTYPE != 0 & temp.RECTYPE != 1){ ##jump to the next record by stepping the record length minus the already read bytes STEPPING <- readBin(con, what = "raw", size = 1, n = temp.LENGTH - 15) if(temp.RECTYPE == 128){ warning(paste0("[read_BIN2R()] ROI definition in data set #",temp.ID+1, " detected, but currently not supported, record skipped!"), call. = FALSE) }else{ if(!ignore.RECTYPE){ stop( paste0("[read_BIN2R()] Byte RECTYPE = ",temp.RECTYPE," is not supported in record #",temp.ID+1,"! Check your BIN/BINX file!"), call. 
= FALSE) }else{ if(verbose) cat(paste0("\n[read_BIN2R()] Byte RECTYPE = ",temp.RECTYPE," is not supported in record #",temp.ID+1,", record skipped!")) } } ## update and jump to next record, to avoid further trouble ## we set the VERSION to NA and remove it later, otherwise we ## break expected functionality temp.ID <- temp.ID + 1 results.METADATA[temp.ID,`:=` (VERSION = NA)] next() } } ##(2) Sample characteristics ##RUN, SET, POSITION, GRAINNUMBER, CURVENO, XCOORD, YCOORD temp <- readBin(con, what="int", 7, size=2, endian="little") temp.RUN <- temp[1] temp.SET <- temp[2] temp.POSITION <- temp[3] temp.GRAINNUMBER <- temp[4] temp.CURVENO <- temp[5] temp.XCOORD <- temp[6] temp.YCOORD <- temp[7] ##SAMPLE, COMMENT ##SAMPLE SAMPLE_SIZE<-readBin(con, what="int", 1, size=1, endian="little") temp.SAMPLE<-readChar(con, SAMPLE_SIZE, useBytes=TRUE) #however it should be set to 20 #step forward in con if(20-c(SAMPLE_SIZE)>0){ STEPPING<-readBin(con, what="raw", (20-c(SAMPLE_SIZE)), size=1, endian="little") } ##COMMENT COMMENT_SIZE<-readBin(con, what="int", 1, size=1, endian="little") temp.COMMENT<-readChar(con, COMMENT_SIZE, useBytes=TRUE) #set to 80 (manual) #step forward in con if(80-c(COMMENT_SIZE)>0){ STEPPING<-readBin(con, what="raw", (80-c(COMMENT_SIZE)), size=1, endian="little") } ##(3) Instrument and sequence characteristic ##SYSTEMID temp.SYSTEMID <- readBin(con, what="int", 1, size=2, endian="little") ##FNAME FNAME_SIZE<-readBin(con, what="int", 1, size=1, endian="little") ##correct for 0 file name length if(length(FNAME_SIZE)>0){ temp.FNAME<-readChar(con, FNAME_SIZE, useBytes=TRUE) #set to 100 (manual) }else{ FNAME_SIZE <- 0 } #step forward in con if(100-c(FNAME_SIZE)>0){ STEPPING<-readBin(con, what="raw", (100-c(FNAME_SIZE)), size=1, endian="little") } ##USER USER_SIZE<-readBin(con, what="int", 1, size=1, endian="little") ##correct for 0 user size length if (length(USER_SIZE) > 0) { temp.USER <- readChar(con, USER_SIZE, useBytes = TRUE) #set to 30 (manual) }else{ 
USER_SIZE <- 0 } #step forward in con if(30-c(USER_SIZE)>0){ STEPPING<-readBin(con, what="raw", (30-c(USER_SIZE)), size=1, endian="little") } ##TIME TIME_SIZE <- readBin(con, what="int", 1, size=1, endian="little") ##time size corrections for wrong time formats; set n to 6 for all values ##accoording the handbook by Geoff Duller, 2007 if(length(TIME_SIZE)>0){ temp.TIME<-readChar(con, TIME_SIZE, useBytes=TRUE) ##correct the mess by others if(nchar(temp.TIME) == 5) temp.TIME <- paste(c("0", temp.TIME), collapse = "") }else{ TIME_SIZE <- 0 } if(6-TIME_SIZE>0){ STEPPING<-readBin(con, what="raw", (6-TIME_SIZE), size=1, endian="little") } ##DATE DATE_SIZE<-readBin(con, what="int", 1, size=1, endian="little") ##date size corrections for wrong date formats; set n to 6 for all values ##according the handbook of Geoff Duller, 2007 DATE_SIZE<-6 temp.DATE <- readChar(con, DATE_SIZE, useBytes = TRUE) ##(4) Analysis ##DTYPE temp.DTYPE<-readBin(con, what="int", 1, size=1, endian="little") ##BL_TIME temp.BL_TIME<-readBin(con, what="double", 1, size=4, endian="little") ##BL_UNIT temp.BL_UNIT<-readBin(con, what="int", 1, size=1, endian="little") ##NORM1, NORM2, NORM3, BG temp <- readBin(con, what="double", 4, size=4, endian="little") temp.NORM1 <- temp[1] temp.NORM2 <- temp[2] temp.NORM3 <- temp[3] temp.BG <- temp[4] ##SHIFT temp.SHIFT<- readBin(con, what="integer", 1, size=2, endian="little") ##TAG temp.TAG <- readBin(con, what="int", 1, size=1, endian="little") ##RESERVED temp.RESERVED1 <-readBin(con, what="raw", 20, size=1, endian="little") ##(5) Measurement characteristics ##LTYPE temp.LTYPE <- readBin(con, what="int", 1, size=1, endian="little") ##LTYPESOURCE temp.LIGHTSOURCE <- readBin(con, what="int", 1, size=1, endian="little") ##LIGHTPOWER, LOW, HIGH, RATE temp <- readBin(con, what="double", 4, size=4, endian="little") temp.LIGHTPOWER <- temp[1] temp.LOW <- temp[2] temp.HIGH <- temp[3] temp.RATE <- temp[4] ##TEMPERATURE temp.TEMPERATURE <- readBin(con, what="int", 1, 
size=2, endian="little") ##MEASTEMP temp.MEASTEMP <- readBin(con, what="integer", 1, size=2, endian="little") ##AN_TEMP temp.AN_TEMP <- readBin(con, what="double", 1, size=4, endian="little") ##AN_TIME temp.AN_TIME <- readBin(con, what="double", 1, size=4, endian="little") ##DELAY, ON, OFF temp <- readBin(con, what="int", 3, size=2, endian="little") temp.TOLDELAY <- temp[1] temp.TOLON <- temp[2] temp.TOLOFF <- temp[3] ##IRR_TIME temp.IRR_TIME <- readBin(con, what="double", 1, size=4, endian="little") ##IRR_TYPE temp.IRR_TYPE <- readBin(con, what="int", 1, size=1, endian="little") ##IRR_DOSERATE temp.IRR_DOSERATE <- readBin(con, what="double", 1, size=4, endian="little") ##IRR_DOSERATEERR if(temp.VERSION != 05) temp.IRR_DOSERATEERR <- readBin(con, what="double", 1, size=4, endian="little") ##TIMESINCEIRR temp.TIMESINCEIRR <- readBin(con, what="integer", 1, size=4, endian="little") ##TIMETICK temp.TIMETICK <- readBin(con, what="double", 1, size=4, endian="little") ##ONTIME temp.ONTIME <- readBin(con, what="integer", 1, size=4, endian="little") ##STIMPERIOD temp.STIMPERIOD <- readBin(con, what="integer", 1, size=4, endian="little") ##GATE_ENABLED temp.GATE_ENABLED <- readBin(con, what="raw", 1, size=1, endian="little") ##GATE_START temp.GATE_START <- readBin(con, what="integer", 1, size=4, endian="little") ##GATE_STOP temp.GATE_STOP <- readBin(con, what="integer", 1, size=4, endian="little") ##PTENABLED temp.PTENABLED <- readBin(con, what="raw", 1, size=1, endian="little") ##DTENABLED temp.DTENABLED <- readBin(con, what="raw", 1, size=1, endian="little") ##DEADTIME, MAXLPOWER, XRF_ACQTIME, XRF_HV temp <- readBin(con, what="double", 4, size=4, endian="little") temp.DEADTIME <- temp[1] temp.MAXLPOWER <- temp[2] temp.XRF_ACQTIME <- temp[3] temp.XRF_HV <- temp[4] ##XRF_CURR temp.XRF_CURR <- readBin(con, what="integer", 1, size=4, endian="little") ##XRF_DEADTIMEF temp.XRF_DEADTIMEF <- readBin(con, what="double", 1, size=4, endian="little") ###Account for differences 
between V5, V6 and V7 if(temp.VERSION == 06){ ##RESERVED temp.RESERVED2<-readBin(con, what="raw", 24, size=1, endian="little") }else if(temp.VERSION == 05){ ##RESERVED temp.RESERVED2<-readBin(con, what="raw", 4, size=1, endian="little") }else{ ##DETECTOR_ID temp.DETECTOR_ID <- readBin(con, what="int", 1, size=1, endian="little") ##LOWERFILTER_ID, UPPERFILTER_ID temp <- readBin(con, what="int", 2, size=2, endian="little") temp.LOWERFILTER_ID <- temp[1] temp.UPPERFILTER_ID <- temp[2] ##ENOISEFACTOR temp.ENOISEFACTOR <- readBin(con, what="double", 1, size=4, endian="little") ##CHECK FOR VERSION 08 if(temp.VERSION == 07){ ##RESERVED for version 07 temp.RESERVED2<-readBin(con, what="raw", 15, size=1, endian="little") }else{ ##MARKER_POSITION temp <- readBin(con, what="double", 6, size=4, endian="little") temp.MARPOS_X1 <- temp[1] temp.MARPOS_Y1 <- temp[2] temp.MARPOS_X2 <- temp[3] temp.MARPOS_Y2 <- temp[4] temp.MARPOS_X3 <- temp[5] temp.MARPOS_Y3 <- temp[6] ###EXTR_START, EXTR_END temp <- readBin(con, what="double", 2, size=4, endian="little") temp.EXTR_START <- temp[1] temp.EXTR_END <- temp[2] temp.RESERVED2<-readBin(con, what="raw", 42, size=1, endian="little") } } #DPOINTS temp.DPOINTS<-readBin(con, what="integer", temp.NPOINTS, size=4, endian="little") }else if(temp.VERSION == 04 | temp.VERSION == 03){ ## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ##START BIN FILE FORMAT SUPPORT (vers. 
03 and 04) ##LENGTH, PREVIOUS, NPOINTS, LTYPE temp <- readBin(con, what="int", 3, size=2, endian="little") temp.LENGTH <- temp[1] temp.PREVIOUS <- temp[2] temp.NPOINTS <- temp[3] ##LTYPE temp.LTYPE<-readBin(con, what="int", 1, size=1, endian="little") ##LOW, HIGH, RATE temp <- readBin(con, what="double", 3, size=4, endian="little") temp.LOW <- temp[1] temp.HIGH <- temp[2] temp.RATE <- temp[3] temp.TEMPERATURE<-readBin(con, what="integer", 1, size=2, endian="little") ##XCOORD, YCOORD, TOLDELAY, TOLON, TOLOFF temp <- readBin(con, what="integer", 5, size=2, endian="little") temp.XCOORD <- temp[1] temp.YCOORD <- temp[2] temp.TOLDELAY <- temp[3] temp.TOLON <- temp[4] temp.TOLOFF <- temp[5] ##POSITION temp.POSITION <- readBin( con, what="int", 1, size=1, endian="little", signed = FALSE) ##RUN temp.RUN <- readBin( con, what="int", 1, size=1, endian="little", signed = FALSE) ##TIME TIME_SIZE <- readBin( con, what="int", 1, size=1, endian="little") ##time size corrections for wrong time formats; set n to 6 for all values ##according to the handbook of Geoff Duller, 2007 TIME_SIZE<-6 temp.TIME<-readChar(con, TIME_SIZE, useBytes=TRUE) ##DATE DATE_SIZE<-readBin(con, what="int", 1, size=1, endian="little") ##date size corrections for wrong date formats; set n to 6 for all values ##accoording the handbook of Geoff Duller, 2007 DATE_SIZE<-6 temp.DATE<-readChar(con, DATE_SIZE, useBytes=TRUE) ##SEQUENCE SEQUENCE_SIZE<-readBin(con, what="int", 1, size=1, endian="little") temp.SEQUENCE<-readChar(con, SEQUENCE_SIZE, useBytes=TRUE) #step forward in con if(8-SEQUENCE_SIZE>0){ STEPPING<-readBin(con, what="raw", (8-c(SEQUENCE_SIZE)),size=1, endian="little") } ##USER USER_SIZE<-readBin(con, what="int", 1, size=1, endian="little") temp.USER<-readChar(con, USER_SIZE, useBytes=FALSE) #step forward in con if(8-c(USER_SIZE)>0){ STEPPING<-readBin(con, what="raw", (8-c(USER_SIZE)), size=1, endian="little") } ##DTYPE temp.DTYPE<-readBin(con, what="int", 1, size=1, endian="little") ##IRR_TIME 
temp.IRR_TIME<-readBin(con, what="double", 1, size=4, endian="little") ##IRR_TYPE temp.IRR_TYPE<-readBin(con, what="int", 1, size=1, endian="little") ##IRR_UNIT temp.IRR_UNIT<-readBin(con, what="int", 1, size=1, endian="little") ##BL_TIME temp.BL_TIME<-readBin(con, what="double", 1, size=4, endian="little") ##BL_UNIT temp.BL_UNIT<-readBin(con, what="int", 1, size=1, endian="little") ##AN_TEMP, AN_TIME, NORM1, NORM2, NORM3, BG temp <- readBin(con, what="double", 6, size=4, endian="little") temp.AN_TEMP <- temp[1] temp.AN_TIME <- temp[2] temp.NORM1 <- temp[3] temp.NORM2 <- temp[4] temp.NORM3 <- temp[5] temp.BG <- temp[6] ##SHIFT temp.SHIFT<-readBin(con, what="integer", 1, size=2, endian="little") ##SAMPLE SAMPLE_SIZE<-readBin(con, what="int", 1, size=1, endian="little") temp.SAMPLE<-readChar(con, SAMPLE_SIZE, useBytes=TRUE) #however it should be set to 20 #step forward in con if(20-c(SAMPLE_SIZE)>0){ STEPPING<-readBin(con, what="raw", (20-c(SAMPLE_SIZE)), size=1, endian="little") } ##COMMENT COMMENT_SIZE<-readBin(con, what="int", 1, size=1, endian="little") temp.COMMENT<-readChar(con, COMMENT_SIZE, useBytes=TRUE) #set to 80 (manual) #step forward in con if(80-c(COMMENT_SIZE)>0){ STEPPING<-readBin(con, what="raw", (80-c(COMMENT_SIZE)), size=1, endian="little") } ##LIGHTSOURCE, SET, TAG temp <- readBin(con, what="int", 3, size=1, endian="little") temp.LIGHTSOURCE <- temp[1] temp.SET <- temp[2] temp.TAG <- temp[3] ##GRAIN temp.GRAIN<-readBin(con, what="integer", 1, size=2, endian="little") ##LPOWER temp.LPOWER<-readBin(con, what="double", 1, size=4, endian="little") ##SYSTEMID temp.SYSTEMID<-readBin(con, what="integer", 1, size=2, endian="little") ##Unfortunately an inconsitent BIN-file structure forces a differenciation ... 
if(temp.VERSION == 03){ ##RESERVED temp.RESERVED1<-readBin(con, what="raw", 36, size=1, endian="little") ##ONTIME, OFFTIME temp <- readBin(con, what="double", 2, size=4, endian="little") temp.ONTIME <- temp[1] temp.OFFTIME <- temp[2] ##Enable flags #GateEnabled for v 06 temp.ENABLE_FLAGS <- readBin(con, what="raw", 1, size=1, endian="little") temp.GATE_ENABLED <- temp.ENABLE_FLAGS ##ONGATEDELAY, OFFGATEDELAY temp <- readBin(con, what="double", 2, size=4, endian="little") temp.GATE_START <- temp[1] temp.GATE_STOP <- temp[2] ##RESERVED temp.RESERVED2<-readBin(con, what="raw", 1, size=1, endian="little") }else{ ##RESERVED temp.RESERVED1<-readBin(con, what="raw", 20, size=1, endian="little") ##CURVENO temp.CURVENO <- readBin(con, what="integer", 1, size=2, endian="little") ##TIMETICK temp.TIMETICK <- readBin(con, what="double", 1, size=4, endian="little") ##ONTIME, STIMPERIOD temp <- readBin(con, what="integer", 2, size=4, endian="little") temp.ONTIME <- temp[1] temp.STIMPERIOD <- temp[2] ##GATE_ENABLED temp.GATE_ENABLED <- readBin(con, what="raw", 1, size=1, endian="little") ##ONGATEDELAY, OFFGATEDELAY temp <- readBin(con, what="double", 2, size=4, endian="little") temp.GATE_START <- temp[1] temp.GATE_END <- temp[2] temp.GATE_STOP <- temp.GATE_END ##PTENABLED temp.PTENABLED <- readBin(con, what="raw", 1, size=1, endian="little") ##RESERVED temp.RESERVED2<-readBin(con, what="raw", 10, size=1, endian="little") } #DPOINTS temp.DPOINTS<-readBin(con, what="integer", temp.NPOINTS, size=4, endian="little") }else{ stop("[read_BIN2R()] Unsupported BIN/BINX-file version.", call. 
= FALSE) } #endif:format support ##END BIN FILE FORMAT SUPPORT ## ==========================================================================# #SET UNIQUE ID temp.ID <- temp.ID + 1 ##update progress bar if(txtProgressBar & verbose){ setTxtProgressBar(pb, seek(con,origin="current")) } ##set for equal values with different names if(!is.na(temp.GRAINNUMBER)){temp.GRAIN <- temp.GRAINNUMBER} if(!is.na(temp.GRAIN)){temp.GRAINNUMBER <- temp.GRAIN} if(!is.na(temp.LIGHTPOWER)){temp.LPOWER <- temp.LIGHTPOWER} if(!is.na(temp.LPOWER)){temp.LIGHTPOWER <- temp.LPOWER} temp.SEL <- if(temp.TAG == 1){TRUE}else{FALSE} ##replace values in the data.table with values results.METADATA[temp.ID, `:=` ( ID = temp.ID, SEL = temp.SEL, VERSION = as.numeric(temp.VERSION), LENGTH = temp.LENGTH, PREVIOUS = temp.PREVIOUS, NPOINTS = temp.NPOINTS, RECTYPE = temp.RECTYPE, RUN = temp.RUN, SET = temp.SET, POSITION = temp.POSITION, GRAIN = temp.GRAIN, GRAINNUMBER = temp.GRAINNUMBER, CURVENO = temp.CURVENO, XCOORD = temp.XCOORD, YCOORD = temp.YCOORD, SAMPLE = temp.SAMPLE, COMMENT = temp.COMMENT, SYSTEMID = temp.SYSTEMID, FNAME = temp.FNAME, USER = temp.USER, TIME = temp.TIME, DATE = temp.DATE, DTYPE = as.character(temp.DTYPE), BL_TIME = temp.BL_TIME, BL_UNIT = temp.BL_UNIT, NORM1 = temp.NORM1, NORM2 = temp.NORM2, NORM3 = temp.NORM3, BG = temp.BG, SHIFT = temp.SHIFT, TAG = temp.TAG, LTYPE = as.character(temp.LTYPE), LIGHTSOURCE = as.character(temp.LIGHTSOURCE), LPOWER = temp.LPOWER, LIGHTPOWER = temp.LIGHTPOWER, LOW = temp.LOW, HIGH = temp.HIGH, RATE = temp.RATE, TEMPERATURE = temp.TEMPERATURE, MEASTEMP = temp.MEASTEMP, AN_TEMP = temp.AN_TEMP, AN_TIME = temp.AN_TIME, TOLDELAY = temp.TOLDELAY, TOLON = temp.TOLON, TOLOFF = temp.TOLOFF, IRR_TIME = temp.IRR_TIME, IRR_TYPE = temp.IRR_TYPE, IRR_UNIT = temp.IRR_UNIT, IRR_DOSERATE = temp.IRR_DOSERATE, IRR_DOSERATEERR = temp.IRR_DOSERATEERR, TIMESINCEIRR = temp.TIMESINCEIRR, TIMETICK = temp.TIMETICK, ONTIME = temp.ONTIME, OFFTIME = temp.OFFTIME, STIMPERIOD = 
temp.STIMPERIOD, GATE_ENABLED = as.numeric(temp.GATE_ENABLED), ENABLE_FLAGS = as.numeric(temp.ENABLE_FLAGS), GATE_START = temp.GATE_START, GATE_STOP = temp.GATE_STOP, PTENABLED = as.numeric(temp.PTENABLED), DTENABLED = as.numeric(temp.DTENABLED), DEADTIME = temp.DEADTIME, MAXLPOWER = temp.MAXLPOWER, XRF_ACQTIME = temp.XRF_ACQTIME, XRF_HV = temp.XRF_HV, XRF_CURR = temp.XRF_CURR, XRF_DEADTIMEF = temp.XRF_DEADTIMEF, DETECTOR_ID = temp.DETECTOR_ID, LOWERFILTER_ID = temp.LOWERFILTER_ID, UPPERFILTER_ID = temp.UPPERFILTER_ID, ENOISEFACTOR = temp.ENOISEFACTOR, MARKPOS_X1 = temp.MARKPOS_X1, MARKPOS_Y1 = temp.MARKPOS_Y1, MARKPOS_X2 = temp.MARKPOS_X2, MARKPOS_Y2 = temp.MARKPOS_Y2, MARKPOS_X3 = temp.MARKPOS_X3, MARKPOS_Y3 = temp.MARKPOS_Y3, SEQUENCE = temp.SEQUENCE )] results.DATA[[temp.ID]] <- temp.DPOINTS results.RESERVED[[temp.ID]][[1]] <- temp.RESERVED1 results.RESERVED[[temp.ID]][[2]] <- temp.RESERVED2 ##BREAK ##stop loop if record limit is reached if (!is.null(n.records)) { if (n.records == temp.ID) { break() } } ##reset values temp.GRAINNUMBER <- NA temp.GRAIN <- NA }#endwhile::end lopp ##close if(txtProgressBar & verbose){close(pb)} ## remove NA values created by skipping records results.METADATA <- na.omit(results.METADATA, cols = "VERSION") ##output if(verbose){cat(paste("\t >> ",temp.ID," records have been read successfully!\n\n", sep=""))} # Further limitation -------------------------------------------------------------------------- if(!is.null(position)){ ##check whether the position is valid at all if (all(position %in% results.METADATA[["POSITION"]])) { results.METADATA <- results.METADATA[which(results.METADATA[["POSITION"]] %in% position),] results.DATA <- results.DATA[results.METADATA[["ID"]]] ##re-calculate ID ... 
otherwise it will not match results.METADATA[["ID"]] <- 1:length(results.DATA ) ##show a message message("[read_BIN2R()] The record index has been recalculated!") }else{ valid.position <- paste(unique(results.METADATA[["POSITION"]]), collapse = ", ") warning( paste0( "Position limitation omitted. At least one position number is not valid, valid position numbers are: ", valid.position ) ) } } ##check for position that have no data at all (error during the measurement) if(zero_data.rm){ zero_data.check <- which(vapply(results.DATA, length, numeric(1)) == 0) ##remove records if there is something to remove if(length(zero_data.check) != 0){ results.METADATA <- results.METADATA[-zero_data.check, ] results.DATA[zero_data.check] <- NULL ##recalculate record index results.METADATA[["ID"]] <- 1:nrow(results.METADATA) warning( paste0( "\n[read_BIN2R()] ", length(zero_data.check), " zero data records detected and removed: ", paste(zero_data.check, collapse = ", "), ". \n\n >> Record index re-calculated." ), call. = FALSE ) } } ##check for duplicated entries and remove them if wanted, but only if we have more than 2 records if (n.records >= 2 && length(results.DATA) >= 2) { duplication.check <- suppressWarnings(which(c( 0, vapply( 2:length(results.DATA), FUN = function(x) { all(results.DATA[[x - 1]] == results.DATA[[x]]) }, FUN.VALUE = 1 ) ) == 1)) if (length(duplication.check) != 0) { if (duplicated.rm) { ##remove records results.METADATA <- results.METADATA[-duplication.check, ] results.DATA[duplication.check] <- NULL ##recalculate record index results.METADATA[["ID"]] <- 1:nrow(results.METADATA) ##message if(verbose) { message( paste0( "[read_BIN2R()] duplicated record(s) detected and removed: ", paste(duplication.check, collapse = ", "), ". Record index re-calculated." ) ) } } else{ warning( paste0( "[read_BIN2R()] duplicated record(s) detected: ", paste(duplication.check, collapse = ", "), ". \n\n >> You should consider 'duplicated.rm = TRUE'." 
) ) } } } ##produce S4 object for output object <- set_Risoe.BINfileData( METADATA = results.METADATA, DATA = results.DATA, .RESERVED = results.RESERVED) # Convert Translation Matrix Values --------------------------------------- if (!show.raw.values) { ##LIGHTSOURCE CONVERSION object@METADATA[["LIGHTSOURCE"]] <- unname(LIGHTSOURCE.lookup[object@METADATA[["LIGHTSOURCE"]]]) ##LTYPE CONVERSION object@METADATA[["LTYPE"]] <- unname(LTYPE.lookup[object@METADATA[["LTYPE"]]]) ##DTYPE CONVERSION object@METADATA[["DTYPE"]] <- unname(DTYPE.lookup[object@METADATA[["DTYPE"]]]) ##CHECK for oddly set LTYPES, this may happen in old BIN-file versions if (object@METADATA[["VERSION"]][1] == 3) { object@METADATA[["LTYPE"]] <- sapply(1:length(object@METADATA[["LTYPE"]]), function(x) { if (object@METADATA[["LTYPE"]][x] == "OSL" & object@METADATA[["LIGHTSOURCE"]][x] == "IR diodes/IR Laser") { return("IRSL") } else{ return(object@METADATA[["LTYPE"]][x]) } }) } ##TIME CONVERSION, do not do for odd time formats as this could cause problems during export if (TIME_SIZE == 6) { object@METADATA[["TIME"]] <- format(strptime(as.character(object@METADATA[["TIME"]]), "%H%M%S"), "%H:%M:%S") } } ## check for empty BIN-files names ... if so, set the name of the file as BIN-file name ## This can happen if the user uses different equipment if(all(is.na(object@METADATA[["FNAME"]]))){ object@METADATA[["FNAME"]] <- strsplit(x = basename(file), split = ".", fixed = TRUE)[[1]][1] } # Fast Forward -------------------------------------------------------------------------------- ## set fastForward to TRUE if one of this arguments is used if(any(names(list(...)) %in% names(formals(Risoe.BINfileData2RLum.Analysis))[-1]) & fastForward == FALSE) { fastForward <- TRUE warning("[read_BIN2R()] automatically reset 'fastForward = TRUE'", call. 
= FALSE) } ##return values ##with fast fastForward they will be converted directly to a list of RLum.Analysis objects if(fastForward){ object <- Risoe.BINfileData2RLum.Analysis(object, ...) ##because we expect a list if(!is.list(object)){ object <- list(object) } } return(object) } Luminescence/R/convert_SG2MG.R0000644000176200001440000000627114264017373015604 0ustar liggesusers#' @title Converts Single-Grain Data to Multiple-Grain Data #' #' @description Conversion of single-grain data to multiple-grain data by adding signals #' from grains belonging to one disc (unique pairs of position, set and run). #' #' @param object [Risoe.BINfileData-class] [character] (**required**): [Risoe.BINfileData-class] #' object or BIN/BINX-file name #' #' @param write_file [logical] (*with default*): if the input was a path to a file, the #' output can be written to a file if `TRUE`. The multiple grain file will be written into the #' same folder and with extension `-SG` to the file name. #' #' @param ... further arguments passed down to [read_BIN2R] if input is file path #' #' @return [Risoe.BINfileData-class] object and if `write_file = TRUE` and the input #' was a file path, a file is written to origin folder. #' #' @section Function version: 0.1.0 #' #' @author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany), Norbert Mercier, IRAMAT-CRP2A, UMR 5060, CNRS-Université Bordeaux Montaigne (France); #' #' #' @seealso [Risoe.BINfileData-class], [read_BIN2R], [write_R2BIN] #' #' @keywords IO #' #' @examples #' ## simple run #' ## (please not that the example is not using SG data) #' data(ExampleData.BINfileData, envir = environment()) #' convert_SG2MG(CWOSL.SAR.Data) #' #' @md #' @export convert_SG2MG <- function( object, write_file = FALSE, ... ){ # Check input ------------------------------------------------------------- if(!is(object, "Risoe.BINfileData")) { file_name <- object object <- read_BIN2R(object, ...) 
} # Transform --------------------------------------------------------------- ## get unique pairs of position, run and set and then upairs_sg_id <- as.numeric(rownames( unique(object@METADATA[object@METADATA[["GRAIN"]] != 0,c("POSITION", "RUN", "SET")]))) for(i in upairs_sg_id){ ##get IDs of all relevant records records_id <- object@METADATA[ object@METADATA[["POSITION"]] == object@METADATA[["POSITION"]][[i]] & object@METADATA[["RUN"]] == object@METADATA[["RUN"]][[i]] & object@METADATA[["SET"]] == object@METADATA[["SET"]][[i]], "ID"] # sum up count values and write it into the first grain record object@DATA[[i]] <- matrixStats::rowSums2( matrix( unlist(object@DATA[records_id]), ncol = length(records_id))) } ## clean dataset and remove all irrelevant data upairs_id <- as.numeric(rownames( unique(object@METADATA[, c("POSITION", "RUN", "SET")]))) object@METADATA <- object@METADATA[upairs_id, ] object@DATA <- object@DATA[upairs_id] ##recalculate IDs and reset GRAIN object@METADATA[["ID"]] <- 1:length(object@DATA) object@METADATA[["GRAIN"]] <- 0 # Write file -------------------------------------------------------------- if(write_file[1]){ if(!inherits(try(file.exists(file_name), silent = FALSE), "try-error")){ dirname <- dirname(normalizePath(file_name)) filename <- strsplit(basename(normalizePath(file_name)), ".", fixed = TRUE)[[1]] write_R2BIN(object, paste0(dirname,"/",filename[1],"_SG.",filename[2]), ...) } } # Return object ----------------------------------------------------------- return(object) } Luminescence/R/calc_IEU.R0000644000176200001440000004000014236146743014620 0ustar liggesusers#' Apply the internal-external-uncertainty (IEU) model after Thomsen et al. #' (2007) to a given De distribution #' #' Function to calculate the IEU De for a De data set. #' #' This function uses the equations of Thomsen et al. (2007). The parameters a #' and b are estimated from dose-recovery experiments. 
#' #' @param data [RLum.Results-class] or [data.frame] (**required**): #' for [data.frame]: two columns with De `(data[,1])` and #' De error `(values[,2])` #' #' @param a [numeric] (**required**): #' slope #' #' @param b [numeric] (**required**): #' intercept #' #' @param interval [numeric] (**required**): #' fixed interval (e.g. 5 Gy) used for iteration of `Dbar`, from the mean to #' Lowest.De used to create Graph.IEU `[Dbar.Fixed vs Z]` #' #' @param decimal.point [numeric] (*with default*): #' number of decimal points for rounding calculations (e.g. 2) #' #' @param plot [logical] (*with default*): #' plot output #' #' @param ... further arguments (`trace, verbose`). #' #' @return #' Returns a plot (*optional*) and terminal output. In addition an #' [RLum.Results-class] object is returned containing the #' following elements: #' #' \item{.$summary}{[data.frame] summary of all relevant model results.} #' \item{.$data}{[data.frame] original input data} #' \item{.$args}{[list] used arguments} #' \item{.$call}{[call] the function call} #' \item{.$tables}{[list] a list of data frames containing all calculation tables} #' #' The output should be accessed using the function [get_RLum]. #' #' @section Function version: 0.1.1 #' #' @author #' Rachel Smedley, Geography & Earth Sciences, Aberystwyth University (United Kingdom) \cr #' Based on an excel spreadsheet and accompanying macro written by Kristina Thomsen. #' #' @seealso [plot], [calc_CommonDose], [calc_CentralDose], [calc_FiniteMixture], #' [calc_FuchsLang2001], [calc_MinDose] #' #' @references #' Smedley, R.K., 2015. A new R function for the Internal External Uncertainty (IEU) model. #' Ancient TL 33, 16-21. #' #' Thomsen, K.J., Murray, A.S., Boetter-Jensen, L. & Kinahan, J., #' 2007. Determination of burial dose in incompletely bleached fluvial samples #' using single grains of quartz. Radiation Measurements 42, 370-379. 
#' #' @examples #' #' ## load data #' data(ExampleData.DeValues, envir = environment()) #' #' ## apply the IEU model #' ieu <- calc_IEU(ExampleData.DeValues$CA1, a = 0.2, b = 1.9, interval = 1) #' #' @md #' @export calc_IEU <- function( data, a, b, interval, decimal.point = 2, plot = TRUE, ... ) { ##==========================================================================## ## CONSISTENCY CHECK OF INPUT DATA ##==========================================================================## if(missing(data)==FALSE){ if(is(data, "data.frame") == FALSE & is(data,"RLum.Results") == FALSE){ stop("[calc_IEU()] 'data' object has to be of type 'data.frame' or 'RLum.Results'!", call = FALSE) }else{ if(is(data, "RLum.Results") == TRUE){ data <- get_RLum(data) } } } ##==========================================================================## ## ... ARGUMENTS ##==========================================================================## extraArgs <- list(...) ## console output if ("verbose" %in% names(extraArgs)) { verbose <- extraArgs$verbose } else { verbose <- TRUE } # trace calculations if ("trace" %in% names(extraArgs)) { trace <- extraArgs$trace } else { trace <- FALSE } # TODO: main, xlab, ylab, xlim, ylim, pch, col ##============================================================================## ## CALCULATIONS ##============================================================================## empty <- NULL Table.Fixed.Iteration <- data.frame(matrix(nrow = 0, ncol = 9)) colnames(data) <- c("De", "De.Error") data <- data[order(data$De), ] Mean <- mean(data$De) Dbar <- round(Mean, decimal.point) Lowest.De <- round(data$De[1], decimal.point) # (a) Calculate IEU at fixed intervals of Dbar starting from the Mean and # subtracting the interval until Dbar is < Lowest.De; this creates a plot N <- nrow(data) Rank.number <- t(c(1:N)) De.Total.Error <- sqrt((data$De.Error^2) + (((a * Dbar) + b)^2)) Table.Calculations <- data.frame(Rank.number = c(Rank.number), De = c(data$De), 
De.Total.Error = c(De.Total.Error)) Z.top <- cumsum(Table.Calculations$De/(Table.Calculations$De.Total.Error^2)) Z.bottom <- cumsum(1/(Table.Calculations$De.Total.Error^2)) Z <- Z.top/Z.bottom Table.Calculations["Z"] <- Z temp <- NULL for (j in 1:N) { for (i in j) { Z <- Table.Calculations$Z[j] x <- ((Table.Calculations$De[1:i] - Z)^2)/((Table.Calculations$De.Total.Error[1:i])^2) y <- (sum(x)) temp <- rbind(temp, data.frame(y)) } } EXT.top <- temp EXT.bottom <- (Table.Calculations$Rank.number - 1) * Z.bottom EXT <- EXT.top/EXT.bottom INT <- 1/Z.bottom R <- sqrt(INT/EXT) R.Error <- (2 * (Table.Calculations$Rank.number - 1))^(-0.5) Table.IEU <- data.frame(Table.Calculations$Rank.number, Table.Calculations$De, Table.Calculations$De.Total.Error, Table.Calculations$Z, EXT.top, EXT, INT, R, R.Error) colnames(Table.IEU) <- c("Rank.number", "De", "De.Error", "Z", "EXT.top", "EXT", "INT", "R", "R.Uncertainty") Unity <- Table.IEU[R >= 1, ] Max <- max(Unity$Rank.number, na.rm = TRUE) Above.Z <- Table.IEU[Max, 4] Above.Error <- Table.IEU[Max, 6] Below.Z <- Table.IEU[Max + 1, 4] Below.Error <- Table.IEU[Max + 1, 6] Above.R <- Table.IEU[Max, 8] Below.R <- Table.IEU[Max + 1, 8] Slope <- (Above.R - Below.R)/(Above.Z - Below.Z) Intercept <- Above.R - (Slope * Above.Z) IEU.De <- round(((1 - Intercept)/Slope), decimal.point) IEU.Error <- max(sqrt(Above.Error), sqrt(Below.Error)) IEU.Error <- round(IEU.Error, decimal.point) n <- Max + 1 Dbar.Fixed <- Dbar - interval Dbar.Mean <- c(1, Dbar, Dbar.Fixed, IEU.De, IEU.Error, n, Below.R, a, b) repeat { if (Dbar.Fixed < Lowest.De) { break } else { Dbar <- Dbar.Fixed } De.Total.Error <- sqrt((data$De.Error^2) + (((a * Dbar) + b)^2)) Table.Calculations <- data.frame(Rank.number = c(Rank.number), De = c(data$De), De.Total.Error = c(De.Total.Error)) Z.top <- cumsum(Table.Calculations$De/(Table.Calculations$De.Total.Error^2)) Z.bottom <- cumsum(1/(Table.Calculations$De.Total.Error^2)) Z <- Z.top/Z.bottom Table.Calculations["Z"] <- Z temp <- NULL 
for (j in 1:N) { for (i in j) { Z <- Table.Calculations$Z[j] x <- ((Table.Calculations$De[1:i] - Z)^2)/((Table.Calculations$De.Total.Error[1:i])^2) y <- (sum(x)) temp <- rbind(temp, data.frame(y)) } } EXT.top <- temp EXT.bottom <- (Table.Calculations$Rank.number - 1) * Z.bottom EXT <- EXT.top/EXT.bottom INT <- 1/Z.bottom R <- sqrt(INT/EXT) R.Error <- (2 * (Table.Calculations$Rank.number - 1))^(-0.5) Table.IEU <- data.frame(Table.Calculations$Rank.number, Table.Calculations$De, Table.Calculations$De.Total.Error, Table.Calculations$Z, EXT.top, EXT, INT, R, R.Error) colnames(Table.IEU) <- c("Rank.number", "De", "De.Error", "Z", "EXT.top", "EXT", "INT", "R", "R.Uncertainty") Unity <- Table.IEU[R >= 1, ] Max <- max(Unity$Rank.number, na.rm = TRUE) Above.Z <- Table.IEU[Max, 4] Above.Error <- Table.IEU[Max, 6] Below.Z <- Table.IEU[Max + 1, 4] Below.Error <- Table.IEU[Max + 1, 6] Above.R <- Table.IEU[Max, 8] Below.R <- Table.IEU[Max + 1, 8] Slope <- (Above.R - Below.R)/(Above.Z - Below.Z) Intercept <- Above.R - (Slope * Above.Z) Zbar <- round(((1 - Intercept)/Slope), decimal.point) Zbar.Error <- max(sqrt(Above.Error), sqrt(Below.Error)) Zbar.Error <- round(IEU.Error, decimal.point) n <- Max + 1 Dbar.Fixed <- Dbar - interval Table.Fixed.Iteration <- rbind(Table.Fixed.Iteration, cbind(1, Dbar, Dbar.Fixed, Zbar, Zbar.Error, n, Below.R, a, b)) } Table.Fixed.Iteration <- rbind(Dbar.Mean, Table.Fixed.Iteration) colnames(Table.Fixed.Iteration) <- c(FALSE, "Dbar", "Dbar.Fixed", "Zbar", "Zbar.Error", "n", "Below.R", "a", "b") if (plot) { plot(Table.Fixed.Iteration$Dbar, Table.Fixed.Iteration$Zbar, type = "b", ylab = "Zbar, weighted mean (Gy)", xlab = "Dbar (Gy)", asp = 1/1) arrows(Table.Fixed.Iteration$Dbar, Table.Fixed.Iteration$Zbar + Table.Fixed.Iteration$Zbar.Error, Table.Fixed.Iteration$Dbar, Table.Fixed.Iteration$Zbar - Table.Fixed.Iteration$Zbar.Error, col = 1, angle = 90, length = 0.05, code = 3) abline(0, 1, untf = FALSE, lty = 3) } # (b) Calculate Dbar by iteration from 
[Dbar = Lowest.De] until [IEU.De = Dbar]; # this calculates the IEU De Dbar <- Lowest.De N <- nrow(data) Rank.number <- t(c(1:N)) De.Total.Error <- sqrt((data$De.Error^2) + (((a * Dbar) + b)^2)) Table.Calculations <- data.frame(Rank.number = c(Rank.number), De = c(data$De), De.Total.Error = c(De.Total.Error)) Z.top <- cumsum(Table.Calculations$De/(Table.Calculations$De.Total.Error^2)) Z.bottom <- cumsum(1/(Table.Calculations$De.Total.Error^2)) Z <- Z.top/Z.bottom Table.Calculations["Z"] <- Z temp <- NULL for (j in 1:N) { for (i in j) { Z <- Table.Calculations$Z[j] x <- ((Table.Calculations$De[1:i] - Z)^2)/((Table.Calculations$De.Total.Error[1:i])^2) y <- (sum(x)) temp <- rbind(temp, data.frame(y)) } } EXT.top <- temp EXT.bottom <- (Table.Calculations$Rank.number - 1) * Z.bottom EXT <- EXT.top/EXT.bottom INT <- 1/Z.bottom R <- sqrt(INT/EXT) R.Error <- (2 * (Table.Calculations$Rank.number - 1))^(-0.5) Table.IEU <- data.frame(Table.Calculations$Rank.number, Table.Calculations$De, Table.Calculations$De.Total.Error, Table.Calculations$Z, EXT.top, EXT, INT, R, R.Error) colnames(Table.IEU) <- c("Rank.number", "De", "De.Error", "Z", "EXT.top", "EXT", "INT", "R", "R.Uncertainty") Unity <- Table.IEU[R >= 1, ] Max <- max(Unity$Rank.number, na.rm = TRUE) Above.Z <- Table.IEU[Max, 4] Above.Error <- Table.IEU[Max, 6] Below.Z <- Table.IEU[Max + 1, 4] Below.Error <- Table.IEU[Max + 1, 6] Above.R <- Table.IEU[Max, 8] Below.R <- Table.IEU[Max + 1, 8] Slope <- (Above.R - Below.R)/(Above.Z - Below.Z) Intercept <- Above.R - (Slope * Above.Z) IEU.De <- round(((1 - Intercept)/Slope), decimal.point) IEU.Error <- max(sqrt(Above.Error), sqrt(Below.Error)) IEU.Error <- round(IEU.Error, decimal.point) n <- Max + 1 repeat { if (IEU.De <= Dbar) { break } else { Dbar <- IEU.De } De.Total.Error <- sqrt((data$De.Error^2) + (((a * Dbar) + b)^2)) Table.Calculations <- data.frame(Rank.number = c(Rank.number), De = c(data$De), De.Total.Error = c(De.Total.Error)) Z.top <- 
cumsum(Table.Calculations$De/(Table.Calculations$De.Total.Error^2)) Z.bottom <- cumsum(1/(Table.Calculations$De.Total.Error^2)) Z <- round((Z.top/Z.bottom), decimal.point) Table.Calculations["Z"] <- Z temp <- NULL for (j in 1:N) { for (i in j) { Z <- Table.Calculations$Z[j] x <- ((Table.Calculations$De[1:i] - Z)^2)/((Table.Calculations$De.Total.Error[1:i])^2) y <- (sum(x)) temp <- rbind(temp, data.frame(y)) } } EXT.top <- temp EXT.bottom <- (Table.Calculations$Rank.number - 1) * Z.bottom EXT <- EXT.top/EXT.bottom INT <- 1/Z.bottom R <- sqrt(INT/EXT) R.Error <- (2 * (Table.Calculations$Rank.number - 1))^(-0.5) Table.IEU <- data.frame(Table.Calculations$Rank.number, Table.Calculations$De, Table.Calculations$De.Total.Error, Table.Calculations$Z, EXT.top, EXT, INT, R, R.Error) colnames(Table.IEU) <- c("Rank.number", "De", "De.Error", "Z", "EXT.top", "EXT", "INT", "R", "R.Error") # to reduce the number of plots and increase perfomance # intermediate calculations are only plotted when trace = TRUE if (plot && trace) { ymin <- min(Table.IEU$R[2:nrow(Table.IEU)] - Table.IEU$R.Error[2:nrow(Table.IEU)]) ymax <- max(Table.IEU$R[2:nrow(Table.IEU)] + Table.IEU$R.Error[2:nrow(Table.IEU)]) ylim <- c(ifelse(ymin > 0, 0, ymin), ymax) plot(Table.IEU$Z, Table.IEU$R, type = "b", ylab = expression(paste("R = [", alpha["in"], "/", alpha["ex"],"]")), xlab = "Z [Gy]", ylim = ylim) arrows(Table.IEU$Z, Table.IEU$R + Table.IEU$R.Error, Table.IEU$Z, Table.IEU$R - Table.IEU$R.Error, col = 1, angle = 90, length = 0.05, code = 3) abline(1, 0, untf = FALSE, lty = 3) } Unity <- Table.IEU[R >= 1, ] Max <- max(Unity$Rank.number, na.rm = TRUE) Above.Z <- Table.IEU[Max, 4] Above.Error <- Table.IEU[Max, 6] Below.Z <- Table.IEU[Max + 1, 4] Below.Error <- Table.IEU[Max + 1, 6] Above.R <- Table.IEU[Max, 8] Below.R <- Table.IEU[Max + 1, 8] Slope <- (Above.R - Below.R)/(Above.Z - Below.Z) Intercept <- Above.R - (Slope * Above.Z) IEU.De <- round(((1 - Intercept)/Slope), decimal.point) IEU.Error <- 
max(sqrt(Above.Error), sqrt(Below.Error)) IEU.Error <- round(IEU.Error, decimal.point) n <- Max + 1 if (trace) { message(sprintf("[Iteration of Dbar] \n Dbar: %.4f \n IEU.De: %.4f \n IEU.Error: %.4f \n n: %i \n R: %.4f \n", Dbar, IEU.De, IEU.Error, n, Below.R)) } } # final plot if (plot) { ymin <- min(Table.IEU$R[2:nrow(Table.IEU)] - Table.IEU$R.Error[2:nrow(Table.IEU)]) ymax <- max(Table.IEU$R[2:nrow(Table.IEU)] + Table.IEU$R.Error[2:nrow(Table.IEU)]) ylim <- c(ifelse(ymin > 0, 0, ymin), ymax) plot(Table.IEU$Z, Table.IEU$R, type = "b", ylab = expression(paste("R = [", alpha["in"], "/", alpha["ex"],"]")), xlab = "Z [Gy]", ylim = ylim) arrows(Table.IEU$Z, Table.IEU$R + Table.IEU$R.Error, Table.IEU$Z, Table.IEU$R - Table.IEU$R.Error, col = 1, angle = 90, length = 0.05, code = 3) abline(1, 0, untf = FALSE, lty = 3) } Table.Results <- data.frame(Dbar, IEU.De, IEU.Error, n, a, b) colnames(Table.Results) <- c("Dbar", "IEU.De (Gy)", "IEU.Error (Gy)", "Number of De", "a", "b") ##==========================================================================## ## TERMINAL OUTPUT ##==========================================================================## if (verbose) { message(sprintf( "\n [calc_IEU] \n\n Dbar: %.2f \n IEU.De (Gy): %.2f \n IEU.Error (Gy): %.2f Number of De: %.0f \n a: %.4f \n b: %.4f", Table.Results[1], Table.Results[2], Table.Results[3], Table.Results[4], Table.Results[5], Table.Results[6])) } ##==========================================================================## ## RETURN VALUES ##==========================================================================## summary <- Table.Results[ ,c(-1, -5, -6)] colnames(summary) <- c("de", "de_err", "n") call <- sys.call() args <- list(a = a, b = b, interval = interval, decimal.point = decimal.point, plot = plot) newRLumResults.calc_IEU <- set_RLum( class = "RLum.Results", data = list(summary = summary, data = data, args = args, call = call, tables = list( Table.IEUCalculations = Table.IEU, Table.Fixed.Iteration = 
Table.Fixed.Iteration, Table.IEUResults = Table.Results ))) invisible(newRLumResults.calc_IEU) } Luminescence/R/convert_Wavelength2Energy.R0000644000176200001440000001643614367174002020326 0ustar liggesusers#'@title Emission Spectra Conversion from Wavelength to Energy Scales (Jacobian Conversion) #' #'@description The function provides a convenient and fast way to convert emission spectra wavelength #'to energy scales. The function works on [RLum.Data.Spectrum-class], [data.frame] and [matrix] and #'a [list] of such objects. The function was written to smooth the workflow while analysing #'emission spectra data. This is in particular useful if you want to further treat your data #'and apply, e.g., a signal deconvolution. #' #'@details #' #' The intensity of the spectrum is re-calculated using the following approach to recalculate #' wavelength and corresponding intensity values #' (e.g., Appendix 4 in Blasse and Grabmeier, 1994; Mooney and Kambhampati, 2013): #' #' \deqn{\phi_{E} = \phi_{\lambda} * \lambda^2 / (hc)} #' #' with #' \eqn{\phi_{E}} the intensity per interval of energy \eqn{E} (1/eV), #' \eqn{\phi_{\lambda}} the intensity per interval of wavelength \eqn{\lambda} #' (1/nm) and #' \eqn{h} (eV * s) the Planck constant and \eqn{c} (nm/s) the velocity of light. #' #' For transforming the wavelength axis (x-values) the equation as follow is used #' #' \deqn{E = hc/\lambda} #' #' @param object [RLum.Data.Spectrum-class], [data.frame], [matrix] (**required**): input object to be converted. #' If the input is not an [RLum.Data.Spectrum-class], the first column is always treated as the wavelength #' column. The function supports a list of allowed input objects. #' #' @param digits [integer] (*with default*): set the number of digits on the returned energy axis #' #' @param order [logical] (*with default*): enables/disables sorting of the values in ascending energy #' order. 
After the conversion the longest wavelength has the lowest energy value and the shortest #' wavelength the highest. While this is correct, some R functions expect increasing x-values. #' #' @return The same object class as provided as input is returned. #' #' @note This conversion works solely for emission spectra. In case of absorption spectra only #' the x-axis has to be converted. #' #' @section Function version: 0.1.1 #' #' @author #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #' @seealso [RLum.Data.Spectrum-class], [plot_RLum] #' #' @keywords IO #' #' @references #' #' Blasse, G., Grabmaier, B.C., 1994. Luminescent Materials. Springer. #' #' Mooney, J., Kambhampati, P., 2013. Get the Basics Right: Jacobian Conversion of Wavelength and #' Energy Scales for Quantitative Analysis of Emission Spectra. J. Phys. Chem. Lett. 4, 3316–3318. #' \doi{10.1021/jz401508t} #' #' Mooney, J., Kambhampati, P., 2013. Correction to “Get the Basics Right: Jacobian Conversion of #' Wavelength and Energy Scales for Quantitative Analysis of Emission Spectra.” J. Phys. Chem. Lett. 4, #' 3316–3318. \doi{10.1021/jz401508t} #' #' **Further reading** #' #' Angulo, G., Grampp, G., Rosspeintner, A., 2006. Recalling the appropriate representation of #' electronic spectra. Spectrochimica Acta Part A: Molecular and Biomolecular Spectroscopy 65, #' 727–731. \doi{10.1016/j.saa.2006.01.007} #' #' Wang, Y., Townsend, P.D., 2013. Potential problems in collection and data processing of #' luminescence signals. Journal of Luminescence 142, 202–211. \doi{10.1016/j.jlumin.2013.03.052} #' #' @examples #' #' ##=====================## #' ##(1) Literature example after Mooney et al. 
(2013) #' ##(1.1) create matrix #' m <- matrix( #' data = c(seq(400, 800, 50), rep(1, 9)), ncol = 2) #' #'##(1.2) set plot function to reproduce the #'##literature figure #'p <- function(m) { #' plot(x = m[, 1], y = m[, 2]) #' polygon( #' x = c(m[, 1], rev(m[, 1])), #' y = c(m[, 2], rep(0, nrow(m)))) #' for (i in 1:nrow(m)) { #' lines(x = rep(m[i, 1], 2), y = c(0, m[i, 2])) #' } #'} #' #'##(1.3) plot curves #' par(mfrow = c(1,2)) #' p(m) #' p(convert_Wavelength2Energy(m)) #' #'##=====================## #'##(2) Another example using density curves #' ##create dataset #' xy <- density( #' c(rnorm(n = 100, mean = 500, sd = 20), #' rnorm(n = 100, mean = 800, sd = 20))) #' xy <- data.frame(xy$x, xy$y) #' #' ##plot #' par(mfrow = c(1,2)) #' plot( #' xy, #' type = "l", #' xlim = c(150, 1000), #' xlab = "Wavelength [nm]", #' ylab = "Luminescence [a.u.]" #' ) #'plot( #' convert_Wavelength2Energy(xy), #' xy$y, #' type = "l", #' xlim = c(1.23, 8.3), #' xlab = "Energy [eV]", #' ylab = "Luminescence [a.u.]" #' ) #' #'@md #'@export convert_Wavelength2Energy <- function( object, digits = 3L, order = FALSE ){ # Self-call ----------------------------------------------------------------------------------- if(inherits(object, "list")){ return(lapply(object, convert_Wavelength2Energy)) } # Conversion function ------------------------------------------------------------------------- ##this treats the matrix; in either caes and we play safe, means, we create in either case ##colnames and rownames, but remove them later depending on the input .conv_intensity <- function(m){ h <- 4.135667662e-15 #eV * s c <- 299792458e+09 #nm/s ##convert count values m[] <- m * as.numeric(rownames(m))^2 / (h * c) ##modify rownames rownames(m) <- round((h * c) / as.numeric(rownames(m)),digits) ##return results return(m) } # Treat input data ---------------------------------------------------------------------------- if(inherits(object, "RLum.Data.Spectrum")){ ##check whether the object might have this 
scale already ##this only works on RLum.Data.Spectrum objects and is sugar for using RLum-objects if(any("curveDescripter" %in% names(object@info))){ if(any(grepl(pattern = "energy", x = tolower(object@info$curveDescripter), fixed = TRUE))){ message("[convert_Wavelength2Energy()] Your object has already an energy scale, nothing done!") return(object) } } ##convert data object@data <- .conv_intensity(object@data) #sort values if needed if(order){ object@data <- object@data[order(as.numeric(rownames(object@data))),] rownames(object@data) <- sort(as.numeric(rownames(object@data))) } ##correct $curveDescripter (we do not attach the table, otherwise the object gets too big) if(any("curveDescripter" %in% names(object@info))){ temp_descripter <- strsplit(object@info$curveDescripter, ";", TRUE)[[1]] temp_descripter[grepl(x = temp_descripter,pattern = "wavelength", fixed = TRUE)] <- "energy [eV]" object@info$curveDescripter <- paste(temp_descripter, collapse = ";") } ##return new object return(object) }else if(inherits(object, "matrix") || inherits(object, "data.frame")){ temp <- as.matrix(object[,2:ncol(object)]) ##set rownames rownames(temp) <- object[,1] ##convert values temp <- .conv_intensity(temp) ##construct new full matrix temp <- cbind(as.numeric(rownames(temp)), temp) rownames(temp) <- rownames(object) colnames(temp) <- colnames(object) ##order on request (remember, here it is the first column) if(order) temp <- temp[order(temp[,1]),] ##return if(inherits(object, "data.frame")) return(as.data.frame(temp)) return(temp) }else{ stop( paste0( "[convert_Wavelength2Energy()] Class '", class(object)[1], "' not supported as input!" ), call. = FALSE ) } } Luminescence/R/plot_GrowthCurve.R0000644000176200001440000022030114521207352016524 0ustar liggesusers#' @title Fit and plot a dose-response curve for luminescence data (Lx/Tx against dose) #' #' @description #' #' A dose-response curve is produced for luminescence measurements using a #' regenerative or additive protocol. 
The function supports interpolation and #' extrapolation to calculate the equivalent dose. #' #' @details #' #' **Fitting methods** #' #' For all options (except for the `LIN`, `QDR` and the `EXP OR LIN`), #' the [minpack.lm::nlsLM] function with the `LM` (Levenberg-Marquardt algorithm) #' algorithm is used. Note: For historical reasons for the Monte Carlo #' simulations partly the function [nls] using the `port` algorithm. #' #' The solution is found by transforming the function or using [uniroot]. #' #' `LIN`: fits a linear function to the data using #' [lm]: \deqn{y = mx + n} #' #' `QDR`: fits a linear function to the data using #' [lm]: \deqn{y = a + bx + cx^2} #' #' `EXP`: tries to fit a function of the form #' \deqn{y = a(1 - exp(-\frac{(x+c)}{b}))} #' Parameters b and c are approximated by a linear fit using [lm]. Note: b = D0 #' #' `EXP OR LIN`: works for some cases where an `EXP` fit fails. #' If the `EXP` fit fails, a `LIN` fit is done instead. #' #' `EXP+LIN`: tries to fit an exponential plus linear function of the #' form: #' \deqn{y = a(1-exp(-\frac{x+c}{b}) + (gx))} #' The \eqn{D_e} is calculated by iteration. #' #' **Note:** In the context of luminescence dating, this #' function has no physical meaning. Therefore, no D0 value is returned. #' #' `EXP+EXP`: tries to fit a double exponential function of the form #' \deqn{y = (a_1 (1-exp(-\frac{x}{b_1}))) + (a_2 (1 - exp(-\frac{x}{b_2})))} #' This fitting procedure is not robust against wrong start parameters and #' should be further improved. #' #' `GOK`: tries to fit the general-order kinetics function after #' Guralnik et al. (2015) of the form of #' #' \deqn{y = a (d - (1 + (\frac{1}{b}) x c)^{(-1/c)})} #' #' where **c > 0** is a kinetic order modifier #' (not to be confused with **c** in `EXP` or `EXP+LIN`!). #' #' `LambertW`: tries to fit a dose-response curve based on the Lambert W function #' according to Pagonis et al. (2020). 
The function has the form #' #' \deqn{y ~ (1 + (W((R - 1) * exp(R - 1 - ((x + D_{int}) / D_{c}))) / (1 - R))) * N} #' #' with \eqn{W} the Lambert W function, calculated using the package [lamW::lambertW0], #' \eqn{R} the dimensionless retrapping ratio, \eqn{N} the total concentration #' of trappings states in cm^-3 and \eqn{D_{c} = N/R} a constant. \eqn{D_{int}} is #' the offset on the x-axis. Please not that finding the root in `mode = "extrapolation"` #' is a non-easy task due to the shape of the function and the results might be #' unexpected. #' #' **Fit weighting** #' #' If the option `fit.weights = TRUE` is chosen, weights are calculated using #' provided signal errors (Lx/Tx error): #' \deqn{fit.weights = \frac{\frac{1}{error}}{\Sigma{\frac{1}{error}}}} #' #' **Error estimation using Monte Carlo simulation** #' #' Error estimation is done using a parametric bootstrapping approach. A set of #' `Lx/Tx` values is constructed by randomly drawing curve data sampled from normal #' distributions. The normal distribution is defined by the input values (`mean #' = value`, `sd = value.error`). Then, a dose-response curve fit is attempted for each #' dataset resulting in a new distribution of single `De` values. The standard #' deviation of this distribution is becomes then the error of the `De`. With increasing #' iterations, the error value becomes more stable. However, naturally the error #' will not decrease with more MC runs. #' #' Alternatively, the function returns highest probability density interval #' estimates as output, users may find more useful under certain circumstances. #' #' **Note:** It may take some calculation time with increasing MC runs, #' especially for the composed functions (`EXP+LIN` and `EXP+EXP`).\cr #' Each error estimation is done with the function of the chosen fitting method. #' #' **Subtitle information** #' #' To avoid plotting the subtitle information, provide an empty user `mtext` #' `mtext = ""`. 
To plot any other subtitle text, use `mtext`. #' #' @param sample [data.frame] (**required**): #' data frame with three columns for `x = Dose`,`y = LxTx`,`z = LxTx.Error`, `y1 = TnTx`. #' The column for the test dose response is optional, but requires `'TnTx'` as #' column name if used. For exponential fits at least three dose points #' (including the natural) should be provided. #' #' @param na.rm [logical] (*with default*): excludes `NA` values from the data set prior to any further operations. This argument is defunct and will be removed in a future version! #' #' @param mode [character] (*with default*): #' selects calculation mode of the function. #' - `"interpolation"` (default) calculates the De by interpolation, #' - `"extrapolation"` calculates the equivalent dose by extrapolation (useful for MAAD measurements) and #' - `"alternate"` calculates no equivalent dose and just fits the data points. #' #' Please note that for option `"interpolation"` the first point is considered #' as the natural dose. #' #' @param fit.method [character] (*with default*): #' function used for fitting. Possible options are: #' - `LIN`, #' - `QDR`, #' - `EXP`, #' - `EXP OR LIN`, #' - `EXP+LIN`, #' - `EXP+EXP`, #' - `GOK`, #' - `LambertW` #' #' See details. #' #' @param fit.force_through_origin [logical] (*with default*) #' allow to force the fitted function through the origin. #' For `method = "EXP+EXP"` the function will be fixed through #' the origin in either case, so this option will have no effect. #' #' @param fit.weights [logical] (*with default*): #' option whether the fitting is done with or without weights. See details. #' #' @param fit.includingRepeatedRegPoints [logical] (*with default*): #' includes repeated points for fitting (`TRUE`/`FALSE`). #' #' @param fit.NumberRegPoints [integer] (*optional*): #' set number of regeneration points manually. By default the number of all (!) #' regeneration points is used automatically. 
#' #' @param fit.NumberRegPointsReal [integer] (*optional*): #' if the number of regeneration points is provided manually, the value of the #' real, regeneration points = all points (repeated points) including reg 0, #' has to be inserted. #' #' @param fit.bounds [logical] (*with default*): #' set lower fit bounds for all fitting parameters to 0. Limited for the use #' with the fit methods `EXP`, `EXP+LIN`, `EXP OR LIN`, `GOK`, `LambertW`. #' Argument to be inserted for experimental application only! #' #' @param NumberIterations.MC [integer] (*with default*): #' number of Monte Carlo simulations for error estimation. See details. #' #' @param output.plot [logical] (*with default*): #' plot output (`TRUE/FALSE`). #' #' @param output.plotExtended [logical] (*with default*): #' If `TRUE`, 3 plots on one plot area are provided: #' 1. growth curve, #' 2. histogram from Monte Carlo error simulation and #' 3. a test dose response plot. #' #' If `FALSE`, just the growth curve will be plotted. #' **Requires:** `output.plot = TRUE`. #' #' @param output.plotExtended.single [logical] (*with default*): #' single plot output (`TRUE/FALSE`) to allow for plotting the results in #' single plot windows. Requires `output.plot = TRUE` and #' `output.plotExtended = TRUE`. #' #' @param cex.global [numeric] (*with default*): #' global scaling factor. #' #' @param txtProgressBar [logical] (*with default*): #' enables or disables `txtProgressBar`. If `verbose = FALSE` also no #' `txtProgressBar` is shown. #' #' @param verbose [logical] (*with default*): #' enables or disables terminal feedback. #' #' @param ... Further arguments and graphical parameters to be passed. Note: #' Standard arguments will only be passed to the growth curve plot. 
Supported: #' `xlim`, `ylim`, `main`, `xlab`, `ylab` #' #' @return #' Along with a plot (so far wanted) an `RLum.Results` object is returned containing, #' the slot `data` contains the following elements: #' #' \tabular{lll}{ #' **DATA.OBJECT** \tab **TYPE** \tab **DESCRIPTION** \cr #' `..$De` : \tab `data.frame` \tab Table with De values \cr #' `..$De.MC` : \tab `numeric` \tab Table with De values from MC runs \cr #' `..$Fit` : \tab [nls] or [lm] \tab object from the fitting for `EXP`, `EXP+LIN` and `EXP+EXP`. #' In case of a resulting linear fit when using `LIN`, `QDR` or `EXP OR LIN` \cr #' `..$Formula` : \tab [expression] \tab Fitting formula as R expression \cr #' `..$call` : \tab `call` \tab The original function call\cr #' } #' #' @section Function version: 1.11.10 #' #' @author #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)\cr #' Michael Dietze, GFZ Potsdam (Germany) #' #' @references #' #' Berger, G.W., Huntley, D.J., 1989. Test data for exponential fits. Ancient TL 7, 43-46. #' #' Guralnik, B., Li, B., Jain, M., Chen, R., Paris, R.B., Murray, A.S., Li, S.-H., Pagonis, P., #' Herman, F., 2015. Radiation-induced growth and isothermal decay of infrared-stimulated luminescence #' from feldspar. Radiation Measurements 81, 224-231. #' #' Pagonis, V., Kitis, G., Chen, R., 2020. A new analytical equation for the dose response of dosimetric materials, #' based on the Lambert W function. Journal of Luminescence 225, 117333. 
\doi{10.1016/j.jlumin.2020.117333} #' #' @seealso [nls], [RLum.Results-class], [get_RLum], [minpack.lm::nlsLM], #' [lm], [uniroot], [lamW::lambertW0] #' #' @examples #' #' ##(1) plot growth curve for a dummy data.set and show De value #' data(ExampleData.LxTxData, envir = environment()) #' temp <- plot_GrowthCurve(LxTxData) #' get_RLum(temp) #' #' ##(1b) horizontal plot arrangement #' layout(mat = matrix(c(1,1,2,3), ncol = 2)) #' plot_GrowthCurve(LxTxData, output.plotExtended.single = TRUE) #' #' ##(1c) to access the fitting value try #' get_RLum(temp, data.object = "Fit") #' #' ##(2) plot the growth curve only - uncomment to use #' ##pdf(file = "~/Desktop/Growth_Curve_Dummy.pdf", paper = "special") #' plot_GrowthCurve(LxTxData) #' ##dev.off() #' #' ##(3) plot growth curve with pdf output - uncomment to use, single output #' ##pdf(file = "~/Desktop/Growth_Curve_Dummy.pdf", paper = "special") #' plot_GrowthCurve(LxTxData, output.plotExtended.single = TRUE) #' ##dev.off() #' #' ##(4) plot resulting function for given intervall x #' x <- seq(1,10000, by = 100) #' plot( #' x = x, #' y = eval(temp$Formula), #' type = "l" #' ) #' #' ##(5) plot using the 'extrapolation' mode #' LxTxData[1,2:3] <- c(0.5, 0.001) #' print(plot_GrowthCurve(LxTxData,mode = "extrapolation")) #' #' ##(6) plot using the 'alternate' mode #' LxTxData[1,2:3] <- c(0.5, 0.001) #' print(plot_GrowthCurve(LxTxData,mode = "alternate")) #' #' ##(7) import and fit test data set by Berger & Huntley 1989 #' QNL84_2_unbleached <- #' read.table(system.file("extdata/QNL84_2_unbleached.txt", package = "Luminescence")) #' #' results <- plot_GrowthCurve( #' QNL84_2_unbleached, #' mode = "extrapolation", #' plot = FALSE, #' verbose = FALSE) #' #' #calculate confidence interval for the parameters #' #as alternative error estimation #' confint(results$Fit, level = 0.68) #' #' #' \dontrun{ #' QNL84_2_bleached <- #' read.table(system.file("extdata/QNL84_2_bleached.txt", package = "Luminescence")) #' STRB87_1_unbleached 
<- #' read.table(system.file("extdata/STRB87_1_unbleached.txt", package = "Luminescence")) #' STRB87_1_bleached <- #' read.table(system.file("extdata/STRB87_1_bleached.txt", package = "Luminescence")) #' #' print( #' plot_GrowthCurve( #' QNL84_2_bleached, #' mode = "alternate", #' plot = FALSE, #' verbose = FALSE)$Fit) #' #' print( #' plot_GrowthCurve( #' STRB87_1_unbleached, #' mode = "alternate", #' plot = FALSE, #' verbose = FALSE)$Fit) #' #' print( #' plot_GrowthCurve( #' STRB87_1_bleached, #' mode = "alternate", #' plot = FALSE, #' verbose = FALSE)$Fit) #' } #' #' @md #' @export plot_GrowthCurve <- function( sample, na.rm = TRUE, mode = "interpolation", fit.method = "EXP", fit.force_through_origin = FALSE, fit.weights = TRUE, fit.includingRepeatedRegPoints = TRUE, fit.NumberRegPoints = NULL, fit.NumberRegPointsReal = NULL, fit.bounds = TRUE, NumberIterations.MC = 100, output.plot = TRUE, output.plotExtended = TRUE, output.plotExtended.single = FALSE, cex.global = 1, txtProgressBar = TRUE, verbose = TRUE, ... ) { ##1. Check input variable switch( class(sample)[1], data.frame = sample, matrix = sample <- as.data.frame(sample), list = sample <- as.data.frame(sample), stop( "[plot_GrowthCurve()] Argument 'sample' needs to be of type 'data.frame'!", call. = FALSE) ) ##2. Check supported fit methods fit.method_supported <- c("LIN", "QDR", "EXP", "EXP OR LIN", "EXP+LIN", "EXP+EXP", "GOK", "LambertW") if (!fit.method[1] %in% fit.method_supported) { stop(paste0( "[plot_GrowthCurve()] Fit method not supported, supported methods are: ", paste(fit.method_supported, collapse = ", ") ), call. = FALSE) } ##2. check if sample contains a least three rows if(length(sample[[1]]) < 3 & fit.method != "LIN") stop("\n [plot_GrowthCurve()] At least two regeneration points are required!", call. 
= FALSE) ##2.1 check column numbers; we assume that in this particular case no error value ##was provided, e.g., set all errors to 0 if(ncol(sample) == 2) sample <- cbind(sample, 0) ##2.2 check for inf data in the data.frame if(any(is.infinite(unlist(sample)))){ #https://stackoverflow.com/questions/12188509/cleaning-inf-values-from-an-r-dataframe #This is slow, but it does not break with previous code sample <- do.call(data.frame, lapply(sample, function(x) replace(x, is.infinite(x),NA))) warning("[plot_GrowthCurve()] Inf values found, replaced by NA!", call. = FALSE) } ##2.3 check whether the dose value is equal all the time if(sum(abs(diff(sample[[1]])), na.rm = TRUE) == 0){ try(stop("[plot_GrowthCurve()] All points have the same dose. NULL returned!", call. = FALSE)) return(NULL) } ## optionally, count and exclude NA values and print result if(na.rm[1]) { ## write warning if(sum(!complete.cases(sample)) > 0) warning(paste("[plot_GrowthCurve()]", sum(!complete.cases(sample)), "NA value(s) excluded."), call. = FALSE) ## exclude NA sample <- na.exclude(sample) ##Check if anything is left after removal if(nrow(sample) == 0){ try(stop("[plot_GrowthCurve()] Sorry, after NA removal nothing is left from the data set! NULL returned!", call. = FALSE)) return(NULL) } }else{ stop("[plot_GrowthCurve()] Sorry, the argument 'na.rm' is defunct and will be removed in future!", call. = FALSE) } ##3. verbose mode if(!verbose) txtProgressBar <- FALSE ##remove rownames from data.frame, as this could causes errors for the reg point calculation rownames(sample) <- NULL ##zero values in the data.frame are not allowed for the y-column if(length(sample[sample[,2]==0,2])>0){ warning( paste("[plot_GrowthCurve()]", length(sample[sample[,2]==0,2]), "values with 0 for Lx/Tx detected; replaced by ", .Machine$double.eps), call. = FALSE) sample[sample[, 2] == 0, 2] <- .Machine$double.eps } ##1. 
INPUT #1.0.1 calculate number of reg points if not set if(is.null(fit.NumberRegPoints)) fit.NumberRegPoints <- length(sample[-1,1]) if(is.null(fit.NumberRegPointsReal)){ fit.RegPointsReal <- which(!duplicated(sample[,1]) | sample[,1] != 0) fit.NumberRegPointsReal <- length(fit.RegPointsReal) } #1.1 Produce data.frame from input values, two options for different modes if(mode[1] == "interpolation"){ xy <- data.frame(x=sample[2:(fit.NumberRegPoints+1),1],y=sample[2:(fit.NumberRegPoints+1),2]) y.Error <- sample[2:(fit.NumberRegPoints+1),3] } else if (mode[1] == "extrapolation" || mode[1] == "alternate") { xy <- data.frame( x = sample[1:(fit.NumberRegPoints+1),1], y = sample[1:(fit.NumberRegPoints+1),2]) y.Error <- sample[1:(fit.NumberRegPoints+1),3] }else{ stop("[plot_GrowthCurve()] Unknown input for argument 'mode'", call. = FALSE) } ##1.1.1 produce weights for weighted fitting if(fit.weights){ fit.weights <- 1 / abs(y.Error) / sum(1 / abs(y.Error)) if(any(is.na(fit.weights))){ fit.weights <- rep(1, length(y.Error)) warning( "[plot_GrowthCurve()] 'fit.weights' ignored since the error column is invalid or 0.", call. 
= FALSE) } }else{ fit.weights <- rep(1, length(y.Error)) } #1.2 Prepare data sets regeneration points for MC Simulation if (mode[1] == "interpolation") { data.MC <- t(vapply( X = seq(2, fit.NumberRegPoints + 1, by = 1), FUN = function(x) { sample(rnorm( n = 10000, mean = sample[x, 2], sd = abs(sample[x, 3]) ), size = NumberIterations.MC, replace = TRUE) }, FUN.VALUE = vector("numeric", length = NumberIterations.MC) )) #1.3 Do the same for the natural signal data.MC.De <- numeric(NumberIterations.MC) data.MC.De <- sample(rnorm(10000, mean = sample[1, 2], sd = abs(sample[1, 3])), NumberIterations.MC, replace = TRUE) }else{ data.MC <- t(vapply( X = seq(1, fit.NumberRegPoints + 1, by = 1), FUN = function(x) { sample(rnorm( n = 10000, mean = sample[x, 2], sd = abs(sample[x, 3]) ), size = NumberIterations.MC, replace = TRUE) }, FUN.VALUE = vector("numeric", length = NumberIterations.MC) )) } #1.3 set x.natural x.natural <- vector("numeric", length = NumberIterations.MC) x.natural <- NA ##1.4 set initialise variables De <- De.Error <- D01 <- R <- Dc <- N <- NA # FITTING ---------------------------------------------------------------------- ##3. 
Fitting values with nonlinear least-squares estimation of the parameters ## set functions for fitting ## REMINDER: DO NOT ADD {} brackets, otherwise the formula construction will not ## work ## get current environment, we need that later currn_env <- environment() ## Define functions --------- ### EXP ------- fit.functionEXP <- function(a,b,c,x) a*(1-exp(-(x+c)/b)) ### EXP+LIN ----------- fit.functionEXPLIN <- function(a,b,c,g,x) a*(1-exp(-(x+c)/b)+(g*x)) ### EXP+EXP ---------- fit.functionEXPEXP <- function(a1,a2,b1,b2,x) (a1*(1-exp(-(x)/b1)))+(a2*(1-exp(-(x)/b2))) ### GOK ---------------- fit.functionGOK <- function(a,b,c,d,x) a*(d-(1+(1/b)*x*c)^(-1/c)) ### Lambert W ------------- fit.functionLambertW <- function(R, Dc, N, Dint, x) (1 + (lamW::lambertW0((R - 1) * exp(R - 1 - ((x + Dint) / Dc ))) / (1 - R))) * N ##input data for fitting; exclude repeated RegPoints if (!fit.includingRepeatedRegPoints[1]) { data <- data.frame(x = xy[[1]][!duplicated(xy[[1]])], y = xy[[2]][!duplicated(xy[[1]])]) fit.weights <- fit.weights[!duplicated(xy[[1]])] data.MC <- data.MC[!duplicated(xy[[1]]),,drop = FALSE] y.Error <- y.Error[!duplicated(xy[[1]])] xy <- xy[!duplicated(xy[[1]]),,drop = FALSE] }else{ data <- data.frame(xy) } ## for unknown reasons with only two points the nls() function is trapped in ## an endless mode, therefore the minimum length for data is 3 ## (2016-05-17) if(any(fit.method %in% c("EXP", "EXP+LIN", "EXP+EXP", "EXP OR LIN")) && length(data[,1])<=2) { ##set to LIN fit.method <- "LIN" warning("[plot_GrowthCurve()] Fitting using an exponential term requires at least 3 dose points! fit.method set to 'LIN'", call. 
= FALSE) if(verbose) message("[plot_GrowthCurve()] fit.method set to 'LIN', see warnings()") } ##START PARAMETER ESTIMATION ##general setting of start parameters for fitting ##a - estimation for a a the maximum of the y-values (Lx/Tx) a <- max(data[,2]) ##b - get start parameters from a linear fit of the log(y) data ## (suppress the warning in case one parameter is negative) fit.lm <- try(lm(suppressWarnings(log(data$y))~data$x)) if(inherits(fit.lm, "try-error")) b <- 1 else b <- as.numeric(1/fit.lm$coefficients[2]) ##c - get start parameters from a linear fit - offset on x-axis fit.lm<-lm(data$y~data$x) c <- as.numeric(abs(fit.lm$coefficients[1]/fit.lm$coefficients[2])) #take slope from x - y scaling g <- max(data[,2]/max(data[,1])) #set D01 and D02 (in case of EXp+EXP) D01 <- NA D01.ERROR <- NA D02 <- NA D02.ERROR <- NA ##--------------------------------------------------------------------------## ##to be a little bit more flexible the start parameters varries within a normal distribution ##draw 50 start values from a normal distribution a start values if (fit.method != "LIN") { a.MC <- suppressWarnings(rnorm(50, mean = a, sd = a / 100)) if (!is.na(b)) { b.MC <- suppressWarnings(rnorm(50, mean = b, sd = b / 100)) } else{ b <- NA } c.MC <- suppressWarnings(rnorm(50, mean = c, sd = c / 100)) g.MC <- suppressWarnings(rnorm(50, mean = g, sd = g / 1)) ##set start vector (to avoid errors witin the loop) a.start <- NA b.start <- NA c.start <- NA g.start <- NA } # QDR ------------------------------------------------------------------------ if (fit.method == "QDR"){ ##Do fitting with option to force curve through the origin if(fit.force_through_origin){ ##linear fitting ... polynomial fit <- lm(data$y ~ 0 + I(data$x) + I(data$x^2), weights = fit.weights) ##give function for uniroot De.fs <- function(x, y) { 0 + coef(fit)[1] * x + coef(fit)[2] * x ^ 2 - y } }else{ ##linear fitting ... 
polynomial fit <- lm(data$y ~ I(data$x) + I(data$x^2), weights = fit.weights) ##give function for uniroot De.fs <- function(x, y) { coef(fit)[1] + coef(fit)[2] * x + coef(fit)[3] * x ^ 2 - y } } ##solve and get De if (mode == "interpolation") { De.uniroot <- try(uniroot(De.fs, y = sample[1, 2], lower = 0, upper = max(sample[, 1]) * 1.5), silent = TRUE) if (!inherits(De.uniroot, "try-error")) { De <- De.uniroot$root if (verbose) { if (mode != "alternate") { writeLines(paste0("[plot_GrowthCurve()] Fit: ", fit.method, " (", mode,") ", "| De = ", round(De,2))) } } } else{ if (verbose) writeLines("[plot_GrowthCurve()] no solution found for QDR fit") De <- NA } }else if (mode == "extrapolation"){ De.uniroot <- try(uniroot(De.fs, y = 0, lower = -1e06, upper = max(sample[, 1]) * 1.5), silent = TRUE) if (!inherits(De.uniroot, "try-error")) { De <- De.uniroot$root if (verbose) { if (mode != "alternate") { writeLines(paste0("[plot_GrowthCurve()] Fit: ", fit.method, " (", mode,") ", "| De = ", round(abs(De), 2))) } } } else{ if (verbose) writeLines("[plot_GrowthCurve()] no solution found for QDR fit") De <- NA } }else{ De <- NA } ##set progressbar if(txtProgressBar){ cat("\n\t Run Monte Carlo loops for error estimation of the QDR fit\n") pb<-txtProgressBar(min=0,max=NumberIterations.MC, char="=", style=3) } #start loop for Monte Carlo Error estimation fit.MC <- sapply(1:NumberIterations.MC, function(i){ data <- data.frame(x = xy$x, y = data.MC[,i]) if(fit.force_through_origin){ ##linear fitting ... polynomial fit.MC <- lm(data$y ~ 0 + I(data$x) + I(data$x^2), weights = fit.weights) ##give function for uniroot De.fs.MC <- function(x, y) { 0 + coef(fit.MC)[1] * x + coef(fit.MC)[2] * x ^ 2 - y 0 + coef(fit.MC)[1] * x + coef(fit.MC)[2] * x ^ 2 - y } }else{ ##linear fitting ... 
polynomial fit.MC <- lm(data$y ~ I(data$x) + I(data$x^2), weights = fit.weights) ##give function for uniroot De.fs.MC <- function(x, y) { coef(fit.MC)[1] + coef(fit.MC)[2] * x + coef(fit.MC)[3] * x ^ 2 - y } } if (mode == "interpolation") { ##solve and get De De.uniroot.MC <- try(uniroot( De.fs.MC, y = data.MC.De[i], lower = 0, upper = max(sample[, 1]) * 1.5 ), silent = TRUE) if (!inherits(De.uniroot.MC, "try-error")) { De.MC <- De.uniroot.MC$root } else{ De.MC <- NA } }else if (mode == "extrapolation"){ ##solve and get De De.uniroot.MC <- try(uniroot( De.fs.MC, y = 0, lower = -1e6, upper = max(sample[, 1]) * 1.5 ), silent = TRUE) if (!inherits(De.uniroot.MC, "try-error")) { De.MC <- De.uniroot.MC$root } else{ De.MC <- NA } }else{ De.MC <- NA } ##update progress bar if(txtProgressBar) setTxtProgressBar(pb, i) return(De.MC) }) if(txtProgressBar) close(pb) x.natural<- fit.MC } #===========================================================================## #EXP --------------- if (fit.method=="EXP" | fit.method=="EXP OR LIN" | fit.method=="LIN"){ if((is.na(a) | is.na(b) | is.na(c)) && fit.method != "LIN"){ try(stop("[plot_GrowthCurve()] Fit could not be applied for this data set. NULL returned!", call. 
= FALSE)) return(NULL) } if(fit.method != "LIN"){ ##FITTING on GIVEN VALUES## # --use classic R fitting routine to fit the curve ##try to create some start parameters from the input values to make ## the fitting more stable for(i in 1:50){ a <- a.MC[i] b <- b.MC[i] c <- c.MC[i] fit.initial <- suppressWarnings(try(nls( formula = .toFormula(fit.functionEXP, env = currn_env), data = data, start = c(a = a, b = b, c = c), trace = FALSE, algorithm = "port", lower = c(a = 0, b > 0, c = 0), nls.control( maxiter = 100, warnOnly = TRUE, minFactor = 1 / 2048 ) ), silent = TRUE )) if(!inherits(fit.initial, "try-error")){ #get parameters out of it parameters<-(coef(fit.initial)) b.start[i] <- as.vector((parameters["b"])) a.start[i] <- as.vector((parameters["a"])) c.start[i] <- as.vector((parameters["c"])) } } ##used median as start parameters for the final fitting a <- median(na.exclude(a.start)) b <- median(na.exclude(b.start)) c <- median(na.exclude(c.start)) ##exception for b: if b is 1 it is likely to b wrong and should be reset if(!is.na(b) && b == 1) b <- mean(b.MC) #FINAL Fit curve on given values fit <- try(minpack.lm::nlsLM( formula = .toFormula(fit.functionEXP, env = currn_env), data = data, start = list(a = a, b = b,c = 0), weights = fit.weights, trace = FALSE, algorithm = "LM", lower = if (fit.bounds) { c(0,0,0) }else{ c(-Inf,-Inf,-Inf) }, upper = if (fit.force_through_origin) { c(Inf, Inf, 0) }else{ c(Inf, Inf, Inf) }, control = minpack.lm::nls.lm.control(maxiter = 500) ), silent = TRUE ) if (inherits(fit, "try-error") & inherits(fit.initial, "try-error")){ if(verbose) writeLines("[plot_GrowthCurve()] try-error for EXP fit") }else{ ##this is to avoid the singular convergence failure due to a perfect fit at the beginning ##this may happen especially for simulated data if(inherits(fit, "try-error") & !inherits(fit.initial, "try-error")){ fit <- fit.initial rm(fit.initial) } #get parameters out of it parameters <- (coef(fit)) b <- as.vector((parameters["b"])) a <- 
as.vector((parameters["a"])) c <- as.vector((parameters["c"])) #calculate De if(mode == "interpolation"){ De <- suppressWarnings(-c-b*log(1-sample[1,2]/a)) }else if (mode == "extrapolation"){ De <- suppressWarnings(-c-b*log(1-0/a)) }else{ De <- NA } #print D01 value D01 <- b if (verbose) { if (mode != "alternate") { writeLines(paste0( "[plot_GrowthCurve()] Fit: ", fit.method, " (", mode, ")", " | De = ", round(abs(De), digits = 2), " | D01 = ", round(D01, 2) )) } } #EXP MC ----- ##Monte Carlo Simulation # --Fit many curves and calculate a new De +/- De_Error # --take De_Error #set variables var.b<-vector(mode="numeric", length=NumberIterations.MC) var.a<-vector(mode="numeric", length=NumberIterations.MC) var.c<-vector(mode="numeric", length=NumberIterations.MC) #start loop for (i in 1:NumberIterations.MC) { ##set data set data <- data.frame(x = xy$x,y = data.MC[,i]) fit.MC <- try(minpack.lm::nlsLM( formula = .toFormula(fit.functionEXP, env = currn_env), data = data, start = list(a = a, b = b, c = c), weights = fit.weights, trace = FALSE, algorithm = "LM", lower = if (fit.bounds) { c(0,0,0) }else{ c(-Inf,-Inf,-Inf) }, upper = if (fit.force_through_origin) { c(Inf, Inf, 0) }else{ c(Inf, Inf, Inf) }, control = minpack.lm::nls.lm.control(maxiter = 500) ), silent = TRUE ) #get parameters out of it including error handling if (inherits(fit.MC, "try-error")) { x.natural[i] <- NA }else { #get parameters out parameters<-coef(fit.MC) var.b[i]<-as.vector((parameters["b"])) #D0 var.a[i]<-as.vector((parameters["a"])) #Imax var.c[i]<-as.vector((parameters["c"])) #calculate x.natural for error calculation if(mode == "interpolation"){ x.natural[i]<-suppressWarnings( -var.c[i]-var.b[i]*log(1-data.MC.De[i]/var.a[i])) }else if(mode == "extrapolation"){ x.natural[i]<-suppressWarnings( abs(-var.c[i]-var.b[i]*log(1-0/var.a[i]))) }else{ x.natural[i] <- NA } } }#end for loop ##write D01.ERROR D01.ERROR <- sd(var.b, na.rm = TRUE) ##remove values rm(var.b, var.a, var.c) }#endif::try-error 
fit }#endif:fit.method!="LIN" ##LIN ----- ##two options: just linear fit or LIN fit after the EXP fit failed #set fit object, if fit object was not set before if(exists("fit")==FALSE){fit<-NA} if ((fit.method=="EXP OR LIN" & inherits(fit, "try-error")) | fit.method=="LIN" | length(data[,1])<2) { ##Do fitting again as just allows fitting through the origin if(fit.force_through_origin){ fit.lm<-lm(data$y ~ 0 + data$x, weights = fit.weights) #calculate De if(mode == "interpolation"){ De <- sample[1,2]/fit.lm$coefficients[1] }else{ De <- 0 } }else{ fit.lm<-lm(data$y ~ data$x, weights = fit.weights) #calculate De if(mode == "interpolation"){ De <- (sample[1,2]-fit.lm$coefficients[1])/fit.lm$coefficients[2] }else if(mode == "extrapolation"){ De <- (0-fit.lm$coefficients[1])/fit.lm$coefficients[2] } } ##remove vector labels De <- as.numeric(as.character(De)) if (verbose) { if (mode != "alternate") { writeLines(paste0( "[plot_GrowthCurve()] Fit: ", fit.method, " (", mode, ") ", "| De = ", round(abs(De), 2) )) } } #start loop for Monte Carlo Error estimation #LIN MC --------- for (i in 1:NumberIterations.MC) { data <- data.frame(x = xy$x, y = data.MC[, i]) if(fit.force_through_origin){ ##do fitting fit.lmMC <- lm(data$y ~ 0 + data$x, weights=fit.weights) #calculate x.natural if(mode == "interpolation"){ x.natural[i] <- data.MC.De[i]/fit.lmMC$coefficients[1] }else if (mode == "extrapolation"){ x.natural[i] <- 0 } }else{ ##do fitting fit.lmMC <- lm(data$y~ data$x, weights=fit.weights) #calculate x.natural if(mode == "interpolation"){ x.natural[i] <- (data.MC.De[i]-fit.lmMC$coefficients[1])/ fit.lmMC$coefficients[2] }else if (mode == "extrapolation"){ x.natural[i] <- abs((0-fit.lmMC$coefficients[1])/ fit.lmMC$coefficients[2]) } } }#endfor::loop for MC #correct for fit.method fit.method <- "LIN" ##set fit object if(fit.method=="LIN"){fit<-fit.lm} }else{fit.method<-"EXP"}#endif::LIN }#end if EXP (this includes the LIN fit option) 
#=========================================================================== # #=========================================================================== # # EXP+LIN ---- else if (fit.method=="EXP+LIN") { ##try some start parameters from the input values to makes the fitting more stable for(i in 1:length(a.MC)){ a<-a.MC[i];b<-b.MC[i];c<-c.MC[i];g<-g.MC[i] ##---------------------------------------------------------## ##start: with EXP function fit.EXP <- try({ nls( formula = .toFormula(fit.functionEXP, env = currn_env), data = data, start = c(a=a,b=b,c=c), trace = FALSE, algorithm = "port", lower = c(a=0, b>10, c = 0), control = nls.control(maxiter=100,warnOnly=FALSE,minFactor=1/1024) )}, silent=TRUE) if(!inherits(fit.EXP, "try-error")){ #get parameters out of it parameters<-(coef(fit.EXP)) b<-as.vector((parameters["b"])) a<-as.vector((parameters["a"])) c<-as.vector((parameters["c"])) ##end: with EXP function ##---------------------------------------------------------## } fit<-try({ nls( formula = .toFormula(fit.functionEXPLIN, env = currn_env), data = data, start = c(a=a,b=b,c=c,g=g), trace = FALSE, algorithm = "port", lower = if(fit.bounds){ c(a=0,b>10,c=0,g=0)} else{c(a = -Inf,b = -Inf,c = -Inf,g = -Inf) }, control = nls.control( maxiter = 500, warnOnly = FALSE, minFactor = 1/2048) #increase max. 
iterations )}, silent=TRUE) if(!inherits(fit, "try-error")){ #get parameters out of it parameters<-(coef(fit)) b.start[i] <- as.vector((parameters["b"])) a.start[i] <- as.vector((parameters["a"])) c.start[i] <- as.vector((parameters["c"])) g.start[i] <- as.vector((parameters["g"])) } }##end for loop ##used mean as start parameters for the final fitting a <- median(na.exclude(a.start)) b <- median(na.exclude(b.start)) c <- median(na.exclude(c.start)) g <- median(na.exclude(g.start)) ##perform final fitting fit <- try(minpack.lm::nlsLM( formula = .toFormula(fit.functionEXPLIN, env = currn_env), data = data, start = list(a = a, b = b,c = c, g = g), weights = fit.weights, trace = FALSE, algorithm = "LM", lower = if (fit.bounds) { c(0,10,0,0) }else{ c(-Inf,-Inf,-Inf,-Inf) }, upper = if (fit.force_through_origin) { c(Inf, Inf, 0, Inf) }else{ c(Inf, Inf, Inf, Inf) }, control = minpack.lm::nls.lm.control(maxiter = 500) ), silent = TRUE ) #if try error stop calculation if(!inherits(fit, "try-error")){ #get parameters out of it parameters <- coef(fit) b <- as.vector((parameters["b"])) a <- as.vector((parameters["a"])) c <- as.vector((parameters["c"])) g <- as.vector((parameters["g"])) #problem: analytically it is not easy to calculate x, #use uniroot to solve that problem ... 
readjust function first if (mode == "interpolation") { f.unirootEXPLIN <- function(a, b, c, g, x, LnTn) { fit.functionEXPLIN(a, b, c, g, x) - LnTn } temp.De <- try(uniroot( f = f.unirootEXPLIN, interval = c(0, max(xy$x) * 1.5), tol = 0.001, a = a, b = b, c = c, g = g, LnTn = sample[1, 2], extendInt = "yes", maxiter = 3000 ), silent = TRUE) if (!inherits(temp.De, "try-error")) { De <- temp.De$root } else{ De <- NA } }else if(mode == "extrapolation"){ f.unirootEXPLIN <- function(a, b, c, g, x, LnTn) { fit.functionEXPLIN(a, b, c, g, x) - LnTn } temp.De <- try(uniroot( f = f.unirootEXPLIN, interval = c(-1e06, max(xy$x) * 1.5), tol = 0.001, a = a, b = b, c = c, g = g, LnTn = 0, extendInt = "yes", maxiter = 3000 ), silent = TRUE) if (!inherits(temp.De, "try-error")) { De <- temp.De$root } else{ De <- NA } }else{ De <- NA } if (verbose) { if (mode != "alternate") { writeLines(paste0( "[plot_GrowthCurve()] Fit: ", fit.method, " (", mode, ")" , " | De = ", round(abs(De),2) )) } } ##Monte Carlo Simulation for error estimation # --Fit many curves and calculate a new De +/- De_Error # --take De_Error #set variables var.b <- vector(mode="numeric", length=NumberIterations.MC) var.a <- vector(mode="numeric", length=NumberIterations.MC) var.c <- vector(mode="numeric", length=NumberIterations.MC) var.g <- vector(mode="numeric", length=NumberIterations.MC) ##set progressbar if(txtProgressBar){ cat("\n\t Run Monte Carlo loops for error estimation of the EXP+LIN fit\n") pb <- txtProgressBar(min=0,max=NumberIterations.MC, char="=", style=3) } #start Monto Carlo loops for(i in 1:NumberIterations.MC){ data <- data.frame(x=xy$x,y=data.MC[,i]) ##perform MC fitting fit.MC <- try(minpack.lm::nlsLM( formula = .toFormula(fit.functionEXPLIN, env = currn_env), data = data, start = list(a = a, b = b,c = c, g = g), weights = fit.weights, trace = FALSE, algorithm = "LM", lower = if (fit.bounds) { c(0,10,0,0) }else{ c(-Inf,-Inf,-Inf, -Inf) }, control = minpack.lm::nls.lm.control(maxiter = 500) ), 
silent = TRUE ) #get parameters out of it including error handling if (inherits(fit.MC, "try-error")) { x.natural[i] <- NA }else { parameters <- coef(fit.MC) var.b[i] <- as.vector((parameters["b"])) var.a[i] <- as.vector((parameters["a"])) var.c[i] <- as.vector((parameters["c"])) var.g[i] <- as.vector((parameters["g"])) #problem: analytical it is not easy to calculate x, #use uniroot to solve this problem if (mode == "interpolation") { temp.De.MC <- try(uniroot( f = f.unirootEXPLIN, interval = c(0, max(xy$x) * 1.5), tol = 0.001, a = var.a[i], b = var.b[i], c = var.c[i], g = var.g[i], LnTn = data.MC.De[i] ), silent = TRUE) if (!inherits(temp.De.MC, "try-error")) { x.natural[i] <- temp.De.MC$root } else{ x.natural[i] <- NA } } else if (mode == "extrapolation"){ temp.De.MC <- try(uniroot( f = f.unirootEXPLIN, interval = c(-1e6, max(xy$x) * 1.5), tol = 0.001, a = var.a[i], b = var.b[i], c = var.c[i], g = var.g[i], LnTn = 0 ), silent = TRUE) if (!inherits(temp.De.MC, "try-error")) { x.natural[i] <- abs(temp.De.MC$root) } else{ x.natural[i] <- NA } }else{ x.natural[i] <- NA } } ##update progress bar if(txtProgressBar) setTxtProgressBar(pb, i) }#end for loop ##close if(txtProgressBar) close(pb) ##remove objects rm(var.b, var.a, var.c, var.g) }else{ #print message if (verbose) { if (mode != "alternate") { writeLines(paste0( "[plot_GrowthCurve()] Fit: ", fit.method, " | De = NA (fitting FAILED)" )) } } } #end if "try-error" Fit Method } #End if EXP+LIN #EXP+EXP --------------------------------------------------------------------- else if (fit.method == "EXP+EXP") { a1.start <- NA a2.start <- NA b1.start <- NA b2.start <- NA ## try to create some start parameters from the input values to make the fitting more stable for(i in 1:50) { a1 <- a.MC[i];b1 <- b.MC[i]; a2 <- a.MC[i] / 2; b2 <- b.MC[i] / 2 fit.start <- try({ nls(formula = .toFormula(fit.functionEXPEXP, env = currn_env), data = data, start = c( a1 = a1,a2 = a2,b1 = b1,b2 = b2 ), trace = FALSE, algorithm = "port", 
lower = c(a1 > 0,a2 > 0,b1 > 0,b2 > 0), nls.control( maxiter = 500,warnOnly = FALSE,minFactor = 1 / 2048 ) #increase max. iterations )}, silent = TRUE) if (!inherits(fit.start, "try-error")) { #get parameters out of it parameters <- coef(fit.start) a1.start[i] <- as.vector((parameters["a1"])) b1.start[i] <- as.vector((parameters["b1"])) a2.start[i] <- as.vector((parameters["a2"])) b2.start[i] <- as.vector((parameters["b2"])) } } ##use obtained parameters for fit input a1.start <- median(a1.start, na.rm = TRUE) b1.start <- median(b1.start, na.rm = TRUE) a2.start <- median(a2.start, na.rm = TRUE) b2.start <- median(b2.start, na.rm = TRUE) ##perform final fitting fit <- try(minpack.lm::nlsLM( formula = .toFormula(fit.functionEXPEXP, env = currn_env), data = data, start = list(a1 = a1, b1 = b1, a2 = a2, b2 = b2), weights = fit.weights, trace = FALSE, algorithm = "LM", lower = if (fit.bounds) { c(0,0,0,0) }else{ c(-Inf,-Inf,-Inf, -Inf) }, control = minpack.lm::nls.lm.control(maxiter = 500) ), silent = TRUE ) ##insert if for try-error if (!inherits(fit, "try-error")) { #get parameters out of it parameters <- coef(fit) b1 <- as.vector((parameters["b1"])) b2 <- as.vector((parameters["b2"])) a1 <- as.vector((parameters["a1"])) a2 <- as.vector((parameters["a2"])) ##set D0 values D01 <- round(b1,digits = 2) D02 <- round(b2,digits = 2) #problem: analytically it is not easy to calculate x, use uniroot if (mode == "interpolation") { f.unirootEXPEXP <- function(a1, a2, b1, b2, x, LnTn) { fit.functionEXPEXP(a1, a2, b1, b2, x) - LnTn } temp.De <- try(uniroot( f = f.unirootEXPEXP, interval = c(0, max(xy$x) * 1.5), tol = 0.001, a1 = a1, a2 = a2, b1 = b1, b2 = b2, LnTn = sample[1, 2], extendInt = "yes", maxiter = 3000 ), silent = TRUE) if (!inherits(temp.De, "try-error")) { De <- temp.De$root } else{ De <- NA } ##remove object rm(temp.De) }else if (mode == "extrapolation"){ stop("[plot_GrowthCurve()] mode 'extrapolation' for this fitting method currently not supported!", call. 
= FALSE) } else{ De <- NA } #print D0 and De value values if(verbose){ if(mode != "alternate"){ writeLines(paste0("[plot_GrowthCurve()] Fit: ", fit.method, " | De = ", De, "| D01 = ",D01, " | D02 = ",D02)) } } ##Monte Carlo Simulation for error estimation # --Fit many curves and calculate a new De +/- De_Error # --take De_Error from the simulation # --comparison of De from the MC and original fitted De gives a value for quality #set variables var.b1 <- vector(mode="numeric", length=NumberIterations.MC) var.b2 <- vector(mode="numeric", length=NumberIterations.MC) var.a1 <- vector(mode="numeric", length=NumberIterations.MC) var.a2 <- vector(mode="numeric", length=NumberIterations.MC) ##progress bar if(txtProgressBar){ cat("\n\t Run Monte Carlo loops for error estimation of the EXP+EXP fit\n") pb<-txtProgressBar(min=0,max=NumberIterations.MC, initial=0, char="=", style=3) } #start Monto Carlo loops for (i in 1:NumberIterations.MC) { #update progress bar if(txtProgressBar) setTxtProgressBar(pb,i) data<-data.frame(x=xy$x,y=data.MC[,i]) ##perform final fitting fit.MC <- try(minpack.lm::nlsLM( formula = .toFormula(fit.functionEXPEXP, env = currn_env), data = data, start = list(a1 = a1, b1 = b1, a2 = a2, b2 = b2), weights = fit.weights, trace = FALSE, algorithm = "LM", lower = if (fit.bounds) { c(0,0,0,0) }else{ c(-Inf,-Inf,-Inf, -Inf) }, control = minpack.lm::nls.lm.control(maxiter = 500) ), silent = TRUE ) #get parameters out of it including error handling if (inherits(fit.MC, "try-error")) { x.natural[i]<-NA }else { parameters <- (coef(fit.MC)) var.b1[i] <- as.vector((parameters["b1"])) var.b2[i] <- as.vector((parameters["b2"])) var.a1[i] <- as.vector((parameters["a1"])) var.a2[i] <- as.vector((parameters["a2"])) #problem: analytically it is not easy to calculate x, here an simple approximation is made temp.De.MC <- try(uniroot( f = f.unirootEXPEXP, interval = c(0,max(xy$x) * 1.5), tol = 0.001, a1 = var.a1[i], a2 = var.a2[i], b1 = var.b1[i], b2 = var.b2[i], LnTn = 
data.MC.De[i] ), silent = TRUE) if (!inherits(temp.De.MC, "try-error")) { x.natural[i] <- temp.De.MC$root }else{ x.natural[i] <- NA } } #end if "try-error" MC simulation } #end for loop ##write D01.ERROR D01.ERROR <- sd(var.b1, na.rm = TRUE) D02.ERROR <- sd(var.b2, na.rm = TRUE) ##remove values rm(var.b1, var.b2, var.a1, var.a2) }else{ #print message if(verbose){ writeLines(paste0("[plot_GrowthCurve()] Fit: ", fit.method, " | De = NA (fitting FAILED)")) } } #end if "try-error" Fit Method ##close if(txtProgressBar) if(exists("pb")){close(pb)} } else if (fit.method[1] == "GOK") { # GOK ----- fit <- try(minpack.lm::nlsLM( formula = .toFormula(fit.functionGOK, env = currn_env), data = data, start = list(a = a, b = b, c = 1, d = 1), weights = fit.weights, trace = FALSE, algorithm = "LM", lower = if (fit.bounds) c(0,0,0,0) else c(-Inf,-Inf,-Inf,-Inf), upper = if(fit.force_through_origin) c(Inf, Inf, Inf, 1) else c(Inf, Inf, Inf, Inf), control = minpack.lm::nls.lm.control(maxiter = 500) ), silent = TRUE) if (inherits(fit, "try-error")){ if(verbose) writeLines("[plot_GrowthCurve()] try-error for GOK fit") }else{ #get parameters out of it parameters <- (coef(fit)) b <- as.vector((parameters["b"])) a <- as.vector((parameters["a"])) c <- as.vector((parameters["c"])) d <- as.vector((parameters["d"])) #calculate De y <- sample[1,2] De <- switch( mode, "interpolation" = suppressWarnings(-(b * (( (a * d - y)/a)^c - 1) * ( ((a * d - y)/a)^-c )) / c), "extrapolation" = suppressWarnings(-(b * (( (a * d - 0)/a)^c - 1) * ( ((a * d - 0)/a)^-c )) / c), NA) #print D01 value D01 <- b if (verbose) { if (mode != "alternate") { writeLines(paste0( "[plot_GrowthCurve()] Fit: ", fit.method, " (", mode, ")", " | De = ", round(abs(De), digits = 2), " | D01 = ", round(D01,2), " | c = ", round(c, digits = 2) )) } } #EXP MC ----- ##Monte Carlo Simulation # --Fit many curves and calculate a new De +/- De_Error # --take De_Error #set variables var.b <- vector(mode = "numeric", length = 
NumberIterations.MC) var.a <- vector(mode = "numeric", length = NumberIterations.MC) var.c <- vector(mode = "numeric", length = NumberIterations.MC) var.d <- vector(mode = "numeric", length = NumberIterations.MC) #start loop for (i in 1:NumberIterations.MC) { ##set data set data <- data.frame(x = xy$x,y = data.MC[,i]) fit.MC <- try({ minpack.lm::nlsLM( formula = .toFormula(fit.functionGOK, env = currn_env), data = data, start = list(a = a, b = b, c = 1, d = 1), weights = fit.weights, trace = FALSE, algorithm = "LM", lower = if (fit.bounds) { c(0,0,0,0) }else{ c(-Inf,-Inf,-Inf, -Inf) }, upper = if(fit.force_through_origin) c(Inf, Inf, Inf, 1) else c(Inf, Inf, Inf, Inf), control = minpack.lm::nls.lm.control(maxiter = 500) )}, silent = TRUE) # get parameters out of it including error handling if (inherits(fit.MC, "try-error")) { x.natural[i] <- NA } else { # get parameters out parameters<-coef(fit.MC) var.b[i] <- as.vector((parameters["b"])) #D0 var.a[i] <- as.vector((parameters["a"])) #Imax var.c[i] <- as.vector((parameters["c"])) #kinetic order modifier var.d[i] <- as.vector((parameters["d"])) #origin # calculate x.natural for error calculation x.natural[i] <- switch( mode, "interpolation" = suppressWarnings(-(var.b[i] * (( (var.a[i] * var.d[i] - data.MC.De[i])/var.a[i])^var.c[i] - 1) * (((var.a[i] * var.d[i] - data.MC.De[i])/var.a[i])^-var.c[i] )) / var.c[i]), "extrapolation" = suppressWarnings(abs(-(var.b[i] * (( (var.a[i] * var.d[i] - 0)/var.a[i])^var.c[i] - 1) * ( ((var.a[i] * var.d[i] - 0)/var.a[i])^-var.c[i] )) / var.c[i])), NA) } }#end for loop ##write D01.ERROR D01.ERROR <- sd(var.b, na.rm = TRUE) ##remove values rm(var.b, var.a, var.c) } } else if (fit.method == "LambertW") { # LambertW ----- if(mode == "extrapolation"){ Dint_lower <- 50 ##TODO - fragile ... 
however it is only used by a few } else{ Dint_lower <- 0.01 } fit <- try(minpack.lm::nlsLM( formula = .toFormula(fit.functionLambertW, env = currn_env), data = data, start = list(R = 0, Dc = b, N = b, Dint = 0), weights = fit.weights, trace = FALSE, algorithm = "LM", lower = if (fit.bounds) c(0, 0, 0, Dint_lower) else c(-Inf,-Inf,-Inf, -Inf), upper = if(fit.force_through_origin) c(10, Inf, Inf, 0) else c(10, Inf, Inf, Inf), control = minpack.lm::nls.lm.control(maxiter = 500) ), silent = TRUE) if (inherits(fit, "try-error")){ if(verbose) writeLines("[plot_GrowthCurve()] try-error for LambertW fit") }else{ #get parameters out of it parameters <- coef(fit) R <- as.vector((parameters["R"])) Dc <- as.vector((parameters["Dc"])) N <- as.vector((parameters["N"])) Dint <- as.vector((parameters["Dint"])) #calculate De if(mode == "interpolation"){ De <- try(suppressWarnings(stats::uniroot( f = function(x, R, Dc, N, Dint, LnTn) { fit.functionLambertW(R, Dc, N, Dint, x) - LnTn}, interval = c(0, max(sample[[1]]) * 1.2), R = R, Dc = Dc, N = N, Dint = Dint, LnTn = sample[1,2])$root), silent = TRUE) }else if (mode == "extrapolation"){ De <- try(suppressWarnings(stats::uniroot( f = function(x, R, Dc, N, Dint) { fit.functionLambertW(R, Dc, N, Dint, x)}, interval = c(-max(sample[[1]]),0), R = R, Dc = Dc, N = N, Dint = Dint)$root), silent = TRUE) ## there are cases where the function cannot calculate the root ## due to its shape, here we have to use the minimum if(inherits(De, "try-error")){ warning( "[plot_GrowthCurve()] Standard root estimation using stats::uniroot() failed. Using stats::optimize() instead, which may lead, however, to unexpected and inconclusive results for fit.method = 'LambertW'!", call. 
= FALSE) De <- try(suppressWarnings(stats::optimize( f = function(x, R, Dc, N, Dint) { fit.functionLambertW(R, Dc, N, Dint, x)}, interval = c(-max(sample[[1]]),0), R = R, Dc = Dc, N = N, Dint = Dint)$minimum), silent = TRUE) } } if(inherits(De, "try-error")) De <- NA if (verbose) { if (mode != "alternate") { writeLines(paste0( "[plot_GrowthCurve()] Fit: ", fit.method, " (", mode, ")", " | De = ", round(abs(De), digits = 2), " | R = ", round(R,2), " | Dc = ", round(Dc, digits = 2) )) } } #LambertW MC ----- ##Monte Carlo Simulation # --Fit many curves and calculate a new De +/- De_Error # --take De_Error #set variables var.R <- var.Dc <- var.N <- var.Dint <- vector( mode = "numeric", length = NumberIterations.MC) #start loop for (i in 1:NumberIterations.MC) { ##set data set data <- data.frame(x = xy$x,y = data.MC[,i]) fit.MC <- try(minpack.lm::nlsLM( formula = .toFormula(fit.functionLambertW, env = currn_env), data = data, start = list(R = 0, Dc = b, N = 0, Dint = 0), weights = fit.weights, trace = FALSE, algorithm = "LM", lower = if (fit.bounds) c(0, 0, 0, Dint*runif(1,0,2)) else c(-Inf,-Inf,-Inf, -Inf), upper = if(fit.force_through_origin) c(10, Inf, Inf, 0) else c(10, Inf, Inf, Inf), control = minpack.lm::nls.lm.control(maxiter = 500) ), silent = TRUE) # get parameters out of it including error handling x.natural[i] <- NA if (!inherits(fit.MC, "try-error")) { # get parameters out parameters<-coef(fit.MC) var.R[i] <- as.vector((parameters["R"])) var.Dc[i] <- as.vector((parameters["Dc"])) var.N[i] <- as.vector((parameters["N"])) var.Dint[i] <- as.vector((parameters["Dint"])) # calculate x.natural for error calculation if(mode == "interpolation"){ try <- try( {suppressWarnings(stats::uniroot( f = function(x, R, Dc, N, Dint, LnTn) { fit.functionLambertW(R, Dc, N, Dint, x) - LnTn}, interval = c(0, max(sample[[1]]) * 1.2), R = var.R[i], Dc = var.Dc[i], N = var.N[i], Dint = var.Dint[i], LnTn = data.MC.De[i])$root) }, silent = TRUE) }else if(mode == "extrapolation"){ try 
<- try( suppressWarnings(stats::uniroot( f = function(x, R, Dc, N, Dint) { fit.functionLambertW(R, Dc, N, Dint, x)}, interval = c(-max(sample[[1]]), 0), R = var.R[i], Dc = var.Dc[i], N = var.N[i], Dint = var.Dint[i])$root), silent = TRUE) if(inherits(try, "try-error")){ try <- try(suppressWarnings(stats::optimize( f = function(x, R, Dc, N, Dint) { fit.functionLambertW(R, Dc, N, Dint, x)}, interval = c(-max(sample[[1]]),0), R = var.R[i], Dc = var.Dc[i], N = var.N[i], Dint = var.Dint[i])$minimum), silent = TRUE) } }##endif extrapolation if(!inherits(try, "try-error") && !inherits(try, "function")) x.natural[i] <- try } }#end for loop ##we need absolute numbers x.natural <- abs(x.natural) ##write Dc.ERROR Dc.ERROR <- sd(var.Dc, na.rm = TRUE) ##remove values rm(var.R, var.Dc, var.N, var.Dint) }#endif::try-error fit }#End if Fit Method #Get De values from Monte Carlo simulation #calculate mean and sd (ignore NaN values) De.MonteCarlo <- mean(na.exclude(x.natural)) #De.Error is Error of the whole De (ignore NaN values) De.Error <- sd(na.exclude(x.natural)) # Formula creation -------------------------------------------------------- ## This information is part of the fit object output anyway, but ## we keep it here for legacy reasons fit_formula <- NA if(!inherits(fit, "try-error") && !is.na(fit[1])) fit_formula <- .replace_coef(fit) # Plotting ------------------------------------------------------------------------------------ ##5. Plotting if plotOutput==TRUE if(output.plot) { ## Deal with extra arguments -------------------------- extraArgs <- list(...) 
main <- if("main" %in% names(extraArgs)) {extraArgs$main} else {"Dose-response curve"} xlab <- if("xlab" %in% names(extraArgs)) {extraArgs$xlab} else {"Dose [s]"} ylab <- if("ylab" %in% names(extraArgs)) {extraArgs$ylab} else { if(mode == "regenration"){ expression(L[x]/T[x]) }else{ "Luminescence [a.u.]" } } if("cex" %in% names(extraArgs)) {cex.global <- extraArgs$cex} ylim <- if("ylim" %in% names(extraArgs)) { extraArgs$ylim } else { if(fit.force_through_origin | mode == "extrapolation"){ c(0-max(y.Error),(max(xy$y)+if(max(xy$y)*0.1>1.5){1.5}else{max(xy$y)*0.2})) }else{ c(0,(max(xy$y)+if(max(xy$y)*0.1>1.5){1.5}else{max(xy$y)*0.2})) } } xlim <- if("xlim" %in% names(extraArgs)) {extraArgs$xlim} else { if(mode != "extrapolation"){ c(0,(max(xy$x)+if(max(xy$x)*0.4>50){50}else{max(xy$x)*0.4})) }else{ if(!is.na(De)){ if(De > 0){ c(0,(max(xy$x)+if(max(xy$x)*0.4>50){50}else{max(xy$x)*0.4})) }else{ c(De * 2,(max(xy$x)+if(max(xy$x)*0.4>50){50}else{max(xy$x)*0.4})) } }else{ c(-min(xy$x) * 2,(max(xy$x)+if(max(xy$x)*0.4>50){50}else{max(xy$x)*0.4})) } } } fun <- if("fun" %in% names(extraArgs)) {extraArgs$fun} else {FALSE} ##set plot check plot_check <- NULL ##cheat the R check x <- NULL; rm(x) #PAR #open plot area if(output.plot == TRUE & output.plotExtended == TRUE & output.plotExtended.single == FALSE){ ## safe par settings par.old.full <- par(no.readonly = TRUE) on.exit(par(par.old.full)) ##set new parameter layout(matrix(c(1, 1, 1, 1, 2, 3), 3, 2, byrow = TRUE), respect = TRUE) par(cex = 0.8 * cex.global) } else { par(cex = cex.global) } #PLOT #Plot input values ##Make selection to support manual number of reg points input if(exists("fit.RegPointsReal")){ ##here the object sample has to be used otherwise the first regeneration point is not plotted. 
temp.xy.plot <- sample[fit.RegPointsReal,] } else { temp.xy.plot <- xy[1:fit.NumberRegPointsReal,] } plot_check <- try(plot( temp.xy.plot[, 1:2], ylim = ylim, xlim = xlim, pch = 19, xlab = xlab, ylab = ylab ), silent = TRUE) if (!is(plot_check, "try-error")) { if(mode == "extrapolation"){ abline(v = 0, lty = 1, col = "grey") abline(h = 0, lty = 1, col = "grey") } ### add header -------- title(main = main, line = NA) ## add curve ------- if(inherits(fit_formula, "expression")) { x <- seq(par()$usr[1], par()$usr[2], length.out = 100) lines(x, eval(fit_formula)) } ## add points ------- ##POINTS #Plot Reg0 and Repeated Points #Natural value if(mode == "interpolation"){ points(sample[1, 1:2], col = "red") segments(sample[1, 1], sample[1, 2] - sample[1, 3], sample[1, 1], sample[1, 2] + sample[1, 3], col = "red") }else if (mode == "extrapolation"){ points(x = De, y = 0, col = "red") } #repeated Point points( x = xy[which(duplicated(xy[, 1])), 1], y = xy[which(duplicated(xy[, 1])), 2], pch = 2) #reg Point 0 points( x = xy[which(xy == 0), 1], y = xy[which(xy == 0), 2], pch = 1, cex = 1.5 * cex.global) ##ARROWS #y-error Bar segments(xy$x, xy$y - y.Error, xy$x, xy$y + y.Error) ##LINES #Insert Ln/Tn if (mode == "interpolation") { if (is.na(De)) { lines( c(0, max(sample[, 1]) * 2), c(sample[1, 2], sample[1, 2]), col = "red", lty = 2, lwd = 1.25 ) } else { try(lines( c(0, De), c(sample[1, 2], sample[1, 2]), col = "red", lty = 2, lwd = 1.25 ), silent = TRUE) } try(lines(c(De, De), c(0, sample[1, 2]), col = "red", lty = 2, lwd = 1.25), silent = TRUE) try(points(De, sample[1, 2], col = "red", pch = 19), silent = TRUE) } else if (mode == "extrapolation"){ if(!is.na(De)){ abline(v = De, lty = 2, col = "red") lines(x = c(0,De), y = c(0,0), lty = 2, col = "red") } } ## check/set mtext mtext <- if ("mtext" %in% names(list(...))) { list(...)$mtext } else { if(mode != "alternate"){ substitute(D[e] == De, list(De = paste( round(abs(De), digits = 2), "\u00B1", format(De.Error, scientific = 
TRUE, digits = 2), " | fit: ", fit.method ))) }else{ "" } } ##TEXT #Insert fit and result try(mtext(side = 3, mtext, line = 0, cex = 0.8 * cex.global), silent = TRUE) #write error message in plot if De is NaN try(if (De == "NaN") { text( sample[2, 1], 0, "Error: De could not be calculated!", adj = c(0, 0), cex = 0.8, col = "red" ) }, silent = TRUE) ##LEGEND #plot legend if (mode == "interpolation") { legend( "topleft", c("REG point", "REG point repeated", "REG point 0"), pch = c(19, 2, 1), cex = 0.8 * cex.global, bty = "n" ) }else{ legend( "bottomright", c("Dose point", "Dose point rep.", "Dose point 0"), pch = c(19, 2, 1), cex = 0.8 * cex.global, bty = "n" ) } ##plot only if wanted if (output.plot == TRUE & output.plotExtended == TRUE) { ##HIST #try to plot histogram of De values from the Monte Carlo simulation if (output.plotExtended.single != TRUE) { par(cex = 0.7 * cex.global) } ##(A) Calculate histogram data try(histogram <- hist(x.natural, plot = FALSE), silent = TRUE) #to avoid errors plot only if histogram exists if (exists("histogram") && length(histogram$counts) > 2) { ##calculate normal distribution curves for overlay norm.curve.x <- seq(min(x.natural, na.rm = TRUE), max(x.natural, na.rm = TRUE), length = 101) norm.curve.y <- dnorm( norm.curve.x, mean = mean(x.natural, na.rm = TRUE), sd = sd(x.natural, na.rm = TRUE) ) ##plot histogram histogram <- try(hist( x.natural, xlab = xlab, ylab = "Frequency", main = "MC runs", freq = FALSE, border = "white", axes = FALSE, ylim = c(0, max(norm.curve.y)), sub = paste0("valid fits = ", length(na.exclude(x.natural)), "/",NumberIterations.MC), col = "grey" ), silent = TRUE) if (!is(histogram, "try-error")) { ##add axes axis(side = 1) axis( side = 2, at = seq(min(histogram$density), max(histogram$density), length = 5), labels = round(seq( min(histogram$counts), max(histogram$counts), length = 5 ), digits = 0) ) ##add norm curve lines(norm.curve.x, norm.curve.y, col = "red") ##add rug rug(x.natural) ##write De + Error 
from Monte Carlo simulation + write quality of error estimation try(mtext(side = 3, substitute(D[e[MC]] == De, list( De = paste( abs(round(De.MonteCarlo, 2)), "\u00B1", format(De.Error, scientific = TRUE, digits = 2), " | diff. = ", abs(round((abs(abs(De) - De.MonteCarlo) / abs(De)) * 100,1)), "%" ) )), cex = 0.6 * cex.global), silent = TRUE) }else{ plot_check <- histogram } } else { plot_check <- try(plot( NA, NA, xlim = c(0, 10), ylim = c(0, 10), main = expression(paste(D[e], " from MC runs"))), silent = TRUE ) if(!is(plot_check,"try-error")) text(5, 5, "not available") }#end ifelse ##PLOT #PLOT test dose response curve if available if not plot not available #plot Tx/Tn value for sensitvity change if (!is(plot_check, "try-error")) { if ("TnTx" %in% colnames(sample) == TRUE) { plot( 1:length(sample[, "TnTx"]), sample[1:(length(sample[, "TnTx"])), "TnTx"] / sample[1, "TnTx"], xlab = "SAR cycle", ylab = expression(paste(T[x] / T[n])), main = "Test-dose response", type = "o", pch = 20, ) ##LINES #plot 1 line lines(c(1, length(sample[, "TnTx"])), c(1, 1), lty = 2, col = "gray") } else { plot( NA, NA, xlim = c(0, 10), ylim = c(0, 10), main = "Test-dose response" ) text(5, 5, "not available\n no TnTx column") }#end if else } ## FUN by R Luminescence Team if (fun == TRUE) { sTeve() } }#endif::output.plotExtended }#end if plotOutput ##reset graphic device if the plotting failed! if(is(plot_check, "try-error")){ try(stop("[plot_GrowthCurve()] Figure margins too large, nothing plotted, but results returned!", call. 
= FALSE),) dev.off() } } # Output ------------------------------------------------------------------ ##calculate HPDI HPDI <- matrix(c(NA,NA,NA,NA), ncol = 4) if(!any(is.na(x.natural))){ HPDI <- cbind( .calc_HPDI(x.natural, prob = 0.68)[1, ,drop = FALSE], .calc_HPDI(x.natural, prob = 0.95)[1, ,drop = FALSE]) } output <- try(data.frame( De = abs(De), De.Error = De.Error, D01 = D01, D01.ERROR = D01.ERROR, D02 = D02, D02.ERROR = D02.ERROR, Dc = Dc, De.MC = De.MonteCarlo, Fit = fit.method, HPDI68_L = HPDI[1,1], HPDI68_U = HPDI[1,2], HPDI95_L = HPDI[1,3], HPDI95_U = HPDI[1,4] ), silent = TRUE ) ##make RLum.Results object output.final <- set_RLum( class = "RLum.Results", data = list( De = output, De.MC = x.natural, Fit = fit, Formula = fit_formula ), info = list( call = sys.call() ) ) invisible(output.final) } # Helper functions in plot_GrowthCurve() -------------------------------------- #'@title Replace coefficients in formula #' #'@description #' #'Replace the parameters in a fitting function by the true, fitted values. 
#'This way the results can be easily used by the other functions
#'
#'@param f [nls] or [lm] (**required**): the output object of the fitting
#'
#'@returns Returns an [expression]
#'
#'@md
#'@noRd
.replace_coef <- function(f) {
  ## get the model formula (right-hand side) as a character string and the
  ## fitted coefficients, depending on the class of the fit object
  if(inherits(f, "nls")) {
    str <- as.character(f$m$formula())[3]
    param <- coef(f)

  } else {
    ## for lm objects the quadratic formula used elsewhere in this file is
    ## assumed; coefficients not returned by the fit remain 0
    str <- "a * x + b * x^2 + n"
    param <- c(n = 0, a = 0, b = 0)

    if(!"(Intercept)" %in% names(coef(f)))
      param[2:(length(coef(f))+1)] <- coef(f)
    else
      param[1:length(coef(f))] <- coef(f)
  }

  ## replace parameter names by their fitted values; process longer names
  ## first so that a short name (e.g. "a") can never clobber a longer name
  ## it is a prefix of (e.g. "a1" from the EXP+EXP fit)
  for(i in order(nchar(names(param)), decreasing = TRUE))
    str <- gsub(
      pattern = names(param)[i],
      replacement = format(param[i], digits = 3, scientific = TRUE),
      x = str,
      fixed = TRUE)

  ## return as expression
  return(parse(text = str))
}

#'@title Convert function to formula
#'
#'@description The fitting functions are provided as functions, however, later
#'it is easier to work with them as expressions; this function converts them
#'to a formula
#'
#'@param f [function] (**required**): function to be converted
#'
#'@param env [environment] (*with default*): environment for the formula
#'creation. This argument is required otherwise it can cause all kind of
#'very complicated to-track-down errors when R tries to access the function
#'stack
#'
#'@md
#'@noRd
.toFormula <- function(f, env) {
  ## deparse the function into its source lines
  tmp <- deparse(f)

  ## set formula
  ## this is very fragile and works only if the functions are constructed
  ## without {} brackets, otherwise it will not work in combination
  ## of covr and testthat
  tmp_formula <- as.formula(
    paste0("y ~ ", paste(tmp[-1], collapse = "")),
    env = env)

  return(tmp_formula)
}
Luminescence/R/merge_RLum.Data.Curve.R0000644000176200001440000002150414264017373017212 0ustar liggesusers#' Merge function for RLum.Data.Curve S4 class objects
#'
#' Function allows merging of RLum.Data.Curve objects in different ways
#'
#' This function allows merging of [RLum.Data.Curve-class]
#' objects without touching the objects itself.
Merging is always applied on
#' the 2nd column of the data matrix of the object.
#'
#' **Supported merge operations are [RLum.Data.Curve-class]**
#'
#' `"sum"`
#'
#' All count values will be summed up using the function [rowSums].
#'
#' `"mean"`
#'
#' The mean over the count values is calculated using the function
#' [rowMeans].
#'
#' `"median"`
#'
#' The median over the count values is calculated using the function
#' [matrixStats::rowMedians].
#'
#' `"sd"`
#'
#' The standard deviation over the count values is calculated using the function
#' [matrixStats::rowSds].
#'
#' `"var"`
#'
#' The variance over the count values is calculated using the function
#' [matrixStats::rowVars].
#'
#' `"min"`
#'
#' The min values from the count values are chosen using the function
#' [matrixStats::rowMins][matrixStats::rowRanges].
#'
#' `"max"`
#'
#' The max values from the count values are chosen using the function
#' [matrixStats::rowMaxs][matrixStats::rowRanges].
#'
#' `"append"`
#'
#' Appends count values of all curves to one combined data curve. The channel width
#' is automatically re-calculated, but requires a constant channel width of the
#' original data.
#'
#' `"-"`
#'
#' The row sums of the last objects are subtracted from the first object.
#'
#' `"*"`
#'
#' The row sums of the last objects are multiplied with the first object.
#'
#' `"/"`
#'
#' Values of the first object are divided by row sums of the last objects.
#'
#' @param object [list] of [RLum.Data.Curve-class] (**required**):
#' list of S4 objects of class `RLum.Curve`.
#'
#' @param merge.method [character] (**required**):
#' method for combining of the objects, e.g. `'mean'`, `'sum'`, see details for
#' further information and allowed methods. Note: Elements in slot info will
#' be taken from the first curve in the list.
#'
#' @param method.info [numeric] (*optional*):
#' allows specifying how info elements of the input objects are combined,
#' e.g.
`1` means that just the elements from the first object are kept,
#' `2` keeps only the info elements from the 2nd object etc.
#' If nothing is provided all elements are combined.
#'
#' @return Returns an [RLum.Data.Curve-class] object.
#'
#' @note
#' The information from the slot `recordType` is taken from the first
#' [RLum.Data.Curve-class] object in the input list. The slot
#' 'curveType' is filled with the name `merged`.
#'
#' @section S3-generic support:
#'
#' This function is fully operational via S3-generics:
#' ``+``, ``-``, ``/``, ``*``, `merge`
#'
#' @section Function version: 0.2.1
#'
#' @author
#' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#' @seealso [merge_RLum], [RLum.Data.Curve-class]
#'
#'
#' @keywords utilities internal
#'
#' @examples
#'
#'
#' ##load example data
#' data(ExampleData.XSYG, envir = environment())
#'
#' ##grep first and 3rd TL curves
#' TL.curves <- get_RLum(OSL.SARMeasurement$Sequence.Object, recordType = "TL (UVVIS)")
#' TL.curve.1 <- TL.curves[[1]]
#' TL.curve.3 <- TL.curves[[3]]
#'
#' ##plot single curves
#' plot_RLum(TL.curve.1)
#' plot_RLum(TL.curve.3)
#'
#' ##divide the 3rd curve by the 1st and plot the result
#' TL.curve.merged <- merge_RLum.Data.Curve(list(TL.curve.3, TL.curve.1), merge.method = "/")
#' plot_RLum(TL.curve.merged)
#'
#' @md
#' @export
merge_RLum.Data.Curve <- function(
  object,
  merge.method = "mean",
  method.info
){
  # Integrity checks ----------------------------------------------------------------------------

  ##(1) check if every element is of class RLum.Data.Curve and, at the same
  ##time, collect the record types for the similarity check below
  temp.recordType.test <- sapply(1:length(object), function(x){
    if(!inherits(object[[x]], "RLum.Data.Curve")){
      temp.text <- paste(
        "[merge_RLum.Data.Curve()]: At least object", x,
        "is not of class 'RLum.Data.Curve'!")
      stop(temp.text, call. = FALSE)
    }

    ##provide class of objects
    return(object[[x]]@recordType)
  })

  ##(2) Check for similar record types
  if(length(unique(temp.recordType.test)) > 1){
    stop.text <- paste0(
      "[merge_RLum.Data.Curve()] only similar record types are supported, you are trying to merge: ",
      paste0("'", unique(temp.recordType.test), "'", collapse = ", "))
    stop(stop.text)
  }

  # Merge objects ----------------------------------------------------------------

  ##merge data objects
  ##problem ... how to handle data with different resolution or length?

  ##(1) build new data matrix
  ##first find shortest object
  check.length <- vapply(object, function(x) nrow(x@data), numeric(1))

  ## do something about it
  temp.matrix <- .warningCatcher(sapply(1:length(object), function(x){
    ##check if the objects are of equal length
    if (length(unique(check.length)) != 1) {
      ##but we have to at least check the resolution (roughly)
      if (round(diff(object[[x]]@data[,1]),1)[1] != round(diff(object[[1]]@data[,1]),1)[1])
        stop("[merge_RLum.Data.Curve()] The objects do not seem to have the same channel resolution!",
             call. = FALSE)

      ## either way, throw a warning
      warning("[merge_RLum.Data.Curve()] The number of channels between the curves differs. Resulting curve has the length of shortest curve.",
              call. = FALSE)

      ##if this is OK, we can continue and shorten the rest of the objects
      return(object[[x]]@data[1:min(check.length),2])

    }else{
      object[[x]]@data[,2]
    }
  }))

  ##(2) apply selected method for merging
  if(merge.method == "sum"){
    temp.matrix <- rowSums(temp.matrix)

  }else if(merge.method == "mean"){
    temp.matrix <- rowMeans(temp.matrix)

  }else if(merge.method == "median"){
    temp.matrix <- matrixStats::rowMedians(temp.matrix)

  }else if(merge.method == "sd"){
    temp.matrix <- matrixStats::rowSds(temp.matrix)

  }else if(merge.method == "var"){
    temp.matrix <- matrixStats::rowVars(temp.matrix)

  }else if(merge.method == "max"){
    temp.matrix <- matrixStats::rowMaxs(temp.matrix)

  }else if(merge.method == "min"){
    temp.matrix <- matrixStats::rowMins(temp.matrix)

  }else if(merge.method == "append") {
    ##concatenate all curves into one long count vector
    temp.matrix <- sapply(temp.matrix, c)

  }else if(merge.method == "-"){
    if(ncol(temp.matrix) > 2){
      temp.matrix <- temp.matrix[,1] - rowSums(temp.matrix[,-1])
    }else{
      temp.matrix <- temp.matrix[,1] - temp.matrix[,2]
    }

  }else if(merge.method == "*"){
    if(ncol(temp.matrix) > 2){
      temp.matrix <- temp.matrix[,1] * rowSums(temp.matrix[,-1])
    }else{
      temp.matrix <- temp.matrix[,1] * temp.matrix[,2]
    }

  }else if(merge.method == "/"){
    if(ncol(temp.matrix) > 2){
      temp.matrix <- temp.matrix[,1] / rowSums(temp.matrix[,-1])
    }else{
      temp.matrix <- temp.matrix[,1] / temp.matrix[,2]
    }

    ##get index of inf values (division by zero)
    id.inf <- which(is.infinite(temp.matrix))

    ##replace with 0 and throw a warning, but only if something was
    ##actually replaced (the original code warned unconditionally)
    if(length(id.inf) > 0){
      temp.matrix[id.inf] <- 0
      warning(paste0(length(id.inf), " 'inf' values have been replaced by 0 in the matrix."),
              call. = FALSE)
    }

  }else{
    stop("[merge_RLum.Data.Curve()] unsupported or unknown merge method!", call. = FALSE)
  }

  ##add first column
  #If we append the data of the second to the first curve we have to recalculate
  #the x-values (probably time/channel). The difference should always be the
  #same, so we just expand the sequence if this is true. If this is not true,
  #we revert to the default behaviour (i.e., append the x values)
  #NOTE: the original condition tested only length(unique(diff(...))), which is
  #truthy for any curve with >= 2 channels; the intended test (cf. the comment
  #above and the roxygen docs) is for a single, constant channel width, i.e. == 1
  if (merge.method[1] == "append" && length(unique(diff(object[[1]]@data[,1]))) == 1) {
    step <- unique(diff(object[[1]]@data[,1]))
    newx <- seq(from = min(object[[1]]@data[,1]), by = step, length.out = sum(check.length))
    temp.matrix <- cbind(newx, temp.matrix)

  } else {
    temp.matrix <- cbind(object[[1]]@data[1:min(check.length),1], temp.matrix)
  }

  ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  ##merge info objects as simple as possible ... just keep them all ... other possibility
  ##would be to choose one of the input objects
  ##unlist is needed here, as otherwise it would cause unexpected behaviour further using
  ##the RLum.object
  if(missing(method.info)){
    temp.info <- unlist(lapply(1:length(object), function(x){
      object[[x]]@info
    }), recursive = FALSE)

  }else{
    temp.info <- object[[method.info]]@info
  }

  # Build new RLum.Data.Curve object --------------------------------------------------------------
  temp.new.Data.Curve <- set_RLum(
    class = "RLum.Data.Curve",
    originator = "merge_RLum.Data.Curve",
    recordType = object[[1]]@recordType,
    curveType = "merged",
    data = temp.matrix,
    info = temp.info,
    .pid = unlist(lapply(object, function(x) { x@.uid }))
  )

  # Return object -------------------------------------------------------------------------------
  return(temp.new.Data.Curve)
}
Luminescence/R/replicate_RLum.R0000644000176200001440000000125614264017373016132 0ustar liggesusers#' General replication function for RLum S4 class objects
#'
#' Function replicates RLum S4 class objects and returns a list for this objects
#'
#' @param object [RLum-class] (**required**):
#' an [RLum-class] object
#'
#' @param times [integer] (*optional*):
#' number for times each element is repeated element
#'
#' @return Returns a [list] of the object to be repeated
#'
#' @section Function version: 0.1.0
#'
#' @author
#' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#' @seealso [RLum-class]
#'
#' @keywords utilities
#' #' @md #' @export setGeneric("replicate_RLum", function (object, times = NULL) { standardGeneric("replicate_RLum") }) Luminescence/R/calc_FadingCorr.R0000644000176200001440000003463314264017373016230 0ustar liggesusers#' Apply a fading correction according to Huntley & Lamothe (2001) for a given #' g-value and a given tc #' #' This function solves the equation used for correcting the fading affected age #' including the error for a given g-value according to Huntley & Lamothe (2001). #' #' As the g-value slightly depends on the time between irradiation and the prompt measurement, #' this is tc, always a tc value needs to be provided. If the g-value was normalised to a distinct #' time or evaluated with a different tc value (e.g., external irradiation), also the tc value #' for the g-value needs to be provided (argument `tc.g_value` and then the g-value is recalculated #' to tc of the measurement used for estimating the age applying the following equation: #' #' \deqn{\kappa_{tc} = \kappa_{tc.g} / (1 - \kappa_{tc.g} * log(tc/tc.g))} #' #' where #' #' \deqn{\kappa_{tc.g} = g / 100 / log(10)} #' #' with `log` the natural logarithm. #' #' #' The error of the fading-corrected age is determined using a Monte Carlo #' simulation approach. Solving of the equation is realised using #' [uniroot]. Large values for `n.MC` will significantly #' increase the computation time.\cr #' #' **`n.MC = 'auto'`** #' #' The error estimation based on a stochastic process, i.e. for a small number of MC runs the calculated #' error varies considerably every time the function is called, even with the same input values. #' The argument option `n.MC = 'auto'` tries to find a stable value for the standard error, i.e. #' the standard deviation of values calculated during the MC runs (`age.corr.MC`), #' within a given precision (2 digits) by increasing the number of MC runs stepwise and #' calculating the corresponding error. 
#' #' If the determined error does not differ from the 9 values calculated previously #' within a precision of (here) 3 digits the calculation is stopped as it is assumed that the error #' is stable. Please note that (a) the duration depends on the input values as well as on #' the provided computation resources and it may take a while, (b) the length (size) of the output #' vector `age.corr.MC`, where all the single values produced during the MC runs are stored, #' equals the number of MC runs (here termed observations). #' #' To avoid an endless loop the calculation is stopped if the number of observations exceeds 10^7. #' This limitation can be overwritten by setting the number of MC runs manually, #' e.g. `n.MC = 10000001`. Note: For this case the function is not checking whether the calculated #' error is stable.\cr #' #' **`seed`** #' #' This option allows to recreate previously calculated results by setting the seed #' for the R random number generator (see [set.seed] for details). This option #' should not be mixed up with the option **`n.MC = 'auto'`**. The results may #' appear similar, but they are not comparable!\cr #' #' **FAQ**\cr #' #' Q: Which tc value is expected?\cr #' #' A: tc is the time in seconds between irradiation and the prompt measurement applied during your #' De measurement. However, this tc might differ from the tc used for estimating the g-value. In the #' case of an SAR measurement tc should be similar, however, if it differs, you have to provide this #' tc value (the one used for estimating the g-value) using the argument `tc.g_value`.\cr #' #' @param age.faded [numeric] [vector] (**required**): #' uncorrected age with error in ka (see example) #' #' @param g_value [vector] (**required**): #' g-value and error obtained from separate fading measurements (see example). 
#' Alternatively an [RLum.Results-class] object can be provided produced by the function #' [analyse_FadingMeasurement], in this case `tc` is set automatically #' #' @param tc [numeric] (**required**): #' time in seconds between irradiation and the prompt measurement (cf. Huntley & Lamothe 2001). #' Argument will be ignored if `g_value` was an [RLum.Results-class] object #' #' @param tc.g_value [numeric] (*with default*): #' the time in seconds between irradiation and the prompt measurement used for estimating the g-value. #' If the g-value was normalised to, e.g., 2 days, this time in seconds (i.e., 172800) should be given here. #' If nothing is provided the time is set to tc, which is usual case for g-values obtained using the #' SAR method and g-values that had been not normalised to 2 days. #' #' @param n.MC [integer] (*with default*): #' number of Monte Carlo simulation runs for error estimation. #' If `n.MC = 'auto'` is used the function tries to find a 'stable' error for the age. #' **Note:** This may take a while! #' #' @param seed [integer] (*optional*): #' sets the seed for the random number generator in R using [set.seed] #' #' @param interval [numeric] (*with default*): #' a vector containing the end-points (age interval) of the interval to be searched for the root in 'ka'. #' This argument is passed to the function [stats::uniroot] used for solving the equation. 
#' #' @param txtProgressBar [logical] (*with default*): #' enables or disables [txtProgressBar] #' #' @param verbose [logical] (*with default*): #' enables or disables terminal output #' #' #' @return Returns an S4 object of type [RLum.Results-class].\cr #' #' Slot: **`@data`**\cr #' \tabular{lll}{ #' **Object** \tab **Type** \tab **Comment** \cr #' `age.corr` \tab [data.frame] \tab Corrected age \cr #' `age.corr.MC` \tab [numeric] \tab MC simulation results with all possible ages from that simulation \cr #' } #' #' Slot: **`@info`**\cr #' #' \tabular{lll}{ #' **Object** \tab **Type** \tab **Comment** \cr #' `info` \tab [character] \tab the original function call #' } #' #' #' @note Special thanks to Sébastien Huot for his support and clarification via e-mail. #' #' #' @section Function version: 0.4.3 #' #' #' @author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #' #' @seealso [RLum.Results-class], [analyse_FadingMeasurement], [get_RLum], [uniroot] #' #' #' @references #' Huntley, D.J., Lamothe, M., 2001. Ubiquity of anomalous fading #' in K-feldspars and the measurement and correction for it in optical dating. #' Canadian Journal of Earth Sciences, 38, 1093-1106. 
#' #' #' @keywords datagen #' #' #' @examples #' #' ##run the examples given in the appendix of Huntley and Lamothe, 2001 #' #' ##(1) faded age: 100 a #' results <- calc_FadingCorr( #' age.faded = c(0.1,0), #' g_value = c(5.0, 1.0), #' tc = 2592000, #' tc.g_value = 172800, #' n.MC = 100) #' #' ##(2) faded age: 1 ka #' results <- calc_FadingCorr( #' age.faded = c(1,0), #' g_value = c(5.0, 1.0), #' tc = 2592000, #' tc.g_value = 172800, #' n.MC = 100) #' #' ##(3) faded age: 10.0 ka #' results <- calc_FadingCorr( #' age.faded = c(10,0), #' g_value = c(5.0, 1.0), #' tc = 2592000, #' tc.g_value = 172800, #' n.MC = 100) #' #' ##access the last output #' get_RLum(results) #' #' @md #' @export calc_FadingCorr <- function( age.faded, g_value, tc = NULL, tc.g_value = tc, n.MC = 10000, seed = NULL, interval = c(0.01,500), txtProgressBar = TRUE, verbose = TRUE ){ # Integrity checks --------------------------------------------------------------------------- stopifnot(!missing(age.faded), !missing(g_value)) ##check input if(inherits(g_value, "RLum.Results")){ if(g_value@originator == "analyse_FadingMeasurement"){ tc <- get_RLum(g_value)[["TC"]] g_value <- as.numeric(get_RLum(g_value)[,c("FIT", "SD")]) }else{ try({ stop("[calc_FadingCorr()] Unknown originator for the provided RLum.Results object via 'g_value'!", call. = FALSE) }) return(NULL) } } ##check if tc is still NULL if(is.null(tc[1])) stop("[calc_FadingCorr()] 'tc' needs to be set!", call. = FALSE) ##check type if(!all(is(age.faded, "numeric") && is(g_value, "numeric") && is(tc, "numeric"))) stop("[calc_FadingCorr()] 'age.faded', 'g_value' and 'tc' need be of type numeric!", call. 
= FALSE) ##============================================================================## ##DEFINE FUNCTION ##============================================================================## f <- function(x, af, kappa, tc) { 1 - kappa * (log(x / tc) - 1) - (af / x) } ##============================================================================## ##CALCULATION ##============================================================================## ##recalculate the g-value to the given tc ... should be similar ##of tc = tc.g_value ##re-calculation thanks to the help by Sebastien Huot, e-mail: 2016-07-19 ##Please note that we take the vector for the g_value here k0 <- g_value / 100 / log(10) k1 <- k0 / (1 - k0 * log(tc[1]/tc.g_value[1])) g_value <- 100 * k1 * log(10) ##calculate kappa (equation [5] in Huntley and Lamothe, 2001) kappa <- g_value / log(10) / 100 ##transform tc in ka years ##duration of the year over a long term taken from http://wikipedia.org tc <- tc[1] / 60 / 60 / 24 / 365.2425 / 1000 tc.g_value <- tc.g_value[1] / 60 / 60 / 24 / 365.2425 / 1000 ##calculate mean value temp <- try(suppressWarnings(uniroot( f, interval = interval, tol = 0.0001, tc = tc, extendInt = "yes", af = age.faded[1], kappa = kappa[1], check.conv = TRUE )), silent = TRUE) if(inherits(temp, "try-error")){ message("[calc_FadingCorr()] No solution found, return NULL. This usually happens for very large, unrealistic g-values.") return(NULL) } ##--------------------------------------------------------------------------## ##Monte Carlo simulation for error estimation tempMC.sd.recent <- NA tempMC.sd.count <- 1:10 counter <- 1 ##show some progression bar of the process if (n.MC == 'auto') { n.MC.i <- 10000 cat("\n[calc_FadingCorr()] ... 
trying to find stable error value ...") if (txtProgressBar) { cat("\n -------------------------------------------------------------\n") cat(paste0(" ",paste0("(",0:9,")", collapse = " "), "\n")) } }else{ n.MC.i <- n.MC } # Start loop --------------------------------------------------------------------------------- ##set object and preallocate memory tempMC <- vector("numeric", length = 1e+07) tempMC[] <- NA i <- 1 j <- n.MC.i while(length(unique(tempMC.sd.count))>1 | j > 1e+07){ ##set previous if(!is.na(tempMC.sd.recent)){ tempMC.sd.count[counter] <- tempMC.sd.recent } ##set seed if (!is.null(seed)) set.seed(seed) ##pre-allocate memory g_valueMC <- vector("numeric", length = n.MC.i) age.fadeMC <- vector("numeric", length = n.MC.i) kappaMC <- vector("numeric", length = n.MC.i) ##set-values g_valueMC <- rnorm(n.MC.i,mean = g_value[1],sd = g_value[2]) age.fadedMC <- rnorm(n.MC.i,mean = age.faded[1],sd = age.faded[2]) kappaMC <- g_valueMC / log(10) / 100 ##calculate for all values tempMC[i:j] <- suppressWarnings(vapply(X = 1:length(age.fadedMC), FUN = function(x) { temp <- try(uniroot( f, interval = interval, tol = 0.001, tc = tc, af = age.fadedMC[[x]], kappa = kappaMC[[x]], check.conv = TRUE, maxiter = 1000, extendInt = "yes" ), silent = TRUE) ##otherwise the automatic error value finding ##will never work if(!is(temp,"try-error") && temp$root<1e8) { return(temp$root) } else{ return(NA) } }, FUN.VALUE = 1)) i <- j + 1 j <- j + n.MC.i ##stop here if a fixed value is set if(n.MC != 'auto'){ break } ##set recent tempMC.sd.recent <- round(sd(tempMC, na.rm = TRUE), digits = 3) if (counter %% 10 == 0) { counter <- 1 }else{ counter <- counter + 1 } ##show progress in terminal if (txtProgressBar) { text <- rep("CHECK",10) if (counter %% 2 == 0) { text[1:length(unique(tempMC.sd.count))] <- "-----" }else{ text[1:length(unique(tempMC.sd.count))] <- " CAL " } cat(paste("\r ",paste(rev(text), collapse = " "))) } } 
##--------------------------------------------------------------------------## ##remove all NA values from tempMC tempMC <- tempMC[!is.na(tempMC)] ##obtain corrected age age.corr <- data.frame( AGE = round(temp$root, digits = 4), AGE.ERROR = round(sd(tempMC), digits = 4), AGE_FADED = age.faded[1], AGE_FADED.ERROR = age.faded[2], G_VALUE = g_value[1], G_VALUE.ERROR = g_value[2], KAPPA = kappa[1], KAPPA.ERROR = kappa[2], TC = tc, TC.G_VALUE = tc.g_value, n.MC = n.MC, OBSERVATIONS = length(tempMC), SEED = ifelse(is.null(seed), NA, seed) ) ##============================================================================## ##OUTPUT VISUAL ##============================================================================## if(verbose) { cat("\n\n[calc_FadingCorr()]\n") cat("\n >> Fading correction according to Huntley & Lamothe (2001)") if (tc != tc.g_value) { cat("\n >> g-value re-calculated for the given tc") } cat(paste( "\n\n .. used g-value:\t", round(g_value[1], digits = 3), " \u00b1 ", round(g_value[2], digits = 3), " %/decade", sep = "" )) cat(paste( "\n .. used tc:\t\t", format(tc, digits = 4, scientific = TRUE), " ka", sep = "" )) cat(paste0( "\n .. 
used kappa:\t\t", round(kappa[1], digits = 4), " \u00b1 ", round(kappa[2], digits = 4) )) cat("\n ----------------------------------------------") cat(paste0("\n seed: \t\t\t", ifelse(is.null(seed), NA, seed))) cat(paste0("\n n.MC: \t\t\t", n.MC)) cat(paste0( "\n observations: \t\t", format(length(tempMC), digits = 2, scientific = TRUE), sep = "" )) cat("\n ----------------------------------------------") cat(paste0( "\n Age (faded):\t\t", round(age.faded[1], digits = 4), " ka \u00b1 ", round(age.faded[2], digits = 4), " ka" )) cat(paste0( "\n Age (corr.):\t\t", round(age.corr[1], digits = 4), " ka \u00b1 ", round(age.corr[2], digits = 4), " ka" )) cat("\n ---------------------------------------------- \n") } ##============================================================================## ##OUTPUT RLUM ##============================================================================## return(set_RLum( class = "RLum.Results", data = list(age.corr = age.corr, age.corr.MC = tempMC), info = list(call = sys.call()) )) } Luminescence/R/calc_CosmicDoseRate.R0000644000176200001440000005144514236146743017061 0ustar liggesusers#' Calculate the cosmic dose rate #' #' This function calculates the cosmic dose rate taking into account the soft- #' and hard-component of the cosmic ray flux and allows corrections for #' geomagnetic latitude, altitude above sea-level and geomagnetic field #' changes. #' #' This function calculates the total cosmic dose rate considering both the #' soft- and hard-component of the cosmic ray flux. #' #' **Internal calculation steps** #' #' (1) #' Calculate total depth of all absorber in hg/cm^2 (1 hg/cm^2 = 100 g/cm^2) #' #' \deqn{absorber = depth_1*density_1 + depth_2*density_2 + ... + depth_n*density_n} #' #' #' (2) #' If `half.depth = TRUE` #' #' \deqn{absorber = absorber/2} #' #' #' (3) #' Calculate cosmic dose rate at sea-level and 55 deg. latitude #' #' a) If absorber is > 167 g/cm^2 (only hard-component; Allkofer et al. 
1975): #' apply equation given by Prescott & Hutton (1994) (c.f. Barbouti & Rastin #' 1983) #' #' \deqn{D0 = C/(((absorber+d)^\alpha+a)*(absober+H))*exp(-B*absorber)} #' #' b) If absorber is < 167 g/cm^2 (soft- and hard-component): derive D0 from #' Fig. 1 in Prescott & Hutton (1988). #' #' #' (4) #' Calculate geomagnetic latitude (Prescott & Stephan 1982, Prescott & #' Hutton 1994) #' #' \deqn{\lambda = arcsin(0.203*cos(latitude)*cos(longitude-291)+0.979* #' sin(latitude))} #' #' #' (5) #' Apply correction for geomagnetic latitude and altitude above sea-level. #' Values for F, J and H were read from Fig. 3 shown in Prescott & Stephan #' (1982) and fitted with 3-degree polynomials for lambda < 35 degree and a #' linear fit for lambda > 35 degree. #' #' \deqn{Dc = D0*(F+J*exp((altitude/1000)/H))} #' #' #' (6) #' Optional: Apply correction for geomagnetic field changes in the last #' 0-80 ka (Prescott & Hutton 1994). Correction and altitude factors are given #' in Table 1 and Fig. 1 in Prescott & Hutton (1994). Values for altitude #' factor were fitted with a 2-degree polynomial. The altitude factor is #' operated on the decimal part of the correction factor. #' #' \deqn{Dc' = Dc*correctionFactor} #' #' #' **Usage of `depth` and `density`** #' #' (1) If only one value for depth and density is provided, the cosmic dose #' rate is calculated for exactly one sample and one absorber as overburden #' (i.e. `depth*density`). #' #' (2) In some cases it might be useful to calculate the cosmic dose rate for a #' sample that is overlain by more than one absorber, e.g. in a profile with #' soil layers of different thickness and a distinct difference in density. #' This can be calculated by providing a matching number of values for #' `depth` and `density` (e.g. `depth = c(1, 2), density = c(1.7, 2.4)`) #' #' (3) Another possibility is to calculate the cosmic dose rate for more than #' one sample of the same profile. 
This is done by providing more than one #' values for `depth` and only one for `density`. For example, #' `depth = c(1, 2, 3)` and `density = 1.7` will calculate the cosmic dose rate #' for three samples in 1, 2 and 3 m depth in a sediment of density 1.7 g/cm^3. #' #' @param depth [numeric] (**required**): #' depth of overburden (m). For more than one absorber use \cr #' `c(depth_1, depth_2, ..., depth_n)` #' #' @param density [numeric] (**required**): #' average overburden density (g/cm^3). For more than one absorber use \cr #' `c(density_1, density_2, ..., density_n)` #' #' @param latitude [numeric] (**required**): #' latitude (decimal degree), N positive #' #' @param longitude [numeric] (**required**): #' longitude (decimal degree), E positive #' #' @param altitude [numeric] (**required**): #' altitude (m above sea-level) #' #' @param corr.fieldChanges [logical] (*with default*): #' correct for geomagnetic field changes after Prescott & Hutton (1994). #' Apply only when justified by the data. #' #' @param est.age [numeric] (*with default*): #' estimated age range (ka) for geomagnetic field change correction (0-80 ka allowed) #' #' @param half.depth [logical] (*with default*): #' How to overcome with varying overburden thickness. If `TRUE` only half the #' depth is used for calculation. Apply only when justified, i.e. when a constant #' sedimentation rate can safely be assumed. #' #' @param error [numeric] (*with default*): #' general error (percentage) to be implemented on corrected cosmic dose rate estimate #' #' @param ... further arguments (`verbose` to disable/enable console output). #' #' @return #' Returns a terminal output. 
In addition an #' [RLum.Results-class]-object is returned containing the #' following element: #' #' \item{summary}{[data.frame] summary of all relevant calculation results.} #' \item{args}{[list] used arguments} #' \item{call}{[call] the function call} #' #' The output should be accessed using the function [get_RLum] #' #' @note #' Despite its universal use the equation to calculate the cosmic dose #' rate provided by Prescott & Hutton (1994) is falsely stated to be valid from #' the surface to 10^4 hg/cm^2 of standard rock. The original expression by #' Barbouti & Rastin (1983) only considers the muon flux (i.e. hard-component) #' and is by their own definition only valid for depths between 10-10^4 #' hg/cm^2. #' #' Thus, for near-surface samples (i.e. for depths < 167 g/cm^2) the equation #' of Prescott & Hutton (1994) underestimates the total cosmic dose rate, as it #' neglects the influence of the soft-component of the cosmic ray flux. For #' samples at zero depth and at sea-level the underestimation can be as large #' as ~0.1 Gy/ka. In a previous article, Prescott & Hutton (1988) give another #' approximation of Barbouti & Rastin's equation in the form of #' #' \deqn{D = 0.21*exp(-0.070*absorber+0.0005*absorber^2)} #' #' which is valid for depths between 150-5000 g/cm^2. For shallower depths (< #' 150 g/cm^2) they provided a graph (Fig. 1) from which the dose rate can be #' read. #' #' As a result, this function employs the equation of Prescott & Hutton (1994) #' only for depths > 167 g/cm^2, i.e. only for the hard-component of the cosmic #' ray flux. Cosmic dose rate values for depths < 167 g/cm^2 were obtained from #' the "AGE" program (Gruen 2009) and fitted with a 6-degree polynomial curve #' (and hence reproduces the graph shown in Prescott & Hutton 1988). However, #' these values assume an average overburden density of 2 g/cm^3. 
#' #' It is currently not possible to obtain more precise cosmic dose rate values #' for near-surface samples as there is no equation known to the author of this #' function at the time of writing. #' #' #' @section Function version: 0.5.2 #' #' @author #' Christoph Burow, University of Cologne (Germany) #' #' @seealso [BaseDataSet.CosmicDoseRate] #' #' @references #' Allkofer, O.C., Carstensen, K., Dau, W.D., Jokisch, H., 1975. #' Letter to the editor. The absolute cosmic ray flux at sea level. Journal of #' Physics G: Nuclear and Particle Physics 1, L51-L52. #' #' Barbouti, A.I., Rastin, B.C., 1983. A study of the absolute intensity of muons at sea level #' and under various thicknesses of absorber. Journal of Physics G: Nuclear and #' Particle Physics 9, 1577-1595. #' #' Crookes, J.N., Rastin, B.C., 1972. An #' investigation of the absolute intensity of muons at sea-level. Nuclear #' Physics B 39, 493-508. #' #' Gruen, R., 2009. The "AGE" program for the #' calculation of luminescence age estimates. Ancient TL 27, 45-46. #' #' Prescott, J.R., Hutton, J.T., 1988. Cosmic ray and gamma ray dosimetry for #' TL and ESR. Nuclear Tracks and Radiation Measurements 14, 223-227. #' #' Prescott, J.R., Hutton, J.T., 1994. Cosmic ray contributions to dose rates #' for luminescence and ESR dating: large depths and long-term time variations. #' Radiation Measurements 23, 497-500. #' #' Prescott, J.R., Stephan, L.G., 1982. The contribution of cosmic radiation to the environmental dose for #' thermoluminescence dating. Latitude, altitude and depth dependences. PACT 6, 17-25. 
#' #' @examples #' #' ##(1) calculate cosmic dose rate (one absorber) #' calc_CosmicDoseRate(depth = 2.78, density = 1.7, #' latitude = 38.06451, longitude = 1.49646, #' altitude = 364, error = 10) #' #' ##(2a) calculate cosmic dose rate (two absorber) #' calc_CosmicDoseRate(depth = c(5.0, 2.78), density = c(2.65, 1.7), #' latitude = 38.06451, longitude = 1.49646, #' altitude = 364, error = 10) #' #' ##(2b) calculate cosmic dose rate (two absorber) and #' ##correct for geomagnetic field changes #' calc_CosmicDoseRate(depth = c(5.0, 2.78), density = c(2.65, 1.7), #' latitude = 12.04332, longitude = 4.43243, #' altitude = 364, corr.fieldChanges = TRUE, #' est.age = 67, error = 15) #' #' #' ##(3) calculate cosmic dose rate and export results to .csv file #' #calculate cosmic dose rate and save to variable #' results<- calc_CosmicDoseRate(depth = 2.78, density = 1.7, #' latitude = 38.06451, longitude = 1.49646, #' altitude = 364, error = 10) #' #' # the results can be accessed by #' get_RLum(results, "summary") #' #' #export results to .csv file - uncomment for usage #' #write.csv(results, file = "c:/users/public/results.csv") #' #' ##(4) calculate cosmic dose rate for 6 samples from the same profile #' ## and save to .csv file #' #calculate cosmic dose rate and save to variable #' results<- calc_CosmicDoseRate(depth = c(0.1, 0.5 , 2.1, 2.7, 4.2, 6.3), #' density = 1.7, latitude = 38.06451, #' longitude = 1.49646, altitude = 364, #' error = 10) #' #' #export results to .csv file - uncomment for usage #' #write.csv(results, file = "c:/users/public/results_profile.csv") #' #' @md #' @export calc_CosmicDoseRate<- function( depth, density, latitude, longitude, altitude, corr.fieldChanges = FALSE, est.age = NA, half.depth = FALSE, error = 10, ... ) { ##============================================================================## ## ... 
ARGUMENTS ##============================================================================## settings <- list(verbose = TRUE) settings <- modifyList(settings, list(...)) ##============================================================================## ## CONSISTENCY CHECK OF INPUT DATA ##============================================================================## if(any(depth < 0) || any(density < 0)) { cat(paste("\nNo negative values allowed for depth and density")) stop(domain=NA) } if(corr.fieldChanges == TRUE) { if(is.na(est.age) == TRUE) { cat(paste("\nCorrection for geomagnetic field changes requires", "an age estimate."), fill = FALSE) stop(domain=NA) } if(est.age > 80) { cat(paste("\nCAUTION: No geomagnetic field change correction for samples", "older >80 ka possible!"), fill = FALSE) corr.fieldChanges<- FALSE } } if(length(density) > length(depth)) { stop("\nIf you provide more than one value for density please", " provide an equal number of values for depth.", call. = FALSE) } ##============================================================================## ## CALCULATIONS ##============================================================================## # initialize parameter for Prescott & Hutton (1994) equation C<- 6072 B<- 0.00055 d<- 11.6 alpha<- 1.68 a<- 75 H<- 212 #variable needed to check if cosmic dose rate is calculated for more #than one sample profile.mode<- FALSE #calculate absorber (hgcm) of one depth and one absorber [single sample] if(length(depth)==1) { hgcm<- depth*density if(half.depth == TRUE) { hgcm<- hgcm/2 } } #calculate total absorber of n depths and n densities [single sample] if(length(depth)==length(density)){ hgcm<- 0 for(i in 1:length(depth)) { hgcm<- hgcm + depth[i]*density[i] } if(half.depth == TRUE) { hgcm<- hgcm/2 } } #if there are >1 depths and only one density, calculate #absorber for each sample [multi sample] if(length(depth) > length(density) & length(density) == 1) { profile.mode<- TRUE hgcm<- 1:length(depth) for(i in 
1:length(depth)) { hgcm[i]<- depth[i]*density } if(half.depth == TRUE) { hgcm<- hgcm/2 } profile.results<- data.frame(rbind(c(1:3)),cbind(1:length(depth))) colnames(profile.results)<- c("depth (m)", "d0 (Gy/ka)", "dc (Gy/ka)","dc_error (Gy/ka)") } for(i in 1:length(hgcm)) { # calculate cosmic dose rate at sea-level for geomagnetic latitude 55 degrees if(hgcm[i]*100 >= 167) { d0<- (C/((((hgcm[i]+d)^alpha)+a)*(hgcm[i]+H)))*exp(-B*hgcm[i]) } if(hgcm[i]*100 < 167) { temp.hgcm<- hgcm[i]*100 d0.ph<- (C/((((hgcm[i]+d)^alpha)+a)*(hgcm[i]+H)))*exp(-B*hgcm[i]) if(hgcm[i]*100 < 40) { d0<- -6*10^-8*temp.hgcm^3+2*10^-5*temp.hgcm^2-0.0025*temp.hgcm+0.2969 } else { d0<- 2*10^-6*temp.hgcm^2-0.0008*temp.hgcm+0.2535 } if(d0.ph > d0) { d0<- d0.ph } } # Calculate geomagnetic latitude gml.temp<- 0.203*cos((pi/180)*latitude)* cos(((pi/180)*longitude)-(291*pi/180))+0.979* sin((pi/180)*latitude) true.gml<- asin(gml.temp)/(pi/180) gml<- abs(asin(gml.temp)/(pi/180)) # Find values for F, J and H from graph shown in Prescott & Hutton (1994) # values were read from the graph and fitted with 3 degree polynomials and a # linear part if(gml < 36.5) { # Polynomial fit F_ph<- -7*10^-7*gml^3-8*10^-5*gml^2-0.0009*gml+0.3988 } else { # Linear fit F_ph<- -0.0001*gml + 0.2347 } if(gml < 34) { # Polynomial fit J_ph<- 5*10^-6*gml^3-5*10^-5*gml^2+0.0026*gml+0.5177 } else { # Linear fit J_ph<- 0.0005*gml + 0.7388 } if(gml < 36) { # Polynomial fit H_ph<- -3*10^-6*gml^3-5*10^-5*gml^2-0.0031*gml+4.398 } else { # Linear fit H_ph<- 0.0002*gml + 4.0914 } # Apply correction for geomagnetic latitude and altitude according to # Prescott & Hutton (1994) dc<- d0*(F_ph + J_ph*exp((altitude/1000)/H_ph)) ## Additional correction for geomagnetic field change if(corr.fieldChanges==TRUE) { if(gml <= 35) { # Correction matrix for geomagnetic field changes at # sea-level (Prescott & Hutton (1994), Table 1) corr.matrix<- data.frame(rbind(1:5),1:7) colnames(corr.matrix)<- c(0, 10, 20, 30, 35, ">35") rownames(corr.matrix)<- 
c("0-5","5-10","10-15","15-20","20-35","35-50", "50-80") corr.matrix[1,]<- c(0.97, 0.97, 0.98, 0.98, 0.98, 1.00) corr.matrix[2,]<- c(0.99, 0.99, 0.99, 0.99, 0.99, 1.00) corr.matrix[3,]<- c(1.00, 1.00, 1.00, 1.00, 1.00, 1.00) corr.matrix[4,]<- c(1.01, 1.01, 1.01, 1.00, 1.00, 1.00) corr.matrix[5,]<- c(1.02, 1.02, 1.02, 1.01, 1.00, 1.00) corr.matrix[6,]<- c(1.03, 1.03, 1.02, 1.01, 1.00, 1.00) corr.matrix[7,]<- c(1.02, 1.02, 1.02, 1.01, 1.00, 1.00) # Find corresponding correction factor for given geomagnetic latitude # determine column if(gml <= 5) { corr.c<- 1 } if(5 < gml) { if(gml <= 15) { corr.c<- 2 } } if(15 < gml){ if(gml <= 25) { corr.c<- 3 } } if(25 < gml){ if(gml <= 32.5) { corr.c<- 4 } } if(32.5 < gml){ if(gml <= 35) { corr.c<- 5 } } # find row if(est.age <= 5) { corr.fac<- corr.matrix[1,corr.c] } if(5 < est.age) { if(est.age <= 10) { corr.fac<- corr.matrix[2,corr.c] } } if(10 < est.age){ if(est.age <= 15) { corr.fac<- corr.matrix[3,corr.c] } } if(15 < est.age){ if(est.age <= 20) { corr.fac<- corr.matrix[4,corr.c] } } if(20 < est.age){ if(est.age <= 35) { corr.fac<- corr.matrix[5,corr.c] } } if(35 < est.age){ if(est.age <= 50) { corr.fac<- corr.matrix[6,corr.c] } } if(50 < est.age){ if(est.age <= 80) { corr.fac<- corr.matrix[7,corr.c] } } # Find altitude factor via fitted function 2-degree polynomial # This factor is only available for positive altitudes if(altitude > 0) { alt.fac<- -0.026*(altitude/1000)^2 + 0.6628*altitude/1000 + 1.0435 # Combine geomagnetic latitude correction with altitude # correction (figure caption of Fig. 
1 in Precott and Hutton (1994)) diff.one<- corr.fac - 1 corr.fac<- corr.fac + diff.one * alt.fac } # Final correction of cosmic dose rate dc<- dc * corr.fac if (settings$verbose) print(paste("corr.fac",corr.fac,"diff.one",diff.one,"alt.fac",alt.fac)) } else { if (settings$verbose) cat(paste("\n No geomagnetic field change correction necessary for geomagnetic latitude >35 degrees!")) } } # calculate error dc.err<- dc*error/100 # save intermediate results before next sample is calculated if(profile.mode==TRUE) { profile.results[i,1]<- round(depth[i],2) profile.results[i,2]<- round(d0,4) profile.results[i,3]<- round(dc,4) profile.results[i,4]<- round(dc.err,4) } }#END.OF.LOOP call<- sys.call() args<- list(depth = depth, density = density, latitude = latitude, longitude = longitude, altitude = altitude, corr.fieldChanges = corr.fieldChanges, est.age = est.age, half.depth = half.depth, error = error) if(length(hgcm)==1) { ##============================================================================## ##TERMINAL OUTPUT ##============================================================================## if (settings$verbose) { cat("\n\n [calc_CosmicDoseRate]") cat(paste("\n\n ---------------------------------------------------------")) cat(paste("\n depth (m) :", depth)) cat(paste("\n density (g cm^-3) :", density)) cat(paste("\n latitude (N deg.) :", latitude)) cat(paste("\n longitude (E deg.) :", longitude)) cat(paste("\n altitude (m) :", altitude)) cat(paste("\n ---------------------------------------------------------")) cat(paste("\n total absorber (g cm^-2) :", round(hgcm[i]*100,3))) cat(paste("\n")) cat(paste("\n cosmic dose rate (Gy ka^-1) :", round(d0,4))) cat(paste("\n [@sea-level & 55 deg. N G.lat]")) cat(paste("\n")) cat(paste("\n geomagnetic latitude (deg.) 
:", round(true.gml,1))) cat(paste("\n")) cat(paste("\n cosmic dose rate (Gy ka^-1) :", round(dc,4),"+-", round(dc.err,4))) cat(paste("\n [corrected] ")) cat(paste("\n ---------------------------------------------------------\n\n")) } ##============================================================================## ##RETURN VALUES ##============================================================================## if(length(depth)==1) { temp1<- data.frame(depth=depth,density=density) } else { temp1a<- data.frame(rbind(c(1:length(depth)))) tmpcoln1<- 1:length(depth) for(i in 1:length(depth)) { temp1a[i]<- depth[i] tmpcoln1[i]<- paste("depth",i) } temp1b<- data.frame(rbind(c(1:length(density)))) tmpcoln2<- 1:length(density) for(i in 1:length(density)) { temp1b[i]<- density[i] tmpcoln2[i]<- paste("density",i) } colnames(temp1a)<- tmpcoln1 colnames(temp1b)<- tmpcoln2 temp1<- cbind(temp1a,temp1b) } temp2<- data.frame(latitude=latitude,longitude=longitude, altitude=altitude,total_absorber.gcm2=hgcm*100, d0=d0,geom_lat=true.gml,dc=dc) summary<- data.frame(cbind(temp1,temp2)) newRLumResults.calc_CosmicDoseRate <- set_RLum( class = "RLum.Results", data = list(summary=summary, args=args, call=call)) # Return values invisible(newRLumResults.calc_CosmicDoseRate) } else { #terminal output if (settings$verbose) { cat("\n\n [calc_CosmicDoseRate]") cat(paste("\n\n Calculating cosmic dose rate for",length(depth), "samples. 
\n\n")) print(profile.results) } #return value add.info<- data.frame(latitude=latitude,longitude=longitude, altitude=altitude,total_absorber.gcm2=hgcm*100, geom_lat=true.gml) add.info<- rbind(add.info*length(i)) colnames(profile.results)<- c("depth","d0","dc","dc_err") summary<- data.frame(cbind(profile.results,add.info)) newRLumResults.calc_CosmicDoseRate <- set_RLum( class = "RLum.Results", data = list(summary=summary, args=args, call=call)) # Return values invisible(newRLumResults.calc_CosmicDoseRate) } } Luminescence/R/Analyse_SAR.OSLdata.R0000644000176200001440000006402114264017373016611 0ustar liggesusers#' Analyse SAR CW-OSL measurements. #' #' The function analyses SAR CW-OSL curve data and provides a summary of the #' measured data for every position. The output of the function is optimised #' for SAR OSL measurements on quartz. #' #' The function works only for standard SAR protocol measurements introduced by #' Murray and Wintle (2000) with CW-OSL curves. For the calculation of the #' Lx/Tx value the function [calc_OSLLxTxRatio] is used. #' #' **Provided rejection criteria** #' #' `[recyling ratio]`: calculated for every repeated regeneration dose point. #' #' `[recuperation]`: recuperation rate calculated by comparing the `Lx/Tx` values of the zero #' regeneration point with the `Ln/Tn` value (the `Lx/Tx` ratio of the natural #' signal). For methodological background see Aitken and Smith (1988) #' #' `[IRSL/BOSL]`: the integrated counts (`signal.integral`) of an #' IRSL curve are compared to the integrated counts of the first regenerated #' dose point. It is assumed that IRSL curves got the same dose as the first #' regenerated dose point. **Note:** This is not the IR depletion ratio #' described by Duller (2003). #' #' @param input.data [Risoe.BINfileData-class] (**required**): #' input data from a Risø BIN file, produced by the function [read_BIN2R]. #' #' @param signal.integral [vector] (**required**): #' channels used for the signal integral, e.g. 
`signal.integral=c(1:2)` #' #' @param background.integral [vector] (**required**): #' channels used for the background integral, e.g. `background.integral=c(85:100)` #' #' @param position [vector] (*optional*): #' reader positions that want to be analysed (e.g. `position=c(1:48)`. #' Empty positions are automatically omitted. If no value is given all #' positions are analysed by default. #' #' @param run [vector] (*optional*): #' range of runs used for the analysis. If no value is given the range of the #' runs in the sequence is deduced from the `Risoe.BINfileData` object. #' #' @param set [vector] (*optional*): #' range of sets used for the analysis. If no value is given the range of the #' sets in the sequence is deduced from the `Risoe.BINfileData` object. #' #' @param dtype [character] (*optional*): #' allows to further limit the curves by their data type (`DTYPE`), #' e.g., `dtype = c("Natural", "Dose")` limits the curves to this two data types. #' By default all values are allowed. #' See [Risoe.BINfileData-class] for allowed data types. #' #' @param keep.SEL [logical] (default): #' option allowing to use the `SEL` element of the [Risoe.BINfileData-class] manually. #' **NOTE:** In this case any limitation provided by `run`, `set` and `dtype` #' are ignored! #' #' @param info.measurement [character] (*with default*): #' option to provide information about the measurement on the plot #' output (e.g. name of the BIN or BINX file). #' #' @param output.plot [logical] (*with default*): #' plot output (`TRUE/FALSE`) #' #' @param output.plot.single [logical] (*with default*): #' single plot output (`TRUE/FALSE`) to allow for plotting the results in #' single plot windows. Requires `output.plot = TRUE`. #' #' @param cex.global [numeric] (*with default*): #' global scaling factor. #' #' @param ... 
further arguments that will be passed to the function #' [calc_OSLLxTxRatio] (supported: `background.count.distribution`, `sigmab`, #' `sig0`; e.g., for instrumental error) and can be used to adjust the plot. #' Supported" `mtext`, `log` #' #' @return #' A plot (*optional*) and [list] is returned containing the #' following elements: #' #' \item{LnLxTnTx}{[data.frame] of all calculated Lx/Tx values including signal, background counts and the dose points.} #' \item{RejectionCriteria}{[data.frame] with values that might by used as rejection criteria. NA is produced if no R0 dose point exists.} #' \item{SARParameters}{[data.frame] of additional measurement parameters obtained from the BIN file, e.g. preheat or read temperature #' (not valid for all types of measurements).} #' #' #' @note #' Rejection criteria are calculated but not considered during the #' analysis to discard values. #' #' **The analysis of IRSL data is not directly supported**. You may want to #' consider using the functions [analyse_SAR.CWOSL] or #' [analyse_pIRIRSequence] instead. #' #' **The development of this function will not be continued. We recommend to use the function [analyse_SAR.CWOSL] or instead.** #' #' #' @section Function version: 0.2.17 #' #' #' @author #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)\cr #' Margret C. Fuchs, HZDR, Freiberg (Germany) #' #' @seealso [calc_OSLLxTxRatio], [Risoe.BINfileData-class], [read_BIN2R], [plot_GrowthCurve] #' #' @references #' Aitken, M.J. and Smith, B.W., 1988. Optical dating: recuperation #' after bleaching. Quaternary Science Reviews 7, 387-393. #' #' Duller, G., 2003. Distinguishing quartz and feldspar in single grain #' luminescence measurements. Radiation Measurements, 37 (2), 161-165. #' #' Murray, A.S. and Wintle, A.G., 2000. Luminescence dating of quartz using an #' improved single-aliquot regenerative-dose protocol. Radiation Measurements #' 32, 57-73. 
#' #' @keywords datagen dplot #' #' @examples #' ##load data #' data(ExampleData.BINfileData, envir = environment()) #' #' ##analyse data #' output <- Analyse_SAR.OSLdata(input.data = CWOSL.SAR.Data, #' signal.integral = c(1:5), #' background.integral = c(900:1000), #' position = c(1:1), #' output.plot = TRUE) #' #' ##combine results relevant for further analysis #' output.SAR <- data.frame(Dose = output$LnLxTnTx[[1]]$Dose, #' LxTx = output$LnLxTnTx[[1]]$LxTx, #' LxTx.Error = output$LnLxTnTx[[1]]$LxTx.Error) #' output.SAR #' #' @md #' @export Analyse_SAR.OSLdata <- function( input.data, signal.integral, background.integral, position, run, set, dtype, keep.SEL = FALSE, info.measurement = "unkown measurement", output.plot = FALSE, output.plot.single = FALSE, cex.global = 1, ... ){ ##============================================================================## ##CONFIG ##============================================================================## ##set colors gallery to provide more colors col <- get("col", pos = .LuminescenceEnv) ##============================================================================## ##ERROR HANDLING ##============================================================================## if(missing(input.data)==TRUE){stop("[Analyse_SAR.OSLdata] No input data given!") }else{sample.data<-input.data} if(missing(signal.integral)==TRUE){stop("[Analyse_SAR.OSLdata] No signal integral is given!")} if(missing(background.integral)==TRUE){stop("[Analyse_SAR.OSLdata] No background integral is given!")} ##set values for run and set if they are not defined by the user if(missing(position)==TRUE){position<-min(sample.data@METADATA[,"POSITION"]):max(sample.data@METADATA[,"POSITION"])} if(missing(run)==TRUE){run<-min(sample.data@METADATA[,"RUN"]):max(sample.data@METADATA[,"RUN"])} if(missing(set)==TRUE){set<-min(sample.data@METADATA[,"SET"]):max(sample.data@METADATA[,"SET"])} if(missing(dtype)){dtype <- c("Natural", "N+dose", "Bleach", "Bleach+dose", "Natural 
(Bleach)", "N+dose (Bleach)", "Dose", "Background")} # Deal with extra arguments ---------------------------------------------------- ##deal with addition arguments extraArgs <- list(...) background.count.distribution <- if ("background.count.distribution" %in% names(extraArgs)) { extraArgs$background.count.distribution } else { "non-poisson" } sigmab <- if("sigmab" %in% names(extraArgs)) {extraArgs$sigmab} else {NULL} ##============================================================================## ##CALCULATIONS ##============================================================================## ##loop over all positions for (i in position){ ##checking if position is valid if(length(which(sample.data@METADATA["POSITION"]==i))>0){ ##check if OSL curves are part of the data set if(nrow(sample.data@METADATA[sample.data@METADATA[,"LTYPE"]=="OSL",]) == 0){ stop("[Analyse_SAR.OSLdata()] No 'OSL' curves found!") } if(!keep.SEL){ ##select all OSL data depending on the run and set sample.data@METADATA[,"SEL"]<-FALSE sample.data@METADATA[sample.data@METADATA[,"LTYPE"]=="OSL" & sample.data@METADATA[,"RUN"]%in%run==TRUE & sample.data@METADATA[,"SET"]%in%set==TRUE & sample.data@METADATA[,"DTYPE"]%in%dtype==TRUE, "SEL"] <- TRUE } ##grep all OSL curve IDs OSL.curveID<-sample.data@METADATA[sample.data@METADATA["SEL"]==TRUE & sample.data@METADATA["POSITION"]==i,"ID"] ##estimate LnLx.curveID and TnTx.curveID from records LnLx.curveID<-OSL.curveID[seq(1,length(OSL.curveID),by=2)] TnTx.curveID<-OSL.curveID[seq(2,length(OSL.curveID),by=2)] ##Provide Values For Growth Curve Fitting ##(1) get dose information Dose<-sapply(1:length(LnLx.curveID),function(x){ Dose<-sample.data@METADATA[sample.data@METADATA["ID"]==LnLx.curveID[x],"IRR_TIME"] }) ##(2) set LxTx curves LnLxTnTx.curves<-(sapply(1:length(LnLx.curveID),function(x){ ##produce data.frames for Lx/Tx calculations Lx.HIGH<-sample.data@METADATA[sample.data@METADATA[,"ID"]==LnLx.curveID[x],"HIGH"] 
Lx.NPOINTS<-sample.data@METADATA[sample.data@METADATA[,"ID"]==LnLx.curveID[x],"NPOINTS"] Tx.HIGH<-sample.data@METADATA[sample.data@METADATA[,"ID"]==TnTx.curveID[x],"HIGH"] Tx.NPOINTS<-sample.data@METADATA[sample.data@METADATA[,"ID"]==TnTx.curveID[x],"NPOINTS"] Lx.curve<-data.frame(x=seq(Lx.HIGH/Lx.NPOINTS,Lx.HIGH,by=Lx.HIGH/Lx.NPOINTS), y=unlist(sample.data@DATA[LnLx.curveID[x]])) Tx.curve<-data.frame(x=seq(Tx.HIGH/Tx.NPOINTS,Tx.HIGH,by=Tx.HIGH/Tx.NPOINTS), y=unlist(sample.data@DATA[TnTx.curveID[x]])) return(list(Lx.curve,Tx.curve)) })) ##(3) calculate Lx/Tx ratio LnLxTnTx <- get_RLum( merge_RLum(lapply(1:length(LnLxTnTx.curves[1, ]), function(k) { calc_OSLLxTxRatio( Lx.data = as.data.frame(LnLxTnTx.curves[1, k]), Tx.data = as.data.frame(LnLxTnTx.curves[2, k]), signal.integral = signal.integral, background.integral = background.integral, background.count.distribution = background.count.distribution, sigmab = sigmab ) }))) ##finally combine to data.frame including the record ID for further analysis LnLxTnTx <- cbind(LnLxTnTx,LnLx.curveID,TnTx.curveID) ##(4.1) set info concerning the kind of regeneration points ##generate unique dose id - this are also the # for the generated points temp.DoseID<-c(0:(length(Dose)-1)) temp.DoseName<-paste("R",temp.DoseID,sep="") temp.DoseName<-cbind(Name=temp.DoseName,Dose) ##set natural temp.DoseName[temp.DoseName[,"Name"]=="R0","Name"]<-"Natural" ##set R0 temp.DoseName[temp.DoseName[,"Name"]!="Natural" & temp.DoseName[,"Dose"]==0,"Name"]<-"R0" ##find duplicated doses (including 0 dose - which means the Natural) temp.DoseDuplicated<-duplicated(temp.DoseName[,"Dose"]) ##combine temp.DoseName temp.DoseName<-cbind(temp.DoseName,Repeated=temp.DoseDuplicated) ##correct value for R0 (it is not really repeated) temp.DoseName[temp.DoseName[,"Dose"]==0,"Repeated"]<-FALSE ##(5) Combine all values in a data.frame temp.LnLxTnTx<-data.frame(Name=temp.DoseName[,"Name"], Dose=Dose, Repeated=as.logical(temp.DoseName[,"Repeated"])) 
LnLxTnTx<-cbind(temp.LnLxTnTx,LnLxTnTx) LnLxTnTx[,"Name"]<-as.character(LnLxTnTx[,"Name"]) ##(6) Calculate Recyling Ratio and Recuperation Rate ##(6.1) ##Calculate Recycling Ratio if(length(LnLxTnTx[LnLxTnTx[,"Repeated"]==TRUE,"Repeated"])>0){ ##identify repeated doses temp.Repeated<-LnLxTnTx[LnLxTnTx[,"Repeated"]==TRUE,c("Name","Dose","LxTx")] ##find corresponding previous dose for the repeated dose temp.Previous<-t(sapply(1:length(temp.Repeated[,1]),function(x){ LnLxTnTx[LnLxTnTx[,"Dose"]==temp.Repeated[x,"Dose"] & LnLxTnTx[,"Repeated"]==FALSE,c("Name","Dose","LxTx")] })) ##convert to data.frame temp.Previous<-as.data.frame(temp.Previous) ##set column names temp.ColNames<-sapply(1:length(temp.Repeated[,1]),function(x){ paste(temp.Repeated[x,"Name"],"/", temp.Previous[temp.Previous[,"Dose"]==temp.Repeated[x,"Dose"],"Name"] ,sep="") }) ##Calculate Recycling Ratio RecyclingRatio<-as.numeric(temp.Repeated[,"LxTx"])/as.numeric(temp.Previous[,"LxTx"]) ##Just transform the matrix and add column names RecyclingRatio<-t(RecyclingRatio) colnames(RecyclingRatio) <- unique(temp.ColNames) }else{RecyclingRatio<-NA} ##(6.2) ##Recuperation Rate if("R0" %in% LnLxTnTx[,"Name"]==TRUE){ Recuperation<-round(LnLxTnTx[LnLxTnTx[,"Name"]=="R0","LxTx"]/LnLxTnTx[LnLxTnTx[,"Name"]=="Natural","LxTx"],digits=4) }else{Recuperation<-NA} ##(6.3) IRSL ##Print IRSL Curves if IRSL curve is set sample.data@METADATA[,"SEL"]<-FALSE sample.data@METADATA[sample.data@METADATA["LTYPE"]=="IRSL" & sample.data@METADATA[,"RUN"]%in%run==TRUE & sample.data@METADATA[,"SET"]%in%set==TRUE,"SEL"]<-TRUE ##get IRSL curve ID & ID for Reg1 again IRSL.curveID<-sample.data@METADATA[sample.data@METADATA["SEL"]==TRUE & sample.data@METADATA["POSITION"]==i,"ID"] ##if no IRSL curve the length of the object is 0 if(length(IRSL.curveID)>0){ ##chose an IRSL curve with a dose of the first regeneration point Reg1again.curveID<-LnLxTnTx[LnLxTnTx[,"Repeated"]==TRUE & LnLxTnTx[,"Dose"]==LnLxTnTx[2,"Dose"],"LnLx.curveID"] 
if(length(Reg1again.curveID)>0){ ##BOSL/IRSL IRSL_BOSL<-round(sum(unlist(sample.data@DATA[IRSL.curveID])[signal.integral]) /sum(unlist(sample.data@DATA[Reg1again.curveID])[signal.integral]),digits=4) }else{IRSL_BOSL<-NA} }else{IRSL_BOSL<-NA} ##Combine the two values if(exists("RejectionCriteria")==FALSE){ RejectionCriteria<-cbind(RecyclingRatio,Recuperation,IRSL_BOSL) }else{ RejectionCriteria.temp<-cbind(RecyclingRatio,Recuperation,IRSL_BOSL) RejectionCriteria<-rbind(RejectionCriteria,RejectionCriteria.temp) } ##============================================================================## ##PLOTTING ##============================================================================## if(output.plot){ ##set plot settings plot.settings <- list( mtext = sample.data@METADATA[sample.data@METADATA[,"ID"]==LnLx.curveID[1],"SAMPLE"], log = "" ) ##modify arguments plot.settings <- modifyList(plot.settings, list(...)) if(output.plot.single==FALSE){ layout(matrix(c(1,2,1,2,3,4,3,5),4,2,byrow=TRUE)) } ##warning if number of curves exceed colour values if(length(col)0){ ##to ensure that the right TL curves are used the run and set number of the LnLx and TnTx curves are used LnLx.SET<-sapply(LnLx.curveID,function(x){sample.data@METADATA[sample.data@METADATA["ID"]==x,"SET"]}) LnLx.RUN<-sapply(LnLx.curveID,function(x){sample.data@METADATA[sample.data@METADATA["ID"]==x,"RUN"]}) TnTx.SET<-sapply(TnTx.curveID,function(x){sample.data@METADATA[sample.data@METADATA["ID"]==x,"SET"]}) TnTx.RUN<-sapply(TnTx.curveID,function(x){sample.data@METADATA[sample.data@METADATA["ID"]==x,"RUN"]}) ##get TL curve IDs in general considering the constraints TL.curveID<-sapply(1:length(TnTx.curveID),function(x){results<- sample.data@METADATA[sample.data@METADATA["SEL"]==TRUE & sample.data@METADATA["POSITION"]==i & sample.data@METADATA["SET"]>=LnLx.SET[x] & sample.data@METADATA["RUN"]>=LnLx.RUN[x] & sample.data@METADATA["SET"]<=TnTx.SET[x] & sample.data@METADATA["RUN"]<=TnTx.RUN[x],"ID"]}) ##get maximum value 
of TL curves TL.curveMax<-max(unlist(sample.data@DATA[TL.curveID])) ##get channel resolution (it should be the same for all values) HIGH<-unique(sample.data@METADATA[sample.data@METADATA["ID"]==TL.curveID[1],"HIGH"]) NPOINTS<-unique(sample.data@METADATA[sample.data@METADATA["ID"]==TL.curveID[1],"NPOINTS"]) xaxt.values<-seq(HIGH/NPOINTS,HIGH,by=HIGH/NPOINTS) ##get heating rate RATE<-unique(sample.data@METADATA[sample.data@METADATA["ID"]==TL.curveID[1],"RATE"]) ##open plot area for TL curves plot(NA,NA, xlab="T [\u00B0C]", ylab=paste("TL [cts/",HIGH/NPOINTS," \u00B0C]",sep=""), xlim=c(HIGH/NPOINTS,HIGH), ylim=c(1,TL.curveMax), main="Cutheat - TL curves", sub=paste("(",RATE," K/s)",sep=""), log=if(plot.settings$log=="y" | plot.settings$log=="xy"){"y"}else{""} ) ##plot curves and get legend values sapply(1:length(TL.curveID),function(x){ yaxt.values<-unlist(sample.data@DATA[TL.curveID[x]]) lines(xaxt.values,yaxt.values,col=col[x]) }) ##plot legend legend("topleft",as.character(LnLxTnTx$Name),lty=c(rep(1,length(TL.curveID))), cex=0.8*cex.global,col=col, bg="white", bty="n") ##sample name mtext(side=3,plot.settings$mtext,cex=0.7*cex.global) }else{ plot(NA,NA,xlim=c(0,100),ylim=c(0,100), main="Cutheat - TL curves") text(50,50,"no cutheat as TL curve detected") } ##======================================================================## ##Print IRSL Curves if IRSL curve is set if(is.na(IRSL_BOSL) == FALSE){ ##get channel resolution (it should be the same for all values) HIGH<-unique(sample.data@METADATA[sample.data@METADATA["ID"]==IRSL.curveID ,"HIGH"]) NPOINTS<-unique(sample.data@METADATA[sample.data@METADATA["ID"]==IRSL.curveID ,"NPOINTS"]) xaxt.values<-seq(HIGH/NPOINTS,HIGH,by=HIGH/NPOINTS) ##open plot IRSL curve plot(NA,NA, xlab="Time [s]", ylab=paste("OSL and IRSL [cts/",HIGH/NPOINTS," s]",sep=""), xlim=c(0,HIGH), ylim=c(0,max(unlist(sample.data@DATA[Reg1again.curveID]))), main="IRSLT" ) ##show integral limits abline(v=xaxt.values[min(signal.integral)], lty=2, 
col="gray") abline(v=xaxt.values[max(signal.integral)], lty=2, col="gray") ##print(length(sample.data@DATA[IRSL.curveID])) lines(xaxt.values,unlist(sample.data@DATA[IRSL.curveID]),col="red") lines(xaxt.values,unlist(sample.data@DATA[Reg1again.curveID[1]]),col="blue") ##legend legend("topright",c("R1 again","IRSL"),lty=c(1,1),col=c("blue","red"), bty="n") mtext(side=3,paste("IRSL/BOSL = ",IRSL_BOSL*100,"%",sep=""), cex=.8*cex.global ) } if(((is.na(IRSL_BOSL)==TRUE) & length(IRSL.curveID)>0) | ((is.na(IRSL_BOSL)==FALSE) & length(IRSL.curveID)>0)){ ##plot only IRSL curve plot(xaxt.values,unlist(sample.data@DATA[IRSL.curveID]), xlab="Time [s]", ylab=paste("IRSL [cts/",HIGH/NPOINTS," s]",sep=""), xlim=c(0,10), ylim=c(0,max(unlist(sample.data@DATA[IRSL.curveID]))), main="IRSL curve (10 s)", col="red", type="l" ) }else{ plot(NA,NA,xlim=c(0,10), ylim=c(0,10), main="IRSL curve") text(5,5,"no IRSL curve detected") } ##========================================================================= ##Plot header if(output.plot.single==TRUE){ mtext(side=3,paste("ALQ Pos. ",i,sep="")) }else{ mtext(side=3,paste("ALQ Pos. 
",i,sep=""),outer=TRUE,line=-2.5) } ##Plot footer mtext(side=4,info.measurement,outer=TRUE,line=-1.5,cex=0.6*cex.global, col="blue") ##output on terminal for plot writeLines(paste("\n[Analyse_SAR.OSLdata()] >> Figure for position ",i," produced.",sep="")) ##reset mfrow par(mfrow=c(1,1)) }#endif for output.plot ##preprate output of values ##============================================================================== ##Add LnLxTnTx values to the list if(exists("LnLxTnTx_List")==FALSE){LnLxTnTx_List<-list()} LnLxTnTx_List[[i]]<-LnLxTnTx rm(LnLxTnTx) }else{writeLines(paste("[Analyse_SAR.OSLdata()] >> Position ",i," is not valid and has been omitted!",sep=""))} #end if position checking }#end for loop ##============================================================================## ##OUTPUT OF FUNCTION ##============================================================================## ##get further information from the position used ##this is what you get from the Risoe file readTemp<-unique(sample.data@METADATA[sample.data@METADATA[,"POSITION"]==min(position) & sample.data@METADATA[,"LTYPE"]!="TL","TEMPERATURE"]) cutheat<-unique(sample.data@METADATA[sample.data@METADATA[,"POSITION"]==min(position) & sample.data@METADATA[,"LTYPE"]=="TL","HIGH"]) if(length(cutheat)==0){cutheat=NA} systemID<-unique(sample.data@METADATA[sample.data@METADATA[,"POSITION"]==min(position),"SYSTEMID"]) SARParameters<-data.frame(readTemp=readTemp,cutheat=cutheat,systemID=systemID) return(list(LnLxTnTx=LnLxTnTx_List, RejectionCriteria=RejectionCriteria, SARParameters=SARParameters)) } Luminescence/R/install_DevelopmentVersion.R0000644000176200001440000000751214236146743020605 0ustar liggesusers#' Attempts to install the development version of the 'Luminescence' package #' #' This function is a convenient method for installing the development #' version of the R package 'Luminescence' directly from GitHub. 
#'
#' This function uses [Luminescence::github_branches][Luminescence::GitHub-API] to check
#' which development branches of the R package 'Luminescence' are currently
#' available on GitHub. The user is then prompted to choose one of the branches
#' to be installed. It further checks whether the R package 'devtools' is
#' currently installed and available on the system. Finally, it prints R code
#' to the console that the user can copy and paste to the R console in order
#' to install the desired development version of the package.
#'
#'
#' If `force_install=TRUE` the function checks if 'devtools' is available
#' and then attempts to install the chosen development branch via
#' [devtools::remote-reexports].
#'
#' @param force_install [logical] (*optional*):
#' If `FALSE` (the default) the function produces and prints the required
#' code to the console for the user to run manually afterwards. When `TRUE`
#' and all requirements are fulfilled (see details) this function attempts to install
#' the package itself.
#'
#' @return
#' This function requires user input at the command prompt to choose the
#' desired development branch to be installed. The required R code to install
#' the package is then printed to the console.
#'
#' @examples
#'
#' \dontrun{
#' install_DevelopmentVersion()
#' }
#'
#' @md
#' @export
install_DevelopmentVersion <- function(force_install = FALSE) {
  message("\n[install_DevelopmentVersion]\n")

  # check which branches are currently available
  # see ?github_branches for GitHub API implementation
  branches <- github_branches()

  index <- NULL

  # let user pick which branch he wants to install
  # (loop until a valid branch number was entered; "0" exits with NULL)
  while(is.null(index)) {
    message(paste0("Which development branch do you want to install? \n",
                   paste0(" [", 1:length(branches$BRANCH), "]: ",
                          branches$BRANCH, collapse = "\n")))
    message("\n [0]: ")
    # NOTE(review): readline() returns a character; the comparison below relies
    # on R's implicit coercion in `index == 0`
    index <- readline()
    if (index == 0)
      return(NULL)
    if (!index %in% seq_len(length(branches$BRANCH)))
      index <- NULL
    cat("\n")
  }

  # select the correct branch
  branch <- branches$BRANCH[as.numeric(index)]

  if (!force_install) {
    # manual mode: only print the required installation code to the console
    message("----\n",
            "Are all prerequisites installed? Make sure to have read\n",
            "https://github.com/R-Lum/Luminescence/blob/master/README.md\n",
            "----\n")
    message("Please copy and run the following code in your R command-line:\n")
    if (!requireNamespace("devtools", quietly = TRUE))
      message("install.packages('devtools')")
    message(branches$INSTALL[as.numeric(index)], "\n")
  } else {
    # automatic mode: ask for confirmation first, then install the branch
    reply <- NULL
    while(is.null(reply)) {
      message("Are all prerequisites installed?",
              " (https://github.com/R-Lum/Luminescence/blob/master/README.md)\n",
              " [n/N]: No\n",
              " [y/Y]: Yes\n")
      reply <- readline()
      if (reply == "n" || reply == "N")
        return(NULL)
      if (reply != "y" && reply != "Y")
        reply <- NULL
    }

    # check if 'devtools' is available and install if not
    if (!requireNamespace("devtools", quietly = TRUE)) {
      message("Please install the 'devtools' package first by running the following command:\n",
              "install.packages('devtools')")
      return(NULL)
    }

    # detach the 'Luminescence' package so the new version can be loaded cleanly
    try(detach(name = "package:Luminescence", unload = TRUE, force = TRUE),
        silent = TRUE)

    # try to unload the dynamic library (best effort, errors are swallowed)
    dynLibs <- sapply(.dynLibs(), function(x) x[["path"]] )
    try(dyn.unload(dynLibs[grep("Luminescence", dynLibs)]), silent = TRUE)

    # install the development version
    devtools::install_github(paste0("r-lum/luminescence@", branch))
  }
}
Luminescence/R/write_RLum2CSV.R0000644000176200001440000002136514464125673015752 0ustar liggesusers#' @title Export RLum-objects to CSV
#'
#' @description This function exports [RLum-class]-objects to CSV-files using the R function
#' [utils::write.table]. All [RLum-class]-objects are supported, but the
#' export is lossy, i.e.
the pure numerical values are exported only. Information #' that cannot be coerced to a [data.frame] or a [matrix] are discarded as well as #' metadata. #' #' @details However, in combination with the implemented import functions, nearly every #' supported import data format can be exported to CSV-files, this gives a great #' deal of freedom in terms of compatibility with other tools. #' #' **Input is a list of objects** #' #' If the input is a [list] of objects all explicit function arguments can be provided #' as [list]. #' #' @param object [RLum-class] or a [list] of `RLum` objects (**required**): #' objects to be written. Can be a [data.frame] if needed internally. #' #' @param path [character] (*optional*): #' character string naming folder for the output to be written. If nothing #' is provided `path` will be set to the working directory. #' **Note:** this argument is ignored if the the argument `export` is set to `FALSE`. #' #' @param prefix [character] (*with default*): #' optional prefix to name the files. This prefix is valid for all written files #' #' @param export [logical] (*with default*): #' enable or disable the file export. If set to `FALSE` nothing is written to #' the file connection, but a list comprising objects of type [data.frame] and [matrix] #' is returned instead #' #' @param compact [logical] (*with default*): if `TRUE` (the default) the output will be more #' simple but less comprehensive, means not all elements in the objects will be fully broken down. #' This is in particular useful for writing `RLum.Results` objects to CSV-files, such objects #' can be rather complex and not all information are needed in a CSV-file or can be meaningful translated #' to it. #' #' @param ... further arguments that will be passed to the function #' [utils::write.table]. 
All arguments except the argument `file` are supported
#'
#'
#' @return
#' The function returns either a CSV-file (or many of them) or for the
#' option `export == FALSE` a list comprising objects of type [data.frame] and [matrix]
#'
#'
#' @section Function version: 0.2.2
#'
#' @author
#' Sebastian Kreutzer, Geography & Earth Science, Aberystwyth University (United Kingdom)
#'
#' @seealso [RLum.Analysis-class], [RLum.Data-class], [RLum.Results-class],
#' [utils::write.table]
#'
#' @keywords IO
#'
#' @examples
#'
#' ##transform values to a list (and do not write)
#' data(ExampleData.BINfileData, envir = environment())
#' object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data)[[1]]
#' write_RLum2CSV(object, export = FALSE)
#'
#' \dontrun{
#'
#' ##write CSV-files to a temporary directory
#' ##(for usage replace by own path)
#' write_RLum2CSV(object, path = tempdir())
#'
#' }
#'
#' @md
#' @export
write_RLum2CSV <- function(
  object,
  path = NULL,
  prefix = "",
  export = TRUE,
  compact = TRUE,
  ...
){

  # General tests -------------------------------------------------------------------------------
  if(missing(object)){
    stop("[write_RLum2CSV()] input object is missing!", call. = FALSE)
  }

  # Self-call -----------------------------------------------------------------------------------
  ##this option allows to work on a list of RLum-objects:
  ##every explicit argument is recycled to the list length and the function
  ##calls itself once per list element
  if(is.list(object) && !is.data.frame(object)){

    ##extend the list of arguments if set
    ##path
    path <- rep(list(path), length = length(object))

    ##prefix ... create automatic prefix if nothing is provided
    ##(element index is encoded as "[[i]]_" in the file name)
    prefix <- as.list(paste0(prefix[1], "[[",1:length(object),"]]_"))

    ##export
    export <- rep(list(export), length = length(object))

    ## write list name to object (picked up later in the data.frame branch)
    for (i in 1:length(object))
      attr(object[[i]], "list_name") <- names(object)[i]

    ##execute the self-call function
    temp <- lapply(1:length(object), function(x){
      write_RLum2CSV(
        object = object[[x]],
        path = path[[x]],
        prefix = prefix[[x]],
        export = export[[x]],
        ...
      )
    })

    ##this prevents that we get a list of NULL
    if(is.null(unlist(temp))){
      return(NULL)
    }else{
      return(temp)
    }
  }

  # Integrity tests -----------------------------------------------------------------------------
  ##check path
  ##if NULL condition: fall back to the working directory
  if(export == TRUE && is.null(path)){
    path <- getwd()
    message(paste0("[write_RLum2CSV()] Path automatically set to: ", path))
  }

  ##non NULL condition: the target directory must already exist
  ## NOTE(review): typo "Diretory" in the user-facing error message below
  if(export == TRUE && !dir.exists(path)){
    stop("[write_RLum2CSV()] Diretory provided via the argument 'path' does not exist!", call. = FALSE)
  }

  ## What do we need at the end of the day is a named list of data.frames or matrices we can export
  ## using the function write.table; the name of the list elements will become the file names
  if(inherits(object, "RLum")){
    if(is(object, "RLum.Analysis") || is(object, "RLum.Data.Curve") || is(object, "RLum.Data.Spectrum") || is(object, "RLum.Data.Image")){

      ##extract all elements ... depending on the input
      if(is(object, "RLum.Analysis")){
        ##tricky, we cannot use get_RLum() as the function lapply calls as.list() for an object!
        object_list <- lapply(object, function(x){get_RLum(x)})

        ##change names of the list and produce the right format straight away
        names(object_list) <- paste0(1:length(object_list),"_",names(object))

      } else {
        ##get object and make list
        object_list <- list(get_RLum(object))

        ##set new name
        names(object_list) <- paste0("1_",object@recordType)
      }

    } else if (is(object, "RLum.Results")){
      ##unlist what ever comes, but do not break structures like matrices, numerics and
      ##data.frames
      ## NOTE(review): 'names' is assigned here but never used below
      names <- names(object@data)

      ##get elements
      object_list <- lapply(object@data, function(e){
        ##keep exportable elements (matrix/numeric/data.frame) as-is,
        ##wrapped in a list so the unlist() below flattens them by one level only
        if(inherits(e, "matrix") || inherits(e, "numeric") || inherits(e, "data.frame"))
          return(list(e))

        ##unlist the rest until the end
        if(!compact)
          return(unlist(e))

        ##now we return whatever we have
        return(e)
      })

      ##now unlist again one level
      object_list <- unlist(object_list, recursive = FALSE)

      ##sort out objects we cannot proceed with ...
      object_list_rm <- vapply(object_list, function(x) {
        inherits(x, "matrix") || inherits(x, "numeric") || inherits(x, "data.frame")
      }, vector(mode = "logical", length = 1))

      ##remove unwanted objects
      object_list <- object_list[object_list_rm]

      ##set warning
      if(any(!object_list_rm))
        warning(paste0("[write_RLum2CSV()] ", length(which(!object_list_rm)), " elements could not be converted to a CSV-structure!"), call. = FALSE)

      ##adjust the names
      names(object_list) <- paste0(1:length(object_list),"_",names(object_list))

    } else {
      try(stop("[write_RLum2CSV()] One particular RLum-object is not yet supported! NULL returned!", call. = FALSE))
      return(NULL)
    }

  } else if (inherits(object, "data.frame")) {
    ##plain data.frame input (used internally, e.g. via the list self-call)
    object_list <- list(object)
    if(!is.null(attr(object, "filename")))
      filename <- attr(object, "filename")
    else
      filename <- ""
    names(object_list) <- paste0("conv_", attr(object, "list_name"), filename)

  }else{
    stop("[write_RLum2CSV()] Object needs to be a member of the object class RLum!", call. = FALSE)
  }

  # Export --------------------------------------------------------------------------------------
  if(export){
    ##set export settings for write.table
    export_settings.default <- list(
      append = FALSE,
      quote = TRUE,
      sep = ";",
      eol = "\n",
      na = "NA",
      dec = ".",
      row.names = FALSE,
      col.names = FALSE,
      qmethod = c("escape", "double"),
      fileEncoding = ""
    )

    ##modify on demand (arguments in ... override the defaults)
    export_settings <- modifyList(x = export_settings.default, val = list(...))

    ##write files to file system, one CSV-file per list element
    for(i in 1:length(object_list)){
      utils::write.table(
        x = object_list[[i]],
        file = paste0(path,"/", prefix, names(object_list)[i],".csv"),
        append = export_settings$append,
        quote = export_settings$quote,
        sep = export_settings$sep,
        eol = export_settings$eol,
        na = export_settings$na,
        dec = export_settings$dec,
        row.names = export_settings$row.names,
        col.names = export_settings$col.names,
        qmethod = export_settings$qmethod,
        fileEncoding = export_settings$fileEncoding)
    }

  }else{
    return(object_list)
  }
}
Luminescence/R/calc_MinDose.R0000644000176200001440000011451014236146743015544 0ustar liggesusers#' Apply the (un-)logged minimum age model (MAM) after Galbraith et al. (1999)
#' to a given De distribution
#'
#' Function to fit the (un-)logged three or four parameter minimum dose model
#' (MAM-3/4) to De data.
#'
#' **Parameters**
#'
#' This model has four parameters:
#' \tabular{rl}{
#' `gamma`: \tab minimum dose on the log scale \cr
#' `mu`: \tab mean of the non-truncated normal distribution \cr
#' `sigma`: \tab spread in ages above the minimum \cr
#' `p0`: \tab proportion of grains at gamma \cr }
#'
#' If `par=3` (default) the 3-parameter minimum age model is applied,
#' where `gamma=mu`. For `par=4` the 4-parameter model is applied instead.
#'
#' **(Un-)logged model**
#'
#' In the original version of the minimum dose model, the basic data are the natural
#' logarithms of the De estimates and relative standard errors of the De
#' estimates.
The value for `sigmab` must be provided as a ratio #' (e.g., 0.2 for 20 \%). This model will be applied if `log = TRUE`. #' #' If `log=FALSE`, the modified un-logged model will be applied instead. This #' has essentially the same form as the original version. `gamma` and #' `sigma` are in Gy and `gamma` becomes the minimum true dose in the #' population. #' **Note** that the un-logged model requires `sigmab` to be in the same #' absolute unit as the provided De values (seconds or Gray). #' #' While the original (logged) version of the minimum dose #' model may be appropriate for most samples (i.e. De distributions), the #' modified (un-logged) version is specially designed for modern-age and young #' samples containing negative, zero or near-zero De estimates (Arnold et al. #' 2009, p. 323). #' #' **Initial values & boundaries** #' #' The log likelihood calculations use the [nlminb] function for box-constrained #' optimisation using PORT routines. Accordingly, initial values for the four #' parameters can be specified via `init.values`. If no values are #' provided for `init.values`, reasonable starting values are estimated #' from the input data. If the final estimates of *gamma*, *mu*, #' *sigma* and *p0* are totally off target, consider providing custom #' starting values via `init.values`. #' In contrast to previous versions of this function the boundaries for the #' individual model parameters are no longer required to be explicitly specified. #' If you want to override the default boundary values use the arguments #' `gamma.lower`, `gamma.upper`, `sigma.lower`, `sigma.upper`, `p0.lower`, `p0.upper`, #' `mu.lower` and `mu.upper`. #' #' **Bootstrap** #' #' When `bootstrap=TRUE` the function applies the bootstrapping method as #' described in Cunningham & Wallinga (2012). 
By default, the minimum age model #' produces 1000 first level and 3000 second level bootstrap replicates #' (actually, the number of second level bootstrap replicates is three times #' the number of first level replicates unless specified otherwise). The #' uncertainty on sigmab is 0.04 by default. These values can be changed by #' using the arguments `bs.M` (first level replicates), `bs.N` #' (second level replicates) and `sigmab.sd` (error on sigmab). With #' `bs.h` the bandwidth of the kernel density estimate can be specified. #' By default, `h` is calculated as #' #' \deqn{h = (2*\sigma_{DE})/\sqrt{n}} #' #' **Multicore support** #' #' This function supports parallel computing and can be activated by `multicore=TRUE`. #' By default, the number of available logical CPU cores is determined #' automatically, but can be changed with `cores`. The multicore support #' is only available when `bootstrap=TRUE` and spawns `n` R instances #' for each core to get MAM estimates for each of the N and M bootstrap #' replicates. Note that this option is highly experimental and may or may not #' work for your machine. Also the performance gain increases for larger number #' of bootstrap replicates. Also note that with each additional core and hence #' R instance and depending on the number of bootstrap replicates the memory #' usage can significantly increase. Make sure that memory is always available, #' otherwise there will be a massive performance hit. #' #' **Likelihood profiles** #' #' The likelihood profiles are generated and plotted by the `bbmle` package. #' The profile likelihood plots look different to ordinary profile likelihood as #' #' "`[...]` the plot method for likelihood profiles displays the square root of #' the the deviance difference (twice the difference in negative log-likelihood from #' the best fit), so it will be V-shaped for cases where the quadratic approximation #' works well `[...]`." (Bolker 2016). 
#' #' For more details on the profile likelihood #' calculations and plots please see the vignettes of the `bbmle` package #' (also available here: [https://CRAN.R-project.org/package=bbmle]()). #' #' @param data [RLum.Results-class] or [data.frame] (**required**): #' for [data.frame]: two columns with De `(data[ ,1])` and De error `(data[ ,2])`. #' #' @param sigmab [numeric] (**required**): #' additional spread in De values. #' This value represents the expected overdispersion in the data should the sample be #' well-bleached (Cunningham & Wallinga 2012, p. 100). #' **NOTE**: For the logged model (`log = TRUE`) this value must be #' a fraction, e.g. 0.2 (= 20 \%). If the un-logged model is used (`log = FALSE`), #' sigmab must be provided in the same absolute units as the De values (seconds or Gray). #' See details. #' #' @param log [logical] (*with default*): #' fit the (un-)logged minimum dose model to De data. #' #' @param par [numeric] (*with default*): #' apply the 3- or 4-parameter minimum age model (`par=3` or `par=4`). The MAM-3 is #' used by default. #' #' @param bootstrap [logical] (*with default*): #' apply the recycled bootstrap approach of Cunningham & Wallinga (2012). #' #' @param init.values [numeric] (*optional*): #' a named list with starting values for gamma, sigma, p0 and mu #' (e.g. `list(gamma=100, sigma=1.5, p0=0.1, mu=100)`). If no values are provided, reasonable values #' are estimated from the data. **NOTE** that the initial values must always be given #' in the absolute units. If the logged model is applied (`log = TRUE`), the provided `init.values` #' are automatically log transformed. #' #' @param level [numeric] (*with default*): #' the confidence level required (defaults to 0.95). #' #' @param log.output [logical] (*with default*): #' If `TRUE` the console output will also show the logged values of the final parameter estimates #' and confidence intervals (only applicable if `log = TRUE`). 
#' #' @param plot [logical] (*with default*): #' plot output (`TRUE`/`FALSE`) #' #' @param multicore [logical] (*with default*): #' enable parallel computation of the bootstrap by creating a multicore SNOW cluster. Depending #' on the number of available logical CPU cores this may drastically reduce #' the computation time. Note that this option is highly experimental and may not #' work on all machines. (`TRUE`/`FALSE`) #' #' @param ... (*optional*) further arguments for bootstrapping #' (`bs.M, bs.N, bs.h, sigmab.sd`). See details for their usage. #' Further arguments are #' - `verbose` to de-/activate console output (logical), #' - `debug` for extended console output (logical) and #' - `cores` (integer) to manually specify the number of cores to be used when `multicore=TRUE`. #' #' @return Returns a plot (*optional*) and terminal output. In addition an #' [RLum.Results-class] object is returned containing the #' following elements: #' #' \item{.$summary}{[data.frame] summary of all relevant model results.} #' \item{.$data}{[data.frame] original input data} #' \item{args}{[list] used arguments} #' \item{call}{[call] the function call} #' \item{.$mle}{[mle2] object containing the maximum log likelihood functions for all parameters} #' \item{BIC}{[numeric] BIC score} #' \item{.$confint}{[data.frame] confidence intervals for all parameters} #' \item{.$profile}{[profile.mle2] the log likelihood profiles} #' \item{.$bootstrap}{[list] bootstrap results} #' #' The output should be accessed using the function [get_RLum] #' #' @note #' The default starting values for *gamma*, *mu*, *sigma* #' and *p0* may only be appropriate for some De data sets and may need to #' be changed for other data. This is especially true when the un-logged #' version is applied. \cr #' Also note that all R warning messages are suppressed #' when running this function. 
If the results seem odd consider re-running the #' model with `debug=TRUE` which provides extended console output and #' forwards all internal warning messages. #' #' @section Function version: 0.4.4 #' #' @author #' Christoph Burow, University of Cologne (Germany) \cr #' Based on a rewritten S script of Rex Galbraith, 2010 \cr #' The bootstrap approach is based on a rewritten MATLAB script of Alastair Cunningham. \cr #' Alastair Cunningham is thanked for his help in implementing and cross-checking the code. #' #' @seealso [calc_CentralDose], [calc_CommonDose], [calc_FiniteMixture], #' [calc_FuchsLang2001], [calc_MaxDose] #' #' @references #' Arnold, L.J., Roberts, R.G., Galbraith, R.F. & DeLong, S.B., #' 2009. A revised burial dose estimation procedure for optical dating of young #' and modern-age sediments. Quaternary Geochronology 4, 306-325. #' #' Galbraith, R.F. & Laslett, G.M., 1993. Statistical models for mixed fission #' track ages. Nuclear Tracks Radiation Measurements 4, 459-470. #' #' Galbraith, R.F., Roberts, R.G., Laslett, G.M., Yoshida, H. & Olley, J.M., #' 1999. Optical dating of single grains of quartz from Jinmium rock shelter, #' northern Australia. Part I: experimental design and statistical models. #' Archaeometry 41, 339-364. #' #' Galbraith, R.F., 2005. Statistics for #' Fission Track Analysis, Chapman & Hall/CRC, Boca Raton. #' #' Galbraith, R.F. & Roberts, R.G., 2012. Statistical aspects of equivalent dose and error #' calculation and display in OSL dating: An overview and some recommendations. #' Quaternary Geochronology 11, 1-27. #' #' Olley, J.M., Roberts, R.G., Yoshida, H., Bowler, J.M., 2006. Single-grain optical dating of grave-infill #' associated with human burials at Lake Mungo, Australia. Quaternary Science #' Reviews 25, 2469-2474. #' #' **Further reading** #' #' Arnold, L.J. & Roberts, R.G., 2009. Stochastic modelling of multi-grain equivalent dose #' (De) distributions: Implications for OSL dating of sediment mixtures. 
#' Quaternary Geochronology 4, 204-230. #' #' Bolker, B., 2016. Maximum likelihood estimation analysis with the bbmle package. #' In: Bolker, B., R Development Core Team, 2016. bbmle: Tools for General Maximum Likelihood Estimation. #' R package version 1.0.18. [https://CRAN.R-project.org/package=bbmle]() #' #' Bailey, R.M. & Arnold, L.J., 2006. Statistical modelling of single grain quartz De distributions and an #' assessment of procedures for estimating burial dose. Quaternary Science #' Reviews 25, 2475-2502. #' #' Cunningham, A.C. & Wallinga, J., 2012. Realizing the potential of fluvial archives using robust OSL chronologies. #' Quaternary Geochronology 12, 98-106. #' #' Rodnight, H., Duller, G.A.T., Wintle, A.G. & Tooth, S., 2006. Assessing the reproducibility and accuracy #' of optical dating of fluvial deposits. Quaternary Geochronology 1, 109-120. #' #' Rodnight, H., 2008. How many equivalent dose values are needed to #' obtain a reproducible distribution?. Ancient TL 26, 3-10. #' #' #' @examples #' #' ## Load example data #' data(ExampleData.DeValues, envir = environment()) #' #' # (1) Apply the minimum age model with minimum required parameters. #' # By default, this will apply the un-logged 3-parameter MAM. #' calc_MinDose(data = ExampleData.DeValues$CA1, sigmab = 0.1) #' #' \dontrun{ #' # (2) Re-run the model, but save results to a variable and turn #' # plotting of the log-likelihood profiles off. 
#' mam <- calc_MinDose(data = ExampleData.DeValues$CA1, #' sigmab = 0.1, #' plot = FALSE) #' #' # Show structure of the RLum.Results object #' mam #' #' # Show summary table that contains the most relevant results #' res <- get_RLum(mam, "summary") #' res #' #' # Plot the log likelihood profiles retroactively, because before #' # we set plot = FALSE #' plot_RLum(mam) #' #' # Plot the dose distribution in an abanico plot and draw a line #' # at the minimum dose estimate #' plot_AbanicoPlot(data = ExampleData.DeValues$CA1, #' main = "3-parameter Minimum Age Model", #' line = mam,polygon.col = "none", #' hist = TRUE, #' rug = TRUE, #' summary = c("n", "mean", "mean.weighted", "median", "in.ci"), #' centrality = res$de, #' line.col = "red", #' grid.col = "none", #' line.label = paste0(round(res$de, 1), "\U00B1", #' round(res$de_err, 1), " Gy"), #' bw = 0.1, #' ylim = c(-25, 18), #' summary.pos = "topleft", #' mtext = bquote("Parameters: " ~ #' sigma[b] == .(get_RLum(mam, "args")$sigmab) ~ ", " ~ #' gamma == .(round(log(res$de), 1)) ~ ", " ~ #' sigma == .(round(res$sig, 1)) ~ ", " ~ #' rho == .(round(res$p0, 2)))) #' #' #' #' # (3) Run the minimum age model with bootstrap #' # NOTE: Bootstrapping is computationally intensive #' # (3.1) run the minimum age model with default values for bootstrapping #' calc_MinDose(data = ExampleData.DeValues$CA1, #' sigmab = 0.15, #' bootstrap = TRUE) #' #' # (3.2) Bootstrap control parameters #' mam <- calc_MinDose(data = ExampleData.DeValues$CA1, #' sigmab = 0.15, #' bootstrap = TRUE, #' bs.M = 300, #' bs.N = 500, #' bs.h = 4, #' sigmab.sd = 0.06, #' plot = FALSE) #' #' # Plot the results #' plot_RLum(mam) #' #' # save bootstrap results in a separate variable #' bs <- get_RLum(mam, "bootstrap") #' #' # show structure of the bootstrap results #' str(bs, max.level = 2, give.attr = FALSE) #' #' # print summary of minimum dose and likelihood pairs #' summary(bs$pairs$gamma) #' #' # Show polynomial fits of the bootstrap pairs #' 
bs$poly.fits$poly.three #' #' # Plot various statistics of the fit using the generic plot() function #' par(mfcol=c(2,2)) #' plot(bs$poly.fits$poly.three, ask = FALSE) #' #' # Show the fitted values of the polynomials #' summary(bs$poly.fits$poly.three$fitted.values) #' } #' #' @md #' @export calc_MinDose <- function( data, sigmab, log = TRUE, par = 3, bootstrap = FALSE, init.values, level = 0.95, log.output = FALSE, plot = TRUE, multicore = FALSE, ... ){ ## ============================================================================## ## CONSISTENCY CHECK OF INPUT DATA ## ============================================================================## if (!missing(data)) { if (!is(data, "data.frame") & !is(data, "RLum.Results")) { stop("[calc_MinDose] Error: 'data' object has to be of type\n 'data.frame' or 'RLum.Results'!") } else { if (is(data, "RLum.Results")) { data <- get_RLum(data, "data") } } } if (any(!complete.cases(data))) { message(paste("\n[calc_MinDose] Warning:\nInput data contained NA/NaN values,", "which were removed prior to calculations!")) data <- data[complete.cases(data), ] } if (!missing(init.values) && length(init.values) != 4) { stop("[calc_MinDose] Error: Please provide initial values for all model parameters. ", "Missing parameter(s): ", paste(setdiff(c("gamma", "sigma", "p0", "mu"), names(init.values)), collapse = ", "), call. = FALSE) } ##============================================================================## ## ... ARGUMENTS ##============================================================================## extraArgs <- list(...) 
## check if this function is called by calc_MaxDose() if ("invert" %in% names(extraArgs)) { invert <- extraArgs$invert if (!log) { log <- TRUE # overwrite user choice as max dose model currently only supports the logged version cat(paste("\n[WARNING] The maximum dose model only supports the logged version.", "'log' was automatically changed to TRUE.\n\n")) } } else { invert <- FALSE } ## console output if ("verbose" %in% names(extraArgs)) { verbose <- extraArgs$verbose } else { verbose <- TRUE } ## bootstrap replications # first level bootstrap if ("bs.M" %in% names(extraArgs)) { M <- as.integer(extraArgs$bs.M) } else { M <- 1000 } # second level bootstrap if ("bs.N" %in% names(extraArgs)) { N <- as.integer(extraArgs$bs.N) } else { N <- 3*M } # KDE bandwith if ("bs.h" %in% names(extraArgs)) { h <- extraArgs$bs.h } else { h <- (sd(data[ ,1])/sqrt(length(data[ ,1])))*2 } # standard deviation of sigmab if ("sigmab.sd" %in% names(extraArgs)) { sigmab.sd <- extraArgs$sigmab.sd } else { sigmab.sd <- 0.04 } if ("debug" %in% names(extraArgs)) { debug <- extraArgs$debug } else { debug <- FALSE } if ("cores" %in% names(extraArgs)) { cores <- extraArgs$cores } else { cores <- parallel::detectCores() if (multicore) message(paste("Logical CPU cores detected:", cores)) } ## WARNINGS ---- # if (!debug) # options(warn = -1) ##============================================================================## ## START VALUES ##============================================================================## if (missing(init.values)) { start <- list(gamma = ifelse(log, log(quantile(data[ ,1], probs = 0.25, na.rm = TRUE)), quantile(data[ ,1], probs = 0.25, na.rm = TRUE)), sigma = 1.2, p0 = 0.01, mu = ifelse(log, log(quantile(data[ ,1], probs = 0.25, na.rm = TRUE)), mean(data[ ,1]))) } else { start <- list(gamma = ifelse(log, log(init.values$gamma), init.values$gamma), sigma = ifelse(log, log(init.values$sigma), init.values$sigma), p0 = init.values$p0, mu = ifelse(log, log(init.values$mu), 
init.values$mu)) } ##============================================================================## ## ESTIMATE BOUNDARY PARAMETERS ##============================================================================## boundaries <- list( # gamma.lower = min(data[ ,1]/10), # gamma.upper = max(data[ ,1]*1.1), # sigma.lower = 0, # sigma.upper = 5, # mu.lower = min(data[ ,1])/10, # mu.upper = max(data[ ,1]*1.1) gamma.lower = -Inf, gamma.upper = Inf, sigma.lower = 0, sigma.upper = Inf, p0.lower = 0, p0.upper = 1, mu.lower = -Inf, mu.upper = Inf ) boundaries <- modifyList(boundaries, list(...)) # combine lower and upper boundary values to vectors if (log) { xlb <- c(ifelse(is.infinite(boundaries$gamma.lower), boundaries$gamma.lower, log(boundaries$gamma.lower)), boundaries$sigma.lower, boundaries$p0.lower) xub <- c(ifelse(is.infinite(boundaries$gamma.upper), boundaries$gamma.upper, log(boundaries$gamma.upper)), boundaries$sigma.upper, boundaries$p0.lower) } else { xlb <- c(boundaries$gamma.lower, boundaries$sigma.lower, boundaries$p0.lower) xub <- c(boundaries$gamma.upper, exp(boundaries$sigma.upper), boundaries$p0.lower) } if (par == 4) { xlb <- c(xlb, ifelse(log, ifelse(is.infinite(boundaries$mu.lower), -Inf, log(boundaries$mu.lower)), boundaries$mu.lower)) xub <- c(xub, ifelse(log, ifelse(is.infinite(boundaries$mu.upper), -Inf, log(boundaries$mu.upper)), boundaries$mu.upper)) } ##============================================================================## ## AUXILLARY FUNCTIONS ##============================================================================## # THIS FUNCTION CALCULATES THE NEGATIVE LOG LIKELIHOOD OF THE DATA Neglik_f <- function(gamma, sigma, p0, mu, data) { # this calculates the negative of the log likelihood of the # data (data) for a given set of parameters (gamma, sigma, p0) # data is a 2x2 matrix of data: De, rel_error (including sigma_b) # recover the data zi <- data[ ,1] si <- data[ ,2] n <- length(zi) # in the MAM-3 gamma and mu are assumed to be 
equal if (par == 3) mu <- gamma # calculate sigma^2 + seld^2, mu0 and sigma0 s2 <- sigma^2 + si^2 sigma0 <- 1/sqrt(1/sigma^2 + 1/si^2) mu0 <- (mu/sigma^2 + zi/si^2)/(1/sigma^2 + 1/si^2) # calculate the log-likelihood logsqrt2pi <- 0.5*log(2*pi) res0 <- (gamma - mu0)/sigma0 res1 <- (gamma - mu)/sigma lf1i <- log(p0) - log(si) - 0.5*((zi-gamma)/si)^2 - logsqrt2pi lf2i <- log(1-p0) - 0.5*log(s2) - 0.5*(zi-mu)^2/s2 - logsqrt2pi lf2i <- lf2i + log(1-pnorm(res0)) - log(1-pnorm(res1)) llik <- log( exp(lf1i) + exp(lf2i) ) negll <- -sum(llik) return(negll) } # THIS MAXIMIZES THE Neglik_f LIKELIHOOD FUNCTION AND RETURNS AN MLE OBJECT Get_mle <- function(data) { # TODO: PROPER ERROR HANDLING tryCatch({ suppressWarnings( mle <- bbmle::mle2(data = list(data = data), optimizer = "nlminb", lower=c(gamma = boundaries$gamma.lower, sigma = boundaries$sigma.lower, p0 = boundaries$p0.lower, mu = boundaries$mu.lower), upper=c(gamma = boundaries$gamma.upper, sigma = boundaries$sigma.upper, p0 = boundaries$p0.upper, mu = boundaries$mu.upper), minuslogl = Neglik_f, control = list(iter.max = 1000L), start = start) ) }, error = function(e) { stop(paste("Sorry, seems like I encountered an error...:", e), call. 
= FALSE) }) return(mle) } ##============================================================================## ## MAIN PROGRAM ##============================================================================## # combine errors if (log) { if (invert) { lcd <- log(data[ ,1])*-1 x.offset <- abs(min(lcd)) lcd <- lcd+x.offset } else { lcd <- log(data[ ,1]) } lse <- sqrt((data[ ,2]/data[ ,1])^2 + sigmab^2) } else { lcd <- data[ ,1] lse <- sqrt(data[ ,2]^2 + sigmab^2) } # create new data frame with DE and combined relative error dat <- cbind(lcd, lse) # get the maximum likelihood estimate ests <- Get_mle(dat) # check if any standard errors are NA or NaN coef_err <- suppressWarnings( t(as.data.frame(bbmle::summary(ests)@coef[ ,2])) ) if (debug) print(bbmle::summary(ests)) if (any(is.nan(coef_err))) coef_err[which(is.nan(coef_err))] <- t(as.data.frame(ests@coef))[which(is.nan(coef_err))] / 100 if (any(is.na(coef_err))) coef_err[which(is.na(coef_err))] <- t(as.data.frame(ests@coef))[which(is.na(coef_err))] / 100 if (par == 3) which <- c("gamma", "sigma", "p0") if (par == 4) which <- c("gamma", "sigma", "p0", "mu") # calculate profile log likelihoods prof <- suppressWarnings( bbmle::profile(ests, which = which, std.err = as.vector(coef_err), #try_harder = TRUE, quietly = TRUE, tol.newmin = Inf, skiperrs = TRUE, prof.lower=c(gamma = -Inf, sigma = 0, p0 = 0, mu = -Inf), prof.upper=c(gamma = Inf, sigma = Inf, p0 = 1, mu = Inf) ) ) # Fallback when profile() returns a 'better' fit maxsteps <- 100 cnt <- 1 while (!inherits(prof, "profile.mle2")) { message(paste0("## Trying to find a better fit (", cnt, "/10) ##")) if (maxsteps == 0L) stop(paste("Sorry, but I can't find a converging fit for the profile log-likelihood."), call. 
= FALSE) prof <- suppressWarnings( bbmle::profile(ests, which = which, std.err = as.vector(coef_err), try_harder = TRUE, quietly = TRUE, maxsteps = maxsteps, tol.newmin = Inf, skiperrs = TRUE, prof.lower=c(gamma = -Inf, sigma = 0, p0 = 0, mu = -Inf), prof.upper=c(gamma = Inf, sigma = Inf, p0 = 1, mu = Inf) ) ) maxsteps <- maxsteps - 10 cnt <- cnt + 1 } ## TODO: reduce the redundant code ## DELETE rows where z = -Inf/Inf prof@profile$gamma <- prof@profile$gamma[which(prof@profile$gamma["z"] != Inf), ] prof@profile$gamma <- prof@profile$gamma[which(prof@profile$gamma["z"] != -Inf), ] prof@profile$sigma <- prof@profile$sigma[which(prof@profile$sigma["z"] != Inf), ] prof@profile$sigma <- prof@profile$sigma[which(prof@profile$sigma["z"] != -Inf), ] prof@profile$p0 <- prof@profile$p0[which(prof@profile$p0["z"] != Inf), ] prof@profile$p0 <- prof@profile$p0[which(prof@profile$p0["z"] != -Inf), ] if (par == 4) { prof@profile$mu <- prof@profile$mu[which(prof@profile$mu["z"] != Inf), ] prof@profile$mu <- prof@profile$mu[which(prof@profile$mu["z"] != -Inf), ] } # calculate Bayesian Information Criterion (BIC) BIC <- BIC(ests) # retrieve results from mle2-object pal <- if (log) { if (invert) { exp((bbmle::coef(ests)[["gamma"]]-x.offset)*-1) } else { exp(bbmle::coef(ests)[["gamma"]]) } } else { bbmle::coef(ests)[["gamma"]] } sig <- bbmle::coef(ests)[["sigma"]] p0end <- bbmle::coef(ests)[["p0"]] if (par == 4) { muend <- ifelse(log, exp(bbmle::coef(ests)[["mu"]]), bbmle::coef(ests)[["mu"]]) } else { muend <- NA } ##============================================================================## ## ERROR CALCULATION #### METHOD 1: follow the instructions of Galbraith & Roberts (2012) #### # "If the likelihood profile is symmetrical about the parameter, an approximate standard error # can be calculated by dividing the length of this interval by 3.92" conf <- suppressWarnings( as.data.frame(bbmle::confint(prof, tol.newmin = Inf, quietly = TRUE, level = level)) ) class(conf[,1]) <- 
class(conf[,2]) <- "numeric" if (invert) { conf[1, ] <- (conf[1, ]-x.offset)*-1 t <- conf[1,1] conf[1,1] <- conf[1,2] conf[1,2] <- t } gamma_err <- if (log) { (exp(conf["gamma",2])-exp(conf["gamma",1]))/3.92 } else { (conf["gamma",2]-conf["gamma",1])/3.92 } ##============================================================================## ## AGGREGATE RESULTS summary <- data.frame(de=pal, de_err=gamma_err, ci_level = level, "ci_lower"=ifelse(log, exp(conf["gamma",1]), conf["gamma",1]), "ci_upper"=ifelse(log, exp(conf["gamma",2]), conf["gamma",2]), par=par, sig=ifelse(log, exp(sig), sig), p0=p0end, mu=muend, Lmax=-ests@min, BIC=BIC) call <- sys.call() args <- list(log=log, sigmab=sigmab, par = par, bootstrap=bootstrap, init.values=start, log.output = log.output, bs.M=M, bs.N=N, bs.h=h, sigmab.sd=sigmab.sd) ##============================================================================## ## BOOTSTRAP ##============================================================================## if (bootstrap) { ## BOOTSTRAP FUNCTIONS ---- # Function that draws N+M sets of integer values from 1:n and returns # both the indices and frequencies draw_Freq <- function() { f <- R <- matrix(0L, N+M, n) for (i in seq_len(N+M)) { R[i, ] <- sample(x = n, size = n, replace = TRUE) f[i, ] <- tabulate(R, n) } return(list(R = R, freq = f)) } # Function that adds the additional error sigmab to each individual DE error combine_Errors <- function(d, e) { if (log) { d[ ,2] <- sqrt((d[ ,2]/d[ ,1])^2 + e^2) d[ ,1] <- log(d[ ,1]) } else { d[ ,2] <- sqrt(d[ ,2]^2 + e^2) } return(d) } # Function that produces N+M replicates from the original data set using # randomly sampled indices with replacement and adding a randomly drawn # sigmab error create_Replicates <- function(f, s) { d <- apply(f$R, 1, function(x) data[x, ]) r <- mapply(function(x, y) combine_Errors(x, y), d, s, SIMPLIFY = FALSE) return(r) } # Function to extract the estimate of gamma from mle2 objects and converting # it back to the 'normal' 
scale save_Gamma <- function(d) { if (log) { if (invert) { m <- exp((bbmle::coef(d)[["gamma"]]-x.offset)*-1) } else { m <- exp(bbmle::coef(d)[["gamma"]]) } } else { m <- bbmle::coef(d)[["gamma"]] } return(m) } # Function that takes each of the N replicates and produces a kernel density # estimate of length n. The normalised values are then returned as a matrix # with dimensions [N, n] get_KDE <- function(d) { f <- approx(density(x=d[ ,1], kernel="gaussian", bw = h), xout = d[ ,1]) pStarTheta <- as.vector(f$y / sum(f$y)) x <- matrix(t(pStarTheta/(1/n)), N, n, byrow = TRUE) return(x) } # Function that calculates the product term of the recycled bootstrap get_ProductTerm <- function(Pmat, b2Pmatrix) { prodterm <- apply(Pmat^b2Pmatrix$freq[1:N, ], 1, prod) return(prodterm) } # Function that calculates the pseudo likelihoods for M replicates and # returns the dose-likelihood pairs make_Pairs <- function(theta, b2mamvec, prodterm) { pairs <- matrix(0, M, 2) for (i in seq_len(M)) { thetavec <- matrix(theta[i], N, 1) kdthis <- (thetavec-b2mamvec)/h kd1 <- dnorm(kdthis) kd2 <- kd1*prodterm[[i]] kd <- sum(kd2, na.rm = TRUE) likelihood <- (1/(N*h))*kd pairs[i, ] <- c(theta[i], likelihood) } return(pairs) } ## START BOOTSTRAP ---- msg <- sprintf(paste("\n [calc_MinDose] \n\nRecycled Bootstrap", "\n\nParameters:", "\n M = %d", "\n N = %d", "\n sigmab = %.2f \U00B1 %.2f", "\n h = %.2f", "\n\n Creating %d bootstrap replicates..."), M, N, sigmab, sigmab.sd, h, N+M) message(msg) n <- length(data[ ,1]) # Draw N+M samples of a normale distributed sigmab sigmab <- rnorm(N + M, sigmab, sigmab.sd) # Draw N+M random indices and their frequencies b2Pmatrix <- draw_Freq() # Finally draw N+M bootstrap replicates replicates <- create_Replicates(b2Pmatrix, sigmab) # MULTICORE: The call to 'Get_mle' is the bottleneck of the function. # Using multiple CPU cores can reduce the computation cost, but may # not work for all machines. 
if (multicore) { message(paste("\n Spawning", cores, "instances of R for parallel computation. This may take a few seconds...")) cl <- parallel::makeCluster(cores) message("\n Done! Applying the model to all replicates. This may take a while...") mle <- parallel::parLapply(cl, replicates, Get_mle) parallel::stopCluster(cl) } else { message("\n Applying the model to all replicates. This may take a while...") mle <- lapply(replicates, Get_mle) } # Final bootstrap calculations message("\n Calculating the likelihoods...") # Save 2nd- and 1st-level bootstrap results (i.e. estimates of gamma) b2mamvec <- as.matrix(sapply(mle[1:N], save_Gamma, simplify = TRUE)) theta <- sapply(mle[c(N+1):c(N+M)], save_Gamma) # Calculate the probability/pseudo-likelihood Pmat <- lapply(replicates[c(N+1):c(N+M)], get_KDE) prodterm <- lapply(Pmat, get_ProductTerm, b2Pmatrix) # Save the bootstrap results as dose-likelihood pairs pairs <- make_Pairs(theta, b2mamvec, prodterm) ## --------- FIT POLYNOMIALS -------------- ## message("\n Fit curves to dose-likelihood pairs...") # polynomial fits of increasing degrees ## if the input values are too close to zero, we may get ## Inf values >>> we remove them here with a warning if(any(is.infinite(pairs))){ inf_count <- length(which(is.infinite(pairs[,2])))/nrow(pairs) pairs <- pairs[!is.infinite(pairs[,2]),] warning( paste0("[calc_MinDose()] Inf values produced by bootstrapping removed for LOcal polynominal regrESSion fitting (loess)!\n The removed values represent ",round(inf_count * 100,2)," % of the total dataset. This message usually indicates that your values are close to 0."), call. 
= FALSE) } poly.three <- lm(pairs[ ,2] ~ poly(pairs[ ,1], degree = 3, raw = TRUE)) poly.four <- lm(pairs[ ,2] ~ poly(pairs[ ,1], degree = 4, raw = TRUE)) poly.five <- lm(pairs[ ,2] ~ poly(pairs[ ,1], degree = 5, raw = TRUE)) poly.six <- lm(pairs[ ,2] ~ poly(pairs[ ,1], degree = 6, raw = TRUE)) ## --------- FIT LOESS -------------- ## # Polynomials are probably not reasonable and often suffer badly from # overfitting, especially towards the margins of the fitted data. In this # particular use case polynomials may suggest a multimodal likelihood # distribution where actually none is given. The non-parametric # LOESS (LOcal polynomial regrESSion) often yields better results than # standard polynomials. loess <- loess(pairs[ ,2] ~ pairs[ ,1]) }#EndOf::Bootstrap ##============================================================================## ## CONSOLE PRINT ##============================================================================## if (verbose) { if (!bootstrap) { cat("\n----------- meta data -----------\n") print(data.frame(n=length(data[ ,1]), par=par, sigmab=sigmab, logged=log, Lmax=-ests@min, BIC=BIC, row.names = "")) cat("\n--- final parameter estimates ---\n") tmp <- round(data.frame( gamma=ifelse(!invert, ifelse(log, exp(bbmle::coef(ests)[["gamma"]]), bbmle::coef(ests)[["gamma"]]), ifelse(log, exp((bbmle::coef(ests)[["gamma"]]-x.offset)*-1),(bbmle::coef(ests)[["gamma"]]-x.offset)*-1) ), sigma=ifelse(log, exp(bbmle::coef(ests)[["sigma"]]), bbmle::coef(ests)[["sigma"]]), p0=bbmle::coef(ests)[["p0"]], mu=ifelse(par==4, muend, 0), row.names="", check.names = FALSE), 2) if (log && log.output) { tmp$`log(gamma)` = round(log(tmp$gamma),2) tmp$`log(sigma)` = round(log(tmp$sigma),2) if (par == 4) tmp$`log(mu)` = round(log(tmp$mu),2) } print(tmp) cat("\n------ confidence intervals -----\n") conf_print <- round(conf, 2) if (log) { logged_rows <- row.names(conf_print) != "p0" conf_print[logged_rows, ] <- exp(conf_print[logged_rows, ]) conf_print <- round(conf_print, 2) 
if (log.output) { conf_tmp <- round(conf, 2) conf_tmp[which(rownames(conf_tmp) == "p0"), ] <- "-" conf_print <- cbind(round(conf_print, 2), setNames(conf_tmp, names(conf_tmp))) conf_print <- rbind( setNames(data.frame("", "", "(logged)", "(logged)", row.names = "", stringsAsFactors = FALSE), names(conf_print)), conf_print) } } print(conf_print) cat("\n------ De (asymmetric error) -----\n") print(round(data.frame(De=pal, "lower"=ifelse(log, exp(conf["gamma",1]), conf["gamma",1]), "upper"=ifelse(log, exp(conf["gamma",2]), conf["gamma",2]), row.names=""), 2)) cat("\n------ De (symmetric error) -----\n") print(round(data.frame(De=pal, error=gamma_err, row.names=""), 2)) } else if (bootstrap) { message("\n Finished!") } } ##============================================================================## ## RETURN VALUES ##============================================================================## if (invert) prof@profile$gamma$par.vals[ ,"gamma"] <- rev((prof@profile$gamma$par.vals[ ,"gamma"] - x.offset)*-1) if (!bootstrap) pairs <- poly.three <- poly.four <- poly.five <- poly.six <- loess <- NULL newRLumResults.calc_MinDose <- set_RLum( class = "RLum.Results", originator = "calc_MinDose", data = list(summary = summary, data = data, args = args, call = call, mle = ests, BIC = BIC, confint = conf, profile = prof, bootstrap = list( pairs = list(gamma=pairs), poly.fits = list(poly.three = poly.three, poly.four = poly.four, poly.five = poly.five, poly.six = poly.six), loess.fit = loess))) ##=========## ## PLOTTING if (plot) try(plot_RLum.Results(newRLumResults.calc_MinDose, ...)) # if (!debug) # options(warn = 0) if (!is.na(summary$mu) && !is.na(summary$de)) { if (log(summary$de) > summary$mu) warning("Gamma is larger than mu. Consider re-running the model", " with new boundary values (see details '?calc_MinDose').", call. 
= FALSE) }

  invisible(newRLumResults.calc_MinDose)
}
Luminescence/R/write_R2BIN.R0000644000176200001440000011517314264017373015255 0ustar liggesusers#' @title Export Risoe.BINfileData into Risø BIN/BINX-file
#'
#' @description Exports a `Risoe.BINfileData` object in a `*.bin` or `*.binx` file that can be
#' opened by the Analyst software or other Risø software.
#'
#' @details
#' The structure of the exported binary data follows the data structure
#' published in the Appendices of the *Analyst* manual p. 42.
#'
#' If
#' `LTYPE`, `DTYPE` and `LIGHTSOURCE` are not of type
#' [character], no transformation into numeric values is done.
#'
#' @param object [Risoe.BINfileData-class] (**required**):
#' input object to be stored in a bin file.
#'
#' @param file [character] (**required**):
#' file name and path of the output file
#'
#' - `[WIN]`: `write_R2BIN(object, "C:/Desktop/test.bin")`
#' - `[MAC/LINUX]`: `write_R2BIN("/User/test/Desktop/test.bin")`
#'
#' @param version [character] (*optional*):
#' version number for the output file. If no value is provided, the highest
#' version number from the [Risoe.BINfileData-class] is taken automatically.
#'
#' **Note:**
#' This argument can be used to convert BIN-file versions.
#'
#' @param compatibility.mode [logical] (*with default*):
#' this option recalculates the position values if necessary and sets the max.
#' value to 48. The old position number is appended as a comment (e.g., 'OP: 70').
#' This option accounts for potential compatibility problems with the Analyst software.
#' It further limits the maximum number of points per curve to 9,999. If a curve contains more
#' data, the curve data gets binned using the smallest possible bin width.
#'
#' @param txtProgressBar [logical] (*with default*):
#' enables or disables [txtProgressBar].
#'
#' @return Writes a binary file.
#'
#' @note
#' The function just roughly checks the data structures. The validity of
#' the output data depends on the user.
#'
#' The validity of the file path is not further checked. BIN-file conversions
#' using the argument `version` may be a lossy conversion, depending on the
#' chosen input and output data (e.g., conversion from version 08 to 07 to 06 to 05 to 04 or 03).
#'
#' **Warning**
#'
#' Although the coding was done carefully, it seems that the BIN/BINX-files
#' produced by Risø DA 15/20 TL/OSL readers slightly differ on the byte level.
#' No obvious differences are observed in the METADATA; however, the
#' BIN/BINX-file may not be fully compatible, at least not identical to the ones
#' directly produced by the Risø readers!
#'
#' @section Function version: 0.5.2
#'
#' @author
#' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#' @note
#' ROI definitions (introduced in BIN-file version 8) are not supported!
#' They are furthermore ignored by the function [read_BIN2R].
#'
#' @seealso [read_BIN2R], [Risoe.BINfileData-class], [writeBin]
#'
#' @references
#' DTU Nutech, 2016. The Sequence Editor, Users Manual, February, 2016.
#' [https://www.fysik.dtu.dk]()
#'
#' @keywords IO
#'
#' @examples
#' ##load example dataset
#' file <- system.file("extdata/BINfile_V8.binx", package = "Luminescence")
#' temp <- read_BIN2R(file)
#'
#' ##create temporary file path
#' ##(for usage replace by own path)
#' temp_file <- tempfile(pattern = "output", fileext = ".binx")
#'
#' ##export to temporary file path
#' write_R2BIN(temp, file = temp_file)
#'
#' @md
#' @export
write_R2BIN <- function(
  object,
  file,
  version,
  compatibility.mode = FALSE,
  txtProgressBar = TRUE
){

  # Config ------------------------------------------------------------------

  ##set supported BIN format versions (raw bytes, as stored in the file header)
  VERSION.supported <- as.raw(c(3, 4, 5, 6, 7, 8))

  # Check integrity ---------------------------------------------------------

  ##check if input object is of type 'Risoe.BINfileData'
  if(is(object, "Risoe.BINfileData") == FALSE){
    stop("[write_R2BIN()] Input object is not of type Risoe.BINfileData!", call. = FALSE)
  }

  ##check if it fulfils the latest class definition: the METADATA slot must
  ##have as many columns as a freshly created Risoe.BINfileData object
  if(ncol(object@METADATA) != ncol(set_Risoe.BINfileData()@METADATA)){
    stop("[write_R2BIN()] The number of columns in your slot 'METADATA' does not fit to the latest definition. What you are probably trying to do is to export a Risoe.BINfileData object you generated by your own or you imported with an old package version some time ago. Please re-import the BIN-file using the function read_BIN2R().", call. = FALSE)
  }

  ##check if input file is of type 'character'
  if(is(file, "character") == FALSE){
    stop("[write_R2BIN()] argument 'file' has to be of type character!", call.
= FALSE)
  }

  # Check Risoe.BINfileData Structure ----------------------------------------

  ##check whether any record in the DATA slot exceeds 9999 data points;
  ##this always needs to be run since the BIN/BINX format cannot store
  ##longer curves
  temp_check <- vapply(object@DATA, function(x) length(x) > 9999, FUN.VALUE = logical(1))

  ##force compatibility
  if(compatibility.mode && any(temp_check)){
    ##drop warning
    warning("[write_R2BIN()] Compatibility mode selected: Some data sets are longer than 9,999 points and will be binned!",
            call. = FALSE)

    ##bin data to reduce the amount of data where a curve is too long
    object@DATA <- lapply(object@DATA, function(x){
      if(length(x) > 9999){
        ##smallest bin width that brings the curve down to <= 9999 points
        bin_width <- ceiling(length(x) / 9999)

        ##trim the vector to an exact multiple of bin_width; otherwise
        ##matrix() would recycle values and corrupt the last bin
        ##(the previous code removed at most one point, which did not
        ##guarantee divisibility)
        x <- x[seq_len(bin_width * (length(x) %/% bin_width))]

        ##bin by summing the columns of a bin_width x n matrix
        colSums(matrix(x, nrow = bin_width))

      } else {
        x
      }
    })

    ##reset temp_check: after binning no record exceeds the limit
    temp_check <- FALSE

    ##get new number of points
    temp_NPOINTS <- sapply(object@DATA, length)

    ##correct LENGTH (4 bytes per data point)
    object@METADATA[["LENGTH"]] <-
      object@METADATA[["LENGTH"]] - (4 * object@METADATA[["NPOINTS"]]) + (temp_NPOINTS * 4)

    ##correct PREVIOUS: by definition the length of the *preceding* record,
    ##i.e. 0 for the first record and LENGTH[i - 1] afterwards
    ##(the previous code used LENGTH[2:n], i.e. each record's own length,
    ##which is inconsistent with the version-conversion branch below)
    object@METADATA[["PREVIOUS"]] <-
      c(0, object@METADATA[["LENGTH"]][-length(object@METADATA[["LENGTH"]])])

    ##correct NPOINTS
    object@METADATA[["NPOINTS"]] <- temp_NPOINTS

    ##write comment
    object@METADATA[["COMMENT"]] <- paste(object@METADATA[["COMMENT"]], " - binned")
  }

  if(any(temp_check))
    stop(paste("[write_R2BIN()]", length(which(temp_check)), "out of", length(temp_check),
               "records contain more than 9,999 data points. This violates the BIN/BINX-file definition!"),
         call.
= FALSE) ##remove rm(temp_check) ##VERSION ##If missing version argument set to the highest value if(missing(version)){ version <- as.raw(max(as.numeric(object@METADATA[,"VERSION"]))) version.original <- version }else{ version.original <- as.raw(max(as.numeric(object@METADATA[,"VERSION"]))) version <- as.raw(version) object@METADATA[["VERSION"]] <- version ##Furthermore, entries length needed to be recalculated if(version.original != version){ ##stepping decision header.stepping <- switch( EXPR = as.character(version), "08" = 507, "07" = 447, "06" = 447, "05" = 423, "04" = 272, "03" = 272) object@METADATA[,"LENGTH"] <- vapply(1:nrow(object@METADATA), function(x){ header.stepping + 4 * object@METADATA[x,"NPOINTS"] }, numeric(1)) object@METADATA[,"PREVIOUS"] <- vapply(1:nrow(object@METADATA), function(x){ if(x == 1) 0 else header.stepping + 4 * object@METADATA[x-1,"NPOINTS"] }, numeric(1)) } } ##check whether this file can be exported without problems due to the latest specifications if(ncol(object@METADATA) != 80){ stop("[write_R2BIN()] Your Risoe.BINfileData object seems not to be compatible with the latest specification of this S4-class object. You are probably trying to export a Risoe.BINfileData from your workspace you produced manually or with an old version.", call. = FALSE) } ##Check if the BINfile object contains of unsupported versions if((as.raw(object@METADATA[1,"VERSION"]) %in% VERSION.supported) == FALSE || version %in% VERSION.supported == FALSE){ ##show error message error.text <- paste("[write_R2BIN()] Writing BIN-files in format version (", object@METADATA[1,"VERSION"],") is currently not supported! Supported version numbers are: ", paste(VERSION.supported,collapse=", "),".",sep="") stop(error.text) } ##CHECK file name for version == 06 it has to be *.binx and correct for it if(version == 05 | version == 06 | version == 07 | version == 08){ ##grep file ending temp.file.name <- unlist(strsplit(file, "[:.:]")) ##*.bin? 
    ## >> correct to binx
    if(temp.file.name[length(temp.file.name)]=="bin"){
      temp.file.name[length(temp.file.name)] <- "binx"
      file <- paste(temp.file.name, collapse=".")
    }
  }

  ##SEQUENCE: fixed field width of 8 bytes
  if (suppressWarnings(max(nchar(as.character(object@METADATA[,"SEQUENCE"]), type = "bytes"), na.rm = TRUE)) > 8) {
    stop("[write_R2BIN()] Value in 'SEQUENCE' exceed storage limit!")
  }

  ##USER: fixed field width of 8 bytes
  if (suppressWarnings(max(nchar(as.character(object@METADATA[,"USER"]), type = "bytes"), na.rm = TRUE)) > 8) {
    stop("[write_R2BIN()] 'USER' exceed storage limit!")
  }

  ##SAMPLE: fixed field width of 20 bytes
  if (suppressWarnings(max(nchar(as.character(object@METADATA[,"SAMPLE"]), type = "bytes"), na.rm = TRUE)) > 20) {
    stop("[write_R2BIN()] 'SAMPLE' exceed storage limit!")
  }

  ##enables compatibility to the Analyst as the max value for POSITION becomes 48
  if(compatibility.mode){
    ##just do if position values > 48
    if(max(object@METADATA[,"POSITION"])>48){
      ##grep relevant IDs
      temp.POSITION48.id <- which(object@METADATA[,"POSITION"]>48)

      ##find unique values
      temp.POSITION48.unique <- unique(object@METADATA[temp.POSITION48.id,"POSITION"])

      ##set translation vector starting from 1 and ending at 48
      temp.POSITION48.new <- rep_len(1:48, length.out = length(temp.POSITION48.unique))

      ##recalculate POSITION and update comment (the old position is kept as 'OP:<n>')
      for(i in 1:length(temp.POSITION48.unique)){
        object@METADATA[object@METADATA[,"POSITION"] == temp.POSITION48.unique[i],"COMMENT"] <-
          paste0(object@METADATA[object@METADATA[,"POSITION"] == temp.POSITION48.unique[i],"COMMENT"],
                 "OP:",object@METADATA[object@METADATA[,"POSITION"] == temp.POSITION48.unique[i],"POSITION"])

        object@METADATA[object@METADATA[,"POSITION"] == temp.POSITION48.unique[i],"POSITION"] <-
          temp.POSITION48.new[i]
      }
    }
  }

  ##COMMENT: fixed field width of 80 bytes
  if(max(nchar(as.character(object@METADATA[,"COMMENT"]), type="bytes"))>80){
    stop("[write_R2BIN()] 'COMMENT' exceeds storage limit!", call.
= FALSE)
  }

  # Translation Matrices -----------------------------------------------------
  ##each matrix maps the numeric file code (col 1) to its string label (col 2)

  ##LTYPE
  LTYPE.TranslationMatrix <- matrix(NA, nrow=14, ncol=2)
  LTYPE.TranslationMatrix[,1] <- 0:13
  LTYPE.TranslationMatrix[,2] <- c( "TL", "OSL", "IRSL", "M-IR", "M-VIS", "TOL", "TRPOSL", "RIR", "RBR", "USER", "POSL", "SGOSL", "RL", "XRF")

  ##DTYPE
  DTYPE.TranslationMatrix <- matrix(NA, nrow=8, ncol=2)
  DTYPE.TranslationMatrix[,1] <- 0:7
  DTYPE.TranslationMatrix[,2] <- c("Natural","N+dose","Bleach", "Bleach+dose","Natural (Bleach)", "N+dose (Bleach)","Dose","Background")

  ##LIGHTSOURCE
  LIGHTSOURCE.TranslationMatrix <- matrix(NA, nrow=8, ncol=2)
  LIGHTSOURCE.TranslationMatrix[,1] <- 0:7
  LIGHTSOURCE.TranslationMatrix[,2] <- c( "None", "Lamp", "IR diodes/IR Laser", "Calibration LED", "Blue Diodes", "White light", "Green laser (single grain)", "IR laser (single grain)" )

  ##TRANSLATE VALUES IN METADATA: character/factor labels -> integer file codes

  ##LTYPE
  if(is(object@METADATA[1,"LTYPE"], "character") == TRUE | is(object@METADATA[1,"LTYPE"], "factor") == TRUE){
    object@METADATA[,"LTYPE"]<- sapply(1:length(object@METADATA[,"LTYPE"]),function(x){
      as.integer(LTYPE.TranslationMatrix[object@METADATA[x,"LTYPE"]==LTYPE.TranslationMatrix[,2],1])
    })
  }

  ##DTYPE
  if(is(object@METADATA[1,"DTYPE"], "character") == TRUE | is(object@METADATA[1,"DTYPE"], "factor") == TRUE){
    object@METADATA[,"DTYPE"]<- sapply(1:length(object@METADATA[,"DTYPE"]),function(x){
      as.integer(DTYPE.TranslationMatrix[object@METADATA[x,"DTYPE"]==DTYPE.TranslationMatrix[,2],1])
    })
  }

  ##LIGHTSOURCE
  if(is(object@METADATA[1,"LIGHTSOURCE"], "character") == TRUE | is(object@METADATA[1,"LIGHTSOURCE"], "factor") == TRUE){
    object@METADATA[,"LIGHTSOURCE"]<- sapply(1:length(object@METADATA[,"LIGHTSOURCE"]),function(x){
      as.integer(LIGHTSOURCE.TranslationMatrix[ object@METADATA[x,"LIGHTSOURCE"]==LIGHTSOURCE.TranslationMatrix[,2],1])
    })}

  ##TIME: normalise to a 6-character "HHMMSS" string ("000000" if missing)
  object@METADATA[,"TIME"] <- vapply(1:length(object@METADATA[["TIME"]]),function(x){
    if(is.na(object@METADATA[["TIME"]][x])){
      "000000"
    }else{
      as.character(gsub(":","",object@METADATA[["TIME"]][x]))
    }
  }, character(1))

  ##TAG and SEL
  ##in TAG the information on the SEL column is stored, hence the values are
  ##copied to TAG before export
  object@METADATA[,"TAG"] <- ifelse(object@METADATA[,"SEL"] == TRUE, 1, 0)

  # SET FILE AND VALUES -----------------------------------------------------

  ##open binary connection for writing
  con<-file(file, "wb")

  ##get records
  n.records <- length(object@METADATA[,"ID"])

  ##output
  message(paste0("[write_R2BIN()]\n\t >> ",file))

  ##set progressbar
  if(txtProgressBar) pb <- txtProgressBar(min=0,max=n.records, char="=", style=3)

  # LOOP -------------------------------------------------------------------

  ID <- 1

  if(version == 03 || version == 04){
    ## version 03 and 04

    ##start loop for export BIN data
    while(ID<=n.records) {

      ##VERSION
      writeBin(as.raw(object@METADATA[ID,"VERSION"]), con, size = 1, endian="little")

      ##stepping (unused filler byte)
      writeBin(raw(length=1), con, size = 1, endian="little")

      ##LENGTH, PREVIOUS, NPOINTS (2-byte integers in v03/v04)
      writeBin(c(as.integer(object@METADATA[ID,"LENGTH"]), as.integer(object@METADATA[ID,"PREVIOUS"]), as.integer(object@METADATA[ID,"NPOINTS"])), con, size = 2, endian="little")

      ##LTYPE
      writeBin(object@METADATA[ID,"LTYPE"], con, size = 1, endian="little")

      ##LOW, HIGH, RATE
      writeBin(c(as.double(object@METADATA[ID,"LOW"]), as.double(object@METADATA[ID,"HIGH"]), as.double(object@METADATA[ID,"RATE"])), con, size = 4, endian="little")

      ##TEMPERATURE, XCOORD, YCOORD, TOLDELAY, TOLON, TOLOFF
      writeBin(c(as.integer(object@METADATA[ID,"TEMPERATURE"]), as.integer(object@METADATA[ID,"XCOORD"]), as.integer(object@METADATA[ID,"YCOORD"]), as.integer(object@METADATA[ID,"TOLDELAY"]), as.integer(object@METADATA[ID,"TOLON"]), as.integer(object@METADATA[ID,"TOLOFF"])), con, size = 2, endian="little")

      ##POSITION, RUN
      writeBin(c(as.integer(object@METADATA[ID,"POSITION"]), as.integer(object@METADATA[ID,"RUN"])), con, size = 1, endian="little")

      ##TIME (length byte followed by the characters)
      TIME_SIZE <- nchar(object@METADATA[ID,"TIME"])
      writeBin(as.integer(TIME_SIZE), con, size = 1, endian="little")
      writeChar(object@METADATA[ID,"TIME"], con, nchars =TIME_SIZE, useBytes=TRUE, eos = NULL)

      ##pad TIME field to its fixed width of 6 bytes
      if(6-TIME_SIZE>0){
        writeBin(raw(length = c(6-TIME_SIZE)), con, size = 1, endian="little")
      }

      ##DATE (length byte + 6 characters)
      writeBin(as.integer(6), con, size = 1 , endian="little")
      suppressWarnings(writeChar(as.character(object@METADATA[ID,"DATE"]), con, nchars = 6, useBytes=TRUE, eos = NULL))

      ##SEQUENCE
      ##count number of characters
      SEQUENCE_SIZE <- as.integer(
        nchar(as.character(object@METADATA[["SEQUENCE"]][ID]), type = "bytes", keepNA = FALSE))

      writeBin(SEQUENCE_SIZE, con, size = 1, endian="little")
      writeChar(as.character(object@METADATA[ID,"SEQUENCE"]), con, nchars = SEQUENCE_SIZE, useBytes=TRUE, eos = NULL)

      ##stepping (pad SEQUENCE to 8 bytes)
      if(8-SEQUENCE_SIZE>0){
        writeBin(raw(length = (8-SEQUENCE_SIZE)), con, size = 1, endian="little")
      }

      ##USER
      USER_SIZE <- as.integer(nchar(as.character(object@METADATA[ID,"USER"]), type="bytes"))
      writeBin(USER_SIZE, con, size = 1, endian="little")
      writeChar(as.character(object@METADATA[ID,"USER"]), con, nchars = USER_SIZE, useBytes=TRUE, eos = NULL)

      ##stepping (pad USER to 8 bytes)
      if(8-USER_SIZE>0){
        writeBin(raw(length = (8-USER_SIZE)), con, size = 1, endian="little")
      }

      ##DTYPE
      writeBin(object@METADATA[ID,"DTYPE"], con, size = 1, endian="little")

      ##IRR_TIME
      writeBin(as.double(object@METADATA[ID,"IRR_TIME"]), con, size = 4, endian="little")

      ##IRR_TYPE, IRR_UNIT
      writeBin(c(object@METADATA[ID,"IRR_TYPE"], object@METADATA[ID,"IRR_UNIT"]), con, size = 1, endian="little")

      ##BL_TIME
      writeBin(as.double(object@METADATA[ID,"BL_TIME"]), con, size = 4, endian="little")

      ##BL_UNIT
      writeBin(as.integer(object@METADATA[ID,"BL_UNIT"]), con, size = 1, endian="little")

      ##AN_TEMP, AN_TIME, NORM1, NORM2, NORM3, BG
      writeBin(c(as.double(object@METADATA[ID,"AN_TEMP"]), as.double(object@METADATA[ID,"AN_TIME"]), as.double(object@METADATA[ID,"NORM1"]), as.double(object@METADATA[ID,"NORM2"]), as.double(object@METADATA[ID,"NORM3"]), as.double(object@METADATA[ID,"BG"])), con, size = 4, endian="little")

      ##SHIFT
      writeBin(as.integer(object@METADATA[ID,"SHIFT"]), con, size = 2, endian="little")

      ##SAMPLE
      SAMPLE_SIZE <- as.integer(nchar(as.character(object@METADATA[ID,"SAMPLE"]), type="bytes"))

      ##avoid problems with empty sample names
      if(SAMPLE_SIZE == 0){
        SAMPLE_SIZE <- as.integer(2)
        object@METADATA[ID,"SAMPLE"] <- " "
      }

      writeBin(SAMPLE_SIZE, con, size = 1, endian="little")
      writeChar(as.character(object@METADATA[ID,"SAMPLE"]), con, nchars = SAMPLE_SIZE, useBytes=TRUE, eos = NULL)

      ##pad SAMPLE to its fixed width of 20 bytes
      if((20-SAMPLE_SIZE)>0){
        writeBin(raw(length = (20-SAMPLE_SIZE)), con, size = 1, endian="little")
      }

      ##COMMENT
      COMMENT_SIZE <- as.integer(nchar(as.character(object@METADATA[ID,"COMMENT"]), type="bytes"))

      ##avoid problems with empty comments
      if(COMMENT_SIZE == 0){
        COMMENT_SIZE <- as.integer(2)
        object@METADATA[ID,"COMMENT"] <- " "
      }

      writeBin(COMMENT_SIZE, con, size = 1, endian="little")
      suppressWarnings(writeChar(as.character(object@METADATA[ID,"COMMENT"]), con, nchars = COMMENT_SIZE, useBytes=TRUE, eos = NULL))

      ##pad COMMENT to its fixed width of 80 bytes
      if((80-COMMENT_SIZE)>0){
        writeBin(raw(length = c(80-COMMENT_SIZE)), con, size = 1, endian="little")
      }

      ##LIGHTSOURCE, SET, TAG
      writeBin(c(as.integer(object@METADATA[ID,"LIGHTSOURCE"]), as.integer(object@METADATA[ID,"SET"]), as.integer(object@METADATA[ID,"TAG"])), con, size = 1, endian="little")

      ##GRAIN
      writeBin(as.integer(object@METADATA[ID,"GRAIN"]), con, size = 2, endian="little")

      ##LPOWER
      writeBin(as.double(object@METADATA[ID,"LPOWER"]), con, size = 4, endian="little")

      ##SYSTEMID
      writeBin(as.integer(object@METADATA[ID,"SYSTEMID"]), con, size = 2, endian="little")

      ##Further distinction needed to fully support format version 03 and 04 separately
      if(version == 03){
        ##RESERVED 1 (36 bytes; the original reserved bytes are re-used only
        ##when present and the format version was not converted)
        if(length(object@.RESERVED) == 0 || version.original != version){
          writeBin(raw(length=36), con, size = 1, endian="little")
        }else{
          writeBin(object = object@.RESERVED[[ID]][[1]], con, size = 1, endian="little")
        }

        ##ONTIME, OFFTIME
        writeBin(c(as.integer(object@METADATA[ID,"ONTIME"]), as.integer(object@METADATA[ID,"OFFTIME"])),
                 con, size = 4, endian="little")

        ##GATE_ENABLED
        writeBin(as.integer(object@METADATA[ID,"GATE_ENABLED"]), con, size = 1, endian="little")

        ##GATE_START, GATE_STOP
        writeBin(c(as.integer(object@METADATA[ID,"GATE_START"]), as.integer(object@METADATA[ID,"GATE_STOP"])), con, size = 4, endian="little")

        ##RESERVED 2 (1 byte)
        if(length(object@.RESERVED) == 0 || version.original != version){
          writeBin(raw(length=1), con, size = 1, endian="little")
        }else{
          writeBin(object@.RESERVED[[ID]][[2]], con, size = 1, endian="little")
        }

      } else {
        ##version 04

        ##RESERVED 1 (20 bytes)
        if(length(object@.RESERVED) == 0 || version.original != version){
          writeBin(raw(length=20), con, size = 1, endian="little")
        } else{
          writeBin(object@.RESERVED[[ID]][[1]], con, size = 1, endian="little")
        }

        ##CURVENO
        writeBin(as.integer(object@METADATA[ID,"CURVENO"]), con, size = 2, endian="little")

        ##TIMETICK
        writeBin(c(as.double(object@METADATA[ID,"TIMETICK"])), con, size = 4, endian="little")

        ##ONTIME, STIMPERIOD
        writeBin(c(as.integer(object@METADATA[ID,"ONTIME"]), as.integer(object@METADATA[ID,"STIMPERIOD"])), con, size = 4, endian="little")

        ##GATE_ENABLED
        writeBin(as.integer(object@METADATA[ID,"GATE_ENABLED"]), con, size = 1, endian="little")

        ##GATE_START, GATE_STOP
        writeBin(c(as.integer(object@METADATA[ID,"GATE_START"]), as.integer(object@METADATA[ID,"GATE_STOP"])), con, size = 4, endian="little")

        ##PTENABLED
        writeBin(as.integer(object@METADATA[ID,"PTENABLED"]), con, size = 1, endian="little")

        ##RESERVED 2 (10 bytes)
        if(length(object@.RESERVED) == 0 || version.original != version){
          writeBin(raw(length=10), con, size = 1, endian="little")
        } else {
          writeBin(object@.RESERVED[[ID]][[2]], con, size = 1, endian="little")
        }
      }

      ##DPOINTS (curve data as 4-byte integers)
      writeBin(as.integer(unlist(object@DATA[ID])), con, size = 4, endian="little")

      #SET UNIQUE ID
      ID <- ID + 1

      ##update progress bar
      if(txtProgressBar) setTxtProgressBar(pb, ID)
    }
  }

  ## ====================================================
  ## version 05 to 08
  if(version == 05 | version == 06 | version == 07 | version == 08){
    ##start loop for export BIN data
    while(ID<=n.records) {

      ##VERSION
      writeBin(as.raw(object@METADATA[ID,"VERSION"]), con, size = 1, endian="little")

      ##stepping (unused filler byte)
      writeBin(raw(length=1), con, size = 1, endian="little")

      ##LENGTH, PREVIOUS, NPOINTS (4-byte integers from version 05 onwards)
      writeBin(c(as.integer(object@METADATA[ID,"LENGTH"]), as.integer(object@METADATA[ID,"PREVIOUS"]), as.integer(object@METADATA[ID,"NPOINTS"])), con, size = 4, endian="little")

      ##RECTYPE exists only from version 08 onwards
      if(version == 08){
        writeBin(as.integer(object@METADATA[ID,"RECTYPE"]), con, size = 1, endian="little")
      }

      ##RUN, SET, POSITION, GRAINNUMBER, CURVENO, XCOORD, YCOORD
      writeBin(c(as.integer(object@METADATA[ID,"RUN"]), as.integer(object@METADATA[ID,"SET"]), as.integer(object@METADATA[ID,"POSITION"]), as.integer(object@METADATA[ID,"GRAINNUMBER"]), as.integer(object@METADATA[ID,"CURVENO"]), as.integer(object@METADATA[ID,"XCOORD"]), as.integer(object@METADATA[ID,"YCOORD"])), con, size = 2, endian="little")

      ##SAMPLE
      SAMPLE_SIZE <- as.integer(nchar(as.character(object@METADATA[ID,"SAMPLE"]), type="bytes"))

      ##avoid problems with empty sample names
      if(SAMPLE_SIZE == 0){
        SAMPLE_SIZE <- as.integer(2)
        object@METADATA[ID,"SAMPLE"] <- " "
      }

      writeBin(SAMPLE_SIZE, con, size = 1, endian="little")
      writeChar(as.character(object@METADATA[ID,"SAMPLE"]), con, nchars = SAMPLE_SIZE, useBytes=TRUE, eos = NULL)

      ##pad SAMPLE to its fixed width of 20 bytes
      if((20-SAMPLE_SIZE)>0){
        writeBin(raw(length = (20-SAMPLE_SIZE)), con, size = 1, endian="little")
      }

      ##COMMENT
      COMMENT_SIZE <- as.integer(nchar(as.character(object@METADATA[ID,"COMMENT"]), type="bytes"))

      ##avoid problems with empty comments
      if(COMMENT_SIZE == 0){
        COMMENT_SIZE <- as.integer(2)
        object@METADATA[ID,"COMMENT"] <- " "
      }

      writeBin(COMMENT_SIZE, con, size = 1, endian="little")
      writeChar(as.character(object@METADATA[ID,"COMMENT"]), con, nchars = COMMENT_SIZE, useBytes=TRUE, eos = NULL)

      ##pad COMMENT to its fixed width of 80 bytes
      if((80-COMMENT_SIZE)>0){
        writeBin(raw(length = c(80-COMMENT_SIZE)), con, size = 1, endian="little")
      }

      ##Instrument and sequence characteristics

      ##SYSTEMID
      writeBin(as.integer(object@METADATA[ID,"SYSTEMID"]), con, size = 2, endian="little")

      ##FNAME
      FNAME_SIZE <- as.integer(nchar(as.character(object@METADATA[ID,"FNAME"]), type="bytes"))

      ##correct for case that this is of 0 length
      if(length(FNAME_SIZE) == 0){FNAME_SIZE <- as.integer(0)}

      writeBin(FNAME_SIZE, con, size = 1, endian="little")
      if(FNAME_SIZE>0) {
        writeChar(
          as.character(object@METADATA[ID,"FNAME"]),
          con,
          nchars = FNAME_SIZE,
          useBytes = TRUE,
          eos = NULL
        )
      }

      ##pad FNAME to its fixed width of 100 bytes
      if((100-FNAME_SIZE)>0){
        writeBin(raw(length = c(100-FNAME_SIZE)), con, size = 1, endian="little")
      }

      ##USER
      USER_SIZE <- as.integer(nchar(as.character(object@METADATA[ID,"USER"]), type="bytes"))
      writeBin(USER_SIZE, con, size = 1, endian="little")
      writeChar(as.character(object@METADATA[ID,"USER"]), con, nchars = USER_SIZE, useBytes=TRUE, eos = NULL)

      ##pad USER to its fixed width of 30 bytes
      if((30-USER_SIZE)>0){
        writeBin(raw(length = c(30-USER_SIZE)), con, size = 1, endian="little")
      }

      ##TIME
      TIME_SIZE <- nchar(object@METADATA[ID,"TIME"])
      writeBin(as.integer(TIME_SIZE), con, size = 1, endian="little")
      writeChar(object@METADATA[ID,"TIME"], con, nchars =TIME_SIZE, useBytes=TRUE, eos = NULL)

      ##pad TIME to its fixed width of 6 bytes
      if(6-TIME_SIZE>0){
        writeBin(raw(length = c(6-TIME_SIZE)), con, size = 1, endian="little")
      }

      ##DATE (length byte + 6 characters)
      writeBin(as.integer(6), con, size = 1 , endian="little")
      suppressWarnings(writeChar(as.character(object@METADATA[ID,"DATE"]), con, nchars = 6, useBytes=TRUE, eos = NULL))

      ##Analysis

      ##DTYPE
      writeBin(object@METADATA[ID,"DTYPE"], con, size = 1, endian="little")

      ##BL_TIME
      writeBin(as.double(object@METADATA[ID,"BL_TIME"]), con, size = 4, endian="little")

      ##BL_UNIT
      writeBin(as.integer(object@METADATA[ID,"BL_UNIT"]), con, size = 1, endian="little")

      ##NORM1, NORM2, NORM3, BG
      writeBin(c(as.double(object@METADATA[ID,"NORM1"]), as.double(object@METADATA[ID,"NORM2"]), as.double(object@METADATA[ID,"NORM3"]), as.double(object@METADATA[ID,"BG"])), con, size = 4, endian="little")

      ##SHIFT
      writeBin(as.integer(object@METADATA[ID,"SHIFT"]), con, size = 2, endian="little")

      ##TAG
      writeBin(c(as.integer(object@METADATA[ID,"TAG"])), con, size = 1, endian="little")

      ##RESERVED 1 (20 bytes; re-used only when present and version unchanged)
      if(length(object@.RESERVED) == 0 || version.original != version){
        writeBin(raw(length=20), con, size = 1, endian="little")
      }else{
        writeBin(object@.RESERVED[[ID]][[1]], con, size = 1, endian="little")
      }

      ##Measurement characteristics

      ##LTYPE
      writeBin(object@METADATA[ID,"LTYPE"], con, size = 1, endian="little")

      ##LIGHTSOURCE
      writeBin(c(as.integer(object@METADATA[ID,"LIGHTSOURCE"])), con, size = 1, endian="little")

      ##LIGHTPOWER, LOW, HIGH, RATE
      writeBin(c(as.double(object@METADATA[ID,"LIGHTPOWER"]), as.double(object@METADATA[ID,"LOW"]), as.double(object@METADATA[ID,"HIGH"]), as.double(object@METADATA[ID,"RATE"])), con, size = 4, endian="little")

      ##TEMPERATURE, MEASTEMP
      writeBin(c(as.integer(object@METADATA[ID,"TEMPERATURE"]), as.integer(object@METADATA[ID,"MEASTEMP"])), con, size = 2, endian="little")

      ##AN_TEMP, AN_TIME
      writeBin(c(as.double(object@METADATA[ID,"AN_TEMP"]), as.double(object@METADATA[ID,"AN_TIME"])), con, size = 4, endian="little")

      ##TOLDELAY, TOLON, TOLOFF
      writeBin(c(as.integer(object@METADATA[ID,"TOLDELAY"]), as.integer(object@METADATA[ID,"TOLON"]), as.integer(object@METADATA[ID,"TOLOFF"])), con, size = 2, endian="little")

      ##IRR_TIME
      writeBin(as.double(object@METADATA[ID,"IRR_TIME"]), con, size = 4, endian="little")

      ##IRR_TYPE
      writeBin(c(object@METADATA[ID,"IRR_TYPE"]), con, size = 1, endian="little")

      ##IRR_DOSERATE, IRR_DOSERATEERR (the error field only exists from version 06 onwards)
      if(version == 05){
        writeBin(as.double(object@METADATA[ID,"IRR_DOSERATE"]), con, size = 4, endian="little")
      }else{
        writeBin(c(as.double(object@METADATA[ID,"IRR_DOSERATE"]), as.double(object@METADATA[ID,"IRR_DOSERATEERR"])), con, size = 4, endian="little")
      }

      ##TIMESINCEIRR
      writeBin(c(as.integer(object@METADATA[ID,"TIMESINCEIRR"])), con, size = 4, endian="little")

      ##TIMETICK
      writeBin(c(as.double(object@METADATA[ID,"TIMETICK"])), con, size = 4, endian="little")

      ##ONTIME, STIMPERIOD
      writeBin(c(suppressWarnings(as.integer(object@METADATA[ID,"ONTIME"])), as.integer(object@METADATA[ID,"STIMPERIOD"])), con, size = 4, endian="little")

      ##GATE_ENABLED
      writeBin(as.integer(object@METADATA[ID,"GATE_ENABLED"]), con, size = 1, endian="little")

      ##GATE_START, GATE_STOP
      writeBin(c(as.integer(object@METADATA[ID,"GATE_START"]), as.integer(object@METADATA[ID,"GATE_STOP"])), con, size = 4, endian="little")

      ##PTENABLED, DTENABLED
      writeBin(c(as.integer(object@METADATA[ID,"PTENABLED"]), as.integer(object@METADATA[ID,"DTENABLED"])), con, size = 1, endian="little")

      ##DEADTIME, MAXLPOWER, XRF_ACQTIME, XRF_HV
      writeBin(c(as.double(object@METADATA[ID,"DEADTIME"]), as.double(object@METADATA[ID,"MAXLPOWER"]), as.double(object@METADATA[ID,"XRF_ACQTIME"]), as.double(object@METADATA[ID,"XRF_HV"])), con, size = 4, endian="little")

      ##XRF_CURR
      writeBin(c(as.integer(object@METADATA[ID,"XRF_CURR"])), con, size = 4, endian="little")

      ##XRF_DEADTIMEF
      writeBin(c(as.double(object@METADATA[ID,"XRF_DEADTIMEF"])), con, size = 4, endian="little")

      ##add version support for V7
      if(version == 05){
        ##RESERVED 2 (4 bytes)
        if(length(object@.RESERVED) == 0 || version.original != version){
          writeBin(raw(length=4), con, size = 1, endian="little")
        }else{
          writeBin(object@.RESERVED[[ID]][[2]], con, size = 1, endian="little")
        }

      }else if(version == 06){
        ##RESERVED 2 (24 bytes)
        if(length(object@.RESERVED) == 0 || version.original != version){
          writeBin(raw(length=24), con, size = 1, endian="little")
        }else{
          writeBin(object@.RESERVED[[ID]][[2]], con, size = 1, endian="little")
        }

      }else{
        ##DETECTOR_ID
        writeBin(as.integer(object@METADATA[ID,"DETECTOR_ID"]), con, size = 1, endian="little")

        ##LOWERFILTER_ID, UPPERFILTER_ID
        writeBin(c(as.integer(object@METADATA[ID,"LOWERFILTER_ID"]), as.integer(object@METADATA[ID,"UPPERFILTER_ID"])), con, size = 2, endian="little")

        ##ENOISEFACTOR
        writeBin(as.double(object@METADATA[ID,"ENOISEFACTOR"]), con, size = 4, endian="little")

        ##VERSION 08
        if(version == 07){
          ##RESERVED 2
if(length(object@.RESERVED) == 0 || version.original != version){ writeBin(raw(length=15), con, size = 1, endian="little") }else{ writeBin(object@.RESERVED[[ID]][[2]], con, size = 1, endian="little") } }else{ ##MARKPOS POSITION and extraction writeBin( c( as.double(object@METADATA[ID, "MARKPOS_X1"]), as.double(object@METADATA[ID, "MARKPOS_Y1"]), as.double(object@METADATA[ID, "MARKPOS_X2"]), as.double(object@METADATA[ID, "MARKPOS_Y2"]), as.double(object@METADATA[ID, "MARKPOS_X3"]), as.double(object@METADATA[ID, "MARKPOS_Y3"]), as.double(object@METADATA[ID, "EXTR_START"]), as.double(object@METADATA[ID, "EXTR_END"]) ), con, size = 4, endian = "little" ) ##RESERVED 2 if(length(object@.RESERVED) == 0 || version.original != version){ writeBin(raw(length=42), con, size = 1, endian="little") }else{ writeBin(object@.RESERVED[[ID]][[2]], con, size = 1, endian="little") } } }#end if version decision ##DPOINTS writeBin(as.integer(unlist(object@DATA[ID])), con, size = 4, endian="little") #SET UNIQUE ID ID <- ID + 1 ##update progress bar if(txtProgressBar) setTxtProgressBar(pb, ID) } } # ##close con close(con) # # ##close if(txtProgressBar) close(pb) ##output message(paste0("\t >> ",ID-1,"records have been written successfully!\n\n")) } Luminescence/R/fit_OSLLifeTimes.R0000644000176200001440000006062514236146743016334 0ustar liggesusers#' Fitting and Deconvolution of OSL Lifetime Components #' #' @details #' The function intends to provide an easy access to pulsed optically stimulated luminescence (POSL) data, #' in order determine signal lifetimes. The fitting is currently optimised to work with the off-time flank of POSL measurements #' only. For the signal deconvolution, a differential evolution optimisation is combined with nonlinear least-square fitting #' following the approach by Bluszcz & Adamiec (2006). 
#' #' **Component deconvolution algorithm** #' #' The component deconvolution consists of two steps: #' #' (1) Adaptation phase #' #' In the adaptation phase the function tries to figure out the optimal and statistically justified #' number of signal components following roughly the approach suggested by Bluszcz & Adamiec (2006). In #' contrast to their work, for the optimisation by differential evolution here the package `'DEoptim'` is used. #' #' The function to be optimized has the form: #' #' \deqn{\chi^2 = \sum(w * (n_i/c - \sum(A_i * exp(-x/(tau_i + t_p))))^2)} #' #' with \eqn{w = 1} for unweighted regression analysis (`method_control = list(weights = FALSE)`) or #' \eqn{w = c^2/n_i} for weighted regression analysis. The default values is `TRUE`. #' #' \deqn{F = (\Delta\chi^2 / 2) / (\chi^2/(N - 2*m - 2))} #' #' (2) Final fitting #' #' **`method_control`** #' #' \tabular{lll}{ #' **Parameter** \tab **Type** \tab **Description**\cr #' `p` \tab [numeric] \tab controls the probability for the F statistic reference values. For a significance level of 5 % a value of 0.95 (the default) should be added, for 1 %, a value of 0.99 is sufficient: 1 > p > 0 (cf. [stats::FDist])\cr #' `seed` \tab [numeric] \tab set the seed for the random number generator, provide a value here to get reproducible results \cr #' `DEoptim.trace` \tab [logical] \tab enables/disables the tracing of the differential evolution (cf. [DEoptim::DEoptim.control]) \cr #' `DEoptim.itermax` \tab [logical] \tab controls the number of the allowed generations (cf. [DEoptim::DEoptim.control]) \cr #' `weights` \tab [logical] \tab enables/disables the weighting for the start parameter estimation and fitting (see equations above). 
#' The default values is `TRUE` \cr #' `nlsLM.trace` \tab [logical] \tab enables/disables trace mode for the nls fitting ([minpack.lm::nlsLM]), can be used to identify convergence problems, default is `FALSE` \cr #' `nlsLM.upper` \tab [logical] \tab enables/disables upper parameter boundary, default is `TRUE` \cr #' `nlsLM.lower` \tab [logical] \tab enables/disables lower parameter boundary, default is `TRUE` #' } #' #' @param object [RLum.Data.Curve-class], [RLum.Analysis-class], [data.frame] or [matrix] **(required)**: #' Input object containing the data to be analysed. All objects can be provided also as list for an automated #' processing. Please note: `NA` values are automatically removed and the dataset should comprise at least 5 data points. #' #' @param tp [numeric] (*with default*): option to account for the stimulation pulse width. For off-time measurements #' the default value is 0. `tp` has the same unit as the measurement data, e.g., µs. Please set this parameter #' carefully, if it all, otherwise you may heavily bias your fit results. #' #' @param signal_range [numeric] (*optional*): allows to set a channel range, by default all channels are used, e.g. #' `signal_range = c(2,100)` considers only channels 2 to 100 and `signal_range = c(2)` considers only channels #' from channel 2 onwards. #' #' @param n.components [numeric] (*optional*): Fix the number of components. If set the algorithm will try #' to fit the number of predefined components. If nothing is set, the algorithm will try to find the best number #' of components. #' #' @param method_control [list] (*optional*): Named to allow a more fine control of the fitting process. See details #' for allowed options. #' #' @param plot [logical] (*with default*): Enable/disable plot output #' #' @param plot_simple [logical] (*with default*): Enable/disable reduced plot output. 
If `TRUE`, no #' residual plot is shown, however, plot output can be combined using the standard R layout options, #' such as `par(mfrow = c(2,2))`. #' #' @param verbose [logical] (*with default*): Enable/disable terminal feedback #' #' @param ... parameters passed to [plot.default] to control the plot output, supported are: #' `main`, `xlab`, `ylab`, `log`, `xlim`, `ylim`, `col`, `lty`, `legend.pos`, `legend.text`. If the input #' object is of type [RLum.Analysis-class] this arguments can be provided as a [list]. #' #' @return #' #' -----------------------------------\cr #' `[ NUMERICAL OUTPUT ]`\cr #' -----------------------------------\cr #' #' **`RLum.Results`**-object #' #' **slot:** **`@data`** #' #' \tabular{lll}{ #' **Element** \tab **Type** \tab **Description**\cr #' `$data` \tab `matrix` \tab the final fit matrix \cr #' `$start_matrix` \tab `matrix` \tab the start matrix used for the fitting \cr #' `$total_counts` \tab `integer` \tab Photon count sum \cr #' `$fit` \tab `nls` \tab the fit object returned by [minpack.lm::nls.lm] \cr #' } #' #' #'**slot:** **`@info`** #' #' The original function call #' #' ------------------------\cr #' `[ TERMINAL OUTPUT ]`\cr #' ------------------------\cr #' #' Terminal output is only shown of the argument `verbose = TRUE`. #' #' *(1) Start parameter and component adaption*\cr #' Trave of the parameter adaptation process #' #' *(2) Fitting results (sorted by ascending tau)*\cr #' The fitting results sorted by ascending tau value. Please note #' that if you access the `nls` fitting object, the values are not sorted. #' #' *(3) Further information*\cr #' - The photon count sum #' - Durbin-Watson residual statistic to asses whether the residuals are correlated, ideally #' the residuals should be not correlated at all. 
Rough measures are: \cr #' D = 0: the residuals are systematically correlated \cr #' D = 2: the residuals are randomly distributed \cr #' D = 4: the residuals are systematically anti-correlated\cr #' #' You should be suspicious if D differs largely from 2. #' #' #' ------------------------\cr #' `[ PLOT OUTPUT ]`\cr #' ------------------------\cr #' #' A plot showing the original data and the fit so far possible. The lower plot shows the #' residuals of the fit. #' #' @section Function version: 0.1.5 #' #' @author Sebastian Kreutzer, Geography & Earth Sciences, Aberystwyth University, #' Christoph Schmidt, University of Bayreuth (Germany) #' #' @seealso [minpack.lm::nls.lm], [DEoptim::DEoptim] #' #' @references #' Bluszcz, A., Adamiec, G., 2006. Application of differential evolution to fitting OSL decay curves. #' Radiation Measurements 41, 886-891. \doi{10.1016/j.radmeas.2006.05.016}\cr #' #' Durbin, J., Watson, G.S., 1950. Testing for Serial Correlation in Least Squares Regression: I. #' Biometrika 37, 409-21. doi:10.2307/2332391 #' #' **Further reading** #' #' Hughes, I., Hase, T., 2010. Measurements and Their Uncertainties. Oxford University Press. #' #' Storn, R., Price, K., 1997. Differential Evolution – #' A Simple and Efficient Heuristic for Global Optimization over Continuous Spaces. #' Journal of Global Optimization 11, 341–359. #' #'@examples #' #'##load example data #'data(ExampleData.TR_OSL, envir = environment()) #' #'##fit lifetimes (short run) #'fit_OSLLifeTimes( #' object = ExampleData.TR_OSL, #' n.components = 1) #' #'##long example #'\dontrun{ #'fit_OSLLifeTimes( #' object = ExampleData.TR_OSL) #' } #' #'@md #'@export fit_OSLLifeTimes <- function( object, tp = 0, signal_range = NULL, n.components = NULL, method_control = list(), plot = TRUE, plot_simple = FALSE, verbose = TRUE, ... 
){ # Self-call ----------------------------------------------------------------------------------- if(inherits(object, "list") || inherits(object, "RLum.Analysis")){ ##allow RLum.Analysis objects if(all(vapply(object, function(x){ inherits(x, "RLum.Analysis")}, logical(1)))){ object <- lapply(object, function(x){x@records}) object <- .unlist_RLum(object) } ##expand parameters ##n.components if(!is.null(n.components)) n.components <- as.list(rep(n.components, length(object))) ##tp tp <- as.list(rep(tp, length(object))) ## names of extra arguments arg_names <- names(list(...)) ##pretreat some of the ... settings to avoid ## expand all arguments if(!is.null(arg_names)){ arg_list <- lapply(arg_names , function(x){ unlist(rep(list(...)[[x]], length.out = length(object))) }) ## make sure we organise this list (not nice but it works) arg_list <- lapply(1:length(object), function(x){ args <- lapply(1:length(arg_names), function(y){ arg_list[[y]][[x]] }) names(args) <- arg_names args }) } else{ arg_list <- NULL } ##run function temp_results <- lapply(1:length(object), function(x){ temp <- try(do.call(what = fit_OSLLifeTimes, c(list( object = object[[x]], tp = tp[[x]], signal_range = signal_range, n.components = n.components[[x]], method_control = method_control, plot = plot, plot_simple = plot_simple, verbose = verbose ), arg_list[[x]]) ), silent = FALSE) if(inherits(temp, "try-error")){ return(NULL) }else{ return(temp) } }) ##combine results and return results <- merge_RLum(temp_results) if(!is.null(results)) results@originator <- "fit_OSLLifeTimes" ##return return(results) } # Input integrity tests ------------------------------------------------------------------ if(inherits(object, "RLum.Data.Curve")){ if(!grepl(pattern = "POSL", x = object@recordType, fixed = TRUE)) stop(paste0("[fit_OSLLifeTime()] recordType ",object@recordType, " not supported for input object!"), call. 
= FALSE) df <- as.data.frame(object@data) }else if(inherits(object, "data.frame")){ df <- object[,1:2] } else if(inherits(object, "matrix")){ df <- as.data.frame(object[,1:2]) }else{ try(stop(paste0("[fit_OSLLifeTime()] Class '",class(object), "' not supported as input, NULL returned!"), call. = FALSE)) return(NULL) } ##remove NA values, whatever it is worth for if(any(is.na(df))){ df <- na.exclude(df) warning("[fit_OSLLifeTimes()] NA values detected and removed from dataset.",call. = TRUE) } ##rename columns for data.frame colnames(df) <- c("x","y") ##make sure that we have a minimum of data points available if(nrow(df) < 5){ try(stop("[fit_OSLLifeTimes()] Your input dataset has less than 5 data points. NULL returned!", call. = FALSE)) return(NULL) } #check for 0 data in dataset ... we opt for hard stop if(any(df[[2]] == 0)){ warning("[fit_OSLLifeTimes()] Your dataset contains 0. A value of 0.1 has been added to your count values!",call. = TRUE) df[[2]] <- df[[2]] + 0.1 } ##save original data for later df_raw <- df ##signal_range if(!is.null(signal_range)){ if(!inherits(signal_range, "numeric")) stop("[fit_OSLLifeTimes()] Argument 'signal_range' must by of type numeric!", call. = FALSE) ##check lengths if(length(signal_range) == 1) signal_range <- c(signal_range, nrow(df)) if(length(signal_range) > 2) warning("[fit_OSLLifeTimes()] 'signal_range' has more than two elements, take only the first two!", call. = FALSE) if(signal_range[2] > nrow(df)){ warning("[fit_OSLLifeTimes()] 'signal_range' > number of channels, reset to maximum!", call. = FALSE) signal_range[2] <- nrow(df) } if(signal_range[1] > signal_range[2]){ warning("[fit_OSLLifeTimes()] 'signal_range' first element > last element, reset to default", call. 
= FALSE) signal_range <- c(1, nrow(df)) } ##set range df <- df[signal_range[1]:signal_range[2],] } # Fitting ------------------------------------------------------------------------------------- ##(0) CONTROL +++++++++++++++++++++++++++++++++++++++++++++++++++++ method_control_setting <- list( p = 0.95, seed = NULL, DEoptim.trace = FALSE, DEoptim.itermax = 1000, weights = TRUE, nlsLM.trace = FALSE, nlsLM.upper = TRUE, nlsLM.lower = TRUE ) ##udpate list if the user did something method_control_setting <- modifyList(x = method_control_setting, val = method_control) ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ ##(A) SETTINGS ## ## ##(1) >> set fitting function for minpack.lm x <- 0 #cheat R check routine fit_forumla <- function(n.components, tp){ A <- paste0("A.",1:n.components) tau <- paste0("tau.",1:n.components) as.formula(paste0("y ~ ", paste(A," * exp(- x/(",tau," + ", tp, "))", collapse = " + "))) } ## ## ##(2) create formula for differential evolution run fn_constructor <- function(m){ ##get length of x-vector x_len <- 1:(2 * m) ##generate term term <- vapply(seq(1,length(x_len), by = 2), function(i){ paste0("(x[", i, "] * exp(-t/(x[", i + 1, "] + tp)))") },character(1)) ##parse term <- paste(term, collapse = " + ") ##set weight (should be given as character) if(method_control_setting$weights){ w <- "c^2/n" }else{ w <- "1" } ##combine term <- paste0("sum(",w," * ((n/c) - (",term,"))^2)") ##parse ... 
if we do it here, we boost the speed of the evaluation parse(text = eval(term)) } ## ## ##(3) initialse objects chi_squared <- c(NA, NA) F <- c(Inf, Inf) start <- NULL if(is.null(n.components)){ m <- 1 }else{ m <- n.components } ## ## ##(4) set seed if(!is.null(method_control_setting$seed)) set.seed(method_control_setting$seed) ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ ##(B) RUN DIFFERENTIAL EVOLUTION TO DETERMINE NUMBER OF COMPONENTS ##prevent collateral damage, so we want a data.frame that has at least 10 rows if(verbose){ cat("\n[fit_OSLLifeTime()]\n") cat("\n(1) Start parameter and component adapation") cat("\n---------------------(start adaption)------------------------------------") } while(!is.na(suppressWarnings(qf(method_control_setting$p, df1 = 2, df2 = length(df[[2]]) - 2 * m - 2))) && ( F[2] > qf(method_control_setting$p, df1 = 2, df2 = length(df[[2]]) - 2 * m - 2) & F[1] >= F[2])){ ##set F F[1] <- F[2] ##construct formula outside of the loop; needs to be done here, otherwise the performance ##goes down formula_string <- fn_constructor(m) ##set fn set_tp <- tp set_c <- diff(c(0,df[[1]])) set_t <- df[[1]] set_n <- df[[2]] ##set function ##Personal reminder: ##Why this function is not written in C++ ... 
because it adds basically nothing ##in terms of speed ~ 10 µs faster, but needed to be compiled and thus cannot be changed ##directly in the code fn <- function(x, tp = set_tp, n = set_n, c = set_c, t = set_t, term = formula_string){ eval(formula_string) } ##set start parameters if(!is.null(start)) start_parameters <- start$optim$bestmem ##run differential evolution start <- DEoptim::DEoptim( fn = fn, lower = rep(0, 2 * m), upper = rep(c(10 * sum(df[[2]]), 10000), m), control = DEoptim::DEoptim.control( trace = method_control_setting$DEoptim.trace, itermax = method_control_setting$DEoptim.itermax, c = .5, strategy = 2, parallelType = 0 #Does it make sense to use parallel processing here: no, it does not scale well ) ) ##set chi^2 value and calculate F for the 2nd run chi_squared[2] <- start$optim$bestval if(!is.na(chi_squared[1])){ F[2] <- (abs(diff(chi_squared))/2) / (chi_squared[2]/(nrow(df) - 2 * m - 2)) } ##terminal feedback if(verbose){ cat("\n>> + adaption for",m, "comp.", ": ", round(F[2],2), "(calc.) <> ", round(qf(method_control_setting$p, df1 = 2, df2 = length(df[[2]]) - 2 * m - 2), 2), "(ref.)") if(F[2] > qf(method_control_setting$p, df1 = 2, df2 = length(df[[2]]) - 2 * m - 2) & F[1] >= F[2]){ cat(" >> [add comp.]") }else{ cat(" >> [stop]\n") cat("---------------------(end adaption)--------------------------------------\n\n") } } ##break here if n.components was set others than NULL, in such case we force the number if(!is.null(n.components)){ if(verbose){ cat(" >> [forced stop]\n") cat("---------------------(end adaption)--------------------------------------\n\n") } start_parameters <- start$optim$bestmem break() } ##update objects chi_squared[1] <- chi_squared[2] m <- m + 1 } ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ ##(C) RUN LM-FITTING ## ##reduce m by 2, why 2? 
## - the last component violated the F statistic, so was obviously not the best call ## - the loop adds everytime another component if(is.null(n.components)){ ##this covers the extrem case that the process stops after the first run if(m == 2){ m <- 1 start_parameters <- start$optim$bestmem }else{ m <- m - 2 } } A <- start_parameters[seq(1,length(start_parameters), by = 2)] tau <- start_parameters[seq(2,length(start_parameters), by = 2)] names(A) <- paste0("A.", 1:(m)) names(tau) <- paste0("tau.", 1:(m)) ##create start_matrix start_matrix <- matrix(data = c(A,tau), ncol = 2) colnames(start_matrix) <- c("A", "tau") rownames(start_matrix) <- paste0("Comp.", 1:(m)) ##add terminal feedback if(verbose){ cat("\n>> Applied component matrix\n") print(start_matrix) cat("\n\n") } ##run fitting using the Levenberg-Marquardt algorithm fit <- try(minpack.lm::nlsLM( formula = fit_forumla(n.components = m, tp = tp), data = df, start = c(A, tau), upper = if(method_control_setting$nlsLM.upper){ c(rep(sum(df[[2]]), length(A)), rep(Inf,length(tau))) }else{ NULL }, lower = if(method_control_setting$nlsLM.lower){ c(rep(0,2*length(A))) }else{ NULL }, na.action = "na.exclude", weights = if(method_control_setting$weights){ set_c^2/df[,2] }else{ rep(1,nrow(df)) }, trace = method_control_setting$nlsLM.trace, control = minpack.lm::nls.lm.control(maxiter = 500) ), silent = FALSE) # Post-processing ----------------------------------------------------------------------------- if (!inherits(fit, 'try-error')) { ##extract coefficients A <- coef(fit)[1:(m)] tau <- coef(fit)[(m + 1):(2 * m)] ##order coef o <- order(tau) tau <- tau[o] A <- A[o] ##summary matrix summary_matrix <- summary(fit)$coefficients ##return warning if one parameter is negative, this can happen if the user let the boundaries ##free float if(any(summary_matrix[,1]<0)) warning("[fit_OSLLifeTimes()] At least one parameter is negative. Please carefully check your results!", call. 
= FALSE) ##order matrix by tau, but keep the rownames temp_rownames <- rownames(summary_matrix) summary_matrix <- summary_matrix[c(o,o + length(A)),] rownames(summary_matrix) <- temp_rownames rm(temp_rownames) ##calculate Durbin-Watson statistic R <- residuals(fit) D <- round(sum((R - c(0,R[-length(R)]))^2) / sum(R^2),2) rm(R) }else{ m <- 1 A <- NA tau <- NA summary_matrix <- NA D <- NA } # Terminal output ----------------------------------------------------------------------------- if(verbose){ if (!inherits(fit, 'try-error')) { cat("(2) Fitting results (sorted by ascending tau)\n") cat("-------------------------------------------------------------------------\n") print(summary_matrix) cat("-------------------------------------------------------------------------\n") }else{ try(stop("[fit_OSLLifeTimes()] The fitting was not sucessful, consider to try again!", call. = FALSE)) } cat("\n(3) Further information\n") cat("-------------------------------------------------------------------------\n") cat("Photon count sum: ", sum(df[[2]]),"\n") cat("Durbin-Watson residual statistic: ", D,"") if(!is.na(D)){ string <- c("\u005b",rep(" ",(D * 10)/4),"\u003c\u003e",rep(" ",10 - (D * 10)/4),"\u005d\n") }else{ string <- NA } cat(paste(string, collapse = "")) cat("\n") } # Plotting ------------------------------------------------------------------------------------ if(plot) { ##set plot default settings plot_settings <- list( main = "OSL Lifetimes", xlab = "Time [a.u.]", ylab = "POSL [a.u.]", log = "", xlim = c(0,max(df_raw[[1]])), ylim = c(0,max(df_raw[[2]])), col = get("col", pos = .LuminescenceEnv)[-1], lty = rep(1, (m + 1)), legend.pos = "topright", legend.text = c("sum", paste0("comp. 
", 1:m)) ) ##modify settings on request plot_settings <- modifyList(x = plot_settings, val = list(...)) ##catch log scale if(grepl(pattern = "x", plot_settings$log, fixed = TRUE)){ if(plot_settings$xlim[1] == 0){ plot_settings$xlim[1] <- if(min(df_raw[[1]]) == 0) 1e-04 else min(df_raw[[1]]) warning(paste0("[fit_OSLLifeTime()] log-scale requires x-values > 0, set min xlim to ", round(plot_settings$xlim[1],4), "!"), immediate. = TRUE, call. = FALSE) } } if(grepl(pattern = "y", plot_settings$log, fixed = TRUE)){ if(plot_settings$ylim[1] == 0){ plot_settings$ylim[1] <- if(min(df_raw[[2]]) == 0) 1e-04 else min(df_raw[[2]]) warning(paste0("[fit_OSLLifeTime()] log-scale requires y-values > 0, set min ylim to ", round(plot_settings$ylim[1],4), "!"), immediate. = TRUE, call. = FALSE) } } ##plot if the fitting was a success if (!inherits(fit, 'try-error')) { if(!plot_simple){ ##make sure that the screen closes if something is wrong on.exit(close.screen(all.screens = TRUE)) split.screen(rbind( c(0.1,1,0.32, 0.98), c(0.1,1,0.1, 0.32))) screen(1) par(mar = c(0, 4, 3, 4)) } plot(NA,NA, xaxt = if(plot_simple) "s" else "n", xlab = if(plot_simple) plot_settings$xlab else "", ylab = plot_settings$ylab, ylim = plot_settings$ylim, xlim = plot_settings$xlim, log = plot_settings$log, main = plot_settings$main ) ##add used points points(df, col = rgb(0,0,0,0.8)) ##add not used points df_raw (this solution avoids overplotting) if(nrow(df) != nrow(df_raw)) points(df_raw[!df_raw[[1]]%in%df[[1]],], col = "grey") ##+ add some curve lines( df$x, fitted(fit), col = plot_settings$col[1], lwd = 1.3, lty = plot_settings$lty[1] ) ##+ add components for(i in 1:m) { if (length(plot_settings$lty) < 2) plot_settings$lty <- rep(plot_settings$lty, 1 + m) if (length(plot_settings$col) < 2) plot_settings$col <- rep(plot_settings$col, 1 + m) curve( A[i] * exp(-x / (tau[i] + tp)), add = TRUE, col = plot_settings$col[i + 1], lty = plot_settings$lty[i + 1] ) } ##+ add legend legend( plot_settings$legend.pos, 
legend = plot_settings$legend.text, lty = plot_settings$lty, col = plot_settings$col[c(1, 2:(m + 2))], bty = "n" ) if(!plot_simple){ screen(2) par(mar = c(4, 4, 0, 4)) plot( x = df[[1]], y = residuals(fit), xlab = plot_settings$xlab, type = "b", pch = 20, xlim = plot_settings$xlim, log = if(plot_settings$log == "x"){"x"}else{""}, ylab = "Resid." ) } }else{ plot( df, xlab = plot_settings$xlab, ylab = plot_settings$ylab, col = rgb(0, 0, 0, 0.8), main = plot_settings$main, xlim = plot_settings$xlim, ylim = plot_settings$ylim, log = plot_settings$log ) } }#if plot # Return -------------------------------------------------------------------------------------- ##create return object return( set_RLum( class = "RLum.Results", data = list( data = summary_matrix, start_matrix = start_matrix, total_counts = sum(df[[2]]), fit = fit ), info = list( call = sys.call() ) ) ) } Luminescence/R/zzz.R0000644000176200001440000002175014236146743014064 0ustar liggesusers##////////////////////////////////////////////////////////////////////////////// ##//zzz.R ##////////////////////////////////////////////////////////////////////////////// ## ##============================================================================== ##author: R Luminescence Package Team ##organisation: ##version.: 0.2.1 ##date: 2013-11-10 ##============================================================================== # Set namespace .LuminescenceEnv ------------------------------------------ .LuminescenceEnv <- new.env(parent = emptyenv()) # Assign variables to Namespace ------------------------------------------- ##variable col to define colours in the functions for output assign("col", unlist(colors())[c(261,552,51,62,76,151,451,474,654,657,100,513,23,612,129,27,551,393,80,652,555)], pos = ".LuminescenceEnv", envir = .LuminescenceEnv) ##============================================================================== ##on Attach .onAttach <- function(libname,pkgname){ ##set startup message 
try(packageStartupMessage(paste("Welcome to the R package Luminescence version ", packageDescription(pkg="Luminescence")$Version, " [Built: ", trimws(strsplit(packageDescription(pkg="Luminescence")$Built, ";")[[1]][3]), "]", sep=""), "\n", get_Quote()), silent=TRUE) } ##============================================================================== # DO NOT TOUCH! ----------------------------------------------------------- #' sTeve - sophisticated tool for efficient data validation and evaluation #' #' This function provides a sophisticated routine for comprehensive #' luminescence dating data analysis. #' #' This amazing sophisticated function validates your data seriously. #' #' @param n_frames [integer] (*with default*): #' n frames #' #' @param t_animation [integer] (*with default*): #' t animation #' #' @param n.tree [integer] (*with default*): #' how many trees do you want to cut? #' #' @param type [integer] (*optional*): #' Make a decision: 1, 2 or 3 #' #' @return Validates your data. #' #' @note This function should not be taken too seriously. 
#'
#' @author R Luminescence Team, 2012-2046
#'
#' @seealso [plot_KDE]
#'
#' @keywords manip
#' @examples
#'
#' ##no example available
#'
#' @md
#' @export
sTeve <- function(n_frames = 10, t_animation = 2, n.tree = 7, type) {
  ## Easter-egg function: draws one of three seasonal animations on top of the
  ## current plot. NOTE(review): relies on the third-party 'shape' package for
  ## all filled-shape drawing; purely graphical side effects, no return value.

  ## allow new overlay plot
  par(new = TRUE)

  ## infer month of year
  month <- as.numeric(strsplit(x = as.character(Sys.Date()), split = "-")[[1]][2])

  ## select showtime item based on month or user-defined type:
  ## Jan-Mar -> 1, Apr-Nov -> 2 (pacman), Dec -> 3 (christmas trees)
  if(missing(type) == TRUE) {
    if(month >= 1 & month <= 3) {
      type <- 1
    } else if(month >3 & month <= 11) {
      type <- 2
    } else if(month > 11 & month <= 12) {
      type <- 3
    }
  }

  if(type == 1) {
    ## SHOWTIME OPTION 1: static "NOT FUNNY" banner after a 5 s pause
    Sys.sleep(5)
    shape::emptyplot()
    shape::filledrectangle(wx = 0.9, wy = 0.4, mid = c(0.5, 0.5),
                           lcol ="red", lwd=1, col=0, angle = 45)
    text(x=0.5, y=0.5, labels="NOT FUNNY", cex=2, col="red", font=2, srt=45)

  } else if(type == 2) {
    ## SHOWTIME OPTION 2: animated pacman figures moving across the canvas
    plot(NA, xlim = c(0, 10), ylim = c(0, 10),
         main = "", xlab = "", ylab = "",
         axes = FALSE, frame.plot = FALSE)

    ## (no-op) self-assignments kept from the original code
    n_frames <- n_frames
    t_animation <- t_animation

    ## time step per animation frame
    dt <- t_animation / n_frames

    ## trajectories (x, y) and radii for the three pacman figures
    x1 <- seq(0, 10, length.out = n_frames)
    y1 <- rep(1.5, n_frames)
    r1 <- 0.5

    x2 <- seq(0, 16, length.out = n_frames)
    y2 <- rep(8.5, n_frames)
    r2 <- 0.5

    x4 <- seq(11, 0, length.out = n_frames)
    y4 <- rep(5, n_frames)
    r4 <- 0.5

    # set angles for each step of mouth opening
    angles_mouth <- rep(c(0.01, 0.25, 0.5, 0.25), length.out = n_frames)

    for(i in 1:n_frames){
      # define pacman circles (the mouth gap is the excluded angle range)
      shape::filledcircle(r1 = r1, r2 = 0.00001, mid = c(x1[i], y1[i]),
                          from = angles_mouth[i], to = 2 * pi - angles_mouth[i],
                          col = "yellow")
      shape::filledcircle(r1 = r2, r2 = 0.00001, mid = c(x2[i], y2[i]),
                          from = angles_mouth[i], to = 2 * pi - angles_mouth[i],
                          col = "yellow")
      shape::filledcircle(r1 = r4, r2 = 0.00001, mid = c(x4[i], y4[i]),
                          from = angles_mouth[i] + 3, to = 2 * pi - angles_mouth[i] + 3,
                          col = "yellow")

      # define eyes for pacman
      points(x1[i] + 0.2, y1[i] + 0.75, pch = 21, bg = 1, cex = 0.7)
      points(x2[i] + 0.2, y2[i] + 0.75, pch = 21, bg = 1, cex = 0.7)
      points(x4[i] - 0.05, y4[i] + 0.75, pch = 21, bg = 1, cex = 0.7)

      Sys.sleep(dt)

      # erase the previous frame by overdrawing in white
      shape::plotcircle(r = 1.1 * r1, mid = c(x1[i], y1[i]), col = "white", lcol = "white")
      shape::plotcircle(r = 1.1 * r2, mid = c(x2[i], y2[i]), col = "white", lcol = "white")
      shape::plotcircle(r = 1.1 * r4, mid = c(x4[i], y4[i]), col = "white", lcol = "white")
    }

  } else if(type == 3) {
    ## calculate display ratio (keeps the baubles circular on non-square devices)
    f <- par()$pin[2] / par()$pin[1]

    ## create new overlay plot
    plot(NA, xlim = c(0, 100), ylim = c(0, 100), axes = F,
         frame.plot = FALSE, xlab = "", ylab = "")

    ## create semi-transparent layer
    polygon(x = c(-100, -100, 200, 200), y = c(-100, 200, 200, -100),
            col = rgb(1,1,1, 0.8), lty = 0)

    ## draw christmas trees at random positions and sizes
    n = n.tree
    tree.x <- runif(n, 10, 90)
    tree.y <- runif(n, 10, 90)
    tree.size <- runif(n, 0.3, 1.5)

    for(i in 1:n) {
      ## stem
      polygon(x = c(tree.x[i] - 1.5 * tree.size[i],
                    tree.x[i] - 1.5 * tree.size[i],
                    tree.x[i] + 1.5 * tree.size[i],
                    tree.x[i] + 1.5 * tree.size[i]) ,
              y = c(tree.y[i] - 12 * tree.size[i],
                    tree.y[i] - 1 * tree.size[i],
                    tree.y[i] - 1 * tree.size[i],
                    tree.y[i] - 12* tree.size[i]),
              col = "rosybrown4", lty = 0)

      ## branch one
      shape::filledellipse(rx1 = 10 * tree.size[i], rx2 = 0.00001,
                           mid = c(tree.x[i], tree.y[i] + 3 * tree.size[i]),
                           col = "darkgreen", from = 4.0143, to = 5.41052)
      ## branch two
      shape::filledellipse(rx1 = 8 * tree.size[i], rx2 = 0.00001,
                           mid = c(tree.x[i], tree.y[i] + 7 * tree.size[i]),
                           col = "darkgreen", from = 4.0143, to = 5.41052)
      ## branch three
      shape::filledellipse(rx1 = 6 * tree.size[i], rx2 = 0.00001,
                           mid = c(tree.x[i], tree.y[i] + 9 * tree.size[i]),
                           col = "darkgreen", from = 4.0143, to = 5.41052)
      ## branch four
      shape::filledellipse(rx1 = 4 * tree.size[i], rx2 = 0.00001,
                           mid = c(tree.x[i], tree.y[i] + 11 * tree.size[i]),
                           col = "darkgreen", from = 4.0143, to = 5.41052)

      ## sphere one
      shape::filledellipse(rx1 = 1 * f * tree.size[i], ry1 = 1 * tree.size[i],
                           mid = c(tree.x[i] + 2 * tree.size[i],
                                   tree.y[i] + 5 * tree.size[i]),
                           col = shape::shadepalette(n = 20, endcol = "darkred"))
      ## sphere two
      shape::filledellipse(rx1 = 0.8 * f * tree.size[i], ry1 = 0.8 * tree.size[i],
                           mid = c(tree.x[i] - 1 * tree.size[i],
                                   tree.y[i] + -3 * tree.size[i]),
                           col = shape::shadepalette(n = 20, endcol = "orange"))
      ## sphere three
      shape::filledellipse(rx1 = 1.2 * f * tree.size[i], ry1 = 1.2 * tree.size[i],
                           mid = c(tree.x[i] - 1.7 * tree.size[i],
                                   tree.y[i] + 2 * tree.size[i]),
                           col = shape::shadepalette(n = 20, endcol = "yellow3"))
      ## sphere four
      shape::filledellipse(rx1 = 1 * f * tree.size[i], ry1 = 1 * tree.size[i],
                           mid = c(tree.x[i] + 3 * tree.size[i],
                                   tree.y[i] - 4 * tree.size[i]),
                           col = shape::shadepalette(n = 20, endcol = "darkblue"))

      Sys.sleep(0.1)
    }

    ## add snow
    points(runif(300, 0, 100), runif(300, 0, 100), pch = 8, col = "lightgrey")
  }
}#end function
Luminescence/R/calc_WodaFuchs2008.R0000644000176200001440000001507514264017373016406 0ustar liggesusers#' Obtain the equivalent dose using the approach by Woda and Fuchs 2008
#'
#' The function generates a histogram-like reorganisation of the data, to
#' assess counts per bin. The log-transformed counts per bin are used to
#' calculate the second derivative of the data (i.e., the curvature of the
#' curve) and to find the central value of the bin hosting the distribution
#' maximum. A normal distribution model is fitted to the counts per bin
#' data to estimate the dose distribution parameters. The uncertainty of the
#' model is estimated based on all input equivalent doses smaller than that of the
#' modelled central value.
#'
#' @param data [data.frame] or [RLum.Results-class] object (**required**):
#' for [data.frame]: two columns: De (`values[,1]`) and De error (`values[,2]`).
#' For plotting multiple data sets, these must be provided as `list`
#' (e.g. `list(dataset1, dataset2)`).
#'
#' @param breaks [numeric]:
#' Either number or locations of breaks. See `[hist]` for details.
#' If missing, the number of breaks will be estimated based on the bin width
#' (as function of median error).
#'
#' @param plot [logical] (*with default*):
#' enable plot output.
#'
#' @param ... Further plot arguments passed to the function.
#'
#' @section Function version: 0.2.0
#'
#' @author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany),\cr
#' Michael Dietze, GFZ Potsdam (Germany)
#'
#' @seealso [calc_FuchsLang2001], [calc_CentralDose]
#'
#' @references
#' Woda, C., Fuchs, M., 2008. On the applicability of the leading edge method to
#' obtain equivalent doses in OSL dating and dosimetry. Radiation Measurements 43, 26-37.
#'
#' @examples
#'
#' ## read example data set
#' data(ExampleData.DeValues, envir = environment())
#'
#' results <- calc_WodaFuchs2008(
#'   data = ExampleData.DeValues$CA1,
#'    xlab = expression(paste(D[e], " [Gy]"))
#'  )
#'
#' @md
#' @export
calc_WodaFuchs2008 <- function(
  data,
  breaks = NULL,
  plot = TRUE,
  ...
) {

  ##TODO
  # - complete manual
  # - add statistics to the plot
  # - check whether this makes sense at all ...

  ## check data and parameter consistency -------------------------------------
  ## accept RLum.Results, data.frame or plain numeric; otherwise warn and
  ## return NULL (soft failure, as elsewhere in the package)
  if(is(data, "RLum.Results") == FALSE &
     is(data, "data.frame") == FALSE &
     is.numeric(data) == FALSE) {
    warning(paste("[calc_WodaFuchs()] Input data format is neither",
                  "'data.frame', 'RLum.Results' nor 'numeric'.",
                  "No output generated!"))
    return(NULL)

  } else {
    if(is(data, "RLum.Results") == TRUE) {
      data <- get_RLum(data, "data")
    }

    ## ensure a two-column structure: add an all-NA error column if only
    ## De values were provided
    if(length(data) < 2) {
      data <- cbind(data, rep(x = NA, times = length(data)))
    }
  }

  ## read additional arguments ('trace' is forwarded to nls() below)
  if("trace" %in% names(list(...))) {
    trace <- list(...)$trace
  } else {
    trace <- FALSE
  }

  ## calculations -------------------------------------------------------------

  ## estimate bin width based on Woda and Fuchs (2008): the median De error,
  ## or 10 percent of the De values if no errors are available.
  ## BUG FIX: the original condition was sum(is.na(data[,2]) == nrow(data)),
  ## which compares the logical vector element-wise against the row count and
  ## therefore never detects an all-NA error column; the intended test is
  ## "all error values are NA".
  if(sum(is.na(data[, 2])) == nrow(data)) {
    message("[calc_WodaFuchs2008()] No errors provided. Bin width set by 10 percent of input data!")
    bin_width <- median(data[, 1] / 10, na.rm = TRUE)
  } else {
    bin_width <- median(data[, 2], na.rm = TRUE)
  }

  ## optionally estimate number of class breaks based on the bin width.
  ## BUG FIX: misplaced parenthesis; the original evaluated
  ## max - (min / bin_width) instead of the intended (max - min) / bin_width
  ## (i.e. the De range divided by the bin width).
  if(is.null(breaks)) {
    n_breaks <- (max(data[, 1], na.rm = TRUE) - min(data[, 1], na.rm = TRUE)) / bin_width
  } else {
    n_breaks <- breaks
  }

  ## calculate histogram
  H <- hist(x = data[, 1],
            breaks = n_breaks,
            plot = FALSE)

  ## enforce a minimum of four bins, otherwise the model fit is meaningless
  if(n_breaks <= 3) {
    warning("[calc_WodaFuchs()] Less than four bins, now set to four!")

    ## recalculate histogram with fixed number of breaks
    H <- hist(x = data[, 1],
              breaks = 4,
              plot = FALSE)
  }

  ## extract values from histogram object
  H_c <- H$counts
  H_m <- H$mids

  ## log counts
  counts_log <- log(H_c)

  ## estimate curvature (currently not used further in the visible code path)
  curvature <- (counts_log[1] - counts_log[2]) /
    (counts_log[1] - counts_log[3])

  ## centre of the modal bin, used as start/centre value for the fit
  class_center <- H$mids[H_c == max(H_c)]

  ## optionally print warning if the distribution has more than one mode
  if(length(class_center) != 1) {
    warning("[calc_WodaFuchs()] More than one maximum. Fit may be invalid!",
            call. = FALSE)

    class_center <- class_center[1]
  }

  ## fit normal distribution to counts-per-bin data
  fit <- nls(H_c ~ (A / sqrt(2 * pi * sigma^2)) *
               exp(-(H_m - class_center)^2 / (2 * sigma^2)),
             start = c(A = mean(H_m), sigma = bin_width),
             control = c(maxiter = 5000),
             algorithm = "port",
             trace = trace)

  ## extract fitted parameters
  A <- coef(fit)["A"]
  sigma <- coef(fit)["sigma"]

  ## estimate dose following the leading-edge approach
  D_estimate <- as.numeric(x = class_center - sigma)

  ## count number of bin centres smaller than or equal to the centre class
  count_ID <- length(which(H_m <= class_center))

  ## extract H_m values smaller than center class
  H_m_smaller <- H_m[1:count_ID]

  ## calculate uncertainty according to Woda and Fuchs (2008)
  s <- round(sqrt(sum((H_m_smaller - D_estimate)^2) / (count_ID - 1)),
             digits = 2)

  ## plot output --------------------------------------------------------------
  if(plot) {
    ##define plot default settings, may be overridden via ...
    plot_settings <- list(
      xlab = expression(paste(D[e], " [s]")),
      ylab = "Frequency",
      xlim = range(data[, 1], na.rm = TRUE) + c(-10, 20),
      ylim = NULL,
      main = expression(paste(D[e], " applying Woda and Fuchs (2008)")),
      sub = NULL
    )

    ##modify settings on request
    plot_settings <- modifyList(x = plot_settings, val = list(...), keep.null = TRUE)

    plot(
      x = H,
      xlab = plot_settings$xlab,
      ylab = plot_settings$ylab,
      xlim = plot_settings$xlim,
      ylim = plot_settings$ylim,
      main = plot_settings$main,
      sub = plot_settings$sub
    )

    ## add curve with fitted normal distribution;
    ## the dummy 'x' silences the R CMD check note for curve()'s variable
    x <- 0
    rm(x)
    curve((A / sqrt(2 * pi * sigma^2)) *
            exp(-(x - class_center)^2 / (2 * sigma^2)),
          add = TRUE,
          to = class_center,
          col = "red")
  }

  ## return output ------------------------------------------------------------
  return(set_RLum(
    class = "RLum.Results",
    data = list(
      D_estimate = data.frame(
        DP = D_estimate,
        DP.ERROR = s,
        CLASS_CENTER = class_center,
        BIN_WIDTH = bin_width,
        SIGMA = sigma,
        A = A,
        row.names = NULL
      ),
      breaks = H$breaks
    ),
    info = list(call = sys.call())
  ))
}
Luminescence/R/RLum.Data.Curve-class.R0000644000176200001440000003472414264017373017146 0ustar liggesusers#' @include get_RLum.R set_RLum.R
names_RLum.R length_RLum.R bin_RLum.Data.R smooth_RLum.R NULL #' Class `"RLum.Data.Curve"` #' #' Class for representing luminescence curve data. #' #' @name RLum.Data.Curve-class #' #' @docType class #' #' @slot recordType #' Object of class "character" containing the type of the curve (e.g. "TL" or "OSL") #' #' @slot curveType #' Object of class "character" containing curve type, allowed values are measured or predefined #' #' @slot data #' Object of class [matrix] containing curve x and y data. #' 'data' can also be of type `RLum.Data.Curve` to change object values without #' deconstructing the object. For example: #' ``` #' set_RLum(class = 'RLum.Data.Curve', #' data = Your.RLum.Data.Curve, #' recordType = 'never seen before') #' ``` #' would just change the `recordType`. Missing arguments the value is taken #' from the input object in 'data' (which is already an RLum.Data.Curve object #' in this example) #' #' #' @note #' The class should only contain data for a single curve. For additional #' elements the slot `info` can be used (e.g. providing additional heating #' ramp curve). Objects from the class `RLum.Data.Curve` are produced by other #' functions (partly within [RLum.Analysis-class] objects), #' namely: [Risoe.BINfileData2RLum.Analysis], [read_XSYG2R] #' #' @section Create objects from this Class: #' Objects can be created by calls of the form #' `set_RLum(class = "RLum.Data.Curve", ...)`. 
#' #' @section Class version: 0.5.1 #' #' @author Sebastian Kreutzer, IRAMAT-CRP2A, UMR 5060, CNRS - Université Bordeaux Montaigne (France) #' #' @seealso [RLum-class], [RLum.Data-class], [plot_RLum], [merge_RLum] #' #' @keywords classes #' #' @examples #' #' showClass("RLum.Data.Curve") #' #' ##set empty curve object #' set_RLum(class = "RLum.Data.Curve") #' #' @md #' @export setClass("RLum.Data.Curve", slots = list( recordType = "character", curveType = "character", data = "matrix" ), contains = "RLum.Data", prototype = list ( recordType = NA_character_, curveType = NA_character_, data = matrix(data = 0, ncol = 2) ) ) # as() ---------------------------------------------------------------------------------------- ##LIST ##COERCE RLum.Data.Curve >> list AND list >> RLum.Data.Curve #' as() - RLum-object coercion #' #' for `[RLum.Data.Curve-class]` #' #' **[RLum.Data.Curve-class]** #' #' \tabular{ll}{ #' **from** \tab **to**\cr #' `list` \tab `list` \cr #' `data.frame` \tab `data.frame`\cr #' `matrix` \tab `matrix` #' } #' #' @param from [RLum-class], [list], [data.frame], [matrix] (**required**): #' object to be coerced from #' #' @param to [character] (**required**): #' class name to be coerced to #' #' @seealso [methods::as] #' #' @note #' Due to the complex structure of the `RLum` objects itself a coercing to standard #' R data structures will be always loosely! 
#' #' @md #' @name as setAs("list", "RLum.Data.Curve", function(from,to){ new(to, recordType = "unkown curve type", curveType = NA_character_, data = matrix(unlist(from), ncol = 2), info = list()) }) setAs("RLum.Data.Curve", "list", function(from){ list(x = from@data[,1], y = from@data[,2]) }) ##DATA.FRAME ##COERCE RLum.Data.Curve >> data.frame AND data.frame >> RLum.Data.Curve setAs("data.frame", "RLum.Data.Curve", function(from,to){ new(to, recordType = "unkown curve type", curveType = NA_character_, data = as.matrix(from), info = list()) }) setAs("RLum.Data.Curve", "data.frame", function(from){ data.frame(x = from@data[,1], y = from@data[,2]) }) ##MATRIX ##COERCE RLum.Data.Curve >> matrix AND matrix >> RLum.Data.Curve setAs("matrix", "RLum.Data.Curve", function(from,to){ new(to, recordType = "unkown curve type", curveType = NA_character_, data = from, info = list()) }) setAs("RLum.Data.Curve", "matrix", function(from){ from@data }) # show() -------------------------------------------------------------------------------------- #' @describeIn RLum.Data.Curve #' Show structure of `RLum.Data.Curve` object #' #' @keywords internal #' #' @md #' @export setMethod("show", signature(object = "RLum.Data.Curve"), function(object){ ##print information cat("\n [RLum.Data.Curve-class]") cat("\n\t recordType:", object@recordType) cat("\n\t curveType:", object@curveType) cat("\n\t measured values:", length(object@data[,1])) cat("\n\t .. range of x-values:", suppressWarnings(range(object@data[,1]))) cat("\n\t .. 
range of y-values:", suppressWarnings(min(object@data[,2], na.rm = TRUE)), suppressWarnings(max(object@data[,2], na.rm = TRUE)), if(anyNA(object@data[,2])){"(contains NA values)"}else{""} ) cat("\n\t additional info elements:", length(object@info), "\n") #cat("\n\t\t >> names:", names(object@info)) } ) # set_RLum() ---------------------------------------------------------------------------------- #' @describeIn RLum.Data.Curve #' Construction method for RLum.Data.Curve object. The slot info is optional #' and predefined as empty list by default. #' #' @param class [`set_RLum`]; [character] (**required**): #' name of the `RLum` class to create #' #' @param originator [`set_RLum`]; [character] (*automatic*): #' contains the name of the calling function (the function that produces this object); #' can be set manually. #' #' @param .uid [`set_RLum`]; [character] (*automatic*): #' sets an unique ID for this object using the internal C++ function `create_UID`. #' #' @param .pid [`set_RLum`]; [character] (*with default*): #' option to provide a parent id for nesting at will. #' #' @param recordType [`set_RLum`]; [character] (*optional*): #' record type (e.g., "OSL") #' #' @param curveType [`set_RLum`]; [character] (*optional*): #' curve type (e.g., "predefined" or "measured") #' #' @param data [`set_RLum`]; [matrix] (**required**): #' raw curve data. If `data` itself is a `RLum.Data.Curve`-object this can be #' used to re-construct the object (s. details), i.e. modified parameters except #' `.uid`, `.pid` and `originator`. The rest will be subject to copy and paste unless provided. #' #' @param info [`set_RLum`]; [list] (*optional*): #' info elements #' #' @return #' #' **`set_RLum`** #' #' Returns an [RLum.Data.Curve-class] object. 
#'
#' @md
#' @export
setMethod(
  "set_RLum",
  signature = signature("RLum.Data.Curve"),
  definition = function(
    class,
    originator,
    .uid,
    .pid,
    recordType = NA_character_,
    curveType = NA_character_,
    data = matrix(0, ncol = 2),
    info = list()) {

    ## Re-construction mode: if 'data' is itself an RLum.Data.Curve object,
    ## recycle its slot values for every argument the caller did not supply
    if (is(data, "RLum.Data.Curve")) {
      ## arguments not provided by the caller fall back to the old object
      if (missing(curveType)) curveType <- data@curveType
      if (missing(recordType)) recordType <- data@recordType
      if (missing(info)) info <- data@info

      ## originator, .uid and .pid are always inherited from the old object
      ## slot-wise filling is faster than routing everything through new()
      obj <- new("RLum.Data.Curve")
      obj@recordType <- recordType
      obj@curveType <- curveType
      obj@data <- data@data
      obj@info <- info
      obj@originator <- data@originator
      obj@.uid <- data@.uid
      obj@.pid <- data@.pid

    } else {
      ## plain construction from a matrix
      obj <- new("RLum.Data.Curve")
      obj@originator <- originator
      obj@recordType <- recordType
      obj@curveType <- curveType
      obj@data <- data
      obj@info <- info
      obj@.uid <- .uid
      obj@.pid <- .pid
    }

    return(obj)
  }
)

# get_RLum() ----------------------------------------------------------------------------------

#' @describeIn RLum.Data.Curve
#' Accessor method for RLum.Data.Curve object. The argument info.object is
#' optional to directly access the info elements. If no info element name is
#' provided, the raw curve data (matrix) will be returned.
#'
#' @param object [`get_RLum`], [`length_RLum`], [`names_RLum`] (**required**):
#' an object of class [RLum.Data.Curve-class]
#'
#' @param info.object [`get_RLum`] [character] (*optional*):
#' name of the wanted info element
#'
#' @return
#'
#' **`get_RLum`**
#'
#' 1. A [matrix] with the curve values or
#' 2. only the info object if `info.object` was set.
#'
#' @md
#' @export
setMethod("get_RLum",
          signature("RLum.Data.Curve"),
          definition = function(object, info.object = NULL) {
            ##if info.object == NULL just return the raw curve values (matrix)
            if(!is.null(info.object)) {
              if(info.object %in% names(object@info)){
                unlist(object@info[info.object])

              }else{
                ##check for entries
                if(length(object@info) == 0){
                  ## fix: removed stray ')' from the warning text and added
                  ## call. = FALSE for consistency with the branch below
                  warning("[get_RLum()] This RLum.Data.Curve object has no info objects! NULL returned!",
                          call. = FALSE)
                  return(NULL)

                }else{
                  ##grep valid names to give the user a helpful hint
                  temp.element.names <- paste(names(object@info), collapse = ", ")

                  warning.text <- paste("[get_RLum()] Invalid info.object name. Valid names are:",
                                        temp.element.names)
                  warning(warning.text, call. = FALSE)
                  return(NULL)
                }
              }

            }else{
              object@data
            }
          })

# length_RLum() -------------------------------------------------------------------------------

#' @describeIn RLum.Data.Curve
#' Returns the length of the curve object, which is the maximum of the
#' value time/temperature of the curve (corresponding to the stimulation length)
#'
#' @return
#'
#' **`length_RLum`**
#'
#' Maximum of the time/temperature values of the curve, i.e. the stimulation
#' length (**not** the number of channels/rows of the matrix)
#'
#' @md
#' @export
setMethod("length_RLum",
          "RLum.Data.Curve",
          function(object){
            ## deliberately returns max(x) (stimulation length),
            ## not nrow(object@data)
            max(object@data[,1])
          })

# names_RLum() --------------------------------------------------------------------------------

#' @describeIn RLum.Data.Curve
#' Returns the names info elements coming along with this curve object
#'
#' @return
#'
#' **`names_RLum`**
#'
#' Names of the info elements (slot `info`)
#'
#' @export
setMethod("names_RLum",
          "RLum.Data.Curve",
          function(object){
            names(object@info)
          })

# bin_RLum.Data() -----------------------------------------------------------------------------

#' @describeIn RLum.Data.Curve
#' Allows binning of specific objects
#'
#' @param bin_size [integer] (*with default*):
#' set number of channels used for each bin, e.g. `bin_size = 2` means that
#' two channels are binned.
#'
#' @return
#'
#' **`bin_RLum.Data`**
#'
#' Same object as input, after applying the binning.
#'
#' @md
#' @export
setMethod(f = "bin_RLum.Data",
          signature = "RLum.Data.Curve",
          function(object, bin_size = 2) {
            ##check for invalid bin_size values: NULL, zero and negative
            ##values leave the object untouched (see else branch)
            if (!is.null(bin_size) && bin_size > 0) {
              ##set stepping vector: row index of the first channel of each bin
              stepping <- seq(1, nrow(object@data), by = bin_size)

              ##get bin vector (the count/y column)
              bin_vector <- object@data[, 2]

              ##pad the vector with NA to a multiple of the number of bins,
              ##so the matrix reshape below does not recycle values
              length(bin_vector) <- suppressWarnings(prod(dim(matrix(
                bin_vector, ncol = length(stepping)
              ))))

              ##define new matrix for binning: one column per bin,
              ##bin_size rows per column (column-major fill)
              bin_matrix <- matrix(bin_vector, ncol = length(stepping))

              ##calculate column sums and replace matrix;
              ##this is much faster than an apply loop.
              ##x-values of the binned curve are taken at each bin start.
              object@data <- matrix(c(object@data[stepping],
                                      colSums(bin_matrix, na.rm = TRUE)), ncol = 2)

              ##return via set_RLum() so the result carries a fresh .uid
              return(set_RLum(class = "RLum.Data.Curve", data = object))

            } else{
              warning("Argument 'bin_size' invalid, nothing was done!")

              ##just return the object unchanged
              return(object)
            }
          })

# smooth_RLum() -------------------------------------------------------------------------------

#' @describeIn RLum.Data.Curve
#' Smoothing of RLum.Data.Curve objects using the function [zoo::rollmean] or [zoo::rollmedian][zoo::rollmean].
#' In particular the internal function `.smoothing` is used.
#'
#' @param k [`smooth_RLum`]; [integer] (*with default*):
#' window for the rolling mean; must be odd for `rollmedian`.
#' If nothing is set k is set automatically
#'
#' @param fill [`smooth_RLum`]; [numeric] (*with default*):
#' a vector defining the left and the right hand data
#'
#' @param align [`smooth_RLum`]; [character] (*with default*):
#' specifying whether the index of the result should be left- or right-aligned
#' or centred (default) compared to the rolling window of observations, allowed
#' `"right"`, `"center"` and `"left"`
#'
#' @param method [`smooth_RLum`]; [character] (*with default*):
#' defines which method should be applied for the smoothing: `"mean"` or `"median"`
#'
#' @return
#'
#' **`smooth_RLum`**
#'
#' Same object as input, after smoothing
#'
#' @md
#' @export
setMethod(
  f = "smooth_RLum",
  signature = "RLum.Data.Curve",
  function(object, k = NULL, fill = NA, align = "right", method = "mean") {
    ## smooth the count column (y) with the internal rolling-window helper
    smoothed_counts <- .smoothing(
      x = object@data[, 2],
      k = k,
      fill = fill,
      align = align,
      method = method)
    object@data[, 2] <- smoothed_counts

    ## hand the modified object back through set_RLum() so the
    ## returned curve receives a new id
    set_RLum(
      class = "RLum.Data.Curve",
      originator = "smooth_RLum",
      data = object)
  }
)
Luminescence/R/merge_Risoe.BINfileData.R0000644000176200001440000001453014464125673017530 0ustar liggesusers#' @title Merge Risoe.BINfileData objects or Risoe BIN-files
#'
#' @description Function allows merging Risoe BIN/BINX files or `Risoe.BINfileData` objects.
#'
#' @details
#' The function allows merging different measurements to one file or one
#' object. The record IDs are recalculated for the new object. Other values
#' are kept for each object. The number of input objects is not limited.
#'
#' `position.number.append.gap` option
#'
#' If the option `keep.position.number = FALSE` is used, the position
#' numbers of the new data set are recalculated by adding the highest position
#' number of the previous data set to each position number of the next data
#' set. For example: The highest position number is 48, then this number will
#' be added to all other position numbers of the next data set (e.g.
1 + 48 =
#' 49)
#'
#' However, there might be cases where an additional addend (summand) is needed
#' before the next position starts. Example:
#'
#' - Position number set (A): `1,3,5,7`
#' - Position number set (B): `1,3,5,7`
#'
#' With no additional summand the new position numbers would be:
#' `1,3,5,7,8,9,10,11`. That might be unwanted. Using the argument
#' `position.number.append.gap = 1` it will become:
#' `1,3,5,7,9,11,13,15,17`.
#'
#' @param input.objects [character] with [Risoe.BINfileData-class] objects (**required**):
#' Character vector with path and files names
#' (e.g. `input.objects = c("path/file1.bin", "path/file2.bin")` or
#' [Risoe.BINfileData-class] objects (e.g. `input.objects = c(object1, object2)`).
#' Alternatively a `list` is supported.
#'
#'
#' @param output.file [character] (*optional*):
#' File output path and name. If no value is given, a [Risoe.BINfileData-class] is
#' returned instead of a file.
#'
#' @param keep.position.number [logical] (*with default*):
#' Allows keeping the original position numbers of the input objects.
#' Otherwise the position numbers are recalculated.
#'
#' @param position.number.append.gap [integer] (*with default*):
#' Set the position number gap between merged BIN-file sets, if the option
#' `keep.position.number = FALSE` is used. See details for further
#' information.
#'
#' @return Returns a `file` or a [Risoe.BINfileData-class] object.
#'
#' @note
#' The validity of the output objects is not further checked.
#'
#' @section Function version: 0.2.8
#'
#' @author
#' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#' @seealso [Risoe.BINfileData-class], [read_BIN2R], [write_R2BIN]
#'
#' @references
#' Duller, G., 2007. Analyst.
#' #' #' @keywords IO manip #' #' #' @examples #' #' ##merge two objects #' data(ExampleData.BINfileData, envir = environment()) #' #' object1 <- CWOSL.SAR.Data #' object2 <- CWOSL.SAR.Data #' #' object.new <- merge_Risoe.BINfileData(c(object1, object2)) #' #' @md #' @export merge_Risoe.BINfileData <- function( input.objects, output.file, keep.position.number = FALSE, position.number.append.gap = 0 ){ # Integrity Checks -------------------------------------------------------- if(length(input.objects) < 2){ message("[merge_Risoe.BINfileData()] Nothing done at least two input objects are needed!") return(input.objects) } if(is(input.objects, "character") == TRUE){ for(i in 1:length(input.objects)){ if(file.exists(input.objects[i])==FALSE){ stop("[merge_Risoe.BINfileData()] File ",input.objects[i]," does not exist!", call. = FALSE) } } }else{ if(is(input.objects, "list") == TRUE){ for(i in 1:length(input.objects)){ if(is(input.objects[[i]], "Risoe.BINfileData") == FALSE){ stop("[merge_Risoe.BINfileData()] Input list does not contain Risoe.BINfileData objects!") } } }else{ stop("[merge_Risoe.BINfileData()] Input object is not a 'character' nor a 'list'!") } } # Import Files ------------------------------------------------------------ ##loop over all files to store the results in a list ##or the input is already a list if(is(input.objects, "character") == TRUE){ temp <- lapply(input.objects, read_BIN2R) }else{ temp <- input.objects } # Get POSITION values ------------------------------------------------------- ##grep maximum position value from the first file temp.position.max <- max(temp[[1]]@METADATA[["POSITION"]]) ##grep all position values except from the first file temp.position.values <- unlist(sapply(2:length(temp), function(x){ temp <- temp[[x]]@METADATA[["POSITION"]] + temp.position.max + position.number.append.gap assign(x = "temp.position.max", value = max(temp), envir = parent.env(environment())) return(temp) })) temp.position.values <- 
c(temp[[1]]@METADATA[["POSITION"]], temp.position.values) # Get overall record length ----------------------------------------------- temp.record.length <- sum(sapply(1:length(temp), function(x){ length(temp[[x]]@METADATA[,"ID"]) })) # Merge Files ------------------------------------------------------------- ##loop for similar input objects for(i in 1:length(input.objects)){ if(exists("temp.new.METADATA") == FALSE){ temp.new.METADATA <- temp[[i]]@METADATA temp.new.DATA <- temp[[i]]@DATA if(inherits(try(temp[[i]]@.RESERVED, silent = TRUE), "try-error")){ temp.new.RESERVED <- list() }else{ temp.new.RESERVED <- temp[[i]]@.RESERVED } }else{ temp.new.METADATA <- rbind(temp.new.METADATA, temp[[i]]@METADATA) temp.new.DATA <- c(temp.new.DATA, temp[[i]]@DATA) if(inherits(try(temp[[i]]@.RESERVED, silent = TRUE), "try-error")){ temp.new.RESERVED <- c(temp.new.RESERVED, list()) }else{ temp.new.RESERVED <- c(temp.new.RESERVED, temp[[i]]@.RESERVED) } } } ##SET RECORD ID in METADATA temp.new.METADATA$ID <- 1:temp.record.length ##SET POSITION VALUES if(keep.position.number == FALSE){ temp.new.METADATA$POSITION <- temp.position.values } ##TODO version number? # Produce BIN file object ------------------------------------------------- temp.new <- set_Risoe.BINfileData( METADATA = temp.new.METADATA, DATA = temp.new.DATA, .RESERVED = temp.new.RESERVED ) # OUTPUT ------------------------------------------------------------------ if(missing(output.file) == FALSE){ write_R2BIN(temp.new, output.file) }else{ return(temp.new) } } Luminescence/R/CW2pPMi.R0000644000176200001440000002134314264017373014403 0ustar liggesusers#' Transform a CW-OSL curve into a pPM-OSL curve via interpolation under #' parabolic modulation conditions #' #' Transforms a conventionally measured continuous-wave (CW) OSL-curve into a #' pseudo parabolic modulated (pPM) curve under parabolic modulation conditions #' using the interpolation procedure described by Bos & Wallinga (2012). 
#' #' The complete procedure of the transformation is given in Bos & Wallinga #' (2012). The input `data.frame` consists of two columns: time (t) and #' count values (CW(t)) #' #' **Nomenclature** #' #' - P = stimulation time (s) #' - 1/P = stimulation rate (1/s) #' #' **Internal transformation steps** #' #' (1) #' log(CW-OSL) values #' #' (2) #' Calculate t' which is the transformed time: #' \deqn{t' = (1/3)*(1/P^2)t^3} #' #' (3) #' Interpolate CW(t'), i.e. use the log(CW(t)) to obtain the count values for #' the transformed time (t'). Values beyond `min(t)` and `max(t)` #' produce `NA` values. #' #' (4) #' Select all values for t' < `min(t)`, i.e. values beyond the time resolution #' of t. Select the first two values of the transformed data set which contain #' no `NA` values and use these values for a linear fit using [lm]. #' #' (5) #' Extrapolate values for t' < `min(t)` based on the previously obtained #' fit parameters. The extrapolation is limited to two values. Other values at #' the beginning of the transformed curve are set to 0. #' #' (6) #' Transform values using #' \deqn{pLM(t) = t^2/P^2*CW(t')} #' #' (7) #' Combine all values and truncate all values for t' > `max(t)` #' #' **NOTE:** #' The number of values for t' < `min(t)` depends on the stimulation #' period `P`. To avoid the production of too many artificial data at the #' raising tail of the determined pPM curve, it is recommended to use the #' automatic estimation routine for `P`, i.e. provide no value for #' `P`. #' #' @param values [RLum.Data.Curve-class] or [data.frame] (**required**): #' [RLum.Data.Curve-class] or `data.frame` with measured curve data of type #' stimulation time (t) (`values[,1]`) and measured counts (cts) (`values[,2]`) #' #' @param P [vector] (*optional*): #' stimulation period in seconds. If no value is given, the optimal value is #' estimated automatically (see details). Greater values of P produce more #' points in the rising tail of the curve. 
#' #' @return #' The function returns the same data type as the input data type with #' the transformed curve values. #' #' `RLum.Data.Curve` #' #' \tabular{rl}{ #' `$CW2pPMi.x.t` \tab: transformed time values \cr #' `$CW2pPMi.method` \tab: used method for the production of the new data points #' } #' #' `data.frame` #' #' \tabular{rl}{ #' `$x` \tab: time\cr #' `$y.t` \tab: transformed count values\cr #' `$x.t` \tab: transformed time values \cr #' `$method` \tab: used method for the production of the new data points #' } #' #' @note #' According to Bos & Wallinga (2012), the number of extrapolated points #' should be limited to avoid artificial intensity data. If `P` is #' provided manually, not more than two points are extrapolated. #' #' @section Function version: 0.2.1 #' #' @author #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #' Based on comments and suggestions from:\cr #' Adrie J.J. Bos, Delft University of Technology, The Netherlands #' #' @seealso [CW2pLM], [CW2pLMi], [CW2pHMi], [fit_LMCurve], [RLum.Data.Curve-class] #' #' @references #' Bos, A.J.J. & Wallinga, J., 2012. How to visualize quartz OSL #' signal components. Radiation Measurements, 47, 752-758. #' #' **Further Reading** #' #' Bulur, E., 1996. An Alternative Technique For #' Optically Stimulated Luminescence (OSL) Experiment. Radiation Measurements, #' 26, 701-709. #' #' Bulur, E., 2000. A simple transformation for converting CW-OSL curves to #' LM-OSL curves. Radiation Measurements, 32, 141-145. #' #' @keywords manip #' #' @examples #' #' #' ##(1) #' ##load CW-OSL curve data #' data(ExampleData.CW_OSL_Curve, envir = environment()) #' #' ##transform values #' values.transformed <- CW2pPMi(ExampleData.CW_OSL_Curve) #' #' ##plot #' plot(values.transformed$x,values.transformed$y.t, log = "x") #' #' ##(2) - produce Fig. 
4 from Bos & Wallinga (2012) #' #' ##load data #' data(ExampleData.CW_OSL_Curve, envir = environment()) #' values <- CW_Curve.BosWallinga2012 #' #' ##open plot area #' plot(NA, NA, #' xlim = c(0.001,10), #' ylim = c(0,8000), #' ylab = "pseudo OSL (cts/0.01 s)", #' xlab = "t [s]", #' log = "x", #' main = "Fig. 4 - Bos & Wallinga (2012)") #' #' values.t <- CW2pLMi(values, P = 1/20) #' lines(values[1:length(values.t[,1]),1],CW2pLMi(values, P = 1/20)[,2], #' col = "red",lwd = 1.3) #' text(0.03,4500,"LM", col = "red", cex = .8) #' #' values.t <- CW2pHMi(values, delta = 40) #' lines(values[1:length(values.t[,1]),1], CW2pHMi(values, delta = 40)[,2], #' col = "black", lwd = 1.3) #' text(0.005,3000,"HM", cex = .8) #' #' values.t <- CW2pPMi(values, P = 1/10) #' lines(values[1:length(values.t[,1]),1], CW2pPMi(values, P = 1/10)[,2], #' col = "blue", lwd = 1.3) #' text(0.5,6500,"PM", col = "blue", cex = .8) #' #' @md #' @export CW2pPMi<- function( values, P ){ # (0) Integrity checks ------------------------------------------------------ ##(1) data.frame or RLum.Data.Curve object? if(is(values, "data.frame") == FALSE & is(values, "RLum.Data.Curve") == FALSE){ stop("[CW2pPMi()] 'values' object has to be of type 'data.frame' or 'RLum.Data.Curve'!", call. = FALSE) } ##(2) if the input object is an 'RLum.Data.Curve' object check for allowed curves if(is(values, "RLum.Data.Curve") == TRUE){ if(!grepl("OSL", values@recordType) & !grepl("IRSL", values@recordType)){ stop(paste("[CW2pPMi()] recordType ",values@recordType, " is not allowed for the transformation!", sep=""), call. 
= FALSE)
    }else{
      temp.values <- as(values, "data.frame")
    }

  }else{
    temp.values <- values
  }

  # (3) Transform values ------------------------------------------------------

  ##log transformation of the CW-OSL count values
  CW_OSL.log <- log(temp.values[,2])

  ##time transformation t >> t'
  t <- temp.values[,1]

  ##set P
  ##if no value for P is set, select a P value that yields a maximum of
  ##two extrapolation points
  if(missing(P)==TRUE){
    i <- 1
    P <- 1/i
    t.transformed <- (1/3)*(1/P^2)*t^3

    ## fix: comparison operators of this condition were garbled in the file
    ## ('<min(t)])>' was stripped); reconstructed so that P shrinks until no
    ## more than two transformed time values fall below the original time
    ## resolution min(t) (cf. the documented two-point extrapolation limit)
    while(length(t.transformed[t.transformed < min(t)]) > 2){
      P <- 1/i
      t.transformed <- (1/3)*(1/P^2)*t^3
      i <- i + 1
    }

  }else{
    t.transformed <- (1/3)*(1/P^2)*t^3
  }

  # (4) Interpolation ---------------------------------------------------------

  ##interpolate values, values beyond the range return NA values
  CW_OSL.interpolated <- approx(t, CW_OSL.log, xout = t.transformed, rule = 1)

  ##combine t.transformed and CW_OSL.interpolated in a data.frame
  temp <- data.frame(x = t.transformed, y = unlist(CW_OSL.interpolated$y))

  # (5) Extrapolate first values of the curve ---------------------------------

  ##(a) - find index of first rows which contain NA values (needed for extrapolation)
  temp.sel.id <- min(which(is.na(temp[,2]) == FALSE))

  ##(b) - fit linear function to the first two measured points
  fit.lm <- lm(y ~ x, data.frame(x = t[1:2], y = CW_OSL.log[1:2]))

  ##select values to extrapolate and predict (extrapolate) values based on the fitted function
  x.i <- data.frame(x = temp[1:(min(temp.sel.id) - 1), 1])
  y.i <- predict(fit.lm, x.i)

  ##replace NA values by extrapolated values
  temp[1:length(y.i), 2] <- y.i

  ##set method values (flag each point as extrapolated or interpolated)
  temp.method <- c(rep("extrapolation", length(y.i)),
                   rep("interpolation", (length(temp[,2]) - length(y.i))))

  ##print a warning message for more than two extrapolation points
  ## NOTE(review): the message claims leading points were "set to 0", while the
  ## code above extrapolates all leading NA values - confirm intended behaviour
  if(temp.sel.id > 2){
    warning("t' is beyond the time resolution. Only two data points have been extrapolated, the first ",
            temp.sel.id - 3, " points have been set to 0!")
  }

  # (6) Convert, transform and combine values ---------------------------------

  ##unlog CW-OSL count values, i.e.
log(CW) >> CW CW_OSL<-exp(temp$y) ##transform CW-OSL values to pPM-OSL values pPM<-(t^2/P^2)*CW_OSL ##combine all values and exclude NA values temp.values <- data.frame(x=t, y.t=pPM, x.t=t.transformed, method=temp.method) temp.values <- na.exclude(temp.values) # (7) Return values --------------------------------------------------------- ##returns the same data type as the input if(is(values, "data.frame") == TRUE){ values <- temp.values return(values) }else{ ##add old info elements to new info elements temp.info <- c(values@info, CW2pPMi.x.t = list(temp.values$x.t), CW2pPMi.method = list(temp.values$method)) newRLumDataCurves.CW2pPMi <- set_RLum( class = "RLum.Data.Curve", recordType = values@recordType, data = as.matrix(temp.values[,1:2]), info = temp.info) return(newRLumDataCurves.CW2pPMi) } } Luminescence/R/RLum.Results-class.R0000644000176200001440000002512114264017373016642 0ustar liggesusers#' @include get_RLum.R set_RLum.R length_RLum.R names_RLum.R NULL #' Class `"RLum.Results"` #' #' Object class contains results data from functions (e.g., [analyse_SAR.CWOSL]). #' #' @name RLum.Results-class #' #' @docType class #' #' @slot data #' Object of class [list] containing output data #' #' @note #' The class is intended to store results from functions to be used by #' other functions. The data in the object should always be accessed by the #' method `get_RLum`. #' #' @section Objects from the Class: #' Objects can be created by calls of the form `new("RLum.Results", ...)`. 
#' #' @section Class version: 0.5.2 #' #' @author #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #' @seealso [RLum-class], [plot_RLum], [merge_RLum] #' #' @keywords classes methods #' #' @examples #' #' showClass("RLum.Results") #' #' ##create an empty object from this class #' set_RLum(class = "RLum.Results") #' #' ##use another function to show how it works #' #' ##Basic calculation of the dose rate for a specific date #' dose.rate <- calc_SourceDoseRate( #' measurement.date = "2012-01-27", #' calib.date = "2014-12-19", #' calib.dose.rate = 0.0438, #' calib.error = 0.0019) #' #' ##show object #' dose.rate #' #' ##get results #' get_RLum(dose.rate) #' #' ##get parameters used for the calcualtion from the same object #' get_RLum(dose.rate, data.object = "parameters") #' #' ##alternatively objects can be accessed using S3 generics, such as #' dose.rate$parameters #' #' @md #' @export setClass( Class = "RLum.Results", slots = list(data = "list"), contains = "RLum", prototype = list (data = list()) ) # as() ---------------------------------------------------------------------------------------- ##LIST ##COERCE RLum.Results >> list AND list >> RLum.Results #' as() - RLum-object coercion #' #' for `[RLum.Results-class]` #' #' **[RLum.Results-class]** #' #' \tabular{ll}{ #' **from** \tab **to**\cr #' `list` \tab `list`\cr #' } #' #' Given that the [list] consists of [RLum.Results-class] objects. 
#'
#' @md
#' @name as
setAs("list", "RLum.Results",
      function(from,to){
        ## wrap the plain list into a new RLum.Results object;
        ## the originator "coercion" marks how the object was produced
        new(to,
            originator = "coercion",
            data = from)
      })

setAs("RLum.Results", "list",
      function(from){
        ## expose the data slot directly as a list
        from@data
      })

# show() --------------------------------------------------------------------------------------

#' @describeIn RLum.Results
#' Show structure of `RLum.Results` object
#'
#' @keywords internal
#'
#' @md
#' @export
setMethod("show",
          signature(object = "RLum.Results"),
          function(object) {
            ##data elements
            temp.names <- names(object@data)

            ##build one "name : class" line per stored data element;
            ##the else branch covers an empty data slot
            if (length(object) > 0) {
              temp.type <- sapply(1:length(object@data), function(x) {
                paste("\t .. $", temp.names[x], " : ", is(object@data[[x]])[1], sep = "")
              })
            } else{
              temp.type <- paste0("\t .. $", temp.names, " : ", is(object@data)[1])
            }
            temp.type <- paste(temp.type, collapse = "\n")

            ##print information
            cat("\n [RLum.Results-class]")
            cat("\n\t originator: ", object@originator, "()", sep = "")
            cat("\n\t data:", length(object@data))
            cat("\n", temp.type)
            cat("\n\t additional info elements: ", length(object@info),"\n")
          })

# set_RLum() ----------------------------------------------------------------------------------

#' @describeIn RLum.Results
#' Construction method for an RLum.Results object.
#'
#' @param class [`set_RLum`]; [character] **(required)**:
#' name of the `RLum` class to create
#'
#' @param originator [`set_RLum`]; [character] (*automatic*):
#' contains the name of the calling function (the function that produces this object);
#' can be set manually.
#'
#' @param .uid [`set_RLum`]; [character] (*automatic*):
#' sets an unique ID for this object using the internal C++ function `create_UID`.
#'
#' @param .pid [`set_RLum`]; [character] (*with default*):
#' option to provide a parent id for nesting at will.
#'
#' @param data [`set_RLum`]; [list] (*optional*):
#' a list containing the data to
#' be stored in the object
#'
#' @param info [`set_RLum`]; [list] (*optional*):
#' a list containing additional info data for the object
#'
#' @return
#'
#' **`set_RLum`**:
#'
#' Returns an object from the class [RLum.Results-class]
#'
#' @md
#' @export
setMethod("set_RLum",
          signature = signature("RLum.Results"),
          function(class, originator, .uid, .pid, data = list(), info = list()) {
            ## create an empty object and populate its slots one by one
            ## (slot-wise assignment avoids routing everything through new())
            results <- new("RLum.Results")

            results@originator <- originator
            results@data <- data
            results@info <- info
            results@.uid <- .uid
            results@.pid <- .pid

            return(results)
          })

# get_RLum() ----------------------------------------------------------------------------------

#' @describeIn RLum.Results
#' Accessor method for RLum.Results object. The argument data.object allows
#' directly accessing objects delivered within the slot data. The default
#' return object depends on the object originator (e.g., `fit_LMCurve`).
#' If nothing is specified always the first `data.object` will be returned.
#'
#' Note: Detailed specification should be made in combination with the originator slot in the
#' receiving function if results are piped.
#'
#' @param object [`get_RLum`]; [RLum.Results-class] (**required**):
#' an object of class [RLum.Results-class] to be evaluated
#'
#' @param data.object [`get_RLum`]; [character] or [numeric]:
#' name or index of the data slot to be returned
#'
#' @param info.object [`get_RLum`]; [character] (*optional*):
#' name of the wanted info element
#'
#' @param drop [`get_RLum`]; [logical] (*with default*):
#' coerce to the next possible layer (which are data objects, `drop = FALSE`
#' keeps the original `RLum.Results`
#'
#' @return
#'
#' **`get_RLum`**:
#'
#' Returns:
#'
#' 1. Data object from the specified slot
#' 2. [list] of data objects from the slots if 'data.object' is vector or
#' 3.
an [RLum.Results-class] for `drop = FALSE`. #' #' #' @md #' @export setMethod( "get_RLum", signature = signature("RLum.Results"), definition = function(object, data.object, info.object = NULL, drop = TRUE) { ##if info.object is set, only the info objects are returned if (!is.null(info.object)) { if (info.object %in% names(object@info)) { unlist(object@info[info.object]) } else { ##check for entries if (length(object@info) == 0) { warning("[get_RLum] This RLum.Results object has no info objects! NULL returned!)", call. = FALSE) } else { warning(paste0( "[get_RLum] Invalid info.object name. Valid names are: ", paste(names(object@info), collapse = ", ") ), call. = FALSE) } return(NULL) } } else{ if (!missing(data.object)) { ##++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ ##CASE1: data.object is of type 'character' if (is(data.object, "character")) { #check if the provided names are available if (all(data.object %in% names(object@data))) { ##account for multiple inputs if (length(data.object) > 1) { temp.return <- sapply(data.object, function(x) { object@data[[x]] }) } else{ temp.return <- list(data.object = object@data[[data.object]]) } } else { stop(paste0("[get_RLum()] data.object(s) unknown, valid names are: ", paste(names(object@data), collapse = ", ")), call. 
= FALSE) } } ##++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ ##CASE2: data.object is of type 'numeric' else if (is(data.object, "numeric")) { ##check if index is valid if (max(data.object) > length(object@data)) { stop("[get_RLum] 'data.object' index out of bounds!") } else if (length(data.object) > 1) { temp.return <- lapply(data.object, function(x) { object@data[[x]] }) } else { temp.return <- list(object@data[[data.object]]) } ##restore names as that get los with this method names(temp.return) <- names(object@data)[data.object] } ##++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ ##CASE3: data.object is of an unsupported type else{ stop("[get_RLum] 'data.object' has to be of type character or numeric!", call. = FALSE) } ##the CASE data.object is missing } else{ ##return always the first object if nothing is specified temp.return <- object@data[1] } ##CHECK whether an RLum.Results object needs to be produced ... ##This will just be the case if the funtion havn't returned something before if (drop) { ##we need to access the list here, otherwise we get unexpected behaviour as drop = TRUE ##should always return the lowest possible element here return(temp.return[[1]]) } else{ return(set_RLum( "RLum.Results", originator = object@originator, data = temp.return )) } } } ) # length_RLum() ------------------------------------------------------------------------------- #' @describeIn RLum.Results #' Returns the length of the object, i.e., number of stored data.objects #' #' @return #' #' **`length_RLum`** #' #' Returns the number of data elements in the `RLum.Results` object. 
#'
#' @md
#' @export
setMethod("length_RLum",
          "RLum.Results",
          function(object){
            ## number of stored data elements
            length(object@data)
          })

# names_RLum() --------------------------------------------------------------------------------
#' @describeIn RLum.Results
#' Returns the names of the data.objects
#'
#' @return
#'
#' **`names_RLum`**
#'
#' Returns the names of the data elements in the object.
#'
#' @md
#' @export
setMethod("names_RLum",
          "RLum.Results",
          function(object){
            names(object@data)
          })
Luminescence/R/merge_RLum.Results.R0000644000176200001440000001247614264017373016727 0ustar liggesusers#' @title Merge function for RLum.Results S4-class objects
#'
#' @description Function merges objects of class [RLum.Results-class]. The slots in the objects
#' are combined depending on the object type, e.g., for [data.frame] and [matrix]
#' rows are appended.
#'
#' @details Elements are appended where possible and attributes are preserved if
#' not of similar name as the default attributes of, e.g., a [data.frame]
#'
#' @note The `originator` is taken from the first element and not reset to `merge_RLum`
#'
#' @param objects [list] (**required**):
#' a list of [RLum.Results-class] objects
#'
#' @section Function version: 0.2.1
#'
#' @keywords internal
#'
#' @author
#' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#' @md
#' @export
merge_RLum.Results <- function(
  objects){

  ##-------------------------------------------------------------
  ##Some integrity checks

  ##check if input object is a list
  ## (message fix: "has to of type" -> "has to be of type")
  if(!is(objects, "list")){
    stop("[merge_RLum.Results()] 'objects' has to be of type 'list'!",
         call. = FALSE)

  }else{
    ##check if objects in the list are of type RLum.Results
    ##while collecting the originator of each object
    temp.originator <- sapply(1:length(objects), function(x){
      if(is(objects[[x]], "RLum.Results") == FALSE){
        stop("[merge_RLum.Results()] Objects to merge have to be of type 'RLum.Results'!",
             call. = FALSE)
      }
      objects[[x]]@originator
    })
  }

  ##check if originator is different; mixing results of different
  ##producing functions is not supported
  if(length(unique(temp.originator))>1){
    stop("[merge_RLum.Results()] 'RLum.Results' object originator differs!", call. = FALSE)
  }

  ##-------------------------------------------------------------
  ##merge objects depending on the data structure
  ##the merged result is accumulated in the first object's data slot
  for(i in 1:length(objects[[1]]@data)){

    ## shelf list of attributes of the i-th data element of every object
    attr_list <- unlist(
      lapply(1:length(objects), function(x) attributes(objects[[x]]@data[[i]])),
      recursive = FALSE)

    ##++++++++++++++++++++++++++++++++++++++++++++++++++++++++
    ##numeric vector or data.frame or matrix
    if(is(objects[[1]]@data[[i]], "data.frame")||
       is(objects[[1]]@data[[i]], "numeric") ||
       is(objects[[1]]@data[[i]], "matrix")){

      ##grep elements and combine them into a list
      temp.list <- lapply(1:length(objects), function(x)
        objects[[x]]@data[[i]])

      ##check whether the objects can be combined by rbind
      if(length(unique(unlist(lapply(temp.list, FUN = ncol)))) > 1)
        stop("[merge_RLum.Results()] Objects cannot be combined, number of columns differs.",
             call. = FALSE)

      ##combine them using rbind or data.table::rbindlist (depends on the data type)
      if(is(objects[[1]]@data[[i]], "numeric")){
        objects[[1]]@data[[i]] <- unlist(temp.list)

      }else if(is(objects[[1]]@data[[i]], "matrix")){
        objects[[1]]@data[[i]] <- do.call("rbind", temp.list)

      }else{
        objects[[1]]@data[[i]] <- as.data.frame(data.table::rbindlist(temp.list))
      }

      ## continue attribute preservation
      ## remove attributes that stem from the object itself
      attr_list[names(attr_list) %in% names(attributes(objects[[1]]@data[[i]]))] <- NULL

      ## just to avoid working the code if not needed
      if(length(attr_list) > 0) {
        ## merge attributes with similar name
        attrs <- lapply(unique(names(attr_list)), function(x){
          tmp <- unlist(attr_list[names(attr_list)%in%x], recursive = FALSE)
          names(tmp) <- NULL
          tmp
        })
        names(attrs) <- unique(names(attr_list))

        # set attributes ... we try because some attributes
        # may not carry over cleanly after merging
        for(n in names(attrs))
          attr(objects[[1]]@data[[i]], n) <- attrs[[n]]
      }

    }else{
      ##all other elements

      ##grep elements and write them into a list
      objects[[1]]@data[[i]] <- lapply(1:length(objects), function(x)
        objects[[x]]@data[[i]])

      ##unlist to flatten list if necessary for the elements
      if(is(objects[[1]]@data[[i]][[1]])[1] == "list"){
        objects[[1]]@data[[i]] <- unlist(objects[[1]]@data[[i]], recursive = FALSE)
      }
    }
  }##end loop

  #return by setting a new RLum.Results (for the .uid)
  #the originator is not reset; all .uids of the merged objects become the .pid
  objects_merged <- set_RLum(
    class = "RLum.Results",
    originator = objects[[1]]@originator,
    data = objects[[1]]@data,
    info = unlist(lapply(objects, function(x) {
      x@info
    }), recursive = FALSE),
    .pid = unlist(lapply(objects, function(x) {
      x@.uid
    })))

  return(objects_merged)
}
Luminescence/R/plot_RLum.Data.Spectrum.R0000644000176200001440000011003114264017373017601 0ustar liggesusers#' @title Plot function for an RLum.Data.Spectrum S4 class object
#'
#' @description The function provides a standardised plot output for spectrum data of an
#' [RLum.Data.Spectrum-class] class object. The purpose of this function is to provide
#' easy and straight-forward spectra plotting, not provide a full customised access to
#' all plot parameters. If this is wanted, standard R plot functionality should be used
#' instead.
#'
#' **Matrix structure** \cr (cf. [RLum.Data.Spectrum-class])
#'
#' - `rows` (x-values): wavelengths/channels (`xlim`, `xlab`)
#' - `columns` (y-values): time/temperature (`ylim`, `ylab`)
#' - `cells` (z-values): count values (`zlim`, `zlab`)
#'
#' *Note: This nomenclature is valid for all plot types of this function!*
#'
#' **Nomenclature for value limiting**
#'
#' - `xlim`: Limits values along the wavelength axis
#' - `ylim`: Limits values along the time/temperature axis
#' - `zlim`: Limits values along the count value axis
#'
#' **Details on the plot functions**
#'
#' Spectrum is visualised as 3D or 2D plot. Both plot types are based on
#' internal R plot functions.
#'
#'**`plot.type = "persp"`**
#'
#' Arguments that will be passed to [graphics::persp]:
#'
#' - `shade`: default is `0.4`
#' - `phi`: default is `15`
#' - `theta`: default is `-30`
#' - `expand`: default is `1`
#' - `axes`: default is `TRUE`
#' - `box`: default is `TRUE`; accepts `"alternate"` for a custom plot design
#' - `ticktype`: default is `detailed`, `r`: default is `10`
#'
#' **Note:** Further parameters can be adjusted via `par`. For example
#' to set the background transparent and reduce the thickness of the lines use:
#' `par(bg = NA, lwd = 0.7)` prior to the function call.
#'
#'**`plot.type = "single"`**
#'
#' Per frame a single curve is returned. Frames are time or temperature
#' steps.
#'
#' - `frames`: pick the frames to be plotted (depends on the binning!). Check without
#' this setting before plotting.
#'
#'**`plot.type = "multiple.lines"`**
#'
#' All frames plotted in one frame.
#'
#' - `frames`: pick the frames to be plotted (depends on the binning!). Check without
#' this setting before plotting.
#'
#' **`plot.type = "image"` or `plot.type = "contour"`**
#'
#' These plot types use the R functions [graphics::image] or [graphics::contour].
#' The advantage is that many plots can be arranged conveniently using standard
#' R plot functionality. If `plot.type = "image"` a contour is added by default,
#' which can be disabled using the argument `contour = FALSE` to add own contour
#' lines of choice.
#'
#'**`plot.type = "transect"`**
#'
#' Depending on the selected wavelength/channel range a transect over the
#' time/temperature (y-axis) will be plotted along the wavelength/channels
#' (x-axis). If the range contains more than one channel, values (z-values) are
#' summed up. To select a transect use the `xlim` argument, e.g.
#' `xlim = c(300,310)` plot along the summed up count values of channel
#' 300 to 310.
#' #' **Further arguments that will be passed (depending on the plot type)** #' #' `xlab`, `ylab`, `zlab`, `xlim`, `ylim`, `box`, #' `zlim`, `main`, `mtext`, `pch`, `type` (`"single"`, `"multiple.lines"`, `"interactive"`), #' `col`, `border`, `lwd`, `bty`, `showscale` (`"interactive"`, `"image"`) #' `contour`, `contour.col` (`"image"`) #' #' @param object [RLum.Data.Spectrum-class] or [matrix] (**required**): #' S4 object of class `RLum.Data.Spectrum` or a `matrix` containing count #' values of the spectrum.\cr #' Please note that in case of a matrix row names and col names are set #' automatically if not provided. #' #' @param par.local [logical] (*with default*): #' use local graphical parameters for plotting, e.g. the plot is shown in one column and one row. #' If `par.local = FALSE` global parameters are inherited. #' #' @param plot.type [character] (*with default*): plot type, for #' 3D-plot use `persp`, or `interactive`, for a 2D-plot `image`, `contour`, #' `single` or `multiple.lines` (along the time or temperature axis) #' or `transect` (along the wavelength axis) \cr #' #' @param optical.wavelength.colours [logical] (*with default*): #' use optical wavelength colour palette. Note: For this, the spectrum range is #' limited: `c(350,750)`. Own colours can be set with the argument `col`. If you provide already #' binned spectra, the colour assignment is likely to be wrong, since the colour gradients are calculated #' using the bin number. #' #' @param bg.spectrum [RLum.Data.Spectrum-class] or [matrix] (*optional*): Spectrum #' used for the background subtraction. By definition, the background spectrum should have been #' measured with the same setting as the signal spectrum. If a spectrum is provided, the #' argument `bg.channels` works only on the provided background spectrum. #' #' @param bg.channels [vector] (*optional*): #' defines channel for background subtraction If a vector is provided the mean #' of the channels is used for subtraction. 
If a spectrum is provided via `bg.spectrum`, this #' argument only works on the `bg.spectrum`. #' #' **Note:** Background subtraction is applied prior to channel binning #' #' @param bin.rows [integer] (*with default*): #' allow summing-up wavelength channels (horizontal binning), #' e.g. `bin.rows = 2` two channels are summed up. #' Binning is applied after the background subtraction. #' #' @param bin.cols [integer] (*with default*): #' allow summing-up channel counts (vertical binning) for plotting, #' e.g. `bin.cols = 2` two channels are summed up. #' Binning is applied after the background subtraction. #' #' @param norm [character] (*optional*): Normalise data to the maximum (`norm = "max"`) or #' minimum (`norm = "min"`) count values. The normalisation is applied after the binning. #' #' @param rug [logical] (*with default*): #' enables or disables colour rug. Currently only implemented for plot #' type `multiple.lines` and `single` #' #' @param limit_counts [numeric] (*optional*): #' value to limit all count values to this value, i.e. all count values above #' this threshold will be replaced by this threshold. This is helpful #' especially in case of TL-spectra. #' #' @param xaxis.energy [logical] (*with default*): enables or disables energy instead of #' wavelength axis. For the conversion the function [convert_Wavelength2Energy] is used. #' #' **Note:** This option means not only simply redrawing the axis, #' instead the spectrum in terms of intensity is recalculated, s. details. #' #' @param legend.text [character] (*with default*): #' possibility to provide own legend text. This argument is only considered for #' plot types providing a legend, e.g. `plot.type="transect"` #' #' @param plot [logical] (*with default*): enables/disables plot output. If the plot #' output is disabled, the [matrix] used for the plotting and the calculated colour values #' (as attributes) are returned. This way, the (binned, transformed etc.) 
output can #' be used in other functions and packages, such as plotting with the package `'plot3D'` #' #' @param ... further arguments and graphical parameters that will be passed #' to the `plot` function. #' #' @return Returns a plot and the transformed `matrix` used for plotting with some useful #' attributes such as the `colour` and `pmat` (the transpose matrix from [graphics::persp]) #' #' @note Not all additional arguments (`...`) will be passed similarly! #' #' @section Function version: 0.6.8 #' #' @author #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #' @seealso [RLum.Data.Spectrum-class], [convert_Wavelength2Energy], [plot], [plot_RLum], [graphics::persp], [plotly::plot_ly], [graphics::contour], [graphics::image] #' #' @keywords aplot #' #' @examples #' #' ##load example data #' data(ExampleData.XSYG, envir = environment()) #' #' ##(1)plot simple spectrum (2D) - image #' plot_RLum.Data.Spectrum( #' TL.Spectrum, #' plot.type="image", #' xlim = c(310,750), #' ylim = c(0,300), #' bin.rows=10, #' bin.cols = 1) #' #' ##(2) plot spectrum (3D) #' plot_RLum.Data.Spectrum( #' TL.Spectrum, #' plot.type="persp", #' xlim = c(310,750), #' ylim = c(0,100), #' bin.rows=10, #' bin.cols = 1) #' #'##(3) plot spectrum on energy axis #'##please note the background subtraction #'plot_RLum.Data.Spectrum(TL.Spectrum, #' plot.type="persp", #' ylim = c(0,200), #' bin.rows=10, #' bg.channels = 10, #' bin.cols = 1, #' xaxis.energy = TRUE) #' #' ##(4) plot multiple lines (2D) - multiple.lines (with ylim) #' plot_RLum.Data.Spectrum( #' TL.Spectrum, #' plot.type="multiple.lines", #' xlim = c(310,750), #' ylim = c(0,100), #' bin.rows=10, #' bin.cols = 1) #' #' \dontrun{ #' ##(4) interactive plot using the package plotly ("surface") #' plot_RLum.Data.Spectrum(TL.Spectrum, plot.type="interactive", #' xlim = c(310,750), ylim = c(0,300), bin.rows=10, #' bin.cols = 1) #' #' ##(5) interactive plot using the package plotly ("contour") #' 
plot_RLum.Data.Spectrum(TL.Spectrum, plot.type="interactive", #' xlim = c(310,750), ylim = c(0,300), bin.rows=10, #' bin.cols = 1, #' type = "contour", #' showscale = TRUE) #' #' ##(6) interactive plot using the package plotly ("heatmap") #' plot_RLum.Data.Spectrum(TL.Spectrum, plot.type="interactive", #' xlim = c(310,750), ylim = c(0,300), bin.rows=10, #' bin.cols = 1, #' type = "heatmap", #' showscale = TRUE) #' #' } #' #' @md #' @export plot_RLum.Data.Spectrum <- function( object, par.local = TRUE, plot.type = "contour", optical.wavelength.colours = TRUE, bg.spectrum = NULL, bg.channels = NULL, bin.rows = 1, bin.cols = 1, norm = NULL, rug = TRUE, limit_counts = NULL, xaxis.energy = FALSE, legend.text, plot = TRUE, ... ){ # Integrity check ----------------------------------------------------------- ##check if object is of class RLum.Data.Spectrum if(!inherits(object, "RLum.Data.Spectrum")){ if(inherits(object, "matrix")){ if(is.null(colnames(object))){ colnames(object) <- 1:ncol(object) } if(is.null(rownames(object))){ rownames(object) <- 1:nrow(object) } object <- set_RLum(class = "RLum.Data.Spectrum", data = object) message("[plot_RLum.Data.Spectrum()] Input has been converted to a RLum.Data.Spectrum object using set_RLum()") }else{ stop("[plot_RLum.Data.Spectrum()] Input object neither of class 'RLum.Data.Spectrum' nor 'matrix'.", call. 
= FALSE) } } ##XSYG ##check for curveDescripter if("curveDescripter" %in% names(object@info) == TRUE){ temp.lab <- strsplit(object@info$curveDescripter, split = ";")[[1]] xlab <- if(xaxis.energy == FALSE){ temp.lab[2]}else{"Energy [eV]"} ylab <- temp.lab[1] zlab <- temp.lab[3] }else{ xlab <- if(xaxis.energy == FALSE){ "Row values [a.u.]"}else{"Energy [eV]"} ylab <- "Column values [a.u.]" zlab <- "Cell values [a.u.]" } # Do energy axis conversion ------------------------------------------------------------------- if (xaxis.energy){ ##conversion object <- convert_Wavelength2Energy(object, digits = 5) ##modify row order (otherwise subsequent functions, like persp, have a problem) object@data[] <- object@data[order(as.numeric(rownames(object@data))),] rownames(object@data) <- sort(as.numeric(rownames(object@data))) } ##deal with addition arguments extraArgs <- list(...) main <- if("main" %in% names(extraArgs)) {extraArgs$main} else {"RLum.Data.Spectrum"} zlab <- if("zlab" %in% names(extraArgs)) {extraArgs$zlab} else {ifelse(plot.type == "multiple.lines", ylab, zlab)} xlab <- if("xlab" %in% names(extraArgs)) {extraArgs$xlab} else {xlab} ylab <- if("ylab" %in% names(extraArgs)) {extraArgs$ylab} else {ifelse(plot.type == "single" | plot.type == "multiple.lines", "Luminescence [cts/channel]", ylab)} xlim <- if("xlim" %in% names(extraArgs)) {extraArgs$xlim} else {c(min(as.numeric(rownames(object@data))), max(as.numeric(rownames(object@data))))} ylim <- if("ylim" %in% names(extraArgs)) {extraArgs$ylim} else {c(min(as.numeric(colnames(object@data))), max(as.numeric(colnames(object@data))))} #for zlim see below mtext <- if("mtext" %in% names(extraArgs)) {extraArgs$mtext} else {""} cex <- if("cex" %in% names(extraArgs)) {extraArgs$cex} else {1} phi <- if("phi" %in% names(extraArgs)) {extraArgs$phi} else {15} theta <- if("theta" %in% names(extraArgs)) {extraArgs$theta} else {-30} r <- if("r" %in% names(extraArgs)) {extraArgs$r} else {10} shade <- if("shade" %in% 
names(extraArgs)) {extraArgs$shade} else {0.4} expand <- if("expand" %in% names(extraArgs)) {extraArgs$expand} else {0.6} border <- if("border" %in% names(extraArgs)) {extraArgs$border} else {NULL} box <- if("box" %in% names(extraArgs)) {extraArgs$box} else {TRUE} axes <- if("axes" %in% names(extraArgs)) {extraArgs$axes} else {TRUE} ticktype <- if("ticktype" %in% names(extraArgs)) {extraArgs$ticktype} else {"detailed"} log<- if("log" %in% names(extraArgs)) {extraArgs$log} else {""} type<- if("type" %in% names(extraArgs)) {extraArgs$type} else { if (plot.type == "interactive") { "surface" } else{ "l" } } pch<- if("pch" %in% names(extraArgs)) {extraArgs$pch} else {1} lwd<- if("lwd" %in% names(extraArgs)) {extraArgs$lwd} else {1} bty <- if("bty" %in% names(extraArgs)) {extraArgs$bty} else {NULL} sub<- if("sub" %in% names(extraArgs)) {extraArgs$sub} else {""} #for plotly::plot_ly showscale<- if("showscale" %in% names(extraArgs)) {extraArgs$showscale} else {FALSE} # prepare values for plot --------------------------------------------------- ##copy data temp.xyz <- object@data ##check for NULL column names if(is.null(colnames(temp.xyz))){ colnames(temp.xyz) <- 1:ncol(temp.xyz) } if(is.null(rownames(temp.xyz))){ rownames(temp.xyz) <- 1:nrow(temp.xyz) } ##check for the case of a single column matrix if(ncol(temp.xyz)>1){ ##reduce for xlim temp.xyz <- temp.xyz[as.numeric(rownames(temp.xyz)) >= xlim[1] & as.numeric(rownames(temp.xyz)) <= xlim[2],] ##reduce for ylim temp.xyz <- temp.xyz[, as.numeric(colnames(temp.xyz)) >= ylim[1] & as.numeric(colnames(temp.xyz)) <= ylim[2]] } ## wavelength x <- as.numeric(rownames(temp.xyz)) ## time/temp y <- as.numeric(colnames(temp.xyz)) # Background spectrum ------------------------------------------------------------------------- if(!is.null(bg.spectrum)){ if(inherits(bg.spectrum, "RLum.Data.Spectrum") || inherits(bg.spectrum, "matrix")){ ##case RLum if(inherits(bg.spectrum, "RLum.Data.Spectrum")) bg.xyz <- bg.spectrum@data ##case matrix 
if(inherits(bg.spectrum, "matrix")) bg.xyz <- bg.spectrum ##take care of channel settings, otherwise set bg.channels if(is.null(bg.channels)) bg.channels <- c(1:ncol(bg.xyz)) ##set rownames if(is.null(rownames(bg.xyz))) rownames(bg.xyz) <- 1:nrow(bg.xyz) ##convert to energy scale if needed if(xaxis.energy){ #conversion bg.xyz <- convert_Wavelength2Energy(cbind(as.numeric(rownames(bg.xyz)), bg.xyz), digits = 5) rownames(bg.xyz) <- bg.xyz[,1] bg.xyz <- bg.xyz[,-1, drop = FALSE] ##modify row order (otherwise subsequent functions, like persp, have a problem) bg.xyz <- bg.xyz[order(as.numeric(rownames(bg.xyz))),,drop = FALSE] rownames(bg.xyz) <- sort(as.numeric(rownames(bg.xyz))) } ##reduce for xlim bg.xyz <- bg.xyz[as.numeric(rownames(bg.xyz)) >= xlim[1] & as.numeric(rownames(bg.xyz)) <= xlim[2],,drop = FALSE] }else{ stop("[plot_RLum.Data.Spectrum()] Input for 'bg.spectrum' not supported, please check manual!", call. = FALSE) } } # Background subtraction --------------------------------------------------- if(!is.null(bg.channels)){ ##set background object if not available if(is.null(bg.spectrum)) bg.xyz <- temp.xyz if(max(bg.channels) > ncol(bg.xyz) || any(bg.channels <= 0)){ ##correct the mess bg.channels <- sort(unique(bg.channels)) bg.channels[bg.channels <= 0] <- 1 bg.channels[bg.channels >= ncol(bg.xyz)] <- ncol(bg.xyz) warning( paste0( "[plot_RLum.Data.Spectrum()] 'bg.channels' out of range, corrected to: ", min(bg.channels), ":", max(bg.channels) ), call. = FALSE) } if(length(bg.channels) > 1){ temp.bg.signal <- rowMeans(bg.xyz[,bg.channels]) temp.xyz <- temp.xyz - temp.bg.signal }else{ temp.xyz <- temp.xyz - bg.xyz[,bg.channels] } ##set values < 0 to 0 temp.xyz[temp.xyz < 0] <- 0 ##check worst case if(sum(temp.xyz) == 0){ message("[plot_RLum.Data.Spectrum()] After background subtraction all counts < 0. 
Nothing plotted, NULL returned!") return(NULL) } } # Channel binning --------------------------------------------------------- ##rewrite arguments; makes things easier bin.cols <- bin.cols[1] bin.rows <- bin.rows[1] ##fatal check (not needed anymore, but never change running code) if(bin.cols < 1 | bin.rows < 1) stop("[plot_RLum.Data.Spectrum()] 'bin.cols' and 'bin.rows' have to be > 1!", call. = FALSE) if(bin.rows > 1){ temp.xyz <- .matrix_binning(temp.xyz, bin_size = bin.rows, bin_col = FALSE, names = "mean") x <- as.numeric(rownames(temp.xyz)) ##remove last channel (this is the channel that included less data) if(length(x)%%bin.rows != 0){ ##return warning warning( paste0("[plot_RLum.Data.Spectrum()] ",length(x)%%bin.rows, " channel(s) removed due to row (wavelength) binning."), call. = FALSE) ##do it temp.xyz <- temp.xyz[-length(x),] x <- x[-length(x)] } } if(bin.cols > 1){ temp.xyz <- .matrix_binning(temp.xyz, bin_size = bin.cols, bin_col = TRUE, names = "groups") y <- as.numeric(colnames(temp.xyz)) ##remove last channel (this is the channel that included less data) if(length(y)%%bin.cols != 0){ ##return warning warning( paste0("[plot_RLum.Data.Spectrum()] ",length(y)%%bin.cols, " channel(s) removed due to column (frame) binning."), call. = FALSE) ##do it temp.xyz <- temp.xyz[,-length(y)] y <- y[-length(y)] } } ##limit z-values if requested, this idea was taken from the Diss. 
by Thomas Schilles, 2002 if(!is.null(limit_counts[1])) temp.xyz[temp.xyz[] > max(min(temp.xyz), limit_counts[1])] <- limit_counts[1] # Normalise if wanted ------------------------------------------------------------------------- if(!is.null(norm)){ if(norm == "min") temp.xyz <- temp.xyz/min(temp.xyz) if(norm == "max") temp.xyz <- temp.xyz/max(temp.xyz) } ##check for zlim zlim <- if("zlim" %in% names(extraArgs)) {extraArgs$zlim} else {range(temp.xyz)} # set colour values -------------------------------------------------------- if("col" %in% names(extraArgs) == FALSE | plot.type == "single" | plot.type == "multiple.lines"){ if(optical.wavelength.colours == TRUE | (rug == TRUE & (plot.type != "persp" & plot.type != "interactive"))){ ##make different colour palette for energy values if (xaxis.energy) { col.violet <- c(2.76, ifelse(max(xlim) <= 4.13, max(xlim), 4.13)) col.blue <- c(2.52, 2.76) col.green <- c(2.18, 2.52) col.yellow <- c(2.10, 2.18) col.orange <- c(2.00, 2.10) col.red <- c(1.57, 2.00) col.infrared <- c(1.55, ifelse(min(xlim) >= 1.55, min(xlim), 1.57)) #set colour palette col <- unlist(sapply(1:length(x), function(i){ if(x[i] >= col.violet[1] & x[i] < col.violet[2]){"#EE82EE"} else if(x[i] >= col.blue[1] & x[i] < col.blue[2]){"#0000FF"} else if(x[i] >= col.green[1] & x[i] < col.green[2]){"#00FF00"} else if(x[i] >= col.yellow[1] & x[i] < col.yellow[2]){"#FFFF00"} else if(x[i] >= col.orange[1] & x[i] < col.orange[2]){"#FFA500"} else if(x[i] >= col.red[1] & x[i] < col.red[2]){"#FF0000"} else if(x[i] <= col.infrared[2]){"#BEBEBE"} })) }else{ ##wavelength colours for wavelength axis col.violet <- c(ifelse(min(xlim) <= 300, min(xlim), 300),450) col.blue <- c(450,495) col.green <- c(495,570) col.yellow <- c(570,590) col.orange <- c(590,620) col.red <- c(620,790) col.infrared <- c(790, ifelse(max(xlim) >= 800, max(xlim), 800)) #set colour palette col <- unlist(sapply(1:length(x), function(i){ if(x[i] >= col.violet[1] & x[i] < col.violet[2]){"#EE82EE"} else if(x[i] 
>= col.blue[1] & x[i] < col.blue[2]){"#0000FF"} else if(x[i] >= col.green[1] & x[i] < col.green[2]){"#00FF00"} else if(x[i] >= col.yellow[1] & x[i] < col.yellow[2]){"#FFFF00"} else if(x[i] >= col.orange[1] & x[i] < col.orange[2]){"#FFA500"} else if(x[i] >= col.red[1] & x[i] < col.red[2]){"#FF0000"} else if(x[i] >= col.infrared[1]){"#BEBEBE"} })) } ##find unique colours col.unique <- unique(col) ##if only one colour value, then skip gradient calculation as it causes ##an error if(length(col.unique) > 1){ ##set colour function for replacement colfunc <- colorRampPalette(col.unique) ##get index for colour values to be cut from the current palette col.unique.index <- vapply(col.unique, function(i) { max(which(col == i)) }, numeric(1)) ##remove last index (no colour gradient needed), for energy axis use the first value col.unique.index <- col.unique.index[-length(col.unique.index)] ##set borders for colour gradient recalculation col.unique.index.min <- col.unique.index - (50/bin.rows) col.unique.index.max <- col.unique.index + (50/bin.rows) ##set negative values to the lowest index col.unique.index.min[col.unique.index.min<=0] <- 1 ##build up new index sequence (might be better) col.gradient.index <- as.vector(unlist(( sapply(1:length(col.unique.index.min), function(j){ seq(col.unique.index.min[j],col.unique.index.max[j], by = 1) })))) ##generate colour ramp and replace values col.new <- colfunc(length(col.gradient.index)) col[col.gradient.index] <- col.new ##correct for overcharged colour values (causes zebra colour pattern) if (diff(c(length(col), nrow(temp.xyz))) < 0) { col <- col[1:c(length(col) - diff(c(length(col), nrow(temp.xyz))))] }else if(diff(c(length(col), nrow(temp.xyz))) > 0){ col <- col[1:c(length(col) + diff(c(length(col), nrow(temp.xyz))))] } } }else{ col <- "black" } }else{ col <- extraArgs$col } # Do log scaling if needed ------------------------------------------------- ##x if(grepl("x", log)==TRUE){x <- log10(x)} ##y if(grepl("y", log)==TRUE){y <- 
log10(y)} ##z if(grepl("z", log)==TRUE){temp.xyz <- log10(temp.xyz)} # PLOT -------------------------------------------------------------------- ## set variables we need later pmat <- NA if(plot){ ##par setting for possible combination with plot method for RLum.Analysis objects if(par.local) par(mfrow=c(1,1), cex = cex) ##rest plot type for 1 column matrix if(ncol(temp.xyz) == 1 && plot.type != "single"){ plot.type <- "single" warning("[plot_RLum.Data.Spectrum()] Single column matrix: plot.type has been automatically reset to 'single'", call. = FALSE) } ##do not let old code break down ... if(plot.type == "persp3d"){ plot.type <- "interactive" warning("[plot_RLum.Data.Spectrum()] 'plot.type' has been automatically reset to interactive!", call. = FALSE) } if(plot.type == "persp" && ncol(temp.xyz) > 1){ ## Plot: perspective plot ---- ## ==========================================================================# pmat <- persp( x, y, temp.xyz, shade = shade, axes = if(box[1] == "alternate") FALSE else axes, phi = phi, theta = theta, xlab = xlab, ylab = ylab, zlab = zlab, zlim = zlim, scale = TRUE, col = col[1:(length(x)-1)], ##needed due to recycling of the colours main = main, expand = expand, border = border, box = if(box[1] == "alternate") FALSE else box, r = r, ticktype = ticktype) ## this is custom plot output that might come in handy from time to time if(axes & box[1] == "alternate") { ## add axes manually x_axis <- seq(min(x), max(x), length.out = 20) y_axis <- seq(min(y), max(y), length.out = 20) z_axis <- seq(min(temp.xyz), max(temp.xyz), length.out = 20) lines(grDevices::trans3d(x_axis,min(y) - 5, min(temp.xyz),pmat)) lines(grDevices::trans3d(min(x) - 5,y_axis, min(temp.xyz),pmat)) lines(grDevices::trans3d(min(x) - 5,max(y), z_axis,pmat)) ## x-axis px_axis <- pretty(x_axis) px_axis <- px_axis[(px_axis) > min(x_axis) & px_axis < max(x_axis)] tick_start <- grDevices::trans3d(px_axis, min(y_axis), min(z_axis), pmat) tick_end <- grDevices::trans3d( px_axis, 
min(y_axis) - max(y_axis) * 0.05, min(z_axis), pmat) ## calculate slope angle for xlab and ticks m <- (tick_start$y[2] - tick_start$y[1]) / (tick_start$x[2] - tick_start$x[1]) m <- atan(m) * 360 / (2 * pi) segments(tick_start$x, tick_start$y, tick_end$x, tick_end$y) text( tick_end$x, tick_end$y, adj = c(0.5,1.2), px_axis, xpd = TRUE, cex = 0.85, srt = m) ## x-axis label text( mean(tick_end$x), min(tick_end$y), adj = c(0.5, 1), xlab, srt = m, xpd = TRUE) ## y-axis py_axis <- pretty(y_axis) py_axis <- py_axis[(py_axis) > min(y_axis) & py_axis < max(y_axis)] tick_start <- grDevices::trans3d(min(x_axis), py_axis, min(z_axis), pmat) tick_end <- grDevices::trans3d( min(x_axis) - max(x_axis) * 0.025, py_axis, min(z_axis), pmat) segments(tick_start$x, tick_start$y, tick_end$x, tick_end$y) ## calculate slope angle for xlab and ticks m <- (tick_start$y[2] - tick_start$y[1]) / (tick_start$x[2] - tick_start$x[1]) m <- atan(m) * 360 / (2 * pi) text( tick_end$x, tick_end$y, py_axis, adj = c(0.6,1.2), srt = m, cex = 0.85, xpd = TRUE) ## y-axis label text( min(tick_end$x), mean(tick_end$y), adj = c(0.5, 1), ylab, srt = m, xpd = TRUE) ## z-axis pz_axis <- pretty(z_axis) pz_axis <- pz_axis[(pz_axis) > min(z_axis) & pz_axis < max(z_axis)] tick_start <- grDevices::trans3d(min(x_axis), max(y_axis), pz_axis, pmat) tick_end <- grDevices::trans3d( min(x_axis) - max(x_axis) * 0.015, max(y_axis), pz_axis, pmat) segments(tick_start$x, tick_start$y, tick_end$x, tick_end$y) ## calculate slope angle for xlab and ticks m <- (tick_start$y[2] - tick_start$y[1]) / (tick_start$x[2] - tick_start$x[1]) m <- atan(m) * 360 / (2 * pi) text( tick_end$x, tick_end$y, format(pz_axis, scientific = TRUE, digits = 1), adj = c(0.5,1.2), srt = m, xpd = TRUE, cex = 0.85) ## z-axis label text( min(tick_end$x), mean(tick_end$y), adj = c(0.5, 2.5), zlab, srt = m, xpd = TRUE) } ##plot additional mtext mtext(mtext, side = 3, cex = cex * 0.8) }else if(plot.type == "interactive" && ncol(temp.xyz) > 1) { ## 
==========================================================================# ##interactive plot and former persp3d ## ==========================================================================# ## Plot: interactive ---- ##http://r-pkgs.had.co.nz/description.html if (!requireNamespace("plotly", quietly = TRUE)) { stop("[plot_RLum.Data.Spectrum()] Package 'plotly' needed for this plot type. Please install it.", call. = FALSE) } ##set up plot p <- plotly::plot_ly( z = temp.xyz, x = as.numeric(colnames(temp.xyz)), y = as.numeric(rownames(temp.xyz)), type = type, showscale = showscale #colors = col[1:(length(col)-1)], ) ##change graphical parameters p <- plotly::layout( p = p, scene = list( xaxis = list( title = ylab ), yaxis = list( title = xlab ), zaxis = list(title = zlab) ), title = main ) print(p) on.exit(return(p)) }else if(plot.type == "contour" && ncol(temp.xyz) > 1) { ## Plot: contour plot ---- ## ==========================================================================# contour(x,y,temp.xyz, xlab = xlab, ylab = ylab, main = main, labcex = 0.6 * cex, col = "black" ) ##plot additional mtext mtext(mtext, side = 3, cex = cex*0.8) }else if(plot.type == "image" && ncol(temp.xyz) > 1) { ## Plot: image plot ---- ## ==========================================================================# graphics::image(x,y,temp.xyz, xlab = xlab, ylab = ylab, main = main, col = if(is.null(list(...)$col)) grDevices::hcl.colors(50, palette = "Inferno") else list(...)$col ) if(is.null(list(...)$contour) || list(...)$contour != FALSE) { contour(x, y, temp.xyz, col = if(is.null(list(...)$contour.col)) rgb(1,1,1,0.8) else list(...)$contour.col, labcex = 0.6 * cex, add = TRUE) } ##plot additional mtext mtext(mtext, side = 3, cex = cex*0.8) } else if(plot.type == "single") { ## Plot: single plot ---- ## ==========================================================================# ## set colour rug col.rug <- col col <- if("col" %in% names(extraArgs)) {extraArgs$col} else {"black"} box <- 
if("box" %in% names(extraArgs)) extraArgs$box[1] else TRUE frames <- if("frames" %in% names(extraArgs)) extraArgs$frames else 1:length(y) for(i in frames) { if("zlim" %in% names(extraArgs) == FALSE){zlim <- range(temp.xyz[,i])} plot(x, temp.xyz[,i], xlab = xlab, ylab = ylab, main = main, xlim = xlim, ylim = zlim, frame = box, xaxt = "n", col = col, sub = paste( "(frame ",i, " | ", ifelse(i==1, paste("0.0 :", round(y[i], digits = 1)), paste(round(y[i-1], digits = 1),":", round(y[i], digits =1))),")", sep = ""), type = type, pch = pch) ## add colour rug if(rug){ ##rug as continuous rectangle i <- floor(seq(1,length(x), length.out = 300)) graphics::rect( xleft = x[i[-length(i)]], xright = x[i[-1]], ytop = par("usr")[3] + diff(c(par("usr")[3], min(zlim))) * 0.9, ybottom = par("usr")[3], col = col.rug[i], border = NA, lwd = 1) ## add rectangle from zero to first value graphics::rect( xleft = par()$usr[1], xright = x[i[1]], ytop = par("usr")[3] + diff(c(par("usr")[3], min(zlim))) * 0.9, ybottom = par("usr")[3], col = col.rug[1], density = 50, border = NA, lwd = 1) ## add rectangle from the last value to end of plot graphics::rect( xleft = x[i[length(i)]], xright = par()$usr[2], ytop = par("usr")[3] + diff(c(par("usr")[3], min(zlim))) * 0.9, ybottom = par("usr")[3], col = col.rug[length(col.rug)], density = 50, border = NA, lwd = 1) } ## add y axis to prevent overplotting graphics::axis(side = 1) ## add box if needed if(box) graphics::box() } ##plot additional mtext mtext(mtext, side = 3, cex = cex*0.8) }else if(plot.type == "multiple.lines" && ncol(temp.xyz) > 1) { ## Plot: multiple.lines ---- ## ========================================================================# col.rug <- col col<- if("col" %in% names(extraArgs)) {extraArgs$col} else {"black"} box <- if("box" %in% names(extraArgs)) extraArgs$box else TRUE frames <- if("frames" %in% names(extraArgs)) extraArgs$frames else 1:length(y) ##change graphic settings par.default <- par()[c("mfrow", "mar", "xpd")] 
par(mfrow = c(1,1), mar=c(5.1, 4.1, 4.1, 8.1), xpd = TRUE) ##grep zlim if("zlim" %in% names(extraArgs) == FALSE){zlim <- range(temp.xyz)} ##open plot area plot(NA, NA, xlab = xlab, ylab = ylab, main = main, xlim = xlim, ylim = zlim, frame = box, xaxt = "n", sub = sub, bty = bty) ## add colour rug if(rug){ ##rug as continuous rectangle i <- floor(seq(1,length(x), length.out = 300)) graphics::rect( xleft = x[i[-length(i)]], xright = x[i[-1]], ytop = par("usr")[3] + diff(c(par("usr")[3], min(zlim))) * 0.9, ybottom = par("usr")[3], col = col.rug[i], border = NA, lwd = NA) ## add rectangle from zero to first value graphics::rect( xleft = par()$usr[1], xright = x[i[1]], ytop = par("usr")[3] + diff(c(par("usr")[3], min(zlim))) * 0.9, ybottom = par("usr")[3], col = col.rug[1], density = 50, border = NA, lwd = 1) ## add rectangle from the last value to end of plot graphics::rect( xleft = x[i[length(i)]], xright = par()$usr[2], ytop = par("usr")[3] + diff(c(par("usr")[3], min(zlim))) * 0.9, ybottom = par("usr")[3], col = col.rug[length(col.rug)], density = 50, border = NA, lwd = 1) } ##add lines for(i in frames){ lines(x, temp.xyz[,i], lty = i, lwd = lwd, type = type, col = col) } ## add y axis to prevent overplotting graphics::axis(side = 1) ## add box if needed if(box) graphics::box() ##for missing values - legend.text if(missing(legend.text)) legend.text <- as.character(paste(round(y[frames],digits=1), zlab)) ##legend legend(x = par()$usr[2], y = par()$usr[4], legend = legend.text, lwd= lwd, lty = frames, bty = "n", cex = 0.6*cex) ##plot additional mtext mtext(mtext, side = 3, cex = cex*0.8) ##reset graphic settings par(par.default) rm(par.default) }else if(plot.type == "transect" && ncol(temp.xyz) > 1) { ## Plot: transect plot ---- ## ========================================================================# ##sum up rows (column sum) temp.xyz <- colSums(temp.xyz) ##consider differences within the arguments #check for zlim zlim <- if("zlim" %in% names(extraArgs)) 
{extraArgs$zlim} else {c(0,max(temp.xyz))} #check for zlim zlab <- if("ylab" %in% names(extraArgs)) {extraArgs$ylab} else {paste("Counts [1/summed channels]")} plot(y, temp.xyz, xlab = ylab, ylab = zlab, main = main, xlim = ylim, ylim = zlim, col = col, sub = paste("(channel range: ", min(xlim), " : ", max(xlim), ")", sep=""), type = type, pch = pch) ##plot additional mtext mtext(mtext, side = 3, cex = cex*0.8) }else{ stop("[plot_RLum.Data.Spectrum()] Unknown plot type.", call. = FALSE) } ## option for plotting nothing } # Return ------------------------------------------------------------------ ## add some attributes attr(temp.xyz, "colour") <- col attr(temp.xyz, "pmat") <- pmat ## return visible or not if(plot) invisible(temp.xyz) else return(temp.xyz) } Luminescence/R/plot_KDE.R0000644000176200001440000012131314236146743014664 0ustar liggesusers#' Plot kernel density estimate with statistics #' #' Plot a kernel density estimate of measurement values in combination with the #' actual values and associated error bars in ascending order. If enabled, the #' boxplot will show the usual distribution parameters (median as #' bold line, box delimited by the first and third quartile, whiskers defined #' by the extremes and outliers shown as points) and also the mean and #' standard deviation as pale bold line and pale polygon, respectively. #' #' The function allows passing several plot arguments, such as `main`, #' `xlab`, `cex`. However, as the figure is an overlay of two #' separate plots, `ylim` must be specified in the order: c(ymin_axis1, #' ymax_axis1, ymin_axis2, ymax_axis2) when using the cumulative values plot #' option. See examples for some further explanations. For details on the #' calculation of the bin-width (parameter `bw`) see #' [density]. #' #' #' A statistic summary, i.e. 
a collection of statistic measures of #' centrality and dispersion (and further measures) can be added by specifying #' one or more of the following keywords: #' - `"n"` (number of samples) #' - `"mean"` (mean De value) #' - `"median"` (median of the De values) #' - `"sd.rel"` (relative standard deviation in percent) #' - `"sd.abs"` (absolute standard deviation) #' - `"se.rel"` (relative standard error) #' - `"se.abs"` (absolute standard error) #' - `"in.2s"` (percent of samples in 2-sigma range) #' - `"kurtosis"` (kurtosis) #' - `"skewness"` (skewness) #' #' #' **Note** that the input data for the statistic summary is sent to the function #' `calc_Statistics()` depending on the log-option for the z-scale. If #' `"log.z = TRUE"`, the summary is based on the logarithms of the input #' data. If `"log.z = FALSE"` the linearly scaled data is used. #' #' **Note** as well, that `"calc_Statistics()"` calculates these statistic #' measures in three different ways: `unweighted`, `weighted` and #' `MCM-based` (i.e., based on Monte Carlo Methods). By default, the #' MCM-based version is used. If you wish to use another method, indicate this #' with the appropriate keyword using the argument `summary.method`. #' #' #' @param data [data.frame] or [RLum.Results-class] object (**required**): #' for `data.frame`: two columns: De (`values[,1]`) and De error (`values[,2]`). #' For plotting multiple data sets, these must be provided as #' `list` (e.g. `list(dataset1, dataset2)`). #' #' @param na.rm [logical] (*with default*): #' exclude NA values from the data set prior to any further operation. #' #' @param values.cumulative [logical] (*with default*): #' show cumulative individual data. #' #' @param order [logical]: #' Order data in ascending order. 
#' #' @param boxplot [logical] (*with default*): #' optionally show a boxplot (depicting median as thick central line, #' first and third quartile as box limits, whiskers denoting +/- 1.5 #' interquartile ranges and dots further outliers). #' #' @param rug [logical] (*with default*): #' optionally add rug. #' #' @param summary [character] (*optional*): #' add statistic measures of centrality and dispersion to the plot. Can be one #' or more of several keywords. See details for available keywords. #' #' @param summary.pos [numeric] or [character] (*with default*): #' optional position coordinates or keyword (e.g. `"topright"`) #' for the statistical summary. Alternatively, the keyword `"sub"` may be #' specified to place the summary below the plot header. However, this latter #' option is only possible if `mtext` is not used. In case of coordinate #' specification, y-coordinate refers to the right y-axis. #' #' @param summary.method [character] (*with default*): #' keyword indicating the method used to calculate the statistic summary. #' One out of `"unweighted"`, `"weighted"` and `"MCM"`. #' See [calc_Statistics] for details. #' #' @param bw [character] (*with default*): #' bin-width, choose a numeric value for manual setting. #' #' @param output [logical]: #' Optional output of numerical plot parameters. These can be useful to #' reproduce similar plots. Default is `TRUE`. #' #' @param ... further arguments and graphical parameters passed to [plot]. #' #' @note #' The plot output is not a 'probability density' plot (cf. the discussion #' of Berger and Galbraith in Ancient TL; see references)! 
#' #' @section Function version: 3.6.0 #' #' @author #' Michael Dietze, GFZ Potsdam (Germany)\cr #' Geography & Earth Sciences, Aberystwyth University (United Kingdom) #' #' @seealso [density], [plot] #' #' @examples #' #' ## read example data set #' data(ExampleData.DeValues, envir = environment()) #' ExampleData.DeValues <- #' Second2Gray(ExampleData.DeValues$BT998, c(0.0438,0.0019)) #' #' ## create plot straightforward #' plot_KDE(data = ExampleData.DeValues) #' #' ## create plot with logarithmic x-axis #' plot_KDE(data = ExampleData.DeValues, #' log = "x") #' #' ## create plot with user-defined labels and axes limits #' plot_KDE(data = ExampleData.DeValues, #' main = "Dose distribution", #' xlab = "Dose (s)", #' ylab = c("KDE estimate", "Cumulative dose value"), #' xlim = c(100, 250), #' ylim = c(0, 0.08, 0, 30)) #' #' ## create plot with boxplot option #' plot_KDE(data = ExampleData.DeValues, #' boxplot = TRUE) #' #' ## create plot with statistical summary below header #' plot_KDE(data = ExampleData.DeValues, #' summary = c("n", "median", "skewness", "in.2s")) #' #' ## create plot with statistical summary as legend #' plot_KDE(data = ExampleData.DeValues, #' summary = c("n", "mean", "sd.rel", "se.abs"), #' summary.pos = "topleft") #' #' ## split data set into sub-groups, one is manipulated, and merge again #' data.1 <- ExampleData.DeValues[1:15,] #' data.2 <- ExampleData.DeValues[16:25,] * 1.3 #' data.3 <- list(data.1, data.2) #' #' ## create plot with two subsets straightforward #' plot_KDE(data = data.3) #' #' ## create plot with two subsets and summary legend at user coordinates #' plot_KDE(data = data.3, #' summary = c("n", "median", "skewness"), #' summary.pos = c(110, 0.07), #' col = c("blue", "orange")) #' #' ## example of how to use the numerical output of the function #' ## return plot output to draw a thicker KDE line #' KDE_out <- plot_KDE(data = ExampleData.DeValues, #' output = TRUE) #' #' @md #' @export plot_KDE <- function( data, na.rm = TRUE, 
values.cumulative = TRUE, order = TRUE, boxplot = TRUE, rug = TRUE, summary, summary.pos, summary.method = "MCM", bw = "nrd0", output = TRUE, ... ) { ## check data and parameter consistency ------------------------------------- ## account for depreciated arguments if("centrality" %in% names(list(...))) { boxplot <- TRUE warning(paste("[plot_KDE()] Argument 'centrality' no longer supported. ", "Replaced by 'boxplot = TRUE'.")) } if("dispersion" %in% names(list(...))) { boxplot <- TRUE warning(paste("[plot_KDE()] Argument 'dispersion' no longer supported. ", "Replaced by 'boxplot = TRUE'.")) } if("polygon.col" %in% names(list(...))) { boxplot <- TRUE warning(paste("[plot_KDE()] Argument 'polygon.col' no longer supported. ", "Replaced by 'boxplot = TRUE'.")) } if("weights" %in% names(list(...))) { warning(paste("[plot_KDE()] Argument 'weights' no longer supported. ", "Weights are omitted.")) } ## Homogenise input data format if(is(data, "list") == FALSE) { data <- list(data) } ## check/adjust input data structure for(i in 1:length(data)) { if(is(data[[i]], "RLum.Results") == FALSE & is(data[[i]], "data.frame") == FALSE & is.numeric(data[[i]]) == FALSE) { stop(paste("[plot_KDE()] Input data format is neither", "'data.frame', 'RLum.Results' nor 'numeric'"), call. = FALSE) } else { ##extract RLum.Results if(is(data[[i]], "RLum.Results") == TRUE) { data[[i]] <- get_RLum(data[[i]], "data")[,1:2] } ##make sure we only take the first two columns data[[i]] <- data[[i]][,1:2] ##account for very short datasets if(length(data[[i]]) < 2) { data[[i]] <- cbind(data[[i]], rep(NA, length(data[[i]]))) } } ##check for Inf values and remove them if need if(any(is.infinite(unlist(data[[i]])))){ Inf_id <- which(is.infinite(unlist(data[[i]]))[1:nrow(data[[i]])/ncol(data[[i]])]) warning(paste("[plot_KDE()] Inf values removed in row(s):", paste(Inf_id, collapse = ", "), "in data.frame", i), call. 
= FALSE) data[[i]] <- data[[i]][-Inf_id,] rm(Inf_id) ##check if empty if(nrow(data[[i]]) == 0){ data[i] <- NULL } } } ##check if list is empty if(length(data) == 0) stop("[plot_KDE()] Your input is empty, intentionally or maybe after Inf removal? Nothing plotted!", call. = FALSE) ## check/set function parameters if(missing(summary) == TRUE) { summary <- "" } if(missing(summary.pos) == TRUE) { summary.pos <- "sub" } ## set mtext output if("mtext" %in% names(list(...))) { mtext <- list(...)$mtext } else { mtext <- "" } ## check/set layout definitions if("layout" %in% names(list(...))) { layout <- get_Layout(layout = list(...)$layout) } else { layout <- get_Layout(layout = "default") } ## data preparation steps --------------------------------------------------- ## optionally, count and exclude NA values and print result if(na.rm == TRUE) { for(i in 1:length(data)) { n.NA <- sum(is.na(data[[i]][,1])) if(n.NA == 1) { message(paste("1 NA value excluded from data set", i, ".")) } else if(n.NA > 1) { message(paste(n.NA, "NA values excluded from data set", i, ".")) } data[[i]] <- na.exclude(data[[i]]) } } ## optionally, order data set if(order == TRUE) { for(i in 1:length(data)) { data[[i]] <- data[[i]][order(data[[i]][,1]),] } } ## calculate and paste statistical summary De.stats <- matrix(nrow = length(data), ncol = 12) colnames(De.stats) <- c("n", "mean", "median", "kde.max", "sd.abs", "sd.rel", "se.abs", "se.rel", "q.25", "q.75", "skewness", "kurtosis") De.density <- list(NA) ## loop through all data sets for(i in 1:length(data)) { statistics <- calc_Statistics(data[[i]], na.rm = na.rm)[[summary.method]] De.stats[i,1] <- statistics$n De.stats[i,2] <- statistics$mean De.stats[i,3] <- statistics$median De.stats[i,5] <- statistics$sd.abs De.stats[i,6] <- statistics$sd.rel De.stats[i,7] <- statistics$se.abs De.stats[i,8] <- statistics$se.rel De.stats[i,9] <- quantile(data[[i]][,1], 0.25) De.stats[i,10] <- quantile(data[[i]][,1], 0.75) De.stats[i,11] <- statistics$skewness 
De.stats[i,12] <- statistics$kurtosis if(nrow(data[[i]]) >= 2){ De.density[[length(De.density) + 1]] <- density(data[[i]][,1], kernel = "gaussian", bw = bw) }else{ De.density[[length(De.density) + 1]] <- NA warning("[plot_KDE()] Less than 2 points provided, no density plotted.", call. = FALSE) } } ## remove dummy list element De.density[[1]] <- NULL ## create global data set De.global <- data[[1]][,1] De.error.global <- data[[1]][,2] De.density.range <- matrix(nrow = length(data), ncol = 4) for(i in 1:length(data)) { ##global De and De.error vector De.global <- c(De.global, data[[i]][,1]) De.error.global <- c(De.error.global, data[[i]][,2]) ## density range if(!all(is.na(De.density[[i]]))){ De.density.range[i,1] <- min(De.density[[i]]$x) De.density.range[i,2] <- max(De.density[[i]]$x) De.density.range[i,3] <- min(De.density[[i]]$y) De.density.range[i,4] <- max(De.density[[i]]$y) ## position of maximum KDE value De.stats[i,4] <- De.density[[i]]$x[which.max(De.density[[i]]$y)] }else{ De.density.range[i,1:4] <- NA De.stats[i,4] <- NA } } ## Get global range of densities De.density.range <- c(min(De.density.range[,1]), max(De.density.range[,2]), min(De.density.range[,3]), max(De.density.range[,4])) label.text = list(NA) if(summary.pos[1] != "sub") { n.rows <- length(summary) for(i in 1:length(data)) { stops <- paste(rep("\n", (i - 1) * n.rows), collapse = "") summary.text <- character(0) for(j in 1:length(summary)) { summary.text <- c(summary.text, paste( "", ifelse("n" %in% summary[j] == TRUE, paste("n = ", De.stats[i,1], "\n", sep = ""), ""), ifelse("mean" %in% summary[j] == TRUE, paste("mean = ", round(De.stats[i,2], 2), "\n", sep = ""), ""), ifelse("median" %in% summary[j] == TRUE, paste("median = ", round(De.stats[i,3], 2), "\n", sep = ""), ""), ifelse("kde.max" %in% summary[j] == TRUE, paste("kdemax = ", round(De.stats[i,4], 2), " \n ", sep = ""), ""), ifelse("sd.abs" %in% summary[j] == TRUE, paste("sd = ", round(De.stats[i,5], 2), "\n", sep = ""), ""), 
ifelse("sd.rel" %in% summary[j] == TRUE, paste("rel. sd = ", round(De.stats[i,6], 2), " %", "\n", sep = ""), ""), ifelse("se.abs" %in% summary[j] == TRUE, paste("se = ", round(De.stats[i,7], 2), "\n", sep = ""), ""), ifelse("se.rel" %in% summary[j] == TRUE, paste("rel. se = ", round(De.stats[i,8], 2), " %", "\n", sep = ""), ""), ifelse("skewness" %in% summary[j] == TRUE, paste("skewness = ", round(De.stats[i,11], 2), "\n", sep = ""), ""), ifelse("kurtosis" %in% summary[j] == TRUE, paste("kurtosis = ", round(De.stats[i,12], 2), "\n", sep = ""), ""), ifelse("in.2s" %in% summary[j] == TRUE, paste("in 2 sigma = ", round(sum(data[[i]][,1] > (De.stats[i,2] - 2 * De.stats[i,5]) & data[[i]][,1] < (De.stats[i,2] + 2 * De.stats[i,5])) / nrow(data[[i]]) * 100 , 1), " %", sep = ""), ""), sep = "")) } summary.text <- paste(summary.text, collapse = "") label.text[[length(label.text) + 1]] <- paste(stops, summary.text, stops, sep = "") } } else { for(i in 1:length(data)) { summary.text <- character(0) for(j in 1:length(summary)) { summary.text <- c(summary.text, ifelse("n" %in% summary[j] == TRUE, paste("n = ", De.stats[i,1], " | ", sep = ""), ""), ifelse("mean" %in% summary[j] == TRUE, paste("mean = ", round(De.stats[i,2], 2), " | ", sep = ""), ""), ifelse("median" %in% summary[j] == TRUE, paste("median = ", round(De.stats[i,3], 2), " | ", sep = ""), ""), ifelse("kde.max" %in% summary[j] == TRUE, paste("kdemax = ", round(De.stats[i,4], 2), " | ", sep = ""), ""), ifelse("sd.rel" %in% summary[j] == TRUE, paste("rel. sd = ", round(De.stats[i,6], 2), " %", " | ", sep = ""), ""), ifelse("sd.abs" %in% summary[j] == TRUE, paste("abs. sd = ", round(De.stats[i,5], 2), " | ", sep = ""), ""), ifelse("se.rel" %in% summary[j] == TRUE, paste("rel. se = ", round(De.stats[i,8], 2), " %", " | ", sep = ""), ""), ifelse("se.abs" %in% summary[j] == TRUE, paste("abs. 
se = ", round(De.stats[i,7], 2), " | ", sep = ""), ""), ifelse("skewness" %in% summary[j] == TRUE, paste("skewness = ", round(De.stats[i,11], 2), " | ", sep = ""), ""), ifelse("kurtosis" %in% summary[j] == TRUE, paste("kurtosis = ", round(De.stats[i,12], 2), " | ", sep = ""), ""), ifelse("in.2s" %in% summary[j] == TRUE, paste("in 2 sigma = ", round(sum(data[[i]][,1] > (De.stats[i,2] - 2 * De.stats[i,5]) & data[[i]][,1] < (De.stats[i,2] + 2 * De.stats[i,5])) / nrow(data[[i]]) * 100 , 1), " % ", sep = ""), "") ) } summary.text <- paste(summary.text, collapse = "") label.text[[length(label.text) + 1]] <- paste( " ", summary.text, sep = "") } ## remove outer vertical lines from string for(i in 2:length(label.text)) { label.text[[i]] <- substr(x = label.text[[i]], start = 3, stop = nchar(label.text[[i]]) - 3) } } ## remove dummy list element label.text[[1]] <- NULL ## read out additional parameters ------------------------------------------- if("main" %in% names(list(...))) { main <- list(...)$main } else { main <- expression(bold(paste(D[e], " distribution"))) } if("sub" %in% names(list(...))) { sub <- list(...)$sub } else { sub <- NULL } if("xlab" %in% names(list(...))) { xlab <- list(...)$xlab } else { xlab <- expression(paste(D[e], " [Gy]")) } if("ylab" %in% names(list(...))) { ylab <- list(...)$ylab } else { ylab <- c("Density", "Cumulative frequency") } if("xlim" %in% names(list(...))) { xlim.plot <- list(...)$xlim } else { xlim.plot <- c(min(c(De.global - De.error.global), De.density.range[1], na.rm = TRUE), max(c(De.global + De.error.global), De.density.range[2], na.rm = TRUE)) } if("ylim" %in% names(list(...))) { ylim.plot <- list(...)$ylim } else { if(!is.na(De.density.range[1])){ ylim.plot <- c(De.density.range[3], De.density.range[4], 0, max(De.stats[,1])) }else{ ylim.plot <- c(0, max(De.stats[,1]), 0, max(De.stats[,1])) } } if("log" %in% names(list(...))) { log.option <- list(...)$log } else { log.option <- "" } if("col" %in% names(list(...))) { col.main <- 
list(...)$col col.xlab <- 1 col.ylab1 <- 1 col.ylab2 <- 1 col.xtck <- 1 col.ytck1 <- 1 col.ytck2 <- 1 col.box <- 1 col.mtext <- 1 col.stats <- list(...)$col col.kde.line <- list(...)$col col.kde.fill <- NA col.value.dot <- list(...)$col col.value.bar <- list(...)$col col.value.rug <- list(...)$col col.boxplot <- list(...)$col col.boxplot.line <- list(...)$col col.boxplot.fill <- NA col.mean.line <- adjustcolor(col = list(...)$col, alpha.f = 0.4) col.sd.bar <- adjustcolor(col = list(...)$col, alpha.f = 0.4) col.background <- NA } else { if(length(layout$kde$colour$main) == 1) { col.main <- c(layout$kde$colour$main, 2:length(data)) } else { col.main <- layout$kde$colour$main } if(length(layout$kde$colour$xlab) == 1) { col.xlab <- c(layout$kde$colour$xlab, 2:length(data)) } else { col.xlab <- layout$kde$colour$xlab } if(length(layout$kde$colour$ylab1) == 1) { col.ylab1 <- c(layout$kde$colour$ylab1, 2:length(data)) } else { col.ylab1 <- layout$kde$colour$ylab1 } if(length(layout$kde$colour$ylab2) == 1) { col.ylab2 <- c(layout$kde$colour$ylab2, 2:length(data)) } else { col.ylab2 <- layout$kde$colour$ylab2 } if(length(layout$kde$colour$xtck) == 1) { col.xtck <- c(layout$kde$colour$xtck, 2:length(data)) } else { col.xtck <- layout$kde$colour$xtck } if(length(layout$kde$colour$ytck1) == 1) { col.ytck1 <- c(layout$kde$colour$ytck1, 2:length(data)) } else { col.ytck1 <- layout$kde$colour$ytck1 } if(length(layout$kde$colour$ytck2) == 1) { col.ytck2 <- c(layout$kde$colour$ytck2, 2:length(data)) } else { col.ytck2 <- layout$kde$colour$ytck2 } if(length(layout$kde$colour$box) == 1) { col.box <- c(layout$kde$colour$box, 2:length(data)) } else { col.box <- layout$kde$colour$box } if(length(layout$kde$colour$mtext) == 1) { col.mtext <- c(layout$kde$colour$mtext, 2:length(data)) } else { col.mtext <- layout$kde$colour$mtext } if(length(layout$kde$colour$stats) == 1) { col.stats <- c(layout$kde$colour$stats, 2:length(data)) } else { col.stats <- layout$kde$colour$stats } 
if(length(layout$kde$colour$kde.line) == 1) { col.kde.line <- c(layout$kde$colour$kde.line, 2:length(data)) } else { col.kde.line <- layout$kde$colour$kde.line } if(length(layout$kde$colour$kde.fill) == 1) { col.kde.fill <- c(layout$kde$colour$kde.fill, 2:length(data)) } else { col.kde.fill <- layout$kde$colour$kde.fill } if(length(layout$kde$colour$value.dot) == 1) { col.value.dot <- c(layout$kde$colour$value.dot, 2:length(data)) } else { col.value.dot <- layout$kde$colour$value.dot } if(length(layout$kde$colour$value.bar) == 1) { col.value.bar <- c(layout$kde$colour$value.bar, 2:length(data)) } else { col.value.bar <- layout$kde$colour$value.bar } if(length(layout$kde$colour$value.rug) == 1) { col.value.rug <- c(layout$kde$colour$value.rug, 2:length(data)) } else { col.value.rug <- layout$kde$colour$value.rug } if(length(layout$kde$colour$boxplot.line) == 1) { col.boxplot.line <- c(layout$kde$colour$boxplot.line, 2:length(data)) } else { col.boxplot.line <- layout$kde$colour$boxplot.line } if(length(layout$kde$colour$boxplot.fill) == 1) { col.boxplot.fill <- c(layout$kde$colour$boxplot.fill, 2:length(data)) } else { col.boxplot.fill <- layout$kde$colour$boxplot.fill } if(length(layout$kde$colour$mean.line) == 1) { col.mean.line <- adjustcolor(col = 1:length(data), alpha.f = 0.4) } else { col.mean.line <- layout$kde$colour$mean.point } if(length(layout$kde$colour$sd.bar) == 1) { col.sd.bar <- c(layout$kde$colour$sd.bar, 2:length(data)) } else { col.sd.bar <- layout$kde$colour$sd.line } if(length(layout$kde$colour$background) == 1) { col.background <- c(layout$kde$colour$background, 2:length(data)) } else { col.background <- layout$kde$colour$background } } if("lty" %in% names(list(...))) { lty <- list(...)$lty } else { lty <- rep(1, length(data)) } if("lwd" %in% names(list(...))) { lwd <- list(...)$lwd } else { lwd <- rep(1, length(data)) } if("cex" %in% names(list(...))) { cex <- list(...)$cex } else { cex <- 1 } if("fun" %in% names(list(...))) { fun <- 
list(...)$fun } else { fun <- FALSE } ## convert keywords into summary placement coordinates if(missing(summary.pos) == TRUE) { summary.pos <- c(xlim.plot[1], ylim.plot[2]) summary.adj <- c(0, 1) } else if(length(summary.pos) == 2) { summary.pos <- summary.pos summary.adj <- c(0, 1) } else if(summary.pos[1] == "topleft") { summary.pos <- c(xlim.plot[1], ylim.plot[2]) summary.adj <- c(0, 1) } else if(summary.pos[1] == "top") { summary.pos <- c(mean(xlim.plot), ylim.plot[2]) summary.adj <- c(0.5, 1) } else if(summary.pos[1] == "topright") { summary.pos <- c(xlim.plot[2], ylim.plot[2]) summary.adj <- c(1, 1) } else if(summary.pos[1] == "left") { summary.pos <- c(xlim.plot[1], mean(ylim.plot[1:2])) summary.adj <- c(0, 0.5) } else if(summary.pos[1] == "center") { summary.pos <- c(mean(xlim.plot), mean(ylim.plot[1:2])) summary.adj <- c(0.5, 0.5) } else if(summary.pos[1] == "right") { summary.pos <- c(xlim.plot[2], mean(ylim.plot[1:2])) summary.adj <- c(1, 0.5) }else if(summary.pos[1] == "bottomleft") { summary.pos <- c(xlim.plot[1], ylim.plot[1]) summary.adj <- c(0, 0) } else if(summary.pos[1] == "bottom") { summary.pos <- c(mean(xlim.plot), ylim.plot[1]) summary.adj <- c(0.5, 0) } else if(summary.pos[1] == "bottomright") { summary.pos <- c(xlim.plot[2], ylim.plot[1]) summary.adj <- c(1, 0) } ## plot data sets ----------------------------------------------------------- ## setup plot area if(length(summary) >= 1 & summary.pos[1] == "sub") { toplines <- length(data) } else { toplines <- 1 } ## extract original plot parameters par(bg = layout$kde$colour$background) bg.original <- par()$bg par(mar = c(5, 5.5, 2.5 + toplines, 4.5), xpd = FALSE, cex = cex) if(layout$kde$dimension$figure.width != "auto" | layout$kde$dimension$figure.height != "auto") { par(mai = layout$kde$dimension$margin / 25.4, pin = c(layout$kde$dimension$figure.width / 25.4 - layout$kde$dimension$margin[2] / 25.4 - layout$kde$dimension$margin[4] / 25.4, layout$kde$dimension$figure.height / 25.4 - 
layout$kde$dimension$margin[1] / 25.4 - layout$kde$dimension$margin[3]/25.4)) } ## create empty plot to get plot dimensions plot(NA, xlim = xlim.plot, ylim = ylim.plot[1:2], sub = sub, log = log.option, axes = FALSE, ann = FALSE) ## get line height in xy coordinates l_height <- par()$cxy[2] ## optionally update ylim if(boxplot == TRUE) { ylim.plot[1] <- ylim.plot[1] - 1.4 * l_height } ## create empty plot to set adjusted plot dimensions par(new = TRUE) plot(NA, xlim = xlim.plot, ylim = ylim.plot[1:2], log = log.option, cex = cex, axes = FALSE, ann = FALSE) ## add box box(which = "plot", col = layout$kde$colour$box) ## add x-axis axis(side = 1, col = layout$kde$colour$xtck, col.axis = layout$kde$colour$xtck, labels = NA, tcl = -layout$kde$dimension$xtcl / 200, cex = cex) axis(side = 1, line = 2 * layout$kde$dimension$xtck.line / 100 - 2, lwd = 0, col = layout$kde$colour$xtck, family = layout$kde$font.type$xtck, font = (1:4)[c("plain", "bold", "italic", "bold italic") == layout$kde$font.deco$xtck], col.axis = layout$kde$colour$xtck, cex.axis = layout$kde$font.size$xlab/12) mtext(text = xlab, side = 1, line = 3 * layout$kde$dimension$xlab.line / 100, col = layout$kde$colour$xlab, family = layout$kde$font.type$xlab, font = (1:4)[c("plain", "bold", "italic", "bold italic") == layout$kde$font.deco$xlab], cex = cex * layout$kde$font.size$xlab/12) ## add left y-axis axis(side = 2, at = pretty(x = range(De.density.range[3:4])), col = layout$kde$colour$ytck1, col.axis = layout$kde$colour$ytck1, labels = NA, tcl = -layout$kde$dimension$ytck1 / 200, cex = cex) axis(side = 2, at = pretty(x = range(De.density.range[3:4])), line = 2 * layout$kde$dimension$ytck1.line / 100 - 2, lwd = 0, col = layout$kde$colour$ytck1, family = layout$kde$font.type$ytck1, font = (1:4)[c("plain", "bold", "italic", "bold italic") == layout$kde$font.deco$ytck1], col.axis = layout$kde$colour$ytck1, cex.axis = layout$kde$font.size$ylab1/12) mtext(text = ylab[1], side = 2, line = 3 * 
layout$kde$dimension$ylab1.line / 100, col = layout$kde$colour$ylab1, family = layout$kde$font.type$ylab1, font = (1:4)[c("plain", "bold", "italic", "bold italic") == layout$kde$font.deco$ylab1], cex = cex * layout$kde$font.size$ylab1/12) for(i in 1:length(data)) { if(!all(is.na(De.density[[i]]))){ polygon(x = c(par()$usr[1], De.density[[i]]$x, par()$usr[2]), y = c(min(De.density[[i]]$y),De.density[[i]]$y, min(De.density[[i]]$y)), border = col.kde.line[i], col = col.kde.fill[i], lty = lty[i], lwd = lwd[i]) } } ## add plot title cex.old <- par()$cex par(cex = layout$kde$font.size$main / 12) title(main = main, family = layout$kde$font.type$main, font = (1:4)[c("plain", "bold", "italic", "bold italic") == layout$kde$font.deco$main], col.main = layout$kde$colour$main, line = (toplines + 1.2) * layout$kde$dimension$main / 100) par(cex = cex.old) ## optionally add mtext line if(mtext != "") { mtext(text = mtext, side = 3, line = 0.5, family = layout$kde$font.type$mtext, font = (1:4)[c("plain", "bold", "italic", "bold italic") == layout$kde$font.deco$mtext], col.main = layout$kde$colour$mtext, cex = layout$kde$font.size$mtext / 12) } ## add summary content for(i in 1:length(data)) { if(summary.pos[1] != "sub") { text(x = summary.pos[1], y = summary.pos[2], adj = summary.adj, labels = label.text[[i]], col = col.stats[i], cex = layout$kde$font.size$stats / 12) } else { if(mtext == "") { mtext(side = 3, line = (toplines + 0.3 - i) * layout$kde$dimension$stats.line / 100, text = label.text[[i]], col = col.stats[i], cex = layout$kde$font.size$stats / 12) } } } if(values.cumulative == TRUE) { ## create empty overlay plot par(new = TRUE) # adjust plot options ## add empty plot, scaled to preliminary secondary plot content plot(x = NA, xlim = xlim.plot, ylim = ylim.plot[3:4], log = log.option, ann = FALSE, axes = FALSE ) ## get line height in xy coordinates l_height <- par()$cxy[2] ## optionally update ylim if(boxplot == TRUE) { ylim.plot[3] <- ylim.plot[3] - 1.4 * l_height } ## 
create correctly scaled empty overlay plot par(new = TRUE) # adjust plot options ## add empty plot, scaled to secondary plot content plot(NA, xlim = xlim.plot, ylim = ylim.plot[3:4], log = log.option, ann = FALSE, axes = FALSE) ## optionally add boxplot if(boxplot == TRUE) { ## add zero line abline(h = 0) ## get extended boxplot data boxplot.data <- list(NA) for(i in 1:length(data)) { boxplot.i <- boxplot(x = data[[i]][,1], plot = FALSE) boxplot.i$group <- mean(x = data[[i]][,1], na.rm = TRUE) boxplot.i$names <- sd(x = data[[i]][,1], na.rm = TRUE) boxplot.data[[length(boxplot.data) + 1]] <- boxplot.i } ## remove dummy list object boxplot.data[[1]] <- NULL ## get new line hights l_height <- par()$cxy[2] for(i in 1:length(data)) { # ## draw sd line # lines(x = c(boxplot.data[[i]]$group[1] - boxplot.data[[i]]$names[1], # boxplot.data[[i]]$group[1] + boxplot.data[[i]]$names[1]), # y = c(-5/8 * l_height, # -5/8 * l_height), # col = col.mean.line[i]) # # ## draw mean line # points(x = boxplot.data[[i]]$group[1], # y = -5/8 * l_height, # pch = 18, # col = col.mean.line[i]) ## draw median line lines(x = c(boxplot.data[[i]]$stats[3,1], boxplot.data[[i]]$stats[3,1]), y = c(-11/8 * l_height, -7/8 * l_height), lwd = 2, col = col.boxplot.line[i]) ## draw q25-q75-polygon polygon(x = c(boxplot.data[[i]]$stats[2,1], boxplot.data[[i]]$stats[2,1], boxplot.data[[i]]$stats[4,1], boxplot.data[[i]]$stats[4,1]), y = c(-11/8 * l_height, -7/8 * l_height, -7/8 * l_height, -11/8 * l_height), col = col.boxplot.fill[i], border = col.boxplot.line[i]) ## draw whiskers lines(x = c(boxplot.data[[i]]$stats[2,1], boxplot.data[[i]]$stats[1,1]), y = c(-9/8 * l_height, -9/8 * l_height), col = col.boxplot.line[i]) lines(x = c(boxplot.data[[i]]$stats[1,1], boxplot.data[[i]]$stats[1,1]), y = c(-10/8 * l_height, -8/8 * l_height), col = col.boxplot.line[i]) lines(x = c(boxplot.data[[i]]$stats[4,1], boxplot.data[[i]]$stats[5,1]), y = c(-9/8 * l_height, -9/8 * l_height), col = col.boxplot.line[i]) lines(x = 
c(boxplot.data[[i]]$stats[5,1], boxplot.data[[i]]$stats[5,1]), y = c(-10/8 * l_height, -8/8 * l_height), col = col.boxplot.line[i]) ## draw outliers points(x = boxplot.data[[i]]$out, y = rep(-9/8 * l_height, length(boxplot.data[[i]]$out)), col = col.boxplot.line[i], cex = cex * 0.8) } } ## optionally add rug if(rug == TRUE) { for(i in 1:length(data)) { for(j in 1:nrow(data[[i]])) { lines(x = c(data[[i]][j,1], data[[i]][j,1]), y = c(0, -2/8 * l_height), col = col.value.rug[i]) } } } ## add secondary y-axis ticks_axis <- pretty(x = c(1, ylim.plot[4])) ticks_axis <- ifelse(test = ticks_axis == 0, yes = NA, no = ticks_axis) ## add right y-axis axis(side = 4, at = ticks_axis, col = layout$kde$colour$ytck2, col.axis = layout$kde$colour$ytck2, labels = NA, tcl = -layout$kde$dimension$ytck2 / 200, cex = cex) axis(side = 4, at = ticks_axis, line = 2 * layout$kde$dimension$ytck2.line / 100 - 2, lwd = 0, col = layout$kde$colour$ytck2, family = layout$kde$font.type$ytck2, font = (1:4)[c("plain", "bold", "italic", "bold italic") == layout$kde$font.deco$ytck2], col.axis = layout$kde$colour$ytck2, cex.axis = layout$kde$font.size$ylab2/12) mtext(text = ylab[2], side = 4, line = 3 * layout$kde$dimension$ylab2.line / 100, col = layout$kde$colour$ylab2, family = layout$kde$font.type$ylab2, font = (1:4)[c("plain", "bold", "italic", "bold italic") == layout$kde$font.deco$ylab2], cex = cex * layout$kde$font.size$ylab2/12) ## add De error bars for(i in 1:length(data)) { arrows(data[[i]][,1] - data[[i]][,2], 1:length(data[[i]][,1]), data[[i]][,1] + data[[i]][,2], 1:length(data[[i]][,1]), code = 3, angle = 90, length = 0.05, col = col.value.bar[i]) ## add De measurements points(data[[i]][,1], 1:De.stats[i,1], col = col.value.dot[i], pch = 20) } } ## add empty plot par(new = TRUE) plot(NA, ann = FALSE, axes = FALSE, xlim = xlim.plot, ylim = ylim.plot[1:2], log = log.option, cex = cex, cex.lab = cex, cex.main = cex, cex.axis = cex) ## FUN by R Luminescence Team if(fun==TRUE){sTeve()} 
if(output == TRUE) { return(invisible(list(De.stats = De.stats, summary.pos = summary.pos, De.density = De.density))) } } Luminescence/R/read_Daybreak2R.R0000644000176200001440000003550514264017373016150 0ustar liggesusers#' Import measurement data produced by a Daybreak TL/OSL reader into R #' #' Import a TXT-file (ASCII file) or a DAT-file (binary file) produced by a #' Daybreak reader into R. The import of the DAT-files is limited to the file #' format described for the software TLAPLLIC v.3.2 used for a Daybreak, model 1100. #' #' @param file [character] or [list] (**required**): #' path and file name of the file to be imported. Alternatively a list of file #' names can be provided or just the path of a folder containing measurement data. #' Please note that the specific, common, file extension (txt) is likely #' leading to function failures during import when just a path is provided. #' #' @param raw [logical] (*with default*): #' if the input is a DAT-file (binary) a [data.table::data.table] instead of #' the [RLum.Analysis-class] object can be returned for debugging purposes. #' #' @param verbose [logical] (*with default*): #' enables or disables terminal feedback #' #' @param txtProgressBar [logical] (*with default*): #' enables or disables [txtProgressBar]. #' #' @return #' A list of [RLum.Analysis-class] objects (each per position) is provided. #' #' @note #' **`[BETA VERSION]`** #' This function still needs to be tested properly. In particular #' the function has undergone only very rough tests using a few files. 
#' #' @section Function version: 0.3.2 #' #' @author #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)\cr #' Antoine Zink, C2RMF, Palais du Louvre, Paris (France) #' #' The ASCII-file import is based on a suggestion by Willian Amidon and Andrew Louis Gorin #' #' @seealso [RLum.Analysis-class], [RLum.Data.Curve-class], [data.table::data.table] #' #' @keywords IO #' #' @examples #' #' \dontrun{ #' file <- system.file("extdata/Daybreak_TestFile.txt", package = "Luminescence") #' temp <- read_Daybreak2R(file) #' } #' #' @md #' @export read_Daybreak2R <- function( file, raw = FALSE, verbose = TRUE, txtProgressBar = TRUE ){ ##TODO ## - run tests ## - check where the warning messages are comming from ## - implement further integegrity tests (ASCII import) # Self Call ----------------------------------------------------------------------------------- # Option (a): Input is a list, every element in the list will be treated as file connection # with that many file can be read in at the same time # Option (b): The input is just a path, the function tries to grep ALL Daybreaks-txt files in the # directory and import them, if this is detected, we proceed as list if(is(file, "character")) { ##If this is not really a path we skip this here if (dir.exists(file) & length(dir(file)) > 0) { if(verbose){ cat("[read_Daybreak2R()] Directory detected, trying to extract '*.txt' files ...\n") } file <- as.list(paste0(file,dir( file, recursive = FALSE, pattern = ".txt" ))) } } ##if the input is already a list if (is(file, "list")) { temp.return <- lapply(1:length(file), function(x) { read_Daybreak2R( file = file[[x]], txtProgressBar = txtProgressBar ) }) ##return return(temp.return) } # Integrity checks ---------------------------------------------------------------------------- ##check if file exists if(!file.exists(file)){ stop("[read_Daybreak2R()] file name does not seem to exist.", call. = FALSE) } ##check for file extension ... 
distinguish between TXT and DAT if(substr(file, start = nchar(file) - 3, stop = nchar(file)) == ".DAT"){ # Read DAT-file ------------------------------------------------------------------------------ on.exit(close(con)) ##screen file to get information on the number of stored records con<-file(file,"rb") file.data <- file.info(file) max.pt<-readBin(con,what="int",6,size=2,endian="little")[6] file.size<-file.data$size n.length<-file.size/(190+8*(max.pt+1)) ##190 is is size of the header for each data set close(con) ##import data con <- file(file, "rb") ##pre-define data.table results.DATA <- data.table::data.table( ID = integer(length = n.length), MAXPT = integer(length = n.length), SPACING = integer(length = n.length), NDISK = integer(length = n.length), NRUN = integer(length = n.length), D1 = integer(length = n.length), NPT = integer(length = n.length), NATL = logical(length = n.length), TLRUN = logical(length = n.length), BEFORE_IRRAD = logical(length = n.length), SHIFT = double(length = n.length), RAMPRATE = double(length = n.length), GRATE = double(length = n.length), BRATE = double(length = n.length), ARATE = double(length = n.length), GAMMADOSE = double(length = n.length), BETADOSE = double(length = n.length), ALPHADOSE = double(length = n.length), BLEACHINGTIME = double(length = n.length), GRUNIT = character(length = n.length), BRUNIT = character(length = n.length), ARUNIT = character(length = n.length), BFILTER = character(length = n.length), GSOURCE = character(length = n.length), BSOURCE = character(length = n.length), ASOURCE = character(length = n.length), IRRAD_DATE = character(length = n.length), RUNREMARK = character(length = n.length), DATA = list() ) ##TERMINAL FEEDBACK if(verbose){ cat("\n[read_Daybreak2R()]") cat(paste("\n >> Importing:", file[1],"\n")) } ##PROGRESS BAR if(txtProgressBar & verbose){ pb <- txtProgressBar(min=0,max=n.length, char = "=", style=3) } ##LOOP over file i <- 1 while (i> Importing:", file[1],"\n")) } ##PROGRESS BAR 
if(txtProgressBar & verbose){ pb <- txtProgressBar(min=0,max=length(data.list), char = "=", style=3) } ##(2) ##Loop over the list to create RLum.Data.Curve objects RLum.Data.Curve.list <- lapply(1:length(data.list), function(x){ ##get length of record record.length <- length(data.list[[x]]) ##get header length until the argument 'Points' header.length <- grep(pattern = "Points", x = data.list[[x]]) if(length(header.length)>0){ temp.meta_data <- unlist(strsplit(data.list[[x]][2:header.length], split = "=", fixed = TRUE)) }else{ temp.meta_data <- unlist(strsplit(data.list[[x]][2:length(data.list[[x]])], split = "=", fixed = TRUE)) } ##get list names for the info element list info.names <- temp.meta_data[seq(1,length(temp.meta_data), by = 2)] ##info elements info <- as.list(temp.meta_data[seq(2,length(temp.meta_data), by = 2)]) names(info) <- info.names ##add position, which is 'Disk' info <- c(info, position = as.integer(info$Disk)) if(length(header.length)>0){ ##get measurement data temp.data <- unlist(strsplit(unlist(strsplit( data.list[[x]][12:length(data.list[[x]])], split = "=" )), split = ";")) ##grep only data of interest point.x <- suppressWarnings(as.numeric(gsub("^\\s+|\\s+$", "", temp.data[seq(2, length(temp.data), by = 4)]))) point.y <- suppressWarnings(as.numeric(gsub("^\\s+|\\s+$", "", temp.data[seq(3,length(temp.data), by = 4)]))) ##combine it into a matrix data <- matrix(c(point.x,point.y), ncol = 2) }else{ ##we presume this should be irradiation ... 
if ("IrradTime" %in% names(info)) { point.x <- 1:as.numeric(info$IrradTime) point.y <- rep(1, length(point.x)) data <- matrix(c(point.x,point.y), ncol = 2) } } ##update progress bar if (txtProgressBar & verbose) { setTxtProgressBar(pb, x) } ##return RLum object return( set_RLum( class = "RLum.Data.Curve", originator = "read_Daybreak2R", recordType = sub(" ", replacement = "_", x = info$DataType), curveType = "measured", data = data, info = info ) ) }) ##close ProgressBar if(txtProgressBar & verbose){close(pb)} ##(3) ##Now we have to find out how many aliquots we do have positions.id <- sapply(RLum.Data.Curve.list, function(x){ get_RLum(x, info.object = "position") }) ##(4) ##now combine everyting in an RLum.Analysis object in accordance to the position number RLum.Analysis.list <- lapply(unique(positions.id), function(x){ ##get list ids for position number n <- which(positions.id == x) ##make list temp.list <- lapply(n, function(x){ RLum.Data.Curve.list[[x]] }) ##put in RLum.Analysis object object <- set_RLum( class = "RLum.Analysis", originator = "read_Daybreak2R", protocol = "Custom", records = temp.list ) ##set parent id of records object <- .set_pid(object) return(object) }) ##TERMINAL FEEDBACK if(verbose){ cat(paste0("\n ",length(unlist(get_RLum(RLum.Analysis.list))), " records have been read sucessfully!\n")) } return(RLum.Analysis.list) } } Luminescence/R/convert_Daybreak2CSV.R0000644000176200001440000000522714264017373017145 0ustar liggesusers#' Export measurement data produced by a Daybreak luminescence reader to CSV-files #' #' This function is a wrapper function around the functions [read_Daybreak2R] and #' [write_RLum2CSV] and it imports an Daybreak-file (TXT-file, DAT-file) #' and directly exports its content to CSV-files. If nothing is set for the #' argument `path` ([write_RLum2CSV]) the input folder will become the output folder. 
#'
#' @param file [character] (**required**):
#' name of the Daybreak-file (TXT-file, DAT-file) to be converted to CSV-files
#'
#' @param ... further arguments that will be passed to the function
#' [read_Daybreak2R] and [write_RLum2CSV]
#'
#' @return
#' The function returns either a CSV-file (or many of them) or for the option `export = FALSE`
#' a list comprising objects of type [data.frame] and [matrix]
#'
#' @section Function version: 0.1.0
#'
#' @author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#' @seealso [RLum.Analysis-class], [RLum.Data-class], [RLum.Results-class],
#' [utils::write.table], [write_RLum2CSV], [read_Daybreak2R]
#'
#' @keywords IO
#'
#' @examples
#'
#' \dontrun{
#' ##select your BIN-file
#' file <- file.choose()
#'
#' ##convert
#' convert_Daybreak2CSV(file)
#'
#' }
#'
#' @md
#' @export
convert_Daybreak2CSV <- function(
  file,
  ...
){

  # General tests -------------------------------------------------------------------------------

  ##file is missing?
  if(missing(file)){
    stop("[convert_Daybreak2CSV()] file is missing!", call. = FALSE)
  }

  ##set input arguments (defaults used when the user passes nothing via ...)
  convert_Daybreak2R_settings.default <- list(
    raw = FALSE,
    verbose = TRUE,
    txtProgressBar = TRUE,
    export = TRUE
  )

  ##modify list on demand
  convert_Daybreak2R_settings <- modifyList(
    x = convert_Daybreak2R_settings.default,
    val = list(...))

  # Import file ---------------------------------------------------------------------------------
  if(!inherits(file, "RLum")){
    ##NOTE: each setting is passed to its matching argument; previously the
    ##'raw' setting was erroneously passed to 'verbose' and 'txtProgressBar'
    ##as well, which silently ignored user-supplied values for both
    object <- read_Daybreak2R(
      file = file,
      raw = convert_Daybreak2R_settings$raw,
      verbose = convert_Daybreak2R_settings$verbose,
      txtProgressBar = convert_Daybreak2R_settings$txtProgressBar
    )

  }else{
    object <- file
  }

  # Export to CSV -------------------------------------------------------------------------------

  ##get all arguments we want to pass and remove the doubled one
  arguments <- c(list(object = object, export = convert_Daybreak2R_settings$export), list(...))
  arguments[duplicated(names(arguments))] <- NULL

  ##this if-condition prevents NULL in the terminal
  if(convert_Daybreak2R_settings$export == TRUE){
    invisible(do.call("write_RLum2CSV", arguments))
  }else{
    do.call("write_RLum2CSV", arguments)
  }
}
Luminescence/R/fit_LMCurve.R0000644000176200001440000011317614521207352015401 0ustar liggesusers#' @title Nonlinear Least Squares Fit for LM-OSL curves
#'
#' @description The function determines weighted nonlinear least-squares estimates of the
#' component parameters of an LM-OSL curve (Bulur 1996) for a given number of
#' components and returns various component parameters. The fitting procedure
#' uses the function [nls] with the `port` algorithm.
#'
#' @details
#' **Fitting function**
#'
#' The function for the fitting has the general
#' form:
#'
#' \deqn{y = (exp(0.5)*Im_1*x/xm_1)*exp(-x^2/(2*xm_1^2)) + ,\ldots, + exp(0.5)*Im_i*x/xm_i)*exp(-x^2/(2*xm_i^2))}
#'
#' where \eqn{1 < i < 8}
#'
#' This function and the equations for the conversion to b (detrapping probability)
#' and n0 (proportional to initially trapped charge) have been taken from Kitis
#' et al.
(2008): #' #' \deqn{xm_i=\sqrt{max(t)/b_i}} #' \deqn{Im_i=exp(-0.5)n0/xm_i} #' #' **Background subtraction** #' #' Three methods for background subtraction #' are provided for a given background signal (`values.bg`). #' #' - `polynomial`: default method. A polynomial function is fitted using [glm] #' and the resulting function is used for background subtraction: #' \deqn{y = a*x^4 + b*x^3 + c*x^2 + d*x + e} #' #' - `linear`: a linear function is fitted using [glm] and the resulting function #' is used for background subtraction: #' \deqn{y = a*x + b} #' #' - `channel`: the measured #' background signal is subtracted channel wise from the measured signal. #' #' #' **Start values** #' #' The choice of the initial parameters for the `nls`-fitting is a crucial #' point and the fitting procedure may mainly fail due to ill chosen start #' parameters. Here, three options are provided: #' #' **(a)** #' If no start values (`start_values`) are provided by the user, a cheap guess is made #' by using the detrapping values found by Jain et al. (2003) for quartz for a #' maximum of 7 components. Based on these values, the pseudo start parameters #' `xm` and `Im` are recalculated for the given data set. In all cases, the fitting #' starts with the ultra-fast component and (depending on `n.components`) #' steps through the following values. If no fit could be achieved, an error #' plot (for `plot = TRUE`) with the pseudo curve (based on the #' pseudo start parameters) is provided. This may give the opportunity to #' identify appropriate start parameters visually. #' #' **(b)** #' If start values are provided, the function works like a simple [nls] #' fitting approach. #' #' **(c)** #' If no start parameters are provided and #' the option `fit.advanced = TRUE` is chosen, an advanced start parameter #' estimation is applied using a stochastic attempt. Therefore, the #' recalculated start parameters **(a)** are used to construct a normal #' distribution. 
The start parameters are then sampled randomly from this #' distribution. A maximum of 100 attempts will be made. **Note:** This #' process may be time consuming. #' #' **Goodness of fit** #' #' The goodness of the fit is given by a pseudo-R^2 value (pseudo coefficient of #' determination). According to Lave (1970), the value is calculated as: #' #' \deqn{pseudoR^2 = 1 - RSS/TSS} #' #' where \eqn{RSS = Residual~Sum~of~Squares} #' and \eqn{TSS = Total~Sum~of~Squares} #' #' **Error of fitted component parameters** #' #' The 1-sigma error for the components is calculated using #' the function [stats::confint]. Due to considerable calculation time, this #' option is deactivated by default. In addition, the error for the components #' can be estimated by using internal R functions like [summary]. See the #' [nls] help page for more information. #' #' *For more details on the nonlinear regression in R, see Ritz & Streibig (2008).* #' #' @param values [RLum.Data.Curve-class] or [data.frame] (**required**): #' x,y data of measured values (time and counts). See examples. #' #' @param values.bg [RLum.Data.Curve-class] or [data.frame] (*optional*): #' x,y data of measured values (time and counts) for background subtraction. #' #' @param n.components [integer] (*with default*): #' fixed number of components that are to be recognised during fitting #' (min = 1, max = 7). #' #' @param start_values [data.frame] (*optional*): #' start parameters for `lm` and `xm` data for the fit. If no start values are given, #' an automatic start value estimation is attempted (see details). #' #' @param input.dataType [character] (*with default*): #' alter the plot output depending on the input data: `"LM"` or `"pLM"` (pseudo-LM). #' See: [CW2pLM] #' #' @param fit.method [character] (*with default*): #' select fit method, allowed values: `'port'` and `'LM'`. 
`'port'` uses the 'port' #' routine from the function [nls] `'LM'` utilises the function `nlsLM` from #' the package `minpack.lm` and with that the Levenberg-Marquardt algorithm. #' #' @param sample_code [character] (*optional*): #' sample code used for the plot and the optional output table (mtext). #' #' @param sample_ID [character] (*optional*): #' additional identifier used as column header for the table output. #' #' @param LED.power [numeric] (*with default*): #' LED power (max.) used for intensity ramping in mW/cm^2. #' **Note:** This value is used for the calculation of the absolute #' photoionisation cross section. #' #' @param LED.wavelength [numeric] (*with default*): #' LED wavelength in nm used for stimulation. #' **Note:** This value is used for the calculation of the absolute #' photoionisation cross section. #' #' @param fit.trace [logical] (*with default*): #' traces the fitting process on the terminal. #' #' @param fit.advanced [logical] (*with default*): #' enables advanced fitting attempt for automatic start parameter recognition. #' Works only if no start parameters are provided. #' **Note:** It may take a while and it is not compatible with `fit.method = "LM"`. #' #' @param fit.calcError [logical] (*with default*): #' calculate 1-sigma error range of components using [stats::confint]. #' #' @param bg.subtraction [character] (*with default*): #' specifies method for background subtraction (`polynomial`, `linear`, `channel`, #' see Details). **Note:** requires input for `values.bg`. #' #' @param verbose [logical] (*with default*): #' terminal output with fitting results. #' #' @param plot [logical] (*with default*): #' returns a plot of the fitted curves. #' #' @param plot.BG [logical] (*with default*): #' returns a plot of the background values with the fit used for the #' background subtraction. #' #' @param ... Further arguments that may be passed to the plot output, e.g. #' `xlab`, `xlab`, `main`, `log`. 
#' #' @return #' Various types of plots are returned. For details see above. Furthermore an #' `RLum.Results` object is returned with the following structure: #' #' **`@data:`** #' #' `.. $data` : [data.frame] with fitting results\cr #' `.. $fit` : nls ([nls] object)\cr #' `.. $component_matrix` : [matrix] with numerical xy-values of the single fitted components with the resolution of the input data #' `.. $component.contribution.matrix` : [list] component distribution matrix #' #' **`info:`** #' #' `.. $call` : [call] the original function call #' #' Matrix structure for the distribution matrix: #' #' Column 1 and 2: time and `rev(time)` values\cr #' Additional columns are used for the components, two for each component, #' containing I0 and n0. The last columns `cont.` provide information on #' the relative component contribution for each time interval including the row #' sum for this values. #' #' @note #' The pseudo-R^2 may not be the best parameter to describe the goodness #' of the fit. The trade off between the `n.components` and the pseudo-R^2 #' value currently remains unconsidered. #' #' The function **does not** ensure that the fitting procedure has reached a #' global minimum rather than a local minimum! In any case of doubt, the use of #' manual start values is highly recommended. #' #' @section Function version: 0.3.4 #' #' @author #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #' @seealso [fit_CWCurve], [plot], [nls], [minpack.lm::nlsLM], [get_RLum] #' #' @references #' Bulur, E., 1996. An Alternative Technique For Optically #' Stimulated Luminescence (OSL) Experiment. Radiation Measurements, 26, 5, #' 701-709. #' #' Jain, M., Murray, A.S., Boetter-Jensen, L., 2003. Characterisation of #' blue-light stimulated luminescence components in different quartz samples: #' implications for dose measurement. Radiation Measurements, 37 (4-5), #' 441-449. #' #' Kitis, G. & Pagonis, V., 2008. 
Computerized curve deconvolution analysis for #' LM-OSL. Radiation Measurements, 43, 737-741. #' #' Lave, C.A.T., 1970. The Demand for Urban Mass Transportation. The Review of #' Economics and Statistics, 52 (3), 320-323. #' #' Ritz, C. & Streibig, J.C., 2008. Nonlinear Regression with R. R. Gentleman, #' K. Hornik, & G. Parmigiani, eds., Springer, p. 150. #' #' @keywords dplot models #' #' @examples #' #' ##(1) fit LM data without background subtraction #' data(ExampleData.FittingLM, envir = environment()) #' fit_LMCurve(values = values.curve, n.components = 3, log = "x") #' #' ##(2) fit LM data with background subtraction and export as JPEG #' ## -alter file path for your preferred system #' ##jpeg(file = "~/Desktop/Fit_Output\%03d.jpg", quality = 100, #' ## height = 3000, width = 3000, res = 300) #' data(ExampleData.FittingLM, envir = environment()) #' fit_LMCurve(values = values.curve, values.bg = values.curveBG, #' n.components = 2, log = "x", plot.BG = TRUE) #' ##dev.off() #' #' ##(3) fit LM data with manual start parameters #' data(ExampleData.FittingLM, envir = environment()) #' fit_LMCurve(values = values.curve, #' values.bg = values.curveBG, #' n.components = 3, #' log = "x", #' start_values = data.frame(Im = c(170,25,400), xm = c(56,200,1500))) #' #' @md #' @export fit_LMCurve<- function( values, values.bg, n.components = 3, start_values, input.dataType = "LM", fit.method = "port", sample_code = "", sample_ID = "", LED.power = 36, LED.wavelength = 470, fit.trace = FALSE, fit.advanced = FALSE, fit.calcError = FALSE, bg.subtraction = "polynomial", verbose = TRUE, plot = TRUE, plot.BG = FALSE, ... ){ # (0) Integrity checks ------------------------------------------------------- ##(1) data.frame or RLum.Data.Curve object? if(is(values, "data.frame") == FALSE & is(values, "RLum.Data.Curve") == FALSE){ stop("[fit_LMCurve()] 'values' has to be of type 'data.frame' or 'RLum.Data.Curve'!", call. 
= FALSE) }else{ if(is(values, "RLum.Data.Curve") == TRUE && ( values@recordType!="RBR" & values@recordType!="LM-OSL")){ stop("[fit_LMCurve()] recordType should be 'RBR' or 'LM-OSL'! Consider using as(object,'data.frame') if you had used a pseudo transformation function.", call. = FALSE) }else if(is(values, "RLum.Data.Curve")){ values <- as(values,"data.frame") } } ##(2) data.frame or RLum.Data.Curve object? if(missing(values.bg)==FALSE){ if(is(values.bg, "data.frame") == FALSE & is(values.bg, "RLum.Data.Curve") == FALSE){ stop("[fit_LMCurve()] 'values.bg' object has to be of type 'data.frame' or 'RLum.Data.Curve'!", call. = FALSE) }else{ if(is(values, "RLum.Data.Curve") == TRUE && values@recordType!="RBR"){ stop("[fit_LMCurve()] recordType should be 'RBR'!", call. = FALSE) }else if(is(values.bg, "RLum.Data.Curve")){ values.bg <- as(values.bg,"data.frame") } } } ## Set plot format parameters ----------------------------------------------- extraArgs <- list(...) # read out additional arguments list log <- if("log" %in% names(extraArgs)) {extraArgs$log} else {""} xlim <- if("xlim" %in% names(extraArgs)) {extraArgs$xlim} else {c(min(values[,1]),max(values[,1]))} ylim <- if("ylim" %in% names(extraArgs)) {extraArgs$ylim} else { if(input.dataType=="pLM"){ c(0,max(values[,2]*1.1)) }else{ c(min(values[,2]),max(values[,2]*1.1)) } } xlab <- if("xlab" %in% names(extraArgs)) {extraArgs$xlab} else { if(input.dataType=="LM"){"Time [s]"}else{"u [s]"} } ylab <- if("ylab" %in% names(extraArgs)) {extraArgs$ylab} else { if(input.dataType=="LM"){ paste("LM-OSL [cts/",round(max(values[,1])/length(values[,1]),digits=2)," s]",sep="") }else{"pLM-OSL [a.u.]"} } main <- if("main" %in% names(extraArgs)) {extraArgs$main} else {"Default"} cex <- if("cex" %in% names(extraArgs)) {extraArgs$cex} else {0.8} fun <- if("fun" %in% names(extraArgs)) {extraArgs$fun} else {FALSE} # layout safety settings par.default <- par()[c("mfrow", "cex", "mar", "omi", "oma")] on.exit(par(par.default)) 
##============================================================================## ## BACKGROUND SUBTRACTION ##============================================================================## if(missing(values.bg)==FALSE){ #set graphical parameters par(mfrow=c(1,1), cex=1.5*cex) ##check if length of bg and signal is consistent if(length(values[,2])!=length(values.bg[,2])) stop("[fit_LMCurve] Length of values and values.bg differs!", call. = FALSE) if(bg.subtraction=="polynomial"){ #fit polynomial function to background glm.fit<-glm(values.bg[,2] ~ values.bg[,1]+I(values.bg[,1]^2)+I(values.bg[,1]^3)) glm.coef<-coef(glm.fit) #subtract background with fitted function values[,2]<-values[,2]- (glm.coef[4]*values[,1]^3+glm.coef[3]*values[,1]^2+glm.coef[2]*values[,1]+glm.coef[1]) writeLines("[fit_LMCurve] >> Background subtracted (method=\"polynomial\")!") ##plot Background measurement if needed if(plot.BG==TRUE){ plot(values.bg, ylab="LM-OSL [a.u.]", xlab="Time [s]", main="Background") curve((glm.coef[4]*x^3+glm.coef[3]*x^2+glm.coef[2]*x+glm.coef[1]),add=TRUE,col="red",lwd=2) text(0,max(values.bg[,2]),paste("y = ", round(glm.coef[4],digits=2), "*x^3+", round(glm.coef[3],digits=2), "*x^2+", round(glm.coef[2],digits=2), "*x+", round(glm.coef[1],digits=2), sep=""),pos=4) mtext(side=3,sample_code,cex=.8*cex) } }else if(bg.subtraction=="linear"){ #fit linear function to background glm.fit<-glm(values.bg[,2] ~ values.bg[,1]) glm.coef<-coef(glm.fit) ##substract bg values[,2]<-values[,2]-(glm.coef[2]*values[,1]+glm.coef[1]) writeLines("[fit_LMCurve.R] >> Background subtracted (method=\"linear\")!") ##plot Background measurement if needed if(plot.BG){ plot(values.bg, ylab="LM-OSL [a.u.]", xlab="Time [s]", main="Background") curve((glm.coef[2]*x+glm.coef[1]),add=TRUE,col="red",lwd=1.5) text(0,max(values.bg[,2]),paste("y = ", round(glm.coef[2],digits=2), "*x+", round(glm.coef[1],digits=2), sep=""),pos=4) mtext(side=3,sample_code,cex=.8*cex) }#endif::plot BG }else 
if(bg.subtraction=="channel"){ values[,2]<-values[,2]-values.bg[,2] writeLines("[fit_LMCurve.R] >> Background subtracted (method=\"channel\")!") if(plot.BG==TRUE){ plot(values.bg, ylab="LM-OSL [a.u.]", xlab="Time [s]", main="Background") mtext(side=3,sample_code,cex=.8*cex) } } else { stop("[fit_LMCurve()] Invalid method for background subtraction", call. = FALSE) } } ##============================================================================## ## FITTING ##============================================================================## ##------------------------------------------------------------------------## ##set function for fit equation (according Kitis and Pagonis, 2008) ##////equation used for fitting////(start) fit.equation<-function(Im.i,xm.i){ equation<-parse( text=paste("exp(0.5)*Im[",Im.i,"]*(values[,1]/xm[",xm.i,"])*exp(-values[,1]^2/(2*xm[",xm.i,"]^2))", collapse="+",sep="")) return(equation) } ##////equation used for fitting///(end) ##------------------------------------------------------------------------## ##set formula elements for fitting functions ## the upper two functions should be removed ... but chances are needed ... TODO ##////equation used for fitting////(start) fit.formula <- function(n.components){ Im <- paste0("Im.",1:n.components) xm <- paste0("xm.",1:n.components) as.formula(paste0("y ~ ", paste("(exp(0.5) * ", Im, "* x/", xm, ") * exp(-x^2/(2 *",xm,"^2))", collapse=" + "))) } ##////equation used for fitting///(end) ##------------------------------------------------------------------------## ##automatic start parameter estimation ##set fit function fit.function <- fit.equation(Im.i = 1:n.components, xm.i = 1:n.components) if(missing(start_values)){ ##set b (detrapping) values for a 7-component function taken from Jain et al. 
(2003) b.pseudo<-c(32,2.5,0.65,0.15,0.025,0.0025,0.00030) ##calculate xm parameters from values set based on the pseudo curves xm.pseudo<-sqrt(max(values[,1])/b.pseudo) ##the Im values obtaind by calculating residuals xm.residual<-sapply(1:length(b.pseudo),function(x){abs(values[,1]-xm.pseudo[x])}) xm.residual<-cbind(xm.residual,values[,1]) Im.pseudo<-sapply(1:length(xm.pseudo),function(x){ min(xm.residual[which(xm.residual[,x]==min(xm.residual[,x])),8])#8 is time index }) ##set additional variables b.pseudo_start<-1 b.pseudo_end<-0 fit.trigger<-FALSE while(fit.trigger==FALSE){ xm <- xm.pseudo[b.pseudo_start:(n.components + b.pseudo_end)] Im <- Im.pseudo[b.pseudo_start:(n.components + b.pseudo_end)] if(fit.advanced){ ##---------------------------------------------------------------## ##MC for fitting parameter ##make the fitting more stable by small variations of the parameters ##sample input parameters values from a normal distribution xm.MC<-sapply(1:length(xm),function(x){ xm.MC<-sample(rnorm(30,mean=xm[x],sd=xm[x]/10), replace=TRUE) }) Im.MC<-sapply(1:length(xm),function(x){ Im.MC<-sample(rnorm(30,mean=Im[x],sd=Im[x]/10), replace=TRUE) }) ##---------------------------------------------------------------## for(i in 1:length(xm.MC[,1])){ ##NLS ##try fit fit<-try(nls(y~eval(fit.function), trace=fit.trace, data=data.frame(x=values[,1],y=values[,2]), algorithm="port", start=list(Im=Im.MC[i,],xm=xm.MC[i,]),#end start values input nls.control( maxiter=500 ),#end nls control lower=c(xm=min(values[,1]),Im=0), upper=c(xm=max(values[,1]),Im=max(values[,2]*1.1)) ),# nls silent=TRUE)# end try ##graphical output if(i==1){cat(paste("[fit_LMCurve()] >> advanced fitting attempt (#", b.pseudo_start,"): ",sep=""))} cat("*") if(inherits(fit,"try-error") == FALSE){break} }#end::forloop cat("\n") }else{ if(fit.method == "port") { fit <- try(nls( y ~ eval(fit.function), trace = fit.trace, data = data.frame(x = values[,1],y = values[,2]), algorithm = "port", start = list(Im = Im,xm = 
xm),#end start values input nls.control(maxiter = 500),#end nls control lower = c(xm = 0,Im = 0) ),# nls silent = TRUE) # end try }else if (fit.method == "LM") { ##re-name for method == "LM" names(Im) <- paste0("Im.", 1:n.components) names(xm) <- paste0("xm.", 1:n.components) start.list <- c(as.list(Im), as.list(xm)) lower <- vapply(start.list, function(x) { start.list[[x]] <- 0 }, FUN.VALUE = vector(mode = "numeric", length = 1)) fit <- try(minpack.lm::nlsLM( fit.formula(n.components), data = data.frame(x = values[,1], y = values[,2]), start = start.list, lower = lower, trace = fit.trace, control = minpack.lm::nls.lm.control(maxiter = 500) ), silent = TRUE) }else{ stop("[fit_LMCurve()] unknow method for 'fit.method'", call. = FALSE) } }#endifelse::fit.advanced if(inherits(fit,"try-error")==FALSE){fit.trigger<-TRUE} else{ if((n.components+b.pseudo_end)==7){fit.trigger<-TRUE }else{ b.pseudo_start<-b.pseudo_start+1 b.pseudo_end<-b.pseudo_end+1 }#endif::maximum loops }#endif::try-error }#end:whileloop fit trigger }else{#endif::missing start values ##------------------------------------------------------------------------## fit<-try(nls(y~eval(fit.function), trace=fit.trace, data.frame(x=values[,1],y=values[,2]), algorithm="port", start=list(Im=start_values[,1],xm=start_values[,2]),#end start values input nls.control(maxiter=500), lower=c(xm=0,Im=0), #upper=c(xm=max(x),Im=max(y)*1.1)# set lower boundaries for components )# nls )# end try }#endif::startparameter ##------------------------------------------------------------------------## ##grep parameters if(inherits(fit,"try-error")==FALSE){ parameters<-coef(fit) ##write parameters in vectors and order parameters Im<-parameters[1:(length(parameters)/2)] Im.names <- names(Im) xm<-parameters[(1+(length(parameters)/2)):length(parameters)] xm.names <- names(xm) ##order parameters o <- order(xm) xm <- xm[o] names(xm) <- xm.names Im <- Im[o] names(Im) <- Im.names if (verbose){ ##print rough fitting information - use the 
nls() control for more information writeLines("\n[fit_LMCurve()]") writeLines(paste("\nFitting was done using a ",n.components, "-component function:\n",sep="")) ##print parameters print(c(xm, Im)) #print some additional information writeLines("\n(equation used for fitting according Kitis & Pagonis, 2008)") }#end if ##============================================================================## ## Additional Calculations ##============================================================================## ##calculate stimulation intensity Schmidt (2008) ##Energy - E = h*v h<-6.62606957e-34 #in W*s^2 - Planck constant ny<-299792458/(LED.wavelength/10^9) #frequency of the light E<-h*ny ##transform LED.power in W/cm^2 LED.power<-LED.power/1000 stimulation_intensity<-LED.power/E ##calculate b and n from the equation of Bulur(1996) to compare results ##Using Equation 5 and 6 from Kitis (2008) b<-as.vector(max(values[,1])/xm^2) #detrapping probability n0<-as.vector((Im/exp(-0.5))*xm) ##CALCULATE 1- sigma CONFIDENCE INTERVAL ##------------------------------------------------------------------------## b.error <- rep(NA, n.components) n0.error <- rep(NA, n.components) if(fit.calcError){ ##option for confidence interval values.confint <- try(confint(fit, level = 0.68), silent = TRUE) if(!inherits(values.confint, "try-error")) { Im.confint <- values.confint[1:(length(values.confint[, 1]) / 2), ] xm.confint <- values.confint[((length(values.confint[,1])/2)+1):length(values.confint[,1]),] ##error calculation b.error < -as.vector(abs((max(values[,1])/xm.confint[,1]^2)-(max(values[,1])/xm.confint[,2]^2))) n0.error <- as.vector(abs(((Im.confint[,1]/exp(-0.5))*xm.confint[,1]) - ((Im.confint[,2]/exp(-0.5))*xm.confint[,2]))) } else { warning("[fit_LMCurve()] The computation of the parameter confidence intervals failed. Please try to run stats::confint() manually on the $fit output object!", call. 
= FALSE) } } ##------------------------------------------------------------------------## ##calculate photoionisation cross section and print on terminal ##using EQ (5) in Kitis cs<-as.vector((max(values[,1])/xm^2)/stimulation_intensity) rel_cs<-round(cs/cs[1],digits=4) ##coefficient of determination after law RSS <- sum(residuals(fit)^2) #residual sum of squares TSS <- sum((values[,2] - mean(values[,2]))^2) #total sum of squares pR<-round(1-RSS/TSS,digits=4) ##============================================================================## ## COMPONENT TO SUM CONTRIBUTION MATRIX ##============================================================================## ##+++++++++++++++++++++++++++++++ ##set matrix ##set polygon matrix for optional plot output component.contribution.matrix <- matrix(NA, nrow = length(values[,1]), ncol = (2*length(xm)) + 2) ##set x-values component.contribution.matrix[,1] <- values[,1] component.contribution.matrix[,2] <- rev(values[,1]) ##+++++++++++++++++++++++++++++++ ##set 1st polygon ##1st polygon (calculation) y.contribution_first <- (exp(0.5)*Im[1]*values[,1]/ xm[1]*exp(-values[,1]^2/(2*xm[1]^2))/ (eval(fit.function))*100) ##avoid NaN values (might happen with synthetic curves) y.contribution_first[is.nan(y.contribution_first)==TRUE] <- 0 ##set values in matrix component.contribution.matrix[,3] <- 100 component.contribution.matrix[,4] <- 100-rev(y.contribution_first) ##+++++++++++++++++++++++++++++++ ##set polygons in between ##polygons in between (calculate and plot) if (length(xm)>2){ y.contribution_prev <- y.contribution_first i<-2 ##matrix stepping k <- seq(3, ncol(component.contribution.matrix), by=2) while (i<=length(xm)-1) { y.contribution_next<-(exp(0.5)*Im[i]*values[,1]/ xm[i]*exp(-values[,1]^2/(2*xm[i]^2))/ (eval(fit.function))*100) ##avoid NaN values y.contribution_next[is.nan(y.contribution_next)==TRUE] <- 0 ##set values in matrix component.contribution.matrix[, k[i]] <- 100-y.contribution_prev component.contribution.matrix[, 
k[i]+1] <- rev(100-y.contribution_prev- y.contribution_next) y.contribution_prev <- y.contribution_prev + y.contribution_next i<-i+1 }#end while loop }#end if ##+++++++++++++++++++++++++++++++ ##set last polygon ##last polygon (calculation) y.contribution_last<-(exp(0.5)*Im[length(xm)]*values[,1]/ xm[length(xm)]*exp(-values[,1]^2/ (2*xm[length(xm)]^2))/ (eval(fit.function))*100) ##avoid NaN values y.contribution_last[is.nan(y.contribution_last)==TRUE]<-0 component.contribution.matrix[,((2*length(xm))+1)] <- y.contribution_last component.contribution.matrix[,((2*length(xm))+2)] <- 0 ##change names of matrix to make more easy to understand component.contribution.matrix.names <- c("x", "rev.x", paste(c("y.c","rev.y.c"),rep(1:n.components,each=2), sep="")) ##calculate area for each component, for each time interval component.contribution.matrix.area <- sapply( seq(3,ncol(component.contribution.matrix),by=2), function(x){ matrixStats::rowDiffs(cbind(rev(component.contribution.matrix[,(x+1)]), component.contribution.matrix[,x])) }) ##append to existing matrix component.contribution.matrix <- cbind( component.contribution.matrix, component.contribution.matrix.area, rowSums(component.contribution.matrix.area) ) ##set final column names colnames(component.contribution.matrix) <- c( component.contribution.matrix.names, paste(c("cont.c"),rep(1:n.components,each=1), sep=""), "cont.sum") ##============================================================================## ## Terminal Output (advanced) ##============================================================================## if (verbose){ ##write fill lines writeLines("------------------------------------------------------------------------------") writeLines("(1) Corresponding values according the equation in Bulur, 1996 for b and n0:\n") for (i in 1:length(b)){ writeLines(paste("b",i," = ",format(b[i],scientific=TRUE)," +/- ",format(b.error[i],scientific=TRUE),sep="")) writeLines(paste("n0",i," = 
",format(n0[i],scientific=TRUE)," +/- ",format(n0.error[i],scientific=TRUE),"\n",sep="")) }#end for loop ##write photoionisation cross section on terminal for (i in 1:length(cs)){ writeLines(paste("cs from component.",i," = ",format(cs[i],scientific=TRUE, digits=4), " cm^2", "\t >> relative: ",round(cs[i]/cs[1],digits=4),sep="")) }#end for loop writeLines(paste( "\n(stimulation intensity value used for calculation: ",format(stimulation_intensity,scientific=TRUE)," 1/s 1/cm^2)",sep="")) writeLines("(errors quoted as 1-sigma uncertainties)") writeLines("------------------------------------------------------------------------------\n") #sum of squares writeLines(paste("pseudo-R^2 = ",pR,sep="")) }#end if ##============================================================================## ## COMPOSE RETURN VALUES (data.frame) ##============================================================================## ##write output table if values exists if (exists("fit")){ ##set data.frame for a max value of 7 components output.table <- data.frame(NA,NA,NA,NA,NA,NA,NA,NA, NA,NA,NA,NA,NA,NA,NA,NA, NA,NA,NA,NA,NA,NA,NA,NA, NA,NA,NA,NA,NA,NA,NA,NA, NA,NA,NA,NA,NA,NA,NA,NA, NA,NA,NA,NA,NA,NA,NA,NA, NA,NA,NA,NA,NA,NA,NA,NA) output.tableColNames<-c("Im1","xm1", "b1","b1.error","n01","n01.error", "cs1","rel_cs1", "Im2","xm2", "b2","b2.error","n02","n02.error", "cs2","rel_cs2", "Im3","xm3", "b3","b3.error","n03","n03.error", "cs3","rel_cs3", "Im4","xm4", "b4","b4.error","n04","n04.error", "cs4","rel_cs4", "Im5","xm5", "b5","b5.error","n05","n05.error", "cs5","rel_cs5", "Im6","xm6", "b6","b6.error","n06","n06.error", "cs6","rel_cs6", "Im7","xm7", "b7","b7.error","n07","n07.error", "cs7","rel_cs7") ##write components in output table i<-0 k<-1 while(i<=n.components*8){ output.table[1,i+1]<-Im[k] output.table[1,i+2]<-xm[k] output.table[1,i+3]<-b[k] output.table[1,i+4]<-b.error[k] output.table[1,i+5]<-n0[k] output.table[1,i+6]<-n0.error[k] output.table[1,i+7]<-cs[k] output.table[1,i+8]<-rel_cs[k] 
i<-i+8 k<-k+1 } ##add pR and n.components output.table<-cbind(sample_ID,sample_code,n.components,output.table,pR) ###alter column names colnames(output.table)<-c("ID","sample_code","n.components",output.tableColNames,"pseudo-R^2") ##----------------------------------------------------------------------------## }#endif::exists fit }else{ output.table <- NA component.contribution.matrix <- NA message("[fit_LMCurve] Fitting Error: Plot without fit produced!") } # Calculate component curves ---------------------------------------------- component_matrix <- NA if(!inherits(fit,"try-error")){ component_matrix <- matrix(NA, nrow = nrow(values), ncol = 2 + length(Im)) colnames(component_matrix) <- c("TIME", "SUM", paste("COMP_", 1:length(Im))) component_matrix[, 1] <- values[, 1] component_matrix[, 2] <- eval(fit.function) ## add single components for(i in 1:length(Im)){ component_matrix[, 2 + i] <- exp(0.5) * Im[i] * values[, 1] / xm[i] * exp(-values[, 1] ^ 2 / (2 * xm[i] ^ 2)) } } # Plotting ---------------------------------------------------------------- if(plot){ ##cheat the R check routine x <- NULL; rm(x) ##grep package colour gallery col <- get("col", pos = .LuminescenceEnv) ##change xlim values in case of the log plot the avoid problems if((log == "x" | log == "xy") && xlim[1] == 0){ warning("[fit_LMCurve()] x-axis limitation change to avoid 0 values for log-scale!", call. 
= FALSE) xlim <- c(2^0.5/2 * max(values[,1])/length(values[,1]), xlim[2]) } ##set plot frame layout(matrix(c(1,2,3),3,1, byrow=TRUE),c(1.6,1,1), c(1,0.3,0.4),TRUE) par(oma = c(1,1,1,1), mar = c(0,4,3,0), cex=cex) ##==upper plot==## ##open plot area plot( NA, NA, xlim = xlim, ylim = ylim, xlab = "", xaxt = "n", main = main, log = log, ylab = ylab )#endplot mtext(side=3,sample_code,cex=0.8*cex) ##plotting measured signal points(values[, 1], values[, 2], pch = 20, col = rgb(0.4, 0.4, 0.4, 0.5)) ##==pseudo curve==##------------------------------------------------------# ##curve for used pseudo values if(inherits(fit,"try-error")==TRUE & missing(start_values)==TRUE){ fit.function<-fit.equation(Im.i=1:n.components,xm.i=1:n.components) Im<-Im.pseudo[1:n.components] xm<-xm.pseudo[1:n.components] ##draw pseudo curve lines(values[,1],eval(fit.function), lwd=2, col="red", lty=2) axis(side=1) mtext(side=1,xlab, cex=.9*cex,line=2) mtext(side=4,paste(n.components, " component pseduo function is shown",sep=""),cex=0.7, col="blue") ##draw information text on plot text(min(values[,1]),max(values[,2]),"FITTING ERROR!",pos=4) ##additional legend legend("topright",c("pseudo sum function"),lty=2,lwd=2,col="red",bty="n") } ##==pseudo curve==##------------------------------------------------------## ##plot sum function if(inherits(fit,"try-error")==FALSE){ lines(values[,1],eval(fit.function), lwd=2, col="black") legend.caption<-"sum curve" curve.col<-1 ##plot signal curves ##plot curve for additional parameters for (i in 1:length(xm)) { curve(exp(0.5)*Im[i]*x/xm[i]*exp(-x^2/(2*xm[i]^2)),col=col[i+1], lwd=2,add=TRUE) legend.caption<-c(legend.caption,paste("component ",i,sep="")) curve.col<-c(curve.col,i+1) } ##plot legend legend(if(log=="x"| log=="xy"){ if(input.dataType=="pLM"){"topright"}else{"topleft"}}else{"topright"}, legend.caption,lty=1,lwd=2,col=col[curve.col], bty="n") ##==lower plot==## ##plot residuals par(mar=c(4.2,4,0,0)) plot(values[,1],residuals(fit), xlim=xlim, xlab=xlab, 
type="l", col="grey", ylab="Residual", lwd=2, log=log) ##ad 0 line abline(h=0) ##------------------------------------------------------------------------# ##++component to sum contribution plot ++## ##------------------------------------------------------------------------# ##plot component contribution to the whole signal #open plot area par(mar=c(4,4,3.2,0)) plot(NA,NA, xlim=xlim, ylim=c(0,100), ylab="Contribution [%]", xlab=xlab, main="Component contribution to sum curve", log=if(log=="xy"){"x"}else{log}) stepping <- seq(3,length(component.contribution.matrix),2) for(i in 1:length(xm)){ polygon(c(component.contribution.matrix[,1], component.contribution.matrix[,2]), c(component.contribution.matrix[,stepping[i]], component.contribution.matrix[,stepping[i]+1]), col = col[i+1]) } rm(stepping) ##------------------------------------------------------------------------## }#end if try-error for fit if(fun){sTeve()} } ##----------------------------------------------------------------------------- ##remove objects try(unlist("parameters")) ##============================================================================# ## Return Values ##============================================================================# newRLumResults.fit_LMCurve <- set_RLum( class = "RLum.Results", data = list( data = output.table, fit = fit, component_matrix = component_matrix, component.contribution.matrix = list(component.contribution.matrix) ), info = list(call = sys.call()) ) invisible(newRLumResults.fit_LMCurve) } Luminescence/R/internal_as.latex.table.R0000644000176200001440000002552014236146743017727 0ustar liggesusers#' Create LaTex tables from data.frames and RLum objects #' #' This function takes a data.frame and returns a table in LaTex code that #' can be copied in any tex document. 
#'
#' @param x [data.frame] or `RLum` object (**required**)
#'
#' @param row.names currently unused
#'
#' @param col.names currently unused
#'
#' @param comments [logical] (*with default*):
#' insert LaTex comments
#'
#' @param pos [character] (*with default*):
#' `character` of length one specifying the alignment of each column, e.g.,
#' pos'clr' for a three column data frame and center, left
#' and right alignment
#'
#' @param digits [numeric] (*with default*):
#' number of digits (numeric fields)
#'
#' @param rm.zero [logical] (*with default*): remove columns containing
#' only zeros, however this might not be wanted in all cases
#'
#' @param select [character] (*optional*):
#' a [character] vector passed to [subset]
#'
#' @param split [integer] (*optional*):
#' an [integer] specifying the number of individual tables
#' the data frame is split into. Useful for wide tables. Currently unused.
#'
#' @param tabular_only [logical] (*with default*): if `TRUE` only the tabular but not the
#' table environment is returned. This gives a lot of additional flexibility at hand
#'
#' @param ... options: `verbose`
#'
#' @section TODO:
#' - Improve by using RegEx to dynamically find error fields, eg. ( "([ ]err)|(^err)" )
#' -
#'
#' @return
#' Returns LaTex code
#'
#' @examples
#' df <- data.frame(x = 1:10, y = letters[1:10])
#' .as.latex.table(df)
#' .as.latex.table(df, pos = "lr")
#' .as.latex.table(df, select = "y", pos = "r")
#'
#' @md
#' @noRd
.as.latex.table <- function(x,
                            row.names = NULL,
                            col.names = NULL,
                            comments = TRUE,
                            pos = "c",
                            digits = 3,
                            rm.zero = TRUE,
                            select,
                            split = NULL,
                            tabular_only = FALSE,
                            ...) {

  ## collect all arguments once so they can be forwarded to the
  ## class-specific worker via do.call()
  args <- list(x = x,
               row.names = row.names,
               col.names = col.names,
               comments = comments,
               pos = pos,
               digits = digits,
               rm.zero = rm.zero,
               split = split,
               tabular_only = tabular_only,
               ... = ...)

  ## 'select' has no default, so it can only be forwarded when supplied
  if (!missing(select))
    args$select <- select

  ## dispatch on the first class of 'x'; unmatched classes fall through to
  ## an invisible NULL (no default branch)
  switch(class(x)[1],
         data.frame = do.call(".as.latex.table.data.frame", args),
         DRAC.highlights = do.call(".as.latex.table.data.frame", args),
         RLum.Results = do.call(".as.latex.table.RLum.Results", args))
}

################################################################################
## "Method" RLum.Results                                                      ##
##----------------------------------------------------------------------------##
.as.latex.table.RLum.Results <- function(x,
                                         row.names = NULL,
                                         col.names = NULL,
                                         comments = TRUE,
                                         pos = "c",
                                         digits = 3,
                                         rm.zero = TRUE,
                                         select,
                                         split = NULL,
                                         ...) {

  ## Object: DRAC.highlights
  if (x@originator == "use_DRAC") {
    x <- get_RLum(x)$highlights
    x <- .digits(x, digits)

    ##remove columns containing zero values ... they add no information
    if(rm.zero){
      x <- x[sapply(x, function(y){
        y <- suppressWarnings(as.numeric(y))
        ## keep non-numeric columns (coercion yields NA) and numeric
        ## columns whose sum is non-zero
        if(anyNA(y) || sum(y, na.rm = TRUE) != 0){
          TRUE
        }else{
          FALSE
        }
      })]
    }

    ##add +/- symbol and remove the columns we don't need
    ## columns right before an "err" column carry the value the error belongs to
    fields.w.error <- (grep(names(x), pattern = "err", fixed = TRUE) - 1)
    for(i in fields.w.error)
      x[ ,i] <- paste0(x[ ,i], "\\,$\\pm$\\,", trimws(x[ ,i + 1]))
    x <- x[-c(fields.w.error + 1)]

    ##create latex table
    text <- .as.latex.table(x, comments = comments, pos = pos, split = split, ...)

    ##split table
    text <- strsplit(text[[1]], split = "\n", fixed = TRUE)

    ##exchange columns ... or delete them at all (2nd step)
    ##Mineral ID
    for(i in seq_along(text)){
      text[[i]][grepl(pattern = "Mineral", x = text[[i]], fixed = TRUE)] <-
        "\t\\multicolumn{1}{p{0.5cm}}{\\centering \\textbf{M.}} & "
    }

    ##put things again together (single character)
    text <- paste(text[[1]], collapse = "\n")

    ##replace some latex stuff
    text <- gsub(pattern = "p{2cm}", replacement = "p{1.5cm}", x = text, fixed = TRUE)
    text <- gsub(pattern = "Gy.ka-1", replacement = "Gy~ka$^{-1}$", x = text, fixed = TRUE)
    text <- gsub(pattern = "De", replacement = "$D_{E}$", x = text, fixed = TRUE)
    text <- gsub(pattern = "alphadoserate", replacement = "$\\dot{D}_{\\alpha}$", x = text, fixed = TRUE)
    text <- gsub(pattern = "betadoserate", replacement = "$\\dot{D}_{\\beta}$", x = text, fixed = TRUE)
    text <- gsub(pattern = "gammadoserate", replacement = "$\\dot{D}_{\\gamma}$", x = text, fixed = TRUE)
    text <- gsub(pattern = "Cosmicdoserate", replacement = "$\\dot{D}_{cosm.}$", x = text, fixed = TRUE)
    text <- gsub(pattern = "External \\\\ doserate", replacement = "$\\dot{D}_{ext.}$", x = text, fixed = TRUE)
    text <- gsub(pattern = "Internal \\\\ doserate", replacement = "$\\dot{D}_{int.}$", x = text, fixed = TRUE)
    text <- gsub(pattern = "Environmental \\\\ Dose \\\\ Rate", replacement = "$\\dot{D}_{env.}$", x = text, fixed = TRUE)

    ##return result
    return(text)
  }# EndOf::use_DRAC
}

################################################################################
## "Method" data.frame                                                        ##
##----------------------------------------------------------------------------##
.as.latex.table.data.frame <- function(x,
                                       row.names = NULL,
                                       col.names = NULL,
                                       comments = TRUE,
                                       pos = "c",
                                       digits = 3,
                                       select,
                                       split = NULL,
                                       tabular_only = FALSE,
                                       ...) {

  ## Integrity checks ----
  if (!is.data.frame(x))
    stop("x must be a data frame", call. = FALSE)
  if (!is.null(col.names) && length(col.names) != ncol(x))
    stop("length of col.names does not match the number of columns", call. = FALSE)
  if (!is.null(row.names) && length(row.names) != nrow(x))
    stop("length of row.names does not match the number of rows", call. = FALSE)
  if (length(pos) != 1)
    stop("length of pos does not match the number of columns", call. = FALSE)

  ## Default settings ----
  options <- list(verbose = TRUE)

  ## Override settings ----
  options <- modifyList(options, list(...))

  ## Subset data frame ----
  if (!missing(select)) {
    is.name <- select %in% names(x)
    if (any(!is.name))
      stop("Undefined columns selected. Please check provided column names in 'select'.",
           call. = FALSE)
    x <- subset(x, select = select)
  }

  ## Format numeric fields ----
  x <- .digits(x, digits)

  ## Split the table
  if (is.null(split))
    split <- 1
  chunks <- ceiling(ncol(x) / split)
  chunks.start <- seq(1, ncol(x), chunks)
  chunks.end <- chunks.start + chunks - 1
  chunks.end[length(chunks.end)] <- ncol(x)

  tex.table.list <- vector("list", split)

  for (i in seq_along(tex.table.list)) {
    x.chunk <- x[ ,chunks.start[i]:chunks.end[i]]

    ## NOTE(review): tests ncol(x), not ncol(x.chunk) - a one-column chunk of
    ## a wider (split) table is not re-wrapped here; confirm intended
    if (ncol(x) == 1) {
      x.chunk <- as.data.frame(x.chunk)
      colnames(x.chunk) <- names(x[i])
    }

    ## Comments ----
    tex.comment.usePackage <- ifelse(comments,
                                     "% add usepackage{adjustbox} to latex preamble \n",
                                     "")

    ## Header ----
    ## spaces in column names become LaTeX line breaks inside the cell
    col.names <- tex.table.header <- gsub(pattern = " ",
                                          x = names(x.chunk),
                                          replacement = " \\\\\\\\ ")
    tex.table.header <- paste0("\t",
                               paste("\\multicolumn{1}{p{2cm}}{\\centering",
                                     col.names, "}",
                                     collapse = " & \n\t"),
                               "\\\\ \n")

    ## Rows ----
    ## seq_len() keeps this loop safe for zero-row input (1:0 would run twice)
    tex.table.rows <- ""
    for (j in seq_len(nrow(x.chunk))) {
      tex.table.rows <- paste0(tex.table.rows,
                               paste(paste(x.chunk[j, ], collapse = " & "), "\\\\ \n"))
    }

    ## catch potential latex problems with underscores - after all are numbers, in can be only
    ## on the ID
    tex.table.rows <- gsub("_", "\\_", tex.table.rows, fixed = TRUE)

    ## Tex table ----
    if (nchar(pos) != 1 && nchar(pos) != ncol(x))
      pos <- "c"
    ## fall back to centred columns unless *every* character is a valid column
    ## type (was !any(), which accepted mixed valid/invalid input like "cx")
    if (!all(strsplit(pos, split = "")[[1]] %in% c("l", "c", "r")))
      pos <- "c"
    if (nchar(pos) == 1)
      pos <- paste0(rep(pos, ncol(x)), collapse = "")

    if(tabular_only){
      tex.table.begin <- paste0(paste(" \\begin{tabular}{", pos, "}\n"),
                                " \\hline \n")
      tex.table.end <- paste0(" \\hline \n",
                              " \\end{tabular}")
    }else{
      tex.table.begin <- paste0("\\begin{table}[ht] \n",
                                " \\centering \n",
                                " \\begin{adjustbox}{max width=\\textwidth} \n",
                                paste(" \\begin{tabular}{", pos, "}\n"),
                                " \\hline \n")
      tex.table.end <- paste0(" \\hline \n",
                              " \\end{tabular} \n",
                              " \\end{adjustbox} \n",
                              "\\end{table}")
    }

    tex.table <- paste0(tex.comment.usePackage,
                        tex.table.begin,
                        tex.table.header,
                        "\\hline \n",
                        tex.table.rows,
                        tex.table.end)

    if (options$verbose)
      cat(tex.table)

    tex.table.list[[i]] <- tex.table
  }

  invisible(tex.table.list)
}

# This function takes a data.frame, checks each column and tries to
# force the specified amount of digits if numeric or coercable to numeric
.digits <- function(x, digits) {
  for (i in seq_len(ncol(x))) {
    if (is.factor(x[ ,i]))
      x[ ,i] <- as.character(x[ ,i])
    ## NOTE(review): only the first element decides whether the whole column is
    ## treated as numeric - assumes homogeneous columns; confirm upstream
    test.numeric <- suppressWarnings(as.numeric(x[ ,i]))
    if (!is.na(test.numeric[1]))
      x[ ,i] <- format(round(test.numeric, digits), nsmall = digits, digits = digits)
  }
  return(x)
}
## Luminescence/R/write_R2TIFF.R0000644000176200001440000000613014264017373015365 0ustar liggesusers
#'@title Export RLum.Data.Image and RLum.Data.Spectrum objects to TIFF Images
#'
#'@description Simple wrapper around [tiff::writeTIFF] to export suitable
#' RLum-class objects to TIFF images. Per default 16-bit TIFF files are exported.
#'
#'@param object [RLum.Data.Image-class] or [RLum.Data.Spectrum-class] object (**required**):
#'input object, can be a [list] of such objects
#'
#'@param file [character] (**required**): the file name and path
#'
#'@param norm [numeric] (*with default*): normalisation values. Values in TIFF files must range between 0-1, however, usually
#'in imaging applications the pixel values are real integer count values. The normalisation to the
#'to the highest 16-bit integer values -1 ensures that the numerical values are retained in the exported
#'image.
#' If `1` nothing is normalised.
#'
#'@param ... further arguments to be passed to [tiff::writeTIFF].
#'
#'@return A TIFF file
#'
#'@author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#'@section Function version: 0.1.0
#'
#'@seealso [tiff::writeTIFF], [RLum.Data.Image-class], [RLum.Data.Spectrum-class]
#'
#'@keywords IO
#'
#'@examples
#'data(ExampleData.RLum.Data.Image, envir = environment())
#'write_R2TIFF(ExampleData.RLum.Data.Image, file = tempfile())
#'
#'@md
#'@export
write_R2TIFF <- function(
  object,
  file = tempfile(),
  norm = 65535,
  ...
){
  # Integrity ---------------------------------------------------------------
  ## most of the users don't need this import, no need to bother them
  ## with required libraries
  if (!requireNamespace("tiff", quietly = TRUE))
    stop("Exporting objects to TIFF files requires the package tiff.\n",
         "To install this package run 'install.packages('tiff')' in your R console.",
         call. = FALSE)

  # Transform --------------------------------------------------------------
  ## make a list ... it is just easier
  if(!is(object, "list"))
    object <- list(object)

  ## check list input: *every* element must be of a supported class
  ## (was !any(), which let mixed lists with unsupported elements through)
  if(!all(vapply(object, function(x) class(x)[1], character(1)) %in%
            c("RLum.Data.Image", "RLum.Data.Spectrum")))
    stop("[write_R2TIFF()] Only RLum.Data.Image and RLum.Data.Spectrum objects are supported!",
         call. = FALSE)

  ## check path
  if(!dir.exists(dirname(file)))
    stop("[write_R2TIFF()] Path does not exist!", call. = FALSE)

  ## create file names
  file <- normalizePath(file, mustWork = FALSE)
  file_dir <- dirname(file)
  file_base <- strsplit(basename(file), split = ".", fixed = TRUE)[[1]][1]

  ## expand if longer than 1
  if(length(object) > 1)
    file <- normalizePath(paste0(file_dir,"/",file_base,"_",1:length(object),".tiff"),
                          mustWork = FALSE)

  # Export to TIFF ----------------------------------------------------------
  ## remove arguments we already use; filter by argument *names*
  ## (was list(...) %in% c("what", "where"), which compared the argument
  ## values instead of their names and therefore never removed anything)
  dots <- list(...)
  args <- dots[!names(dots) %in% c("what", "where")]

  ## modify arguments
  args <- modifyList(x = list(
    bits.per.sample = 16L
  ), args)

  for(i in 1:length(object)){
    object[[i]]@data[] <- as.numeric(object[[i]]@data)
    object[[i]]@data[] <- object[[i]]@data / norm[1]
    do.call(what = tiff::writeTIFF,
            args = c(list(object[[i]]@data, where = file[i]), args))
  }
}
## Luminescence/R/addins_RLum.R0000644000176200001440000000473314264017373015427 0ustar liggesusers
##################################################################################
##                   Luminescence - RStudio Add Ins                             ##
##################################################################################
##<>
##
## - Add-ins should support more experienced users. For all others we have the package 'RLumShiny'
##
## - Add-ins should be provided as non-exported function only, having a name with a leading dot, e.g., .addin.
##   This prevents further chaos in the manual.
##
## - Interactive add-ins are not desired, except they are implemented in the package 'RLumShiny' or they
##   are only available if the package 'RLumShiny' is available.
##
##
##<>
##
## Q. Why are the add-ins non-interactive ... had you been too lazy?
## A. No, but interactivity would require the installation of 'shiny' by default, which is not
##    desired.
##
## Q. The add-ins are not shown in the 'Addin' menu?
## A. Well, if you read this information you are an advanced user, so please install the
##    package 'rstudioapi', 'devtools' and get happy.

#'Install package development version
#'
#'The function uses the GitHub APconnection provided by Christoph Burow
#'
#'@author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#'@noRd
.installDevelopmentVersion <- function(){
  install_DevelopmentVersion(force_install = TRUE)
}

#'Search for TODOs in the source code and list them in the terminal
#'
#'This add-in is a tool developers may want to use to briefly list all open
#'issues in the terminal, instead of using search and stepping through the results.
#'
#'@author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#'@noRd
.listTODO <- function(){
  ##check if package is installed
  if(!requireNamespace("rstudioapi", quietly = TRUE)){
    message("Package 'rstudioapi' is not installed but needed to search for TODOs, do you want to install it?\n\n",
            " [n/N]: No (default)\n",
            " [y/Y]: Yes\n")

    ##parse answer
    ## NOTE(review): readline() returns "" in non-interactive sessions, so the
    ## install branch below is silently skipped there - presumably intended; confirm
    answer <- readline()
    if(tolower(answer) == "y"){
      utils::install.packages("rstudioapi", dependencies = TRUE)
    }

  }else{
    ##parse code
    ## contents of the source document currently open in RStudio (one element per line)
    code <- rstudioapi::getActiveDocumentContext()$contents

    ##get lines with ##TODO
    ## regex: a '#' followed by optional whitespace and the literal word TODO
    id <- grep(pattern = "#\\s*TODO", x = code, fixed = FALSE)

    ##list lines
    cat("\n", "[", length(id), " issue(s)]\n", sep = "")
    for(i in id){
      cat(" line ", i, ": ->", code[i], "\n", sep = "")
    }
  }
}
Luminescence/R/plot_RLum.Data.Image.R0000644000176200001440000002001614446324326017025 0ustar liggesusers
#' @title Plot function for an `RLum.Data.Image` S4 class object
#'
#' @description The function provides very basic plot functionality for image data of an
#' [RLum.Data.Image-class] object. For more sophisticated plotting it is recommended
#' to use other very powerful packages for image processing.
#'
#'
#' **Details on the plot functions**
#'
#' Supported plot types:
#'
#' **`plot.type = "plot.raster"`**
#'
#' Uses the standard plot function of R [graphics::image]. If wanted, the image
#' is enhanced, using the argument `stretch`. Possible values are `hist`, `lin`, and
#' `NULL`. The latter does nothing. The argument `useRaster = TRUE` is used by default, but
#' can be set to `FALSE`.
#'
#' **`plot.type = "contour"`**
#'
#' This uses the function [graphics::contour]
#'
#' @param object [RLum.Data.Image-class] (**required**): S4
#' object of class `RLum.Data.Image`
#'
#' @param par.local [logical] (*with default*): use local graphical
#' parameters for plotting, e.g. the plot is shown in one column and one row.
#' If `par.local = FALSE` global parameters are inherited.
#'
#' @param frames [numeric] (*optional*): sets the frames to be set, by default all
#' frames are plotted. Can be sequence of numbers, as long as the frame number is valid.
#'
#' @param plot.type [character] (*with default*): plot types.
#' Supported types are `plot.raster`, `contour`
#'
#' @param ... further arguments and graphical parameters that will be passed
#' to the specific plot functions. Standard supported parameters are `xlim`, `ylim`, `zlim`,
#' `xlab`, `ylab`, `main`, `legend` (`TRUE` or `FALSE`), `col`, `cex`, `axes` (`TRUE` or `FALSE`),
#' `zlim_image` (adjust the z-scale over different images), `stretch`
#'
#' @return Returns a plot
#'
#' @note The axes limitations (`xlim`, `ylim`, `zlim`) work directly on the object,
#' so that regardless of the chosen limits the image parameters can be adjusted for
#' best visibility. However, in particular for z-scale limitations this is not always
#' wanted, please use `zlim_image` to maintain a particular value range over a
#' series of images.
#'
#' @section Function version: 0.2.1
#'
#' @author
#' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#' @seealso [RLum.Data.Image-class], [plot], [plot_RLum], [graphics::image], [graphics::contour]
#'
#' @keywords aplot
#'
#' @examples
#'
#' ##load data
#' data(ExampleData.RLum.Data.Image, envir = environment())
#'
#' ##plot data
#' plot_RLum.Data.Image(ExampleData.RLum.Data.Image)
#'
#' @md
#' @export
plot_RLum.Data.Image <- function(
  object,
  frames = NULL,
  par.local = TRUE,
  plot.type = "plot.raster",
  ...
){

  # Integrity check -----------------------------------------------------------
  ##check if object is of class RLum.Data.Image
  if(!inherits(object, "RLum.Data.Image"))
    stop("[plot_RLum.Data.Image()] Input object is not of type RLum.Data.Image.",
         call. = FALSE)

  ## extract object; from here on 'object' is the raw image array
  object <- object@data

  # Define additional functions ---------------------------------------------
  ## contrast stretching for a single frame; always returns 2D data
  .stretch <- function(x, type = "lin"){
    if(is.null(type[1]))
      return(x[,,1])

    if(type[1] == "lin") {
      x <- x[,,1]
      r <- range(x)
      q <- stats::quantile(x, c(0.05, 0.95), na.rm = TRUE)

      ## consider special case for q == 0
      if(sum(q) > 0) {
        x <- (r[2] * (x - q[1])) / (q[2] - q[1])
        x[x < 0] <- r[1]
        x[x > r[2]] <- r[2]
      }
    }

    if(type[1] == "hist")
      x <- matrix(stats::ecdf(x)(x) * 255, ncol = ncol(x))

    return(x)
  }

  # Plot settings -----------------------------------------------------------
  plot_settings <- modifyList(x = list(
    main = "RLum.Data.Image",
    axes = TRUE,
    xlab = "Length [px]",
    ylab = "Height [px]",
    xlim = c(1,dim(object)[1]),
    ylim = c(1,dim(object)[2]),
    zlim = range(object),
    zlim_image = NULL,
    legend = TRUE,
    useRaster = TRUE,
    stretch = "hist",
    col = c(grDevices::hcl.colors(50, palette = "Inferno")),
    cex = 1
  ), val = list(...), keep.null = TRUE)

  ## set frames; clamp first/last requested frame into the valid range
  if(!is.null(frames)) {
    frames[1] <- max(1,min(frames))
    frames[length(frames)] <- min(dim(object)[3],max(frames))
    object <- object[,,frames,drop = FALSE]
  }

  ## enforce xlim, ylim and zlim directly here
  ## xlim, ylim
  ## NOTE(review): the subset is written back into the full-size array via
  ## object[] <- ..., i.e. values recycle when the limits shrink the range;
  ## kept as in the original implementation - confirm intended
  object[] <- object[
    max(plot_settings$xlim[1], 1):min(plot_settings$xlim[2], dim(object)[1]),
    max(plot_settings$ylim[1], 1):min(plot_settings$ylim[2], dim(object)[2]),,
    drop = FALSE]

  ## zlim
  object[object <= plot_settings$zlim[1]] <- max(0,plot_settings$zlim[1])
  object[object >= plot_settings$zlim[2]] <- min(max(object),plot_settings$zlim[2])

  ##par setting for possible combination with plot method for RLum.Analysis objects
  if(par.local)
    par(mfrow=c(1,1), cex = plot_settings$cex)

  if (plot.type == "plot.raster") {
  # plot.raster -------------------------------------------------------------
    for(i in 1:dim(object)[3]) {
      par.default <- par(mar = c(4.5,4.5,4,3))
      on.exit(par(par.default))
      x <- object[, , i, drop = FALSE]
      image <-.stretch(x, type = plot_settings$stretch)

      graphics::image(
        x = image,
        useRaster = plot_settings$useRaster,
        axes = FALSE,
        zlim = if(is.null(plot_settings$zlim_image)) range(image) else plot_settings$zlim_image,
        xlab = plot_settings$xlab,
        ylab = plot_settings$ylab,
        main = paste0(plot_settings$main, " #",i),
        col = plot_settings$col)
      graphics::box()

      ## axes: image() plots on a 0..1 coordinate system, so pixel labels are
      ## mapped onto equally spaced positions
      if(plot_settings$axes) {
        xlab <- pretty(1:dim(x)[1])
        xlab[c(1,length(xlab))] <- c(0,dim(x)[1])
        xat <- seq(0,1,length.out = length(xlab))
        graphics::axis(side = 1, at = xat, labels = xlab)

        ylab <- pretty(1:dim(x)[2])
        ylab[c(1,length(ylab))] <- c(0,dim(x)[2])
        yat <- seq(0,1,length.out = length(ylab))
        graphics::axis(side = 2, at = yat, labels = ylab)
      }

      ## add legend
      if(plot_settings$legend) {
        par.default <- c(par.default, par(xpd = TRUE))
        on.exit(par(par.default))
        col_grad <- plot_settings$col[seq(1, length(plot_settings$col), length.out = 14)]
        slices <- seq(0,1,length.out = 15)

        for(s in 1:(length(slices) - 1)){
          graphics::rect(
            xleft = par()$usr[4] * 1.01,
            xright = par()$usr[4] * 1.03,
            ybottom = slices[s],
            ytop = slices[s + 1],
            col = col_grad[s],
            border = TRUE)
        }

        text(
          x = par()$usr[4] * 1.04,
          y = par()$usr[2],
          labels = if(is.null(plot_settings$zlim_image)) {
            format(max(x), digits = 1, scientific = TRUE)
          } else {
            format(plot_settings$zlim_image[2], digits = 1, scientific = TRUE)
          },
          cex = 0.7,
          srt = 270,
          pos = 3)

        text(
          x = par()$usr[4] * 1.04,
          y = par()$usr[3],
          labels = if(is.null(plot_settings$zlim_image)) {
            format(min(x), digits = 1, scientific = TRUE)
          } else {
            format(plot_settings$zlim_image[1], digits = 1, scientific = TRUE)
          },
          cex = 0.7,
          pos = 3,
          srt = 270)
      }
    }

  }else if(plot.type == "contour"){
    for(i in 1:dim(object)[3]) {
      x <- object[, , i, drop = FALSE]
      graphics::contour(
        x = x[,,1],
        axes = FALSE,
        zlim = if(is.null(plot_settings$zlim_image)) range(x) else plot_settings$zlim_image,
        xlab = plot_settings$xlab,
        ylab = plot_settings$ylab,
        main = paste0(plot_settings$main, " #",i),
        col = plot_settings$col)
      graphics::box()
    }

    ## axes
    ## NOTE: axes are added after the loop, i.e. only the last frame gets
    ## axis annotation - kept as in the original implementation
    if(plot_settings$axes) {
      xlab <- pretty(1:dim(x)[1])
      xlab[c(1,length(xlab))] <- c(0,dim(x)[1])
      xat <- seq(0,1,length.out = length(xlab))
      graphics::axis(side = 1, at = xat, labels = xlab)

      ylab <- pretty(1:dim(x)[2])
      ## fix: label the y-axis with the frame height (dim 2); the original used
      ## dim(x)[1] (width) here, a copy-paste slip from the x-axis code above
      ylab[c(1,length(ylab))] <- c(0,dim(x)[2])
      yat <- seq(0,1,length.out = length(ylab))
      graphics::axis(side = 2, at = yat, labels = ylab)
    }

  }else{
    stop("[plot_RLum.Data.Image()] Unknown plot type.", call. = FALSE)
  }
}
## Luminescence/R/calc_FiniteMixture.R0000644000176200001440000005350114236146743017002 0ustar liggesusers
#' @title Apply the finite mixture model (FMM) after Galbraith (2005) to a given De
#' distribution
#'
#' @description This function fits a k-component mixture to a De distribution with differing
#' known standard errors. Parameters (doses and mixing proportions) are
#' estimated by maximum likelihood assuming that the log dose estimates are
#' from a mixture of normal distributions.
#'
#' @details This model uses the maximum likelihood and Bayesian Information Criterion
#' (BIC) approaches.
#'
#' Indications of overfitting are:
#'
#' - increasing BIC
#' - repeated dose estimates
#' - covariance matrix not positive definite
#' - covariance matrix produces `NaN`
#' - convergence problems
#'
#' **Plot**
#'
#' If a vector (`c(k.min:k.max)`) is provided
#' for `n.components` a plot is generated showing the the k components
#' equivalent doses as normal distributions. By default `pdf.weight` is
#' set to `FALSE`, so that the area under each normal distribution is
#' always 1. If `TRUE`, the probability density functions are weighted by
#' the components proportion for each iteration of k components, so the sum of
#' areas of each component equals 1. While the density values are on the same
#' scale when no weights are used, the y-axis are individually scaled if the
#' probability density are weighted by the components proportion.\cr
#' The standard deviation (sigma) of the normal distributions is by default
#' determined by a common `sigmab` (see `pdf.sigma`).
For #' `pdf.sigma = "se"` the standard error of each component is taken #' instead.\cr #' The stacked [graphics::barplot] shows the proportion of each component (in #' per cent) calculated by the FFM. The last plot shows the achieved BIC scores #' and maximum log-likelihood estimates for each iteration of k. #' #' @param data [RLum.Results-class] or [data.frame] (**required**): #' for [data.frame]: two columns with De `(data[,1])` and De error `(values[,2])` #' #' @param sigmab [numeric] (**required**): #' spread in De values given as a fraction (e.g. 0.2). This value represents the expected #' overdispersion in the data should the sample be well-bleached #' (Cunningham & Wallinga 2012, p. 100). #' #' @param n.components [numeric] (**required**): #' number of components to be fitted. If a vector is provided (e.g. `c(2:8)`) the #' finite mixtures for 2, 3 ... 8 components are calculated and a plot and a #' statistical evaluation of the model performance (BIC score and maximum #' log-likelihood) is provided. #' #' @param grain.probability [logical] (*with default*): #' prints the estimated probabilities of which component each grain is in #' #' @param dose.scale [numeric]: #' manually set the scaling of the y-axis of the first plot with a vector #' in the form of `c(min, max)` #' #' @param pdf.weight [logical] (*with default*): #' weight the probability density functions by the components proportion (applies only #' when a vector is provided for `n.components`) #' #' @param pdf.sigma [character] (*with default*): #' if `"sigmab"` the components normal distributions are plotted with a common standard #' deviation (i.e. `sigmab`) as assumed by the FFM. Alternatively, #' `"se"` takes the standard error of each component for the sigma #' parameter of the normal distribution #' #' @param pdf.colors [character] (*with default*): #' colour coding of the components in the the plot. 
#' Possible options are `"gray"`, `"colors"` and `"none"` #' #' @param pdf.scale [numeric]: #' manually set the max density value for proper scaling of the x-axis of the first plot #' #' @param plot.proportions [logical] (*with default*): #' plot [graphics::barplot] showing the proportions of components if #' `n.components` a vector with a length > 1 (e.g., `n.components = c(2:3)`) #' #' @param plot [logical] (*with default*): plot output #' #' @param ... further arguments to pass. See details for their usage. #' #' @return #' Returns a plot (*optional*) and terminal output. In addition an #' [RLum.Results-class] object is returned containing the #' following elements: #' #' \item{.$summary}{[data.frame] summary of all relevant model results.} #' \item{.$data}{[data.frame] original input data} #' \item{.$args}{[list] used arguments} #' \item{.$call}{[call] the function call} #' \item{.$mle}{ covariance matrices of the log likelihoods} #' \item{.$BIC}{ BIC score} #' \item{.$llik}{ maximum log likelihood} #' \item{.$grain.probability}{ probabilities of a grain belonging to a component} #' \item{.$components}{[matrix] estimates of the de, de error and proportion for each component} #' \item{.$single.comp}{[data.frame] single component FFM estimate} #' #' If a vector for `n.components` is provided (e.g. `c(2:8)`), #' `mle` and `grain.probability` are lists containing matrices of the #' results for each iteration of the model. #' #' The output should be accessed using the function [get_RLum] #' #' @section Function version: 0.4.2 #' #' @author #' Christoph Burow, University of Cologne (Germany) \cr #' Based on a rewritten S script of Rex Galbraith, 2006. #' #' @seealso [calc_CentralDose], [calc_CommonDose], #' [calc_FuchsLang2001], [calc_MinDose] #' #' @references #' Galbraith, R.F. & Green, P.F., 1990. Estimating the component #' ages in a finite mixture. Nuclear Tracks and Radiation Measurements 17, #' 197-206. #' #' Galbraith, R.F. & Laslett, G.M., 1993. 
Statistical models #' for mixed fission track ages. Nuclear Tracks Radiation Measurements 4, #' 459-470. #' #' Galbraith, R.F. & Roberts, R.G., 2012. Statistical aspects of #' equivalent dose and error calculation and display in OSL dating: An overview #' and some recommendations. Quaternary Geochronology 11, 1-27. #' #' Roberts, R.G., Galbraith, R.F., Yoshida, H., Laslett, G.M. & Olley, J.M., 2000. #' Distinguishing dose populations in sediment mixtures: a test of single-grain #' optical dating procedures using mixtures of laboratory-dosed quartz. #' Radiation Measurements 32, 459-465. #' #' Galbraith, R.F., 2005. Statistics for Fission Track Analysis, Chapman & Hall/CRC, Boca Raton. #' #' **Further reading** #' #' Arnold, L.J. & Roberts, R.G., 2009. Stochastic #' modelling of multi-grain equivalent dose (De) distributions: Implications #' for OSL dating of sediment mixtures. Quaternary Geochronology 4, #' 204-230. #' #' Cunningham, A.C. & Wallinga, J., 2012. Realizing the #' potential of fluvial archives using robust OSL chronologies. Quaternary #' Geochronology 12, 98-106. #' #' Rodnight, H., Duller, G.A.T., Wintle, A.G. & #' Tooth, S., 2006. Assessing the reproducibility and accuracy of optical #' dating of fluvial deposits. Quaternary Geochronology 1, 109-120. #' #' Rodnight, H. 2008. How many equivalent dose values are needed to obtain a #' reproducible distribution?. Ancient TL 26, 3-10. #' #' #' @examples #' #' ## load example data #' data(ExampleData.DeValues, envir = environment()) #' #' ## (1) apply the finite mixture model #' ## NOTE: the data set is not suitable for the finite mixture model, #' ## which is why a very small sigmab is necessary #' calc_FiniteMixture(ExampleData.DeValues$CA1, #' sigmab = 0.2, n.components = 2, #' grain.probability = TRUE) #' #' ## (2) repeat the finite mixture model for 2, 3 and 4 maximum number of fitted #' ## components and save results #' ## NOTE: The following example is computationally intensive. 
Please un-comment #' ## the following lines to make the example work. #' FMM<- calc_FiniteMixture(ExampleData.DeValues$CA1, #' sigmab = 0.2, n.components = c(2:4), #' pdf.weight = TRUE, dose.scale = c(0, 100)) #' #' ## show structure of the results #' FMM #' #' ## show the results on equivalent dose, standard error and proportion of #' ## fitted components #' get_RLum(object = FMM, data.object = "components") #' #' @md #' @export calc_FiniteMixture <- function( data, sigmab, n.components, grain.probability = FALSE, dose.scale, pdf.weight = TRUE, pdf.sigma = "sigmab", pdf.colors = "gray", pdf.scale, plot.proportions = TRUE, plot=TRUE, ... ){ ## CONSISTENCY CHECK OF INPUT DATA -------- ##============================================================================## if(missing(data)==FALSE){ if(is(data, "data.frame") == FALSE & is(data,"RLum.Results") == FALSE){ stop("[calc_FiniteMixture] Error: 'data' object has to be of type 'data.frame' or 'RLum.Results'!") } else { if(is(data, "RLum.Results") == TRUE){ data <- get_RLum(data, "data") } } } try(colnames(data)<- c("ED","ED_Error"),silent=TRUE) if(colnames(data[1])!="ED"||colnames(data[2])!="ED_Error") { cat(paste("Columns must be named 'ED' and 'ED_Error'"), fill = FALSE) stop(domain=NA) } if(sigmab <0 | sigmab >1) { cat(paste("sigmab needs to be given as a fraction between", "0 and 1 (e.g. 0.2)"), fill = FALSE) stop(domain=NA) } if(any(n.components<2) == TRUE) { cat(paste("Atleast two components need to be fitted"), fill = FALSE) stop(domain=NA) } if(pdf.sigma!="se" ) { if(pdf.sigma!="sigmab") { cat(paste("Only 'se' or 'sigmab' allowed for the pdf.sigma argument"), fill = FALSE) stop(domain=NA) } } ## ... ARGUMENTS ------------ ##============================================================================## extraArgs <- list(...) 
## console output if("verbose" %in% names(extraArgs)) { verbose<- extraArgs$verbose } else { verbose<- TRUE } # trace calculations if("trace" %in% names(extraArgs)) { trace<- extraArgs$trace } else { trace<- FALSE } # plot title if("main" %in% names(extraArgs)) { main<- extraArgs$main } else { main<- "Finite Mixture Model" } ##============================================================================## ## CALCULATIONS ##============================================================================## ## create storage variables if more than one k is provided if(length(n.components)>1) { # counter needed for various purposes cnt<- 1 # create summary matrix containing DE, standard error (se) and proportion # for each component comp.n<- matrix(data = NA, ncol = length(n.components), nrow = n.components[length(n.components)] * 3, byrow = TRUE) # create empty vector as storage for BIC and LLIK scores BIC.n<- vector(mode = "double") LLIK.n<- vector(mode = "double") # create empty vectors of type "lists" as storage for mle matrices and # grain probabilities vmat.n<- vector(mode = "list", length = length(n.components)) grain.probability.n<- vector(mode = "list", length = length(n.components)) } ## start actual calculation (loop) for each provided maximum components to ## be fitted. 
for(i in 1:length(n.components)) { k<- n.components[i] # calculate yu = log(ED), su = se(logED), n = number of grains yu<- log(data$ED) su<- data$ED_Error/data$ED n<- length(yu) # compute starting values fui<- matrix(0,n,k) pui<- matrix(0,n,k) nui<- matrix(0,n,k) pii<- rep(1/k,k) mu<- min(yu) + (max(yu)-min(yu))*(1:k)/(k+1) # remove the # in the line below to get alternative starting values # (useful to check that the algorithm converges to the same values) # mu<- quantile(yu,(1:k)/(k+1)) # compute maximum log likelihood estimates nit<- 499L wu<- 1/(sigmab^2 + su^2) rwu<- sqrt(wu) for(j in 1:nit){ for(i in 1:k) { fui[,i]<- rwu*exp(-0.5*wu*(yu-mu[i])^2) nui[,i]<- pii[i]*fui[,i] } pui<- nui/apply(nui,1,sum) mu<- apply(wu*yu*pui,2,sum)/apply(wu*pui,2,sum) pii<- apply(pui,2,mean) } # calculate the log likelihood and BIC llik<- sum( log( (1/sqrt(2*pi))*apply(nui,1,sum) )) bic<- -2*llik + (2*k - 1)*log(n) # calculate the covariance matrix and standard errors of the estimates # i.e., the dose estimtes in Gy and relative standard errors, and # the mixing proportions and standard errors. 
aui<- matrix(0,n,k) bui<- matrix(0,n,k) for(i in 1:k) { aui[,i]<- wu*(yu-mu[i]) bui[,i]<- -wu + (wu*(yu-mu[i]))^2 } delta<- diag(rep(1,k)) Au<- matrix(0,k-1,k-1) Bu<- matrix(0,k-1,k) Cu<- matrix(0,k,k) for(i in 1:(k-1)){ for(j in 1:(k-1)){ Au[i,j]<- sum( (pui[,i]/pii[i] - pui[,k]/pii[k])*(pui[,j]/pii[j] - pui[,k]/pii[k]) )}} for(i in 1:(k-1)){ for(j in 1:k){ Bu[i,j]<- sum( pui[,j]*aui[,j]*(pui[,i]/pii[i] - pui[,k]/pii[k] - delta[i,j]/pii[i] + delta[k,j]/pii[k] ) )}} for(i in 1:k){ for(j in 1:k){ Cu[i,j]<- sum( pui[,i]*pui[,j]*aui[,i]*aui[,j] - delta[i,j]*bui[,i]* pui[,i] ) }} invvmat<- rbind(cbind(Au,Bu),cbind(t(Bu),Cu)) vmat<- solve(invvmat, tol=.Machine$double.xmin) rek<- sqrt(sum(vmat[1:(k-1),1:(k-1)])) # calculate DE, relative standard error, standard error dose<- exp(mu) re<- sqrt(diag(vmat))[-c(1:(k-1))] if (any(is.nan(re))) re[is.nan(re)] <- NA sed<- dose*re estd<- rbind(dose,re,sed) # rename proportion prop<- pii # this calculates the proportional standard error of the proportion of grains # in the fitted components. However, the calculation is most likely erroneous. 
# sep<- c(sqrt(diag(vmat))[c(1:(k-1))],rek) # rename proportion estp<- prop # merge results to a data frame blk<- rep(" ",k) comp<- rbind(blk,round(estd,4),blk,round(estp,4)) comp<- data.frame(comp,row.names=c("","dose (Gy) ","rse(dose) ", "se(dose)(Gy)"," ","proportion ")) # label results data frame cp<- rep("comp",k) cn<- c(1:k) names(comp)<- paste(cp,cn,sep="") # calculate the log likelihood and BIC for a single component -- can # be useful to see if there is evidence of more than one component mu0<- sum(wu*yu)/sum(wu) fu0<- rwu*exp(-0.5*wu*(yu-mu0)^2) L0<- sum( log((1/sqrt(2*pi))*fu0 ) ) bic0<- -2*L0 + log(n) comp0<- round(c(exp(mu0),sigmab,L0,bic0),4) ## save results for k components in storage variables if(length(n.components)>1) { # vector of indices needed for finding the dose rows of the summary # matrix - position 1,4,7...n pos.n<- seq(from = 1, to = n.components[cnt]*3, by = 3) # save results of each iteration to summary matrix for(i in 1:n.components[cnt]) { comp.n[pos.n[i], cnt]<- round(dose[i], 2) #De comp.n[pos.n[i]+1, cnt]<- round(sed[i], 2) #SE comp.n[pos.n[i]+2, cnt]<- round(estp[i], 2) #Proportion } # save BIC and llik of each iteration to corresponding vector BIC.n[cnt]<- bic LLIK.n[cnt]<- llik # merge BIC and llik scores to a single data frame results.n<- rbind(BIC = round(BIC.n, 3), llik = round(LLIK.n, 3)) # save mle matrix and grain probabilities to corresponding vector vmat.n[[cnt]]<- vmat grain.probability.n[[cnt]]<- as.data.frame(pui) # increase counter by one for next iteration cnt<- cnt+1 }#EndOf::save intermediate results }##EndOf::calculation loop ##============================================================================## ## STATISTICAL CHECK ##============================================================================## if(length(n.components)>1) { ## Evaluate maximum log likelihood estimates LLIK.significant<- vector(mode = "logical") # check if llik is at least three times greater when adding a further # component for(i in 
1:c(length(LLIK.n)-1)) { LLIK.significant[i]<- (LLIK.n[i+1]/LLIK.n[i])>3 } ## Find lowest BIC score BIC.lowest<- n.components[which.min(BIC.n)] } ## OUTPUT --------- ##============================================================================## if(verbose) { ## HEADER (always printed) cat("\n [calc_FiniteMixture]") ## OUTPUT WHEN ONLY ONE VALUE FOR n.components IS PROVIDED if(length(n.components) == 1) { # covariance matrix cat(paste("\n\n--- covariance matrix of mle's ---\n\n")) print(round(vmat,6)) # general information on sample and model performance cat(paste("\n----------- meta data ------------")) cat(paste("\n n: ",n)) cat(paste("\n sigmab: ",sigmab)) cat(paste("\n number of components: ",k)) cat(paste("\n llik: ",round(llik,4))) cat(paste("\n BIC: ",round(bic,3))) # fitted components cat(paste("\n\n----------- components -----------\n\n")) print(comp) # print (to 2 decimal places) the estimated probabilities of which component # each grain is in -- sometimes useful for diagnostic purposes if(grain.probability==TRUE) { cat(paste("\n-------- grain probability -------\n\n")) print(round(pui,2)) } # output for single component cat(paste("\n-------- single component --------")) cat(paste("\n mu: ", comp0[1])) cat(paste("\n sigmab: ", comp0[2])) cat(paste("\n llik: ", comp0[3])) cat(paste("\n BIC: ", comp0[4])) cat(paste("\n----------------------------------\n\n")) }#EndOf::Output for length(n.components) == 1 ##---------------------------------------------------------------------------- ## OUTPUT WHEN ONLY >1 VALUE FOR n.components IS PROVIDED if(length(n.components) > 1) { ## final labeling of component and BIC/llik matrices # create labels dose.lab<- paste("c", 1:n.components[length(n.components)],"_dose", sep="") se.lab<- paste("c", 1:n.components[length(n.components)],"_se", sep="") prop.lab<- paste("c", 1:n.components[length(n.components)],"_prop", sep="") # empty vector which stores the labeles in correct order (dose, se, prop) n.lab<- vector(mode = 
"expression", n.components[length(n.components)]*3) # loop to store the labels in correct order (dose, se, prop) cnt<- 1 for(i in pos.n) { n.lab[i]<- dose.lab[cnt] n.lab[i+1]<- se.lab[cnt] n.lab[i+2]<- prop.lab[cnt] cnt<- cnt+1 } # label columns and rows of summary matrix and BIC/LLIK data frame colnames(comp.n)<- n.components[1]:n.components[length(n.components)] rownames(comp.n)<- n.lab colnames(results.n)<- n.components[1]:n.components[length(n.components)] ## CONSOLE OUTPUT # general information on sample and model performance cat(paste("\n\n----------- meta data ------------")) cat(paste("\n n: ",n)) cat(paste("\n sigmab: ",sigmab)) cat(paste("\n number of components: ",n.components[1],"-", n.components[length(n.components)], sep="")) # output for single component cat(paste("\n\n-------- single component --------")) cat(paste("\n mu: ", comp0[1])) cat(paste("\n sigmab: ", comp0[2])) cat(paste("\n llik: ", comp0[3])) cat(paste("\n BIC: ", comp0[4])) # print component matrix cat(paste("\n\n----------- k components -----------\n")) print(comp.n, na.print="") # print BIC scores and LLIK estimates cat(paste("\n----------- statistical criteria -----------\n")) print(results.n) ## print evaluation of statistical criteria # lowest BIC score cat(paste("\n Lowest BIC score for k =", BIC.lowest)) # first significant increase in LLIK estimates if(!any(LLIK.significant, na.rm = TRUE)) { cat(paste("\n No significant increase in maximum log", "likelihood estimates. 
\n")) } else { cat(paste("\n First significant increase in maximum log likelihood for", "k =", which(LLIK.significant==TRUE)[1], "\n\n")) } cat(paste("\n")) }#EndOf::Output for length(n.components) > 1 } ## RETURN VALUES -------- ##============================================================================## # .@data$meta BIC<- data.frame(n.components=k, BIC=bic) llik<- data.frame(n.components=k, llik=llik) if(length(n.components)>1) { BIC.n<- data.frame(n.components=n.components, BIC=BIC.n) llik.n<- data.frame(n.components=n.components, llik=LLIK.n) } # .@data$single.comp single.comp<- data.frame(mu=comp0[1],sigmab=comp0[2], llik=comp0[3],BIC=comp0[4]) # .@data$components comp.re<- t(rbind(round(estd,4),round(estp,4))) colnames(comp.re)<- c("de","rel_de_err","de_err","proportion") comp.re<- comp.re[,-2] # remove the relative error column # .@data$grain.probability grain.probability<- round(pui, 2) summary<- data.frame(comp.re) call<- sys.call() args<- list(sigmab = sigmab, n.components = n.components) # create S4 object newRLumResults.calc_FiniteMixture <- set_RLum( class = "RLum.Results", data = list( summary=summary, data=data, args=args, call=call, mle=if(length(n.components)==1){vmat}else{vmat.n}, BIC=if(length(n.components)==1){BIC}else{BIC.n}, llik=if(length(n.components)==1){llik}else{llik.n}, grain.probability=if(length(n.components)==1){grain.probability}else{grain.probability.n}, components=if(length(n.components)==1){comp.re}else{comp.n}, single.comp=single.comp)) if (anyNA(unlist(summary)) && verbose) warning("\n[calc_FiniteMixture] The model produced NA values. Either the input data are inapplicable for the model", " or the the model parameters need to be adjusted (e.g. 'sigmab')", call. 
= FALSE) ##=========## ## PLOTTING ----------- if(plot && !anyNA(unlist(summary))) try(do.call(plot_RLum.Results, c(list(newRLumResults.calc_FiniteMixture), as.list(sys.call())[-c(1,2)]))) # Return values invisible(newRLumResults.calc_FiniteMixture) } Luminescence/R/calc_Lamothe2003.R0000644000176200001440000003453214264017373016106 0ustar liggesusers#'@title Apply fading correction after Lamothe et al., 2003 #' #'@description This function applies the fading correction for the prediction of long-term fading as suggested #' by Lamothe et al., 2003. The function basically adjusts the $L_n/T_n$ values and fit a new dose-response #' curve using the function [plot_GrowthCurve]. #' #'@details #' #' **Format of `object` if `data.frame`** #' #' If `object` is of type [data.frame], all input values most be of type [numeric]. #' Dose values are excepted in seconds (s) not Gray (Gy). No `NA` values are allowed and #' the value for the natural dose (first row) should be `0`. Example for three dose points, #' column names are arbitrary: #' #' ``` #' object <- data.frame( #' dose = c(0,25,50), #' LxTx = c(4.2, 2.5, 5.0), #' LxTx_error = c(0.2, 0.1, 0.2)) #' ``` #' #' **Note on the g-value and `tc`** #' #' Users new to R and fading measurements are often confused about what to #' enter for `tc` and why it may differ from `tc.g_value`. The `tc` value #' is, by convention (Huntley & Lamothe 2001), the time elapsed between the end of the irradiation and the prompt #' measurement. Usually there is no reason for having a `tc` value different for the equivalent dose measurement #' and the *g*-value measurement, except if different equipment was used. #' However, if, for instance, the *g*-value measurement sequence was analysed #' with the *Analyst* (Duller 2015) and the `'Luminescence` is used to correct for fading, #' there is a high chance that the value returned by the *Analyst* comes normalised to 2-days; #' even the `tc` values of the measurement were identical. 
#' In such cases, the fading correction cannot be correct until the `tc.g_value` was manually #' set to 2-days (`172800` s) because the function will internally recalculate values #' to an identical `tc` value. #' #' @param object [RLum.Results-class] [data.frame] (**required**): Input data for applying the #' fading correction. Allow are (1) [data.frame] with three columns (`dose`, `LxTx`, `LxTx error`; see details), (2) #' [RLum.Results-class] object created by the function [analyse_SAR.CWOSL] or [analyse_pIRIRSequence] #' #' @param dose_rate.envir [numeric] vector of length 2 (**required**): Environmental dose rate in mGy/a #' #' @param dose_rate.source [numeric] vector of length 2 (**required**): Irradiation source dose rate in Gy/s, #' which is, according to Lamothe et al. (2003) De/t*. #' #' @param g_value [numeric] vector of length 2 (**required**): g_value in \%/decade *recalculated at the moment* #' the equivalent dose was calculated, i.e. `tc` is either similar for the *g*-value measurement **and** the De measurement or #' needs be to recalculated (cf. [calc_FadingCorr]). Inserting a normalised g-value, e.g., normalised to 2-days , will #' lead to wrong results #' #' @param tc [numeric] (optional): time in seconds between the **end** of the irradiation and #' the prompt measurement used in the equivalent dose estimation (cf. Huntley & Lamothe 2001). #' If set to `NULL` it is assumed that `tc` is similar for the equivalent dose #' estimation and the *g*-value estimation #' #' @param tc.g_value [numeric] (with default): the time in seconds between irradiation and the #' prompt measurement estimating the *g*-value. If the *g*-value was normalised to, e.g., 2 days, #' this time in seconds (i.e., `172800`) should be entered here along with the time used for the #' equivalent dose estimation. 
If nothing is provided the time is set to `tc`, which is the #' usual case for *g*-values obtained using the SAR method and *g*-values that had been not normalised to 2 days. #' Note: If this value is not `NULL` the functions expects a [numeric] value for `tc`. #' #' @param plot [logical] (with default): Enables/disables plot output #' #' @param verbose [logical] (with default): Enables/disables terminal verbose mode #' #' @param ... further arguments passed to the function [plot_GrowthCurve] #' #' @return The function returns are graphical output produced by the function [plot_GrowthCurve] and #' an [RLum.Results-class]. #' #' -----------------------------------\cr #' `[ NUMERICAL OUTPUT ]`\cr #' -----------------------------------\cr #' #' **`RLum.Results`**-object #' #' **slot:** **`@data`** #' #' \tabular{lll}{ #' **Element** \tab **Type** \tab **Description**\cr #' `$data` \tab `data.frame` \tab the fading corrected values \cr #' `$fit` \tab `nls` \tab the object returned by the dose response curve fitting \cr #' } #' #' '**slot:** **`@info`** #' #' The original function call #' #' @references #' #' Huntley, D.J., Lamothe, M., 2001. Ubiquity of anomalous fading in K-feldspars and the measurement #' and correction for it in optical dating. Canadian Journal of Earth Sciences 38, 1093-1106. #' #' Duller, G.A.T., 2015. The Analyst software package for luminescence data: overview and recent improvements. #' Ancient TL 33, 35–42. #' #' Lamothe, M., Auclair, M., Hamzaoui, C., Huot, S., 2003. #' Towards a prediction of long-term anomalous fading of feldspar IRSL. Radiation Measurements 37, #' 493-498. 
#' #' @section Function version: 0.1.0 #' #' @author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany), Norbert Mercier, #' IRAMAT-CRP2A, Université Bordeaux Montaigne (France) #' #' @keywords datagen #' #' @seealso [plot_GrowthCurve], [calc_FadingCorr], [analyse_SAR.CWOSL], [analyse_pIRIRSequence] #' #' @examples #' #'##load data #'##ExampleData.BINfileData contains two BINfileData objects #'##CWOSL.SAR.Data and TL.SAR.Data #'data(ExampleData.BINfileData, envir = environment()) #' #'##transform the values from the first position in a RLum.Analysis object #'object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos=1) #' #'##perform SAR analysis and set rejection criteria #'results <- analyse_SAR.CWOSL( #' object = object, #' signal.integral.min = 1, #' signal.integral.max = 2, #' background.integral.min = 900, #' background.integral.max = 1000, #' verbose = FALSE, #' plot = FALSE, #' onlyLxTxTable = TRUE #' ) #' #' ##run fading correction #' results_corr <- calc_Lamothe2003( #' object = results, #' dose_rate.envir = c(1.676 , 0.180), #' dose_rate.source = c(0.184, 0.003), #' g_value = c(2.36, 0.6), #' plot = TRUE, #' fit.method = "EXP") #' #' #'@md #'@export calc_Lamothe2003 <- function( object, dose_rate.envir, dose_rate.source, g_value, tc = NULL, tc.g_value = tc, verbose = TRUE, plot = TRUE, ... ){ # Input parameter test ------------------------------------------------------------------------ ##object if(missing(object)){ stop("[calc_Lamothe2003()] Input for 'object' missing but required!", call. = FALSE) } ##dose_rate.envir if(missing(dose_rate.envir)){ stop("[calc_Lamothe2003()] Input for 'dose_rate.envir' missing but required!", call. = FALSE) } ##dose_rate.source if(missing(dose_rate.source)){ stop("[calc_Lamothe2003()] Input for 'dose_rate.source' missing but required!", call. = FALSE) } ##g_value if(missing(g_value)){ stop("[calc_Lamothe2003()] Input for 'g_value' missing but required!", call. 
= FALSE) } ##check input type and length ##dose_rate.envir if(!inherits(dose_rate.envir, "numeric") || length(dose_rate.envir) < 2){ stop("[calc_Lamothe2003()] Input for 'dose_rate.envir' is not of type 'numeric' and/or of length < 2!", call. = FALSE) }else{ if(length(dose_rate.envir) > 2){ warning("[calc_Lamothe2003()] 'dose_rate.envir' has length > 2. Take only the first two entries.",call. = FALSE, immediate. = TRUE) dose_rate.envir <- dose_rate.envir[1:2] } } ##dose_rate.source if(!inherits(dose_rate.source, "numeric") || length(dose_rate.source) < 2){ stop("[calc_Lamothe2003()] Input for 'dose_rate.source' is not of type 'numeric' and/or of length < 2!", call. = FALSE) }else{ if(length(dose_rate.source) > 2){ warning("[calc_Lamothe2003()] 'dose_rate.source' has length > 2. Take only the first two entries.",call. = FALSE, immediate. = TRUE) dose_rate.source <- dose_rate.source[1:2] } } ##tc if(is.null(tc) && !is.null(tc.g_value)) stop("[calc_Lamothe2003()] If you set 'tc.g_value' you have to provide a value for 'tc' too!", call. = FALSE) # Input assignment ----------------------------------------------------------------------------- ## We allow input as data.frame() and RLum.Results objects ... the output from functions listed ## below .. 
if we allow a data.frame it should have at least Dose, Lx/Tx, Lx/Tx Error if(inherits(object, "data.frame")){ data <- object[,1:3] ##add signal information if(any(grepl(pattern = "Signal", x = colnames(object), fixed = TRUE))){ SIGNAL <- object[[which(grepl(pattern = "Signal", colnames(object), fixed = TRUE))[1]]] }else{ SIGNAL <- NA } }else if(inherits(object, "RLum.Results")){ if(object@originator == "analyse_SAR.CWOSL" || object@originator == "analyse_pIRIRSequence"){ ##now we do crazy stuff, we make a self-call here since this file can contain a lot of information ##get number of datasets; we have to search for the word natural, everything else is not safe enough full_table <- object@data$LnLxTnTx.table set_start <- which(grepl(full_table$Name, pattern = "Natural", fixed = TRUE)) set_end <- c(set_start[-1] - 1, nrow(full_table)) ##signal column if available if(object@originator == "analyse_pIRIRSequence"){ object <- full_table[,c("Dose", "LxTx", "LxTx.Error", "Signal")] }else{ object <- full_table[,c("Dose", "LxTx", "LxTx.Error")] } ##now run the function results <- lapply(1:length(set_start), function(x){ calc_Lamothe2003( object = object[set_start[x]:set_end[x], ], dose_rate.envir = dose_rate.envir, dose_rate.source = dose_rate.source, g_value = g_value, tc = tc, tc.g_value = tc.g_value, verbose = verbose, plot = plot, ... ) }) ##merge output return(merge_RLum(results)) }else{ stop(paste0("[calc_Lamothe2003()] Input for 'object' created by function ",object@originator, "() not supported!"), call. = FALSE) } }else{ stop("[calc_Lamothe2003()] Unsupported data type for 'object'!", call. = FALSE) } # Apply correction---------------------------------------------------------------------------- ##recalculate the g-value to the given tc ... 
##re-calculation thanks to the help by Sébastien Huot, e-mail: 2016-07-19 if(!is.null(tc)){ k0 <- g_value / 100 / log(10) k1 <- k0 / (1 - k0 * log(tc[1]/tc.g_value[1])) g_value <- 100 * k1 * log(10) } # transform irradiation times to dose values data[[1]] <- data[[1]] * dose_rate.source[1] ## fading correction (including dose rate conversion from Gy/s to Gy/ka) ## and error calculation ## the formula in Lamothe et al. (2003) reads: ## I_faded = I_unfaded*(1-g*log((1/e)*DR_lab/DR_soil))) rr <- 31.5576e+09 * dose_rate.source[1] / (exp(1) * dose_rate.envir[1]) s_rr <- sqrt((dose_rate.source[2]/dose_rate.source[1])^2 + (dose_rate.envir[2]/dose_rate.envir[1])^2) * rr Fading_C <- 1 - g_value[1] / 100 * log10(rr) sFading_C <- sqrt((log10(rr) * g_value[2]/100)^2 + (g_value[1]/(100 * rr) * s_rr)^2) # store original Lx/Tx in new object LnTn_BEFORE <- data[[2]][1] LnTn_BEFORE.ERROR <- data[[3]][1] # apply to input data data[[2]][1] <- data[[2]][1] / Fading_C data[[3]][1] <- sqrt((data[[3]][1]/data[[2]][1])^2 + ((1/Fading_C - 1) * sFading_C/Fading_C)^2) * data[[2]][1] ##TODO discuss with Norbert # data[[3]][1] <- sqrt((data[[3]][1]/data[[2]][1])^2 + # (sFading_C/Fading_C)^2) * data[[2]][1] # # print(LnTn_BEFORE.ERROR/LnTn_BEFORE) # print(data[[3]][1]/ data[[2]][1] ) # Fitting --------------------------------------------------------------------------------- ##set arguments argument_list <- list( sample = data, verbose = FALSE, main = "Corrected Dose Response Curve", xlab = "Dose [Gy]", txtProgressBar = verbose, output.plotExtended = FALSE, output.plot = plot ) ##filter doubled arguments argument_list <- modifyList(x = argument_list, val = list(...)) ##run plot function fit_results <- do.call(what = plot_GrowthCurve,args = argument_list) # Age calculation ----------------------------------------------------------------------------- Age <- get_RLum(fit_results)[["De"]] / dose_rate.envir[1] s_Age <- sqrt((100*get_RLum(fit_results)[["De.Error"]]/get_RLum(fit_results)[["De"]])^2 + 
(100*dose_rate.envir[2]/dose_rate.envir[1])^2) *Age/100 # Terminal output ----------------------------------------------------------------------------- if(verbose){ cat("\n[calc_Lamothe2003()] \n\n") cat(" Used g_value:\t\t", round(g_value[1],3)," \u00b1 ",round(g_value[2],3),"%/decade \n") if(!is.null(tc)){ cat(" tc for g_value:\t", tc.g_value, " s\n") } cat("\n") cat(" Fading_C:\t\t", round(Fading_C,3), " \u00b1 ", round(sFading_C,3),"\n") cat(" Corrected Ln/Tn:\t", round(data[[2]][1],3), " \u00b1 ", round(data[[3]][1],3),"\n") cat(" Corrected De:\t\t", round(get_RLum(fit_results)[["De"]],2), " \u00b1 ", round(get_RLum(fit_results)[["De.Error"]],2)," Gy \n") cat("--------------------------------------------------------\n") cat(" Corrected Age:\t\t", round(Age,2), " \u00b1 ", round(s_Age,2)," ka \n") cat("--------------------------------------------------------\n") } # Compile output ------------------------------------------------------------------------------ return( set_RLum( class = "RLum.Results", data = list( data = data.frame( g_value = g_value[1], g_value.ERROR = g_value[2], tc = ifelse(is.null(tc), NA, tc), tc.g_value = ifelse(is.null(tc.g_value), NA, tc.g_value), FADING_C = Fading_C, FADING_C.ERROR = sFading_C, LnTn_BEFORE = LnTn_BEFORE, LnTn_BEFORE.ERROR = LnTn_BEFORE.ERROR, LnTn_AFTER = data[[2]][1], LnTn_AFTER.ERROR = data[[3]][1], DE = get_RLum(fit_results)[["De"]], DE.ERROR = get_RLum(fit_results)[["De.Error"]], AGE = Age, AGE.ERROR = s_Age, SIGNAL = SIGNAL ), fit = get_RLum(fit_results, data.object = "Fit") ), info = list( call = sys.call() ) ) ) } Luminescence/R/fit_CWCurve.R0000644000176200001440000007352414264017373015412 0ustar liggesusers#' Nonlinear Least Squares Fit for CW-OSL curves -beta version- #' #' The function determines the weighted least-squares estimates of the #' component parameters of a CW-OSL signal for a given maximum number of #' components and returns various component parameters. 
The fitting procedure #' uses the [nls] function with the `port` algorithm. #' #' **Fitting function** #' #' The function for the CW-OSL fitting has the general form: #' #' \deqn{y = I0_{1}*\lambda_{1}*exp(-\lambda_1*x) + ,\ldots, + I0_{i}*\lambda_{i}*exp(-\lambda_i*x) } #' #' where \eqn{0 < i < 8} #' #' and \eqn{\lambda} is the decay constant \cr #' and \eqn{I0} the initial number of trapped electrons. #' #' *(for the used equation cf. Boetter-Jensen et al., 2003, Eq. 2.31)* #' #' **Start values** #' #' Start values are estimated automatically by fitting a linear function to the #' logarithmized input data set. Currently, there is no option to manually #' provide start parameters. #' #' **Goodness of fit** #' #' The goodness of the fit is given as pseudoR^2 value (pseudo coefficient of #' determination). According to Lave (1970), the value is calculated as: #' #' \deqn{pseudoR^2 = 1 - RSS/TSS} #' #' where \eqn{RSS = Residual~Sum~of~Squares} \cr #' and \eqn{TSS = Total~Sum~of~Squares} #' #' #' #' **Error of fitted component parameters** #' #' The 1-sigma error for the #' components is calculated using the function [stats::confint]. Due to #' considerable calculation time, this option is deactivated by default. In #' addition, the error for the components can be estimated by using internal R #' functions like [summary]. See the [nls] help page #' for more information. #' #' *For details on the nonlinear regression in R, see Ritz & Streibig (2008).* #' #' @param values [RLum.Data.Curve-class] or [data.frame] (**required**): #' x, y data of measured values (time and counts). See examples. #' #' @param n.components.max [vector] (*optional*): #' maximum number of components that are to be used for fitting. #' The upper limit is 7. #' #' @param fit.failure_threshold [vector] (*with default*): #' limits the failed fitting attempts. #' #' @param fit.method [character] (*with default*): #' select fit method, allowed values: `'port'` and `'LM'`. 
`'port'` uses the 'port' #' routine from the function [nls] `'LM'` utilises the function `nlsLM` from #' the package `minpack.lm` and with that the Levenberg-Marquardt algorithm. #' #' @param fit.trace [logical] (*with default*): #' traces the fitting process on the terminal. #' #' @param fit.calcError [logical] (*with default*): #' calculate 1-sigma error range of components using [stats::confint] #' #' @param LED.power [numeric] (*with default*): #' LED power (max.) used for intensity ramping in mW/cm^2. #' **Note:** The value is used for the calculation of the absolute #' photoionisation cross section. #' #' @param LED.wavelength [numeric] (*with default*): #' LED wavelength used for stimulation in nm. #' **Note:** The value is used for the calculation of the absolute #' photoionisation cross section. #' #' @param cex.global [numeric] (*with default*): #' global scaling factor. #' #' @param sample_code [character] (*optional*): #' sample code used for the plot and the optional output table (`mtext`). #' #' @param output.path [character] (*optional*): #' output path for table output containing the results of the fit. The file #' name is set automatically. If the file already exists in the directory, #' the values are appended. #' #' @param output.terminal [logical] (*with default*): #' terminal output with fitting results. #' #' @param output.terminalAdvanced [logical] (*with default*): #' enhanced terminal output. Requires `output.terminal = TRUE`. #' If `output.terminal = FALSE` no advanced output is possible. #' #' @param plot [logical] (*with default*): #' returns a plot of the fitted curves. #' #' @param ... further arguments and graphical parameters passed to [plot]. #' #' @return #' **plot (*optional*)** #' #' the fitted CW-OSL curves are returned as plot. #' #' **table (*optional*)** #' #' an output table (`*.csv`) with parameters of the fitted components is #' provided if the `output.path` is set. 
#' #' #' **RLum.Results** #' #' Beside the plot and table output options, an [RLum.Results-class] object is #' returned. #' #' `fit`: #' an `nls` object (`$fit`) for which generic R functions are #' provided, e.g. [summary], [stats::confint], [profile]. For more #' details, see [nls]. #' #' `output.table`: #' a [data.frame] containing the summarised parameters including the error #' #' `component.contribution.matrix`: #' [matrix] containing the values for the component to sum contribution plot #' (`$component.contribution.matrix`). #' #' Matrix structure:\cr #' Column 1 and 2: time and `rev(time)` values \cr #' Additional columns are used for the components, two for each component, #' containing I0 and n0. The last columns `cont.` provide information on #' the relative component contribution for each time interval including the row #' sum for this values. #' #' **object** #' #' beside the plot and table output options, an [RLum.Results-class] object #' is returned. #' #' `fit`: #' an `nls` object (`$fit`) for which generic R functions #' are provided, e.g. [summary], [confint], [profile]. For more #' details, see [nls]. #' #' `output.table`: #' a [data.frame] containing the summarised parameters including the error\cr #' `component.contribution.matrix`: [matrix] containing the values #' for the component to sum contribution plot (`$component.contribution.matrix`).\cr #' #' Matrix structure:\cr #' Column 1 and 2: time and `rev(time)` values\cr #' Additional columns are used for the components, two for each component, #' containing I0 and n0. The last columns `cont.` provide information on #' the relative component contribution for each time interval including the row #' sum for this values. #' #' #' @note #' #' **Beta version - This function has not been properly tested yet and** #' **should therefore not be used for publication purposes!** #' #' The pseudo-R^2 may not be the best parameter to describe the goodness of the #' fit. 
#' The trade off between the `n.components` and the pseudo-R^2 value
#' is currently not considered.
#'
#' The function **does not** ensure that the fitting procedure has reached a
#' global minimum rather than a local minimum!
#'
#' @section Function version: 0.5.2
#'
#' @author
#' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#' @seealso [fit_LMCurve], [plot],[nls], [RLum.Data.Curve-class],
#' [RLum.Results-class], [get_RLum], [minpack.lm::nlsLM]
#'
#' @references
#' Boetter-Jensen, L., McKeever, S.W.S., Wintle, A.G., 2003.
#' Optically Stimulated Luminescence Dosimetry. Elsevier Science B.V.
#'
#' Lave, C.A.T., 1970. The Demand for Urban Mass Transportation. The Review of
#' Economics and Statistics, 52 (3), 320-323.
#'
#' Ritz, C. & Streibig, J.C., 2008. Nonlinear Regression with R. In: R.
#' Gentleman, K. Hornik, G. Parmigiani, eds., Springer, p. 150.
#'
#' @keywords dplot models
#'
#' @examples
#'
#' ##load data
#' data(ExampleData.CW_OSL_Curve, envir = environment())
#'
#' ##fit data
#' fit <- fit_CWCurve(values = ExampleData.CW_OSL_Curve,
#'                    main = "CW Curve Fit",
#'                    n.components.max = 4,
#'                    log = "x")
#'
#' @md
#' @export
fit_CWCurve<- function(
  values,
  n.components.max,
  fit.failure_threshold = 5,
  fit.method = "port",
  fit.trace = FALSE,
  fit.calcError = FALSE,
  LED.power = 36,
  LED.wavelength = 470,
  cex.global = 0.6,
  sample_code = "Default",
  output.path,
  output.terminal = TRUE,
  output.terminalAdvanced = TRUE,
  plot = TRUE,
  ...
){

  ##TODO
  ##remove output.path

  # INTEGRITY CHECKS --------------------------------------------------------

  ##INPUT OBJECTS
  ##only RLum.Data.Curve objects and plain data.frames are accepted as input
  if(is(values, "RLum.Data.Curve") == FALSE & is(values, "data.frame") == FALSE){
    stop("[fit_CWCurve()] Input object is not of type 'RLum.Data.Curve' or 'data.frame'!", call. = FALSE)
  }

  if(is(values, "RLum.Data.Curve") == TRUE){
    ##extract time (x) and count (y) columns from the curve object
    x <- values@data[,1]
    y <- values@data[,2]

    ##needed due to inconsistencies in the R code below
    values <- data.frame(x,y)
  }else{
    ##set x and y values
    x<-values[,1]
    y<-values[,2]
  }

  # Deal with extra arguments -----------------------------------------------

  ##deal with additional arguments; defaults are used when not supplied via ...
  extraArgs <- list(...)

  main <- if("main" %in% names(extraArgs)) {extraArgs$main} else {"CW-OSL Curve Fit"}
  log <- if("log" %in% names(extraArgs)) {extraArgs$log} else {""}
  xlab <- if("xlab" %in% names(extraArgs)) {extraArgs$xlab} else {"Time [s]"}
  ylab <- if("ylab" %in% names(extraArgs)) {extraArgs$ylab} else
    {paste("OSL [cts/",round(max(x)/length(x), digits = 2)," s]",sep="")}

  ##============================================================================##
  ## FITTING
  ##============================================================================##
  ##
  ##////equation used for fitting////(start)
  ##build an unevaluated expression summing I0[i]*lambda[i]*exp(-lambda[i]*x)
  ##over the requested component indices; later used with eval() for plotting
  fit.equation <- function(I0.i,lambda.i){
    equation<-parse(
      text=paste("I0[",I0.i,"]*lambda[",lambda.i,"]*exp(-lambda[",lambda.i,"]*x)",
                 collapse="+",sep=""))
    return(equation)
  }
  ##////equation used for fitting///(end)

  ##////equation used for fitting////(start)
  ##simplified variant without the leading lambda factor (first-pass fit)
  fit.equation.simple <- function(I0.i,lambda.i){
    equation<-parse(
      text=paste("I0[",I0.i,"]*exp(-lambda[",lambda.i,"]*x)",
                 collapse="+",sep=""))
    return(equation)
  }
  ##////equation used for fitting///(end)

  ##set formula elements for fitting functions
  ## the upper two functions should be removed ... but changes are needed ... TODO

  ##////equation used for fitting////(start)
  ##formula with named coefficients I0.i/lambda.i as consumed by nls()/nlsLM()
  fit.formula <- function(n.components){
    I0 <- paste0("I0.",1:n.components)
    lambda <- paste0("lambda.",1:n.components)
    as.formula(paste0("y ~ ", paste(I0," * ", lambda, "* exp(-",lambda," * x)", collapse=" + ")))
  }
  ##////equation used for fitting///(end)

  ##////equation used for fitting////(start)
  ##simplified formula (no lambda prefactor) used for the first-pass fit
  fit.formula.simple <- function(n.components){
    I0 <- paste0("I0.",1:n.components)
    lambda <- paste0("lambda.",1:n.components)
    as.formula(paste0("y ~ ", paste(I0," * exp(-",lambda," * x)", collapse=" + ")))
  }
  ##////equation used for fitting///(end)

  ##set variables
  fit.trigger <- TRUE #triggers if the fitting should be stopped
  n.components <- 1 #number of components used for fitting - start with 1
  fit.failure_counter <- 0 #counts the failed fitting attempts

  ##if n.components.max is missing, then it is Inf (no upper limit)
  if(missing(n.components.max)==TRUE){n.components.max<-Inf}

  ##
  ##++++Fitting loop++++(start)
  ##each iteration adds one component and re-fits; the last successful fit is
  ##kept in 'fit' (note: 'fit' only exists after the first success)
  while(fit.trigger==TRUE & n.components <= n.components.max){

    ##(0) START PARAMETER ESTIMATION
    ##rough automatic start parameter estimation

    ##I0: crude guess, 1/3 of the first count value for every component
    I0<-rep(values[1,2]/3,n.components)
    names(I0) <- paste0("I0.",1:n.components)

    ##lambda
    ##ensure that no values <=0 are included; remove them for start parameter
    ##estimation and fit a linear function as a first guess (log-linearised decay)
    if(min(y)<=0){
      temp.values<-data.frame(x[-which(y<=0)], log(y[-which(y<=0)]))
    }else{
      temp.values<-data.frame(x, log(y))
    }

    temp<-lm(temp.values)
    lambda<-abs(temp$coefficient[2])/nrow(values)

    ##each further component starts 100x slower than the previous one
    k<-2
    while(k<=n.components){
      lambda[k]<-lambda[k-1]/100
      k<-k+1
    }
    names(lambda) <- paste0("lambda.",1:n.components)

    ##(1) FIRST FIT WITH A SIMPLE FUNCTION
    if(fit.method == "LM"){
      ##try fit simple
      fit.try<-suppressWarnings(try(minpack.lm::nlsLM(fit.formula.simple(n.components),
                                                      data=values,
                                                      start=c(I0,lambda),
                                                      na.action = "na.exclude",
                                                      trace = fit.trace,
                                                      control = minpack.lm::nls.lm.control(
                                                        maxiter = 500
                                                      )),
                                    silent = TRUE
      ))#end try
    }else if(fit.method == "port"){
      ##try fit simple
      fit.try<-suppressWarnings(try(nls(fit.formula.simple(n.components),
                                        data=values,
                                        trace = fit.trace,
                                        algorithm="port",
                                        na.action = "na.exclude",
                                        start=c(I0,lambda),
                                        nls.control(
                                          tol = 1,
                                          maxiter=100,
                                          warnOnly=FALSE,
                                          minFactor=1/1024
                                        ),
                                        lower=rep(0,n.components * 2)# set lower boundaries for components
      ), silent=TRUE# nls
      ))#end try
    }else{
      stop("[fit_CWCurve()] fit.method unknown.", call. = FALSE)
    }

    ##(3) FIT WITH THE FULL FUNCTION
    ##the simple fit only serves to refine start values for the full model
    if(inherits(fit.try,"try-error") == FALSE){

      ##grep parameters from simple fit to further work with them
      parameters <- coef(fit.try)

      ##grep parameters and set new starting parameters; here just lambda is chosen as
      ##it seems to be the most valuable parameter
      lambda <- parameters[(n.components+1):length(parameters)]

      if(fit.method == "LM"){
        ##try fit simple
        fit.try<-suppressWarnings(try(minpack.lm::nlsLM(fit.formula(n.components),
                                                        data=values,
                                                        start=c(I0,lambda),
                                                        trace = fit.trace,
                                                        na.action = "na.exclude",
                                                        lower = rep(0,n.components * 2),
                                                        control = minpack.lm::nls.lm.control(
                                                          maxiter = 500
                                                        )),
                                      silent = TRUE))

        ## HACK:
        # minpack.lm::nlsLM() stores the 'lower' argument as class "call" rather
        # than "numeric" as nls() does. Before running confint() on this object
        # we overwrite the "lower" slot with the numeric values again.
        if (!inherits(fit.try, "try-error")) {
          fit.try$call$lower <- rep(0,n.components * 2)
        }
      }else{
        ##try fit
        fit.try<-suppressWarnings(try(nls(fit.formula(n.components),
                                          trace=fit.trace,
                                          data=values,
                                          algorithm="port",
                                          na.action = "na.exclude",
                                          start=c(I0,lambda),
                                          nls.control(
                                            maxiter = 500,
                                            warnOnly = FALSE,
                                            minFactor = 1/4096
                                          ),
                                          lower=rep(0,n.components * 2)# set lower boundaries for components
        ), silent=TRUE# nls
        ))#end try
      }#fit.method
    }

    ##count failed attempts for fitting
    ##NOTE: on success 'fit' is updated and n.components is incremented, so at
    ##the end of the loop n.components is one above the last successful fit
    if(inherits(fit.try,"try-error")==FALSE){
      fit <- fit.try
      n.components <- n.components + 1
    }else{
      n.components<-n.components+1
      fit.failure_counter <- fit.failure_counter+1
      ##keep the try-error as 'fit' if nothing succeeded so far
      if(n.components==fit.failure_counter & exists("fit")==FALSE){fit<-fit.try}}

    ##stop fitting after a given number of wrong attempts
    if(fit.failure_counter>=fit.failure_threshold){
      fit.trigger <- FALSE
      if(!exists("fit")){fit <- fit.try}
    }else if(n.components == n.components.max & exists("fit") == FALSE){
      fit <- fit.try
    }

  }##end while
  ##++++Fitting loop++++(end)

  ##============================================================================##
  ## FITTING OUTPUT
  ##============================================================================##

  ##grep parameters
  if(inherits(fit,"try-error")==FALSE){
    parameters <- coef(fit)

    ##correct fit equation for the de facto used number of components
    I0.i<-1:(length(parameters)/2)
    lambda.i<-1:(length(parameters)/2)
    fit.function<-fit.equation(I0.i=I0.i,lambda.i=lambda.i)
    n.components<-length(I0.i)

    ##write parameters in vectors and order by decreasing lambda value
    ##(fastest component first)
    I0<-parameters[1:(length(parameters)/2)]
    lambda<-parameters[(1+(length(parameters)/2)):length(parameters)]
    o<-order(lambda,decreasing=TRUE)
    I0<-I0[o]
    lambda<-lambda[o]

    ##============================================================================##
    ## Additional Calculation
    ##============================================================================##

    ## ---------------------------------------------
    ##calculate stimulation intensity Schmidt (2008)
    ##Energy - E = h*v (photon energy at the LED wavelength)
    h<-6.62606957e-34 #in W*s^2 - Planck constant
    ny<-299792458/(LED.wavelength/10^9) #frequency of light
    E<-h*ny

    ##transform LED.power in W/cm^2
    LED.power<-LED.power/1000

    ##gets stimulation intensity (photon flux = power density / photon energy)
    stimulation_intensity<-LED.power/E

    ## ---------------------------------------------
    ##calculate photoionisation cross section and print on terminal
    ##using EQ (5) in Kitis
    cs<-as.vector(lambda/stimulation_intensity)
    cs.rel<-round(cs/cs[1],digits=4) #relative to the fastest component

    ## ---------------------------------------------
    ##coefficient of determination after Lave (1970): pseudo-R^2 = 1 - RSS/TSS
    RSS <- sum(residuals(fit)^2) #residual sum of squares
    TSS <- sum((y - mean(y))^2) #total sum of squares
    pR<-round(1-RSS/TSS,digits=4)

    if(pR<0){
      warning("pseudo-R^2 < 0!")
    }

    ## ---------------------------------------------
    ##calculate 1-sigma CONFIDENCE INTERVAL
    ##errors default to NA; only computed on request (confint() is slow)
    lambda.error<-rep(NA, n.components)
    I0.error<-rep(NA, n.components)

    if(fit.calcError==TRUE){
      ##option for confidence interval
      values.confint<-confint(fit, level=0.68)
      I0.confint<-values.confint[1:(length(values.confint[,1])/2),]
      lambda.confint<-values.confint[((length(values.confint[,1])/2)+1):length(values.confint[,1]),]

      ##error calculation (width of the 68% interval)
      I0.error<-as.vector(abs(I0.confint[,1]-I0.confint[,2]))
      lambda.error<-as.vector(abs(lambda.confint[,1]-lambda.confint[,2]))
    }#endif::fit.calcError

    ##============================================================================##
    ## Terminal Output
    ##============================================================================##
    if (output.terminal==TRUE){
      ##print rough fitting information - use the nls() control for more information
      writeLines("\n[fit_CWCurve()]")
      writeLines(paste("\nFitting was finally done using a ",n.components,
                       "-component function (max=",n.components.max,"):",sep=""))
      writeLines("------------------------------------------------------------------------------")
      writeLines(paste0("y ~ ", as.character(fit.formula(n.components))[3], "\n"))

      ##combine values and change row names
      fit.results<-cbind(I0,I0.error,lambda,lambda.error,cs, cs.rel)
      row.names(fit.results)<-paste("c", 1:(length(parameters)/2), sep="")

      ##print parameters
      print(fit.results)

      #print some additional information
      if(fit.calcError==TRUE){writeLines("(errors quoted as 1-sigma values)")}
      writeLines("------------------------------------------------------------------------------")
    }#end if

    ##============================================================================##
    ## Terminal Output (advanced)
    ##============================================================================##
    if (output.terminalAdvanced==TRUE && output.terminal==TRUE){
      ##sum of squares
      writeLines(paste("pseudo-R^2 = ",pR,sep=""))
    }#end if

    ##============================================================================##
    ## Table Output
    ##============================================================================##
    ##write output table if values exists
    if (exists("fit")){
      ##set data.frame for a max value of 7 components (7 x 6 columns)
      output.table<-data.frame(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,
                               NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,
                               NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA)
      output.tableColNames<-c("I01","I01.error","lambda1", "lambda1.error",
                              "cs1","cs1.rel",
                              "I02","I02.error","lambda2", "lambda2.error",
                              "cs2","cs2.rel",
                              "I03","I03.error","lambda3", "lambda3.error",
                              "cs3","cs3.rel",
                              "I04","I04.error","lambda4", "lambda4.error",
                              "cs4","cs4.rel",
                              "I05","I05.error","lambda5", "lambda5.error",
                              "cs5","cs5.rel",
                              "I06","I06.error","lambda6", "lambda6.error",
                              "cs6","cs6.rel",
                              "I07","I07.error","lambda7", "lambda7.error",
                              "cs7","cs7.rel"
      )

      ##write components in output table (6 columns per component)
      i<-0
      k<-1
      while(i<=n.components*6){
        output.table[1,i+1]<-I0[k]
        output.table[1,i+2]<-I0.error[k]
        output.table[1,i+3]<-lambda[k]
        output.table[1,i+4]<-lambda.error[k]
        output.table[1,i+5]<-cs[k]
        output.table[1,i+6]<-cs.rel[k]
        i<-i+6
        k<-k+1
      }

      ##add pR and n.components
      output.table<-cbind(sample_code,n.components,output.table,pR)

      ##alter column names
      colnames(output.table)<-c("sample_code","n.components",
                                output.tableColNames,"pseudo-R^2")

      if(missing(output.path)==FALSE){
        ##write file with just the header if the file does not exist,
        ##otherwise append without repeating the header
        if(file.exists(paste(output.path,"fit_CWCurve_Output_",sample_code,".csv",sep=""))==FALSE){
          write.table(output.table,file=paste(output.path,"fit_CWCurve_Output_",
                                              sample_code,".csv",sep=""), sep=";"
                      ,row.names=FALSE)
        }else{
          write.table(output.table,file=paste(output.path,"fit_CWCurve_Output_",
                                              sample_code,".csv",sep=""), sep=";"
                      ,row.names=FALSE, append=TRUE, col.names=FALSE)
        }#endif::for write option
      }#endif::table output

      ##============================================================================##
      ## COMPONENT TO SUM CONTRIBUTION PLOT
      ##============================================================================##

      ##+++++++++++++++++++++++++++++++
      ##set matrix
      ##set polygon matrix for optional plot output
      ##columns: x, rev(x), then two columns (upper/lower polygon edge) per component
      component.contribution.matrix <- matrix(NA,
                                              nrow = length(values[,1]),
                                              ncol = (2*length(I0)) + 2)

      ##set x-values
      component.contribution.matrix[,1] <- values[,1]
      component.contribution.matrix[,2] <- rev(values[,1])

      ##+++++++++++++++++++++++++++++++
      ##set 1st polygon
      ##1st polygon (calculation): component share of the sum curve in percent
      y.contribution_first<-(I0[1]*lambda[1]*exp(-lambda[1]*x))/(eval(fit.function))*100

      ##avoid NaN values (might happen with synthetic curves)
      y.contribution_first[is.nan(y.contribution_first)==TRUE] <- 0

      ##set values in matrix
      component.contribution.matrix[,3] <- 100
      component.contribution.matrix[,4] <- 100 - rev(y.contribution_first)

      ##+++++++++++++++++++++++++++++++
      ##set polygons in between
      ##polygons in between (calculate and plot): stacked below the previous ones
      if (length(I0)>2){
        y.contribution_prev <- y.contribution_first
        i<-2

        ##matrix stepping
        k <- seq(3, ncol(component.contribution.matrix), by=2)

        while (i<=length(I0)-1) {
          y.contribution_next<-I0[i]*lambda[i]*exp(-lambda[i]*x)/(eval(fit.function))*100

          ##avoid NaN values
          y.contribution_next[is.nan(y.contribution_next)==TRUE] <- 0

          ##set values in matrix
          component.contribution.matrix[,k[i]] <- 100 - y.contribution_prev
          component.contribution.matrix[, k[i]+1] <- rev(100-y.contribution_prev-
                                                           y.contribution_next)

          y.contribution_prev <- y.contribution_prev + y.contribution_next

          i <- i+1
        }#end while loop
      }#end if

      ##+++++++++++++++++++++++++++++++
      ##set last polygon

      ##last polygon (calculation)
      y.contribution_last <- I0[length(I0)]*lambda[length(lambda)]*exp(-lambda[length(lambda)]*x)/
        (eval(fit.function))*100

      ##avoid NaN values
      y.contribution_last[is.nan(y.contribution_last)==TRUE]<-0

      component.contribution.matrix[,((2*length(I0))+1)] <- y.contribution_last
      component.contribution.matrix[,((2*length(I0))+2)] <- 0

      ##change names of matrix to make it easier to understand
      component.contribution.matrix.names <- c(
        "x", "rev.x",
        paste(c("y.c","rev.y.c"),rep(1:n.components,each=2), sep=""))

      ##calculate area for each component, for each time interval
      component.contribution.matrix.area <- sapply(
        seq(3,ncol(component.contribution.matrix),by=2),
        function(x){
          matrixStats::rowDiffs(cbind(rev(component.contribution.matrix[,(x+1)]),
                                      component.contribution.matrix[,x]))
        })

      ##append to existing matrix
      component.contribution.matrix <- cbind(
        component.contribution.matrix,
        component.contribution.matrix.area,
        rowSums(component.contribution.matrix.area)
      )

      ##set final column names
      colnames(component.contribution.matrix) <- c(
        component.contribution.matrix.names,
        paste(c("cont.c"),rep(1:n.components,each=1), sep=""),
        "cont.sum")

    }#endif :: (exists("fit"))

  }else{
    ##fit failed completely: still produce a plot, but no table/matrix
    if (output.terminal==TRUE) writeLines("[fit_CWCurve()] Fitting Error >> Plot without fit produced!")
    output.table<-NA
    component.contribution.matrix <- NA
  }

  ##============================================================================##
  ## PLOTTING
  ##============================================================================##
  if(plot==TRUE){

    ##grep par parameters (restored at the end of the plotting section)
    par.default <- par(no.readonly = TRUE)

    ##set colors gallery to provide more colors
    ##NOTE(review): 'col' is read from the package-internal environment
    ##.LuminescenceEnv defined elsewhere in the package
    col <- get("col", pos = .LuminescenceEnv)

    ##set plot frame: 3-panel layout (curve, residuals, contribution) on success
    if(!inherits(fit, "try-error")){
      layout(matrix(c(1,2,3),3,1,byrow=TRUE),c(1.6,1,1), c(1,0.3,0.4),TRUE)
      par(oma=c(1,1,1,1),mar=c(0,4,3,0),cex=cex.global)
    }else{
      par(cex=cex.global)
    }

    ##==upper plot==##
    ##open plot area
    plot(NA,NA,
         xlim=c(min(x),max(x)),
         ylim=if(log=="xy"){c(1,max(y))}else{c(0,max(y))},
         xlab=if(!inherits(fit, "try-error")){""}else{xlab},
         xaxt=if(!inherits(fit, "try-error")){"n"}else{"s"},
         ylab=ylab,
         main=main,
         log=log)

    ##plotting measured signal
    points(x,y,pch=20, col="grey")

    ##add additional labeling (fitted function)
    mtext(side=3, sample_code, cex=0.7*cex.global)

    ##plot sum function
    if(inherits(fit,"try-error")==FALSE){
      lines(x,eval(fit.function), lwd=2, col="black")
      legend.caption<-"sum curve"
      curve.col <- 1

      ##plot signal curves

      ##plot curve for additional parameters
      if(length(I0)>1){
        for (i in 1:length(I0)) {
          curve(I0[i]*lambda[i]*exp(-lambda[i]*x),col=col[i+1], lwd = 2, add = TRUE)
          legend.caption<-c(legend.caption,paste("component ",i,sep=""))
          curve.col<-c(curve.col,i+1)
        }
      }#end if

      ##plot legend
      #legend(y=max(y)*1,"measured values",pch=20, col="gray", bty="n")
      ##NOTE(review): rep(1,n.components+1,NA) passes NA as 'length.out';
      ##probably rep(1, n.components+1) was intended - verify
      legend("topright",legend.caption,lty=rep(1,n.components+1,NA),lwd=2,col=col[curve.col],
             bty="n")

      ##==lower plot==##
      ##plot residuals
      par(mar=c(4.2,4,0,0))
      plot(x,residuals(fit),
           xlim=c(min(x),max(x)),
           xlab=xlab,
           type="l",
           col="grey",
           ylab="Residual [a.u.]",
           lwd=2,
           log=if(log=="x" | log=="xy"){log="x"}else{""}
      )

      ##add 0 line
      abline(h=0)

      ##------------------------------------------------------------------------##
      ##++component to sum contribution plot ++##
      ##------------------------------------------------------------------------##

      ##plot component contribution to the whole signal
      #open plot area
      par(mar=c(4,4,3.2,0))
      plot(NA,NA,
           xlim=c(min(x),max(x)),
           ylim=c(0,100),
           ylab="Contribution [%]",
           xlab=xlab,
           main="Component contribution to sum curve",
           log=if(log=="x" | log=="xy"){log="x"}else{""})

      ##draw one stacked polygon per component from the precomputed matrix
      stepping <- seq(3,length(component.contribution.matrix[1,]),2)

      for(i in 1:length(I0)){
        polygon(c(component.contribution.matrix[,1],
                  component.contribution.matrix[,2]),
                c(component.contribution.matrix[,stepping[i]],
                  component.contribution.matrix[,stepping[i]+1]),
                col = col[i+1])
      }
      rm(stepping)

    }#end if try-error for fit

    ##restore graphical parameters
    par(par.default)
    rm(par.default)
  }

  ##============================================================================##
  ## Return Values
  ##============================================================================##
  ##bundle fit object, summary table and contribution matrix; returned invisibly
  newRLumResults.fit_CWCurve <- set_RLum(
    class = "RLum.Results",
    data = list(
      data = output.table,
      fit = fit,
      component.contribution.matrix = list(component.contribution.matrix)
    ),
    info = list(call = sys.call())
  )

  rm(fit)
  rm(output.table)
  rm(component.contribution.matrix)

  invisible(newRLumResults.fit_CWCurve)
}
Luminescence/R/calc_CobbleDoseRate.R0000644000176200001440000004074414236146743017030 0ustar liggesusers#'@title Calculate dose rate of slices in a spherical cobble
#'
#'@description
#'
#'Calculates the dose rate profile through the cobble based on Riedesel and Autzen (2020).
#'
#'Corrects the beta dose rate in the cobble for the grain size following results
#'of Guérin et al. (2012). Sediment beta and gamma dose rates are corrected
#'for the water content of the sediment using the correction factors of Aitken (1985).
#'Water content in the cobble is assumed to be 0.
#'
#'
#'@details
#'
#'**The input table layout**
#'
#'\tabular{lll}{
#'COLUMN \tab DATA TYPE \tab DESCRIPTION\cr
#'`Distance` \tab `numeric` \tab distance from the surface of the cobble to the top of each rock slice in mm.
The distance for each slice will be listed in this column\cr #'`DistanceError` \tab `numeric` \tab Error on the distance in mm\cr #'`Thickness` \tab `numeric` \tab Thickness of each slice in mm\cr #'`TicknessError` \tab `numeric` \tab uncertainty of the thickness in mm.\cr #'`Mineral` \tab `character` \tab `'FS'` for feldspar, `'Q'` for quartz, depending which mineral in the cobble is used for dating\cr #'`Cobble_K` \tab `numeric` \tab K nuclide content in % of the bulk cobble\cr #'`Cobble_K_SE` \tab `numeric` \tab error on K nuclide content in % of the bulk cobble\cr #'`Cobble_Th` \tab `numeric` \tab Th nuclide content in ppm of the bulk cobble\cr #'`Cobble_Th_SE` \tab `numeric` \tab error on Th nuclide content in ppm of the bulk cobble\cr #'`Cobble_U` \tab `numeric` \tab U nuclide content in ppm of the bulk cobble\cr #'`CobbleU_SE` \tab `numeric` \tab error on U nuclide content in ppm of the bulk cobble\cr #'`GrainSize` \tab `numeric` \tab average grain size in µm of the grains used for dating\cr #'`Density` \tab `numeric` \tab Density of the cobble. 
Default is 2.7 g cm^-3\cr #'`CobbleDiameter` \tab `numeric` \tab Diameter of the cobble in cm.\cr #'`Sed_K` \tab `numeric` \tab K nuclide content in % of the sediment matrix\cr #'`Sed_K_SE` \tab `numeric` \tab error on K nuclide content in % of the sediment matrix\cr #'`Sed_Th` \tab `numeric` \tab Th nuclide content in ppm of the sediment matrix\cr #'`Sed_Th_SE` \tab `numeric` \tab error on Th nuclide content in ppm of the sediment matrix\cr #'`Sed_U` \tab `numeric` \tab U nuclide content in ppm of the sediment matrix\cr #'`Sed_U_SE` \tab `numeric` \tab error on U nuclide content in ppm of the sediment matrix\cr #'`GrainSize` \tab `numeric` \tab average grain size of the sediment matrix\cr #'`WaterContent` \tab `numeric` \tab mean water content of the sediment matrix in %\cr #'`WaterContent_SE` \tab `numeric` \tab relative error on water content #'} #' #'**Water content** #'The water content provided by the user should be calculated according to: #' #'\deqn{(Wet_weight - Dry_weight) / Dry_weight * 100} #' #'@param input [data.frame] (**required**): A table containing all relevant information #'for each individual layer. For the table layout see details. #' #'@param conversion Which dose rate conversion factors to use. For accepted values see [BaseDataSet.ConversionFactors] #' #'@references #'Riedesel, S., Autzen, M., 2020. Beta and gamma dose rate attenuation in rocks and sediment. #'Radiation Measurements 133, 106295. #' #'@section Function version: 0.1.0 #' #'@author Svenja Riedesel, Aberystwyth University (United Kingdom) \cr #'Martin Autzen, DTU NUTECH Center for Nuclear Technologies (Denmark) #' #'@return The function returns an [RLum.Results-class] object for which the first element #'is a [matrix] (`DataIndividual`) that gives the dose rate results for each slice #'for each decay chain individually, for both, the cobble dose rate and the sediment #'dose rate. 
The second element is also a [matrix] (`DataComponent`) that gives #'the total beta and gamma-dose rates for the cobble and the adjacent sediment #'for each slice of the cobble. #' #'@keywords datagen #' #'@seealso [convert_Concentration2DoseRate] #' #'@examples #'## load example data #'data("ExampleData.CobbleData", envir = environment()) #' #'## run function #'calc_CobbleDoseRate(ExampleData.CobbleData) #' #'@md #'@export calc_CobbleDoseRate <- function(input,conversion = "Guerinetal2011"){ # Integrity tests --------------------------------------------------------- if ((max(input[,1])>input$CobbleDiameter[1]*10) || ((max(input[,1]) + input[length(input[,1]),3]) > input$CobbleDiameter[1]*10)) stop("[calc_CobblDoseRate()] Slices outside of cobble. Please check your distances and make sure they are in mm and diameter is in cm!", call. = FALSE) # Calculate Dose Rate ----------------------------------------------------- SedDoseData <- matrix(data = NA, nrow = 1, ncol = 10) CobbleDoseData <- matrix(data = 0, nrow = 1, ncol = 10) CobbleDoseData <- input[1,5:12] CobbleDoseData <- cbind(CobbleDoseData,0,0) SedDoseData <- cbind(input[1,5],input[1,15:20],input[1,12],input[1,23:24]) CobbleDoseRate <- get_RLum(convert_Concentration2DoseRate( input = CobbleDoseData, conversion = conversion)) SedDoseRate <- get_RLum( convert_Concentration2DoseRate(input = SedDoseData, conversion = conversion)) ## Distance should be from the surface of the rock to the top of the slice. 
Distances and thicknesses are in mm N <- length(input$Distance) Diameter <- input$CobbleDiameter[1] ### Calculate gamma attenuation coefficient for the cobbles internal dose rate if (Diameter<25){ CobbleGammaAtt <- (0.55 * exp(-0.45 * Diameter) + 0.09 * exp(-0.06 * Diameter)) * 10 }else { CobbleGammaAtt <- 0.02 } ## Scale the density and infinite matrix gamma dose rates ---- Scaling <- input$Density[1] / 2.7 GammaEdge <- 0.5 * (1 - exp(-0.039 * Diameter)) GammaCentre <- 2 * GammaEdge DiameterSeq <- seq(0, Diameter * 10, by = 0.01) #Converts diameter into integer of 10 um ### Create matrices for use ---- Temp <- matrix(data = NA, nrow = length(DiameterSeq), ncol = 9) DistanceError <- matrix(data = NA, nrow = N, ncol = 8) ThicknessError <- matrix(data = NA, nrow = N, ncol = 8) DataIndividual <- matrix(data = NA, nrow = N, ncol = 25) DataComponent <- matrix(data = NA, nrow = N, ncol = 9) DoseRates <- matrix(data = NA, nrow = 1, ncol = 24) output <- matrix(list(), nrow = 2, ncol = 1) ### Calculate dose rate profiles through the rock ---- t <- Diameter * 10 - DiameterSeq tGamma <- t #Beta and gamma functions for the cobbles own dose rate KBetaCobble <- function(x) (1 - 0.5 * exp(-3.77 * DiameterSeq))+(1-0.5*exp(-3.77*t))-1 ThBetaCobble_short <- function(x) (1 - 0.5 * exp(-5.36 * x * Scaling))+(1-0.5*exp(-5.36*t*Scaling))-1 ThBetaCobble_long <- function(x) (1 - 0.33 * exp(-2.36 * x * Scaling))+(1-0.33*exp(-2.36*t*Scaling))-1 UBetaCobble_short <- function(x) (1 - 0.5 * exp(-4.15 * x * Scaling))+(1-0.5*exp(-4.15*t*Scaling))-1 UBetaCobble_long <- function(x) (1 - 0.33 * exp(-2.36 * x * Scaling))+(1-0.33*exp(-2.36*t*Scaling))-1 GammaCobble <- function(x) { (GammaCentre - GammaEdge * exp(-CobbleGammaAtt * x * Scaling)) + (GammaCentre - GammaEdge * exp(-CobbleGammaAtt * tGamma * Scaling)) - GammaCentre } #Beta and gamma functions for the sediment dose rates into the cobble KBetaSed <- function(x) 2 - (1 - 0.5 * exp(-3.77 * x * Scaling)) - (1 - 0.5 * exp(-3.77 * t * Scaling)) 
ThBetaSed_short <- function(x) 2 - (1 - 0.5 * exp(-5.36 * x * Scaling)) - (1 - 0.5 * exp(-5.36 * t * Scaling)) ThBetaSed_long <- function(x) 2 - (1 - 0.33 * exp(-2.36 * x * Scaling)) - (1 - 0.33 * exp(-2.36 * t * Scaling)) UBetaSed_short <- function(x) 2 - (1 - 0.5 * exp(-4.15 * x * Scaling)) - (1 - 0.5 * exp(-4.15 * t * Scaling)) UBetaSed_long <- function(x) 2 - (1 - 0.33 * exp(-2.36 * x * Scaling)) - (1 - 0.33 * exp(-2.36 * t * Scaling)) GammaSed <- function(x) 2 - (1 - 0.5 * exp(-0.02 * x * Scaling)) - (1 - 0.5 * exp(-0.02 * tGamma * Scaling)) Temp[, 1] <- DiameterSeq Temp[, 2] <- KBetaCobble(DiameterSeq) Temp[, 3] <- ThBetaCobble_long(DiameterSeq) Temp[, 4] <- UBetaCobble_long(DiameterSeq) Temp[, 5] <- GammaCobble(DiameterSeq) Temp[, 6] <- KBetaSed(DiameterSeq) Temp[, 7] <- ThBetaSed_long(DiameterSeq) Temp[, 8] <- UBetaSed_long(DiameterSeq) Temp[, 9] <- GammaSed(DiameterSeq) TempThCob <- ThBetaCobble_short(DiameterSeq) TempUCob <- UBetaCobble_short(DiameterSeq) TempThSed <- ThBetaSed_short(DiameterSeq) TempUSed <- UBetaSed_short(DiameterSeq) n <- which(DiameterSeq >= (max(DiameterSeq)-0.15))[1] Max <- length(DiameterSeq) ## Create the full matrix based on the short and long beta attenuations Temp[0:16, 3] <- TempThCob[0:16] Temp[n:Max, 3] <- TempThCob[n:Max] Temp[0:16, 7] <- TempThSed[0:16] Temp[n:Max, 7] <- TempThSed[n:Max] Temp[0:16, 4] <- TempUCob[0:16] Temp[n:Max, 4] <- TempUCob[n:Max] Temp[0:16, 8] <- TempUSed[0:16] Temp[n:Max, 8] <- TempUSed[n:Max] colnames(Temp) <- c( "Distance", "KBetaCob", "ThBetaCob", "UBetaCob", "GammaCob", "KBetaSed", "ThBetaSed", "UBetaSed", "GammaSed" ) ### Create data output matrices ---- Distances <- input$Distance / 0.01 + 1 Thicknesses <- input$Thickness / 0.01 MinDistance <- (input$Distance - input$DistanceError) / 0.01 + 1 MaxDistance <- (input$Distance + input$DistanceError) / 0.01 + 1 MinThickness <- (input$Thickness - input$ThicknessError) / 0.01 MaxThickness <- (input$Thickness + input$ThicknessError) / 0.01 for (i in 
1:N){ Start <- Distances[i] End <- Start+Thicknesses[i] d_min <- MinDistance[i] d_max <- MaxDistance[i] t_min <- MinThickness[i] t_max <- MaxThickness[i] #Calculate errors ---- #Check if minimum distance from top is less than 0 if (MinDistance[i]<0){ d_min <- 0 } j <- d_min+Thicknesses[i] k <- d_max+Thicknesses[i] for (l in 1:8){ m <- l + 1 if (d_min == Start){ DistanceError[i,l]<- abs( (mean(Temp[d_max:k,m])-mean(Temp[Start:End,m]))/(2*mean(Temp[Start:End,m]))) } else if (k > Max){ DistanceError[i,l] <- abs( (mean(Temp[Start:End,m])-mean(Temp[d_min:j,m]))/(2*mean(Temp[Start:End,m]))) } else { DistanceError[i,l] <- abs( mean((mean(Temp[d_max:k,m])-mean(Temp[Start:End,m])):(mean(Temp[Start:End,m])-mean(Temp[d_min:j,m])))/(2*mean(Temp[Start:End,m]))) } j2 <- Start+t_min k2 <- Start+t_max if (k2 > Max){ ThicknessError[i,l] <- abs( (mean(Temp[Start:End,m])-mean(Temp[Start:j2,m]))/(2*mean(Temp[Start:End,m]))) } else { ThicknessError[i,l] <- abs( mean((mean(Temp[Start:k2,m])-mean(Temp[Start:End,m])):(mean(Temp[Start:End,m])-mean(Temp[Start:j2,m])))/(2*mean(Temp[Start:End,m]))) } } ### Calculate average dose rates ---- DataIndividual[i, 1] <- input[i, 1] # Cobble K Beta DataIndividual[i, 2] <- mean(Temp[Start:End, 2]) * CobbleDoseRate[1, 1] DataIndividual[i, 3] <- DataIndividual[i, 2] * sqrt(DistanceError[i, 1] ^ 2 + ThicknessError[i, 1] ^ 2 + (CobbleDoseRate[1, 2] / CobbleDoseRate[1, 1]) ^ 2) # Cobble Th Beta DataIndividual[i, 4] <- mean(Temp[Start:End, 3]) * CobbleDoseRate[1, 3] DataIndividual[i, 5] <- DataIndividual[i, 4] * sqrt(DistanceError[i, 2] ^ 2 + ThicknessError[i, 2] ^ 2 + (CobbleDoseRate[1, 4] / CobbleDoseRate[1, 3]) ^ 2) # Cobble U Beta DataIndividual[i, 6] <- mean(Temp[Start:End, 4]) * CobbleDoseRate[1, 5] DataIndividual[i, 7] <- DataIndividual[i, 6] * sqrt(DistanceError[i, 3] ^ 2 + ThicknessError[i, 3] ^ 2 + (CobbleDoseRate[1, 6] / CobbleDoseRate[1, 5]) ^ 2) # Cobble K Gamma DataIndividual[i, 8] <- mean(Temp[Start:End, 5]) * CobbleDoseRate[2, 1] 
DataIndividual[i, 9] <- DataIndividual[i, 8] * sqrt(DistanceError[i, 4] ^ 2 + ThicknessError[i, 4] ^ 2 + (CobbleDoseRate[2, 2] / CobbleDoseRate[2, 1]) ^ 2) # Cobble Th Gamma DataIndividual[i, 10] <- mean(Temp[Start:End, 5]) * CobbleDoseRate[2, 3] DataIndividual[i, 11] <- DataIndividual[i, 10] * sqrt(DistanceError[i, 4] ^ 2 + ThicknessError[i, 4] ^ 2 + (CobbleDoseRate[2, 4] / CobbleDoseRate[2, 3]) ^ 2) # Cobble U Gamma DataIndividual[i, 12] <- mean(Temp[Start:End, 5]) * CobbleDoseRate[2, 5] DataIndividual[i, 13] <- DataIndividual[i, 12] * sqrt(DistanceError[i, 4] ^ 2 + ThicknessError[i, 4] ^ 2 + (CobbleDoseRate[2, 6] / CobbleDoseRate[2, 5]) ^ 2) # Sediment K Beta DataIndividual[i, 14] <- mean(Temp[Start:End, 6]) * SedDoseRate[1, 1] DataIndividual[i, 15] <- DataIndividual[i, 14] * sqrt(DistanceError[i, 5] ^ 2 + ThicknessError[i, 5] ^ 2 + (SedDoseRate[1, 2] / SedDoseRate[1, 1]) ^ 2) # Sediment Th Beta DataIndividual[i, 16] <- mean(Temp[Start:End, 7]) * SedDoseRate[1, 3] DataIndividual[i, 17] <- DataIndividual[i, 16] * sqrt(DistanceError[i, 6] ^ 2 + ThicknessError[i, 6] ^ 2 + (SedDoseRate[1, 4] / SedDoseRate[1, 3]) ^ 2) # Sediment U Beta DataIndividual[i, 18] <- mean(Temp[Start:End, 8]) * SedDoseRate[1, 5] DataIndividual[i, 19] <- DataIndividual[i, 18] * sqrt(DistanceError[i, 7] ^ 2 + ThicknessError[i, 7] ^ 2 + (SedDoseRate[1, 6] / SedDoseRate[1, 5]) ^ 2) # Sediment K Gamma DataIndividual[i, 20] <- mean(Temp[Start:End, 9]) * SedDoseRate[2, 1] DataIndividual[i, 21] <- DataIndividual[i, 20] * sqrt(DistanceError[i, 8] ^ 2 + ThicknessError[i, 8] ^ 2 + (SedDoseRate[2, 2] / SedDoseRate[2, 1]) ^ 2) # Sediment Th Gamma DataIndividual[i, 22] <- mean(Temp[Start:End, 9]) * SedDoseRate[2, 3] DataIndividual[i, 23] <- DataIndividual[i, 22] * sqrt(DistanceError[i, 8] ^ 2 + ThicknessError[i, 8] ^ 2 + (SedDoseRate[2, 4] / SedDoseRate[2, 3]) ^ 2) # Sediment U Gamma DataIndividual[i, 24] <- mean(Temp[Start:End, 9]) * SedDoseRate[2, 5] DataIndividual[i, 25] <- DataIndividual[i, 24] * 
sqrt(DistanceError[i, 8] ^ 2 + ThicknessError[i, 8] ^ 2 + (SedDoseRate[2, 6] / SedDoseRate[2, 5]) ^ 2) ### Sum data into beta and gamma dose rates from cobble and sediment ---- DataComponent[i, 1] <- input[i, 1] DataComponent[i, 2] <- DataIndividual[i, 2] + DataIndividual[i, 4] + DataIndividual[i, 6] DataComponent[i, 3] <- DataComponent[i,2]*sqrt((DataIndividual[i,3]/DataIndividual[i,2])^2+(DataIndividual[i,5]/DataIndividual[i,4])^2+(DataIndividual[i,7]/DataIndividual[i,6])^2) DataComponent[i, 4] <- DataIndividual[i, 8] + DataIndividual[i, 10] + DataIndividual[i, 12] DataComponent[i, 5] <- DataComponent[i,4]*sqrt((DataIndividual[i,9]/DataIndividual[i,8])^2+(DataIndividual[i,11]/DataIndividual[i,10])^2+(DataIndividual[i,13]/DataIndividual[i,12])^2) DataComponent[i, 6] <- DataIndividual[i, 14] + DataIndividual[i, 16] + DataIndividual[i, 18] DataComponent[i, 7] <- DataComponent[i,6]*sqrt((DataIndividual[i,15]/DataIndividual[i,14])^2+(DataIndividual[i,17]/DataIndividual[i,16])^2+(DataIndividual[i,19]/DataIndividual[i,18])^2) DataComponent[i, 8] <- DataIndividual[i, 20] + DataIndividual[i, 22] + DataIndividual[i, 24] DataComponent[i, 9] <- DataComponent[i,8]*sqrt((DataIndividual[i,21]/DataIndividual[i,20])^2+(DataIndividual[i,23]/DataIndividual[i,22])^2 + (DataIndividual[i,25]/DataIndividual[i,24])^2) } colnames(DataIndividual) <- c( "Distance.", "K Beta cobble", "SE", "Th Beta cobble", "SE", "U Beta cobble", "SE", "K Gamma cobble", "SE", "Th Gamma cobble", "SE", "U Gamma cobble", "SE", "K Beta sed.", "SE", "Th Beta sed.", "SE", "U Beta sed.", "SE", "K Gamma sed.", "SE", "Th Gamma sed.", "SE", "U Gamma sed.", "SE" ) colnames(DataComponent) <- c( "Distance", "Total Cobble Beta", "SE", "Total Cobble Gamma", "SE", "Total Beta Sed.", "SE", "Total Gamma Sed.", "SE" ) DataIndividual[is.na(DataIndividual)] <- 0 DataComponent[is.na(DataComponent)] <- 0 # Return ------------------------------------------------------------------ return( set_RLum( class = "RLum.Results", data = 
list( DataIndividual = DataIndividual, DataComponent = DataComponent, input = input ), info = list( call = sys.call() ))) } Luminescence/R/plot_FilterCombinations.R0000644000176200001440000003067614264017373020064 0ustar liggesusers#' Plot filter combinations along with the (optional) net transmission window #' #' The function allows to plot transmission windows for different filters. Missing data for specific #' wavelengths are automatically interpolated for the given filter data using the function [approx]. #' With that a standardised output is reached and a net transmission window can be shown. #' #' **Calculations** #' #' **Net transmission window** #' #' The net transmission window of two filters is approximated by #' #' \deqn{T_{final} = T_{1} * T_{2}} #' #' **Optical density** #' #' \deqn{OD = -log10(T)} #' #' **Total optical density** #' #' \deqn{OD_{total} = OD_{1} + OD_{2}} #' #' Please consider using own calculations for more precise values. #' #' **How to provide input data?** #' #' *CASE 1* #' #' The function expects that all filter values are either of type `matrix` or `data.frame` #' with two columns. The first columns contains the wavelength, the second the relative transmission #' (but not in percentage, i.e. the maximum transmission can be only become 1). #' #' In this case only the transmission window is show as provided. Changes in filter thickness and #' reflection factor are not considered. #' #' *CASE 2* #' #' The filter data itself are provided as list element containing a `matrix` or #' `data.frame` and additional information on the thickness of the filter, e.g., #' `list(filter1 = list(filter_matrix, d = 2))`. #' The given filter data are always considered as standard input and the filter thickness value #' is taken into account by #' #' \deqn{Transmission = Transmission^(d)} #' #' with d given in the same dimension as the original filter data. 
#' #' *CASE 3* #' #' Same as CASE 2 but additionally a reflection factor P is provided, e.g., #' `list(filter1 = list(filter_matrix, d = 2, P = 0.9))`. #' The final transmission becomes: #' #' \deqn{Transmission = Transmission^(d) * P} #' #' **Advanced plotting parameters** #' #' The following further non-common plotting parameters can be passed to the function: #' #' \tabular{lll}{ #' **`Argument`** \tab **`Datatype`** \tab **`Description`**\cr #' `legend` \tab `logical` \tab enable/disable legend \cr #' `legend.pos` \tab `character` \tab change legend position ([graphics::legend]) \cr #' `legend.text` \tab `character` \tab same as the argument `legend` in ([graphics::legend]) \cr #' `net_transmission.col` \tab `col` \tab colour of net transmission window polygon \cr #' `net_transmission.col_lines` \tab `col` \tab colour of net transmission window polygon lines \cr #' `net_transmission.density` \tab `numeric` \tab specify line density in the transmission polygon \cr #' `grid` \tab `list` \tab full list of arguments that can be passed to the function [graphics::grid] #' } #' #' For further modifications standard additional R plot functions are recommend, e.g., the legend #' can be fully customised by disabling the standard legend and use the function [graphics::legend] #' instead. #' #' #' @param filters [list] (**required**): #' a named list of filter data for each filter to be shown. #' The filter data itself should be either provided as [data.frame] or [matrix]. #' (for more options s. Details) #' #' @param wavelength_range [numeric] (*with default*): #' wavelength range used for the interpolation #' #' @param show_net_transmission [logical] (*with default*): #' show net transmission window as polygon. #' #' @param interactive [logical] (*with default*): #' enable/disable interactive plot #' #' @param plot [logical] (*with default*): #' enables or disables the plot output #' #' @param ... further arguments that can be passed to control the plot output. 
#' Supported are `main`, `xlab`, `ylab`, `xlim`, `ylim`, `type`, `lty`, `lwd`.
#' For non common plotting parameters see the details section.
#'
#' @return Returns an S4 object of type [RLum.Results-class].
#'
#' **@data**
#'
#' \tabular{lll}{
#' **`Object`** \tab **`Type`** **`Description`** \cr
#' `net_transmission_window` \tab `matrix` \tab the resulting net transmission window \cr
#' `OD_total` \tab `matrix` \tab the total optical density\cr
#' `filter_matrix` \tab `matrix` \tab the filter matrix used for plotting
#' }
#'
#' **@info**
#'
#' \tabular{lll}{
#' **Object** \tab **Type** **Description** \cr
#' `call` \tab [call] \tab the original function call
#' }
#'
#' @section Function version: 0.3.2
#'
#' @author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#' @seealso [RLum.Results-class], [approx]
#'
#' @keywords datagen aplot
#'
#' @examples
#'
#' ## (For legal reasons no real filter data are provided)
#'
#' ## Create filter sets
#' filter1 <- density(rnorm(100, mean = 450, sd = 20))
#' filter1 <- matrix(c(filter1$x, filter1$y/max(filter1$y)), ncol = 2)
#' filter2 <- matrix(c(200:799,rep(c(0,0.8,0),each = 200)), ncol = 2)
#'
#' ## Example 1 (standard)
#' plot_FilterCombinations(filters = list(filter1, filter2))
#'
#' ## Example 2 (with d and P value and name for filter 2)
#' results <- plot_FilterCombinations(
#' filters = list(filter_1 = filter1, Rectangle = list(filter2, d = 2, P = 0.6)))
#' results
#'
#' ## Example 3 show optical density
#' plot(results$OD_total)
#'
#' \dontrun{
#' ##Example 4
#' ##show the filters using the interactive mode
#' plot_FilterCombinations(filters = list(filter1, filter2), interactive = TRUE)
#'
#' }
#'
#'
#' @md
#' @export
plot_FilterCombinations <- function(
  filters,
  wavelength_range = 200:1000,
  show_net_transmission = TRUE,
  interactive = FALSE,
  plot = TRUE,
  ...) {

  # Integrity tests -----------------------------------------------------------

  ## 'filters' must be a list
  if (!is(filters, "list")) {
    stop("[plot_FilterCombinations()] 'filters' should be of type 'list'")
  }

  ## each element should be either data.frame, matrix or list;
  ## report the offending element by index rather than pasting the whole
  ## object into the message (paste() would vectorise over a matrix)
  for (i in seq_along(filters)) {
    if (!is(filters[[i]], "data.frame") &
        !is(filters[[i]], "matrix") &
        !is(filters[[i]], "list")) {
      stop(
        paste(
          "[plot_FilterCombinations()] input for filter", i,
          "is not of type 'matrix', 'data.frame' or 'list'!"
        )
      )
    }
  }

  ## check for named list, if not set names
  if (is.null(names(filters))) {
    names(filters) <- paste("Filter ", seq_along(filters))
  }

  # Data Preparation ----------------------------------------------------------

  ## If filters are provided with their thickness, correct the transmission
  ## for it ... relevant for glass filters. The provided data are always
  ## assumed to correspond to a thickness of 1 (same unit as d).
  filters <- lapply(filters, function(x) {
    if (!is(x, "list"))
      return(x)

    ## thickness correction: T^d
    if (length(x) > 1)
      x[[1]][, 2] <- x[[1]][, 2] ^ (x[[2]])

    ## account for a potentially provided transmission reflection factor: T * P
    if (length(x) > 2)
      x[[1]][, 2] <- x[[1]][, 2] * x[[3]]

    x[[1]]
  })

  ## transmission values greater than one are physically not possible
  lapply(filters, function(x) {
    if (max(x[, 2], na.rm = TRUE) > 1.01) {
      stop("[plot_FilterCombinations()] transmission values > 1 found.
           Check your data.")
    }
  })

  ## combine everything in a matrix using approx for interpolation
  filter_matrix <- vapply(filters, function(x) {
    approx(x = x[, 1], y = x[, 2], xout = wavelength_range)$y
  }, FUN.VALUE = vector(mode = "numeric", length = length(wavelength_range)))

  ## calculate net transmission window (row-wise product of all filters)
  net_transmission_window <- matrix(
    c(wavelength_range, matrixStats::rowProds(filter_matrix)),
    ncol = 2)

  ## calculate optical density, OD = -log10(T), and the total OD
  OD <- -log10(filter_matrix)
  OD_total <- cbind(wavelength_range, matrixStats::rowSums2(OD))

  ## append OD columns and label the filter matrix
  filter_matrix <- cbind(filter_matrix, OD)
  rownames(filter_matrix) <- wavelength_range
  colnames(filter_matrix) <- c(names(filters), paste0(names(filters), "_OD"))

  # Plotting ------------------------------------------------------------------
  if (plot) {
    ## (1) ... select transmission values (drop the OD columns)
    filter_matrix_transmission <-
      filter_matrix[, !grepl(pattern = "OD", x = colnames(filter_matrix)), drop = FALSE]

    ## set plot settings
    plot_settings <- list(
      main = "Filter Combination",
      xlab = "Wavelength [nm]",
      ylab = "Transmission [a.u.]",
      xlim = range(wavelength_range),
      ylim = c(0, 1),
      legend.pos = "topleft",
      lty = 1,
      lwd = 1,
      col = seq_along(filters),
      grid = expression(nx = 10, ny = 10),
      legend = TRUE,
      legend.text = colnames(filter_matrix_transmission),
      net_transmission.col = rgb(0, 0.7, 0, .2),
      net_transmission.col_lines = "grey",
      net_transmission.density = 20
    )

    ## modify settings on request
    plot_settings <- modifyList(plot_settings, list(...))

    if (interactive) {
      ## check for plotly
      if (!requireNamespace("plotly", quietly = TRUE)) {
        stop("[plot_FilterCombinations()] Package 'plotly' needed interactive plot functionality. Please install it.",
             call. = FALSE)
      }

      ## create basic plot
      p <- plotly::plot_ly(
        x = wavelength_range,
        y = filter_matrix[, 1],
        type = "scatter",
        name = colnames(filter_matrix_transmission)[1],
        mode = "lines")

      ## add further filters
      if (ncol(filter_matrix_transmission) > 1) {
        for (i in 2:ncol(filter_matrix_transmission)) {
          p <- plotly::add_trace(
            p,
            y = filter_matrix[, i],
            name = colnames(filter_matrix_transmission)[i],
            mode = 'lines')
        }
      }

      ## add polygon; replace all NA values with 0, otherwise it looks odd
      net_transmission_window[is.na(net_transmission_window)] <- 0
      p <- plotly::add_polygons(
        p,
        x = c(wavelength_range, rev(wavelength_range)),
        y = c(net_transmission_window[, 2], rep(0, length(wavelength_range))),
        name = "net transmission")

      ## change graphical parameters
      p <- plotly::layout(
        p = p,
        xaxis = list(title = plot_settings$xlab),
        yaxis = list(title = plot_settings$ylab),
        title = plot_settings$main
      )

      print(p)
      on.exit(return(p))

    } else {
      ## plot individual filters
      graphics::matplot(
        x = wavelength_range,
        y = filter_matrix_transmission,
        type = "l",
        main = plot_settings$main,
        xlab = plot_settings$xlab,
        ylab = plot_settings$ylab,
        xlim = plot_settings$xlim,
        ylim = plot_settings$ylim,
        lty = plot_settings$lty,
        lwd = plot_settings$lwd,
        col = plot_settings$col
      )

      if (!is.null(plot_settings$grid)) {
        graphics::grid(eval(plot_settings$grid))
      }

      ## show effective transmission, which is the minimum for each row
      if (show_net_transmission) {
        ## replace all NA values with 0, otherwise it looks odd
        net_transmission_window[is.na(net_transmission_window)] <- 0

        ## filled polygon ...
        polygon(
          x = c(wavelength_range, rev(wavelength_range)),
          y = c(net_transmission_window[, 2], rep(0, length(wavelength_range))),
          col = plot_settings$net_transmission.col,
          border = NA)

        ## ... plus hatching lines on top
        polygon(
          x = c(wavelength_range, rev(wavelength_range)),
          y = c(net_transmission_window[, 2], rep(0, length(wavelength_range))),
          col = plot_settings$net_transmission.col_lines,
          border = NA,
          density = plot_settings$net_transmission.density)
      }

      # legend
      if (plot_settings$legend) {
        legend(
          plot_settings$legend.pos,
          legend = plot_settings$legend.text,
          col = plot_settings$col,
          lty = plot_settings$lty,
          bty = "n")
      }
    }
  }

  # Produce output object ------------------------------------------------------
  invisible(set_RLum(
    class = "RLum.Results",
    data = list(
      net_transmission_window = net_transmission_window,
      OD_total = OD_total,
      filter_matrix = filter_matrix
    ),
    info = list(call = sys.call())
  ))
}
Luminescence/R/analyse_FadingMeasurement.R0000644000176200001440000010725714521207352020337 0ustar liggesusers#' @title Analyse fading measurements and returns the fading rate per decade (g-value)
#'
#' @description
#' The function analysis fading measurements and returns a fading rate including an error estimation.
#' The function is not limited to standard fading measurements, as can be seen, e.g., Huntley and
#' Lamothe (2001). Additionally, the density of recombination centres (rho') is estimated after
#' Kars et al. (2008).
#'
#' @details
#' All provided output corresponds to the \eqn{tc} value obtained by this analysis. Additionally
#' in the output object the g-value normalised to 2-days is provided. The output of this function
#' can be passed to the function [calc_FadingCorr].
(2003), #' which is derived from Aitken (1985) appendix F, equations 9 and 11. #' It reads \deqn{t_{star} = t0 * 10^[(t_2 log(t_2/t_0) - t_1 log(t_1/t_0) - 0.43(t_2 - t_1))/(t_2 - t_1)]} #' where 0.43 = \eqn{1/ln(10)}. t0, which is an arbitrary constant, is set to 1. #' Please note that the equation in Auclair et al. (2003) is incorrect #' insofar that it reads \eqn{10exp(...)}, where the base should be 10 and not the Euler's number. #' Here we use the correct version (base 10). #' \item `t_star = "end"` This option uses the simplest possible form for `t_star` which is the time since #' irradiation without taking into account any addition parameter and it equals t1 in Auclair et al. (2003) #' \item `t_star = ` This last option allows you to provide an R function object that works on t1 and #' gives you all possible freedom. For instance, you may want to define the following #' function `fun <- function(x) {x^2}`, this would square all values of t1, because internally #' it calls `fun(t1)`. The name of the function does not matter. #' } #' #' **Density of recombination centres** #' #' The density of recombination centres, expressed by the dimensionless variable rho', is estimated #' by fitting equation 5 in Kars et al. 2008 to the data. For the fitting the function #' [stats::nls] is used without applying weights. For the error estimation the same #' procedure as for the g-value is applied (see above). #' #' **Multiple aliquots & Lx/Tx normalisation** #' #' Be aware that this function will always normalise all `Lx/Tx` values by the `Lx/Tx` value of the #' prompt measurement of the first aliquot. This implicitly assumes that there are no systematic #' inter-aliquot variations in the `Lx/Tx` values. If deemed necessary to normalise the `Lx/Tx` values #' of each aliquot by its individual prompt measurement please do so **before** running #' [analyse_FadingMeasurement] and provide the already normalised values for `object` instead. 
#' #' **Shine-down curve plots** #' Please note that the shine-down curve plots are for information only. As such #' not all pause steps are plotted to avoid graphically overloaded plots. #' However, *all* pause times are taken into consideration for the analysis. #' #' @param object [RLum.Analysis-class] (**required**): #' input object with the measurement data. Alternatively, a [list] containing [RLum.Analysis-class] #' objects or a [data.frame] with three columns #' (x = LxTx, y = LxTx error, z = time since irradiation) can be provided. #' Can also be a wide table, i.e. a [data.frame] with a number of columns divisible by 3 #' and where each triplet has the before mentioned column structure. #' #' **Please note: The input object should solely consists of the curve needed for the data analysis, i.e. #' only IRSL curves representing Lx (and Tx)** #' #' If data from multiple aliquots are provided please **see the details below** with regard to #' Lx/Tx normalisation. **The function assumes that all your measurements are related to #' one (comparable) sample. If you to treat independent samples, you have use this function #' in a loop.** #' #' @param structure [character] (*with default*): #' sets the structure of the measurement data. Allowed are `'Lx'` or `c('Lx','Tx')`. #' Other input is ignored #' #' @param signal.integral [vector] (**required**): vector with channels for the signal integral #' (e.g., `c(1:10)`). Not required if a `data.frame` with `LxTx` values is provided. #' #' @param background.integral [vector] (**required**): vector with channels for the background integral #' (e.g., `c(90:100)`). Not required if a `data.frame` with `LxTx` values is provided. #' #' @param t_star [character] (*with default*): #' method for calculating the time elapsed since irradiation if input is **not** a `data.frame`. #' Options are: `'half'` (the default), `'half_complex`, which uses the long equation in Auclair et al. 
2003, and #' and `'end'`, which takes the time between irradiation and the measurement step. #' Alternatively, `t_star` can be a function with one parameter which works on `t1`. #' For more information see details. \cr #' #' *`t_star` has no effect if the input is a [data.frame], because this input comes #' without irradiation times.* #' #' @param n.MC [integer] (*with default*): #' number for Monte Carlo runs for the error estimation #' #' @param verbose [logical] (*with default*): #' enables/disables verbose mode #' #' @param plot [logical] (*with default*): #' enables/disables plot output #' #' @param plot.single [logical] (*with default*): #' enables/disables single plot mode, i.e. one plot window per plot. #' Alternatively a vector specifying the plot to be drawn, e.g., #' `plot.single = c(3,4)` draws only the last two plots #' #' @param ... (*optional*) further arguments that can be passed to internally used functions. Supported arguments: #' `xlab`, `log`, `mtext` and `xlim` for the two first curve plots, and `ylim` for the fading #' curve plot. For further plot customization please use the numerical output of the functions for #' own plots. #' #' @return #' An [RLum.Results-class] object is returned: #' #' Slot: **@data** #' #' \tabular{lll}{ #' **OBJECT** \tab **TYPE** \tab **COMMENT**\cr #' `fading_results` \tab `data.frame` \tab results of the fading measurement in a table \cr #' `fit` \tab `lm` \tab object returned by the used linear fitting function [stats::lm]\cr #' `rho_prime` \tab `data.frame` \tab results of rho' estimation after Kars et al. 
(2008) \cr #' `LxTx_table` \tab `data.frame` \tab Lx/Tx table, if curve data had been provided \cr #' `irr.times` \tab `integer` \tab vector with the irradiation times in seconds \cr #' } #' #' Slot: **@info** #' #' \tabular{lll}{ #' **OBJECT** \tab `TYPE` \tab `COMMENT`\cr #' `call` \tab `call` \tab the original function call\cr #' } #' #' @section Function version: 0.1.21 #' #' @author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) \cr #' Christoph Burow, University of Cologne (Germany) #' #' @keywords datagen #' #' @references #' #' Aitken, M.J., 1985. Thermoluminescence dating, Studies in archaeological science. #' Academic Press, London, Orlando. #' #' Auclair, M., Lamothe, M., Huot, S., 2003. Measurement of anomalous fading for feldspar IRSL using #' SAR. Radiation Measurements 37, 487-492. \doi{10.1016/S1350-4487(03)00018-0} #' #' Huntley, D.J., Lamothe, M., 2001. Ubiquity of anomalous fading in K-feldspars and the measurement #' and correction for it in optical dating. Canadian Journal of Earth Sciences 38, #' 1093-1106. doi: `10.1139/cjes-38-7-1093` #' #' Kars, R.H., Wallinga, J., Cohen, K.M., 2008. A new approach towards anomalous #' fading correction for feldspar IRSL dating-tests on samples in field saturation. #' Radiation Measurements 43, 786-790. 
\doi{10.1016/j.radmeas.2008.01.021} #' #' @seealso [calc_OSLLxTxRatio], [read_BIN2R], [read_XSYG2R], #' [extract_IrradiationTimes], [calc_FadingCorr] #' #' @examples #' #' ## load example data (sample UNIL/NB123, see ?ExampleData.Fading) #' data("ExampleData.Fading", envir = environment()) #' #' ##(1) get fading measurement data (here a three column data.frame) #' fading_data <- ExampleData.Fading$fading.data$IR50 #' #' ##(2) run analysis #' g_value <- analyse_FadingMeasurement( #' fading_data, #' plot = TRUE, #' verbose = TRUE, #' n.MC = 10) #' #' ##(3) this can be further used in the function #' ## to correct the age according to Huntley & Lamothe, 2001 #' results <- calc_FadingCorr( #' age.faded = c(100,2), #' g_value = g_value, #' n.MC = 10) #' #' @md #' @export analyse_FadingMeasurement <- function( object, structure = c("Lx", "Tx"), signal.integral, background.integral, t_star = 'half', n.MC = 100, verbose = TRUE, plot = TRUE, plot.single = FALSE, ... ){ # Integrity Tests ----------------------------------------------------------------------------- if (is(object, "list")) { if (any(sapply(object, class) != "RLum.Analysis")) { ##warning warning(paste("[analyse_FadingMeasurement()]", length(which(sapply(object, class) != "RLum.Analysis")), "non-supported records removed!"), call. = FALSE) ##remove unwanted stuff object[sapply(object, class) != "RLum.Analysis"] <- NULL ##check whether this is empty now if(length(object) == 0) stop( "[analyse_FadingMeasurement()] 'object' expects an 'RLum.Analysis' object or a 'list' of such objects!", call. 
= FALSE ) } } else if (inherits(object, "RLum.Analysis")) { object <- list(object) } else if(inherits(object,"data.frame")){ if (ncol(object) %% 3 != 0) { stop("[analyse_FadingMeasurement()] 'object': if you provide a data.frame as input, the number of columns must be a multiple of 3.") } else { object <- do.call(rbind, lapply(seq(1, ncol(object), 3), function(col) { setNames(object[ , col:c(col+2)], c("LxTx", "LxTxError", "timeSinceIrr")) }) ) object <- object[complete.cases(object), ] } ##set table and object LxTx_table <- data.frame(LxTx = object[[1]], LxTx.Error = object[[2]]) TIMESINCEIRR <- object[[3]] irradiation_times <- TIMESINCEIRR object <- NULL }else{ stop( "[analyse_FadingMeasurement()] 'object' needs to be of type 'RLum.Analysis' or a 'list' of such objects!", call. = FALSE ) } # Prepare data -------------------------------------------------------------------------------- if(!is.null(object)){ ##support read_XSYG2R() if(length(unique(unlist(lapply(object, slot, name = "originator")))) == 1 && unique(unlist(lapply(object, slot, name = "originator"))) == "read_XSYG2R"){ ## extract irradiation times irradiation_times <- extract_IrradiationTimes(object) ## get TIMESINCEIRR TIMESINCEIRR <- unlist(lapply(irradiation_times, function(x) { x@data$irr.times[["TIMESINCEIRR"]][!grepl(pattern = "irradiation", x = x@data$irr.times[["STEP"]], fixed = TRUE)] })) ## get irradiation times irradiation_times <- unlist(lapply(irradiation_times, function(x) { x@data$irr.times[["IRR_TIME"]][!grepl(pattern = "irradiation", x = x@data$irr.times[["STEP"]], fixed = TRUE)] })) ##clean object by removing the irradiation step ... and yes, we drop! 
object_clean <- unlist(get_RLum(object, curveType = "measured")) ##support read_BIN2R() }else if (length(unique(unlist(lapply(object, slot, name = "originator")))) == 1 && unique(unlist(lapply(object, slot, name = "originator"))) %in% c("read_BIN2R","Risoe.BINfileData2RLum.Analysis")){ ##assign object, unlist and drop it object_clean <- unlist(get_RLum(object)) ##set TIMESINCEIRR vector TIMESINCEIRR <- vapply(object_clean, function(o){ o@info$TIMESINCEIRR }, numeric(1)) ##check whether we have negative irradiation times, sort out such values if(any(TIMESINCEIRR < 0)){ #count affected records rm_records <- length(which(TIMESINCEIRR < 0)) ##now we have a problem and we first have to make sure that we understand ##the data structure and remove also the corresponding values if(all(structure == c("Lx", "Tx"))){ rm_id <- matrix(TIMESINCEIRR, ncol = 2, byrow = TRUE) rm_id[apply(rm_id < 0, MARGIN = 1, any),] <- NA rm_id <- which(is.na(as.numeric(t(rm_id)))) object_clean[rm_id] <- NULL TIMESINCEIRR <- TIMESINCEIRR[-rm_id] rm_records <- length(rm_id) rm(rm_id) }else{ object_clean[TIMESINCEIRR < 0] <- NULL TIMESINCEIRR <- TIMESINCEIRR[!TIMESINCEIRR < 0] } ##return warning warning( paste0("[analyse_FadingMeasurement()] ", rm_records, " records 'time since irradiation' value removed from the dataset!"), call. = FALSE) rm(rm_records) } ##set irradiation times irradiation_times <- vapply(object_clean, function(o){ o@info$IRR_TIME }, numeric(1)) ##not support }else{ try(stop("[analyse_FadingMeasurement()] Unknown or unsupported originator!", call. = FALSE)) return(NULL) } ##correct irradiation time for t_star ##in accordance with Auclair et al., 2003, p. 488 ##but here we have no t1 ... this needs to be calculated ##set variables t1 <- TIMESINCEIRR t2 <- TIMESINCEIRR + irradiation_times ## set t_star ---- if(is(t_star, "function")){ t_star <- t_star(t1) } else { if(t_star == "half"){ ##calculate t_star using the simplified equation in Auclair et al. 
(2003) t_star <- t1 + (t2 - t1)/2 } else if(t_star == "half_complex"){ # calculate t_star after the full equation Auclair et al. (2003) # t0 is an arbitrary constant, we are setting that to 1 t0 <- 1 t_star <- t0 * 10^((t2 * log10(t2/t0) - t1 * log10(t1/t0) - (t2 - t1) * log10(exp(1))) / (t2 - t1)) }else if (t_star == "end"){ ##set t_start as t_1 (so after the end of irradiation) t_star <- t1 }else{ stop("[analyse_FadingMeasurement()] Invalid input for t_star.", call. = FALSE) } } ##overwrite TIMESINCEIRR TIMESINCEIRR <- t_star rm(t_star) # Calculation --------------------------------------------------------------------------------- ##calculate Lx/Tx or ... just Lx, it depends on the pattern ... set IRR_TIME if(length(structure) == 2){ Lx_data <- object_clean[seq(1,length(object_clean), by = 2)] Tx_data <- object_clean[seq(2,length(object_clean), by = 2)] ##we need only every 2nd irradiation time, the one from the Tx should be the same ... all the time TIMESINCEIRR <- TIMESINCEIRR[seq(1,length(TIMESINCEIRR), by = 2)] }else if(length(structure) == 1){ Lx_data <- object_clean Tx_data <- NULL }else{ try(stop("[analyse_FadingMeasurement()] I have no idea what your structure means!", call. 
= FALSE)) return(NULL) } ##calculate Lx/Tx table LxTx_table <- merge_RLum(.warningCatcher(lapply(1:length(Lx_data), function(x) { calc_OSLLxTxRatio( Lx.data = Lx_data[[x]], Tx.data = Tx_data[[x]], signal.integral = signal.integral, background.integral = background.integral, signal.integral.Tx = list(...)$signal.integral.Tx, background.integral.Tx = list(...)$background.integral.Tx, sigmab = list(...)$sigmab, sig0 = if( is.null(list(...)$sig0)){ formals(calc_OSLLxTxRatio)$sig0 }else{ list(...)$sig0 }, background.count.distribution = if( is.null(list(...)$background.count.distribution)){ formals(calc_OSLLxTxRatio)$background.count.distribution }else{ list(...)$background.count.distribution } ) })))$LxTx.table } ##create unique identifier uid <- create_UID() ##normalise data to prompt measurement tc <- min(TIMESINCEIRR)[1] ##remove NA values in LxTx table if(any(is.infinite(LxTx_table[["LxTx"]]))){ rm_id <- which(is.infinite(LxTx_table[["LxTx"]])) LxTx_table <- LxTx_table[-rm_id,] TIMESINCEIRR <- TIMESINCEIRR[-rm_id] rm(rm_id) } ##normalise if(length(structure) == 2 | is.null(object)){ LxTx_NORM <- LxTx_table[["LxTx"]] / LxTx_table[["LxTx"]][which(TIMESINCEIRR== tc)[1]] LxTx_NORM.ERROR <- LxTx_table[["LxTx.Error"]] / LxTx_table[["LxTx"]][which(TIMESINCEIRR == tc)[1]] }else{ LxTx_NORM <- LxTx_table[["Net_LnLx"]] / LxTx_table[["Net_LnLx"]][which(TIMESINCEIRR== tc)[1]] LxTx_NORM.ERROR <- LxTx_table[["Net_LnLx.Error"]] / LxTx_table[["Net_LnLx"]][which(TIMESINCEIRR == tc)[1]] } ##normalise time since irradtion TIMESINCEIRR_NORM <- TIMESINCEIRR/tc ##add dose and time since irradiation LxTx_table <- cbind( LxTx_table, TIMESINCEIRR = TIMESINCEIRR, TIMESINCEIRR_NORM = TIMESINCEIRR_NORM, TIMESINCEIRR_NORM.LOG = log10(TIMESINCEIRR_NORM), LxTx_NORM = LxTx_NORM, LxTx_NORM.ERROR = LxTx_NORM.ERROR, UID = uid ) # Fitting ------------------------------------------------------------------------------------- ##prevent that n.MC can became smaller than 2 n.MC <- max(c(n.MC[1],2)) ##we 
need to fit the data to get the g_value ##sample for monte carlo runs MC_matrix <- suppressWarnings(cbind(LxTx_table[["TIMESINCEIRR_NORM.LOG"]], matrix(rnorm( n = n.MC * nrow(LxTx_table), mean = LxTx_table[["LxTx_NORM"]], sd = abs(LxTx_table[["LxTx_NORM.ERROR"]]) ), ncol = n.MC))) ##apply the fit fit_matrix <- vapply(X = 2:(n.MC+1), FUN = function(x){ ##fit fit <- try(stats::lm(y~x, data = data.frame( x = MC_matrix[,1], y = MC_matrix[,x]))$coefficients, silent = TRUE) if(inherits(fit, "try-error")){ return(c(NA_real_, NA_real_)) }else{ return(fit) } }, FUN.VALUE = vector("numeric", length = 2)) ##calculate g-values from matrix g_value.MC <- -fit_matrix[2, ] * 1 / fit_matrix[1, ] * 100 ##calculate rho prime (Kars et al. 2008; proposed by Georgina E. King) ##s value after Huntley (2006) J. Phys. D. Hs <- 3e15 ##sample for monte carlo runs MC_matrix_rhop <- suppressWarnings(matrix(rnorm( n = n.MC * nrow(LxTx_table), mean = LxTx_table[["LxTx_NORM"]], sd = abs(LxTx_table[["LxTx_NORM.ERROR"]]) ), ncol = n.MC)) ## calculate rho prime for all MC samples fit_vector_rhop <- suppressWarnings(apply(MC_matrix_rhop, MARGIN = 2, FUN = function(x) { tryCatch({ coef(minpack.lm::nlsLM(x ~ c * exp(-rhop * (log(1.8 * Hs * LxTx_table$TIMESINCEIRR))^3), start = list(c = x[1], rhop = 10^-5.5)))[["rhop"]] }, error = function(e) { return(NA) }) })) ## discard all NA values produced in MC runs fit_vector_rhop <- fit_vector_rhop[!is.na(fit_vector_rhop)] ## calculate mean and standard deviation of rho prime (in log10 space) rhoPrime <- data.frame( MEAN = mean(fit_vector_rhop), SD = sd(fit_vector_rhop), Q_0.025 = quantile(x = fit_vector_rhop, probs = 0.025, na.rm = TRUE), Q_0.16 = quantile(x = fit_vector_rhop, probs = 0.16, na.rm = TRUE), Q_0.84 = quantile(x = fit_vector_rhop, probs = 0.84, na.rm = TRUE), Q_0.975 = quantile(x = fit_vector_rhop, probs = 0.975, na.rm = TRUE), row.names = NULL ) ## calc g-value ----- fit <- try(stats::lm(y ~ x, data = data.frame(x = 
LxTx_table[["TIMESINCEIRR_NORM.LOG"]], y = LxTx_table[["LxTx_NORM"]])), silent = TRUE) fit_power <- try(stats::lm(y ~ I(x^3) + I(x^2) + I(x) , data = data.frame(x = LxTx_table[["TIMESINCEIRR_NORM.LOG"]], y = LxTx_table[["LxTx_NORM"]])), silent = TRUE) ##for predicting fit_predict <- try(stats::lm(y ~ x, data = data.frame(y = LxTx_table[["TIMESINCEIRR_NORM.LOG"]], x = LxTx_table[["LxTx_NORM"]])), silent = TRUE) ##calculate final g_value ##the 2nd term corrects for the (potential) offset from one if(inherits(fit, "try-error")){ g_value_fit <- NA }else{ g_value_fit <- -fit$coefficient[2] * 1 / fit$coefficient[1] * 100 } ##construct output data.frame g_value <- data.frame( FIT = g_value_fit, MEAN = mean(g_value.MC), SD = sd(g_value.MC), Q_0.025 = quantile(x = g_value.MC, probs = 0.025, na.rm = TRUE), Q_0.16 = quantile(x = g_value.MC, probs = 0.16, na.rm = TRUE), Q_0.84 = quantile(x = g_value.MC, probs = 0.84, na.rm = TRUE), Q_0.975 = quantile(x = g_value.MC, probs = 0.975, na.rm = TRUE) ) ##normalise the g-value to 2-days using the equation provided by Sébastien Huot via e-mail ##this means the data is extended ## calc g2-value days ---- k0 <- g_value[,c("FIT", "SD")] / 100 / log(10) k1 <- k0 / (1 - k0 * log(172800/tc)) g_value_2days <- 100 * k1 * log(10) names(g_value_2days) <- c("G_VALUE_2DAYS", "G_VALUE_2DAYS.ERROR") # Approximation ------------------------------------------------------------------------------- T_0.5.interpolated <- try(approx(x = LxTx_table[["LxTx_NORM"]], y = LxTx_table[["TIMESINCEIRR_NORM"]], ties = mean, xout = 0.5), silent = TRUE) if(inherits(T_0.5.interpolated, 'try-error')){ T_0.5.predict <- NULL T_0.5.interpolated <- NULL }else{ T_0.5.predict <- stats::predict.lm(fit_predict,newdata = data.frame(x = 0.5), interval = "predict") } T_0.5 <- data.frame( T_0.5_INTERPOLATED = T_0.5.interpolated$y, T_0.5_PREDICTED = (10^T_0.5.predict[,1])*tc, T_0.5_PREDICTED.LOWER = (10^T_0.5.predict[,2])*tc, T_0.5_PREDICTED.UPPER = (10^T_0.5.predict[,2])*tc ) # 
Plotting ------------------------------------------------------------------------------------ if(plot) { if (!plot.single[1]) { par.default <- par()$mfrow on.exit(par(mfrow = par.default)) par(mfrow = c(2, 2)) } ##get package col <- get("col", pos = .LuminescenceEnv) ##set some plot settings plot_settings <- list( xlab = "Stimulation time [s]", ylim = NULL, xlim = NULL, log = "", mtext = "" ) ##modify on request plot_settings <- modifyList(x = plot_settings, val = list(...)) ##get unique irradiation times ... for plotting irradiation_times.unique <- unique(TIMESINCEIRR) ##limit to max 5 if(length(irradiation_times.unique) >= 5){ irradiation_times.unique <- irradiation_times.unique[seq(1, length(irradiation_times.unique), length.out = 5)] } ## plot Lx-curves ----- if (!is.null(object)) { if (length(structure) == 2) { if (is(plot.single, "logical") || (is(plot.single, "numeric") & 1 %in% plot.single)) { plot_RLum( set_RLum(class = "RLum.Analysis", records = object_clean[seq(1, length(object_clean), by = 2)]), combine = TRUE, col = c(col[1:5], rep( rgb(0, 0, 0, 0.3), abs(length(TIMESINCEIRR) - 5) )), records_max = 10, plot.single = TRUE, legend.text = c(paste(round(irradiation_times.unique, 1), "s")), xlab = plot_settings$xlab, xlim = plot_settings$xlim, log = plot_settings$log, legend.pos = "outside", main = expression(paste(L[x], " - curves")), mtext = plot_settings$mtext ) ##add integration limits abline(v = c( object_clean[[1]][range(signal.integral), 1], object_clean[[1]][range(background.integral), 1]), lty = c(2,2,2,2), col = c("green", "green", "red", "red")) } # plot Tx-curves ---- if (is(plot.single, "logical") || (is(plot.single, "numeric") & 2 %in% plot.single)) { plot_RLum( set_RLum(class = "RLum.Analysis", records = object_clean[seq(2, length(object_clean), by = 2)]), combine = TRUE, records_max = 10, plot.single = TRUE, legend.text = paste(round(irradiation_times.unique, 1), "s"), xlab = plot_settings$xlab, log = plot_settings$log, legend.pos = 
"outside", main = expression(paste(T[x], " - curves")), mtext = plot_settings$mtext ) if (is.null(list(...)$signal.integral.Tx)) { ##add integration limits abline(v = c( object_clean[[1]][range(signal.integral), 1], object_clean[[1]][range(background.integral), 1]), lty = c(2,2,2,2), col = c("green", "green", "red", "red")) } else{ ##add integration limits abline( v = range(list(...)$signal.integral.Tx) * max(as.matrix(object_clean[[1]][, 1])) / nrow(as.matrix(object_clean[[1]])), lty = 2, col = "green" ) abline( v = range(list(...)$background.integral.Tx) * max(as.matrix(object_clean[[1]][, 1])) / nrow(as.matrix(object_clean[[1]])), lty = 2, col = "red" ) } } } else{ if (is(plot.single, "logical") || (is(plot.single, "numeric") & 1 %in% plot.single)) { plot_RLum( set_RLum(class = "RLum.Analysis", records = object_clean), combine = TRUE, records_max = 10, plot.single = TRUE, legend.text = c(paste(round(irradiation_times.unique, 1), "s")), legend.pos = "outside", xlab = plot_settings$xlab, log = plot_settings$log, main = expression(paste(L[x], " - curves")), mtext = plot_settings$mtext ) ##add integration limits abline( v = range(signal.integral) * max(as.matrix(object_clean[[1]][, 1])) / nrow(as.matrix(object_clean[[1]])), lty = 2, col = "green" ) abline( v = range(background.integral) * max(as.matrix(object_clean[[1]][, 1])) / nrow(as.matrix(object_clean[[1]])), lty = 2, col = "red" ) } ##empty Tx plot if (is(plot.single, "logical") || (is(plot.single, "numeric") & 2 %in% plot.single)) { plot( NA, NA, xlim = c(0, 1), ylim = c(0, 1), xlab = "", ylab = "", axes = FALSE ) text(x = 0.5, y = 0.5, labels = expression(paste("No ", T[x], " curves detected"))) } } }else{ if (is(plot.single, "logical") || (is(plot.single, "numeric") & 1 %in% plot.single)) { ##empty Lx plot plot( NA, NA, xlim = c(0, 1), ylim = c(0, 1), xlab = "", ylab = "", axes = FALSE ) text(x = 0.5, y = 0.5, labels = expression(paste("No ", L[x], " curves detected"))) } if (is(plot.single, "logical") || 
(is(plot.single, "numeric") & 2 %in% plot.single)) { ##empty Tx plot plot( NA, NA, xlim = c(0, 1), ylim = c(0, 1), xlab = "", ylab = "", axes = FALSE ) text(x = 0.5, y = 0.5, labels = expression(paste("No ", T[x], " curves detected"))) } } ## plot fading ---- if (is(plot.single, "logical") || (is(plot.single, "numeric") & 3 %in% plot.single)) { if(all(is.na(LxTx_table[["LxTx_NORM"]]))){ shape::emptyplot() text(x = .5, y = .5, labels = "All NA values!") }else{ plot( NA, NA, ylab = "Norm. intensity", xaxt = "n", xlab = "Time since irradition [s]", sub = expression(paste("[", log[10](t / t[c]), "]")), ylim = if(is.null(plot_settings$ylim)){ if (max(LxTx_table[["LxTx_NORM"]]) > 1.1) { c(0.1, max(LxTx_table[["LxTx_NORM"]]) + max(LxTx_table[["LxTx_NORM.ERROR"]])) } else { c(0.1, 1.1) } } else { plot_settings$ylim }, xlim = range(LxTx_table[["TIMESINCEIRR_NORM.LOG"]], na.rm = TRUE), main = "Signal Fading" ) ##add axis (with an additional formatting to provide a nice log10-axis) ##https://stackoverflow.com/questions/6897243/labelling-logarithmic-scale-display-in-r x_axis_lab <- seq(0:nchar(floor(max(LxTx_table[["TIMESINCEIRR"]])))) x_axis_ticks <- log10((10^x_axis_lab)/tc) ## if we have less then two values to show, we fall back to the ## old data representation. 
if (length(x_axis_ticks[x_axis_ticks > 0]) > 2) { axis( side = 1, at = x_axis_ticks, labels = sapply(x_axis_lab, function(i) as.expression(bquote(10 ^ .(i)))) ) ##lower axis axis( side = 1, at = x_axis_ticks, labels = paste0("[",round(x_axis_ticks,1),"]"), cex.axis = 0.7, tick = FALSE, line = 0.75) } else { axis( side = 1, at = axTicks(side = 1), labels = suppressWarnings(format((10 ^ (axTicks(side = 1)) * tc), digits = 1, decimal.mark = "", scientific = TRUE))) ##lower axis axis( side = 1, at = axTicks(1), labels = axTicks(1), cex.axis = 0.7, tick = FALSE, line = 0.75) } mtext( side = 3, paste0( "g-value: ", round(g_value$FIT, digits = 2), " \u00b1 ", round(g_value$SD, digits = 2), " (%/decade) | tc = ", format(tc, digits = 4, scientific = TRUE) ), cex = par()$cex * 0.9 ) ##add MC error polygon x_range <- range(LxTx_table[["TIMESINCEIRR_NORM.LOG"]], na.rm = TRUE) x <- seq(x_range[1], x_range[2], length.out = 50) m <- matrixStats::rowRanges(vapply(1:n.MC, function(i){ fit_matrix[2, i] * x + fit_matrix[1, i] }, numeric(length(x)))) polygon( x = c(x, rev(x)), y = c(m[, 2], rev(m[, 1])), col = rgb(0, 0, 0, 0.2), border = NA ) ##add master curve in red curve( fit$coefficient[2] * x + fit$coefficient[1], col = "red", add = TRUE, lwd = 1.5 ) ##add power law curve curve( x ^ 3 * fit_power$coefficient[2] + x ^ 2 * fit_power$coefficient[3] + x * fit_power$coefficient[4] + fit_power$coefficient[1], add = TRUE, col = "blue", lty = 2 ) ##addpoints points(x = LxTx_table[["TIMESINCEIRR_NORM.LOG"]], y = LxTx_table[["LxTx_NORM"]], pch = 21, bg = "grey") ##error bars segments( x0 = LxTx_table[["TIMESINCEIRR_NORM.LOG"]], x1 = LxTx_table[["TIMESINCEIRR_NORM.LOG"]], y0 = LxTx_table[["LxTx_NORM"]] + LxTx_table[["LxTx_NORM.ERROR"]], y1 = LxTx_table[["LxTx_NORM"]] - LxTx_table[["LxTx_NORM.ERROR"]], col = "grey" ) ##add legend legend( "bottom", legend = c("fit", "fit MC", "trend"), col = c("red", "grey", "blue"), lty = c(1, 1, 2), bty = "n", horiz = TRUE ) }#end if a }# if 
(is(plot.single, "logical") || (is(plot.single, "numeric") & 4 %in% plot.single)) { if(all(is.na(g_value.MC))){ shape::emptyplot() text(x = .5, y = .5, labels = "All NA values!") }else{ plot(density(g_value.MC), main = "Density: g-values (%/decade)") rug(x = g_value.MC) abline(v = c(g_value[["Q_0.16"]], g_value[["Q_0.84"]]), lty = 2, col = "darkgreen") abline(v = c(g_value[["Q_0.025"]], g_value[["Q_0.975"]]), lty = 2, col = "red") legend( "topleft", legend = c("HPD - 68 %", "HPD - 95 %"), lty = 2, col = c("darkgreen", "red"), bty = "n" ) } } } # Terminal ------------------------------------------------------------------------------------ if (verbose){ cat("\n[analyse_FadingMeasurement()]\n") cat(paste0("\n n.MC:\t",n.MC)) cat(paste0("\n tc:\t",format(tc, digits = 4, scientific = TRUE), " s")) cat("\n---------------------------------------------------") cat(paste0("\nT_0.5 interpolated:\t",T_0.5$T_0.5_INTERPOLATED)) cat(paste0("\nT_0.5 predicted:\t",format(T_0.5$T_0.5_PREDICTED, digits = 2, scientific = TRUE))) cat(paste0("\ng-value:\t\t", round(g_value$FIT, digits = 2), " \u00b1 ", round(g_value$SD, digits = 2), " (%/decade)")) cat(paste0("\ng-value (norm. 
2 days):\t", round(g_value_2days[1], digits = 2), " \u00b1 ", round(g_value_2days[2], digits = 2), " (%/decade)")) cat("\n---------------------------------------------------") cat(paste0("\nrho':\t\t\t", format(rhoPrime$MEAN, digits = 3), " \u00b1 ", format(rhoPrime$SD, digits = 3))) cat(paste0("\nlog10(rho'):\t\t", suppressWarnings(round(log10(rhoPrime$MEAN), 2)), " \u00b1 ", round(rhoPrime$SD / (rhoPrime$MEAN * log(10, base = exp(1))), 2))) cat("\n---------------------------------------------------\n") } # Return -------------------------------------------------------------------------------------- ##set data.frame if(all(is.na(g_value))){ fading_results <- data.frame( FIT = NA, MEAN = NA, SD = NA, Q_0.025 = NA, Q_0.16 = NA, Q_0.84 = NA, Q_0.975 = NA, TC = NA, G_VALUE_2DAYS = NA, G_VALUE_2DAYS.ERROR = NA, T_0.5_INTERPOLATED = NA, T_0.5_PREDICTED = NA, T_0.5_PREDICTED.LOWER = NA, T_0.5_PREDICTED.UPPER = NA, UID = uid, stringsAsFactors = FALSE ) }else{ fading_results <- data.frame( g_value, TC = tc, G_VALUE_2DAYS = g_value_2days[1], G_VALUE_2DAYS.ERROR = g_value_2days[2], T_0.5, UID = uid, stringsAsFactors = FALSE ) } ##return return(set_RLum( class = "RLum.Results", data = list( fading_results = fading_results, fit = fit, rho_prime = rhoPrime, LxTx_table = LxTx_table, irr.times = irradiation_times ), info = list(call = sys.call()) )) } Luminescence/R/Second2Gray.R0000644000176200001440000001475214264017373015350 0ustar liggesusers#' Converting equivalent dose values from seconds (s) to Gray (Gy) #' #' Conversion of absorbed radiation dose in seconds (s) to the SI unit Gray #' (Gy) including error propagation. Normally used for equivalent dose data. 
#' #' Calculation of De values from seconds (s) to Gray (Gy) #' #' \deqn{De [Gy] = De [s] * Dose Rate [Gy/s])} #' #' Provided calculation error propagation methods for error calculation #' (with `'se'` as the standard error and `'DR'` of the dose rate of the beta-source): #' #' **(1) `omit`** (default) #' #' \deqn{se(De) [Gy] = se(De) [s] * DR [Gy/s]} #' #' In this case the standard error of the dose rate of the beta-source is #' treated as systematic (i.e. non-random), it error propagation is omitted. #' However, the error must be considered during calculation of the final age. #' (cf. Aitken, 1985, pp. 242). This approach can be seen as method (2) (gaussian) #' for the case the (random) standard error of the beta-source calibration is #' 0. Which particular method is requested depends on the situation and cannot #' be prescriptive. #' #' **(2) `gaussian`** error propagation #' #' \deqn{se(De) [Gy] = \sqrt((DR [Gy/s] * se(De) [s])^2 + (De [s] * se(DR) [Gy/s])^2)} #' #' Applicable under the assumption that errors of `De` and `se` are uncorrelated. #' #' **(3) `absolute`** error propagation #' #' \deqn{se(De) [Gy]= abs(DR [Gy/s] * se(De) [s]) + abs(De [s] * se(DR) [Gy/s])} #' #' Applicable under the assumption that errors of `De` and `se` are correlated. #' #' #' @param data [data.frame] (**required**): #' input values, structure: data (`values[,1]`) and data error (`values [,2]`) #' are required #' #' @param dose.rate [RLum.Results-class], [data.frame] or [numeric] (**required**): #' `RLum.Results` needs to be originated from the function [calc_SourceDoseRate], #' for `vector` dose rate in Gy/s and dose rate error in Gy/s #' #' @param error.propagation [character] (*with default*): #' error propagation method used for error calculation (`omit`, `gaussian` or #' `absolute`), see details for further information #' #' @return #' Returns a [data.frame] with converted values. 
#' #' @note #' If no or a wrong error propagation method is given, the execution of the function is #' stopped. Furthermore, if a `data.frame` is provided for the dose rate values is has to #' be of the same length as the data frame provided with the argument `data` #' #' @section Function version: 0.6.0 #' #' @author #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)\cr #' Michael Dietze, GFZ Potsdam (Germany)\cr #' Margret C. Fuchs, HZDR, Helmholtz-Institute Freiberg for Resource Technology (Germany) #' #' @seealso [calc_SourceDoseRate] #' #' @references #' Aitken, M.J., 1985. Thermoluminescence dating. Academic Press. #' #' @keywords manip #' #' @examples #' #' ##(A) for known source dose rate at date of measurement #' ## - load De data from the example data help file #' data(ExampleData.DeValues, envir = environment()) #' ## - convert De(s) to De(Gy) #' Second2Gray(ExampleData.DeValues$BT998, c(0.0438,0.0019)) #' #' #' #' #' #' ##(B) for source dose rate calibration data #' ## - calculate source dose rate first #' dose.rate <- calc_SourceDoseRate(measurement.date = "2012-01-27", #' calib.date = "2014-12-19", #' calib.dose.rate = 0.0438, #' calib.error = 0.0019) #' # read example data #' data(ExampleData.DeValues, envir = environment()) #' #' # apply dose.rate to convert De(s) to De(Gy) #' Second2Gray(ExampleData.DeValues$BT998, dose.rate) #' #' @md #' @export Second2Gray <- function( data, dose.rate, error.propagation = "omit" ){ # Integrity tests ----------------------------------------------------------------------------- ##(1) data.frame or RLum.Data.Curve object? if(!is(data, "data.frame")){ stop("[Second2Gray()] 'data' object has to be of type 'data.frame'!") } ##(2) numeric, data.frame or RLum.Data.Curve object? 
if(!is(dose.rate, "numeric") & !is(dose.rate, "RLum.Results") & !is(dose.rate, "data.frame")){ stop("[Second2Gray()] 'dose.rate' object has to be of type 'numeric', 'data.frame' or 'RLum.Results'!") } ##(3) last check to avoid problems if(is(dose.rate, "data.frame")){ if(nrow(dose.rate)!=nrow(data)){ stop("[Second2Gray()] the data frames in 'data' and 'dose.rate' need to be of similar length!") } } ##(4) check for right orginator if(is(dose.rate, "RLum.Results")){ if(dose.rate@originator != "calc_SourceDoseRate"){ stop("[Second2Gray()] Wrong originator for dose.rate 'RLum.Results' object.") }else{ ##check what is what if(!is(get_RLum(dose.rate, data.object = "dose.rate"), "data.frame")){ dose.rate <- data.frame( dose.rate <- as.numeric(get_RLum(dose.rate, data.object = "dose.rate")[1]), dose.rate.error <- as.numeric(get_RLum(dose.rate, data.object = "dose.rate")[2]) ) }else{ dose.rate <- get_RLum(dose.rate, data.object = "dose.rate") } } } # Calculation --------------------------------------------------------------------------------- De.seconds <- data[,1] De.error.seconds <- data[,2] De.gray <- NA De.error.gray <- NA if(is(dose.rate,"data.frame")){ De.gray <- round(De.seconds*dose.rate[,1], digits=2) }else{ De.gray <- round(De.seconds*dose.rate[1], digits=2) } if(error.propagation == "omit"){ if(is(dose.rate,"data.frame")){ De.error.gray <- round(dose.rate[,1]*De.error.seconds, digits=3) }else{ De.error.gray <- round(dose.rate[1]*De.error.seconds, digits=3) } }else if(error.propagation == "gaussian"){ if(is(dose.rate,"data.frame")){ De.error.gray <- round(sqrt((De.seconds*dose.rate[,2])^2+(dose.rate[,1]*De.error.seconds)^2), digits=3) }else{ De.error.gray <- round(sqrt((De.seconds*dose.rate[2])^2+(dose.rate[1]*De.error.seconds)^2), digits=3) } }else if (error.propagation == "absolute"){ if(is(dose.rate,"data.frame")){ De.error.gray <- round(abs(dose.rate[,1] * De.error.seconds) + abs(De.seconds * dose.rate[,2]), digits=3) }else{ De.error.gray <- 
round(abs(dose.rate[1] * De.error.seconds) + abs(De.seconds * dose.rate[2]), digits=3) } }else{ stop("[Second2Gray()] unsupported error propagation method!" ) } # Return -------------------------------------------------------------------------------------- data <- data.frame(De=De.gray, De.error=De.error.gray) return(data) } Luminescence/R/RLum.Data.Image-class.R0000644000176200001440000002424414264017373017100 0ustar liggesusers#' @include get_RLum.R set_RLum.R names_RLum.R NULL #' Class `"RLum.Data.Image"` #' #' Class for representing luminescence image data (TL/OSL/RF). Such data are for example produced #' by the function [read_SPE2R] #' #' @name RLum.Data.Image-class #' #' @docType class #' #' @slot recordType #' Object of class [character] containing the type of the curve (e.g. "OSL image", "TL image") #' #' @slot curveType #' Object of class [character] containing curve type, allowed values #' are measured or predefined #' #' @slot data #' Object of class [array] containing image data. #' #' @slot info #' Object of class [list] containing further meta information objects #' #' @note #' The class should only contain data for a set of images. For additional #' elements the slot `info` can be used. #' #' @section Objects from the class: #' Objects can be created by calls of the form `set_RLum("RLum.Data.Image", ...)`. 
#' #' @section Class version: 0.5.1 #' #' @author #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #' @seealso [RLum-class], [RLum.Data-class], [plot_RLum], [read_SPE2R], [read_TIFF2R] #' #' @keywords classes #' #' @examples #' #' showClass("RLum.Data.Image") #' #' ##create empty RLum.Data.Image object #' set_RLum(class = "RLum.Data.Image") #' #' @md #' @export setClass( "RLum.Data.Image", slots = list( recordType = "character", curveType = "character", data = "array", info = "list" ), contains = "RLum.Data", prototype = list ( recordType = character(), curveType = character(), data = array(), info = list() ) ) # as() ---------------------------------------------------------------------------------------- ##DATA.FRAME ##COERCE RLum.Data.Image >> data.frame AND data.frame >> RLum.Data.Image #' as() #' #' for `[RLum.Data.Image-class]` #' #' **[RLum.Data.Image-class]** #' #' \tabular{ll}{ #' **from** \tab **to**\cr #' `data.frame` \tab `data.frame`\cr #' `matrix` \tab `matrix` #' } #' #' @md #' @name as ## from data.frame ---- setAs("data.frame", "RLum.Data.Image", function(from,to){ new(to, recordType = "unkown curve type", curveType = "NA", data = array(unlist(from), dim = c(nrow(from),ncol(from),1)), info = list()) }) ## to data.frame ---- setAs("RLum.Data.Image", "data.frame", function(from){ if(dim(from@data)[3] == 1) { as.data.frame(from@data[,,1]) } else { stop("No viable coercion to data.frame, object contains multiple frames.", call. = FALSE) } }) ## from matrix ---- setAs("matrix", "RLum.Data.Image", function(from,to){ new(to, recordType = "unkown curve type", curveType = "NA", data = array(from, c(nrow(from), ncol(from), 1)), info = list()) }) ## to matrix ---- setAs("RLum.Data.Image", "matrix", function(from){ if(dim(from@data)[3] == 1) { from@data[,,1, drop = TRUE] } else { stop("No viable coercion to matrix, object contains multiple frames. Please convert to array instead.", call. 
= FALSE) } }) ## from array ---- setAs("array", "RLum.Data.Image", function(from, to){ new(to, recordType = "unkown curve type", curveType = "NA", data = from, info = list()) }) ## to array ---- setAs("RLum.Data.Image", "array", function(from) from@data) ## from list ---- setAs("list", "RLum.Data.Image", function(from, to){ array_list <- lapply(from, function(x) array(unlist(as.vector(x)), c(nrow(x), ncol(x), 1))) new(to, recordType = "unkown curve type", curveType = "NA", data = array(unlist(array_list), c(nrow(array_list[[1]]), ncol(array_list[[1]]), length(array_list))), info = list()) }) ## to list ---- setAs("RLum.Data.Image", "list", function(from){ lapply(1:dim(from@data)[3], function(x) from@data[,,x]) }) # show() -------------------------------------------------------------------------------------- #' @describeIn RLum.Data.Image #' Show structure of `RLum.Data.Image` object #' #' @keywords internal #' #' @md #' @export setMethod("show", signature(object = "RLum.Data.Image"), function(object){ ## get dimension dim <- dim(object@data) ##print information cat("\n [RLum.Data.Image-class]") cat("\n\t recordType:", object@recordType) cat("\n\t curveType:", object@curveType) cat("\n\t .. recorded frames:", max(1,dim[3], na.rm = TRUE)) cat("\n\t .. .. pixel per frame:", dim[1]*dim[2]) cat("\n\t .. .. x dimension [px]:", dim[1]) cat("\n\t .. .. y dimension [px]:", dim[2]) cat("\n\t .. .. full pixel value range:", paste(format(range(object@data), scientific = TRUE, digits = 2), collapse=" : ")) cat("\n\t additional info elements:", length(object@info)) #cat("\n\t\t >> names:", names(object@info)) } ) # set_RLum() ---------------------------------------------------------------------------------- #' @describeIn RLum.Data.Image #' Construction method for RLum.Data.Image object. The slot info is optional #' and predefined as empty list by default. 
#'
#' @param class [`set_RLum`]; [character]: name of the `RLum` class to create
#'
#' @param originator [`set_RLum`]; [character] (*automatic*):
#' contains the name of the calling function (the function that produces this object);
#' can be set manually.
#'
#' @param .uid [`set_RLum`]; [character] (*automatic*):
#' sets an unique ID for this object using the internal C++ function `create_UID`.
#'
#' @param .pid [`set_RLum`]; [character] (*with default*):
#' option to provide a parent id for nesting at will.
#'
#' @param recordType [`set_RLum`]; [character]:
#' record type (e.g. "OSL")
#'
#' @param curveType [`set_RLum`]; [character]:
#' curve type (e.g. "predefined" or "measured")
#'
#' @param data [`set_RLum`]; [matrix]:
#' raw curve data. If data is of type `RLum.Data.Image` this can be used to
#' re-construct the object, i.e. modified parameters except `.uid` and `.pid`. The rest
#' will be subject to copy and paste unless provided.
#'
#' @param info [`set_RLum`]; [list]:
#' info elements
#'
#' @return
#'
#' **`set_RLum`**
#'
#' Returns an object from class `RLum.Data.Image`
#'
#' @md
#' @export
setMethod(
  "set_RLum",
  signature = signature("RLum.Data.Image"),
  definition = function(
    class,
    originator,
    .uid,
    .pid,
    recordType = "Image",
    curveType = NA_character_,
    data = array(),
    info = list()) {

    ## start from an empty object and assign the slots directly; this is
    ## faster than routing every value through new()
    out <- new("RLum.Data.Image")

    if (is(data, "RLum.Data.Image")) {
      ## reconstruction mode: `data` is itself an RLum.Data.Image, so recycle
      ## its slots for every argument the caller did not supply explicitly
      if (missing(recordType)) recordType <- data@recordType
      if (missing(curveType)) curveType <- data@curveType
      if (missing(info)) info <- data@info

      ## .uid and .pid always come from the template object; they cannot be
      ## overridden here because the arguments passed down from set_RLum(),
      ## the generic function, would reset both
      out@originator <- data@originator
      out@data <- data@data
      out@.uid <- data@.uid
      out@.pid <- data@.pid

    } else {
      ## plain construction mode: take everything from the arguments
      out@originator <- originator
      out@data <- data
      out@.uid <- .uid
      out@.pid <- .pid
    }

    out@recordType <- recordType
    out@curveType <- curveType
    out@info <- info

    return(out)
  }
)

# get_RLum() ----------------------------------------------------------------------------------
#' @describeIn RLum.Data.Image
#' Accessor method for `RLum.Data.Image` object. The argument `info.object` is
#' optional to directly access the info elements. If no info element name is
#' provided, the raw image data (`array`) will be returned.
#'
#' @param object [`get_RLum`], [`names_RLum`] (**required**):
#' an object of class [RLum.Data.Image-class]
#'
#' @param info.object [`get_RLum`]; [character]:
#' name of the info object to returned
#'
#' @return
#'
#' **`get_RLum`**
#'
#' 1. Returns the data object ([array])
#' 2. only the info object if `info.object` was set.
#'
#' @md
#' @export
setMethod("get_RLum",
          signature("RLum.Data.Image"),
          definition = function(object, info.object) {

            ## no info element requested -> hand back the raw image array
            if (missing(info.object))
              return(object@data)

            if (!inherits(info.object, "character"))
              stop("[get_RLum] 'info.object' has to be a character!", call. = FALSE)

            ## an unknown element name is an error; list the valid names
            if (!info.object %in% names(object@info))
              stop(paste0(
                "[get_RLum] Invalid element name. Valid names are: ",
                paste(names(object@info), collapse = ", ")
              ), call. = FALSE)

            unlist(object@info[info.object])
          })

# names_RLum() --------------------------------------------------------------------------------
#' @describeIn RLum.Data.Image
#' Returns the names info elements coming along with this curve object
#'
#' @return
#'
#' **`names_RLum`**
#'
#' Returns the names of the info elements
#'
#' @md
#' @export
setMethod(
  "names_RLum",
  "RLum.Data.Image",
  function(object) names(object@info))
Luminescence/R/calc_SourceDoseRate.R0000644000176200001440000001677714264017373017106 0ustar liggesusers#' Calculation of the source dose rate via the date of measurement
#'
#' Calculating the dose rate of the irradiation source via the date of
#' measurement based on: source calibration date, source dose rate, dose rate
#' error. The function returns a data.frame that provides the input argument
#' dose_rate for the function [Second2Gray].
#'
#' Calculation of the source dose rate based on the time elapsed since the last
#' calibration of the irradiation source. Decay parameters assume a Sr-90 beta
#' source.
\deqn{dose.rate = D0 * exp(-log(2) / T.1/2 * t)} \cr with: D0 <-
#' calibration dose rate T.1/2 <- half-life of the source nuclide (here in
#' days) t <- time since source calibration (in days) log(2) / T.1/2 equals the
#' decay constant lambda
#'
#' Information on the date of measurements may be taken from the data's
#' original .BIN file (using e.g., `BINfile <- readBIN2R()` and the slot
#' `BINfile@@METADATA$DATE`)
#'
#' **Allowed source types and related values**
#'
#' \tabular{rllll}{
#'  **#** \tab **Source type** \tab **T.1/2** \tab **Reference** \cr
#' `[1]` \tab Sr-90 \tab 28.90 y \tab NNDC, Brookhaven National Laboratory \cr
#' `[2]`\tab Am-241 \tab 432.6 y \tab NNDC, Brookhaven National Laboratory \cr
#' `[3]` \tab Co-60 \tab 5.274 y \tab NNDC, Brookhaven National Laboratory \cr
#' `[4]` \tab Cs-137 \tab 30.08 y \tab NNDC, Brookhaven National Laboratory}
#'
#' @param measurement.date [character] or [Date] (with default): Date of measurement in `"YYYY-MM-DD"`.
#' If no value is provided, the date will be set to today. The argument can be provided as vector.
#'
#' @param calib.date [character] or [Date] (**required**):
#' date of source calibration in `"YYYY-MM-DD"`
#'
#' @param calib.dose.rate [numeric] (**required**):
#' dose rate at date of calibration in Gy/s or Gy/min
#'
#' @param calib.error [numeric] (**required**):
#' error of dose rate at date of calibration Gy/s or Gy/min
#'
#' @param source.type [character] (*with default*):
#' specify irradiation source (`Sr-90`, `Co-60`, `Cs-137`, `Am-241`),
#' see details for further information
#'
#' @param dose.rate.unit [character] (*with default*):
#' specify dose rate unit for input (`Gy/min` or `Gy/s`), the output is given in
#' Gy/s as valid for the function [Second2Gray]
#'
#' @param predict [integer] (*with default*):
#' option allowing to predict the dose rate of the source over time in days
#' set by the provided value. Starting date is the value set with
#' `measurement.date`, e.g., `calc_SourceDoseRate(..., predict = 100)` calculates
#' the source dose rate for the next 100 days.
#'
#' @return
#' Returns an S4 object of type [RLum.Results-class].
#' Slot `data` contains a [list] with the following structure:
#'
#' ```
#' $ dose.rate (data.frame)
#' .. $ dose.rate
#' .. $ dose.rate.error
#' .. $ date (corresponding measurement date)
#' $ parameters (list)
#' .. $ source.type
#' .. $ halflife
#' .. $ dose.rate.unit
#' $ call (the original function call)
#' ```
#'
#' The output should be accessed using the function [get_RLum].\cr
#' A plot method of the output is provided via [plot_RLum]
#'
#' @note
#' Please be careful when using the option `predict`, especially when a multiple set
#' for `measurement.date` and `calib.date` is provided. For the source dose rate prediction
#' the function takes the last value `measurement.date` and predicts from that the
#' source dose rate for the number of days requested,
#' means: the (multiple) original input will be replaced. However, the function
#' do not change entries for the calibration dates, but mix them up. Therefore,
#' it is not recommended to use this option when multiple calibration dates (`calib.date`)
#' are provided.
#'
#' @section Function version: 0.3.2
#'
#' @author
#' Margret C. Fuchs, HZDR, Helmholtz-Institute Freiberg for Resource Technology (Germany) \cr
#' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#'
#' @seealso [Second2Gray], [get_RLum], [plot_RLum]
#'
#' @references
#' NNDC, Brookhaven National Laboratory `http://www.nndc.bnl.gov/`
#'
#' @keywords manip
#'
#' @examples
#'
#'
#' ##(1) Simple function usage
#' ##Basic calculation of the dose rate for a specific date
#' dose.rate <- calc_SourceDoseRate(measurement.date = "2012-01-27",
#'                                  calib.date = "2014-12-19",
#'                                  calib.dose.rate = 0.0438,
#'                                  calib.error = 0.0019)
#'
#' ##show results
#' get_RLum(dose.rate)
#'
#' ##(2) Usage in combination with another function (e.g., Second2Gray() )
#' ## load example data
#' data(ExampleData.DeValues, envir = environment())
#'
#' ## use the calculated variable dose.rate as input argument
#' ## to convert De(s) to De(Gy)
#' Second2Gray(ExampleData.DeValues$BT998, dose.rate)
#'
#' ##(3) source rate prediction and plotting
#' dose.rate <- calc_SourceDoseRate(measurement.date = "2012-01-27",
#'                                  calib.date = "2014-12-19",
#'                                  calib.dose.rate = 0.0438,
#'                                  calib.error = 0.0019,
#'                                  predict = 1000)
#' plot_RLum(dose.rate)
#'
#'
#'##(4) export output to a LaTeX table (example using the package 'xtable')
#'\dontrun{
#' xtable::xtable(get_RLum(dose.rate))
#'
#'}
#'
#' @md
#' @export
calc_SourceDoseRate <- function(
  measurement.date = Sys.Date(),
  calib.date,
  calib.dose.rate,
  calib.error,
  source.type = "Sr-90",
  dose.rate.unit = "Gy/s",
  predict = NULL
){

  ## allow character input for the measurement date
  if (is(measurement.date, "character")) {
    measurement.date <- as.Date(measurement.date)
  }

  ##calibration date
  if(is(calib.date, "character")) {
    calib.date <- as.Date(calib.date)
  }

  # --- if predict is set
  ## start the prediction from the LAST measurement date; tail(x, 1) is
  ## required here, as tail(x) returns up to six elements and seq() only
  ## accepts a single `from` value (would fail for vector input)
  if(!is.null(predict) && predict > 1){
    measurement.date <- seq(tail(measurement.date, 1), by = 1, length = predict)
  }

  # -- calc days since source calibration
  decay.days <- measurement.date - calib.date

  # -- calc dose rate of source at date of measurement, considering the chosen source-type
  ##set halflife; switch() returns NULL for an unknown source type
  halflife.years <- switch(
    source.type,
    "Sr-90" = 28.90,
    "Am-241" = 432.6,
    "Co-60" = 5.274,
    "Cs-137" = 30.08
  )

  if(is.null(halflife.years))
    stop("[calc_SourceDoseRate()] Source type unknown or currently not supported!", call. = FALSE)

  halflife.days <- halflife.years * 365

  # N(t) = N(0)*e^((lambda * t) with lambda = log(2)/T1.2)
  measurement.dose.rate <- (calib.dose.rate) *
    exp((-log(2) / halflife.days) * as.numeric(decay.days))
  measurement.dose.rate.error <- (calib.error) *
    exp((-log(2) / halflife.days) * as.numeric(decay.days))

  # -- convert to input unit to [Gy/s]
  if(dose.rate.unit == "Gy/min"){
    source.dose.rate <- measurement.dose.rate / 60
    source.dose.rate.error <- source.dose.rate *
      (measurement.dose.rate.error / measurement.dose.rate)

  } else if(dose.rate.unit == "Gy/s"){
    source.dose.rate <- measurement.dose.rate
    source.dose.rate.error <- measurement.dose.rate.error

  } else {
    ## fail early with a clear message instead of an obscure
    ## "object 'source.dose.rate' not found" error further down
    stop("[calc_SourceDoseRate()] 'dose.rate.unit' has to be either 'Gy/s' or 'Gy/min'!",
         call. = FALSE)
  }

  # Output --------------------------------------------------------------------------------------
  dose_rate <- data.frame(
    dose.rate = source.dose.rate,
    dose.rate.error = source.dose.rate.error,
    date = measurement.date,
    stringsAsFactors = TRUE
  )

  temp.return <- set_RLum(
    class = "RLum.Results",
    data = list(
      dose.rate = dose_rate,
      parameters = list(source.type = source.type,
                        halflife = halflife.years,
                        dose.rate.unit = dose.rate.unit),
      call = sys.call()
    ))

  return(temp.return)
}
Luminescence/R/calc_Kars2008.R0000644000176200001440000000735714236146743015422 0ustar liggesusers#' Apply the Kars et al. (2008) model (deprecated)
#'
#' A function to calculate the expected sample specific fraction of saturation
#' following Kars et al. (2008) and Huntley (2006). This function is deprecated
#' and will eventually be removed. Please use `calc_Huntley2006()` instead.
#'
#' This function applies the approach described in Kars et al.
(2008),
#' developed from the model of Huntley (2006) to calculate the expected sample
#' specific fraction of saturation of a feldspar and also to calculate fading
#' corrected age using this model. \eqn{\rho}' (`rhop`), the density of recombination
#' centres, is a crucial parameter of this model and must be determined
#' separately from a fading measurement. The function [analyse_FadingMeasurement]
#' can be used to calculate the sample specific \eqn{\rho}' value.
#'
#' @param fit.method [character] (*with default*):
#' Fit function of the dose response curve. Can either be `EXP` (the default)
#' or `GOK`. Note that `EXP` (single saturating exponential) is the original
#' function the model after Huntley (2006) and Kars et al. (2008) was
#' designed to use. The use of a general-order kinetics function (`GOK`)
#' is an experimental adaption of the model and should only be used
#' with great care.
#'
#' @param ... Parameters passed to [calc_Huntley2006].
#'
#' @return An [RLum.Results-class] object is returned:
#'
#' @section Function version: 0.4.0
#'
#' @author
#' Georgina E. King, University of Bern (Switzerland) \cr
#' Christoph Burow, University of Cologne (Germany)
#'
#' @note **This function is deprecated and will eventually be removed from the package.**
#' **Please use the function [calc_Huntley2006()] instead**
#' **(use `fit.method = "EXP"` to apply the model after Kars et al., 2008).**
#'
#' @keywords datagen
#'
#' @references
#'
#' Kars, R.H., Wallinga, J., Cohen, K.M., 2008. A new approach towards anomalous fading correction for feldspar
#' IRSL dating-tests on samples in field saturation. Radiation Measurements 43, 786-790. doi:10.1016/j.radmeas.2008.01.021
#'
#' Huntley, D.J., 2006. An explanation of the power-law decay of luminescence.
#' Journal of Physics: Condensed Matter 18, 1359-1365. doi:10.1088/0953-8984/18/4/020
#'
#' King, G.E., Herman, F., Lambert, R., Valla, P.G., Guralnik, B., 2016.
#' Multi-OSL-thermochronometry of feldspar. Quaternary Geochronology 33, 76-87. doi:10.1016/j.quageo.2016.01.004
#'
#'
#' **Further reading**
#'
#' Morthekai, P., Jain, M., Cunha, P.P., Azevedo, J.M., Singhvi, A.K., 2011. An attempt to correct
#' for the fading in million year old basaltic rocks. Geochronometria 38(3), 223-230.
#'
#' @examples
#'
#' ## Load example data (sample UNIL/NB123, see ?ExampleData.Fading)
#' data("ExampleData.Fading", envir = environment())
#'
#' ## (1) Set all relevant parameters
#' # a. fading measurement data (IR50)
#' fading_data <- ExampleData.Fading$fading.data$IR50
#'
#' # b. Dose response curve data
#' data <- ExampleData.Fading$equivalentDose.data$IR50
#'
#' ## (2) Define required function parameters
#' ddot <- c(7.00, 0.004)
#' readerDdot <- c(0.134, 0.0067)
#'
#' # Analyse fading measurement and get an estimate of rho'.
#' # Note that the RLum.Results object can be directly used for further processing.
#' # The number of MC runs is reduced for this example
#' rhop <- analyse_FadingMeasurement(fading_data, plot = TRUE, verbose = FALSE, n.MC = 10)
#'
#' ## (3) Apply the Kars et al. (2008) model to the data
#' kars <- suppressWarnings(
#'   calc_Kars2008(data = data,
#'                 rhop = rhop,
#'                 ddot = ddot,
#'                 readerDdot = readerDdot,
#'                 n.MC = 25)
#' )
#'
#' @md
#' @export
calc_Kars2008 <- function(fit.method = "EXP", ...) {
  ## raise the formal deprecation notice ...
  .Deprecated("calc_Huntley2006")
  ## ... then delegate the actual computation to the replacement model
  calc_Huntley2006(fit.method = fit.method, ...)
}Luminescence/R/calc_MaxDose.R0000644000176200001440000001256114236146743015551 0ustar liggesusers#' Apply the maximum age model to a given De distribution
#'
#' Function to fit the maximum age model to De data. This is a wrapper function
#' that calls [calc_MinDose] and applies a similar approach as described in
#' Olley et al. (2006).
#'
#' **Data transformation**
#'
#' To estimate the maximum dose population
#' and its standard error, the three parameter minimum age model of Galbraith
#' et al. (1999) is adapted. The measured De values are transformed as follows:
#'
#' 1.
convert De values to natural logs
#' 2. multiply the logged data to create a mirror image of the De distribution
#' 3. shift De values along x-axis by the smallest x-value found to obtain only positive values
#' 4. combine in quadrature the measurement error associated with each De value
#' with a relative error specified by `sigmab`
#' 5. apply the MAM to these data
#'
#' When all calculations are done the results are then converted as follows
#' 1. subtract the x-offset
#' 2. multiply the natural logs by -1
#' 3. take the exponent to obtain the maximum dose estimate in Gy
#'
#' **Further documentation**
#'
#' Please see [calc_MinDose].
#'
#' @param data [RLum.Results-class] or [data.frame] (**required**):
#' for [data.frame]: two columns with De `(data[ ,1])` and De error `(data[ ,2])`.
#'
#' @param sigmab [numeric] (**required**):
#' additional spread in De values.
#' This value represents the expected overdispersion in the data should the sample be
#' well-bleached (Cunningham & Walling 2012, p. 100).
#' **NOTE**: For the logged model (`log = TRUE`) this value must be
#' a fraction, e.g. 0.2 (= 20 \%). If the un-logged model is used (`log = FALSE`),
#' sigmab must be provided in the same absolute units of the De values (seconds or Gray).
#' See details ([calc_MinDose].
#'
#' @param log [logical] (*with default*):
#' fit the (un-)logged three parameter minimum dose model to De data
#'
#' @param par [numeric] (*with default*):
#' apply the 3- or 4-parameter minimum age model (`par=3` or `par=4`).
#'
#' @param bootstrap [logical] (*with default*):
#' apply the recycled bootstrap approach of Cunningham & Wallinga (2012).
#'
#' @param init.values [numeric] (*with default*):
#' starting values for gamma, sigma, p0 and mu. Custom values need to be provided in a vector of
#' length three in the form of `c(gamma, sigma, p0)`.
#'
#' @param plot [logical] (*with default*):
#' plot output (`TRUE`/`FALSE`)
#'
#' @param ... further arguments for bootstrapping (`bs.M, bs.N, bs.h, sigmab.sd`).
#' See details for their usage.
#'
#' @return Please see [calc_MinDose].
#'
#' @section Function version: 0.3.1
#'
#' @author
#' Christoph Burow, University of Cologne (Germany) \cr
#' Based on a rewritten S script of Rex Galbraith, 2010
#'
#'
#' @seealso [calc_CentralDose], [calc_CommonDose], [calc_FiniteMixture],
#' [calc_FuchsLang2001], [calc_MinDose]
#'
#' @references
#' Arnold, L.J., Roberts, R.G., Galbraith, R.F. & DeLong, S.B.,
#' 2009. A revised burial dose estimation procedure for optical dating of young
#' and modern-age sediments. Quaternary Geochronology 4, 306-325.
#'
#' Galbraith, R.F. & Laslett, G.M., 1993. Statistical models for mixed fission
#' track ages. Nuclear Tracks Radiation Measurements 4, 459-470.
#'
#' Galbraith, R.F., Roberts, R.G., Laslett, G.M., Yoshida, H. & Olley, J.M.,
#' 1999. Optical dating of single grains of quartz from Jinmium rock shelter,
#' northern Australia. Part I: experimental design and statistical models.
#' Archaeometry 41, 339-364.
#'
#' Galbraith, R.F., 2005. Statistics for
#' Fission Track Analysis, Chapman & Hall/CRC, Boca Raton.
#'
#' Galbraith, R.F. & Roberts, R.G., 2012. Statistical aspects of equivalent dose and error
#' calculation and display in OSL dating: An overview and some recommendations.
#' Quaternary Geochronology 11, 1-27.
#'
#' Olley, J.M., Roberts, R.G., Yoshida, H., Bowler, J.M., 2006. Single-grain optical dating of grave-infill
#' associated with human burials at Lake Mungo, Australia. Quaternary Science
#' Reviews 25, 2469-2474
#'
#' **Further reading**
#'
#' Arnold, L.J. & Roberts, R.G., 2009. Stochastic modelling of multi-grain equivalent dose
#' (De) distributions: Implications for OSL dating of sediment mixtures.
#' Quaternary Geochronology 4, 204-230.
#'
#' Bailey, R.M. & Arnold, L.J., 2006. Statistical modelling of single grain quartz De distributions and an
#' assessment of procedures for estimating burial dose. Quaternary Science
#' Reviews 25, 2475-2502.
#'
#' Cunningham, A.C. & Wallinga, J., 2012. Realizing the potential of fluvial archives using robust OSL chronologies.
#' Quaternary Geochronology 12, 98-106.
#'
#' Rodnight, H., Duller, G.A.T., Wintle, A.G. & Tooth, S., 2006. Assessing the reproducibility and accuracy
#' of optical dating of fluvial deposits. Quaternary Geochronology 1, 109-120.
#'
#' Rodnight, H., 2008. How many equivalent dose values are needed to
#' obtain a reproducible distribution?. Ancient TL 26, 3-10.
#'
#' @examples
#'
#' ## load example data
#' data(ExampleData.DeValues, envir = environment())
#'
#' # apply the maximum dose model
#' calc_MaxDose(ExampleData.DeValues$CA1, sigmab = 0.2, par = 3)
#'
#' @md
#' @export
calc_MaxDose <- function(data, sigmab, log = TRUE, par = 3, bootstrap = FALSE,
                         init.values, plot = TRUE, ...) {

  ## the maximum age model is the minimum age model fitted to the mirrored
  ## De distribution, hence invert = TRUE; plotting is deferred until after
  ## the originator has been re-labelled
  results <- calc_MinDose(data, sigmab, log, par, bootstrap, init.values,
                          plot = FALSE, invert = TRUE, ...)

  ## attribute the output to this wrapper instead of calc_MinDose
  results@originator <- "calc_MaxDose"

  ## optional plot; wrapped in try() so plotting issues do not abort the run
  if (plot)
    try(plot_RLum.Results(results, ...))

  invisible(results)
}
Luminescence/R/plot_RLum.Results.R0000644000176200001440000011464714264017373016605 0ustar liggesusers#' Plot function for an RLum.Results S4 class object
#'
#' The function provides a standardised plot output for data of an RLum.Results
#' S4 class object
#'
#' The function produces a multiple plot output. A file output is recommended
#' (e.g., [pdf]).
#'
#' @param object [RLum.Results-class] (**required**):
#' S4 object of class `RLum.Results`
#'
#' @param single [logical] (*with default*):
#' single plot output (`TRUE/FALSE`) to allow for plotting the results in as
#' few plot windows as possible.
#'
#' @param ... further arguments and graphical parameters will be passed to
#' the `plot` function.
#'
#' @return Returns multiple plots.
#'
#' @note
#' Not all arguments available for [plot] will be passed!
#' Only plotting of `RLum.Results` objects are supported.
#' #' @section Function version: 0.2.1 #' #' @author #' Christoph Burow, University of Cologne (Germany) \cr #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #' @seealso [plot], [plot_RLum] #' #' @keywords aplot #' #' @examples #' #' #' ###load data #' data(ExampleData.DeValues, envir = environment()) #' #' # apply the un-logged minimum age model #' mam <- calc_MinDose(data = ExampleData.DeValues$CA1, sigmab = 0.2, log = TRUE, plot = FALSE) #' #' ##plot #' plot_RLum.Results(mam) #' #' # estimate the number of grains on an aliquot #' grains<- calc_AliquotSize(grain.size = c(100,150), sample.diameter = 1, plot = FALSE, MC.iter = 100) #' #' ##plot #' plot_RLum.Results(grains) #' #' #' @md #' @export plot_RLum.Results<- function( object, single = TRUE, ... ){ ##============================================================================## ## CONSISTENCY CHECK OF INPUT DATA ##============================================================================## ##check if object is of class RLum.Data.Curve if(!is(object,"RLum.Results")){ stop("[plot_RLum.Results()] Input object is not of type 'RLum.Results'") } ##============================================================================## ## SAFE AND RESTORE PLOT PARAMETERS ON EXIT ##============================================================================## par.old <- par(no.readonly = TRUE) on.exit(suppressWarnings(par(par.old))) ##============================================================================## ## ... ARGUMENTS ##============================================================================## ##deal with addition arguments extraArgs <- list(...) 
##main main <- if("main" %in% names(extraArgs)) {extraArgs$main} else {""} ##mtext mtext <- if("mtext" %in% names(extraArgs)) {extraArgs$mtext} else {""} ##log log <- if("log" %in% names(extraArgs)) {extraArgs$log} else {""} ##lwd lwd <- if("lwd" %in% names(extraArgs)) {extraArgs$lwd} else {1} ##lty lty <- if("lty" %in% names(extraArgs)) {extraArgs$lty} else {1} ##type type <- if("type" %in% names(extraArgs)) {extraArgs$type} else {"l"} ##pch pch <- if("pch" %in% names(extraArgs)) {extraArgs$pch} else {1} ##col col <- if("col" %in% names(extraArgs)) {extraArgs$col} else {"black"} ##============================================================================## ## PLOTTING ##============================================================================## #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ## CASE 1: Minimum Age Model / Maximum Age Model if(object@originator=="calc_MinDose" || object@originator=="calc_MaxDose") { ## single MAM estimate # plot profile log likelhood profiles <- object@data$profile if (object@data$args$log) { profiles@profile$gamma$par.vals[ ,"gamma"] <- exp(profiles@profile$gamma$par.vals[ ,"gamma"]) profiles@profile$sigma$par.vals[ ,"sigma"] <- exp(profiles@profile$sigma$par.vals[ ,"sigma"]) if (object@data$args$par == 4) profiles@profile$mu$par.vals[ ,"mu"] <- exp(profiles@profile$mu$par.vals[ ,"mu"]) } if (single) par(mfrow=c(2, 2)) param <- c("gamma", "sigma", "p0", "mu") for (i in param) { if (object@data$summary$par == 3 && i == "mu") break tryCatch({ xvals <- as.data.frame(profiles@profile[[i]]$par.vals)[[i]] xlim <- range(xvals[xvals > 0]) suppressWarnings( bbmle::plot(profiles, which = i, xlab = "", xaxt = "n", xlim = xlim) ) axis(1, mgp = c(3, 0.5, 0)) title(xlab = i, line = 1.2) if (i %in% c("gamma", "sigma", "mu") && object@data$args$log && object@data$args$log.output) { axis(1, at = axTicks(1), labels = format(round(log(axTicks(1)), 2), nsmall = 2), line = 2.5, mgp = c(3, 0.5, 0)) title(xlab = 
paste0("log(", i, ")"), line = 4) } }, error = function(e) { message(paste("Unable to plot the Likelihood profile for:", i, "(Likelihood probably infinite)")) }) } par(mfrow=c(1,1)) # }) ## bootstrap MAM estimates if(object@data$args$bootstrap==TRUE) { # save previous plot parameter and set new ones .pardefault<- par(no.readonly = TRUE) # get De-llik pairs pairs<- object@data$bootstrap$pairs$gamma # get polynomial fit objects poly.lines<- list(poly.three=object@data$bootstrap$poly.fits$poly.three, poly.four=object@data$bootstrap$poly.fits$poly.four, poly.five=object@data$bootstrap$poly.fits$poly.five, poly.six=object@data$bootstrap$poly.fits$poly.six) # define polynomial curve functions for plotting poly.curves<- list(poly.three.curve=function(x) { poly.lines$poly.three$coefficient[4]*x^3 + poly.lines$poly.three$coefficient[3]*x^2 + poly.lines$poly.three$coefficient[2]*x + poly.lines$poly.three$coefficient[1] }, poly.four.curve=function(x) { poly.lines$poly.four$coefficient[5]*x^4 + poly.lines$poly.four$coefficient[4]*x^3 + poly.lines$poly.four$coefficient[3]*x^2 + poly.lines$poly.four$coefficient[2]*x + poly.lines$poly.four$coefficient[1] }, poly.five.curve=function(x) { poly.lines$poly.five$coefficient[6]*x^5 + poly.lines$poly.five$coefficient[5]*x^4 + poly.lines$poly.five$coefficient[4]*x^3 + poly.lines$poly.five$coefficient[3]*x^2 + poly.lines$poly.five$coefficient[2]*x + poly.lines$poly.five$coefficient[1] }, poly.six.curve=function(x) { poly.lines$poly.six$coefficient[7]*x^6 + poly.lines$poly.six$coefficient[6]*x^5 + poly.lines$poly.six$coefficient[5]*x^4 + poly.lines$poly.six$coefficient[4]*x^3 + poly.lines$poly.six$coefficient[3]*x^2 + poly.lines$poly.six$coefficient[2]*x + poly.lines$poly.six$coefficient[1] }) ## --------- PLOT "RECYCLE" BOOTSTRAP RESULTS ------------ ## if(single==TRUE) { layout(cbind(c(1,1,2, 5,5,6), c(3,3,4, 7,7,8))) par(cex = 0.6) } else { layout(matrix(c(1,1,2)),2,1) par(cex = 0.8) } for(i in 1:4) { ## ----- LIKELIHOODS # set margins 
(bottom, left, top, right) par(mar=c(0,5,5,3)) # sort De and likelihoods by De (increasing) pairs<- pairs[order(pairs[,1]),] # remove invalid NA values pairs<- na.omit(pairs) plot(x=pairs[,1], y=pairs[,2], xlab="Equivalent Dose [Gy]", ylab="Likelihood", xlim=range(pretty(pairs[,1])), ylim=range(pretty(c(0, as.double(quantile(pairs[,2],probs=0.98))))), xaxt = "n", xaxs = "i", yaxs = "i", bty = "l", main="Recycled bootstrap MAM-3") axis(side = 1, labels = FALSE, tick = FALSE) # add subtitle mtext(as.expression(bquote(italic(M) == .(object@data$args$bs.M) ~ "|" ~ italic(N) == .(object@data$args$bs.N) ~ "|" ~ italic(sigma[b]) == .(object@data$args$sigmab) ~ "\u00B1" ~ .(object@data$args$sigmab.sd) ~ "|" ~ italic(h) == .(round(object@data$args$bs.h,1)) ) ), side = 3, line = 0.3, adj = 0.5, cex = if(single){0.5}else{0.8}) # add points points(x=pairs[,1], y=pairs[,2], pch=1, col = "grey80") # get polynomial function poly.curve<- poly.curves[[i]] # add curve to plot curve(poly.curve, from = min(pairs[,1]), to = (max(pairs[,1])), col = "black", add = TRUE, type = "l") # add legend legend<- c("Third degree", "Fourth degree", "Fifth degree", "Sixth degree") legend("topright", xjust = 0, legend = legend[i], y.intersp = 1.2, bty = "n", title = "Polynomial Fit", lty = 1, lwd= 1.5) ## ----- RESIDUALS # set margins (bottom, left, top, right) par(mar=c(5,5,0,3)) plot(x = pairs[,1], y = residuals(poly.lines[[i]]), ylim = c(min(residuals(poly.lines[[i]]))*1.2, as.double(quantile(residuals(poly.lines[[i]]),probs=0.99))), xlim=range(pretty(pairs[,1])), xaxt = "n", bty = "l", xaxs = "i", col = "grey80", ylab = "Fit residual", xlab = "Equivalent dose [Gy]") axis(side = 1, labels = TRUE, tick = TRUE) # add horizontal line abline(h = 0, lty=2) # calculate residual sum of squares (RSS) and add to plot rss<- sum(residuals(poly.lines[[i]])^2) mtext(text = paste("RSS =",round(rss,3)), adj = 1, side = 3, line = -2, cex = if(single){0.6}else{0.8}) ## ----- PROPORTIONS }##EndOf::Plot_loop # 
restore previous plot parameters par(.pardefault) ### TODO: plotting of the LOESS fit needs to be fleshed out ### possibly integrate this in the prior polynomial plot loop ### LOESS PLOT pairs<- object@data$bootstrap$pairs$gamma pred<- predict(object@data$bootstrap$loess.fit) loess<- cbind(pairs[,1], pred) loess<- loess[order(loess[,1]),] # plot gamma-llik pairs plot(pairs, ylim = c(0, as.double(quantile( pairs[,2],probs=0.99))), ylab = "Likelihood", xlab = "Equivalent dose [Gy]", col = "gray80") # add LOESS line lines(loess, type = "l", col = "black") ### ------ PLOT BOOTSTRAP LIKELIHOOD FIT par(mar=c(5,4,4,4)) xlim<- range(pretty(object@data$data[,1])) xlim[1]<- xlim[1]-object@data$data[which.min(object@data$data[,1]),2] xlim[2]<- xlim[2]+object@data$data[which.max(object@data$data[,1]),2] xlim<- range(pretty(xlim)) # empty plot plot(NA,NA, xlim=xlim, ylim=c(0,2), xlab="Equivalent dose [Gy]", ylab="", bty="l", axes=FALSE, xaxs="i", yaxs="i", yaxt="n") axis(side = 1) axis(side = 2, at = c(0,0.5,1)) mtext(text = "Normalised likelihood / density", side = 2, line = 2.5, adj = 0) # set the polynomial to plot poly.curve<- poly.curves[[1]] # three degree poly # plot a nice grey polygon as in the publication step<- 0.1 x<- seq(min(pairs[,1]), max(pairs[,1]), step) y<- poly.curve(x) # normalise y-values y<- y/max(y) x<- c(min(pairs[,1]), x, max(pairs[,1])) y<- c(0, y, 0) # cutoff negative y values neg<- which(y<0) if (length(neg) != 0) { y<- y[-neg] x<- x[-neg] } # add bootstrap likelihood polygon to plot polygon(x, y, col = "grey80", border = NA) if (all(x > max(xlim)) || all(x < min(xlim))) warning("Bootstrap estimates out of x-axis range.", call. = FALSE) ### ----- PLOT MAM SINGLE ESTIMATE # symmetric errors, might not be appropriate mean<- object@data$summary$de sd<- object@data$summary$de_err if (any(is.na(c(mean, sd)))) { warning("Unable to plot the MAM single estimate (NA value).", call. 
= FALSE) } else { x<- seq(mean-5*sd, mean+5*sd, 0.001) y<- dnorm(seq(mean-5*sd, mean+5*sd, 0.001), mean, sd) # normalise y-values y<- y/max(y) points(x, y, type="l", col="red") ## asymmetric errors x<- unlist(object@data$profile@profile$gamma$par.vals[,1]) y<- abs(unlist(object@data$profile@profile$gamma$z)) if(object@data$args$log == TRUE) { x<- exp(x) } # now invert the data by shifting y<- -y y<- y-min(y) y<- y/max(y) # fit a smoothing spline l<- spline(x = x, y = y, method = "n", n = 1000) # make the endpoints zero l$y[1]<- l$y[length(l$y)]<- 0 # add profile log likelihood curve to plot lines(l, col="blue", lwd=1) # add vertical lines of the mean values #points(x = 80, y = 100,type = "l") } #### ------ PLOT DE par(new = TRUE) # sort the data in ascending order dat<- object@data$data[order(object@data$data[,1]),] x<- dat[,1] y<- 1:length(object@data$data[,1]) plot(x = x, y = y, xlim=xlim, ylim=c(0, max(y)+1), axes = FALSE, pch = 16, xlab = "", ylab="", xaxs="i", yaxs="i") axis(side = 4) mtext(text = "# Grain / aliquot", side = 4, line = 2.5) # get sorted errors err<- object@data$data[order(object@data$data[,1]),2] # fancy error bars arrows(x0 = x-err, y0 = y, x1 = x+err, y1 = y, code = 3, angle = 90, length = 0.05) ### ---- AUXILLARY # add legend legend("bottomright", bty = "n", col = c("grey80", "red", "blue", "black"), pch = c(NA,NA,NA,16), lty = c(1,1,1,1), lwd=c(10,2,2,2), legend = c("Bootstrap likelihood", "Profile likelihood (gaussian fit)","Profile likelihood", "Grain / aliquot"), ) }##EndOf::Bootstrap_plotting }#EndOf::CASE1_MinimumAgeModel-3 #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ## CASE 2: Central Age Model if(object@originator=="calc_CentralDose") { # get profile log likelihood data sig<- object@data$profile$sig*100 llik<- object@data$profile$llik # save previous plot parameter and set new ones .pardefault<- par(no.readonly = TRUE) # plot the profile log likeihood par(oma=c(2,1,2,1),las=1,cex.axis=1.2, cex.lab=1.2) 
plot(sig,llik,type="l",xlab=as.expression(bquote(sigma[OD]~"[%]")),ylab="Log likelihood",lwd=1.5) abline(h=0,lty=3) abline(h=-1.92,lty=3) title(as.expression(bquote("Profile log likelihood for" ~ sigma[OD]))) # find upper and lower confidence limits for sigma sigmax<- sig[which.max(llik)] tf<- abs(llik+1.92) < 0.05 sig95<- sig[tf] ntf<- length(sig95) sigL<- sig95[1] sigU<- sig95[ntf] # put them on the graph abline(v=sigL) abline(v=sigmax) abline(v=sigU) dx<- 0.006 dy<- 0.2 ytext<- min(llik) + dy res<- c(sigL,sigmax,sigU) text(res+dx,rep(ytext,3),round(res,2),adj=0) # restore previous plot parameters par(.pardefault) rm(.pardefault) }##EndOf::Case 2 - calc_CentralDose() #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ## CASE 3: Fuchs & Lang 2001 if(object@originator=="calc_FuchsLang2001") { ##deal with addition arguments extraArgs <- list(...) main <- if("main" %in% names(extraArgs)) {extraArgs$main} else {"Fuchs & Lang (2001)"} xlab <- if("xlab" %in% names(extraArgs)) {extraArgs$xlab} else {expression(paste(D[e]," [s]"))} ylab <- if("ylab" %in% names(extraArgs)) {extraArgs$ylab} else {"# Aliquots"} sub <- if("sub" %in% names(extraArgs)) {extraArgs$sub} else {""} cex <- if("cex" %in% names(extraArgs)) {extraArgs$cex} else {1} lwd <- if("lwd" %in% names(extraArgs)) {extraArgs$lwd} else {1} pch <- if("pch" %in% names(extraArgs)) {extraArgs$pch} else {19} ylim <- if("ylim" %in% names(extraArgs)) {extraArgs$ylim} else {c(1,length(object@data$data[,1])+3)} xlim <- if("xlim" %in% names(extraArgs)) {extraArgs$xlim} else {c(min(object@data$data[,1])-max(object@data$data[,2]), max(object@data$data[,1])+max(object@data$data[,2]))} mtext <- if("mtext" %in% names(extraArgs)) {extraArgs$mtext} else {"unknown sample"} # extract relevant plotting parameters o<- order(object@data$data[[1]]) data_ordered<- object@data$data[o,] usedDeValues<- object@data$usedDeValues n.usedDeValues<- object@data$summary$n.usedDeValues par(cex = cex, mfrow=c(1,1)) ##PLOT 
counter<-seq(1,max(o)) plot(NA,NA, ylim = ylim, xlim = xlim, xlab = xlab, ylab = ylab, main = main, sub = sub) ##SEGMENTS segments(data_ordered[,1]-data_ordered[,2],1:length(data_ordered[,1]), data_ordered[,1]+data_ordered[,2],1:length(data_ordered[,1]), col="gray") ##POINTS points(data_ordered[,1], counter,pch=pch) ##LINES ##BOUNDARY INFORMATION ##lower boundary lines(c( usedDeValues[length(usedDeValues[,1])-n.usedDeValues+1,1], #boundary_counter for incorporate skipped values usedDeValues[length(usedDeValues[,1])-n.usedDeValues+1,1]), c(min(o)-0.5,max(o)+0.5), col="red", lty="dashed", lwd = lwd) #upper boundary lines(c(max(usedDeValues[,1]),max(usedDeValues[,1])),c(min(o)-0.5,max(o)+0.5), col="red",lty="dashed", lwd = lwd) #plot some further informations into the grafik arrows( usedDeValues[length(usedDeValues[,1])-n.usedDeValues+1,1]+usedDeValues[length(usedDeValues[,1])-n.usedDeValues+1,1]*0.02, #x1 max(o)+0.5, #y1 max(usedDeValues[,1]-usedDeValues[,1]*0.02), #x2 max(o)+0.5, #y2, code=3, length=0.03) text( c( usedDeValues[length(usedDeValues[,1])-n.usedDeValues+1,1], usedDeValues[length(usedDeValues[,1])-n.usedDeValues+1,1]), c(max(o)+2,max(o)+2), labels=paste("used values = ", n.usedDeValues), cex=0.6*cex, adj=0) ##MTEXT mtext(side=3,mtext,cex=cex) } #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ## CASE 4: Finite Mixture Model -------- if(object@originator == "calc_FiniteMixture") { if(length(object@data$args$n.components) > 1) { ##deal with addition arguments extraArgs <- list(...) 
main <- if("main" %in% names(extraArgs)) {extraArgs$main} else {"Finite Mixture Model"} plot.proportions<- if("plot.proportions" %in% names(extraArgs)) {extraArgs$plot.proportions} else {TRUE} pdf.colors<- if("pdf.colors" %in% names(extraArgs)) {extraArgs$pdf.colors} else {"gray"} pdf.weight<- if("pdf.weight" %in% names(extraArgs)) {extraArgs$pdf.weight} else {TRUE} pdf.sigma<- if("pdf.sigma" %in% names(extraArgs)) {extraArgs$pdf.sigma} else {"sigmab"} # extract relevant data from object n.components <- object@data$args$n.components comp.n <- object@data$components sigmab <- object@data$args$sigmab BIC.n <- object@data$BIC$BIC LLIK.n <- object@data$llik$llik # save previous plot parameter and set new ones .pardefault<- par(no.readonly = TRUE) ## DEVICE AND PLOT LAYOUT n.plots<- length(n.components) #number of PDF plots in plot area #1 seq.vertical.plots<- seq(from = 1, to = n.plots, by = 1) #indices ID.plot.two<- n.plots+if(plot.proportions==TRUE){1}else{0} #ID of second plot area ID.plot.three<- n.plots+if(plot.proportions==TRUE){2}else{1} #ID of third plot area #empty vector for plot indices seq.matrix<- vector(mode="integer", length=4*n.plots) #fill vector with plot indices in correct order cnt<- 1 seq<- seq(1,length(seq.matrix),4) for(i in seq) { seq.matrix[i]<- cnt seq.matrix[i+1]<- cnt seq.matrix[i+2]<- if(plot.proportions==TRUE){ID.plot.two}else{cnt} seq.matrix[i+3]<- ID.plot.three cnt<- cnt+1 } # create device layout layout(matrix(c(seq.matrix),4,n.plots)) # outer margins (bottom, left, top, right) par(oma=c(2.5,5,3,5)) # general plot parameters (global scaling, allow overplotting) par(cex = 0.8, xpd = NA) # define colour palette for prettier output if(pdf.colors == "colors") { col.n<- c("red3", "slateblue3", "seagreen", "tan3", "yellow3", "burlywood4", "magenta4", "mediumpurple3", "brown4","grey", "aquamarine") poly.border<- FALSE } if(pdf.colors == "gray" || pdf.colors == "grey") { col.n<- gray.colors(length(n.components)*2) poly.border<- FALSE } 
if(pdf.colors == "none") { col.n<- NULL poly.border<- TRUE } ##-------------------------------------------------------------------------- ## PLOT 1: EQUIVALENT DOSES OF COMPONENTS ## create empty plot without x-axis for(i in 1:n.plots) { pos.n<- seq(from = 1, to = n.components[i]*3, by = 3) # set margins (bottom, left, top, right) par(mar=c(1,0,2,0)) # empty plot area plot(NA, NA, xlim=c(min(n.components)-0.2, max(n.components)+0.2), ylim=c(min(comp.n[pos.n,]-comp.n[pos.n+1,], na.rm = TRUE), max((comp.n[pos.n,]+comp.n[pos.n+1,])*1.1, na.rm = TRUE)), ylab="", xaxt="n", yaxt="n", xlab="") # add text in upper part of the plot ("k = 1,2..n") mtext(bquote(italic(k) == .(n.components[i])), side = 3, line = -2, cex=0.8) # add y-axis label (only for the first plot) if(i==1) { mtext(expression(paste("D"[e]," [Gy]")), side=2,line=2.7, cex=1) } # empty list to store normal distribution densities sapply.storage<- list() ## NORMAL DISTR. OF EACH COMPONENT options(warn=-1) #suppress warnings for NA values # LOOP - iterate over number of components for(j in 1:max(n.components)) { # draw random values of the ND to check for NA values comp.nd.n<- sort(rnorm(n = length(object@data$data[,1]), mean = comp.n[pos.n[j],i], sd = comp.n[pos.n[j]+1,i])) # proceed if no NA values occurred if(length(comp.nd.n)!=0) { # weight - proportion of the component wi<- comp.n[pos.n[j]+2,i] # calculate density values with(out) weights fooX<- function(x) { dnorm(x, mean = comp.n[pos.n[j],i], sd = if(pdf.sigma=="se"){comp.n[pos.n[j]+1,i]} else{if(pdf.sigma=="sigmab"){comp.n[pos.n[j],i]*sigmab}} )* if(pdf.weight==TRUE){wi}else{1} } # x-axis scaling - determine highest dose in first cycle if(i==1 && j==1){ max.dose<- max(object@data$data[,1])+sd(object@data$data[,1])/2 min.dose<- min(object@data$data[,1])-sd(object@data$data[,1])/2 # density function to determine y-scaling if no weights are used fooY<- function(x) { dnorm(x, mean = na.exclude(comp.n[pos.n,]), sd = na.exclude(comp.n[pos.n+1,])) } # set 
y-axis scaling dens.max<-max(sapply(0:max.dose, fooY)) }##EndOfIf::first cycle settings # override y-axis scaling if weights are used if(pdf.weight==TRUE){ sapply.temp<- list() for(b in 1:max(n.components)){ # draw random values of the ND to check for NA values comp.nd.n<- sort(rnorm(n = length(object@data$data[,1]), mean = comp.n[pos.n[b],i], sd = comp.n[pos.n[b]+1,i])) # proceed if no NA values occurred if(length(comp.nd.n)!=0) { # weight - proportion of the component wi.temp<- comp.n[pos.n[b]+2,i] fooT<- function(x) { dnorm(x, mean = comp.n[pos.n[b],i], sd = if(pdf.sigma=="se"){comp.n[pos.n[b]+1,i]} else{if(pdf.sigma=="sigmab"){comp.n[pos.n[b],i]*sigmab}} )*wi.temp } sapply.temp[[b]]<- sapply(0:max.dose, fooT) } } dens.max<- max(Reduce('+', sapply.temp)) } # calculate density values for 0 to maximum dose sapply<- sapply(0:max.dose, fooX) # save density values in list for sum curve of gaussians sapply.storage[[j]]<- sapply ## determine axis scaling # x-axis (dose) if("dose.scale" %in% names(extraArgs)) { y.scale<- extraArgs$dose.scale } else { y.scale<- c(min.dose,max.dose) } # y-axis (density) if("pdf.scale" %in% names(extraArgs)) { x.scale<- extraArgs$pdf.scale } else { x.scale<- dens.max*1.1 } ## PLOT Normal Distributions par(new=TRUE) plot(sapply, 1:length(sapply)-1, type="l", yaxt="n", xaxt="n", col=col.n[j], lwd=1, ylim=y.scale, xlim=c(0,x.scale), xaxs="i", yaxs="i", ann=FALSE, xpd = FALSE) # draw coloured polygons under curve polygon(x=c(min(sapply), sapply, min(sapply)), y=c(0, 0:max.dose, 0), col = adjustcolor(col.n[j], alpha.f = 0.66), yaxt="n", border=poly.border, xpd = FALSE, lty = 2, lwd = 1.5) } }##EndOf::Component loop #turn warnings on again options(warn=0) # Add sum of Gaussian curve par(new = TRUE) plot(Reduce('+', sapply.storage),1:length(sapply)-1, type="l", yaxt="n", xaxt="n", col="black", lwd=1.5, lty = 1, ylim=y.scale, xlim=c(0,x.scale), xaxs="i", yaxs="i", ann=FALSE, xpd = FALSE) # draw additional info during first k-cycle if(i == 1) { # 
plot title mtext("Normal distributions", side = 3, font = 2, line = 0, adj = 0, cex = 0.8) # main title mtext(main, side = 3, font = 2, line = 3.5, adj = 0.5, at = grconvertX(0.5, from = "ndc", to = "user")) # subtitle mtext(as.expression(bquote(italic(sigma[b]) == .(sigmab) ~ "|" ~ n == .(length(object@data$data[,1])))), side = 3, font = 1, line = 2.2, adj = 0.5, at = grconvertX(0.5, from = "ndc", to = "user"), cex = 0.9) # x-axis label mtext("Density [a.u.]", side = 1, line = 0.5, adj = 0.5, at = grconvertX(0.5, from = "ndc", to = "user")) # draw y-axis with proper labels axis(side=2, labels = TRUE) } if(pdf.colors == "colors") { # create legend labels dose.lab.legend<- paste("c", 1:n.components[length(n.components)], sep="") if(max(n.components)>8) { ncol.temp<- 8 yadj<- 1.025 } else { ncol.temp<- max(n.components) yadj<- 0.93 } # add legend if(i==n.plots) { legend(grconvertX(0.55, from = "ndc", to = "user"), grconvertY(yadj, from = "ndc", to = "user"), legend = dose.lab.legend, col = col.n[1:max(n.components)], pch = 15, adj = c(0,0.2), pt.cex=1.4, bty = "n", ncol=ncol.temp, x.intersp=0.4) mtext("Components: ", cex = 0.8, at = grconvertX(0.5, from = "ndc", to = "user")) } } }##EndOf::k-loop and Plot 1 ##-------------------------------------------------------------------------- ## PLOT 2: PROPORTION OF COMPONENTS if(plot.proportions==TRUE) { # margins for second plot par(mar=c(2,0,2,0)) # create matrix with proportions from a subset of the summary matrix prop.matrix<- comp.n[pos.n+2,]*100 # stacked barplot of proportions without x-axis barplot(prop.matrix, width=1, xlim=c(0.2, length(n.components)-0.2), ylim=c(0,100), axes=TRUE, space=0, col=col.n, xpd=FALSE, xaxt="n") # y-axis label mtext("Proportion [%]", side=2,line=3, cex=1) # add x-axis with corrected tick positions axis(side = 1, labels = n.components, at = n.components+0.5-n.components[1]) # draw a box (not possible with barplot()) box(lty=1, col="black") # add subtitle mtext("Proportion of components", 
side = 3, font = 2, line = 0, adj = 0, cex = 0.8) } ##-------------------------------------------------------------------------- ## PLOT 3: BIC & LLIK # margins for third plot par(mar=c(2,0,2,0)) # prepare scaling for both y-axes BIC.scale<- c(min(BIC.n)*if(min(BIC.n)<0){1.2}else{0.8}, max(BIC.n)*if(max(BIC.n)<0){0.8}else{1.2}) LLIK.scale<- c(min(LLIK.n)*if(min(LLIK.n)<0){1.2}else{0.8}, max(LLIK.n)*if(max(LLIK.n)<0){0.8}else{1.2}) # plot BIC scores plot(n.components, BIC.n, main= "", type="b", pch=22, cex=1.5, xlim=c(min(n.components)-0.2, max(n.components)+0.2), ylim=BIC.scale, xaxp=c(min(n.components), max(n.components), length(n.components)-1), xlab=expression(paste(italic(k), " Components")), ylab=expression(paste("BIC")), cex.lab=1.25) # following plot should be added to previous par(new = TRUE) # plot LLIK estimates plot(n.components, LLIK.n, xlim=c(min(n.components)-0.2, max(n.components)+0.2), xaxp=c(min(n.components), max(n.components), length(n.components)-1), ylim=LLIK.scale, yaxt="n", type="b", pch=16, xlab="", ylab="", lty=2, cex = 1.5) # subtitle mtext("Statistical criteria", side = 3, font = 2, line = 0, adj = 0, cex = 0.8) # second y-axis with proper scaling axis(side = 4, ylim=c(0,100)) # LLIK axis label mtext(bquote(italic(L)[max]), side=4,line=3, cex=1.3) # legend legend(grconvertX(0.75, from = "nfc", to = "user"), grconvertY(0.96, from = "nfc", to = "user"), legend = c("BIC", as.expression(bquote(italic(L)[max]))), pch = c(22,16), pt.bg=c("white","black"), adj = 0, pt.cex=1.3, lty=c(1,2), bty = "n", horiz = TRUE, x.intersp=0.5) ## restore previous plot parameters par(.pardefault) } }##EndOf::Case 4 - Finite Mixture Model #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ## CASE 5: Aliquot Size if(object@originator=="calc_AliquotSize") { if(!is.null(object@data$MC$estimates)) { extraArgs <- list(...) 
main <- if("main" %in% names(extraArgs)) { extraArgs$main } else { "Monte Carlo Simulation" } xlab <- if("xlab" %in% names(extraArgs)) { extraArgs$xlab } else { "Amount of grains on aliquot" } # extract relevant data MC.n<- object@data$MC$estimates MC.n.kde<- object@data$MC$kde MC.stats<- object@data$MC$statistics MC.q<- object@data$MC$quantile MC.iter<- object@data$args$MC.iter # set layout of plotting device layout(matrix(c(1,1,2)),2,1) par(cex = 0.8) ## plot MC estimate distribution # set margins (bottom, left, top, right) par(mar=c(2,5,5,3)) # plot histogram hist(MC.n, freq=FALSE, col = "gray90", main="", xlab=xlab, xlim = c(min(MC.n)*0.95, max(MC.n)*1.05), ylim = c(0, max(MC.n.kde$y)*1.1)) # add rugs to histogram rug(MC.n) # add KDE curve lines(MC.n.kde, col = "black", lwd = 1) # add mean, median and quantils (0.05,0.95) abline(v=c(MC.stats$mean, MC.stats$median, MC.q), lty=c(2, 4, 3,3), lwd = 1) # add main- and subtitle mtext(main, side = 3, adj = 0.5, line = 3, cex = 1) mtext(as.expression(bquote(italic(n) == .(MC.iter) ~ "|" ~ italic(hat(mu)) == .(round(MC.stats$mean)) ~ "|" ~ italic(hat(sigma)) == .(round(MC.stats$sd.abs)) ~ "|" ~ italic(frac(hat(sigma),sqrt(n))) == .(round(MC.stats$se.abs)) ~ "|" ~ italic(v) == .(round(MC.stats$skewness, 2)) ) ), side = 3, line = 0.3, adj = 0.5, cex = 0.9) # add legend legend("topright", legend = c("mean","median", "0.05 / 0.95 quantile"), lty = c(2, 4, 3), bg = "white", box.col = "white", cex = 0.9) ## BOXPLOT # set margins (bottom, left, top, right) par(mar=c(5,5,0,3)) plot(NA, type="n", xlim=c(min(MC.n)*0.95, max(MC.n)*1.05), xlab=xlab, ylim=c(0.5,1.5), xaxt="n", yaxt="n", ylab="") par(bty="n") boxplot(MC.n, horizontal = TRUE, add = TRUE, bty="n") } else { on.exit(NULL) } }#EndOf::Case 5 - calc_AliqoutSize() #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ## CASE 6: calc_SourceDoseRate() if(object@originator=="calc_SourceDoseRate") { ##prepare data ##get data df <- get_RLum(object = object, 
data.object = "dose.rate") ##reduce the size for plotting, more than 100 points makes no sense if(nrow(df)>100) { df <- df[seq(1,nrow(df), length = 100),] } ##plot settings plot.settings <- list( main = "Source Dose Rate Prediction", xlab = "Date", ylab = paste0( "Dose rate/(",get_RLum(object = object, data.object = "parameters")$dose.rate.unit,")"), log = "", cex = 1, xlim = NULL, ylim = c(min(df[,1]) - max(df[,2]), max(df[,1]) + max(df[,2])), pch = 1, mtext = paste0( "source type: ", get_RLum(object = object, data.object = "parameters")$source.type, " | ", "half-life: ", get_RLum(object = object, data.object = "parameters")$halflife, " a" ), grid = expression(nx = 10, ny = 10), col = 1, type = "b", lty = 1, lwd = 1, segments = "" ) ##modify list if something was set plot.settings <- modifyList(plot.settings, list(...)) ##plot plot( df[,3], df[,1], main = plot.settings$main, xlab = plot.settings$xlab, ylab = plot.settings$ylab, xlim = plot.settings$xlim, ylim = plot.settings$ylim, log = plot.settings$log, pch = plot.settings$pch, col = plot.settings$pch, type = plot.settings$type, lty = plot.settings$lty, lwd = plot.settings$lwd ) if(!is.null(plot.settings$segments)){ segments( x0 = df[,3], y0 = df[,1] + df[,2], x1 = df[,3], y1 = df[,1] - df[,2] ) } mtext(side = 3, plot.settings$mtext) if(!is.null(plot.settings$grid)){ grid(eval(plot.settings$grid)) } }#EndOf::Case 6 - calc_SourceDoseRate() #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# ## CASE 7: Fast Ratio if (object@originator=="calc_FastRatio") { # graphical settings settings <- list(main = "Fast Ratio", xlab = "t/s", ylab = "Signal/cts", type = "b", log = "", pch = 16, cex = 1.0, col = "black") settings <- modifyList(settings, list(...)) par(cex = settings$cex) # fetch data from RLum.Results object curve <- get_RLum(object, "data") if (inherits(curve, "RLum.Data.Curve")) curve <- get_RLum(curve) res <- get_RLum(object, "summary") fit <- get_RLum(object, "fit") # calculate the dead 
channel time offset offset <- res$dead.channels.start * res$channel.width # plot the OSL curve plot(curve, type = "n", main = settings$main, xlab = settings$xlab, ylab = settings$ylab, log = settings$log) # plot points to show measured data points (i.e., the channels) if (settings$type == "p" || settings$type == "b") points(curve[(res$dead.channels.start + 1):(nrow(curve) - res$dead.channels.end), ], pch = settings$pch, col = settings$col) # plot dead channels as empty circles if (res$dead.channels.start > 0) points(curve[1:res$dead.channels.start,]) if (res$dead.channels.end > 0) points(curve[(nrow(curve) - res$dead.channels.end):nrow(curve), ]) if (settings$type == "l" || settings$type == "b") lines(curve, col = settings$col) # optional: plot fitted CW curve if (!is.null(fit)) { nls.fit <- get_RLum(fit, "fit") if (!inherits(fit, "try-error") & "fitCW.curve" %in% names(object@data$args)) { if (object@data$args$fitCW.curve == "T" | object@data$args$fitCW.curve == TRUE) { lines(curve[(res$dead.channels.start + 1):(nrow(curve) - res$dead.channels.end), 1], predict(nls.fit), col = "red", lty = 1) ##plot curve for additional parameters col_components <- c("red", "green", "blue") for (i in 1:3) { if (!is.na(fit@data$data[[paste0("I0", i)]])) curve(fit@data$data[[paste0("I0", i)]] * fit@data$data[[paste0("lambda", i)]] * exp(-fit@data$data[[paste0("lambda", i)]] * x), lwd = 1, lty = 4, add = TRUE, col = col_components[i]) } } } } # add vertical lines and labels for L1, L2, L3 L_times <- c(curve[res$Ch_L1, 1], curve[res$Ch_L2, 1], curve[res$Ch_L3_start, 1], curve[res$Ch_L3_end, 1]) + offset abline(v = L_times, lty = 2) text(L_times, max(curve[ ,2]) * 0.95, pos = c(4,4,2,2), labels = expression('L'[1], 'L'[2], 'L'[3['start']], 'L'[3['end']])) }#EndOf::Case7 - calc_FastRatio() } Luminescence/R/github.R0000644000176200001440000001455314367174002014506 0ustar liggesusers# ------------------------------------------------------------------------ # Author: Christoph Burow # 
Affiliation: University of Cologne # Date: 15/01/2019 # API version: v3 # Reference: https://docs.github.com/v3/ # ------------------------------------------------------------------------ #' GitHub API #' #' R Interface to the GitHub API v3. #' #' These functions can be used to query a specific repository hosted on GitHub. \cr #' #' #' @param user [character] (*with default*): #' GitHub user name (defaults to `'r-lum'`). #' #' @param repo [character] (*with default*): #' name of a GitHub repository (defaults to `'luminescence'`). #' #' @param branch [character] (*with default*): #' branch of a GitHub repository (defaults to `'master'`). #' #' @param n [integer] (*with default*): #' number of commits returned (defaults to 5). #' #' @param verbose [logical] (*with default*): #' print the output to the console (defaults to `TRUE`). #' #' @author Christoph Burow, University of Cologne (Germany) #' #' @section Function version: 0.1.0 #' #' @references #' #' GitHub Developer API v3. [https://docs.github.com/v3/](), last accessed: 10/01/2017. #' #' @examples #' #' \dontrun{ #' github_branches(user = "r-lum", repo = "luminescence") #' github_issues(user = "r-lum", repo = "luminescence") #' github_commits(user = "r-lum", repo = "luminescence", branch = "master", n = 10) #' } #' #' @md #' @name GitHub-API NULL # COMMITS ----------------------------------------------------------------- #' @rdname GitHub-API #' #' @details #' `github_commits` lists the most recent `n` commits of a specific branch of a repository. 
#'
#' @return
#' `github_commits`: [data.frame] with columns:
#'
#' \tabular{ll}{
#'  `[ ,1]` \tab SHA \cr
#'  `[ ,2]` \tab AUTHOR \cr
#'  `[ ,3]` \tab DATE \cr
#'  `[ ,4]` \tab MESSAGE \cr
#' }
#'
#' @md
#' @export
github_commits <- function(user = "r-lum", repo = "luminescence", branch = "master", n = 5) {

  # fetch available branches and check if the provided branch exists;
  # the match is anchored (^...$) so that e.g. "master" does not spuriously
  # match a branch named "master-dev" (the previous unanchored grepl() check
  # could pass while the anchored SHA lookup below came back empty, which
  # produced a malformed request URL instead of a clear error)
  branches <- github_branches(user, repo)
  idx <- grep(paste0("^", branch, "$"), branches$BRANCH)
  if (length(idx) == 0)
    stop("Branch ", branch, " does not exist.", call. = FALSE)

  # build URL and retrieve content
  sha <- branches$SHA[idx]
  url <- paste0("https://api.github.com/repos/", user, "/", repo, "/commits?",
                "per_page=", n, "&sha=", sha)

  content <- github_getContent(url)

  # format output as data.frame (one row per commit)
  output <- do.call(rbind, lapply(content, function(x) {
    data.frame(SHA = x$sha,
               AUTHOR = x$commit$author$name,
               DATE = x$commit$author$date,
               MESSAGE = x$commit$message,
               stringsAsFactors = FALSE)
  }))

  return(output)
}

# BRANCHES ----------------------------------------------------------------

#' @rdname GitHub-API
#'
#' @details
#' `github_branches` can be used to list all current branches of a
#' repository and returns the corresponding SHA hash as well as an installation
#' command to install the branch in R via the 'devtools' package.
#'
#' @return
#' `github_branches`: [data.frame] with columns:
#'
#' \tabular{ll}{
#'  `[ ,1]` \tab BRANCH \cr
#'  `[ ,2]` \tab SHA \cr
#'  `[ ,3]` \tab INSTALL \cr
#' }
#'
#' @md
#' @export
github_branches <- function(user = "r-lum", repo = "luminescence") {

  # build URL and retrieve content
  url <- paste0("https://api.github.com/repos/", user, "/", repo, "/branches")
  content <- github_getContent(url)

  # extract relevant information from the server response
  branches <- sapply(content, function(x) x$name)
  sha <- sapply(content, function(x) x$commit$sha)

  # format output as data.frame; the install command is built from the
  # queried user/repo (it was previously hard-coded to 'r-lum/luminescence',
  # which produced wrong commands for any other repository; output for the
  # default arguments is unchanged)
  output <- data.frame(
    BRANCH = branches,
    SHA = sha,
    INSTALL = paste0("devtools::install_github('", user, "/", repo, "@", branches, "')"),
    stringsAsFactors = FALSE
  )

  return(output)
}

# ISSUES ------------------------------------------------------------------

#' @rdname GitHub-API
#'
#' @details
#' `github_issues` lists all open issues for a repository in valid YAML.
#'
#' @return
#' `github_issues`: Nested [list] with `n` elements.
#' Each commit element is a list with elements:
#'
#' \tabular{ll}{
#'  `[[1]]` \tab NUMBER \cr
#'  `[[2]]` \tab TITLE \cr
#'  `[[3]]` \tab BODY \cr
#'  `[[4]]` \tab CREATED \cr
#'  `[[5]]` \tab UPDATED \cr
#'  `[[6]]` \tab CREATOR \cr
#'  `[[7]]` \tab URL \cr
#'  `[[8]]` \tab STATUS \cr
#' }
#'
#' @md
#' @export
github_issues <- function(user = "r-lum", repo = "luminescence", verbose = TRUE) {

  # build URL and retrieve content
  url <- paste0("https://api.github.com/repos/", user,"/", repo, "/issues")
  content <- github_getContent(url)

  # format output as nested list; the GitHub API returns 'null' for issues
  # without a description, which arrives here as NULL — coerce that case to
  # an empty string, otherwise gsub() yields character(0) and the printing
  # code below fails on the zero-length input
  issues <- lapply(content, function(x) {
    list(
      NUMBER = x$number,
      TITLE = x$title,
      BODY = gsub("\n", "", if (is.null(x$body)) "" else x$body),
      CREATED = x$created_at,
      UPDATED = x$updated_at,
      CREATOR = x$user$login,
      URL = x$url,
      STATUS = x$state,
      MILESTONE = x$milestone$title)
  })

  # custom printing of the issues-list as print.list produces unreadable
  # console output
  if (verbose) {
    tmp <- lapply(issues, function(x) {

      # limit width of the description text to 100 characters per line
      DESCRIPTION <- ""
      for (i in seq_len(ceiling(nchar(x$BODY) / 100)))
        DESCRIPTION <- paste(DESCRIPTION, " ", substr(x$BODY, i*100-99, i*100), "\n")

      # print to console in valid YAML
      cat(paste0("---\n",
                 'title: "', x$TITLE, '"', "\n",
                 "number: ", x$NUMBER, "\n",
                 'url: "', x$URL, '"', "\n",
                 "created: ", x$CREATED, "\n",
                 "updated: ", x$UPDATED, "\n",
                 "creator: ", x$CREATOR, "\n",
                 "status: ", x$STATUS, "\n",
                 'milestone: "', x$MILESTONE, '"', "\n",
                 "description: >\n", DESCRIPTION, "\n\n\n"))
    })
  }

  # return invisibly as we explicitly print the output
  invisible(issues)
}

# HELPER ------------------------------------------------------------------

# This function queries the URL, checks the server response and returns
# the content.
github_getContent <- function(url) {
  response <- GET(url, accept_json())
  if (status_code(response) != 200)
    stop("Contacting ", url, " had status code ", status_code(response), call.
= FALSE) content <- content(response) return(content) } Luminescence/R/analyse_SAR.CWOSL.R0000644000176200001440000016243514521207352016253 0ustar liggesusers#' @title Analyse SAR CW-OSL measurements #' #' @description The function performs a SAR CW-OSL analysis on an #' [RLum.Analysis-class] object including growth curve fitting. #' #' @details #' The function performs an analysis for a standard SAR protocol measurements #' introduced by Murray and Wintle (2000) with CW-OSL curves. For the #' calculation of the `Lx/Tx` value the function [calc_OSLLxTxRatio] is #' used. For **changing the way the Lx/Tx error is calculated** use the argument #' `background.count.distribution` and `sigmab`, which will be passed to the function #' [calc_OSLLxTxRatio]. #' #' **What is part of a SAR sequence?** #' #' The function is rather picky when it comes down to accepted curve input (OSL,IRSL,...) and structure. #' A SAR sequence is basically a set of \eqn{L_{x}/T_{x}} curves. Hence, every 2nd curve #' is considered a shine-down curve related to the test dose. It also means that the number of #' curves for \eqn{L_{x}} has to be equal to the number of \eqn{T_{x}} curves, and that #' hot-bleach curves **do not** belong into a SAR sequence; at least not for the analysis. #' Other curves allowed and processed are preheat curves, or preheat curves measured as TL, and #' irradiation curves. The later one indicates the duration of the irradiation, the #' dose and test dose points, e.g., as part of XSYG files. #' #' **Argument `object` is of type `list`** #' #' If the argument `object` is of type [list] containing **only** #' [RLum.Analysis-class] objects, the function re-calls itself as often as elements #' are in the list. This is useful if an entire measurement wanted to be analysed without #' writing separate for-loops. 
To gain in full control of the parameters (e.g., `dose.points`) for #' every aliquot (corresponding to one [RLum.Analysis-class] object in the list), in #' this case the arguments can be provided as [list]. This `list` should #' be of similar length as the `list` provided with the argument `object`, #' otherwise the function will create an own list of the requested length. #' Function output will be just one single [RLum.Results-class] object. #' #' Please be careful when using this option. It may allow a fast an efficient data analysis, but #' the function may also break with an unclear error message, due to wrong input data. #' #' **Working with IRSL data** #' #' The function was originally designed to work just for 'OSL' curves, #' following the principles of the SAR protocol. An IRSL measurement protocol #' may follow this procedure, e.g., post-IR IRSL protocol (Thomsen et al., #' 2008). Therefore this functions has been enhanced to work with IRSL data, #' however, the function is only capable of analysing curves that follow the #' SAR protocol structure, i.e., to analyse a post-IR IRSL protocol, curve data #' have to be pre-selected by the user to fit the standards of the SAR #' protocol, i.e., Lx,Tx,Lx,Tx and so on. #' #' Example: Imagine the measurement contains `pIRIR50` and `pIRIR225` IRSL curves. #' Only one curve type can be analysed at the same time: The `pIRIR50` curves or #' the `pIRIR225` curves. #' #' **Supported rejection criteria** #' #' `[recycling.ratio]`: calculated for every repeated regeneration dose point. #' #' `[recuperation.rate]`: recuperation rate calculated by comparing the #' `Lx/Tx` values of the zero regeneration point with the `Ln/Tn` value (the `Lx/Tx` #' ratio of the natural signal). For methodological background see Aitken and #' Smith (1988). #' #' `[testdose.error]`: set the allowed error for the test dose, which per #' default should not exceed 10%. The test dose error is calculated as `Tx_net.error/Tx_net`. 
#' The calculation of the \eqn{T_{n}} error is detailed in [calc_OSLLxTxRatio]. #' #' `[palaeodose.error]`: set the allowed error for the De value, which per #' default should not exceed 10%. #' #' **Irradiation times** #' #' The function makes two attempts to extra irradiation data (dose points) #' automatically from the input object, if the argument `dose.points` was not #' set (aka set to `NULL`). #' #' 1. It searches in every curve for an info object called `IRR_TIME`. If this was set, any value #' set here is taken as dose point. #' #' 2. If the object contains curves of type `irradiation`, the function tries to #' use this information to assign these values to the curves. However, the function #' does **not** overwrite values preset in `IRR_TIME`. #' #' @param object [RLum.Analysis-class] (**required**): #' input object containing data for analysis, alternatively a [list] of #' [RLum.Analysis-class] objects can be provided. The object should contain **only** curves #' considered part of the SAR protocol (see Details.) #' #' @param signal.integral.min [integer] (**required**): #' lower bound of the signal integral. Can be a [list] of [integer]s, if `object` is #' of type [list]. If the input is vector (e.g., `c(1,2)`) the 2nd value will be interpreted #' as the minimum signal integral for the `Tx` curve. Can be set to `NA`, in this #' case no integrals are taken into account. #' #' @param signal.integral.max [integer] (**required**): #' upper bound of the signal integral. Can be a [list] of [integer]s, if `object` is #' of type [list]. If the input is vector (e.g., `c(1,2)`) the 2nd value will be interpreted #' as the maximum signal integral for the `Tx` curve. Can be set to `NA`, in this #' case no integrals are taken into account. #' #' @param background.integral.min [integer] (**required**): #' lower bound of the background integral. Can be a [list] of [integer]s, if `object` is #' of type [list]. 
If the input is vector (e.g., `c(1,2)`) the 2nd value will be interpreted #' as the minimum background integral for the `Tx` curve. Can be set to `NA`, in this #' case no integrals are taken into account. #' #' @param background.integral.max [integer] (**required**): #' upper bound of the background integral. Can be a [list] of [integer]s, if `object` is #' of type [list]. If the input is vector (e.g., `c(1,2)`) the 2nd value will be interpreted #' as the maximum background integral for the `Tx` curve. Can be set to `NA`, in this #' case no integrals are taken into account. #' #' @param OSL.component [character] or [integer] (*optional*): s single index #' or a [character] defining the signal component to be evaluated. #' It requires that the object was processed by `[OSLdecomposition::RLum.OSL_decomposition]`. #' This argument can either be the name of the OSL component assigned by #' `[OSLdecomposition::RLum.OSL_global_fitting]` or the index in the descending #' order of decay rates. Then `"1"` selects the fastest decaying component, `"2"` #' the second fastest and so on. Can be a [list] of [integer]s or strings (or mixed) #' If object is a [list] and this parameter is provided as [list] it alternates over #' the elements (aliquots) of the object list, e.g., `list(1,2)` processes the first #' aliquot with component `1` and the second aliquot with component `2`. #' `NULL` does not process any component. #' #' @param rejection.criteria [list] (*with default*): #' provide a *named* list and set rejection criteria in **percentage** #' for further calculation. Can be a [list] in a [list], if `object` is of type [list]. #' Note: If an *unnamed* [list] is provided the new settings are ignored! #' #' Allowed arguments are `recycling.ratio`, `recuperation.rate`, #' `palaeodose.error`, `testdose.error` and `exceed.max.regpoint = TRUE/FALSE`. #' Example: `rejection.criteria = list(recycling.ratio = 10)`. 
#' Per default all numerical values are set to 10, `exceed.max.regpoint = TRUE`. #' Every criterion can be set to `NA`. In this value are calculated, but not considered, i.e. #' the RC.Status becomes always `'OK'` #' #' @param dose.points [numeric] (*optional*): #' a numeric vector containing the dose points values. Using this argument #' overwrites dose point values extracted from other data. Can be a [list] of #' [numeric] vectors, if `object` is of type [list] #' #' @param mtext.outer [character] (*optional*): #' option to provide an outer margin `mtext`. Can be a [list] of [character]s, #' if `object` is of type [list] #' #' @param plot [logical] (*with default*): enables or disables plot output. #' #' @param plot_onePage [logical] (*with default*): enables or disables on page plot output #' #' @param plot.single [logical] (*with default*) or [numeric] (*optional*): #' single plot output (`TRUE/FALSE`) to allow for plotting the results in single plot windows. #' If a [numeric] vector is provided the plots can be selected individually, i.e. #' `plot.single = c(1,2,3,4)` will plot the TL and Lx, Tx curves but not the legend (5) or the #' growth curve (6), (7) and (8) belong to rejection criteria plots. Requires #' `plot = TRUE`. #' #' @param onlyLxTxTable [logical] (with default): If `TRUE` the dose response #' curve fitting and plotting is skipped. #' This allows to get hands on the `Lx/Tx` table for large datasets #' without the need for a curve fitting. #' #' @param ... further arguments that will be passed to the function #' [plot_GrowthCurve] or [calc_OSLLxTxRatio] #' (supported: `background.count.distribution`, `sigmab`, `sig0`). #' **Please note** that if you consider to use the early light subtraction #' method you should provide your own `sigmab` value! 
# #' @return #' A plot (*optional*) and an [RLum.Results-class] object are #' returned containing the following elements: #' #' \item{data}{[data.frame] containing De-values, De-error and further parameters} #' \item{LnLxTnTx.values}{[data.frame] of all calculated Lx/Tx values including signal, #' background counts and the dose points} #' \item{rejection.criteria}{[data.frame] with values that might be used as rejection criteria. #' `NA` is produced if no R0 dose point exists.} #' \item{Formula}{[formula] formula that has been used for the growth curve fitting} #' #' The output should be accessed using the function [get_RLum]. #' #' @note #' This function must not be mixed up with the function #' [Analyse_SAR.OSLdata], which works with #' [Risoe.BINfileData-class] objects. #' #' **The function currently only supports 'OSL', 'IRSL' and 'POSL' data!** #' #' @section Function version: 0.9.14 #' #' @author Sebastian Kreutzer, Geography & Earth Sciences, Aberystwyth University #' (United Kingdom) #' #' @seealso [calc_OSLLxTxRatio], [plot_GrowthCurve], [RLum.Analysis-class], #' [RLum.Results-class], [get_RLum] #' #' @references #' Aitken, M.J. and Smith, B.W., 1988. Optical dating: recuperation #' after bleaching. Quaternary Science Reviews 7, 387-393. #' #' Duller, G., 2003. Distinguishing quartz and feldspar in single grain #' luminescence measurements. Radiation Measurements, 37 (2), 161-165. #' #' Murray, A.S. and Wintle, A.G., 2000. Luminescence dating of quartz using an #' improved single-aliquot regenerative-dose protocol. Radiation Measurements #' 32, 57-73. #' #' Thomsen, K.J., Murray, A.S., Jain, M., Boetter-Jensen, L., 2008. Laboratory #' fading rates of various luminescence signals from feldspar-rich sediment #' extracts. Radiation Measurements 43, 1474-1486. 
#' doi:10.1016/j.radmeas.2008.06.002 #' #' @keywords datagen plot #' #' @examples #' #' ##load data #' ##ExampleData.BINfileData contains two BINfileData objects #' ##CWOSL.SAR.Data and TL.SAR.Data #' data(ExampleData.BINfileData, envir = environment()) #' #' ##transform the values from the first position in a RLum.Analysis object #' object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos=1) #' #' ##perform SAR analysis and set rejection criteria #' results <- analyse_SAR.CWOSL( #' object = object, #' signal.integral.min = 1, #' signal.integral.max = 2, #' background.integral.min = 900, #' background.integral.max = 1000, #' log = "x", #' fit.method = "EXP", #' rejection.criteria = list( #' recycling.ratio = 10, #' recuperation.rate = 10, #' testdose.error = 10, #' palaeodose.error = 10, #' exceed.max.regpoint = TRUE) #') #' #' ##show De results #' get_RLum(results) #' #' ##show LnTnLxTx table #' get_RLum(results, data.object = "LnLxTnTx.table") #' #' @md #' @export analyse_SAR.CWOSL<- function( object, signal.integral.min = NA, signal.integral.max = NA, background.integral.min = NA, background.integral.max = NA, OSL.component = NULL, rejection.criteria = list(), dose.points = NULL, mtext.outer = "", plot = TRUE, plot_onePage = FALSE, plot.single = FALSE, onlyLxTxTable = FALSE, ... 
) { # SELF CALL ----------------------------------------------------------------------------------- if(is.list(object)){ ##clean object input and expand parameters object <- .rm_nonRLum(object) parm <- .expand_parameters(length(object)) ##handle main separately if("main"%in% names(list(...))){ if(inherits(list(...)$main, "list")){ main <- rep(list(...)$main,length = length(object)) }else{ main <- rep(as.list(list(...)$main),length = length(object)) } }else{ main <- as.list(paste0("ALQ #",1:length(object))) } results <- merge_RLum(lapply(1:length(object), function(x){ analyse_SAR.CWOSL( object = object[[x]], signal.integral.min = parm$signal.integral.min[[x]], signal.integral.max = parm$signal.integral.max[[x]], background.integral.min = parm$background.integral.min[[x]], background.integral.max = parm$background.integral.max[[x]], OSL.component = parm$OSL.component[[x]], dose.points = parm$dose.points[[x]], mtext.outer = parm$mtext.outer[[x]], plot = parm$plot[[x]], rejection.criteria = parm$rejection.criteria[[x]], plot.single = parm$plot.single[[x]], plot_onePage = parm$plot_onePage[[x]], onlyLxTxTable = parm$onlyLxTxTable[[x]], main = main[[x]], ...) })) ##return ##DO NOT use invisible here, this will prevent the function from stopping if(length(results) == 0) return(NULL) return(results) } # CONFIG ----------------------------------------------------------------- ##set error list, this allows to set error messages without breaking the function error.list <- list() # General Integrity Checks --------------------------------------------------- ##MISSING INPUT if(!inherits(object, "RLum.Analysis")) stop("[analyse_SAR.CWOSL()] Input object is not of type 'RLum.Analysis'!", call. 
= FALSE) ##skip all those tests if signal integral is NA if(any(is.na(c(signal.integral.min, signal.integral.max, background.integral.min, background.integral.max)))){ signal.integral <- background.integral <- NA signal.integral.Tx <- background.integral.Tx <- NULL warning("[analyse_SAR.CWOSL()] No signal or background integral applied, because they were set to NA!", call. = FALSE) } else { ##build signal and background integrals signal.integral <- c(signal.integral.min[1]:signal.integral.max[1]) background.integral <- c(background.integral.min[1]:background.integral.max[1]) ##account for the case that Lx and Tx integral differ if (length(signal.integral.min) == 2 & length(signal.integral.max) == 2) { signal.integral.Tx <- c(signal.integral.min[2]:signal.integral.max[2]) }else{ signal.integral.Tx <- NULL } if (length(background.integral.min) == 2 & length(background.integral.max) == 2) { background.integral.Tx <- c(background.integral.min[2]:background.integral.max[2]) }else{ background.integral.Tx <- NULL } ##Account for the case that the use did not provide everything ... if(is.null(signal.integral.Tx) & !is.null(background.integral.Tx)){ signal.integral.Tx <- signal.integral warning("[analyse_SAR.CWOSL()] background integral for Tx curves set, but not for the signal integral; signal integral for Tx automatically set.") } if(!is.null(signal.integral.Tx) & is.null(background.integral.Tx)){ background.integral.Tx <- background.integral warning("[analyse_SAR.CWOSL()] signal integral for Tx curves set, but not for the background integral; background integral for Tx automatically set.") } ##INTEGRAL LIMITS if(!is(signal.integral, "integer") | !is(background.integral, "integer")){ stop("[analyse_SAR.CWOSL()] 'signal.integral' or 'background.integral' is not of type integer!", call. 
= FALSE) } } ## try to extract the correct curves for the sequence based on allowed curve types and ## the curve type used most frequently ## now remove all non-allowed curves CWcurve.type <- regmatches( x = names(object), m = regexpr("(OSL[a-zA-Z]*|IRSL[a-zA-Z]*|POSL[a-zA-Z]*)", names(object), perl = TRUE)) if(length(CWcurve.type) == 0) { try(stop("[analyse_SAR.CWOSL()] No record of type 'OSL', 'IRSL', 'POSL' detected! NULL returned.", call. = FALSE)) return(NULL) } ## now get the type which is used most CWcurve.type <- names(which.max(table(CWcurve.type))) # Rejection criteria ------------------------------------------------------ if(is.null(rejection.criteria) || class(rejection.criteria)[1] != "list") rejection.criteria <- list() ##set list rejection.criteria <- modifyList(x = list( recycling.ratio = 10, recuperation.rate = 10, palaeodose.error = 10, testdose.error = 10, exceed.max.regpoint = TRUE ), val = rejection.criteria, keep.null = TRUE) # Deal with extra arguments ---------------------------------------------------- ##deal with addition arguments extraArgs <- list(...) 
main <- if("main" %in% names(extraArgs)) extraArgs$main else "" log <- if("log" %in% names(extraArgs)) extraArgs$log else "" cex <- if("cex" %in% names(extraArgs)) extraArgs$cex else 1 background.count.distribution <- if ("background.count.distribution" %in% names(extraArgs)) { extraArgs$background.count.distribution } else { "non-poisson" } sigmab <- if("sigmab" %in% names(extraArgs)) extraArgs$sigmab else NULL sig0 <- if("sig0" %in% names(extraArgs)) extraArgs$sig0 else 0 # Protocol Integrity Checks -------------------------------------------------- ##check overall structure of the object ##every SAR protocol has to have equal number of curves ##grep curve types from analysis value and remove unwanted information temp.ltype <- sapply(1:length(object@records), function(x) { ##export as global variable object@records[[x]]@recordType <<- gsub(" .*", "", object@records[[x]]@recordType) object@records[[x]]@recordType }) ##FI lexsyg devices provide irradiation information in a separate curve if(any("irradiation" %in% temp.ltype)){ ##grep irradiation times temp.irradiation <- extract_IrradiationTimes(object)@data$irr.times[["IRR_TIME"]] ##write this into the records for(i in 1:length(object@records)){ if(is.null(object@records[[i]]@info$IRR_TIME)) object@records[[i]]@info <- c(object@records[[i]]@info, IRR_TIME = temp.irradiation[i]) } ## remove irradiation curves object <- get_RLum(object, record.id = c(!temp.ltype %in% "irradiation"), drop = FALSE) } ##check if the wanted curves are a multiple of two ##gsub removes unwanted information from the curves if(table(temp.ltype)[CWcurve.type]%%2!=0){ error.list[[1]] <- "[analyse_SAR.CWOSL()] Input OSL/IRSL curves are not a multiple of two." 
} ##check if the curve lengths differ temp.matrix.length <- unlist(sapply(1:length(object@records), function(x) { if(object@records[[x]]@recordType==CWcurve.type){ length(object@records[[x]]@data[,1]) } })) if(length(unique(temp.matrix.length))!=1){ error.list[[2]] <- "[analyse_SAR.CWOSL()] Input curves lengths differ." } ##just proceed if error list is empty if (length(error.list) == 0) { ##check background integral if (!all(is.na(signal.integral)) && max(signal.integral) == min(signal.integral)) { signal.integral <- c(min(signal.integral) : (max(signal.integral) + 1)) warning("[analyse_SAR.CWOSL()] integral signal limits cannot be equal, reset automatically!") } ##background integral should not be longer than curve channel length if (!all(is.na(background.integral)) && max(background.integral) == min(background.integral)) { background.integral <- c((min(background.integral) - 1) : max(background.integral)) } if (!all(is.na(background.integral)) && max(background.integral) > temp.matrix.length[1]) { background.integral <- c((temp.matrix.length[1] - length(background.integral)):temp.matrix.length[1]) ##prevent that the background integral becomes negative if(min(background.integral) < max(signal.integral)){ background.integral <- c((max(signal.integral) + 1):max(background.integral)) } warning("[analyse_SAR.CWOSL()] Background integral out of bounds. Set to: c(", min(background.integral),":", max(background.integral),")", call. 
= FALSE ) } ##Do the same for the Tx-if set if (!is.null(background.integral.Tx)) { if (max(background.integral.Tx) == min(background.integral.Tx)) { background.integral.Tx <- c((min(background.integral.Tx) - 1) : max(background.integral.Tx)) } if (max(background.integral.Tx) > temp.matrix.length[2]) { background.integral.Tx <- c((temp.matrix.length[2] - length(background.integral.Tx)):temp.matrix.length[2]) ##prevent that the background integral becomes negative if (min(background.integral.Tx) < max(signal.integral.Tx)) { background.integral.Tx <- c((max(signal.integral.Tx) + 1):max(background.integral.Tx)) } warning( "Background integral for Tx out of bounds. Set to: c(", min(background.integral.Tx), ":", max(background.integral.Tx), ")" ) } } # Grep Curves ------------------------------------------------------------- ##grep relevant curves from RLum.Analyis object OSL.Curves.ID <- get_RLum(object, recordType = CWcurve.type, get.index = TRUE) ##separate curves by Lx and Tx (it makes it much easier) OSL.Curves.ID.Lx <- OSL.Curves.ID[seq(1,length(OSL.Curves.ID),by = 2)] OSL.Curves.ID.Tx <- OSL.Curves.ID[seq(2,length(OSL.Curves.ID),by = 2)] ##get index of TL curves TL.Curves.ID <- suppressWarnings(get_RLum(object, recordType = "TL$", get.index = TRUE)) ##separate TL curves which is always coming before the OSL curve ##Note: we do not check anymore whether the sequence makes sense. 
TL.Curves.ID.Lx <- TL.Curves.ID[TL.Curves.ID%in%(OSL.Curves.ID.Lx - 1)] TL.Curves.ID.Tx <- TL.Curves.ID[TL.Curves.ID%in%(OSL.Curves.ID.Tx - 1)] # Calculate LnLxTnTx values -------------------------------------------------- ##calculate LxTx values using external function LnLxTnTx <- try(lapply(seq(1,length(OSL.Curves.ID),by = 2), function(x){ if(!is.null(OSL.component) && length(OSL.component) > 0){ temp.LnLxTnTx <- get_RLum( calc_OSLLxTxDecomposed( Lx.data = object@records[[OSL.Curves.ID[x]]]@info$COMPONENTS, Tx.data = object@records[[OSL.Curves.ID[x + 1]]]@info$COMPONENTS, OSL.component = OSL.component, digits = 4, sig0 = sig0)) } else { temp.LnLxTnTx <- get_RLum( calc_OSLLxTxRatio( Lx.data = object@records[[OSL.Curves.ID[x]]]@data, Tx.data = object@records[[OSL.Curves.ID[x + 1]]]@data, signal.integral = signal.integral, signal.integral.Tx = signal.integral.Tx, background.integral = background.integral, background.integral.Tx = background.integral.Tx, background.count.distribution = background.count.distribution, sigmab = sigmab, sig0 = sig0)) } ##grep dose temp.Dose <- object@records[[OSL.Curves.ID[x]]]@info$IRR_TIME ##for the case that no information on the dose can be found if (is.null(temp.Dose)) temp.Dose <- NA temp.LnLxTnTx <- cbind(Dose = temp.Dose, temp.LnLxTnTx) }), silent = TRUE) ##this is basically for the OSL.component case to avoid that everything ##fails if something goes wrong therein if(inherits(LnLxTnTx, "try-error")){ try(stop( "[analyse_SAR.CWOSL()] Something went wrong while generating the LxTx-table. Return NULL.", call. = FALSE)) return(NULL) } ##combine LnLxTnTx <- data.table::rbindlist(LnLxTnTx) # Set regeneration points ------------------------------------------------- ##overwrite dose point manually if (!is.null(dose.points) & length(dose.points) > 0) { if (length(dose.points) != length(LnLxTnTx$Dose)) { stop("[analyse_SAR.CWOSL()] length 'dose.points' differs from number of curves.", call. 
= FALSE) } LnLxTnTx$Dose <- dose.points } ##check whether we have dose points at all if (is.null(dose.points) & anyNA(LnLxTnTx$Dose)) { stop("[analyse_SAR.CWOSL()] 'dose.points' contains NA values or have not been set!", call. = FALSE) } ##check whether the first OSL/IRSL curve (i.e., the Natural) has 0 dose. If not ##not, it is probably a Dose Recovery Test with the given dose that is treated as the ##unknown dose. We overwrite this value and warn the user. if (LnLxTnTx$Dose[1] != 0) { warning("[analyse_SAR.CWOSL()] The natural signal has a dose of ", LnLxTnTx$Dose[1], " s, which is indicative of a dose recovery test. The natural dose was set to 0.", call. = FALSE) LnLxTnTx$Dose[1] <- 0 } #generate unique dose id - this are also the # for the generated points temp.DoseID <- c(0:(length(LnLxTnTx$Dose) - 1)) temp.DoseName <- paste("R",temp.DoseID,sep = "") temp.DoseName <- cbind(Name = temp.DoseName,Dose = LnLxTnTx$Dose) ##set natural temp.DoseName[temp.DoseName[,"Name"] == "R0","Name"] <- "Natural" ##set R0 temp.DoseName[temp.DoseName[,"Name"] != "Natural" & temp.DoseName[,"Dose"] == 0,"Name"] <- "R0" ##correct numeration numeration of other dose points ##how many dose points do we have with 0? 
non.temp.zero.dose.number <- nrow(temp.DoseName[temp.DoseName[, "Dose"] != 0,]) if(length(non.temp.zero.dose.number) > 0){ temp.DoseName[temp.DoseName[,"Name"] != "Natural" & temp.DoseName[,"Name"] != "R0","Name"] <- paste0("R",c(1:non.temp.zero.dose.number)) } ##find duplicated doses (including 0 dose - which means the Natural) temp.DoseDuplicated <- duplicated(temp.DoseName[,"Dose"]) ##combine temp.DoseName temp.DoseName <- cbind(temp.DoseName,Repeated = temp.DoseDuplicated) ##correct value for R0 (it is not really repeated) temp.DoseName[temp.DoseName[,"Dose"] == 0,"Repeated"] <- FALSE ##combine in the data frame temp.LnLxTnTx <- data.frame(Name = temp.DoseName[,"Name"], Repeated = as.logical(temp.DoseName[,"Repeated"])) LnLxTnTx <- cbind(temp.LnLxTnTx,LnLxTnTx) LnLxTnTx[,"Name"] <- as.character(LnLxTnTx[,"Name"]) # Calculate Recycling Ratio ----------------------------------------------- ##Calculate Recycling Ratio if (length(LnLxTnTx[LnLxTnTx[,"Repeated"] == TRUE,"Repeated"]) > 0) { ##identify repeated doses temp.Repeated <- LnLxTnTx[LnLxTnTx[,"Repeated"] == TRUE,c("Name","Dose","LxTx")] ##find concerning previous dose for the repeated dose temp.Previous <- t(sapply(1:length(temp.Repeated[,1]),function(x) { LnLxTnTx[LnLxTnTx[,"Dose"] == temp.Repeated[x,"Dose"] & LnLxTnTx[,"Repeated"] == FALSE,c("Name","Dose","LxTx")] })) ##convert to data.frame temp.Previous <- as.data.frame(temp.Previous) ##set column names temp.ColNames <- unlist(lapply(1:length(temp.Repeated[,1]),function(x) { temp <- paste("Recycling ratio (", temp.Repeated[x,"Name"],"/", temp.Previous[temp.Previous[,"Dose"] == temp.Repeated[x,"Dose"],"Name"], ")", sep = "") return(temp[1]) })) ##Calculate Recycling Ratio RecyclingRatio <- round(as.numeric(temp.Repeated[,"LxTx"]) / as.numeric(temp.Previous[,"LxTx"]), digits = 4) ##Just transform the matrix and add column names RecyclingRatio <- t(RecyclingRatio) colnames(RecyclingRatio) <- temp.ColNames }else{ RecyclingRatio <- NA } # Calculate 
Recuperation Rate --------------------------------------------- ##Recuperation Rate (capable to handle multiple type of recuperation values) if (length(LnLxTnTx[LnLxTnTx[,"Name"] == "R0","Name"]) > 0) { Recuperation <- sapply(1:length(LnLxTnTx[LnLxTnTx[,"Name"] == "R0","Name"]), function(x) { round(LnLxTnTx[LnLxTnTx[,"Name"] == "R0","LxTx"][x] / LnLxTnTx[LnLxTnTx[,"Name"] == "Natural","LxTx"], digits = 4) }) ##Just transform the matrix and add column names Recuperation <- t(Recuperation) colnames(Recuperation) <- unlist(strsplit(paste( "Recuperation rate", 1:length(LnLxTnTx[LnLxTnTx[,"Name"] == "R0","Name"]), collapse = ";" ), ";")) }else{ Recuperation <- NA } # Evaluate and Combine Rejection Criteria --------------------------------- temp.criteria <- c( if(!is.null(colnames(RecyclingRatio))){ colnames(RecyclingRatio)}else{NA}, if(!is.null(colnames(Recuperation))){ colnames(Recuperation)}else{NA}) temp.value <- c(RecyclingRatio,Recuperation) temp.threshold <- c(rep( rejection.criteria$recycling.ratio / 100, length(RecyclingRatio) ), rep( rejection.criteria$recuperation.rate / 100, length(Recuperation) )) ##RecyclingRatio temp.status.RecyclingRatio <- rep("OK", length(RecyclingRatio)) if (!any(is.na(RecyclingRatio)) & !is.na(rejection.criteria$recycling.ratio)) temp.status.RecyclingRatio[abs(1 - RecyclingRatio) > (rejection.criteria$recycling.ratio / 100)] <- "FAILED" ##Recuperation if (!is.na(Recuperation)[1] & !is.na(rejection.criteria$recuperation.rate)) { temp.status.Recuperation <- sapply(1:length(Recuperation), function(x) { if (Recuperation[x] > rejection.criteria$recuperation.rate / 100) { "FAILED" } else{ "OK" } }) } else{ temp.status.Recuperation <- "OK" } # Provide Rejection Criteria for Testdose error -------------------------- testdose.error.calculated <- (LnLxTnTx$Net_TnTx.Error/LnLxTnTx$Net_TnTx)[1] testdose.error.threshold <- rejection.criteria$testdose.error / 100 if (is.na(testdose.error.calculated)) { testdose.error.status <- "FAILED" }else{ 
if(!is.na(testdose.error.threshold)){ testdose.error.status <- ifelse( testdose.error.calculated <= testdose.error.threshold, "OK", "FAILED" ) }else{ testdose.error.status <- "OK" } } testdose.error.data.frame <- data.frame( Criteria = "Testdose error", Value = testdose.error.calculated, Threshold = testdose.error.threshold, Status = testdose.error.status, stringsAsFactors = FALSE ) RejectionCriteria <- data.frame( Criteria = temp.criteria, Value = temp.value, Threshold = temp.threshold, Status = c(temp.status.RecyclingRatio,temp.status.Recuperation), stringsAsFactors = FALSE ) RejectionCriteria <- rbind(RejectionCriteria, testdose.error.data.frame) ##========================================================================## ##PLOTTING ##========================================================================## if (plot) { ##make sure the par settings are good after the functions stops ##Why this is so complicated? Good question, if par() is called in the ##single mode, it starts a new plot and then subsequent functions like ##analyse_pIRIRSequence() produce an odd plot output. par.default <- par()[c("oma","mar","cex", "mfrow", "mfcol")] on_exit <- function(x = par.default){ par( oma = x$oma, mar = x$mar, cex = x$cex, mfrow = x$mfrow, mfcol = x$mfcol ) } ##colours and double for plotting col <- get("col", pos = .LuminescenceEnv) # plot everyting on one page ... 
doing it here is much cleaner than # Plotting - one Page config ------------------------------------------------------- if(plot_onePage){ on.exit(on_exit()) plot.single <- TRUE layout(matrix( c(1, 1, 3, 3, 6, 6, 7, 1, 1, 3, 3, 6, 6, 8, 2, 2, 4, 4, 9, 9, 10, 2, 2, 4, 4, 9, 9, 10, 5, 5, 5, 5, 5, 5, 5), 5, 7, byrow = TRUE )) par(oma = c(0, 0, 0, 0), mar = c(4, 4, 3, 1), cex = cex * 0.6) } # Plotting - old way config ------------------------------------------------------- if (plot.single[1] == FALSE) { on.exit(on_exit()) layout(matrix( c(1, 1, 3, 3, 1, 1, 3, 3, 2, 2, 4, 4, 2, 2, 4, 4, 5, 5, 5, 5), 5, 4, byrow = TRUE )) par( oma = c(0,0,0,0), mar = c(4,4,3,3), cex = cex * 0.6 ) ## 1 -> TL previous LnLx ## 2 -> LnLx ## 3 -> TL previous TnTx ## 4 -> TnTx ## 5 -> Legend ## set selected curves to allow plotting of all curves plot.single.sel <- c(1,2,3,4,5,6,7,8) }else{ ##check for values in the single output of the function and convert if (!is(plot.single, "logical")) { if (!is(plot.single, "numeric")) { stop("[analyse_SAR.CWOSL()] Invalid data type for 'plot.single'.") } plot.single.sel <- plot.single }else{ plot.single.sel <- c(1,2,3,4,5,6,7,8) } } ##warning if number of curves exceed colour values if (length(col) < length(OSL.Curves.ID) / 2) { temp.message <- paste( "\n[analyse_SAR.CWOSL()] To many curves! Only the first", length(col),"curves are plotted!" 
) warning(temp.message) } ##legend text legend.text <- paste(LnLxTnTx$Name,"\n(",LnLxTnTx$Dose,")", sep = "") ##get channel resolution (should be equal for all curves) resolution.OSLCurves <- round(object@records[[OSL.Curves.ID[1]]]@data[2,1] - object@records[[OSL.Curves.ID[1]]]@data[1,1], digits = 2) # Plotting TL Curves previous LnLx ---------------------------------------- ##overall plot option selection for plot.single.sel if (1 %in% plot.single.sel) { ##check if TL curves are available if (length(TL.Curves.ID.Lx) > 0) { ##It is just an approximation taken from the data resolution.TLCurves <- round(mean(diff( round(object@records[[TL.Curves.ID.Lx[[1]]]]@data[,1], digits = 1) )), digits = 1) ylim.range <- vapply(TL.Curves.ID.Lx, function(x) { range(object@records[[x]]@data[,2]) }, numeric(2)) plot( NA,NA, xlab = "T [\u00B0C]", ylab = paste("TL [cts/",resolution.TLCurves," \u00B0C]",sep = ""), xlim = c(object@records[[TL.Curves.ID.Lx[[1]]]]@data[1,1], max(object@records[[TL.Curves.ID.Lx[[1]]]]@data[,1])), ylim = c(1,max(ylim.range)), main = main, log = if (log == "y" | log == "xy") { "y" }else{ "" } ) #provide curve information as mtext, to keep the space for the header mtext(side = 3, expression(paste( "TL previous ", L[n],",",L[x]," curves",sep = "" )), cex = cex * 0.7) ##plot TL curves sapply(1:length(TL.Curves.ID.Lx) ,function(x) { lines(object@records[[TL.Curves.ID.Lx[[x]]]]@data,col = col[x]) }) }else{ plot( NA,NA,xlim = c(0,1), ylim = c(0,1), main = "", axes = FALSE, ylab = "", xlab = "" ) text(0.5,0.5, "No TL curve detected") } }#plot.single.sel # Plotting LnLx Curves ---------------------------------------------------- ##overall plot option selection for plot.single.sel if (2 %in% plot.single.sel) { ylim.range <- vapply(OSL.Curves.ID.Lx, function(x) { range(object@records[[x]]@data[,2]) }, numeric(2)) if((log == "x" | log == "xy") & object@records[[OSL.Curves.ID.Lx[[1]]]]@data[1,1] == 0){ xlim <- c(object@records[[OSL.Curves.ID.Lx[1]]]@data[2,1], 
max(object@records[[OSL.Curves.ID.Lx[1]]]@data[,1]) + object@records[[OSL.Curves.ID.Lx[1]]]@data[2,1]) }else{ xlim <- c(object@records[[OSL.Curves.ID.Lx[1]]]@data[1,1], max(object@records[[OSL.Curves.ID.Lx[1]]]@data[,1])) } #open plot area LnLx plot( NA,NA, xlab = "Time [s]", ylab = paste(CWcurve.type," [cts/",resolution.OSLCurves," s]",sep = ""), xlim = xlim, ylim = range(ylim.range), main = main, log = log ) #provide curve information as mtext, to keep the space for the header mtext(side = 3, expression(paste(L[n],",",L[x]," curves",sep = "")), cex = cex * 0.7) ##plot curves sapply(1:length(OSL.Curves.ID.Lx), function(x) { if((log == "x" | log == "xy") & object@records[[OSL.Curves.ID.Lx[[x]]]]@data[1,1] == 0){ object@records[[OSL.Curves.ID.Lx[[x]]]]@data[1,] <- object@records[[OSL.Curves.ID.Lx[[x]]]]@data[1,] + diff(c(object@records[[OSL.Curves.ID.Lx[[x]]]]@data[1,1], object@records[[OSL.Curves.ID.Lx[[x]]]]@data[2,1])) warnings("[analyse_SAR.CWOSL()] curves shifted by one chanel for log-plot.") } lines(object@records[[OSL.Curves.ID.Lx[[x]]]]@data,col = col[x]) }) ##mark integration limit Lx curves abline(v = c( object@records[[OSL.Curves.ID.Lx[1]]]@data[min(signal.integral),1], object@records[[OSL.Curves.ID.Lx[1]]]@data[max(signal.integral),1], object@records[[OSL.Curves.ID.Lx[1]]]@data[min(background.integral),1], object@records[[OSL.Curves.ID.Lx[1]]]@data[max(background.integral),1]), lty = 2, col = "gray") ##mtext, implemented here, as a plot window has to be called first mtext( mtext.outer, side = 4, outer = TRUE, line = -1.7, cex = cex, col = "blue" ) }# plot.single.sel # Plotting TL Curves previous TnTx ---------------------------------------- ##overall plot option selection for plot.single.sel if (3 %in% plot.single.sel) { ##check if TL curves are available if (length(TL.Curves.ID.Tx) > 0) { ##It is just an approximation taken from the data resolution.TLCurves <- round(mean(diff( round(object@records[[TL.Curves.ID.Tx[[1]]]]@data[,1], digits = 1) )), digits 
= 1) ylim.range <- vapply(TL.Curves.ID.Tx, function(x) { range(object@records[[x]]@data[,2]) }, numeric(2)) plot( NA,NA, xlab = "T [\u00B0C]", ylab = paste("TL [cts/",resolution.TLCurves," \u00B0C]",sep = ""), xlim = c(object@records[[TL.Curves.ID.Tx[[1]]]]@data[1,1], max(object@records[[TL.Curves.ID.Tx[[1]]]]@data[,1])), ylim = c(1,max(ylim.range)), main = main, log = if (log == "y" | log == "xy") { "y" }else{ "" } ) #provide curve information as mtext, to keep the space for the header mtext(side = 3, expression(paste( "TL previous ", T[n],",",T[x]," curves",sep = "" )), cex = cex * 0.7) ##plot TL curves sapply(1:length(TL.Curves.ID.Tx) ,function(x) { lines(object@records[[TL.Curves.ID.Tx[[x]]]]@data,col = col[x]) }) }else{ plot( NA,NA,xlim = c(0,1), ylim = c(0,1), main = "", axes = FALSE, ylab = "", xlab = "" ) text(0.5,0.5, "No TL curve detected") } }#plot.single.sel # Plotting TnTx Curves ---------------------------------------------------- ##overall plot option selection for plot.single.sel if (4 %in% plot.single.sel) { ylim.range <- vapply(OSL.Curves.ID.Tx, function(x) { range(object@records[[x]]@data[,2]) }, numeric(2)) if((log == "x" | log == "xy") & object@records[[OSL.Curves.ID.Tx[[1]]]]@data[1,1] == 0){ xlim <- c(object@records[[OSL.Curves.ID.Tx[1]]]@data[2,1], max(object@records[[OSL.Curves.ID.Tx[1]]]@data[,1]) + object@records[[OSL.Curves.ID.Tx[1]]]@data[2,1]) }else{ xlim <- c(object@records[[OSL.Curves.ID.Tx[1]]]@data[1,1], max(object@records[[OSL.Curves.ID.Tx[1]]]@data[,1])) } #open plot area LnLx plot( NA,NA, xlab = "Time [s]", ylab = paste0(CWcurve.type ," [cts/",resolution.OSLCurves," s]"), xlim = xlim, ylim = range(ylim.range), main = main, log = log ) #provide curve information as mtext, to keep the space for the header mtext(side = 3, expression(paste(T[n],",",T[x]," curves",sep = "")), cex = cex * 0.7) ##plot curves and get legend values sapply(1:length(OSL.Curves.ID.Tx) ,function(x) { ##account for log-scale and 0 values if((log == "x" | log 
== "xy") & object@records[[OSL.Curves.ID.Tx[[x]]]]@data[1,1] == 0){ object@records[[OSL.Curves.ID.Tx[[x]]]]@data[1,] <- object@records[[OSL.Curves.ID.Tx[[x]]]]@data[1,] + diff(c(object@records[[OSL.Curves.ID.Tx[[x]]]]@data[1,1], object@records[[OSL.Curves.ID.Tx[[x]]]]@data[2,1])) warnings("[analyse_SAR.CWOSL()] curves shifted by one chanel for log-plot.") } lines(object@records[[OSL.Curves.ID.Tx[[x]]]]@data,col = col[x]) }) ##mark integration limit Tx curves abline(v = c( object@records[[OSL.Curves.ID.Tx[1]]]@data[min(signal.integral),1], object@records[[OSL.Curves.ID.Tx[1]]]@data[max(signal.integral),1], object@records[[OSL.Curves.ID.Tx[1]]]@data[min(background.integral),1], object@records[[OSL.Curves.ID.Tx[1]]]@data[max(background.integral),1]), lty = 2, col = "gray") }# plot.single.sel # Plotting Legend ---------------------------------------- ##overall plot option selection for plot.single.sel if (5 %in% plot.single.sel) { par.margin <- par()$mar par.mai <- par()$mai par(mar = c(1,1,1,1), mai = c(0,0,0,0)) plot( c(1:(length( OSL.Curves.ID ) / 2)), rep(7,length(OSL.Curves.ID) / 2), type = "p", axes = FALSE, xlab = "", ylab = "", pch = 20, col = unique(col[1:length(OSL.Curves.ID)]), cex = 4 * cex, ylim = c(0,10) ) ##add text text(c(1:(length( OSL.Curves.ID ) / 2)), rep(7,length(OSL.Curves.ID) / 2), legend.text, offset = 1, pos = 1) ##add line abline(h = 10,lwd = 0.5) #reset margin par(mar = par.margin, mai = par.mai) }#plot.single.sel }##end plot # Plotting GC ---------------------------------------- ##create data.frame temp.sample <- data.frame( Dose = LnLxTnTx$Dose, LxTx = LnLxTnTx$LxTx, LxTx.Error = LnLxTnTx$LxTx.Error, TnTx = LnLxTnTx$Net_TnTx ) ##overall plot option selection for plot.single.sel if (plot == TRUE && 6 %in% plot.single.sel) { plot <- TRUE }else { plot <- FALSE } ##Fit and plot growth curve if(!onlyLxTxTable){ temp.GC <- do.call(plot_GrowthCurve, args = modifyList( list( sample = temp.sample, output.plot = plot, output.plotExtended.single = 
plot_onePage, cex.global = if(plot_onePage) .6 else 1 ), list(...) )) ##if null if(is.null(temp.GC)){ temp.GC <- data.frame( De = NA, De.Error = NA, D01 = NA, D01.ERROR = NA, D02 = NA, D02.ERROR = NA, Dc = NA, De.MC = NA, Fit = NA, HPDI68_L = NA, HPDI68_U = NA, HPDI95_L = NA, HPDI95_U = NA, RC.Status = NA, stringsAsFactors = FALSE ) temp.GC.fit.Formula <- NA ##create empty plots if needed, otherwise subsequent functions may crash if(plot){ if("output.plotExtended" %in% list(...) && list(...)$output.plotExtended == FALSE){ shape::emptyplot() }else{ shape::emptyplot() shape::emptyplot() shape::emptyplot() } } }else{ ##grep information on the fit object temp.GC.fit.Formula <- get_RLum(temp.GC, "Formula") ##grep results temp.GC <- get_RLum(temp.GC) # Provide Rejection Criteria for Palaeodose error -------------------------- if(is.na(temp.GC[,1])){ palaeodose.error.calculated <- NA }else{ palaeodose.error.calculated <- round(temp.GC[,2] / temp.GC[,1], digits = 5) } palaeodose.error.threshold <- rejection.criteria$palaeodose.error / 100 if (is.na(palaeodose.error.calculated)) { palaeodose.error.status <- "FAILED" }else{ if(!is.na(palaeodose.error.threshold)){ palaeodose.error.status <- ifelse( palaeodose.error.calculated <= palaeodose.error.threshold, "OK", "FAILED" ) }else{ palaeodose.error.status <- "OK" } } palaeodose.error.data.frame <- data.frame( Criteria = "Palaeodose error", Value = palaeodose.error.calculated, Threshold = palaeodose.error.threshold, Status = palaeodose.error.status, stringsAsFactors = FALSE ) ##add exceed.max.regpoint if (!is.na(temp.GC[,1]) & !is.na(rejection.criteria$exceed.max.regpoint) && rejection.criteria$exceed.max.regpoint) { status.exceed.max.regpoint <- ifelse(max(LnLxTnTx$Dose) < temp.GC[,1], "FAILED", "OK") }else{ status.exceed.max.regpoint <- "OK" } exceed.max.regpoint.data.frame <- data.frame( Criteria = "De > max. 
dose point", Value = as.numeric(temp.GC[,1]), Threshold = if(is.na(rejection.criteria$exceed.max.regpoint)){ NA }else if(!rejection.criteria$exceed.max.regpoint){ Inf }else{ as.numeric(max(LnLxTnTx$Dose)) }, Status = status.exceed.max.regpoint ) ##add to RejectionCriteria data.frame RejectionCriteria <- rbind(RejectionCriteria, palaeodose.error.data.frame, exceed.max.regpoint.data.frame) ##add rejection status if (length(grep("FAILED",RejectionCriteria$Status)) > 0) { temp.GC <- data.frame(temp.GC, RC.Status = "FAILED", stringsAsFactors = FALSE) }else{ temp.GC <- data.frame(temp.GC, RC.Status = "OK", stringsAsFactors = FALSE) } }#endif for is.null ##end onlyLxTxTable }else{ temp.GC <- data.frame( De = NA, De.Error = NA, D01 = NA, D01.ERROR = NA, D02 = NA, D02.ERROR = NA, Dc = NA, De.MC = NA, Fit = NA, HPDI68_L = NA, HPDI68_U = NA, HPDI95_L = NA, HPDI95_U = NA, RC.Status = NA) temp.GC.fit.Formula <- NULL } ##add information on the integration limits temp.GC.extended <- data.frame( signal.range = paste(min(signal.integral),":", max(signal.integral)), background.range = paste(min(background.integral),":", max(background.integral)), signal.range.Tx = paste(min(ifelse(is.null(signal.integral.Tx),NA,signal.integral.Tx)),":", max(ifelse(is.null(signal.integral.Tx),NA,signal.integral.Tx))), background.range.Tx = paste(min(ifelse(is.null(background.integral.Tx), NA,background.integral.Tx)) ,":", max(ifelse(is.null(background.integral.Tx), NA,background.integral.Tx))), stringsAsFactors = FALSE ) # Set return Values ----------------------------------------------------------- ##generate unique identifier UID <- create_UID() temp.results.final <- set_RLum( class = "RLum.Results", data = list( data = as.data.frame(c(temp.GC, temp.GC.extended, UID = UID), stringsAsFactors = FALSE), LnLxTnTx.table = cbind(LnLxTnTx, UID = UID, stringsAsFactors = FALSE), rejection.criteria = cbind(RejectionCriteria, UID, stringsAsFactors = FALSE), Formula = temp.GC.fit.Formula ), info = list(call = 
sys.call()) ) # Plot graphical interpretation of rejection criteria ----------------------------------------- if (plot && 7 %in% plot.single.sel) { ##set graphical parameter if (!plot.single[1]) par(mfrow = c(1,2)) ##Rejection criteria temp.rejection.criteria <- get_RLum(temp.results.final, data.object = "rejection.criteria") temp.rc.reycling.ratio <- temp.rejection.criteria[ grep("Recycling ratio",temp.rejection.criteria[,"Criteria"]),] temp.rc.recuperation.rate <- temp.rejection.criteria[ grep("Recuperation rate",temp.rejection.criteria[,"Criteria"]),] temp.rc.palaedose.error <- temp.rejection.criteria[ grep("Palaeodose error",temp.rejection.criteria[,"Criteria"]),] temp.rc.testdose.error <- temp.rejection.criteria[ grep("Testdose error",temp.rejection.criteria[,"Criteria"]),] plot( NA,NA, xlim = c(-0.5,0.5), ylim = c(0,40), yaxt = "n", ylab = "", xaxt = "n", xlab = "", bty = "n", main = "Rejection criteria" ) axis( side = 1, at = c(-0.2,-0.1,0,0.1,0.2), labels = c("- 0.2", "- 0.1","0/1","+ 0.1", "+ 0.2") ) ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++## ##polygon for recycling ratio text(x = -0.35, y = 35, "Recycling R.", pos = 3, srt = 90, cex = 0.8*cex, offset = 0) polygon(x = c( -as.numeric(as.character(temp.rc.reycling.ratio$Threshold))[1],-as.numeric(as.character(temp.rc.reycling.ratio$Threshold))[1], as.numeric(as.character(temp.rc.reycling.ratio$Threshold))[1], as.numeric(as.character(temp.rc.reycling.ratio$Threshold))[1] ), y = c(31,39,39,31), col = "gray", border = NA) polygon( x = c(-0.3, -0.3, 0.3, 0.3) , y = c(31, 39, 39, 31), border = ifelse(any( grepl(pattern = "FAILED", temp.rc.reycling.ratio$Status) ), "red", "black")) ##consider possibility of multiple pIRIR signals and multiple recycling ratios if (nrow(temp.rc.recuperation.rate) > 0) { col.id <- 1 for (i in seq(1,nrow(temp.rc.recuperation.rate), length(unique(temp.rc.recuperation.rate[,"Criteria"])))) { for (j in 
0:length(unique(temp.rc.recuperation.rate[,"Criteria"]))) { points( temp.rc.reycling.ratio[i + j, "Value"] - 1, y = 35, pch = col.id, col = col.id, cex = 1.3 * cex ) } col.id <- col.id + 1 } rm(col.id) ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++## ##polygon for recuperation rate text( x = -0.35, y = 25, "Recuperation", pos = 3, srt = 90, cex = 0.8*cex, offset = 0, ) polygon( x = c( 0, 0, as.numeric(as.character( temp.rc.recuperation.rate$Threshold ))[1], as.numeric(as.character( temp.rc.recuperation.rate$Threshold ))[1] ), y = c(21,29,29,21), col = "gray", border = NA ) polygon( x = c(-0.3, -0.3, 0.3, 0.3) , y = c(21, 29, 29, 21), border = ifelse(any( grepl(pattern = "FAILED", temp.rc.recuperation.rate$Status) ), "red", "black") ) polygon( x = c(-0.3,-0.3,0,0) , y = c(21,29,29,21), border = NA, density = 10, angle = 45 ) for (i in 1:nrow(temp.rc.recuperation.rate)) { points( temp.rc.recuperation.rate[i, "Value"], y = 25, pch = i, col = i, cex = 1.3 * cex ) } } ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++## ##polygon for testdose error text( x = -0.35, y = 15, "Testdose Err.", pos = 3, srt = 90, cex = 0.8*cex, offset = 0, ) polygon( x = c( 0, 0, as.numeric(as.character(temp.rc.testdose.error$Threshold))[1], as.numeric(as.character(temp.rc.testdose.error$Threshold))[1] ), y = c(11,19,19,11), col = "gray", border = NA ) polygon( x = c(-0.3, -0.3, 0.3, 0.3) , y = c(11, 19, 19, 11), border = ifelse(any( grepl(pattern = "FAILED", temp.rc.testdose.error$Status) ), "red", "black") ) polygon( x = c(-0.3,-0.3,0,0) , y = c(11,19,19,11), border = NA, density = 10, angle = 45 ) for (i in 1:nrow(temp.rc.testdose.error)) { points( temp.rc.testdose.error[i, "Value"], y = 15, pch = i, col = i, cex = 1.3 * cex ) } ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++## ##polygon for palaeodose error text( x = -0.35, y = 5, "Palaeodose 
Err.", pos = 3, srt = 90, cex = 0.8*cex, offset = 0, ) polygon( x = c( 0, 0, as.numeric(as.character(temp.rc.palaedose.error$Threshold))[1], as.numeric(as.character(temp.rc.palaedose.error$Threshold))[1] ), y = c(1,9,9,1), col = "gray", border = NA ) polygon( x = c(-0.3, -0.3, 0.3, 0.3) , y = c(1, 9, 9, 1), border = ifelse(any( grepl(pattern = "FAILED", temp.rc.palaedose.error$Status) ), "red", "black") ) polygon( x = c(-0.3,-0.3,0,0) , y = c(1,9,9,1), border = NA, density = 10, angle = 45 ) if(nrow(temp.rc.palaedose.error) != 0){ for (i in 1:nrow(temp.rc.palaedose.error)) { if(!is.na(temp.rc.palaedose.error[i, "Value"])){ points( temp.rc.palaedose.error[i, "Value"], y = 5, pch = i, col = i, cex = 1.3 * cex ) } } } } if (plot == TRUE && 8 %in% plot.single.sel) { ##graphical representation of IR-curve temp.IRSL <- suppressWarnings(get_RLum(object, recordType = "IRSL")) if(length(temp.IRSL) != 0){ if(inherits(temp.IRSL, "RLum.Data.Curve")){ plot_RLum.Data.Curve(temp.IRSL, par.local = FALSE) }else if(inherits(temp.IRSL, "list")){ plot_RLum.Data.Curve(temp.IRSL[[length(temp.IRSL)]], par.local = FALSE) warning( "[analyse_SAR.CWOSL()] Multiple IRSL curves detected (IRSL test), show only the last one.", immediate. = TRUE, call. = FALSE ) }else{ shape::emptyplot() } }else{ plot(1, type="n", axes=F, xlab="", ylab="") text(x = c(1,1), y = c(1, 1), labels = "No IRSL curve detected!") } } # Return -------------------------------------------------------------------------------------- invisible(temp.results.final) }else{ warning(paste0( "\n", paste(unlist(error.list), collapse = "\n"),"\n... >> nothing was done here!" ), call. = FALSE) invisible(NULL) } } Luminescence/R/convert_BIN2CSV.R0000644000176200001440000000707014264017373016031 0ustar liggesusers#' Export Risoe BIN-file(s) to CSV-files #' #' This function is a wrapper function around the functions [read_BIN2R] and #' [write_RLum2CSV] and it imports a Risoe BIN-file and directly exports its #' content to CSV-files. 
If nothing is set for the argument `path` #' ([write_RLum2CSV]) the input folder will become the output folder. #' #' @param file [character] (**required**): #' name of the BIN-file to be converted to CSV-files #' #' @param ... further arguments that will be passed to the function #' [read_BIN2R] and [write_RLum2CSV] #' #' @return #' The function returns either a CSV-file (or many of them) or for the #' option `export == FALSE` a list comprising objects of type [data.frame] and [matrix] #' #' @section Function version: 0.1.0 #' #' @author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #' @seealso [RLum.Analysis-class], [RLum.Data-class], [RLum.Results-class], #' [utils::write.table], [write_RLum2CSV], [read_BIN2R] #' #' @keywords IO #' #' @examples #' #' ##transform Risoe.BINfileData values to a list #' data(ExampleData.BINfileData, envir = environment()) #' convert_BIN2CSV(subset(CWOSL.SAR.Data, POSITION == 1), export = FALSE) #' #' \dontrun{ #' ##select your BIN-file #' file <- file.choose() #' #' ##convert #' convert_BIN2CSV(file) #' #' } #' #' @md #' @export convert_BIN2CSV <- function( file, ... ){ # General tests ------------------------------------------------------------------------------- ##file is missing? if(missing(file)){ stop("[convert_BIN2CSV()] file is missing!", call. 
= FALSE) } ##set input arguments convert_BIN2CSV_settings.default <- list( path = if(!is(file, "Risoe.BINfileData")){dirname(file)}else{NULL}, show.raw.values = FALSE, position = NULL, n.records = NULL, zero_data.rm = TRUE, duplicated.rm = FALSE, show.record.number = FALSE, txtProgressBar = TRUE, forced.VersionNumber = NULL, ignore.RECTYPE = FALSE, pattern = NULL, verbose = TRUE, export = TRUE ) ##modify list on demand convert_BIN2CSV_settings <- modifyList(x = convert_BIN2CSV_settings.default, val = list(...)) # Import file --------------------------------------------------------------------------------- if(!is(file, "Risoe.BINfileData")){ object <- read_BIN2R( file = file, show.raw.values = convert_BIN2CSV_settings$show.raw.values, position = convert_BIN2CSV_settings$position, n.records = convert_BIN2CSV_settings$n.records, zero_data.rm = convert_BIN2CSV_settings$zero_data.rm, duplicated.rm = convert_BIN2CSV_settings$duplicated.rm, fastForward = TRUE, show.record.number = convert_BIN2CSV_settings$show.record.number, txtProgressBar = convert_BIN2CSV_settings$txtProgressBar, forced.VersionNumber = convert_BIN2CSV_settings$forced.VersionNumber, ignore.RECTYPE = convert_BIN2CSV_settings$ignore.RECTYPE, pattern = convert_BIN2CSV_settings$pattern, verbose = convert_BIN2CSV_settings$verbose ) }else{ object <- Risoe.BINfileData2RLum.Analysis(file) } # Export to CSV ------------------------------------------------------------------------------- ##get all arguments we want to pass and remove the doubled one arguments <- c(list(object = object, export = convert_BIN2CSV_settings$export), list(...)) arguments[duplicated(names(arguments))] <- NULL ##this if-condition prevents NULL in the terminal if(convert_BIN2CSV_settings$export == TRUE){ invisible(do.call("write_RLum2CSV", arguments)) }else{ do.call("write_RLum2CSV", arguments) } } Luminescence/R/plot_DetPlot.R0000644000176200001440000002760414464125673015646 0ustar liggesusers#' @title Create De(t) plot #' #' @description 
Plots the equivalent dose (De) in dependency of the chosen signal integral #' (cf. Bailey et al., 2003). The function is simply passing several arguments #' to the function [plot] and the used analysis functions and runs it in a loop. #' Example: `legend.pos` for legend position, `legend` for legend text. #' #' @details #' #' **method** #' #' The original method presented by Bailey et al., 2003 shifted the signal integrals and slightly #' extended them accounting for changes in the counting statistics. Example: `c(1:3, 3:5, 5:7)`. #' However, here also another method is provided allowing to expand the signal integral by #' consecutively expanding the integral by its chosen length. Example: `c(1:3, 1:5, 1:7)` #' #' Note that in both cases the integral limits are overlap. The finally applied limits are part #' of the function output. #' #' **analyse_function.control** #' #' The argument `analyse_function.control` currently supports the following arguments #' `sequence.structure`, `dose.points`, `mtext.outer`, `fit.method`, `fit.force_through_origin`, `plot`, `plot.single` #' #' @param object [RLum.Analysis-class] (**required**): #' input object containing data for analysis #' #' @param signal.integral.min [integer] (**required**): #' lower bound of the signal integral. #' #' @param signal.integral.max [integer] (**required**): #' upper bound of the signal integral. #' #' @param background.integral.min [integer] (**required**): #' lower bound of the background integral. #' #' @param background.integral.max [integer] (**required**): #' upper bound of the background integral. #' #' @param method [character] (*with default*): #' method applied for constructing the De(t) plot. 
#' - `shift` (*the default*): the chosen signal integral is shifted the shine down curve, #' - `expansion`: the chosen signal integral is expanded each time by its length #' #' @param signal_integral.seq [numeric] (*optional*): #' argument to provide an own signal integral sequence for constructing the De(t) plot #' #' @param analyse_function [character] (*with default*): #' name of the analyse function to be called. Supported functions are: #' [analyse_SAR.CWOSL], [analyse_pIRIRSequence] #' #' @param analyse_function.control [list] (*optional*): #' selected arguments to be passed to the supported analyse functions #' ([analyse_SAR.CWOSL], [analyse_pIRIRSequence]) #' #' @param n.channels [integer] (*optional*): #' number of channels used for the De(t) plot. If nothing is provided all #' De-values are calculated and plotted until the start of the background #' integral. #' #' @param show_ShineDownCurve [logical] (*with default*): #' enables or disables shine down curve in the plot output #' #' @param respect_RC.Status [logical] (*with default*): #' remove De-values with 'FAILED' RC.Status from the plot #' (cf. [analyse_SAR.CWOSL] and [analyse_pIRIRSequence]) #' #' @param verbose [logical] (*with default*): #' enables or disables terminal feedback #' #' @param ... further arguments and graphical parameters passed to #' [plot.default], [analyse_SAR.CWOSL] and [analyse_pIRIRSequence] (see details for further information). 
#' Plot control parameters are: `ylim`, `xlim`, `ylab`, `xlab`, `main`, `pch`, `mtext`, `cex`, `legend`, #' `legend.text`, `legend.pos` #' #' @return #' A plot and an [RLum.Results-class] object with the produced De values #' #' `@data`: #' #' \tabular{lll}{ #' **Object** \tab **Type** \tab **Description**\cr #' De.values \tab `data.frame` \tab table with De values \cr #' signal_integral.seq \tab `numeric` \tab integral sequence used for the calculation #' } #' #' `@info`: #' #' \tabular{lll}{ #' **Object** \tab **Type** \tab **Description**\cr #' call \tab `call` \tab the original function call #' } #' #' #' @note #' The entire analysis is based on the used analysis functions, namely #' [analyse_SAR.CWOSL] and [analyse_pIRIRSequence]. However, the integrity #' checks of this function are not that thoughtful as in these functions itself. #' It means, that every sequence should be checked carefully before running long #' calculations using several hundreds of channels. #' #' @section Function version: 0.1.5 #' #' @author Sebastian Kreutzer, Institute of Geography, Ruprecht-Karl University of Heidelberg (Germany) #' #' @references #' Bailey, R.M., Singarayer, J.S., Ward, S., Stokes, S., 2003. Identification of partial resetting #' using De as a function of illumination time. Radiation Measurements 37, 511-518. 
#' doi:10.1016/S1350-4487(03)00063-5 #' #' @seealso [plot], [analyse_SAR.CWOSL], [analyse_pIRIRSequence] #' #' @examples #' #' \dontrun{ #' ##load data #' ##ExampleData.BINfileData contains two BINfileData objects #' ##CWOSL.SAR.Data and TL.SAR.Data #' data(ExampleData.BINfileData, envir = environment()) #' #' ##transform the values from the first position in a RLum.Analysis object #' object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos=1) #' #' plot_DetPlot( #' object, #' signal.integral.min = 1, #' signal.integral.max = 3, #' background.integral.min = 900, #' background.integral.max = 1000, #' n.channels = 5) #' } #' #' @md #' @export plot_DetPlot <- function( object, signal.integral.min, signal.integral.max, background.integral.min, background.integral.max, method = "shift", signal_integral.seq = NULL, analyse_function = "analyse_SAR.CWOSL", analyse_function.control = list(), n.channels = NULL, show_ShineDownCurve = TRUE, respect_RC.Status = FALSE, verbose = TRUE, ... ) { # Integrity Tests ----------------------------------------------------------------------------- ##check input if(!inherits(object, "RLum.Analysis")) stop("[plot_DetPlot()] input must be an RLum.Analysis object!", call. 
= FALSE) ##get structure object.structure <- structure_RLum(object) # Set parameters ------------------------------------------------------------------------------ ##set n.channels if(is.null(n.channels)){ n.channels <- ceiling( (background.integral.min - 1 - signal.integral.max) / (signal.integral.max - signal.integral.min) ) } analyse_function.settings <- list( sequence.structure = c("TL", "IR50", "pIRIR225"), dose.points = NULL, mtext.outer = "", fit.method = "EXP", fit.force_through_origin = FALSE, plot = FALSE, plot.single = FALSE ) analyse_function.settings <- modifyList(analyse_function.settings, analyse_function.control) # Analyse ------------------------------------------------------------------------------------- ##set integral sequence if (is.null(signal_integral.seq)) { if(signal.integral.min == signal.integral.max){ signal_integral.seq <- signal.integral.min:(background.integral.min - 1) }else{ signal_integral.seq <- seq(signal.integral.min, background.integral.min - 1, by = signal.integral.max - signal.integral.min) } } if(analyse_function == "analyse_SAR.CWOSL"){ results <- merge_RLum(lapply(1:n.channels, function(x){ analyse_SAR.CWOSL( object = object, signal.integral.min = if(method == "shift"){signal_integral.seq[x]}else{signal_integral.seq[1]}, signal.integral.max = signal_integral.seq[x+1], background.integral.min = background.integral.min, background.integral.max = background.integral.max, dose.points = analyse_function.settings$dose.points, mtext.outer = analyse_function.settings$mtext.outer, fit.force_through_origin = analyse_function.settings$fit.force_through_origin, fit.method = analyse_function.settings$fit.method, plot = analyse_function.settings$plot, plot.single = analyse_function.settings$plot.single, verbose = verbose ) })) } else if(analyse_function == "analyse_pIRIRSequence"){ results <- merge_RLum(lapply(1:n.channels, function(x){ analyse_pIRIRSequence( object = object, signal.integral.min = if(method == 
"shift"){signal_integral.seq[x]}else{signal_integral.seq[1]}, signal.integral.max = signal_integral.seq[x+1], background.integral.min = background.integral.min, background.integral.max = background.integral.max, dose.points = analyse_function.settings$dose.points, mtext.outer = analyse_function.settings$mtext.outer, plot = analyse_function.settings$plot, plot.single = analyse_function.settings$plot.single, sequence.structure = analyse_function.settings$sequence.structure, verbose = verbose ) })) } else{ stop("[plot_DetPlot()] 'analyse_function' unknown!", call. = FALSE) } # Plot ---------------------------------------------------------------------------------------- ##get De results if(analyse_function == "analyse_pIRIRSequence"){ pIRIR_signals <- unique(get_RLum(results)$Signal) }else{ pIRIR_signals <- NA } ##run this in a loop to account for pIRIR data df_final <- lapply(1:length(pIRIR_signals), function(i){ ##get data.frame df <- get_RLum(results) ##further limit if(!is.na(pIRIR_signals[1])) df <- df[df$Signal == pIRIR_signals[i],] ##add shine down curve, which is by definition the first IRSL/OSL curve ##and normalise on the highest De value OSL_curve <- as(get_RLum(object, recordType = "SL")[[i]], "matrix") ##limit to what we see OSL_curve <- OSL_curve[1:signal_integral.seq[n.channels + 1],] m <- ((min(df$De - df$De.Error, na.rm = TRUE)) - (max(df$De, na.rm = TRUE) + max(df$De.Error, na.rm = TRUE))) / (min(OSL_curve[, 2], na.rm = TRUE) - max(OSL_curve[, 2], na.rm = TRUE)) n <- (max(df$De, na.rm = TRUE) + max(df$De.Error, na.rm = TRUE)) - m * max(OSL_curve[, 2]) OSL_curve[, 2] <- m * OSL_curve[, 2] + n rm(n, m) ##set plot setting plot.settings <- modifyList(list( ylim = c( min(df$De - df$De.Error, na.rm = TRUE), (max(df$De, na.rm = TRUE) + max(df$De.Error, na.rm = TRUE))), xlim = c(min(OSL_curve[, 1]), max(OSL_curve[, 1])), ylab = expression(paste(D[e] / s, " and ", L[n]/(a.u.))), xlab = "Stimulation time [s]", main = "De(t) plot", pch = 1, mtext = 
ifelse(is.na(pIRIR_signals[1]), "", paste0("Signal: ",pIRIR_signals[i])), cex = 1, legend = TRUE, legend.text = c(expression(L[n]-signal), expression(D[e])), legend.pos = "bottomleft" ), list(...)) ##general settings par(cex = plot.settings$cex) ##set x-axis df_x <- OSL_curve[seq(signal.integral.max, signal_integral.seq[n.channels+1], length.out = nrow(df)),1] #combine everything to allow excluding unwanted values df_final <- cbind(df, df_x) if (respect_RC.Status) { df_final <- df_final[df_final$RC.Status != "FAILED", ] } ##open plot area plot( NA, NA, xlim = plot.settings$xlim, ylim = if(any(is.infinite(plot.settings$ylim))) c(-1,1) else plot.settings$ylim , xlab = plot.settings$xlab, ylab = plot.settings$ylab, main = plot.settings$main ) if (show_ShineDownCurve) lines(OSL_curve, type = "b", pch = 20) ##ToDo:color failed points red ##plot points and error bars points(df_final[, c("df_x", "De")], pch = plot.settings$pch) segments( x0 = df_final$df_x, y0 = df_final$De + df_final$De.Error, x1 = df_final$df_x, y1 = df_final$De - df_final$De.Error ) ##set mtext mtext(side = 3, plot.settings$mtext) ##legend if(plot.settings$legend){ legend( plot.settings$legend.pos, legend = plot.settings$legend.text, pch = c(plot.settings$pch, 20), bty = "n" ) } ##set return return(df_final) }) ##merge results return(set_RLum( class = "RLum.Results", data = list( De.values = as.data.frame(data.table::rbindlist(df_final)), signal_integral.seq = signal_integral.seq ), info = list(call = sys.call()) )) } Luminescence/R/length_RLum.R0000644000176200001440000000173014264017373015440 0ustar liggesusers#' General accessor function for RLum S4 class objects #' #' Function calls object-specific get functions for RLum S4 class objects. #' #' The function provides a generalised access point for specific #' [RLum-class] objects.\cr #' Depending on the input object, the corresponding get function will be selected. 
#' Allowed arguments can be found in the documentations of the corresponding #' [RLum-class] class. #' #' @param object [RLum-class] (**required**): #' S4 object of class `RLum` #' #' @return Return is the same as input objects as provided in the list. #' #' @section Function version: 0.1.0 #' #' @author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' (France) #' @seealso #' [RLum.Data.Curve-class], #' [RLum.Data.Image-class], #' [RLum.Data.Spectrum-class], #' [RLum.Analysis-class], #' [RLum.Results-class] #' @keywords utilities #' #' #' @md #' @export setGeneric("length_RLum", function(object) { standardGeneric("length_RLum") }) Luminescence/R/fit_EmissionSpectra.R0000644000176200001440000005214614264017373017201 0ustar liggesusers#'@title Luminescence Emission Spectra Deconvolution #' #'@description Luminescence spectra deconvolution on [RLum.Data.Spectrum-class] and [matrix] objects #'on an **energy scale**. The function is optimised for emission spectra typically #'obtained in the context of TL, OSL and RF measurements detected between 200 and 1000 nm. #'The function is not prepared to deconvolve TL curves (counts against temperature; #'no wavelength scale). If you are interested in such analysis, please check, e.g., #'the package `'tgcd'`. #' #'@details #' #'**Used equation** #' #'The emission spectra (on an energy scale) can be best described as the sum of multiple #'Gaussian components: #' #''\deqn{ #'y = \Sigma Ci * 1/(\sigma_{i} * \sqrt(2 * \pi)) * exp(-1/2 * ((x - \mu_{i})/\sigma_{i}))^2) #'} #' #'with the parameters \eqn{\sigma} (peak width) and \eqn{\mu} (peak centre) and \eqn{C} #'(scaling factor). #' #' #'**Start parameter estimation and fitting algorithm** #' #'The spectrum deconvolution consists of the following steps: #' #'1. Peak finding \cr #'2. Start parameter estimation \cr #'3. 
Fitting via [minpack.lm::nls.lm]\cr #' #'The peak finding is realised by an approach (re-)suggested by Petr Pikal via the R-help #'mailing list (`https://stat.ethz.ch/pipermail/r-help/2005-November/thread.html`) in November 2005. #'This goes back to even earlier discussion in 2001 based on Prof Brian Ripley's idea. #'It smartly uses the functions [stats::embed] and [max.col] to identify peaks positions. #'For the use in this context, the algorithm has been further modified to scale on the #'input data resolution (cf. source code).\cr #' #'The start parameter estimation uses random sampling from a range of meaningful parameters #'and repeats the fitting until 1000 successful fits have been produced or the set `max.runs` value #'is exceeded. #' #'Currently the best fit is the one with the lowest number for squared residuals, but #'other parameters are returned as well. If a series of curves needs to be analysed, #'it is recommended to make few trial runs, then fix the number of components and #'run at least 10,000 iterations (parameter `method_control = list(max.runs = 10000)`). #' #'**Supported `method_control` settings** #' #'\tabular{llll}{ #' **Parameter** \tab **Type** \tab **Default** \tab **Description**\cr #' `max.runs` \tab [integer] \tab `10000` \tab maximum allowed search iterations, if exceed #' the searching stops \cr #' `graining` \tab [numeric] \tab `15` \tab gives control over how coarse or fine the spectrum is split into search intervals for the peak finding algorithm \cr #' `norm` \tab [logical] \tab `TRUE` \tab normalises data to the highest count value before fitting \cr #' `trace` \tab [logical] \tab `FALSE` \tab enables/disables the tracing of the minimisation routine #'} #' #'@param object [RLum.Data.Spectrum-class], [matrix] (**required**): input #'object. 
Please note that an energy spectrum is expected #' #'@param frame [numeric] (*optional*): defines the frame to be analysed #' #'@param start_parameters [numeric] (*optional*): allows to provide own start parameters for a #'semi-automated procedure. Parameters need to be provided in eV. Every value provided replaces a #'value from the automated peak finding algorithm (in ascending order). #' #'@param n_components [numeric] (*optional*): allows a number of the aimed number of #'components. However, it defines rather a maximum than than a minimum. Can be combined with #'other parameters. #' #'@param input_scale [character] (*optional*): defines whether your x-values define wavelength or #'energy values. For the analysis an energy scale is expected, allowed values are `'wavelength'` and #'`'energy'`. If nothing (`NULL`) is defined, the function tries to understand the input #'automatically. #' #'@param sub_negative [numeric] (*with default*): substitute negative values in the input object #'by the number provided here (default: `0`). Can be set to `NULL`, i.e. negative values are kept. #' #'@param method_control [list] (*optional*): options to control the fit method, see details #' #'@param verbose [logical] (*with default*): enable/disable verbose mode #' #'@param plot [logical] (*with default*): enable/disable plot output #' #'@param ... 
further arguments to be passed to control the plot output #'(supported: `main`, `xlab`, `ylab`, `xlim`, `ylim`, `log`, `mtext`, `legend` (`TRUE` or `FALSE`), #'`legend.text`, `legend.pos`) #' #'@return #' -----------------------------------\cr #' `[ NUMERICAL OUTPUT ]`\cr #' -----------------------------------\cr #' #' **`RLum.Results`**-object #' #' **slot:** **`@data`** #' #' \tabular{lll}{ #' **Element** \tab **Type** \tab **Description**\cr #' `$data` \tab `matrix` \tab the final fit matrix \cr #' `$fit` \tab `nls` \tab the fit object returned by [minpack.lm::nls.lm] \cr #' `$fit_info` \tab `list` \tab a few additional parameters that can be used to asses the quality #' of the fit #' } #' #' #'**slot:** **`@info`** #' #' The original function call #' #' ---------------------------------\cr #' `[ TERMINAL OUTPUT ]` \cr #' ---------------------------------\cr #' #' The terminal output provides brief information on the #' deconvolution process and the obtained results. #' Terminal output is only shown of the argument `verbose = TRUE`. #' #' ---------------------------\cr #' `[ PLOT OUTPUT ]` \cr #' ---------------------------\cr #' #' The function returns a plot showing the raw signal with the #' detected components. If the fitting failed, a basic plot is returned #' showing the raw data and indicating the peaks detected for the start #' parameter estimation. The grey band in the residual plot indicates the #' 10% deviation from 0 (means no residual). 
#' #'@section Function version: 0.1.1 #' #'@author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #'@seealso [RLum.Data.Spectrum-class], [RLum.Results-class], [plot_RLum], #'[convert_Wavelength2Energy], [minpack.lm::nls.lm] #' #'@keywords datagen #' #'@examples #' #'##load example data #'data(ExampleData.XSYG, envir = environment()) #' #'##subtract background #'TL.Spectrum@@data <- TL.Spectrum@@data[] - TL.Spectrum@@data[,15] #' #'results <- fit_EmissionSpectra( #' object = TL.Spectrum, #' frame = 5, #' method_control = list(max.runs = 10) #' ) #' #' ##deconvolution of a TL spectrum #' \dontrun{ #' #' ##load example data #' #' ##replace 0 values #' results <- fit_EmissionSpectra( #' object = TL.Spectrum, #' frame = 5, main = "TL spectrum" #' ) #' #'} #' #'@md #'@export fit_EmissionSpectra <- function( object, frame = NULL, n_components = NULL, start_parameters = NULL, sub_negative = 0, input_scale = NULL, method_control = list(), verbose = TRUE, plot = TRUE, ... ){ ##TODO: Find a way to get a significant number of components ## This function works only on a list of matrices, so what ever we do here, we have to ## create a list of data treat, frame controls the number of frames analysed ##input RLum.Data.Spectrum ... make list either way if(inherits(object, "RLum.Data.Spectrum")) object <- list(object) ##stop, mixed input is not allowed if(inherits(object, "list") && length(unique(sapply(object, class))) != 1) stop("[fit_EmissionSpectra()] List elements of different class detected!", call. = FALSE) ##deal with RLum.Data.Spectrum lists if(inherits(object, "list") && all(sapply(object, class) == "RLum.Data.Spectrum")){ temp <- lapply(object, function(o){ ##get x-axis x <- as.numeric(rownames(o@data)) rownames(o@data) <- NULL ##set frame if(is.null(frame)){ frame <- 1:ncol(o@data) }else{ if(max(frame) > ncol(o@data)|| min(frame) < 1){ stop( paste0( "[fit_EmissionSpectra()] 'frame' invalid. 
Allowed range min: 1 and max: ",ncol(o@data)), call. = FALSE) } } ##get frame temp_frame <- lapply(frame, function(f) cbind(x, o@data[,f])) names(temp_frame) <- paste0("Frame: ", frame) return(temp_frame) }) ##set object name names(temp) <- paste0("ALQ: ", 1:length(temp), " ") ##unlist, now we have what we want object <- unlist(temp, use.names = TRUE, recursive = FALSE) names(object) <- gsub(" .", names(object), replacement = " | ", fixed = TRUE) rm(temp) } ##handle a single matrix that may have different columns if(inherits(object, "matrix") && ncol(object) > 2){ rownames(object) <- NULL ##set frame if(is.null(frame)){ frame <- 1:(ncol(object) - 1) }else{ if(max(frame) > (ncol(object)-1) || min(frame) < 1){ stop( paste0( "[fit_EmissionSpectra()] 'frame' invalid. Allowed range min: 1 and max: ", ncol(object)-1),"!", call. = FALSE) } } temp <- lapply(frame +1 , function(x) cbind(object[,1],object[,x])) names(temp) <- paste0("Frame: ",frame) object <- temp rm(temp) } ##now treat different lists, the aim is to have a list of 2-column matrices ##we have two types of lists, # Self-call ----------------------------------------------------------------------------------- if(inherits(object, "list")){ ##get argument list args_list <- list(...) 
##recycle arguments if(!"mtext" %in% names(args_list)){ mtext <- names(object) }else{ mtext <- as.list(rep(args_list$mtext, length(object))) args_list$mtext <- NULL } ##run over the list results <- lapply(1:length(object), function(o){ do.call(fit_EmissionSpectra, args = c( list( object = object[[o]], start_parameters = start_parameters, n_components = n_components, sub_negative = sub_negative, method_control = method_control, frame = frame, mtext = mtext[[o]]), verbose = verbose, plot = plot, args_list) ) }) ##merge output and return return(merge_RLum(results)) } # Start main core ----------------------------------------------------------------------------- ##backstop, from here we allow only a matrix if(!inherits(object, "matrix")) stop("[fit_EmissionSpectra()] Input not supported, please read the manual!",call. = FALSE) ##extract matrix for everything below m <- object[,1:2] ##replace all negative values if(!is.null(sub_negative)) m[m[,2] < 0,2] <- sub_negative ##output if(verbose){ cat("\n[fit_EmissionSpectra()]\n") cat("\n>> Treating dataset >>",frame,"<<\n") } ##check the scale if(is.null(input_scale)){ ##values above 30 are unlikely, means its likely that we have a wavelength scale if(max(m[,1]) > 30){ if(verbose) cat(">> Wavelength scale detected ...\n") m <- convert_Wavelength2Energy(m, order = TRUE) if(verbose) cat(">> Wavelength to energy scale conversion ... \t[OK]\n") } }else if(input_scale == "wavelength"){ m <- convert_Wavelength2Energy(m, order = TRUE) if(verbose) cat(">> Wavelength to energy scale conversion ... \t[OK]\n") } # Settings ------------------------------------------------------------------------------------ ##create peak finding function ... 
this helps to get good start parameters ##https://grokbase.com/t/r/r-help/05bqza71c4/r-finding-peaks-in-a-simple-dataset-with-r ##https://stat.ethz.ch/pipermail/r-help/2005-November/thread.html ##author: Petr Pikal in 2004; with modifications by Sebastian Kreutzer .peaks <- function(x, span, size = nrow(m)) { z <- stats::embed(x, span) s <- span %/% sample(1:4, size = 1) ##the part `ceiling(...)` scales the entire algorithm v <- max.col(z, ties.method = "first") == ceiling(10^(3 - log10(nrow(m)))) + s result <- c(rep(FALSE, s), v) which(result[1:(length(result) - s)]) } ##set fit function x <- 0 #cheat R check routine fit_forumla <- function(n.components){ sigma <- paste0("sigma.",1:n.components) mu <- paste0("mu.",1:n.components) C <- paste0("C.",1:n.components) as.formula( paste0("y ~ ", paste(C," * 1/(",sigma," * sqrt(2 * pi)) * exp(-0.5 * ((x - ",mu,")/",sigma,")^2)", collapse = " + "))) } # Fitting ------------------------------------------------------------------------------------- ## method_control -------- method_control <- modifyList(x = list( max.runs = 10000, graining = 15, norm = TRUE, trace = FALSE ), val = method_control) # set data.frame ------------------------------------------------------------------------------ df <- data.frame(x = m[,1], y = m[,2]) ##normalise values, it is just easier if(method_control$norm[1]) df[["y"]] <- df[["y"]]/max(m[,2]) ##normalise values, it is just easier ##initialise objects success_counter <- 0 run <- 0 fit <- list() mu <- C <- sigma <- NA R2 <- SSR <- SST <- R2adj <- NA ## (WHILE LOOP START) ------- ##run iterations while(success_counter < 1000 && run < method_control$max.runs){ ##try to find start parameters ##check graining parameter if(method_control$graining >= nrow(m)) stop(paste0( "[fit_EmissionSpectra()] method_control$graining cannot be larger than available channels (", nrow(m) ,")!"), call. 
= FALSE) ##identify peaks id_peaks <- .peaks(m[,2], sample(method_control$graining[1]:(nrow(m) - 1), 1)) ##make sure that we do not end up in an endless loop if(length(id_peaks) == 0){ if (verbose) cat("\r>> Searching components ... \t\t\t[-]") run <- run + 1 next() } ## set start parameters for fitting -------- mu <- m[id_peaks,1] if(!is.null(start_parameters)) mu <- c(sort(start_parameters), mu[-c(1:length(start_parameters))]) ## limit the number of components if(!is.null(n_components[1])) mu <- mu[seq(1,length(mu), length.out = n_components[1])] sigma <- rep(sample(0.01:10,1),length(mu)) C <- rep(max(df[[2]])/2, length(mu)) names(mu) <- paste0("mu.", 1:length(mu)) names(sigma) <- paste0("sigma.", 1:length(mu)) names(C) <- paste0("C.", 1:length(mu)) ##run fitting using the Levenberg-Marquardt algorithm fit_try <- try(minpack.lm::nlsLM( formula = fit_forumla(n.components = length(mu)), data = df, start = c(sigma, mu, C), trace = method_control$trace, lower = rep(0, 3 * length(mu)), upper = c( rep(1000, length(mu)), rep(max(df[[1]]), length(mu)), rep(max(df[[2]]), length(mu))), control = minpack.lm::nls.lm.control(maxiter = 500) ), silent = TRUE) ##handle output if (class(fit_try)[1] != "try-error") { success_counter <- success_counter + 1 fit[[success_counter]] <- fit_try if (verbose) cat("\r>> Searching components ... \t\t\t[/]") } else{ if (verbose) cat("\r>> Searching components ... \t\t\t[\\]") } ##update run counter run <- run + 1 } ## handle the output if(length(fit) == 0){ if (verbose) cat("\r>> Searching components ... \t\t\t[FAILED]") }else{ if (verbose) cat("\r>> Searching components ... \t\t\t[OK]") } ##Extract best fit values ##TODO ... 
should be improved, its works, but maybe there are better solutions if (length(fit) != 0) { ##obtain the fit with the best fit best_fit <- vapply(fit, function(x) sum(residuals(x) ^ 2), numeric(1)) fit <- fit[[which.min(best_fit)]] ## more parameters SSR <- min(best_fit) SST <- sum((df[[2]] - mean(df[[2]]))^2) R2 <- 1 - SSR/SST R2adj <- ((1 - R2) * (nrow(df) - 1)) / (nrow(df) - length(coef(fit)) - 1) }else{ fit <- NA } # Extract values ------------------------------------------------------------------------------ ##extract components if(!is.na(fit[1]) && is(fit, "nls")){ ##extract values we need only m_coef <- summary(fit)$coefficients m_coef <- matrix( data = c( as.numeric(m_coef[grepl(pattern = "mu", x = rownames(m_coef), fixed = TRUE),1:2]), as.numeric(m_coef[grepl(pattern = "sigma", x = rownames(m_coef), fixed = TRUE),1:2]), as.numeric(m_coef[grepl(pattern = "C", x = rownames(m_coef), fixed = TRUE),1:2]) ), ncol = 6 ) ##set colnames colnames(m_coef) <- c("mu", "SE(mu)", "sigma", "SE(sigma)", "C", "SE(C)") ##order by sigma m_coef <- m_coef[order(m_coef[,1]),, drop = FALSE] ##extract single values, we need this later mu <- m_coef[,"mu"] sigma <- m_coef[,"sigma"] C <- m_coef[,"C"] }else{ m_coef <- NA } # Terminal output ----------------------------------------------------------------------------- if(verbose && !is.na(m_coef[1])){ cat(paste0("\n\n>> Fitting results (",length(mu), " component model):\n")) cat("-------------------------------------------------------------------------\n") print(m_coef) cat("-------------------------------------------------------------------------") cat(paste0("\nSE: standard error | SSR: ", format(min(best_fit), scientific=TRUE, digits = 4), "| R^2: ", round(R2,3), " | R^2_adj: ", round(R2adj,4))) cat("\n(use the output in $fit for a more detailed analysis)\n\n") } # Plotting ------------------------------------------------------------------------------------ if(plot){ ##get colour values col <- get("col", pos = .LuminescenceEnv)[-1] 
##plot settings plot_settings <- modifyList(x = list( xlab = "Energy [eV]", ylab = "Luminescence [a.u.]", main = "Emission Spectrum Deconvolution", xlim = range(df[[1]]), ylim = range(df[[2]]), log = "", mtext = "", legend = TRUE, legend.pos = "topright", legend.text = c("sum", paste0("c",1:length(mu),": ", round(mu,2), " eV")) ), val = list(...)) if(!is.na(fit[1]) && class(fit)[1] != "try-error"){ ##make sure that the screen closes if something is wrong on.exit(close.screen(n = c(1,2))) ##set split screen settings split.screen(rbind( c(0.1,1,0.32, 0.98), c(0.1,1,0.1, 0.315))) ##SCREEN 1 ---------- screen(1) par(mar = c(0, 4, 3, 4)) plot( df, pch = 20, xlab = plot_settings$xlab, ylab = plot_settings$ylab, xlim = plot_settings$xlim, ylim = plot_settings$ylim, main = plot_settings$main, col = rgb(0, 0, 0, .6), xaxt = "n", yaxt = "n", log = plot_settings$log ) ## add axis normalised axis(side = 2, at = axTicks(side = 2), labels = c(axTicks(2))) ## add axis with real count vales if(method_control$norm[1]) { axis( side = 2, axTicks(side = 2)[-1], labels = format( max(m[, 2]) * axTicks(side = 2)[-1], digit = 1, scientific = TRUE ), line = 0.8, cex.axis = 0.7, tick = FALSE ) } ##plot sum curve lines(x = df[[1]], y = predict(fit), col = col[1], lwd = 1.5) ##add mtext mtext(side = 3, text = plot_settings$mtext) ##add components for(i in 1:length(mu)){ curve( (C[i] * 1 / (sigma[i] * sqrt(2 * pi)) * exp(-0.5 * ((x - mu[i])/sigma[i])^2)), add = TRUE, col = col[i + 1] ) } ##add legend if(plot_settings$legend){ legend( plot_settings$legend.pos, legend = plot_settings$legend.text, lwd = 1, col = col[1:(length(mu) + 2)], bty = "n" ) } ## SCREEN 2 ----- screen(2) par(mar = c(4, 4, 0, 4)) plot(NA, NA, ylim = range(residuals(fit)), xlab = plot_settings$xlab, type = "b", pch = 20, yaxt = "n", xlim = plot_settings$xlim, ylab = "", col = rgb(0,0,0,.6), log = ifelse(grepl(plot_settings$log[1], pattern = "x", fixed = TRUE), "x", "") ) ## add one axis label axis(side = 2, at = 0, labels = 
0) ## add ± 5 line polygon(x = c(df[[1]], rev(df[[1]])), y = c(df[[2]] * 1.1 - df[[2]], rev(df[[2]] * 0.9 - df[[2]])), border = FALSE, col = rgb(0.8,0.8,0.8)) ## add points points(df[[1]],residuals(fit), pch = 20, col = rgb(0,0,0,0.3)) ## add zero line abline(h = 0, lty = 2) ##add wavelength axis h <- 4.135667662e-15 #eV * s c <- 299792458e+09 #nm/s axis( side = 1, labels = paste0("(",round((h * c) / axTicks(side = 3), 0), " nm)"), at = axTicks(side = 3), cex.axis = .7, line = .8, tick = FALSE ) }else{ ##provide control plot plot(df, main = "fit_EmissionSpectra() - control plot") ##abline abline(v = mu, lty = 2) ##add information mtext(side = 3, text = "(dashed lines indicate identified peaks)") ##add components for(i in 1:length(mu)){ curve( (C[i] * 1 / (sigma[i] * sqrt(2 * pi)) * exp(-0.5 * ((x - mu[i])/sigma[i])^2)), add = TRUE, col = i ) } } }##if plot # Output -------------------------------------------------------------------------------------- results <- set_RLum( class = "RLum.Results", data = list(data = m_coef, fit = fit, fit_info = list( SSR = SSR, SST = SST, R2 = R2, R2adj = R2adj) ), info = list(call = sys.call()) ) ##return return(results) } Luminescence/R/subset_SingleGrainData.R0000644000176200001440000000373514464125673017615 0ustar liggesusers#'@title Simple Subsetting of Single Grain Data from Risø BIN/BINX files #' #'@description Most measured single grains do not exhibit light and it makes #'usually sense to subset single grain datasets using a table of #'position and grain pairs #' #'@param object [Risoe.BINfileData-class] (**required**): input object with the #'data to subset #' #'@param selection [data.frame] (**required**): selection table with two columns #'for position (1st column) and grain (2nd column) (columns names do not matter) #' #'@return A subset [Risoe.BINfileData-class] object #' #'@section Function version: 0.1.0 #' #'@author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #'@keywords manip 
datagen
#'
#'@seealso [Risoe.BINfileData-class], [read_BIN2R], [verify_SingleGrainData]
#'
#'@examples
#'
#'## load example data
#'data(ExampleData.BINfileData, envir = environment())
#'
#'## set POSITION/GRAIN pair dataset
#'selection <- data.frame(POSITION = c(1,5,7), GRAIN = c(0,0,0))
#'
#'##subset
#'subset_SingleGrainData(object = CWOSL.SAR.Data, selection = selection)
#'
#'@md
#'@export
subset_SingleGrainData <- function (
  object,
  selection
){

  # Integrity tests ---------------------------------------------------------
  ## check object
  if (!inherits(object, "Risoe.BINfileData"))
    stop("[subset_SingleGrainData()] Only Risoe.BINfileData-class objects are allowed as input!",
         call. = FALSE)

  ## try to work with selection
  ## only the first two columns are used; they are renamed so the merge()
  ## below can join on POSITION and GRAIN regardless of user-given names
  selection <- as.data.frame(selection)[,1:2]
  colnames(selection) <- c("POSITION", "GRAIN")

  # Subset ------------------------------------------------------------------
  ## select ids for subsetting
  ## merge() performs an inner join on POSITION/GRAIN, i.e. only records
  ## matching the selection table survive; sort() restores record order
  sel_id <-sort(merge(object@METADATA[,c("POSITION", "GRAIN", "ID")], selection)[["ID"]])

  ## pick data
  object@METADATA <- object@METADATA[sel_id,]
  object@DATA <- object@DATA[sel_id]
  ## renumber IDs so they remain consecutive in the subset object
  object@METADATA[["ID"]] <- 1:nrow(object@METADATA)

  # Return ------------------------------------------------------------------
  return(object)
}
Luminescence/R/internals_RLum.R0000644000176200001440000004652714367174076016173 0ustar liggesusers# INTERNAL HELPER FUNCTIONS -----------------------------------------------
#+++++++++++++++++++++
#+ .set_pid() +
#+++++++++++++++++++++
#' Set unique id of the RLum.Analysis object as parent id for each RLum.Data
#' object in the record list
#'
#' This function only applies on RLum.Analysis objects and was written for performance not
#' usability, means the functions runs without any checks and is for internal usage only.
#'
#' @param [RLum.Analysis-class] (**required**):
#' input object where the function should be applied on
#'
#' @return
#' Returns the same object as the input
#'
#' @section Function version: 0.1.0
#'
#' @author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#' @examples
#'
#' ##example using self created data
#' object <- set_RLum(
#' "RLum.Analysis",
#' records = list(
#' set_RLum("RLum.Data.Curve"),
#' set_RLum("RLum.Data.Curve")))
#'
#' object <- .set_pid(object)
#'
#' @md
#' @noRd
.set_pid <- function(object){
  ## copy the parent object's unique id (.uid) into the .pid slot of every
  ## child record; deliberately without input checks (performance)
  object@records <- lapply(object@records, function(x) {
    x@.pid <- object@.uid
    return(x)
  })

  return(object)
}
#+++++++++++++++++++++
#+ .warningCatcher() +
#+++++++++++++++++++++
#' Catches warning returned by a function and merges them.
#' The original return of the function is returned. This function is in particular
#' helpful if a function returns a lot of warnings with the same content.
#'
#' @param expr [expression] (**required**):
#' the R expression, usually a function
#'
#' @return
#' Returns the same object as the input and a warning table
#'
#' @section Function version: 0.1.0
#'
#' @author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#' @examples
#'
#' f <- function() {
#' warning("warning 1")
#' warning("warning 1")
#' warning("warning 2")
#' 1:10
#' }
#' print(.warningCatcher(f()))
#'
#' @md
#' @noRd
.warningCatcher <- function(expr) {
  ##set variables
  ## warnings raised during evaluation of expr are accumulated here
  warning_collector <- list()
  env <- environment()

  ##run function and catch warnings
  ## each warning is appended to warning_collector in this environment and
  ## then muffled, so nothing is emitted while expr is evaluated
  results <- withCallingHandlers(
    expr = expr,
    warning = function(c) {
      temp <- c(get("warning_collector", envir = env), c[[1]])
      assign(x = "warning_collector", value = temp, envir = env)

      ##TODO should be replaced tryInvokeRestart once R 4.1 was released
      invokeRestart("muffleWarning")
    }
  )

  ##set new warning messages with merged results
  ## identical messages are counted via table() and re-emitted once each
  if (length(warning_collector) > 0) {
    w_table <- table(as.character(unlist(warning_collector)))
    w_table_names <-
names(w_table)

    warning(paste0(
      "(",
      1:length(w_table),
      ") ",
      w_table_names,
      ": This warning occurred ",
      w_table,
      " times!"
      ,collapse = "\n"), call. = FALSE)
  }

  return(results)
}
#+++++++++++++++++++++
#+ .smoothing() +
#+++++++++++++++++++++
#' Allows smoothing of data based on the function zoo::rollmean
#'
#' The function just allows a direct and meaningful access to the functionality of the zoo::rollmean()
#' function. Arguments of the function are only partly valid.
#'
#' @param x [numeric] (**required**):
#' the object for which the smoothing should be applied.
#'
#' @param k [integer] (*with default*):
#' window for the rolling mean; must be odd for rollmedian.
#' If nothing is set k is set automatically
#'
#' @param fill [numeric] (*with default*):
#' a vector defining the left and the right hand data
#'
#' @param align [character] (*with default*):
#' specifying whether the index of the result should be
#' left- or right-aligned or centered (default) compared to the rolling window of observations,
#' allowed `"right"`, `"center"` and `left`
#'
#' @param method [method] (*with default*):
#' defines which method should be applied for the smoothing: `"mean"` or `"median"`
#'
#' @return
#' Returns the same object as the input and a warning table
#'
#' @section Function version: 0.1.1
#'
#' @author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#' @examples
#'
#' v <- 1:100
#' .smoothing(v)
#'
#' @md
#' @noRd
.smoothing <- function(
  x,
  k = NULL,
  fill = NA,
  align = "right",
  method = "mean") {

  ##set k
  ## default window: 1 % of the data length; for the median the window must
  ## be odd (zoo::rollmedian requirement), so bump even values by one
  if (is.null(k)){
    k <- ceiling(length(x) / 100)
    if(method == "median" && k %%2 ==0) k <- k + 1
  }

  ##smooth data
  if(method == "mean"){
    zoo::rollmean(x, k = k, fill = fill, align = align)

  }else if(method == "median"){
    zoo::rollmedian(x, k = k, fill = fill, align = align)

  }else{
    ## fixed typo in the user-facing error message: "Unvalid" -> "Invalid"
    stop("[Luminescence:::.smoothing()] Invalid input for 'method'!")
  }
}
#++++++++++++++++++++++++++++++
#+ Scientific axis annotation +
#++++++++++++++++++++++++++++++
#' Bored of the 1e10 notation of large numbers in R? Already tried to force
#' R to produce more fancy labels? Worry not, fancy_scientific() (written by
#' Jack Aidley) is at your help!
#'
#' Source:
#' [http://stackoverflow.com/questions/11610377/how-do-i-change-the-formatting-of-numbers-on-an-axis-with-ggplot]()
#'
#' @param l [numeric] (**required**):
#' a numeric vector, i.e. the labels that you want to add to your plot
#'
#' @return
#' Returns an expression
#'
#' @section Function version: 0.1.0
#'
#' @author Jack Aidley
#'
#' @examples
#' plot(seq(1e10, 1e20, length.out = 10),
#' 1:10,
#' xaxt = "n")
#'
#' axis(1, at = axTicks(1),
#' labels = fancy_scientific(axTicks(1)))
#'
#' @md
#' @noRd
fancy_scientific <- function(l) {
  # turn in to character string in scientific notation
  l <- format(l, scientific = TRUE)

  # quote the part before the exponent to keep all the digits
  l <- gsub("^(.*)e", "'\\1'e", l)

  # turn the 'e+' into plotmath format
  l <- gsub("e", "%*%10^", l)

  # remove plus sign
  l <- gsub("\\+", "", l)

  # return this as an expression (plotmath), ready for use as axis labels
  parse(text=l)
}
#'Add fancy log axis with minor ticks the fancy axis labelling
#'
#'@param side [numeric] (**required**): the side where to plot the axis
#'
#'@param ...
extra arguments to be passed to [graphics::axis], `side`, `at`and `labels`
#'are pre-defined and cannot be modified
#'
#'@return
#'Returns fancy log axis
#'
#'@author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#'@examples
#'
#'y <- c(0.1, 0.001, 0.0001)
#'plot(1:length(y), y, yaxt = "n", log = "y")
#'.add_fancy_log_axis(side = 2, las = 1)
#'
#'@md
#'@noRd
.add_fancy_log_axis <- function(side, ...){
  ## do just nothing if it would cause an error
  ## (the requested side must actually be on a log scale)
  if(!(par()$xlog && any(c(1,3) %in% side[1])) &&
     !(par()$ylog && any(c(2,4) %in% side[1])))
    return(NULL)

  ## get current axis ticks and get exponent
  ticks <- graphics::axTicks(side, log = TRUE)
  ticks <- unique(floor(log10(ticks)))
  ## nine minor ticks per decade (the 10th would duplicate the next major tick)
  minor_ticks <- vapply(ticks, function(x) {
    seq(10^(x-1),10^x, length.out = 10)[-10]
  }, numeric(9))

  ## add minor ticks
  graphics::axis(
    side,
    at = as.numeric(minor_ticks),
    lwd.ticks = 0.5,
    tcl = -.35,
    labels = FALSE)

  ## add main axis
  ## remove settings we set
  args <- list(...)
  args$side <- NULL
  args$at <- NULL
  args$labels <- NULL

  ## call the axis
  do.call(what = graphics::axis,
          args = c(
            list(side = side,
                 at = 10^ticks,
                 labels = fancy_scientific(10^ticks)),
            args))
}
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#+ Statistical Summary for Plot functions
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#' Create Statistical Summary Character Vector for Plot functions
#'
#' This function automatically generates the statistical summary for the plot functions within
#' the package. This should unify the approach how such things are created and support, theoretically
#' all keywords for all plot functions in a similar way.
#'
#'@param x [data.frame] (optional): output from the function `calc_Statistics()`. If nothing is
#'provided a list of prefix keyword combinations supported by the function `calc_Statistics()` is returned.
#'
#'@param keywords[character] (with default): keywords supported by the function `calc_Statistics()`
#'
#'@param digits [numeric] (with default): modify the digits independently for the plot output
#'
#'@param sep [character] (with default): a separator used for the creation of the output of the plot
#'
#'@param prefix [character] (with default): allows to add a leading prefix to the string
#'
#'@param suffix [character] (with default): allows to add a suffix to the entire string
#'
#'@author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#'@section Version: 0.1.0
#'
#'
#'@md
#'@noRd
.create_StatisticalSummaryText <- function(
  x = NULL, #insert the output of calc_Statistics
  keywords = NULL,
  digits = 2, #allow for different digits
  sep = " \n ",
  prefix = "",
  suffix = ""
){

  ## Grep keyword information
  ## with no input a dummy dataset is used, only to enumerate the supported
  ## prefix$keyword combinations below
  if (is.null(x)) {
    summary <- calc_Statistics(data.frame(x = 1:2, y = 1:2))
  } else {
    summary <- x
  }

  #all allowed combinations
  keywords_allowed <- unlist(lapply(names(summary), function(x){
    paste0(x, "$", names(summary[[x]]))
  }))

  ##return if for x == NULL
  if(is.null(x)) return(keywords_allowed)

  ## Create call
  #create list
  l <- lapply(keywords, function(k) {
    ##strip keyword if necessary
    ## keywords may come as 'prefix$keyword'; without an explicit prefix
    ## the 'unweighted' branch of the summary is used
    if (grepl(pattern = "$", x = k, fixed = TRUE)[1]) {
      strip <- strsplit(k, split = "$", fixed = TRUE)[[1]]
      keywords_prefix <- strip[1]
      k_strip <- strip[2]

    } else{
      keywords_prefix <- "unweighted"
      k_strip <- k
    }

    ##construct string
    if(!is.null(summary[[keywords_prefix]][[k_strip]])){
      if(keywords_prefix == "unweighted"){
        paste0(k_strip, " = ", round(summary[[keywords_prefix]][[k_strip]], digits))

      }else{
        paste0(k, " = ", round(summary[[keywords_prefix]][[k_strip]], digits))
      }

    }else{
      return(NULL)
    }
  })

  ##remove NULL entries
  l <- l[!sapply(l, is.null)]

  ##construct final call
  return(paste0(prefix, paste(unlist(l), collapse = sep), suffix))
}
#++++++++++++++++++++++++++++++
#+ Unlist RLum +
#++++++++++++++++++++++++++++++
#'
#' Recursive unlisting of lists until the
first element in the list
#' is something, but not a list. This function helps
#' to get rid of nested lists. The function stops running if a single
#' level list is reached.
#'
#' @param x [list] (**required**): list with lists
#'
#' @author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#' @examples
#' a <- list(b = list(c = list("test")))
#' .unlist_RLum(a)
#'
#' @return [list] with only one level left
#' @md
#' @noRd
.unlist_RLum <- function(x){
  stopifnot(class(x) == "list")

  ## flatten one level at a time until the first element is no longer a list
  if(length(x) > 0 && inherits(x[[1]], "list")){
    x <- unlist(x, recursive = FALSE)
    .unlist_RLum(x)

  }else{
    return(x)
  }
}
#++++++++++++++++++++++++++++++
#+ .rm_nonRLum +
#++++++++++++++++++++++++++++++
#' @title Removes all non-RLum objects from list
#'
#' @description Removes all non RLum objects from a list
#' supposed to consist only of RLum-class objects
#' As an internal function, the function is rather unforgiving, no further
#' checks are applied.
#'
#' @param x [list] (**required**): list
#'
#' @param class [character]: class to look for, if nothing is set
#' it checks for RLum in general
#'
#' @author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#' @examples
#' x <- c(list(set_RLum("RLum.Analysis"), set_RLum("RLum.Analysis")), 2)
#' .rm_nonRLum(x)
#'
#' @return [list] with only RLum objects
#'
#' @md
#' @noRd
.rm_nonRLum <- function(x, class = NULL){
  ## default: keep everything inheriting from RLum
  if(is.null(class))
    return(x[vapply(x, inherits, logical(1), "RLum")])

  ## otherwise keep only elements whose first class name matches exactly
  x[vapply(x, "class", character(1)) == class[1]]
}
#++++++++++++++++++++++++++++++
#+ .matrix_binning +
#++++++++++++++++++++++++++++++
#
#' This function allows efficient binning of matrices including
#' row and column name handling. Internally, the function uses [rowsum],
#' means the binning is always applied on the rows.
For column binning the function
#' internally transposes the matrix first
#'
#' @param m [matrix] (**required**): the matrix uses the base function [rowsum]
#'
#' @param bin_size [integer] (*with default*): bin size
#'
#' @param bin_col [logical] (*with default*): applies the binning on the columns instead of the
#' rows. If you want to perform binning on rows and columns, you have to call this function twice.
#'
#' @param names [character] (*with default*): the handling of the row and column names. The default
#' `NULL` removes the column and row names. Other allowed input is: `'groups'` this uses the group
#' name, e.g., the last time value of a group, `'mean'` this calculates the mean value of a group,
#' `'sum'` to sum-up groups and you can provide any other value which will then be recycled throughout.
#' For example: `c('row1', 'row2')`.
#'
#' @author Sebastian Kreutzer, IRAMAT-CRP2A, UMR 5060, CNRS - Université Bordeaux Montaigne (France)
#'
#' @section Function version: 0.1.2
#'
#' @note Row and column names are transformed to numeric and also summed up; this is not a bug
#' but a feature!
#'
#' @return [matrix]
#'
#' @examples
#' m <- matrix(data = c(rep(1:20,each = 20)), ncol = 10, nrow = 20)
#' rownames(m) <- 1:nrow(m)
#' colnames(m) <- 1:ncol(m)
#'
#' .matrix_binning(m, bin_size = 4)
#'
#' @md
#' @noRd
.matrix_binning <- function(
  m,
  bin_size = 1,
  bin_col = FALSE,
  names = NULL) {

  #@ The only check
  if(!inherits(m, "matrix"))
    stop("[.matrix_binning()] Input is not of class 'matrix'!", call. = FALSE)

  ## transpose in column mode
  ## (rowsum() only bins rows, so column binning works on the transpose)
  if(bin_col) m <- t(m)

  ## binning calculation
  ##set groups
  ##with the correction in the 2nd line we
  ##get rid potential problems
  groups <- rep(1:nrow(m), each = bin_size)[1:nrow(m)]

  ##row binning (thats all)
  temp_m <- rowsum(m, group = groups)

  ## Correct names
  if(!is.null(names[1])){
    if(names[1] == "groups"){
      ##get rownames correct (it is the end of each bin)
      row_names <- rownames(m)[which(diff(groups) != 0)]

      ##correct last value
      if(length(row_names) < nrow(m))
        row_names <- c(row_names,rownames(m)[nrow(m)])

    }else if(names[1] == "mean"){
      groups <- rep(1:nrow(m), each = bin_size)[1:nrow(m)]
      row_names <- as.numeric(rownames(m))
      row_names <- tapply(X = row_names, INDEX = groups, FUN = mean)

    }else if(names[1] == "sum"){
      row_names <- rowsum(as.numeric(rownames(m)), group = groups)

    }else{
      ## any other input: recycled verbatim as new row names
      row_names <- names
    }

    ##reset rownames and make sure it fits the length
    rownames(temp_m) <- rep(row_names, length.out = nrow(temp_m))

  }else{
    rownames(temp_m) <- NULL
  }

  ## re-transpose in column mode
  if(bin_col) temp_m <- t(temp_m)

  ## return
  return(temp_m)
}
#++++++++++++++++++++++++++++++
#+ .expand_parameters +
#++++++++++++++++++++++++++++++
#' @title Expand function parameters of self-call
#'
#' @description For the self-call, the function parameters need to
#' be expended, this was done, so far in a non-consistent way and
#' repeated in every function using the self-call. This functions
#' does it once and for all similar in all functions.
#'
#' **NOTE**: the first argument is never extended due to performance reasons,
#' it might be a very large object
#'
#' @param len [numeric] (**required**): length of the parameter expansion
#'
#' @author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#' @return [list] with expanded parameters
#'
#' @md
#' @noRd
.expand_parameters <- function(len){
  ##get original definition and the call of f
  ## sys.parent(): this helper inspects the CALLER's definition and call
  f_def <- sys.function(sys.parent())
  f_call <- sys.call(sys.parent())

  ##extract arguments (do not consider the first argument, this might be a very
  ##large object)
  args_default <- as.list(f_def)[-length(as.list(f_def))][-1]
  args_new <- as.list(match.call(f_def, f_call, FALSE))[-c(1:2)]

  ##now we have to make sure that we evaluate all language objects
  ##before passing them further down
  if(length(args_new) > 0){
    for(i in 1:length(args_new)){
      if(class(args_new[[i]])[1] == "name" | class(args_new[[i]])[1] == "call")
        args_new[[i]] <- eval(args_new[[i]])
    }
  }

  ##combine the two argument lists
  args <- modifyList(
    x = args_default,
    val = args_new,
    keep.null = TRUE)

  ##evaluate arguments and take care of missing values
  for(i in 1:length(args)){
    if(is.na(names(args[i])) || names(args[i]) == "...")
      next

    ## a remaining 'name' means the caller supplied no value for a
    ## parameter that has no default -> hard stop with the caller's name
    if(class(args[[i]])[1] == "name" & names(args[i]) != "...") {
      stop(paste0("[",f_call[[1]],"()]: Argument <", names(args[i]), "> missing; with no default!"),
           call. = FALSE)
    }

    ##evaluate and cover special cases
    if(!is.null(args[[i]]))
      args[[i]] <- eval(args[[i]])

    if(inherits(args[i], "list") & length(args[[i]]) == 0)
      args[[i]] <- list()
  }

  ##expand all arguments
  ##we have two conditions and three cases
  ##1: the argument is a list AND the list itself is not named
  ## ... the case when the user what to use different values for the objects
  ##2: the argument is no list ...
  ## ... the standard automated expansion
  ## ... OR it is a list with names (e.g., rejection.criteria = list(recycling.ration = 10))
  for(i in 1:length(args)){
    if(inherits(args[[i]], "list") & is.null(names(args[[i]]))){
      args[[i]] <- rep(args[[i]], length = len[1])

    } else {
      args[[i]] <- rep(list(args[[i]]), length = len[1])
    }
  }

  return(args)
}
#++++++++++++++++++++++++++++++
#+ .calc_HPDI +
#++++++++++++++++++++++++++++++
#' @title Calculates Highest Probability Density Interval
#'
#' @description The idea of this function is to provide a convenient
#' method to calculate the highest probability density intervals for
#' sets of data. This function might be exported later
#' Currently it follows roughly the idea of what is implemented
#' in `code` and `hdrcde`. If the results has more than one peak,
#' also this is shown, therefore the output is a matrix
#'
#' @param object [numeric] (**required**): numeric object with input data
#'
#' @param prob [numeric] (*with default*): sets aimed probability interval
#'
#' @param plot [logical] (*with default*): enables/disables additional control
#' plot
#'
#' @param ... further arguments passed to [stats::density]
#'
#' @author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#' @references
#' Hyndman, R.J., 1996. Computing and Graphing Highest Density Regions.
#' The American Statistician 50, 120–8. doi:10.2307/2684423
#'
#' @examples
#' x <- rnorm(100)
#' .calc_HPDI(x)
#'
#' @return [matrix] with HPDI
#'
#' @md
#' @noRd
.calc_HPDI <- function(object, prob = 0.95, plot = FALSE, ...){
  ##estimate density
  dens <- density(object, ...)
  ## bin width of the (equidistant) density grid
  diff <- diff(dens$x[1:2])

  ##calculate probabilities
  ## column 3 holds the probability mass per grid cell (y * bin width)
  m <- cbind(matrix(c(dens$x, dens$y), ncol = 2), dens$y * diff)
  o <- order(m[, 3], decreasing = TRUE)
  m_ind <- which(cumsum(m[o, 3]) <= prob)
  thres <- suppressWarnings(min(m[o, 2][m_ind]))

  ##get peaks
  ## sign changes of (density - threshold) mark interval boundaries
  peaks_id <- which(abs(diff((m[,2] - thres) > 0)) == 1)

  ##calculate HPDI
  HPDI <- matrix(NA, ncol = 2, nrow = 1)
  ## NOTE(review): 'length(peaks_id != 0)' looks like it was meant to be
  ## 'length(peaks_id) != 0'; both evaluate the same way here, but the
  ## intent should be confirmed. Same pattern below in the plot branch.
  if(length(peaks_id != 0)) HPDI <- matrix(m[peaks_id,1], ncol = 2)
  colnames(HPDI) <- c("lower", "upper")
  ## NOTE(review): attribute name "Probabilty" is misspelled; kept as-is
  ## because downstream code may read it by this exact name
  attr(HPDI, "Probabilty") <- prob

  if(plot){
    xy <- m[m_ind,c(1,2)]
    plot(dens, main = "HPDI (control plot)")
    abline(h = thres, lty = 2)
    if(length(peaks_id != 0)) {
      for(i in seq(1,length(peaks_id),2)) {
        lines(x = m[peaks_id[i]:peaks_id[i + 1], 1],
              y = m[peaks_id[i]:peaks_id[i + 1], 2],
              col = "red")
      }
    }
  }

  return(HPDI)
}
Luminescence/R/plot_RLum.Analysis.R0000644000176200001440000006140114521207343016712 0ustar liggesusers#' Plot function for an RLum.Analysis S4 class object
#'
#' The function provides a standardised plot output for curve data of an
#' RLum.Analysis S4 class object
#'
#' The function produces a multiple plot output. A file output is recommended
#' (e.g., [pdf]).
#'
#' **curve.transformation**
#'
#' This argument allows transforming continuous wave (CW) curves to pseudo
#' (linear) modulated curves. For the transformation, the functions of the
#' package are used. Currently, it is not possible to pass further arguments to
#' the transformation functions. The argument works only for `ltype`
#' `OSL` and `IRSL`.
#'
#' Please note: The curve transformation within this functions works roughly,
#' i.e. every IRSL or OSL curve is transformed, without considering whether it
#' is measured with the PMT or not! However, for a fast look it might be
#' helpful.
#'
#' @param object [RLum.Analysis-class] (**required**):
#' S4 object of class `RLum.Analysis`
#'
#' @param subset named [list] (*optional*):
#' subsets elements for plotting.
The arguments in the named [list] will be #' directly passed to the function [get_RLum] #' (e.g., `subset = list(curveType = "measured")`) #' #' @param nrows [integer] (*optional*): #' sets number of rows for plot output, if nothing is set the function #' tries to find a value. #' #' @param ncols [integer] (*optional*): #' sets number of columns for plot output, if nothing is set the function #' tries to find a value. #' #' @param abline [list] (*optional*): #' allows to add ab-lines to the plot. Argument are provided #' in a list and will be forward to the function [abline], #' e.g., `list(v = c(10, 100))` adds two vertical lines add 10 and 100 to all #' plots. In contrast `list(v = c(10), v = c(100)` adds a vertical at 10 to #' the first and a vertical line at 100 to the 2nd plot. #' #' @param combine [logical] (*with default*): #' allows to combine all [RLum.Data.Curve-class] objects in one single plot. #' #' @param records_max [numeric] (*optional*): limits number of records #' shown if `combine = TRUE`. Shown are always the first and the last curve, #' the other number of curves to be shown a distributed evenly, this may result #' in less number of curves plotted as specified. #' #' @param curve.transformation [character] (*optional*): #' allows transforming CW-OSL and CW-IRSL curves to pseudo-LM curves via #' transformation functions. Allowed values are: `CW2pLM`, `CW2pLMi`, #' `CW2pHMi` and `CW2pPMi`. See details. #' #' @param plot.single [logical] (*with default*): #' global par settings are considered, normally this should end in one plot per page #' #' @param ... further arguments and graphical parameters will be passed to #' the `plot` function. #' #' Supported arguments: `main`, `mtext`, `log`, `lwd`, `lty` `type`, `pch`, `col`, #' `norm` (see [plot_RLum.Data.Curve]), `xlim`,`ylim`, `xlab`, `ylab`, ... #' #' and for `combine = TRUE` also: `sub_title`, `legend`, `legend.text`, `legend.pos` #' (typical plus 'outside'), `legend.col`, `smooth`. 
#' #' All arguments can be provided as `vector` or `list` to gain in full control #' of all plot settings. #' #' @return Returns multiple plots. #' #' @note #' Not all arguments available for [plot] will be passed and they partly do not behave in the #' way you might expect them to work. This function was designed to serve as an overview #' plot, if you want to have more control, extract the objects and plot them individually. #' #' @section Function version: 0.3.14 #' #' @author #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #' @seealso [plot], [plot_RLum], [plot_RLum.Data.Curve] #' #' @keywords aplot #' #' @examples #' #'##load data #'data(ExampleData.BINfileData, envir = environment()) #' #'##convert values for position 1 #'temp <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos=1) #' #'##(1) plot (combine) TL curves in one plot #'plot_RLum.Analysis( #' temp, #' subset = list(recordType = "TL"), #' combine = TRUE, #' norm = TRUE, #' abline = list(v = c(110)) #' ) #' #'##(2) same as example (1) but using #'## the argument smooth = TRUE #'plot_RLum.Analysis( #' temp, #' subset = list(recordType = "TL"), #' combine = TRUE, #' norm = TRUE, #' smooth = TRUE, #' abline = list(v = c(110)) #' ) #' #' @md #' @export plot_RLum.Analysis <- function( object, subset = NULL, nrows, ncols, abline = NULL, combine = FALSE, records_max = NULL, curve.transformation, plot.single = FALSE, ... ){ # Integrity check ---------------------------------------------------------------------------- ##check if object is of class RLum.Analysis (lists are handled via plot_RLum()) if (!is(object, "RLum.Analysis")) stop("[plot_RLum.Analysis()] Input object is not of type 'RLum.Analysis'", call. 
= FALSE) # Make selection if wanted ------------------------------------------------------------------- if(!is.null(subset)){ ##check whether the user set the drop option and remove it, as we cannot work with it subset <- subset[!sapply(names(subset), function(x){"drop" %in% x})] object <- do.call(get_RLum, c(object = object, subset, drop = FALSE)) } # Deal with additional arguments. ------------------------------------------------------------ ##create plot settings list plot.settings <- list( main = NULL, mtext = NULL, log = "", lwd = 1, lty = 1, type = "l", xlab = NULL, ylab = NULL, xlim = NULL, ylim = NULL, pch = 1, col = "auto", norm = FALSE, sub_title = NULL, cex = 1, legend = TRUE, legend.text = NULL, legend.pos = NULL, legend.col = NULL, smooth = FALSE ) plot.settings <- modifyList(x = plot.settings, val = list(...), keep.null = TRUE) ##try to find optimal parameters, this is however, a little bit stupid, but ##better than without any presetting if(combine){ n.plots <- length(unique(as.character(structure_RLum(object)$recordType))) }else{ n.plots <- length_RLum(object) } if (missing(ncols) | missing(nrows)) { if (missing(ncols) & !missing(nrows)) { if (n.plots == 1) { ncols <- 1 } else{ ncols <- 2 } } else if (!missing(ncols) & missing(nrows)) { if (n.plots == 1) { nrows <- 1 } else if (n.plots > 1 & n.plots <= 4) { nrows <- 2 } else{ nrows <- 3 } } else{ if (n.plots == 1) { nrows <- 1 ncols <- 1 } else if (n.plots > 1 & n.plots <= 2) { nrows <- 1 ncols <- 2 } else if (n.plots > 2 & n.plots <= 4) { nrows <- 2 ncols <- 2 } else{ nrows <- 3 ncols <- 2 } } } # Plotting ------------------------------------------------------------------ ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ ##(1) NORMAL (combine == FALSE) ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ if(!combine || length(object@records) == 1){ ##show warning message if(combine & length(object@records) 
== 1){ warning("Nothing to combine, object contains a single curve.") } ##grep RLum.Data.Curve or RLum.Data.Spectrum objects temp <- lapply(1:length(object@records), function(x){ if(is(object@records[[x]], "RLum.Data.Curve") || is(object@records[[x]], "RLum.Data.Spectrum")){ object@records[[x]] }}) ##calculate number of pages for mtext if (length(temp) %% (nrows * ncols) > 0) { n.pages <- round(length(temp) / (nrows * ncols), digits = 0) + 1 } else{ n.pages <- length(temp) / (nrows * ncols) } ##set par par.default <- par("mfrow") if(!plot.single){on.exit(par(mfrow = par.default))} if(!plot.single) { par(mfrow = c(nrows, ncols)) } ##expand plot settings list plot.settings <- lapply(setNames(1:length(plot.settings), names(plot.settings)), function(x) { if (!is.null(plot.settings[[x]])) { if(length(plot.settings[[x]]) > 1){ if(is(plot.settings[[x]], "list")){ rep_len(plot.settings[[x]], length.out = length(temp)) }else{ rep_len(list(plot.settings[[x]]), length.out = length(temp)) } }else{ rep_len(plot.settings[[x]], length.out = length(temp)) } } else{ plot.settings[[x]] } }) ##expand abline if(!is.null(abline)){ abline.names <- rep_len(names(abline), length.out = length(temp)) abline <- rep_len(abline, length.out = length(temp)) names(abline) <- abline.names } ##apply curve transformation for(i in 1:length(temp)){ if(is(temp[[i]], "RLum.Data.Curve") == TRUE){ ##set curve transformation if wanted if((grepl("IRSL", temp[[i]]@recordType) | grepl("OSL", temp[[i]]@recordType)) & !missing(curve.transformation)){ if(curve.transformation=="CW2pLM"){ temp[[i]] <- CW2pLM(temp[[i]]) }else if(curve.transformation=="CW2pLMi"){ temp[[i]] <- CW2pLMi(temp[[i]]) }else if(curve.transformation=="CW2pHMi"){ temp[[i]]<- CW2pHMi(temp[[i]]) }else if(curve.transformation=="CW2pPMi"){ temp[[i]] <- CW2pPMi(temp[[i]]) }else{ warning("Function for 'curve.transformation' is unknown. 
No transformation is performed.") } } ##check plot settings and adjust ##xlim if (!is.null(plot.settings$xlim)) { xlim.set <- plot.settings$xlim[[i]] if (plot.settings$xlim[[i]][1] < min(temp[[i]]@data[,1])) { warning(paste0("[plot_RLum.Analysis()] min('xlim') < x-value range for curve #",i,"; reset to minimum."), call. = FALSE) xlim.set[1] <- min(temp[[i]]@data[,1]) } if (plot.settings$xlim[[i]][2] > max(temp[[i]]@data[,1])) { warning(paste0("[plot_RLum.Analysis()] max('xlim') > x-value range for curve #",i,"; reset to maximum."), call. = FALSE) xlim.set[2] <- max(temp[[i]]@data[,1]) } }else{ xlim.set <- plot.settings$xlim[[i]] } ##ylim if (!is.null(plot.settings$ylim)) { ylim.set <- plot.settings$ylim[[i]] if (plot.settings$ylim[[i]][1] < min(temp[[i]]@data[,2])) { warning(paste0("[plot_RLum.Analysis()] min('ylim') < y-value range for curve #",i,"; reset to minimum."), call. = FALSE) ylim.set[1] <- min(temp[[i]]@data[,2]) } if (plot.settings$ylim[[i]][2] > max(temp[[i]]@data[,2])) { warning(paste0("[plot_RLum.Analysis()] max('ylim') > y-value range for curve #",i,"; reset to maximum."), call. 
= FALSE) ylim.set[2] <- max(temp[[i]]@data[,2]) } }else{ ylim.set <- plot.settings$ylim[[i]] } ##col if (unique(plot.settings$col) != "auto") { col <- plot.settings$col[i] } else{ if (grepl("IRSL", temp[[i]]@recordType)) { col <- "red" } else if (grepl("OSL", temp[[i]]@recordType)) { col <- "blue" } else { col <- "black" } } ##main main <- if (is.null(plot.settings$main[[i]])) { temp[[i]]@recordType } else{ plot.settings$main[[i]] } ##++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ ##PLOT ##++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ ##plot RLum.Data.Curve curve ##we have to do this via this way, otherwise we run into a duplicated arguments ##problem ##check and remove duplicated arguments arguments <- c( list( object = temp[[i]], col = col, mtext = if (!is.null(plot.settings$mtext[[i]])) { plot.settings$mtext[[i]] } else{ paste("#", i, sep = "") }, par.local = FALSE, main = main, log = plot.settings$log[[i]], lwd = plot.settings$lwd[[i]], type = plot.settings$type[[i]], lty = plot.settings$lty[[i]], xlim = xlim.set, ylim = ylim.set, norm = plot.settings$norm, pch = plot.settings$pch[[i]], cex = plot.settings$cex[[i]], smooth = plot.settings$smooth[[i]] ), list(...) 
) arguments[duplicated(names(arguments))] <- NULL ##call the function plot_RLum.Data.Curve do.call(what = "plot_RLum.Data.Curve", args = arguments) rm(arguments) ##add abline if(!is.null(abline[[i]])){ do.call(what = "abline", args = abline[i]) } } else if(is(temp[[i]], "RLum.Data.Spectrum")) { plot_RLum.Data.Spectrum(temp[[i]], mtext = if(!is.null(plot.settings$mtext[[i]])){ plot.settings$mtext[[i]] }else{ paste("#", i, sep = "") }, par.local = FALSE, main = if(!is.null(plot.settings$main)){ plot.settings$main }else{ temp[[i]]@recordType }) } }#end for loop }else{ ##++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ ##(2) NORMAL (combine == TRUE) ##++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ ##(1) check RLum objects in the set object.list <- object@records sapply(object.list, function(o){ if(!inherits(o, "RLum.Data.Curve")){ stop("[plot_RLum.Analysis()] Using 'combine' is limited to 'RLum.Data.Curve' objects.", call. = FALSE) } }) ##account for different curve types, combine similar temp.object.structure <- structure_RLum(object) temp.recordType <- as.character(unique(temp.object.structure$recordType)) ##change graphic settings if(!plot.single){ par.default <- par()[c("cex", "mfrow")] if(!missing(ncols) & !missing(nrows)){ par(mfrow = c(nrows, ncols)) } ##this 2nd par request is needed as setting mfrow resets the par settings ... 
this might ##not be wanted par(cex = plot.settings$cex[1]) }else{ par.default <- par()[c("cex")] par(cex = plot.settings$cex) } ##expand plot settings list ##expand list plot.settings <- lapply(setNames(1:length(plot.settings), names(plot.settings)), function(x) { if (!is.null(plot.settings[[x]])) { if(is.list(plot.settings[[x]])){ rep_len(plot.settings[[x]], length.out = length(temp.recordType)) }else{ rep_len(list(plot.settings[[x]]), length.out = length(temp.recordType)) } } else{ plot.settings[[x]] } }) ##expand abline if(!is.null(abline)){ abline.names <- rep_len(names(abline), length.out = length(temp.recordType)) abline <- rep_len(abline, length.out = length(temp.recordType)) names(abline) <- abline.names } ##(2) PLOT values for(k in 1:length(temp.recordType)) { ###get type of curves temp.object <- get_RLum(object, recordType = temp.recordType[k], drop = FALSE) ##get structure object.structure <- structure_RLum(temp.object) ##now get the real list object (note the argument recursive = FALSE) object.list <- get_RLum(object, recordType = temp.recordType[k], recursive = FALSE) ## limit number of records shown ... 
show always first and last; ## distribute the rest if(!is.null(records_max) && records_max[1] > 2){ records_show <- ceiling(seq(1,length(object.list), length.out = records_max)) object.list[(1:length(object.list))[-records_show]] <- NULL } ##prevent problems for non set argument if (missing(curve.transformation)) { curve.transformation <- "None" } ##transform values to data.frame and norm values temp.data.list <- lapply(1:length(object.list), function(x) { ##set curve transformation if wanted if (grepl("IRSL", object.list[[x]]@recordType) | grepl("OSL", object.list[[x]]@recordType)) { if (curve.transformation == "CW2pLM") { object.list[[x]] <- CW2pLM(object.list[[x]]) }else if (curve.transformation == "CW2pLMi") { object.list[[x]] <- CW2pLMi(object.list[[x]]) }else if (curve.transformation == "CW2pHMi") { object.list[[x]] <- CW2pHMi(object.list[[x]]) }else if (curve.transformation == "CW2pPMi") { object.list[[x]] <- CW2pPMi(object.list[[x]]) } } temp.data <- as(object.list[[x]], "data.frame") ##normalise curves if argument has been set if(plot.settings$norm[[k]][1] %in% c('max', 'last', 'huot') || plot.settings$norm[[k]][1] == TRUE){ if (plot.settings$norm[[k]] == "max" || plot.settings$norm[[k]] == TRUE) { temp.data[[2]] <- temp.data[[2]] / max(temp.data[[2]]) } else if (plot.settings$norm[[k]] == "last") { temp.data[[2]] <- temp.data[[2]] / temp.data[[2]][length(temp.data[[2]])] } else if (plot.settings$norm[[k]] == "huot") { bg <- median(temp.data[[2]][floor(nrow(temp.data)*0.8):nrow(temp.data)]) temp.data[[2]] <- (temp.data[[2]] - bg) / max(temp.data[[2]] - bg) } ##check for Inf and NA if(any(is.infinite(temp.data[[2]])) || anyNA(temp.data[[2]])){ temp.data[[2]][is.infinite(temp.data[[2]]) | is.na(temp.data[[2]])] <- 0 warning("[plot_RLum.Data.Analysis()] Normalisation led to Inf or NaN values. Values replaced by 0.", call. 
= FALSE) } } return(temp.data) }) ##set plot parameters ##main main <- if (!is.null(plot.settings$main[[k]])) { plot.settings$main[[k]] } else{ paste0(temp.recordType[[k]], " combined") } ##xlab xlab <- if(!is.null(plot.settings$xlab[[k]])){ plot.settings$xlab[[k]] }else{ switch(temp.recordType[[k]], "TL" = "Temperature [\u00B0C]", "IRSL" = "Time [s]", "OSL" = "Time [s]", "RF" = "Time [s]", "RBR" = "Time [s]", "LM-OSL" = "Time [s]" ) } ##ylab ylab <- if(!is.null(plot.settings$ylab[[k]])){ plot.settings$ylab[[k]] }else{ paste0(temp.recordType[[k]], " [a.u.]") } ##xlim xlim <- if (!is.null(plot.settings$xlim[[k]]) & length(plot.settings$xlim[[k]]) >1) { plot.settings$xlim[[k]] } else { c(min(object.structure$x.min), max(object.structure$x.max)) } if (grepl("x", plot.settings$log[[k]], ignore.case = TRUE)) xlim[which(xlim == 0)] <- 1 ##ylim ylim <- if (!is.null(plot.settings$ylim[[k]]) & length(plot.settings$ylim[[k]]) > 1) { plot.settings$ylim[[k]] } else { range(unlist(lapply(X = temp.data.list, FUN = function(x){ range(x[,2]) }))) } if (grepl("y", plot.settings$log[[k]], ignore.case = TRUE)) ylim[which(ylim == 0)] <- 1 ##col (again) col <- if(length(plot.settings$col[[k]]) > 1 || plot.settings$col[[k]][1] != "auto"){ plot.settings$col[[k]] }else{ col <- get("col", pos = .LuminescenceEnv) } ##if length of provided colours is < the number of objects, just one colour is supported if (length(col) < length(object.list)) { col <- rep_len(col, length(object.list)) } ##lty if (length(plot.settings$lty[[k]]) < length(object.list)) { lty <- rep(plot.settings$lty[[k]], times = length(object.list)) }else{ lty <- plot.settings$lty[[k]] } ##pch if (length(plot.settings$pch[[k]]) < length(object.list)) { pch <- rep(plot.settings$pch[[k]], times = length(object.list)) }else{ pch <- plot.settings$pch[[k]] } ##legend.text legend.text <- if(!is.null(plot.settings$legend.text[[k]])){ plot.settings$legend.text[[k]] }else{ if(!is.null(records_max) && records_max[1] > 2) { paste("Curve", 
records_show) } else { paste("Curve", 1:length(object.list)) } } ##legend.col legend.col <- if(!is.null(plot.settings$legend.col[[k]])){ plot.settings$legend.col[[k]] }else{ NULL } ##legend.pos legend.pos <- if(!is.null(plot.settings$legend.pos[[k]])){ plot.settings$legend.pos[[k]] }else{ "topright" } if (legend.pos == "outside") { par.default.outside <- par()[c("mar", "xpd")] par(mar = c(5.1, 4.1, 4.1, 8.1)) } ##open plot area plot( NA,NA, xlim = xlim, ylim = ylim, main = main, xlab = xlab, ylab = ylab, log = plot.settings$log[[k]], sub = plot.settings$sub_title[[k]] ) ##plot single curve values ## ...?Why using matplot is a bad idea: The channel resolution might be different for (n in 1:length(temp.data.list)) { ##smooth ##Why here again ... because the call differs from the one before, where the argument ##is passed to plot_RLum.Data.Curve() if(plot.settings$smooth[[k]]){ k_factor <- ceiling(length(temp.data.list[[n]][, 2])/100) temp.data.list[[n]][, 2] <- zoo::rollmean(temp.data.list[[n]][, 2], k = k_factor, fill = NA) } ##remove 0 values if plotted on a log-scale # y-Axis if (grepl("y", plot.settings$log[[k]], ignore.case = TRUE)) temp.data.list[[n]] <- temp.data.list[[n]][which(temp.data.list[[n]]$y > 0), ] # x-Axis if (grepl("x", plot.settings$log[[k]], ignore.case = TRUE)) temp.data.list[[n]] <- temp.data.list[[n]][which(temp.data.list[[n]]$x > 0), ] ##print lines if (plot.settings$type[[k]] == "l" | plot.settings$type[[k]] == "b" ) { lines( temp.data.list[[n]], col = col[n], lty = lty[n], lwd = plot.settings$lwd[[k]] ) } ##add points if requested if (plot.settings$type[[k]] == "p" | plot.settings$type[[k]] == "b" ) { points( temp.data.list[[n]], col = col[n], pch = pch[n], ) } } ##add abline if(!is.null(abline[[k]])){ do.call(what = "abline", args = abline[k]) } ##mtext mtext(plot.settings$mtext[[k]], side = 3, cex = .8 * plot.settings$cex[[k]]) ##if legend is outside of the plotting area we need to allow overplotting ##AFTER all lines have been drawn if 
(legend.pos == "outside") { par(xpd = TRUE) # determine legend position on log(y) scale if (grepl("y", plot.settings$log[[k]], ignore.case = TRUE)) ypos <- 10^par()$usr[4] else ypos <- par()$usr[4] # determine position on log(x) scale if (grepl("x", plot.settings$log[[k]], ignore.case = TRUE)) xpos <- 10^par()$usr[2] else xpos <- par()$usr[2] } ##legend if (plot.settings$legend[[k]]) { legend( x = ifelse(legend.pos == "outside", xpos, legend.pos), y = ifelse(legend.pos == "outside", ypos, NULL), legend = legend.text, lwd = plot.settings$lwd[[k]], lty = plot.settings$lty[[k]], col = if (is.null(legend.col)) { col[1:length(object.list)] } else{ legend.col }, bty = "n", cex = 0.8 * plot.settings$cex[[k]] ) # revert the overplotting if (legend.pos == "outside") par(xpd = FALSE) } } ##reset graphic settings if (exists("par.default.outside")) { par(par.default.outside) rm(par.default.outside) } par(par.default) rm(par.default) } } Luminescence/R/calc_FuchsLang2001.R0000644000176200001440000002070014264017373016355 0ustar liggesusers#' Apply the model after Fuchs & Lang (2001) to a given De distribution. #' #' This function applies the method according to Fuchs & Lang (2001) for #' heterogeneously bleached samples with a given coefficient of variation #' threshold. #' #' **Used values** #' #' If the coefficient of variation (`c[v]`) of the first #' two values is larger than the threshold `c[v_threshold]`, the first value is #' skipped. Use the `startDeValue` argument to define a start value for #' calculation (e.g. 2nd or 3rd value). #' #' **Basic steps of the approach** #' #' 1. Estimate natural relative variation of the sample using a dose recovery test #' 2. Sort the input values ascendantly #' 3. Calculate a running mean, starting with the lowermost two values and add values iteratively. #' 4. 
Stop if the calculated `c[v]` exceeds the specified `cvThreshold` #' #' @param data [RLum.Results-class] or [data.frame] (**required**): #' for [data.frame]: two columns with De `(data[,1])` and De error `(values[,2])` #' #' @param cvThreshold [numeric] (*with default*): #' coefficient of variation in percent, as threshold for the method, #' e.g. `cvThreshold = 3`. See details #' . #' @param startDeValue [numeric] (*with default*): #' number of the first aliquot that is used for the calculations #' #' @param plot [logical] (*with default*): #' plot output `TRUE`/`FALSE` #' #' @param ... further arguments and graphical parameters passed to [plot] #' #' @return #' Returns a plot (*optional*) and terminal output. In addition an #' [RLum.Results-class] object is returned containing the #' following elements: #' #' \item{summary}{[data.frame] summary of all relevant model results.} #' \item{data}{[data.frame] original input data} #' \item{args}{[list] used arguments} #' \item{call}{[call] the function call} #' \item{usedDeValues}{[data.frame] containing the used values for the calculation} #' #' @note Please consider the requirements and the constraints of this method #' (see Fuchs & Lang, 2001) #' #' @section Function version: 0.4.1 #' #' @author #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) \cr #' Christoph Burow, University of Cologne (Germany) #' #' @seealso [plot], [calc_MinDose], [calc_FiniteMixture], [calc_CentralDose], #' [calc_CommonDose], [RLum.Results-class] #' #' @references #' Fuchs, M. & Lang, A., 2001. OSL dating of coarse-grain fluvial #' quartz using single-aliquot protocols on sediments from NE Peloponnese, #' Greece. In: Quaternary Science Reviews 20, 783-787. #' #' Fuchs, M. & Wagner, G.A., 2003. Recognition of insufficient bleaching by #' small aliquots of quartz for reconstructing soil erosion in Greece. #' Quaternary Science Reviews 22, 1161-1167. 
#' #' @keywords dplot #' #' #' @examples #' ## load example data #' data(ExampleData.DeValues, envir = environment()) #' #' ## calculate De according to Fuchs & Lang (2001) #' temp<- calc_FuchsLang2001(ExampleData.DeValues$BT998, cvThreshold = 5) #' #' @md #' @export calc_FuchsLang2001 <- function( data, cvThreshold = 5, startDeValue = 1, plot = TRUE, ... ){ # Integrity Tests --------------------------------------------------------- if(!missing(data)){ if(!is(data, "data.frame") & !is(data,"RLum.Results")){ stop("[calc_FuchsLang2001()] 'data' has to be of type 'data.frame' or 'RLum.Results'!", call. = FALSE) } else { if(is(data, "RLum.Results")){ data <- get_RLum(data, "data") } } } # Deal with extra arguments ----------------------------------------------- ##deal with addition arguments extraArgs <- list(...) verbose <- if("verbose" %in% names(extraArgs)) {extraArgs$verbose} else {TRUE} ##============================================================================## ##PREPARE DATA ##============================================================================## ##1. order values in ascending order write used D[e] values in data.frame o <- order(data[[1]]) # o is only an order parameter data_ordered <- data[o,] # sort values after o and write them into a new variable ##2. 
estimate D[e] # set variables usedDeValues <- data.frame(De = NA, De_Error = NA, cv = NA) endDeValue <- startDeValue[1] # if the first D[e] values are not used write this information in the data.frame if (startDeValue[1] != 1) { n <- abs(1 - startDeValue[1]) # write used D[e] values in data.frame usedDeValues[1:n, 1] <- data_ordered[1:n, 1] usedDeValues[1:n, 2] <- data_ordered[1:n, 2] usedDeValues[1:n, 3] <- "skipped" } ##=================================================================================================## ##LOOP FOR MODEL ##=================================================================================================## # repeat loop (run at least one time) repeat { #calculate mean, sd and cv mean<-round(mean(data_ordered[startDeValue:endDeValue,1]),digits=2) #calculate mean from ordered D[e] values sd<-round(sd(data_ordered[startDeValue:endDeValue,1]),digits=2) #calculate sd from ordered D[e] values cv <- round(sd / mean * 100, digits = 2) #calculate coefficient of variation # break if cv > cvThreshold if (cv > cvThreshold[1] & endDeValue > startDeValue) { # if the first two D[e] values give a cv > cvThreshold, than skip the first D[e] value if (endDeValue-startDeValue<2) { # write used D[e] values in data.frame usedDeValues[endDeValue, 1] <- data_ordered[endDeValue, 1] usedDeValues[endDeValue, 2] <- data_ordered[endDeValue, 2] usedDeValues[endDeValue - 1, 3] <- "not used" # go to the next D[e] value startDeValue <- startDeValue + 1 } else { usedDeValues[endDeValue, 1] <- data_ordered[endDeValue, 1] usedDeValues[endDeValue, 2] <- data_ordered[endDeValue, 2] usedDeValues[endDeValue, 3] <- paste("# ", cv, " %", sep = "") break #break loop } }#EndIf else { # write used D[e] values in data.frame usedDeValues[endDeValue,1]<-data_ordered[endDeValue,1] usedDeValues[endDeValue,2]<-data_ordered[endDeValue,2] # first cv values alway contains NA to ensure that NA% is not printed test if(is.na(cv)==TRUE) { usedDeValues[endDeValue,3]<-cv } else { 
usedDeValues[endDeValue,3]<-paste(cv," %",sep="") } }#EndElse # go the next D[e] value until the maximum number is reached if (endDeValue> data.frame AND data.frame >> RLum.Data.Spectrum #' as() #' #' for `[RLum.Data.Spectrum-class]` #' #' #' **[RLum.Data.Spectrum-class]** #' #' \tabular{ll}{ #' **from** \tab **to**\cr #' `data.frame` \tab `data.frame`\cr #' `matrix` \tab `matrix` #' } #' #' #' @md #' @name as setAs("data.frame", "RLum.Data.Spectrum", function(from,to){ new(to, recordType = NA_character_, curveType = NA_character_, data = as.matrix(from), info = list()) }) setAs("RLum.Data.Spectrum", "data.frame", function(from){ as.data.frame(from@data) }) ##MATRIX ##COERCE RLum.Data.Spectrum >> matrix AND matrix >> RLum.Data.Spectrum setAs("matrix", "RLum.Data.Spectrum", function(from,to){ new(to, recordType = NA_character_, curveType = NA_character_, data = from, info = list()) }) setAs("RLum.Data.Spectrum", "matrix", function(from){ from@data }) # show() ------------------------------------------------------------------------------------- #' @describeIn RLum.Data.Spectrum #' Show structure of `RLum.Data.Spectrum` object #' #' @keywords internal #' #' @md #' @export setMethod("show", signature(object = "RLum.Data.Spectrum"), function(object){ x.range <- suppressWarnings(range(as.numeric(rownames(object@data)))) y.range <- suppressWarnings(range(as.numeric(colnames(object@data)))) z.range <- range(object@data) ##print information cat("\n [RLum.Data.Spectrum-class]") cat("\n\t recordType:", object@recordType) cat("\n\t curveType:", object@curveType) cat("\n\t .. recorded frames:", length(object@data[1,])) cat("\n\t .. .. measured values per frame:", length(object@data[,1])) cat("\n\t .. .. range wavelength/pixel:", x.range) cat("\n\t .. .. range time/temp.:", y.range) cat("\n\t .. .. 
range count values:", z.range) cat("\n\t additional info elements:", length(object@info)) #cat("\n\t\t >> names:", names(object@info)) } ) # set_RLum() ---------------------------------------------------------------------------------- #' @describeIn RLum.Data.Spectrum #' Construction method for RLum.Data.Spectrum object. The slot info is optional #' and predefined as empty list by default #' #' @param class [`set_RLum`]; [character] (*automatic*): #' name of the `RLum` class to create. #' #' @param originator [character] (*automatic*): #' contains the name of the calling function (the function that produces this object); #' can be set manually. #' #' @param .uid [`set_RLum`]; [character] (*automatic*): #' sets an unique ID for this object using the internal C++ function `create_UID`. #' #' @param .pid [`set_RLum`]; [character] (*with default*): #' option to provide a parent id for nesting at will. #' #' @param recordType [`set_RLum`]; [character]: #' record type (e.g. "OSL") #' #' @param curveType [`set_RLum`]; [character]: #' curve type (e.g. "predefined" or "measured") #' #' @param data [`set_RLum`]; [matrix]: #' raw curve data. If data is of type `RLum.Data.Spectrum`, this can be used #' to re-construct the object. 
If the object is reconstructed, `.uid`, `.pid` and `orginator` #' are always taken from the input object #' #' @param info [`set_RLum`] [list]: #' info elements #' #' @return #' #' **`[set_RLum]`** #' #' An object from the class `RLum.Data.Spectrum` #' #' @md #' @export setMethod( "set_RLum", signature = signature("RLum.Data.Spectrum"), definition = function( class, originator, .uid, .pid, recordType = "Spectrum", curveType = NA_character_, data = matrix(), info = list()) { ##The case where an RLum.Data.Spectrum object can be provided ##with this RLum.Data.Spectrum objects can be provided to be reconstructed if (is(data, "RLum.Data.Spectrum")) { ##check for missing curveType if (missing(curveType)) curveType <- data@curveType ##check for missing recordType if (missing(recordType)) recordType <- data@recordType ##check for missing data ... not possible as data is the object itself ##check for missing info if (missing(info)) info <- data@info ##check for missing .uid and .pid >> this are always taken from the ##original dataset ##set empty clas form object newRLumDataSpectrum <- new("RLum.Data.Spectrum") ##fill - this is the faster way, filling in new() costs ... newRLumDataSpectrum@originator = data@originator newRLumDataSpectrum@recordType = recordType newRLumDataSpectrum@curveType = curveType newRLumDataSpectrum@data = data@data newRLumDataSpectrum@info = info newRLumDataSpectrum@.uid = data@.uid newRLumDataSpectrum@.pid = data@.pid } else { ##set empty class from object newRLumDataSpectrum <- new("RLum.Data.Spectrum") ##fill - this is the faster way, filling in new() costs ... 
newRLumDataSpectrum@originator = originator
newRLumDataSpectrum@recordType = recordType
newRLumDataSpectrum@curveType = curveType
newRLumDataSpectrum@data = data
newRLumDataSpectrum@info = info
newRLumDataSpectrum@.uid = .uid
newRLumDataSpectrum@.pid = .pid
}

return(newRLumDataSpectrum)
})

# get_RLum() ----------------------------------------------------------------------------------
#' @describeIn RLum.Data.Spectrum
#' Accessor method for RLum.Data.Spectrum object. The argument info.object
#' is optional to directly access the info elements. If no info element name
#' is provided, the raw curve data (matrix) will be returned
#'
#' @param object [`get_RLum`], [`names_RLum`] (**required**):
#' an object of class [RLum.Data.Spectrum-class]
#'
#' @param info.object [`get_RLum`]; [character] (*optional*):
#' the name of the info object to be called
#'
#' @return
#'
#' **`[get_RLum]`**
#'
#' 1. A [matrix] with the spectrum values or
#' 2. only the info object if `info.object` was set.
#'
#' @md
#' @export
setMethod("get_RLum",
          signature("RLum.Data.Spectrum"),
          definition = function(object, info.object) {

  ##if missing info.object just show the curve values
  if (missing(info.object) == FALSE){
    ## an info element was requested: validate the name before lookup
    if(is(info.object, "character") == FALSE)
      stop("[get_RLum] 'info.object' has to be a character!", call. = FALSE)

    if (info.object %in% names(object@info) == TRUE){
      unlist(object@info[info.object])

    } else {
      ## unknown element: report the valid names in the error message
      stop(paste0(
        "[get_RLum] Invalid element name. Valid names are: ",
        paste(names(object@info), collapse = ", ")
      ), call. = FALSE)
    }

  } else {
    ## no info element requested: return the raw spectrum matrix
    object@data
  }
})

# names() -------------------------------------------------------------------------------------
#' @describeIn RLum.Data.Spectrum
#' Returns the names info elements coming along with this curve object
#'
#' @return
#'
#' **`[names_RLum]`**
#'
#' The names of the info objects
#'
#' @md
#' @export
setMethod("names_RLum",
          "RLum.Data.Spectrum",
          function(object){
            names(object@info)
          })

# bin_RLum() ----------------------------------------------------------------------------------#
#' @describeIn RLum.Data.Spectrum
#' Allows binning of RLum.Data.Spectrum data. Count values and values on the x-axis are summed-up;
#' for wavelength/energy values the mean is calculated.
#'
#' @param bin_size.col [integer] (*with default*):
#' set number of channels used for each bin, e.g. `bin_size.col = 2` means that
#' two channels are binned. Note: The function does not check the input, very large values
#' mean a full column binning (a single sum)
#'
#' @param bin_size.row [integer] (*with default*):
#' set number of channels used for each bin, e.g. `bin_size.row = 2` means that
#' two channels are binned. Note: The function does not check the input, very large values
#' mean a full row binning (a single sum)
#'
#' @return
#'
#' **`[bin_RLum.Data]`**
#'
#' Same object as input, after applying the binning.
#'
#' @md
#' @export
setMethod(f = "bin_RLum.Data",
          signature = "RLum.Data.Spectrum",
          function(object, bin_size.col = 1, bin_size.row = 1) {

  ##make sure that we have no input problems
  ## use is.numeric() instead of inherits(x, "numeric"): the documented
  ## [integer] input (e.g. 2L) is of class 'integer', for which
  ## inherits(x, "numeric") is FALSE and would wrongly trigger the error
  if (!is.numeric(bin_size.col) || !is.numeric(bin_size.row)){
    stop("[bin_RLum.Data()] 'bin_size.row' and 'bin_size.col' must be of class 'numeric'!", call. = FALSE)
  }

  ##make sure that we do not get in trouble with negative values
  bin_size.col <- abs(bin_size.col)
  bin_size.row <- abs(bin_size.row)

  ##perform binning
  ##we want to be efficient, so we start
  ##with the larger object
  if(bin_size.row > bin_size.col){
    ##row binning first
    m <- .matrix_binning(object@data, bin_size = bin_size.row, bin_col = FALSE, names = "mean")
    m <- .matrix_binning(m, bin_size = bin_size.col, bin_col = TRUE, names = "groups")

  } else {
    ##column binning first
    m <- .matrix_binning(object@data, bin_size = bin_size.col, bin_col = TRUE, names = "groups")
    m <- .matrix_binning(m, bin_size = bin_size.row, bin_col = FALSE, names = "mean")
  }

  ##write back to object
  object@data <- m

  ##return object
  return(object)
})
Luminescence/R/set_Risoe.BINfileData.R0000644000176200001440000000176214264017373017222 0ustar liggesusers#' General setter function for Risoe.BINfileData S4 class objects
#'
#' Function calls object-specific set functions for Risoe.BINfileData S4 class objects.
#'
#' The function provides a generalised access point for specific
#' [Risoe.BINfileData-class] objects.\cr
#' Depending on the input object, the corresponding set function will be selected.
#' Allowed arguments can be found in the documentations of the corresponding
#' [Risoe.BINfileData-class] class.
#'
#' @param METADATA [data.frame] (*with default*): metadata table of the object
#'
#' @param DATA [list] (*with default*): list of measurement data vectors
#'
#' @param .RESERVED [list] (*with default*): list of reserved (internal) elements
#'
#' @return Returns an object of class [Risoe.BINfileData-class], as produced by the corresponding method.
#'
#' @section Function version: 0.1
#'
#' @author
#' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#' @seealso [Risoe.BINfileData-class]
#'
#' @keywords utilities
#'
#' @md
#' @export
setGeneric("set_Risoe.BINfileData",
           function(METADATA = data.frame(), DATA = list(), .RESERVED = list()) {
             standardGeneric("set_Risoe.BINfileData")
           },
           package = "Luminescence"
)
Luminescence/R/merge_RLum.Analysis.R0000644000176200001440000001017514264017373017043 0ustar liggesusers#' Merge function for RLum.Analysis S4 class objects
#'
#' Function allows merging of RLum.Analysis objects and adding of allowed
#' objects to an RLum.Analysis.
#'
#' This function simply allows merging of [RLum.Analysis-class]
#' objects. Additionally other [RLum-class] objects can be added
#' to an existing [RLum.Analysis-class] object. Supported objects
#' to be added are: [RLum.Data.Curve-class],
#' [RLum.Data.Spectrum-class] and
#' [RLum.Data.Image-class].
#'
#' The order in the new [RLum.Analysis-class] object is the object
#' order provided with the input list.
#'
#' @param objects [list] of [RLum.Analysis-class] (**required**):
#' list of S4 objects of class `RLum.Analysis`. Furthermore other objects of
#' class [RLum-class] can be added, see details.
#'
#' @return Return an [RLum.Analysis-class] object.
#'
#' @note
#' The information for the slot 'protocol' is taken from the first
#' [RLum.Analysis-class] object in the input list. Therefore at
#' least one object of type [RLum.Analysis-class] has to be provided.
#'
#' @section Function version: 0.2.0
#'
#' @author
#' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#' @seealso [merge_RLum], [RLum.Analysis-class], [RLum.Data.Curve-class],
#' [RLum.Data.Spectrum-class], [RLum.Data.Image-class], [RLum-class]
#'
#'
#' @keywords utilities internal
#'
#' @examples
#'
#'
#' ##merge different RLum objects from the example data
#' data(ExampleData.RLum.Analysis, envir = environment())
#' data(ExampleData.BINfileData, envir = environment())
#'
#' object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos=1)
#' curve <- get_RLum(object)[[2]]
#'
#' temp.merged <- merge_RLum.Analysis(list(curve, IRSAR.RF.Data, IRSAR.RF.Data))
#'
#' @md
#' @export
merge_RLum.Analysis<- function(
  objects
){

  # Integrity checks ----------------------------------------------------------------------------

  ##check if object is of class RLum
  ##every element must inherit from RLum; record each element's class for the check below
  temp.class.test <- sapply(1:length(objects), function(x){

    if(is(objects[[x]], "RLum") == FALSE){

      temp.text <- paste(
        "[merge_RLum.Analysis()]: At least element", x, "is not of class 'RLum' or a derivative class!")

      stop(temp.text)
    }

    ##provide class of objects
    is(objects[[x]])[1]

  })

  ##check if at least one object of RLum.Analysis is provided
  ##(its 'protocol' slot seeds the merged object, see @note above)
  if(!"RLum.Analysis"%in%temp.class.test){

    stop("[merge_RLum.Analysis()] At least one input object in the list has to be of class 'RLum.Analysis'!")

  }

  # Merge objects -------------------------------------------------------------------------------

  ##(0) get recent environment to later set variable temp.meta.data.first
  temp.environment  <- environment()
  temp.meta.data.first <- NA; rm(temp.meta.data.first) #to avoid problems with the R check routine

  ##(1) collect all elements in a list
  temp.element.list <- unlist(lapply(1:length(objects), function(x){

    ##Depending on the element the right function is used
    if(is(objects[[x]])[1] == "RLum.Analysis"){

      ##grep export meta data from the first RLum.Analysis object and write
      if(!exists("temp.meta.data.first")){
assign("temp.meta.data.first", objects[[x]]@protocol, envir = temp.environment) } ##return to list get_RLum(objects[[x]]) }else if((is(objects[[x]])[1] == "RLum.Data.Curve") | (is(objects[[x]])[1] == "RLum.Data.Image") | (is(objects[[x]])[1] == "RLum.Data.Spectrum")){ ##return to list objects[[x]] }else{ stop("[merge_RLum.Anlysis()] What ever was provided, this 'RLum' object is not supported!") } })) # Build new RLum.Analysis object -------------------------------------------------------------- temp.new.RLum.Analysis <- set_RLum( class = "RLum.Analysis", originator = "merge_RLum.Analysis", records = temp.element.list, protocol = temp.meta.data.first, info = unlist(lapply(objects, function(x) { x@info }), recursive = FALSE), .pid = unlist(lapply(objects, function(x) { x@.uid })) ) # Return object ------------------------------------------------------------------------------- return( temp.new.RLum.Analysis) } Luminescence/R/RcppExports.R0000644000176200001440000000144714521210033015476 0ustar liggesusers# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 create_UID <- function() { .Call(`_Luminescence_create_UID`) } src_analyse_IRSARRF_SRS <- function(values_regenerated_limited, values_natural_limited, vslide_range, n_MC, trace = FALSE) { .Call(`_Luminescence_analyse_IRSARRF_SRS`, values_regenerated_limited, values_natural_limited, vslide_range, n_MC, trace) } src_create_RLumDataCurve_matrix <- function(DATA, VERSION, NPOINTS, LTYPE, LOW, HIGH, AN_TEMP, TOLDELAY, TOLON, TOLOFF) { .Call(`_Luminescence_create_RLumDataCurve_matrix`, DATA, VERSION, NPOINTS, LTYPE, LOW, HIGH, AN_TEMP, TOLDELAY, TOLON, TOLOFF) } src_get_XSYG_curve_values <- function(s) { .Call(`_Luminescence_src_get_XSYG_curve_values`, s) } Luminescence/R/plot_Risoe.BINfileData.R0000644000176200001440000002110714264017373017400 0ustar liggesusers#' Plot single luminescence curves from a BIN file object #' #' Plots single luminescence 
curves from an object returned by the #' [read_BIN2R] function. #' #' **Nomenclature** #' #' See [Risoe.BINfileData-class] #' #' **curve.transformation** #' #' This argument allows transforming continuous wave (CW) curves to pseudo #' (linear) modulated curves. For the transformation, the functions of the #' package are used. Currently, it is not possible to pass further arguments #' to the transformation functions. The argument works only for `ltype` #' `OSL` and `IRSL`. #' #' **Irradiation time** #' #' Plotting the irradiation time (s) or the given dose (Gy) requires that the #' variable `IRR_TIME` has been set within the BIN-file. This is normally #' done by using the 'Run Info' option within the Sequence Editor or by editing #' in R. #' #' @param BINfileData [Risoe.BINfileData-class] (**required**): #' requires an S4 object returned by the [read_BIN2R] function. #' #' @param position [vector] (*optional*): #' option to limit the plotted curves by position #' (e.g. `position = 1`, `position = c(1,3,5)`). #' #' @param run [vector] (*optional*): #' option to limit the plotted curves by run #' (e.g., `run = 1`, `run = c(1,3,5)`). #' #' @param set [vector] (*optional*): #' option to limit the plotted curves by set #' (e.g., `set = 1`, `set = c(1,3,5)`). #' #' @param sorter [character] (*with default*): #' the plot output can be ordered by "POSITION","SET" or "RUN". #' POSITION, SET and RUN are options defined in the Risoe Sequence Editor. #' #' @param ltype [character] (*with default*): #' option to limit the plotted curves by the type of luminescence stimulation. #' Allowed values: `"IRSL"`, `"OSL"`,`"TL"`, `"RIR"`, `"RBR"` #' (corresponds to LM-OSL), `"RL"`. All type of curves are plotted by #' default. #' #' @param curve.transformation [character] (*optional*): #' allows transforming CW-OSL and CW-IRSL curves to pseudo-LM curves via #' transformation functions. Allowed values are: `CW2pLM`, `CW2pLMi`, `CW2pHMi` and #' `CW2pPMi`. See details. 
#' #' @param dose_rate [numeric] (*optional*): #' dose rate of the irradiation source at the measurement date. #' If set, the given irradiation dose will be shown in Gy. See details. #' #' @param temp.lab [character] (*optional*): #' option to allow for different temperature units. If no value is set deg. C is chosen. #' #' @param cex.global [numeric] (*with default*): #' global scaling factor. #' #' @param ... further undocumented plot arguments. #' #' @return Returns a plot. #' #' @note #' The function has been successfully tested for the Sequence Editor file #' output version 3 and 4. #' #' @section Function version: 0.4.1 #' #' @author #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)\cr #' Michael Dietze, GFZ Potsdam (Germany) #' #' @seealso [Risoe.BINfileData-class],[read_BIN2R], [CW2pLM], [CW2pLMi], #' [CW2pPMi], [CW2pHMi] #' #' @references #' Duller, G., 2007. Analyst. pp. 1-45. #' #' @keywords dplot #' #' @examples #' #' ##load data #' data(ExampleData.BINfileData, envir = environment()) #' #' ##plot all curves from the first position to the desktop #' #pdf(file = "~/Desktop/CurveOutput.pdf", paper = "a4", height = 11, onefile = TRUE) #' #' ##example - load from *.bin file #' #BINfile<- file.choose() #' #BINfileData<-read_BIN2R(BINfile) #' #' #par(mfrow = c(4,3), oma = c(0.5,1,0.5,1)) #' #plot_Risoe.BINfileData(CWOSL.SAR.Data,position = 1) #' #mtext(side = 4, BINfile, outer = TRUE, col = "blue", cex = .7) #' #dev.off() #' #' @md #' @export plot_Risoe.BINfileData<- function( BINfileData, position, run, set, sorter = "POSITION", ltype = c("IRSL","OSL","TL","RIR","RBR","RL"), curve.transformation, dose_rate, temp.lab, cex.global = 1, ... ){ ##check if the object is of type Risoe.BINfileData if(!inherits(BINfileData, "Risoe.BINfileData")) stop("Wrong object! Object of type Risoe.BINfileData needed.", call. 
= FALSE) temp<-BINfileData # Missing check ---------------------------------------------------------------- ##set plot position if missing if(missing(position)==TRUE){position<-c(min(temp@METADATA[,"POSITION"]):max(temp@METADATA[,"POSITION"]))} if(missing(run)==TRUE){run<-c(min(temp@METADATA[,"RUN"]):max(temp@METADATA[,"RUN"]))} if(missing(set)==TRUE){set<-c(min(temp@METADATA[,"SET"]):max(temp@METADATA[,"SET"]))} ##temp.lab if(missing(temp.lab) == TRUE){temp.lab <- "\u00B0C"} ##fun extraArgs <- list(...) # read out additional arguments list fun <- if("fun" %in% names(extraArgs)) {extraArgs$fun} else {FALSE} # Ordering -------------------------------------------------------------------- ##(1) order by RUN, SET OR BY POSITION if(sorter=="RUN"){ temp@METADATA<-temp@METADATA[order(temp@METADATA[,"RUN"]),] }else if(sorter=="SET"){ temp@METADATA<-temp@METADATA[order(temp@METADATA[,"SET"]),] }else { temp@METADATA<-temp@METADATA[order(temp@METADATA[,"POSITION"]),] } # Select values for plotting ------------------------------------------------------------------ ##(2) set SEL for selected position ##set all to FALSE temp@METADATA[,"SEL"]<-FALSE ##set TRUE temp@METADATA[(temp@METADATA[,"POSITION"] %in% position)==TRUE & (temp@METADATA[,"RUN"] %in% run)==TRUE & (temp@METADATA[,"SET"] %in% set)==TRUE & (temp@METADATA[,"LTYPE"] %in% ltype)==TRUE,"SEL"]<-TRUE ##------------------------------------------------------------------------## ##PLOTTING ##------------------------------------------------------------------------## ##(3) plot curves for(i in 1:length(temp@METADATA[,"ID"])){ ##print only if SEL == TRUE if(temp@METADATA[i,"SEL"]==TRUE) { ##find measured unit measured_unit<-if(temp@METADATA[i,"LTYPE"]=="TL"){" \u00B0C"}else{"s"} ##set x and y values values.x <- seq(temp@METADATA[i,"HIGH"]/temp@METADATA[i,"NPOINTS"], temp@METADATA[i,"HIGH"],by=temp@METADATA[i,"HIGH"]/temp@METADATA[i,"NPOINTS"]) values.y <- unlist(temp@DATA[temp@METADATA[i,"ID"]]) values.xy <- 
data.frame(values.x, values.y) ##set curve transformation if wanted if((temp@METADATA[i,"LTYPE"] == "OSL" | temp@METADATA[i,"LTYPE"] == "IRSL") & missing(curve.transformation) == FALSE){ if(curve.transformation=="CW2pLM"){ values.xy <- CW2pLM(values.xy) }else if(curve.transformation=="CW2pLMi"){ values.xy <- CW2pLMi(values.xy)[,1:2] }else if(curve.transformation=="CW2pHMi"){ values.xy <- CW2pHMi(values.xy)[,1:2] }else if(curve.transformation=="CW2pPMi"){ values.xy <- CW2pPMi(values.xy)[,1:2] }else{ warning("Function for curve.transformation is unknown. No transformation is performed.") } } ##plot graph plot(values.xy, main=paste("pos=", temp@METADATA[i,"POSITION"],", run=", temp@METADATA[i,"RUN"], ", set=", temp@METADATA[i,"SET"],sep="" ), type="l", ylab=paste(temp@METADATA[i,"LTYPE"]," [cts/",round(temp@METADATA[i,"HIGH"]/temp@METADATA[i,"NPOINTS"],digits=3)," ", measured_unit,"]",sep=""), xlab=if(measured_unit=="\u00B0C"){paste("temp. [",temp.lab,"]",sep="")}else{"time [s]"}, col=if(temp@METADATA[i,"LTYPE"]=="IRSL" | temp@METADATA[i,"LTYPE"]=="RIR"){"red"} else if(temp@METADATA[i,"LTYPE"]=="OSL" | temp@METADATA[i,"LTYPE"]=="RBR"){"blue"} else{"black"}, sub=if(temp@METADATA[i,"LTYPE"]=="TL"){paste("(",temp@METADATA[i,"RATE"]," K/s)",sep="")}else{}, lwd=1.2*cex.global, cex=0.9*cex.global ) ##add mtext for temperature ##grep temperature (different for different verions) temperature<-if(temp@METADATA[i,"VERSION"]=="03"){temp@METADATA[i,"AN_TEMP"]} else{temp@METADATA[i,"TEMPERATURE"]} ##mtext mtext(side=3, if(temp@METADATA[i,"LTYPE"]=="TL"){paste("TL to ",temp@METADATA[i,"HIGH"], " ",temp.lab,sep="")} else{paste(temp@METADATA[i,"LTYPE"],"@",temperature," ",temp.lab ,sep="")}, cex=0.9*cex.global) ##add mtext for irradiation mtext(side=4,cex=0.8*cex.global, line=0.5, if(temp@METADATA[i, "IRR_TIME"]!=0){ if(missing("dose_rate")==TRUE){ paste("dose = ",temp@METADATA[i, "IRR_TIME"], " s", sep="") }else{ paste("dose = ",temp@METADATA[i, "IRR_TIME"]*dose_rate, " Gy", sep="") 
} } )#end mtext }#endif::selection }#endforloop if(fun==TRUE){sTeve()} } Luminescence/R/Risoe.BINfileData2RLum.Data.Curve.R0000644000176200001440000000726214264017373021165 0ustar liggesusers#' Convert an element from a Risoe.BINfileData object to an RLum.Data.Curve #' object #' #' The function converts one specified single record from a Risoe.BINfileData #' object to an RLum.Data.Curve object. #' #' The function extracts all `METADATA` from the `Risoe.BINfileData` #' object and stores them in the `RLum.Data.Curve` object. This function #' can be used stand-alone, but is the base function for [Risoe.BINfileData2RLum.Analysis]. #' #' @param object [Risoe.BINfileData-class] (**required**): #' `Risoe.BINfileData` object #' #' @param id [integer] (**required**): #' record id in the `Risoe.BINfileData` object of the curve that is to be #' stored in the `RLum.Data.Curve` object. If no value for id is provided, #' the record has to be specified by `pos`, `set` and `run`. #' #' @param pos [integer] (*optional*): #' record position number in the `Risoe.BINfileData` object of the curve that #' is to be stored in the `RLum.Data.Curve` object. If a value for `id` is #' provided, this argument is ignored. #' #' @param run [integer] (*optional*): #' record run number in the `Risoe.BINfileData` object of the curve that is #' to be stored in the `RLum.Data.Curve` object. If a value for `id` is #' provided, this argument is ignored. #' #' @param set [integer] (*optional*): #' record set number in the `Risoe.BINfileData` object of the curve that is #' to be stored in the `RLum.Data.Curve` object. If a value for `id` is #' provided, this argument is ignored. #' #' @return Returns an [RLum.Data.Curve-class] object. #' #' @note #' Due to changes in the BIN-file (version 3 to version 4) format the recalculation of TL-curves might be not #' overall correct for cases where the TL measurement is combined with a preheat. 
#'
#' @section Function version: 0.5.0
#'
#' @author
#' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)\cr
#' Christoph Burow, University of Cologne (Germany)
#'
#' @seealso [Risoe.BINfileData2RLum.Analysis], [set_RLum],
#' [RLum.Data.Curve-class], [RLum.Analysis-class], [Risoe.BINfileData-class],
#' [plot_RLum]
#'
#' @keywords manip
#'
#' @examples
#'
#' ##get package example data
#' data(ExampleData.BINfileData, envir = environment())
#'
#' ##convert one record
#' Risoe.BINfileData2RLum.Data.Curve(CWOSL.SAR.Data, id = 1)
#'
#' @md
#' @noRd
.Risoe.BINfileData2RLum.Data.Curve <- function(
  object,
  id,
  pos,
  run,
  set
){

  ##disaggregate object ... this makes it much faster below
  ##we could also access via index, not number, but this is far too risky, as
  ##every update in the BIN-file version will break the code here
  METADATA <- as.list(object@METADATA)
  DATA <- object@DATA

  # grep id of record -------------------------------------------------------
  ##if id is set, no input for pos and run is necessary
  if (missing(id)) {
    ## METADATA is a plain list of columns at this point (see as.list() above),
    ## so the former data.frame-style indexing METADATA[cond, "ID"] failed with
    ## 'incorrect number of dimensions'; subset the ID column directly instead
    id <- METADATA[["ID"]][
      METADATA[["POSITION"]] == pos &
      METADATA[["SET"]] == set &
      METADATA[["RUN"]] == run]
  }

  ##grep info elements
  ##one entry per metadata column, restricted to the selected record;
  ##lapply over the indices keeps the original column order
  info <- lapply(1:length(names(METADATA)), function(x){METADATA[[x]][id]})
  names(info) <- names(METADATA)

  # Build object ------------------------------------------------------------
  set_RLum(
    class = "RLum.Data.Curve",
    recordType = METADATA[["LTYPE"]][id],
    data = src_create_RLumDataCurve_matrix(
      DATA = DATA[[id]],
      NPOINTS = METADATA[["NPOINTS"]][id],
      VERSION = METADATA[["VERSION"]][id],
      LTYPE = METADATA[["LTYPE"]][id],
      LOW = METADATA[["LOW"]][id],
      HIGH = METADATA[["HIGH"]][id],
      AN_TEMP = METADATA[["AN_TEMP"]][id],
      TOLDELAY = METADATA[["TOLDELAY"]][id],
      TOLON = METADATA[["TOLON"]][id],
      TOLOFF = METADATA[["TOLOFF"]][id]
    ),
    info = info
  )
}
Luminescence/R/convert_Concentration2DoseRate.R0000644000176200001440000002213314236146743021302 0ustar liggesusers#'@title Dose-rate conversion function
#'
#'@description This function converts radionuclide concentrations #'(K in %, Th and U in ppm) into dose rates (Gy/ka). Beta-dose rates are also #'attenuated for the grain size. Beta and gamma-dose rates are corrected #'for the water content. This function converts concentrations into dose rates #'(Gy/ka) and corrects for grain size attenuation and water content #' #'Dose rate conversion factors can be chosen from Adamiec and Aitken (1998), #'Guerin et al. (2011), Liritzis et al. (201) and Cresswell et al. (2018). #'Default is Guerin et al. (2011). #' #'Grain size correction for beta dose rates is achieved using the correction #'factors published by Guérin et al. (2012). #' #'Water content correction is based on factors provided by Aitken (1985), #'with the factor for beta dose rate being 1.25 and for gamma 1.14. #' #'@details #' #'**The input data** #' #'\tabular{lll}{ #'COLUMN \tab DATA TYPE \tab DESCRIPTION\cr #'`Mineral` \tab `character` \tab `'FS'` for feldspar, `'Q'` for quartz\cr #'`K` \tab `numeric` \tab K nuclide content in %\cr #'`K_SE` \tab `numeric` \tab error on K nuclide content in %\cr #'`Th` \tab `numeric` \tab Th nuclide content in ppm\cr #'`Th_SE` \tab `numeric` error on Th nuclide content in ppm\cr #'`U` \tab `numeric` U nuclide content in ppm\cr #'`U_SE` \tab `numeric` \tab error on U nuclide content in ppm\cr #'`GrainSize` \tab `numeric` \tab average grain size in µm\cr #'`WaterContent` \tab `numeric` \tab mean water content in %\cr #'`WaterContent_SE` \tab `numeric` \tab relative error on water content #'} #' #' #'**Water content** #'The water content provided by the user should be calculated according to: #' #'\deqn{(Wet_weight - Dry_weight) / Dry_weight * 100} #' #'The unit for the weight is gram (g). 
#' #'@param input [data.frame] (*optional*): a table containing all relevant information #' for each individual layer if nothing is provided, the function returns a template [data.frame] #' Please note that until one dataset per input is supported! #' #'@param conversion [character] (*with default*): which dose rate conversion factors to use, #' defaults uses Guérin et al. (2011). For accepted values see [BaseDataSet.ConversionFactors] #' #'@return The function returns an [RLum.Results-class] object for which the first #'element is [matrix] with the converted values. If no input is provided, the #'function returns a template [data.frame] that can be used as input. #' #'@section Function version: 0.1.0 #' #'@author Svenja Riedesel, Aberystwyth University (United Kingdom) \cr #'Martin Autzen, DTU NUTECH Center for Nuclear Technologies (Denmark) #' #'@references #'Adamiec, G., Aitken, M.J., 1998. Dose-rate conversion factors: update. Ancient TL 16, 37-46. #' #'Cresswell., A.J., Carter, J., Sanderson, D.C.W., 2018. Dose rate conversion parameters: #'Assessment of nuclear data. Radiation Measurements 120, 195-201. #' #'Guerin, G., Mercier, N., Adamiec, G., 2011. Dose-rate conversion factors: update. #'Ancient TL, 29, 5-8. #' #'Guerin, G., Mercier, N., Nathan, R., Adamiec, G., Lefrais, Y., 2012. On the use #'of the infinite matrix assumption and associated concepts: A critical review. #'Radiation Measurements, 47, 778-785. #' #'Liritzis, I., Stamoulis, K., Papachristodoulou, C., Ioannides, K., 2013. #'A re-evaluation of radiation dose-rate conversion factors. Mediterranean #'Archaeology and Archaeometry 13, 1-15. 
#' #'@keywords datagen #' #'@examples #' #'## create input template #'input <- convert_Concentration2DoseRate() #' #'## fill input #'input$Mineral <- "FS" #'input$K <- 2.13 #'input$K_SE <- 0.07 #'input$Th <- 9.76 #'input$Th_SE <- 0.32 #'input$U <- 2.24 #'input$U_SE <- 0.12 #'input$GrainSize <- 200 #'input$WaterContent <- 30 #'input$WaterContent_SE <- 5 #' #'## convert #'convert_Concentration2DoseRate(input) #' #'@md #'@export convert_Concentration2DoseRate <- function( input, conversion = "Guerinetal2011" ){ # Alternate mode ---------------------------------------------------------- if(missing(input)){ message("[convert_Concentration2DoseRate()] Input template returned. Please fill this data.frame and use it as input to the function!") df <- data.frame( Mineral = NA_character_, K = NA_integer_, K_SE = NA_integer_, Th = NA_integer_, Th_SE = NA_integer_, U = NA_integer_, U_SE = NA_integer_, GrainSize = NA_integer_, WaterContent = NA_integer_, WaterContent_SE = NA_integer_) return(df) } # Load datasets ----------------------------------------------------------- ## fulfil CRAN checks BaseDataSet.ConversionFactors <- BaseDataSet.GrainSizeAttenuation <- NA ## load datasets load(system.file("data", "BaseDataSet.ConversionFactors.rda", package = "Luminescence")) load(system.file("data", "BaseDataSet.GrainSizeAttenuation.rda", package = "Luminescence")) ## we do this to be consistent with the code written by Svenja and Martin GSA <- BaseDataSet.GrainSizeAttenuation # Integrity tests ------------------------------------------------------------ if(!inherits(input, "data.frame") & !inherits(input, "matrix")) stop("[convert_Concentration2DoseRate()] input must be of type 'data.frame or 'matrix'!", call. = FALSE) if(ncol(input) != ncol(suppressMessages(convert_Concentration2DoseRate())) || nrow(input) > 1) stop("[convert_Concentration2DoseRate()] number of rows/columns in input does not match the requirements. See manual!", call. 
= FALSE) if(!conversion[1] %in% names(BaseDataSet.ConversionFactors)) stop("[convert_Concentration2DoseRate()] You have not entered a valid conversion. Please check your spelling and consult the documentation!", call. = FALSE) if(!any(input[,1] %in% c("FS","Q"))) stop("[convert_Concentration2DoseRate()] As mineral only 'FS' or 'Q' is supported!", call. = FALSE) # Convert ----------------------------------------------------------------- InfDR <- matrix(data = NA, nrow = 2, ncol = 6) colnames(InfDR) <- c("K","SE","Th","SE","U","SE") rownames(InfDR) <- c("Beta","Gamma") ### --- BETA DOSE RATES for (i in 1:3){ if (i == 1){ Col <- "K" } else if (i == 2){ Col <- "Th" } else { Col <- "U" } for (j in 1:2){ if (j== 1){ Temp = "beta" } else { Temp = "gamma" } Nuclide <- i * 2 N <- 2 * i - 1 Error <- Nuclide + 1 InfDR[j, N] <- input[1, Nuclide] * BaseDataSet.ConversionFactors[[conversion]][[Temp]][[Col]][1] # Calculate Dose Rate InfDR[j, Nuclide] <- sqrt((input[1, Error] / input[1, Nuclide]) ^ 2 + ( BaseDataSet.ConversionFactors[[conversion]][[Temp]][[Col]][2] / BaseDataSet.ConversionFactors[[conversion]][[Temp]][[Col]][1] ) ^ 2 ) # Calculate Error } } ##### --- dose rate for grain size --- ##### if (input[1,1] == "FS") { # FELDSPAR KFit <- approx(GSA$GrainSize, GSA$FS_K, n = 981, method = "linear") ThFit <- approx(GSA$GrainSize, GSA$FS_Th,n = 981, method = "linear") UFit <- approx(GSA$GrainSize, GSA$FS_U, n = 981, method = "linear") Temp <- which(KFit$x == input[1, 8]) InfDR[1, 1] <- InfDR[1, 1] * (1 - KFit$y[Temp]) # K InfDR[1, 3] <- InfDR[1, 3] * (1 - ThFit$y[Temp]) # Th InfDR[1, 5] <- InfDR[1, 5] * (1 - UFit$y[Temp]) # U } else if (input[1,1] == "Q") { # QUARTZ KFit <- approx(GSA$GrainSize, GSA$Q_K, n = 981, method = "linear") ThFit <- approx(GSA$GrainSize, GSA$Q_Th, n = 981, method = "linear") UFit <- approx(GSA$GrainSize, GSA$Q_U, n = 981, method = "linear") Temp <- which(KFit$x == input[1, 8]) InfDR[1, 1] <- InfDR[1, 1] * (1 - KFit$y[Temp]) # K InfDR[1, 3] <- InfDR[1, 
3] * (1 - ThFit$y[Temp]) # Th InfDR[1, 5] <- InfDR[1, 5] * (1 - UFit$y[Temp]) # U } ##### --- Correct beta sediment dose rate for water content --- ##### InfDRG <- matrix(data = NA, nrow = 2, ncol = 6) colnames(InfDRG) <- c("K", "SE", "Th", "SE", "U", "SE") rownames(InfDRG) <- c("Beta", "Gamma") WC <- input[1, 9] / 100 WCerr <- input[1, 10] / 100 for (i in 1:6){ for (j in 1:2){ if (j == 1){ k = 1.25 #Water content correction for beta } else { k = 1.14 #Water content correction for gamma } Remain <- i %% 2 if (Remain == 1){ InfDRG[j,i] <- InfDR[j,i]/(1 + k*WC) } else if (Remain == 0){ Temp <- (InfDR[j,i]/(1 + k*WC)) - (InfDR[j,i]/(1+k*(WC+WCerr))) InfDRG[j,i] <- InfDRG[j,i-1]*sqrt(InfDR[j,i]^2+(Temp/InfDR[j,i-1])^2) } } } InfDRG <- round(InfDRG, digits = 3) # Return ------------------------------------------------------------------ return( set_RLum( class = "RLum.Results", data = list( InfDRG = InfDRG, input_data = input ), info = list( call = sys.call() ))) } Luminescence/R/scale_GammaDose.R0000644000176200001440000007373314236146743016243 0ustar liggesusers#' Calculate the gamma dose deposited within a sample taking layer-to-layer #' variations in radioactivity into account (according to Aitken, 1985) #' #' This function calculates the gamma dose deposited in a luminescence sample #' taking into account layer-to-layer variations in sediment radioactivity . #' The function scales user inputs of uranium, thorium and potassium based on #' input parameters for sediment density, water content and given layer #' thicknesses and distances to the sample. #' #' **User Input** #' #' To calculate the gamma dose which is deposited in a sample, the user needs #' to provide information on those samples influencing the luminescence sample. #' As a rule of thumb, all sediment layers within at least 30 cm radius from #' the luminescence sample taken should be taken into account when calculating #' the gamma dose rate. 
However, the actual range of gamma radiation might be #' different, depending on the emitting radioelement, the water content and the #' sediment density of each layer (Aitken, 1985). Therefore the user is #' advised to provide as much detail as possible and physically sensible. #' #' The function requires a [data.frame] that is to be structured #' in columns and rows, with samples listed in rows. The first column contains #' information on the layer/sample ID, the second on the thickness (in cm) of #' each layer, whilst column 3 should contain `NA` for all layers that are not #' sampled for OSL/TL. For the layer the OSL/TL sample was taken from a numerical #' value must be provided, which is the distance (in cm) measured from **bottom** #' of the layer of interest. If the whole layer was sampled insert `0`. If the #' sample was taken from *within* the layer, insert a numerical value `>0`, #' which describes the distance from the middle of the sample to the bottom of #' the layer in cm. Columns 4 to 9 should contain radionuclide concentrations #' and their standard errors for #' potassium (in %), thorium (in ppm) and uranium (in ppm). Columns 10 and 11 #' give information on the water content and its uncertainty (standard error) #' in %. The layer density (in g/cm3) should be given in column 12. No cell #' should be left blank. Please ensure to keep the column titles as given in #' the example dataset (`data('ExampleData.ScaleGammaDose')`, see examples). #' #' The user can decide which dose rate #' conversion factors should be used to calculate the gamma dose rates. 
#' The options are: #' - `"Cresswelletal2018"` (Cresswell et al., 2018) #' - `"Liritzisetal2013"` (Liritzis et al., 2013) #' - `"Guerinetal2011"` (Guerin et al., 2011) #' - `"AdamiecAitken1998"` (Adamiec and Aitken, 1998) #' #' #' **Water content** #' #' The water content provided by the user should be calculated according to: #' #' \deqn{ ( Wet weight [g] - Dry weight [g] ) / Dry weight [g] * 100 } #' #' #' **Calculations** #' #' After converting the radionuclide concentrations into dose rates, the #' function will scale the dose rates based on the thickness of the layers, #' the distances to the sample, the water content and the density of the sediment. #' The calculations are based on Aitken (1985, Appendix H). As an example #' (equivalent to Aitken, 1985), assuming three layers of sediment, where **L** is #' inert and positioned in between the infinite thick and equally active #' layers **A** and **B**, the dose in **L** and **B** due to **A** is given by #' #' \deqn{ {1-f(x)}D_A } #' #' Where `x` is the distance into the inert medium, so `f(x)` is the weighted #' average fractional dose at `x` and `D_A` denotes that the dose is delivered by **A**. #' `f(x)` is derived from table H1 (Aitken, 1985), when setting `z = x`. #' Consequently, the dose in **A** and **L** due to **B** is given by #' #' \deqn{ {1 - f(t-x)}D_B } #' #' Here `t` is the thickness of **L** and the other parameters are denoted as above, #' just for the dose being delivered by B. `f(t-x)` is derived from table H1 #' (Aitken, 1985), when setting `z` equal to `t-x`. Following this, the dose in **L** #' delivered by **A** and **B** is given by #' #' \deqn{ {2 - f(x) - f(t-x)}D_{AB} } #' #' Since **A** and **B** are equally active `D_{AB} = D_A = D_B`. #' #' The function uses the value of the fractional dose rate at the layer #' boundary to start the calculation for the next layer. 
This way, the function #' is able to scale the gamma dose rate accurately for distant layers when the #' density and water content is not constant for the entire section. #' #' @param data [data.frame] (**required**): #' A table containing all relevant information for each individual layer. The #' table must have the following named columns: #' #' - `id` ([character]): an arbitrary id or name of each layer #' - `thickness` ([numeric]): vertical extent of each layer in cm #' - `sample_offset` ([logical]): distance of the sample in cm, #' **measured from the BOTTOM OF THE TARGET LAYER**. Except for the target layer #' all values must be `NA`. #' - `K` ([numeric]): K nuclide content in % #' - `K_se` ([numeric]): error on the K content #' - `Th` ([numeric]): Th nuclide content in ppm #' - `Th_se` ([numeric]): error on the Th content #' - `U` ([numeric]): U nuclide content in ppm #' - `U_se` ([numeric]): error on the U content #' - `water_content` ([numeric]): water content of each layer in % #' - `water_content_se` ([numeric]): error on the water content #' - `density` ([numeric]): bulk density of each layer in g/cm^-3 #' #' @param conversion_factors [character] (*optional*): #' The conversion factors used to calculate the dose rate from sediment #' nuclide contents. Valid options are: #' #' - `"Cresswelletal2018"` (default) #' - `"Liritzisetal2013"` #' - `"Guerinetal2011"` #' - `"AdamiecAitken1998"` #' #' @param fractional_gamma_dose [character] (*optional*): #' Factors to scale gamma dose rate values. Valid options are: #' #' - `"Aitken1985"` (default): Table H1 in the appendix #' #' @param verbose [logical] (*optional*): #' Show or hide console output (defaults to `TRUE`). #' #' @param plot [logical] (*optional*): #' Show or hide the plot (defaults to `TRUE`). #' #' @param plot_single [logical] (*optional*): #' Show all plots in one panel (defaults to `TRUE`). #' #' @param ... Further parameters passed to [barplot]. 
#' #' @return #' #' After performing the calculations the user is provided with different outputs. #' 1. The total gamma dose rate received by the sample (+/- uncertainties) as a #' print in the console. #' 2. A plot showing the sediment sequence, the user input sample information #' and the contribution to total gamma dose rate. #' 3. RLum Results. If the user wishes to save these results, writing a script #' to run the function and to save the results would look like this: #' #' ``` #' mydata <- read.table("c:/path/to/input/file.txt") #' results <- scale_GammaDose(mydata) #' table <- get_RLum(results) #' write.csv(table, "c:/path/to/results.csv") #' ``` #' #' -----------------------------------\cr #' `[ NUMERICAL OUTPUT ]`\cr #' -----------------------------------\cr #' #' **`RLum.Results`**-object #' #' **slot:** **`@data`** #' #' \tabular{lll}{ #' **Element** \tab **Type** \tab **Description**\cr #' `$summary` \tab `data.frame` \tab summary of the model results \cr #' `$data` \tab `data.frame` \tab the original input data \cr #' `$dose_rates` \tab `list` \tab two `data.frames` for the scaled and infinite matrix dose rates \cr #' `$tables` \tab `list` \tab several `data.frames` containing intermediate results \cr #' `$args` \tab `character` \tab arguments of the call \cr #' `$call` \tab `call` \tab the original function call \cr #' } #' #' **slot:** **`@info`** #' #' Currently unused. #' #' ------------------------\cr #' `[ PLOT OUTPUT ]`\cr #' ------------------------\cr #' #' Three plots are produced: #' #' - A visualisation of the provided sediment layer structure to quickly #' assess whether the data was provided and interpreted correctly. #' - A scatter plot of the nuclide contents per layer (K, Th, U) as well as the #' water content. This may help to correlate the dose rate contribution of #' specific layers to the layer of interest. 
#' - A barplot visualising the contribution of each layer to the total dose rate #' received by the sample in the target layer. #' #' @section Function version: 0.1.2 #' #' @keywords datagen #' #' @note #' **This function has BETA status. If possible, results should be** #' **cross-checked.** #' #' @author Svenja Riedesel, Aberystwyth University (United Kingdom) \cr #' Martin Autzen, DTU NUTECH Center for Nuclear Technologies (Denmark) \cr #' Christoph Burow, University of Cologne (Germany) \cr #' Based on an excel spreadsheet and accompanying macro written by Ian Bailiff. #' #' @seealso [ExampleData.ScaleGammaDose], #' [BaseDataSet.ConversionFactors], [approx], [barplot] #' #' @references #' #' Aitken, M.J., 1985. Thermoluminescence Dating. Academic Press, London. #' #' Adamiec, G., Aitken, M.J., 1998. Dose-rate conversion factors: update. #' Ancient TL 16, 37-46. #' #' Cresswell., A.J., Carter, J., Sanderson, D.C.W., 2018. #' Dose rate conversion parameters: Assessment of nuclear data. #' Radiation Measurements 120, 195-201. #' #' Guerin, G., Mercier, N., Adamiec, G., 2011. Dose-rate conversion #' factors: update. Ancient TL, 29, 5-8. #' #' Liritzis, I., Stamoulis, K., Papachristodoulou, C., Ioannides, K., 2013. #' A re-evaluation of radiation dose-rate conversion factors. Mediterranean #' Archaeology and Archaeometry 13, 1-15. #' #' #' @section Acknowledgements: #' #' We thank Dr Ian Bailiff for the provision of an excel spreadsheet, which has #' been very helpful when writing this function. 
#'
#' @examples
#'
#' # Load example data
#' data("ExampleData.ScaleGammaDose", envir = environment())
#' x <- ExampleData.ScaleGammaDose
#'
#' # Scale gamma dose rate
#' results <- scale_GammaDose(data = x,
#'                            conversion_factors = "Cresswelletal2018",
#'                            fractional_gamma_dose = "Aitken1985",
#'                            verbose = TRUE,
#'                            plot = TRUE)
#'
#' get_RLum(results)
#'
#' @md
#' @export
scale_GammaDose <- function(
  data,
  conversion_factors = c("Cresswelletal2018", "Guerinetal2011",
                         "AdamiecAitken1998", "Liritzisetal2013")[1],
  fractional_gamma_dose = c("Aitken1985")[1],
  verbose = TRUE,
  plot = TRUE,
  plot_single = TRUE,
  ...) {

  ## HELPER FUNCTION ----
  # Wrapper for formatC to enforce precise digit printing
  f <- function(x, d = 3) formatC(x, digits = d, format = "f")

  ## ------------------------------------------------------------------------ ##
  ## LOAD TABLES
  ## ------------------------------------------------------------------------ ##
  # To satisfy CRAN check ('no visible global binding')
  BaseDataSet.ConversionFactors <- BaseDataSet.FractionalGammaDose <- NA
  load(system.file("data", "BaseDataSet.ConversionFactors.rda", package = "Luminescence"))
  load(system.file("data", "BaseDataSet.FractionalGammaDose.rda", package = "Luminescence"))

  ## ------------------------------------------------------------------------ ##
  ## DEFAULT SETTINGS
  ## ------------------------------------------------------------------------ ##
  settings <- list(
    main = "Contributions of each layer to the total \n gamma dose rate received by the sample",
    xlab = "Contribution to total gamma dose rate (%) received by the sample",
    cex = 1.0,
    col = "grey",
    info = list()
  )

  # overwrite and append default values
  settings <- modifyList(settings, list(...))

  ## ------------------------------------------------------------------------ ##
  ## CHECK INPUT
  ## ------------------------------------------------------------------------ ##

  ## Input data
  # basic class and length check
  if (!is.data.frame(data))
    stop("'data' must be a data frame.", call. = FALSE)
  if (ncol(data) != 12)
    stop("'data' must have 12 columns (currently ", ncol(data), ").", call. = FALSE)

  # make sure that it has the correct column names
  colnames_expected <- c("id","thickness","sample_offset","K","K_se","Th","Th_se","U","U_se",
                         "water_content","water_content_se", "density")

  if (is.null(names(data)) || any(names(data) != colnames_expected)) {
    if (verbose)
      warning("Unexpected column names for 'data'. New names were automatically assigned. ",
              "Please make sure that columns are in proper order. See documentation.",
              call. = FALSE)

    colnames(data) <- colnames_expected
  }

  # check if there is only one target layer
  # FIX: error message grammar ("must be contain" -> "must contain")
  if (sum(!is.na(data$sample_offset)) != 1)
    stop("Only one layer must contain a numeric value in column 'sample_offset', all other rows must be `NA`.",
         call. = FALSE)

  # FIX: error message grammar ("the the" -> "the")
  if (!is.numeric(data$sample_offset[which(!is.na(data$sample_offset))]))
    stop("Non-numeric value in the row of the target layer.", call. = FALSE)

  if (data$sample_offset[which(!is.na(data$sample_offset))] < 0)
    stop("The numeric value in 'sample_offset' must be positive.", call. = FALSE)

  if (data$sample_offset[which(!is.na(data$sample_offset))] >
      data$thickness[which(!is.na(data$sample_offset))])
    stop("Impossible! Sample offset larger than the target-layer's thickness!", call. = FALSE)

  # conversion factors
  if (length(conversion_factors) != 1 || !is.character(conversion_factors))
    stop("'conversion_factors' must be an object of length 1 and of class 'character'.", call. = FALSE)

  if (!conversion_factors %in% names(BaseDataSet.ConversionFactors))
    stop("Invalid 'conversion_factors'. Valid options: ",
         paste(names(BaseDataSet.ConversionFactors), collapse = ", "), ".",
         call. = FALSE)

  # tables for gamma dose fractions
  if (length(fractional_gamma_dose) != 1 || !is.character(fractional_gamma_dose))
    stop("'fractional_gamma_dose' must be an object of length 1 and of class 'character'.", call. = FALSE)

  if (!fractional_gamma_dose %in% names(BaseDataSet.FractionalGammaDose))
    stop("Invalid 'fractional_gamma_dose'. Valid options: ",
         paste(names(BaseDataSet.FractionalGammaDose), collapse = ", "), ".",
         call. = FALSE)

  ## ------------------------------------------------------------------------ ##
  ## Select tables
  ## ------------------------------------------------------------------------ ##
  conv_fac <- BaseDataSet.ConversionFactors[[conversion_factors]]$gamma
  frac_dose <- BaseDataSet.FractionalGammaDose[[fractional_gamma_dose]]

  ## ------------------------------------------------------------------------ ##
  ## CALCULATION
  ## ------------------------------------------------------------------------ ##

  # Convert nuclide contents to (infinite matrix) dose rates; the *_re columns
  # are relative errors combining the measurement and conversion-factor errors
  dose_rate <- data.frame(
    K = data$K * conv_fac$K[1],
    K_re = sqrt( (data$K_se / data$K)^2 + conv_fac$K[2]^2 ),
    Th = data$Th * conv_fac$Th[1],
    Th_re = sqrt( (data$Th_se / data$Th)^2 + conv_fac$Th[2]^2 ),
    U = data$U * conv_fac$U[1],
    U_re = sqrt( (data$U_se / data$U)^2 + conv_fac$U[2]^2 )
  )

  dose_rate$sum <- dose_rate$K + dose_rate$Th + dose_rate$U
  dose_rate$sum_re <- sqrt(dose_rate$K_re^2 + dose_rate$Th_re^2 + dose_rate$U_re^2)

  # fractional contribution of each nuclide to the summed dose rate per layer
  dose_rate$K_frac <- dose_rate$K / dose_rate$sum
  dose_rate$K_frac_re <- sqrt(dose_rate$K_re^2 + dose_rate$sum_re^2 )
  dose_rate$Th_frac <- dose_rate$Th / dose_rate$sum
  dose_rate$Th_frac_re <- sqrt(dose_rate$Th_re^2 + dose_rate$sum_re^2 )
  dose_rate$U_frac <- dose_rate$U / dose_rate$sum
  dose_rate$U_frac_re <- sqrt(dose_rate$U_re^2 + dose_rate$sum_re^2 )

  ## weighted fractional dose
  # scale the tabulated z values by density and water content of each layer
  # (one column per layer)
  z_scale <- do.call(cbind, Map(function(d, wc) {
    (frac_dose$z * 2) / (d + ( (wc / 100) * d))
  }, data$density, data$water_content))

  # per-layer fractional dose rate weighted by the nuclide fractions
  layer_fracDoseRate <- do.call(cbind, Map(function(K, Th, U, K_re, Th_re, U_re) {
    data.frame(
      val = frac_dose$K * K + frac_dose$Th * Th + frac_dose$U * U,
      err = sqrt( K_re^2 + Th_re^2 + U_re^2 )
    )
  }, dose_rate$K_frac, dose_rate$Th_frac, dose_rate$U_frac,
  dose_rate$K_frac_re, dose_rate$Th_frac_re, dose_rate$U_frac_re))

  ## TODO: LEGACY CODE
  target <- which(!is.na(data$sample_offset))

  # FIX: when the target layer is the LAST row, the original expression
  # `(target+1):nrow(data)` produced a decreasing sequence (e.g. c(n+1, n)),
  # indexing out of bounds and injecting NA rows into 'distance'. Guard it.
  dist_below <- if (target < nrow(data)) {
    cumsum(data$thickness[(target + 1):nrow(data)]) + data$sample_offset[target]
  } else {
    numeric(0)
  }

  distance <- data.frame(
    upper = c(rev(cumsum(data$thickness[target:1])[-1]) - data$sample_offset[target],
              abs(data$sample_offset[target]),
              dist_below))
  distance$lower <- abs(distance$upper - data$thickness)

  ## Calculate infitite dose rate and dose received by the sample
  ## MAP: iterate over LAYERS
  Inf_frac <- as.data.frame(do.call(rbind, Map(function(z, n) {

    # interpolate the tabulated fractional dose curves on the layer's z-scale
    interpol <- Map(function(x) {
      approx(z, x, n = 1000, method = "linear")
    }, frac_dose[, c("K", "Th", "U")])

    x1 = data$thickness[n]
    x2 = 0
    C1 = which.min(abs(interpol$K$x - x1))
    C2 = which.min(abs(interpol$K$x - x2))

    ## MAP: iterate over NUCLIDE
    do.call(cbind, Map(function(x) {
      y1 = interpol[[x]]$y[C1]
      y2 = interpol[[x]]$y[C2]

      ### ----
      # walk layer by layer from layer n towards the target layer, carrying the
      # fractional dose values across each boundary (cf. Aitken 1985, App. H)
      if (n != target) {
        if (n < target) {
          k <- n + 1
          seq <- k:target
        } else if (n > target) {
          k <- n - 1
          seq <- target:k
        }

        for (j in seq) {
          fit <- approx(z_scale[ ,j], frac_dose[ , x], n = 1000, method = "linear")
          x1_temp <- which.min(abs(fit$y - y1))
          x2_temp <- which.min(abs(fit$y - y2))

          if (j != target) {
            x1 <- fit$x[x1_temp] + data$thickness[j]
            x2 <- fit$x[x2_temp] + data$thickness[j]
          }
          if (j == target) {
            if (n < target) {
              x1 <- fit$x[x1_temp] + data$thickness[target] - data$sample_offset[target]
              x2 <- fit$x[x2_temp] + data$thickness[target] - data$sample_offset[target]
            }
            if (n > target) {
              x1 <- fit$x[x1_temp] + data$sample_offset[target]
              x2 <- fit$x[x2_temp] + data$sample_offset[target]
            }
          }

          C1_temp <- which.min(abs(fit$x - x1))
          C2_temp <- which.min(abs(fit$x - x2))
          y1 <- fit$y[C1_temp]
          y2 <- fit$y[C2_temp]
        }
        r <- y1 - y2
      }

      ### ----
      # the target layer itself: dose from both sides of the sample position
      if (n == target) {
        x1 <- data$sample_offset[target]
        x2 <- abs(data$thickness[target] - data$sample_offset[target])
        C1_temp <- which.min(abs(interpol[[x]]$x - x1))
        C2_temp <- which.min(abs(interpol[[x]]$x - x2))
        r <- interpol[[x]]$y[C1_temp] + interpol[[x]]$y[C2_temp] - 1
      }

      return(r)
    }, c("K", "Th", "U")))
  }, as.data.frame(z_scale), 1:nrow(data))))

  ## Generate output object
  op <- setNames(vector(mode = "list", length = 17),
                 nm = c("K","K_se","Th","Th_se","U","U_se","sum","sum_se",
                        "K_inf","K_inf_se","Th_inf","Th_inf_se","U_inf","U_inf_se","sum_inf","sum_inf_se",
                        "contrib"))

  # fractional dose rate, attenuated for water content (factor 1.14 for gamma)
  op$K <- Inf_frac$K * dose_rate$K / (1 + 1.14 * data$water_content / 100)
  op$K_se <- op$K * sqrt(dose_rate$K_re^2 + (data$water_content_se / data$water_content)^2)
  op$Th <- Inf_frac$Th * dose_rate$Th / (1 + 1.14 * data$water_content / 100)
  op$Th_se <- op$Th * sqrt(dose_rate$Th_re^2 + (data$water_content_se / data$water_content)^2)
  op$U <- Inf_frac$U * dose_rate$U / (1 + 1.14 * data$water_content / 100)
  op$U_se <- op$U * sqrt(dose_rate$U_re^2 + (data$water_content_se / data$water_content)^2)
  op$sum <- op$K + op$Th + op$U
  op$sum_se <- sqrt(op$K_se^2 + op$Th_se^2 + op$U_se^2)

  # infinite matrix dose rate
  op$K_inf <- op$K / Inf_frac$K
  op$K_inf_se <- op$K_inf * sqrt(dose_rate$K_re^2 + (data$water_content_se / data$water_content)^2)
  op$Th_inf <- op$Th / Inf_frac$Th
  op$Th_inf_se <- op$Th_inf * sqrt(dose_rate$Th_re^2 + (data$water_content_se / data$water_content)^2)
  op$U_inf <- op$U / Inf_frac$U
  op$U_inf_se <- op$U_inf * sqrt(dose_rate$U_re^2 + (data$water_content_se / data$water_content)^2)
  op$sum_inf <- op$K_inf + op$Th_inf + op$U_inf
  op$sum_inf_se <- sqrt(op$K_inf_se^2 + op$Th_inf_se^2 + op$U_inf_se^2)

  ## Calculate the total dose rates
  # append a TOTAL row to each value/error column pair
  for(i in seq(1, length(op)-1, 2)) {
    op[[i]] <- c(op[[i]], sum(op[[i]]))
    op[[i+1]] <- c(op[[i+1]], sqrt(sum(op[[i+1]]^2)))
  }

  ## Calculate contributions for each layer
  op$contrib <- op$sum[1:nrow(data)] / op$sum[nrow(data)+1] * 100
  op$contrib <- c(op$contrib, sum(op$contrib))

  # Cast to data.frame
  op <- as.data.frame(do.call(cbind, op))

  ## ------------------------------------------------------------------------ ##
  ## CONSOLE OUTPUT
  ## ------------------------------------------------------------------------ ##
  if (verbose) {
    cat(paste0("\n [scale_GammaDose()]\n\n"))
    cat(" ----\n")
    cat(" Conversion factors:", conversion_factors, "\n")
    cat(" Gamma dose fractions:", fractional_gamma_dose, "\n")
    cat(" Target layer:", data$id[target], "\n\n")

    cat(" ---- Infinite matrix gamma dose rate per layer ----\n\n")
    print(data.frame(ID = data$id,
                     `K (Gy/ka)` = paste0(f(op$K_inf[-(nrow(data)+1)]), "\u00b1", f(op$K_inf_se[-(nrow(data)+1)])),
                     `Th (Gy/ka)` = paste0(f(op$Th_inf[-(nrow(data)+1)]), "\u00b1", f(op$Th_inf_se[-(nrow(data)+1)])),
                     `U (Gy/ka)` = paste0(f(op$U_inf[-(nrow(data)+1)]), "\u00b1", f(op$U_inf_se[-(nrow(data)+1)])),
                     `Total (Gy/ka)` = f(op$sum_inf[-(nrow(data)+1)]),
                     check.names = FALSE
    ))

    cat("\n")
    cat(sprintf(" ---- Scaled gamma dose rate for target layer: %s ----\n\n", data$id[target]))
    print(data.frame(ID = c(data$id, "TOTAL"),
                     `K (Gy/ka)` = paste0(f(op$K), "\u00b1", f(op$K_se)),
                     `Th (Gy/ka)` = paste0(f(op$Th), "\u00b1", f(op$Th_se)),
                     `U (Gy/ka)` = paste0(f(op$U), "\u00b1", f(op$U_se)),
                     `Contribution (%)` = round(op$contrib, 1),
                     check.names = FALSE
    ))

    cat("\n ----\n")
    cat(" Infinite matrix gamma dose rate:\t",
        f(op$sum_inf[target]), "\u00b1", f(op$sum_inf_se[target]), "Gy/ka \n")
    cat(" Scaled gamma dose rate:\t\t",
        f(op$sum[length(op$sum)]), "\u00b1", f(op$sum_se[length(op$sum_se)]), "Gy/ka")
    cat("\n\n")
  }

  ## ------------------------------------------------------------------------ ##
  ## PLOT
  ## ------------------------------------------------------------------------ ##
  if (plot) {
    # save and recover plot parameters
    par.old <- par(no.readonly = TRUE)
    on.exit(par(par.old))

    if (plot_single)
      layout(matrix(c(1,1, 2, 3, 4, 5,
                      1,1, 2, 3, 4, 5,
                      1,1, 6, 6, 6, 6,
                      1,1, 6, 6, 6, 6),
                    ncol = 6, byrow = TRUE))

    ## Plot 1 - Layer setup
    ## --------------------------------------------------------------

    ## Global plot settings
    if (plot_single)
      par(mar = c(2, 5, 1, 4) + 0.1)
    else
      par(mar = c(2, 5, 4, 4) + 0.1)

    plot(NA, NA,
         main = ifelse(plot_single, "", "Profile structure"),
         xlim = c(0, 1),
         ylim = rev(range(pretty(c(sum(data$thickness), 0)))),
         xaxt = "n",
         xlab = "",
         ylab = "Depth below surface of uppermost layer (cm)",
         bty = "n",
         xaxs = "i")

    # x-axis label
    title(xlab = "Horizontal extent (a.u.)", line = 0)

    # horizontal layer lines
    abline(h = c(0, cumsum(data$thickness), sum(data$thickness)),
           lty = 1, col = "grey50", xpd = FALSE)

    # layer names
    mtext(side = 2,
          at = c(0, cumsum(data$thickness) - data$thickness / 2, sum(data$thickness)),
          text = c("", data$id, ""),
          las = 1, line = -5, cex = 0.75, padj = 0.3, col = "#428bca")

    # right y-axis
    axis(side = 4,
         at = c(0, cumsum(data$thickness), sum(data$thickness)),
         labels = FALSE, tck = -0.01)

    # right y-axis labels
    mtext(side = 4,
          at = c(0, cumsum(data$thickness) - data$thickness / 2, sum(data$thickness)),
          text = c("", paste(data$thickness, "cm"), ""),
          las = 1, line = ifelse(plot_single, -4, 0.5), cex = 0.8, col = "#b22222")

    # fill gap between lowermost layer and max range of pretty xlim
    polygon(x = c(0, 1, 1, 0),
            y = c(sum(data$thickness), sum(data$thickness),
                  max(range(pretty(c(sum(data$thickness), 0)))),
                  max(range(pretty(c(sum(data$thickness), 0))))),
            density = 10, angle = 45
    )

    # add sample
    points(x = 0.5,
           y = sum(data$thickness[1:target]) - data$sample_offset[target],
           pch = 13, col = "#b22222", cex = 3, lwd = 2)

    ## PLOT 2 - Nuclide contents
    ## --------------------------------------------------------------

    # global plot settings
    if (plot_single) {
      par(
        mar = c(4, 2, 3, 0.5) + 0.1,
        cex = 0.6,
        # oma = c(1, 1, 1, 1) + 0.1,
        pch = 16)
    } else {
      par(par.old)
      par(
        mfrow = c(1, 4),
        mar = c(4, 5, 0, 0) + 0.1,
        oma = c(1, 1, 1, 1) + 0.1,
        pch = 16)
    }

    # calculate fancy x-axis limits by considering the error bars
    calc_xlim <- function(x, se) {
      range(pretty(c(x - se, x + se)))
    }

    # horizontal error bars
    plot_error <- function(x, se) {
      segments(x - se, length(x):1, x + se, length(x):1)
      epsilon <- 0.1
      segments(x - se, length(x):1 - epsilon, x - se, length(x):1 + epsilon)
      segments(x + se, length(x):1 - epsilon, x + se, length(x):1 + epsilon)
    }

    # plot labels
    cols <- c("K", "Th", "U", "water_content")
    xlabs <- c("K content (%)", "Th content (ppm)", "U content (ppm)", "Water content (%)")

    # main plot
    for (i in 1:length(cols)) {
      # main
      plot(NA, NA,
           xlim = calc_xlim(data[[cols[i]]], data[[paste0(cols[i], "_se")]]),
           ylim = c(1, nrow(data)),
           ylab = "",
           xlab = xlabs[i],
           yaxt = "n")

      # vertical orientation lines
      abline(v = axTicks(side = 1), col = "grey", lty = 3, xpd = FALSE)

      # data points
      points(data[[cols[i]]], nrow(data):1, cex = 1.5)

      # errors
      plot_error(data[[cols[i]]], data[[paste0(cols[i], "_se")]])

      # y-axis label for the first plot
      if (i == 1)
        axis(2, at = nrow(data):1, labels = data$id, las = 1)
    }

    ## PLOT 3 - Contribution
    ## --------------------------------------------------------------

    ## Global plot settings
    # recover standard plot settings first
    if (plot_single) {
      par(mar = c(5, 5, 1, 6) + 0.1, cex = 0.7)
    } else {
      par(par.old)
      par(mar = c(5, 8, 4, 4) + 0.1, cex = settings$cex)
    }

    ## Set colors: target layer is blue, all other grey
    cols <- c("grey", "#428bca")
    pos <- rev(as.numeric(!is.na(data$sample_offset)) + 1)

    ## Contributions of each layer
    bp <- barplot(height = op$contrib[(nrow(op)-1):1],
                  horiz = TRUE,
                  main = ifelse(plot_single, "", settings$main),
                  xlab = settings$xlab,
                  xlim = range(pretty(op$contrib[1:(nrow(op)-1)])),
                  col = cols[pos])

    # layer names
    mtext(side = 2, at = bp,
          line = ifelse(plot_single, 3.5, 3),
          las = 1,
          cex = ifelse(plot_single, 0.7, 0.8),
          text = rev(data$id))

    # contribution percentage
    mtext(side = 2, at = rev(bp),
          text = paste(signif(op$contrib[1:(nrow(op) - 1)], 2), "%"),
          col = "#b22222",
          las = 1, line = 0.5, cex = 0.7)

    # absolute dose rate values (right side)
    mtext(side = 4, at = rev(bp),
          text = paste(c(" ", rep("+", nrow(op) - 2)), f(op$sum[1:(nrow(op) - 1)]), "Gy/ka"),
          col = "black",
          las = 1, line = -0.5, cex = 0.7)

    # sum of absolute dose rate values (-> scaled total gamma dose rate)
    mtext(side = 4, at = min(bp) - diff(bp)[1] / 2,
          text = paste("=", f(op$sum[nrow(op)]), "Gy/ka"),
          col = "#b22222",
          las = 1, line = ifelse(plot_single, -0.5, -1),
          cex = ifelse(plot_single, 0.7, 0.8))

    # recover old plot parameters
    par(par.old)
  }

  ## ------------------------------------------------------------------------ ##
  ## RETURN VALUE
  ## ------------------------------------------------------------------------ ##

  ## Infinity matrix dose rate table
  infinite_matrix <- data.frame(
    ID = data$id,
    K = op$K_inf[-(nrow(data)+1)],
    K_err = op$K_inf_se[-(nrow(data)+1)],
    Th = op$Th_inf[-(nrow(data)+1)],
    Th_err = op$Th_inf_se[-(nrow(data)+1)],
    U = op$U_inf[-(nrow(data)+1)],
    U_err = op$U_inf_se[-(nrow(data)+1)],
    Total = op$sum_inf[-(nrow(data)+1)]
  )

  ## Scaled dose rate table
  scaled_dose_rate <- data.frame(
    ID = c(data$id, "TOTAL"),
    K = op$K,
    K_err = op$K_se,
    Th = op$Th,
    Th_err = op$Th_se,
    U = op$U,
    U_err = op$U_se,
    Contribution = op$contrib
  )

  ## Summary table with the most important results
  summary <- data.frame(
    id = data$id[target],
    dose_rate_K = op$K[nrow(op)],
    dose_rate_K_err = op$K_se[nrow(op)],
    dose_rate_Th = op$Th[nrow(op)],
    dose_rate_Th_err = op$Th_se[nrow(op)],
    dose_rate_U = op$U[nrow(op)],
    dose_rate_U_err = op$U_se[nrow(op)],
    dose_rate_total = op$sum[length(op$sum)],
    dose_rate_total_err = op$sum_se[length(op$sum_se)]
  )

  ## Create RLum.Results object (return object)
  results <- set_RLum(class = "RLum.Results",
                      originator = "scale_GammaDose",
                      data = list(
                        summary = summary,
                        data = data,
                        dose_rates = list(
                          infinite_matrix = infinite_matrix,
                          scaled_dose_rate = scaled_dose_rate
                        ),
                        tables = list(
                          conversion_factors = conv_fac,
                          distances = distance,
                          layer_fractional_dose_rate = layer_fracDoseRate,
                          dose_rates = dose_rate,
                          # NOTE(review): 'infnite' is a typo, but the name is
                          # part of the public output and is kept for backward
                          # compatibility with existing callers
                          infnite_matrix_dose_fractions = Inf_frac,
                          z_scale = z_scale
                        ),
                        args = as.list(sys.call()[-1]),
                        call = sys.call()),
                      info = settings$info
  )

  return(results)
}
Luminescence/R/plot_AbanicoPlot.R0000644000176200001440000040422614264017373016460 0ustar liggesusers#' Function to create an Abanico Plot.
#' #' A plot is produced which allows comprehensive presentation of data precision #' and its dispersion around a central value as well as illustration of a #' kernel density estimate, histogram and/or dot plot of the dose values. #' #' The Abanico Plot is a combination of the classic Radial Plot #' (`plot_RadialPlot`) and a kernel density estimate plot (e.g #' `plot_KDE`). It allows straightforward visualisation of data precision, #' error scatter around a user-defined central value and the combined #' distribution of the values, on the actual scale of the measured data (e.g. #' seconds, equivalent dose, years). The principle of the plot is shown in #' Galbraith & Green (1990). The function authors are thankful for the #' thought provoking figure in this article. #' #' The semi circle (z-axis) of the classic Radial Plot is bent to a straight #' line here, which actually is the basis for combining this polar (radial) #' part of the plot with any other Cartesian visualisation method #' (KDE, histogram, PDF and so on). Note that the plot allows displaying #' two measures of distribution. One is the 2-sigma #' bar, which illustrates the spread in value errors, and the other is the #' polygon, which stretches over both parts of the Abanico Plot (polar and #' Cartesian) and illustrates the actual spread in the values themselves. #' #' Since the 2-sigma-bar is a polygon, it can be (and is) filled with shaded #' lines. To change density (lines per inch, default is 15) and angle (default #' is 45 degrees) of the shading lines, specify these parameters. See #' `?polygon()` for further help. #' #' The Abanico Plot supports other than the weighted mean as measure of #' centrality. When it is obvious that the data #' is not (log-)normally distributed, the mean (weighted or not) cannot be a #' valid measure of centrality and hence central dose. Accordingly, the median #' and the weighted median can be chosen as well to represent a proper measure #' of centrality (e.g. 
`centrality = "median.weighted"`). Also #' user-defined numeric values (e.g. from the central age model) can be used if #' this appears appropriate. #' #' The proportion of the polar part and the cartesian part of the Abanico Plot #' can be modified for display reasons (`plot.ratio = 0.75`). By default, #' the polar part spreads over 75 \% and leaves 25 \% for the part that #' shows the KDE graph. #' #' #' A statistic summary, i.e. a collection of statistic measures of #' centrality and dispersion (and further measures) can be added by specifying #' one or more of the following keywords: #' #' - `"n"` (number of samples) #' - `"mean"` (mean De value) #' - `"median"` (median of the De values) #' - `"sd.rel"` (relative standard deviation in percent) #' - `"sd.abs"` (absolute standard deviation) #' - `"se.rel"` (relative standard error) #' - `"se.abs"` (absolute standard error) #' - `"in.2s"` (percent of samples in 2-sigma range) #' - `"kurtosis"` (kurtosis) #' - `"skewness"` (skewness) #' #' **Note** that the input data for the statistic summary is sent to the function #' `calc_Statistics()` depending on the log-option for the z-scale. If #' `"log.z = TRUE"`, the summary is based on the logarithms of the input #' data. If `"log.z = FALSE"` the linearly scaled data is used. #' #' **Note** as well, that `"calc_Statistics()"` calculates these statistic #' measures in three different ways: `unweighted`, `weighted` and #' `MCM-based` (i.e., based on Monte Carlo Methods). By default, the #' MCM-based version is used. If you wish to use another method, indicate this #' with the appropriate keyword using the argument `summary.method`. #' #' The optional parameter `layout` allows to modify the entire plot more #' sophisticated. Each element of the plot can be addressed and its properties #' can be defined. This includes font type, size and decoration, colours and #' sizes of all plot items. To infer the definition of a specific layout style #' cf. 
`get_Layout()` or type e.g., for the layout type `"journal"` #' `get_Layout("journal")`. A layout type can be modified by the user by #' assigning new values to the list object. #' #' It is possible for the z-scale to specify where ticks are to be drawn #' by using the parameter `at`, e.g. `at = seq(80, 200, 20)`, cf. function #' documentation of `axis`. Specifying tick positions manually overrides a #' `zlim`-definition. #' #' @param data [data.frame] or [RLum.Results-class] object (**required**): #' for `data.frame` two columns: De (`data[,1]`) and De error (`data[,2]`). #' To plot several data sets in one plot the data sets must be provided as #' `list`, e.g. `list(data.1, data.2)`. #' #' @param na.rm [logical] (*with default*): #' exclude NA values from the data set prior to any further operations. #' #' @param log.z [logical] (*with default*): #' Option to display the z-axis in logarithmic scale. Default is `TRUE`. #' #' @param z.0 [character] or [numeric]: #' User-defined central value, used for centring of data. One out of `"mean"`, #' `"mean.weighted"` and `"median"` or a numeric value (not its logarithm). #' Default is `"mean.weighted"`. #' #' @param dispersion [character] (*with default*): #' measure of dispersion, used for drawing the scatter polygon. One out of #' - `"qr"` (quartile range), #' - `"pnn"` (symmetric percentile range with `nn` the lower percentile, e.g. #' - `"p05"` depicting the range between 5 and 95 %), #' - `"sd"` (standard deviation) and #' - `"2sd"` (2 standard deviations), #' #' The default is `"qr"`. Note that `"sd"` and `"2sd"` are only meaningful in #' combination with `"z.0 = 'mean'"` because the unweighted mean is used to #' centre the polygon. #' #' @param plot.ratio [numeric]: #' Relative space, given to the radial versus the cartesian plot part, #' default is `0.75`. #' #' @param rotate [logical]: #' Option to turn the plot by 90 degrees. #' #' @param mtext [character]: #' additional text below the plot title. 
#'
#' @param summary [character] (*optional*):
#' add statistic measures of centrality and dispersion to the plot.
#' Can be one or more of several keywords. See details for available keywords.
#' Results differ depending on the log-option for the z-scale (see details).
#'
#' @param summary.pos [numeric] or [character] (*with default*):
#' optional position coordinates or keyword (e.g. `"topright"`) for the
#' statistical summary. Alternatively, the keyword `"sub"` may be
#' specified to place the summary below the plot header. However, this latter
#' option is only possible if `mtext` is not used.
#'
#' @param summary.method [character] (*with default*):
#' keyword indicating the method used to calculate the statistic summary.
#' One out of
#' - `"unweighted"`,
#' - `"weighted"` and
#' - `"MCM"`.
#'
#' See [calc_Statistics] for details.
#'
#' @param legend [character] vector (*optional*):
#' legend content to be added to the plot.
#'
#' @param legend.pos [numeric] or [character] (*with default*):
#' optional position coordinates or keyword (e.g. `"topright"`)
#' for the legend to be plotted.
#'
#' @param stats [character]:
#' additional labels of statistically important values in the plot.
#' One or more out of the following:
#' - `"min"`,
#' - `"max"`,
#' - `"median"`.
#'
#' @param rug [logical]:
#' Option to add a rug to the KDE part, to indicate the location of individual values.
#'
#' @param kde [logical]:
#' Option to add a KDE plot to the dispersion part, default is `TRUE`.
#'
#' @param hist [logical]:
#' Option to add a histogram to the dispersion part. Only meaningful when not
#' more than one data set is plotted.
#'
#' @param dots [logical]:
#' Option to add a dot plot to the dispersion part. If number of dots exceeds
#' space in the dispersion part, a square indicates this.
#'
#' @param boxplot [logical]:
#' Option to add a boxplot to the dispersion part, default is `FALSE`.
#' #' @param y.axis [logical]: Option to hide standard y-axis labels and show 0 only. #' Useful for data with small scatter. If you want to suppress the y-axis entirely #' please use `yaxt == 'n'` (the standard [graphics::par] setting) instead. #' #' @param error.bars [logical]: #' Option to show De-errors as error bars on De-points. Useful in combination #' with `y.axis = FALSE, bar.col = "none"`. #' #' @param bar [numeric] (*with default*): #' option to add one or more dispersion bars (i.e., bar showing the 2-sigma range) #' centred at the defined values. By default a bar is drawn according to `"z.0"`. #' To omit the bar set `"bar = FALSE"`. #' #' @param bar.col [character] or [numeric] (*with default*): #' colour of the dispersion bar. Default is `"grey60"`. #' #' @param polygon.col [character] or [numeric] (*with default*): #' colour of the polygon showing the data scatter. Sometimes this #' polygon may be omitted for clarity. To disable it use `FALSE` or #' `polygon = FALSE`. Default is `"grey80"`. #' #' @param line [numeric]: #' numeric values of the additional lines to be added. #' #' @param line.col [character] or [numeric]: #' colour of the additional lines. #' #' @param line.lty [integer]: #' line type of additional lines #' #' @param line.label [character]: #' labels for the additional lines. #' #' @param grid.col [character] or [numeric] (*with default*): #' colour of the grid lines (originating at `[0,0]` and stretching to #' the z-scale). To disable grid lines use `FALSE`. Default is `"grey"`. #' #' @param frame [numeric] (*with default*): #' option to modify the plot frame type. Can be one out of #' - `0` (no frame), #' - `1` (frame originates at 0,0 and runs along min/max isochrons), #' - `2` (frame embraces the 2-sigma bar), #' - `3` (frame embraces the entire plot as a rectangle). #' #' Default is `1`. #' #' @param bw [character] (*with default*): #' bin-width for KDE, choose a numeric value for manual setting. 
#' #' @param interactive [logical] (*with default*): #' create an interactive abanico plot (requires the `'plotly'` package) #' #' @param ... Further plot arguments to pass (see [graphics::plot.default]). Supported are: `main`, `sub`, `ylab`, `xlab`, `zlab`, `zlim`, `ylim`, `cex`, `lty`, `lwd`, `pch`, `col`, `tck`, `tcl`, `at`, `breaks`. `xlab` must be a vector of length two, specifying the upper and lower x-axes labels. #' #' @return #' returns a plot object and, optionally, a list with plot calculus data. #' #' @section Function version: 0.1.17 #' #' @author #' Michael Dietze, GFZ Potsdam (Germany)\cr #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)\cr #' Inspired by a plot introduced by Galbraith & Green (1990) #' #' @seealso [plot_RadialPlot], [plot_KDE], [plot_Histogram], [plot_ViolinPlot] #' #' @references #' Galbraith, R. & Green, P., 1990. Estimating the component ages #' in a finite mixture. International Journal of Radiation Applications and #' Instrumentation. Part D. Nuclear Tracks and Radiation Measurements, 17 (3), #' 197-206. #' #' Dietze, M., Kreutzer, S., Burow, C., Fuchs, M.C., Fischer, M., Schmidt, C., 2015. #' The abanico plot: visualising chronometric data with individual standard errors. #' Quaternary Geochronology. 
doi:10.1016/j.quageo.2015.09.003 #' #' @examples #' #' ## load example data and recalculate to Gray #' data(ExampleData.DeValues, envir = environment()) #' ExampleData.DeValues <- ExampleData.DeValues$CA1 #' #' ## plot the example data straightforward #' plot_AbanicoPlot(data = ExampleData.DeValues) #' #' ## now with linear z-scale #' plot_AbanicoPlot(data = ExampleData.DeValues, #' log.z = FALSE) #' #' ## now with output of the plot parameters #' plot1 <- plot_AbanicoPlot(data = ExampleData.DeValues, #' output = TRUE) #' str(plot1) #' plot1$zlim #' #' ## now with adjusted z-scale limits #' plot_AbanicoPlot(data = ExampleData.DeValues, #' zlim = c(10, 200)) #' #' ## now with adjusted x-scale limits #' plot_AbanicoPlot(data = ExampleData.DeValues, #' xlim = c(0, 20)) #' #' ## now with rug to indicate individual values in KDE part #' plot_AbanicoPlot(data = ExampleData.DeValues, #' rug = TRUE) #' #' ## now with a smaller bandwidth for the KDE plot #' plot_AbanicoPlot(data = ExampleData.DeValues, #' bw = 0.04) #' #' ## now with a histogram instead of the KDE plot #' plot_AbanicoPlot(data = ExampleData.DeValues, #' hist = TRUE, #' kde = FALSE) #' #' ## now with a KDE plot and histogram with manual number of bins #' plot_AbanicoPlot(data = ExampleData.DeValues, #' hist = TRUE, #' breaks = 20) #' #' ## now with a KDE plot and a dot plot #' plot_AbanicoPlot(data = ExampleData.DeValues, #' dots = TRUE) #' #' ## now with user-defined plot ratio #' plot_AbanicoPlot(data = ExampleData.DeValues, #' plot.ratio = 0.5) #' ## now with user-defined central value #' plot_AbanicoPlot(data = ExampleData.DeValues, #' z.0 = 70) #' #' ## now with median as central value #' plot_AbanicoPlot(data = ExampleData.DeValues, #' z.0 = "median") #' #' ## now with the 17-83 percentile range as definition of scatter #' plot_AbanicoPlot(data = ExampleData.DeValues, #' z.0 = "median", #' dispersion = "p17") #' #' ## now with user-defined green line for minimum age model #' CAM <- 
calc_CentralDose(ExampleData.DeValues, #' plot = FALSE) #' #' plot_AbanicoPlot(data = ExampleData.DeValues, #' line = CAM, #' line.col = "darkgreen", #' line.label = "CAM") #' #' ## now create plot with legend, colour, different points and smaller scale #' plot_AbanicoPlot(data = ExampleData.DeValues, #' legend = "Sample 1", #' col = "tomato4", #' bar.col = "peachpuff", #' pch = "R", #' cex = 0.8) #' #' ## now without 2-sigma bar, polygon, grid lines and central value line #' plot_AbanicoPlot(data = ExampleData.DeValues, #' bar.col = FALSE, #' polygon.col = FALSE, #' grid.col = FALSE, #' y.axis = FALSE, #' lwd = 0) #' #' ## now with direct display of De errors, without 2-sigma bar #' plot_AbanicoPlot(data = ExampleData.DeValues, #' bar.col = FALSE, #' ylab = "", #' y.axis = FALSE, #' error.bars = TRUE) #' #' ## now with user-defined axes labels #' plot_AbanicoPlot(data = ExampleData.DeValues, #' xlab = c("Data error (%)", #' "Data precision"), #' ylab = "Scatter", #' zlab = "Equivalent dose [Gy]") #' #' ## now with minimum, maximum and median value indicated #' plot_AbanicoPlot(data = ExampleData.DeValues, #' stats = c("min", "max", "median")) #' #' ## now with a brief statistical summary as subheader #' plot_AbanicoPlot(data = ExampleData.DeValues, #' summary = c("n", "in.2s")) #' #' ## now with another statistical summary #' plot_AbanicoPlot(data = ExampleData.DeValues, #' summary = c("mean.weighted", "median"), #' summary.pos = "topleft") #' #' ## now a plot with two 2-sigma bars for one data set #' plot_AbanicoPlot(data = ExampleData.DeValues, #' bar = c(30, 100)) #' #' ## now the data set is split into sub-groups, one is manipulated #' data.1 <- ExampleData.DeValues[1:30,] #' data.2 <- ExampleData.DeValues[31:62,] * 1.3 #' #' ## now a common dataset is created from the two subgroups #' data.3 <- list(data.1, data.2) #' #' ## now the two data sets are plotted in one plot #' plot_AbanicoPlot(data = data.3) #' #' ## now with some graphical modification #' 
plot_AbanicoPlot(data = data.3, #' z.0 = "median", #' col = c("steelblue4", "orange4"), #' bar.col = c("steelblue3", "orange3"), #' polygon.col = c("steelblue1", "orange1"), #' pch = c(2, 6), #' angle = c(30, 50), #' summary = c("n", "in.2s", "median")) #' #' ## create Abanico plot with predefined layout definition #' plot_AbanicoPlot(data = ExampleData.DeValues, #' layout = "journal") #' #' ## now with predefined layout definition and further modifications #' plot_AbanicoPlot( #' data = data.3, #' z.0 = "median", #' layout = "journal", #' col = c("steelblue4", "orange4"), #' bar.col = adjustcolor(c("steelblue3", "orange3"), #' alpha.f = 0.5), #' polygon.col = c("steelblue3", "orange3")) #' #' ## for further information on layout definitions see documentation #' ## of function get_Layout() #' #' ## now with manually added plot content #' ## create empty plot with numeric output #' AP <- plot_AbanicoPlot(data = ExampleData.DeValues, #' pch = NA, #' output = TRUE) #' #' ## identify data in 2 sigma range #' in_2sigma <- AP$data[[1]]$data.in.2s #' #' ## restore function-internal plot parameters #' par(AP$par) #' #' ## add points inside 2-sigma range #' points(x = AP$data[[1]]$precision[in_2sigma], #' y = AP$data[[1]]$std.estimate.plot[in_2sigma], #' pch = 16) #' #' ## add points outside 2-sigma range #' points(x = AP$data[[1]]$precision[!in_2sigma], #' y = AP$data[[1]]$std.estimate.plot[!in_2sigma], #' pch = 1) #' #' @md #' @export plot_AbanicoPlot <- function( data, na.rm = TRUE, log.z = TRUE, z.0 = "mean.weighted", dispersion = "qr", plot.ratio = 0.75, rotate = FALSE, mtext, summary, summary.pos, summary.method = "MCM", legend, legend.pos, stats, rug = FALSE, kde = TRUE, hist = FALSE, dots = FALSE, boxplot = FALSE, y.axis = TRUE, error.bars = FALSE, bar, bar.col, polygon.col, line, line.col, line.lty, line.label, grid.col, frame = 1, bw = "SJ", interactive = FALSE, ... 
) { ## check data and parameter consistency-------------------------------------- ## Homogenise input data format if(is(data, "list") == FALSE) { data <- list(data) } ## Check input data for(i in 1:length(data)) { if(is(data[[i]], "RLum.Results") == FALSE & is(data[[i]], "data.frame") == FALSE) { stop(paste("[plot_AbanicoPlot()] Input data format is neither", "'data.frame' nor 'RLum.Results'")) } else { if(is(data[[i]], "RLum.Results")) data[[i]] <- get_RLum(data[[i]], "data") data[[i]] <- data[[i]][,c(1:2)] } } ## optionally, remove NA-values if(na.rm == TRUE) { for(i in 1:length(data)) { n.NA <- sum(!complete.cases(data[[i]])) if(n.NA == 1) {message(paste0("[plot_AbanicoPlot()] data set (", i, "): 1 NA value excluded.")) } else if(n.NA > 1) { message(paste0("[plot_AbanicoPlot()] data set (", i,"): ", n.NA, " NA values excluded.")) } data[[i]] <- na.exclude(data[[i]]) } } ##AFTER NA removal, we should check the data set carefully again ... ##(1) ##check if there is still data left in the entire set if(all(sapply(data, nrow) == 0)){ try(stop("[plot_AbanicoPlot()] Nothing plotted, your data set is empty!", call. = FALSE)) return(NULL) } ##(2) ##check for sets with only 1 row or 0 rows at all else if(any(sapply(data, nrow) <= 1)){ ##select problematic sets and remove the entries from the list NArm.id <- which(sapply(data, nrow) <= 1) data[NArm.id] <- NULL warning(paste0("[plot_AbanicoPlot()] Data sets ", paste(NArm.id, collapse = ", "), " are found to be empty or consisting of only 1 row. Sets removed!")) rm(NArm.id) ##unfortunately, the data set might become now empty at all if(length(data) == 0){ try(stop("[plot_AbanicoPlot()] After removing invalid entries, nothing is plotted!", call. 
= FALSE)) return(NULL) } } ## check for zero-error values for(i in 1:length(data)) { if(length(data[[i]]) < 2) { stop("Data without errors cannot be displayed!") } if(sum(data[[i]][,2] == 0) > 0) { data[[i]] <- data[[i]][data[[i]][,2] > 0,] if(nrow(data[[i]]) < 1) { stop("[plot_AbanicoPlot()] Data set contains only values with zero errors.", call. = FALSE) } warning("[plot_AbanicoPlot()] values with zero errors cannot be displayed and were removed!",call. = FALSE) } } ## save original plot parameters and restore them upon end or stop par.old.full <- par(no.readonly = TRUE) cex_old <- par()$cex ## this ensures par() is respected for several plots on one page if(sum(par()$mfrow) == 2 & sum(par()$mfcol) == 2){ on.exit(par(par.old.full)) } ## check/set layout definitions if("layout" %in% names(list(...))) { layout = get_Layout(layout = list(...)$layout) } else { layout <- get_Layout(layout = "default") } if(missing(stats) == TRUE) { stats <- numeric(0) } if(missing(bar) == TRUE) { bar <- rep(TRUE, length(data)) } if(missing(bar.col) == TRUE) { bar.fill <- rep(x = rep(x = layout$abanico$colour$bar.fill, length.out = length(data)), length(bar)) bar.line <- rep(rep(layout$abanico$colour$bar.line, length.out = length(data)), length(bar)) } else { bar.fill <- bar.col bar.line <- NA } if(missing(polygon.col) == TRUE) { polygon.fill <- rep(layout$abanico$colour$poly.fill, length.out = length(data)) polygon.line <- rep(layout$abanico$colour$poly.line, length.out = length(data)) } else { polygon.fill <- polygon.col polygon.line <- NA } if(missing(grid.col) == TRUE) { grid.major <- layout$abanico$colour$grid.major grid.minor <- layout$abanico$colour$grid.minor } else { if(length(grid.col) == 1) { grid.major <- grid.col[1] grid.minor <- grid.col[1] } else { grid.major <- grid.col[1] grid.minor <- grid.col[2] } } if(missing(summary) == TRUE) { summary <- c("n", "in.2s") } if(missing(summary.pos) == TRUE) { summary.pos <- "sub" } if(missing(mtext) == TRUE) { mtext <- "" } ## create 
preliminary global data set De.global <- data[[1]][,1] if(length(data) > 1) { for(i in 2:length(data)) { De.global <- c(De.global, data[[i]][,1]) } } ## calculate major preliminary tick values and tick difference extraArgs <- list(...) if("zlim" %in% names(extraArgs)) { limits.z <- extraArgs$zlim } else { z.span <- (mean(De.global) * 0.5) / (sd(De.global) * 100) z.span <- ifelse(z.span > 1, 0.9, z.span) limits.z <- c((ifelse(min(De.global) <= 0, 1.1, 0.9) - z.span) * min(De.global), (1.1 + z.span) * max(De.global)) } if("at" %in% names(extraArgs)) { ticks <- extraArgs$at } else { ticks <- round(pretty(limits.z, n = 5), 3) } if("breaks" %in% names(extraArgs)) { breaks <- extraArgs$breaks } else { breaks <- "Sturges" } ## check/set bw-parameter for(i in 1:length(data)) { bw.test <- try(density(x = data[[i]][,1], bw = bw), silent = TRUE) if(grepl(pattern = "Error", x = bw.test[1]) == TRUE) { bw <- "nrd0" warning("[plot_AbanicoPlot()] Option for bw not possible. Set to nrd0!", call. = FALSE) } } if ("fun" %in% names(extraArgs)) { fun <- list(...)$fun } else { fun <- FALSE } ## check for negative values, stop function, but do not stop if(min(De.global) < 0) { if("zlim" %in% names(extraArgs)) { De.add <- abs(extraArgs$zlim[1]) } else { ## estimate delta De to add to all data De.add <- min(10^ceiling(log10(abs(De.global))) * 10) ## optionally readjust delta De for extreme values if(De.add <= abs(min(De.global))) { De.add <- De.add * 10 } } } else { De.add <- 0 } ## optionally add correction dose to data set and adjust error if(log.z == TRUE) { for(i in 1:length(data)) { data[[i]][,1] <- data[[i]][,1] + De.add } De.global <- De.global + De.add } ## calculate and append statistical measures -------------------------------- ## z-values based on log-option z <- lapply(1:length(data), function(x){ if(log.z == TRUE) { log(data[[x]][,1]) } else { data[[x]][,1] } }) if(is(z, "list") == FALSE) { z <- list(z) } data <- lapply(1:length(data), function(x) { cbind(data[[x]], z[[x]]) 
}) rm(z) ## calculate dispersion based on log-option se <- lapply(1:length(data), function(x, De.add){ if(log.z == TRUE) { if(De.add != 0) { data[[x]][,2] <- data[[x]][,2] / (data[[x]][,1] + De.add) } else { data[[x]][,2] / data[[x]][,1] } } else { data[[x]][,2] }}, De.add = De.add) if(is(se, "list") == FALSE) { se <- list(se) } data <- lapply(1:length(data), function(x) { cbind(data[[x]], se[[x]]) }) rm(se) ## calculate initial data statistics stats.init <- list(NA) for(i in 1:length(data)) { stats.init[[length(stats.init) + 1]] <- calc_Statistics(data = data[[i]][,3:4]) } stats.init[[1]] <- NULL ## calculate central values if(z.0 == "mean") { z.central <- lapply(1:length(data), function(x){ rep(stats.init[[x]]$unweighted$mean, length(data[[x]][,3]))}) } else if(z.0 == "median") { z.central <- lapply(1:length(data), function(x){ rep(stats.init[[x]]$unweighted$median, length(data[[x]][,3]))}) } else if(z.0 == "mean.weighted") { z.central <- lapply(1:length(data), function(x){ rep(stats.init[[x]]$weighted$mean, length(data[[x]][,3]))}) } else if(is.numeric(z.0) == TRUE) { z.central <- lapply(1:length(data), function(x){ rep(ifelse(log.z == TRUE, log(z.0), z.0), length(data[[x]][,3]))}) } else { stop("Value for z.0 not supported!") } data <- lapply(1:length(data), function(x) { cbind(data[[x]], z.central[[x]])}) rm(z.central) ## calculate precision precision <- lapply(1:length(data), function(x){ 1 / data[[x]][,4]}) if(is(precision, "list") == FALSE) {precision <- list(precision)} data <- lapply(1:length(data), function(x) { cbind(data[[x]], precision[[x]])}) rm(precision) ## calculate standardised estimate std.estimate <- lapply(1:length(data), function(x){ (data[[x]][,3] - data[[x]][,5]) / data[[x]][,4]}) if(is(std.estimate, "list") == FALSE) {std.estimate <- list(std.estimate)} data <- lapply(1:length(data), function(x) { cbind(data[[x]], std.estimate[[x]])}) ## append empty standard estimate for plotting data <- lapply(1:length(data), function(x) { 
cbind(data[[x]], std.estimate[[x]])}) rm(std.estimate) ## append optional weights for KDE curve if("weights" %in% names(extraArgs)) { if(extraArgs$weights == TRUE) { wgt <- lapply(1:length(data), function(x){ (1 / data[[x]][,2]) / sum(1 / data[[x]][,2]^2) }) if(is(wgt, "list") == FALSE) { wgt <- list(wgt) } data <- lapply(1:length(data), function(x) { cbind(data[[x]], wgt[[x]])}) rm(wgt) } else { wgt <- lapply(1:length(data), function(x){ rep(x = 1, times = nrow(data[[x]])) / sum(rep(x = 1, times = nrow(data[[x]]))) }) if(is(wgt, "list") == FALSE) { wgt <- list(wgt) } data <- lapply(1:length(data), function(x) { cbind(data[[x]], wgt[[x]])}) rm(wgt) } } else { wgt <- lapply(1:length(data), function(x){ rep(x = 1, times = nrow(data[[x]])) / sum(rep(x = 1, times = nrow(data[[x]]))) }) if(is(wgt, "list") == FALSE) { wgt <- list(wgt) } data <- lapply(1:length(data), function(x) { cbind(data[[x]], wgt[[x]])}) rm(wgt) } ## generate global data set data.global <- cbind(data[[1]], rep(x = 1, times = nrow(data[[1]]))) colnames(data.global) <- rep("", 10) if(length(data) > 1) { for(i in 2:length(data)) { data.add <- cbind(data[[i]], rep(x = i, times = nrow(data[[i]]))) colnames(data.add) <- rep("", 10) data.global <- rbind(data.global, data.add) } } ## create column names colnames(data.global) <- c("De", "error", "z", "se", "z.central", "precision", "std.estimate", "std.estimate.plot", "weights", "data set") ## calculate global data statistics stats.global <- calc_Statistics(data = data.global[,3:4]) ## calculate global central value if(z.0 == "mean") { z.central.global <- stats.global$unweighted$mean } else if(z.0 == "median") { z.central.global <- stats.global$unweighted$median } else if(z.0 == "mean.weighted") { z.central.global <- stats.global$weighted$mean } else if(is.numeric(z.0) == TRUE) { z.central.global <- ifelse(log.z == TRUE, log(z.0), z.0) } else { stop("Value for z.0 not supported!") } ## create column names for(i in 1:length(data)) { colnames(data[[i]]) <- 
c("De", "error", "z", "se", "z.central", "precision", "std.estimate", "std.estimate.plot", "weights") } ## re-calculate standardised estimate for plotting for(i in 1:length(data)) { data[[i]][,8] <- (data[[i]][,3] - z.central.global) / data[[i]][,4] } data.global.plot <- data[[1]][,8] if(length(data) > 1) { for(i in 2:length(data)) { data.global.plot <- c(data.global.plot, data[[i]][,8]) } } data.global[,8] <- data.global.plot ## print message for too small scatter if(max(abs(1 / data.global[6])) < 0.02) { small.sigma <- TRUE message("[plot_AbanicoPlot()] Attention, small standardised estimate scatter. Toggle off y.axis?") } ## read out additional arguments--------------------------------------------- extraArgs <- list(...) main <- if("main" %in% names(extraArgs)) { extraArgs$main } else { expression(paste(D[e], " distribution")) } sub <- if("sub" %in% names(extraArgs)) { extraArgs$sub } else { "" } if("xlab" %in% names(extraArgs)) { if(length(extraArgs$xlab) != 2) { if (length(extraArgs$xlab) == 3) { xlab <- c(extraArgs$xlab[1:2], "Density") } else { stop("Argmuent xlab is not of length 2!") } } else {xlab <- c(extraArgs$xlab, "Density")} } else { xlab <- c(if(log.z == TRUE) { "Relative standard error (%)" } else { "Standard error" }, "Precision", "Density") } ylab <- if("ylab" %in% names(extraArgs)) { extraArgs$ylab } else { "Standardised estimate" } zlab <- if("zlab" %in% names(extraArgs)) { extraArgs$zlab } else { expression(paste(D[e], " [Gy]")) } if("zlim" %in% names(extraArgs)) { limits.z <- extraArgs$zlim } else { z.span <- (mean(data.global[,1]) * 0.5) / (sd(data.global[,1]) * 100) z.span <- ifelse(z.span > 1, 0.9, z.span) limits.z <- c((0.9 - z.span) * min(data.global[[1]]), (1.1 + z.span) * max(data.global[[1]])) } if("xlim" %in% names(extraArgs)) { limits.x <- extraArgs$xlim } else { limits.x <- c(0, max(data.global[,6]) * 1.05) } if(limits.x[1] != 0) { limits.x[1] <- 0 warning("Lower x-axis limit not set to zero, issue corrected!") } if("ylim" %in% 
names(extraArgs)) { limits.y <- extraArgs$ylim } else { y.span <- (mean(data.global[,1]) * 10) / (sd(data.global[,1]) * 100) y.span <- ifelse(y.span > 1, 0.98, y.span) limits.y <- c(-(1 + y.span) * max(abs(data.global[,7])), (1 + y.span) * max(abs(data.global[,7]))) } cex <- if("cex" %in% names(extraArgs)) { extraArgs$cex } else { 1 } lty <- if("lty" %in% names(extraArgs)) { extraArgs$lty } else { rep(rep(2, length(data)), length(bar)) } lwd <- if("lwd" %in% names(extraArgs)) { extraArgs$lwd } else { rep(rep(1, length(data)), length(bar)) } pch <- if("pch" %in% names(extraArgs)) { extraArgs$pch } else { rep(20, length(data)) } if("col" %in% names(extraArgs)) { bar.col <- extraArgs$col kde.line <- extraArgs$col kde.fill <- NA value.dot <- extraArgs$col value.bar <- extraArgs$col value.rug <- extraArgs$col summary.col <- extraArgs$col centrality.col <- extraArgs$col } else { if(length(layout$abanico$colour$bar) == 1) { bar.col <- 1:length(data) } else { bar.col <- layout$abanico$colour$bar.col } if(length(layout$abanico$colour$kde.line) == 1) { kde.line <- 1:length(data) } else { kde.line <- layout$abanico$colour$kde.line } if(length(layout$abanico$colour$kde.fill) == 1) { kde.fill <- rep(layout$abanico$colour$kde.fill, length(data)) } else { kde.fill <- layout$abanico$colour$kde.fill } if(length(layout$abanico$colour$value.dot) == 1) { value.dot <- 1:length(data) } else { value.dot <- layout$abanico$colour$value.dot } if(length(layout$abanico$colour$value.bar) == 1) { value.bar <- 1:length(data) } else { value.bar <- layout$abanico$colour$value.bar } if(length(layout$abanico$colour$value.rug) == 1) { value.rug <- 1:length(data) } else { value.rug <- layout$abanico$colour$value.rug } if(length(layout$abanico$colour$summary) == 1) { summary.col <- 1:length(data) } else { summary.col <- layout$abanico$colour$summary } if(length(layout$abanico$colour$centrality) == 1) { centrality.col <- rep(x = 1:length(data), times = length(bar)) } else { centrality.col <- rep(x = 
layout$abanico$colour$centrality, times = length(bar)) } } ## update central line colour centrality.col <- rep(centrality.col, length(bar)) tck <- if("tck" %in% names(extraArgs)) { extraArgs$tck } else { NA } tcl <- if("tcl" %in% names(extraArgs)) { extraArgs$tcl } else { -0.5 } ## define auxiliary plot parameters ----------------------------------------- ## set space between z-axis and baseline of cartesian part if(boxplot == TRUE) { lostintranslation <- 1.03 } else { lostintranslation <- 1.03 plot.ratio <- plot.ratio * 1.05 } ## create empty plot to update plot parameters if(rotate == FALSE) { plot(NA, xlim = c(limits.x[1], limits.x[2] * (1 / plot.ratio)), ylim = limits.y, main = "", sub = "", xlab = "", ylab = "", xaxs = "i", yaxs = "i", frame.plot = FALSE, axes = FALSE) } else { plot(NA, xlim = limits.y, ylim = c(limits.x[1], limits.x[2] * (1 / plot.ratio)), main = "", sub = "", xlab = "", ylab = "", xaxs = "i", yaxs = "i", frame.plot = FALSE, axes = FALSE) } ## calculate conversion factor for plot coordinates f <- 0 ## calculate major and minor z-tick values if("at" %in% names(extraArgs)) { tick.values.major <- extraArgs$at tick.values.minor <- extraArgs$at } else { tick.values.major <- signif(pretty(limits.z, n = 5), 3) tick.values.minor <- signif(pretty(limits.z, n = 25), 3) } tick.values.major <- tick.values.major[tick.values.major >= min(tick.values.minor)] tick.values.major <- tick.values.major[tick.values.major <= max(tick.values.minor)] tick.values.major <- tick.values.major[tick.values.major >= limits.z[1]] tick.values.major <- tick.values.major[tick.values.major <= limits.z[2]] tick.values.minor <- tick.values.minor[tick.values.minor >= limits.z[1]] tick.values.minor <- tick.values.minor[tick.values.minor <= limits.z[2]] if(log.z == TRUE) { tick.values.major[which(tick.values.major==0)] <- 1 tick.values.minor[which(tick.values.minor==0)] <- 1 tick.values.major <- log(tick.values.major) tick.values.minor <- log(tick.values.minor) } ## calculate z-axis 
radius r <- max(sqrt((limits.x[2])^2 + (data.global[,7] * f)^2)) ## create z-axes labels if(log.z == TRUE) { label.z.text <- signif(exp(tick.values.major), 3) } else { label.z.text <- signif(tick.values.major, 3) } ## calculate node coordinates for semi-circle ellipse.values <- c(min(ifelse(log.z == TRUE, log(limits.z[1]), limits.z[1]), tick.values.major, tick.values.minor), max(ifelse(log.z == TRUE, log(limits.z[2]), limits.z[2]), tick.values.major, tick.values.minor)) ## correct for unpleasant value ellipse.values[ellipse.values == -Inf] <- 0 if(rotate == FALSE) { ellipse.x <- r / sqrt(1 + f^2 * (ellipse.values - z.central.global)^2) ellipse.y <- (ellipse.values - z.central.global) * ellipse.x } else { ellipse.y <- r / sqrt(1 + f^2 * (ellipse.values - z.central.global)^2) ellipse.x <- (ellipse.values - z.central.global) * ellipse.y } ellipse <- cbind(ellipse.x, ellipse.y) ## calculate statistical labels if(length(stats == 1)) {stats <- rep(stats, 2)} stats.data <- matrix(nrow = 3, ncol = 3) data.stats <- as.numeric(data.global[,1]) if("min" %in% stats == TRUE) { stats.data[1, 3] <- data.stats[data.stats == min(data.stats)][1] stats.data[1, 1] <- data.global[data.stats == stats.data[1, 3], 6][1] stats.data[1, 2] <- data.global[data.stats == stats.data[1, 3], 8][1] } if("max" %in% stats == TRUE) { stats.data[2, 3] <- data.stats[data.stats == max(data.stats)][1] stats.data[2, 1] <- data.global[data.stats == stats.data[2, 3], 6][1] stats.data[2, 2] <- data.global[data.stats == stats.data[2, 3], 8][1] } if("median" %in% stats == TRUE) { stats.data[3, 3] <- data.stats[data.stats == quantile(data.stats, 0.5, type = 3)] stats.data[3, 1] <- data.global[data.stats == stats.data[3, 3], 6][1] stats.data[3, 2] <- data.global[data.stats == stats.data[3, 3], 8][1] } ## re-calculate axes limits if necessary if(rotate == FALSE) { limits.z.x <- range(ellipse[,1]) limits.z.y <- range(ellipse[,2]) } else { limits.z.x <- range(ellipse[,2]) limits.z.y <- range(ellipse[,1]) } 
if(!("ylim" %in% names(extraArgs))) { if(limits.z.y[1] < 0.66 * limits.y[1]) { limits.y[1] <- 1.8 * limits.z.y[1] } if(limits.z.y[2] > 0.77 * limits.y[2]) { limits.y[2] <- 1.3 * limits.z.y[2] } if(rotate == TRUE) { limits.y <- c(-max(abs(limits.y)), max(abs(limits.y))) } } if(!("xlim" %in% names(extraArgs))) { if(limits.z.x[2] > 1.1 * limits.x[2]) { limits.x[2] <- limits.z.x[2] } } ## calculate and paste statistical summary De.stats <- matrix(nrow = length(data), ncol = 12) colnames(De.stats) <- c("n", "mean", "median", "kde.max", "sd.abs", "sd.rel", "se.abs", "se.rel", "q.25", "q.75", "skewness", "kurtosis") for(i in 1:length(data)) { statistics <- calc_Statistics(data[[i]])[[summary.method]] statistics.2 <- calc_Statistics(data[[i]][,3:4])[[summary.method]] De.stats[i,1] <- statistics$n De.stats[i,2] <- statistics.2$mean De.stats[i,3] <- statistics.2$median De.stats[i,5] <- statistics$sd.abs De.stats[i,6] <- statistics$sd.rel De.stats[i,7] <- statistics$se.abs De.stats[i,8] <- statistics$se.rel De.stats[i,9] <- quantile(data[[i]][,1], 0.25) De.stats[i,10] <- quantile(data[[i]][,1], 0.75) De.stats[i,11] <- statistics$skewness De.stats[i,12] <- statistics$kurtosis ## account for log.z-option if(log.z == TRUE) { De.stats[i,2:4] <- exp(De.stats[i,2:4]) } ## kdemax - here a little doubled as it appears below again De.density <- try(density(x = data[[i]][,1], kernel = "gaussian", bw = bw, from = limits.z[1], to = limits.z[2]), silent = TRUE) if(inherits(De.density, "try-error")) { De.stats[i,4] <- NA } else { De.stats[i,4] <- De.density$x[which.max(De.density$y)] } } label.text = list(NA) if(summary.pos[1] != "sub") { n.rows <- length(summary) for(i in 1:length(data)) { stops <- paste(rep("\n", (i - 1) * n.rows), collapse = "") summary.text <- character(0) for(j in 1:length(summary)) { summary.text <- c(summary.text, paste( "", ifelse("n" %in% summary[j] == TRUE, paste("n = ", De.stats[i,1], "\n", sep = ""), ""), ifelse("mean" %in% summary[j] == TRUE, paste("mean = ", 
round(De.stats[i,2], 2), "\n", sep = ""), ""), ifelse("median" %in% summary[j] == TRUE, paste("median = ", round(De.stats[i,3], 2), "\n", sep = ""), ""), ifelse("kde.max" %in% summary[j] == TRUE, paste("kdemax = ", round(De.stats[i,4], 2), " \n ", sep = ""), ""), ifelse("sd.abs" %in% summary[j] == TRUE, paste("abs. sd = ", round(De.stats[i,5], 2), "\n", sep = ""), ""), ifelse("sd.rel" %in% summary[j] == TRUE, paste("rel. sd = ", round(De.stats[i,6], 2), " %", "\n", sep = ""), ""), ifelse("se.abs" %in% summary[j] == TRUE, paste("se = ", round(De.stats[i,7], 2), "\n", sep = ""), ""), ifelse("se.rel" %in% summary[j] == TRUE, paste("rel. se = ", round(De.stats[i,8], 2), " %", "\n", sep = ""), ""), ifelse("skewness" %in% summary[j] == TRUE, paste("skewness = ", round(De.stats[i,11], 2), "\n", sep = ""), ""), ifelse("kurtosis" %in% summary[j] == TRUE, paste("kurtosis = ", round(De.stats[i,12], 2), "\n", sep = ""), ""), ifelse("in.2s" %in% summary[j] == TRUE, paste("in 2 sigma = ", round(sum(data[[i]][,7] > -2 & data[[i]][,7] < 2) / nrow(data[[i]]) * 100 , 1), " %", sep = ""), ""), sep = "")) } summary.text <- paste(summary.text, collapse = "") label.text[[length(label.text) + 1]] <- paste(stops, summary.text, stops, sep = "") } } else { for(i in 1:length(data)) { summary.text <- character(0) for(j in 1:length(summary)) { summary.text <- c(summary.text, ifelse("n" %in% summary[j] == TRUE, paste("n = ", De.stats[i,1], " | ", sep = ""), ""), ifelse("mean" %in% summary[j] == TRUE, paste("mean = ", round(De.stats[i,2], 2), " | ", sep = ""), ""), ifelse("median" %in% summary[j] == TRUE, paste("median = ", round(De.stats[i,3], 2), " | ", sep = ""), ""), ifelse("kde.max" %in% summary[j] == TRUE, paste("kdemax = ", round(De.stats[i,4], 2), " | ", sep = ""), ""), ifelse("sd.abs" %in% summary[j] == TRUE, paste("abs. sd = ", round(De.stats[i,5], 2), " | ", sep = ""), ""), ifelse("sd.rel" %in% summary[j] == TRUE, paste("rel. 
sd = ", round(De.stats[i,6], 2), " %", " | ", sep = ""), ""), ifelse("se.abs" %in% summary[j] == TRUE, paste("abs. se = ", round(De.stats[i,7], 2), " | ", sep = ""), ""), ifelse("se.rel" %in% summary[j] == TRUE, paste("rel. se = ", round(De.stats[i,8], 2), " %", " | ", sep = ""), ""), ifelse("skewness" %in% summary[j] == TRUE, paste("skewness = ", round(De.stats[i,11], 2), " | ", sep = ""), ""), ifelse("kurtosis" %in% summary[j] == TRUE, paste("kurtosis = ", round(De.stats[i,12], 2), " | ", sep = ""), ""), ifelse("in.2s" %in% summary[j] == TRUE, paste("in 2 sigma = ", round(sum(data[[i]][,7] > -2 & data[[i]][,7] < 2) / nrow(data[[i]]) * 100 , 1), " % | ", sep = ""), "") ) } summary.text <- paste(summary.text, collapse = "") label.text[[length(label.text) + 1]] <- paste( " ", summary.text, sep = "") } ## remove outer vertical lines from string for(i in 2:length(label.text)) { label.text[[i]] <- substr(x = label.text[[i]], start = 3, stop = nchar(label.text[[i]]) - 3) } } ## remove dummy list element label.text[[1]] <- NULL if(rotate == FALSE) { ## convert keywords into summary placement coordinates if(missing(summary.pos) == TRUE) { summary.pos <- c(limits.x[1], limits.y[2]) summary.adj <- c(0, 1) } else if(length(summary.pos) == 2) { summary.pos <- summary.pos summary.adj <- c(0, 1) } else if(summary.pos[1] == "topleft") { summary.pos <- c(limits.x[1], limits.y[2] - par()$cxy[2] * 1) summary.adj <- c(0, 1) } else if(summary.pos[1] == "top") { summary.pos <- c(mean(limits.x), limits.y[2] - par()$cxy[2] * 1) summary.adj <- c(0.5, 1) } else if(summary.pos[1] == "topright") { summary.pos <- c(limits.x[2], limits.y[2] - par()$cxy[2] * 1) summary.adj <- c(1, 1) } else if(summary.pos[1] == "left") { summary.pos <- c(limits.x[1], mean(limits.y)) summary.adj <- c(0, 0.5) } else if(summary.pos[1] == "center") { summary.pos <- c(mean(limits.x), mean(limits.y)) summary.adj <- c(0.5, 0.5) } else if(summary.pos[1] == "right") { summary.pos <- c(limits.x[2], mean(limits.y)) 
summary.adj <- c(1, 0.5) }else if(summary.pos[1] == "bottomleft") { summary.pos <- c(limits.x[1], limits.y[1] + par()$cxy[2] * 3.5) summary.adj <- c(0, 0) } else if(summary.pos[1] == "bottom") { summary.pos <- c(mean(limits.x), limits.y[1] + par()$cxy[2] * 3.5) summary.adj <- c(0.5, 0) } else if(summary.pos[1] == "bottomright") { summary.pos <- c(limits.x[2], limits.y[1] + par()$cxy[2] * 3.5) summary.adj <- c(1, 0) } ## convert keywords into legend placement coordinates if(missing(legend.pos) == TRUE) { legend.pos <- c(limits.x[1], limits.y[2]) legend.adj <- c(0, 1) } else if(length(legend.pos) == 2) { legend.pos <- legend.pos legend.adj <- c(0, 1) } else if(legend.pos[1] == "topleft") { legend.pos <- c(limits.x[1], limits.y[2]) legend.adj <- c(0, 1) } else if(legend.pos[1] == "top") { legend.pos <- c(mean(limits.x), limits.y[2]) legend.adj <- c(0.5, 1) } else if(legend.pos[1] == "topright") { legend.pos <- c(limits.x[2], limits.y[2]) legend.adj <- c(1, 1) } else if(legend.pos[1] == "left") { legend.pos <- c(limits.x[1], mean(limits.y)) legend.adj <- c(0, 0.5) } else if(legend.pos[1] == "center") { legend.pos <- c(mean(limits.x), mean(limits.y)) legend.adj <- c(0.5, 0.5) } else if(legend.pos[1] == "right") { legend.pos <- c(limits.x[2], mean(limits.y)) legend.adj <- c(1, 0.5) } else if(legend.pos[1] == "bottomleft") { legend.pos <- c(limits.x[1], limits.y[1]) legend.adj <- c(0, 0) } else if(legend.pos[1] == "bottom") { legend.pos <- c(mean(limits.x), limits.y[1]) legend.adj <- c(0.5, 0) } else if(legend.pos[1] == "bottomright") { legend.pos <- c(limits.x[2], limits.y[1]) legend.adj <- c(1, 0) } } else { ## convert keywords into summary placement coordinates if(missing(summary.pos) == TRUE) { summary.pos <- c(limits.y[1] + par()$cxy[1] * 7.5, limits.x[1]) summary.adj <- c(0, 0) } else if(length(summary.pos) == 2) { summary.pos <- summary.pos summary.adj <- c(0, 1) } else if(summary.pos[1] == "topleft") { summary.pos <- c(limits.y[1] + par()$cxy[1] * 7.5, 
limits.x[2]) summary.adj <- c(0, 1) } else if(summary.pos[1] == "top") { summary.pos <- c(mean(limits.y), limits.x[2]) summary.adj <- c(0.5, 1) } else if(summary.pos[1] == "topright") { summary.pos <- c(limits.y[2], limits.x[2]) summary.adj <- c(1, 1) } else if(summary.pos[1] == "left") { summary.pos <- c(limits.y[1] + par()$cxy[1] * 7.5, mean(limits.x)) summary.adj <- c(0, 0.5) } else if(summary.pos[1] == "center") { summary.pos <- c(mean(limits.y), mean(limits.x)) summary.adj <- c(0.5, 0.5) } else if(summary.pos[1] == "right") { summary.pos <- c(limits.y[2], mean(limits.x)) summary.adj <- c(1, 0.5) }else if(summary.pos[1] == "bottomleft") { summary.pos <- c(limits.y[1] + par()$cxy[1] * 7.5, limits.x[1]) summary.adj <- c(0, 0) } else if(summary.pos[1] == "bottom") { summary.pos <- c(mean(limits.y), limits.x[1]) summary.adj <- c(0.5, 0) } else if(summary.pos[1] == "bottomright") { summary.pos <- c(limits.y[2], limits.x[1]) summary.adj <- c(1, 0) } ## convert keywords into legend placement coordinates if(missing(legend.pos) == TRUE) { legend.pos <- c(limits.y[1] + par()$cxy[1] * 7.5, limits.x[1]) legend.adj <- c(0, 0) } else if(length(legend.pos) == 2) { legend.pos <- legend.pos legend.adj <- c(1, 0) } else if(legend.pos[1] == "topleft") { legend.pos <- c(limits.y[1] + par()$cxy[1] * 11, limits.x[2]) legend.adj <- c(1, 0) } else if(legend.pos[1] == "top") { legend.pos <- c(mean(limits.y), limits.x[2]) legend.adj <- c(1, 0.5) } else if(legend.pos[1] == "topright") { legend.pos <- c(limits.y[2], limits.x[2]) legend.adj <- c(1, 1) } else if(legend.pos[1] == "left") { legend.pos <- c(limits.y[1] + par()$cxy[1] * 7.5, mean(limits.x)) legend.adj <- c(0.5, 0) } else if(legend.pos[1] == "center") { legend.pos <- c(mean(limits.y), mean(limits.x)) legend.adj <- c(0.5, 0.5) } else if(legend.pos[1] == "right") { legend.pos <- c(limits.y[2], mean(limits.x)) legend.adj <- c(0.5, 1) } else if(legend.pos[1] == "bottomleft") { legend.pos <- c(limits.y[1] + par()$cxy[1] * 7.5, 
limits.x[1]) legend.adj <- c(0, 0) } else if(legend.pos[1] == "bottom") { legend.pos <- c(mean(limits.y), limits.x[1]) legend.adj <- c(0, 0.5) } else if(legend.pos[1] == "bottomright") { legend.pos <- c(limits.y[2], limits.x[1]) legend.adj <- c(0, 1) } } ## define cartesian plot origins if(rotate == FALSE) { xy.0 <- c(min(ellipse[,1]) * lostintranslation, min(ellipse[,2])) } else { xy.0 <- c(min(ellipse[,1]), min(ellipse[,2]) * lostintranslation) } ## calculate coordinates for dispersion polygon overlay y.max.x <- 2 * limits.x[2] / max(data.global[6]) polygons <- matrix(nrow = length(data), ncol = 14) for(i in 1:length(data)) { if(dispersion == "qr") { ci.lower <- quantile(data[[i]][,1], 0.25) ci.upper <- quantile(data[[i]][,1], 0.75) } else if(grepl(x = dispersion, pattern = "p") == TRUE) { ci.plot <- as.numeric(strsplit(x = dispersion, split = "p")[[1]][2]) ci.plot <- (100 - ci.plot) / 100 ci.lower <- quantile(data[[i]][,1], ci.plot) ci.upper <- quantile(data[[i]][,1], 1 - ci.plot) } else if(dispersion == "sd") { if(log.z == TRUE) { ci.lower <- exp(mean(log(data[[i]][,1])) - sd(log(data[[i]][,1]))) ci.upper <- exp(mean(log(data[[i]][,1])) + sd(log(data[[i]][,1]))) } else { ci.lower <- mean(data[[i]][,1]) - sd(data[[i]][,1]) ci.upper <- mean(data[[i]][,1]) + sd(data[[i]][,1]) } } else if(dispersion == "2sd") { if(log.z == TRUE) { ci.lower <- exp(mean(log(data[[i]][,1])) - 2 * sd(log(data[[i]][,1]))) ci.upper <- exp(mean(log(data[[i]][,1])) + 2 * sd(log(data[[i]][,1]))) } else { ci.lower <- mean(data[[i]][,1]) - 2 * sd(data[[i]][,1]) ci.upper <- mean(data[[i]][,1]) + 2 * sd(data[[i]][,1]) } } else { stop("Measure of dispersion not supported.") } if(log.z == TRUE) { ci.lower[which(ci.lower < 0)] <- 1 y.lower <- log(ci.lower) y.upper <- log(ci.upper) } else { y.lower <- ci.lower y.upper <- ci.upper } if(rotate == FALSE) { polygons[i,1:7] <- c(limits.x[1], limits.x[2], xy.0[1], par()$usr[2], par()$usr[2], xy.0[1], limits.x[2]) polygons[i,8:14] <- c(0, (y.upper - 
z.central.global) * limits.x[2], (y.upper - z.central.global) * xy.0[1], (y.upper - z.central.global) * xy.0[1], (y.lower - z.central.global) * xy.0[1], (y.lower - z.central.global) * xy.0[1], (y.lower - z.central.global) * limits.x[2] ) } else { y.max <- par()$usr[4] polygons[i,1:7] <- c(limits.x[1], limits.x[2], xy.0[2], y.max, y.max, xy.0[2], limits.x[2]) polygons[i,8:14] <- c(0, (y.upper - z.central.global) * limits.x[2], (y.upper - z.central.global) * xy.0[2], (y.upper - z.central.global) * xy.0[2], (y.lower - z.central.global) * xy.0[2], (y.lower - z.central.global) * xy.0[2], (y.lower - z.central.global) * limits.x[2] ) } } ## append information about data in confidence interval for(i in 1:length(data)) { data.in.2s <- rep(x = FALSE, times = nrow(data[[i]])) data.in.2s[data[[i]][,8] > -2 & data[[i]][,8] < 2] <- TRUE data[[i]] <- cbind(data[[i]], data.in.2s) } ## calculate coordinates for 2-sigma bar overlay if(bar[1] == TRUE) { bars <- matrix(nrow = length(data), ncol = 8) for(i in 1:length(data)) { bars[i,1:4] <- c(limits.x[1], limits.x[1], ifelse("xlim" %in% names(extraArgs), extraArgs$xlim[2] * 0.95, max(data.global$precision)), ifelse("xlim" %in% names(extraArgs), extraArgs$xlim[2] * 0.95, max(data.global$precision))) bars[i,5:8] <- c(-2, 2, (data[[i]][1,5] - z.central.global) * bars[i,3] + 2, (data[[i]][1,5] - z.central.global) * bars[i,3] - 2) } } else { bars <- matrix(nrow = length(bar), ncol = 8) if(is.numeric(bar) == TRUE & log.z == TRUE) { bar <- log(bar) } for(i in 1:length(bar)) { bars[i,1:4] <- c(limits.x[1], limits.x[1], ifelse("xlim" %in% names(extraArgs), extraArgs$xlim[2] * 0.95, max(data.global$precision)), ifelse("xlim" %in% names(extraArgs), extraArgs$xlim[2] * 0.95, max(data.global$precision))) bars[i,5:8] <- c(-2, 2, (bar[i] - z.central.global) * bars[i,3] + 2, (bar[i] - z.central.global) * bars[i,3] - 2) } } if (rotate == TRUE) { bars <- matrix(bars[, rev(seq_len(ncol(bars)))], ncol = 8) } ## calculate error bar coordinates 
if(error.bars == TRUE) { arrow.coords <- list(NA) for(i in 1:length(data)) { arrow.x1 <- data[[i]][,6] arrow.x2 <- data[[i]][,6] arrow.y1 <- data[[i]][,1] - data[[i]][,2] arrow.y2 <- data[[i]][,1] + data[[i]][,2] if(log.z == TRUE) { arrow.y1 <- log(arrow.y1) arrow.y2 <- log(arrow.y2) } arrow.coords[[length(arrow.coords) + 1]] <- cbind( arrow.x1, arrow.x2, (arrow.y1 - z.central.global) * arrow.x1, (arrow.y2 - z.central.global) * arrow.x1) } arrow.coords[[1]] <- NULL } ## calculate KDE KDE <- list(NA) KDE.ext <- 0 KDE.bw <- numeric(0) for(i in 1:length(data)) { KDE.i <- density(x = data[[i]][,3], kernel = "gaussian", bw = bw, from = ellipse.values[1], to = ellipse.values[2], weights = data[[i]]$weights) KDE.xy <- cbind(KDE.i$x, KDE.i$y) KDE.bw <- c(KDE.bw, KDE.i$bw) KDE.ext <- ifelse(max(KDE.xy[,2]) < KDE.ext, KDE.ext, max(KDE.xy[,2])) KDE.xy <- rbind(c(min(KDE.xy[,1]), 0), KDE.xy, c(max(KDE.xy[,1]), 0)) KDE[[length(KDE) + 1]] <- cbind(KDE.xy[,1], KDE.xy[,2]) } KDE[1] <- NULL ## calculate mean KDE bandwidth KDE.bw <- mean(KDE.bw, na.rm = TRUE) ## calculate max KDE value for labelling KDE.max.plot <- numeric(length(data)) for(i in 1:length(data)) { KDE.plot <- density(x = data[[i]][,1], kernel = "gaussian", bw = bw, from = limits.z[1], to = limits.z[2]) KDE.max.plot[i] <- max(KDE.plot$y) } KDE.max.plot <- max(KDE.max.plot, na.rm = TRUE) ## calculate histogram data without plotting ## create dummy list hist.data <- list(NA) for(i in 1:length(data)) { hist.i <- hist(x = data[[i]][,3], plot = FALSE, breaks = breaks) hist.data[[length(hist.data) + 1]] <- hist.i } ## remove dummy list object hist.data[[1]] <- NULL ## calculate maximum histogram bar height for normalisation hist.max.plot <- numeric(length(data)) for(i in 1:length(data)) { hist.max.plot <- ifelse(max(hist.data[[i]]$counts, na.rm = TRUE) > hist.max.plot, max(hist.data[[i]]$counts, na.rm = TRUE), hist.max.plot) } hist.max.plot <- max(hist.max.plot, na.rm = TRUE) ## normalise histogram bar height to KDE 
dimensions for(i in 1:length(data)) { hist.data[[i]]$density <- hist.data[[i]]$counts / hist.max.plot * KDE.max.plot } ## calculate boxplot data without plotting ## create dummy list boxplot.data <- list(NA) for(i in 1:length(data)) { boxplot.i <- boxplot(x = data[[i]][,3], plot = FALSE) boxplot.data[[length(boxplot.data) + 1]] <- boxplot.i } ## remove dummy list object boxplot.data[[1]] <- NULL ## calculate line coordinates and further parameters if(missing(line) == FALSE) { ## check if line parameters are R.Lum-objects for(i in 1:length(line)) { if(is.list(line) == TRUE) { if(is(line[[i]], "RLum.Results")) { line[[i]] <- as.numeric(get_RLum(object = line[[i]], data.object = "summary")$de) } } else if(is(object = line, class2 = "RLum.Results")) { line <- as.numeric(get_RLum(object = line, data.object = "summary")$de) } } ## convert list to vector if(is.list(line) == TRUE) { line <- unlist(line) } if(log.z == TRUE) { line <- log(line) } line.coords <- list(NA) if(rotate == FALSE) { for(i in 1:length(line)) { line.x <- c(limits.x[1], min(ellipse[,1]), par()$usr[2]) line.y <- c(0, (line[i] - z.central.global) * min(ellipse[,1]), (line[i] - z.central.global) * min(ellipse[,1])) line.coords[[length(line.coords) + 1]] <- rbind(line.x, line.y) } } else { for(i in 1:length(line)) { line.x <- c(limits.x[1], min(ellipse[,2]),y.max) line.y <- c(0, (line[i] - z.central.global) * min(ellipse[,2]), (line[i] - z.central.global) * min(ellipse[,2])) line.coords[[length(line.coords) + 1]] <- rbind(line.x, line.y) } } line.coords[1] <- NULL if(missing(line.col) == TRUE) { line.col <- seq(from = 1, to = length(line.coords)) } if(missing(line.lty) == TRUE) { line.lty <- rep(1, length(line.coords)) } if(missing(line.label) == TRUE) { line.label <- rep("", length(line.coords)) } } ## calculate rug coordinates if(missing(rug) == FALSE) { if(log.z == TRUE) { rug.values <- log(De.global) } else { rug.values <- De.global } rug.coords <- list(NA) if(rotate == FALSE) { for(i in 
1:length(rug.values)) { rug.x <- c(xy.0[1] * (1 - 0.013 * (layout$abanico$dimension$rugl / 100)), xy.0[1]) rug.y <- c((rug.values[i] - z.central.global) * min(ellipse[,1]), (rug.values[i] - z.central.global) * min(ellipse[,1])) rug.coords[[length(rug.coords) + 1]] <- rbind(rug.x, rug.y) } } else { for(i in 1:length(rug.values)) { rug.x <- c(xy.0[2] * (1 - 0.013 * (layout$abanico$dimension$rugl / 100)), xy.0[2]) rug.y <- c((rug.values[i] - z.central.global) * min(ellipse[,2]), (rug.values[i] - z.central.global) * min(ellipse[,2])) rug.coords[[length(rug.coords) + 1]] <- rbind(rug.x, rug.y) } } rug.coords[1] <- NULL } ## Generate plot ------------------------------------------------------------ ## determine number of subheader lines to shift the plot if(length(summary) > 0 & summary.pos[1] == "sub") { shift.lines <- (length(data) + 1) * layout$abanico$dimension$summary.line/100 } else {shift.lines <- 1} ## extract original plot parameters bg.original <- par()$bg on.exit(par(bg = bg.original), add = TRUE) par(bg = layout$abanico$colour$background) if(rotate == FALSE) { ## setup plot area par(mar = c(4.5, 4.5, shift.lines + 1.5, 7), xpd = TRUE, cex = cex) if(layout$abanico$dimension$figure.width != "auto" | layout$abanico$dimension$figure.height != "auto") { par(mai = layout$abanico$dimension$margin / 25.4, pin = c(layout$abanico$dimension$figure.width / 25.4 - layout$abanico$dimension$margin[2] / 25.4 - layout$abanico$dimension$margin[4] / 25.4, layout$abanico$dimension$figure.height / 25.4 - layout$abanico$dimension$margin[1] / 25.4 - layout$abanico$dimension$margin[3]/25.4)) } ## create empty plot par(new = TRUE) plot(NA, xlim = c(limits.x[1], limits.x[2] * (1 / plot.ratio)), ylim = limits.y, main = "", sub = sub, xlab = "", ylab = "", xaxs = "i", yaxs = "i", frame.plot = FALSE, axes = FALSE) ## add y-axis label mtext(text = ylab, at = mean(x = c(min(ellipse[,2]), max(ellipse[,2])), na.rm = TRUE), # at = 0, ## BUG FROM VERSION 0.4.0, maybe removed in future adj = 
0.5, side = 2, line = 3 * layout$abanico$dimension$ylab.line / 100, col = layout$abanico$colour$ylab, family = layout$abanico$font.type$ylab, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$ylab)[1], cex = cex * layout$abanico$font.size$ylab/12) ## calculate upper x-axis label values label.x.upper <- if(log.z == TRUE) { as.character(round(1/axTicks(side = 1)[-1] * 100, 1)) } else { as.character(round(1/axTicks(side = 1)[-1], 1)) } # optionally, plot 2-sigma-bar if(bar[1] != FALSE) { for(i in 1:length(bar)) { polygon(x = bars[i,1:4], y = bars[i,5:8], col = bar.fill[i], border = bar.line[i]) } } ## remove unwanted parts polygon(x = c(par()$usr[2], par()$usr[2], par()$usr[2] * 2, par()$usr[2] * 2), y = c(min(ellipse[,2]) * 2, max(ellipse[,2]) * 2, max(ellipse[,2]) * 2, min(ellipse[,2]) * 2), col = bg.original, lty = 0) ## optionally, plot dispersion polygon if(polygon.fill[1] != "none") { for(i in 1:length(data)) { polygon(x = polygons[i,1:7], y = polygons[i,8:14], col = polygon.fill[i], border = polygon.line[i]) } } ## optionally, add minor grid lines if(grid.minor != "none") { for(i in 1:length(tick.values.minor)) { lines(x = c(limits.x[1], min(ellipse[,1])), y = c(0, (tick.values.minor[i] - z.central.global) * min(ellipse[,1])), col = grid.minor, lwd = 1) } for(i in 1:length(tick.values.minor)) { lines(x = c(xy.0[1], par()$usr[2]), y = c((tick.values.minor[i] - z.central.global) * min(ellipse[,1]), (tick.values.minor[i] - z.central.global) * min(ellipse[,1])), col = grid.minor, lwd = 1) } } ## optionally, add major grid lines if(grid.major != "none") { for(i in 1:length(tick.values.major)) { lines(x = c(limits.x[1], min(ellipse[,1])), y = c(0, (tick.values.major[i] - z.central.global) * min(ellipse[,1])), col = grid.major, lwd = 1) } for(i in 1:length(tick.values.major)) { lines(x = c(xy.0[1], par()$usr[2]), y = c((tick.values.major[i] - z.central.global) * min(ellipse[,1]), (tick.values.major[i] - z.central.global) * 
min(ellipse[,1])), col = grid.major, lwd = 1) } } ## optionally, plot lines for each bar if(lwd[1] > 0 & lty[1] > 0 & bar[1] != FALSE & length(data) == 1) { if(bar[1] == TRUE & length(bar) == 1) { bar[1] <- z.central.global } for(i in 1:length(bar)) { x2 <- r / sqrt(1 + f^2 * ( bar[i] - z.central.global)^2) y2 <- (bar[i] - z.central.global) * x2 lines(x = c(limits.x[1], x2, xy.0[1], par()$usr[2]), y = c(0, y2, y2, y2), lty = lty[i], lwd = lwd[i], col = centrality.col[i]) } } else if(lwd[1] > 0 & lty[1] > 0 & bar[1] != FALSE) { for(i in 1:length(data)) { z.line <- ifelse(test = is.numeric(bar[i]) == TRUE, yes = bar[i], no = data[[i]][1,5]) x2 <- r / sqrt(1 + f^2 * ( z.line - z.central.global)^2) y2 <- (z.line - z.central.global) * x2 lines(x = c(limits.x[1], x2, xy.0[1], par()$usr[2]), y = c(0, y2, y2, y2), lty = lty[i], lwd = lwd[i], col = centrality.col[i]) } } ## optionally add further lines if(missing(line) == FALSE) { for(i in 1:length(line)) { lines(x = line.coords[[i]][1,1:3], y = line.coords[[i]][2,1:3], col = line.col[i], lty = line.lty[i] ) text(x = line.coords[[i]][1,3], y = line.coords[[i]][2,3] + par()$cxy[2] * 0.3, labels = line.label[i], pos = 2, col = line.col[i], cex = cex * 0.9) } } ## add plot title cex.old <- par()$cex par(cex = layout$abanico$font.size$main / 12) title(main = main, family = layout$abanico$font.type$main, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$main)[1], col.main = layout$abanico$colour$main, line = shift.lines * layout$abanico$dimension$main / 100) par(cex = cex.old) ## calculate lower x-axis (precision) x.axis.ticks <- axTicks(side = 1) x.axis.ticks <- x.axis.ticks[c(TRUE, x.axis.ticks <= limits.x[2])] x.axis.ticks <- x.axis.ticks[x.axis.ticks <= max(ellipse[,1])] ## x-axis with lables and ticks axis(side = 1, at = x.axis.ticks, col = layout$abanico$colour$xtck1, col.axis = layout$abanico$colour$xtck1, labels = NA, tcl = -layout$abanico$dimension$xtcl1 / 200, cex = cex) axis(side = 
1, at = x.axis.ticks, line = 2 * layout$abanico$dimension$xtck1.line / 100 - 2, lwd = 0, col = layout$abanico$colour$xtck1, family = layout$abanico$font.type$xtck1, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$xtck1)[1], col.axis = layout$abanico$colour$xtck1, cex.axis = layout$abanico$font.size$xlab1/12) ## extend axis line to right side of the plot lines(x = c(max(x.axis.ticks), max(ellipse[,1])), y = c(limits.y[1], limits.y[1]), col = layout$abanico$colour$xtck1) ## draw closing tick on right hand side axis(side = 1, tcl = -layout$abanico$dimension$xtcl1 / 200, lwd = 0, lwd.ticks = 1, at = limits.x[2], labels = FALSE, col = layout$abanico$colour$xtck1) axis(side = 1, tcl = layout$abanico$dimension$xtcl2 / 200, lwd = 0, lwd.ticks = 1, at = limits.x[2], labels = FALSE, col = layout$abanico$colour$xtck2) ## add lower axis label mtext(xlab[2], at = (limits.x[1] + max(ellipse[,1])) / 2, side = 1, line = 2.5 * layout$abanico$dimension$xlab1.line / 100, col = layout$abanico$colour$xlab1, family = layout$abanico$font.type$xlab1, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$xlab1)[1], cex = cex * layout$abanico$font.size$xlab1/12) ## add upper axis label mtext(xlab[1], at = (limits.x[1] + max(ellipse[,1])) / 2, side = 1, line = -3.5 * layout$abanico$dimension$xlab2.line / 100, col = layout$abanico$colour$xlab2, family = layout$abanico$font.type$xlab2, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$xlab2)[1], cex = cex * layout$abanico$font.size$xlab2/12) ## plot upper x-axis axis(side = 1, at = x.axis.ticks[-1], col = layout$abanico$colour$xtck2, col.axis = layout$abanico$colour$xtck2, labels = NA, tcl = layout$abanico$dimension$xtcl2 / 200, cex = cex) ## remove first tick label (infinity) label.x.upper <- label.x.upper[1:(length(x.axis.ticks) - 1)] axis(side = 1, at = x.axis.ticks[-1], labels = label.x.upper, line = -1 * 
layout$abanico$dimension$xtck2.line / 100 - 2, lwd = 0, col = layout$abanico$colour$xtck2, family = layout$abanico$font.type$xtck2, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$xtck2)[1], col.axis = layout$abanico$colour$xtck2, cex.axis = layout$abanico$font.size$xlab2/12) ## plot y-axis if(is.null(extraArgs$yaxt) || extraArgs$yaxt != "n"){ if(y.axis) { char.height <- par()$cxy[2] tick.space <- axisTicks(usr = limits.y, log = FALSE) tick.space <- (max(tick.space) - min(tick.space)) / length(tick.space) if(tick.space < char.height * 1.7) { axis(side = 2, tcl = -layout$abanico$dimension$ytcl / 200, lwd = 1, lwd.ticks = 1, at = c(-2, 2), labels = c("", ""), las = 1, col = layout$abanico$colour$ytck) axis(side = 2, at = 0, tcl = 0, line = 2 * layout$abanico$dimension$ytck.line / 100 - 2, labels = paste("\u00B1", "2"), las = 1, family = layout$abanico$font.type$ytck, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$ytck)[1], col.axis = layout$abanico$colour$ytck, cex.axis = layout$abanico$font.size$ylab/12) } else { axis(side = 2, at = seq(-2, 2, by = 2), col = layout$abanico$colour$ytck, col.axis = layout$abanico$colour$ytck, labels = NA, las = 1, tcl = -layout$abanico$dimension$ytcl / 200, cex = cex) axis(side = 2, at = seq(-2, 2, by = 2), line = 2 * layout$abanico$dimension$ytck.line / 100 - 2, lwd = 0, las = 1, col = layout$abanico$colour$ytck, family = layout$abanico$font.type$ytck, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$ytck)[1], col.axis = layout$abanico$colour$ytck, cex.axis = layout$abanico$font.size$ylab/12) } } else { axis(side = 2, at = 0, col = layout$abanico$colour$ytck, col.axis = layout$abanico$colour$ytck, labels = NA, las = 1, tcl = -layout$abanico$dimension$ytcl / 200, cex = cex) axis(side = 2, at = 0, line = 2 * layout$abanico$dimension$ytck.line / 100 - 2, lwd = 0, las = 1, col = layout$abanico$colour$ytck, family = 
layout$abanico$font.type$ytck, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$ytck)[1], col.axis = layout$abanico$colour$ytck, cex.axis = layout$abanico$font.size$ylab/12) } } ## plot minor z-ticks for(i in 1:length(tick.values.minor)) { lines(x = c(par()$usr[2], (1 + 0.007 * cex * layout$abanico$dimension$ztcl / 100) * par()$usr[2]), y = c((tick.values.minor[i] - z.central.global) * min(ellipse[,1]), (tick.values.minor[i] - z.central.global) * min(ellipse[,1])), col = layout$abanico$colour$ztck) } ## plot major z-ticks for(i in 1:length(tick.values.major)) { lines(x = c(par()$usr[2], (1 + 0.015 * cex * layout$abanico$dimension$ztcl / 100) * par()$usr[2]), y = c((tick.values.major[i] - z.central.global) * min(ellipse[,1]), (tick.values.major[i] - z.central.global) * min(ellipse[,1])), col = layout$abanico$colour$ztck) } ## plot z-axes lines(ellipse, col = layout$abanico$colour$border) lines(rep(par()$usr[2], nrow(ellipse)), ellipse[,2], col = layout$abanico$colour$ztck) ## plot z-axis text text(x = (1 + 0.04 * cex * layout$abanico$dimension$ztcl / 100) * par()$usr[2], y = (tick.values.major - z.central.global) * min(ellipse[,1]), labels = label.z.text, adj = 0, family = layout$abanico$font.type$ztck, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$ztck)[1], cex = cex * layout$abanico$font.size$ztck/12) ## plot z-label mtext(text = zlab, at = mean(x = c(min(ellipse[,2]), max(ellipse[,2])), na.rm = TRUE), # at = 0, ## BUG from version 0.4.0, maybe removed in future side = 4, las = 3, adj = 0.5, line = 5 * layout$abanico$dimension$zlab.line / 100, col = layout$abanico$colour$zlab, family = layout$abanico$font.type$zlab, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$zlab)[1], cex = cex * layout$abanico$font.size$zlab/12) ## plot values and optionally error bars if(error.bars == TRUE) { for(i in 1:length(data)) { arrows(x0 = arrow.coords[[i]][,1], x1 = 
arrow.coords[[i]][,2], y0 = arrow.coords[[i]][,3], y1 = arrow.coords[[i]][,4], length = 0, angle = 90, code = 3, col = value.bar[i]) } } for(i in 1:length(data)) { points(data[[i]][,6][data[[i]][,6] <= limits.x[2]], data[[i]][,8][data[[i]][,6] <= limits.x[2]], col = value.dot[i], pch = pch[i], cex = layout$abanico$dimension$pch / 100) } ## calculate KDE width KDE.max <- 0 for(i in 1:length(data)) { KDE.max <- ifelse(test = KDE.max < max(KDE[[i]][,2]), yes = max(KDE[[i]][,2]), no = KDE.max) } ## optionally adjust KDE width for boxplot option if(boxplot == TRUE) { KDE.max <- 1.25 * KDE.max } KDE.scale <- (par()$usr[2] - xy.0[1]) / (KDE.max * 1.05) ## optionally add KDE plot if(kde == TRUE) { ## plot KDE lines for(i in 1:length(data)) { polygon(x = xy.0[1] + KDE[[i]][,2] * KDE.scale, y = (KDE[[i]][,1] - z.central.global) * min(ellipse[,1]), col = kde.fill[i], border = kde.line[i], lwd = 1.7) } ## plot KDE x-axis axis(side = 1, at = c(xy.0[1], par()$usr[2]), col = layout$abanico$colour$xtck3, col.axis = layout$abanico$colour$xtck3, labels = NA, tcl = -layout$abanico$dimension$xtcl3 / 200, cex = cex) axis(side = 1, at = c(xy.0[1], par()$usr[2]), labels = as.character(round(c(0, KDE.max.plot), 3)), line = 2 * layout$abanico$dimension$xtck3.line / 100 - 2, lwd = 0, col = layout$abanico$colour$xtck3, family = layout$abanico$font.type$xtck3, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$xtck3)[1], col.axis = layout$abanico$colour$xtck3, cex.axis = layout$abanico$font.size$xtck3/12) mtext(text = paste(xlab[3], " (bw ", round(x = KDE.bw, digits = 3), ")", sep = ""), at = (xy.0[1] + par()$usr[2]) / 2, side = 1, line = 2.5 * layout$abanico$dimension$xlab3.line / 100, col = layout$abanico$colour$xlab3, family = layout$abanico$font.type$xlab3, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$xlab3)[1], cex = cex * layout$abanico$font.size$xlab3/12) } ## optionally add histogram or dot plot axis if(hist 
== TRUE) { axis(side = 1, at = c(xy.0[1], par()$usr[2]), labels = as.character(c(0, hist.max.plot)), line = -1 * layout$abanico$dimension$xtck3.line / 100 - 2, lwd = 0, col = layout$abanico$colour$xtck3, family = layout$abanico$font.type$xtck3, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$xtck3)[1], col.axis = layout$abanico$colour$xtck3, cex.axis = layout$abanico$font.size$xtck3/12) ## add label mtext(text = "n", at = (xy.0[1] + par()$usr[2]) / 2, side = 1, line = -3.5 * layout$abanico$dimension$xlab2.line / 100, col = layout$abanico$colour$xlab2, family = layout$abanico$font.type$xlab2, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$xlab2)[1], cex = cex * layout$abanico$font.size$xlab2/12) ## plot ticks axis(side = 1, at = c(xy.0[1], par()$usr[2]), col = layout$abanico$colour$xtck2, col.axis = layout$abanico$colour$xtck2, labels = NA, tcl = layout$abanico$dimension$xtcl2 / 200, cex = cex) ## calculate scaling factor for histogram bar heights hist.scale <- (par()$usr[2] - xy.0[1]) / (KDE.max.plot * 1.05) ## draw each bar for each data set for(i in 1:length(data)) { for(j in 1:length(hist.data[[i]]$density)) { ## calculate x-coordinates hist.x.i <- c(xy.0[1], xy.0[1], xy.0[1] + hist.data[[i]]$density[j] * hist.scale, xy.0[1] + hist.data[[i]]$density[j] * hist.scale) ## calculate y-coordinates hist.y.i <- c((hist.data[[i]]$breaks[j] - z.central.global) * min(ellipse[,1]), (hist.data[[i]]$breaks[j + 1] - z.central.global) * min(ellipse[,1]), (hist.data[[i]]$breaks[j + 1] - z.central.global) * min(ellipse[,1]), (hist.data[[i]]$breaks[j] - z.central.global) * min(ellipse[,1])) ## remove data out of z-axis range hist.y.i <- ifelse(hist.y.i < min(ellipse[,2]), min(ellipse[,2]), hist.y.i) hist.y.i <- ifelse(hist.y.i > max(ellipse[,2]), max(ellipse[,2]), hist.y.i) ## draw the bars polygon(x = hist.x.i, y = hist.y.i, col = kde.fill[i], border = kde.line[i]) } } } ## optionally add dot plot 
if(dots == TRUE) { for(i in 1:length(data)) { for(j in 1:length(hist.data[[i]]$counts)) { ## calculate scaling factor for histogram bar heights dots.distance <- (par()$usr[2] - (xy.0[1] + par()$cxy[1] * 0.4)) / hist.max.plot dots.x.i <- seq(from = xy.0[1] + par()$cxy[1] * 0.4, by = dots.distance, length.out = hist.data[[i]]$counts[j]) dots.y.i <- rep((hist.data[[i]]$mids[j] - z.central.global) * min(ellipse[,1]), length(dots.x.i)) ## remove data out of z-axis range dots.x.i <- dots.x.i[dots.y.i >= min(ellipse[,2]) & dots.y.i <= max(ellipse[,2])] dots.y.i <- dots.y.i[dots.y.i >= min(ellipse[,2]) & dots.y.i <= max(ellipse[,2])] if(max(c(0, dots.x.i), na.rm = TRUE) >= (par()$usr[2] - par()$cxy[1] * 0.4)) { dots.y.i <- dots.y.i[dots.x.i < (par()$usr[2] - par()$cxy[1] * 0.4)] dots.x.i <- dots.x.i[dots.x.i < (par()$usr[2] - par()$cxy[1] * 0.4)] pch.dots <- c(rep(20, max(length(dots.x.i) - 1),1), 15) } else { pch.dots <- rep(20, length(dots.x.i)) } ## plot points points(x = dots.x.i, y = dots.y.i, pch = "|", cex = 0.7 * cex, col = kde.line[i]) } } } ## optionally add box plot if(boxplot == TRUE) { for(i in 1:length(data)) { ## draw median line lines(x = c(xy.0[1] + KDE.max * 0.85, xy.0[1] + KDE.max * 0.95), y = c((boxplot.data[[i]]$stats[3,1] - z.central.global) * min(ellipse[,1]), (boxplot.data[[i]]$stats[3,1] - z.central.global) * min(ellipse[,1])), lwd = 2, col = kde.line[i]) ## draw p25-p75-polygon polygon(x = c(xy.0[1] + KDE.max * 0.85, xy.0[1] + KDE.max * 0.85, xy.0[1] + KDE.max * 0.95, xy.0[1] + KDE.max * 0.95), y = c((boxplot.data[[i]]$stats[2,1] - z.central.global) * min(ellipse[,1]), (boxplot.data[[i]]$stats[4,1] - z.central.global) * min(ellipse[,1]), (boxplot.data[[i]]$stats[4,1] - z.central.global) * min(ellipse[,1]), (boxplot.data[[i]]$stats[2,1] - z.central.global) * min(ellipse[,1])), border = kde.line[i]) ## draw whiskers lines(x = c(xy.0[1] + KDE.max * 0.9, xy.0[1] + KDE.max * 0.9), y = c((boxplot.data[[i]]$stats[2,1] - z.central.global) * 
min(ellipse[,1]), (boxplot.data[[i]]$stats[1,1] - z.central.global) * min(ellipse[,1])), col = kde.line[i]) lines(x = c(xy.0[1] + KDE.max * 0.87, xy.0[1] + KDE.max * 0.93), y = rep((boxplot.data[[i]]$stats[1,1] - z.central.global) * min(ellipse[,1]), 2), col = kde.line[i]) lines(x = c(xy.0[1] + KDE.max * 0.9, xy.0[1] + KDE.max * 0.9), y = c((boxplot.data[[i]]$stats[4,1] - z.central.global) * min(ellipse[,1]), (boxplot.data[[i]]$stats[5,1] - z.central.global) * min(ellipse[,1])), col = kde.line[i]) lines(x = c(xy.0[1] + KDE.max * 0.87, xy.0[1] + KDE.max * 0.93), y = rep((boxplot.data[[i]]$stats[5,1] - z.central.global) * min(ellipse[,1]), 2), col = kde.line[i]) ## draw outlier points points(x = rep(xy.0[1] + KDE.max * 0.9, length(boxplot.data[[i]]$out)), y = (boxplot.data[[i]]$out - z.central.global) * min(ellipse[,1]), cex = cex * 0.8, col = kde.line[i]) } } ## optionally add stats, i.e. min, max, median sample text if(length(stats) > 0) { text(x = stats.data[,1], y = stats.data[,2], pos = 2, labels = round(stats.data[,3], 1), family = layout$abanico$font.type$stats, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$stats)[1], cex = cex * layout$abanico$font.size$stats/12, col = layout$abanico$colour$stats) } ## optionally add rug if(rug == TRUE) { for(i in 1:length(rug.coords)) { lines(x = rug.coords[[i]][1,], y = rug.coords[[i]][2,], col = value.rug[data.global[i,10]]) } } ## plot KDE base line lines(x = c(xy.0[1], xy.0[1]), y = c(min(ellipse[,2]), max(ellipse[,2])), col = layout$abanico$colour$border) ## draw border around plot if(frame == 1) { polygon(x = c(limits.x[1], min(ellipse[,1]), par()$usr[2], par()$usr[2], min(ellipse[,1])), y = c(0, max(ellipse[,2]), max(ellipse[,2]), min(ellipse[,2]), min(ellipse[,2])), border = layout$abanico$colour$border, lwd = 0.8) } else if(frame == 2) { polygon(x = c(limits.x[1], min(ellipse[,1]), par()$usr[2], par()$usr[2], min(ellipse[,1]), limits.x[1]), y = c(2, max(ellipse[,2]), 
max(ellipse[,2]), min(ellipse[,2]), min(ellipse[,2]), -2), border = layout$abanico$colour$border, lwd = 0.8) } else if(frame == 3) { polygon(x = c(limits.x[1], par()$usr[2], par()$usr[2], limits.x[1]), y = c(max(ellipse[,2]), max(ellipse[,2]), min(ellipse[,2]), min(ellipse[,2])), border = layout$abanico$colour$border, lwd = 0.8) } ## optionally add legend content if(!missing(legend)) { ## store and change font family par.family <- par()$family par(family = layout$abanico$font.type$legend) legend(x = legend.pos[1], y = 0.8 * legend.pos[2], xjust = legend.adj[1], yjust = legend.adj[2], legend = legend, pch = pch, col = value.dot, text.col = value.dot, text.font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$legend)[1], cex = cex * layout$abanico$font.size$legend/12, bty = "n") ## restore font family par(family = par.family) } ## optionally add subheader text mtext(text = mtext, side = 3, line = (shift.lines - 2) * layout$abanico$dimension$mtext / 100, col = layout$abanico$colour$mtext, family = layout$abanico$font.type$mtext, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$mtext)[1], cex = cex * layout$abanico$font.size$mtext / 12) ## add summary content for(i in 1:length(data)) { if(summary.pos[1] != "sub") { text(x = summary.pos[1], y = summary.pos[2], adj = summary.adj, labels = label.text[[i]], col = summary.col[i], family = layout$abanico$font.type$summary, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$summary)[1], cex = cex * layout$abanico$font.size$summary / 12) } else { if(mtext == "") { mtext(side = 3, line = (shift.lines- 1 - i) * layout$abanico$dimension$summary / 100 , text = label.text[[i]], col = summary.col[i], family = layout$abanico$font.type$summary, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$summary)[1], cex = cex * layout$abanico$font.size$summary / 12) } } } } else { ## setup plot area par(mar 
= c(4, 4, shift.lines + 5, 4), xpd = TRUE, cex = cex) if(layout$abanico$dimension$figure.width != "auto" | layout$abanico$dimension$figure.height != "auto") { par(mai = layout$abanico$dimension$margin / 25.4, pin = c(layout$abanico$dimension$figure.width / 25.4 - layout$abanico$dimension$margin[2] / 25.4 - layout$abanico$dimension$margin[4] / 25.4, layout$abanico$dimension$figure.height / 25.4 - layout$abanico$dimension$margin[1] / 25.4 - layout$abanico$dimension$margin[3]/25.4)) } ## create empty plot par(new = TRUE) plot(NA, xlim = limits.y, ylim = c(limits.x[1], limits.x[2] * (1 / plot.ratio)), main = "", sub = sub, xlab = "", ylab = "", xaxs = "i", yaxs = "i", frame.plot = FALSE, axes = FALSE) ## add y-axis label mtext(text = ylab, at = 0, adj = 0.5, side = 1, line = 3 * layout$abanico$dimension$ylab.line / 100, col = layout$abanico$colour$ylab, family = layout$abanico$font.type$ylab, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$ylab)[1], cex = cex * layout$abanico$font.size$ylab/12) ## calculate upper x-axis label values label.x.upper <- if(log.z == TRUE) { as.character(round(1/axTicks(side = 2)[-1] * 100, 1)) } else { as.character(round(1/axTicks(side = 2)[-1], 1)) } # optionally, plot 2-sigma-bar if(bar[1] != FALSE) { for(i in 1:length(bar)) { polygon(x = bars[i,1:4], y = bars[i,5:8], col = bar.fill[i], border = bar.line[i]) } } ## remove unwanted parts polygon(y = c(par()$usr[2], par()$usr[2], par()$usr[2] * 2, par()$usr[2] * 2), x = c(min(ellipse[,2]) * 2, max(ellipse[,2]) * 2, max(ellipse[,2]) * 2, min(ellipse[,2]) * 2), col = bg.original, lty = 0) ## optionally, plot dispersion polygon if(polygon.fill[1] != "none") { for(i in 1:length(data)) { polygon(x = polygons[i,8:14], y = polygons[i,1:7], col = polygon.fill[i], border = polygon.line[i]) } } ## optionally, add minor grid lines if(grid.minor != "none") { for(i in 1:length(tick.values.minor)) { lines(y = c(limits.x[1], min(ellipse[,1])), x = c(0, 
(tick.values.minor[i] - z.central.global) * min(ellipse[,1])), col = grid.minor, lwd = 1) } for(i in 1:length(tick.values.minor)) { lines(y = c(xy.0[2], par()$usr[2]), x = c((tick.values.minor[i] - z.central.global) * min(ellipse[,1]), (tick.values.minor[i] - z.central.global) * min(ellipse[,1])), col = grid.minor, lwd = 1) } } ## optionally, add major grid lines if(grid.major != "none") { for(i in 1:length(tick.values.major)) { lines(y = c(limits.x[1], min(ellipse[,2])), x = c(0, (tick.values.major[i] - z.central.global) * min(ellipse[,2])), col = grid.major, lwd = 1) } for(i in 1:length(tick.values.major)) { lines(y = c(xy.0[2],y.max), x = c((tick.values.major[i] - z.central.global) * min(ellipse[,2]), (tick.values.major[i] - z.central.global) * min(ellipse[,2])), col = grid.major, lwd = 1) } } ## optionally, plot lines for each bar if(lwd[1] > 0 & lty[1] > 0 & bar[1] != FALSE & length(data) == 1) { if(bar[1] == TRUE & length(bar) == 1) { bar[1] <- z.central.global } for(i in 1:length(bar)) { x2 <- r / sqrt(1 + f^2 * ( bar[i] - z.central.global)^2) y2 <- (bar[i] - z.central.global) * x2 lines(x = c(0, y2, y2, y2), y = c(limits.x[1], x2, xy.0[2], par()$usr[4]), lty = lty[i], lwd = lwd[i], col = centrality.col[i]) } } ## optionally add further lines if(missing(line) == FALSE) { for(i in 1:length(line)) { lines(y = line.coords[[i]][1,1:3], x = line.coords[[i]][2,1:3], col = line.col[i], lty = line.lty[i] ) text(y = line.coords[[i]][1,3], x = line.coords[[i]][2,3] + par()$cxy[2] * 0.3, labels = line.label[i], pos = 2, col = line.col[i], cex = cex * 0.9) } } ## add plot title cex.old <- par()$cex par(cex = layout$abanico$font.size$main / 12) title(main = main, family = layout$abanico$font.type$main, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$main)[1], col.main = layout$abanico$colour$main, line = (shift.lines + 3.5) * layout$abanico$dimension$main / 100) par(cex = cex.old) ## calculate lower x-axis (precision) x.axis.ticks <- 
axTicks(side = 2) x.axis.ticks <- x.axis.ticks[c(TRUE, x.axis.ticks <= limits.x[2])] x.axis.ticks <- x.axis.ticks[x.axis.ticks <= max(ellipse[,2])] ## x-axis with lables and ticks axis(side = 2, at = x.axis.ticks, col = layout$abanico$colour$xtck1, col.axis = layout$abanico$colour$xtck1, labels = NA, tcl = -layout$abanico$dimension$xtcl1 / 200, cex = cex) axis(side = 2, at = x.axis.ticks, line = 2 * layout$abanico$dimension$xtck1.line / 100 - 2, lwd = 0, col = layout$abanico$colour$xtck1, family = layout$abanico$font.type$xtck1, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$xtck1)[1], col.axis = layout$abanico$colour$xtck1, cex.axis = layout$abanico$font.size$xlab1/12) ## extend axis line to right side of the plot lines(y = c(max(x.axis.ticks), max(ellipse[,2])), x = c(limits.y[1], limits.y[1]), col = layout$abanico$colour$xtck1) ## draw closing tick on right hand side axis(side = 2, tcl = -layout$abanico$dimension$xtcl1 / 200, lwd = 0, lwd.ticks = 1, at = limits.x[2], labels = FALSE, col = layout$abanico$colour$xtck1) axis(side = 2, tcl = layout$abanico$dimension$xtcl2 / 200, lwd = 0, lwd.ticks = 1, at = limits.x[2], labels = FALSE, col = layout$abanico$colour$xtck2) ## add lower axis label mtext(xlab[2], at = (limits.x[1] + max(ellipse[,2])) / 2, side = 2, line = 2.5 * layout$abanico$dimension$xlab1.line / 100, col = layout$abanico$colour$xlab1, family = layout$abanico$font.type$xlab1, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$xlab1)[1], cex = cex * layout$abanico$font.size$xlab1/12) ## add upper axis label mtext(xlab[1], at = (limits.x[1] + max(ellipse[,2])) / 2, side = 2, line = -3.5 * layout$abanico$dimension$xlab2.line / 100, col = layout$abanico$colour$xlab2, family = layout$abanico$font.type$xlab2, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$xlab2)[1], cex = cex * layout$abanico$font.size$xlab2/12) ## plot upper x-axis axis(side = 2, 
at = x.axis.ticks[-1], col = layout$abanico$colour$xtck2, col.axis = layout$abanico$colour$xtck2, labels = NA, tcl = layout$abanico$dimension$xtcl2 / 200, cex = cex) ## remove first tick label (infinity) label.x.upper <- label.x.upper[1:(length(x.axis.ticks) - 1)] axis(side = 2, at = x.axis.ticks[-1], labels = label.x.upper, line = -1 * layout$abanico$dimension$xtck2.line / 100 - 2, lwd = 0, col = layout$abanico$colour$xtck2, family = layout$abanico$font.type$xtck2, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$xtck2)[1], col.axis = layout$abanico$colour$xtck2, cex.axis = layout$abanico$font.size$xlab2/12) ## plot y-axis if(y.axis == TRUE) { char.height <- par()$cxy[2] tick.space <- axisTicks(usr = limits.y, log = FALSE) tick.space <- (max(tick.space) - min(tick.space)) / length(tick.space) if(tick.space < char.height * 1.7) { axis(side = 1, tcl = -layout$abanico$dimension$ytcl / 200, lwd = 1, lwd.ticks = 1, at = c(-2, 2), labels = c("", ""), las = 1, col = layout$abanico$colour$ytck) axis(side = 1, at = 0, tcl = 0, line = 2 * layout$abanico$dimension$ytck.line / 100 - 2, labels = paste("\u00B1", "2"), las = 1, family = layout$abanico$font.type$ytck, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$ytck)[1], col.axis = layout$abanico$colour$ytck, cex.axis = layout$abanico$font.size$ylab/12) } else { axis(side = 1, at = seq(-2, 2, by = 2), col = layout$abanico$colour$ytck, col.axis = layout$abanico$colour$ytck, labels = NA, las = 1, tcl = -layout$abanico$dimension$ytcl / 200, cex = cex) axis(side = 1, at = seq(-2, 2, by = 2), line = 2 * layout$abanico$dimension$ytck.line / 100 - 2, lwd = 0, las = 1, col = layout$abanico$colour$ytck, family = layout$abanico$font.type$ytck, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$ytck)[1], col.axis = layout$abanico$colour$ytck, cex.axis = layout$abanico$font.size$ylab/12) } } else { axis(side = 1, at = 0, col = 
layout$abanico$colour$ytck, col.axis = layout$abanico$colour$ytck, labels = NA, las = 1, tcl = -layout$abanico$dimension$ytcl / 200, cex = cex) axis(side = 1, at = 0, line = 2 * layout$abanico$dimension$ytck.line / 100 - 2, lwd = 0, las = 1, col = layout$abanico$colour$ytck, family = layout$abanico$font.type$ytck, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$ytck)[1], col.axis = layout$abanico$colour$ytck, cex.axis = layout$abanico$font.size$ylab/12) } ## plot minor z-ticks for(i in 1:length(tick.values.minor)) { lines(y = c(par()$usr[4], (1 + 0.015 * cex * layout$abanico$dimension$ztcl / 100) * y.max), x = c((tick.values.minor[i] - z.central.global) * min(ellipse[,2]), (tick.values.minor[i] - z.central.global) * min(ellipse[,2])), col = layout$abanico$colour$ztck) } ## plot major z-ticks for(i in 1:length(tick.values.major)) { lines(y = c(par()$usr[4], (1 + 0.03 * cex * layout$abanico$dimension$ztcl / 100) * y.max), x = c((tick.values.major[i] - z.central.global) * min(ellipse[,2]), (tick.values.major[i] - z.central.global) * min(ellipse[,2])), col = layout$abanico$colour$ztck) } ## plot z-axes lines(ellipse, col = layout$abanico$colour$border) lines(y = rep(par()$usr[4], nrow(ellipse)), x = ellipse[,1], col = layout$abanico$colour$ztck) ## plot z-axis text text(y = (1 + 0.06 * cex * layout$abanico$dimension$ztcl / 100) * y.max, x = (tick.values.major - z.central.global) * min(ellipse[,2]), labels = label.z.text, adj = 0.5, family = layout$abanico$font.type$ztck, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$ztck)[1], cex = cex * layout$abanico$font.size$ztck/12) ## plot z-label mtext(text = zlab, at = 0, side = 3, las = 1, adj = 0.5, line = 2.5 * layout$abanico$dimension$zlab.line / 100, col = layout$abanico$colour$zlab, family = layout$abanico$font.type$zlab, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$zlab)[1], cex = cex * 
layout$abanico$font.size$zlab/12) ## plot values and optionally error bars if(error.bars == TRUE) { for(i in 1:length(data)) { arrows(y0 = arrow.coords[[i]][,1], y1 = arrow.coords[[i]][,2], x0 = arrow.coords[[i]][,3], x1 = arrow.coords[[i]][,4], length = 0, angle = 90, code = 3, col = value.bar[i]) } } for(i in 1:length(data)) { points(y = data[[i]][,6][data[[i]][,6] <= limits.x[2]], x = data[[i]][,8][data[[i]][,6] <= limits.x[2]], col = value.dot[i], pch = pch[i], cex = layout$abanico$dimension$pch / 100) } ## calculate KDE width KDE.max <- 0 for(i in 1:length(data)) { KDE.max <- ifelse(test = KDE.max < max(KDE[[i]][,2]), yes = max(KDE[[i]][,2]), no = KDE.max) } ## optionally adjust KDE width for boxplot option if(boxplot == TRUE) { KDE.max <- 1.3 * KDE.max } KDE.scale <- (par()$usr[4] - xy.0[2]) / (KDE.max * 1.05) ## optionally add KDE plot if(kde == TRUE) { ## plot KDE lines for(i in 1:length(data)) { polygon(y = xy.0[2] + KDE[[i]][,2] * KDE.scale, x = (KDE[[i]][,1] - z.central.global) * min(ellipse[,2]), col = kde.fill[i], border = kde.line[i], lwd = 1.7) } ## plot KDE x-axis axis(side = 2, at = c(xy.0[2], y.max), col = layout$abanico$colour$xtck3, col.axis = layout$abanico$colour$xtck3, labels = NA, tcl = -layout$abanico$dimension$xtcl3 / 200, cex = cex) axis(side = 2, at = c(xy.0[2], y.max), labels = as.character(round(c(0, KDE.max.plot), 3)), line = 2 * layout$abanico$dimension$xtck3.line / 100 - 2, lwd = 0, col = layout$abanico$colour$xtck3, family = layout$abanico$font.type$xtck3, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$xtck3)[1], col.axis = layout$abanico$colour$xtck3, cex.axis = layout$abanico$font.size$xtck3/12) mtext(text = paste(xlab[3], " (bw ", round(x = KDE.bw, digits = 3), ")", sep = ""), at = (xy.0[2] + y.max) / 2, side = 2, line = 2.5 * layout$abanico$dimension$xlab3.line / 100, col = layout$abanico$colour$xlab3, family = layout$abanico$font.type$xlab3, font = which(c("normal", "bold", "italic", 
"bold italic") == layout$abanico$font.deco$xlab3)[1], cex = cex * layout$abanico$font.size$xlab3/12) } ## optionally add histogram or dot plot axis if(hist == TRUE) { axis(side = 2, at = c(xy.0[2], y.max), labels = as.character(c(0, hist.max.plot)), line = -1 * layout$abanico$dimension$xtck3.line / 100 - 2, lwd = 0, col = layout$abanico$colour$xtck3, family = layout$abanico$font.type$xtck3, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$xtck3)[1], col.axis = layout$abanico$colour$xtck3, cex.axis = layout$abanico$font.size$xtck3/12) ## add label mtext(text = "n", at = (xy.0[2] + y.max) / 2, side = 2, line = -3.5 * layout$abanico$dimension$xlab2.line / 100, col = layout$abanico$colour$xlab2, family = layout$abanico$font.type$xlab2, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$xlab2)[1], cex = cex * layout$abanico$font.size$xlab2/12) ## plot ticks axis(side = 2, at = c(xy.0[2], y.max), col = layout$abanico$colour$xtck2, col.axis = layout$abanico$colour$xtck2, labels = NA, tcl = layout$abanico$dimension$xtcl2 / 200, cex = cex) ## calculate scaling factor for histogram bar heights hist.scale <- (par()$usr[4] - xy.0[2]) / (KDE.max.plot * 1.05) ## draw each bar for each data set for(i in 1:length(data)) { for(j in 1:length(hist.data[[i]]$density)) { ## calculate x-coordinates hist.x.i <- c(xy.0[2], xy.0[2], xy.0[2] + hist.data[[i]]$density[j] * hist.scale, xy.0[2] + hist.data[[i]]$density[j] * hist.scale) ## calculate y-coordinates hist.y.i <- c((hist.data[[i]]$breaks[j] - z.central.global) * min(ellipse[,2]), (hist.data[[i]]$breaks[j + 1] - z.central.global) * min(ellipse[,2]), (hist.data[[i]]$breaks[j + 1] - z.central.global) * min(ellipse[,2]), (hist.data[[i]]$breaks[j] - z.central.global) * min(ellipse[,2])) ## remove data out of z-axis range hist.y.i <- ifelse(hist.y.i < min(ellipse[,1]), min(ellipse[,1]), hist.y.i) hist.y.i <- ifelse(hist.y.i > max(ellipse[,1]), max(ellipse[,1]), 
hist.y.i) ## draw the bars polygon(y = hist.x.i, x = hist.y.i, col = kde.fill[i], border = kde.line[i]) } } } ## optionally add dot plot if(dots == TRUE) { for(i in 1:length(data)) { for(j in 1:length(hist.data[[i]]$counts)) { ## calculate scaling factor for histogram bar heights dots.distance <- (par()$usr[4] - (xy.0[2] + par()$cxy[1] * 0.4)) / hist.max.plot dots.x.i <- seq(from = xy.0[2] + par()$cxy[2] * 0.4, by = dots.distance, length.out = hist.data[[i]]$counts[j]) dots.y.i <- rep((hist.data[[i]]$mids[j] - z.central.global) * min(ellipse[,2]), length(dots.x.i)) ## remove data out of z-axis range dots.x.i <- dots.x.i[dots.y.i >= min(ellipse[,1]) & dots.y.i <= max(ellipse[,1])] dots.y.i <- dots.y.i[dots.y.i >= min(ellipse[,1]) & dots.y.i <= max(ellipse[,1])] if(max(c(0, dots.x.i), na.rm = TRUE) >= (par()$usr[4] - par()$cxy[2] * 0.4)) { dots.y.i <- dots.y.i[dots.x.i < (par()$usr[4] - par()$cxy[2] * 0.4)] dots.x.i <- dots.x.i[dots.x.i < (par()$usr[4] - par()$cxy[2] * 0.4)] pch.dots <- c(rep(20, length(dots.x.i) - 1), 15) } else { pch.dots <- rep(20, length(dots.x.i)) } ## plot points points(y = dots.x.i, x = dots.y.i, pch = "-", cex = 0.7 * cex, col = kde.line[i]) } } } ## optionally add box plot if(boxplot == TRUE) { for(i in 1:length(data)) { ## draw median line lines(x = c((boxplot.data[[i]]$stats[3,1] - z.central.global) * min(ellipse[,2]), (boxplot.data[[i]]$stats[3,1] - z.central.global) * min(ellipse[,2])), y = c(min(ellipse[,2]) + KDE.max * 0.91, xy.0[2] + KDE.max * 0.96), lwd = 2, col = kde.line[i]) ## draw p25-p75-polygon polygon(y = c(min(ellipse[,2]) + KDE.max * 0.91, min(ellipse[,2]) + KDE.max * 0.91, xy.0[2] + KDE.max * 0.96, xy.0[2] + KDE.max * 0.96), x = c((boxplot.data[[i]]$stats[2,1] - z.central.global) * min(ellipse[,2]), (boxplot.data[[i]]$stats[4,1] - z.central.global) * min(ellipse[,2]), (boxplot.data[[i]]$stats[4,1] - z.central.global) * min(ellipse[,2]), (boxplot.data[[i]]$stats[2,1] - z.central.global) * min(ellipse[,2])), border = 
kde.line[i]) ## draw whiskers lines(y = rep(mean(c(min(ellipse[,2]) + KDE.max * 0.91, xy.0[2] + KDE.max * 0.96)), 2), x = c((boxplot.data[[i]]$stats[2,1] - z.central.global) * min(ellipse[,2]), (boxplot.data[[i]]$stats[1,1] - z.central.global) * min(ellipse[,2])), col = kde.line[i]) lines(y = c(min(ellipse[,2]) + KDE.max * 0.91, xy.0[2] + KDE.max * 0.96), x = rep((boxplot.data[[i]]$stats[1,1] - z.central.global) * min(ellipse[,2]), 2), col = kde.line[i]) lines(y = rep(mean(c(min(ellipse[,2]) + KDE.max * 0.91, xy.0[2] + KDE.max * 0.96)), 2), x = c((boxplot.data[[i]]$stats[4,1] - z.central.global) * min(ellipse[,2]), (boxplot.data[[i]]$stats[5,1] - z.central.global) * min(ellipse[,2])), col = kde.line[i]) lines(y = c(min(ellipse[,2]) + KDE.max * 0.91, xy.0[2] + KDE.max * 0.96), x = rep((boxplot.data[[i]]$stats[5,1] - z.central.global) * min(ellipse[,2]), 2), col = kde.line[i]) ## draw outlier points points(y = rep(mean(c(min(ellipse[,2]) + KDE.max * 0.91, xy.0[2] + KDE.max * 0.96)), length(boxplot.data[[i]]$out)), x = (boxplot.data[[i]]$out - z.central.global) * min(ellipse[,2]), cex = cex * 0.8, col = kde.line[i]) } } ## optionally add stats, i.e. 
min, max, median sample text if(length(stats) > 0) { text(y = stats.data[,1], x = stats.data[,2], pos = 2, labels = round(stats.data[,3], 1), family = layout$abanico$font.type$stats, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$stats)[1], cex = cex * layout$abanico$font.size$stats/12, col = layout$abanico$colour$stats) } ## optionally add rug if(rug == TRUE) { for(i in 1:length(rug.coords)) { lines(y = rug.coords[[i]][1,], x = rug.coords[[i]][2,], col = value.rug[data.global[i,10]]) } } ## plot KDE base line lines(y = c(xy.0[2], xy.0[2]), x = c(min(ellipse[,1]), max(ellipse[,1])), col = layout$abanico$colour$border) ## draw border around plot polygon(y = c(limits.x[1], min(ellipse[,2]), y.max, y.max, min(ellipse[,2])), x = c(0, max(ellipse[,1]), max(ellipse[,1]), min(ellipse[,1]), min(ellipse[,1])), border = layout$abanico$colour$border, lwd = 0.8) ## optionally add legend content if(missing(legend) == FALSE) { ## store and change font familiy par.family <- par()$family par(family = layout$abanico$font.type$legend) legend(y = legend.pos[2], x = 0.8 * legend.pos[1], xjust = legend.adj[2], yjust = legend.adj[1], legend = legend, pch = pch, col = value.dot, text.col = value.dot, text.font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$legend)[1], cex = cex * layout$abanico$font.size$legend/12, bty = "n") ## restore font family par(family = par.family) } ## optionally add subheader text mtext(text = mtext, side = 3, line = (shift.lines - 2 + 3.5) * layout$abanico$dimension$mtext / 100, col = layout$abanico$colour$mtext, family = layout$abanico$font.type$mtext, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$mtext)[1], cex = cex * layout$abanico$font.size$mtext / 12) ## add summary content for(i in 1:length(data)) { if(summary.pos[1] != "sub") { text(x = summary.pos[1], y = summary.pos[2], adj = summary.adj, labels = label.text[[i]], col = summary.col[i], 
family = layout$abanico$font.type$summary, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$summary)[1], cex = cex * layout$abanico$font.size$summary / 12) } else { if(mtext == "") { mtext(side = 3, line = (shift.lines - 1 + 3.5 - i) * layout$abanico$dimension$summary / 100 , text = label.text[[i]], col = summary.col[i], family = layout$abanico$font.type$summary, font = which(c("normal", "bold", "italic", "bold italic") == layout$abanico$font.deco$summary)[1], cex = cex * layout$abanico$font.size$summary / 12) } } } } ##sTeve if(fun & !interactive){sTeve()} ## create numeric output plot.output <- list(xlim = limits.x, ylim = limits.y, zlim = limits.z, polar.box = c(limits.x[1], limits.x[2], min(ellipse[,2]), max(ellipse[,2])), cartesian.box = c(xy.0[1], par()$usr[2], xy.0[2], max(ellipse[,2])), plot.ratio = plot.ratio, data = data, data.global = data.global, KDE = KDE, par = par(no.readonly = TRUE)) ## INTERACTIVE PLOT ---------------------------------------------------------- if (interactive) { if (!requireNamespace("plotly", quietly = TRUE)) stop("The interactive abanico plot requires the 'plotly' package. To install", " this package run 'install.packages('plotly')' in your R console.", call. = FALSE) ##cheat R check (global visible binding error) x <- NA y <- NA ## tidy data ---- data <- plot.output kde <- data.frame(x = data$KDE[[1]][ ,2], y = data$KDE[[1]][ ,1]) # radial scatter plot ---- point.text <- paste0("Measured value:
", data$data.global$De, " ± ", data$data.global$error,"
", "P(",format(data$data.global$precision, digits = 2, nsmall = 1),", ", format(data$data.global$std.estimate, digits = 2, nsmall = 1),")") IAP <- plotly::plot_ly(data = data$data.global, x = precision, y = std.estimate, type = "scatter", mode = "markers", hoverinfo = "text", text = point.text, name = "Points", yaxis = "y") ellipse <- as.data.frame(ellipse) IAP <- plotly::add_trace(IAP, data = ellipse, x = ellipse.x, y = ellipse.y, hoverinfo = "none", name = "z-axis (left)", type = "scatter", mode = "lines", line = list(color = "black", width = 1), yaxis = "y") ellipse.right <- ellipse ellipse.right$ellipse.x <- ellipse.right$ellipse.x * 1/0.75 IAP <- plotly::add_trace(IAP, data = ellipse.right, x = ellipse.x, y = ellipse.y, hoverinfo = "none", name = "z-axis (right)", type = "scatter", mode = "lines", line = list(color = "black", width = 1), yaxis = "y") # z-axis ticks major.ticks.x <- c(data$xlim[2] * 1/0.75, (1 + 0.015 * layout$abanico$dimension$ztcl / 100) * data$xlim[2] * 1/0.75) minor.ticks.x <- c(data$xlim[2] * 1/0.75, (1 + 0.01 * layout$abanico$dimension$ztcl / 100) * data$xlim[2] * 1/0.75) major.ticks.y <- (tick.values.major - z.central.global) * min(ellipse[ ,1]) minor.ticks.y <- (tick.values.minor - z.central.global) * min(ellipse[ ,1]) # major z-tick lines for (i in 1:length(major.ticks.y)) { major.tick <- data.frame(x = major.ticks.x, y = rep(major.ticks.y[i], 2)) IAP <- plotly::add_trace(IAP, data = major.tick, x = x, y = y, showlegend = FALSE, hoverinfo = "none", type = "scatter", mode = "lines", line = list(color = "black", width = 1), yaxis = "y") } # minor z-tick lines for (i in 1:length(minor.ticks.y)) { minor.tick <- data.frame(x = minor.ticks.x, y = rep(minor.ticks.y[i], 2)) IAP <- plotly::add_trace(IAP, data = minor.tick, hoverinfo = "none", x = x, y = y, showlegend = FALSE, type = "scatter", mode = "lines", line = list(color = "black", width = 1), yaxis = "y") } # z-tick label tick.text <- paste(" ", exp(tick.values.major)) tick.pos <- 
data.frame(x = major.ticks.x[2], y = major.ticks.y) IAP <- plotly::add_trace(IAP, data = tick.pos, x = x, y = y, showlegend = FALSE, text = tick.text, textposition = "right", hoverinfo = "none", type = "scatter", mode = "text", yaxis = "y") # Central Line ---- central.line <- data.frame(x = c(-100, data$xlim[2]*1/0.75), y = c(0, 0)) central.line.text <- paste0("Central value: ", format(exp(z.central.global), digits = 2, nsmall = 1)) IAP <- plotly::add_trace(IAP, data = central.line, x = x, y = y, name = "Central line", type = "scatter", mode = "lines", hoverinfo = "text", text = central.line.text, yaxis = "y", line = list(color = "black", width = 0.5, dash = 2)) # KDE plot ---- KDE.x <- xy.0[1] + KDE[[1]][ ,2] * KDE.scale KDE.y <- (KDE[[1]][ ,1] - z.central.global) * min(ellipse[,1]) KDE.curve <- data.frame(x = KDE.x, y = KDE.y) KDE.curve <- KDE.curve[KDE.curve$x != xy.0[1], ] KDE.text <- paste0("Value:", format(exp(KDE[[1]][ ,1]), digits = 2, nsmall = 1), "
", "Density:", format(KDE[[1]][ ,2], digits = 2, nsmall = 1)) IAP <- plotly::add_trace(IAP, data = KDE.curve, hoverinfo = "text", text = KDE.text, x = x, y = y, name = "KDE", type = "scatter", mode = "lines", line = list(color = "red"), yaxis = "y") # set layout ---- IAP <- plotly::layout(IAP, hovermode = "closest", dragmode = "pan", xaxis = list(range = c(data$xlim[1], data$xlim[2] * 1/0.65), zeroline = FALSE, showgrid = FALSE, tickmode = "array", tickvals = x.axis.ticks), yaxis = list(range = data$ylim, zeroline = FALSE, showline = FALSE, showgrid = FALSE, tickmode = "array", tickvals = c(-2, 0, 2)), shapes = list(list(type = "rect", # 2 sigma bar x0 = 0, y0 = -2, x1 = bars[1,3], y1 = 2, xref = "x", yref = "y", fillcolor = "grey", opacity = 0.2)) ) # show and return interactive plot ---- #print(plotly::subplot(IAP, IAP.kde)) print(IAP) return(IAP) } ## restore initial cex par(cex = cex_old) ## create and return numeric output invisible(plot.output) } Luminescence/R/plot_DRTResults.R0000644000176200001440000005565114264017373016304 0ustar liggesusers#' @title Visualise dose recovery test results #' #' @description The function provides a standardised plot output for dose recovery test #' measurements. #' #' @details Procedure to test the accuracy of a measurement protocol to reliably #' determine the dose of a specific sample. Here, the natural signal is erased #' and a known laboratory dose administered which is treated as unknown. Then #' the De measurement is carried out and the degree of congruence between #' administered and recovered dose is a measure of the protocol's accuracy for #' this sample.\cr #' In the plot the normalised De is shown on the y-axis, i.e. obtained De/Given Dose. #' #' @param values [RLum.Results-class] or [data.frame] (**required**): #' input values containing at least De and De error. To plot #' more than one data set in one figure, a `list` of the individual data #' sets must be provided (e.g. `list(dataset.1, dataset.2)`). 
#' #' @param given.dose [numeric] (*optional*): #' given dose used for the dose recovery test to normalise data. #' If only one given dose is provided this given dose is valid for all input #' data sets (i.e., `values` is a list). Otherwise a given dose for each input #' data set has to be provided (e.g., `given.dose = c(100,200)`). #' If `given.dose` is `NULL` the values are plotted without normalisation #' (might be useful for preheat plateau tests). #' **Note:** Unit has to be the same as from the input values (e.g., Seconds or #' Gray). #' #' @param error.range [numeric]: #' symmetric error range in percent will be shown as dashed lines in the plot. #' Set `error.range` to 0 to void plotting of error ranges. #' #' @param preheat [numeric]: #' optional vector of preheat temperatures to be used for grouping the De values. #' If specified, the temperatures are assigned to the x-axis. #' #' @param boxplot [logical]: #' optionally plot values, that are grouped by preheat temperature as boxplots. #' Only possible when `preheat` vector is specified. #' #' @param mtext [character]: #' additional text below the plot title. #' #' @param summary [character] (*optional*): #' adds numerical output to the plot. Can be one or more out of: #' - `"n"` (number of samples), #' - `"mean"` (mean De value), #' - `"weighted$mean"` (error-weighted mean), #' - `"median"` (median of the De values), #' - `"sd.rel"` (relative standard deviation in percent), #' - `"sd.abs"` (absolute standard deviation), #' - `"se.rel"` (relative standard error) and #' - `"se.abs"` (absolute standard error) #' #' and all other measures returned by the function [calc_Statistics]. #' #' @param summary.pos [numeric] or [character] (*with default*): #' optional position coordinates or keyword (e.g. `"topright"`) #' for the statistical summary. Alternatively, the keyword `"sub"` may be #' specified to place the summary below the plot header. 
However, this latter #' option is only possible if `mtext` is not used. #' #' @param legend [character] vector (*optional*): #' legend content to be added to the plot. #' #' @param legend.pos [numeric] or [character] (*with default*): #' optional position coordinates or keyword (e.g. `"topright"`) for the #' legend to be plotted. #' #' @param par.local [logical] (*with default*): #' use local graphical parameters for plotting, e.g. the plot is shown in one #' column and one row. If `par.local = FALSE`, global parameters are inherited, #' i.e. parameters provided via `par()` work #' #' @param na.rm [logical]: indicating whether `NA` values are #' removed before plotting from the input data set #' #' @param ... further arguments and graphical parameters passed to [plot], supported are: #' `xlab`, `ylab`, `xlim`, `ylim`, `main`, `cex`, `las` and `pch` #' #' @return A plot is returned. #' #' @note #' Further data and plot arguments can be added by using the appropriate R #' commands. #' #' @section Function version: 0.1.14 #' #' @author #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)\cr #' Michael Dietze, GFZ Potsdam (Germany) #' #' @seealso [plot] #' #' @references #' Wintle, A.G., Murray, A.S., 2006. A review of quartz optically #' stimulated luminescence characteristics and their relevance in #' single-aliquot regeneration dating protocols. Radiation Measurements, 41, #' 369-391. 
#' #' @keywords dplot #' #' @examples #' #' ## read example data set and misapply them for this plot type #' data(ExampleData.DeValues, envir = environment()) #' #' ## plot values #' plot_DRTResults( #' values = ExampleData.DeValues$BT998[7:11,], #' given.dose = 2800, #' mtext = "Example data") #' #' ## plot values with legend #' plot_DRTResults( #' values = ExampleData.DeValues$BT998[7:11,], #' given.dose = 2800, #' legend = "Test data set") #' #' ## create and plot two subsets with randomised values #' x.1 <- ExampleData.DeValues$BT998[7:11,] #' x.2 <- ExampleData.DeValues$BT998[7:11,] * c(runif(5, 0.9, 1.1), 1) #' #' plot_DRTResults( #' values = list(x.1, x.2), #' given.dose = 2800) #' #' ## some more user-defined plot parameters #' plot_DRTResults( #' values = list(x.1, x.2), #' given.dose = 2800, #' pch = c(2, 5), #' col = c("orange", "blue"), #' xlim = c(0, 8), #' ylim = c(0.85, 1.15), #' xlab = "Sample aliquot") #' #' ## plot the data with user-defined statistical measures as legend #' plot_DRTResults( #' values = list(x.1, x.2), #' given.dose = 2800, #' summary = c("n", "weighted$mean", "sd.abs")) #' #' ## plot the data with user-defined statistical measures as sub-header #' plot_DRTResults( #' values = list(x.1, x.2), #' given.dose = 2800, #' summary = c("n", "weighted$mean", "sd.abs"), #' summary.pos = "sub") #' #' ## plot the data grouped by preheat temperatures #' plot_DRTResults( #' values = ExampleData.DeValues$BT998[7:11,], #' given.dose = 2800, #' preheat = c(200, 200, 200, 240, 240)) #' #' ## read example data set and misapply them for this plot type #' data(ExampleData.DeValues, envir = environment()) #' #' ## plot values #' plot_DRTResults( #' values = ExampleData.DeValues$BT998[7:11,], #' given.dose = 2800, #' mtext = "Example data") #' #' ## plot two data sets grouped by preheat temperatures #' plot_DRTResults( #' values = list(x.1, x.2), #' given.dose = 2800, #' preheat = c(200, 200, 200, 240, 240)) #' #' ## plot the data grouped by preheat 
temperatures as boxplots #' plot_DRTResults( #' values = ExampleData.DeValues$BT998[7:11,], #' given.dose = 2800, #' preheat = c(200, 200, 200, 240, 240), #' boxplot = TRUE) #' #' @md #' @export plot_DRTResults <- function( values, given.dose = NULL, error.range = 10, preheat, boxplot = FALSE, mtext, summary, summary.pos, legend, legend.pos, par.local = TRUE, na.rm = FALSE, ... ){ ## Validity checks ---------------------------------------------------------- ##avoid crash for wrongly set boxlot argument if(missing(preheat) & boxplot == TRUE){ warning("[plot_DRTResults()] Option 'boxplot' not valid without any value in 'preheat'. Reset to FALSE.") boxplot <- FALSE } if(missing(summary) == TRUE) {summary <- NULL} if(missing(summary.pos) == TRUE) {summary.pos <- "topleft"} if(missing(legend.pos) == TRUE) {legend.pos <- "topright"} if(missing(mtext) == TRUE) {mtext <- ""} ## Homogenise and check input data if(is(values, "list") == FALSE) {values <- list(values)} for(i in 1:length(values)) { if(is(values[[i]], "RLum.Results")==FALSE & is(values[[i]], "data.frame")==FALSE){ stop(paste("[plot_DRTResults()] Wrong input data format", "(!= 'data.frame' or 'RLum.Results')")) } else { if(is(values[[i]], "RLum.Results")==TRUE){ values[[i]] <- get_RLum(values[[i]])[,1:2] } } } ## Check input arguments ---------------------------------------------------- for(i in 1:length(values)) { ##check for preheat temperature values if(missing(preheat) == FALSE) { if(length(preheat) != nrow(values[[i]])){ stop("[plot_DRTResults()] number of preheat temperatures != De values!") } } ##remove NA values; yes Micha, it is not that simple if(na.rm == TRUE){ ##currently we assume that all input data sets comprise a similar of data if(!missing(preheat) & i == length(values)){ ##find and mark NA value indicies temp.NA.values <- unique(c(which(is.na(values[[i]][,1])), which(is.na(values[[i]][,2])))) ##remove preheat entries preheat <- preheat[-temp.NA.values] } values[[i]] <- na.exclude(values[[i]]) } 
} ## create global data set values.global <- NULL n.values <- NULL for(i in 1:length(values)) { values.global <- rbind(values.global, values[[i]]) n.values <- c(n.values, nrow(values[[i]])) } ## Set plot format parameters ----------------------------------------------- extraArgs <- list(...) # read out additional arguments list main <- if("main" %in% names(extraArgs)) {extraArgs$main} else {"Dose recovery test"} xlab <- if("xlab" %in% names(extraArgs)) {extraArgs$xlab} else { ifelse(missing(preheat) == TRUE, "# Aliquot", "Preheat temperature [\u00B0C]") } ylab <- if("ylab" %in% names(extraArgs)) {extraArgs$ylab} else {if(!is.null(given.dose)){ expression(paste("Normalised ", D[e], sep="")) }else{expression(paste(D[e], " [s]"), sep = "")}} xlim <- if("xlim" %in% names(extraArgs)) {extraArgs$xlim} else {c(1, max(n.values) + 1)} ylim <- if("ylim" %in% names(extraArgs)) {extraArgs$ylim} else {c(0.75, 1.25)} #check below for further corrections if boundaries exceed set range cex <- if("cex" %in% names(extraArgs)) {extraArgs$cex} else {1} pch <- if("pch" %in% names(extraArgs)) {extraArgs$pch} else { abs(seq(from = 20, to = -100)) } ##axis labels las <- if("las" %in% names(extraArgs)) extraArgs$las else 0 fun <- if("fun" %in% names(extraArgs)) {extraArgs$fun} else {FALSE} ## calculations and settings------------------------------------------------- ## normalise data if given.dose is given if(!is.null(given.dose)){ if(length(given.dose) > 1){ if(length(values) < length(given.dose)){ stop("[plot_DRTResults()] 'given.dose' > number of input data sets!", call. 
= FALSE) } for(i in 1:length(values)) { values[[i]] <- values[[i]]/given.dose[i] } }else{ for(i in 1:length(values)) { values[[i]] <- values[[i]]/given.dose } } } ##correct ylim for data set which exceed boundaries if((max(sapply(1:length(values), function(x){max(values[[x]][,1], na.rm = TRUE)}))>1.25 | min(sapply(1:length(values), function(x){min(values[[x]][,1], na.rm = TRUE)}))<0.75) & ("ylim" %in% names(extraArgs)) == FALSE){ ylim <- c( min(sapply(1:length(values), function(x){ min(values[[x]][,1], na.rm = TRUE) - max(values[[x]][,2], na.rm = TRUE)})), max(sapply(1:length(values), function(x){ max(values[[x]][,1], na.rm = TRUE) + max(values[[x]][,2], na.rm = TRUE)}))) } ## optionally group data by preheat temperature if(missing(preheat) == FALSE) { modes <- as.numeric(rownames(as.matrix(table(preheat)))) values.preheat <- list(NA) values.boxplot <- list(NA) for(i in 1:length(modes)) { for(j in 1:length(values)) { values.preheat[[length(values.preheat) + 1]] <- cbind(values[[j]][preheat == modes[i],], preheat[preheat == modes[i]]) values.boxplot[[length(values.boxplot) + 1]] <- values[[j]][preheat == modes[i],1] } j <- 1 } values.preheat[[1]] <- NULL values.boxplot[[1]] <- NULL modes.plot <- rep(modes, each = length(values)) } else { modes <- 1 } ## assign colour indices col <- if("col" %in% names(extraArgs)) {extraArgs$col} else { if(missing(preheat) == TRUE) { rep(seq(from = 1, to = length(values)), each = length(modes)) } else { rep(seq(from = 1, to = length(values)), length(modes)) } } ## calculate and paste statistical summary if(summary.pos[1] != "sub") { label.text <- lapply(1:length(values), function(i) { .create_StatisticalSummaryText( x = calc_Statistics(values[[i]]), keywords = summary, digits = 2, sep = " \n", prefix = paste(rep("\n", (i - 1) * length(summary)), collapse = "") ) }) }else{ label.text <- lapply(1:length(values), function(i) { .create_StatisticalSummaryText( x = calc_Statistics(values[[i]]), keywords = summary, digits = 2, sep = " | " ) 
}) } ## convert keywords into summary placement coordinates if(missing(summary.pos) == TRUE) { summary.pos <- c(xlim[1], ylim[2]) summary.adj <- c(0, 1) } else if(length(summary.pos) == 2) { summary.pos <- summary.pos summary.adj <- c(0, 1) } else if(summary.pos[1] == "topleft") { summary.pos <- c(xlim[1], ylim[2]) summary.adj <- c(0, 1) } else if(summary.pos[1] == "top") { summary.pos <- c(mean(xlim), ylim[2]) summary.adj <- c(0.5, 1) } else if(summary.pos[1] == "topright") { summary.pos <- c(xlim[2], ylim[2]) summary.adj <- c(1, 1) } else if(summary.pos[1] == "left") { summary.pos <- c(xlim[1], mean(ylim)) summary.adj <- c(0, 0.5) } else if(summary.pos[1] == "center") { summary.pos <- c(mean(xlim), mean(ylim)) summary.adj <- c(0.5, 0.5) } else if(summary.pos[1] == "right") { summary.pos <- c(xlim[2], mean(ylim)) summary.adj <- c(1, 0.5) }else if(summary.pos[1] == "bottomleft") { summary.pos <- c(xlim[1], ylim[1]) summary.adj <- c(0, 0) } else if(summary.pos[1] == "bottom") { summary.pos <- c(mean(xlim), ylim[1]) summary.adj <- c(0.5, 0) } else if(summary.pos[1] == "bottomright") { summary.pos <- c(xlim[2], ylim[1]) summary.adj <- c(1, 0) } ## convert keywords into legend placement coordinates if(missing(legend.pos) == TRUE) { legend.pos <- c(xlim[2], ylim[2]) legend.adj <- c(1, 1) } else if(length(legend.pos) == 2) { legend.pos <- legend.pos legend.adj <- c(0, 1) } else if(legend.pos[1] == "topleft") { legend.pos <- c(xlim[1], ylim[2]) legend.adj <- c(0, 1) } else if(legend.pos[1] == "top") { legend.pos <- c(mean(xlim), ylim[2]) legend.adj <- c(0.5, 1) } else if(legend.pos[1] == "topright") { legend.pos <- c(xlim[2], ylim[2]) legend.adj <- c(1, 1) } else if(legend.pos[1] == "left") { legend.pos <- c(xlim[1], mean(ylim)) legend.adj <- c(0, 0.5) } else if(legend.pos[1] == "center") { legend.pos <- c(mean(xlim), mean(ylim)) legend.adj <- c(0.5, 0.5) } else if(legend.pos[1] == "right") { legend.pos <- c(xlim[2], mean(ylim)) legend.adj <- c(1, 0.5) } else 
if(legend.pos[1] == "bottomleft") { legend.pos <- c(xlim[1], ylim[1]) legend.adj <- c(0, 0) } else if(legend.pos[1] == "bottom") { legend.pos <- c(mean(xlim), ylim[1]) legend.adj <- c(0.5, 0) } else if(legend.pos[1] == "bottomright") { legend.pos <- c(xlim[2], ylim[1]) legend.adj <- c(1, 0) } ## Plot output -------------------------------------------------------------- ## determine number of subheader lines to shif the plot shift.lines <- if(summary.pos[1] == "sub" & mtext == "") { length(label.text) - 1 } else {1} ## setup plot area if(par.local){ if (shift.lines <= 0) shift.lines <- 1 par.default <- par(mfrow = c(1, 1), cex = cex, oma = c(0, 1, shift.lines - 1, 1)) on.exit(par(par.default)) } ## optionally plot values and error bars if(boxplot == FALSE) { ## plot data and error if(missing(preheat) == TRUE) { ## create empty plot plot(NA,NA, xlim = xlim, ylim = ylim, xlab = xlab, ylab = ylab, xaxt = "n", las = las, main = "") ##add x-axis ... this avoids digits in the axis labelling axis(side = 1, at = 1:(nrow(values[[1]])+1), labels = 1:(nrow(values[[1]])+1), las = las) ## add title title(main = main, line = shift.lines + 2) ## add additional lines if (!is.null(given.dose)) { abline(h = 1) if (error.range > 0) { ## error range lines abline(h = 1 * (1 + error.range / 100), lty = 2) abline(h = 1 * (1 - error.range / 100), lty = 2) ## error range labels text( par()$usr[2], (1 + error.range / 100) + 0.02, paste("+", error.range , " %", sep = ""), pos = 2, cex = 0.8 ) text( par()$usr[2], (1 - error.range / 100) - 0.02, paste("-", error.range , "%", sep = ""), pos = 2, cex = 0.8 ) } } ## add data and error bars for(i in 1:length(values)) { points(x = c(1:nrow(values[[i]])), y = values[[i]][,1], pch = if(nrow(values[[i]]) == length(pch)){ pch } else { pch[i] }, col = if(nrow(values[[i]]) == length(col)){ col } else { col[i] }, cex = 1.2 * cex) arrows(c(1:nrow(values[[i]])), values[[i]][,1] + values[[i]][,2], c(1:nrow(values[[i]])), values[[i]][,1] - values[[i]][,2], 
angle = 90, length = 0.075, code = 3, col = if(nrow(values[[i]]) == length(col)){ col } else { col[i] }) ## add summary content if(summary.pos[1] != "sub") { text(x = summary.pos[1], y = summary.pos[2], adj = summary.adj, labels = label.text[[i]], cex = 0.8 * cex, col = if(nrow(values[[i]]) == length(col)){ "black" } else { col[i] }) } else { if(mtext == "") { mtext(side = 3, line = - i + 2.5, text = label.text[[i]], col = if(nrow(values[[i]]) == length(col)){ "black" } else { col[i] }, cex = cex * 0.8) } } } } else { ## option for provided preheat data ## create empty plot plot(NA,NA, xlim = c(min(modes.plot) * 0.9, max(modes.plot) * 1.1), ylim = ylim, xlab = xlab, ylab = ylab, las = las, main = "", axes = FALSE, frame.plot = TRUE) ## add axes axis(1, at = modes.plot, labels = modes.plot) axis(2) ## add title title(main = main, line = shift.lines + 2) ## add additional lines if (!is.null(given.dose)) { abline(h = 1) if (error.range > 0) { ## error range lines abline(h = 1 * (1 + error.range / 100), lty = 2) abline(h = 1 * (1 - error.range / 100), lty = 2) ## error range labels text( par()$usr[2], (1 + error.range / 100) + 0.02, paste("+", error.range , " %", sep = ""), pos = 2, cex = 0.8 ) text( par()$usr[2], (1 - error.range / 100) - 0.02, paste("-", error.range , "%", sep = ""), pos = 2, cex = 0.8 ) } } ## plot values for(i in 1:length(values.preheat)) { points(x = values.preheat[[i]][,3], y = values.preheat[[i]][,1], pch = pch[i], col = col[i], cex = 1.2 * cex) arrows(values.preheat[[i]][,3], values.preheat[[i]][,1] + values.preheat[[i]][,2], values.preheat[[i]][,3], values.preheat[[i]][,1] - values.preheat[[i]][,2], angle = 90, length = 0.075, code = 3, col = col[i]) } } } ## optionally, plot boxplot if(boxplot) { ## create empty plot boxplot(values.boxplot, names = modes.plot, ylim = ylim, xlab = xlab, ylab = ylab, las = las, xaxt = "n", main = "", border = col) ## add axis label, if necessary if (length(modes.plot) == 1) { axis(side = 1, at = 1, labels = 
modes.plot, las = las) } else if (length(modes.plot) > length(unique(modes.plot))){ ticks <- seq(from = 1 + ((length(values.boxplot)/length(unique(modes.plot)) - 1)/2), to = length(values.boxplot), by = length(values.boxplot)/length(unique(modes.plot))) axis( side = 1, at = ticks, las = las, labels = unique(modes.plot)) ##polygon for a better graphical representation of the groups polygon.x <- seq( 1,length(values.boxplot), by = length(values.boxplot) / length(unique(modes.plot)) ) polygon.step <- unique(diff(polygon.x) - 1) for (x.plyg in polygon.x) { polygon( x = c(x.plyg,x.plyg,x.plyg + polygon.step, x.plyg + polygon.step), y = c( par()$usr[3], ylim[1] - (ylim[1] - par()$usr[3]) / 2, ylim[1] - (ylim[1] - par()$usr[3]) / 2, par()$usr[3] ), col = "grey", border = "grey") } }else{ axis(side = 1, at = 1:length(unique(modes.plot)), labels = unique(modes.plot), las = las) } ## add title title(main = main, line = shift.lines + 2) ## add additional lines if(!is.null(given.dose)){ abline(h = 1) if(error.range > 0){ ## error range lines abline(h = 1 * (1 + error.range / 100), lty = 2) abline(h = 1 * (1 - error.range / 100), lty = 2) ## error range labels text(par()$usr[2], (1 + error.range / 100) + 0.02, paste("+", error.range ," %", sep = ""), pos = 2, cex = 0.8) text(par()$usr[2], (1 - error.range / 100) - 0.02, paste("-", error.range ,"%", sep = ""), pos = 2, cex = 0.8) } } ## plot data and error for(i in 1:length(values)) { ## add summary content if(summary.pos[1] != "sub") { text(x = summary.pos[1], y = summary.pos[2], adj = summary.adj, labels = label.text[[i]], cex = 0.8 * cex, col = if(nrow(values[[i]]) == length(col)){ "black" } else { col[i] }) } else { if(mtext == "") { mtext(side = 3, line = - i + 2.5, text = label.text[[i]], col = if(nrow(values[[i]]) == length(col)){ "black" } else { col[i] }, cex = cex * 0.8) } } } } ## optionally add legend content if(missing(legend) == FALSE) { legend(x = legend.pos[1], y = legend.pos[2], xjust = legend.adj[1], yjust = 
legend.adj[2], legend = legend, col = unique(col), pch = unique(pch), lty = 1, cex = cex * 0.8) } ## optionally add subheader text mtext(side = 3, line = shift.lines, text = mtext, cex = 0.8 * cex) ##FUN by R Luminescence Team if(fun == TRUE) {sTeve()} } Luminescence/R/RLum.Analysis-class.R0000644000176200001440000006512614264017373016775 0ustar liggesusers#' @include get_RLum.R set_RLum.R length_RLum.R structure_RLum.R names_RLum.R smooth_RLum.R NULL #' Class `"RLum.Analysis"` #' #' Object class to represent analysis data for protocol analysis, i.e. all curves, #' spectra etc. from one measurements. Objects from this class are produced, #' by e.g. [read_XSYG2R], [read_Daybreak2R] #' #' #' @name RLum.Analysis-class #' #' @docType class #' #' @slot protocol #' Object of class [character] describing the applied measurement protocol #' #' @slot records #' Object of class [list] containing objects of class [RLum.Data-class] #' #' @note #' The method [structure_RLum] is currently just available for objects #' containing [RLum.Data.Curve-class]. #' #' @section Objects from the Class: #' Objects can be created by calls of the form `set_RLum("RLum.Analysis", ...)`. 
#'
#' @section Class version: 0.4.16
#'
#' @author
#' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#' @seealso [Risoe.BINfileData2RLum.Analysis],
#' [Risoe.BINfileData-class], [RLum-class]
#'
#' @keywords classes methods
#'
#' @examples
#'
#' showClass("RLum.Analysis")
#'
#' ##set empty object
#' set_RLum(class = "RLum.Analysis")
#'
#' ###use example data
#' ##load data
#' data(ExampleData.RLum.Analysis, envir = environment())
#'
#' ##show curves in object
#' get_RLum(IRSAR.RF.Data)
#'
#' ##show only the first object, but by keeping the object
#' get_RLum(IRSAR.RF.Data, record.id = 1, drop = FALSE)
#'
#' @keywords internal
#'
#' @md
#' @export
## S4 container for one measurement sequence: a protocol label plus a plain
## list of RLum.Data-derived records; inherits the common slots from "RLum"
setClass("RLum.Analysis",
         slots = list(
           protocol = "character",
           records = "list"
         ),
         contains = "RLum",
         prototype = list (
           protocol = NA_character_,
           records = list()
         )
)

# as() -----------------------------------------------------------------------------------------
##LIST
##COERCE RLum.Analysis >> list AND list >> RLum.Analysis
#' as() - RLum-object coercion
#'
#' for `[RLum.Analysis-class]`
#'
#' **[RLum.Analysis-class]**
#'
#' \tabular{ll}{
#'  **from** \tab **to**\cr
#'  `list` \tab `list`\cr
#' }
#'
#' Given that the [list] consists of [RLum.Analysis-class] objects.
#'
#' @md
#' @name as
## list -> RLum.Analysis: wrap the list as the @records slot, protocol unknown
setAs("list", "RLum.Analysis",
      function(from, to){
        new(to,
            protocol = NA_character_,
            records = from)
      })

## RLum.Analysis -> list: return the records as a plain list.
## NOTE: seq_along() replaces the former 1:length() construction, which
## produced c(1, 0) for an empty @records slot and hence a
## "subscript out of bounds" error; an empty object now coerces to list().
setAs("RLum.Analysis", "list",
      function(from){
        lapply(seq_along(from@records), function(x){
          from@records[[x]]
        })
      })

# show() --------------------------------------------------------------------------------------
#' @describeIn RLum.Analysis
#' Show structure of `RLum.Analysis` object
#'
#' @md
#' @export
setMethod("show",
          signature(object = "RLum.Analysis"),
          function(object){
            ##print header
            cat("\n [RLum.Analysis-class]")

            ##show slot originator; for compatibility reasons with old example data,
            ##here a check whether the slot exists at all
            if(.hasSlot(object, "originator")){cat("\n\t originator:", paste0(object@originator,"()"))}

            cat("\n\t protocol:", object@protocol)
            cat("\n\t additional info elements: ", if(.hasSlot(object, "info")){length(object@info)}else{0})
            cat("\n\t number of records:", length(object@records))

            #skip this part if nothing is included in the object
            if(length(object@records) > 0){
              ##get object class types
              temp <- vapply(object@records, function(x){
                class(x)[1]

              }, FUN.VALUE = vector(mode = "character", length = 1))

              ##print object class types, one group per class
              lapply(1:length(table(temp)), function(x){
                ##show RLum class type
                cat("\n\t .. :", names(table(temp)[x]),":",table(temp)[x])

                ##show structure
                ##set width option ... just an implementation for the tutorial output
                if(getOption("width")<=50) temp.width <- 4 else temp.width  <- 7

                ##set line break variable (toggled from inside the vapply below)
                linebreak <- FALSE
                env <- environment()

                ##create terminal output: one "#<i> <recordType>" token per record,
                ##joined by " | ", " <> " (records sharing a parentID), or a line break
                terminal_output <- vapply(seq_along(object@records), function(i) {
                  if (names(table(temp)[x]) == is(object@records[[i]])[1]) {
                    if (i %% temp.width == 0 & i != length(object@records)) {
                      assign(x = "linebreak", value = TRUE, envir = env)
                    }

                    ##FIRST
                    first <- paste0("#", i, " ", object@records[[i]]@recordType)

                    ##LAST: "<>" marks records that belong to the same parent record
                    if (i < length(object@records) &&
                        !is.null(object@records[[i]]@info[["parentID"]]) &&
                        !is.null(object@records[[i + 1]]@info[["parentID"]]) &&
                        (object@records[[i]]@info[["parentID"]] == object@records[[i+1]]@info[["parentID"]])) {
                      last <- " <> "

                    }else {
                      last <- " | "
                      if (i == length(object@records)) {
                        last <- ""

                      } else if (linebreak) {
                        last <- "\n\t .. .. : "
                        assign(x = "linebreak", value = FALSE, envir = env)
                      }
                    }
                    return(paste0(first,last))

                  }else{
                    return("")
                  }
                }, FUN.VALUE = vector(mode = "character", length = 1))

                ##print on screen, differentiate between records with many
                ##curves or just one
                if(any(grepl(terminal_output, pattern = "<>", fixed = TRUE))){
                  cat("\n\t .. .. : ",
                      gsub(pattern = "|", replacement = "\n\t .. .. :", x = terminal_output, fixed = TRUE),
                      sep = "")

                } else{
                  cat("\n\t .. .. : ", terminal_output, sep = "")
                }
              })

            }else{
              cat("\n\t >> This is an empty object, which cannot be used for further analysis! <<")
            }
          }
)##end show method

# set_RLum() ----------------------------------------------------------------------------------
#' @describeIn RLum.Analysis
#' Construction method for [RLum.Analysis-class] objects.
#'
#' @param class [`set_RLum`] [character] (**required**):
#' name of the `RLum` class to be created
#'
#' @param originator [`set_RLum`] [character] (*automatic*):
#' contains the name of the calling function (the function that produces this object);
#' can be set manually.
#'
#' @param .uid [`set_RLum`] [character] (*automatic*):
#' sets an unique ID for this object using the internal C++ function `create_UID`.
#'
#' @param .pid [`set_RLum`] [character] (*with default*):
#' option to provide a parent id for nesting at will.
#'
#' @param protocol [`set_RLum`] [character] (*optional*):
#' sets protocol type for analysis object. Value may be used by subsequent analysis functions.
#'
#' @param records [`set_RLum`] [list] (**required**):
#' list of [RLum.Analysis-class] objects
#'
#' @param info [`set_RLum`] [list] (*optional*):
#' a list containing additional info data for the object
#'
#' **`set_RLum`**:
#'
#' Returns an [RLum.Analysis-class] object.
#'
#' @md
#' @export
setMethod(
  "set_RLum",
  signature = "RLum.Analysis",
  definition = function(
    class,
    originator,
    .uid,
    .pid,
    protocol = NA_character_,
    records = list(),
    info = list()) {

    ## start from an empty instance and fill the slots directly
    ## (much faster than passing everything through new())
    out <- new(Class = "RLum.Analysis")

    ## these two slots are set the same way in both branches below
    out@originator <- originator
    out@.uid <- .uid

    if (inherits(records, "RLum.Analysis")) {
      ## self-set: recycle slots of an existing RLum.Analysis object, unless
      ## the caller explicitly supplied a replacement value (missing() check)
      out@protocol <- if (missing(protocol)) records@protocol else protocol
      out@records <- records@records
      out@info <- if (missing(info)) records@info else c(records@info, info)
      out@.pid <- if (missing(.pid)) records@.uid else .pid

    } else {
      ## regular construction: take the arguments as provided
      out@protocol <- protocol
      out@records <- records
      out@info <- info
      out@.pid <- .pid
    }

    return(out)
  }
)

# get_RLum() ----------------------------------------------------------------------------------
#' @describeIn RLum.Analysis
#' Accessor method for RLum.Analysis object.
#'
#' The slots record.id, `@recordType`, `@curveType` and `@RLum.type` are optional to allow for records
#' limited by their id (list index number), their record type (e.g. `recordType = "OSL"`)
#' or object type.
#'
#' Example: curve type (e.g. `curveType = "predefined"` or `curveType ="measured"`)
#'
#' The selection of a specific RLum.type object superimposes the default selection.
#' Currently supported objects are: RLum.Data.Curve and RLum.Data.Spectrum
#'
#' @param object [`get_RLum`]: [`names_RLum`], [`length_RLum`], [`structure_RLum`] (**required**):
#' an object of class [RLum.Analysis-class]
#'
#' @param record.id [`get_RLum`]: [numeric] or [logical] (*optional*):
#' IDs of specific records. If of type `logical` the entire id range is assumed
#' and `TRUE` and `FALSE` indicates the selection.
#'
#' @param recordType [`get_RLum`]: [character] (*optional*):
#' record type (e.g., "OSL"). Can be also a vector, for multiple matching,
#' e.g., `recordType = c("OSL", "IRSL")`
#'
#' @param curveType [`get_RLum`]: [character] (*optional*):
#' curve type (e.g. "predefined" or "measured")
#'
#' @param RLum.type [`get_RLum`]: [character] (*optional*):
#' RLum object type. Defaults to "RLum.Data.Curve" and "RLum.Data.Spectrum".
#'
#' @param get.index [`get_RLum`]: [logical] (*optional*):
#' return a numeric vector with the index of each element in the RLum.Analysis object.
#'
#' @param recursive [`get_RLum`]: [logical] (*with default*):
#' if `TRUE` (the default) and the result of the `get_RLum()` request is a single
#' object this object will be unlisted, means only the object itself and no
#' list containing exactly one object is returned. Mostly this makes things
#' easier, however, if this method is used within a loop this might be undesired.
#'
#' @param drop [`get_RLum`]: [logical] (*with default*):
#' coerce to the next possible layer (which are `RLum.Data`-objects),
#' `drop = FALSE` keeps the original `RLum.Analysis`
#'
#' @param info.object [`get_RLum`]: [character] (*optional*):
#' name of the wanted info element
#'
#' @param subset [`get_RLum`]: [expression] (*optional*):
#' logical expression indicating elements or rows to keep: missing values are
#' taken as false. This argument takes precedence over all other arguments,
#' meaning they are not considered when subsetting the object.
#'
#' @param env [`get_RLum`]: [environment] (*with default*):
#' An environment passed to [eval] as the enclosure. This argument is only
#' relevant when subsetting the object and should not be used manually.
#'
#' @return
#'
#' **`get_RLum`**:
#'
#' Returns:
#'
#' 1. [list] of [RLum.Data-class] objects or
#' 2. Single [RLum.Data-class] object, if only one object is contained and `recursive = FALSE` or
#' 3. [RLum.Analysis-class] objects for `drop = FALSE`
#'
#' @md
#' @export
setMethod("get_RLum",
          signature = ("RLum.Analysis"),
          ## three mutually exclusive code paths, checked in this order:
          ## (1) non-NULL `subset` expression, (2) `info.object` lookup,
          ## (3) record selection by id/type
          function(object, record.id = NULL, recordType = NULL, curveType = NULL, RLum.type = NULL,
                   protocol = "UNKNOWN", get.index = NULL, drop = TRUE, recursive = TRUE,
                   info.object = NULL, subset = NULL, env = parent.frame(2)) {
            if (!is.null(substitute(subset))) {
              # To account for different lengths and elements in the @info slot we first
              # check all unique elements (in all records)
              info_el <- unique(unlist(lapply(object@records, function(el) names(el@info))))

              # build one data.frame row per record (curveType, recordType + all info
              # elements) that serves as the evaluation environment for `subset`
              envir <- as.data.frame(do.call(rbind,
                lapply(object@records, function(el) {
                  val <- c(curveType = el@curveType, recordType = el@recordType, unlist(el@info))

                  # add missing info elements and set NA
                  if (any(!info_el %in% names(val))) {
                    val_new <- setNames(rep(NA, length(info_el[!info_el %in% names(val)])), info_el[!info_el %in% names(val)])
                    val <- c(val, val_new)
                  }

                  # order the named char vector by its names so we don't mix up the columns
                  val <- val[order(names(val))]
                  return(val)
                })
              ), stringsAsFactors = FALSE)

              ##check for a logical expression, to avoid problems afterwards
              # if(class(eval(substitute(subset), envir = envir, enclos = env))[1] != "logical")
              #  stop("[get_RLum] The argument 'subset' does not contain a logical expression!", call. = FALSE)

              ##select relevant rows; evaluate the unevaluated `subset` expression
              ##against the data.frame built above (non-standard evaluation)
              sel <- tryCatch(eval(
                expr = substitute(subset),
                envir = envir,
                enclos = env
              ),
              error = function(e) {
                stop("[get_RLum()] Invalid subset options. \nValid terms are: ", paste(names(envir), collapse = ", "),
                     call. = FALSE)
              })

              if (all(is.na(sel)))
                sel <- FALSE

              if (any(sel)) {
                ##keep only matching records, return the (still) RLum.Analysis object
                object@records <- object@records[sel]
                return(object)

              } else {
                ##nothing matched: print the unique values per column to guide the user
                tmp <- mapply(function(name, op) {
                  message(" ",name, ": ", paste(unique(op), collapse = ", "))
                }, names(envir), envir)
                message("\n [get_RLum()] Invalid value, please refer to unique options given above.")
                return(NULL)
              }
            }

            ##if info.object is set, only the info objects are returned
            else if(!is.null(info.object)) {
              if(info.object %in% names(object@info)){
                unlist(object@info[info.object])

              }else{
                ##check for entries
                if(length(object@info) == 0){
                  warning(
                    "[get_RLum()] This RLum.Analysis object has no info objects! NULL returned!)",
                    call. = FALSE)

                }else{
                  warning(paste0(
                    "[get_RLum()] Invalid info.object name. Valid names are: ",
                    paste(names(object@info), collapse = ", ")
                  ),
                  call. = FALSE)
                }
                return(NULL)
              }

            } else {
              ##check for records
              if (length(object@records) == 0) {
                warning("[get_RLum()] This RLum.Analysis object has no records! NULL returned!)")
                return(NULL)
              }

              ##record.id
              if (is.null(record.id)) {
                record.id <- c(1:length(object@records))

              } else if (!is.numeric(record.id) & !is.logical(record.id)) {
                stop("[get_RLum()] 'record.id' has to be of type 'numeric' or 'logical'!",
                     call. = FALSE)
              }

              ##logical needs a slightly different treatment
              ##Why do we need this? Because a lot of standard R functions work with logical
              ##values instead of numerical indicies
              if (is.logical(record.id)) {
                record.id <- c(1:length(object@records))[record.id]
              }

              ##check if record.id exists
              if (FALSE %in% (abs(record.id) %in% (1:length(object@records)))) {
                try(stop("[get_RLum()] At least one 'record.id' is invalid!",
                         call. = FALSE))
                return(NULL)
              }

              ##recordType: default is the set of types present in the object
              if (is.null(recordType)) {
                recordType <- unique(vapply(object@records, function(x) x@recordType, character(1)))

              } else if (!inherits(recordType, "character")){
                stop("[get_RLum()] 'recordType' has to be of type 'character'!",
                     call. = FALSE)
              }

              ##curveType: default is the set of curve types present in the object
              if (is.null(curveType)) {
                curveType <- unique(unlist(lapply(1:length(object@records), function(x) {
                  object@records[[x]]@curveType
                })))

              } else if (!is(curveType, "character")) {
                stop("[get_RLum()] 'curveType' has to be of type 'character'!",
                     call. = FALSE)
              }

              ##RLum.type
              if (is.null(RLum.type)) {
                RLum.type <- c("RLum.Data.Curve", "RLum.Data.Spectrum", "RLum.Data.Image")

              } else if (!is(RLum.type, "character")) {
                stop("[get_RLum()] 'RLum.type' has to be of type 'character'!",
                     call. = FALSE)
              }

              ##get.index
              if (is.null(get.index)) {
                get.index <- FALSE

              } else if (!is(get.index, "logical")) {
                stop("[get_RLum()] 'get.index' has to be of type 'logical'!",
                     call. = FALSE)
              }

              ##get originator (slot may be absent in objects created by old versions)
              if (.hasSlot(object, "originator")) {
                originator <- object@originator

              } else{
                originator <- NA_character_
              }

              ##-----------------------------------------------------------------##
              ##a pre-selection is necessary to support negative index selection
              object@records <- object@records[record.id]
              record.id <- 1:length(object@records)

              ##select curves according to the chosen parameter
              if (length(record.id) > 1) {
                temp <- lapply(record.id, function(x) {
                  if (is(object@records[[x]])[1] %in% RLum.type == TRUE) {
                    ##as input a vector is allowed
                    temp <- lapply(1:length(recordType), function(k) {
                      ##translate input to regular expression
                      ##(glob2rx wraps with ^...$; strip the anchors for grepl matching)
                      recordType[k] <- glob2rx(recordType[k])
                      recordType[k] <- substr(recordType[k], start = 2, stop = nchar(recordType[k]) - 1)

                      if (grepl(recordType[k], object@records[[x]]@recordType) == TRUE &
                          object@records[[x]]@curveType %in% curveType) {
                        if (!get.index) {
                          object@records[[x]]

                        } else{
                          x
                        }
                      }
                    })

                    ##remove empty entries and select just one to unlist
                    temp <- temp[!sapply(temp, is.null)]

                    ##if list has length 0 skip entry
                    if (length(temp) != 0) {
                      temp[[1]]

                    } else{
                      temp <- NULL
                    }
                  }
                })

                ##remove empty list element
                temp <- temp[!sapply(temp, is.null)]

                ##check if the produced object is empty and show warning message
                if (length(temp) == 0)
                  warning("[get_RLum()] This request produced an empty list of records!",
                          call. = FALSE)

                ##remove list for get.index
                if (get.index) {
                  return(unlist(temp))

                } else{
                  if (!drop) {
                    ##re-wrap selection into a fresh RLum.Analysis object
                    temp <- set_RLum(
                      class = "RLum.Analysis",
                      originator = originator,
                      records = temp,
                      protocol = object@protocol,
                      .pid = object@.pid
                    )
                    return(temp)

                  } else{
                    if (length(temp) == 1 & recursive == TRUE) {
                      return(temp[[1]])

                    } else{
                      return(temp)
                    }
                  }
                }

              } else{
                ##single-record case: type filters do not apply here, only drop/get.index
                if (get.index == FALSE) {
                  if (drop == FALSE) {
                    ##needed to keep the argument drop == TRUE
                    temp <- set_RLum(
                      class = "RLum.Analysis",
                      originator = originator,
                      records = list(object@records[[record.id]]),
                      protocol = object@protocol,
                      .pid = object@.pid
                    )
                    return(temp)

                  } else{
                    return(object@records[[record.id]])
                  }

                } else{
                  return(record.id)
                }
              }
            }
          })

# structure_RLum() ----------------------------------------------------------------------------
###
#' @describeIn RLum.Analysis
#' Method to show the structure of an [RLum.Analysis-class] object.
#'
#' @param fullExtent [structure_RLum]; [logical] (*with default*):
#' extents the returned `data.frame` to its full extent, i.e. all info elements
#' are part of the return as well. The default value is `FALSE` as the data
#' frame might become rather big.
#'
#' @return
#'
#' **`structure_RLum`**:
#'
#' Returns [data.frame-class] showing the structure.
#'
#' @md
#' @export
setMethod("structure_RLum",
          signature= "RLum.Analysis",
          definition = function(object, fullExtent = FALSE) {
            ##check if the object containing other elements than allowed
            if(!all(vapply(object@records, FUN = class, character(1)) == "RLum.Data.Curve"))
              stop("[structure_RLum()]  Only 'RLum.Data.Curve' objects are allowed!",
                   call. = FALSE)

            ##get length object
            temp.object.length <- length(object@records)

            ##ID
            temp.id <- 1:temp.object.length

            ##recordType
            temp.recordType <- vapply(object@records, function(x) {
              x@recordType
            }, character(1))

            ##PROTOCOL STEP (placeholder column, filled with NA)
            temp.protocol.step <- c(NA)
            length(temp.protocol.step) <- temp.object.length

            ##n.channels
            temp.n.channels <- vapply(object@records, function(x){length(x@data[,1])}, numeric(1))

            ##X.MIN
            temp.x.min <- vapply(object@records, function(x){min(x@data[,1])}, numeric(1))

            ##X.MAX
            temp.x.max <- vapply(object@records, function(x){max(x@data[,1])}, numeric(1))

            ##y.MIN
            temp.y.min <- vapply(object@records, function(x){min(x@data[,2])}, numeric(1))

            ##y.MAX
            temp.y.max <- vapply(object@records, function(x){max(x@data[,2])}, numeric(1))

            ##.uid
            temp.uid <- unlist(lapply(object@records, function(x){x@.uid}))

            ##.pid
            temp.pid <- paste(
              unlist(lapply(object@records, function(x){x@.pid})),
              collapse = ", ")

            ##originator
            temp.originator <- unlist(lapply(object@records, function(x){x@originator}))

            ##curveType
            temp.curveType <- unlist(lapply(object@records, function(x){x@curveType}))

            ##info elements as character value
            ##(fullExtent = TRUE spreads every info element into its own column)
            if (fullExtent) {
              temp.info.elements <- as.data.frame(data.table::rbindlist(lapply(object@records, function(x) {
                x@info
              }), fill = TRUE))

            } else{
              temp.info.elements <- unlist(sapply(1:temp.object.length, function(x) {
                if (length(object@records[[x]]@info) != 0) {
                  do.call(paste, as.list(names(object@records[[x]]@info)))

                } else{
                  NA
                }
              }))
            }

            ##combine output to a data.frame
            return(
              data.frame(
                id = temp.id,
                recordType = temp.recordType,
                curveType = temp.curveType,
                protocol.step = temp.protocol.step,
                n.channels = temp.n.channels,
                x.min = temp.x.min,
                x.max = temp.x.max,
                y.min = temp.y.min,
                y.max = temp.y.max,
                originator = temp.originator,
                .uid = temp.uid,
                .pid = temp.pid,
                info = temp.info.elements,
                stringsAsFactors = FALSE
              )
            )
          })

# length_RLum() -------------------------------------------------------------------------------
#' @describeIn RLum.Analysis
#' Returns the length of the object, i.e., number of stored records.
#'
#' @return
#'
#' **`length_RLum`**
#'
#' Returns the number records in this object.
#'
#' @md
#' @export
setMethod("length_RLum",
          "RLum.Analysis",
          function(object){
            ##number of records held in the @records slot
            length(object@records)
          })

# names_RLum() --------------------------------------------------------------------------------
#' @describeIn RLum.Analysis
#' Returns the names of the [RLum.Data-class] objects objects (same as shown with the show method)
#'
#' @return
#'
#' **`names_RLum`**
#'
#' Returns the names of the record types (`recordType`) in this object.
#'
#' @md
#' @export
setMethod("names_RLum",
          "RLum.Analysis",
          function(object){
            ##vapply over the records themselves instead of 1:length() indices:
            ##the former construction errored for empty objects (1:0 -> c(1, 0)),
            ##now an empty object yields character(0). USE.NAMES = FALSE keeps the
            ##result unnamed, as before, even for a named @records list.
            vapply(object@records, function(x) x@recordType, character(1), USE.NAMES = FALSE)
          })

# smooth_RLum() -------------------------------------------------------------------------------
#' @describeIn RLum.Analysis
#'
#' Smoothing of `RLum.Data` objects contained in this `RLum.Analysis` object
#' [zoo::rollmean] or [zoo::rollmedian][zoo::rollmean]. In particular the internal
#' function `.smoothing` is used.
#'
#' @param ... further arguments passed to underlying methods
#'
#' @return
#'
#' **`smooth_RLum`**
#'
#' Same object as input, after smoothing
#'
#' @md
#' @export
setMethod(
  f = "smooth_RLum",
  signature = "RLum.Analysis",
  function(object, ...) {
    ##delegate to the smooth_RLum method of each contained record
    object@records <- lapply(object@records, function(x){
      smooth_RLum(x, ...)
    })
    return(object)
  }
)
Luminescence/R/plot_ViolinPlot.R0000644000176200001440000002003414264017373016353 0ustar liggesusers#' @title Create a violin plot
#'
#' @description
#' Draws a kernel density plot in combination with a boxplot in its middle. The shape of the violin
#' is constructed using a mirrored density curve. This plot is especially designed for cases
#' where the individual errors are zero or too small to be visualised. The idea for this plot is
#' based on the 'volcano plot' in the ggplot2 package by Hadley Wickham and Winston Chang.
#' The general idea for the violin plot seems to be introduced by Hintze and Nelson (1998). #' #' The function is passing several arguments to the function [plot], #' [stats::density], [graphics::boxplot]: #' #' Supported arguments are: #' `xlim`, `main`, `xlab`, `ylab`, `col.violin`, `col.boxplot`, `mtext`, `cex`, `mtext` #' #' **`Valid summary keywords`** #' #' `'n'`, `'mean'`, `'median'`, `'sd.abs'`, `'sd.rel'`, `'se.abs'`, `'se.rel'`. #' `'skewness'`, `'kurtosis'` #' #' @param data [numeric] or [RLum.Results-class] (**required**): #' input data for plotting. Alternatively a [data.frame] or a [matrix] can #' be provided, but only the first column will be considered by the #' function #' #' @param boxplot [logical] (*with default*): #' enable or disable boxplot #' #' @param rug [logical] (*with default*): #' enable or disable rug #' #' @param summary [character] (*optional*): #' add statistic measures of centrality and dispersion to the plot. #' Can be one or more of several keywords. See details for available keywords. #' #' @param summary.pos [numeric] or [character] (*with default*): #' optional position keywords (cf. [legend]) for the statistical summary. #' Alternatively, the keyword `"sub"` may be specified to place the summary #' below the plot header. However, this latter option in only possible if #' `mtext` is not used. #' #' @param na.rm [logical] (*with default*): #' exclude NA values from the data set prior to any further operations. #' #' @param ... further arguments and graphical parameters passed to #' [plot.default], [stats::density] and [boxplot]. See details for further information #' #' @note #' Although the code for this function was developed independently and just the idea for the plot #' was based on the 'ggplot2' package plot type 'volcano', it should be mentioned that, beyond this, #' two other R packages exist providing a possibility to produces this kind of plot, namely: #' `'vioplot'` and `'violinmplot'` (see references for details). 
#'
#' @section Function version: 0.1.4
#'
#' @author
#' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#' @references
#' Daniel Adler (2005). vioplot: A violin plot is a combination of a box plot and a kernel density plot.
#' R package version 0.2 http://CRAN.R-project.org/package=vioplot
#'
#' Hintze, J.L., Nelson, R.D., 1998. A Box Plot-Density Trace Synergism. The American Statistician 52, 181-184.
#'
#' Raphael W. Majeed (2012). violinmplot: Combination of violin plot with mean and standard deviation.
#' R package version 0.2.1. http://CRAN.R-project.org/package=violinmplot
#'
#' Wickham. H (2009). ggplot2: elegant graphics for data analysis. Springer New York.
#'
#' @seealso [stats::density], [plot], [boxplot], [rug], [calc_Statistics]
#'
#' @examples
#'
#' ## read example data set
#' data(ExampleData.DeValues, envir = environment())
#' ExampleData.DeValues <- Second2Gray(ExampleData.DeValues$BT998, c(0.0438,0.0019))
#'
#' ## create plot straightforward
#' plot_ViolinPlot(data = ExampleData.DeValues)
#'
#' @md
#' @export
plot_ViolinPlot <- function(
  data,
  boxplot = TRUE,
  rug = TRUE,
  summary = NULL,
  summary.pos = "sub",
  na.rm = TRUE,
  ...
) {

  # Integrity tests and conversion --------------------------------------------------------------

  ##Prechecks
  if(missing(data)){
    stop("[plot_ViolinPlot()] I don't know what to do, data input needed." )

  }else{
    ##check for RLum.Results object
    if(is(data, "RLum.Results")){
      data <- get_RLum(data, "data")
    }

    ##if data.frame or matrix, only the first column is used (cf. documentation)
    if(is(data, "data.frame") | is(data, "matrix")){
      data <- data[,1]
    }
  }

  ##Remove NA values
  if(na.rm){
    data <- na.exclude(data)

    if(length(attr(data, "na.action")) > 0){
      ## fixed typo in function name reported by the warning (was 'plot_ViolinePlot')
      warning(paste("[plot_ViolinPlot()]", length(attr(data, "na.action")), "NA values removed!"), call. = FALSE)
    }
  }

  #Further checks
  if(!is(summary.pos, "character")){
    stop("[plot_ViolinPlot()] argument 'summary.pos' needs to be of type character!")
  }

  ##stop if only one or 0 values are left in data
  if(length(data) == 0){
    warning("[plot_ViolinPlot()] Actually it is rather hard to plot 0 values. NULL returned", call. = FALSE)
    return()
  }

  # Pre-calculations ----------------------------------------------------------------------------

  ##density for the violin; a user-supplied 'bw' in ... takes precedence over "nrd0"
  if(length(data)>1){
    density <- density(x = data, bw = ifelse("bw" %in% names(list(...)),list(...)$bw,"nrd0"))

  }else{
    density <- NULL
    warning("[plot_ViolinPlot()] single data point found, no density calculated.", call. = FALSE)
  }

  ##some statistical parameter, get rid of the weighted statistics
  stat.summary <- list(suppressWarnings(calc_Statistics(as.data.frame(data), digits = 2)[["unweighted"]]))
  names(stat.summary) <- "unweighted"

  ##make valid summary string
  if(is.null(summary)){
    summary <- c("n","median")
  }

  ##at least show a warning for invalid keywords
  ## (message fixed: the summary is built from the *unweighted* statistics, see above)
  if(!all(summary %in% names(stat.summary[[1]]))){
    warning(paste0("[plot_ViolinPlot()] Only keywords for unweighted statistical measures are supported. Valid keywords are: ",
                   paste(names(stat.summary[[1]]), collapse = ", ")), call. = FALSE)
  }

  ##make sure that only valid keywords make it
  summary <- summary[(summary %in% names(stat.summary[[1]]))]

  stat.text <- .create_StatisticalSummaryText(stat.summary, keywords = summary, sep = " \n ")
  stat.mtext <- .create_StatisticalSummaryText(stat.summary, keywords = summary, sep = " | ")

  # Plot settings -------------------------------------------------------------------------------

  ##set default values
  plot.settings <- list(
    xlim = if(!is.null(density)){range(density$x)}else{c(data[1]*0.9, data[1]*1.1)},
    main = "Violin Plot",
    xlab = expression(paste(D[e], " [a.u.]")),
    ylab = if(!is.null(density)){"Density"}else{" "},
    col.violin = rgb(0,0,0,0.2),
    col.boxplot = NULL,
    mtext = ifelse(summary.pos != 'sub', "", stat.mtext),
    cex = 1
  )

  ##modify list accordingly
  plot.settings <- modifyList(plot.settings, val = list(...))

  # Plot ----------------------------------------------------------------------------------------

  ##open empty plot area
  plot(
    NA,NA,
    xlim = plot.settings$xlim,
    ylim = c(0.2,1.8),
    xlab = plot.settings$xlab,
    ylab = plot.settings$ylab,
    yaxt = "n",
    main = plot.settings$main,
    cex = plot.settings$cex
  )

  ##add polygon ... the violin (density mirrored around y = 1)
  if(!is.null(density)){
    polygon(
      x = c(density$x, rev(density$x)),
      y = c(1 + density$y / max(density$y) * 0.5, rev(1 - density$y / max(density$y) * 0.5)),
      col = plot.settings$col.violin,
      border = plot.settings$col.violin
    )
  }

  ##add the boxplot
  if(boxplot){
    boxplot(
      data,
      outline = TRUE,
      boxwex = 0.4,
      horizontal = TRUE,
      axes = FALSE,
      add = TRUE,
      col = plot.settings$col.boxplot
    )
  }

  ##add rug
  if(rug){
    rug(x = data)
  }

  ##add mtext
  if(!is.null(plot.settings$mtext)){
    mtext(side = 3, text = plot.settings$mtext)
  }

  ##add stat.text
  if (summary.pos != "sub") {
    valid_keywords <- c(
      "bottomright", "bottom", "bottomleft", "left",
      "topleft", "top", "topright", "right", "center"
    )

    if (any(summary.pos %in% valid_keywords)) {
      legend(summary.pos, legend = stat.text, bty = "n")

    }else{
      warning_text <- paste0("Value provided for 'summary.pos' is not a valid keyword, valid keywords are:",
                             paste(valid_keywords, collapse = ", "))
      warning(warning_text)
    }
  }
}
#' Examples: `start_param = list(A = 1, C = 1e+5, W = 0.5, c = 0)` #' #' #' **`method_control`** \cr #' #' The following arguments can be provided via `method_control`. Please note that arguments provided #' via `method_control` are not further tested, i.e., if the function crashes your input was probably #' wrong. #' #' \tabular{lll}{ #' **ARGUMENT** \tab **TYPE** \tab **DESCRIPTION**\cr #' `upper` \tab named [vector] \tab sets upper fitting boundaries, if provided boundaries for all arguments #' are required, e.g., `c(A = 0, C = 0, W = 0, c = 0)` \cr #' `lower` \tab names [vector] \tab sets lower fitting boundaries (see `upper` for details) \cr #' `trace` \tab [logical] \tab enables/disables progression trace for [minpack.lm::nlsLM]\cr #' `weights` \tab [numeric] \tab option to provide own weights for the fitting, the length of this #' vector needs to be equal to the number for rows of the input `data.frame`. If set to `NULL` no weights #' are applied. The weights are defined by the third column of the input `data.frame`. #' } #' #' @param data [data.frame] (**required**): input data with three columns, the first column contains #' temperature values in deg. C, columns 2 and 3 the dependent values with its error #' #' @param start_param [list] (optional): option to provide own start parameters for the fitting, see #' details #' #' @param method_control [list] (optional): further options to fine tune the fitting, see details for #' further information #' #' @param n.MC [numeric] (*with default*): number of Monte Carlo runs for the error estimation. If `n.MC` is #' `NULL` or `<=1`, the error estimation is skipped #' #' @param verbose [logical] (*with default*): enables/disables terminal output #' #' @param plot [logical] (*with default*): enables/disables plot output #' #' @param ... 
further arguments that can be passed to control the plotting, supported are `main`, `pch`,
#' `col_fit`, `col_points`, `lty`, `lwd`, `xlab`, `ylab`, `xlim`, `ylim`, `xaxt`
#'
#' @return
#'
#' The function returns numerical output and an (*optional*) plot.
#'
#' -----------------------------------\cr
#' `[ NUMERICAL OUTPUT ]`\cr
#' -----------------------------------\cr
#'
#' **`RLum.Results`**-object
#'
#' **slot:** **`@data`**
#'
#' `[.. $data : data.frame]`\cr
#'
#' A table with all fitting parameters and the number of Monte Carlo runs used for the error estimation.
#'
#' `[.. $fit : nls object]` \cr
#'
#' The nls [stats::nls] object returned by the function [minpack.lm::nlsLM]. This object
#' can be further passed to other functions supporting an nls object (cf. details section
#' in [stats::nls])
#'
#' **slot:** **`@info`**
#'
#' `[.. $call : call]`\cr
#'
#' The original function call.
#'
#' -----------------------------------\cr
#' `[ GRAPHICAL OUTPUT ]`\cr
#' -----------------------------------\cr
#'
#' Plotted are temperature against the signal and their uncertainties.
#' The fit is shown as dashed-line (can be modified). Please note that for the fitting the absolute
#' temperature values are used but are re-calculated to deg. C for the plot.
#'
#'
#' @section Function version: 0.1.0
#'
#' @author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#' @references
#'
#' Wintle, A.G., 1975. Thermal Quenching of Thermoluminescence in Quartz. Geophys. J. R. astr. Soc. 41, 107–113.
#'
#' @seealso [minpack.lm::nlsLM]
#'
#' @examples
#'
#' ##create short example dataset
#' data <- data.frame(
#'  T = c(25, 40, 50, 60, 70, 80, 90, 100, 110),
#'  V = c(0.06, 0.058, 0.052, 0.051, 0.041, 0.034, 0.035, 0.033, 0.032),
#'  V_X = c(0.012, 0.009, 0.008, 0.008, 0.007, 0.006, 0.005, 0.005, 0.004))
#'
#' ##fit
#' fit_ThermalQuenching(
#'  data = data,
#'  n.MC = NULL)
#'
#' @md
#' @export
fit_ThermalQuenching <- function(
  data,
  start_param = list(),
  method_control = list(),
  n.MC = 100,
  verbose = TRUE,
  plot = TRUE,
  ...
){

  # Self-call -----------------------------------------------------------------------------------
  ## a list input is dispatched element-wise to this function and the results are merged
  if(inherits(data, "list")){
    ##get arguments
    args <- as.list(match.call())
    args[[1]] <- NULL
    args$data <- NULL

    ##run function
    results_list <- lapply(data, function(x){
      do.call(fit_ThermalQuenching, c(list(data = x),args))
    })

    ##combine and return
    return(merge_RLum(results_list))
  }

  # Integrity checks ----------------------------------------------------------------------------
  if(!inherits(data, 'data.frame')){
    ## fixed grammar in error message (was 'must by of type')
    stop("[fit_ThermalQuenching()] 'data' must be of type 'data.frame' or list of 'data.frames'!", call. = FALSE)

  }else{
    if(nrow(data) < 1 || ncol(data) < 3)
      stop("[fit_ThermalQuenching()] 'data' is empty or has less than three columns!", call. = FALSE)

    if(ncol(data) > 3)
      warning("[fit_ThermalQuenching()] 'data' has more than 3 columns, taking only the first three!", call. = FALSE)

    if(any(is.na(data)))
      warning("[fit_ThermalQuenching()] NA values in 'data' automatically removed!", call. = FALSE)

    ##this we do anyway, you never know
    data <- na.exclude(data[,1:3])
  }

  # Prepare data --------------------------------------------------------------------------------
  ##set formula for quenching according to Wintle (1975)
  ##we here add a constant, otherwise the fit will not really work
  k <- 8.6173303e-05  # Boltzmann constant in eV/K
  f <- y ~ (A / (1 + C * (exp(-W / (k * x))))) + c

  ##translate values in data.frame to absolute temperature
  data_raw <- data
  data[[1]] <- data[[1]] + 273.15

  ##start parameter
  start_param <- modifyList(x = list(
    A = max(data[[2]]),
    C = max(data[[1]] * 10e+5),
    W = 0.5,
    c = 0),
    val = start_param)

  ##method control
  method_control <- modifyList(
    x = list(
      lower = c(A = 0, C = 0, W = 0, c = 0),
      upper = c(A = 10 * start_param$A, C = Inf, W = 10, c = start_param$A),
      trace = FALSE,
      weights = data[[3]]
    ), val = method_control)

  # Fitting -------------------------------------------------------------------------------------
  ##run the fitting
  fit <- try(minpack.lm::nlsLM(
    formula = f,
    data = data.frame(x = data[[1]], y = data[[2]]),
    weights = if(is.null(method_control$weights)){
      rep(1, length(data[[2]]))
    } else {
      method_control$weights},
    control = list(
      maxiter = 500,
      maxfev = 1000,
      ## bug fix: 'trace' was erroneously set to method_control$weights (the weights
      ## vector) instead of the logical trace flag documented in method_control
      trace = method_control$trace
    ),
    start = start_param,
    lower = method_control$lower,
    upper = method_control$upper
  ), silent = TRUE)

  ##only continue if the first fitting worked out
  if(!inherits(fit, "try-error")){
    ##reset n.MC
    if(is.null(n.MC) || n.MC < 1) n.MC <- 1

    ##Prepare MC runs for the fitting: vary y by its (assumed normal) uncertainty
    x_MC <- data[[1]]
    y_MC <- matrix(
      data = data[[2]] + rnorm(n.MC * length(x_MC)) * data[[3]],
      nrow = length(x_MC),
      ncol = n.MC)

    ## negative signals are physically meaningless
    y_MC[y_MC < 0] <- 0

    ##run fitting
    fit_MC <- lapply(1:n.MC, function(x){
      temp <- try(minpack.lm::nlsLM(
        formula = f,
        data = data.frame(x = x_MC, y = y_MC[,x]),
        weights = if(is.null(method_control$weights)){
          rep(1, length(data[[2]]))
        } else {
          method_control$weights},
        control = list(
          maxiter = 500,
          maxfev = 1000
        ),
        start = start_param,
        lower = method_control$lower,
        upper = method_control$upper
      ), silent = TRUE)

      ##return value
      if(inherits(temp, 'try-error')) {
        return(NULL)
      } else{
        temp
      }
    })

  }else{
    try(stop("[fit_ThermalQuenching()] Fitting failed, NULL returned!", call. = FALSE), silent = FALSE)
    return(NULL)
  }

  ##remove NULL (the fit was not successful)
  fit_MC <- fit_MC[!sapply(X = fit_MC, is.null)]
  n.MC <- length(fit_MC)

  # Extract values ------------------------------------------------------------------------------
  ##(1) - extract parameters from main fit
  fit_coef <- coef(fit)
  A <- fit_coef[["A"]]
  C <- fit_coef[["C"]]
  W <- fit_coef[["W"]]
  c <- fit_coef[["c"]]

  ##(2) - extract values from MC run; row-wise sd over the MC fits gives the errors
  fit_coef_MC_full <- vapply(X = fit_MC, FUN = coef, FUN.VALUE = numeric(4))
  fit_coef_MC <- round(matrixStats::rowSds(fit_coef_MC_full),3)
  A_MC_X <- fit_coef_MC[1]
  C_MC_X <- fit_coef_MC[2]
  W_MC_X <- fit_coef_MC[3]
  c_MC_X <- fit_coef_MC[4]

  # Terminal output -----------------------------------------------------------------------------
  if(verbose){
    cat("\n[fit_ThermalQuenching()]\n\n")
    cat(" A = ", A, " \u00b1 ",A_MC_X,"\n")
    cat(" C = ", C, " \u00b1 ",C_MC_X,"\n")
    cat(" W = ", W, " \u00b1 ",W_MC_X, " eV\n")
    cat(" c = ", c, " \u00b1 ",c_MC_X, "\n")
    cat(" --------------------------------\n")
  }

  # Plotting ------------------------------------------------------------------------------------
  if(plot) {
    ##plot settings
    plot_settings <- list(
      xlim = range(data[[1]]),
      ylim = c(min(data[[2]]) - data[[3]][which.min(data[[2]])],
               max(data[[2]]) + data[[3]][which.max(data[[2]])]),
      pch = 1,
      xaxt = "n",
      xlab = "Temperature [\u00b0C]",
      ylab = "Dependent [a.u.]",
      main = "Thermal quenching",
      lty = 2,
      col_points = "black",
      col_fit = "red",
      lwd = 1.3,
      mtext = if(n.MC == 1) "" else paste0("n.MC = ", n.MC)
    )

    ##overwrite settings
    plot_settings <- modifyList(x = plot_settings, val = list(...))

    ##create plot window
    plot(
      x = NA,
      y = NA,
      xlim = plot_settings$xlim,
      ylim = plot_settings$ylim,
      xaxt = plot_settings$xaxt,
      xlab = plot_settings$xlab,
      ylab = plot_settings$ylab,
      main = plot_settings$main
    )

    ##add axis with correct temperature (fit runs in K, plot shows deg. C)
    if(!is.null(plot_settings$xaxt) && plot_settings$xaxt == "n"){
      at <- pretty(round(axTicks(side = 1) - 273.15))
      axis(side = 1, at = at + 273.15, labels = at)
    }

    ##reset n.MC
    if(!is.null(n.MC) && n.MC > 1){
      ##add MC curves
      for(i in 1:n.MC){
        A <- fit_coef_MC_full[1,i]
        C <- fit_coef_MC_full[2,i]
        W <- fit_coef_MC_full[3,i]
        c <- fit_coef_MC_full[4,i]

        x <- 0
        curve((A / (1 + C * (exp(-W / (k * x))))) + c,
              col = rgb(0,0,0,.1),
              add = TRUE)
      }
    }

    ##add points and uncertainties
    points(data[, 1:2],
           pch = plot_settings$pch,
           lwd = 2,
           col = plot_settings$col_points)
    segments(x0 = data[[1]], x1 = data[[1]],
             y0 = data[[2]] + data[[3]],
             y1 = data[[2]] - data[[3]],
             col = plot_settings$col_points
    )

    ##add central fit
    A <- fit_coef[["A"]]
    C <- fit_coef[["C"]]
    W <- fit_coef[["W"]]
    c <- fit_coef[["c"]]
    x <- 0
    curve((A / (1 + C * (exp( -W / (k * x) )))) + c,
          lty = plot_settings$lty,
          lwd = plot_settings$lwd,
          col = plot_settings$col_fit,
          add = TRUE
    )

    ##add mtext
    mtext(side = 3, text = plot_settings$mtext)
  }

  # Return --------------------------------------------------------------------------------------
  output_df <- data.frame(
    A = A,
    A_X = A_MC_X,
    C = C,
    C_X = C_MC_X,
    W = W,
    W_X = W_MC_X,
    c = c,
    c_X = c_MC_X,
    n.MC = n.MC
  )

  output <- set_RLum(
    class = "RLum.Results",
    data = list(
      data = output_df,
      fit = fit
    ),
    info = list(
      call = sys.call()
    )
  )

  return(output)
}
#' further arguments that will be passed to the function
#' [read_XSYG2R] and [write_RLum2CSV]
#'
#' @return
#' The function returns either a CSV-file (or many of them) or for the option `export = FALSE`
#' a list comprising objects of type [data.frame] and [matrix]
#'
#' @section Function version: 0.1.0
#'
#' @author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#' @seealso [RLum.Analysis-class], [RLum.Data-class], [RLum.Results-class],
#' [utils::write.table], [write_RLum2CSV], [read_XSYG2R]
#'
#' @keywords IO
#'
#' @examples
#'
#' ##transform XSYG-file values to a list
#' data(ExampleData.XSYG, envir = environment())
#' convert_XSYG2CSV(OSL.SARMeasurement$Sequence.Object[1:10], export = FALSE)
#'
#' \dontrun{
#' ##select your XSYG-file
#' file <- file.choose()
#'
#' ##convert
#' convert_XSYG2CSV(file)
#'
#' }
#'
#' @md
#' @export
convert_XSYG2CSV <- function(
  file,
  ...
){

  # General tests -------------------------------------------------------------------------------

  ##file is missing?
  if(missing(file)){
    ## fixed function name in error message (was '[convert_XSYG2R()]')
    stop("[convert_XSYG2CSV()] file is missing!", call. = FALSE)
  }

  ##set input arguments
  convert_XSYG2R_settings.default <- list(
    recalculate.TL.curves = TRUE,
    pattern = ".xsyg",
    txtProgressBar = TRUE,
    export = TRUE
  )

  ##modify list on demand
  convert_XSYG2R_settings <- modifyList(x = convert_XSYG2R_settings.default, val = list(...))

  # Import file ---------------------------------------------------------------------------------
  ## skip the import if the user already provides an RLum object
  if(!inherits(file, "RLum")){
    object <- read_XSYG2R(
      file = file,
      fastForward = TRUE,
      recalculate.TL.curves = convert_XSYG2R_settings$recalculate.TL.curves,
      pattern = convert_XSYG2R_settings$pattern,
      txtProgressBar = convert_XSYG2R_settings$txtProgressBar
    )

  }else{
    object <- file
  }

  # Export to CSV -------------------------------------------------------------------------------

  ##get all arguments we want to pass and remove the doubled one
  arguments <- c(list(object = object, export = convert_XSYG2R_settings$export), list(...))
  arguments[duplicated(names(arguments))] <- NULL

  ##this if-condition prevents NULL in the terminal
  if(convert_XSYG2R_settings$export == TRUE){
    invisible(do.call("write_RLum2CSV", arguments))

  }else{
    do.call("write_RLum2CSV", arguments)
  }
}
#'
#'@param source_dose_rate [numeric] (*optional*): allows to modify the axis and show values in Gy, instead seconds. Only a single numerical values is allowed.
#'
#'@param sel_curves [numeric] (optional): id of the curves to be plotting in its occurring order. A sequence can
#'be provided for selecting, e.g., only every 2nd curve from the input object
#'
#'@param show_dose_points [logical] (with default): enable or disable plot of dose points in the graph
#'
#'@param show_natural [logical] (with default): enable or disable the plot of the natural `Lx/Tx` values
#'
#'@param n [integer] (with default): the number of x-values used to evaluate one curve object. Large numbers slow
#'down the plotting process and are usually not needed
#'
#'@param ... Further arguments and graphical parameters to be passed. In particular: `main`, `xlab`, `ylab`, `xlim`, `ylim`, `lty`, `lwd`, `pch`, `col.pch`, `col.lty`, `mtext`
#'
#'@section Function version: 0.2.3
#'
#'@return An [RLum.Results-class] object is returned:
#'
#' Slot: **@data**\cr
#'
#' \tabular{lll}{
#' **OBJECT** \tab **TYPE** \tab **COMMENT**\cr
#' `results` \tab [data.frame] \tab with dose and LxTx values \cr
#' `data` \tab [RLum.Results-class] \tab original input data \cr
#' }
#'
#' Slot: **@info**\cr
#'
#' \tabular{lll}{
#' **OBJECT** \tab **TYPE** \tab **COMMENT** \cr
#' `call` \tab `call` \tab the original function call \cr
#' `args` \tab `list` \tab arguments of the original function call \cr
#' }
#'
#'*Note: If the input object is a [list] a list of [RLum.Results-class] objects is returned.*
#'
#'@author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) \cr
#' Christoph Burow, University of Cologne (Germany)
#'
#'@seealso [RLum.Results-class], [analyse_SAR.CWOSL]
#'
#'@examples
#'
#'#load data example data
#'data(ExampleData.BINfileData, envir = environment())
#'
#'#transform the values from the first position in a RLum.Analysis object
#'object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos=1)
#'
#' ##perform SAR analysis
#' results <- analyse_SAR.CWOSL(
#' object = object,
#' signal.integral.min = 1,
#' signal.integral.max = 2,
#' background.integral.min = 900,
#' background.integral.max = 1000,
#' plot = FALSE
#' )
#'
#'##plot only DRC
#'plot_DRCSummary(results)
#'
#'@md
#'@export
plot_DRCSummary <- function(
  object,
  source_dose_rate = NULL,
  sel_curves = NULL,
  show_dose_points = FALSE,
  show_natural = FALSE,
  n = 51L,
  ...
){

  # Self-call -----------------------------------------------------------------------------------
  if(inherits(object, "list")){
    ##catch ... arguments
    plot_settings <- list(...)

    ##expand arguments
    if("main" %in% names(list(...))){
      main <- as.list(rep(list(...)[["main"]], length(object)))

      ##filter main from the ... argument list otherwise we will have a collision
      ## bug fix: the previous logical indexing (plot_settings["main" %in% names(...)] <- NULL)
      ## recycled a single TRUE over the whole list and removed ALL settings, not only 'main'
      plot_settings[["main"]] <- NULL

    }else{
      main <- as.list(rep("DRC", length(object)))
    }

    results <- lapply(1:length(object), function(o){
      plot_DRCSummary(
        object = object[[o]],
        ## bug fix: 'source_dose_rate' was silently dropped for list input,
        ## now forwarded to every recursive call
        source_dose_rate = source_dose_rate,
        sel_curves = sel_curves,
        show_dose_points = show_dose_points,
        show_natural = show_natural,
        n = n,
        main = main[[o]],
        ... = plot_settings
      )
    })

    ##return merged object
    return(results)
  }

  # Check input ---------------------------------------------------------------------------------
  if(!inherits(object, "RLum.Results"))
    stop("[plot_DRCSummary()] The input is not of class 'RLum.Results'!",call. = FALSE)

  # Extract data from object --------------------------------------------------------------------
  ##get data from RLum.Results object
  if(object@originator %in% c("analyse_SAR.CWOSL", "analyse_pIRIRSequence")){
    ##set limit
    if(is.null(sel_curves)){
      sel_curves <- 1:length(object@data$Formula)

    }else{
      if(min(sel_curves) < 1 ||
         max(sel_curves) > length(object@data$Formula) ||
         length(sel_curves) > length(object@data$Formula)){
        warning("[plot_DRCSummary()] 'sel_curves' out of bounds, reset to full dataset.",
                call. = FALSE,
                immediate. = TRUE)
        sel_curves <- 1:length(object@data$Formula)
      }
    }

    ## check the whether the fitting was all the same
    if(length(unique(object@data[["data"]][["Fit"]])) != 1)
      stop("[plot_DRCSummary()] I can only visualise dose-response curves based on the same fitting equation!", call. = FALSE)

    ##get DRC
    DRC <- object@data$Formula[sel_curves]

    ## check for Lambert W function (we can only do all )
    ## W is referenced by the stored formula when it is evaluated below
    if(all(object@data$data[["Fit"]] == "LambertW"))
      W <- lamW::lambertW0

    ##get limits for each set dataset
    dataset_limits <- matrix(
      c(which(object@data$LnLxTnTx.table[["Name"]] == "Natural"),
        which(object@data$LnLxTnTx.table[["Name"]] == "Natural")[-1] - 1,
        nrow(object@data$LnLxTnTx.table)),
      ncol = 2)

    ##create list
    LxTx <- lapply(1:nrow(dataset_limits), function(x){
      object@data$LnLxTnTx.table[dataset_limits[x,1]:dataset_limits[x,2],]
    })[sel_curves]

  }else{
    stop(
      "[plot_DRCSummary()] 'object' was created by none supported function, cf. manual for allowed originators!",
      call. = FALSE)
  }

  # Plotting ------------------------------------------------------------------------------------
  ##set default
  plot_settings <- modifyList(x = list(
    xlab = if(is.null(source_dose_rate)) {"Dose [s]"} else {"Dose [Gy]"},
    ylab = expression(L[x]/T[x]),
    xlim = c(0,max(vapply(LxTx, function(x){max(x[["Dose"]])}, numeric(1)))),
    ylim = if(show_dose_points){
      c(0,max(vapply(LxTx, function(x){max(x[["LxTx"]] + x[["LxTx.Error"]])}, numeric(1)), na.rm = TRUE))
    }else{
      c(0,max(vapply(1:length(LxTx), function(y){
        x <- max(LxTx[[y]][["Dose"]], na.rm = TRUE)
        eval(DRC[[y]])
      },numeric(1)), na.rm = TRUE))
    },
    main = "DRC Summary",
    mtext = paste0("n_curves: ",length(sel_curves)),
    lty = 1,
    lwd = 1,
    pch = 20,
    col.lty = rgb(0,0,0,0.5),
    col.pch = rgb(0,0,0,0.5)
  ),
  val = list(...),
  keep.null = TRUE)

  ## expand parameters so per-curve indexing below is always valid
  plot_settings$col.lty <- rep(plot_settings$col.lty, length(sel_curves))
  plot_settings$col.pch <- rep(plot_settings$col.pch, length(sel_curves))
  plot_settings$pch <- rep(plot_settings$pch, length(sel_curves))
  plot_settings$lty <- rep(plot_settings$lty, length(sel_curves))

  ##create empty plot window
  plot(
    x = NA,
    y = NA,
    xlab = plot_settings$xlab,
    ylab = plot_settings$ylab,
    xlim = plot_settings$xlim,
    ylim = plot_settings$ylim,
    main = plot_settings$main,
    xaxt = "n"
  )

  if(!is.null(plot_settings$mtext))
    mtext(side = 3, text = plot_settings$mtext, cex = 0.8)

  #exchange x-axis if source dose rate is set
  if(!is.null(source_dose_rate)){
    axis(side = 1, at = axTicks(side = 1),
         labels = round(axTicks(side = 1) * source_dose_rate[1],0))

  }else{
    axis(side = 1)
  }

  for(i in 1:length(sel_curves)){
    ##plot natural
    if(show_natural){
      segments(x0 = LxTx[[i]]$Dose[1], x1 = LxTx[[i]]$Dose[1],
               y0 = LxTx[[i]]$LxTx[1] - LxTx[[i]]$LxTx.Error[1],
               y1 = LxTx[[i]]$LxTx[1] + LxTx[[i]]$LxTx.Error[1],
               col = plot_settings$col.pch[[i]])

      points(
        x = LxTx[[i]]$Dose[1],
        y = LxTx[[i]]$LxTx[1],
        col = plot_settings$col.pch[[i]],
        pch = plot_settings$pch[[i]]
      )
    }

    ##plot dose points
    if(show_dose_points){
      segments(x0 = LxTx[[i]]$Dose[-1], x1 = LxTx[[i]]$Dose[-1],
               y0 = LxTx[[i]]$LxTx[-1] - LxTx[[i]]$LxTx.Error[-1],
               y1 = LxTx[[i]]$LxTx[-1] + LxTx[[i]]$LxTx.Error[-1],
               col = plot_settings$col.pch[[i]])

      points(
        x = LxTx[[i]]$Dose[-1],
        y = LxTx[[i]]$LxTx[-1],
        col = plot_settings$col.pch[[i]],
        pch = plot_settings$pch[[i]]
      )
    }

    ##plot lines; 'x' is referenced by the stored formula on evaluation
    x <- seq(min(plot_settings$xlim),max(plot_settings$xlim), length.out = n)
    y <- eval(DRC[[i]])

    if (any(is.na(y)) || any(is.nan(y))) {
      warning("[plot_DRCSummary()] Dose response curve ", i,
              " is NA/NaN and was removed before plotting.",
              call. = FALSE)
      next
    }

    lines(
      x = x,
      y = eval(DRC[[i]]),
      col = plot_settings$col.lty[[i]],
      lwd = plot_settings$lwd,
      lty = plot_settings$lty[[i]]
    )
  }

  ## Results -------------------------------------------------------------------
  results <- set_RLum(
    class = "RLum.Results",
    data = list(
      results = data.frame(
        dose = x,
        sapply(DRC, function(d, n) { eval(d) }, n)
      ),
      data = object
    ),
    info = list(
      call = sys.call(),
      args = as.list(sys.call())[-1])
  )

  ## Return value --------------------------------------------------------------
  return(results)
}
If an [RLum.Analysis-class] object #' is provided, only the [RLum.Data.Spectrum-class] objects are treated. Please note: this mixing of #' objects do not work for a list of `RLum.Data` objects. #' #' @param method [character] (*with default*): #' Defines method that is applied for cosmic ray removal. Allowed methods are #' `smooth`, the default, ([smooth]), `smooth.spline` ([smooth.spline]) #' and `Pych`. See details for further information. #' #' @param method.Pych.smoothing [integer] (*with default*): #' Smoothing parameter for cosmic ray removal according to Pych (2003). #' The value defines how many neighbouring values in each frame are used for smoothing #' (e.g., `2` means that the two previous and two following values are used). #' #' @param method.Pych.threshold_factor [numeric] (*with default*): #' Threshold for zero-bins in the histogram. Small values mean that more peaks #' are removed, but signal might be also affected by this removal. #' #' @param MARGIN [integer] (*with default*): #' on which part the function cosmic ray removal should be applied on: #' #' - 1 = along the time axis (line by line), #' - 2 = along the wavelength axis (column by column). #' #' **Note:** This argument currently only affects the methods `smooth` and `smooth.spline` #' #' @param verbose [logical] (*with default*): #' Option to suppress terminal output., #' #' @param plot [logical] (*with default*): #' If `TRUE` the histograms used for the cosmic-ray removal are returned as plot #' including the used threshold. Note: A separate plot is returned for each frame! #' Currently only for `method = "Pych"` a graphical output is provided. #' #' @param ... further arguments and graphical parameters that will be passed #' to the [smooth] function. #' #' @return Returns same object as input. 
#'
#' @section Function version: 0.3.0
#'
#' @author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#' @seealso [RLum.Data.Spectrum-class], [RLum.Analysis-class], [smooth], [smooth.spline],
#' [apply_CosmicRayRemoval]
#'
#' @references
#' Pych, W., 2004. A Fast Algorithm for Cosmic-Ray Removal from
#' Single Images. The Astronomical Society of the Pacific 116 (816), 148-153.
#' \doi{10.1086/381786}
#'
#' @keywords manip
#'
#' @examples
#'
#' ##(1) - use with your own data and combine (uncomment for usage)
#' ## run two times the default method and smooth with another method
#' ## your.spectrum <- apply_CosmicRayRemoval(your.spectrum, method = "Pych")
#' ## your.spectrum <- apply_CosmicRayRemoval(your.spectrum, method = "Pych")
#' ## your.spectrum <- apply_CosmicRayRemoval(your.spectrum, method = "smooth")
#'
#' @md
#' @export
apply_CosmicRayRemoval <- function(
  object,
  method = "smooth",
  method.Pych.smoothing = 2,
  method.Pych.threshold_factor = 3,
  MARGIN = 2,
  verbose = FALSE,
  plot = FALSE,
  ...
){

  # Self-call ----------------------------------------------------------------------------------
  ##Black magic: The function recalls itself until all RLum.Data.Spectrum objects have been treated
  ##If you want to test the basics of the function please only use a single RLum.Data.Spectrum-object

  ##if it comes in as an RLum.Analysis object ... make a list out of it
  if(inherits(object, "RLum.Analysis")){
    object <- list(object)
    class_original <- "RLum.Analysis"

  }else{
    class_original <- NULL

  }

  ##handle the list and recall
  if(inherits(object, "list")){
    results_list <- lapply(object, function(o){

      ##preset objects
      record_id.spectra <- NULL

      ##RLum.Analysis
      if(inherits(o, "RLum.Analysis")){
        ##get id of RLum.Data.Spectrum objects in this object
        record_id.spectra <- which(
          vapply(o@records, function(x) inherits(x, "RLum.Data.Spectrum"), logical(1)))

        ##rewrite o
        temp_o <- o@records[record_id.spectra]

      }else{
        temp_o <- o

      }

      ##call function
      ## FIX: forward the dots directly; the previous call used `... = list(...)`,
      ## which bundled all extra arguments (e.g. 'kind', 'twiceit', 'spar') into a
      ## single named list so they never reached the recursive worker call
      results <- apply_CosmicRayRemoval(
        object = temp_o,
        method = method,
        method.Pych.smoothing = method.Pych.smoothing,
        method.Pych.threshold_factor = method.Pych.threshold_factor,
        MARGIN = MARGIN,
        verbose = verbose,
        plot = plot,
        ...
      )

      ##combine in RLum.Analysis object if needed
      if(!is.null(record_id.spectra)){
        o@records[record_id.spectra] <- results
        return(o)

      }else{
        return(results)

      }

    })

    ##final return, make sure that we return what we had as input
    if(!is.null(class_original)){
      return(results_list[[1]])

    }else{
      return(results_list)

    }

  }

  # Integrity check -----------------------------------------------------------
  ##check if object is of class RLum.Data.Spectrum
  if(!inherits(object,"RLum.Data.Spectrum")){
    stop(paste0("[apply_CosmicRayRemoval()] An object of class '",class(object)[1],
                "' is not supported as input; please read the manual!"),
         call. = FALSE)

  }

  ##deal with addition arguments
  extraArgs <- list(...)

  kind <- if("kind" %in% names(extraArgs)) {extraArgs$kind} else {"3RS3R"}

  twiceit <- if("twiceit" %in% names(extraArgs)) {extraArgs$twiceit} else {TRUE}

  spar <- if("spar" %in% names(extraArgs)) {extraArgs$spar} else {NULL}

  # Apply method ------------------------------------------------------------

  ## +++++++++++++++++++++++++++++++++++ (smooth) ++++++++++++++++++++++++++++##
  if(method == "smooth"){
    ##apply smooth (Tukey's running median smoothing, stats::smooth)
    object.data.temp.smooth <- apply(
      X = object@data,
      MARGIN = MARGIN,
      FUN = stats::smooth,
      kind = kind,
      twiceit = twiceit
    )

    ##rotate output matrix if necessary
    if(MARGIN == 1){
      object.data.temp.smooth <- t(object.data.temp.smooth)

    }

  ## +++++++++++++++++++++++++++++++++++ (smooth.spline) +++++++++++++++++++++##
  }else if(method == "smooth.spline"){
    ##write the function in a new function to access the data more easily
    temp_smooth.spline <- function(x, spar){
      stats::smooth.spline(x, spar = spar)$y
    }

    ##apply smooth.spline
    object.data.temp.smooth <- apply(
      X = object@data,
      MARGIN = MARGIN,
      FUN = temp_smooth.spline,
      spar = spar
    )

    ##rotate output matrix if necessary
    if(MARGIN == 1){
      object.data.temp.smooth <- t(object.data.temp.smooth)

    }

  ## +++++++++++++++++++++++++++++++++++ (Pych) ++++++++++++++++++++++++++++++##
  }else if(method == "Pych"){

    ## grep data matrix
    object.data.temp <- object@data

    ## apply smoothing
    object.data.temp.smooth <- sapply(X = 1:ncol(object.data.temp), function(x){

      ##(1) - calculate sd for each subframe
      temp.sd <- sd(object.data.temp[,x])

      ##(2) - correct estimation of sd by 1-sigma clipping
      temp.sd.corr <- sd(object.data.temp[

        object.data.temp[,x] >= (mean(object.data.temp[,x]) - temp.sd) &
        object.data.temp[,x] <= (mean(object.data.temp[,x]) + temp.sd)

        , x])

      ##(3) - construct histogram of count distribution
      temp.hist <- hist(object.data.temp[,x],
                        breaks = length(object.data.temp[,x])/2, plot = FALSE)

      ##(4) - find mode of the histogram (e.g. peak)
      temp.hist.max <- which.max(temp.hist$counts)

      ##(5) - find gaps in the histogram (bins with zero value)
      temp.hist.zerobin <- which(temp.hist$counts == 0)

      ##(5.1)
      ##select just values right from the peak
      ## NOTE(review): this subsets by *position* in the zero-bin vector, not by
      ## bin number; presumably the intent was zerobin > peak bin - kept as-is
      temp.hist.zerobin <- temp.hist.zerobin[
        (temp.hist.max[1] + 1):length(temp.hist.zerobin)]

      ##(5.2)
      ##select non-zerobins
      temp.hist.nonzerobin <- which(temp.hist$counts != 0)
      temp.hist.nonzerobin <- temp.hist.nonzerobin[
        temp.hist.nonzerobin >= (temp.hist.zerobin[1]-1)]

      ##(6) - find the first gap which is wider than the threshold
      temp.hist.nonzerobin.diff <- diff(
        temp.hist$breaks[temp.hist.nonzerobin])

      ## select the first value where the threshold is reached
      ## factor 3 is defined by Pych (2004)
      temp.hist.thres <- which(
        temp.hist.nonzerobin.diff >= method.Pych.threshold_factor * temp.sd.corr)[1]

      ##(7) - use counts above the threshold and recalculate values
      ## on all further values
      if(!is.na(temp.hist.thres)){
        object.data.temp[,x] <- sapply(1:nrow(object.data.temp), function(n){
          ## FIX: lower bound must be >= 1 (R is 1-indexed); the previous
          ## `>= 0` let the window start at index 0, which R silently drops,
          ## shrinking the median window by one element
          if((n + method.Pych.smoothing) <= nrow(object.data.temp) &&
             (n - method.Pych.smoothing) >= 1){
            ifelse(
              object.data.temp[n,x] >= temp.hist$breaks[temp.hist.thres],
              median(object.data.temp[(n-method.Pych.smoothing):
                                        (n+method.Pych.smoothing),x]),
              object.data.temp[n,x])

          }else{
            object.data.temp[n,x]

          }

        })

      }

      ##(8) - return histogram used for the removal as plot
      if(plot){
        plot(temp.hist,
             xlab = "Signal intensity [a.u.]",
             main = "Cosmic-ray removal histogram")

        abline(v = temp.hist$breaks[temp.hist.thres],
               col = "red")

        if(!is.na(temp.hist$breaks[temp.hist.thres])){
          legend("topright", "threshold" ,lty = 1, lwd = 1, col = "red", bty = "n")
          mtext(side = 3, paste0("Frame: ", x, " (",
                                 colnames(object.data.temp)[x],
                                 ")"))

        }else{
          mtext(side = 3, paste0("Frame: ", x, " (",
                                 colnames(object.data.temp)[x],
                                 ") - no threshold applied!"))
        }

      }

      ##(9) - return information on the amount of removed cosmic-rays
      if(verbose){
        #sum up removed counts values above the threshold
        sum.corrected.channels <- try(
          sum(temp.hist$counts[temp.hist.thres:length(temp.hist$counts)]),
          silent = TRUE)

        ## inherits() is the canonical check for a failed try()
        if(inherits(sum.corrected.channels, "try-error")){sum.corrected.channels <- 0}

        cat("[apply_CosmicRayRemoval()] >> ")
        cat(paste(sum.corrected.channels, " channels corrected in frame ", x, "\n", sep = ""))
      }

      ##return object
      return(object.data.temp[,x])

    })#end loop

  }else{
    ## FIX: corrected typo in the error message ("Unkown" -> "Unknown")
    stop("[apply_CosmicRayRemoval()] Unknown method for cosmic ray removal.")

  }

  # Correct row and column names --------------------------------------------
  colnames(object.data.temp.smooth) <- colnames(object@data)
  rownames(object.data.temp.smooth) <- rownames(object@data)

  # Return Output------------------------------------------------------------
  temp.output <- set_RLum(
    class = "RLum.Data.Spectrum",
    recordType = object@recordType,
    curveType = object@curveType,
    data = object.data.temp.smooth,
    info = object@info)

  invisible(temp.output)

}
Luminescence/R/bin_RLum.Data.R0000644000176200001440000000306214264017373015577 0ustar  liggesusers#' Channel binning - method dispatcher
#'
#' Function calls the object-specific bin functions for RLum.Data S4 class objects.
#'
#' The function provides a generalised access point for specific
#' [RLum.Data-class] objects. \cr
#' Depending on the input object, the corresponding function will be selected.
#' Allowed arguments can be found in the documentations of the corresponding
#' [RLum.Data-class] class.
#'
#' @param object [RLum.Data-class] (**required**):
#' S4 object of class `RLum.Data`
#'
#' @param ... further arguments passed to the specific class method
#'
#' @return An object of the same type as the input object is provided
#'
#' @section Function version: 0.2.0
#'
#' @author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#' @note Currently only `RLum.Data` objects of class [RLum.Data.Curve-class] and [RLum.Data.Spectrum-class] are supported!
#'
#' @seealso [RLum.Data.Curve-class], [RLum.Data.Spectrum-class]
#'
#' @examples
#'
#' ##load example data
#' data(ExampleData.CW_OSL_Curve, envir = environment())
#'
#' ##create RLum.Data.Curve object from this example
#' curve <-
#'   set_RLum(
#'       class = "RLum.Data.Curve",
#'       recordType = "OSL",
#'       data = as.matrix(ExampleData.CW_OSL_Curve)
#'   )
#'
#' ##plot data without and with 2 and 4 channel binning
#' plot_RLum(curve)
#' plot_RLum(bin_RLum.Data(curve, bin_size = 2))
#' plot_RLum(bin_RLum.Data(curve, bin_size = 4))
#'
#' @keywords utilities
#'
#' @md
#' @export
## S4 generic only: per the roxygen details above, dispatch selects the
## class-specific bin method for the incoming RLum.Data object
setGeneric("bin_RLum.Data", function(object, ...) {
  standardGeneric("bin_RLum.Data")
})
Luminescence/R/Risoe.BINfileData-class.R0000644000176200001440000004360514367174002017451 0ustar  liggesusers#' @include get_Risoe.BINfileData.R set_Risoe.BINfileData.R
NULL

#' Class `"Risoe.BINfileData"`
#'
#' S4 class object for luminescence data in R. The object is produced as output
#' of the function [read_BIN2R].
#'
#'
#'
#' @name Risoe.BINfileData-class
#'
#' @docType class
#'
#' @slot METADATA Object of class "data.frame" containing the meta information for each curve.
#'
#' @slot DATA Object of class "list" containing numeric vector with count data.
#'
#' @slot .RESERVED Object of class "list" containing list of undocumented raw values for internal use only.
#'
#' @keywords internal
#'
#' @note
#'
#' **Internal METADATA - object structure**
#'
#' This structure is compatible with BIN/BINX-files version 03-08, however, it does not follow (in its
#' sequential arrangement) the manual provided by the manufacturer,
#' but an own structure accounting for the different versions.
#' #' \tabular{rllll}{ #' **#** \tab **Name** \tab **Data Type** \tab **V** \tab **Description** \cr #' `[,1]` \tab `ID` \tab `numeric` \tab RLum \tab Unique record ID (same ID as in slot `DATA`)\cr #' `[,2]` \tab `SEL` \tab `logic` \tab RLum \tab Record selection, not part official BIN-format, triggered by TAG\cr #' `[,3]` \tab `VERSION` \tab `raw` \tab 03-08 \tab BIN-file version number \cr #' `[,4]` \tab `LENGTH` \tab `integer` \tab 03-08 \tab Length of this record\cr #' `[,5]` \tab `PREVIOUS` \tab `integer` \tab 03-08 \tab Length of previous record\cr #' `[,6]` \tab `NPOINTS` \tab `integer` \tab 03-08 \tab Number of data points in the record\cr #' `[,7]` \tab `RECTYPE` \tab `integer` \tab 08 \tab Record type \cr #' `[,8]` \tab `RUN` \tab `integer` \tab 03-08 \tab Run number\cr #' `[,9]` \tab `SET` \tab `integer` \tab 03-08 \tab Set number\cr #' `[,10]` \tab `POSITION` \tab `integer` \tab 03-08 \tab Position number\cr #' `[,11]` \tab `GRAIN` \tab `integer` \tab 03-04 \tab Grain number\cr #' `[,12]` \tab `GRAINNUMBER` \tab `integer` \tab 05-08 \tab Grain number\cr #' `[,13]` \tab `CURVENO` \tab `integer` \tab 05-08 \tab Curve number\cr #' `[,14]` \tab `XCOORD` \tab `integer` \tab 03-08 \tab X position of a single grain\cr #' `[,15]` \tab `YCOORD` \tab `integer` \tab 03-08 \tab Y position of a single grain\cr #' `[,16]` \tab `SAMPLE` \tab `factor` \tab 03-08 \tab Sample name\cr #' `[,17]` \tab `COMMENT` \tab `factor` \tab 03-08 \tab Comment name\cr #' `[,18]` \tab `SYSTEMID` \tab `integer` \tab 03-08 \tab Risø system id\cr #' `[,19]` \tab `FNAME` \tab `factor` \tab 05-08 \tab File name (*.bin/*.binx)\cr #' `[,20]` \tab `USER` \tab `factor` \tab 03-08 \tab User name\cr #' `[,21]` \tab `TIME` \tab `character` \tab 03-08 \tab Data collection time (`hh-mm-ss`)\cr #' `[,22]` \tab `DATE` \tab `factor` \tab 03-08 \tab Data collection date (`ddmmyy`)\cr #' `[,23]` \tab `DTYPE` \tab `character` \tab 03-08 \tab Data type\cr #' `[,24]` \tab `BL_TIME` \tab `numeric` \tab 
03-08 \tab Bleaching time\cr #' `[,25]` \tab `BL_UNIT` \tab `integer` \tab 03-08 \tab Bleaching unit (mJ, J, s, min, h)\cr #' `[,26]` \tab `NORM1` \tab `numeric` \tab 03-08 \tab Normalisation factor (1)\cr #' `[,27]` \tab `NORM2` \tab `numeric` \tab 03-08 \tab Normalisation factor (2)\cr #' `[,28]` \tab `NORM3` \tab `numeric` \tab 03-08 \tab Normalisation factor (3)\cr #' `[,29]` \tab `BG` \tab `numeric` \tab 03-08 \tab Background level\cr #' `[,30]` \tab `SHIFT` \tab `integer` \tab 03-08 \tab Number of channels to shift data\cr #' `[,31]` \tab `TAG` \tab `integer` \tab 03-08 \tab Tag, triggers `SEL`\cr #' `[,32]` \tab `LTYPE` \tab `character` \tab 03-08 \tab Luminescence type\cr #' `[,33]` \tab `LIGHTSOURCE` \tab `character` \tab 03-08 \tab Light source\cr #' `[,34]` \tab `LPOWER` \tab `numeric` \tab 03-08 \tab Optical stimulation power\cr #' `[,35]` \tab `LIGHTPOWER` \tab `numeric` \tab 05-08 \tab Optical stimulation power\cr #' `[,36]` \tab `LOW` \tab `numeric` \tab 03-08 \tab Low (temperature, time, wavelength)\cr #' `[,37]` \tab `HIGH` \tab `numeric` \tab 03-08 \tab High (temperature, time, wavelength)\cr #' `[,38]` \tab `RATE` \tab `numeric` \tab 03-08 \tab Rate (heating rate, scan rate)\cr #' `[,39]` \tab `TEMPERATURE` \tab `integer` \tab 03-08 \tab Sample temperature\cr #' `[,40]` \tab `MEASTEMP` \tab `integer` \tab 05-08 \tab Measured temperature\cr #' `[,41]` \tab `AN_TEMP` \tab `numeric` \tab 03-08 \tab Annealing temperature\cr #' `[,42]` \tab `AN_TIME` \tab `numeric` \tab 03-08 \tab Annealing time\cr #' `[,43]` \tab `TOLDELAY` \tab `integer` \tab 03-08 \tab TOL 'delay' channels\cr #' `[,44]` \tab `TOLON` \tab `integer` \tab 03-08 \tab TOL 'on' channels\cr #' `[,45]` \tab `TOLOFF` \tab `integer` \tab 03-08 \tab TOL 'off' channels\cr #' `[,46]` \tab `IRR_TIME` \tab `numeric` \tab 03-08 \tab Irradiation time\cr #' `[,47]` \tab `IRR_TYPE` \tab `integer` \tab 03-08 \tab Irradiation type (alpha, beta or gamma)\cr #' `[,48]` \tab `IRR_UNIT` \tab `integer` \tab 
03-04 \tab Irradiation unit (Gy, rad, s, min, h)\cr #' `[,49]` \tab `IRR_DOSERATE` \tab `numeric` \tab 05-08 \tab Irradiation dose rate (Gy/s)\cr #' `[,50]` \tab `IRR_DOSERATEERR` \tab `numeric` \tab 06-08 \tab Irradiation dose rate error (Gy/s)\cr #' `[,51]` \tab `TIMESINCEIRR` \tab `integer` \tab 05-08 \tab Time since irradiation (s)\cr #' `[,52]` \tab `TIMETICK` \tab `numeric` \tab 05-08 \tab Time tick for pulsing (s)\cr #' `[,53]` \tab `ONTIME` \tab `integer` \tab 05-08 \tab On-time for pulsing (in time ticks)\cr #' `[,54]` \tab `OFFTIME` \tab `integer` \tab 03 \tab Off-time for pulsed stimulation (in s) \cr #' `[,55]` \tab `STIMPERIOD` \tab `integer` \tab 05-08 \tab Stimulation period (on+off in time ticks)\cr #' `[,56]` \tab `GATE_ENABLED` \tab `raw` \tab 05-08 \tab PMT signal gating enabled\cr #' `[,57]` \tab `ENABLE_FLAGS` \tab `raw` \tab 05-08 \tab PMT signal gating enabled\cr #' `[,58]` \tab `GATE_START` \tab `integer` \tab 05-08 \tab Start gating (in time ticks)\cr #' `[,59]` \tab `GATE_STOP` \tab `integer` \tab 05-08 \tab Stop gating (in time ticks), `'Gateend'` for version 04, here only GATE_STOP is used\cr #' `[,60]` \tab `PTENABLED` \tab `raw` \tab 05-08 \tab Photon time enabled\cr #' `[,61]` \tab `DTENABLED` \tab `raw` \tab 05-08 \tab PMT dead time correction enabled\cr #' `[,62]` \tab `DEADTIME` \tab `numeric` \tab 05-08 \tab PMT dead time (s)\cr #' `[,63]` \tab `MAXLPOWER` \tab `numeric` \tab 05-08 \tab Stimulation power to 100 percent (mW/cm^2)\cr #' `[,64]` \tab `XRF_ACQTIME` \tab `numeric` \tab 05-08 \tab XRF acquisition time (s)\cr #' `[,65]` \tab `XRF_HV` \tab `numeric` \tab 05-08 \tab XRF X-ray high voltage (V)\cr #' `[,66]` \tab `XRF_CURR` \tab `integer` \tab 05-08 \tab XRF X-ray current (µA)\cr #' `[,67]` \tab `XRF_DEADTIMEF` \tab `numeric` \tab 05-08 \tab XRF dead time fraction\cr #' `[,68]` \tab `DETECTOR_ID` \tab `raw` \tab 07-08 \tab Detector ID\cr #' `[,69]` \tab `LOWERFILTER_ID` \tab `integer` \tab 07-08 \tab Lower filter ID in 
reader\cr #' `[,70]` \tab `UPPERFILTER_ID` \tab `integer` \tab 07-08 \tab Upper filter ID in reader\cr #' `[,71]` \tab `ENOISEFACTOR` \tab `numeric` \tab 07-08 \tab Excess noise filter, usage unknown \cr #' `[,72]` \tab `MARKPOS_X1` \tab `numeric` \tab 08 \tab Coordinates marker position 1 \cr #' `[,73]` \tab `MARKPOS_Y1` \tab `numeric` \tab 08 \tab Coordinates marker position 1 \cr #' `[,74]` \tab `MARKPOS_X2` \tab `numeric` \tab 08 \tab Coordinates marker position 2 \cr #' `[,75]` \tab `MARKPOS_Y2` \tab `numeric` \tab 08 \tab Coordinates marker position 2 \cr #' `[,76]` \tab `MARKPOS_X3` \tab `numeric` \tab 08 \tab Coordinates marker position 3 \cr #' `[,77]` \tab `MARKPOS_Y3` \tab `numeric` \tab 08 \tab Coordinates marker position 3 \cr #' `[,78]` \tab `EXTR_START` \tab `numeric` \tab 08 \tab usage unknown \cr #' `[,79]` \tab `EXTR_END` \tab `numeric` \tab 08 \tab usage unknown\cr #' `[,80]` \tab `SEQUENCE` \tab `character` \tab 03-04 \tab Sequence name #' } #' V = BIN-file version (RLum means that it does not depend on a specific BIN version) #' #' Note that the `Risoe.BINfileData` object combines all values from #' different versions from the BIN-file, reserved bits are skipped, however, #' the function [write_R2BIN] reset arbitrary reserved bits. Invalid #' values for a specific version are set to `NA`. Furthermore, the #' internal R data types do not necessarily match the required data types for #' the BIN-file data import! 
Data types are converted during data import.\cr #' #' **LTYPE** values #' #' \tabular{rll}{ #' VALUE \tab TYPE \tab DESCRIPTION \cr #' `[0]` \tab `TL` \tab: Thermoluminescence \cr #' `[1]` \tab `OSL` \tab: Optically stimulated luminescence \cr #' `[2]` \tab `IRSL` \tab: Infrared stimulated luminescence \cr #' `[3]` \tab `M-IR` \tab: Infrared monochromator scan\cr #' `[4]` \tab `M-VIS` \tab: Visible monochromator scan\cr #' `[5]` \tab `TOL` \tab: Thermo-optical luminescence \cr #' `[6]` \tab `TRPOSL` \tab: Time Resolved Pulsed OSL\cr #' `[7]` \tab `RIR` \tab: Ramped IRSL\cr #' `[8]` \tab `RBR` \tab: Ramped (Blue) LEDs\cr #' `[9]` \tab `USER` \tab: User defined\cr #' `[10]` \tab `POSL` \tab: Pulsed OSL \cr #' `[11]` \tab `SGOSL` \tab: Single Grain OSL\cr #' `[12]` \tab `RL` \tab: Radio Luminescence \cr #' `[13]` \tab `XRF` \tab: X-ray Fluorescence #' } #' #' **DTYPE** values #' #' \tabular{rl}{ #' VALUE \tab DESCRIPTION \cr #' `[0]` \tab Natural \cr #' `[1]` \tab N+dose \cr #' `[2]` \tab Bleach \cr #' `[3]` \tab Bleach+dose \cr #' `[4]` \tab Natural (Bleach) \cr #' `[5]` \tab N+dose (Bleach) \cr #' `[6]` \tab Dose \cr #' `[7]` \tab Background #' } #' #' **LIGHTSOURCE** values #' #' \tabular{rl}{ #' VALUE \tab DESCRIPTION \cr #' `[0]` \tab None \cr #' `[1]` \tab Lamp \cr #' `[2]` \tab IR diodes/IR Laser \cr #' `[3]` \tab Calibration LED \cr #' `[4]` \tab Blue Diodes \cr #' `[5]` \tab White light \cr #' `[6]` \tab Green laser (single grain) \cr #' `[7]` \tab IR laser (single grain) } #' #' (information on the BIN/BINX file format are kindly provided by Risø, DTU Nutech) #' #' @section Objects from the Class: Objects can be created by calls of the form #' `new("Risoe.BINfileData", ...)`. 
#' #' @section Function version: 0.4.0 #' #' @author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)\cr #' based on information provided by Torben Lapp and Karsten Bracht Nielsen (Risø DTU, Denmark) #' #' @seealso [plot_Risoe.BINfileData], [read_BIN2R], [write_R2BIN], #' [merge_Risoe.BINfileData], [Risoe.BINfileData2RLum.Analysis] #' #' @references #' Risø DTU, 2013. The Sequence Editor User Manual - Feb 2013 and Risø DTU, 2016. #' #' The Sequence Editor User Manual - February 2016 #' #' [https://www.fysik.dtu.dk]() #' #' @keywords classes #' #' @examples #' #' showClass("Risoe.BINfileData") #' #' @md #' @export setClass("Risoe.BINfileData", slots = list( METADATA = "data.frame", DATA = "list", .RESERVED = "list" ), prototype = prototype( METADATA = data.frame( ID = integer(), SEL = logical(), VERSION = integer(), LENGTH = integer(), PREVIOUS = integer(), NPOINTS = integer(), RECTYPE = integer(), RUN = integer(), SET = integer(), POSITION = integer(), GRAIN = integer(), GRAINNUMBER = integer(), CURVENO = integer(), XCOORD = integer(), YCOORD = integer(), SAMPLE = character(), COMMENT = character(), SYSTEMID = integer(), FNAME = character(), USER = character(), TIME = character(), DATE = character(), DTYPE = character(), BL_TIME = numeric(), BL_UNIT = integer(), NORM1 = numeric(), NORM2 = numeric(), NORM3 = numeric(), BG = numeric(), SHIFT = integer(), TAG = integer(), LTYPE = character(), LIGHTSOURCE = character(), LPOWER = numeric(), LIGHTPOWER = numeric(), LOW = numeric(), HIGH = numeric(), RATE = numeric(), TEMPERATURE = numeric(), MEASTEMP = numeric(), AN_TEMP = numeric(), AN_TIME = numeric(), TOLDELAY = integer(), TOLON = integer(), TOLOFF = integer(), IRR_TIME = numeric(), IRR_TYPE = integer(), IRR_UNIT = integer(), IRR_DOSERATE = numeric(), IRR_DOSERATEERR = numeric(), TIMESINCEIRR = numeric(), TIMETICK = numeric(), ONTIME = numeric(), OFFTIME = numeric(), STIMPERIOD = integer(), GATE_ENABLED = numeric(), ENABLE_FLAGS = numeric(), 
GATE_START = numeric(), GATE_STOP = numeric(), PTENABLED = numeric(), DTENABLED = numeric(), DEADTIME = numeric(), MAXLPOWER = numeric(), XRF_ACQTIME = numeric(), XRF_HV = numeric(), XRF_CURR = numeric(), XRF_DEADTIMEF = numeric(), DETECTOR_ID = integer(), LOWERFILTER_ID = integer(), UPPERFILTER_ID = integer(), ENOISEFACTOR = numeric(), MARKPOS_X1 = numeric(), MARKPOS_Y1 = numeric(), MARKPOS_X2 = numeric(), MARKPOS_Y2 = numeric(), MARKPOS_X3 = numeric(), MARKPOS_Y3 = numeric(), EXTR_START = numeric(), EXTR_END = numeric(), SEQUENCE = character(), stringsAsFactors=FALSE ), DATA = list(), .RESERVED = list() ) ) ##set generic S4 function for object #' @describeIn Risoe.BINfileData #' Show structure of RLum and Risoe.BINfile class objects #' #' @md #' @export setMethod(f = "show", signature = signature(object = "Risoe.BINfileData"), definition = function(object){ if(nrow(object@METADATA) != 0){ version<-paste(unique(object@METADATA[,"VERSION"]), collapse = ", ") systemID<-paste(unique(object@METADATA[,"SYSTEMID"]), collapse = ", ") filename <- as.character(object@METADATA[1,"FNAME"]) records.overall<-length(object@DATA) records.type<-table(object@METADATA[,"LTYPE"]) user<-paste(unique(as.character(object@METADATA[,"USER"])), collapse = ", ") date<-paste(unique(as.character(object@METADATA[,"DATE"])), collapse = ", ") run.range<-range(object@METADATA[,"RUN"]) set.range<-range(object@METADATA[,"SET"]) grain.range <- range(object@METADATA[,"GRAIN"]) pos.range<-range(object@METADATA[,"POSITION"]) records.type.count <- sapply(1:length(records.type), function(x){paste( names(records.type)[x],"\t(n = ",records.type[x],")",sep="") }) records.type.count <- paste(records.type.count, collapse="\n\t ") ##print cat("\n[Risoe.BINfileData object]") cat("\n\n\tBIN/BINX version ", version) if(version>=6){ cat("\n\tFile name: ", filename) } cat("\n\tObject date: ", date) cat("\n\tUser: ", user) cat("\n\tSystem ID: ", ifelse(systemID == 0,"0 (unknown)", systemID)) cat("\n\tOverall 
records: ", records.overall) cat("\n\tRecords type: ", records.type.count) cat("\n\tPosition range: ",pos.range[1],":",pos.range[2]) cat("\n\tGrain range: ",grain.range[1],":",grain.range[2]) cat("\n\tRun range: ",run.range[1],":",run.range[2]) cat("\n\tSet range: ",set.range[1],":",set.range[2], "\n") }else{ cat("\n[Risoe.BINfileData object]") cat("\n\n >> This object is empty!<<") } }#end function )#end setMethod # constructor (set) method for object class ----------------------------------- #' @describeIn Risoe.BINfileData #' The Risoe.BINfileData is normally produced as output of the function read_BIN2R. #' This construction method is intended for internal usage only. #' #' @param METADATA Object of class "data.frame" containing the meta information #' for each curve. #' #' @param DATA Object of class "list" containing numeric vector with count data. #' #' @param .RESERVED Object of class "list" containing list of undocumented raw #' values for internal use only. #' #' @md #' @export setMethod(f = "set_Risoe.BINfileData", signature = signature("ANY"), definition = function(METADATA, DATA, .RESERVED) { if(length(METADATA) == 0){ new("Risoe.BINfileData") }else{ new( "Risoe.BINfileData", METADATA = METADATA, DATA = DATA, .RESERVED = .RESERVED ) } }) # accessor (get) method for object class ----------------------------------- #' @describeIn Risoe.BINfileData #' Formal get-method for Risoe.BINfileData object. It does not allow accessing #' the object directly, it is just showing a terminal message. #' #' @param object an object of class [Risoe.BINfileData-class] #' #' @param ... other arguments that might be passed #' #' @md #' @export setMethod("get_Risoe.BINfileData", signature= "Risoe.BINfileData", definition = function(object, ...) { cat("[get_Risoe.BINfileData()] No direct access is provided for this object type. 
Use the function 'Risoe.BINfileData2RLum.Analysis' for object coercing.") })##end setMethod ##-------------------------------------------------------------------------------------------------## ##=================================================================================================## Luminescence/R/analyse_pIRIRSequence.R0000644000176200001440000007435214464125673017371 0ustar liggesusers#' @title Analyse post-IR IRSL measurement sequences #' #' @description The function performs an analysis of post-IR IRSL sequences #' including curve #' fitting on [RLum.Analysis-class] objects. #' #' @details To allow post-IR IRSL protocol (Thomsen et al., 2008) measurement analyses #' this function has been written as extended wrapper function for the function #' [analyse_SAR.CWOSL], facilitating an entire sequence analysis in #' one run. With this, its functionality is strictly limited by the #' functionality of the function [analyse_SAR.CWOSL]. #' #' **Defining the sequence structure** #' #' The argument `sequence.structure` expects a shortened pattern of your sequence structure and was #' mainly introduced to ease the use of the function. For example: If your measurement data contains #' the following curves: `TL`, `IRSL`, `IRSL`, `TL`, `IRSL`, `IRSL`, the sequence pattern in `sequence.structure` #' becomes `c('TL', 'IRSL', 'IRSL')`. The second part of your sequence for one cycle should be #' similar and can be discarded. If this is not the case (e.g., additional hotbleach) such curves #' have to be removed before using the function. #' #' **If the input is a `list`** #' #' If the input is a list of RLum.Analysis-objects, every argument can be provided as list to allow #' for different sets of parameters for every single input element. #' For further information see [analyse_SAR.CWOSL]. #' #' @param object [RLum.Analysis-class] or [list] of [RLum.Analysis-class] objects (**required**): #' input object containing data for analysis. 
#' If a [list] is provided the functions tries to iterate over the list. #' #' @param signal.integral.min [integer] (**required**): #' lower bound of the signal integral. Provide this value as vector for different #' integration limits for the different IRSL curves. #' #' @param signal.integral.max [integer] (**required**): #' upper bound of the signal integral. Provide this value as vector for different #' integration limits for the different IRSL curves. #' #' @param background.integral.min [integer] (**required**): #' lower bound of the background integral. Provide this value as vector for #' different integration limits for the different IRSL curves. #' #' @param background.integral.max [integer] (**required**): #' upper bound of the background integral. Provide this value as vector for #' different integration limits for the different IRSL curves. #' #' @param dose.points [numeric] (*optional*): #' a numeric vector containing the dose points values. Using this argument overwrites dose point #' values in the signal curves. #' #' @param sequence.structure [vector] [character] (*with default*): #' specifies the general sequence structure. Allowed values are `"TL"` and #' any `"IR"` combination (e.g., `"IR50"`,`"pIRIR225"`). #' Additionally a parameter `"EXCLUDE"` is allowed to exclude curves from #' the analysis (Note: If a preheat without PMT measurement is used, i.e. #' preheat as none TL, remove the TL step.) #' #' @param plot [logical] (*with default*): #' enables or disables plot output. #' #' @param plot.single [logical] (*with default*): #' single plot output (`TRUE/FALSE`) to allow for plotting the results in single plot #' windows. Requires `plot = TRUE`. #' #' @param ... further arguments that will be passed to the function #' [analyse_SAR.CWOSL] and [plot_GrowthCurve]. 
Furthermore, the arguments `main` (headers), `log` (IRSL curves), `cex` (control #' the size) and `mtext.outer` (additional text on the plot area) can be passed to influence the plotting. If the input #' is list, `main` can be passed as [vector] or [list]. #' #' @return #' Plots (*optional*) and an [RLum.Results-class] object is #' returned containing the following elements: #' #' \tabular{lll}{ #' **DATA.OBJECT** \tab **TYPE** \tab **DESCRIPTION** \cr #' `..$data` : \tab `data.frame` \tab Table with De values \cr #' `..$LnLxTnTx.table` : \tab `data.frame` \tab with the `LnLxTnTx` values \cr #' `..$rejection.criteria` : \tab [data.frame] \tab rejection criteria \cr #' `..$Formula` : \tab [list] \tab Function used for fitting of the dose response curve \cr #' `..$call` : \tab [call] \tab the original function call #' } #' #' The output should be accessed using the function [get_RLum]. #' #' @note #' Best graphical output can be achieved by using the function `pdf` #' with the following options: #' #' `pdf(file = "", height = 15, width = 15)` #' #' @section Function version: 0.2.4 #' #' @author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #' @seealso [analyse_SAR.CWOSL], [calc_OSLLxTxRatio], [plot_GrowthCurve], #' [RLum.Analysis-class], [RLum.Results-class] [get_RLum] #' #' @references #' Murray, A.S., Wintle, A.G., 2000. Luminescence dating of quartz #' using an improved single-aliquot regenerative-dose protocol. Radiation #' Measurements 32, 57-73. \doi{10.1016/S1350-4487(99)00253-X} #' #' Thomsen, K.J., Murray, A.S., Jain, M., Boetter-Jensen, L., 2008. Laboratory #' fading rates of various luminescence signals from feldspar-rich sediment #' extracts. Radiation Measurements 43, 1474-1486. #' \doi{10.1016/j.radmeas.2008.06.002} #' #' @keywords datagen plot #' #' @examples #' #' #' ### NOTE: For this example existing example data are used. These data are non pIRIR data. 
#' ### #' ##(1) Compile example data set based on existing example data (SAR quartz measurement) #' ##(a) Load example data #' data(ExampleData.BINfileData, envir = environment()) #' #' ##(b) Transform the values from the first position in a RLum.Analysis object #' object <- Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos=1) #' #' ##(c) Grep curves and exclude the last two (one TL and one IRSL) #' object <- get_RLum(object, record.id = c(-29,-30)) #' #' ##(d) Define new sequence structure and set new RLum.Analysis object #' sequence.structure <- c(1,2,2,3,4,4) #' sequence.structure <- as.vector(sapply(seq(0,length(object)-1,by = 4), #' function(x){sequence.structure + x})) #' #' object <- sapply(1:length(sequence.structure), function(x){ #' #' object[[sequence.structure[x]]] #' #' }) #' #' object <- set_RLum(class = "RLum.Analysis", records = object, protocol = "pIRIR") #' #' ##(2) Perform pIRIR analysis (for this example with quartz OSL data!) #' ## Note: output as single plots to avoid problems with this example #' results <- analyse_pIRIRSequence(object, #' signal.integral.min = 1, #' signal.integral.max = 2, #' background.integral.min = 900, #' background.integral.max = 1000, #' fit.method = "EXP", #' sequence.structure = c("TL", "pseudoIRSL1", "pseudoIRSL2"), #' main = "Pseudo pIRIR data set based on quartz OSL", #' plot.single = TRUE) #' #' #' ##(3) Perform pIRIR analysis (for this example with quartz OSL data!) 
#' ## Alternative for PDF output, uncomment and complete for usage
#' \dontrun{
#' tempfile <- tempfile(fileext = ".pdf")
#' pdf(file = tempfile, height = 15, width = 15)
#' results <- analyse_pIRIRSequence(object,
#' signal.integral.min = 1,
#' signal.integral.max = 2,
#' background.integral.min = 900,
#' background.integral.max = 1000,
#' fit.method = "EXP",
#' main = "Pseudo pIRIR data set based on quartz OSL")
#'
#' dev.off()
#' }
#'
#' @md
#' @export
analyse_pIRIRSequence <- function(
  object,
  signal.integral.min,
  signal.integral.max,
  background.integral.min,
  background.integral.max,
  dose.points = NULL,
  sequence.structure = c("TL", "IR50", "pIRIR225"),
  plot = TRUE,
  plot.single = FALSE,
  ...
){

# SELF CALL -----------------------------------------------------------------------------------
## If 'object' is a list of RLum.Analysis objects, recycle all arguments to
## the length of the list, run the analysis once per element and merge the
## resulting RLum.Results objects into a single return object.
if(is.list(object)){

  ##make life easy
  if(missing("signal.integral.min")){
    signal.integral.min <- 1
    warning("[analyse_pIRIRSequence()] 'signal.integral.min' missing, set to 1", call. = FALSE)
  }

  if(missing("signal.integral.max")){
    signal.integral.max <- 2
    warning("[analyse_pIRIRSequence()] 'signal.integral.max' missing, set to 2", call. = FALSE)
  }

  ##now we have to extend everything to allow list of arguments ... this is just consequent
  signal.integral.min <- rep(list(signal.integral.min), length = length(object))
  signal.integral.max <- rep(list(signal.integral.max), length = length(object))
  background.integral.min <- rep(list(background.integral.min), length = length(object))
  background.integral.max <- rep(list(background.integral.max), length = length(object))
  sequence.structure <- rep(list(sequence.structure), length = length(object))

  ## dose.points may already come as a list (one entry per aliquot) or as a
  ## single vector to be recycled for every aliquot
  if(!is.null(dose.points)){
    if(is(dose.points, "list")){
      dose.points <- rep(dose.points, length = length(object))

    }else{
      dose.points <- rep(list(dose.points), length = length(object))
    }

  }else{
    dose.points <- rep(list(NULL), length(object))
  }

  ##main
  if("main" %in% names(list(...))){
    main_list <- rep(list(...)$main, length.out = length(object))
    if(!inherits(main_list, "list")){
      main_list <- as.list(main_list)
    }
  }

  ##run analysis
  ## NOTE: 'ifelse' evaluates 'main_list[[x]]' only when the condition is
  ## TRUE, so the missing-'main' case is safe here
  temp <- lapply(1:length(object), function(x){
    analyse_pIRIRSequence(
      object[[x]],
      signal.integral.min = signal.integral.min[[x]],
      signal.integral.max = signal.integral.max[[x]],
      background.integral.min = background.integral.min[[x]],
      background.integral.max = background.integral.max[[x]],
      dose.points = dose.points[[x]],
      sequence.structure = sequence.structure[[x]],
      plot = plot,
      plot.single = plot.single,
      main = ifelse("main" %in% names(list(...)), main_list[[x]], paste0("ALQ #",x)),
      ...)
  })

  ##combine everything to one RLum.Results object as if this was what was written ... only
  ##one object

  ##merge results and check if the output became NULL
  results <- merge_RLum(temp)

  ##DO NOT use invisible here, this will stop the function from stopping
  if(length(results) == 0){
    return(NULL)

  }else{
    return(results)
  }
}

# General Integrity Checks ---------------------------------------------------

##GENERAL
##MISSING INPUT
if(missing(object)){
  stop("[analyse_pIRIRSequence()] No value set for 'object'!")
}

##INPUT OBJECTS
## fix: error message previously misspelled the class name as 'RLum.Analyis'
if(!is(object, "RLum.Analysis")){
  stop("[analyse_pIRIRSequence()] Input object is not of type 'RLum.Analysis'!")
}

##CHECK ALLOWED VALUES IN SEQUENCE STRUCTURE
temp.collect.invalid.terms <- paste(sequence.structure[
    (!grepl("TL",sequence.structure)) &
    (!grepl("IR",sequence.structure)) &
    (!grepl("OSL",sequence.structure)) &
    (!grepl("EXCLUDE",sequence.structure))],
  collapse = ", ")

if(temp.collect.invalid.terms != ""){
  stop("[analyse_pIRIRSequence()] ", temp.collect.invalid.terms, " not allowed in 'sequence.structure'!")
}

# Deal with extra arguments -------------------------------------------------------------------

##deal with additional arguments
extraArgs <- list(...)

## NOTE(review): 'mtext.outer' and 'log' are extracted here but not used in
## the visible body of this function — presumably consumed by plotting code in
## earlier versions; kept for backwards compatibility. TODO confirm.
mtext.outer <- if("mtext.outer" %in% names(extraArgs)) {extraArgs$mtext.outer} else {"MEASUREMENT INFO"}

main <- if("main" %in% names(extraArgs)) {extraArgs$main} else {""}

log <- if("log" %in% names(extraArgs)) {extraArgs$log} else {""}

cex <- if("cex" %in% names(extraArgs)) {extraArgs$cex} else {.7}

# Protocol Integrity Checks --------------------------------------------------

##(1) Check structure and remove curves that do not fit the recordType criteria

##get sequence structure
temp.sequence.structure <- structure_RLum(object)

##remove data types that do not fit to the allowed values
temp.sequence.rm.id <- temp.sequence.structure[
    (!grepl("TL",temp.sequence.structure[["recordType"]])) &
    (!grepl("OSL", temp.sequence.structure[["recordType"]])) &
    (!grepl("IRSL", temp.sequence.structure[["recordType"]]))
  ,"id"]

if(length(temp.sequence.rm.id)>0){

  ##remove records from data set
  object <- get_RLum(
    object,
    record.id = -temp.sequence.rm.id,
    drop = FALSE)

  ##compile warning message
  temp.sequence.rm.warning <- paste(
    temp.sequence.structure[temp.sequence.rm.id, "recordType"], collapse = ", ")

  temp.sequence.rm.warning <- paste(
    "Record types are unrecognised and have been removed:", temp.sequence.rm.warning)

  warning(temp.sequence.rm.warning, call. = FALSE)
}

##(2) Apply user sequence structure

##get sequence structure
temp.sequence.structure <- structure_RLum(object)

##try to account for a very common mistake
if(any(grepl(sequence.structure, pattern = "TL", fixed = TRUE)) &&
   !any(grepl(temp.sequence.structure[["recordType"]], pattern = "TL", fixed = TRUE))){

  warning("[analyse_pIRIRSequence()] Your sequence does not contain 'TL' curves, trying to adapt 'sequence.structure' for you ...", call. = FALSE, immediate. = TRUE)
  sequence.structure <- sequence.structure[!grepl(sequence.structure, pattern = "TL", fixed = TRUE)]
}

##set values to structure data.frame
##but check first
if(2 * length(rep(
     sequence.structure,
     nrow(temp.sequence.structure)/2/length(sequence.structure))) ==
   length(temp.sequence.structure[["protocol.step"]])){

  temp.sequence.structure[["protocol.step"]] <- rep(
    sequence.structure,
    nrow(temp.sequence.structure)/2/length(sequence.structure))

}else{
  try(stop("[analyse_pIRIRSequence()] Number of records is not a multiple of the defined sequence structure! NULL returned!", call. = FALSE))
  return(NULL)
}

##remove values that have been excluded
temp.sequence.rm.id <- temp.sequence.structure[
  temp.sequence.structure[,"protocol.step"] == "EXCLUDE" ,"id"]

if(length(temp.sequence.rm.id)>0){

  ##remove from object
  object <- get_RLum(
    object, record.id = -temp.sequence.rm.id, drop = FALSE)

  ##remove from sequence structure
  sequence.structure <- sequence.structure[sequence.structure != "EXCLUDE"]

  ##set new structure
  temp.sequence.structure <- structure_RLum(object)

  ## fix: divide by length(sequence.structure) as in the assignment above;
  ## the original divided by length(temp.sequence.structure), i.e. the number
  ## of columns of the structure data.frame, producing a wrong rep() count
  temp.sequence.structure[, "protocol.step"] <- rep(
    sequence.structure,
    nrow(temp.sequence.structure)/2/length(sequence.structure))

  ##print warning message
  warning("[analyse_pIRIRSequence()] ", length(temp.sequence.rm.id), " records have been removed due to EXCLUDE!", call. = FALSE)
}

##============================================================================##
# Analyse data and plotting ----------------------------------------------------
##============================================================================##

##(1) find out how many runs are needed for the analysis by checking for "IR"
## now should be every signal except the TL curves
n.TL <- table(grepl("TL", sequence.structure))["TRUE"]
if(is.na(n.TL)) {n.TL <- 0}
n.loops <- as.numeric(length(grepl("TL", sequence.structure)) - n.TL)

##grep ids of TL curves (we need them later on)
TL.curves.id <- temp.sequence.structure[
  temp.sequence.structure[,"protocol.step"] == "TL","id"]

##grep ids of all OSL curves (we need them later on)
IRSL.curves.id <- temp.sequence.structure[
  grepl("IR", temp.sequence.structure[,"protocol.step"]),"id"]

##grep information on the names of the IR curves, we need them later on
## NOTE(review): the inner 'temp.sequence.structure[IRSL.curves.id,"id"]' is
## used as a row index again — identity only if ids equal row numbers; verify
pIRIR.curve.names <- unique(temp.sequence.structure[
  temp.sequence.structure[IRSL.curves.id,"id"],"protocol.step"])

##===========================================================================#
## set graphic layout using the layout option
## unfortunately a little bit more complicated than expected previously due
## the order of the produced plots by the previous functions

if(plot.single == FALSE & plot == TRUE){

  ##first (Tx,Tn, Lx,Ln)
  temp.IRSL.layout.vector.first <- c(3,5,6,7,3,5,6,8)

  ##middle (any other Lx,Ln)
  if(n.loops > 2){
    temp.IRSL.layout.vector.middle <- vapply(
      2:(n.loops - 1),
      FUN = function(x) {
        offset <- 5 * x - 1
        c((offset):(offset + 3), (offset):(offset + 2), offset + 4)
      },
      FUN.VALUE = vector(mode = "numeric", length = 8)
    )
  }

  ##last (Lx,Ln and legend)
  temp.IRSL.layout.vector.last <- c(
    ifelse(n.loops > 2, max(temp.IRSL.layout.vector.middle) + 1, max(temp.IRSL.layout.vector.first) + 1),
    ifelse(n.loops > 2, max(temp.IRSL.layout.vector.middle) + 2, max(temp.IRSL.layout.vector.first) + 2),
    ifelse(n.loops > 2, max(temp.IRSL.layout.vector.middle) + 4, max(temp.IRSL.layout.vector.first) + 4),
    ifelse(n.loops > 2, max(temp.IRSL.layout.vector.middle) + 5, max(temp.IRSL.layout.vector.first) + 5),
    ifelse(n.loops > 2, max(temp.IRSL.layout.vector.middle) + 1, max(temp.IRSL.layout.vector.first) + 1),
    ifelse(n.loops > 2, max(temp.IRSL.layout.vector.middle) + 2, max(temp.IRSL.layout.vector.first) + 2),
    ifelse(n.loops > 2, max(temp.IRSL.layout.vector.middle) + 4, max(temp.IRSL.layout.vector.first) + 4),
    ifelse(n.loops > 2, max(temp.IRSL.layout.vector.middle) + 6, max(temp.IRSL.layout.vector.first) + 6))

  ##options for different sets of curves
  if(n.loops > 2){
    temp.IRSL.layout.vector <- c(
      temp.IRSL.layout.vector.first,
      temp.IRSL.layout.vector.middle,
      temp.IRSL.layout.vector.last)

  }else{
    temp.IRSL.layout.vector <- c(
      temp.IRSL.layout.vector.first,
      temp.IRSL.layout.vector.last)
  }

  ##get layout information
  def.par <- par(no.readonly = TRUE)

  ##set up layout matrix linked to the number of plot areas needed
  layout.matrix <- c(
    rep(c(2,4,1,1),2), #header row with TL curves and info window
    temp.IRSL.layout.vector, #IRSL curves,
    rep((max(temp.IRSL.layout.vector)-3),8), #legend,
    rep((max(temp.IRSL.layout.vector)+1),1), #GC
    rep((max(temp.IRSL.layout.vector)+2),1), #TnTc
    rep((max(temp.IRSL.layout.vector)+3),2), #Rejection criteria
    rep((max(temp.IRSL.layout.vector)+1),1), #GC
    rep((max(temp.IRSL.layout.vector)+2),1), #TnTc
    rep((max(temp.IRSL.layout.vector)+3),2)) #Rejection criteria

  ##set layout
  nf <- layout(
    matrix(layout.matrix,(max(layout.matrix)/2 + ifelse(n.loops > 2, 0,2)), 4, byrow = TRUE),
    widths = c(rep(c(1,1,1,.75),6),c(1,1,1,1)),
    heights = c(rep(c(1),(2+2*n.loops)),c(0.20, 0.20)))

  ## show the regions that have been allocated to each plot for debug
  #layout.show(nf)
}

##(1) INFO PLOT
if (plot) {
  plot(NA,NA,
       ylim = c(0,1), xlab = "",
       xlim = c(0,1), ylab = "",
       axes = FALSE,
       main = main)

  text(0.5,0.5, paste(sequence.structure, collapse = "\n"), cex = cex *2)
}

##(2) set loop
for(i in 1:n.loops){

  ##compile record ids
  temp.id.sel <- sort(c(TL.curves.id, IRSL.curves.id[seq(i,length(IRSL.curves.id),by=n.loops)]))

  ##(a) select data set (TL curves have to be considered for the data set)
  temp.curves <- get_RLum(object, record.id = temp.id.sel, drop = FALSE)

  ##(b) grep integral limits as they might be different for different curves
  if(length(signal.integral.min)>1){
    temp.signal.integral.min <- signal.integral.min[i]
    temp.signal.integral.max <- signal.integral.max[i]
    temp.background.integral.min <- background.integral.min[i]
    ## fix: was assigned to the misspelled 'temp.backbround.integral.max',
    ## leaving 'temp.background.integral.max' undefined in this branch
    temp.background.integral.max <- background.integral.max[i]

  }else{
    temp.signal.integral.min <- signal.integral.min
    temp.signal.integral.max <- signal.integral.max
    temp.background.integral.min <- background.integral.min
    temp.background.integral.max <- background.integral.max
  }

  ##(c) call analysis sequence and plot
  ## call single plots
  if(i == 1){
    temp.plot.single <- c(1,2,3,4,6)

  }else if(i == n.loops){
    temp.plot.single <- c(2,4,5,6)

  }else{
    temp.plot.single <- c(2,4,6)
  }

  ##start analysis
  temp.results <- analyse_SAR.CWOSL(
    temp.curves,
    signal.integral.min = temp.signal.integral.min,
    signal.integral.max = temp.signal.integral.max,
    background.integral.min = temp.background.integral.min,
    background.integral.max = temp.background.integral.max,
    plot = plot,
    dose.points = dose.points,
    plot.single = temp.plot.single,
    output.plotExtended.single = TRUE,
    cex.global = cex,
    ...
  ) ##TODO should be replaced by useful explicit arguments

  ##check whether NULL was returned
  if (is.null(temp.results)) {
    ## fix: message previously named the non-existing 'plot_pIRIRSequence()'
    try(stop("[analyse_pIRIRSequence()] An error occurred, analysis skipped. Check your sequence!", call. = FALSE))
    return(NULL)
  }

  ##add signal information to the protocol step
  temp.results.pIRIR.De <- as.data.frame(c(
    get_RLum(temp.results, "data"),
    data.frame(Signal = pIRIR.curve.names[i])
  ))

  temp.results.pIRIR.LnLxTnTx <- as.data.frame(c(
    get_RLum(temp.results, "LnLxTnTx.table"),
    data.frame(Signal = pIRIR.curve.names[i])
  ))

  temp.results.pIRIR.rejection.criteria <- as.data.frame(c(
    get_RLum(temp.results, "rejection.criteria"),
    data.frame(Signal = pIRIR.curve.names[i])
  ))

  temp.results.pIRIR.formula <- list(get_RLum(temp.results, "Formula"))
  names(temp.results.pIRIR.formula) <- pIRIR.curve.names[i]

  ##create new object
  temp.results <- set_RLum(
    class = "RLum.Results",
    data = list(
      data = temp.results.pIRIR.De,
      LnLxTnTx.table = temp.results.pIRIR.LnLxTnTx,
      rejection.criteria = temp.results.pIRIR.rejection.criteria,
      Formula = temp.results.pIRIR.formula
    ),
    info = list(call = sys.call())
  )

  ##merge results
  if (exists("temp.results.final")) {
    temp.results.final <- merge_RLum(list(temp.results.final, temp.results))

  } else{
    temp.results.final <- temp.results
  }
}

##============================================================================##
# Plotting additionals --------------------------------------------------------
##============================================================================##

if(plot){

  ##extract LnLxTnTx.table
  LnLxTnTx.table <- get_RLum(temp.results.final, "LnLxTnTx.table")

  ##remove Inf
  if(any(is.infinite(LnLxTnTx.table[["LxTx"]])))
    LnLxTnTx.table[["LxTx"]][is.infinite(LnLxTnTx.table[["LxTx"]])] <- NA

  if(any(is.infinite(LnLxTnTx.table[["LxTx.Error"]])))
    LnLxTnTx.table[["LxTx.Error"]][is.infinite(LnLxTnTx.table[["LxTx.Error"]])] <- NA

  ##plot growth curves
  plot(NA, NA,
       xlim = range(get_RLum(temp.results.final, "LnLxTnTx.table")$Dose),
       ylim = c(
         if(min(LnLxTnTx.table$LxTx, na.rm = TRUE) - max(LnLxTnTx.table$LxTx.Error, na.rm = TRUE) < 0){
           min(LnLxTnTx.table$LxTx, na.rm = TRUE) - max(LnLxTnTx.table$LxTx.Error, na.rm = TRUE)
         }else{0},
         max(LnLxTnTx.table$LxTx, na.rm = TRUE) + max(LnLxTnTx.table$LxTx.Error, na.rm = TRUE)),
       xlab = "Dose [s]",
       ylab = expression(L[x]/T[x]),
       main = "Summarised Dose Response Curves")

  ##set x for expression evaluation
  x <- seq(0, max(LnLxTnTx.table$Dose)*1.05, length = 100)

  for(j in 1:length(pIRIR.curve.names)){

    ##dose points
    temp.curve.points <- LnLxTnTx.table[,c("Dose", "LxTx", "LxTx.Error", "Signal")]

    temp.curve.points <- temp.curve.points[
      temp.curve.points[,"Signal"] == pIRIR.curve.names[j],
      c("Dose", "LxTx", "LxTx.Error")]

    points(temp.curve.points[-1,c("Dose", "LxTx")], col = j, pch = j)

    segments(x0 = temp.curve.points[-1,c("Dose")],
             y0 = temp.curve.points[-1,c("LxTx")] - temp.curve.points[-1,c("LxTx.Error")],
             x1 = temp.curve.points[-1,c("Dose")],
             y1 = temp.curve.points[-1,c("LxTx")] + temp.curve.points[-1,c("LxTx.Error")],
             col = j)

    ##De values
    lines(c(0, get_RLum(temp.results.final, "data")[j,1]),
          c(temp.curve.points[1,c("LxTx")], temp.curve.points[1,c("LxTx")]),
          col = j, lty = 2)

    lines(c(rep(get_RLum(temp.results.final, "data")[j,1], 2)),
          c(temp.curve.points[1,c("LxTx")], 0),
          col = j, lty = 2)

    ##curve
    temp.curve.formula <- get_RLum(
      temp.results.final, "Formula")[[pIRIR.curve.names[j]]]

    try(lines(x, eval(temp.curve.formula), col = j), silent = TRUE)
  }

  rm(x)

  ##plot legend
  legend("bottomright", legend = pIRIR.curve.names,
         lty = 1, col = c(1:length(pIRIR.curve.names)),
         bty = "n",
         pch = c(1:length(pIRIR.curve.names))
  )

  ##plot Tn/Tx curves
  ##select signal
  temp.curve.TnTx <- LnLxTnTx.table[, c("TnTx", "Signal")]

  temp.curve.TnTx.matrix <- matrix(
    NA,
    nrow = nrow(temp.curve.TnTx)/length(pIRIR.curve.names),
    ncol = length(pIRIR.curve.names))

  ##calculate normalised values
  for(j in 1:length(pIRIR.curve.names)){
    temp.curve.TnTx.sel <- temp.curve.TnTx[
      temp.curve.TnTx[,"Signal"] == pIRIR.curve.names[j], "TnTx"]

    temp.curve.TnTx.matrix[,j] <- temp.curve.TnTx.sel/temp.curve.TnTx.sel[1]
  }

  plot(NA, NA,
       xlim = c(0,nrow(LnLxTnTx.table)/n.loops),
       ylim = range(temp.curve.TnTx.matrix),
       xlab = "# Cycle",
       ylab = expression(T[x]/T[n]),
       main = "Sensitivity change")

  ##zero line
  abline(h = 1:nrow(temp.curve.TnTx.matrix), col = "gray")

  for(j in 1:length(pIRIR.curve.names)){
    lines(1:nrow(temp.curve.TnTx.matrix),
          temp.curve.TnTx.matrix[,j],
          type = "b", col = j, pch = j)
  }

  ##plot legend
  legend("bottomleft", legend = pIRIR.curve.names,
         lty = 1, col = c(1:length(pIRIR.curve.names)),
         bty = "n",
         pch = c(1:length(pIRIR.curve.names))
  )

  ##Rejection criteria
  temp.rejection.criteria <- get_RLum(
    temp.results.final, data.object = "rejection.criteria")

  temp.rc.reycling.ratio <- temp.rejection.criteria[
    grep("Recycling ratio",temp.rejection.criteria[,"Criteria"]),]

  temp.rc.recuperation.rate <- temp.rejection.criteria[
    grep("Recuperation rate",temp.rejection.criteria[,"Criteria"]),]

  temp.rc.palaedose.error <- temp.rejection.criteria[
    grep("Palaeodose error",temp.rejection.criteria[,"Criteria"]),]

  plot(NA,NA,
       xlim = c(-0.5,0.5),
       ylim = c(0,30),
       yaxt = "n", ylab = "",
       xaxt = "n", xlab = "",
       bty = "n",
       main = "Rejection criteria")

  axis(side = 1, at = c(-0.2,-0.1,0,0.1,0.2),
       labels = c("- 0.2", "- 0.1","0/1","+ 0.1", "+ 0.2"))

  ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++##
  ##polygon for recycling ratio
  text(x = -.4, y = 30, "Recycling ratio", pos = 1, srt = 0)

  polygon(x = c(-as.numeric(as.character(temp.rc.reycling.ratio$Threshold))[1],
                -as.numeric(as.character(temp.rc.reycling.ratio$Threshold))[1],
                as.numeric(as.character(temp.rc.reycling.ratio$Threshold))[1],
                as.numeric(as.character(temp.rc.reycling.ratio$Threshold))[1]),
          y = c(21,29,29,21), col = "gray", border = NA)
  polygon(x = c(-0.3,-0.3,0.3,0.3) , y = c(21,29,29,21))

  ##consider possibility of multiple pIRIR signals and multiple recycling ratios
  col.id <- 1

  ##the conditional case might be valid if no rejection criteria could be calculated
  if(nrow(temp.rc.recuperation.rate)>0){
    for(i in seq(1,nrow(temp.rc.recuperation.rate),
                 length(unique(temp.rc.recuperation.rate[,"Criteria"])))){
      for(j in 0:length(unique(temp.rc.recuperation.rate[,"Criteria"]))){
        points(temp.rc.reycling.ratio[i+j, "Value"]-1,
               y = 25, pch = col.id, col = col.id)
      }
      col.id <- col.id + 1
    }
  }#endif

  rm(col.id)

  ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++##
  ##polygon for recuperation rate
  text(x = -.4, y = 20, "Recuperation rate", pos = 1, srt = 0)

  if(length(as.character(temp.rc.recuperation.rate$Threshold))>0){
    polygon(x = c(0, 0,
                  as.numeric(as.character(temp.rc.recuperation.rate$Threshold))[1],
                  as.numeric(as.character(temp.rc.recuperation.rate$Threshold))[1]),
            y = c(11,19,19,11), col = "gray", border = NA)
    polygon(x = c(-0.3,-0.3,0.3,0.3) , y = c(11,19,19,11))
    polygon(x = c(-0.3,-0.3,0,0) , y = c(11,19,19,11),
            border = NA, density = 10, angle = 45)

    for(i in 1:nrow(temp.rc.recuperation.rate)){
      ## fix: plot the recuperation-rate values in this panel; the original
      ## plotted 'temp.rc.palaedose.error' values here (copy-paste slip)
      points(temp.rc.recuperation.rate[i, "Value"], y = 15, pch = i, col = i)
    }
  }#endif

  ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++##
  ##polygon for palaeodose error
  text(x = -.4, y = 10, "Palaeodose error", pos = 1, srt = 0)

  polygon(x = c(0, 0,
                as.numeric(as.character(temp.rc.palaedose.error$Threshold))[1],
                as.numeric(as.character(temp.rc.palaedose.error$Threshold))[1]),
          y = c(1,9,9,1), col = "gray", border = NA)
  polygon(x = c(-0.3,-0.3,0.3,0.3) , y = c(1,9,9,1))
  polygon(x = c(-0.3,-0.3,0,0) , y = c(1,9,9,1),
          border = NA, density = 10, angle = 45)

  for(i in 1:nrow(temp.rc.palaedose.error)){
    points(temp.rc.palaedose.error[i, "Value"], y = 5, pch = i, col = i)
  }

  ##add 0 value
  lines(x = c(0,0), y = c(0,19), lwd = 1.5*cex)
  lines(x = c(0,0), y = c(20,29), lwd = 1.5*cex)

  ##plot legend
  legend("bottomright", legend = pIRIR.curve.names,
         col = c(1:length(pIRIR.curve.names)), bty = "n",
         pch = c(1:length(pIRIR.curve.names)))

  ##reset graphic settings
  if(plot.single == FALSE){par(def.par)}

}##end plot == TRUE

##============================================================================##
# Return Values -----------------------------------------------------------
##============================================================================##

return(temp.results.final)
}
Luminescence/R/Luminescence-package.R0000644000176200001440000011444614521207343017226 0ustar liggesusers#' @title Comprehensive Luminescence Dating Data Analysis\cr
#'
#'
#' \if{html}{
#' \figure{Luminescence_logo.png}{options: width="75" alt="r-luminescence.org"}
#' }
#'
#' @description A collection of various R functions for the purpose of luminescence dating
#' data analysis. This includes, amongst others, data import, export,
#' application of age models, curve deconvolution, sequence analysis and
#' plotting of equivalent dose distributions.
#'
#' @name Luminescence-package
#'
#' @aliases Luminescence-package Luminescence
#'
#' @docType package
#'
#' @details
#'
#' **Supervisor of the initial version in 2012**
#'
#' Markus Fuchs, Justus-Liebig-University Giessen, Germany
#'
#' **Support contact**
#'
#' - \email{developers@@r-luminescence.org}
#' - [https://github.com/R-Lum/Luminescence/discussions]()
#'
#' **Bug reporting**
#'
#' - \email{developers@@r-luminescence.org} or
#' - [https://github.com/R-Lum/Luminescence/issues]()
#'
#' **Project website**
#'
#' - [https://r-luminescence.org]()
#'
#' **Project source code repository**
#'
#' - [https://github.com/R-Lum/Luminescence]()
#'
#' **Related package projects**
#'
#' - [https://cran.r-project.org/package=RLumShiny]()
#' - [https://cran.r-project.org/package=RLumModel]()
#' - [https://cran.r-project.org/package=RLumCarlo]()
#'
#' **Package maintainer**
#'
#' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany),\cr
#' \email{sebastian.kreutzer@@aber.ac.uk}
#'
#' **Funding**
#'
#' 2011-2013: The initial version of the package was developed, while Sebastian Kreutzer
#' was funded through the DFG programme "Rekonstruktion der
Umweltbedingungen #' des Spätpleistozäns in Mittelsachsen anhand von Löss-Paläobodensequenzen" #' (DFG id: 46526743) #' #' 2014-2018: Cooperation and personal exchange between the developers is gratefully #' funded by the DFG (SCHM 3051/3-1) in the framework of the program #' "Scientific Networks". Project title: "RLum.Network: Ein #' Wissenschaftsnetzwerk zur Analyse von Lumineszenzdaten mit R" (2014-2018) #' #' 05/2014-12/2019: The work of Sebastian Kreutzer as maintainer of the package was supported #' by LabEx LaScArBx (ANR - n. ANR-10-LABX-52). #' #' 01/2020-04/2022: Sebastian Kreutzer as maintainer of the package has received funding #' from the European Union’s Horizon 2020 research and innovation programme under #' the Marie Skłodowska-Curie grant agreement No 844457 (CREDit), and could continue #' maintaining the package. #' #' @references #' Dietze, M., Kreutzer, S., Fuchs, M.C., Burow, C., Fischer, M., #' Schmidt, C., 2013. A practical guide to the R package Luminescence. #' Ancient TL, 31 (1), 11-18. #' #' Dietze, M., Kreutzer, S., Burow, C., Fuchs, M.C., Fischer, M., Schmidt, C., 2016. The abanico plot: #' visualising chronometric data with individual standard errors. Quaternary Geochronology 31, 1-7. #' https://doi.org/10.1016/j.quageo.2015.09.003 #' #' Fuchs, M.C., Kreutzer, S., Burow, C., Dietze, M., Fischer, M., Schmidt, C., #' Fuchs, M., 2015. Data processing in luminescence dating analysis: An #' exemplary workflow using the R package 'Luminescence'. Quaternary #' International, 362,8-13. https://doi.org/10.1016/j.quaint.2014.06.034 #' #' Kreutzer, S., Schmidt, C., Fuchs, M.C., Dietze, M., Fischer, M., Fuchs, M., #' 2012. Introducing an R package for luminescence dating analysis. Ancient TL, #' 30 (1), 1-8. #' #' Mercier, N., Kreutzer, S., Christophe, C., Guérin, G., Guibert, P., Lahaye, C., Lanos, P., Philippe, A., #' Tribolo, C., 2016. 
Bayesian statistics in luminescence dating: The 'baSAR'-model and its #' implementation in the R package ’Luminescence’. Ancient TL 34 (2), 14-21. #' #' Mercier, N., Galharret, J.-M., Tribolo, C., Kreutzer, S., Philippe, A., 2022. #' Luminescence age calculation through Bayesian convolution of equivalent dose #' and dose-rate distributions: the De_Dr model. #' Geochronology 4, 297–310. https://doi.org/10.5194/gchron-4-297-2022 #' #' Smedley, R.K., 2015. A new R function for the Internal External Uncertainty (IEU) model. #' Ancient TL, 33 (1), 16-21. #' #' King, E.G., Burow, C., Roberts, H., Pearce, N.J.G., 2018. Age determination #' using feldspar: evaluating fading-correction model performance. Radiation Measurements 119, 58-73. #' https://doi.org/10.1016/j.radmeas.2018.07.013 #' #' @keywords package #' #' @import utils methods data.table #' #' @importFrom Rcpp evalCpp #' #' @importFrom graphics plot plot.default frame abline mtext text lines par layout lines arrows axTicks axis barplot box boxplot contour curve grconvertX grconvertY hist legend persp points polygon rug segments title grid close.screen screen split.screen #' @importFrom grDevices adjustcolor axisTicks colorRampPalette gray.colors rgb topo.colors xy.coords dev.off #' @importFrom stats formula approx as.formula complete.cases density dnorm glm lm median na.exclude na.omit nls nls.control pchisq pnorm quantile rnorm runif sd smooth smooth.spline spline t.test uniroot var weighted.mean setNames coef confint predict update residuals fitted qf #' @importFrom parallel parLapply makeCluster stopCluster #' @importFrom httr GET accept_json status_code content #' #' @useDynLib Luminescence, .registration = TRUE #' #' @md NULL #' Base data set of dose-rate conversion factors #' #' Collection of published dose-rate conversion factors to convert concentrations #' of radioactive isotopes to dose rate values. 
#'
#' @format
#'
#' A [`list`] with three elements with dose-rate conversion factors
#' sorted by article and radiation type (alpha, beta, gamma):
#'
#' \tabular{ll}{
#'
#' `AdamiecAitken1998`: \tab
#' Conversion factors from Tables 5 and 6 \cr
#'
#' `Cresswelletal2018`: \tab
#' Conversion factors from Tables 5 and 6 \cr
#'
#' `Guerinetal2011`: \tab
#' Conversion factors from Tables 1, 2 and 3 \cr
#'
#' `Liritzisetal2013`: \tab
#' Conversion factors from Tables 1, 2 and 3 \cr
#' }
#'
#' @section Version: 0.2.0
#'
#' @references
#'
#' Adamiec, G., Aitken, M.J., 1998. Dose-rate conversion factors: update.
#' Ancient TL 16, 37-46.
#'
#' Cresswell, A.J., Carter, J., Sanderson, D.C.W., 2018.
#' Dose rate conversion parameters: Assessment of nuclear data.
#' Radiation Measurements 120, 195-201.
#'
#' Guerin, G., Mercier, N., Adamiec, G., 2011. Dose-rate conversion
#' factors: update. Ancient TL, 29, 5-8.
#'
#' Liritzis, I., Stamoulis, K., Papachristodoulou, C., Ioannides, K., 2013.
#' A re-evaluation of radiation dose-rate conversion factors. Mediterranean
#' Archaeology and Archaeometry 13, 1-15.
#'
#'
#' @source
#' All gamma conversion factors were carefully read from the tables given in the
#' references above.
#'
#' @keywords datasets
#'
#' @examples
#'
#' ## Load data
#' data("BaseDataSet.ConversionFactors", envir = environment())
#'
#' @name BaseDataSet.ConversionFactors
#' @md
NULL

#' @title Base dataset for grain size attenuation data by Guérin et al. (2012)
#'
#' @description Grain size correction data for beta-dose rates
#' published by Guérin et al. (2012).
#'
#' @format
#'
#' A [`data.frame`] with seven columns and sixteen rows. Column headers
#' are `GrainSize`, `Q_K`, `FS_K`, `Q_Th`, `FS_Th`, `Q_U`, `FS_U`.
#' Grain sizes are quoted in µm (e.g., 20, 40, 60 etc.)
#'
#' @section Version: 0.1.0
#'
#' @source Guérin, G., Mercier, N., Nathan, R., Adamiec, G., Lefrais, Y., 2012.
#' On the use of the infinite matrix assumption and associated concepts: #' A critical review. Radiation Measurements, 47, 778-785. #' #' @keywords datasets #' #' @examples #' #' ## load data #' data("BaseDataSet.GrainSizeAttenuation", envir = environment()) #' #' @name BaseDataSet.GrainSizeAttenuation #' @md NULL #' Base data set of fractional gamma-dose values #' #' Collection of (un-)published fractional gamma dose-rate values to scale the #' gamma-dose rate considering layer-to-layer variations in soil radioactivity. #' #' @format #' #' A [`list`] with fractional gamma dose-rate values #' sorted by article: #' #' \tabular{ll}{ #' #' `Aitken1985`: \tab #' Fractional gamma-dose values from table H.1 #' } #' #' #' @section Version: 0.1 #' #' @references #' Aitken, M.J., 1985. Thermoluminescence Dating. Academic Press, London. #' #' @source #' Fractional gamma dose values were carefully read from the tables given in the #' references above. #' #' @keywords datasets #' #' @examples #' #' ## Load data #' data("BaseDataSet.FractionalGammaDose", envir = environment()) #' #' @name BaseDataSet.FractionalGammaDose #' @md NULL #' Example data for scale_GammaDose() #' #' An example data set for the function `scale_GammaDose()` containing layer #' specific information to scale the gamma dose rate considering variations in #' soil radioactivity. #' #' @format #' #' A [`data.frame`]. Please see `?scale_GammaDose()` for a detailed description #' of its structure. #' #' #' @section Version: 0.1 #' #' @keywords datasets #' #' @examples #' #' ## Load data #' data("ExampleData.ScaleGammaDose", envir = environment()) #' #' @name ExampleData.ScaleGammaDose #' @md NULL #' Example data for calc_CobbleDoseRate() #' #' An example data set for the function [calc_CobbleDoseRate] containing layer #' specific information for the cobble to be used in the function. #' #' @format #' #' A [`data.frame`]. 
Please see [calc_CobbleDoseRate] for detailed information #' on the structure of the [data.frame]. #' #' @section Version: 0.1.0 #' #' @keywords datasets #' #' @examples #' #' ## Load data #' data("ExampleData.CobbleData", envir = environment()) #' #' @name ExampleData.CobbleData #' @md NULL #' Base data set for cosmic dose rate calculation #' #' Collection of data from various sources needed for cosmic dose rate #' calculation #' #' #' @format #' #' \tabular{ll}{ #' #' `values.cosmic.Softcomp`: \tab #' data frame containing cosmic dose rates #' for shallow depths (< 167 g cm^-2) obtained using the "AGE" program by #' Rainer Gruen (cf. Gruen 2009). These data essentially reproduce the graph #' shown in Fig. 1 of Prescott & Hutton (1988). \cr #' #' `values.factor.Altitude`: \tab #' data frame containing altitude factors #' for adjusting geomagnetic field-change factors. Values were read from Fig. 1 #' in Prescott & Hutton (1994). \cr #' #' `values.par.FJH`: \tab #' data frame containing values for parameters F, J #' and H (read from Fig. 2 in Prescott & Hutton 1994) used in the expression \cr #' } #' #' \deqn{Dc = D0*(F+J*exp((altitude/1000)/H))} #' #' @section Version: 0.1 #' #' @references #' Gruen, R., 2009. The "AGE" program for the calculation of luminescence age estimates. #' Ancient TL, 27, pp. 45-46. #' #' Prescott, J.R., Hutton, J.T., 1988. Cosmic ray and gamma ray dosimetry for #' TL and ESR. Nuclear Tracks and Radiation Measurements, 14, pp. 223-227. #' #' Prescott, J.R., Hutton, J.T., 1994. Cosmic ray contributions to dose rates #' for luminescence and ESR dating: large depths and long-term time variations. #' Radiation Measurements, 23, pp. 497-500. #' #' @source #' The following data were carefully read from figures in mentioned #' sources and used for fitting procedures. The derived expressions are used in #' the function `calc_CosmicDoseRate`. 
#' #' **values.cosmic.Softcomp** #' #' \tabular{ll}{ #' Program: \tab "AGE"\cr #' Reference: \tab Gruen (2009) \cr #' Fit: \tab Polynomials in the form of #' } #' #' For depths between 40-167 g cm^-2: #' #' \deqn{y = 2*10^-6*x^2-0.0008*x+0.2535} #' #' (For depths <40 g cm^-2) #' #' \deqn{y = -6*10^-8*x^3+2*10^-5*x^2-0.0025*x+0.2969} #' #' **`values.factor.Altitude`** #' #' \tabular{ll}{ #' Reference: \tab Prescott & Hutton (1994) \cr #' Page: \tab 499 \cr #' Figure: \tab 1 \cr #' Fit: \tab 2-degree polynomial in the form of #' } #' #' \deqn{y = -0.026*x^2 + 0.6628*x + 1.0435} #' #' #' **`values.par.FJH`** #' #' \tabular{ll}{ #' Reference: \tab Prescott & Hutton (1994) \cr #' Page: \tab 500 \cr #' Figure: \tab 2 \cr #' Fits: \tab 3-degree polynomials and linear fits #' } #' #' F (non-linear part, \eqn{\lambda} < 36.5 deg.): #' #' \deqn{y = -7*10^-7*x^3-8*10^-5*x^2-0.0009*x+0.3988} #' #' F (linear part, \eqn{\lambda} > 36.5 deg.): #' #' \deqn{y = -0.0001*x + 0.2347} #' #' J (non-linear part, \eqn{\lambda} < 34 deg.): #' #' \deqn{y = 5*10^-6*x^3-5*10^-5*x^2+0.0026*x+0.5177} #' #' J (linear part, \eqn{\lambda} > 34 deg.): #' #' \deqn{y = 0.0005*x + 0.7388} #' #' H (non-linear part, \eqn{\lambda} < 36 deg.): #' #' \deqn{y = -3*10^-6*x^3-5*10^-5*x^2-0.0031*x+4.398} #' #' H (linear part, \eqn{\lambda} > 36 deg.): #' #' \deqn{y = 0.0002*x + 4.0914} #' #' @keywords datasets #' #' @examples #' #' ##load data #' data(BaseDataSet.CosmicDoseRate) #' #' @name BaseDataSet.CosmicDoseRate #' @aliases values.cosmic.Softcomp values.factor.Altitude values.par.FJH #' #' @md NULL #' @title Example data from a SAR OSL and SAR TL measurement for the package #' Luminescence #' #' @description Example data from a SAR OSL and TL measurement for package Luminescence #' directly extracted from a Risoe BIN-file and provided in an object of type #' [Risoe.BINfileData-class] #' @format #' #' `CWOSL.SAR.Data`: SAR OSL measurement data #' #' `TL.SAR.Data`: SAR TL measurement data #' #' Each class 
object contains two slots: (a) `METADATA` is a [data.frame] with #' all metadata stored in the BIN file of the measurements and (b) `DATA` #' contains a list of vectors of the measured data (usually count values). #' #' @section Version: 0.1 #' #' @references #' **CWOSL.SAR.Data**: unpublished data #' #' **TL.SAR.Data**: unpublished data #' #' @source **CWOSL.SAR.Data** #' #' \tabular{ll}{ #' Lab: \tab Luminescence Laboratory Bayreuth \cr #' Lab-Code: \tab BT607 \cr #' Location: \tab Saxony/Germany \cr #' Material: \tab Middle grain quartz measured on aluminium cups on a Risø TL/OSL DA-15 reader\cr #' Reference: \tab unpublished #' } #' #' **TL.SAR.Data** #' #' \tabular{ll}{ #' Lab: \tab Luminescence Laboratory of Cologne\cr #' Lab-Code: \tab LP1_5\cr #' Location: \tab Spain\cr #' Material: \tab Flint \cr #' Setup: \tab Risoe TL/OSL DA-20 reader (Filter: Semrock Brightline, HC475/50, N2, unpolished steel discs) \cr #' Reference: \tab unpublished \cr #' Remarks: \tab dataset limited to one position #' } #' #' @note #' Please note that this example data cannot be exported to a BIN-file using the function #' `writeR2BIN` as it was generated and implemented in the package long time ago. In the meantime #' the BIN-file format changed. 
#' #' @docType data #' #' @keywords datasets #' #' @examples #' #' ## show first 5 elements of the METADATA and DATA elements in the terminal #' data(ExampleData.BINfileData, envir = environment()) #' CWOSL.SAR.Data@@METADATA[1:5,] #' CWOSL.SAR.Data@@DATA[1:5] #' #' @name ExampleData.BINfileData #' @aliases CWOSL.SAR.Data TL.SAR.Data #' @md NULL #' Example CW-OSL curve data for the package Luminescence #' #' `data.frame` containing CW-OSL curve data (time, counts) #' #' @name ExampleData.CW_OSL_Curve #' #' @docType data #' #' @format Data frame with 1000 observations on the following 2 variables: #' #' \describe{ #' \item{list("x")}{a numeric vector, time} #' \item{list("y")}{a numeric vector, counts} #' } #' #' @references #' Baartman, J.E.M., Veldkamp, A., Schoorl, J.M., Wallinga, J., #' Cammeraat, L.H., 2011. Unravelling Late Pleistocene and Holocene landscape #' dynamics: The Upper Guadalentin Basin, SE Spain. Geomorphology, 125, #' 172-185. #' #' Bos, A.J.J. & Wallinga, J., 2012. How to visualize quartz OSL signal #' components. Radiation Measurements, 47, 752-758. #' #' @source **ExampleData.CW_OSL_Curve** #' #' \tabular{ll}{ #' Lab: \tab Luminescence Laboratory Bayreuth\cr #' Lab-Code: \tab BT607\cr #' Location: \tab Saxony/Germany\cr #' Material: \tab Middle grain quartz measured on aluminium cups on a Risø TL/OSL DA-15 reader.\cr #' Reference: \tab unpublished data } #' #' **CW_Curve.BosWallinga2012** #' #' \tabular{ll}{ #' Lab: \tab Netherlands Centre for Luminescence Dating (NCL)\cr #' Lab-Code: \tab NCL-2108077\cr #' Location: \tab Guadalentin Basin, Spain\cr #' Material: \tab Coarse grain quartz\cr #' Reference: \tab Bos & Wallinga (2012) and Baartman et al. 
(2011) #' } #' #' @keywords datasets #' #' @examples #' #' data(ExampleData.CW_OSL_Curve, envir = environment()) #' plot(ExampleData.CW_OSL_Curve) #' #' @aliases CW_Curve.BosWallinga2012 ExampleData.CW_OSL_Curve #' @md NULL #' Example portable OSL curve data for the package Luminescence #' #' A `list` of [RLum.Analysis-class] objects, each containing #' the same number of [RLum.Data.Curve-class] objects representing #' individual OSL, IRSL and dark count measurements of a sample. #' #' @name ExampleData.portableOSL #' #' @docType data #' #' @source #' #' **ExampleData.portableOSL** #' #' \tabular{ll}{ #' Lab: \tab Cologne Luminescence Laboratory\cr #' Lab-Code: \tab `none` \cr #' Location: \tab Nievenheim/Germany\cr #' Material: \tab Fine grain quartz \cr #' Reference: \tab unpublished data #' } #' #' @keywords datasets #' #' @examples #' #' data(ExampleData.portableOSL, envir = environment()) #' plot_RLum(ExampleData.portableOSL) #' #' @md NULL #' Example data for fit_LMCurve() in the package Luminescence #' #' Linearly modulated (LM) measurement data from a quartz sample from Norway #' including background measurement. Measurements carried out in the #' luminescence laboratory at the University of Bayreuth. #' #' @format Two objects (data.frames) with two columns (time and counts). #' #' @references #' Fuchs, M., Kreutzer, S., Fischer, M., Sauer, D., Soerensen, R., 2012. OSL and IRSL #' dating of raised beach sand deposits along the south-eastern coast of Norway. #' Quaternary Geochronology, 10, 195-200. 
#' #' @source #' #' \tabular{ll}{ #' Lab: \tab Luminescence Laboratory Bayreuth\cr #' Lab-Code: \tab BT900\cr #' Location: \tab Norway\cr #' Material: \tab Beach deposit, coarse grain quartz measured on aluminium discs on a Risø TL/OSL DA-15 reader\cr #' } #' #' @examples #' #' ##show LM data #' data(ExampleData.FittingLM, envir = environment()) #' plot(values.curve,log="x") #' #' @name ExampleData.FittingLM #' @aliases values.curve values.curveBG #' @md NULL #' Example Lx/Tx data from CW-OSL SAR measurement #' #' LxTx data from a SAR measurement for the package Luminescence. #' #' @format A [`data.frame`] with 4 columns (Dose, LxTx, LxTx.Error, TnTx). #' #' @references unpublished data #' #' @source #' #' \tabular{ll}{ #' Lab: \tab Luminescence Laboratory Bayreuth\cr #' Lab-Code: \tab BT607\cr #' Location: \tab Ostrau (Saxony-Anhalt/Germany)\cr #' Material: \tab Middle grain (38-63 \eqn{\mu}m) quartz measured on a Risoe TL/OSL DA-15 reader. #' } #' #' @examples #' #' ## plot Lx/Tx data vs dose [s] #' data(ExampleData.LxTxData, envir = environment()) #' plot(LxTxData$Dose,LxTxData$LxTx) #' #' @name ExampleData.LxTxData #' @aliases LxTxData #' @md NULL #' Example Lx and Tx curve data from an artificial OSL measurement #' #' `Lx` and `Tx` data of continuous wave (CW-) OSL signal curves. #' #' @format Two [`data.frame`]s containing time and count values. #' #' @references unpublished data #' #' @source #' Arbitrary OSL measurement. #' #' @examples #' #' ##load data #' data(ExampleData.LxTxOSLData, envir = environment()) #' #' ##plot data #' plot(Lx.data) #' plot(Tx.data) #' #' @name ExampleData.LxTxOSLData #' @aliases Lx.data Tx.data #' @md NULL #' Example data as [RLum.Analysis-class] objects #' #' Collection of different [RLum.Analysis-class] objects for #' protocol analysis. #' #' @format #' #' `IRSAR.RF.Data`: IRSAR.RF.Data on coarse grain feldspar #' #' Each object contains data needed for the given protocol analysis. 
#' #' @section Version: 0.1 #' #' @references #' **IRSAR.RF.Data** #' #' Kreutzer, S., Lauer, T., Meszner, S., Krbetschek, M.R., Faust, D., Fuchs, #' M., 2014. Chronology of the Quaternary profile Zeuchfeld in Saxony-Anhalt / #' Germany - a preliminary luminescence dating study. Zeitschrift fuer #' Geomorphologie 58, 5-26. doi: 10.1127/0372-8854/2012/S-00112 #' #' @source **IRSAR.RF.Data** #' #' These data were kindly provided by Tobias Lauer and Matthias Krbetschek. #' #' \tabular{ll}{ #' Lab: \tab Luminescence Laboratory TU Bergakademie Freiberg\cr #' Lab-Code: \tab ZEU/SA1\cr #' Location: \tab Zeuchfeld (Zeuchfeld Sandur; Saxony-Anhalt/Germany)\cr #' Material: \tab K-feldspar (130-200 \eqn{\mu}m)\cr #' Reference: \tab Kreutzer et al. (2014) #' } #' #' @keywords datasets #' #' @examples #' #' ##load data #' data(ExampleData.RLum.Analysis, envir = environment()) #' #' ##plot data #' plot_RLum(IRSAR.RF.Data) #' #' @name ExampleData.RLum.Analysis #' @aliases IRSAR.RF.Data #' @md NULL #' Example data as [RLum.Data.Image-class] objects #' #' Measurement of Princton Instruments camera imported with the function #' [read_SPE2R] to R to produce an #' [RLum.Data.Image-class] object. #' #' #' @format Object of class [RLum.Data.Image-class] #' #' @section Version: 0.1 #' #' @source #' **ExampleData.RLum.Data.Image** #' #' These data were kindly provided by Regina DeWitt. #' #' \tabular{ll}{ #' Lab.: \tab Department of Physics, East-Carolina University, NC, USA\cr #' Lab-Code: \tab - \cr #' Location: \tab - \cr #' Material: \tab - \cr #' Reference: \tab - \cr #' } #' #' Image data is a measurement of fluorescent ceiling lights with a cooled #' Princeton Instruments (TM) camera fitted on Risø DA-20 TL/OSL reader. 
#' #' @keywords datasets #' #' @examples #' #' ##load data #' data(ExampleData.RLum.Data.Image, envir = environment()) #' #' ##plot data #' plot_RLum(ExampleData.RLum.Data.Image) #' #' @name ExampleData.RLum.Data.Image #' @md NULL #' Example data for a SAR OSL measurement and a TL spectrum using a lexsyg #' reader #' #' Example data from a SAR OSL measurement and a TL spectrum for package #' Luminescence imported from a Freiberg Instruments XSYG file using the #' function [read_XSYG2R]. #' #' @format #' #' `OSL.SARMeasurement`: SAR OSL measurement data #' #' The data contain two elements: (a) `$Sequence.Header` is a #' [data.frame] with metadata from the measurement,(b) #' `Sequence.Object` contains an [RLum.Analysis-class] object #' for further analysis. #' #' `TL.Spectrum`: TL spectrum data #' #' [RLum.Data.Spectrum-class] object for further analysis. The #' spectrum was cleaned from cosmic-rays using the function #' #' `apply_CosmicRayRemoval`. Note that no quantum efficiency calibration #' was performed. #' #' @section Version: 0.1 #' #' @seealso [read_XSYG2R], [RLum.Analysis-class], [RLum.Data.Spectrum-class], #' [plot_RLum], [plot_RLum.Analysis], [plot_RLum.Data.Spectrum] #' #' @references #' Unpublished data measured to serve as example data for that #' package. Location origin of sample BT753 is given here: #' #' Fuchs, M., Kreutzer, S., Rousseau, D.D., Antoine, P., Hatte, C., Lagroix, #' F., Moine, O., Gauthier, C., Svoboda, J., Lisa, L., 2013. The loess sequence #' of Dolni Vestonice, Czech Republic: A new OSL-based chronology of the Last #' Climatic Cycle. Boreas, 42, 664--677. 
#' #' @source #' **OSL.SARMeasurement** #' #' \tabular{ll}{ #' Lab: \tab Luminescence Laboratory Giessen\cr #' Lab-Code: \tab no code\cr #' Location: \tab not specified\cr #' Material: \tab Coarse grain quartz on steel cups on lexsyg research reader\cr #' Reference: \tab unpublished #' } #' #' **TL.Spectrum** #' #' \tabular{ll}{ #' Lab: \tab Luminescence Laboratory Giessen\cr #' Lab-Code: \tab BT753\cr #' Location: \tab Dolni Vestonice/Czech Republic\cr #' Material: \tab Fine grain polymineral on steel cups on lexsyg research reader\cr #' Reference: \tab Fuchs et al., 2013 \cr #' Spectrum: \tab Integration time 19 s, channel time 20 s\cr #' Heating: \tab 1 K/s, up to 500 deg. C #' } #' #' @keywords datasets #' #' @examples #' ##show data #' data(ExampleData.XSYG, envir = environment()) #' #' ## ========================================= #' ##(1) OSL.SARMeasurement #' OSL.SARMeasurement #' #' ##show $Sequence.Object #' OSL.SARMeasurement$Sequence.Object #' #' ##grep OSL curves and plot the first curve #' OSLcurve <- get_RLum(OSL.SARMeasurement$Sequence.Object, #' recordType="OSL")[[1]] #' plot_RLum(OSLcurve) #' #' ## ========================================= #' ##(2) TL.Spectrum #' TL.Spectrum #' #' ##plot simple spectrum (2D) #' plot_RLum.Data.Spectrum(TL.Spectrum, #' plot.type="contour", #' xlim = c(310,750), #' ylim = c(0,300), #' bin.rows=10, #' bin.cols = 1) #' #' ##plot 3d spectrum (uncomment for usage) #' # plot_RLum.Data.Spectrum(TL.Spectrum, plot.type="persp", #' # xlim = c(310,750), ylim = c(0,300), bin.rows=10, #' # bin.cols = 1) #' #' @name ExampleData.XSYG #' @aliases OSL.SARMeasurement TL.Spectrum #' @md NULL #' Example De data sets for the package Luminescence #' #' Equivalent dose (De) values measured for a fine grain quartz sample from a #' loess section in Rottewitz (Saxony/Germany) and for a coarse grain quartz #' sample from a fluvial deposit in the rock shelter of Cueva Anton #' (Murcia/Spain). 
#' #' #' @format A [list] with two elements, each containing a two column [data.frame]: #' #' \describe{ #' `$BT998`: De and De error values for a fine grain quartz #' sample from a loess section in Rottewitz. #' #' `$CA1`: Single grain De #' and De error values for a coarse grain quartz sample from a fluvial deposit #' in the rock shelter of Cueva Anton #' } #' #' @references #' **BT998** #' #' Unpublished data #' #' **CA1** #' #' Burow, C., Kehl, M., Hilgers, A., Weniger, G.-C., Angelucci, D., Villaverde, #' V., Zapata, J. and Zilhao, J. (2015). Luminescence dating of fluvial #' deposits in the rock shelter of Cueva Anton, Spain. Geochronometria 52, 107-125. #' #' **BT998** #' #' \tabular{ll}{ #' Lab: \tab Luminescence Laboratory Bayreuth\cr #' Lab-Code: \tab BT998\cr #' Location: \tab Rottewitz (Saxony/Germany)\cr #' Material: \tab Fine grain quartz measured on aluminium discs on a Risø TL/OSL DA-15 reader\cr #' Units: \tab Values are given in seconds \cr #' Dose Rate: \tab Dose rate of the beta-source at measurement ca. 
0.0438 Gy/s +/- 0.0019 Gy/s\cr #' Measurement Date: \tab 2012-01-27 #' } #' #' **CA1** #' #' \tabular{ll}{ #' Lab: \tab Cologne Luminescence Laboratory (CLL)\cr #' Lab-Code: \tab C-L2941\cr #' Location: \tab Cueva Anton (Murcia/Spain)\cr #' Material: \tab Coarse grain quartz (200-250 microns) measured on single grain discs on a Risoe TL/OSL DA-20 reader\cr #' Units: \tab Values are given in Gray \cr #' Measurement Date: \tab 2012 #' } #' #' @keywords datasets #' #' @examples #' #' ##(1) plot values as histogram #' data(ExampleData.DeValues, envir = environment()) #' plot_Histogram(ExampleData.DeValues$BT998, xlab = "De [s]") #' #' ##(2) plot values as histogram (with second to gray conversion) #' data(ExampleData.DeValues, envir = environment()) #' #' De.values <- Second2Gray(ExampleData.DeValues$BT998, #' dose.rate = c(0.0438, 0.0019)) #' #' #' plot_Histogram(De.values, xlab = "De [Gy]") #' #' @name ExampleData.DeValues #' @md NULL #' Example data for feldspar fading measurements #' #' Example data set for fading measurements of the IR50, IR100, IR150 and #' IR225 feldspar signals of sample UNIL/NB123. It further contains regular equivalent dose #' measurement data of the same sample, which can be used to apply a #' fading correction to. 
#' #' #' @format A [list] with two elements, each containing a further [list] of #' [data.frame]s containing the data on the fading and equivalent dose measurements: #' #' \describe{ #' `$fading.data`: A named [list] of [data.frame]s, #' each having three named columns (`LxTx, LxTx.error, timeSinceIrradiation`).\cr #' `..$IR50`: Fading data of the IR50 signal.\cr #' `..$IR100`: Fading data of the IR100 signal.\cr #' `..$IR150`: Fading data of the IR150 signal.\cr #' `..$IR225`: Fading data of the IR225 signal.\cr #' #' #' `$equivalentDose.data`: A named of [data.frame]s, #' each having three named columns (`dose, LxTx, LxTx.error`).\cr #' `..$IR50`: Equivalent dose measurement data of the IR50 signal.\cr #' `..$IR100`: Equivalent dose measurement data of the IR100 signal.\cr #' `..$IR150`: Equivalent dose measurement data of the IR150 signal.\cr #' `..$IR225`: Equivalent dose measurement data of the IR225 signal.\cr #' } #' #' @source #' #' These data were kindly provided by Georgina E. King. Detailed information #' on the sample UNIL/NB123 can be found in the reference given below. The raw #' data can be found in the accompanying supplementary information. #' #' @references #' #' King, G.E., Herman, F., Lambert, R., Valla, P.G., Guralnik, B., 2016. #' Multi-OSL-thermochronometry of feldspar. Quaternary Geochronology 33, 76-87. #' doi:10.1016/j.quageo.2016.01.004 #' #' **Details** #' #' \tabular{ll}{ #' Lab: \tab University of Lausanne \cr #' Lab-Code: \tab UNIL/NB123 \cr #' Location: \tab Namche Barwa (eastern Himalayas)\cr #' Material: \tab Coarse grained (180-212 microns) potassium feldspar \cr #' Units: \tab Values are given in seconds \cr #' Lab Dose Rate: \tab Dose rate of the beta-source at measurement ca. 
0.1335 +/- 0.004 Gy/s \cr #' Environmental Dose Rate: \tab 7.00 +/- 0.92 Gy/ka (includes internal dose rate) #' } #' #' #' @keywords datasets #' #' @examples #' #' ## Load example data #' data("ExampleData.Fading", envir = environment()) #' #' ## Get fading measurement data of the IR50 signal #' IR50_fading <- ExampleData.Fading$fading.data$IR50 #' head(IR50_fading) #' #' ## Determine g-value and rho' for the IR50 signal #' IR50_fading.res <- analyse_FadingMeasurement(IR50_fading) #' #' ## Show g-value and rho' results #' gval <- get_RLum(IR50_fading.res) #' rhop <- get_RLum(IR50_fading.res, "rho_prime") #' #' gval #' rhop #' #' ## Get LxTx values of the IR50 DE measurement #' IR50_De.LxTx <- ExampleData.Fading$equivalentDose.data$IR50 #' #' ## Calculate the De of the IR50 signal #' IR50_De <- plot_GrowthCurve(IR50_De.LxTx, #' mode = "interpolation", #' fit.method = "EXP") #' #' ## Extract the calculated De and its error #' IR50_De.res <- get_RLum(IR50_De) #' De <- c(IR50_De.res$De, IR50_De.res$De.Error) #' #' ## Apply fading correction (age conversion greatly simplified) #' IR50_Age <- De / 7.00 #' IR50_Age.corr <- calc_FadingCorr(IR50_Age, g_value = IR50_fading.res) #' #' #' @name ExampleData.Fading #' @md NULL #' Example OSL surface exposure dating data #' #' A set of synthetic OSL surface exposure dating data to demonstrate the #' [fit_SurfaceExposure] functionality. See examples to reproduce the data #' interactively. 
#' #' @details #' #' **`$sample_1`** #' #' \tabular{ccc}{ #' **mu** \tab **`sigmaphi`** \tab **age** \cr #' 0.9 \tab 5e-10 \tab 10000 \cr #' } #' #' **`$sample_2`** #' #' \tabular{ccccc}{ #' **mu** \tab **`sigmaphi`** \tab **age** \tab **Dose rate** \tab **D0** \cr #' 0.9 \tab 5e-10 \tab 10000 \tab 2.5 \tab 40 \cr #' } #' #' **`$set_1`** #' #' \tabular{ccc}{ #' **mu** \tab **`sigmaphi`** \tab **ages** \cr #' 0.9 \tab 5e-10 \tab 1e3, 1e4, 1e5, 1e6 \cr #' } #' #' **`$set_2`** #' #' \tabular{ccccc}{ #' **mu** \tab **`sigmaphi`** \tab **ages** \tab **Dose rate** \tab **D0** \cr #' 0.9 \tab 5e-10 \tab 1e2, 1e3, 1e4, 1e5, 1e6 \tab 1.0 \tab 40 \cr #' } #' #' @format A [list] with 4 elements: #' #' \tabular{ll}{ #' **Element** \tab **Content** \cr #' `$sample_1` \tab A [data.frame] with 3 columns (depth, intensity, error) \cr #' `$sample_2` \tab A [data.frame] with 3 columns (depth, intensity, error) \cr #' `$set_1` \tab A [list] of 4 [data.frame]s, each representing a sample with different ages \cr #' `$set_2` \tab A [list] of 5 [data.frame]s, each representing a sample with different ages \cr #' } #' #' @references Unpublished synthetic data #' #' @source #' #' See examples for the code used to create the data sets. 
#' #' @examples #' #' ## ExampleData.SurfaceExposure$sample_1 #' sigmaphi <- 5e-10 #' age <- 10000 #' mu <- 0.9 #' x <- seq(0, 10, 0.1) #' fun <- exp(-sigmaphi * age * 365.25*24*3600 * exp(-mu * x)) #' #' set.seed(666) #' synth_1 <- data.frame(depth = x, #' intensity = jitter(fun, 1, 0.1), #' error = runif(length(x), 0.01, 0.2)) #' #' ## VALIDATE sample_1 #' fit_SurfaceExposure(synth_1, mu = mu, sigmaphi = sigmaphi) #' #' #' #' #' ## ExampleData.SurfaceExposure$sample_2 #' sigmaphi <- 5e-10 #' age <- 10000 #' mu <- 0.9 #' x <- seq(0, 10, 0.1) #' Ddot <- 2.5 / 1000 / 365.25 / 24 / 60 / 60 # 2.5 Gy/ka in Seconds #' D0 <- 40 #' fun <- (sigmaphi * exp(-mu * x) * #' exp(-(age * 365.25*24*3600) * #' (sigmaphi * exp(-mu * x) + Ddot/D0)) + Ddot/D0) / #' (sigmaphi * exp(-mu * x) + Ddot/D0) #' #' set.seed(666) #' synth_2 <- data.frame(depth = x, #' intensity = jitter(fun, 1, 0.1), #' error = runif(length(x), 0.01, 0.2)) #' #' ## VALIDATE sample_2 #' fit_SurfaceExposure(synth_2, mu = mu, sigmaphi = sigmaphi, Ddot = 2.5, D0 = D0) #' #' #' #' ## ExampleData.SurfaceExposure$set_1 #' sigmaphi <- 5e-10 #' mu <- 0.9 #' x <- seq(0, 15, 0.2) #' age <- c(1e3, 1e4, 1e5, 1e6) #' set.seed(666) #' #' synth_3 <- vector("list", length = length(age)) #' #' for (i in 1:length(age)) { #' fun <- exp(-sigmaphi * age[i] * 365.25*24*3600 * exp(-mu * x)) #' synth_3[[i]] <- data.frame(depth = x, #' intensity = jitter(fun, 1, 0.05)) #' } #' #' #' ## VALIDATE set_1 #' fit_SurfaceExposure(synth_3, age = age, sigmaphi = sigmaphi) #' #' #' #' ## ExampleData.SurfaceExposure$set_2 #' sigmaphi <- 5e-10 #' mu <- 0.9 #' x <- seq(0, 15, 0.2) #' age <- c(1e2, 1e3, 1e4, 1e5, 1e6) #' Ddot <- 1.0 / 1000 / 365.25 / 24 / 60 / 60 # 2.0 Gy/ka in Seconds #' D0 <- 40 #' set.seed(666) #' #' synth_4 <- vector("list", length = length(age)) #' #' for (i in 1:length(age)) { #' fun <- (sigmaphi * exp(-mu * x) * #' exp(-(age[i] * 365.25*24*3600) * #' (sigmaphi * exp(-mu * x) + Ddot/D0)) + Ddot/D0) / #' (sigmaphi * exp(-mu * x) 
+ Ddot/D0) #' #' synth_4[[i]] <- data.frame(depth = x, #' intensity = jitter(fun, 1, 0.05)) #' } #' #' #' ## VALIDATE set_2 #' fit_SurfaceExposure(synth_4, age = age, sigmaphi = sigmaphi, D0 = D0, Ddot = 1.0) #' #' \dontrun{ #' ExampleData.SurfaceExposure <- list( #' sample_1 = synth_1, #' sample_2 = synth_2, #' set_1 = synth_3, #' set_2 = synth_4 #' ) #' } #' #' @keywords datasets #' @name ExampleData.SurfaceExposure #' @md NULL #' Example Al2O3:C Measurement Data #' #' Measurement data obtained from measuring Al2O3:C chips at the IRAMAT-CRP2A, Université Bordeaux #' Montaigne in 2017 on a Freiberg Instruments lexsyg SMART reader. #' The example data used in particular to allow test of the functions #' developed in framework of the work by Kreutzer et al., 2018. #' #' @format Two datasets comprising [RLum.Analysis-class] data imported using the function [read_XSYG2R] #' #' \describe{ #' `data_ITC`: Measurement data to determine the irradiation time correction, the data can #' be analysed with the function [analyse_Al2O3C_ITC] #' #' `data_CrossTalk`: Measurement data obtained while estimating the irradiation cross-talk of the #' reader used for the experiments. The data can be analysed either with the function #' [analyse_Al2O3C_CrossTalk] or [analyse_Al2O3C_Measurement] #' } #' #' @note From both datasets unneeded curves have been removed and #' the number of aliquots have been reduced to a required minimum to keep the file size small, but #' still being able to run the corresponding functions. #' #' @seealso [analyse_Al2O3C_ITC], [analyse_Al2O3C_CrossTalk], [analyse_Al2O3C_Measurement] #' #' @references Kreutzer, S., Martin, L., Guérin, G., Tribolo, C., Selva, P., Mercier, N., 2018. #' Environmental Dose Rate Determination Using a Passive Dosimeter: Techniques and Workflow for alpha-Al2O3:C Chips. #' Geochronometria 45, 56–67. 
#' #' @keywords datasets #' #' @examples #' #' ##(1) curves #' data(ExampleData.Al2O3C, envir = environment()) #' plot_RLum(data_ITC[1:2]) #' #' @name ExampleData.Al2O3C #' @aliases data_CrossTalk data_ITC #' @md NULL #' Example TR-OSL data #' #' Single TR-OSL curve obtained by Schmidt et al. (under review) for quartz sample BT729 #' (origin: Trebgast Valley, Germany, quartz, 90-200 µm, unpublished data). #' #' @format One [RLum.Data.Curve-class] dataset imported using the function [read_XSYG2R] #' #' \describe{ #' `ExampleData.TR_OSL`: A single [RLum.Data.Curve-class] object with the TR-OSL data #' #' } #' #' #' @seealso [fit_OSLLifeTimes] #' #' @references Schmidt, C., Simmank, O., Kreutzer, S., under review. #' Time-Resolved Optically Stimulated Luminescence of Quartz in the Nanosecond Time Domain. Journal #' of Luminescence, 1-90 #' #' @keywords datasets #' #' @examples #' #' ##(1) curves #' data(ExampleData.TR_OSL, envir = environment()) #' plot_RLum(ExampleData.TR_OSL) #' #' @name ExampleData.TR_OSL #' @md NULL #' Collection of External Data #' #' @description Description and listing of data provided in the folder `data/extdata` #' #' @details #' The **R** package `Luminescence` includes a number of raw data files, which are mostly used in #' the example sections of appropriate functions. They are also used internally for testing corresponding #' functions using the `testthat` package (see files in `tests/testthat/`) to ensure their operational #' reliability. 
#' #' **Accessibility** #' #' If the **R** package `Luminescence` is installed correctly the preferred way to access and use these #' data from within **R** is as follows: #' #' `system.file("extdata/", package = "Luminescence")` #' #' **Individual file descriptions** #' #' *>>Daybreak_TestFile.DAT/.txt<<* #' #' **Type:** raw measurement data \cr #' **Device:** Daybreak OSL/TL reader\cr #' **Measurement date:** unknown\cr #' **Location:** unknown\cr #' **Provided by:** unknown\cr #' **Related R function(s):** `read_Daybreak2R()`\cr #' **Reference:** unknown #' #' *>>DorNie_0016.psl<<* #' #' **Type:** raw measurement data \cr #' **Device:** SUERC portable OSL reader \cr #' **Measurement date:** 19/05/2016 \cr #' **Location:** Dormagen-Nievenheim, Germany \cr #' **Provided by:** Christoph Burow (University of Cologne) \cr #' **Related R function(s):** `read_PSL2R()` \cr #' **Reference:** unpublished \cr #' **Additional information:** Sample measured at an archaeological site near \cr #' Dormagen-Nievenheim (Germany) during a practical course on Luminescence dating in 2016. \cr #' #' *>>QNL84_2_bleached.txt*, *QNL84_2_unbleached.txt<<* #' #' **Type:** Test data for exponential fits \cr #' **Reference:** Berger, G.W., Huntley, D.J., 1989. Test data for exponential fits. Ancient TL 7, 43-46. \cr #' #' #' *>>STRB87_1_bleached.txt*, *STRB87_1_unbleached.txt<<* #' #' **Type:** Test data for exponential fits \cr #' **Reference:** Berger, G.W., Huntley, D.J., 1989. Test data for exponential fits. Ancient TL 7, 43-46. 
#' #' *>>XSYG_file.xsyg* #' #' **Type:** XSYG-file stump \cr #' **Info: ** XSYG-file with some basic curves to test functions \cr #' **Reference:** no reference available #' #' #' @keywords datasets #' @name extdata #' @md NULL Luminescence/R/apply_EfficiencyCorrection.R0000644000176200001440000001131514236146743020524 0ustar liggesusers#' Function to apply spectral efficiency correction to RLum.Data.Spectrum S4 #' class objects #' #' The function allows spectral efficiency corrections for RLum.Data.Spectrum #' S4 class objects #' #' The efficiency correction is based on a spectral response dataset provided #' by the user. Usually the data set for the quantum efficiency is of lower #' resolution and values are interpolated for the required spectral resolution using #' the function [stats::approx][stats::approxfun] #' #' If the energy calibration differs for both data set `NA` values are produces that #' will be removed from the matrix. #' #' @param object [RLum.Data.Spectrum-class] or [RLum.Analysis-class] (**required**): #' S4 object of class `RLum.Data.Spectrum`, `RLum.Analysis`or a [list] of such objects. Other objects in #' the list are skipped. #' #' @param spectral.efficiency [data.frame] (**required**): #' Data set containing wavelengths (x-column) and relative spectral response values #' (y-column) (values between 0 and 1). The provided data will be used to correct all spectra if `object` is #' a [list] #' #' @return Returns same object as provided as input #' #' @note #' Please note that the spectral efficiency data from the camera alone may not #' sufficiently correct for spectral efficiency of the entire optical system #' (e.g., spectrometer, camera ...). 
#' #' @section Function version: 0.2.0 #' #' @author #' Sebastian Kreutzer, IRAMAT-CRP2A, UMR 5060, CNRS-Université Bordeaux Montaigne (France)\cr #' Johannes Friedrich, University of Bayreuth (Germany) #' #' @seealso [RLum.Data.Spectrum-class], [RLum.Analysis-class] #' #' @keywords manip #' #' @examples #' #' ##(1) - use with your own data (uncomment for usage) #' ## spectral.efficiency <- read.csv("your data") #' ## #' ## your.spectrum <- apply_EfficiencyCorrection(your.spectrum, ) #' #' @md #' @export apply_EfficiencyCorrection <- function( object, spectral.efficiency ){ # self-call ----------------------------------------------------------------------------------- ##case we have a list if(inherits(object, "list")){ output_list <- lapply(object, function(o){ if(inherits(o, "RLum.Data.Spectrum") || inherits(o, "RLum.Analysis")){ apply_EfficiencyCorrection(object = o, spectral.efficiency = spectral.efficiency) }else{ warning(paste0("[apply_EfficiencyCorrection()] Skipping ",class(o)," object in input list."), call. = FALSE) return(o) } }) return(output_list) } ##the case of an RLum.Analysis object if(inherits(object, "RLum.Analysis")){ object@records <- lapply(object@records, function(o){ if(inherits(o, "RLum.Data.Spectrum")){ apply_EfficiencyCorrection(object = o, spectral.efficiency = spectral.efficiency) }else{ warning(paste0("[apply_EfficiencyCorrection()] Skipping ",class(o)," object in input list."), call. = FALSE) return(o) } }) return(object) } # Integrity check ----------------------------------------------------------- ##check if object is of class RLum.Data.Spectrum if(!inherits(object, "RLum.Data.Spectrum")) stop("[apply_EfficiencyCorrection()] Input object is not of type RLum.Data.Spectrum",call. = FALSE) if(!inherits(spectral.efficiency, "data.frame")) stop("[apply_EfficiencyCorrection()] 'spectral.efficiency' is not of type data.frame", call. 
= FALSE) ## grep data matrix from the input object temp.matrix <- as(object, "matrix") ## grep efficency values temp.efficiency <- as.matrix(spectral.efficiency[,1:2]) ##test max if(max(temp.efficiency[,2]) > 1) stop("[apply_EfficiencyCorrection()] Relative quantum efficiency values > 1 are not allowed.", call. = FALSE) # Apply method ------------------------------------------------------------ ##the interpolation is needed to align the resolution #set data for interpolation temp.efficiency.x <- as.numeric(row.names(temp.matrix)) temp.efficiency.interpolated <- approx( x = temp.efficiency[,1], y = temp.efficiency[,2], xout = temp.efficiency.x, ties = mean) ##correct for quantum efficiency temp.matrix <- vapply(X = 1:ncol(temp.matrix), FUN = function(x){ temp.matrix[,x]/temp.efficiency.interpolated$y*max(temp.efficiency.interpolated$y, na.rm = TRUE) }, FUN.VALUE = numeric(length = nrow(temp.matrix))) ##remove NA values temp.matrix <- na.exclude(temp.matrix) ##correct colnames colnames(temp.matrix) <- colnames(get_RLum(object)) # Return Output------------------------------------------------------------ temp.output <- set_RLum( class = "RLum.Data.Spectrum", recordType = object@recordType, curveType = object@curveType, data = temp.matrix, info = object@info) invisible(temp.output) } Luminescence/R/names_RLum.R0000644000176200001440000000277414264017373015273 0ustar liggesusers#' S4-names function for RLum S4 class objects #' #' Function calls object-specific names functions for RLum S4 class objects. #' #' The function provides a generalised access point for specific #' [RLum-class] objects.\cr #' Depending on the input object, the corresponding 'names' function will be #' selected. Allowed arguments can be found in the documentations of the #' corresponding [RLum-class] class. 
#' #' @param object [RLum-class] (**required**): #' S4 object of class `RLum` #' #' @return Returns a [character] #' #' @section Function version: 0.1.0 #' #' @author #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #' @seealso [RLum.Data.Curve-class], [RLum.Data.Image-class], #' [RLum.Data.Spectrum-class], [RLum.Analysis-class], [RLum.Results-class] #' #' @keywords utilities #' #' @aliases names_RLum #' #' @md #' @export setGeneric("names_RLum", function(object) { standardGeneric("names_RLum") }) # Method for names_RLum method for RLum objects in a list for a list of objects ------------- #' @describeIn names_RLum #' Returns a list of [RLum-class] objects that had been passed to [names_RLum] #' #' #' @md #' @export setMethod("names_RLum", signature = "list", function(object) { ##apply method in the objects and return the same lapply(object, function(x) { if (inherits(x, "RLum")) { return(names_RLum(x)) } else{ return(x) } }) }) Luminescence/R/smooth_RLum.R0000644000176200001440000000376414264017373015501 0ustar liggesusers#' Smoothing of data #' #' Function calls the object-specific smooth functions for provided RLum S4-class objects. #' #' The function provides a generalised access point for specific #' [RLum-class] objects.\cr #' Depending on the input object, the corresponding function will be selected. #' Allowed arguments can be found in the documentations of the corresponding #' [RLum-class] class. The smoothing is based on an internal function #' called `.smoothing`. #' #' @param object [RLum-class] (**required**): #' S4 object of class `RLum` #' #' @param ... 
further arguments passed to the specific class method #' #' @return #' An object of the same type as the input object is provided #' #' @section Function version: 0.1.0 #' #' @author #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #' @note #' Currently only `RLum` objects of class `RLum.Data.Curve` and `RLum.Analysis` #' (with curve data) are supported! #' #' @seealso [RLum.Data.Curve-class], [RLum.Analysis-class] #' #' @examples #' #' ##load example data #' data(ExampleData.CW_OSL_Curve, envir = environment()) #' #' ##create RLum.Data.Curve object from this example #' curve <- #' set_RLum( #' class = "RLum.Data.Curve", #' recordType = "OSL", #' data = as.matrix(ExampleData.CW_OSL_Curve) #' ) #' #' ##plot data without and with smoothing #' plot_RLum(curve) #' plot_RLum(smooth_RLum(curve)) #' #' @keywords utilities #' #' @md #' @export setGeneric("smooth_RLum", function(object, ...) { standardGeneric("smooth_RLum") }) # Method for smooth_RLum method for RLum objects in a list for a list of objects ------------------- #' @describeIn smooth_RLum #' Returns a list of [RLum-class] objects that had been passed to [smooth_RLum] #' #' #' @md #' @export setMethod("smooth_RLum", signature = "list", function(object, ...){ ##apply method in the objects and return the same lapply(object, function(x){ if(inherits(x, "RLum")){ return(smooth_RLum(x,...)) }else{ return(x) } }) }) Luminescence/R/methods_RLum.R0000644000176200001440000004624214236146743015634 0ustar liggesusers################################################################################## ## METHODS FOR S3 GENERICS ## ################################################################################## ##CAUTION NOTE: ##(1) Please DO NOT access to the S4 objects by using the slots this causes inconsistent ## behaviour, please use the corresponding RLum-methods instead! ## ##(2) Especially, please DO NOT include S3-methods for which no S4-method is implemented! 
Especially ##for coercing. ## ##(3) Finally, what ever you want to implement, check whether a S4-method exists, it should ##be just passed to this methods, not the opposite, otherwise this will yield in undesired behaviour ## ##TODO: For this S3 generics so far no proper documentation exists ... we should consider ##to provide an overview within a separate document, as it becomes otherwise rather ##complicated for beginners to work with the documentation. ## ## -------------------- INTRODUCED WITH 0.5.0 ----------------------- ## #' methods_RLum #' #' Methods for S3-generics implemented for the package 'Luminescence'. #' This document summarises all implemented S3-generics. The name of the function #' is given before the first dot, after the dot the name of the object that is #' supported by this method is given, e.g. `plot.RLum.Data.Curve` can be called #' by `plot(object, ...)`, where `object` is the `RLum.Data.Curve` object. #' #' The term S3-generics sounds complicated, however, it just means that something #' has been implemented in the package to increase the usability for users new #' in R and who are not familiar with the underlying `RLum`-object structure of #' the package. The practical outcome is that operations and functions presented #' in standard books on R can be used without knowing the specifics of the R #' package `'Luminescence'`. For examples see the example section. 
#' #' @param x [RLum-class] or [Risoe.BINfileData-class] (**required**): #' input object #' #' @param object [RLum-class] (**required**): #' input object #' #' @param y [integer] (*optional*): #' the row index of the matrix, data.frame #' #' @param z [integer] (*optional*): #' the column index of the matrix, data.frame #' #' @param i [character] (*optional*): #' name of the wanted record type or data object or row in the `RLum.Data.Curve` object #' #' @param j [integer] (*optional*): #' column of the data matrix in the `RLum.Data.Curve` object #' #' @param value [numeric] **(required)**: #' numeric value which replace the value in the `RLum.Data.Curve` object #' #' @param drop [logical] (*with default*): #' keep object structure or drop it #' #' @param subset `[subset]` [expression] (**required**): #' logical expression indicating elements or rows to keep, this function works #' in [Risoe.BINfileData-class] objects like [subset.data.frame], but takes care #' of the object structure. Works also on [RLum.Analysis-class] objects. #' #' @param row.names [logical] (*with default*): #' enables or disables row names (`as.data.frame`) #' #' @param recursive [logical] (*with default*): #' enables or disables further sub-setting (`unlist`) #' #' @param optional [logical] (*with default*): #' logical. If TRUE, setting row names and converting column names #' (to syntactic names: see make.names) is optional (see [base::as.data.frame]) #' #' @param ... further arguments that can be passed to the method #' #' @note #' `methods_RLum` are not really new functions, everything given here are mostly just #' surrogates for existing functions in the package. 
#' #' @examples #' #' ##load example data #' data(ExampleData.RLum.Analysis, envir = environment()) #' #' @keywords internal #' @md #' @name methods_RLum NULL #################################################################################################### # methods for generic: plot() # ################################################################################################## #' @rdname methods_RLum #' @method plot list #' @export plot.list <- function(x, y, ...) { if (all(sapply(x, function(x) inherits(x, "RLum")))) { plot_RLum(object = x, ...) } else { if (missing(y)) y <- NULL plot.default(x, y, ...) } } #' @rdname methods_RLum #' @method plot RLum.Results #' @export plot.RLum.Results <- function(x, y, ...) plot_RLum(object = x, ...) #' @rdname methods_RLum #' @method plot RLum.Analysis #' @export plot.RLum.Analysis <- function(x, y, ...) plot_RLum(object = x, ...) #' @rdname methods_RLum #' @method plot RLum.Data.Curve #' @export plot.RLum.Data.Curve <- function(x, y, ...) plot_RLum(object = x, ...) #' @rdname methods_RLum #' @method plot RLum.Data.Spectrum #' @export plot.RLum.Data.Spectrum <- function(x, y, ...) plot_RLum(object = x, ...) #' @rdname methods_RLum #' @method plot RLum.Data.Image #' @export plot.RLum.Data.Image <- function(x, y, ...) plot_RLum(object = x, ...) #' @rdname methods_RLum #' @method plot Risoe.BINfileData #' @export plot.Risoe.BINfileData <- function(x, y, ...) plot_Risoe.BINfileData(BINfileData = x, ...) #################################################################################################### # methods for generic: hist() # ################################################################################################## #' @rdname methods_RLum #' @export hist.RLum.Results <- function(x, ...) plot_Histogram(data = x, ...) #' @rdname methods_RLum #' @export hist.RLum.Data.Image <- function(x, ...) hist(x =get_RLum(x)@data@values, ...) #' @rdname methods_RLum #' @export hist.RLum.Data.Curve <- function(x, ...) 
hist(as(get_RLum(x),"matrix")[,2]) #' @rdname methods_RLum #' @export hist.RLum.Analysis <- function(x, ...) lapply(1:length_RLum(x), function(z){ hist(as(get_RLum(x, record.id = z, ...),"matrix")[,2])}) #################################################################################################### # methods for generic: summary() # ################################################################################################## # methods for generic: summary() #' @rdname methods_RLum #' @method summary RLum.Results #' @export summary.RLum.Results <- function(object, ...) get_RLum(object = object, ...) #' @rdname methods_RLum #' @method summary RLum.Analysis #' @export summary.RLum.Analysis <- function(object, ...) lapply(object@records, function(x) summary(x@data)) #' @rdname methods_RLum #' @method summary RLum.Data.Image #' @export summary.RLum.Data.Image <- function(object, ...) summary(object@data@data@values) # summary.RLum.Data.Spectrum <- function(object, ...) #' @rdname methods_RLum #' @method summary RLum.Data.Curve #' @export summary.RLum.Data.Curve <- function(object, ...) summary(object@data, ...) #################################################################################################### # methods for generic: subset() # ################################################################################################## #' @rdname methods_RLum #' @method subset Risoe.BINfileData #' #' @param records.rm [subset] [logical] (*with default*): #' remove records from data set, can be disabled, to just set the column `SET` to `TRUE` or `FALSE` #' #' @md #' @export subset.Risoe.BINfileData <- function(x, subset, records.rm = TRUE, ...) { if(length(list(...))) warning(paste("Argument not supported and skipped:", names(list(...)))) ##select relevant rows sel <- tryCatch(eval( expr = substitute(subset), envir = x@METADATA, enclos = parent.frame() ), error = function(e) { stop("\n\nInvalid subset options. 
\nValid terms are: ", paste(names(x@METADATA), collapse = ", ")) }) ##probably everything is FALSE now? if (records.rm) { if (any(sel)) { x@METADATA <- x@METADATA[sel, ] x@DATA <- x@DATA[sel] x@METADATA[["ID"]] <- 1:length(x@METADATA[["ID"]]) return(x) } else{ return(NULL) } }else{ x@METADATA[["SEL"]] <- sel return(x) } } #' @rdname methods_RLum #' @method subset RLum.Analysis #' @export subset.RLum.Analysis <- function(x, subset = NULL, ...) { do.call(get_RLum, list(object = x, drop = FALSE, subset = substitute(subset), env = parent.frame())) } #################################################################################################### # methods for generic: bin() # ################################################################################################## #' @rdname methods_RLum #' @export bin.RLum.Data.Curve <- function(x, bin_size = 2, ...) bin_RLum.Data(x, bin_size = bin_size) #' @rdname methods_RLum #' @export bin.RLum.Data.Spectrum <- function(x, bin_size.row = 1, bin_size.col = 1, ...){ bin_RLum.Data(x, bin_size.row = bin_size.row, bin_size.col = bin_size.col) } #################################################################################################### # methods for generic: length() # ################################################################################################## #' @rdname methods_RLum #' @export length.RLum.Results <- function(x, ...) length_RLum(x) #' @rdname methods_RLum #' @export length.RLum.Analysis <- function(x, ...) length_RLum(x) #' @rdname methods_RLum #' @export length.RLum.Data.Curve <- function(x, ...) length_RLum(x) #' @rdname methods_RLum #' @export length.Risoe.BINfileData <- function(x, ...) 
length(x@METADATA$ID) #################################################################################################### # methods for generic: dim() # ################################################################################################## # methods for generic: dim() #' @rdname methods_RLum #' @export dim.RLum.Data.Curve <- function(x) dim(as(x, "matrix")) #' @rdname methods_RLum #' @export dim.RLum.Data.Spectrum <- function(x) dim(as(x, "matrix")) #################################################################################################### # methods for generic: rep() # ################################################################################################## #' @rdname methods_RLum #' @export rep.RLum <- function(x, ...) replicate_RLum(x, ...) #################################################################################################### # methods for generic: name() # ################################################################################################## #' @rdname methods_RLum #' @export names.RLum.Data.Curve <- function(x, ...) names_RLum(x) #' @rdname methods_RLum #' @export names.RLum.Data.Spectrum <- function(x, ...) names_RLum(x) #' @rdname methods_RLum #' @export names.RLum.Data.Image <- function(x, ...) names_RLum(x) #' @rdname methods_RLum #' @export names.RLum.Analysis <- function(x, ...) names_RLum(x) #' @rdname methods_RLum #' @export names.RLum.Results <- function(x, ...) names_RLum(x) #' @rdname methods_RLum #' @export names.Risoe.BINfileData <- function(x) as.character(x@METADATA$LTYPE) #################################################################################################### # methods for generic: row.name() # ################################################################################################## #' @rdname methods_RLum #' @export row.names.RLum.Data.Spectrum <- function(x, ...) 
rownames(as(x, "matrix")) #################################################################################################### # methods for generic: as.data.frame() # ################################################################################################## #' @rdname methods_RLum #' @export as.data.frame.RLum.Data.Curve <- function(x, row.names = NULL, optional = FALSE, ...) as(x, "data.frame") #' @rdname methods_RLum #' @export as.data.frame.RLum.Data.Spectrum <- function(x, row.names = NULL, optional = FALSE, ...) as(x, "data.frame") # for RLum.Results ... makes no sense and may yield in unpredictable behaviour #' @rdname methods_RLum #' @export as.data.frame.Risoe.BINfileData <- function(x, row.names = NULL, optional = FALSE, ...) { ## set matrix m <- matrix(NA, ncol = max(sapply(x@DATA, length)), nrow = length(x@DATA)) ## rename columns colnames(m) <- paste0("C",1:ncol(m)) ## fill matrix for(i in 1:length(x@DATA)) m[i,1:length(x@DATA[[i]])] <- x@DATA[[i]] ##convert to data.frame and bind cbind(x@METADATA, as.data.frame(m)) } #################################################################################################### # methods for generic: as.list() # ################################################################################################## #' @rdname methods_RLum #' @export as.list.RLum.Results <- function(x, ...) as(x, "list") #' @rdname methods_RLum #' @export as.list.RLum.Data.Curve <- function(x, ...) as(x, "list") #' @rdname methods_RLum #' @export as.list.RLum.Data.Image <- function(x, ...) as(x, "list") #' @rdname methods_RLum #' @export as.list.RLum.Analysis <- function(x, ...) as(x, "list") #################################################################################################### # methods for generic: as.matrix() # ################################################################################################## #' @rdname methods_RLum #' @export as.matrix.RLum.Data.Curve <- function(x, ...) 
as(x, "matrix") #' @rdname methods_RLum #' @export as.matrix.RLum.Data.Spectrum <- function(x, ...) as(x, "matrix") #' @rdname methods_RLum #' @export as.matrix.RLum.Data.Image <- function(x, ...) as(x, "matrix") # for RLum.Results ... makes no sense and may yield in unpredictable behaviour #################################################################################################### # methods for generic: is() #################################################################################################### #For this function no S4 method was written, as this would come at the cost of performance and #is totally unnecessary #' @rdname methods_RLum #' @export is.RLum <- function(x, ...) is(x, "RLum") #' @rdname methods_RLum #' @export is.RLum.Data <- function(x, ...) is(x, "RLum.Data") #' @rdname methods_RLum #' @export is.RLum.Data.Curve <- function(x, ...) is(x, "RLum.Data.Curve") #' @rdname methods_RLum #' @export is.RLum.Data.Spectrum <- function(x, ...) is(x, "RLum.Data.Spectrum") #' @rdname methods_RLum #' @export is.RLum.Data.Image <- function(x, ...) is(x, "RLum.Data.Image") #' @rdname methods_RLum #' @export is.RLum.Analysis <- function(x, ...) is(x, "RLum.Analysis") #' @rdname methods_RLum #' @export is.RLum.Results <- function(x, ...) is(x, "RLum.Results") #################################################################################################### # methods for generic: merge() #################################################################################################### #' @rdname methods_RLum #' @export merge.RLum <- function(x, y, ...) 
merge_RLum(append(list(...), values = c(x, y))) #################################################################################################### # methods for generic: unlist() #################################################################################################### #' @rdname methods_RLum #' @method unlist RLum.Analysis #' @export unlist.RLum.Analysis <- function(x, recursive = TRUE, ...){ temp <- get_RLum(object = x, recursive = recursive, ... ) if(recursive){ unlist(lapply(1:length(temp), function(x){ get_RLum(temp) }), recursive = FALSE) }else{ return(temp) } } #################################################################################################### # methods for generic: `+` #################################################################################################### #' @rdname methods_RLum #' #' @examples #' #' ##combine curve is various ways #' curve1 <- IRSAR.RF.Data[[1]] #' curve2 <- IRSAR.RF.Data[[1]] #' curve1 + curve2 #' curve1 - curve2 #' curve1 / curve2 #' curve1 * curve2 #' #' @export `+.RLum.Data.Curve` <- function(x, y) merge_RLum(list(x, y), merge.method = "sum") #################################################################################################### # methods for generic: `-` #################################################################################################### #' @rdname methods_RLum #' @export `-.RLum.Data.Curve` <- function(x, y) merge_RLum(list(x, y), merge.method = "-") #################################################################################################### # methods for generic: `*` #################################################################################################### #' @rdname methods_RLum #' @export `*.RLum.Data.Curve` <- function(x, y) merge_RLum(list(x, y), merge.method = "*") #################################################################################################### # methods for generic: `/` 
#################################################################################################### #' @rdname methods_RLum #' @export `/.RLum.Data.Curve` <- function(x, y) merge_RLum(list(x, y), merge.method = "/") #################################################################################################### # methods for generic: `[` #################################################################################################### #' @rdname methods_RLum #' @export `[.RLum.Data.Curve` <- function(x,y,z, drop = TRUE) {as(x, "matrix")[y,z, drop = drop]} #' @rdname methods_RLum #' @export `[.RLum.Data.Spectrum` <- function(x,y,z, drop = TRUE) {as(x, "matrix")[y,z, drop = drop]} #' @rdname methods_RLum #' @export `[.RLum.Data.Image` <- function(x,y,z, drop = TRUE) {as(x, "matrix")[y,z, drop = drop]} #' @rdname methods_RLum #' @export `[.RLum.Analysis` <- function(x, i, drop = FALSE) { if (is(i, "character")) { get_RLum(x, recordType = i, drop = drop) } else{ get_RLum(x, record.id = i, drop = drop) } } #' @rdname methods_RLum #' @export `[.RLum.Results` <- function(x, i, drop = TRUE) {get_RLum(x, data.object = i, drop = drop)} #################################################################################################### # methods for generic: `[<-` #################################################################################################### #' @rdname methods_RLum #' @export `[<-.RLum.Data.Curve` <- function(x, i, j, value){ x@data[i,j] <- value #this is without any S4-method, but otherwise the overhead it too high return(x) } #################################################################################################### # methods for generic: `[[` #################################################################################################### #' @rdname methods_RLum #' @export `[[.RLum.Analysis` <- function(x, i) { if (is(i, "character")) { get_RLum(x, recordType = i) } else{ get_RLum(x, record.id = i) } } #' @rdname methods_RLum #' 
@export `[[.RLum.Results` <- function(x, i) {get_RLum(x, data.object = i)} #################################################################################################### # methods for generic: `$` #################################################################################################### #' @rdname methods_RLum #' @export `$.RLum.Data.Curve` <- function(x, i) {get_RLum(x, info.object = i)} #' @rdname methods_RLum #' #' @examples #' #' ##`$` access curves #' IRSAR.RF.Data$RF #' #' @export `$.RLum.Analysis` <- function(x, i) {get_RLum(x, recordType = i)} #' @rdname methods_RLum #' @export `$.RLum.Results` <- function(x, i) {get_RLum(x, data.object = i)} Luminescence/R/fit_SurfaceExposure.R0000644000176200001440000004655214521207352017212 0ustar liggesusers#' @title Nonlinear Least Squares Fit for OSL surface exposure data #' #' @description #' This function determines the (weighted) least-squares estimates of the #' parameters of either equation 1 in *Sohbati et al. (2012a)* or equation 12 in #' *Sohbati et al. (2012b)* for a given OSL surface exposure data set (**BETA**). #' #' @details #' **Weighted fitting** #' #' If `weights = TRUE` the function will use the inverse square of the error (\eqn{1/\sigma^2}) #' as weights during fitting using [minpack.lm::nlsLM]. Naturally, for this to #' take effect individual errors must be provided in the third column of the #' `data.frame` for `data`. Weighted fitting is **not** supported if `data` #' is a list of multiple `data.frame`s, i.e., it is not available for global #' fitting. #' #' **Dose rate** #' If any of the arguments `Ddot` or `D0` is at its default value (`NULL`), #' this function will fit equation 1 in Sohbati et al. (2012a) to the data. If #' the effect of dose rate (i.e., signal saturation) needs to be considered, #' numeric values for the dose rate (`Ddot`) (in Gy/ka) and the characteristic #' saturation dose (`D0`) (in Gy) must be provided. 
The function will then fit #' equation 12 in Sohbati et al. (2012b) to the data. #' #' **NOTE**: Currently, this function does **not** consider the variability #' of the dose rate with sample depth (`x`)! In the original equation the dose #' rate `D` is an arbitrary function of `x` (term `D(x)`), but here `D` is assumed #' constant. #' #' **Global fitting** #' If `data` is [list] of multiple `data.frame`s, each representing a separate #' sample, the function automatically performs a global fit to the data. This #' may be useful to better constrain the parameters `sigmaphi` or `mu` and #' **requires** that known ages for each sample is provided #' (e.g., `age = c(100, 1000)` if `data` is a list with two samples). #' #' #' @param data [data.frame] or [list] (**required**): #' Measured OSL surface exposure data with the following structure: #' #' ``` #' (optional) #' | depth (a.u.)| intensity | error | #' | [ ,1] | [ ,2] | [ ,3] | #' |-------------|-----------|-------| #' [1, ]| ~~~~ | ~~~~ | ~~~~ | #' [2, ]| ~~~~ | ~~~~ | ~~~~ | #' ... | ... | ... | ... | #' [x, ]| ~~~~ | ~~~~ | ~~~~ | #' #' ``` #' #' Alternatively, a [list] of `data.frames` can be provided, where each #' `data.frame` has the same structure as shown above, with the exception that #' they must **not** include the optional error column. Providing a [list] as #' input automatically activates the global fitting procedure (see details). #' #' @param sigmaphi [numeric] (*optional*): #' A numeric value for `sigmaphi`, i.e. the charge detrapping rate. #' Example: `sigmaphi = 5e-10` #' #' @param mu [numeric] (*optional*): #' A numeric value for mu, i.e. the light attenuation coefficient. #' Example: `mu = 0.9` #' #' @param age [numeric] (*optional*): #' The age (a) of the sample, if known. If `data` is a [list] of *x* samples, #' then `age` must be a numeric vector of length *x*. #' Example: `age = 10000`, or `age = c(1e4, 1e5, 1e6)`. 
#' #' @param Ddot [numeric] (*optional*): #' A numeric value for the environmental dose rate (Gy/ka). For this argument #' to be considered a value for `D0` must also be provided; otherwise it will be #' ignored. #' #' @param D0 [numeric] (*optional*): #' A numeric value for the characteristic saturation dose (Gy). For this argument #' to be considered a value for `Ddot` must also be provided; otherwise it will be #' ignored. #' #' @param weights [logical] (*optional*): #' If `TRUE` the fit will be weighted by the inverse square of the error. #' Requires `data` to be a [data.frame] with three columns. #' #' @param plot [logical] (*optional*): #' Show or hide the plot. #' #' @param legend [logical] (*optional*): #' Show or hide the equation inside the plot. #' #' @param error_bars [logical] (*optional*): #' Show or hide error bars (only applies if errors were provided). #' #' @param coord_flip [logical] (*optional*): #' Flip the coordinate system. #' #' @param ... Further parameters passed to [plot]. #' Custom parameters include: #' - `verbose` ([logical]): show or hide console output #' - `line_col`: Colour of the fitted line #' - `line_lty`: Type of the fitted line (see `lty` in `?par`) #' - `line_lwd`: Line width of the fitted line (see `lwd` in `?par`) #' #' @return #' #' Function returns results numerically and graphically: #' #' -----------------------------------\cr #' `[ NUMERICAL OUTPUT ]`\cr #' -----------------------------------\cr #' #' **`RLum.Results`**-object #' #' **slot:** **`@data`** #' #' \tabular{lll}{ #' **Element** \tab **Type** \tab **Description**\cr #' `$summary` \tab `data.frame` \tab summary of the fitting results \cr #' `$data` \tab `data.frame` \tab the original input data \cr #' `$fit` \tab `nls` \tab the fitting object produced by [minpack.lm::nlsLM] \cr #' `$args` \tab `character` \tab arguments of the call \cr #' `$call` \tab `call` \tab the original function call \cr #' } #' #'**slot:** **`@info`** #' #' Currently unused. 
#' #' ------------------------\cr #' `[ PLOT OUTPUT ]`\cr #' ------------------------\cr #' #' A scatter plot of the provided depth-intensity OSL surface exposure data #' with the fitted model. #' #' @section Function version: 0.1.0 #' #' @note #' **This function has BETA status. If possible, results should be** #' **cross-checked.** #' #' @author Christoph Burow, University of Cologne (Germany) #' #' @seealso [ExampleData.SurfaceExposure], [minpack.lm::nlsLM] #' #' @references #' #' Sohbati, R., Murray, A.S., Chapot, M.S., Jain, M., Pederson, J., 2012a. #' Optically stimulated luminescence (OSL) as a chronometer for surface exposure #' dating. Journal of Geophysical Research 117, B09202. doi: #' \doi{10.1029/2012JB009383} #' #' Sohbati, R., Jain, M., Murray, A.S., 2012b. Surface exposure dating of #' non-terrestrial bodies using optically stimulated luminescence: A new method. #' Icarus 221, 160-166. #' #' @keywords datagen #' #' @examples #' #' ## Load example data #' data("ExampleData.SurfaceExposure") #' #' ## Example 1 - Single sample #' # Known parameters: 10000 a, mu = 0.9, sigmaphi = 5e-10 #' sample_1 <- ExampleData.SurfaceExposure$sample_1 #' head(sample_1) #' results <- fit_SurfaceExposure( #' data = sample_1, #' mu = 0.9, #' sigmaphi = 5e-10) #' get_RLum(results) #' #' #' ## Example 2 - Single sample and considering dose rate #' # Known parameters: 10000 a, mu = 0.9, sigmaphi = 5e-10, #' # dose rate = 2.5 Gy/ka, D0 = 40 Gy #' sample_2 <- ExampleData.SurfaceExposure$sample_2 #' head(sample_2) #' results <- fit_SurfaceExposure( #' data = sample_2, #' mu = 0.9, #' sigmaphi = 5e-10, #' Ddot = 2.5, #' D0 = 40) #' get_RLum(results) #' #' ## Example 3 - Multiple samples (global fit) to better constrain 'mu' #' # Known parameters: ages = 1e3, 1e4, 1e5, 1e6 a, mu = 0.9, sigmaphi = 5e-10 #' set_1 <- ExampleData.SurfaceExposure$set_1 #' str(set_1, max.level = 2) #' results <- fit_SurfaceExposure( #' data = set_1, #' age = c(1e3, 1e4, 1e5, 1e6), #' sigmaphi = 5e-10) 
#' get_RLum(results)
#'
#'
#' ## Example 4 - Multiple samples (global fit) and considering dose rate
#' # Known parameters: ages = 1e2, 1e3, 1e4, 1e5, 1e6 a, mu = 0.9, sigmaphi = 5e-10,
#' # dose rate = 1.0 Gy/ka, D0 = 40 Gy
#' set_2 <- ExampleData.SurfaceExposure$set_2
#' str(set_2, max.level = 2)
#' results <- fit_SurfaceExposure(
#'   data = set_2,
#'   age = c(1e2, 1e3, 1e4, 1e5, 1e6),
#'   sigmaphi = 5e-10,
#'   Ddot = 1,
#'   D0 = 40)
#' get_RLum(results)
#'
#' @md
#' @export
fit_SurfaceExposure <- function(
  data,
  sigmaphi = NULL,
  mu = NULL,
  age = NULL,
  Ddot = NULL,
  D0 = NULL,
  weights = FALSE,
  plot = TRUE,
  legend = TRUE,
  error_bars = TRUE,
  coord_flip = FALSE,
  ...) {

  ## SETTINGS ----
  settings <- list(
    verbose = TRUE,
    info = list()
  )
  settings <- modifyList(settings, list(...))

  ## Input object handling -----------------------------------------------------

  ## Data type validation
  # NOTE(review): the extracted table is assigned to 'object', which is not
  # used anywhere below -- presumably this should read
  # 'data <- get_RLum(data, "data")'; confirm before changing
  if (inherits(data, "RLum.Results"))
    object <- get_RLum(data, "data")
  if (inherits(data, "matrix"))
    data <- as.data.frame(data)
  if (inherits(data, "data.table"))
    data <- as.data.frame(data)

  ## For global fitting of multiple data sets 'data' must be a list
  if (inherits(data, "list")) {

    # Global fitting requires an equal number of ages to be provided
    if (length(data) != length(age))
      stop("If 'data' is a list of data sets for global fitting, 'age' must be of the same length.",
           call. = FALSE)

    # TODO: Support weighted fitting for global fit
    if (weights) {
      if (settings$verbose)
        warning("Argument 'weights' is not supported when multiple data sets are provided for global fitting.",
                call.
= FALSE) weights <- FALSE } # collapse list into a data.frame with a $group column to distinguish # between individual samples data_list <- data for (i in 1:length(data)) data[[i]]$group <- LETTERS[[i]] data <- do.call(rbind, data) data$group <- as.factor(data$group) global_fit <- TRUE } else { # ignore 'global_fit' if 'data' is a data.frame global_fit <- FALSE } # Exit if data type is invalid if (!inherits(data, "data.frame")) stop("'data' must be of class data.frame.", call. = FALSE) # Check which parameters have been provided if (!is.null(age) && any(is.na(age))) age <- NULL if (!is.null(sigmaphi) && any(is.na(sigmaphi))) sigmaphi <- NULL if (!is.null(mu) && any(is.na(mu))) mu <- NULL ## Weighting options (only available for global fitting) if (ncol(data) >= 3 && weights && !global_fit) wi <- (1 / data[ ,3]^2) / sum(1 / data[ ,3]^2) else wi <- rep(1, times = nrow(data)) # extract errors into seperate variable if (ncol(data) >= 3 && !global_fit) error <- data[ ,3] else error <- NULL ## Take only the first to columns (depth, signal) if (ncol(data) > 2 && !global_fit) data <- data[ ,1:2] ## remove rows with NA if (any(is.na(data))) { data <- data[complete.cases(data), ] if (settings$verbose) warning("NA values in 'data' were removed.", call. = FALSE) } ## Data preprocessing ---- # set column names if (!global_fit) colnames(data) <- c("x", "y") else colnames(data) <- c("x", "y", "group") ## FITTING ---- ## Functions # w/o dose rate fun <- formula(y ~ exp(-sigmaphi * age * 365.25*24*3600 * exp(-mu * x))) fun_global <- formula(y ~ exp(-sigmaphi * age[group] * 365.25*24*3600 * exp(-mu * x))) # w/ dose rate (Sohbati et al. 
2012, eq 12) if (!is.null(Ddot)) Ddot <- Ddot / 1000 / 365.25 / 24 / 60 / 60 fun_w_dr <- formula( y ~ (sigmaphi * exp(-mu * x) * exp(-(age * 365.25*24*3600) * (sigmaphi * exp(-mu * x) + Ddot/D0)) + Ddot/D0) / (sigmaphi * exp(-mu * x) + Ddot/D0) ) fun_global_w_dr <- formula( y ~ (sigmaphi * exp(-mu * x) * exp(-(age[group] * 365.25*24*3600) * (sigmaphi * exp(-mu * x) + Ddot/D0)) + Ddot/D0) / (sigmaphi * exp(-mu * x) + Ddot/D0) ) ## start parameter start <- list(sigmaphi = if (is.null(sigmaphi)) 5.890e-09 else NULL, mu = if (is.null(mu)) 1 else NULL, age = if (is.null(age)) 2 else NULL) start <- start[!sapply(start, is.null)] ## fitting boundaries lower <- list(sigmaphi = if (is.null(sigmaphi)) -Inf else NULL, mu = if (is.null(mu)) 0 else NULL, age = if (is.null(age)) 0 else NULL) upper <- list(sigmaphi = if (is.null(sigmaphi)) Inf else NULL, mu = if (is.null(mu)) Inf else NULL, age = if (is.null(age)) Inf else NULL) ## Decision tree which of the functions to use if (!is.null(Ddot) && !is.null(D0)) { if (global_fit) use_fun <- fun_global_w_dr else use_fun <- fun_w_dr } else { if (global_fit) use_fun <- fun_global else use_fun <- fun } # (un)constrained fitting fit <- tryCatch({ minpack.lm::nlsLM(formula = use_fun, data = data, start = start, lower = unlist(lower), upper = unlist(upper), weights = wi) }, error = function(e) { e } ) # return NULL if fitting failed if (!inherits(fit, "simpleError") && !inherits(try(summary(fit), silent = TRUE), "try-error")) { # Extract coefficients coef <- as.data.frame(coef(summary(fit))) } else { if (settings$verbose) message("[fit_SurfaceExposure()] \n- Unable to fit the data. Original error from minpack::nlsLM():\n\n", fit$message) # Fill with NA values coef <- data.frame( "Estimate" = rep(NA, 3), "Std. Error" = rep(NA, 3), row.names = c("age", "sigmaphi", "mu"), check.names = FALSE ) } ## RESULTS ---- summary <- data.frame( age = if (is.null(age)) coef["age", "Estimate"] else age, age_error = coef["age", "Std. 
Error"], sigmaphi = if (is.null(sigmaphi)) coef["sigmaphi", "Estimate"] else sigmaphi, sigmaphi_error = coef["sigmaphi", "Std. Error"], mu = if (is.null(mu)) coef["mu", "Estimate"] else mu, mu_error = coef["mu", "Std. Error"] ) ## Create RLum.Results object results <- set_RLum(class = "RLum.Results", originator = "fit_SurfaceExposure", data = list(summary = summary, data = data, fit = fit, args = as.list(sys.call()[-1]), call = sys.call()), info = settings$info ) ## PLOT ---- if (plot) { # remove $group column for easier data handling if (global_fit) data$group <- NULL # re-order x,y columns if (coord_flip) data <- data.frame(data$y, data$x) # set default plot settings plot_settings <- list( x = data, main = "", pch = 21, col = "black", bg = "red", xlab = if (!coord_flip) "Depth (mm)" else "OSL intensity (Ln/Tn)", ylab = if (!coord_flip) "OSL intensity (Ln/Tn)" else "Depth (mm)", cex = 1.0, lty = 1, lwd = 1, log = "", ylim = if (!coord_flip) range(pretty(data[ ,2])) else rev(range(pretty(data[ ,2]))), xlim = range(pretty(data[ ,1])) ) # override default settings with valid arguments in ... 
plot_settings <- modifyList(plot_settings, list(...)) valid_settings <- c(names(par()), formalArgs("title"), formalArgs("plot.default"), "cex") plot_settings <- plot_settings[names(plot_settings) %in% valid_settings] # set global plot settings par(cex = plot_settings$cex) if (grepl("y", plot_settings$log)) { plot_settings$ylim[1] <- 0.01 plot_settings$x <- data[which(data[ ,2] > 0),] } ## create main plot do.call("plot", modifyList(plot_settings, list(x = NA))) ## add data points if (!global_fit) { points(data, type = "p", pch = plot_settings$pch, bg = plot_settings$bg, col = plot_settings$col) } else { Map(function(d, i) { points(d, type = "p", pch = plot_settings$pch, bg = i, col = plot_settings$col) }, split(results$data, results$data$group), 1:length(unique(results$data$group))) } ## add fitted curve if (!inherits(fit, "error") && !inherits(fit, "simpleError")) { if (coord_flip) { oldx <- data[ ,2] } else { oldx <- data[ ,1] } newx <- seq(range(oldx)[1], range(oldx)[2], length.out = 10000) newy <- suppressWarnings(predict(fit, newdata = list(x = newx))) if (coord_flip) { tmp <- newx newx <- newy newy <- tmp } if (!global_fit) { points(newx, newy, type = "l", col = ifelse("line_col" %in% names(list(...)), list(...)$line_col, "blue"), lty = ifelse("line_lty" %in% names(list(...)), list(...)$line_lty, 1), lwd = ifelse("line_lwd" %in% names(list(...)), list(...)$line_lwd, 1)) } else { for (i in 1:length(data_list)) { seg <- seq(i * 101 - 100, 10000, nrow(data)) points(newx[seg], newy[seg], type = "l", col = ifelse("line_col" %in% names(list(...)), list(...)$line_col, i), lty = ifelse("line_lty" %in% names(list(...)), list(...)$line_lty, 1), lwd = ifelse("line_lwd" %in% names(list(...)), list(...)$line_lwd, 1)) } } } else { legend("center", legend = "Unable to fit the data!\t\t") } # add error bars (if weighted fit) if (!is.null(error) && error_bars) { segments(plot_settings$x[ ,1], plot_settings$x[ ,2] - error, plot_settings$x[ ,1], plot_settings$x[ ,2] + error) } 
# add formula if (legend && !inherits(fit, "simpleError")) { formula_text <- paste0("y = ", as.character(fit$m$formula())[3], "\t\t") if (!is.null(age)) { if (!global_fit) { formula_text <- gsub("age", age, formula_text) } else { formula_text <- gsub("age", paste0("[", paste(age, collapse = "|"), "]"), formula_text) formula_text <- gsub("\\[group\\]", "", formula_text) } } if (!is.null(sigmaphi)) formula_text <- gsub("sigmaphi", sigmaphi, formula_text) if (!is.null(mu)) formula_text <- gsub("mu", mu, formula_text) legend(ifelse(coord_flip, "bottomleft", "bottomright"), legend = formula_text, cex = 0.8, bty = "n") } } ## CONSOLE ---- if (settings$verbose) { cat("\n [fit_SurfaceExposure()] \n\n") if (!global_fit) { ## STANDARD OUTPUT cat(" Estimated paramater(s):\n", "-----------------------\n") if (is.null(age)) cat(paste0(" age (a):\t", signif(results$summary$age, 3), " \u00B1 ", signif(results$summary$age_error, 3), "\n")) if (is.null(sigmaphi)) cat(paste0(" sigmaphi:\t", signif(results$summary$sigmaphi, 3), " \u00B1 ", signif(results$summary$sigmaphi_error, 3), "\n")) if (is.null(mu)) cat(paste0(" mu:\t\t", signif(results$summary$mu, 3), " \u00B1 ", signif(results$summary$mu_error, 3), "\n")) cat("\n") } else { ## GLOBAL FIT OUTPUT cat(" Shared estimated paramater(s):\n", "-----------------------\n") if (is.null(sigmaphi)) cat(paste0(" sigmaphi:\t", signif(unique(results$summary$sigmaphi), 3), " \u00B1 ", signif(unique(results$summary$sigmaphi_error), 3), "\n")) if (is.null(mu)) cat(paste0(" mu:\t\t", signif(unique(results$summary$mu), 3), " \u00B1 ", signif(unique(results$summary$mu_error), 3), "\n")) cat("\n") } ## STANDARD OUTPUT cat(" Fixed parameters(s):\n", "--------------------\n") if (!is.null(age)) cat(paste0(" age (a):\t", paste(age, collapse = ", "), "\n")) if (!is.null(sigmaphi)) cat(paste0(" sigmaphi:\t", sigmaphi, "\n")) if (!is.null(mu)) cat(paste0(" mu:\t\t", mu, "\n")) cat("\n") if (!is.null(age)) { message(paste0("To apply the estimated 
parameters to a sample of unknown age run:\n\n",
                   "fit_SurfaceExposure(data = ", capture.output(results$args[[1]]),
                   ", sigmaphi = ", signif(unique(results$summary$sigmaphi), 3),
                   ", mu = ", signif(unique(results$summary$mu), 3), ")\n\n"))
    }
  }

  ## EXIT ----
  return(results)
}
Luminescence/R/methods_DRAC.R0000644000176200001440000002145114236146743015461 0ustar liggesusers##################################################################################
##                          METHODS FOR S3 GENERICS                             ##
##################################################################################

## ---------------------------------------------------------------------------##
## DATA FRAME COERCION METHOD

## This is a method for the as.data.frame S3 generic. We need this to intercept the
## DRAC list object after it has passed the actual list-method. After it was
## coerced to a data.frame we assign new column names (DRAC ID keys) and
## make sure that all columns are either of class 'character' or 'numeric'.
## Finally, we attach a further class name to identify it as a valid DRAC object
## when passed to use_DRAC
#' @export
as.data.frame.DRAC.list <- function(x, row.names = NULL, optional = FALSE, ...) {
  ## coerce via the plain list method first
  DF <- as.data.frame.list(x)

  ## DRAC addresses input table columns by "TI:<n>" keys, not by field names
  colnames(DF) <- paste0("TI:", 1:ncol(DF))

  ## flatten factor columns (option fields) to plain character
  for (i in 1:ncol(DF)) {
    if (is.factor(DF[ ,i]))
      DF[ ,i] <- as.character(DF[, i])
  }

  ## extra class tag identifies this as a valid object when passed to use_DRAC()
  class(DF) <- c("data.frame", "DRAC.data.frame")
  return(DF)
}

## ---------------------------------------------------------------------------##
## PRINT METHOD

## Prints a DRAC.highlights object: for each element one header of the form
## "<key> = <field name>:" followed by the element's values, one per line.
#' @export
print.DRAC.highlights <- function(x, ...) {
  x <- as.list(x)
  names <- names(x)
  mapply(function(el, name) {
    cat(paste0(attributes(el)$key, " = ", name,":\n ", paste(el, collapse = ",\n "), "\n"))
  }, x, names)
}

## Prints a DRAC.list either as a human-readable overview of every field
## (default) or, with blueprint = TRUE, as ready-to-copy assignment code.
#' @export
print.DRAC.list <- function(x, blueprint = FALSE, ...)
{
  ## CASE 1: Pretty print the structure of the DRAC list
  if (!blueprint) {
    # wrap long description texts at 'limit' characters per printed line
    limit <- 80
    for (i in 1:length(x)) {
      # for pretty printing we insert newlines and tabs at specified lengths
      ls <- attributes(x[[i]])$description
      ls.n <- nchar(ls)
      ls.block <- floor(ls.n / limit)
      strStarts <- seq(0, ls.n, limit)
      strEnds <- seq(limit-1, ls.n + limit, limit)
      blockString <- paste(mapply(function(start, end) {
        trimmedString <- paste(substr(ls, start, end), "\n\t\t\t")
        # drop leading spaces introduced by cutting mid-sentence
        if (substr(trimmedString, 1, 1) == " ")
          trimmedString <- gsub("^[ ]*", "", trimmedString)
        return(trimmedString)
      }, strStarts, strEnds), collapse="")

      msg <- paste(attributes(x[[i]])$key, "=>",names(x)[i], "\n",
                   "\t VALUES =", paste(x[[i]], collapse = ", "), "\n",
                   "\t ALLOWS 'X' = ", attributes(x[[i]])$allowsX, "\n",
                   "\t REQUIRED =", attributes(x[[i]])$required, "\n",
                   "\t DESCRIPTION = ", blockString, "\n"
      )

      # factor fields additionally list their allowed options (levels)
      if (!is.null(levels(x[[i]]))) {
        msg <- paste(msg, "\t OPTIONS = ", paste(levels(x[[i]]), collapse = ", "), "\n\n")
      } else {
        msg <- paste(msg, "\n")
      }
      cat(msg)
    }
  }

  ## CASE 2: Return a 'blueprint' that can be copied from the console to a
  ## script so the user does not need to write down all >50 fields by hand
  if (blueprint) {
    # name of the object as supplied by the caller, reused in the output code
    var <- as.list(sys.call())[[2]]
    names <- names(x)

    for (i in 1:length(x)) {
      # in case of factors also show available levels as comments so you don't
      # have to look it up
      if (is.factor(x[[i]]))
        options <- paste("# OPTIONS:", paste(levels(x[[i]]), collapse = ", "))
      else
        options <- ""

      # determine if values need brackets (strings)
      if (is.numeric(x[[i]]) | is.integer(x[[i]]))
        values <- paste(x[[i]], collapse = ", ")
      if (is.character(x[[i]]) | is.factor(x[[i]]))
        values <- paste0("'", paste0(x[[i]], collapse = "', '"), "'")

      cat(paste0(var, "$`", names[i], "` <- c(", values,") ", options ,"\n"))
    }
    message("\n\t You can copy all lines above to your script and fill in the data.")
  }
}

## ---------------------------------------------------------------------------##
## DOUBLE SQUARE BRACKETS METHOD
#'
@export
`[[<-.DRAC.list` <- function(x, i, value) {
  ## Validating replacement method: rejects input of the wrong class or length,
  ## handles the special 'X' placeholder allowed in some numeric fields,
  ## restricts factor fields to their defined levels, and preserves the
  ## element's attributes (key, description, allowsX, required) across the
  ## assignment.

  ## REJECT ALL INADEQUATE CLASSES ----
  acceptedClasses <- c("integer", "character", "numeric", "factor")
  if (is.na(match(class(value), acceptedClasses))) {
    warning(paste("I cannot use objects of class", class(value)), call. = FALSE)
    return(x)
  }

  ## CHECK INPUT LENGTH ----
  # replacement must keep the field's number of entries unchanged
  length.old <- length(x[[i]])
  length.new <- length(value)
  if (length.old != length.new) {
    warning(paste(names(x)[i], ": Input must be of length", length.old), call. = FALSE)
    return(x)
  }

  ## CHECK INPUT CLASS ----
  class.old <- class(x[[i]])
  class.new <- class(value)

  ## CHECK INPUT FIELDS THAT ALLOW 'X' -----
  # the following checks apply to fields that are normally numeric, but also
  # accept 'X' as input. this EXCLUDES factors!
  if (class.old != "factor") {
    # some input fields allow 'X' as input, so in terms of R can be of class
    # "character" or "numeric/integer". hence, we check if input is "X" and
    # if the field allows it. If so, we change the old class to "character".
    if (any(value == "X") && attributes(x[[i]])$allowsX) {
      if (any(is.na(as.numeric(value[which(value != "X")])))) {
        warning(paste("Cannot coerce <", value[which(value != "X")], "> to a numeric value.",
                      "Input must be numeric or 'X'."), call. = FALSE)
        return(x)
      }
      class.old <- "character"
    }

    # where the input field is already "X" we have to check whether the new
    # non-character input is allowed
    if (!all(is.na(x[[i]]))) {
      if (any(x[[i]] == "X") && attributes(x[[i]])$allowsX) {
        if (any(is.na(as.numeric(value[which(value != "X")])))) {
          warning(paste("Cannot coerce <", value[which(value != "X")], "> to a numeric value.",
                        "Input must be numeric or 'X'. \n"), call. = FALSE)
          return(x)
        }
        class.new <- "character"
        value <- as.character(value)
      }
    }

    # when a numeric input field was inserted an "X" it was coerced to class
    # character. since we are now allowed to insert any character (later tests)
    # we need to make sure that the new input can be coerced to class numeric.
    # and if the new values are numeric, we coerce them to character
    if (attributes(x[[i]])$allowsX && class.old == "character") {
      if (any(is.na(as.numeric(value[which(value != "X")])))) {
        warning(paste("Cannot coerce <", value[which(value != "X")], "> to a numeric value.",
                      "Input must be numeric or 'X'. \n"), call. = FALSE)
        return(x)
      }
      class.new <- "character"
      value <- as.character(value)
    }
  }

  # numeric input can be both of class 'integer' or 'numeric'. We will
  # allow any combination and reject only non-numeric/integer input
  if (class.old == "numeric" || class.old == "integer") {
    if (class.new != "numeric" && class.new != "integer") {
      warning(paste(names(x)[i], ": Input must be of class", class.old), call. = FALSE)
      return(x)
    }
  }

  # for 'factor' and 'character' elements only 'character' input is allowed
  if (class.old == "factor" || class.old == "character") {
    if (class.new != "character") {
      warning(paste(names(x)[i], ": Input must be of class", "character"), call. = FALSE)
      return(x)
    }
  }

  ## CHECK IF VALID OPTION ----
  # in case of 'factor's the user is only allowed input that matches one of
  # the options specified by the factor levels. if it is a valid option,
  # the input is converted to a factor to keep the information.
  if (class.old == "factor") {
    levels <- levels(x[[i]])
    if (any(`%in%`(value, levels) == FALSE)) {
      warning(paste(names(x)[i], ": Invalid option. Valid options are:",
                    paste(levels, collapse = ", ")), call. = FALSE)
      return(x)
    } else {
      value <- factor(value, levels)
    }
  }

  ## WRITE NEW VALUES ----
  # we strip our custom class and the attributes, pass the object to the default generic and
  # finally re-attach our class and attributes
  tmp.attributes <- attributes(x[[i]])[names(attributes(x[[i]])) != "class"]
  class(x) <- "list"
  x <- `[[<-`(x, i, value)
  attributes(x[[i]]) <- tmp.attributes
  if (class.old == "factor")
    class(x[[i]]) <- "factor"
  class(x) <- c("DRAC.list", "list")
  return(x)
}

## ---------------------------------------------------------------------------##
## SINGLE SQUARE BRACKET METHOD

## Single-bracket replacement simply delegates to the validating [[<- method.
#' @export
`[<-.DRAC.list` <- function(x, i, value) {
  return(`[[<-`(x, i, value))
}

## ---------------------------------------------------------------------------##
## DOLLAR SIGN METHOD
#' @export
`$<-.DRAC.list`<- function(x, name, value) {
  # this is straightforward; retrieve the index and pass the object
  # to the custom [[<- function, which does the data verification
  index <- which(names(x) == name)
  x[[index]] <- value
  return(x)
}Luminescence/R/plot_RLum.R0000644000176200001440000001171414264017373015140 0ustar liggesusers#' General plot function for RLum S4 class objects
#'
#' Function calls object specific plot functions for RLum S4 class objects.
#'
#' The function provides a generalised access point for plotting specific
#' [RLum-class] objects.\cr
#' Depending on the input object, the
#' corresponding plot function will be selected. Allowed arguments can be
#' found in the documentations of each plot function.
#'
#' \tabular{lll}{
#' **object** \tab \tab **corresponding plot function** \cr
#' [RLum.Data.Curve-class] \tab : \tab [plot_RLum.Data.Curve] \cr
#' [RLum.Data.Spectrum-class] \tab : \tab [plot_RLum.Data.Spectrum]\cr
#' [RLum.Data.Image-class] \tab : \tab [plot_RLum.Data.Image]\cr
#' [RLum.Analysis-class] \tab : \tab [plot_RLum.Analysis]\cr
#' [RLum.Results-class] \tab : \tab [plot_RLum.Results]
#' }
#'
#' @param object [RLum-class] (**required**):
#' S4 object of class `RLum`. Optional a [list] containing objects of
#' class [RLum-class] can be provided. In this case the function tries to plot
#' every object in this list according to its `RLum` class. Non-RLum objects are
#' removed.
#'
#' @param ... further arguments and graphical parameters that will be passed
#' to the specific plot functions. The only argument that is supported directly is `main`
#' (setting the plot title). In contrast to the normal behaviour `main` can be here provided as
#' [list] and the arguments in the list will dispatched to the plots if the `object`
#' is of type `list` as well.
#'
#' @return Returns a plot.
#'
#' @note The provided plot output depends on the input object.
#'
#' @section Function version: 0.4.4
#'
#' @author
#' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#' @seealso [plot_RLum.Data.Curve], [RLum.Data.Curve-class], [plot_RLum.Data.Spectrum],
#' [RLum.Data.Spectrum-class], [plot_RLum.Data.Image], [RLum.Data.Image-class],
#' [plot_RLum.Analysis], [RLum.Analysis-class], [plot_RLum.Results],
#' [RLum.Results-class]
#'
#'
#' @keywords dplot
#'
#' @examples
#' #load Example data
#' data(ExampleData.CW_OSL_Curve, envir = environment())
#'
#' #transform data.frame to RLum.Data.Curve object
#' temp <- as(ExampleData.CW_OSL_Curve, "RLum.Data.Curve")
#'
#' #plot RLum object
#' plot_RLum(temp)
#'
#' @md
#' @export
plot_RLum<- function(
  object,
  ...
){

  # Define dispatcher function ----------------------------------------------------------
  ## internal helper: maps a single RLum object to its class-specific plot
  ## function; stops with an informative error for non-RLum input
  ##check if object is of class RLum
  RLum.dispatcher <- function(object, ...) {
    if (inherits(object, "RLum")) {
      ##grep object class
      object.class <- is(object)[1]

      ##select which plot function should be used and call it
      switch (
        object.class,
        RLum.Data.Curve = plot_RLum.Data.Curve(object = object, ...),
        RLum.Data.Spectrum = plot_RLum.Data.Spectrum(object = object, ...),
        RLum.Data.Image = plot_RLum.Data.Image(object = object, ...),

        ##here we have to do prevent the partial matching with 'sub' by 'subset'
        RLum.Analysis = if(!grepl(pattern = "subset", x = paste(deparse(match.call()), collapse = " "), fixed = TRUE)){
          plot_RLum.Analysis(object = object, subset = NULL, ...)
        }else{
          plot_RLum.Analysis(object = object, ...)
        },
        RLum.Results = plot_RLum.Results(object = object, ...))

    }else{
      stop(paste0(
        "[plot_RLum()] Sorry, I don't know what to do for object of type '", is(object)[1], "'."
      ), call. = FALSE)
    }
  }

  # Run dispatcher ------------------------------------------------------------------------------
  ##call for the list, if not just proceed as normal
  if(inherits(object, "list")) {
    ##(0) we might have plenty of sublists before we have the list containing only
    ##RLum-objects
    object <- .unlist_RLum(object)
    object <- .rm_nonRLum(object)

    ##(2) check if empty, if empty do nothing ...
if (length(object) != 0) { ## If we iterate over a list, this might be extremely useful to have different plot titles if("main" %in% names(list(...))){ if(is(list(...)$main,"list")){ main.list <- rep(list(...)$main, length = length(object)) } } ##set also mtext, but in a different way if(!"mtext" %in% names(list(...))){ if(is(object[[1]], "RLum.Analysis")){ mtext <- paste("Record:", 1:length(object)) }else{ mtext <- NULL } }else{ mtext <- rep(list(...)$mtext, length.out = length(object)) } if(exists("main.list")){ ##dispatch objects for (i in 1:length(object)) { RLum.dispatcher(object = object[[i]], main = main.list[[i]], mtext = mtext[[i]], ...) } }else{ for (i in 1:length(object)) { RLum.dispatcher(object = object[[i]], mtext = mtext[[i]], ...) } } } }else{ ##dispatch object RLum.dispatcher(object = object, ...) } } Luminescence/R/analyse_SAR.TL.R0000644000176200001440000005220014521207352015667 0ustar liggesusers#' @title Analyse SAR TL measurements #' #' @description The function performs a SAR TL analysis on a #' [RLum.Analysis-class] object including growth curve fitting. #' #' @details This function performs a SAR TL analysis on a set of curves. The SAR #' procedure in general is given by Murray and Wintle (2000). For the #' calculation of the `Lx/Tx` value the function [calc_TLLxTxRatio] is #' used. #' #' **Provided rejection criteria** #' #' `[recyling.ratio]`: calculated for every repeated regeneration dose point. #' #' `[recuperation.rate]`: recuperation rate calculated by #' comparing the `Lx/Tx` values of the zero regeneration point with the `Ln/Tn` #' value (the `Lx/Tx` ratio of the natural signal). 
For methodological #' background see Aitken and Smith (1988) #' #' @param object [RLum.Analysis-class] or a [list] of such objects (**required**) : #' input object containing data for analysis #' #' @param object.background currently not used #' #' @param signal.integral.min [integer] (**required**): #' requires the channel number for the lower signal integral bound #' (e.g. `signal.integral.min = 100`) #' #' @param signal.integral.max [integer] (**required**): #' requires the channel number for the upper signal integral bound #' (e.g. `signal.integral.max = 200`) #' #' @param integral_input [character] (*with default*): #' defines the input for the the arguments `signal.integral.min` and #' `signal.integral.max`. These limits can be either provided `'channel'` #' number (the default) or `'temperature'`. If `'temperature'` is chosen the #' best matching channel is selected. #' #' @param sequence.structure [vector] [character] (*with default*): #' specifies the general sequence structure. Three steps are allowed #' (`"PREHEAT"`, `"SIGNAL"`, `"BACKGROUND"`), in addition a #' parameter `"EXCLUDE"`. This allows excluding TL curves which are not #' relevant for the protocol analysis. (**Note:** None TL are removed by default) #' #' @param rejection.criteria [list] (*with default*): #' list containing rejection criteria in percentage for the calculation. #' #' @param dose.points [numeric] (*optional*): #' option set dose points manually #' #' @param log [character] (*with default*): #' a character string which contains `"x"` if the x-axis is to be logarithmic, #' `"y"` if the y axis is to be logarithmic and `"xy"` or `"yx"` if both axes #' are to be logarithmic. See #' [plot.default]). #' #' @param ... 
further arguments that will be passed to the function [plot_GrowthCurve] #' #' @return #' A plot (*optional*) and an [RLum.Results-class] object is #' returned containing the following elements: #' #' \item{De.values}{[data.frame] containing De-values and further parameters} #' \item{LnLxTnTx.values}{[data.frame] of all calculated `Lx/Tx` values including signal, background counts and the dose points.} #' \item{rejection.criteria}{[data.frame] with values that might by used as rejection criteria. NA is produced if no R0 dose point exists.} #' #' **note:** the output should be accessed using the function [get_RLum] #' #' @note #' **THIS IS A BETA VERSION** #' #' None TL curves will be removed #' from the input object without further warning. #' #' @section Function version: 0.3.0 #' #' @author #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #' @seealso [calc_TLLxTxRatio], [plot_GrowthCurve], [RLum.Analysis-class], #' [RLum.Results-class], [get_RLum] #' #' @references #' Aitken, M.J. and Smith, B.W., 1988. Optical dating: recuperation #' after bleaching. Quaternary Science Reviews 7, 387-393. #' #' Murray, A.S. and Wintle, A.G., 2000. Luminescence dating of quartz using an #' improved single-aliquot regenerative-dose protocol. Radiation Measurements #' 32, 57-73. 
#' #' @keywords datagen plot #' #' @examples #' #' ##load data #' data(ExampleData.BINfileData, envir = environment()) #' #' ##transform the values from the first position in a RLum.Analysis object #' object <- Risoe.BINfileData2RLum.Analysis(TL.SAR.Data, pos=3) #' #' ##perform analysis #' analyse_SAR.TL( #' object = object, #' signal.integral.min = 210, #' signal.integral.max = 220, #' fit.method = "EXP OR LIN", #' sequence.structure = c("SIGNAL", "BACKGROUND")) #' #' @md #' @export analyse_SAR.TL <- function( object, object.background, signal.integral.min, signal.integral.max, integral_input = "channel", sequence.structure = c("PREHEAT", "SIGNAL", "BACKGROUND"), rejection.criteria = list(recycling.ratio = 10, recuperation.rate = 10), dose.points, log = "", ... ){ # Self-call ----------------------------------------------------------------------------------- if(inherits(object, "list")){ if(!all(sapply(object, class) == "RLum.Analysis")) stop("[analyse_SAR.TL()] All elements in the input list need to be of class 'RLum.Analysis'!", call. = FALSE) ##run sequence results <- lapply(object, function(o){ analyse_SAR.TL( object = o, object.background = object.background, signal.integral.min = signal.integral.min, signal.integral.max = signal.integral.max, integral_input = integral_input, sequence.structure = sequence.structure, rejection.criteria = rejection.criteria, dose.points = dose.points, log = log, ... ) }) ##combine results results <- merge_RLum(results) ##return return(results) } # CONFIG ----------------------------------------------------------------- ##set allowed curve types type.curves <- c("TL") ##=============================================================================# # General Integrity Checks --------------------------------------------------- ##GENERAL ##MISSING INPUT if(missing("object")==TRUE){ stop("[analyse_SAR.TL()] No value set for 'object'!", call. 
= FALSE) } if(missing("signal.integral.min") == TRUE){ stop("[analyse_SAR.TL()] No value set for 'signal.integral.min'!", call. = FALSE) } if(missing("signal.integral.max") == TRUE){ stop("[analyse_SAR.TL()] No value set for 'signal.integral.max'!", call. = FALSE) } ##INPUT OBJECTS if(is(object, "RLum.Analysis") == FALSE){ stop("[analyse_SAR.TL()] Input object is not of type 'RLum.Analyis'!", call. = FALSE) } # Protocol Integrity Checks -------------------------------------------------- ##Remove non TL-curves from object by selecting TL curves object@records <- get_RLum(object, recordType = type.curves) ##ANALYSE SEQUENCE OBJECT STRUCTURE ##set vector for sequence structure temp.protocol.step <- rep(sequence.structure,length(object@records))[1:length(object@records)] ##grep object strucute temp.sequence.structure <- structure_RLum(object) ##set values for step temp.sequence.structure[,"protocol.step"] <- temp.protocol.step ##remove TL curves which are excluded temp.sequence.structure <- temp.sequence.structure[which( temp.sequence.structure[,"protocol.step"]!="EXCLUDE"),] ##check integrity; signal and bg range should be equal if(length( unique( temp.sequence.structure[temp.sequence.structure[,"protocol.step"]=="SIGNAL","n.channels"]))>1){ stop(paste( "[analyse_SAR.TL()] Signal range differs. Check sequence structure.\n", temp.sequence.structure )) } ##check if the wanted curves are a multiple of the structure if(length(temp.sequence.structure[,"id"])%%length(sequence.structure)!=0) stop("[analyse_SAR.TL()] Input TL curves are not a multiple of the sequence structure.", call. 
= FALSE) # # Calculate LnLxTnTx values -------------------------------------------------- ##grep IDs for signal and background curves TL.preheat.ID <- temp.sequence.structure[ temp.sequence.structure[,"protocol.step"] == "PREHEAT","id"] TL.signal.ID <- temp.sequence.structure[ temp.sequence.structure[,"protocol.step"] == "SIGNAL","id"] TL.background.ID <- temp.sequence.structure[ temp.sequence.structure[,"protocol.step"] == "BACKGROUND","id"] ##comfort ... translate integral limits from temperature to channel if(integral_input == "temperature"){ signal.integral.min <- which.min(abs( signal.integral.min - get_RLum(object, record.id = TL.signal.ID[1])[, 1] )) signal.integral.max <- which.min(abs( signal.integral.max - get_RLum(object, record.id = TL.signal.ID[1])[, 1] )) } ##calculate LxTx values using external function for(i in seq(1,length(TL.signal.ID),by=2)){ temp.LnLxTnTx <- get_RLum( calc_TLLxTxRatio( Lx.data.signal = get_RLum(object, record.id = TL.signal.ID[i]), Lx.data.background = if (length(TL.background.ID) == 0) { NULL } else{ get_RLum(object, record.id = TL.background.ID[i]) }, Tx.data.signal = get_RLum(object, record.id = TL.signal.ID[i + 1]), Tx.data.background = if (length(TL.background.ID) == 0){ NULL }else{ get_RLum(object, record.id = TL.background.ID[i + 1]) }, signal.integral.min, signal.integral.max ) ) ##grep dose temp.Dose <- object@records[[TL.signal.ID[i]]]@info$IRR_TIME ##take about NULL values if(is.null(temp.Dose)){ temp.Dose <- NA } ##bind data.frame temp.LnLxTnTx <- cbind(Dose=temp.Dose, temp.LnLxTnTx) if(exists("LnLxTnTx")==FALSE){ LnLxTnTx <- data.frame(temp.LnLxTnTx) }else{ LnLxTnTx <- rbind(LnLxTnTx,temp.LnLxTnTx) } } ##set dose.points manually if argument was set if(!missing(dose.points)){ temp.Dose <- dose.points LnLxTnTx$Dose <- dose.points } # Set regeneration points ------------------------------------------------- #generate unique dose id - this are also the # for the generated points temp.DoseID <- 
c(0:(length(LnLxTnTx[["Dose"]]) - 1)) temp.DoseName <- paste0("R", temp.DoseID) temp.DoseName <- cbind(Name = temp.DoseName, Dose = LnLxTnTx[["Dose"]]) ##set natural temp.DoseName[temp.DoseName[, "Name"] == "R0", "Name"] <- "Natural" ##set R0 temp.DoseName[temp.DoseName[,"Name"]!="Natural" & temp.DoseName[,"Dose"]==0,"Name"]<-"R0" ##find duplicated doses (including 0 dose - which means the Natural) temp.DoseDuplicated<-duplicated(temp.DoseName[,"Dose"]) ##combine temp.DoseName temp.DoseName<-cbind(temp.DoseName,Repeated=temp.DoseDuplicated) ##correct value for R0 (it is not really repeated) temp.DoseName[temp.DoseName[,"Dose"]==0,"Repeated"]<-FALSE ##combine in the data frame temp.LnLxTnTx <- data.frame(Name = temp.DoseName[, "Name"], Repeated = as.logical(temp.DoseName[, "Repeated"])) LnLxTnTx<-cbind(temp.LnLxTnTx,LnLxTnTx) LnLxTnTx[,"Name"]<-as.character(LnLxTnTx[,"Name"]) # Calculate Recycling Ratio ----------------------------------------------- ##Calculate Recycling Ratio if(length(LnLxTnTx[LnLxTnTx[,"Repeated"]==TRUE,"Repeated"])>0){ ##identify repeated doses temp.Repeated<-LnLxTnTx[LnLxTnTx[,"Repeated"]==TRUE,c("Name","Dose","LxTx")] ##find concering previous dose for the repeated dose temp.Previous<-t(sapply(1:length(temp.Repeated[,1]),function(x){ LnLxTnTx[LnLxTnTx[,"Dose"]==temp.Repeated[x,"Dose"] & LnLxTnTx[,"Repeated"]==FALSE,c("Name","Dose","LxTx")] })) ##convert to data.frame temp.Previous<-as.data.frame(temp.Previous) ##set column names temp.ColNames<-sapply(1:length(temp.Repeated[,1]),function(x){ paste(temp.Repeated[x,"Name"],"/", temp.Previous[temp.Previous[,"Dose"]==temp.Repeated[x,"Dose"],"Name"], sep="") }) ##Calculate Recycling Ratio RecyclingRatio<-as.numeric(temp.Repeated[,"LxTx"])/as.numeric(temp.Previous[,"LxTx"]) ##Just transform the matrix and add column names RecyclingRatio<-t(RecyclingRatio) colnames(RecyclingRatio)<-temp.ColNames }else{RecyclingRatio<-NA} # Calculate Recuperation Rate --------------------------------------------- 
##Recuperation Rate if("R0" %in% LnLxTnTx[,"Name"]==TRUE){ Recuperation<-round(LnLxTnTx[LnLxTnTx[,"Name"]=="R0","LxTx"]/ LnLxTnTx[LnLxTnTx[,"Name"]=="Natural","LxTx"],digits=4) }else{Recuperation<-NA} # Combine and Evaluate Rejection Criteria --------------------------------- RejectionCriteria <- data.frame( citeria = c(colnames(RecyclingRatio), "recuperation rate"), value = c(RecyclingRatio,Recuperation), threshold = c( rep(paste("+/-", rejection.criteria$recycling.ratio/100) ,length(RecyclingRatio)), paste("", rejection.criteria$recuperation.rate/100) ), status = c( if(is.na(RecyclingRatio)==FALSE){ sapply(1:length(RecyclingRatio), function(x){ if(abs(1-RecyclingRatio[x])>(rejection.criteria$recycling.ratio/100)){ "FAILED" }else{"OK"}})}else{NA}, if(is.na(Recuperation)==FALSE & Recuperation>rejection.criteria$recuperation.rate){"FAILED"}else{"OK"} )) ##============================================================================## ##PLOTTING ##============================================================================## # Plotting - Config ------------------------------------------------------- ##grep plot parameter par.default <- par(no.readonly = TRUE) on.exit(par(par.default)) ##grep colours col <- get("col", pos = .LuminescenceEnv) ##set layout matrix layout(matrix(c( 1, 1, 2, 2, 1, 1, 2, 2, 3, 3, 4, 4, 3, 3, 4, 4, 5, 5, 5, 5 ), 5, 4, byrow = TRUE)) par(oma = c(0, 0, 0, 0), mar = c(4, 4, 3, 3)) ## 1 -> TL Lx ## 2 -> TL Tx ## 3 -> TL Lx Plateau ## 4 -> TL Tx Plateau ## 5 -> Legend ##recalculate signal.integral from channels to temperature signal.integral.temperature <- c(object@records[[TL.signal.ID[1]]]@data[signal.integral.min,1] : object@records[[TL.signal.ID[1]]]@data[signal.integral.max,1]) ##warning if number of curves exceed colour values if(length(col) 0) { mtext("[FAILED]", col = "red") } } # Plotting GC ---------------------------------------- #reset par par(par.default) ##create data.frame temp.sample <- data.frame( Dose = LnLxTnTx$Dose, LxTx = 
LnLxTnTx$LxTx, LxTx.Error = LnLxTnTx$LxTx.Error, TnTx = LnLxTnTx$TnTx ) ##set NA values to 0 temp.sample[is.na(temp.sample$LxTx.Error),"LxTx.Error"] <- 0 ##run curve fitting temp.GC <- try(plot_GrowthCurve( sample = temp.sample, ... )) ##check for error if(inherits(temp.GC, "try-error")){ return(NULL) }else{ temp.GC <- get_RLum(temp.GC)[, c("De", "De.Error")] } ##add rejection status if(length(grep("FAILED",RejectionCriteria$status))>0){ temp.GC <- data.frame(temp.GC, RC.Status="FAILED") }else{ temp.GC <- data.frame(temp.GC, RC.Status="OK") } # Return Values ----------------------------------------------------------- newRLumResults.analyse_SAR.TL <- set_RLum( class = "RLum.Results", data = list( data = temp.GC, LnLxTnTx.table = LnLxTnTx, rejection.criteria = RejectionCriteria ), info = list(info = sys.call()) ) return(newRLumResults.analyse_SAR.TL) } Luminescence/R/combine_De_Dr.R0000644000176200001440000006363114367174076015711 0ustar liggesusers#'@title Estimate Individual Age using Bayesian Inference #' #'@description A Bayesian robust estimation of central age from equivalent dose #' measurements under the assumption that the dose rate is modelled by #' finite Gaussian mixture model. #' #'@param theta [numeric] (**required**): the weight vector of the Gaussian mixture #' #'@param mu [numeric] (**required**): is the mean vector of the Gaussian mixture #' #'@param sigma [numeric] (**required**): is the standard deviation vector of the Gaussian mixture #' #'@param De [numeric] (**required**): the equivalent dose sample #' #'@param s [numeric] (**required**): the vector of measurement errors on De. 
#' #'@param sig0 [numeric] (**required**): the prior shrinkage parameter #' #'@param Age_range [numeric] (*with default*): the age range to investigate #' #'@param method_control [list] (*with default*): parameters passed down #' to the jags process #' #'@param verbose [logical] (*with default*): enable/disable terminal feedback #' #'@return An [RLum.Results-class] object to be used in [combine_De_Dr] #' #'@section Function version: 0.1.0 #' #'@note The function is intended to be called by [combine_De_Dr], however, for #' reasons of transparency #' #'@author Anne Philippe, Université de Nantes (France), #' Jean-Michel Galharret, Université de Nantes (France), #' Norbert Mercier, IRAMAT-CRP2A, Université Bordeaux Montaigne (France), #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #'@examples #' n <- 1000 #' sdt <- 0.3 #' Dr <- stats::rlnorm (n, 0, sdt) #' int_OD <- 0.1 #' tildeDr = Dr * (1 + rnorm(length(Dr), 0, int_OD)) #' De <- c(50 * sample(Dr, 50, replace = TRUE), 10, 12, 200, 250) #' k <- length(De) #' s <- stats::rnorm(k, 10, 2) #' a <- De / mean(tildeDr) #' sig_a2 <- a ^ 2 * (s / De) ^ 2 #' sig0 <- sqrt(1 / mean(1 / sig_a2)) #' fit <- mclust::Mclust(tildeDr, model = "V") #' theta <- fit$parameters$pro #' mu <- fit$parameters$mean #' sigma <- sqrt(fit$parameters$variance$sigmasq) #' Age_range <- c(0, Dr * (1 + rnorm(length(Dr), 0, int_OD[1]))) #' res <- .calc_IndividualAgeModel(theta, mu, sigma, De, s, sig0, Age_range = Age_range) #'@md #'@noRd .calc_IndividualAgeModel <- function( theta, mu, sigma, De, s, sig0, Age_range, method_control = list(), verbose = TRUE ){ # Set parameters and models ----------------------------------------------- nobs <- length(De) event1 <- "model{ for( i in 1 : N ) { D_e[i] ~ dnorm(a[i] * mu, tau[i]) tau[i] <- 1 / (a[i] * sigma) ^ 2 De[i] ~ dnorm(D_e[i], prec2[i]) a[i] ~ dnorm(A, prec_a[i]) u[i] ~ dunif(0,1) prec_a[i] <- 1 / s02 * u[i] / (1 - u[i]) prec2[i] <- 1 / (s2[i]) sig_a[i] <- 1 / 
sqrt(prec_a[i]) } A ~ dunif(Amin, Amax) }" event2 <- "model{ for( i in 1 : N ) { D_e[i] ~ dnorm(a[i] * mu[z[i]], tau[i]) tau[i] <- 1 / (a[i] * sigma[z[i]]) ^ 2 z[i] ~ dcat(theta) De[i] ~ dnorm(D_e[i], prec2[i]) a[i] ~ dnorm(A, prec_a[i]) u[i] ~ dunif(0, 1) prec_a[i] <- 1 / s02 * u[i] / (1 - u[i]) prec2[i] <- 1 / (s2[i]) sig_a[i] <- 1 / sqrt(prec_a[i]) } A ~ dunif(Amin, Amax) }" data1 <- list( 'theta' = theta, 'mu' = mu, 'sigma' = sigma, 'N' = nobs , 'De' = De, 's2' = s ^ 2, 's02' = sig0[1] ^ 2, 'Amin' = Age_range[1], 'Amax' = Age_range[2] ) # Run Bayesian model ------------------------------------------------------ method_control <- modifyList( x = list( variable.names = c('A', 'a', 'sig_a'), n.chains = 4, n.adapt = 1000, n.iter = 5000, thin = 1, progress.bar = if(verbose) "text" else "none", quiet = if(verbose) FALSE else TRUE, diag = if(verbose) TRUE else FALSE, return_mcmc = FALSE ), val = method_control) on.exit(close(model)) ## select model if(length(theta) == 1) { data1$theta <- NULL model <- textConnection(event1) } else { model <- textConnection(event2) } ## run model if(verbose) cat("(1) Running Bayesian modelling 'Individual Age Model' ... 
") jags <- rjags::jags.model( file = model, data = data1, n.chains = method_control$n.chains, n.adapt = method_control$n.adapt, quiet = method_control$quiet ) stats::update( jags, n.iter = method_control$n.iter, progress.bar = method_control$progress.bar, quiet = method_control$quiet ) samp <- rjags::coda.samples( model = jags, variable.names = method_control$variable.names, n.iter = method_control$n.iter, thin = method_control$thin, progress.bar = method_control$progress.bar ) if(verbose & method_control$quiet) cat("DONE") if(method_control$diag) { cat("\n[.calc_IndividualAgeModel()]\n") print(coda::gelman.diag(samp)) } # Return ------------------------------------------------------------------ return(set_RLum( "RLum.Results", data = list( A = unlist(samp[, "A"]), a = do.call(rbind, samp[, 2:(nobs + 1)]), sig_a = do.call(rbind, samp[, (2 + nobs):(2 * nobs + 1)]), model = paste(jags$model(), ""), mcmc_IAM = if(method_control$return_mcmc) samp else NULL), info = list(call = sys.call()) )) } #'@title Central Bayesian Central Age Model #' #'@description A Bayesian estimation of central age from equivalent dose measurements #'under the assumption that the dose rate is modelled by finite Gaussian mixture model. #'MCMC outputs provide to JAGS program. #' #'@param theta [numeric] (**required**): the weight vector of the Gaussian mixture #' #'@param mu [numeric] (**required**): is the mean vector of the Gaussian mixture #' #'@param sigma [numeric] (**required**): is the standard deviation vector of the Gaussian mixture #' #'@param De [numeric] (**required**): the equivalent dose sample #' #'@param s [numeric] (**required**): the vector of measurement errors on De. 
#' #'@param Age_range [numeric] (*with default*): the age range to investigate #' #'@param method_control [list] (*with default*): parameters passed down to the jags process #' #'@param verbose [logical] (*with default*): enable/disable terminal feedback #' #'@return An [RLum.Results-class] object #' #'@section Function version: 0.1.0 #' #'@author Anne Philippe, Université de Nantes (France), #'Jean-Michel Galharret, Université de Nantes (France), #'Norbert Mercier, IRAMAT-CRP2A, Université Bordeaux Montaigne (France), #'Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #'@md #'@noRd .calc_BayesianCentralAgeModel <- function( theta, mu, sigma, De, s, Age_range, method_control = list(), verbose = TRUE ){ # Set models -------------------------------------------------------------- central_age_model1 <- " model{ for( i in 1:J) { D_e[i] ~ dnorm(A*mu,1/(A*sigma)^2) De[i] ~ dnorm(D_e[i], prec2[i]) prec2[i] <- 1/(s2[i]) } A ~ dunif(Amin,Amax) }" central_age_model2 <- " model{ for( i in 1:J) { D_e[i] ~ dnorm(A*mu[z[i]],tau[i]) tau[i]<-1/(A*sigma[z[i]])^2 z[i] ~ dcat(theta) De[i] ~ dnorm(D_e[i], prec2[i]) prec2[i] <- 1/(s2[i]) } A ~ dunif(Amin,Amax) }" # Run Bayesian modelling -------------------------------------------------- method_control <- modifyList( x = list( variable.names = c('A', 'D_e'), n.chains = 4, n.adapt = 1000, n.iter = 5000, thin = 1, progress.bar = if(verbose) "text" else "none", quiet = if(verbose) FALSE else TRUE, diag = if(verbose) TRUE else FALSE, return_mcmc = FALSE ), val = method_control) on.exit(close(model)) data <- list( 'theta' = theta, 'mu' = mu, 'sigma' = sigma, 'De' = De, 'J' = length(De), 's2' = s ^ 2, 'Amin' = Age_range[1], 'Amax' = Age_range[2] ) ## select model if(length(theta) == 1) { data$theta <- NULL model <- textConnection(central_age_model1) } else { model <- textConnection(central_age_model2) } ## run modelling if(verbose) cat("\n(2) Running Bayesian modelling 'Bayesian Central Age Model' ... 
") jags2 <- rjags::jags.model( file = model, data = data, n.chains = method_control$n.chains, n.adapt = method_control$n.adapt, quiet = method_control$quiet ) stats::update( object = jags2, n.iter = method_control$n.iter, progress.bar = method_control$progress.bar, quiet = method_control$quiet ) samp2 <- rjags::coda.samples( model = jags2, variable.names = method_control$variable.names, n.iter = method_control$n.iter, thin = method_control$thin, progress.bar = method_control$progress.bar ) if(verbose & method_control$quiet) cat("DONE\n") if(method_control$diag) { cat("\n[.calc_BayesianCentralAgeModel()]\n") print(coda::gelman.diag(samp2)) } # Return ------------------------------------------------------------------ return(set_RLum( "RLum.Results", data = list( A = unlist(samp2[, "A"]), D_e = do.call(rbind, samp2[, -1]), model = paste(jags2$model(), ""), mcmc_BCAM = if(method_control$return_mcmc) samp2 else NULL), info = list(call = sys.call()) )) } #'@title Combine Dose Rate and Equivalent Dose Distribution #' #'@description A Bayesian statistical analysis of OSL age requiring dose rate sample. #'Estimation contains a preliminary step for detecting outliers in the equivalent #'dose sample. #' #'@details #' #'**Outlier detection** #' #'Two different outlier detection methods are implemented (full details are given #'in the cited literature). #' #'1. The *default* and recommend method, uses quantiles to compare prior and #'posterior distributions of the individual variances of the equivalent doses. #'If the corresponding quantile in the corresponding posterior distribution is larger #'than the quantile in the prior distribution, the value is marked #'as outlier (cf. Galharret et al., preprint) #' #'2. The alternative method employs the method suggested by Rousseeuw and Croux (1993) #'using the absolute median distance. 
#' #'**Parameters available for `method_control`** #' #'The parameters listed below are used to granular control Bayesian modelling using #'[rjags::rjags]. Internally the functions `.calc_IndividualAgeModel()` and #'`.calc_BayesianCentraAgelModel()`. The parameter settings affect both models. #'Note: `method_control` expects a **named** list of parameters #' #'\tabular{llll}{ #' **PARAMETER** \tab **TYPE** \tab **DEFAULT** \tab **REMARKS** \cr #' `variable.names_IAM` \tab [character] \tab `c('A', 'a', 'sig_a')` \tab variables names to be monitored in the modelling process using the internal function `.calc_IndividualAgeModel()`\cr #' `variable.names_BCAM` \tab [character] \tab `c('A', 'D_e')` \tab variables names to be monitored in the modelling process using the internal function `.calc_BayesianCentraAgelModel()`\cr #' `n.chains` \tab [integer] \tab `4` \tab number of MCMC chains\cr #' `n.adapt` \tab [integer] \tab `1000` \tab number of iterations for the adaptation\cr #' `n.iter` \tab [integer] \tab `5000` \tab number of iterations to monitor cf. [rjags::coda.samples]\cr #' `thin` \tab [numeric] \tab `1` \tab thinning interval for the monitoring cf. [rjags::coda.samples]\cr #' `diag` \tab [logical] \tab `FALSE` \tab additional terminal convergence diagnostic. #' `FALSE` if `verbose = FALSE`\cr #' `progress.bar` \tab [logical] \tab `FALSE` \tab enable/disable progress bar. `FALSE` if `verbose = FALSE`\cr #' `quiet` \tab [logical] \tab `TRUE` \tab silence terminal output. 
Set to `TRUE` if `verbose = FALSE`\cr #' `return_mcmc`\tab [logical] \tab `FALSE` \tab return additional MCMC diagnostic information\cr #'} #' #'@param De [numeric] (**required**): a equivalent dose sample #' #'@param s [numeric] (**required**): a vector of measurement errors on the equivalent dose #' #'@param Dr [numeric] (**required**): a dose rate sample #' #'@param int_OD [numeric] (**required**): the intrinsic overdispersion, typically the standard deviation #'characterizing a dose-recovery test distribution #' #'@param Age_range [numeric] (*with default*): the age range to be investigated by the algorithm, the larger #'the value the more iterations are needed and the longer it takes. Should not be set too narrow, cut #'the algorithm some slack. #' #'@param outlier_threshold [numeric] (*with default*): the required significance level used #'for the outlier detection. If set to `1`, no outliers are removed. If #'`outlier_method = "RousseeuwCroux1993"`, the median distance is used as outlier threshold. #'Please see details for further information. #' #'@param outlier_method [character] (*with default*): select the outlier detection #'method, either `"default"` or `"RousseeuwCroux1993"`. See details for further information. #' #'@param outlier_analysis_plot [logical] (*with default*): enables/disables the outlier analysis plot. Note: the outlier analysis will happen with or without plot output #' #'@param method_control [list] (*with default*): named [list] of further parameters passed down #' to the [rjags::rjags] modelling #' #'@param par_local [logical] (*with default*): if set to `TRUE` the function uses its #'own [graphics::par] settings (which will end in two plots next to each other) #' #'@param verbose [logical] (*with default*): enable/disable terminal feedback #' #'@param plot [logical] (*with default*): enable/disable plot output #' #'@param ... 
a few further arguments to fine-tune the plot output such as #'`cdf_ADr_quantiles` (`TRUE`/`FALSE`), `legend.pos`, `legend` (`TRUE`/`FALSE`) #' #'@return The function returns a plot if `plot = TRUE` and an [RLum.Results-class] #'object with the following slots: #' #' `@data`\cr #' `.. $Ages`: a [numeric] vector with the modelled ages to be further analysed or visualised\cr #' `.. $Ages_stats`: a [data.frame] with sum HPD, CI 68% and CI 95% for the ages \cr #' `.. $outliers_index`: the index with the detected outliers\cr #' `.. $cdf_ADr_mean` : empirical cumulative density distribution A * Dr (mean)\cr #' `.. $cdf_ADr_quantiles` : empirical cumulative density distribution A * Dr (quantiles .025,.975)\cr #' `.. $cdf_De_no_outlier` : empirical cumulative density distribution of the De with no outliers\cr #' `.. $cdf_De_initial` : empirical cumulative density distribution of the initial De\cr #' `.. $mcmc_IAM` : the MCMC list of the Individual Age Model, only of `method_control = list(return_mcmc = TRUE)` otherwise `NULL`\cr #' `.. $mcmc_BCAM` : the MCMC list of the Bayesian Central Age Model, only of `method_control = list(return_mcmc = TRUE)` otherwise `NULL`\cr #' #' `@info`\cr #' `.. $call`: the original function call\cr #' `.. $model_IAM`: the BUGS model used to derive the individual age\cr #' `.. $model_BCAM`: the BUGS model used to calculate the Bayesian Central Age\cr #' #'@references #' #'Mercier, N., Galharret, J.-M., Tribolo, C., Kreutzer, S., Philippe, A., preprint. #'Luminescence age calculation through Bayesian convolution of equivalent dose and #'dose-rate distributions: the De_Dr model. Geochronology, 1-22. #' #'Galharret, J-M., Philippe, A., Mercier, N., preprint. Detection of outliers with #'a Bayesian hierarchical model: application to the single-grain luminescence dating method. #'Electronic Journal of Applied Statistics #' #'**Further reading** #' #'Rousseeuw, P.J., Croux, C., 1993. Alternatives to the median absolute deviation. 
#'Journal of the American Statistical Association 88, 1273–1283. \doi{10.2307/2291267} #' #'Rousseeuw, P.J., Debruyne, M., Engelen, S., Hubert, M., 2006. Robustness and outlier detection in chemometrics. #'Critical Reviews in Analytical Chemistry 36, 221–242. \doi{10.1080/10408340600969403} #' #'@author Anne Philippe, Université de Nantes (France), #'Jean-Michel Galharret, Université de Nantes (France), #'Norbert Mercier, IRAMAT-CRP2A, Université Bordeaux Montaigne (France), #'Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #'@seealso [plot_OSLAgeSummary], [rjags::rjags], [mclust-package] #' #'@section Function version: 0.1.0 #' #'@keywords dplot distribution datagen #' #'@examples #'## set parameters #' Dr <- stats::rlnorm (1000, 0, 0.3) #' De <- 50*sample(Dr, 50, replace = TRUE) #' s <- stats::rnorm(50, 10, 2) #' #'## run modelling #'## note: modify parameters for more realistic results #'\dontrun{ #'results <- combine_De_Dr( #' Dr = Dr, #' int_OD = 0.1, #' De, #' s, #' Age_range = c(0,100), #' method_control = list( #' n.iter = 100, #' n.chains = 1)) #' #'## show models used #'writeLines(results@info$model_IAM) #'writeLines(results@info$model_BCAM) #'} #' #'@md #'@export combine_De_Dr <- function( De, s, Dr, int_OD, Age_range = c(1,300), outlier_threshold = .05, outlier_method = "default", outlier_analysis_plot = FALSE, method_control = list(), par_local = TRUE, verbose = TRUE, plot = TRUE, ... ) { # Check input data -------------------------------------------------------- if (!all(t_pkg <- c( requireNamespace("rjags", quietly = TRUE), requireNamespace("coda", quietly = TRUE), requireNamespace("mclust", quietly = TRUE)))) { t_names <- c('rjags', 'coda', 'mclust') stop(paste0("[combine_De_Dr()] To use this function you have to first install the package(s) ", paste(t_names[!t_pkg], collapse = ",")), call. 
= FALSE) } # Integrity checks -------------------------------------------------------- if(length(De) != length(s)) stop("[combine_De_Dr()] 'De' and 's' are not of similar length!", call. = FALSE) # Prepare data ------------------------------------------------------------ ## we have to fetch the function otherwise ## we would need it in import instead of suggests mclustBIC <- mclust::mclustBIC ## Estimation of the rate dose Dr1 by a Gaussian Mixture Model tildeDr <- Dr * (1 + rnorm(length(Dr), 0, int_OD[1])) fit <- mclust::Mclust(data = tildeDr, modelNames = "V", verbose = FALSE) theta <- fit$parameters$pro mu <- fit$parameters$mean sigma <- sqrt(fit$parameters$variance$sigmasq) a <- De / mean(tildeDr) sig_a2 <- a ^ 2 * (s / De) ^ 2 sig0 <- sqrt(1 / mean(1 / sig_a2)) # Set parameters ---------------------------------------------------------- method_control <- modifyList( x = list( variable.names_IAM = c('A', 'a', 'sig_a'), variable.names_BCAM = c('A', 'D_e'), n.chains = 4, n.adapt = 1000, n.iter = 5000, thin = 1, progress.bar = "none", quiet = TRUE, diag = FALSE, return_mcmc = FALSE ), val = method_control) # Bayesian Modelling IAM -------------------------------------------------- if(verbose) cat("\n[combine_De_Dr()]\n") fit_IAM <- .calc_IndividualAgeModel( theta = theta, mu = mu, sigma = sigma, De = De, s = s, sig0 = sig0, Age_range = Age_range[1:2], verbose = verbose, method_control = list( variable.names = method_control$variable.names_IAM, n.chains = method_control$n.chains, n.adapt = method_control$n.adapt, n.iter = method_control$n.iter, thin = method_control$thin, progress.bar = method_control$progress.bar, quiet = method_control$quiet, diag = method_control$diag, return_mcmc = method_control$return_mcmc) ) # Outlier detection ------------------------------------------------------- ## set threshold for outliers alpha <- outlier_threshold[1] ## apply method ... 
default is method develop by Jean-Michel and Anne if(outlier_method == "RousseeuwCroux1993") { ## calculate the median of the sig_a xj <- log(matrixStats::colMedians(fit_IAM$sig_a)) MAD <- 1.483 * median(abs(xj - median(xj))) test <- (xj - median(xj)) / MAD out <- sort(which(test > alpha)) } else { sig_max <- sig0 * ((1 - alpha) / alpha) ^ .5 test <- vapply(1:length(De), function(j){ mean(fit_IAM$sig_a[, j] >= sig_max) }, numeric(1)) out <- sort(which(test > alpha)) } ##some terminal output if(verbose){ if (length(out) > 0) { cat( paste0( "\n >> Outliers detected: ", length(out), "/", length(De), " (", round(length(out) / length(De) * 100, 1), "%)" ) ) } } ## apply the removal if (length(out) == 0) { De1 <- De s1 <- s } else { De1 <- De[-out] s1 <- s[-out] } # Bayesian modelling BCAM ------------------------------------------------- fit_BCAM <- .calc_BayesianCentralAgeModel( theta, mu, sigma, De = De1, s = s1, Age_range = Age_range, verbose = verbose, method_control = list( variable.names = method_control$variable.names_BCAM, n.chains = method_control$n.chains, n.adapt = method_control$n.adapt, n.iter = method_control$n.iter, thin = method_control$thin, progress.bar = method_control$progress.bar, quiet = method_control$quiet, diag = method_control$diag, return_mcmc = method_control$return_mcmc) ) # Calculate EDFC ------------------------------------------------- ## calculate various parameters D_e <- fit_BCAM$D_e A2 <- fit_BCAM$A ## calculate bandwidths h <- density(De)$bw h1 <- density(De1)$bw t <- seq(min(D_e), max(D_e), length.out = min(1000, round(max(D_e) - min(D_e), 0))) ind <- min(5000, length(A2)) subsamp <- sample(1:length(A2), ind, replace = FALSE) cdf_ADr <- matrix(0, nrow = ind, ncol = length(t)) ## De distribution re-sampled without outliers -> De2 ## De distribution re-sampled initial -> De3 De2 <- rnorm(length(subsamp), sample(De1, size = length(subsamp), replace = TRUE), h1) De3 <- rnorm(length(subsamp), sample(De, size = length(subsamp), replace = 
TRUE), h) ## calculate ecdf cdf_De_no_outlier<- stats::ecdf(De2)(t) cdf_De_initial <- stats::ecdf(De3)(t) for (i in 1:ind) cdf_ADr[i, ] <- stats::ecdf(A2[subsamp[i]] * tildeDr)(t) ## calculate mean value and quantiles for the ecdf A * Dr cdf_ADr_mean <- matrixStats::colMeans2(cdf_ADr) cdf_ADr_quantiles <- matrixStats::colQuantiles(cdf_ADr, probs = c(.025,.975)) ## further values to ease the interpretation d <- density(fit_BCAM$A) HPD <- d$x[which.max(d$y)[1]] CI_68 <- .calc_HPDI(fit_BCAM$A, prob = 0.68) CI_95 <- .calc_HPDI(fit_BCAM$A, prob = 0.95) # Additional terminal output ---------------------------------------------- if(verbose){ cat("(3) Age results (presumably in ka) \n") cat(" -----------------------------------\n") cat(" Age (HPD) :\t", format(round(HPD,2), nsmall = 2), "\n") cat(" Age (CI 68%):\t", paste(format(round(range(CI_68),2), nsmall =2), collapse = " : "), "\n") cat(" Age (CI 95%):\t", paste(format(round(range(CI_95),2), nsmall =2), collapse = " : "), "\n") cat(" -----------------------------------\n") } # Plotting ---------------------------------------------------------------- if(plot){ ##check incoming plot settings plot_settings <- modifyList(x = list( cdf_ADr_quantiles = FALSE, legend = TRUE, legend.pos = "bottomright" ), list(...)) ##make sure we reset plots if(par_local) { old.par <- par(mfrow = c(1, 2)) on.exit(par(old.par)) } if(outlier_analysis_plot){ N <- length(De) ##plot with outliers boxplot(fit_IAM$sig_a, outline = FALSE, col = (abs(as.numeric( 1:length(De) %in% out ) - 1) + 2), main = "Outlier detection", xaxt = "n", xlab = expression(paste("Index of ", sigma[a]))) ## add axis axis(side = 1, at = 1:length(De), labels = 1:length(De), ) mtext( text = paste0(length(out), "/", N, " (", round(length(out) / N * 100, 1), "%)"), side = 3, cex = 0.8 ) abline(h = sig0, col = "violet") ##plot sd of outliers if(length(out) > 0){ boxplot(fit_IAM$sig_a[, out], outline = FALSE, names = out, ylab = "Individual sd [a.u.]", main = "Outliers: 
posterior distr.") abline(h = sig0, col = "violet") } else { shape::emptyplot() text(0.5, 0.5, "No outlier detected!") } } ##plot age summary plot_OSLAgeSummary( object = fit_BCAM, level = 0.68, rug = FALSE, polygon_col = rgb(100, 149, 237, 75, maxColorValue = 255), verbose = FALSE ) ## open plot area plot(NA, xlim = range(t), ylim = c(0, 1), ylab = "ecdf (mean)", xlab = "Dose [Gy]", main= "ECDF") ## add quantile range (only for A * Dr) if(plot_settings$cdf_ADr_quantiles){ polygon( x = c(t, rev(t)), y = c(cdf_ADr_quantiles[,1], rev(cdf_ADr_quantiles[,2])), col = rgb(1,0,0,0.2), lty = 0) } ##add mean lines for the ecdfs lines(t, cdf_ADr_mean, col = 2, lty = 1, lwd = 2) lines(t, cdf_De_no_outlier, type = "l", col = 3, lty = 2, lwd = 2) lines(t, cdf_De_initial, type = "l", col = 4, lty = 3, lwd = 2) if(plot_settings$legend){ legend( plot_settings$legend.pos, legend = c( expression(A %*% Dr), expression(paste(D[e], " no outliers")), expression(paste(D[e], " initial"))), lty = c(1,2,3), bty = "n", col = c(2,3,4), cex = 0.8) } } # Return results ---------------------------------------------------------- return(set_RLum( "RLum.Results", data = list( Ages = fit_BCAM$A, Ages_stats = data.frame( HPD = HPD, CI_68_lower = CI_68[1], CI_68_upper = CI_68[2], CI_95_lower = CI_95[1], CI_95_upper = CI_95[2]), outliers_index = out, cdf_ADr_mean = cdf_ADr_mean, cdf_ADr_quantiles = cdf_ADr_quantiles, cdf_De_no_outlier = cdf_De_no_outlier, cdf_De_initial = cdf_De_initial, mcmc_IAM = fit_IAM$mcmc_IAM, mcmc_BCAM = fit_BCAM$mcmc_BCAM ), info = list( call = sys.call(), model_IAM = fit_IAM$model, model_BCAM = fit_BCAM$model) )) } Luminescence/R/calc_TLLxTxRatio.R0000644000176200001440000001525714521207352016343 0ustar liggesusers#'@title Calculate the Lx/Tx ratio for a given set of TL curves -beta version- #' #'@description Calculate Lx/Tx ratio for a given set of TL curves. 
#' #'@details #' **Uncertainty estimation** #' #' The standard errors are calculated using the following generalised equation: #' #' \deqn{SE_{signal} = abs(Signal_{net} * BG_f /BG_{signal})} #' #' where \eqn{BG_f} is a term estimated by calculating the standard deviation of the sum of #' the \eqn{L_x} background counts and the sum of the \eqn{T_x} background counts. However, #' if both signals are similar the error becomes zero. #' #' @param Lx.data.signal [RLum.Data.Curve-class] or [data.frame] (**required**): #' TL data (x = temperature, y = counts) (TL signal) #' #' @param Lx.data.background [RLum.Data.Curve-class] or [data.frame] (*optional*): #' TL data (x = temperature, y = counts). #' If no data are provided no background subtraction is performed. #' #' @param Tx.data.signal [RLum.Data.Curve-class] or [data.frame] (**required**): #' TL data (x = temperature, y = counts) (TL test signal) #' #' @param Tx.data.background [RLum.Data.Curve-class] or [data.frame] (*optional*): #' TL data (x = temperature, y = counts). #' If no data are provided no background subtraction is performed. #' #' @param signal.integral.min [integer] (**required**): #' channel number for the lower signal integral bound #' (e.g. `signal.integral.min = 100`) #' #' @param signal.integral.max [integer] (**required**): #' channel number for the upper signal integral bound #' (e.g. `signal.integral.max = 200`) #' #' @return #' Returns an S4 object of type [RLum.Results-class]. #' Slot `data` contains a [list] with the following structure: #' #' ``` #' $ LxTx.table #' .. $ LnLx #' .. $ LnLx.BG #' .. $ TnTx #' .. $ TnTx.BG #' .. $ Net_LnLx #' .. $ Net_LnLx.Error #' ``` #' #' @note #' **This function has still BETA status!** Please further note that a similar #' background for both curves results in a zero error and is therefore set to `NA`. 
#' #' @section Function version: 0.3.3 #' #' @author #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) \cr #' Christoph Schmidt, University of Bayreuth (Germany) #' #' @seealso [RLum.Results-class], [analyse_SAR.TL] #' #' @keywords datagen #' #' @examples #' #' ##load package example data #' data(ExampleData.BINfileData, envir = environment()) #' #' ##convert Risoe.BINfileData into a curve object #' temp <- Risoe.BINfileData2RLum.Analysis(TL.SAR.Data, pos = 3) #' #' #' Lx.data.signal <- get_RLum(temp, record.id=1) #' Lx.data.background <- get_RLum(temp, record.id=2) #' Tx.data.signal <- get_RLum(temp, record.id=3) #' Tx.data.background <- get_RLum(temp, record.id=4) #' signal.integral.min <- 210 #' signal.integral.max <- 230 #' #' output <- calc_TLLxTxRatio( #' Lx.data.signal, #' Lx.data.background, #' Tx.data.signal, #' Tx.data.background, #' signal.integral.min, #' signal.integral.max) #' get_RLum(output) #' #' @md #' @export calc_TLLxTxRatio <- function( Lx.data.signal, Lx.data.background = NULL, Tx.data.signal, Tx.data.background = NULL, signal.integral.min, signal.integral.max ){ ##--------------------------------------------------------------------------## ##(1) - a few integrity check ##check DATA TYPE differences if(is(Lx.data.signal)[1] != is(Tx.data.signal)[1]) stop("[calc_TLLxTxRatio()] Data types of Lx and Tx data differ!", call. = FALSE) ##check for allowed data.types if(!inherits(Lx.data.signal, "data.frame") & !inherits(Lx.data.signal, "RLum.Data.Curve")){ stop("[calc_TLLxTxRatio()] Input data type for not allowed. Allowed are 'RLum.Data.Curve' and 'data.frame'", call. 
= FALSE) } ##--------------------------------------------------------------------------## ## Type conversion (assuming that all input variables are of the same type) if(inherits(Lx.data.signal, "RLum.Data.Curve")){ Lx.data.signal <- as(Lx.data.signal, "matrix") Tx.data.signal <- as(Tx.data.signal, "matrix") if(!missing(Lx.data.background) && !is.null(Lx.data.background)) Lx.data.background <- as(Lx.data.background, "matrix") if(!missing(Tx.data.background) && !is.null(Tx.data.background)) Tx.data.background <- as(Tx.data.background, "matrix") } ##(d) - check if Lx and Tx curves have the same channel length if(length(Lx.data.signal[,2])!=length(Tx.data.signal[,2])){ stop("[calc_TLLxTxRatio()] Channel numbers differ for Lx and Tx data!", call. = FALSE)} ##(e) - check if signal integral is valid if(signal.integral.min < 1 | signal.integral.max > length(Lx.data.signal[,2])){ stop("[calc_TLLxTxRatio()] signal.integral is not valid!", call. = FALSE)} # Background Consideration -------------------------------------------------- LnLx.BG <- TnTx.BG <- NA ##Lx.data if(!is.null(Lx.data.background)) LnLx.BG <- sum(Lx.data.background[signal.integral.min:signal.integral.max, 2]) ##Tx.data if(!is.null(Tx.data.background)) TnTx.BG <- sum(Tx.data.background[signal.integral.min:signal.integral.max, 2]) # Calculate Lx/Tx values -------------------------------------------------- ## preset variables net_LnLx <- net_LnLx.Error <- net_TnTx <- net_TnTx.Error <- NA ## calculate values LnLx <- sum(Lx.data.signal[signal.integral.min:signal.integral.max, 2]) TnTx <- sum(Tx.data.signal[signal.integral.min:signal.integral.max, 2]) ##calculate standard deviation of background if(!is.na(LnLx.BG) & !is.na(TnTx.BG)){ BG.Error <- sd(c(LnLx.BG, TnTx.BG)) if(BG.Error == 0) { warning( "[calc_TLLxTxRatio()] The background signals for Lx and Tx appear to be similar, no background error was calculated.", call. 
= FALSE ) BG.Error <- NA } } ## calculate net LnLx if(!is.na(LnLx.BG)){ net_LnLx <- LnLx - LnLx.BG net_LnLx.Error <- abs(net_LnLx * BG.Error/LnLx.BG) } ## calculate net TnTx if(!is.na(TnTx.BG)){ net_TnTx <- TnTx - TnTx.BG net_TnTx.Error <- abs(net_TnTx * BG.Error/TnTx.BG) } ## calculate LxTx if(is.na(net_TnTx)){ LxTx <- LnLx/TnTx LxTx.Error <- NA }else{ LxTx <- net_LnLx/net_TnTx LxTx.Error <- abs(LxTx*((net_LnLx.Error/net_LnLx) + (net_TnTx.Error/net_TnTx))) } ##COMBINE into a data.frame temp.results <- data.frame( LnLx, LnLx.BG, TnTx, TnTx.BG, net_LnLx, net_LnLx.Error, net_TnTx, net_TnTx.Error, LxTx, LxTx.Error ) # Return values ----------------------------------------------------------- return(set_RLum( class = "RLum.Results", data = list(LxTx.table = temp.results), info = list(call = sys.call()) )) } Luminescence/R/read_TIFF2R.R0000644000176200001440000000273214264017373015152 0ustar liggesusers#'@title Import TIFF Image Data into R #' #'@description Simple wrapper around [tiff::readTIFF] to import TIFF images #'and TIFF image stacks to be further processed within the package `'Luminescence'` #' #'@param file [character] (**required**): file name #' #'@return [RLum.Data.Image-class] object #' #'@author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #'@section Function version: 0.1.1 #' #'@seealso [tiff::readTIFF], [RLum.Data.Image-class] #' #'@keywords IO #' #'@examples #' #'\dontrun{ #'file <- file.choose() #'image <- read_TIFF2R(file) #' #'} #' #'@md #'@export read_TIFF2R <- function( file ){ # Integrity --------------------------------------------------------------- ## most of the users don't need this import, no need to bother them ## with required libraries if (!requireNamespace("tiff", quietly = TRUE)) stop("Importing TIFF files requires the package tiff.\n", "To install this package run 'install.packages('tiff')' in your R console.", call. 
= FALSE)

  if(!file.exists(file))
    stop("[read_TIFF2R()] File does not exist or is not readable!", call. = FALSE)

  # Import ------------------------------------------------------------------
  ## import
  temp <- tiff::readTIFF(file, all = TRUE, as.is = TRUE)
  if(is(temp, "list")) temp <- as(temp, "RLum.Data.Image")

  # Return ------------------------------------------------------------------
  set_RLum(class = "RLum.Data.Image", data = temp@data)

}
Luminescence/R/calc_ThermalLifetime.R0000644000176200001440000002630414264017373017261 0ustar liggesusers
#' Calculates the Thermal Lifetime using the Arrhenius equation
#'
#' The function calculates the thermal lifetime of charges for given E (in eV), s (in 1/s) and
#' T (in deg. C.) parameters. The function can be used in two operational modes:
#'
#' **Mode 1 `(profiling = FALSE)`**
#'
#' An arbitrary set of input parameters (E, s, T) can be provided and the
#' function calculates the thermal lifetimes using the Arrhenius equation for
#' all possible combinations of these input parameters. An array with 3-dimensions
#' is returned that can be used for further analyses or graphical output (see example 1)
#'
#' **Mode 2 `(profiling = TRUE)`**
#'
#' This mode tries to profile the variation of the thermal lifetime for a chosen
#' temperature by accounting for the provided E and s parameters and their corresponding
#' standard errors, e.g., `E = c(1.600, 0.001)`
#' The calculation based on a Monte Carlo simulation, where values are sampled from a normal
#' distribution (for E and s).
#'
#' **Used equation (Arrhenius equation)**
#'
#' \deqn{\tau = 1/s exp(E/kT)}
#' where:
#' \eqn{\tau} in s as the mean time an electron spends in the trap for a given \eqn{T},
#' \eqn{E} trap depth in eV,
#' \eqn{s} the frequency factor in 1/s,
#' \eqn{T} the temperature in K and \eqn{k} the Boltzmann constant in eV/K (cf. Furetta, 2010).
#'
#'
#' @param E [numeric] (**required**):
#' vector of trap depths in eV,
#' if `profiling = TRUE` only the first two elements are considered
#'
#' @param s [numeric] (**required**):
#' vector of frequency factor in 1/s,
#' if `profiling = TRUE` only the first two elements are considered
#'
#' @param T [numeric] (*with default*):
#' temperature in deg. C for which the lifetime(s) will be calculated.
#' A vector can be provided.
#'
#' @param output_unit [character] (*with default*):
#' output unit of the calculated lifetimes, accepted
#' entries are: `"Ma"`, `"ka"`, `"a"`, `"d"`, `"h"`, `"min"`, `"s"`
#'
#' @param profiling [logical] (*with default*):
#' this option allows to estimate uncertainties based on
#' given E and s parameters and their corresponding standard error
#' (cf. details and examples section)
#'
#' @param profiling_config [list] (*optional*):
#' allows to set configuration parameters used for the profiling
#' (and only have an effect here). Supported parameters are:
#'
#' - `n` (number of MC runs),
#' - `E.distribution` (distribution used for the re-sampling for E) and
#' - `s.distribution` (distribution used for the re-sampling for s).
#'
#' Currently only the normal distribution is supported
#' (e.g., `profiling_config = list(E.distribution = "norm")`
#'
#' @param verbose [logical]:
#' enables/disables verbose mode
#'
#' @param plot [logical]:
#' enables/disables output plot, currently only in combination with `profiling = TRUE`.
#'
#' @param ... further arguments that can be passed in combination with the plot output.
#' Standard plot parameters are supported ([plot.default])
#'
#' @return
#' A [RLum.Results-class] object is returned along with a plot (for
#' `profiling = TRUE`). The output object contains the following slots:
#'
#' **`@data`**
#'
#' \tabular{lll}{
#' **Object** \tab **Type** \tab **Description** \cr
#' `lifetimes` \tab [array] or [numeric] \tab calculated lifetimes \cr
#' `profiling_matrix` \tab [matrix] \tab profiling matrix used for the MC runs
#' }
#'
#' **`@info`**
#'
#' \tabular{lll}{
#' **Object** \tab **Type** \tab **Description** \cr
#' `call` \tab `call` \tab the original function call
#' }
#'
#' @note
#' The profiling is currently based on re-sampling from a normal distribution, this
#' distribution assumption might be, however, not valid for given E and s parameters.
#'
#' @section Function version: 0.1.0
#'
#' @author
#' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#' @seealso [graphics::matplot], [stats::rnorm][stats::Normal], [get_RLum]
#'
#' @references
#'
#' Furetta, C., 2010. Handbook of Thermoluminescence, Second Edition. World Scientific.
#'
#' @keywords datagen
#'
#' @examples
#'
#' ##EXAMPLE 1
#' ##calculation for two trap-depths with similar frequency factor for different temperatures
#' E <- c(1.66, 1.70)
#' s <- 1e+13
#' T <- 10:20
#' temp <- calc_ThermalLifetime(
#'   E = E,
#'   s = s,
#'   T = T,
#'   output_unit = "Ma"
#' )
#' contour(x = E, y = T, z = temp$lifetimes[1,,],
#'         ylab = "Temperature [\u00B0C]",
#'         xlab = "Trap depth [eV]",
#'         main = "Thermal Lifetime Contour Plot"
#' )
#' mtext(side = 3, "(values quoted in Ma)")
#'
#' ##EXAMPLE 2
#' ##profiling of thermal life time for E and s and their standard error
#' E <- c(1.600, 0.003)
#' s <- c(1e+13,1e+011)
#' T <- 20
#' calc_ThermalLifetime(
#'   E = E,
#'   s = s,
#'   T = T,
#'   profiling = TRUE,
#'   output_unit = "Ma"
#')
#'
#' @md
#' @export
calc_ThermalLifetime <- function(
  E,
  s,
  T = 20,
  output_unit = "Ma",
  profiling = FALSE,
  profiling_config = NULL,
  verbose = TRUE,
  plot = TRUE,
  ...
){

  # Integrity -----------------------------------------------------------------------------------

  ## scalar condition: use || instead of the vectorised |
  if(missing(E) || missing(s)){
    stop("[calc_ThermalLifetime()] 'E' or 's' or both are missing, but required.",
         call. = FALSE)
  }

  ## profiling needs a value AND a standard error for both E and s,
  ## otherwise rnorm() below would silently be called with sd = NA
  if(profiling && (length(E) < 2 || length(s) < 2)){
    stop("[calc_ThermalLifetime()] 'profiling = TRUE' requires 'E' and 's' of length 2 (value, standard error).",
         call. = FALSE)
  }

  # Set variables -------------------------------------------------------------------------------

  ##Boltzmann constant
  k <- 8.6173324e-05 #eV/K

  ##recalculate temperature
  T.K <- T + 273.15 #K

  ##SETTINGS FOR PROFILING
  ##profiling settings
  profiling_settings <- list(
    n = 1000,
    E.distribution = "norm",
    s.distribution = "norm"
  )

  ##replace if set
  if(!is.null(profiling_config)){
    profiling_settings <- modifyList(profiling_settings, profiling_config)
  }

  ##check for odd input values
  if (profiling_settings$n < 1000){
    profiling_settings$n <- 1000
    warning("[calc_ThermalLifetime()] minimum MC runs are 1000, parameter 'n' in profiling_config reset to 1000.")
  }

  # Calculation ---------------------------------------------------------------------------------

  ##set function for the calculation (Arrhenius equation, tau = 1/s * exp(E/(k*T)))
  f <- function(E, s, T.K) {
    1 / s * exp(E / (k * T.K))
  }

  ##PROFILING
  if(profiling) {

    ##set profiling matrix
    profiling_matrix <- matrix(NA, ncol = 4, nrow = profiling_settings$n)

    ##fill matrix
    ##E
    profiling_matrix[, 1] <-
      if(profiling_settings$E.distribution == "norm"){
        rnorm(profiling_settings$n, mean = E[1], sd = E[2])
      }else{
        stop("[calc_ThermalLifetime()] unknown distribution setting for E profiling")
      }

    ##s
    profiling_matrix[, 2] <-
      if (profiling_settings$s.distribution == "norm") {
        rnorm(profiling_settings$n, mean = s[1], sd = s[2])
      } else{
        stop("[calc_ThermalLifetime()] unknown distribution setting for s profiling")
      }

    ##T: only the first temperature is considered for the profiling
    profiling_matrix[, 3] <- rep(T.K[1], profiling_settings$n)

    ##calculate lifetimes
    profiling_matrix[, 4] <-
      f(profiling_matrix[, 1], profiling_matrix[, 2], profiling_matrix[, 3])

    ##reduce T vector to its first entry (matches the profiling above)
    T <- T[1]

    ##set lifetimes
    lifetimes <- profiling_matrix[, 4]

  } else{

    ##set empty profiling matrix
    profiling_matrix <- matrix()

    ##calculate lifetimes for all combinations of T, E and s
    lifetimes <- vapply(
      X = T.K,
      FUN = function(i) {
        vapply(
          X = E,
          FUN = function(j) {
            f(E = j, s = s, T.K = i)
          },
          FUN.VALUE = vector(mode = "numeric", length = length(s))
        )
      },
      FUN.VALUE = matrix(numeric(), ncol = length(E), nrow = length(s))
    )

    ##transform to an array in either case to have the same output
    if (!is(lifetimes, "array")) {
      lifetimes <- array(lifetimes, dim = c(length(s), length(E), length(T)))
    }

    ##set dimnames to make reading more clear
    dimnames(lifetimes) <- list(s, E, paste0("T = ", T, " \u00B0C"))
  }

  ##re-calculate lifetimes according to the chosen output unit
  temp.lifetimes <- switch (
    output_unit,
    "s" = lifetimes,
    "min" = lifetimes / 60,
    "h" = lifetimes / 60 / 60,
    "d" = lifetimes / 60 / 60 / 24,
    "a" = lifetimes / 60 / 60 / 24 / 365,
    "ka" = lifetimes / 60 / 60 / 24 / 365 / 1000,
    "Ma" = lifetimes / 60 / 60 / 24 / 365 / 1000 / 1000
  )

  ##check for invalid values; switch() returns NULL for an unmatched unit,
  ##in which case the lifetimes simply remain in seconds
  if(is.null(temp.lifetimes)){
    output_unit <- "s"
    warning("[calc_ThermalLifetime()] 'output_unit' unknown, reset to 's'")
  }else{
    lifetimes <- temp.lifetimes
    rm(temp.lifetimes)
  }

  # Terminal output -----------------------------------------------------------------------------
  if(verbose){
    cat("\n[calc_ThermalLifetime()]\n\n")
    if(profiling){
      cat("\tprofiling = TRUE")
      cat("\n\t--------------------------\n")
    }
    cat(paste("\tmean:\t", format(mean(lifetimes), scientific = TRUE), output_unit))
    cat(paste("\n\tsd:\t", format(sd(lifetimes), scientific = TRUE), output_unit))
    cat(paste("\n\tmin:\t", format(min(lifetimes), scientific = TRUE), output_unit))
    if(!profiling){
      cat(paste0(" (@", T[which(lifetimes == min(lifetimes), arr.ind = TRUE)[3]], " \u00B0C)"))
    }
    cat(paste("\n\tmax:\t", format(max(lifetimes), scientific = TRUE), output_unit))
    if(!profiling){
      cat(paste0(" (@", T[which(lifetimes == max(lifetimes), arr.ind = TRUE)[3]], " \u00B0C)"))
    }
    cat("\n\t--------------------------")
    cat(paste0("\n\t(", length(lifetimes), " lifetimes calculated in total)"))
  }

  # Plotting ------------------------------------------------------------------------------------
  if(plot && profiling){

    ##plot settings
    plot.settings <- list(
      main = "Thermal Lifetime Density Plot",
      xlab = paste0("Thermal lifetime [", output_unit, "]"),
      ylab = "Density",
      xlim = NULL,
      ylim = NULL,
      log = "",
      lwd = 1,
      lty = 1,
      col = rgb(0, 0, 0, 0.25)
    )

    ##modify on request
    plot.settings <- modifyList(plot.settings, list(...))

    ##split data and calculate density
    ##set seq
    id_seq <- seq(from = 1, to = length(lifetimes), length.out = 200)

    ##calculate lifetime of the density
    lifetimes_density <- lapply(1:(length(id_seq) - 1), function(x) {
      density(lifetimes[id_seq[x]:id_seq[x + 1]])
    })

    ##get x values
    lifetimes_density.x <- matrix(
      unlist(lapply(1:length(lifetimes_density), function(i){
        lifetimes_density[[i]]$x
      })), nrow = length(lifetimes_density[[1]]$x))

    ##get y values
    lifetimes_density.y <- matrix(
      unlist(lapply(1:length(lifetimes_density), function(i){
        lifetimes_density[[i]]$y
      })), nrow = length(lifetimes_density[[1]]$y))

    ##plot density curves
    graphics::matplot(
      lifetimes_density.x,
      lifetimes_density.y,
      type = "l",
      lwd = plot.settings$lwd,
      lty = plot.settings$lty,
      col = plot.settings$col,
      main = plot.settings$main,
      xlab = plot.settings$xlab,
      ylab = plot.settings$ylab,
      xlim = plot.settings$xlim,
      ylim = plot.settings$ylim,
      log = plot.settings$log
    )
  }

  # Return values -------------------------------------------------------------------------------
  return(set_RLum(
    class = "RLum.Results",
    data = list(lifetimes = lifetimes,
                profiling_matrix = profiling_matrix),
    info = list(call = sys.call())
  ))
}
Luminescence/R/calc_CentralDose.R0000644000176200001440000002562414236146743016413 0ustar liggesusers
#' Apply the central age model (CAM) after Galbraith et al. (1999) to a given
#' De distribution
#'
#' This function calculates the central dose and dispersion of the De
#' distribution, their standard errors and the profile log likelihood function
#' for sigma.
#'
#' This function uses the equations of Galbraith & Roberts (2012). The
#' parameters `delta` and `sigma` are estimated by numerically solving
#' eq. 15 and 16. Their standard errors are approximated using eq. 17.
#' In addition, the profile log-likelihood function for `sigma` is
#' calculated using eq. 18 and presented as a plot. Numerical values of the
#' maximum likelihood approach are **only** presented in the plot and **not**
#' in the console. A detailed explanation on maximum likelihood estimation can
#' be found in the appendix of Galbraith & Laslett (1993, 468-470) and
#' Galbraith & Roberts (2012, 15)
#'
#' @param data [RLum.Results-class] or [data.frame] (**required**):
#' for [data.frame]: two columns with De `(data[,1])` and De error `(data[,2])`
#'
#' @param sigmab [numeric] (*with default*):
#' additional spread in De values.
#' This value represents the expected overdispersion in the data should the sample be
#' well-bleached (Cunningham & Walling 2012, p. 100).
#' **NOTE**: For the logged model (`log = TRUE`) this value must be
#' a fraction, e.g. 0.2 (= 20 \%). If the un-logged model is used (`log = FALSE`),
#' sigmab must be provided in the same absolute units of the De values (seconds or Gray).
#'
#' @param log [logical] (*with default*):
#' fit the (un-)logged central age model to De data
#'
#' @param na.rm [logical] (*with default*): strip `NA` values before the computation proceeds
#'
#' @param plot [logical] (*with default*):
#' plot output
#'
#' @param ... further arguments (`trace`, `verbose`).
#'
#' @return Returns a plot (*optional*) and terminal output. In addition an
#' [RLum.Results-class] object is returned containing the following elements:
#'
#' \item{.$summary}{[data.frame] summary of all relevant model results.}
#' \item{.$data}{[data.frame] original input data}
#' \item{.$args}{[list] used arguments}
#' \item{.$call}{[call] the function call}
#' \item{.$profile}{[data.frame] the log likelihood profile for sigma}
#'
#' The output should be accessed using the function [get_RLum]
#'
#' @section Function version: 1.4.1
#'
#' @author
#' Christoph Burow, University of Cologne (Germany) \cr
#' Based on a rewritten S script of Rex Galbraith, 2010
#'
#' @seealso [plot], [calc_CommonDose], [calc_FiniteMixture],
#' [calc_FuchsLang2001], [calc_MinDose]
#'
#' @references
#' Galbraith, R.F. & Laslett, G.M., 1993. Statistical models for
#' mixed fission track ages. Nuclear Tracks Radiation Measurements 4, 459-470.
#'
#' Galbraith, R.F., Roberts, R.G., Laslett, G.M., Yoshida, H. & Olley,
#' J.M., 1999. Optical dating of single grains of quartz from Jinmium rock
#' shelter, northern Australia. Part I: experimental design and statistical
#' models. Archaeometry 41, 339-364.
#'
#' Galbraith, R.F. & Roberts, R.G., 2012. Statistical aspects of equivalent dose and error calculation and
#' display in OSL dating: An overview and some recommendations. Quaternary
#' Geochronology 11, 1-27.
#'
#' **Further reading**
#'
#' Arnold, L.J. & Roberts, R.G., 2009. Stochastic modelling of multi-grain equivalent dose
#' (De) distributions: Implications for OSL dating of sediment mixtures.
#' Quaternary Geochronology 4, 204-230.
#'
#' Bailey, R.M. & Arnold, L.J., 2006. Statistical modelling of single grain quartz De distributions and an
#' assessment of procedures for estimating burial dose. Quaternary Science
#' Reviews 25, 2475-2502.
#'
#' Cunningham, A.C. & Wallinga, J., 2012. Realizing the potential of fluvial archives using robust OSL chronologies.
#' Quaternary Geochronology 12, 98-106.
#'
#' Rodnight, H., Duller, G.A.T., Wintle, A.G. & Tooth, S., 2006. Assessing the reproducibility and accuracy
#' of optical dating of fluvial deposits. Quaternary Geochronology, 1 109-120.
#'
#' Rodnight, H., 2008. How many equivalent dose values are needed to
#' obtain a reproducible distribution?. Ancient TL 26, 3-10.
#'
#' @examples
#'
#' ##load example data
#' data(ExampleData.DeValues, envir = environment())
#'
#' ##apply the central dose model
#' calc_CentralDose(ExampleData.DeValues$CA1)
#'
#' @md
#' @export
calc_CentralDose <- function(data, sigmab, log = TRUE, na.rm = FALSE, plot = TRUE, ...) {

  ## ============================================================================##
  ## CONSISTENCY CHECK OF INPUT DATA
  ## ============================================================================##

  if (!missing(data)) {
    ## scalar condition: use && instead of the vectorised &
    if (!is(data, "data.frame") && !is(data, "RLum.Results")) {
      stop("[calc_CentralDose()] 'data' has to be of type 'data.frame' or 'RLum.Results'!",
           call. = FALSE)
    } else {
      if (is(data, "RLum.Results")) {
        data <- get_RLum(data, "data")
      }
    }
  }

  ##remove NA values
  if(na.rm == TRUE && any(is.na(data))){
    warning("[calc_CentralDose()] ", length(which(is.na(data))),
            " NA value(s) removed from dataset!", call. = FALSE)
    data <- na.exclude(data)
  }

  ##make sure we consider only the first two columns
  if(ncol(data) < 2 || nrow(data) < 2)
    stop("[calc_CentralDose()] 'data' should have at least two columns and two rows!",
         call. = FALSE)

  ##extract only the first two columns and set column names
  data <- data[,1:2]
  colnames(data) <- c("ED", "ED_Error")

  ## sigmab must never be negative; for the logged model it must additionally
  ## be a fraction <= 1 (parentheses make the intended precedence explicit)
  if (!missing(sigmab)) {
    if (sigmab < 0 || (sigmab > 1 && log))
      stop("[calc_CentralDose()] sigmab needs to be given as a fraction between 0 and 1 (e.g., 0.2)!",
           call. = FALSE)
  }

  ## ============================================================================##
  ## ... ARGUMENTS
  ## ============================================================================##

  options <- list(verbose = TRUE, trace = FALSE)
  options <- modifyList(options, list(...))

  ## ============================================================================##
  ## CALCULATIONS
  ## ============================================================================##

  # set default value of sigmab
  if (missing(sigmab)) sigmab <- 0

  # calculate yu = log(ED) and su = se(logED)
  if (log) {
    yu <- log(data$ED)
    su <- sqrt((data$ED_Error / data$ED)^2 + sigmab^2)
  } else {
    yu <- data$ED
    su <- sqrt((data$ED_Error)^2 + sigmab^2)
  }

  # What does the code do?
  # >> email conversation with Rex Galbraith 2019-06-29
  # >> "fixed point iteration" method to estimate sigma
  # >> starting with a fixed value
  # >> once sqrt(sum((wu^2) * (yu - delta)^2 / sum(wu))) gets equal to 1
  # >> the iteration is complete
  # >> if everything has converged against those fixed values
  # >> this is the maximum likelihood estimate for
  # >> sigma and delta

  # calculate starting values and weights
  sigma <- 0.15 # keep in mind that this is a relative value
  wu <- 1 / (sigma^2 + su^2)
  delta <- sum(wu * yu) / sum(wu)
  n <- length(yu)

  # compute mle's
  for (j in 1:200) {
    delta <- sum(wu * yu) / sum(wu)
    sigma <- sigma * sqrt(sum((wu^2) * (yu - delta)^2 / sum(wu)))
    wu <- 1 / (sigma^2 + su^2)

    # print iterations
    if (options$trace) print(round(c(delta, sigma), 4))
  }

  # save parameters for terminal output
  out.delta <- ifelse(log, exp(delta), delta)
  out.sigma <- ifelse(log, sigma * 100, sigma / out.delta * 100)

  # log likelihood
  llik <- 0.5 * sum(log(wu)) - 0.5 * sum(wu * (yu - delta)^2)
  # save parameter for terminal output
  out.llik <- round(llik, 4)
  Lmax <- llik

  # standard errors
  sedelta <- 1 / sqrt(sum(wu))
  sesigma <- 1 / sqrt(2 * sigma^2 * sum(wu^2))

  # save parameters for terminal output
  if (log) {
    out.sedelta <- sedelta * 100
    out.sesigma <- sesigma
  } else {
    out.sedelta <- sedelta / out.delta * 100
    out.sesigma <- sqrt((sedelta / delta)^2 +
                          (sesigma / out.delta * 100 / out.sigma)^2) * out.sigma / 100
  }

  # profile log likelihood (seq() may fail, e.g. for non-finite sesigma,
  # hence the try() and the try-error guards below)
  sigmax <- sigma
  llik <- 0
  sig0 <- max(0, sigmax - 8 * sesigma)
  sig1 <- sigmax + 9.5 * sesigma
  sig <- try(seq(sig0, sig1, sig1 / 1000), silent = TRUE)

  if (!inherits(sig, "try-error")) {
    # TODO: rewrite this loop as a function and maximise with mle2 ll is the actual
    # log likelihood, llik is a vector of all ll
    for (s in sig) {
      wu <- 1 / (s^2 + su^2)
      mu <- sum(wu * yu) / sum(wu)
      ll <- 0.5 * sum(log(wu)) - 0.5 * sum(wu * (yu - mu)^2)
      llik <- c(llik, ll)
    }
    llik <- llik[-1] - Lmax
  }

  ## ============================================================================##
  ## TERMINAL OUTPUT
  ## ============================================================================##
  if (options$verbose) {
    cat("\n [calc_CentralDose]")
    cat(paste("\n\n----------- meta data ----------------"))
    cat(paste("\n n: ", n))
    cat(paste("\n log: ", log))
    cat(paste("\n----------- dose estimate ------------"))
    cat(paste("\n abs. central dose: ", format(out.delta, digits = 2, nsmall = 2)))
    cat(paste("\n abs. SE: ", format(out.delta * out.sedelta/100, digits = 2, nsmall = 2)))
    cat(paste("\n rel. SE [%]: ", format(out.sedelta, digits = 2, nsmall = 2)))
    cat(paste("\n----------- overdispersion -----------"))
    cat(paste("\n abs. OD: ", format(ifelse(log, sigma * out.delta, sigma), digits = 2, nsmall = 2)))
    cat(paste("\n abs. SE: ", format(ifelse(log, sesigma * out.delta, sesigma), digits = 2, nsmall = 2)))
    cat(paste("\n OD [%]: ", format(out.sigma, digits = 2, nsmall = 2)))
    cat(paste("\n SE [%]: ",
              if (!inherits(sig, "try-error")) {
                format(out.sesigma * 100, digits = 2, nsmall = 2)
              } else {
                "-"
              }))
    cat(paste("\n-------------------------------------\n\n"))
  }

  ## ============================================================================##
  ## RETURN VALUES
  ## ============================================================================##

  if (inherits(sig, "try-error")) {
    out.sigma <- 0
    out.sesigma <- NA
  }

  ## guard against the try-error case: dividing a try-error object by a
  ## numeric would itself throw an error
  if (!log && !inherits(sig, "try-error"))
    sig <- sig / delta

  summary <- data.frame(
    de = out.delta,
    de_err = out.delta * out.sedelta / 100,
    OD = ifelse(log, sigma * out.delta, sigma),
    OD_err = ifelse(log, sesigma * out.delta, sesigma),
    rel_OD = out.sigma,
    rel_OD_err = out.sesigma * 100,
    Lmax = Lmax)

  args <- list(log = log, sigmab = sigmab)

  newRLumResults.calc_CentralDose <- set_RLum(
    class = "RLum.Results",
    data = list(
      summary = summary,
      data = data,
      args = args,
      profile = data.frame(
        sig = if(!inherits(sig, "try-error")) sig else NA,
        llik = llik)
    ),
    info = list(
      call = sys.call()
    )
  )

  ## =========## PLOTTING
  if (plot && !inherits(sig, "try-error"))
    try(plot_RLum.Results(newRLumResults.calc_CentralDose, ...))

  invisible(newRLumResults.calc_CentralDose)
}
Luminescence/R/calc_FastRatio.R0000644000176200001440000003441414236146743016106 0ustar liggesusers
#' Calculate the Fast Ratio for CW-OSL curves
#'
#' Function to calculate the fast ratio of quartz CW-OSL single grain or single
#' aliquot curves after Durcan & Duller (2011).
#'
#' This function follows the equations of Durcan & Duller (2011). The energy
#' required to reduce the fast and medium quartz OSL components to `x` and
#' `x2` \% respectively using eq. 3 to determine channels L2 and L3 (start
#' and end). The fast ratio is then calculated from: \eqn{(L1-L3)/(L2-L3)}.
#'
#' @param object [RLum.Analysis-class], [RLum.Data.Curve-class] or [data.frame] (**required**):
#' x, y data of measured values (time and counts).
#'
#' @param stimulation.power [numeric] (*with default*):
#' Stimulation power in mW/cm^2
#'
#' @param wavelength [numeric] (*with default*):
#' Stimulation wavelength in nm
#'
#' @param sigmaF [numeric] (*with default*):
#' Photoionisation cross-section (cm^2) of the fast component.
#' Default value after Durcan & Duller (2011).
#'
#' @param sigmaM [numeric] (*with default*):
#' Photoionisation cross-section (cm^2) of the medium component.
#' Default value after Durcan & Duller (2011).
#'
#' @param Ch_L1 [numeric] (*with default*):
#' An integer specifying the channel for L1.
#'
#' @param Ch_L2 [numeric] (*optional*):
#' An integer specifying the channel for L2.
#'
#' @param Ch_L3 [numeric] (*optional*):
#' A vector of length 2 with integer values specifying the start and end channels for L3
#' (e.g., `c(40, 50)`).
#'
#' @param x [numeric] (*with default*):
#' \% of signal remaining from the fast component.
#' Used to define the location of L2 and L3 (start).
#'
#' @param x2 [numeric] (*with default*):
#' \% of signal remaining from the medium component.
#' Used to define the location of L3 (end).
#'
#' @param dead.channels [numeric] (*with default*):
#' Vector of length 2 in the form of `c(x, y)`.
#' Channels that do not contain OSL data, i.e. at the start or end of measurement.
#'
#' @param fitCW.sigma [logical] (*optional*):
#' fit CW-OSL curve using [fit_CWCurve] to calculate `sigmaF` and `sigmaM` (**experimental**).
#'
#' @param fitCW.curve [logical] (*optional*):
#' fit CW-OSL curve using [fit_CWCurve] and derive the counts of L2 and L3
#' from the fitted OSL curve (**experimental**).
#'
#' @param plot [logical] (*with default*):
#' plot output (`TRUE`/`FALSE`)
#'
#' @param ... available options: `verbose` ([logical]).
#' Further arguments passed to [fit_CWCurve].
#'
#' @return
#' Returns a plot (*optional*) and an S4 object of type [RLum.Results-class].
#' The slot `data` contains a [list] with the following elements:
#'
#' \item{summary}{[data.frame] summary of all relevant results}
#' \item{data}{the original input data}
#' \item{fit}{[RLum.Results-class] object if either `fitCW.sigma` or `fitCW.curve` is `TRUE`}
#' \item{args}{[list] of used arguments}
#' \item{call}{`[call]` the function call}
#'
#' @section Function version: 0.1.1
#'
#' @author
#' Georgina E. King, University of Bern (Switzerland) \cr
#' Julie A. Durcan, University of Oxford (United Kingdom) \cr
#' Christoph Burow, University of Cologne (Germany)
#'
#' @references
#' Durcan, J.A. & Duller, G.A.T., 2011. The fast ratio: A rapid measure for testing
#' the dominance of the fast component in the initial OSL signal from quartz.
#' Radiation Measurements 46, 1065-1072.
#'
#' Madsen, A.T., Duller, G.A.T., Donnelly, J.P., Roberts, H.M. & Wintle, A.G., 2009.
#' A chronology of hurricane landfalls at Little Sippewissett Marsh, Massachusetts, USA,
#' using optical dating. Geomorphology 109, 36-45.
#'
#' **Further reading**
#'
#' Steffen, D., Preusser, F. & Schlunegger, 2009. OSL quartz age underestimation
#' due to unstable signal components. Quaternary Geochronology 4, 353-362.
#'
#'
#' @seealso [fit_CWCurve], [get_RLum], [RLum.Analysis-class],
#' [RLum.Results-class], [RLum.Data.Curve-class]
#'
#' @examples
#' # load example CW-OSL curve
#' data("ExampleData.CW_OSL_Curve")
#'
#' # calculate the fast ratio w/o further adjustments
#' res <- calc_FastRatio(ExampleData.CW_OSL_Curve)
#'
#' # show the summary table
#' get_RLum(res)
#'
#' @md
#' @export
calc_FastRatio <- function(object,
                           stimulation.power = 30.6,
                           wavelength = 470,
                           sigmaF = 2.6E-17,
                           sigmaM = 4.28E-18,
                           Ch_L1 = 1,
                           Ch_L2 = NULL,
                           Ch_L3 = NULL,
                           x = 1,
                           x2 = 0.1,
                           dead.channels = c(0,0),
                           fitCW.sigma = FALSE,
                           fitCW.curve = FALSE,
                           plot = TRUE,
                           ...)
{
  ## Input verification --------------------------------------------------------
  if (!is.null(Ch_L3) && length(Ch_L3) != 2)
    stop("Input for 'Ch_L3' must be a vector of length 2 (e.g., c(40, 50).",
         call. = FALSE)

  ## Input object handling -----------------------------------------------------
  if (inherits(object, "RLum.Analysis"))
    object <- get_RLum(object)

  if (inherits(object, "RLum.Results"))
    object <- get_RLum(object, "data")

  if (!inherits(object, "list"))
    object <- list(object)

  ## Settings ------------------------------------------------------------------
  settings <- list(verbose = TRUE,
                   n.components.max = 3,
                   fit.method = "LM",
                   output.terminal = FALSE,
                   info = list(),
                   fit = NULL)

  # override defaults with args in ...
  settings <- modifyList(settings, list(...))

  ## Calculations --------------------------------------------------------------
  # iterate over all user provided objects and calculate the FR
  fast.ratios <- lapply(object, function(obj) {

    if (inherits(obj, "RLum.Data.Curve"))
      A <- get_RLum(obj)
    else
      A <- obj

    ## Energy calculation
    # P = user defined stimulation power in mW
    # lambdaLED = wavelength of stimulation source in nm
    P <- stimulation.power
    lamdaLED <- wavelength

    ## Constants
    # c = speed of light, h = Planck's constant
    h <- 6.62607004E-34
    c <- 299792458

    I0 <- (P / 1000) / (h * c / (lamdaLED * 10^-9))
    Ch_width <- max(A[ ,1]) / length(A[ ,1])

    # remove dead channels
    A <- as.data.frame(A[(dead.channels[1] + 1):(nrow(A) - dead.channels[2]), ])
    A[ ,1] <- A[ ,1] - A[1,1]

    # estimate the photo-ionisation crossections of the fast and medium
    # component using the fit_CWCurve function
    # (scalar condition: use || instead of the vectorised |)
    if (fitCW.sigma || fitCW.curve) {
      fitCW.res <- try(fit_CWCurve(A, n.components.max = settings$n.components.max,
                                   fit.method = settings$fit.method,
                                   LED.power = stimulation.power,
                                   LED.wavelength = wavelength,
                                   output.terminal = settings$output.terminal,
                                   plot = plot))
      settings$fit <- fitCW.res

      if (fitCW.sigma) {
        if (!inherits(fitCW.res, "try-error")) {
          sigmaF <- get_RLum(fitCW.res)$cs1
          sigmaM <- get_RLum(fitCW.res)$cs2
          if (settings$verbose) {
            message("\n [calc_FitCWCurve()]\n")
            message("New value for sigmaF: ", format(sigmaF, digits = 3, nsmall = 2))
            message("New value for sigmaM: ", format(sigmaM, digits = 3, nsmall = 2))
          }
        } else {
          if (settings$verbose)
            message("Fitting failed! Please call 'fit_CWCurve()' manually before ",
                    "calculating the fast ratio.")
        }
      }

      if (fitCW.curve) {
        if (!inherits(fitCW.res, "try-error")) {
          nls <- get_RLum(fitCW.res, "fit")
          A[ ,2] <- predict(nls)
        }
      }
    }

    ## The equivalent time in s of L1, L2, L3
    # Use these values to look up the channel
    t_L1 <- 0

    if (is.null(Ch_L2))
      t_L2 <- (log(x / 100)) / (-sigmaF * I0)
    else
      t_L2 <- A[Ch_L2, 1]

    if (is.null(Ch_L3)) {
      t_L3_start <- (log(x / 100)) / (-sigmaM * I0)
      t_L3_end <- (log(x2 / 100)) / (-sigmaM * I0)
    } else {
      t_L3_start <- A[Ch_L3[1], 1]
      t_L3_end <- A[Ch_L3[2], 1]
    }

    ## Channel number(s) of L2 and L3
    if (is.null(Ch_L2))
      Ch_L2 <- which.min(abs(A[,1] - t_L2))

    if (Ch_L2 <= 1) {
      msg <- sprintf("Calculated time/channel for L2 is too small (%.f, %.f). Returned NULL.",
                     t_L2, Ch_L2)
      settings$info <- modifyList(settings$info, list(L2 = msg))
      warning(msg, call. = FALSE)
      return(NULL)
    }

    Ch_L3st <- which.min(abs(A[,1] - t_L3_start))
    Ch_L3end <- which.min(abs(A[,1] - t_L3_end))

    ## Counts in channels L1, L2, L3
    # L1 ----
    Cts_L1 <- A[Ch_L1, 2]

    # L2 ----
    if (Ch_L2 > nrow(A)) {
      msg <- sprintf(paste("The calculated channel for L2 (%i) is equal",
                           "to or larger than the number of available channels (%i).",
                           "Returned NULL."), Ch_L2, nrow(A))
      settings$info <- modifyList(settings$info, list(L2 = msg))
      warning(msg, call. = FALSE)
      return(NULL)
    }
    Cts_L2 <- A[Ch_L2, 2]

    # optional: predict the counts from the fitted curve
    if (fitCW.curve) {
      if (!inherits(fitCW.res, "try-error")) {
        nls <- get_RLum(fitCW.res, "fit")
        Cts_L2 <- predict(nls, list(x = t_L2))
      }
    }

    # L3 ----
    # (scalar condition: use || instead of the vectorised |)
    if (Ch_L3st >= nrow(A) || Ch_L3end > nrow(A)) {
      msg <- sprintf(paste("The calculated channels for L3 (%i, %i) are equal to or",
                           "larger than the number of available channels (%i).",
                           "\nThe background has instead been estimated from the last",
                           "5 channels."), Ch_L3st, Ch_L3end, nrow(A))
      settings$info <- modifyList(settings$info, list(L3 = msg))
      warning(msg, call. = FALSE)

      Ch_L3st <- nrow(A) - 5
      Ch_L3end <- nrow(A)
      t_L3_start <- A[Ch_L3st,1]
      t_L3_end <- A[Ch_L3end,1]
    }
    Cts_L3 <- mean(A[Ch_L3st:Ch_L3end, 2])

    # optional: predict the counts from the fitted curve
    if (fitCW.curve) {
      if (!inherits(fitCW.res, "try-error")) {
        nls <- get_RLum(fitCW.res, "fit")
        Cts_L3 <- mean(predict(nls, list(x = c(t_L3_start, t_L3_end))))
      }
    }

    # Warn if counts are not in decreasing order
    if (Cts_L3 >= Cts_L2)
      warning(sprintf("L3 contains more counts (%.f) than L2 (%.f).",
                      Cts_L3, Cts_L2), call. = FALSE)

    ## Fast Ratio
    FR <- (Cts_L1 - Cts_L3) / (Cts_L2 - Cts_L3)
    if (length(FR) != 1)
      FR <- NA

    ## Fast Ratio - Error calculation
    if (!is.na(FR)) {
      # number of channels the background was derived from
      nBG <- abs(Ch_L3end - Ch_L3st)

      # relative standard errors
      rse_L1 <- sqrt(Cts_L1 + Cts_L3 / nBG) / (Cts_L1 - Cts_L3)
      rse_L2 <- sqrt(Cts_L2 + Cts_L3 / nBG) / (Cts_L2 - Cts_L3)

      # absolute standard errors
      se_L1 <- rse_L1 * (Cts_L1 - Cts_L3)
      se_L2 <- rse_L2 * (Cts_L2 - Cts_L3)

      # absolute standard error on fast ratio
      FR_se <- (sqrt((se_L1 / (Cts_L1 - Cts_L3))^2 +
                       ((se_L2 / (Cts_L2 - Cts_L3))^2))) * FR
      FR_rse <- FR_se / FR * 100
    } else {
      FR_se <- NA
      FR_rse <- NA
    }

    ## Return values -----------------------------------------------------------
    summary <- data.frame(fast.ratio = FR,
                          fast.ratio.se = FR_se,
                          fast.ratio.rse = FR_rse,
                          channels = nrow(A),
                          channel.width = Ch_width,
                          dead.channels.start = as.integer(dead.channels[1]),
                          dead.channels.end = as.integer(dead.channels[2]),
                          sigmaF = sigmaF,
                          sigmaM = sigmaM,
                          I0 = I0,
                          stimulation.power = stimulation.power,
                          wavelength = wavelength,
                          t_L1 = t_L1,
                          t_L2 = t_L2,
                          t_L3_start = t_L3_start,
                          t_L3_end = t_L3_end,
                          Ch_L1 = as.integer(Ch_L1),
                          Ch_L2 = as.integer(Ch_L2),
                          Ch_L3_start = as.integer(Ch_L3st),
                          Ch_L3_end = as.integer(Ch_L3end),
                          Cts_L1 = Cts_L1,
                          Cts_L2 = Cts_L2,
                          Cts_L3 = Cts_L3)

    fast.ratio <- set_RLum(class = "RLum.Results",
                           originator = "calc_FastRatio",
                           data = list(summary = summary,
                                       data = obj,
                                       fit = settings$fit,
                                       args = as.list(sys.call(-2L)[-1]),
                                       call = sys.call(-2L)),
                           info = settings$info)

    ## Console Output ----------------------------------------------------------
    if (settings$verbose) {
      table.names <- c(
        "Fast Ratio\t", " \U02EA Absolute error", " \U02EA Relative error (%)",
        "Channels\t", "Channel width (s)", "Dead channels start",
        "Dead channels end", "Sigma Fast\t", "Sigma Medium\t", "I0\t\t",
        "Stim. power (mW/cm^2)", "Wavelength (nm)", "-\n Time L1 (s)\t",
        "Time L2 (s)\t", "Time L3 start (s)", "Time L3 end (s)",
        "-\n Channel L1\t", "Channel L2\t", "Channel L3 start",
        "Channel L3 end\t", "-\n Counts L1\t", "Counts L2\t", "Counts L3\t")

      cat("\n[calc_FastRatio()]\n")
      cat("\n -------------------------------")
      for (i in 1:ncol(summary)) {
        cat(paste0("\n ", table.names[i], "\t: ",
                   format(summary[1, i], digits = 2, nsmall = 2)))
      }
      cat("\n -------------------------------\n\n")
    }

    ## Plotting ----------------------------------------------------------------
    if (plot)
      try(plot_RLum.Results(fast.ratio, ...))

    # return
    return(fast.ratio)
  }) # End of lapply

  if (length(fast.ratios) == 1)
    fast.ratios <- fast.ratios[[1]]

  invisible(fast.ratios)
}
Luminescence/R/Risoe.BINfileData2RLum.Analysis.R0000644000176200001440000002333114264017373021007 0ustar liggesusers
#' Convert Risoe.BINfileData object to an RLum.Analysis object
#'
#' Converts values from one specific position of a Risoe.BINfileData S4-class
#' object to an RLum.Analysis object.
#'
#' The [RLum.Analysis-class] object requires a set of curves for
#' specific further protocol analyses. However, the [Risoe.BINfileData-class]
#' usually contains a set of curves for different aliquots and different
#' protocol types that may be mixed up. Therefore, a conversion is needed.
#'
#' @param object [Risoe.BINfileData-class] (**required**):
#' `Risoe.BINfileData` object
#'
#' @param pos [numeric] (*optional*): position number of the `Risoe.BINfileData`
#' object for which the curves are stored in the `RLum.Analysis` object.
#' If `length(position)>1` a list of `RLum.Analysis` objects is returned.
#' If nothing is provided every position will be converted.
#' If the position is not valid `NA` is returned.
#'
#' @param grain [vector], [numeric] (*optional*):
#' grain number from the measurement to limit the converted data set
#' (e.g., `grain = c(1:48)`). 
Please be aware that this option may lead to
#' unwanted effects, as the output is strictly limited to the chosen grain
#' number for all position numbers
#'
#' @param run [vector], [numeric] (*optional*):
#' run number from the measurement to limit the converted data set
#' (e.g., `run = c(1:48)`).
#'
#' @param set [vector], [numeric] (*optional*):
#' set number from the measurement to limit the converted data set
#' (e.g., `set = c(1:48)`).
#'
#' @param ltype [vector], [character] (*optional*):
#' curve type to limit the converted data. Commonly allowed values are:
#' `IRSL`, `OSL`, `TL`, `RIR`, `RBR` and `USER`
#' (see also [Risoe.BINfileData-class])
#'
#' @param dtype [vector], [character] (*optional*):
#' data type to limit the converted data. Commonly allowed values are
#' listed in [Risoe.BINfileData-class]
#'
#' @param protocol [character] (*optional*):
#' sets protocol type for analysis object. Value may be used by subsequent
#' analysis functions.
#'
#' @param keep.empty [logical] (*with default*):
#' If `TRUE` (default) an `RLum.Analysis` object is returned even if it does
#' not contain any records. Set to `FALSE` to discard all empty objects.
#'
#' @param txtProgressBar [logical] (*with default*):
#' enables or disables [txtProgressBar].
#'
#' @return Returns an [RLum.Analysis-class] object.
#'
#' @note
#' The `protocol` argument of the [RLum.Analysis-class]
#' object is set to 'unknown' if not stated otherwise.
#'
#' @section Function version: 0.4.2
#'
#' @author
#' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#' @seealso [Risoe.BINfileData-class], [RLum.Analysis-class], [read_BIN2R]
#'
#' @keywords manip
#'
#' @examples
#'
#' ##load data
#' data(ExampleData.BINfileData, envir = environment())
#'
#' ##convert values for position 1
#' Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos = 1)
#'
#' @md
#' @export
Risoe.BINfileData2RLum.Analysis<- function(
  object,
  pos = NULL,
  grain = NULL,
  run = NULL,
  set = NULL,
  ltype = NULL,
  dtype = NULL,
  protocol = "unknown",
  keep.empty = TRUE,
  txtProgressBar = FALSE
){

  # Integrity Check ---------------------------------------------------------
  ## hard stop on a wrong input class; all subsequent slot access (@METADATA)
  ## relies on the Risoe.BINfileData S4 layout
  if (!is(object,"Risoe.BINfileData")){
    stop("[Risoe.BINfileData2RLum.Analysis()] Input object is not of type 'Risoe.BINfileData'.", call. = FALSE)
  }

  if (!is.null(pos) && !is(pos,"numeric")){
    stop("[Risoe.BINfileData2RLum.Analysis()] Argument 'pos' has to be of type numeric.", call. = FALSE)
  }

  ## positions: default to all positions found in the metadata; invalid
  ## requested positions are dropped with a warning (not an error), in
  ## contrast to the run/set/ltype/dtype checks below which stop()
  if (is.null(pos)) {
    pos <- unique(object@METADATA[["POSITION"]])

  } else{
    ##get and check valid positions and remove invalid numbers from the input
    positions.valid <- unique(object@METADATA[, "POSITION"])

    if (length(setdiff(pos, positions.valid)) > 0) {
      warning(
        paste0(
          "[Risoe.BINfileData2RLum.Analysis()] invalid position number skipped: ",
          paste(setdiff(pos, positions.valid), collapse = ", ")
        ),
        call. = FALSE
      )
      pos <- intersect(pos, positions.valid)
    }
  }

  # Grep run and set data ---------------------------------------------------
  ##grain: like pos, invalid grain numbers are filtered out with a warning
  if (is.null(grain)) {
    grain <- unique(object@METADATA[["GRAIN"]])

  }else{
    grain.valid <- unique(object@METADATA[["GRAIN"]])

    if(length(setdiff(grain, grain.valid)) > 0){
      warning(paste0("[Risoe.BINfileData2RLum.Analysis()] Invalid grain number skipped: ",
                     paste(setdiff(grain, grain.valid), collapse = ", ")), call. = FALSE)

      grain <- intersect(grain, grain.valid)
    }
  }

  ##run
  ## NOTE(review): this condition stops only when NO metadata run matches the
  ## requested runs; a mix of valid and invalid run numbers passes silently —
  ## inconsistent with the pos/grain handling above. Confirm before changing.
  if (is.null(run)) {
    run <- unique(object@METADATA[["RUN"]])

  } else{
    if (TRUE %in% unique(unique(object@METADATA[["RUN"]]) %in% run) != TRUE) {
      ##get and check valid positions
      run.valid <- paste(as.character(unique(object@METADATA[, "RUN"])), collapse = ", ")

      stop(
        paste(
          "[Risoe.BINfileData2RLum.Analysis()] run = ",
          run,
          " contain invalid run(s). Valid runs are: ",
          run.valid,
          sep = ""
        )
      )
    }
  }

  #set (same validation pattern as run)
  if(is.null(set)){set <- unique(object@METADATA[["SET"]])

  } else{
    if(TRUE %in% unique(unique(object@METADATA[["SET"]]) %in% set) != TRUE){
      ##get and check valid positions
      set.valid <- paste(as.character(unique(object@METADATA[,"SET"])), collapse=", ")

      stop(paste("[Risoe.BINfileData2RLum.Analysis] set = ", set,
                 " contain invalid set(s). Valid sets are: ", set.valid, sep=""))
    }
  }

  ##ltype (same validation pattern as run)
  if (is.null(ltype)) {
    ltype <- unique(object@METADATA[["LTYPE"]])

  } else{
    if (TRUE %in% unique(unique(object@METADATA[, "LTYPE"]) %in% ltype) != TRUE) {
      ##get and check valid positions
      ltype.valid <- paste(as.character(unique(object@METADATA[, "LTYPE"])), collapse = ", ")

      stop(
        paste(
          "[Risoe.BINfileData2RLum.Analysis] ltype = ",
          ltype,
          " contain invalid ltype(s). Valid ltypes are: ",
          ltype.valid,
          sep = ""
        )
      )
    }
  }

  ##dtype (same validation pattern as run)
  if (is.null(dtype)) {
    dtype <- unique(object@METADATA[["DTYPE"]])

  } else{
    if (TRUE %in% unique(unique(object@METADATA[, "DTYPE"]) %in% dtype) != TRUE) {
      ##get and check valid positions
      dtype.valid <- paste(as.character(unique(object@METADATA[, "DTYPE"])), collapse = ", ")

      stop(
        paste(
          "[Risoe.BINfileData2RLum.Analysis] dtype = ",
          dtype,
          " contain invalid dtype(s). Valid dtypes are: ",
          dtype.valid,
          sep = ""
        )
      )
    }
  }

  # Select values and convert them-----------------------------------------------------------
  ##set progressbar to false if only one position is provided
  if(txtProgressBar & length(pos)<2){
    txtProgressBar <- FALSE
  }

  ##This loop does:
  ## (a) iterating over all possible positions
  ## (b) consider grains in all possible positions
  ## (c) consider other selections
  ## (d) create the RLum.Analysis objects

  ##set progress bar
  ## NOTE(review): pb is never close()d after the loop — confirm whether this
  ## is intentional (the bar reaches max(pos), but the connection stays open)
  if(txtProgressBar){
    pb <- txtProgressBar(min=min(pos),max=max(pos), char="=", style=3)
  }

  ## CAUTION: from here on `object`, `pos` and `grain` are deliberately
  ## shadowed inside the nested lapply() calls; the outer Risoe.BINfileData
  ## is still visible via lexical scoping until each inner `object <- ...`
  ## assignment completes
  object <- lapply(pos, function(pos){

    ##update progress bar
    if(txtProgressBar){
      setTxtProgressBar(pb, value = pos)
    }

    ##if no grain information is given, we select all grains in the particular position
    ## NOTE(review): `grain` was already populated above when NULL, so this
    ## branch looks unreachable — confirm before removing
    if(is.null(grain)){
      grain <- unique(object@METADATA[object@METADATA[["POSITION"]] == pos, "GRAIN"])
    }

    ##loop over the grains and produce RLum.Analysis objects
    object <- lapply(grain, function(grain){

      ##select data
      ##the NA is necessary, as FI readers like to write a NA instead of 0 in the column
      ##and this causes some trouble
      if(is.na(grain)){
        temp_id <- object@METADATA[
          object@METADATA[["POSITION"]] == pos &
            object@METADATA[["RUN"]] %in% run &
            object@METADATA[["SET"]] %in% set &
            object@METADATA[["LTYPE"]] %in% ltype &
            object@METADATA[["DTYPE"]] %in% dtype
          , "ID"]

      }else{
        temp_id <- object@METADATA[
          object@METADATA[["POSITION"]] == pos &
            object@METADATA[["GRAIN"]] == grain &
            object@METADATA[["RUN"]] %in% run &
            object@METADATA[["SET"]] %in% set &
            object@METADATA[["LTYPE"]] %in% ltype &
            object@METADATA[["DTYPE"]] %in% dtype
          , "ID"]
      }

      ##create curve object
      ## (the RHS still sees the outer Risoe.BINfileData as `object`;
      ## only after evaluation is `object` rebound to the RLum.Analysis)
      object <- set_RLum(
        class = "RLum.Analysis",
        records = lapply(temp_id,function(x) {
          .Risoe.BINfileData2RLum.Data.Curve(object, id = x)
        }),
        protocol = protocol,
        originator = "Risoe.BINfileData2RLum.Analysis"
      )

      ## drop empty analysis objects on request
      if (!keep.empty && length(object@records) == 0)
        return(NULL)

      ##add unique id of RLum.Analysis object to each curve object as .pid using internal function
      ## NOTE(review): return value of .set_pid() is discarded — presumably it
      ## modifies the records in place; verify against its definition
      .set_pid(object)

      return(object)
    })

    return(object)
  })

  ##this is necessary to not break with previous code, i.e. if only one element is included
  ##the output is RLum.Analysis and not a list of it
  if(length(object) == 1){
    # special case: single grain data with only 1 position produces a nested list
    # the outer one is of length 1, the nested list has length 100 (100 grains)
    if (is.list(object[[1]]) && length(object[[1]]) > 1)
      invisible(unlist(object))
    else
      invisible(object[[1]][[1]])

  }else{
    invisible(unlist(object))
  }
}
Luminescence/R/get_RLum.R0000644000176200001440000001045414264017373014741 0ustar liggesusers#' General accessors function for RLum S4 class objects
#'
#' Function calls object-specific get functions for RLum S4 class objects.
#'
#' The function provides a generalised access point for specific
#' [RLum-class] objects.\cr
#' Depending on the input object, the corresponding get function will be selected.
#' Allowed arguments can be found in the documentations of the corresponding
#' [RLum-class] class.
#'
#' @param object [RLum-class] (**required**):
#' S4 object of class `RLum` or an object of type [list] containing only objects
#' of type [RLum-class]
#'
#' @param ... further arguments that will be passed to the object specific methods. For
#' further details on the supported arguments please see the class
#' documentation: [RLum.Data.Curve-class], [RLum.Data.Spectrum-class],
#' [RLum.Data.Image-class], [RLum.Analysis-class] and [RLum.Results-class]
#'
#' @return Return is the same as input objects as provided in the list.
#'
#' @section Function version: 0.3.3
#'
#' @author
#' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#' @seealso [RLum.Data.Curve-class], [RLum.Data.Image-class],
#' [RLum.Data.Spectrum-class], [RLum.Analysis-class], [RLum.Results-class]
#'
#' @keywords utilities
#'
#' @examples
#'
#' ##Example based using data and from the calc_CentralDose() function
#'
#' ##load example data
#' data(ExampleData.DeValues, envir = environment())
#'
#' ##apply the central dose model 1st time
#' temp1 <- calc_CentralDose(ExampleData.DeValues$CA1)
#'
#' ##get results and store them in a new object
#' temp.get <- get_RLum(object = temp1)
#'
#' @md
#' @export
setGeneric("get_RLum", function (object, ...) {
  standardGeneric("get_RLum")
})

# Method for get_RLum method for RLum objects in a list for a list of objects -------------------
#' @describeIn get_RLum
#' Returns a list of [RLum-class] objects that had been passed to [get_RLum]
#'
#' @param class [character] (*optional*): allows to define the class that gets selected if
#' applied to a list, e.g., if a list consists of different type of RLum-class objects, this
#' arguments allows to make selection. If nothing is provided, all RLum-objects are treated.
#'
#' @param null.rm [logical] (*with default*): option to get rid of empty and NULL objects
#'
#' @md
#' @export
setMethod("get_RLum",
          signature = "list",
          function(object, class = NULL, null.rm = FALSE, ...){

  ##take care of the class argument: keep only elements whose (first) class
  ##matches the request; if nothing matches the list is left untouched
  ##(original behaviour)
  if(!is.null(class)){
    ##class(x)[1] instead of class(x): vapply() would otherwise error out for
    ##list elements carrying more than one S3 class attribute
    sel <- class[1] == vapply(object, function(x) class(x)[1], character(1))
    if(any(sel)) object <- object[sel]
    rm(sel)
  }

  ##remove all non-RLum objects, this is better than leaving that to the user
  ##seq_along() instead of 1:length() keeps this safe for empty input lists
  selection <- lapply(seq_along(object), function(x){
    if(inherits(object[[x]], what = "RLum")){
      ##it might be the case the object already comes with empty objects, this would
      ##cause a crash
      if(inherits(object[[x]], "RLum.Analysis") && length(object[[x]]@records) == 0)
        return(NULL)

      get_RLum(object[[x]], ...)

    } else {
      warning(paste0("[get_RLum()] object #",x," in the list was not of type 'RLum' and has been removed!"), call. = FALSE)
      return(NULL)
    }
  })

  ##remove empty or NULL objects after the selection ... if wanted
  if(null.rm){
    ##first set all empty objects to NULL ... for RLum.Analysis objects
    selection <- lapply(seq_along(selection), function(x){
      if(inherits(selection[[x]], "RLum.Analysis") && length(selection[[x]]@records) == 0){
        return(NULL)
      }else{
        return(selection[[x]])
      }
    })

    ##get rid of all NULL objects (vapply: type-safe variant of sapply)
    selection <- selection[!vapply(selection, is.null, logical(1))]
  }

  return(selection)
})

#' Method to handle NULL if the user calls get_RLum
#'
#' @describeIn get_RLum
#'
#' Returns NULL
#'
#' @md
#' @export
setMethod("get_RLum", signature = "NULL", function(object, ...){NULL})
Luminescence/R/convert_Activity2Concentration.R0000644000176200001440000001447414521207352021367 0ustar liggesusers#' @title Convert Nuclide Activities to Concentrations and Vice Versa
#'
#' @description The function performs the conversion of the specific activities into
#' concentrations and vice versa for the radioelements U, Th, and K to
#' harmonise the measurement unit with the required data input unit of
#' potential analytical tools for, e.g. dose rate calculation or related
#' functions such as [use_DRAC].
#'
#' @details The conversion from nuclide activity of a sample to nuclide concentration
#' is performed using conversion factors that are based on the mass-related
#' specific activity of the respective nuclide.
#'
#' Constants used in this function were obtained from `https://physics.nist.gov/cuu/Constants/`
#' all atomic weights and composition values from
#' `https://www.nist.gov/pml/atomic-weights-and-isotopic-compositions-relative-atomic-masses`
#' and the nuclide data from `https://www.iaea.org/resources/databases/livechart-of-nuclides-advanced-version`
#'
#' The factors can be calculated using the equation:
#'
#' \deqn{
#' A = N_A \frac{N_{abund}}{N_{mol.mass}} ln(2) / N.half.life
#' }
#'
#' to convert in ppm we further use:
#'
#' \deqn{
#' f = A / 10^6
#' }
#'
#' where:
#'
#' - `N_A` - Avogadro constant in 1/mol
#' - `A` - specific activity of the nuclide in Bq/kg
#' - `N.abund` - relative natural abundance of the isotope
#' - `N.mol.mass` molar mass in kg/mol
#' - `N.half.life` half-life of the nuclide in s
#'
#' example for calculating the activity of the radionuclide U-238:
#'
#' * `N_A` = 6.02214076e+23 (1/mol)
#' * `T_0.5` = 1.41e+17 (s)
#' * `m_U_238` = 0.23802891 (kg/mol)
#' * `U_abund` = 0.992745 (unitless)
#'
#' \deqn{A_{U} = N_{A} * U_{abund} / m_{U_238} * ln(2) / T_{1/2} = 2347046} (Bq/kg)
#'
#' \deqn{f.U = A_{U} / 10^6}
#'
#' @param data [data.frame] **(required)**:
#' provide dose rate data (activity or concentration) in three columns.
#' The first column indicates the nuclide, the 2nd column measured value and
#' in the 3rd column its error value. Allowed nuclide data are
#' `'U-238'`, `'Th-232'` and `'K-40'`. See examples for an example.
#'
#' @param input_unit [character] (*with default*):
#' specify unit of input data given in the dose rate data frame, choose between
#' 'Bq/kg' and 'ppm/%' the default is 'Bq/kg'
#'
#' @param verbose [logical] (*with default*):
#' enable or disable verbose mode
#'
#' @section Function version: 0.1.2
#'
#' @author Margret C. Fuchs, Helmholtz-Institute Freiberg for Resource Technology (Germany)
#'
#' @references
#' Debertin, K., Helmer, R.G., 1988. Gamma- and X-ray Spectrometry with
#' Semiconductor Detectors, Elsevier Science Publishers, p.283
#'
#' Wiechen, A., Ruehle, H., Vogl, K., 2013. Bestimmung der massebezogenen
#' Aktivitaet von Radionukliden. AEQUIVAL/MASSAKT, ISSN 1865-8725,
#' [https://www.bmuv.de/fileadmin/Daten_BMU/Download_PDF/Strahlenschutz/aequival-massakt_v2013-07_bf.pdf]()
#'
#' @keywords IO
#'
#' @note Although written otherwise for historical reasons. Input values must be element values.
#' For instance, if a value is provided for U-238 the function assumes that this value
#' represents the sum (activity or concentration) of U-238, U-235 and U-234.
#' In other words, 1 ppm of U means that this is the composition of 0.992 parts of U-238,
#' 0.000054 parts of U-234, and 0.00072 parts of U-235.
#'
#' @examples
#'
#' ##construct data.frame
#' data <- data.frame(
#'  NUCLIDES = c("U-238", "Th-232", "K-40"),
#'  VALUE = c(40,80,100),
#'  VALUE_ERROR = c(4,8,10),
#'  stringsAsFactors = FALSE)
#'
#' ##perform analysis
#' convert_Activity2Concentration(data)
#'
#' @md
#' @export
convert_Activity2Concentration <- function(
  data,
  input_unit = "Bq/kg",
  verbose = TRUE
){

  # Integrity checks ----------------------------------------------------------------------------
  if(missing(data))
    stop("[convert_Activity2Concentration()] I'm still waiting for input data ...", call. = FALSE)

  if(ncol(data)<3)
    stop("[convert_Activity2Concentration()] Input data.frame should have at least three columns.", call. = FALSE)

  # Set output data.frame -----------------------------------------------------------------------
  output <- data.frame(
    NUCLIDE = rep(NA, nrow(data)),
    ACTIVIY = rep(NA, nrow(data)),
    ACTIVIY_ERROR = rep(NA, nrow(data)),
    CONC = rep(NA, nrow(data)),
    CONC_ERROR = rep(NA, nrow(data)),
    stringsAsFactors = FALSE
  )

  ##set column names
  ## NOTE(review): "ACTIVIY" is a historical typo of "ACTIVITY"; kept unchanged
  ## here because downstream code may access these columns by name
  colnames(output) <- c(
    "NUCLIDE",
    "ACTIVIY (Bq/kg)",
    "ACTIVIY ERROR (Bq/kg)",
    "CONC. (ppm/%)",
    "CONC. ERROR (ppm/%)")

  ##nuclide ids are taken from the first column, as documented; the previous
  ##data$NUCLIDE access only worked via partial name matching of `$` (the
  ##documented example uses a column named NUCLIDES)
  nuclides <- data[[1]]

  ##set column for output
  output$NUCLIDE <- nuclides

  # Set conversion factors ----------------------------------------------------------------------
  #############################################################################
  ### conversion factors
  mass_constant <- 1.66053906660e-27 # unified atomic mass constant in kg

  ## set conversion factors ... this are the expected activity per kg of the radionuclide
  ## a = log(2) / ((unified_atomic_mass * mass_constant) / abundance) * T_0.5
  ## scaled to ppm (U, Th) resp. % (K)
  convers.factor.U238 <- log(2) / (((238.0507884 * mass_constant)/ 0.992742) * 1.409963e+17) / 1e+06
  convers.factor.Th232 <- log(2) / (((232.0380558 * mass_constant)/ 1) * 4.41797e+17) / 1e+06
  convers.factor.K40 <- log(2) / (((39.963998166 * mass_constant)/ 0.000117) * 3.9383e+16) / 1e+02

  # Run conversion ------------------------------------------------------------------------------
  U <- which(nuclides == "U-238")
  Th <- which(nuclides == "Th-232")
  K <- which(nuclides == "K-40")

  ##Activity to concentration
  if(input_unit == "Bq/kg"){
    output[U,4:5] <- data[U,2:3] / convers.factor.U238
    output[Th,4:5] <- data[Th,2:3] / convers.factor.Th232
    output[K,4:5] <- data[K,2:3] / convers.factor.K40

    output[U,2:3] <- data[U,2:3]
    output[Th,2:3] <- data[Th,2:3]
    output[K,2:3] <- data[K,2:3]
  }

  ##Concentration to activity
  ## BUGFIX: previous versions wrote the computed activities to columns 5:6,
  ## i.e. into "CONC. ERROR" plus a spurious 6th column, and never reported
  ## the input concentrations; activities belong in columns 2:3, the input
  ## concentrations in columns 4:5
  if(input_unit == "ppm/%"){
    output[U,4:5] <- data[U,2:3]
    output[Th,4:5] <- data[Th,2:3]
    output[K,4:5] <- data[K,2:3]

    output[U,2:3] <- data[U,2:3] * convers.factor.U238
    output[Th,2:3] <- data[Th,2:3] * convers.factor.Th232
    output[K,2:3] <- data[K,2:3] * convers.factor.K40
  }

  # Return value --------------------------------------------------------------------------------
  if(verbose) print(output)

  invisible(set_RLum(
    class = "RLum.Results",
    data = list(data = output),
    info = list(call = sys.call())))
}
Luminescence/R/calc_CommonDose.R0000644000176200001440000001623214236146743016253 0ustar liggesusers#' Apply the (un-)logged common age model after Galbraith et al.
(1999) to a #' given De distribution #' #' Function to calculate the common dose of a De distribution. #' #' **(Un-)logged model** #' #' When `log = TRUE` this function #' calculates the weighted mean of logarithmic De values. Each of the estimates #' is weighted by the inverse square of its relative standard error. The #' weighted mean is then transformed back to the dose scale (Galbraith & #' Roberts 2012, p. 14). #' #' The log transformation is not applicable if the #' De estimates are close to zero or negative. In this case the un-logged model #' can be applied instead (`log = FALSE`). The weighted mean is then #' calculated using the un-logged estimates of De and their absolute standard #' error (Galbraith & Roberts 2012, p. 14). #' #' @param data [RLum.Results-class] or [data.frame] (**required**): #' for [data.frame]: two columns with De `(data[,1])` and De error `(data[,2])` #' #' @param sigmab [numeric] (*with default*): #' additional spread in De values. #' This value represents the expected overdispersion in the data should the sample be #' well-bleached (Cunningham & Walling 2012, p. 100). #' **NOTE**: For the logged model (`log = TRUE`) this value must be #' a fraction, e.g. 0.2 (= 20 \%). If the un-logged model is used (`log = FALSE`), #' sigmab must be provided in the same absolute units of the De values (seconds or Gray). #' #' @param log [logical] (*with default*): #' fit the (un-)logged central age model to De data #' #' @param ... currently not used. #' #' @return #' Returns a terminal output. 
In addition an #' [RLum.Results-class] object is returned containing the #' following element: #' #' \item{.$summary}{[data.frame] summary of all relevant model results.} #' \item{.$data}{[data.frame] original input data} #' \item{.$args}{[list] used arguments} #' \item{.$call}{[call] the function call} #' #' The output should be accessed using the function [get_RLum] #' #' @section Function version: 0.1.1 #' #' @author #' Christoph Burow, University of Cologne (Germany) #' #' @seealso [calc_CentralDose], [calc_FiniteMixture], #' [calc_FuchsLang2001], [calc_MinDose] #' #' @references #' Galbraith, R.F. & Laslett, G.M., 1993. Statistical models for #' mixed fission track ages. Nuclear Tracks Radiation Measurements 4, 459-470. #' #' Galbraith, R.F., Roberts, R.G., Laslett, G.M., Yoshida, H. & Olley, #' J.M., 1999. Optical dating of single grains of quartz from Jinmium rock #' shelter, northern Australia. Part I: experimental design and statistical #' models. Archaeometry 41, 339-364. #' #' Galbraith, R.F. & Roberts, R.G., 2012. Statistical aspects of equivalent dose and error calculation and #' display in OSL dating: An overview and some recommendations. Quaternary #' Geochronology 11, 1-27. #' #' **Further reading** #' #' Arnold, L.J. & Roberts, R.G., 2009. Stochastic modelling of multi-grain equivalent dose #' (De) distributions: Implications for OSL dating of sediment mixtures. #' Quaternary Geochronology 4, 204-230. #' #' Bailey, R.M. & Arnold, L.J., 2006. Statistical modelling of single grain quartz De distributions and an #' assessment of procedures for estimating burial dose. Quaternary Science #' Reviews 25, 2475-2502. #' #' Cunningham, A.C. & Wallinga, J., 2012. Realizing the potential of fluvial archives using robust OSL chronologies. #' Quaternary Geochronology 12, 98-106. #' #' Rodnight, H., Duller, G.A.T., Wintle, A.G. & Tooth, S., 2006. Assessing the reproducibility and accuracy #' of optical dating of fluvial deposits. 
Quaternary Geochronology, 1 109-120.
#'
#' Rodnight, H., 2008. How many equivalent dose values are needed to
#' obtain a reproducible distribution?. Ancient TL 26, 3-10.
#'
#' @examples
#'
#' ## load example data
#' data(ExampleData.DeValues, envir = environment())
#'
#' ## apply the common dose model
#' calc_CommonDose(ExampleData.DeValues$CA1)
#'
#' @md
#' @export
calc_CommonDose <- function(
  data,
  sigmab,
  log=TRUE,
  ...
) {

  ##============================================================================##
  ## CONSISTENCY CHECK OF INPUT DATA
  ##============================================================================##
  if(missing(data)==FALSE){
    if(is(data, "data.frame") == FALSE & is(data,"RLum.Results") == FALSE){
      ## BUGFIX: the message previously referred to [calc_CentralDose]
      ## (copy-paste error)
      stop("[calc_CommonDose] Error: 'data' object has to be of type 'data.frame' or 'RLum.Results'!")
    }else{
      if(is(data, "RLum.Results") == TRUE){
        data <- get_RLum(data, "data")
      }
    }
  }

  ## normalise the column names; failure is silently ignored and caught below
  try(colnames(data)<- c("ED","ED_Error"), silent = TRUE)
  if(colnames(data[1])!="ED"||colnames(data[2])!="ED_Error") {
    cat(paste("Columns must be named 'ED' and 'ED_Error'"), fill = FALSE)
    stop(domain=NA)
  }

  if(!missing(sigmab)) {
    if (log) {
      ## logged model: sigmab is a relative spread and must be a fraction
      if(sigmab <0 | sigmab >1) {
        cat(paste("sigmab needs to be given as a fraction between",
                  "0 and 1 (e.g. 0.2)"), fill = FALSE)
        stop(domain=NA)
      }
    } else if (sigmab < 0) {
      ## BUGFIX: for the un-logged model sigmab is in absolute units (Gy or s),
      ## as documented above, and may legitimately exceed 1; only negative
      ## values are invalid
      cat(paste("sigmab must not be negative"), fill = FALSE)
      stop(domain=NA)
    }
  }

  ##============================================================================##
  ## ADDITIONAL ARGUMENTS
  ##============================================================================##
  settings <- list(verbose = TRUE)
  settings <- modifyList(settings, list(...))

  ##============================================================================##
  ## CALCULATIONS
  ##============================================================================##

  # set default value of sigmab
  if (missing(sigmab)) sigmab<- 0

  # calculate yu = log(ED) and su = se(logED)
  if (log) {
    yu<- log(data$ED)
    su<- sqrt( (data$ED_Error/data$ED)^2 + sigmab^2 )
  }
  else {
    yu<- data$ED
    su<- sqrt((data$ED_Error)^2 + sigmab^2)
  }

  # calculate weights (inverse variance)
  wu<- 1/su^2
  delta<- sum(wu*yu)/sum(wu)
  n<- length(yu)

  #standard error
  sedelta<- 1/sqrt(sum(wu))
  if (!log) {
    sedelta<- sedelta/delta
  }

  ## back-transform to the dose scale for the logged model
  if (log){
    delta<- exp(delta)
  }

  ##============================================================================##
  ## TERMINAL OUTPUT
  ##============================================================================##
  if (settings$verbose) {
    cat("\n [calc_CommonDose]")
    cat(paste("\n\n----------- meta data --------------"))
    cat(paste("\n n:                 ",n))
    cat(paste("\n log:               ",if(log==TRUE){"TRUE"}else{"FALSE"}))
    cat(paste("\n----------- dose estimate ----------"))
    cat(paste("\n common dose:       ", round(delta,2)))
    cat(paste("\n SE:                ", round(delta*sedelta, 2)))
    cat(paste("\n rel. SE [%]:       ", round(sedelta*100,2)))
    cat(paste("\n------------------------------------\n\n"))
  }

  ##============================================================================##
  ## RETURN VALUES
  ##============================================================================##
  summary<- data.frame(de=delta, de_err=delta*sedelta)
  call<- sys.call()
  args<- list(log=log, sigmab=sigmab)

  newRLumResults.calc_CommonDose<- set_RLum(
    class = "RLum.Results",
    data = list(summary = summary,
                data = data,
                args = args,
                call = call))

  invisible(newRLumResults.calc_CommonDose)
}
Luminescence/R/set_RLum.R0000644000176200001440000000512114264017373014750 0ustar liggesusers#' General set function for RLum S4 class objects
#'
#' Function calls object-specific set functions for RLum S4 class objects.
#'
#' The function provides a generalised access point for specific
#' [RLum-class] objects.\cr
#' Depending on the given class, the corresponding method to create an object
#' from this class will be selected. Allowed additional arguments can be found
#' in the documentations of the corresponding [RLum-class] class:
#' - [RLum.Data.Curve-class],
#' - [RLum.Data.Image-class],
#' - [RLum.Data.Spectrum-class],
#' - [RLum.Analysis-class],
#' - [RLum.Results-class]
#'
#' @param class [RLum-class] (**required**):
#' name of the S4 class to create
#'
#' @param originator [character] (*automatic*):
#' contains the name of the calling function (the function that produces this object);
#' can be set manually.
#'
#' @param .uid [character] (*automatic*):
#' sets an unique ID for this object using the internal C++ function `create_UID`.
#'
#' @param .pid [character] (*with default*):
#' option to provide a parent id for nesting at will.
#'
#' @param ... further arguments that one might want to pass to the specific set method
#'
#' @return
#' Returns an object of the specified class.
#'
#' @section Function version: 0.3.0
#'
#' @author
#' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#' @seealso [RLum.Data.Curve-class], [RLum.Data.Image-class],
#' [RLum.Data.Spectrum-class], [RLum.Analysis-class], [RLum.Results-class]
#'
#' @keywords utilities
#'
#' @examples
#'
#' ##produce empty objects from each class
#' set_RLum(class = "RLum.Data.Curve")
#' set_RLum(class = "RLum.Data.Spectrum")
#' set_RLum(class = "RLum.Data.Spectrum")
#' set_RLum(class = "RLum.Analysis")
#' set_RLum(class = "RLum.Results")
#'
#' ##produce a curve object with arbitrary curve values
#' object <- set_RLum(
#' class = "RLum.Data.Curve",
#' curveType = "arbitrary",
#' recordType = "OSL",
#' data = matrix(c(1:100,exp(-c(1:100))),ncol = 2))
#'
#' ##plot this curve object
#' plot_RLum(object)
#'
#' @md
#' @export
setGeneric("set_RLum", function (class, originator, .uid = create_UID(), .pid = NA_character_, ... ) {
  ## coerce whatever was passed to a plain character class name; the
  ## class() attribute of `class` itself is (ab)used as the carrier
  class(class) <- as.character(class)

  ## derive `originator` automatically from the calling function when the
  ## caller did not set it explicitly
  if(missing(originator)) {
    ## sys.call(which = -1) inspects the parent call; it is a "language"
    ## object only when there actually is a calling function on the stack
    if (is(sys.call(which = -1)[[1]], "language")) {
      originator <- as.character(sys.call(which = -1)[[1]])

      ##account for calls using the double colons, in this case the vector is
      ##of length 3, not only 1
      ## (e.g. Luminescence::set_RLum -> c("::", "Luminescence", "set_RLum"))
      if(length(originator) == 3){
        originator <- originator[3]
      }
    } else{
      ## called from the top level: no caller name available
      originator <- NA_character_
    }
  }

  standardGeneric("set_RLum")
})
Luminescence/R/CW2pLM.R0000644000176200001440000001017714264017373014231 0ustar liggesusers#' Transform a CW-OSL curve into a pLM-OSL curve
#'
#' Transforms a conventionally measured continuous-wave (CW) curve into a
#' pseudo linearly modulated (pLM) curve using the equations given in Bulur
#' (2000).
#'
#' According to Bulur (2000) the curve data are transformed by introducing two
#' new parameters `P` (stimulation period) and `u` (transformed time):
#'
#' \deqn{P=2*max(t)} \deqn{u=\sqrt{(2*t*P)}}
#'
#' The new count values are then calculated by
#' \deqn{ctsNEW = cts(u/P)}
#'
#' and the returned `data.frame` is produced by: `data.frame(u,ctsNEW)`
#'
#' The output of the function can be further used for LM-OSL fitting.
#'
#' @param values [RLum.Data.Curve-class] or [data.frame] (**required**):
#' `RLum.Data.Curve` data object. Alternatively, a `data.frame` of the measured
#' curve data of type stimulation time (t) (`values[,1]`) and measured counts (cts)
#' (`values[,2]`) can be provided.
#'
#' @return
#' The function returns the same data type as the input data type with
#' the transformed curve values ([data.frame] or [RLum.Data.Curve-class]).
#'
#' @note
#' The transformation is recommended for curves recorded with a channel
#' resolution of at least 0.05 s/channel.
#'
#' @section Function version: 0.4.1
#'
#' @author
#' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#' @seealso [CW2pHMi], [CW2pLMi], [CW2pPMi], [fit_LMCurve], [lm],
#' [RLum.Data.Curve-class]
#'
#'
#' @references
#' Bulur, E., 2000. A simple transformation for converting CW-OSL
#' curves to LM-OSL curves. Radiation Measurements, 32, 141-145.
#'
#' **Further Reading**
#'
#' Bulur, E., 1996. An Alternative Technique For Optically Stimulated
#' Luminescence (OSL) Experiment. Radiation Measurements, 26, 701-709.
#'
#' @keywords manip
#'
#' @examples
#'
#' ##read curve from CWOSL.SAR.Data transform curve and plot values
#' data(ExampleData.BINfileData, envir = environment())
#'
#' ##read id for the 1st OSL curve
#' id.OSL <- CWOSL.SAR.Data@@METADATA[CWOSL.SAR.Data@@METADATA[,"LTYPE"] == "OSL","ID"]
#'
#' ##produce x and y (time and count data for the data set)
#' x<-seq(CWOSL.SAR.Data@@METADATA[id.OSL[1],"HIGH"]/CWOSL.SAR.Data@@METADATA[id.OSL[1],"NPOINTS"],
#'        CWOSL.SAR.Data@@METADATA[id.OSL[1],"HIGH"],
#'        by = CWOSL.SAR.Data@@METADATA[id.OSL[1],"HIGH"]/CWOSL.SAR.Data@@METADATA[id.OSL[1],"NPOINTS"])
#' y <- unlist(CWOSL.SAR.Data@@DATA[id.OSL[1]])
#' values <- data.frame(x,y)
#'
#' ##transform values
#' values.transformed <- CW2pLM(values)
#'
#' ##plot
#' plot(values.transformed)
#'
#' @md
#' @export
CW2pLM <- function(
  values
){

  # Integrity Checks --------------------------------------------------------

  ##(1) data.frame or RLum.Data.Curve object?
  ## (idiom: scalar && with short-circuit instead of `== FALSE` / `&`)
  if (!is(values, "data.frame") && !is(values, "RLum.Data.Curve")) {
    stop("[CW2pLM()] 'values' object has to be of type 'data.frame' or 'RLum.Data.Curve'!",
         call. = FALSE)
  }

  ##(2) if the input object is an 'RLum.Data.Curve' object check for allowed curves
  if (is(values, "RLum.Data.Curve")) {
    if (!grepl("OSL", values@recordType) && !grepl("IRSL", values@recordType)) {
      stop(paste("[CW2pLM()] recordType ", values@recordType,
                 " is not allowed for the transformation!", sep = ""),
           call. = FALSE)
    }
    temp.values <- as(values, "data.frame")

  } else {
    temp.values <- values
  }

  # Calculation -------------------------------------------------------------

  ##curve transformation after Bulur (2000):
  ## P = stimulation period, u = transformed time
  P <- 2 * max(temp.values[, 1])
  u <- ((2 * temp.values[, 1] * P)^0.5)

  ##cw >> plm conversion, according Bulur, 2000
  temp.values[, 2] <- temp.values[, 2] * (u / P)
  temp.values <- data.frame(u, temp.values[, 2])

  # Return values -----------------------------------------------------------

  ##returns the same data type as the input
  if (is(values, "data.frame")) {
    return(temp.values)
  }

  set_RLum(
    class = "RLum.Data.Curve",
    recordType = values@recordType,
    data = as.matrix(temp.values),
    info = values@info)
}
Luminescence/R/calc_AliquotSize.R0000644000176200001440000004066614236146743016465 0ustar liggesusers#' Estimate the amount of grains on an aliquot
#'
#' Estimate the number of grains on an aliquot. Alternatively, the packing
#' density of an aliquot is computed.
#'
#' This function can be used to either estimate the number of grains on an
#' aliquot or to compute the packing density depending on the arguments
#' provided.
#'
#' The following function is used to estimate the number of grains `n`:
#'
#' \deqn{n = (\pi*x^2)/(\pi*y^2)*d}
#'
#' where `x` is the radius of the aliquot size (microns), `y` is the mean
#' radius of the mineral grains (mm) and `d` is the packing density
#' (value between 0 and 1).
#'
#' **Packing density**
#'
#' The default value for `packing.density` is 0.65, which is the mean of
#' empirical values determined by Heer et al. (2012) and unpublished data from
#' the Cologne luminescence laboratory. If `packing.density = "Inf"` a maximum
#' density of \eqn{\pi/\sqrt12 = 0.9068\ldots} is used.
However, note that #' this value is not appropriate as the standard preparation procedure of #' aliquots resembles a PECC (*"Packing Equal Circles in a Circle"*) problem #' where the maximum packing density is asymptotic to about 0.87. #' #' **Monte Carlo simulation** #' #' The number of grains on an aliquot can be estimated by Monte Carlo simulation #' when setting `MC = TRUE`. Each of the parameters necessary to calculate #' `n` (`x`, `y`, `d`) are assumed to be normally distributed with means #' \eqn{\mu_x, \mu_y, \mu_d} and standard deviations \eqn{\sigma_x, \sigma_y, \sigma_d}. #' #' For the mean grain size random samples are taken first from #' \eqn{N(\mu_y, \sigma_y)}, where \eqn{\mu_y = mean.grain.size} and #' \eqn{\sigma_y = (max.grain.size-min.grain.size)/4} so that 95\% of all #' grains are within the provided the grain size range. This effectively takes #' into account that after sieving the sample there is still a small chance of #' having grains smaller or larger than the used mesh sizes. For each random #' sample the mean grain size is calculated, from which random subsamples are #' drawn for the Monte Carlo simulation. #' #' The packing density is assumed #' to be normally distributed with an empirically determined \eqn{\mu = 0.65} #' (or provided value) and \eqn{\sigma = 0.18}. The normal distribution is #' truncated at `d = 0.87` as this is approximately the maximum packing #' density that can be achieved in PECC problem. #' #' The sample diameter has #' \eqn{\mu = sample.diameter} and \eqn{\sigma = 0.2} to take into account #' variations in sample disc preparation (i.e. applying silicon spray to the #' disc). A lower truncation point at `x = 0.5` is used, which assumes #' that aliqouts with smaller sample diameters of 0.5 mm are discarded. #' Likewise, the normal distribution is truncated at 9.8 mm, which is the #' diameter of the sample disc. 
#' #' For each random sample drawn from the #' normal distributions the amount of grains on the aliquot is calculated. By #' default, `10^5` iterations are used, but can be reduced/increased with #' `MC.iter` (see `...`). The results are visualised in a bar- and #' boxplot together with a statistical summary. #' #' @param grain.size [numeric] (**required**): #' mean grain size (microns) or a range of grain sizes from which the #' mean grain size is computed (e.g. `c(100,200)`). #' #' @param sample.diameter [numeric] (**required**): #' diameter (mm) of the targeted area on the sample carrier. #' #' @param packing.density [numeric] (*with default*): #' empirical value for mean packing density. \cr #' If `packing.density = "Inf"` a hexagonal structure on an infinite plane with #' a packing density of \eqn{0.906\ldots} is assumed. #' #' @param MC [logical] (*optional*): #' if `TRUE` the function performs a Monte Carlo simulation for estimating the #' amount of grains on the sample carrier and assumes random errors in grain #' size distribution and packing density. Requires a vector with min and max #' grain size for `grain.size`. For more information see details. #' #' @param grains.counted [numeric] (*optional*): #' grains counted on a sample carrier. If a non-zero positive integer is provided this function #' will calculate the packing density of the aliquot. If more than one value is #' provided the mean packing density and its standard deviation is calculated. #' Note that this overrides `packing.density`. #' #' @param plot [logical] (*with default*): #' plot output (`TRUE`/`FALSE`) #' #' @param ... further arguments to pass (`main, xlab, MC.iter`). #' #' @return #' Returns a terminal output. 
In addition an #' [RLum.Results-class] object is returned containing the #' following element: #' #' \item{.$summary}{[data.frame] summary of all relevant calculation results.} #' \item{.$args}{[list] used arguments} #' \item{.$call}{[call] the function call} #' \item{.$MC}{[list] results of the Monte Carlo simulation} #' #' The output should be accessed using the function [get_RLum]. #' #' @section Function version: 0.31 #' #' @author Christoph Burow, University of Cologne (Germany) #' #' @references #' Duller, G.A.T., 2008. Single-grain optical dating of Quaternary #' sediments: why aliquot size matters in luminescence dating. Boreas 37, #' 589-612. #' #' Heer, A.J., Adamiec, G., Moska, P., 2012. How many grains #' are there on a single aliquot?. Ancient TL 30, 9-16. #' #' **Further reading** #' #' Chang, H.-C., Wang, L.-C., 2010. A simple proof of Thue's #' Theorem on Circle Packing. [https://arxiv.org/pdf/1009.4322v1.pdf](), #' 2013-09-13. #' #' Graham, R.L., Lubachevsky, B.D., Nurmela, K.J., #' Oestergard, P.R.J., 1998. Dense packings of congruent circles in a circle. #' Discrete Mathematics 181, 139-154. #' #' Huang, W., Ye, T., 2011. Global #' optimization method for finding dense packings of equal circles in a circle. #' European Journal of Operational Research 210, 474-481. #' #' @examples #' #' ## Estimate the amount of grains on a small aliquot #' calc_AliquotSize(grain.size = c(100,150), sample.diameter = 1, MC.iter = 100) #' #' ## Calculate the mean packing density of large aliquots #' calc_AliquotSize(grain.size = c(100,200), sample.diameter = 8, #' grains.counted = c(2525,2312,2880), MC.iter = 100) #' #' @md #' @export calc_AliquotSize <- function( grain.size, sample.diameter, packing.density = 0.65, MC = TRUE, grains.counted, plot=TRUE, ... 
){
  ##==========================================================================##
  ## CONSISTENCY CHECK OF INPUT DATA
  ##==========================================================================##
  if(length(grain.size) == 0 | length(grain.size) > 2) {
    cat(paste("\nPlease provide the mean grain size or a range",
              "of grain sizes (in microns).\n"), fill = FALSE)
    stop(domain=NA)
  }

  ## does the user request the theoretical maximum packing density
  ## (hexagonal packing on an infinite plane, Thue's theorem)?
  ## FIX: the manual documents packing.density = "Inf", but the old code only
  ## accepted the lower-case string "inf" -> now accepted case-insensitively,
  ## and numeric Inf is honoured as well (backward-compatible).
  packing.inf <- (is.character(packing.density) &&
                    tolower(packing.density[1]) == "inf") ||
    (is.numeric(packing.density) && is.infinite(packing.density[1]))

  if(!packing.inf && (packing.density < 0 | packing.density > 1)) {
    cat(paste("\nOnly values between 0 and 1 allowed for packing density!\n"))
    stop(domain=NA)
  }

  if(sample.diameter < 0) {
    cat(paste("\nPlease provide only positive integers.\n"))
    stop(domain=NA)
  }

  if (sample.diameter > 9.8)
    warning("\n A sample diameter of ", sample.diameter,
            " mm was specified, but common sample discs are 9.8 mm in diameter.",
            call. = FALSE)

  ## the MC simulation only applies when estimating the number of grains,
  ## not when computing the packing density from counted grains
  if(missing(grains.counted) == FALSE) {
    if(MC == TRUE) {
      MC = FALSE
      cat(paste("\nMonte Carlo simulation is only available for estimating the",
                "amount of grains on the sample disc. Automatically set to",
                "FALSE.\n"))
    }
  }

  if(MC == TRUE && length(grain.size) != 2) {
    cat(paste("\nPlease provide a vector containing the min and max grain",
              "grain size(e.g. c(100,150) when using Monte Carlo simulations.\n"))
    stop(domain=NA)
  }

  ##==========================================================================##
  ## ... ARGUMENTS
  ##==========================================================================##
  # set default parameters
  # NOTE(review): the default here is 10^4 iterations, while the roxygen text
  # above mentions 10^5 -- the code value is authoritative; override via MC.iter
  settings <- list(MC.iter = 10^4, verbose = TRUE)

  # override settings with user arguments
  settings <- modifyList(settings, list(...))

  ##==========================================================================##
  ## CALCULATIONS
  ##==========================================================================##
  # calculate the mean grain size when a range was provided
  range.flag<- FALSE
  if(length(grain.size) == 2) {
    gs.range<- grain.size
    grain.size<- mean(grain.size)
    range.flag<- TRUE
  }

  # use ~0.907... from Thue's Theorem as packing density
  if (packing.inf) {
    packing.density = pi/sqrt(12)
  }

  # function to calculate the amount of grains: ratio of the aliquot area to
  # the area of a single grain, scaled by the packing density
  # (sd = sample diameter in mm, gs = grain size in microns, d = packing density)
  calc_n<- function(sd, gs, d) {
    n<- ((pi*(sd/2)^2)/
           (pi*(gs/2000)^2))*d
    return(n)
  }

  # calculate the amount of grains on the aliquot
  if(missing(grains.counted) == TRUE) {
    n.grains<- calc_n(sample.diameter, grain.size, packing.density)

    ##========================================================================##
    ## MONTE CARLO SIMULATION
    if(MC == TRUE && range.flag == TRUE) {

      # create a random set of packing densities assuming a normal
      # distribution with the empirically determined standard deviation of 0.18
      d.mc<- rnorm(settings$MC.iter, packing.density, 0.18)

      # in a PECC the packing density can not be larger than ~0.87;
      # the lower truncation at 0.25 avoids unrealistically sparse discs
      d.mc[which(d.mc > 0.87)]<- 0.87
      d.mc[which(d.mc < 0.25)]<- 0.25

      # create a random set of sample diameters assuming a normal
      # distribution with an assumed standard deviation of 0.2
      sd.mc<- rnorm(settings$MC.iter, sample.diameter, 0.2)

      # sample diameters < 0.5 mm are assumed to be discarded; the diameter
      # can also not exceed the sample disc (9.8 mm)
      sd.mc[which(sd.mc <0.5)]<- 0.5
      if (sample.diameter <= 9.8)
        sd.mc[which(sd.mc >9.8)]<- 9.8

      # random grain sizes: mean grain size as mean and a quarter of the
      # provided range as standard deviation, i.e. half the range is treated
      # as two sigma (conservative; the range is further divided by 2)
      gs.mc<- rnorm(settings$MC.iter, grain.size, diff(gs.range)/4)

      # draw random samples from the grain size spectrum (gs.mc) and calculate
      # the mean for each sample; this approximates the variation in mean
      # grain size on the sample disc
      gs.mc.sampleMean<- vector(mode = "numeric")
      for(i in 1:length(gs.mc)) {
        gs.mc.sampleMean[i]<- mean(sample(gs.mc,
                                          calc_n(sample(sd.mc, size = 1),
                                                 grain.size,
                                                 sample(d.mc, size = 1)),
                                          replace = TRUE))
      }

      # create empty vector for MC estimates of n and calculate n for each
      # MC data set
      MC.n<- vector(mode="numeric")
      for(i in 1:length(gs.mc)) {
        MC.n[i]<- calc_n(sd.mc[i], gs.mc.sampleMean[i], d.mc[i])
      }

      # summarize MC estimates
      MC.q<- quantile(MC.n, c(0.05,0.95))
      MC.n.kde<- density(MC.n, n = 10000)

      # apply student's t-test; the standard error is recovered from the
      # width of the 95% confidence interval (2 * 1.96 = 3.92)
      MC.t.test<- t.test(MC.n)
      MC.t.lower<- MC.t.test["conf.int"]$conf.int[1]
      MC.t.upper<- MC.t.test["conf.int"]$conf.int[2]
      MC.t.se<- (MC.t.upper-MC.t.lower)/3.92

      # get unweighted statistics from calc_Statistics() function
      MC.stats<- calc_Statistics(as.data.frame(cbind(MC.n,0.0001)))$unweighted
    }
  }#EndOf:estimate number of grains

  ##========================================================================##
  ## CALCULATE PACKING DENSITY
  ## note: both areas are computed from diameters rather than radii; the two
  ## missing factors of 4 cancel in the ratio, so the result is correct
  if(missing(grains.counted) == FALSE) {
    area.container<- pi*sample.diameter^2

    if(length(grains.counted) == 1) {
      area.grains<- (pi*(grain.size/1000)^2)*grains.counted
      packing.density<- area.grains/area.container
    } else {
      packing.densities<- length(grains.counted)
      for(i in 1:length(grains.counted)) {
        area.grains<- (pi*(grain.size/1000)^2)*grains.counted[i]
        packing.densities[i]<- area.grains/area.container
      }
      std.d<- sd(packing.densities)
    }
  }

  ##==========================================================================##
  ##TERMINAL OUTPUT
  ##==========================================================================##
  if (settings$verbose) {
    cat("\n [calc_AliquotSize]")
    cat(paste("\n\n ---------------------------------------------------------"))
    cat(paste("\n mean grain size (microns) :", grain.size))
    cat(paste("\n sample diameter (mm) :", sample.diameter))
    if(missing(grains.counted) == FALSE) {
      if(length(grains.counted) == 1) {
        cat(paste("\n counted grains :", grains.counted))
      } else {
        cat(paste("\n mean counted grains :", round(mean(grains.counted))))
      }
    }
    if(missing(grains.counted) == TRUE) {
      cat(paste("\n packing density :", round(packing.density,3)))
    }
    if(missing(grains.counted) == FALSE) {
      if(length(grains.counted) == 1) {
        cat(paste("\n packing density :", round(packing.density,3)))
      } else {
        cat(paste("\n mean packing density :", round(mean(packing.densities),3)))
        cat(paste("\n standard deviation :", round(std.d,3)))
      }
    }
    if(missing(grains.counted) == TRUE) {
      cat(paste("\n number of grains :", round(n.grains,0)))
    }

    if(MC == TRUE && range.flag == TRUE) {
      # FIX: header was wrapped as cat(paste(cat(paste(...)))) -- the inner
      # cat() already printed it and the outer cat(paste(NULL)) printed
      # nothing, so a single cat() produces identical output
      cat(paste("\n\n --------------- Monte Carlo Estimates -------------------"))
      cat(paste("\n number of iterations (n) :", settings$MC.iter))
      cat(paste("\n median :", round(MC.stats$median)))
      cat(paste("\n mean :", round(MC.stats$mean)))
      cat(paste("\n standard deviation (mean) :", round(MC.stats$sd.abs)))
      cat(paste("\n standard error (mean) :", round(MC.stats$se.abs, 1)))
      cat(paste("\n 95% CI from t-test (mean) :", round(MC.t.lower), "-", round(MC.t.upper)))
      cat(paste("\n standard error from CI (mean):", round(MC.t.se, 1)))
      cat(paste("\n ---------------------------------------------------------\n"))
    } else {
      cat(paste("\n ---------------------------------------------------------\n"))
    }
  }

  ##==========================================================================##
  ##RETURN VALUES
  ##==========================================================================##
  # prepare return values for mode: estimate grains
  if(missing(grains.counted) == TRUE) {
    summary<- data.frame(grain.size = grain.size,
                         sample.diameter = sample.diameter,
                         packing.density = packing.density,
                         n.grains = round(n.grains,0),
                         grains.counted = NA)
  }

  # prepare return values for mode: estimate packing density/densities
  if(missing(grains.counted) == FALSE) {
    # return values if only one value for counted.grains is provided
    if(length(grains.counted) == 1) {
      summary<- data.frame(grain.size =
grain.size, sample.diameter = sample.diameter, packing.density = packing.density, n.grains = NA, grains.counted = grains.counted) } else { # return values if more than one value for counted.grains is provided summary<- data.frame(rbind(1:5)) colnames(summary)<- c("grain.size", "sample.diameter", "packing.density", "n.grains","grains.counted") for(i in 1:length(grains.counted)) { summary[i,]<- c(grain.size, sample.diameter, packing.densities[i], n.grains = NA, grains.counted[i]) } } } if(!MC) { MC.n<- NULL MC.stats<- NULL MC.n.kde<- NULL MC.t.test<- NULL MC.q<- NULL } if(missing(grains.counted)) grains.counted<- NA call<- sys.call() args<- as.list(sys.call())[-1] # create S4 object newRLumResults.calc_AliquotSize <- set_RLum( class = "RLum.Results", data = list( summary=summary, MC=list(estimates=MC.n, statistics=MC.stats, kde=MC.n.kde, t.test=MC.t.test, quantile=MC.q)), info = list(call=call, args=args)) ##=========## ## PLOTTING if(plot==TRUE) { try(plot_RLum.Results(newRLumResults.calc_AliquotSize, ...)) } # Return values invisible(newRLumResults.calc_AliquotSize) } Luminescence/R/convert_RLum2Risoe.BINfileData.R0000644000176200001440000002030614264017373020764 0ustar liggesusers#'Converts RLum.Analysis-objects and RLum.Data.Curve-objects to RLum2Risoe.BINfileData-objects #' #'The functions converts [RLum.Analysis-class] and [RLum.Data.Curve-class] objects and a [list] of those #'to [Risoe.BINfileData-class] objects. The function intends to provide a minimum of compatibility #'between both formats. The created [RLum.Analysis-class] object can be later exported to a #'BIN-file using the function [write_R2BIN]. 
#' #'@param object [RLum.Analysis-class] or [RLum.Data.Curve-class] (**required**): input object to #'be converted #' #'@param keep.position.number [logical] (with default): keeps the original position number or re-calculate #'the numbers to avoid doubling #' #'@section Function version: 0.1.3 #' #'@author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #'@seealso [RLum.Analysis-class], [RLum.Data.Curve-class], [write_R2BIN] #' #'@note The conversion can be never perfect. The `RLum` objects may contain information which are #'not part of the [Risoe.BINfileData-class] definition. #' #'@keywords IO #' #'@examples #' #'##simple conversion using the example dataset #'data(ExampleData.RLum.Analysis, envir = environment()) #'convert_RLum2Risoe.BINfileData(IRSAR.RF.Data) #' #'@return The function returns a [Risoe.BINfileData-class] object. #' #'@md #'@export convert_RLum2Risoe.BINfileData <- function( object, keep.position.number = FALSE ){ # Self call ----------------------------------------------------------------------------------- if(is(object, "list")){ ##call function object_list <- lapply(object, function(x) { convert_RLum2Risoe.BINfileData(x) }) ##merge objects if(length(object_list) == 1){ return(object_list[[1]]) }else{ return(merge_Risoe.BINfileData(object_list, keep.position.number = keep.position.number)) } } # Integrity tests ----------------------------------------------------------------------------- ##RLum.Data.Curve if(inherits(object, "RLum.Data.Curve")) object <- set_RLum(class = "RLum.Analysis", records = list(object)) ##RLum.Analysis - final check, from here we just accept RLum.Analysis if(!inherits(object, "RLum.Analysis")) stop("[convert_RLum2Risoe.BINfileData()] Input object needs to be of class 'RLum.Analysis', 'RLum.Data.Curve' or a 'list' of such objects!", call. 
= FALSE)

  # Set PROTOTYPE & DATA --------------------------------------------------------------------------
  ##set Risoe.BINfiledata prototype
  prototype <- set_Risoe.BINfileData()

  ##grep allowed names
  allowed_names <- names(prototype@METADATA)

  ##grep records (this will avoid further the subsetting)
  records <- object@records

  ##write DATA: only the signal column (2nd column) of each curve is stored
  prototype@DATA <- lapply(records, function(x) {x@data[,2]})

  # Create METADATA -----------------------------------------------------------------------------
  ##create METADATA list
  METADATA_list <- lapply(records, function(x){
    ##grep matching arguments only
    temp <- x@info[toupper(names(x@info)) %in% allowed_names]

    ##account for the case that no matching name was found
    if(length(temp) != 0){
      ##correct names
      names(temp) <- toupper(names(temp))
      return(temp)

    }else{
      return(list(ID = NA))
    }
  })

  ##make data.frame out of it
  METADATA_df <- as.data.frame(data.table::rbindlist(METADATA_list, fill = TRUE))

  ##write METADATA
  prototype@METADATA <- merge(prototype@METADATA, METADATA_df, all = TRUE)

  ## +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++#
  ##fill various missing values

  ##helper function ...otherwise the code gets too nasty ... only used for NA values!
  ##(note the <<- : it modifies 'prototype' in the enclosing frame)
  .replace <- function(field, value){
    prototype@METADATA[[field]][which(sapply(prototype@METADATA[[field]], is.na))] <<- value
  }

  ## >> ID << ##
  prototype@METADATA[["ID"]] <- 1:length(records)

  ## >> SEL << ##
  prototype@METADATA[["SEL"]] <- TRUE

  ## >> VERSION << ##
  .replace("VERSION", "08")

  ## >> RECTYPE << ##
  .replace("RECTYPE", 0)

  ## >> NPOINTS << ##
  ##each record stores an x/y matrix, hence the number of data points is
  ##half the matrix length
  if(any(is.na(prototype@METADATA[["NPOINTS"]]))){
    prototype@METADATA[["NPOINTS"]] <- vapply(records, function(x){
      length(x@data)/2
    }, FUN.VALUE = numeric(1))
  }

  ## >> LENGTH << + PREVIOUS
  if(any(is.na(prototype@METADATA[["LENGTH"]]))){
    ##even we have values here before, it will make no difference
    ##(4 bytes per data point plus the record header; 507 bytes is assumed
    ##here -- TODO confirm against the BIN/BINX format specification)
    prototype@METADATA[["LENGTH"]] <- (prototype@METADATA[["NPOINTS"]] * 4) + 507

    ##PREVIOUS = byte length of the preceding record (0 for the first one);
    ##note: 1:length(records) - 1 parses as (1:n) - 1 = 0:(n-1) and the 0
    ##index is silently dropped, so elements 1..(n-1) are selected
    prototype@METADATA[["PREVIOUS"]] <- c(0,prototype@METADATA[["LENGTH"]][1:length(records) - 1])
  }

  ## >> RUN << ##
  ##if there is only one NA, we should overwrite it, to be consistent
  if(any(is.na(prototype@METADATA[["RUN"]])))
    prototype@METADATA[["RUN"]] <- 1:length(records)

  ## >> SET << ##
  .replace("SET", 1)

  ## >> GRAIN << ##
  .replace("GRAIN", 0)

  ## >> GRAINNUMBER << ##
  .replace("GRAINNUMBER", 0)

  ## >> USER << ##
  .replace("USER", "RLum.Data")

  ## >> DTYPE << ##
  .replace("DTYPE", "Natural")

  ## >> LIGHTSOURCE << ##
  .replace("LIGHTSOURCE", "None")

  ## >> SAMPLE << ##
  if(any(is.na(prototype@METADATA[["SAMPLE"]]))){
    ##get only the id's to change
    temp_id <- which(is.na(prototype@METADATA[["SAMPLE"]]))

    ##set name
    ##FIX: assign only the NA rows; the old code replaced the whole column
    ##with a vector of length(temp_id), which was only correct when every
    ##entry was NA
    prototype@METADATA[["SAMPLE"]][temp_id] <- vapply(temp_id, function(x){
      if(any(names(records[[x]]@info) == "name")){
        records[[x]]@info$name
      }else{
        "unknown"
      }
    }, character(length = 1))
  }

  ## >> COMMENT << ##
  .replace("COMMENT", "convert_RLum2Risoe.BINfileData()")

  ## >> FNAME << ##
  .replace("FNAME", " ")

  ## >> DATE << ## + TIME
  if(any(is.na(prototype@METADATA[["DATE"]]))){
    ##get only the id's to change
    temp_id <- which(is.na(prototype@METADATA[["DATE"]]))

    ##set date: first 8 characters of 'startDate' (YYYYMMDD), or today
    ##FIX: subset assignment, see SAMPLE above
    prototype@METADATA[["DATE"]][temp_id] <- vapply(temp_id, function(x){
      if(any(names(records[[x]]@info) == "startDate")){
        strtrim(records[[x]]@info[["startDate"]], width = 8)
      }else{
        as.character(format(Sys.Date(),"%Y%m%d"))
      }
    }, character(length = 1))

    ##set time: characters 9-14 of 'startDate' (HHMMSS), or the current time
    ##FIX: the fallback format string was "%H%m%S" -- %m is the MONTH;
    ##minutes are %M, so the correct specification is "%H%M%S"
    prototype@METADATA[["TIME"]][temp_id] <- vapply(temp_id, function(x){
      if(any(names(records[[x]]@info) == "startDate")){
        substr(records[[x]]@info[["startDate"]], start = 9, stop = 14)
      }else{
        as.character(format(Sys.time(),"%H%M%S"))
      }
    }, character(length = 1))
  }

  ## >> LOW << ##
  if(any(is.na(prototype@METADATA[["LOW"]]))){
    ##get only the id's to change
    temp_id <- which(is.na(prototype@METADATA[["LOW"]]))

    ##set lower channel limit from the curve data (FIX: subset assignment)
    prototype@METADATA[["LOW"]][temp_id] <- vapply(temp_id, function(x){
      min(records[[x]]@data[,1])
    }, numeric(length = 1))
  }

  ## >> HIGH << ##
  if(any(is.na(prototype@METADATA[["HIGH"]]))){
    ##get only the id's to change
    temp_id <- which(is.na(prototype@METADATA[["HIGH"]]))

    ##set upper channel limit from the curve data (FIX: subset assignment)
    prototype@METADATA[["HIGH"]][temp_id] <- vapply(temp_id, function(x){
      max(records[[x]]@data[,1])
    }, numeric(length = 1))
  }

  ## >> SEQUENCE << ##
  .replace("SEQUENCE", "")

  # METADATA >> correct information -------------------------------------------------------------
  ##we have to correct the LTYPE, the format is rather strict
  ##(a) create LTYPE from names of objects; strip a trailing " (...)" suffix
  LTYPE <- vapply(names(object), function(s){
    if(grepl(pattern = " (", x = s, fixed = TRUE)){
      strsplit(s, split = " (", fixed = TRUE)[[1]][1]

    }else{
      s
    }
  }, FUN.VALUE = character(1))

  ##(b) replace characters
  ##(b.1) irradiation
  LTYPE <- gsub(pattern = "irradiation", replacement = "USER", fixed = TRUE, x = LTYPE)

  ##(b.2) RF
  LTYPE <- gsub(pattern = "RF", replacement = "RL", fixed = TRUE, x = LTYPE)

  ##set value
  prototype@METADATA[["LTYPE"]] <- LTYPE

  ##correct USER
  ##limit user to 8 characters (BIN-format field width)
  prototype@METADATA[["USER"]] <- strtrim(prototype@METADATA[["USER"]], 8)

  ##correct SAMPLE
  ##limit sample name to 20 characters (BIN-format field width)
  prototype@METADATA[["SAMPLE"]] <- strtrim(prototype@METADATA[["SAMPLE"]], 20)

  ##replace all remaining NA values by 0
  ##all remaining values are numbers
  prototype@METADATA <-
replace(prototype@METADATA, is.na(prototype@METADATA), 0L) # Return -------------------------------------------------------------------------------------- return(prototype) } Luminescence/R/CW2pLMi.R0000644000176200001440000002233614264017373014402 0ustar liggesusers#' Transform a CW-OSL curve into a pLM-OSL curve via interpolation under linear #' modulation conditions #' #' Transforms a conventionally measured continuous-wave (CW) OSL-curve into a #' pseudo linearly modulated (pLM) curve under linear modulation conditions #' using the interpolation procedure described by Bos & Wallinga (2012). #' #' The complete procedure of the transformation is given in Bos & Wallinga #' (2012). The input `data.frame` consists of two columns: time (t) and #' count values (CW(t)) #' #' **Nomenclature** #' #' - P = stimulation time (s) #' - 1/P = stimulation rate (1/s) #' #' **Internal transformation steps** #' #' (1) #' log(CW-OSL) values #' #' (2) #' Calculate t' which is the transformed time: #' \deqn{t' = 1/2*1/P*t^2} #' #' (3) #' Interpolate CW(t'), i.e. use the log(CW(t)) to obtain the count values #' for the transformed time (t'). Values beyond `min(t)` and `max(t)` #' produce `NA` values. #' #' (4) #' Select all values for t' < `min(t)`, i.e. values beyond the time resolution #' of t. Select the first two values of the transformed data set which contain #' no `NA` values and use these values for a linear fit using [lm]. #' #' (5) #' Extrapolate values for t' < `min(t)` based on the previously obtained #' fit parameters. #' #' (6) #' Transform values using #' \deqn{pLM(t) = t/P*CW(t')} #' #' (7) #' Combine values and truncate all values for t' > `max(t)` #' #' #' **NOTE:** #' The number of values for t' < `min(t)` depends on the stimulation #' period (P) and therefore on the stimulation rate 1/P. 
To avoid the #' production of too many artificial data at the raising tail of the determined #' pLM curves it is recommended to use the automatic estimation routine for #' `P`, i.e. provide no own value for `P`. #' #' @param values [RLum.Data.Curve-class] or [data.frame] (**required**): #' [RLum.Data.Curve-class] or `data.frame` with measured curve data of type #' stimulation time (t) (`values[,1]`) and measured counts (cts) (`values[,2]`) #' #' @param P [vector] (*optional*): #' stimulation time in seconds. If no value is given the optimal value is #' estimated automatically (see details). Greater values of P produce more #' points in the rising tail of the curve. #' #' @return #' The function returns the same data type as the input data type with #' the transformed curve values. #' #' **`RLum.Data.Curve`** #' #' \tabular{rl}{ #' `$CW2pLMi.x.t` \tab: transformed time values \cr #' `$CW2pLMi.method` \tab: used method for the production of the new data points #' } #' #' @note #' According to Bos & Wallinga (2012) the number of extrapolated points #' should be limited to avoid artificial intensity data. If `P` is #' provided manually and more than two points are extrapolated, a warning #' message is returned. #' #' @section Function version: 0.3.1 #' #' @author #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #' Based on comments and suggestions from:\cr #' Adrie J.J. Bos, Delft University of Technology, The Netherlands #' #' @seealso [CW2pLM], [CW2pHMi], [CW2pPMi], [fit_LMCurve], #' [RLum.Data.Curve-class] #' #' @references #' Bos, A.J.J. & Wallinga, J., 2012. How to visualize quartz OSL #' signal components. Radiation Measurements, 47, 752-758. #' #' **Further Reading** #' #' Bulur, E., 1996. An Alternative Technique For #' Optically Stimulated Luminescence (OSL) Experiment. Radiation Measurements, #' 26, 701-709. #' #' Bulur, E., 2000. A simple transformation for converting CW-OSL curves to #' LM-OSL curves. 
Radiation Measurements, 32, 141-145. #' #' @keywords manip #' #' @examples #' #' ##(1) #' ##load CW-OSL curve data #' data(ExampleData.CW_OSL_Curve, envir = environment()) #' #' ##transform values #' values.transformed <- CW2pLMi(ExampleData.CW_OSL_Curve) #' #' ##plot #' plot(values.transformed$x, values.transformed$y.t, log = "x") #' #' ##(2) - produce Fig. 4 from Bos & Wallinga (2012) #' ##load data #' data(ExampleData.CW_OSL_Curve, envir = environment()) #' values <- CW_Curve.BosWallinga2012 #' #' ##open plot area #' plot(NA, NA, #' xlim = c(0.001,10), #' ylim = c(0,8000), #' ylab = "pseudo OSL (cts/0.01 s)", #' xlab = "t [s]", #' log = "x", #' main = "Fig. 4 - Bos & Wallinga (2012)") #' #' #' values.t <- CW2pLMi(values, P = 1/20) #' lines(values[1:length(values.t[,1]),1],CW2pLMi(values, P = 1/20)[,2], #' col = "red", lwd = 1.3) #' text(0.03,4500,"LM", col = "red", cex = .8) #' #' values.t <- CW2pHMi(values, delta = 40) #' lines(values[1:length(values.t[,1]),1],CW2pHMi(values, delta = 40)[,2], #' col = "black", lwd = 1.3) #' text(0.005,3000,"HM", cex =.8) #' #' values.t <- CW2pPMi(values, P = 1/10) #' lines(values[1:length(values.t[,1]),1], CW2pPMi(values, P = 1/10)[,2], #' col = "blue", lwd = 1.3) #' text(0.5,6500,"PM", col = "blue", cex = .8) #' #' @md #' @export CW2pLMi<- function( values, P ){ # (0) Integrity checks ------------------------------------------------------- ##(1) data.frame or RLum.Data.Curve object? if(is(values, "data.frame") == FALSE & is(values, "RLum.Data.Curve") == FALSE){ stop("[CW2pLMi()] 'values' object has to be of type 'data.frame' or 'RLum.Data.Curve'!", call. = FALSE) } ##(2) if the input object is an 'RLum.Data.Curve' object check for allowed curves if(is(values, "RLum.Data.Curve") == TRUE){ if(!grepl("OSL", values@recordType) & !grepl("IRSL", values@recordType)){ stop(paste("[CW2pLMi()] recordType ",values@recordType, " is not allowed for the transformation!", sep=""), call. 
= FALSE) }else{
      temp.values <- as(values, "data.frame")
    }
  }else{
    temp.values <- values
  }

  # (1) Transform values ------------------------------------------------------------------------
  ##(a) log transformation of the CW-OSL count values
  CW_OSL.log<-log(temp.values[,2])

  ##(b) time transformation t >> t'
  t<-temp.values[,1]

  ##set P
  ##if no value for P is set, select a P value that yields a maximum of
  ##two extrapolation points (cf. the note in the documentation above)
  if(missing(P)==TRUE){
    i<-10
    P<-1/i
    t.transformed<-0.5*1/P*t^2

    ##FIX: this condition was corrupted in the shipped sources
    ##("t.transformed2" -- the comparison "<min(t)])>" had been stripped);
    ##restored from the documented constraint that at most two data points
    ##(those with t' < min(t)) may be extrapolated
    while(length(t.transformed[t.transformed < min(t)]) > 2){
      P<-1/i
      t.transformed<-0.5*1/P*t^2
      i<-i+10
    }#end::while

  }else{
    if(P==0){stop("[CW2pLMi] P has to be > 0!", call. = FALSE)}
    t.transformed<-0.5*1/P*t^2
  }#endif

  # (2) Interpolation ---------------------------------------------------------------------------
  ##interpolate values, values beyond the range return NA values
  CW_OSL.interpolated<-approx(t,CW_OSL.log, xout=t.transformed, rule=1)

  ##combine t.transformed and CW_OSL.interpolated in a data.frame
  temp<-data.frame(x=t.transformed, y=unlist(CW_OSL.interpolated$y))

  ##Problem: in rare cases the interpolation does not work properly and
  ##Inf or NaN values are returned
  ##Fetch row number of the invalid values
  invalid_values.id<-c(which(is.infinite(temp[,2]) | is.nan(temp[,2])))

  ##interpolate each invalid value from its lower and upper neighbour
  ##FIX: guarded by the length check -- the old code ran sapply over 1:0
  ##when no invalid value existed (result was unused, but needless work)
  if(length(invalid_values.id)>0){
    invalid_values.interpolated<-sapply(seq_along(invalid_values.id),
      function(x) {
        mean(c(temp[invalid_values.id[x]-1,2],temp[invalid_values.id[x]+1,2]))
      })

    ##replace invalid values in data.frame with newly interpolated values
    temp[invalid_values.id,2]<-invalid_values.interpolated
  }

  # (3) Extrapolate first values of the curve ---------------------------------------------------
  ##(a) - find index of first rows which contain NA values (needed for extrapolation)
  temp.sel.id<-min(which(is.na(temp[,2])==FALSE))

  ##(b) - fit linear function through the first two measured points
  fit.lm<-lm(y ~ x,data.frame(x=t[1:2],y=CW_OSL.log[1:2]))

  ##select values to extrapolate and predict (extrapolate)
values based on the fitted function x.i<-data.frame(x=temp[1:(min(temp.sel.id)-1),1]) y.i<-predict(fit.lm,x.i) ##replace NA values by extrapolated values temp[1:length(y.i),2]<-y.i ##set method values temp.method<-c(rep("extrapolation",length(y.i)),rep("interpolation",(length(temp[,2])-length(y.i)))) ##print a warning message for more than two extrapolation points if(length(y.i)>2){warning("t' is beyond the time resolution and more than two data points have been extrapolated!")} # (4) Convert, transform and combine values --------------------------------------------------- ##unlog CW-OSL count values, i.e. log(CW) >> CW CW_OSL<-exp(temp$y) ##transform CW-OSL values to pLM-OSL values pLM<-1/P*t*CW_OSL ##combine all values and exclude NA values temp.values <- data.frame(x=t,y.t=pLM,x.t=t.transformed, method=temp.method) temp.values <- na.exclude(temp.values) # (5) Return values --------------------------------------------------------------------------- ##returns the same data type as the input if(is(values, "data.frame") == TRUE){ values <- temp.values return(values) }else{ ##add old info elements to new info elements temp.info <- c(values@info, CW2pLMi.x.t = list(temp.values$x.t), CW2pLMi.method = list(temp.values$method)) newRLumDataCurves.CW2pLMi <- set_RLum( class = "RLum.Data.Curve", recordType = values@recordType, data = as.matrix(temp.values[,1:2]), info = temp.info) return(newRLumDataCurves.CW2pLMi) } } Luminescence/R/calc_gSGC_feldspar.R0000644000176200001440000002603114264017373016646 0ustar liggesusers#'@title Calculate Global Standardised Growth Curve (gSGC) for Feldspar MET-pIRIR #' #'@description Implementation of the gSGC approach for feldspar MET-pIRIR by Li et al. 
(2015) #' #'@details ##TODO #' #'@param data [data.frame] (**required**): data frame with five columns per sample #'`c("LnTn", "LnTn.error", "Lr1Tr1", "Lr1Tr1.error","Dr1")` #' #'@param gSGC.type [character] (*with default*): growth curve type to be selected #'according to Table 3 in Li et al. (2015). Allowed options are #'`"50LxTx"`, `"50Lx"`, `"50Tx"`, `"100LxTx"`, `"100Lx"`, `"100Tx"`, `"150LxTx"`, #' `"150Lx"`, `"150Tx"`, `"200LxTx"`, `"200Lx"`, `"200Tx"`, `"250LxTx"`, `"250Lx"`, #' `"250Tx"` #' #'@param gSGC.parameters [data.frame] (*optional*): an own parameter set for the #'gSGC with the following columns `y1`, `y1_err`, `D1` #'`D1_err`, `y2`, `y2_err`, `y0`, `y0_err`. #' #'@param n.MC [numeric] (*with default*): number of Monte-Carlo runs for the #'error calculation #' #'@param plot [logical] (*with default*): enables/disables the control plot output #' #'@return Returns an S4 object of type [RLum.Results-class]. #' #' **`@data`**\cr #' `$ df` ([data.frame]) \cr #' `.. $DE` the calculated equivalent dose\cr #' `.. $DE.ERROR` error on the equivalent dose, which is the standard deviation of the MC runs\cr #' `.. $HPD95_LOWER` lower boundary of the highest probability density (95%)\cr #' `.. $HPD95_UPPER` upper boundary of the highest probability density (95%)\cr #' `$ m.MC` ([list]) numeric vector with results from the MC runs.\cr #' #' **`@info`**\cr #' `$ call`` ([call]) the original function call #' #' @section Function version: 0.1.0 #' #' @author Harrison Gray, USGS (United States), #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #' @seealso [RLum.Results-class], [get_RLum], [uniroot], [calc_gSGC] #' #' @references Li, B., Roberts, R.G., Jacobs, Z., Li, S.-H., Guo, Y.-J., 2015. #' Construction of a “global standardised growth curve” (gSGC) for infrared #' stimulated luminescence dating of K-feldspar 27, 119–130. 
\doi{10.1016/j.quageo.2015.02.010} #' #' @keywords datagen #' #' @examples #' #' ##test on a generated random sample #' n_samples <- 10 #' data <- data.frame( #' LnTn = rnorm(n=n_samples, mean=1.0, sd=0.02), #' LnTn.error = rnorm(n=n_samples, mean=0.05, sd=0.002), #' Lr1Tr1 = rnorm(n=n_samples, mean=1.0, sd=0.02), #' Lr1Tr1.error = rnorm(n=n_samples, mean=0.05, sd=0.002), #' Dr1 = rep(100,n_samples)) #' #' results <- calc_gSGC_feldspar( #' data = data, gSGC.type = "50LxTx", #' plot = FALSE) #' #' plot_AbanicoPlot(results) #' #'@md #'@export calc_gSGC_feldspar <- function ( data, gSGC.type = "50LxTx", gSGC.parameters, n.MC = 100, plot = FALSE ){ # Integrity checks -------------------------------------------------------- if (!is(data, "data.frame")) { stop("[calc_gSGC_feldspar()] 'data' needs to be of type data.frame.", call. = FALSE) } if (!is(gSGC.type[1], "character")) { stop("[calc_gSGC_feldspar()] 'gSGC.type' needs to be of type character.", call. = FALSE) } if (ncol(data) != 5) { stop("[calc_gSGC_feldspar()] Structure of 'data' does not fit the expectations.", call. 
= FALSE) } colnames(data) <- c("LnTn", "LnTn.error", "Lr1Tr1", "Lr1Tr1.error", "Dr1") # Parametrize ------------------------------------------------------------- params <- data.frame( # this is the data from Table 3 of Li et al., 2015 Type = c("50LxTx", "50Lx", "50Tx", "100LxTx", "100Lx", "100Tx", "150LxTx", "150Lx", "150Tx", "200LxTx", "200Lx", "200Tx", "250LxTx", "250Lx", "250Tx"), y1 = c( 0.57, 0.36, 0.2, 0.39, 0.41, 0.28, 0.43, 0.4, 0.31, 0.3, 0.34, 0.37, 0.37, 0.17, 0.48), y1_err = c( 0.19, 0.25, 0.24, 0.12, 0.28, 0.22, 0.11, 0.27, 0.33, 0.06, 0.28, 0.28, 0.1, 0.12, 0.37), D1 = c( 241, 276, 259, 159, 304, 310, 177, 327, 372, 119, 316, 372, 142, 197, 410), D1_err = c( 66, 137, 279, 48, 131, 220, 41, 132, 300, 32, 145, 218, 35, 116, 210), y2 = c( 0.88, 1.37, 0.34, 0.91, 1.22, 0.42, 0.88, 1.26, 0.45, 0.95, 1.24, 0.43, 0.74, 1.32, 0.45), y2_err = c( 0.15, 0.19, 0.15, 0.1, 0.23, 0.26, 0.09, 0.23, 0.18, 0.05, 0.25, 0.24, 0.09, 0.1, 0.15), D2 = c( 1115, 1187, 1462, 741, 1146, 2715, 801, 1157, 2533, 661, 1023, 2792, 545, 830, 2175), D2_err = c( 344, 287, 191, 105, 288, 639, 109, 263, 608, 49, 205, 709, 62, 79, 420), y0 = c( 0.008, 0.003, 0.685, 0.018, 0.01, 0.64, 0.026, 0.015, 0.61, 0.034, 0.02, 0.573, 0.062, 0.028, 0.455), y0_err = c( 0.009, 0.009, 0.014, 0.008, 0.008, 0.015, 0.006, 0.007, 0.014, 0.006, 0.006, 0.013, 0.005, 0.005, 0.011), D0_2.3 = c( 2000, 2450, 1420, 1420, 2300, 2900, 1500, 2340, 2880, 1320, 2080, 2980, 1000, 1780, 2500), D0_3 = c( 2780, 3280, 2520, 1950, 3100, 4960, 2060, 3130, 4760, 1780, 2800, 5120, 1380, 2360, 4060) ) # these are user specified parameters if they so desire if (!missing(gSGC.parameters)){ y1 <- gSGC.parameters$y1 y1_err <- gSGC.parameters$y1_err D1 <- gSGC.parameters$D1 D1_err <- gSGC.parameters$D1_err y2 <- gSGC.parameters$y2 y2_err <- gSGC.parameters$y2_err y0 <- gSGC.parameters$y0 y0_err <- gSGC.parameters$y0_err } else { if (gSGC.type[1] %in% params$Type){ # take the user input pIRSL temperature and assign the correct 
parameters index <- match(gSGC.type,params$Type) y1 <- params$y1[index] y1_err <- params$y1_err[index] D1 <- params$D1[index] D1_err <- params$D1_err[index] y2 <- params$y2[index] y2_err <- params$y2_err[index] D2 <- params$D2[index] D2_err <- params$D2_err[index] y0 <- params$y0[index] y0_err <- params$y0_err[index] } else { # give error if input is wrong stop( paste0("[calc_gSGC_feldspar()] 'gSGC.type' needs to be one of the accepted values, such as: ", paste(params$Type, collapse = ", ")), call. = FALSE) } } ##set function for uniroot ## function from Li et al., 2015 eq: 3 ## function that equals zero when the correct De is found. ## This is so uniroot can find the correct value or 'root' f <- function(De, Dr1, Lr1Tr1, LnTn, y1, D1, y2, D2, y0){ f_D <- y1 * (1 - exp(-De / D1)) + y2 * (1 - exp(-De / D2)) + y0 f_Dr <- y1 * (1 - exp(-Dr1 / D1)) + y2 * (1 - exp(-Dr1 / D2)) + y0 ##return(f_D/Lr1Tr1 - f_Dr/LnTn) ##TODO double check seems to be wrong return(f_Dr/Lr1Tr1 - f_D/LnTn) } # Run calculation --------------------------------------------------------- l <- lapply(1:nrow(data), function(i) { Lr1Tr1 <- data[i, "Lr1Tr1"] #assign user's input data Lr1Tr1.error <- data[i, "Lr1Tr1.error"] Dr1 <- data[i, "Dr1"] LnTn <- data[i, "LnTn"] LnTn.error <- data[i, "LnTn.error"] ## uniroot solution temp <- try({ uniroot( f, interval = c(0.1, 3000), tol = 0.001, Dr1 = Dr1, Lr1Tr1 = Lr1Tr1, LnTn = LnTn, y1 = y1, D1 = D1, y2 = y2, D2 = D2, y0 = y0, extendInt = "yes", check.conv = TRUE, maxiter = 1000) }, silent = TRUE) # solve for the correct De ## in case the initial uniroot solve does not work if(inherits(temp, "try-error")) { try(stop(paste0("[calc_gSGC_feldspar()] No solution was found for dataset: #", i,"! NA returned"), call. = FALSE)) return(NA) } De <- temp$root temp.MC.matrix <- matrix(nrow = n.MC, ncol = 8) # to estimate the error, use a monte carlo simulation. 
assume error in input data is gaussian # create a matrix colnames(temp.MC.matrix) <- c("LnTn", "Lr1Tr1","y1", "D1", "y2", "D2", "y0", "De") # simulate random values for each parameter temp.MC.matrix[, 1:7] <- matrix( rnorm(n.MC * 7, mean = c(LnTn, Lr1Tr1, y1, D1, y2, D2, y0), sd = c(LnTn.error, Lr1Tr1.error, y1_err, D1_err, y2_err, D2_err, y0_err)), ncol = 7, byrow = TRUE) # now use the randomly generated parameters to calculate De's with uniroot for (j in 1:n.MC){ temp2 <- try({ uniroot( f, interval = c(0.1, 3000), tol = 0.001, LnTn = temp.MC.matrix[j, 1], Lr1Tr1 = temp.MC.matrix[j, 2], y1 = temp.MC.matrix[j, 3], D1 = temp.MC.matrix[j, 4], y2 = temp.MC.matrix[j, 5], D2 = temp.MC.matrix[j, 6], y0 = temp.MC.matrix[j, 7], Dr1 = Dr1, extendInt = "yes", check.conv = TRUE, maxiter = 1000 ) }, silent = TRUE) if (!inherits(temp2, "try-error")){ temp.MC.matrix[j,8] <- temp2$root } else { # give an NA if uniroot cannot find a root (usually due to bad random values) temp.MC.matrix[j,8] <- NA } } # set the De uncertainty as the standard deviations of the randomly generated des De.error <- sd(temp.MC.matrix[, 8], na.rm = TRUE) return(list( DE = De, DE.ERROR = De.error, m.MC = temp.MC.matrix)) }) # Plotting ---------------------------------------------------------------- if(plot){ old.par <- par(no.readonly = TRUE) on.exit(par(old.par)) par(mfrow = c(mfrow = c(3,3))) for (i in 1:length(l)) { if(is.na(l[[i]][1])) next(); y_max <- max(l[[i]]$m.MC[, 1:2]) plot(NA, NA, xlab = "Dose [a.u.]", ylab = "Norm. 
Signal", xlim = c(0, 3000), main = paste0("Dataset #", i), ylim = c(0, y_max) ) for(j in 1:nrow(l[[i]]$m.MC)){ #y1 * (1 - exp(-De / D1)) + y2 * (1 - exp(-De / D2)) + y0 x <- NA curve( l[[i]]$m.MC[j, 3] * (1 - exp(-x / l[[i]]$m.MC[j, 4])) + l[[i]]$m.MC[j, 5] * (1 - exp(-x / l[[i]]$m.MC[j, 6])) + l[[i]]$m.MC[j, 7], col = rgb(0,0,0,0.4), add = TRUE) } par(new = TRUE) hist <- hist(na.exclude(l[[i]]$m.MC[, 8]), plot = FALSE ) hist$counts <- ((y_max/max(hist$counts)) * hist$counts) / 2 plot( hist, xlab = "", ylab = "", axes = FALSE, xlim = c(0, 3000), ylim = c(0, y_max), main = "" ) } } # Return ------------------------------------------------------------------ ##output matrix m <- matrix(ncol = 4, nrow = nrow(data)) ##calculate a few useful parameters for(i in 1:nrow(m)){ if(is.na(l[[i]][1])) next(); m[i,1] <- l[[i]]$DE m[i,2] <- l[[i]]$DE.ERROR HPD <- .calc_HPDI(na.exclude(l[[i]]$m.MC[,8])) m[i,3] <- HPD[1,1] m[i,4] <- HPD[1,2] } df <- data.frame( DE = m[, 1], DE.ERROR = m[, 2], HPD95_LOWER = m[, 3], HPD95_UPPER = m[, 4] ) return( set_RLum("RLum.Results", data = list( data = df, m.MC = lapply(l, function(x) {if(is.na(x[[1]])) {return(x)} else {x$m.MC} }) ), info = list( call = sys.call() ) )) } Luminescence/R/calc_OSLLxTxDecomposed.R0000644000176200001440000001407414236146743017472 0ustar liggesusers#' @title Calculate Lx/Tx ratio for decomposed CW-OSL signal components #' #' @description Calculate `Lx/Tx` ratios from a given set of decomposed #' CW-OSL curves decomposed by `[OSLdecomposition::RLum.OSL_decomposition]` #' #' @param OSL.component [integer] or [character] (*optional*): #' a single index or a name describing which OSL signal component shall be evaluated. #' This argument can either be the name of the OSL component assigned by #' `[OSLdecomposition::RLum.OSL_global_fitting]` or the index of component. #' Then `'1'` selects the fastest decaying component, `'2'` the #' second fastest and so on. If not defined, the fastest decaying component is selected. 
#' #' @param Lx.data [data.frame] (**required**): Component table created by #' `[OSLdecomposition::RLum.OSL_decomposition]` and per default located #' at `object@records[[...]]@info$COMPONENTS`.The value of `$n[OSL.component]` #' is set as `LnLx`. The value of `$n.error[OSL.component]` is set as `LnLx.error` #' #' @param Tx.data [data.frame] (*optional*): Component table created by #' `[OSLdecomposition::RLum.OSL_decomposition]` and per default located at #' `object@records[[...]]@info$COMPONENTS`. The value of `$n[OSL.component]` #' is set as `TnTx`. The value of `$n.error[OSL.component]` is set as `TnTx.error` #' #' @param sig0 [numeric] (*with default*): allows adding an extra error component #' to the final `Lx/Tx` error value (e.g., instrumental error). #' #' @param digits [integer] (*with default*): round numbers to the specified digits. #' If digits is set to `NULL` nothing is rounded. #' #' @return Returns an S4 object of type [RLum.Results-class]. #' #' Slot `data` contains a [list] with the following structure: #' #' **@data** #' ``` #' $LxTx.table (data.frame) #' .. $ LnLx #' .. $ TnTx #' .. $ Net_LnLx #' .. $ Net_LnLx.Error #' .. $ Net_TnTx #' .. $ Net_TnTx.Error #' .. $ LxTx #' .. $ LxTx.relError #' .. $ LxTx.Error #' ``` #' #' @section Function version: 0.1.0 #' #' @author Dirk Mittelstrass #' #' @seealso [RLum.Data.Curve-class], [plot_GrowthCurve], [analyse_SAR.CWOSL] #' #' @references Mittelstrass D., Schmidt C., Beyer J., Straessner A., 2019. #' Automated identification and separation of quartz CW-OSL signal components with R. 
#' talk presented at DLED 2019, Bingen, Germany #' [http://luminescence.de/OSLdecomp_talk.pdf]()\cr #' #' @keywords datagen #' @md #' @export calc_OSLLxTxDecomposed <- function( Lx.data, Tx.data = NULL, OSL.component = 1L, sig0 = 0, digits = NULL ){ # ToDo: # - Integrity checks for the component table # - Handle background-signal-component if present # - add Tx.data integrity checks # - add previous-residual-subtraction functionality # - add list with decomposition algorithm parameters to return object # - add example in documentation ##--------------------------------------------------------------------------## ## (1) - integrity checks if (!(is.data.frame(Lx.data) && (nrow(Lx.data) >= 1))) stop("[calc_OSLLxTxDecomposed()] No valid component data.frame for Lx value", call. = FALSE) if (!(is.null(Tx.data)) && !(is.data.frame(Tx.data) && (nrow(Tx.data) >= 1))) stop("[calc_OSLLxTxDecomposed()] No valid component data.frame for Tx value", call. = FALSE) # define the component component_index <- NA #select only the first element; we do this silently because it is clearly #written in the documentation OSL.component <- as.integer(OSL.component[1]) if (!(is.numeric(OSL.component) || is.character(OSL.component))) stop("[calc_OSLLxTxDecomposed()] Type error! No valid data type for OSL.component", call. = FALSE) # get component index from component name if (is.character(OSL.component)) { if (tolower(OSL.component) %in% tolower(Lx.data$name)) { component_index <- which(tolower(OSL.component) == tolower(Lx.data$name)) } else { stop(paste0("[calc_OSLLxTxDecomposed()] Invalid OSL component name! Valid names are: ", paste(Lx.data$name, collapse = ", ")), call. = FALSE) } } # if a numeric is given, check if it matches with any component index if (is.numeric(OSL.component)) { if (OSL.component %in% 1:nrow(Lx.data)) { component_index <- OSL.component # insert background-signal-component check here } else { stop(paste0("[calc_OSLLxTxDecomposed()] Invalid OSL component index! 
Component table has ", nrow(Lx.data), " rows.")) } } ##--------------------------------------------------------------------------## ## (2) - extract Lx and Tx values LnLx <- Lx.data$n[component_index] LnLx.Error <- Lx.data$n.error[component_index] TnTx <- 1 TnTx.Error <- 0 if (!is.null(Tx.data)) { TnTx <- Tx.data$n[component_index] TnTx.Error <- Tx.data$n.error[component_index] } ##combine results LnLxTnTx <- cbind( LnLx, LnLx.Error, TnTx, TnTx.Error ) # THE FOLLOWING CODE IS MOSTLY IDENTICAL WITH (4) IN calc_OSLLxTxRatio() ##--------------------------------------------------------------------------## ##(4) Calculate LxTx error according Galbraith (2014) #transform results in a data.frame LnLxTnTx <- as.data.frame(LnLxTnTx) #add col names colnames(LnLxTnTx)<-c("Net_LnLx", "Net_LnLx.Error", "Net_TnTx", "Net_TnTx.Error") ##calculate Ln/Tx LxTx <- LnLxTnTx$Net_LnLx/LnLxTnTx$Net_TnTx ##set NaN if(is.nan(LxTx)) LxTx <- 0 ##calculate Ln/Tx error LxTx.relError <- sqrt((LnLx.Error / LnLx)^2 + (TnTx.Error / TnTx)^2) LxTx.Error <- abs(LxTx * LxTx.relError) ##set NaN if(is.nan(LxTx.Error)) LxTx.Error <- 0 ##add an extra component of error LxTx.Error <- sqrt(LxTx.Error^2 + (sig0 * LxTx)^2) ##return combined values temp <- cbind(LnLxTnTx, LxTx, LxTx.Error) ##apply digits if wanted if(!is.null(digits)){ temp[1,] <- round(temp[1,], digits = digits) } # ToDo: Add decomposition algorithm parameters here # calc.parameters <- list(...) 
##set results object return(set_RLum( class = "RLum.Results", data = list( LxTx.table = temp), # calc.parameters = calc.parameters), info = list(call = sys.call()) )) } Luminescence/R/read_RF2R.R0000644000176200001440000001631314236146743014734 0ustar liggesusers#' @title Import RF-files to R #' #' @description Import files produced by the IR-RF 'ImageJ' macro (`SR-RF.ijm`; Mittelstraß and Kreutzer, 2021) into R and create a list of [RLum.Analysis-class] #' objects #' #' @details The results of spatially resolved IR-RF data are summarised in so-called RF-files ((Mittelstraß and Kreutzer, 2021). #' This functions provides an easy import to process the data seamlessly with the R package 'Luminescence'. #' The output of the function can be passed to the function [analyse_IRSAR.RF] #' #' @param file [character] (**required**): path and file name of the RF file. Alternatively a list of file #' names can be provided. #' #' @return Returns an S4 [RLum.Analysis-class] object containing #' [RLum.Data.Curve-class] objects for each curve. #' #' @seealso [RLum.Analysis-class], [RLum.Data.Curve-class], [analyse_IRSAR.RF] #' #' @author Sebastian Kreutzer, Geography & Earth Science, Aberystwyth University (United Kingdom) #' #' @section Function version: 0.1.0 #' #' @keywords IO #' #' @references Mittelstraß, D., Kreutzer, S., 2021. Spatially resolved infrared radiofluorescence: #' single-grain K-feldspar dating using CCD imaging. Geochronology 3, 299–319. 
\doi{10.5194/gchron-3-299-2021} #' #' @examples #' #' ##Import #' file <- system.file("extdata", "RF_file.rf", package = "Luminescence") #' temp <- read_RF2R(file) #' #' @md #' @export read_RF2R <- function(file) { # Self-call ----------------------------------------------------------------------------------- if(inherits(file, "list")){ results_list <- lapply(file, function(f){ temp <- try(read_RF2R(file = f), silent = TRUE) ##check whether it worked if(inherits(temp, "try-error")){ try( stop("[read_RF2R()] Import for file ", f, " failed. NULL returned!", call. = FALSE)) return(NULL) }else{ return(temp) } }) return(unlist(results_list, recursive = FALSE)) } # Integrity check ----------------------------------------------------------------------------- ##throw warning if we have a vector if(length(file) > 1){ warning("[read_RF2R()] 'file' has a length > 1. Only the first element was taken! If you want to import multiple files, 'file' has to be of type 'list'.", call. = TRUE) file <- file[1] } ##check input if(!inherits(file, "character")) stop("[read_RF2R()] 'file' needs to be of type character!", call. = FALSE) ##check whether file is available if(!file.exists(file)) stop("[read_RF2R()] File '", file, "' does not exist!", call. = FALSE) ##read first line to ensure the format vers_str <- readLines(file, 1) version_supported <- c("17-10-2018", "27-11-2018", "0.1.0") version_found <- regmatches(vers_str, regexpr("(?<=macro\\_version=)[0-9-.]+", vers_str, perl = TRUE)) if (!any(version_found %in% version_supported)) stop("[read_RF2R()] File format not supported!", call. 
= FALSE) # Import -------------------------------------------------------------------------------------- ##import the entire file temp <- readLines(file, warn = FALSE) # Extract information ------------------------------------------------------------------------- ##extract header (here as function; that might be useful in future) .extract_header <- function(x){ x <- gsub(pattern = "<", replacement = "", fixed = TRUE, x = x) x <- gsub(pattern = ">", replacement = "", fixed = TRUE, x = x) header <- strsplit(x = x, split = " ", fixed = TRUE)[[1]] header <- unlist(strsplit(x = header, split = "=", fixed = TRUE)) header_names <- header[seq(1, length(header), by = 2)] header <- as.list(header[seq(2, length(header), by = 2)]) names(header) <- header_names return(header) } header <- try(.extract_header(temp[1]), silent = TRUE) ##test the header if(inherits(header, 'try-error')){ try(stop("[read_RF2R()] Header extraction failed, try to continue without ... ", call. = FALSE)) header <- NA } ##extract tag boundaries framed by tags +++++++++++++++++++ ##the 2nd line corrects the inner boundaries ##(1) statistics id_statistics <- grep(pattern = "Statistics>", x = temp, fixed = TRUE) id_statistics <- c(id_statistics[1] + 1, id_statistics[2] - 1) ##(2) Natural (henceforth: RF_nat) id_RF_nat <- grep(pattern = "Natural>", x = temp, fixed = TRUE) id_RF_nat <- c(id_RF_nat[1] + 1, id_RF_nat[2] - 1) ##(3) Bleached (henceforth: RF_reg) id_RF_reg <- grep(pattern = "Bleached>", x = temp, fixed = TRUE) id_RF_reg <- c(id_RF_reg[1] + 1, id_RF_reg[2] - 1) ##extract content within the tags +++++++++++++++++++ ##(1) statistics ## ####header statistics_header <- strsplit(x = temp[id_statistics[1]], split = "\t", fixed = TRUE)[[1]][-1] ##data m_statistics <- as.data.frame(lapply((id_statistics[1]+1):(id_statistics[2]), function(x){ strsplit(x = temp[x], split = "\t", fixed = TRUE)[[1]] }), stringsAsFactors = FALSE) ##extract colnames colnames(m_statistics) <- unlist(strsplit( x = 
as.character(m_statistics[1, ]), split = ":", fixed = TRUE )) ##remove first df_statistics <- cbind(ROI = statistics_header, m_statistics[-1, ], stringsAsFactors = FALSE) ##(2) RF_nat ## ####header RF_nat_header <- strsplit(x = temp[id_RF_nat[1]], split = "\t", fixed = TRUE)[[1]] ##data m_RF_nat <- matrix( data = as.numeric(strsplit( x = paste(temp[(id_RF_nat[1] + 1):(id_RF_nat[2])], collapse = "\t"), split = "\t", fixed = TRUE )[[1]]), ncol = length(RF_nat_header), byrow = TRUE ) ##set colnames colnames(m_RF_nat) <- RF_nat_header ##(3) RF_reg ## ####header RF_reg_header <- strsplit(x = temp[id_RF_reg[1]], split = "\t", fixed = TRUE)[[1]] ##data m_RF_reg <- matrix( data = as.numeric(strsplit( x = paste(temp[(id_RF_reg[1] + 1):(id_RF_reg[2])], collapse = "\t"), split = "\t", fixed = TRUE )[[1]]), ncol = length(RF_reg_header), byrow = TRUE ) ##set colnames colnames(m_RF_reg) <- RF_reg_header # Create RLum.Analysis objects ---------------------------------------------------------------- object_list <- lapply(1:nrow(df_statistics), function(a){ ##set records records <- lapply(1:2, function(o) { if(o == 1){ temp_curve <- m_RF_nat[,c(2,2 + a)] }else{ temp_curve <- m_RF_reg[,c(2,2 + a)] } ##write curve set_RLum( class = "RLum.Data.Curve", originator = "read_RF2R", curveType = "measured", recordType = "RF", data = temp_curve ) }) ##create RLum.Analysis object set_RLum(class = "RLum.Analysis", originator = "read_RF2R", records = records, info = c( as.list(df_statistics[a,]), header )) }) # Return -------------------------------------------------------------------------------------- return(object_list) } Luminescence/R/merge_RLum.R0000644000176200001440000001003614464125673015262 0ustar liggesusers#' General merge function for RLum S4 class objects #' #' Function calls object-specific merge functions for RLum S4 class objects. #' #' The function provides a generalised access point for merge specific #' [RLum-class] objects. 
Depending on the input object, the #' corresponding merge function will be selected. Allowed arguments can be #' found in the documentations of each merge function. #' Empty list elements (`NULL`) are automatically removed from the input `list`. #' #' \tabular{lll}{ #' **object** \tab \tab **corresponding merge function** \cr #' [RLum.Data.Curve-class] \tab : \tab `merge_RLum.Data.Curve` \cr #' [RLum.Analysis-class] \tab : \tab `merge_RLum.Analysis` \cr #' [RLum.Results-class] \tab : \tab `merge_RLum.Results` #' } #' #' @param objects [list] of [RLum-class] (**required**): #' list of S4 object of class `RLum` #' #' @param ... further arguments that one might want to pass to the specific merge function #' #' @return Return is the same as input objects as provided in the list. #' #' @note So far not for every `RLum` object a merging function exists. #' #' @section Function version: 0.1.3 #' #' @author #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #' @seealso [RLum.Data.Curve-class], [RLum.Data.Image-class], #' [RLum.Data.Spectrum-class], [RLum.Analysis-class], [RLum.Results-class] #' #' #' @keywords utilities #' #' @examples #' #' #' ##Example based using data and from the calc_CentralDose() function #' #' ##load example data #' data(ExampleData.DeValues, envir = environment()) #' #' ##apply the central dose model 1st time #' temp1 <- calc_CentralDose(ExampleData.DeValues$CA1) #' #' ##apply the central dose model 2nd time #' temp2 <- calc_CentralDose(ExampleData.DeValues$CA1) #' #' ##merge the results and store them in a new object #' temp.merged <- get_RLum(merge_RLum(objects = list(temp1, temp2))) #' #' @md #' @export merge_RLum<- function( objects, ... ){ # Integrity check ---------------------------------------------------------- if(!inherits(objects, "list")) stop("[merge_RLum()] argument 'objects' needs to be of type list!", call. 
= FALSE) ##we are friendly and remove all empty list elements, this helps a lot if we place things ##we DO NOT provide a warning as this lower the computation speed in particular cases. objects <- objects[!sapply(objects, is.null)] ##if list is empty afterwards we do nothing if(length(objects) >= 1) { ##check if objects are of class RLum temp.class.test <- unique(sapply(1:length(objects), function(x) { if (!is(objects[[x]], "RLum")) { temp.text <- paste0( "[merge_RLum()]: At least element #", x, " is not of class 'RLum' or a derivative class!" ) stop(temp.text, call. = FALSE) } ##provide class of objects ... so far they should be similar is(objects[[x]])[1] })) ##check if objects are consistent if (length(temp.class.test) > 1) { ##This is not valid for RLum.Analysis objects if (!"RLum.Analysis" %in% temp.class.test) { stop("[merge_RLum()] So far only similar input objects in the list are supported!") } } ##grep object class objects.class <- ifelse("RLum.Analysis" %in% temp.class.test, "RLum.Analysis", temp.class.test) ##select which merge function should be used switch ( objects.class, RLum.Data.Image = stop( "[merge_RLum()] Sorry, merging of 'RLum.Data.Image' objects is currently not supported!" ), RLum.Data.Spectrum = stop( "[merge_RLum()] Sorry, merging of 'RLum.Data.Spectrum' objects is currently not supported!" ), RLum.Data.Curve = merge_RLum.Data.Curve(objects, ...), RLum.Analysis = merge_RLum.Analysis(objects, ...), RLum.Results = merge_RLum.Results(objects, ...) 
) }else{ warning("[merge_RLum()] Nothing was merged as the object list was found to be empty or contains only one object!") return(NULL) } } Luminescence/R/calc_gSGC.R0000644000176200001440000003040014264017373014761 0ustar liggesusers#' Calculate De value based on the gSGC by Li et al., 2015 #' #' Function returns De value and De value error using the global standardised growth #' curve (gSGC) assumption proposed by Li et al., 2015 for OSL dating of sedimentary quartz #' #' The error of the De value is determined using a Monte Carlo simulation approach. #' Solving of the equation is realised using [uniroot]. #' Large values for `n.MC` will significantly increase the computation time. #' #' #' @param data [data.frame] (**required**): #' input data of providing the following columns: `LnTn`, `LnTn.error`, `Lr1Tr1`, `Lr1Tr1.error`, `Dr1` #' **Note:** column names are not required. The function expect the input data in the given order #' #' @param gSGC.type [character] (*with default*): #' define the function parameters that #' should be used for the iteration procedure: Li et al., 2015 (Table 2) #' presented function parameters for two dose ranges: `"0-450"` and `"0-250"` #' #' @param gSGC.parameters [list] (*optional*): #' option to provide own function parameters used for fitting as named list. #' Nomenclature follows Li et al., 2015, i.e. `list(A,A.error,D0,D0.error,c,c.error,Y0,Y0.error,range)`, #' range requires a vector for the range the function is considered as valid, e.g. `range = c(0,250)`\cr #' Using this option overwrites the default parameter list of the gSGC, meaning the argument #' `gSGC.type` will be without effect #' #' @param n.MC [integer] (*with default*): #' number of Monte Carlo simulation runs for error estimation, see details. #' #' @param verbose [logical]: #' enable or disable terminal output #' #' @param plot [logical]: #' enable or disable graphical feedback as plot #' #' @param ... 
parameters will be passed to the plot output #' #' @return Returns an S4 object of type [RLum.Results-class]. #' #' **`@data`**\cr #' `$ De.value` ([data.frame]) \cr #' `.. $ De` \cr #' `.. $ De.error` \cr #' `.. $ Eta` \cr #' `$ De.MC` ([list]) contains the matrices from the error estimation.\cr #' `$ uniroot` ([list]) contains the [uniroot] outputs of the De estimations\cr #' #' **`@info`**\cr #' `$ call`` ([call]) the original function call #' #' #' @section Function version: 0.1.1 #' #' @author #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #' @seealso [RLum.Results-class], [get_RLum], [uniroot] #' #' @references #' Li, B., Roberts, R.G., Jacobs, Z., Li, S.-H., 2015. Potential of establishing #' a 'global standardised growth curve' (gSGC) for optical dating of quartz from sediments. #' Quaternary Geochronology 27, 94-104. doi:10.1016/j.quageo.2015.02.011 #' #' @keywords datagen #' #' @examples #' #' results <- calc_gSGC(data = data.frame( #' LnTn = 2.361, LnTn.error = 0.087, #' Lr1Tr1 = 2.744, Lr1Tr1.error = 0.091, #' Dr1 = 34.4)) #' #' get_RLum(results, data.object = "De") #' #' @md #' @export calc_gSGC<- function( data, gSGC.type = "0-250", gSGC.parameters, n.MC = 100, verbose = TRUE, plot = TRUE, ... ){ ##============================================================================## ##CHECK INPUT DATA ##============================================================================## if(!is(data, "data.frame")) stop("[calc_gSGC()] 'data' needs to be of type data.frame.", call. = FALSE) if(!is(gSGC.type, "character")) stop("[calc_gSGC()] 'gSGC.type' needs to be of type character.", call. = FALSE) ##check length of input data if(ncol(data) != 5) stop("[calc_gSGC()] Structure of 'data' does not fit the expectations.", call. 
= FALSE) ##rename columns for consistency reasons colnames(data) <- c('LnTn', 'LnTn.error', 'Lr1Tr1', 'Lr1Tr1.error', 'Dr1') ##============================================================================## ##DEFINE FUNCTION ##============================================================================## ##define function, nomenclature according to publication that should be solved f <- function(x,A,D0,c,Y0,Dr1,Lr1Tr1,LnTn) { (((A * (1 - exp( - Dr1 / D0))) + c * Dr1 + Y0)/Lr1Tr1) - (((A * (1 - exp( - x/D0))) + c * x + Y0)/LnTn) } ##set general parameters if (!missing(gSGC.parameters)) { A <- gSGC.parameters$A A.error <- gSGC.parameters$A.error D0 <- gSGC.parameters$D0 D0.error <- gSGC.parameters$D0.error c <- gSGC.parameters$c c.error <- gSGC.parameters$c.error Y0 <- gSGC.parameters$Y0 Y0.error <- gSGC.parameters$Y0.error range <- gSGC.parameters$range }else{ if (gSGC.type == "0-450") { A <- 0.723 A.error <- 0.014 D0 <- 65.1 D0.error <- 0.9 c <- 0.001784 c.error <- 0.000016 Y0 <- 0.009159 Y0.error <- 0.004795 range <- c(0.1,250) }else if (gSGC.type == "0-250") { A <- 0.787 A.error <- 0.051 D0 <- 73.9 D0.error <- 2.2 c <- 0.001539 c.error <- 0.000068 Y0 <- 0.01791 Y0.error <- 0.00490 range <- c(0.1,250) }else{ stop("[calc_gSGC()] Unknown input for 'gSGC.type'", call. 
= FALSE) } } ##Define size of output objects output.data <- data.table::data.table( DE = numeric(length = nrow(data)), DE.ERROR = numeric(length = nrow(data)), ETA = numeric(length = nrow(data)) ) ##set list for De.MC output.De.MC <- vector("list", nrow(data)) ##set list for uniroot output.uniroot <- vector("list", nrow(data)) ##============================================================================## ##CALCULATION ##============================================================================## for(i in 1:nrow(data)){ Lr1Tr1 <- data[i, "Lr1Tr1"] Lr1Tr1.error <- data[i,"Lr1Tr1.error"] Dr1 <- data[i,"Dr1"] Dr1.error <- data[i,"Dr1.error"] LnTn <- data[i,"LnTn"] LnTn.error <- data[i,"LnTn.error"] ##calculate mean value temp <- try(uniroot( f, interval = c(0.1,450), tol = 0.001, A = A, D0 = D0, c = c, Y0 = Y0, Dr1 = Dr1, Lr1Tr1 = Lr1Tr1, LnTn = LnTn, extendInt = 'yes', check.conv = TRUE, maxiter = 1000 ), silent = TRUE) if(!inherits(temp, "try-error")){ ##get De De <- temp$root ##calculate Eta, which is the normalisation factor Eta <- ((A * (1 - exp( - Dr1 / D0))) + c * Dr1 + Y0)/Lr1Tr1 ##--------------------------------------------------------------------------## ##Monte Carlo simulation for error estimation ##set matrix temp.MC.matrix <- matrix(nrow = n.MC, ncol = 8) ##fill matrix temp.MC.matrix[,1:6] <- matrix(rnorm( n.MC * 6, mean = c(LnTn, Lr1Tr1, A, D0, c, Y0), sd = c(LnTn.error, Lr1Tr1.error, A.error, D0.error, c.error, Y0.error) ), ncol = 6, byrow = TRUE) ##run uniroot to get the De temp.MC.matrix[,7] <- vapply(X = 1:n.MC, FUN = function(x){ uniroot(f, interval = c(0.1,450), tol = 0.001, A = temp.MC.matrix[x,3], D0 = temp.MC.matrix[x,4], c = temp.MC.matrix[x,5], Y0 = temp.MC.matrix[x,6], Dr1 = Dr1, Lr1Tr1 =temp.MC.matrix[x,2], LnTn = temp.MC.matrix[x,1], check.conv = TRUE, extendInt = 'yes', maxiter = 1000 )$root }, FUN.VALUE = vector(mode = "numeric", length = 1)) ##calculate also the normalisation factor temp.MC.matrix[,8] <- (temp.MC.matrix[,3] * (1 - 
exp( - Dr1 / temp.MC.matrix[,4])) + temp.MC.matrix[,5] * Dr1 + temp.MC.matrix[,6])/temp.MC.matrix[,2] ##re-name matrix colnames(temp.MC.matrix) <- c("LnTn","Lr1Tr1","A","D0","c","Y0","De","Eta") ##get De error as SD De.error <- sd(temp.MC.matrix[,7]) }else{ warning("No solution was found!", call. = FALSE) De <- NA Eta <- NA De.error <- NA ##set matrix temp.MC.matrix <- matrix(nrow = n.MC, ncol = 8) ##fill matrix temp.MC.matrix[,1:6] <- matrix(rnorm( n.MC * 6, mean = c(LnTn, Lr1Tr1, A, D0, c, Y0), sd = c(LnTn.error, Lr1Tr1.error, A.error, D0.error, c.error, Y0.error) ), ncol = 6, byrow = TRUE) } # Plot output ------------------------------------------------------------- if (plot) { ##set plot settings plot.settings <- list( main = "gSGC and resulting De", xlab = "Dose [a.u.]", ylab = expression(paste("Re-norm. ", L[x]/T[x])), xlim = NULL, ylim = NULL, lwd = 1, lty = 1, pch = 21, col = "red", grid = expression(nx = 10, ny = 10), mtext = "" ) plot.settings <- modifyList(plot.settings, list(...)) ##graphical feedback x <- NA curve( A * (1 - exp(-x / D0)) + c * x + Y0, from = 0, to = 500, xlab = plot.settings$xlab, ylab = plot.settings$ylab, main = plot.settings$main, xlim = plot.settings$xlim, ylim = plot.settings$ylim, lwd = plot.settings$lwd, lty = plot.settings$lty ) mtext(side = 3, plot.settings$mtext) if(!is.null(plot.settings$grid)){ graphics::grid(eval(plot.settings$grid)) } if(!inherits(temp, "try-error")){ if(temp$root < 450 & temp$root > 0){ points(temp$root,Eta*LnTn, col = plot.settings$col, pch = plot.settings$pch) segments(De - De.error,Eta * LnTn, De + De.error,Eta * LnTn) hist <- hist( temp.MC.matrix[, 7], freq = FALSE, add = TRUE, col = rgb(0, 0, 0, 0.2), border = rgb(0, 0, 0, 0.5) ) lines(hist$mids,hist$density) }else{ if(temp$root < 450){ shape::Arrows( x0 = 450, y0 = par()$usr[4] - 0.2, x1 = 500, y1 = par()$usr[4] - 0.2, arr.type = "triangle", col = "red" ) }else{ shape::Arrows( x0 = 50, y0 = par()$usr[4] - 0.2, x1 = 0, y1 = par()$usr[4] - 0.2, 
arr.type = "triangle", col = "red" ) } mtext(side = 1, text = "Out of bounds!", col = "red") } }else{ mtext(side = 1, text = "No solution found!", col = "red") } } # Terminal output --------------------------------------------------------- if (verbose) { cat("\n[calc_gSGC()]") cat("\n Corresponding De based on the gSGC\n") cat(paste0("\n"," Ln/Tn:\t\t ",LnTn," \u00B1 ", LnTn.error,"\n")) cat(paste0(""," Lr1/Tr1:\t ",Lr1Tr1," \u00B1 ", Lr1Tr1.error,"\n")) cat(paste0(""," Dr1:\t\t ",Dr1,"\n")) cat(paste0(""," f(D):\t\t ",A," * (1 - exp(-D /",D0,")) + c * D + ",Y0,"\n")) cat(paste0(""," n.MC:\t\t ",n.MC,"\n")) cat(paste0(" ------------------------------ \n")) cat(paste0(" De:\t\t",round(De,digits = 2)," \u00B1 ",round(De.error,digits = 2),"\n")) cat(paste0(" ------------------------------ \n")) } ##============================================================================## ##CREATE OUTPUT OBJECTS ##============================================================================## ##needed for data.table temp.De <- De temp.De.error <- De.error temp.Eta <- Eta ##replace values in the data.table with values output.data[i, `:=` (DE = temp.De, DE.ERROR = temp.De.error, ETA = temp.Eta)] rm(list = c('temp.De', 'temp.De.error', 'temp.Eta')) ##matrix - to prevent memory overload limit output if(n.MC * nrow(data) > 1e6){ if(i == 1){ output.De.MC[[i]] <- temp.MC.matrix }else{ output.De.MC[[i]] <- NA } warning("Only the first MC matrix is returned to prevent memory overload!", call. 
= FALSE) }else{ output.De.MC[[i]] <- temp.MC.matrix } output.uniroot[[i]] <- temp }##end for loop ##============================================================================## ##OUTPUT RLUM ##============================================================================## temp.RLum.Results <- set_RLum( class = "RLum.Results", data = list( De = as.data.frame(output.data), De.MC = output.De.MC, uniroot = output.uniroot ), info = list( call = sys.call()) ) return(temp.RLum.Results) } Luminescence/R/RLum.Data-class.R0000644000176200001440000000120114264017373016043 0ustar liggesusers#' Class `"RLum.Data"` #' #' Generalized virtual data class for luminescence data. #' #' #' @name RLum.Data-class #' #' @docType class #' #' @note Just a virtual class. #' #' @section Objects from the Class: #' A virtual Class: No objects can be created from it. #' #' @section Class version: 0.2.1 #' #' @author #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #' @seealso [RLum-class], [RLum.Data.Curve-class], #' [RLum.Data.Spectrum-class] #' #' @keywords classes internal #' #' @examples #' #' showClass("RLum.Data") #' #' @md #' @export setClass("RLum.Data", contains = c("RLum", "VIRTUAL") ) Luminescence/R/extract_ROI.R0000644000176200001440000001657214264017373015415 0ustar liggesusers#'@title Extract Pixel Values through Circular Region-of-Interests (ROI) from an Image #' #'@description Light-weighted function to extract pixel values from pre-defined regions-of-interest (ROI) from #'[RLum.Data.Image-class], [array] or [matrix] objects and provide simple image processing #'capacity. The function is limited to circular ROIs. #' #'@details The function uses a cheap approach to decide whether a pixel lies within #'a circle or not. 
It assumes that pixel coordinates are integer values and #'that a pixel centring within the circle is satisfied by: #' #'\deqn{x^2 + y^2 <= (d/2)^2} #' #'where \eqn{x} and \eqn{y} are integer pixel coordinates and \eqn{d} is the integer #'diameter of the circle in pixel. #' #'@param object [RLum.Data.Image-class], [array] or [matrix] (**required**): input image data #' #'@param roi [matrix] (**required**): matrix with three columns containing the centre coordinates #'of the ROI (first two columns) and the diameter of the circular ROI. All numbers must by of type [integer] #'and will forcefully coerced into such numbers using `as.integer()` regardless. #' #'@param roi_summary (**with default**): if `"mean"` (the default) defines what is returned #'in the element `roi_summary`; alternatively `"mean"`, `"median"`, `"sd"` or `"sum"` can be chosen. #'Pixel values are conveniently summarised using the above defined keyword. #' #'@param plot [logical] (*optional*): enables/disables control plot. Only the first #'image frame is shown #' #'@return [RLum.Results-class] object with the following elements: #'`..$roi_signals`: a named [list] with all ROI values and their coordinates #'`..$roi_summary`: an [matrix] where rows are frames from the image, and columns are different ROI #'The element has two attributes: `summary` (the method used to summarise pixels) and `area` (the pixel area) #'`..$roi_coord`: a [matrix] that can be passed to [plot_ROI] #' #'If `plot = TRUE` a control plot is returned. 
#' #'@section Function version: 0.1.0 #' #'@author #'Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #'@seealso [RLum.Data.Image-class] #' #'@keywords manip #' #'@examples #' #' m <- matrix(runif(100,0,255), ncol = 10, nrow = 10) #' roi <- matrix(c(2.,4,2,5,6,7,3,1,1), ncol = 3) #' extract_ROI(object = m, roi = roi, plot = TRUE) #' #'@md #'@export extract_ROI <- function( object, roi, roi_summary = "mean", plot = FALSE ){ # Self call --------------------------------------------------------------- if (is(object, "list")) return(merge_RLum(lapply(object, extract_ROI, roi = roi, plot = plot))) # Check input ------------------------------------------------------------- ## check input for ROIs if (!is.matrix(roi) || nrow(roi) < 1 || ncol(roi) < 3) stop("[extract_ROI()] Please check the format of roi, it looks wrong!", call. = FALSE) ## check input for object if (!is(object, "matrix") && !is(object, "array") && !is(object, "RLum.Data.Image")) stop("[extract_ROI()] Input for argument 'object' not supported!", call. 
= FALSE) ## calculate the radius roi <- roi[,1:3] roi[,3] <- ceiling(roi[,3]/2) ## make sure that we have integer values only in the matrix roi[] <- as.integer(roi) ## copy object (to not work on the input data) a <- object ## try to convert into something meaningful if (is(object, "RLum.Data.Image")) a <- object@data if (is(object, "matrix")) a <- array(data = object, dim = c(nrow(object), ncol(object), 1)) # Helper function --------------------------------------------------------- .extract_pixel <- function(m, r, mid) { ## get row - column combinations grid <- as.matrix(expand.grid(x = 1:nrow(m), y = 1:ncol(m))) ## adjust values for mid point ## + get pixel coordinates if within the circle px_id <- grid[(grid[,"x"] - mid[1])^2 + (grid[,"y"] - mid[2])^2 <= r[1]^2,] ## extract values from matrix px_extract <- NA if (nrow(px_id) > 0) { px_extract <- vapply(1:nrow(px_id), function(x) { m[px_id[x,1],px_id[x,2]] }, numeric(1)) } attr(px_extract, "coord") <- px_id return(px_extract) } # Extract ROIs ------------------------------------------------------------ roi_signals <- lapply(1:nrow(roi), function(x){ ## iterate through a stack if needed temp <- lapply(1:(dim(a)[3]), function(z){ .extract_pixel(a[,,z], roi[x,3], mid = roi[x,1:2]) }) ## compile into matrix m <- matrix(unlist(temp), nrow = length(temp[[1]])) ## add attributes ... 
including coordinates; but only one time colnames(m) <- paste0("frame_", 1:ncol(m)) attr(m, "px_coord") <- attr(temp[[1]], "coord") return(m) }) ## add names names(roi_signals) <- paste0("ROI_", 1:nrow(roi)) # Plot check -------------------------------------------------------------- if (plot) { ## this is a control plot, so we plot only the first image; nothing more ## image graphics::image( x = 1:nrow(a[, , 1]), y = 1:ncol(a[, , 1]), a[, , 1], ylab = "y-dim [px]", xlab = "x-dim [px]", useRaster = TRUE, main = "extract_ROIs() - control plot") box() ## visualise ROIs overlay <- a[,,1] overlay[] <- 0 for (i in 1:length(roi_signals)) overlay[attr(roi_signals[[i]], "px_coord")[,1], attr(roi_signals[[i]], "px_coord")[,2]] <- 1 ## marked ROIs graphics::image( x = 1:nrow(a[, , 1]), y = 1:ncol(a[, , 1]), overlay, axes = FALSE, add = TRUE, useRaster = TRUE, col = c(rgb(1, 1, 1, 0), rgb(0, 1, 0, 0.5))) ## add circles and points for (i in 1:nrow(roi)) { lines(shape::getellipse(rx = roi[i, 3], mid = c(roi[i, 1:2], dr = 0.1)), col = "red", lwd = 1.5) text(x = roi[i,1], y = roi[i,2], i, col = "black", cex = 1.2) } } # ROI summary ------------------------------------------------------------- ## set roi fun and avoid add input if(!any(roi_summary[1]%in%c("mean", "median", "sd", "sum"))) stop("[extract_ROI()] roi_summary method not supported, check manual!", call. 
= FALSE) ## set function roi_fun <- roi_summary[1] ## create summary using matrixStats roi_summary <- matrix(unlist( switch(roi_fun, "mean" = lapply(roi_signals, matrixStats::colMeans2), "median" = lapply(roi_signals, matrixStats::colMedians), "sd" = lapply(roi_signals, matrixStats::colSds), "sum" = lapply(roi_signals, matrixStats::colSums2))), ncol = length(roi_signals)) ## set names to make it easier colnames(roi_summary) <- names(roi_signals) rownames(roi_summary) <- paste0("frame_", 1:nrow(roi_summary)) attr(roi_summary, "summary") <- roi_fun attr(roi_summary, "area") <- vapply(roi_signals, nrow, numeric(1)) ## add more roi information to the output for further processing roi <- cbind( ROI = 1:nrow(roi), x = roi[,1], y = roi[,2], area = attr(roi_summary, "area"), width = vapply(roi_signals, function(x) diff(range(attr(x, "px_coord")[,"x"])), numeric(1)), height = vapply(roi_signals, function(x) diff(range(attr(x, "px_coord")[,"y"])), numeric(1)), img_width = nrow(a[, , 1]), img_height = ncol(a[, , 1]), grain_d = roi[,3]) # Return ------------------------------------------------------------------ return( set_RLum( class = "RLum.Results", data = list( roi_signals = roi_signals, roi_summary = roi_summary, roi_coord = roi), info = list( call = sys.call()))) } Luminescence/R/get_rightAnswer.R0000644000176200001440000000063314236146743016360 0ustar liggesusers#' Function to get the right answer #' #' This function returns just the right answer #' #' @param ... you can pass an infinite number of further arguments #' #' @return Returns the right answer #' #' @section Function version: 0.1.0 #' #' @author inspired by R.G. #' #' @examples #' #' ## you really want to know? #' get_rightAnswer() #' #' @md #' @export get_rightAnswer <- function(...) 
{ return(46) } Luminescence/R/analyse_portableOSL.R0000644000176200001440000005711114521207352017120 0ustar liggesusers#' @title Analyse portable CW-OSL measurements #' #' @description The function analyses CW-OSL curve data produced by a SUERC portable OSL reader and #' produces a combined plot of OSL/IRSL signal intensities, OSL/IRSL depletion ratios #' and the IRSL/OSL ratio. #' #' @details This function only works with [RLum.Analysis-class] objects produced by [read_PSL2R]. #' It further assumes (or rather requires) an equal amount of OSL and IRSL curves that #' are pairwise combined for calculating the IRSL/OSL ratio. For calculating the depletion ratios #' the cumulative signal of the last n channels (same number of channels as specified #' by `signal.integral`) is divided by cumulative signal of the first n channels (`signal.integral`). #' #' **Note: The function assumes the following sequence pattern: `DARK COUNT`, `IRSL`, `DARK COUNT`, `BSL`, `DARK COUNT`. If you have written a different sequence, the analysis function will (likely) not work!**. #' #' **Signal processing** #' The function processes the signals as follows: `BSL` and `IRSL` signals are extracted using the #' chosen signal integral, dark counts are taken in full. #' #' **Working with coordinates** #' Usually samples are taken from a profile with a certain stratigraphy. In the past the function #' calculated an index. With this newer version, you have two option of passing on xy-coordinates #' to the function: #' #' * (1) Add coordinates to the sample name during measurement. The form is rather #' strict and has to follow the scheme `_x:|y:`. Example: #' `sample_x:0.2|y:0.4`. #' #' * (2) Alternatively, you can provide a [list] or [matrix] with the sample coordinates. #' Example: `coord = list(c(0.2, 1), c(0.3,1.2))` #' #' Please note that the unit is meter (m) and the function expects always xy-coordinates. #' The latter one is useful for surface interpolations. 
If you have measured a profile where #' the x-coordinates to not measure, x-coordinates should be 0. #' #' @param object [RLum.Analysis-class] (**required**): [RLum.Analysis-class] object produced by [read_PSL2R]. #' The input can be a [list] of such objects, in such case each input is treated as a separate sample #' and the results are merged. #' #' @param signal.integral [numeric] (**required**): A vector of two values specifying the lower and upper channel used to calculate the OSL/IRSL signal. Can be provided in form of `c(1, 5)` or `1:5`. #' #' @param invert [logical] (*with default*): `TRUE` flip the plot the data in reverse order. #' #' @param normalise [logical] (*with default*): `TRUE` to normalise the OSL/IRSL signals #' to the *mean* of all corresponding data curves. #' #' @param mode [character] (*with default*): defines the analysis mode, allowed #' are `"profile"` (the default) and `"surface"` for surface interpolation. If you select #' something else, nothing will be plotted (similar to `plot = FALSE`). #' #' @param coord [list] [matrix] (*optional*): a list or matrix of the same length as #' number of samples measured with coordinates for the sampling positions. Coordinates #' are expected to be provided in meter (unit: m). #' Expected are x and y coordinates, e.g., #' `coord = list(samp1 = c(0.1, 0.2)`. If you have not measured x coordinates, please x should be 0. #' #' @param plot [logical] (*with default*): enable/disable plot output #' #' @param ... other parameters to be passed to modify the plot output. #' Supported are `run` to provide the run name , #' if the input is a `list`, this is set automatically. 
Further plot parameters are #' `surface_values` ([character] with value to plot), `legend` (`TRUE`/`FALSE`), `col_ramp` (for #' surface mode), `contour` (contour lines `TRUE`/`FALSE` in surface mode), `grid` (`TRUE`/`FALSE`), `col`, `pch` (for profile mode), `xlim` (a name [list] for profile mode), `ylim`, #' `zlim` (surface mode only), `ylab`, `xlab`, `zlab` (here x-axis labelling), `main`, `bg_img` (for #' profile mode background image, usually a profile photo; should be a raster object), #' `bg_img_positions` (a vector with the four corner positions, cf. [graphics::rasterImage]) #' #' @return #' Returns an S4 [RLum.Results-class] object with the following elements: #' #' `$data`\cr #' `.. $summary`: [data.frame] with the results\cr #' `.. $data`: [list] with the [RLum.Analysis-class] objects\cr #' `.. $args`: [list] the input arguments #' #' @seealso [RLum.Analysis-class], [RLum.Data.Curve-class], [read_PSL2R] #' #' @author Christoph Burow, University of Cologne (Germany), Sebastian Kreutzer, #' Institute of Geography, Ruprecht-Karl University of Heidelberg, Germany #' #' @section Function version: 0.1.0 #' #' @keywords datagen plot #' #' @examples #' #' ## example profile plot #' # (1) load example data set #' data("ExampleData.portableOSL", envir = environment()) #' #' # (2) merge and plot all RLum.Analysis objects #' merged <- merge_RLum(ExampleData.portableOSL) #' plot_RLum( #' object = merged, #' combine = TRUE, #' records_max = 5, #' legend.pos = "outside") #' merged #' #' # (3) analyse and plot #' results <- analyse_portableOSL( #' merged, #' signal.integral = 1:5, #' invert = FALSE, #' normalise = TRUE) #' get_RLum(results) #' #' @md #' @export analyse_portableOSL <- function( object, signal.integral = NULL, invert = FALSE, normalise = FALSE, mode = "profile", coord = NULL, plot = TRUE, ...) 
{ ## TODO ## - add tests for background image option ## - clear docu # Self-call --------------------------------------------------------------- if (inherits(object, "list")) { temp <- .warningCatcher(lapply(1:length(object), function(x) { analyse_portableOSL( object = object[[x]], signal.integral = signal.integral, invert = invert, normalise = normalise, plot = plot, run = paste0("RUN #", x)) })) return(merge_RLum(temp)) } # Start function ---------------------------------------------------------- ## INPUT VERIFICATION ---- ## only RLum.Analysis objects if (!inherits(object, "RLum.Analysis")) stop("[analyse_portableOSL()] Only objects of class 'RLum.Analysis' are allowed.", call. = FALSE) ## only curve objects if (!all(sapply(object, class) == "RLum.Data.Curve")) stop("[analyse_portableOSL()] The 'RLum.Analysis' object must contain only objects of class 'RLum.Data.Curve'.", call. = FALSE) ## check originator if (!all(sapply(object, function(x) x@originator) == "read_PSL2R")) stop("[analyse_portableOSL()] Only objects originating from 'read_PSL2R()' are allowed.", call. = FALSE) ## check sequence pattern if(!all(names(object)[1:5] == c("USER", "IRSL", "USER", "OSL", "USER"))) stop("[analyse_portableOSL()] Sequence pattern not supported, please read manual for details!", call. = FALSE) if (is.null(signal.integral)) { signal.integral <- c(1, 1) warning("No value for 'signal.integral' provided. Only the first data point of each curve was used!", call. = FALSE) } ## set SAMPLE -------- if("run" %in% names(list(...))) run <- list(...)$run else if (!is.null(object@info$Run_Name)) run <- object@info$Run_Name else run <- "Run #1" ## CALCULATIONS ---- ## Note: the list ... 
unlist construction is used make sure that get_RLum() always ## returns a list ### get OSL ------- OSL <- .unlist_RLum(list(get_RLum(object, recordType = "OSL"))) OSL <- do.call(rbind, lapply(OSL, function(x) { .posl_get_signal(x, signal.integral) })) ### get IRSL ------- IRSL <- .unlist_RLum(list(get_RLum(object, recordType = "IRSL"))) IRSL <- do.call(rbind, lapply(IRSL, function(x) { .posl_get_signal(x, signal.integral) })) ### get DARK counts ---------- ### we assume that USER contains the dark count measurements DARK_COUNT <- .unlist_RLum(list(get_RLum(object, recordType = "USER"))) DARK_COUNT <- lapply(seq(1,length(DARK_COUNT),3), function(x) DARK_COUNT[x:(x+2)]) DARK_COUNT <- do.call(rbind, lapply(DARK_COUNT, function(x) { .posl_get_dark_count(x) })) ### NORMALISE ---- if (normalise) { OSL <- .posl_normalise(OSL) IRSL <- .posl_normalise(IRSL) } ### OSL/IRSL Ratio ------- RATIO <- IRSL$sum_signal / OSL$sum_signal ### extract coordinates ------- if(is.null(coord)) { coord <- .extract_PSL_coord(object) } else { if(!inherits(coord, "matrix") && !inherits(coord, "list")) stop("[analyse_portableOSL()] Argument 'coord' needs to be a matrix or list!", call. = FALSE) if(inherits(coord, "list")) coord <- do.call(rbind, coord) ## check length if(nrow(coord) != length(OSL$sum_signal)) stop("[analyse_portableOSL()] Number of coordinates differ from the number of samples!", call. 
= FALSE) } ### GENERATE SUMMARY data.frame ----- summary <- data.frame( ID = seq_along(OSL$sum_signal), RUN = run, BSL = OSL$sum_signal, BSL_error = OSL$sum_signal_err, IRSL = IRSL$sum_signal, IRSL_error = IRSL$sum_signal_err, BSL_depletion = OSL$sum_signal_depletion, IRSL_depletion = IRSL$sum_signal_depletion, IRSL_BSL_RATIO = RATIO, DARK = DARK_COUNT$mean_dark_count, DARK_error = DARK_COUNT$sd_dark_count, COORD_X = coord[,1], COORD_Y = coord[,2] ) ## if coordinates exist, sort by depth if(!any(is.na(coord[,2]))) summary <- summary[order(coord[,2]),] ### INVERT ---------- if(invert) summary <- summary[nrow(summary):1,] # PLOTTING ------------------------------------------------------------------- ## generate list of plot matrices ## this done to have consistent settings for all plot types parm <- c("BSL", "BSL_error", "IRSL", "IRSL_error", "BSL_depletion", "IRSL_depletion", "IRSL_BSL_RATIO", "DARK", "DARK_error") m_list <- lapply(parm, function(x){ cbind(x = summary[["COORD_X"]], y = summary[["COORD_Y"]], value = summary[[x]]) }) ## correct names of the list names(m_list) <- parm ## add a few attributes to be used later attr(m_list, "xlim") <- lapply(m_list, function(x) range(x[,1])) attr(m_list, "ylim") <- if(invert) rev(range(m_list[[1]][,2])) else range(m_list[[1]][,2]) attr(m_list, "zlim") <- lapply(m_list, function(x) range(x[,3])) ## account for surface case if (!is.null(mode) && mode == "surface") { attr(m_list, "ylim") <- if (invert) rev(range(summary$COORD_Y)) else range(summary$COORD_Y) attr(m_list, "xlim") <- range(summary$COORD_X) } if (!is.null(mode) && plot[1]) { ## account for surface case ## preset plot settings ## plot settings ------- plot_settings <- modifyList( x = list( col_ramp = grDevices::heat.colors(30, rev = TRUE, alpha = 0.5), bg_img = NULL, bg_img_positions = NULL, surface_value = c("BSL", "IRSL", "IRSL_BSL_RATIO"), legend = TRUE, col = c("blue", "red", "blue", "red", "black", "grey"), pch = rep(16, length(m_list)), xlim = attr(m_list, 
"xlim"), ylim = attr(m_list, "ylim"), zlim = if(mode == "surface") NA else attr(m_list, "zlim"), ylab = if(!any(is.na(summary$COORD_Y))) "Depth [m]" else "Index", xlab = "x [m]", grid = TRUE, contour = FALSE, zlab = c("BSL", "IRSL", "BSL depl.", "IRSL depl.", "IRSL/BSL", "mean DARK"), main = summary$RUN[1] ), val = list(...), keep.null = TRUE) ## mode == "surface" --------- if(mode[1] == "surface") { ### check for validity of surface value ------- if(!all(plot_settings$surface_value %in% names(m_list))) stop(paste0("[analyse_portableOSL()] Unknown value to plot: Valid are: ", paste(names(m_list), collapse = ", ")), call. = FALSE) ## set par ------- if(length(plot_settings$surface_value) > 1) { par.default <- par(mfrow = c(2,2)) on.exit(par(par.default)) } ## loop over surface values ------- for(i in plot_settings$surface_value) { ## set matrix for the plot m <- m_list[[i]] ## respect xlim and ylim range m <- m[m[,2] >= min(plot_settings$ylim) & m[,2] <= max(plot_settings$ylim), ] m <- m[m[,1] >= min(plot_settings$xlim) & m[,1] <= max(plot_settings$xlim), ] ## respect z_values if(!all(is.na(plot_settings$zlim))) m <- m[m[,3] >= min(plot_settings$zlim) & m[,3] <= max(plot_settings$zlim), ] ## interpolate ------ s <- try(interp::interp( x = m[, 1], y = m[, 2], z = m[, 3], nx = 200, ny = 200, ), silent = TRUE) ## show only warning if(inherits(s, "try-error")) warning("[analyse_portableOSL()] Surface interpolation failed, this happens when all points are arranged in one line. Nothing plotted!", call. 
= FALSE) ## show error if(!inherits(s, "try-error")) { par.default <- c( if(exists("par.default")) par.default else NULL, par(mar = c(4.5,4.5,4,2), xpd = FALSE)) on.exit(par(par.default)) ## open empty plot plot( x = NA, y = NA, ylim = plot_settings$ylim, xlim = plot_settings$xlim, xlab = plot_settings$xlab, ylab = plot_settings$ylab, main = plot_settings$main) ## add background image if available ------- if (!is.null(plot_settings$bg_img)) { ## get corner positions if(!is.null(plot_settings$bg_img_positions)) positions <- plot_settings$bg_img_positions[1:4] else positions <- par()$usr graphics::rasterImage( image = plot_settings$bg_img, xleft = positions[1], ybottom = positions[4], xright = positions[2], ytop = positions[3], interpolate = TRUE) } ## plot image ------- graphics::image( s, col = plot_settings$col_ramp, add = TRUE ) ## add contour if (plot_settings$contour) graphics::contour(m, add = TRUE, col = "grey") ## add points points(m[,1:2], pch = 20) ## add what is shown in the plot mtext(side = 3, text = i, cex = 0.7) ## add legend if(plot_settings$legend) { par.default <- c(par.default, par(xpd = TRUE)) on.exit(par(par.default)) col_grad <- plot_settings$col_ramp[ seq(1, length(plot_settings$col_ramp), length.out = 14)] slices <- seq(par()$usr[3],par()$usr[4],length.out = 15) for(s in 1:(length(slices) - 1)){ graphics::rect( xleft = par()$usr[2] * 1.01, xright = par()$usr[2] * 1.03, ybottom = slices[s], ytop = slices[s + 1], col = col_grad[s], border = TRUE) } ## add legend text text( x = par()$usr[2] * 1.04, y = par()$usr[4], labels = if(is.null(plot_settings$zlim_image)) { format(max(m[,3]), digits = 1, scientific = TRUE) } else { format(plot_settings$zlim_image[2], digits = 1, scientific = TRUE) }, cex = 0.6, srt = 270, pos = 3) text( x = par()$usr[2] * 1.04, y = par()$usr[3], labels = if(is.null(plot_settings$zlim_image)) { format(min(m[,3]), digits = 1, scientific = TRUE) } else { format(plot_settings$zlim_image[1], digits = 1, scientific = TRUE) }, 
cex = 0.6, pos = 3, srt = 270) ## add legend labelling (central) text( x = par()$usr[2] * 1.05, y = (par()$usr[4] - par()$usr[3])/2 + par()$usr[3], labels = "Intensity [a.u.]", cex = 0.7, pos = 3, srt = 270) } } }# end for loop } ## mode == "profile" --------- if (!is.null(mode[1]) && mode == "profile") { par.old.full <- par(no.readonly = TRUE) on.exit(par(par.old.full)) # default: par(mar = c(5, 4, 4, 2) + 0.1) // bottom, left, top, right par(mfrow = c(1, 7)) par(mar = c(5, 4, 4, 1) + 0.1) frame() mtext(side= 3, run, cex = 0.7, line = 2) par(mar = c(5, 0, 4, 1) + 0.1) ## make sure that wrong zlim settings do not screw up the function if(!inherits(plot_settings$zlim, "list")) { warning("[analyse_portableOSL()] In profile mode, zlim needs to be provided as named list. Example: list(BSL = c(0,1)). Reset to default.", call. = FALSE) plot_settings$zlim <- attr(m_list, "zlim") } #### BSL ------- plot( NA, NA, ylim = plot_settings$ylim, xlim = plot_settings$zlim[["BSL"]], xlab = plot_settings$zlab[1], bty = "n", yaxt = "n" ) if(plot_settings$grid) grid() lines( x = m_list[["BSL"]][,"value"], y = m_list[["BSL"]][,"y"], type = "b", pch = plot_settings$pch[1], col = plot_settings$col[1] ) ## add error bars segments( x0 = m_list[["BSL"]][,"value"] - m_list[["BSL_error"]][,"value"], x1 = m_list[["BSL"]][,"value"] + m_list[["BSL_error"]][,"value"], y0 = m_list[["BSL"]][,"y"], y1 = m_list[["BSL"]][,"y"], col = plot_settings$col[1]) axis(2, line = 3, at = m_list[["BSL"]][,"y"], labels = m_list[["BSL"]][,"y"]) axis(3) ## add general y-axis label mtext(plot_settings$ylab[1], side = 2, line = 6) ### IRSL -------------- plot( NA, NA, ylim = plot_settings$ylim, xlim = plot_settings$zlim[["IRSL"]], xlab = plot_settings$zlab[2], bty = "n", yaxt = "n" ) if(plot_settings$grid) grid() lines( x = m_list[["IRSL"]][,"value"], y = m_list[["IRSL"]][,"y"], type = "b", pch = plot_settings$pch[2], col = plot_settings$col[2]) ## add error bars segments( x0 = m_list[["IRSL"]][,"value"] - 
m_list[["IRSL_error"]][,"value"], x1 = m_list[["IRSL"]][,"value"] + m_list[["IRSL_error"]][,"value"], y0 = m_list[["IRSL"]][,"y"], y1 = m_list[["IRSL"]][,"y"], col = plot_settings$col[2]) axis(3) ### OSL DEPLETATION ------- plot( NA, NA, ylim = plot_settings$ylim, xlim = plot_settings$zlim[["BSL_depletion"]], xlab = plot_settings$zlab[3], bty = "n", yaxt = "n" ) if(plot_settings$grid) grid() lines( x = m_list[["BSL_depletion"]][,"value"], y = m_list[["BSL_depletion"]][,"y"], type = "b", lty = 2, pch = plot_settings$pch[3], col = plot_settings$col[3] ) axis(3) ### IRSL DEPLETION --------------- plot( NA, NA, ylim = plot_settings$ylim, xlim = plot_settings$zlim[["IRSL_depletion"]], xlab = plot_settings$zlab[4], bty = "n", yaxt = "n" ) if(plot_settings$grid) grid() lines( x = m_list[["IRSL_depletion"]][,"value"], y = m_list[["IRSL_depletion"]][,"y"], type = "b", lty = 2, pch = plot_settings$pch[4], col = plot_settings$col[4]) axis(3) ### RATIO ----------------------------- plot( NA, NA, ylim = plot_settings$ylim, xlim = plot_settings$zlim[["IRSL_BSL_RATIO"]], xlab = plot_settings$zlab[5], ylab = "", bty = "n", yaxt = "n" ) if(plot_settings$grid) grid() lines( x = m_list[["IRSL_BSL_RATIO"]][,"value"], y = m_list[["IRSL_BSL_RATIO"]][,"y"], type = "b", pch = plot_settings$pch[5], col = plot_settings$col[5]) axis(3) ### DARK ----------------------------- plot( x = m_list[["DARK"]][,"value"], y = m_list[["DARK_error"]][,"y"], type = "b", pch = plot_settings$pch, col = plot_settings$col[6], ylim = plot_settings$ylim, xlim = range(c( plot_settings$zlim[["DARK"]] - plot_settings$zlim[["DARK_error"]], plot_settings$zlim[["DARK"]] + plot_settings$zlim[["DARK_error"]])), xlab = plot_settings$zlab[6], ylab = "", bty = "n", yaxt = "n" ) ## add error bars segments( x0 = m_list[["DARK"]][,"value"] - m_list[["DARK_error"]][,"value"], x1 = m_list[["DARK"]][,"value"] + m_list[["DARK_error"]][,"value"], y0 = m_list[["DARK"]][,"y"], y1 = m_list[["DARK"]][,"y"], col = 
plot_settings$col[6]) axis(3) } ## end mode == "profile" } ## RETURN VALUE ---- call<- sys.call() args <- as.list(call)[2:length(call)] newRLumResults <- set_RLum( class = "RLum.Results", data = list( summary=summary, data = object, args=args ), info = list(call = call)) return(newRLumResults) } # HELPER FUNCTIONS ---------- ## This extracts the relevant curve data information of the RLum.Data.Curve ## objects .posl_get_signal <- function(x, signal.integral) { raw_signal <- get_RLum(x)[,2] sigint <- range(signal.integral) if (sigint[2] > length(raw_signal)) { sigint[2] <- length(raw_signal) warning("'signal.integral' (", paste(range(signal.integral), collapse = ", "),") ", "exceeded the number of available data points (n = ", length(raw_signal),") and ", "has been automatically reduced to the maximum number.", call. = FALSE) } sum_signal <- sum(raw_signal[sigint[1]:sigint[2]]) sum_signal_err <- sqrt(sum(x@info$raw_data$counts_per_cycle_error[sigint[1]:sigint[2]]^2)) sum_signal_depletion <- sum(raw_signal[(length(raw_signal)-length(sigint[1]:sigint[2])):length(raw_signal)]) / sum_signal return(data.frame(sum_signal, sum_signal_err, sum_signal_depletion)) } .posl_get_dark_count <- function(x) { ## we do assume a fixed sequence pattern, hence, we know what to ## expect that anything that comes in here, can be merged counts <- unlist(lapply(x, function(x) as.matrix(x)[,2])) return(data.frame(mean_dark_count = mean(counts), sd_dark_count = sd(counts))) } ## This function normalises the data curve by the mean signal .posl_normalise <- function(x) { rel.error <- x$sum_signal_err / x$sum_signal x$sum_signal <- x$sum_signal / mean(x$sum_signal) x$sum_signal_err <- x$sum_signal * rel.error x$sum_signal_depletion <- x$sum_signal_depletion / mean(x$sum_signal_depletion) return(x) } ## This function extracts the coordinates from the file name ## .extract_PSL_coord <- function(object){ ## get settings settings_sample <- vapply(object, function(x) x@info$settings$Sample, 
character(1)) |> unique() ## set character vector tmp_coord <- character(length(settings_sample)) ## search for pattern match ... why? ## because otherwise the dataset becomes inconsistent pattern_match <- grepl( pattern = "\\_x\\:[0-9].+\\|y\\:[0-9].+", x = settings_sample, perl = TRUE) ## extract coordinates tmp_coord[pattern_match] <- regexpr( pattern = "\\_x\\:[0-9].+\\|y\\:[0-9].+", text = settings_sample[pattern_match ], perl = TRUE) |> regmatches(x = settings_sample[pattern_match], m = _) ## extract x and y coord_split <- strsplit(tmp_coord, split = "|y:", fixed = TRUE) ## assign values coord <- vapply(coord_split, function(x) { if(length(x) == 0) return(c(x = NA_real_, y = NA_real_)) c(x = as.numeric(strsplit(x, "_x:", fixed = TRUE)[[1]][[2]]), y = as.numeric(x[2]))}, numeric(2)) |> t() ## if NA, assign index if(any(is.na(coord[,1]))) coord[,1] <- 0 if(any(is.na(coord[,2]))) coord[,2] <- 1:nrow(coord) return(coord) } Luminescence/R/structure_RLum.R0000644000176200001440000000350314264017373016217 0ustar liggesusers#' General structure function for RLum S4 class objects #' #' Function calls object-specific get functions for RLum S4 class objects. #' #' The function provides a generalised access point for specific #' [RLum-class] objects.\cr #' Depending on the input object, the corresponding structure function will #' be selected. Allowed arguments can be found in the documentations of the #' corresponding [RLum-class] class. #' #' @param object [RLum-class] (**required**): #' S4 object of class `RLum` #' #' @param ... further arguments that one might want to pass to the specific #' structure method #' #' @return #' Returns a [data.frame] with structure of the object. 
#' #' @section Function version: 0.2.0 #' #' @author #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #' @seealso [RLum.Data.Curve-class], [RLum.Data.Image-class], #' [RLum.Data.Spectrum-class], [RLum.Analysis-class], [RLum.Results-class] #' #' @keywords utilities #' #' @examples #' #' ##load example data #' data(ExampleData.XSYG, envir = environment()) #' #' ##show structure #' structure_RLum(OSL.SARMeasurement$Sequence.Object) #' #' @md #' @export setGeneric("structure_RLum", function(object, ...) { standardGeneric("structure_RLum") }) # Method for structure_RLum method for RLum objects in a list for a list of objects ------------- #' @describeIn structure_RLum #' Returns a list of [RLum-class] objects that had been passed to [structure_RLum] #' #' #' @md #' @export setMethod("structure_RLum", signature = "list", function(object, ...) { ##apply method in the objects and return the same lapply(object, function(x) { if (inherits(x, "RLum")) { return(structure_RLum(x, ...)) } else{ return(x) } }) }) Luminescence/R/calc_AverageDose.R0000644000176200001440000003725614264017373016403 0ustar liggesusers#'Calculate the Average Dose and the dose rate dispersion #' #'This functions calculates the Average Dose and their extrinsic dispersion and estimates #'the standard errors by bootstrapping based on the Average Dose Model by Guerin et al., 2017 #' #' **`sigma_m`**\cr #' #'The program requires the input of a known value of `sigma_m`, #'which corresponds to the intrinsic overdispersion, as determined #'by a dose recovery experiment. Then the dispersion in doses (`sigma_d`) #'will be that over and above `sigma_m` (and individual uncertainties `sigma_wi`). #' #' @param data [RLum.Results-class] or [data.frame] (**required**): #' for [data.frame]: two columns with `De` `(data[,1])` and `De error` `(values[,2])` #' #' @param sigma_m [numeric] (**required**): #' the overdispersion resulting from a dose recovery #' experiment, i.e. 
when all grains have received the same dose. Indeed in such a case, any #' overdispersion (i.e. dispersion on top of analytical uncertainties) is, by definition, an #' unrecognised measurement uncertainty. #' #' @param Nb_BE [integer] (*with default*): #' sample size used for the bootstrapping #' #' @param na.rm [logical] (*with default*): #' exclude NA values from the data set prior to any further operation. #' #' @param plot [logical] (*with default*): #' enables/disables plot output #' #' @param verbose [logical] (*with default*): #' enables/disables terminal output #' #' @param ... further arguments that can be passed to [graphics::hist]. As three plots #' are returned all arguments need to be provided as [list], #' e.g., `main = list("Plot 1", "Plot 2", "Plot 3")`. #' Note: not all arguments of `hist` are #' supported, but the output of `hist` is returned and can be used of own plots. \cr #' #' Further supported arguments: `mtext` ([character]), `rug` (`TRUE/FALSE`). #' #' @section Function version: 0.1.5 #' #' @author Claire Christophe, IRAMAT-CRP2A, Université de Nantes (France), #' Anne Philippe, Université de Nantes, (France), #' Guillaume Guérin, IRAMAT-CRP2A, Université Bordeaux Montaigne, (France), #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #' @seealso [read.table], [graphics::hist] #' #' @return The function returns numerical output and an (*optional*) plot. #' #' -----------------------------------\cr #' `[ NUMERICAL OUTPUT ]` \cr #' -----------------------------------\cr #' **`RLum.Results`**-object\cr #' #' **slot:** **`@data`** \cr #' #' `[.. 
$summary : data.frame]`\cr #' #' \tabular{lll}{ #' **Column** \tab **Type** \tab **Description**\cr #' AVERAGE_DOSE \tab [numeric] \tab the obtained average dose\cr #' AVERAGE_DOSE.SE \tab [numeric] \tab the average dose error \cr #' SIGMA_D \tab [numeric]\tab sigma \cr #' SIGMA_D.SE \tab [numeric]\tab standard error of the sigma \cr #' IC_AVERAGE_DOSE.LEVEL \tab [character]\tab confidence level average dose\cr #' IC_AVERAGE_DOSE.LOWER \tab [character]\tab lower quantile of average dose \cr #' IC_AVERAGE_DOSE.UPPER \tab [character]\tab upper quantile of average dose\cr #' IC_SIGMA_D.LEVEL \tab [integer]\tab confidence level sigma\cr #' IC_SIGMA_D.LOWER \tab [character]\tab lower sigma quantile\cr #' IC_SIGMA_D.UPPER \tab [character]\tab upper sigma quantile\cr #' L_MAX \tab [character]\tab maximum likelihood value #' } #' #' `[.. $dstar : matrix]` \cr #' #' Matrix with bootstrap values\cr #' #' `[.. $hist : list]`\cr #' #' Object as produced by the function histogram #' #' ------------------------\cr #' `[ PLOT OUTPUT ]`\cr #' ------------------------\cr #' #' The function returns two different plot panels. #' #' (1) An abanico plot with the dose values #' #' (2) A histogram panel comprising 3 histograms with the equivalent dose and the bootstrapped average #' dose and the sigma values. #' #' @references #' Guerin, G., Christophe, C., Philippe, A., Murray, A.S., Thomsen, K.J., Tribolo, C., Urbanova, P., #' Jain, M., Guibert, P., Mercier, N., Kreutzer, S., Lahaye, C., 2017. Absorbed dose, equivalent dose, #' measured dose rates, and implications for OSL age estimates: Introducing the Average Dose Model. #' Quaternary Geochronology 1-32. doi:10.1016/j.quageo.2017.04.002 #' #' **Further reading**\cr #' #' Efron, B., Tibshirani, R., 1986. Bootstrap Methods for Standard Errors, Confidence Intervals, #' and Other Measures of Statistical Accuracy. Statistical Science 1, 54-75. #' #' @note This function has beta status! 
#' #' @keywords datagen #' #' @examples #' #'##Example 01 using package example data #'##load example data #'data(ExampleData.DeValues, envir = environment()) #' #'##calculate Average dose #'##(use only the first 56 values here) #'AD <- calc_AverageDose(ExampleData.DeValues$CA1[1:56,], sigma_m = 0.1) #' #'##plot De and set Average dose as central value #'plot_AbanicoPlot( #' data = ExampleData.DeValues$CA1[1:56,], #' z.0 = AD$summary$AVERAGE_DOSE) #' #' @md #' @export calc_AverageDose <- function( data, sigma_m = NULL, Nb_BE = 500, na.rm = TRUE, plot = TRUE, verbose = TRUE, ... ){ # Define internal functions ------------------------------------------------------------------ # function which compute mle's for data (yu,su) .mle <- function(yu , su, wu.start, sigma_d.start, delta.start){ ##set start parameters, otherwise the function will try to get them ##from the parent environment, which is not wanted ... delta.temp <- 0 sigma_d.temp <- 0 sigma_d <- sigma_d.start delta <- delta.start wu <- wu.start j <- 0 iteration_limit <- 10000 ##loop until convergence or the iteration limit is reached while(j < iteration_limit) { ##code by Claire; in the 2nd and 3rd line delta and sigma_d are replaced by delta.temp and ##sigma_d.temp; otherwise the iteration and its test for convergence will not work delta.temp <- exp( sum(wu*(yu+(0.5*(sigma_d^2)))) / sum(wu) ) sigma_d.temp <- sigma_d*sum( (wu^2) * (yu-log(delta.temp)+0.5*sigma_d^2)^2) / (sum( wu*(1+yu-log(delta.temp)+0.5*sigma_d^2))) wu <- 1/(sigma_d.temp^2 + su^2) ##break loop if convergence is reached ... if not update values if(is.infinite(delta.temp) | is.infinite(sigma_d.temp)){ break() }else if ( ##compare values ... 
if they are equal we have convergence all( c(round(c(delta, sigma_d), 4)) == c(round(c(delta.temp, sigma_d.temp), 4)) ) ) { break() } else{ ##update input values delta <- delta.temp sigma_d <- sigma_d.temp j <- j + 1 } } ##if no convergence was reached stop entire function; no stop as this may happen during the ##bootstraping procedure if(j == iteration_limit){ warning("[calc_AverageDoseModel()] .mle() no convergence reached for the given limits. NA returned!") return(c(NA,NA)) }else if(is.infinite(delta.temp) | is.infinite(sigma_d.temp)){ warning("[calc_AverageDoseModel()] .mle() gaves Inf values. NA returned!") return(c(NA,NA)) }else{ return(c(round(c(delta, sigma_d),4))) } } .CredibleInterval <- function(a_chain, level = 0.95) { ## Aim : estimation of the shortest credible interval of the sample of parameter a # A level % credible interval is an interval that keeps N*(1-level) elements of the sample # The level % credible interval is the shortest of all those intervals. ## Parameters : # a_chain : the name of the values of the parameter a # level : the level of the credible interval expected ## Returns : the level and the endpoints sorted_sample <- sort(a_chain) N <- length(a_chain) OutSample <- N * (1 - level) I <- cbind(sorted_sample[1:(OutSample + 1)] , sorted_sample[(N - OutSample):N]) l <- I[, 2] - I[, 1] # length of intervals i <- which.min(l) # look for the shortest interval return(c( level = level, CredibleIntervalInf = I[i, 1], CredibleIntervalSup = I[i, 2] )) } ##//////////////////////////////////////////////////////////////////////////////////////////////// ##HERE THE MAIN FUNCTION STARTS ##//////////////////////////////////////////////////////////////////////////////////////////////// # Integrity checks ---------------------------------------------------------------------------- if(!is(data, "RLum.Results") & !is(data, "data.frame")){ stop("[calc_AverageDose()] input is neither of type 'RLum.Results' nor of type 'data.frame'!") }else { if(is(data, 
"RLum.Results")){ data <- get_RLum(data) } } if(is.null(sigma_m)){ stop("[calc_AverageDose()] 'sigma_m' is missing but required") } # Data preparation ----------------------------------------------------------------------------- ##problem: the entire code refers to column names the user may not provide... ## >> to avoid changing the entire code, the data will shape to a format that ## >> fits to the code ##check for number of columns if(ncol(data)<2){ try(stop("[calc_AverageDose()] data set contains < 2 columns! NULL returned!", call. = FALSE)) return(NULL) } ##used only the first two colums if(ncol(data)>2){ data <- data[,1:2] warning("[calc_AverageDose()] number of columns in data set > 2. Only the first two columns were used.", call. = FALSE) } ##exclude NA values if(any(is.na(data))){ data <- na.exclude(data) warning("[calc_AverageDose()] NA values in data set detected. Rows with NA values removed!", call. = FALSE) } ##check data set if(nrow(data) == 0){ try(stop("[calc_AverageDose()] data set contains 0 rows! NULL returned!", call. 
= FALSE)) return(NULL) } ##data becomes to dat (thus, make the code compatible with the code by Claire and Anne) dat <- data ##preset column names, as the code refers to it colnames(dat) <- c("cd", "se") # Pre calculation ----------------------------------------------------------------------------- ##calculate yu = log(CD) and su = se(logCD) yu <- log(dat$cd) su <- sqrt((dat$se / dat$cd) ^ 2 + sigma_m ^ 2) # calculate starting values and weights sigma_d <- sd(dat$cd) / mean(dat$cd) wu <- 1 / (sigma_d ^ 2 + su ^ 2) delta <- mean(dat$cd) n <- length(yu) ##terminal output if (verbose) { cat("\n[calc_AverageDose()]") cat("\n\n>> Initialisation <<") cat(paste("\nn:\t\t", n)) cat(paste("\ndelta:\t\t", delta)) cat(paste("\nsigma_m:\t", sigma_m)) cat(paste("\nsigma_d:\t", sigma_d)) } # mle's computation dhat <- .mle(yu, su, wu.start = wu, sigma_d.start = sigma_d, delta.start = delta) delta <- dhat[1] sigma_d <- dhat[2] wu <- 1 / (sigma_d ^ 2 + su ^ 2) # maximum log likelihood llik <- sum(-log(sqrt(2 * pi / wu)) - (wu / 2) * ((yu - log(delta) + 0.5 * (sigma_d ^ 2)) ^ 2)) ##terminal output if(verbose){ cat(paste("\n\n>> Calculation <<\n")) cat(paste("log likelihood:\t", round(llik, 4))) } # standard errors obtained by bootstrap, we refer to Efron B. and Tibshirani R. (1986) # est ce qu'il faut citer l'article ici ou tout simplement dans la publi ? 
n <- length(yu) ##calculate dstar ##set matrix for I I <- matrix(data = sample(x = 1:n, size = n * Nb_BE, replace = TRUE), ncol = Nb_BE) ##iterate over the matrix and produce dstar ##(this looks a little bit complicated, but is far more efficient) dstar <- t(vapply( X = 1:Nb_BE, FUN = function(x) { .mle(yu[I[, x]], su[I[, x]], sigma_d.start = sigma_d, delta.start = delta, wu.start = wu) }, FUN.VALUE = vector(mode = "numeric", length = 2) )) ##exclude NA values dstar <- na.exclude(dstar) ##calculate confidence intervalls IC_delta <- .CredibleInterval(dstar[,1],0.95) IC_sigma_d <- .CredibleInterval(dstar[,2],0.95) IC <- rbind(IC_delta, IC_sigma_d) # standard errors sedelta <- sqrt ((1/(Nb_BE-1))*sum((dstar[,1]-mean(dstar[,1]))^2)) sesigma_d <- sqrt ((1/(Nb_BE-1))*sum((dstar[,2]-mean(dstar[,2]))^2)) ##Terminal output if (verbose) { cat("\nconfidence intervals\n") cat("--------------------------------------------------\n") print(t(IC), print.gap = 6, digits = 4) cat("--------------------------------------------------\n") cat(paste("\n>> Results <<\n")) cat("----------------------------------------------------------\n") cat(paste( "Average dose:\t ", round(delta, 4), "\tse(Aver. 
dose):\t", round(sedelta, 4) )) if(sigma_d == 0){ cat(paste( "\nsigma_d:\t ", round(sigma_d, 4), "\t\tse(sigma_d):\t", round(sesigma_d, 4) )) }else{ cat(paste( "\nsigma_d:\t ", round(sigma_d, 4), "\tse(sigma_d):\t", round(sesigma_d, 4) )) } cat("\n----------------------------------------------------------\n") } ##compile final results data frame results_df <- data.frame( AVERAGE_DOSE = delta, AVERAGE_DOSE.SE = sedelta, SIGMA_D = sigma_d, SIGMA_D.SE = sesigma_d, IC_AVERAGE_DOSE.LEVEL = IC_delta[1], IC_AVERAGE_DOSE.LOWER = IC_delta[2], IC_AVERAGE_DOSE.UPPER = IC_delta[3], IC_SIGMA_D.LEVEL = IC_sigma_d[1], IC_SIGMA_D.LOWER = IC_sigma_d[2], IC_SIGMA_D.UPPER = IC_sigma_d[3], L_MAX = llik, row.names = NULL ) # Plotting ------------------------------------------------------------------------------------ ##the plotting (enable/disable) is controlled below, as with this ##we always get a histogram object ##set data list data_list <- list(dat$cd, dstar[,1], dstar[,2]) ##preset plot arguments plot_settings <- list( breaks = list("FD", "FD", "FD"), probability = list(FALSE, TRUE, TRUE), main = list( "Observed: Equivalent dose", "Bootstrapping: Average Dose", "Bootstrapping: Sigma_d"), xlab = list( "Equivalent dose [a.u.]", "Average dose [a.u.]", "Sigma_d"), axes = list(TRUE, TRUE, TRUE), col = NULL, border = NULL, density = NULL, freq = NULL, mtext = list( paste("n = ", length(data_list[[1]])), paste("n = ", length(data_list[[2]])), paste("n = ", length(data_list[[3]]))), rug = list(TRUE, TRUE, TRUE) ) ##modify this list by values the user provides ##expand all elements in the list ##problem: the user might provid only one item, then the code will break plot_settings.user <- lapply(list(...), function(x){ rep(x, length = 3) }) ##modify plot_settings <- modifyList(x = plot_settings.user, val = plot_settings) ##get change par setting and reset on exit if(plot) { par.default <- par()$mfrow on.exit(par(mfrow = par.default)) par(mfrow = c(1,3)) } ##Produce plots ##(1) - histogram 
of the observed equivalent dose ##(2) - histogram of the bootstrapped De ##(3) - histogram of the bootstrapped sigma_d ##with lapply we get fetch also the return of hist, they user might want to use this later hist <- lapply(1:length(data_list), function(x){ temp <- suppressWarnings(hist( x = data_list[[x]], breaks = plot_settings$breaks[[x]], probability = plot_settings$probability[[x]], main = plot_settings$main[[x]], xlab = plot_settings$xlab[[x]], axes = plot_settings$axes[[x]], freq = plot_settings$freq[[x]], plot = plot, col = plot_settings$col[[x]], border = plot_settings$border[[x]], density = plot_settings$density[[x]] )) if (plot) { ##add rug if (plot_settings$rug[[x]]) { rug(data_list[[x]]) } ##plot mtext mtext(side = 3, text = plot_settings$mtext[[x]], cex = par()$cex) } return(temp) }) # Return -------------------------------------------------------------------------------------- set_RLum( class = "RLum.Results", data = list( summary = results_df, dstar = as.data.frame(dstar), hist = hist ), info = list(call = sys.call()) ) } Luminescence/R/report_RLum.R0000644000176200001440000006536414264017373015507 0ustar liggesusers#' @title Create a HTML-report for (RLum) objects #' #' @details This function creates a HTML-report for a given object, listing its complete #' structure and content. The object itself is saved as a serialised .Rds file. #' The report file serves both as a convenient way of browsing through objects with #' complex data structures as well as a mean of properly documenting and saving #' objects. #' #' The HTML report is created with [rmarkdown::render] and has the #' following structure: #' #' \tabular{ll}{ #' **Section** \tab **Description** \cr #' `Header` \tab A summary of general characteristics of the object \cr #' `Object content` \tab A comprehensive list of the complete structure and content of the provided object. 
\cr #' `Object structure` \tab Summary of the objects structure given as a table \cr #' `File` \tab Information on the saved RDS file \cr #' `Session Info` \tab Captured output from `sessionInfo()` \cr #' `Plots` \tab (*optional*) For `RLum-class` objects a variable number of plots \cr #' } #' #' The structure of the report can be controlled individually by providing one or more of the #' following arguments (all `logical`): #' #' \tabular{ll}{ #' **Argument** \tab **Description** \cr #' `header` \tab Hide or show general information on the object \cr #' `main` \tab Hide or show the object's content \cr #' `structure` \tab Hide or show object's structure \cr #' `rds` \tab Hide or show information on the saved RDS file \cr #' `session` \tab Hide or show the session info \cr #' `plot` \tab Hide or show the plots (depending on object) \cr #' } #' #' Note that these arguments have higher precedence than `compact`. #' #' Further options that can be provided via the `...` argument: #' #' \tabular{ll}{ #' **Argument** \tab **Description** \cr #' `short_table` \tab If `TRUE` only show the first and last 5 rows of long tables. \cr #' `theme` \tab Specifies the Bootstrap #' theme to use for the report. Valid themes include `"default"`, `"cerulean"`, `"journal"`, `"flatly"`, #' `"readable"`, `"spacelab"`, `"united"`, `"cosmo"`, `"lumen"`, `"paper"`, `"sandstone"`, #' `"simplex"`, and `"yeti"`. \cr #' `highlight` \tab Specifies the syntax highlighting style. #' Supported styles include `"default"`, `"tango"`, `"pygments"`, `"kate"`, `"monochrome"`, #' `"espresso"`, `"zenburn"`, `"haddock"`, and `"textmate"`. \cr #' `css` \tab `TRUE` or `FALSE` to enable/disable custom CSS styling \cr #' } #' #' The following arguments can be used to customise the report via CSS (Cascading Style Sheets): #' #' \tabular{ll}{ #' **Argument** \tab **Description** \cr #' `font_family` \tab Define the font family of the HTML document (default: `"arial"`) \cr #' `headings_size` \tab Size of the `

` to `

` tags used to define HTML headings (default: 166%). \cr #' `content_color` \tab Colour of the object's content (default: #a72925). \cr #' } #' #' Note that these arguments must all be of class [character] and follow standard CSS syntax. #' For exhaustive CSS styling you can provide a custom CSS file for argument `css.file`. #' CSS styling can be turned of using `css = FALSE`. #' #' @param object (**required**): #' The object to be reported on, preferably of any `RLum`-class. #' #' @param file [character] (*with default*): #' A character string naming the output file. If no filename is provided a #' temporary file is created. #' #' @param title [character] (*with default*): #' A character string specifying the title of the document. #' #' @param compact [logical] (*with default*): #' When `TRUE` the following report components are hidden: #' `@@.pid`, `@@.uid`, `'Object structure'`, `'Session Info'` #' and only the first and last 5 rows of long matrices and data frames are shown. #' See details. #' #' @param timestamp [logical] (*with default*): #' `TRUE` to add a timestamp to the filename (suffix). #' #' @param show_report [logical] (*with default*): If set to `TRUE` the function tries to display #' the report output in the local viewer, e.g., within *RStudio* after rendering. #' #' @param launch.browser [logical] (*with default*): #' `TRUE` to open the HTML file in the system's default web browser after #' it has been rendered. #' #' @param css.file [character] (*optional*): #' Path to a CSS file to change the default styling of the HTML document. #' #' @param quiet [logical] (*with default*): #' `TRUE` to suppress printing of the pandoc command line. #' #' @param clean [logical] (*with default*): #' `TRUE` to clean intermediate files created during rendering. #' #' @param ... further arguments passed to or from other methods and to control #' the document's structure (see details). 
#' #' @section Function version: 0.1.4 #' #' @author #' Christoph Burow, University of Cologne (Germany), #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) \cr #' #' @note #' This function requires the R packages 'rmarkdown', 'pander' and 'rstudioapi'. #' #' @seealso [rmarkdown::render], [pander::pander_return], #' [pander::openFileInOS], [rstudioapi::viewer], #' [browseURL] #' #' @return #' Writes a HTML and .Rds file. #' #' @examples #' #' \dontrun{ #' ## Example: RLum.Results ---- #' #' # load example data #' data("ExampleData.DeValues") #' #' # apply the MAM-3 age model and save results #' mam <- calc_MinDose(ExampleData.DeValues$CA1, sigmab = 0.2) #' #' # create the HTML report #' report_RLum(object = mam, file = "~/CA1_MAM.Rmd", #' timestamp = FALSE, #' title = "MAM-3 for sample CA1") #' #' # when creating a report the input file is automatically saved to a #' # .Rds file (see saveRDS()). #' mam_report <- readRDS("~/CA1_MAM.Rds") #' all.equal(mam, mam_report) #' #' #' ## Example: Temporary file & Viewer/Browser ---- #' #' # (a) #' # Specifying a filename is not necessarily required. If no filename is provided, #' # the report is rendered in a temporary file. If you use the RStudio IDE, the #' # temporary report is shown in the interactive Viewer pane. #' report_RLum(object = mam) #' #' # (b) #' # Additionally, you can view the HTML report in your system's default web browser. 
#' report_RLum(object = mam, launch.browser = TRUE) #' #' #' ## Example: RLum.Analysis ---- #' #' data("ExampleData.RLum.Analysis") #' #' # create the HTML report (note that specifying a file #' # extension is not necessary) #' report_RLum(object = IRSAR.RF.Data, file = "~/IRSAR_RF") #' #' #' ## Example: RLum.Data.Curve ---- #' #' data.curve <- get_RLum(IRSAR.RF.Data)[[1]] #' #' # create the HTML report #' report_RLum(object = data.curve, file = "~/Data_Curve") #' #' ## Example: Any other object ---- #' x <- list(x = 1:10, #' y = runif(10, -5, 5), #' z = data.frame(a = LETTERS[1:20], b = dnorm(0:9)), #' NA) #' #' report_RLum(object = x, file = "~/arbitray_list") #' } #' #' @md #' @export report_RLum <- function( object, file = tempfile(), title = "RLum.Report", compact = TRUE, timestamp = TRUE, show_report = TRUE, launch.browser = FALSE, css.file = NULL, quiet = TRUE, clean = TRUE, ...) { ## ------------------------------------------------------------------------ ## ## PRE-CHECKS ---- # check if required namespace(s) are available if (!requireNamespace("rmarkdown", quietly = TRUE)) stop("Creating object reports requires the 'rmarkdown' package.", " To install this package run 'install.packages('rmarkdown')' in your R console.", call. = FALSE) if (!requireNamespace("pander", quietly = TRUE)) stop("Creating object reports requires the 'pander' package.", " To install this package run 'install.packages('pander')' in your R console.", call. = FALSE) if (!requireNamespace("rstudioapi", quietly = TRUE)) { warning("Creating object reports requires the 'rstudioapi' package.", " To install this package run 'install.packages('rstudioapi')' in your R console.", call. = FALSE) isRStudio <- FALSE } else { isRStudio <- TRUE } # check if files exist if (!is.null(css.file)) if(!file.exists(css.file)) stop("Couldn't find the specified CSS file at '", css.file, "'", call. 
= FALSE) ## ------------------------------------------------------------------------ ## ## STRUCTURE ---- structure <- list(header = TRUE, main = TRUE, structure = ifelse(compact, FALSE, TRUE), rds = TRUE, session = ifelse(compact, FALSE, TRUE), plot = TRUE) # specifying report components has higher precedence than the 'compact' arg structure <- modifyList(structure, list(...)) ## OPTIONS ---- options <- list(short_table = ifelse(compact, TRUE, FALSE), theme = "cerulean", highlight = "haddock", css = TRUE) options <- modifyList(options, list(...)) ## CSS DEFAULTS ---- css <- list(font_family = "arial", headings_size = "166%", content_color = "#a72925") css <- modifyList(css, list(...)) ## ------------------------------------------------------------------------ ## ## CREATE FILE ---- isTemp <- missing(file) # make sure the filename ends with .Rmd extension if (!grepl(".rmd$", file, ignore.case = TRUE)) file <- paste0(file, ".Rmd") # Timestamp: currently added as a suffix to the filename # if we were to change it to a prefix, we need to first figure out the filename # (i.e., separate it from the possible path) using the following regular # expression strsplit(string, "\\\\|\\\\\\\\|\\/|\\/\\/"). This looks for # \, \\, /, // and the last element is the filename. 
if (timestamp) file <- gsub(".rmd$", paste0(format(Sys.time(), "_%Y%b%d"), ".Rmd"), file, ignore.case = TRUE) # sanitize file name file <- gsub("\\\\", "\\/", file) file.html <- gsub(".rmd$", ".html", file, ignore.case = TRUE) file.rds <- gsub(".rmd$", ".Rds", file, ignore.case = TRUE) # Create and open the file file.create(file) tmp <- file(file, open = "wt", blocking = TRUE) # save RDS file saveRDS(object, file.rds) # get object elements <- .struct_RLum(object, root = deparse(substitute(object))) ## ------------------------------------------------------------------------ ## ## WRITE CONTENT ---- # HEADER ---- writeLines("---", tmp) writeLines("title: RLum.Report", tmp) writeLines("output:", tmp) writeLines(" html_document:", tmp) writeLines(" mathjax: null", tmp) writeLines(" title: RLum.Report", tmp) writeLines(paste(" theme:", options$theme), tmp) writeLines(paste(" highlight:", options$highlight), tmp) writeLines(" toc: true", tmp) writeLines(" toc_float: true", tmp) writeLines(" toc_depth: 6", tmp) if (!is.null(css.file)) writeLines(paste(" css:", css.file), tmp) writeLines(" md_extensions: -autolink_bare_uris", tmp) writeLines("---", tmp) # CASCADING STYLE SHEETS ---- if (options$css) { writeLines(paste0( "" ), tmp) } # INFO ---- # check if Luminescence package is installed and get details pkg <- as.data.frame(installed.packages(), row.names = FALSE) if ("Luminescence" %in% pkg$Package) pkg <- pkg[which(pkg$Package == "Luminescence"), ] else pkg <- data.frame(LibPath = "-", Version = "not installed", Built = "-") # Title writeLines(paste("

", title, "

\n\n
"), tmp) # write information on R, Luminescence package, Object if (structure$header) { writeLines(paste("**Date:**", Sys.time(), "\n\n", "**R version:**", R.version.string, "\n\n", "**Luminescence package** \n\n", "**  » Path:**", pkg$LibPath, "\n\n", "**  » Version:**", pkg$Version, "\n\n", "**  » Built:**", pkg$Built, "\n\n", "**Object** \n\n", "**  » Created:**", tryCatch(paste(paste(strsplit(object@.uid, '-|\\.')[[1]][1:3], collapse = "-"), strsplit(object@.uid, '-|\\.')[[1]][4]), error = function(e) "-"), "\n\n", "**  » Class:**", class(object), "\n\n", "**  » Originator:**", tryCatch(object@originator, error = function(e) "-"), "\n\n", "**  » Name:**", deparse(substitute(object)), "\n\n", "**  » Parent ID:**", tryCatch(object@.pid, error = function(e) "-"), "\n\n", "**  » Unique ID:**", tryCatch(object@.uid, error = function(e) "-"), "\n\n", "
"), tmp) if (isTemp) { writeLines(paste("
Save report"), tmp) writeLines(paste("Save data \n\n"), tmp) } }#EndOf::Header # OBJECT ---- if (structure$main) { for (i in 1:nrow(elements)) { # SKIP ELEMENT? # hide @.pid and @.uid if this is a shortened report (default) if (elements$bud[i] %in% c(".uid", ".pid") && compact == TRUE) next(); # HEADER short.name <- elements$bud[i] links <- gsub("[^@$\\[]", "", as.character(elements$branch[i])) type <- ifelse(nchar(links) == 0, "", substr(links, nchar(links), nchar(links))) if (type == "[") type = "" # HTML header level is determined by the elements depth in the object # exception: first row is always the object's name and has depth zero if (i == 1) hlevel <- "#" else hlevel <- paste(rep("#", elements$depth[i]), collapse = "") # write header; number of dots represents depth in the object. because there # may be duplicate header names, for each further occurrence of a name # Zero-width non-joiner entities are added to the name (non visible) writeLines(paste0(hlevel, " ", "", paste(rep("..", elements$depth[i]), collapse = ""), type, "", paste(rep("‌", elements$bud.freq[i]), collapse = ""), short.name[length(short.name)], ifelse(elements$endpoint[i], "", paste0("{#root",i,"}")), ##ifelse(elements$endpoint[i], "", "{#root}"), "\n\n"), tmp) # SUBHEADER # contains information on Class, Length, Dimensions, Path writeLines(paste0("
",
                        "",
                        " Class: ", elements$class[i],
                        "",
                        "   Length: ", elements$length[i],
                        "",
                        "   Dimensions: ",
                        ifelse(elements$row[i] != 0, paste0(elements$row[i], ", ", elements$col[i]), "-"),
                        "",
                        "\n Path: ", gsub("@", "@", elements$branch[i]),
                        "
", "\n\n"), tmp) # TABLE CONTENT # the content of a branch is only printed if it was determined an endpoint # in the objects structure if (elements$endpoint[i]) { table <- tryCatch(eval(parse(text = elements$branch[i])), error = function(e) { return(NULL) }) # exceptions: content may be NULL; convert raw to character to stay # compatible with pander::pander if (is.null(table) | length(table) == 0) table <- "NULL" if (any(class(table) == "raw")) table <- as.character(table) # exception: surround objects of class "call" with
 tags to prevent
        # HTML autoformatting
        if (elements$class[i] == "call") {
          table <- capture.output(table)
          writeLines("
", tmp)
          for (i in 1:length(table))
            writeLines(table[i], tmp)
          writeLines("
", tmp) table <- NULL } # shorten the table if it has more than 15 rows if (options$short_table) { if (is.matrix(table) || is.data.frame(table)) { if (nrow(table) > 15) { text <- pander::pander_return( rbind(head(table, 5), tail(table, 5)), caption = "shortened (only first and last five rows shown)") writeLines(text, tmp) next } } } # write table using pander and end each table with a horizontal line writeLines(pander::pander_return(table), tmp) writeLines("\n\n
", tmp) } } }#EndOf::Main # OBJECT STRUCTURE ---- if (structure$structure) { writeLines(paste("\n\n# Object structure\n\n"), tmp) elements.html <- elements elements.html$branch <- gsub("\\$", "$", elements$branch) writeLines(pander::pander_return(elements.html, justify = paste(rep("l", ncol(elements)), collapse = "")), tmp) writeLines("\n\n", tmp) }#EndOf::Structure if (structure$rds) { # SAVE SERIALISED OBJECT (.rds file) ---- writeLines(paste("
# File \n\n"), tmp) writeLines(paste0("", "", "Click here to access the data file", "", ""), tmp) writeLines(paste("\nThe R object was saved to ", file.rds, ".", "To import the object into your R session with the following command:", paste0("
",
                            "x <- readRDS('", file.rds, "')",
                            "
"), "**NOTE:** If you moved the file to another directory or", "renamed the file you need to change the path/filename in the", "code above accordingly!"), tmp) }#EndOf::File # SESSION INFO ---- if (structure$session) { writeLines(paste("\n\n
# Session Info\n\n"), tmp) sessionInfo <- capture.output(sessionInfo()) writeLines(paste(sessionInfo, collapse = "\n\n"), tmp) } # PLOTTING ---- if (structure$plot) { isRLumObject <- length(grep("RLum", class(object))) if (is.list(object)) isRLumList <- all(sapply(object, function(x) inherits(x, "RLum.Data.Curve"))) else isRLumList <- FALSE if (isRLumObject | isRLumList) { # mutual exclusivity: it is either a list or an RLum-Object if (isRLumList) plotCommand <- "invisible(sapply(x, plot)) \n" else plotCommand <- "plot(x) \n" writeLines(paste("\n\n
# Plots\n\n"), tmp) writeLines(paste0( "```{r}\n", "library(Luminescence) \n", "x <- readRDS('", file.rds,"') \n", plotCommand, "```"), tmp) if (inherits(object, "RLum.Results")) { # AGE MODELS ---- models <- c("calc_AverageDose", "calc_CommonDose", "calc_CentralDose", "calc_FiniteMixture", "calc_MinDose", "calc_MaxDose", "calc_IEU", "calc_FuchsLang2001") if (object@originator %in% models) { writeLines(paste0( "```{r}\n", "plot_AbanicoPlot(x) \n", "plot_Histogram(x) \n", "plot_KDE(x) \n", "plot_ViolinPlot(x) \n", "```"), tmp) } } } }#EndOf::Plot close(tmp) ## ------------------------------------------------------------------------ ## ## CLOSE & RENDER ---- rmarkdown::render(file, clean = clean, quiet = quiet) ## ------------------------------------------------------------------------ ## ## SHOW FILE ----- # SHOW REPORT IN RSTUDIOS VIEWER PANE ---- if (isRStudio && show_report) { if (isTemp) { try(rstudioapi::viewer(file.html)) } else { # The Viewer Pane only works for files in a sessions temp directory # see: https://support.rstudio.com/hc/en-us/articles/202133558-Extending-RStudio-with-the-Viewer-Pane file.copy(file.html, file.path(tempdir(), "report.html"), overwrite = TRUE) try(rstudioapi::viewer(file.path(tempdir(), "report.html"))) } } # launch browser if desired # browseURL() listens on localhost to show the file with the problem that # the download links dont work anymore. 
hence, we try to open the file
  # with pander::openFileInOS and use browseURL() only as fallback
  if (launch.browser) {
    opened <- tryCatch(pander::openFileInOS(file.html), error = function(e) "error")
    if (!is.null(opened)) try(browseURL(file.html))
  }
  ## ------------------------------------------------------------------------ ##

  ## CLEANUP ----
  # note that 'clean' as also passed to rmarkdown::render
  if (clean) file.remove(file)

  invisible()
}

################################################################################
##                                                                            ##
##                           HELPER FUNCTIONS                                 ##
##                                                                            ##
################################################################################

# ---------------------------------------------------------------------------- #
# This is a recursive function that goes the objects structure and prints
# all slots/elements along with their class, length, depth.
#
# Every visited node is written to stdout as one pipe-separated record:
#   branch|class|length|depth|endpoint|row|col
# (.struct_RLum() below captures and parses exactly this layout).
#
# x    ... object to be traversed (S4 object, list, data.frame or any leaf)
# root ... character scalar with the access path of 'x'; if missing, the
#          deparsed name of 'x' is used as the tree root
# ---------------------------------------------------------------------------- #
.tree_RLum <- function(x, root) {
  if (missing(root))
    root <- deparse(substitute(x))

  ## S4 object -----
  if (isS4(x)) {
    # print -----
    cat(c(root, .class(x), base::length(x), .depth(root), FALSE, .dimension(x), "\n"),
        sep = "|")

    # recurse into every slot, extending the path with the '@' accessor
    for (slot in slotNames(x)) {
      s4.root <- paste0(root, "@", slot)
      .tree_RLum(slot(x, slot), root = s4.root)
    }
    invisible()

  ## List objects -----
  } else if (inherits(x, "list") | typeof(x) == "list" & !inherits(x, "data.frame")) {
    if (!is.null(names(x)) && length(x) != 0) {
      # print -----
      cat(c(root, .class(x), base::length(x), .depth(root), FALSE, .dimension(x), "\n"),
          sep = "|")

      # named list: recurse with the '$' accessor; names containing blanks
      # are backtick-quoted, empty names fall back to '[[i]]' indexing
      element <- names(x)
      for (i in 1:length(x)) {
        if (grepl(" ", element[i]))
          element[i] <- paste0("`", element[i], "`")

        if (element[i] == "")
          list.root <- paste0(root, "[[", i, "]]")
        else
          list.root <- paste0(root, "$", element[i])

        .tree_RLum(x[[i]], root = list.root)
      }

    } else if (length(x) != 0) {
      # print -----
      cat(c(root, .class(x), base::length(x), .depth(root), FALSE, .dimension(x), "\n"),
          sep = "|")

      # unnamed list: recurse using positional '[[i]]' accessors
      element <- paste0("[[", seq(1, length(x),1), "]]")
      for (i in 1:length(x)) {
        if (grepl(" ", element[i]))
          element[i] <- paste0("`", element[i], "`")

        list.root <- paste0(root, element[i])
        .tree_RLum(x[[i]], root = list.root)
      }

    } else if (length(x) == 0) {
      # empty list: record the node itself, there is nothing to recurse into
      cat(c(root, .class(x), base::length(x), .depth(root), FALSE, .dimension(x), "\n"),
          sep = "|")
    }
    invisible()

  ## Data frames -----
  } else if (inherits(x, "data.frame")) {
    # a data.frame with matrix columns cannot be reported as a single
    # endpoint; descend into its columns instead
    if (any(sapply(x, function(col) { inherits(col, "matrix") } ))) {
      element <- names(x)
      for (i in 1:length(x)) {
        if (grepl(" ", element[i]))
          element[i] <- paste0("`", element[i], "`")

        list.root <- paste0(root, "$", element[[i]])
        .tree_RLum(x[[i]], root = list.root)
      }

    } else {
      # print ----
      cat(c(root, .class(x), base::length(x), .depth(root), TRUE, .dimension(x), "\n"),
          sep = "|")
    }
    invisible()

  ## Last elements -----
  } else {
    # print ----
    cat(c(root, .class(x), base::length(x), .depth(root), TRUE, .dimension(x), "\n"),
        sep = "|")
    invisible()
  }
}

# ---------------------------------------------------------------------------- #
# a) Derive depth in the structure tree by splitting the directory by
# indicative accessors @, $, [[
# b) Wrapper for dim() to cope with NULL values
# c) Wrapper for class() that collapses the classes of an object
# ---------------------------------------------------------------------------- #
# (a) number of accessors in path 'x' = nesting depth (the root itself is 0)
.depth <- function(x) {
  length(strsplit(x, split = "\\$|@|\\[\\[")[[1]]) - 1
}

# (b) returns "nrow|ncol" for objects carrying a dim attribute, otherwise
# c(0, 0); both variants print as two pipe-separated fields once flattened
# by cat(..., sep = "|") in .tree_RLum()
.dimension <- function(x) {
  if (!is.null(dim(x)))
    dim <- paste(dim(x), collapse = "|")
  else
    dim <- c(0, 0)
}

# (c) collapse a (possibly multi-)class vector into one "/"-separated string
.class <- function(x) {
  paste(class(x), collapse = "/")
}

# ---------------------------------------------------------------------------- #
# This function captures the output of the real worker .tree_RLum and returns
# the structure of the object as a data.frame
# ---------------------------------------------------------------------------- #
.struct_RLum <- function(x, root) {
  if (missing(root))
    root <- deparse(substitute(x))

  # capture the pipe-separated records printed by .tree_RLum() and split
  # them back into the seven columns written there
  s <- capture.output(.tree_RLum(x, root = root))
  df <- as.data.frame(do.call(rbind, strsplit(s, "|", fixed = TRUE)),
                      stringsAsFactors = FALSE)
  names(df) <- c("branch", "class", "length", "depth", "endpoint", "row", "col")
  df$depth <- as.integer(df$depth)
  df$length <- as.numeric(df$length)
  df$endpoint <- as.logical(df$endpoint)
  df$row <- as.integer(df$row)
  df$col <- as.integer(df$col)

  # the 'bud' is the last path element of each branch; restore the '[['
  # prefix that the split pattern consumed from positional accessors
  df$bud <- do.call(c, lapply(strsplit(df$branch, "\\$|@|\\[\\["), function(x) x[length(x)]))
  if (length(grep("]", df$bud)) != 0)
    df$bud[grep("]", df$bud)] <- paste0("[[", df$bud[grep("]", df$bud)])

  df$bud.freq <- NA # 1:nrow(df)

  # reorder data.frame
  df <- df[ ,c("branch", "bud", "bud.freq", "class", "length", "depth", "row", "col", "endpoint")]

  # for the report we must not have the same last element names of same
  # depth (HTML cannot discriminate between #links of headers)
  ## TODO: this is highly inefficient for unnamed list due to recurrent indices
  dlevel <- max(table(df$bud))
  for (i in 1:dlevel) {
    unique.bud <- unique(df[is.na(df$bud.freq), ]$bud)
    df[is.na(df$bud.freq), ][match(unique.bud, df[is.na(df$bud.freq), ]$bud), ]$bud.freq <- i - 1
  }

  invisible(df)
}
Luminescence/R/convert_PSL2CSV.R0000644000176200001440000001254214464125673016064 0ustar liggesusers#' @title Export PSL-file(s) to CSV-files
#'
#' @description This function is a wrapper function around the functions [read_PSL2R] and
#' [write_RLum2CSV] and it imports an PSL-file (SUERC portable OSL reader file format)
#' and directly exports its content to CSV-files.
#' If nothing is set for the argument `path` ([write_RLum2CSV]) the input folder will
#' become the output folder.
#'
#' @param file [character] (**required**):
#' name of the PSL-file to be converted to CSV-files
#'
#' @param extract_raw_data [logical] (*with default*): enable/disable raw data
#' extraction.
The PSL files imported into R contain an element `$raw_data`, which
#' provides a few more information (e.g., count errors), sometimes it makes
#' sense to use this data of the more compact standard values created by [read_PSL2R]
#'
#' @param single_table [logical] (*with default*): enable/disable the creation
#' of single table with n rows and n columns, instead of separate [data.frame]
#' objects. Each curve will be represented by two columns for time and counts
#'
#' @param ... further arguments that will be passed to the function
#' [read_PSL2R] and [write_RLum2CSV]
#'
#' @return
#' The function returns either a CSV-file (or many of them) or for the option
#' `export = FALSE` a list comprising objects of type [data.frame] and [matrix]
#'
#' @section Function version: 0.1.2
#'
#' @author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#' @seealso [RLum.Analysis-class], [RLum.Data-class], [RLum.Results-class],
#' [utils::write.table], [write_RLum2CSV], [read_PSL2R]
#'
#' @keywords IO
#'
#' @examples
#'
#' ## export into single data.frame
#' file <- system.file("extdata/DorNie_0016.psl", package="Luminescence")
#' convert_PSL2CSV(file, export = FALSE, single_table = TRUE)
#'
#'
#' \dontrun{
#' ##select your BIN-file
#' file <- file.choose()
#'
#' ##convert
#' convert_PSL2CSV(file)
#'
#' }
#'
#' @md
#' @export
convert_PSL2CSV <- function(
  file,
  extract_raw_data = FALSE,
  single_table = FALSE,
  ...
){

  # General tests -------------------------------------------------------------------------------
  ##file is missing?
  if(missing(file)){
    stop("[convert_PSL2CSV()] 'file' is missing!", call. = FALSE)
  }

  ##set input arguments
  # defaults forwarded to read_PSL2R(); 'merge' is switched on automatically
  # when a single output table is requested
  convert_PSL2R_settings.default <- list(
    drop_bg = FALSE,
    as_decay_curve = TRUE,
    smooth = FALSE,
    merge = if(single_table) TRUE else FALSE,
    export = TRUE
  )

  ##modify list on demand
  convert_PSL2R_settings <- modifyList(x = convert_PSL2R_settings.default, val = list(...))

  # Import file ---------------------------------------------------------------------------------
  # 'file' may already be an imported RLum object; only call read_PSL2R()
  # for anything else (i.e., a file path)
  if(!inherits(file, "RLum")){
    object <- read_PSL2R(
      file = file,
      drop_bg = convert_PSL2R_settings$drop_bg,
      as_decay_curve = convert_PSL2R_settings$as_decay_curve,
      smooth = convert_PSL2R_settings$smooth,
      merge = convert_PSL2R_settings$merge
    )

  }else{
    object <- file
  }

  ## try to extract file name from object ... this will be needed later
  filename <- try({
    rev(strsplit(object@info$Datafile_Path, "\\", fixed = TRUE)[[1]])[1]
  }, silent = TRUE)

  # raw data ----------------------------------------------------------------
  ## extract raw data instead of conventional data
  if(extract_raw_data) {
    psl_raw <- lapply(object@records, function(x) x@info$raw_data)
    names(psl_raw) <- names(object)
    object <- psl_raw
  }

  # single_table ------------------------------------------------------------
  ## generate a single table
  if(single_table) {
    ## run the conversion to CSV objects
    # self-call with export = FALSE yields the list of per-curve data.frames;
    # 'compact' is forwarded via ... — presumably consumed by write_RLum2CSV;
    # TODO confirm against write_RLum2CSV()
    if(inherits(object, "RLum")) {
      l <- convert_PSL2CSV(object, export = FALSE, compact = FALSE)

    } else {
      l <- object
    }

    ## get max row number
    nrow_max <- vapply(l, nrow, numeric(1))

    ## create super matrix
    # curves may differ in length, so the matrix is sized by the longest
    # curve; shorter curves leave NA in their trailing rows
    m <- matrix(NA, nrow = max(nrow_max), ncol = length(nrow_max) * ncol(l[[1]]))

    ## fill matrix
    # s maps each curve to its destination column range (one matrix column
    # per source column, curves laid out side by side)
    s <- matrix(seq_len(length(l) * ncol(l[[1]])), nrow = ncol(l[[1]]))
    for(i in 1:length(l)) {
      m[1:nrow(l[[i]]), s[1,i]:(rev(s[,i])[1])] <- as.matrix(l[[i]])
    }

    ## set column names
    if(!extract_raw_data) {
      # assumes two columns per curve (time/counts) — TODO confirm this holds
      # for every read_PSL2R() output
      colnames(m) <- paste0(rep(names(l), each = 2), c("_t", "_cts"))

    } else {
      colnames(m) <- paste0(
        rep(seq_along(l), each = ncol(l[[1]])), "_" ,
        rep(names(l), each = ncol(l[[1]])), "_",
        rep(colnames(l[[1]]), length(l)))
    }

    ## overwrite object
    object <- as.data.frame(m)

    ## if possible, provide the file name as attribute
    if(!inherits(filename, "try-error"))
      attr(object, "filename") <- gsub(".", "_", filename, fixed = TRUE)
  }

  # Export to CSV -------------------------------------------------------------------------------
  ##get all arguments we want to pass and remove the doubled one
  # first occurrence wins, so the explicit settings above take precedence here
  arguments <- c(
    list(
      object = object,
      col.names = if(single_table[1] || extract_raw_data[1]) TRUE else FALSE,
      export = convert_PSL2R_settings$export),
    list(...))

  arguments[duplicated(names(arguments))] <- NULL

  ## now modify list again to ensure that the user input is always respected
  arguments <- modifyList(arguments, val = list(...), keep.null = TRUE)

  ##this if-condition prevents NULL in the terminal
  if(convert_PSL2R_settings$export){
    invisible(do.call("write_RLum2CSV", arguments))

  }else{
    do.call("write_RLum2CSV", arguments)
  }
}
Luminescence/R/analyse_IRSAR.RF.R0000644000176200001440000022551714264017373016135 0ustar liggesusers#' @title Analyse IRSAR RF measurements
#'
#' @description Function to analyse IRSAR RF measurements on K-feldspar samples, performed
#' using the protocol according to Erfurt et al. (2003) and beyond.
#'
#' @details The function performs an IRSAR analysis described for K-feldspar samples by
#' Erfurt et al. (2003) assuming a negligible sensitivity change of the RF
#' signal.
#'
#' **General Sequence Structure** (according to Erfurt et al., 2003)
#'
#'
#' 1. Measuring IR-RF intensity of the natural dose for a few seconds (\eqn{RF_{nat}})
#' 2. Bleach the samples under solar conditions for at least 30 min without changing the geometry
#' 3. Waiting for at least one hour
#' 4. Regeneration of the IR-RF signal to at least the natural level (measuring (\eqn{RF_{reg}})
#' 5. Fitting data with a stretched exponential function
#' 6.
Calculate the the palaeodose \eqn{D_{e}} using the parameters from the fitting #' #' #' Actually two methods are supported to obtain the \eqn{D_{e}}: #' `method = "FIT"` and `method = "SLIDE"`: #' #' **`method = "FIT"`** #' #' The principle is described above and follows the original suggestions by #' Erfurt et al., 2003. For the fitting the mean count value of the `RF_nat` curve is used. #' #' Function used for the fitting (according to Erfurt et al. (2003)): #' #' \deqn{\phi(D) = \phi_{0}-\Delta\phi(1-exp(-\lambda*D))^\beta} #' #' with #' \eqn{\phi(D)} the dose dependent IR-RF flux, #' \eqn{\phi_{0}} the initial IR-RF flux, #' \eqn{\Delta\phi} the dose dependent change of the IR-RF flux, #' \eqn{\lambda} the exponential parameter, \eqn{D} the dose and #' \eqn{\beta} the dispersive factor. #' #' To obtain the palaeodose #' \eqn{D_{e}} the function is changed to: #' #' \deqn{D_{e} = ln(-(\phi(D) - \phi_{0})/(-\lambda*\phi)^{1/\beta}+1)/-\lambda} #' #' The fitting is done using the `port` algorithm of the [nls] function. #' #' #' **`method = "SLIDE"`** #' #' For this method, the natural curve is slid along the x-axis until #' congruence with the regenerated curve is reached. Instead of fitting this #' allows working with the original data without the need for any physical #' model. This approach was introduced for RF curves by Buylaert et al., 2012 #' and Lapp et al., 2012. #' #' Here the sliding is done by searching for the minimum of the squared residuals. #' For the mathematical details of the implementation see Frouin et al., 2017 #' #' **`method.control`** #' #' To keep the generic argument list as clear as possible, arguments to control the methods #' for De estimation are all pre set with meaningful default parameters and can be #' handled using the argument `method.control` only, e.g., #' `method.control = list(trace = TRUE)`. 
Supported arguments are: #' #' \tabular{lll}{ #' **ARGUMENT** \tab **METHOD** \tab **DESCRIPTION**\cr #' `trace` \tab `FIT`, `SLIDE` \tab as in [nls]; shows sum of squared residuals\cr #' `trace_vslide` \tab `SLIDE` \tab [logical] argument to enable or disable the tracing of the vertical sliding\cr #' `maxiter` \tab `FIT` \tab as in [nls]\cr #' `warnOnly` \tab `FIT` \tab as in [nls]\cr #' `minFactor` \tab `FIT` \tab as in [nls]\cr #' `correct_onset` \tab `SLIDE` \tab The logical argument shifts the curves along the x-axis by the first channel, #' as light is expected in the first channel. The default value is `TRUE`.\cr #' `show_density` \tab `SLIDE` \tab [logical] (*with default*) #' enables or disables KDE plots for MC run results. If the distribution is too narrow nothing is shown.\cr #' `show_fit` \tab `SLIDE` \tab [logical] (*with default*) #' enables or disables the plot of the fitted curve routinely obtained during the evaluation.\cr #' `n.MC` \tab `SLIDE` \tab [integer] (*with default*): #' This controls the number of MC runs within the sliding (assessing the possible minimum values). #' The default `n.MC = 1000`. Note: This parameter is not the same as controlled by the #' function argument `n.MC`. \cr #' `vslide_range` \tab `SLDE` \tab [logical] or [numeric] or [character] (*with default*): #' This argument sets the boundaries for a vertical curve #' sliding. The argument expects a vector with an absolute minimum and a maximum (e.g., `c(-1000,1000)`). #' Alternatively the values `NULL` and `'auto'` are allowed. The automatic mode detects the #' reasonable vertical sliding range (**recommended**). `NULL` applies no vertical sliding. #' The default is `NULL`.\cr #' `cores` \tab `SLIDE` \tab `number` or `character` (*with default*): set number of cores to be allocated #' for a parallel processing of the Monte-Carlo runs. The default value is `NULL` (single thread), #' the recommended values is `'auto'`. 
An optional number (e.g., `cores` = 8) assigns a value manually. #' } #' #' #' **Error estimation** #' #' For **`method = "FIT"`** the asymmetric error range is obtained by using the 2.5 % (lower) and #' the 97.5 % (upper) quantiles of the \eqn{RF_{nat}} curve for calculating the \eqn{D_{e}} error range. #' #' For **`method = "SLIDE"`** the error is obtained by bootstrapping the residuals of the slid #' curve to construct new natural curves for a Monte Carlo simulation. The error is returned in two #' ways: (a) the standard deviation of the herewith obtained \eqn{D_{e}} from the MC runs and (b) the confidence #' interval using the 2.5 % (lower) and the 97.5 % (upper) quantiles. The results of the MC runs #' are returned with the function output. #' #' **Test parameters** #' #' The argument `test_parameters` allows to pass some thresholds for several test parameters, #' which will be evaluated during the function run. If a threshold is set and it will be exceeded the #' test parameter status will be set to `"FAILED"`. Intentionally this parameter is not termed #' `'rejection criteria'` as not all test parameters are evaluated for both methods and some parameters #' are calculated by not evaluated by default. Common for all parameters are the allowed argument options #' `NA` and `NULL`. If the parameter is set to `NA` the value is calculated but the #' result will not be evaluated, means it has no effect on the status (`"OK"` or `"FAILED"`) #' of the parameter. #' Setting the parameter to `NULL` disables the parameter entirely and the parameter will be #' also removed from the function output. This might be useful in cases where a particular parameter #' asks for long computation times. Currently supported parameters are: #' #' `curves_ratio` [numeric] (default: `1.001`): #' #' The ratio of \eqn{RF_{nat}} over \eqn{RF_{reg}} in the range of\eqn{RF_{nat}} of is calculated #' and should not exceed the threshold value. 
#' #' `intersection_ratio` [numeric] (default: `NA`): #' #' Calculated as absolute difference from 1 of the ratio of the integral of the normalised RF-curves, #' This value indicates intersection of the RF-curves and should be close to 0 if the curves #' have a similar shape. For this calculation first the corresponding time-count pair value on the RF_reg #' curve is obtained using the maximum count value of the `RF_nat` curve and only this segment (fitting to #' the `RF_nat` curve) on the RF_reg curve is taken for further calculating this ratio. If nothing is #' found at all, `Inf` is returned. #' #' `residuals_slope` [numeric] (default: `NA`; only for `method = "SLIDE"`): #' #' A linear function is fitted on the residuals after sliding. #' The corresponding slope can be used to discard values as a high (positive, negative) slope #' may indicate that both curves are fundamentally different and the method cannot be applied at all. #' Per default the value of this parameter is calculated but not evaluated. #' #' `curves_bounds` [numeric] (default: \eqn{max(RF_{reg_counts})}: #' #' This measure uses the maximum time (x) value of the regenerated curve. #' The maximum time (x) value of the natural curve cannot be larger than this value. However, although #' this is not recommended the value can be changed or disabled. #' #' `dynamic_ratio` [numeric] (default: `NA`): #' #' The dynamic ratio of the regenerated curve is calculated as ratio of the minimum and maximum count values. #' #' `lambda`, `beta` and `delta.phi` #' [numeric] (default: `NA`; `method = "SLIDE"`): #' #' The stretched exponential function suggested by Erfurt et al. (2003) describing the decay of #' the RF signal, comprises several parameters that might be useful to evaluate the shape of the curves. #' For `method = "FIT"` this parameter is obtained during the fitting, for `method = "SLIDE"` a #' rather rough estimation is made using the function [minpack.lm::nlsLM] and the equation #' given above. 
Note: As this procedure requests more computation time, setting of one of these three parameters #' to `NULL` also prevents a calculation of the remaining two. #' #' #' @param object [RLum.Analysis-class] or a [list] of [RLum.Analysis-class]-objects (**required**): #' input object containing data for protocol analysis. The function expects to #' find at least two curves in the [RLum.Analysis-class] object: (1) `RF_nat`, (2) `RF_reg`. #' If a `list` is provided as input all other parameters can be provided as #' `list` as well to gain full control. #' #' @param sequence_structure [vector] [character] (*with default*): #' specifies the general sequence structure. Allowed steps are `NATURAL`, `REGENERATED`. #' In addition any other character is allowed in the sequence structure; #' such curves will be ignored during the analysis. #' #' @param RF_nat.lim [vector] (*with default*): #' set minimum and maximum channel range for natural signal fitting and sliding. #' If only one value is provided this will be treated as minimum value and the #' maximum limit will be added automatically. #' #' @param RF_reg.lim [vector] (*with default*): #' set minimum and maximum channel range for regenerated signal fitting and sliding. #' If only one value is provided this will be treated as minimum value and the #' maximum limit will be added automatically. #' #' @param method [character] (*with default*): #' setting method applied for the data analysis. #' Possible options are `"FIT"` or `"SLIDE"`. #' #' @param method.control [list] (*optional*): #' parameters to control the method, that can be passed to the chosen method. #' These are for (1) `method = "FIT"`: `'trace'`, `'maxiter'`, `'warnOnly'`, `'minFactor'` and for #' (2) `method = "SLIDE"`: `'correct_onset'`, `'show_density'`, `'show_fit'`, `'trace'`. #' See details. #' #' @param test_parameters [list] (*with default*): #' set test parameters. 
Supported parameters are: `curves_ratio`, `residuals_slope` (only for #' `method = "SLIDE"`), `curves_bounds`, `dynamic_ratio`, #' `lambda`, `beta` and `delta.phi`. All input: [numeric] #' values, `NA` and `NULL` (s. Details) #' #' (see Details for further information) #' #' @param n.MC [numeric] (*with default*): #' set number of Monte Carlo runs for start parameter estimation (`method = "FIT"`) or #' error estimation (`method = "SLIDE"`). This value can be set to `NULL` to skip the #' MC runs. Note: Large values will significantly increase the computation time #' #' @param txtProgressBar [logical] (*with default*): #' enables `TRUE` or disables `FALSE` the progression bar during MC runs #' #' @param plot [logical] (*with default*): #' plot output (`TRUE` or `FALSE`) #' #' @param plot_reduced [logical] (*optional*): #' provides a reduced plot output if enabled to allow common R plot combinations, #' e.g., `par(mfrow(...))`. If `TRUE` no residual plot #' is returned; it has no effect if `plot = FALSE` #' #' @param ... further arguments that will be passed to the plot output. #' Currently supported arguments are `main`, `xlab`, `ylab`, #' `xlim`, `ylim`, `log`, `legend` (`TRUE/FALSE`), #' `legend.pos`, `legend.text` (passes argument to x,y in #' [graphics::legend]), `xaxt` #' #' #' @return #' The function returns numerical output and an (*optional*) plot. #' #' -----------------------------------\cr #' `[ NUMERICAL OUTPUT ]`\cr #' -----------------------------------\cr #' #' **`RLum.Results`**-object #' #' **slot:** **`@data`** #' #' `[.. 
$data : data.frame]` #' #' \tabular{lll}{ #' **Column** \tab **Type** \tab **Description**\cr #' `DE` \tab `numeric` \tab the obtained equivalent dose\cr #' `DE.ERROR` \tab `numeric` \tab (only `method = "SLIDE"`) standard deviation obtained from MC runs \cr #' `DE.LOWER` \tab `numeric`\tab 2.5% quantile for De values obtained by MC runs \cr #' `DE.UPPER` \tab `numeric`\tab 97.5% quantile for De values obtained by MC runs \cr #' `DE.STATUS` \tab `character`\tab test parameter status\cr #' `RF_NAT.LIM` \tab `character`\tab used `RF_nat` curve limits \cr #' `RF_REG.LIM` \tab `character`\tab used `RF_reg` curve limits\cr #' `POSITION` \tab `integer`\tab (*optional*) position of the curves\cr #' `DATE` \tab `character`\tab (*optional*) measurement date\cr #' `SEQUENCE_NAME` \tab `character`\tab (*optional*) sequence name\cr #' `UID` \tab `character`\tab unique data set ID #' } #' #' `[.. $De.MC : numeric]` #' #' A `numeric` vector with all the De values obtained by the MC runs. #' #' `[.. $test_parameters : data.frame]` #' #' \tabular{lll}{ #' **Column** \tab **Type** \tab **Description**\cr #' `POSITION` \tab `numeric` \tab aliquot position \cr #' `PARAMETER` \tab `character` \tab test parameter name \cr #' `THRESHOLD` \tab `numeric` \tab set test parameter threshold value \cr #' `VALUE` \tab `numeric` \tab the calculated test parameter value (to be compared with the threshold)\cr #' `STATUS` \tab `character` \tab test parameter status either `"OK"` or `"FAILED"` \cr #' `SEQUENCE_NAME` \tab `character` \tab name of the sequence, so far available \cr #' `UID` \tab `character`\tab unique data set ID #' } #' #' `[.. $fit : data.frame]` #' #' An [nls] object produced by the fitting. #' #' `[.. $slide : list]` #' #' A [list] with data produced during the sliding. Some elements are previously #' reported with the summary object data. 
List elements are: #' #' \tabular{lll}{ #' **Element** \tab **Type** \tab **Description**\cr #' `De` \tab `numeric` \tab the final De obtained with the sliding approach \cr #' `De.MC` \tab `numeric` \tab all De values obtained by the MC runs \cr #' `residuals` \tab `numeric` \tab the obtained residuals for each channel of the curve \cr #' `trend.fit` \tab `lm` \tab fitting results produced by the fitting of the residuals \cr #' `RF_nat.slid` \tab `matrix` \tab the slid `RF_nat` curve \cr #' `t_n.id` \tab `numeric` \tab the index of the t_n offset \cr #' `I_n` \tab `numeric` \tab the vertical intensity offset if a vertical slide was applied \cr #' `algorithm_error` \tab `numeric` \tab the vertical sliding suffers from a systematic effect induced by the used #' algorithm. The returned value is the standard deviation of all obtained De values while expanding the #' vertical sliding range. I can be added as systematic error to the final De error; so far wanted.\cr #' `vslide_range` \tab `numeric` \tab the range used for the vertical sliding \cr #' `squared_residuals` \tab `numeric` \tab the squared residuals (horizontal sliding) #' } #' #' #' **slot:** **`@info`** #' #' The original function call ([methods::language-class]-object) #' #' The output (`data`) should be accessed using the function [get_RLum] #' #' ------------------------\cr #' `[ PLOT OUTPUT ]`\cr #' ------------------------\cr #' #' The slid IR-RF curves with the finally obtained De #' #' @note #' This function assumes that there is no sensitivity change during the #' measurements (natural vs. regenerated signal), which is in contrast to the #' findings by Buylaert et al. (2012). 
#' #' @section Function version: 0.7.8 #' #' @author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #' @seealso [RLum.Analysis-class], [RLum.Results-class], [get_RLum], #' [nls], [minpack.lm::nlsLM], `parallel::mclapply` #' #' #' @references #' Buylaert, J.P., Jain, M., Murray, A.S., Thomsen, K.J., Lapp, T., #' 2012. IR-RF dating of sand-sized K-feldspar extracts: A test of accuracy. #' Radiation Measurements 44 (5-6), 560-565. doi: 10.1016/j.radmeas.2012.06.021 #' #' Erfurt, G., Krbetschek, M.R., 2003. IRSAR - A single-aliquot #' regenerative-dose dating protocol applied to the infrared radiofluorescence #' (IR-RF) of coarse- grain K-feldspar. Ancient TL 21, 35-42. #' #' Erfurt, G., 2003. Infrared luminescence of Pb+ centres in potassium-rich #' feldspars. physica status solidi (a) 200, 429-438. #' #' Erfurt, G., Krbetschek, M.R., 2003. Studies on the physics of the infrared #' radioluminescence of potassium feldspar and on the methodology of its #' application to sediment dating. Radiation Measurements 37, 505-510. #' #' Erfurt, G., Krbetschek, M.R., Bortolot, V.J., Preusser, F., 2003. A fully #' automated multi-spectral radioluminescence reading system for geochronometry #' and dosimetry. Nuclear Instruments and Methods in Physics Research Section #' B: Beam Interactions with Materials and Atoms 207, 487-499. #' #' Frouin, M., Huot, S., Kreutzer, S., Lahaye, C., Lamothe, M., Philippe, A., Mercier, N., 2017. #' An improved radiofluorescence single-aliquot regenerative dose protocol for K-feldspars. #' Quaternary Geochronology 38, 13-24. doi:10.1016/j.quageo.2016.11.004 #' #' Lapp, T., Jain, M., Thomsen, K.J., Murray, A.S., Buylaert, J.P., 2012. New #' luminescence measurement facilities in retrospective dosimetry. Radiation #' Measurements 47, 803-808. doi:10.1016/j.radmeas.2012.02.006 #' #' Trautmann, T., 2000. A study of radioluminescence kinetics of natural #' feldspar dosimeters: experiments and simulations. 
Journal of Physics D: #' Applied Physics 33, 2304-2310. #' #' Trautmann, T., Krbetschek, M.R., Dietrich, A., Stolz, W., 1998. #' Investigations of feldspar radioluminescence: potential for a new dating #' technique. Radiation Measurements 29, 421-425. #' #' Trautmann, T., Krbetschek, M.R., Dietrich, A., Stolz, W., 1999. Feldspar #' radioluminescence: a new dating method and its physical background. Journal #' of Luminescence 85, 45-58. #' #' Trautmann, T., Krbetschek, M.R., Stolz, W., 2000. A systematic study of the #' radioluminescence properties of single feldspar grains. Radiation #' Measurements 32, 685-690. #' #' #' @keywords datagen #' #' #' @examples #' #' ##load data #' data(ExampleData.RLum.Analysis, envir = environment()) #' #' ##(1) perform analysis using the method 'FIT' #' results <- analyse_IRSAR.RF(object = IRSAR.RF.Data) #' #' ##show De results and test paramter results #' get_RLum(results, data.object = "data") #' get_RLum(results, data.object = "test_parameters") #' #' ##(2) perform analysis using the method 'SLIDE' #' results <- analyse_IRSAR.RF(object = IRSAR.RF.Data, method = "SLIDE", n.MC = 1) #' #' \dontrun{ #' ##(3) perform analysis using the method 'SLIDE' and method control option #' ## 'trace #' results <- analyse_IRSAR.RF( #' object = IRSAR.RF.Data, #' method = "SLIDE", #' method.control = list(trace = TRUE)) #' #' } #' #' @md #' @export analyse_IRSAR.RF<- function( object, sequence_structure = c("NATURAL", "REGENERATED"), RF_nat.lim = NULL, RF_reg.lim = NULL, method = "FIT", method.control = NULL, test_parameters = NULL, n.MC = 10, txtProgressBar = TRUE, plot = TRUE, plot_reduced = FALSE, ... ){ ##TODO ## - if a file path is given, the function should try to find out whether an XSYG-file or ## a BIN-file is provided ## - add NEWS for vslide_range ## - update documentary ... if it works as expected. 
# SELF CALL ----------------------------------------------------------------------------------- if(is.list(object)){ ##extent the list of arguments if set ##sequence_structure sequence_structure <- rep(list(sequence_structure), length = length(object)) ##RF_nat.lim RF_nat.lim <- rep(list(RF_nat.lim), length = length(object)) ##RF_reg.lim RF_reg.lim <- rep(list(RF_reg.lim), length = length(object)) ##method method <- rep(list(method), length = length(object)) ##method.control method.control <- rep(list(method.control), length = length(object)) ##test_parameters if(is(test_parameters[[1]], "list")){ test_parameters <- rep(test_parameters, length = length(object)) }else{ test_parameters <- rep(list(test_parameters), length = length(object)) } ##n.MC n.MC <- rep(list(n.MC), length = length(object)) ##main if("main"%in% names(list(...))){ if(is(list(...)$main, "list")){ temp_main <- rep(list(...)$main, length = length(object)) }else{ temp_main <- rep(list(list(...)$main), length = length(object)) } }else{ if(object[[1]]@originator == "read_RF2R"){ temp_main <- lapply(object, function(x) x@info$ROI) } else { temp_main <- as.list(paste0("ALQ #",1:length(object))) } } ##run analysis temp <- lapply(1:length(object), function(x){ analyse_IRSAR.RF( object = object[[x]], sequence_structure = sequence_structure[[x]], RF_nat.lim = RF_nat.lim[[x]], RF_reg.lim = RF_reg.lim[[x]], method = method[[x]], method.control = method.control[[x]], test_parameters = test_parameters[[x]], n.MC = n.MC[[x]], txtProgressBar = txtProgressBar, plot = plot, plot_reduced = plot_reduced, main = temp_main[[x]], ...) }) ##combine everything to one RLum.Results object as this as what was written ... 
only ##one object ##merge results and check if the output became NULL results <- merge_RLum(temp) ##DO NOT use invisible here, this will stop the function from stopping if(length(results) == 0){ return(NULL) }else{ return(results) } } ##===============================================================================================# ## INTEGRITY TESTS AND SEQUENCE STRUCTURE TESTS ##===============================================================================================# ##MISSING INPUT if(missing("object")){ stop("[analyse_IRSAR.RF()] No input 'object' set!", call. = FALSE) } ##INPUT OBJECTS if(!is(object, "RLum.Analysis")){ stop("[analyse_IRSAR.RF()] Input object is not of type 'RLum.Analysis'!", call. = FALSE) } ##CHECK OTHER ARGUMENTS if(!is(sequence_structure, "character")){ stop("[analyse_IRSAR.RF()] argument 'sequence_structure' needs to be of type character.", call. = FALSE) } ##n.MC if((!is(n.MC, "numeric") || n.MC <= 0) && !is.null(n.MC)){ stop("[analyse_IRSAR.RF()] argument 'n.MC' has to be of type integer and >= 0", call. = FALSE) } ##SELECT ONLY MEASURED CURVES ## (this is not really necessary but rather user friendly) if(!length(suppressWarnings(get_RLum(object, curveType= "measured"))) == 0){ object <- get_RLum(object, curveType= "measured", drop = FALSE) } ##INVESTIGATE SEQUENCE OBJECT STRUCTURE ##grep object strucute temp.sequence_structure <- structure_RLum(object) ##check whether both curve have the same length, in this case we cannot proceed (sliding ##is not allowed) if(length(unique(temp.sequence_structure[["x.max"]])) == 1 && method == "SLIDE" && (is.null(RF_nat.lim) & is.null(RF_reg.lim))) { stop("[analyse_IRSAR.RF()] There is no further sliding space left. All curves have the same length and no limitation was set!", call. 
= FALSE) } ##grep name of the sequence and the position this will be useful later on ##name if (!is.null(suppressWarnings(get_RLum(get_RLum(object, record.id = 1), info.object = "name")))) { aliquot.sequence_name <- get_RLum(get_RLum(object, record.id = 1), info.object = "name") }else{ aliquot.sequence_name <- NA } ##position if (!is.null(suppressWarnings(get_RLum(get_RLum(object, record.id = 1), info.object = "position")))){ aliquot.position <- get_RLum(get_RLum(object, record.id = 1), info.object = "position") }else{ aliquot.position <- NA } ##date if (!is.null(suppressWarnings(get_RLum(get_RLum(object, record.id = 1), info.object = "startDate")))){ aliquot.date <- get_RLum(get_RLum(object, record.id = 1), info.object = "startDate") ##transform so far the format can be identified if (nchar(aliquot.date) == 14) { aliquot.date <- paste(c( substr(aliquot.date, 1,4),substr(aliquot.date, 5,6), substr(aliquot.date, 7,8) ), collapse = "-") } }else{ aliquot.date <- NA } ##set structure values temp.sequence_structure$protocol.step <- rep(sequence_structure, length_RLum(object))[1:length_RLum(object)] ##check if the first curve is shorter than the first curve if (temp.sequence_structure[which(temp.sequence_structure[["protocol.step"]] == "NATURAL"),"n.channels"] > temp.sequence_structure[which(temp.sequence_structure[["protocol.step"]] == "REGENERATED"),"n.channels"]) { stop("[analyse_IRSAR.RF()] Number of data channels in RF_nat > RF_reg. This is not supported!", call. 
= FALSE) } ##===============================================================================================# ## SET CURVE LIMITS ##===============================================================================================# ##the setting here will be valid for all subsequent operations ##01 ##first get allowed curve limits, this makes the subsequent checkings easier and the code ##more easier to read RF_nat.lim.default <- c(1,max( subset( temp.sequence_structure, temp.sequence_structure$protocol.step == "NATURAL" )$n.channels )) RF_reg.lim.default <- c(1,max( subset( temp.sequence_structure, temp.sequence_structure$protocol.step == "REGENERATED" )$n.channels )) ##02 - check boundaris ##RF_nat.lim if (is.null(RF_nat.lim) || is.na(RF_nat.lim)) { RF_nat.lim <- RF_nat.lim.default }else { ##this allows to provide only one boundary and the 2nd will be added automatically if (length(RF_nat.lim) == 1) { RF_nat.lim <- c(RF_nat.lim, RF_nat.lim.default[2]) } if (min(RF_nat.lim) < RF_nat.lim.default[1] | max(RF_nat.lim) > RF_nat.lim.default[2]) { RF_nat.lim <- RF_nat.lim.default warning(paste0( "RF_nat.lim out of bounds, reset to: RF_nat.lim = c(", paste(range(RF_nat.lim), collapse = ":") ),")", call. = FALSE) } } ##RF_reg.lim ## if (is.null(RF_reg.lim)) { RF_reg.lim <- RF_reg.lim.default }else { ##this allows to provide only one boundary and the 2nd will be added automatically if (length(RF_reg.lim) == 1) { RF_reg.lim <- c(RF_reg.lim, RF_reg.lim.default[2]) } if (min(RF_reg.lim) < RF_reg.lim.default[1] | max(RF_reg.lim) > RF_reg.lim.default[2]) { RF_reg.lim <- RF_reg.lim.default warning(paste0( "RF_reg.lim out of bounds, reset to: RF_reg.lim = c(", paste(range(RF_reg.lim), collapse = ":") ),")", call. = FALSE) } } ##check if intervalls make sense at all if(length(RF_reg.lim[1]:RF_reg.lim[2]) < RF_nat.lim[2]){ RF_reg.lim[2] <- RF_reg.lim[2] + abs(length(RF_reg.lim[1]:RF_reg.lim[2]) - RF_nat.lim[2]) + 1 warning(paste0("Length intervall RF_reg.lim < length RF_nat. 
Reset to RF_reg.lim = c(", paste(range(RF_reg.lim), collapse=":")),")", call. = FALSE) } # Method Control Settings --------------------------------------------------------------------- ##===============================================================================================# ## SET METHOD CONTROL PARAMETER - FOR BOTH METHODS ##===============================================================================================# ## ##set supported values with default method.control.settings <- list( trace = FALSE, trace_vslide = FALSE, maxiter = 500, warnOnly = FALSE, minFactor = 1 / 4096, correct_onset = TRUE, show_density = TRUE, show_fit = FALSE, n.MC = if(is.null(n.MC)){NULL}else{1000}, vslide_range = NULL, cores = NULL ) ##modify list if necessary if(!is.null(method.control)){ if(!is(method.control, "list")){ stop("[analyse_IRSAR.RF()] 'method.control' has to be of type 'list'!", call. = FALSE) } ##check whether this arguments are supported at all if (length(which( names(method.control) %in% names(method.control.settings) == FALSE ) != 0)) { temp.text <- paste0( "[analyse_IRSAR.RF()] Argument(s) '", paste(names(method.control)[which(names(method.control) %in% names(method.control.settings) == FALSE)], collapse = " and "), "' are not supported for 'method.control'. Supported arguments are: ", paste(names(method.control.settings), collapse = ", ") ) warning(temp.text, call. 
= FALSE) rm(temp.text) } ##modify list method.control.settings <- modifyList(x = method.control.settings, val = method.control) } ##===============================================================================================# ## SET PLOT PARAMETERS ##===============================================================================================# ##get channel resolution (should be equal for all curves, but if not the mean is taken) resolution.RF <- round(mean((temp.sequence_structure$x.max/temp.sequence_structure$n.channels)),digits=1) plot.settings <- list( main = "IR-RF", xlab = "Time [s]", ylab = paste0("IR-RF [cts/", resolution.RF," s]"), log = "", cex = 1, legend = TRUE, legend.text = c("RF_nat","RF_reg"), legend.pos = "top", xaxt = "s" ##xlim and ylim see below as they has to be modified differently ) ##modify list if something was set plot.settings <- modifyList(plot.settings, list(...)) ##=============================================================================# ## ANALYSIS ##=============================================================================# ##grep first regenerated curve RF_reg <- as.data.frame(object@records[[ temp.sequence_structure[temp.sequence_structure$protocol.step=="REGENERATED","id"]]]@data) ##correct of the onset of detection by using the first time value if (method == "SLIDE" & method.control.settings$correct_onset == TRUE) { RF_reg[,1] <- RF_reg[,1] - RF_reg[1,1] } RF_reg.x <- RF_reg[RF_reg.lim[1]:RF_reg.lim[2],1] RF_reg.y <- RF_reg[RF_reg.lim[1]:RF_reg.lim[2],2] ##grep values from natural signal RF_nat <- as.data.frame(object@records[[ temp.sequence_structure[temp.sequence_structure$protocol.step=="NATURAL","id"]]]@data) ##correct of the onset of detection by using the first time value if (method == "SLIDE" & method.control.settings$correct_onset == TRUE) { RF_nat[,1] <- RF_nat[,1] - RF_nat[1,1] } ##limit values to fit range (at least to the minimum) RF_nat.limited<- RF_nat[min(RF_nat.lim):max(RF_nat.lim),] ##calculate some 
useful parameters RF_nat.mean <- mean(RF_nat.limited[,2]) RF_nat.sd <- sd(RF_nat.limited[,2]) RF_nat.error.lower <- quantile(RF_nat.limited[,2], 0.975, na.rm = TRUE) RF_nat.error.upper <- quantile(RF_nat.limited[,2], 0.025, na.rm = TRUE) ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++# ##METHOD FIT ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++# ## REGENERATED SIGNAL # set function for fitting ------------------------------------------------ fit.function <- as.formula(y ~ phi.0 - (delta.phi * ((1 - exp( -lambda * x )) ^ beta))) ##stretched expontial function according to Erfurt et al. (2003) ## + phi.0 >> initial IR-RF flux ## + delta.phi >> dose dependent change of the IR-RF flux ## + lambda >> exponential parameter ## + beta >> dispersive factor # set start parameter estimation ------------------------------------------ fit.parameters.start <- c( phi.0 = max(RF_reg.y), lambda = 0.0001, beta = 1, delta.phi = 1.5 * (max(RF_reg.y) - min(RF_reg.y)) ) if(method == "FIT"){ # start nls fitting ------------------------------------------------------- ##Monte Carlo approach for fitting fit.parameters.results.MC.results <- data.frame() ##produce set of start paramters phi.0.MC <- rep(fit.parameters.start["phi.0"], n.MC) lambda.MC <- seq(0.0001, 0.001, by=(0.001-0.0001)/n.MC) beta.MC <- rep(fit.parameters.start["beta"], n.MC) delta.phi.MC <- rep(fit.parameters.start["delta.phi"], n.MC) ##start fitting loop for MC runs for(i in 1:n.MC){ fit.MC <- try(nls( fit.function, trace = FALSE, data = list(x = RF_reg.x, y = RF_reg.y), algorithm = "port", start = list( phi.0 = phi.0.MC[i], delta.phi = delta.phi.MC[i], lambda = lambda.MC[i], beta = beta.MC[i] ), nls.control( maxiter = 100, warnOnly = FALSE, minFactor = 1 / 1024 ), lower = c( phi.0 = .Machine$double.xmin, delta.phi = .Machine$double.xmin, lambda = .Machine$double.xmin, beta = .Machine$double.xmin ), upper = c( phi.0 = 
max(RF_reg.y), delta.phi = max(RF_reg.y), lambda = 1, beta = 100 ) ), silent = TRUE ) if(inherits(fit.MC,"try-error") == FALSE) { temp.fit.parameters.results.MC.results <- coef(fit.MC) fit.parameters.results.MC.results[i,"phi.0"] <- temp.fit.parameters.results.MC.results["phi.0"] fit.parameters.results.MC.results[i,"lambda"] <- temp.fit.parameters.results.MC.results["lambda"] fit.parameters.results.MC.results[i,"delta.phi"] <- temp.fit.parameters.results.MC.results["delta.phi"] fit.parameters.results.MC.results[i,"beta"] <- temp.fit.parameters.results.MC.results["beta"] } } ##FINAL fitting after successful MC if(length(na.omit(fit.parameters.results.MC.results)) != 0){ ##choose median as final fit version fit.parameters.results.MC.results <- sapply(na.omit(fit.parameters.results.MC.results), median) ##try final fitting fit <- try(nls( fit.function, trace = method.control.settings$trace, data = data.frame(x = RF_reg.x, y = RF_reg.y), algorithm = "port", start = list( phi.0 = fit.parameters.results.MC.results["phi.0"], delta.phi = fit.parameters.results.MC.results["delta.phi"], lambda = fit.parameters.results.MC.results["lambda"], beta = fit.parameters.results.MC.results["beta"] ), nls.control( maxiter = method.control.settings$maxiter, warnOnly = method.control.settings$warnOnly, minFactor = method.control.settings$minFactor ), lower = c( phi.0 = .Machine$double.xmin, delta.phi = .Machine$double.xmin, lambda = .Machine$double.xmin, beta = .Machine$double.xmin ), upper = c( phi.0 = max(RF_reg.y), delta.phi = max(RF_reg.y), lambda = 1, beta = 100 ) ), silent = FALSE ) }else{ fit <- NA class(fit) <- "try-error" } # get parameters ---------------------------------------------------------- # and with that the final De if (!inherits(fit,"try-error")) { fit.parameters.results <- coef(fit) }else{ fit.parameters.results <- NA } ##calculate De value if (!is.na(fit.parameters.results[1])) { De <- suppressWarnings(round(log( -((RF_nat.mean - fit.parameters.results["phi.0"]) / 
-fit.parameters.results["delta.phi"] ) ^ (1 / fit.parameters.results["beta"]) + 1 ) / -fit.parameters.results["lambda"], digits = 2)) ##This could be solved with a MC simulation, but for this the code has to be adjusted ##The question is: Where the parameters are coming from? ##TODO De.error <- NA De.lower <- suppressWarnings(round(log( -((RF_nat.error.lower - fit.parameters.results["phi.0"]) / -fit.parameters.results["delta.phi"] ) ^ (1 / fit.parameters.results["beta"]) + 1 ) / -fit.parameters.results["lambda"],digits = 2)) De.upper <- suppressWarnings(round(log( -((RF_nat.error.upper - fit.parameters.results["phi.0"]) / -fit.parameters.results["delta.phi"] ) ^ (1 / fit.parameters.results["beta"]) + 1 ) / -fit.parameters.results["lambda"],digits = 2)) }else{ De <- NA De.error <- NA De.lower <- NA De.upper <- NA } } ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++# ##METHOD SLIDE - ANALYSIS ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++# else if(method == "SLIDE"){ ##convert to matrix (in fact above the matrix data were first transfered to data.frames ... here ##we correct this ... again) RF_nat.limited <- as.matrix(RF_nat.limited) RF_reg.limited <- matrix(c(RF_reg.x, RF_reg.y), ncol = 2) RF_nat <- as.matrix(RF_nat) ##DEFINE FUNCTION FOR SLIDING ##FIND MINIMUM - this is done in a function so that it can be further used for MC simulations # sliding() ----------------------------------------------------------------------------------- sliding <- function(RF_nat, RF_nat.limited, RF_reg.limited, n.MC = method.control.settings$n.MC, vslide_range = method.control.settings$vslide_range, trace = method.control.settings$trace_vslide, numerical.only = FALSE){ ##check for odd user input if(length(vslide_range) > 2){ vslide_range <- vslide_range[1:2] warning("[anlayse_IRSAR.RF()] method.control = list(vslide_range) has more than 2 elements. Only the first two were used!", call. 
= FALSE) } ##(0) set objects ... nomenclature as used in Frouin et al., please note that here the index ##is used instead the real time values t_max.id <- nrow(RF_reg.limited) t_max_nat.id <- nrow(RF_nat.limited) t_min.id <- 1 t_min <- RF_nat.limited[1,1] ##(1) calculate sum of residual squares using internal Rcpp function #pre-allocate object temp.sum.residuals <- vector("numeric", length = t_max.id - t_max_nat.id) ##initialise slide range for specific conditions, namely NULL and "auto" if (is.null(vslide_range)) { vslide_range <- 0 } else if (vslide_range[1] == "auto") { vslide_range <- -(max(RF_reg.limited[, 2]) - min(RF_reg.limited[, 2])):(max(RF_reg.limited[, 2]) - min(RF_reg.limited[, 2])) algorithm_error <- NA } else{ vslide_range <- vslide_range[1]:vslide_range[2] algorithm_error <- NULL } ##problem: the optimisation routine slightly depends on the chosen input sliding vector ##and it might get trapped in a local minimum ##therefore we run the algorithm by expanding the sliding vector if(!is.null(vslide_range) && any(vslide_range != 0)){ ##even numbers makes it complicated, so let's make it odd if not already the case if(length(vslide_range) %% 2 == 0){ vslide_range <- c(vslide_range[1], vslide_range, vslide_range) } ##construct list of vector ranges we want to check for, this should avoid that we ##got trapped in a local minimum median_vslide_range.index <- median(1:length(vslide_range)) vslide_range.list <- lapply(seq(1, median_vslide_range.index, length.out = 10), function(x){ c(median_vslide_range.index - as.integer(x), median_vslide_range.index + as.integer(x)) }) ##correct for out of bounds problem; it might occur vslide_range.list[[10]] <- c(0, length(vslide_range)) ##TODO ... this is not really optimal, but ok for the moment, better would be ##the algorithm finds sufficiently the global minimum. ##now run it in a loop and expand the range from the inner to the outer part ##at least this is considered for the final error range ... 
temp_minium_list <- lapply(1:10, function(x){ src_analyse_IRSARRF_SRS( values_regenerated_limited = RF_reg.limited[,2], values_natural_limited = RF_nat.limited[,2], vslide_range = vslide_range[vslide_range.list[[x]][1]:vslide_range.list[[x]][2]], n_MC = 0, #we don't need MC runs here, so make it quick trace = trace)[c("sliding_vector_min_index","vslide_minimum", "vslide_index")] }) ##get all horizontal index value for the local minimum (corresponding to the vslide) temp_hslide_indices <- vapply(temp_minium_list, function(x){ x$sliding_vector_min_index}, FUN.VALUE = numeric(length = 1)) ##get also the vertical slide indices temp_vslide_indicies <- vapply(temp_minium_list, function(x){ x$vslide_index}, FUN.VALUE = numeric(length = 1)) ##get all the minimum values temp_minium <- vapply(temp_minium_list, function(x){x$vslide_minimum}, FUN.VALUE = numeric(length = 1)) ##get minimum and set it to the final range vslide_range <- vslide_range[ vslide_range.list[[which.min(temp_minium)]][1]:vslide_range.list[[which.min(temp_minium)]][2]] ##get all possible t_n values for the range expansion ... 
this can be considered ##as somehow systematic uncertainty, but it will be only calculated of the full range ##is considered, otherwise it is too biased by the user's choice ##ToDo: So far the algorithm error is not sufficiently documented if(!is.null(algorithm_error)){ algorithm_error <- sd(vapply(1:length(temp_vslide_indicies), function(k){ temp.sliding.step <- RF_reg.limited[temp_hslide_indices[k]] - t_min matrix(data = c(RF_nat[,1] + temp.sliding.step, RF_nat[,2] + temp_vslide_indicies[k]), ncol = 2)[1,1] }, FUN.VALUE = numeric(length = 1))) }else{ algorithm_error <- NA } }else{ algorithm_error <- NA } ##now run the final sliding with the identified range that corresponds to the minimum value temp.sum.residuals <- src_analyse_IRSARRF_SRS( values_regenerated_limited = RF_reg.limited[,2], values_natural_limited = RF_nat.limited[,2], vslide_range = vslide_range, n_MC = if(is.null(n.MC)){0}else{n.MC}, trace = trace ) #(2) get minimum value (index and time value) index_min <- which.min(temp.sum.residuals$sliding_vector) if(length(index_min) == 0) t_n.id <- 1 else t_n.id <- index_min if (is.null(vslide_range)) { I_n <- 0 } else{ I_n <- vslide_range[temp.sum.residuals$vslide_index] } temp.sliding.step <- RF_reg.limited[t_n.id] - t_min ##(3) slide curve graphically ... 
full data set we need this for the plotting later RF_nat.slid <- matrix(data = c(RF_nat[,1] + temp.sliding.step, RF_nat[,2] + I_n), ncol = 2) t_n <- RF_nat.slid[1,1] ##the same for the MC runs of the minimum values if(!is.null(n.MC)) { t_n.MC <- vapply( X = 1:length(temp.sum.residuals$sliding_vector_min_MC), FUN = function(x) { ##get minimum for MC t_n.id.MC <- which( temp.sum.residuals$sliding_vector == temp.sum.residuals$sliding_vector_min_MC[x] ) ##there is low change to get two indicies, in ##such cases we should take the mean temp.sliding.step.MC <- RF_reg.limited[t_n.id.MC] - t_min if(length(temp.sliding.step.MC)>1){ t_n.MC <- (RF_nat[, 1] + mean(temp.sliding.step.MC))[1] }else{ t_n.MC <- (RF_nat[, 1] + temp.sliding.step.MC)[1] } return(t_n.MC) }, FUN.VALUE = vector(mode = "numeric", length = 1) ) } else{ t_n.MC <- NA_integer_ } ##(4) get residuals (needed to be plotted later) ## they cannot be longer than the RF_reg.limited curve if((t_n.id+length(RF_nat.limited[,2])-1) >= nrow(RF_reg.limited)){ residuals <- (RF_nat.limited[1:length(t_n.id:nrow(RF_reg.limited)),2] + I_n) - RF_reg.limited[t_n.id:nrow(RF_reg.limited), 2] }else{ residuals <- (RF_nat.limited[,2] + I_n) - RF_reg.limited[t_n.id:(t_n.id+length(RF_nat.limited[,2])-1), 2] } ##(4.1) calculate De from the first channel ... 
which is t_n here De <- round(t_n, digits = 2) De.MC <- round(t_n.MC, digits = 2) temp.trend.fit <- NA ##(5) calculate trend fit if(length(RF_nat.limited[,1]) > length(residuals)){ temp.trend.fit <- coef(lm(y~x, data.frame(x = RF_nat.limited[1:length(residuals),1], y = residuals))) }else{ temp.trend.fit <- coef(lm(y~x, data.frame(x = RF_nat.limited[,1], y = residuals))) } ##return values and limited if they are not needed if (numerical.only == FALSE) { return( list( De = De, De.MC = De.MC, residuals = residuals, trend.fit = temp.trend.fit, RF_nat.slid = RF_nat.slid, t_n.id = t_n.id, I_n = I_n, algorithm_error = algorithm_error, vslide_range = if(is.null(vslide_range)){NA}else{range(vslide_range)}, squared_residuals = temp.sum.residuals$sliding_vector ) ) }else{ return(list(De = De, De.MC = De.MC)) } }##end of function sliding() ##PERFORM sliding and overwrite values slide <- sliding( RF_nat = RF_nat, RF_nat.limited = RF_nat.limited, RF_reg.limited = RF_reg.limited ) ##write results in variables De <- slide$De residuals <- slide$residuals RF_nat.slid <- slide$RF_nat.slid I_n <- slide$I_n # ERROR ESTIMATION # MC runs for error calculation --------------------------------------------------------------- ##set residual matrix for MC runs, i.e. set up list of pseudo RF_nat curves as function ##(i.e., bootstrap from the natural curve distribution) if(!is.null(n.MC)){ slide.MC.list <- lapply(1:n.MC,function(x) { ##also here we have to account for the case that user do not understand ##what they are doing ... 
if(slide$t_n.id + nrow(RF_nat.limited)-1 > nrow(RF_reg.limited)){ cbind( RF_nat.limited[1:length(slide$t_n.id:nrow(RF_reg.limited)),1], (RF_reg.limited[slide$t_n.id:nrow(RF_reg.limited) ,2] + sample(residuals, size = length(slide$t_n.id:nrow(RF_reg.limited)), replace = TRUE) ) ) }else{ cbind( RF_nat.limited[,1], (RF_reg.limited[slide$t_n.id:(slide$t_n.id + nrow(RF_nat.limited)-1) ,2] + sample(residuals, size = nrow(RF_nat.limited), replace = TRUE) ) ) } }) ##set parallel calculation if wanted if(is.null(method.control.settings$cores)){ cores <- 1 }else{ ##case 'auto' if(method.control.settings$cores == 'auto'){ if(parallel::detectCores() <= 2){ warning("[analyse_IRSAR.RF()] For the multicore auto mode at least 4 cores are needed!", call. = FALSE) cores <- 1 }else{ cores <- parallel::detectCores() - 2 } }else if(is.numeric(method.control.settings$cores)){ if(method.control.settings$cores > parallel::detectCores()){ warning(paste0("[analyse_IRSAR.RF()] What do you want? Your machine has only ", parallel::detectCores(), " cores!"), call. = FALSE) ##assign all they have, it is not our problem cores <- parallel::detectCores() } else if (method.control.settings$cores >= 1 && method.control.settings$cores <= parallel::detectCores()) { cores <- method.control.settings$cores } else { # Negative values cores <- 1 } }else{ try(stop("[analyse_IRSAR.RF()] Invalid value for control argument 'cores'. Value set to 1", call. 
= FALSE)) cores <- 1 } ##return message if (cores == 1) message(paste("[analyse_IRSAR.RF()] Singlecore mode")) else message(paste("[analyse_IRSAR.RF()] Multicore mode using", cores, "cores...")) } ## SINGLE CORE ----- if (cores == 1) { if(txtProgressBar){ ##progress bar cat("\n\t Run Monte Carlo loops for error estimation\n") pb <- txtProgressBar(min = 0, max = n.MC, initial = 0, char = "=", style = 3) } De.MC <- sapply(1:n.MC, function(i) { # update progress bar if (txtProgressBar) setTxtProgressBar(pb, i) sliding( RF_nat = RF_nat, RF_reg.limited = RF_reg.limited, RF_nat.limited = slide.MC.list[[i]], numerical.only = TRUE )[[2]] }) ## close progress bar if (txtProgressBar) close(pb) ## MULTICORE ----- } else { ## Create the determined number of R copies cl <- parallel::makeCluster(cores) ##run MC runs De.MC <- parallel::parSapply(cl, X = slide.MC.list, FUN = function(x){ sliding( RF_nat = RF_nat, RF_reg.limited = RF_reg.limited, RF_nat.limited = x, numerical.only = TRUE )[[2]] }) ##destroy multicore cluster parallel::stopCluster(cl) } ##calculate absolute deviation between De and the here newly calculated De.MC ##this is, e.g. ^t_n.1* - ^t_n in Frouin et al. De.diff <- diff(x = c(De, De.MC)) De.error <- round(sd(De.MC), digits = 2) De.lower <- De - quantile(De.diff, 0.975, na.rm = TRUE) De.upper <- De - quantile(De.diff, 0.025, na.rm = TRUE) }else{ De.diff <- NA_integer_ De.error <- NA_integer_ De.lower <- NA_integer_ De.upper <- NA_integer_ De.MC <- NA_integer_ } }else{ warning("[analyse_IRSAR.RF()] Analysis skipped: Unknown method or threshold of test parameter exceeded.", call. 
= FALSE) } ##===============================================================================================# ## TEST PARAMETER ##===============================================================================================# ## Test parameter are evaluated after all the calculations have been done as ## it should be up to the user to decide whether a value should be taken into account or not. ##(0) ##set default values and overwrite them if there was something new ##set defaults TP <- list( curves_ratio = 1.001, intersection_ratio = NA, residuals_slope = NA, curves_bounds = ceiling(max(RF_reg.x)), dynamic_ratio = NA, lambda = NA, beta = NA, delta.phi = NA ) ##modify default values by given input if(!is.null(test_parameters)){TP <- modifyList(TP, test_parameters)} ##remove NULL elements from list TP <- TP[!sapply(TP, is.null)] ##set list with values we want to evaluate TP <- lapply(TP, function(x){ data.frame(THRESHOLD = as.numeric(x), VALUE = NA, STATUS = "OK", stringsAsFactors = TRUE) }) ##(1) check if RF_nat > RF_reg, considering the fit range ##TP$curves_ratio if ("curves_ratio" %in% names(TP)) { TP$curves_ratio$VALUE <- sum(RF_nat.limited[,2]) / sum(RF_reg[RF_nat.lim[1]:RF_nat.lim[2], 2]) if (!is.na(TP$curves_ratio$THRESHOLD)) { TP$curves_ratio$STATUS <- ifelse(TP$curves_ratio$VALUE > TP$curves_ratio$THRESHOLD, "FAILED", "OK") } } ##(1.1) check if RF_nat > RF_reg, considering the fit range ##TP$intersection_ratio if ("intersection_ratio" %in% names(TP)) { ##It is, as always, a little bit more complicated ... ##We cannot just normalise both curves and compare ratios. With increasing De the curve ##shape of the RF_nat curve cannot be the same as the RF_reg curve at t = 0. 
Therefore we ##have to find the segment in the RF_reg curve that fits to the RF_nat curve ## ##(1) get maximum count value for RF_nat IR_RF_nat.max <- max(RF_nat.limited[,2]) ##(2) find corresponding time value for RF_reg (here no limited) IR_RF_reg.corresponding_id <- which.min(abs(RF_reg[,2] - IR_RF_nat.max)) ##(3) calculate ratio, but just starting from the point where both curves correspond ##in terms of intensiy, otherwise the ratio cannot be correct ##the boundary check is necessary to avoid errors if((IR_RF_reg.corresponding_id + length(RF_nat.lim[1]:RF_nat.lim[2])) > length(RF_reg[,2])){ TP$intersection_ratio$VALUE <- Inf }else{ TP$intersection_ratio$VALUE <- abs(1 - sum((RF_nat.limited[, 2] / max(RF_nat.limited[, 2]))) / sum(RF_reg[IR_RF_reg.corresponding_id:(IR_RF_reg.corresponding_id + length(RF_nat.lim[1]:RF_nat.lim[2]) - 1), 2] / max(RF_reg[IR_RF_reg.corresponding_id:(IR_RF_reg.corresponding_id + length(RF_nat.lim[1]:RF_nat.lim[2]) - 1), 2]))) if (!is.na(TP$intersection_ratio$THRESHOLD)) { TP$intersection_ratio$STATUS <- ifelse(TP$intersection_ratio$VALUE > TP$intersection_ratio$THRESHOLD, "FAILED", "OK") } rm(IR_RF_nat.max, IR_RF_reg.corresponding_id) } } ##(2) check slop of the residuals using a linear fit ##TP$residuals_slope if ("residuals_slope" %in% names(TP)) { if (exists("slide")) { TP$residuals_slope$VALUE <- abs(slide$trend.fit[2]) if (!is.na(TP$residuals_slope$THRESHOLD)) { TP$residuals_slope$STATUS <- ifelse( TP$residuals_slope$VALUE > TP$residuals_slope$THRESHOLD, "FAILED", "OK") } } } ##(3) calculate dynamic range of regenrated curve ##TP$dynamic_ratio if ("dynamic_ratio"%in%names(TP)){ TP.dynamic_ratio <- subset(temp.sequence_structure, temp.sequence_structure$protocol.step == "REGENERATED") TP$dynamic_ratio$VALUE <- TP.dynamic_ratio$y.max/TP.dynamic_ratio$y.min if (!is.na(TP$dynamic_ratio$THRESHOLD)){ TP$dynamic_ratio$STATUS <- ifelse( TP$dynamic_ratio$VALUE < TP$dynamic_ratio$THRESHOLD , "FAILED", "OK") } } ##(4) decay parameter 
##TP$lambda if ("lambda"%in%names(TP) & "beta"%in%names(TP) & "delta.phi"%in%names(TP)){ fit.lambda <- try(minpack.lm::nlsLM( fit.function, data = data.frame(x = RF_reg.x, y = RF_reg.y), algorithm = "LM", start = list( phi.0 = fit.parameters.start["phi.0"], delta.phi = fit.parameters.start["delta.phi"], lambda = fit.parameters.start["lambda"], beta = fit.parameters.start["beta"] ), lower = c( phi.0 = .Machine$double.xmin, delta.phi = .Machine$double.xmin, lambda = .Machine$double.xmin, beta = .Machine$double.xmin ), upper = c( phi.0 = max(RF_reg.y), delta.phi = max(RF_reg.y), lambda = 1, beta = 100 ) ), silent = TRUE ) if(!inherits(fit.lambda, "try-error")){ temp.coef <- coef(fit.lambda) TP$lambda$VALUE <- temp.coef["lambda.lambda"] TP$beta$VALUE <- temp.coef["beta.beta"] TP$delta.phi$VALUE <- temp.coef["delta.phi.delta.phi"] if (!is.na( TP$lambda$THRESHOLD)){ TP$lambda$STATUS <- ifelse(TP$lambda$VALUE <= TP$lambda$THRESHOLD, "FAILED", "OK") } if (!is.na( TP$beta$THRESHOLD)){ TP$beta$STATUS <- ifelse(TP$beta$VALUE <= TP$beta$THRESHOLD, "FAILED", "OK") } if (!is.na( TP$delta.phi$THRESHOLD)){ TP$delta.phi$STATUS <- ifelse(TP$delta.phi$VALUE <= TP$delta.phi$THRESHOLD, "FAILED", "OK") } } } ##(99) check whether after sliding the ##TP$curves_bounds if (!is.null(TP$curves_bounds)) { if(exists("slide")){ ## add one channel on the top to make sure that it works TP$curves_bounds$VALUE <- max(RF_nat.slid[RF_nat.lim,1]) + (RF_nat[2,1] - RF_nat[1,1]) if (!is.na(TP$curves_bounds$THRESHOLD)){ TP$curves_bounds$STATUS <- ifelse(TP$curves_bounds$VALUE >= floor(max(RF_reg.x)), "FAILED", "OK") } }else if(exists("fit")){ TP$curves_bounds$VALUE <- De.upper if (!is.na(TP$curves_bounds$THRESHOLD)){ TP$curves_bounds$STATUS <- ifelse(TP$curves_bounds$VALUE >= max(RF_reg.x), "FAILED", "OK") } } } ##Combine everything in a data.frame if(length(TP) != 0) { TP.data.frame <- as.data.frame( cbind( POSITION = as.integer(aliquot.position), PARAMETER = c(names(TP)), do.call(data.table::rbindlist, 
args = list(l = TP)), SEQUENCE_NAME = aliquot.sequence_name, UID = NA ) ) ##set De.status to indicate whether there is any problem with the De according to the test parameter if ("FAILED" %in% TP.data.frame$STATUS) { De.status <- "FAILED" }else{ De.status <- "OK" } }else{ De.status <- "OK" TP.data.frame <- NULL } ##===============================================================================================# # Plotting ------------------------------------------------------------------------------------ ##===============================================================================================# if (plot) { ##get internal colour definition col <- get("col", pos = .LuminescenceEnv) if (!plot_reduced) { ##grep par default and define reset def.par <- par(no.readonly = TRUE) on.exit(par(def.par)) ##set plot frame, if a method was choosen if (method == "SLIDE" | method == "FIT") { layout(matrix(c(1, 2), 2, 1, byrow = TRUE), c(2), c(1.3, 0.4), TRUE) par( oma = c(1, 1, 1, 1), mar = c(0, 4, 3, 0), cex = plot.settings$cex ) } }else{ if(plot.settings[["cex"]] != 1){ def.par <- par()[["cex"]] on.exit(par(def.par)) par(cex = plot.settings[["cex"]]) } } ##here control xlim and ylim behaviour ##xlim xlim <- if ("xlim" %in% names(list(...))) { list(...)$xlim } else { if (plot.settings$log == "x" | plot.settings$log == "xy") { c(min(temp.sequence_structure$x.min),max(temp.sequence_structure$x.max)) }else{ c(0,max(temp.sequence_structure$x.max)) } } ##ylim ylim <- if("ylim" %in% names(list(...))) {list(...)$ylim} else {c(min(temp.sequence_structure$y.min), max(temp.sequence_structure$y.max))} ##open plot area plot( NA,NA, xlim = xlim, ylim = ylim, xlab = ifelse((method != "SLIDE" & method != "FIT") | plot_reduced, plot.settings$xlab," "), xaxt = ifelse((method != "SLIDE" & method != "FIT") | plot_reduced, plot.settings$xaxt,"n"), yaxt = "n", ylab = plot.settings$ylab, main = plot.settings$main, log = plot.settings$log, ) if(De.status == "FAILED"){ ##build list of failed TP 
mtext.message <- paste0( "Threshold exceeded for: ", paste(subset(TP.data.frame, TP.data.frame$STATUS == "FAILED")$PARAMETER, collapse = ", "),". For details see manual.") ##print mtext mtext(text = mtext.message, side = 3, outer = TRUE, col = "red", cex = 0.8 * par()[["cex"]]) warning(mtext.message, call. = FALSE) } ##use scientific format for y-axis labels <- axis(2, labels = FALSE) axis(side = 2, at = labels, labels = format(labels, scientific = TRUE)) ##(1) plot points that have been not selected points(RF_reg[-(min(RF_reg.lim):max(RF_reg.lim)),1:2], pch=3, col=col[19]) ##(2) plot points that has been used for the fitting points(RF_reg.x,RF_reg.y, pch=3, col=col[10]) ##show natural points if no analysis was done if(method != "SLIDE" & method != "FIT"){ ##add points points(RF_nat, pch = 20, col = "grey") points(RF_nat.limited, pch = 20, col = "red") ##legend if (plot.settings$legend) { legend( plot.settings$legend.pos, legend = plot.settings$legend.text, pch = c(19, 3), col = c("red", col[10]), horiz = TRUE, bty = "n", cex = .9 * par()[["cex"]] ) } } ##Add fitted curve, if possible. This is a graphical control that might be considered ##as useful before further analysis will be applied if (method.control.settings$show_fit) { if(!is(fit.lambda, "try-error")){ fit.lambda_coef <- coef(fit.lambda) curve(fit.lambda_coef[[1]]- (fit.lambda_coef[[2]]* ((1-exp(-fit.lambda_coef[[3]]*x))^fit.lambda_coef[[4]])), add=TRUE, lty = 2, col="red") rm(fit.lambda_coef) }else{ warning("[analyse_IRSAR.RF()] No fit possible, no fit shown.", call. 
= FALSE) } } ## ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++# ## PLOT - METHOD FIT ## ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++# if(method == "FIT"){ ##dummy to cheat R CMD check x<-NULL; rm(x) ##plot fitted curve curve(fit.parameters.results["phi.0"]- (fit.parameters.results["delta.phi"]* ((1-exp(-fit.parameters.results["lambda"]*x))^fit.parameters.results["beta"])), add=TRUE, from = RF_reg[min(RF_reg.lim), 1], to = RF_reg[max(RF_reg.lim), 1], col="red") ##plotting to show the limitations if RF_reg.lim was chosen ##show fitted curve GREY (previous red curve) curve(fit.parameters.results["phi.0"]- (fit.parameters.results["delta.phi"]* ((1-exp(-fit.parameters.results["lambda"]*x))^fit.parameters.results["beta"])), add=TRUE, from = min(RF_reg[, 1]), to = RF_reg[min(RF_reg.lim), 1], col="grey") ##show fitted curve GREY (after red curve) curve(fit.parameters.results["phi.0"]- (fit.parameters.results["delta.phi"]* ((1-exp(-fit.parameters.results["lambda"]*x))^fit.parameters.results["beta"])), add=TRUE, from = RF_reg[max(RF_reg.lim), 1], to = max(RF_reg[, 1]), col="grey") ##add points points(RF_nat, pch = 20, col = col[19]) points(RF_nat.limited, pch = 20, col = col[2]) ##legend if (plot.settings$legend) { legend( plot.settings$legend.pos, legend = plot.settings$legend.text, pch = c(19, 3), col = c("red", col[10]), horiz = TRUE, bty = "n", cex = .9 * par()[["cex"]] ) } ##plot range choosen for fitting abline(v=RF_reg[min(RF_reg.lim), 1], lty=2) abline(v=RF_reg[max(RF_reg.lim), 1], lty=2) ##plot De if De was calculated if(is.na(De) == FALSE & is.nan(De) == FALSE){ lines(c(0,De.lower), c(RF_nat.error.lower,RF_nat.error.lower), lty=2, col="grey") lines(c(0,De), c(RF_nat.mean,RF_nat.mean), lty=2, col="red") lines(c(0,De.upper), c(RF_nat.error.upper,RF_nat.error.upper), lty=2, col="grey") lines(c(De.lower, De.lower), c(0,RF_nat.error.lower), lty=2, col="grey") lines(c(De,De), 
c(0, RF_nat.mean), lty=2, col="red") lines(c(De.upper, De.upper), c(0,RF_nat.error.upper), lty=2, col="grey") } ##Insert fit and result if(is.na(De) != TRUE & (is.nan(De) == TRUE | De > max(RF_reg.x) | De.upper > max(RF_reg.x))){ try(mtext(side=3, substitute(D[e] == De, list(De=paste0( De," (", De.lower," ", De.upper,")"))), line=0, cex=0.8 * par()[["cex"]], col="red"), silent=TRUE) De.status <- "VALUE OUT OF BOUNDS" } else{ if ("mtext" %in% names(list(...))) { mtext(side = 3, list(...)$mtext) }else{ try(mtext( side = 3, substitute(D[e] == De, list( De = paste0(De," [",De.lower," ; ", De.upper,"]") )), line = 0, cex = 0.7 * par()[["cex"]] ), silent = TRUE) } De.status <- "OK" } if (!plot_reduced) { ##==lower plot==## par(mar = c(4.2, 4, 0, 0)) ##plot residuals if (is.na(fit.parameters.results[1]) == FALSE) { plot( RF_reg.x, residuals(fit), xlim = c(0, max(temp.sequence_structure$x.max)), xlab = plot.settings$xlab, yaxt = "n", xaxt = plot.settings$xaxt, type = "p", pch = 20, col = "grey", ylab = "E", log = "" ) ##add 0 line abline(h = 0) } else{ plot( NA, NA, xlim = c(0, max(temp.sequence_structure$x.max)), ylab = "E", xlab = plot.settings$xlab, xaxt = plot.settings$xaxt, ylim = c(-1, 1) ) text(x = max(temp.sequence_structure$x.max) / 2, y = 0, "Fitting Error!") } } } ## ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++# ## PLOT - METHOD SLIDE ## ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++# else if(method == "SLIDE"){ ##(0) density plot if (method.control.settings$show_density) { ##showing the density makes only sense when we see at least 10 data points if (!any(is.na(De.MC)) && length(unique(De.MC)) >= 15) { ##calculate density De.MC density.De.MC <- density(De.MC) ##calculate transformation function x.1 <- max(density.De.MC$y) x.2 <- min(density.De.MC$y) ##with have to limit the scaling a little bit if (RF_nat.limited[1,2] > max(RF_reg.limited[,2]) - 
(max(RF_reg.limited[,2]) - min(RF_reg.limited[,2]))*.5) { y.1 <- max(RF_reg.limited[,2]) - (max(RF_reg.limited[,2]) - min(RF_reg.limited[,2]))*.5 }else{ y.1 <- RF_nat.limited[1,2] } y.2 <- par("usr")[3] m <- (y.1 - y.2) / (x.1 + x.2) n <- y.1 - m * x.1 density.De.MC$y <- m * density.De.MC$y + n rm(x.1,x.2,y.1,y.2,m,n) polygon(density.De.MC$x, density.De.MC$y, col = rgb(0,0.4,0.8,0.5)) }else{ warning("[analyse_IRSAR.RF()] Narrow density distribution, no density distribution plotted!", call. = FALSE) } } ##(1) plot unused points in grey ... unused points are points outside of the set limit points( matrix(RF_nat.slid[-(min(RF_nat.lim):max(RF_nat.lim)),1:2], ncol = 2), pch = 21, col = col[19] ) ##(2) add used points points(RF_nat.slid[min(RF_nat.lim):max(RF_nat.lim),], pch = 21, col = col[2], bg = col[2]) ##(3) add line to show the connection between the first point and the De lines(x = c(RF_nat.slid[1,1], RF_nat.slid[1,1]), y = c(.Machine$double.xmin,RF_nat.slid[1,2]), lty = 2, col = col[2] ) ##(4) add arrow at the lowest y-coordinate possible to show the sliding if (plot.settings$log != "y" & plot.settings$log != "xy") { shape::Arrows( x0 = 0, y0 = ylim[1], y1 = ylim[1], x1 = RF_nat.slid[1,1], arr.type = "triangle", arr.length = 0.3 * par()[["cex"]], code = 2, col = col[2], arr.adj = 1, arr.lwd = 1 ) } ##(5) add vertical shift as arrow; show nothing if nothing was shifted if (plot.settings$log != "y" & plot.settings$log != "xy" & I_n != 0) { shape::Arrows( x0 = (0 + par()$usr[1])/2, y0 = RF_nat[1,2], y1 = RF_nat[1,2] + I_n, x1 = (0 + par()$usr[1])/2, arr.type = "triangle", arr.length = 0.3 * par()[["cex"]], code = 2, col = col[2], arr.adj = 1, arr.lwd = 1 ) } ##TODO ##uncomment here to see all the RF_nat curves produced by the MC runs ##could become a polygone for future versions #lapply(1:n.MC, function(x){lines(slide.MC.list[[x]], col = rgb(0,0,0, alpha = 0.2))}) ##plot range choosen for fitting abline(v=RF_reg[min(RF_reg.lim), 1], lty=2) 
abline(v=RF_reg[max(RF_reg.lim), 1], lty=2) if (plot.settings$legend) { legend( plot.settings$legend.pos, legend = plot.settings$legend.text, pch = c(19, 3), col = c("red", col[10]), horiz = TRUE, bty = "n", cex = .9 * par()[["cex"]] ) } ##write information on the De in the plot if("mtext" %in% names(list(...))) { mtext(side = 3, list(...)$mtext) }else{ try(mtext(side=3, substitute(D[e] == De, list(De=paste0(De," [", De.lower, " ; ", De.upper, "]"))), line=0, cex=0.7 * par()[["cex"]]), silent=TRUE) } if (!plot_reduced) { ##==lower plot==## ##RESIDUAL PLOT par(mar = c(4, 4, 0, 0)) plot( NA, NA, ylim = range(residuals), xlim = xlim, xlab = plot.settings$xlab, type = "p", pch = 1, col = "grey", xaxt = plot.settings$xaxt, ylab = "E", yaxt = "n", log = ifelse( plot.settings$log == "y" | plot.settings$log == "xy", "", plot.settings$log ) ) ##add axis for 0 ... means if the 0 is not visible there is labelling axis(side = 4, at = 0, labels = 0) ##add residual indicator (should circle around 0) col.ramp <- colorRampPalette(c(col[19], "white", col[19])) col.polygon <- col.ramp(100) if (plot.settings$log != "x") { shape::filledrectangle( mid = c((xlim[2]) + (par("usr")[2] - xlim[2]) / 2, max(residuals) - diff(range(residuals)) / 2), wx = par("usr")[2] - xlim[2], wy = diff(range(residuals)), col = col.polygon ) } ##add 0 line abline(h = 0, lty = 3) ##0-line indicator and arrows if this is not visible ##red colouring here only if the 0 point is not visible to avoid too much colouring if (max(residuals) < 0 & min(residuals) < 0) { shape::Arrowhead( x0 = xlim[2] + (par("usr")[2] - xlim[2]) / 2, y0 = max(residuals), angle = 270, lcol = col[2], arr.length = 0.4, arr.type = "triangle", arr.col = col[2] ) } else if (max(residuals) > 0 & min(residuals) > 0) { shape::Arrowhead( x0 = xlim[2] + (par("usr")[2] - xlim[2]) / 2, y0 = min(residuals), angle = 90, lcol = col[2], arr.length = 0.4, arr.type = "triangle", arr.col = col[2] ) } else{ points(xlim[2], 0, pch = 3) } ##add residual 
points if (length(RF_nat.slid[c(min(RF_nat.lim):max(RF_nat.lim)), 1]) > length(residuals)) { temp.points.diff <- length(RF_nat.slid[c(min(RF_nat.lim):max(RF_nat.lim)), 1]) - length(residuals) points(RF_nat.slid[c(min(RF_nat.lim):(max(RF_nat.lim) - temp.points.diff)), 1], residuals, pch = 20, col = rgb(0, 0, 0, 0.4)) } else{ points(RF_nat.slid[c(min(RF_nat.lim):max(RF_nat.lim)), 1], residuals, pch = 20, col = rgb(0, 0, 0, 0.4)) } ##add vertical line to mark De (t_n) abline(v = De, lty = 2, col = col[2]) ##add numeric value of De ... t_n axis( side = 1, at = De, labels = De, cex.axis = 0.8 * plot.settings$cex, col = "blue", padj = -1.55, ) ##TODO- CONTROL PLOT! ... can be implemented in appropriate form in a later version if (method.control.settings$trace) { par(new = TRUE) plot( RF_reg.limited[1:length(slide$squared_residuals),1], slide$squared_residuals, ylab = "", type = "l", xlab = "", xaxt = plot.settings$xaxt, axes = FALSE, xlim = xlim, log = "y" ) } } } }#endif::plot # Return -------------------------------------------------------------------------------------- ##=============================================================================# ## RETURN ##=============================================================================# ##catch up worst case scenarios ... 
means something went wrong if(!exists("De")){De <- NA} if(!exists("De.error")){De.error <- NA} if(!exists("De.MC")){De.MC <- NA} if(!exists("De.lower")){De.lower <- NA} if(!exists("De.upper")){De.upper <- NA} if(!exists("De.status")){De.status <- NA} if (!exists("fit")) { if (exists("fit.lambda")) { fit <- fit.lambda }else{ fit <- list() } } if(!exists("slide")){slide <- list()} ##combine values for De into a data frame De.values <- data.frame( DE = De, DE.ERROR = De.error, DE.LOWER = De.lower, DE.UPPER = De.upper, DE.STATUS = De.status, RF_NAT.LIM = paste(RF_nat.lim, collapse = ":"), RF_REG.LIM = paste(RF_reg.lim, collapse = ":"), POSITION = as.integer(aliquot.position), DATE = aliquot.date, SEQUENCE_NAME = aliquot.sequence_name, UID = NA, row.names = NULL, stringsAsFactors = FALSE ) ##generate unique identifier UID <- create_UID() ##update data.frames accordingly De.values$UID <- UID if(!is.null(TP.data.frame)){ TP.data.frame$UID <- UID } ##produce results object newRLumResults.analyse_IRSAR.RF <- set_RLum( class = "RLum.Results", data = list( data = De.values, De.MC = De.MC, test_parameters = TP.data.frame, fit = fit, slide = slide ), info = list(call = sys.call()) ) invisible(newRLumResults.analyse_IRSAR.RF) } Luminescence/R/analyse_Al2O3C_CrossTalk.R0000644000176200001440000002670314521207352017645 0ustar liggesusers#' @title Al2O3:C Reader Cross Talk Analysis #' #' @description The function provides the analysis of cross-talk measurements on a #' FI lexsyg SMART reader using Al2O3:C chips #' #' @param object [RLum.Analysis-class] **(required)**: #' measurement input #' #' @param signal_integral [numeric] (*optional*): #' signal integral, used for the signal and the background. #' If nothing is provided the full range is used #' #' @param dose_points [numeric] (*with default*): #' vector with dose points, if dose points are repeated, only the general #' pattern needs to be provided. 
Default values follow the suggestions #' made by Kreutzer et al., 2018 #' #' @param recordType [character] (*with default*): input curve selection, which is passed to #' function [get_RLum]. To deactivate the automatic selection set the argument to `NULL` #' #' @param irradiation_time_correction [numeric] or [RLum.Results-class] (*optional*): #' information on the used irradiation time correction obtained by another experiments. #' #' @param method_control [list] (*optional*): #' optional parameters to control the calculation. #' See details for further explanations #' #' @param plot [logical] (*with default*): #' enable/disable plot output #' #' @param ... further arguments that can be passed to the plot output #' #' @return #' Function returns results numerically and graphically: #' #' -----------------------------------\cr #' `[ NUMERICAL OUTPUT ]`\cr #' -----------------------------------\cr #' #' **`RLum.Results`**-object #' #' **slot:** **`@data`** #' #' \tabular{lll}{ #' **Element** \tab **Type** \tab **Description**\cr #' `$data` \tab `data.frame` \tab summed apparent dose table \cr #' `$data_full` \tab `data.frame` \tab full apparent dose table \cr #' `$fit` \tab `lm` \tab the linear model obtained from fitting \cr #' `$col.seq` \tab `numeric` \tab the used colour vector \cr #' } #' #' **slot:** **`@info`** #' #' The original function call #' #' ------------------------\cr #' `[ PLOT OUTPUT ]`\cr #' ------------------------\cr #' #' - An overview of the obtained apparent dose values #' #' @section Function version: 0.1.3 #' #' @author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #' @seealso [analyse_Al2O3C_ITC] #' #' @references #' #' Kreutzer, S., Martin, L., Guérin, G., Tribolo, C., Selva, P., Mercier, N., 2018. Environmental Dose Rate #' Determination Using a Passive Dosimeter: Techniques and Workflow for alpha-Al2O3:C Chips. #' Geochronometria 45, 56-67. 
doi: 10.1515/geochr-2015-0086 #' #' @keywords datagen #' #' @examples #' #' ##load data #' data(ExampleData.Al2O3C, envir = environment()) #' #' ##run analysis #' analyse_Al2O3C_CrossTalk(data_CrossTalk) #' #' @md #' @export analyse_Al2O3C_CrossTalk <- function( object, signal_integral = NULL, dose_points = c(0,4), recordType = c("OSL (UVVIS)"), irradiation_time_correction = NULL, method_control = NULL, plot = TRUE, ... ){ # Integretiy check --------------------------------------------------------------------------- ##check input object if(!all(unlist(lapply(object, function(x){is(x, "RLum.Analysis")})))){ stop("[analyse_Al2O3C_CrossTalk()] The elements in 'object' are not all of type 'RLum.Analsyis'", call. = FALSE) } ##TODO ... do more, push harder ##Accept the entire sequence ... including TL and extract ##Add sufficient unit tests # Preparation --------------------------------------------------------------------------------- ##select curves based on the recordType selection; if not NULL if(!is.null(recordType)){ object <- get_RLum(object, recordType = recordType, drop = FALSE) } #set method control method_control_settings <- list( fit.method = "EXP" ) ##modify on request if(!is.null(method_control)){ method_control_settings <- modifyList(x = method_control_settings, val = method_control) } ##set signal integral if(is.null(signal_integral)){ signal_integral <- c(1:nrow(object[[1]][[1]][])) }else{ ##check whether the input is valid, otherwise make it valid if(min(signal_integral) < 1 | max(signal_integral) > nrow(object[[1]][[1]][])){ signal_integral <- c(1:nrow(object[[1]][[1]][])) warning( paste0( "[analyse_Al2O3C_ITC()] Input for 'signal_integral' corrected to 1:", nrow(object[[1]][[1]][]) ), call. 
= FALSE ) } } ##check irradiation time correction if (!is.null(irradiation_time_correction)) { if (is(irradiation_time_correction, "RLum.Results")) { if (irradiation_time_correction@originator == "analyse_Al2O3C_ITC") { irradiation_time_correction <- get_RLum(irradiation_time_correction) ##insert case for more than one observation ... if(nrow(irradiation_time_correction)>1){ irradiation_time_correction <- c(mean(irradiation_time_correction[[1]]), sd(irradiation_time_correction[[1]])) }else{ irradiation_time_correction <- c(irradiation_time_correction[[1]], irradiation_time_correction[[2]]) } } else{ stop( "[analyse_Al2O3C_CrossTalk()] The object provided for the argument 'irradiation_time_correction' was created by an unsupported function!", call. = FALSE ) } } } # Calculation --------------------------------------------------------------------------------- ##we have two dose points, and one background curve, we do know only the 2nd dose ##create signal table list signal_table_list <- lapply(1:length(object), function(i) { ##calculate all the three signals needed BACKGROUND <- sum(object[[i]][[3]][, 2]) NATURAL <- sum(object[[i]][[1]][, 2]) REGENERATED <- sum(object[[i]][[2]][, 2]) temp_df <- data.frame( POSITION = get_RLum(object[[i]][[1]], info.object = "position"), DOSE = if(!is.null(irradiation_time_correction)){ dose_points + irradiation_time_correction[1] }else{ dose_points }, DOSE_ERROR = if(!is.null(irradiation_time_correction)){ dose_points * irradiation_time_correction[2]/irradiation_time_correction[1] }else{ 0 }, STEP = c("NATURAL", "REGENERATED"), INTEGRAL = c(NATURAL, REGENERATED), BACKGROUND = c(BACKGROUND, BACKGROUND), NET_INTEGRAL = c(NATURAL - BACKGROUND, REGENERATED - BACKGROUND), row.names = NULL ) ##0 dose points should not be biased by the correction .. 
id_zero <- which(dose_points == 0) temp_df$DOSE[id_zero] <- 0 temp_df$DOSE_ERROR[id_zero] <- 0 return(temp_df) }) APPARENT_DOSE <- as.data.frame(data.table::rbindlist(lapply(1:length(object), function(x){ ##run in MC run if(!is.null(irradiation_time_correction)){ DOSE <- rnorm(1000, mean = signal_table_list[[x]]$DOSE[2], sd = signal_table_list[[x]]$DOSE_ERROR[2]) }else{ DOSE <- signal_table_list[[x]]$DOSE[2] } ##calculation temp <- (DOSE * signal_table_list[[x]]$NET_INTEGRAL[1])/signal_table_list[[x]]$NET_INTEGRAL[2] data.frame( POSITION = signal_table_list[[x]]$POSITION[1], AD = mean(temp), AD_ERROR = sd(temp)) }))) ##add apparent dose to the information signal_table_list <- lapply(1:length(signal_table_list), function(x){ cbind(signal_table_list[[x]], rep(APPARENT_DOSE[x,2:3], 2)) }) ##combine data_full <- as.data.frame(data.table::rbindlist(signal_table_list), stringsAsFactors = FALSE) # Plotting ------------------------------------------------------------------------------------ ## set colours col_pal <- grDevices::hcl.colors(100, palette = "RdYlGn", rev = TRUE) ##get plot settings par.default <- par(no.readonly = TRUE) on.exit(par(par.default)) ##settings plot_settings <- list( main = "Sample Carousel Crosstalk", mtext = "" ) ##modify on request plot_settings <- modifyList(x = plot_settings, list(...)) ##pre-calculations for graphical parameters n.positions <- length(unique(APPARENT_DOSE$POSITION)) arc.step <- (2 * pi) / n.positions step <- 0 ##condense data.frame, by calculating the mean for similar positions AD_matrix <- t(vapply(sort(unique(APPARENT_DOSE$POSITION)), function(x){ c(x,mean(APPARENT_DOSE[["AD"]][APPARENT_DOSE[["POSITION"]] == x]), sd(APPARENT_DOSE[["AD"]][APPARENT_DOSE[["POSITION"]] == x])) }, FUN.VALUE = vector(mode = "numeric", length = 3))) ##create colour ramp col.seq <- data.frame( POSITION = AD_matrix[order(AD_matrix[,2]),1], COLOUR = col_pal[seq(1,100, length.out = nrow(AD_matrix))], stringsAsFactors = FALSE) col.seq <- 
col.seq[["COLOUR"]][order(col.seq[["POSITION"]])] ##calculate model fit <- lm( formula = y ~ poly(x, 2, raw=TRUE), data = data.frame(y = APPARENT_DOSE$AD[order(APPARENT_DOSE$POSITION)], x = sort(APPARENT_DOSE$POSITION))) ##enable or disable plot ... we cannot put the condition higher, because we here ##calculate something we are going to need later if (plot) { ##set layout matrix layout(mat = matrix( c(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 3, 1, 1, 1, 1, 3), 5, 5, byrow = TRUE )) ##create empty plot par( mar = c(1, 1, 1, 1), omi = c(1, 1, 1, 1), oma = c(0.2, 0.2, 0.2, 0.2), cex = 1.1 ) shape::emptyplot(c(-1.15, 1.15), main = plot_settings$main, frame.plot = FALSE) ##add outher circle shape::plotcircle(r = 1.1, col = rgb(0.9, 0.9, 0.9, 1)) ##add inner octagon shape::filledcircle( r1 = 0.6, mid = c(0, 0), lwd = 1, lcol = "black", col = "white" ) ##add circles for (i in 1:n.positions) { shape::plotcircle( r = 0.05, mid = c(cos(step), sin(step)), cex = 6, pch = 20, col = col.seq[i] ) text(x = cos(step) * 0.85, y = sin(step) * .85, labels = i) step <- step + arc.step } ##add center plot with position plot(NA, NA, xlim = range(AD_matrix[,1]), ylim = range(APPARENT_DOSE[,2]), frame.plot = FALSE, type = "l") ##add points points(x = APPARENT_DOSE, pch = 20, col = rgb(0,0,0,0.3)) ##add linear model lines(sort(APPARENT_DOSE$POSITION), predict(fit), col = "red") ##add colour legend shape::emptyplot(c(-1.2, 1.2), frame.plot = FALSE) graphics::rect( xleft = rep(-0.6, 100), ybottom = seq(-1.2,1.1,length.out = 100), xright = rep(0, 100), ytop = seq(-1.1,1.2,length.out = 100), col = col_pal, lwd = 0, border = FALSE ) ##add scale text text( x = -0.3, y = 1.2, label = "[s]", pos = 3, cex = 1.1 ) text( x = 0.4, y = 1, label = round(max(AD_matrix[, 2]),2), pos = 3, cex = 1.1 ) text( x = 0.4, y = -1.5, label = 0, pos = 3, cex = 1.1 ) } # Output -------------------------------------------------------------------------------------- output <- set_RLum( class = 
"RLum.Results", data = list( data = data.frame( POSITION = AD_matrix[,1], AD = AD_matrix[,2], AD_ERROR = AD_matrix[,3] ), data_full = data_full, fit = fit, col.seq = col.seq ), info = list( call = sys.call() ) ) } Luminescence/R/tune_Data.R0000644000176200001440000000505414236146743015132 0ustar liggesusers#' Tune data for experimental purpose #' #' The error can be reduced and sample size increased for specific purpose. #' #' @param data [data.frame] (**required**): #' input values, structure: data (`values[,1]`) and data error (`values [,2]`) #' are required #' #' @param decrease.error [numeric]: #' factor by which the error is decreased, ranges between 0 and 1. #' #' @param increase.data [numeric]: #' factor by which the error is decreased, ranges between 0 and `Inf`. #' #' @return Returns a [data.frame] with tuned values. #' #' @note #' You should not use this function to improve your poor data set! #' #' @section Function version: 0.5.0 #' #' @author #' Michael Dietze, GFZ Potsdam (Germany) #' #' @keywords manip #' #' @examples #' #' ## load example data set #' data(ExampleData.DeValues, envir = environment()) #' x <- ExampleData.DeValues$CA1 #' #' ## plot original data #' plot_AbanicoPlot(data = x, #' summary = c("n", "mean")) #' #' ## decrease error by 10 % #' plot_AbanicoPlot(data = tune_Data(x, decrease.error = 0.1), #' summary = c("n", "mean")) #' #' ## increase sample size by 200 % #' #plot_AbanicoPlot(data = tune_Data(x, increase.data = 2) , #' # summary = c("n", "mean")) #' #' @md #' @export tune_Data <- function( data, decrease.error = 0, increase.data = 0 ){ if(missing(decrease.error) == FALSE) { error.rel <- data[,2] / data[,1] data[,2] <- error.rel * (1 - decrease.error) * data[,1] } if(missing(increase.data) == FALSE) { n <- round(x = increase.data * 100, digits = 0) i.new <- sample(x = 1:nrow(data), size = n, replace = TRUE) x.new <- rnorm(n = n, mean = data[i.new, 1], sd = data[i.new, 2]) e.new <- rnorm(n = n, mean = data[i.new, 2], sd = 
data[i.new, 2] * 0.05) x.merge <- c(data[,1], x.new) e.merge <- c(data[,2], e.new) e.merge <- e.merge[order(x.merge)] x.merge <- x.merge[order(x.merge)] data.out <- data.frame(x.merge, e.merge) names(data.out) <- names(data) data <- data.out } info <- Sys.info() user <- info[length(info)] os <- info[1] warning(paste("Dear ", user, ", these activities on your ", os, " machine have been tracked and will be submitted to ", "the R.Lum data base. Cheating does not pay off! [", Sys.time(), "]", sep = "")) return(data) } Luminescence/R/extract_IrradiationTimes.R0000644000176200001440000003603614521207352020222 0ustar liggesusers#' @title Extract Irradiation Times from an XSYG-file #' #' @description Extracts irradiation times, dose and times since last irradiation, from a #' Freiberg Instruments XSYG-file. These information can be further used to #' update an existing BINX-file. #' #' @details #' The function was written to compensate missing information in the BINX-file #' output of Freiberg Instruments lexsyg readers. As all information are #' available within the XSYG-file anyway, these information can be extracted #' and used for further analysis or/and to stored in a new BINX-file, which can #' be further used by other software, e.g., Analyst (Geoff Duller). #' #' Typical application example: g-value estimation from fading measurements #' using the Analyst or any other self written script. #' #' Beside the some simple data transformation steps the function applies the #' functions [read_XSYG2R], [read_BIN2R], [write_R2BIN] for data import and export. #' #' @param object [character], [RLum.Analysis-class] or [list] (**required**): #' path and file name of the XSYG file or an [RLum.Analysis-class] #' produced by the function [read_XSYG2R]; #' alternatively a `list` of [RLum.Analysis-class] can be provided. #' #' **Note**: If an [RLum.Analysis-class] is used, any input for #' the arguments `file.BINX` and `recordType` will be ignored! 
#' #' @param file.BINX [character] (*optional*): #' path and file name of an existing BINX-file. If a file name is provided the #' file will be updated with the information from the XSYG file in the same #' folder as the original BINX-file. #' #' **Note:** The XSYG and the BINX-file have to originate from the #' same measurement! #' #' @param recordType [character] (*with default*): #' select relevant curve types from the XSYG file or [RLum.Analysis-class] #' object. As the XSYG-file format comprises much more information than usually #' needed for routine data analysis and allowed in the BINX-file format, only #' the relevant curves are selected by using the function #' [get_RLum]. The argument `recordType` works as #' described for this function. #' #' **Note:** A wrong selection will cause a function error. Please change this #' argument only if you have reasons to do so. #' #' @param compatibility.mode [logical] (*with default*): #' this option is parsed only if a BIN/BINX file is produced and it will reset all position #' values to a max. value of 48, cf. [write_R2BIN] #' #' @param txtProgressBar [logical] (*with default*): #' enables `TRUE` or disables `FALSE` the progression bars during import and export #' #' @note The function can also be used to extract irradiation times from [RLum.Analysis-class] objects #' previously imported via [read_BIN2R] (`fastForward = TRUE`) or in combination with [Risoe.BINfileData2RLum.Analysis]. #' Unfortunately the timestamp might not be very precise (or even invalid), #' but it allows to essentially treat different formats in a similar manner. #' #' @return #' An [RLum.Results-class] object is returned with the #' following structure: #' #' ``` #' .. $irr.times (data.frame) #' ``` #' #' If a BINX-file path and name is set, the output will be additionally #' transferred into a new BINX-file with the function name as suffix. For the #' output the path of the input BINX-file itself is used. 
Note that this will #' not work if the input object is a file path to an XSYG-file, instead of a #' link to only one file. In this case the argument input for `file.BINX` is ignored. #' #' In the self-call mode (input is a `list` of [RLum.Analysis-class] objects) #' a list of [RLum.Results-class] is returned. #' #' @note #' The produced output object still contains the irradiation steps to #' keep the output transparent. However, for the BINX-file export these steps #' are removed as the BINX-file format description does not allow irradiations #' as separate sequence steps. #' #' **BINX-file 'Time Since Irradiation' value differs from the table output?** #' #' The way the value 'Time Since Irradiation' is defined differs. In the BINX-file the #' 'Time Since Irradiation' is calculated as the 'Time Since Irradiation' plus the 'Irradiation #' Time'. The table output returns only the real 'Time Since Irradiation', i.e. time between the #' end of the irradiation and the next step. #' #' **Negative values for `TIMESINCELAS.STEP`?** #' #' Yes, this is possible and no bug, as in the XSYG-file multiple curves are stored for one step. #' Example: a TL step may comprise three curves: #' #' - (a) counts vs. time, #' - (b) measured temperature vs. time and #' - (c) predefined temperature vs. time. #' #' Three curves, but they all belong to one TL measurement step; with regard to #' the time stamps this could produce negative values as the relevant function #' ([read_XSYG2R]) does not change the order of entries for one step #' towards a correct time order. #' #' @section Function version: 0.3.3 #' #' @author #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #' @seealso [RLum.Analysis-class], [RLum.Results-class], [Risoe.BINfileData-class], #' [read_XSYG2R], [read_BIN2R], [write_R2BIN] #' #' @references #' Duller, G.A.T., 2015. The Analyst software package for luminescence data: overview and #' recent improvements. Ancient TL 33, 35-42. 
#'
#' @keywords IO manip
#'
#' @examples
#' ## (1) - example for your own data
#' ##
#' ## set files and run function
#' #
#' # file.XSYG <- file.choose()
#' # file.BINX <- file.choose()
#' #
#' # output <- extract_IrradiationTimes(file.XSYG = file.XSYG, file.BINX = file.BINX)
#' # get_RLum(output)
#' #
#' ## export results additionally to a CSV.file in the same directory as the XSYG-file
#' # write.table(x = get_RLum(output),
#' #             file = paste0(file.BINX,"_extract_IrradiationTimes.csv"),
#' #             sep = ";",
#' #             row.names = FALSE)
#'
#' @md
#' @export
extract_IrradiationTimes <- function(
  object,
  file.BINX,
  recordType = c("irradiation (NA)", "IRSL (UVVIS)", "OSL (UVVIS)", "TL (UVVIS)"),
  compatibility.mode = TRUE,
  txtProgressBar = TRUE
){

  # SELF CALL -----------------------------------------------------------------------------------
  ## a list input triggers the self-call mode: the function is applied to every
  ## list element separately and the individual results are returned as a list
  if(is.list(object)){

    ##show message for non-supported arguments
    if(!missing(file.BINX)){
      warning("[extract_IrradiationTimes()] argument 'file.BINX' is not supported in the self call mode.", call. = FALSE)
    }

    ##extend arguments
    ##extend recordType so every list element gets its own recordType entry
    if(is(recordType, "list")){
      recordType <- rep(recordType, length = length(object))

    }else{
      recordType <- rep(list(recordType), length = length(object))
    }

    ##run function on each element
    results <- lapply(1:length(object), function(x) {
      extract_IrradiationTimes(
        object = object[[x]],
        recordType = recordType[[x]],
        txtProgressBar = txtProgressBar
      )
    })

    ##DO NOT use invisible here, this will stop the function from stopping
    if(length(results) == 0){
      return(NULL)

    }else{
      return(results)
    }
  }

  # Integrity tests -----------------------------------------------------------------------------
  ##check whether an character or an RLum.Analysis object is provided
  if(is(object)[1] != "character" & is(object)[1] != "RLum.Analysis"){
    stop("[extract_IrradiationTimes()] Input object is neither of type 'character' nor of type 'RLum.Analysis'.", call. = FALSE)

  } else if(is(object)[1] == "character"){

    ##set object to file.XSYG
    file.XSYG <- object

    ##XSYG
    ##check if file exists
    if(file.exists(file.XSYG) == FALSE){
      stop("[extract_IrradiationTimes()] Wrong XSYG file name or file does not exsits!", call. = FALSE)
    }

    ##check if file is XML file (by file extension only)
    if(tail(unlist(strsplit(file.XSYG, split = "\\.")), 1) != "xsyg" &
       tail(unlist(strsplit(file.XSYG, split = "\\.")), 1) != "XSYG" ){
      stop("[extract_IrradiationTimes()] File is not of type 'XSYG'!", call. = FALSE)
    }

    ##BINX
    if(!missing(file.BINX)){

      ##check if file exists
      if(file.exists(file.BINX) == FALSE){
        stop("[extract_IrradiationTimes()] Wrong BINX file name or file does not exist!", call. = FALSE)
      }

      ##check if file is XML file (by file extension only)
      if(tail(unlist(strsplit(file.BINX, split = "\\.")), 1) != "binx" &
         tail(unlist(strsplit(file.BINX, split = "\\.")), 1) != "BINX" ){
        stop("[extract_IrradiationTimes()] File is not of type 'BINX'!", call. = FALSE)
      }
    }

    # Settings and import XSYG --------------------------------------------------------------------
    temp.XSYG <- read_XSYG2R(file.XSYG, txtProgressBar = txtProgressBar)

    if(!missing(file.BINX)){
      temp.BINX <- read_BIN2R(file.BINX, txtProgressBar = txtProgressBar)
      temp.BINX.dirname <- (dirname(file.XSYG))
    }

    # Some data preparation -----------------------------------------------------------------------
    ##set list
    temp.sequence.list <- list()

    ##select all analysis objects and combine them
    for(i in 1:length(temp.XSYG)){

      ##select sequence and reduce the data set to really wanted values
      temp.sequence.list[[i]] <- get_RLum(
        object = temp.XSYG[[i]]$Sequence.Object,
        recordType = recordType,
        drop = FALSE)

      ##get corresponding position number, this will be needed later on
      ## NOTE(review): only the position of the last sequence is kept; assumes
      ## all sequences in one XSYG share the same position -- TODO confirm
      temp.sequence.position <- as.numeric(as.character(temp.XSYG[[i]]$Sequence.Header["position",]))
    }

  }else{
    ##now we assume a single RLum.Analysis object
    ##select sequence and reduce the data set to really wanted values, note that no
    ##record selection was made!
    temp.sequence.list <- list(object)
  }

  ##merge objects
  if(length(temp.sequence.list)>1){
    temp.sequence <- merge_RLum(temp.sequence.list)

  }else{
    temp.sequence <- temp.sequence.list[[1]]
  }

  # Grep relevant information -------------------------------------------------------------------
  ##Sequence STEP
  STEP <- names_RLum(temp.sequence)

  #START time of each step
  ## we try also to support BIN/BINX files with this function if imported
  ## accordingly
  if(any(temp.sequence@originator %in% c("Risoe.BINfileData2RLum.Analysis", "read_BIN2R"))) {
    temp.START <- vapply(temp.sequence, function(x) {
      paste0(get_RLum(x, info.object = c("DATE")),
             get_RLum(x, info.object = c("TIME")))
    }, character(1))

    ##a little bit reformatting.
    START <- strptime(temp.START, format = "%y%m%d%H%M%S", tz = "GMT")

    ## make another try in case it does not make sense (TIME may contain colons)
    if(any(is.na(START)))
      START <- strptime(temp.START, format = "%y%m%d%H:%M:%S", tz = "GMT")

  } else {
    temp.START <- vapply(temp.sequence, function(x) {
      get_RLum(x, info.object = c("startDate"))
    }, character(1))

    ##a little bit reformatting.
    START <- strptime(temp.START, format = "%Y%m%d%H%M%S", tz = "GMT")
  }

  ##DURATION of each STEP (last time value of each curve)
  DURATION.STEP <- vapply(temp.sequence, function(x){
    max(get_RLum(x)[,1])
  }, numeric(1))

  ##Calculate END time of each STEP
  END <- START + DURATION.STEP

  ##add position number so far an XSYG file was the input
  if(exists("file.XSYG")){
    POSITION <- rep(temp.sequence.position, each = length_RLum(temp.sequence))

  }else if(!inherits(try(
    suppressWarnings(get_RLum(
      get_RLum(temp.sequence, record.id = 1), info.object = "position")),
    silent = TRUE), "try-error")){

    ##POSITION of each STEP; fall back to the upper-case info field when needed
    POSITION <- vapply(temp.sequence, function(x){
      tmp <- suppressWarnings(get_RLum(x, info.object = c("position")))
      if(is.null(tmp))
        tmp <- get_RLum(x, info.object = c("POSITION"))
      tmp
    }, numeric(1))

  }else{
    POSITION <- NA
  }

  ##Combine the results
  temp.results <- data.frame(POSITION,STEP,START,DURATION.STEP,END)

  # Calculate irradiation duration ------------------------------------------------------------
  if(any(temp.sequence@originator %in% c("Risoe.BINfileData2RLum.Analysis", "read_BIN2R"))) {
    ## BIN/BINX records carry the irradiation time directly
    IRR_TIME <- vapply(temp.sequence, function(x)
      get_RLum(x, info.object = c("IRR_TIME")), numeric(1))

  } else {
    ## for XSYG input: carry the duration of the last irradiation step forward
    ## to all subsequent non-irradiation steps
    IRR_TIME <- numeric(length = nrow(temp.results))
    temp_last <- 0
    for(i in 1:nrow(temp.results)){
      if(grepl("irradiation", temp.results[["STEP"]][i])) {
        temp_last <- temp.results[["DURATION.STEP"]][i]
        next()
      }
      IRR_TIME[i] <- temp_last
    }
  }

  # Calculate time since irradiation ------------------------------------------------------------
  ##set objects
  time.irr.end <- NA

  ## -1 marks irradiation steps themselves and steps before the first irradiation
  TIMESINCEIRR <- unlist(sapply(1:nrow(temp.results), function(x){
    if(grepl("irradiation", temp.results[x,"STEP"])){
      time.irr.end<<-temp.results[x,"END"]
      return(-1)

    }else{
      if(is.na(time.irr.end)){
        return(-1)

      }else{
        return(difftime(temp.results[x,"START"],time.irr.end, units = "secs"))
      }
    }
  }))

  # Calculate time since last step --------------------------------------------------------------
  ## may be negative for XSYG input, see @note in the documentation above
  TIMESINCELAST.STEP <- unlist(sapply(1:nrow(temp.results), function(x){
    if(x == 1){
      return(0)

    }else{
      return(difftime(temp.results[x,"START"],temp.results[x-1, "END"], units = "secs"))
    }
  }))

  # Combine final results -----------------------------------------------------------------------
  ##results table, export as CSV
  results <- cbind(temp.results,IRR_TIME, TIMESINCEIRR,TIMESINCELAST.STEP)

  # Write BINX-file if wanted -------------------------------------------------------------------
  if(!missing(file.BINX)){

    ##(1) remove all irradiation steps as there is no record in the BINX file and update information
    results.BINX <- results[-which(results[,"STEP"] == "irradiation (NA)"),]

    ##(1a) update information on the irradiation time
    temp.BINX@METADATA[["IRR_TIME"]] <- results.BINX[["IRR_TIME"]]

    ##(1b) update information on the time since irradiation by using the Risoe definition of this
    ##parameter, to make the file compatible to the Analyst
    temp.BINX@METADATA[["TIMESINCEIRR"]] <- results.BINX[["IRR_TIME"]] + results.BINX[["TIMESINCEIRR"]]

    ##(2) compare entries in the BINX-file with the entries in the table to make sure
    ## that both have the same length
    ## NOTE(review): this inner 'if(!missing(file.BINX))' is always TRUE inside the
    ## outer branch, so the 'else' with the mismatch message below appears to be
    ## unreachable; on a row-count mismatch the export is skipped silently -- verify
    if(!missing(file.BINX)){
      if(nrow(results.BINX) == nrow(temp.BINX@METADATA)){

        ##update BINX-file
        try <- write_R2BIN(temp.BINX, version = "06",
                           file = paste0(file.BINX,"_extract_IrradiationTimes.BINX"),
                           compatibility.mode = compatibility.mode,
                           txtProgressBar = txtProgressBar)

        ##set message on the format definition
        if(!inherits(x = try, 'try-error')){
          message("[extract_IrradiationTimes()] 'Time Since Irradiation' was redefined in the exported BINX-file to: 'Time Since Irradiation' plus the 'Irradiation Time' to be compatible with the Analyst.")
        }
      }

    }else{
      try(
        stop("[extract_IrradiationTimes()] XSYG-file and BINX-file did not contain similar entries. BINX-file update skipped!",call. = FALSE))
    }
  }

  # Output --------------------------------------------------------------------------------------
  return(set_RLum(class = "RLum.Results",
                  data = list(irr.times = results)))
}
Luminescence/R/plot_NRt.R0000644000176200001440000001703314236146743014767 0ustar liggesusers#' Visualise natural/regenerated signal ratios
#'
#' This function creates a Natural/Regenerated signal vs. time (NR(t)) plot
#' as shown in Steffen et al. 2009
#'
#' This function accepts the individual curve data in many different formats. If
#' `data` is a `list`, each element of the list must contain a two
#' column `data.frame` or `matrix` containing the `XY` data of the curves
#' (time and counts). Alternatively, the elements can be objects of class
#' [RLum.Data.Curve-class].
#'
#' Input values can also be provided as a `data.frame` or `matrix` where
#' the first column contains the time values and each following column contains
#' the counts of each curve.
#'
#' @param data [list], [data.frame], [matrix] or [RLum.Analysis-class] (**required**):
#' X,Y data of measured values (time and counts). See details on individual data structure.
#'
#' @param log [character] (*optional*):
#' logarithmic axes (`c("x", "y", "xy")`).
#'
#' @param smooth [character] (*optional*):
#' apply data smoothing. Use `"rmean"` to calculate the rolling where `k`
#' determines the width of the rolling window (see [rollmean]). `"spline"`
#' applies a smoothing spline to each curve (see [smooth.spline])
#'
#' @param k [integer] (*with default*):
#' integer width of the rolling window.
#'
#' @param legend [logical] (*with default*):
#' show or hide the plot legend.
#'
#' @param legend.pos [character] (*with default*):
#' keyword specifying the position of the legend (see [legend]).
#'
#' @param ... further parameters passed to [plot] (also see [par]).
#'
#'
#' @author Christoph Burow, University of Cologne (Germany)
#'
#' @seealso [plot]
#'
#' @return Returns a plot and [RLum.Analysis-class] object.
#' #' @references #' Steffen, D., Preusser, F., Schlunegger, F., 2009. OSL quartz underestimation due to #' unstable signal components. Quaternary Geochronology, 4, 353-362. #' #' @examples #' #' ## load example data #' data("ExampleData.BINfileData", envir = environment()) #' #' ## EXAMPLE 1 #' #' ## convert Risoe.BINfileData object to RLum.Analysis object #' data <- Risoe.BINfileData2RLum.Analysis(object = CWOSL.SAR.Data, pos = 8, ltype = "OSL") #' #' ## extract all OSL curves #' allCurves <- get_RLum(data) #' #' ## keep only the natural and regenerated signal curves #' pos <- seq(1, 9, 2) #' curves <- allCurves[pos] #' #' ## plot a standard NR(t) plot #' plot_NRt(curves) #' #' ## re-plot with rolling mean data smoothing #' plot_NRt(curves, smooth = "rmean", k = 10) #' #' ## re-plot with a logarithmic x-axis #' plot_NRt(curves, log = "x", smooth = "rmean", k = 5) #' #' ## re-plot with custom axes ranges #' plot_NRt(curves, smooth = "rmean", k = 5, #' xlim = c(0.1, 5), ylim = c(0.4, 1.6), #' legend.pos = "bottomleft") #' #' ## re-plot with smoothing spline on log scale #' plot_NRt(curves, smooth = "spline", log = "x", #' legend.pos = "top") #' #' ## EXAMPLE 2 #' #' # you may also use this function to check whether all #' # TD curves follow the same shape (making it a TnTx(t) plot). 
#' posTD <- seq(2, 14, 2)
#' curves <- allCurves[posTD]
#'
#' plot_NRt(curves, main = "TnTx(t) Plot",
#'          smooth = "rmean", k = 20,
#'          ylab = "TD natural / TD regenerated",
#'          xlim = c(0, 20), legend = FALSE)
#'
#' ## EXAMPLE 3
#'
#' # extract data from all positions
#' data <- lapply(1:24, FUN = function(pos) {
#'    Risoe.BINfileData2RLum.Analysis(CWOSL.SAR.Data, pos = pos, ltype = "OSL")
#' })
#'
#' # get individual curve data from each aliquot
#' aliquot <- lapply(data, get_RLum)
#'
#' # set graphical parameters
#' par(mfrow = c(2, 2))
#'
#' # create NR(t) plots for all aliquots
#' for (i in 1:length(aliquot)) {
#'    plot_NRt(aliquot[[i]][pos],
#'             main = paste0("Aliquot #", i),
#'             smooth = "rmean", k = 20,
#'             xlim = c(0, 10),
#'             cex = 0.6, legend.pos = "bottomleft")
#' }
#'
#' # reset graphical parameters
#' par(mfrow = c(1, 1))
#'
#'
#' @md
#' @export
plot_NRt <- function(data, log = FALSE, smooth = c("none", "spline", "rmean"), k = 3,
                     legend = TRUE, legend.pos = "topright", ...) {

  ## DATA INPUT EVALUATION -----
  ## Accepted input: a list of curves, a data.frame/matrix (first column time,
  ## following columns counts) or an RLum.Analysis object. In all cases the
  ## first curve is taken as the natural signal.
  if (inherits(data, "list")) {

    if (length(data) < 2)
      stop(paste("The provided list only contains curve data of the natural signal"), call. = FALSE)

    if (all(sapply(data, class) == "RLum.Data.Curve")) {
      curves <- lapply(data, get_RLum)
    } else {
      ## FIX: a list of two-column data.frames/matrices is documented as valid
      ## input, but 'curves' was previously left unassigned in this case,
      ## which triggered an obscure "object 'curves' not found" error later.
      curves <- data
    }

  } else if (inherits(data, "data.frame") || inherits(data, "matrix")) {

    if (ncol(data) < 3)
      stop(paste("The provided", class(data), "only contains curve data of the natural signal"), call. = FALSE)

    if (is.matrix(data))
      data <- as.data.frame(data)

    ## split the wide table into a list of two-column XY curves
    curves <- apply(data[2:ncol(data)], MARGIN = 2, function(curve) {
      data.frame(data[ ,1], curve)
    })

  } else if (inherits(data, "RLum.Analysis")) {

    RLum.objects <- get_RLum(data)

    if (!any(sapply(RLum.objects, class) == "RLum.Data.Curve"))
      stop(paste("The provided RLum.Analysis object must exclusively contain RLum.Data.Curve objects."), call. = FALSE)

    curves <- lapply(RLum.objects, get_RLum)

    if (length(curves) < 2)
      stop(paste("The provided RLum.Analysis object only contains curve data of the natural signal"), call. = FALSE)
  }

  ## BASIC SETTINGS ------
  natural <- curves[[1]]
  regCurves <- curves[2:length(curves)]
  time <- curves[[1]][ ,1]

  ## DATA TRANSFORMATION -----

  # calculate ratios (natural counts divided by each regenerated curve)
  NR <- lapply(regCurves, FUN = function(reg, nat) {
    nat[ ,2] / reg[ ,2]
  }, natural)

  # smooth spline
  if (smooth[1] == "spline") {
    NR <- lapply(NR, function(nr) {
      smooth.spline(nr)$y
    })
  }

  # rolling mean; this shortens each vector by k - 1 values, so the time
  # axis must be smoothed with the same window to stay aligned
  if (smooth[1] == "rmean") {
    NR <- lapply(NR, function(nr) {
      zoo::rollmean(nr, k)
    })
    time <- zoo::rollmean(time, k)
  }

  # normalise data to the first ratio value
  NRnorm <- lapply(NR, FUN = function(nr) {
    nr / nr[1]
  })

  ## EXTRA ARGUMENTS -----

  # default values
  settings <- list(
    xlim = if (log == "x" || log == "xy") c(0.1, max(time)) else c(0, max(time)),
    ylim = range(pretty(c(min(sapply(NRnorm, min)), max(sapply(NRnorm, max))))),
    xlab = "Time [s]",
    ylab = "Natural signal / Regenerated signal",
    cex = 1L,
    main = "NR(t) Plot")

  # override defaults with user settings
  settings <- modifyList(settings, list(...))

  ## PLOTTING ----------

  # set graphical parameter
  par(cex = settings$cex)

  # empty plot; invalid 'log' keywords silently fall back to linear axes
  if (is.na(pmatch(log, c("x", "y", "xy"))))
    log <- ""

  do.call(plot, modifyList(list(x = NA, y = NA, log = log, xaxs = "i", yaxs = "i"), settings))

  # horizontal reference line at N/R = 1
  abline(h = 1, lty = 3, col = "grey")

  col <- 1:length(NRnorm)

  # add N/R lines
  mapply(FUN = function(curve, col) {
    points(time, curve, type = "l", col = col)
  }, NRnorm, col)

  # add legend
  if (legend) {
    labels <- paste0("N/R", 1:length(NRnorm))
    ncol <- ifelse(length(NRnorm) > 4, ceiling(length(NRnorm) / 4) , 1)
    legend(legend.pos, legend = labels, col = col, lty = 1, ncol = ncol, cex = 0.8, bty = "n")
  }

  ## RETURN VALUES ----
  ## wrap each normalised ratio curve into an RLum.Data.Curve record
  obj <- set_RLum("RLum.Analysis",
                  protocol = "UNKNOWN",
                  records = mapply(FUN = function(curve, id) {
                    set_RLum("RLum.Data.Curve",
                             recordType = paste0("N/R", id),
                             curveType = "NRt",
                             data = matrix(c(time, curve), ncol = 2),
                             info = list(
                               data = curves,
                               call = sys.call(-6L),
                               args = as.list(sys.call(-6L)[-1])
                             ))
                  }, NRnorm, seq_len(length(NRnorm))) )

  invisible(obj)
}
Luminescence/R/plot_RLum.Data.Curve.R0000644000176200001440000001531714264017373017076 0ustar liggesusers#' @title Plot function for an RLum.Data.Curve S4 class object
#'
#' @description The function provides a standardised plot output for curve data of an
#' `RLum.Data.Curve` S4-class object.
#'
#' @details Only single curve data can be plotted with this function.Arguments
#' according to [plot].
#'
#' **Curve normalisation**
#'
#' The argument `norm` normalises all count values, to date the following
#' options are supported:
#'
#' `norm = TRUE` or `norm = "max"`: Curve values are normalised to the highest
#' count value in the curve
#'
#' `norm = "last"`: Curves values are normalised to the last count value
#' (this can be useful in particular for radiofluorescence curves)
#'
#' `norm = "huot"`: Curve values are normalised as suggested by Sébastien Huot
#' via GitHub:
#' \deqn{
#' y = (observed - median(background)) / (max(observed) - median(background))
#' }
#'
#' The background of the curve is defined as the last 20 % of the count values
#' of a curve.
#'
#' @param object [RLum.Data.Curve-class] (**required**):
#' S4 object of class `RLum.Data.Curve`
#'
#' @param par.local [logical] (*with default*):
#' use local graphical parameters for plotting, e.g. the plot is shown in one
#' column and one row. If `par.local = FALSE`, global parameters are inherited.
#'
#' @param norm [logical] [character] (*with default*): allows curve normalisation to the
#' highest count value ('default'). Alternatively, the function offers the
#' modes `"max"`, `"last"` and `"huot"` for a background corrected normalisation, see details.
#'
#' @param smooth [logical] (*with default*):
#' provides an automatic curve smoothing based on [zoo::rollmean]
#'
#' @param ... 
further arguments and graphical parameters that will be passed
#' to the `plot` function
#'
#' @return Returns a plot.
#'
#' @note Not all arguments of [plot] will be passed!
#'
#' @section Function version: 0.2.6
#'
#' @author
#' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#' @seealso [plot], [plot_RLum]
#'
#' @keywords aplot
#'
#' @examples
#'
#' ##plot curve data
#'
#' #load Example data
#' data(ExampleData.CW_OSL_Curve, envir = environment())
#'
#' #transform data.frame to RLum.Data.Curve object
#' temp <- as(ExampleData.CW_OSL_Curve, "RLum.Data.Curve")
#'
#' #plot RLum.Data.Curve object
#' plot_RLum.Data.Curve(temp)
#'
#'
#' @md
#' @export
plot_RLum.Data.Curve<- function(
  object,
  par.local = TRUE,
  norm = FALSE,
  smooth = FALSE,
  ...
){

  # Integrity check -------------------------------------------------------------

  ##check if object is of class RLum.Data.Curve
  if(!inherits(object, "RLum.Data.Curve"))
    stop("[plot_RLum.Data.Curve()] Input object is not of type RLum.Data.Curve", call. = FALSE)

  ## check for NA values; a curve of only NA values cannot be plotted
  if(all(is.na(object@data))){
    warning("[plot_RLum.Data.Curve()] Curve contains only NA-values, nothing plotted.", call. = FALSE)
    return(NULL)
  }

  # Preset plot -------------------------------------------------------------
  ## preset axis labels; overwritten below if the record type is recognised
  ## or if the object carries its own XSYG axis labels
  lab.unit <- "Unknown"
  lab.xlab <- "Independent"
  xlab.xsyg <- ylab.xsyg <- NA

  ##set labelling unit
  ## NOTE(review): only the first element of recordType is inspected here
  if(!is.na(object@recordType)){
    if(object@recordType[1] %in% c("OSL", "IRSL", "RL", "RF", "LM-OSL", "RBR")){
      lab.unit <- "s"
      lab.xlab <- "Stimulation time"
    } else if(object@recordType[1] == "TL") {
      lab.unit <- "\u00B0C"
      lab.xlab <- "Temperature"
    }
  }

  ##XSYG
  ##check for curveDescripter
  ## (XSYG imports store axis labels as a "xlab;ylab" string in the info slot)
  if ("curveDescripter" %in% names(object@info)) {
    temp.lab <- strsplit(object@info$curveDescripter, split = ";", fixed = TRUE)[[1]]
    xlab.xsyg <- temp.lab[1]
    ylab.xsyg <- temp.lab[2]
  }

  ##normalise curves if argument has been set
  ## supported modes: TRUE/"max" (highest count), "last" (last count),
  ## "huot" (background-corrected, background = median of last 20 %)
  if(norm[1] %in% c('max', 'last', 'huot') || norm[1] == TRUE){
    if (norm[1] == "max" || norm[1] == TRUE) {
      object@data[,2] <- object@data[,2] / max(object@data[,2])

    } else if (norm[1] == "last") {
      object@data[,2] <- object@data[,2] / object@data[nrow(object@data),2]

    } else if (norm[1] == "huot") {
      bg <- median(object@data[floor(nrow(object@data)*0.8):nrow(object@data),2])
      object@data[,2] <- (object@data[,2] - bg) / max(object@data[,2] - bg)
    }

    ##check for Inf and NA introduced by the division above
    if(any(is.infinite(object@data[,2])) || anyNA(object@data[,2])){
      object@data[,2][is.infinite(object@data[,2]) | is.na(object@data[,2])] <- 0
      warning("[plot_RLum.Data.Curve()] Normalisation led to Inf or NaN values. Values replaced by 0.", call. = FALSE)
    }
  }

  ## y-axis label: XSYG label if available, otherwise derived from the
  ## record type and the mean channel resolution
  ylab <- if (!is.na(ylab.xsyg)) {
    ylab.xsyg
  } else if (lab.xlab == "Independent") {
    "Dependent [unknown]"
  } else {
    paste(
      object@recordType,
      " [cts/", round(max(object@data[,1]) / length(object@data[,1]),digits = 2)
      , " ", lab.unit,"]", sep = ""
    )
  }

  ## subtitle: heating rate for TL, channel resolution for OSL/IRSL;
  ## NULL otherwise (suppresses the subtitle via keep.null below)
  sub <- if ((grepl("TL", object@recordType) == TRUE) & "RATE" %in% names(object@info)) {
    paste("(",object@info$RATE," K/s)", sep = "")
  } else if ((grepl("OSL", object@recordType) | grepl("IRSL", object@recordType)) & "interval" %in% names(object@info)) {
    paste("(resolution: ",object@info$interval," s)", sep = "")
  }

  ##deal with additional arguments
  plot_settings <- modifyList(x = list(
    main = object@recordType[1],
    xlab = if (!is.na(xlab.xsyg)) xlab.xsyg else paste0(lab.xlab, " [", lab.unit, "]"),
    ylab = ylab,
    sub = sub,
    cex = 1,
    type = "l",
    las = NULL,
    lwd = 1,
    lty = 1,
    pch = 1,
    col = 1,
    axes = TRUE,
    xlim = range(object@data[,1], na.rm = TRUE),
    ylim = range(object@data[,2], na.rm = TRUE),
    log = "",
    mtext = ""
  ), val = list(...), keep.null = TRUE)

  ##par setting for possible combination with plot method for RLum.Analysis objects
  if (par.local)
    par(mfrow = c(1,1), cex = plot_settings$cex)

  ##smooth
  ## rolling-mean window of ~1 % of the number of channels
  if(smooth){
    k <- ceiling(length(object@data[, 2])/100)
    object@data[, 2] <- zoo::rollmean(object@data[, 2], k = k, fill = NA)
  }

  ##plot curve
  plot(
    object@data[,1],
    object@data[,2],
    main = plot_settings$main,
    xlim = plot_settings$xlim,
    ylim = plot_settings$ylim,
    xlab = plot_settings$xlab,
    ylab = plot_settings$ylab,
    sub = plot_settings$sub,
    type = plot_settings$type,
    log = plot_settings$log,
    col = plot_settings$col,
    lwd = plot_settings$lwd,
    pch = plot_settings$pch,
    lty = plot_settings$lty,
    axes = plot_settings$axes,
    las = plot_settings$las)

  ##plot additional mtext
  mtext(plot_settings$mtext, side = 3, cex = plot_settings$cex * 0.8)
}
Luminescence/R/calc_HomogeneityTest.R0000644000176200001440000001017614236146743017340 0ustar liggesusers#' Apply a simple homogeneity test after Galbraith (2003)
#'
#' A simple homogeneity 
test for De estimates
#'
#' For details see Galbraith (2003).
#'
#' @param data [RLum.Results-class] or [data.frame] (**required**):
#' for [data.frame]: two columns with De `(data[,1])` and De error `(values[,2])`
#'
#' @param log [logical] (*with default*):
#' perform the homogeneity test with (un-)logged data
#'
#' @param ... further arguments (for internal compatibility only).
#'
#' @return
#' Returns a terminal output. In addition an
#' [RLum.Results-class]-object is returned containing the
#' following elements:
#'
#' \item{summary}{[data.frame] summary of all relevant model results.}
#' \item{data}{[data.frame] original input data}
#' \item{args}{[list] used arguments}
#' \item{call}{[call] the function call}
#'
#' The output should be accessed using the function [get_RLum]
#'
#' @section Function version: 0.3.0
#'
#' @author Christoph Burow, University of Cologne (Germany), Sebastian Kreutzer,
#' IRAMAT-CRP2A, Université Bordeaux Montaigne (France)
#'
#' @seealso [pchisq]
#'
#' @references
#' Galbraith, R.F., 2003. A simple homogeneity test for estimates
#' of dose obtained using OSL. Ancient TL 21, 75-77.
#'
#'
#' @examples
#'
#' ## load example data
#' data(ExampleData.DeValues, envir = environment())
#'
#' ## apply the homogeneity test
#' calc_HomogeneityTest(ExampleData.DeValues$BT998)
#'
#' ## using the data presented by Galbraith (2003)
#' df <-
#' data.frame(
#'   x = c(30.1, 53.8, 54.3, 29.0, 47.6, 44.2, 43.1),
#'   y = c(4.8, 7.1, 6.8, 4.3, 5.2, 5.9, 3.0))
#'
#' calc_HomogeneityTest(df)
#'
#'
#' @md
#' @export
calc_HomogeneityTest <- function(
  data,
  log = TRUE,
  ...
){

  ##============================================================================##
  ## CONSISTENCY CHECK OF INPUT DATA
  ##============================================================================##

  ## FIX: previously a missing 'data' argument was silently ignored here and
  ## the function only failed later with an obscure error message; fail early
  ## with a clear message instead.
  if(missing(data)){
    stop("[calc_HomogeneityTest()] input 'data' is missing!", call. = FALSE)
  }

  if(!is(data, "data.frame") & !is(data, "RLum.Results")){
    stop(
      "[calc_HomogeneityTest()] 'data' object has to be of type 'data.frame' or 'RLum.Results'!",
      call. = FALSE
    )
  } else {
    ## extract the De table from an RLum.Results input
    if(is(data, "RLum.Results")){
      data <- get_RLum(data, "data")
    }
  }

  ##==========================================================================##
  ## ... ARGUMENTS
  ##==========================================================================##
  extraArgs <- list(...)

  ## console output on/off (internal argument)
  if("verbose" %in% names(extraArgs)) {
    verbose<- extraArgs$verbose
  } else {
    verbose<- TRUE
  }

  ##============================================================================##
  ## CALCULATIONS
  ##============================================================================##

  ## optionally log-transform: log the De values and replace the absolute
  ## errors by relative errors (first-order error propagation of log())
  if(log) {
    dat <- log(data)
    dat[[2]] <- data[[2]]/data[[1]]
  } else {
    dat <- data
  }

  ## error-weighted mean and G statistic (Galbraith 2003)
  wi <- 1 / dat[[2]] ^ 2
  wizi <- wi * dat[[1]]
  mu <- sum(wizi) / sum(wi)
  gi <- wi * (dat[[1]] - mu) ^ 2

  G <- sum(gi)
  df <- length(wi) - 1
  n <- length(wi)

  ## p-value of G under chi-squared with n - 1 degrees of freedom
  P <- pchisq(G, df, lower.tail = FALSE)

  ##============================================================================##
  ## OUTPUT
  ##============================================================================##
  if(verbose) {
    cat("\n [calc_HomogeneityTest()]")
    cat(paste("\n\n ---------------------------------"))
    cat(paste("\n n: ", n))
    cat(paste("\n ---------------------------------"))
    cat(paste("\n mu: ", round(mu,4)))
    cat(paste("\n G-value: ", round(G,4)))
    cat(paste("\n Degrees of freedom:", df))
    cat(paste("\n P-value: ", round(P,4)))
    cat(paste("\n ---------------------------------\n\n"))
  }

  ##============================================================================##
  ## RETURN VALUES
  ##============================================================================##
  summary <- data.frame(
    n = n,
    g.value = G,
    df = df,
    P.value = P
  )

  args <- list(log = log)

  return(set_RLum(
    class = "RLum.Results",
    data = list(
      summary = summary,
      data = data,
      args = args
    ),
    info = list(call = sys.call())
  ))
}
Luminescence/R/plot_Histogram.R0000644000176200001440000007063714264017373016223 0ustar liggesusers#' Plot a histogram with separate error plot
#'
#' Function plots a predefined 
histogram with an accompanying error plot as #' suggested by Rex Galbraith at the UK LED in Oxford 2010. #' #' If the normal curve is added, the y-axis in the histogram will show the #' probability density. #' #' #' A statistic summary, i.e. a collection of statistic measures of #' centrality and dispersion (and further measures) can be added by specifying #' one or more of the following keywords: #' - `"n"` (number of samples), #' - `"mean"` (mean De value), #' - `"mean.weighted"` (error-weighted mean), #' - `"median"` (median of the De values), #' - `"sdrel"` (relative standard deviation in percent), #' - `"sdrel.weighted"` (error-weighted relative standard deviation in percent), #' - `"sdabs"` (absolute standard deviation), #' - `"sdabs.weighted"` (error-weighted absolute standard deviation), #' - `"serel"` (relative standard error), #' - `"serel.weighted"` (error-weighted relative standard error), #' - `"seabs"` (absolute standard error), #' - `"seabs.weighted"` (error-weighted absolute standard error), #' - `"kurtosis"` (kurtosis) and #' - `"skewness"` (skewness). #' #' @param data [data.frame] or [RLum.Results-class] object (**required**): #' for `data.frame`: two columns: De (`data[,1]`) and De error (`data[,2]`) #' #' @param na.rm [logical] (*with default*): #' excludes `NA` values from the data set prior to any further operations. #' #' @param mtext [character] (*optional*): #' further sample information ([mtext]). #' #' @param cex.global [numeric] (*with default*): #' global scaling factor. #' #' @param se [logical] (*optional*): #' plots standard error points over the histogram, default is `FALSE`. #' #' @param rug [logical] (*optional*): #' adds rugs to the histogram, default is `TRUE`. #' #' @param normal_curve [logical] (*with default*): #' adds a normal curve to the histogram. Mean and standard deviation are calculated from the #' input data. More see details section. 
#' #' @param summary [character] (*optional*): #' add statistic measures of centrality and dispersion to the plot. #' Can be one or more of several keywords. See details for available keywords. #' #' @param summary.pos [numeric] or [character] (*with default*): #' optional position coordinates or keyword (e.g. `"topright"`) #' for the statistical summary. Alternatively, the keyword `"sub"` may be #' specified to place the summary below the plot header. However, this latter #' option in only possible if `mtext` is not used. In case of coordinate #' specification, y-coordinate refers to the right y-axis. #' #' @param colour [numeric] or [character] (*with default*): #' optional vector of length 4 which specifies the colours of the following #' plot items in exactly this order: histogram bars, rug lines, normal #' distribution curve and standard error points #' (e.g., `c("grey", "black", "red", "grey")`). #' #' @param interactive [logical] (*with default*): #' create an interactive histogram plot (requires the 'plotly' package) #' #' @param ... further arguments and graphical parameters passed to [plot] or #' [hist]. If y-axis labels are provided, these must be specified as a vector #' of length 2 since the plot features two axes #' (e.g. `ylab = c("axis label 1", "axis label 2")`). Y-axes limits #' (`ylim`) must be provided as vector of length four, with the first two #' elements specifying the left axes limits and the latter two elements giving #' the right axis limits. #' #' @note The input data is not restricted to a special type. 
#' #' @section Function version: 0.4.5 #' #' @author #' Michael Dietze, GFZ Potsdam (Germany)\cr #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #' @seealso [hist], [plot] #' #' @examples #' #' ## load data #' data(ExampleData.DeValues, envir = environment()) #' ExampleData.DeValues <- #' Second2Gray(ExampleData.DeValues$BT998, dose.rate = c(0.0438,0.0019)) #' #' ## plot histogram the easiest way #' plot_Histogram(ExampleData.DeValues) #' #' ## plot histogram with some more modifications #' plot_Histogram(ExampleData.DeValues, #' rug = TRUE, #' normal_curve = TRUE, #' cex.global = 0.9, #' pch = 2, #' colour = c("grey", "black", "blue", "green"), #' summary = c("n", "mean", "sdrel"), #' summary.pos = "topleft", #' main = "Histogram of De-values", #' mtext = "Example data set", #' ylab = c(expression(paste(D[e], " distribution")), #' "Standard error"), #' xlim = c(100, 250), #' ylim = c(0, 0.1, 5, 20)) #' #' #' @md #' @export plot_Histogram <- function( data, na.rm = TRUE, mtext, cex.global, se, rug, normal_curve, summary, summary.pos, colour, interactive = FALSE, ... 
) { # Integrity tests --------------------------------------------------------- ## check/adjust input data structure if(is(data, "RLum.Results") == FALSE & is(data, "data.frame") == FALSE) { stop(paste("[plot_Histogram()] Input data format is neither", "'data.frame' nor 'RLum.Results'")) } else { if(is(data, "RLum.Results")) { data <- get_RLum(data)[,1:2] } } ## handle error-free data sets if(length(data) < 2) { data <- cbind(data, rep(NA, length(data))) } ## Set general parameters --------------------------------------------------- ## Check/set default parameters if(missing(cex.global) == TRUE) { cex.global <- 1 } if(missing(mtext) == TRUE) { mtext <- "" } if(missing(se) == TRUE) { se = TRUE } if(missing(rug) == TRUE) { rug = TRUE } if(missing(colour) == TRUE) { colour = c("white", "black", "red", "black") } if(missing(summary) == TRUE) { summary <- "" } if(missing(summary.pos) == TRUE) { summary.pos <- "sub" } if(missing(normal_curve) == TRUE) { normal_curve = FALSE } ## read out additional arguments list extraArgs <- list(...) 
## define fun if("fun" %in% names(extraArgs)) { fun <- extraArgs$fun } else { fun <- FALSE } ## optionally, count and exclude NA values and print result if(na.rm == TRUE) { n.NA <- sum(is.na(data[,1])) if(n.NA == 1) { print("1 NA value excluded.") } else if(n.NA > 1) { print(paste(n.NA, "NA values excluded.")) } data <- data[!is.na(data[,1]),] } if("main" %in% names(extraArgs)) { main.plot <- extraArgs$main } else { main.plot <- "Histogram" } if("xlab" %in% names(extraArgs)) { xlab.plot <- extraArgs$xlab } else { xlab.plot <- expression(paste(D[e], " [Gy]")) } if("ylab" %in% names(extraArgs)) { ylab.plot <- extraArgs$ylab } else { ylab.plot <- c("Frequency", "Standard error") } if("breaks" %in% names(extraArgs)) { breaks.plot <- extraArgs$breaks breaks_calc <- hist(x = data[,1], breaks = breaks.plot, plot = FALSE)$breaks } else { breaks.plot <- hist(x = data[,1], plot = FALSE)$breaks breaks_calc <- breaks.plot } if("xlim" %in% names(extraArgs)) { xlim.plot <- extraArgs$xlim } else { xlim.plot <- range(breaks_calc) } if("ylim" %in% names(extraArgs)) { ylim.plot <- extraArgs$ylim } else { H.lim <- hist(data[,1], breaks = breaks.plot, plot = FALSE) if(normal_curve == TRUE) { left.ylim <- c(0, max(H.lim$density)) } else { left.ylim <- c(0, max(H.lim$counts)) } range.error <- try(expr = range(data[,2], na.rm = TRUE), silent = TRUE) range.error[1] <- ifelse(is.infinite(range.error[1]), 0, range.error[1]) range.error[2] <- ifelse(is.infinite(range.error[2]), 0, range.error[2]) ylim.plot <- c(left.ylim, range.error) } if("pch" %in% names(extraArgs)) { pch.plot <- extraArgs$pch } else { pch.plot <- 1 } ## Set plot area format par(mar = c(4.5, 4.5, 4.5, 4.5), cex = cex.global) ## Plot histogram ----------------------------------------------------------- HIST <- hist(data[,1], main = "", xlab = xlab.plot, ylab = ylab.plot[1], xlim = xlim.plot, ylim = ylim.plot[1:2], breaks = breaks.plot, freq = !normal_curve, col = colour[1] ) ## add title title(line = 2, main = main.plot) ## 
Optionally, add rug ------------------------------------------------------ if(rug == TRUE) {rug(data[,1], col = colour[2])} ## Optionally, add a normal curve based on the data ------------------------- if(normal_curve == TRUE){ ## cheat the R check routine, tztztz how neat x <- NULL rm(x) ## add normal distribution curve curve(dnorm(x, mean = mean(na.exclude(data[,1])), sd = sd(na.exclude(data[,1]))), col = colour[3], add = TRUE, lwd = 1.2 * cex.global) } ## calculate and paste statistical summary data.stats <- list(data = data) ## calculate and paste statistical summary De.stats <- matrix(nrow = length(data), ncol = 18) colnames(De.stats) <- c("n", "mean", "mean.weighted", "median", "median.weighted", "kde.max", "sd.abs", "sd.rel", "se.abs", "se.rel", "q25", "q75", "skewness", "kurtosis", "sd.abs.weighted", "sd.rel.weighted", "se.abs.weighted", "se.rel.weighted") for(i in 1:length(data)) { statistics <- calc_Statistics(data) De.stats[i,1] <- statistics$weighted$n De.stats[i,2] <- statistics$unweighted$mean De.stats[i,3] <- statistics$weighted$mean De.stats[i,4] <- statistics$unweighted$median De.stats[i,5] <- statistics$unweighted$median De.stats[i,7] <- statistics$unweighted$sd.abs De.stats[i,8] <- statistics$unweighted$sd.rel De.stats[i,9] <- statistics$unweighted$se.abs De.stats[i,10] <- statistics$weighted$se.rel De.stats[i,11] <- quantile(data[,1], 0.25) De.stats[i,12] <- quantile(data[,1], 0.75) De.stats[i,13] <- statistics$unweighted$skewness De.stats[i,14] <- statistics$unweighted$kurtosis De.stats[i,15] <- statistics$weighted$sd.abs De.stats[i,16] <- statistics$weighted$sd.rel De.stats[i,17] <- statistics$weighted$se.abs De.stats[i,18] <- statistics$weighted$se.rel ##kdemax - here a little doubled as it appears below again if(nrow(data) >= 2){ De.density <-density(x = data[,1], kernel = "gaussian", from = xlim.plot[1], to = xlim.plot[2]) De.stats[i,6] <- De.density$x[which.max(De.density$y)] }else{ De.denisty <- NA De.stats[i,6] <- NA } } label.text = 
list(NA) if(summary.pos[1] != "sub") { n.rows <- length(summary) for(i in 1:length(data)) { stops <- paste(rep("\n", (i - 1) * n.rows), collapse = "") summary.text <- character(0) for(j in 1:length(summary)) { summary.text <- c(summary.text, paste( "", ifelse("n" %in% summary[j] == TRUE, paste("n = ", De.stats[i,1], "\n", sep = ""), ""), ifelse("mean" %in% summary[j] == TRUE, paste("mean = ", round(De.stats[i,2], 2), "\n", sep = ""), ""), ifelse("mean.weighted" %in% summary[j] == TRUE, paste("weighted mean = ", round(De.stats[i,3], 2), "\n", sep = ""), ""), ifelse("median" %in% summary[j] == TRUE, paste("median = ", round(De.stats[i,4], 2), "\n", sep = ""), ""), ifelse("median.weighted" %in% summary[j] == TRUE, paste("weighted median = ", round(De.stats[i,5], 2), "\n", sep = ""), ""), ifelse("kdemax" %in% summary[j] == TRUE, paste("kdemax = ", round(De.stats[i,6], 2), " \n ", sep = ""), ""), ifelse("sdabs" %in% summary[j] == TRUE, paste("sd = ", round(De.stats[i,7], 2), "\n", sep = ""), ""), ifelse("sdrel" %in% summary[j] == TRUE, paste("rel. sd = ", round(De.stats[i,8], 2), " %", "\n", sep = ""), ""), ifelse("seabs" %in% summary[j] == TRUE, paste("se = ", round(De.stats[i,9], 2), "\n", sep = ""), ""), ifelse("serel" %in% summary[j] == TRUE, paste("rel. se = ", round(De.stats[i,10], 2), " %", "\n", sep = ""), ""), ifelse("skewness" %in% summary[j] == TRUE, paste("skewness = ", round(De.stats[i,13], 2), "\n", sep = ""), ""), ifelse("kurtosis" %in% summary[j] == TRUE, paste("kurtosis = ", round(De.stats[i,14], 2), "\n", sep = ""), ""), ifelse("sdabs.weighted" %in% summary[j] == TRUE, paste("abs. weighted sd = ", round(De.stats[i,15], 2), "\n", sep = ""), ""), ifelse("sdrel.weighted" %in% summary[j] == TRUE, paste("rel. weighted sd = ", round(De.stats[i,16], 2), "\n", sep = ""), ""), ifelse("seabs.weighted" %in% summary[j] == TRUE, paste("abs. weighted se = ", round(De.stats[i,17], 2), "\n", sep = ""), ""), ifelse("serel.weighted" %in% summary[j] == TRUE, paste("rel. 
weighted se = ", round(De.stats[i,18], 2), "\n", sep = ""), ""), sep = "")) } summary.text <- paste(summary.text, collapse = "") label.text[[length(label.text) + 1]] <- paste(stops, summary.text, stops, sep = "") } } else { for(i in 1:length(data)) { summary.text <- character(0) for(j in 1:length(summary)) { summary.text <- c(summary.text, ifelse("n" %in% summary[j] == TRUE, paste("n = ", De.stats[i,1], " | ", sep = ""), ""), ifelse("mean" %in% summary[j] == TRUE, paste("mean = ", round(De.stats[i,2], 2), " | ", sep = ""), ""), ifelse("mean.weighted" %in% summary[j] == TRUE, paste("weighted mean = ", round(De.stats[i,3], 2), " | ", sep = ""), ""), ifelse("median" %in% summary[j] == TRUE, paste("median = ", round(De.stats[i,4], 2), " | ", sep = ""), ""), ifelse("median.weighted" %in% summary[j] == TRUE, paste("weighted median = ", round(De.stats[i,5], 2), " | ", sep = ""), ""), ifelse("kdemax" %in% summary[j] == TRUE, paste("kdemax = ", round(De.stats[i,6], 2), " | ", sep = ""), ""), ifelse("sdrel" %in% summary[j] == TRUE, paste("rel. sd = ", round(De.stats[i,8], 2), " %", " | ", sep = ""), ""), ifelse("sdabs" %in% summary[j] == TRUE, paste("abs. sd = ", round(De.stats[i,7], 2), " | ", sep = ""), ""), ifelse("serel" %in% summary[j] == TRUE, paste("rel. se = ", round(De.stats[i,10], 2), " %", " | ", sep = ""), ""), ifelse("seabs" %in% summary[j] == TRUE, paste("abs. se = ", round(De.stats[i,9], 2), " | ", sep = ""), ""), ifelse("skewness" %in% summary[j] == TRUE, paste("skewness = ", round(De.stats[i,13], 2), " | ", sep = ""), ""), ifelse("kurtosis" %in% summary[j] == TRUE, paste("kurtosis = ", round(De.stats[i,14], 2), " | ", sep = ""), ""), ifelse("sdabs.weighted" %in% summary[j] == TRUE, paste("abs. weighted sd = ", round(De.stats[i,15], 2), " %", " | ", sep = ""), ""), ifelse("sdrel.weighted" %in% summary[j] == TRUE, paste("rel. weighted sd = ", round(De.stats[i,16], 2), " %", " | ", sep = ""), ""), ifelse("seabs.weighted" %in% summary[j] == TRUE, paste("abs. 
weighted se = ", round(De.stats[i,17], 2), " %", " | ", sep = ""), ""), ifelse("serel.weighted" %in% summary[j] == TRUE, paste("rel. weighted se = ", round(De.stats[i,18], 2), " %", " | ", sep = ""), "") ) } summary.text <- paste(summary.text, collapse = "") label.text[[length(label.text) + 1]] <- paste( " ", summary.text, sep = "") } ## remove outer vertical lines from string for(i in 2:length(label.text)) { label.text[[i]] <- substr(x = label.text[[i]], start = 3, stop = nchar(label.text[[i]]) - 3) } } ## remove dummy list element label.text[[1]] <- NULL ## convert keywords into summary placement coordinates if(missing(summary.pos) == TRUE) { summary.pos <- c(xlim.plot[1], ylim.plot[2]) summary.adj <- c(0, 1) } else if(length(summary.pos) == 2) { summary.pos <- summary.pos summary.adj <- c(0, 1) } else if(summary.pos[1] == "topleft") { summary.pos <- c(xlim.plot[1], ylim.plot[2]) summary.adj <- c(0, 1) } else if(summary.pos[1] == "top") { summary.pos <- c(mean(xlim.plot), ylim.plot[2]) summary.adj <- c(0.5, 1) } else if(summary.pos[1] == "topright") { summary.pos <- c(xlim.plot[2], ylim.plot[2]) summary.adj <- c(1, 1) } else if(summary.pos[1] == "left") { summary.pos <- c(xlim.plot[1], mean(ylim.plot[1:2])) summary.adj <- c(0, 0.5) } else if(summary.pos[1] == "center") { summary.pos <- c(mean(xlim.plot), mean(ylim.plot[1:2])) summary.adj <- c(0.5, 0.5) } else if(summary.pos[1] == "right") { summary.pos <- c(xlim.plot[2], mean(ylim.plot[1:2])) summary.adj <- c(1, 0.5) }else if(summary.pos[1] == "bottomleft") { summary.pos <- c(xlim.plot[1], ylim.plot[1]) summary.adj <- c(0, 0) } else if(summary.pos[1] == "bottom") { summary.pos <- c(mean(xlim.plot), ylim.plot[1]) summary.adj <- c(0.5, 0) } else if(summary.pos[1] == "bottomright") { summary.pos <- c(xlim.plot[2], ylim.plot[1]) summary.adj <- c(1, 0) } ## add summary content for(i in 1:length(data.stats)) { if(summary.pos[1] != "sub") { text(x = summary.pos[1], y = summary.pos[2], adj = summary.adj, labels = 
label.text[[i]], col = colour[2], cex = cex.global * 0.8) } else { if(mtext == "") { mtext(side = 3, line = 1 - i, text = label.text[[i]], col = colour[2], cex = cex.global * 0.8) } } } ## Optionally, add standard error plot -------------------------------------- if(sum(is.na(data[,2])) == length(data[,2])) { se <- FALSE } if(se == TRUE) { par(new = TRUE) plot.data <- data[!is.na(data[,2]),] plot(x = plot.data[,1], y = plot.data[,2], xlim = xlim.plot, ylim = ylim.plot[3:4], pch = pch.plot, col = colour[4], main = "", xlab = "", ylab = "", axes = FALSE, frame.plot = FALSE ) axis(side = 4, labels = TRUE, cex = cex.global ) mtext(ylab.plot[2], side = 4, line = 3, cex = cex.global) # par(new = FALSE) } ## Optionally add user-defined mtext mtext(side = 3, line = 0.5, text = mtext, cex = 0.8 * cex.global) ## FUN by R Luminescence Team if(fun & !interactive) sTeve() ## Optionally: Interactive Plot ---------------------------------------------- if (interactive) { if (!requireNamespace("plotly", quietly = TRUE)) stop("The interactive histogram requires the 'plotly' package. To install", " this package run 'install.packages('plotly')' in your R console.", call. 
= FALSE) ## tidy data ---- data <- as.data.frame(data) colnames(data) <- c("x", "y") x <- y <- NULL # suffice CRAN check for no visible binding if (length(grep("paste", as.character(xlab.plot))) > 0) xlab.plot <- "Equivalent dose [Gy]" ## create plots ---- # histogram hist <- plotly::plot_ly(data = data, x = x, type = "histogram", showlegend = FALSE, name = "Bin", opacity = 0.75, marker = list(color = "428BCA", line = list(width = 1.0, color = "white")), histnorm = ifelse(normal_curve, "probability density", ""), yaxis = "y" ) # normal curve ---- if (normal_curve) { density.curve <- density(data$x) normal.curve <- data.frame(x = density.curve$x, y = density.curve$y) hist <- plotly::add_trace(hist, data = normal.curve, x = x, y = y, type = "scatter", mode = "lines", marker = list(color = "red"), name = "Normal curve", yaxis = "y") } # scatter plot of individual errors if (se) { yaxis2 <- list(overlaying = "y", side = "right", showgrid = FALSE, title = ylab.plot[2], ticks = "", showline = FALSE) se.text <- paste0("Measured value:
", data$x, " ± ", data$y,"
") hist <- plotly::add_trace(hist, data = data, x = x, y = y, type = "scatter", mode = "markers", name = "Error", hoverinfo = "text", text = se.text, marker = list(color = "black"), yaxis = "y2") hist <- plotly::layout(yaxis2 = yaxis2) } # set layout ---- hist <- plotly::layout(hist, hovermode = "closest", title = paste("", main.plot, ""), margin = list(r = 90), xaxis = list(title = xlab.plot, ticks = ""), yaxis = list(title = ylab.plot[1], ticks = "", showline = FALSE, showgrid = FALSE) ) ## show and return plot ---- print(hist) return(hist) } } Luminescence/R/RLum-class.R0000644000176200001440000000477414264017373015215 0ustar liggesusers#' @include replicate_RLum.R RcppExports.R NULL #' Class `"RLum"` #' #' Abstract class for data in the package Luminescence #' Subclasses are: #' #' **RLum-class**\cr #' |\cr #' |----[RLum.Data-class]\cr #' |----|-- [RLum.Data.Curve-class]\cr #' |----|-- [RLum.Data.Spectrum-class]\cr #' |----|-- [RLum.Data.Image-class]\cr #' |----[RLum.Analysis-class]\cr #' |----[RLum.Results-class] #' #' @name RLum-class #' #' @docType class #' #' @slot originator #' Object of class [character] containing the name of the producing #' function for the object. Set automatically by using the function [set_RLum]. #' #' @slot info #' Object of class [list] for additional information on the object itself #' #' @slot .uid #' Object of class [character] for a unique object identifier. This id is #' usually calculated using the internal function `create_UID()` if the function [set_RLum] #' is called. #' #' @slot .pid #' Object of class [character] for a parent id. This allows nesting RLum-objects #' at will. The parent id can be the uid of another object. #' #' @note `RLum` is a virtual class. #' #' @section Objects from the Class: #' A virtual Class: No objects can be created from it. 
#'
#' @section Class version: 0.4.0
#'
#' @author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)
#'
#' @seealso [RLum.Data-class], [RLum.Data.Curve-class], [RLum.Data.Spectrum-class], [RLum.Data.Image-class],
#' [RLum.Analysis-class], [RLum.Results-class], [methods_RLum]
#'
#' @keywords classes
#'
#' @examples
#'
#' showClass("RLum")
#'
#' @md
#' @export
setClass("RLum",
         slots = list(
           originator = "character",
           info = "list",
           .uid = "character",
           .pid = "character"
         ),
         contains = "VIRTUAL",
         prototype = prototype(
           originator = NA_character_,
           info = list(),
           .uid = NA_character_,
           .pid = NA_character_
         )
)

# replication method for object class ------------------------------------------
#' @describeIn RLum
#' Replication method RLum-objects
#'
#' @param object [RLum-class] (**required**):
#' an object of class [RLum-class]
#'
#' @param times [integer] (*optional*):
#' number for times each element is repeated element
#'
#' @md
#' @export
setMethod(
  "replicate_RLum",
  "RLum",
  definition = function(object, times = NULL) {
    ## default to a single copy when 'times' is not provided
    if (is.null(times)) {
      times <- 1
    }

    ## FIX: use seq_len() instead of 1:times. For times = 0 the old
    ## counting sequence 1:0 evaluated to c(1, 0) and wrongly produced a
    ## two-element list; seq_len(0) correctly yields an empty list
    ## (negative input now raises an error instead of silently replicating).
    lapply(seq_len(times), function(x) {
      object
    })
  }
)
Luminescence/R/plot_ROI.R0000644000176200001440000002070114264017373014706 0ustar liggesusers#'@title Create Regions of Interest (ROI) Graphic
#'
#'@description Create ROI graphic with data extracted from the data imported
#'via [read_RF2R]. This function is used internally by [analyse_IRSAR.RF] but
#'might be of use to work with reduced data from spatially resolved measurements.
#'The plot dimensions mimic the original image dimensions
#'
#'@param object [RLum.Analysis-class], [RLum.Results-class] or a [list] of such objects (**required**):
#'data input. Please note that to avoid function errors, only input created
#'by the functions [read_RF2R] or [extract_ROI] is accepted
#'
#'@param exclude_ROI [numeric] (*with default*): option to remove particular ROIs from the
#'analysis.
Those ROIs are plotted but not coloured and not taken into account
#'in distance analysis. `NULL` excludes nothing.
#'
#'@param dist_thre [numeric] (*optional*): euclidean distance threshold in pixel
#'distance. All ROI for which the euclidean distance is smaller are marked. This
#'helps to identify ROIs that might be affected by signal cross-talk. Note:
#'the distance is calculated from the centre of an ROI, e.g., the threshold
#'should consider the ROI's or grain radius.
#'
#'@param dim.CCD [numeric] (*optional*): metric x and y for the recorded (chip)
#'surface in µm. For instance `c(8192,8192)`, if set additional x and y-axes are shown
#'
#'@param bg_image [RLum.Data.Image-class] (*optional*): background image object
#'please note that the dimensions are not checked.
#'
#'@param plot [logical] (*with default*): enable or disable plot output to use
#'the function only to extract the ROI data
#'
#'@param ... further parameters to manipulate the plot. On top of all arguments of
#'[graphics::plot.default] the following arguments are supported: `lwd.ROI`, `lty.ROI`,
#'`col.ROI`, `col.pixel`, `text.labels`, `text.offset`, `grid` (`TRUE/FALSE`), `legend` (`TRUE/FALSE`),
#'`legend.text`, `legend.pos`
#'
#'@return An ROI plot and an [RLum.Results-class] object with a matrix containing
#'the extracted ROI data and an object produced by [stats::dist] containing
#'the euclidean distance between the ROIs.
#'
#'@section Function version: 0.2.0
#'
#'@author Sebastian Kreutzer, Department of Geography & Earth Sciences, Aberystwyth University
#' (United Kingdom)
#'
#'@seealso [read_RF2R], [analyse_IRSAR.RF]
#'
#'@keywords datagen plot
#'
#'@examples
#'
#'## simple example
#'file <- system.file("extdata", "RF_file.rf", package = "Luminescence")
#'temp <- read_RF2R(file)
#'plot_ROI(temp)
#'
#'## in combination with extract_ROI()
#'m <- matrix(runif(100,0,255), ncol = 10, nrow = 10)
#'roi <- matrix(c(2.,4,2,5,6,7,3,1,1), ncol = 3)
#'t <- extract_ROI(object = m, roi = roi)
#'plot_ROI(t, bg_image = m)
#'
#'@md
#'@export
plot_ROI <- function(
  object,
  exclude_ROI = c(1),
  dist_thre = -Inf,
  dim.CCD = NULL,
  bg_image = NULL,
  plot = TRUE,
  ...) {

  ## helper function to extract the spatial information of one ROI from an
  ## RLum.Analysis object created by read_RF2R(); returns a 9-element
  ## character vector that is later converted to numeric
  .spatial_data <- function(x) {
    ## ignore all non-RLum.Analysis input
    if (!inherits(x, "RLum.Analysis") || x@originator != "read_RF2R")
      stop("[plot_ROI()] Input for 'object' not supported, please check documentation!", call. = FALSE)

    ##extract some of the elements
    info <- x@info
    info$ROI <- strsplit(split = " ", info$ROI, fixed = TRUE)[[1]][2]
    c(ROI = info$ROI,
      x = info$x,
      y = info$y,
      area = info$area,
      width = info$width,
      height = info$height,
      img_width = info$image_width,
      img_height = info$image_height,
      grain_d = info$grain_d)
  }

  ## results from extract_ROI() already carry a ready-made coordinate matrix
  if(is(object, "RLum.Results") && object@originator == "extract_ROI") {
    m <- object@data$roi_coord

  } else {
    ## make sure the object is a list
    if(!is.list(object)) object <- list(object)

    ##extract values and convert to numeric matrix
    m <- t(vapply(object, .spatial_data, character(length = 9)))

    ##make numeric
    storage.mode(m) <- "numeric"

    ## correct coordinates (they come in odd from the file)
    m[,"x"] <- m[,"x"] + m[,"width"] / 2
    m[,"y"] <- m[,"y"] + m[,"height"] / 2
  }

  ## make sure the ROI selection works: nrow(m) + 1 is used as a sentinel —
  ## as a negative subscript it is out of bounds and therefore excludes
  ## nothing from the matrix
  if(is.null(exclude_ROI[1]) || exclude_ROI[1] <= 0)
    exclude_ROI <- nrow(m) + 1

  ## add mid_x and mid_y
  m <- cbind(m,
             mid_x = c(m[,"x"] + m[,"width"] / 2),
             mid_y = c(m[,"y"] + m[,"height"] / 2))

  rownames(m) <- m[,"ROI"]

  ## distance calculation; keep one untouched copy (euc_dist) for the return
  ## value and one working copy (sel_euc_dist) for the threshold selection
  euc_dist <- sel_euc_dist <- stats::dist(m[-exclude_ROI,c("mid_x","mid_y")])

  ## distance threshold selector: NA-out all pairs below the threshold, then
  ## collect the ROI ids that survive in every pairwise comparison
  sel_euc_dist[sel_euc_dist < dist_thre[1]] <- NA
  sel_euc_dist <- suppressWarnings(as.numeric(rownames(na.exclude(as.matrix(sel_euc_dist)))))

  ## add information to matrix
  m <- cbind(m, dist_sel = FALSE)
  m[m[,"ROI"]%in%sel_euc_dist,"dist_sel"] <- TRUE

  ## --- Plotting ---
  if(plot) {
    ## default plot settings, overridable via ...
    plot_settings <- modifyList(x = list(
      xlim = c(0, max(m[, "img_width"])),
      ylim = c(max(m[, "img_height"]), 0),
      xlab = "width [px]",
      ylab = "height [px]",
      main = "Spatial ROI Distribution",
      frame.plot = FALSE,
      lwd.ROI = 0.75,
      lty.ROI = 2,
      col.ROI = "black",
      col.pixel = rgb(0,1,0,0.6),
      text.labels = m[,"ROI"],
      text.offset = 0.3,
      grid = FALSE,
      legend = TRUE,
      legend.text = c("ROI", "sel. pixel", "> dist_thre"),
      legend.pos = "topright"
    ), val = list(...))

    ## set plot area; only arguments known to plot.default are passed through
    do.call(
      what = plot.default,
      args = c(x = NA, y = NA,
               plot_settings[names(plot_settings) %in% methods::formalArgs(plot.default)])
    )

    ## add background image if available
    if(!is.null(bg_image)){
      a <- try({
        as(bg_image, "RLum.Data.Image")
      }, silent = TRUE)

      if(inherits(a, "try-error")) {
        warning("[plot_ROI()] 'bg_image' is not of type RLum.Data.Image and cannot be converted into such; background image plot skipped!")

      } else {
        a <- a@data
        ## column order is reversed so the image matches the flipped y-axis
        graphics::image(
          x = 1:nrow(a[, , 1]),
          y = 1:ncol(a[, , 1]),
          z = a[,order(1:dim(a)[2], decreasing = TRUE),1],
          add = TRUE,
          col = grDevices::hcl.colors(20, "inferno", rev = FALSE),
          useRaster = TRUE)
      }
    }

    if (plot_settings$grid)
      grid(nx = max(m[,"img_width"]), ny = max(m[,"img_height"]))

    ## plot metric scale (inner axes without ticks; lwd = -1 hides the lines)
    if (!is.null(dim.CCD)) {
      axis(
        side = 1,
        at = axTicks(1),
        labels = paste(floor(dim.CCD[1] / max(m[,"img_width"]) * axTicks(1)), "\u00b5m"),
        lwd = -1,
        lwd.ticks = -1,
        line = -2.2,
        cex.axis = 0.8
      )
      axis(
        side = 2,
        at = axTicks(2)[-1],
        labels = paste(floor(dim.CCD[2] / max(m[,"img_height"]) * axTicks(2)), "\u00b5m")[-1],
        lwd = -1,
        lwd.ticks = -1,
        line = -2.2,
        cex.axis = 0.8
      )
    }

    ## add circles and rectangles
    for (i in 1:nrow(m)) {
      if (!i%in%exclude_ROI) {
        ## mark selected pixels
        polygon(
          x = c(m[i, "x"] - m[i, "width"]/ 2, m[i, "x"] - m[i, "width"]/ 2,
                m[i, "x"] + m[i, "width"]/2, m[i, "x"] + m[i, "width"]/2),
          y = c(m[i, "y"] - m[i, "height"]/ 2, m[i, "y"] + m[i, "height"]/ 2,
                m[i, "y"] + m[i, "height"]/ 2, m[i, "y"] - m[i, "height"]/ 2),
          col = plot_settings$col.pixel
        )
      }

      ## add ROIs; NOTE(review): rx and ry both use "width", so ROIs are
      ## always drawn as circles — presumably intentional for round grains,
      ## confirm if elliptic ROIs should be supported
      shape::plotellipse(
        rx = m[i, "width"] / 2,
        ry = m[i, "width"] / 2,
        mid = c(m[i, "x"], m[i, "y"]),
        lcol = plot_settings$col.ROI,
        lty = plot_settings$lty.ROI,
        lwd = plot_settings$lwd.ROI)
    }

    ## add distance marker (red cross) for ROIs below the distance threshold
    points(
      x = m[!m[,"ROI"]%in%sel_euc_dist & !m[,"ROI"]%in%exclude_ROI, "x"],
      y = m[!m[,"ROI"]%in%sel_euc_dist & !m[,"ROI"]%in%exclude_ROI, "y"],
      pch = 4,
      col = "red")

    ## add text
    if(length(m[-exclude_ROI,"x"]) > 0) {
      graphics::text(
        x = m[-exclude_ROI, "x"],
        y = m[-exclude_ROI, "y"],
        labels = plot_settings$text.labels[-exclude_ROI],
        cex = 0.6,
        col = if(!is.null(bg_image)) "white" else "black",
        pos = 1,
        offset = plot_settings$text.offset
      )
    }

    ##add legend
    if(plot_settings$legend) {
      ## NOTE(review): bty = "" is not a documented value for legend()
      ## ("o"/"n" expected); the box is effectively suppressed via
      ## box.lty = 0 — confirm whether bty = "n" was intended
      legend(
        plot_settings$legend.pos,
        bty = "",
        pch = c(1, 15, 4),
        box.lty = 0,
        bg = rgb(1,1,1,0.7),
        legend = plot_settings$legend.text,
        col = c(plot_settings$col.ROI, plot_settings$col.pixel, "red")
      )
    }
  }##end if plot

  ## return results (invisibly; the plot is the primary output)
  invisible(set_RLum(
    class = "RLum.Results",
    data = list(
      ROI = m,
      euc_dist = euc_dist),
    info = list(
      call = sys.call()
    )))
}
Luminescence/R/read_PSL2R.R0000644000176200001440000002661714521207352015062 0ustar liggesusers#' @title Import PSL files to R
#'
#' @description Imports PSL files produced by a SUERC portable OSL reader into R **(BETA)**.
#'
#' @details This function provides an import routine for the SUERC portable OSL Reader PSL
#' format. PSL files are just plain text and can be viewed with any text editor.
#' Due to the formatting of PSL files this import function relies heavily on
#' regular expression to find and extract all relevant information. See **note**.
#'
#' @param file [character] (**required**):
#' path and file name of the PSL file. If input is a `vector` it should comprise
#' only `character`s representing valid paths and PSL file names.
#' Alternatively the input character can be just a directory (path). In this case the
#' the function tries to detect and import all PSL files found in the directory.
#'
#' @param drop_bg [logical] (*with default*):
#' `TRUE` to automatically remove all non-OSL/IRSL curves.
#'
#' @param as_decay_curve [logical] (*with default*):
#' Portable OSL Reader curves are often given as cumulative light sum curves.
#' Use `TRUE` (default) to convert the curves to the more usual decay form.
#'
#' @param smooth [logical] (*with default*):
#' `TRUE` to apply Tukey's Running Median Smoothing for OSL and IRSL decay curves.
#' Smoothing is encouraged if you see random signal drops within the decay curves related
#' to hardware errors.
#'
#' @param merge [logical] (*with default*):
#' `TRUE` to merge all `RLum.Analysis` objects. Only applicable if multiple
#' files are imported.
#'
#' @param ... currently not used.
#'
#' @return
#' Returns an S4 [RLum.Analysis-class] object containing
#' [RLum.Data.Curve-class] objects for each curve.
#'
#' @seealso [RLum.Analysis-class], [RLum.Data.Curve-class], [RLum.Data.Curve-class]
#'
#' @author Christoph Burow, University of Cologne (Germany)
#'
#' @section Function version: 0.0.2
#'
#' @note
#' Because this function relies heavily on regular expressions to parse
#' PSL files it is currently only in beta status. If the routine fails to import
#' a specific PSL file please report to `` so the
#' function can be updated.
#'
#' @keywords IO
#'
#' @examples
#'
#' # (1) Import PSL file to R
#'
#' file <- system.file("extdata", "DorNie_0016.psl", package = "Luminescence")
#' psl <- read_PSL2R(file, drop_bg = FALSE, as_decay_curve = TRUE, smooth = TRUE, merge = FALSE)
#' print(str(psl, max.level = 3))
#' plot(psl, combine = TRUE)
#'
#' @md
#' @export
read_PSL2R <- function(file, drop_bg = FALSE, as_decay_curve = TRUE, smooth = FALSE,
                       merge = FALSE, ...) {

  ## INPUT VALIDATION ----
  ## a single input that does not look like a .psl file is treated as a
  ## directory and expanded to all .psl files found inside
  if (length(file) == 1) {
    if (!grepl(".psl$", file, ignore.case = TRUE)) {
      file <- list.files(file, pattern = ".psl$", full.names = TRUE, ignore.case = TRUE)
      message("The following files were found and imported: \n",
              paste(file, collapse = "\n"))
    }
  }

  ## FIX: previously an empty result of the directory expansion slipped
  ## through (all(file.exists(character(0))) is TRUE) and crashed later with
  ## an obscure readLines() error; fail early with a clear message instead
  if (length(file) == 0)
    stop("No .psl file found, please check the input for 'file'.", call. = FALSE)

  if (!all(file.exists(file)))
    stop("The following files do not exist, please check: \n",
         paste(file[!file.exists(file)], collapse = "\n"), call. = FALSE)

  ## MAIN ----
  results <- vector("list", length(file))

  ## FIX: seq_along() instead of 1:length(file) — safe for any input length
  for (i in seq_along(file)) {

    ## Read in file ----
    doc <- readLines(file[i])

    ## Document formatting ----
    # remove lines with i) blanks only, ii) dashes, iii) equal signs
    doc <- gsub("^[ ]*$", "", doc)
    doc <- gsub("^[ -]*$", "", doc)
    doc <- gsub("^[ =]*$", "", doc)

    # the header ends with date and time with the previous line starting with a single slash
    lines_with_slashes <- doc[grepl("\\", doc, fixed = TRUE)]

    ## OFFENDING LINE: this deletes the line with sample name and time and date
    sample_and_date <- lines_with_slashes[length(lines_with_slashes)]
    sample <- trimws(gsub("\\\\", "", strsplit(sample_and_date, "@")[[1]][1]))
    date_and_time <- strsplit(strsplit(sample_and_date, "@")[[1]][2], " ")[[1]]
    date_and_time_clean <- date_and_time[date_and_time != "" & date_and_time != "/" & date_and_time != "PM" & date_and_time != "AM"]
    date <- as.Date(date_and_time_clean[1], "%m/%d/%Y")
    # NOTE(review): format() on a character vector ignores the 'format'
    # argument, so this is effectively a pass-through of the raw time string
    time <- format(date_and_time_clean[2], format = "%h:%M:%S")
    doc <- gsub(lines_with_slashes[length(lines_with_slashes)], "", fixed = TRUE, doc)

    # last delimiting line before measurements are only apostrophes and dashes
    lines_with_apostrophes <- doc[grepl("'", doc, fixed = TRUE)]
    doc <- gsub(lines_with_apostrophes[length(lines_with_apostrophes)], "", fixed = TRUE, doc)

    # finally remove all empty lines
    doc <- doc[doc != ""]

    ## Split document ----
    begin_of_measurements <- grep("Measurement :", doc, fixed = TRUE)
    number_of_measurements <- length(begin_of_measurements)

    # Parse and format header
    header <- doc[1:(begin_of_measurements[1]-1)]
    header <- format_Header(header)

    # add sample name, date and time to header list
    header$Date <- date
    header$Time <- time
    header$Sample <- sample

    # Parse and format the measurement values
    measurements_split <- vector("list", number_of_measurements)

    # save lines of each measurement to individual list elements;
    # the last measurement runs to the end of the document
    # (max() on the scalar was a no-op and has been dropped)
    for (j in seq_len(number_of_measurements)) {
      if (j != number_of_measurements)
        measurements_split[[j]] <- doc[begin_of_measurements[j]:(begin_of_measurements[j+1] - 1)]
      else
        measurements_split[[j]] <- doc[begin_of_measurements[j]:length(doc)]
    }

    # format each measurement; this will return a list of RLum.Data.Curve objects
    measurements_formatted <- lapply(measurements_split, function(x) {
      format_Measurements(x, convert = as_decay_curve, header = header)
    })

    # drop dark count measurements if needed (USER curves become NULL and
    # are filtered out afterwards)
    if (drop_bg) {
      measurements_formatted <- lapply(measurements_formatted, function(x) {
        if (x@recordType != "USER")
          return(x)
      })
      measurements_formatted <- measurements_formatted[!sapply(measurements_formatted, is.null)]
    }

    # decay curve smoothing using Tukey's Running Median Smoothing (?smooth)
    if (smooth) {
      measurements_formatted <- lapply(measurements_formatted, function(x) {
        if (x@recordType != "USER")
          x@data[,2] <- smooth(x@data[ ,2])
        return(x)
      })
    }

    ## RETURN ----
    results[[i]] <- set_RLum("RLum.Analysis",
                             protocol = "portable OSL",
                             info = header,
                             records = measurements_formatted)
  }#Eof::Loop

  ## MERGE ----
  if (length(results) > 1 && merge)
    results <- merge_RLum(results)

  ## RETURN ----
  if (length(results) == 1)
    results <- results[[1]]

  return(results)
}

################################################################################
## HELPER FUNCTIONS
################################################################################

## ------------------------- FORMAT MEASUREMENT ----------------------------- ##
format_Measurements <- function(x, convert, header) {

  ## measurement parameters are given in the first line
  settings <- x[1]
  settings_split <- unlist(strsplit(settings, "|", fixed = TRUE))

  # welcome to regex/strsplit hell
  settings_measurement <- trimws(gsub(".*: ", "", settings_split[which(grepl("Measure", settings_split))]))
  settings_stimulation_unit <- gsub("[^0-9]", "", settings_split[which(grepl("Stim", settings_split))])
  settings_on_time <- as.integer(unlist(strsplit(gsub("[^0-9,]", "", settings_split[which(grepl("Off", settings_split))]), ","))[1])
  settings_off_time <- as.integer(unlist(strsplit(gsub("[^0-9,]", "", settings_split[which(grepl("Off", settings_split))]), ","))[2])
  settings_cycle <- na.omit(as.integer(unlist(strsplit(gsub("[^0-9,]", "", settings_split[which(grepl("No", settings_split))]), ","))))[1]
  settings_stimulation_time <- na.omit(as.integer(unlist(strsplit(gsub("[^0-9,]", "", settings_split[which(grepl("No", settings_split))]), ","))))[2]

  settings_list <- list("measurement" = settings_measurement,
                        "stimulation_unit" = switch(settings_stimulation_unit,
                                                    "0" = "USER",
                                                    "1" = "IRSL",
                                                    "2" = "OSL"),
                        "on_time" = settings_on_time,
                        "off_time" = settings_off_time,
                        "cycle" = settings_cycle,
                        "stimulation_time" = settings_stimulation_time)

  ## terminal counts are given in the last line
  terminal_count_text <- x[length(x)]
  terminal_count_text_formatted <- gsub("[^0-9]", "", unlist(strsplit(terminal_count_text, "/")))
  terminal_count <- as.numeric(terminal_count_text_formatted[1])
  terminal_count_error <- as.numeric(terminal_count_text_formatted[2])

  ## parse values and create a data frame
  x_stripped <- x[-c(1, 2, length(x))]
  df <- data.frame(matrix(NA, ncol = 5, nrow =
length(x_stripped)))

  # parse the whitespace-separated numeric columns of each measurement line;
  # FIX: seq_along() instead of 1:length() — safe when no data lines remain
  for (i in seq_along(x_stripped)) {
    x_split <- unlist(strsplit(x_stripped[i], " "))
    x_split <- x_split[x_split != ""]
    x_split_clean <- gsub("[^0-9\\-]", "", x_split)
    x_split_cleaner <- x_split_clean[x_split_clean != "-"]
    df[i, ] <- as.numeric(x_split_cleaner)
  }
  names(df) <- c("time", "counts", "counts_error", "counts_per_cycle", "counts_per_cycle_error")

  # shape of the curve: decay or cumulative
  if (convert)
    data <- matrix(c(df$time, df$counts_per_cycle), ncol = 2)
  else
    data <- matrix(c(df$time, df$counts), ncol = 2)

  # determine the stimulation type
  if (grepl("Stim 0", settings)) {
    recordType <- "USER"
  }
  if (grepl("Stim 1", settings)) {
    recordType <- "IRSL"
  }
  if (grepl("Stim 2", settings)) {
    recordType <- "OSL"
  }

  object <- set_RLum(class = "RLum.Data.Curve",
                     originator = "read_PSL2R",
                     recordType = recordType,
                     curveType = "measured",
                     data = data,
                     info = list(settings = c(settings_list, header),
                                 raw_data = df))
  return(object)
}

## ---------------------------- FORMAT HEADER ------------------------------- ##
## Parse the free-text header block of a PSL file into a named list of
## character values (one element per "name : value" pair).
format_Header <- function(x) {

  header_formatted <- list()

  # split by double blanks
  header_split <- strsplit(x, "  ", fixed = TRUE)

  # check whether there are twice as many values as colons; if there is an
  # equal amount, the previous split was not sufficient and we need to
  # further split by a colon (that is followed by a blank)
  header_split_clean <- lapply(header_split, function(x) {
    x <- x[x != ""]
    n_elements <- length(x)
    n_properties <- length(grep(":", x, fixed = TRUE))
    if (n_elements / n_properties == 1)
      x <- unlist(strsplit(x, ": ", fixed = TRUE))
    return(x)
  })

  # format parameter/settings names and corresponding values
  values <- vector(mode = "character")
  names <- vector(mode = "character")

  # FIX: seq_along() instead of 1:length() — safe for an empty header
  for (i in seq_along(header_split_clean)) {

    ## FIX: skip zero-length elements; for those the original inner
    ## seq(1, 0, 2) call raised "wrong sign in 'by' argument"
    if (length(header_split_clean[[i]]) == 0)
      next

    # elements alternate name/value, hence the step of 2
    for (j in seq(1, length(header_split_clean[[i]]), 2)) {
      names <- c(names, header_split_clean[[i]][j])
      values <- c(values, header_split_clean[[i]][j + 1])
    }
  }

  # some RegExing for nice reading
  names <- gsub("[: ]$", "", names, perl = TRUE)
  names <- gsub("^ ", "", names)
  names <- gsub(" $", "", names)

  # for some weird reason "offset subtract" starts with '256 '
  names <- gsub("256 ", "", names)

  # finally, replace all blanks with underscores
  names <- gsub(" ", "_", names)

  values <- gsub("[: ]$", "", values, perl = TRUE)
  values <- gsub("^ ", "", values)
  values <- gsub(" $", "", values)

  # return header as list
  header <- as.list(values)
  names(header) <- names

  return(header)
}
Luminescence/R/analyse_Al2O3C_ITC.R0000644000176200001440000002622214503014746016356 0ustar liggesusers#' @title Al2O3 Irradiation Time Correction Analysis
#'
#' @description The function provides a very particular analysis to correct the irradiation
#' time while irradiating Al2O3:C chips in a luminescence reader.
#'
#' @details Background: Due to their high dose sensitivity Al2O3:C chips are usually
Supported arguments are: #' #' \tabular{lll}{ #' **ARGUMENT** \tab **FUNCTION** \tab **DESCRIPTION**\cr #' `mode` \tab `plot_GrowthCurve` \tab as in [plot_GrowthCurve]; sets the mode used for fitting\cr #' `fit.method` \tab `plot_GrowthCurve` \tab as in [plot_GrowthCurve]; sets the function applied for fitting\cr #' } #' #' @param object [RLum.Analysis-class] or [list] **(required)**: #' results obtained from the measurement. #' Alternatively a list of [RLum.Analysis-class] objects can be provided to allow an automatic analysis #' #' @param signal_integral [numeric] (*optional*): #' signal integral, used for the signal and the background. #' If nothing is provided the full range is used. Argument can be provided as [list]. #' #' @param dose_points [numeric] (*with default*): #' vector with dose points, if dose points are repeated, only the general #' pattern needs to be provided. Default values follow the suggestions #' made by Kreutzer et al., 2018. Argument can be provided as [list]. #' #' @param recordType [character] (*with default*): input curve selection, which is passed to #' function [get_RLum]. To deactivate the automatic selection set the argument to `NULL` #' #' @param method_control [list] (*optional*): #' optional parameters to control the calculation. #' See details for further explanations #' #' @param verbose [logical] (*with default*): #' enable/disable verbose mode #' #' @param plot [logical] (*with default*): #' enable/disable plot output #' #' @param ... 
further arguments that can be passed to the plot output #' #' @return #' Function returns results numerically and graphically: #' #' -----------------------------------\cr #' `[ NUMERICAL OUTPUT ]`\cr #' -----------------------------------\cr #' #' **`RLum.Results`**-object #' #' **slot:** **`@data`** #' #' \tabular{lll}{ #' **Element** \tab **Type** \tab **Description**\cr #' `$data` \tab `data.frame` \tab correction value and error \cr #' `$table` \tab `data.frame` \tab table used for plotting \cr #' `$table_mean` \tab `data.frame` \tab table used for fitting \cr #' `$fit` \tab `lm` or `nls` \tab the fitting as returned by the function [plot_GrowthCurve] #' } #' #'**slot:** **`@info`** #' #' The original function call #' #' ------------------------\cr #' `[ PLOT OUTPUT ]`\cr #' ------------------------\cr #' #' - A dose response curve with the marked correction values #' #' @section Function version: 0.1.1 #' #' @author Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #' @seealso [plot_GrowthCurve] #' #' @references #' #' Kreutzer, S., Martin, L., Guérin, G., Tribolo, C., Selva, P., Mercier, N., 2018. Environmental Dose Rate #' Determination Using a Passive Dosimeter: Techniques and Workflow for alpha-Al2O3:C Chips. #' Geochronometria 45, 56-67. doi: 10.1515/geochr-2015-0086 #' #' @keywords datagen #' #' @examples #' #' ##load data #' data(ExampleData.Al2O3C, envir = environment()) #' #' ##run analysis #' analyse_Al2O3C_ITC(data_ITC) #' #' @md #' @export analyse_Al2O3C_ITC <- function( object, signal_integral = NULL, dose_points = c(2,4,8,12,16), recordType = c("OSL (UVVIS)"), method_control = NULL, verbose = TRUE, plot = TRUE, ... 
){ # SELF CALL ----------------------------------------------------------------------------------- if(is.list(object)){ ##check whether the list contains only RLum.Analysis objects if(!all(unique(sapply(object, class)) == "RLum.Analysis")){ stop("[analyse_Al2O3C()] All objects in the list provided by 'objects' need to be of type 'RLum.Analysis'", call. = FALSE) } ##expand input arguments if(!is.null(signal_integral)){ signal_integral <- rep(list(signal_integral, length = length(object))) } ##dose points if(is(dose_points, "list")){ dose.points <- rep(dose_points, length = length(object)) }else{ dose_points <- rep(list(dose_points), length = length(object)) } ##method_control ##verbose ##plot ##run analysis results_full <- lapply(1:length(object), function(x){ ##run analysis results <- try(analyse_Al2O3C_ITC( object = object[[x]], signal_integral = signal_integral[[x]], dose_points = dose_points[[x]], method_control = method_control, verbose = verbose, plot = plot, main = ifelse("main"%in% names(list(...)), list(...)$main, paste0("ALQ #",x)), ... )) ##catch error if(inherits(results, "try-error")){ return(NULL) }else{ return(results) } }) ##return return(merge_RLum(results_full)) } # Integrity check --------------------------------------------------------------------------- ##check input object if(!inherits(object, "RLum.Analysis")){ stop("[analyse_Al2O3C_ITC()] 'object' needs to be of type 'RLum.Analysis'", call. = FALSE) } ##TODO ##implement more checks ... 
if you find some time, somehow, somewhere # Preparation --------------------------------------------------------------------------------- ##select curves based on the recordType selection; if not NULL if(!is.null(recordType[1])) object <- get_RLum(object, recordType = recordType, drop = FALSE) #set method control method_control_settings <- list( mode = "extrapolation", fit.method = "EXP" ) ##modify on request if(!is.null(method_control)){ method_control_settings <- modifyList(x = method_control_settings, val = method_control) } ##dose points enhancement ##make sure that the dose_point is enhanced dose_points <- rep(dose_points, times = length(object)/2) # Calculation --------------------------------------------------------------------------------- ##set signal integral if(is.null(signal_integral)){ signal_integral <- c(1:nrow(object[[1]][])) }else{ ##check whether the input is valid, otherwise make it valid if(min(signal_integral) < 1 | max(signal_integral) > nrow(object[[1]][])){ signal_integral <- c(1:nrow(object[[1]][])) warning( paste0( "[analyse_Al2O3C_ITC()] Input for 'signal_integral' corrected to 1:", nrow(object[[1]][]) ), call. 
= FALSE ) } } ##calculate curve sums, assuming the background net_SIGNAL <- vapply(1:length(object[seq(1,length(object), by = 2)]), function(x){ temp_signal <- sum(object[seq(1,length(object), by = 2)][[x]][,2]) temp_background <- sum(object[seq(2,length(object), by = 2)][[x]][,2]) return(temp_signal - temp_background) }, FUN.VALUE = vector(mode = "numeric", length = 1)) ##create data.frames ##single points df <- data.frame( DOSE = dose_points, net_SIGNAL = net_SIGNAL, net_SIGNAL.ERROR = 0, net_SIGNAL_NORM = net_SIGNAL/max(net_SIGNAL), net_SIGNAL_NORM.ERROR = 0 ) ##take mean ##make data frame for all curves for MC runs df_mean <- as.data.frame(data.table::rbindlist(lapply(unique(df$DOSE), function(x){ data.frame( DOSE = x, net_SIGNAL = mean(df[df$DOSE == x, "net_SIGNAL"]), net_SIGNAL.ERROR = sd(df[df$DOSE == x, "net_SIGNAL"]), net_SIGNAL_NORM = mean(df[df$DOSE == x, "net_SIGNAL_NORM"]), net_SIGNAL_NORM.ERROR = sd(df[df$DOSE == x, "net_SIGNAL_NORM"]) ) }))) ##calculate GC GC <- plot_GrowthCurve( sample = df_mean, mode = method_control_settings$mode, output.plotExtended = FALSE, output.plot = FALSE, fit.method = method_control_settings$fit.method, verbose = FALSE ) ##output if(verbose){ cat("\n[analyse_Al2O3C_ITC()]\n") cat(paste0("\n Used fit:\t\t",method_control_settings$fit.method)) cat(paste0("\n Time correction value:\t", round(GC$De$De,3), " \u00B1 ", round(GC$De$De.Error, 3))) cat("\n\n") } # Plotting ------------------------------------------------------------------------------------ if(plot){ ##set plot settings plot_settings <- list( xlab = "Dose [s]", ylab = "Integrated net GSL [a.u.]", main = "Irradiation Time Correction", xlim = c(-5, max(df$DOSE)), ylim = c(0,max(df$net_SIGNAL)), legend.pos = "right", legend.text = "dose points", mtext = "" ) ##modify list on request plot_settings <- modifyList(x = plot_settings, val = list(...)) ##make plot area plot(NA, NA, xlim = plot_settings$xlim, ylim = plot_settings$ylim, xlab = plot_settings$xlab, ylab = 
plot_settings$ylab, main = plot_settings$main) ##add zero lines abline(v = 0) abline(h = 0) ##add dose points points(x = df$DOSE, y = df$net_SIGNAL) ##add dose response curve x <- seq(min(plot_settings$xlim), max(plot_settings$xlim), length.out = 100) lines( x = x, y = eval(GC$Formula) ) ##show offset x <- 0 lines(x = c(-GC$De[1], -GC$De[1]), y = c(eval(GC$Formula), 0), lty = 2, col = "red") shape::Arrows( x0 = 0, y0 = eval(GC$Formula), x1 = as.numeric(-GC$De[1]), y1 = eval(GC$Formula), arr.type = "triangle", arr.adj = -0.5, col = 'red', cex = par()$cex) ##add text text( x = -GC$De[1] / 2, y = eval(GC$Formula), pos = 3, labels = paste(round(GC$De[1],3), "\u00B1", round(GC$De[2], 3)), col = 'red', cex = 0.8) ##add 2nd x-axis axis( side = 1, at = axTicks(side = 1), labels = paste0("(",(axTicks(side = 1) + round(as.numeric(GC$De[1]),2)), ")"), line = 1, col.axis = "red", lwd.ticks = 0, lwd = 0, cex.axis = 0.9 ) ##add legend legend( plot_settings$legend.pos, bty = "n", pch = 1, legend = plot_settings$legend.text ) ##add mtext mtext(side = 3, text = plot_settings$mtext) } # Output -------------------------------------------------------------------------------------- return(set_RLum( class = "RLum.Results", data = list( data = data.frame( VALUE = as.numeric(GC$De$De), VALUE_ERROR = as.numeric(sd(GC$De.MC)) ), table = df, table_mean = df_mean, fit = GC$Fit ), info = list(call = sys.call()) )) } Luminescence/R/use_DRAC.R0000644000176200001440000003731514264017373014615 0ustar liggesusers#' Use DRAC to calculate dose rate data #' #' The function provides an interface from R to DRAC. An R-object or a #' pre-formatted XLS/XLSX file is passed to the DRAC website and the #' results are re-imported into R. #' #' #' @param file [character] (**required**): #' spreadsheet to be passed to the DRAC website for calculation. Can also be a #' DRAC template object obtained from `template_DRAC()`. #' #' @param name [character] (*with default*): #' Optional user name submitted to DRAC. 
If omitted, a random name will be generated #' #' @param print_references (*with default*): #' Print all references used in the input data table to the console. #' #' @param citation_style (*with default*): #' If `print_references = TRUE` this argument determines the output style of the #' used references. Valid options are `"Bibtex"`, `"citation"`, `"html"`, `"latex"` #' or `"R"`. Default is `"text"`. #' #' @param ... Further arguments. #' #' - `url` [character]: provide an alternative URL to DRAC #' - `verbose` [logical]: show or hide console output #' #' @return Returns an [RLum.Results-class] object containing the following elements: #' #' \item{DRAC}{[list]: a named list containing the following elements in slot `@@data`: #' #' \tabular{lll}{ #' `$highlights` \tab [data.frame] \tab summary of 25 most important input/output fields \cr #' `$header` \tab [character] \tab HTTP header from the DRAC server response \cr #' `$labels` \tab [data.frame] \tab descriptive headers of all input/output fields \cr #' `$content` \tab [data.frame] \tab complete DRAC input/output table \cr #' `$input` \tab [data.frame] \tab DRAC input table \cr #' `$output` \tab [data.frame] \tab DRAC output table \cr #' `references`\tab [list] \tab A list of bib entries of used references \cr #' } #' #' } #' \item{data}{[character] or [list] path to the input spreadsheet or a DRAC template} #' \item{call}{[call] the function call} #' \item{args}{[list] used arguments} #' #' The output should be accessed using the function [get_RLum]. #' #' @section Function version: 0.14 #' #' @author #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany)\cr #' Michael Dietze, GFZ Potsdam (Germany)\cr #' Christoph Burow, University of Cologne (Germany) #' #' @references #' Durcan, J.A., King, G.E., Duller, G.A.T., 2015. DRAC: Dose Rate and Age Calculator for trapped charge dating. #' Quaternary Geochronology 28, 54-61. 
#' doi:10.1016/j.quageo.2015.03.012
#'
#' @examples
#'
#' ## (1) Method using the DRAC spreadsheet
#'
#' file <- "/PATH/TO/DRAC_Input_Template.csv"
#'
#' # send the actual IO template spreadsheet to DRAC
#' \dontrun{
#' use_DRAC(file = file)
#' }
#'
#'
#'
#' ## (2) Method using an R template object
#'
#' # Create a template
#' input <- template_DRAC(preset = "DRAC-example_quartz")
#'
#' # Fill the template with values
#' input$`Project ID` <- "DRAC-Example"
#' input$`Sample ID` <- "Quartz"
#' input$`Conversion factors` <- "AdamiecAitken1998"
#' input$`External U (ppm)` <- 3.4
#' input$`errExternal U (ppm)` <- 0.51
#' input$`External Th (ppm)` <- 14.47
#' input$`errExternal Th (ppm)` <- 1.69
#' input$`External K (%)` <- 1.2
#' input$`errExternal K (%)` <- 0.14
#' input$`Calculate external Rb from K conc?` <- "N"
#' input$`Calculate internal Rb from K conc?` <- "N"
#' input$`Scale gammadoserate at shallow depths?` <- "N"
#' input$`Grain size min (microns)` <- 90
#' input$`Grain size max (microns)` <- 125
#' input$`Water content ((wet weight - dry weight)/dry weight) %` <- 5
#' input$`errWater content %` <- 2
#' input$`Depth (m)` <- 2.2
#' input$`errDepth (m)` <- 0.22
#' input$`Overburden density (g cm-3)` <- 1.8
#' input$`errOverburden density (g cm-3)` <- 0.1
#' input$`Latitude (decimal degrees)` <- 30.0000
#' input$`Longitude (decimal degrees)` <- 70.0000
#' input$`Altitude (m)` <- 150
#' input$`De (Gy)` <- 20
#' input$`errDe (Gy)` <- 0.2
#'
#' # use DRAC
#' \dontrun{
#' output <- use_DRAC(input)
#' }
#'
#' @md
#' @export
use_DRAC <- function(
  file,
  name,
  print_references = TRUE,
  citation_style = "text",
  ...
){
  ## TODO:
  ## (1) Keep the data set as unmodified as possible. Check structure and order of parameters
  ## for meaningful combination.
  ##
  ## (2)
  ## Leave it to the user where the calculations made in our package should be used

  # Integrity tests -----------------------------------------------------------------------------
  if (inherits(file, "character")) {
    if (!file.exists(file)) {
      stop("[use_DRAC()] It seems that the file doesn't exist!", call. = FALSE)
    }

    # Import data --------------------------------------------------------------------------------
    ## Import and skip the first rows and remove NA lines and the 2 row, as this row contains
    ## only meta data

    ## DRAC v1.1 - XLS sheet
    ## check if it is the original DRAC table
    if (tools::file_ext(file) == "xls" || tools::file_ext(file) == "xlsx") {
      if (readxl::excel_sheets(file)[1] != "DRAC_1.1_input")
        stop("[use_DRAC()] It looks like that you are not using the original DRAC v1.1 XLSX template. This is currently not supported!", call. = FALSE)

      warning("\n[use_DRAC()] The current DRAC version is 1.2, but you provided the v1.1 excel input template.",
              "\nPlease transfer your data to the new CSV template introduced with DRAC v1.2.", call. = FALSE)

      input.raw <- na.omit(as.data.frame(readxl::read_excel(path = file, sheet = 1, skip = 5)))[-1, ]
    }

    ## DRAC v1.2 - CSV sheet
    if (tools::file_ext(file) == "csv") {
      if (read.csv(file, nrows = 1, header = FALSE)[1] != "DRAC v.1.2 Inputs")
        stop("[use_DRAC()] It looks like that you are not using the original DRAC v1.2 CSV template. This is currently not supported!", call. = FALSE)

      input.raw <- read.csv(file, skip = 8, check.names = FALSE, header = TRUE, stringsAsFactors = FALSE)[-1, ]
    }
  } else if (inherits(file, "DRAC.list")) {
    input.raw <- as.data.frame(file)
  } else if (inherits(file, "DRAC.data.frame")) {
    input.raw <- file
  } else {
    stop("The provided data object is not a valid DRAC template.", call. = FALSE)
  }

  ## DRAC rejects submissions larger than 5000 rows
  if (nrow(input.raw) > 5000)
    stop("[use_DRAC()] The limit of allowed datasets is 5000!", call. = FALSE)

  # Settings ------------------------------------------------------------------------------------
  settings <- list(
    name = if (missing(name)) {
      ## FIX: the original used ifelse() + paste0() without 'collapse'; a scalar
      ## ifelse() returns only the FIRST element of the sampled vector, so the
      ## "random name" was always a single letter. paste(collapse = "") builds
      ## the intended 2-4 character random name; sample(2:4, 1) gives an
      ## integer size instead of a truncated runif() draw.
      paste(sample(if (runif(1, -10, 10) > 0) LETTERS else letters,
                   sample(2:4, 1)),
            collapse = "")
    } else {
      name
    },
    verbose = TRUE,
    url = "https://www.aber.ac.uk/en/dges/research/quaternary/luminescence-research-laboratory/dose-rate-calculator/?show=calculator")

  # override defaults with args in ...
  settings <- modifyList(settings, list(...))

  # Set helper function -------------------------------------------------------------------------
  ## The real data are transferred without any encryption, so we have to mask the original
  ## (0) set masking function: returns n rows of (mean, sd) strings, each
  ## derived from a fresh batch of 30 normal draws around the real values
  .masking <- function(mean, sd, n) {
    temp <- rnorm(n = 30 * n, mean = mean, sd = sd)
    t(vapply(seq(1, length(temp), 30), function(x) {
      c(format(mean(temp[x:(x + 29)]), digits = 2),
        format(sd(temp[x:(x + 29)]), digits = 2))
    }, character(2)))
  }

  # Process data --------------------------------------------------------------------------------
  if (settings$verbose) message("\n\t Preparing data...")

  ## (1) expand the rows in the data.frame a little bit
  mask.df <- input.raw[rep(1:nrow(input.raw), each = 3), ]

  ## (2) generate some meaningful random variables
  ## NOTE(review): only mask.df[[1]] is used below, and the function body
  ## returns NULL when TI:52 == "X" -- presumably only single-row masking is
  ## intended here; verify against template_DRAC() callers before changing.
  mask.df <- lapply(seq(1, nrow(input.raw), 3), function(x) {
    if (mask.df[x, "TI:52"] != "X") {
      ## replace some values - the De value
      mask.df[x:(x + 2), c("TI:52", "TI:53")] <- .masking(
        mean = as.numeric(mask.df[x, "TI:52"]),
        sd = as.numeric(mask.df[x, "TI:53"]),
        n = 3)
      return(mask.df)
    }
  })

  ## (3) bind values: real input first, masked decoys after
  DRAC_submission.df <- rbind(input.raw, mask.df[[1]])

  ## (4) replace ID values by a random prefix plus a zero-padded running number
  ## FIX: same paste0()-without-'collapse' defect as for 'name' above -- the
  ## sampled letters were recycled across rows instead of concatenated into
  ## one constant prefix.
  id_prefix <- paste(sample(if (runif(1, -10, 10) > 0) LETTERS else letters,
                            sample(2:4, 1)),
                     collapse = "")
  if (runif(1, -10, 10) > 0)
    id_prefix <- paste0(id_prefix, "-")

  DRAC_submission.df$`TI:1` <- paste0(
    id_prefix,
    gsub(" ", "0",
         prettyNum(seq(sample(1:50, 1, prob = 50:1 / 50, replace = FALSE),
                       by = 1,
                       length.out = nrow(DRAC_submission.df)),
                   width = 2)))

  ## (5) store the real IDs in a separate object (first rows are the real data)
  DRAC_results.id <- DRAC_submission.df[1:nrow(input.raw), "TI:1"]

  ## (6) shuffle rows so real and masked records are interleaved
  DRAC_submission.df <- DRAC_submission.df[
    sample(x = 1:nrow(DRAC_submission.df), nrow(DRAC_submission.df), replace = FALSE), ]

  ## convert all columns of the data.frame to class 'character'
  for (i in 1:ncol(DRAC_submission.df))
    DRAC_submission.df[, i] <- as.character(DRAC_submission.df[, i])

  if (settings$verbose) message("\t Creating submission string...")

  ## get line by line and remove unwanted characters
  DRAC_submission.string <- sapply(1:nrow(DRAC_submission.df), function(x) {
    paste0(gsub(",", "", toString(DRAC_submission.df[x, ])), "\n")
  })

  ## paste everything together to get the format we want
  DRAC_input <- paste(DRAC_submission.string, collapse = "")

  # Send data to DRAC ---------------------------------------------------------------------------
  if (settings$verbose) message(paste("\t Establishing connection to", settings$url))

  ## send data set to DRAC website and receive response
  DRAC.response <- httr::POST(settings$url,
                              body = list("drac_data[name]" = settings$name,
                                          "drac_data[table]" = DRAC_input))

  ## check for correct response
  if (DRAC.response$status_code != 200) {
    stop(paste0("[use_DRAC()] transmission failed with HTTP status code: ",
                DRAC.response$status_code))
  } else {
    if (settings$verbose) message("\t The request was successful, processing the reply...")
  }

  ## assign DRAC response content
  DRAC.content <- httr::content(x = DRAC.response, as = "text")

  ## if the input was valid from a technical standpoint, but not with regard
  ## contents, we indeed get a valid response, but no DRAC output
  if (!grepl("DRAC Outputs", DRAC.content)) {
    error_start <- max(gregexpr("drac_field_error", DRAC.content)[[1]])
    error_end <- regexec('textarea name=', DRAC.content)[[1]]
    error_msg <- substr(DRAC.content, error_start, error_end)

    on.exit({
      reply <- readline("Do you want to see the DRAC error message (Y/N)?")
      if (reply == "Y" || reply == "y" || reply == 1)
        cat(error_msg)
    })

    stop(paste("\n\t We got a response from the server, but it\n",
               "\t did not contain DRAC output. Please check\n",
               "\t your data and verify its validity.\n"), call. = FALSE)
  } else {
    if (settings$verbose) message("\t Finalising the results...")
  }

  ## split header and content
  DRAC.content.split <- strsplit(x = DRAC.content, split = "DRAC Outputs\n\n")

  ## assign DRAC header part
  DRAC.header <- as.character(DRAC.content.split[[1]][1])

  ## assign DRAC content part (first two rows carry names/labels)
  DRAC.raw <- read.table(text = as.character(DRAC.content.split[[1]][2]),
                         sep = ",", stringsAsFactors = FALSE)

  ## remove first two lines
  DRAC.content <- data.table::fread(as.character(DRAC.content.split[[1]][2]),
                                    sep = ",",
                                    skip = 2,
                                    stringsAsFactors = FALSE,
                                    colClasses = c(V3 = "character"),
                                    data.table = FALSE)

  ## get rid of all the values we do not need anymore (the masked decoys)
  DRAC.content <- subset(DRAC.content, DRAC.content$V1 %in% DRAC_results.id)
  DRAC.content <- DRAC.content[with(DRAC.content, order(V1)), ]

  ## replace by original names
  DRAC.content[, 1] <- input.raw[, 1]

  ## assign column names
  colnames(DRAC.content) <- DRAC.raw[1, ]

  ## save column labels and use them as attributes for the I/O table columns
  DRAC.labels <- DRAC.raw[2, ]
  for (i in 1:length(DRAC.content)) {
    attr(DRAC.content[, i], "description") <- DRAC.labels[1, i]
  }

  ## DRAC also returns the input, so we need to split input and output
  DRAC.content.input <- DRAC.content[, grep("TI:", names(DRAC.content))]
  DRAC.content.output <- DRAC.content[, grep("TO:", names(DRAC.content))]

  ## The DRAC output also contains a highlight table, which results in
  ## duplicate columns. When creating the data.frame duplicate columns
  ## are automatically appended '.1' in their names, so we can identify
  ## and remove them easily
  DRAC.content.input <- DRAC.content.input[, -grep("\\.1", names(DRAC.content.input))]
  DRAC.content.output <- DRAC.content.output[, -grep("\\.1", names(DRAC.content.output))]

  ## for some reason the returned input table is unsorted, so we resort it in increasing order
  DRAC.content.input <- DRAC.content.input[, paste0("TI:", 1:ncol(DRAC.content.input))]

  ## The output table (v1.2) has 198 columns, making it unreasonably complex
  ## for standard data evaluation. We reproduce the DRAC highlight table
  ## and use the descriptions (saved as attributes) as column names.
  highlight.keys <- c("TI:1", "TI:2", "TI:3", "TO:FQ", "TO:FR", "TO:FS", "TO:FT",
                      "TO:FU", "TO:FV", "TO:FW", "TO:FX", "TO:FY", "TO:FZ",
                      "TO:GG", "TO:GH", "TO:GI", "TO:GJ", "TO:GK", "TO:GL",
                      "TO:GM", "TO:GN", "TI:52", "TI:53", "TO:GO", "TO:GP")
  DRAC.highlights <- subset(DRAC.content, select = highlight.keys)
  DRAC.highlights.labels <- as.character(
    DRAC.labels[1, which(unique(names(DRAC.content)) %in% highlight.keys)])
  colnames(DRAC.highlights) <- DRAC.highlights.labels
  for (i in 1:length(DRAC.highlights)) {
    attr(DRAC.highlights[, i], "key") <- highlight.keys[i]
  }

  ## finally, we add the 'DRAC.highlights' class so that we can use a custom print method
  class(DRAC.highlights) <- c("DRAC.highlights", "data.frame")

  ## Final Disclaimer
  messages <- list(
    "\t Done! \n",
    "\t We, the authors of the R package 'Luminescence', do not take any responsibility and we are not liable for any ",
    "\t mistakes or unforeseen misbehaviour. All calculations are done by DRAC and it is outside our reference to",
    "\t verify the input and output. \n",
    "\t Note that this function is only compatible with DRAC version 1.2. Before using this function make sure that",
    "\t this is the correct version, otherwise expect unspecified errors.\n",
    "\t Please ensure you cite the use of DRAC in your work, published or otherwise. Please cite the website name and",
    "\t version (e.g. DRAC v1.2) and the accompanying journal article:",
    "\t Durcan, J.A., King, G.E., Duller, G.A.T., 2015. DRAC: Dose rate and age calculation for trapped charge",
    "\t dating. Quaternary Geochronology 28, 54-61. \n",
    "\t Use 'verbose = FALSE' to hide this message. \n")

  if (settings$verbose) lapply(messages, message)

  ## Get and print used references
  references <- get_DRAC_references(DRAC.content.input)

  if (print_references && settings$verbose) {
    for (i in 1:length(references$refs)) {
      message("\nReference for: ", references$desc[i])
      print(references$refs[[i]], style = citation_style)
    }
  }

  ## return output
  DRAC.return <- set_RLum(
    "RLum.Results",
    data = list(
      DRAC = list(highlights = DRAC.highlights,
                  header = DRAC.header,
                  labels = DRAC.labels,
                  content = DRAC.content,
                  input = DRAC.content.input,
                  output = DRAC.content.output,
                  references = references),
      data = file,
      call = sys.call(),
      args = as.list(sys.call()[-1])))

  invisible(DRAC.return)
}
Luminescence/R/read_XSYG2R.R0000644000176200001440000006653014264017373015222 0ustar liggesusers#' Import XSYG files to R
#'
#' Imports XSYG-files produced by a Freiberg Instruments lexsyg reader into R.
#'
#' **How does the import function work?**
#'
#' The function uses the [xml] package to parse the file structure. Each
#' sequence is subsequently translated into an [RLum.Analysis-class] object.
#'
#' **General structure XSYG format**
#'
#' ```
#'
#'
#'
#'
#' x0 , y0 ; x1 , y1 ; x2 , y2 ; x3 , y3
#'
#'
#'
#' ```
#'
#' So far, each
#' XSYG file can only contain one ``, but multiple
#' sequences.
#'
#' Each record may comprise several curves.
#'
#' **TL curve recalculation**
#'
#' On the FI lexsyg device TL curves are recorded as time against count values.
#' Temperature values are monitored on the heating plate and stored in a #' separate curve (time vs. temperature). If the option #' `recalculate.TL.curves = TRUE` is chosen, the time values for each TL #' curve are replaced by temperature values. #' #' Practically, this means combining two matrices (Time vs. Counts and Time vs. #' Temperature) with different row numbers by their time values. Three cases #' are considered: #' #' 1. HE: Heating element #' 2. PMT: Photomultiplier tube #' 3. Interpolation is done using the function [approx] #' #' CASE (1): `nrow(matrix(PMT))` > `nrow(matrix(HE))` #' #' Missing temperature values from the heating element are calculated using #' time values from the PMT measurement. #' #' CASE (2): `nrow(matrix(PMT))` < `nrow(matrix(HE))` #' #' Missing count values from the PMT are calculated using time values from the #' heating element measurement. #' #' CASE (3): `nrow(matrix(PMT))` == `nrow(matrix(HE))` #' #' A new matrix is produced using temperature values from the heating element #' and count values from the PMT. #' #' **Note:** #' Please note that due to the recalculation of the temperature #' values based on values delivered by the heating element, it may happen that #' multiple count values exists for each temperature value and temperature #' values may also decrease during heating, not only increase. #' #' **Advanced file import** #' #' To allow for a more efficient usage of the function, instead of single path #' to a file just a directory can be passed as input. In this particular case #' the function tries to extract all XSYG-files found in the directory and import #' them all. Using this option internally the function constructs as list of #' the XSYG-files found in the directory. Please note no recursive detection #' is supported as this may lead to endless loops. #' #' @param file [character] or [list] (**required**): #' path and file name of the XSYG file. 
If input is a `list` it should comprise #' only `character`s representing each valid path and XSYG-file names. #' Alternatively the input character can be just a directory (path), in this case the #' the function tries to detect and import all XSYG-files found in the directory. #' #' @param recalculate.TL.curves [logical] (*with default*): #' if set to `TRUE`, TL curves are returned as temperature against count values #' (see details for more information) Note: The option overwrites the time vs. #' count TL curve. Select `FALSE` to import the raw data delivered by the #' lexsyg. Works for TL curves and spectra. #' #' @param fastForward [logical] (*with default*): #' if `TRUE` for a more efficient data processing only a list of `RLum.Analysis` #' objects is returned. #' #' @param import [logical] (*with default*): #' if set to `FALSE`, only the XSYG file structure is shown. #' #' @param pattern [regex] (*with default*): #' optional regular expression if `file` is a link to a folder, to select just #' specific XSYG-files #' #' @param verbose [logical] (*with default*): enable or disable verbose mode. If verbose is `FALSE` #' the `txtProgressBar` is also switched off #' #' @param txtProgressBar [logical] (*with default*): #' enables `TRUE` or disables `FALSE` the progression bar during import #' #' @return #' **Using the option `import = FALSE`** #' #' A list consisting of two elements is shown: #' - [data.frame] with information on file. #' - [data.frame] with information on the sequences stored in the XSYG file. #' #' **Using the option `import = TRUE` (default)** #' #' A list is provided, the list elements #' contain: \item{Sequence.Header}{[data.frame] with information on the #' sequence.} \item{Sequence.Object}{[RLum.Analysis-class] #' containing the curves.} #' #' @note #' This function is a beta version as the XSYG file format is not yet #' fully specified. 
Thus, further file operations (merge, export, write) should #' be done using the functions provided with the package [xml]. #' #' **So far, no image data import is provided!** \cr #' Corresponding values in the XSXG file are skipped. #' #' #' @section Function version: 0.6.8 #' #' #' @author #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #' #' @seealso [xml], [RLum.Analysis-class], [RLum.Data.Curve-class], [approx] #' #' #' @references #' Grehl, S., Kreutzer, S., Hoehne, M., 2013. Documentation of the #' XSYG file format. Unpublished Technical Note. Freiberg, Germany #' #' **Further reading** #' #' XML: [https://en.wikipedia.org/wiki/XML]() #' #' @keywords IO #' #' @examples #' #' ##(1) import XSYG file to R (uncomment for usage) #' #' #FILE <- file.choose() #' #temp <- read_XSYG2R(FILE) #' #' ##(2) additional examples for pure XML import using the package XML #' ## (uncomment for usage) #' #' ##import entire XML file #' #FILE <- file.choose() #' #temp <- XML::xmlRoot(XML::xmlTreeParse(FILE)) #' #' ##search for specific subnodes with curves containing 'OSL' #' #getNodeSet(temp, "//Sample/Sequence/Record[@@recordType = 'OSL']/Curve") #' #' ##(2) How to extract single curves ... after import #' data(ExampleData.XSYG, envir = environment()) #' #' ##grep one OSL curves and plot the first curve #' OSLcurve <- get_RLum(OSL.SARMeasurement$Sequence.Object, recordType="OSL")[[1]] #' #' ##(3) How to see the structure of an object? 
#' structure_RLum(OSL.SARMeasurement$Sequence.Object) #' #' @md #' @export read_XSYG2R <- function( file, recalculate.TL.curves = TRUE, fastForward = FALSE, import = TRUE, pattern = ".xsyg", verbose = TRUE, txtProgressBar = TRUE ){ ##TODO: this function should be reshaped: ## - metadata from the sequence should go into the info slot of the RLum.Analysis object ## >> however, the question is whether this works with subsequent functions ## - currently not all metadata are supported, it should be extended ## - the should be a mode importing ALL metadata ## - xlum should be general, xsyg should take care about subsequent details # Self Call ----------------------------------------------------------------------------------- # Option (a): Input is a list, every element in the list will be treated as file connection # with that many file can be read in at the same time # Option (b): The input is just a path, the function tries to grep ALL xsyg/XSYG files in the # directory and import them, if this is detected, we proceed as list if(is(file, "character")) { ##If this is not really a path we skip this here if (dir.exists(file) & length(dir(file)) > 0) { if(verbose) ("[read_XSYG2R()] Directory detected, trying to extract '*.xsyg' files ...\n") file <- as.list(paste0(file,dir( file, recursive = TRUE, pattern = pattern ))) } } if (is(file, "list")) { temp.return <- lapply(1:length(file), function(x) { read_XSYG2R( file = file[[x]], recalculate.TL.curves = recalculate.TL.curves, fastForward = fastForward, import = import, verbose = verbose, txtProgressBar = txtProgressBar ) }) ##return if (fastForward) { if(import){ return(unlist(temp.return, recursive = FALSE)) }else{ return(as.data.frame(data.table::rbindlist(temp.return))) } }else{ return(temp.return) } } # On exit case -------------------------------------------------------------------------------- ##set file_link for internet downloads file_link <- NULL on_exit <- function(){ ##unlink internet connection 
if(!is.null(file_link)){ unlink(file_link) } } on.exit(expr = on_exit()) # Consistency check ------------------------------------------------------- ##check if file exists if(!file.exists(file)){ ##check if the file as an URL ... you never know if(grepl(pattern = "http", x = file, fixed = TRUE)){ if(verbose){ cat("[read_XSYG2R()] URL detected, checking connection ... ") } ##check URL if(!httr::http_error(file)){ if(verbose) cat("OK") ##dowload file file_link <- tempfile("read_XSYG2R_FILE") download.file(file, destfile = file_link, quiet = if(verbose){FALSE}else{TRUE}) file <- file_link }else{ cat("FAILED") file <- NULL try(stop("[read_XSYG2R()] File does not exist! Return NULL!", call. = FALSE)) return(NULL) } }else{ file <- NULL try(stop("[read_XSYG2R()] File does not exist, return NULL!", call. = FALSE)) return(NULL) } } #TODO to be included again in a future version, if the format is given in the file itself # ##check if file is XML file # if(tail(unlist(strsplit(file, split = "\\.")), 1) != "xsyg" & # tail(unlist(strsplit(file, split = "\\.")), 1) != "XSYG" ){ # # warning("[read_XSYG2R()] File is not of type 'XSYG', nothing imported!") # return(NULL) # # } # (0) config -------------------------------------------------------------- #version.supported <- c("1.0") #additional functions #get spectrum values # TODO: This function could be written also in C++, however, not necessary due to a low demand get_XSYG.spectrum.values <- function(curve.node){ ##1st grep wavelength table wavelength <- XML::xmlAttrs(curve.node)["wavelengthTable"] ##string split wavelength <- as.numeric(unlist(strsplit(wavelength, split = ";", fixed = TRUE))) ##2nd grep time values curve.node <- unlist(strsplit(XML::xmlValue(curve.node), split = ";", fixed = TRUE)) curve.node <- unlist(strsplit(curve.node, split = ",", fixed = TRUE), recursive = FALSE) curve.node.time <- as.numeric(curve.node[seq(1,length(curve.node),2)]) ##3rd grep count values curve.node.count <- 
as.character(curve.node[seq(2,length(curve.node),2)]) ##remove from pattern... curve.node.count <- do.call("gsub", list(pattern="[[]|[]]", replacement=" ", x=curve.node.count)) ##4th combine to spectrum matrix spectrum.matrix <- matrix(0,length(wavelength),length(curve.node.time)) spectrum.matrix <- sapply(1:length(curve.node.time), function(x){ as.numeric(unlist(strsplit(curve.node.count[x], "[|]"))) }) ##change row names (rows are wavelength) rownames(spectrum.matrix) <- round(wavelength, digits=3) ##change column names (columns are time/temp values) colnames(spectrum.matrix) <- round(curve.node.time, digits=3) return(spectrum.matrix) } # (1) Integrity tests ----------------------------------------------------- ##set HUGE for larger nodes HUGE <- 524288 ##parse XML tree using the package XML temp <- try( XML::xmlRoot(XML::xmlTreeParse(file, useInternalNodes = TRUE, options = HUGE)), silent = TRUE) ##show error if(is(temp, "try-error") == TRUE){ try(stop("[read_XSYG2R()] XML file not readable, nothing imported!)", call. 
= FALSE)) return(NULL) } # (2) Further file processing --------------------------------------------- ##==========================================================================## ##SHOW STRUCTURE if(import == FALSE){ ##sample information temp.sample <- as.data.frame(XML::xmlAttrs(temp), stringsAsFactors = FALSE) ##grep sequences files ##set data.frame temp.sequence.header <- data.frame(t(1:length(names(XML::xmlAttrs(temp[[1]])))), stringsAsFactors = FALSE) colnames(temp.sequence.header) <- names(XML::xmlAttrs(temp[[1]])) ##fill information in data.frame for(i in 1:XML::xmlSize(temp)){ temp.sequence.header[i,] <- t(XML::xmlAttrs(temp[[i]])) } ##additional option for fastForward == TRUE if(fastForward){ ##change column header temp.sample <- t(temp.sample) colnames(temp.sample) <- paste0("sample::", colnames(temp.sample)) output <- cbind(temp.sequence.header, temp.sample) }else{ output <- list(Sample = temp.sample, Sequences = temp.sequence.header) } return(output) }else{ ##==========================================================================## ##IMPORT XSYG FILE ##Display output if(verbose) paste0("[read_XSYG2R()]\n Importing: ",file) ##PROGRESS BAR if(verbose && txtProgressBar){ pb <- txtProgressBar(min=0,max=XML::xmlSize(temp), char = "=", style=3) } ##loop over the entire sequence by sequence output <- lapply(1:XML::xmlSize(temp), function(x){ ##read sequence header temp.sequence.header <- as.data.frame(XML::xmlAttrs(temp[[x]]), stringsAsFactors = FALSE) ##account for non set value if(length(temp.sequence.header)!= 0) colnames(temp.sequence.header) <- "" ###----------------------------------------------------------------------- ##LOOP ##read records >> records are combined to one RLum.Analysis object temp.sequence.object <- unlist(lapply(1:XML::xmlSize(temp[[x]]), function(i){ ##get recordType temp.sequence.object.recordType <- try(XML::xmlAttrs(temp[[x]][[i]])["recordType"], silent = TRUE) ##the XSYG file might be broken due to a machine error during the 
measurement, this ##control flow helps; if a try-error is observed NULL is returned if(!inherits(temp.sequence.object.recordType, "try-error")){ ##create a fallback, the function should not fail if(is.null(temp.sequence.object.recordType) || is.na(temp.sequence.object.recordType)){ temp.sequence.object.recordType <- "not_set" } ##correct record type in depending on the stimulator if(temp.sequence.object.recordType == "OSL"){ if(XML::xmlAttrs(temp[[x]][[i]][[ XML::xmlSize(temp[[x]][[i]])]])["stimulator"] == "ir_LED_850" | XML::xmlAttrs(temp[[x]][[i]][[ XML::xmlSize(temp[[x]][[i]])]])["stimulator"] == "ir_LD_850"){ temp.sequence.object.recordType <- "IRSL" } } ##loop 3rd level lapply(1:XML::xmlSize(temp[[x]][[i]]), function(j){ ##get values temp.sequence.object.curveValue <- temp[[x]][[i]][[j]] ##get curveType temp.sequence.object.curveType <- as.character( XML::xmlAttrs(temp[[x]][[i]][[j]])["curveType"]) ##get detector temp.sequence.object.detector <- as.character( XML::xmlAttrs(temp[[x]][[i]][[j]])["detector"]) ##get stimulator temp.sequence.object.stimulator <- as.character( XML::xmlAttrs(temp[[x]][[i]][[j]])["stimulator"]) ##get additional information temp.sequence.object.info <- as.list(XML::xmlAttrs(temp.sequence.object.curveValue)) ##add stimulator and detector and so on temp.sequence.object.info <- c(temp.sequence.object.info, position = as.integer(as.character(temp.sequence.header["position",])), name = as.character(temp.sequence.header["name",])) ## TL curve recalculation ============================================ if(recalculate.TL.curves){ ##TL curve heating values is stored in the 3rd curve of every set if(temp.sequence.object.recordType == "TL" && j == 1){ #grep values from PMT measurement or spectrometer if("Spectrometer" %in% temp.sequence.object.detector == FALSE){ temp.sequence.object.curveValue.PMT <- src_get_XSYG_curve_values(XML::xmlValue( temp[[x]][[i]][[j]])) ##round values (1 digit is the technical resolution of the heating element) 
temp.sequence.object.curveValue.PMT[,1] <- round( temp.sequence.object.curveValue.PMT[,1], digits = 1) #grep values from heating element temp.sequence.object.curveValue.heating.element <- src_get_XSYG_curve_values(XML::xmlValue( temp[[x]][[i]][[3]])) }else{ temp.sequence.object.curveValue.spectrum <- get_XSYG.spectrum.values( temp.sequence.object.curveValue) ##get time values which are stored in the row labels temp.sequence.object.curveValue.spectrum.time <- as.numeric( colnames(temp.sequence.object.curveValue.spectrum)) ##round values (1 digit is technical resolution of the heating element) temp.sequence.object.curveValue.spectrum.time <- round( temp.sequence.object.curveValue.spectrum.time, digits = 1) } #grep values from heating element temp.sequence.object.curveValue.heating.element <- src_get_XSYG_curve_values(XML::xmlValue( temp[[x]][[i]][[3]])) if("Spectrometer" %in% temp.sequence.object.detector == FALSE){ #reduce matrix values to values of the detection temp.sequence.object.curveValue.heating.element <- temp.sequence.object.curveValue.heating.element[ temp.sequence.object.curveValue.heating.element[,1] >= min(temp.sequence.object.curveValue.PMT[,1]) & temp.sequence.object.curveValue.heating.element[,1] <= max(temp.sequence.object.curveValue.PMT[,1]), ,drop = FALSE] }else{ #reduce matrix values to values of the detection temp.sequence.object.curveValue.heating.element <- temp.sequence.object.curveValue.heating.element[ temp.sequence.object.curveValue.heating.element[,1] >= min(temp.sequence.object.curveValue.spectrum.time) & temp.sequence.object.curveValue.heating.element[,1] <= max(temp.sequence.object.curveValue.spectrum.time),] } ## calculate corresponding heating rate, this makes only sense ## for linear heating, therefore is has to be the maximum value ##remove 0 values (not measured) and limit to peak heating.rate.values <- temp.sequence.object.curveValue.heating.element[ temp.sequence.object.curveValue.heating.element[,2] > 0 & 
temp.sequence.object.curveValue.heating.element[,2] <= max(temp.sequence.object.curveValue.heating.element[,2]),,drop = FALSE] heating.rate <- (heating.rate.values[length(heating.rate.values[,2]), 2] - heating.rate.values[1,2])/ (heating.rate.values[length(heating.rate.values[,1]), 1] - heating.rate.values[1,1]) ##round values heating.rate <- round(heating.rate, digits=1) ##add to info element temp.sequence.object.info <- c(temp.sequence.object.info, RATE = heating.rate) ##PERFORM RECALCULATION ##check which object contains more data if("Spectrometer" %in% temp.sequence.object.detector == FALSE){ ##CASE (1) if(nrow(temp.sequence.object.curveValue.PMT) > nrow(temp.sequence.object.curveValue.heating.element)){ temp.sequence.object.curveValue.heating.element.i <- approx( x = temp.sequence.object.curveValue.heating.element[,1], y = temp.sequence.object.curveValue.heating.element[,2], xout = temp.sequence.object.curveValue.PMT[,1], rule = 2) temperature.values <- temp.sequence.object.curveValue.heating.element.i$y count.values <- temp.sequence.object.curveValue.PMT[,2] ##CASE (2) }else if((nrow(temp.sequence.object.curveValue.PMT) < nrow(temp.sequence.object.curveValue.heating.element))){ temp.sequence.object.curveValue.PMT.i <- approx( x = temp.sequence.object.curveValue.PMT[,1], y = temp.sequence.object.curveValue.PMT[,2], xout = temp.sequence.object.curveValue.heating.element[,1], rule = 2) temperature.values <- temp.sequence.object.curveValue.heating.element[,2] count.values <- temp.sequence.object.curveValue.PMT.i$y ##CASE (3) }else{ temperature.values <- temp.sequence.object.curveValue.heating.element[,2] count.values <- temp.sequence.object.curveValue.PMT[,2] } ##combine as matrix temp.sequence.object.curveValue <- as.matrix(cbind( temperature.values, count.values)) ##set curve identifier temp.sequence.object.info$curveDescripter <- "Temperature [\u00B0C]; Counts [a.u.]" }else{ ##CASE (1) here different approach. 
in contrast to the PMT measurements, as ## usually the resolution should be much, much lower for such measurements ## Otherwise we would introduce some pseudo signals, as we have to ## take care of noise later one if(length(temp.sequence.object.curveValue.spectrum.time) != nrow(temp.sequence.object.curveValue.heating.element)){ temp.sequence.object.curveValue.heating.element.i <- approx( x = temp.sequence.object.curveValue.heating.element[,1], y = temp.sequence.object.curveValue.heating.element[,2], xout = temp.sequence.object.curveValue.spectrum.time, rule = 2, ties = mean) temperature.values <- temp.sequence.object.curveValue.heating.element.i$y ##check for duplicated values and if so, increase this values if(anyDuplicated(temperature.values)>0){ temperature.values[which(duplicated(temperature.values))] <- temperature.values[which(duplicated(temperature.values))]+1 warning("[read_XSYG2R()] Temperatures values are found to be duplicated and increased by 1 K") } ##CASE (2) (equal) }else{ temperature.values <- temp.sequence.object.curveValue.heating.element[,2] } ##reset values of the matrix colnames(temp.sequence.object.curveValue.spectrum) <- temperature.values temp.sequence.object.curveValue <- temp.sequence.object.curveValue.spectrum ##change curve descriptor temp.sequence.object.info$curveDescripter <- "Temperature [\u00B0C]; Wavelength [nm]; Counts [1/ch]" } }##endif }##endif recalculate.TL.curves == TRUE # Cleanup info objects ------------------------------------------------------------------------ if("curveType" %in% names(temp.sequence.object.info)) temp.sequence.object.info[["curveType"]] <- NULL # Set RLum.Data-objects ----------------------------------------------------------------------- if("Spectrometer" %in% temp.sequence.object.detector == FALSE){ if(is(temp.sequence.object.curveValue, "matrix") == FALSE){ temp.sequence.object.curveValue <- src_get_XSYG_curve_values(XML::xmlValue(temp.sequence.object.curveValue)) } set_RLum( class = 
"RLum.Data.Curve", originator = "read_XSYG2R", recordType = paste(temp.sequence.object.recordType, " (", temp.sequence.object.detector,")", sep = ""), curveType = temp.sequence.object.curveType, data = temp.sequence.object.curveValue, info = temp.sequence.object.info) }else if("Spectrometer" %in% temp.sequence.object.detector == TRUE) { if(is(temp.sequence.object.curveValue, "matrix") == FALSE){ temp.sequence.object.curveValue <- get_XSYG.spectrum.values(temp.sequence.object.curveValue) } set_RLum( class = "RLum.Data.Spectrum", originator = "read_XSYG2R", recordType = paste(temp.sequence.object.recordType, " (",temp.sequence.object.detector,")", sep = ""), curveType = temp.sequence.object.curveType, data = temp.sequence.object.curveValue, info = temp.sequence.object.info) } }) }else{ return(NULL) }##if-try condition }), use.names = FALSE) ##if the XSYG file is broken we get NULL as list element if (!is.null(temp.sequence.object)) { ##set RLum.Analysis object temp.sequence.object <- set_RLum( originator = "read_XSYG2R", class = "RLum.Analysis", records = temp.sequence.object, protocol = as.character(temp.sequence.header["protocol",1]) ) ##set parent uid of RLum.Anlaysis as parent ID of the records temp.sequence.object <- .set_pid(temp.sequence.object) ##update progress bar if (verbose && txtProgressBar) { setTxtProgressBar(pb, x) } ##merge output and return values if(fastForward){ return(temp.sequence.object) }else{ return(list(Sequence.Header = temp.sequence.header, Sequence.Object = temp.sequence.object)) } }else{ return(temp.sequence.object) } })##end loop for sequence list ##close ProgressBar if(verbose && txtProgressBar ){close(pb)} ##show output informatioj if(length(output[sapply(output, is.null)]) == 0){ if(verbose) paste("\t >>",XML::xmlSize(temp), " sequence(s) loaded successfully.\n") }else{ if(verbose){ paste("\t >>",XML::xmlSize(temp), " sequence(s) in file.", XML::xmlSize(temp)-length(output[sapply(output, is.null)]), "sequence(s) loaded successfully. 
\n") } warning(paste0(length(output[sapply(output, is.null)])), " incomplete sequence(s) removed.") } ##output invisible(output) }#end if ##get rid of the NULL elements (as stated before ... invalid files) return(output[!sapply(output,is.null)]) } Luminescence/R/read_SPE2R.R0000644000176200001440000003243414264017373015053 0ustar liggesusers#' @title Import Princeton Instruments (TM) SPE-file into R #' #' @description Function imports Princeton Instruments (TM) SPE-files into R environment and #' provides [RLum.Data.Image-class] objects as output. #' #' @details Function provides an R only import routine for the Princeton Instruments #' SPE format. Import functionality is based on the file format description provided by #' Princeton Instruments and a MatLab script written by Carl Hall (s. #' references). #' #' @param file [character] (**required**): #' SPE-file name (including path), e.g. #' - `[WIN]`: `read_SPE2R("C:/Desktop/test.spe")` #' - `[MAC/LINUX]`: `readSPER("/User/test/Desktop/test.spe")`. Additionally internet connections #' are supported. #' #' @param output.object [character] (*with default*): #' set `RLum` output object. Allowed types are `"RLum.Data.Spectrum"`, #' `"RLum.Data.Image"` or `"matrix"` #' #' @param frame.range [vector] (*optional*): #' limit frame range, e.g. select first 100 frames by `frame.range = c(1,100)` #' #' @param txtProgressBar [logical] (*with default*): #' enables or disables [txtProgressBar]. #' #' @param verbose [logical] (*with default*): enables or disables verbose mode #' #' @return #' Depending on the chosen option the functions returns three different #' type of objects: #' #' `output.object` #' #' `RLum.Data.Spectrum` #' #' An object of type [RLum.Data.Spectrum-class] is returned. Row #' sums are used to integrate all counts over one channel. #' #' `RLum.Data.Image` #' #' An object of type [RLum.Data.Image-class] is returned. 
Due to #' performance reasons the import is aborted for files containing more than 100 #' frames. This limitation can be overwritten manually by using the argument #' `frame.range`. #' #' `matrix` #' #' Returns a matrix of the form: Rows = Channels, columns = Frames. For the #' transformation the function [get_RLum] is used, #' meaning that the same results can be obtained by using the function #' [get_RLum] on an `RLum.Data.Spectrum` or `RLum.Data.Image` object. #' #' @note #' **The function does not test whether the input data are spectra or pictures for spatial resolved analysis!** #' #' The function has been successfully tested for SPE format versions 2.x. #' #' *Currently not all information provided by the SPE format are supported.* #' #' @section Function version: 0.1.4 #' #' @author #' Sebastian Kreutzer, Institute of Geography, Heidelberg University (Germany) #' #' @seealso [readBin], [RLum.Data.Spectrum-class] #' #' @references #' Princeton Instruments, 2014. Princeton Instruments SPE 3.0 File #' Format Specification, Version 1.A (for document URL please use an internet search machine) #' #' Hall, C., 2012: readSPE.m. 
#' `https://www.mathworks.com/matlabcentral/fileexchange/35940-readspe` #' #' @keywords IO #' #' @examples #' #' ## to run examples uncomment lines and run the code #' #' ##(1) Import data as RLum.Data.Spectrum object #' #file <- file.choose() #' #temp <- read_SPE2R(file) #' #temp #' #' ##(2) Import data as RLum.Data.Image object #' #file <- file.choose() #' #temp <- read_SPE2R(file, output.object = "RLum.Data.Image") #' #temp #' #' ##(3) Import data as matrix object #' #file <- file.choose() #' #temp <- read_SPE2R(file, output.object = "matrix") #' #temp #' #' ##(4) Export raw data to csv, if temp is a RLum.Data.Spectrum object #' # write.table(x = get_RLum(temp), #' # file = "[your path and filename]", #' # sep = ";", row.names = FALSE) #' #' #' @md #' @export read_SPE2R <- function( file, output.object = "RLum.Data.Image", frame.range, txtProgressBar = TRUE, verbose = TRUE ){ # Consistency check ------------------------------------------------------- ##check if file exists if(!file.exists(file)){ ##check if the file as an URL ... you never know if(grepl(pattern = "http", x = file, fixed = TRUE)){ if(verbose){ cat("[read_SPE2R()] URL detected, checking connection ... ") } ##check URL if(!httr::http_error(file)){ if(verbose) cat("OK") ##download file file_link <- tempfile("read_SPE2R_FILE", fileext = ".SPE") download.file(file, destfile = file_link, quiet = if(verbose){FALSE}else{TRUE}, mode = "wb") file <- file_link }else{ cat("FAILED") file <- NULL try(stop("[read_SPE2R()] File does not exist! Return NULL!", call. = FALSE)) return(NULL) } }else{ file <- NULL try(stop("[read_SPE2R()] File does not exist! Return NULL!", call. = FALSE)) return(NULL) } } ##check file extension if(!grepl(basename(file), pattern = "SPE$", ignore.case = TRUE)){ if(strsplit(file, split = "\\.")[[1]][2] != "SPE"){ temp.text <- paste("[read_SPE2R()] Unsupported file format: *.", strsplit(file, split = "\\.")[[1]][2], sep = "") stop(temp.text, call. 
= FALSE) }} # Open Connection --------------------------------------------------------- #open connection con <- file(file, "rb") # read header ------------------------------------------------------------- temp <- readBin(con, what="int", 2, size=2, endian="little", signed = TRUE) ControllerVersion <- temp[1] #Hardware version LogicOutput <- temp[2] #Definition of Output BNC temp <- readBin(con, what="int", 2, size=2, endian="little", signed = FALSE) AmpHiCapLowNoise <- temp[1] #Amp Switching Mode xDimDet <- temp[2] #Detector x dimension of chip. #timing mode mode <- readBin(con, what="int", 1, size=2, endian="little", signed = TRUE) #alternative exposure, in sec. exp_sec <- readBin(con, what="double", 1, size=4, endian="little") temp <- readBin(con, what="int", 2, size=2, endian="little", signed = TRUE) VChipXdim <- temp[1] # Virtual Chip X dim VChipYdim <- temp[2] # Virtual Chip Y dim #y dimension of CCD or detector. yDimDet <- readBin(con, what="int", 1, size=2, endian="little", signed = TRUE) #Date Date <- suppressWarnings(readChar(con, 10, useBytes=TRUE)) ##jump stepping <- readBin(con, what="raw", 4, size=1, endian="little", signed = TRUE) #Old number of scans - should always be -1 noscan <- readBin(con, what="int", 1, size=2, endian="little", signed = TRUE) #Detector Temperature Set DetTemperature <- readBin(con, what="double", 1, size=4, endian="little") # CCD/DiodeArray type DetType <- readBin(con, what="int", 1, size=2, endian="little", signed = TRUE) #actual # of pixels on x axis xdim <- readBin(con, what="int", 1, size=2, endian="little", signed = FALSE) ##jump stepping <- readBin(con, what="raw", 64, size=1, endian="little", signed = TRUE) ##experiment data type ##0 = 32f (4 bytes) ##1 = 32s (4 bytes) ##3 = 16u (2 bytes) ##8 = 32u (4 bytes) datatype <- readBin(con, what="int", 1, size=2, endian="little", signed = TRUE) ##jump stepping <- readBin(con, what="raw", 546, size=1, endian="little") #y dimension of raw data. 
ydim <- readBin(con, what="int", 1, size=2, endian="little", signed = FALSE) ##0=scrambled,1=unscrambled scramble <- readBin(con, what="int", 1, size=2, endian="little", signed = FALSE) ##jump stepping <- readBin(con, what="raw", 4, size=1, endian="little") #Number of scans (Early WinX) lnoscan <- readBin(con, what="int", 1, size=4, endian="little", signed = TRUE) #Number of Accumulations lavgexp <- readBin(con, what="int", 1, size=4, endian="little", signed = TRUE) ##Experiment readout time ReadoutTime <- readBin(con, what="double", 1, size=4, endian="little") #T/F Triggered Timing Option TriggeredModeFlag <- readBin(con, what="int", 1, size=2, endian="little", signed = TRUE) ##jump stepping <- readBin(con, what="raw", 768, size=1, endian="little") ##number of frames in file. NumFrames <- readBin(con, what="int", 1, size=4, endian="little", signed = TRUE) if(NumFrames > 100 & missing(frame.range) & output.object == "RLum.Data.Image"){ error.message <- paste0("[read_SPE2R()] Import aborted. This file containes > 100 (", NumFrames, "). 
Use argument 'frame.range' to force import.") stop(error.message) } ##set frame.range if(missing(frame.range) == TRUE){frame.range <- c(1,NumFrames)} ##jump stepping <- readBin(con, what="raw", 542, size=1, endian="little") #file_header_ver file_header_ver <- readBin(con, what="double", 1, size=4, endian="little") ##jump stepping <- readBin(con, what="raw", 1000, size=1, endian="little") ##WinView_id - set to 19,088,743 (or 1234567 hex) (required for legacy reasons) WinView_id <- readBin(con, what="integer", 1, size=4, endian="little", signed = TRUE) ##jump stepping <- readBin(con, what="raw", 1098, size=1, endian="little") ##lastvalue - set to 21,845 (or 5555 hex) (required for legacy reasons) lastvalue <- readBin(con, what="integer", 1, size=2, endian="little", signed = TRUE) ##end header ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ ##create info element list from data temp.info <- list(ControllerVersion, LogicOutput, AmpHiCapLowNoise, xDimDet, yDimDet, xdim, ydim, VChipXdim, VChipYdim, Date, noscan, mode, exp_sec, DetTemperature, DetType, datatype, scramble, lnoscan, lavgexp, ReadoutTime, TriggeredModeFlag, NumFrames, file_header_ver) ##set name for list elements names(temp.info) <- c("ControllerVersion", "LogicOutput", "AmpHiCapLowNoise", "xDimDet", "yDimDet", "xdim", "ydim", "VChipXdim", "VChipYdim", "Date", "noscan", "mode", "exp_sec", "DetTemperature", "DetType", "datatype", "scramble", "lnoscan", "lavgexp", "ReadoutTime", "TriggeredModeFlag", "NumFrames", "file_header_ver") # read count value data --------------------------------------------------- ##set functions if(datatype == 0){ read.data <- function(n.counts){ readBin(con, what="double", n.counts, size=4, endian="little") } }else if(datatype == 1){ read.data <- function(n.counts){ readBin(con, what="integer", n.counts, size=4, endian="little", signed = TRUE) } }else if(datatype == 2){ read.data <- function(n.counts){ readBin(con, what="integer", n.counts, size=2, 
endian="little", signed = TRUE) } }else if(datatype == 3){ read.data <- function(n.counts){ readBin(con, what="int", n.counts, size=2, endian="little", signed = FALSE) } }else if(datatype == 8){ read.data <- function(n.counts){ readBin(con, what="integer", n.counts, size=4, endian="little", signed = FALSE) } }else{ stop("[read_SPE2R()] Unknown 'datatype'.") } ##loop over all frames ##output cat(paste("\n[read_SPE2R.R]\n\t >> ",file,sep=""), fill=TRUE) ##set progressbar if(txtProgressBar & verbose){ pb<-txtProgressBar(min=0,max=diff(frame.range)+1, char="=", style=3) } ##stepping for frame range temp <- readBin(con, what = "raw", (min(frame.range)-1)*2, size = 1, endian = "little") for(i in 1:(diff(frame.range)+1)){#NumFrames temp.data <- matrix(read.data(n.counts = (xdim * ydim)), ncol = ydim, nrow = xdim) if(exists("data.list") == FALSE){ data.list <- list(temp.data) }else{ data.list <- c(data.list, list(temp.data)) } ##update progress bar if(txtProgressBar & verbose){ setTxtProgressBar(pb, i) } } ##close if(txtProgressBar & verbose){close(pb) ##output cat(paste("\t >> ",i," records have been read successfully!\n\n", sep="")) } # Output ------------------------------------------------------------------ if(output.object == "RLum.Data.Spectrum" | output.object == "matrix"){ ##to create a spectrum object the matrix has to transposed and ##the row sums are needed data.spectrum.vector <- sapply(1:length(data.list), function(x){ rowSums(data.list[[x]]) }) ##split vector to matrix data.spectrum.matrix <- matrix(data.spectrum.vector, nrow = xdim, ncol = length(data.list)) ##set column and row names colnames(data.spectrum.matrix) <- as.character(1:ncol(data.spectrum.matrix)) rownames(data.spectrum.matrix) <- as.character(1:nrow(data.spectrum.matrix)) ##set output object object <- set_RLum( class = "RLum.Data.Spectrum", originator = "read_SPE2R", recordType = "Spectrum", curveType = "measured", data = data.spectrum.matrix, info = temp.info) ##optional matrix object 
if(output.object == "matrix"){ object <- get_RLum(object)} }else if(output.object == "RLum.Data.Image"){ object <- as(data.list, "RLum.Data.Image") object@originator <- "read_SPE2R" object@recordType = "Image" object@curveType <- "measured" object@info <- temp.info }else{ stop("[read_SPE2R()] Chosen 'output.object' not supported. Please check manual!") } ##close con close(con) ##return values return(object) } Luminescence/NEWS.md0000644000176200001440000000641714521210045013764 0ustar liggesusers # Changes in version 0.9.23 (2023-11-03) **This package version requires R \>= 4.2** ## Bugfixes and changes ### `analyse_portableOSL()` (**potentially breaking changes**) The function received a major update, including an updated output object. Please make sure that you read the changes before updating, because it may break your existing code! - New argument `mode` added with two options `'profile'` (the default) and `'surface'`. The latter is intended for 2D surface interpolation of luminescence signals and was newly added. - New argument `coord` added, to allow the user to provide a list or matrix of xy-coordinates of the sampling position. If nothing is provided, the coordinates are extracted from the file name or an index is calculated. - The function now always translates input order in x and y coordinates, to have the treatment consistent. - In profile mode, the function now returns also the dark count values with their standard deviation. The dark counts can also be plotted in the surface mode, although it probably does not make much sense. - `invert = TRUE` the y-axis is now also inverted and the inversion works more consistently - `...` argument `grid` (enabled by default) for better parameter reading - `...` argument `sample` was changed to `run` better complying with the terminology of the PSL format, where - `...` argument `bg_img` was added to provide a background image (e.g., a profile photo) for overlay the sample name is called run. 
- `...` supports a lot more arguments to enable better plot modifications in particular for the newly added surface interpolation mode. ### `calc_Huntley2006()` - The function can now handle `"extrapolation"` for `fit.method = "GOK"` or `fit.method = "EXP"` and is therefore suited to additive measurement protocols. This implementation has beta character and needs further testing. ### `read_PSL()` - Remove unwanted characters less aggressively from sample name; with this new setting, coordinates can be passed. ### `plot_GrowthCurve()` - The function crashed for the setting `plot_GrowthCurve(..., mode = "alternate", fit.method = "LambertW")`; fixed (#114, @Eiskeil, thanks for reporting). - Now throws a warning in `mode = "extrapolation", fit.method = "LambertW"` of the standard root estimation failed and hence the results can become inconclusive. - `fit.method = "GOK"` now supports fits that are not necessarily forced through the origin. - If the calculated fit weights were invalid, the parameter was not set always correctly set; fixed. - Non-user visible change: the fit formula is now extracted more consistently and less error-prone. - If the first `Lx/Tx` value was very high, the x-axis origin in the plot was not always shown by default. This was intended behaviour, however, sometimes this causes confusion. Now the axis origin is always shown. Still the axis limits can still be modified using `ylim` and `xlim`. (Thanks to Salome Oehler for flagging this issue). - The fitted curve now always extends to the plot margins. 
## Internals - Adding new dependency to `'interp'` - Remove dependency to orphaned package `'plotrix'`; the code in one affected function was replaced using base R code Luminescence/MD50000644000176200001440000006644214521216670013214 0ustar liggesusers0bda1f944d6986fe835da376a93aca89 *DESCRIPTION 2c1e4c7cfe8e39a957c95d9024aff24b *NAMESPACE 45b36c434e025f482f09295f5e9878c2 *NEWS.md 01f2af27ec92890c854143953bd3d9c0 *R/Analyse_SAR.OSLdata.R e70a1b32bee7860db7e70dd00de6af12 *R/CW2pHMi.R 10753627b17691f72325cac440db6a3e *R/CW2pLM.R 9fbbdaa24b01a5c8d425a5c2f9d7a583 *R/CW2pLMi.R c106bb1c6eb668d870e70a47b6b7f3ec *R/CW2pPMi.R b85758696da84860cc8eed076f3deaf2 *R/Luminescence-package.R 39fe8c1c69bbac4d563f5540bea32c3f *R/PSL2Risoe.BINfileData.R 4559aaf510748f79ee696abdd0fe9a4d *R/RLum-class.R 4a6ae6726b6623c1fc41f9f4ba8a6eee *R/RLum.Analysis-class.R 0253be2cb3e82ad16da4a22135fd9354 *R/RLum.Data-class.R 0a7401df8cb3e970783a84357a4e6cdc *R/RLum.Data.Curve-class.R 584ed9e4b349e55b24c76c9440d500e3 *R/RLum.Data.Image-class.R f98295d2147bdc528e6f9cc56fc46061 *R/RLum.Data.Spectrum-class.R 591de4ef52a55ca22f4fc84a735abd43 *R/RLum.Results-class.R 047363cf43452f576e13706b73027b87 *R/RcppExports.R c236a9f10a558120b59c42f4894e7d13 *R/Risoe.BINfileData-class.R 058e267d78125f2ace33ab6903c678c4 *R/Risoe.BINfileData2RLum.Analysis.R 3505c82586d16f111620d9272d37f2a2 *R/Risoe.BINfileData2RLum.Data.Curve.R 7c645d467cfa78253065354b1eaa780f *R/Second2Gray.R 07dfccf6809b9f380bb02dbf83e15852 *R/addins_RLum.R 78546bc9e94d8d50903d7ddb5d2e98fd *R/analyse_Al2O3C_CrossTalk.R 4d14e904ed23887edf11e9472e673033 *R/analyse_Al2O3C_ITC.R 81f7a777e228a99f712f69a5ed290f83 *R/analyse_Al2O3C_Measurement.R 673a409740fa3517953ed5c39bccbd79 *R/analyse_FadingMeasurement.R a97a4abda6c21a42b9b7a01d866457a6 *R/analyse_IRSAR.RF.R f85608aa2f9e0cca4e801e779d271ea1 *R/analyse_SAR.CWOSL.R 4baba9fc819415026b86f1dd11537829 *R/analyse_SAR.TL.R aa5c4bd857433f42eee404a1e31dbedf *R/analyse_baSAR.R 3a1da133d2274227a78a335f9eb49e8a 
*R/analyse_pIRIRSequence.R 0b9202929ee5436313ade534a12bcb17 *R/analyse_portableOSL.R fdbb754091965c3a43db94ddff478ba7 *R/apply_CosmicRayRemoval.R 3969c17aaf9c262253f9caf255d5ac56 *R/apply_EfficiencyCorrection.R ade367d26825ff488af78d022eed408f *R/bin_RLum.Data.R 84d9d48cc12b70f1535e354faa0f6564 *R/calc_AliquotSize.R 0b01aee78b9071c882d1a048a0a8bc4b *R/calc_AverageDose.R 37fe1636fe5eca657888558556d7eff9 *R/calc_CentralDose.R d82b545b8d8676b7eda7370dce9eaa95 *R/calc_CobbleDoseRate.R 2963d9a6593cc3c3a0cbddedb18e55b7 *R/calc_CommonDose.R cd7e76136d246ec027393c445ef4800f *R/calc_CosmicDoseRate.R 7affd9a814b2554b8cdcfdd82ebc5b64 *R/calc_FadingCorr.R 081c8ad483baea23bfc0e1556c913fae *R/calc_FastRatio.R a8e04dcdff779899bd5813ad5860a0c2 *R/calc_FiniteMixture.R 0c6ace267acc02610c02d55452b47958 *R/calc_FuchsLang2001.R a37b114c2bf86627b6ac039a84a0312a *R/calc_HomogeneityTest.R 43b258b65b55ba18fa8040b6bece8961 *R/calc_Huntley2006.R f3a485c04b15e8a24d7d6ba3ecc9e656 *R/calc_IEU.R e961e05a49e053cf2a06203ff301ab67 *R/calc_Kars2008.R df9e549f6cdb6db67e24c78a17e4d6ed *R/calc_Lamothe2003.R c47a6bad8a6ca7aef9ff9857dd28732c *R/calc_MaxDose.R 9b2d216cfb1cb457ecabfcadb7317cf7 *R/calc_MinDose.R cc42d24aa5d6d3f24c78393af4234a76 *R/calc_OSLLxTxDecomposed.R da2d161e039704d7a9ab2b7546954375 *R/calc_OSLLxTxRatio.R a9954824870ae9f970b10b5b3288f30d *R/calc_SourceDoseRate.R ca8d9a3466b2df583204982b3f1de530 *R/calc_Statistics.R 98ac3232593e002353579e5c34cb14d4 *R/calc_TLLxTxRatio.R 02ce86652486207b75545ac27a3cca6d *R/calc_ThermalLifetime.R c871baec9db4f8a7d52027f0b5d38be3 *R/calc_WodaFuchs2008.R e2ed5157f4e688e7249d1f8af688bf4d *R/calc_gSGC.R c81898d4a89827f0b493412bb2540cf1 *R/calc_gSGC_feldspar.R a58de2b4fd3395b6eda9e9eebe1afd15 *R/combine_De_Dr.R 3cbf939356b3cb92acbb2e5ea1792817 *R/convert_Activity2Concentration.R ff55493c20128f878051d59d426162fe *R/convert_BIN2CSV.R ed218703fa5c97e05d9d2239c9754df5 *R/convert_Concentration2DoseRate.R 869f4e1aa98186162615436bab7856e8 *R/convert_Daybreak2CSV.R 
ba1588daabb293042f2e5231bd7ca46e *R/convert_PSL2CSV.R 31f5a5462cee57b09cb7e4028d92e83d *R/convert_RLum2Risoe.BINfileData.R 2fdaf5c6e52c142ef7813e1585efdd56 *R/convert_SG2MG.R fd55ce23d8b74554b8f94cd10bad2be1 *R/convert_Wavelength2Energy.R 82041455f4b494deebdb3258711ff463 *R/convert_XSYG2CSV.R 53c96ea2c0e73ecf511a2ad8810498d7 *R/extract_IrradiationTimes.R f56b1812d959bdea353577bd9e16da7c *R/extract_ROI.R 7dc5e56c8ab9289763e32c30c1ab88d7 *R/fit_CWCurve.R b70e33f95dea855f2e0bffcc8ecae747 *R/fit_EmissionSpectra.R eb4b443c3ea0edd5d15e93085c9f21cf *R/fit_LMCurve.R 90ddfb8f9aec33b51e3d0f3d23c9e7a8 *R/fit_OSLLifeTimes.R e8bdd54836e2f2f9f7a5f1660ce65e2d *R/fit_SurfaceExposure.R 84259e1d1870e70f192aafbe302c67aa *R/fit_ThermalQuenching.R a4cb2d1c8b454e0009a92b7b0c98d818 *R/get_Layout.R 00e678330130fdf96af1054fbd155575 *R/get_Quote.R 00f23d4563139496903fb3ce986ecde1 *R/get_RLum.R b17ff3b027a93af908ba532c267fb11b *R/get_Risoe.BINfileData.R fd947c56470a6462fc0bcbf49b84daca *R/get_rightAnswer.R 3672f4b45b3d804dd3a431c73410b09a *R/github.R 25c9ef15c500b88f1fdd2814400c069a *R/install_DevelopmentVersion.R bcec18025b2b68683ca37136fcbeaf66 *R/internal_as.latex.table.R cec4ad4254b8257c9748f558066243de *R/internals_RLum.R 89a10d3cdc099fd20fd9f1b0e0c44295 *R/length_RLum.R d120660d15a50bb390db15b5f8f122d5 *R/merge_RLum.Analysis.R 9a29cd6bd8c82452e6f9c9b2c1bcee80 *R/merge_RLum.Data.Curve.R 9dcc7423c904aeeb2cd773113d461d6d *R/merge_RLum.R c04ef7a5432af56b2bca2b1d05fdb452 *R/merge_RLum.Results.R 9a1e60b4409233c7985e810d6d8de1d2 *R/merge_Risoe.BINfileData.R 918618008c45ec5d0f05427e75b249eb *R/methods_DRAC.R e327d3183029ce5a166a8b03e5e6737d *R/methods_RLum.R 86750fcc1d770cd881fcb8579ca3ae87 *R/names_RLum.R 64135db24b7a0770bc8943d1509deab7 *R/plot_AbanicoPlot.R d9e94c1a9731fd951cdd7c6f36eb09ff *R/plot_DRCSummary.R affe69b35b42a0b1edf9b4bb86a561b3 *R/plot_DRTResults.R 11ace37d02529e9dacb0c8fd7564c1f8 *R/plot_DetPlot.R aa2c259427559f6451a22bac6bf63c81 *R/plot_FilterCombinations.R 
602296176b6e63396e412ba17205c7ac *R/plot_GrowthCurve.R 26820c6c62d2bbdc371564fc2abe222a *R/plot_Histogram.R 28a829facaf3226a269ba3e8889a468b *R/plot_KDE.R a7dd46bcc5a1866d6bdf968039da3997 *R/plot_NRt.R 37572f7036402310f5150a6c7e86834b *R/plot_OSLAgeSummary.R f4b58aba06acbe4cec862748ace8ae47 *R/plot_RLum.Analysis.R 500a61aa6e8eee46d8520ee66754c27c *R/plot_RLum.Data.Curve.R 76df36e0b1da641bb3eae0049c1baa06 *R/plot_RLum.Data.Image.R 6174fbbd4fa3a828e15bc77bb1a1b646 *R/plot_RLum.Data.Spectrum.R 8c86da3aaf87cbd10cc0954a8d84a164 *R/plot_RLum.R 2b3ca8f3c8b4b5195969e6118fa66841 *R/plot_RLum.Results.R ecaa4ad69be9c85662b02de17a18bc9e *R/plot_ROI.R f1a12c183ce08d4e6ebc9a51018021dc *R/plot_RadialPlot.R 06c59c28dbb1b4130839afbf4275cc67 *R/plot_Risoe.BINfileData.R 903989652051b59c46cd0b4fe66a2442 *R/plot_ViolinPlot.R 2b08cab7302f718c9896b0b45c21c8d4 *R/read_BIN2R.R 60ca132b1945cbb913f7e0e47bee6d8e *R/read_Daybreak2R.R 9c0e18ac2e8b5570d98f75b0abe91a95 *R/read_PSL2R.R 7ba997d8030d37f59b6f53c584d328db *R/read_RF2R.R 6c1e3086f7a6e76cc08fcae4056d5080 *R/read_SPE2R.R 42c69e54ac8b32b64e9d493bf2463c5e *R/read_TIFF2R.R c4d739264be19119c718e6a030570694 *R/read_XSYG2R.R b5c8e27781978b0d1695282d9963c7bd *R/replicate_RLum.R e1935099fbe064c1bef3b2db75f8446c *R/report_RLum.R 1c4c6f18260d76f0c905f4312cae49b0 *R/scale_GammaDose.R ed6c3a4252738b68a2faf2a79b516633 *R/set_RLum.R 62ed577a616bb170c21c23b781d65e0d *R/set_Risoe.BINfileData.R b1d2e13613711adc817648e45554799f *R/smooth_RLum.R 8ea983e4752431c4e6499bcb1c4abaca *R/structure_RLum.R e036c23092797e1ea56b0069b62f164b *R/subset_SingleGrainData.R 74f27edbda833d80e114924730e04af4 *R/template_DRAC.R a6ddbbf00eaa64f25c6eb11e94644141 *R/tune_Data.R 44f0687d68b447fbb700da592ff98d54 *R/use_DRAC.R f0c4d24dafd7b92924d2e3e4276cf500 *R/utils_DRAC.R 63821a3fdcd8b7685a99fc7071adb821 *R/verify_SingleGrainData.R 254a06cc5758dedfb6a747552fb35cb3 *R/write_R2BIN.R ab60a8d254d57d9278894ae2d7344cea *R/write_R2TIFF.R 8baf2cbbfc63538e9140ea9da5d4126a 
*R/write_RLum2CSV.R fbe27ef6dee65cc5d2ae64f6ba8281ad *R/zzz.R 8c0e61f0ca4bb96d54620dc6acc59b0a *README.md f61db891eb928123aba4412ca322f508 *build/partial.rdb 498606022ccc0d937402065df0afb4b3 *build/vignette.rds ab9a2ca554dace598a6cf515647269fb *data/BaseDataSet.ConversionFactors.rda 3fd02aa03b62ed93d291df5585160c62 *data/BaseDataSet.CosmicDoseRate.rda 902b0e4d39fa28d1d35a17de092285cb *data/BaseDataSet.FractionalGammaDose.rda c97eec2563d8046daa99f4fc973d7f88 *data/BaseDataSet.GrainSizeAttenuation.rda 300a9f7f2b667aa572b23ffba3b5281f *data/ExampleData.Al2O3C.rda 9c8f76e70d6a022c14f9b9937a6e48b2 *data/ExampleData.BINfileData.rda 98b340cd13dfc0c89873bc8c53ec092c *data/ExampleData.CW_OSL_Curve.rda a6f4804e5b4677181635c7e396197f8e *data/ExampleData.CobbleData.rda 420acab318984756ac02db5396ca91b1 *data/ExampleData.DeValues.rda c3e0676c122950754bcaf1c9ceb1fb11 *data/ExampleData.Fading.rda cc538618675734cda21cd2f4aec30c35 *data/ExampleData.FittingLM.rda a194b7273caa95df091f6a0b32b1a0be *data/ExampleData.LxTxData.rda 0fb9b7fc02638049534e67c103251193 *data/ExampleData.LxTxOSLData.rda af5f6dfb15646a13a093355407077247 *data/ExampleData.RLum.Analysis.rda 5b1ec3fefbdaf4d50d18af61481021a9 *data/ExampleData.RLum.Data.Image.rda 90a303d00d0678d2dc2419209cfd534a *data/ExampleData.ScaleGammaDose.rda 00927dacb7eb977ed78471bd00f6095b *data/ExampleData.SurfaceExposure.rda 6bdc582e91b77789c0927656181a5415 *data/ExampleData.TR_OSL.rda 2efbd548fbf40f3051896c67744f3968 *data/ExampleData.XSYG.rda 46cb02411ce47f89f50800707f91e90c *data/ExampleData.portableOSL.rda 6f72caf56b9406a9553b131bc4154841 *data/datalist e6f00d26be7bfd63cd3f4da0f6bb5fc3 *inst/CITATION 285d980b7e36eb53d03ff61303b697fe *inst/WORDLIST 07ad61ad16dd1fd1e110767e4b05bbb2 *inst/doc/S4classObjects.pdf 6beca119f1590a3626e80cab485f8002 *inst/doc/S4classObjects.pdf.asis e416f3c9418a6023d66971c8e28e797f *inst/extdata/BINfile_V8.binx 5c342b7cba17fe33015293da421bc228 *inst/extdata/Daybreak_TestFile.DAT 251753a30fe82779d40ad3f17ba448be 
*inst/extdata/Daybreak_TestFile.txt fb458a7047c03e4ebae19b8aefa610f0 *inst/extdata/DorNie_0016.psl d70d1998a51bfeb77e3ba3f54c474418 *inst/extdata/QNL84_2_bleached.txt a06f36f48c83f10917989c829a8aafe9 *inst/extdata/QNL84_2_unbleached.txt 33e0c898019686514224b81ba65c770b *inst/extdata/RF_file.rf 4473f6a1f4375066efe52ff7824966d1 *inst/extdata/STRB87_1_bleached.txt 17e386e1832e46b61e912f7c9535e8d0 *inst/extdata/STRB87_1_unbleached.txt 0d420de38d9b112b53f8d18c1ae07195 *inst/extdata/TIFFfile.tif 189b1a6916c8026334dd49b372468e61 *inst/extdata/XSYG_file.xsyg b286b56ca5e51dd11b14da750452f2c7 *inst/rstudio/addins.dcf 07ff05c1af532fad77d0d897144829c2 *man/Analyse_SAR.OSLdata.Rd c163b35bfdf74c78ea57863928b206ac *man/BaseDataSet.ConversionFactors.Rd ed1bf0e597b74a0363daffa8ca89e64f *man/BaseDataSet.CosmicDoseRate.Rd a1cbc9fee3a15aac887dd6728d920656 *man/BaseDataSet.FractionalGammaDose.Rd f94286d316e36796c60e939bd90590b6 *man/BaseDataSet.GrainSizeAttenuation.Rd d823a05fcfa225985a7885800f003a57 *man/CW2pHMi.Rd 4232509ed0cb09308810a194d7b78211 *man/CW2pLM.Rd a39b16b673af276aa62d96663b83a7cf *man/CW2pLMi.Rd 2afb45572600024bb897b3380be49239 *man/CW2pPMi.Rd 989059c8142e22ef3fee9c0471e8ae1c *man/ExampleData.Al2O3C.Rd ba50e9021b2992c439378d84ed8aa60c *man/ExampleData.BINfileData.Rd db0cfa450c76a39f60f96971f231d0b7 *man/ExampleData.CW_OSL_Curve.Rd c08aecae2b8de5e5610cc9733e9b727d *man/ExampleData.CobbleData.Rd 5ee4f0a50c2515340ff6eec49bc1e47b *man/ExampleData.DeValues.Rd f79bffc6f8e9bfcf66da4277389760eb *man/ExampleData.Fading.Rd 750c4fef60118e09ba21ca0a0117f5bc *man/ExampleData.FittingLM.Rd 11fdbc4b8da1f05411a8afadf59fdaff *man/ExampleData.LxTxData.Rd 5624d6db648daa91ea8aebcb4efebcc9 *man/ExampleData.LxTxOSLData.Rd 8fd18a46968903bfacfdc964ba698940 *man/ExampleData.RLum.Analysis.Rd f9bcae9e5f42af56b8289df29d8b1d58 *man/ExampleData.RLum.Data.Image.Rd 5a4bae3f44aea6d8dc016c792b19f7b7 *man/ExampleData.ScaleGammaDose.Rd f0998331cec4912bbbe6db32e8207d2b *man/ExampleData.SurfaceExposure.Rd 
b3b4409491b7406dd54edf053c84493f *man/ExampleData.TR_OSL.Rd b9310786a156526441ff3954b5e64430 *man/ExampleData.XSYG.Rd 8f2182a4144aba13187cb14cffdb094a *man/ExampleData.portableOSL.Rd 1b690254adf26b7cde057a08053df880 *man/GitHub-API.Rd fa1ef5b19a616ce0097628923f7eb694 *man/Luminescence-package.Rd 032546788a3198afeb4cc5cb0805b9aa *man/PSL2Risoe.BINfileData.Rd 27fbab719732083e945c970ded346a1e *man/RLum-class.Rd 6619ef1ae2cbbae33db47f5262f7955d *man/RLum.Analysis-class.Rd 5a87f2c157c7893bd6d870dbbc3018d7 *man/RLum.Data-class.Rd 99507de6771c5d51d1b2e8475f5f891c *man/RLum.Data.Curve-class.Rd 93ed2baaa669dc8ba26036ba2f942488 *man/RLum.Data.Image-class.Rd 9d7aca3d1b63926ad6ed28eb1ec48dc6 *man/RLum.Data.Spectrum-class.Rd 7846b642df12380ae76c424753a48f59 *man/RLum.Results-class.Rd 19064746216c48b38b85715fb33d5f7b *man/Risoe.BINfileData-class.Rd 9f9cf0d93ad28b85aba73ea2a22b0268 *man/Risoe.BINfileData2RLum.Analysis.Rd 146e209e07d48f693d41d117c1dd3692 *man/Second2Gray.Rd 39480e2b78637e6557e7bc4b1a33b824 *man/analyse_Al2O3C_CrossTalk.Rd 37f0e7a2f6ddaf38cfea58c8baa33f81 *man/analyse_Al2O3C_ITC.Rd 3b27e7c7cd9768b61f9bf32562c291ef *man/analyse_Al2O3C_Measurement.Rd 31d98a43d9fceb857b49e141e82a1393 *man/analyse_FadingMeasurement.Rd f1ca917776673158d0ced71bc8745877 *man/analyse_IRSAR.RF.Rd 5a201b1bb0b7e79d7ce2d90901851ccd *man/analyse_SAR.CWOSL.Rd 25293d0aecc4a6941bc01fea607d873d *man/analyse_SAR.TL.Rd 503858f94b1e7080fdc3ced17f6f375d *man/analyse_baSAR.Rd dc46e78bc7fcfd81264e3d0c7cb361c6 *man/analyse_pIRIRSequence.Rd c010ad2efa64e5c55d0e8d1c3adf57ab *man/analyse_portableOSL.Rd a6f9768da648f2b0ea4e476d8083e8bf *man/apply_CosmicRayRemoval.Rd 5f8e6e08f46bbaf8c8c85a49af930257 *man/apply_EfficiencyCorrection.Rd 89a3a781638762a630d400143683c081 *man/as.Rd 63e7313144a49700f10b056c32d4b50f *man/bin_RLum.Data.Rd 076c6246cf2d5b52d0ab44c5a2f58285 *man/calc_AliquotSize.Rd 253c38ad3d7149a74c9a425f4d323c81 *man/calc_AverageDose.Rd 24dac612bc0a3d709cbeecdb648eea2d *man/calc_CentralDose.Rd 
66c73aeadc2368a5fd27c27a3e795289 *man/calc_CobbleDoseRate.Rd a413a381fcdea47e5390533f5ecd0002 *man/calc_CommonDose.Rd f002dff3330f444c75cf66cab69fa682 *man/calc_CosmicDoseRate.Rd 9f6925da7394b919d3f5753eedd67efa *man/calc_FadingCorr.Rd 1b5a18fb78c02d41d53fc17fcd687b22 *man/calc_FastRatio.Rd bde51937f930fa1bfd477258190e25a0 *man/calc_FiniteMixture.Rd e17c0fca046c3d6faf2a2264f50043ec *man/calc_FuchsLang2001.Rd 7a3eaff91a1dd5948120b5a261634be3 *man/calc_HomogeneityTest.Rd bf3138f81746630e179e7204e107901d *man/calc_Huntley2006.Rd 9f61a5ff83d1ec34afd6de885a38e3ed *man/calc_IEU.Rd ede2ad2e5b6e219310afa36fdc4a7c54 *man/calc_Kars2008.Rd e0e17d946fd073bad97a9c4dbe1d18a4 *man/calc_Lamothe2003.Rd 39de03b119188e46b22271aa3ae18347 *man/calc_MaxDose.Rd 461535ad52d82da249c1e6f0b9b8207a *man/calc_MinDose.Rd 07ed36f045ad178ba06555ad0a19a05e *man/calc_OSLLxTxDecomposed.Rd 9334d40160224cde81d24f79b387ea91 *man/calc_OSLLxTxRatio.Rd a54afaebcbf5ca1f5dc5bc1973cb842d *man/calc_SourceDoseRate.Rd 59322c7d975ac42e36083a527d8d4947 *man/calc_Statistics.Rd 202f3c1c2674fb9fdcbf8cb3f897af63 *man/calc_TLLxTxRatio.Rd f4ce8ac9c456147f5e8357454cf2947d *man/calc_ThermalLifetime.Rd f054079697894eab1e7470bbe9e08f58 *man/calc_WodaFuchs2008.Rd 60e90f0b018b08d747e990ee244b8fcd *man/calc_gSGC.Rd 4fb02f857ca73ad328d06cfaa5be0dcf *man/calc_gSGC_feldspar.Rd e0d5a08fcf26c974899ea8e443686fcb *man/combine_De_Dr.Rd 5addfd943344dfb279ef77275fa86062 *man/convert_Activity2Concentration.Rd 2b4ee2c7c1f97aa3ae29e12e327b4dcd *man/convert_BIN2CSV.Rd bc10a5a51f437b2ad8914487e9c1b0b4 *man/convert_Concentration2DoseRate.Rd 3636d175934b0b3d310301a1dfefbf2c *man/convert_Daybreak2CSV.Rd eb424f9499455c4f8373dba7c464de39 *man/convert_PSL2CSV.Rd 8c1e2e72c1ebfd7801e766884adcadac *man/convert_RLum2Risoe.BINfileData.Rd b24ad94c3f6b88458a3b325e008e0eda *man/convert_SG2MG.Rd 7df781d0a6a4be7347e2da8569adfd2d *man/convert_Wavelength2Energy.Rd 765b2289acbb7768482b540153b76c64 *man/convert_XSYG2CSV.Rd f51be9661f99aef63bc632d662de054d 
*man/extdata.Rd 7b890baee5ca17c4b53fea8f24990f58 *man/extract_IrradiationTimes.Rd 6f610ffdcab06c7de0af61788bbda8c9 *man/extract_ROI.Rd efd9a10a97e54b2cc6015704b0f6e57f *man/figures/Luminescence_logo.png 29deb119b951b526ca2049533e297386 *man/figures/README-Screenshot_AddIn.png e31d5f0110244e649fb696ae7c06bf1e *man/fit_CWCurve.Rd cf9c5596935a64f10c2d25b57859b9ae *man/fit_EmissionSpectra.Rd 190570614fd66f32d6e49e7673b117ae *man/fit_LMCurve.Rd 7236a586e544fbc5ed46d5fd716db532 *man/fit_OSLLifeTimes.Rd 39633dfd414fe0243bcea481a9afeb81 *man/fit_SurfaceExposure.Rd c4dcbdc81bc56be24e32b8f8af9ae57c *man/fit_ThermalQuenching.Rd 193a319442d1f89597c314e86a9db364 *man/get_Layout.Rd 60c29c1a91c5e8308be81ec1499fdcdd *man/get_Quote.Rd 3c4233cff755aed94bc552e0f45a1813 *man/get_RLum.Rd e725442fd6e3061aa679050b35f1d453 *man/get_Risoe.BINfileData.Rd c4a32d6ef42c73ab3dced0481fbc66c7 *man/get_rightAnswer.Rd 3174d6a366f5e7c66cea070c4dc5f0a5 *man/install_DevelopmentVersion.Rd f3614a502f8597be956deca60a1de0ad *man/length_RLum.Rd 8c77e8c334f18250f3b7b3722b0b4cd0 *man/merge_RLum.Analysis.Rd 9f06f05836120f3da8a1525706d98879 *man/merge_RLum.Data.Curve.Rd b53e8688cb371a9ca598d850cf281617 *man/merge_RLum.Rd a83ff4f772129bdbab43f33532ecbb8b *man/merge_RLum.Results.Rd 0207998fe17094e0b540c7ca05fc09ef *man/merge_Risoe.BINfileData.Rd 9ac40ea86eea9492569caa5b04b3c5ca *man/methods_RLum.Rd be3d5566faa516648267032c321cb670 *man/names_RLum.Rd fe714109797566ca088318d44b6e09c3 *man/plot_AbanicoPlot.Rd 6cf08e3d7dd7a91211d904f8c465ac4a *man/plot_DRCSummary.Rd b2680b83fbafaa24de1e16e26284547c *man/plot_DRTResults.Rd a13338b33b4f37d2ed358e40c399f2eb *man/plot_DetPlot.Rd 545b50c3c205173885b6410dfdcb70c2 *man/plot_FilterCombinations.Rd 9781c8abe740c7c574fb0a9be95e08ae *man/plot_GrowthCurve.Rd 7e3cc4607422de2bcc43cae79b19ad0d *man/plot_Histogram.Rd 3bca4bd18f711c7f686a3d1e2973678a *man/plot_KDE.Rd b9a73be14f47cb74613849b9e9575454 *man/plot_NRt.Rd 077aa8d35b59a396ba287de64dd6734f *man/plot_OSLAgeSummary.Rd 
56a31453a1acc07742ec26677aa1f9f2 *man/plot_RLum.Analysis.Rd 95136737f4b652b74e6cc27296da2c9b *man/plot_RLum.Data.Curve.Rd 6c296a05cda9450e528a67eb93580952 *man/plot_RLum.Data.Image.Rd a99bafe33f8e8446c63aa7db20c46e7b *man/plot_RLum.Data.Spectrum.Rd e589e3b4de8370172dcbc3c341be8ea9 *man/plot_RLum.Rd 2d40b60b6858c4478947210b73383b90 *man/plot_RLum.Results.Rd 41f611f77f514e3642ae203965294f08 *man/plot_ROI.Rd ad314ce1d9c5e57fcdecb0a0a39272b6 *man/plot_RadialPlot.Rd 4e1d2a05299548f356cc362148ebf7df *man/plot_Risoe.BINfileData.Rd 02b9e01e3a1274144a09992042f2037c *man/plot_ViolinPlot.Rd a4bd8fa2991640d0119d5765cd6fa93c *man/read_BIN2R.Rd 6d477abc10830dda963d1818421ef21b *man/read_Daybreak2R.Rd 299ff42646ffb4c6d6dcd0da315e4bcb *man/read_PSL2R.Rd e38be5af7b417bc0c9a89d952b05efab *man/read_RF2R.Rd c7ae49ea6278b0ff20c625157b06ecaf *man/read_SPE2R.Rd d10f1e3e08c5f092a90e8880f14b5ae5 *man/read_TIFF2R.Rd ecefd85f174bc281d9dc5e195cafe4d1 *man/read_XSYG2R.Rd 4fd80fc3d81149beddcd30f1e17b8643 *man/replicate_RLum.Rd f13c08e4c7c2bdde513492e846c5d81f *man/report_RLum.Rd 4a322b7cdb3ff082cdf8f16dad21d298 *man/sTeve.Rd d5562f31cc84b6049564061fb7f94da3 *man/scale_GammaDose.Rd 6ea03d69a1b7208cd883716819b5a04d *man/set_RLum.Rd 82124f4186215aa86936537a4f5d1e79 *man/set_Risoe.BINfileData.Rd deea4665d4356ff792d2781379170b09 *man/smooth_RLum.Rd 6f3f2ad92d1cd5478bb7df70009bad12 *man/structure_RLum.Rd 2ec12a56b3ebf7be637d9b70bd9203c1 *man/subset_SingleGrainData.Rd 62db22b6b918dcadbd3e579f879352b4 *man/template_DRAC.Rd 9fea7b338282ce012741c7b600af7516 *man/tune_Data.Rd 83c1958f9291a87ad010a78028b23865 *man/use_DRAC.Rd be6ebcc2cb9d31094344b47351b4377c *man/verify_SingleGrainData.Rd a61b39b30f2752ae52e49b839eebf69d *man/write_R2BIN.Rd 493201b861b487fe5fffce3c9f90f52c *man/write_R2TIFF.Rd a00c755d494e754642ff78f52e27a2cb *man/write_RLum2CSV.Rd a565f97e2225a4504986d6d350275010 *src/Makevars a565f97e2225a4504986d6d350275010 *src/Makevars.win 22a4e8a213c8456134424dab6b4690a2 *src/RcppExports.cpp 
22b3eabbcb7b82326cddee23ef07209b *src/create_UID.cpp ab23f9482c31448020734a4c8f376d69 *src/src_analyse_IRSARRF_SRS.cpp 48dafd4dfc58e600c698be16b782aadf *src/src_create_RLumDataCurve_matrix.cpp 9932430d05f2842a3cfbea959478876d *src/src_get_XSYG_curve_values.cpp 92744a83b4a79b4bcabf0371105152ea *tests/spelling.R 5c33e2021366df205be163e82ca1a759 *tests/testthat.R dd60d533744b2119f44b7585f7361f7c *tests/testthat/test_Analyse_SAROSLdata.R b289ae104eb1ed2aba857d5d8cc5d5b1 *tests/testthat/test_CW2pX.R 047035f6fc00df5723e8fb8be17162c4 *tests/testthat/test_PSL2RisoeBINfiledata.R f40b5e2e9d25676f106d3421107e78ef *tests/testthat/test_RLum.Analysis-class.R 8bee083db1e85f34b275a1babd15396d *tests/testthat/test_RLum.Data.Curve.R ebe1d27e5690372eb7708b3ad4a954a7 *tests/testthat/test_RLum.Data.Image.R d0961a1070c0fe426ef08d338edeb3e5 *tests/testthat/test_RLum.Data.Spectrum.R ddcd4ba07e79014d1fa90dd34ad0100c *tests/testthat/test_RLum.R 3a5e4f7affab68784a8045158b316388 *tests/testthat/test_RisoeBINfileData-class.R 08ff4d5a40f0b74c12e565976e1c5254 *tests/testthat/test_Second2Gray.R 08f4519c9305424de0dc4e11be4f9980 *tests/testthat/test_analyse_Al2O3C_CrossTalk.R 170384a323954fc658e3c7312d2a2995 *tests/testthat/test_analyse_Al2O3C_ITC.R 9f5258d9fd1dd8574a34d35e720c1f64 *tests/testthat/test_analyse_Al2O3C_Measurement.R eabd98af12cbe2708a267435d8524eed *tests/testthat/test_analyse_FadingMeasurement.R 796340176d2b5e005cb4b041feaf493d *tests/testthat/test_analyse_IRSARRF.R df1ce1e698775b2efb4c852ab73f11e5 *tests/testthat/test_analyse_SARCWOSL.R 6aa6228584c7dd754af7339d22a3d5ea *tests/testthat/test_analyse_SARTL.R 4ad97fab77e9ce4c44bb2323dec36ea1 *tests/testthat/test_analyse_baSAR.R e940b6c36eadbd5890395fb122054bd4 *tests/testthat/test_analyse_pIRIRSequence.R fde9438e54f9cdad4c0a0e960e72ecc5 *tests/testthat/test_analyse_portableOSL.R 7c9a53af2e1a7ac8129eb094c8b3635c *tests/testthat/test_apply_CosmicRayRemoval.R a55c9e2cf6f05ce377de0e85a3fc6885 
*tests/testthat/test_apply_EfficiencyCorrection.R f22b6a8408a41dad15cefcdc8e900838 *tests/testthat/test_as_latex_table.R f78a6b9185b13323657fa853610dae87 *tests/testthat/test_bin_RLumData.R e5f5d7fb8bf9036d49c5eba15846169d *tests/testthat/test_calc_AliquotSize.R 7dfae8e59ff695916d941ad476ab6ab0 *tests/testthat/test_calc_AverageDose.R 6a2972892800ed8b315f37c96a2fcdcf *tests/testthat/test_calc_CentralDose.R e3eb3e0056e9998d1e687c8348c9d559 *tests/testthat/test_calc_CobbeDoseRate.R cdf914ecc1f29065ea12e0fc91850b68 *tests/testthat/test_calc_CommonDose.R 45759193c17ca3f760698413a4de8a67 *tests/testthat/test_calc_CosmicDoseRate.R 4d40dacf9016e66742ff66d1e89867e4 *tests/testthat/test_calc_FadingCorr.R 459266b23450654e55feb4d015d472fd *tests/testthat/test_calc_FastRatio.R 28e53a454391c28a77a6b0791eed2e44 *tests/testthat/test_calc_FiniteMixture.R 1100a7e71efddfb373d610474b7a2b54 *tests/testthat/test_calc_FuchsLang2001.R 9b8c3613c428e36e269aafa339ff224c *tests/testthat/test_calc_HomogeneityTest.R 2348ad242ce4f668d0089c9563679a6e *tests/testthat/test_calc_Huntley2006.R d46368b01db2c93d0eb9da2898fedd7f *tests/testthat/test_calc_IEU.R a3a2b3475b3779f927b75cbe236afea0 *tests/testthat/test_calc_Lamothe2003.R b3f804c48fd46c43e1cf29048066f849 *tests/testthat/test_calc_MaxDose.R 145f8403cb662e50f90370cabdc8aa23 *tests/testthat/test_calc_MinDose.R 567900a630db174f7b44aa9aab122a4e *tests/testthat/test_calc_OSLLxTxRatio.R 00538047a9a63fc31b78add2890be09d *tests/testthat/test_calc_SourceDoseRate.R 83ef228431aa9684e284cc0736f7e175 *tests/testthat/test_calc_Statistics.R b124dc11de034f537d53df6967aee377 *tests/testthat/test_calc_TLLxTxRatio.R b85a469f1b37bce68a61a960c770aed5 *tests/testthat/test_calc_ThermalLifetime.R 072eaea199bc4e9f2c495021bb33e102 *tests/testthat/test_calc_WodaFuchs2008.R 67f6b1f0572c958cafaeacb5f9980ebe *tests/testthat/test_calc_gSGC.R 188ad28f3243522512627bf727e85743 *tests/testthat/test_calc_gSGC_feldspar.R e881ea42573eb0ce88a9edd089f14398 
*tests/testthat/test_combine_De_Dr.R 787ef934c6b2bda60dc4e60cf4438ce9 *tests/testthat/test_convert_Activity2Concentration.R 569eb6394a12b0cf072d12cac98e0f94 *tests/testthat/test_convert_Concentration2DoseRate.R 96c07d6512489e914986cf627c81585b *tests/testthat/test_convert_PSL2CSV.R f0efc2d01f839b7d88a22efe71768352 *tests/testthat/test_convert_RLum2Risoe.BINfileData.R c408bcfaa851248f4a44c208773ef23d *tests/testthat/test_convert_SG2MG.R 7a8a066d2a7a3873b09ed358f41a4a05 *tests/testthat/test_convert_Wavelength2Energy.R 860a9083acdf52249916127993be5589 *tests/testthat/test_convert_XYSG2CSV.R baf9c092242304d9aded0c37feee9b9a *tests/testthat/test_extract_IrradiationTimes.R f24bda855f8f12a604b60e8f6d208301 *tests/testthat/test_extract_ROI.R 74d59e92ff281369522e23f337582612 *tests/testthat/test_fit_CWCurve.R 1bcc33f7989350ddb6aae1891b93d8e2 *tests/testthat/test_fit_EmissionSpectra.R 1f900311e448990daea9159c4f49d2e1 *tests/testthat/test_fit_LMCurve.R 399154948671969203f1ca8855c9fb7a *tests/testthat/test_fit_OSLLifeTimes.R 074d9c8cdcccaefd0228324b796d72bf *tests/testthat/test_fit_SurfaceExposure.R ff28cbb830b59450013e6f703bbdbd28 *tests/testthat/test_fit_ThermalQuenching.R 2148ea44f374ad6c439be5028c9d8c9f *tests/testthat/test_get_RLum.R 19b2bc343b8f0da44e70ce57e05a2856 *tests/testthat/test_github.R 6ab16a3535b2fe38ea35459366321129 *tests/testthat/test_internals.R e1d21c97d86dbed1e82504ef8c189ceb *tests/testthat/test_merge_RLum.R 92912f370dc83a6339d85bc10409e80b *tests/testthat/test_merge_RLumDataCurve.R 26f252d124a119f070dfd12612423377 *tests/testthat/test_merge_RLumResults.R f1db1dd3ffd07af85ad454a9fd543d3b *tests/testthat/test_merge_RisoeBINfileData.R e1cf39c0b1bef659b301008bb97fbb65 *tests/testthat/test_methods_DRAC.R 8e03a7b4ba4105e9240b9279ada6fa28 *tests/testthat/test_methods_S3.R 829eea8a341e9eff304d4b5ba5099858 *tests/testthat/test_names_RLum.R fcc6348d853f6846f6f0045c8352de03 *tests/testthat/test_plot_AbanicoPlot.R 6b6b5889a8620e7d15462a048c7b82fa 
*tests/testthat/test_plot_DRCSummary.R f64b5152891e84e00194c62c22b6edd7 *tests/testthat/test_plot_DetPlot.R fba98b9d981568618ec1234274518e81 *tests/testthat/test_plot_Functions.R 7267de54c8c5c7f36838c8dbf12100b8 *tests/testthat/test_plot_GrowthCurve.R 78a4c649d7a6280043ccd5b4464b25b6 *tests/testthat/test_plot_OSLAgeSummary.R aaac3814293f5db52c14a917d0cacc37 *tests/testthat/test_plot_RLum.Analysis.R 5e82b966dafd532e41cdb6bba738ccdc *tests/testthat/test_plot_RLum.Data.Curve.R 479df95d2b63b2166804199a97eb03be *tests/testthat/test_plot_RLum.Data.Image.R 379d52934c7e0338d80b3b343bcd2680 *tests/testthat/test_plot_RLum.Data.Spectrum.R 4e865b5f282ad3a83f7d9f34d823c911 *tests/testthat/test_plot_RLum.R d9c8cf42d7badddd33d2a6c10fb8a4fe *tests/testthat/test_plot_ROI.R 6f01aa94a6609122647c312a9cd2ed76 *tests/testthat/test_plot_RadialPlot.R d4efe0c67efcc39a1961b817500ed5ec *tests/testthat/test_read_BIN2R.R c7e70ae0a410afdce7d5ed6886101189 *tests/testthat/test_read_Daybreak2R.R 63d07234c116b44b5c6f9f597ed7ed9c *tests/testthat/test_read_PSL2R.R 31c346ef463d87579350346576fdb418 *tests/testthat/test_read_RF2R.R 8ba0e6fdc9a36d8016462b87e4de94b6 *tests/testthat/test_read_SPE2R.R 073cfd70d3260225a38b4c08e83853a1 *tests/testthat/test_read_TIFF2R.R 558eede292b4b34459f75ad4066290ac *tests/testthat/test_read_XSYG2R.R c97352440d5828ab493a498ed471769a *tests/testthat/test_replicate_RLum.R 9a2c07a786f951eb1a360a83ee0b4ada *tests/testthat/test_report_RLum.R e51f23ca997a41b5acfeb9f0be652dda *tests/testthat/test_scale_GammaDose.R 3122074636116ee079cff7452a0aae7b *tests/testthat/test_smooth_RLum.R 10614cd9bbcb85ad6543316ca4a3b0c3 *tests/testthat/test_structure_RLum.R fe2a9434fad12889640860ee971007a5 *tests/testthat/test_subset_RLum.R 84b38bc09179d06fe70b28a44a2eda0f *tests/testthat/test_subset_SingleGrainData.R b83a8ac6686ecdcbb6932140a7513f60 *tests/testthat/test_template_DRAC.R b04acc2ac2f65f6ab7dbda3225eb15d5 *tests/testthat/test_use_DRAC.R 9a32d13b9c9824e615ef530841484ff1 
*tests/testthat/test_verify_SingleGrainData.R 3a025ef8370e353f4b2033dbe7264414 *tests/testthat/test_write_R2BIN.R bda010453ae5e5bb68c51ad1b1f7728f *tests/testthat/test_write_R2TIFF.R abe46831a4efdfdfeddf794d111ae952 *tests/testthat/test_write_RLum2CSV.R dea7a924173a02cce54351c8ec825c74 *tests/testthat/test_zzz.R 6beca119f1590a3626e80cab485f8002 *vignettes/S4classObjects.pdf.asis Luminescence/inst/0000755000176200001440000000000014521210067013637 5ustar liggesusersLuminescence/inst/rstudio/0000755000176200001440000000000013540751607015342 5ustar liggesusersLuminescence/inst/rstudio/addins.dcf0000644000176200001440000000042713540751607017265 0ustar liggesusersName: Search for TODOs Description: List TODOs in the source code Binding: .listTODO Interactive: false Name: Install package development version Description: Install the developement version of the R package 'Luminescence' Binding: .installDevelopmentVersion Interactive: true Luminescence/inst/doc/0000755000176200001440000000000014521210067014404 5ustar liggesusersLuminescence/inst/doc/S4classObjects.pdf0000644000176200001440000020746714521210067017745 0ustar liggesusers%PDF-1.7 % 3 0 obj <> stream xڝwTTϽwz0z.0. Qf Ml@DEHb!(`HPb0dFJ|yyǽgs{.$O./ 'z8WGбx0Y驾A@$/7z HeOOҬT_lN:K"N3"$F/JPrb[䥟}Qd[Sl1x{#bG\NoX3I[ql2$ 8xtrp/8 pCfq.Knjm͠{r28?.)ɩL^6g,qm"[Z[Z~Q7%" 3R`̊j[~: w!$E}kyhyRm333: }=#vʉe tqX)I)B>== <8Xȉ9yP:8p΍Lg kk Ѐ$t!0V87`ɀ2A. 
@JPA#h'@8 .: ``a!2D!UH 2 dA>P ECqB**Z:]B=h~L2  5pN:|ó@ QC !H,G6 H9R ]H/r Aw( Q(OTJCm@*QGQ-(j MF+ 6h/*t:].G7Зw7 Xa<1:L1s3bXyeb~19 vGĩp+5qy^ oó|= ?'Htv`Ba3BDxHxE$Չ"XAP44077&9$An0;T2421t.54ld+s;# V]=iY9FgM֚k&=%Ō:nc1gcbcfX.}lGv{c)LŖN퉛w/p+/<j$.$%&㒣OdxTԂԑ4i3|o~C:&S@L u[Uo3C3OfIgwdO|;W-wsz 17jl8c͉̈́3+{%lKWr[ $ llGmnacOkE&EEY׾2⫅;K,KhtiN=e²{^-_V^Oo§s]?TWީrjVQ=w}`嚢zԶiו8>k׍ E  [ly邟~_Y53rW򯎼^{7so}x>|쇊z>yz endstream endobj 2 0 obj 2594 endobj 9 0 obj <> stream xء 0@))]$@ P8R$iS8yǴ 1 @ @١z\1uP endstream endobj 10 0 obj 85 endobj 12 0 obj <> stream xj@8̅ UϷG$I$Iy{mv,Ѻ<,}MӴ,K[:2]L'teLraŇ9]6&A1/R^ei.~]dRG]Z_v@Ce{lXtСC:tСC:tСmXtСC:tСC:tСot(I$I$Q b endstream endobj 13 0 obj 246 endobj 15 0 obj <> stream xڡ @ALӎ*Q $^J$IԘ| `0 `0 `0 `0 -#1@If endstream endobj 16 0 obj 99 endobj 17 0 obj <> stream xr0 D/Ph ;ACB''1.FVmaaaaaqCz\p8ˀ)8P:NS=ӀhxKHǠ4:Cy=4wNq@՜<} Fah]e'DM5L4h@/hӁ}gy!՞Mޡl5UO]Q- 3╉:nELNO7M51+Ѧ?:*ɫrRzxno8jb1{ Zr ;ZӜ 9OvVN Y5TCS1M-x |#Y/{4DͨIF1C4Sڙjrhw\䮩v`~%AO7}͒ͨo_LmB4GTm9EBa# Ր ġ43wB5 dM9Vnj(Ҏx߈)\ec/~ydx8rKubvi<-1-:8qTYkl@F o G +3տLxh^V|a߮)mVNaKngoTo)?Ze#3"բ귋Z0!ͨV5>V٘6Oxi\tJO TcGS=juLTOV0^7SG~mZT6v{1@Luq>zz/5]֜x@S=uCrUETuq*TZÔ$r="j~ki/ӍthfUjvDչ4p)Ѻ &#%*LE}0hh%եXWH^^i~e vA5aL ,;zh=S1.聦z<5 29fhbC8W| ++H^WCQMQ:JLE(?ȘlK_+[j ~/Esrż61y?$Y9S=]nBA[hoas?xv ڧr^H%)4;Q_=YdZ$/PwcJv ?."f/ y#WXt|?Ե:v\uJZ,Y2 ^_Ӣ6oHr?vRpG5W,4.NPEVv@AHJ?JBAu#o*Yg8cTר>C0 0 0 0 0 0:# endstream endobj 18 0 obj 1387 endobj 19 0 obj <> stream xڱ 0@EMS8 xo[$IV[Le׈u1u0 `0 `0 `0 _K;b=$ _ endstream endobj 20 0 obj 101 endobj 21 0 obj <> stream xr0  4h8Ѝ+dAAAAAA< >0՗r9NzӀTp<?}vT9TCۀ*jR.Rp FZXTo @5iP)Cy@Nw@V=Tz1\ǺE|iۡn*?U^nQcU+8uhcnv~,~&(S0 ןzADA_%) Mܪ2~3>r5Eoߵn_Av:UzΌA_,gH=8U q#PmƩIk}S=PXDsD1=f:0ڇuwNMg(玉sRmvjkyyP-j1uT B5K7ҫG3#tV)iQ]Ђ[6d˱8ߣmofbl[Je̻e"K|akj6% l*HB53Lۖ.Rmω)D(?n[o BE&Buա:TP2 !M]zCb ~\z(DH ?LN0mY>цjB"s^Pȵt^x6e d.& 025n!Dg 'T^JH Eͬ]b1gnk@.j&!㘫=cxwQs/M2HSn/j[ =,< /7W(fZL7h1mxQu.>/`ڞ-hG~HקXoA`:/z&8A.XP}M\ Az'Cuա:Tg-AAAAAoGD\ endstream endobj 22 0 obj 1131 endobj 26 0 obj <> stream x1 0@6i҂t`dAkWO8/%IT&pFL}qǴ`0 `0 `0 
`0 iHM$u endstream endobj 27 0 obj 101 endobj 28 0 obj <> stream JFIF``CCgy }!1AQa"q2#BR$3br %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz w!1AQaq"2B #3Rbr $4%&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz ?( ( ( ( ( ( ( ( (>k[fkkvfFfhT%FI8 cKo@/PWQ8d0mDyzjE$S`]=:P_cp eMyœ=!bP1^88)=s@m)uizD ~Sh}}-cKo@/P_)@K&?`_0[TP@[VILx'3ո3@b%2bXlwt ?k@m╾_($[$`{P$o;|ڿ.szPp/mg W~nۇ'pE!F߽ws?xFH$ \?@OP<yq4EQs;ôP@i[úH٢gU'=3Ϩ >D-FhT]60}<#SBP+ñ>#r3@ ( k%7 f wUh(? #<[N<=A [.KG(r 0G#_p19Q-Lsm $Hf`(Ā3zPP@P@o]6}@P-,nc|I^."\!` ’N wPmN饕FyD D7C0ŎyZ go?.Gpvk-P@JoEk4P@ݤ+olZkE[i8!TyoҀ' i'֧.loنsaPg?vƀYݷ_mhe>g?vƀYݷ_mh%d?/9-߇h( ( ( ( ( ( ( ( ( endstream endobj 29 0 obj 2291 endobj 30 0 obj <> stream x١ @AdU,bdk*_5^J$i\;Vxk+u&d2L&d2L&d2߮H~ ⌛ endstream endobj 31 0 obj 89 endobj 32 0 obj <> stream xm0`,k b@i u''(((((~|7\ra-GZs:BWAԧY`FuDڰ+lcB9lI{E5Lo1KCIqƭ.\ԉ5Mbr a6Rgjf{vu(kKh߮}N $SVIsy."0[}I;<@ $Q (&|:Yt3_s~yFn<'܊LTOu:/>|͛oA)7Uy@z{{ aH`,:Iu@9B6tᶱl9܎60ZAf!XT8!jc:܉{ ALL 9*⽰m;~օniUSk[J~<-"⮖aSq,lYn (TI:tˡu2#r6fz{斠oM2օ?CĄy;ړl Gujaćofh"v=OPn'ۮu,vIz焛ˁeRkYq}\,^rT~뉗2̖\U,r\)  rlޓas_JU_4-;UZnFg!+_>-խ!"ǝX2ocsr V /Pf$Fn dcԪZŖr0Hx3l{=Mah0y\?PL/p'4OxDZM# $Y}>F#ܜI6xwb<h3s:Cp񚑷2Q-Frk% !;"TF|ӒIBF1omQ c2c^MQ1}M''$}9{GSY1yqd|y"N<pn]G>=R-KB GB==R%ƭPv|I&Z2y☇G*5ƹ;&C-TEQEQEQEQE1Ѽ endstream endobj 33 0 obj 1069 endobj 34 0 obj <> stream xڱ 0@ L^p+H?#D/,-p $Ia9mtL2N@ @ @ |1U${/gJC endstream endobj 35 0 obj 97 endobj 36 0 obj <> stream JFIF``CCmi }!1AQa"q2#BR$3br %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz w!1AQaq"2B #3Rbr $4%&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz ?( ( ( ( ( ( ( (>k[fkkvfFfhT%FI8 cKo@e ֫&r`Lt~" %-p;:Cf-BONZge?q vT}?#@2 ,<.Xc@}}-cKo@. ~n;@}P@ ok[$f2 LHVۨK4< ͷ9';cלRDRT*<e,d1*E'?@*ܯ;c'O#׸(ن 1?ZEV`S.N[}8@".?ȠR._`^xv> ( s6X#^y%[VRC9$=@'! q#XGJ b|ρ؝ٺ!W!ak8 uک{w xrKbg( k%7 f wUh(?^+ k@#1 לyCːw)~r.*mhԂێpO\Bb<́hZ( k%7 f wUh(?nn7o6uhxzP&_3Yhwa9ǫzͺ:d2@BY<`1Ǩ$p d,#X'# rȠ (&7Wh f0ڪ$^P ǔ8:q@!K[IbG(-WX|mܟ\N@4&@IS0hJ(? 
M(mك|y (% H`7tRP?)/৤?`< aP@'>??2|€  >8~@/G==?O*j?*?xdQu>!@AE=G S0T_.7OS7GU_ُ_axNږo :>=O%C#F^(ؔNP@P@P@P@P@P@P@P@ endstream endobj 37 0 obj 2455 endobj 39 0 obj <> stream xٱ @AL,s:6p(~a̽n$Il j>b&q?1@ @ @ NsQ ΃p endstream endobj 40 0 obj 92 endobj 41 0 obj <> stream xn0 צs;JNKLk7%Z\ 0 0 0 0 ?EwWN_reYޙnxy&3PG:{0s[9Lc1F|r7u}uNiޫrPwO!6@]HBCGΡD]69uDA)iCQ (U ֫$?.y^u19 8Y&LsJ%v{9F͸mQϚԩ,-ތ+ކPom8?+u"ِehknk:-u[+N)szl%+˽Y̙:%$*DՄҘH;Xi*r軫wAu=l,ů{v)Vg9 x oD+D6*)$t;YL2u٨=Ϝt;aUh}3VX++[Dދaaaaa[ endstream endobj 42 0 obj 938 endobj 43 0 obj <> stream xء 0@*R60JZ hr y)$Ikvu }"uw20000000000000000000Oza?yI~ 8j.r endstream endobj 44 0 obj 95 endobj 45 0 obj <> stream xَ0o*1|(ެֶEQ"\B!B!B!kX t=a~|>W.V@.SRr`p8԰sY ~Y)U.kQYƿrY.bJy.G1jF+'Ŝ d. X?L`K GR]3KZኀ 2ux$zEMZ;J.ɹL\x<mGʇ[~Y^$X̡ M-ĴCMbȘU'vz /lrqA(rXkޝc0G^3Z)"x uWb"%ۯ˺E,"w[P+hEXI9c>%}Q3\k%spw[3 !9Caܧ;ycRr+Q̜$9 gғ1~,85o9wCq|.mZ qB!B!B! endstream endobj 46 0 obj 884 endobj 47 0 obj <> stream xձ 0EA0X 4?/A>,p{$t0J=4]}54gmWRsjF{ D迡]lյN8B R endstream endobj 48 0 obj 97 endobj 49 0 obj <> stream xۊ0 7p˜%ʲO|ȷs;F{܈eYCqdfmC]nVb6+!| 0JfjJj)]Z[udV]b)uu$דuSXYj6q"(-eIB~Fg3۪ԏQǩsIX*ui  kyzv {$ҁY4[q-g=ɏ;SW L=e݋ʺ٨F]VUy~{:թNuS^d`` endstream endobj 50 0 obj 311 endobj 51 0 obj <> stream xث 0@AJ'40` TWUirvۦ:OG$O_yξw"D!B"D4!ͼd~P] endstream endobj 52 0 obj 102 endobj 53 0 obj <> stream x풛0 EMLh K4KJr!m]|~AAAAAA{/%58N5!orl`ztm7АYNYX|>kkj;ө9b=.:FbPᆲ :u#JE7_E.D8fer'8w +כQCD߱ӽ* @  n4U萅/;PMyg> stream x+0~0T\ Sd ,mVͻ1}njE/g멵&3RmKN#SjdU2wG 2d8 `yD $(~ endstream endobj 56 0 obj 100 endobj 57 0 obj <> stream x o0 af7}d:$R)pM     ޅ͊0e eϛk u陪*?Vr<\%9P3R@m,iyD>"oq @v$S ^a-DWg=54uV`$0 hk !e%h#K1${ i}2'dAAAAg|^9 endstream endobj 58 0 obj 673 endobj 59 0 obj <> stream xͱ 0@%(牍D{K vw/aBsp{hEɢ͝E+WOKKKKK,XS0 endstream endobj 60 0 obj 83 endobj 61 0 obj <> stream x혁N0 D7`Лt-t>EU8NrI|i?>Fh4Fh4FѸ>v~)x0$p.q\E-{{y(X:g?"_Zk 4b,yH#!j 4V?Fl F(VC]J2!~xpEuȼ$@IDˆ}Ƀrh[(;k"YbcY&L-PꨢA|&C^q2ñ.|ۤ; pg,K918LwԶI *\J蠖d86d ~plgod<ɧ<WU6!Нū~m '."yع8 UPcUwQO׉5Z>b$7~O` i endstream endobj 62 0 obj 679 
endobj 63 0 obj <> stream xڱ P@Q!Љ!(2FS0|$I:a[h}^S>)QlJQw/FBP( BP( BP( BPǡ5ePm!I^ endstream endobj 64 0 obj 125 endobj 65 0 obj <> stream xr0FqV39] pSYRЇػ߿h 5#v]C<_Mm]PԿ[֑-6B,`Bөմ+5\Ojku'> 4k90ٵ!\(ZB[v`"0pX>: qR!8T?0*JDjp{5 z(sYe iˁLw\q- D,8_@붜|$O~8NQ>)X&˒Ge`+AQ^E4o#/ iYO.} D򣤎 FpDJ"5Ze"82?e6c?, ,o *梷Lz'a *Ƌ.Ä6d Ha43pL~<9<̐.VF 0=XdOC'Q% , f~a"=mގKAԏ ~x1¼L0a9aoG4Nl?3q 3N dʆG3023NO3Ov+$=᳔ˇ4bԒ\1/81#VƿB$1;nҼ1uw-N}D9n&DRx7nK7zny>0m.N邌InLB1-blSueR4I5(:G8> stream xױ @Q:fpp Kio) @ ^uעSSF%9ʬJ'%J(QDºX8sϘ endstream endobj 68 0 obj 97 endobj 69 0 obj <> stream xr0 EkKb|&g0E5Hh4Fh4Fh ~X3x%]\n@9ﷹsΆiƎ]b<8{O T,\(XhʤlgsN@榨zgZ턽b&rظ2ǖ5Ŧ§X=50~dW>T^z:PY g|2L>d _q3gXzh4Fh4Fh4?Z, endstream endobj 70 0 obj 912 endobj 71 0 obj <> stream x֡ 0@цapT(Bgn) I >yGCK')QY7 !B!B2lߥ㊠ endstream endobj 72 0 obj 101 endobj 73 0 obj <> stream x혉n0 De񀇁&iSx CAM(((((((((ERIkܽ{wkc|H,7sG iY㔽lޙ.OOl TWSu5kidf c<搦c.謏َcex=S"v?ȃdn> սKyek)%.[y1VK+_g?pgi^vّ/6 ye zsO3%G_ˉP|/o)g#vMH/Qwo+#-syt:{޼@ѡ̏}_V8^iaJ/Kcq KV<GyR# ݼEF*S_R̻̓:‚yB9L&fDJ&zd7 2c)ixt)'ט+¸KȓdUHK/M}&-0Hd2ɑse ݒf>sZu bɊ x<rգo[h{7W˼'sn~Gk &r_|k;rCX,7|sXf~}c-'tυEᕒe$o$Ͽ߷hOe>?=!~Z*JjZ*J(Mš endstream endobj 74 0 obj 884 endobj 6 0 obj <> stream xڵ}]7Ȧ=ZQpf)qZ󱫖Ko`O7o&}z6!c}Aս};Q"xRs@"Nf~ϗ_|| 3ir/MOҕ0^xokS~O?M3{a\.-SJ2)l/}\1߾+BBSD/l'oɏ/RLwުO<wU' ޺+Ҕ"ibl2;sRY_ZޛC3ir](֬}oח ݽwK+&K2ɤ%es&{+bx'xTn`lr96V_2q %/>L;}mDߤNKx<[o-?O:䴓O: w asd\Ud%Ԧ|wccƘٻڿsȚ)Dz¥)#)?O//LOӿ'W/8)N%>1bʴg#MI&[?|ѳ+#>/?Lv5XX8ȓs.AkZE;'IrN!5)@ T )+p?]Me>EuS4E M8|Bȥ82闢uH, Mbd3ET>z{L囏^b2C\,YqcAxx:EIEz^AKO棿?A N<'g4?NY^_"*bsҤUOVqY-s2 !Kn!ѐ="=i؃c#^%yeR[m#Ko#Lj5eκONlw| >ILŲl0A6vO<ž*]7Ro>%m<x>B4'>uN2މIcnwHCAܥb S@CTLYk )"[Fl9TuLs}- 4T)AKtt*F̫`<>g ֦.= wwN7IU%N 1@*B(C$cETwɻx8;iKrU펋Sy:աKx& u{v=UQe]3O =}_Xd77eyu f{`{(,9¿2ztkq;Ym0Oɇ64S L8'ֱUYZ]J*]=U 6_S7fR@g)p2, "vIqKγGsʼnQm\r⼆.O1 IGjצs jPiҔ쓳&[_ذGӅţaGӅţ +G%NG'K7K-͎^y4][<.- Yy4;D<hx4}ˣ-ZDmVD_ ڤ}#'KGP"? 
#o!%+n-$Clq!zA*Q0yrs۸m<9$jzA' m]2à>*Br73]bt0>NVxL{>"wgzŝu[̙ogzśm[Ϩ<O"tlfH3][.-Li `4@(eIȏV)UV0>&vI,@y@Xl@Xs6 vm!H8O1ĂA-;Ф۸h r@`L@a-A}RuS #45; ~SO;.4/Qsh^ʂмDsP{.Qm4/Qh^*U5*y%wOMJ&[qK[It+6Ar{n%>TrHsLrxp+[ͭݚo'e?hNҭ[9}F&en^fx%NsoR6(:~:H^Vौ7?]ES+Ãzp9tN-X /C"д)2BJ+w"&YR]m]y%ÄjkAmN$ID!%|'e,im1VFh]!TxnY8-f ?8.κ)YK }dwBJX/[/KUZ; XK]T6ՃAb8$n'[{ᐶ qZĴ[U<6Y#&BdPH i]BmL$FIݔ}ozB!mrP(M2 J6)&OB$PHRdnҍB !&٤8,gBlr^M } ZX"^n* ͉ᐶ1KڢpH[i@!m0i8-J ] YV2N~!6!'jч[gԶθdX]VYD6GNW\;+faKB&+nE0@ 'f<%neM;wi:'q!"sAd+N[ɤtTVSP*%Ft~ ,4yflOT<ҀܝXfZ]mU7KUӏ V6萯 ź#cZ3Ċ]^GV!}"!;{ި:SU_b O.aIה+"|ܮ#\E 0qkhCF.;ev@~X1@^}U{ϯ5/_jkz*Rt  nąKQ'u~ 5Nڎ񺵞'+KrWzPKD%d-q.AP1ܫxr8U4&o) 1ߨ%YA|4 H uAiّPRxd|EB0pL"TF< nMqNЕգ4ʉB7ΰJ[HuKmkH [ l v,O.\|mSV~y-?ԓHH%T+fy7#µp)>h~X5沢Np,[xu֎9aa1j9_@2@W #S;w|-!94"caP= 9(mvB\S2;̈hJbv͇k1eV&tnT@ dZ`Uu&"x\zή+#ಯݯrH_LJ$+}J'Bwâi v[.}F"yר~iLy1t5R4β2}!~XYBi8wC~4ǙkzE=@BfX"\!Z\DYPwSf>a.afjޫH.mef )i\#/I舻vEMNNJ{5yI62d#T".-P,_qP*rQe-AA}l!5G G:{Zϛu-*AiDK&;"u>)E!K:8%) UlX>LZFh֊Գ"6!*b`ɛmS]4 c']7aQ ]4bdgD~jׄdsARyFZ>o5v@GDSJhPs`Rgwt 8Z$euFrsOVթ[R!THGr۔Yz-HjNq;s7$=ڃ)Ho=ci)n^]IDzRbc!k-ΐHOZd&n,Yk4L;\D()n q_=9HqH _ͬL-%Mn!=mq-%-H"sԉPzn' Ns*7Bjtgx1}" 'E &H_b!ީH'Of iSW?klj YjUAѐ=gG#O ɪwa|Z#5lE!=irMcfZYPH&YR\QLc]D"]lsH#+?YtupO ,'O{fr`gn^݂<Ϗ1=Q`BSHO:iʖ!g?ҳIc2UCZ03HvVnL"ky$ғƆ(#Bzڸ8\LtضJ"=k-9zڗ!{+HϚZoI wPQH%X/[jApR-RH-g7{iJǹH"HefGn,315VV%c%AMA<,tPΜ;ٵ-&)Y :w^ޝ/<9yBx:}(/XhXJSĥNߝ|K^ǘPbrhi&u/K^8cǵ!(y((P>48'2HKCsxiӷnpQ`zYd(_ns,>ZIќ\M/ʉUy|#Y]-+"-QcaAxĪctkj*DIRe]N>eeTR +r `=V.DgLdul3?Ls~ۅy Kn]XVZf|'NrvtCYC=(E`I@*'pH[RHd9;YiNCں吶d9;YhNCںeun lapx- 00[glYq@h RMl&!m8-EhMCڢ Z9jlh Oܢ pH[4i&!m8-E }nqH[7x u!mqH[7x uG)ud҈<iCں6o8;qH[7x u!m }wڄy3[&P,3V%?"(^yD"p>{츛}YO\23$NcW$ҫFi# ,NZбF"L!jL ~cHc㶯-{G]^Ԡ$'wzދ_\3V﬷GBߍw!zQ_o.4lg KnZ*{w.ٯSXov.c;S v 3ޒ.>'SKx[z|NڴԗZ9K7ܾ\?6AY 6ޮ%Tiq# OH>V> \@@Rd/iodBeռ}QAc`UeooSf0ʤ^̣Lt5~^Y/Pm{Gy"[S8TIΧOdNxS&r۪Kd9ٓ=J3dv$IMBi6Y'[*LqI5{[(jͬ|(\ zTsQ55u2$RW tt~j_ZY UKI‚[U3@n~mhpX8V׼' =l''aY'csjL"f2rHlֵVgi{B]=՚Z;VzV_8u3!> 6Кwk|k[kOsP獚<ƳB]4>ЃQһFM=iT}OŌԸYV qH{<ءsuѤ^5xg{k24;<[a x(u̪ʈe0*02MF2ѐ Bbib 4pZTf,NP 
PU\bUw*mJh=:u~Y+ѐYtj#& $2eJRBRX r)-6-W6>NS '\UMs>me?j>x)Ђ."Q.2i]56P|䖿h>{DH:DwŧbmSlE,t*;Ms 0qcֱS1l[Y9dEp~ruׯmV>!Q?$7$\4b@I;ljGHUmh]SV^TֺZ달ZWz?GW m( נ!Ecji[+yz7mrp}y+;se>p U~ )eV2F5=Psˌh^6ǭ>񧑁Fc*?h54w^dP_՚c.Kd vJPpj2 ˨.6>OT}s|p3?/]¨o#Xq-;'5cx!i\Ӥǹ5E7W{V@J%e; Lpha;~CojmU I5~vdJȷCf dv,5$ԠȎKf~hV=/l.5^?`'y-[YDsJb1Aeb^(ג@ zEêO!Ճ | H-C-@ 饾8z,D`"NbMA! _gc?(T#_֨5&)!Hdpl=cRM]%qIxess\+@VN:il5χ>QQegKmUJ+[R$uL# Rp ֗NM"֕f-;QT%~.H-GoL{Rp,ycGRHj\# [RkLHOC$f0بEjmԒeuQCb]Q ư#нQӔ4RR>U9ت) ٩HN-&?@GPIT#tdnѷi(N,у)f\u6*AHB}Ҋԙi}U,ApU+8TD:jRq{wF*dx +^r5#[3ۛ"ޗfѥ >;ÑCۺYFR{#p~VmTf^89!  \.˟9N%0 tjS,%K"oS$ [F''IW'!77t  }t'TV $[)~6S`[~jM$Ҥ̎Fz4zV*-2ff rTB{iRPv1 Ydf#g'fSE&1HIKqԛ q!ljD C& J~2Hz0Evӫ7v$֌Ua(KKqT,]F(Pݙ=X̔l1; CiRc?@@TX5ؙl[(E]֊Gur5PšR#"N= 2> AŚޛCTSʝCh6cpH/Ch$(s& kZ }!k$1 qJWyOpBk'Գ:8@9hXBEBJ2Oҧ*YMZ}αǿ8 ^`Ql4+3 4WxVW2j!eKTҁVwd5>b]8%i5 _Hpj~eE RNX[tyo>ȢSMcoq|<Ҿ+^ ;Vu/;X$H[b )$;H(EG_!XԽ,~NlR9CS[ dnd+uC:*:8(q^NK7*)ҔaTEu-TP՛g\11OŰ:)57>-GV[xVSQIQ7>H9;bDR%CҩEeHOzH/dS6N .id h+̮S[oYHV05#&MM幞G39L*-5fuQETG񆟁朄QEBNK"vKl);L2EZ_YDˈJaJ3庭[ʷpHORNDR]5S+;q\ɼo;_d [[XI/=/"';@lNV`S2Qb>û ;C!/.{GBly) cb|oUkʓڹƅpcE|pygILLCRM"t ۺOIQ;R>$εxt!]4oAalIāh%<|J d^5LD$!ԑ;L:}#+$~":-8ET$]yiYZ''x`;9MDٲnLiĻwQd4~eGOMM5P@kMkViN#4lB鴦5#[ӚbC!5ͩ']v˞;4|Ȟj34QzT%f?e;~fn<ضYHl)VsV Ҷb6ԶV(bd۶V Ҧbe?ݖڂ"Y&(\`2__MkPRsfz93]"cqwzH ؜ΑHiYtvlޞ Мߦ;a+jYv.F]/6HS]NKᲚٚMzY*u@_ߡ6\?/iݒƛ-x\:S$7O~ERys67+P)\+8'z4lnyk}f-xȓ#W1*_N^##8C{9R֦&Zx|||< S‰2pĠ|O'ߛ'V~g]{+ػ?AБm2EA|/{rx$ֶZ YIq3qy.H찋M I{J6nR539 2s6&y έm~x #t"%dty.$үe*G!ܨM!>iMJk)FKpH_h .gb5U%4nAfnO)$5\-d$ƊYQ+s4 w~uu㑼0ӭ>kߺ=K ܚ}vΰ,HCmٵWjq,s6$,Yޫp;s̄Jη눀 _7. 
vsHF,quY˟l?lm1ʲZg&ȞVM}ɶuG@r,r6D׭@Nl!2x pdY.>m|`;J*Q1zHR}.٦P/VpK='WXx81F}+[J`̶L2prKJ @APCTL2LqO0HSqQJtָ]00n&_YA ʊUjfshG2v}Nr (PFMP\5:ON.0E~/BZylzqCgBz^O`X3 fufz_ЈP8 Ce޷*G(kNJ9KjzgT%i& (iPA SCQӈˤf^?45M[%%BRɚ4FEeRߪYtb"7W#dU$/raКq=]-sat\O6HM% j4( mAaD?nM+ܚDSH_5䤣򈱳Ni5Sz5Ej ¿U -FI؟Üh먀| Kfd;KFY[>68l}9LM%eK~"/[151(P4 I3;v#(Njj'3iyq>TN6Z^!S+~ҨbfnݯU[B]"O8lʌdFs7 ĶL2m`TFͤ9W%!Q# %7Fz +Az"I=8->f\9@OU 7eru`> endobj 79 0 obj <> stream x]ˎ@E 3PcF5b"Avzwv6 O=ڸoHu:w[ \*] Ş 9DXJxE[yCvXMFtpt@>>eBy%)  Xl4ЇǛ҇'}xG%َ'(>NSoOv#NjapNB[?ZNvwFNЉǥ+ Aî>kQb`l2YXG0~:>4G!kmL endstream endobj 81 0 obj 480 endobj 78 0 obj <> stream xڽ| |TExW;潹33 50Pn"E0r^ 5xUwY?s\ ɿK./^NBTRG tۤ䝘 !L١oO|BľHOymsZXouu_C GȀ)&}?.w_6 x tfϵ, nq$"]!Y1}IsgJ٢= C323KeSEl!aI>'ӀA  6 c9!\dk@nרgP0!zc(6b 3{=F  !<QD.3"\n@O+ Bkqp^m03rU&Ddpa "L60>S sv5An[ pivf`Ci8202\< @ iIr$$l}f$ ECФ3%G/>~3AkX\gT~9QS#a#sYF "{乤6b;i;)WVJZ)&3Pg' ф`ü~D83l^u MHQbќB;zt$Ua+DEʥ/1^Oo\Jz6>$; u7v T `z ^VMǘ>Sb#;9sO8}F"ґ ?%̒Dg<1'{  "w[:v;1㡎/Xwib y"ɳdU9֑r܏;2Q9K9d# ڭL/ FnQl!Q_E!lWσx27\NtG8f$!@%n7Z\p 7XO.Y{=DQ&[;Hc?CvSP7;2o:t r' ؋Є=@7MOx2Jƶ !Un|.T4dx[C|H䷤Ef4]r c߇t;\ F.||^eS|\xH9HpfOgneJs.Aұir-] _9NNᒟ /į"Wwq16#" TڃkE:'WԀ1G e2R- ؅Ia 6M7\c9_Ɩ > ߿N{8ϒPGs_h)Z2}UV4tHA ͹_h޽zfgdCi]NfY-fU1ɒ(0 /qK$+<'NpϩgԊ8q+vʸi+,K{_<* Y=ir؊pD;*w)#*`f(,ѓBZ9懃ɜQqR^I+1[OLVV^{pnW@Q'yxxo8)bG4魑8WFNbY1E gs<,!;)1Y|NR㱕 ݰucߪE_댹d'҈DhZ<6)NVv׸E#7VG-#'PgN²Jk)S5!>%z'/TZj4[aOykTs6H )#*q'Ҹ#b5/?-ē Vn0 ~ +>OIN״8j>9B'yoҪN7jq?8;8?AyO,j\9E*}hȯ%|ҚH1²~n8D&c]Ƃs2gD8Qqq?؆1]:9m$ƌ*5AI]3Uzoob|ddducHhdcu㤖#!-ҸbiYZ%0ŕUq$s~9vLu|zFj&%Eq$<8vTv)bCΐ﹜5j_c,\VƵ\L'Tܤ|ƃ\RXeVq z*# l1"$Ź%:K<Β׫#8W1ύ3T_W17 65AcjWQ_4A-٨EB#q-GT U47bEΦ!J8 *U]3`,z1TXmY+ hg0Y𡾡6CegB 'k۸b۾\pDEUz$TU2=˼Jo +J1WwV, ίBB\IlR}y6q_7*V5}1Oc"or֯Td8&cʜgNԗ0 vETVvz;tz @;V;5Uvy*:T>D>}|| ]=Ehu>> e6QؘLE8#1# ص@]^TH8)_YީZHտS“ Q Sx2&N)pfw'i,IH:pĈF#ut.ް(M( cU3H1C(Bt3`Ny?'25pt̿oN rz )Ea!|է_/>9bQĘ{b>, 3܉2^`^ keīa y^La/k  lnh%, VSdc!6,ti;Rqn$D75_k5M, ~ٴio{Q|plTs=rNKsfPȧ皳á9=\8, ?4~z=}F$;_zafx DZQ' x?rGcXǧD˃*+ ܈3y !yhlsGW5/稼y9/D7噙L[̌̏BGSONOja(;>ʝ x @D14FC[44gwR^YY9UMO{ 
ui<YVIć\FG҃ٳ+4ާtRoiuI.'J'D/̊3 #B ]|fvEi r^u,wg==]ctnҿw{Vt/&y/Ϻ͊ˆ`e4#$/:WZ`!bmEI8$Q]ݝDK$7hY5$$Z12z.-J$*NX KI4"J4F&ѥ'xfT8>&nP~}<% Kp8F FU9!l>BH$FJr+!3rgô[BXDZgc>%SBO#!!){ :IKHaB2BB{~$a3!=f"%$4! 6zE/CMH_w_snBr\l/eB"$}Q_}_oAܰ#;cugD# uDsF<:6 HRDIplueSQ^a ;;dT*Mdke0Mĵ5XEղ&p2i&u ;qBU_-QXFrxY?AԢQm?TEC&TUFk'z4 @~1  "5H\V%v+|yW⽅c3; :&w-p% % 3qԻw<{ߢŃg٢!q\/\BdsL{ Fq>WRplBJZ|[Ehe*~hSP@b-+α[['Јf6g^\^ꔧtPk+tD jk^P."87?obvOz[[8!fi06ZQN ג 9) E388i-t#߸>s )HΔߤ|"2̑ˏ˭QYŀtIKTRa:R٦;M+d w  )3^e+Edv,XlcE~y:eR9Tht;v uH^IZ-~yuc|Mg}N5$((; I:r|^FYРEG`iKb~ϡw/ȻϊobpbubM&~fBaRU=pon tKouWiqkK֘!2Sl\o B-Ƈ XUI&DvD'[&Z,NU֋BF=BBzBBco+~s2" =a}`ܶ|y+&l93uֵ?uy@oMLJOl#aNz@b@vV\uJ K[GbCVZ:B&#TQ+HFt^P(tXO+xTv%CQJ.>ARrvf[b> evRA;EV82*jIR +"1D$剞L O&w`N~oӐ[~ѓ:-bĞw:H'NAQD"qqbpxr8/p|!oƦ)ޖB ;* kVf37 |D2= $f'E mO?fo.ԣP<=405)ZeI].ju9$^eו.fc>4ߩѤB٥V7AkVC$ᒫBRGsvG!`ɖ'5C `iT1">/}jWmñRl}l墲 =E^ zI{HRS59M&F)054RjUFD@Zssmx!jf9%NIʂv⼗<4fS] UÍsN0,*]'u{Z~2|7P $cl⟉ x$QS)JlGc )^8\޶Rd( ӗ̯cf{FZ5Zh:ooV &Zb=ܔMMϛvvSRQU%^Ab#.?Eh!vIш )G*HeuHөl(G9Nn4GMh 6$#L4/ZpO ^lߑ+q ;3(/" 7r_\ TXWK) Z|}t9yOz`K>Bu:ZeYRT*1I{! =UhRT8䠍ʋ R 0 RYi~vfjZp1 V%A׽&4pQ?.ہa:s@:M:vBʕ*F2{~=.6rk$w=gߒ#zO< 9ŻF!=|D}BIi=8F=P&<,s"a0&8B껸5-MbDUI/!|6%\zAVŦCo{}}}]S^~hG+u"2շzH(B ("J!U+́s)5J>[*^-.˽5 ͟߷ұm~wy7 ӈ0+Bbej?)!\k"kmMҤilۏ{2/|%( 8s!9=o馧P'v$+Ȓ*&mq,r)Fr"Q j śPюj01 WF;QZ4Qk'1JhEުBPGsփ퀣Pb}KhCt YC]ςeJyҭFPfaKbWp1dC~W=?&&o? NMn LwbWTzXNaP@SV!EW3xB *4C)! ,ՕI)TQ{T`޷PP!3< !X^PDܚ;fNd DhT`v1m2;fwVjO]鱔2Bct JTtS EMV%W"tC%{IKJY92sp΀t%8߬fYv\^]3etwc+/UTE)bt;3uJPPLݗc'4C&*.S锉XwE0/NXBpZ`(IWG𩠨PTW'TdZZ[H!.d?qS>err;"ˊI1,U6UbU"%&v0I$. )f1B3*z)b2I dfL[r IB:VPnL5Sut\~WըS{תAmRKH>q5#gϡ3|I-7[Q@ݞaGn]#vYZ[J7WNG}Ȯ ~/HК n81;>d'6(;e/7sy@ԓipm:\gC,---qC8323-VZZi" ;@酓OL4;TI BdO ^PoS@|łZJYbwpVn;hZbKԴ8#,zYGrCsG|? VreyugUs&Xv\ֵ해l3,l~+N{կfvM}%vx3ֽKsfWʢ^[駲y-䛟g^wp JjaTSHnK]z_*+H)M\+e(+`ut>`KL^J-Qk@cWbr37ݢ}Cx 77}-ky(3\,:)4R+WsB Zm(I9o % TN;P\!3?kv 9u%-~exүwmOcŃNw%m{wԯ烤( }r{,}$ \.ZH ɪNr@D$PFK}HjbuT=Uq qMZp#zr^+ qRsT3B4IE%0. 
NŴ_DgK0Nj:ڎN03$3E]_˨yܯq 7#~YpRO4 vxڸniq}KiskmuKUШ}ρy1Alc-sag@]ܾ0 A!*$yp۷-AXxe¬*خQ" V,M:*Nے[Aˬ7AV1idԤʥ5R\'z[I%p1jnDY,wA s#N>& vE(p`#<D* s&Z  NuJ+X3%jWMr|ݕݯLSM P6(Ԣ*}”^ YE(2ʄARL5K.PC9|O쮾_-WUG;X]}*wtԑ oOdȟcLq3N8mSԠW. UPM.o\Ƙ)K)3ؾ&xR=GRR0y^"^<#e"\X6fj6+M13;27ʂw@k(ekk-◇URu/z]7ܒ ~~?vg/kˢ<{mJk UnO-PϢ)@/[  m];so3f0 WΖ1^qHovf~Atm@*j$ٲl5/ng;wɈv^yQH;y_%Ϙ`\͉kхU:`sU"h;{gKuFF}0(|ځ4:_k!;:otvPh[.#}R5)NDHxt.aYt0]qy+%`ma&ɖc*Fv436d[QM>8̩gޕW,XH3 λުK mDoBG_RuuC܆kɗm{_J[o'm_wW_@4(O,M g<5/+ Wts_1#@}ݑr>B^H4ۙ=<{llqq̎#`cv]./'s,fS#c,Qp"5כ)+K۠^Gڑ6 u0NN^9oAɐ4ޢ)I!_f%+6>JH|x)1VA2h[I_h~0o5W8۫ /2X%KNv;}h;wFZs]"m XgJIQ(bҤ~*iJ?XǘcQ8ēI15*vd%I7B"1ĈaS3r=XõСe!f&yyۉQy6R@ گwVK:oˤ(CޢX/z%/0tnSv!?qH&?p q1ҿ3 TCl%KbWR-͌6 omYXTqρ1xt=[sv#*tD&}1vFWF_L$ #v~(e9ڙ$Q!3mdLst%vCtǤ36Wse᷆ŹH#o `6~ |_?U.\0K@p͍ wnȗQJ>5=`mr •I bi] yfzy{D4I=3iK$hN~ގ@qqyUE/ Lk[lxf#o|[z%UCiWܻmY5%UC*r.aX؊ؕK#bel.[N0D=lw P~#_pѸE0noP!J%~Jwcn $o,Y{X-l=4$cڛko:xbY +ә>PB~Ш mtc V*^#Nٞ{Gq{,vzʦːjkvMB_ 2l-RSMhnwː&r!D͒ߴs7:79ܸP@Mij?]C/\) j2ADW*_a[NNN8ǥډG%bc%HAà#_ o oϿogb}]@uo+-W?~ 1gKqV9 endstream endobj 84 0 obj 15158 endobj 86 0 obj <> stream x]n0E|" !%H,Pi?" Y5w.j KG?+BwfYn M$nt'Ct{WgR䟚Fۢ!=wJPk\BC6w򄄍j2?ٌ_>z[Dile$t݇TN^ؕ&WRtX)κWe-SU WP;ΝV2=@2D+.,FΕ g8ip99]72 dV . #|Q %|ˁrXA'taQ4Gw]wD0Ǝ &sNHoxa6kY endstream endobj 88 0 obj 367 endobj 85 0 obj <> stream xڽ{ |=sl&Y0!!@ BDkx A PF0BZX?&A!A-Q*ڪ}bՊB&sg'-ogνss<@b& EGyx|ΪÛf?L%D9wUVzd݄ Zxn&$4@N}/}Zs a};ֳ]}VדX .3@ B2~Mf_TdJ^ncg4#qI9_ <ґFE>B@4 u "ɄTC*Ӊ8Q' bHdDɎ4Y'):qLɂ4M'lD9:9tr!]iN>:H uEZS R@ZiNU:Ygj7qk4=Ӝ~'o*Of`? 
x5>Vrhf~'Ӏf2G@jvwNq2Yh$wZHB4NVջOxtć)~Dv bd(c}I .7@ƒК:J%!k~H,kDI_FjG~ U+zF6^סsm}FϾm`># |hk&`$+ZE'Ln&p}j~aT(Q=A~Ccq˜fo>d Us9}l"(kg!yÿ~=9N>'L& ymj}J\IP {UZ4q9WϽe r ^lP oyx>k>v J |>Fi^{F>;Cħ J/*/=h ?dply9dgE3#PFzZ05Oy=nSvmVY2 (W%Ue@IbFbɉKr FpAn"PZ1~' ˖eٵJHT>o1JjIU$Th6~/&NUJ &\*Něddjuh1Mr5PRO~/vb$QHCAd,i\<WÎ<eeښ3H" 7&W9 ]Tf1DKxh7Hقw,N VֲQml[iXh)-3-]n"8NB(Ř" '+?$_m5^T)'qvXir~foHXA~p6Pf>yl̲ᦍuR7iKC{ /i8zFUY}qX`?(TE]'EƆ3s)QS5Fj:'gV30RSZ]I*pјxbru \14^%UtX ~ӔLbT1%Z1)i}IǮzm֗<>:i|4x ލ,E&^V_ >X^S,ZK`/8kk3,^<oU2w^=%kgN )uĮ:o\XỮ$NJ[R{indqk\tk(2h!zhzY4MiܼNH Imwv[gAO!fG :$ow:MGt2[mBL/A,0Z]bNk2ލbrގ `[{F=fxkxF]="#QkVjߠޟ>w| 'yqhQІ< m#iUӪҧU4IBk OA=#5ϠFr7Yp E܍pс@G ~^;Bqy]$ss l䛡~Q; ~!%?Q+_H[6#ĎbB E>' Ǐb{;BHH[#^o5(ZP!r|A1#3jϊ:2BpᐝVl&+D+j% K~˄cnSm^9<Ńsgg3boQ,Y1),UA*S܀|8 V“X"U^LB7`d:(2WɌ˫ysc(5UQ2N JRdq;¸jerxXROe J>/lNƃIJ0:.vϊgr)f+jJc6AjvʊIjVs/( VsGdw[=QdO?$;I.gIi$ 'PL*/WRX9XFo&Ƭ4°.L@!01"=E B,BqBlbGrxB|=!N|wx_ {4!)8Gv;7nϮ"H*xAL"B :ٻXF. gw>G8HaTqFxq:N7 #$J_Ut0z` |IBJ4s0/SjzMb1YRMBL5g]a7Ͳݷnq%[5!&Xl+'Y|(brJ)"cz&z9C H~%# } ȅpXVA~w@4B^ɜ#?u5sl湫t__)[+v (}o_k~ sab&6)~jkYeu;*XlۙaL0Yj&0hl̛Bd *ro} nt)0c c,WCdLWQnwW~ܵ^<2Zg7D=Їsg7cNwL]IRD;b K/0tR#A~WJd:;Թ#%Ͳf67[l&Bub| 4}#)>q!_>4kf"*_-*ԘT-\!&NDP]YLłb[Qu:\bUp~ck.X`KԷկ7'b/ֈ.Q2fмtN1ľ}hzTCrol.:xV#].<-.UJFI@ ۩ђkcvkS6 C=vnaN2AbR:ĐD/ ~6P !BhXHcnG4/B3@0@к#âxlȅh nÛ(8.}ng a8|0x)ԯCٴ`Wtz`Sǻ{?e1Tf ¿)ScJ edHQJ  ;OVg9 r5[uAoʝA`C#z0-Fsx<AXÒ?\xGh޽M;zKgʎ ӟ٦r(+,};pyŰE~ aa!)x65`3M`m&WApX< -~PFB vHI֮.L5vq.u!B5\y>l3~ PWP[ՙ/@`zX=Ō۹8!+zpʹL{!"ßM@M&f5ea8ݾ[fL712I0f4@lyfbv*1,PQTB5|kH!Iq<|1\r\%vh ĈPhHbV}^YC;ξB'|{/?R{.Z{G/얃_ߧ~N}iVӰxEޜxY+xo- i{ k~ 9t ;1dz\EOCYg&@m!Gk̯coTONQ>T??oF|EDTlIf`6SmVţأ0.t :==;@_TRGΟ` >H΋͌GF|I"kc''>]g9={&)}qfKnoh[Abm4Xbc\aҌlvٙwhj1Ob36S d{!Db&KH, H~\M67%,-=JysC\UQ^ٌ@킄)i0fz4Dj{4XL`؜vheGns\Wo2@hF@4aH>VoUo"\j.蹟PrىK. 7pZy@l&6n3a ];,`)]@C)ivPx@9g((@|;aض?궿|4o[v/u颫+9ԵЮ/.">>N'/3? yd-K'\\hݸיW{vYˀ1_5,;\?\,V'匋3`mC9;`s C\Έ+?"r4SnY~ű2`t˓=S鉎 a${azDݫ*?M"0ܭ1dD1a0{ojܐܟ ç̯'ca"UA 0ro~!9tb>F[ j\^^nEN/ϓRn٨l9XntI,5.: ugM@ti. 
Y`5RfQɖG Mk!L=c|>'ч6$>ܱ nt\KGx->BAZY*1~v/񚜭foephcAtjbcixe;!͋i^퐆XqRJH1Z o[`POFcK܃(+& pN/`wb]tdk Irݯ'>gh3Qt_KEWݧ^{ ;NհJZ˹BFPh)>-qB10IvT ź?7-orq/Ӧv@cAΝ6&P (X- H^:摏v}i0eS)i@VB1hϩlPi=N] |;vA\aY$ca?E?Eû2s6gyaVs]%)u.C&BIxJxNt4:(!(fL;i(-@.ŝ@'pơj@k$rK&f hbY䦜6|ͫ~f%NC qoNZۣES;^? 㟩/ڼjZK zaSSV !_k}cԝD.{ 2˵(iqˍЂgL,ƽr `̮ieT *RU:2^,⎋cXn a$)Nl]N#в2 0":W6yxȞ}W6+N#J;=ZaKLπD<2 !2HcdZ@(`-chu l >Ixj:Bii_Mە&ű!8_ dX3p@F7dN`B2YYaȽi".ꊡrg޼[sv $0@{ӎ:q*AW;F<aQ7AwЅ[MXnms co~`ׯ{Ļ_5EniuU ':4 endstream endobj 91 0 obj 9072 endobj 93 0 obj <> stream uuid:FC44B33E-D641-7BDA-8C7D-388334CA1382 uuid:3A6BD1AB-DDA3-3067-F61C-80A2706C2382 2022-01-09T16:04:36+00:00 2022-01-09T16:04:36+00:00 Serif Affinity Designer 1.10.4 2022-01-09T16:04:36+00:00 PDFlib+PDI 9.3.1-i (macOS (x86_64)) false S4classObjects.pdf endstream endobj 92 0 obj 1509 endobj 94 0 obj <> endobj 76 0 obj <> stream xڭUnF}߯ %DzZ+2,7qa*[D gIvZ9I>;;93- 4iiYdaAQp V=I4 I+<I/iq \^;& V15HÔj4K[$Aػwڴ.$ut'-*$M{9d2ΛXP's qT`Duf%%{ʓ =v Ғd=Ay7!d7)OnSy=s;]y.:KIa!=I}! )۫8:6v]6Dx~|y}~UtWEU]՚ϊXe8:WUH$6Xy&= }N>]o[iì2˦=9./vE7_{ #&>`KK{r; }N)0~O7ơVJ(Z ȣ*ޠN^y ŹX'T;Cb%&}Vܛ^u,'Y3CX'g'`'.6G>2D:f+s&_7pZ|{)=7#)h|VCV}ru\7_1S2~=Zt NMDC+1P~-W6GzS|v ?D*` yL!*Y;!;G4`R@"GHt̋NOm1iA@\%~e-@}'])twpkdLz@` }Ca`4MyŜ! )~3\-0{7.WK:v)m4AH526~ ҿ+z_!Y6ɮd<eRcf;dRh"3?Q! 
endstream endobj 95 0 obj 968 endobj 96 0 obj <]/DecodeParms<>/Length 323 >> stream xeRJA}ww('FAD-RVFv  V(Ż(9gg!k޼7A_Qh Dj$ZUy&O06R/e0):>GL9#3m"\R -碾-lo NWwPp+"{FfcZR%Br5 4y Rд-6ӫQ"S _GK֩*1dql5oK[Ρ/#yop+5~E65P" ]B3o1` ;&,w c>N endstream endobj startxref 68847 %%EOF Luminescence/inst/doc/S4classObjects.pdf.asis0000644000176200001440000000030613231137116020662 0ustar liggesusers%\VignetteIndexEntry{S4-class Object Structure in 'Luminescence'} %\VignetteEngine{R.rsp::asis} %\VignetteKeyword{PDF} %\VignetteKeyword{HTML} %\VignetteKeyword{vignette} %\VignetteKeyword{package} Luminescence/inst/extdata/0000755000176200001440000000000014521210067015271 5ustar liggesusersLuminescence/inst/extdata/STRB87_1_unbleached.txt0000644000176200001440000000103313231137116021352 0ustar liggesusers# -------------------------------------------------------------------------------------- # CURVE FITTING TEST DATASET # Berger, G.W., Huntley, D.J., 1989. Test data for exponential fits. Ancient TL 7, 43-46. # # SAMPLE: STRB87 - 1 (unbbleached) # -------------------------------------------------------------------------------------- 0 20522.2 0 19373.6 0 21040.6 0 18899.1 1 50382.5 1 48570.2 1 49529.5 2 77706.6 2 75291.3 2 74563.8 4 111547.5 4 113899.1 4 109461.1 8 164564.9 8 151504.2 8 168042.1 16 204726.5 16 201964.3 16 193457.6 Luminescence/inst/extdata/QNL84_2_unbleached.txt0000644000176200001440000000075113231137116021236 0ustar liggesusers# -------------------------------------------------------------------------------------- # CURVE FITTING TEST DATASET # Berger, G.W., Huntley, D.J., 1989. Test data for exponential fits. Ancient TL 7, 43-46. 
# # SAMPLE: QNL84 - 2 (Unbleached) # -------------------------------------------------------------------------------------- 0 38671 0 40646 0 38149 0 35836 120 65931 120 67887 120 66133 240 82496 240 86708 240 86580 480 110978 480 113807 480 114192 480 109652 960 130373 960 137789Luminescence/inst/extdata/BINfile_V8.binx0000644000176200001440000000570614264017373020021 0ustar liggesusersBT 607+Main Measurement Middle Grain SachsenLoesseExampleData.BINfileDataDefault191432060920]C@\C A                          #-++,53*8(1-UBHCSXZNQJlWnc{lxHBT 607+Main Measurement Middle Grain SachsenLoesseExampleData.BINfileDataDefault191611060920]C@\C A                            ) ' %&#*1)$<3+)017K5LC;=XL_Ue)Luminescence/inst/extdata/QNL84_2_bleached.txt0000644000176200001440000000070513231137116020672 0ustar liggesusers# -------------------------------------------------------------------------------------- # CURVE FITTING TEST DATASET # Berger, G.W., Huntley, D.J., 1989. Test data for exponential fits. Ancient TL 7, 43-46. # # SAMPLE: QNL84 - 2 (bleached) # -------------------------------------------------------------------------------------- 0 20766 0 21393 0 22493 120 31290 120 33779 240 43221 240 43450 240 41427 480 51804 480 59555 480 54013 960 75748 960 76613Luminescence/inst/extdata/STRB87_1_bleached.txt0000644000176200001440000000076013231137116021015 0ustar liggesusers# -------------------------------------------------------------------------------------- # CURVE FITTING TEST DATASET # Berger, G.W., Huntley, D.J., 1989. Test data for exponential fits. Ancient TL 7, 43-46. 
# # SAMPLE: STRB87 - 1 (bleached) # -------------------------------------------------------------------------------------- 0 11814.6 0 11587.8 0 11708.6 1 26645.2 1 26445.2 1 26368.6 2 41487.1 2 39125.1 2 40582.5 4 61532.1 4 57023.6 8 93015.8 8 87907.7 8 87655.2 16 107618.3 16 110394.02 Luminescence/inst/extdata/XSYG_file.xsyg0000644000176200001440000016250313232574234020014 0ustar liggesusers 0.1,6;0.2,6;0.3,5;0.4,5;0.5,3;0.6,5;0.7,0;0.8,2;0.9,10;1,3;1.1,11;1.2,7;1.3,4;1.4,9;1.5,6;1.6,7;1.7,6;1.8,3;1.9,5;2,5;2.1,0;2.2,7;2.3,5;2.4,11;2.5,5;2.6,9;2.7,10;2.8,6;2.9,6;3,1;3.1,7;3.2,5;3.3,4;3.4,1;3.5,4;3.6,3;3.7,5;3.8,7;3.9,3;4,8;4.1,3;4.2,1;4.3,6;4.4,10;4.5,3;4.6,4;4.7,4;4.8,8;4.9,4;5,1;5.1,11;5.2,5;5.3,3;5.4,1;5.5,2;5.6,4;5.7,4;5.8,4;5.9,5;6,4;6.1,6;6.2,3;6.3,3;6.4,2;6.5,8;6.6,7;6.7,4;6.8,3;6.9,6;7,5;7.1,2;7.2,5;7.3,3;7.4,3;7.5,3;7.6,9;7.7,7;7.8,7;7.9,3;8,6;8.1,11;8.2,6;8.3,5;8.4,8;8.5,2;8.6,3;8.7,7;8.8,2;8.9,2;9,4;9.1,6;9.2,6;9.3,2;9.4,5;9.5,9;9.6,6;9.7,7;9.8,3;9.9,4;10,6;10.1,6;10.2,13;10.3,7;10.4,8;10.5,7;10.6,4;10.7,5;10.8,9;10.9,4;11,8;11.1,3;11.2,6;11.3,2;11.4,8;11.5,3;11.6,8;11.7,7;11.8,4;11.9,5;12,5;12.1,5;12.2,6;12.3,4;12.4,1;12.5,5;12.6,1;12.7,3;12.8,4;12.9,12;13,2;13.1,4;13.2,13;13.3,1;13.4,5;13.5,4;13.6,7;13.7,6;13.8,6;13.9,3;14,3;14.1,6;14.2,9;14.3,5;14.4,7;14.5,2;14.6,4;14.7,1;14.8,3;14.9,9;15,8;15.1,4;15.2,2;15.3,8;15.4,4;15.5,7;15.6,9;15.7,3;15.8,7;15.9,4;16,5;16.1,1;16.2,4;16.3,8;16.4,3;16.5,6;16.6,2;16.7,4;16.8,3;16.9,4;17,7;17.1,2;17.2,3;17.3,4;17.4,3;17.5,9;17.6,5;17.7,2;17.8,1;17.9,14;18,2;18.1,1;18.2,4;18.3,6;18.4,6;18.5,11;18.6,11;18.7,10;18.8,4;18.9,5;19,8;19.1,0;19.2,2;19.3,4;19.4,3;19.5,6;19.6,2;19.7,9;19.8,2;19.9,2;20,5;20.1,7;20.2,1;20.3,7;20.4,4;20.5,4;20.6,4;20.7,6;20.8,3;20.9,2;21,4;21.1,4;21.2,6;21.3,6;21.4,5;21.5,5;21.6,2;21.7,10;21.8,4;21.9,7;22,8;22.1,6;22.2,6;22.3,7;22.4,10;22.5,4;22.6,7;22.7,4;22.8,5;22.9,8;23,8;23.1,2;23.2,2;23.3,2;23.4,11;23.5,6;23.6,3;23.7,4;23.8,4;23.9,3;24,5;24.1,10;24.2,4;24.3,2;24.4,3;24.5,6;24.6,6;24.7,6;24.
8,10;24.9,3;25,5;25.1,10;25.2,8;25.3,4;25.4,5;25.5,7;25.6,5;25.7,8;25.8,7;25.9,1;26,7;26.1,4;26.2,7;26.3,5;26.4,8;26.5,13;26.6,4;26.7,8;26.8,4;26.9,7;27,6;27.1,9;27.2,2;27.3,8;27.4,5;27.5,6;27.6,10;27.7,3;27.8,10;27.9,12;28,10;28.1,8;28.2,10;28.3,3;28.4,9;28.5,13;28.6,7;28.7,5;28.8,12;28.9,5;29,7;29.1,9;29.2,9;29.3,9;29.4,12;29.5,7;29.6,8;29.7,10;29.8,12;29.9,6;30,13;30.1,11;30.2,7;30.3,14;30.4,18;30.5,18;30.6,18;30.7,20;30.8,19;30.9,19;31,17;31.1,11;31.2,20;31.3,17;31.4,14;31.5,25;31.6,23;31.7,21;31.8,20;31.9,16;32,20;32.1,25;32.2,28;32.3,30;32.4,25;32.5,31;32.6,23;32.7,19;32.8,25;32.9,30;33,36;33.1,31;33.2,29;33.3,30;33.4,40;33.5,35;33.6,34;33.7,47;33.8,43;33.9,45;34,36;34.1,56;34.2,56;34.3,63;34.4,61;34.5,47;34.6,61;34.7,66;34.8,76;34.9,73;35,80;35.1,79;35.2,85;35.3,81;35.4,79;35.5,86;35.6,84;35.7,97;35.8,91;35.9,103;36,112;36.1,112;36.2,105;36.3,107;36.4,128;36.5,117;36.6,139;36.7,146;36.8,156;36.9,151;37,145;37.1,136;37.2,137;37.3,185;37.4,182;37.5,173;37.6,189;37.7,197;37.8,223;37.9,209;38,208;38.1,257;38.2,232;38.3,218;38.4,279;38.5,263;38.6,254;38.7,290;38.8,275;38.9,289;39,330;39.1,328;39.2,309;39.3,353;39.4,361;39.5,361;39.6,353;39.7,338;39.8,413;39.9,400;40,417;40.1,431;40.2,473;40.3,482;40.4,446;40.5,470;40.6,502;40.7,526;40.8,535;40.9,508;41,529;41.1,498;41.2,572;41.3,552;41.4,581;41.5,593;41.6,650;41.7,668;41.8,613;41.9,653;42,680;42.1,659;42.2,729;42.3,715;42.4,798;42.5,765;42.6,742;42.7,773;42.8,852;42.9,834;43,844;43.1,918;43.2,886;43.3,885;43.4,977;43.5,952;43.6,1013;43.7,967;43.8,1043;43.9,1007;44,1005;44.1,1007;44.2,1100;44.3,1094;44.4,1117;44.5,1134;44.6,1220;44.7,1236;44.8,1188;44.9,1254;45,1242;45.1,1299;45.2,1334;45.3,1352;45.4,1406;45.5,1339;45.6,1413;45.7,1439;45.8,1504;45.9,1488;46,1579;46.1,1655;46.2,1643;46.3,1687;46.4,1628;46.5,1725;46.6,1743;46.7,1800;46.8,1831;46.9,1871;47,1870;47.1,1905;47.2,1911;47.3,2007;47.4,2037;47.5,2087;47.6,2133;47.7,2084;47.8,2157;47.9,2195;48,2256;48.1,2190;48.2,2360;48.3,2348;48.4,2260;48.5,2329;48.6,2317;4
8.7,2340;48.8,2346;48.9,2402;49,2385;49.1,2400;49.2,2419;49.3,2413;49.4,2370;49.5,2468;49.6,2404;49.7,2472;49.8,2405;49.9,2357;50,2423;50.1,2418;50.2,2535;50.3,2503;50.4,2436;50.5,2350;50.6,2484;50.7,2358;50.8,2454;50.9,2286;51,2448;51.1,2422;51.2,2455;51.3,2377;51.4,2305;51.5,2317;51.6,2366;51.7,2329;51.8,2334;51.9,2310;52,2321;52.1,2288;52.2,2250;52.3,2235;52.4,2216;52.5,2221;52.6,2176;52.7,2198;52.8,2123;52.9,2174;53,2219;53.1,2176;53.2,2146;53.3,2113;53.4,2153;53.5,2098;53.6,2093;53.7,2034;53.8,2087;53.9,2081;54,2004;54.1,1998;54.2,1961;54.3,1935;54.4,1895;54.5,1904;54.6,1838;54.7,1843;54.8,1852;54.9,1856;55,1822;55.1,1857;55.2,1766;55.3,1728;55.4,1748;55.5,1777;55.6,1721;55.7,1770;55.8,1669;55.9,1707;56,1685;56.1,1651;56.2,1654;56.3,1652;56.4,1576;56.5,1594;56.6,1566;56.7,1626;56.8,1557;56.9,1592 0,25;47,260;57,260;77,60;197,60 0.2,27.0804882049561;0.4,27.0798873901367;0.6,27.0809726715088;0.8,27.0831527709961;1,27.079402923584;1.2,27.0986099243164;1.4,27.1498222351074;1.6,27.4493923187256;1.8,27.7364311218262;2,28.1406993865967;2.2,29.3179683685303;2.4,30.0697174072266;2.6,30.8972682952881;2.8,32.6637191772461;3,33.5518417358398;3.2,34.4185752868652;3.4,36.0412330627441;3.6,36.7972145080566;3.8,37.5352096557617;4,38.9637413024902;4.2,39.6741523742676;4.4,40.3962631225586;4.6,41.8783111572266;4.8,42.6388664245605;5,43.4063034057617;5.2,44.9716835021973;5.4,45.7557830810547;5.6,46.534740447998;5.8,48.0852394104004;6,48.8538589477539;6.2,49.6121978759766;6.4,51.1420059204102;6.6,51.8980140686035;6.8,52.6662635803223;7,54.2057876586914;7.2,54.9717025756836;7.4,55.7467079162598;7.6,57.299144744873;7.8,58.0755920410156;8,58.8502769470215;8.2,60.3935203552246;8.4,61.1761207580566;8.6,61.9391098022461;8.8,63.4827728271484;9,64.2569732666016;9.2,65.0282821655273;9.4,66.5849609375;9.6,67.3621063232422;9.8,68.1395645141602;10,69.6889266967773;10.2,70.4668197631836;10.4,71.241340637207;10.6,72.7888565063477;10.8,73.569206237793;11,74.3448333740234;11.2,75.892333984375;11.
4,76.6711959838867;11.6,77.4519729614258;11.8,79.0002975463867;12,79.7770538330078;12.2,80.5531387329102;12.4,82.1066741943359;12.6,82.8803253173828;12.8,83.6636352539063;13,85.2125549316406;13.2,85.9880065917969;13.4,86.7693023681641;13.6,88.3218078613281;13.8,89.0941390991211;14,89.8683547973633;14.2,91.4216079711914;14.4,92.1995391845703;14.6,92.979362487793;14.8,94.5301284790039;15,95.3131713867188;15.2,96.0798416137695;15.4,97.6346435546875;15.6,98.4167404174805;15.8,99.1912155151367;16,100.738433837891;16.2,101.516716003418;16.4,102.289123535156;16.6,103.838607788086;16.8,104.619148254395;17,105.393913269043;17.2,106.942420959473;17.4,107.717399597168;17.6,108.493896484375;17.8,110.046989440918;18,110.817626953125;18.2,111.597969055176;18.4,113.14608001709;18.6,113.917327880859;18.8,114.693809509277;19,116.238647460938;19.2,117.017677307129;19.4,117.788551330566;19.6,119.33903503418;19.8,120.111328125;20,120.892349243164;20.2,122.442260742188;20.4,123.205062866211;20.6,123.975341796875;20.8,125.522903442383;21,126.289619445801;21.2,127.067794799805;21.4,128.617599487305;21.6,129.396072387695;21.8,130.169692993164;22,131.714385986328;22.2,132.482055664063;22.4,133.251129150391;22.6,134.79736328125;22.8,135.571197509766;23,136.341293334961;23.2,137.880615234375;23.4,138.646469116211;23.6,139.423828125;23.8,140.970932006836;24,141.745162963867;24.2,142.518051147461;24.4,144.061538696289;24.6,144.825424194336;24.8,145.598175048828;25,147.139190673828;25.2,147.901931762695;25.4,148.677780151367;25.6,150.212387084961;25.8,150.982879638672;26,151.75358581543;26.2,153.292510986328;26.4,154.066223144531;26.6,154.832290649414;26.8,156.379409790039;27,157.139953613281;27.2,157.91096496582;27.4,159.454376220703;27.6,160.217498779297;27.8,160.987350463867;28,162.522369384766;28.2,163.289428710938;28.4,164.056579589844;28.6,165.584976196289;28.8,166.359237670898;29,167.126953125;29.2,168.658935546875;29.4,169.438171386719;29.6,170.199920654297;29.8,171.738403320313;30,172.5
0244140625;30.2,173.267990112305;30.4,174.79801940918;30.6,175.5595703125;30.8,176.326644897461;31,177.857833862305;31.2,178.626693725586;31.4,179.398071289063;31.6,180.936935424805;31.8,181.697387695313;32,182.463363647461;32.2,183.990783691406;32.4,184.752243041992;32.6,185.516082763672;32.8,187.049896240234;33,187.808700561523;33.2,188.581573486328;33.4,190.114852905273;33.6,190.872268676758;33.8,191.641326904297;34,193.167633056641;34.2,193.924880981445;34.4,194.690124511719;34.6,196.223922729492;34.8,196.992218017578;35,197.74934387207;35.2,199.275482177734;35.4,200.034881591797;35.6,200.803588867188;35.8,202.325668334961;36,203.088180541992;36.2,203.857315063477;36.4,205.385589599609;36.6,206.148376464844;36.8,206.910629272461;37,208.424377441406;37.2,209.183807373047;37.4,209.955673217773;37.6,211.479721069336;37.8,212.24462890625;38,213.007949829102;38.2,214.533065795898;38.4,215.2900390625;38.6,216.052368164063;38.8,217.570816040039;39,218.326766967773;39.2,219.090530395508;39.4,220.613418579102;39.6,221.377105712891;39.8,222.141738891602;40,223.659576416016;40.2,224.421234130859;40.4,225.183578491211;40.6,226.702407836914;40.8,227.457229614258;41,228.221450805664;41.2,229.746429443359;41.4,230.503631591797;41.6,231.264053344727;41.8,232.785171508789;42,233.552597045898;42.2,234.305999755859;42.4,235.822235107422;42.6,236.583145141602;42.8,237.334365844727;43,238.853652954102;43.2,239.620758056641;43.4,240.379119873047;43.6,241.898056030273;43.8,242.660369873047;44,243.413619995117;44.2,244.93571472168;44.4,245.695297241211;44.6,246.450881958008;44.8,247.961395263672;45,248.722198486328;45.2,249.476806640625;45.4,250.992065429688;45.6,251.753646850586;45.8,252.515777587891;46,254.033126831055;46.2,254.793487548828;46.4,255.552825927734;46.6,257.073181152344;46.8,257.824768066406;47,258.58056640625;47.2,260.091033935547;47.4,260.847320556641;47.6,261.493530273438;47.8,262.138610839844;48,262.115509033203;48.2,261.912475585938;48.4,261.2255859375;48.6,260.858
612060547;48.8,260.529357910156;49,260.125823974609;49.2,260.076873779297;49.4,260.136444091797;49.6,260.468597412109;49.8,260.676849365234;50,260.878051757813;50.2,261.156524658203;50.4,261.207946777344;50.6,261.216674804688;50.8,261.105773925781;51,261.019134521484;51.2,260.9423828125;51.4,260.819061279297;51.6,260.793029785156;51.8,260.791900634766;52,260.858612060547;52.2,260.905822753906;52.4,260.957702636719;52.6,261.050323486328;52.8,261.079284667969;53,261.092193603516;53.2,261.094604492188;53.4,261.093994140625;53.6,261.075714111328;53.8,261.056945800781;54,261.048553466797;54.2,261.042602539063;54.4,261.053253173828;54.6,261.062683105469;54.8,261.072814941406;55,261.088165283203;55.2,261.101715087891;55.4,261.113525390625;55.6,261.12939453125;55.8,261.126953125;56,261.135833740234;56.2,261.135314941406;56.4,261.128875732422;56.6,261.134704589844;56.8,261.128875732422;57,261.12939453125;57.2,261.134704589844;57.4,261.099945068359;57.6,260.892761230469;57.8,260.446807861328;58,259.040069580078;58.2,258.224456787109;58.4,256.755523681641;58.6,253.193466186523;58.8,251.453521728516;59,249.752563476563;59.2,246.430221557617;59.4,244.806564331055;59.6,243.209350585938;59.8,240.094573974609;60,238.556488037109;60.2,237.046081542969;60.4,234.089981079102;60.6,232.649429321289;60.8,231.218368530273;61,228.399658203125;61.2,227.032424926758;61.4,225.676895141602;61.6,223.024215698242;61.8,221.716033935547;62,220.423355102539;62.2,217.906204223633;62.4,216.662475585938;62.6,215.430816650391;62.8,212.999862670898;63,211.818008422852;63.2,210.635437011719;63.4,208.332061767578;63.6,207.189147949219;63.8,206.061294555664;64,203.851501464844;64.2,202.75505065918;64.4,201.680480957031;64.6,199.544631958008;64.8,198.490310668945;65,197.442199707031;65.2,195.382476806641;65.4,194.368988037109;65.6,193.359420776367;65.8,191.373626708984;66,190.388824462891;66.2,189.414733886719;66.4,187.494064331055;66.6,186.543563842773;66.8,185.606567382813;67,183.743392944336;67.2,182.824
05090332;67.4,181.912994384766;67.6,180.105880737305;67.8,179.215347290039;68,178.338424682617;68.2,176.585922241211;68.4,175.719039916992;68.6,174.863342285156;68.8,173.159088134766;69,172.325500488281;69.2,171.495529174805;69.4,169.844390869141;69.6,169.031860351563;69.8,168.223068237305;70,166.627349853516;70.2,165.827835083008;70.4,165.044616699219;70.6,163.475372314453;70.8,162.702651977539;71,161.936935424805;71.2,160.411987304688;71.4,159.660614013672;71.6,158.900985717773;71.8,157.422973632813;72,156.683319091797;72.2,155.945175170898;72.4,154.504684448242;72.6,153.785354614258;72.8,153.069152832031;73,151.66162109375;73.2,150.967590332031;73.4,150.26628112793;73.6,148.895736694336;73.8,148.216705322266;74,147.534896850586;74.2,146.172744750977;74.4,145.513717651367;74.6,144.855392456055;74.8,143.533325195313;75,142.88916015625;75.2,142.232864379883;75.4,140.955703735352;75.6,140.318008422852;75.8,139.690628051758;76,138.439239501953;76.2,137.815124511719;76.4,137.202072143555;76.6,135.979049682617;76.8,135.376571655273;77,134.767471313477;77.2,133.564407348633;77.4,132.979965209961;77.6,132.38737487793;77.8,131.223968505859;78,130.640701293945;78.2,130.053131103516;78.4,128.916748046875;78.6,128.349349975586;78.8,127.783264160156;79,126.668235778809;79.2,126.114944458008;79.4,125.563949584961;79.6,124.461952209473;79.8,123.927589416504;80,123.386833190918;80.2,122.321189880371;80.4,121.794692993164;80.6,121.26774597168;80.8,120.214950561523;81,119.701385498047;81.2,119.188995361328;81.4,118.16577911377;81.6,117.657684326172;81.8,117.153617858887;82,116.160949707031;82.2,115.662902832031;82.4,115.173240661621;82.6,114.19718170166;82.8,113.712287902832;83,113.228805541992;83.2,112.279266357422;83.4,111.799919128418;83.6,111.335151672363;83.8,110.401062011719;84,109.93384552002;84.2,109.475608825684;84.4,108.56623840332;84.6,108.111137390137;84.8,107.660675048828;85,106.757667541504;85.2,106.318016052246;85.4,105.877777099609;85.6,105.000068664551;85.8,104.577
430725098;86,104.13655090332;86.2,103.279029846191;86.4,102.856719970703;86.6,102.432334899902;86.8,101.588821411133;87,101.180786132813;87.2,100.758453369141;87.4,99.9411315917969;87.6,99.5317993164063;87.8,99.126823425293;88,98.3296432495117;88.2,97.9283752441406;88.4,97.5387878417969;88.6,96.7560195922852;88.8,96.3656845092773;89,95.9857482910156;89.2,95.2189788818359;89.4,94.8394241333008;89.6,94.453857421875;89.8,93.7114715576172;90,93.3393478393555;90.2,92.9710845947266;90.4,92.2304916381836;90.6,91.8669128417969;90.8,91.5017547607422;91,90.786979675293;91.2,90.4297180175781;91.4,90.0780181884766;91.6,89.3754730224609;91.8,89.0322799682617;92,88.686393737793;92.2,87.9870910644531;92.4,87.6550979614258;92.6,87.3139038085938;92.8,86.6430816650391;93,86.3086242675781;93.2,85.9720001220703;93.4,85.3243255615234;93.6,84.9928359985352;93.8,84.6707305908203;94,84.0191116333008;94.2,83.7049789428711;94.4,83.3874282836914;94.6,82.7576904296875;94.8,82.4469833374023;95,82.1352081298828;95.2,81.5245208740234;95.4,81.2173767089844;95.6,80.9107437133789;95.8,80.3119659423828;96,80.0120468139648;96.2,79.7183074951172;96.4,79.1320114135742;96.6,78.8399353027344;96.8,78.5524520874023;97,77.9741058349609;97.2,77.6871719360352;97.4,77.4086151123047;97.6,76.8514633178711;97.8,76.5680618286133;98,76.2945175170898;98.2,75.7460098266602;98.4,75.472038269043;98.6,75.2009201049805;98.8,74.6652297973633;99,74.3968658447266;99.2,74.132453918457;99.4,73.6074829101563;99.6,73.3448486328125;99.8,73.0854263305664;100,72.5733871459961;100.2,72.3168411254883;100.4,72.0712356567383;100.6,71.5643768310547;100.8,71.3123168945313;101,71.0647048950195;101.2,70.5778656005859;101.4,70.3347015380859;101.6,70.0816421508789;101.8,69.6113662719727;102,69.3711090087891;102.2,69.1319580078125;102.4,68.6619338989258;102.6,68.4288482666016;102.8,68.1968841552734;103,67.7358093261719;103.2,67.507698059082;103.4,67.2813034057617;103.6,66.8351745605469;103.8,66.6088256835938;104,66.3952331542969;104.2,65.9519
195556641;104.4,65.7383651733398;104.6,65.5165023803711;104.8,65.0887298583984;105,64.876335144043 30.1,2263;30.2,2328;30.3,2229;30.4,2069;30.5,2142;30.6,1923;30.7,1812;30.8,1858;30.9,1698;31,1660;31.1,1584;31.2,1537;31.3,1457;31.4,1388;31.5,1322;31.6,1331;31.7,1260;31.8,1208;31.9,1146;32,1103;32.1,1123;32.2,1033;32.3,1004;32.4,1091;32.5,953;32.6,961;32.7,876;32.8,877;32.9,885;33,859;33.1,775;33.2,796;33.3,731;33.4,735;33.5,740;33.6,733;33.7,710;33.8,681;33.9,670;34,608;34.1,625;34.2,630;34.3,617;34.4,584;34.5,567;34.6,530;34.7,570;34.8,553;34.9,476;35,503;35.1,517;35.2,513;35.3,472;35.4,470;35.5,477;35.6,418;35.7,491;35.8,461;35.9,435;36,404;36.1,424;36.2,391;36.3,423;36.4,410;36.5,426;36.6,389;36.7,358;36.8,372;36.9,354;37,406;37.1,366;37.2,374;37.3,358;37.4,338;37.5,365;37.6,355;37.7,372;37.8,379;37.9,358;38,338;38.1,363;38.2,371;38.3,369;38.4,323;38.5,324;38.6,321;38.7,360;38.8,321;38.9,297;39,295;39.1,336;39.2,293;39.3,283;39.4,301;39.5,304;39.6,287;39.7,295;39.8,298;39.9,332;40,305;40.1,284;40.2,319;40.3,292;40.4,274;40.5,289;40.6,286;40.7,254;40.8,288;40.9,258;41,271;41.1,285;41.2,283;41.3,299;41.4,283;41.5,316;41.6,266;41.7,286;41.8,282;41.9,292;42,269;42.1,269;42.2,277;42.3,288;42.4,312;42.5,247;42.6,260;42.7,273;42.8,265;42.9,256;43,272;43.1,259;43.2,231;43.3,287;43.4,268;43.5,247;43.6,267;43.7,258;43.8,257;43.9,279;44,277;44.1,278;44.2,283;44.3,267;44.4,249;44.5,247;44.6,253;44.7,256;44.8,256;44.9,281;45,246;45.1,232;45.2,221;45.3,263;45.4,233;45.5,251;45.6,282;45.7,257;45.8,245;45.9,228;46,263;46.1,240;46.2,263;46.3,236;46.4,269;46.5,261;46.6,252;46.7,251;46.8,224;46.9,210;47,251;47.1,252;47.2,250;47.3,255;47.4,254;47.5,228;47.6,241;47.7,266;47.8,264;47.9,257;48,237;48.1,222;48.2,243;48.3,248;48.4,221;48.5,263;48.6,233;48.7,239;48.8,252;48.9,232;49,250;49.1,235;49.2,230;49.3,236;49.4,255;49.5,248;49.6,212;49.7,234;49.8,238;49.9,265;50,256;50.1,245;50.2,241;50.3,237;50.4,244;50.5,242;50.6,228;50.7,225;50.8,214;50.9,240;51,248;51.1,255;51.2,231;51.3,202;51
.4,235;51.5,220;51.6,239;51.7,253;51.8,281;51.9,247;52,215;52.1,230;52.2,214;52.3,254;52.4,220;52.5,256;52.6,259;52.7,246;52.8,234;52.9,218;53,204;53.1,233;53.2,227;53.3,236;53.4,229;53.5,225;53.6,222;53.7,236;53.8,247;53.9,228;54,234;54.1,256;54.2,265;54.3,240;54.4,240;54.5,230;54.6,240;54.7,236;54.8,229;54.9,200;55,219;55.1,220;55.2,221;55.3,212;55.4,224;55.5,235;55.6,220;55.7,242;55.8,247;55.9,222;56,228;56.1,239;56.2,226;56.3,213;56.4,218;56.5,247;56.6,236;56.7,227;56.8,235;56.9,238;57,192;57.1,247;57.2,228;57.3,220;57.4,243;57.5,225;57.6,226;57.7,235;57.8,200;57.9,237;58,200;58.1,223;58.2,227;58.3,214;58.4,193;58.5,221;58.6,225;58.7,226;58.8,238;58.9,267;59,236;59.1,233;59.2,222;59.3,210;59.4,226;59.5,209;59.6,220;59.7,215;59.8,239;59.9,224;60,259;60.1,236;60.2,240;60.3,226;60.4,237;60.5,213;60.6,218;60.7,234;60.8,220;60.9,219;61,201;61.1,235;61.2,196;61.3,202;61.4,216;61.5,198;61.6,229;61.7,249;61.8,211;61.9,225;62,224;62.1,204;62.2,214;62.3,213;62.4,209;62.5,201;62.6,212;62.7,219;62.8,216;62.9,191;63,245;63.1,209;63.2,244;63.3,229;63.4,224;63.5,211;63.6,226;63.7,227;63.8,221;63.9,228;64,229;64.1,242;64.2,218;64.3,198;64.4,207;64.5,216;64.6,209;64.7,239;64.8,241;64.9,220;65,196;65.1,211;65.2,239;65.3,226;65.4,219;65.5,238;65.6,226;65.7,246;65.8,231;65.9,253;66,220;66.1,209;66.2,206;66.3,209;66.4,215;66.5,220;66.6,220;66.7,216;66.8,242;66.9,211;67,224;67.1,232;67.2,226;67.3,239;67.4,232;67.5,222;67.6,238;67.7,235;67.8,218;67.9,226;68,223;68.1,196;68.2,223;68.3,225;68.4,237;68.5,236;68.6,215;68.7,203;68.8,229;68.9,223;69,214;69.1,213;69.2,207;69.3,229;69.4,222;69.5,210;69.6,226;69.7,224;69.8,222;69.9,241;70,228;70.1,220;70.2,224;70.3,226;70.4,229;70.5,213;70.6,207;70.7,236;70.8,218;70.9,213;71,206;71.1,212;71.2,219;71.3,213;71.4,215;71.5,207;71.6,231;71.7,201;71.8,238;71.9,213;72,232;72.1,223;72.2,242;72.3,185;72.4,229;72.5,212;72.6,228;72.7,183;72.8,216;72.9,224;73,200;73.1,229;73.2,193;73.3,233;73.4,206;73.5,218;73.6,194;73.7,217;73.8,226;73.9,214;74,198;74.1,
228;74.2,226;74.3,227;74.4,217;74.5,230;74.6,212;74.7,222;74.8,230;74.9,218;75,205;75.1,239;75.2,201;75.3,218;75.4,215;75.5,208;75.6,198;75.7,208;75.8,233;75.9,199;76,231;76.1,226;76.2,191;76.3,219;76.4,193;76.5,223;76.6,216;76.7,236;76.8,230;76.9,217;77,223;77.1,211;77.2,247;77.3,204;77.4,232;77.5,260;77.6,237;77.7,219;77.8,215;77.9,225;78,224;78.1,225;78.2,226;78.3,228;78.4,243;78.5,235;78.6,190;78.7,221;78.8,191;78.9,237;79,191;79.1,229;79.2,213;79.3,214;79.4,213;79.5,245;79.6,218;79.7,245;79.8,220;79.9,214;80,222;80.1,201;80.2,231;80.3,243;80.4,230;80.5,205;80.6,245;80.7,220;80.8,215;80.9,220;81,209;81.1,242;81.2,240;81.3,212;81.4,196;81.5,212;81.6,221;81.7,202;81.8,247;81.9,211;82,237;82.1,212;82.2,208;82.3,211;82.4,219;82.5,205;82.6,223;82.7,224;82.8,209;82.9,218;83,223;83.1,228;83.2,250;83.3,228;83.4,211;83.5,210;83.6,216;83.7,183;83.8,219;83.9,203;84,205;84.1,210;84.2,191;84.3,206;84.4,197;84.5,212;84.6,242;84.7,211;84.8,218;84.9,223;85,209;85.1,209;85.2,200;85.3,190;85.4,234;85.5,200;85.6,198;85.7,227;85.8,202;85.9,229;86,213;86.1,248;86.2,219;86.3,213;86.4,213;86.5,197;86.6,221;86.7,223;86.8,220;86.9,216;87,207;87.1,230;87.2,200;87.3,232;87.4,208;87.5,222;87.6,170;87.7,231;87.8,202;87.9,225;88,238;88.1,213;88.2,192;88.3,212;88.4,206;88.5,225;88.6,202;88.7,247;88.8,204;88.9,206;89,189;89.1,224;89.2,217;89.3,227;89.4,222;89.5,214;89.6,196;89.7,250;89.8,214;89.9,216;90,214;90.1,231;90.2,206;90.3,223;90.4,221;90.5,200;90.6,200;90.7,243;90.8,199;90.9,215;91,218;91.1,187;91.2,200;91.3,244;91.4,213;91.5,213;91.6,201;91.7,203;91.8,195;91.9,224;92,222;92.1,197;92.2,228;92.3,220;92.4,210;92.5,185;92.6,202;92.7,188;92.8,233;92.9,220;93,204;93.1,237;93.2,241;93.3,225;93.4,233;93.5,223;93.6,220;93.7,212;93.8,196;93.9,228;94,198;94.1,191;94.2,189;94.3,184;94.4,224;94.5,218;94.6,224;94.7,198;94.8,216;94.9,193;95,204;95.1,223;95.2,197;95.3,223;95.4,186;95.5,205;95.6,221;95.7,240;95.8,173;95.9,240;96,230;96.1,201;96.2,219;96.3,234;96.4,222;96.5,235;96.6,221;96.7,214;96.8,2
07;96.9,196;97,206;97.1,202;97.2,200;97.3,194;97.4,195;97.5,232;97.6,207;97.7,224;97.8,226;97.9,221;98,198;98.1,209;98.2,217;98.3,192;98.4,198;98.5,198;98.6,214;98.7,208;98.8,215;98.9,201;99,219;99.1,216;99.2,211;99.3,215;99.4,207;99.5,212;99.6,255;99.7,223;99.8,220;99.9,228;100,226;100.1,228;100.2,183;100.3,204;100.4,222;100.5,216;100.6,207;100.7,228;100.8,231;100.9,213;101,217;101.1,199;101.2,206;101.3,231;101.4,208;101.5,210;101.6,197;101.7,225;101.8,197;101.9,219;102,212;102.1,223;102.2,212;102.3,219;102.4,218;102.5,210;102.6,203;102.7,215;102.8,217;102.9,217;103,224;103.1,227;103.2,210;103.3,202;103.4,240;103.5,208;103.6,206;103.7,237;103.8,217;103.9,219;104,216;104.1,216;104.2,210;104.3,229;104.4,201;104.5,216;104.6,212;104.7,200;104.8,191;104.9,203;105,201;105.1,193;105.2,214;105.3,208;105.4,217;105.5,191;105.6,205;105.7,230;105.8,201;105.9,205;106,204;106.1,233;106.2,202;106.3,230;106.4,215;106.5,206;106.6,232;106.7,214;106.8,196;106.9,200;107,238;107.1,184;107.2,225;107.3,221;107.4,198;107.5,191;107.6,225;107.7,218;107.8,222;107.9,237;108,226;108.1,199;108.2,206;108.3,207;108.4,210;108.5,194;108.6,203;108.7,206;108.8,187;108.9,201;109,238;109.1,210;109.2,215;109.3,179;109.4,223;109.5,198;109.6,204;109.7,230;109.8,214;109.9,205;110,201;110.1,200;110.2,214;110.3,225;110.4,206;110.5,217;110.6,199;110.7,210;110.8,205;110.9,190;111,219;111.1,213;111.2,183;111.3,224;111.4,195;111.5,191;111.6,207;111.7,197;111.8,195;111.9,214;112,198;112.1,232;112.2,232;112.3,215;112.4,219;112.5,217;112.6,214;112.7,194;112.8,194;112.9,207;113,208;113.1,202;113.2,206;113.3,200;113.4,216;113.5,219;113.6,214;113.7,193;113.8,229;113.9,205;114,176;114.1,183;114.2,199;114.3,219;114.4,210;114.5,209;114.6,216;114.7,210;114.8,212;114.9,219;115,212;115.1,193;115.2,178;115.3,231;115.4,182;115.5,200;115.6,207;115.7,182;115.8,206;115.9,214;116,201;116.1,186;116.2,195;116.3,231;116.4,217;116.5,219;116.6,204;116.7,210;116.8,211;116.9,219;117,215;117.1,214;117.2,215;117.3,215;117.4,205;117.5,173;
117.6,198;117.7,202;117.8,224;117.9,215;118,212;118.1,211;118.2,212;118.3,215;118.4,235;118.5,227;118.6,204;118.7,220;118.8,235;118.9,203;119,198;119.1,199;119.2,209;119.3,216;119.4,193;119.5,204;119.6,221;119.7,226;119.8,218;119.9,197;120,215;120.1,191;120.2,212;120.3,180;120.4,233;120.5,202;120.6,210;120.7,206;120.8,220;120.9,197;121,202;121.1,197;121.2,225;121.3,220;121.4,190;121.5,181;121.6,216;121.7,185;121.8,231;121.9,202;122,228;122.1,218;122.2,198;122.3,180;122.4,208;122.5,200;122.6,218;122.7,207;122.8,213;122.9,201;123,182;123.1,189;123.2,219;123.3,208;123.4,229;123.5,190;123.6,209;123.7,227;123.8,208;123.9,217;124,232;124.1,205;124.2,201;124.3,216;124.4,230;124.5,192;124.6,224;124.7,185;124.8,204;124.9,209;125,208;125.1,216;125.2,210;125.3,217;125.4,226;125.5,207;125.6,209;125.7,215;125.8,202;125.9,215;126,215;126.1,228;126.2,234;126.3,215;126.4,182;126.5,237;126.6,217;126.7,194;126.8,180;126.9,198;127,217;127.1,187;127.2,221;127.3,238;127.4,225;127.5,210;127.6,188;127.7,192;127.8,210;127.9,216;128,201;128.1,197;128.2,198;128.3,205;128.4,215;128.5,235;128.6,224;128.7,217;128.8,197;128.9,195;129,208;129.1,187;129.2,197;129.3,209;129.4,181;129.5,234;129.6,201;129.7,223;129.8,222;129.9,209 0,25;20,125;130,125;136,60;256,60 
0.2,39.7064247131348;0.4,39.6876220703125;0.6,39.6655387878418;0.8,39.6451530456543;1,39.6338691711426;1.2,39.6161575317383;1.4,39.5904350280762;1.6,39.5738143920898;1.8,39.556583404541;2,39.5318374633789;2.2,39.5103645324707;2.4,39.5028419494629;2.6,39.4786987304688;2.8,39.4873123168945;3,39.5404472351074;3.2,39.8584518432617;3.4,40.1625366210938;3.6,40.5810928344727;3.8,41.7658767700195;4,42.4898452758789;4.2,43.2938079833984;4.4,44.9959945678711;4.6,45.8377342224121;4.8,46.6636543273926;5,48.2162895202637;5.2,48.9440460205078;5.4,49.6450691223145;5.6,51.017650604248;5.8,51.7064933776855;6,52.3967132568359;6.2,53.8201637268066;6.4,54.5556182861328;6.6,55.2924613952637;6.8,56.7957305908203;7,57.5503349304199;7.2,58.3024253845215;7.4,59.8001937866211;7.6,60.5326728820801;7.8,61.2675323486328;8,62.737133026123;8.2,63.4740943908691;8.4,64.2151336669922;8.6,65.7017669677734;8.8,66.450798034668;9,67.1968231201172;9.2,68.6919555664063;9.4,69.4382476806641;9.6,70.182746887207;9.8,71.6857528686523;10,72.4256057739258;10.2,73.1812210083008;10.4,74.6761474609375;10.6,75.4326400756836;10.8,76.1799697875977;11,77.6811447143555;11.2,78.4361114501953;11.4,79.1841201782227;11.6,80.6909408569336;11.8,81.4454574584961;12,82.1952209472656;12.2,83.6999206542969;12.4,84.4549942016602;12.6,85.2054138183594;12.8,86.7225036621094;13,87.469841003418;13.2,88.2296524047852;13.4,89.7452239990234;13.6,90.4993591308594;13.8,91.2458877563477;14,92.75830078125;14.2,93.5157852172852;14.4,94.2712097167969;14.6,95.7909240722656;14.8,96.5478134155273;15,97.3038635253906;15.2,98.8213806152344;15.4,99.567756652832;15.6,100.331108093262;15.8,101.834899902344;16,102.600677490234;16.2,103.362907409668;16.4,104.876678466797;16.6,105.636390686035;16.8,106.397003173828;17,107.916793823242;17.2,108.66707611084;17.4,109.427169799805;17.6,110.941955566406;17.8,111.700622558594;18,112.455581665039;18.2,113.973770141602;18.4,114.73511505127;18.6,115.495742797852;18.8,117.019912719727;19,117.777381896973;19.2,118
.536209106445;19.4,120.050338745117;19.6,120.807106018066;19.8,121.569854736328;20,123.083839416504;20.2,123.846885681152;20.4,124.608947753906;20.6,125.848709106445;20.8,126.18286895752;21,126.311019897461;21.2,126.139694213867;21.4,125.937522888184;21.6,125.715599060059;21.8,125.316909790039;22,125.183181762695;22.2,125.107444763184;22.4,125.087791442871;22.6,125.146751403809;22.8,125.22200012207;23,125.394401550293;23.2,125.47688293457;23.4,125.541557312012;23.6,125.628517150879;23.8,125.649917602539;24,125.65998840332;24.2,125.668441772461;24.4,125.65998840332;24.6,125.666709899902;24.8,125.675788879395;25,125.686981201172;25.2,125.700416564941;25.4,125.73127746582;25.6,125.748191833496;25.8,125.767845153809;26,125.802673339844;26.2,125.81275177002;26.4,125.83464050293;26.6,125.861022949219;26.8,125.871726989746;27,125.884658813477;27.2,125.902076721191;27.4,125.914886474609;27.6,125.920608520508;27.8,125.945373535156;28,125.946487426758;28.2,125.964401245117;28.4,125.975601196289;28.6,125.986801147461;28.8,125.995880126953;29,126.010437011719;29.2,126.0205078125;29.4,126.028350830078;29.6,126.042419433594;29.8,126.052612304688;30,126.057586669922;30.2,126.069396972656;30.4,126.080101013184;30.6,126.082344055176;30.8,126.086822509766;31,126.097511291504;31.2,126.106483459473;31.4,126.116058349609;31.6,126.115432739258;31.8,126.126136779785;32,126.133979797363;32.2,126.135215759277;32.4,126.139068603516;32.6,126.144668579102;32.8,126.146911621094;33,126.155990600586;33.2,126.160980224609;33.4,126.155990600586;33.6,126.168312072754;33.8,126.173408508301;34,126.17391204834;34.2,126.177276611328;34.4,126.17552947998;34.6,126.177276611328;34.8,126.183486938477;35,126.185722351074;35.2,126.189582824707;35.4,126.190208435059;35.6,126.196426391602;35.8,126.195808410645;36,126.187973022461;36.2,126.183486938477;36.4,126.185104370117;36.6,126.18684387207;36.8,126.183990478516;37,126.189582824707;37.2,126.183486938477;37.4,126.189094543457;37.6,126.190826416016;37.8,126.18
8461303711;38,126.187339782715;38.2,126.194068908691;38.4,126.181747436523;38.6,126.180122375488;38.8,126.180625915527;39,126.173408508301;39.2,126.17839050293;39.4,126.17391204834;39.6,126.171051025391;39.8,126.16943359375;40,126.16495513916;40.2,126.16047668457;40.4,126.15648651123;40.6,126.15648651123;40.8,126.15648651123;41,126.153625488281;41.2,126.15251159668;41.4,126.151390075684;41.6,126.145301818848;41.8,126.137954711914;42,126.131851196289;42.2,126.13346862793;42.4,126.129005432129;42.6,126.127891540527;42.8,126.119422912598;43,126.113319396973;43.2,126.112075805664;43.4,126.105361938477;43.6,126.101997375488;43.8,126.103622436523;44,126.093536376953;44.2,126.08967590332;44.4,126.087440490723;44.6,126.081230163574;44.8,126.072265625;45,126.073883056641;45.2,126.063804626465;45.4,126.056587219238;45.6,126.055961608887;45.8,126.051986694336;46,126.047004699707;46.2,126.044769287109;46.4,126.038558959961;46.6,126.032455444336;46.8,126.028350830078;47,126.020027160645;47.2,126.017272949219;47.4,126.017768859863;47.6,126.009323120117;47.8,126.00658416748;48,125.997001647949;48.2,125.992515563965;48.4,125.98518371582;48.6,125.97908782959;48.8,125.971618652344;49,125.974479675293;49.2,125.96216583252;49.4,125.955940246582;49.6,125.95482635498;49.8,125.957061767578;50,125.948112487793;50.2,125.939643859863;50.4,125.936904907227;50.6,125.927963256836;50.8,125.919990539551;51,125.917259216309;51.2,125.906555175781;51.4,125.908790588379;51.6,125.901458740234;51.8,125.891998291016;52,125.891372680664;52.2,125.885284423828;52.4,125.877319335938;52.6,125.873458862305;52.8,125.869483947754;53,125.86164855957;53.2,125.854927062988;53.4,125.849822998047;53.6,125.843109130859;53.8,125.840873718262;54,125.83576965332;54.2,125.829048156738;54.4,125.823455810547;54.6,125.816101074219;54.8,125.813369750977;55,125.804916381836;55.2,125.802673339844;55.4,125.798202514648;55.6,125.789115905762;55.8,125.784141540527;56,125.779663085938;56.2,125.77742767334;56.4,125.77294921875;56.6
,125.755523681641;56.8,125.758758544922;57,125.751541137695;57.2,125.74259185791;57.4,125.744201660156;57.6,125.731887817383;57.8,125.731887817383;58,125.734634399414;58.2,125.727905273438;58.4,125.714477539063;58.6,125.712242126465;58.8,125.707138061523;59,125.70092010498;59.2,125.696563720703;59.4,125.694816589355;59.6,125.680267333984;59.8,125.678031921387;60,125.678031921387;60.2,125.671188354492;60.4,125.665588378906;60.6,125.665588378906;60.8,125.651519775391;61,125.647674560547;61.2,125.645927429199;61.4,125.635231018066;61.6,125.627395629883;61.8,125.627395629883;62,125.626281738281;62.2,125.624664306641;62.4,125.620185852051;62.6,125.606628417969;62.8,125.602638244629;63,125.606628417969;63.2,125.594306945801;63.4,125.588706970215;63.6,125.58423614502;63.8,125.582489013672;64,125.579132080078;64.2,125.583602905273;64.4,125.571296691895;64.6,125.563331604004;64.8,125.560592651367;65,125.557235717773;65.2,125.548774719238;65.4,125.545913696289;65.6,125.542678833008;65.8,125.539825439453;66,125.530853271484;66.2,125.527503967285;66.4,125.521789550781;66.6,125.52352142334;66.8,125.518424987793;67,125.51456451416;67.2,125.508964538574;67.4,125.503868103027;67.6,125.496658325195;67.8,125.500511169434;68,125.497650146484;68.2,125.483093261719;68.4,125.483093261719;68.6,125.476379394531;68.8,125.47688293457;69,125.47127532959;69.2,125.462326049805;69.4,125.465682983398;69.6,125.467926025391;69.8,125.455604553223;70,125.454986572266;70.2,125.450004577637;70.4,125.447769165039;70.6,125.438194274902;70.8,125.43147277832;71,125.430847167969;71.2,125.429229736328;71.4,125.424125671387;71.6,125.417419433594;71.8,125.42015838623;72,125.41854095459;72.2,125.414558410645;72.4,125.401741027832;72.6,125.407341003418;72.8,125.398384094238;73,125.394401550293;73.2,125.396026611328;73.4,125.390419006348;73.6,125.391052246094;73.8,125.380348205566;74,125.376365661621;74.2,125.375862121582;74.4,125.371887207031;74.6,125.371887207031;74.8,125.36629486084;75,125.370269775391;75.2,12
5.358955383301;75.4,125.356216430664;75.6,125.351119995117;75.8,125.351737976074;76,125.342651367188;76.2,125.345024108887;76.4,125.34440612793;76.6,125.340545654297;76.8,125.334815979004;77,125.329216003418;77.2,125.330963134766;77.4,125.324119567871;77.6,125.322006225586;77.8,125.323623657227;78,125.318023681641;78.2,125.314170837402;78.4,125.309074401855;78.6,125.309074401855;78.8,125.306205749512;79,125.30509185791;79.2,125.30347442627;79.4,125.293266296387;79.6,125.297752380371;79.8,125.292785644531;80,125.2900390625;80.2,125.286552429199;80.4,125.282585144043;80.6,125.284942626953;80.8,125.278228759766;81,125.274238586426;81.2,125.269760131836;81.4,125.27075958252;81.6,125.263046264648;81.8,125.266906738281;82,125.260810852051;82.2,125.265289306641;82.4,125.26180267334;82.6,125.260810852051;82.8,125.251724243164;83,125.257942199707;83.2,125.24340057373;83.4,125.249488830566;83.6,125.247253417969;83.8,125.24227142334;84,125.241653442383;84.2,125.237174987793;84.4,125.237174987793;84.6,125.236549377441;84.8,125.228713989258;85,125.229965209961;85.2,125.229347229004;85.4,125.221382141113;85.6,125.22200012207;85.8,125.219261169434;86,125.212417602539;86.2,125.221382141113;86.4,125.215278625488;86.6,125.215278625488;86.8,125.210678100586;87,125.209060668945;87.2,125.205207824707;87.4,125.200736999512;87.6,125.201225280762;87.8,125.205841064453;88,125.206321716309;88.2,125.196754455566;88.4,125.192764282227;88.6,125.191650390625;88.8,125.188911437988;89,125.186050415039;89.2,125.186553955078;89.4,125.185554504395;89.6,125.184936523438;89.8,125.181579589844;90,125.187797546387;90.2,125.179832458496;90.4,125.17374420166;90.6,125.17423248291;90.8,125.171997070313;91,125.169761657715;91.2,125.166397094727;91.4,125.164161682129;91.6,125.167526245117;91.8,125.163047790527;92,125.165908813477;92.2,125.158073425293;92.4,125.163047790527;92.6,125.165283203125;92.8,125.160804748535;93,125.158561706543;93.2,125.154586791992;93.4,125.153465270996;93.6,125.15682220459;93.8,125.1
54586791992;94,125.146751403809;94.2,125.147369384766;94.4,125.141151428223;94.6,125.14338684082;94.8,125.144020080566;95,125.140029907227;95.2,125.13493347168;95.4,125.141151428223;95.6,125.138916015625;95.8,125.138290405273;96,125.136672973633;96.2,125.133316040039;96.4,125.133316040039;96.6,125.12996673584;96.8,125.12996673584;97,125.129333496094;97.2,125.131080627441;97.4,125.12996673584;97.6,125.124855041504;97.8,125.12149810791;98,125.117515563965;98.2,125.12149810791;98.4,125.117515563965;98.6,125.119262695313;98.8,125.115905761719;99,125.118141174316;99.2,125.115409851074;99.4,125.109680175781;99.6,125.118644714355;99.8,125.110801696777;100,125.111923217773;100.2,125.112548828125;100.4,125.105827331543;100.6,125.106941223145;100.8,125.109680175781;101,125.101348876953;101.2,125.098991394043;101.4,125.100723266602;101.6,125.098991394043;101.8,125.098991394043;102,125.09561920166;102.2,125.092887878418;102.4,125.096252441406;102.6,125.104705810547;102.8,125.096870422363;103,125.096252441406;103.2,125.088287353516;103.4,125.092399597168;103.6,125.09065246582;103.8,125.09065246582;104,125.096252441406;104.2,125.090034484863;104.4,125.094017028809;104.6,125.084434509277;104.8,125.084434509277;105,125.081703186035;105.2,125.08219909668;105.4,125.08219909668;105.6,125.083320617676;105.8,125.077713012695;106,125.077713012695;106.2,125.085556030273;106.4,125.080581665039;106.6,125.08219909668;106.8,125.081085205078;107,125.077713012695;107.2,125.08381652832;107.4,125.078826904297;107.6,125.073860168457;107.8,125.077224731445;108,125.070999145508;108.2,125.070381164551;108.4,125.072120666504;108.6,125.069877624512;108.8,125.075981140137;109,125.070381164551;109.2,125.072120666504;109.4,125.064155578613;109.6,125.068145751953;109.8,125.072120666504;110,125.068145751953;110.2,125.061424255371;110.4,125.063659667969;110.6,125.067024230957;110.8,125.067024230957;111,125.068145751953;111.2,125.057441711426;111.4,125.061424255371;111.6,125.056945800781;111.8,125.06204986572
3;112,125.064155578613;112.2,125.062049865723;112.4,125.050231933594;112.6,125.054206848145;112.8,125.056945800781;113,125.062538146973;113.2,125.058067321777;113.4,125.057441711426;113.6,125.05135345459;113.8,125.050727844238;114,125.050231933594;114.2,125.056449890137;114.4,125.055824279785;114.6,125.052467346191;114.8,125.050231933594;115,125.053588867188;115.2,125.046875;115.4,125.048492431641;115.6,125.047370910645;115.8,125.047988891602;116,125.047370910645;116.2,125.050727844238;116.4,125.048492431641;116.6,125.05135345459;116.8,125.047988891602;117,125.047988891602;117.2,125.04288482666;117.4,125.047988891602;117.6,125.047988891602;117.8,125.045753479004;118,125.045127868652;118.2,125.044013977051;118.4,125.047370910645;118.6,125.046875;118.8,125.040649414063;119,125.040649414063;119.2,125.038414001465;119.4,125.034439086914;119.6,125.045127868652;119.8,125.03678894043;120,125.03678894043;120.2,125.045127868652;120.4,125.044639587402;120.6,125.041282653809;120.8,125.036178588867;121,125.037300109863;121.2,125.033317565918;121.4,125.038414001465;121.6,125.041282653809;121.8,125.033935546875;122,125.035057067871;122.2,125.031700134277;122.4,125.035057067871;122.6,125.03231048584;122.8,125.033317565918;123,125.034439086914;123.2,125.03231048584;123.4,125.033935546875;123.6,125.034439086914;123.8,125.03231048584;124,125.03108215332;124.2,125.03231048584;124.4,125.033317565918;124.6,125.02995300293;124.8,125.025482177734;125,125.026596069336;125.2,125.032821655273;125.4,125.033317565918;125.6,125.034439086914;125.8,125.034439086914;126,125.031700134277;126.2,125.03231048584;126.4,125.027221679688;126.6,125.027221679688;126.8,125.027717590332;127,125.027717590332;127.2,125.027717590332;127.4,125.027221679688;127.6,125.027717590332;127.8,125.033317565918;128,125.024971008301;128.2,125.025482177734;128.4,125.022735595703;128.6,125.027717590332;128.8,125.021614074707;129,125.022735595703;129.2,125.028831481934;129.4,125.021614074707;129.6,125.020500183105;129.8,125.0
24360656738;130,125.025482177734;130.2,125.022735595703;130.4,125.004089355469;130.6,124.89525604248;130.8,124.68692779541;131,123.83283996582;131.2,123.110824584961;131.4,122.418769836426;131.6,121.063827514648;131.8,120.399826049805;132,119.754051208496;132.2,118.476852416992;132.4,117.854606628418;132.6,117.242012023926;132.8,116.039070129395;133,115.444869995117;133.2,114.852989196777;133.4,113.699516296387;133.6,113.127235412598;133.8,112.569427490234;134,111.464576721191;134.2,110.918518066406;134.4,110.387062072754;134.6,109.325691223145;134.8,108.798645019531;135,108.279472351074;135.2,107.25471496582;135.4,106.753219604492;135.6,106.261192321777;135.8,105.272605895996;136,104.787696838379;136.2,104.310028076172;136.4,103.357948303223;136.6,102.894561767578;136.8,102.429130554199;137,101.502754211426;137.2,101.049858093262;137.4,100.608001708984;137.6,99.7148361206055;137.8,99.2816696166992;138,98.8530120849609;138.2,97.9905242919922;138.4,97.5648574829102;138.6,97.1460189819336;138.8,96.3052978515625;139,95.8938980102539;139.2,95.4903182983398;139.4,94.6834182739258;139.6,94.2828063964844;139.8,93.8863220214844;140,93.1000137329102;140.2,92.7063598632813;140.4,92.3200378417969;140.6,91.5559005737305;140.8,91.1714248657227;141,90.799186706543;141.2,90.0549697875977;141.4,89.6922225952148;141.6,89.3213882446289;141.8,88.6113586425781;142,88.236930847168;142.2,87.8806381225586;142.4,87.183723449707;142.6,86.8316192626953;142.8,86.4849624633789;143,85.8002471923828;143.2,85.4559173583984;143.4,85.1123580932617;143.6,84.450065612793;143.8,84.1181640625;144,83.7869110107422;144.2,83.1300582885742;144.4,82.8115692138672;144.6,82.4926223754883;144.8,81.8487930297852;145,81.5360794067383;145.2,81.2262268066406;145.4,80.6058731079102;145.6,80.2954788208008;145.8,79.9846343994141;146,79.3774032592773;146.2,79.0820083618164;146.4,78.7862548828125;146.6,78.1936187744141;146.8,77.8962249755859;147,77.6039047241211;147.2,77.033561706543;147.4,76.7408294677734;147.6,76.457
9238891602;147.8,75.8883972167969;148,75.6101303100586;148.2,75.3323593139648;148.4,74.7714996337891;148.6,74.511344909668;148.8,74.2376022338867;149,73.6994934082031;149.2,73.4362335205078;149.4,73.1712799072266;149.6,72.6494216918945;149.8,72.3851470947266;150,72.1308212280273;150.2,71.6179504394531;150.4,71.3582916259766;150.6,71.1051483154297;150.8,70.6094741821289;151,70.3647232055664;151.2,70.1111755371094;151.4,69.6239852905273;151.6,69.3836059570313;151.8,69.1390609741211;152,68.6608428955078;152.2,68.4239501953125;152.4,68.1924743652344;152.6,67.7189178466797;152.8,67.4912872314453;153,67.2590103149414;153.2,66.7975921630859;153.4,66.5767669677734;153.6,66.3493270874023;153.8,65.9007568359375;154,65.6810913085938;154.2,65.4614410400391;154.4,65.0216827392578;154.6,64.8125991821289 0,50;100,50 0.012,47.8;0.512,48.9;1.012,50.1;1.512,50;2.012,50;2.512,50;3.012,50;3.512,50;4.012,50.1;4.512,50;5.012,50;5.512,50;6.012,50;6.512,50;7.012,50;7.512,50;8.012,50;8.512,50;9.012,50;9.512,49.9;10.012,50;10.512,49.9;11.012,49.9;11.512,50;12.012,50.1;12.512,50;13.012,49.9;13.512,49.9;14.012,49.9;14.512,50;15.012,50;15.512,49.9;16.012,50;16.512,50;17.012,49.9;17.512,49.8;18.012,49.9;18.512,49.9;19.012,50;19.512,50;20.012,50.1;20.512,50;21.012,50;21.512,50;22.012,50.1;22.512,50;23.012,49.9;23.512,50;24.012,50;24.512,50.1;25.012,50;25.512,50;26.012,50;26.512,50;27.012,50;27.512,49.9;28.012,49.8;28.512,49.9;29.012,50;29.512,50;30.012,49.9;30.512,49.9;31.012,50;31.512,50;32.012,50;32.512,50.1;33.012,50;33.512,49.9;34.012,49.9;34.512,50;35.012,49.9;35.512,49.9;36.012,50.1;36.512,50;37.012,49.9;37.512,49.9;38.012,49.9;38.512,49.9;39.012,49.9;39.512,50;40.012,50;40.512,50;41.012,49.9;41.512,50;42.012,50.1;42.512,50;43.012,50;43.512,50.1;44.012,50.1;44.512,50;45.012,49.9;45.512,49.9;46.012,50;46.512,50;47.012,50.1;47.512,50;48.012,49.9;48.512,50;49.012,50;49.512,50;50.012,50;50.512,50;51.012,50;51.512,50;52.012,50;52.512,50.1;53.012,50.2;53.512,50.1;54.012,50.1;54.512,50;55.012,49.9
;55.512,49.9;56.012,49.9;56.512,49.9;57.012,49.9;57.512,50;58.012,50;58.512,50.1;59.012,50;59.512,50;60.012,50;60.512,50;61.012,50;61.512,50.1;62.012,50.1;62.512,50;63.012,50;63.512,49.9;64.012,49.8;64.512,49.9;65.012,50;65.512,50;66.012,50.1;66.512,50.1;67.012,50.1;67.512,49.9;68.012,50;68.512,50;69.012,49.9;69.512,49.9;70.012,50;70.512,50.1;71.012,50.1;71.512,50.2;72.012,50.1;72.512,50;73.012,50;73.512,50;74.012,50;74.512,50;75.012,49.9;75.512,50;76.012,50;76.512,50;77.012,50;77.512,50.1;78.012,50;78.512,50;79.012,50.1;79.512,50;80.012,50;80.512,50;81.012,50;81.512,50;82.012,50;82.512,50.1;83.012,50;83.512,50;84.012,50.1;84.512,50;85.012,49.9;85.512,49.8;86.012,49.9;86.512,50;87.012,50.1;87.512,50.1;88.012,50;88.512,50;89.012,50.1;89.512,50;90.012,49.9;90.512,50;91.012,50;91.512,50;92.012,50.1;92.512,50;93.012,49.9;93.512,49.9;94.012,49.9;94.512,50;95.012,50;95.512,49.9;96.012,50;96.512,50.1;97.012,50.1;97.512,50;98.012,50;98.512,50;99.012,49.9;99.512,49.9;100.012,50;100.512,50.1;101.011,25;101.511,0;102.011,0;102.511,0;103.011,0;103.511,0;104.011,0;104.511,0;105.011,0;105.511,0;106.011,0;106.511,0;107.011,0;107.511,0;108.011,0;108.511,0;109.011,0;109.511,0;110.011,0;110.511,0;111.011,0;111.511,0;112.011,0;112.511,0;113.011,0;113.511,0;114.011,0;114.511,0;115.011,0;115.511,0;116.011,0;116.511,0;117.011,0;117.511,0;118.011,0;118.511,0;119.011,0;119.511,0;120.011,0;120.511,0;121.011,0;121.511,0;122.011,0;122.511,0;123.011,0;123.511,0;124.011,0;124.511,0 0,1;80,1 
0.1,89;0.2,69;0.3,77;0.4,81;0.5,71;0.6,74;0.7,69;0.8,70;0.9,63;1,66;1.1,66;1.2,63;1.3,72;1.4,76;1.5,68;1.6,62;1.7,61;1.8,79;1.9,64;2,69;2.1,73;2.2,72;2.3,73;2.4,70;2.5,67;2.6,77;2.7,87;2.8,73;2.9,71;3,63;3.1,79;3.2,67;3.3,79;3.4,72;3.5,79;3.6,75;3.7,73;3.8,75;3.9,63;4,89;4.1,77;4.2,84;4.3,87;4.4,79;4.5,102;4.6,89;4.7,89;4.8,92;4.9,91;5,100;5.1,99;5.2,105;5.3,107;5.4,85;5.5,120;5.6,102;5.7,118;5.8,101;5.9,114;6,103;6.1,116;6.2,107;6.3,113;6.4,133;6.5,120;6.6,149;6.7,140;6.8,157;6.9,157;7,148;7.1,181;7.2,187;7.3,157;7.4,164;7.5,189;7.6,195;7.7,186;7.8,237;7.9,200;8,210;8.1,229;8.2,217;8.3,246;8.4,248;8.5,267;8.6,258;8.7,290;8.8,298;8.9,292;9,284;9.1,325;9.2,297;9.3,317;9.4,371;9.5,376;9.6,390;9.7,356;9.8,425;9.9,415;10,433;10.1,454;10.2,443;10.3,479;10.4,496;10.5,529;10.6,539;10.7,535;10.8,543;10.9,587;11,625;11.1,593;11.2,633;11.3,650;11.4,690;11.5,695;11.6,752;11.7,798;11.8,778;11.9,778;12,802;12.1,897;12.2,881;12.3,957;12.4,994;12.5,958;12.6,1055;12.7,1077;12.8,1085;12.9,1179;13,1146;13.1,1247;13.2,1260;13.3,1274;13.4,1342;13.5,1417;13.6,1472;13.7,1432;13.8,1528;13.9,1632;14,1655;14.1,1601;14.2,1716;14.3,1724;14.4,1807;14.5,1902;14.6,1930;14.7,1965;14.8,2006;14.9,2035;15,2216;15.1,2194;15.2,2318;15.3,2218;15.4,2428;15.5,2556;15.6,2682;15.7,2622;15.8,2727;15.9,2801;16,2931;16.1,2916;16.2,2967;16.3,3042;16.4,3140;16.5,3277;16.6,3185;16.7,3247;16.8,3294;16.9,3442;17,3561;17.1,3603;17.2,3674;17.3,3726;17.4,3796;17.5,3728;17.6,3855;17.7,3832;17.8,3798;17.9,3946;18,3910;18.1,3930;18.2,3937;18.3,3898;18.4,4057;18.5,3985;18.6,3801;18.7,3786;18.8,3762;18.9,3756;19,3630;19.1,3609;19.2,3537;19.3,3555;19.4,3541;19.5,3367;19.6,3345;19.7,3146;19.8,3108;19.9,2975;20,2788;20.1,2688;20.2,2478;20.3,2452;20.4,2349;20.5,2141;20.6,2065;20.7,1890;20.8,1803;20.9,1604;21,1498;21.1,1394;21.2,1291;21.3,1204;21.4,1059;21.5,1028;21.6,931;21.7,866;21.8,770;21.9,691;22,614;22.1,559;22.2,582;22.3,476;22.4,490;22.5,450;22.6,370;22.7,403;22.8,390;22.9,317;23,342;23.1,319;23.2,291;23.3,307;23.4,306
;23.5,278;23.6,311;23.7,297;23.8,296;23.9,330;24,323;24.1,289;24.2,284;24.3,305;24.4,284;24.5,294;24.6,271;24.7,288;24.8,317;24.9,307;25,282;25.1,297;25.2,303;25.3,281;25.4,323;25.5,327;25.6,294;25.7,290;25.8,276;25.9,271;26,272;26.1,282;26.2,262;26.3,241;26.4,286;26.5,277;26.6,247;26.7,267;26.8,289;26.9,263;27,261;27.1,262;27.2,256;27.3,239;27.4,263;27.5,253;27.6,241;27.7,235;27.8,229;27.9,220;28,203;28.1,225;28.2,225;28.3,206;28.4,209;28.5,188;28.6,214;28.7,196;28.8,185;28.9,174;29,168;29.1,175;29.2,184;29.3,187;29.4,176;29.5,162;29.6,173;29.7,168;29.8,132;29.9,151;30,134;30.1,175;30.2,143;30.3,127;30.4,137;30.5,140;30.6,115;30.7,138;30.8,139;30.9,120;31,124;31.1,121;31.2,111;31.3,149;31.4,137;31.5,120;31.6,132;31.7,113;31.8,127;31.9,116;32,108;32.1,117;32.2,116;32.3,98;32.4,120;32.5,101;32.6,87;32.7,111;32.8,107;32.9,108;33,101;33.1,118;33.2,86;33.3,106;33.4,121;33.5,105;33.6,87;33.7,80;33.8,101;33.9,105;34,103;34.1,86;34.2,87;34.3,79;34.4,84;34.5,84;34.6,95;34.7,96;34.8,77;34.9,99;35,82;35.1,81;35.2,78;35.3,91;35.4,81;35.5,95;35.6,90;35.7,100;35.8,90;35.9,79;36,92;36.1,78;36.2,95;36.3,96;36.4,81;36.5,82;36.6,71;36.7,78;36.8,67;36.9,79 0,25;27,160;37,160;47,60;167,60 
0.2,30.7578144073486;0.4,30.749828338623;0.6,30.7449951171875;0.8,30.7407608032227;1,30.7310848236084;1.2,30.7257633209229;1.4,30.7359218597412;1.6,30.7620487213135;1.8,30.9956016540527;2,31.2360744476318;2.2,31.5899219512939;2.4,32.6551284790039;2.6,33.3335723876953;2.8,34.1144981384277;3,35.822639465332;3.2,36.6979446411133;3.4,37.552906036377;3.6,39.1688652038574;3.8,39.9255523681641;4,40.6455459594727;4.2,42.0563163757324;4.4,42.7529144287109;4.6,43.4563598632813;4.8,44.8957214355469;5,45.6382179260254;5.2,46.3881988525391;5.4,47.9173316955566;5.6,48.6773834228516;5.8,49.4495544433594;6,50.9787979125977;6.2,51.7368240356445;6.4,52.4800605773926;6.6,53.98974609375;6.8,54.7403602600098;7,55.4934730529785;7.2,56.9991645812988;7.4,57.7553939819336;7.6,58.5142555236816;7.8,60.0343589782715;8,60.8039169311523;8.2,61.5676765441895;8.4,63.0930557250977;8.6,63.855655670166;8.8,64.6153869628906;9,66.1405181884766;9.2,66.9011611938477;9.4,67.665901184082;9.6,69.199089050293;9.8,69.9642486572266;10,70.7248153686523;10.2,72.2572479248047;10.4,73.0180969238281;10.6,73.7821807861328;10.8,75.3098907470703;11,76.0807800292969;11.2,76.8520736694336;11.4,78.3845138549805;11.6,79.1556015014648;11.8,79.9225692749023;12,81.4514846801758;12.2,82.2133102416992;12.4,82.9787368774414;12.6,84.5177536010742;12.8,85.2853012084961;13,86.0507965087891;13.2,87.5911712646484;13.4,88.3543395996094;13.6,89.1260528564453;13.8,90.664436340332;14,91.4320907592773;14.2,92.2000350952148;14.4,93.7308502197266;14.6,94.4997711181641;14.8,95.2650299072266;15,96.7986221313477;15.2,97.5664596557617;15.4,98.3374176025391;15.6,99.8789672851563;15.8,100.646324157715;16,101.412368774414;16.2,102.946762084961;16.4,103.704116821289;16.6,104.481300354004;16.8,106.01197052002;17,106.784912109375;17.2,107.550453186035;17.4,109.088424682617;17.6,109.864204406738;17.8,110.630226135254;18,112.167663574219;18.2,112.926696777344;18.4,113.696151733398;18.6,115.221885681152;18.8,115.993766784668;19,116.762702941895;19.2,11
8.296165466309;19.4,119.069755554199;19.6,119.839645385742;19.8,121.374252319336;20,122.141204833984;20.2,122.905448913574;20.4,124.441665649414;20.6,125.207939147949;20.8,125.970626831055;21,127.496505737305;21.2,128.26904296875;21.4,129.039993286133;21.6,130.574813842773;21.8,131.350021362305;22,132.108551025391;22.2,133.64616394043;22.4,134.416809082031;22.6,135.176849365234;22.8,136.704681396484;23,137.470108032227;23.2,138.233657836914;23.4,139.769119262695;23.6,140.533935546875;23.8,141.302368164063;24,142.833724975586;24.2,143.610900878906;24.4,144.36669921875;24.6,145.90007019043;24.8,146.661865234375;25,147.426055908203;25.2,148.952987670898;25.4,149.718017578125;25.6,150.48828125;25.8,152.018951416016;26,152.778060913086;26.2,153.551071166992;26.4,155.086044311523;26.6,155.845001220703;26.8,156.606582641602;27,158.139953613281;27.2,158.892471313477;27.4,159.657852172852;27.6,160.827529907227;27.8,161.087600708008;28,161.141891479492;28.2,160.823669433594;28.4,160.564743041992;28.6,160.285919189453;28.8,159.846450805664;29,159.716018676758;29.2,159.65950012207;29.4,159.734176635742;29.6,159.842422485352;29.8,159.968490600586;30,160.21923828125;30.2,160.31672668457;30.4,160.389678955078;30.6,160.449066162109;30.8,160.448455810547;31,160.43766784668;31.2,160.404434204102;31.4,160.389678955078;31.6,160.388534545898;31.8,160.398315429688;32,160.413116455078;32.2,160.442169189453;32.4,160.495681762695;32.6,160.523132324219;32.8,160.545959472656;33,160.592041015625;33.2,160.6103515625;33.4,160.632049560547;33.6,160.654235839844;33.8,160.662231445313;34,160.667999267578;34.2,160.687286376953;34.4,160.698822021484;34.6,160.709594726563;34.8,160.729034423828;35,160.737548828125;35.2,160.748352050781;35.4,160.778045654297;35.6,160.786560058594;35.8,160.802597045898;36,160.817886352539;36.2,160.826538085938;36.4,160.829925537109;36.6,160.853225708008;36.8,160.853225708008;37,160.861251831055;37.2,160.876693725586;37.4,160.860641479492;37.6,160.729034423828;37.8,159.92
4011230469;38,159.381454467773;38.2,158.391235351563;38.4,156.103927612305;38.6,155.00016784668;38.8,153.917785644531;39,151.826400756836;39.2,150.803482055664;39.4,149.796920776367;39.6,147.840072631836;39.8,146.885192871094;40,145.955184936523;40.2,144.127105712891;40.4,143.218612670898;40.6,142.34147644043;40.8,140.618179321289;41,139.770263671875;41.2,138.937942504883;41.4,137.314926147461;41.6,136.512878417969;41.8,135.73030090332;42,134.185089111328;42.2,133.424102783203;42.4,132.679168701172;42.6,131.209274291992;42.8,130.489364624023;43,129.784790039063;43.2,128.397766113281;43.4,127.703979492188;43.6,127.028472900391;43.8,125.690956115723;44,125.033935546875;44.2,124.385581970215;44.4,123.114181518555;44.6,122.48316192627;44.8,121.858070373535;45,120.640235900879;45.2,120.034065246582;45.4,119.440391540527;45.6,118.255317687988;45.8,117.675064086914;46,117.100479125977;46.2,115.96696472168;46.4,115.405158996582;46.6,114.846908569336;46.8,113.754699707031;47,113.218643188477;47.2,112.687225341797;47.4,111.629211425781;47.6,111.10595703125;47.8,110.589569091797;48,109.565940856934;48.2,109.056579589844;48.4,108.559059143066;48.6,107.566543579102;48.8,107.089897155762;49,106.58903503418;49.2,105.639739990234;49.4,105.161338806152;49.6,104.683753967285;49.8,103.761405944824;50,103.304016113281;50.2,102.845703125;50.4,101.93480682373;50.6,101.486671447754;50.8,101.044288635254;51,100.161399841309;51.2,99.7320098876953;51.4,99.297737121582;51.6,98.4410781860352;51.8,98.0126419067383;52,97.6025238037109;52.2,96.7665100097656;52.4,96.3555526733398;52.6,95.9420471191406;52.8,95.1333160400391;53,94.7310638427734;53.2,94.3320541381836;53.4,93.5500793457031;53.6,93.1568908691406;53.8,92.7682876586914;54,91.9939346313477;54.2,91.616081237793;54.4,91.2365188598633;54.6,90.4877777099609;54.8,90.1127777099609;55,89.7463226318359;55.2,89.0256195068359;55.4,88.6665496826172;55.6,88.3052978515625;55.8,87.5967178344727;56,87.2427139282227;56.2,86.8883819580078;56.4,86.19461059
57031;56.6,85.8464202880859;56.8,85.5104598999023;57,84.8315963745117;57.2,84.4985504150391;57.4,84.1595153808594;57.6,83.5019683837891;57.8,83.171875;58,82.8478546142578;58.2,82.1979293823242;58.4,81.8703155517578;58.6,81.5580902099609;58.8,80.931640625;59,80.6152114868164;59.2,80.3119659423828;59.4,79.6913986206055;59.6,79.3894500732422;59.8,79.08642578125;60,78.4937286376953;60.2,78.1925048828125;60.4,77.8928985595703;60.6,77.3071823120117;60.8,77.0236206054688;61,76.7281875610352;61.2,76.1668319702148;61.4,75.8796997070313;61.6,75.5985794067383;61.8,75.0381698608398;62,74.7599639892578;62.2,74.4850845336914;62.4,73.9419097900391;62.6,73.6765518188477;62.8,73.4071655273438;63,72.8792572021484;63.2,72.6132431030273;63.4,72.3490982055664;63.6,71.8273544311523;63.8,71.5741806030273;64,71.3145294189453;64.2,70.8061981201172;64.4,70.5586090087891;64.6,70.3095855712891;64.8,69.8118209838867;65,69.56884765625;65.2,69.3230743408203;65.4,68.8386917114258;65.6,68.5990982055664;65.8,68.3627166748047;66,67.895622253418;66.2,67.6616134643555;66.4,67.4270095825195;66.6,66.9568557739258;66.8,66.7256011962891;67,66.500373840332;67.2,66.0505599975586;67.4,65.8270950317383;67.6,65.604248046875;67.8,65.1671600341797;68,64.9509582519531 Luminescence/inst/extdata/RF_file.rf0000644000176200001440000017312514062436223017145 0ustar liggesusers ROI: ROI 1 ROI 2 ROI 3 ROI 4 ROI 5 ROI 6 ROI 7 ROI 8 ROI 9 ROI 10 ROI 11 x: 19 117 135 54 49 78 209 108 246 232 28 y: 19 73 82 168 175 167 110 54 175 172 169 width: 351 20 20 20 20 20 20 20 20 20 20 height: 351 20 20 20 20 20 20 20 20 20 20 area: 96765 316 316 316 316 316 316 316 316 316 316 No. 
time (sec) aliquot grain 1 grain 2 grain 3 grain 4 grain 5 grain 6 grain 7 grain 8 grain 9 grain 10 1 12.5 613.6036 650.769 643.1361 646.7025 644.8513 629.8418 626.5759 627.2722 618.2816 619.0475 623.5696 2 37.5 615.3152 651.269 644.5728 649.4241 646.5759 631.7563 629.0032 628.8703 619.8639 621.1551 626.2848 3 62.5 615.2543 651.9747 645.1139 649.3703 646.712 630.5253 628.0095 628.962 619.7184 620.6994 625.5443 4 87.5 616.8258 652.0823 646.2278 650.3987 648.3861 632.5475 630.3165 630.2563 621.5348 622.0475 627.1835 5 112.5 617.073 652.5411 646.9177 649.6266 648.0475 632.4747 630.0222 629.9272 621.981 623 627.5633 6 137.5 618.0075 653.7753 647.9937 651.3354 648.9494 634.1614 631.9715 631.5696 622.4968 622.9684 627.5158 7 162.5 617.5387 653.9209 646.3797 651.0443 649.0348 633.4367 630.481 630.4747 622.519 623.2975 627.7089 8 187.5 618.7645 654.5538 648.3861 652.8513 650.4019 633.9399 632.2816 631.6487 624.1519 625.0823 629.0158 9 212.5 618.8933 654.8228 647.731 653.0506 650.2975 634.5823 632.0348 632.693 623.8101 624.962 628.8386 10 237.5 618.4924 654.6329 648.269 651.8291 649.788 634.8228 631.8291 631.7152 623.212 624.0759 628.3987 11 262.5 618.6822 654.4241 647.7278 651.3513 649.9146 634.2405 632.8829 632.0506 623.2184 624.3513 628.8829 12 287.5 619.1796 654.0253 648.481 653.1424 651.1551 634.3259 632.212 632.3101 625.1867 625.4019 630.5665 13 312.5 618.8217 653.9462 647.8544 652.3924 650.0886 633.5854 632.1867 633.1139 623.538 624.4589 628.6772 14 337.5 618.9307 654.1108 649.0443 652.3133 650.019 634.3987 632.2215 631.9747 623.9778 624.5285 628.443 15 362.5 619.3048 654.7722 648.2816 653.1171 650.7658 634.3861 631.8892 632.3956 624.212 624.6424 629.6266 16 387.5 619.0163 654.6171 648.943 651.8513 649.5728 634.6392 632.3323 632.8386 624.6551 625.5095 628.1329 17 412.5 618.9434 654.5285 649.2057 651.7753 649.8165 634.5538 632.0728 632.5063 624.3608 624.1013 629.7627 18 437.5 618.737 654.5949 648.0696 651.4525 648.8608 634.0443 632.4873 631.6804 623.231 624.4873 
628.7627 19 462.5 618.7779 654.2563 648.1171 651.5127 649.6899 633.6835 631.6203 632.5886 623.9272 624.5222 628.5285 20 487.5 619.3553 654.6582 648.6772 652.0949 650.1076 635.0918 632.6487 632.3259 624.2848 624.9525 629.0949 21 512.5 619.4536 654.6234 648.6994 653.0854 650.5253 635.1614 632.4209 633.2658 624.1266 625.2911 629.0032 22 537.5 618.7475 653.8101 647.7658 651.9209 649.943 634.1456 631.7975 631.6677 624.3101 624.5696 628.1677 23 562.5 618.4579 654.2532 647.5728 651.0823 649.0601 634.2405 631.4589 632.3544 623.5918 625.1424 628.4968 24 587.5 618.923 653.9114 647.8608 651.6835 650.2215 634.4019 631.9968 631.8196 624.4272 624.1582 628.6266 25 612.5 618.8005 653.6139 648.0443 651.3892 649.3323 634.6108 632.3133 632.3418 623.8703 624.7437 628.6994 26 637.5 619.1491 653.5411 647.693 652.3608 649.557 634.6994 632.3513 632.5222 624.1962 624.2722 628.731 27 662.5 618.89 654.4905 648.2563 652.2184 650.3354 634.3766 632.3892 631.3797 624.0633 624.943 628.5854 28 687.5 618.9607 654.693 647.8734 652.4873 649.6361 634.0475 632.4114 631.8639 624.4557 625.1899 629.2753 29 712.5 618.8 654.7658 647.0728 651.0665 648.5222 634.2658 632.0032 632.4747 623.8165 624.2057 629.0886 30 737.5 619.0514 653.5032 647.5411 651.807 650.0918 634.0222 631.6804 632.4082 623.6203 624.7247 628.9019 31 762.5 619.311 654.7943 647.5601 652.1709 650.1804 634.6329 632.0285 632.8449 624.5348 624.9747 629.0601 32 787.5 618.7398 653.4525 647.3924 650.8101 649.0316 634.2373 631.5032 631.1013 623.8354 625.3513 627.7627 33 812.5 619.0412 654.1234 648.4557 652.557 650.0063 634.1456 631.7057 632.5728 624.2373 624.5601 628.4905 34 837.5 619.0312 653.9335 647.8133 650.7627 649.4747 634.4272 631.3671 632.0601 624.0696 624.3513 629.1899 35 862.5 618.9551 654.1899 647.6456 651.0886 648.7975 633.9304 631.9177 632.5918 623.6013 624.3671 628.7057 36 887.5 618.4504 653.8513 647.269 651.5063 648.481 634.9241 631.2658 632.0095 623.9684 624 627.6013 37 912.5 619.1043 654.5886 648.0063 651.3259 649.2025 634.0127 
632.3323 632.0633 624.0411 625.0443 629.8924 38 937.5 619.1799 653.9241 648.1044 651.1867 649 634.6361 631.5949 631.0348 624.6677 624.557 628.7595 39 962.5 619.041 653.2753 647.9873 651.788 649.1899 634.1456 631.557 632.2658 624.2658 624.4778 628.7753 40 987.5 619.302 654.4525 648.0032 651.731 649.1899 634.5095 632.3956 632.6614 624.9114 625.6108 628.9873 41 1012.5 618.2681 652.7184 646.9778 650.7278 648.5095 633.3513 631.1361 631.2278 622.4652 623.5696 627.6646 42 1037.5 618.4242 652.693 646.9525 650.1614 648.3766 632.9747 631.6139 631.2943 622.9968 624.4367 627.9304 43 1062.5 619.0967 653.4399 646.8133 650.3449 648.2184 635.1392 632.1456 631.8797 624.1899 624.6108 629.0443 44 1087.5 618.6087 653.2532 646.7342 651.5949 649.1487 633.9177 631.3956 631.7468 623.981 624.9146 628.7943 45 1112.5 619.049 653.8165 648.0981 650.8797 648.8924 634.8323 631.6677 631.8418 624.3386 624.8544 628.9462 46 1137.5 619.0551 653.1424 647.1171 650.5665 648.3038 634.1772 631.25 632.0285 624.5506 624.731 629.1297 47 1162.5 618.4463 653.3639 646.2943 649.9747 648.2785 633.7468 631.5665 631.6424 623.7405 623.9241 628.3703 48 1187.5 618.7559 654 647.1424 650.6044 647.8956 633.9146 630.8892 632.2753 624.1741 624.1867 628.212 49 1212.5 618.488 652.8101 647.1171 650.3703 648.8386 633.9272 631.3101 631.9462 622.9114 623.807 628.0759 50 1237.5 618.7313 652.7627 647.2247 651.0475 648.462 633.9019 632.8354 631.6582 623.8259 624.731 628.1551 51 1262.5 618.7288 652.981 647.8165 650.3703 648.6203 633.8259 631.6741 631.6392 624.0348 624.712 628.1392 52 1287.5 618.7365 652.7753 647.019 650.9272 648.0759 633.7658 632.0886 631.7532 623.7184 624.269 629.0981 53 1312.5 618.9991 653.269 647.519 650.3418 648.0475 634.5348 631.5601 631.8892 624.3323 624.8639 628.8766 54 1337.5 618.4219 652.8291 647.0506 649.7437 648.1203 633.25 630.9747 631.3829 623.7753 624.4051 627.7468 55 1362.5 618.9692 653.0886 646.9177 650.9241 649.5316 635.4051 632.2753 631.5253 624.9114 624.6677 628.8608 56 1387.5 619.48 653.5918 
647.8481 651.5949 649.2184 634.7816 631.5158 631.8892 624.1867 625.1044 628.5633 57 1412.5 618.6523 653.1772 647.0316 650.288 648.3703 634.193 631.4494 631.6487 623.7785 624.4462 628.6171 58 1437.5 618.9045 654.0411 646.9968 650.193 648.057 633.3165 631.6709 631.7342 624.519 623.9399 628.8006 59 1462.5 618.7706 652.6234 647.0759 650.3513 648.3449 633.5443 631.7342 631.1013 623.7342 624.5728 628.4589 60 1487.5 618.4059 652.1835 646.6582 649.807 647.7025 633.0095 631.1108 630.8323 623.6297 623.5728 627.6994 61 1512.5 618.8161 653.0285 648.1076 650.1013 648.1804 634.1835 631.9747 631.3766 623.7532 625.5095 627.4335 62 1537.5 618.4463 652.4715 646.8165 649.1804 647.2532 633.8513 631.1044 631.9715 623.1171 623.8101 627.2373 63 1562.5 618.9866 653.0949 647.0633 650.0981 647.519 634.3228 631.8418 631.7563 623.1424 624.4842 628.1835 64 1587.5 618.334 652.5475 646.3133 649.9462 647.7532 633.2278 630.8133 630.9684 623.2025 623.8797 627.2152 65 1612.5 618.952 653.3354 647.8291 650.7911 648.7722 634.5475 631.7975 632.3924 624.1013 625.0475 629.0886 66 1637.5 618.6641 651.6646 647.1772 649.1677 647.0538 633.25 631.0601 631.7057 623.8259 623.8639 627.7627 67 1662.5 618.5372 652.2785 646.7342 650.1646 647.8956 633.5127 631.1203 630.981 623.5665 624.443 627.7722 68 1687.5 618.8991 652.8639 646.7025 650.3639 648.2025 634.1772 631.0823 631.5665 624.1772 624.9241 628.0633 69 1712.5 618.3047 651.5222 646.6772 648.7722 646.9937 633.2247 630.8766 631.0633 623.0601 624.481 627.0316 70 1737.5 618.8147 653.2342 647.193 649.3861 647.6203 634.5127 631.1804 631.4652 623.7563 624.4652 628.8734 71 1762.5 618.501 652.0728 646.9589 648.8513 647.1424 633.2278 631.0633 631.3165 623.5759 624.4177 628.1108 72 1787.5 619.1409 654.0158 646.712 650.2753 648.0506 634.3133 631.9652 631.9652 623.5665 624.2658 628.2911 73 1812.5 618.8329 652.3608 647.4304 649.5791 647.7753 634.2184 631.5316 631.0696 624.3608 625.0506 627.4399 74 1837.5 618.4696 653.3544 646.0981 649.3259 647.4494 633.8639 630.7753 631.1203 
622.8734 626.2722 627.8196 75 1862.5 618.4497 652.5854 647.1709 648.3639 646.4051 633.5348 631.5918 631.0095 623.9399 624.2057 628.2627 76 1887.5 618.8617 652.2278 646.0411 649.5506 647.8861 633.519 631.4367 631.1203 624.0759 624.2215 628.0095 77 1912.5 618.2807 651.7278 646.557 649.1962 646.8924 632.481 631.9051 631.7753 622.8861 623.6361 627.3133 78 1937.5 619.117 653.0918 647.0949 649.3418 647.8797 634.1203 631.3797 631.9525 624.8671 624.7532 628.25 79 1962.5 618.8144 652.9747 646.5348 649.9873 647.5095 634.2468 631.6171 630.9272 624.1424 624.6709 627.7658 80 1987.5 618.6177 652.7405 646.6614 649.6867 646.6329 633.5823 631.6108 631.231 623.9304 624.1709 628.6487 81 2012.5 618.7662 652.481 646.3481 649.7278 647.4209 634.1171 631.2785 630.6962 623.6266 625.2247 627.5728 82 2037.5 618.5925 652.3956 646.0063 649.6139 647.0475 633.6013 631.0949 631.3513 623.4557 624.7373 627.5601 83 2062.5 619.0939 652.7785 646.5728 649.981 647.6044 633.8449 631.0063 632.2532 624.1392 624.5316 628.1994 84 2087.5 619.1902 652.3797 647.1962 649.4367 647.1835 633.0696 631.3924 632.0285 624.3703 624.6614 628.3861 85 2112.5 618.8515 652.519 646.0665 649.6203 647.7215 633.5285 631.3703 632.1108 624.2627 624 627.9905 86 2137.5 618.7562 653.0158 646.75 649.7753 648.4051 633.8101 632.0222 630.6772 624.1361 623.8671 627.6266 87 2162.5 618.5491 652.1013 646.0032 649.6646 647.9494 633.2658 631.25 631.2184 624.1424 624.0285 627.8291 88 2187.5 618.7426 652.3671 646.4367 649.0443 647.4652 633.8165 630.6519 630.7247 623.8797 624.8259 628.0538 89 2212.5 618.5531 652.0601 646.0158 647.9589 646.462 634.2025 631.1772 630.7152 623.3101 624.1171 628.0158 90 2237.5 618.6534 652.3165 646.4146 649.1108 646.9525 632.8671 631.7816 631.712 623.1772 623.5538 628.0696 91 2262.5 618.6187 652.4905 646.7057 648.4114 646.5728 633.5348 631.5443 631.3513 623.8449 625.1361 628.2658 92 2287.5 618.4215 651.0918 645.9905 649.0633 646.9937 632.712 631.0127 631.4241 623.2785 623.9747 627.5032 93 2312.5 618.7115 651.693 
645.0127 648.7342 647.5 632.8481 631.3133 631.038 623.1171 624.1646 627.4494 94 2337.5 619.0205 652.3892 646.4652 648.8924 646.6013 633.7532 632.9399 631.4778 624.769 624.4304 628.2532 95 2362.5 618.5737 651.2405 646.6108 648.25 646.3924 633.3291 631.6203 631.7405 624.0032 623.8703 629.1203 96 2387.5 617.6356 650.3861 645.2089 646.943 645.4715 632.9715 630.2595 629.5823 622.4747 623.3449 627.038 97 2412.5 618.6216 651.9462 646.1677 648.962 646.9335 633.712 631.2722 631.1203 623.8766 624.519 627.8038 98 2437.5 618.6898 651.8196 645.7278 648.4968 646.3861 633.7405 631.288 631.3354 624.2342 624.3386 628.0918 99 2462.5 618.0993 650.731 644.7089 647.9177 646.5728 632.8513 629.9272 630.9462 622.6361 623.4241 627.7373 100 2487.5 618.5218 651.4873 645.4304 649.0285 646.9082 632.9051 631.0316 630.4589 623.1392 624.0316 627.4652 101 2512.5 619.0366 651.538 646.4019 648.9684 646.8449 633.8418 631.288 632.3481 624.1297 624.2563 628.5285 102 2537.5 618.8007 651.6519 645.8481 648.5759 646.7595 633.3386 631.3228 630.8038 624.1835 624.2785 627.3133 103 2562.5 618.554 652.0253 645.5886 648.7658 646.8228 632.1646 630.1741 630.8608 623.3924 623.6171 627.7342 104 2587.5 618.0766 651.2278 645.057 647.3956 645.5601 632.6392 630.5759 630.3956 622.6867 623.6646 626.4462 105 2612.5 618.5946 650.9937 645.8861 647.9873 646.3449 632.9873 631.9272 631.0222 623.693 623.5759 626.7816 106 2637.5 617.9828 650.7563 645.7215 647.6424 645.75 632.5759 630.4652 629.8576 622.3386 623.2595 626.7468 107 2662.5 619.0805 651.9304 645.9747 648.6392 646.9209 634.6234 631.3956 631.3987 624.019 624.8766 627.8418 108 2687.5 618.3992 651.4399 646.1709 647.5854 646.4905 632.4937 630.3101 630.4747 622.962 623.1456 628.2532 109 2712.5 618.4353 651.6171 646.6297 647.7405 646.0601 633.5886 630.5475 630.4019 623.0348 623.5316 627.4082 110 2737.5 618.943 651.4462 645.5443 649.0253 646.9335 634.2437 631.2595 630.9177 623.981 624.8133 627.3892 111 2762.5 618.6558 651.6203 646.0285 647.9335 646.4209 633.3924 631.2563 
630.9968 622.9905 624.0127 627.6013 112 2787.5 618.4095 651.962 645.6108 648.7816 646.519 633.3196 631.1994 630.481 623.4209 623.5538 627.4652 113 2812.5 618.5409 651.3038 645.4082 648.0063 646.1203 633.1519 631.8259 630.2373 623.8386 624.3449 626.6835 114 2837.5 618.4967 651.4146 645.3766 648.2816 645.9462 633.2943 630.1614 631.3259 623.3797 624.0759 627.4715 115 2862.5 618.8603 651.3165 646.3766 648.3576 646.5918 633.25 631.8829 631.1297 624.2215 624.2278 627.8544 116 2887.5 619.1797 651.481 646.2405 648.4905 647.0411 634.2658 630.9557 631.7184 624.0095 624.2816 627.981 117 2912.5 618.3571 651.3956 645.1234 647.1551 645.6456 633.3418 631.0854 630.6835 623.2658 623.5728 627.8924 118 2937.5 618.0335 650.3924 645.481 647.3892 645.5 632.1741 630.3196 630.4114 623.2658 623.4241 627.1266 119 2962.5 618.1177 650.0063 645.8006 647.8165 645.4873 632.4335 630.8291 630.788 623.0316 623.8861 627.2437 120 2987.5 618.9252 650.75 645.769 648.7911 646.8544 633.462 631.0854 631.0063 623.6266 624.1519 627.6456 121 3012.5 618.2519 651.3006 645.9462 647.8418 645.6392 632.3354 630.2057 630.2278 623.1076 623.8101 627.4304 122 3037.5 618.8213 650.7342 645.8259 648.1551 646.7595 632.7437 630.6013 631.2785 623.9873 624.3101 628.019 123 3062.5 618.7147 651.7627 645.8133 647.9684 645.9715 633.8924 631.231 630.6835 623.1361 624.1709 627.5728 124 3087.5 618.7709 650.8671 646.1551 647.8987 645.9778 633.2025 631.0823 631.2373 623.6076 624.2215 628.1139 125 3112.5 618.1167 650.9684 645.4209 647.4019 645.7152 632.7785 630.1962 630.2089 623.1013 624.269 626.9462 126 3137.5 619.3078 650.8892 646.0063 648.2184 646.6835 633.5791 631.3386 631.4082 624.3576 625.7184 627.8513 127 3162.5 618.7881 651.2532 646.2215 647.6044 646.0981 633.3608 631.0728 631.1741 624.212 624.3038 627.6709 128 3187.5 618.6196 651.038 646.038 647.6551 646.3449 632.8797 630.9367 630.9842 623.3006 624.0854 627.0696 129 3212.5 618.9873 650.8449 646.25 648.231 645.9525 633.9747 630.7278 631.4209 623.8576 624.1234 628.2215 130 
3237.5 618.4587 650.8386 645.557 647.6329 645.3861 632.9937 631.0601 631.4209 623.6709 623.3576 627.019 131 3262.5 618.0786 649.6329 644.2437 646.9494 644.5601 631.712 630.1835 630.5918 622.6804 623.6013 626.8449 132 3287.5 618.625 650.6044 644.8892 648.2184 645.5316 632.8481 630.5665 631.3133 623.3101 623.9019 627.019 133 3312.5 618.6231 651.6424 645.8829 647.1234 645.5253 633.9019 630.7152 631.0538 623.3987 623.1551 627.6424 134 3337.5 618.6622 650.5 645.1044 647.9177 645.3101 632.9304 630.6677 630.3259 623.7247 623.7943 627.4146 135 3362.5 618.5464 650.6044 644.7089 646.8038 645.3924 632.6329 630.481 630.7278 624.0443 624.2785 626.9589 136 3387.5 618.7836 650.3671 645.6835 647.2595 645.2563 633.4177 630.693 631.1835 623.3386 624.3418 626.9241 137 3412.5 618.8434 651.3323 644.9684 647.3671 645.3449 633.3449 630.943 630.3418 623.5759 623.9525 628.3797 138 3437.5 618.6767 650.7437 645.8133 646.9082 644.5981 633.2911 630.2785 631.0918 623.8639 624.1139 626.5791 139 3462.5 618.2843 649.8323 644.7595 647.7943 645.6392 632.7405 629.9304 630.1329 622.5886 623.2247 626.7437 140 3487.5 618.3255 651.6203 644.9146 647.5854 645.3481 632.6614 630.1424 630.0601 623.7057 623.8829 627.0348 141 3512.5 618.8023 650.8291 644.9462 647.7184 645.6804 632.4146 630.9209 631.0063 623.8133 624.1456 627.5791 142 3537.5 618.2801 650.3734 644.4146 647.8165 645.1361 632.9715 630.4462 630.1962 623.2563 624.462 626.481 143 3562.5 618.6223 650.7215 644.8703 647.1392 645.5538 632.7816 630.5601 629.9873 623.5665 623.4209 626.8544 No. 
time (sec) aliquot grain 1 grain 2 grain 3 grain 4 grain 5 grain 6 grain 7 grain 8 grain 9 grain 10 1 12.5 616.0312 658.6906 649.9153 658.2347 655.7046 633.8033 631.1412 632.9204 621.5427 622.2058 629.0102 2 37.5 612.2725 655.4048 646.6503 655.4416 652.5029 629.4791 626.8322 628.7589 618.7269 618.154 625.8077 3 62.5 613.7485 656.4286 647.5412 656.7557 653.8102 631.0449 628.7583 629.5886 619.1664 619.6345 626.887 4 87.5 614.9674 658.2303 648.6834 657.0633 653.7187 632.5811 629.3318 631.163 620.3468 620.5988 628.0899 5 112.5 615.9991 657.9084 648.8509 657.479 654.627 633.0045 632.493 631.985 622.7405 623.5552 629.6421 6 137.5 616.2784 658.4475 649.7241 658.8042 656.2519 634.8908 631.4959 632.1283 621.2177 622.3616 629.4942 7 162.5 616.1318 659.4131 649.8941 658.4598 656.1778 633.8129 631.2871 632.5899 620.932 622.107 628.9506 8 187.5 616.8854 659.6138 650.2727 658.8553 656.225 634.6475 632.155 632.6959 622.0203 622.6792 629.8228 9 212.5 617.3767 660.7341 651.209 659.1971 656.6876 634.6072 632.0881 633.5808 622.4895 623.2277 629.7101 10 237.5 617.4728 659.6334 650.7071 658.6893 656.4377 635.3214 632.2872 632.9481 622.7366 623.164 630.1439 11 262.5 618.0988 660.6009 651.5431 660.2137 658.0555 635.9507 632.8362 633.7608 623.0455 623.0344 630.3384 12 287.5 618.1591 660.6857 651.6801 660.0739 657.2581 636.1234 633.2454 634.4526 623.7032 624.5858 630.8197 13 312.5 618.4908 661.368 652.1233 660.3118 657.2619 636.2012 633.1976 634.0285 624.1963 624.7369 631.0497 14 337.5 618.6913 661.0175 652.3968 661.1084 658.283 636.5828 633.3447 634.4143 623.9483 624.9399 631.358 15 362.5 618.5761 660.6205 652.3064 660.9544 658.1209 636.4211 633.6617 634.9462 623.7633 624.4986 631.2697 16 387.5 619.0878 661.6962 652.0899 660.2787 657.1431 636.2823 634.0592 635.3468 624.1583 625.4925 631.0609 17 412.5 619.1123 661.0516 653.4275 661.4284 658.7825 636.6452 633.8941 635.0008 624.4952 625.1524 631.2235 18 437.5 619.1219 661.83 652.6724 660.51 657.6139 637.0458 634.0824 635.157 625.0518 
625.1468 631.8565 19 462.5 619.1186 660.596 652.6746 660.2996 657.7479 636.0863 634.0373 635.409 625.2233 626.069 631.0531 20 487.5 618.6514 660.9863 652.2574 659.3774 657.0476 635.8082 633.5955 634.8518 624.9103 624.4519 631.1783 21 512.5 618.6185 661.108 652.826 660.109 657.1883 636.7313 633.7887 634.8854 623.9059 624.4369 631.8754 22 537.5 619.0751 660.9557 653.2617 660.274 657.0657 636.7279 633.791 635.2694 624.6487 625.6795 630.7416 23 562.5 618.8382 660.4928 651.8307 659.4779 656.3989 636.0958 633.3189 634.919 624.4194 624.9968 631.0872 24 587.5 619.1382 660.9742 652.1005 660.8257 657.4648 636.3843 633.3951 635.4573 624.4644 624.8242 631.8506 25 612.5 619.7459 661.0917 652.6333 660.569 658.3732 637.754 634.7651 635.4046 624.9538 626.0424 631.7531 26 637.5 619.3593 661.2368 652.5708 660.5365 657.9042 636.8119 634.2966 634.7033 624.1106 625.0724 631.2318 27 662.5 619.4168 661.4003 652.8548 660.5254 658.0511 636.8973 633.3523 635.1406 624.1972 625.0206 631.5518 28 687.5 619.8258 660.9017 652.6172 660.6988 658.0219 637.6791 634.7789 635.3681 624.8823 626.8548 631.3864 29 712.5 619.4522 661.8692 651.9301 660.1876 657.8309 637.6165 634.5131 635.77 624.4221 625.0042 631.6815 30 737.5 619.6236 661.1969 653.3451 660.4172 657.922 636.145 634.2412 635.0599 625.5943 625.7517 632.543 31 762.5 619.3767 660.958 652.1418 660.562 658.2986 636.2887 634.3318 635.3714 625.2461 624.884 631.8345 32 787.5 619.1879 660.5759 652.2561 659.0396 656.5601 635.7813 634.2179 635.4272 624.7261 625.9287 632.3679 33 812.5 619.5383 660.4396 652.2926 659.4839 657.0866 636.5578 633.9843 635.2885 624.8658 625.742 631.2716 34 837.5 619.8293 661.4764 652.6684 660.3092 658.0901 636.7865 634.6386 635.0527 625.0525 626.3872 632.4097 35 862.5 619.3868 661.2335 652.5278 659.1183 656.4801 637.0851 634.5441 634.3174 624.1022 624.6214 630.8086 36 887.5 619.5994 660.5377 652.3172 660.0516 657.437 636.587 634.1476 635.7901 625.9885 626.2344 632.9402 37 912.5 619.0942 659.3482 652.157 658.9576 655.6765 
637.0414 633.3473 633.5141 624.4448 623.8679 630.0886 38 937.5 619.4612 659.5308 652.6359 660.1829 656.9747 637.1701 634.1015 636.1454 625.1366 626.2227 631.9286 39 962.5 619.23 660.2986 651.4306 659.4451 656.2312 636.3953 633.2892 634.6489 624.569 625.2839 631.4595 40 987.5 619.5328 660.9764 651.7374 659.2391 656.6079 636.4839 633.713 635.7279 624.8239 626.4778 631.2861 41 1012.5 619.5093 659.6945 651.6841 659.2604 655.8882 635.5746 633.858 635.6404 625.4522 625.3535 631.8243 42 1037.5 618.6795 659.061 651.4754 658.0793 655.6121 636.4216 632.8933 633.8865 624.0987 624.48 630.4105 43 1062.5 619.2866 659.7778 652.5493 659.2619 656.6086 637.068 633.6919 634.7811 624.4517 624.8592 631.1715 44 1087.5 619.1471 660.3686 652.1764 658.2279 656.1815 636.1857 634.2407 634.8808 625.1851 625.1814 630.8224 45 1112.5 619.6001 660.9443 652.8683 658.8397 656.3722 636.2745 634.5092 635.2982 624.781 624.8203 631.3737 46 1137.5 619.6578 660.1256 652.3762 659.4315 656.9237 636.7025 634.0145 635.1175 625.2248 626.2819 631.1542 47 1162.5 619.195 659.4838 651.4366 658.5505 655.7848 636.5201 633.5235 634.7034 624.5456 625.142 631.4184 48 1187.5 619.4215 659.7769 651.7305 657.9934 655.585 636.2133 633.8861 635.2471 624.3509 625.1392 631.3784 49 1212.5 619.4365 660.417 651.6828 658.4544 656.0292 636.839 633.8779 636.0436 624.8158 625.3572 631.5027 50 1237.5 619.2843 659.2626 652.1786 658.4305 656.2508 636.8049 632.9646 634.1576 624.554 625.3334 631.2679 51 1262.5 619.6429 659.0053 651.6919 658.7217 656.6364 636.5588 634.3722 635.2047 624.8602 625.6231 631 52 1287.5 619.0538 659.3861 651.2924 657.7921 655.3807 635.633 633.1669 634.139 624.5008 625.173 631.1169 53 1312.5 620.032 660.536 652.2995 660.0736 657.1688 637.2071 634.6034 634.9736 625.1411 626.5192 631.8403 54 1337.5 619.4964 659.4269 651.6734 658.4676 656.1291 636.8711 633.8782 634.7606 624.4274 625.0013 630.9104 55 1362.5 619.7869 660.1535 652.2897 658.3939 655.9227 637.2328 633.873 635.0491 625.0588 626.2417 631.6512 56 1387.5 
619.4803 660.4408 651.9966 658.3815 656.0329 637.2461 633.9087 634.7836 624.2137 625.2806 631.6511 57 1412.5 619.6992 660.2073 651.504 658.0831 655.4986 636.1798 634.1972 635.0368 625.1055 626.2759 631.6616 58 1437.5 619.5442 658.662 651.3918 657.9064 655.5442 635.9153 633.7881 634.1599 624.1967 625.1631 631.4295 59 1462.5 619.4614 659.7977 651.5216 658.4922 655.9853 636.4371 634.2494 634.2409 625.1905 625.6924 632.02 60 1487.5 619.6884 659.9299 651.3604 658.3508 655.3531 636.6494 634.5504 634.9116 624.3557 625.6612 631.0877 61 1512.5 619.321 658.5944 651.0599 658.6518 655.6142 636.0016 633.4034 634.5415 624.6537 625.0472 630.9542 62 1537.5 618.9828 658.6687 651.6243 657.6624 655.6001 635.7524 633.4592 634.2011 624.1516 624.5758 631.3773 63 1562.5 618.8885 658.6872 650.5297 657.4567 654.5753 635.5946 632.2904 634.1618 623.8654 625.468 630.3673 64 1587.5 619.4941 658.6496 651.5327 657.9368 655.829 636.1975 633.2635 634.4595 625.245 625.7107 631.5946 65 1612.5 619.3935 659.1844 650.7135 657.4629 654.9711 636.492 634.176 635.4589 624.9972 624.9031 630.5326 66 1637.5 620.0406 659.5483 651.796 658.3489 655.9309 637.283 634.1377 635.8601 624.9593 625.9175 632.1007 67 1662.5 619.3584 658.9118 651.6015 657.316 655.3936 636.2646 633.7975 633.9978 625.6658 625.9268 630.9313 68 1687.5 619.5718 659.0942 651.0267 656.9502 654.5683 636.1923 633.5457 634.6133 624.7929 625.713 631.9673 69 1712.5 619.2193 658.7454 651.3887 657.2803 655.3566 635.1547 634.0597 634.3789 624.0603 624.6939 631.1956 70 1737.5 618.9174 657.8332 651.453 657.6417 654.7302 635.399 634.1921 634.3673 624.1435 624.8887 630.1608 71 1762.5 619.4594 658.7201 651.0777 657.3186 654.5621 636.7052 633.9346 634.8716 624.0879 625.0527 631.0306 72 1787.5 619.7125 659.3703 651.2185 656.8614 654.7953 636.302 633.8636 634.1315 624.8647 626.2475 630.7097 73 1812.5 619.5472 657.7522 651.5681 657.1853 654.6893 635.6448 633.6509 634.3233 624.8097 625.5929 631.1763 74 1837.5 619.0432 657.8746 651.3865 656.5779 655.0572 635.7624 
633.8879 634.907 624.4179 625.2225 630.7489 75 1862.5 619.6406 659.345 651.2913 657.4556 655.0584 635.8297 633.7887 634.3467 625.2603 625.3803 630.8645 76 1887.5 619.2242 657.9095 651.259 656.5765 654.3004 636.3673 633.3679 634.3054 624.8213 625.1158 629.9752 77 1912.5 619.1395 657.4696 650.5891 656.6981 653.9891 635.7321 633.3386 634.003 623.9899 624.5865 629.89 78 1937.5 619.5915 659.051 651.9002 656.2175 653.737 636.5006 633.1654 634.2198 625.5754 625.6681 630.9369 79 1962.5 619.1232 658.8884 651.6986 656.3629 654.0432 636.0789 633.3622 634.2651 624.0047 625.3912 630.3894 80 1987.5 619.1797 657.7236 650.7777 656.6434 654.3553 636.203 633.3624 633.9448 624.7588 625.7231 631.2615 81 2012.5 619.4284 657.9564 650.2802 657.3472 654.7189 635.5213 632.7538 633.3559 624.2177 625.1658 630.4095 82 2037.5 619.8186 659.4537 650.7789 656.8068 653.9533 635.7599 634.2697 634.5317 625.5351 625.7355 630.6639 83 2062.5 619.0361 658.4525 650.6979 655.9353 654.8402 636.3408 633.0554 634.0612 625.0946 625.2953 630.1426 84 2087.5 619.0679 658.2028 650.7546 656.263 653.9579 635.3997 632.9751 633.8175 624.7005 625.0064 630.2469 85 2112.5 619.5036 657.6676 650.9825 656.0741 653.8184 635.8177 633.756 634.3187 625.1032 625.7711 631.028 86 2137.5 619.5838 658.24 651.1151 656.7514 654.7543 636.0051 633.4436 635.0968 624.5704 625.4066 630.8816 87 2162.5 619.7774 658.8586 650.8417 656.5809 653.8432 636.021 633.6121 635.1417 625.2152 625.9074 631.0059 88 2187.5 619.3818 658.008 651.2315 655.5812 653.7966 635.8189 633.3187 634.5345 625.5258 625.5007 629.6714 89 2212.5 619.4099 657.9034 650.5906 656.2516 654.5323 635.8962 633.5433 634.6133 624.7552 624.6984 630.9105 90 2237.5 619.4089 657.8727 650.5439 656.8112 653.9223 636.1951 633.3068 634.0905 624.7773 625.0253 629.772 91 2262.5 619.0846 657.3841 650.6034 654.7351 652.6919 635.3334 633.3407 634.7331 625.0086 624.9161 630.2352 92 2287.5 619.1711 657.5069 650.5812 655.9594 653.3334 635.3078 633.4448 634.1535 624.0512 624.5332 630.9288 93 2312.5 
619.6704 657.979 650.9181 656.2473 654.2735 635.9193 634.164 634.3943 625.4596 625.8996 630.9707 94 2337.5 618.9883 657.2547 649.8742 655.3017 653.7401 634.9956 633.05 633.1128 624.573 625.1108 630.315 95 2362.5 618.77 656.8524 650.6318 655.4716 652.9324 635.1069 632.2976 633.7103 623.8329 624.6676 630.0484 96 2387.5 619.18 656.759 649.9029 654.9803 653.013 635.6059 633.0676 634.4104 624.1588 625.0173 630.1128 97 2412.5 619.5943 657.0053 650.5318 655.9102 653.5507 636.4233 633.3031 634.4716 625.0143 626.319 630.4698 98 2437.5 619.5089 657.0871 650.8551 655.4628 653.8055 635.4576 633.061 634.1545 624.5815 625.6028 630.2713 99 2462.5 619.1695 656.976 650.3881 654.7235 652.6026 635.3256 633.1645 633.4862 624.8209 624.662 630.4411 100 2487.5 618.513 656.6107 649.1075 654.3781 652.9325 634.9442 632.8046 633.1775 623.6661 624.6586 629.0236 101 2512.5 619.2534 657.1351 650.5383 656.098 653.0957 635.2848 633.0267 634.5299 624.5957 624.9733 629.6232 102 2537.5 619.2428 656.8518 649.9549 655.1723 652.813 635.4614 633.5816 633.4532 624.7441 625.315 630.2144 103 2562.5 619.3259 657.2634 650.6989 655.4452 652.9216 635.4495 632.8607 633.0796 624.5442 625.4914 629.8364 104 2587.5 618.8588 655.8009 649.4842 654.8497 652.251 635.2849 633.4936 633.4721 624.7792 624.7345 629.9124 105 2612.5 619.1994 656.7192 650.2401 655.0245 652.8849 635.9081 633.3239 633.1811 624.7036 625.19 630.1405 106 2637.5 619.9097 656.9048 650.4774 655.2567 653.4062 635.5823 633.2125 634.011 625.769 625.796 631.052 107 2662.5 619.4985 657.6028 650.6427 655.4614 653.269 635.1971 633.4538 633.2507 624.8212 625.5746 630.1575 108 2687.5 619.5252 657.6984 649.8065 655.6652 653.447 635.831 633.2355 633.1174 624.3204 625.6367 629.853 109 2712.5 618.6822 656.1635 650.075 653.9919 651.5348 634.6455 633.0949 633.5289 624.6241 623.9609 629.9132 110 2737.5 619.5611 657.4272 650.3853 655.1591 653.2736 634.8469 633.1436 634.2127 624.3819 625.4382 629.7827 111 2762.5 619.2262 656.3097 649.845 654.0503 652.607 635.4814 
633.1084 633.1744 624.3562 625.4302 629.8924 112 2787.5 619.0123 656.8555 650.4595 654.914 652.7917 634.7565 632.5618 633.6617 624.5954 625.0725 629.9964 113 2812.5 619.0788 655.4473 649.5705 654.366 652.6426 635.1044 633.3604 633.324 623.7475 624.5335 629.8858 114 2837.5 618.9793 656.731 649.5823 654.2256 652.1266 634.853 633.2974 633.3168 624.1936 624.96 629.4152 115 2862.5 618.7719 655.694 649.4038 654.2175 652.1053 635.1691 633.1611 632.8076 623.1571 624.4319 630.0259 116 2887.5 619.1117 656.9854 650.0139 653.9536 651.8239 635.6695 632.1369 632.7 623.9512 624.8383 629.6973 117 2912.5 619.1237 656.0197 650.2586 654.5489 652.2038 635.4597 632.4124 633.5215 624.9163 625.3797 629.4735 118 2937.5 618.9059 656.6491 648.8929 654.4913 651.8942 635.3355 631.9433 633.5877 623.8877 624.385 629.1411 119 2962.5 619.3233 656.8432 649.3856 654.7177 653.3717 635.5564 633.6213 633.4085 625.1093 624.9929 630.0352 120 2987.5 619.8558 656.845 650.6094 654.5986 652.7007 636.4113 633.2379 633.598 625.1809 626.0924 630.5962 121 3012.5 618.9967 655.3859 649.56 654.115 652.3905 635.3777 632.0422 633.4079 624.9945 624.5787 629.6779 122 3037.5 619.3723 656.248 649.6002 654.6798 653.4212 635.6108 632.0718 633.4992 625.0539 624.9248 629.7259 123 3062.5 619.1507 656.0902 649.1019 653.3824 651.2278 635.6783 633.2289 633.1657 623.6423 624.6499 630.4444 124 3087.5 619.0089 656.5217 649.2501 653.7768 651.6482 634.6413 632.5473 633.2417 623.4923 623.9783 629.4027 125 3112.5 619.3791 656.786 649.4983 654.3871 652.0517 635.4073 632.6579 632.9998 624.485 624.9732 629.9129 126 3137.5 618.9518 655.0029 649.4529 653.5898 651.7109 634.7494 632.6919 632.8519 623.8593 624.4421 629.7089 127 3162.5 618.9306 654.9382 649.198 653.5625 652.1641 635.0823 632.6989 633.2711 623.9617 625.4174 630.0145 128 3187.5 619.4031 656.3798 649.6674 654.3949 652.3793 635.1739 632.3031 633.898 624.6795 625.2214 630.1257 129 3212.5 619.3513 655.6458 648.8299 653.5794 651.51 635.39 633.0969 632.8097 624.3643 625.5809 629.4613 
130 3237.5 619.3712 655.9213 650.0245 654.0766 651.2707 635.3219 632.8387 633.6629 624.7969 625.624 629.8997 131 3262.5 619.7244 656.536 649.5171 653.7458 652.416 635.3601 633.2222 633.986 625.4069 625.3191 630.0709 132 3287.5 619.252 655.9579 649.5582 653.9075 651.657 635.4694 633.4507 633.4692 624.5755 625.2649 630.2173 133 3312.5 618.7681 655.9811 649.3099 652.7297 650.7115 634.4634 632.3627 633.4456 624.0453 623.7238 629.3844 134 3337.5 618.5923 655.5339 649.1084 653.0037 651.1682 634.2581 632.8341 632.6062 623.3144 624.769 629.4238 135 3362.5 618.7292 655.2741 648.5827 653.3296 650.7324 635.1216 631.6936 632.1359 624.1303 624.507 629.8434 136 3387.5 618.8453 655.0162 649.3634 653.23 650.9911 634.3368 632.558 633.0361 624.3333 625.1946 629.0829 137 3412.5 619.0023 655.6075 648.5613 653.0687 650.2505 634.5349 632.2745 633.4133 624.0704 624.4851 630.5593 138 3437.5 619.4083 656.1359 649.5212 653.6081 650.8799 635.3503 632.8908 633.652 624.937 626.2503 630.021 139 3462.5 619.1464 655.9159 650.2939 652.9551 651.3079 635.4584 632.5818 633.2102 625.1875 625.5281 629.0551 140 3487.5 618.6135 654.8158 648.183 651.9918 650.1188 634.7103 631.5634 632.6345 624.2616 624.8559 628.3536 141 3512.5 618.9261 655.6843 648.6419 653.0123 650.9351 634.9509 631.7626 632.6395 623.7726 624.7664 629.5252 142 3537.5 619.0118 655.2039 649.4776 653.4562 651.3888 634.5172 632.2866 632.4602 624.7291 624.9897 629.2447 143 3562.5 618.9974 656.1514 649.4982 653.2829 650.7573 635.6186 632.8572 632.0904 624.5361 625.1963 629.7383 144 3587.5 619.2582 654.5657 649.0555 653.6096 651.5511 634.9013 632.5776 633.134 624.4961 624.8664 629.3546 145 3612.5 618.9818 654.2458 648.5193 652.3381 650.2084 634.7433 631.8828 632.7137 624.1804 625.0461 629.5779 146 3637.5 618.885 655.1888 648.4858 653.0299 650.7826 634.6405 633.4996 632.4273 623.8651 624.1743 628.95 147 3662.5 618.738 654.3687 649.2513 651.7636 649.9919 634.0802 631.0274 632.905 624.3196 625.1019 627.5411 148 3687.5 619.1882 655.8884 648.9971 
652.8715 651.0577 635.1871 632.2535 633.0031 624.3674 624.9991 629.8081 149 3712.5 619.0847 655.2343 649.6264 653.1473 651.5538 635.2048 632.7107 633.1395 625.5345 624.3397 629.1202 150 3737.5 619.2066 655.369 648.4612 652.5426 650.9735 635.1124 632.6933 632.6564 624.7077 624.9398 629.0221 151 3762.5 618.6994 654.3894 648.0961 652.2881 649.7615 635.0301 631.8474 632.3641 623.9357 624.5118 628.5999 152 3787.5 618.9899 654.8033 648.2224 652.3129 649.9764 634.7707 632.9237 632.5437 623.5759 624.7902 629.6734 153 3812.5 619.3187 654.9825 649.1985 652.5379 650.9844 634.6611 633.0142 633.3398 624.703 625.1121 629.1493 154 3837.5 618.8222 654.5908 648.3707 652.4884 650.0787 634.7839 632.0598 632.7098 624.1325 624.4341 629.077 155 3862.5 618.3551 654.2616 647.2054 650.4855 648.6326 634.2486 631.8976 632.4435 622.8922 623.8339 628.2844 156 3887.5 618.9439 654.8196 648.0153 652.6733 650.5895 634.6042 632.3701 632.7489 624.1487 624.2561 628.8858 157 3912.5 619.2987 655.654 648.8183 652.8049 650.9519 635.0246 632.7168 633.0453 624.6938 625.5101 629.5049 158 3937.5 619.1912 654.6866 648.7767 651.8879 649.8744 634.7485 632.1456 632.5525 625.0925 625.234 629.4699 159 3962.5 619.1578 655.1568 648.922 652.4542 649.8878 634.1395 632.115 632.5593 624.7185 624.8243 629.4701 160 3987.5 618.954 653.5426 648.1742 652.4137 650.4536 635.095 632.2903 632.9483 624.0543 625.6944 628.8961 161 4012.5 619.0859 654.4974 648.9592 652.6077 650.5737 634.72 632.2438 632.2209 624.3072 625.0119 628.9657 162 4037.5 618.9742 654.8866 649.5078 651.8472 649.6595 634.9389 632.4768 632.697 624.3441 625.2724 628.9327 163 4062.5 618.5499 654.163 648.7347 651.0778 649.625 634.4678 632.9044 631.8673 623.7386 623.8049 628.7251 164 4087.5 618.9236 654.6388 647.8996 652.0784 649.914 634.8287 631.9617 632.2116 624.1101 625.0873 628.792 165 4112.5 618.4588 653.8673 647.7265 651.611 648.9233 633.7383 631.6205 631.6542 624.0559 624.7463 628.8468 166 4137.5 618.7164 654.4114 648.5582 651.5499 649.8556 634.1824 632.1235 
631.708 623.8893 623.9264 628.8113 167 4162.5 618.8752 654.1756 647.7811 651.4162 649.5315 634.7821 632.1283 632.8556 624.2156 624.1171 628.1944 168 4187.5 619.3569 654.6552 648.099 651.7625 650.9024 634.5983 632.9384 633.0293 624.1294 624.8762 629.8321 169 4212.5 618.4404 653.5798 647.4559 652.1242 649.8545 634.6389 632.2939 631.8735 623.4035 624.022 628.4769 170 4237.5 618.6147 653.7813 647.3801 650.9257 648.8427 633.3048 631.7706 633.0439 623.0475 624.9094 628.6247 171 4262.5 618.5714 652.9237 646.916 651.2937 648.8039 634.0328 632.0201 632.2848 623.6776 624.8949 628.707 172 4287.5 618.4561 654.454 648.1065 651.0211 649.0695 633.6688 631.2935 631.864 623.6754 624.7348 629.1651 173 4312.5 619.0053 654.5872 648.0999 652.0448 649.8237 634.4566 631.8808 632.6611 624.0995 624.6749 628.3811 174 4337.5 618.7321 654.0076 647.7945 651.0182 649.4733 634.1226 631.7433 632.5805 624.262 624.6751 628.9124 175 4362.5 618.8796 654.0749 648.2349 651.6931 649.5934 634.7741 632.4299 632.2821 623.5638 624.6331 629.2547 176 4387.5 618.9158 653.4054 648.0452 651.3552 649.3086 634.5742 632.0158 632.6057 623.9611 625.1525 629.1208 177 4412.5 618.8451 655.1949 647.7965 651.2479 649.4624 634.492 631.9341 632.1602 624.2743 623.874 627.9982 178 4437.5 619.0385 654.3906 647.7725 651.0811 649.1565 634.1971 632.6773 632.0087 624.7515 624.6455 628.8649 179 4462.5 618.9516 654.4443 648.1627 650.8967 649.2179 634.1522 632.333 631.9168 623.8803 624.6349 628.3874 180 4487.5 619.0969 654.2161 648.8333 652.1127 650.1702 634.502 631.7016 632.349 623.9839 624.3378 628.9979 181 4512.5 619.0494 653.2785 647.5071 651.651 650.4825 634.0314 632.3306 633.0096 623.6339 624.9298 629.3816 182 4537.5 618.364 653.3339 647.7706 650.8002 649.297 633.0787 630.4672 631.3637 624.3894 624.2077 628.7537 183 4562.5 619.2493 653.2704 648.3747 651.6715 649.2655 634.2729 631.8156 632.6542 624.8062 625.5303 630.5464 184 4587.5 618.8602 653.9763 648.3001 650.8425 649.372 634.2168 631.493 631.8857 623.7743 624.0267 628.5333 
185 4612.5 618.8645 653.8773 648.1708 651.5908 649.8723 634.3427 632.026 632.8272 623.6715 624.4646 628.6919 186 4637.5 618.9328 653.9892 648.087 651.1106 649.2112 634.2443 632.2555 632.2057 623.7227 624.9694 629.0577 187 4662.5 618.8254 653.7014 647.6111 650.5595 648.8868 633.9651 631.9907 631.7869 624.0993 624.759 627.9094 188 4687.5 618.9911 653.4959 648.0767 651.2509 648.958 634.2695 631.7548 632.4656 624.4515 624.6573 628.4349 189 4712.5 619.1516 653.4524 647.6667 650.73 648.4824 634.3106 632.5616 632.5456 624.0686 624.596 628.713 190 4737.5 618.5762 653.2439 647.0638 650.0874 648.158 635.021 631.304 631.6893 623.7921 623.4843 627.5759 191 4762.5 618.6338 653.1327 647.8505 650.8884 648.4176 633.7389 631.458 631.6995 623.4723 623.624 628.1091 192 4787.5 618.7654 652.8275 648.0166 650.9777 649.6025 633.3308 631.7606 631.9956 623.6587 624.4399 627.9926 193 4812.5 618.4063 653.5255 647.7662 650.8618 648.3132 633.7052 631.0402 631.5484 623.2467 624.2804 627.402 194 4837.5 618.1688 652.8664 646.6762 650.8303 648.195 633.496 630.8218 631.3971 623.5202 623.6875 627.8834 195 4862.5 619.1283 654.2303 647.4854 650.7154 649.0818 634.702 632.3014 632.281 623.2738 624.3089 628.6441 196 4887.5 619.2227 653.2196 648.4142 651.187 649.3789 634.2461 631.2062 632.2326 624.1006 625.3618 629.5652 197 4912.5 618.7722 653.8734 647.7479 650.426 648.2083 633.5101 631.7288 631.7939 623.9939 624.3515 627.9678 198 4937.5 619.203 653.266 647.6406 651.2157 649.2556 634.3605 632.2576 632.7565 624.7757 624.7714 628.4938 199 4962.5 618.6023 652.9386 647.7863 650.4492 648.3712 633.7404 631.6671 631.9933 623.9734 624.5115 628.193 200 4987.5 618.5643 653.054 646.8557 649.6625 647.9863 633.8609 631.5303 631.7036 623.9881 624.3636 627.8617 201 5012.5 618.9945 653.9163 647.5579 650.4969 648.5955 633.7961 632.3524 631.8314 624.0592 625.0209 628.7744 202 5037.5 618.8036 653.3631 647.6549 650.2506 647.6292 633.5085 631.7846 632.1385 623.4423 624.4778 627.9361 203 5062.5 618.9353 653.2382 647.4943 
649.9781 648.3486 634.2175 631.3141 632.1607 623.8845 624.994 628.1153 204 5087.5 618.8769 652.2405 647.3362 649.9139 648.6146 634.112 632.1768 631.6975 624.1771 625.1053 628.9446 205 5112.5 618.9629 652.2856 648.2336 651.5798 649.2323 633.871 631.3724 632.2623 624.4001 624.2176 628.3257 206 5137.5 618.5888 653.0329 646.4013 650.6897 648.7911 633.3618 631.4572 631.302 623.522 623.9393 627.7615 207 5162.5 618.5332 652.966 647.0297 650.0679 648.0988 632.9859 630.8757 631.049 623.7867 624.8207 627.6892 208 5187.5 619.2727 653.5433 648.2781 650.5863 649.1801 633.7943 632.6711 632.0327 624.2103 624.7424 629.4621 209 5212.5 618.7765 652.9074 647.416 649.6592 647.4137 634.0584 631.0725 632.0022 623.3719 624.4958 627.8934 210 5237.5 618.9615 652.582 647.2526 650.5398 648.5964 633.5985 632.227 631.8234 624.2246 624.4247 628.0643 211 5262.5 619.0009 652.7223 647.438 650.2225 647.9529 634.3578 632.3067 632.3832 624.3578 624.3336 628.4965 212 5287.5 618.9458 652.6265 647.3358 649.77 647.63 633.5897 632.2106 631.3565 624.2521 625.0172 628.9265 213 5312.5 618.8072 652.4581 646.9609 649.2367 647.822 633.8555 632.4647 631.2376 624.2442 624.3329 628.6963 214 5337.5 618.5354 652.092 647.0957 649.1379 647.9707 633.4178 631.472 631.2896 623.2217 624.1828 628.4373 215 5362.5 618.7768 652.0957 647.1739 649.217 648.299 634.1184 631.7813 631.9216 623.8468 624.4162 628.0313 216 5387.5 618.9748 652.3557 646.764 649.8083 647.7198 634.2619 632.4328 631.9969 624.6183 625.6374 628.4755 217 5412.5 619.439 653.6304 648.3546 650.8978 648.4926 634.5473 632.1081 632.3116 624.7527 625.4262 628.7746 218 5437.5 618.7111 653.4909 647.0508 649.2834 646.8163 633.4722 631.3457 631.6077 624.0118 623.8951 627.7093 219 5462.5 618.8112 652.3801 647.352 649.8355 647.8631 633.4998 631.3351 632.343 623.451 624.0698 628.0892 220 5487.5 618.2131 652.578 646.3652 648.7379 647.3924 634.0418 631.1596 631.0277 623.2672 623.6799 627.7253 221 5512.5 618.5854 653.149 646.8393 649.188 646.6886 633.166 631.5174 631.1911 
623.5634 623.8946 627.9638 222 5537.5 618.5213 652.0917 645.9027 649.1051 646.8119 634.7473 632.009 631.2304 623.8827 624.8598 627.0569 223 5562.5 619.0323 652.4625 646.7866 650.3601 647.4203 633.6518 632.1259 631.6863 624.6704 624.6772 628.4052 224 5587.5 618.6252 652.0872 646.4479 648.502 646.9255 633.5339 632.1697 630.8464 623.5466 624.4852 627.8798 225 5612.5 618.5673 652.2964 647.0101 649.1359 647.6871 633.5874 630.7991 632.2396 624.2537 624.4285 627.2764 226 5637.5 618.8451 652.1103 647.0721 649.4681 647.1686 634.5487 631.163 631.5285 624.3839 624.7169 629.0421 227 5662.5 618.9258 652.987 647.0167 650.0562 647.9343 634.1851 632.0204 631.5921 623.5986 624.1024 628.602 228 5687.5 619.0433 652.563 647.48 649.4209 647.8456 633.9053 631.3552 631.7692 623.9901 625.3299 628.6911 229 5712.5 618.9093 653.0607 647.2772 649.6101 648.1616 633.8955 631.5607 631.7954 623.5102 624.4634 628.29 230 5737.5 618.2741 651.683 646.157 649.5037 647.0727 633.3031 630.8218 631.244 623.6108 624.7005 627.5605 231 5762.5 619.143 652.87 647.2256 650.0595 647.7934 634.1636 631.401 632.8653 624.1015 624.1788 627.9962 232 5787.5 618.9545 652.5454 646.8469 649.2721 647.5413 634.1884 631.5245 631.0145 624.1489 624.9394 628.0456 233 5812.5 618.8908 652.3326 647.5903 649.397 648.1425 634.0223 631.2271 631.0431 623.513 624.3311 627.9605 234 5837.5 618.9236 652.8054 646.764 648.8079 647.2703 633.6219 631.3885 630.6707 624.1685 624.5713 627.8141 235 5862.5 618.811 651.6793 646.3536 649.6607 647.9751 634.0148 631.5314 631.5793 624.4696 624.8814 627.7988 236 5887.5 619.0164 652.2158 646.9457 649.9317 647.8622 633.8169 630.9459 632.0796 624.1904 624.4197 627.5697 237 5912.5 618.739 652.3353 647.0415 648.8077 646.7442 633.8036 630.6798 631.9432 623.8518 624.3821 627.8839 238 5937.5 618.4722 652.3319 646.5427 648.6056 646.5049 633.3466 630.897 630.6872 624.8246 624.0144 627.0977 239 5962.5 618.4676 651.7141 645.9844 648.5367 646.2891 633.38 631.3219 631.1309 623.3089 624.2309 627.3183 240 5987.5 
618.5736 651.9366 646.8758 648.7231 646.8516 634.081 631.132 631.3326 622.8598 624.5669 627.4215 241 6012.5 618.5775 652.3941 646.6208 648.9467 647.336 633.8886 631.0278 631.8381 623.9088 624.679 627.7326 242 6037.5 618.4216 651.8822 645.9825 648.4448 646.5095 633.8057 630.9413 631.1101 623.8846 624.1879 627.4334 243 6062.5 618.093 651.1863 645.6364 648.0116 645.9717 632.7636 630.8826 630.7844 623.6428 623.76 628.4916 244 6087.5 618.011 650.7627 645.1145 647.7979 645.8697 632.7751 630.0246 629.9156 622.7402 623.46 627.2273 245 6112.5 618.393 651.7139 645.8012 648.4682 646.4808 633.1117 630.7762 630.6297 623.1604 623.7627 627.5273 246 6137.5 618.4089 651.1519 645.9778 647.8734 646.1238 633.5215 631.1628 630.885 624.1536 624.0203 627.4756 247 6162.5 618.4086 650.6174 646.2551 647.4361 645.6442 633.1803 630.8233 631.2525 623.3068 623.3222 628.213 248 6187.5 618.4941 650.7368 646.2873 648.206 645.8069 633.2143 631.2049 630.8191 624.0545 624.7554 627.856 249 6212.5 618.3587 651.5488 646.0264 648.4196 646.4547 633.1145 630.4532 630.44 623.2864 624.6866 626.9993 250 6237.5 618.8981 652.0637 646.6025 648.0815 646.7123 632.9885 631.2276 632.0408 623.7819 624.257 628.1196 251 6262.5 618.2112 651.0093 645.9306 647.6297 645.5979 632.6874 630.309 631.9392 622.8574 623.9678 626.5563 252 6287.5 618.3797 650.8463 645.115 648.2466 646.4703 632.9657 631.0185 630.9376 623.6062 624.1074 627.3731 253 6312.5 618.6171 651.0803 646.4533 647.7855 645.7398 632.955 630.9232 631.6985 623.9085 623.8961 627.4292 254 6337.5 618.6821 651.1905 646.9589 648.7276 646.8346 633.3911 631.5856 631.1552 623.1318 624.2305 627.5624 255 6362.5 618.6308 651.8107 646.0864 648.1322 646.6225 633.5327 631.3765 631.0332 623.7023 623.9898 627.2907 256 6387.5 618.6265 651.64 646.1128 648.3339 646.3668 632.7732 630.5766 630.7189 623.9275 625.5066 627.7556 257 6412.5 618.4316 651.2918 647.3736 646.9403 645.4584 633.2405 630.9283 631.7579 623.6013 623.7031 627.3668 258 6437.5 618.8508 651.4332 647.1757 648.4575 
646.2968 634.4756 631.2028 631.7496 624.1447 625.1056 628.1667 259 6462.5 618.2028 651.6175 645.2017 648.1664 646.5113 632.6745 631.1601 630.247 622.4272 623.6615 626.9515 260 6487.5 618.1785 650.8184 645.2308 647.8696 645.9977 632.9228 630.7756 630.3073 622.8306 623.8459 626.6337 261 6512.5 618.2871 651.1972 646.1493 648.7333 646.6203 633.7166 630.508 630.387 623.4867 623.6911 626.775 262 6537.5 618.9505 651.377 646.7259 648.7837 646.4083 633.7539 631.0143 631.2833 625.0178 624.33 627.725 263 6562.5 618.5302 650.8953 645.8485 648.3132 646.1617 633.0473 631.371 630.3833 623.5478 623.6528 627.4871 264 6587.5 618.5667 650.5818 645.5633 647.6813 645.8256 632.8955 631.3222 631.2744 624.285 624.4284 627.361 265 6612.5 618.1587 650.5808 646.0063 647.2793 645.5067 633.5839 630.3443 630.6163 622.3919 623.8034 626.9281 266 6637.5 618.4789 651.6074 646.1382 647.9849 646.1978 632.0409 631.0717 630.9895 623.8639 624.0661 627.6185 267 6662.5 618.6895 651.1743 645.0934 648.2558 646.2116 632.8265 630.8822 630.2263 623.8095 623.5925 627.3405 268 6687.5 618.824 652.0283 646.1471 647.5767 645.7159 634.2792 630.9498 631.313 623.7428 624.2681 628.0645 269 6712.5 618.6317 650.9717 645.122 648.1999 645.7176 633.7947 631.0049 631.0455 623.8011 623.8533 626.908 270 6737.5 618.5133 651.2345 645.0101 647.6289 646.5774 632.9885 631.0599 631.5557 624.08 623.6674 626.7621 271 6762.5 618.4715 650.5145 646.1657 646.9936 645.7424 632.5369 630.9801 631.683 623.3716 623.7228 627.4534 272 6787.5 618.512 651.0828 645.5219 646.6826 644.795 633.5704 630.6304 630.1678 624.1887 624.2213 626.3114 273 6812.5 618.6452 651.2954 645.7911 647.2936 645.9355 632.8019 631.0129 630.7956 623.8557 624.532 626.6105 274 6837.5 618.8978 651.0166 646.9765 648.2441 645.8021 633.5285 631.0142 631.1585 623.8816 624.6734 628.1299 275 6862.5 618.3586 650.302 644.2611 648.542 645.8646 632.945 630.6007 630.6872 623.4219 623.7298 626.6519 276 6887.5 618.4875 650.8549 645.873 647.2126 645.4785 632.9577 630.4811 630.9634 623.2826 
623.3281 627.4324 277 6912.5 618.6757 650.3267 645.4689 647.3181 645.8541 632.8485 631.4462 631.3532 623.8546 624.0416 628.1203 278 6937.5 618.2115 650.3977 645.1668 646.0031 644.3501 632.735 630.4579 631.429 623.5469 624.2447 626.2979 279 6962.5 618.3628 650.3816 644.6562 646.7577 645.142 633.5831 630.3956 630.2956 622.966 623.9322 626.9581 280 6987.5 618.5374 650.4845 645.6938 647.8725 646.2198 633.1813 631.1056 630.7955 623.8157 623.9563 626.9682 281 7012.5 618.5039 650.4199 645.5979 647.872 645.4787 632.1809 630.9974 630.6805 623.1477 624.2579 627.2339 282 7037.5 618.7678 651.0885 645.2875 647.7262 645.6397 632.7452 631.1904 630.6738 623.8352 624.3883 627.5483 283 7062.5 618.2261 650.6754 645.1489 647.1713 645.9271 632.7379 630.2042 630.4537 622.8319 623.7862 626.3857 284 7087.5 618.5986 650.8426 646.1051 646.8343 645.2433 633.3667 630.7269 630.8247 623.623 623.9239 627.6721 285 7112.5 618.3443 651.1782 645.5043 646.5859 645.2304 633.4053 630.373 631.0143 623.5604 623.8627 627.244 286 7137.5 618.7396 650.7602 645.9398 648.3223 645.9961 633.6356 630.7913 630.7576 623.138 624.1686 627.8974 287 7162.5 619.0795 651.3266 645.8681 647.5622 645.5512 634.4992 631.1298 631.4903 623.7069 624.4254 628.2348 288 7187.5 618.3279 650.2048 644.8116 646.5444 644.5427 632.7031 630.5645 630.8341 623.2977 624.2077 627.2958 289 7212.5 618.2973 650.6044 645.1635 646.8084 644.6295 632.4833 630.0304 630.7622 623.752 624.2131 627.506 290 7237.5 618.859 650.8688 645.9185 647.6035 645.3818 633.1843 631.3684 631.0757 623.5035 624.3592 626.7896 291 7262.5 618.2963 650.1216 645.4653 646.7793 645.3006 633.4122 630.8581 630.724 623.6299 623.5449 626.261 292 7287.5 618.7335 650.2448 646.2549 646.7894 644.9746 632.6145 631.1034 630.9787 623.3859 624.0314 627.3662 293 7312.5 618.4626 650.4178 645.0825 646.8085 645.4283 632.6787 630.565 630.6512 623.6212 623.6796 626.8558 294 7337.5 618.6748 649.8851 645.3406 647.0794 644.8849 632.8644 631.1923 631.0913 623.8293 625.2235 627.555 295 7362.5 
618.2332 648.9786 644.2948 646.8331 644.4878 633.0242 629.955 630.1825 623.5684 623.5459 626.9243 296 7387.5 618.4091 650.1669 644.8131 646.3815 644.3585 632.6979 630.2908 629.8451 623.6335 624.1096 626.9751 297 7412.5 618.4987 650.7976 644.4052 646.1215 644.2645 633.0426 630.2771 630.1676 623.1582 623.245 627.1333 298 7437.5 618.4745 650.0045 644.9857 646.6431 645.1226 632.4447 630.7097 630.8005 622.7082 623.7896 627.8796 299 7462.5 618.7395 650.1959 645.7572 646.3635 644.1454 632.8343 630.291 630.8889 623.9828 623.597 626.7385 300 7487.5 618.7043 650.5124 645.9306 647.0941 645.1926 633.5556 630.5643 630.8277 623.6326 624.196 627.044 301 7512.5 619.0649 650.7897 645.3888 647.4891 645.7593 633.4466 631.4775 630.8706 624.4205 624.4989 627.328 302 7537.5 618.5242 650.8239 645.1647 646.4609 644.4305 632.8471 631.4025 630.4154 623.3036 623.7999 627.6354 303 7562.5 618.4352 650.4218 645.1364 646.7873 645.2778 632.3872 630.5753 630.4445 623.0562 624.3973 626.6241 304 7587.5 618.4319 649.9424 644.7924 646.772 645.3847 633.2489 630.5264 630.5124 622.8138 624.1622 627.0306 305 7612.5 618.4504 650.152 645.4666 646.6695 644.4017 632.5882 630.2798 630.1396 623.9076 623.6975 627.1513 306 7637.5 618.3089 650.7886 644.414 647.0801 645.5416 632.6067 630.0021 630.5348 623.5274 623.8616 626.629 307 7662.5 618.6721 650.4858 645.5833 646.2829 644.9763 633.039 630.5968 630.9221 623.7247 624.3492 626.4539 308 7687.5 618.3895 650.6616 644.9204 646.2094 644.0641 632.6111 630.8601 630.9184 623.2283 623.6508 626.9643 309 7712.5 619.0706 651.3523 645.5729 646.3172 644.9272 633.3322 630.9194 630.8345 623.827 624.3725 627.6029 310 7737.5 618.3439 649.728 645.1075 646.5331 644.3375 632.4734 630.283 629.9038 623.1606 623.4886 627.0319 311 7762.5 618.9544 650.4042 645.4888 646.8574 645.0176 633.0137 631.1789 630.9664 623.8693 624.6666 627.8776 312 7787.5 618.1977 649.8716 644.6861 645.3126 644.4462 631.8128 630.6642 629.7904 623.3611 623.0477 626.8131 313 7812.5 618.1473 649.7101 643.9476 
645.6463 644.142 632.8172 630.344 630.3991 622.835 622.895 626.627 314 7837.5 618.862 650.1162 644.3231 647.1564 645.4676 632.6408 631.6468 630.9702 623.8692 623.9261 626.4991 315 7862.5 618.4185 649.6833 645.1225 646.0168 644.3929 632.5783 630.7495 630.5514 622.6778 624.2944 626.9491 316 7887.5 618.3152 649.4984 644.7363 645.7152 644.3641 632.9649 629.771 630.5899 623.5342 624.5155 626.9422 317 7912.5 618.2918 649.4324 644.4608 647.1509 644.4112 633.0677 629.9365 629.7813 623.2294 622.9217 626.7955 318 7937.5 618.4807 650.8046 645.1009 645.7318 644.1698 632.419 630.4016 630.3552 624.068 623.7154 626.8045 319 7962.5 618.2901 649.5621 644.9576 645.4758 644.1941 632.5734 630.2385 629.8744 623.7524 623.7316 626.7106 320 7987.5 618.364 649.5074 643.8624 646.2436 644.4735 632.7982 630.3678 630.9499 623.7981 624.6382 626.9544 321 8012.5 618.8354 650.1159 645.4016 647.2214 645.6133 632.8678 630.6392 630.8544 623.2534 624.1601 626.9893 322 8037.5 618.6215 649.928 644.6645 645.8989 643.6923 633.2473 630.9215 630.2488 623.0786 624.1518 626.9227 323 8062.5 618.9782 650.3453 644.4418 646.5901 644.9668 633.245 630.9773 630.8305 624.1211 624.282 627.9896 324 8087.5 618.0019 648.5852 644.6289 645.2636 643.9906 632.0613 630.5516 629.8139 622.9913 623.1181 626.175 325 8112.5 618.2685 649.8962 645.0746 646.1486 643.9857 631.9631 631.4108 630.1446 623.9561 623.8182 626.7123 326 8137.5 618.2903 649.6125 644.0575 644.9006 643.1916 631.7471 630.3981 629.9039 623.3989 623.994 627.0982 327 8162.5 618.5256 649.6985 644.9219 646.5496 644.2649 632.31 630.8333 630.2354 623.1154 623.4292 626.9045 328 8187.5 618.6263 649.13 644.3508 645.7005 643.6443 631.9247 630.8098 630.7128 624.0049 623.5818 627.8002 329 8212.5 618.565 651.0052 644.5112 645.7753 644.4512 632.8293 630.2637 630.5492 623.4625 623.5892 626.6317 330 8237.5 618.3657 649.8342 644.8589 645.9157 643.9619 633.1589 630.0736 630.5122 622.9489 623.7293 627.0794 331 8262.5 618.3354 649.5816 643.9188 645.6946 643.9122 632.5254 630.2308 
630.3366 623.895 623.3626 626.2339 332 8287.5 618.489 650.1469 644.9995 646.0249 643.9678 632.7201 630.8447 630.2843 623.1595 623.6675 627.4992 333 8312.5 618.4103 649.2427 643.7491 645.1183 643.585 632.7074 630.0562 630.8584 623.9041 624.5068 627.1074 334 8337.5 618.775 649.9411 645.0695 646.5077 644.542 633.5217 630.9005 631.5416 623.9577 623.5901 627.5317 335 8362.5 618.6649 649.6666 645.0583 647.0481 644.9319 632.4419 630.9266 630.2653 624.0309 624.2949 627.7756 336 8387.5 618.5972 650.51 644.6694 645.3269 643.7284 632.702 630.8527 630.397 623.556 623.7085 626.4101 337 8412.5 618.2018 649.096 644.4631 645.6769 643.7876 632.2838 630.0569 629.9414 623.4953 623.3194 626.4029 338 8437.5 618.5864 650.9217 644.8315 645.6452 643.2927 632.428 630.5806 630.6544 624.0461 623.5732 627.5798 339 8462.5 618.0637 648.3583 644.2733 645.4712 643.3705 632.1783 630.677 630.224 622.2156 623.6408 625.8602 340 8487.5 618.3464 650.4567 644.1441 646.0295 643.9643 631.7144 630.3444 631.081 623.825 624.0898 627.4506 341 8512.5 618.6875 649.3386 645.7851 646.3093 643.873 632.8727 630.5329 630.3559 623.0921 623.7292 627.4044 342 8537.5 618.226 648.528 644.6844 645.4502 643.583 631.4378 629.3295 630.3208 623.9322 623.3319 625.8648 343 8562.5 618.3907 649.3694 643.7564 645.5451 643.8227 632.4523 630.2999 629.6681 623.0829 623.3485 625.5932 344 8587.5 618.6668 649.3559 644.3465 645.8364 643.7917 632.6209 630.7156 630.521 623.3915 624.6564 626.7504 345 8612.5 618.4699 649.3436 644.804 646.1493 644.0891 633.1641 630.395 630.5449 623.3689 623.4048 626.454 346 8637.5 618.5243 648.9337 644.5682 645.1041 643.015 633.1662 630.1375 629.7279 622.9173 623.6332 627.0146 347 8662.5 618.2064 648.5491 643.8893 645.4478 643.2366 632.1683 630.1043 629.596 622.4739 624.0199 626.508 348 8687.5 618.4275 649.0491 644.4008 645.3 643.4925 632.7113 630.6609 630.2512 623.1657 623.8705 626.3277 349 8712.5 618.5667 650.2475 643.9579 645.3668 643.8265 633.2668 631.1766 630.0999 623.5194 623.6311 626.7078 350 8737.5 
618.5017 649.0488 644.7107 645.0626 644.102 631.778 630.7302 630.2522 623.2993 624.086 626.9363 351 8762.5 618.4543 648.3395 644.4734 645.1038 642.6601 632.8849 630.4172 630.5424 623.0777 623.0308 627.3421 352 8787.5 618.8428 649.9798 644.0939 645.4286 643.7637 632.8143 630.5114 630.8691 624.4572 623.7593 627.2918 353 8812.5 618.4342 649.3737 644.246 645.4604 642.9952 632.1868 630.2811 629.83 623.5446 624.1014 626.0372 354 8837.5 618.7335 649.1455 645.1523 645.6904 643.6562 632.4996 630.3286 630.1065 623.2224 624.2629 627.9404 355 8862.5 618.8819 649.1021 644.8431 645.9183 643.7847 632.6263 630.8952 630.333 623.681 624.6149 627.1219 356 8887.5 618.7337 648.9698 645.845 645.7788 644.5348 632.7433 629.8293 630.2963 623.6382 624.24 626.7588 357 8912.5 618.5966 649.8865 644.7819 645.2094 643.2773 631.7107 630.7093 630.3165 623.5106 623.9584 626.8737 358 8937.5 618.2953 648.5237 643.2282 644.8661 642.9077 632.307 630.0069 629.4535 623.3817 624.2337 626.9178 359 8962.5 618.4522 648.8473 643.9328 644.5106 642.81 632.3212 630.2171 630.0487 624.0746 623.7379 625.8119 360 8987.5 618.1987 649.5801 644.1971 644.4061 642.3501 631.8334 630.0769 629.7371 623.4826 623.7143 625.7715 361 9012.5 618.6604 648.8025 644.5723 644.9618 643.9131 633.0034 631.1027 630.4953 623.3564 624.161 626.3954 362 9037.5 618.4766 648.3843 643.907 645.296 642.8569 632.3137 629.6657 629.796 622.7613 623.72 626.1989 363 9062.5 617.9552 647.5717 643.8892 644.3449 642.853 632.3802 629.3721 629.4203 622.4843 623.1292 626.0959 364 9087.5 618.0586 648.6588 643.6195 644.3138 642.8918 632.3866 630.0131 629.6002 623.2503 624.1777 625.8431 365 9112.5 618.2538 648.8638 644.0906 644.6384 643.2183 632.2125 629.9573 630.113 623.1203 623.9502 625.9283 366 9137.5 618.3348 649.3889 643.5032 644.5769 642.7786 633.0311 630.1023 630.3598 622.764 623.3851 626.3935 367 9162.5 618.2285 648.3715 643.6462 643.8999 643.0072 631.9955 630.0289 630.4701 622.5838 624.1422 626.8893 368 9187.5 618.3764 648.4641 644.1332 644.8917 
642.5606 631.5427 630.1089 629.5101 623.274 623.6395 626.2112 369 9212.5 618.4925 648.7271 644.0112 644.3022 642.6092 632.4091 630.4191 630.0229 623.7411 624.1999 626.5547 370 9237.5 617.9371 648.4581 643.4852 644.2721 642.51 631.1778 630.1696 628.9105 622.242 623.0448 626.217 371 9262.5 618.0937 649.1332 644.2218 644.5459 642.7458 631.5524 630.086 629.4436 622.7374 623.4835 626.7291 372 9287.5 617.9835 648.7028 643.4191 644.2373 642.3337 631.7539 629.1179 630.0249 622.7879 622.9156 625.9305 373 9312.5 618.6342 649.1277 643.5998 644.5541 643.1012 632.5915 629.2926 630.784 623.463 623.4691 626.2484 374 9337.5 617.7979 648.6783 643.8123 644.7457 642.3512 631.7824 629.779 628.7636 622.6756 623.4227 625.9601 375 9362.5 618.1734 647.8283 644.1797 644.8148 642.7339 631.7168 630.1532 629.9965 622.914 623.3928 626.1645 376 9387.5 618.7312 649.4527 644.3163 644.5857 643.1813 632.8515 630.3759 629.651 623.5558 624.5426 626.368 377 9412.5 618.0063 648.1059 643.4379 643.7064 642.2692 631.7398 629.3768 629.355 622.5813 623.0003 626.8682 378 9437.5 618.5759 648.2842 643.6792 644.917 643.1413 632.4457 630.8254 630.3641 623.4134 623.9466 626.8719 379 9462.5 618.3521 648.3616 644.0281 644.0362 642.2481 631.0841 630.0707 629.6455 622.8475 623.1573 626.0066 380 9487.5 618.157 647.7067 643.0498 644.0736 642.7409 631.2069 629.6264 629.6725 622.9569 622.7905 625.7662 381 9512.5 617.9725 648.8606 643.9327 644.3573 642.2167 631.1654 628.9784 629.2826 623.0738 623.7036 625.817 382 9537.5 618.7489 648.3842 644.2837 645.1919 643.1051 632.1657 631.0128 630.867 623.4777 623.5722 627.5416 383 9562.5 617.8265 646.8775 643.2375 644.2315 642.5984 631.5545 629.6814 629.7804 623.1103 622.591 625.656 384 9587.5 618.4409 648.4052 643.6649 643.5457 642.6738 632.0575 629.8965 629.6938 622.908 623.8966 626.826 385 9612.5 619.0416 649.5977 643.6657 644.7118 643.0229 632.8607 631.5474 630.3487 624.4663 623.8067 627.0891 386 9637.5 618.3461 648.2963 642.905 644.1572 642.5398 631.8768 629.5719 630.1785 
622.8795 623.7967 626.035 387 9662.5 618.3498 648.3428 643.8818 644.5497 642.6002 631.9827 629.6941 629.5495 623.1228 622.9446 626.1307 388 9687.5 618.1572 648.089 643.369 644.2119 642.1496 631.1705 629.9102 629.8906 622.5808 623.9903 626.354 389 9712.5 618.3647 648.6657 644.0529 644.183 642.9127 632.0826 629.4771 630.1224 623.7083 623.5812 626.0413 390 9737.5 618.5168 648.5977 643.9052 645.2474 643.2351 631.804 630.3215 629.216 623.5666 624.1424 626.3207 391 9762.5 618.0275 648.2418 643.6906 643.7616 642.0941 631.4929 629.4654 629.28 622.3514 623.2917 625.2658 392 9787.5 618.2667 647.3649 643.4853 644.7407 643.2198 632.41 629.6619 630.2525 623.24 623.6723 626.3983 393 9812.5 618.3468 648.5295 644.1379 643.5131 642.0088 632.3274 628.9454 630.5922 623.2969 624.2035 625.8965 394 9837.5 618.4358 649.0129 643.9139 644.2854 642.763 631.7873 629.3355 629.2727 623.1277 623.571 625.6473 395 9862.5 618.1271 647.406 643.7066 642.863 642.0241 632.1009 629.909 629.0679 622.7357 623.1508 626.1223 396 9887.5 617.9229 647.6817 642.7045 643.7678 642.1103 631.4221 628.8112 628.9842 622.3607 622.786 626.744 397 9912.5 618.6469 648.8355 643.3469 644.6775 643.2816 631.9663 630.1039 629.7807 623.4054 624.1391 626.7914 398 9937.5 617.7221 646.9633 642.0548 643.6641 642.0682 631.4566 629.7685 628.4507 622.8747 623.107 626.0303 399 9962.5 618.2958 648.0546 642.8984 644.578 642.1019 631.8155 629.3374 628.9635 623.1402 622.9276 625.9091 400 9987.5 618.0993 647.0377 642.5144 644.368 642.0623 631.8915 629.7869 629.003 622.5751 623.6693 625.8073 401 10012.5 618.1272 647.2462 642.4072 644.4166 641.7524 631.1989 629.263 629.419 622.7052 623.7405 625.7961 402 10037.5 618.4154 647.6229 643.4176 644.0985 643.03 631.8913 629.5855 630.1053 623.6872 623.5963 626.1135 403 10062.5 618.5835 648.9072 644.0621 644.5265 642.5212 633.0004 630.1475 629.7055 622.766 624.4352 625.7347 404 10087.5 618.0216 647.6785 642.9856 643.8038 642.4717 631.2042 629.8713 629.7814 622.5835 623.238 626.5525 405 10112.5 
618.2201 647.6977 643.0132 644.4261 642.548 631.6777 629.1582 629.689 623.3602 623.2825 625.6037 406 10137.5 618.2251 647.7091 642.9163 643.212 641.877 631.297 629.2489 629.2729 623.52 623.3742 626.0713 407 10162.5 618.3507 648.5285 643.1431 643.9415 642.2181 631.7845 630.0111 630.0578 622.9511 623.6954 625.783 408 10187.5 618.1182 647.6229 642.7609 642.9742 641.2529 631.3859 629.7497 629.6435 623.1985 624.1358 625.8631 409 10212.5 617.9782 647.1648 643.171 643.8479 641.2091 631.2998 628.9912 629.2563 623.5774 623.1912 625.8413 410 10237.5 617.6732 647.3444 642.3775 642.8249 641.534 630.1176 629.1761 628.7322 622.1397 622.1891 625.0741 411 10262.5 618.1717 647.6369 643.7163 644.1544 642.0439 631.5784 628.9904 629.7674 622.5278 623.7442 626.3526 412 10287.5 618.6397 647.2312 643.4321 644.5102 642.5814 632.5985 629.7049 629.5013 624.2129 623.7569 626.4116 413 10312.5 618.3368 648.2129 643.3842 643.9775 642.4043 631.991 630.7045 628.701 623.5927 624.3752 626.6277 414 10337.5 618.0742 647.8487 643.1686 643.7808 642.3062 631.3772 629.9242 629.0876 623.2576 623.6289 625.4375 415 10362.5 617.7497 647.013 641.7815 643.2179 641.4163 630.8726 629.2208 629.2222 622.5629 622.5882 625.5398 416 10387.5 618.4549 647.6987 643.0436 643.9575 642.4734 631.6887 629.8469 629.3193 623.7414 623.7411 625.8074 417 10412.5 618.2528 647.8874 643.2693 643.1774 642.4411 631.8712 629.4941 629.2646 623.7571 624.1682 625.9747 418 10437.5 618.131 647.8884 642.6342 643.031 641.5251 631.7709 629.5714 629.9105 623.192 623.973 625.5489 419 10462.5 618.3785 647.001 643.1022 643.6928 642.6541 631.5855 629.5517 629.547 622.94 624.0887 626.5947 420 10487.5 617.9627 647.6881 643.2336 642.816 641.5233 631.1578 628.8576 629.158 622.453 622.9434 625.2868 421 10512.5 618.729 647.9235 643.2214 643.8734 642.0082 631.4508 630.1806 629.7437 623.5755 623.8893 626.5532 422 10537.5 617.5241 646.6207 642.648 642.2259 641.1804 630.6105 628.5996 628.8971 621.6964 622.9705 624.9856 423 10562.5 618.4024 648.6393 642.9753 
643.8009 642.6087 632.1996 629.7879 629.7999 623.0841 623.8905 626.3713 424 10587.5 618.0944 647.4147 642.4924 642.7857 641.5279 632.8795 630.2167 629.3568 622.8895 623.4556 625.6895 425 10612.5 618.331 647.5699 642.7685 644.1743 641.4024 631.2979 629.5014 628.7688 623.1797 622.6307 626.3287 426 10637.5 618.2501 648.0763 643.1176 643.3727 641.4119 631.1455 629.911 628.9497 623.8503 623.3407 625.3356 427 10662.5 618.2344 646.9236 643.4489 643.5155 641.561 631.5596 629.6456 629.5593 623.126 623.1235 625.3179 428 10687.5 617.8233 646.361 642.6951 643.7252 641.8952 631.7112 628.3871 628.7961 623.5494 623.0217 626.3651 429 10712.5 617.9861 647.0634 643.0438 642.6142 640.8954 632.15 629.1689 629.1661 622.4247 622.9066 625.6096 430 10737.5 618.3168 647.869 643.2796 644.0039 642.3156 631.5433 629.3045 629.8434 624.0207 624.1194 625.8442 431 10762.5 618.2615 648.145 643.1503 644.2762 642.096 631.206 629.7108 629.457 623.3217 623.8368 626.7375 Luminescence/inst/extdata/Daybreak_TestFile.DAT0000644000176200001440000015422013231137116021150 0ustar liggesusers_ c=@??Z# Gy/min u-2/secnone  Sr-90 Am-241b s@A@B@H@A@H@t@&@@ج@t@@.@H@P@z@Щ@@@^@@@@l@@J@@ @@@@@@@@@Ф@@@@ڣ@@@@@Ң@@@@2@@@v@@L@r@@@@4@@F@T@ȟ@0@@Ȟ@@@@@؜@H@h@4@@l@@@8@@@@@T@h@0@@ؚ@ܙ@Ԙ@L@p@@@x@_ c=@??Z# Gy/min u-2/secnone  Sr-90 Am-241b t@H@F@D@G@r@.@"@@@@@@@@f@@@0@@L@D@@Ψ@t@ @8@2@@@r@,@@@@r@@Ԥ@"@t@@@N@@(@@@@Ң@@.@&@@@@@@@ @@@@@@J@ȟ@`@D@H@L@@@@$@x@̝@@@8@t@d@\@ܛ@@@X@X@\@ě@@0@p@x@@ @_ c=@??Z# Gy/min u-2/secnone  Sr-90 Am-241b pt@A@A@D@C@@ @@.@J@@֪@@@ة@:@̨@@@p@@@ܦ@@@Ȧ@<@ @@@@x@@@@@t@&@\@@Ƣ@*@̡@H@.@@:@ơ@@j@@@@@@@J@@ğ@@(@@@@@@D@D@@@ܜ@@@@@|@@@H@4@@l@@ @|@H@@@x@@@X@,@@Ę@_ c=@??Z# Gy/min u-2/secnone  Sr-90 Am-241b q@A@G@A@E@֨@-@\@@X@D@x@ڬ@$@̫@@|@@@2@ک@h@0@$@@ا@@@@@@`@F@@@@@@8@h@J@V@(@أ@p@@@j@@@@P@@@@@v@b@P@@@@@F@@P@@@4@@@p@@@,@ܝ@\@@؝@@P@@@@H@@@t@x@@`@@x@ę@l@@_ c=@??Z# Gy/min u-2/secnone  Sr-90 Am-241b pp@@@A@G@?@t@@@d@@&@@@@@R@:@N@@@@ @̦@b@@L@v@d@ڤ@T@@X@t@@@Σ@F@@F@D@@ޡ@.@@@ܡ@r@@@@d@ @@@@@@ @T@$@@@ @@,@@@d@@8@@@D@ @@d@̚@@@@ @l@@\@@@4@x@(@ 
@@x@@@ @@_ c=@??Z# Gy/min u-2/secnone  Sr-90 Am-241b r@=@B@@@A@@@@n@L@<@Ȩ@.@,@@@@p@@D@@<@@<@T@ @4@@.@@n@@@Ƣ@L@@@@@^@@@ @T@^@@p@@@@@ܞ@$@l@d@@4@H@@@|@@@@@$@\@@T@@@@`@@ؘ@Ę@t@@@@X@H@@ @@@З@Ė@ė@@@@@@P@ @_ c=@??Z# Gy/min u-2/secnone  Sr-90 Am-241b s@@@@@<@F@@6@@h@@@@ҩ@v@$@<@@ @v@|@ڦ@@@4@@>@@@@6@@d@@@@8@Ģ@x@@@4@(@@@h@0@Ԡ@Ơ@ @p@ؠ@@(@@@@@@@L@@@@@|@,@@@t@@D@@@@@X@@@@H@d@P@(@@И@ @@@@@(@@d@0@0@_ c=@??Z# Gy/min u-2/secnone  Sr-90 Am-241b s@@@<@D@B@ @@ܪ@R@@:@L@r@@@@@@x@ʦ@ @@2@@@Z@@X@@ܣ@@@ң@@f@@@D@,@@@@@t@:@@ܝ@ʠ@r@@@@@*@@@,@ȝ@L@@8@@@Ĝ@\@@H@ě@@@h@d@@p@T@@ԙ@@@p@p@@\@@Ę@@@4@ @@x@ @@X@0@@_ c=@??Z# Gy/min u-2/secnone  Sr-90 Am-241b s@B@B@D@G@@ @@t@B@@@H@@$@ @L@@d@6@Ĩ@@@@@@@֦@&@d@@@P@B@ @t@@f@,@@@@@@@8@@|@@@@ @@^@$@@ @0@|@Ġ@D@@Ƞ@@0@@\@`@|@@D@@ܝ@T@\@@М@X@d@ @@@̚@ܛ@@Ԛ@H@К@@ԙ@@ؙ@@l@t@_  c=@??Z# Gy/min u-2/secnone  Sr-90 Am-241b q@D@@@A@C@@ګ@«@@P@B@@X@>@v@*@̧@x@@^@0@&@@@n@Ф@>@Ф@@֣@У@@@@ @|@<@@@@@t@@ء@@ @J@@j@@@0@@P@ @h@<@@@@T@؝@@@@$@T@@p@@@@`@@p@@@@@ @$@@@|@8@@@ @4@̖@@@,@@`@@_  c=@??I@Z# Gy/min u-2/secnone  Sr-90 Am-241b x@E@H@N@E@@d@S@@^@@@T@R@@@@L@@#@@P@R@ȭ@@|@V@2@B@^@@@n@Ы@@@@ʩ@$@b@@@*@Χ@&@6@@@@&@v@@f@@@@֤@ԥ@@t@@x@p@ң@D@2@@@ģ@@@"@¢@f@R@B@J@@@@@@@X@@@@@ʠ@@@@ @@ @,@_  c=@??I@Z# Gy/min u-2/secnone  Sr-90 Am-241b `y@H@G@I@G@@Q@ȳ@@B@@˲@h@@ޱ@o@b@@@g@@@w@ʮ@X@@@b@.@@@\@@~@Ϋ@h@@ث@^@(@`@@:@@F@@@"@@T@@@@B@4@@¦@6@@v@6@ȥ@>@@*@f@@@h@l@@`@X@@@n@@ܢ@¢@@@B@@@¡@@r@@@@,@ڠ@@@@ܠ@:@@@_  c=@??I@Z# Gy/min u-2/secnone  Sr-90 Am-241b pu@F@J@F@F@@@@@W@@@@@#@[@B@ @@@@&@@ȭ@t@4@l@@ī@@H@֪@@@@z@@@p@@`@@ @4@&@P@@@"@Х@ƥ@@@@@Ĥ@@@ܤ@@@@V@@h@΢@̢@¢@@@P@ʡ@@D@@@:@@N@@Р@p@b@Ơ@D@@(@@@Ԟ@|@@@@@ܞ@_  c=@??I@Z# Gy/min u-2/secnone  Sr-90 Am-241b x@K@I@K@J@x@@@@@ϲ@{@'@@@ @ް@@E@G@n@@`@~@@@@@L@Ҭ@v@`@@$@@@l@@@@@@@l@B@h@@@F@ @@@@t@@`@ȥ@إ@@@@ؤ@¤@@ڣ@@Σ@@*@L@@D@@X@@@.@:@ڡ@n@^@@@@F@@ @Z@@Ҡ@@@d@,@p@@_ c=@??I@Z# Gy/min u-2/secnone  Sr-90 Am-241b y@D@I@F@G@\@@ײ@@@ű@S@ܰ@@b@_@@f@Į@t@<@(@@:@@@@$@@@@v@@@|@v@j@@J@6@@@Ч@@,@@Ʀ@@0@@¥@@@J@@Ҥ@|@\@0@@\@@@,@R@0@@̢@@ @z@P@6@@@@0@֡@Ġ@@@^@r@@@@X@H@d@؞@@@̞@t@\@ @_ c=@??Y@Z# Gy/min u-2/secnone  Sr-90 Am-241b 
P@L@F@F@L@@@@&@ @@V@@ٲ@@@@3@@@@m@@!@~@@@@@@@b@@@ȫ@@ @Ϊ@v@p@@z@@ީ@@@ʧ@@ʧ@@@ڦ@4@x@(@@2@ĥ@@@@Ĥ@@@¤@6@@Z@@r@x@&@ @L@@@4@j@@T@@@ڡ@@@V@@@~@@Ơ@h@@H@6@_ c=@??Y@Z# Gy/min u-2/secnone  Sr-90 Am-241b `|@K@K@P@L@n@A@@@4@Y@@R@@i@@v@A@@`@@@@v@ձ@@@@:@گ@@b@@@b@@@@6@@x@*@@@|@Ϋ@@@@@@b@@@T@@@@*@@@Z@"@F@@@<@@r@@@@Τ@@@0@@@(@֣@@@@@B@ @@ @\@@T@N@@@|@@_ c=@??Y@Z# Gy/min u-2/secnone  Sr-90 Am-241b e@J@M@H@O@1@@E@@@͵@@0@@@(@8@=@@w@@@@)@Y@@j@'@@@@@@ί@9@@@ޮ@<@@@@ @`@,@^@@l@@Z@@@@@ܩ@@@P@@@@P@|@@6@@8@@@@ڥ@@@ƥ@*@@(@@@@Ԥ@l@8@@R@@ @@ @ʣ@@X@Ģ@¢@@@_ c=@??Y@Z# Gy/min u-2/secnone  Sr-90 Am-241b 0v@J@K@J@L@V@@@ݵ@@J@@}@@@|@Y@@~@b@@@l@h@@@@@@%@@@@@@B@@ޭ@ڬ@@8@,@\@@@F@F@@x@@@@h@@@@Ҩ@@L@@J@ @@Ħ@@@ئ@@@j@@:@@@@@b@f@@@@@v@D@@@̣@@6@ڢ@V@@t@¢@@@_ c=@??Y@Z# Gy/min u-2/secnone  Sr-90 Am-241b ~@Q@L@N@K@@@ö@b@@@@e@´@@@R@@@g@@@w@x@ڱ@@˰@w@{@@@z@̮@<@J@@Ҭ@x@@@@ȫ@֪@z@@b@ܪ@P@V@@\@@@@*@Ƨ@"@@b@x@̦@@@2@$@@@إ@0@@8@^@@ʤ@>@@ @L@x@@Z@4@Т@8@\@r@>@,@@@@@@@@_ c=@??@i@Z# Gy/min u-2/secnone  Sr-90 Am-241b P@@S@S@@T@@R@¶@@@C@@@t@+@]@@@w@@T@@_@@۶@¶@@͵@C@}@-@Ӵ@@@@@d@(@W@@@@W@;@@@@X@@h@@@[@!@ʯ@@ԯ@@@@@ @@@Ь@@@l@֫@r@@2@ƪ@Ҫ@l@Ī@&@@@J@D@<@f@@b@6@@@@@@ @f@N@.@@>@@_ c=@??@i@Z# Gy/min u-2/secnone  Sr-90 Am-241b @Q@S@Q@R@@2@@ϻ@@g@@@@@ܸ@]@$@@@@Ѷ@^@J@ϵ@h@@@F@ϴ@´@@@@Y@d@>@Բ@@|@@@@(@@S@@y@ɰ@@-@_@<@@@>@ޮ@@@@@@`@*@,@h@@@z@t@@V@@b@@@T@@@@R@v@@0@@,@X@h@ا@2@>@@H@@@_ c=@??@i@Z# Gy/min u-2/secnone  Sr-90 Am-241b @R@Q@Q@@T@S@@ݻ@6@d@@B@ڹ@@O@@j@@@N@@@$@j@1@@[@@@@@@ó@@@@@@6@m@W@>@y@@@ @ٰ@ް@@G@X@0@ @@F@@Ү@@>@@8@@@ު@@2@B@n@f@@@.@@,@@@@J@@@^@@@4@@@@J@@F@@@ʦ@r@@@@@_ c=@??@i@Z# Gy/min u-2/secnone  Sr-90 Am-241b Ȁ@O@@Q@O@Q@0@@d@Y@@'@@ѷ@G@@@B@]@@_@@@@\@J@@@:@ @@g@W@@/@@@0@X@)@°@/@@@f@@Я@:@.@@@ԭ@@̭@p@n@@ެ@@ʬ@@@ī@0@@֪@@f@@@Ʃ@b@L@@ި@@$@$@֧@@@֧@>@@l@\@@~@Z@0@*@ @@@@@_ c=@??@i@Z# Gy/min u-2/secnone  Sr-90 Am-241b @T@T@S@Q@d@2@@׺@ @3@@p@@@V@@@@ڶ@@b@5@@@@@n@D@@@@/@@@@q@p@I@@U@@ɰ@@@@@Я@@@@@V@L@@@@@@ޭ@@@@@Ȭ@@֫@@J@.@ @ڪ@"@4@P@X@\@T@@@L@x@ڧ@@@<@@T@<@ڦ@@@@@@_ c=@??@y@Z# Gy/min u-2/secnone  Sr-90 Am-241b @\@]@@]@^@n@u@L@@ @@@ 
@@E@1@@G@e@@@Q@@@@@/@@@x@@g@=@ѽ@޼@@@)@Ļ@g@e@غ@@@@@@@Ը@V@^@@ڷ@v@L@ @@@ֶ@>@_@ߵ@@@D@@@z@T@>@@@@y@_@(@;@6@v@@@y@@D@@@߱@@@Q@@@@@E@_ c=@??@y@Z# Gy/min u-2/secnone  Sr-90 Am-241b X@[@@Z@W@Z@M@@@@z@q@@}@@@@|@;@I@(@¿@@5@ν@m@@@@`@׻@@9@@I@@r@@@T@@&@(@@@J@+@@߶@@@@ǵ@o@U@@@\@T@@@@@9@@A@[@?@}@@@@ @@̱@ϱ@@@v@@@@ڰ@ɰ@I@R@@@@ޯ@@|@®@®@:@&@_ c=@??@y@Z# Gy/min u-2/secnone  Sr-90 Am-241b H@W@U@@X@@V@W@P@/@@X@P@@7@T@@@@H@@t@@@>@4@ߺ@]@u@@A@@@<@@9@`@\@?@@g@B@ص@̵@@e@U@@ƴ@o@@@@@_@;@Dz@@c@ܲ@@@@@@?@ڱ@@@\@߰@@@@@;@@0@|@@¯@®@@@@@@ʭ@@~@.@2@@@@(@@_ c=@??@y@Z# Gy/min u-2/secnone  Sr-90 Am-241b @@[@Z@@[@Z@@v@*@}@F@@@L@@|@J@@@@g@@@ھ@@@B@@i@@@x@@r@'@@@@@ù@1@@@@0@[@@@@@J@@A@?@F@~@ @@ߴ@@@G@W@ @@b@@@@@@g@@"@@@@@@)@԰@@v@ڰ@G@1@@̯@ @5@|@®@X@v@l@@6@_ c=@??@y@Z# Gy/min u-2/secnone  Sr-90 Am-241b @Y@\@\@@\@@@|@B@@M@j@@U@(@@@S@@@b@@@@p@@;@@@6@@Ž@ҽ@,@@@m@@»@@@ں@@H@@ǹ@K@U@"@@n@ݷ@@5@ @@,@@@Ķ@@@@@@@[@Ҵ@޴@S@@ҳ@ݳ@@@7@@@@˲@@|@8@@@̱@@R@"@Ͱ@@@2@@@_ c=@??@Z# Gy/min u-2/secnone  Sr-90 Am-241b @\@]@@^@_@.@E@@@d@@A@$@@]@@@p@)@@@^@@@@s@/@@@T@@@@@@,@@@T@/@Ӽ@}@@̻@@T@@@@@@@@@@@@Y@@@2@@ж@@g@@A@ @M@@@D@H@|@@@@4@˳@س@@K@R@@@@@@j@i@9@@@@@_  c=@??@Z# Gy/min u-2/secnone  Sr-90 Am-241b @\@[@@[@X@l@Q@@@d@@M@@@}@&@@@s@@@^@R@@@q@@@@D@@@@r@@μ@ż@6@b@I@ں@@@}@_@*@@@߸@ɸ@@@@׷@*@2@@^@z@8@@@@w@δ@@@Ŵ@K@@$@@@@@B@@@Բ@k@L@0@K@Ա@@α@@@Q@@@@װ@ǰ@@_ ! c=@??@Z# Gy/min u-2/secnone  Sr-90 Am-241b @\@_@`@@^@@@T@@@j@@q@*@@w@&@@@q@@@a@H@@@G@b@3@B@T@E@@e@2@@@c@j@ @@]@@}@7@@@@@@\@@@@@@C@@m@)@@͵@͵@@d@`@P@#@̴@ô@r@1@Z@o@@h@@J@@t@@@@@@@@@u@@;@$@@@ư@_!" 
c=@??@Z# Gy/min u-2/secnone  Sr-90 Am-241b @a@]@\@`@N@@@o@Y@@@@@/@@@@@@@p@2@@@I@2@@@w@A@4@@S@ξ@~@@˽@@@a@@@\@ @W@@@@@$@@ @˹@&@@x@@3@'@@ܷ@l@@ж@@5@\@4@#@@@}@ܴ@K@@@G@@2@̳@@@@s@l@IJ@@@ @y@@@:@B@_"# c=@??@Z# Gy/min u-2/secnone  Sr-90 Am-241b @@Z@]@@_@]@$@w@@@z@"@@@@@@ @@@i@@@@*@@@@9@@@i@@@ӽ@f@G@E@ @m@f@ @k@b@@>@@;@@@k@˸@ȸ@U@@@\@@w@g@@ƶ@@@@@@Z@@@޴@ @@6@i@@+@@@{@j@@@@@@x@.@ @@@@n@@u@@@_#$ cf@=@??I@Z# Gy/min u-2/secnone  Sr-90 Am-241b \@(@,@0@*@@@@H@@@T@<@\@@d@t@@@l@@@@@@P@@@@@@@@@p@@X@@@@@ @`@P@@@@`@@@ȉ@@x@@@@@@@@@ @@І@@@@@`@@@@(@p@X@p@@@X@@`@@؄@@ȃ@@X@8@p@X@@P@@@@@@@X@_$% cf@=@??I@Z# Gy/min u-2/secnone  Sr-90 Am-241b U@"@.@$@.@@@x@@@@đ@@X@@@(@`@@@@h@h@@T@@P@0@@H@P@@@@x@ @0@x@ @ȉ@؈@ȉ@H@@@@@0@Ї@P@@@@h@؅@؇@@(@P@؅@@@@@X@p@X@@@@@(@h@ @8@@@@@@@@x@@@Ё@@@@؀@@h@@X@@@@0@@_%& cf@=@??I@Z# Gy/min u-2/secnone  Sr-90 Am-241b \@*@0@,@0@@|@@@@@@̓@h@@̒@@@@@@@@h@@p@@@@0@T@@@ @@ȍ@@@H@0@@@p@x@@@x@@@ @@@X@@P@@@@@@@@@@@@@P@@@@@@@p@ @Ѕ@@h@Є@@@@@@P@@@(@@@@0@@@@@_&( cf@=@??Y@Z# Gy/min u-2/secnone  Sr-90 Am-241b h@5@<@9@8@,@ʢ@@@@r@2@@0@|@@@x@@@<@@@@L@@4@@@x@h@8@@ؙ@|@̚@8@Ę@@̘@Й@@@@4@@@`@L@@@@@@@@Ԗ@L@\@@@ē@<@L@@@(@ē@@T@l@@T@@<@@<@ؑ@@@@@@0@ @ @D@@@@@<@l@D@@@_') cf@=@??Y@Z# Gy/min u-2/secnone  Sr-90 Am-241b `i@5@<@A@>@~@@@2@b@@*@T@|@"@V@ޢ@֢@@ @Ρ@n@"@@@@x@@0@؟@@@@@ @T@P@@@ě@8@@<@@@Ě@@ @p@@@@ȗ@h@d@@ܗ@ @h@@@\@P@`@@t@@t@P@x@@@@$@@ @@@L@ @4@X@x@0@ @@@@@@@@8@0@0@_(* cf@=@??Y@Z# Gy/min u-2/secnone  Sr-90 Am-241b o@<@6@<@:@@@@@ܢ@ @&@j@@0@@@@@&@@@@d@p@Ԟ@`@P@@@@@X@p@ԛ@@x@L@@$@@@@$@,@ @8@ȗ@@ė@|@ܖ@@P@ԕ@@@@@(@4@ܔ@Д@@@ؔ@@8@@ @@D@@@@L@@@\@@@@@đ@đ@@@x@@@@T@@@@@X@@ @@_)+ cf@=@??c@Z# Gy/min u-2/secnone  Sr-90 Am-241b o@>@B@C@E@@@@@<@P@ĩ@t@@@D@d@@0@\@@B@@@ĥ@@@@@@n@@2@~@@֢@ʢ@D@ @Ģ@@r@֠@@@H@6@r@p@@r@<@D@@(@@@\@@@@@؜@P@<@@@t@@@(@p@d@@t@Й@@$@0@ԙ@@@@З@@l@D@ؗ@@З@@ @@X@@_*, cf@=@??c@Z# Gy/min u-2/secnone  Sr-90 Am-241b 0r@A@D@@@C@@Ь@@n@ @@ @@@@@~@@ @.@X@@x@x@@l@8@@<@@@@p@T@@@\@@آ@ʢ@@@@@@@@t@N@ڠ@\@@R@@@ @@؟@؟@@@@@H@@@Ԝ@@$@@l@@@@@@@@ܚ@@Ț@@p@@|@T@@@@@Ę@@@`@(@T@_+- cf@=@??c@Z# Gy/min u-2/secnone  Sr-90 Am-241b 
@w@C@B@C@B@\@,@J@@>@@@@@@@@F@@,@@@@@@ @@@@|@(@(@B@@"@4@@أ@@ @¢@@@@~@@ȡ@.@@@@ @@Ƞ@@R@2@@>@,@̞@@p@ܞ@L@@@Н@ @@`@@P@D@H@|@@@ؚ@p@@@\@,@@<@@X@\@Ԙ@@ @@@@\@H@_,. cf@=@??@i@Z# Gy/min u-2/secnone  Sr-90 Am-241b 0y@H@H@G@L@R@@@@±@o@@\@@@@7@@ʯ@\@"@@@ƭ@l@@j@.@@ @&@F@Ԫ@@Ω@@ީ@@@$@D@8@ @ @`@@@8@@@6@@Х@z@@@@@@6@@~@ܣ@@@v@|@@d@@p@~@@@@ҡ@@@N@t@l@@@Р@@@@@~@*@@(@ @@@_-/ cf@=@??@i@Z# Gy/min u-2/secnone  Sr-90 Am-241b u@I@G@H@E@@@(@@ٰ@?@2@b@j@@@@B@@j@L@0@@@@@8@@L@@@ڨ@@@r@@@2@@@x@@@@t@,@@@@X@@ @j@*@@v@@0@@@@@ȡ@@@(@ġ@ޡ@t@@`@@Ԡ@~@@>@X@X@H@@@@@@؞@\@X@T@h@Н@H@@\@@@ț@_.0 cf@=@??@i@Z# Gy/min u-2/secnone  Sr-90 Am-241b w@G@H@F@G@:@y@"@@*@հ@ް@@@@2@@@X@@@@F@@@@V@.@@ @H@:@̨@r@ب@@@@v@Z@>@:@@n@@@@@P@f@@T@n@@B@ң@z@@@@0@@J@\@@@@@R@L@H@@:@@p@@@@@@ȟ@@&@@@|@$@@d@@@ @@@l@̝@_/1 cf@=@??r@Z# Gy/min u-2/secnone  Sr-90 Am-241b @R@P@Q@P@@I@M@@ȹ@ȸ@@@@v@@I@ @@9@@N@@@@g@@@ij@ʳ@ @@@,@J@@ұ@@1@@@A@@@&@\@.@@n@b@(@R@@`@ڭ@^@Ԭ@r@@D@~@@@N@@Ԫ@@t@@@@*@@@P@X@@>@֨@F@X@@Ħ@Z@@ʦ@ @@@@@Х@@Х@@*@_02 cf@=@??r@Z# Gy/min u-2/secnone  Sr-90 Am-241b @K@F@N@O@װ@@@@@>@޳@@@@Y@@߱@@P@b@ @۰@@M@ޯ@!@ @ܮ@h@@@@@@@@$@@@@>@@$@@@P@@2@&@F@@@@~@@@@@ڦ@"@$@@n@ @@@@8@z@@X@@j@,@\@Ң@@@@@~@@@@@4@`@@@^@@^@@@@_13 cf@=@??r@Z# Gy/min u-2/secnone  Sr-90 Am-241b (@P@N@@P@P@@@_@@@@@@@@϶@g@@@@@մ@@@#@@@c@@m@z@5@D@ȱ@@G@ @U@@@@v@<@@@\@ @@`@<@@@ @H@$@@>@Ϋ@ҫ@"@J@0@@@N@j@@ܩ@D@L@|@\@@@Ԩ@ܧ@ʧ@@ȧ@@|@4@@Φ@Ҧ@Ҧ@@@,@Υ@ҥ@@@4@@F@@_24 c=@??Z# Gy/min u-2/secnone  Sr-90 Am-241b c@2@;@;@4@@ޡ@@t@ҡ@@@@D@@H@l@@@@̜@d@@@@@@@@@@@@p@D@@@@@@@@4@h@@T@ؕ@@<@l@$@@@ܓ@0@@@@@x@@<@@@@@@@@@@@@@А@p@̐@@@@@@p@@X@@@@@@H@@@@x@@@_35 c=@??Z# Gy/min u-2/secnone  Sr-90 Am-241b @h@:@@@3@2@ؙ@$@@@@@(@$@@l@ @ @؜@@@ԛ@@@@h@@@L@\@,@@@@@@@Е@d@8@@@0@@@@ؓ@@@@@@ @@T@ @@@@@ @@`@@@@\@H@@@H@0@@0@Џ@(@@@@@@@@@@`@@0@@@@@P@@@@@_46 c=@??Z# Gy/min u-2/secnone  Sr-90 Am-241b d@4@8@1@2@@6@@@@@f@@@@4@О@<@@@d@d@@@(@@@@@@h@@<@|@@@@@@p@@4@p@@@\@`@@|@0@@P@P@ԓ@ @\@|@ @@@@@@@ @|@ @ @@@l@@Ȑ@@ @<@Џ@T@8@8@@\@H@4@@@@@Ȍ@0@@@x@h@P@@_57 c=@??Z# Gy/min u-2/secnone  Sr-90 Am-241b 
e@;@8@=@8@@@@@f@@*@|@@@@ĝ@(@@@P@@@@`@8@@@̙@ܙ@0@@@@@@$@@@@@@@@@`@h@`@@@@@@ @@\@ȓ@@@H@̒@@(@@@@@x@@Б@@0@@8@@̐@@@l@@@(@@x@@@@@@X@@ @@Ѝ@@_68 c=@??Z# Gy/min u-2/secnone  Sr-90 Am-241b f@6@3@6@8@D@r@@ܟ@P@T@@@ @@@ @ț@@@l@@@@@@T@$@@|@D@,@@ė@t@\@@@@@8@\@@@p@@@@@@@@P@@@@P@P@@`@@@@@@ؐ@\@Ȑ@@@l@X@ȏ@@@@h@P@@@؎@@0@@@ȋ@X@@X@`@Ћ@@@@Њ@@@@@_79 c=@??Z# Gy/min u-2/secnone  Sr-90 Am-241b h@8@8@<@8@H@@@x@֡@ @@@,@@B@@"@ @@@@@@ @@D@@؛@@@̚@ܙ@X@@t@\@$@@@ @@@@ܖ@8@P@@@|@x@@x@ @@@4@,@H@,@@p@@@8@P@ @d@@đ@,@ܑ@ؑ@@,@@@Đ@@@@@x@@0@H@@@@@Ѝ@Ѝ@(@X@@@Luminescence/inst/extdata/DorNie_0016.psl0000644000176200001440000003675613231137116017660 0ustar liggesusers ========================================================================================= Run Name: ALU Sample no: 0016 Sequence Name: Praktikum2016 Filename: Praktikum2016 Dark Count: 15 c/s Light Count: 0 c/s Dark Count Correction: OFF 256 Offset Subtract: ON Datafile Path: D:\Results\DORNIE\ALU\ALU0016.psl Summary Path : D:\Results\DORNIE\ALU\summary\ALU.sum Run Sequence : Praktikum2016 ----------------------------------------------------------------------------------------- \ L11 @ 5/19/2016 4:45:12 PM / '--------------------------' ---------------------------------------------------------------------------------------- Measurement : DARK 15s | Stim 0 | On/Off(us) 15,15 | Cycle(ms),No 1000, 15 ---------------------------------------------------------------------------------------- Time (s) Total Count Counts per Cycle -------- --------------------- --------------------- 1 0 +/- 4 0 +/- 4 2 -2 +/- 6 -2 +/- 4 3 12 +/- 8 14 +/- 5 4 19 +/- 9 7 +/- 5 5 25 +/- 10 6 +/- 5 6 18 +/- 10 -7 +/- 5 7 17 +/- 11 -1 +/- 4 8 23 +/- 12 6 +/- 5 9 15 +/- 12 -8 +/- 5 10 18 +/- 13 3 +/- 4 11 16 +/- 13 -2 +/- 4 12 22 +/- 14 6 +/- 5 13 22 +/- 15 0 +/- 4 14 28 +/- 15 6 +/- 5 15 25 +/- 16 -3 +/- 4 Terminal Count = 25 +/- 16 ---------------------------------------------------------------------------------------- Measurement : S1 15 0 100s | Stim 1 | On/Off(us) 15, 0 | 
Cycle(ms),No 1000, 100 ---------------------------------------------------------------------------------------- Time (s) Total Count Counts per Cycle -------- --------------------- --------------------- 1 16660 +/- 129 16660 +/- 129 2 32789 +/- 181 16129 +/- 127 3 48649 +/- 221 15860 +/- 126 4 64422 +/- 254 15773 +/- 126 5 77931 +/- 279 13509 +/- 116 6 93102 +/- 305 15171 +/- 123 7 108127 +/- 329 15025 +/- 123 8 122880 +/- 351 14753 +/- 122 9 137318 +/- 371 14438 +/- 120 10 151523 +/- 389 14205 +/- 119 11 165571 +/- 407 14048 +/- 119 12 179457 +/- 424 13886 +/- 118 13 193226 +/- 440 13769 +/- 117 14 206845 +/- 455 13619 +/- 117 15 220210 +/- 470 13365 +/- 116 16 233057 +/- 483 12847 +/- 113 17 246024 +/- 496 12967 +/- 114 18 258717 +/- 509 12693 +/- 113 19 271443 +/- 521 12726 +/- 113 20 284106 +/- 533 12663 +/- 113 21 296846 +/- 545 12740 +/- 113 22 309240 +/- 556 12394 +/- 111 23 321382 +/- 567 12142 +/- 110 24 333521 +/- 578 12139 +/- 110 25 345488 +/- 588 11967 +/- 109 26 357358 +/- 598 11870 +/- 109 27 369194 +/- 608 11836 +/- 109 28 380867 +/- 617 11673 +/- 108 29 392406 +/- 627 11539 +/- 107 30 403919 +/- 636 11513 +/- 107 31 415362 +/- 645 11443 +/- 107 32 426753 +/- 654 11391 +/- 107 33 438016 +/- 662 11263 +/- 106 34 449144 +/- 671 11128 +/- 106 35 460170 +/- 679 11026 +/- 105 36 471161 +/- 687 10991 +/- 105 37 481959 +/- 695 10798 +/- 104 38 492740 +/- 702 10781 +/- 104 39 503494 +/- 710 10754 +/- 104 40 513949 +/- 717 10455 +/- 102 41 524654 +/- 725 10705 +/- 104 42 535111 +/- 732 10457 +/- 102 43 545469 +/- 739 10358 +/- 102 44 556047 +/- 746 10578 +/- 103 45 566590 +/- 753 10543 +/- 103 46 576850 +/- 760 10260 +/- 101 47 587113 +/- 767 10263 +/- 101 48 597322 +/- 773 10209 +/- 101 49 607258 +/- 780 9936 +/- 100 50 617201 +/- 786 9943 +/- 100 51 626983 +/- 792 9782 +/- 99 52 636819 +/- 798 9836 +/- 99 53 646695 +/- 805 9876 +/- 99 54 655509 +/- 810 8814 +/- 94 55 665363 +/- 816 9854 +/- 99 56 674934 +/- 822 9571 +/- 98 57 684515 +/- 828 9581 +/- 98 58 
693948 +/- 834 9433 +/- 97 59 703516 +/- 839 9568 +/- 98 60 712982 +/- 845 9466 +/- 97 61 722344 +/- 850 9362 +/- 97 62 731611 +/- 856 9267 +/- 96 63 740851 +/- 861 9240 +/- 96 64 750276 +/- 867 9425 +/- 97 65 759405 +/- 872 9129 +/- 96 66 768537 +/- 877 9132 +/- 96 67 777573 +/- 882 9036 +/- 95 68 786739 +/- 888 9166 +/- 96 69 795598 +/- 893 8859 +/- 94 70 804571 +/- 898 8973 +/- 95 71 813485 +/- 903 8914 +/- 94 72 822406 +/- 907 8921 +/- 95 73 831225 +/- 912 8819 +/- 94 74 840160 +/- 917 8935 +/- 95 75 848871 +/- 922 8711 +/- 93 76 857733 +/- 927 8862 +/- 94 77 866580 +/- 932 8847 +/- 94 78 875191 +/- 936 8611 +/- 93 79 883976 +/- 941 8785 +/- 94 80 892559 +/- 945 8583 +/- 93 81 900899 +/- 950 8340 +/- 91 82 909380 +/- 954 8481 +/- 92 83 917905 +/- 959 8525 +/- 92 84 926191 +/- 963 8286 +/- 91 85 934738 +/- 967 8547 +/- 93 86 943030 +/- 972 8292 +/- 91 87 951445 +/- 976 8415 +/- 92 88 959676 +/- 980 8231 +/- 91 89 967730 +/- 984 8054 +/- 90 90 976219 +/- 989 8489 +/- 92 91 984294 +/- 993 8075 +/- 90 92 992370 +/- 997 8076 +/- 90 93 1000418 +/- 1001 8048 +/- 90 94 1008331 +/- 1005 7913 +/- 89 95 1016283 +/- 1009 7952 +/- 89 96 1024276 +/- 1013 7993 +/- 89 97 1032190 +/- 1017 7914 +/- 89 98 1040099 +/- 1021 7909 +/- 89 99 1047962 +/- 1024 7863 +/- 89 100 1055928 +/- 1028 7966 +/- 89 Terminal Count = 1055928 +/- 1028 ---------------------------------------------------------------------------------------- Measurement : DARK 15s | Stim 0 | On/Off(us) 15,15 | Cycle(ms),No 1000, 15 ---------------------------------------------------------------------------------------- Time (s) Total Count Counts per Cycle -------- --------------------- --------------------- 1 4 +/- 4 4 +/- 4 2 14 +/- 7 10 +/- 5 3 9 +/- 7 -5 +/- 4 4 8 +/- 8 -1 +/- 4 5 0 +/- 9 -8 +/- 5 6 -6 +/- 10 -6 +/- 5 7 7 +/- 11 13 +/- 5 8 12 +/- 11 5 +/- 4 9 -12 +/- 12 -24 +/- 6 10 -1 +/- 12 11 +/- 5 11 8 +/- 13 9 +/- 5 12 15 +/- 14 7 +/- 5 13 7 +/- 14 -8 +/- 5 14 -5 +/- 15 -12 +/- 5 15 -12 +/- 15 -7 +/- 5 Terminal 
Count = -12 +/- 15 ---------------------------------------------------------------------------------------- Measurement : S2 15 0 100s | Stim 2 | On/Off(us) 15, 0 | Cycle(ms),No 1000, 100 ---------------------------------------------------------------------------------------- Time (s) Total Count Counts per Cycle -------- --------------------- --------------------- 1 88023 +/- 297 88023 +/- 297 2 173608 +/- 417 85585 +/- 293 3 257002 +/- 507 83394 +/- 289 4 337842 +/- 581 80840 +/- 284 5 416668 +/- 646 78826 +/- 281 6 493191 +/- 702 76523 +/- 277 7 567633 +/- 753 74442 +/- 273 8 640003 +/- 800 72370 +/- 269 9 710432 +/- 843 70429 +/- 265 10 779934 +/- 883 69502 +/- 264 11 847323 +/- 921 67389 +/- 260 12 912894 +/- 956 65571 +/- 256 13 977444 +/- 989 64550 +/- 254 14 1040046 +/- 1020 62602 +/- 250 15 1101576 +/- 1050 61530 +/- 248 16 1161549 +/- 1078 59973 +/- 245 17 1220325 +/- 1105 58776 +/- 242 18 1277027 +/- 1130 56702 +/- 238 19 1333490 +/- 1155 56463 +/- 238 20 1389034 +/- 1179 55544 +/- 236 21 1443594 +/- 1202 54560 +/- 234 22 1497249 +/- 1224 53655 +/- 232 23 1548759 +/- 1245 51510 +/- 227 24 1600292 +/- 1265 51533 +/- 227 25 1650904 +/- 1285 50612 +/- 225 26 1700538 +/- 1304 49634 +/- 223 27 1749403 +/- 1323 48865 +/- 221 28 1796880 +/- 1341 47477 +/- 218 29 1844491 +/- 1358 47611 +/- 218 30 1890984 +/- 1375 46493 +/- 216 31 1937212 +/- 1392 46228 +/- 215 32 1982618 +/- 1408 45406 +/- 213 33 2027030 +/- 1424 44412 +/- 211 34 2071016 +/- 1439 43986 +/- 210 35 2113676 +/- 1454 42660 +/- 207 36 2156344 +/- 1469 42668 +/- 207 37 2198478 +/- 1483 42134 +/- 205 38 2240192 +/- 1497 41714 +/- 204 39 2281314 +/- 1511 41122 +/- 203 40 2321794 +/- 1524 40480 +/- 201 41 2361658 +/- 1537 39864 +/- 200 42 2401167 +/- 1550 39509 +/- 199 43 2439834 +/- 1562 38667 +/- 197 44 2474234 +/- 1573 34400 +/- 186 45 2511724 +/- 1585 37490 +/- 194 46 2549656 +/- 1597 37932 +/- 195 47 2586736 +/- 1609 37080 +/- 193 48 2623506 +/- 1620 36770 +/- 192 49 2659445 +/- 1631 35939 +/- 190 
50 2695350 +/- 1642 35905 +/- 190 51 2730486 +/- 1653 35136 +/- 187 52 2765285 +/- 1663 34799 +/- 187 53 2799384 +/- 1673 34099 +/- 185 54 2833349 +/- 1683 33965 +/- 184 55 2866981 +/- 1693 33632 +/- 183 56 2900327 +/- 1703 33346 +/- 183 57 2933060 +/- 1713 32733 +/- 181 58 2965878 +/- 1722 32818 +/- 181 59 2997817 +/- 1732 31939 +/- 179 60 3029628 +/- 1741 31811 +/- 178 61 3061474 +/- 1750 31846 +/- 178 62 3092488 +/- 1759 31014 +/- 176 63 3123846 +/- 1768 31358 +/- 177 64 3154486 +/- 1776 30640 +/- 175 65 3184857 +/- 1785 30371 +/- 174 66 3215039 +/- 1793 30182 +/- 174 67 3244570 +/- 1802 29531 +/- 172 68 3274321 +/- 1810 29751 +/- 173 69 3303533 +/- 1818 29212 +/- 171 70 3332132 +/- 1826 28599 +/- 169 71 3360314 +/- 1833 28182 +/- 168 72 3388958 +/- 1841 28644 +/- 169 73 3417112 +/- 1849 28154 +/- 168 74 3444699 +/- 1856 27587 +/- 166 75 3472236 +/- 1864 27537 +/- 166 76 3499511 +/- 1871 27275 +/- 165 77 3526575 +/- 1878 27064 +/- 165 78 3553717 +/- 1885 27142 +/- 165 79 3580372 +/- 1893 26655 +/- 163 80 3606875 +/- 1899 26503 +/- 163 81 3633171 +/- 1906 26296 +/- 162 82 3658384 +/- 1913 25213 +/- 159 83 3684158 +/- 1920 25774 +/- 161 84 3708882 +/- 1926 24724 +/- 157 85 3734219 +/- 1933 25337 +/- 159 86 3759580 +/- 1939 25361 +/- 159 87 3784542 +/- 1946 24962 +/- 158 88 3808953 +/- 1952 24411 +/- 156 89 3833380 +/- 1958 24427 +/- 156 90 3857979 +/- 1965 24599 +/- 157 91 3882107 +/- 1971 24128 +/- 155 92 3906050 +/- 1977 23943 +/- 155 93 3927964 +/- 1982 21914 +/- 148 94 3951776 +/- 1988 23812 +/- 154 95 3975292 +/- 1994 23516 +/- 153 96 3998638 +/- 2000 23346 +/- 153 97 4021603 +/- 2006 22965 +/- 152 98 4044558 +/- 2011 22955 +/- 152 99 4067386 +/- 2017 22828 +/- 151 100 4089938 +/- 2023 22552 +/- 150 Terminal Count = 4089938 +/- 2023 ---------------------------------------------------------------------------------------- Measurement : DARK 15s | Stim 0 | On/Off(us) 15,15 | Cycle(ms),No 1000, 15 
---------------------------------------------------------------------------------------- Time (s) Total Count Counts per Cycle -------- --------------------- --------------------- 1 -22 +/- 6 -22 +/- 6 2 -23 +/- 7 -1 +/- 4 3 2 +/- 7 25 +/- 6 4 2 +/- 8 0 +/- 4 5 14 +/- 9 12 +/- 5 6 18 +/- 10 4 +/- 4 7 30 +/- 12 12 +/- 5 8 17 +/- 12 -13 +/- 5 9 43 +/- 13 26 +/- 6 10 27 +/- 13 -16 +/- 6 11 1 +/- 13 -26 +/- 6 12 -11 +/- 14 -12 +/- 5 13 17 +/- 15 28 +/- 7 14 9 +/- 15 -8 +/- 5 15 -1 +/- 15 -10 +/- 5 Terminal Count = -1 +/- 15 Luminescence/inst/extdata/TIFFfile.tif0000644000176200001440000017403214062436223017400 0ustar liggesusersII*z 6FCA(f-F#J& H=ሌp8 C<1'IR.#3B$ Pд/, `H#"Qpl*`oW(CA) ÄpD!ph>Vht>AMš|N0 \?߄pBh.Db>8ȇca 9Je PIJ`بf0eb@Ȝā\lAD6#ZP" bb@pVCIQVfprA[jH< PB`za5!8@!0@Ebx~" `B (ZH2!f$sa<!H,(P@F @,a0DF$u3Sh@H'!PЩj+a cThD P)k8 JiX`x`L`<`Hap6@l1.6 1 \/ jr +K8L8J$aHR.KA@f! hahJ08R]) f"PdT'BŁ0h8N!- @h`ҷp>+B8"iJBJ]"`P(L2aLL 1H*0$]5gPz64nDТ`*XF bR!B@JDhdn$er0?b/!b`4"Pd!+?N!0*"(M-RBhy!nƆ#a DJa SBOC+Y| AX ӫo"L+aZY. PR t@ėC <3NaY-|\Xc8"6@808dPD2'bD Ac t@a/JxC !6d #I/V ApY, T^@^@O0HC&A`#M<l K` qyKL 4=QDdzAj X @  n Թ-8JH6qFT D=R<4 %)f a5 0O E@( .Vΐ  2e[6cq(P:A-)9;V =1Cٱ}ӴN4`$ bDN Z@*,G H(=D9x#P~JE@p2R i`~p?4h 4TD^EEp\0`\H!``Z W DD`PIb ~ _@l#PDh: ^Rq*Z (N,n؁0 PF$# ʗP:0zV$' 29@GSx 4U(@* M*w`X pxS#SL-ïWX`9Mw0d0D(*.ȝ*ЊB0v Z*` 8+AP , .y3 D ëOE`)bIh" \ ;ʦy@ 0'@uނ08-ŏô A2` f0kPL\tt$z ,L t6 a JѶf q X* a B sV8< A4RJ``!B(ju K\- x (5L<BpH J 4 D8IA^pD : (u1wLT0@),xaX/@:b'mMJ)"L8`U6"*ktxB`;hPh AhD 0V +_eEY !i^ +d'$L5VJpkMcB :' tP^ ΤS`%3ma.lePYIka+@c!#rk[B$KadEbbK-тkҥfڇb xH`l M~ؖ#h0Vlt@`Lbh_ч&&L`i[ rt fc`DRO4hTB(\D(`di `@m l RE`L`_` 4jh %@c:ƨ2LT`h t@buKg`K``8" Z",XZ;"Ȯ/F\; p T2; d4/.(L 4`R T' =F^ @` Bv.CtVrLH@ff `p CB0`KE@| `.1`j}%LB @P@>#-`N r`P ּڲ`~>Z@ `t C@` $Զ [`! H 6pC@~``D (*_ V&%=%*LO`%-@:[^-VԣR&4 p4Lt =R @t`x@b6PF|?o_d̀@jLJ VaZ- :| HڑD2K`>hdnP bcZ%J @ blp /H砪 @ng !jt/FĤ%f0Ln^hZe'ꨓ.*eZ2`^nN@\BX\< N rBVeF9%: MJ G""&8ࢲ%D+>MBh,*`bh_l]'`/dǠbDe l9} V" ]Ўl@|dl'vT((P;g.L `X<ŝ# W6zd%.y8) NJ@. 
~"p7ZbdV-N 4J w@l V n( gEj `\ʰF `!2yL.CK)gLKW2VD"@@6V-$JRU`6r# F(m@Bg@LatgŜ@v }H`P, H `DVY.kBHl`.BÈcn@~< xwklEd}@B T+Ki un `*`Mw `Ryz>bZM/ BESHT #E]`^'6xEH'ύT:HtO [;BvGFM܈v]:0)jLGfD¼)B@J&2b<*gJ%@~ @nby4 @0mH()f^h` Vx@UQx%2d`od ^ 59T' Im5w(yI"ކTFdX T`p/.((`xp Ig=.$G z8g9~ nG" >)hX <@< :H*Ŧ 8|hR0DAN2($2p/ Da8! \ C@b<(" ʢ1l^ qxB=RHrp= HT# I`N*2"AHA4! Ј4 E%a@P!A|JơF# CG rA`P< Չp.PQ`>&Ȥl-b2#l@"[!HF DP2'\TbCMr>-!H@0ڃBˠT0B#ptH0n@t ۞ AT"0$P<2  R>`aPVp X !>Ԇ*  #>s@B(<Hlʰ@! | !bqd!:0_(\A #F(LO0@3@)P%Q@T$X  76`C,,Sa(@Hی&4{n(DlL%@3Pb8py$ H BhfAp˅[~ÈHaH(`F!He2V…"h ! xa])T.)XLS!NH,$,b3YrX0;EbC->>PD& KPH.S@V~#'a!jc#&hۃh2Z Q@*84jdtb 2~* ,S,c5P-J JN)= !@m~ w]xZRH#Bxj A0q/$f@ vZJ-5Zâg9i(POLL P2tA`+-0p9 `.X8"@  P-/"SL͸"F(āR7 D:A$Tp p ptP5j@XA2̃2iA3 ;l;Є`N8U0t A'2( 4$<Av @R k$`72 $#('@R d3X0< N.!dDR !Moʣ@@j h(P0z 9 @ԲYh:! L@T g%> D/&NX5a:0^ ! oPJ@@ b bV 3D `E!AHR;Dl@'K ,pS>Ur|ViP@B A`|6O-9`"p6A - xAGaA0` P䪃 | / Ă3,)PF ( AbF x2 5 @"l| U A! t ; %A1 鈀 `HA\z&zʂ :S-AQktNn A'ۺ@bOu!'`0NkW3(Pj x61*3,% ?@J=c\$@GA_|8 hJD'\"snCa#& VK@1z /DTKЬ X/ p\;` D0'@ A<1!5􌀩g؊J JOp[bAHLP!@m\KƠx1K347.hO@ԃt@)?LyuatVyy9 "@%p .rE@b1cc(Hx@"0*P(2**0X0*3Hp#H((+ 2 A5 .HC1A3j0({/ )G`A p250,X)@ `7)"y Ɩ">Nh#")|0 : `k.%C6 $ RyLI`(/H2:.$Š Iu8zHP!c*K"`-t83!AHY.9!;rLG9}4I 8!(,Wx3 / )1)+*I= q 2 $ 0$@-'y*!P@((!Ajꬑ-X (3hWH%pπнr\p=/0N${sQכ hPH%iB9HTB % HB9\(R0ؑ.`) qEy5X5,聨&#, )8!9p q "'ꨵZY 0H c ?  PzAJ㈱h) !%&W+8 X뉂p=P‰ @p,Jt*ʉE|8(;9KX 23d2=۳S)u% (I3`$*l0iD9ԓΌ)CH- @@x%IU <"0kzΐ& h ܚ*>8ຊK@(63|.R(S)5!,8"B%V’(A%:@{k.'* =U6P@ 38 W x1) 9;zCi'$A<08BS܋(+/P#+(貁IH+@AL@=HQ*$y5.(#=9`)0!C "`KiLԻ!L8(Ī'H0ʂtq`0 %!i33 )T"URqHĀW+ x  1pQhp8x,ۀ7 H;&/p&IV ,1^K&(ԁ3"4H Es},?X3p(ACh`r"  p H'2H9tSP&A[E" -yIʑ4J| )~y2&iP((FL" t7tl6,(@ # h"+ :0JX3x/0F2W.q&yP&ݐSMh)R("`H0{ (BHXb ,M%za!  Y ( i?jX ӂh/h Dr)hJf Xc=)0#E 2Sv {c &D e 9 3+Rxh%\/ 0x6+#e2r`,?IRa44AQW;<)P<(3@<HkLJ*^B. 
"?sp$:I-)Bm' ( %^ԓ̦cy 8"i, JV:"#L p+!?hA17@8ʼ1Aj1\/&/C4E 8 볒 z, *m(9qW.l)ӖFڿ:$BMVP 8 B4ǖQ=zq;N0D2ԁ  =6Qc 36R02!;Q/*,6Э#׽3#A=AL P*GyWX4j`б4%4H(%6:/p !/rvvX0-X08(Ā/!AZqc47 Q %cﯰ  8 !"P'8 0C3p.-LPҀh A0 pp2 % _-ڌRaW4/tI>/$F@(ē@p#0{?a; C^$/H Q!ޕ7Ԑ طxY` @P<A`5 5߂p"x:h %+p`Z &2>Hkh+Uу. 6P2QZ0&80$28i3)'1!-Q,  ⬈T"C1DcȘLZ8C<,Ga`NG Cl4!`Tbd:; RAD"(tnL' b@#F|f!"l0"Ih: KDȶ0$"xt9 QxP7EqXP+F"1a1B*V!؞x)0+1VbJ­H<6#B1Q,  ?x %Dp8(|T B R br H 0B )L Xbpd+b4K +@(PTnaLdJ, hXAn bېN `j@!T@j>B `tHB .Hb  Haxh(R!^P("aP:GbHV86a(< lfnT0桄Rɧ@<+aPhB,B1{-VN!(8Z=%rBX@a*Xn$XdV& 2A(^OP F6 Rj20DlH#Cl>8J QЀH b(X%`DbXH@%>@.oHMAІ ar""A:^*0f!(V@Bؠ, "dwUFP#(p \"B"HTEN < : aV  +X p/H4<ea0G/(|9Q2!txF!  F7Aʽ K, AqT+, VCGXVY5` |`@HAP)B J +. @x`8Iaap :Se'̓Xj3o0pUBDxW,Q@-`4 |.X !l@,%Z pPpTp&B#7@ ;=M(78^ {yy tL1IS !.&L h6`X  ¡8d$ xQn & VjQ?̀hCiq7:KXj< 9|MPl | %@|ZU%\l(Đ< ahQ€;Πʁ0  l =.JGD& %+lS+"tu/ 1o6]' #B?@TXbԞE379&0 A@| F @ZylTXO`NA 4T}" 4K &ʀhoaTAA p0R@7 !4 x-Pѳ@f5%0 >Q +M\ :F ŒP3B0P+ȃN<4pE7@$+vĜ-F@نA03%(M`W)cI0&`x10<@"`ND .JAPczBB X ")% :>AR | t|@~5l8~9b BRu\QH>3\ 0J0xD;٠DQA ?*AX9a: @<$b؄ D$gH 7# R8,+,]5 /2 7- `H R ^##Q 1h F:oIH !`tͮ @%e'0A)Y;cZVF%FlJ`^N82A􄧥`W4|0$ n#V܂@b@]#U!iaVّ g A U \yD0?`kTp)1v ,QeLZ'MHBl0xC@p M AX'gT]I]fSؕY]4|f頌%*N&b"rZedl2.*rNj&;L2\$jC*z@P L@b34DL~s`f@ @V`Z`@P> F ``&CH*EtBh@opY vVd&JxO L@@T5dȖv5 +(9@LȎ `` EV @l@@>o|`o bz}<> 4(n0f@T#bEjD 'ЊF(a *JBw:,F\ B*/X*<"H[`@Π@iuH"J 0L q&ZPcs&@|1 Y,^"Q `/Rx@@f^1l+:`V XY"& saࠦ vFa >x> ~rg lr؄d,{@0` ,h8`ɢ gɩ *z!, (Xwg)  h@b\| l @dnbB(W #d@tX@(  p R/tc(S@f>f J 6 D*SkD&oj.:}tذC":t@ zB D+j4 rdKx6еv @| ry@X C/%lt4>3t@2^hƧ@ EM*J @ DV 3(7 :D# X ( cX[D: Q`< ~ L' 76KLR4@roB D Z0 X " $sFl Ku 6mD d#. 
|x-On`F 2~Gp2`29D  6 J)L,"@2Zэz.WBbǴ.hm 8,`=Œ6V `tI tKQ$|z2 H[`>F0tT n`HǞ lނD@^HI0>X DŊ5`8@ +n#kSoXo%Hc;rzEp2m+N @ @T, X , n4H D]Fe)"*QȊ)~')`Eh@B0 t o#%2gS.NKF6pi` @| @P VL `^By@h>Gr"BVl2J#G(4x< @dAzboIb30=B( 'iLܠhgLy=0RH T@N@@ k-z`QbRj֌XB10%RV=^!̀ KlR \QPSj)%:d@]N@b(z@v c"$RAQ@bi<+R=RPEZ`&H5h sG8CW7 EX' @z3B4%蘀Z : 5 N ,aG`~*`n <l`͐Ԁ<c |&0Z'N&pa̺'Պ]J*``4 f|j6pB b ``@ xYhdJ34|@dX ~;#@x|,F.^Q % j5@L&L#1VqQ<9@uS$H +HdiX:O'J^(0*XFŚx+Dzb"Ci1at fvj7fEc^@7͘@<@<'65- s# x l1kwLt)F(G@Ov8 ^JdʰZ_ Fl awi:Q&Jh"yʋUވ$TyFo;\b5?><"OiDb$> =>N? 2h,kpXJB"|z)adžH~ppT LlN2R>  jUVlz|@:=z  `@*_@xP@Ll1%`aކ ')"p*N1BTnb3CP>UzZ#,jGe{@,ʜ>}b`%C "V+Ak MV/r$]33},ECʧlD`ffX\jbv@ ` @ `S 8s\:bFkH# dXH? 0'd&`H8 ,4W  }"-ฐR""a蠢! saHHd6 zQ!:=l!Áh8cDP  p(-  2- ူX$b`P4-Bl4a 05.X- 1-d(X(MPA]8D0N<.^4Z/hd"h :$0rbx08'@8Ϡ&Ěሎsv4X7A p"35xX)!xPx-4 ^+QNbhD0ѳD0!HB ؒ0al% rnV)(8Ԗ8c@ `V݄CX< ӊ|!@! raC 9)xf# ɍ!BМK^b!JOa4a: ^S؊@*ژ 仿EXXh<hx !HKpD 0G3a ĒX$ 9θp BI\f@n .@@G #JX) 4 A/ @ hCjFJ N9IR .<I t]D"%$6`Dp$`pj  }@ R'0S\* X("" 04 d dCB@!p. D0 ; @| AP%!Gv a'@,AP*b YV -'L`NQ2 +r%8!82tkAxJ`RIE`DN z, 8a"3J 'L"2,n('. "C8&Q*tnX 0p/J`dH7OtRV@5 %de"Qf6ˢbq=|B=\C@l_A \0X# A\ >S&L,D˜/7ʞD(R(&O5@FYR y:C> - gHj,XA50` B=9]$Jw4X1V5uP>E!@'FB%@  DAQK>@ST͍(;;DI-`2Z1@` c`O0^ APPP;Sê<`@ ՗BA4P<X% ̗<}P p#%Aa+ <#B\TpU8RxDMx% s#t΁D\D 0!'r/?[ $ @2B4(Њkgc`DBM@D t. $X&6 ' @t>R&o@"P_S6 ̶A0]`  6~( @RAɶea˃H"?J4`a7ta_ Bp6 A*Di7T*> (l`&@`%x14P 2p@.pT F`t @>dAi3  @$n4"Kh<Cr0(l \@sJ PC3.Gry|(@)AЀ D 0ʁ.S 6S8ap-P5, "LA. 2eA8+p !3 tQIއ8;* Хk@J R6r@00ہ0*O'%`AqVr80J8YӃH>%% b߂*/ 8!;h.a%"!pYr3(+Rb‹&i1H1h%zDhy1ڶ;13<Zr#$PL $ށ#z.n@*P A< p$% 'x,`K j7 1 1;0,+`HށQw,:l(18*+)jSW&x h `9dJ#X#&B (H-/P*#]E`)>5 $!a{/r P6P%0l>" X ri%I- Jp qa뀹W 1& ׊q2> P(<8?- 7PJM+0 (,(zPRˁX ' $͐$k)e% % 2s@)p8"xxPT&hؖX*w@D: ȯ *KGZdkDy h08:%()hC%5ʑHB2Aq}͂mHp)|+2`[$P'h00##΂ :"%݉ X&)Hx!\ 7<Ѿ " FN\@.#`: 4IIg,Z; & )/W@x-a5_b$X ͷ  1/^*ṚzH0p # ȣ#jx&E)8( XHn(d@!! 
/`) X h8@='%M;QV BR ԰ ڋLFxYt|Z ` ֯ <XS˒0j 1H # ko>,5 u3|!B` txr $߁%F[H#X1Ңł#p j |5 KLq{^0> }3p0!Hl K-x+Y0q h*r#)5g< sFNP(A`*إ֗ 4-za@ A.tH`2xWS-203;MtPJI6p)iB݀@g't:!"  ` [/B`px[0Ձ8< 1s1Z$ b$߂R(v)6`i#  ;[38 b7 hȄ8ƃ+LDKŁ*"@4ȒR)m *0/b,)6`u ]x6  '` HJM3b0 !ujw11* 8 R"KE0)ݡ0-W3H %IΡ cI؈hVxTDZ0 {&"Iǁ ko\iJ0Bx(1h;z"h0N^F}H d ǦPO?* hݿDl8:9P 22({KWف%`xАjM'+߀! 8;<( `Ё |㐩X(6`!90Đ_"X"Xh4yr2h$p*(Wnp|8?x ⳰ܮ=T) D*p>Áh:( a`P;Q"r`2%)C"A`>`c@&Gѭ>.D"0J${?)#GAhFI,iR2 |.31؞vB8bnbAXt* ȥ2PbVChkE趰8 `8%C1pBL&E"^$dH( QML\' T2p(F f> J>@.0Pd@D0 RxM%!X>D2ಈEA`1cpK4aJ:Hਆ0˜L),σA8bp$X'~ +RJ/8A`fHL!(nAf1.P8rP|!8$X+pHa0Rلa`#U !%!IP*6! ah<t! ^,4%`L;0!xV2?P%A4!x!B @0H&!H p:Z ࡦaDbI=]N850WLn̗ (o+2卣(@zAh-ār8vΊaCSAAD!Xp+BNZblV$@H8QHH![8;dD dU@@\ lb\, h! "8dn A8`@>)`DRn 8ӈ:A 8ޔX0leHY-P@Md!\Ղ@6 BhEr~0 @pH@P&qA/j/J Q.r<ΪP-0DP#K@A0I@r¸/@ "p@\ A&V,r /h@ Mh@`@" `t ɰ` 8n.NH7q`x2o@x&$NQIdR5N$GYc*P\F N!Rh@*@~@8pJ &w b 4 P IG@1 ɚ`X H?p- # ALdV4')&p4pA 408 qθ:0x$8w328Ay1&_ N8l1X ψN@lfAml uL_AV. d'`,Y0IsA#H9L!c L!u' A0' - 03 hH tK`?ҰT 8'a0 Z1TP;v_<b+8(eYH Xh %`Q0b) z  Z$`@ `̵00`< > Bp%PR4RA8` @h$6Lpu Ds7ȹѿ P'@vH,aD@L8 (/S!D 0.Qo+ p 0' t s  1`Z dXs~`F)uAUP!;@9r) Y$ E)j-tjh 5Y4ARTbBO VX- <0< /Z"R@H'@-t!1Zf+ ѵ>@Op| .#)Oek B9 A XE1X"m`3x >bp (!PI;x'E0V ktoF_@LU1F@00a"5Cp+,:x;˃JE(cua7`'D XfΆ.G@p d (KMA;`XAN,OE@4&208A+Da`W8IkNMw~: aT&+ Dl`6r6`~#lmr BN`nn xc@f@n")%Ld$d$`H }@6>6F/TFJ@q>T`@ tc _Cn& `t ~ LΞ`XBZ V 23"&Ȣ@"pVBjY^ "(/j @b d;D]0 @ F,䖚2 jah|5 EVH#r$`^z ne?tO<4'$8YZ"f!zb] 4A XB B T}V f@eDB@lMH K  N x#XJ4`v#@D d i7n* L t1e d Ap byBYw! Jq :@Dഏ@` n/LhbvB>@R@l )]ڑ5(j L@`Ӡj$e^!@ , ƠXL$@<(`/CF4dB I@R |$4$;NE@ ]b DXv#PLdJK.(6nH@` \T@^9` @^*@ > AX9v\BJ,Vt@LW. 
은8N&#@Ăi&Eު#WV flC C&@@ID>D v-n @R`ފ3wk@h!jȸT n,{2@@@BIo#FnL{ PG>0`.<`'` ,@0(T#`~8Dh@?O8^SRd>jl5^F+`n`F^@0(~#l?NrD ͂@8D q"@ &$/bpÈAvb@M J)"KH/P$jaI' v`r@#d `Z@/益Av  p@bHNb|G=k*Y FĖ?Ljc  @` `b L@ ^`ܧ!P ̀J0 gb``}~bS6@Yt\r(Lx`h ̀ȂhQ X `ǁaNI0Ơv RTp @ bD ӎ à~aE;@@.,baXl!l`R-@: P@6@`BK㦼D`v@`[c* TjH}T ǖ` B Tvtzz v]l" XFj'*3+DS\fN"Pk2B)܈:>  `Nl:F@\ (`Sj"HÏ^56 6/pi} dgxҌ9(" fp+)RЀ.b֪<g.JрtFEghe`P " z# 6`{/` n@Ҏ @@} ,(R)`> 2Kz Tnr*+syf}NDo0&yTV$.&݂>Wp)'7 |:vMj `8X] `T3V%VYϨB.*}dknЂ|qv0r`4#JvFB&B"֠2B~S*lN@gB<4< H @ [ -,4ͦ$4(ԝI*}`N@ "f rt4'@@@F*b03*pW IRNd :$Bp|z @@k[ ^Q(/eBVrhp$El8,?X'9ø$k܅O2o7:j/#Ȏ 2"MZG( d璓Gg"ڬC '$bzL!xdPRl hA {*TI"$ˆ#iJJ Rmi]d':z ~ ̻Tg^rJ P@5KvC-޲vn uŀPbJy@;@Dฎb]u rx2dT|?v " td `n@^b H]KDl,KތLTB(m0g&f2#LbxlWv{'MՇIE{`bC *j:rG>'`@X <* L 1=&D㠨X"(, EN1 bUǢpB@"4.?E!A`>"B(\ CBq)HcBbTt"Px8@уbF P "ɮ/dhDAВf$ "x5B`xd+ȰVD2 C ؠ<-Ș\CbqH-0Gp!HR+5i)0f4 ƄQyR<bH|hłD" xt=0H^\z>@0> `aJA:  b1q8Z#00Gٵ`JAOl +@\R 'b .O\PJ(fj ݸ+Ox P1`QFH5%7i<YSDD&$b+a  V4t ppƿ h0D8;@pVh4G}4b(*uͥD E UT5 4N,AtܘE"6!FR!z m; -E-Bh% nڜJQ02#a #p,  PF x.'P (M)T$ 89' #1p&xRT*e I\a@,M`&K,c ޒ(A  B*ܜ<(eDت4`@lx2@$Zk 4Ԧ PN3KQ ZadH9d bZcF  %h- 30 x,!P "P[UxG>KhL$&A^`6AL`q tX #%\* {]nAK A`X c`LDC0=.EL\_E-"4 @Bqz+cpqp02H8F(Ф`8# ":ĂP +ID QI;P6 @: B@Cb5xC? V\]]b\k0D ƀwDТd**K," (R lY @!dBH%*z A(1`|Azn X$10iK`0 LFI/| `_XSDgPbp(i6 @(kp L ADl@CKW6T3@t= @ɦ Ulzǁ(g(@/gT@S%y]SrBh`P #ȐPFdiCk@, 6H;In 3L+X[)$ .YFAJdk&@>fIfY)#M-PCS) b( d)h) q)'0%H"c!&RA%гH @X`(F AԗP(iH Y:S``x6`&`2 ((CKp@@P1j]4 /Կ!Y hyFY HэI⅞ @ 20ªx(# U8,雷v= ͓&(# P{ Dځ .(A:(qFAb#B~́BH .x 1P͍hp%`/X 9R7nذXP H:khX:v1X #(Z hj}E2׶(Y'07) ! $+!rm"3$!. 7 PցO#$# .B8C]e H y' P (p$삫We`Ȑ0 0-H["TUa%ځ`0ASqG%6x hב!UdCa4K_X!8 :Q& D1*ʹ ˆ$)9 "*UeH5y ژ0-02]aw)U!B:f H iD =8`q D΃9 L"h$Jq:{)z'0(-`wW -p 0" [!)X4B) EVAdq$f8%!/$~5P ZAы 谌@2z&#Hy:8#p .F1Y1 ="8m\2R p P@(.bS!Ä$XJY] zX$Mř#P; "$8G4!,4 ȥp@... Y H!L0X3M`?AqXȝ銘&PJ$ 2 -3!<;P!Ӿ 0&(  X4OJ2401(qcTZ2X@% /!(Q;@ >b):[ROn x/#&j(M&@lI ,T b|E@Iy;I+ XR8jYH (#pڗF6pSZoP8)#tN`,kM|$`AJLz<ϒ(#(X @!\'i8A;4q:5 X!U%arx5;@ݨ0H(!"IG Q@q`0_hC $2 ! 
gp90'PZY0B m@3reaࠐ:DCX4 OrpV" ¸t:$#0`6! OU F5>"t7k |Z]1WHf,$!ML> b0y[¡0>IVp Ef" bPɄgh!frT*Dbо2: Dr`ECA9`D'vC@!5xDNGЄ,%5 0,`88 !Vt B`>aX߆8  b(D9l@BN!|* 2pP'pd`1-ay tD@B!*@>@ `5>hPAJ& +\ M T6^BC32P)΁RH ^`Rۄb0a0:&UZ 02 +8kT@RXT10%> !tad |1D+!ĵTHFaԸA8X`P b 62 TPKKs x XRJ8B+(PHBI:%5@H6 FF`T  |@j%* * * p4(6X!Na΅b 4x` XLl@Z2/H" O.D3BP\:(fuE$ 0D Fb!b*h:!f!@hˆnt/Lxd "2 ]mC-PXMZ#("& h8y:(O ĚAp'\z k,xu  F`:f )c!CJ .&d*fX(36 F hP5h :'@H8СxA`@!B BP- +FC1 Ȅt Ĉ)u l ֥6cxpDˮa !p` aؼԴK"@~% x#LPk@h 2Ԃ CP.VI9~CRp B^A FШ%B@CȂh SSAP 4 8^ V@V hU&!%`!ܟ 6P\cP.E PXA&8ihlzbN8!@lІ dg`h"@z QR_%`0@g\0. BP LAA: ` 4j  Oe9XVP% H < =h@a,% PR0VHE A*p  a T2OJ0V H|08!NY& |!LbB[ERB!`DxFo A8MA p@255@pX q-4u A@vȋWa5$ p ?0:VT@&I0w0[0 E `2ʰ_8"A4pd  nAHM !(й\ ­KQ pP @ G:} ,`$xZGER Jܖh1j6DP@ XQa%:87 |4ߟ`  Vݥ#3 88@!@ \]G(2hyHge~P+@@$DAYLeU3TR- A$- A6QL D& ҂0H%Pdu-qb( 7`8"-СIB]?XPlF'?ACLگ@xKUav5p $:(P@Mg(O| Ql3sR6~<'2XtĜig}#ݸ ?qrCf5^U8%F <PjHAN ; x1+\"Pn p/ wr{ Z89, MbGL Rz(o NbÀL k:~ep4@n3c`Nq Tg| `D@V$C6@Rk@R`: `?*w"ns@B`Z6E`p J f^PVg`B@Z0LA¶ l*$F 0: *< M>dLN\` @\ jeF1 F༟)Jv| t"HT6&H @B `C@PO`z&c A$ D6x`V >vfT`byVb-Ҿ `,E5Gi \ N" x@tL'<*Qʓ.!6R@ !ɢ+@`g`|I,F#:ob`l^Ҭx~T$ c=#n RO>`P{0>EPdl^$j ``^u ``\ =xfr DҀ~VlPV WBB@࢓ ðТw Nw< Z@~,-. J ~ FPF$R"L $ P<ˬ 8Pw>%,`_Z#CZ2L´ P3.!#3'^ f J |u `j@'R`i(c0` Ts < HeG*BjD-H탌mnr 2@ `3@ O6͜|&&H&"֡L ˞ctn R '* qF@T: b4 ,c"C$8~0p̩CD PdGSj jd}o@| Z@I$j` `( ^f^ RJ^K@u%t,. 6MD@18|Il3zrT( @[$^\ ^'jXHX i@ @?QgZ B`N$+O@\g  z f]`4^dB-.<p`%FȠ<>lĸA (N+#B2EH *8V@))?0ErXt@ `X ҰF e$Eb+Rf@ n\ RF;8Ƽ> : `fO6 V( =mL@N փ $B t Wk,Z5h)>u*D ,E 03@F @@ED@SpØ^ `l,'Bh,KS$TӸya> ʖcE$,6b `[wo`D &d&4JC&hCJ->h(/VH JxlN D TbMD&! ,8R@<9 dcG!#~ h.BXh-j# L; $$;"h L v!['Fx`cna nF=@6 s.,@r!Գ0@XIIW . *F5@.p%j8@nҠrZ% bc@.Ҭ `N0r4nB `r `28H0ȕ-g`VD c`C`3+(b| *U R!lN u > `L hxg n `.P X ` *}@`@> `FJ ĒKEb6*,a :|1 4`X,6f9~Ddcđ`X B"غF׊ 3:JꚠJʧ4$B9Jb .+ "H-bIr5<* @wqy0) !"j`~:g ' 66 #Vhny`@cu3Ů%`k[ch$1 O``B 2#PK7X4.6ZdM@[. 
|)~F dB @H0hc@@c`r 5b$6P턟Z`B'K:~F hNK `tV 8 Lz6~&OL7; kFl& =`> |K;%⊳A ״2̅mTB^lC\:@p&_< flFoP#.2A;$ @Bd 掻@j)=O@4  ca'U6  (҄c `?mgJ \ `^{Qn@-B$`p@YH`|‰*P\vl K^*O@ g6Šx$:0NCp6.єK z ('F~~0ERt jsTu4B s,FMc5|5KP,P0\ @jT~ @`T . HH $6XrPNz;$)2dT!vYM@? >`A l2;sCo'7b2es 0E O lAe`@ NPH z: < `@GcQ"?PMf2FN ecQB dkdDvg>6&`V<4-@q6-(f zFڇ`f`n瑉vP Ӑc ~RY@MVj \U:[q"@Jn4`#/,8ϸE*~ DK" `O@ *"@HB7x(cl&."c NL}$1|kLkx @: Zf膢 gZ 8Є`HPʨab؃Hl 4 (V! D-0nF@@փ2aPL $ @L$6A:at@ @xhN`b# "DAF$Y;aM8R:.a|H`Ў2al,h4X\:".< D81+Ԣ WJ56eg~T:&xa<a@H@fHXπd-P *rVx<j, (VJ- FHD h(ďpNY@N`J*@@$KOkP V `B`*CAB aFΛgH H d@` @@h!o`/B Na kH(;lXiʘ>``T[/D@*$zzVJj@J;OD=}P> @P4I! \$$6JPp Oj(pw 0%@;!t`c8p^03b!H Q | pb 8"^*agT @L|+RB!kd"8t@^B8; 4X %`3-aʰ9((- ahh .j.M-. bjr @],KqL 0pD@l,xC9]\P Z A%x38)7r,A38GY%dY`Ta3P8 P h'h/P>Qf| =b."D 8y7 KX!6ThM) ~xC%PS%Sð "f UF 0;`Ck6aPcG=(=x+ ڮQ@Y(X-E-/v;A6f D`;8 F}ah\ځ a@T`0.N00@`Xp7 i/wI9ZyF Aa`r)T/  b@ xMxA)r<j;}Zy{P0@A/4!bj29G {ZX< @DCWps> @"C\ޱ PJJT 0 BAG tO, 2N) 2Em 7e]L4- Ҫ`P ,, ;rܮ<Q E"f bPA蘄vj0>S`)N%*tq)wP]f3S/p#'*pMׂ Z' J 'Pyh>5I`4%A?8ƍ`iXM Ts hK`; `2 A= ~R!͠TUz|A"d4 A`7{`t.72"48p @$ pDYrr09CM4dJk5p( CNa |& ˻r VA@d Bk@0l A|;(p #?CESKܚ*J~?>69JOrWz#<JT X)r1/ rځ BSz k!i !hӢ"G,ؖi;` Ip>(3x/YU` :X#T*3yc XH#1Ё#p 70 0 @x x 0Up9 9B j b XkHA݁ *r`(hk p@089U ?]h% hρ! 6_9x:-(@1-(q;5ȣ" h$jKB8?8*%@p-i30B0' 9$z/&d ! 0H Y`7  k$:) hX$!xbd|/jd8:Ղʳ.P9,"1U *R /2f\ (*~488@6ˡ1n1 0 p'YX!-5K8"u&"" 1p`@p(` p H p=M(6 h*KX"9` ]:!3hPR-)vȜK 0@px2(6$P(7 y9lh9(3( Ma?h8|M3Ҁ'2G<9əCc5 # &L7x(e8h EX! |- ( $ 1 M$K0 *㨁$9K*`*g!!$|:0P"Cл084ihI q'-$S0 E2Ak;P, !  y82=Ig)I$ \o =RJ /XGwHpMՐ8Vx" 3H7CM,@#q:%RKh>+' Ch"x@9dj&ģh Z$ 0RE:K*+1P1ÙZ.:(C ҆`  hJ(: ȑZn9h x i $? NXY h/'/*X$ Jq1kґfĈI=E#8} u; X!B,~1%&A k`P + F@p0:X 0 K02D  %x P|88Քx- x-x X-\  (Q1(#5"h*C.:ގP JR!ox!rHK('_ .z|r @ YxB:0 i!{&Fp$ My@("ùM p'X% E8 ? i,#h'N*i$P*cɁ G . 
x؁eeqF2ӈ;ׁ 60"&@,~&HTC&"Hc`aM=PHH41'^&- ݦie0a8R D4Ӷ)7)&]G2|+h.`$0*'X40$ϥIB7ϣml { J۷Ǫ,+x!BXƂR !1p/Xً#xs+.(.HP/3^8I *4 oч6%Ib890"KC'#W-h%Li(&/'~asb{,}Ի8HHI)( 5ŵP@0p ߙP%GF/XqĊpXSVU8ˆ{ԍ6ł$6*(]P0# h4SXh #i( =0qH@2h @ h_XGiPA"0AJ6 5 X #މɟ#`$p%<;f|XH5 i2; `w(.I ok<$>f b5#ȭ9qUx$ @('DL &d7ؾ7 XIr  8`4Bh#OoFӛ:@%ԩ2_x!(8)n= J4sT3h#r,HYU\<$vRe"R# ʂqC:0 @lRWyh1-ӈQ\N91p)X;’:QMD!@T!"Qn,`-#A(J!V: cl<*FpH+B84)"zG آ+C@#$t`!1r*i7DA :  G@Ь pT61d`F9"61H,D . ְ* ȗh' E( \6;+@Ap@:YX.d,Oa$#L0<A^a@N(RP(L!@BknXA \ Pb(D(( Z$xhX$BPF(hp4,- 9 F6o5X) d$DN 2_"3 h!J j6l0*21\ 2LLhD#8R BP=Z: t62@@0.8b +Q%kcx@(%j#AD3`h.2l8aÌ $gJ,dk`8@.JPMŊܠHD1Uh _.감 6u A!&PP €9a$ZEpPJ|DOi\a5: h΀9}&,XP< ,3 t@-!GSE % 24"]#C $P,0>@d.dj10VVZ0x;.~N#S1Bbq5 p!R0p0APî0‸+۾## ,D (Y\ Cdgfx x)ȡ00_A$2!A8qdPtbdx=<Mnt4ir C1M«l:El]n* 3Kk`^s 3{: :*7&cHR8/ Ví]pBNjYi5?0fBX/1@PJJ` #`~n n7 -PfF5ԫo,b$.} :dn ֊~f$`jl'L> jթ@X h\aZ-~bzs”(R 6`|^)Lg4R@^@:`8,B'B g0JB0P PO26%:P'PoNn,0ƪ7@CD xG` gV N`z3#2}@r`R>Xf 0@V[ ,@P /KX tJ@ @RDc4pP^ %b I>yL^ 8  x%3bTK \1@ F @r3 Inh %"nObDt`8 fd1bT`JCf6!.<x`` Dګ `hhgR`ndоfd+7@fzx `x)vA : V`Nm4# E% TX'N @OCN`/"@:JνpX^| R;`#D ~ *xz&cx j8Ldr`6 `6f0@: l$b&Śv ^ IJ`lfƻ b0`fr 2X f,l'n+n>F$\@`z jQT$: 1 Lt *Z'Rٮ&'HmnoH@`<q<H`J ` 5#X#>\J ~s͌Lt mzdf. 
T@>Ly/IFT3(jFaJG7  \:  к8=NJ$:X@YxoB6@hT b`J8Y#;/:udp B / nE&b5>:l06\_n %e4 L Xl'df@~B(,BH&`tRd7: "V_Z %c*z () f"fL6G h6  DJ1q`0PCA@|LT F e`#* X`(`o60 @z#\Kp`q6,; *9n R3 $W.Z:\& tGnL#XfB r yDf5`HBbNM~Q9=MDPNDLJ'\ `` P TJ TZF X l@|& .r*8I*%YevO X&)ƋfpR>d2H (Bv1 Pnz@bMD1vL@@`7X 7Ҧ +4@b&4@`z @>L `b>bKv3@>rN `6`ZLd@FflK=@`HS7ը @\`!0P`> :``NN&.d&.)^ | Y J4 `Tr@{ʊ*\3jgR fBuF8`@i Ԑ %+`8i X&E7^ B "lZ bjv'jd#x0f`vcwRPZ {+4 语>uRf`i) RO|`>dnjT4T^ @2"``b@<BI`p - `zW@H`|7q'6] {i@_C@P `rLI'J`aR'`$ g^'C: bl@x`ZIŽ@vկ,D)ľ٨X  `^@FT]`8(PJ5' PeG('FLԢj8J%f0@О圝N:`>Լ?HRà2 D FH 55ã\jW5 `B l`r0~W\ӑ:N h0Y'$VhfJ VG4 &w$$| HYeo] -e @@/e E#n%g%`p` : 2hU x0h@))YhBxK(2"X_4c 1 WT:L4Jxm=H.@Mğ#4*jC4PqR y`VXѢhbL "lA 6 R\# $Z* J`2 `V`~2aHX,l(,:&Q$@઒}q@QRR'`e |`@T@< <Vgʤ٠}T@p „``m2lf`hBLC8 d!(K<@P PsV$@A8 o( XDψoXzX cn5l`y4Bd,4.x*'z' @ > H @h Sh=`Y2 Y &Yhۇj@Fge`v* f (0eXY6剫箌8XԸXДoeνPF &`]`&ՒN`x@En䠏F HCDí~DHJE ٣lDrC\TNa6 n J60H`s+I£rimr87'Rl$ `:8d0+#@`xVa* Ch2#1R-q`+HQ 8 KAH] р|DbQT $z%qb5aB!qn#D @߄B`R.!T!偰8l" DH1_WF;D!{@R`CX 2 2)PݥlNatF=B>' h?g ;EQ\L4. T@2!y$ Oh0 a%a !Xr,j#T!0@@ LJqP`a8*rF&+B$J8PRX>4j'z* .`L3hX( ~19ǁ\6%0x^V,H>(  I% 6Ã`3 2 !+tP a67`P'<8G!(O&ȃ!P<ᐺT੅: C*FѠ*B֎ԇ! C_СJ@<a_˕X/A( "4%<Ё*!8Ht%S 3=tp>a$) @A0<` d-0, 8`4S_7`@@ f L@ҁ`2[Փ)2nFXJVX?`|R207@uVMcT~ #ʆPq+ATU x2rČB $*`8%)l5p 0)t Q)*$`AI0&*PCfX1 *(5fsʰfو% `R)1 @. E*\MH+͐ J;G6Bx$vA86B(@RP<0ROf ` Td$BvpG @mg YI /-x/$#<B2Ly05Z@>j cA-@LQ8-꙰]V#yˠaɓ  @ }6O|Y4^̕ J k.`> :1 a$N 7s` j 8._02 6I)`|xೕUhJFo0 81@,O"& ]|0ɐ< ( ,S?{dQ2 AT FAHq' 8! E *B!@Ԁ<Wf/%\-#z8' @$PE7Z 1RUX (6!lUdɈ.E7*t B5v  h`.8%bpV>5 'ߤZ.D" GMIA8/yoTAh(I ^`8Z!Cufh]vB,S TJ}cMhh1)P4 Y!( @AP#h4@Q{ #dR?As x QjFw5@\ qDRA(ȃ~C`rpp&X Zp0h :h:;`y9=:xe Ѡ؁ X%K 0/>08hh5 ) ) yBJGP2MQčඓjp ! amk;ˎ[B1:( x +84h4(iI )P&XЯ5!p18!0+p"9=p q@+"A>0- yF&Bh3`+&1˷4@96ȝ#!o014-! 0/@+"p Wع?h$X Ä: ɜX ߁p+0SbDZ"c/ I#![q0AEU@ ,e3/h9c 5C"$}`ɷWz] 邠Q1aMP XJP3ȨA,6 ?I]9 >aKÀ%ySN 3s Mb8 's8xD1 'X.D,G(` (II@ 0H ? 
h>#IX)Sc0|6 p!'C(& A@Ш+ 0 274Qb:xk #@ 'JP y""y^Ȅ 8( M"0h  %K [.:E[iX2 %h!J :*Ȃ"X X&'p7X I6'h#b 4"hTÏl& I<0hM0F\&+ H e,-z4i  Hp /"IL8 ҁx` p  7khI0#yLM}<0 pڍ`F(1&")(*5~IܥН1gOHBI ( dr)  C(,h- T @ @3OH[X P*/3P,P =(35 x >k˒@&.I )迂P&8Xڱx%M(@ 傘@8{7IAW 8&h#0țH&"m'QL+8#&p)3kA(X $ LzUr.|P (߻)9@9$*;,8ؐ P!H@1;6C<7: _I0P$ٷ@ZH48@#Hx܀[Acry%h $P")b0)=&R x('S(0;̦ Z@WX30DEQ 뱁"`|./ߦa0e[ @<-Anb!5ƁkDr($@(Hb0x h5826"q# @٠(2/ )ЉУ@/`,"/`A%M1 *A Y &'!!#0=4V$ =ڎ؏ (V9>Qe C*H-h通);&<^kG7'`Q9F$e#0+6eҿ;@*p*a|01!L@(Z dBH% e#[۵=ʼnDǨ  8qڒ`5ݫ$Xv&铡 b(ځ>Bm И( HRn`a&+%4Ao>ʂ3x01T}@4a 0")M J&X Hbk #`x5 02X9t #BF 8 0X U[PJ۷>cM V!%(/D( xBЀXiqPu5('ΦH 'Xx0 *p  > Ѱ*h$WQ^qX +aơ'aֱ";Q @(0jQ! B_,PH'੥8'YԸ+ox㧵r88XP9ʦ顙ԽA}v ` *r`ߠ)(tP , P"@:B7Ө @( :@> '2྘ p:'Rq|_}8-R:%))X5FkM 8`#aL53Ui!h$$З-`!*m6&`!tH'h-Z4Xe!)*^ђ4Z4p-; ^Oa\x dr``\: ̃8P0#N4)@E!Qhp0' Ș@c`?!IB. 4:YhHH @|hCH2@|>$NB@B3DCh:ѸL:t 8օ"HA@:@`f.H6=K 0J(K:008< !(HAH2 ?6! !+(4PXH *  @0 @ h*xxZPUX( c`J(D]:,H@ZЁ< j@0^4p(VHTEPAb3H0ldR  @ @ZGohSWjLX^:6rUYި \%@;PZA0H% 6 6 `Nz  (Zp0 ET T ,  b40R @>&08 hgzu@RD`@ 8P*5^u-<ULA(F"aX{f("!Xn!0}0VʅbpP&:pF80> Aa{N"qÓ !@ #Z8$h$p*t lM p2(`К gUک4Uh[GX UЁ Ada<| C=i C^ Ts l:oA$ d8# @`B0!6\>, + 1ft< 谣y GA\2- $@M>X4fK$ B@6!hH+ q6 `evfY`+P: H! apTn6ul A, '.5kA?` px$.`& -A`P 9f \1 pjh,+1>r40?< -D8K(҂"mx hH%`!Aҩ3lI."~X0tjN|Ҭ8]ZAu a `RR APj@!+4 00\ŎIafzz".@E l3]*DrA`pDpl t x-j8X؂ 4A/p#PB W\ce~\j(1RO@gH 81…@n -gKTDA8.`020#pL 4)=9@0h ZaYP2 !/Z0!02N/ F g8SA.5(K\0Cġp&/t@t]ָBxp7tL)'/V Sr Q.f!gP}lωu,A*p 0&O)7% w i!q+5!ca0$9025-cdB%(_jTp6\۶R`X Z Vh D VX4 `J A~wUl `ڪځB؝q~uuh(K@ A4742_(ϷU^P 8> AqmD,:u ֮ǐI26Dm$1K+)èx z*: 2[@N\UGf` `$dJbD&`V8Ie%rq <ik-~Q 1N ɰ0  B {5 oAB`@J lFr@j@l #|@v@D"L ~6g@>B IS;@ft#4h>!ʁ)@P%^Dyp`xyF[j `TDMঋF@Xm,<N @= `L/`vc4hRx !^瀠! X bƀ &JdVJ f^.HeT!^: pq& G*d 045a XW`l@L J2ޡ@> *Ld,@7tk47tP81p^\>f8Rn<8!)<#9N6R@ HOZ6bn%$w # ̄0PBKjvɛ` b @B`:. @*6 `j5& R@\Ej bV fL}}V@@N7Nlq Hp,VH\KH6,j @Te5>g^8lE VBnP J mPK L,<Q-rRRN\C^bp4l`. 
C*  bFX / D D'N=?my@~D)B@*jjJ(1t6g`Js:2B-R* arh@D kƒ *,gF ނc.T_Xv bv ``f(`< @DY<@¦4ZUS(v@J,,yH4,'zP%Nw6eI`@8XQ|@?.<`0W*XpCPa,O@bV(\<O6@{8R䠢W* Z <2IL~@X`|t 쳠T >,@qlbC2A@< !<j f`` T:`p-,@#`O&F @FF =: : tp @3CJ2h B"@vsk z B| m=b #,>@]%r`(s@*P8XC 3hJ4}ܴ%PE,lR`5``l b .Nh@@x`fE V @>d F B:H 7@Oxn6T`b```X jt'N t*:^5E8d,v|gl5H Nh&N vY28 X%,Fk8Bp\&D| nJd#ЅxDI.,P$ `X(n#{J `d[j 6N6 ʃ h V爐|`H^ M:><04u^ RB/@C`/l`\ 4` }4Z&(q\bRW =`@WGJ!4OF`^4t`6l d(`, p6ՠ:?4@&s2, b@t  ^r .X:A`8g'm?@ @nY8d5 EcJ` " 9GN|aɚ;fb KH`L "v |S:N@Ï2?% :2l<3GJZ%4CP$)J34 +Zmh2Od{4ndhl`̡4HfnWEAt0!%g4ش`C?BxT`$0SD@>%Gƪ`efP+ o 8@\>&pJ d|:0Ll K8 Wx @4` Q&:Q| N`L'j: P+ Pp:䬆:  >f c(bZ46[e !@28ڊ Q/+vю V"&[B^.U,O '|%Bi*L&* ¨f @b TTaL&zv@ ~}xO8cf&TW D _OC/e|@\P P Ąq.<2*¶2`,Sl2; ǿZv`VaKMF:рf@/n\$i 1Ea:Xv9wxLgxj -+ @PIJu@l@'aZ" D`v / 3jc4 f9Pf>@F^ปn e ʂh )Hf92|D>n貤qN|`|J<ࠑ Ps@c#< |AAqDF Ed @Nżt*/2`2'/29SQR@*l8.Bjb/2: F pଢ T2D[_-Υu|vg.v re HfS @X!`t`x 8 cb R@,>" #O" !2gC&5fY`6S`> & B "$~Tt̥*sxcƢ)=`@VRb&( ZDQRei /IE&7 [ۙU$vE ` k$T[ܝb?3lL@ l `0 TFPMaS/>z.i$Z].}`h ¸+` gjA0*7R `r.lyV4k:+g`a-jtz`TL)?`& hPhD$dh*D`8 tZ1EQ 0 @؜<1cQITdJ6AjJR* SA\J D¡`X8- B04. j"GB|H0@{Cua|~=2F?xJS,ÁB&c!A@tA?AD P#.0bDcBp<$DX tjŀfTAxJh 4 i|z sX`P< (|:"c08  (8( X 6x,nN L 2 $HTIa_X6%9vXP PΠD0 `hNB(dn V!HXAX>f c~f"!`\b&!(@L> 2@4!H k!^((N2JZaBr Y"`x bnnHD ex'v&25a8؆&Aj a1~, : 8&

0% 2By`\f B0( jx;qk'0 %!UR1 c00B@F8, VAPQi`LAD"N`& %>:'Z-l 9[)`R Z#jn<8M!);{@U*lgSz$ d = ~Xq >>dA;t @P'u#}_H& FO5밡@x)$~ (#8 ҁ` jtB^$A bS& N3A&8X'seXB6Sm;j;#PWB> - v c !ph BBSd'(Dn+l@h[$%"V\p $p3)99PI{!y@&3FL TaCP* * +/x*W08e0#Y4H@;@5z&A!^ /_|^bv $>P ntb&@h"`L| N@{=&R[( |¡W a3gL› R&A]Ѝh @1~IP Ʌ"( HJ0 $?X`jnx)Q̲01 PP-Y yZ ;Hqh0h H!"thx* h !x!TF%yrL" 0/#,8`~(()suAlI !P>aRFP3X(1GX5))qK̾!ˁ)}z&0906 t#xꙀ!1I@1*28΂$ 07:"`aiЃ9@ h3y0̛)(# q*A+(4 Ģ# ]R€YH91((3j2 (23 "23!G+ 4` H"3(/y1 $KV$#qV<""  ȁP"Cʸ` 6I#؈ ,  h2;HA2ce:S8 +܁hO#x S9rH 3yБ2~KDށl #HX(!:>$Qm!9 Rg!1/(8m{!*x(,Q\{2)fh$V3i(J)0ܤp י=5DWQ7c٭N$p i΀vBAya & (*AQF̅Rax&żP (ԂB0 :"#:.ᏁY? (h (@X$  PxP聃 (@/퓬P) Db81GH9 i $8#8.(eO  kL"墝; 8 x q1tiff package, R 4.2.0Luminescence/inst/extdata/Daybreak_TestFile.txt0000644000176200001440000003151313231137116021356 0ustar liggesusersScriptFile=C:\Program Files\Daybreak\FL Console\Scripts\active\QSAR23c.TXT ScriptName=Single aliquot regenerative dose method Sample= ReaderID=TLINFO.DTA ReaderType=2200 SingleAliquot=true [NewRecord] Operation=1 SampleType=Disk Disk=0 DataType=PREHEAT TL MaxTemp=200 HoldTime=10 RampRate=3 Started=6/4/2015 3:58:46 PM Stopped=6/4/2015 4:00:52 PM Points=40 0; 5.000; 30.000; valid 1; 10.000; 32.000; valid 2; 15.000; 30.000; valid 3; 20.000; 28.000; valid 4; 25.000; 32.000; valid 5; 30.000; 33.000; valid 6; 35.000; 37.000; valid 7; 40.000; 31.000; valid 8; 45.000; 29.000; valid 9; 50.000; 25.000; valid 10; 55.000; 37.000; valid 11; 60.000; 37.000; valid 12; 65.000; 36.000; valid 13; 70.000; 34.000; valid 14; 75.000; 28.000; valid 15; 80.000; 36.000; valid 16; 85.000; 35.000; valid 17; 90.000; 32.000; valid 18; 95.000; 31.000; valid 19; 100.000; 31.000; valid 20; 105.000; 31.000; valid 21; 110.000; 41.000; valid 22; 115.000; 28.000; valid 23; 120.000; 39.000; valid 24; 125.000; 28.000; valid 25; 130.000; 39.000; valid 26; 135.000; 28.000; valid 27; 140.000; 31.000; valid 28; 145.000; 36.000; valid 29; 150.000; 43.000; valid 30; 155.000; 34.000; valid 31; 160.000; 
39.000; valid 32; 165.000; 31.000; valid 33; 170.000; 35.000; valid 34; 175.000; 41.000; valid 35; 180.000; 45.000; valid 36; 185.000; 46.000; valid 37; 190.000; 50.000; valid 38; 195.000; 70.000; valid 39; 200.000; 82.000; valid [NewRecord] Operation=2 SampleType=Disk Disk=0 DataType=OSL SampleTemp=125 Power=95.00 Started=6/4/2015 4:01:23 PM Stopped=6/4/2015 4:03:03 PM Points=325 0; 0.100; 303.000; valid 1; 0.200; 335.000; valid 2; 0.300; 344.000; valid 3; 0.400; 332.000; valid 4; 0.500; 304.000; valid 5; 0.600; 325.000; valid 6; 0.700; 324.000; valid 7; 0.800; 292.000; valid 8; 0.900; 290.000; valid 9; 1.000; 301.000; valid 10; 1.100; 310.000; valid 11; 1.200; 279.000; valid 12; 1.300; 288.000; valid 13; 1.400; 298.000; valid 14; 1.500; 272.000; valid 15; 1.600; 292.000; valid 16; 1.700; 323.000; valid 17; 1.800; 273.000; valid 18; 1.900; 290.000; valid 19; 2.000; 315.000; valid 20; 2.100; 296.000; valid 21; 2.200; 286.000; valid 22; 2.300; 280.000; valid 23; 2.400; 293.000; valid 24; 2.500; 299.000; valid 25; 2.600; 260.000; valid 26; 2.700; 275.000; valid 27; 2.800; 267.000; valid 28; 2.900; 252.000; valid 29; 3.000; 278.000; valid 30; 3.100; 276.000; valid 31; 3.200; 268.000; valid 32; 3.300; 278.000; valid 33; 3.400; 293.000; valid 34; 3.500; 266.000; valid 35; 3.600; 279.000; valid 36; 3.700; 257.000; valid 37; 3.800; 272.000; valid 38; 3.900; 264.000; valid 39; 4.000; 279.000; valid 40; 4.100; 263.000; valid 41; 4.200; 264.000; valid 42; 4.300; 246.000; valid 43; 4.400; 285.000; valid 44; 4.500; 252.000; valid 45; 4.600; 238.000; valid 46; 4.700; 257.000; valid 47; 4.800; 280.000; valid 48; 4.900; 264.000; valid 49; 5.000; 242.000; valid 50; 5.100; 256.000; valid 51; 5.200; 257.000; valid 52; 5.300; 238.000; valid 53; 5.400; 234.000; valid 54; 5.500; 239.000; valid 55; 5.600; 252.000; valid 56; 5.700; 220.000; valid 57; 5.800; 239.000; valid 58; 5.900; 234.000; valid 59; 6.000; 233.000; valid 60; 6.100; 227.000; valid 61; 6.200; 215.000; valid 62; 6.300; 
252.000; valid 63; 6.400; 214.000; valid 64; 6.500; 231.000; valid 65; 6.600; 232.000; valid 66; 6.700; 214.000; valid 67; 6.800; 242.000; valid 68; 6.900; 216.000; valid 69; 7.000; 216.000; valid 70; 7.100; 225.000; valid 71; 7.200; 204.000; valid 72; 7.300; 224.000; valid 73; 7.400; 235.000; valid 74; 7.500; 227.000; valid 75; 7.600; 207.000; valid 76; 7.700; 209.000; valid 77; 7.800; 221.000; valid 78; 7.900; 207.000; valid 79; 8.000; 221.000; valid 80; 8.100; 200.000; valid 81; 8.200; 214.000; valid 82; 8.300; 228.000; valid 83; 8.400; 207.000; valid 84; 8.500; 200.000; valid 85; 8.600; 195.000; valid 86; 8.700; 193.000; valid 87; 8.800; 187.000; valid 88; 8.900; 212.000; valid 89; 9.000; 197.000; valid 90; 9.100; 206.000; valid 91; 9.200; 227.000; valid 92; 9.300; 202.000; valid 93; 9.400; 180.000; valid 94; 9.500; 186.000; valid 95; 9.600; 193.000; valid 96; 9.700; 203.000; valid 97; 9.800; 204.000; valid 98; 9.900; 189.000; valid 99; 10.000; 202.000; valid 100; 10.100; 186.000; valid 101; 10.200; 174.000; valid 102; 10.300; 180.000; valid 103; 10.400; 208.000; valid 104; 10.500; 194.000; valid 105; 10.600; 185.000; valid 106; 10.700; 194.000; valid 107; 10.800; 184.000; valid 108; 10.900; 178.000; valid 109; 11.000; 170.000; valid 110; 11.100; 181.000; valid 111; 11.200; 181.000; valid 112; 11.300; 183.000; valid 113; 11.400; 192.000; valid 114; 11.500; 167.000; valid 115; 11.600; 179.000; valid 116; 11.700; 175.000; valid 117; 11.800; 161.000; valid 118; 11.900; 145.000; valid 119; 12.000; 175.000; valid 120; 12.100; 192.000; valid 121; 12.200; 178.000; valid 122; 12.300; 164.000; valid 123; 12.400; 162.000; valid 124; 12.500; 188.000; valid 125; 12.600; 173.000; valid 126; 12.700; 170.000; valid 127; 12.800; 191.000; valid 128; 12.900; 156.000; valid 129; 13.000; 159.000; valid 130; 13.100; 173.000; valid 131; 13.200; 154.000; valid 132; 13.300; 155.000; valid 133; 13.400; 151.000; valid 134; 13.500; 165.000; valid 135; 13.600; 161.000; valid 136; 13.700; 
150.000; valid 137; 13.800; 161.000; valid 138; 13.900; 161.000; valid 139; 14.000; 168.000; valid 140; 14.100; 152.000; valid 141; 14.200; 157.000; valid 142; 14.300; 171.000; valid 143; 14.400; 154.000; valid 144; 14.500; 162.000; valid 145; 14.600; 162.000; valid 146; 14.700; 173.000; valid 147; 14.800; 162.000; valid 148; 14.900; 154.000; valid 149; 15.000; 155.000; valid 150; 15.100; 139.000; valid 151; 15.200; 134.000; valid 152; 15.300; 148.000; valid 153; 15.400; 148.000; valid 154; 15.500; 155.000; valid 155; 15.600; 140.000; valid 156; 15.700; 151.000; valid 157; 15.800; 140.000; valid 158; 15.900; 147.000; valid 159; 16.000; 146.000; valid 160; 16.100; 146.000; valid 161; 16.200; 145.000; valid 162; 16.300; 127.000; valid 163; 16.400; 156.000; valid 164; 16.500; 144.000; valid 165; 16.600; 145.000; valid 166; 16.700; 137.000; valid 167; 16.800; 131.000; valid 168; 16.900; 130.000; valid 169; 17.000; 146.000; valid 170; 17.100; 133.000; valid 171; 17.200; 121.000; valid 172; 17.300; 114.000; valid 173; 17.400; 146.000; valid 174; 17.500; 122.000; valid 175; 17.600; 131.000; valid 176; 17.700; 144.000; valid 177; 17.800; 134.000; valid 178; 17.900; 147.000; valid 179; 18.000; 126.000; valid 180; 18.100; 131.000; valid 181; 18.200; 120.000; valid 182; 18.300; 133.000; valid 183; 18.400; 138.000; valid 184; 18.500; 127.000; valid 185; 18.600; 140.000; valid 186; 18.700; 125.000; valid 187; 18.800; 133.000; valid 188; 18.900; 128.000; valid 189; 19.000; 114.000; valid 190; 19.100; 131.000; valid 191; 19.200; 113.000; valid 192; 19.300; 123.000; valid 193; 19.400; 128.000; valid 194; 19.500; 127.000; valid 195; 19.600; 132.000; valid 196; 19.700; 120.000; valid 197; 19.800; 109.000; valid 198; 19.900; 107.000; valid 199; 20.000; 112.000; valid 200; 20.100; 108.000; valid 201; 20.200; 124.000; valid 202; 20.300; 110.000; valid 203; 20.400; 120.000; valid 204; 20.500; 117.000; valid 205; 20.600; 115.000; valid 206; 20.700; 122.000; valid 207; 20.800; 102.000; 
valid 208; 20.900; 128.000; valid 209; 21.000; 105.000; valid 210; 21.100; 114.000; valid 211; 21.200; 114.000; valid 212; 21.300; 107.000; valid 213; 21.400; 118.000; valid 214; 21.500; 118.000; valid 215; 21.600; 120.000; valid 216; 21.700; 113.000; valid 217; 21.800; 126.000; valid 218; 21.900; 119.000; valid 219; 22.000; 108.000; valid 220; 22.100; 92.000; valid 221; 22.200; 92.000; valid 222; 22.300; 94.000; valid 223; 22.400; 107.000; valid 224; 22.500; 117.000; valid 225; 22.600; 92.000; valid 226; 22.700; 105.000; valid 227; 22.800; 99.000; valid 228; 22.900; 123.000; valid 229; 23.000; 111.000; valid 230; 23.100; 102.000; valid 231; 23.200; 109.000; valid 232; 23.300; 96.000; valid 233; 23.400; 90.000; valid 234; 23.500; 94.000; valid 235; 23.600; 103.000; valid 236; 23.700; 98.000; valid 237; 23.800; 101.000; valid 238; 23.900; 108.000; valid 239; 24.000; 101.000; valid 240; 24.100; 100.000; valid 241; 24.200; 108.000; valid 242; 24.300; 93.000; valid 243; 24.400; 107.000; valid 244; 24.500; 107.000; valid 245; 24.600; 92.000; valid 246; 24.700; 94.000; valid 247; 24.800; 89.000; valid 248; 24.900; 97.000; valid 249; 25.000; 95.000; valid 250; 26.000; 892.000; valid 251; 27.000; 890.000; valid 252; 28.000; 841.000; valid 253; 29.000; 850.000; valid 254; 30.000; 807.000; valid 255; 31.000; 791.000; valid 256; 32.000; 753.000; valid 257; 33.000; 728.000; valid 258; 34.000; 703.000; valid 259; 35.000; 651.000; valid 260; 36.000; 640.000; valid 261; 37.000; 612.000; valid 262; 38.000; 550.000; valid 263; 39.000; 539.000; valid 264; 40.000; 524.000; valid 265; 41.000; 501.000; valid 266; 42.000; 503.000; valid 267; 43.000; 478.000; valid 268; 44.000; 468.000; valid 269; 45.000; 438.000; valid 270; 46.000; 434.000; valid 271; 47.000; 441.000; valid 272; 48.000; 388.000; valid 273; 49.000; 381.000; valid 274; 50.000; 372.000; valid 275; 51.000; 380.000; valid 276; 52.000; 349.000; valid 277; 53.000; 326.000; valid 278; 54.000; 317.000; valid 279; 55.000; 
297.000; valid 280; 56.000; 299.000; valid 281; 57.000; 294.000; valid 282; 58.000; 258.000; valid 283; 59.000; 260.000; valid 284; 60.000; 264.000; valid 285; 61.000; 271.000; valid 286; 62.000; 250.000; valid 287; 63.000; 241.000; valid 288; 64.000; 239.000; valid 289; 65.000; 241.000; valid 290; 66.000; 220.000; valid 291; 67.000; 215.000; valid 292; 68.000; 217.000; valid 293; 69.000; 203.000; valid 294; 70.000; 196.000; valid 295; 71.000; 201.000; valid 296; 72.000; 198.000; valid 297; 73.000; 188.000; valid 298; 74.000; 183.000; valid 299; 75.000; 173.000; valid 300; 76.000; 166.000; valid 301; 77.000; 167.000; valid 302; 78.000; 187.000; valid 303; 79.000; 169.000; valid 304; 80.000; 162.000; valid 305; 81.000; 145.000; valid 306; 82.000; 163.000; valid 307; 83.000; 151.000; valid 308; 84.000; 154.000; valid 309; 85.000; 135.000; valid 310; 86.000; 134.000; valid 311; 87.000; 133.000; valid 312; 88.000; 133.000; valid 313; 89.000; 141.000; valid 314; 90.000; 134.000; valid 315; 91.000; 133.000; valid 316; 92.000; 120.000; valid 317; 93.000; 128.000; valid 318; 94.000; 114.000; valid 319; 95.000; 147.000; valid 320; 96.000; 139.000; valid 321; 97.000; 114.000; valid 322; 98.000; 128.000; valid 323; 99.000; 113.000; valid 324; 100.000; 127.000; valid [NewRecord] Operation=3 SampleType=Disk Disk=0 DataType=NORM Irrad SampleTemp=20 IrradTime=257 IrradDose=20.02 Started=6/4/2015 4:03:51 PM Stopped=6/4/2015 4:08:15 PM Luminescence/inst/CITATION0000644000176200001440000001014514367174077015017 0ustar liggesuserscitHeader("The R package 'Luminescence' is the joint work of many over many years. To cite the R package 'Luminescence' we suggest using the first entry and applying the rest were justified. 
To credit all authors **and** contributors, please see https://doi.org/10.5281/zenodo.596252 for an always-up-tp-date citation record with DOI.") citation(auto = meta) bibentry(bibtype = "Article", title = "Introducing an R package for luminescence dating analysis", author = "Sebastian Kreutzer, Christoph Schmidt, Margret C. Fuchs, Michael Dietze, Manfred Fischer, Markus Fuchs", year = "2012", journal = "Ancient TL", volume = "30", number = "1", pages = "1-8") bibentry(bibtype = "Article", title = "A practical guide to the R package Luminescence", author = "Michael Dietze, Sebastian Kreutzer, Margret C. Fuchs, Christoph Burow, Manfred Fischer, Christoph Schmidt", year = "2013", journal = "Ancient TL", volume = "31", number = "1", pages = "11-18") bibentry(bibtype = "Article", title = "Data processing in luminescence dating analysis: An exemplary workflow using the R package 'Luminescence'", author = "Margret C. Fuchs, Sebastian Kreutzer, Christoph Burow, Michael Dietze, Manfred Fischer, Christoph Schmidt, Markus Fuchs", year = "2015", journal = "Quaternary International", volume = "362", pages = "8-13", doi = "10.1016/j.quaint.2014.06.034") bibentry(bibtype = "Article", title = "A new R function for the Internal External Uncertainty (IEU) model", author = "Rachel K Smedley", journal = "Ancient TL", year = "2015", volume = "33", number = "1", pages = "16-21") bibentry(bibtype = "Article", title = "The abanico plot: visualising chronometric data with individual standard errors", author = "Michael Dietze, Sebastian Kreutzer, Christoph Burow, Margret C. 
Fuchs, Manfred Fischer, Christoph Schmidt", year = "2016", journal = "Quaternary Geochronology", volume = "31", pages = "12-18", doi = "10.1016/j.quageo.2015.09.003") bibentry(bibtype = "Article", title = "Bayesian statistics in luminescence dating: The baSAR-model and its implementation in the R package 'Luminescence'", author = "Norbert Mercier and Sebastian Kreutzer and Claire Christophe and Guillaume Guerin and Pierre Guibert and Christelle Lahaye and Philippe Lanos and Anne Philippe and Chantal Tribolo", year = "2016", journal = "Ancient TL", volume = "34", number = "2", pages = "14-21") bibentry(bibtype = "Article", title = "Software in the context of luminescence dating: status, concepts and suggestions exemplified by the R package 'Luminescence'", author = "Sebastian Kreutzer and Christoph Burow and Michael Dietze and Margret C. Fuchs and Manfred Fischer and Christoph Schmidt", year = "2017", journal = "Ancient TL", volume = "35", number = "2", pages = "1-11") bibentry(bibtype = "Article", title = "Environmental Dose Rate Determination Using a Passive Dosimeter: Techniques and Workflow for alpha-Al2O3:C Chips", author = "Sebastian Kreutzer and Loic Martin and Guillaume Guerin and Chantal Tribolo and Pierre Selva and Norbert Mercier", year = "2018", journal = "Geochronometria", volume = "45", pages = "56-67") bibentry(bibtype = "Article", title = "Age determination using feldspar: Evaluating fading-correction model performance", author = "Georgina E.King and Christoph Burow and Helen M.Roberts and Nicholas J.G.Pearce", year = "2018", journal = "Radiation Measurements", volume = "119", pages = "58-73", doi = "10.1016/j.radmeas.2018.07.013") bibentry(bibtype = "Article", title = "Luminescence age calculation through Bayesian convolution of equivalent dose and dose-rate distributions:the D_e D_r model", author = "Norbert Mercier and Jean-Michel Galharret and Chantal Tribolo and Sebastian Kreutzer and Anne Philippe", year = "2022", journal = "Geochronology", 
volume = "4", issue = "1", pages = "292-310", doi = "10.5194/gchron-4-297-2022") Luminescence/inst/WORDLIST0000644000176200001440000001215514521207352015040 0ustar liggesusersAEQUIVAL ANR Abanico AbanicoPlot Aberystwyth Acknowledgements Acta Adamiec Adrie Aitken Aktivitaet Alastair AliquotSize Aliquots Allkofer Amidon Analyse Angelucci Angulo Anhalt Archaeometry Auclair AverageDose Azevedo BINX BINfile BINfileData BT Baartman Barbouti Barwa BaseDataSet BayLum Bergakademie Bestimmung Bingen Biometrika Biomolecular Blasse Bluszcz Boca Boetter Bolker Bortolot Bos BosWallinga Bq Bracht Brightline Brookhaven Bugfixes Bulur Buylaert CCD CLL CMD CNRS CRC CREDit CRP CWCurve CWOSL Cammeraat Carstensen CentralDose Centre Chapot Characterisation CobbleDoseRate Colour Combès CommonDose ConversionFactors CosmicDoseRate CosmicRayRemoval Cresswell CrossTalk Croux Cueva Cunha DEoptim DFG DLED DOI DRAC DRC DRCSummary DRTResults DTU DTYPE Dau De DeLong DeWitt Debertin Debruyne Deconvolution DetPlot Dolni Donnelly DorNie Dormagen DoseRate Dosimetry Dunson Durbin EBG EfficiencyCorrection Efron Ein Elsevier EmissionSpectra Engelen Eq Erfurt ExampleData FDist FFM FI FMM FadingCorr FadingMeasurement FastRatio FilterCombinations FiniteMixture Freiberg Frouin FuchsLang Furetta GEPRIS GFZ GammaDose Gauthier Gelman Geochronology Geochronometria Geoff Geomorphologie Geomorphology Geophys Giessen Gorin Grabmaier Grabmeier Grampp Grehl GrowthCurve Gruen Guadalentin Guo Guralnik Guérin Gy HC HZDR Hadley Hamzaoui Hase Hatte Heer Helmer Hilgers Hintze Hoehne HomogeneityTest Hornik Huot IC IEU IRAMAT IRSAR IRSL ISSN ITC ImageJ Ioannides IrradiationTimes Jakob Jinmium Jokisch Justus KDE Kambhampati Kars Karsten Kehl Kinahan Kitis Krbetschek LABX LEDs LIGHTSOURCE LM LMCurve LTYPE LaScArBx LabEx Lagroix Lahaye Lamothe Lanos Laslett Lauer Lausanne Lefrais Lenovo Lett Levenberg Liebig Liritzis LnTn Lubachevsky Lum Lumineszenzdaten Lx LxTx Löss MAAD MASSAKT MCM MERCHANTABILITY Madsen Majeed Marquardt 
MatLab MaxDose McKeever Meszner MinDose Mittelsachsen Mittelstraß Moine Morthekai Moska Mungo NCL NNDC NRt NUTECH Namche Nievenheim Normalisation Normalise Nuclide Nurmela Nutech OSL OSLAgeSummary OSLLifeTimes OSLLxTxDecomposed OSLLxTxRatio OSLdata Oestergard Olley Ostrau PECC PMT POSL PSL Pagonis Palais Paläobodensequenzen Papachristodoulou Parmigiani Pearce Pederson Petr Photoionisation Photomultiplier Pikal Plummer Preusser Princton Pych QNL RCarb RECTYPE RLum RLumCarlo RLumDocker RLumModel RLumShiny RMF ROI ROIs RStudio RadialPlot Radionukliden Rainer Ramped Rastin Rastin's Raton Rds Rekonstruktion Risoe RisoeBINfileData Risø Rodnight Rosspeintner Rottewitz Rousseeuw Rtools Ruehle Ruprecht SAR SARMeasurement SCHM SG SHA SPE STRB SUERC Sanderson Sandur Sauer ScaleGammaDose Schlunegger Schoorl Selva Semrock Simmank Singarayer Singhvi SingleGrainData Sippewissett Skłodowska Soerensen Softcomp Sohbati SourceDoseRate Spectrochimica Spectrometry Springer Spätpleistozäns Stamoulis Standardised Steffen Stolz Storn Straessner Streibig Subclasses SurfaceExposure Svoboda Sébastien TLAPLLIC TLLxTxRatio TOL Technometrics TestFile ThermalLifetime ThermalQuenching Thermo Thermoluminescence Thomsen Thue's Tibshirani TnTx Tobias Torben Trautmann Trave Trebgast Tribolo Tx UMR UNIL USGS Umweltbedingungen Un Université Unravelling Urbanova Valla Vehtari Veldkamp Vestonice Villaverde ViolinPlot Visualise Vogl Wageningen Wallinga Weniger Wickham Wiechen Willian Wintle Wissenschaftsnetzwerk Woda WodaFuchs XLS XLSX XRF XSXG XSYG Xcode YAML Yoshida ZEU Zeitschrift Zeuchfeld Zilhao abanico abline absorber al aliqouts aliquot aliquots aluminium amongst analyse analysed analysing anhand ascendantly astr athermal baSAR bbmle behaviour binx calc centre centred centres centring chemometrics chronometric colour coloured colours confint cts curveType customise customised data's de deconvolution deconvolve dependences der des detrapping devtools dispersive doi dosimetric dosimetry du eV eq et 
feldspars fuer gSGC gcc gchron generalised geochr geochronometry ggplot github grey harmonise hg hotbleach http https initialisation irradiations isochrons isothermal iteratively ka labelling lamW lambertW lexsyg lm loess logarithmized lossy ly mGy mJ mW macOS massebezogenen matplot matrixStats minimisation minpack mit modelled modelling mol monochromator mtext muon muons nd neighbouring nls nlsLM nm normalisation normalise normalised normalises nuclide onwards openFileInOS optimisation optimise optimised pHM pHMi pIRIR pIRIRSequence pLM pLMi pPM pPMi packings palaeodose pandoc pch persp photoionisation photomultiplier physica plotly poisson polymineral portableOSL pre preprint programme pseudoR psl quageo quartile radioelement radioelements radiofluorescence radioluminescence radionuclide radmeas rasterImage readBin readSPE readTIFF readxl realised recognised reorganisation reproducibility retrapping rightAnswer rjags rmarkdown rnorm rollmean rollmedian rowMeans rowMedians rowMins rowSds rowSums rowVars rstudioapi sTeve serialised sig sigmab standardisation standardised stratigraphy summand summarise summarised summarises summarising svg tc thermochronometry thermoluminescence txtProgressBar uid un unfaded uniroot unitless unrecognised utilises violinmplot violplot vioplot visualisation visualised visualising von writeTIFF xsyg xy ymax ymin zur µA µm µs ’Luminescence’