DEoptim/0000755000176200001440000000000014333510001011601 5ustar liggesusersDEoptim/NAMESPACE0000644000176200001440000000072114332756015013040 0ustar liggesusersuseDynLib(DEoptim, .registration = TRUE) export(DEoptim, DEoptim.control) S3method("plot", "DEoptim") S3method("summary", "DEoptim") importFrom("grDevices", "devAskNewPage") importFrom("graphics", "abline", "matplot", "par", "plot", "plot.new") importFrom("stats", "runif") importFrom("parallel", "clusterExport", "parApply", "makeCluster", "clusterCall", "stopCluster") importFrom("methods", "hasArg") importFrom("utils", "installed.packages")DEoptim/demo/0000755000176200001440000000000014332756015012545 5ustar liggesusersDEoptim/demo/benchmarks.R0000644000176200001440000000460514332756015015012 0ustar liggesusers# This benchmark script is based on one written by Dirk Eddelbuettel # while he was porting DEoptim to Rcpp. His efforts in code review # and benchmarking are greatly appreciated. demo.DEbenchmark <- function() { Rosenbrock <- function(x){ x1 <- x[1] x2 <- x[2] 100 * (x2 - x1 * x1)^2 + (1 - x1)^2 } Wild <- function(x) { ## 'Wild' function, global minimum at about -15.81515 sum(10 * sin(0.3 * x) * sin(1.3 * x^2) + 0.00001 * x^4 + 0.2 * x + 80)/length(x) } Rastrigin <- function(x) { sum(x+2 - 10 * cos(2*pi*x)) + 20 } Genrose <- function(x) { ## One generalization of the Rosenbrock banana valley function (n parameters) n <- length(x) 1.0 + sum (100 * (x[-n]^2 - x[-1])^2 + (x[-1] - 1)^2) } maxIt <- 250 # not excessive but so that we get some run-time on simple problems basicDE <- function(n, maxIt, fun) DEoptim(fn=fun, lower=rep(-25, n), upper=rep(25, n), control=list(NP=10*n, itermax=maxIt, trace=FALSE))#, bs=TRUE)) adaptDE <- function(n, maxIt, fun) DEoptim(fn=fun, lower=rep(-25, n), upper=rep(25, n), control=list(NP=10*n, itermax=maxIt, trace=FALSE, strategy=6))#, bs=TRUE)) runPair <- function(n, maxIt, fun) { gc() set.seed(42) bt <- mean(replicate(10, system.time(invisible(basicDE(n, maxIt, fun)))[3]), trim=0.1) gc() set.seed(42) ct <- mean(replicate(10, system.time(invisible(adaptDE(n, maxIt, fun)))[3]), trim=0.1) return(data.frame(DE=bt, JADE=ct)) } cat("# At", format(Sys.time()), "\n") reps <- c(5, 10, 20) res <- rbind( do.call(rbind, lapply(reps, runPair, maxIt, function(...) Rosenbrock(...))), do.call(rbind, lapply(reps, runPair, maxIt, function(...) Rastrigin(...))), do.call(rbind, lapply(reps, runPair, maxIt, function(...) Wild(...))), do.call(rbind, lapply(reps, runPair, maxIt, function(...) 
Genrose(...))) ) res <- rbind(res, colMeans(res)) rownames(res) <- c( paste("Rosenbrock", reps, sep=""), paste("Rastrigin", reps, sep=""), paste("Wild", reps, sep=""), paste("Genrose", reps, sep=""), "MEANS") res } demo.DEbenchmark()DEoptim/demo/DEoptim.R0000644000176200001440000000472414332756015014240 0ustar liggesusersdemo.DEoptim <- function(){ 'print.comments' <- function(str){ star <- "**********" cat(paste("\n",star,"\n",str,"\n",star,"\n",sep="")) } 'wait' <- function(){ t <- readline("\nPlease 'q' to quit the demo or any other key to continue...\n") if (t == "q") TRUE else FALSE } 'Rosenbrock' <- function(x){ x1 <- x[1] x2 <- x[2] 100 * (x2 - x1 * x1)^2 + (1 - x1)^2 } 'Wild' <- function(x) 10 * sin(0.3*x) * sin(1.3*x^2) + 0.00001 * x^4 + 0.2 * x + 80 'demo.1' <- function(){ r <- DEoptim(Rosenbrock, rep(-10,2), rep(10,2)) summary(r) } 'demo.2' <- function(){ r <- DEoptim(Rosenbrock, rep(-10,2), rep(10,2), control = list(NP = 100, trace = 1)) summary(r) } 'demo.3' <- function(){ r <- DEoptim(Rosenbrock, rep(-10,2), rep(10,2), control = list(NP = 50, itermax = 300, F = 1.5, CR = 0.2, trace = 1)) summary(r) plot(r, type = 'b') } 'demo.4' <- function(){ r <- DEoptim(Wild, lower = -50, upper = 50, control = list(NP = 50, trace = 1)) par(mfrow = c(2,1)) plot(r, type = 'b') plot(r, plot.type = "bestvalit", type = 'l') } 'demo.5' <- function(){ r <- DEoptim(Wild, lower = -50, upper = 50, control = list(NP = 50, trace = 1, digits = 8)) } str.stop <- "end of the demo" tstr <- "\nRun the optimization process for the 'Rosenbrock'" tstr <- paste(tstr, "\nBanana function. Search space [-10,10]^2.\n", sep = "") print.comments(tstr) print(Rosenbrock) print(demo.1) if (wait()) stop(str.stop) else demo.1() tstr <- "\nDecrease to 100 the members in the population.\n" print.comments(tstr) print(demo.2) if (wait()) stop(str.stop) else demo.2() tstr <- "\nIncrease the number of iterations to 300, and" tstr <- paste(tstr, "\nmodify crossover and F parameters.\n", sep = "") tsts <- paste(tstr, "the result") print.comments(tstr) print(demo.3) if (wait()) stop(str.stop) else demo.3() tstr <- "\nRun the optimization process for the 'Wild' function." tstr <- paste(tstr, "\nSearch space [-50,50].\n", sep = "") print.comments(tstr) print(Wild) plot(Wild, -50, 50, n = 1000, main = "DEoptim minimizing 'Wild function'") if (wait()) stop(str.stop) else demo.4() # tstr <- "\nIncrease the number of printed digits" # print.comments(tstr) # if (wait()) stop(str.stop) else demo.5() cat("\n",str.stop,"\n") } demo.DEoptim() DEoptim/demo/00Index0000644000176200001440000000023614332756015013700 0ustar liggesusersDEoptim some examples of the DEoptim function. benchmarks some common optimization benchmarks, comparing various strategies (e.g. DE vs. 
JADE) DEoptim/THANKS0000755000176200001440000000063314332756015012541 0ustar liggesusersThe authors would like to thank the following people for their help: Tarmo Leinonen Eugene Demidenko Rainer Storn Soren Macbeth Hans Werner Borchers Enrico Schumann Jean-Luc Jannink Dirk Eddelbuettel Ralf Tautenhahn Vinícius Veloso Kris Boudt Suraj Gupta Jeff Allard Jonathan Owen Alec Solway Tobias Weber Alexey Stukalov Eliot McIntire Jason Thorpe Henrik BengtssonDEoptim/README.md0000644000176200001440000000414114332756015013100 0ustar liggesusers# DEoptim [![CRAN](https://www.r-pkg.org/badges/version/DEoptim)](https://cran.r-project.org/package=DEoptim) [![Downloads](https://cranlogs.r-pkg.org/badges/DEoptim?color=brightgreen)](https://www.r-pkg.org/pkg/DEoptim) [![Downloads](https://cranlogs.r-pkg.org/badges/grand-total/DEoptim?color=brightgreen)](https://www.r-pkg.org/pkg/DEoptim) `DEoptim` ([Ardia et al., 20xx](https://CRAN.R-project.org/package=DEoptim)) implements the Differential Evolution algorithm for global optimization of a real-valued function of a real-valued parameter vector. The implementation of Differential Evolution in DEoptim interfaces with C code for efficiency. The latest stable version of `DEoptim` is available at [https://cran.r-project.org/package=DEoptim](https://cran.r-project.org/package=DEoptim). The latest development version of `DEoptim` is available at [https://github.com/ArdiaD/DEoptim](https://github.com/ArdiaD/DEoptim). ## Please cite the package in publications! By using `DEoptim` you agree to the following rules: 1) You must cite [Mullen et al. (2011)](https://doi.org//10.18637/jss.v040.i06) in working papers and published papers that use `DEoptim`. 2) You must place the following URL in a footnote to help others find `DEoptim`: [https://CRAN.R-project.org/package=DEoptim](https://CRAN.R-project.org/package=DEoptim). 3) You assume all risk for the use of `DEoptim`. Mullen, K., Ardia, D., Gil, D., Windover, D., Cline, J. (2011). DEoptim: An R package for global optimization by Differential Evolution. _Journal of Statistical Software_, 40(6), 1-26. [https://doi.org//10.18637/jss.v040.i06](https://doi.org//10.18637/jss.v040.i06) ## Other references Ardia, D., Mullen, K., Peterson, B.G., Ulrich, J. (20xx). _DEoptim: Differential Evolution in R_. [https://CRAN.R-project.org/package=DEoptim](https://CRAN.R-project.org/package=DEoptim) Ardia, D., Boudt, K., Carl, P., Mullen, K., Peterson, B.G. (2010). Differential Evolution with DEoptim: An application to non-convex portfolio optimization. _R Journal_, 3(1), 27-34. [https://doi.org/10.32614/RJ-2011-005](https://doi.org/10.32614/RJ-2011-005) DEoptim/data/0000755000176200001440000000000014332756015012532 5ustar liggesusersDEoptim/data/xrrData.rda0000644000176200001440000002622414332756015014635 0ustar liggesusers}w\TdfTH$"3BR I[)lRF޻N4Nu )3[VPzU>]>?v?q>OHb""""|)&m$װc..vnv""C'DDMtq\k/8~sX{ݹYsߑyW|qc p\|LW#<0uD\|,хO_X׊)cw_S4kO`|xƑuv>k er.[a zokSsB(9w]8V:E_L枳Qwݬ vE[5}O˸x9gpWmÚIV3xy=L Յկss#bv# %#^"dHx:"xK"P9Bw!5ӏMB9ieQGo ev'{"__}M'lT=_y=RFA'U*_tA֔tly=yrLÂCp0B!UtB:u/*,w뼒'U"#x.!P۩c299]!vXN*(Mڊʓ 23ZW5\uBFlD̥r]W}5bq#e&73o:cT%*kFOVIM/X-OvWB> Cg~Ը ",nLEDe9[ש!ER\zN "[Fĝk7z/n 5lz5GëX~R.'q]֚/ֻ+'0hݞ#~)u_IN*;[(/@ 15?~{MğC|mħ(/C|K"k;m\1⽝]RxaR '?Aoҫ km,fg^fռW!zH 5e &*V{]! |G[=یCм!&!> ߨ:~GsHx~ORZ!۲ $45@dg_8cWH\s CFDyԀ{. @۝P?MAb|9C̄Xf*w7p:Αa.G21jHlR{74gw@bϽ4曂[&$ɟW.%. 
iUI$m25e:;fOxCMCqNH h*>ya,*$叻0T I Htu$=?%I$׎C[J) H]WRqs<#6U Uz\A򚍬בո8$[g>RC]BV eH3Uz:)Ʀf#A2A#ɕ3lF#zAH~Qɟ^B/SYC>!E±|HL g"e#g"Eu^’[ )_j4?"Ůi鐐q)C5Z9HɸhRL?>*E rU:M;)/MiFJTS8;1D&oHtR,AZ7j#RM%Nnp=T{۾IHusPH շLVT*j r}~Ԫ"agW8R,ŠWHm;FRRى?ӑ6eI}-?׃rfDGUҶX3i{#v]J}嫓!o Ƭ'_c{ꀡ:e504S.L}JE`Za+}g6Daۮћ05T G{?Xa|0-Bv#w`D n F?aȊu y dj{0: ,5a^/Rx*#PhׁѼyVm0ZEBMc0z/0CUDdJ5dsOc'm5 'EՋh͉VɾݲaH"JW?F׽'6vjozl L mm5ٻ7l"qJdi+yl "=6dlxD#;/r&o̡ 1ldd.u_/i :ϫkރI'Ev}udo^ 70|Nd/VlS7VX-stn9qeog SeV9ؙ`ΟqK{LSu 0si(i740m+u,s__^0܋^Lf~ӫNy /ܕxZ;WZqAj`_$)=w{Y^lҩb%h?|{57y7Hn>O&|R?o\~Hy-cٷ[1]ui=j4QXq;X?$ZbԨ`(Q ڰk37-X-RK=~ ֈ/ -[Ww%ooL:j*cҨ`9XJ+/'F3`Ԓz,_%a9`<$+wͲ$6Vn׮aCk?/U-`=xa1$dMz4!X/ַ`WƃkwϫK\d3j"Vș#~cw!gs8,`޵9+_n9k)-BFU]'/_WTz@ƶE=6vi r9'9!紶i5 #'kƳx$=[74r˜EN0w9Ӯ ,t|+_ٹn\}1{4+rOSwG㴄P6FeQ4U|⍜3VXP"GGK&{ w̏ȕ]o= CRk"R]6¥j Z;}*_OA:na/MTGUsCcom4rl-)\b QE7 ?O|כ#7yLpyr'O ڎuL[k=cR O!}rh,[gq7}U8QãmYܟ~6{d2G;7f,䍿nMh-f=`߈#oɌ9.[ o?RwyG!u-|y/D #~k (~f 1Mly 9uW8#/<~)ȋKy85:Մ|kzowSdJ4V;Cdklֿ 9J|:T<%ޛU GyҦk΋cZS @/y?"1.,_X%*kb.C燑S.cݞL4r"CZ>JZ-{wN qN?{skVxz9u l[qˋJ`c_:ƕ6Jt+fRN`[Uw z c$v3i;x{؞n(a4{vfQ`VEUM),9:U˓OKK^)a=صG|~1b17|o-4q[e/5F8`YEt^>uK?<]}(n߅CץlػU(طO*) |ۆc+M^\PxCqH7D7,2EA;jP rޫ@Af((?^?C;P/k2vYh.6zFAgQc(޼H{ 8CBDmGJ̗p&vLXMi53|^x5pTV JތY<.K?2*%p;7k`E>mp\nرV;PWPGĔ:5ɎR]b;9jM8\wѫ +eT;pn\kPoKpN[Λa!IͪF߷_SLg (6 G>=Z P8̫QX1@G%P…P2iQlk3(ԋ< /CELTl(X½WǣȞ{Qn^pc: OlDdˆUu50Ai*fL{o~EYO3PXy e.EtW|քN}ګPXa][ WFaS;Q=%ǢvEawлu=2(xy$/ Ⴂs,zP$sb(գ|B6xYE "5 RR(ZNs(>286-O<˧.Pdcٚ=r(z*5 EMY"qQ(:o6~~Q2DpEWgyk&1y.9fNAQ瀵RmlEU"z7PtӪ%(_ղ.znXEY97Yl9"k.ۗ4th>n #'D,x֗P bLB.5d8βg|Qr\WE$JW|%A]{W$Zam(I3z J2L={_%CT5$az=J.5%x0Pr/ʧ <=o`EI AZ(b'JZ3˶GDHyJIӎR[*T2}߉~(7}vNwz ~latN_(]lS2-8k'YڣTwZcJ 'ܮBi>FjL_mCYuԭ;DkJOnTv[aFqBQ{Z)n>uLMTn]RD(R%Y+ޭwJ9YogUͷ-/ez~w\j_Oe: (g~JX/0!w0(4+#|3pgleyY#ukU.s` 6ŮoƜbt#pW,츖fɳ x߰<d306yXZ]`;xUw_޾/Msb xNss4JX2NxLo'и*y|,k5]7Wj dWm5OwA11n5PH +)ߐ&~u=DNWoP3_@IpIPCR}*2b^pɛe\NUypSE\A{1B@& l$3~;?JqԽ*0)TS2O( Ipz6 (i+d.e^ (]D Nz*fj)\5(VQOS8n8EIj'RW8zR 8FP^d0yP;VPD?P,ѧQv+o )SPJ?ħϔ߇cTuԹ(HwMP$A^?)2kuˉ''RvP~)P[ `o%Qq():T9}[Зo6:FFзj[8-LJx:R?kѧ@ٷe >:L(dR?j|Tm oIC}@)6_P)띩8 ˩Wn.Pҟ9O⤿=?Q)G^i!Q~(}}!')>2xOwWK~{)>OR|n#} >ߤP|BX*n*tGvW?Qr/-M'/}wSy(>)9u|BΣFJRwS7H(V1)Xƥ-X:m|x֜OZsm>śkI_wVS"l%| K"΢x,xi#"/ba E!ӄŤ_^* Qj/Ox,ŚUTdD&v|)C}8N"u{ΡM1b!/9"EDQF*)[J<bO*< I~2,RW1lCI<<3Vy~r(G>m)fR,%KEI$n'sdymҠ },OFTS |DWw0E/O~N1b* bϔ $yrs3b.̅IV H)ɜcHR/ϻi$!L氏b<ϤH]T2I$OyƑåH?M$sB읫 _&|}J0Q>ܢ(Ɛ&&ȹ7]Q^?LP'u:DGs' O>d)x9 qɽsoir~܃;#ϑħ]A#~6DoMhEvn/Gυ+v؟ |ГMItǃFّ&u?+LlAhNwD%<yH܅םJxs']irNabwqzp?L߇5kJdS"ٜNCQkp]IG""vHDO#q~nJhBIDo=΂Nw˞[K齰W~^l:hY>z7xۈ~-Wi?KF?t_t}DO~wy>l-C(/eGݗ 3 1F{iFd~vZsK}@?mBFĞ;^CZ&47Ꟗi;zH\Cﱕ~]7OǧF?^CE f"oLڟ_:sKσ:i{ZF>k  5?~ C5:.=W^C//Y(}siPc!ڏ=]s#!}{BB!i?} y׍t=B};}o!uspk g d#Tt= MK νM?tz^x{IǧAρK7M~yۿ`=]Ͽ9h KoBqhj GV!?C!;:~|G 2}|=gB]P^:p{o?"$vtϹP{$_'/B2}{?7=_O"BuO(P<~hB{%|Oÿ}yq]!?zt]t|7MA[/ {Lȟ_}~ꏮi;&!_|:?Py~:xF˴ݿ9>#}ŸŸug]!ӵtt}"P=tkzz_ߏB~B{~ !=w.sgP\o9 #|{`9+kY ?犞3OG!~/n:~}{@x _{v]_oAERlɿ 9HXD;GY;0[)+Vps[DžϗN[+CEDEoptim/data/SMI.rda0000755000176200001440000005772614332756015013676 0ustar liggesusers< <{lɒRB*M$H҂xJQI(R!Jم,Ǿ>w9g:cY庾׌s6.`,@7  <.X NCgXɊIWl"Ex m[<6i4P1;֞%SJcvЁ *HmqNK Un,*Zq`O t| sYK 4%$-q/Aè-jn&yMo?e48le3{bB\%4v~x>gy i[}^£ BEP%WLM Ť"H<8͒"4|QAwNX^[t;avH76Rr4܇"YF3-p+£O1qۨ5( ƵQCH,RwԲ"7K2.2"׳f٨ָ?sun?sA.|/J1QlD<^Ho6Rw)8_D꩟KxGϩDMόh gč$yS;GcZęZ*[&)4v#t;+]!iU]h_$AWJs5M0ښ٪x$$6.s]%HIvI텼*n}h!}m "K<rT,x+S }p*zdGc: Ar,Vf8A堿 Bd+Y&G<$P}9Kfr=Ba_3 X ;PZMZr OuWCO+ 4}k4O(Gug=sIp5 -bEOheZNZx݃ ę6;MV ppb˵ :䖹:gٸ0i >+&pS츙Dk |CR+ WtU)eDG}׃!4N?I l\Y.JVhb _&K%/o>x'U #pUևMGLAq܇is3x(j> ~Ef#pekSt/˄ml9_*Z?$IT/CKW]џy6ΦxCC \C)DqfN}(KW!R>Vp!Ӊ8^vP+f z[JK˟Q(b],b?zK (p),hW*-*dbw&*r!mkAO-KmymH3bO7[-Ry&@M֢x$ײa_Xަpm'g 
:{QCUַ`s%m$*nYe/7L]Qig;# ֶP8=!pӖMV~Me 1mpB snc'[Z:^ ژ"8ʔ(W|-i6{٘sJlP /Sܪ>T^, pHЗ'}>_' ䷪ֱ.Ef?kb jђSjQݙ ̾qR:C)4sD"z8}T̏%_\ٸk4 Wh8kx W\أiC"K _qAiPu=: Yv%o_zپ1PߔB84Oܸ9'Xe:}kY *ۚ#/`͜hQO ʿ hNew"i2Qmw (Ҕ왱3&λn.gOf>hIX:! ǟ3աp׾/:m)|sg44>sQ}?BwI T+;YqH 5=w:=0%mI>;JtxyXQ?@neb(wC;j+w۩ߺ :&?/C{3nHֳPD{{hpGp'b/ӽ*I\az5-Qk4uU [kђRPB6uMD\)E TӇDU!?7:ef7Z0P_]4*8Ngʭ{i*_$k`-W*lZJ >P>n~i63J'z ͹$:y7(Lzs |,- }O);x`@vo)4wu{Ğe[uo<8x2My\>P[Wh S4Tg =$Mz\0 yR}__:*fVѽp?# 4n{\EAF | 0kw ijs;$4׳]+teoPKY+vkQ7ObW Aby0{)cN{9k] wMC1'"t)0ǃSu? b)̮o)\uhY0/Hqp?G@rT\8pSkDS,xޖU:cXzq\BVV Py%>h(Qj‘_(\_]zikϳ_.bB2w(㏔)ɣSuh^w+\Rmai$5Z7l a֫b$X sXfle *mlT~^C mӿ ׇuK`ڭF{SmBv10ߣq\7q $YoNWk|aXvbg%by}+Iq"90]6R@A8Ž8n̪P\Jmw?|FרOhЇ9,Fظw-Z]`= GW I%p'xpvբE#yo`JvĂ~f|@Ƌ#~+ ˽y`3b5ڥǘ|ClԷz`E/I"P[+qpz7K_hqX`vJo\:o5v`!Nz^&ʺڟξ|&m-HPWPtEfJ ԏ[ꅫ,h4huy5MG#4Ywќ|8p[ǜVT}[xaِu6^|p={?i<^2 VrGT͒x7c)4f̈́ LtBS;6+IZ;$P- q3 texoUa.x3`qʹyvǃ_8xG\*Ug0l %ao8W"ܐ 7@^&. FOYq_YSg?xMgANgZ6 hTB>_{hthY5(4[001x=ީxGɋѓXc[vڶ"-H{_b!'.ványk{.p BunO&Q}&Te=n1rpn{KM+TdzISq"1=(-[I!dw> n]"9Z)@j)tUm98˥}䜡aɷ+U9Sh&>VQxt;ɭI}rݰM/\ʻ [9VZhh=48i<$j.:@6HA6b s-8F$q{R©f"(ڝ bC-/&8.e zrKUK_v2c R[)]ؘC &sU? .^%f'9 &P_) "k޿]pkikXqpO|}b. v㱍a.ow/ڐB~x'+ C5t\ه;~'ԩU9)l=yȋUxLL yQ0"@߸IWm4ĥ[a? WB>P6{')'9p]mtvA}~ 3+a:Wmx0w{% ëc9SKb!WpFI9 >BfDܾl43m'F JY-}RDB~cDcsiM,kگ Jl&QQR .}h䇮%da 6Τi@JP:¶ ]{ٖ h'o<:n upjK@EvnA'~6?M KS[V2WM` \#dAn;qƜzWuѸU:GqTӧ?0Q/PJoIpX13%X#$p-(\kj"#ޘ_{w܀aCU-6Ɲqʟ2 p5DU,DosK}@a ;wBX Zq 䎡&! o@!Mz<a4׽ۻ\^n@c+&HO&QJ \wq(гo_~0pMjiN\# uٷJ;65C)Hw[#{$zA9pLXnj2P"4@g.P055Q߫85"Ptϖ8֧GMan_\rte}+Ѡ_9xA rBE#b8]K"bP~$T/S#PV5 )rDOB'>&Pnߪ Cçr@ eaGcܤpAI~*K4<߳.UiC:fYxm*<Օ=$DŽ~m~dmK4aӖPs(T$-"~F~[$*JY#!KjN/}J0kP6:sK+QK4~avV plMpɳYp<0gH,!AUУSۮfw/bΧ1cBG WhF/,Ӥ0](#AzPӇܬ#V~ң7ͩK!/LO2ZM\—@e-4NkO,= 겨I82!DJȠ0o+4h1-a^bwW(@:Wn') 6VIf{Ǧ:SЯmy[ (eʸ(B7\i>4 ~XfD%}B%weئ0h`7KnjQS K\W6y%#ڷzWaZDΠđsܪ) #PB>v' x@;>}:e;k ];Cзm|y4*(Qn{g\ |~$f2|(ǃ^ߘc*П CQݏ[H/>wVp^ݳ{'3:N=27nߍRoH Zct2Be8YnN%}$n~dR)F1<+I\wm; *D(?壯 ڝ 4,^ )W%uwk;jʔr =CDb"x ]M oϩ&|QǻGS3<[f+M?YfIԕ y?p7ðeV8Ĩ64OYFW\MWM%+v.}h5 &Bᮼ*(smk=Yξa^d4k|l$c*I/kn˦E ,'o/_c{Mdw9žq.H:[׍׿E(mUͰW]pԷ.k|IH WԖ8Mۦٱ1ϣ*oFL=*]7'+nߩ6h6r<{`; &PN]ec0X{$@$ʿ]е u.ٴj %oGI 1D OB)WrZ{hs}\߻<U8"dql2"EFI~,uD׻s".ͯ,;Ä9`1P0۲pm!PӔĞ.^U xRZ+'1=JXـt"ʡQݶAu'IO^vhHN!pg ݚcH%a5Н~iJ_2!4M9;$j/m >iB'907I4=G{kLT99e!_ 4ﭸ D|.β(Zhr*WG:8+Ӹr+3i믬Du~`Dq90L5#y'##A$Px/,( #?3;L3-ѣ" y5wNJ]=uN1pש]TaN4sKPUƵ,O˱ZG(G\|Z>BtIF|9Meh`uiȓ$]} 5FWeLVgh)0Q.ߵ_ 9֘En)Զ"M&cDcEX{'AN>g4irD&<ͬ{b j(ul‰?:|y{/M ?=8?|2X&2YcFePʙ1.$񥪠8K[]^}!fNs>f|ߟ6-B3=`~ow'8(,CMemif'cc\} ǻ p3<-M?+iY{dY\(`t'ɫN-,Ԩ1g~o3"@F\N2qMe O(ԷA_Ȧ ꆒm4XB-cDbj,ZAUS|1θxJ"W~>@n xڪD9,ɵUIi#ДW gfN7>:>XKz˦m)B zd]CU'Md[3>Պ4./En/maۇs$p5EqySV2kJE8̏Yqr=̢OϵoQWK]HˌiIa2cr&yxLEړ>v'%Sy[G቏RLR|R6r FO녛̮|57E,1 fuIޏ$nP$"~C֗H6>Z ? :;Cgk>y7 t~@=$?O:` <'Xh`em);zn6KYK<Ȍacζ K0QS(ۇ/ EAt;:cT-٦Q''!ɪ*w[:d A?l;@ʼnQ@~UYL/i\szAXo[ACt3yGJG$)QX:[Uւ^{@OT—Z-z|P+4FuA/a)H UnV/Ncjb2~gr( ׽-v ݈6?YģO džVx6_x\^ޕt֒^UZh{ԹVln%K. 
ܦ3M SorBDg|q ' d5 ڦE h1!7u-q֏bk1Dq o6/;3'A/~QP}kHA+/9O6`D[:rG䗛Gii9.Is-=q,E^/L?ksQEw:syxw]"BWN<8pH1_laTke}e" l=s~9%=p?kR*+gt( rHM&E.xđI)zu.4UpфAI\XYjP^wU;m?u~dN8~ߌct us@xґqw_`J"N6ʕ&_[SW* %[shn;Qx2Fgvh~³e_[iQ!Aɟə|AHpԔǞ$V ̄<}0 79w|oqWdžUQ#ك|f\xptmZfk.3 9x0\Wl4ppu(z*w(\K檩hR8\SzU|'{`ԆB0/[tkRq[BQ"ǖfJ zVDeV8cc6Ll}1{"mGO.X:%~W ?qwu'0:=zԩTo7^r\ݷk)ٓb$u"r%&,譱WSN&3|:S`RkC'=].Eᦦ-;'!Gν׹3K(1+ [³Oe+d`GΛ Z8`>H6{gVUgZI]iW M uEၷwp8Y=K<A_R^Fe j2y'*e}Oy*ur 0@oirY>,ܻKfә{RYêL~Ov)g _ۇh4|auغHO;L~v@k&PZyZ_W[>K )8h@Z|4n0H׀Xa>N":~<`ȵkи$jy&33]SxZ* t"FX{s#z[4ᾜ4~B rdC*޼dӑ z|4Jx;}wmLl7jn-pd&䁏7Qݖ=+"a/ؿfvxx_ƃ/YP22nRXDe;ǡ^;_sԔu0L43=AhTIV=;t~[1l1 ex{c"/cIJBE'fT c&&d_ 79_f:?ǬS欻Va~bdpw_ ur#\ڇPwuwb6??鹯L~~=ŷn71u΅ǏtS^W;^0ԽZ.}+AwKw_^ym։_Gn݆my:þ+?mݣ^HwnꎚuwϿ)g_z^}}~9+ϥ\zޚqb]r.5O?wγ?/l4xya#ke_e|~+>^wENXgW\C'=aYی ^nvpw^YYǫ{t[=]}) }lzbO|Cw\Et׊cgtM=xY7:V/84xw h1#݌ Wxoջވ_|1W0k;/O핫>qgto\A:i0֑<ڱr%7>soϞtxދ=䴓7o uqެ'ݺon[cn[xm?3/Oc6na[}= W^mOLKb%6~n;;n}pّonߏ{?|ߌ=;\g﯇˶6<漵Gp5G~u[Gv覿9~hp_q9fY{q)|pY7?;֯;??N{%{ljό#;~ϷG'.xb1ϳt\wa+7?O]. zPwߔ޽Ƞ<څg}ŋ9uot{Rz _cW={F2םcS:oW0V#M g,gXkxV+mstkn{"~dɩn?}S컶:`S܇7=dᇧmύ579ລGg/.3SiO|V _7,|W7'G|r~>\P Er \J.-r+WɏrMI)9N%זz3rHn,rS 7[ȭ Kr{I,*w_rO G~K+ʃ!Py*3:jC>)Qsu\}N>/QsvԜ}UK&Qw-4GnH#qgvo_r!iwogvŤyܙ}P~Hǝۙ݇}H7Ӳ:t@Z^w΃\HcuNO+΋yVѝiU>|Z]w5tJ|IН3S:q'Z::s({.H;z)}VwNuUP6ҝcic}X֝giݹ6=g .m;Җs/}Nwu#s1}Aw.mtcVwN/c3;?ӎs4}Ewu;髺s6}}n|W}WvНiO7}tI[s;;s<;!=;Dy=o#uHGy }Gt,B:^=}D!{K׹"sE:gSt}:SuHNy$T|~sJ:St+:_#ӯtIsL:Wt2>IsN;|{҅tC:Kt.J|.=+$/+unJW>tSzҟtJ7wM:w?ܕFuJ{.NETu>KMtk6ss\s\s鯺ty;ݩ{NtKw<=tNL뜘y1={~Oӣ:WtL2=sf{O3=gzZOGӳ9S:u^M/ܚ^ѽ7H|8{8q:g>|8{ߜ8qyi3g>||8q^Arqy9g^:g<|8q^!qǙ:g>|8|8qm3g>|[d>|8qǙbv92g>|8qǙ3g.\8Dř3g%3g.\ҹ8sqٽJř3_eNΜ99ɜ99srVə3'g43'gNΜ/99sr}Nəy˙3/g^}unܜ9_ss͙͙37gn7ܜ9ss)ss͙sܜ9ss)ss͙:7gnܜ9ܜ9ss|9/g>|8q~@Ǚ3gZ3g>|g|8q+8qǙ3g>2g>|8qǙǙ3g>ί|8q-qǙ;:g>|xηq2e~ x >.<\xp~pÅs\8+\8ppಌ . .\VX .\8spYM  .\>sp .u.\8ppqoY8ppe .\os-[o,[o߲ο -Iu.<\xlpÅÅ .;<\xp޵pÅn:.{2 ..\\>.|\-|\qrDž >. 
>.|\qDžDž >.|\q➹qs :>.|\.|\qŅis[o¿Žu¿ _[o~o¿ot-[oq^o¿ -[ܗ-[oο - -[\o¿¿ -7[o»w n»w:-[nyL>w_8pn¹o:-|[t-[8;7}λw}»w -[xn2C»w 't-3u-[!-[xnyF»w  w -[^yn»^C»s˛>¹s s -[ޛ<~Ns+V{s+uέ"*Vέ[9ڋct[ڋ[roj/.oʿ^Dʿ+V{+V[ET[z\ETcu\ETt+Ut{+W<\ET<\ypQyp^DÕ+W+W<\GT<\ypQyp~D>DʽCʽ{+V{+V[?T[roProʽCʽ{+V{+V[wѹroʽDʽ{=ʽ{+V+V[soʽ{=ʽ{+Vέ[9rnQ9rnʹ^Dʹs+V{s+Vέ[ETέ[ڇ|[rm=\[rm#imjìV[?T[roProʽCʽ{+st[roQroʽ>Dʽ{+V{+V!*V[ڇ[roQynʻ>Dʻw+V{w+Vޭ[ETޭ[ynQV[&+V[EToj/o}oj/rp+W{Õ+W"=:W.\\ET.\\rqQrq^Dsq^Du^Dŕ+W{+W.\\ET.\\rqQrq^DDCʿ+V{|;NCooo̯{ {a!7777=߰^poo7A=߰wwwwCnnn77}ιa!87878787;ưa!7777}ް{{{{þCpopopopow {νa!7777?Cpppppppp}o{Csqpqpqa"8888A=|||| DqqqqL8888A:=||ÞCpppppppps{a!888888889D9888889GÞCpppppppps{a?888888888?ޘsopow }ް{{{{þCpopons ͹s ~Cppppppppo7;u 7n74n8qp8qp;C77n=4n8qp8qpC77ncu7nA4n8qp8qpDܸqqfqqō=ō7.n 7>n|كh|qfq~p8qrmnr>Dˍ/7/7^nMsrDɍ'7{'7NnA4Nn8qr8qrDɍ^s|pÍ}Í7n7nDƿ6o6CfpÍ}Í7n!7nžG>ޝ|rȜ_zZ 3ٜDEoptim/man/0000755000176200001440000000000014332756015012374 5ustar liggesusersDEoptim/man/DEoptim-methods.Rd0000644000176200001440000001213314332756015015665 0ustar liggesusers\name{DEoptim-methods} \alias{DEoptim-methods} \alias{plot.DEoptim} \alias{summary.DEoptim} \title{DEoptim-methods} \description{Methods for DEoptim objects.} \usage{ \method{summary}{DEoptim}(object, \dots) \method{plot}{DEoptim}(x, plot.type = c("bestmemit", "bestvalit", "storepop"), \dots) } \arguments{ \item{object}{an object of class \code{DEoptim}; usually, a result of a call to \code{\link{DEoptim}}.} \item{x}{an object of class \code{DEoptim}; usually, a result of a call to \code{\link{DEoptim}}.} \item{plot.type}{should we plot the best member at each iteration, the best value at each iteration or the intermediate populations?} \item{\dots}{further arguments passed to or from other methods.} } \details{ Members of the class \code{DEoptim} have a \code{plot} method that accepts the argument \code{plot.type}. \cr \code{plot.type = "bestmemit"} results in a plot of the parameter values that represent the lowest value of the objective function each generation. \code{plot.type = "bestvalit"} plots the best value of the objective function each generation. Finally, \code{plot.type = "storepop"} results in a plot of stored populations (which are only available if these have been saved by setting the \code{control} argument of \code{DEoptim} appropriately). Storing intermediate populations allows us to examine the progress of the optimization in detail. A summary method also exists and returns the best parameter vector, the best value of the objective function, the number of generations optimization ran, and the number of times the objective function was evaluated. } \note{ Further details and examples of the \R package \pkg{DEoptim} can be found in Mullen et al. (2011) and Ardia et al. (2011a, 2011b) or look at the package's vignette by typing \code{vignette("DEoptim")}. Please cite the package in publications. Use \code{citation("DEoptim")}. } \author{ David Ardia, Katharine Mullen \email{mullenkate@gmail.com}, Brian Peterson and Joshua Ulrich. } \seealso{ \code{\link{DEoptim}} and \code{\link{DEoptim.control}}. } \references{ Ardia, D., Boudt, K., Carl, P., Mullen, K.M., Peterson, B.G. (2011) Differential Evolution with \pkg{DEoptim}. An Application to Non-Convex Portfolio Optimization. \emph{R Journal}, 3(1), 27-34. \doi{10.32614/RJ-2011-005} Ardia, D., Ospina Arango, J.D., Giraldo Gomez, N.D. (2011) Jump-Diffusion Calibration using Differential Evolution. \emph{Wilmott Magazine}, 55 (September), 76-79. 
\doi{10.1002/wilm.10034} Mullen, K.M, Ardia, D., Gil, D., Windover, D., Cline,J. (2011). \pkg{DEoptim:} An R Package for Global Optimization by Differential Evolution. \emph{Journal of Statistical Software}, 40(6), 1-26. \doi{10.18637/jss.v040.i06} } \examples{ ## Rosenbrock Banana function ## The function has a global minimum f(x) = 0 at the point (1,1). ## Note that the vector of parameters to be optimized must be the first ## argument of the objective function passed to DEoptim. Rosenbrock <- function(x){ x1 <- x[1] x2 <- x[2] 100 * (x2 - x1 * x1)^2 + (1 - x1)^2 } lower <- c(-10, -10) upper <- -lower set.seed(1234) outDEoptim <- DEoptim(Rosenbrock, lower, upper) ## print output information summary(outDEoptim) ## plot the best members plot(outDEoptim, type = 'b') ## plot the best values dev.new() plot(outDEoptim, plot.type = "bestvalit", type = 'b', col = 'blue') ## rerun the optimization, and store intermediate populations outDEoptim <- DEoptim(Rosenbrock, lower, upper, DEoptim.control(itermax = 500, storepopfrom = 1, storepopfreq = 2)) summary(outDEoptim) ## plot intermediate populations dev.new() plot(outDEoptim, plot.type = "storepop") ## Wild function Wild <- function(x) 10 * sin(0.3 * x) * sin(1.3 * x^2) + 0.00001 * x^4 + 0.2 * x + 80 outDEoptim = DEoptim(Wild, lower = -50, upper = 50, DEoptim.control(trace = FALSE, storepopfrom = 50, storepopfreq = 1)) plot(outDEoptim, type = 'b') dev.new() plot(outDEoptim, plot.type = "bestvalit", type = 'b') \dontrun{ ## an example with a normal mixture model: requires package mvtnorm library(mvtnorm) ## neg value of the density function negPdfMix <- function(x) { tmp <- 0.5 * dmvnorm(x, c(-3, -3)) + 0.5 * dmvnorm(x, c(3, 3)) -tmp } ## wrapper plotting function plotNegPdfMix <- function(x1, x2) negPdfMix(cbind(x1, x2)) ## contour plot of the mixture x1 <- x2 <- seq(from = -10.0, to = 10.0, by = 0.1) thexlim <- theylim <- range(x1) z <- outer(x1, x2, FUN = plotNegPdfMix) contour(x1, x2, z, nlevel = 20, las = 1, col = rainbow(20), xlim = thexlim, ylim = theylim) set.seed(1234) outDEoptim <- DEoptim(negPdfMix, c(-10, -10), c(10, 10), DEoptim.control(NP = 100, itermax = 100, storepopfrom = 1, storepopfreq = 5)) ## convergence plot dev.new() plot(outDEoptim) ## the intermediate populations indicate the bi-modality of the function dev.new() plot(outDEoptim, plot.type = "storepop") } } \keyword{methods} DEoptim/man/DEoptim.Rd0000644000176200001440000004134414332756015014232 0ustar liggesusers\name{DEoptim} \alias{DEoptim} \title{Differential Evolution Optimization} \concept{minimization} \description{ Performs evolutionary global optimization via the Differential Evolution algorithm. } \usage{ DEoptim(fn, lower, upper, control = DEoptim.control(), ..., fnMap=NULL) } \arguments{ \item{fn}{the function to be optimized (minimized). The function should have as its first argument the vector of real-valued parameters to optimize, and return a scalar real result. \code{NA} and \code{NaN} values are not allowed.} \item{lower, upper}{two vectors specifying scalar real lower and upper bounds on each parameter to be optimized, so that the i-th element of \code{lower} and \code{upper} applies to the i-th parameter. The implementation searches between \code{lower} and \code{upper} for the global optimum (minimum) of \code{fn}.} \item{control}{a list of control parameters; see \code{\link{DEoptim.control}}.} \item{fnMap}{an optional function that will be run after each population is created, but before the population is passed to the objective function. 
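For instance (an illustrative sketch only, not the only possible use), a map such as \code{function(x) round(x)} forces every parameter to take integer values.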
This allows the user to impose integer/cardinality constraints. See the sandbox directory of the source code for a simple example.} \item{...}{further arguments to be passed to \code{fn}.} } \details{ \code{DEoptim} performs optimization (minimization) of \code{fn}. The \code{control} argument is a list; see the help file for \code{\link{DEoptim.control}} for details. The \R implementation of Differential Evolution (DE), \pkg{DEoptim}, was first published on the Comprehensive \R Archive Network (CRAN) in 2005 by David Ardia. Early versions were written in pure \R. Since version 2.0-0 (published to CRAN in 2009) the package has relied on an interface to a C implementation of DE, which is significantly faster on most problems as compared to the implementation in pure \R. The C interface is in many respects similar to the MS Visual C++ v5.0 implementation of the Differential Evolution algorithm distributed with the book \emph{Differential Evolution -- A Practical Approach to Global Optimization} by Price, K.V., Storn, R.M., Lampinen J.A., Springer-Verlag, 2006. Since version 2.0-3 the C implementation dynamically allocates the memory required to store the population, removing limitations on the number of members in the population and length of the parameter vectors that may be optimized. Since version 2.2-0, the package allows for parallel operation, so that the evaluations of the objective function may be performed using all available cores. This is accomplished using either the built-in \pkg{parallel} package or the \pkg{foreach} package. If parallel operation is desired, the user should set \code{parallelType} and make sure that the arguments and packages needed by the objective function are available; see \code{\link{DEoptim.control}}, the example below and examples in the sandbox directory for details. Since becoming publicly available, the package \pkg{DEoptim} has been used by several authors to solve optimization problems arising in diverse domains; see Mullen et al. (2011) for a review. To perform a maximization (instead of minimization) of a given function, simply define a new function which is the opposite of the function to maximize and apply \code{DEoptim} to it. To integrate additional constraints (other than box constraints) on the parameters \code{x} of \code{fn(x)}, for instance \code{x[1] + x[2]^2 < 2}, integrate the constraint within the function to optimize, for instance: \preformatted{
fn <- function(x)\{
  if (x[1] + x[2]^2 >= 2)\{
    r <- Inf
  \} else \{
    ...
  \}
  return(r)
\}
} This simplistic strategy usually does not work all that well for gradient-based or Newton-type methods. It is likely to be alright when the solution is in the interior of the feasible region, but when the solution is on the boundary, the optimization algorithm would have a difficult time converging. Furthermore, when the solution is on the boundary, this strategy would make the algorithm converge to an inferior solution in the interior. However, for methods such as DE which are not gradient based, this strategy might not be that bad. Note that \code{DEoptim} stops if any \code{NA} or \code{NaN} value is obtained. You have to redefine your function to handle these values (for instance, set \code{NA} to \code{Inf} in your objective function). It is important to emphasize that the result of \code{DEoptim} is a random variable, i.e., different results may be obtained when the algorithm is run repeatedly with the same settings.
Hence, the user should set the random seed if they want to reproduce the results, e.g., by setting \code{set.seed(1234)} before the call of \code{DEoptim}. \code{DEoptim} relies on repeated evaluation of the objective function in order to move the population toward a global minimum. Users interested in making \code{DEoptim} run as fast as possible should consider using the package in parallel mode (so that all CPU's available are used), and also ensure that evaluation of the objective function is as efficient as possible (e.g. by using vectorization in pure \R code, or writing parts of the objective function in a lower-level language like C or Fortran). Further details and examples of the \R package \pkg{DEoptim} can be found in Mullen et al. (2011) and Ardia et al. (2011a, 2011b) or look at the package's vignette by typing \code{vignette("DEoptim")}. Also, an illustration of the package usage for a high-dimensional non-linear portfolio optimization problem is available by typing \code{vignette("DEoptimPortfolioOptimization")}. Please cite the package in publications. Use \code{citation("DEoptim")}. } \value{ The output of the function \code{DEoptim} is a member of the \code{S3} class \code{DEoptim}. More precisely, this is a list (of length 2) containing the following elements:\cr \code{optim}, a list containing the following elements: \itemize{ \item \code{bestmem}: the best set of parameters found. \item \code{bestval}: the value of \code{fn} corresponding to \code{bestmem}. \item \code{nfeval}: number of function evaluations. \item \code{iter}: number of procedure iterations. } \code{member}, a list containing the following elements: \itemize{ \item \code{lower}: the lower boundary. \item \code{upper}: the upper boundary. \item \code{bestvalit}: the best value of \code{fn} at each iteration. \item \code{bestmemit}: the best member at each iteration. \item \code{pop}: the population generated at the last iteration. \item \code{storepop}: a list containing the intermediate populations. } Members of the class \code{DEoptim} have a \code{plot} method that accepts the argument \code{plot.type}.\cr \code{plot.type = "bestmemit"} results in a plot of the parameter values that represent the lowest value of the objective function each generation. \code{plot.type = "bestvalit"} plots the best value of the objective function each generation. Finally, \code{plot.type = "storepop"} results in a plot of stored populations (which are only available if these have been saved by setting the \code{control} argument of \code{DEoptim} appropriately). Storing intermediate populations allows us to examine the progress of the optimization in detail. A summary method also exists and returns the best parameter vector, the best value of the objective function, the number of generations optimization ran, and the number of times the objective function was evaluated. } \note{ \emph{Differential Evolution} (DE) is a search heuristic introduced by Storn and Price (1997). Its remarkable performance as a global optimization algorithm on continuous numerical minimization problems has been extensively explored; see Price et al. (2006). DE belongs to the class of genetic algorithms which use biology-inspired operations of crossover, mutation, and selection on a population in order to minimize an objective function over the course of successive generations (see Mitchell, 1998). 
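As a purely illustrative sketch of the scheme described in the following paragraphs (written in plain \R rather than the package's internal C implementation; the population matrix \code{X} with \code{NP} rows, and the objects \code{fn}, \code{lower}, \code{upper}, \code{F} and \code{CR}, are assumed to exist), one generation of the classical DE/rand/1/bin strategy could be written as: \preformatted{
for (i in 1:NP) \{
  r  <- sample((1:NP)[-i], 3)                   # three distinct members, none equal to i
  v  <- X[r[1], ] + F * (X[r[2], ] - X[r[3], ]) # differential mutation
  cr <- runif(length(lower)) < CR               # crossover mask
  u  <- ifelse(cr, v, X[i, ])                   # trial vector
  u  <- pmin(pmax(u, lower), upper)             # one way to reset bound violations
  if (fn(u) <= fn(X[i, ])) X[i, ] <- u          # selection
\}
}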
As with other evolutionary algorithms, DE solves optimization problems by evolving a population of candidate solutions using alteration and selection operators. DE uses floating-point instead of bit-string encoding of population members, and arithmetic operations instead of logical operations in mutation. DE is particularly well-suited to find the global optimum of a real-valued function of real-valued parameters, and does not require that the function be either continuous or differentiable. Let \eqn{\mathit{NP}}{NP} denote the number of parameter vectors (members) \eqn{x \in R^d}{x in R^d} in the population. In order to create the initial generation, \eqn{\mathit{NP}}{NP} guesses for the optimal value of the parameter vector are made, either using random values between lower and upper bounds (defined by the user) or using values given by the user. Each generation involves creation of a new population from the current population members \eqn{\{ x_i \,|\, i = 1, \ldots, \mathit{NP}\}}{{x_i | i=1,...,NP}}, where \eqn{i} indexes the vectors that make up the population. This is accomplished using \emph{differential mutation} of the population members. An initial mutant parameter vector \eqn{v_i} is created by choosing three members of the population, \eqn{x_{r_0}}, \eqn{x_{r_1}} and \eqn{x_{r_2}}, at random. Then \eqn{v_i} is generated as \deqn{v_i \doteq x_{r_0} + \mathit{F} \cdot (x_{r_1} - x_{r_2})}{v_i := x_{r_0} + F * (x_{r_1} - x_{r_2})} where \eqn{\mathit{F}}{F} is the differential weighting factor, effective values for which are typically between 0 and 1. After the first mutation operation, mutation is continued until \eqn{d} mutations have been made, with a crossover probability \eqn{\mathit{CR} \in [0,1]}{CR in [0,1]}. The crossover probability \eqn{\mathit{CR}}{CR} controls the fraction of the parameter values that are copied from the mutant. If an element of the trial parameter vector is found to violate the bounds after mutation and crossover, it is reset in such a way that the bounds are respected (with the specific protocol depending on the implementation). Then, the objective function values associated with the children are determined. If a trial vector has equal or lower objective function value than the previous vector it replaces the previous vector in the population; otherwise the previous vector remains. Variations of this scheme have also been proposed; see Price et al. (2006) and \code{\link{DEoptim.control}}. Intuitively, the effect of the scheme is that the shape of the distribution of the population in the search space is converging with respect to size and direction towards areas with high fitness. The closer the population gets to the global optimum, the more the distribution will shrink and therefore reinforce the generation of smaller difference vectors. As a general advice regarding the choice of \eqn{\mathit{NP}}{NP}, \eqn{\mathit{F}}{F} and \eqn{\mathit{CR}}{CR}, Storn et al. (2006) state the following: Set the number of parents \eqn{\mathit{NP}}{NP} to 10 times the number of parameters, select differential weighting factor \eqn{\mathit{F} = 0.8}{F = 0.8} and crossover constant \eqn{\mathit{CR} = 0.9}{CR = 0.9}. Make sure that you initialize your parameter vectors by exploiting their full numerical range, i.e., if a parameter is allowed to exhibit values in the range [-100, 100] it is a good idea to pick the initial values from this range instead of unnecessarily restricting diversity. 
If you experience misconvergence in the optimization process you usually have to increase the value for \eqn{\mathit{NP}}{NP}, but often you only have to adjust \eqn{\mathit{F}}{F} to be a little lower or higher than 0.8. If you increase \eqn{\mathit{NP}}{NP} and simultaneously lower \eqn{\mathit{F}}{F} a little, convergence is more likely to occur but generally takes longer, i.e., DE is getting more robust (there is always a convergence speed/robustness trade-off). DE is much more sensitive to the choice of \eqn{\mathit{F}}{F} than it is to the choice of \eqn{\mathit{CR}}{CR}. \eqn{\mathit{CR}}{CR} is more like a fine tuning element. High values of \eqn{\mathit{CR}}{CR} like \eqn{\mathit{CR} = 1}{CR = 1} give faster convergence if convergence occurs. Sometimes, however, you have to go down as much as \eqn{\mathit{CR} = 0}{CR = 0} to make DE robust enough for a particular problem. For more details on the DE strategy, we refer the reader to Storn and Price (1997) and Price et al. (2006). } \references{ Ardia, D., Boudt, K., Carl, P., Mullen, K.M., Peterson, B.G. (2011) Differential Evolution with \pkg{DEoptim}. An Application to Non-Convex Portfolio Optimization. \emph{R Journal}, 3(1), 27-34. \doi{10.32614/RJ-2011-005} Ardia, D., Ospina Arango, J.D., Giraldo Gomez, N.D. (2011) Jump-Diffusion Calibration using Differential Evolution. \emph{Wilmott Magazine}, 55 (September), 76-79. \doi{10.1002/wilm.10034} Mitchell, M. (1998) \emph{An Introduction to Genetic Algorithms}. The MIT Press. ISBN 0262631857. Mullen, K.M, Ardia, D., Gil, D., Windover, D., Cline,J. (2011). \pkg{DEoptim:} An R Package for Global Optimization by Differential Evolution. \emph{Journal of Statistical Software}, 40(6), 1-26. \doi{10.18637/jss.v040.i06} Price, K.V., Storn, R.M., Lampinen J.A. (2006) \emph{Differential Evolution - A Practical Approach to Global Optimization}. Berlin Heidelberg: Springer-Verlag. ISBN 3540209506. Storn, R. and Price, K. (1997) Differential Evolution -- A Simple and Efficient Heuristic for Global Optimization over Continuous Spaces, \emph{Journal of Global Optimization}, 11:4, 341--359. } \author{ David Ardia, Katharine Mullen \email{mullenkate@gmail.com}, Brian Peterson and Joshua Ulrich. } \seealso{ \code{\link{DEoptim.control}} for control arguments, \code{\link{DEoptim-methods}} for methods on \code{DEoptim} objects, including some examples in plotting the results; \code{\link{optim}} or \code{\link{constrOptim}} for alternative optimization algorithms. } \examples{ ## Rosenbrock Banana function ## The function has a global minimum f(x) = 0 at the point (1,1). ## Note that the vector of parameters to be optimized must be the first ## argument of the objective function passed to DEoptim. Rosenbrock <- function(x){ x1 <- x[1] x2 <- x[2] 100 * (x2 - x1 * x1)^2 + (1 - x1)^2 } ## DEoptim searches for minima of the objective function between ## lower and upper bounds on each parameter to be optimized. Therefore ## in the call to DEoptim we specify vectors that comprise the ## lower and upper bounds; these vectors are the same length as the ## parameter vector. 
lower <- c(-10,-10) upper <- -lower ## run DEoptim and set a seed first for replicability set.seed(1234) DEoptim(Rosenbrock, lower, upper) ## increase the population size DEoptim(Rosenbrock, lower, upper, DEoptim.control(NP = 100)) ## change other settings and store the output outDEoptim <- DEoptim(Rosenbrock, lower, upper, DEoptim.control(NP = 80, itermax = 400, F = 1.2, CR = 0.7)) ## plot the output plot(outDEoptim) ## 'Wild' function, global minimum at about -15.81515 Wild <- function(x) 10 * sin(0.3 * x) * sin(1.3 * x^2) + 0.00001 * x^4 + 0.2 * x + 80 plot(Wild, -50, 50, n = 1000, main = "'Wild function'") outDEoptim <- DEoptim(Wild, lower = -50, upper = 50, control = DEoptim.control(trace = FALSE)) plot(outDEoptim) DEoptim(Wild, lower = -50, upper = 50, control = DEoptim.control(NP = 50)) ## The below examples shows how the call to DEoptim can be ## parallelized. ## Note that if your objective function requires packages to be ## loaded or has arguments supplied via \code{...}, these should be ## specified using the \code{packages} and \code{parVar} arguments ## in control. \dontrun{ Genrose <- function(x) { ## One generalization of the Rosenbrock banana valley function (n parameters) n <- length(x) ## make it take some time ... Sys.sleep(.001) 1.0 + sum (100 * (x[-n]^2 - x[-1])^2 + (x[-1] - 1)^2) } # get some run-time on simple problems maxIt <- 250 n <- 5 oneCore <- system.time( DEoptim(fn=Genrose, lower=rep(-25, n), upper=rep(25, n), control=list(NP=10*n, itermax=maxIt))) withParallel <- system.time( DEoptim(fn=Genrose, lower=rep(-25, n), upper=rep(25, n), control=list(NP=10*n, itermax=maxIt, parallelType=1))) ## Compare timings (oneCore) (withParallel) } } \keyword{nonlinear} \keyword{optimize} DEoptim/man/DEoptim.control.Rd0000755000176200001440000002673114332756015015717 0ustar liggesusers\name{DEoptim.control} \alias{DEoptim.control} \title{Control various aspects of the DEoptim implementation} \description{ Allow the user to set some characteristics of the Differential Evolution optimization algorithm implemented in \code{DEoptim}. } \usage{ DEoptim.control(VTR = -Inf, strategy = 2, bs = FALSE, NP = NA, itermax = 200, CR = 0.5, F = 0.8, trace = TRUE, initialpop = NULL, storepopfrom = itermax + 1, storepopfreq = 1, p = 0.2, c = 0, reltol, steptol, parallelType = c("none", "auto", "parallel", "foreach"), cluster = NULL, packages = c(), parVar = c(), foreachArgs = list(), parallelArgs = NULL) } \arguments{ \item{VTR}{the value to be reached. The optimization process will stop if either the maximum number of iterations \code{itermax} is reached or the best parameter vector \code{bestmem} has found a value \code{fn(bestmem) <= VTR}. Default to \code{-Inf}.} \item{strategy}{defines the Differential Evolution strategy used in the optimization procedure:\cr \code{1}: DE / rand / 1 / bin (classical strategy)\cr \code{2}: DE / local-to-best / 1 / bin (default)\cr \code{3}: DE / best / 1 / bin with jitter\cr \code{4}: DE / rand / 1 / bin with per-vector-dither\cr \code{5}: DE / rand / 1 / bin with per-generation-dither\cr \code{6}: DE / current-to-p-best / 1\cr any value not above: variation to DE / rand / 1 / bin: either-or-algorithm. Default strategy is currently \code{2}. See *Details*. } \item{bs}{if \code{FALSE} then every mutant will be tested against a member in the previous generation, and the best value will proceed into the next generation (this is standard trial vs. target selection). 
If \code{TRUE} then the old generation and \code{NP} mutants will be sorted by their associated objective function values, and the best \code{NP} vectors will proceed into the next generation (best of parent and child selection). Default is \code{FALSE}.} \item{NP}{number of population members. Defaults to \code{NA}; if the user does not change the value of \code{NP} from \code{NA} or specifies a value less than 4 it is reset when \code{DEoptim} is called as \code{10*length(lower)}. For many problems it is best to set \code{NP} to be at least 10 times the length of the parameter vector. } \item{itermax}{the maximum iteration (population generation) allowed. Default is \code{200}.} \item{CR}{crossover probability from interval [0,1]. Default to \code{0.5}.} \item{F}{differential weighting factor from interval [0,2]. Default to \code{0.8}.} \item{trace}{Positive integer or logical value indicating whether printing of progress occurs at each iteration. The default value is \code{TRUE}. If a positive integer is specified, printing occurs every \code{trace} iterations. } \item{initialpop}{an initial population used as a starting population in the optimization procedure. May be useful to speed up the convergence. Default to \code{NULL}. If given, each member of the initial population should be given as a row of a numeric matrix, so that \code{initialpop} is a matrix with \code{NP} rows and a number of columns equal to the length of the parameter vector to be optimized. } \item{storepopfrom}{from which generation should the following intermediate populations be stored in memory. Default to \code{itermax + 1}, i.e., no intermediate population is stored.} \item{storepopfreq}{the frequency with which populations are stored. Default to \code{1}, i.e., every intermediate population is stored.} \item{p}{when \code{strategy = 6}, the top (100 * p)\% best solutions are used in the mutation. \code{p} must be defined in (0,1].} \item{c}{\code{c} controls the speed of the crossover adaptation. Higher values of \code{c} give more weight to the current successful mutations. \code{c} must be defined in (0,1].} \item{reltol}{relative convergence tolerance. The algorithm stops if it is unable to reduce the value by a factor of \code{reltol * (abs(val) + reltol)} after \code{steptol} steps. Defaults to \code{sqrt(.Machine$double.eps)}, typically about \code{1e-8}.} \item{steptol}{see \code{reltol}. Defaults to \code{itermax}.} \item{parallelType}{Defines the type of parallelization to employ, if any. \code{none}: The default, this uses \code{DEoptim} on only one core. \code{auto}: will attempt to auto-detect \code{foreach}, or \code{parallel}. \code{parallel}: This uses all available cores, via the \pkg{parallel} package, to run \code{DEoptim}. \code{foreach}: This uses the \pkg{foreach} package for parallelism; see the \code{sandbox} directory in the source code for examples. } \item{cluster}{Existing \pkg{parallel} cluster object. If provided, overrides + specified \code{parallelType}. Using \code{cluster} allows fine-grained control + over the number of used cores and exported data.} \item{packages}{Used if \code{parallelType='parallel'}; a list of package names (as strings) that need to be loaded for use by the objective function. } \item{parVar}{Used if \code{parallelType='parallel'}; a list of variable names (as strings) that need to exist in the environment for use by the objective function or are used as arguments by the objective function. 
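For example (here \pkg{mvtnorm} and \code{sigma} are purely hypothetical names standing for a package and an object needed by the objective function): \code{DEoptim.control(parallelType = "parallel", packages = c("mvtnorm"), parVar = c("sigma"))}.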
} \item{foreachArgs}{A list of named arguments for the \code{foreach} function from the package \pkg{foreach}. The arguments \code{i}, \code{.combine} and \code{.export} are not possible to set here; they are set internally. } \item{parallelArgs}{A list of named arguments for the parallel engine. For package \pkg{foreach}, the argument \code{i} is not possible to set here; it is set internally. } } \value{ The default value of \code{control} is the return value of \code{DEoptim.control()}, which is a list (and a member of the \code{S3} class \code{DEoptim.control}) with the above elements. } \details{ This defines the Differential Evolution strategy used in the optimization procedure, described below in the terms used by Price et al. (2006); see also Mullen et al. (2009) for details. \itemize{ \item \code{strategy = 1}: DE / rand / 1 / bin. \cr This strategy is the classical approach for DE, and is described in \code{\link{DEoptim}}. \item \code{strategy = 2}: DE / local-to-best / 1 / bin. \cr In place of the classical DE mutation the expression \deqn{ v_{i,g} = old_{i,g} + (best_{g} - old_{i,g}) + x_{r0,g} + F \cdot (x_{r1,g} - x_{r2,g}) }{ v_i,g = old_i,g + (best_g - old_i,g) + x_r0,g + F * (x_r1,g - x_r2,g) } is used, where \eqn{old_{i,g}}{old_i,g} and \eqn{best_{g}}{best_g} are the \eqn{i}-th member and best member, respectively, of the previous population. This strategy is currently used by default. \item \code{strategy = 3}: DE / best / 1 / bin with jitter.\cr In place of the classical DE mutation the expression \deqn{ v_{i,g} = best_{g} + jitter + F \cdot (x_{r1,g} - x_{r2,g}) }{ v_i,g = best_g + jitter + F * (x_r1,g - x_r2,g) } is used, where \eqn{jitter} is defined as 0.0001 * \code{rand} + F. \item \code{strategy = 4}: DE / rand / 1 / bin with per vector dither.\cr In place of the classical DE mutation the expression \deqn{ v_{i,g} = x_{r0,g} + dither \cdot (x_{r1,g} - x_{r2,g}) }{ v_i,g = x_r0,g + dither * (x_r1,g - x_r2,g) } is used, where \eqn{dither} is calculated as \eqn{F + \code{rand} * (1 - F)}. \item \code{strategy = 5}: DE / rand / 1 / bin with per generation dither.\cr The strategy described for \code{4} is used, but \eqn{dither} is only determined once per-generation. \item \code{strategy = 6}: DE / current-to-p-best / 1.\cr The top \eqn{(100*p)} percent best solutions are used in the mutation, where \eqn{p} is defined in \eqn{(0,1]}. \item any value not above: variation to DE / rand / 1 / bin: either-or algorithm.\cr In the case that \code{rand} < 0.5, the classical strategy \code{strategy = 1} is used. Otherwise, the expression \deqn{ v_{i,g} = x_{r0,g} + 0.5 \cdot (F + 1) \cdot (x_{r1,g} + x_{r2,g} - 2 \cdot x_{r0,g}) }{ v_i,g = x_r0,g + 0.5 * (F + 1) * (x_r1,g + x_r2,g - 2 * x_r0,g) } is used. } Several conditions can cause the optimization process to stop: \itemize{ \item{if the best parameter vector (\code{bestmem}) produces a value less than or equal to \code{VTR} (i.e. \code{fn(bestmem) <= VTR}), or} \item{if the maximum number of iterations is reached (\code{itermax}), or} \item{if a number (\code{steptol}) of consecutive iterations are unable to reduce the best function value by a certain amount (\code{reltol * (abs(val) + reltol)}). \code{100*reltol} is approximately the percent change of the objective value required to consider the parameter set an improvement over the current best member.} } Zhang and Sanderson (2009) define several extensions to the DE algorithm, including strategy 6, DE/current-to-p-best/1. 
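A minimal invocation sketch (mirroring the \code{adaptDE} call in \code{demo(benchmarks)}; \code{fn}, \code{lower} and \code{upper} are placeholders and \code{p} keeps its default value of 0.2): \preformatted{
DEoptim(fn, lower, upper,
        control = DEoptim.control(NP = 10 * length(lower),
                                  itermax = 250, strategy = 6))
}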
They also define a self-adaptive mechanism for the other control parameters. This self-adaptation will speed convergence on many problems, and is defined by the control parameter \code{c}. If \code{c} is non-zero, crossover and mutation will be adapted by the algorithm. Values in the range of \code{c=.05} to \code{c=.5} appear to work best for most problems, though the adaptive algorithm is robust to a wide range of \code{c}. } \note{ Further details and examples of the \R package \pkg{DEoptim} can be found in Mullen et al. (2011) and Ardia et al. (2011a, 2011b) or look at the package's vignette by typing \code{vignette("DEoptim")}. Also, an illustration of the package usage for a high-dimensional non-linear portfolio optimization problem is available by typing \code{vignette("DEoptimPortfolioOptimization")}. Please cite the package in publications. Use \code{citation("DEoptim")}. } \seealso{ \code{\link{DEoptim}} and \code{\link{DEoptim-methods}}. } \references{ Ardia, D., Boudt, K., Carl, P., Mullen, K.M., Peterson, B.G. (2011) Differential Evolution with \pkg{DEoptim}. An Application to Non-Convex Portfolio Optimization. \emph{R Journal}, 3(1), 27-34. \doi{10.32614/RJ-2011-005} Ardia, D., Ospina Arango, J.D., Giraldo Gomez, N.D. (2011) Jump-Diffusion Calibration using Differential Evolution. \emph{Wilmott Magazine}, 55 (September), 76-79. \doi{10.1002/wilm.10034} Mullen, K.M, Ardia, D., Gil, D., Windover, D., Cline,J. (2011). \pkg{DEoptim:} An R Package for Global Optimization by Differential Evolution. \emph{Journal of Statistical Software}, 40(6), 1-26. \doi{10.18637/jss.v040.i06} Price, K.V., Storn, R.M., Lampinen J.A. (2006) \emph{Differential Evolution - A Practical Approach to Global Optimization}. Berlin Heidelberg: Springer-Verlag. ISBN 3540209506. Zhang, J. and Sanderson, A. (2009) \emph{Adaptive Differential Evolution} Springer-Verlag. ISBN 978-3-642-01526-7 } \author{ David Ardia, Katharine Mullen \email{mullenkate@gmail.com}, Brian Peterson and Joshua Ulrich. } \examples{ ## set the population size to 20 DEoptim.control(NP = 20) ## set the population size, the number of iterations and don't ## display the iterations during optimization DEoptim.control(NP = 20, itermax = 100, trace = FALSE) } \keyword{nonlinear} \keyword{optimize}DEoptim/man/xrrData.Rd0000644000176200001440000000070114332756015014266 0ustar liggesusers\name{xrrData} \alias{xrrData} \docType{data} \title{X-ray reflectometry data} \usage{ data("xrrData") } \description{ See Mullen et al. (2011) for description of this dataset. } \references{ Mullen, K.M, Ardia, D., Gil, D., Windover, D., Cline,J. (2011). \pkg{DEoptim:} An R Package for Global Optimization by Differential Evolution. \emph{Journal of Statistical Software}, 40(6), 1-26. \doi{10.18637/jss.v040.i06} } \keyword{datasets} DEoptim/man/SMI.Rd0000644000176200001440000000067614332756015013324 0ustar liggesusers\name{SMI} \alias{SMI} \alias{y} \docType{data} \title{Swiss Market Index data} \usage{ data("SMI") } \description{ See Mullen et al. (2011) for description of this dataset. } \references{ Mullen, K.M, Ardia, D., Gil, D., Windover, D., Cline,J. (2011). \pkg{DEoptim:} An R Package for Global Optimization by Differential Evolution. \emph{Journal of Statistical Software}, 40(6), 1-26. 
\doi{10.18637/jss.v040.i06} } \keyword{datasets} DEoptim/DESCRIPTION0000644000176200001440000000235314333510001013312 0ustar liggesusersPackage: DEoptim Version: 2.2-8 Title: Global Optimization by Differential Evolution Authors@R: c(person("David", "Ardia", role = c("aut"), email = "david.ardia.ch@gmail.com", comment = c(ORCID = "0000-0003-2823-782X")), person("Katharine", "Mullen", role = c("aut", "cre"), email="mullenkate@gmail.com"), person("Brian", "Peterson", role = "aut"), person("Joshua", "Ulrich", role = "aut"), person("Kris", "Boudt", role = "ctb")) Description: Implements the Differential Evolution algorithm for global optimization of a real-valued function of a real-valued parameter vector as described in Mullen et al. (2011) . Suggests: foreach, iterators, colorspace, lattice, parallelly Depends: parallel Imports: methods BugReports: https://github.com/ArdiaD/DEoptim/issues URL: https://github.com/ArdiaD/DEoptim License: GPL (>= 2) Repository: CRAN Maintainer: Katharine Mullen NeedsCompilation: yes Packaged: 2022-11-09 22:45:39 UTC; kmm-windog Author: David Ardia [aut] (), Katharine Mullen [aut, cre], Brian Peterson [aut], Joshua Ulrich [aut], Kris Boudt [ctb] Date/Publication: 2022-11-11 18:10:09 UTC DEoptim/build/0000755000176200001440000000000014333026623012714 5ustar liggesusersDEoptim/build/partial.rdb0000644000176200001440000000007414333026623015042 0ustar liggesusersb```b`a 00 FN ͚Z d@$w7DEoptim/tests/0000755000176200001440000000000014332756015012763 5ustar liggesusersDEoptim/tests/tests.R0000644000176200001440000000100714332756015014246 0ustar liggesuserslibrary(DEoptim) # Tests for partial argument matching (#2) # 'p' matched 'params' (prior to ae82a815751f6c1eeaef4e51430df8fcef556af9) # 'M' matched MARGIN # 'F' matched FUN RosenbrockPartialMatch <- function(x, p, M, F) { x1 <- x[1] x2 <- x[2] 100 * (x2 - x1 * x1)^2 + (1 - x1)^2 } lower <- c(-10, -10) upper <- -lower set.seed(21) deCtrl <- DEoptim.control(trace = FALSE, parallelType = "parallel") deopt <- DEoptim(RosenbrockPartialMatch, lower, upper, deCtrl, p = NULL, M = NULL, F = NULL)DEoptim/src/0000755000176200001440000000000014333026623012404 5ustar liggesusersDEoptim/src/de4_0.c0000644000176200001440000005416514332756015013462 0ustar liggesusers /*************************************************************** Implementation of DE based loosely on DE-Engine v4.0, Rainer Storn, 2004 by Katharine Mullen, 2009 modified by Joshua Ulrich, 2010 Storn's MS Visual C++ v5.0 was downloaded from http://www.icsi.berkeley.edu/~storn/DeWin.zip Note that the struct t_pop was not used since we want to store in it a parameter vector of arbitrary length -- using a construction like typedef struct t_pop { double fa_cost[MAXCOST]; double fa_vector[]; } t_pop; I have not been able to use Calloc or R_alloc to set aside memory for type t_pop dynamically; I did not look into the bowels but believe this is likely a limitation of these functions. 
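   Instead, the implementation below keeps the population in a plain
   i_NP x i_D double array obtained from allocMatrix() (member i,
   parameter j is stored at index [i + i_NP*j]) and keeps the associated
   objective-function values in a separate length-i_NP vector from
   allocVector().  A sketch of that allocation pattern, using
   illustrative variable names only:

     SEXP sexp_pop = PROTECT(allocMatrix(REALSXP, i_NP, i_D));
     double *pop   = REAL(sexp_pop);
     SEXP sexp_val = PROTECT(allocVector(REALSXP, i_NP));
     double *val   = REAL(sexp_val);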
***************************************************************/ /*------Include section----------------------------------------------*/ #include #include #include SEXP getListElement(SEXP list, char *str); SEXP DEoptimC(SEXP lower, SEXP upper, SEXP fn, SEXP control, SEXP rho, SEXP fnMap); void devol(double VTR, double d_weight, double fcross, int i_bs_flag, double *d_lower, double *d_upper, SEXP fcall, SEXP rho, int i_trace, int i_strategy, int i_D, int i_NP, int i_itermax, double *initialpopv, int i_storepopfreq, int i_storepopfrom, int i_specinitialpop, double *gt_bestP, double *gt_bestC, double *gd_pop, double *gd_storepop, double *gd_bestmemit, double *gd_bestvalit, int *gi_iter, double d_pPct, double d_c, long *l_nfeval, double d_reltol, int i_steptol, SEXP fnMap); void permute(int ia_urn2[], int i_urn2_depth, int i_NP, int i_avoid, int ia_urn1[]); SEXP popEvaluate(long *l_nfeval, SEXP parMat, SEXP fcall, SEXP env, int incrementEval); /*------General functions-----------------------------------------*/ SEXP DEoptimC(SEXP lower, SEXP upper, SEXP fn, SEXP control, SEXP rho, SEXP fnMap) { int i, j, P=0; if (!isFunction(fn)) error("fn is not a function!"); if (!isEnvironment(rho)) error("rho is not an environment!"); /*-----Initialization of annealing parameters-------------------------*/ /* value to reach */ double VTR = NUMERIC_VALUE(getListElement(control, "VTR")); /* chooses DE-strategy */ int i_strategy = INTEGER_VALUE(getListElement(control, "strategy")); /* Maximum number of generations */ int i_itermax = INTEGER_VALUE(getListElement(control, "itermax")); /* Dimension of parameter vector */ int i_D = INTEGER_VALUE(getListElement(control, "npar")); /* Number of population members */ int i_NP = INTEGER_VALUE(getListElement(control, "NP")); /* When to start storing populations */ int i_storepopfrom = INTEGER_VALUE(getListElement(control, "storepopfrom"))-1; /* How often to store populations */ int i_storepopfreq = INTEGER_VALUE(getListElement(control, "storepopfreq")); /* User-defined inital population */ int i_specinitialpop = INTEGER_VALUE(getListElement(control, "specinitialpop")); double *initialpopv = NUMERIC_POINTER(getListElement(control, "initialpop")); /* stepsize */ double d_weight = NUMERIC_VALUE(getListElement(control, "F")); /* crossover probability */ double d_cross = NUMERIC_VALUE(getListElement(control, "CR")); /* Best of parent and child */ int i_bs_flag = NUMERIC_VALUE(getListElement(control, "bs")); /* Print progress? 
*/ int i_trace = NUMERIC_VALUE(getListElement(control, "trace")); /* p to define the top 100p% best solutions */ double d_pPct = NUMERIC_VALUE(getListElement(control, "p")); /* crossover adaptation (a positive constant between 0 and 1) */ double d_c = NUMERIC_VALUE(getListElement(control, "c")); /* relative tolerance */ double d_reltol = NUMERIC_VALUE(getListElement(control, "reltol")); /* relative tolerance steps */ int i_steptol = NUMERIC_VALUE(getListElement(control, "steptol")); int i_nstorepop = ceil((i_itermax - i_storepopfrom) / i_storepopfreq); /* Use S_alloc, since it initializes with zeros FIXME: these should be SEXP */ double *gd_storepop = (double *)S_alloc(i_NP,sizeof(double) * i_D * i_nstorepop); /* External pointers to return to R */ SEXP sexp_bestmem, sexp_bestval, sexp_nfeval, sexp_iter, out, sexp_pop, sexp_storepop, sexp_bestmemit, sexp_bestvalit; PROTECT(sexp_bestmem = NEW_NUMERIC(i_D)); P++; PROTECT(sexp_pop = allocMatrix(REALSXP, i_D, i_NP)); P++; PROTECT(sexp_bestmemit = allocMatrix(REALSXP, i_itermax, i_D)); P++; PROTECT(sexp_bestvalit = allocVector(REALSXP, i_itermax)); P++; double *gt_bestP = REAL(sexp_bestmem); double *gd_pop = REAL(sexp_pop); double *gd_bestmemit = REAL(sexp_bestmemit); double *gd_bestvalit = REAL(sexp_bestvalit); /* ensure lower and upper are double */ if(TYPEOF(lower) != REALSXP) {PROTECT(lower = coerceVector(lower, REALSXP)); P++;} if(TYPEOF(upper) != REALSXP) {PROTECT(upper = coerceVector(upper, REALSXP)); P++;} double *d_lower = REAL(lower); double *d_upper = REAL(upper); double gt_bestC; int gi_iter = 0; long l_nfeval = 0; /*---optimization--------------------------------------*/ devol(VTR, d_weight, d_cross, i_bs_flag, d_lower, d_upper, fn, rho, i_trace, i_strategy, i_D, i_NP, i_itermax, initialpopv, i_storepopfrom, i_storepopfreq, i_specinitialpop, gt_bestP, >_bestC, gd_pop, gd_storepop, gd_bestmemit, gd_bestvalit, &gi_iter, d_pPct, d_c, &l_nfeval, d_reltol, i_steptol, fnMap); /*---end optimization----------------------------------*/ j = i_nstorepop * i_NP * i_D; PROTECT(sexp_storepop = NEW_NUMERIC(j)); P++; for (i = 0; i < j; i++) NUMERIC_POINTER(sexp_storepop)[i] = gd_storepop[i]; PROTECT(sexp_nfeval = ScalarInteger(l_nfeval)); P++; PROTECT(sexp_iter = ScalarInteger(gi_iter)); P++; PROTECT(sexp_bestval = ScalarReal(gt_bestC)); P++; const char *out_names[] = {"bestmem", "bestval", "nfeval", "iter", "bestmemit", "bestvalit", "pop", "storepop", ""}; PROTECT(out = mkNamed(VECSXP, out_names)); P++; SET_VECTOR_ELT(out, 0, sexp_bestmem); SET_VECTOR_ELT(out, 1, sexp_bestval); SET_VECTOR_ELT(out, 2, sexp_nfeval); SET_VECTOR_ELT(out, 3, sexp_iter); SET_VECTOR_ELT(out, 4, sexp_bestmemit); SET_VECTOR_ELT(out, 5, sexp_bestvalit); SET_VECTOR_ELT(out, 6, sexp_pop); SET_VECTOR_ELT(out, 7, sexp_storepop); UNPROTECT(P); return out; } void devol(double VTR, double d_weight, double d_cross, int i_bs_flag, double *d_lower, double *d_upper, SEXP fcall, SEXP rho, int trace, int i_strategy, int i_D, int i_NP, int i_itermax, double *initialpopv, int i_storepopfrom, int i_storepopfreq, int i_specinitialpop, double *gt_bestP, double *gt_bestC, double *gd_pop, double *gd_storepop, double *gd_bestmemit, double *gd_bestvalit, int *gi_iter, double d_pPct, double d_c, long *l_nfeval, double d_reltol, int i_steptol, SEXP fnMap) { #define URN_DEPTH 5 /* 4 + one index to avoid */ int P=0; /* Data structures for parameter vectors */ SEXP sexp_gta_popP, sexp_gta_oldP, sexp_gta_newP, sexp_map_pop; PROTECT(sexp_gta_popP = allocMatrix(REALSXP, i_NP, i_D)); P++; /* FIXME THIS 
HAD 2x the rows!!! */ PROTECT(sexp_gta_oldP = allocMatrix(REALSXP, i_NP, i_D)); P++; PROTECT(sexp_gta_newP = allocMatrix(REALSXP, i_NP, i_D)); P++; double *ngta_popP = REAL(sexp_gta_popP); /* FIXME THIS HAD 2x the rows!!! */ double *ngta_oldP = REAL(sexp_gta_oldP); double *ngta_newP = REAL(sexp_gta_newP); /* Data structures for objective function values associated with * parameter vectors */ SEXP sexp_gta_popC, sexp_gta_oldC, sexp_gta_newC; PROTECT(sexp_gta_popC = allocVector(REALSXP, i_NP)); P++; PROTECT(sexp_gta_oldC = allocVector(REALSXP, i_NP)); P++; PROTECT(sexp_gta_newC = allocVector(REALSXP, i_NP)); P++; double *ngta_popC = REAL(sexp_gta_popC); double *ngta_oldC = REAL(sexp_gta_oldC); double *ngta_newC = REAL(sexp_gta_newC); double *t_bestitP = (double *)R_alloc(1,sizeof(double) * i_D); SEXP sexp_t_tmpP, sexp_t_tmpC; PROTECT(sexp_t_tmpP = allocMatrix(REALSXP, i_NP, i_D)); P++; double *nt_tmpP = REAL(sexp_t_tmpP); int i, j, k; /* counting variables */ int i_r1, i_r2, i_r3; /* placeholders for random indexes */ int ia_urn2[URN_DEPTH]; int ia_urnTemp[i_NP]; int popcnt, bestacnt; /* lazy cnters */ double d_jitter, d_dither; double **initialpop = (double **)R_alloc(i_NP,sizeof(double *)); for (int i = 0; i < i_NP; i++) initialpop[i] = (double *)R_alloc(i_D,sizeof(double)); /* vars for DE/current-to-p-best/1 */ int i_pbest; int p_NP = round(d_pPct * i_NP); /* choose at least two best solutions */ p_NP = p_NP < 2 ? 2 : p_NP; int sortIndex[i_NP]; /* sorted values of ngta_oldC */ for(i = 0; i < i_NP; i++) sortIndex[i] = i; //double goodCR = 0, goodF = 0, goodF2 = 0, meanCR = 0.5, meanF = 0.5; double goodCR = 0, goodF = 0, goodF2 = 0, meanCR = d_cross, meanF = d_weight; int i_goodNP = 0; /* vars for when i_bs_flag == 1 */ // int i_len, done, step, bound; // double tempC; GetRNGstate(); /* if initial population provided, initialize with values */ if (i_specinitialpop > 0) { k = 0; for (j = 0; j < i_D; j++) { for (i = 0; i < i_NP; i++) { initialpop[i][j] = initialpopv[k]; k += 1; } } } /*------Initialization-----------------------------*/ for (j = 0; j < i_D; j++) { for (i = 0; i < i_NP; i++) { if (i_specinitialpop <= 0) { /* random initial member */ ngta_popP[i+i_NP*j] = d_lower[j] + unif_rand() * (d_upper[j] - d_lower[j]); } else /* or user-specified initial member */ ngta_popP[i+i_NP*j] = initialpop[i][j]; } } if(!isNull(fnMap)) { PROTECT(sexp_map_pop = popEvaluate(l_nfeval, sexp_gta_popP, fnMap, rho, 0)); memmove(REAL(sexp_gta_popP), REAL(sexp_map_pop), i_NP * i_D * sizeof(double)); // valgrind reports memory overlap here UNPROTECT(1); // sexp_map_pop } PROTECT(sexp_gta_popC = popEvaluate(l_nfeval, sexp_gta_popP, fcall, rho, 1)); ngta_popC = REAL(sexp_gta_popC); double t_bestC = R_PosInf; for (i = 0; i < i_NP; i++) { if (ngta_popC[i] <= t_bestC) { t_bestC = ngta_popC[i]; for (j = 0; j < i_D; j++) gt_bestP[j]=ngta_popP[i+i_NP*j]; } } /*---assign pointers to current ("old") population---*/ memmove(REAL(sexp_gta_oldP), REAL(sexp_gta_popP), i_NP * i_D * sizeof(double)); memmove(REAL(sexp_gta_oldC), REAL(sexp_gta_popC), i_NP * sizeof(double)); UNPROTECT(1); // sexp_gta_popC /*------Iteration loop--------------------------------------------*/ int i_iter = 0; popcnt = 0; bestacnt = 0; int i_iter_tol = 0; while ((i_iter < i_itermax) && (t_bestC > VTR) && (i_iter_tol <= i_steptol)) { /* store intermediate populations */ if (i_iter % i_storepopfreq == 0 && i_iter >= i_storepopfrom) { for (i = 0; i < i_NP; i++) { for (j = 0; j < i_D; j++) { gd_storepop[popcnt] = ngta_oldP[i+i_NP*j]; popcnt++; } } } /* 
end store pop */ /* store the best member */ for(j = 0; j < i_D; j++) { gd_bestmemit[bestacnt] = gt_bestP[j]; bestacnt++; } /* store the best value */ gd_bestvalit[i_iter] = t_bestC; for (j = 0; j < i_D; j++) t_bestitP[j] = gt_bestP[j]; i_iter++; /*----compute dithering factor -----------------*/ if (i_strategy == 5) d_dither = d_weight + unif_rand() * (1.0 - d_weight); /*---DE/current-to-p-best/1 ----------------------------------------------*/ if (i_strategy == 6) { /* create a copy of ngta_oldC to avoid changing it */ double temp_oldC[i_NP]; for(j = 0; j < i_NP; j++) temp_oldC[j] = ngta_oldC[j]; /* sort temp_oldC to use sortIndex later */ rsort_with_index( (double*)temp_oldC, (int*)sortIndex, i_NP ); } /*----start of loop through ensemble------------------------*/ for (i = 0; i < i_NP; i++) { /*nt_tmpP is the vector to mutate and eventually select*/ for (j = 0; j < i_D; j++) nt_tmpP[i+i_NP*j] = ngta_oldP[i+i_NP*j]; permute(ia_urn2, URN_DEPTH, i_NP, i, ia_urnTemp); /* Pick 4 random and distinct */ i_r1 = ia_urn2[1]; /* population members */ i_r2 = ia_urn2[2]; i_r3 = ia_urn2[3]; if (d_c > 0) { d_cross = rnorm(meanCR, 0.1); d_cross = d_cross > 1.0 ? 1 : d_cross; d_cross = d_cross < 0.0 ? 0 : d_cross; do { d_weight = rcauchy(meanF, 0.1); d_weight = d_weight > 1 ? 1.0 : d_weight; }while(d_weight <= 0.0); } /*===Choice of strategy===============================================*/ j = (int)(unif_rand() * i_D); /* random parameter */ k = 0; do { switch (i_strategy) { case 1: { /*---classical strategy DE/rand/1/bin-------------------*/ nt_tmpP[i+i_NP*j] = ngta_oldP[i_r1+i_NP*j] + d_weight * (ngta_oldP[i_r2+i_NP*j] - ngta_oldP[i_r3+i_NP*j]); break; } case 2: { /*---DE/local-to-best/1/bin-----------------------------*/ nt_tmpP[i+i_NP*j] = nt_tmpP[i+i_NP*j] + d_weight * (t_bestitP[j] - nt_tmpP[i+i_NP*j]) + d_weight * (ngta_oldP[i_r2+i_NP*j] - ngta_oldP[i_r3+i_NP*j]); break; } case 3: { /*---DE/best/1/bin with jitter--------------------------*/ d_jitter = 0.0001 * unif_rand() + d_weight; nt_tmpP[i+i_NP*j] = t_bestitP[j] + d_jitter * (ngta_oldP[i_r1+i_NP*j] - ngta_oldP[i_r2+i_NP*j]); break; } case 4: { /*---DE/rand/1/bin with per-vector-dither---------------*/ nt_tmpP[i+i_NP*j] = ngta_oldP[i_r1+i_NP*j] + (d_weight + unif_rand()*(1.0 - d_weight))* (ngta_oldP[i_r2+i_NP*j]-ngta_oldP[i_r3+i_NP*j]); break; } case 5: { /*---DE/rand/1/bin with per-generation-dither-----------*/ nt_tmpP[i+i_NP*j] = ngta_oldP[i_r1+i_NP*j] + d_dither * (ngta_oldP[i_r2+i_NP*j] - ngta_oldP[i_r3+i_NP*j]); break; } case 6: { /*---DE/current-to-p-best/1 (JADE)----------------------*/ /* select from [0, 1, 2, ..., (pNP-1)] */ i_pbest = sortIndex[(int)(unif_rand() * p_NP)]; nt_tmpP[i+i_NP*j] = ngta_oldP[i+i_NP*j] + d_weight * (ngta_oldP[i_pbest+i_NP*j] - ngta_oldP[i+i_NP*j]) + d_weight * (ngta_oldP[i_r1+i_NP*j] - ngta_oldP[i_r2+i_NP*j]); break; } default: { /*---variation to DE/rand/1/bin: either-or-algorithm---*/ if (unif_rand() < 0.5) { /* differential mutation, Pmu = 0.5 */ nt_tmpP[i+i_NP*j] = ngta_oldP[i_r1+i_NP*j] + d_weight * (ngta_oldP[i_r2+i_NP*j] - ngta_oldP[i_r3+i_NP*j]); } else { /* recombination with K = 0.5*(F+1) -. 
F-K-Rule */ nt_tmpP[i+i_NP*j] = ngta_oldP[i_r1+i_NP*j] + 0.5 * (d_weight + 1.0) * (ngta_oldP[i_r2+i_NP*j] + ngta_oldP[i_r3+i_NP*j] - 2 * ngta_oldP[i_r1+i_NP*j]); } } } /* end switch */ j = (j + 1) % i_D; k++; }while((unif_rand() < d_cross) && (k < i_D)); /*===End choice of strategy===========================================*/ /*----boundary constraints, bounce-back method was not enforcing bounds correctly*/ for (j = 0; j < i_D; j++) { if (nt_tmpP[i+i_NP*j] < d_lower[j]) { nt_tmpP[i+i_NP*j] = d_lower[j] + unif_rand() * (d_upper[j] - d_lower[j]); } if (nt_tmpP[i+i_NP*j] > d_upper[j]) { nt_tmpP[i+i_NP*j] = d_upper[j] - unif_rand() * (d_upper[j] - d_lower[j]); } } } /* NEW End mutation loop through ensemble */ /*------Trial mutation now in nt_tmpP-----------------*/ /* evaluate mutated population */ if(!isNull(fnMap)) { PROTECT(sexp_map_pop = popEvaluate(l_nfeval, sexp_t_tmpP, fnMap, rho, 0)); memmove(REAL(sexp_t_tmpP), REAL(sexp_map_pop), i_NP * i_D * sizeof(double)); // valgrind reports memory overlap here UNPROTECT(1); // sexp_map_pop } PROTECT(sexp_t_tmpC = popEvaluate(l_nfeval, sexp_t_tmpP, fcall, rho, 1)); double *nt_tmpC = REAL(sexp_t_tmpC); /* compare old pop with mutated pop */ for (i = 0; i < i_NP; i++) { /* note that i_bs_flag means that we will choose the *best NP vectors from the old and new population later*/ if (nt_tmpC[i] <= ngta_oldC[i] || i_bs_flag) { /* replace target with mutant */ for (j = 0; j < i_D; j++) ngta_newP[i+i_NP*j]=nt_tmpP[i+i_NP*j]; ngta_newC[i]=nt_tmpC[i]; if (nt_tmpC[i] <= t_bestC) { for (j = 0; j < i_D; j++) gt_bestP[j]=nt_tmpP[i+i_NP*j]; t_bestC=nt_tmpC[i]; } if (d_c > 0) { /* calculate new goodCR and goodF */ goodCR += d_cross / ++i_goodNP; goodF += d_weight; goodF2 += pow(d_weight,2.0); } } else { for (j = 0; j < i_D; j++) ngta_newP[i+i_NP*j]=ngta_oldP[i+i_NP*j]; ngta_newC[i]=ngta_oldC[i]; } } /* End mutation loop through ensemble */ UNPROTECT(1); // sexp_t_tmpC if (d_c > 0) { /* calculate new meanCR and meanF */ meanCR = (1-d_c)*meanCR + d_c*goodCR; meanF = (1-d_c)*meanF + d_c*goodF2/goodF; } if(i_bs_flag) { /* FIXME */ error("bs = TRUE not currently supported"); // /* examine old and new pop. 
and take the best NP members // * into next generation */ // for (i = 0; i < i_NP; i++) { // for (j = 0; j < i_D; j++) // gta_popP[i][j] = gta_oldP[i][j]; // gta_popC[i] = gta_oldC[i]; // } // for (i = 0; i < i_NP; i++) { // for (j = 0; j < i_D; j++) // gta_popP[i_NP+i][j] = gta_newP[i][j]; // gta_popC[i_NP+i] = gta_newC[i]; // } // i_len = 2 * i_NP; // step = i_len; /* array length */ // while (step > 1) { // step /= 2; /* halve the step size */ // do { // done = 1; // bound = i_len - step; // for (j = 0; j < bound; j++) { // i = j + step + 1; // if (gta_popC[j] > gta_popC[i-1]) { // for (k = 0; k < i_D; k++) // tempP[k] = gta_popP[i-1][k]; // tempC = gta_popC[i-1]; // for (k = 0; k < i_D; k++) // gta_popP[i-1][k] = gta_popP[j][k]; // gta_popC[i-1] = gta_popC[j]; // for (k = 0; k < i_D; k++) // gta_popP[j][k] = tempP[k]; // gta_popC[j] = tempC; // done = 0; // /* if a swap has been made we are not finished yet */ // } /* if */ // } /* for */ // } while (!done); /* while */ // } /*while (step > 1) */ // /* now the best NP are in first NP places in gta_pop, use them */ // for (i = 0; i < i_NP; i++) { // for (j = 0; j < i_D; j++) // gta_newP[i][j] = gta_popP[i][j]; // gta_newC[i] = gta_popC[i]; // } } /*i_bs_flag*/ /* have selected NP mutants move on to next generation */ for (i = 0; i < i_NP; i++) { for (j = 0; j < i_D; j++) ngta_oldP[i+i_NP*j] = ngta_newP[i+i_NP*j]; ngta_oldC[i] = ngta_newC[i]; } for (j = 0; j < i_D; j++) t_bestitP[j] = gt_bestP[j]; if( trace > 0 ) { if( (i_iter % trace) == 0 ) { Rprintf("Iteration: %d bestvalit: %f bestmemit:", i_iter, t_bestC); for (j = 0; j < i_D; j++) Rprintf("%12.6f", gt_bestP[j]); Rprintf("\n"); } } /* check for user interrupt */ /*if( i_iter % 10000 == 999 ) R_CheckUserInterrupt();*/ /* check relative tolerance (as in src/main/optim.c) */ if( gd_bestvalit[i_iter-1] - t_bestC < (d_reltol * (fabs(gd_bestvalit[i_iter-1]) + d_reltol))) { i_iter_tol++; } else { i_iter_tol = 0; } } /* end iteration loop */ /* last population */ k = 0; for (i = 0; i < i_NP; i++) { for (j = 0; j < i_D; j++) { gd_pop[k] = ngta_oldP[i+i_NP*j]; k++; } } *gi_iter = i_iter; *gt_bestC = t_bestC; PutRNGstate(); UNPROTECT(P); } void permute(int ia_urn2[], int i_urn2_depth, int i_NP, int i_avoid, int ia_urn1[]) /******************************************************************** ** Function : void permute(int ia_urn2[], int i_urn2_depth) ** Author : Rainer Storn (w/bug fixes contributed by DEoptim users) ** Description : Generates i_urn2_depth random indices ex [0, i_NP-1] ** which are all distinct. This is done by using a ** permutation algorithm called the "urn algorithm" ** which goes back to C.L.Robinson. ** Functions : - ** Globals : - ** Parameters : ia_urn2 (O) array containing the random indices ** i_urn2_depth (I) number of random indices (avoided index included) ** i_NP (I) range of indices is [0, i_NP-1] ** i_avoid (I) is the index to avoid and is located in ** ia_urn2[0]. ** Preconditions : # Make sure that ia_urn2[] has a length of i_urn2_depth. ** # i_urn2_depth must be smaller than i_NP. ** Postconditions : # the index to be avoided is in ia_urn2[0], so fetch the ** indices from ia_urn2[i], i = 1, 2, 3, ..., i_urn2_depth. ** Return Value : - *********************************************************************/ { GetRNGstate(); int k = i_NP; int i_urn1 = 0; int i_urn2 = 0; for (int i = 0; i < i_NP; i++) ia_urn1[i] = i; /* initialize urn1 */ i_urn1 = i_avoid; /* get rid of the index to be avoided and place it in position 0. 
*/ while (k > i_NP - i_urn2_depth) /* i_urn2_depth is the number of indices wanted (must be <= NP) */ { ia_urn2[i_urn2] = ia_urn1[i_urn1]; /* move it into urn2 */ ia_urn1[i_urn1] = ia_urn1[k-1]; /* move highest index to fill gap */ k = k - 1; /* reduce number of accessible indices */ i_urn2 = i_urn2 + 1; /* next position in urn2 */ i_urn1 = (int)(unif_rand() * k); /* choose a random index */ } PutRNGstate(); } DEoptim/src/get_element.c0000755000176200001440000000051314332756015015046 0ustar liggesusers#include <R.h> #include <Rinternals.h> SEXP getListElement(SEXP list, char *str) { SEXP elmt = R_NilValue, names = getAttrib(list, R_NamesSymbol); int i; for (i = 0; i < length(list); i++) if (strcmp(CHAR(STRING_ELT(names, i)), str) == 0) { elmt = VECTOR_ELT(list, i); break; } return elmt; } DEoptim/src/DEoptim_init.c0000644000176200001440000000076714332756015015142 0ustar liggesusers#include <R.h> #include <Rinternals.h> #include <stdlib.h> // for NULL #include <R_ext/Rdynload.h> /* FIXME: Check these declarations against the C/Fortran source code. */ /* .Call calls */ extern SEXP DEoptimC(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP); static const R_CallMethodDef CallEntries[] = { {"DEoptimC", (DL_FUNC) &DEoptimC, 6}, {NULL, NULL, 0} }; void R_init_DEoptim(DllInfo *dll) { R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); R_useDynamicSymbols(dll, FALSE); } DEoptim/src/evaluate.c0000755000176200001440000000205514332756015014367 0ustar liggesusers#include <R.h> #include <Rinternals.h> /*------objective function---------------------------------------*/ SEXP popEvaluate(long *l_nfeval, SEXP parMat, SEXP fcall, SEXP env, int incrementEval) { SEXP sexp_fvec, fn; double *d_result; int P = 0; int nr; if (isNull(fcall)) return parMat; PROTECT(fn = lang3(fcall, parMat, R_DotsSymbol)); P++; PROTECT(sexp_fvec = eval(fn, env)); P++; nr = nrows(sexp_fvec); if(incrementEval) (*l_nfeval) += nr; if(nr != nrows(parMat)) error("objective function result has different length than parameter matrix"); switch(TYPEOF(sexp_fvec)) { case INTSXP: PROTECT(sexp_fvec = coerceVector(sexp_fvec, REALSXP)); P++; break; case REALSXP: break; default: error("unsupported objective function return value"); break; } d_result = REAL(sexp_fvec); for(int i=0; i < nr; i++) { if(ISNAN(d_result[i])) error("NaN value of objective function! \nPerhaps adjust the bounds."); } UNPROTECT(P); return(sexp_fvec); } DEoptim/NEWS0000755000176200001440000002505714332756015012324 0ustar liggesusersChanges in version 2.2-8 o Fixed uninitialized variable issue found via valgrind. Thanks to Prof Ripley for the report. Changes in version 2.2-7 o Added if(is.null(args$.combine)) args$.combine <- c to DEoptim.R to allow for optional custom .combine o Set default mapping function to NULL; This improves performance by allowing us to avoid one R call during the optimization if there is no mapping function. o Remove unused variable par in de4_0.c o Remove unnecessary allocation of sexp_t_tmpC, sexp_gta_oldC in de4_0.c that led to stack imbalance, potential crash. o Update comments in de4_0.c to reflect variable names that were changed when objects were converted from arrays to SEXPs. o Refactor the loop that populates t_bestC from the initial population, so there's no longer the potential for t_bestC to be uninitialized. o Fix call to parallel::clusterExport o Use full names to avoid partial matching; Partial argument name matching could cause confusing errors if the user's objective function contains arguments similar to the MARGIN and FUN arguments in apply().
o Added tests/ directory; tests.R now tests for problems in argument names o Import from parallelly package; use parallelly::availableCores() instead of parallel::detectCores(), an extension suggested by Henrik Bengtsson o The 'parallelType' argument to DEoptim.control() now may be specified using numeric values (as before), or using "none", "auto", "parallel", or "foreach"; "none" corresponds to the numeric value 0, "parallel" with 1, and "foreach" with 2; "auto" will attempt to decide which package to use (see the illustrative sketch after the 2.0-9 entry below). Changes in version 2.2-6 o README modified o DESCRIPTION modified o References updated Changes in version 2.2-5 o Used tools::package_native_routine_registration_skeleton(".") to create src/DEoptim_init.c o In NAMESPACE, changed useDynLib(DEoptim) to useDynLib(DEoptim, .registration = TRUE) o removed unused variables from de4_0.c: tmp_best, t_tmpC, same, i_xav, i_nstorepop, tempP, gta_newC, gta_oldC, popC, d_par o definition of popEvaluate(long *l_nfeval, SEXP parMat, SEXP fcall, SEXP env) changed to include a 5th integer argument, to remove warning about type mismatch with previous definition Changes in version 2.2-4 o Vignette source code included in package. o New option 'cluster' allows passing existing parallel cluster, using code by Alexey Stukalov. o As pointed out by Jason Thorpe: "parallel:::.onLoad() makes a call to stats::runif(1L) depending on whether or not Sys.getenv("R_PARALLEL_PORT") returns an integer, which appears to be platform dependent. The result of this bug is that the return value of DEoptim() will differ by platform the first time it is called, even when taking care to call set.seed() prior to calling DEoptim()." We now require 'parallel' (to ensure it is loaded) to avoid this behaviour. Changes in version 2.2-3 o DESCRIPTION updated. o Change to steptol interpretation in C, thanks to Alec Solway. o Change to foreach logic, thanks to Jonathan Owen. o Change to NP setting when initialpop provided, thanks to Tobias KD Weber. o More flexible passing of args to foreach. o Makefile changed to remove references to shell Changes in version 2.2-2 o Change to de4_0.c to replace memcpy with memmove Changes in version 2.2-1 o Invisible-to-users changes to evaluate.c to fix misuse of isnan Changes in version 2.2-0 o Thanks to Joshua Ulrich and Kris Boudt, parallel operation is possible, using foreach. o sandbox directory added, with example parallel code o Kris Boudt added as a contributor o foreach added as suggested package o foreachArgs argument added to DEoptim.control() function o Fixed typo in box constraint example in DEoptim.Rd o VignetteDepends metadata added to .Rnw o Added mapping function for integer / cardinality constraints o checkWinner and avWinner options have been removed. If the user desires averaging (to deal with a stochastic objective function) then a wrapper function may be used to give the average value over several calls. Changes in version 2.1-2 o Modified CITATION file and Rd files. o Joshua Ulrich fixed the crash caused by a stack overflow when optimizing many parameters. Thanks to Suraj Gupta for pointing out the problem. Changes in version 2.1-1 o Added portfolio optimization vignette. o Bug with reltol / steptol stopping criteria fixed. o Changed JADE to be 'on' when c > 0 for *any* strategy and set c=0 as default in DEoptim.control (JADE is 'off'). Changes in version 2.1-0 o Added documentation of strategy six. o Fixed typos in vignette. o Modified CITATION file and Rd files. Changes in version 2.0-9 o Added vignette.
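A minimal usage sketch tying together the 'parallelType' values (2.2-7 entry above) and the mapping function for integer constraints (2.2-0 entry above); the objective function, bounds, and control settings here are illustrative only and are not taken from the package sources:
library(DEoptim)
## toy objective: sphere function, minimum at the origin
sphere <- function(x) sum(x^2)
## mapping function that rounds each parameter, giving an integer-constrained search
mapToInteger <- function(x) round(x)
ctrl <- DEoptim.control(NP = 40, itermax = 100, trace = FALSE,
                        parallelType = "none")   ## or "auto", "parallel", "foreach"
res <- DEoptim(fn = sphere, lower = rep(-5, 4), upper = rep(5, 4),
               control = ctrl, fnMap = mapToInteger)
summary(res)
With parallelType = "parallel" the same call evaluates the population on a cluster sized via parallelly::availableCores(); with "foreach" it uses whichever foreach backend is registered.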
Changes in version 2.0-8 o Many improvements in the C code, thanks to Dirk Eddelbuettel's detailed code review. Dirk's contributions include: - fixed gd_bestmemit and gd_bestval initialization to avoid segfaults - propagate nfeval back to DEoptimC function - allocate parameter vector once, rather than once per evaluate call - allocate ia_urn1 once, rather than once per permute call - replace strategy if/else block with switch statement o More bug fixes and optimizations when bs=TRUE o Fixed bug in initial population check. Thanks to Vinecius Veloso. o Added '...' to objective function call in evaluate rather than in DEoptim R function. This yields a speed gain, since it avoids an extra function call. Thanks to Dirk Eddelbuettel for the idea. Changes in version 2.0-7 o Many improvements in the C code, including removal of all global variables thanks to Joshua Ulrich. o Thanks to Ralf Tautenhahn and Joshua Ulrich, a bug with bs=TRUE is removed. o Thanks to Dirk Eddelbuettel several bugs (some possibly causing segfaults) removed. o Added DE/current-to-p-best/1 strategy. o Added ability to only print every "trace" iterations. Changes in version 2.0-6 o Added dataset documentation now required by R CMD check. Changes in version 2.0-5 o CITATION file modified, updated references. o Removed check on length of parameter vector (it was left over from versions prior to 2.0-3); thanks to Jean-Luc Jannink. Changes in version 2.0-4 o added check to prevent the C code being called with NP<4, thanks to Joshua Ulrich. o fixed bug introduced in version 2.0-3 that made the objective function values associated with the winning population members incorrect. o added new options checkWinner and avWinner to the 'control' argument. Changes in version 2.0-3 o Re-write of much of the underlying C code. Now dynamically allocate storage, so can optimize on parameter vectors of arbitrary size, in a population of arbitrary size. Changed the documentation to reflect the new lack of limitations. o Stop with an error right away if a NaN objective function value occurs. o Default value of CR changed to .9 from .5. o Added reference and minor changes to documentation. Changes in version 2.0-2 o The maximum number of parameters that can be optimized (set statically in de.h with #define MAXDIM) was changed from 20 to 200. o zzz.R file removed and replaced with call to `useDynLib(DEoptim)' in NAMESPACE o Brian Peterson pointed out problems occurring when the objective function returns a NaN value. Error messages are now added to report when this happens. o permute patched, see below. Thanks to Hans Werner Borchers for pointing out that the problem was not fixed in the CRAN version. Changes in version 2.0-1 o Soren Macbeth and Joshua Ulrich pointed out and patched bugs in the function 'permute'. Note that the version 2.0-1 on CRAN does not patch these bugs correctly. The correction is made in the next version. Changes in version 2.0-0 o The R-based implementation of Differential Evolution has been replaced with a C-based implementation similar to the MS Visual C++ v5.0 implementation accompanying the book `Differential Evolution - A Practical Approach to Global Optimization', downloaded from http://www.icsi.berkeley.edu/~storn/DeWin.zip. The new C implementation is significantly faster. o The S3 method for plotting has been enhanced. It now allows plotting the intermediate populations if provided. o The package maintainer has been changed to Katharine Mullen. o A NAMESPACE has been added.
o Argument FUN for DEoptim is now called fn for compatibility with optim. o demo file has been removed o CITATION file modified Changes in version 1.3-3 o CITATION file modified. Changes in Version 1.3-2 o CITATION file modified. Changes in Version 1.3-1 o new plotting argument 'storepop' which displays intermediate population locations. Changes in Version 1.3-0 o the function 'DEoptim' has two arguments: 'storepopfrom' and 'storepopfreq', for tracking intermediate populations; the output contains also the list 'storepop' which belongs to the 'member' list's element. o small bug fixed for the number of iterations. Changes in Version 1.2-1 o fix a bug in the optimization procedure. Thanks to Tarmo Leinonen for pointing out this bug. o add a demo to show how to increase the number of printed digits while performing the optimization. Changes in Version 1.2-0 o the function 'DEoptim' has the new argument 'initial', which is an initial or starting population. You can therefore introduce a starting population in the optimization procedure. This can be useful when the optimization has to be run many times on data sets which differ sligthly. Thanks to Tarmo Leinonen for this nice suggestion. o the function 'DEoptim' outputs now 'pop' in the 'member' list's element. This is the population obtained at the last iteration which can be used as a starting population in 'DEoptim' via the argument 'initial'. o the function stops if any 'NA' or 'NaN' value is returned by the function 'FUN' to be optimized. o the function DEoptim does not handle exponential crossover anymore. This is so to simplify and accelerate the optimization procedure. Thanks to Vladimir Eremeev for pointing out a bug with the exponential crossover. o the function 'DEoptim' outputs a list of lists. This is more natural and in the same spirit of usual optimization functions. Thanks to Vladimir Eremeev for proposing this change. o documentation for the 'digits'. Thanks to Eugene Demidenko for pointing out this. DEoptim/R/0000755000176200001440000000000014332756015012022 5ustar liggesusersDEoptim/R/methods.R0000644000176200001440000000424114332756015013611 0ustar liggesusers'summary.DEoptim' <- function(object, ...){ digits <- max(5, getOption('digits') - 2) cat("\n***** summary of DEoptim object *****", "\nbest member : ", round(object$optim$bestmem, digits), "\nbest value : ", round(object$optim$bestval, digits), "\nafter : ", round(object$optim$iter), "generations", "\nfn evaluated : ", round(object$optim$nfeval), "times", "\n*************************************\n") invisible(object) } plot.DEoptim <- function (x, plot.type = c("bestmemit", "bestvalit", "storepop"), ...) { z <- x$member niter <- length(z$bestvalit) npar <- length(z$lower) nam <- names(z$lower) nstorepop <- length(z$storepop) if (identical(plot.type[1], "bestmemit")) { plot.new() par(mfrow = c(min(3, npar), 1)) for (i in 1:npar) { if (identical(i%%4, 0)) { cat("new plot\n") devAskNewPage(ask = TRUE) } plot(1:niter, z$bestmemit[, i], xlim = c(1, niter), las = 1, xlab = "iteration", ylab = "value", main = nam[i], ...) abline(h = c(z$lower[i], z$upper[i]), col = "red") } } else if (identical(plot.type[1], "bestvalit")) { plot(1:niter, z$bestvalit, xlim = c(1, niter), las = 1, xlab = "iteration", ylab = "function value", main = "convergence plot", ...) 
} else if (identical(plot.type[1], "storepop") && nstorepop > 0) { plot.new() par(mfrow = c(min(3, npar), 1)) for (i in 1:npar) { if (identical(i%%4, 0)) { cat("new plot\n") devAskNewPage(ask = TRUE) } tmp <- NULL for (j in 1:nstorepop) { tmp <- cbind(tmp, z$storepop[[j]][, i]) } matplot(t(tmp), col = "black", pch = 20, las = 1, xlab = "stored population", ylab = "value", main = nam[i], ...) abline(h = c(z$lower[i], z$upper[i]), col = "red") par(new = FALSE) } } else { warning("'plot.type' does not correspond to any plotting type", immediate. = TRUE) } } DEoptim/R/zzz.R0000755000176200001440000000042614332756015013007 0ustar liggesusers".onLoad" <- function (lib, pkg) { library.dynam(pkg, pkg, lib) } ".onAttach" <- function (lib, pkg) { packageStartupMessage("\nDEoptim package", "\nDifferential Evolution algorithm in R", "\nAuthors: D. Ardia, K. Mullen, B. Peterson and J. Ulrich\n") } DEoptim/R/DEoptim.R0000644000176200001440000002722414332756015013515 0ustar liggesusersDEoptim.control <- function(VTR = -Inf, strategy = 2, bs = FALSE, NP = NA, itermax = 200, CR = 0.5, F = 0.8, trace = TRUE, initialpop = NULL, storepopfrom = itermax + 1, storepopfreq = 1, p = 0.2, c = 0, reltol, steptol, parallelType = c("none", "auto", "parallel", "foreach"), cluster = NULL, packages = c(), parVar = c(), foreachArgs = list(), parallelArgs = NULL) { if (itermax <= 0) { warning("'itermax' <= 0; set to default value 200\n", immediate. = TRUE) itermax <- 200 } if (F < 0 || F > 2) { warning("'F' not in [0,2]; set to default value 0.8\n", immediate. = TRUE) F <- 0.8 } if (CR < 0 || CR > 1) { warning("'CR' not in [0,1]; set to default value 0.5\n", immediate. = TRUE) CR <- 0.5 } if (strategy < 1 || strategy > 6) { warning("'strategy' not in {1,...,6}; set to default value 2\n", immediate. = TRUE) strategy <- 2 } bs <- (bs > 0) if ( trace < 0 ) { warning("'trace' cannot be negative; set to 'TRUE'") trace <- TRUE } storepopfreq <- floor(storepopfreq) if (storepopfreq > itermax) storepopfreq <- 1 if (p <= 0 || p > 1) { warning("'p' not in (0,1]; set to default value 0.2\n", immediate. = TRUE) p <- 0.2 } if (c < 0 || c > 1) { warning("'c' not in [0,1]; set to default value 0\n", immediate. = TRUE) c <- 0 } if (missing(reltol)) { reltol <- sqrt(.Machine$double.eps) } if (missing(steptol)) { steptol <- itermax } if(!(is.null(initialpop))) { if(is.na(NP)) if(is.matrix(initialpop)) NP <- dim(initialpop)[1] else stop("initialpop must be a matrix") else if(NP != dim(initialpop)[1]) { warning("Resetting NP to the number of rows in initialpop") NP <- dim(initialpop)[1] } } ##################### # handle parallel options #check for a single parallelType if(missing(parallelType) || length(parallelType)>1){ parallelType<-parallelType[1] } # handle 'auto' auto-detect if(parallelType=='auto'){ pkgs<-.packages() rv<-R.Version() if('foreach' %in% pkgs){ parallelType='foreach' } else if (('parallel' %in% pkgs) || (rv$major>=2 && rv$minor>=14.2) ){ parallelType='parallel' } else { parallelType='none' } } #support old deprecated parallelType arguments if(is.numeric(parallelType)) { parallelType <- switch(parallelType+1, 'none', 'parallel', 'foreach') } #handle deptrecated parallel arguments, set sensible defaults, etc. 
switch(parallelType, foreach = { if(missing(parallelArgs) && hasArg(foreachArgs)){ parallelArgs<-match.call(expand.dots=TRUE)$foreachArgs } if(is.null(parallelArgs$.packages) ){ if(hasArg(packages)) parallelArgs$.packages<-match.call(expand.dots=TRUE)$packages } }, parallel = { if(missing(packages) || !hasArg(packages)){ packages<-(.packages()) } } ) # end parallel options ###################### # format and return list(VTR = VTR, strategy = strategy, NP = NP, itermax = itermax, CR = CR, F = F, bs = bs, trace = trace, initialpop = initialpop, storepopfrom = storepopfrom, storepopfreq = storepopfreq, p = p, c = c, reltol = reltol, steptol = steptol, parallelType = parallelType, cluster = cluster, packages = packages, parVar = parVar, foreachArgs = foreachArgs, parallelArgs = parallelArgs) } DEoptim <- function(fn, lower, upper, control = DEoptim.control(), ..., fnMap=NULL) { if (length(lower) != length(upper)) stop("'lower' and 'upper' are not of same length") if (!is.vector(lower)) lower <- as.vector(lower) if (!is.vector(upper)) upper <- as.vector(upper) if (any(lower > upper)) stop("'lower' > 'upper'") if (any(lower == "Inf")) warning("you set a component of 'lower' to 'Inf'. May imply 'NaN' results", immediate. = TRUE) if (any(lower == "-Inf")) warning("you set a component of 'lower' to '-Inf'. May imply 'NaN' results", immediate. = TRUE) if (any(upper == "Inf")) warning("you set a component of 'upper' to 'Inf'. May imply 'NaN' results", immediate. = TRUE) if (any(upper == "-Inf")) warning("you set a component of 'upper' to '-Inf'. May imply 'NaN' results", immediate. = TRUE) if (!is.null(names(lower))) nam <- names(lower) else if (!is.null(names(upper)) && is.null(names(lower))) nam <- names(upper) else nam <- paste("par", 1:length(lower), sep = "") ctrl <- do.call(DEoptim.control, as.list(control)) ctrl$npar <- length(lower) if(is.na(ctrl$NP)) ctrl$NP <- 10*length(lower) if (ctrl$NP < 4) { warning("'NP' < 4; set to default value 10*length(lower)\n", immediate. = TRUE) ctrl$NP <- 10*length(lower) } if (ctrl$NP < 10*length(lower)) warning("For many problems it is best to set 'NP' (in 'control') to be at least ten times the length of the parameter vector. \n", immediate. = TRUE) if (!is.null(ctrl$initialpop)) { ctrl$specinitialpop <- TRUE if(!identical(as.numeric(dim(ctrl$initialpop)), as.numeric(c(ctrl$NP, ctrl$npar)))) stop("Initial population is not a matrix with dim. NP x length(upper).") } else { ctrl$specinitialpop <- FALSE ctrl$initialpop <- 0.0 } ## ctrl$trace <- as.numeric(ctrl$trace) ctrl$specinitialpop <- as.numeric(ctrl$specinitialpop) ctrl$initialpop <- as.numeric(ctrl$initialpop) if(!is.null(ctrl$cluster)) { ## use provided cluster if(!inherits(ctrl$cluster, "cluster")) stop("cluster is not a 'cluster' class object") parallel::clusterExport(ctrl$cluster, ctrl$parVar) if(!is.null(ctrl$parallelArgs)) fnPop <- function(`*params`, ...) { parallel::parApply(cl=ctrl$cluster,X=`*params`,MARGIN=1,FUN=fn,ctrl$parallelArgs,...) } else fnPop <- function(`*params`, ...) { parallel::parApply(cl=ctrl$cluster,X=`*params`,MARGIN=1,FUN=fn,...) } } else if(ctrl$parallelType == 'foreach') { ## use foreach use.foreach <- 'foreach' %in% installed.packages() if(!use.foreach) stop("foreach package not available but parallelType set to 'foreach'") if(!foreach::getDoParRegistered()) { foreach::registerDoSEQ() } args <- ctrl$foreachArgs fnPop <- function(`*params`, ...) 
{ my_chunksize <- ceiling(NROW(`*params`)/foreach::getDoParWorkers()) my_iter <- iterators::iter(`*params`,by="row",chunksize=my_chunksize) args$i <- my_iter if(is.null(args$.combine)) args$.combine <- c if (!is.null(args$.export)) args$.export = c(args$.export, "fn") else args$.export = "fn" if (is.null(args$.errorhandling)) args$.errorhandling = c('stop', 'remove', 'pass') if (is.null(args$.verbose)) args$.verbose = FALSE if (is.null(args$.inorder)) args$.inorder = TRUE if (is.null(args$.multicombine)) args$.multicombine = FALSE foreach::"%dopar%"(do.call(foreach::foreach, args), apply(X=i,MARGIN=1,FUN=fn,...)) } } else if(ctrl$parallelType == 'parallel'){ ## use parallel if (!requireNamespace("parallelly", quietly = TRUE)) { stop("the parallelly package is required for parallelType = 'parallel'\n", "please install it via install.packages('parallelly')") } cl <- parallel::makeCluster(parallelly::availableCores()) packFn <- function(packages) { for(i in packages) library(i, character.only = TRUE) } parallel::clusterCall(cl, packFn, ctrl$packages) if(is.null(ctrl$parVar)) ctrl$parVar <- ls() parallel::clusterExport(cl=cl, varlist=ctrl$parVar, envir = environment()) fnPop <- function(`*params`, ...) { parallel::parApply(cl=cl,X=`*params`,MARGIN=1,FUN=fn,...) } } else { ## use regular for loop / apply fnPop <- function(`*params`, ...) { apply(X = `*params`, MARGIN = 1, FUN = fn, ...) } } ## Mapping function fnMapC <- NULL if(!is.null(fnMap)) { fnMapC <- function(`*params`,...) { ## run mapping function mappedPop <- t(apply(X = `*params`, MARGIN = 1, FUN = fnMap)) if(all(dim(mappedPop) != dim(`*params`))) ## check results stop("mapping function did not return an object with ", "dim NP x length(upper).") dups <- duplicated(mappedPop) ## check for duplicates np <- NCOL(mappedPop) tries <- 0 while(tries < 5 && any(dups)) { ##print('dups!'); flush.console() nd <- sum(dups) ## generate new random population member newPop <- matrix(runif(nd*np),ncol=np) newPop <- rep(lower,each=nd) + newPop * rep(upper-lower,each=nd) ## replace duplicate with _mapped_ random member mappedPop[dups,] <- t(apply(newPop, MARGIN = 1, FUN = fnMap)) dups <- duplicated(mappedPop) ## re-check for duplicates tries <- tries + 1 } if(tries==5) warning("Could not remove ",sum(dups)," duplicates from the mapped ", "population in 5 tries. Evaluating population with duplicates.", call.=FALSE, immediate.=TRUE) ## memcpy fails if mappedPop isn't double (need TYPEOF switch in C?) 
storage.mode(mappedPop) <- "double" mappedPop } } outC <- .Call("DEoptimC", lower, upper, fnPop, ctrl, new.env(), fnMapC, PACKAGE="DEoptim") if(ctrl$parallelType == "parallel") parallel::stopCluster(cl) if (length(outC$storepop) > 0) { nstorepop <- floor((outC$iter - ctrl$storepopfrom) / ctrl$storepopfreq) storepop <- list() cnt <- 1 for(i in 1:nstorepop) { idx <- cnt:((cnt - 1) + (ctrl$NP * ctrl$npar)) storepop[[i]] <- matrix(outC$storepop[idx], nrow = ctrl$NP, ncol = ctrl$npar, byrow = TRUE) cnt <- cnt + (ctrl$NP * ctrl$npar) dimnames(storepop[[i]]) <- list(1:ctrl$NP, nam) } } else { storepop = NULL } names(outC$bestmem) <- nam iter <- max(1,as.numeric(outC$iter)) names(lower) <- names(upper) <- nam bestmemit <- matrix(outC$bestmemit[1:(iter * ctrl$npar)], nrow = iter, ncol = ctrl$npar, byrow = TRUE) dimnames(bestmemit) <- list(1:iter, nam) storepop <- as.list(storepop) outR <- list(optim = list( bestmem = outC$bestmem, bestval = outC$bestval, nfeval = outC$nfeval, iter = outC$iter), member = list( lower = lower, upper = upper, bestmemit = bestmemit, bestvalit = outC$bestvalit, pop = t(outC$pop), storepop = storepop)) attr(outR, "class") <- "DEoptim" return(outR) } DEoptim/MD50000644000176200001440000000225714333510001012117 0ustar liggesusersba5472562398f85fff7328dbe520a48f *DESCRIPTION 86d11286932dea50e8374ef8bc273af3 *NAMESPACE 351b5b2f6e6187a758217d01e4e199fa *NEWS a3a2a6b8c77846febddbf4722cc2dbea *R/DEoptim.R b842f7e58d4b9733360ac58a71e90a95 *R/methods.R a9a0289d3f2083fd6232760e941b70a1 *R/zzz.R c6c11d291a7de5bd0cb780bbea370758 *README.md e2cee66caaa3ef30b2f509f1dc1206d9 *THANKS 439bf689fa27cf9affd0335332142165 *build/partial.rdb 29b17760c2b67a7a0ceef6c670b5fe23 *data/SMI.rda 772610daaf6366cea9ed0e5be0b36fdd *data/xrrData.rda 1ded5f89ba776af46661a7f929162227 *demo/00Index e77b142399eeddc9fcf419eb727d00c1 *demo/DEoptim.R 655491657b3423710d87f1cd76b36d76 *demo/benchmarks.R a2af50217c551fb658490c3c37123ec9 *inst/CITATION 5bf787cf77e810708489bc5610718b3f *man/DEoptim-methods.Rd 49bcca08f159e0342ad8d02705438e31 *man/DEoptim.Rd e479bdb74cb5b98763bc09f3983c87b4 *man/DEoptim.control.Rd 63621de55ebb83c3a6951b7ee6215adf *man/SMI.Rd 3e89edc23cca26ccf3093a962aec0478 *man/xrrData.Rd 7bd7be8eb8490da9458fad71556e3b2c *src/DEoptim_init.c ec80a3ec79fbff8fabf3cce9ac02843d *src/de4_0.c 415ec1e3e787725ba91373d18257359e *src/evaluate.c 6a2399c9da194de475dd25a6d4addf28 *src/get_element.c 3680d9a3a93430c75999ddb2d72fa394 *tests/tests.R DEoptim/inst/0000755000176200001440000000000014332756015012576 5ustar liggesusersDEoptim/inst/CITATION0000755000176200001440000000404614332756015013742 0ustar liggesusersif(!exists("meta") || is.null(meta)) meta <- packageDescription("DEoptim") year <- sub(".*(2[[:digit:]]{3})-.*", "\\1", meta$Date) vers <- paste("version", meta$Version) citHeader("To cite 'DEoptim' in publications use:") citEntry(entry = "article", title = "{DEoptim}: An {R} Package for Global Optimization by {D}ifferential {E}volution", author = personList(as.person("Katharine Mullen"), as.person("David Ardia"), as.person("David Gil"), as.person("Donald Windover"), as.person("James Cline")), journal = "Journal of Statistical Software", year = "2011", volume = "40", number = "6", pages = "1--26", doi = "10.18637/jss.v040.i06", textVersion = paste("Katharine Mullen, David Ardia, David Gil, Donald Windover, James Cline (2011).", "'DEoptim': An R Package for Global Optimization by Differential Evolution.", "Journal of Statistical Software, 40(6), 1-26.", "doi:10.18637/jss.v040.i06.") ) citEntry(entry = 
"article", title = "{D}ifferential {E}volution with {DEoptim}: {A}n Application to Non-Convex Portfolio Optimization", author = personList(as.person("David Ardia"), as.person("Kris Boudt"), as.person("Peter Carl"), as.person("Katharine M. Mullen"), as.person("Brian G. Peterson")), journal = "R Journal", year = "2011", volume = "3", number = "1", pages = "27--34", doi = "10.32614/RJ-2011-005", textVersion = paste("Ardia, D., Boudt, K., Carl, P., Mullen, K.M., Peterson, B.G. (2010).", "Differential Evolution with 'DEoptim': An Application to Non-Convex Portfolio Optimization.", "R Journal, 3(1), 27-34.", "doi:10.32614/RJ-2011-005") ) citFooter("BibTeX entries for LaTeX users: use\n", sQuote('toBibtex(citation("DEoptim"))'))