gbm/0000755000176200001440000000000012143234277011024 5ustar liggesusersgbm/MD50000644000176200001440000001417712143234277011346 0ustar liggesusers108bdba2eb6f2ba6ce890f47224ef68f *CHANGES 1b3ceeedb04f24b7a07b2a247e864688 *DESCRIPTION c2cae135a9c0d4ae15e14e89166ba841 *LICENSE bc5d59fd7c71bb320f9d3a0d3ca2ec30 *NAMESPACE 6a1293bc6f87d439da835b1b837f9c52 *R/basehaz.gbm.R cc5e4cd5f5d5e23382bae904e9ada152 *R/calibrate.plot.R 5615ac799ce14603a692a2c29be9648f *R/checks.R cf5a5bce0653ae59317ddac8bfe8d389 *R/gbm.R 428c0d3515d5fcbbdd992e10f5d22793 *R/gbm.fit.R 1de9823ae906c064f61a39bd1e0241d3 *R/gbm.loss.R ab8e510ccde4446a7c93ff384ba3217c *R/gbm.more.R 5a79d41470d1f8ae3b8c278bc5e12389 *R/gbm.perf.R 0fdb6a883897939376827795e4ee5230 *R/gbmCluster.R f4651f14ae6acdfa96319bb257f9d0e1 *R/gbmCrossVal.R 7201fac67c6152443cf2a2c3b5989116 *R/gbmDoFold.R f5cc3af1a8eb7ddbf962038e88d27953 *R/getCVgroup.R efd18f017f7a73397141bf4239c922ce *R/getStratify.R 696197960954e0845b8998911987cab2 *R/guessDist.R be47e36ef092244404831df5227f6d65 *R/interact.gbm.R f8c4c5e164b772b3bfc152b8e5659e2e *R/ir.measures.R bbfe015167ca3c75ecd155f6b090f661 *R/permutation.test.gbm.R 51c2749906af39dc17eb1af54b4d861d *R/plot.gbm.R b9c2bb5000212628b390b81dfdd895c0 *R/predict.gbm.R 7e3daea77a7b6ffa18e9f81cf0e0b152 *R/pretty.gbm.tree.R 13ac361d8e3f54893f7de0b66351eee4 *R/print.gbm.R 36d2345c029a4e8384703c92d46f9b2e *R/reconstructGBMdata.R 792e2a5c7cdfeeee3e29c4e418f8af35 *R/relative.influence.R e8cf40a7c7efcd820e908a43252cfc2b *R/shrink.gbm.R eefc2a06d746e77ac2ba101d240640b8 *R/shrink.gbm.pred.R b74827c0b11ad8180998c3cd4eb41e3c *R/test.gbm.R 4e38ebb4d3578e523b7d94fc9ece3d65 *demo/00Index e3bd8606063f15ded6ab3261c13d22af *demo/OOB-reps.R 354344b4f6e8a232508ef872ced5efa3 *demo/bernoulli.R f7599f6ddc6852ba0721651a46601b06 *demo/coxph.R bb1c84d68320171ac205bb33114d49e1 *demo/gaussian.R 31906c0a7bce9676949413f0fbff2c6c *demo/multinomial.R af763746809ed98e48e065f77942cb05 *demo/pairwise.R dbff7ebcc6a18e27c1b423fd5db70ae3 
*demo/printExamples.R 79316127956b8f5291f5021f1e7c89ef *demo/robustReg.R 5e674508b7fde23e247a6e1a6c6b6ec6 *inst/doc/gbm.Sweave e73636a53327b5e049e5764b0620d03e *inst/doc/gbm.pdf b63bc1c2450ad4bca8db60e03b932a53 *inst/doc/gbm.tex 64dbd4ec219c6e855b87bc4ddeba111e *inst/doc/index.html dc706f07b81a76bf9aab2edf4641e86f *inst/doc/oobperf2.eps 7ba661d197d25537a69fc34d737b4d29 *inst/doc/oobperf2.pdf 9d73da9632fed38c327ffbd1b072347b *inst/doc/shrinkage-v-iterations.eps 3fda19791155842b0e48565781441aa2 *inst/doc/shrinkage-v-iterations.pdf 4d55dd49b93485a78ecb50caafd19b4e *inst/doc/shrinkageplot.R 90fd593dd07098b5600fb650e86733ff *inst/doc/srcltx.sty ce7a173a73fb952a1bf40cb65e3b99f2 *man/basehaz.gbm.Rd 7fca3316fe15ef40546c3db911d67086 *man/calibrate.plot.Rd 99fab30dc167a5c90a1d1424e71a25f4 *man/gbm-internal.Rd dbbaa87e0b50024671667d8d38008e64 *man/gbm-package.Rd eac981fe86aac2cf2b76f2bcee97289f *man/gbm.Rd 089cf48c905c3429ed63f69a0cd982b5 *man/gbm.object.Rd 3ed5b048c81d016868ca2799e4504419 *man/gbm.perf.Rd 7359f0a3b1f2d27cf29e497745c6ba59 *man/gbm.roc.area.Rd ea065b8eb338215054b29478af99e224 *man/gbmCrossVal.Rd 8fca4f44be806cb17eb37affe8334618 *man/interact.gbm.Rd a8728abc1dc77b599c2aa7d1df6f982e *man/plot.gbm.Rd 5896d84873dd1ed5d22005b5b37b17b6 *man/predict.gbm.Rd 1656ffd7646d41236545e0399a70afdd *man/pretty.gbm.tree.Rd 894215a9e1e715f39a6cb79a6fe81baf *man/print.gbm.Rd 0da8961be170c9a72df248d6f0fe985e *man/quantile.rug.Rd 9fbb2bddffae7a639d4f702817eeecb3 *man/reconstructGBMdata.Rd 8754bdc10a30057f74edf6037ce0cc71 *man/relative.influence.Rd b58470798d31cfceceeec40252ce833f *man/shrink.gbm.Rd ef52c476e46b4c64eee269064ea58b64 *man/shrink.gbm.pred.Rd c6b082b4bf980d66e355c056ceabe421 *man/summary.gbm.Rd 3e0b677bccf30388ec0fc96f77f5fb62 *man/validate.Rd 0d32ce72a7b02fc57d602c60b9ba8305 *src/adaboost.cpp 2f5d22dc3043e69628763cbe303e6b5f *src/adaboost.h 6d2bd44a11975c8f023640eb7a9036c3 *src/bac/gaussian.cpp c877a1d31fa93463ed5d3ccd2164aa80 *src/bernoulli.cpp 
323f73ab809cff64ad5b4f336157f295 *src/bernoulli.h 088062cab2532d24fa3a9fc5affcf69a *src/buildinfo.h e15f767c646f66e54eb5bb20ccd7cebd *src/coxph.cpp e110cbd0b715934c4e0257cf20e9c1da *src/coxph.h 3616890b5d7af2b3edd52dc5f29544b0 *src/dataset.cpp d30f46362b1915f76e5a328ce95c7136 *src/dataset.h b5824ccf353076bf59018429ae3ac6ac *src/distribution.cpp 91d88e455827695f63bf23df5dfb3108 *src/distribution.h 6d2bd44a11975c8f023640eb7a9036c3 *src/gaussian.cpp 6c2bf2616a3b4491aaaf501346246d3f *src/gaussian.h 1d8d4e59887769602b1d3c8dc3d5f94f *src/gbm.cpp 0f49e8549558916322ec80e29b591a73 *src/gbm.h c0c572eb464dae70700ffe8fdc3f6b9f *src/gbm_engine.cpp b3f1f49fa614ac6cfd52b28191bfdb70 *src/gbm_engine.h f1da15864dab021cdac1617ffba4ff0f *src/gbmentry.cpp 1fba83f37e9f092d8b005e0c8f32a97b *src/huberized.cpp 141e5b762944c14a0b6294e15046296f *src/huberized.h 10dcf061e2807ca52f811ec6650f33ad *src/laplace.cpp 53b4d97c482517fbbc97162da1adf891 *src/laplace.h d25bcfb8da3565604f902270b25eb470 *src/locationm.cpp 932f3d98f158ebf6ae11ed47e873a7f3 *src/locationm.h 39094967ceaabf7c744bc93d0b86d22f *src/matrix.h 7242e54abea29c46990c4aabba7a65b6 *src/multinomial.cpp 8798fe266a8bad59ac9b3e7019cebbe8 *src/multinomial.h 75737afcbdd3162c62fcdd82b027e1d2 *src/node.cpp 3f7d35689f88a25a8f536d31c4ce172b *src/node.h 49da51b394dccb0063fa7b5e4ed662d6 *src/node_categorical.cpp 98afbdcf5bb70211102e58ed262fcec1 *src/node_categorical.h 74913ea93e6707eb49e52ac24047ae07 *src/node_continuous.cpp f09bd89f861430f58cb80ccf0de77c6a *src/node_continuous.h af2b9dd107d657344891521829c52243 *src/node_factory.cpp 3b80b8101a773a42a06eb41b5c6b01c9 *src/node_factory.h 56dc9a7a6309294654e641c14a32023d *src/node_nonterminal.cpp 062cbcf913ad61d33048c36ab0b76735 *src/node_nonterminal.h a99c0738f82cb857c87b45a65d4e8f25 *src/node_search.cpp 76b812a554f8ce9e7ea64c6f3c7631ee *src/node_search.h c6943942255ce8138259b6b47caa0c08 *src/node_terminal.cpp 084bcc63d1b33ca200460b88ef36b8f6 *src/node_terminal.h b763976a9c68d9e975417a84b7e2b3c4 
*src/pairwise.cpp 8dc9c440afcb8d96f881c6d56ecae4d6 *src/pairwise.h 756422dc1f3f394260fa4d77ec42d1ed *src/poisson.cpp 0c901877981c1df8c4d82f6dd99c9231 *src/poisson.h 64e10460138c1b67923020b58cf1a599 *src/quantile.cpp 491d792d90d047d5a8c192253b632252 *src/quantile.h 519b30584e7e752480750e86027aea7e *src/tdist.cpp 9ab15eb81fc9a18ee7d14a76f7aefd2a *src/tdist.h 276e36bf158250eb458a1cdabcf975b5 *src/tree.cpp 6b2f1cd60e5d67638e110e1ac9552b27 *src/tree.h gbm/src/0000755000176200001440000000000012143232747011613 5ustar liggesusersgbm/src/tree.h0000644000176200001440000000756712143232747012742 0ustar liggesusers//------------------------------------------------------------------------------ // GBM by Greg Ridgeway Copyright (C) 2003 // // File: tree.h // // License: GNU GPL (version 2 or later) // // Contents: regression tree // // Owner: gregr@rand.org // // History: 3/26/2001 gregr created // 2/14/2003 gregr: adapted for R implementation // //------------------------------------------------------------------------------ #ifndef TREGBM_H #define TREGBM_H #include #include #include #include "dataset.h" #include "node_factory.h" #include "node_search.h" class CCARTTree { public: CCARTTree(); ~CCARTTree(); GBMRESULT Initialize(CNodeFactory *pNodeFactory); GBMRESULT grow(double *adZ, CDataset *pData, double *adAlgW, double *adF, unsigned long nTrain, unsigned long nBagged, double dLambda, unsigned long cMaxDepth, unsigned long cMinObsInNode, bool *afInBag, unsigned long *aiNodeAssign, CNodeSearch *aNodeSearch, VEC_P_NODETERMINAL &vecpTermNodes); GBMRESULT Reset(); GBMRESULT TransferTreeToRList(CDataset *pData, int *aiSplitVar, double *adSplitPoint, int *aiLeftNode, int *aiRightNode, int *aiMissingNode, double *adErrorReduction, double *adWeight, double *adPred, VEC_VEC_CATEGORIES &vecSplitCodes, int cCatSplitsOld, double dShrinkage); GBMRESULT PredictValid(CDataset *pData, unsigned long nValid, double *adFadj); GBMRESULT Predict(double *adX, unsigned long cRow, unsigned long 
cCol, unsigned long iRow, double &dFadj); GBMRESULT Adjust(unsigned long *aiNodeAssign, double *adFadj, unsigned long cTrain, VEC_P_NODETERMINAL &vecpTermNodes, unsigned long cMinObsInNode); GBMRESULT GetNodeCount(int &cNodes); GBMRESULT SetShrinkage(double dShrink) { this->dShrink = dShrink; return GBM_OK; } double GetShrinkage() {return dShrink;} GBMRESULT Print(); GBMRESULT GetVarRelativeInfluence(double *adRelInf); double dError; // total squared error before carrying out the splits private: GBMRESULT GetBestSplit(CDataset *pData, unsigned long nTrain, CNodeSearch *aNodeSearch, unsigned long cTerminalNodes, unsigned long *aiNodeAssign, bool *afInBag, double *adZ, double *adW, unsigned long &iBestNode, double &dBestNodeImprovement); CNode *pRootNode; double dShrink; // objects used repeatedly unsigned long cDepth; unsigned long cTerminalNodes; unsigned long cTotalNodeCount; unsigned long iObs; unsigned long iWhichNode; unsigned long iBestNode; double dBestNodeImprovement; double dSumZ; double dSumZ2; double dTotalW; signed char schWhichNode; CNodeFactory *pNodeFactory; CNodeNonterminal *pNewSplitNode; CNodeTerminal *pNewLeftNode; CNodeTerminal *pNewRightNode; CNodeTerminal *pNewMissingNode; CNodeTerminal *pInitialRootNode; }; typedef CCARTTree *PCCARTTree; #endif // TREGBM_H gbm/src/tree.cpp0000644000176200001440000002666012143232747013270 0ustar liggesusers// GBM by Greg Ridgeway Copyright (C) 2003 #include "tree.h" CCARTTree::CCARTTree() { pRootNode = NULL; pNodeFactory = NULL; dShrink = 1.0; } CCARTTree::~CCARTTree() { if(pRootNode != NULL) { pRootNode->RecycleSelf(pNodeFactory); } } GBMRESULT CCARTTree::Initialize ( CNodeFactory *pNodeFactory ) { GBMRESULT hr = GBM_OK; this->pNodeFactory = pNodeFactory; return hr; } GBMRESULT CCARTTree::Reset() { GBMRESULT hr = GBM_OK; if(pRootNode != NULL) { // delete the old tree and start over hr = pRootNode->RecycleSelf(pNodeFactory); } if(GBM_FAILED(hr)) { goto Error; } iBestNode = 0; dBestNodeImprovement = 0.0; 
schWhichNode = 0; pNewSplitNode = NULL; pNewLeftNode = NULL; pNewRightNode = NULL; pNewMissingNode = NULL; pInitialRootNode = NULL; Cleanup: return hr; Error: goto Cleanup; } //------------------------------------------------------------------------------ // Grows a regression tree //------------------------------------------------------------------------------ GBMRESULT CCARTTree::grow ( double *adZ, CDataset *pData, double *adW, double *adF, unsigned long nTrain, unsigned long nBagged, double dLambda, unsigned long cMaxDepth, unsigned long cMinObsInNode, bool *afInBag, unsigned long *aiNodeAssign, CNodeSearch *aNodeSearch, VEC_P_NODETERMINAL &vecpTermNodes ) { GBMRESULT hr = GBM_OK; #ifdef NOISY_DEBUG Rprintf("Growing tree\n"); #endif if((adZ==NULL) || (pData==NULL) || (adW==NULL) || (adF==NULL) || (cMaxDepth < 1)) { hr = GBM_INVALIDARG; goto Error; } dSumZ = 0.0; dSumZ2 = 0.0; dTotalW = 0.0; #ifdef NOISY_DEBUG Rprintf("initial tree calcs\n"); #endif for(iObs=0; iObsGetNewNodeTerminal(); pInitialRootNode->dPrediction = dSumZ/dTotalW; pInitialRootNode->dTrainW = dTotalW; vecpTermNodes.resize(2*cMaxDepth + 1,NULL); // accounts for missing nodes vecpTermNodes[0] = pInitialRootNode; pRootNode = pInitialRootNode; aNodeSearch[0].Set(dSumZ,dTotalW,nBagged, pInitialRootNode, &pRootNode, pNodeFactory); // build the tree structure #ifdef NOISY_DEBUG Rprintf("Building tree 1 "); #endif cTotalNodeCount = 1; cTerminalNodes = 1; for(cDepth=0; cDepthWhichNode(pData,iObs); if(schWhichNode == 1) // goes right { aiNodeAssign[iObs] = cTerminalNodes-2; } else if(schWhichNode == 0) // is missing { aiNodeAssign[iObs] = cTerminalNodes-1; } // those to the left stay with the same node assignment } } // set up the node search for the new right node aNodeSearch[cTerminalNodes-2].Set(aNodeSearch[iBestNode].dBestRightSumZ, aNodeSearch[iBestNode].dBestRightTotalW, aNodeSearch[iBestNode].cBestRightN, pNewRightNode, &(pNewSplitNode->pRightNode), pNodeFactory); // set up the node search for the 
new missing node aNodeSearch[cTerminalNodes-1].Set(aNodeSearch[iBestNode].dBestMissingSumZ, aNodeSearch[iBestNode].dBestMissingTotalW, aNodeSearch[iBestNode].cBestMissingN, pNewMissingNode, &(pNewSplitNode->pMissingNode), pNodeFactory); // set up the node search for the new left node // must be done second since we need info for right node first aNodeSearch[iBestNode].Set(aNodeSearch[iBestNode].dBestLeftSumZ, aNodeSearch[iBestNode].dBestLeftTotalW, aNodeSearch[iBestNode].cBestLeftN, pNewLeftNode, &(pNewSplitNode->pLeftNode), pNodeFactory); } // end tree growing // DEBUG // Print(); Cleanup: return hr; Error: goto Cleanup; } GBMRESULT CCARTTree::GetBestSplit ( CDataset *pData, unsigned long nTrain, CNodeSearch *aNodeSearch, unsigned long cTerminalNodes, unsigned long *aiNodeAssign, bool *afInBag, double *adZ, double *adW, unsigned long &iBestNode, double &dBestNodeImprovement ) { GBMRESULT hr = GBM_OK; int iVar = 0; unsigned long iNode = 0; unsigned long iOrderObs = 0; unsigned long iWhichObs = 0; unsigned long cVarClasses = 0; double dX = 0.0; for(iVar=0; iVar < pData->cCols; iVar++) { cVarClasses = pData->acVarClasses[iVar]; for(iNode=0; iNode < cTerminalNodes; iNode++) { hr = aNodeSearch[iNode].ResetForNewVar(iVar,cVarClasses); } // distribute the observations in order to the correct node search for(iOrderObs=0; iOrderObs < nTrain; iOrderObs++) { iWhichObs = pData->aiXOrder[iVar*nTrain + iOrderObs]; if(afInBag[iWhichObs]) { iNode = aiNodeAssign[iWhichObs]; dX = pData->adX[iVar*(pData->cRows) + iWhichObs]; hr = aNodeSearch[iNode].IncorporateObs (dX, adZ[iWhichObs], adW[iWhichObs], pData->alMonotoneVar[iVar]); if(GBM_FAILED(hr)) { goto Error; } } } for(iNode=0; iNode dBestNodeImprovement) { iBestNode = iNode; dBestNodeImprovement = aNodeSearch[iNode].BestImprovement(); } } Cleanup: return hr; Error: goto Cleanup; } GBMRESULT CCARTTree::GetNodeCount ( int &cNodes ) { cNodes = cTotalNodeCount; return GBM_OK; } GBMRESULT CCARTTree::PredictValid ( CDataset *pData, 
unsigned long nValid, double *adFadj ) { GBMRESULT hr = GBM_OK; int i=0; for(i=pData->cRows - nValid; icRows; i++) { pRootNode->Predict(pData, i, adFadj[i]); adFadj[i] *= dShrink; } return hr; } GBMRESULT CCARTTree::Predict ( double *adX, unsigned long cRow, unsigned long cCol, unsigned long iRow, double &dFadj ) { if(pRootNode != NULL) { pRootNode->Predict(adX,cRow,cCol,iRow,dFadj); dFadj *= dShrink; } else { dFadj = 0.0; } return GBM_OK; } GBMRESULT CCARTTree::Adjust ( unsigned long *aiNodeAssign, double *adFadj, unsigned long cTrain, VEC_P_NODETERMINAL &vecpTermNodes, unsigned long cMinObsInNode ) { unsigned long hr = GBM_OK; unsigned long iObs = 0; hr = pRootNode->Adjust(cMinObsInNode); if(GBM_FAILED(hr)) { goto Error; } // predict for the training observations for(iObs=0; iObsdPrediction; } Cleanup: return hr; Error: goto Cleanup; } GBMRESULT CCARTTree::Print() { GBMRESULT hr = GBM_OK; if(pRootNode != NULL) { pRootNode->PrintSubtree(0); Rprintf("shrinkage: %f\n",dShrink); Rprintf("initial error: %f\n\n",dError); } return hr; } GBMRESULT CCARTTree::GetVarRelativeInfluence ( double *adRelInf ) { GBMRESULT hr = GBM_OK; if(pRootNode != NULL) { hr = pRootNode->GetVarRelativeInfluence(adRelInf); if(GBM_FAILED(hr)) { goto Error; } } Cleanup: return hr; Error: goto Cleanup; } GBMRESULT CCARTTree::TransferTreeToRList ( CDataset *pData, int *aiSplitVar, double *adSplitPoint, int *aiLeftNode, int *aiRightNode, int *aiMissingNode, double *adErrorReduction, double *adWeight, double *adPred, VEC_VEC_CATEGORIES &vecSplitCodes, int cCatSplitsOld, double dShrinkage ) { GBMRESULT hr = GBM_OK; int iNodeID = 0; if(pRootNode != NULL) { hr = pRootNode->TransferTreeToRList(iNodeID, pData, aiSplitVar, adSplitPoint, aiLeftNode, aiRightNode, aiMissingNode, adErrorReduction, adWeight, adPred, vecSplitCodes, cCatSplitsOld, dShrinkage); } else { hr = GBM_FAIL; } return hr; } gbm/src/tdist.h0000644000176200001440000000543712143232747013124 0ustar 
liggesusers//------------------------------------------------------------------------------ // GBM alteration by Daniel Edwards // // File: tdist.h // // Contains: Distribution object to implement t-distribution // // History: 04/04/2008 Created // //------------------------------------------------------------------------------ #ifndef TDISTCGBM_H #define TDISTCGBM_H #include #include "distribution.h" #include "locationm.h" class CTDist : public CDistribution { public: CTDist(double adNu); virtual ~CTDist(); GBMRESULT UpdateParams(double *adF, double *adOffset, double *adWeight, unsigned long cLength) { return GBM_OK; }; GBMRESULT ComputeWorkingResponse(double *adY, double *adMisc, double *adOffset, double *adF, double *adZ, double *adWeight, bool *afInBag, unsigned long nTrain, int cIdxOff); GBMRESULT InitF(double *adY, double *adMisc, double *adOffset, double *adWeight, double &dInitF, unsigned long cLength); GBMRESULT FitBestConstant(double *adY, double *adMisc, double *adOffset, double *adW, double *adF, double *adZ, unsigned long *aiNodeAssign, unsigned long nTrain, VEC_P_NODETERMINAL vecpTermNodes, unsigned long cTermNodes, unsigned long cMinObsInNode, bool *afInBag, double *adFadj, int cIdxOff); double Deviance(double *adY, double *adMisc, double *adOffset, double *adWeight, double *adF, unsigned long cLength, int cIdxOff); double BagImprovement(double *adY, double *adMisc, double *adOffset, double *adWeight, double *adF, double *adFadj, bool *afInBag, double dStepSize, unsigned long nTrain); private: double mdNu; CLocationM *mpLocM; }; #endif // TDISTCGBM_H gbm/src/tdist.cpp0000644000176200001440000001055712143232747013456 0ustar liggesusers// GBM by Greg Ridgeway Copyright (C) 2003 #include "tdist.h" CTDist::CTDist(double adNu) { mdNu = adNu; double *adParams = new double[1]; adParams[0] = adNu; mpLocM = new CLocationM("tdist", 1, adParams); delete[] adParams; } CTDist::~CTDist() { delete mpLocM; } GBMRESULT CTDist::ComputeWorkingResponse ( double *adY, 
double *adMisc, double *adOffset, double *adF, double *adZ, double *adWeight, bool *afInBag, unsigned long nTrain, int cIdxOff ) { unsigned long i = 0; double dU = 0.0; if(adOffset == NULL) { for(i=0; iLocationM(iN, adArr, adWeight); delete[] adArr; return GBM_OK; } double CTDist::Deviance ( double *adY, double *adMisc, double *adOffset, double *adWeight, double *adF, unsigned long cLength, int cIdxOff ) { unsigned long i=0; double dL = 0.0; double dW = 0.0; double dU = 0.0; if(adOffset == NULL) { for(i=cIdxOff; icN >= cMinObsInNode) { // Get the number of nodes here int iNumNodes = 0; for (iObs = 0; iObs < nTrain; iObs++) { if(afInBag[iObs] && (aiNodeAssign[iObs] == iNode)) { iNumNodes++; } } // Create the arrays to centre double *adArr = new double[iNumNodes]; double *adWeight = new double[iNumNodes]; int iIdx = 0; for(iObs=0; iObsdPrediction = mpLocM->LocationM(iNumNodes, adArr, adWeight); delete[] adArr; delete[] adWeight; } } return hr; } double CTDist::BagImprovement ( double *adY, double *adMisc, double *adOffset, double *adWeight, double *adF, double *adFadj, bool *afInBag, double dStepSize, unsigned long nTrain ) { double dReturnValue = 0.0; double dF = 0.0; double dW = 0.0; unsigned long i = 0; double dU = 0.0; double dV = 0.0; for(i=0; i #include "distribution.h" class CQuantile: public CDistribution { public: CQuantile(double dAlpha); virtual ~CQuantile(); GBMRESULT UpdateParams(double *adF, double *adOffset, double *adWeight, unsigned long cLength) { return GBM_OK; }; GBMRESULT ComputeWorkingResponse(double *adY, double *adMisc, double *adOffset, double *adF, double *adZ, double *adWeight, bool *afInBag, unsigned long nTrain, int cIdxOff); GBMRESULT InitF(double *adY, double *adMisc, double *adOffset, double *adWeight, double &dInitF, unsigned long cLength); GBMRESULT FitBestConstant(double *adY, double *adMisc, double *adOffset, double *adW, double *adF, double *adZ, unsigned long *aiNodeAssign, unsigned long nTrain, VEC_P_NODETERMINAL vecpTermNodes, 
unsigned long cTermNodes, unsigned long cMinObsInNode, bool *afInBag, double *adFadj, int cIdxOff); double Deviance(double *adY, double *adMisc, double *adOffset, double *adWeight, double *adF, unsigned long cLength, int cIdxOff); double BagImprovement(double *adY, double *adMisc, double *adOffset, double *adWeight, double *adF, double *adFadj, bool *afInBag, double dStepSize, unsigned long nTrain); private: vector vecd; double dAlpha; }; #endif // QUANTILE_H gbm/src/quantile.cpp0000644000176200001440000001213712143232747014145 0ustar liggesusers// GBM by Greg Ridgeway Copyright (C) 2003 #include "quantile.h" CQuantile::CQuantile(double dAlpha) { this->dAlpha = dAlpha; } CQuantile::~CQuantile() { } GBMRESULT CQuantile::ComputeWorkingResponse ( double *adY, double *adMisc, double *adOffset, double *adF, double *adZ, double *adWeight, bool *afInBag, unsigned long nTrain, int cIdxOff ) { unsigned long i = 0; if(adOffset == NULL) { for(i=0; i adF[i]) ? dAlpha : -(1.0-dAlpha); } } else { for(i=0; i adF[i]+adOffset[i]) ? 
dAlpha : -(1.0-dAlpha); } } return GBM_OK; } // DEBUG: needs weighted quantile GBMRESULT CQuantile::InitF ( double *adY, double *adMisc, double *adOffset, double *adWeight, double &dInitF, unsigned long cLength ) { double dOffset=0.0; unsigned long i=0; vecd.resize(cLength); for(i=0; i adF[i]) { dL += adWeight[i]*dAlpha *(adY[i] - adF[i]); } else { dL += adWeight[i]*(1.0-dAlpha)*(adF[i] - adY[i]); } dW += adWeight[i]; } } else { for(i=cIdxOff; i adF[i] + adOffset[i]) { dL += adWeight[i]*dAlpha *(adY[i] - adF[i]-adOffset[i]); } else { dL += adWeight[i]*(1.0-dAlpha)*(adF[i]+adOffset[i] - adY[i]); } dW += adWeight[i]; } } return dL/dW; } // DEBUG: needs weighted quantile GBMRESULT CQuantile::FitBestConstant ( double *adY, double *adMisc, double *adOffset, double *adW, double *adF, double *adZ, unsigned long *aiNodeAssign, unsigned long nTrain, VEC_P_NODETERMINAL vecpTermNodes, unsigned long cTermNodes, unsigned long cMinObsInNode, bool *afInBag, double *adFadj, int cIdxOff ) { GBMRESULT hr = GBM_OK; unsigned long iNode = 0; unsigned long iObs = 0; unsigned long iVecd = 0; double dOffset; vecd.resize(nTrain); // should already be this size from InitF for(iNode=0; iNodecN >= cMinObsInNode) { iVecd = 0; for(iObs=0; iObsdPrediction = *max_element(vecd.begin(), vecd.begin()+iVecd); } else { nth_element(vecd.begin(), vecd.begin() + int(iVecd*dAlpha), vecd.begin() + int(iVecd)); vecpTermNodes[iNode]->dPrediction = *(vecd.begin() + int(iVecd*dAlpha)); } } } return hr; } double CQuantile::BagImprovement ( double *adY, double *adMisc, double *adOffset, double *adWeight, double *adF, double *adFadj, bool *afInBag, double dStepSize, unsigned long nTrain ) { double dReturnValue = 0.0; double dF = 0.0; double dW = 0.0; unsigned long i = 0; for(i=0; i dF) { dReturnValue += adWeight[i]*dAlpha*(adY[i]-dF); } else { dReturnValue += adWeight[i]*(1-dAlpha)*(dF-adY[i]); } if(adY[i] > dF+dStepSize*adFadj[i]) { dReturnValue -= adWeight[i]*dAlpha* (adY[i] - dF-dStepSize*adFadj[i]); } else { 
dReturnValue -= adWeight[i]*(1-dAlpha)* (dF+dStepSize*adFadj[i] - adY[i]); } dW += adWeight[i]; } } return dReturnValue/dW; } gbm/src/poisson.h0000644000176200001440000000567412143232747013472 0ustar liggesusers//------------------------------------------------------------------------------ // GBM by Greg Ridgeway Copyright (C) 2003 // File: poisson.h // // License: GNU GPL (version 2 or later) // // Contents: poisson object // // Owner: gregr@rand.org // // History: 3/26/2001 gregr created // 2/14/2003 gregr: adapted for R implementation // //------------------------------------------------------------------------------ #ifndef POISSON_H #define POISSON_H #include #include "distribution.h" class CPoisson : public CDistribution { public: CPoisson(); virtual ~CPoisson(); GBMRESULT UpdateParams(double *adF, double *adOffset, double *adWeight, unsigned long cLength) { return GBM_OK; }; GBMRESULT ComputeWorkingResponse(double *adY, double *adMisc, double *adOffset, double *adWeight, double *adF, double *adZ, bool *afInBag, unsigned long nTrain, int cIdxOff); double Deviance(double *adY, double *adMisc, double *adOffset, double *adWeight, double *adF, unsigned long cLength, int cIdxOff); GBMRESULT InitF(double *adY, double *adMisc, double *adOffset, double *adWeight, double &dInitF, unsigned long cLength); GBMRESULT FitBestConstant(double *adY, double *adMisc, double *adOffset, double *adW, double *adF, double *adZ, unsigned long *aiNodeAssign, unsigned long nTrain, VEC_P_NODETERMINAL vecpTermNodes, unsigned long cTermNodes, unsigned long cMinObsInNode, bool *afInBag, double *adFadj, int cIdxOff); double BagImprovement(double *adY, double *adMisc, double *adOffset, double *adWeight, double *adF, double *adFadj, bool *afInBag, double dStepSize, unsigned long nTrain); private: vector vecdNum; vector vecdDen; vector vecdMax; vector vecdMin; }; #endif // POISSON_H gbm/src/poisson.cpp0000644000176200001440000001234112143232747014012 0ustar liggesusers// GBM by Greg Ridgeway 
Copyright (C) 2003 #include "poisson.h" CPoisson::CPoisson() { } CPoisson::~CPoisson() { } GBMRESULT CPoisson::ComputeWorkingResponse ( double *adY, double *adMisc, double *adOffset, double *adF, double *adZ, double *adWeight, bool *afInBag, unsigned long nTrain, int cIdxOff ) { unsigned long i = 0; double dF = 0.0; // compute working response for(i=0; i < nTrain; i++) { dF = adF[i] + ((adOffset==NULL) ? 0.0 : adOffset[i]); adZ[i] = adY[i] - exp(dF); } return GBM_OK; } GBMRESULT CPoisson::InitF ( double *adY, double *adMisc, double *adOffset, double *adWeight, double &dInitF, unsigned long cLength ) { GBMRESULT hr = GBM_OK; double dSum = 0.0; double dDenom = 0.0; unsigned long i = 0; if(adOffset == NULL) { for(i=0; idPrediction = -19.0; } else if(vecdDen[iNode] == 0.0) { vecpTermNodes[iNode]->dPrediction = 0.0; } else { vecpTermNodes[iNode]->dPrediction = log(vecdNum[iNode]/vecdDen[iNode]); } vecpTermNodes[iNode]->dPrediction = fmin2(vecpTermNodes[iNode]->dPrediction, 19-vecdMax[iNode]); vecpTermNodes[iNode]->dPrediction = fmax2(vecpTermNodes[iNode]->dPrediction, -19-vecdMin[iNode]); } } return hr; } double CPoisson::BagImprovement ( double *adY, double *adMisc, double *adOffset, double *adWeight, double *adF, double *adFadj, bool *afInBag, double dStepSize, unsigned long nTrain ) { double dReturnValue = 0.0; double dF = 0.0; double dW = 0.0; unsigned long i = 0; for(i=0; i CDoubleUintPair; // Buffer memory allocation void Init(unsigned int cMaxItemsPerGroup); // Initialize ranker with scores of items belonging to the same group // - adScores is a score array, (at least) cNumItems long bool SetGroupScores(const double* const adScores, unsigned int cNumItems); // Perform the ranking // - Return true if any item changed its rank bool Rank(); // Getter / setter unsigned int GetNumItems() const { return cNumItems; } unsigned int GetRank(int i) const { return vecdipScoreRank[i].second; } unsigned int GetItem(unsigned int iRank) const { return (vecpdipScoreRank[iRank-1] 
- &(vecdipScoreRank[0])); } void SetRank(int i, unsigned int r) { vecdipScoreRank[i].second = r; } void AddToScore(int i, double delta) { vecdipScoreRank[i].first += delta; } protected: // Number of items in current group unsigned int cNumItems; // Pairs of (score, rank) for current group vector vecdipScoreRank; // Array of pointers to elements of vecdipScoreRank, used for sorting // Note: We need a separate array for sorting in order to be able to // quickly look up the rank for any given item. vector vecpdipScoreRank; }; // Abstract base class for all IR Measures class CIRMeasure { public: // Constructor CIRMeasure() : cRankCutoff(UINT_MAX) {} // Destructor virtual ~CIRMeasure() { } // Getter / Setter unsigned int GetCutoffRank() const { return cRankCutoff; } void SetCutoffRank(unsigned int cRankCutoff) { this->cRankCutoff = cRankCutoff; } // Auxiliary function for sanity check bool AnyPairs(const double* const adY, unsigned int cNumItems) const { return (cNumItems >= 2 // at least two instances && adY[0] > 0.0 // at least one positive example (targets are non-increasing) && adY[cNumItems-1] != adY[0]); // at least two different targets } // Memory allocation virtual void Init(unsigned long cMaxGroup, unsigned long cNumItems, unsigned int cRankCutoff = UINT_MAX) { this->cRankCutoff = cRankCutoff; } // Calculate the IR measure for the group of items set in the ranker. // Precondition: CRanker::SetGroupScores() has been called // - adY are the target scores virtual double Measure(const double* const adY, const CRanker& ranker) = 0; // Calculate the maximum achievable IR measure for a given group. // Side effect: the ranker state might change // Default implementation for MRR and MAP: if any positive items exist, // ranking them at the top yields a perfect measure of 1. virtual double MaxMeasure(unsigned int iGroup, const double* const adY, unsigned int cNumItems) { return (AnyPairs(adY, cNumItems) ? 
1.0 : 0.0); } // Calculate the difference in the IR measure caused by swapping the ranks of two items. // Assumptions: // * iItemBetter has a higher label than iItemWorse (i.e., adY[iItemBetter] > adY[iItemWorse]). // * ranker.setGroup() has been called. virtual double SwapCost(int iItemBetter, int iItemWorse, const double* const adY, const CRanker& ranker) const = 0; protected: // Cut-off rank below which items are ignored for measure unsigned int cRankCutoff; }; // Class to implement IR Measure 'CONC' (fraction of concordant pairs). For the case of binary labels, this is // equivalent to the area under the ROC curve (AUC). class CConc : public CIRMeasure { public: virtual ~CConc() { } void Init(unsigned long cMaxGroup, unsigned long cNumItems, unsigned int cRankCutoff = UINT_MAX); double Measure(const double* const adY, const CRanker& ranker); // The maximum number of correctly classified pairs is simply all pairs with different labels double MaxMeasure(unsigned int iGroup, const double* const adY, unsigned int cNumItems) { return PairCount(iGroup, adY, cNumItems); } // (Cached) calculation of the number of pairs with different labels unsigned int PairCount(unsigned int iGroup, const double* const adY, unsigned int cNumItems); double SwapCost(int iItemBetter, int iItemWorse, const double* const adY, const CRanker& ranker) const; protected: // Calculate the number of pairs with different labels int ComputePairCount(const double* const adY, unsigned int cNumItems); // Caches the number of pairs with different labels, for each group vector veccPairCount; }; // Class to implement IR Measure 'Normalized Discounted Cumulative Gain' // Note: Labels can have any non-negative value class CNDCG : public CIRMeasure { public: void Init(unsigned long cMaxGroup, unsigned long cNumItems, unsigned int cRankCutoff = UINT_MAX); // Compute DCG double Measure(const double* const adY, const CRanker& ranker); // Compute best possible DCG double MaxMeasure(unsigned int iGroup, const 
double* const adY, unsigned int cNumItems); double SwapCost(int iItemBetter, int iItemWorse, const double* const adY, const CRanker& ranker) const; protected: // Lookup table for rank weight (w(rank) = 1/log2(1+rank)) vector vecdRankWeight; // Caches the maximum achievable DCG, for each group vector vecdMaxDCG; }; // Class to implement IR Measure 'Mean Reciprocal Rank' // Assumption: Labels are 0 or 1 class CMRR : public CIRMeasure { public: double Measure(const double* const adY, const CRanker& ranker); double SwapCost(int iItemPos, int iItemNeg, const double* const adY, const CRanker& ranker) const; }; // Class to implement IR Measure 'Mean Average Precision' // Assumption: Labels are 0 or 1 class CMAP : public CIRMeasure { public: void Init(unsigned long cMaxGroup, unsigned long cNumItems, unsigned int cRankCutoff = UINT_MAX); double Measure(const double* const adY, const CRanker& ranker); double SwapCost(int iItemPos, int iItemNeg, const double* const adY, const CRanker& ranker) const; protected: // Buffer to hold positions of positive examples mutable vector veccRankPos; }; // Main class for 'pairwise' distribution // Notes and Assumptions: // * The items are sorted such that // * Instances belonging to the same group occur in // a contiguous range // * Within a group, labels are non-increasing. // * adGroup supplies the group ID (positive integer, but double // format for compliance with the base class interface). // * The targets adY are non-negative values, and binary {0,1} // for measures MRR and MAP. // * Higher IR measures are better. // * Only pairs with different labels are used for training. // * Instance weights (adWeight) are constant among groups. // * CPairwise::Initialize() is called before any of the other // functions, with same values for adY, adGroup, adWeight, and // nTrain. Certain values have to be precomputed for // efficiency. 
class CPairwise : public CDistribution { public: // Constructor: determine IR measure as either "conc", "map", "mrr", or "ndcg" CPairwise(const char* szIRMeasure); virtual ~CPairwise(); GBMRESULT Initialize(double *adY, double *adGroup, double *adOffset, double *adWeight, unsigned long cLength); GBMRESULT UpdateParams(double *adF, double *adOffset, double *adWeight, unsigned long cLength) { return GBM_OK; }; GBMRESULT ComputeWorkingResponse(double *adY, double *adGroup, double *adOffset, double *adF, double *adZ, double *adWeight, bool *afInBag, unsigned long nTrain, int cIdxOff); double Deviance(double *adY, double *adGroup, double *adOffset, double *adWeight, double *adF, unsigned long cLength, int cIdxOff); GBMRESULT InitF(double *adY, double *adGroup, double *adOffset, double *adWeight, double &dInitF, unsigned long cLength); GBMRESULT FitBestConstant(double *adY, double *adGroup, double *adOffset, double *adW, double *adF, double *adZ, unsigned long *aiNodeAssign, unsigned long nTrain, VEC_P_NODETERMINAL vecpTermNodes, unsigned long cTermNodes, unsigned long cMinObsInNode, bool *afInBag, double *adFadj, int cIdxOff); double BagImprovement(double *adY, double *adGroup, double *adOffset, double *adWeight, double *adF, double *adFadj, bool *afInBag, double dStepSize, unsigned long nTrain); protected: // Calculate and accumulate up the gradients and Hessians from all training pairs void ComputeLambdas(int iGroup, unsigned int cNumItems, const double* const adY, const double* const adF, const double* const adWeight, double* adZ, double* adDeriv); CIRMeasure* pirm; // The IR measure to use CRanker ranker; // The ranker vector vecdHessian; // Second derivative of loss function, for each training instance; used for Newton step vector vecdNum; // Buffer used for numerator in FitBestConstant(), for each node vector vecdDenom; // Buffer used for denominator in FitBestConstant(), for each node vector vecdFPlusOffset; // Temporary buffer for (adF + adOffset), if the latter 
is not null }; #endif // PAIRWISE_H gbm/src/pairwise.cpp0000644000176200001440000007344712143232747014161 0ustar liggesusers// Implementation file for 'pairwise' distribution // // Author: Stefan Schroedl (schroedl@a9.com) #include "pairwise.h" #include #include #include #include //#define NOISY_DEBUG #ifdef NOISY_DEBUG #endif void CRanker::Init(unsigned int cMaxItemsPerGroup) { // Allocate sorting buffers vecdipScoreRank.resize(cMaxItemsPerGroup); vecpdipScoreRank.resize(cMaxItemsPerGroup); } bool CRanker::SetGroupScores(const double* const adScores, const unsigned int cNumItems) { const double dEPS = 1e-10; if (cNumItems > vecdipScoreRank.size()) { // Allocate additional space // (We should never get here if CPairwise::Initialize has been called before, as expected) Init(cNumItems); } this->cNumItems = cNumItems; // Copy scores to buffer, and // initialize pointer array to score entries for(unsigned int i = 0; i < cNumItems; i++) { // Add small random number to break possible ties vecdipScoreRank[i].first = adScores[i] + dEPS * (unif_rand() - 0.5); vecpdipScoreRank[i] = &(vecdipScoreRank[i]); } return true; } // Auxiliary struct to compare pair pointers // decreasing order based on the first component (score) struct CDoubleUintPairPtrComparison { bool operator() (const CRanker::CDoubleUintPair* lhs, const CRanker::CDoubleUintPair* rhs) { return (lhs->first > rhs->first); } }; bool CRanker::Rank() { // Sort the pointer array, based on decreasing score CDoubleUintPairPtrComparison comp; sort(vecpdipScoreRank.begin(), vecpdipScoreRank.begin() + cNumItems, comp); bool bChanged = false; // Create inverted rank lookup for(unsigned int i = 0; i < cNumItems; i++) { // Note: ranks are 1-based const unsigned int cNewRank = i + 1; if (!bChanged) { bChanged = (cNewRank != vecpdipScoreRank[i]->second); } // Store the rank with the corresponding score in the vecdipScoreRank array vecpdipScoreRank[i]->second = cNewRank; } return bChanged; } void CConc::Init ( unsigned long 
cMaxGroup, unsigned long cMaxItemsPerGroup, unsigned int cRankCutoff ) { CIRMeasure::Init(cMaxGroup, cMaxItemsPerGroup, cRankCutoff); veccPairCount.resize(cMaxGroup + 1, -1); } unsigned int CConc::PairCount(unsigned int iGroup, const double* const adY, unsigned int cNumItems) { if (iGroup >= veccPairCount.size()) { // Allocate additional space // (We should never get here if CPairwise::Initialize has been called before, as expected) veccPairCount.resize(iGroup + 1, -1); } if (veccPairCount[iGroup] < 0.0) { // Not yet initialized veccPairCount[iGroup] = ComputePairCount(adY, cNumItems); } return veccPairCount[iGroup]; } // Calculate the number of pairs with different labels, and store in veccPairCount // Assumption: instances are sorted such that labels are non-increasing int CConc::ComputePairCount(const double* const adY, unsigned int cNumItems) { if (!AnyPairs(adY, cNumItems)) { return 0; } double dLabelCurrent = adY[0]; int iLabelEnd = 0; // End of range with higher labels int cPairs = 0; for (unsigned int j = 1; j < cNumItems; j++) { if (adY[j] != dLabelCurrent) { // i.e., dYj < dLabelCurrent iLabelEnd = j; dLabelCurrent = adY[j]; } // All items in 0 .. iLabelEnd - 1 are better than item j; // i.e, we have pairs (j,0), (j,1), ... (j, iLabelEnd - 1) cPairs += iLabelEnd; } return cPairs; } // Count the number of correctly ranked pairs with different labels double CConc::Measure(const double* const adY, const CRanker& ranker) { double dLabelCurrent = adY[0]; int iLabelEnd = 0; // End of the range with higher labels int cGoodPairs = 0; for (unsigned int j = 1; j < ranker.GetNumItems(); j++) { const double dYj = adY[j]; if (dYj != dLabelCurrent) { // i.e., dYj < dLabelCurrent iLabelEnd = j; dLabelCurrent = dYj; } // All items in 0 .. 
iLabelEnd - 1 are better than this item for (int i = 0; i < iLabelEnd; i++) { if (ranker.GetRank(i) < ranker.GetRank(j)) { cGoodPairs++; } } } return cGoodPairs; } double CConc::SwapCost(int iItemBetter, int iItemWorse, const double* const adY, const CRanker& ranker) const { // Note: this implementation can handle arbitrary non-negative target values. // For binary (0/1) targets, the swap cost would reduce to the much simpler expression: // (int)ranker.GetRank(iItemBetter) - (int)ranker.GetRank(iItemWorse) const unsigned int cRankBetter = ranker.GetRank(iItemBetter); const unsigned int cRankWorse = ranker.GetRank(iItemWorse); // Which one of the two has the higher rank? unsigned int cRankUpper, cRankLower; double dYUpper, dYLower; int cDiff; if (cRankBetter > cRankWorse) { // Concordance increasing cRankUpper = cRankWorse; cRankLower = cRankBetter; dYUpper = adY[iItemWorse]; dYLower = adY[iItemBetter]; cDiff = 1; // The direct impact of the pair (iItemBetter, iItemWorse) } else { // Concordance decreasing cRankUpper = cRankBetter; cRankLower = cRankWorse; dYUpper = adY[iItemBetter]; dYLower = adY[iItemWorse]; cDiff = -1; // // The direct impact of the pair (iItemBetter, iItemWorse) } // Compute indirect impact for pairs involving items in between the two for (unsigned int cRank = cRankUpper + 1; cRank < cRankLower; cRank++) { const double dYi = adY[ranker.GetItem(cRank)]; double dScoreDiff = dYi - dYLower; if (dScoreDiff != 0) { cDiff += (dScoreDiff < 0) ? 1 : -1; } dScoreDiff = dYi - dYUpper; if (dScoreDiff != 0) { cDiff += (dScoreDiff < 0) ? 
-1 : 1; } } return cDiff; } void CNDCG::Init ( unsigned long cMaxGroup, unsigned long cMaxItemsPerGroup, unsigned int cRankCutoff ) { CIRMeasure::Init(cMaxGroup, cMaxItemsPerGroup, cRankCutoff); // Initialize rank weights (note: ranks are 1-based) vecdRankWeight.resize(cMaxItemsPerGroup + 1, 0.0); const unsigned int cMaxRank = std::min((unsigned int)cMaxItemsPerGroup, GetCutoffRank()); // Precompute rank weights for (unsigned int i = 1; i <= cMaxRank; i++) { vecdRankWeight[i] = log((double)2) / log((double)(i+1)); } // Allocate buffer vecdMaxDCG.resize(cMaxGroup + 1, -1.0); } // Sum of target values, weighted by rank weight double CNDCG::Measure(const double* const adY, const CRanker& ranker) { double dScore = 0; for (unsigned int i = 0; i < ranker.GetNumItems(); i++) { dScore += adY[i] * vecdRankWeight[ranker.GetRank(i)]; } return dScore; } double CNDCG::MaxMeasure(unsigned int iGroup, const double* const adY, unsigned int cNumItems) { if (iGroup >= vecdMaxDCG.size()) { // Allocate additional space // (We should never get here if CPairwise::Initialize has been called before, as expected) vecdMaxDCG.resize(iGroup + 1, -1.0); } if (vecdMaxDCG[iGroup] < 0.0) { // Not initialized if (!AnyPairs(adY, cNumItems)) { // No training pairs exist vecdMaxDCG[iGroup] = 0.0; } else { // Compute maximum possible DCG. // Note: By assumption, items are pre-sorted by descending score. double dScore = 0; unsigned int i = 0; while (i < cNumItems && adY[i] > 0) { // Note: Due to sorting, we can terminate early for a zero score. 
dScore += adY[i] * vecdRankWeight[i + 1]; i++; } vecdMaxDCG[iGroup] = dScore; #ifdef NOISY_DEBUG if (vecdMaxDCG[iGroup] == 0) { Rprintf("max score is 0: iGroup = %d, maxScore = %f, sz = %d\n", iGroup, vecdMaxDCG[iGroup], ranker.GetNumItems()); assert(false); } #endif } } return vecdMaxDCG[iGroup]; } double CNDCG::SwapCost(int iItemBetter, int iItemWorse, const double* const adY, const CRanker& ranker) const { const unsigned int cRanki = ranker.GetRank(iItemBetter); const unsigned int cRankj = ranker.GetRank(iItemWorse); return (vecdRankWeight[cRanki] - vecdRankWeight[cRankj]) * (adY[iItemBetter] - adY[iItemWorse]); } // Auxiliary function to find the top rank of a positive item (cRankTop), and the number of positive items (cPos) inline void TopRankPos(const double* const adY, const CRanker& ranker, unsigned int& cRankTop, unsigned int& cPos) { const unsigned int cNumItems = ranker.GetNumItems(); cRankTop = cNumItems + 1; // Ranks are 1-based for (cPos = 0; cPos < cNumItems; cPos++) { if (adY[cPos] <= 0.0) { // All subsequent items are zero, because of presorting return; } cRankTop = min(cRankTop, ranker.GetRank(cPos)); } } double CMRR::Measure(const double* const adY, const CRanker& ranker) { unsigned int cRankTop, cPos; TopRankPos(adY, ranker, cRankTop, cPos); const unsigned int cNumItems = min(ranker.GetNumItems(), GetCutoffRank()); if (cRankTop >= cNumItems + 1) { // No positive item found return 0.0; } // Ranks start at 1 return 1.0 / cRankTop; } double CMRR::SwapCost(int iItemPos, int iItemNeg, const double* const adY, const CRanker& ranker) const { unsigned int cRankTop, cPos; TopRankPos(adY, ranker, cRankTop, cPos); const unsigned int cNumItems = ranker.GetNumItems(); if (cRankTop >= cNumItems + 1 // No positive item (ranks are 1-based) || cPos >= cNumItems) // No negative item { return 0.0; } const unsigned int cRankPos = ranker.GetRank(iItemPos); const unsigned int cRankNeg = ranker.GetRank(iItemNeg); const unsigned int cCutoffRank = GetCutoffRank(); const 
double dMeasureCurrent = (cRankTop > cCutoffRank) ? 0.0 : 1.0 / cRankTop; const double dMeasureNeg = (cRankNeg > cCutoffRank) ? 0.0 : 1.0 / cRankNeg; // Only pairs where the negative item is above the top positive result, // or else where the positive item *is* the top item, can change the MRR return ((cRankNeg < cRankTop || cRankPos == cRankTop) ? (dMeasureNeg - dMeasureCurrent) : 0.0); } void CMAP::Init ( unsigned long cMaxGroup, unsigned long cMaxItemsPerGroup, unsigned int cRankCutoff ) { CIRMeasure::Init(cMaxGroup, cMaxItemsPerGroup, cRankCutoff); // Allocate rank buffer (note: ranks are 1-based) veccRankPos.resize(cMaxItemsPerGroup + 1); } // Auxiliary function to find the sorted ranks of positive items (veccRankPos), and their number (cPos) inline void SortRankPos(const double* const adY, const CRanker& ranker, vector& veccRankPos, unsigned int& cPos) { // Store all ranks of positive items in veccRankPos for (cPos = 0; cPos < ranker.GetNumItems(); cPos++) { if (adY[cPos] <= 0.0) { // All subsequent items are zero, because of presorting break; } veccRankPos[cPos] = ranker.GetRank(cPos); } sort(veccRankPos.begin(), veccRankPos.begin() + cPos); } double CMAP::SwapCost(int iItemPos, int iItemNeg, const double* const adY, const CRanker& ranker) const { unsigned int cPos; SortRankPos(adY, ranker, veccRankPos, cPos); if (cPos == 0) { return 0.0; } // Now veccRankPos[i] is the i-th highest rank of a positive item, and // cPos is the total number of positive items. 
const int iRankItemPos = ranker.GetRank(iItemPos); const int iRankItemNeg = ranker.GetRank(iItemNeg); // Search for the position of the two items to swap const vector::iterator itItemPos = upper_bound(veccRankPos.begin(), veccRankPos.begin() + cPos, iRankItemPos); const vector::iterator itItemNeg = upper_bound(veccRankPos.begin(), veccRankPos.begin() + cPos, iRankItemNeg); // The number of positive items up to and including iItemPos const unsigned int cNumPosNotBelowItemPos = (unsigned int)(itItemPos - veccRankPos.begin()); // The number of positive items up to iItemNeg (Note: Cannot include iItemNeg itself) const unsigned int cNumPosAboveItemNeg = (unsigned int)(itItemNeg - veccRankPos.begin()); // Range of indices of positive items between iRankItemPos and iRankItemNeg (exclusively) int cIntermediateHigh, cIntermediateLow; // Current contribution of iItemPos double dContribBefore = (double) cNumPosNotBelowItemPos / iRankItemPos; double dSign, dContribAfter; if (iRankItemNeg > iRankItemPos) { // MAP is decreasing dSign = -1.0; // The first positive item after iRankItemPos cIntermediateLow = cNumPosNotBelowItemPos; // The last positive item before iRankItemNeg cIntermediateHigh = cNumPosAboveItemNeg - 1; // Note: iItemPos already counted in cNumPosAboveItemNeg dContribAfter = (double)cNumPosAboveItemNeg / iRankItemNeg; } else { // MAP is increasing dSign = 1.0; // The first positive result after iRankItemNeg cIntermediateLow = cNumPosAboveItemNeg; // The first positive result after iRankItemPos, minus iItemPos itself cIntermediateHigh = cNumPosNotBelowItemPos - 2; // Note: iItemPos not yet counted in cNumPosAboveItemNeg dContribAfter = (double) (cNumPosAboveItemNeg + 1) / iRankItemNeg; } // The direct effect of switching iItemPos double dDiff = dContribAfter - dContribBefore; // The indirect effect for all items in between the two items for (int j = cIntermediateLow; j <= cIntermediateHigh; j++) { dDiff += dSign / veccRankPos[j]; } return dDiff / cPos; } double 
CMAP::Measure(const double* const adY, const CRanker& ranker) { unsigned int cPos; SortRankPos(adY, ranker, veccRankPos, cPos); if (cPos == 0) { return 0.0; } // Now veccRankPos[i] is the i-th highest rank of a positive item double dPrec = 0.0; for (unsigned int j = 0; j < cPos; j++) { dPrec += double(j + 1) / veccRankPos[j]; } return dPrec / cPos; } CPairwise::CPairwise(const char* szIRMeasure) { // Construct the IR Measure if (!strcmp(szIRMeasure, "conc")) { pirm = new CConc(); } else if (!strcmp(szIRMeasure, "map")) { pirm = new CMAP(); } else if (!strcmp(szIRMeasure, "mrr")) { pirm = new CMRR(); } else { if (strcmp(szIRMeasure, "ndcg")) { Rprintf("Unknown IR measure '%s' in initialization, using 'ndcg' instead\n", szIRMeasure); } pirm = new CNDCG(); } } CPairwise::~CPairwise() { delete pirm; } // Auxiliary function for addition of optional offset parameter inline const double* OffsetVector(const double* const adX, const double* const adOffset, unsigned int iStart, unsigned int iEnd, vector& vecBuffer) { if (adOffset == NULL) { // Optional second argument is not set, just return first one return adX + iStart; } else { for (unsigned int i = iStart, iOut = 0; i < iEnd; i++, iOut++) { vecBuffer[iOut] = adX[i] + adOffset[i]; } return &vecBuffer[0]; } } GBMRESULT CPairwise::ComputeWorkingResponse ( double *adY, double *adGroup, double *adOffset, double *adF, double *adZ, double *adWeight, bool *afInBag, unsigned long nTrain, int cIdxOff ) { #ifdef NOISY_DEBUG Rprintf("compute working response, nTrain = %u, cIdxOff = %d\n", nTrain, cIdxOff); #endif if (nTrain <= 0) { return GBM_OK; } try { // Iterate through all groups, compute gradients unsigned int iItemStart = 0; unsigned int iItemEnd = 0; while (iItemStart < nTrain) { adZ[iItemEnd] = 0; vecdHessian[iItemEnd] = 0; const double dGroup = adGroup[iItemStart]; // Find end of current group, initialize working response for (iItemEnd = iItemStart + 1; iItemEnd < nTrain && adGroup[iItemEnd] == dGroup; iItemEnd++) { // 
Clear gradients from last iteration adZ[iItemEnd] = 0; vecdHessian[iItemEnd] = 0; } #ifdef NOISY_DEBUG // Check sorting for (unsigned int i = iItemStart; i < iItemEnd-1; i++) { assert(adY[i] >= adY[i+1]); } #endif if (afInBag[iItemStart]) { // Group is part of the training set const int cNumItems = iItemEnd - iItemStart; // If offset given, add up current scores const double* adFPlusOffset = OffsetVector(adF, adOffset, iItemStart, iItemEnd, vecdFPlusOffset); // Accumulate gradients ComputeLambdas((int)dGroup, cNumItems, adY + iItemStart, adFPlusOffset, adWeight + iItemStart, adZ + iItemStart, &vecdHessian[iItemStart]); } // Next group iItemStart = iItemEnd; } } catch (std::bad_alloc&) { return GBM_OUTOFMEMORY; } return GBM_OK; } // Referring to MSR-TR-2010-82-2, section 7 (see also the vignette): // // Let P be the set of pairs (i,j) where Y(i)>Y(j) (i is better than j). // The approximation to the IR measure is the utility function C (to be maximized) // C // = \Sum_{(i,j) in P} |Delta Z_ij| C(s_i - s_j) // = \Sum_{(i,j) in P} |Delta Z_ij| / (1 + exp(-(s_i - s_j))), // where |Delta Z_ij| is the cost of swapping (only) i and j in the current ranking, // and s_i, s_j are the prediction scores (sum of the tree predictions) for items // i and j. // // For (i,j) in P, define // lambda_ij // = dC(s_i-s_j) / ds_i // = - |Delta Z_ij| / (1 + exp(s_i - s_j)) // = - |Delta Z_ij| * rho_ij, // with // rho_ij = - lambda_ij / |Delta Z_ij| = 1 / (1 + exp(s_i - s_j)) // // So the gradient of C with respect to s_i is // dC / ds_i // =(def) lambda_i // = \Sum_{j|(i,j) in P} lambda_ij - \Sum_{j|(j,i) in P} lambda_ji // = - \Sum_{j|(i,j) in P} |Delta Z_ij| * rho_ij // + \Sum_{j|(j,i) in P} |Delta Z_ji| * rho_ji; // it is stored in adZ[i]. // // The second derivative is // d^2C / ds_i^2 // =(def) gamma_i // = \Sum_{j|(i,j) in P} |Delta Z_ij| * rho_ij * (1-rho_ij) // - \Sum_{j|(j,i) in P} |Delta Z_ji| * rho_ji * (1-rho_ji); // it is stored in vecdHessian[i]. 
// // The Newton step for a particular leaf node is (a fraction of) // g'/g'', where g' (resp. g'') is the sum of dC/ds_i = lambda_i // (resp. d^2C/d^2s_i = gamma_i) over all instances falling into this leaf. This // summation is calculated later in CPairwise::FitBestConstant(). void CPairwise::ComputeLambdas(int iGroup, unsigned int cNumItems, const double* const adY, const double* const adF, const double* const adWeight, double* adZ, double* adDeriv) { // Assumption: Weights are constant within group if (adWeight[0] <= 0) { return; } // Normalize for maximum achievable group score const double dMaxScore = pirm->MaxMeasure(iGroup, adY, cNumItems); if (dMaxScore <= 0.0) { // No pairs return; } // Rank items by current score ranker.SetGroupScores(adF, cNumItems); ranker.Rank(); double dLabelCurrent = adY[0]; // First index of instance that has dLabelCurrent // (i.e., each smaller index corresponds to better item) unsigned int iLabelCurrentStart = 0; // Number of pairs with unequal labels unsigned int cPairs = 0; #ifdef NOISY_DEBUG double dMeasureBefore = pirm->Measure(adY, ranker); #endif for (unsigned int j = 1; j < cNumItems; j++) { const double dYj = adY[j]; if (dYj != dLabelCurrent) { iLabelCurrentStart = j; dLabelCurrent = dYj; } for (unsigned int i = 0; i < iLabelCurrentStart; i++) { // Instance i is better than j const double dSwapCost = fabs(pirm->SwapCost(i, j, adY, ranker)); #ifdef NOISY_DEBUG double dDelta = fabs(pirm->SwapCost(i, j, adY, ranker)); const int cRanki = ranker.GetRank(i); const int cRankj = ranker.GetRank(j); ranker.SetRank(i, cRankj); ranker.SetRank(j, cRanki); double dMeasureAfter = pirm->Measure(adY, ranker); if (fabs(dMeasureBefore-dMeasureAfter) - dDelta > 1e-5) { Rprintf("%f %f %f %f %f %d %d\n", pirm->SwapCost(i, j, adY, ranker), dMeasureBefore, dMeasureAfter, dMeasureBefore - dMeasureAfter, dDelta , i, j); for (unsigned int k = 0; k < cNumItems; k++) { Rprintf("%d\t%d\t%f\t%f\n", k, ranker.GetRank(k), adY[k], adF[k]); } 
assert(false); } assert(fabs(dMeasureBefore - dMeasureAfter) - fabs(dDelta) < 1e-5); ranker.SetRank(j, cRankj); ranker.SetRank(i, cRanki); #endif assert(isfinite(dSwapCost)); if (dSwapCost > 0.0) { cPairs++; const double dRhoij = 1.0 / (1.0 + exp(adF[i]- adF[j])) ; assert(isfinite(dRhoij)); const double dLambdaij = dSwapCost * dRhoij; adZ[i] += dLambdaij; adZ[j] -= dLambdaij; const double dDerivij = dLambdaij * (1.0 - dRhoij); assert(dDerivij >= 0); adDeriv[i] += dDerivij; adDeriv[j] += dDerivij; } } } if (cPairs > 0) { // Normalize for number of training pairs const double dQNorm = 1.0 / (dMaxScore * cPairs); for (unsigned int j = 0; j < cNumItems; j++) { adZ[j] *= dQNorm; adDeriv[j] *= dQNorm; } } } GBMRESULT CPairwise::Initialize ( double *adY, double *adGroup, double *adOffset, double *adWeight, unsigned long cLength ) { if (cLength <= 0) { return GBM_OK; } try { // Allocate memory for derivative buffer vecdHessian.resize(cLength); // Count the groups and number of items per group unsigned int cMaxItemsPerGroup = 0; double dMaxGroup = 0; unsigned int iItemStart = 0; unsigned int iItemEnd = 0; while (iItemStart < cLength) { const double dGroup = adGroup[iItemStart]; // Find end of current group for (iItemEnd = iItemStart + 1; iItemEnd < cLength && adGroup[iItemEnd] == dGroup; iItemEnd++); const unsigned int cNumItems = iItemEnd - iItemStart; if (cNumItems > cMaxItemsPerGroup) { cMaxItemsPerGroup = cNumItems; } if (dGroup > dMaxGroup) { dMaxGroup = dGroup; } // Next group iItemStart = iItemEnd; } // Allocate buffer for offset addition vecdFPlusOffset.resize(cMaxItemsPerGroup); // Allocate ranker memory ranker.Init(cMaxItemsPerGroup); // Allocate IR measure memory // The last element of adGroup specifies the cutoff // (zero means no cutoff) unsigned int cRankCutoff = cMaxItemsPerGroup; if (adGroup[cLength] > 0) { cRankCutoff = (unsigned int)adGroup[cLength]; } pirm->Init((unsigned long)dMaxGroup, cMaxItemsPerGroup, cRankCutoff); #ifdef NOISY_DEBUG 
Rprintf("Initialization: instances=%ld, groups=%u, max items per group=%u, rank cutoff=%u, offset specified: %d\n", cLength, (unsigned long)dMaxGroup, cMaxItemsPerGroup, cRankCutoff, (adOffset != NULL)); #endif } catch (std::bad_alloc&) { return GBM_OUTOFMEMORY; } return GBM_OK; } GBMRESULT CPairwise::InitF ( double *adY, double *adGroup, double *adOffset, double *adWeight, double &dInitF, unsigned long cLength ) { dInitF = 0.0; return GBM_OK; } double CPairwise::Deviance ( double *adY, double *adGroup, double *adOffset, double *adWeight, double *adF, unsigned long cLength, int cIdxOff ) { #ifdef NOISY_DEBUG Rprintf("Deviance, cLength = %u, cIdxOff = %d\n", cLength, cIdxOff); #endif if (cLength <= 0) { return 0; } double dL = 0.0; double dW = 0.0; unsigned int iItemStart = cIdxOff; unsigned int iItemEnd = iItemStart; const unsigned int cEnd = cLength + cIdxOff; while (iItemStart < cEnd) { const double dGroup = adGroup[iItemStart]; const double dWi = adWeight[iItemStart]; // Find end of current group for (iItemEnd = iItemStart + 1; iItemEnd < cEnd && adGroup[iItemEnd] == dGroup; iItemEnd++) ; const int cNumItems = iItemEnd - iItemStart; const double dMaxScore = pirm->MaxMeasure((int)dGroup, adY + iItemStart, cNumItems); if (dMaxScore > 0.0) { // Rank items by current score // If offset given, add up current scores const double* adFPlusOffset = OffsetVector(adF, adOffset, iItemStart, iItemEnd, vecdFPlusOffset); ranker.SetGroupScores(adFPlusOffset, cNumItems); ranker.Rank(); dL += dWi * pirm->Measure(adY + iItemStart, ranker) / dMaxScore; dW += dWi; } // Next group iItemStart = iItemEnd; } // Loss = 1 - utility return 1.0 - dL / dW; } GBMRESULT CPairwise::FitBestConstant ( double *adY, double *adGroup, double *adOffset, double *adW, double *adF, double *adZ, unsigned long *aiNodeAssign, unsigned long nTrain, VEC_P_NODETERMINAL vecpTermNodes, unsigned long cTermNodes, unsigned long cMinObsInNode, bool *afInBag, double *adFadj, int cIdxOff ) { #ifdef NOISY_DEBUG 
Rprintf("FitBestConstant, nTrain = %u, cIdxOff = %d, cTermNodes = %d, \n", nTrain, cIdxOff, cTermNodes); #endif // Assumption: ComputeWorkingResponse() has been executed before with // the same arguments try { // Allocate space for numerators and denominators, and set to zero vecdNum.reserve(cTermNodes); vecdDenom.reserve(cTermNodes); for (unsigned int i = 0; i < cTermNodes; i++) { vecdNum[i] = 0.0; vecdDenom[i] = 0.0; } } catch (std::bad_alloc&) { return GBM_OUTOFMEMORY; } for (unsigned int iObs = 0; iObs < nTrain; iObs++) { if (afInBag[iObs]) { assert(isfinite(adW[iObs])); assert(isfinite(adZ[iObs])); assert(isfinite(vecdHessian[iObs])); vecdNum[aiNodeAssign[iObs]] += adW[iObs] * adZ[iObs]; vecdDenom[aiNodeAssign[iObs]] += adW[iObs] * vecdHessian[iObs]; } } for (unsigned int iNode = 0; iNode < cTermNodes; iNode++) { if (vecpTermNodes[iNode] != NULL) { vecpTermNodes[iNode]->dPrediction = vecdNum[iNode]; if (vecdDenom[iNode] <= 0.0) { vecpTermNodes[iNode]->dPrediction = 0.0; } else { vecpTermNodes[iNode]->dPrediction = vecdNum[iNode]/vecdDenom[iNode]; } } } return GBM_OK; } double CPairwise::BagImprovement ( double *adY, double *adGroup, double *adOffset, double *adWeight, double *adF, double *adFadj, bool *afInBag, double dStepSize, unsigned long nTrain ) { #ifdef NOISY_DEBUG Rprintf("BagImprovement, nTrain = %u\n", nTrain); #endif if (nTrain <= 0) { return 0; } double dL = 0.0; double dW = 0.0; unsigned int iItemStart = 0; unsigned int iItemEnd = 0; while (iItemStart < nTrain) { const double dGroup = adGroup[iItemStart]; // Find end of current group for (iItemEnd = iItemStart + 1; iItemEnd < nTrain && adGroup[iItemEnd] == dGroup; iItemEnd++) ; if (!afInBag[iItemStart]) { // Group was held out of training set const unsigned int cNumItems = iItemEnd - iItemStart; const double dMaxScore = pirm->MaxMeasure((int)dGroup, adY + iItemStart, cNumItems); if (dMaxScore > 0.0) { // If offset given, add up current scores const double* adFPlusOffset = OffsetVector(adF, 
adOffset, iItemStart, iItemEnd, vecdFPlusOffset); // Compute score according to old score, adF ranker.SetGroupScores(adFPlusOffset, cNumItems); ranker.Rank(); const double dOldScore = pirm->Measure(adY + iItemStart, ranker); // Compute score according to new score: adF' = adF + dStepSize * adFadj for (unsigned int i = 0; i < cNumItems; i++) { ranker.AddToScore(i, adFadj[i+iItemStart] * dStepSize); } const double dWi = adWeight[iItemStart]; if (ranker.Rank()) { // Ranking changed const double dNewScore = pirm->Measure(adY + iItemStart, ranker); dL += dWi * (dNewScore - dOldScore) / dMaxScore; } dW += dWi; } } // Next group iItemStart = iItemEnd; } return dL / dW; } gbm/src/node_terminal.h0000644000176200001440000000402612143232747014606 0ustar liggesusers//------------------------------------------------------------------------------ // GBM by Greg Ridgeway Copyright (C) 2003 // // File: node_terminal.h // // License: GNU GPL (version 2 or later) // // Contents: terminal node class // // Owner: gregr@rand.org // // History: 3/26/2001 gregr created // 2/14/2003 gregr: adapted for R implementation // //------------------------------------------------------------------------------ #ifndef NODETERMINAL_H #define NODETERMINAL_H #include #include "dataset.h" #include "node.h" using namespace std; class CNodeTerminal : public CNode { public: CNodeTerminal(); ~CNodeTerminal(); GBMRESULT Adjust(unsigned long cMinObsInNode); GBMRESULT PrintSubtree(unsigned long cIndent); GBMRESULT TransferTreeToRList(int &iNodeID, CDataset *pData, int *aiSplitVar, double *adSplitPoint, int *aiLeftNode, int *aiRightNode, int *aiMissingNode, double *adErrorReduction, double *adWeight, double *adPred, VEC_VEC_CATEGORIES &vecSplitCodes, int cCatSplitsOld, double dShrinkage); GBMRESULT ApplyShrinkage(double dLambda); GBMRESULT Predict(CDataset *pData, unsigned long i, double &dFadj); GBMRESULT Predict(double *adX, unsigned long cRow, unsigned long cCol, unsigned long iRow, double &dFadj); 
GBMRESULT GetVarRelativeInfluence(double *adRelInf); GBMRESULT RecycleSelf(CNodeFactory *pNodeFactory); }; typedef CNodeTerminal *PCNodeTerminal; typedef vector VEC_P_NODETERMINAL; #endif // NODETERMINAL_H gbm/src/node_terminal.cpp0000644000176200001440000000432312143232747015141 0ustar liggesusers//------------------------------------------------------------------------------ // GBM by Greg Ridgeway Copyright (C) 2003 // // File: node_terminal.cpp // //------------------------------------------------------------------------------ #include "node_terminal.h" #include "node_factory.h" CNodeTerminal::CNodeTerminal() { isTerminal = true; } CNodeTerminal::~CNodeTerminal() { #ifdef NOISY_DEBUG Rprintf("terminal destructor\n"); #endif } GBMRESULT CNodeTerminal::Adjust ( unsigned long cMinObsInNode ) { return GBM_OK; } GBMRESULT CNodeTerminal::ApplyShrinkage ( double dLambda ) { GBMRESULT hr = GBM_OK; dPrediction *= dLambda; return hr; } GBMRESULT CNodeTerminal::Predict ( CDataset *pData, unsigned long iRow, double &dFadj ) { dFadj = dPrediction; return GBM_OK; } GBMRESULT CNodeTerminal::Predict ( double *adX, unsigned long cRow, unsigned long cCol, unsigned long iRow, double &dFadj ) { dFadj = dPrediction; return GBM_OK; } GBMRESULT CNodeTerminal::PrintSubtree ( unsigned long cIndent ) { unsigned long i = 0; for(i=0; i< cIndent; i++) Rprintf(" "); Rprintf("N=%f, Prediction=%f *\n", dTrainW, dPrediction); return GBM_OK; } GBMRESULT CNodeTerminal::GetVarRelativeInfluence ( double *adRelInf ) { return GBM_OK; } GBMRESULT CNodeTerminal::RecycleSelf ( CNodeFactory *pNodeFactory ) { pNodeFactory->RecycleNode(this); return GBM_OK; }; GBMRESULT CNodeTerminal::TransferTreeToRList ( int &iNodeID, CDataset *pData, int *aiSplitVar, double *adSplitPoint, int *aiLeftNode, int *aiRightNode, int *aiMissingNode, double *adErrorReduction, double *adWeight, double *adPred, VEC_VEC_CATEGORIES &vecSplitCodes, int cCatSplitsOld, double dShrinkage ) { GBMRESULT hr = GBM_OK; aiSplitVar[iNodeID] 
= -1; adSplitPoint[iNodeID] = dShrinkage*dPrediction; aiLeftNode[iNodeID] = -1; aiRightNode[iNodeID] = -1; aiMissingNode[iNodeID] = -1; adErrorReduction[iNodeID] = 0.0; adWeight[iNodeID] = dTrainW; adPred[iNodeID] = dShrinkage*dPrediction; iNodeID++; return hr; } gbm/src/node_search.h0000644000176200001440000000625112143232747014242 0ustar liggesusers//------------------------------------------------------------------------------ // GBM by Greg Ridgeway Copyright (C) 2003 // // File: node_search.h // // License: GNU GPL (version 2 or later) // // Contents: does the searching for where to split a node // // Owner: gregr@rand.org // // History: 3/26/2001 gregr created // 2/14/2003 gregr: adapted for R implementation // //------------------------------------------------------------------------------ #ifndef NODESEARCH_H #define NODESEARCH_H #include "node_factory.h" #include "dataset.h" using namespace std; class CNodeSearch { public: CNodeSearch(); ~CNodeSearch(); GBMRESULT Initialize(unsigned long cMinObsInNode); GBMRESULT IncorporateObs(double dX, double dZ, double dW, long lMonotone); GBMRESULT Set(double dSumZ, double dTotalW, unsigned long cTotalN, CNodeTerminal *pThisNode, CNode **ppParentPointerToThisNode, CNodeFactory *pNodeFactory); GBMRESULT ResetForNewVar(unsigned long iWhichVar, long cVarClasses); double BestImprovement() { return dBestImprovement; } GBMRESULT SetToSplit() { fIsSplit = true; return GBM_OK; }; GBMRESULT SetupNewNodes(PCNodeNonterminal &pNewSplitNode, PCNodeTerminal &pNewLeftNode, PCNodeTerminal &pNewRightNode, PCNodeTerminal &pNewMissingNode); GBMRESULT EvaluateCategoricalSplit(); GBMRESULT WrapUpCurrentVariable(); double ThisNodePrediction() {return pThisNode->dPrediction;} bool operator<(const CNodeSearch &ns) {return dBestImprovementcMinObsInNode = cMinObsInNode; Cleanup: return hr; Error: goto Cleanup; } GBMRESULT CNodeSearch::IncorporateObs ( double dX, double dZ, double dW, long lMonotone ) { GBMRESULT hr = GBM_OK; static double dWZ 
= 0.0; if(fIsSplit) goto Cleanup; dWZ = dW*dZ; if(ISNA(dX)) { dCurrentMissingSumZ += dWZ; dCurrentMissingTotalW += dW; cCurrentMissingN++; dCurrentRightSumZ -= dWZ; dCurrentRightTotalW -= dW; cCurrentRightN--; } else if(cCurrentVarClasses == 0) // variable is continuous { if(dLastXValue > dX) { error("Observations are not in order. gbm() was unable to build an index for the design matrix. Could be a bug in gbm or an unusual data type in data.\n"); hr = GBM_FAIL; goto Error; } // Evaluate the current split // the newest observation is still in the right child dCurrentSplitValue = 0.5*(dLastXValue + dX); if((dLastXValue != dX) && (cCurrentLeftN >= cMinObsInNode) && (cCurrentRightN >= cMinObsInNode) && ((lMonotone==0) || (lMonotone*(dCurrentRightSumZ*dCurrentLeftTotalW - dCurrentLeftSumZ*dCurrentRightTotalW) > 0))) { dCurrentImprovement = CNode::Improvement(dCurrentLeftTotalW,dCurrentRightTotalW, dCurrentMissingTotalW, dCurrentLeftSumZ,dCurrentRightSumZ, dCurrentMissingSumZ); if(dCurrentImprovement > dBestImprovement) { iBestSplitVar = iCurrentSplitVar; dBestSplitValue = dCurrentSplitValue; cBestVarClasses = 0; dBestLeftSumZ = dCurrentLeftSumZ; dBestLeftTotalW = dCurrentLeftTotalW; cBestLeftN = cCurrentLeftN; dBestRightSumZ = dCurrentRightSumZ; dBestRightTotalW = dCurrentRightTotalW; cBestRightN = cCurrentRightN; dBestImprovement = dCurrentImprovement; } } // now move the new observation to the left // if another observation arrives we will evaluate this dCurrentLeftSumZ += dWZ; dCurrentLeftTotalW += dW; cCurrentLeftN++; dCurrentRightSumZ -= dWZ; dCurrentRightTotalW -= dW; cCurrentRightN--; dLastXValue = dX; } else // variable is categorical, evaluates later { adGroupSumZ[(unsigned long)dX] += dWZ; adGroupW[(unsigned long)dX] += dW; acGroupN[(unsigned long)dX] ++; } Cleanup: return hr; Error: goto Cleanup; } GBMRESULT CNodeSearch::Set ( double dSumZ, double dTotalW, unsigned long cTotalN, CNodeTerminal *pThisNode, CNode **ppParentPointerToThisNode, CNodeFactory 
*pNodeFactory ) { GBMRESULT hr = GBM_OK; dInitSumZ = dSumZ; dInitTotalW = dTotalW; cInitN = cTotalN; dBestLeftSumZ = 0.0; dBestLeftTotalW = 0.0; cBestLeftN = 0; dCurrentLeftSumZ = 0.0; dCurrentLeftTotalW = 0.0; cCurrentLeftN = 0; dBestRightSumZ = dSumZ; dBestRightTotalW = dTotalW; cBestRightN = cTotalN; dCurrentRightSumZ = 0.0; dCurrentRightTotalW = dTotalW; cCurrentRightN = cTotalN; dBestMissingSumZ = 0.0; dBestMissingTotalW = 0.0; cBestMissingN = 0; dCurrentMissingSumZ = 0.0; dCurrentMissingTotalW = 0.0; cCurrentMissingN = 0; dBestImprovement = 0.0; iBestSplitVar = UINT_MAX; dCurrentImprovement = 0.0; iCurrentSplitVar = UINT_MAX; dCurrentSplitValue = -HUGE_VAL; fIsSplit = false; this->pThisNode = pThisNode; this->ppParentPointerToThisNode = ppParentPointerToThisNode; this->pNodeFactory = pNodeFactory; return hr; } GBMRESULT CNodeSearch::ResetForNewVar ( unsigned long iWhichVar, long cCurrentVarClasses ) { GBMRESULT hr = GBM_OK; long i=0; if(fIsSplit) goto Cleanup; for(i=0; icCurrentVarClasses = cCurrentVarClasses; dCurrentLeftSumZ = 0.0; dCurrentLeftTotalW = 0.0; cCurrentLeftN = 0; dCurrentRightSumZ = dInitSumZ; dCurrentRightTotalW = dInitTotalW; cCurrentRightN = cInitN; dCurrentMissingSumZ = 0.0; dCurrentMissingTotalW = 0.0; cCurrentMissingN = 0; dCurrentImprovement = 0.0; dLastXValue = -HUGE_VAL; Cleanup: return hr; } GBMRESULT CNodeSearch::WrapUpCurrentVariable() { GBMRESULT hr = GBM_OK; if(iCurrentSplitVar == iBestSplitVar) { if(cCurrentMissingN > 0) { dBestMissingSumZ = dCurrentMissingSumZ; dBestMissingTotalW = dCurrentMissingTotalW; cBestMissingN = cCurrentMissingN; } else // DEBUG: consider a weighted average with parent node? 
{ dBestMissingSumZ = dInitSumZ; dBestMissingTotalW = dInitTotalW; cBestMissingN = 0; } } return hr; } GBMRESULT CNodeSearch::EvaluateCategoricalSplit() { GBMRESULT hr = GBM_OK; long i=0; long j=0; unsigned long cFiniteMeans = 0; if(fIsSplit) goto Cleanup; if(cCurrentVarClasses == 0) { hr = GBM_INVALIDARG; goto Error; } cFiniteMeans = 0; for(i=0; i1) && ((ULONG)i= cMinObsInNode) && (cCurrentRightN >= cMinObsInNode) && (dCurrentImprovement > dBestImprovement)) { dBestSplitValue = dCurrentSplitValue; if(iBestSplitVar != iCurrentSplitVar) { iBestSplitVar = iCurrentSplitVar; cBestVarClasses = cCurrentVarClasses; for(j=0; jGetNewNodeTerminal(); pNewRightNode = pNodeFactory->GetNewNodeTerminal(); pNewMissingNode = pNodeFactory->GetNewNodeTerminal(); // set up a continuous split if(cBestVarClasses==0) { pNewNodeContinuous = pNodeFactory->GetNewNodeContinuous(); pNewNodeContinuous->dSplitValue = dBestSplitValue; pNewNodeContinuous->iSplitVar = iBestSplitVar; pNewSplitNode = pNewNodeContinuous; } else { // get a new categorical node and its branches pNewNodeCategorical = pNodeFactory->GetNewNodeCategorical(); // set up the categorical split pNewNodeCategorical->iSplitVar = iBestSplitVar; pNewNodeCategorical->cLeftCategory = (ULONG)dBestSplitValue + 1; pNewNodeCategorical->aiLeftCategory = new ULONG[pNewNodeCategorical->cLeftCategory]; for(i=0; icLeftCategory; i++) { pNewNodeCategorical->aiLeftCategory[i] = aiBestCategory[i]; } pNewSplitNode = pNewNodeCategorical; } *ppParentPointerToThisNode = pNewSplitNode; pNewSplitNode->dPrediction = pThisNode->dPrediction; pNewSplitNode->dImprovement = dBestImprovement; pNewSplitNode->dTrainW = pThisNode->dTrainW; pNewSplitNode->pLeftNode = pNewLeftNode; pNewSplitNode->pRightNode = pNewRightNode; pNewSplitNode->pMissingNode = pNewMissingNode; pNewLeftNode->dPrediction = dBestLeftSumZ/dBestLeftTotalW; pNewLeftNode->dTrainW = dBestLeftTotalW; pNewLeftNode->cN = cBestLeftN; pNewRightNode->dPrediction = dBestRightSumZ/dBestRightTotalW; 
pNewRightNode->dTrainW = dBestRightTotalW; pNewRightNode->cN = cBestRightN; pNewMissingNode->dPrediction = dBestMissingSumZ/dBestMissingTotalW; pNewMissingNode->dTrainW = dBestMissingTotalW; pNewMissingNode->cN = cBestMissingN; pThisNode->RecycleSelf(pNodeFactory); return hr; } gbm/src/node_nonterminal.h0000644000176200001440000000470712143232747015327 0ustar liggesusers//------------------------------------------------------------------------------ // GBM by Greg Ridgeway Copyright (C) 2003 // // File: node_nonterminal.h // // License: GNU GPL (version 2 or later) // // Contents: a node in the tree // // Owner: gregr@rand.org // // History: 3/26/2001 gregr created // 2/14/2003 gregr: adapted for R implementation // //------------------------------------------------------------------------------ #ifndef NODENONTERMINAL_H #define NODENONTERMINAL_H #include "node.h" #include "node_terminal.h" class CNodeNonterminal : public CNode { public: CNodeNonterminal(); virtual ~CNodeNonterminal(); virtual GBMRESULT Adjust(unsigned long cMinObsInNode); virtual signed char WhichNode(CDataset *pData, unsigned long iObs) = 0; virtual signed char WhichNode(double *adX, unsigned long cRow, unsigned long cCol, unsigned long iRow) = 0; virtual GBMRESULT TransferTreeToRList(int &iNodeID, CDataset *pData, int *aiSplitVar, double *adSplitPoint, int *aiLeftNode, int *aiRightNode, int *aiMissingNode, double *adErrorReduction, double *adWeight, double *adPred, VEC_VEC_CATEGORIES &vecSplitCodes, int cCatSplitsOld, double dShrinkage) = 0; GBMRESULT Predict(CDataset *pData, unsigned long iRow, double &dFadj); GBMRESULT Predict(double *adX, unsigned long cRow, unsigned long cCol, unsigned long iRow, double &dFadj); GBMRESULT GetVarRelativeInfluence(double *adRelInf); virtual GBMRESULT RecycleSelf(CNodeFactory *pNodeFactory) = 0; CNode *pLeftNode; CNode *pRightNode; CNode *pMissingNode; unsigned long iSplitVar; double dImprovement; }; typedef CNodeNonterminal *PCNodeNonterminal; #endif // 
NODENONTERMINAL_H gbm/src/node_nonterminal.cpp0000644000176200001440000000467512143232747015666 0ustar liggesusers// GBM by Greg Ridgeway Copyright (C) 2003 #include "node_nonterminal.h" CNodeNonterminal::CNodeNonterminal() { pLeftNode = NULL; pRightNode = NULL; iSplitVar = 0; dImprovement = 0.0; pMissingNode = NULL; } CNodeNonterminal::~CNodeNonterminal() { } GBMRESULT CNodeNonterminal::Adjust ( unsigned long cMinObsInNode ) { GBMRESULT hr = GBM_OK; hr = pLeftNode->Adjust(cMinObsInNode); hr = pRightNode->Adjust(cMinObsInNode); if(pMissingNode->isTerminal && (pMissingNode->cN < cMinObsInNode)) { dPrediction = ((pLeftNode->dTrainW)*(pLeftNode->dPrediction) + (pRightNode->dTrainW)*(pRightNode->dPrediction))/ (pLeftNode->dTrainW + pRightNode->dTrainW); pMissingNode->dPrediction = dPrediction; } else { hr = pMissingNode->Adjust(cMinObsInNode); dPrediction = ((pLeftNode->dTrainW)* (pLeftNode->dPrediction) + (pRightNode->dTrainW)* (pRightNode->dPrediction) + (pMissingNode->dTrainW)*(pMissingNode->dPrediction))/ (pLeftNode->dTrainW + pRightNode->dTrainW + pMissingNode->dTrainW); } return hr; } GBMRESULT CNodeNonterminal::Predict ( CDataset *pData, unsigned long iRow, double &dFadj ) { GBMRESULT hr = GBM_OK; signed char schWhichNode = WhichNode(pData,iRow); if(schWhichNode == -1) { hr = pLeftNode->Predict(pData, iRow, dFadj); } else if(schWhichNode == 1) { hr = pRightNode->Predict(pData, iRow, dFadj); } else { hr = pMissingNode->Predict(pData, iRow, dFadj); } return hr; } GBMRESULT CNodeNonterminal::Predict ( double *adX, unsigned long cRow, unsigned long cCol, unsigned long iRow, double &dFadj ) { GBMRESULT hr = GBM_OK; signed char schWhichNode = WhichNode(adX,cRow,cCol,iRow); if(schWhichNode == -1) { hr = pLeftNode->Predict(adX,cRow,cCol,iRow,dFadj); } else if(schWhichNode == 1) { hr = pRightNode->Predict(adX,cRow,cCol,iRow,dFadj); } else { hr = pMissingNode->Predict(adX,cRow,cCol,iRow,dFadj); } return hr; } GBMRESULT CNodeNonterminal::GetVarRelativeInfluence ( double 
*adRelInf ) { GBMRESULT hr = GBM_OK; adRelInf[iSplitVar] += dImprovement; pLeftNode->GetVarRelativeInfluence(adRelInf); pRightNode->GetVarRelativeInfluence(adRelInf); return hr; } gbm/src/node_factory.h0000644000176200001440000000317512143232747014446 0ustar liggesusers//------------------------------------------------------------------------------ // GBM by Greg Ridgeway Copyright (C) 2003 // // File: node_factory.h // // License: GNU GPL (version 2 or later) // // Contents: manager for allocation and destruction of all nodes // // Owner: gregr@rand.org // // History: 3/26/2001 gregr created // 2/14/2003 gregr: adapted for R implementation // //------------------------------------------------------------------------------ #ifndef NODEFACTORY_H #define NODEFACTORY_H #include #include #include "node_terminal.h" #include "node_continuous.h" #include "node_categorical.h" #define NODEFACTORY_NODGBM_RESERVE ((unsigned long)101) using namespace std; class CNodeFactory { public: CNodeFactory(); ~CNodeFactory(); GBMRESULT Initialize(unsigned long cDepth); CNodeTerminal* GetNewNodeTerminal(); CNodeContinuous* GetNewNodeContinuous(); CNodeCategorical* GetNewNodeCategorical(); GBMRESULT RecycleNode(CNodeTerminal *pNode); GBMRESULT RecycleNode(CNodeContinuous *pNode); GBMRESULT RecycleNode(CNodeCategorical *pNode); private: stack TerminalStack; stack ContinuousStack; stack CategoricalStack; CNodeTerminal* pNodeTerminalTemp; CNodeContinuous* pNodeContinuousTemp; CNodeCategorical* pNodeCategoricalTemp; CNodeTerminal aBlockTerminal[NODEFACTORY_NODGBM_RESERVE]; CNodeContinuous aBlockContinuous[NODEFACTORY_NODGBM_RESERVE]; CNodeCategorical aBlockCategorical[NODEFACTORY_NODGBM_RESERVE]; }; #endif // NODEFACTORY_H gbm/src/node_factory.cpp0000644000176200001440000000673312143232747015004 0ustar liggesusers// GBM by Greg Ridgeway Copyright (C) 2003 #include "node_factory.h" CNodeFactory::CNodeFactory() { } CNodeFactory::~CNodeFactory() { #ifdef NOISY_DEBUG Rprintf("destructing node 
factory\n"); #endif } GBMRESULT CNodeFactory::Initialize ( unsigned long cDepth ) { GBMRESULT hr = GBM_OK; unsigned long i = 0; for(i=0; idPrediction = 0.0; } return pNodeTerminalTemp; } CNodeContinuous* CNodeFactory::GetNewNodeContinuous() { if(ContinuousStack.empty()) { #ifdef NOISY_DEBUG Rprintf("Continuous stack is empty\n"); #endif pNodeContinuousTemp = NULL; } else { pNodeContinuousTemp = ContinuousStack.top(); ContinuousStack.pop(); pNodeContinuousTemp->dPrediction = 0.0; pNodeContinuousTemp->dImprovement = 0.0; pNodeContinuousTemp->pMissingNode = NULL; pNodeContinuousTemp->pLeftNode = NULL; pNodeContinuousTemp->pRightNode = NULL; pNodeContinuousTemp->iSplitVar = 0; pNodeContinuousTemp->dSplitValue = 0.0; } return pNodeContinuousTemp; } CNodeCategorical* CNodeFactory::GetNewNodeCategorical() { if(CategoricalStack.empty()) { #ifdef NOISY_DEBUG Rprintf("Categorical stack is empty\n"); #endif pNodeCategoricalTemp = NULL; } else { pNodeCategoricalTemp = CategoricalStack.top(); CategoricalStack.pop(); pNodeCategoricalTemp->dPrediction = 0.0; pNodeCategoricalTemp->dImprovement = 0.0; pNodeCategoricalTemp->pMissingNode = NULL; pNodeCategoricalTemp->pLeftNode = NULL; pNodeCategoricalTemp->pRightNode = NULL; pNodeCategoricalTemp->iSplitVar = 0; pNodeCategoricalTemp->aiLeftCategory = NULL; pNodeCategoricalTemp->cLeftCategory = 0; } return pNodeCategoricalTemp; } GBMRESULT CNodeFactory::RecycleNode ( CNodeTerminal *pNode ) { if(pNode != NULL) { TerminalStack.push(pNode); } return GBM_OK; } GBMRESULT CNodeFactory::RecycleNode ( CNodeContinuous *pNode ) { if(pNode != NULL) { if(pNode->pLeftNode != NULL) pNode->pLeftNode->RecycleSelf(this); if(pNode->pRightNode != NULL) pNode->pRightNode->RecycleSelf(this); if(pNode->pMissingNode != NULL) pNode->pMissingNode->RecycleSelf(this); ContinuousStack.push(pNode); } return GBM_OK; } GBMRESULT CNodeFactory::RecycleNode ( CNodeCategorical *pNode ) { if(pNode != NULL) { if(pNode->pLeftNode != NULL) 
pNode->pLeftNode->RecycleSelf(this); if(pNode->pRightNode != NULL) pNode->pRightNode->RecycleSelf(this); if(pNode->pMissingNode != NULL) pNode->pMissingNode->RecycleSelf(this); if(pNode->aiLeftCategory != NULL) { delete [] pNode->aiLeftCategory; pNode->aiLeftCategory = NULL; } CategoricalStack.push(pNode); } return GBM_OK; } gbm/src/node_continuous.h0000644000176200001440000000351012143232747015176 0ustar liggesusers//------------------------------------------------------------------------------ // GBM by Greg Ridgeway Copyright (C) 2003 // // File: node_continuous.h // // License: GNU GPL (version 2 or later) // // Contents: a node with a continuous split // // Owner: gregr.rand.org // // History: 3/26/2001 gregr created // 2/14/2003 gregr: adapted for R implementation // //------------------------------------------------------------------------------ #ifndef NODECONTINUOUS_H #define NODECONTINUOUS_H #include #include "node_nonterminal.h" class CNodeContinuous : public CNodeNonterminal { public: CNodeContinuous(); ~CNodeContinuous(); GBMRESULT PrintSubtree(unsigned long cIndent); GBMRESULT TransferTreeToRList(int &iNodeID, CDataset *pData, int *aiSplitVar, double *adSplitPoint, int *aiLeftNode, int *aiRightNode, int *aiMissingNode, double *adErrorReduction, double *adWeight, double *adPred, VEC_VEC_CATEGORIES &vecSplitCodes, int cCatSplitsOld, double dShrinkage); signed char WhichNode(CDataset *pData, unsigned long iObs); signed char WhichNode(double *adX, unsigned long cRow, unsigned long cCol, unsigned long iRow); GBMRESULT RecycleSelf(CNodeFactory *pNodeFactory); double dSplitValue; }; typedef CNodeContinuous *PCNodeContinuous; #endif // NODECONTINUOUS_H gbm/src/node_continuous.cpp0000644000176200001440000001211212143232747015527 0ustar liggesusers// GBM by Greg Ridgeway Copyright (C) 2003 #include "node_continuous.h" #include "node_factory.h" CNodeContinuous::CNodeContinuous() { dSplitValue = 0.0; } CNodeContinuous::~CNodeContinuous() { #ifdef NOISY_DEBUG 
Rprintf("continuous destructor\n"); #endif } GBMRESULT CNodeContinuous::PrintSubtree ( unsigned long cIndent ) { GBMRESULT hr = GBM_OK; unsigned long i = 0; for(i=0; i< cIndent; i++) Rprintf(" "); Rprintf("N=%f, Improvement=%f, Prediction=%f, NA pred=%f\n", dTrainW, dImprovement, dPrediction, (pMissingNode == NULL ? 0.0 : pMissingNode->dPrediction)); for(i=0; i< cIndent; i++) Rprintf(" "); Rprintf("V%d < %f\n", iSplitVar, dSplitValue); hr = pLeftNode->PrintSubtree(cIndent+1); for(i=0; i< cIndent; i++) Rprintf(" "); Rprintf("V%d > %f\n", iSplitVar, dSplitValue); hr = pRightNode->PrintSubtree(cIndent+1); for(i=0; i< cIndent; i++) Rprintf(" "); Rprintf("missing\n"); hr = pMissingNode->PrintSubtree(cIndent+1); return hr; } signed char CNodeContinuous::WhichNode ( CDataset *pData, unsigned long iObs ) { signed char ReturnValue = 0; double dX = pData->adX[iSplitVar*(pData->cRows) + iObs]; if(!ISNA(dX)) { if(dX < dSplitValue) { ReturnValue = -1; } else { ReturnValue = 1; } } // if missing value returns 0 return ReturnValue; } signed char CNodeContinuous::WhichNode ( double *adX, unsigned long cRow, unsigned long cCol, unsigned long iRow ) { signed char ReturnValue = 0; double dX = adX[iSplitVar*cRow + iRow]; if(!ISNA(dX)) { if(dX < dSplitValue) { ReturnValue = -1; } else { ReturnValue = 1; } } // if missing value returns 0 return ReturnValue; } GBMRESULT CNodeContinuous::RecycleSelf ( CNodeFactory *pNodeFactory ) { GBMRESULT hr = GBM_OK; pNodeFactory->RecycleNode(this); return hr; }; GBMRESULT CNodeContinuous::TransferTreeToRList ( int &iNodeID, CDataset *pData, int *aiSplitVar, double *adSplitPoint, int *aiLeftNode, int *aiRightNode, int *aiMissingNode, double *adErrorReduction, double *adWeight, double *adPred, VEC_VEC_CATEGORIES &vecSplitCodes, int cCatSplitsOld, double dShrinkage ) { GBMRESULT hr = GBM_OK; int iThisNodeID = iNodeID; aiSplitVar[iThisNodeID] = iSplitVar; adSplitPoint[iThisNodeID] = dSplitValue; adErrorReduction[iThisNodeID] = dImprovement; 
adWeight[iThisNodeID] = dTrainW; adPred[iThisNodeID] = dShrinkage*dPrediction; iNodeID++; aiLeftNode[iThisNodeID] = iNodeID; hr = pLeftNode->TransferTreeToRList(iNodeID, pData, aiSplitVar, adSplitPoint, aiLeftNode, aiRightNode, aiMissingNode, adErrorReduction, adWeight, adPred, vecSplitCodes, cCatSplitsOld, dShrinkage); if(GBM_FAILED(hr)) goto Error; aiRightNode[iThisNodeID] = iNodeID; hr = pRightNode->TransferTreeToRList(iNodeID, pData, aiSplitVar, adSplitPoint, aiLeftNode, aiRightNode, aiMissingNode, adErrorReduction, adWeight, adPred, vecSplitCodes, cCatSplitsOld, dShrinkage); if(GBM_FAILED(hr)) goto Error; aiMissingNode[iThisNodeID] = iNodeID; hr = pMissingNode->TransferTreeToRList(iNodeID, pData, aiSplitVar, adSplitPoint, aiLeftNode, aiRightNode, aiMissingNode, adErrorReduction, adWeight, adPred, vecSplitCodes, cCatSplitsOld, dShrinkage); if(GBM_FAILED(hr)) goto Error; Cleanup: return hr; Error: goto Cleanup; } gbm/src/node_categorical.h0000644000176200001440000000362312143232747015252 0ustar liggesusers//------------------------------------------------------------------------------ // GBM by Greg Ridgeway Copyright (C) 2003 // // File: node_categorical.h // // License: GNU GPL (version 2 or later) // // Contents: a node with a categorical split // // Owner: gregr@rand.org // // History: 3/26/2001 gregr created // 2/14/2003 gregr: adapted for R implementation // //------------------------------------------------------------------------------ #ifndef NODECATEGORICAL_H #define NODECATEGORICAL_H #include #include #include "node_nonterminal.h" class CNodeCategorical : public CNodeNonterminal { public: CNodeCategorical(); ~CNodeCategorical(); GBMRESULT PrintSubtree(unsigned long cIndent); GBMRESULT TransferTreeToRList(int &iNodeID, CDataset *pData, int *aiSplitVar, double *adSplitPoint, int *aiLeftNode, int *aiRightNode, int *aiMissingNode, double *adErrorReduction, double *adWeight, double *adPred, VEC_VEC_CATEGORIES &vecSplitCodes, int cCatSplitsOld, double 
dShrinkage); signed char WhichNode(CDataset *pData, unsigned long iObs); signed char WhichNode(double *adX, unsigned long cRow, unsigned long cCol, unsigned long iRow); GBMRESULT RecycleSelf(CNodeFactory *pNodeFactory); unsigned long *aiLeftCategory; unsigned long cLeftCategory; }; typedef CNodeCategorical *PCNodeCategorical; #endif // NODECATEGORICAL_H gbm/src/node_categorical.cpp0000644000176200001440000001404112143232747015601 0ustar liggesusers// GBM by Greg Ridgeway Copyright (C) 2003 #include "node_categorical.h" #include "node_factory.h" CNodeCategorical::CNodeCategorical() { aiLeftCategory = NULL; cLeftCategory = 0; } CNodeCategorical::~CNodeCategorical() { #ifdef NOISY_DEBUG Rprintf("categorical destructor\n"); #endif if(aiLeftCategory != NULL) { delete [] aiLeftCategory; aiLeftCategory = NULL; } } GBMRESULT CNodeCategorical::PrintSubtree ( unsigned long cIndent ) { GBMRESULT hr = GBM_OK; unsigned long i = 0; for(i=0; i< cIndent; i++) Rprintf(" "); Rprintf("N=%f, Improvement=%f, Prediction=%f, NA pred=%f\n", dTrainW, dImprovement, dPrediction, (pMissingNode == NULL ? 
0.0 : pMissingNode->dPrediction)); for(i=0; i< cIndent; i++) Rprintf(" "); Rprintf("V%d in ",iSplitVar); for(i=0; iPrintSubtree(cIndent+1); for(i=0; i< cIndent; i++) Rprintf(" "); Rprintf("V%d not in ",iSplitVar); for(i=0; iPrintSubtree(cIndent+1); for(i=0; i< cIndent; i++) Rprintf(" "); Rprintf("missing\n"); hr = pMissingNode->PrintSubtree(cIndent+1); return hr; } signed char CNodeCategorical::WhichNode ( CDataset *pData, unsigned long iObs ) { signed char ReturnValue = 0; double dX = pData->adX[iSplitVar*(pData->cRows) + iObs]; if(!ISNA(dX)) { if(std::find(aiLeftCategory, aiLeftCategory+cLeftCategory, (ULONG)dX) != aiLeftCategory+cLeftCategory) { ReturnValue = -1; } else { ReturnValue = 1; } } // if missing value returns 0 return ReturnValue; } signed char CNodeCategorical::WhichNode ( double *adX, unsigned long cRow, unsigned long cCol, unsigned long iRow ) { signed char ReturnValue = 0; double dX = adX[iSplitVar*cRow + iRow]; if(!ISNA(dX)) { if(std::find(aiLeftCategory, aiLeftCategory+cLeftCategory, (ULONG)dX) != aiLeftCategory+cLeftCategory) { ReturnValue = -1; } else { ReturnValue = 1; } } // if missing value returns 0 return ReturnValue; } GBMRESULT CNodeCategorical::RecycleSelf ( CNodeFactory *pNodeFactory ) { GBMRESULT hr = GBM_OK; hr = pNodeFactory->RecycleNode(this); return hr; }; GBMRESULT CNodeCategorical::TransferTreeToRList ( int &iNodeID, CDataset *pData, int *aiSplitVar, double *adSplitPoint, int *aiLeftNode, int *aiRightNode, int *aiMissingNode, double *adErrorReduction, double *adWeight, double *adPred, VEC_VEC_CATEGORIES &vecSplitCodes, int cCatSplitsOld, double dShrinkage ) { GBMRESULT hr = GBM_OK; int iThisNodeID = iNodeID; unsigned long cCatSplits = vecSplitCodes.size(); unsigned long i = 0; int cLevels = pData->acVarClasses[iSplitVar]; aiSplitVar[iThisNodeID] = iSplitVar; adSplitPoint[iThisNodeID] = cCatSplits+cCatSplitsOld; // 0 based adErrorReduction[iThisNodeID] = dImprovement; adWeight[iThisNodeID] = dTrainW; adPred[iThisNodeID] = 
dShrinkage*dPrediction; vecSplitCodes.push_back(VEC_CATEGORIES()); vecSplitCodes[cCatSplits].resize(cLevels,1); for(i=0; iTransferTreeToRList(iNodeID, pData, aiSplitVar, adSplitPoint, aiLeftNode, aiRightNode, aiMissingNode, adErrorReduction, adWeight, adPred, vecSplitCodes, cCatSplitsOld, dShrinkage); if(GBM_FAILED(hr)) goto Error; aiRightNode[iThisNodeID] = iNodeID; hr = pRightNode->TransferTreeToRList(iNodeID, pData, aiSplitVar, adSplitPoint, aiLeftNode, aiRightNode, aiMissingNode, adErrorReduction, adWeight, adPred, vecSplitCodes, cCatSplitsOld, dShrinkage); if(GBM_FAILED(hr)) goto Error; aiMissingNode[iThisNodeID] = iNodeID; hr = pMissingNode->TransferTreeToRList(iNodeID, pData, aiSplitVar, adSplitPoint, aiLeftNode, aiRightNode, aiMissingNode, adErrorReduction, adWeight, adPred, vecSplitCodes, cCatSplitsOld, dShrinkage); if(GBM_FAILED(hr)) goto Error; Cleanup: return hr; Error: goto Cleanup; } gbm/src/node.h0000644000176200001440000000670112143232747012715 0ustar liggesusers//------------------------------------------------------------------------------ // GBM by Greg Ridgeway Copyright (C) 2003 // // File: node.h // // License: GNU GPL (version 2 or later) // // Contents: a node in the tree // // Owner: gregr@rand.org // // History: 3/26/2001 gregr created // 2/14/2003 gregr: adapted for R implementation // //------------------------------------------------------------------------------ #ifndef NODGBM_H #define NODGBM_H #include #include "dataset.h" #include "buildinfo.h" class CNodeFactory; using namespace std; typedef vector VEC_CATEGORIES; typedef vector VEC_VEC_CATEGORIES; class CNode { public: CNode(); virtual ~CNode(); virtual GBMRESULT Adjust(unsigned long cMinObsInNode); virtual GBMRESULT Predict(CDataset *pData, unsigned long iRow, double &dFadj); virtual GBMRESULT Predict(double *adX, unsigned long cRow, unsigned long cCol, unsigned long iRow, double &dFadj) = 0; static double Improvement ( double dLeftW, double dRightW, double dMissingW, double 
dLeftSum, double dRightSum, double dMissingSum ) { double dTemp = 0.0; double dResult = 0.0; if(dMissingW == 0.0) { dTemp = dLeftSum/dLeftW - dRightSum/dRightW; dResult = dLeftW*dRightW*dTemp*dTemp/(dLeftW+dRightW); } else { dTemp = dLeftSum/dLeftW - dRightSum/dRightW; dResult += dLeftW*dRightW*dTemp*dTemp; dTemp = dLeftSum/dLeftW - dMissingSum/dMissingW; dResult += dLeftW*dMissingW*dTemp*dTemp; dTemp = dRightSum/dRightW - dMissingSum/dMissingW; dResult += dRightW*dMissingW*dTemp*dTemp; dResult /= (dLeftW + dRightW + dMissingW); } return dResult; } virtual GBMRESULT PrintSubtree(unsigned long cIndent); virtual GBMRESULT TransferTreeToRList(int &iNodeID, CDataset *pData, int *aiSplitVar, double *adSplitPoint, int *aiLeftNode, int *aiRightNode, int *aiMissingNode, double *adErrorReduction, double *adWeight, double *adPred, VEC_VEC_CATEGORIES &vecSplitCodes, int cCatSplitsOld, double dShrinkage); double TotalError(); virtual GBMRESULT GetVarRelativeInfluence(double *adRelInf); virtual GBMRESULT RecycleSelf(CNodeFactory *pNodeFactory) = 0; double dPrediction; double dTrainW; // total training weight in node unsigned long cN; // number of training observations in node bool isTerminal; protected: double GetXEntry(CDataset *pData, unsigned long iRow, unsigned long iCol) { return pData->adX[iCol*(pData->cRows) + iRow]; } }; typedef CNode *PCNode; #endif // NODGBM_H gbm/src/node.cpp0000644000176200001440000000224012143232747013242 0ustar liggesusers// GBM by Greg Ridgeway Copyright (C) 2003 #include "node.h" CNode::CNode() { dPrediction = 0.0; dTrainW = 0.0; isTerminal = false; } CNode::~CNode() { // the nodes get deleted by deleting the node factory } GBMRESULT CNode::Adjust ( unsigned long cMinObsInNode ) { GBMRESULT hr = GBM_NOTIMPL; return hr; } GBMRESULT CNode::Predict ( CDataset *pData, unsigned long iRow, double &dFadj ) { GBMRESULT hr = GBM_NOTIMPL; return hr; } double CNode::TotalError() { GBMRESULT hr = GBM_NOTIMPL; return hr; } GBMRESULT CNode::PrintSubtree ( 
unsigned long cIndent ) { GBMRESULT hr = GBM_NOTIMPL; return hr; } GBMRESULT CNode::GetVarRelativeInfluence ( double *adRelInf ) { GBMRESULT hr = GBM_NOTIMPL; return hr; } GBMRESULT CNode::TransferTreeToRList ( int &iNodeID, CDataset *pData, int *aiSplitVar, double *adSplitPoint, int *aiLeftNode, int *aiRightNode, int *aiMissingNode, double *adErrorReduction, double *adWeight, double *adPred, VEC_VEC_CATEGORIES &vecSplitCodes, int cCatSplitsOld, double dShrinkage ) { return GBM_NOTIMPL; } gbm/src/multinomial.h0000755000176200001440000000557712143232747014337 0ustar liggesusers//------------------------------------------------------------------------------ // GBM alteration by Daniel Edwards // // File: multinomial.h // // // Contains: Distribution object to implement multinomial // // History: 04/04/2008 Created // //------------------------------------------------------------------------------ #ifndef KMULTICGBM_H #define KMULTICGBM_H #include #include "distribution.h" #include "locationm.h" class CMultinomial : public CDistribution { public: CMultinomial(int cNumClasses, int cRows); virtual ~CMultinomial(); GBMRESULT UpdateParams(double *adF, double *adOffset, double *adWeight, unsigned long cLength); GBMRESULT ComputeWorkingResponse(double *adY, double *adMisc, double *adOffset, double *adF, double *adZ, double *adWeight, bool *afInBag, unsigned long nTrain, int cIdxOff); GBMRESULT InitF(double *adY, double *adMisc, double *adOffset, double *adWeight, double &dInitF, unsigned long cLength); GBMRESULT FitBestConstant(double *adY, double *adMisc, double *adOffset, double *adW, double *adF, double *adZ, unsigned long *aiNodeAssign, unsigned long nTrain, VEC_P_NODETERMINAL vecpTermNodes, unsigned long cTermNodes, unsigned long cMinObsInNode, bool *afInBag, double *adFadj, int cIdxOff); double Deviance(double *adY, double *adMisc, double *adOffset, double *adWeight, double *adF, unsigned long cLength, int cIdxOff); double BagImprovement(double *adY, double *adMisc, 
double *adOffset, double *adWeight, double *adF, double *adFadj, bool *afInBag, double dStepSize, unsigned long nTrain); private: unsigned long mcNumClasses; unsigned long mcRows; double *madProb; }; #endif // KMULTICGBM_H gbm/src/multinomial.cpp0000755000176200001440000001152112143232747014654 0ustar liggesusers// GBM by Greg Ridgeway Copyright (C) 2003 #include "multinomial.h" CMultinomial::CMultinomial(int cNumClasses, int cRows) { mcNumClasses = cNumClasses; mcRows = cRows; madProb = new double[cNumClasses * cRows]; } CMultinomial::~CMultinomial() { if(madProb != NULL) { delete [] madProb; } } GBMRESULT CMultinomial::UpdateParams ( double *adF, double *adOffset, double *adWeight, unsigned long cLength ) { // Local variables unsigned long ii=0; unsigned long kk=0; // Set the probabilities for each observation in each class for (ii = 0; ii < mcRows; ii++) { double dClassSum = 0.0; for (kk = 0; kk < mcNumClasses; kk++) { int iIdx = ii + kk * mcRows; double dF = (adOffset == NULL) ? adF[iIdx] : adF[iIdx] + adOffset[iIdx]; madProb[iIdx] = adWeight[iIdx] * exp(dF); dClassSum += adWeight[iIdx] * exp(dF); } dClassSum = (dClassSum > 0) ? dClassSum : 1e-8; for (kk = 0; kk < mcNumClasses; kk++) { madProb[ii + kk * mcRows] /= dClassSum; } } return GBM_OK; } GBMRESULT CMultinomial::ComputeWorkingResponse ( double *adY, double *adMisc, double *adOffset, double *adF, double *adZ, double *adWeight, bool *afInBag, unsigned long nTrain, int cIdxOff ) { unsigned long i = 0; for(i=cIdxOff; icN >= cMinObsInNode) { // Get the number of nodes here double dNum = 0.0; double dDenom = 0.0; for (iObs = 0; iObs < nTrain; iObs++) { if(afInBag[iObs] && (aiNodeAssign[iObs] == iNode)) { int iIdx = iObs + cIdxOff; dNum += adW[iIdx] * adZ[iIdx]; dDenom += adW[iIdx] * fabs(adZ[iIdx]) * (1 - fabs(adZ[iIdx])); } } dDenom = (dDenom > 0) ? 
dDenom : 1e-8; vecpTermNodes[iNode]->dPrediction = dNum / dDenom; } } return hr; } double CMultinomial::BagImprovement ( double *adY, double *adMisc, double *adOffset, double *adWeight, double *adF, double *adFadj, bool *afInBag, double dStepSize, unsigned long nTrain ) { double dReturnValue = 0.0; double dW = 0.0; unsigned long ii; unsigned long kk; // Calculate the probabilities after the step double *adStepProb = new double[mcNumClasses * mcRows]; // Assume that this is last class - calculate new prob as in updateParams but // using (F_ik + ss*Fadj_ik) instead of F_ik. Then calculate OOB improve for (ii = 0; ii < mcRows; ii++) { double dClassSum = 0.0; for (kk = 0; kk < mcNumClasses; kk++) { int iIdx = ii + kk * mcRows; double dF = (adOffset == NULL) ? adF[iIdx] : adF[iIdx] + adOffset[iIdx]; dF += dStepSize * adFadj[iIdx]; adStepProb[iIdx] = adWeight[iIdx] * exp(dF); dClassSum += adWeight[iIdx] * exp(dF); } dClassSum = (dClassSum > 0) ? dClassSum : 1e-8; for (kk = 0; kk < mcNumClasses; kk++) { adStepProb[ii + kk * mcRows] /= dClassSum; } } // Calculate the improvement for(ii=0; ii // generic object (class) definition of matrix: template class matrix{ // NOTE: maxsize determines available memory storage, but // actualsize determines the actual size of the stored matrix in use // at a particular time. 
int maxsize; // max number of rows (same as max number of columns) int actualsize; // actual size (rows, or columns) of the stored matrix D* data; // where the data contents of the matrix are stored void allocateD() { delete[] data; data = new D [maxsize*maxsize]; }; public: matrix() { maxsize = 5; actualsize = 5; data = 0; allocateD(); }; // private ctor's matrix(int newmaxsize) {matrix(newmaxsize,newmaxsize);}; matrix(int newmaxsize, int newactualsize) { // the only public ctor if (newmaxsize <= 0) newmaxsize = 5; maxsize = newmaxsize; if ((newactualsize <= newmaxsize)&&(newactualsize>0)) actualsize = newactualsize; else actualsize = newmaxsize; // since allocateD() will first call delete[] on data: data = 0; allocateD(); }; ~matrix() { delete[] data; }; void dumpMatrixValues() { bool xyz; double rv; for (int i=0; i < actualsize; i++) { cout << "i=" << i << ": "; for (int j=0; j maxunitydeviation ) { maxunitydeviation = currentunitydeviation; worstdiagonal = i; } } int worstoffdiagonalrow = 0; int worstoffdiagonalcolumn = 0; D maxzerodeviation = 0.0; D currentzerodeviation ; for ( i = 0; i < actualsize; i++ ) { for ( int j = 0; j < actualsize; j++ ) { if ( i == j ) continue; // we look only at non-diagonal terms currentzerodeviation = data[i*maxsize+j]; if ( currentzerodeviation < 0.0) currentzerodeviation *= -1.0; if ( currentzerodeviation > maxzerodeviation ) { maxzerodeviation = currentzerodeviation; worstoffdiagonalrow = i; worstoffdiagonalcolumn = j; } } } cout << "Worst diagonal value deviation from unity: " << maxunitydeviation << " at row/column " << worstdiagonal << endl; cout << "Worst off-diagonal value deviation from zero: " << maxzerodeviation << " at row = " << worstoffdiagonalrow << ", column = " << worstoffdiagonalcolumn << endl; } void settoproduct(matrix& left, matrix& right) { actualsize = left.getactualsize(); if ( maxsize < left.getactualsize() ) { maxsize = left.getactualsize(); allocateD(); } for ( int i = 0; i < actualsize; i++ ) { for ( 
int j = 0; j < actualsize; j++ ) { D sum = 0.0; D leftvalue, rightvalue; bool success; for (int c = 0; c < actualsize; c++) { left.getvalue(i,c,leftvalue,success); right.getvalue(c,j,rightvalue,success); sum += leftvalue * rightvalue; } setvalue(i,j,sum); } } } void copymatrix(matrix& source) { actualsize = source.getactualsize(); if ( maxsize < source.getactualsize() ) { maxsize = source.getactualsize(); allocateD(); } for ( int i = 0; i < actualsize; i++ ) { for ( int j = 0; j < actualsize; j++ ) { D value; bool success; source.getvalue(i,j,value,success); data[i*maxsize+j] = value; } } }; void setactualsize(int newactualsize) { if ( newactualsize > maxsize ) { maxsize = newactualsize ; // * 2; // wastes memory but saves // time otherwise required for // operation new[] allocateD(); } if (newactualsize >= 0) actualsize = newactualsize; }; int getactualsize() { return actualsize; }; void getvalue(int row, int column, D& returnvalue, bool& success) { if ( (row>=maxsize) || (column>=maxsize) || (row<0) || (column<0) ) { success = false; return; } returnvalue = data[ row * maxsize + column ]; success = true; }; bool setvalue(int row, int column, D newvalue) { if ( (row >= maxsize) || (column >= maxsize) || (row<0) || (column<0) ) return false; data[ row * maxsize + column ] = newvalue; return true; }; void invert() { int i = 0; int j = 0; int k = 0; if (actualsize <= 0) return; // sanity check if (actualsize == 1) { data[0] = 1.0/data[0]; return; } for (i=1; i < actualsize; i++) data[i] /= data[0]; // normalize row 0 for (i=1; i < actualsize; i++) { for ( j=i; j < actualsize; j++) { // do a column of L D sum = 0.0; for ( k = 0; k < i; k++) sum += data[j*maxsize+k] * data[k*maxsize+i]; data[j*maxsize+i] -= sum; } if (i == actualsize-1) continue; for ( j=i+1; j < actualsize; j++) { // do a row of U D sum = 0.0; for ( k = 0; k < i; k++) sum += data[i*maxsize+k]*data[k*maxsize+j]; data[i*maxsize+j] = (data[i*maxsize+j]-sum) / data[i*maxsize+i]; } } for ( i = 0; i < 
actualsize; i++ ) // invert L { for ( j = i; j < actualsize; j++ ) { D x = 1.0; if ( i != j ) { x = 0.0; for ( k = i; k < j; k++ ) x -= data[j*maxsize+k]*data[k*maxsize+i]; } data[j*maxsize+i] = x / data[j*maxsize+j]; } } for ( i = 0; i < actualsize; i++ ) // invert U { for ( j = i; j < actualsize; j++ ) { if ( i == j ) continue; D sum = 0.0; for ( k = i; k < j; k++ ) sum += data[k*maxsize+j]*( (i==k) ? 1.0 : data[i*maxsize+k] ); data[i*maxsize+j] = -sum; } } for ( i = 0; i < actualsize; i++ ) // final inversion { for ( j = 0; j < actualsize; j++ ) { D sum = 0.0; for ( k = ((i>j)?i:j); k < actualsize; k++ ) sum += ((j==k)?1.0:data[j*maxsize+k])*data[k*maxsize+i]; data[j*maxsize+i] = sum; } } }; }; #endif gbm/src/locationm.h0000644000176200001440000000157612143232747013762 0ustar liggesusers//------------------------------------------------------------------------------ // GBM alteration by Daniel Edwards // File: locationm.h // // History: 27/3/2008 created // //------------------------------------------------------------------------------ #ifndef LOCMCGBM_H #define LOCMCGBM_H #include #include #include #include using namespace std; class CLocationM { public: CLocationM(const char *sType, int iN, double *adParams); virtual ~CLocationM(); double Median(int iN, double *adV, double *adW); double PsiFun(double dX); double LocationM(int iN, double *adX, double *adW); private: double *madParams; const char *msType; double mdEps; struct comp{ bool operator()(pair prP, pair prQ) { return (prP.second < prQ.second); } }; }; #endif // LOCMCGBM_H gbm/src/locationm.cpp0000644000176200001440000001145712143232747014314 0ustar liggesusers//------------------------------------------------------------------------------ // GBM alteration by Daniel Edwards // File: locationm.cpp // // Purpose: Class to provide methods to calculate the location M-estimates // of a variety of functions // // History: 31/03/2008 created // 
//------------------------------------------------------------------------------ #include "locationm.h" #include using namespace std; ///////////////////////////////////////////////// // Constructor // // Creates a new instance of this class ///////////////////////////////////////////////// CLocationM::CLocationM(const char *sType, int iN, double *adParams) { int ii; msType = sType; mdEps = 1e-8; madParams = new double[iN]; for (ii = 0; ii < iN; ii++) { madParams[ii] = adParams[ii]; } } ///////////////////////////////////////////////// // Destructor // // Frees any memory from variables in this class ///////////////////////////////////////////////// CLocationM::~CLocationM() { if (madParams != NULL) { delete[] madParams; } } ///////////////////////////////////////////////// // Median // // Function to return the weighted quantile of // a vector of a given length // // Parameters: iN - Length of vector // adV - Vector of doubles // adW - Array of weights // dAlpha - Quantile to calculate (0.5 for median) // // Returns : Weighted quantile ///////////////////////////////////////////////// double CLocationM::Median(int iN, double *adV, double *adW) { // Local variables int ii, iMedIdx; vector vecW; vector< pair > vecV; double dCumSum, dWSum, dMed; // Check the vector size if (iN == 0) { return 0.0; } else if(iN == 1) { return adV[0]; } // Create vectors containing the values and weights vecV.resize(iN); for (ii = 0; ii < iN; ii++) { vecV[ii] = make_pair(ii, adV[ii]); } // Sort the vector std::stable_sort(vecV.begin(), vecV.end(), comp()); // Sort the weights correspondingly and calculate their sum vecW.resize(iN); dWSum = 0.0; for (ii = 0; ii < iN; ii++) { vecW[ii] = adW[vecV[ii].first]; dWSum += adW[ii]; } // Get the first index where the cumulative weight is >=0.5 iMedIdx = -1; dCumSum = 0.0; while (dCumSum < 0.5 * dWSum) { iMedIdx ++; dCumSum += vecW[iMedIdx]; } // Get the index of the next non-zero weight int iNextNonZero = iN; for (ii = (iN - 1); ii > iMedIdx; 
ii--) { if (vecW[ii] > 0) { iNextNonZero = ii; } } // Use this index unless the cumulative sum is exactly alpha if (iNextNonZero == iN || dCumSum > 0.5 * dWSum) { dMed = vecV[iMedIdx].second; } else { dMed = 0.5 * (vecV[iMedIdx].second + vecV[iNextNonZero].second); } return dMed; } ///////////////////////////////////////////////// // PsiFun // // Function to calculate the psi of the supplied // value, given the type of function to use and // the supplied parameters // // Parameters: dX - Value // // Returns : Psi(X) ///////////////////////////////////////////////// double CLocationM::PsiFun(double dX) { // Local variables double dPsiVal = 0.0; // Switch on the type of function if(strncmp(msType,"tdist",2) == 0) { dPsiVal = dX / (madParams[0] + (dX * dX)); } else { // TODO: Handle the error Rprintf("Error: Function type %s not found\n", msType); } return dPsiVal; } ///////////////////////////////////////////////// // LocationM // // Function to calculate location M estimate for // the supplied weighted data, with the psi-function // type and parameters specified in this class // // Parameters: iN - Number of data points // adX - Data vector // adW - Weight vector // // Returns : Location M-Estimate of (X, W) ///////////////////////////////////////////////// double CLocationM::LocationM(int iN, double *adX, double *adW) { // Local variables int ii; // Get the initial estimate of location double dBeta0 = Median(iN, adX, adW); // Get the initial estimate of scale double *adDiff = new double[iN]; for (ii = 0; ii < iN; ii++) { adDiff[ii] = fabs(adX[ii] - dBeta0); } double dScale0 = 1.4826 * Median(iN, adDiff, adW); dScale0 = fmax(dScale0, mdEps); // Loop over until the error is low enough double dErr = 1.0; int iCount = 0; while (iCount < 50) { double dSumWX = 0.0; double dSumW = 0.0; for (ii = 0; ii < iN; ii++) { double dT = fabs(adX[ii] - dBeta0) / dScale0; dT = fmax(dT, mdEps); double dWt = adW[ii] * PsiFun(dT) / dT; dSumWX += dWt * adX[ii]; dSumW += dWt; } double 
dBeta = dBeta0; if (dSumW > 0){ dBeta = dSumWX / dSumW; } dErr = fabs(dBeta - dBeta0); if (dErr > mdEps) { dErr /= fabs(dBeta0); } dBeta0 = dBeta; if (dErr < mdEps) { iCount = 100; } else { iCount++; } } // Cleanup memory delete[] adDiff; return dBeta0; } gbm/src/laplace.h0000644000176200001440000000572212143232747013373 0ustar liggesusers//------------------------------------------------------------------------------ // GBM by Greg Ridgeway Copyright (C) 2003 // File: laplace.h // // License: GNU GPL (version 2 or later) // // Contents: laplace object // // Owner: gregr@rand.org // // History: 3/26/2001 gregr created // 2/14/2003 gregr: adapted for R implementation // //------------------------------------------------------------------------------ #ifndef LAPLACGBM_H #define LAPLACGBM_H #include #include "distribution.h" #include "locationm.h" class CLaplace : public CDistribution { public: CLaplace(); virtual ~CLaplace(); GBMRESULT UpdateParams(double *adF, double *adOffset, double *adWeight, unsigned long cLength) { return GBM_OK; }; GBMRESULT ComputeWorkingResponse(double *adY, double *adMisc, double *adOffset, double *adF, double *adZ, double *adWeight, bool *afInBag, unsigned long nTrain, int cIdxOff); GBMRESULT InitF(double *adY, double *adMisc, double *adOffset, double *adWeight, double &dInitF, unsigned long cLength); GBMRESULT FitBestConstant(double *adY, double *adMisc, double *adOffset, double *adW, double *adF, double *adZ, unsigned long *aiNodeAssign, unsigned long nTrain, VEC_P_NODETERMINAL vecpTermNodes, unsigned long cTermNodes, unsigned long cMinObsInNode, bool *afInBag, double *adFadj, int cIdxOff); double Deviance(double *adY, double *adMisc, double *adOffset, double *adWeight, double *adF, unsigned long cLength, int cIdxOff); double BagImprovement(double *adY, double *adMisc, double *adOffset, double *adWeight, double *adF, double *adFadj, bool *afInBag, double dStepSize, unsigned long nTrain); private: vector vecd; vector::iterator itMedian; 
CLocationM *mpLocM; }; #endif // LAPLACGBM_H gbm/src/laplace.cpp0000644000176200001440000001027012143232747013720 0ustar liggesusers// GBM by Greg Ridgeway Copyright (C) 2003 #include "laplace.h" CLaplace::CLaplace() { mpLocM = NULL; } CLaplace::~CLaplace() { if(mpLocM != NULL) { delete mpLocM; } } GBMRESULT CLaplace::ComputeWorkingResponse ( double *adY, double *adMisc, double *adOffset, double *adF, double *adZ, double *adWeight, bool *afInBag, unsigned long nTrain, int cIdxOff ) { unsigned long i = 0; if(adOffset == NULL) { for(i=0; i 0.0 ? 1.0 : -1.0; } } else { for(i=0; i 0.0 ? 1.0 : -1.0; } } return GBM_OK; } GBMRESULT CLaplace::InitF ( double *adY, double *adMisc, double *adOffset, double *adWeight, double &dInitF, unsigned long cLength ) { GBMRESULT hr = GBM_OK; double dOffset = 0.0; unsigned long ii = 0; int nLength = int(cLength); double *adArr = NULL; // Create a new LocationM object (for weighted medians) double *pTemp = NULL; mpLocM = new CLocationM("Other", 0, pTemp); if(mpLocM == NULL) { hr = GBM_OUTOFMEMORY; goto Error; } adArr = new double[cLength]; if(adArr == NULL) { hr = GBM_OUTOFMEMORY; goto Error; } for (ii = 0; ii < cLength; ii++) { dOffset = (adOffset==NULL) ? 
0.0 : adOffset[ii]; adArr[ii] = adY[ii] - dOffset; } dInitF = mpLocM->Median(nLength, adArr, adWeight); Cleanup: return hr; Error: goto Cleanup; } double CLaplace::Deviance ( double *adY, double *adMisc, double *adOffset, double *adWeight, double *adF, unsigned long cLength, int cIdxOff ) { unsigned long i=0; double dL = 0.0; double dW = 0.0; if(adOffset == NULL) { for(i=cIdxOff; icN >= cMinObsInNode) { iVecd = 0; for(iObs=0; iObsdPrediction = mpLocM->Median(iVecd, adArr, adW2); } } return hr; } double CLaplace::BagImprovement ( double *adY, double *adMisc, double *adOffset, double *adWeight, double *adF, double *adFadj, bool *afInBag, double dStepSize, unsigned long nTrain ) { double dReturnValue = 0.0; double dF = 0.0; double dW = 0.0; unsigned long i = 0; for(i=0; i vecdNum; vector vecdDen; }; #endif // HUBERIZED_H gbm/src/huberized.cpp0000644000176200001440000001324412143232747014304 0ustar liggesusers// GBM by Greg Ridgeway Copyright (C) 2003 // huberized.ccp & huberized.h and associated R code added // by Harry Southworth, April 2009. 
#include "huberized.h" CHuberized::CHuberized() { } CHuberized::~CHuberized() { } GBMRESULT CHuberized::ComputeWorkingResponse ( double *adY, double *adMisc, double *adOffset, double *adF, double *adZ, double *adWeight, bool *afInBag, unsigned long nTrain, int cIdxOff ) { unsigned long i = 0; double dF = 0.0; for(i=0; idPrediction = 0.0; } else { vecpTermNodes[iNode]->dPrediction = vecdNum[iNode]/vecdDen[iNode]; } } } return hr; } double CHuberized::BagImprovement ( double *adY, double *adMisc, double *adOffset, double *adWeight, double *adF, double *adFadj, bool *afInBag, double dStepSize, unsigned long nTrain ) { double dReturnValue = 0.0; double dF = 0.0; double dW = 0.0; unsigned long i = 0; for(i=0; i #include SEXP gbm ( SEXP radY, // outcome or response SEXP radOffset, // offset for f(x), NA for no offset SEXP radX, SEXP raiXOrder, SEXP radWeight, SEXP radMisc, // other row specific data (eg failure time), NA=no Misc SEXP rcRows, SEXP rcCols, SEXP racVarClasses, SEXP ralMonotoneVar, SEXP rszFamily, SEXP rcTrees, SEXP rcDepth, // interaction depth SEXP rcMinObsInNode, SEXP rcNumClasses, SEXP rdShrinkage, SEXP rdBagFraction, SEXP rcTrain, SEXP radFOld, SEXP rcCatSplitsOld, SEXP rcTreesOld, SEXP rfVerbose ) { unsigned long hr = 0; SEXP rAns = NULL; SEXP rNewTree = NULL; SEXP riSplitVar = NULL; SEXP rdSplitPoint = NULL; SEXP riLeftNode = NULL; SEXP riRightNode = NULL; SEXP riMissingNode = NULL; SEXP rdErrorReduction = NULL; SEXP rdWeight = NULL; SEXP rdPred = NULL; SEXP rdInitF = NULL; SEXP radF = NULL; SEXP radTrainError = NULL; SEXP radValidError = NULL; SEXP radOOBagImprove = NULL; SEXP rSetOfTrees = NULL; SEXP rSetSplitCodes = NULL; SEXP rSplitCode = NULL; VEC_VEC_CATEGORIES vecSplitCodes; int i = 0; int iT = 0; int iK = 0; int cTrees = INTEGER(rcTrees)[0]; const int cResultComponents = 7; // rdInitF, radF, radTrainError, radValidError, radOOBagImprove // rSetOfTrees, rSetSplitCodes const int cTreeComponents = 8; // riSplitVar, rdSplitPoint, riLeftNode, // 
riRightNode, riMissingNode, rdErrorReduction, rdWeight, rdPred int cNodes = 0; int cTrain = INTEGER(rcTrain)[0]; int cNumClasses = INTEGER(rcNumClasses)[0]; double dTrainError = 0.0; double dValidError = 0.0; double dOOBagImprove = 0.0; CGBM *pGBM = NULL; CDataset *pData = NULL; CDistribution *pDist = NULL; int cGroups = -1; // set up the dataset pData = new CDataset(); if(pData==NULL) { hr = GBM_OUTOFMEMORY; goto Error; } // initialize R's random number generator GetRNGstate(); // initialize some things hr = gbm_setup(REAL(radY), REAL(radOffset), REAL(radX), INTEGER(raiXOrder), REAL(radWeight), REAL(radMisc), INTEGER(rcRows)[0], INTEGER(rcCols)[0], INTEGER(racVarClasses), INTEGER(ralMonotoneVar), CHAR(STRING_ELT(rszFamily,0)), INTEGER(rcTrees)[0], INTEGER(rcDepth)[0], INTEGER(rcMinObsInNode)[0], INTEGER(rcNumClasses)[0], REAL(rdShrinkage)[0], REAL(rdBagFraction)[0], INTEGER(rcTrain)[0], pData, pDist, cGroups); if(GBM_FAILED(hr)) { goto Error; } // allocate the GBM pGBM = new CGBM(); if(pGBM==NULL) { hr = GBM_OUTOFMEMORY; goto Error; } // initialize the GBM hr = pGBM->Initialize(pData, pDist, REAL(rdShrinkage)[0], cTrain, REAL(rdBagFraction)[0], INTEGER(rcDepth)[0], INTEGER(rcMinObsInNode)[0], INTEGER(rcNumClasses)[0], cGroups); if(GBM_FAILED(hr)) { goto Error; } // allocate the main return object PROTECT(rAns = allocVector(VECSXP, cResultComponents)); // allocate the initial value PROTECT(rdInitF = allocVector(REALSXP, 1)); SET_VECTOR_ELT(rAns,0,rdInitF); UNPROTECT(1); // rdInitF // allocate the predictions PROTECT(radF = allocVector(REALSXP, (pData->cRows) * cNumClasses)); SET_VECTOR_ELT(rAns,1,radF); UNPROTECT(1); // radF hr = pDist->Initialize(pData->adY, pData->adMisc, pData->adOffset, pData->adWeight, pData->cRows); if(ISNA(REAL(radFOld)[0])) // check for old predictions { // set the initial value of F as a constant hr = pDist->InitF(pData->adY, pData->adMisc, pData->adOffset, pData->adWeight, REAL(rdInitF)[0], cTrain); for(i=0; i < (pData->cRows) * 
cNumClasses; i++) { REAL(radF)[i] = REAL(rdInitF)[0]; } } else { for(i=0; i < (pData->cRows) * cNumClasses; i++) { REAL(radF)[i] = REAL(radFOld)[i]; } } // allocate space for the performance measures PROTECT(radTrainError = allocVector(REALSXP, cTrees)); PROTECT(radValidError = allocVector(REALSXP, cTrees)); PROTECT(radOOBagImprove = allocVector(REALSXP, cTrees)); SET_VECTOR_ELT(rAns,2,radTrainError); SET_VECTOR_ELT(rAns,3,radValidError); SET_VECTOR_ELT(rAns,4,radOOBagImprove); UNPROTECT(3); // radTrainError , radValidError, radOOBagImprove // allocate the component for the tree structures PROTECT(rSetOfTrees = allocVector(VECSXP, cTrees * cNumClasses)); SET_VECTOR_ELT(rAns,5,rSetOfTrees); UNPROTECT(1); // rSetOfTrees if(INTEGER(rfVerbose)[0]) { Rprintf("Iter TrainDeviance ValidDeviance StepSize Improve\n"); } for(iT=0; iTUpdateParams(REAL(radF), pData->adOffset, pData->adWeight, cTrain); if(GBM_FAILED(hr)) { goto Error; } REAL(radTrainError)[iT] = 0.0; REAL(radValidError)[iT] = 0.0; REAL(radOOBagImprove)[iT] = 0.0; for (iK = 0; iK < cNumClasses; iK++) { hr = pGBM->iterate(REAL(radF), dTrainError,dValidError,dOOBagImprove, cNodes, cNumClasses, iK); if(GBM_FAILED(hr)) { goto Error; } // store the performance measures REAL(radTrainError)[iT] += dTrainError; REAL(radValidError)[iT] += dValidError; REAL(radOOBagImprove)[iT] += dOOBagImprove; // allocate the new tree component for the R list structure PROTECT(rNewTree = allocVector(VECSXP, cTreeComponents)); // riNodeID,riSplitVar,rdSplitPoint,riLeftNode, // riRightNode,riMissingNode,rdErrorReduction,rdWeight PROTECT(riSplitVar = allocVector(INTSXP, cNodes)); PROTECT(rdSplitPoint = allocVector(REALSXP, cNodes)); PROTECT(riLeftNode = allocVector(INTSXP, cNodes)); PROTECT(riRightNode = allocVector(INTSXP, cNodes)); PROTECT(riMissingNode = allocVector(INTSXP, cNodes)); PROTECT(rdErrorReduction = allocVector(REALSXP, cNodes)); PROTECT(rdWeight = allocVector(REALSXP, cNodes)); PROTECT(rdPred = allocVector(REALSXP, cNodes)); 
SET_VECTOR_ELT(rNewTree,0,riSplitVar); SET_VECTOR_ELT(rNewTree,1,rdSplitPoint); SET_VECTOR_ELT(rNewTree,2,riLeftNode); SET_VECTOR_ELT(rNewTree,3,riRightNode); SET_VECTOR_ELT(rNewTree,4,riMissingNode); SET_VECTOR_ELT(rNewTree,5,rdErrorReduction); SET_VECTOR_ELT(rNewTree,6,rdWeight); SET_VECTOR_ELT(rNewTree,7,rdPred); UNPROTECT(cTreeComponents); SET_VECTOR_ELT(rSetOfTrees,(iK + iT * cNumClasses),rNewTree); UNPROTECT(1); // rNewTree hr = gbm_transfer_to_R(pGBM, vecSplitCodes, INTEGER(riSplitVar), REAL(rdSplitPoint), INTEGER(riLeftNode), INTEGER(riRightNode), INTEGER(riMissingNode), REAL(rdErrorReduction), REAL(rdWeight), REAL(rdPred), INTEGER(rcCatSplitsOld)[0]); } // Close for iK // print the information if((iT <= 9) || ((iT+1+INTEGER(rcTreesOld)[0])/20 == (iT+1+INTEGER(rcTreesOld)[0])/20.0) || (iT==cTrees-1)) { R_CheckUserInterrupt(); if(INTEGER(rfVerbose)[0]) { Rprintf("%6d %13.4f %15.4f %10.4f %9.4f\n", iT+1+INTEGER(rcTreesOld)[0], REAL(radTrainError)[iT], REAL(radValidError)[iT], REAL(rdShrinkage)[0], REAL(radOOBagImprove)[iT]); } } } if(INTEGER(rfVerbose)[0]) Rprintf("\n"); // transfer categorical splits to R PROTECT(rSetSplitCodes = allocVector(VECSXP, vecSplitCodes.size())); SET_VECTOR_ELT(rAns,6,rSetSplitCodes); UNPROTECT(1); // rSetSplitCodes for(i=0; i<(int)vecSplitCodes.size(); i++) { PROTECT(rSplitCode = allocVector(INTSXP, size_of_vector(vecSplitCodes,i))); SET_VECTOR_ELT(rSetSplitCodes,i,rSplitCode); UNPROTECT(1); // rSplitCode hr = gbm_transfer_catsplits_to_R(i, vecSplitCodes, INTEGER(rSplitCode)); } // dump random number generator seed #ifdef NOISY_DEBUG Rprintf("PutRNGstate\n"); #endif PutRNGstate(); Cleanup: UNPROTECT(1); // rAns #ifdef NOISY_DEBUG Rprintf("destructing\n"); #endif if(pGBM != NULL) { delete pGBM; pGBM = NULL; } if(pDist != NULL) { delete pDist; pDist = NULL; } if(pData != NULL) { delete pData; pData = NULL; } return rAns; Error: goto Cleanup; } SEXP gbm_pred ( SEXP radX, // the data matrix SEXP rcRows, // number of rows SEXP rcCols, 
// number of columns SEXP rcNumClasses, // number of classes SEXP rcTrees, // number of trees, may be a vector SEXP rdInitF, // the initial value SEXP rTrees, // the list of trees SEXP rCSplits, // the list of categorical splits SEXP raiVarType, // indicator of continuous/nominal SEXP riSingleTree // boolean whether to return only results for one tree ) { unsigned long hr = 0; int iTree = 0; int iObs = 0; int cRows = INTEGER(rcRows)[0]; int cPredIterations = LENGTH(rcTrees); int iPredIteration = 0; int cTrees = 0; int iClass = 0; int cNumClasses = INTEGER(rcNumClasses)[0]; SEXP rThisTree = NULL; int *aiSplitVar = NULL; double *adSplitCode = NULL; int *aiLeftNode = NULL; int *aiRightNode = NULL; int *aiMissingNode = NULL; int iCurrentNode = 0; double dX = 0.0; int iCatSplitIndicator = 0; bool fSingleTree = (INTEGER(riSingleTree)[0]==1); SEXP radPredF = NULL; // allocate the predictions to return PROTECT(radPredF = allocVector(REALSXP, cRows*cNumClasses*cPredIterations)); if(radPredF == NULL) { hr = GBM_OUTOFMEMORY; goto Error; } // initialize the predicted values if(!fSingleTree) { // initialize with the intercept for only the smallest rcTrees for(iObs=0; iObs0)) { // copy over from the last rcTrees for(iObs=0; iObs 0) { cStackNodes--; iCurrentNode = aiNodeStack[cStackNodes]; if(aiSplitVar[iCurrentNode] == -1) // terminal node { REAL(radPredF)[iClass*cRows + iObs] += adWeightStack[cStackNodes]*adSplitCode[iCurrentNode]; } else // non-terminal node { // is this a split variable that interests me? iPredVar = -1; for(i=0; (iPredVar == -1) && (i < cCols); i++) { if(INTEGER(raiWhichVar)[i] == aiSplitVar[iCurrentNode]) { iPredVar = i; // split is on one that interests me } } if(iPredVar != -1) // this split is among raiWhichVar { dX = REAL(radX)[iPredVar*cRows + iObs]; // missing? if(ISNA(dX)) { aiNodeStack[cStackNodes] = aiMissingNode[iCurrentNode]; cStackNodes++; } // continuous? 
else if(INTEGER(raiVarType)[aiSplitVar[iCurrentNode]] == 0) { if(dX < adSplitCode[iCurrentNode]) { aiNodeStack[cStackNodes] = aiLeftNode[iCurrentNode]; cStackNodes++; } else { aiNodeStack[cStackNodes] = aiRightNode[iCurrentNode]; cStackNodes++; } } else // categorical { iCatSplitIndicator = INTEGER( VECTOR_ELT(rCSplits, (int)adSplitCode[iCurrentNode]))[(int)dX]; if(iCatSplitIndicator==-1) { aiNodeStack[cStackNodes] = aiLeftNode[iCurrentNode]; cStackNodes++; } else if(iCatSplitIndicator==1) { aiNodeStack[cStackNodes] = aiRightNode[iCurrentNode]; cStackNodes++; } else // handle unused level { iCurrentNode = aiMissingNode[iCurrentNode]; } } } // iPredVar != -1 else // not interested in this split, average left and right { aiNodeStack[cStackNodes] = aiRightNode[iCurrentNode]; dCurrentW = adWeightStack[cStackNodes]; adWeightStack[cStackNodes] = dCurrentW * adW[aiRightNode[iCurrentNode]]/ (adW[aiLeftNode[iCurrentNode]]+ adW[aiRightNode[iCurrentNode]]); cStackNodes++; aiNodeStack[cStackNodes] = aiLeftNode[iCurrentNode]; adWeightStack[cStackNodes] = dCurrentW-adWeightStack[cStackNodes-1]; cStackNodes++; } } // non-terminal node } // while(cStackNodes > 0) } // iObs } // iClass } // iTree Cleanup: UNPROTECT(1); // radPredF return radPredF; Error: goto Cleanup; } // gbm_plot SEXP gbm_shrink_pred ( SEXP radX, SEXP rcRows, SEXP rcCols, SEXP rcNumClasses, SEXP racTrees, SEXP rdInitF, SEXP rTrees, SEXP rCSplits, SEXP raiVarType, SEXP rcInteractionDepth, SEXP radLambda ) { unsigned long hr = 0; int iTree = 0; int iPredictionIter = 0; int iObs = 0; int iClass = 0; int i = 0; int cRows = INTEGER(rcRows)[0]; int cNumClasses = INTEGER(rcNumClasses)[0]; double *adLambda = REAL(radLambda); double dLambda = 0.0; double dPred = 0.0; SEXP rThisTree = NULL; int *aiSplitVar = NULL; double *adSplitCode = NULL; int *aiLeftNode = NULL; int *aiRightNode = NULL; int *aiMissingNode = NULL; double *adNodeW = NULL; int iCurrentNode = 0; double dX = 0.0; int iCatSplitIndicator = 0; SEXP rResult = 
NULL; SEXP radPredF = NULL; // The predictions double *adPredF = NULL; // The shrunken predictions double *adNodePred = NULL; int *aiNodeStack = NULL; unsigned long cNodeStack = 0; int cMaxNodes = 1+3*(INTEGER(rcInteractionDepth)[0]); adPredF = new double[cRows * cNumClasses]; if(adPredF == NULL) { hr = GBM_OUTOFMEMORY; goto Error; } for(iObs=0; iObs0) { i = aiNodeStack[cNodeStack-1]; if(aiSplitVar[i]==-1) { adNodePred[i] = adSplitCode[i]; cNodeStack--; } else if(ISNA(adNodePred[aiLeftNode[i]])) { aiNodeStack[cNodeStack] = aiLeftNode[i]; cNodeStack++; aiNodeStack[cNodeStack] = aiRightNode[i]; cNodeStack++; // check whether missing node is the same as parent node // occurs when X_i has no missing values if(adNodeW[i] != adNodeW[aiMissingNode[i]]) { aiNodeStack[cNodeStack] = aiMissingNode[i]; cNodeStack++; } else { adNodePred[aiMissingNode[i]] = 0.0; } } else { // compute the parent node's prediction adNodePred[i] = (adNodeW[aiLeftNode[i]]*adNodePred[aiLeftNode[i]] + adNodeW[aiRightNode[i]]*adNodePred[aiRightNode[i]]+ adNodeW[aiMissingNode[i]]*adNodePred[aiMissingNode[i]])/ adNodeW[i]; cNodeStack--; } } // predict for the observations for(iObs=0; iObs 1) { adProb = new double[cNumClasses]; } // initialize the predicted values for(iObs=0; iObs 1) then calculate the probabilities if (cNumClasses > 1) { dDenom = 0.0; for (iClass = 0; iClass < cNumClasses; iClass++) { adProb[iClass] = exp(REAL(radPredF)[iObs + iClass * cRows]); dDenom += adProb[iClass]; } dDJDf = 0.0; for (iClass = 0; iClass < cNumClasses; iClass++) { adProb[iClass] /= dDenom; REAL(rdObjective)[0] += (adY[iObs + iClass * cRows] - adProb[iClass]) * (adY[iObs + iClass * cRows] - adProb[iClass]); dDJDf += -2*(adY[iObs + iClass * cRows] - adProb[iClass]); } REAL(rdObjective)[0] /= double(cNumClasses); dDJDf /= double(cNumClasses); } else { // DEBUG: need to make more general for other loss functions! 
REAL(rdObjective)[0] += (adY[iObs]-REAL(radPredF)[iObs])* (adY[iObs]-REAL(radPredF)[iObs]); dDJDf = -2*(adY[iObs]-REAL(radPredF)[iObs]); } for(iLambda=0; iLambda #include "buildinfo.h" #include "distribution.h" #include "tree.h" #include "dataset.h" #include "node_factory.h" using namespace std; class CGBM { public: CGBM(); ~CGBM(); GBMRESULT Initialize(CDataset *pData, CDistribution *pDist, double dLambda, unsigned long nTrain, double dBagFraction, unsigned long cLeaves, unsigned long cMinObsInNode, unsigned long cNumClasses, int cGroups); GBMRESULT iterate(double *adF, double &dTrainError, double &dValidError, double &dOOBagImprove, int &cNodes, int cNumClasses, int cClassIdx); GBMRESULT TransferTreeToRList(int *aiSplitVar, double *adSplitPoint, int *aiLeftNode, int *aiRightNode, int *aiMissingNode, double *adErrorReduction, double *adWeight, double *adPred, VEC_VEC_CATEGORIES &vecSplitCodes, int cCatSplitsOld); GBMRESULT Predict(unsigned long iVar, unsigned long cTrees, double *adF, double *adX, unsigned long cLength); GBMRESULT Predict(double *adX, unsigned long cRow, unsigned long cCol, unsigned long cTrees, double *adF); GBMRESULT GetVarRelativeInfluence(double *adRelInf, unsigned long cTrees); GBMRESULT PrintTree(); bool IsPairwise() const { return (cGroups >= 0); } CDataset *pData; // the data CDistribution *pDist; // the distribution bool fInitialized; // indicates whether the GBM has been initialized CNodeFactory *pNodeFactory; // these objects are for the tree growing // allocate them once here for all trees to use bool *afInBag; unsigned long *aiNodeAssign; CNodeSearch *aNodeSearch; PCCARTTree ptreeTemp; VEC_P_NODETERMINAL vecpTermNodes; double *adZ; double *adFadj; private: double dLambda; unsigned long cTrain; unsigned long cValid; unsigned long cTotalInBag; double dBagFraction; unsigned long cDepth; unsigned long cMinObsInNode; int cGroups; }; #endif // GBM_ENGINGBM_H gbm/src/gbm_engine.cpp0000644000176200001440000003063212143232747014415 0ustar 
liggesusers// GBM by Greg Ridgeway Copyright (C) 2003 //#define NOISY_DEBUG #include "gbm_engine.h" CGBM::CGBM() { adFadj = NULL; adZ = NULL; afInBag = NULL; aiNodeAssign = NULL; aNodeSearch = NULL; cDepth = 0; cMinObsInNode = 0; dBagFraction = 0.0; dLambda = 0.0; fInitialized = false; cTotalInBag = 0; cTrain = 0; cValid = 0; pData = NULL; pDist = NULL; pNodeFactory = NULL; ptreeTemp = NULL; } CGBM::~CGBM() { if(adFadj != NULL) { delete [] adFadj; adFadj = NULL; } if(adZ != NULL) { delete [] adZ; adZ = NULL; } if(afInBag != NULL) { delete [] afInBag; afInBag = NULL; } if(aiNodeAssign != NULL) { delete [] aiNodeAssign; aiNodeAssign = NULL; } if(aNodeSearch != NULL) { delete [] aNodeSearch; aNodeSearch = NULL; } if(ptreeTemp != NULL) { delete ptreeTemp; ptreeTemp = NULL; } // must delete the node factory last!!! at least after deleting trees if(pNodeFactory != NULL) { delete pNodeFactory; pNodeFactory = NULL; } } GBMRESULT CGBM::Initialize ( CDataset *pData, CDistribution *pDist, double dLambda, unsigned long cTrain, double dBagFraction, unsigned long cDepth, unsigned long cMinObsInNode, unsigned long cNumClasses, int cGroups ) { GBMRESULT hr = GBM_OK; unsigned long i=0; if(pData == NULL) { hr = GBM_INVALIDARG; goto Error; } if(pDist == NULL) { hr = GBM_INVALIDARG; goto Error; } this->pData = pData; this->pDist = pDist; this->dLambda = dLambda; this->cTrain = cTrain; this->dBagFraction = dBagFraction; this->cDepth = cDepth; this->cMinObsInNode = cMinObsInNode; this->cGroups = cGroups; // allocate the tree structure ptreeTemp = new CCARTTree; if(ptreeTemp == NULL) { hr = GBM_OUTOFMEMORY; goto Error; } cValid = pData->cRows - cTrain; cTotalInBag = (unsigned long)(dBagFraction*cTrain); adZ = new double[(pData->cRows) * cNumClasses]; if(adZ == NULL) { hr = GBM_OUTOFMEMORY; goto Error; } adFadj = new double[(pData->cRows) * cNumClasses]; if(adFadj == NULL) { hr = GBM_OUTOFMEMORY; goto Error; } for (i=0; i<(pData->cRows)*cNumClasses; i++) { adFadj[i] = 0.0; } pNodeFactory 
= new CNodeFactory(); if(pNodeFactory == NULL) { hr = GBM_OUTOFMEMORY; goto Error; } hr = pNodeFactory->Initialize(cDepth); if(GBM_FAILED(hr)) { goto Error; } ptreeTemp->Initialize(pNodeFactory); // array for flagging those observations in the bag afInBag = new bool[cTrain]; if(afInBag==NULL) { hr = GBM_OUTOFMEMORY; goto Error; } // aiNodeAssign tracks to which node each training obs belongs aiNodeAssign = new ULONG[cTrain]; if(aiNodeAssign==NULL) { hr = GBM_OUTOFMEMORY; goto Error; } // NodeSearch objects help decide which nodes to split aNodeSearch = new CNodeSearch[2*cDepth+1]; if(aNodeSearch==NULL) { hr = GBM_OUTOFMEMORY; goto Error; } for(i=0; i<2*cDepth+1; i++) { aNodeSearch[i].Initialize(cMinObsInNode); } vecpTermNodes.resize(2*cDepth+1,NULL); fInitialized = true; Cleanup: return hr; Error: goto Cleanup; } GBMRESULT CGBM::Predict ( unsigned long iVar, unsigned long cTrees, double *adF, double *adX, unsigned long cLength ) { GBMRESULT hr = GBM_OK; return hr; } GBMRESULT CGBM::Predict ( double *adX, unsigned long cRow, unsigned long cCol, unsigned long cTrees, double *adF ) { GBMRESULT hr = GBM_OK; return hr; } GBMRESULT CGBM::GetVarRelativeInfluence ( double *adRelInf, unsigned long cTrees ) { GBMRESULT hr = GBM_OK; int iVar=0; for(iVar=0; iVarcCols; iVar++) { adRelInf[iVar] = 0.0; } return hr; } GBMRESULT CGBM::PrintTree() { GBMRESULT hr = GBM_OK; hr = ptreeTemp->Print(); if(GBM_FAILED(hr)) goto Error; Cleanup: return hr; Error: goto Cleanup; } GBMRESULT CGBM::iterate ( double *adF, double &dTrainError, double &dValidError, double &dOOBagImprove, int &cNodes, int cNumClasses, int cClassIdx ) { GBMRESULT hr = GBM_OK; unsigned long i = 0; unsigned long cBagged = 0; int cIdxOff = cClassIdx * (cTrain + cValid); // for(i=0; i < cTrain + cIdxOff; i++){ adF[i] = 0;} if(!fInitialized) { hr = GBM_FAIL; goto Error; } dTrainError = 0.0; dValidError = 0.0; dOOBagImprove = 0.0; vecpTermNodes.assign(2*cDepth+1,NULL); // randomly assign observations to the Bag if 
(cClassIdx == 0) { if (!IsPairwise()) { // regular instance based training for(i=0; i= cTotalInBag){ break; } */ } // the remainder is not in the bag for( ; iadMisc[i]; if (dGroup != dLastGroup) { if (cBaggedGroups >= cTotalGroupsInBag) { break; } // Group changed, make a new decision chosen = (unif_rand()*(cGroups - cSeenGroups) < cTotalGroupsInBag - cBaggedGroups); if (chosen) { cBaggedGroups++; } dLastGroup = dGroup; cSeenGroups++; } if (chosen) { afInBag[i] = true; cBagged++; } else { afInBag[i] = false; } } // the remainder is not in the bag for( ; iComputeWorkingResponse(pData->adY, pData->adMisc, pData->adOffset, adF, adZ, pData->adWeight, afInBag, cTrain, cIdxOff); if(GBM_FAILED(hr)) { goto Error; } #ifdef NOISY_DEBUG Rprintf("Reset tree\n"); #endif hr = ptreeTemp->Reset(); #ifdef NOISY_DEBUG Rprintf("grow tree\n"); #endif hr = ptreeTemp->grow(&(adZ[cIdxOff]), pData, &(pData->adWeight[cIdxOff]), &(adFadj[cIdxOff]), cTrain, cTotalInBag, dLambda, cDepth, cMinObsInNode, afInBag, aiNodeAssign, aNodeSearch, vecpTermNodes); if(GBM_FAILED(hr)) { goto Error; } #ifdef NOISY_DEBUG Rprintf("get node count\n"); #endif hr = ptreeTemp->GetNodeCount(cNodes); if(GBM_FAILED(hr)) { goto Error; } // Now I have adF, adZ, and vecpTermNodes (new node assignments) // Fit the best constant within each terminal node #ifdef NOISY_DEBUG Rprintf("fit best constant\n"); #endif hr = pDist->FitBestConstant(pData->adY, pData->adMisc, pData->adOffset, pData->adWeight, adF, adZ, aiNodeAssign, cTrain, vecpTermNodes, (2*cNodes+1)/3, // number of terminal nodes cMinObsInNode, afInBag, adFadj, cIdxOff); if(GBM_FAILED(hr)) { goto Error; } // update training predictions // fill in missing nodes where N < cMinObsInNode hr = ptreeTemp->Adjust(aiNodeAssign,&(adFadj[cIdxOff]),cTrain, vecpTermNodes,cMinObsInNode); if(GBM_FAILED(hr)) { goto Error; } ptreeTemp->SetShrinkage(dLambda); if (cClassIdx == (cNumClasses - 1)) { dOOBagImprove = pDist->BagImprovement(pData->adY, pData->adMisc, pData->adOffset, 
pData->adWeight, adF, adFadj, afInBag, dLambda, cTrain); } // update the training predictions for(i=0; i < cTrain; i++) { int iIdx = i + cIdxOff; adF[iIdx] += dLambda * adFadj[iIdx]; } dTrainError = pDist->Deviance(pData->adY, pData->adMisc, pData->adOffset, pData->adWeight, adF, cTrain, cIdxOff); // update the validation predictions hr = ptreeTemp->PredictValid(pData,cValid,&(adFadj[cIdxOff])); for(i=cTrain; i < cTrain+cValid; i++) { adF[i + cIdxOff] += adFadj[i + cIdxOff]; } if(pData->fHasOffset) { dValidError = pDist->Deviance(pData->adY, pData->adMisc, pData->adOffset, pData->adWeight, adF, cValid, cIdxOff + cTrain); } else { dValidError = pDist->Deviance(pData->adY, pData->adMisc, NULL, pData->adWeight, adF, cValid, cIdxOff + cTrain); } Cleanup: return hr; Error: goto Cleanup; } GBMRESULT CGBM::TransferTreeToRList ( int *aiSplitVar, double *adSplitPoint, int *aiLeftNode, int *aiRightNode, int *aiMissingNode, double *adErrorReduction, double *adWeight, double *adPred, VEC_VEC_CATEGORIES &vecSplitCodes, int cCatSplitsOld ) { GBMRESULT hr = GBM_OK; hr = ptreeTemp->TransferTreeToRList(pData, aiSplitVar, adSplitPoint, aiLeftNode, aiRightNode, aiMissingNode, adErrorReduction, adWeight, adPred, vecSplitCodes, cCatSplitsOld, dLambda); return hr; } gbm/src/gbm.h0000644000176200001440000000355612143232747012542 0ustar liggesusers//------------------------------------------------------------------------------ // GBM by Greg Ridgeway Copyright (C) 2003 // // File: gbm.h // // License: GNU GPL (version 2 or later) // // Contents: Entry point for gbm.dll // // Owner: gregr@rand.org // // History: 2/14/2003 gregr created // 6/11/2007 gregr added quantile regression // written by Brian Kriegler // //------------------------------------------------------------------------------ #include #include "dataset.h" #include "distribution.h" #include "bernoulli.h" #include "adaboost.h" #include "poisson.h" #include "gaussian.h" #include "coxph.h" #include "laplace.h" #include 
"quantile.h" #include "tdist.h" #include "multinomial.h" #include "pairwise.h" #include "gbm_engine.h" #include "locationm.h" #include "huberized.h" typedef vector VEC_CATEGORIES; typedef vector VEC_VEC_CATEGORIES; GBMRESULT gbm_setup ( double *adY, double *adOffset, double *adX, int *aiXOrder, double *adWeight, double *adMisc, int cRows, int cCols, int *acVarClasses, int *alMonotoneVar, const char *pszFamily, int cTrees, int cLeaves, int cMinObsInNode, int cNumClasses, double dShrinkage, double dBagFraction, int cTrain, CDataset *pData, PCDistribution &pDist, int& cGroups ); GBMRESULT gbm_transfer_to_R ( CGBM *pGBM, VEC_VEC_CATEGORIES &vecSplitCodes, int *aiSplitVar, double *adSplitPoint, int *aiLeftNode, int *aiRightNode, int *aiMissingNode, double *adErrorReduction, double *adWeight, double *adPred, int cCatSplitsOld ); GBMRESULT gbm_transfer_catsplits_to_R ( int iCatSplit, VEC_VEC_CATEGORIES &vecSplitCodes, int *aiSplitCodes ); int size_of_vector ( VEC_VEC_CATEGORIES &vec, int i ); gbm/src/gbm.cpp0000644000176200001440000001332712143232747013072 0ustar liggesusers//------------------------------------------------------------------------------ // // GBM by Greg Ridgeway Copyright (C) 2003 // File: gbm.cpp // //------------------------------------------------------------------------------ #include #include "gbm.h" // Count the number of distinct groups in the input data int num_groups(const double* adMisc, int cTrain) { if (cTrain <= 0) { return 0; } double dLastGroup = adMisc[0]; int cGroups = 1; for(int i=1; iSetData(adX,aiXOrder,adY,adOffset,adWeight,adMisc, cRows,cCols,acVarClasses,alMonotoneVar); if(GBM_FAILED(hr)) { goto Error; } // set the distribution if(strncmp(pszFamily,"bernoulli",2) == 0) { pDist = new CBernoulli(); if(pDist==NULL) { hr = GBM_OUTOFMEMORY; goto Error; } } else if(strncmp(pszFamily,"gaussian",2) == 0) { pDist = new CGaussian(); if(pDist==NULL) { hr = GBM_OUTOFMEMORY; goto Error; } } else if(strncmp(pszFamily,"poisson",2) == 0) { pDist = 
new CPoisson(); if(pDist==NULL) { hr = GBM_OUTOFMEMORY; goto Error; } } else if(strncmp(pszFamily,"adaboost",2) == 0) { pDist = new CAdaBoost(); if(pDist==NULL) { hr = GBM_OUTOFMEMORY; goto Error; } } else if(strncmp(pszFamily,"coxph",2) == 0) { pDist = new CCoxPH(); if(pDist==NULL) { hr = GBM_OUTOFMEMORY; goto Error; } } else if(strncmp(pszFamily,"laplace",2) == 0) { pDist = new CLaplace(); if(pDist==NULL) { hr = GBM_OUTOFMEMORY; goto Error; } } else if(strncmp(pszFamily,"quantile",2) == 0) { pDist = new CQuantile(adMisc[0]); if(pDist==NULL) { hr = GBM_OUTOFMEMORY; goto Error; } } else if(strncmp(pszFamily,"tdist",2) == 0) { pDist = new CTDist(adMisc[0]); if(pDist==NULL) { hr = GBM_OUTOFMEMORY; goto Error; } } else if(strncmp(pszFamily,"multinomial",2) == 0) { pDist = new CMultinomial(cNumClasses, cRows); if(pDist==NULL) { hr = GBM_OUTOFMEMORY; goto Error; } } else if(strncmp(pszFamily,"huberized",2) == 0) { pDist = new CHuberized(); if(pDist==NULL) { hr = GBM_OUTOFMEMORY; goto Error; } } else if(strcmp(pszFamily,"pairwise_conc") == 0) { pDist = new CPairwise("conc"); if(pDist==NULL) { hr = GBM_OUTOFMEMORY; goto Error; } } else if(strcmp(pszFamily,"pairwise_ndcg") == 0) { pDist = new CPairwise("ndcg"); if(pDist==NULL) { hr = GBM_OUTOFMEMORY; goto Error; } } else if(strcmp(pszFamily,"pairwise_map") == 0) { pDist = new CPairwise("map"); if(pDist==NULL) { hr = GBM_OUTOFMEMORY; goto Error; } } else if(strcmp(pszFamily,"pairwise_mrr") == 0) { pDist = new CPairwise("mrr"); if(pDist==NULL) { hr = GBM_OUTOFMEMORY; goto Error; } } else { hr = GBM_INVALIDARG; goto Error; } if(pDist==NULL) { hr = GBM_INVALIDARG; goto Error; } if (!strncmp(pszFamily, "pairwise", strlen("pairwise"))) { cGroups = num_groups(adMisc, cTrain); } Cleanup: return hr; Error: goto Cleanup; } GBMRESULT gbm_transfer_to_R ( CGBM *pGBM, VEC_VEC_CATEGORIES &vecSplitCodes, int *aiSplitVar, double *adSplitPoint, int *aiLeftNode, int *aiRightNode, int *aiMissingNode, double *adErrorReduction, double 
*adWeight, double *adPred, int cCatSplitsOld ) { GBMRESULT hr = GBM_OK; hr = pGBM->TransferTreeToRList(aiSplitVar, adSplitPoint, aiLeftNode, aiRightNode, aiMissingNode, adErrorReduction, adWeight, adPred, vecSplitCodes, cCatSplitsOld); if(GBM_FAILED(hr)) goto Error; Cleanup: return hr; Error: goto Cleanup; } GBMRESULT gbm_transfer_catsplits_to_R ( int iCatSplit, VEC_VEC_CATEGORIES &vecSplitCodes, int *aiSplitCodes ) { unsigned long i=0; for(i=0; i= cRows) || (iCol >= cCols)) { hr = GBM_INVALIDARG; goto Error; } dValue = adX[iCol*cRows + iRow]; Cleanup: return hr; Error: goto Cleanup; } bool fHasOffset; double *adX; int *aiXOrder; double *adXTemp4Order; double *adY; double *adOffset; double *adWeight; double *adMisc; char **apszVarNames; int *acVarClasses; int *alMonotoneVar; int cRows; int cCols; private: }; #endif // DATASET_H gbm/src/dataset.cpp0000644000176200001440000000314112143232747013743 0ustar liggesusers// GBM by Greg Ridgeway Copyright (C) 2003 #include "dataset.h" CDataset::CDataset() { fHasOffset = false; adX = NULL; aiXOrder = NULL; adXTemp4Order = NULL; adY = NULL; adOffset = NULL; adWeight = NULL; apszVarNames = NULL; cRows = 0; cCols = 0; } CDataset::~CDataset() { } GBMRESULT CDataset::ResetWeights() { GBMRESULT hr = GBM_OK; int i = 0; if(adWeight == NULL) { hr = GBM_INVALIDARG; goto Error; } for(i=0; icRows = cRows; this->cCols = cCols; this->adX = adX; this->aiXOrder = aiXOrder; this->adY = adY; this->adOffset = adOffset; this->adWeight = adWeight; this->acVarClasses = acVarClasses; this->alMonotoneVar = alMonotoneVar; if((adOffset != NULL) && !ISNA(*adOffset)) { this->adOffset = adOffset; fHasOffset = true; } else { this->adOffset = NULL; fHasOffset = false; } if((adMisc != NULL) && !ISNA(*adMisc)) { this->adMisc = adMisc; } else { this->adMisc = NULL; } Cleanup: return hr; Error: goto Cleanup; } gbm/src/coxph.h0000644000176200001440000000577712143232747013125 0ustar 
liggesusers//------------------------------------------------------------------------------ // GBM by Greg Ridgeway Copyright (C) 2003 // // File: coxph.h // // License: GNU GPL (version 2 or later) // // Contents: Cox proportional hazard object // // Owner: gregr@rand.org // // History: 3/26/2001 gregr created // 2/14/2003 gregr: adapted for R implementation // //------------------------------------------------------------------------------ #ifndef COXPH_H #define COXPH_H #include "distribution.h" #include "matrix.h" class CCoxPH : public CDistribution { public: CCoxPH(); virtual ~CCoxPH(); GBMRESULT UpdateParams(double *adF, double *adOffset, double *adWeight, unsigned long cLength) { return GBM_OK; }; GBMRESULT ComputeWorkingResponse(double *adT, double *adDelta, double *adOffset, double *adF, double *adZ, double *adWeight, bool *afInBag, unsigned long nTrain, int cIdxOff); GBMRESULT InitF(double *adT, double *adDelta, double *adOffset, double *adWeight, double &dInitF, unsigned long cLength); GBMRESULT FitBestConstant(double *adT, double *adDelta, double *adOffset, double *adW, double *adF, double *adZ, unsigned long *aiNodeAssign, unsigned long nTrain, VEC_P_NODETERMINAL vecpTermNodes, unsigned long cTermNodes, unsigned long cMinObsInNode, bool *afInBag, double *adFadj, int cIdxOff); double Deviance(double *adT, double *adDelta, double *adOffset, double *adWeight, double *adF, unsigned long cLength, int cIdxOff); double BagImprovement(double *adT, double *adDelta, double *adOffset, double *adWeight, double *adF, double *adFadj, bool *afInBag, double dStepSize, unsigned long nTrain); private: vector vecdP; vector vecdRiskTot; vector vecdG; vector veciK2Node; vector veciNode2K; matrix matH; matrix matHinv; }; #endif // COXPH_H gbm/src/coxph.cpp0000644000176200001440000001375512143232747013453 0ustar liggesusers// GBM by Greg Ridgeway Copyright (C) 2003 #include "coxph.h" CCoxPH::CCoxPH() { } CCoxPH::~CCoxPH() { } GBMRESULT CCoxPH::ComputeWorkingResponse ( double 
*adT, double *adDelta, double *adOffset, double *adF, double *adZ, double *adWeight, bool *afInBag, unsigned long nTrain, int cIdxOff ) { unsigned long i = 0; double dF = 0.0; double dTot = 0.0; double dRiskTot = 0.0; vecdRiskTot.resize(nTrain); dRiskTot = 0.0; for(i=0; icN >= cMinObsInNode) { veciK2Node[K] = i; veciNode2K[i] = K; K++; } } vecdP.resize(K); matH.setactualsize(K-1); vecdG.resize(K-1); vecdG.assign(K-1,0.0); // zero the Hessian for(k=0; kcN >= cMinObsInNode)) { dF = adF[i] + ((adOffset==NULL) ? 0.0 : adOffset[i]); vecdP[veciNode2K[aiNodeAssign[i]]] += adW[i]*exp(dF); dRiskTot += adW[i]*exp(dF); if(adDelta[i]==1.0) { // compute g and H for(k=0; kdPrediction = 0.0; } for(m=0; mdPrediction = 0.0; break; } else { vecpTermNodes[veciK2Node[k]]->dPrediction -= dTemp*vecdG[m]; } } } // vecpTermNodes[veciK2Node[K-1]]->dPrediction = 0.0; // already set to 0.0 return hr; } double CCoxPH::BagImprovement ( double *adT, double *adDelta, double *adOffset, double *adWeight, double *adF, double *adFadj, bool *afInBag, double dStepSize, unsigned long nTrain ) { double dReturnValue = 0.0; double dNum = 0.0; double dDen = 0.0; double dF = 0.0; double dW = 0.0; unsigned long i = 0; dNum = 0.0; dDen = 0.0; for(i=0; i #define GBM_FAILED(hr) ((unsigned long)hr != 0) typedef unsigned long GBMRESULT; #define GBM_OK 0 #define GBM_FAIL 1 #define GBM_INVALIDARG 2 #define GBM_OUTOFMEMORY 3 #define GBM_INVALID_DATA 4 #define GBM_NOTIMPL 5 #define LEVELS_PER_CHUNK ((unsigned long) 1) typedef unsigned long ULONG; typedef char *PCHAR; // #define NOISY_DEBUG #endif // BUILDINFO_H gbm/src/bernoulli.h0000644000176200001440000000555312143232747013767 0ustar liggesusers//------------------------------------------------------------------------------ // GBM by Greg Ridgeway Copyright (C) 2003 // // File: bernoulli.h // // License: GNU GPL (version 2 or later) // // Contents: bernoulli object // // Owner: gregr@rand.org // // History: 3/26/2001 gregr created // 2/14/2003 gregr: adapted for R 
implementation // //------------------------------------------------------------------------------ #ifndef BERNOULLI_H #define BERNOULLI_H #include "distribution.h" #include "buildinfo.h" class CBernoulli : public CDistribution { public: CBernoulli(); virtual ~CBernoulli(); GBMRESULT UpdateParams(double *adF, double *adOffset, double *adWeight, unsigned long cLength) { return GBM_OK; }; GBMRESULT ComputeWorkingResponse(double *adY, double *adMisc, double *adOffset, double *adF, double *adZ, double *adWeight, bool *afInBag, unsigned long nTrain, int cIdxOff); double Deviance(double *adY, double *adMisc, double *adOffset, double *adWeight, double *adF, unsigned long cLength, int cIdxOff); GBMRESULT InitF(double *adY, double *adMisc, double *adOffset, double *adWeight, double &dInitF, unsigned long cLength); GBMRESULT FitBestConstant(double *adY, double *adMisc, double *adOffset, double *adW, double *adF, double *adZ, unsigned long *aiNodeAssign, unsigned long nTrain, VEC_P_NODETERMINAL vecpTermNodes, unsigned long cTermNodes, unsigned long cMinObsInNode, bool *afInBag, double *adFadj, int cIdxOff); double BagImprovement(double *adY, double *adMisc, double *adOffset, double *adWeight, double *adF, double *adFadj, bool *afInBag, double dStepSize, unsigned long nTrain); private: vector vecdNum; vector vecdDen; }; #endif // BERNOULLI_H gbm/src/bernoulli.cpp0000644000176200001440000001071512143232747014316 0ustar liggesusers// GBM by Greg Ridgeway Copyright (C) 2003 #include "bernoulli.h" CBernoulli::CBernoulli() { } CBernoulli::~CBernoulli() { } GBMRESULT CBernoulli::ComputeWorkingResponse ( double *adY, double *adMisc, double *adOffset, double *adF, double *adZ, double *adWeight, bool *afInBag, unsigned long nTrain, int cIdxOff ) { unsigned long i = 0; double dProb = 0.0; double dF = 0.0; for(i=0; i 0.0001) { dNum=0.0; dDen=0.0; for(i=0; idPrediction = 0.0; } else { vecpTermNodes[iNode]->dPrediction = vecdNum[iNode]/vecdDen[iNode]; } } } return hr; } double 
CBernoulli::BagImprovement ( double *adY, double *adMisc, double *adOffset, double *adWeight, double *adF, double *adFadj, bool *afInBag, double dStepSize, unsigned long nTrain ) { double dReturnValue = 0.0; double dF = 0.0; double dW = 0.0; unsigned long i = 0; for(i=0; i vecdNum; vector vecdDen; }; #endif // ADABOOST_H gbm/src/adaboost.cpp0000644000176200001440000001027412143232747014117 0ustar liggesusers// GBM by Greg Ridgeway Copyright (C) 2003 #include "adaboost.h" CAdaBoost::CAdaBoost() { } CAdaBoost::~CAdaBoost() { } GBMRESULT CAdaBoost::ComputeWorkingResponse ( double *adY, double *adMisc, double *adOffset, double *adF, double *adZ, double *adWeight, bool *afInBag, unsigned long nTrain, int cIdxOff ) { unsigned long i = 0; if(adOffset == NULL) { for(i=0; idPrediction = 0.0; } else { vecpTermNodes[iNode]->dPrediction = vecdNum[iNode]/vecdDen[iNode]; } } } return hr; } double CAdaBoost::BagImprovement ( double *adY, double *adMisc, double *adOffset, double *adWeight, double *adF, double *adFadj, bool *afInBag, double dStepSize, unsigned long nTrain ) { double dReturnValue = 0.0; double dF = 0.0; double dW = 0.0; unsigned long i = 0; for(i=0; i 1 which are not possible. } \value{ Returns the value of \eqn{H}. } \references{ J.H. Friedman and B.E. Popescu (2005). \dQuote{Predictive Learning via Rule Ensembles.} Section 8.1 } \author{Greg Ridgeway \email{gregridgeway@gmail.com}} \seealso{ \code{\link{gbm}}, \code{\link{gbm.object}} } \keyword{ methods } gbm/man/gbmCrossVal.Rd0000644000176200001440000000557212134211007014303 0ustar liggesusers\name{gbmCrossVal} \alias{gbmCrossVal} \alias{gbmCrossValModelBuild} \alias{gbmDoFold} \alias{gbmCrossValErr} \alias{gbmCrossValPredictions} \title{Cross-validate a gbm} \description{Functions for cross-validating gbm. 
These functions are used internally and are not intended for end-user direct usage.} \usage{ gbmCrossVal(cv.folds, nTrain, n.cores, class.stratify.cv, data, x, y, offset, distribution, w, var.monotone, n.trees, interaction.depth, n.minobsinnode, shrinkage, bag.fraction, var.names, response.name, group) gbmCrossValModelBuild(cv.folds, cv.group, n.cores, i.train, x, y, offset, distribution, w, var.monotone, n.trees, interaction.depth, n.minobsinnode, shrinkage, bag.fraction, var.names, response.name, group) gbmDoFold(X, i.train, x, y, offset, distribution, w, var.monotone, n.trees, interaction.depth, n.minobsinnode, shrinkage, bag.fraction, cv.group, var.names, response.name, group, s) gbmCrossValErr(cv.models, cv.folds, cv.group, nTrain, n.trees) gbmCrossValPredictions(cv.models, cv.folds, cv.group, best.iter.cv, distribution, data, y) } \arguments{ \item{cv.folds}{The number of cross-validation folds.} \item{nTrain}{The number of training samples.} \item{n.cores}{The number of cores to use.} \item{class.stratify.cv}{Whether or not stratified cross-validation samples are used.} \item{data}{The data.} \item{x}{The model matrix.} \item{y}{The response variable.} \item{offset}{The offset.} \item{distribution}{The type of loss function. See \code{\link{gbm}}.} \item{w}{Observation weights.} \item{var.monotone}{See \code{\link{gbm}}.} \item{n.trees}{The number of trees to fit.} \item{interaction.depth}{The degree of allowed interactions. See \code{\link{gbm}}.} \item{n.minobsinnode}{See \code{\link{gbm}}.} \item{shrinkage}{See \code{\link{gbm}}.} \item{bag.fraction}{See \code{\link{gbm}}.} \item{var.names}{See \code{\link{gbm}}.} \item{response.name}{See \code{\link{gbm}}.} \item{group}{Used when \code{distribution = "pairwise"}. 
See \code{\link{gbm}}.} \item{i.train}{Items in the training set.} \item{cv.models}{A list containing the models for each fold.} \item{cv.group}{A vector indicating the cross-validation fold for each member of the training set.} \item{best.iter.cv}{The iteration with lowest cross-validation error.} \item{X}{Index (cross-validation fold) on which to subset.} \item{s}{Random seed.} } % Close arguments \details{ These functions are not intended for end-user direct usage, but are used internally by \code{gbm}.} \value{A list containing the cross-validation error and predictions.} \references{ J.H. Friedman (2001). "Greedy Function Approximation: A Gradient Boosting Machine," Annals of Statistics 29(5):1189-1232. L. Breiman (2001). \href{http://oz.berkeley.edu/users/breiman/randomforest2001.pdf}{Random Forests}. } \author{Greg Ridgeway \email{gregridgeway@gmail.com}} \seealso{ \code{\link{gbm}} } \keyword{ models } gbm/man/gbm.roc.area.Rd0000644000176200001440000000354212102666411014322 0ustar liggesusers\name{gbm.roc.area} \alias{gbm.roc.area} \alias{gbm.conc} \alias{ir.measure.conc} \alias{ir.measure.auc} \alias{ir.measure.mrr} \alias{ir.measure.map} \alias{ir.measure.ndcg} \alias{perf.pairwise} \title{ Compute Information Retrieval measures. } \description{ Functions to compute Information Retrieval measures for pairwise loss for a single group. The function returns the respective metric, or a negative value if it is undefined for the given group. } \usage{ gbm.roc.area(obs, pred) ir.measure.conc(y.f, max.rank) ir.measure.auc(y.f, max.rank) ir.measure.mrr(y.f, max.rank) ir.measure.map(y.f, max.rank) ir.measure.ndcg(y.f, max.rank) perf.pairwise(y, f, group, metric="ndcg", w=NULL, max.rank=0) } %- maybe also 'usage' for other objects documented here. 
\arguments{ \item{obs}{Observed value} \item{pred}{Predicted value} \item{metric}{What type of performance measure to compute.} \item{y, y.f, f, w, group, max.rank}{Used internally.} } \details{ For simplicity, we have no special handling for ties; instead, we break ties randomly. This is slightly inaccurate for individual groups, but should have only a small effect on the overall measure. \code{gbm.conc} computes the concordance index: Fraction of all pairs (i,j) with i Define data, use random, ##-- or do help(data=index) for the standard data sets. } \keyword{ models } gbm/man/gbm.perf.Rd0000644000176200001440000000353112134211007013552 0ustar liggesusers\name{gbm.perf} \alias{gbm.perf} \title{GBM performance} \description{ Estimates the optimal number of boosting iterations for a \code{gbm} object and optionally plots various performance measures } \usage{ gbm.perf(object, plot.it = TRUE, oobag.curve = FALSE, overlay = TRUE, method) } \arguments{ \item{object}{a \code{\link{gbm.object}} created from an initial call to \code{\link{gbm}}.} \item{plot.it}{an indicator of whether or not to plot the performance measures. Setting \code{plot.it=TRUE} creates two plots. The first plot plots \code{object$train.error} (in black) and \code{object$valid.error} (in red) versus the iteration number. The scale of the error measurement, shown on the left vertical axis, depends on the \code{distribution} argument used in the initial call to \code{\link{gbm}}.} \item{oobag.curve}{indicates whether to plot the out-of-bag performance measures in a second plot.} \item{overlay}{if TRUE and oobag.curve=TRUE then a right y-axis is added to the training and test error plot and the estimated cumulative improvement in the loss function is plotted versus the iteration number.} \item{method}{indicate the method used to estimate the optimal number of boosting iterations. 
\code{method="OOB"} computes the out-of-bag estimate and \code{method="test"} uses the test (or validation) dataset to compute an out-of-sample estimate. \code{method="cv"} extracts the optimal number of iterations using cross-validation if \code{gbm} was called with \code{cv.folds}>1} } \value{ \code{gbm.perf} returns the estimated optimal number of iterations. The method of computation depends on the \code{method} argument.} \author{Greg Ridgeway \email{gregridgeway@gmail.com}} \seealso{\code{\link{gbm}}, \code{\link{gbm.object}}} \keyword{nonlinear} \keyword{survival} \keyword{nonparametric} \keyword{tree} gbm/man/gbm.object.Rd0000644000176200001440000000450312142655062014100 0ustar liggesusers\name{gbm.object} \alias{gbm.object} \title{Generalized Boosted Regression Model Object} \description{These are objects representing fitted \code{gbm}s.} \section{Structure}{The following components must be included in a legitimate \code{gbm} object.} \value{ \item{initF}{the "intercept" term, the initial predicted value to which trees make adjustments} \item{fit}{a vector containing the fitted values on the scale of regression function (e.g. log-odds scale for bernoulli, log scale for poisson)} \item{train.error}{a vector of length equal to the number of fitted trees containing the value of the loss function for each boosting iteration evaluated on the training data} \item{valid.error}{a vector of length equal to the number of fitted trees containing the value of the loss function for each boosting iteration evaluated on the validation data} \item{cv.error}{if \code{cv.folds}<2 this component is NULL. Otherwise, this component is a vector of length equal to the number of fitted trees containing a cross-validated estimate of the loss function for each boosting iteration} \item{oobag.improve}{a vector of length equal to the number of fitted trees containing an out-of-bag estimate of the marginal reduction in the expected value of the loss function. 
The out-of-bag estimate uses only the training data and is useful for estimating the optimal number of boosting iterations. See \code{\link{gbm.perf}}} \item{trees}{a list containing the tree structures. The components are best viewed using \code{\link{pretty.gbm.tree}}} \item{c.splits}{a list of all the categorical splits in the collection of trees. If the \code{trees[[i]]} component of a \code{gbm} object describes a categorical split then the splitting value will refer to a component of \code{c.splits}. That component of \code{c.splits} will be a vector of length equal to the number of levels in the categorical split variable. -1 indicates left, +1 indicates right, and 0 indicates that the level was not present in the training data} \item{cv.fitted}{If cross-validation was performed, the cross-validation predicted values on the scale of the linear predictor. That is, the fitted values from the ith CV-fold, for the model having been trained on the data in all other folds.} } \author{Greg Ridgeway \email{gregridgeway@gmail.com}} \seealso{ \code{\link{gbm}} } \keyword{methods} gbm/man/gbm.Rd0000644000176200001440000004123412143232225012626 0ustar liggesusers\name{gbm} \alias{gbm} \alias{gbm.more} \alias{gbm.fit} \title{Generalized Boosted Regression Modeling} \description{Fits generalized boosted regression models.} \usage{ gbm(formula = formula(data), distribution = "bernoulli", data = list(), weights, var.monotone = NULL, n.trees = 100, interaction.depth = 1, n.minobsinnode = 10, shrinkage = 0.001, bag.fraction = 0.5, train.fraction = 1.0, cv.folds=0, keep.data = TRUE, verbose = "CV", class.stratify.cv=NULL, n.cores = NULL) gbm.fit(x, y, offset = NULL, misc = NULL, distribution = "bernoulli", w = NULL, var.monotone = NULL, n.trees = 100, interaction.depth = 1, n.minobsinnode = 10, shrinkage = 0.001, bag.fraction = 0.5, nTrain = NULL, train.fraction = NULL, keep.data = TRUE, verbose = TRUE, var.names = NULL, response.name = "y", group = NULL) gbm.more(object, 
n.new.trees = 100, data = NULL, weights = NULL, offset = NULL, verbose = NULL) } \arguments{\item{formula}{a symbolic description of the model to be fit. The formula may include an offset term (e.g. y~offset(n)+x). If \code{keep.data=FALSE} in the initial call to \code{gbm} then it is the user's responsibility to resupply the offset to \code{\link{gbm.more}}.} \item{distribution}{either a character string specifying the name of the distribution to use or a list with a component \code{name} specifying the distribution and any additional parameters needed. If not specified, \code{gbm} will try to guess: if the response has only 2 unique values, bernoulli is assumed; otherwise, if the response is a factor, multinomial is assumed; otherwise, if the response has class "Surv", coxph is assumed; otherwise, gaussian is assumed. Currently available options are "gaussian" (squared error), "laplace" (absolute loss), "tdist" (t-distribution loss), "bernoulli" (logistic regression for 0-1 outcomes), "huberized" (huberized hinge loss for 0-1 outcomes), "multinomial" (classification when there are more than 2 classes), "adaboost" (the AdaBoost exponential loss for 0-1 outcomes), "poisson" (count outcomes), "coxph" (right censored observations), "quantile", or "pairwise" (ranking measure using the LambdaMart algorithm). If quantile regression is specified, \code{distribution} must be a list of the form \code{list(name="quantile",alpha=0.25)} where \code{alpha} is the quantile to estimate. The current version's quantile regression method does not handle non-constant weights and will stop. If "tdist" is specified, the default degrees of freedom is 4 and this can be controlled by specifying \code{distribution=list(name="tdist", df=DF)} where \code{DF} is your chosen degrees of freedom. 
If "pairwise" regression is specified, \code{distribution} must be a list of the form \code{list(name="pairwise",group=...,metric=...,max.rank=...)} (\code{metric} and \code{max.rank} are optional, see below). \code{group} is a character vector with the column names of \code{data} that jointly indicate the group an instance belongs to (typically a query in Information Retrieval applications). For training, only pairs of instances from the same group and with different target labels can be considered. \code{metric} is the IR measure to use, one of \describe{ \item{\code{conc}:}{Fraction of concordant pairs; for binary labels, this is equivalent to the Area under the ROC Curve} \item{\code{mrr}:}{Mean reciprocal rank of the highest-ranked positive instance} \item{\code{map}:}{Mean average precision, a generalization of \code{mrr} to multiple positive instances} \item{\code{ndcg:}}{Normalized discounted cumulative gain. The score is the weighted sum (DCG) of the user-supplied target values, weighted by log(rank+1), and normalized to the maximum achievable value. This is the default if the user did not specify a metric.} } \code{ndcg} and \code{conc} allow arbitrary target values, while binary targets \{0,1\} are expected for \code{map} and \code{mrr}. For \code{ndcg} and \code{mrr}, a cut-off can be chosen using a positive integer parameter \code{max.rank}. If left unspecified, all ranks are taken into account. Note that splitting of instances into training and validation sets follows group boundaries and therefore only approximates the specified \code{train.fraction} ratio (the same applies to cross-validation folds). Internally, queries are randomly shuffled before training, to avoid bias. Weights can be used in conjunction with pairwise metrics, however it is assumed that they are constant for instances from the same group. For details and background on the algorithm, see e.g. Burges (2010). 
} \item{data}{an optional data frame containing the variables in the model. By default the variables are taken from \code{environment(formula)}, typically the environment from which \code{gbm} is called. If \code{keep.data=TRUE} in the initial call to \code{gbm} then \code{gbm} stores a copy with the object. If \code{keep.data=FALSE} then subsequent calls to \code{\link{gbm.more}} must resupply the same dataset. It becomes the user's responsibility to resupply the same data at this point.} \item{weights}{an optional vector of weights to be used in the fitting process. Must be positive but do not need to be normalized. If \code{keep.data=FALSE} in the initial call to \code{gbm} then it is the user's responsibility to resupply the weights to \code{\link{gbm.more}}.} \item{var.monotone}{an optional vector, the same length as the number of predictors, indicating which variables have a monotone increasing (+1), decreasing (-1), or arbitrary (0) relationship with the outcome.} \item{n.trees}{the total number of trees to fit. This is equivalent to the number of iterations and the number of basis functions in the additive expansion.} \item{cv.folds}{Number of cross-validation folds to perform. If \code{cv.folds}>1 then \code{gbm}, in addition to the usual fit, will perform a cross-validation, calculate an estimate of generalization error returned in \code{cv.error}.} \item{interaction.depth}{The maximum depth of variable interactions. 1 implies an additive model, 2 implies a model with up to 2-way interactions, etc.} \item{n.minobsinnode}{minimum number of observations in the trees terminal nodes. Note that this is the actual number of observations not the total weight.} \item{shrinkage}{a shrinkage parameter applied to each tree in the expansion. Also known as the learning rate or step-size reduction.} \item{bag.fraction}{the fraction of the training set observations randomly selected to propose the next tree in the expansion. 
This introduces randomnesses into the model fit. If \code{bag.fraction}<1 then running the same model twice will result in similar but different fits. \code{gbm} uses the R random number generator so \code{set.seed} can ensure that the model can be reconstructed. Preferably, the user can save the returned \code{\link{gbm.object}} using \code{\link{save}}.} \item{train.fraction}{The first \code{train.fraction * nrows(data)} observations are used to fit the \code{gbm} and the remainder are used for computing out-of-sample estimates of the loss function.} \item{nTrain}{An integer representing the number of cases on which to train. This is the preferred way of specification for \code{gbm.fit}; The option \code{train.fraction} in \code{gbm.fit} is deprecated and only maintained for backward compatibility. These two parameters are mutually exclusive. If both are unspecified, all data is used for training.} \item{keep.data}{a logical variable indicating whether to keep the data and an index of the data stored with the object. Keeping the data and index makes subsequent calls to \code{\link{gbm.more}} faster at the cost of storing an extra copy of the dataset.} \item{object}{a \code{gbm} object created from an initial call to \code{\link{gbm}}.} \item{n.new.trees}{the number of additional trees to add to \code{object}.} \item{verbose}{If TRUE, gbm will print out progress and performance indicators. If this option is left unspecified for gbm.more then it uses \code{verbose} from \code{object}.} \item{class.stratify.cv}{whether or not the cross-validation should be stratified by class. Defaults to \code{TRUE} for \code{distribution="multinomial"} and is only implementated for \code{multinomial} and \code{bernoulli}. 
The purpose of stratifying the cross-validation is to help avoiding situations in which training sets do not contain all classes.} \item{x, y}{For \code{gbm.fit}: \code{x} is a data frame or data matrix containing the predictor variables and \code{y} is the vector of outcomes. The number of rows in \code{x} must be the same as the length of \code{y}.} \item{offset}{a vector of values for the offset} \item{misc}{For \code{gbm.fit}: \code{misc} is an R object that is simply passed on to the gbm engine. It can be used for additional data for the specific distribution. Currently it is only used for passing the censoring indicator for the Cox proportional hazards model.} \item{w}{For \code{gbm.fit}: \code{w} is a vector of weights of the same length as the \code{y}.} \item{var.names}{For \code{gbm.fit}: A vector of strings of length equal to the number of columns of \code{x} containing the names of the predictor variables.} \item{response.name}{For \code{gbm.fit}: A character string label for the response variable.} \item{group}{\code{group} used when \code{distribution = 'pairwise'.}} \item{n.cores}{The number of CPU cores to use. The cross-validation loop will attempt to send different CV folds off to different cores. If \code{n.cores} is not specified by the user, it is guessed using the \code{detectCores} function in the \code{parallel} package. Note that the documentation for \code{detectCores} makes clear that it is not failsave and could return a spurious number of available cores.} } \details{See the \href{../doc/gbm.pdf}{gbm vignette} for technical details. This package implements the generalized boosted modeling framework. Boosting is the process of iteratively adding basis functions in a greedy fashion so that each additional basis function further reduces the selected loss function. This implementation closely follows Friedman's Gradient Boosting Machine (Friedman, 2001). 
In addition to many of the features documented in the Gradient Boosting Machine, \code{gbm} offers additional features including the out-of-bag estimator for the optimal number of iterations, the ability to store and manipulate the resulting \code{gbm} object, and a variety of other loss functions that had not previously had associated boosting algorithms, including the Cox partial likelihood for censored data, the poisson likelihood for count outcomes, and a gradient boosting implementation to minimize the AdaBoost exponential loss function. \code{gbm.fit} provides the link between R and the C++ gbm engine. \code{gbm} is a front-end to \code{gbm.fit} that uses the familiar R modeling formulas. However, \code{\link[stats]{model.frame}} is very slow if there are many predictor variables. For power-users with many variables use \code{gbm.fit}. For general practice \code{gbm} is preferable.} \value{ \code{gbm}, \code{gbm.fit}, and \code{gbm.more} return a \code{\link{gbm.object}}. } \references{ Y. Freund and R.E. Schapire (1997) \dQuote{A decision-theoretic generalization of on-line learning and an application to boosting,} \emph{Journal of Computer and System Sciences,} 55(1):119-139. G. Ridgeway (1999). \dQuote{The state of boosting,} \emph{Computing Science and Statistics} 31:172-181. J.H. Friedman, T. Hastie, R. Tibshirani (2000). \dQuote{Additive Logistic Regression: a Statistical View of Boosting,} \emph{Annals of Statistics} 28(2):337-374. J.H. Friedman (2001). \dQuote{Greedy Function Approximation: A Gradient Boosting Machine,} \emph{Annals of Statistics} 29(5):1189-1232. J.H. Friedman (2002). \dQuote{Stochastic Gradient Boosting,} \emph{Computational Statistics and Data Analysis} 38(4):367-378. B. Kriegler (2007). \href{http://statistics.ucla.edu/theses/uclastat-dissertation-2007:2}{Cost-Sensitive Stochastic Gradient Boosting Within a Quantitative Regression Framework}. PhD dissertation, UCLA Statistics. C. Burges (2010). 
\dQuote{From RankNet to LambdaRank to LambdaMART: An Overview,} Microsoft Research Technical Report MSR-TR-2010-82. \href{http://sites.google.com/site/gregridgeway}{Greg Ridgeway's site}. The \href{http://www-stat.stanford.edu/~jhf/R-MART.html}{MART} website. } \author{Greg Ridgeway \email{gregridgeway@gmail.com} Quantile regression code developed by Brian Kriegler \email{bk@stat.ucla.edu} t-distribution, and multinomial code developed by Harry Southworth and Daniel Edwards Pairwise code developed by Stefan Schroedl \email{schroedl@a9.com}} \seealso{ \code{\link{gbm.object}}, \code{\link{gbm.perf}}, \code{\link{plot.gbm}}, \code{\link{predict.gbm}}, \code{\link{summary.gbm}}, \code{\link{pretty.gbm.tree}}. } \examples{ # A least squares regression example # create some data N <- 1000 X1 <- runif(N) X2 <- 2*runif(N) X3 <- ordered(sample(letters[1:4],N,replace=TRUE),levels=letters[4:1]) X4 <- factor(sample(letters[1:6],N,replace=TRUE)) X5 <- factor(sample(letters[1:3],N,replace=TRUE)) X6 <- 3*runif(N) mu <- c(-1,0,1,2)[as.numeric(X3)] SNR <- 10 # signal-to-noise ratio Y <- X1**1.5 + 2 * (X2**.5) + mu sigma <- sqrt(var(Y)/SNR) Y <- Y + rnorm(N,0,sigma) # introduce some missing values X1[sample(1:N,size=500)] <- NA X4[sample(1:N,size=300)] <- NA data <- data.frame(Y=Y,X1=X1,X2=X2,X3=X3,X4=X4,X5=X5,X6=X6) # fit initial model gbm1 <- gbm(Y~X1+X2+X3+X4+X5+X6, # formula data=data, # dataset var.monotone=c(0,0,0,0,0,0), # -1: monotone decrease, # +1: monotone increase, # 0: no monotone restrictions distribution="gaussian", # see the help for other choices n.trees=1000, # number of trees shrinkage=0.05, # shrinkage or learning rate, # 0.001 to 0.1 usually work interaction.depth=3, # 1: additive model, 2: two-way interactions, etc. 
bag.fraction = 0.5, # subsampling fraction, 0.5 is probably best train.fraction = 0.5, # fraction of data for training, # first train.fraction*N used for training n.minobsinnode = 10, # minimum total weight needed in each node cv.folds = 3, # do 3-fold cross-validation keep.data=TRUE, # keep a copy of the dataset with the object verbose=FALSE, # don't print out progress n.cores=1) # use only a single core (detecting #cores is # error-prone, so avoided here) # check performance using an out-of-bag estimator # OOB underestimates the optimal number of iterations best.iter <- gbm.perf(gbm1,method="OOB") print(best.iter) # check performance using a 50\% heldout test set best.iter <- gbm.perf(gbm1,method="test") print(best.iter) # check performance using 5-fold cross-validation best.iter <- gbm.perf(gbm1,method="cv") print(best.iter) # plot the performance # plot variable influence summary(gbm1,n.trees=1) # based on the first tree summary(gbm1,n.trees=best.iter) # based on the estimated best number of trees # compactly print the first and last trees for curiosity print(pretty.gbm.tree(gbm1,1)) print(pretty.gbm.tree(gbm1,gbm1$n.trees)) # make some new data N <- 1000 X1 <- runif(N) X2 <- 2*runif(N) X3 <- ordered(sample(letters[1:4],N,replace=TRUE)) X4 <- factor(sample(letters[1:6],N,replace=TRUE)) X5 <- factor(sample(letters[1:3],N,replace=TRUE)) X6 <- 3*runif(N) mu <- c(-1,0,1,2)[as.numeric(X3)] Y <- X1**1.5 + 2 * (X2**.5) + mu + rnorm(N,0,sigma) data2 <- data.frame(Y=Y,X1=X1,X2=X2,X3=X3,X4=X4,X5=X5,X6=X6) # predict on the new data using "best" number of trees # f.predict generally will be on the canonical scale (logit,log,etc.) 
f.predict <- predict(gbm1,data2,best.iter) # least squares error print(sum((data2$Y-f.predict)^2)) # create marginal plots # plot variable X1,X2,X3 after "best" iterations par(mfrow=c(1,3)) plot(gbm1,1,best.iter) plot(gbm1,2,best.iter) plot(gbm1,3,best.iter) par(mfrow=c(1,1)) # contour plot of variables 1 and 2 after "best" iterations plot(gbm1,1:2,best.iter) # lattice plot of variables 2 and 3 plot(gbm1,2:3,best.iter) # lattice plot of variables 3 and 4 plot(gbm1,3:4,best.iter) # 3-way plots plot(gbm1,c(1,2,6),best.iter,cont=20) plot(gbm1,1:3,best.iter) plot(gbm1,2:4,best.iter) plot(gbm1,3:5,best.iter) # do another 100 iterations gbm2 <- gbm.more(gbm1,100, verbose=FALSE) # stop printing detailed progress } \keyword{models} \keyword{nonlinear} \keyword{survival} \keyword{nonparametric} \keyword{tree} gbm/man/gbm-package.Rd0000644000176200001440000000512612143176426014231 0ustar liggesusers\name{gbm-package} \alias{gbm-package} \docType{package} \title{Generalized Boosted Regression Models} \description{This package implements extensions to Freund and Schapire's AdaBoost algorithm and J. Friedman's gradient boosting machine. 
Includes regression methods for least squares, absolute loss, logistic, Poisson, Cox proportional hazards partial likelihood, multinomial, t-distribution, AdaBoost exponential loss, Learning to Rank, and Huberized hinge loss.} \details{ \tabular{ll}{ Package: \tab gbm\cr Version: \tab 2.1\cr Date: \tab 2013-05-10\cr Depends: \tab R (>= 2.9.0), survival, lattice, mgcv\cr License: \tab GPL (version 2 or newer)\cr URL: \tab http://code.google.com/p/gradientboostedmodels/\cr } Index: \preformatted{basehaz.gbm Baseline hazard function calibrate.plot Calibration plot gbm Generalized Boosted Regression Modeling gbm.object Generalized Boosted Regression Model Object gbm.perf GBM performance plot.gbm Marginal plots of fitted gbm objects predict.gbm Predict method for GBM Model Fits pretty.gbm.tree Print gbm tree components quantile.rug Quantile rug plot relative.influence Methods for estimating relative influence shrink.gbm L1 shrinkage of the predictor variables in a GBM shrink.gbm.pred Predictions from a shrunked GBM summary.gbm Summary of a gbm object } Further information is available in the following vignettes: \tabular{ll}{ \code{gbm} \tab Generalized Boosted Models: A guide to the gbm package (source, pdf)\cr} } % Close \details \author{ Greg Ridgeway \email{gregridgeway@gmail.com} with contributions by Daniel Edwards, Brian Kriegler, Stefan Schroedl and Harry Southworth. } \references{ Y. Freund and R.E. Schapire (1997) \dQuote{A decision-theoretic generalization of on-line learning and an application to boosting,} \emph{Journal of Computer and System Sciences,} 55(1):119-139. G. Ridgeway (1999). \dQuote{The state of boosting,} \emph{Computing Science and Statistics} 31:172-181. J.H. Friedman, T. Hastie, R. Tibshirani (2000). \dQuote{Additive Logistic Regression: a Statistical View of Boosting,} \emph{Annals of Statistics} 28(2):337-374. J.H. Friedman (2001). 
\dQuote{Greedy Function Approximation: A Gradient Boosting Machine,} \emph{Annals of Statistics} 29(5):1189-1232. J.H. Friedman (2002). \dQuote{Stochastic Gradient Boosting,} \emph{Computational Statistics and Data Analysis} 38(4):367-378. The \href{http://www-stat.stanford.edu/~jhf/R-MART.html}{MART} website. } % Close \references \keyword{package} gbm/man/gbm-internal.Rd0000644000176200001440000000274012134211007014432 0ustar liggesusers\name{gbm-internal} \alias{guessDist} \alias{getStratify} \alias{getCVgroup} \alias{checkMissing} \alias{checkID} \alias{checkWeights} \alias{checkOffset} \alias{getVarNames} \alias{gbmCluster} \title{gbm internal functions} \description{Helper functions for preprocessing data prior to building the model} \usage{ guessDist(y) getCVgroup(distribution, class.stratify.cv, y, i.train, cv.folds, group) getStratify(strat, d) checkMissing(x, y) checkWeights(w, n) checkID(id) checkOffset(o, y) getVarNames(x) gbmCluster(n) } \arguments{ \item{y}{The response variable} \item{d, distribution}{The distribution, either specified by the user or implied} \item{class.stratify.cv}{Whether or not to stratify, if provided by the user} \item{i.train}{Computed internally by \code{gbm}} \item{group}{The group, if using \code{distibution='pairwise'}} \item{strat}{Whether or not to stratify} \item{cv.folds}{The number of cross-validation folds} \item{x}{The design matrix} \item{id}{The interaction depth} \item{w}{The weights} \item{n}{The number of cores to use in the cluster.} \item{o}{The offset} % \item{verbose}{Whether or not to print output to screen} % \item{X, var.monotone, n.trees, n.minobsinnode, shrinkage, bag.fraction, % var.names, response.name, cv.group}{Arguments passed % through to gbm.fit} } % Close \arguments \details{ These are functions used internally by \code{gbm} and not intended for direct use by the user. 
} gbm/man/calibrate.plot.Rd0000644000176200001440000000563512102666411014774 0ustar liggesusers\name{calibrate.plot} \alias{calibrate.plot} \title{Calibration plot} \description{ An experimental diagnostic tool that plots the fitted values versus the actual average values. Currently developed for only \code{distribution="bernoulli"}. } \usage{ calibrate.plot(y,p, distribution="bernoulli", replace=TRUE, line.par=list(col="black"), shade.col="lightyellow", shade.density=NULL, rug.par=list(side=1), xlab="Predicted value", ylab="Observed average", xlim=NULL,ylim=NULL, knots=NULL,df=6, ...) } \arguments{ \item{y}{ the outcome 0-1 variable } \item{p}{ the predictions estimating E(y|x) } \item{distribution}{the loss function used in creating \code{p}. \code{bernoulli} and \code{poisson} are currently the only special options. All others default to squared error assuming \code{gaussian}} \item{replace}{ determines whether this plot will replace or overlay the current plot. \code{replace=FALSE} is useful for comparing the calibration of several methods} \item{line.par}{ graphics parameters for the line } \item{shade.col}{ color for shading the 2 SE region. \code{shade.col=NA} implies no 2 SE region} \item{shade.density}{ the \code{density} parameter for \code{\link{polygon}}} \item{rug.par}{graphics parameters passed to \code{\link{rug}}} \item{xlab}{x-axis label corresponding to the predicted values} \item{ylab}{y-axis label corresponding to the observed average} \item{xlim,ylim}{x and y-axis limits. If not specified te function will select limits} \item{knots,df}{these parameters are passed directly to \code{\link[splines]{ns}} for constructing a natural spline smoother for the calibration curve} \item{...}{ other graphics parameters passed on to the plot function } } \details{ Uses natural splines to estimate E(y|p). Well-calibrated predictions imply that E(y|p) = p. The plot also includes a pointwise 95% confidence band. 
} \value{ \code{calibrate.plot} returns no values. } \references{ J.F. Yates (1982). "External correspondence: decomposition of the mean probability score," Organisational Behaviour and Human Performance 30:132-156. D.J. Spiegelhalter (1986). "Probabilistic Prediction in Patient Management and Clinical Trials," Statistics in Medicine 5:421-433. } \author{Greg Ridgeway \email{gregridgeway@gmail.com}} \examples{ # Don't want R CMD check to think there is a dependency on rpart # so comment out the example #library(rpart) #data(kyphosis) #y <- as.numeric(kyphosis$Kyphosis)-1 #x <- kyphosis$Age #glm1 <- glm(y~poly(x,2),family=binomial) #p <- predict(glm1,type="response") #calibrate.plot(y, p, xlim=c(0,0.6), ylim=c(0,0.6)) } \keyword{ hplot } gbm/man/basehaz.gbm.Rd0000644000176200001440000000344212102666411014244 0ustar liggesusers\name{basehaz.gbm} \alias{basehaz.gbm} \title{ Baseline hazard function } \description{ Computes the Breslow estimator of the baseline hazard function for a proportional hazard regression model } \usage{ basehaz.gbm(t, delta, f.x, t.eval = NULL, smooth = FALSE, cumulative = TRUE) } \arguments{ \item{t}{ the survival times } \item{delta}{ the censoring indicator } \item{f.x}{ the predicted values of the regression model on the log hazard scale } \item{t.eval}{ values at which the baseline hazard will be evaluated } \item{smooth}{ if \code{TRUE} \code{basehaz.gbm} will smooth the estimated baseline hazard using Friedman's super smoother \code{\link{supsmu}}} \item{cumulative}{ if \code{TRUE} the cumulative survival function will be computed } } \details{ The proportional hazard model assumes h(t|x)=lambda(t)*exp(f(x)). \code{\link{gbm}} can estimate the f(x) component via partial likelihood. After estimating f(x), \code{basehaz.gbm} can compute the a nonparametric estimate of lambda(t). 
} \value{ a vector of length equal to the length of t (or of length \code{t.eval} if \code{t.eval} is not \code{NULL}) containing the baseline hazard evaluated at t (or at \code{t.eval} if \code{t.eval} is not \code{NULL}). If \code{cumulative} is set to \code{TRUE} then the returned vector evaluates the cumulative hazard function at those values. } \references{N. Breslow (1972). "Disussion of `Regression Models and Life-Tables' by D.R. Cox," Journal of the Royal Statistical Society, Series B, 34(2):216-217. N. Breslow (1974). "Covariance analysis of censored survival data," Biometrics 30:89-99. } \author{ Greg Ridgeway \email{gregridgeway@gmail.com}} \seealso{ \code{\link[survival]{survfit}}, \code{\link{gbm}} } \keyword{ methods } \keyword{ survival } gbm/inst/0000755000176200001440000000000012102666411011773 5ustar liggesusersgbm/inst/doc/0000755000176200001440000000000012134211007012530 5ustar liggesusersgbm/inst/doc/srcltx.sty0000644000176200001440000001170412102666411014623 0ustar liggesusers%% %% This is file `srcltx.sty', %% generated with the docstrip utility. %% %% The original source files were: %% %% srcltx.dtx (with options: `package,latex') %% %% This package is in the public domain. It comes with no guarantees %% and no reserved rights. You can use or modify this package at your %% own risk. 
%% Originally written by: Aleksander Simonic %% Current maintainer: Stefan Ulrich %% \NeedsTeXFormat{LaTeX2e} \ProvidesPackage{srcltx}[2006/11/12 v1.6 Source specials for inverse search in DVI files] \newif\ifSRCOK \SRCOKtrue \newif\ifsrc@debug@ \newif\ifsrc@dviwin@ \newif\ifsrc@winedt@\src@winedt@true \newif\ifsrc@everypar@\src@everypar@true \newif\ifsrc@everymath@\src@everymath@true \RequirePackage{ifthen} \DeclareOption{active}{\SRCOKtrue} \DeclareOption{inactive}{\SRCOKfalse} \DeclareOption{nowinedt}{\src@winedt@false} \DeclareOption{debug}{\src@debug@true} \DeclareOption{nopar}{\global\src@everypar@false} \DeclareOption{nomath}{\global\src@everymath@false} \newcommand*\src@maybe@space{} \let\src@maybe@space\space \DeclareOption{dviwin}{\let\src@maybe@space\relax} \ExecuteOptions{active} \ProcessOptions \newcount\src@lastline \global\src@lastline=-1 \newcommand*\src@debug{} \def\src@debug#1{\ifsrc@debug@\typeout{DBG: |#1|}\fi} \newcommand*\MainFile{} \def\MainFile{\jobname.tex} \newcommand*\CurrentInput{} \gdef\CurrentInput{\MainFile} \newcommand*\WinEdt{} \def\WinEdt#1{\ifsrc@winedt@\typeout{:#1}\fi} \newcommand\src@AfterFi{} \def\src@AfterFi#1\fi{\fi#1} \AtBeginDocument{% \@ifpackageloaded{soul}{% \let\src@SOUL@\SOUL@ \def\SOUL@#1{% \ifSRCOK \SRCOKfalse\src@SOUL@{#1}\SRCOKtrue \else \src@AfterFi\src@SOUL@{#1}% \fi }% }{}% } \newcommand*\srcIncludeHook[1]{\protected@xdef\CurrentInput{#1.tex}} \newcommand*\srcInputHook[1]{% \src@getfilename@with@ext{#1}% } \newcommand*\src@spec{} \def\src@spec{% \ifSRCOK \ifnum\inputlineno>\src@lastline \global\src@lastline=\inputlineno \src@debug{% src:\the\inputlineno\src@maybe@space\CurrentInput}% \special{src:\the\inputlineno\src@maybe@space\CurrentInput}% \fi \fi } \newcommand\src@before@file@hook{} \newcommand\src@after@file@hook{} \def\src@before@file@hook{% \WinEdt{<+ \CurrentInput}% \global\src@lastline=0 \ifSRCOK\special{src:1\src@maybe@space\CurrentInput}\fi } \def\src@after@file@hook#1{% \WinEdt{<-}% 
\global\src@lastline=\inputlineno \global\advance\src@lastline by -1% \gdef\CurrentInput{#1}% \src@spec } \newcommand*\src@fname{}% \newcommand*\src@tempa{}% \newcommand*\src@extensions@path{}% \newcommand*\src@getfilename@with@ext{}% \def\src@extensions@path#1.#2\end{% \ifthenelse{\equal{#2}{}}{% \protected@edef\src@extensions@last{#1}% \let\src@tempa\relax }{% \def\src@tempa{\src@extensions@path#2\end}% }% \src@tempa } \def\src@getfilename@with@ext#1{% \expandafter\src@extensions@path#1.\end \ifthenelse{\equal{\src@extensions@last}{tex}}{% \protected@xdef\CurrentInput{#1}% }{% \protected@xdef\CurrentInput{#1.tex}% }% \PackageInfo{srcltx}{Expanded filename `#1' to `\CurrentInput'}% } \newcommand*\src@include{} \newcommand*\src@@include{} \let\src@include\include \def\include#1{% \src@spec \clearpage \expandafter\src@@include\expandafter{\CurrentInput}{#1}% }% \def\src@@include#1#2{% \srcIncludeHook{#2}% \src@before@file@hook \src@include{#2}% \src@after@file@hook{#1}% } \newcommand*\src@input{} \newcommand*\src@@input{} \newcommand*\src@@@input{} \let\src@input\input \def\input{\src@spec\@ifnextchar\bgroup\src@@input\@@input}% \def\src@@input#1{% \expandafter\src@@@input\expandafter{\CurrentInput}{#1}% } \def\src@@@input#1#2{% \srcInputHook{#2}% \src@before@file@hook \src@input{#2}% \src@after@file@hook{#1}% } \newcommand\Input{} \let\Input\input \ifsrc@everypar@ \newcommand*\src@old@everypar{} \let\src@old@everypar\everypar \newtoks\src@new@everypar \let\everypar\src@new@everypar \everypar\expandafter{\the\src@old@everypar} \src@old@everypar{\the\src@new@everypar\src@spec} \fi \ifsrc@everymath@ \def\@tempa#1\the\everymath#2\delimiter{{#1\src@spec\the\everymath#2}} \frozen@everymath=\expandafter\@tempa\the\frozen@everymath\delimiter \fi \newcommand*\src@bibliography{} \newcommand*\src@@bibliography{} \let\src@bibliography\bibliography \def\bibliography#1{% \expandafter\src@@bibliography\expandafter{\CurrentInput}{#1}% } \def\src@@bibliography#1#2{% 
\protected@xdef\CurrentInput{\jobname.bbl}% \src@before@file@hook \src@bibliography{#2}% \src@after@file@hook{#1}% } \newcommand*\src@old@output{} \let\src@old@output\output \newtoks\src@new@output \let\output\src@new@output \output\expandafter{\the\src@old@output} \src@old@output{\SRCOKfalse\the\src@new@output} \endinput %% %% End of file `srcltx.sty'. gbm/inst/doc/shrinkageplot.R0000644000176200001440000000361212102666411015537 0ustar liggesusersif(FALSE) { library(gbm) N <- 10000 X1 <- runif(N) X2 <- 2*runif(N) X3 <- ordered(sample(letters[1:4],N,replace=TRUE),levels=letters[4:1]) X4 <- factor(sample(letters[1:6],N,replace=TRUE)) X5 <- factor(sample(letters[1:3],N,replace=TRUE)) X6 <- 3*runif(N) mu <- c(-1,0,1,2)[as.numeric(X3)] SNR <- 10 # signal-to-noise ratio Y <- X1**1.5 + 2 * (X2**.5) + mu sigma <- sqrt(var(Y)/SNR) Y <- Y + rnorm(N,0,sigma) # introduce some missing values X1[sample(1:N,size=500)] <- NA X4[sample(1:N,size=300)] <- NA data <- data.frame(Y=Y,X1=X1,X2=X2,X3=X3,X4=X4,X5=X5,X6=X6) # fit initial model shrink <- c(0.1,0.05,0.01,0.005,0.001) err <- vector("list",length(shrink)) for(i in 1:length(shrink)) { gbm1 <- gbm(Y~X1+X2+X3+X4+X5+X6, data=data, distribution="gaussian", n.trees=10000, shrinkage=shrink[i], interaction.depth=3, bag.fraction = 0.5, train.fraction = 0.2, n.minobsinnode = 10, keep.data=FALSE, verbose=TRUE) err[[i]] <- gbm1$valid.error } ylim <- range(unlist(lapply(err,range))) mins <- min(sapply(err,min)) ylim <- c(0.19,0.21) postscript("shrinkage-v-iterations.eps",horizontal=FALSE,width=9,height=6) plot(0,0,ylim=ylim,xlim=c(0,10000),type="n",xlab="Iterations",ylab="Squared error") for(i in 1:length(shrink)) { x <- which.min(err[[i]]) y <- err[[i]][x] j <- round(seq(1,10000,length=500)) j <- sort(c(j,x)) #k <- which((err[[i]][j] > ylim[1]) & (err[[i]][j] < ylim[2])) #k <- unique(c(1,k)) k <- 1:length(j) lines(j[k],err[[i]][j][k],col=i) rug(x, col=i) text(x,y-0.0005,as.character(shrink[i]),adj=1) } abline(h=min(mins)) dev.off() 
}gbm/inst/doc/shrinkage-v-iterations.pdf0000644000176200001440000003213612102666411017635 0ustar liggesusers%PDF-1.3 %Çì¢ 6 0 obj <> stream xœÅ|MÏ­;RÝüüŠ=„AÞØåïi¤…YÓWbŒ I@§A}!âï§Ö‡Ÿ½\: ZjµnŸ·Ê~üíry­òþÝ«|ÕWÁÿüï_ÿöÛï¾ý÷¿è¯ÿýOß~•B÷ÛüÿÞ¢¼¾ãüø7þø?ßþòõßÖ×xýË·úúóüïïó¿¿û¶fOÝjíkg£ÖùÕ,~ÿöë•,iÎêÔ:[Ï?o²ÅwzŒ2¾âI·øNom¯ù¤[|§÷èãë<éßé?4÷iý;ýw¯Êq»ÿüõo_ÿã§¹k¼~úÛoÏš%|ÕÚ_}}õùúé·ßþ¤üéOÿm­¯‘~ú›o¥P¹SÙ­ë¿ ›ºfݾºöµžŒµXû?zýê[ÎÓWíÞÕJmÖSΞþ˜œú+½SëhÈ~“-¾Ó#ZÍnºÅôŸÅ[|§»97ý£u¿oœ1ÌîÿV1”½~­œâ“½5Ò_õpÆ×Zkxp ¿ òäÝ­}¨1sý×ã«•ÒW)Ù½ƒÅøóo²ýÿ¹Mô^U‘Ýl3^9NSËêýóo~þ«þ»ü‡BÃ~q€j|™ŸÌ¯úè׿û¿õóoþæõ›ŸþÇŸŸ¡ÍŸÊi‰ñûÛŠéSøwÁnÙçz¸JÖ©¯ºFäŽüžÂùZ¯ÚvÍ öýÛ.-Ç®¶5YÕ.\pý ë®+—EY ³ÆÈs|õT´’½gŽÕ³ûÕ"ܬÙÎT´¹¸)Ï©ÙÎTô¬9`.²}­/kÊYàèÚƒµä¨/ìÉÊ~×3?KƒºœÞ°ÚÛ©-ª%þ¼zéÅ¥q¢{ÛòÄhôZ6¥…k»šÒκz Ù̃6çÂ)Nß;+ñBúþŸÞ_X¾?®K-·»Î?%”þK©;4¿þì“ÿð~Ÿíkæ6È‘ÏlÛžýúow›>û‹?Ë&c`Ÿ§Ý3-A}ï§™h¾÷S›ZÞO1¼{péըڸϋúàn1|R?·XË0>·X›“ƒx7YísIám–ŠñÃ6KÅüØdy-–»^+èn±:sšÞ[,»µè!ÜME/ÚÜdP¤ÍéÏ&KÅö1§MÅñò&Ë“d~l±<+Ü o±´…­ð‹ZÇ•sg¼Ì¹Å"«÷‹,Í‹ž,÷³v¸7X´±,sƒEÛ?l°è1-çK©¿7-Òyo°”›7óÁÔFù³²lDÚ'殕5¯zSƒ5¯ÚeHr¢Qóê.½ö¬yݲkgÝé²ÈPÔÁºwª9&u²îÓm 0%žY¿úÇ9³ÒOŠ´‘¹òª£CKÎô¿sjáºy‰ÉNžœ¤—ñÐÉ!_܇‡RžTvz&-¦èíÁmC h8^!›) o. Œ [[¯®;lšd kšA§ÒDב–¸½Mt.ÛLtn·ð÷üƒ6à1ÐÍ­×@§5³i‰NkØ>MtZËæã€&:mï²Ñ¥‰ÎªÍ(4]¥#ņÍ&tÊãšh4Àyi¢³öíd¢3ýšpšèL?®‹&:åi™&šuÅÛDW˜èvM´;ø˜è÷×Q'¿žööd²S¾&&;¿wOe²³§ím°sx®§Áæp}˜ì§aO“]q³ßo‹Ãs[³1ª×‹•Á~˲×){EÈ\ç€Ù –µNÙ#+c²gA¶:+×m¦:°¼ítJF:‡+ÔÙè¯>™è‹,t€î!6Ð)û(“}Îñ ‚yÎî{­Ê:§ì£MÆ9åõõa›s@ªz*Óœ¢k”-sˆ}x攽Ód—S¶»³œ­Ë“Öé;Ÿ{Ç&y¾<6Ǹ.N¯¨AO…òº¦¸Böê½°u¬”òaë¶WÉ›­Û^ƒ·¤ gJé‹+>=FË›+>JK3%~;KÐ T·eòª˜Zõí¬Ø6nË!Èô+·ì†›-Æ ÃMit^„§çxæ¦(vœÙ”™Ã¼¨pOgv±Ck(Å+‘®sœ÷Ñý™Í]eâ\Ì!æ\:ksü=Q+‡š¾våL¬¬p„—<ܾN×ÜÙ…޶\‚ÎÙœöaÍ¥ ‡|uzà¬r²…+Çš.›£•Ã=¡Øþ`î²qm½Üøý¦/—ÓbûsŽ«AÕÝ9®á)/—Öá1kTic@²¾gãÐmvn˜œ‘9½Ã÷®¼Mec•sc”ÿpÏ;ܧp¯âïO¼ ‘em«ênUOã:_ÙÉë|Ýó ¾Óª¢Ò×ù¶¥gr¯¡kU= Ë,§`hjÎÆÍ$e¯Ås:[7±¢þ­ŸF¼²mKu¥]"H¸¶Æ9eÕ½uÙÉCUuû‚¥ç×»¨åyÍá:ßµ¸ôÉ•¾kw:N^§¼q?HùXNƒ? 
kߤ|PÛn¬;O ôsw]Q¢êŒÁT³íiÑÏ=d‡# 1¤¡²+.½€Ìºs˜¨{kÉÌ7Úvª¿ž›m;õlª¸¦nÊXUÅ-•#Q VåäjWçççéš•À]»áš©U©>eWr9µ:+R>„d »¤oE .'Vvaѳ¾eÇâ¡“3-‰Æ+gÕöu î ̲¾žÙ–QëZÜÃw¾”¹1põQnº69áÍun‹aÏ6`˜Q[×ý#6Ýœ~[ÐÜcÚÆm¯œhaÖ±YPÈq¢ŒÜ[þ[^Éuéb yN¶îØÂí5غ},sˆ2]7¹ØÌp­˜~ ¥\ UÎÜÀ ξ$§sÎÜ\òÂsá"µ³e0º(iÊo À(kݼ“`×\žÓCT?—‚¤tÜcL‰P×Ò*XƒM\óVº;[¹›¶nƒ±E3 }«D“¹@Žœ*š8{Sѵ•n©¸ˆWïþ¦ëU*Æ-”g]~r®bààç'®e«­¶Êyšp·í°ÍÍ«·ÛnóæØÜo8õÕ µD5žyЊÛÍJ‹N,2헞Ɠ­XœÌ-ÊX†®[èü¾“„û:¿÷-ƒ®€7 VàÆ…vž§ §qÀTç³I)‚îxxÄ–fêðùÒpÍŠk±‚[à¤Ëâ1‰Ë ;N I•¶Žôu§¹ÎJ¯Ílm6Vš{XýhÜQãëÔ~åÂŽ›ƒû7·§¸@E*ªç¼ÿ.¢·y©Y=ýß\èõ¦ÓN…O›4ühE.ô§„ÎE•+ݦ¼ua£ .d³†‡hæñéš}ݘ̷âÅS±±fsƼ—|Xbî‰p9°Æ¨¿5•7°œ‘»åFð –»àöj—yj|Œ5\Ž64­ÞrxÃhð=i.ÆÀIYF¬™Ã—D½ð”娷±ae<ÓI™xˆ÷ e¸¨!M’—­šm³âÊý³-Ô\S4T°«(ë h“è"v%6©]~¨áf/î$•L²¹6É‚6 ,Ê›¡EË¥ ] ò”ÇVÈT]nFÀóµ8ïØ ì”|ز1-·Í¶Zj«/¶myLu/jaZµ-2°)«t êoiãË}ÛuºÃM®’‰¹çq¢tpn<‚ŽÓôD]†¯ ¬y†pz#ÞÊ“åÁÓP=C›]LYdUÛì·½ê#}ׯ°§0X¡tŽÄÞSåéJÓhÄs°<0lMé¬?ï.d=Ô:Ü\äc¹ Îí­ÚÀ®UÉúšîoZÈíÐûmw”ŸÎokwµò¢"™­r‹Ö-nÑž-0 a2)¹e權¥^8hiYtÕì…K¯õÐ^é ÍdA¯°-XœÎ?È0õ~ÑˬHÝNd˜znË"ÃÔ‡àƒàv˜¢|T;wy Ô½›uÀ¶¨{iÞ:¯)`ˆ¥ö´HuÙµ©nSö]4Wë[Ðj¯dç°4ŽDd·F Oo­á@ƒÝÜfuÀnš¹å}xŠô(\æ„Ó¬8"¬lŽ{0h§PS‘Øè¨sڭ鸠䀌ë°w@¶¨e훃 _ÃÁÕïJ£µî'‹ÜÒ,ã¶&w–sÅ­h…(·‡ÜÊÑ<¤b]oçäÑ–ôpÓºÝJÞâ<˜ÓœW|Û¨™·”În6ø‡]šAÿ$íš­ŠÙ©yªZôƒ.‚¶y2¤æÜÚ/^H5tq†Á£<ÖðâÕ@ͪß9£ˆu×>1©Á$½“}HÃ(¶µwÆ|¥,0¬ÖØo:úù8}Á¥á\’‘†Ó{¯íIÓé½™K´¢€ž ò‘è€u;XFÁ0·— Y O6Íh`äõ}‡çIYé$V?dnºvÙú £Ñ€èAZÜ pÿ$or½ð%r½{ê$ë³íÝË}ÇícJ’Iöî-)Pö‘ý쓤TÛw'Ö×YºË¢»›²Àù>éí¦Ù]þž8|®ö{òØiˆYRi›gêq´CÇ $[všû L7[v9õ´=(«+2 ÑE]¶Æ}1’#²g“}>eìmÈayÒ‡;¾uEP¤Ñvß0Ýè›vÐ[Ôv¼"Þ¢¶c ·éÊ¦ì‘ØôdÓ¨+Ú ‹Ëµ¯¯éÅ’VjgšWߦ›_þ@ßtaŸ ¤¾éÁ¦ìsd¥såúnOá²£¶y7eºì"›Õ·SëÛÞ:ðÛþAGw¸U9´A5'𥠿AX?Ÿ ´¦ãR¥}æ"¥}‘“®‘ç'ÎÁ ÖArÉÀ¤Ñ~ùÀ`V•°¤¨lN›!E¨a¨hÍE+®gžŽ/>èåI(µãW¥ zÚë TpÇ«EЬtør5 ã(z¶£‚J×¾9¸*ú› ô9»RL9¢Û6Ãvþ×÷i³“ODò§Èˆå_Lÿ£0ïX«e¿rŸ•-V~_¸˜¾B+=¡‡u7WœÇòqHN/Úþ=mîÀC¶8ž®¸Åe·Ä§|ËR8Ïî—{&WsšgWãFŠ+8‚ÜpD;+ü Íÿ`†IDèkqçõÏ`žºŸà qÃËNüå†W¹¡Eâ†g3çIngŠ$qÃãácÅ ÷éÐ"sÃ=²¸á<ÍçŠn¸3>*/3ÃŒ§¹áˆ[úe‡•*n¸š‹¹ÜpvŲ¸aL•Ø]qÃÀÀÅÖŠ® ¯ÖiIÜp&¿ÞÌp-Í<³˜á‡q73\¶¾/ /™ý4/\Öm‰xaDÐC¾Ä0¬Ç”BÌ0Œ²–©áGE¢¸a¤« UpûBl1̾sˆ.Fà^H!¾Še…ÊoòÉ£Ú"…(c(Î'g .>Ic6T)Ä#²A SÇèë0W\4·ó&q•Oö¸8¸Xìñ[{\|4_þø-ϦX IÜVf˜s7cH³½É©•m.tHÙÄp3и3ÌüV²j8wDÜ׉yñÕQ½¢WWl„’–¡U÷ÐÆX…qÓ 
£¥ì•´ˆ¢UY*­`µZÊn‹9ãÜ)úÚœqõµî²ÆØíõf«¯°—5®7”Ϭqí ²ÆUfæŒët<Œ9ãœ×½éö׺<Ðð;6ަ€ã1¨p°Â Â<3\°äÕ7ERÇ“ŠùæŽÇTôrVä˜-æY“—ÉãNÑÔ>Øc‰\s ÇcS!îZìqÜ3{QÌJÓü¯øãðµûòÇiÜ_oöðªø`±ÇaøKìqørÙã7Uì1 ¼Øa±Ç†ÎëÍR[ÄÇlf›ÅÇô÷æùàâõæq@ˆ¾˜‹?Ç÷_þ8pùãØíþ8ö-]üqøD½üqœpéâqà|òÇáè”Ë·âq7,ÔøòÇ­˜)7}ÜŠ¹³Ç-Ì­˜¬0Õ^M&¦G`N—8Y*ºkš¸â;óÆS!†Í¦‰Ó,•š¸âQ‡«$MÌCYEŠ&®ˆA¾¼ñ¡_2ú%«Z1Ë41ƒñ?ibžÝ.T‘Žý¡\‡Bû¾”ùP´cßý*î˜÷¡[‚âóêòAw)Ü E<އBžÄÎê¸ðÙá Åü ‡\²Ëo§ë&Ñ„.ñ³:î-‡¼pwì’CÚ¹Ì1!4*Dí‹f¨Ëà¶®€Ä4À¢…ëÔm^8]£=&†©PµË/ìžÈPÃåH£®,Uéw\â¬#¯¦©'RÄŒ¤Z7>Ñ; ¦ºí¨h*3m¤Š57Pd1k(þ¥gÄX¹vЬýÒZ¤Œq\Ìbx‘¤ñfìÕ­ûdjîˆã&ÔÜñƒ®Çõj†qãÒɤ阋N&}LßHt1éãLOú¾’‰ZÒÇyÛh‰@NÙd¶d}‰$&\Auˆ`nH"— W\`”—rúV&ÊE §‰·1œò­k„‡ù …\§Ã‚M"§§eG$rÊæD"Óó"’.™²ÒI!§_avE rÊBcL 3ÀŒH½øãt;ŠI뉯Ö\Ùc†Ÿ)•ä1ÞÔ¼æ8%…˜8NÙ¬’xcúyëõÐÆu9ÄÙ¬1ƒÙXºHãŠ'gì›8㔿'†V×ìÎO -eì"¿ò0UêÖånm·Ž§/ªt=2#ª8}J͸™b³qD§—yÓåÜ®Ëïˆ&Î:Y—è,¿Í×C§l~E qÝÆéMÓ'åÈ€® Œ”*×v·riWôu·v©[bjT˜©%¨Vw/7QµTÜàÑÃéÆÞXÐÃ)ž[¡µôD~¤‡ëžO ×Rq‰pÑÃéý>EòâP ŠS=\÷›pUtä¾D¤‡+¶¯EÅGîs‹=œ.n\¾X’§<9"iÚä0P°FRL®y+Ö’è"7Wý©O‘‡Ëp¬š=ˆ¯¥â’éƒøZ=ÏñHRÈ_±‚‹ÿ4Yd„¸/LB¸¶•‚Œp*ÎUàuà¹g†9኷‚¤p~è‡X{³Î‡¥gWƪ˜šáþƒ†]Gh€‚ ÀojæÍÓºJÞ7cv èU—§Ž» ¨WíW\­q­ž¼:Øö“/ÔÜ 0ƃís›C†qZ{à'5w/ñó ©ºÀ# Ñab{˜niô®Œ»ˆYßá€Fó˜âS‡Î%rÈq6˜CNÙûZr_:Ì!GY—#&‡Ì»8_rÈ£,N˜2ï:úžräšCλNýäùZQ39ä¸O%Ì!§l–Wrˆ²|8äÀ…^,.9ä¼|˜#‡ì›ÒÃ!G½qâ£úŒ6‡œò0GìÊfkAé3Ûrà‚.Yñ“uˆ1&ƒu„k§¡Žûƒ rÔËb‰ANÙ ‰ä¨“6ƒ8P”ŸˆbÊ7?dľþ8¥ucôëT³Óä)ë[òÇq/åæóNgVJü±ïtƒŒ;8 MLJL˜£b @!ó·ŠÅ£8ÒK¶ŠDÎí2b‘Sá`ÓÈ»¸ä‘ÇÚ' r]é žš¿åÕ4¯ŽýQp­ÇìˆGæsWq›â‘©É!9Êšã%œ7Î~Y`òÈ©°k9âM,“GÞ¼­p$«+%œŠKƈG\­MÔ’GNÅm–xdþ€‰[EJ¶úð¿ uaäë›FF¡÷Ù(YdO-¼§â2½€{½¸ü[kW±õ>øÆ’@ÆáMþ¸Aqg%×ÄsÙdrÇŸ ’Ç9î»Lö˜oÕNÑÇû¼s?Λ¶¯À&ƒ/u¤ ƒÌ»øU0†¿z"9p×b‰¼µS‹œ “W¦‘ùÚ Ùø¼Àß%<g•š20tƒÎeI³¡+rôêøAòÉ83zÜ%6ªžš}[³6¾‘WfÉ7T»û½]æPÜ2ñ€‡YŽÞÍÈŠYŽÞå)“[NirËÑõdn9e[+qËÑõdn9º‰ sË·Gß“]Žîç{f—£ûñ”Ùe†-3]ìrtÃÂd—ã¾'2»ýlç&»L™ölá{šÍ6Ç(âŽÉ5£ ¿+®9ðI¥-—康‡«^±Í);ÖGl3!1Ä œdˆÞëa›cDw~…MŸZ曉ˆ(A“€CÄ/+fr8vÍ|s ÿ…ùæ”i$¾9Í¢¾&Û¸¾‹Û&Û  D_ƒl^å_×Ì7û QÍüQŽ„˜æçGFL4Ç}4ežù É6ÍÌkÖ.–™!Ù*ŸœAÌRœNf*î³*RÌ1‹|3Ì1‹î~&˜S¯‡]æ·çõËŠî™[NI£jjÁÝN%³œ²üË1›v‹ye†¯×C+çYCºWœr`“˜†&|— _\Ì)Ǽ1Óæ”‰Ñ"ýA8es­"§¾øS$Ÿü‹é>á«'æK åÿÏSîª^Æàí‡Q6–×[+l(¯G¿2˜^/". 
¯×ûz8=§x=0^/f4Š—â•Ç€ë׃á¥?|ßqÂï¼ü޾¾ÚB,†· ÊBïÚ4sdð®=o„ݵaPN?0Ñ Èéç%z7¶¾üóç–Í7 ø¸òO„E–î7KA¦ ×›b*o:Z]j_t4Pßï{ŽV¶ÇLï9Œ÷|¿ï9Ò¯1Ðä÷ç¾Uñ{ŽãM¥÷ÛFý¾çØؼï9¶C,ï{ŽÅŸ6¹¯9.ps_s,ÿØóšãôûºCï9æ¯9̦ø5Ç8ç ÆÑˆ²e~φW©zÏ1ÊØôž£ÛŒÞ÷ý¾ 䋎î°Úû¢£¿Áñ›Žæ—ð÷M}¬×ÇÑÕkÂ×ô¦›2¤Ð›Žìœhãû¦ƒÏ¸? ¹«Ö¥Ð›°>C ¼é¨hƒD½é )…Þt€)u½é€¡kÐ\ £Zá7ñ|ßtàf# Ëo:Ð!‘ûMG)OÂç H!€hƒs¡;OK ÑÙFåˆÒk)Ÿ0éªüIž}ÇÓ@N^cmBê°ÙU©¡:¾$•BXÐ6õÄ`ÝBðÞ'^‡ýæ2Ø!ÌQhÄ€€Ëd3¬$f·`% ´Ã暑v3W†ºnÐÀqµB¨ôtPA;‚ç7¨ÁóvǼõað|¸!@î¦!w  »F >n9+ ø?š-nx &Àšcºáö¹é_r«Ý4õÀˆJcž¸:ÞMTÃÓèv3Õ@Íü•©jl[õÂÐn¯û¥ôÅðz7øl¯_¨Û^6öõ†ïðC?Jz‡ø[‚ïz¹7„ݵchÄÐ~oàܵŖ¶kv/j‡+”êâSŽÚ†âŒü”£6Ÿf~ÌQ[3ì­Ç)ßômk»¿žsÔV˜¥UƒLzÎQqeOôœ£â!Ã~}¿Ï9jžsWyä=]J>‘Ð1pÉé„èø[;.’]ÅMÂí$HÇ8åJW˵V†éøû<ªE8çÇŸ€«Æ‰ì—"âªËöX§KE¹9†ç)R\5bë--œŽ?ÄãÄéô«@RœÐiø…Ó1ܬZÁP¦] ãtü­ +ˆÓéç|¤ NWk»PâtTh„Ó1ÊÍµè· Þ1~ꂚúhÄ C£X7ÏG€Ñ:$x÷' àºr•§r€×-jîŠif©ì§abGïºì*.¢Jç½E±¯®ƒüz°:þúoî‚êjÅ¥mýT“/?ê ¿”®âÁ#KH§ÐÅ׃Ñ1ØQ©¡(Ä.ÞÒ‰êè˜Îº„ÏQÖû©ÄûbBèÜÇ÷Kñ‡Ý(‚°¹”UÛa[ª_s6ãF‚åjñåǯ;˜Î²ôºƒ2KÓ뎊È/¥wíÿŽŒ_wPVþ©õß<6zßA™}Óûȯï÷uGJóóue¿æ çYükbÆà( E‹-‰s* Ž¿¦Tbpµ8ÔØÜG~y¾ïüÄà ¿Ž©ª›\½?Kfî- cmBÉFUü{±Däúzð¸wÙÂãв׃ƽGEhÜ“:ö­WXÝ\wLTÑ8¦ }Û ¾£ 4®Þ_©5÷Î/4Žå «·v’и;ƒÂâÞµ‹£$dN5f_e‰Ÿ¾ßŠœ~ÖŠ8¬µ×ƒÄqå)÷Ñ*¿oþ€ÄݼÂá)´Æo›„ÂÝZ…Á½Û$ Žý?¯ƒ{÷AÜGikºÿÆÜèU=uŸþn³8§ã—’Ã5/chñÔ¬¯›s÷׃À¹Æß>R§¸^Týpë¼w.¸~ÞH®•ñ€p´¹Þüáxø­A¸8osñ‡á Oy¿> /Contents 6 0 R >> endobj 3 0 obj << /Type /Pages /Kids [ 5 0 R ] /Count 1 >> endobj 1 0 obj <> endobj 4 0 obj <> endobj 11 0 obj <> endobj 12 0 obj <> endobj 9 0 obj <> endobj 8 0 obj <>stream xœUU{\מaav“ÝÙ`ª¼ä%(AE`,òX]>peW@—G`AE´&$&pA£ÕH­ ĶŠ]"‚ŠhXQ]QD­VÒž!—þÚYLÓ_ÿ¹¿ùÝ;ç»ß9ßwÎ% K ‚$IÛ°Oâ>‰ŒwÔhó5ºô•ys&ïDòS-øi"Š~)ÿ%Æj!?ðĺj*h­¶ð‡IPð!"ÉeûeeoÊIOMÓI]–&,suw÷øßŽO`` tͦÿžHÃ4¹é©™ÒÂG¾F›•¡ÉÔI kµé)ÒTí¦ì´\©J­Ö¨ÍaJ•V³^‘®MÏÎÎÊ—º,r•úz{ûÌßÅékòr¥ Uf®T.MФæiU9ÿ·I„}f–ZóiN®.]‘¥òóŸè)õöñ%ˆX"Ž'"ˆBA,!–Ñ„¡$܈P‚%&ö„#1…˜D„-aG0„ƒP,Â’#Љ!RF"û-¢-Š,ªE^¢u¢m–Ž–1–µVŽV唘r œ¨ljPg÷ J>¾;­Âõ„ôÃŽ>{æÔ-¾’eþ=ý0tHö§ÌïP½ãùÓu]7Nd.)“Ø@®›wë"o÷‰`·@Ó> À†ÉnI)å‰ŒÎŠŠ²oj$·ÄÛ¾ÞZ²Ñ©Ÿí­çˆm GHrCÄT¥B¬›0VÙGfXÎuî§lF½tù×;dŠ‚—&Ù¹ºCü>¶¦»öáyB*~.ØÁ÷œâyQË­˜ÇOs›Ö&LAêüm^憕ÛâÐ<´ôPZSöñßÿµô¬UidyrmJ³¬?DȈnWœ;¡o¬ëDhPqÝå(þXïÀôÌ®^_Ó6ån§þ%ÐJÒqu'šÙÚ¡>Ø)輑ϟÌWàf1sVª÷‹Rìðâ€1±_§âï=ú£7õfc¨fª « 
Ñ3([ÏúWÆ3Ê x+¼›÷î1§|ð¸^ðMñç_¡íN™›÷áàªxHÖŒÙùòqc ˆü B×ÚÎÆ¢¨êÔ;«$Ì›Ô)qÁSðä—> éËA`ï§\ l”`¾ïOŽˆJJ^¸0éÌ.ý™»n¶dÞ˜Úsc>þ±—úúÚÛL¿^Ÿ`€kÂX4§Ûsؘþ)d@·Nuœ½YaDÀ"˜°é'U‡úRL]°Ð Nn31‡=áýúêÖï¹¼Pá>Å Õ[ºhxŸ_Ç>¼&óò[ã?Gyex°½óg6ç‰FD a§O¡lÆŸ¡¡ óýõƒ"¨ÄŒB±™k“VÆè<¶§qQ/fÀÜ S| Hòž®øQ²¶9®&ÑÁ–ÃçÜq^¹r–«çŠ— å¹áanÜV£Þ‚§|OíãËØ±2Óhb¼X‹É …xj-7¿'ÈÈ7õ¼½ ò#;öµçòñzã¯.:üíâÝB¿^ÉmU5¨–ŒD>(r½:6G³-¹xm¢vœßU»¯ªêô÷GZÝÛž0_±n¹<•óZ†]殎,ÂÞŽ¼–²ÑUñ•æ‰#¯¢ ïöM0|3qâ£=­ â?i˜Ô endstream endobj 13 0 obj 2012 endobj 10 0 obj <> endobj 2 0 obj <>endobj xref 0 14 0000000000 65535 f 0000009358 00000 n 0000012997 00000 n 0000009299 00000 n 0000009406 00000 n 0000009139 00000 n 0000000015 00000 n 0000009119 00000 n 0000009844 00000 n 0000009537 00000 n 0000011962 00000 n 0000009475 00000 n 0000009505 00000 n 0000011941 00000 n trailer << /Size 14 /Root 1 0 R /Info 2 0 R >> startxref 13047 %%EOF gbm/inst/doc/shrinkage-v-iterations.eps0000644000176200001440000005332212102666411017653 0ustar liggesusers%!PS-Adobe-3.0 %%DocumentNeededResources: font Helvetica %%+ font Helvetica-Bold %%+ font Helvetica-Oblique %%+ font Helvetica-BoldOblique %%+ font Symbol %%DocumentMedia: a4 595 841 0 () () %%Title: R Graphics Output %%Creator: R Software %%Pages: (atend) %%Orientation: Portrait %%BoundingBox: 18 205 577 637 %%EndComments %%BeginProlog /bp { gs gs } def % begin .ps.prolog /gs { gsave } def /gr { grestore } def /ep { showpage gr gr } def /m { moveto } def /l { rlineto } def /np { newpath } def /cp { closepath } def /f { fill } def /o { stroke } def /c { newpath 0 360 arc } def /r { 4 2 roll moveto 1 copy 3 -1 roll exch 0 exch rlineto 0 rlineto -1 mul 0 exch rlineto closepath } def /p1 { stroke } def /p2 { gsave bg setrgbcolor fill grestore newpath } def /p3 { gsave bg setrgbcolor fill grestore stroke } def /t { 6 -2 roll moveto gsave rotate ps mul neg 0 2 1 roll rmoveto 1 index stringwidth pop mul neg 0 rmoveto show grestore } def /cl { grestore gsave newpath 3 index 3 index moveto 1 index 4 -1 roll lineto exch 1 index lineto lineto 
closepath clip newpath } def /rgb { setrgbcolor } def /s { scalefont setfont } def % end .ps.prolog %%IncludeResource: font Helvetica /Helvetica findfont dup length dict begin {1 index /FID ne {def} {pop pop} ifelse} forall /Encoding ISOLatin1Encoding def currentdict end /Font1 exch definefont pop %%IncludeResource: font Helvetica-Bold /Helvetica-Bold findfont dup length dict begin {1 index /FID ne {def} {pop pop} ifelse} forall /Encoding ISOLatin1Encoding def currentdict end /Font2 exch definefont pop %%IncludeResource: font Helvetica-Oblique /Helvetica-Oblique findfont dup length dict begin {1 index /FID ne {def} {pop pop} ifelse} forall /Encoding ISOLatin1Encoding def currentdict end /Font3 exch definefont pop %%IncludeResource: font Helvetica-BoldOblique /Helvetica-BoldOblique findfont dup length dict begin {1 index /FID ne {def} {pop pop} ifelse} forall /Encoding ISOLatin1Encoding def currentdict end /Font4 exch definefont pop %%IncludeResource: font Symbol /Symbol findfont dup length dict begin {1 index /FID ne {def} {pop pop} ifelse} forall currentdict end /Font5 exch definefont pop %%EndProlog %%Page: 1 1 bp 18.00 204.94 577.28 636.94 cl 0 0 0 rgb 0.75 setlinewidth [] 0 setdash 1 setlinecap 1 setlinejoin 10.00 setmiterlimit np 94.45 278.38 m 435.18 0 l o np 94.45 278.38 m 0 -7.20 l o np 181.48 278.38 m 0 -7.20 l o np 268.52 278.38 m 0 -7.20 l o np 355.56 278.38 m 0 -7.20 l o np 442.59 278.38 m 0 -7.20 l o np 529.63 278.38 m 0 -7.20 l o /ps 12 def /Font1 findfont 12 s 94.45 252.46 (0) .5 0 0 t 181.48 252.46 (2000) .5 0 0 t 268.52 252.46 (4000) .5 0 0 t 355.56 252.46 (6000) .5 0 0 t 442.59 252.46 (8000) .5 0 0 t 529.63 252.46 (10000) .5 0 0 t np 77.04 289.48 m 0 277.33 l o np 77.04 289.48 m -7.20 0 l o np 77.04 358.81 m -7.20 0 l o np 77.04 428.14 m -7.20 0 l o np 77.04 497.48 m -7.20 0 l o np 77.04 566.81 m -7.20 0 l o 59.76 289.48 (0.190) .5 0 90 t 59.76 358.81 (0.195) .5 0 90 t 59.76 428.14 (0.200) .5 0 90 t 59.76 497.48 (0.205) .5 0 90 t 59.76 566.81 
(0.210) .5 0 90 t np 77.04 278.38 m 470.00 0 l 0 299.52 l -470.00 0 l 0 -299.52 l o 18.00 204.94 577.28 636.94 cl /ps 12 def /Font1 findfont 12 s 0 0 0 rgb 312.04 223.66 (Iterations) .5 0 0 t 30.96 428.14 (Squared error) .5 0 90 t 77.04 278.38 547.04 577.90 cl 0 0 0 rgb 0.75 setlinewidth [] 0 setdash 1 setlinecap 1 setlinejoin 10.00 setmiterlimit np 96.30 636.94 m 0.80 -256.68 l 0.87 -37.13 l 0.35 -0.45 l 0.52 11.92 l 0.87 2.33 l 0.87 -2.31 l 0.87 9.87 l 0.87 15.91 l 0.87 2.35 l 0.87 14.48 l 0.88 3.97 l 0.87 10.90 l 0.91 13.99 l 0.87 9.66 l 0.87 14.32 l 0.87 12.06 l 0.87 0.63 l 0.87 7.84 l 0.87 0.86 l 0.87 16.91 l 0.87 13.95 l 0.87 6.15 l 0.87 19.28 l 0.87 13.99 l 0.87 7.21 l 0.87 3.98 l 0.87 6.88 l 0.87 6.38 l 0.88 14.52 l 0.87 15.71 l 0.87 12.61 l 0.87 1.80 l 0.87 4.97 l 0.87 5.38 l 0.87 8.96 l 0.87 5.94 l 0.08 0.86 l o 18.00 204.94 577.28 636.94 cl 0 0 0 rgb 0.38 setlinewidth [] 0 setdash 1 setlinecap 1 setlinejoin 10.00 setmiterlimit np 98.32 278.38 m 0 0 l o np 98.32 278.38 m 0 8.99 l o 77.04 278.38 547.04 577.90 cl /ps 12 def /Font1 findfont 12 s 0 0 0 rgb 98.32 331.64 (0.1) 1 0 0 t 1 0 0 rgb 0.75 setlinewidth [] 0 setdash 1 setlinecap 1 setlinejoin 10.00 setmiterlimit np 98.18 636.94 m 0.66 -170.73 l 0.87 -91.55 l 0.87 -32.71 l 0.87 -11.27 l 0.83 -4.42 l 0.04 1.99 l 0.87 2.36 l 0.87 5.74 l 0.88 5.32 l 0.87 10.10 l 0.91 -0.18 l 0.87 0.01 l 0.87 7.30 l 0.87 5.28 l 0.87 3.18 l 0.87 8.16 l 0.87 3.52 l 0.87 3.60 l 0.87 6.17 l 0.87 5.93 l 0.87 5.86 l 0.87 9.58 l 0.87 4.88 l 0.87 1.50 l 0.87 5.88 l 0.87 1.56 l 0.88 1.58 l 0.87 6.89 l 0.87 8.30 l 0.87 3.06 l 0.87 3.80 l 0.87 7.77 l 0.87 6.79 l 0.87 0.12 l 0.87 5.03 l 0.87 3.56 l 0.91 5.44 l 0.87 0.11 l 0.87 2.74 l 0.87 3.12 l 0.87 2.71 l 0.87 4.84 l 0.87 9.32 l 0.87 2.49 l 0.88 2.34 l 0.87 8.43 l 0.87 3.91 l 0.87 3.49 l 0.87 8.17 l 0.87 6.87 l 0.87 2.14 l 0.87 -1.13 l 0.87 1.81 l 0.87 5.45 l 0.87 3.55 l 0.87 4.15 l 0.87 -0.88 l 0.87 3.64 l 0.87 6.16 l 0.87 6.55 l 0.87 3.76 l 0.87 -1.51 l 0.92 3.77 l 0.87 4.83 l 0.87 
4.47 l 0.87 -0.78 l 0.87 3.07 l 0.87 2.23 l 0.87 3.07 l 0.87 4.25 l 0.87 0.91 l 0.87 2.07 l 0.87 3.87 l 0.87 -0.27 l 0.87 4.13 l 0.87 2.34 l 0.87 6.20 l 0.87 -0.02 l 0.87 2.70 l 0.87 4.35 l 0.87 1.13 l 0.87 1.01 l 0.87 6.80 l 0.87 1.41 l 0.87 4.57 l 0.34 2.26 l o 18.00 204.94 577.28 636.94 cl 1 0 0 rgb 0.38 setlinewidth [] 0 setdash 1 setlinecap 1 setlinejoin 10.00 setmiterlimit np 102.28 278.38 m 0 0 l o np 102.28 278.38 m 0 8.99 l o 77.04 278.38 547.04 577.90 cl /ps 12 def /Font1 findfont 12 s 0 0 0 rgb 102.28 315.22 (0.05) 1 0 0 t 0 0.8039 0 rgb 0.75 setlinewidth [] 0 setdash 1 setlinecap 1 setlinejoin 10.00 setmiterlimit np 112.73 636.94 m 0.08 -6.15 l 0.87 -56.32 l 0.87 -45.80 l 0.87 -38.10 l 0.87 -32.80 l 0.87 -27.05 l 0.87 -22.21 l 0.87 -20.65 l 0.87 -16.95 l 0.88 -11.58 l 0.87 -9.86 l 0.87 -9.21 l 0.87 -6.46 l 0.87 -5.19 l 0.87 -3.64 l 0.87 -3.58 l 0.87 -1.01 l 0.87 -2.63 l 0.87 -2.59 l 0.91 -1.32 l 0.87 -1.63 l 0.87 0.29 l 0.87 -0.46 l 0.87 -0.33 l 0.87 0.60 l 0.87 -0.35 l 0.87 -0.30 l 0.88 0.56 l 0.87 0.21 l 0.87 0.44 l 0.87 -1.21 l 0.69 -0.58 l 0.18 0.27 l 0.87 0.81 l 0.87 1.14 l 0.87 0.71 l 0.87 0.66 l 0.87 0.85 l 0.87 1.16 l 0.87 1.98 l 0.87 0.37 l 0.87 0 l 0.87 0.88 l 0.87 0.51 l 0.87 -0.38 l 0.87 0.17 l 0.92 0.88 l 0.87 1.91 l 0.87 0.90 l 0.87 1.37 l 0.87 2.06 l 0.87 1.83 l 0.87 0.91 l 0.87 0.82 l 0.87 0.86 l 0.87 0.51 l 0.87 -0.11 l 0.87 0.64 l 0.87 1.33 l 0.87 1.11 l 0.87 0.91 l 0.87 0.58 l 0.87 0.11 l 0.87 1.19 l 0.87 2.27 l 0.87 1.39 l 0.87 -0.25 l 0.87 0.04 l 0.87 0.37 l 0.87 1.15 l 0.87 0.02 l 0.87 0.51 l 0.92 0.29 l 0.87 1.00 l 0.87 1.59 l 0.87 -0.20 l 0.87 0.27 l 0.87 0.64 l 0.87 -0.20 l 0.87 1.00 l 0.87 0.78 l 0.87 0.81 l 0.87 1.66 l 0.87 0.58 l 0.87 0.25 l 0.87 0.42 l 0.87 1.09 l 0.87 1.17 l 0.87 1.33 l 0.87 -0.17 l 0.87 0.97 l 0.88 0.62 l 0.87 1.02 l 0.87 0.29 l 0.87 0.87 l 0.87 0.76 l 0.87 1.14 l 0.87 1.21 l 0.87 0.65 l 198.28 363.84 lineto 0.87 0.66 l 0.87 0.85 l 0.87 0.97 l 0.87 0.44 l 0.87 1.68 l 0.87 0.14 l 0.87 1.08 l 0.87 1.38 l 
0.87 1.74 l 0.88 0.28 l 0.87 1.61 l 0.87 0.37 l 0.87 0.22 l 0.87 0.99 l 0.87 1.60 l 0.87 0.66 l 0.87 1.76 l 0.87 -0.19 l 0.87 0.51 l 0.87 0.94 l 0.87 0.38 l 0.87 -0.91 l 0.87 0.12 l 0.87 1.15 l 0.87 1.08 l 0.91 0.21 l 0.87 0.27 l 0.88 1.68 l 0.87 0.51 l 0.87 0.39 l 0.87 1.43 l 0.87 0.10 l 0.87 -0.25 l 0.87 0.91 l 0.87 0.85 l 0.87 0.22 l 0.87 0.42 l 0.87 0.45 l 0.87 0.93 l 0.87 1.95 l 0.87 0.77 l 0.87 0.36 l 0.87 1.83 l 0.87 0.75 l 0.87 1.03 l 0.87 1.37 l 0.87 0.72 l 0.87 0.91 l 0.87 0.23 l 0.87 -0.03 l 0.87 1.04 l 0.92 0.62 l 0.87 2.08 l 0.87 0.94 l 0.87 1.17 l 0.87 0.61 l 0.87 0.70 l 0.87 0.75 l 0.87 0.41 l 0.87 1.05 l 0.87 0.98 l 0.87 1.62 l 0.87 1.02 l 0.87 0.90 l 0.87 0.61 l 0.87 1.07 l 0.87 0.31 l 0.87 1.95 l 0.87 0.57 l 0.87 0.43 l 0.87 0.26 l 0.87 1.33 l 0.87 1.35 l 0.88 0.39 l 0.87 0.46 l 0.87 1.00 l 0.87 0.97 l 0.91 1.56 l 0.87 0.76 l 0.87 0.31 l 0.87 0.64 l 0.87 1.48 l 0.87 0.36 l 0.87 1.65 l 0.87 0.04 l 0.87 0.60 l 0.87 0.97 l 0.87 -0.25 l 0.87 1.14 l 0.87 1.21 l 0.87 0.97 l 0.88 0.71 l 0.87 1.29 l 0.87 2.65 l 0.87 0.27 l 0.87 0.83 l 0.87 0.48 l 0.87 1.17 l 0.87 1.93 l 285.45 447.56 lineto 0.87 1.61 l 0.87 1.50 l 0.87 0.99 l 0.87 0.53 l 0.91 1.19 l 0.87 0.24 l 0.87 0.58 l 0.87 1.15 l 0.88 0.63 l 0.87 0.98 l 0.87 0.41 l 0.87 0.41 l 0.87 0.54 l 0.87 0.71 l 0.87 0.36 l 0.87 1.06 l 0.87 1.10 l 0.87 0.91 l 0.87 0.15 l 0.87 1.07 l 0.87 0.36 l 0.87 0.21 l 0.87 -0.30 l 0.87 0.62 l 0.87 1.48 l 0.87 1.05 l 0.87 1.33 l 0.87 0.22 l 0.87 0.56 l 0.87 0.59 l 0.92 1.34 l 0.87 1.07 l 0.87 0.45 l 0.87 0.94 l 0.87 1.14 l 0.87 1.16 l 0.87 0.24 l 0.87 0.68 l 0.87 1.09 l 0.87 0.54 l 0.87 0.18 l 0.87 0.45 l 0.87 0.34 l 0.87 0.82 l 0.87 1.84 l 0.87 0.53 l 0.87 0.76 l 0.87 1.45 l 0.87 0.78 l 0.87 -0.06 l 0.87 -0.05 l 0.87 0.72 l 0.87 0.31 l 0.87 -0.60 l 0.88 0.38 l 0.87 0.46 l 0.91 1.02 l 0.87 0.88 l 0.87 0.81 l 0.87 0.38 l 0.87 0.96 l 0.87 1.18 l 0.87 0.54 l 0.87 0.37 l 0.87 0.55 l 0.87 0.44 l 0.87 0.68 l 0.87 0.28 l 0.87 0.87 l 0.87 0.37 l 0.87 2.00 l 0.87 2.04 l 0.88 0.18 l 
0.87 1.12 l 0.87 0.16 l 0.87 0.73 l 0.87 0.50 l 0.87 0.32 l 0.87 0.53 l 0.87 0.79 l 0.87 0.81 l 0.87 0.58 l 0.87 1.01 l 0.91 0.83 l 0.87 0.47 l 0.87 0.19 l 0.87 1.51 l 0.87 0.76 l 0.87 0.82 l 0.87 -0.46 l 0.88 0.31 l 0.87 0.80 l 0.87 1.23 l 0.87 0.96 l 0.87 0.80 l 0.87 1.16 l 0.87 0.92 l 0.87 0.23 l 0.87 0.79 l 372.66 519.24 lineto 0.87 0.41 l 0.87 0.50 l 0.87 0.44 l 0.87 0.47 l 0.87 1.13 l 0.87 -0.09 l 0.87 0.65 l 0.87 1.10 l 0.87 0.35 l 0.92 0.59 l 0.87 0.81 l 0.87 0.66 l 0.87 1.19 l 0.87 0.17 l 0.87 0.55 l 0.87 1.10 l 0.87 0.75 l 0.87 -0.18 l 0.87 0.06 l 0.87 0.26 l 0.87 0.50 l 0.87 0.42 l 0.87 1.57 l 0.87 -0.33 l 0.87 0.58 l 0.87 0 l 0.87 0.12 l 0.87 0.19 l 0.87 1.21 l 0.87 0.97 l 0.87 -0.84 l 0.87 0.74 l 0.87 0.72 l 0.87 0.74 l 0.87 -0.11 l 0.92 0.28 l 0.87 -0.04 l 0.87 0.08 l 0.87 -0.49 l 0.87 -0.63 l 0.87 0.57 l 0.87 1.38 l 0.87 0.20 l 0.87 0.99 l 0.87 -0.07 l 0.87 0.52 l 0.87 0.85 l 0.87 0.78 l 0.87 0.01 l 0.87 1.07 l 0.87 1.58 l 0.87 0.84 l 0.87 0.79 l 0.88 1.16 l 0.87 1.08 l 0.87 0.38 l 0.87 0.24 l 0.87 -0.48 l 0.87 1.08 l 0.87 0.71 l 0.87 0.92 l 0.91 0.40 l 0.87 1.26 l 0.87 0.13 l 0.87 0.65 l 0.87 0.34 l 0.87 -0.78 l 0.87 0.49 l 0.87 0.94 l 0.87 0.88 l 0.87 1.49 l 0.88 1.43 l 0.87 0.77 l 0.87 0.39 l 0.87 0.26 l 0.87 0.77 l 0.87 1.01 l 0.87 1.04 l 0.87 0.13 l 0.87 0.77 l 0.87 0.92 l 0.87 1.01 l 0.87 0.85 l 0.87 0.40 l 0.87 1.08 l 0.87 0.02 l 0.87 0.70 l 0.87 0.79 l 0.91 1.16 l 0.88 1.49 l 0.87 0.46 l 0.87 0.96 l 0.87 0.51 l 0.87 0.42 l 0.87 0.53 l 0.87 0.83 l 0.87 0.11 l 0.87 1.15 l 0.87 0.17 l 459.87 576.51 lineto 0.87 1.18 l 0.87 0.49 l 0.87 0.58 l 0.87 0.42 l 0.87 0.78 l 0.87 0.61 l 0.87 0.78 l 0.87 1.21 l 0.87 0.97 l 0.87 1.38 l 0.87 1.18 l 0.87 0.85 l 0.87 1.89 l 0.87 0.01 l 0.92 0 l 0.87 0.64 l 0.87 1.02 l 0.87 0.35 l 0.87 -1.00 l 0.87 1.41 l 0.87 1.70 l 0.87 -0.01 l 0.87 0.55 l 0.87 0.91 l 0.87 0.34 l 0.87 0.71 l 0.87 0.43 l 0.87 0.82 l 0.87 0.59 l 0.87 -0.05 l 0.87 0.11 l 0.87 1.12 l 0.87 0.70 l 0.87 0.45 l 0.88 0.65 l 0.87 0.81 l 0.87 0 l 0.87 0 
l 0.87 0.20 l 0.87 1.37 l 0.91 0.80 l 0.87 0.44 l 0.87 0.61 l 0.87 -0.29 l 0.87 0.67 l 0.87 0.45 l 0.87 0.76 l 0.87 0.44 l 0.87 -0.19 l 0.87 0.81 l 0.87 1.87 l 0.87 0.69 l 0.88 0.20 l 0.87 1.37 l 0.87 0.65 l 0.87 0.85 l 0.87 0.57 l 0.87 -0.65 l 0.87 0.66 l 0.87 0.69 l 0.87 1.65 l 0.87 1.24 l 0.87 -0.53 l 0.87 -0.67 l 0.87 -0.17 l 0.87 -0.31 l 0.91 1.21 l 0.87 0.78 l 0.87 0.44 l 0.87 0.46 l 0.88 0.91 l 0.87 0.45 l 0.87 0.56 l 0.87 -0.07 l 0.87 0.52 l 0.87 1.12 l 0.87 1.18 l 0.87 1.15 l 0.87 1.25 l 0.87 0.33 l o 18.00 204.94 577.28 636.94 cl 0 0.8039 0 rgb 0.38 setlinewidth [] 0 setdash 1 setlinecap 1 setlinejoin 10.00 setmiterlimit np 139.66 278.38 m 0 0 l o np 139.66 278.38 m 0 8.99 l o 77.04 278.38 547.04 577.90 cl /ps 12 def /Font1 findfont 12 s 0 0 0 rgb 139.66 300.04 (0.01) 1 0 0 t 0 0 1 rgb 0.75 setlinewidth [] 0 setdash 1 setlinecap 1 setlinejoin 10.00 setmiterlimit np 130.97 636.94 m 0.16 -5.96 l 0.87 -29.77 l 0.87 -27.35 l 0.87 -23.76 l 0.87 -22.19 l 0.87 -18.93 l 0.88 -19.94 l 0.87 -17.25 l 0.87 -15.60 l 0.87 -14.01 l 0.87 -13.88 l 0.87 -10.34 l 0.87 -9.97 l 0.87 -9.50 l 0.87 -8.65 l 0.87 -6.63 l 0.87 -7.77 l 0.87 -7.32 l 0.87 -6.40 l 0.87 -4.39 l 0.87 -5.45 l 0.87 -4.60 l 0.87 -4.51 l 0.87 -4.87 l 0.92 -4.37 l 0.87 -3.10 l 0.87 -2.55 l 0.87 -2.58 l 0.87 -1.95 l 0.87 -1.30 l 0.87 -1.30 l 0.87 -1.61 l 0.87 -1.12 l 0.87 -1.40 l 0.87 -0.32 l 0.87 -0.93 l 0.87 -0.70 l 0.87 -1.47 l 0.87 -0.31 l 0.87 -0.14 l 0.87 -1.08 l 0.87 -0.21 l 0.87 -0.21 l 0.87 -0.90 l 0.87 -0.70 l 0.48 -0.22 l 0.39 0.32 l 0.87 0.37 l 0.87 0.05 l 0.87 0.46 l 0.87 -0.33 l 0.92 0.09 l 0.87 0.43 l 0.87 0.19 l 0.87 0.30 l 0.87 0.14 l 0.87 0.01 l 0.87 0.44 l 0.87 0.23 l 0.87 0 l 0.87 0.53 l 0.87 0 l 0.87 -0.06 l 0.87 0.59 l 0.87 -0.07 l 0.87 0.46 l 0.87 0.37 l 0.87 0.59 l 0.87 0.67 l 0.87 0.26 l 0.88 0.96 l 0.87 -0.17 l 0.87 0.37 l 0.87 0.63 l 0.87 0.63 l 0.87 -0.22 l 0.87 0.38 l 0.87 0.16 l 0.91 0.58 l 0.87 0.02 l 0.87 0.32 l 0.87 0.30 l 0.87 0.54 l 0.87 -0.02 l 0.87 0.95 l 0.87 0.32 l 0.87 
0.27 l 0.87 0.63 l 0.88 0.11 l 0.87 0.08 l 0.87 0.47 l 0.87 0.44 l 0.87 0.43 l 0.87 0.41 l 0.87 0.71 l 0.87 0.43 l 0.87 0.22 l 0.87 0.77 l 0.87 0.21 l 216.56 326.63 lineto 0.87 0.44 l 0.87 0.56 l 0.87 0.34 l 0.87 0.73 l 0.91 0.42 l 0.87 0.53 l 0.88 0.31 l 0.87 0.69 l 0.87 -0.31 l 0.87 -0.15 l 0.87 0.84 l 0.87 0.28 l 0.87 0.03 l 0.87 0.59 l 0.87 0.09 l 0.87 0.42 l 0.87 0.32 l 0.87 0.30 l 0.87 0.26 l 0.87 0.61 l 0.87 -0.07 l 0.87 0.38 l 0.87 0.39 l 0.87 0.18 l 0.87 0.26 l 0.87 0.17 l 0.87 0.45 l 0.87 0.26 l 0.87 1.10 l 0.87 0.59 l 0.92 0.35 l 0.87 0.58 l 0.87 0.82 l 0.87 0.31 l 0.87 0.04 l 0.87 0.88 l 0.87 0.16 l 0.87 0.25 l 0.87 1.26 l 0.87 0.33 l 0.87 0.34 l 0.87 -0.03 l 0.87 -0.05 l 0.87 0.15 l 0.87 0.59 l 0.87 0.50 l 0.87 0.65 l 0.87 0.12 l 0.87 0.73 l 0.87 0.56 l 0.87 0.42 l 0.87 0.61 l 0.88 0.13 l 0.87 0.68 l 0.87 0.75 l 0.87 0.28 l 0.91 -0.07 l 0.87 0.56 l 0.87 0.43 l 0.87 0.49 l 0.87 0.64 l 0.87 0.46 l 0.87 0.58 l 0.87 0.17 l 0.87 0.88 l 0.87 0.30 l 0.87 0.85 l 0.87 0.47 l 0.87 0.09 l 0.87 0.31 l 0.88 0.99 l 0.87 0.49 l 0.87 0.37 l 0.87 -0.04 l 0.87 0.68 l 0.87 0.75 l 0.87 0.49 l 0.87 -0.01 l 0.87 0.47 l 0.87 0.47 l 0.87 -0.10 l 0.87 0.34 l 0.87 0.04 l 0.91 0.58 l 0.87 0.03 l 0.87 0.28 l 0.87 0.85 l 0.88 0.36 l 0.87 0.68 l 0.87 0.37 l 0.87 0.38 l 0.87 0.64 l 0.87 0.22 l 0.87 0.31 l 0.87 0.82 l 0.87 0.35 l 0.87 0.43 l 0.87 0.42 l 0.87 0.30 l 303.77 367.68 lineto 0.87 0.46 l 0.87 -0.27 l 0.87 0.40 l 0.87 0.47 l 0.87 0.65 l 0.87 0.07 l 0.87 0.22 l 0.87 0.45 l 0.87 0.37 l 0.92 1.28 l 0.87 0.77 l 0.87 0.69 l 0.87 0.22 l 0.87 0.73 l 0.87 0.78 l 0.87 0.22 l 0.87 0.56 l 0.87 0.81 l 0.87 -0.01 l 0.87 0.72 l 0.87 0.24 l 0.87 0.32 l 0.87 0.21 l 0.87 0.31 l 0.87 0.19 l 0.87 0.46 l 0.87 0.61 l 0.87 0.24 l 0.87 0.22 l 0.87 0.31 l 0.87 -0.14 l 0.87 0.52 l 0.87 0.21 l 0.88 0.47 l 0.87 0.38 l 0.91 0.58 l 0.87 0.41 l 0.87 -0.05 l 0.87 -0.34 l 0.87 0.71 l 0.87 0.76 l 0.87 0.24 l 0.87 0.42 l 0.87 0.55 l 0.87 0.46 l 0.87 0.23 l 0.87 0.45 l 0.87 0.84 l 0.87 0.46 l 0.87 0.53 l 0.87 
0.27 l 0.88 0.02 l 0.87 0.68 l 0.87 0.87 l 0.87 0.72 l 0.87 0.34 l 0.87 0.17 l 0.87 0.77 l 0.87 -0.02 l 0.87 0.74 l 0.87 0.35 l 0.87 0.34 l 0.91 0.75 l 0.87 0.20 l 0.87 0.36 l 0.87 0.62 l 0.87 0.38 l 0.87 0.10 l 0.87 0.15 l 0.88 0.10 l 0.87 -0.03 l 0.87 0.68 l 0.87 0.59 l 0.87 0.60 l 0.87 0.25 l 0.87 0.04 l 0.87 0.12 l 0.87 0.61 l 0.87 0.80 l 0.87 0.30 l 0.87 0.79 l 0.87 0.69 l 0.87 0.31 l 0.87 0.15 l 0.87 0.16 l 0.87 0.51 l 0.87 0.65 l 0.87 0.45 l 0.92 0.01 l 0.87 0.55 l 0.87 0.62 l 0.87 -0.02 l 0.87 0.36 l 0.87 0.73 l 0.87 0.54 l 0.87 0.45 l 0.87 0.08 l 0.87 0.34 l 0.87 0.82 l 390.98 409.25 lineto 0.87 0.30 l 0.87 0.16 l 0.87 0.35 l 0.87 0.40 l 0.87 0.41 l 0.87 0.48 l 0.87 0.19 l 0.87 0.39 l 0.87 0.36 l 0.87 0.03 l 0.87 0.22 l 0.87 0.59 l 0.87 0.43 l 0.87 0.58 l 0.92 0.91 l 0.87 0.47 l 0.87 0.46 l 0.87 0.38 l 0.87 0.10 l 0.87 0.56 l 0.87 0.51 l 0.87 0.09 l 0.87 0.65 l 0.87 0.31 l 0.87 0.39 l 0.87 0.42 l 0.87 0.10 l 0.87 0.91 l 0.87 -0.17 l 0.87 -0.02 l 0.87 0.36 l 0.87 0.06 l 0.88 0.90 l 0.87 0.64 l 0.87 0.46 l 0.87 0.47 l 0.87 0.61 l 0.87 0.44 l 0.87 -0.02 l 0.87 0.64 l 0.91 1.08 l 0.87 0.38 l 0.87 0.96 l 0.87 0 l 0.87 0.33 l 0.87 0.59 l 0.87 0.62 l 0.87 0.33 l 0.87 0.33 l 0.87 0.30 l 0.88 0.38 l 0.87 0.15 l 0.87 0.42 l 0.87 0.80 l 0.87 0.50 l 0.87 0.56 l 0.87 -0.05 l 0.87 0.52 l 0.87 0.52 l 0.87 0.19 l 0.87 0.05 l 0.87 0.21 l 0.87 0.46 l 0.87 0.31 l 0.87 0.30 l 0.87 0.44 l 0.87 0.29 l 0.91 0.42 l 0.88 0.27 l 0.87 0.27 l 0.87 0.31 l 0.87 0.19 l 0.87 0.03 l 0.87 1.10 l 0.87 0.75 l 0.87 0.19 l 0.87 0.37 l 0.87 0.36 l 0.87 0 l 0.87 0.37 l 0.87 0.34 l 0.87 -0.04 l 0.87 0.51 l 0.87 0.30 l 0.87 0.50 l 0.87 0.15 l 0.87 0.79 l 0.87 0.12 l 0.87 0.22 l 0.87 0.60 l 0.87 0.53 l 0.87 0.60 l 0.87 0.59 l 0.92 0.89 l 0.87 1.02 l 0.87 0.35 l 0.87 0.01 l 0.87 0.42 l 0.87 0.52 l 478.19 448.90 lineto 0.87 0.38 l 0.87 -0.21 l 0.87 0.74 l 0.87 0.35 l 0.87 0.37 l 0.87 0.55 l 0.87 0.64 l 0.87 0.14 l 0.87 0.36 l 0.87 0.77 l 0.87 0.33 l 0.87 0.26 l 0.87 1.09 l 0.88 0.15 l 0.87 0.03 l 
0.87 0.50 l 0.87 0.25 l 0.87 0.34 l 0.87 -0.12 l 0.91 -0.04 l 0.87 0.25 l 0.87 0.56 l 0.87 0.16 l 0.87 0.29 l 0.87 0.45 l 0.87 0.04 l 0.87 0.16 l 0.87 0.87 l 0.87 0.71 l 0.87 0.87 l 0.87 0.83 l 0.88 0.63 l 0.87 0.21 l 0.87 0.23 l 0.87 0.20 l 0.87 0.08 l 0.87 0.46 l 0.87 -0.02 l 0.87 0.60 l 0.87 0.32 l 0.87 0.83 l 0.87 -0.29 l 0.87 0.79 l 0.87 0.34 l 0.87 0.58 l 0.91 -0.32 l 0.87 -0.11 l 0.87 -0.11 l 0.87 0.96 l 0.88 0.21 l 0.87 0.59 l 0.87 0.37 l 0.87 0.76 l 0.87 0.19 l 0.87 0.53 l 0.87 0.51 l 0.87 0.40 l 0.87 0.33 l 0.87 0.49 l o 18.00 204.94 577.28 636.94 cl 0 0 1 rgb 0.38 setlinewidth [] 0 setdash 1 setlinecap 1 setlinejoin 10.00 setmiterlimit np 169.95 278.38 m 0 0 l o np 169.95 278.38 m 0 8.99 l o 77.04 278.38 547.04 577.90 cl /ps 12 def /Font1 findfont 12 s 0 0 0 rgb 169.95 298.39 (0.005) 1 0 0 t 0 1 1 rgb 0.75 setlinewidth [] 0 setdash 1 setlinecap 1 setlinejoin 10.00 setmiterlimit np 277.47 636.94 m 0.14 -1.03 l 0.88 -6.00 l 0.87 -5.98 l 0.87 -6.15 l 0.87 -5.86 l 0.87 -5.45 l 0.87 -6.00 l 0.87 -5.63 l 0.87 -5.28 l 0.87 -5.66 l 0.87 -5.09 l 0.87 -5.20 l 0.87 -4.82 l 0.87 -4.75 l 0.91 -5.14 l 0.87 -5.00 l 0.87 -4.85 l 0.87 -4.44 l 0.88 -4.82 l 0.87 -4.31 l 0.87 -4.45 l 0.87 -4.48 l 0.87 -4.15 l 0.87 -4.00 l 0.87 -4.48 l 0.87 -3.96 l 0.87 -3.99 l 0.87 -3.78 l 0.87 -3.72 l 0.87 -3.65 l 0.87 -3.75 l 0.87 -3.48 l 0.87 -3.59 l 0.87 -3.03 l 0.87 -3.49 l 0.87 -3.03 l 0.87 -3.15 l 0.87 -3.24 l 0.87 -3.13 l 0.87 -3.11 l 0.92 -3.59 l 0.87 -3.04 l 0.87 -2.87 l 0.87 -2.65 l 0.87 -2.75 l 0.87 -2.57 l 0.87 -2.39 l 0.87 -2.70 l 0.87 -2.83 l 0.87 -2.41 l 0.87 -2.33 l 0.87 -2.53 l 0.87 -2.52 l 0.87 -2.19 l 0.87 -2.49 l 0.87 -2.01 l 0.87 -2.09 l 0.87 -2.13 l 0.87 -2.24 l 0.87 -2.03 l 0.87 -2.16 l 0.87 -1.93 l 0.87 -1.88 l 0.87 -1.85 l 0.88 -1.75 l 0.87 -1.94 l 0.91 -1.67 l 0.87 -1.68 l 0.87 -1.89 l 0.87 -1.69 l 0.87 -1.72 l 0.87 -1.71 l 0.87 -1.75 l 0.87 -1.57 l 0.87 -1.86 l 0.87 -1.42 l 0.87 -1.66 l 0.87 -1.56 l 0.87 -1.41 l 0.87 -1.42 l 0.87 -1.41 l 0.87 -1.45 l 0.88 -1.57 l 
0.87 -1.34 l 0.87 -1.42 l 0.87 -1.11 l 0.87 -1.08 l 0.87 -1.16 l 0.87 -1.15 l 0.87 -1.00 l 0.87 -1.20 l 0.87 -1.28 l 0.87 -1.39 l 0.91 -1.23 l 0.87 -1.02 l 0.87 -1.04 l 0.87 -0.94 l 0.87 -0.99 l 0.87 -0.91 l 363.95 353.37 lineto 0.88 -1.01 l 0.87 -0.95 l 0.87 -1.14 l 0.87 -0.89 l 0.87 -1.03 l 0.87 -0.95 l 0.87 -0.75 l 0.87 -0.73 l 0.87 -0.68 l 0.87 -0.78 l 0.87 -0.87 l 0.87 -0.89 l 0.87 -0.89 l 0.87 -0.66 l 0.87 -0.82 l 0.87 -0.77 l 0.87 -0.74 l 0.87 -0.55 l 0.87 -0.76 l 0.92 -0.61 l 0.87 -0.61 l 0.87 -0.55 l 0.87 -0.36 l 0.87 -0.59 l 0.87 -0.62 l 0.87 -0.64 l 0.87 -0.81 l 0.87 -0.67 l 0.87 -0.75 l 0.87 -0.71 l 0.87 -0.49 l 0.87 -0.52 l 0.87 -0.37 l 0.87 -0.51 l 0.87 -0.53 l 0.87 -0.51 l 0.87 -0.42 l 0.87 -0.35 l 0.87 -0.37 l 0.87 -0.60 l 0.87 -0.49 l 0.87 -0.37 l 0.87 -0.50 l 0.87 -0.42 l 0.87 -0.34 l 0.92 -0.41 l 0.87 -0.33 l 0.87 -0.50 l 0.87 -0.57 l 0.87 -0.29 l 0.87 -0.35 l 0.87 -0.42 l 0.87 -0.23 l 0.87 -0.47 l 0.87 -0.25 l 0.87 -0.39 l 0.87 -0.32 l 0.87 -0.30 l 0.87 -0.39 l 0.87 -0.32 l 0.87 -0.34 l 0.87 -0.28 l 0.87 -0.37 l 0.88 -0.30 l 0.87 -0.20 l 0.87 -0.27 l 0.87 -0.27 l 0.87 -0.18 l 0.87 -0.36 l 0.87 -0.24 l 0.87 -0.35 l 0.91 -0.26 l 0.87 -0.24 l 0.87 -0.35 l 0.87 -0.36 l 0.87 -0.19 l 0.87 -0.23 l 0.87 -0.21 l 0.87 -0.19 l 0.87 -0.18 l 0.87 -0.18 l 0.88 -0.22 l 0.87 -0.28 l 0.87 -0.19 l 0.87 -0.11 l 0.87 -0.05 l 0.87 -0.10 l 0.87 -0.10 l 0.87 -0.14 l 0.87 0.01 l 0.87 -0.12 l 0.87 -0.16 l 0.87 -0.10 l 0.87 -0.05 l 0.87 -0.09 l 0.87 -0.10 l 0.87 -0.06 l 0.87 -0.08 l 0.91 -0.08 l 451.17 310.65 lineto 0.87 -0.14 l 0.87 -0.12 l 0.87 -0.09 l 0.87 -0.06 l 0.87 -0.19 l 0.87 -0.14 l 0.87 -0.01 l 0.87 -0.07 l 0.87 -0.08 l 0.87 -0.12 l 0.87 -0.21 l 0.87 -0.03 l 0.87 0.02 l 0.87 0.06 l 0.87 0.01 l 0.87 -0.09 l 0.87 -0.03 l 0.87 -0.16 l 0.87 -0.09 l 0.87 -0.12 l 0.87 0.06 l 0.87 -0.09 l 0.87 -0.04 l 0.87 -0.06 l 0.92 -0.13 l 0.87 -0.01 l 0.87 0.03 l 0.87 0.02 l 0.87 -0.07 l 0.87 -0.04 l 0.87 -0.01 l 0.87 0.02 l 0.87 0.01 l 0.87 -0.04 l 0.87 -0.06 l 0.87 -0.09 l 
0.87 0.15 l 0.87 -0.06 l 0.87 -0.08 l 0.87 0.07 l 0.87 -0.06 l 0.87 0 l 0.87 -0.07 l 0.22 -0.07 l 0.65 0.03 l 0.88 0.17 l 0.87 0.03 l 0.87 0.06 l 0.87 -0.06 l 0.87 -0.05 l 0.87 0.12 l 0.91 -0.14 l 0.87 -0.04 l 0.87 -0.02 l 0.87 0.06 l 0.87 0.08 l 0.87 0.07 l 0.87 0.02 l 0.87 -0.03 l 0.87 0.02 l 0.87 0.02 l 0.87 -0.01 l 0.87 0.05 l 0.88 0.05 l 0.87 0.07 l 0.87 -0.06 l 0.87 -0.01 l 0.87 0 l 0.87 -0.13 l 0.87 0.03 l 0.87 0.01 l 0.87 -0.12 l 0.87 0.10 l 0.87 -0.02 l 0.87 0.08 l 0.87 0.05 l 0.87 0.07 l 0.91 -0.07 l 0.87 -0.02 l 0.87 -0.06 l 0.87 -0.05 l 0.88 0.07 l 0.87 -0.04 l 0.87 0.04 l 0.87 0.14 l 0.87 -0.02 l 0.87 -0.05 l 0.87 -0.02 l 0.87 0.13 l 0.87 0.11 l 0.87 -0.07 l o 18.00 204.94 577.28 636.94 cl 0 1 1 rgb 0.38 setlinewidth [] 0 setdash 1 setlinecap 1 setlinejoin 10.00 setmiterlimit np 488.85 278.38 m 0 0 l o np 488.85 278.38 m 0 8.99 l o 77.04 278.38 547.04 577.90 cl /ps 12 def /Font1 findfont 12 s 0 0 0 rgb 488.85 297.33 (0.001) 1 0 0 t 0.75 setlinewidth [] 0 setdash 1 setlinecap 1 setlinejoin 10.00 setmiterlimit np 77.04 308.37 m 470.00 0 l o ep %%Trailer %%Pages: 1 %%EOF gbm/inst/doc/oobperf2.pdf0000644000176200001440000002317112102666411014755 0ustar liggesusers%PDF-1.3 %Çì¢ 6 0 obj <> stream xœ­ZKof· Ý¿ân ´æVêµMÑM¢Mc´ët^I`O03ió÷K‰Qòg{&6‚‰MŠ’Ž)ŠÒõÇÃþpý?þùúîòñò§Âñþóå[ü5Ÿ±µvõ,ḻ¤r¢¢ªævjœ‡3vtRÅ—.?¦’¯ïŽonp2½?[ˆù¸yw!øÿ3ñḹ»üþøÃÍO—¿ÜßlîÌ5 ]Mݺ¹p|zݼ¤ÏpøÂì 8'ŸÞ^ÞýñRÎáømÿŠÿ~zÜþ;\ޝ¨ÿõ);i.ñL*ï[:³*nѤâ‚|míôÝ`H-ôµkkpÈg’Öàb“¾ Ï§½½[ÓdÁ&¢P¥êr©™Ukëˆ\û¥FÂX+œ£¿§ñ*6 {'öƒ“R2ÅF­®K‘’”‹Hd›Ali¬ì¥æì‚%mXSb{^Kb¬ =‹ ƺ¹¼ŒÖ\]fk’Á ižÑ1Z•‹´ÚâÖvY»ÊYÚi|aNæf_]ÐWEKkS?¯Š—ô©¦—Eæ$4÷Êl‚¾['ó z`NϤŠ^=—bÒv‡RÔV”BYmC\Ç nËmXü†Õ+6Z‹ï´N&¬ ÆšXœ£Ës6òÂDC^šhE–Õ—Shk»®]ä´Ž¯ÌñüÊ,ドŒ_xmÓoÆ«ÝË>B§ ÎnLŽÔ³ï–êž05YîiS¶ÀüÞyìy¦¥pVU ll2c8ú±ÚÈ^< Z`¶7Ó3 ÙØ|6–$:æ¡a%«¢¢x[úGižó**zŠö¼Qû´;Z:êàÕbìÜîʲ*¼–Ä,ƒÖ–ì¤=”½U›¢ì {è³9gzÜ‘è¼*`é€åeÀ4Ò‹™23F…”£‚&…YV)®¹´[(¬˜\e&V†oBjü‘`ñ­Ôús=4IÁLÜ]㊸,×u:x0Õ5ÄCi2!{³lþ®%Ë ¬Èeë’Ó6hÖUŒIó•5)Ê:%]4-Ê(ú¢“‚æx˜íi5çx˜ãsÈp1nt7Ñ.Àå½Ú÷I°“‚§ë‰ÂϰªZBW*]” TÙ’i•O¦| tõÈê3ëFH\j¡b\@.CÂ`W,g0ýÖ†èÔ vaV' 躧©…Í«ÚnÜÈ 
¯4‡l"Æ&踤Y]EPúx’ÂPQ–œ&ô™.žSõdœ“\Z4a±'pv4ì¡0¹.Ê– ª|Ê´i"%`&6ºúL§^]ý>B¡—³€¦' ·çÅlN\î.Fâžùc³¸½oñÝ%:W–,£Û³ÐÙx¾€d~{A…6™ ‹è!“{ |¹à-Š–Ý}l4m’ãPFEP RÈþEE6—œÙ·$Uë²+Cœ  £d]EÔUð:ƒÂf’§"ûá†ÙeœÁvМ—Is¡ *ï°‹(æÂDѶ.“+t²9&l3¨é †=ýÅ ›]|nÂÀ7à”éAÅ‹‚.Œ0)žo|ê"TõÝ ã¨ðj‘Í™­ídQ¸m¯ >idÂrÒUøÉ…[n©Ì–Udº¥Î.ÛÝlxÄN: \a¦eùöELˆ,Ì´˜f¦ü|f0?•—gÆcÕ…«^˜R]Þ˜©XRGÖå+Ì`mêèe”˜ùVå ¯l/òó™ñX}à­ã¥™Á`õõUÌÔkÌya&ô[±Èú&Àö"¿33ùÕ?rx3XšeæÛ?±›¼=¯7f‚‡Æ™r0!ò­Ê^ÞBØ^äç3º¦½@]釳ð8òÁç1ÜßúpeÖ{]¹W:?líû¡ý†J/·UFOùŸº¯÷å:L=·C•zá¡¥¼&9åuibþãÖýÃÖýýõÚ#?TqïL]©,·~Hüï6ȹ+“ÿ<=ϧ­òÚË¦Ý ì¥|ªvlù‡ãd'ŸÛS Éß?gâ aG¹;èóºkñNø$ÅAhe<$©X=ñû4ÞŠ¿©¤nx¿X¶×Kä1> /Contents 6 0 R >> endobj 3 0 obj << /Type /Pages /Kids [ 5 0 R ] /Count 1 >> endobj 1 0 obj <> endobj 4 0 obj <> endobj 14 0 obj <> endobj 15 0 obj <> endobj 12 0 obj <> endobj 11 0 obj <>stream xœ]V TÇíq˜îV“wYÅ=€²ÈòA6²ˆ0:ÈŽ¢q‹[D â↚î¨ ›{T "aP02¢¨ä›¨?¯I çÿ4?çÿsæTwW×}õú½{oˆ2@‰D"÷¹n ÷¹V^JU†2-!&Z?9A° ##ÅØŸý³àO_ÉHJqàå’†{M¡ÇÃÊ/(±H”•Wè–”œ’Ÿ&Ÿ°ÐÒÊjòß3ö3gΔ/ÉþëÜ]™š—(On2”ª¤dµ21m¶Ü¬V©bäqªìäøTytl¬2V ‰V)—Ëç%¨’““2äÝ,åvvöÖdp˜Ÿ ^’ž*ŒNL•+äʸtUtÊÿLR5.1Æ×5)ÖMé·Ô?%.5>-!=(C¢^â8uÚ k¹Ý8{‡)åG¹S6ÔÊŸò ÆRó(;Ê“²§)/*ˆò¦‚)*„šJ-¤|)Wj:5Ÿr£8ÊŒ2§†QF”1%¢L(š2¥¤Ô—ÔxRVÊ€„ûŽº) íý9`ñ€jñpqxŸ¸Í@eð³Ä\â/I–œ–Ü•ôÐèýôuf“ÌeþÍ&³ïŠšä.øjÐàA‰ƒÞÁ)#¡ šã‹LQ«ó3ØÞa.½Ð$sÒïuÅÏà"#mþ£¾éùƒ‹1 þw‡ g:}êÇ;…ª]#dÒ ZÆHØ’Ö*Ln¡N±°~ã6__û}fi\¥¢Ô±x”-6 mvê’Ãh0ik/wóÓéÕÓG»#Ö6¨¾óZÍ˦ËK\wóF½Qq¯{‘i…Z±oÑHt íUcŽÁñ}ÆXÕk,Á\ŸZ+ 1:õÚ¢vÈo‡œvSÔ mÝó»Í¥B›´Óèú¡sYéDz³EÕ7†#mLM+œ:Sûx8ªÉ¼ªüAy&t¿I´’îÆ9È/¦•n<šu,sŸÅ ØÕª™)Y‰’E^ø9ùÉT “k§’ȇÓö.Žâ¿Yžž–žª^‰XRÑ¿*5b8*”qx‚ÃhìŽÝµ£Á&t}„9àíø[ñ¹ÎÜëzgl†8ÙÚ´‚Lohºy}MP± Ð€3éL'T“¾äzÕÜ |„ޝ <¡Ïz„áÙ¼´{tbŒzTuü~…Lš3ï1ƒW Ñ\×ÙØDÿzÈ‚ÙSlç?#0ºóô¥Ì¨wiZ»ÐÓ.BÝâÞIâÁÑVƒ‘%úJìçí7áA–mt¯_ð(ù ‚yèÝÛÃÌZÑB×%ä$ªý|œHãlðÓ£nÕe'䧪öªx5€c«(÷¹XØ k8ôäÛk9çžÏ®¶$ØñÖ„s𜗣`v4]Lè‘6/<Î … È#I—3On<¹­šÍkàvöܺ÷ ±í÷<§nE[·m%t9ô×G…ÝbaXo6‡•x4¶ÁY8 È–jÛŽWÝâÛ^ ‚Á,|ƒy°ÆËd3 ÀáwlˆØ}4¹Ø`Û1`^àõ\lx#8ETáÙÍ¢¦1ì!‚pl§Ï<»s;ÚvHÖÈdå¯Í]‰X爨9¼ƒ‡g“.¼Cïdôt,† í°KŸ¼èƒîrMß@ι|þÐUô…‘³ZñX®íSwÓ‚…ì‚æEÍ¢Ù³C9ퟓí]µŸEi.-oÔ+²T¯È£ 
j9Ty¾”•6;ttÇÍ\öfuÞÆÜµ(-Y¾È‰•–¿!ª¬Á‰M`¢ça|?.(O;Æ…‘öÜþºÔ˳Ç,vžZâq)”?U¿¢=@WN\}À&3È}ýâŒÄtÕâ쯑%ìN;˜U¸aߦSì4z×ÄV0FôðèÏ_+|€À˜%Á}H`œåËuÕ¸`slì2Å.¨ŸÐõšü}¢ Ö«ðŒ¸mï=oâ!„F_)&ͼ _¨yMÖÍÕ%H9,,l™k¤²àp†lMá·…›ÊÙ)t>Ò°F"†>»ÿ›&êÒ˜£üìCžû£òa×.jxX–”'ûliÀ…8Iq4q‘MŸºƒ¸™ÞËŽ_ŸS$;²rw ZÊ~ò4­¢nÜÜÈ´„Éà$CšŸÖ,LjýD:¿“Tl*ŒÇº{þ¤“Ó!eêûEEyß51k·¬Ù¶ ±qëv—ó€Ÿ` Ù8³|‹D¹;‰`gèŠ;è^µÁôÑÏh½šCõ+LI^ùDÑYB†™P<W1Ò y ‡£wôù{2™¡›È8Þü½µòxc¥LšåJ Äévlìýlh×5΄}Ý$ð–þEpÖ "„b‰5'ꜰ\p"· ÐKúsë÷Ögb˜Lê1½Oý)£&fÁ®UÏ‘‚þŒNWrÒŠ­ë7£M‰« ñp‡éö¬Âœ‹"=6^–š¼N½u!ûŒÞùãÙR b[.&…òé ŠÏÈñÞ€ådo^¾f~Š*y²Ò¬Éüþõ ¦¤ö–lGȱÔZtäßIdžJÚ°2%-AµdUb}”§jêÊO¼(àµ{æŸ(`ûe¤OWa;zÕ’‰4ŽÔõáeBŸd …}jIÿiñùäzÉúŸ¹´RtéeÒÎHŸþ\w§á ÷é'ôwÊb½õÂHæupµ¥Gt†˜LU}Äy ˆ‘ VZù˜ù?÷í€êNâ¾µý,jyÈH+矯Il¶YHÀœ§~Ä2·¯“ýcy(e`<.æºoÏÂ&x°ï,») ž€1ß|¢åû‰ –m‚‚tªZ+†'àÌag-8l-,öap‹.@bÃ@u‡ShH{’þT S¶"ah—XHî åtè™ä)}¡ò‡uˆ}sÛgô8_+çðsO2ø)ôÞ»“ŠWœ~¤zBÄ2æÝ˜r›·xDhÌõþ„Iàý)ôMGñs(÷\"Ñ7‘ò­Ö¥SÈ y(£Qó2æ|Ðå¢âlÀ]é:Xô¼†Úwc©GPòB%Ÿ sëßþ†î¢K±û}váA/d¼–Çú¥(×Fmucµôök;J÷–”\ºr¬±On¸.[¤ˆãmâ‰Ó{mÀvÃÕgº•´@N«i¡ ºæ‘Þ °4ܺ> endobj 8 0 obj <>stream xœcd`ab`ddöuóññÐÉÌM-Ö ÊÏMÌ «ýfü!ÃôC–ù·Íﵿfÿ2`•eð™ËÌ»àû¡ïG¿äÿ¾O€™‘qãé»Îù•E™é% ¡AášÚÚ:CKKK…¤J˜Œ‚Kjqfzž‚Q–š“_›šWb­à T““™¬žSYQ¬˜’’šÒ–˜“š­à–™“YP_¦ á¬©`d``¨ $Œ¬üJsS‹òu2óÒ2ó2K*óRüsSÓrSRA¸äf–U*˜dæÁuûeæ&•+€=«à—o©à£”š^š“X„)ÃÀÀÀ¨Ä LŒŒ,ìßûø€¨|þ°ùßCæ³­äzÀ½r2σ¹<¼ }nx endstream endobj 17 0 obj 298 endobj 10 0 obj <> endobj 13 0 obj <> endobj 2 0 obj <>endobj xref 0 18 0000000000 65535 f 0000002821 00000 n 0000009361 00000 n 0000002762 00000 n 0000002869 00000 n 0000002602 00000 n 0000000015 00000 n 0000002582 00000 n 0000006633 00000 n 0000006395 00000 n 0000007036 00000 n 0000003360 00000 n 0000003011 00000 n 0000008197 00000 n 0000002938 00000 n 0000002968 00000 n 0000006374 00000 n 0000007016 00000 n trailer << /Size 18 /Root 1 0 R /Info 2 0 R >> startxref 9411 %%EOF gbm/inst/doc/oobperf2.eps0000644000176200001440000007527612102666411015010 0ustar liggesusers%!PS-Adobe-3.0 EPSF-3.0 %%Title: WMF2EPS 1.32 : WMF->EPS conversion for oobperf2.wmf %%Creator: PScript5.dll Version 5.2.2 %%CreationDate: 1/30/2005 11:3:37 %%For: gregr 
%%BoundingBox: 56 56 404 271 %%Pages: 1 %%Orientation: Portrait %%PageOrder: Ascend %%DocumentNeededResources: (atend) %%DocumentSuppliedResources: (atend) %%DocumentData: Clean7Bit %%TargetDevice: (WMF2EPS Color PS) (2010.0) 2 %%LanguageLevel: 2 %%EndComments %%BeginDefaults %%PageBoundingBox: 0 0 405 271 %%ViewingOrientation: 1 0 0 1 %%EndDefaults %%BeginProlog %%BeginResource: file Pscript_WinNT_ErrorHandler 5.0 0 /currentpacking where{pop/oldpack currentpacking def/setpacking where{pop false setpacking}if}if/$brkpage 64 dict def $brkpage begin/prnt{dup type/stringtype ne{=string cvs}if dup length 6 mul/tx exch def/ty 10 def currentpoint/toy exch def/tox exch def 1 setgray newpath tox toy 2 sub moveto 0 ty rlineto tx 0 rlineto 0 ty neg rlineto closepath fill tox toy moveto 0 setgray show}bind def /nl{currentpoint exch pop lmargin exch moveto 0 -10 rmoveto}def/=={/cp 0 def typeprint nl}def/typeprint{dup type exec}readonly def/lmargin 72 def/rmargin 72 def/tprint{dup length cp add rmargin gt{nl/cp 0 def}if dup length cp add/cp exch def prnt}readonly def/cvsprint{=string cvs tprint( )tprint}readonly def /integertype{cvsprint}readonly def/realtype{cvsprint}readonly def/booleantype {cvsprint}readonly def/operatortype{(--)tprint =string cvs tprint(-- )tprint} readonly def/marktype{pop(-mark- )tprint}readonly def/dicttype{pop (-dictionary- )tprint}readonly def/nulltype{pop(-null- )tprint}readonly def /filetype{pop(-filestream- )tprint}readonly def/savetype{pop(-savelevel- ) tprint}readonly def/fonttype{pop(-fontid- )tprint}readonly def/nametype{dup xcheck not{(/)tprint}if cvsprint}readonly def/stringtype{dup rcheck{(\()tprint tprint(\))tprint}{pop(-string- )tprint}ifelse}readonly def/arraytype{dup rcheck {dup xcheck{({)tprint{typeprint}forall(})tprint}{([)tprint{typeprint}forall(]) tprint}ifelse}{pop(-array- )tprint}ifelse}readonly def/packedarraytype{dup rcheck{dup xcheck{({)tprint{typeprint}forall(})tprint}{([)tprint{typeprint} 
forall(])tprint}ifelse}{pop(-packedarray- )tprint}ifelse}readonly def/courier /Courier findfont 10 scalefont def end errordict/handleerror{systemdict begin $error begin $brkpage begin newerror{/newerror false store vmstatus pop pop 0 ne{grestoreall}if errorname(VMerror)ne{showpage}if initgraphics courier setfont lmargin 720 moveto errorname(VMerror)eq{userdict/ehsave known{clear userdict /ehsave get restore 2 vmreclaim}if vmstatus exch pop exch pop PrtVMMsg}{ (ERROR: )prnt errorname prnt nl(OFFENDING COMMAND: )prnt/command load prnt $error/ostack known{nl nl(STACK:)prnt nl nl $error/ostack get aload length{==} repeat}if}ifelse systemdict/showpage get exec(%%[ Error: )print errorname =print(; OffendingCommand: )print/command load =print( ]%%)= flush}if end end end}dup 0 systemdict put dup 4 $brkpage put bind readonly put/currentpacking where{pop/setpacking where{pop oldpack setpacking}if}if %%EndResource userdict /Pscript_WinNT_Incr 230 dict dup begin put %%BeginResource: file Pscript_FatalError 5.0 0 userdict begin/FatalErrorIf{{initgraphics findfont 1 index 0 eq{exch pop}{dup length dict begin{1 index/FID ne{def}{pop pop}ifelse}forall/Encoding {ISOLatin1Encoding}stopped{StandardEncoding}if def currentdict end /ErrFont-Latin1 exch definefont}ifelse exch scalefont setfont counttomark 3 div cvi{moveto show}repeat showpage quit}{cleartomark}ifelse}bind def end %%EndResource userdict begin/PrtVMMsg{vmstatus exch sub exch pop gt{[ (This job requires more memory than is available in this printer.)100 500 (Try one or more of the following, and then print again:)100 485 (For the output format, choose Optimize For Portability.)115 470 (In the Device Settings page, make sure the Available PostScript Memory is accurate.) 115 455(Reduce the number of fonts in the document.)115 440 (Print the document in parts.)115 425 12/Times-Roman showpage (%%[ PrinterError: Low Printer VM ]%%)= true FatalErrorIf}if}bind def end version cvi 2016 ge{/VM?{pop}bind def}{/VM? 
userdict/PrtVMMsg get def}ifelse %%BeginResource: file Pscript_Win_Basic 5.0 0 /d/def load def/,/load load d/~/exch , d/?/ifelse , d/!/pop , d/`/begin , d/^ /index , d/@/dup , d/+/translate , d/$/roll , d/U/userdict , d/M/moveto , d/- /rlineto , d/&/currentdict , d/:/gsave , d/;/grestore , d/F/false , d/T/true , d/N/newpath , d/E/end , d/Ac/arc , d/An/arcn , d/A/ashow , d/D/awidthshow , d/C /closepath , d/V/div , d/O/eofill , d/L/fill , d/I/lineto , d/-c/curveto , d/-M /rmoveto , d/+S/scale , d/Ji/setfont , d/Lc/setlinecap , d/Lj/setlinejoin , d /Lw/setlinewidth , d/Lm/setmiterlimit , d/sd/setdash , d/S/show , d/LH/showpage , d/K/stroke , d/W/widthshow , d/R/rotate , d/L2? false/languagelevel where{pop languagelevel 2 ge{pop true}if}if d L2?{/xS/xshow , d/yS/yshow , d/zS/xyshow , d}if/b{bind d}bind d/bd{bind d}bind d/xd{~ d}bd/ld{, d}bd/bn/bind ld/lw/Lw ld /lc/Lc ld/lj/Lj ld/sg/setgray ld/ADO_mxRot null d/self & d/OrgMx matrix currentmatrix d/reinitialize{: OrgMx setmatrix[/TextInit/GraphInit/UtilsInit counttomark{@ where{self eq}{F}?{cvx exec}{!}?}repeat cleartomark ;}b /initialize{`{/Pscript_Win_Data where{!}{U/Pscript_Win_Data & put}?/ADO_mxRot ~ d/TextInitialised? F d reinitialize E}{U/Pscript_Win_Data 230 dict @ ` put /ADO_mxRot ~ d/TextInitialised? F d reinitialize}?}b/terminate{!{& self eq {exit}{E}?}loop E}b/suspend/terminate , d/resume{` Pscript_Win_Data `}b U ` /lucas 21690 d/featurebegin{countdictstack lucas[}b/featurecleanup{stopped {cleartomark @ lucas eq{! 
exit}if}loop countdictstack ~ sub @ 0 gt{{E}repeat} {!}?}b E/snap{transform 0.25 sub round 0.25 add ~ 0.25 sub round 0.25 add ~ itransform}b/dsnap{dtransform round ~ round ~ idtransform}b/nonzero_round{@ 0.5 ge{round}{@ -0.5 lt{round}{0 ge{1}{-1}?}?}?}b/nonzero_dsnap{dtransform nonzero_round ~ nonzero_round ~ idtransform}b U<04>cvn{}put/rr{1 ^ 0 - 0 ~ - neg 0 - C}b/irp{4 -2 $ + +S fx 4 2 $ M 1 ^ 0 - 0 ~ - neg 0 -}b/rp{4 2 $ M 1 ^ 0 - 0 ~ - neg 0 -}b/solid{[]0 sd}b/g{@ not{U/DefIf_save save put}if U/DefIf_bool 2 ^ put}b/DefIf_El{if U/DefIf_bool get not @{U/DefIf_save get restore}if}b/e {DefIf_El !}b/UDF{L2?{undefinefont}{!}?}b/UDR{L2?{undefineresource}{! !}?}b /freeVM{/Courier findfont[40 0 0 -40 0 0]makefont Ji 2 vmreclaim}b/hfRedefFont {findfont @ length dict `{1 ^/FID ne{d}{! !}?}forall & E @ ` ~{/CharStrings 1 dict `/.notdef 0 d & E d}if/Encoding 256 array 0 1 255{1 ^ ~/.notdef put}for d E definefont !}bind d/hfMkCIDFont{/CIDFont findresource @ length 2 add dict `{1 ^ @/FID eq ~ @/XUID eq ~/UIDBase eq or or{! !}{d}?}forall/CDevProc ~ d/Metrics2 16 dict d/CIDFontName 1 ^ d & E 1 ^ ~/CIDFont defineresource ![~]composefont !} bind d %%EndResource %%BeginResource: file Pscript_Win_Utils_L2 5.0 0 /rf/rectfill , d/fx{1 1 dtransform @ 0 ge{1 sub 0.5}{1 add -0.5}? 3 -1 $ @ 0 ge {1 sub 0.5}{1 add -0.5}? 3 1 $ 4 1 $ idtransform 4 -2 $ idtransform}b/BZ{4 -2 $ snap + +S fx rf}b/rs/rectstroke , d/rc/rectclip , d/UtilsInit{currentglobal{F setglobal}if}b/scol{! setcolor}b/colspA/DeviceGray d/colspABC/DeviceRGB d /colspRefresh{colspABC setcolorspace}b/SetColSpace{colspABC setcolorspace}b /resourcestatus where{!/ColorRendering/ProcSet resourcestatus{! ! T}{F}?}{F}? not{/ColorRendering<>/defineresource where{!/ProcSet defineresource !}{! !}?}if/buildcrdname{/ColorRendering/ProcSet findresource ` mark GetHalftoneName @ type @/nametype ne ~/stringtype ne and{!/none}if(.) 
GetPageDeviceName @ type @/nametype ne ~/stringtype ne and{!/none}if(.)5 ^ 0 5 -1 1{^ length add}for string 6 1 $ 5 ^ 5{~ 1 ^ cvs length 1 ^ length 1 ^ sub getinterval}repeat ! cvn 3 1 $ ! ! E}b/definecolorrendering{~ buildcrdname ~ /ColorRendering defineresource !}b/findcolorrendering where{!}{ /findcolorrendering{buildcrdname @/ColorRendering resourcestatus{! ! T}{ /ColorRendering/ProcSet findresource ` GetSubstituteCRD E F}?}b}? /selectcolorrendering{findcolorrendering !/ColorRendering findresource setcolorrendering}b/G2UBegin{findresource/FontInfo get/GlyphNames2Unicode get `}bind d/G2CCBegin{findresource/FontInfo get/GlyphNames2HostCode get `}bind d /G2UEnd{E}bind d/AddFontInfoBegin{/FontInfo 8 dict @ `}bind d/AddFontInfo{ /GlyphNames2Unicode 16 dict d/GlyphNames2HostCode 16 dict d}bind d /AddFontInfoEnd{E d}bind d/T0AddCFFMtx2{/CIDFont findresource/Metrics2 get ` d E}bind d %%EndResource end %%EndProlog %%BeginSetup [ 1 0 0 1 0 0 ] false Pscript_WinNT_Incr dup /initialize get exec 1 setlinecap 1 setlinejoin /mysetup [ 72 600 V 0 0 -72 600 V 0 270.99212 ] def %%EndSetup %%Page: 1 1 %%PageBoundingBox: 0 0 405 271 %%EndPageComments %%BeginPageSetup /DeviceRGB dup setcolorspace /colspABC exch def mysetup concat colspRefresh %%EndPageSetup 0 0 0 1 scol : 472 1 43 112 rc Pscript_WinNT_Incr begin %%BeginResource: file Pscript_Text 5.0 0 /TextInit{TextInitialised? not{/Pscript_Windows_Font & d/TextInitialised? T d /fM[1 0 0 1 0 0]d/mFM matrix d/iMat[1 0 0.212557 1 0 0]d}if}b/copyfont{1 ^ length add dict `{1 ^/FID ne{d}{! !}?}forall & E}b/EncodeDict 11 dict d/bullets {{/bullet}repeat}b/rF{3 copyfont @ ` ~ EncodeDict ~ get/Encoding ~ 3 ^/0 eq{& /CharStrings known{CharStrings/Eth known not{! EncodeDict/ANSIEncodingOld get} if}if}if d E}b/mF{@ 7 1 $ findfont ~{@/Encoding get @ StandardEncoding eq{! T}{ {ISOLatin1Encoding}stopped{! F}{eq}?{T}{@ ` T 32 1 127{Encoding 1 ^ get StandardEncoding 3 -1 $ get eq and}for E}?}?}{F}?{1 ^ ~ rF}{0 copyfont}? 6 -2 $ ! ! 
~ !/pd_charset @ where{~ get 128 eq{@ FDV 2 copy get @ length array copy put pd_CoverFCRange}if}{!}? 2 ^ ~ definefont fM 5 4 -1 $ put fM 4 0 put fM makefont Pscript_Windows_Font 3 1 $ put}b/sLT{: Lw -M currentpoint snap M 0 - 0 Lc K ;}b/xUP null d/yUP null d/uW null d/xSP null d/ySP null d/sW null d/sSU{N /uW ~ d/yUP ~ d/xUP ~ d}b/sU{xUP yUP uW sLT}b/sST{N/sW ~ d/ySP ~ d/xSP ~ d}b/sT {xSP ySP sW sLT}b/sR{: + R 0 0 M}b/sRxy{: matrix astore concat 0 0 M}b/eR/; , d /AddOrigFP{{&/FontInfo known{&/FontInfo get length 6 add}{6}? dict ` /WinPitchAndFamily ~ d/WinCharSet ~ d/OrigFontType ~ d/OrigFontStyle ~ d /OrigFontName ~ d & E/FontInfo ~ d}{! ! ! ! !}?}b/mFS{makefont Pscript_Windows_Font 3 1 $ put}b/mF42D{0 copyfont `/FontName ~ d 2 copy ~ sub 1 add dict `/.notdef 0 d 2 copy 1 ~{@ 3 ^ sub Encoding ~ get ~ d}for & E /CharStrings ~ d ! ! & @ E/FontName get ~ definefont}b/mF42{15 dict ` @ 4 1 $ FontName ~ d/FontType 0 d/FMapType 2 d/FontMatrix[1 0 0 1 0 0]d 1 ^ 254 add 255 idiv @ array/Encoding ~ d 0 1 3 -1 $ 1 sub{@ Encoding 3 1 $ put}for/FDepVector Encoding length array d/CharStrings 2 dict `/.notdef 0 d & E d 0 1 Encoding length 1 sub{@ @ 10 lt{! FontName length 1 add string}{100 lt{FontName length 2 add string}{FontName length 3 add string}?}? @ 0 FontName @ length string cvs putinterval @ 3 -1 $ @ 4 1 $ 3 string cvs FontName length ~ putinterval cvn 1 ^ 256 mul @ 255 add 3 -1 $ 4 ^ findfont mF42D FDepVector 3 1 $ put}for & @ E /FontName get ~ definefont ! ! ! mF}b/mF_OTF_V{~ ! ~ ! 4 -1 $ ! findfont 2 ^ ~ definefont fM @ @ 4 6 -1 $ neg put 5 0 put 90 matrix R matrix concatmatrix makefont Pscript_Windows_Font 3 1 $ put}b/mF_TTF_V{3{~ !}repeat 3 -1 $ ! findfont 1 ^ ~ definefont Pscript_Windows_Font 3 1 $ put}b/UmF{L2? 
{Pscript_Windows_Font ~ undef}{!}?}b/UmF42{@ findfont/FDepVector get{/FontName get undefinefont}forall undefinefont}b %%EndResource end reinitialize Pscript_WinNT_Incr begin %%BeginResource: file Pscript_Encoding256 5.0 0 /CharCol256Encoding[/.notdef/breve/caron/dotaccent/dotlessi/fi/fl/fraction /hungarumlaut/Lslash/lslash/minus/ogonek/ring/Zcaron/zcaron/.notdef/.notdef /.notdef/.notdef/.notdef/.notdef/.notdef/.notdef/.notdef/.notdef/.notdef /.notdef/.notdef/.notdef/.notdef/.notdef/space/exclam/quotedbl/numbersign /dollar/percent/ampersand/quotesingle/parenleft/parenright/asterisk/plus/comma /hyphen/period/slash/zero/one/two/three/four/five/six/seven/eight/nine/colon /semicolon/less/equal/greater/question/at/A/B/C/D/E/F/G/H/I/J/K/L/M/N/O/P/Q/R/S /T/U/V/W/X/Y/Z/bracketleft/backslash/bracketright/asciicircum/underscore/grave /a/b/c/d/e/f/g/h/i/j/k/l/m/n/o/p/q/r/s/t/u/v/w/x/y/z/braceleft/bar/braceright /asciitilde/.notdef/Euro/.notdef/quotesinglbase/florin/quotedblbase/ellipsis /dagger/daggerdbl/circumflex/perthousand/Scaron/guilsinglleft/OE/.notdef /.notdef/.notdef/.notdef/quoteleft/quoteright/quotedblleft/quotedblright/bullet /endash/emdash/tilde/trademark/scaron/guilsinglright/oe/.notdef/.notdef /Ydieresis/.notdef/exclamdown/cent/sterling/currency/yen/brokenbar/section /dieresis/copyright/ordfeminine/guillemotleft/logicalnot/.notdef/registered /macron/degree/plusminus/twosuperior/threesuperior/acute/mu/paragraph /periodcentered/cedilla/onesuperior/ordmasculine/guillemotright/onequarter /onehalf/threequarters/questiondown/Agrave/Aacute/Acircumflex/Atilde/Adieresis /Aring/AE/Ccedilla/Egrave/Eacute/Ecircumflex/Edieresis/Igrave/Iacute /Icircumflex/Idieresis/Eth/Ntilde/Ograve/Oacute/Ocircumflex/Otilde/Odieresis /multiply/Oslash/Ugrave/Uacute/Ucircumflex/Udieresis/Yacute/Thorn/germandbls /agrave/aacute/acircumflex/atilde/adieresis/aring/ae/ccedilla/egrave/eacute /ecircumflex/edieresis/igrave/iacute/icircumflex/idieresis/eth/ntilde/ograve 
/oacute/ocircumflex/otilde/odieresis/divide/oslash/ugrave/uacute/ucircumflex /udieresis/yacute/thorn/ydieresis]def EncodeDict/256 CharCol256Encoding put %%EndResource end reinitialize %%IncludeResource: font Times-Roman Pscript_WinNT_Incr begin %%BeginResource: file Pscript_Win_Euro_L2 5.0 0 /UseT3EuroFont{/currentdistillerparams where{pop currentdistillerparams /CoreDistVersion get 4000 le}{false}ifelse}bind def/NewEuroT3Font?{dup/FontType get 3 eq{dup/EuroFont known exch/BaseFont known and}{pop false}ifelse}bind def /T1FontHasEuro{dup/CharStrings known not{dup NewEuroT3Font?{dup/EuroGlyphName get exch/EuroFont get/CharStrings get exch known{true}{false}ifelse}{pop false} ifelse}{dup/FontType get 1 eq{/CharStrings get/Euro known}{dup/InfoDict known{ /InfoDict get/Euro known}{/CharStrings get/Euro known}ifelse}ifelse}ifelse}bind def/FontHasEuro{findfont dup/Blend known{pop true}{T1FontHasEuro}ifelse}bind def/EuroEncodingIdx 1 def/EuroFontHdr{12 dict begin/FontInfo 10 dict dup begin /version(001.000)readonly def/Notice(Copyright (c)1999 Adobe Systems Incorporated. 
All Rights Reserved.)readonly def/FullName(Euro)readonly def /FamilyName(Euro)readonly def/Weight(Regular)readonly def/isFixedPitch false def/ItalicAngle 0 def/UnderlinePosition -100 def/UnderlineThickness 50 def end readonly def/FontName/Euro def/Encoding 256 array 0 1 255{1 index exch/.notdef put}for def/PaintType 0 def/FontType 1 def/FontMatrix[0.001 0 0 0.001 0 0]def /FontBBox{-25 -23 1500 804}readonly def currentdict end dup/Private 20 dict dup begin/ND{def}def/NP{put}def/lenIV -1 def/RD{string currentfile exch readhexstring pop}def/-|{string currentfile exch readstring pop}executeonly def /|-{def}executeonly def/|{put}executeonly def/BlueValues[-20 0 706 736 547 572] |-/OtherBlues[-211 -203]|-/BlueScale 0.0312917 def/MinFeature{16 16}|-/StdHW [60]|-/StdVW[71]|-/ForceBold false def/password 5839 def/Erode{8.5 dup 3 -1 roll 0.1 mul exch 0.5 sub mul cvi sub dup mul 71 0 dtransform dup mul exch dup mul add le{pop pop 1.0 1.0}{pop pop 0.0 1.5}ifelse}def/OtherSubrs[{}{}{} {systemdict/internaldict known not{pop 3}{1183615869 systemdict/internaldict get exec dup/startlock known{/startlock get exec}{dup/strtlck known{/strtlck get exec}{pop 3}ifelse}ifelse}ifelse}executeonly]|-/Subrs 5 array dup 0 <8E8B0C100C110C110C210B>put dup 1<8B8C0C100B>put dup 2<8B8D0C100B>put dup 3<0B> put dup 4<8E8C8E0C100C110A0B>put |- 2 index/CharStrings 256 dict dup begin /.notdef<8b8b0d0e>def end end put put dup/FontName get exch definefont pop}bind def/AddEuroGlyph{2 index exch EuroEncodingIdx 1 eq{EuroFontHdr}if systemdict begin/Euro findfont dup dup/Encoding get 5 1 roll/Private get begin/CharStrings get dup 3 index known{pop pop pop pop end end}{begin 1 index exch def end end end EuroEncodingIdx dup 1 add/EuroEncodingIdx exch def exch put}ifelse}bind def /GetNewXUID{currentdict/XUID known{[7 XUID aload pop]true}{currentdict/UniqueID known{[7 UniqueID]true}{false}ifelse}ifelse}bind def/BuildT3EuroFont{exch 16 dict begin dup/FontName exch def findfont dup/Encoding get/Encoding exch def 
dup length 1 add dict copy dup/FID undef begin dup dup/FontName exch def /Encoding 256 array 0 1 255{1 index exch/.notdef put}for def GetNewXUID{/XUID exch def}if currentdict end definefont pop/BaseFont exch findfont 1000 scalefont def/EuroFont exch findfont 1000 scalefont def pop/EuroGlyphName exch def/FontType 3 def/FontMatrix[.001 0 0 .001 0 0]def/FontBBox BaseFont/FontBBox get def/Char 1 string def/BuildChar{exch dup begin/Encoding get 1 index get /Euro eq{BaseFont T1FontHasEuro{false}{true}ifelse}{false}ifelse{EuroFont setfont pop userdict/Idx 0 put EuroFont/Encoding get{EuroGlyphName eq{exit} {userdict/Idx Idx 1 add put}ifelse}forall userdict/Idx get}{dup dup Encoding exch get BaseFont/Encoding get 3 1 roll put BaseFont setfont}ifelse Char 0 3 -1 roll put Char stringwidth newpath 0 0 moveto Char true charpath flattenpath pathbbox setcachedevice 0 0 moveto Char show end}bind def currentdict end dup /FontName get exch definefont pop}bind def/AddEuroToT1Font{dup findfont dup length 10 add dict copy dup/FID undef begin/EuroFont 3 -1 roll findfont 1000 scalefont def CharStrings dup length 1 add dict copy begin/Euro{EuroFont setfont pop EuroGBBox aload pop setcachedevice 0 0 moveto EuroGName glyphshow} bind def currentdict end/CharStrings exch def GetNewXUID{/XUID exch def}if 3 1 roll/EuroGBBox exch def/EuroGName exch def currentdict end definefont pop}bind def/BuildNewFont{UseT3EuroFont{BuildT3EuroFont}{pop AddEuroToT1Font}ifelse}bind def/UseObliqueEuro{findfont/FontMatrix get dup 2 get 0 eq exch dup 0 get exch 3 get eq and UseT3EuroFont or}bind def %%EndResource end reinitialize /Times-Roman FontHasEuro not { /Euro.Times-Roman [500 0 24 -14 493 676 ] AddEuroGlyph /Euro /Times-Roman /Times-Roman-Copy BuildNewFont } if F /F0 0 /256 T /Times-Roman mF /F0S63 F0 [99.363 0 0 -99.363 0 0 ] mFS F0S63 Ji 469 89 M ( )S Pscript_WinNT_Incr begin %%BeginResource: file Pscript_Win_GdiObject 5.0 0 /SavedCTM null d/CTMsave{/SavedCTM SavedCTM currentmatrix d}b/CTMrestore 
{SavedCTM setmatrix}b/mp null d/ADO_mxRot null d/GDIHMatrix null d /GDIHPatternDict 22 dict d GDIHPatternDict `/PatternType 1 d/PaintType 2 d/Reps L2?{1}{5}? d/XStep 8 Reps mul d/YStep XStep d/BBox[0 0 XStep YStep]d/TilingType 1 d/PaintProc{` 1 Lw[]0 sd PaintData , exec E}b/FGnd null d/BGnd null d /HS_Horizontal{horiz}b/HS_Vertical{vert}b/HS_FDiagonal{fdiag}b/HS_BDiagonal {biag}b/HS_Cross{horiz vert}b/HS_DiagCross{fdiag biag}b/MaxXYStep XStep YStep gt{XStep}{YStep}? d/horiz{Reps{0 4 M XStep 0 - 0 8 +}repeat 0 -8 Reps mul + K}b /vert{Reps{4 0 M 0 YStep - 8 0 +}repeat 0 -8 Reps mul + K}b/biag{Reps{0 0 M MaxXYStep @ - 0 YStep neg M MaxXYStep @ - 0 8 +}repeat 0 -8 Reps mul + 0 YStep M 8 8 - K}b/fdiag{Reps{0 0 M MaxXYStep @ neg - 0 YStep M MaxXYStep @ neg - 0 8 +}repeat 0 -8 Reps mul + MaxXYStep @ M 8 -8 - K}b E/makehatch{4 -2 $/yOrg ~ d /xOrg ~ d GDIHPatternDict/PaintData 3 -1 $ put CTMsave GDIHMatrix setmatrix GDIHPatternDict matrix xOrg yOrg + mp CTMrestore ~ U ~ 2 ^ put}b/h0{/h0 /HS_Horizontal makehatch}b/h1{/h1/HS_Vertical makehatch}b/h2{/h2/HS_FDiagonal makehatch}b/h3{/h3/HS_BDiagonal makehatch}b/h4{/h4/HS_Cross makehatch}b/h5{/h5 /HS_DiagCross makehatch}b/GDIBWPatternMx null d/pfprep{save 8 1 $ /PatternOfTheDay 8 1 $ GDIBWPatternDict `/yOrg ~ d/xOrg ~ d/PaintData ~ d/yExt ~ d/Width ~ d/BGnd ~ d/FGnd ~ d/Height yExt RepsV mul d/mx[Width 0 0 Height 0 0]d E build_pattern ~ !}b/pfbf{/fEOFill ~ d pfprep hbf fEOFill{O}{L}? 
restore}b /GraphInit{GDIHMatrix null eq{/SavedCTM matrix d : ADO_mxRot concat 0 0 snap + : 0.48 @ GDIHPatternDict ` YStep mul ~ XStep mul ~ nonzero_dsnap YStep V ~ XStep V ~ E +S/GDIHMatrix matrix currentmatrix readonly d ; : 0.24 -0.24 +S GDIBWPatternDict ` Width Height E nonzero_dsnap +S/GDIBWPatternMx matrix currentmatrix readonly d ; ;}if}b %%EndResource %%BeginResource: file Pscript_Win_GdiObject_L2 5.0 0 /GDIBWPatternDict 25 dict @ `/PatternType 1 d/PaintType 1 d/RepsV 1 d/RepsH 1 d /BBox[0 0 RepsH 1]d/TilingType 1 d/XStep 1 d/YStep 1 d/Height 8 RepsV mul d /Width 8 d/mx[Width 0 0 Height neg 0 Height]d/FGnd null d/BGnd null d /SetBGndFGnd{BGnd null ne{BGnd aload ! scol BBox aload ! 2 ^ sub ~ 3 ^ sub ~ rf}if FGnd null ne{FGnd aload ! scol}if}b/PaintProc{` SetBGndFGnd RepsH{Width Height F mx PaintData imagemask Width 0 +}repeat E}b E d/mp/makepattern , d /build_pattern{CTMsave GDIBWPatternMx setmatrix/nupangle where{! nupangle -90 eq{nupangle R}if}if GDIBWPatternDict @ ` Width Height ne{Width Height gt{Width Height V 1}{1 Height Width V}? +S}if xOrg yOrg E matrix + mp CTMrestore}b/hbf {setpattern}b/hf{:/fEOFill ~ d ~ ! setpattern fEOFill{O}{L}? ;}b/pbf{: ! /fEOFill ~ d GDIBWPatternDict `/yOrg ~ d/xOrg ~ d/PaintData ~ d/OutputBPP ~ d /Height ~ d/Width ~ d/PaintType 1 d/PatternType 1 d/TilingType 1 d/BBox[0 0 Width Height]d/XStep Width d/YStep Height d/mx xOrg yOrg matrix + d 20 dict @ ` /ImageType 1 d/Width Width d/Height Height d/ImageMatrix[1 0 0 1 0 0]d /BitsPerComponent 8 d OutputBPP 24 eq{/Decode[0 1 0 1 0 1]d}{OutputBPP 8 eq{ /Decode[0 1]d}{/Decode[0 1 0 1 0 1 0 1]d}?}?/DataSource{PaintData}d E/ImageDict ~ d/PaintProc{` ImageDict image E}b & mx makepattern setpattern E fEOFill{O}{L} ? 
;}b/mask_pbf{:/fEOFill ~ d 20 dict `/yOrg ~ d/xOrg ~ d/PaintData ~ d/Height ~ d/Width ~ d/PatternType 1 d/PaintType 2 d/TilingType 1 d/BBox[0 0 Width Height] d/XStep Width d/YStep Height d/mx xOrg yOrg matrix + d/PaintProc{` Width Height T 1 1 dtransform abs ~ abs ~ 0 0 3 -1 $ 0 0 6 array astore{PaintData}imagemask E}b & mx makepattern setpattern E fEOFill{O}{L}? ;}b %%EndResource end reinitialize ; N 961 92 M 961 176 I 1463 176 I 1463 92 I 961 92 I C 0.68 0.848 0.902 1 scol O 0 0 0 1 scol 1 Lj 1 Lc 5 Lw solid N 961 92 M 961 176 I 1463 176 I 1463 92 I C : 1.289 1.289 +S K ; 4 Lw N 961 92 M 961 176 I 1463 176 I 1463 92 I C : 1.289 1.289 +S K ; N 961 147 M 1463 147 I : 1.289 1.289 +S K ; N 1215 209 M 1215 190 I : 1.289 1.289 +S K ; N 1215 77 M 1215 92 I : 1.289 1.289 +S K ; N 1087 209 M 1337 209 I : 1.289 1.289 +S K ; N 1087 77 M 1337 77 I : 1.289 1.289 +S K ; N 1215 302 M 1211 303 I 1208 303 I 1207 305 I 1204 307 I 1203 308 I 1202 311 I 1200 314 I 1200 316 I 1200 319 I 1202 321 I 1203 324 I 1204 327 I 1207 328 I 1208 329 I 1211 330 I 1215 330 I 1217 330 I 1220 329 I 1222 328 I 1224 327 I 1226 324 I 1228 321 I 1228 319 I 1229 316 I 1228 314 I 1228 311 I 1226 308 I 1224 307 I 1222 305 I 1220 303 I 1217 303 I 1215 302 I : 1.289 1.289 +S K ; N 1215 1336 M 1211 1336 I 1208 1337 I 1207 1338 I 1204 1340 I 1203 1342 I 1202 1345 I 1200 1347 I 1200 1350 I 1200 1352 I 1202 1355 I 1203 1358 I 1204 1360 I 1207 1361 I 1208 1363 I 1211 1364 I 1215 1364 I 1217 1364 I 1220 1363 I 1222 1361 I 1224 1360 I 1226 1358 I 1228 1355 I 1228 1352 I 1229 1350 I 1228 1347 I 1228 1345 I 1226 1342 I 1224 1340 I 1222 1338 I 1220 1337 I 1217 1336 I 1215 1336 I : 1.289 1.289 +S K ; N 1591 86 M 1591 129 I 2094 129 I 2094 86 I 1591 86 I C 0.68 0.848 0.902 1 scol O 0 0 0 1 scol 5 Lw N 1591 86 M 1591 129 I 2094 129 I 2094 86 I C : 1.289 1.289 +S K ; 4 Lw N 1591 86 M 1591 129 I 2094 129 I 2094 86 I C : 1.289 1.289 +S K ; N 1591 110 M 2094 110 I : 1.289 1.289 +S K ; N 1840 77 M 1840 86 I : 1.289 1.289 +S 
K ; N 1712 129 M 1966 129 I : 1.289 1.289 +S K ; N 1712 77 M 1966 77 I : 1.289 1.289 +S K ; N 1840 190 M 1837 190 I 1835 191 I 1832 192 I 1830 194 I 1828 196 I 1827 199 I 1826 201 I 1826 204 I 1826 207 I 1827 209 I 1828 212 I 1830 214 I 1832 216 I 1835 217 I 1837 218 I 1840 218 I 1843 218 I 1845 217 I 1848 216 I 1849 214 I 1852 212 I 1853 209 I 1853 207 I 1854 204 I 1853 201 I 1853 199 I 1852 196 I 1849 194 I 1848 192 I 1845 191 I 1843 190 I 1840 190 I : 1.289 1.289 +S K ; N 1840 218 M 1837 218 I 1835 219 I 1832 221 I 1830 222 I 1828 225 I 1827 226 I 1826 228 I 1826 232 I 1826 235 I 1827 237 I 1828 240 I 1830 241 I 1832 244 I 1835 245 I 1837 245 I 1840 247 I 1843 245 I 1845 245 I 1848 244 I 1849 241 I 1852 240 I 1853 237 I 1853 235 I 1854 232 I 1853 228 I 1853 226 I 1852 225 I 1849 222 I 1848 221 I 1845 219 I 1843 218 I 1840 218 I : 1.289 1.289 +S K ; N 1840 588 M 1837 590 I 1835 590 I 1832 591 I 1830 594 I 1828 595 I 1827 598 I 1826 600 I 1826 603 I 1826 605 I 1827 608 I 1828 610 I 1830 613 I 1832 614 I 1835 616 I 1837 617 I 1840 617 I 1843 617 I 1845 616 I 1848 614 I 1849 613 I 1852 610 I 1853 608 I 1853 605 I 1854 603 I 1853 600 I 1853 598 I 1852 595 I 1849 594 I 1848 591 I 1845 590 I 1843 590 I 1840 588 I : 1.289 1.289 +S K ; N 2216 92 M 2216 185 I 2719 185 I 2719 92 I 2216 92 I C 0.68 0.848 0.902 1 scol O 0 0 0 1 scol 5 Lw N 2216 92 M 2216 185 I 2719 185 I 2719 92 I C : 1.289 1.289 +S K ; 4 Lw N 2216 92 M 2216 185 I 2719 185 I 2719 92 I C : 1.289 1.289 +S K ; N 2216 101 M 2719 101 I : 1.289 1.289 +S K ; N 2469 232 M 2469 213 I : 1.289 1.289 +S K ; N 2469 195 M 2469 185 I : 1.289 1.289 +S K ; N 2469 77 M 2469 92 I : 1.289 1.289 +S K ; N 2343 232 M 2597 232 I : 1.289 1.289 +S K ; N 2343 77 M 2597 77 I : 1.289 1.289 +S K ; N 2469 327 M 2467 327 I 2464 327 I 2461 328 I 2460 330 I 2458 332 I 2456 334 I 2456 337 I 2455 339 I 2456 343 I 2456 346 I 2458 348 I 2460 350 I 2461 352 I 2464 354 I 2467 354 I 2469 354 I 2473 354 I 2476 354 I 2477 352 I 2479 350 I 2481 348 I 
2482 346 I 2483 343 I 2483 339 I 2483 337 I 2482 334 I 2481 332 I 2479 330 I 2477 328 I 2476 327 I 2473 327 I 2469 327 I : 1.289 1.289 +S K ; N 2469 579 M 2467 579 I 2464 581 I 2461 582 I 2460 583 I 2458 586 I 2456 588 I 2456 591 I 2455 594 I 2456 596 I 2456 599 I 2458 601 I 2460 604 I 2461 605 I 2464 607 I 2467 608 I 2469 608 I 2473 608 I 2476 607 I 2477 605 I 2479 604 I 2481 601 I 2482 599 I 2483 596 I 2483 594 I 2483 591 I 2482 588 I 2481 586 I 2479 583 I 2477 582 I 2476 581 I 2473 579 I 2469 579 I : 1.289 1.289 +S K ; N 2846 77 M 2846 77 I 3348 77 I 3348 77 I 2846 77 I C 0.68 0.848 0.902 1 scol O 0 0 0 1 scol 5 Lw N 2846 77 M 2846 77 I 3348 77 I 3348 77 I C : 1.289 1.289 +S K ; 4 Lw N 2846 77 M 3348 77 I 2846 77 I 3348 77 I : 1.289 1.289 +S K ; N 2973 77 M 3222 77 I : 1.289 1.289 +S K ; N 2973 77 M 3222 77 I : 1.289 1.289 +S K ; N 3094 72 M 3092 72 I 3089 74 I 3087 75 I 3085 76 I 3083 79 I 3082 81 I 3082 84 I 3080 86 I 3082 89 I 3082 92 I 3083 94 I 3085 97 I 3087 98 I 3089 99 I 3092 101 I 3094 101 I 3098 101 I 3101 99 I 3102 98 I 3105 97 I 3106 94 I 3107 92 I 3109 89 I 3109 86 I 3109 84 I 3107 81 I 3106 79 I 3105 76 I 3102 75 I 3101 74 I 3098 72 I 3094 72 I : 1.289 1.289 +S K ; N 3094 170 M 3092 172 I 3089 172 I 3087 173 I 3085 176 I 3083 177 I 3082 179 I 3082 182 I 3080 185 I 3082 188 I 3082 191 I 3083 192 I 3085 195 I 3087 196 I 3089 197 I 3092 199 I 3094 199 I 3098 199 I 3101 197 I 3102 196 I 3105 195 I 3106 192 I 3107 191 I 3109 188 I 3109 185 I 3109 182 I 3107 179 I 3106 177 I 3105 176 I 3102 173 I 3101 172 I 3098 172 I 3094 170 I : 1.289 1.289 +S K ; N 1215 1401 M 1215 1443 I : 1.289 1.289 +S K ; N 1840 1401 M 1840 1443 I : 1.289 1.289 +S K ; N 2469 1401 M 2469 1443 I : 1.289 1.289 +S K ; N 3094 1401 M 3094 1443 I : 1.289 1.289 +S K ; %%IncludeResource: font Helvetica /Helvetica FontHasEuro not { /Euro.Helvetica [556 0 24 -19 541 703 ] AddEuroGlyph /Euro /Helvetica /Helvetica-Copy BuildNewFont } if F /F1 0 /256 T /Helvetica mF /F1S4A F1 [74.844 0 0 
-74.844 0 0 ] mFS F1S4A Ji 1132 1567 M (OOB)[58 58 0]xS : 1298 1497 30 94 rc F0S63 Ji 1298 1567 M ( )S ; 1681 1567 M (Test 33%)[47 41 37 21 21 41 41 0]xS : 1997 1497 27 94 rc F0S63 Ji 1997 1567 M ( )S ; 2312 1567 M (Test 20%)[46 42 37 21 20 41 42 0]xS : 2628 1497 27 94 rc F0S63 Ji 2628 1567 M ( )S ; 2940 1567 M (5)S 2981 1567 M (-)S 3005 1567 M (fold CV)[21 41 17 41 21 54 0]xS : 3250 1497 39 94 rc F0S63 Ji 3250 1567 M ( )S ; N 794 1392 M 752 1392 I : 1.289 1.289 +S K ; N 794 1063 M 752 1063 I : 1.289 1.289 +S K ; N 794 734 M 752 734 I : 1.289 1.289 +S K ; N 794 405 M 752 405 I : 1.289 1.289 +S K ; N 794 77 M 752 77 I : 1.289 1.289 +S K ; /F1S00IFFFFFFB5 F1 [0 -74.844 -74.844 0 0 0 ] mFS F1S00IFFFFFFB5 Ji 712 1443 M (0.2)[-42 -20 0]yS : 641 1315 95 26 rc /F0S00IFFFFFF9C F0 [0 -99.363 -99.363 0 0 0 ] mFS F0S00IFFFFFF9C Ji 712 1340 M ( )S ; 712 1114 M (0.4)[-42 -20 0]yS : 641 986 95 25 rc F0S00IFFFFFF9C Ji 712 1010 M ( )S ; 712 786 M (0.6)[-41 -21 0]yS : 641 658 95 26 rc F0S00IFFFFFF9C Ji 712 683 M ( )S ; 712 457 M (0.8)[-41 -21 0]yS : 641 329 95 26 rc F0S00IFFFFFF9C Ji 712 354 M ( )S ; 712 128 M (1.0)[-42 -20 0]yS : 641 0 95 26 rc F0S00IFFFFFF9C Ji 712 25 M ( )S ; F1S4A Ji 1416 1734 M (Method for selecting the number of iterations)[62 41 21 41 41 43 20 21 41 25 20 39 41 17 41 38 20 17 41 41 22 21 41 42 20 43 41 63 41 42 24 21 41 22 21 16 21 41 26 41 21 17 41 41 0]xS : 2895 1665 47 93 rc F0S63 Ji 2895 1734 M ( )S ; F1S00IFFFFFFB5 Ji 543 1210 M (Performance over 13 datasets)[-50 -41 -25 -20 -42 -24 -63 -42 -41 -37 -42 -20 -42 -38 -42 -24 -21 -42 -42 -20 -42 -41 -22 -41 -38 -41 -21 0]yS : 472 162 95 48 rc F0S00IFFFFFF9C Ji 543 209 M ( )S ; 5 Lw : 789 0 2573 1406 rc N 794 4 M 794 1400 I 3356 1400 I 3356 4 I C : 1.289 1.289 +S K ; ; LH %%PageTrailer %%Trailer %%DocumentNeededResources: %%+ font Times-Roman %%+ font Helvetica %%DocumentSuppliedResources: %%+ procset Pscript_WinNT_ErrorHandler 5.0 0 %%+ procset Pscript_FatalError 5.0 0 %%+ procset Pscript_Win_Basic 5.0 0 
%%+ procset Pscript_Win_Utils_L2 5.0 0 %%+ procset Pscript_Text 5.0 0 %%+ procset Pscript_Encoding256 5.0 0 %%+ procset Pscript_Win_Euro_L2 5.0 0 %%+ procset Pscript_Win_GdiObject 5.0 0 %%+ procset Pscript_Win_GdiObject_L2 5.0 0 Pscript_WinNT_Incr dup /terminate get exec %%EOF gbm/inst/doc/index.html0000644000176200001440000000135112102666411014535 0ustar liggesusers R: Vignettes

Vignettes


[Top]

Vignettes from package 'gbm'

gbm/inst/doc/gbm.tex0000644000176200001440000007567012102666411014046 0ustar liggesusers% setwd("c:/dev/gbm/inst/doc") % Sweave("gbm.rnw"); system("texify gbm.tex"); system("c:\\MiKTeX\\texmf\\miktex\\bin\\yap.exe gbm.dvi",wait=FALSE) \documentclass{article} \bibliographystyle{plain} \usepackage[active]{srcltx} \newcommand{\EV}{\mathrm{E}} \newcommand{\Var}{\mathrm{Var}} \newcommand{\aRule}{\begin{center} \rule{5in}{1mm} \end{center}} \title{Generalized Boosted Models:\\A guide to the gbm package} \author{Greg Ridgeway} %\VignetteIndexEntry{Generalized Boosted Models: A guide to the gbm package} \newcommand{\mathgbf}[1]{{\mbox{\boldmath$#1$\unboldmath}}} \usepackage{Sweave} \begin{document} \maketitle Boosting takes on various forms with different programs using different loss functions, different base models, and different optimization schemes. The gbm package takes the approach described in \cite{Friedman:2001} and \cite{Friedman:2002}. Some of the terminology differs, mostly due to an effort to cast boosting terms into more standard statistical terminology (e.g. deviance). In addition, the gbm package implements boosting for models commonly used in statistics but not commonly associated with boosting. The Cox proportional hazard model, for example, is an incredibly useful model and the boosting framework applies quite readily with only slight modification \cite{Ridgeway:1999}. Also some algorithms implemented in the gbm package differ from the standard implementation. The AdaBoost algorithm \cite{FreundSchapire:1997} has a particular loss function and a particular optimization algorithm associated with it. The gbm implementation of AdaBoost adopts AdaBoost's exponential loss function (its bound on misclassification rate) but uses Friedman's gradient descent algorithm rather than the original one proposed. So the main purpose of this document is to spell out in detail what the gbm package implements. 
\section{Gradient boosting} This section essentially presents the derivation of boosting described in \cite{Friedman:2001}. The gbm package also adopts the stochastic gradient boosting strategy, a small but important tweak on the basic algorithm, described in \cite{Friedman:2002}. \subsection{Friedman's gradient boosting machine} \label{sec:GradientBoostingMachine} \begin{figure} \aRule Initialize $\hat f(\mathbf{x})$ to be a constant, $\hat f(\mathbf{x}) = \arg \min_{\rho} \sum_{i=1}^N \Psi(y_i,\rho)$. \\ For $t$ in $1,\ldots,T$ do \begin{enumerate} \item Compute the negative gradient as the working response \begin{equation} z_i = -\frac{\partial}{\partial f(\mathbf{x}_i)} \Psi(y_i,f(\mathbf{x}_i)) \mbox{\Huge $|$}_{f(\mathbf{x}_i)=\hat f(\mathbf{x}_i)} \end{equation} \item Fit a regression model, $g(\mathbf{x})$, predicting $z_i$ from the covariates $\mathbf{x}_i$. \item Choose a gradient descent step size as \begin{equation} \rho = \arg \min_{\rho} \sum_{i=1}^N \Psi(y_i,\hat f(\mathbf{x}_i)+\rho g(\mathbf{x}_i)) \end{equation} \item Update the estimate of $f(\mathbf{x})$ as \begin{equation} \hat f(\mathbf{x}) \leftarrow \hat f(\mathbf{x}) + \rho g(\mathbf{x}) \end{equation} \end{enumerate} \aRule \caption{Friedman's Gradient Boost algorithm} \label{fig:GradientBoost} \end{figure} Friedman (2001) and the companion paper Friedman (2002) extended the work of Friedman, Hastie, and Tibshirani (2000) and laid the ground work for a new generation of boosting algorithms. Using the connection between boosting and optimization, this new work proposes the Gradient Boosting Machine. In any function estimation problem we wish to find a regression function, $\hat f(\mathbf{x})$, that minimizes the expectation of some loss function, $\Psi(y,f)$, as shown in (\ref{NonparametricRegression1}). 
\begin{eqnarray} \hspace{0.5in} \hat f(\mathbf{x}) &=& \arg \min_{f(\mathbf{x})} \EV_{y,\mathbf{x}} \Psi(y,f(\mathbf{x})) \nonumber \\ \label{NonparametricRegression1} &=& \arg \min_{f(\mathbf{x})} \EV_x \left[ \EV_{y|\mathbf{x}} \Psi(y,f(\mathbf{x})) \Big| \mathbf{x} \right] \end{eqnarray} We will focus on finding estimates of $f(\mathbf{x})$ such that \begin{equation} \label{NonparametricRegression2} \hspace{0.5in} \hat f(\mathbf{x}) = \arg \min_{f(\mathbf{x})} \EV_{y|\mathbf{x}} \left[ \Psi(y,f(\mathbf{x}))|\mathbf{x} \right] \end{equation} Parametric regression models assume that $f(\mathbf{x})$ is a function with a finite number of parameters, $\beta$, and estimates them by selecting those values that minimize a loss function (e.g. squared error loss) over a training sample of $N$ observations on $(y,\mathbf{x})$ pairs as in (\ref{eq:Friedman1}). \begin{equation} \label{eq:Friedman1} \hspace{0.5in} \hat\beta = \arg \min_{\beta} \sum_{i=1}^N \Psi(y_i,f(\mathbf{x}_i;\beta)) \end{equation} When we wish to estimate $f(\mathbf{x})$ non-parametrically the task becomes more difficult. Again we can proceed similarly to \cite{FHT:2000} and modify our current estimate of $f(\mathbf{x})$ by adding a new function $f(\mathbf{x})$ in a greedy fashion. Letting $f_i = f(\mathbf{x}_i)$, we see that we want to decrease the $N$ dimensional function \begin{eqnarray} \label{EQ:Friedman2} \hspace{0.5in} J(\mathbf{f}) &=& \sum_{i=1}^N \Psi(y_i,f(\mathbf{x}_i)) \nonumber \\ &=& \sum_{i=1}^N \Psi(y_i,f_i). \end{eqnarray} The negative gradient of $J(\mathbf{f})$ indicates the direction of the locally greatest decrease in $J(\mathbf{f})$. Gradient descent would then have us modify $\mathbf{f}$ as \begin{equation} \label{eq:Friedman3} \hspace{0.5in} \hat \mathbf{f} \leftarrow \hat \mathbf{f} - \rho \nabla J(\mathbf{f}) \end{equation} where $\rho$ is the size of the step along the direction of greatest descent. Clearly, this step alone is far from our desired goal. 
First, it only fits $f$ at values of $\mathbf{x}$ for which we have observations. Second, it does not take into account that observations with similar $\mathbf{x}$ are likely to have similar values of $f(\mathbf{x})$. Both these problems would have disastrous effects on generalization error. However, Friedman suggests selecting a class of functions that use the covariate information to approximate the gradient, usually a regression tree. This line of reasoning produces his Gradient Boosting algorithm shown in Figure~\ref{fig:GradientBoost}. At each iteration the algorithm determines the direction, the gradient, in which it needs to improve the fit to the data and selects a particular model from the allowable class of functions that is in most agreement with the direction. In the case of squared-error loss, $\Psi(y_i,f(\mathbf{x}_i)) = \sum_{i=1}^N (y_i-f(\mathbf{x}_i))^2$, this algorithm corresponds exactly to residual fitting. There are various ways to extend and improve upon the basic framework suggested in Figure~\ref{fig:GradientBoost}. For example, Friedman (2001) substituted several choices in for $\Psi$ to develop new boosting algorithms for robust regression with least absolute deviation and Huber loss functions. Friedman (2002) showed that a simple subsampling trick can greatly improve predictive performance while simultaneously reducing computation time. Section~\ref{GBMModifications} discusses some of these modifications. \section{Improving boosting methods using control of the learning rate, sub-sampling, and a decomposition for interpretation} \label{GBMModifications} This section explores the variations of the previous algorithms that have the potential to improve their predictive performance and interpretability. In particular, by controlling the optimization speed or learning rate, introducing low-variance regression methods, and applying ideas from robust regression we can produce non-parametric regression procedures with many desirable properties. 
As a by-product some of these modifications lead directly into implementations for learning from massive datasets. All these methods take advantage of the general form of boosting \begin{equation} \hat f(\mathbf{x}) \leftarrow \hat f(\mathbf{x}) + \EV(z(y,\hat f(\mathbf{x}))|\mathbf{x}). \end{equation} So far we have taken advantage of this form only by substituting in our favorite regression procedure for $\EV_w(z|\mathbf{x})$. I will discuss some modifications to estimating $\EV_w(z|\mathbf{x})$ that have the potential to improve our algorithm. \subsection{Decreasing the learning rate} As several authors have phrased slightly differently, ``...boosting, whatever flavor, seldom seems to overfit, no matter how many terms are included in the additive expansion''. This is not true as the discussion to \cite{FHT:2000} points out. In the update step of any boosting algorithm we can introduce a learning rate to dampen the proposed move. \begin{equation} \label{eq:shrinkage} \hat f(\mathbf{x}) \leftarrow \hat f(\mathbf{x}) + \lambda \EV(z(y,\hat f(\mathbf{x}))|\mathbf{x}). \end{equation} By multiplying the gradient step by $\lambda$ as in equation~\ref{eq:shrinkage} we have control on the rate at which the boosting algorithm descends the error surface (or ascends the likelihood surface). When $\lambda=1$ we return to performing full gradient steps. Friedman (2001) relates the learning rate to regularization through shrinkage. The optimal number of iterations, $T$, and the learning rate, $\lambda$, depend on each other. In practice I set $\lambda$ to be as small as possible and then select $T$ by cross-validation. Performance is best when $\lambda$ is as small as possible, with decreasing marginal utility for smaller and smaller $\lambda$. Slower learning rates do not necessarily scale the number of optimal iterations. 
That is, if the optimal $T$ is 100 iterations when $\lambda=1.0$, that does {\it not} necessarily imply that when $\lambda=0.1$ the optimal $T$ is 1000 iterations. \subsection{Variance reduction using subsampling} Friedman (2002) proposed the stochastic gradient boosting algorithm that simply samples uniformly without replacement from the dataset before estimating the next gradient step. He found that this additional step greatly improved performance. We estimate the regression $\EV(z(y,\hat f(\mathbf{x}))|\mathbf{x})$ using a random subsample of the dataset. \subsection{ANOVA decomposition} Certain function approximation methods are decomposable in terms of a ``functional ANOVA decomposition''. That is a function is decomposable as \begin{equation} \label{ANOVAdecomp} f(\mathbf{x}) = \sum_j f_j(x_j) + \sum_{jk} f_{jk}(x_j,x_k) + \sum_{jk\ell} f_{jk\ell}(x_j,x_k,x_\ell) + \cdots. \end{equation} This applies to boosted trees. Regression stumps (one split decision trees) depend on only one variable and fall into the first term of \ref{ANOVAdecomp}. Trees with two splits fall into the second term of \ref{ANOVAdecomp} and so on. By restricting the depth of the trees produced on each boosting iteration we can control the order of approximation. Often additive components are sufficient to approximate a multivariate function well, generalized additive models, the na\"{\i}ve Bayes classifier, and boosted stumps for example. When the approximation is restricted to a first order we can also produce plots of $x_j$ versus $f_j(x_j)$ to demonstrate how changes in $x_j$ might affect changes in the response variable. \subsection{Relative influence} Friedman (2001) also develops an extension of a variable's ``relative influence'' for boosted estimates. 
For tree based methods the approximate relative influence of a variable $x_j$ is \begin{equation} \label{RelInfluence} \hspace{0.5in} \hat J_j^2 = \hspace{-0.1in}\sum_{\mathrm{splits~on~}x_j}\hspace{-0.2in}I_t^2 \end{equation} where $I_t^2$ is the empirical improvement by splitting on $x_j$ at that point. Friedman's extension to boosted models is to average the relative influence of variable $x_j$ across all the trees generated by the boosting algorithm. \begin{figure} \aRule Select \begin{itemize} \item a loss function (\texttt{distribution}) \item the number of iterations, $T$ (\texttt{n.trees}) \item the depth of each tree, $K$ (\texttt{interaction.depth}) \item the shrinkage (or learning rate) parameter, $\lambda$ (\texttt{shrinkage}) \item the subsampling rate, $p$ (\texttt{bag.fraction}) \end{itemize} Initialize $\hat f(\mathbf{x})$ to be a constant, $\hat f(\mathbf{x}) = \arg \min_{\rho} \sum_{i=1}^N \Psi(y_i,\rho)$ \\ For $t$ in $1,\ldots,T$ do \begin{enumerate} \item Compute the negative gradient as the working response \begin{equation} z_i = -\frac{\partial}{\partial f(\mathbf{x}_i)} \Psi(y_i,f(\mathbf{x}_i)) \mbox{\Huge $|$}_{f(\mathbf{x}_i)=\hat f(\mathbf{x}_i)} \end{equation} \item Randomly select $p\times N$ cases from the dataset \item Fit a regression tree with $K$ terminal nodes, $g(\mathbf{x})=\EV(z|\mathbf{x})$. This tree is fit using only those randomly selected observations \item Compute the optimal terminal node predictions, $\rho_1,\ldots,\rho_K$, as \begin{equation} \rho_k = \arg \min_{\rho} \sum_{\mathbf{x}_i\in S_k} \Psi(y_i,\hat f(\mathbf{x}_i)+\rho) \end{equation} where $S_k$ is the set of $\mathbf{x}$s that define terminal node $k$. Again this step uses only the randomly selected observations. 
\item Update $\hat f(\mathbf{x})$ as \begin{equation} \hat f(\mathbf{x}) \leftarrow \hat f(\mathbf{x}) + \lambda\rho_{k(\mathbf{x})} \end{equation} where $k(\mathbf{x})$ indicates the index of the terminal node into which an observation with features $\mathbf{x}$ would fall. \end{enumerate} \aRule \caption{Boosting as implemented in \texttt{gbm()}} \label{fig:gbm} \end{figure} \section{Common user options} This section discusses the options to gbm that most users will need to change or tune. \subsection{Loss function} The first and foremost choice is \texttt{distribution}. This should be easily dictated by the application. For most classification problems either \texttt{bernoulli} or \texttt{adaboost} will be appropriate, the former being recommended. For continuous outcomes the choices are \texttt{gaussian} (for minimizing squared error), \texttt{laplace} (for minimizing absolute error), and quantile regression (for estimating percentiles of the conditional distribution of the outcome). Censored survival outcomes should require \texttt{coxph}. Count outcomes may use \texttt{poisson} although one might also consider \texttt{gaussian} or \texttt{laplace} depending on the analytical goals. \subsection{The relationship between shrinkage and number of iterations} The issues that most new users of gbm struggle with are the choice of \texttt{n.trees} and \texttt{shrinkage}. It is important to know that smaller values of \texttt{shrinkage} (almost) always give improved predictive performance. That is, setting \texttt{shrinkage=0.001} will almost certainly result in a model with better out-of-sample predictive performance than setting \texttt{shrinkage=0.01}. However, there are computational costs, both storage and CPU time, associated with setting \texttt{shrinkage} to be low. The model with \texttt{shrinkage=0.001} will likely require ten times as many iterations as the model with \texttt{shrinkage=0.01}, increasing storage and computation time by a factor of 10. 
Figure~\ref{fig:shrinkViters} shows the relationship between predictive performance, the number of iterations, and the shrinkage parameter. Note that the increase in the optimal number of iterations between two choices for shrinkage is roughly equal to the ratio of the shrinkage parameters. It is generally the case that for small shrinkage parameters, 0.001 for example, there is a fairly long plateau in which predictive performance is at its best. My rule of thumb is to set \texttt{shrinkage} as small as possible while still being able to fit the model in a reasonable amount of time and storage. I usually aim for 3,000 to 10,000 iterations with shrinkage rates between 0.01 and 0.001. \begin{figure}[ht] \begin{center} \includegraphics[width=5in]{shrinkage-v-iterations} \end{center} \caption{Out-of-sample predictive performance by number of iterations and shrinkage. Smaller values of the shrinkage parameter offer improved predictive performance, but with decreasing marginal improvement.} \label{fig:shrinkViters} \end{figure} \subsection{Estimating the optimal number of iterations} gbm offers three methods for estimating the optimal number of iterations after the gbm model has been fit, an independent test set (\texttt{test}), out-of-bag estimation (\texttt{OOB}), and $v$-fold cross validation (\texttt{cv}). The function \texttt{gbm.perf} computes the iteration estimate. Like Friedman's MART software, the independent test set method uses a single holdout test set to select the optimal number of iterations. If \texttt{train.fraction} is set to be less than 1, then only the \textit{first} \texttt{train.fraction}$\times$\texttt{nrow(data)} will be used to fit the model. Note that if the data are sorted in a systematic way (such as cases for which $y=1$ come first), then the data should be shuffled before running gbm. Those observations not used in the model fit can be used to get an unbiased estimate of the optimal number of iterations. 
The downside of this method is that a considerable number of observations are used to estimate the single regularization parameter (number of iterations) leaving a reduced dataset for estimating the entire multivariate model structure. Use \texttt{gbm.perf(...,method="test")} to obtain an estimate of the optimal number of iterations using the held out test set. If \texttt{bag.fraction} is set to be greater than 0 (0.5 is recommended), gbm computes an out-of-bag estimate of the improvement in predictive performance. It evaluates the reduction in deviance on those observations not used in selecting the next regression tree. The out-of-bag estimator underestimates the reduction in deviance. As a result, it almost always is too conservative in its selection for the optimal number of iterations. The motivation behind this method was to avoid having to set aside a large independent dataset, which reduces the information available for learning the model structure. Use \texttt{gbm.perf(...,method="OOB")} to obtain the OOB estimate. Lastly, gbm offers $v$-fold cross validation for estimating the optimal number of iterations. If when fitting the gbm model, \texttt{cv.folds=5} then gbm will do 5-fold cross validation. gbm will fit five gbm models in order to compute the cross validation error estimate and then will fit a sixth and final gbm model with \texttt{n.trees}iterations using all of the data. The returned model object will have a component labeled \texttt{cv.error}. Note that \texttt{gbm.more} will do additional gbm iterations but will not add to the \texttt{cv.error} component. Use \texttt{gbm.perf(...,method="cv")} to obtain the cross validation estimate. \begin{figure}[ht] \begin{center} \includegraphics[width=5in]{oobperf2} \end{center} \caption{Out-of-sample predictive performance of four methods of selecting the optimal number of iterations. The vertical axis plots performance relative the best. 
The boxplots indicate relative performance across thirteen real datasets from the UCI repository. See \texttt{demo(OOB-reps)}.} \label{fig:oobperf} \end{figure} Figure~\ref{fig:oobperf} compares the three methods for estimating the optimal number of iterations across 13 datasets. The boxplots show the methods performance relative to the best method on that dataset. For most datasets the method perform similarly, however, 5-fold cross validation is consistently the best of them. OOB, using a 33\% test set, and using a 20\% test set all have datasets for which the perform considerably worse than the best method. My recommendation is to use 5- or 10-fold cross validation if you can afford the computing time. Otherwise you may choose among the other options, knowing that OOB is conservative. \section{Available distributions} This section gives some of the mathematical detail for each of the distribution options that gbm offers. The gbm engine written in C++ has access to a C++ class for each of these distributions. Each class contains methods for computing the associated deviance, initial value, the gradient, and the constants to predict in each terminal node. In the equations shown below, for non-zero offset terms, replace $f(\mathbf{x}_i)$ with $o_i + f(\mathbf{x}_i)$. 
\subsection{Gaussian} \begin{tabular}{ll} Deviance & $\displaystyle \frac{1}{\sum w_i} \sum w_i(y_i-f(\mathbf{x}_i))^2$ \\ Initial value & $\displaystyle f(\mathbf{x})=\frac{\sum w_i(y_i-o_i)}{\sum w_i}$ \\ Gradient & $z_i=y_i - f(\mathbf{x}_i)$ \\ Terminal node estimates & $\displaystyle \frac{\sum w_i(y_i-f(\mathbf{x}_i))}{\sum w_i}$ \end{tabular} \subsection{AdaBoost} \begin{tabular}{ll} Deviance & $\displaystyle \frac{1}{\sum w_i} \sum w_i\exp(-(2y_i-1)f(\mathbf{x}_i))$ \\ Initial value & $\displaystyle \frac{1}{2}\log\frac{\sum y_iw_ie^{-o_i}}{\sum (1-y_i)w_ie^{o_i}}$ \\ Gradient & $\displaystyle z_i= -(2y_i-1)\exp(-(2y_i-1)f(\mathbf{x}_i))$ \\ Terminal node estimates & $\displaystyle \frac{\sum (2y_i-1)w_i\exp(-(2y_i-1)f(\mathbf{x}_i))} {\sum w_i\exp(-(2y_i-1)f(\mathbf{x}_i))}$ \end{tabular} \subsection{Bernoulli} \begin{tabular}{ll} Deviance & $\displaystyle -2\frac{1}{\sum w_i} \sum w_i(y_if(\mathbf{x}_i)-\log(1+\exp(f(\mathbf{x}_i))))$ \\ Initial value & $\displaystyle \log\frac{\sum w_iy_i}{\sum w_i(1-y_i)}$ \\ Gradient & $\displaystyle z_i=y_i-\frac{1}{1+\exp(-f(\mathbf{x}_i))}$ \\ Terminal node estimates & $\displaystyle \frac{\sum w_i(y_i-p_i)}{\sum w_ip_i(1-p_i)}$ \\ & where $\displaystyle p_i = \frac{1}{1+\exp(-f(\mathbf{x}_i))}$ \\ \end{tabular} Notes: \begin{itemize} \item For non-zero offset terms, the computation of the initial value requires Newton-Raphson. Initialize $f_0=0$ and iterate $\displaystyle f_0 \leftarrow f_0 + \frac{\sum w_i(y_i-p_i)}{\sum w_ip_i(1-p_i)}$ where $\displaystyle p_i = \frac{1}{1+\exp(-(o_i+f_0))}$. 
\end{itemize} \subsection{Laplace} \begin{tabular}{ll} Deviance & $\frac{1}{\sum w_i} \sum w_i|y_i-f(\mathbf{x}_i)|$ \\ Initial value & $\mbox{median}_w(y)$ \\ Gradient & $z_i=\mbox{sign}(y_i-f(\mathbf{x}_i))$ \\ Terminal node estimates & $\mbox{median}_w(z)$ \end{tabular} Notes: \begin{itemize} \item $\mbox{median}_w(y)$ denotes the weighted median, defined as the solution to the equation $\frac{\sum w_iI(y_i\leq m)}{\sum w_i}=\frac{1}{2}$ \item \texttt{gbm()} currently does not implement the weighted median and issues a warning when the user uses weighted data with \texttt{distribution="laplace"}. \end{itemize} \subsection{Quantile regression} Contributed by Brian Kriegler (see \cite{Kriegler:2010}). \begin{tabular}{ll} Deviance & $\frac{1}{\sum w_i} \left(\alpha\sum_{y_i>f(\mathbf{x}_i)} w_i(y_i-f(\mathbf{x}_i))\right. +$ \\ & \hspace{0.5in}$\left.(1-\alpha)\sum_{y_i\leq f(\mathbf{x}_i)} w_i(f(\mathbf{x}_i)-y_i)\right)$ \\ Initial value & $\mathrm{quantile}^{(\alpha)}_w(y)$ \\ Gradient & $z_i=\alpha I(y_i>f(\mathbf{x}_i))-(1-\alpha)I(y_i\leq f(\mathbf{x}_i))$ \\ Terminal node estimates & $\mathrm{quantile}^{(\alpha)}_w(z)$ \end{tabular} Notes: \begin{itemize} \item $\mathrm{quantile}^{(\alpha)}_w(y)$ denotes the weighted quantile, defined as the solution to the equation $\frac{\sum w_iI(y_i\leq q)}{\sum w_i}=\alpha$ \item \texttt{gbm()} currently does not implement the weighted median and issues a warning when the user uses weighted data with \texttt{distribution=list(name="quantile")}. 
\end{itemize} \subsection{Cox Proportional Hazard} \begin{tabular}{ll} Deviance & $-2\sum w_i(\delta_i(f(\mathbf{x}_i)-\log(R_i/w_i)))$\\ Gradient & $\displaystyle z_i=\delta_i - \sum_j \delta_j \frac{w_jI(t_i\geq t_j)e^{f(\mathbf{x}_i)}} {\sum_k w_kI(t_k\geq t_j)e^{f(\mathbf{x}_k)}}$ \\ Initial value & 0 \\ Terminal node estimates & Newton-Raphson algorithm \end{tabular} \begin{enumerate} \item Initialize the terminal node predictions to 0, $\mathgbf{\rho}=0$ \item Let $\displaystyle p_i^{(k)}=\frac{\sum_j I(k(j)=k)I(t_j\geq t_i)e^{f(\mathbf{x}_i)+\rho_k}} {\sum_j I(t_j\geq t_i)e^{f(\mathbf{x}_i)+\rho_k}}$ \item Let $g_k=\sum w_i\delta_i\left(I(k(i)=k)-p_i^{(k)}\right)$ \item Let $\mathbf{H}$ be a $k\times k$ matrix with diagonal elements \begin{enumerate} \item Set diagonal elements $H_{mm}=\sum w_i\delta_i p_i^{(m)}\left(1-p_i^{(m)}\right)$ \item Set off diagonal elements $H_{mn}=-\sum w_i\delta_i p_i^{(m)}p_i^{(n)}$ \end{enumerate} \item Newton-Raphson update $\mathgbf{\rho} \leftarrow \mathgbf{\rho} - \mathbf{H}^{-1}\mathbf{g}$ \item Return to step 2 until convergence \end{enumerate} Notes: \begin{itemize} \item $t_i$ is the survival time and $\delta_i$ is the death indicator. \item $R_i$ denotes the hazard for the risk set, $R_i=\sum_{j=1}^N w_jI(t_j\geq t_i)e^{f(\mathbf{x}_i)}$ \item $k(i)$ indexes the terminal node of observation $i$ \item For speed, \texttt{gbm()} does only one step of the Newton-Raphson algorithm rather than iterating to convergence. No appreciable loss of accuracy since the next boosting iteration will simply correct for the prior iterations inadequacy. \item \texttt{gbm()} initially sorts the data by survival time. Doing this reduces the computation of the risk set from $O(n^2)$ to $O(n)$ at the cost of a single up front sort on survival time. After the model is fit, the data are then put back in their original order. 
\end{itemize} \subsection{Poisson} \begin{tabular}{ll} Deviance & -2$\frac{1}{\sum w_i} \sum w_i(y_if(\mathbf{x}_i)-\exp(f(\mathbf{x}_i)))$ \\ Initial value & $\displaystyle f(\mathbf{x})= \log\left(\frac{\sum w_iy_i}{\sum w_ie^{o_i}}\right)$ \\ Gradient & $z_i=y_i - \exp(f(\mathbf{x}_i))$ \\ Terminal node estimates & $\displaystyle \log\frac{\sum w_iy_i}{\sum w_i\exp(f(\mathbf{x}_i))}$ \end{tabular} The Poisson class includes special safeguards so that the most extreme predicted values are $e^{-19}$ and $e^{+19}$. This behavior is consistent with \texttt{glm()}. \subsection{Pairwise} This distribution implements ranking measures following the \emph{LambdaMart} algorithm \cite{Burges:2010}. Instances belong to \emph{groups}; all pairs of items with different labels, belonging to the same group, are used for training. In \emph{Information Retrieval} applications, groups correspond to user queries, and items to (feature vectors of) documents in the associated match set to be ranked. For consistency with typical usage, our goal is to \emph{maximize} one of the \emph{utility} functions listed below. Consider a group with instances $x_1, \dots, x_n$, ordered such that $f(x_1) \geq f(x_2) \geq \dots f(x_n)$; i.e., the \emph{rank} of $x_i$ is $i$, where smaller ranks are preferable. Let $P$ be the set of all ordered pairs such that $y_i > y_j$. \begin{enumerate} \item[{\bf Concordance:}] Fraction of concordant (i.e, correctly ordered) pairs. For the special case of binary labels, this is equivalent to the Area under the ROC Curve. $$\left\{ \begin{array}{l l}\frac{\|\{(i,j)\in P | f(x_i)>f(x_j)\}\|}{\|P\|} & P \neq \emptyset\\ 0 & \mbox{otherwise.} \end{array}\right. 
$$ \item[{\bf MRR:}] Mean reciprocal rank of the highest-ranked positive instance (it is assumed $y_i\in\{0,1\}$): $$\left\{ \begin{array}{l l}\frac{1}{\min\{1 \leq i \leq n |y_i=1\}} & \exists i: \, 1 \leq i \leq n, y_i=1\\ 0 & \mbox{otherwise.}\end{array}\right.$$ \item[{\bf MAP:}] Mean average precision, a generalization of MRR to multiple positive instances: $$\left\{ \begin{array}{l l} \frac{\sum_{1\leq i\leq n | y_i=1} \|\{1\leq j\leq i |y_j=1\}\|\,/\,i}{\|\{1\leq i\leq n | y_i=1\}\|} & \exists i: \, 1 \leq i \leq n, y_i=1\\ 0 & \mbox{otherwise.}\end{array}\right.$$ \item[{\bf nDCG:}] Normalized discounted cumulative gain: $$\frac{\sum_{1\leq i\leq n} \log_2(i+1) \, y_i}{\sum_{1\leq i\leq n} \log_2(i+1) \, y'_i},$$ where $y'_1, \dots, y'_n$ is a reordering of $y_1, \dots,y_n$ with $y'_1 \geq y'_2 \geq \dots \geq y'_n$. \end{enumerate} The generalization to multiple (possibly weighted) groups is straightforward. Sometimes a cut-off rank $k$ is given for \emph{MRR} and \emph{nDCG}, in which case we replace the outer index $n$ by $\min(n,k)$. The initial value for $f(x_i)$ is always zero. We derive the gradient of a cost function whose gradient locally approximates the gradient of the IR measure for a fixed ranking: \begin{eqnarray*} \Phi & = & \sum_{(i,j) \in P} \Phi_{ij}\\ & = & \sum_{(i,j) \in P} |\Delta Z_{ij}| \log \left( 1 + e^{-(f(x_i) - f(x_j))}\right), \end{eqnarray*} where $|\Delta Z_{ij}|$ is the absolute utility difference when swapping the ranks of $i$ and $j$, while leaving all other instances the same. 
Define \begin{eqnarray*} \lambda_{ij} & = & \frac{\partial\Phi_{ij}}{\partial f(x_i)}\\ & = & - |\Delta Z_{ij}| \frac{1}{1 + e^{f(x_i) - f(x_j)}}\\ & = & - |\Delta Z_{ij}| \, \rho_{ij}, \end{eqnarray*} with $$ \rho_{ij} = - \frac{\lambda_{ij }}{|\Delta Z_{ij}|} = \frac{1}{1 + e^{f(x_i) - f(x_j)}}$$ For the gradient of $\Phi$ with respect to $f(x_i)$, define \begin{eqnarray*} \lambda_i & = & \frac{\partial \Phi}{\partial f(x_i)}\\ & = & \sum_{j|(i,j) \in P} \lambda_{ij} - \sum_{j|(j,i) \in P} \lambda_{ji}\\ & = & - \sum_{j|(i,j) \in P} |\Delta Z_{ij}| \, \rho_{ij}\\ & & \mbox{} + \sum_{j|(j,i) \in P} |\Delta Z_{ji}| \, \rho_{ji}. \end{eqnarray*} The second derivative is \begin{eqnarray*} \gamma_i & \stackrel{def}{=} & \frac{\partial^2\Phi}{\partial f(x_i)^2}\\ & = & \sum_{j|(i,j) \in P} |\Delta Z_{ij}| \, \rho_{ij} \, (1-\rho_{ij})\\ & & \mbox{} + \sum_{j|(j,i) \in P} |\Delta Z_{ji}| \, \rho_{ji} \, (1-\rho_{ji}). \end{eqnarray*} Now consider again all groups with associated weights. For a given terminal node, let $i$ range over all contained instances. Then its estimate is $$-\frac{\sum_i v_i\lambda_{i}}{\sum_i v_i \gamma_i},$$ where $v_i=w(\mbox{\em group}(i))/\|\{(j,k)\in\mbox{\em group}(i)\}\|.$ In each iteration, instances are reranked according to the preliminary scores $f(x_i)$ to determine the $|\Delta Z_{ij}|$. Note that in order to avoid ranking bias, we break ties by adding a small amount of random noise. \begin{thebibliography}{77} % start the bibliography \small % put the bibliography in a small font \bibitem{FreundSchapire:1997} Y. Freund and R.E. Schapire (1997). ``A decision-theoretic generalization of on-line learning and an application to boosting,'' \textit{Journal of Computer and System Sciences}, 55(1):119-139. \bibitem{Friedman:2001} J.H. Friedman (2001). ``Greedy Function Approximation: A Gradient Boosting Machine,'' \textit{Annals of Statistics} 29(5):1189-1232. \bibitem{Friedman:2002} J.H. Friedman (2002). 
``Stochastic Gradient Boosting,'' \textit{Computational Statistics and Data Analysis} 38(4):367-378. \bibitem{FHT:2000} J.H. Friedman, T. Hastie, R. Tibshirani (2000). ``Additive Logistic Regression: a Statistical View of Boosting,'' \textit{Annals of Statistics} 28(2):337-374. \bibitem{Kriegler:2010} B. Kriegler and R. Berk (2010). ``Small Area Estimation of the Homeless in Los Angeles, An Application of Cost-Sensitive Stochastic Gradient Boosting,'' \textit{Annals of Applied Statistics} 4(3):1234-1255. \bibitem{Ridgeway:1999} G. Ridgeway (1999). ``The state of boosting,'' \textit{Computing Science and Statistics} 31:172-181. \bibitem{Burges:2010} C. Burges (2010). ``From RankNet to LambdaRank to LambdaMART: An Overview'', \textit{Microsoft Research Technical Report MSR-TR-2010-82} \end{thebibliography} % end the bibliography \end{document} gbm/inst/doc/gbm.pdf0000644000176200001440000053664512143234277014031 0ustar liggesusers%PDF-1.5 %¿÷¢þ 1 0 obj << /Type /ObjStm /Length 3884 /Filter /FlateDecode /N 65 /First 516 >> stream xœ­[kWÜ8ý¾¿Bß&9» ëeÙ{æÌ9<’H e& mÀ“¦›t7 ä×ï-Éo÷ÃtætŒmI–äª[U·dE²€)&DÀ4“b†i¥XÈŒUÌ2G,b±ÂÁ„C;øƒËX&$ž²– ŤЍ“F¡Ò0i êC&£õ–É8FyÄ”P!fJ¢Äè:FcÁTˆr)™Š-îÓ‚Ò˜¦"1/Cƒ„LGƒX¦ãí"fD¡s†aÑYÀŒ 4a&Ô!Á›ÄèoEƒ±P`Þøb‚”…:Àd, ÝsÎ1îcFxO0 ‘Ì #™–ÌJ*‡t¤ÅYÓ9Âä˜ 18†´QŒÉ[ *‡üµ‹YB˜&`Q !©â%Ñe˜¤bqŒ~†ˆƒoÙÓ5Va^R²4.0{f o© ‚ ÁÛ@o° Ð ”¸-„r•˜CôŒ™H¼] < é"ŽþõûïŒïG3æ.Hµ ¨øH×1@à/½RókŒ_Ò„E~M(¡«?þ`üx2¾:IgìO\îî3~š>ÎØT¹Ñv0Z:šMY˜w˜²d{üˆö~!pEjû‚§“ Z\Ãétü0¹J§Lú‚Ó§û”ZݤEç[Ó+z"Œ5Jî_§ÙÍ-n#…ÛÛdB³zÁ¯ÆÃñˆoñmþŠò„_ò+>à)¿á·<ã_ùßñó{>áS>ãüçKÆwSß÷†fùþ0¹™Ânœð¶ýì7€´ |L€Ú¿øêýl˜b¹¤Pð>¹Ã¼wOÏO>mÿ{çð#ÀÎfÉ0»ÚÝ mÀøÉ,½ûLÐàçùKh@,Ëídš:¥µúØ]Ùè† àË&Ó½8Ç Mo1Éîgã &ïÚ¼Kò&ÈO.gNª$[QˆØv– f·S‚•×ôs$~w—ðŸédÌÇ£”Ï~Œùìv’¦N¡/þ ÿÁŸšÂÖ‹„­Iذ¥(bBo;n ûüíë£Ý7NPr°a5a‹®°›}TÂÚÂÖº#ì°#lÑOØâÙÂÞM‡³„ßfüxšñëk~¿C\ÐÍƳa:EÍ·‡ñ,\'Ô ¿O'Ô·/Í‹Èü†éu~•¦¹>oŸîoÓ=™ó´{ sÅèßS>Íù4ýŽÖnÊ|”¡¡·Ãiz—ù«ôÛC2tv¹Ãwùß<^óþ–¿LÞó#~ °œðSþ‰ægür’\}Mgn†Å˸›¼ÂOø*›\=Ü]ÓÇš©_çÆþwÃÜ¿•ÿ8|äOü'dé$fÓ~ $@:Oý7}€ Ú°üttzq¶å ,‚e¼–Í>JXš¨ KÑAeÔA¥í…J÷AeXG%WƒÖ<õÅN ïrp|”Ht9QÐOÚ N´3~ a3þ6L! 
_)¼·U~"ÊÛ°òÆf½C­8o¨ÛwZô^£ë”3ål= |*¿®³xMyÓ/)ò×.ÕjÒx¾S>õLF/t°6¥ö—8}Í‘»™çgœW9†Gz…ç°ö^ÅG‡oð+Ϋô‚xPà íQ„î…½O‡ÞØe%+qJù¼rwèo’ÉdüÃ)fuGùß(½qVä}gú˜MÉ·ƒŸñôî~ö4Mg•ú+¯õËdBR0>J.‡IOuÆlÙ ÆZÓR§n«óÃÖùáÙ tqò¿¹¬L™Ò‰®Êæ[]UŠíв9z5½ê~z}>+ëɶÀ÷¼aȈÑ’¤iKòÍÅÑÇϯOZd.\J¶šT"ìp-Ç£ÚB´k’-ýü¨"sè×Uád”î&{¢´üR–ïöOßl¹xl–ö8XÉfG•<»|z[žëFv݈ìÏ£—Š–Õ‰j>‹_®¿d¬Ä ~ÙÇ»V^ô~’Ý¥¥Ó\áûY0¶ÓƒD®b£6Õ謿þ¸uqqäØ<:Y¹ÂX­€]³§vªC7ºF¬:.è·„¤âõA·hõï¿ ´x} ™E@«º7ëçIva÷»Ùõ5âúˆZQ³´ê·9Ïé5I³‚0Ųèò&X–’Eä'ú_–‹´³Ä<«žVWêíÉ`,·úšUÕ~)^¢^äOFŸÐ‚^¿þ-Û?å|¥µë=ýüŸ¡Ã äÓõ—ùJ«dûÍÿ›ñ¯M¹Ñ[>"ÊT+#e>ùÅ¡òuÝ7“ÈK1sçlaZ«æH€zôƒ¯•Üé‚Þ­F…ÝÒQYƒ)¨1} ú‡­Ä1KǨqiVÊÙÚˆÑGi:¯Æ¥]ÒRAh‡î3n ênqÚôzv C ª¾é5ÛãDj!ÌJW‡ë2oeµÄ•hkÕ ˜¡DÖ}Ë«{IJle¶ŒRÛ&†€JuðÑÀ4¬uO$ J…Yl:†ïPQ•¡GþŽ Ÿcü{úQ ž Íh-j%F4¬v,6²•ò í<ȆÈËûzëµØê½ø)©ÊsL;0Ü.¦rm”›óJÄš O¬!¦5¢Xs%Š5—¢Xk-ÊyŸÒ…¬¹6EéLcµ*âØem„ÚÆÚ•[ñ+k#ÖYËB¨«ƒA³Öâk¯n5á³JñÓÐÖV…î¨kÆšeúŒ‚pIí:?à¿Ý¥Ð.)_öÓVºc> h·TÌÜZq¤ÒÉí¸¡Hrîn-¹áÙɦ}(BmŠ(«-®˜­96þ–ÙšJà»ø{·öTGÅq èç„Eµ~bô/^³ØÖ!‡3¡bºQo2FÐçÇ¡!ëBBß|ä©ÊÂŽ{=ü ГzXxÕP/ LfmJWs%.µ еˆ Ú8ôwq@ÄÌJïÜl0Ÿys7ðØñÂÿ”†Mjè= ¬ƒ0Dg1ˆqŸqŒôèÕîÓlF†N ê_WŠ$Cb‰?èt‹öpU%PËúq»JŒK¯Ý9@–…hyèà·xPåG¿xx*aqí©˜,£¿i‘OR»ÐB›I“É<ƒE*ÑŒ‘‹&bDÉ\™šÅ5R”û ¹€Ê¥Ø1ngƒ›‰¦ ÞXÓhåêò2m뮉ôÿL俘3ÿÉœÕCñŸÐäv®ŸŠ ½†¿Hëá¼J¸PÊïT–H‚3ß©ñ£ùx‚/ã§n]¶Ä:¿`q#³‹µ*šK‹æ9-<ú½ò[ü'm¤\¿hâ.°h<ÞŽF&ªÜŒ¿Ê‘dÎà=ÑõìܨºS Ý(¡^Ó¬¨DÚÛE×ï¶wºX 0‡‚v¿6 ÍwŠo§c4½ÒœNŠب¡,6(žVnk•YA{†¥:\ÙBÚ×CV.°§Úî GŸJ*¥ DçÛ…J ë:áŠkœÜï bn s{ˆm"b~óÛˆ[o:T¾;Q¾G ÷¾³ɨwΉÎ=ÿÐvœ½› §ÕýÏàÒmçÊ/Û•ßÎI,áy.à\¬OøCö†ƒuáº'ô¥™ ˜å?%dãZÑŽë¼ÌÀ /;Ú}E´uŒð¶üHû\ËŸæÜ·½bo+}4\½.«šqÃÒxàðhÙ‡jE‡ìµþT,ßø¾éú$tåJ–*{+VU­Ö¶(‹µ` nÖ ›°õ’n*Ñ#q¨rU·@Z«Eœ¯çe>²±Ô{E\Ú"ä8S;úýlXÇ:ý—§ÊF'“Åx>üPè0eI5£Z@­­Ó[—f×ü…ü®0´ÏÓo×'ÍÓá?Ã[Q_ƒ®ÈVˆcðaS™‰•õå¥E„`Ž® AJ 5™l~®Jý¨Ä†¥óÒ·Ék¡=Ì@áŽÎ¾$ï5(Ö4ý¦Q‚u¾k4ÿöù~"ß›ú…ÍÛêoš›Fw’Y2ß”_)óËÆ£]äìÅîe{1|F"ŸÝìoAðÛ˼ÝxÂ^œ¦ç¸=>Ý;ßÜ/“Ñ(¥â[ ü»\§hòö9LÑ/S›B Ë ±)E²!7%ûëÅYz)w˜Ý4›ú¯—ìë}2»¦ û^>ƒŠ—î»ìàኺϻõ¼¤ÉÿdZçúendstream endobj 67 0 obj << /Filter /FlateDecode /Length 2076 >> stream xÚXKsÛ6¾çWèj&b€Éö”tš´É¥UO‰;CK´¬±)º‚Ò<~}÷”H9Ù$°XìóÛ]¾]¿xýΔ U¦Zùb}·ÐY‘Ú¢P «ŠÔÔÚ,ÖÛÉûö°TI{làï#üöðûÞn—+“eÉÛ~¹ÒEÒÚ;ùż¸ÅcxÂý´¼Yÿ±0iVÕõb¥UZ«²"æoð„Jv@õYxÓ1^>õÌðt?¬!é-ü:~}‚G”l³Ô6yX†ÞvD—¾~§ê…‚ ‹B£ŠE™Ö¹2 D•V kxDò0©ÿŒÄ 
6_–º¦ºJ¾‘yj˼&5eÎ6ú ûÄAÃ)¿WÃk??Í2U‹:­­¶(ÑJiÚ¼ŽÆ¦•Ul2l™ôî$òHàX'¸Ô<à­Ã•*éüÿ¿ea“æ(Gz4©PÜá;ntŽ™|A ätÏ[9ô)S¦%ƒð†î–df\ìwø·ñLè‚3é®3Âxè#K®TÞ0ì!EšßI, Ë×¾Ç7‡-®ÞPäÃh)Фc3¡(,š¹˜\g×?‰¤£ào"©ˆÆm0F(H»Ö¥ËU‘UÉÚ-ˆ±ãˆE¥3¯­&m)~éøùn‡'ª ×™øŠYAƒ'ƒ[‘€¶H[·ñap &° %gUI,ÝGŒÈ^g\77ð4±Ê$á ÜÛß Fâœ#¼‰YO®Þ}›ÒúÂää—Rƒ¿(àñ(f“EÑxµtOÏ; ªaó¤E.Û'^46Ù4NÙ sYì /ð>EBà×Ñ=-1œâ´Ûòe¼³«Îáío"lšG1“©[çX¤ÍÞ³…;'Ôka®©}yòKfþÊ/!½Ÿ˜Ç1¸N³ë(t€4ßC­çöÜ &†CíW‚òà?"a€p [”aÀÌ LÒ"ÏÉÐÞKi«å™÷‡ôG\<÷-ÑyøÁí(ñõjØä™!¸*q­&3=_’¾sA4ÅCÿFÇi¸Œl|šEôÈ«È8 ‰pþºÝ½/%¹õðË)ÿGå$/ ­ào^Ùä %"P^ÁBb kpy×#-î=|ÁölrRªÁ%‹s—S¹ÿ< ¯r3ø¥ïð=ùÕä3è‰g%oF¸Ö)AöuÄó ±Šz'â7g(>ƒÖV7LIyäx½™ ñ'/j@“Ï"'¬BSY‡g¥¡fÏõ2Décžž™þyöϵ#ÄlV_¼cÈè`ä>ö›ˆZþµJ3L™8=ë=:E­ýK¯á*eaP×1ó R•¼ô»†M°ò5rR™qÜ‚ž©8ë/DC©¿ûH"Xxò§¶¼êχ܆<èÜ|Öé1xƒ xèe}FSºØ&ï–Užx(wQ/Ù¸Y÷¹µµ2œM¢¸Û „Yy%˜ ”® ô=7/´èß©|‹]`;ðÚy 'Ï!; Ÿë‹Úç¼–Цà¦t|ŸœìlÃ×gaå9‘Q»©ðÜ|XØò]˜3È« dtLçExt|ÕÞG&ì½'ÍЦÀ#y‚òc?Ð~¹gÃÒK¬Ÿ‰’ ž';³k Ó  0ñÃôØOc1˜Àè´Ö–ÇYˆ>¥`t}OCÿvÇ ‹^RÀVS±WNûÃîr®ÍhЯ2gבEi\{‡8縋| % |D´.n4‰]d!¤ÜúNÙðL|~bþ¿Ú]èkÔ†)-C“öE[‡¦n"ÀÿÍtÖÝM(ùX‰.Ðã j*1ê‡ÑfèyùÌóLsî/0 †€ŸW" ³w]Â=î¯gjIwÖÕ6qVð™Ú¹Æ`ΟÃd:ð¼´œ/÷¯¢|›œŽÇyMÁ!c†IÞeZ•ë£RŸ3ï–µIŽûvÛaî^JáÚýx*ÑVǵ`ã~h'ÓKU©©³§k¡¡T-eJêJ}þÈÆƒ¼ÊpöyòY(O‚Ç€Ú‘—~èV=ÜJåùÄ‘'ßù¦eˆÛø‰à¡¾Ag³>¼™ä·!þ[Y zÃéµWœÇ¦ã 7P¨H‰L”€c iÌöËX ئtu°8QðPÉ´¢DÒL³ÌDqÞÉÇØ‹v ÒÌbŠçRùÊà€ÐUèäïÑ'=3àü†ê4 ²9BÒ)JßöðCbYoÙ)‡_ozÙã2D%YLIÏcÓýEÑF…<‚˜÷>8& SϪ…“¾ßG›-AÊâìó'ëŠßkre ¿ñËl_Š›m¸¸kÎ÷‚†·¡À\|#Ï{ÎxѾ‡ÁuiUýhŽBñ/çZìâëæ¿Ý¾’/¾:-ëZ-Vuj´å˜þG>Ô«ø«¸NUžWøßêZzó ,À~u Ñ‚û”­DbbN@:Ï…æë%›f¹- c©¦UjÂ*=|”b¢¥GK5šB8?Æ™ ¼øªö³<œ S#¾\³"ÉŽQƒº#Ãi«ÓÌ•(D¯’Ÿ¡6ÙrÊ‚ªH­ª'ÔoDGú&õåÀ.ÔµI m‹Ë‚.…S`3ß!)Ö-xÜ¢G‹,­+¼MØ{ñëúÅÿüŒ_}endstream endobj 68 0 obj << /Filter /FlateDecode /Length1 1077 /Length2 4668 /Length3 532 /Length 5368 >> stream xÚí“gX“鶆© ¡H¯âDz„Ò{ïJ‡@S0„^¤ƒ0€tP‘Ž MAzA¥)E¥W©;Μ=ã™ýóœ_ç:ùþä^ïúžç¹Ö»>ž›&æ"ªP´3L Šˆ‹ŠËÕ ÍÄe€â¢bu ‚…£Q,L(.''Tõv‚Å€âÒò²ò`IPí黹cüê?›d€ªHîA !Xw¯áAÍÑ.pÖ_¨Š@Í~¾á4ƒyÁ0>0¨(@\…»`Î078 úHåŠÊüY†z{þûȆñ‡òãC ñ¡hÂ…¹@Fh¼ Ÿä#Ô?ŵ¼#ò§üÏ!ýÇ1 GøÿWéé…a€†h( ƒúgëØŸÙ aP¸7òŸ§ºXrCÀ€b–à^Zp?ÔŽuqºB^°?ê0ôŸ!ðsû#HÃÂÊÜRMèÏûüãÌGa-ü=ÿRýÙü‹ÿÍøé`à~@[1Q11q|#þù÷?ûxi¢\ÐP8 ¿RÒ@ñà7ORÀ@q …ùa~øÀ Q‹ˆŸI0Ðü¼N)Y ÈÀß$¾üGEZ Rý›¤ µ¿HFÒþ›d€ 
ÿ» Èß„×uþ›äð.‘¸˜ýñª°_Prû% ÷_þ â]ïþ‚x[Ä/ˆ÷Eþâx_Ô/ˆ÷Eÿ‚` ÈóÄûb~A¼¯×/ˆŸ öÄÇðþÁx©€?ð?CM í("!K‰å$¥€2’rÁÿ­ÏŃ¡°|søíú7»Âñ»ƒùÁ\S“h…HÌúè²Í‚÷夂„jn ê:ÆZ)">%"Šõï ÎÕZ<DO#Åýæ{Æî×d:¬µvïaÖÄÎÇ ÷( ‘Ýjï‘¡ÿŠ'çQÄúµæúñƒ )Bãѹ·eiÖ…½9;}ëù&ü_¯ÎÞ$è´õyÙù8RZÆJëÂ2&ª–Wâ¦MÖ½²XɘϾ4i)$ŸG"B % ¿9.ç{ïˆÿ„>-#Öû[Ò hY”Ö!‘×…ža\sà|ã+UGF…àÞqŒ9«&æÍÐú‹áv%j¡£ž3þª‹¯œ:Kß “õMû!9{Lö® “¼±)¤€ÛŠúðU Òh,÷_èa¶ôß6Õ%=-"ì‹÷È«ñ¾2©”Ÿi0Á BÑÎ¥ >79ðг#ÔæR°³{ýrÝæ–SÎÿ÷³>ôä+ôO3Gó%©,Õ¾œ[ÆÄ <ÛÑ x« ¼á ³zCdaè›æ³„+6 ” ?€…\¹¹•^f“/.^{˜Ã>Á5-ò5W,Íb}t4Ãmk›½Ësî‰@Ù`Ûé':]'¯Ðéß6î´qÓZ'Ô\õ]ã²vmlç¡vL¨ßíRjÎz]€âFâ¢%u9X”":)ߪG?2YÂØ¿?ð ÙÛÎ:xoÚø–ŸBÕ] ×^“ñULÖÙƒYPó°øá:½Ólœ…¿!uŽ'+æYp*yÍõÛ¯÷äÈ¢=ªš‘› ³ø irrΊ”®[>š¤ÅCÜ×¶‰Á’©UìõpŽ§Çº}°ˆ¡O±vž•qß*zô>3:Ëeúñª–ÐkEゾD”˜ÄÍ7†+qß2ýòA½J’R%ÜŽ±t«U°~éÓ±¯Mj U°‹]J«Õ…ò­”BI܉°ÎìI¤kè ’‹ ¦bS¤–Ú³¨IfVç°CºP>##‡`[—»…Ù­ëóý´ÓAþR×¾û‹Vp²G¦Ù5o | ©O9•×äù¸nX÷ÕÙ"³FÀ—ºx|No2”ÏAËû“ŸÑ´J² ?Æ\︃C»f¦†N Ó;Wž˜¿r¶j*J!áé1vÙéÌVA’w~bi[²hìÍÁåmç?ÉÚÁj`²(=D¸Gê#63óQßÄ¢´ß¾H~9 }3¿ôÚ“ðMΪ1ÖÊ„CqÕùEáÎ~Úë;qÍ7 |gé‹îjÿr1••+3º}¢¦Ì˜jW˜ºv홨’W]…zÒìÓþ(ßP_Ú¼ÙÊî0w…O ®æL&‚i3êÒÂh_‹‰¬7팶ž#,X¶ä“ÄŒ–PU;älB–"4êd$çó¿æga×€Íâo9?™‡ú°=Lü‘ |îýÚÂ{³^ÇiþugÅWn€?… !Çi˜‘ 1‚\!b'GVê”Óå‡d°Y <Ý£ìýñÙ&èc}±cj\e¶l¸ÊšÚ0GãÝ`ô„ S2n´Ã÷™üÊo¶Âf•œÊ«ÅÌ ´mtÈ!ò–å¡]´co–®Ô½{Ï®ÑéY@cP †iE¿pý±ýÇî†e£(\òÒbîªæõq1¨F…Ç©çø¾6(1&3Òg˜œÔܹ±ô”J?"Œ{ÍhÌQ#-VîHëusüïL8&.OÕ‚ÀøOkÛZà;ç«Bú†¢ þÓW2H¦++×~_v\~>™üž²¶Êï$U*<2¼p›+æJoÆÇ­NÏo’ç L‡Éƒ'û³FOû4Óºú ˜ U7+§8êYfý«Û+<¼}:n·dÃâ7Q÷ >yÇ_Ѩ_ßì¤/Oð¹Ø{&^ 8K´¥ÙÉÚˆ•¿TSœ¾Öà¦NÎàWØÎ»×ëùc@½Ï Us"|/)"‰U7pQ‚ë÷—%¤ãîê7‡±B¤wáNÊ7†‚²¶½åÈw?ºGž"¨#Æ(ÞG~WNéô°œsæ Ž2QÙuì8¶û\šè }—R÷ÛUGAíJCB•H›•Œn«| ‡o©W‚…e¼¬Ó7ZÜÈ|{Š„eWXî1 ïŠiož¶¦ÝkaäïnQÓØ¼æôto’I‡lWµáY%¿Ì­XÛ{Rõ¯BC“¢ßˆ‹·ƒG¿t¿Š¸mϽкµÈ—ª­ï£»L<ç¶N<ͤhäî?ûÃø&ýFy€"6Õ"¢ä“á°IÐÚJðƒÒ–-—µä†x‡l—¥†K]ãþñú¨~>?ÙÑ~¡¤––ã£û &DoUöÀJvz8>)z±ý|¯ð¾7»û‘I¢Ù-–Á­Î(™ÎÉ’¦Š}óü(þ@bE¥Um¤¹¢d{aòGÍóìÏñ`dB$hÕË3Ô Äy\„F¶ùfÃt ª9èI­ŠjnhïöԤ곓ÞËnîW dÙxÈÎ5—“©Ô~˸eÐN¡EÚL‡æZ=E»ý~_îØw)„£5méþS…_÷8‘’ˆÞ3‡õHÿŠþÑÊøbÊ!⻂Z×Õ©Ìüà™¸ðJG§ÉmZò´ 5è‘Ížjž­ªJƒ¶¦Zïï+)tÓç‚'рĖNzÆ^]éëÊ/츻?W™8yÁ†ešq2áŠVnç|M ¹«vÜ©jÅÔ$ˆoÕ•óÏ_Ö]¯–ûÌÌD*LqôØO÷ù,È'ÜOfk›R Ý>¹l:ùû‹S‰, ¼ìjg/D, o¾ƒYW½-ŒÓrÐE¡øßê’Ô¤÷­€X÷Jú-Em´ôgq¹(ѾÇàd‡Î4²ùk. 
þî¯_¯Q¨m¢Íi1—¶Æ-½‰à¦;ýÆ¡µuN ‡AªÁ˜e‚MM2•L22×µ¯©# º§eº¦Ð"ˆî´b¤¢H”êB(­Aí:6ñcdžUxȗȈ‘-µ4+Yykr u LŒkš¢-¤ÐÑC›°&ðq9d·E¶fr˜S¾n­¦ÜËÞ×ÄMmõJ·ÖÊ }õÐ<탴ü þþÄ×™]EÙ'@qu+¥¶&§DV`âÃFÃ,éî—ïÀ ]ºn6c5îõ–èŽnsúèrHÛ#×lwÛÒ}ØUB–8ƒšþG4+Âßxßå>VWx_s÷V÷ų Æ™òàk“ ºms½8]¾ù{»bæ¼®û>èÌí¡Në#N”ÍA·=ÕR•nôfíkGf©Žãõ l]œ£µ2z¸cÅ(¨!fûa<±<ûmœ·èÐGú+݇G+ÛA\ù-AýÄ¥¬ñÌ 휹өoœ!dy2Up${*CTQ–ÆÈD¦–N|!ÊeS±â Hm”@™­Y uÒX< %ó! 7w¬y‰I¸ŸÍõº5}ÂÚ’°I÷ÙíÊ¥°à¾ö;ùêò-ù]ýdî â¼Ë…ãåxJû‘XgÖÇQ·¦`³}'`å“ 0gÇû€4Úú0y*%/‹{sìà€NŸW ?–͘¸rÛ³5-ÁØq7ê-]Ö'‹ÇÈ;ÖŸ”#Hd/ÏQ–e}ë;kÆÌh›aÛäƒð‚;LÞ]ëµý°ª+´q*J7Jƒ¦¯„CzdzSOR¢C7N/m6ÔÀLbù‡`Å%‘Št2gPkѹ¹HÙÙ{õ¹8¿$r£G›“"åûðêç‡q‹ƒÙ7ªÎúYÎìSâµs,²ÌŸÀ’Q4=­mÜ.K~™ð¹]Öqp‹-_f¸ûŒ(7¿©0奋\¾nÓ*!Ïî±c€[«ú=?{iâNßÊ6éÁ"â¬ædanô&[|ö¢ð»_ùΩIð¤]Auá×ÞiŠf-½;¥«/ŸrÞ D–¼yÂä2ÜÀàæÜÌÕ²H˜c»aCVGÚ^ž oÐÔNBÚgÒJãù@©z3[pL€XƒIÌ_æödY^Ôúø½i¿G’Dò%ÇmÝœùé9·ÃŤ8å °·éœ«žòøIdo6¤cXŠàY¢rFH’ƒüqÿ„Eáù´A²?Ýéè‚gkœ»¤\¡4ÛÝnç³æ6)‰ñZ-Ÿó ZÏŠÒ³ƒô”ÀÝ>GNóÖÍkìçŒê^¿bD¾œ9¯‘¬ÝSв‹ºñäq»RO& I>Z WüͤtëšNÝÔH\X,‡£b…—ÞX…± 1BöUmjÑH9.Õ„þ("ãëc$k=7‹hpµUõ¢Cq¶›huú,#j, ÑtUqž£vˆº”ì5€Ž×öÐÌCcQ•r…yˆ¦gº±¡Iú$}h£u> F#Û79£4³ô’OÏEg5å>ÌëÐO ˆ‹Ò ”¡‹Ý¦Ò u&,2±Á¢%ú ÚX0Ïݤ#»ÐƒXYÅ ¨5™?À‰ÎáÝ yÀ­ÛüG=rAû ¢(¦¬V)‚xŽ»•×ÓÏÀ•œ¾õ:t1”Ïc̵F4È:ùgç©“Á}â½W2,Þ…Tù.æªç‘zÉrÅ<‹ù`Ïo׸ûþÿ/ðBÀƒ`°h$sð/ð½âendstream endobj 69 0 obj << /Filter /FlateDecode /Length1 967 /Length2 3772 /Length3 532 /Length 4438 >> stream xÚí“y<”ûÛÇíË´XŠ$q˱o3${²ƒ}iŒ{ÌbûVdЬ£d'Ù…Jʱd'²¥H–&Ê’cùMçw:Ïùýù<=¯ç¾ÿ¹ß×u}?ßÏ÷º¾·¨°…•¬¶;Î 4ÀaI²09˜  ·„)09(DTT—"HhVAÕ˜ª* Ð&{ PvNMQE ªtqø@ÚÓHèJþ(R´1 D`8‚ä bhH„`…C¢AR  íãXþXA,A"HðÝå 0àŽF’7Ð…Èÿ0dŒEáå?Ãîdü¿S~ H3HÐLJ4‹î8¬O ࢠòf8Ú^ ÍÉÿ†©Š}|̘ò?šôiíø_8 žL ç°ÿ,µÿôÝÑdÌ?³Æ$„©õðèŸ!4Ѻ[ IHO…ð!‚?ã ÖýŸ&h}ûiAÞÎÄÈ\ï¢ôŸóü™³@ ±$ë@ü_ª?Š2ìÓºC@ŽP9(F+¤½ÿþrþÇ^úX$Î¥]¥s‚@@Bh7ƒFJ@0 @cÝÁ  –—ÃâH´%­'¡ G€üçÙ³€<‡Á ~„ÿŒ¨òA ÷+  Èã°à_¬äIþ¿òJ0{À_Ê´ˆá/Räᑊ oùÑ®„ü¯­aPš´ûߦþ y¿¡ þ…0ÚICškÿ_¨@“ ü‰ÿ9*\@°¬âY@VvU@Y úßêdÄ’~þ´yÿ›QhÚíÁ Å!Õ£¼Òë¢KÃôóû˘¥èu<êoš=~:Ôr(òÍ-zŸ¢._©éj»YÇŽ.0/ùïž"Æ? 
¹Ôk@½ê{3cdÁïÊBVPã)»õ,xà<þÌVä2GsÝð·%zóÁéîÒTû‚Žœ//—ï[HèYd¦{æèWó,;ꜲA–Ïå˜ëÕbŠÂ–œ¾¥±gc¦ü9So3MõE†yŰ5H¿]Ã=!sïä³å¦ì;}û4xÁ‘—2rÎûzdÌsãã‰HH4’>+”˜Ÿ”ÝØ´»SA‘×êW¬»™Ö¤¤ Îé7ˆÜézÇR“&wmrøzw®©¿‚\õBqqò¤™ý3 o¢9{KNfÝf [k¶Òdw•þÁ¸bmÈb~ĥ1cs";fC}Õ¼V3“¦ uEgKÎ0æÆ‰LÍB·ªý§2™¸()™<ñ/ÞYôîÊ Vo/×ô°K•±&¾s3¼¯è0¤0Qj1Èx§XÅTñ€°”Ïïç‡Eºê¿xêY¶è¡šÌxõQH#\ò€=Ôþ 1p§Ôèý›Ø5ÁÛw 1Â1ŽÚÉA¼÷íðí¯N™é^nV^¿Œí룚ø~89{çô¤’g‰éï"¾Åq üœjügñ{E+[7µ<>bxñ¦U4–»à‚VhŠ)>½ôˆ>%nΓÁþ"C0ºš*WÍõèHÛÆ“WÀ·9¿vÅlXÝRó£Ý£^ìÍÊ¡PDÞ!ÁƘ´†+a+OЩúK%¨ö$ÆÕPóóo»NŠImyTöµƒþ®»Ã%hÚ›ODn=¯w©´ƒ>268zDZ10vÍ¢‘O.㮿EÇ©ͯ~×´¢¾‘bµ´¼LÂýn–Å-®!²¦.ÞfŸŽ7—°E=Å–Ø#³o÷ôvmd H7ŒÄv‘áܶm[ÞVðo)ø”2j ]ŽB^–)ïˆÃ¡ÇŒdŒÆ¾/jæ «Àz0pÚÔ1^>ûñó€ÙºY”A‘÷‰‹óѯØc²É÷Ëö®–\­¿«0´$ÖÍ<çX2ÎübómðhÚôû„j¢ù~IC’3f+_m"ú‚¨:¦Q—¢Øiõ‹©¶ãi|§âÉî½RE¨þlR MìNxH" ïTtS¯F<¡ãx½ÂwŠ¿½‚5øêÚæÀQ5¨t0ä³:*©EîKzÓj¸¯<û0x>*Æâí¡åæÒlV ³Í JL¯ÎR¼õ”2rÜÓRÝ8Yô{µ: ϧ‘BÁWÕJSàê5«â/UŸSméj]ÚÔl>cÞÔDD=RÈ\q›îØQcÊ5-©Ç7ý…T´°ê̹ÿ°Õ–ÌøÀ6—³0ÆCL‰êåžbŸín¸W²,°©M°ˆŒÚ6O2—FÏV›ÃÙl÷’MÔ»»,>rKÊbHçœæ!%¹­¾óS‰‚á[>€ ÇÑÚ‹læ> N L_Y®ó³Ô•L7_Ú(.Gö³,†M”ÌŸœŽ)Úl­Ê³ ×7ý-äÿzüb•ˆP #Qg¥“áta¸€›1 ç¯ÄT¢i׆Ë) 'Y§ÌxWÚ“ÛÃÏ.Ö$®´eÒÓ]9wäVü‡ÍÖ½ÑêÏ`KÜÅÒzÞð^‡Œýæûu#k¹N³[ô»Î½M Í»59iq¬³Æ'ÙøÒGµ„|Tsc¡¾»£U¡âÓwLݨWJÙD¶®élÈF|8ƒ¹â|f+eºUaÒ_棫Ò8ßÐ¾Ž®ðcóè¦=a-ŒxVï ôíFžcDf k%Ï ý%Qìpøûޏm_ó“"{¬Ãà—ƚ7*;—¯!vPB*?Ä;gyöI)N—9®¦»çJwqc™s2·×¨<£¹3ù޳ݪ‡MÌŸˆ=›ót‚w¹†ö·&ƒªä9Ÿ;å¦ò4âõÓëï$·†5¶öu>ê«xZÆ#°jöÉU¦<ž—éL¸µÿѪ…ÛNg󃓊Ë$± Lʪ÷SF\ô¹öyù.z/¬½¤Î¥xGúp˜h߯fÄÇÒèÕBØ+€ø)‹±£Ý!E$CLÙ§¶m øHî®l÷pÍÞtÉ$ùîàKiHÿäð7!Ñ€ùÞ½kùTÇ•›Öa=jÛiBïÇý1QX•âeS·Úê¬JXÐç—ªLŽm²\^"Q¼_´±V(›¯ª? 
’lD–ŠRt×ì5Ì·øÅ'J‘n5ÌåÙœ¡ÕQ¯y{qHƒzA"mFʺ»«Lm4N7˜¹ ¡Î¤Ì›gKí_ò‹d'¸Ÿ—YK>Žtt–u =´!–Ü 9åñ–7¬4ݽ]´Éä4¶­¶­Á×êK‰yE¢0y7ØiX—`R­µ÷‹ÃÚ[J¡goÞ¼†7ïX…Ø`°ñùbb‚€5"ÊR¸Q7@w¹'\o(7ô­RP.0²Ãâ½;M¼ÞÔ–e@NéÃtý¡tAèÛšanϱ¼/KÙm¤„=£§í™IÓf½ÌhWi®}s:í—ÕJ$ÃäÏI`ÛùXy±¯LùŸè¾¾÷^µLb'`.S—åÌ>Ïä«Y¤Ë#"r=Þ·)Ö?,ë¸BîŶ$  -¶/šfçpíö«Š63»e-Fêg¤ÄO|+ î“P_–A#Gäˆ}LÚWØÔ¨Hµ™…÷ñ󂪟Å|z_ÞBä¯J9„H'ž÷›Yfp•A»1.fºñ!:Pâ¦ê±üö¦æ§_7ÒIU­aá=–¤3ÑwϽ2tH‰Òß,°Fý½0²Æ68ü] ãžÕçµ=cú,*\§bññšº×åÉ\•x’æùÇÆ.·Íí£ç¨×"UòJµóå8¬œú\ôÜ&Y•žO~_VMÏÌË]ô‹šE†m‰ß}¬‰ël8Íͯ:Ùë\0öZ^,†«“ öv‡œ½†óU°BWèÂ-wÓ­F+„êxš¿Ûò<ÌÓPâeß4DzÏP¹ŒSMRßѽmœ¹úH¨›êˆR<ïÐÅgÉØX._/…Å×ÃÅÏmðˆÆ]ôýí{ Ó†%¶´?´ :xNeÖ“ÏzqïDÌ-®bºØLÖêP1d¹@]w<ßΈ*—þ J[*¦ZÉœ]¨Ü’­œUr>èºå¨¬€4ÿ—ùö‰™¶çã!Oýog«µ|Yò› ;L?ÎXv­ÓÐç^v2£³Ù¾u uEðºÀHíÞ‚ˆÑ¡á¾Zö^v­Á5%e¹_Ù&•Àû±ÈÞÓ“’±Ïú@'zÔ—ðÓõ†Ñ‘8±^üaÍŒ8CãQ™NrUü}¸søÀ²–sLQ ™q¨>š´ÔÕŽ51Ÿ¹ašÑΫù6a]Ý … ”½~Ô.È>S‚-ÞMnL4Æ@rÆ/À-b~/w†Øóµn-„wuNµÓEÙ[|¡Ïñ«Ù%¶oWî2ÊXhÒìÄi¡G¶Ô|‚‘ž›:sC‰eŸ•È(ÕËÖ.’“#÷Hãºò¯ùD 1¾Ñ»û‚ŽMˆý\ûÙ ¯·Ü|rOb œ½çäaaáÉ“‚õÉŸ­Ì´.Þ²ƒIS­"9ýá)^G¢;k÷Ÿ[ §gK+$ÃüXS"¢©ƒq¾·ð›wª”,‰9»< Nóg“YVíÏjR—±+X—¾¡V¶8[·ƒZ=©“ANí]~mî ཆð.™ãU}nÉlñ;6W×ì1iù—ŸŒ÷a\®:&A;ÁQº¼/$æÂT¦3½ÞIt ¼{y0ѺK¾Ä¾!Q˜#~,Ÿµw<˜ŠÉ‚•ÈŠF(7ug×Õ—ZVöo•Ä~²6þ½Äfñãë"u£»T=ŽÏ·ÄŒ>Îb²Üµ]‡©E3E‡¶8.”¼ŠÉÀØ`mb‹uœ¨’¤ qÞ÷¢e.Ž»iž§…gHÏà gy[-¡aåA ˜E»SœHÉWUÔ*M×Cìë+ŸÊªëÖ_N'ƒ‰}ÂUæa•Ž•„7æ&‘{£‹ÉùËv’”Qxw• rÅ­ä®Öîˆ?¿†Œ&CÞ’Yäœ*çŠÅCÉ×Ã/Þ1AšçªàÂa­ð¦´ÕžÈ¸ Ô»¯sÃè«Ê%¥†¾|fzeç豿ÔA?ùï~/NIä ¥M8'–jj¢?^À‡¯ío _“1}NâÚê)ÐL•é~ò¤|’ÜYÌ…B„ XÖüR+×0>vꥡÏÎtšÙªÑú?| ÿ/ðBé"$Að†ü ¨çÈendstream endobj 70 0 obj << /Filter /FlateDecode /Length1 2087 /Length2 15504 /Length3 532 /Length 16646 >> stream xÚí¸UTͶµ‹»»3qww‡àî.w îÜ=¸»»&¸»kð`Á=ðó­½÷JÖþ/Ϲ:íÆ3j¼Õ{õ@A¢¤Ê bæ`”t°we`adáˆÉ«°0X™™Eá((ĜƮVöâÆ®@^ +@hòñÃÇ/;/+@ÌÁÑÓÙÊÂÒ@-FóO@Äèlejl7vµÚ}Ìajl Pu0µºz2Dlm*ÿ<áPºÝfŒp,,3+SW€ ÐÂÊŽéKÒöæ®ÿ ›¹9þÏ;ÐÙåÀú_6i&Íìm=f@s8&‡5à‡—ÿ7lýïÉ%ÝlmŒíþ™þŸBý_ÃÆvV¶žÿà`çèæ tÈ;˜íÿwª&ð¿¼ÉͬÜìþ÷¨´«±­•©ˆ½…-Àü_!+I+ ™’•«©%ÀÜØÖø¯8ÐÞì›ø¨Ü¿,0©+ªéhŠÐýמþkLÉØÊÞUÍÓñß³þ“ü/fùÃÕq¶òè2”—å#ñãó~Òÿ_Zö¦fVöVN€±³³±'ÜG÷|À›`eoô=> 31Ú;¸~<ø¨‰/ÀÜÁîŸ e0‰m]ÿ‰þ+À `R²´ú7ò| Ëüh&só?Èú²} ídÿ'ù¯aŽø¯qN“™ƒ«-Ðå/¶‡œÜ\f&¶ÿÕ<ÿá09MöÅxþ;û?SÙ™?Rö¶@ó¿¢,ÿý_É®mÝ\þ>,˜:ØÙý) û‡sKOGK ýŸç¿ÜX9˜ý }ÔÎ 
èìð'ðáÎÁøoæø°åúùÏ8LJ!WKgà_ÿÔÁÍùOàŸ’Z¹ÿ•ñaÎå£7þÍÖ\€î9ûè&à¬ã£röVáþg…¶=ÄóÏ4vVÿåü0tr3þ³gœr"èCJô}Ȉý¡ ñ?ô1¿Ä¿‰ë£’èCEê}”àÓúX¿ôúP—ýCêrèC]þ}¨+ü¡uÅ?-þ¡®ô‡>ôTþЇžêú(¶ÚúPWÿCêèC]óÏ©ùX‘‰³±© Ðõ?:ðãíýïæþ϶?ðŸÍÉóO/Z9›ºÙ™ÛÿìúÇ«”éO{ò|,Öä},ÖôÏ!cþX­Ù_øÏ–þ…ÿ4Ü_øáÃâ/üP·ü ? ð×qfþ¨€õ_øáÉæ/ü0õ×Ygþpe÷×{äÕý_øáÊá/üpåø~¸rú ?\9ÿ…ÿœ€¿ðÕë_øáÊí/üpåþ~¸úü×ëìÕÇ_øáÊó/üpåõ~ÌlftºXý—üÿ} ˆŠ:xx3°~œóoÌÿ쀇“Ç÷?2MÝœ?ÞI®ÿºc?î’ÿas«›ôšÂ­.9˜ò…X§µ„•ûIÎT@Ò‚ŠZ´Æ+4öÍwï$€Ú–ŒÉ:Ñn5h=Wf¢#B~Åw‰ìòQž”ü蟾øvènt˜éÕŽ¯u)ïyàHú|ŠÜÙ²p{ƪ8·5^ž¬]4˜}9tš¯D-®v½AòM×½é[V'—–d¦­zxh%‰ JºSùöðÍÏ(ɉ›ÓÁ~Öá0mtëW]nhÏ…0yIoz˜q¨¦]Äl*»´*GBgeEá(Èõï#µÂY UgÀ!äŒÊ˜tpzXm¶ž„áì%ö”Äy÷_ôí]Z$¿)8õ·Jö Ùè -‹dVŠ™/ ªb¨ž`¼ÞN©<]²Iþä;¿|“\óc4ôht Iۭɰu3³f0¼Xn¿ªã‰øÁ‘: %¿{» gê‹ø–¸ÀYí¡<[‹Ò¼ú)šÀ# ñ+ýÌÁÎi&’“PÁvf"оú!ëÉX»£§£4­‚*ÿ:'¯æÓK¶‰êaº%¬RZ­Ú"Rธì2ÅaŽ/)”ŸG†àm¬æÄ,pXU†¤ „ åî~ØÙD›I^Õ%X7æíÅ÷V¦xÎ3ñb§¦ÛÐ5c€•f]™¬n»p CþK3ˤeƒ·6^h(ˆÚ™3–OÔujŒ³6ÎTò¾Ø°ó9 ½ì2hŒS~©\fò9HRñ PóvÇ Eªv~VÚVc‰ŠœÎ•zx…!­¼­ëÁZÀü»°ÙM‡»ù(¶òT^Æë#N¶SÛúmŠRÇÉ׫vŸÞá‰T騉¤(‘“9îµ«ÉØúÍj‘ªgjæIumu6âEG]mëÑr`\ÖC7<ÆÚ¤*ìv/ÿS{öžCRÐÉ"GÅ_QÂá³,íãÛÍÊSTƲK‡xã4f ºQápþ¤‡€`±ÌÔÀ-ËZNÜ|wéa8̦ê`‚}nú5hôta%žqλ¿¦jÜg „%ês…]ë¼r|\ªÕçîèèßÝ·3Zâ ¾¾ 1'>=ñŒ¸²Þ…“ð·j¯%ð™A›fʹ9ân;]¼$F4~dû[¼JÛ;|eÁ^é…Uz„¤ 8.P8¹ˆøÆ9u¢ÓBû¨˜o¤õb´6åe%®c †?jú9"ò…ìzjk­ZU»Ë…±«~¾sB½Ù”j0—.q~{]Or°yâ´ÃÄóL¦$ÐTÍ 43´@ÅhÞ½ï‚)[†»pÁpº¨fިʋ ô_OŽÿÆ¿D*÷>iR´W$œ'±=V㪠/0‡¤Ú)!_?;?øk½j…Û{ÑkN£Š"!k¨”G‘66&ýˆ²6õª¨â”ºÐªÉ(‡ó2¤n¡vÞÄŸ˜æTÎØ3;Òr(©Ü |·B½Ïšš§g ßÉ’ ðt¡!²×øÒj«ÄÒ¿ó«|•R¡ãMᥠk*§ø$ÈGgþ6ù —Ð,œ2«øw“%¯=ç™/p½Òç!Þ -¥i  ùÊÿû ¹‘ŠóÒAàzº¤³{Õœ$¨Ök|çò‘ùë÷—Ѧz±»ÎÜ‚~ eƒ>ó46Óe£˜†‡ 6°ã;å¦ööLhe12f9tÆD%ñHÈ©%Ék‚¬šüH¼Ði\†;{w嬹ÀKƵKIf`ímób+•{sÛ3ÜQe=ü'RœX©)o·¯¼Å GþTùéR · ‡²eì±)mÜ#Ì“Ïñ Wl{ú›j˜ñÏ'tø‚„‹ø`Ä}[I"ñ{"½ìß [ãUçñ.ëø¹8 ÃQ{ÛLÉRŸÌŒ°kŸÛÏ…á¡ÒÎô ÅÀÓÜFBY<3«lWAÕÇ%IÅÙBœôMõ„}Þmk”öi.%\©_WsPᜄp‘[)©ÌDlñFXÌ› 87ºÃ×᫄6ïñ,Š/2/»´ÐÎBö ú&ƒ‰äŽ|H°¸Átàh[šß÷EÐ\±µ™(›Ù:Kë¹­¼º Ô¢4tó¬¹ÍP$ \OEÕý†"“â[/1ï]°T×[œN°úÍ®•ȲïoEœÉf¡eà¶<”ŠÕ\©Sn.µK¢?ïÒÎàÌÚÃÔœTg0µ$³tã Âà k-}íÅUÔ9£^4 ÁZ’€¥Œ"sËp¯Xïà^I×XαFXgÆó=‹¾å5£‹‡yŸ=:—{3oð àÄÖ‘ '{Šä÷´þ úʃó: 3Fö\—$Ô.·öÖAƒM›Îyмæì&JO{Ÿ[Š ¥@çH ¡4³Žèݾâø7Dí®-\Q|ÝÙ•ùí¶×¤ËêŸ ¥"Þ.ø*ÅÍ7áGHÌ8˜‡Æµ RöhŒýÀÓkUý°¥Ê OFIÑwšˆîwoXÂ<*׉Úc¶}×/‚´ÒÁA‹Œ®­SéK~Ð › 
5)©CÄÍC^»h‰ÞÐø…õz±/‡Ô¿WÜóÝÖ‘`EûìyìåÏ:Öyi¢Æñ‰$T8Kä6ËçaŸ~þþ ãÂ"÷õ©‘¯˜@@d9¯òq¥}#W•G•´U–…ÅsþtÈèõÉót÷bqC=ÀŸhûÓ®Ôx5"gõ”pÛA"Œx¦©Z\}©ßÙüp‡V)T¾6m©4WÑÄ% ýÉ›Nxý“=CZ*¥Ñê£À¥þïª5î3s(^‚»WbÆd™ÉË…œË@C?s]]Ø…ýUÉ΀¯(C Y$Gσû4–·@nD‚KüsÚ¯Þg¾K÷ -àÍæ^gÅäLÍ3ó l—z‡²é·Áq âBš¦X½; \ú4XFsþ[$wsåžõ$ï6±ÜOË ¦µì.PŸ* ,?õ€VÞÎqè·aªt4eÕ`s9b)z£s3¨$ |•í\¥!d5¶# ?mî9Ðoo6‘@AANað &3‚©N”YR•Ž’QAƒ6¸g’Qæ€K¶‰®wʼn¯}#MÑSùþÈ …šÒâ»òä‘à$·ÙwŠyÉÕš>‚Ï”xT/NªHÿKM9_5ú<ÄŽ«â6v±—¦òÅнy·-·|Ú"½ÎÓž…í‘Û ‰YŸªeÍ3U'Ìõ«MJv¼Ú³u­Hîà) í,üõ¨%†»íc†ˆ6‚-Í+>²M0ÚjWΔã9?xÎ0 ¦­¢÷l°î–Y9ZCb—HçåAß[N…úD^]Uºáòì2]¯W~éŒ?!z·ŒT¿"Õ|W˜øìVÚ…‘¢Í%ó0Æèô^@Mâý˜Çëy|Ûú§†VÁËïýÑüÕXE­÷€;=C´‡Ù±xf;ŒÞüzšæíµàU Q«CPµn]¡ÐY¯a vòU «ŠLÛ|ÝÜø<€µ¤¯p—l’\´ÖÂªÒæª­Ü8ïa‰9D3µê×øà(‹Dq4A¥bHE½Šu„P¿¨ogÑ{®Ê©hðVoÆ‚g cŸîAÖ K¡³ù…ó·k‡Œ¾àž[+&˜¼¨ÜÎC#]æÆ®y›˜b1’â1" µç>ð$Ôšt÷~×+éÂøQXæa!ÛÑ(àr·HÐ^«Gà š ‹ù[žœ‘ÊÌhTœÐò%Î 6h ‹ô=yø®Ó;Ø„§û¨[¯¢¿ï{/5ö"ÚSæz£ÐiË‘µ\èךܼ'dôņ°ùO(,§[·G0á9…¥!cÔÊ+®ç°ayUN8É•öVI»Ñ”Ô™Of÷ºdÆ`â °y¢p‹H˜Ô)>À'õâ@~C-bÏ€ÕÜç\ÆòÝœ¸,\Øpìºi¤$Š.’ˆÆNg;5ÄžÀ'~^‹ ÁF6Ï›K©Á>ª3´·Ñڳž1—z<ü­y ³©X2¡^©:˜`[&ýÉ7cÓ½‹<émÊ-‚¢1 9}ñê%€åWí*J¼çÖA‰œòñYB¸hOÓÇß–£Š+_ð‹òÓx­óº†caç¾M Üyxײ´g?,†i'ÒSL¿.¦tŠ)+7ÿ13éµ&‡¸X#gçÅ/¨‡Ç&êXqÌv´2rÖ$p,‹3D­–Nå¿5ÀU#陋ú-¾¡Ô\ÛüÙhø1ÊÒK¼+(TÙyµÃ’OŤ:¨¢-äÅúŒ¯,±W)i§Ž>ÅçóôÙü)ªveŒõ.]idR±í´;(êBõ¨6螈ê9᪗.qÎ6U| ys‰ëPu†âÁJˆ\NG™YíÂÏ+C;5>È<¹ %|÷\Ûn3`(EFÌÚþ¨ÃwÈ«r›È‘ä¶ýÇûïlí1ÛgŸlãÆ¦2«m/íä]mM8rªClêwúNâÏC'pµ´T”ìÝùä¬àKŠ`]+X©Øðé£gs­Ø öA–Z~s‘ƵUl§húLÁ#\Riæ«=DÉ“ÎI#3ÖiߢCü•/ÿ Õ¯XõbOœƒhM$ áX°¨ç€hXЗßGïU´ÁI³Ps±>Ù-o¤oè;+ÿ%¿ï &HVÌ¥ÞòÙÎ Ô:HØ|ò¤ ôû(+QèùøÛ啃NO;n²ªÑØÚ\¸»ˆà£€i4šO-p¯o×v úu$Ú”“ÚUh9»„. 
¢‘ÇËÒß'6&ø©Â™,µ< éáÝ#ÕnFmVÄÜ[W¬²¹ÎA‚üÎ¥÷Ç¢I® ¿šŸ9yó½ÆšÍiÒFExj Zø.Cà™oü¦“/d ò$јޙ§+4¯qÇH5z70Éö‹'2p½ùôËy±Q „ …«8—°¬ÐYËN÷J£$d ãØÀb»w^MÆÔú¬,ŽU[êk"J¿ÅªNñºìÚoo£ŒWvûšžzwK¡4`•xå×¾Ç4rÖy‹KžùAyf–ýº ¶ß^ne|2%µo» !ˆ³#m#j¯}è¹òw«ÝY‡ºÄ ¡./ŒúMÂu¹z}#i•ˆˆ:Ö³9HzÊèN>{’ ÓGT1DG&+ç­ÌjAÏF>Rm„Â>š-i?éÈ·±"W£’†\EiºymÿfçtÂ[Mù˜ÓvRy¶ÛùŒ9N­%f‚ÇÕµöqm·›—j0Z l=WÀ·•p&h«äróØi8{ ÒF0”Œ½8:7kU4s*ÊDЯ=(A&m$ò3kôÞú¯]Ât\Ø"ªa#‰‹Û¦!± ¾U¤h JJÿHvÌlÖhv께â¥êŒmf8Ìp€iö²G Œ-$Wgëþ™"y¨­Œ‚ÏhbIRZíÊblzù†Ý¢´‘ããOø¦V&ˆ1a’NÄôJŽo/+…Æ;Û™$¤p¶cNl²xÖ(ËõSÁÆÃ‘Ê /YÝV>óiÏ펧o«¡°6}ÒµTzܽöà§ÊßÇùc"1ãØ[¾Hø›Þ%,‡‡G!t?’t¢áÕ6çOª®·ˆY–Ç/ÐyÚu1ØóK•¤ÉÚL÷Š®vß#äôhÈnK#W%—ÀMMcb¯Co. îšße%ÆW&.h©"±W-· ZjaYe÷Û ¿%â寯Õ‡äoܪ†Ê+룧‘•j'(à:r> ø4iÅ=Du=ϳ®[\ :O¥˜Äù4¾°&ºÊótos[©Ü™È¯^üTëä¡Ó e†z`‘@UÉÔ¨ÜLkƒ5ñ"Ž_ã)©qÚ/GPäˆõà ٳS¼@Ⱦü¥ ‰®7ß7‡zõ«žq®:¿b˜ }Ö+•0îì› ²Ü®üs:³rhÛp@éE/°htˆýÌA‹X/Ñÿ¨ÿØ`æ¸ýôÌ„ÏÃi&{?&Ó¿Üi~?å MèBžãðºèþLEf†öܼ°–Ȫá_=ëtíÍ®["äQQz+°ë¬ò-¡~ ñ)Ä^ePÐnJnøÀo©^Á¬kbüQá\XŸšp›Ä,¦Éz·|8NäçÕwE|D¸¯AØ—èÛƒð < ÙŸp`¦;œ,6'vü~‘ï¦,ÅSŸçs"qº;†Û|B¶ü†b2Ý~%æë»Òõ˜g5sãæûY¬ü±ÿà šôuO…³#ÿwiª—V¾ §ÔŠ”‘]b#U¦Ú^”Xº[“ßÎLI„ªa|¥jB©ðÉ0ÜÎÓýÄ’äQœëÕh›‹[ˆwçxÙä>êMiï™?ôvþÁ™ÐÛ[/YóÆÏöÔìPY_ˆÈ¼'6Úé•ò3ëŽ_Á$ªý04 Ó†ÄS7€ÛÏQ±ã\P-j4'ÇJóYsmù=B]ö/zpߘÓfkŸWÖ-Î[ 7·º7.¾„\]ÑÁ"Õ¿8ZÉwCœ •3Òl¼üè~%~Õ‹Ê1ÇÍ„‰ÐYÌ¿jÎF ‰(Ö?Úç%݈ ™ÂÉ4J¸*Îû¹õÖ'¦3ùG,ÎÚ43ñ©Š›R\7&§3©Ð AžÁ¢A•6ùÅ¥äj…ó3UxwQ?e(àÍìðàœp*ôÓzÍf)`N-G¢Ðyöy*<î¢Ï¹Þ— t|¥Û$Ä0qš#pe͘iž‹!^‚`X BèÁ†É̘,½Ôá?yµû=¤:¹jbT-ƒ#Yí*µ[ÒXrzŸVƨ¥»Gê~-EÒ2R$³§ì„ %òÁ/n^pèåHÂ;©.å|åÓwzl=hj^{ÓF[pФôŽ’îƒîÆ¡ÒôAG2/ ÄRÀ:\ƒhX‡žïá*{pIÂ/C¨^@̧å, â«|qô¼×ªV=×{§u2WR¸$¹ ŠP° ãɃŽùÚgQ¸ÀÄ^~ìcå_OX–xN+O¢á.¯ %RÓW*ƒf1kàc.¹sY”Ž÷…+ aäîQUŸ$je•rT‰íS|Ž1¬«­g0TéÍÝA¬Ôí¡‰À$wI.ù´eY²PŠŒªÒ9ºúD»£âa$óé0¥‰‰•aÛvÂùÊ úü€ùrζQo5¥ž¤Òƒ;^Gý"ƒÔ »D)ÇRyª_Áƒ©áï|\-÷×ø ß>ö+ˆV‡¿¨KÓ“ñÍ}%–$ÙËQÄïY SV’×VÏúËÁàΕA÷rËgQ>Y>У_†‘_'O¹)zÕ˜_õ³wo²+"q3g2nj>@ObY—d¯³ã÷Ϫ °-°èY§¹XÃÞ·Õk©gŒ­ŽvC"ÓÂøŒMÀA—eë ›§,SÔË’ :äP•l„¦ÊÉ÷Æg‰ #8¾ŠAøGÅÙ¤Õb!†j€ÐQh "v‰Å© Ú"Ø6FSrÅžyðPþ 98¢‘£ˆ¯U‡Qþ©|pœ*ŽjIê¿¦ŽŒëlP+X™%Ì"”{¦1ñ¨~ {~ŒóÞ¥}Bë¸ I¹ÒåÐwÓç—&^ôü2‡„š"@°õý1»,)#ÖHV®\`1hAˆÎ›˜-1*ð+A‘<'QäZ¹òr¼ˆAª°< Ïqb¤èhRÁÞŒòûFþCšÒe‰.ÿf[´M{Ý|n×’¸Ë/?uzÝÔÌ;b!Ú(þ½ï½Ëm4ƒM~'ëP¹ö& Œˆ1Œ âû›?/…縥—“ð¾~¾qáL©©îî’ª›ÍWæ3P ôìùçWë‰nDždñô! 
U4ýNDZæVF¢W¶^ä7çΊq+ê…5Õ7ð<(û*ÒFUí³c]Ü-¬ÙÀ>[Ò6y¥¸)Ûæ]f´Æ³¦©ˆIGª–|³>ËÑýêÿ•Îõ¢]¯×¾z€P‹³UP¥L.^Õæ¡›xÜåVme]vê§¿ò-¥Å ÑáÊLˆ¸Ç9\פ¤E èäÔÞ`I/©ÅüN1pà³±&3lT¬u+D‰“_—FbC@CÊo-Ṵ̂Ã9¯Ì ¸²%t=û¥q'íîOn6A‰Ó™iöbš9‘búí—ç*YŸlÊðÙö~òGÐFàFâÔ¢úí­íš íïÜâ)úä;ÍT}$x¬‹²íÒP}L(0l‰Mgú[c¾ r57a M™_7å„P#$+ó™Üã0è¥C? xºÕab7š%b“h¾‰·U(Y½Ü¾&Åt6anp,Š#Ió õ|ñ•`Cg#.û ž 8.AI/¨r@²„ÕeôÖ×VW}z]„퇺}ê@kV8ÐoÊ7)!~.˜æR>d­ó]ž›kgÌSSF`Ô&LÀ yS/f‡îgríÄÍ/7(ÂHÄDJ¨ì©¬¦ÐÊïB;ã9,ò¥÷>¸Ø¼<=g˜7Š?d¯õó~jÁò0ÒS›c¹Å¶Z©§Ä0–r¼5¼3¶íž!þŠ*+–-¢r{smWû“é½è8X°]ÉKÿÀ°ý ƒï.EPP¹<Ÿ4÷ä X«bgx=šGS„®¹ÙcÓ­éÒÍ£jå :‘M*X==)§äVì´è,¤±Ä3¿û’¿¸Õdoè>)ÃäX$ëWNA¢þϡ볓ÔEävU÷lÒneü 8í}ºj÷òŠvŒ:‚‡çñí·&§f‰‡ÙÐÝJÕ;“Žw&PA'ÃלC K ~RQÇ sE¼_¹J‡ä^¹í¥ g¬|…–&ÀŸF¾óȱ†º[û0ÃÚ¿J°,ÇofÕp¬×²Â~Sø+ö µÜÿ„K²ÐܧâšÇÂ9µ[›ÌöÆzæ_w‹[Ö×= Û茀ÞÓ•À¨£lml!I4ûš&ù8•”åCöÕü&¸Òv´›¢Â{Ú2ó¬ÀsOI'»þ»E/äÞ§Ž×3¦*‚ªÃWCÇìïÇ'߸Ë< (=ù¿ï*¥­‡­ª4,†ÎƒbGr¥ Óx#zÈq}i¹+xïMõ‚D:Ml»6È7@fµh«Å¼S}·èÖ«‡¼è FÊ|Vި嘻wR¨¶äZÃn Ç'\`­~j'õóD-™P–ÀlÂo$Ìžf5›láÖ±J߬„NÞÅšêﱞ*M:’t©¯Y;Q8gD³~ùâ·Y=þœ½¦¶•ÈpvÑú$ÆiO°¢/IN¢‘¤þÈ-­„¾ ²VšmJÁ÷^r±?·†ªÝ°F/[ƒ´_ÃxeâÏ ¶•³£ÓÉ× ˜ìƒ@Ü/hG¨Ü£;žK¢{zï]»u¹h–”[¿»_¾– QIŒ4v¦ZzMH^ ˆ˜ž:H_«cx×ôKè(àNà.}9°¬+y?4 X‹fsI„6UcpÎÕ*Ki`x¦BQÅ~O6¥\••¡bôø\­##oC2BñÊb#;ªÜFy8ù"*..î-´äûhÝ¢(;¦Œ}]ÒíÌÓ€éá¤ë¾~Iô­2‹‚Îö'd¤TÔ¡ê•$Mãóz›p¡Ó[†š¾‰—¯@Yg±ÏéÀF5ö7CäfOè&zÌnƒx,Z0vüõ73'™IÔùr¢ ý6 l‰GŠD°ޱ¡9,ð~ƒ3bO!·^üë»dö1ªzw˜zàõqy³‹Ï¸Ê;¶GŒ‘äÊF¼¬4ºÝ3ﳫx^ ¸ýô¶H CìÂu¶}êSÛvò˜Ó£Bk·Ø,ô!6F¨à 3T‰ÄÞÆCébG*,59ä R]ñz¿]qøP:Ľjý»…H¨ÊGöDöŽzª'iY@„$L³B™ßj;ýTƒü¼ì”¸ë!mºŸ-õ“ú¾tmûLSÙÊ*CN*&Njï{Q¾ž±+ÂR¨™Ô:OЦ\×}>åB”kæiˆs?#R¤°rµ+ËïòrÝè<½S ´ÀnwÊ<ÝTÊw1¢Q‡_åÊÌí ²Ý7þtj’… ü{ðZƒ7h\NÝfO7ÈòϲëÑ¢ˆÏà¤Ñ–'sBí£“Ôd¾p>_ȈösÚUÞ«;êбLä×7÷6½Vx¤ö#ÿ÷·Ðþ"êúáqÞ|—øÒm†x%ctS|,·0~« Ë!v›—Ä‘÷iÜ:°yÏâÐÿð-€Ç÷È»zS_ŸØ óéj•Gê¶\.Ì% [ØÔð!bô°û°T-¦ˆ¥æÆ¹%Š·7½D²¼—§™èÖT0/JÅ@ñAÊ£aàw]vsž ]1b˜Û$Ðr9˜:ŸÊþ,tÅJ‚¶¾ ØoÜ*ÓÝM}yf‘CËM­ç¾a@;û n½€Â/Ë 4ÞÁ92JßÐ š%‡è·!i›!ïµÛ¸;.r1{¤–uèbÎ#JÍË;ͦ•ÚT5c•¯h¾aAääÚŒ{íà¸eSšÊª”ÝîäG`E8×PàÑ»Ýôq};'A9òOܾ‘pÒÐÌÙá¡d'ô˜÷p;loã,„E·'/ÏÕ=èªkÅô—oUu3ŠØ¿Xœã ›Ú‰•ì-¾¢çR—a»°¹o³œ[w3Ã2¸Æžh‹õž uþ`geÕ•’dé©^?3BêßâÊ`Ä †¢6}8„çs¨&.óè ôšÂîs–«ÁæŠTöjtokVÄ6òøäœ¬Î—k»GžR˜2`2×6"„c(¶Õ!¢¹¢Àà“ÔcºJ¨KØo^¯œºcšŒùýgÀ5kÌЬœ6bÊ0i­¬JÇ+ÜD­hð›{dEÄæHÇïºGðb~IÕJº^WFÕ= †)MF·´þý1y‡F¸ 
©ç=È_ñ^¿sj'‚±Yà3éV*R^‹6_­^¬“3š&¿ÉAÏ>Óá?–E¥‚+‚FãäŠcÔu!zTM¢7´÷^ ØËÊ!¹ÉäÍ»yuÐ3Ø´ÊT6Ìp‡gœSiȰYúc1XMlq¦ôGì‘R`6æ™WS~ʵâv~•ã¦õŠ hl@û•Œ\7[¥¾2G¸ ¢KØá‚e†žäQ¸äK¡¼C!MÔáÇF#Í0ņZŠbµ7[gø±²` ÞXÑJèV6ɧÒn÷62/e5>b~B’KõæÔÈÑ1_ñÔðpÉÙJ¿w˜A2ÝwJM-I‹@RJ´ýŒƒË5J¤YA@tgGË•êõ²^\šj5šÖ9Êå°ÏQ° š?P”é°žŽ‘òèîIK­ÓTBå±Èû«dG×–ßø‹„»pàxSXVÍ1ÊˆŽ±2KžMEýø$>§È6¿dèÄ?æÁÉ{r,;)o;³Ã9#ikº·ÅRãš½Núö·Ø¹°r˜ÎW€íaÜ¥OG@Ö&u^̃¤ŠnÆ,e5¢Ë °T´ÒÚ‹6±ãd­BKПÖ,ÿ:z‹X.Æî‚‡ Iü¢øÂØ]=@¢¼ÕõrR5O'ôŽXPwÃ$ŒOëo=Ó—ïEäP0¶=ÄKu¬™o­–R=›‰|–[×Ù‡Z½L­üš8cµî’û«ùÊ6,wDjÎÎîsÈn¸ óØ‘¥×#ÜAa'WŸžhP¤“ÿBB«¦!Þ¸unw[‚(0)ƒíªo‡BÒKÒéøw§¤zúf(è\‘C¬>#_vâ!2UªëoÛôk:1„˜³³¹Æ™…™Óeú‰“c`é06ø²öñýM÷ðí,m+õv—èÈ è'ŠÎpß³“Ñò%ǸAª«ò¯SxlùÔ]÷4o™p@,½4¦w´‹‡Q¼ðªx&{^m)Lþ¯ßûê!à¤!–cki#„>‘·søZ‘¾©™w4eºñ<»‚ždœÂŽ4vß§êOâ.\‘–vŒ¤Ý¢Ó}+=‰7Bðõü=¢ rÔÆ«¨?’Rƒ2pÒrËPÕÑY¹&³¶ª“ ¶L¥ߓúBÎÕ–](­Ám¿Njļ¿ªAD-}å‚ç`•úeåðÇ~¤65Á/bÚ\EoÕâkŽüà ®³º­08õ º3ÐÛ™¬ªéÜj[ù>‚5ªWí.µÙšf/ó“eÇœ>ÿC51÷À–hd^‡ßsTúË  ƒ^,ÿà\íi²ÎÚõŸRÜAè}o¤]!Ʊ¯öZt¼C«IXü^_Ù2œ‚f—wÑÈÙ4¥£Uºe?¢ç ܨžÑ5òyÓáÖ$-…ûêéÍHùÅP-ƒ\7S z³„S ôg9ß^%)³ß>“Ä©\*Rû‹En~S£È„}vëéò€²ÑöN3WÝvr®e#d—:Æ~IíC•Àprc´íð>^ecÝA@ô¯üé™ >þ3*©Fp 9Lbõ8'™/AÊ7±{:mи›ù–WÖ<Ü_ddBæá|ŠK`âS®«€å=ÿ™†É¹ü^S¥·ŽxÉ!å>­à'¾°„¢'ì#Ã9b=:õ<6™aHþÆ Û…HÌe„ïcˆß¸ùL«¡ L#…†L#àå}dƒrU“‘ÌpA4\¾%¹ãÎU)伊—è8}ß'{ÿµ.Ú¿û9”Î’ŸÛ¡êl L(c§OËØ˜\›&”¨‡IQv—e‚Ö },>lF:ÿŠç\^iËDرù›Åiµ0Ì]âž~Þž íñ‘Ô7̲­»™ä©DТ‹ãA¸o)—„ó˜q[ï—Ž_Æ5WGHíß¡ßÒɧ3*¾Ëw?¶µº½×¼ºzÂ'!‰´F@œí›½¹*ØÅQò"pèë"੽z?°Në†wr0Ó™Guírì±¾-Í}V5u‘1NâV˜A[,ùl|ã|k¦kKÝDMA%wä['W@nÁÄ8ŠÆã¤ k?gZæ±4NÆ‚•½¡Á„ÔÌwÝHzνLªïëOfåÛ¼xg9·Û¯ûË3„€Øû•EÊJf&Ü'|üj)ÖÅAÄkO&!•‡ç_[Š˜'JL‚v?¢$˶ˆ¡†›¾bÈL\hÌNÈ^ÀßaÐ!Èy¤>]ÐýÊœùáýG®úȧ>.RÎr{MÂàò„kO4DÚ’…å¤×rö.|Ž’N£b>zi¦ûD­ò0Ó£d›KƒòÂöèàíó˜–p’ˆß#Ù‚Åi†tï^rÙ–Æ÷+Ó‚šK–¯²æ);f%‰¤á¸Q±Ńí”Zz–žJeÉM?t g¡ú7÷𳬅iʼnÖ=Ð| õø±xenëµøvÑ­¹/¸¹3ñȾ͡2“¡y•š£æÙUï˜$Yåø¹ø°ºSÃ(PYw÷ ÙþP›W…˜™§é»wÛA«v#Ý•µòv;¥²å¦ÛëÛòÓ·þåݧ¶ˆð••A²¤qý×,Þ½5GeÏäûRÔ×BÖä¶uB>Š^yÒpŸ6¡Võ\8¤¤ºvã |”nà õì…B‚;×}gGG% gRqç•òkз ¨ÜñrÈ<®p±Ý\R¾~7˜[#9Œ¿™µ_ iÛ)ÐGªeØc4’‹Â;éˬe–w¢(Š[ó'R&h‚5‰r…Ö• û*õuðØ´ªˆPv°Ç‹f,B²ö| Å€\M&«Á–ú|yöm)Î3íV¡ªÊl1mELŽáhžš_éõB"PpÀäð…‚á÷™¢íÎP'Cé6¬bàD*`µI@§“ØÖÐp²Å“å*Ë&ÝN­*–\½¦‚Ñ{µÈ≊ò„ô¢WÒéÒ›tñ}¬ä7ÚIÔñò™hŽ»ÞtÈWãç²YLÎR‚”äŽ> u50»ñ{5«dDå5aÂ}ÖžKÉnCy˜É)VŒ)>n= 
ëáÛzçCGUò!˜aE8»ÚÔ*Wh‹£ÈÕŽH-¤ˆÁI]ø\›må’õÂÎ{k[n±þ|ˆâÈoe(Óõ½JËöësA4×T4X_%m®27y.ï\Å5 OÍG>ÝD‰Ó'áz”òâû>ëo¥ì¡g?ý‹H|ÌÜð°NðWJY®Y· <@b0œ#LYèf­¸AÚj<µCŒŽúìP÷Àãb^‹¾ù‘àoX*)€Ækø°[{Š@ÂÞ‰Þ„oé |C´‡©4mUM˜«‰ÖÚØÆSl Ò)'Ìu,nƒŒÈUöêtó­oæ±D ˆCfÅv÷š‡Џ+¿ð5›Ì× ð"ÝÓ©¤¯-ÒݵÀÒ/ Anß·úµâäDqKn3ÜT·Ð?x|á]È>¦ ä‘ SÎxÌ7ܪé¯ìÍr¯IßµmðPæÁÅ«†à;ý ªF‡l|p É;LЗíj¸ó¥ÆÈ¥Ò(@ÛΈì˜ÐK-ÎåV-¾@à€éì^–o ke"ì@§dŽa:˜ Iöl ‰ÏþDåGöñ ÅþU¯‘aIESŽbäc7Š ðx›u` ³Ùà$s/kKÀÓ·$gÖ CÚËðú†æÎïa¯÷ÍÚ$#Ü-¡äKøëHÏŸ]ÎÈèý?fd-0;XYàqã:qÜJŸJ—ƾé‚{HB>›~‹³Câ(TM\lx”³Ø¬Vý1¹x£FøûSðtùÁ5%<<½!ä©Ht]fgþEÀÏn—é7ëæ 8ç¾æÌ¸;×ÉáÓÃóý¥Ÿ ÓfΚå j~†NOÒqJKuݳ7º"EnáÒn1k¶i¹W¹ûÄßMâœEäe8+ðúÉú/NӣǜùlPÐ]ÊÆ$fŠšRyù_tÉ O˜¦£ò×3&g¯,6(K=”-õ\üÂãèSÄšËZ¤ÙW­øydf P»I¸û굕V6Ò2I´FL|r‰QP#]vž7$·ÁÈ«òwuºÜÑñšVÎøtŶæÀs=£‰EeBP pª(Í‹bÔ[Aí»þÝò Ý«|±²a]J$ü‚Èï§ïR¶ã«ÏÈ÷dž‰Q _cƒ?Û—ãÒ4¥»/\–Š Í•å×}·#ç_Pðd^Â=¬BÈfâZxq&Z•š#¬×=TþH°tãHðcvïÃð ËlúÆ#©ÆWËHiz¥‘¿ìbŒéù£J=4qýF!ùè¡é;Y4rRÃõÛøY¡[4ÄÃЦéV‡øè«™î쉴„‘²ñ,+¥ÛH”Î%Þ¯ƒÌç${-9î]tu‰Òà;ÞÒMÇ•FÒÂ}°g¥—s™‹ZE]«ÜžðÊ,á Ëà“fõŸ$†×ÔÃ9‚¯=3Ù‹D´”#é÷ú ð¹Äê£À#S]N(0*š7e´¦ù=¦¦KÚ´ä’3ã¯-ïõ).gv=&¼4ë–ÁfÀ¬Éu7Ê%ë~542S#¥q õø§Ž…î‹Û¸…1#+6K!t±@f°ôÐå ™&90¿y?`‘Ôuéd&žuŸ,eDL^‚ òYbk¤Ð4: ¬"§ƒ–†¯ëÌ»–jaÀs¬áäÑz¼Y~òŽ úa8Ká䓞,9æOëœÐ—lH9IJ֫êôLøÖöÏ¿#™¼`4-¸š£éÑ&xâ´Ì† ¢Imp3oÙJ¶Îu˜Q°…ñÔð¤¢5¸É e¸øÝÐÆ0]-h.Ê5M÷q«8Ï_õć ž× Iõ‰ãÓ²Sí¯)ÎÊS®ÒMª>}ÝêzrN1ñôîçëÑ4Üõ5—úÖÑ×åÐ }l :/ãv ½äUâ¨Ú¡ªÈsUï°æãb½³¶³>±²XÃr/[ Ë˜ï¬‹å0¼îOB6 »¨†&;»åÇ:çÒ ˜¶O*ü2jr`ÿ}‡ÌzB Ôh!÷ÆÝ5õ+¶H‹B¸7åVõ×I×’B—v³'8ÚðOï‹î!,”Ê=x)-Aý¸BF¡M‡¹Q=r¾»A·Hªèò™¿¯ða<;¿ƒ´Òm. 
]ÕªèŠåÇ””îËrTÂÀ íz­vÂœÈ ç{ô»RñƒØiKÓª8æZ¯¦Š,—a•a³ê/3ÅK.’«ÿò£½4ÔX®#˜¥¨m¹<ÜÂ(á°SËÄbR!Ô±—œ¸£ æ[ß“\(H¦(%égaoP¨rÌÀ…;ÄU¸,`ð]弨Ÿ> 4­¸t­Áökh³-M—LV:iüô%ÃFç3@âÈ’ZÏå“þ‘"|{ HM‡ÉÀìâP,ÊÚQfhÀåE¯ªìÙbÿtˆ”G~¡‡^ú‚>I§¨­pZCü¯sr%Ù¥Ûwø"™À!äQ<_»”ÁZöHò3›ãÏIA† ütPº2…]îä=ôa*xe¹7d$×ÝÎ݈ć5NÈÎ:\œ«^_ª$ïGq±&‘˜Î¥ÎÌK’o¾¢€tP—Y\€"Ä@~)Lã&ØŠÕàuOçì×3hÕ1LóS—är³VÑbVlüБvƒÜcsˆ¾=À)]2Ç÷K›U7„j%[ ÁRáiRÔ› Œ +ÿöÏ4ep{èxñ߀ óÿøÿ‚ÿOL`j 4vvu°3v¶û?Žñ&endstream endobj 71 0 obj << /Filter /FlateDecode /Length1 1533 /Length2 8733 /Length3 532 /Length 9620 >> stream xÚí—UX\Û¶nq·à V¸Káîîà/œ*Ü!¸ww×Bá ‚\B‚„àîvj­}öJÎ>÷>ÝïR/´>û«>æÐSkj³KYA,@ò°;;( Q“Ör€\ôô2® sw;XÖÜ$ R6n._˜‡W˜ƒ qöqµ³±u0É0ÿÕ$r¹ÚYšƒjæî¶ 'Ø–æŽmˆ¥È݇ åèÐúë7€È äê ²âÀVv–î ƒó/#%°5 𯲕‡ó¿/y‚\Ý`R&˜$3¦h;ú¬@ÖœêXfòCê?—÷ptT7wúkù¿§ô¿®›;Ù9úüwÄÉÙÃä PƒX\ÁÿÙªú—œ4ÄñÅ(¹›;ÚYJmA®•ìÜäí¼AVšvî–¶ksG7ÐßuØê?`cû[€ó¥œ†’¬!ëïçß5ÍíÀî¯|œÿYö¯î¿ø›aÓqµóqqpqa°Ï¿{ýar`Kˆ•ö@ðñÌ]]Í}0`OŒø~@€Ø ä yÃŒ99ÀwØ-ØHÖWŒ¿¶Èà´vü«ö7ò8]< °™ý=š—yyœ–''óß>§­³-ü»Äàt†Ý±ú]‚-ƒþa>.§»ä7alë ú£ƒæñpý]øKÐÎó˜‹l8ÿ0ÌÄ äù‡lœ ÿáÏë‘úM°ëÒ¿IÀ)󛜲¿ æ/÷ ÀìåÌ]á7Á¼LZé7ÁòT,Aý7Á4þ!AX‚æo‚%¼üM°­ß›Â«ßKÐý‡`ïçï‚åYü&Xžå?ä‚Zý°DÐø×fü°oeóÂlÿ@ØŒíþ@˜„ópüaN¿Óÿ0 ÈÓpþa¹®à_ûÿÂFáþÂ4<þ@˜†çÓðú°3•Óû„åúþÿûý––†xû±ÃNvnØC Â^>®€ÿÑhéáê »ÿ}tÂN‰³µìL¼A–‹sK‘0û hDU \Éd52 ¼´MK¢zSÏ×.ÌÐ…$xÇò/*.,ßÞëßÖdàüDþIåuOæÓéÿrLþ(Ø%1söñ§§ÙÏlß62ýÓl5Ÿ-gš«Ð=ÜèÌù>¼Æô·‘ªTƒÒ¼ãÁ½"M&ÙWÛ¨+Ôp½FžzsÂøôå³u"Ãß3ðPkáeºTEñF®zá¥&#­N„ÚG¢µ².Ÿ@:=ðoKÐ SM­ÆtÍ®TÓ}°7 oG¯ðƒ^WÆlxªeÄ×Ý[­PåOcsfÏØÜ³ê»—êgòK o4ОóV+D+ `ÛÅ´w•RëÀ5&60¼™hY-î´53T¿¤-™#·UQxjŽò9øþá+N|ûG@&-t§)‡™îI™G%|1a.\î¡îÛ,J·ÄFœ_öòe QÖ=x4št³[ìJßšöGÒ¥C0øÅlK]duñŽE7ÌÐåžÅ8ô4UË¥à©ÃNÉÁXe…kôù@ÕS?þàKèY­R¼?ÙÍwªÆ‹Ql®0qfˆ8"7™±Îk:}½˜èmv uÉÀ¼dSM#ã„4ÐéF/R¬Põx™¾‚Ͻô‡ûkõxL”Wox_-‹3xõ¦zª×鄟矠É<"à¬-ÂÅYcoaæÌWºfuó­çƒñÈÚt7PdŠrR>§¾Øt)àÂ|Ô·>ž[20!yA÷µõ¨„~æ‚€ÕNÎçF¼¸`ãÓÚº[GyË_\á¯mDwbZLC Þ3Ôèû‰¨ûoà|mÙÃæâ Œ«\¼åUä%|¨ÜXÉwTÈ?Ì~VøêÍ0ši©å‹©ÜþïñÀ À›Š°ÎÄì‘k}/{vº¹¯ Ѓ0n½È•Ÿ/¿T~„§c};$xì8‡ÃêBÀþ†’–Çßc›Ù™2e¶»Çkåzš+Ï>ú ÉÖgÑÀ”0¢L·0‰Aó¼ï$Ÿ„ÈX·Ù÷ŠÃÛŠFM¬¦ëÚ‚*0»„…½,–[½¬ ¡ß©—çz÷Ä=ç³ÏX[>(¤^~…G 5;Œ{kc+°³WºÉZy’NÄ¥è¯:þÃÀCÚDwå€'xkNÓ×`aþ޾k^²:4×RÁÈ¥€ân|§EàöÆð¢1sSÕÑR.—µ6Øó&äKÞˆh¸‡ŒR×o‡\Ô'ËÖú:I:ÏóZ-ëʽ¯”— k+‰?Q‰ ypÞ O×ë7¢ã ò+I)§»¾b` 
"¾C—}õáHû}¦Ê aùÃŒr„‘15ÇPÎÞ‰léEØ#ÚãÆ:0~À1£Þiá[ §©¡ú󢔨ð¼XÞéøÜÔi·æ¬¢HyP39 Ø›t+lÈíàÊ爇‰œR1Ù¾Gy®´‹6yžF¨Ê“ $ªô»ÀQ: nìyòÿâ¿\È3³ø¸e/¬|qùó“£öt•Ç2òg!B¯þ9Ê^§°¦¾!”ÍT±<ï±{ã‚öĽÙk$µí'gŒ™^!ûúæ”Úúd¯á·æs«69œœ–¢\xê} -펱¾€ŸQZ@×fÊðfË È>ÖBšt§ˆÇ‹2BǸf˜4˜îüÃ_ÈîíʸØsñ óÙ“¥é}MsçÕ’0Å ‹í±´ÊC½’ðyc£;kY ’¸¦‹ç:H‰Ñâ9¾¡t%×<°é[qˆöì›yÿ\'£PN MS‘xz9³Ð4Àí±þ¢!C2ÓU°¶üm²­GúîšVV¼À´~Û OXJÞa_¾çÀÔM«ºŽç6ßÕA@õäÕ~\ð‘§[¤Üiã%rCiN µXï͉–ó/^âž—¡ÔV>Q*K^48hJÃtŒ(­ÃóZÕ~¤¤–$ úzPÿ »‰áêË~q8ÃB;2`ƒ4I.½[nD~‹ó°/Æ@Ö\ïä÷ ê@ÒŒò“Æáù³oÔç¡BI6öï!/”rx>.ÊfK÷¾S·´•šÔ[°FÅëÇ›º%ª2¤ÆÖ=[À3Úðê­1›gæiÄ„KÌóòù®èÀ£ˆ‹Ê3D0ÅfÛ§0i½ç÷»iïÊ;ªï“ÅF˜º%¨6[$ȵ‘D¼>PÈ:UxŨ­„M8@]u؇Cìñ¬Öj迯ùÞ›zVV/ͼú0öóL)+ ‰CéZ\þ&ß•´Á“8"s[—Ùtõyž¸Ò#±•èí=ÙÓ†Wf ÖY§6½)øÏn«®¬b ÎjÓ¯ÎFÖî˾@äi1°žÒjpwÔð½õ/Ù œ èLkŽ™,Óô)Le§?懕ÊAÎð#ÀíNžÜ‘®ˆu…í¤´üO/Z~Z¦@¸€•È ó¿,’;ÙžT´š9 HïéÕs­KmFÕ³ñxÔ+®ST’Vý˜s,'@’8Ó©õ8–¾2”+:XÊ6¨»ÜéhZ « K÷Égý!zdÉœ4Nx¥}ñj‡á£Ù¹l Þøðr QÑ“>:H²r1_+¥è¸Ô¦ƒ4~„À ˜²–Ñy\…Ìd—;ûÅÁ z¥M0âŒ#É™ç4/æ' så ®ÎIûÓÖÌøKöSOÓóš¥áÆÄèûÛu´¡>hèáôͨßÞæ®7Iø :¨ £íD÷ž ¢­[WÀÆ¡;?,úZÉ©Qò Š_X«`þ+n´Ê¶ÐÁg=sÓ|¦Û„iG±L9b pP¼¥ûèìÑajk|ëX h{Z/yK–|$RphRóõàCW†Ãc:3ï¥Ï’ÑúaûQ{1NÌ‹Ý`ø¸›[îP¬ü%‰1$¶šÏšõæ±RÉiÑEÀ³»+]önß ¬Ž1M›½9œÇÛ ñ3M¸‘ŽqLµ&ÒÏ…VÈÙ7™G¶?wQXu|§hb¿[Øs:ƒ:CÌ^†/`ÉX3ÍMß§Ç *“ Ü«E{I6vÙÖÀTñN¾Ïdf3ã Wß&Ðj}·{-£eªˆÚ’%¨†ì)v‹¢L¥ï¾Þ£Ëtþ‚–lè;'›®\…Ê;ÎáGúeÈ¡÷­ùêÝE¨¡ÚÄá,¢•aJ ö\àªÕj߯®g[Z í·ή_}²éCpOMU©ãiÔâl7?‡è~åŠT Ý)¸P{ªs¹Ô@Y÷cè%½÷v}KoQá@ †ãOCzãõÀm-®­+A½XàΛݵù<½:³ôhÉÚ[×iÕ»ÄE²AÇÊT»ŒXÞÎW˜M„áN‹œq8@ÛºÄüa>ñvJœï‰Éuãš*‡— ‘`‘*±Ú’l³v@ÃB8k=ü‚R5?É7ÎoIw¤(1?÷ ³bÊ›ê­ísñzLê7¯û¤@”á³%Ü`ÖmsA‡—ت¦X|„ð÷е™#;´ubå }Ãêý*I j·¹‘«6!ø^m·qY!_É_ü!®Á¸ÒXgRWãln6Ý}€t„J©£tL|Žëé#e’&vFÓEô-] ÄڥӸ•ªxJµùt*‹÷Ä™í7ùŽ‚Äܧ O ^'5›öœ§Ì[,oÕôë´?T‚,.|Þ±)µ»Š–š­ûõ§|{¬õÜmZïÔ‹éûº“¹?¬5~BŠâK3“4w³¯0[ "Ìò§•p¯¶lÍzµ¸ ‘pcÙ@ùèòNõ—J̹6d¬&à°+3TìN¶d>pc|Œt)>Ìkw£M\Ò -ÆNY÷ñÃøe_vÉ¿Co¦ J{bŽ©ÓŒY„íE±ÃJ[“<}ËBŒRG`ÇúËAìjCŸŸ5#E}CaÑû¹Q¿˜6Ä£Å@±éã?KÉPZÑÍ2È XV èôÏPºHJlò‘ðjÐh}?æךcšåõÝ–d¦èŽEºnÙ£Í “ /` «1ë'AR²%Rø‹Ãï-<^ˆ\jbYªÊvü"OѶë#•1æšSvùúñPëÏ@'ÏÏ…mzß0o(/öqÒatdh×~çâŽàÛnßW4GÕ£|»§KÇ*çB"Ýô*5hÐ׳ rœðýý*Ÿ±KF+ä܆_¾/*­Š!×6\ó¿ÜíxML‘N³º¶òÖG`Ì(!€ÝŠ¸í¨½Ô°ÄÖî‚óÁLÆ8£Jë>9P-ê#´ùàe•æ þw¢‚*‹íÖOdäÌNB&Tì“¶QO!ƒç^_\A?#«ô=^\V5y›¹"ë˜ 
sZ’JdºŽÞkòÖa¹UOyª…çìŒ4·|²ZPj!q-À„X½|§C=uaÚ±=™XÞÆK¹ÈÕÑ:¬¯¢*yxb |*tO$œ Á±Ò²ºÛ™7ÈÕ’®ÃKŽ{rd…ਙ²&kZÓEÓ‚eL#?²¸ˆö*5±y®,µï è«¾ç‘ëNø£Bȸ.°ÿNÝ®l‘ô Eqò1S¡u­h¤ À”rNžÈgBC:A+rÝt“,Ù®¯ šv1/ZsºÍJú¶ü€:ŸúúÅä†á)¢w¨]©ê¹SŠè#À‰O_±¿çÊ|5ø…í¢Í0’x`‹7•v©“úlaŽH³!þvúÚ°iBÝÅѸ$ñ+Àpϯ¿E»Ãëfç×Ö ª/”¨ß¥Ú¾ãI5é.¬D?Œ¹o¡ƒß¬UôLê Ôb ÐD—³Î0 M8 n+ß¼c{c¥ò>–ð)“#ñ[Ö»7Ÿ„ß]1’¯O`¦¼pÿäK¸(‹¼‰ÈL;„ã4Cþ˜5ñ+<~ÛXÔ&m2Š^·ÆÃ†L‰FGù›£ê .½ü½©xk®Ž›š•(dGÇ]îyú\à|¬€üÏhÔÓgY4Òùä1ÇýÍZÑ$UÜvÜ;1@ª¢/´Hó KºƒˆTk‘bÇ Ù%“ò»¯”z{&à8Aݤt3ÙZô\ògªPŠ>Fyãyµ<¹I¡ÔñœË»*e•u[ŽO5i?hRá[CÅ]íª%B+ë ÚfÛœßàTvSìk´L[èÙŽtïL0Oú=X˜Yæ8`| ¥hN±éw} vé–i€?“æáa¿–CæõxðÑÚâÂkñ*í"Z¾ |ýœ«ün¼wÂàW¢fñ侟"'“óL=ô kRW˜Œ¯µwÜ"|vgg”D˜¨Ñ>bw;ý† Šú×b\d²¤û"Fh~/ÇA› Ñk ýc±‹=žìwe3щ^ìªJ‚[S#©Y¡ZÆžÁª|ööLF=Š“ˆÉívd§€`dƯ+ÝëîkôAÛœ ­K¹Á¾ì­´yž+ñr—-ð”B,‹ã§‡W?¼^,50XrÑŸÇàF«˜£ê Smc¿sd/THÅÙ^È@µÞh)úëyí&ù2Ä]•!OääÉzÔÁûq9ú'hi‘n¤ž(çv+=9"îZ»{ B¯žªÞÆ’{>Ïße÷q貫·TS´û‰i÷¯Ï¶ ‡¹QF¢ ¥ŒŒb9Ú÷ædiA¾ƒ!-˜×·0JÅd5R „¸ÖhÆ|”ר¸J œ¬GÝÓð«­ÊŸº/.É–ˆé)Y¡ÄgÂáàË®[Ƹ§€Dc2ĬR¯(3Åðð‰ø¾p˜k•Ü[†§Ò;t¡·Ÿšú¤pGÞzÕ¹i™Íª1"–$»ÕÙ^<¾Ä`#³gl¶‚èŸÎ[)HŒŸ «Ð+Ÿ¿Œ`Ióïû¾RKÐVòªËá 1[µ»+÷‹¥OeüU1ÍÅZÏÍ'”¬›Õm­#5ªRCŠ’ªž7ª'Á¡eiái©"p²ó îrÑ}½fô§ð©K¸ïß§äPeÅLïT…½Ø‹QZÉM5å·™-.S ¨I•~/+¡­q|“ÍÌ5œAn@²\6 ÆôŠ+“D^R½Š—öÐOÄÚÌ÷!hwéIŒ¹hå9û9=£S ÑÔ¨ÎLw‚ZÃÏiÍ<&%jõ@(jÅá{3Šè{ô5rÐF 1¤Y^»ëf¶"4Œš²… {¹8U*âNÕJßÄuY›þÎÀÌ­ïwëTÖó§6+϶‹fKÛ&:×Þí·Yu:t‹”E¶õÂÁH{XHK˗䮄‘’n‘@ÚÄæ0‹)l‰EbÜ¡Sr#±3>éò—]¢ÇQƒv„2 €x&n+Fª¡œ|KC+6°¹¯ÿ‰¾A N±Hz0´ÿ‡žVhô:;Ùj§ ²°šmýµï×!ä’´,Š* |›§/êüSc›LŠ˜½Wº\”;™CrÛ|¤*Ã5-£¹JWÖc£÷¬VÜëwv.NH]‡8¹Ž”<¸! ¢'­ÊU5‰†×•™&õ§†„ŸÚ>§ûmò‰AǼ“i'‰j™ë<¶kF~™(R÷Ë_®iGiî¥ßÚÒ# v—½*­dÆaØàËL<Û’Aᡞû‹JQk9ükëMɨiƒ˜y(´ÑjÜöQa7¾¾=Ÿœ·Õx.”\>ý"…ÇHMïìYÌÈGa¤`Ÿ#¨Ô™5­´½Î ’A|7’Ì}âÇ‘£s”È»ÊQ>ö+µû” s‚—ïd°µ™}O#æÎe­ÎÃî¦t „‰U9µk}}BäÍ Mà]Æ$Þú½MŸï»·3ÛçnXìË+‡PX‘37‡ÛRMnÉò­áó>;ŠM•Î^ù²‚M`iNP1ŸÊ΢E¸—Rînä÷gȘ W×2ö\#Ù$’žÛ³û[xñ¨üèÜÇÒéÔj*4•‘ÌGC•èÊ”X^ˆ•v»žËŒŒNê8CìSå5ž¨1/ÔÎ$‰/lì>]àQÖšÍZ*R{¬Ã°¡Ãk›O;=_N¾;EiÍñp57þVž %…xNì…#nç‡WùîâhŒK&ºÝ™5ðZxú@{»u9볟4¡q%sí_KµÙ­$)|—rÙ‘˜,e4*µòs‡T-p¬]ŸÇ £ÚˆÓE‹Ÿª³)âx3Пµ%j}ÊTZb ¬—^Có¢’¦ùb½S~sËei¨>fúg¢T¦‡ïîƒf³<}¹êÃUþ¢×ê¹ïa|§Tju5VÆg½ºuÓ-Jò3ÿvl‡®©]%³Ëê†Þj“Iy..IF¾ã,i¿ÍY‹ø³”¾°iZ58˜‡5jÅó‹(.Nl}Ê2­êÆu?) 
9>H&€1ŸÉDº©Hƒ*¶{:‘&'ð§Jýs†®ð`­<.Á¦^_Ûk½þÙ„eS6Äg•¢È×F³ÞxhGO•å„êÆrÛ _Öú¬ÝÞ q¿2/‹G¾#êMAmnE­ŸM‹8‚¿kÊÅRG±L` …Z°vò ò¼ºÞ¦eô‹ß%µ‹c…Ô'ñgû„)l4rœEìP†SÇ.˜vñx©X)”žv ¬=…Ž}ÿ>eðV§´dHMô–—AePcíüí‡j¡ }>$wv“[qþ6ïùËúqÿ× ×ÔrÃâÍO™Êà€‰¸Tλh0 ûVL†¬w­µs=g/²?ßø9¡¦—„# Uo¹„€žââ©ÄáUþY$º«pƒ–E5½,aròfTjyr)°nØü ^ NB¢PPs¥K9u¦î7ÿ¹À‘ãÓü°ai½_ð’Cµ4ôPʘ©Exyäg‡¡ê —B¶WoHíʺ8%rï¹PÇÁéZW{KðMgIùdæ2¬Ú®±]` 7¡´j«¦‡IYBW‘Hõ+êLEîd0[„÷Ù Û*uûG8½nÑ®¨üj\(…ƒ–öeL*_aŠ„¶(¶Ò% n³'ŸáÃ3ÿ ¯ŒFo©`F/–$‰_LúŸîjúÍÍ…m:ÖÞÝŸÑ„GEÆÔ*“õ‡U 4'• "]ª%'Ö¨GK‘ï!»R8—Ù„¦ MŠèÒÒ“ãÊø­þ²$H4Õ.xmãGí[]‡ bØlДHᅵ-Ø“W«hmÇ´c9·ŠsÛž“ÞŠæÈa’”òB›×´A•[æ|Æ!îˆøvGZ”"Áã#Tl=p퀆¤dÛ¹F¿ÐI⌉U†ºÛQúFÞöÈÀ0ƒ-ýe4ѯŸyxâ™Û™ôµÍ}ZÚ¶tÄÝoPPYºâÂÁѪµ‹›ÑMOnñ‡É—¢Nº®Ap6²¿€Mc×6ÞƒR!-·2&~)بisŸÎ­ dz—i¢³¤U½Ä&ü>_ö¶€à"êÚ@àÌmÍ›`p#í"ÖÛóYyà=›K5˜¾v®‚4PÓÈëx7Äè±ÒsB–Ùƒ·A³â©49¾Çª> ' Ñáez¼V›„“Àö.«};#–y;J¾‘uVéÕÏܰôð1zx|,ƒÀ1N7îS¢Oekê'R;,îdÐó$ãƒB¥¬æÍ$¢QÜ)ê„TG_Øq€¿Ëòc*ܬé?†!ÒªOÀCŽ(¶ÉÐí,2>_>z|ÔËçÙé¨I|B±[ñfìøzcä_g…$κ‘ÀQl»Å剼&·ìˆ9Ow‰ãJùæ<ølI>.¸MÿÔEÿYfœk 6ªN™®£‡£Trdº‰¥egÕÆºÑªìIë!'Ì~•À›¼Ø´éÙ%ï5¸Ýç¡¥=£c×Aå« !^ß$¶}w©’1ÄlRý\ðü½¯sNìúk—N)x- h9ÌÄ lµ¿™¢˜æbð\fy›ÈôÝŸ§×ªiÞQϹ°Rn´.‰ËȰµ-,û„As1=£/ó aç«&Ž•WÃ2ˆ·¸îbä-òyE“˜·c!eûW¸ïˆ¼~Ï[}¼ÍÛmƒtÒj®UI%cuq¤Ç_*Î.‡–9PµG± È;¹ZhÓán¯åW;ÞÕÔrzh¸ŠU ¼h;ç›ÛX%Þ4QG¡ŒÎ—¢QÄï©­òN;(#/PŦ{ì5“Þ˜%ÛªÓ~a”÷âKäÏÎ6퉚~}šñI…UñØÊ>½ Mw¾deßé @aÝÃ…Ê€UE$_¥H˜pû#Óh:½<è¶?>ýÈÚVªBùZ)ãÔJ÷ÌyªŒ¶hÍql³¢˜Z`)­å5PVÃ(C¦‘€ƒ&3Råý^aeØ#~(kSúŽÜ <([(J÷]+Åžçn¾+D¡Ž‡ò£Ô›øS¬YÄBó=®Moí=+×$—•xê7Ò/Šr‡¯ÅmàɃ½‚0Þcì¡÷ýšdÄ ùZ\oËc\峺*þŒ±s®ZuQ7âöФFfW»Zü£”ùQ߇,B8+°Eô‡í~ï˜ËH¾—Ðû‹ýNAñBì~‡1³CJá^¾.&6iƒöѓŭp&~bèàój0Aˆ¦XQƒ¥È‡Î‚û¸cÝ!± w»Í?¬º1ö~¬SÔú|žûsÐç$É&8!ÌÖ5˦‹Ÿ\´ìæ‚“2Ìü¦A’> t¶WyZóøåy!êýªÙÅœ£TKiï9 c…uõßbœ­h–pKØ“ Ù‹Xß™ƒ|ÃÖ«¼ŽS.~ÝÖ ÊÎ*.°xOD{'.§jª@Ìæ‰L¶Yy‹%\Eø˜„”^6/Ò©“’°"̆C!Ь$wÁc;êKÑÝ©„A¯G½×Åp@m“¡æÇ“m säÕí»3NÏãGâQòEZƒËÊ­yÕT;$@© ÿx>ÿð“/TóÇ!A~þ¶o»@~©]$T–óÙˆ³ƒëŠð/$²³‡Z¢‡i*«„ ý¢=jCBø<²^ÞÈG}¤/–6hÈWOÓM#eÌTDqÉýÍåõfÍ œmCR8Q¨(ô]”WÎDMýÐëL¡ÅS8$‘)ç9ów=˜Uw\I+Ê|ÏŒ‰Mû1AM±ñNŒ;&㫼l…ƒ…º5Û•ƒÖù|oåÐG¦äŒè{ŒçÜ ê»PïR6M0264¶n&”v©ø2ç£a›eƒ 9ÍdfØÖ¢[+È¥²É?=SÀÑ¡˜$ìí+Ö¡¡~*Óª""R·‘Ëhè6ßGÌÙ…v¡7¯6n^¾{_m÷ˆBÖ±ãqÞ=è+ð¤Ø¢TéäøÜ¤iî3Avâ/MWËöTä„Äè¬q^ŸW´ÚÏBÙT!£˜XæìyŒ T_îm"暌¶0¡r1|±ÊgæŸcí dãV Q9í>XerÚâ'܇Šn /ÏaÖÔÚ&’BpZ 
µœ«³ ÚðaÛɃy¾o«|,iù8Ö F©ŸmEþ8x ÈCŒ¡²ž¡@QãÑgWOÚ³ožeçZúœ²2+ô‹hüÉEíu!“ø ™Ë¥ò+ÌK*ñYÉ÷V8 ¥> stream xÚí—eX\Û¶¦q'hp©àîN€àîîV…Vî–`Á ÁÝ=8.ÁÁ‚w §kïsïÙésvÿê§«þ¬wÌ1¿ñ­1ç\ÏZô¯4´Ù%.V 9g(;7·@ZUU‘› »æâ §—†€,¡ö.Î2–P€[XX äðð¸EøyEø1èÒ.®>{[;(€Išù¯$A€¤bomé Pµ„Úœ`Ö–`€¶‹µ=êÃZÍphÜAOƒ›´·†¬@¶öΜyRt¶qþ+ ôpýï!OÄf Àô·MfÌ$ÐÅì‚l08Õ\`Õ@0/ÿ7lý§¸œ¬féô—üßúã–Nö`ŸÿÊpqrõ€‚ U âüŸ©ú ™Sí=œþsTj ¶·–t¶ƒìÜ|\|ÿŠÛ»ËÙ{ƒ€öPk;€%Øôwä üO'°þýíƒSRÖÈ@]žõ¿–öïA K{g¨Ž+ÀõOößÌýÚ±÷sqpqqÃaÿÿ¾2ýb²ÎÖ.@{g[¿À±ôÁ€m"ñü¸öÎ@7ä sÌÉáì…MÀ:°q`üµ®°¥ç´»ÚYþþW„Ài‚þàpÚZ:9ýápAà?rx`2`K'+à!'ÄÎåßÌ/àt…mà?!a§µËŸÊ0w°¥»Ý?˜Û¿Îl5ÿƒÕwµ„@í-Á@{›Ç¹œrÿl¦Â?» Ŧ ôñ8•ÿ!˜QµæQýß$« ñÁ*hýC° Úÿ¬‚οI6ÏèŸþÁ–’ôÂtlþÀ¿þÂÜÙÿ°¾:ü‚NǶÍ8ÿ@X!—?VÈõ„IAÿ@Ø{þ°;÷úc[À”½ÿ@˜²ÏSöýÿçY’rñöcçå°óðÃö&Ÿ¬ã\ÿ[¢µr†þý¼¨ÿf{Ø!¼AÖ‹ß]¬EÃÒšß–ÊNU 3»S¤—†uE4¦‹r®ŠâÏBZ&u@Txxö•¸Ž•«96üë¤Í’þC~Úœ xCØ„ÌùÚ¨§Ö‡&fùƒ½8NÙQ»‹Ô[®-¬§K´Wkž]ȵ*ûÜ‚¤ÇÑHåóß/ƒÆ×1?i·?`˜æv¨¤‚ôöÖ¾ÞÉ»º,›d\<-’ö®Ì8þÀNŽÉUEœK{@çbö¶ÙÇàÎt¤^4ÈVÒA£˜ÕËKÀ‘‰½J7okUV=Aû9Í,üóU:ÖŒbhÀ}EÝkòŠ||ërr°˜|1)<¨®×¨ÛIܹçÝVf(¾cÝX ·kK*½–Åí#ýÝ{¤_¦÷÷«¿ŸcH»O­ý^Á‘Ô è³ eJx2±2q‘"‹zehi—¼ •”PXÅ|3è4!±áßv£ 0ž &ü^I»Èƒ±/r딖ǦjÄú©u(ÉDÖNذßùõFÁÙAwL&™œ´ñªÅgyÖ᪦/…2UïL‡Zåä éZÑ\~J¿' }‰+i²¥°‹[ì™ñN8²ëôÇç¿»¥¬ÈžyÝžü ;Ôr\*@ˆ¼FîÇq¤Ãâ't‚Ø¥\eá3%TÕ04BĘÔñg×Ls¬,W.ëäs.ŒžZ!\wƘçßùK²uÀ&’¨eRºba¤7­:‡â&hÜXp@ %>w÷:Cœ·jný«›ó‹šØ>͈—jûÅžÏóQF™cˆ`n,ü&u•õýø~½ ï‰'Û:f¼è¼[‘¾2²n/ü3¬³σVÌ šA¼›´¶7P)u*·.L÷úºÍUå–¾(ssg·ZÀá¯Ïz< A"˜Cç“Ì”ÿZJù[]–ûBÇMÍDGÇ›@Œ1­ÎAlø‚Ô1õäØëÙñÌ2ƒ9u¡ÄèaF€¢ÞB♎³ÆCmÉòz"íÕÑl%ïûÜe•<^ߨ¸ÅLá7#27‘g_ñЕ¾¸O¡ Ù° •±æUóïysF(]ƒ_žÎß¿ŸZ¿jã Ô^ˆÍÛ£¿KE®ÐïtKʬ,EÿšÔ[Û„èÚ‰‚NèIÿî=›§]Æ Æ_ Þœk;:f¼Âµ 7@lº¡ý^)˽T*”Ø€•˜"Nöò¢ &À¨Ì*•öÔò0;`Ì@²ËVŸUmðÃB"½ûBÍ!wSîé¤79ˆ¢˜)EµÉ óÔœ¾ëJÃ;âïõ©ÙLIŽtÑ ¿;WÍi½WDóGèÿßÛOõjáŸ1QúHúSk;®ÃŠ$ÒÈûçÉjÁ¯<õ°‚…7Ê5ÈMˆÜB1ù{+Íì‘nh•見™5>®P¿e¸'²ìjÛ¡†Ø·? 
÷y„¿Xº¤µ•Üáv¾ZÂÌ{lIŸÌfÜO?wϰ!£Ü»åy̦S¢Þ«èwÏAÙ] cNp !Ý];šd‹7ê¥Àžc¬ ÊißLßkÍ ÷#¨4Ñd9]õé\¯þèYBj±Ðçz>„­LYåàÿ#ci$¸€Lžß攉 þ9úš¹7ãt%Fp»–t)(o'ô"oaÑÜšÚê¤H1Aý›ÀFPãI”Érçʸ rÚtãIlSf⑘[uc‰[hLQÇ>Ï›*}hú\JZï5 ò¿Ï<•­%{Üœïc·¶pÍ(j%7ú¹Î 21‚ßF9{á!÷¸ëµR!-õã Ònñ%¯Æoª‚güu‚Àñå÷£&†&Žö•ÑÏx{óÄë5¸hµœN?…[Ùà{ÛI…Íc]`¡U½àVŽÐª(9…º ¡Ùêä=Ž–€”ŽD²´Fßi¾Øð9ÙwÐô®È{E`Þ]‹H+=øÐ’Cƒ¨×Ûq‡º’L¤xgüÙÝ´?ÿ€Óû‹XKÕX~ꆳ?È)Лž9cŠÎ6Mq;X—cê“©[XSŠ3hÔnì\'8Vv—0^¢‡/‹‹§ÈÚIŸÕã|Çâ}J¬òäöŠÆŽG·:JܷЉ¹¤ ÑÙÒEaM,L>Tß Æšj3úVòÜ+M2#/ ”lü,¬p8~2¸Ö¤ö ¡Ó„Í^:¿ÔýݨæÚ6÷Ö'ëµjÕ¬ÞTLL‹AÖ«÷×$ž3¦Õ~@Ñ3†gÅáðI/Û~jèé)ÀWå\mj/ÏU†Cål‡&#ŸO@ªÝeƒØ~Ú3ýÃIËmÜCn¯—W•Ás4s Æ—þƒ4'ÐÛ†D+8$_4Ôbv™+Îð€[ÓËÊ!”×±Â}L„‡•–)4-é4pT÷ð)3;}äÖS‚\4‰Ä!ż¥ fd·<¢E‰1ŠŒ4`suÌ?VáxªÖÍØpœÃ`2+ÂÓ;» à qšBE•å‚G¬oýªvÛ~‚Qæ•Hš½øR4cö/ÍIEf±Üà]ïq_5'x”ªÄöŽ1é¼â¼û¢V-š1ºÑ™Ç%߯á+¢Å#8ŽÕ7‰îàìì è½£„äÓ f%(õ±Ç…×àÇßÏñáúSi¢`Ϫð⌾¡1ŸoЩFáoÒê÷]×uEñ˜®ØÑè5:A~+Tñéz1Œµ¯ÇV³4®ºCX ³|¤ìñ×FÓß›ùôF‡ Gùµ!£‡ë@Ò_ziŸ¼Þ^Ÿœ¸Cè´û`&Úù ¢d´ëï@yÀþÚ|åà·RÙù8f`‹Úþ–óÛÕî+J×-–)èÙÆ#.ü`˼¦Ä{zÚSÏ”ö›<œ½d&ÃVoÁB{‚§seLÂsµS{ž}Pöx€¾õ¦ˆŒÛ£ìb®a@·n޽pVß••DÛªúËÃ*Ÿlrí;gKGu›~<ÔŒüi\³ê@l}*϶…,GÁjVõGúÂv ­ŽòÈb€>œ)]`ÇÑßl¿êf+2»K-k¹á„ïw¶ãž½ Q ‚y™DjïÙ…æÇ{ÌûMè^.±Ó¾Ú¦ W±»Wù&&k$ôÞá°“e˜“¶V¾ôà·±¾g=îœz p]Ò¡ GíÛYEg"Í=Œ´gux®‘®Äugª·Ýª9¦_6-±`Ú*©™cØ-®,€§¡¹þÝŸ$2Éæ #wÖBݸ e“ÇÚ{ª˜ÀM±à9‰†ÑžüüéÜ€uþˆÍœF$9…Èã­Ù@Š Õàé¶éƒ/´æÈ|èŠñ"8qüwÊ©±ž‡ž8·™*5/r§?ôcaÓ}µ§V4™§·S~ÑÚ’zYœŽ¢õѵ¼#Õ”KŽ9ï¬*¿r@CÒL×&öÍÓ‘í¥ öE3T‰6å c½Ý~Æúׄк^iô:«Ã¶JëI1”Ï8Ø{÷ô1’ÆuŸÏõ…qX]jWÅÒ¢~Â-æ¤eÎÝQTHš~*3RQ‹¿}ôÿü~ç ÆQ{†Ÿ÷­?‚× ´ƒ"Ÿ•¿Çñèj_ÌTäW!šÛ‘íÃõ¨áß¾˜~؆{žo}¼@ˆy¢ û·¦\ôT)§ùÉðQ€OH:0çnÊ ¾‚G:æÎ0j}Z2ŠÕŸuV›Í¢ý)Ö䀬¥h©:G^ÿÊa¨D‡ÐR ¹Â¶)Ò·PL —’­9WïÀÉÿ2îŸË˜ÎYÈm>Yë,©ãxkAì=Îç ‰‚Œ”Æ]§‰7ž!á:!:(“Ö¯o˜¤Þö¬+¢ÝÓ^é„ +‚›Ÿ%X™9Ó½Þ-ÅäsfÒxš¹fêÙùô ²@ü&íãxÖA½SDñ`JªIXÕ\Ué} Ó§¢sF"¸Ò3S;R¾ ŽBL,Ӗ؉{¯âߨ$%ü*¥k—ûÄò>sHí› jð$žŸÁû¸¶•²T”ÀœRRÔó—ÂËÀŒñéÄñÈ)N¡L¿#*ûF+—£Hp •Êrxˆû¦;£:“ñëB¹ç_‡ì}=/TþÄt[þ›¹Pý„š]’ƒÕ­*yo?ƒ¹­Iy+·™“Tî׈'Uƒï •ü`g6»ÕTè?¿Kš›¥ö@¯/—„y0¤Û`žœýÃúmœ«WÈö¦‘ˆ¢V4Cîa"&a†öI¦Øô††};ácznŽ‘ˆPO‰.½ÅÅžRñ~M£¬\|QÖΑêI0´–ªG¢éYn ÞëÑí|²‰ÖLlÅUüs€ã€]`Ö<¤Ý=†°/¸9,ÕÒ6*Ú–c3•w5—“(lpëYUÜ—Œ¥9R$ݲ°òч‘ƒíæ}ˆÐQºQ¥íïlëßýÎ7o5wv¨”ihý‡ø^úbÀw¾zbw}–ÝTÌ .PôŸþ ÀÄQÛ 
pd†Äá_dŠËÏ4ÿ”=ù‡ÚRÉf;œZ°Ë©¦I^ŸžU&a}{‘ùwëo&¶y'!»‡éEß—®Fo=—épp©ÑŒBB؆]¬uÓÕ¬õè5PR{w; c´*[åâ¢CÌ5­µNY0¦º­'Fð¬èjF«ÕÊM’ÛKyfSÐMŽÞÒÉ|F²©ç®Y”y95ÿ$Põ[Íc“ùæÑjÇ8±¢†…£U<6Kû¦9:tì{γSÒFIò³G)¢3¢Õœ Ú‰û°sìçž¿Äæowu{7tøꃚíì…µJº¡ƒ+nÉqúh<,˜ luàF>æï«Ø}*«ìxÉØïN|¦û‡ÜÀhÑ%Ô’ Ùã˜'®:Ì‹nBÊy`´”Ô®<³vnþ™‘!V’Ô»s¥Û^¾öC'ÏÛ¬(¥:- F ™ü'·|zŸrÄi?Â^UàÞ†å2›Žù ”¬¥±ŽÒÓÚô¶nøÈQãsýé|}·}†|¶¥Ã6žš"&;}éËÈ1v¶(¼êF ¢Å׃au‘Û°GÃúcËÎÚ²®µ>‡+­jçxøÏþy2ìɳ&‡¬¢VÆêŸS˜› © Fâ‹zÎÝÞT¢·!~•W´Ã&y=è™qI¥íDÏÓL&ä“p¶‘L `šœó fÙ£è¼-ݽSWYªÃª´â É}vÄæˆ4FÝqEûrÛÖ©’Q‡ß¹T¨7Ô—ÀÓÐþ>Bø1ërn¤/û47âä‚™?Fõmo¶ü\ŽZ{àWÐO4ô%0¢82¥Æ” æ–²+qe×Þ%é;ülÂ÷0ÁöÃ3`}yÕž’Pæf婸pI³[ùC¿ÐÍów%gOڔΗÍ,ð'¾ÊØûº8À¼zo’Ï÷*–Ïq!™“û!ë©i{Ø1ÞÛ\JhûË ã¨­û¶ ãe/p'§Ñù|=ÚKvE¥Z‡ÑS×ss}Ï"â=¯Ñ3¦Ü”ßÃåÃÙØ„ýÖ3Va°ÃÜ4úŠÞ¹¿¢“^ò&ƒI¦># N‰Ð#²Ž.a“YX•¸£kÚÔR&1Ø¿´°oøÒ»Z x‚%¯}ÚEÎp…'¢ex !ü¤HRšwÄjˆ{K×NѱýóbUÑî:u ¯‘Ï»€[d”ÝQ±(gÆž™œ¬f±Û·gìŸé(z®Òµ+n|ÐTðëà»ä[‚ÚH½i¢t{„F'ù‹'oÈ_¯+|ƒfxÈ r¹À\ÞP H¼üøãM6Ü„Â65»YxƒŠPzå/ÖØ5[™é«$œ÷ÜG¹£–™ _ü,úoÈHÎÅuSR„tg=äu:¼é \9ó=~â¸Øô¶>K«Id²Ö›ï†N¿~ùY)M¬¬ æóâ!ìÁA) wŠÂK4-xªæwï´WQEpî“Â>ìÃu†,£Ñ‰ Ýþ*¬Ì©$²FÊQ¤‘§enõ½  ýŒ¶ÑÞà_û̆HÑB[ßkï;­Èý傞°©f»X¬_|xO|2»G™x V1pS'òVÉ0œ‚4ߤ2^,0±Âíæä?¼ôͯÝeí³~)6zA'vè÷3öwâóKwæSÁv*íSÉi×JOm3y”C º9»ëdLìå•€ë¶bbåFÚ MX²µŽàûKëÖ¥ö Ÿ¾É5#ã©Ö1{ûrƒŸ7d/–ê—(FG h§Ýë[{ccëæ¦ýEÓÄÏŽÂ5BM+ØÑü¸óŽ£ñ˜5'Vuˆ H$LÚƒ!(P àîFfædyŠƒÒ·,ÜK¦ŽW-4<ñ¸¾ÒÀWйQ™ Ý8Jƒ]ŠDS>Æ §VTБ›H:W6§gfÃÖ¿~—Ê$Ƀÿž¡é­ IÒñls6Dè`è±ä£&jÌ%Ús«—~õ鞪UëÖ XÆq‚gs]Žø€W%9Bt|ˬ3µ|ëó±àUñæŠyÏV™` ÏâxáœD?áJÏ@ZÊÛ ´S¢DÎn51Î ‹žqúËWëëÚÙ²P)dlÀμbòSÕtÉ\¹5ÃÊËÌv:à§z‘ߺŸ5.ø¸+¢:kÕÞê¼OúI©?{ Y°kNËãxQp p9Ú Ïd ì%«G¡Y T­‘òyÿ¬’úw#IA,Æä3h¤Ø‚u¯ÁÆKn¦xÏÌôži ½–ª­÷Þœ¦Ìÿ˓秖ÒÓÍË9Ü×Ñ4}âºÖ¤*³ÉDB؃ Žƒ£º<ïîÑŽ)\Ð+8Qh$Ú^w_š^Ò‡+EDN½ªÐW_²;R½½£¿õ|Ý]øº>¡T9$MÊ*™-~Ò³“ù•Â"5B ñŸg¶ÝÇjnðÛl-€äB5U«¦•à Ñbhë£Ôè ßrkoÇQ”D»0«püòÝþux“ôË‹Ÿç±8†(c±t·)ÒfêlÜ”PµË´¢LžÁ8‹´ìïA¡L7œQe~¼ðnåf;¿ýTáÏÑFüö9³¦‰mR,»ÐÙœT×0‹ÍÑ·& ß™*iQ<1|î‰Îx(™1!ÝDTšn|æ75xòvÃxM˜'¦‹S»ÕævEfïì9·‚[¦(¼’“ybì!_ÚY¸xÉò|´°ÁG…ünš-ØF»a«88èì¸ c.’lHr²ðNnŠjè359`DG©5ïÇæÇŽëj4aïý¢úÐ?܈ȄxQ, d Ïuõ¿³ÓLC‘Fù ÂÏ&‹€£°³b=E÷idëJ‚ŽÆ¡å‰zMa|*Íè6Ôj"‰€SÁ"‡\Ô ê„ê\÷m*¸&µš};8ºÁÛWæs—Ö/ô!‰ Š<;CDà—ä3KâÀÒž'³ŠÂ¬ZÐ|ÉÂh¿J={ññÌyå4;õ6?ÿš‹™ßõáCrÄãOÓsgŸÐ7†MýÑXR^ 
xßAÏŽðá\ƒ¾;@\ÌëƒS@·aº†7ÿüzãôxïUï{Ì^aÔ³êï~ï >*UÆÜ]Gý: Vû±§~ \=l(’*vQnNO×¼NöƒJˆrx÷Î¥zÍî³¶:ñ2”Ö¡§<5P'¼fæ/¯n°ös(JM¨Ò-Cöy(ÎãRÜ$öã}ßÍ'87×¢Õ~±¹ÏÄxqËù›mÖÅWËj“|ö‚³Ö+©Â¤œ:uàÞ-)}ý»lž¥Ú<“5y{ öy$è…Ñ»‡–Ò(û_[›çüUZ;š?n½®pJ—Õ@Ÿ5»ãeð§,•K)´ÇÎóå“õÁá™Jf®±¸x CFÄË;oÓ¼JHÜ®–l9tгf^ Z< tª(+1¨x.E¢eb–ÿ Kºy]EùÑÿ…Ç»Glùø'sß`õ%d+Á$„°Ç_OüvŽM¼‰ûJôRGסâRÁ“v‹šã,} ÇÚKFká2`M7’ö¼ˆX¤mÁÊG›Yß–pH™Þ>#.õFs“X—® çãeshgêîNpëœ4§ýBÑ;˜ÕKSøIG…A-“ˆã¹y[$'KŒo€Nѯ@µ†»®5·Rî|ð½L`K³_¹HÈI¹t9hkPg¤ÇÚÀÐ7AA±õ%¾<¡Na¶øu²Å‰!¡™+•\ù„ÈF )›þwWÉç×?«šåhËËöˆWÊú_w³$[‚R_“–pD‡€‹Œ"EÏpÈ(ˆË“’€vËð;÷髱¬ IÅù¤‰ðùðWñéhÛN™QfÍ¡ÍäN›‚ßu Oiô°nù«c˜áÖôÉ+Þ{òÖÁ©âü•á»%„§|Iõá\+¾6022ÕÖ{óR›îð£Þ’u!œãñ¹†iƒÕh¥7m߀ñ6J2þuÖµŒëÎÅ5ÍûƒÚKõè½­ãV|WQtê¢bÓSˆ´ÚÜ’7R¼ƒß.#×KZãt×S¶^Û+^—çLüôMÂæÇ¥w„YŸä¹«¨ã‡„Aö¸"BÚJ&>ò×ôÒ/ç ŠjyÓ»W¡19Xôs7‚NƒR4É<ݹ~ï)ÕšˆèÄv mí[HÝ$Ç·1‰Ý®²”+Øï 69Zñ¹A™––y1éY¤ï5³Oæåê#?SÿBCú`Lê`buDn“¤‚Û±œ*\ä$k,ü°ÔOª+å0r%–!yéûFn$ˆ¬ÕÚ£Àv²šÎõWû³F¸ë¾‹» £ß×›oéØpË‘e¡E*R)êÌÅYŒ/)ð }õ|ÁH9d «L6òŽHKß’Dª7=M¢$kõý>ªh§#¸â1"¹g±~ ·{7.åOÊFtFð>nb˜Ž#,1¤ÚxO+XûÈóã f {Ö[’ÔuÐ&]IjrY(×ÿáãÿ ü?!` YB .N–GŒÿ)¼ƒOendstream endobj 73 0 obj << /Filter /FlateDecode /Length1 1006 /Length2 4911 /Length3 532 /Length 5592 >> stream xÚí“gXSÛ¶† ˆô"HG M:!Té¥K¤J!4¥wé‰ôŽJ‘)ÒAi‚¥÷"MA7Û}Îvß}~Þûë>w­õc¾c~cŒïs.nv}#!e{„-TG …Á2@U3°,,"¢BÊÍ­Š„BP0\ ‚‚ÊÁÒÒ¢@ ¨-nûd$ÄeD$I¹ªw?$ÌÑ äUåûC$Tvƒ"av8P‚r‚ºájØA\F;å' Tvuþ‘á 4„zB‘ÞP{aR0h³Cm¡Ž08)èOZpPêϰ½—û¿·¼¡HOœ) ï/›|@œI{ÜÕhu é"pÝ 8/ÿ¶þY\ÃËÕUâöGù_“ú}ˆÌÕï_ „›» Šê ì¡Hø?¥¦Ð?Í© \ÿ£ â ³S†;ºB"†`ž0_¨½> eçt€¸zBÅ¡pûZÀ î—©¹ºª¾ŽÀ¿Îôצ>GÝ÷sÿ«ìê_ þ͸é a¾@ ÜxÁ8!îý÷ÊòÍÔáv{Ü(*! 
„ ‘?RÜíÁ‘ð1ƒÛC}P_œc0Â¥q# : ¤¨Äm ÈáŠ;K\øWDRRþMR@êo©Õþ")0¤ù›D ;¿ —§óÝ‚ôNiøánò›¤q~þ"°.Ñþoˆëýâ 9ü Å€ Ç߈›8þ7Äå"þ†â@ò7Šâľ¿ð?ÏREáûXH ×\HT7\°¤8PZ\2à¿)í¼H(õëOÁ]‰³ w P_¨éÌG„l¸sfCdE zÑèsB~€Šcc’n]LJ7daÓÉ×ÒÁ»üóµfg/д”ë„ë7}ÎY“ÓÉ6ì£Ð«•¬Ì#û â7Dh e`Ú@|8ºv H8RD_ù>ùÒP» i[C`ÐCÙ2šXÿÞx|v¼)Ù/ø€þêÒZŸâ¼bÑà8²×ßµ]µ±Úäðr¯ %Z%ž[ó å¹Ó£*·O«¢º~ÙgZÓ¼_…cy ¹0ÁʇëºÃJFÝy¤»0´Wžp^z¨[ݱªXbDð5¼±˜'IÀ vï‡Ra™ý+nÉýqQó¯äÖß-è5dQQFŽ¡HÙ}ä|ð"jaEJ®0½¡¢üÚ#_Ž-ˆ$­ÓåméÝúĪº”XHñ!IVé8yCÐ;Cf›³å#Âe‘ð„ ™Cÿ)Q¶qQ îƒjþ\Éñ¼ÂŽg5ß¿©*èî÷Y)>Ù!¶ D´Ýr¨4—Ž:â½Ê?’7hb"MJ毵+¥4í9²/Ì«6H«‘Zú@g& Ó~“¾ûýniJ7¥ÚEÛÉêkŒ#S‹#ðÆ‰‡Ÿx•‡y—¬¥¤_cø€’Æú{9Òiʱc“[5ºb#³ðÚPËnµ3[Äš,8äP¼!ÿœÆ"0VM³¿d‰9HùÞ¢ê‹çfwAŽóŠK¢FâCvÍñÚÑ?&EÜ+bm‘ e$ÁˆÐ/'¡_ÔÕ_ùk0ì”P áo]Ù!‘5Us’¢j–k›ÑŽÿsMù¦xÜzÔè–ÿ·3ýg"K›Bp‚YÏ oBîÙÞ*âF2Äu/ÊóÜU˜Y)6°;¼Igbnº]PÙv9S½4ׇÁóF—±Ëv£/Ã׳¾¦XVÀT—wºùѲ*ÈÂõK,MA”– µ.¶»ch•µÀ·õu(ç†è!U'S‚¨ÁÍ4«kȨªÈ5†[*7Ä-}9Źîz6Bo4^ Ù|9d5OÀ?#2*ÖX ©–Lîàù h›Ïó ¯ÕhÓ+Ó’I1ÊÃzÛ——Ï´Kð¡ÉC¶´Ã‚§“2Vƒ§jhÊE0#9òΑmÀiÛYB­]Ç“ajÔ;(Äø‹ pºtÙ& /|µ#£M·¿?&Zn_(Üé+Z.‹{ÂstàJ-x&îrª6ÔJùøE‡²¢cè•æŸr4S‹ V¢å{¥I]r¤±½OºQ¦¯ÕX [vâêÉ™EŠN Ê$ªÓ]v~N¢–®Öè"«áŤ.¶^ÿ´SOåY]l°å¹O¾{¢£Rœå_B®W¡?Ú¾ñi_”øEªLÚuë@é5Ù‡U¼ %Oük_*½U´Ù+ÏÛ :|KÛq;º¨ñ³‚ÒHÉÆ7}¥êèㆌ°ì ï«çÛj(Óˆ[GIß—Á®F¬ð:Ö¯ò5,äfÑoI*·QøwÂ_ñ¾f˜7ú`4“ o• ®$çÊ3UJñ³ÆZ&ê,’ˆxðu¨,ž‹¡ Î%ž|¯Ž^E9n.FËC”„5¶E—¶üœˆ3²¬D’ÖúoÚ¼º£4®(\DËeQºŽ-WäDÛo@\¸cE{I]w¤G(Øõ‡³ *ìܵ² Rx3·PaãšV«¢²-œ“Ÿ(`³M/m“eî]úû ârV>Øê:šÇÔ?¤¦Zvѳ&oû̼‡ÈjR“Ê´Y(½öÈ2–§m¿ã7«+ÿzéËe›O„ž}žàÀÝu½ÉÊ jÑÚòþQëÉmNžÂÓ* Â·(¢ jk ¦£ƒÐØW÷“Ý´>&‰h aU÷Ä3÷R$\ÚÅ>·g%;Q`"•…Y# .¸ùQZm‹ûO‘œÂÎ ŸD·Íöîå}·¦¡²Ø3ÙKÜP(Íß{\ÐõcÒ 1!ŸÐ¡Äfs”}» h‰ƒØVµâ2Koz®9ýÅüÜkK°{m#â ûbÆüëú$ÕBùKäyzb<ÍhÌ @ÞóBá|È%ú‚~ à¿ñ$YZä,Ç1VÍ¿©s>É%uX‹ûy½¡@àmn¸6¤$@{̬aˆC¶È,«­éqQÁ‘ȺÍl®sÃz—'“ ¿RArâÑ1ü‡ô^ꄱùÃôµm±¦62ªy¿i&€âˆÍ‹ÀÑ|ñmšÙÀ•ဨçI„ÍšåkÑìc”á†ÓÉDÅÊ£ÅâDF/)ßìCÞoÒGæØˆG’>–!ú²¿nœö~ÐziíþþY}CÿëÙ×Ïð¥è^#§#ƒF2™ïqvõ1Øv¦kٯ΢<*óô¦Yýº_лáÑŠ¾êÜüH{:2ya`Š"k§SÂ1ö®Ý‰÷X/÷dý;Ïî/fCö(´_™ƒ5]`öêhÏSÝ(Ù4!e÷ò¨j«©ÌŸ±^÷‡n|瘘Ð~ë${• »/£ðœ‹×ûé+@–!,ôTi³ÇC cÐj2šË ­‹ô4=öõ¼ˆã[š1<ÖÛn¥w áés#Ñ.$q¿¯Ë¾žèÒÐ'Iî‘–eO6á?i€¹ ævõ5Sm¾¼£Í„ þè0ànRÛˆ´Î¸]%ª&k²ÉÃD¨`ÙÄ*ç½RÑøS¯Ê&Êw™“ð×XQ@=|±ôÝ~̵J£ùµSü´¦Œ¶jÏ2u€f¬¾º°×ðï,ù£MâÞ&­•»…÷w‰¾T ôñgÉRøUß›ßÿÞÓU²#9ŽQÔ…¾“®©;1 
ÒØF{hÙ¨Š@•ÏC:bšÍÔIt,2¬YÁëÂñ$X¦ºnÂé;æÝ“«áC‹à7ƪ‚o ºçˆLù¹ßOš›¯v´Æ×­¡éø‰ßxp>×°©rÎHÛ Ñµaœ¹À+“vÒ6ãÈñ´£íI2kÓ €¿l9®CáÑZ2Dnò\µÝtÌ÷üƒ8yïŒI™9)ùÞýs§r‰vï…Ç×ǯÌÑÁWž2SÊ~Þ—/o;ÙOÕKuòÑì‰^´}ôŒPœÜY9](3^ª„—qBι)Úá Õ!¤lÝŸs£MõCÝŠœl¡›¼Ÿ6d_Þ¼ ©=l#¥ûV=GE¬ò4B)b7Nòâà«z],ªPø°3âKÆÜ,.—¦ CGÁSmö«.¸‹mÑú­ÒµÑƒÅèÜ®SµX-“«ºv)±¾¶ʱ'ä±Áœûr×S[Çg0Ø’€j™àU^¿§ÚÉL7"Èá,áǬ±)ÝnZÝÆÆ¼ßÔÞªàØbPsâþ.:Ò¯dsÁ4 'ßÅ3(õh›ä§~…Ü ±UÓ)[t›ßÅp„‡7 ¸Ä/ðŸ<øg‘O3f†~i„\XZ¹/~m¬XýÈÖiþpÜ6ßÂÅÎÚŸW⊧2jS3Ò­,ßVß‹3\®¦iNGœúukD/vÑÃ6˜ 7+ñçÀÊ›å¡\—„Õô'!|:´ gzœ²ì¬m4ëUâQ®D0eèM¿‹®Yz_ߘ½NÑPîÍÃQ[ªo.m0u!-x2oË|]Ö‡k~?ÂˆŠÆA¥+Y‹œötœÑqxãÍLn$¤›yb‚Ú¹nß²4(E=4¢GžAæÚ‡šñ!†`XœšÛññδ‹×Ž_ÞÍo1ɺsõLIQÛÂ)ÞÆóÙ¿##xh[£mõáGõÙ½œð˜fÎ?¦å(ªô?Ymx{V~D›zqùÔïIÉ=05 `3MÙ ÊËðÉM³ÔîÖ«Ùb&©g E €#7t'ù\þõtÝå°¨ª1Tëyr*”—Ì!2µ~ÎÙýøf[B$¨óy¹çµÕ-Ó­X‰.m†ìµ3ž)·æI=Ñ;_Wbö(A*¤A=5¥6:·„æ»›6’édC»…ڄ㊜‘|H3­RÍX™ÝZŠ'¾ÔCé§X¬‰*],:¬/OÞÄ—ò2‰[îÖ2wór™«'ÀeCjG½q#+îÌg²æÐ "ä¢10\âßÊu ÿö¹|µq2žºAF[¼¦dBŒ?ÀÑØ?_ø0£äTlïÞhÃlzÿæTöAxèÝäŸ/^UãÝx ]¶·k¡)“ú ù­C’ æWjÃÞš.ˆ®1 ³¸Rìʼãd™“᜘ݚú°Éÿ*ËúþJWd¯¬b÷0å‡ýðf!^ÛÉ cÅ`×FW9<{çàõÈ·tÆW¹ØÍC< šÏi¬€uCsëá+ãv3îZ ¦³U~qr%9b«£Ò4ž\›ŸÎ[JL¢w\¯}æÂÞ&æn1ÍL…¼mª¼•ÉhÞkƒßú–ÍÂøšwŽšDh.õðÈùx?Q ·{Šü¼'sXáN£"a$løÑeæÁ}‚àÌ3];}q$+ºîNIW m>ª¢è…{gb*/V‹ÔbWe¬ºB¤pÝýöÆK˺¯ƒ{†,te‘60iÐݨÔò¨Í(-¯¡^ý;fÉìô2¿©¿À’—õrþŠ‚ä-f;–&´^Z ªœ%•«‹¡Ö6íbï7˜‹Ô~jZ¬ÎŸ âã,éÆÄš<²:ü-RÙ Þ^Ûyä_ëà!ýìb ®yåP”îŸrçí5нÖãH|GnÒK÷¢D‡Ý÷AˆÝÕ§R‚5¹sýÙÑEç]Í_®s l1 ±uÆëÒ6¤±ìƒ®¼0!»Rc%·—~Wý©[’mÓ¶v–EæZx³²‘Qÿ×FÒö¾¾âvŽæp2efRQñÚ,{Ó£ûÅâQ†÷Nb±x ·ë+uwtãë&¤”5§óM£;aâsø›Eº-Faó—‘,÷@š3mDûÓ CŽië[§ {ÕÜb¼úm‹ü­/É>IIô±j]®2 : Û(!´Wb—ƒY¾Cšz86ˆ§¨Õ|ꦋžÐðÏLÞÖ¹\ºßܦùPbYêv¶òõ.}B­vÊøö y!q.©ÎaëÏïõ8óRËúpžQ`´,04ðåòf˜Vá[›ìµÃÆëâ§æÅM¥ÎÝ©E6= j<ß–®xßgÕœWËßǃ0æwíÑúe[FaAËô T‡ÖÀOS7öÑî§÷\è©qDWQž­à}¹P6â>tõ¨ t©d§„6ºþë›m8Èÿð!ýÿÿ' عB!H ‚t!ý/Ðq3endstream endobj 74 0 obj << /Type /ObjStm /Length 3334 /Filter /FlateDecode /N 65 /First 571 >> stream xœí[[o7~ï¯àc‹ ápx›Š¾&N¢ØµÝ4Ù²<¶µµ-G’§¿~¿CGœ›$;N·X, f(ò<÷sx‰Õ,aÖ0¡-³–©D2›1­ð+g6ÍX–°,•,L$€ÉR&¤6,“’ O1aDÊ2Í„MЩ²D³Ì2‘'9Ë2–&‰bYޝ’,OX*ÃrÁ¤P ËS&•Ò,—Lj•³\1i”b¹Æ‹ä†I«Ür—1“˜Œå93ó à` ŠD0“‰ •”™\Phi‚Š)Â̪„` ³Zƒ†„ȶD\ °&¤šP€:¥… WRt*°G0ĸ (ËRðD€ä,§QÀ1!. 
"-µ[ z…yV1à”ç H¤´°Pø—æHR@ˆ”°’„mJ4Kpšt# ¥©fi è”É ÍBã°OÈ„f'¤%µ "Q¦´­&¥«Ú¡cÀH•fÒø—F¥™¡­ãx)3ª´¤Q©IøùgÆw'7sæ*‚„„R2NBV uÅ€¾¯Vº¬kÌ*ʺqjèë–¾ –µœI?÷/¿0~0ŒŽŠ9ûÕí]Æ‹û9;A—Cnc6*€žNñ­áí«b|q‰ŸÐ*¾u9œÒÀùù˜oñmþ’ïñ·|ÀùŸò3^ðs~ÁÇüO~ůù Ÿð[>å3>çwü3¿ç_ù_?1¾]ø%ž Zc÷jx1cÊsessr¼ž HèyªTÃäÀÏ쎯 ð Ž–wÃë4ì¿>ØùýÙÖàxŒä{óáÕx´qsèçB½€.ó£yqý„0þ¡$JŸ%Õ›ÃYáÄÒœiçf49ß\8aúEÇÓÙœXÙzˆ éøv>™’Sp@o‡ €ŽîNç_o Mü+ü§Tþûøl~9sâôP+Ôjœ ¤Y˜¬ÍÒ'R- Á‚]N9(ÎÆC/Sš„|ž…ó‚0†SÒQâzXÌ&wÓQ1#GêZ<†E5=øIÌœ‰9 H:¦%4ÅJ¥yG8Æ Kƒá|:v¨½Hà´C1®ŸÄr¤Ù¼®ÿ"„ µ&_ö®÷¦ãLCú²Gú:–~°A9¶A÷³²ÁOw“yqvzÅo‰•WÅùÜצÌG“ëë!¿üz{YÜðÛb:žœñ¿Šé„On`¥ãÏ/>Ý ¯ø&ßwÆ;ªÌ÷²aÀŸ"þÂïcNI¹; X¡4Y¨`–$ ë•Më}ôî·×{dsÇMëM*ÃÍWnc’H5dS5¤jn&Z†›¬i¸ªÇp{œxŸãŽ-1˜ßƒlN­is™nÚ\&{mnrGŠÈø›ñÙ 3f~‘Ì#ûq”Eøoøí»KÕ\`¶œadÿ°úïbä'Ú»&¿ ªƒQæñŒ2}Œbgw£bÊ~Ü9:`//'³¹×-f_$ö§æõýú·0`Ш£¹˜Ñ1¦Ý;òãù×°ÀÎýüåÑ|8/ÚÚ¾ýáàë_Ÿ½*®>óñhXSïv\Ê›ê‚A±ž¦ÿÁR8g*ƒ5ØH rI„¤e¦æ Ú4T…`Ã×ÕË1«J±UÑ”Š"žÑbT,œ÷ºc1€~».ÁµA ä`ÑîÆ•pÕ¸²ž# sS{@‚Š‚>:Ä[!Š5b„«:á@pè§/­ß$˜äúÃ8Ôm9&)ÉiøoÌÄo-•zJ Ó„¿[3IE—E„•¶ä ð®ÚðÅ.Ä·!ƒs%Ì…þj-j7^Á2òe!™Tòilh’…P£Ò$>(S‚F!Äl S)˜ñôi,YA¬MM^˰~çn^í¾Í4=×2N¬ª¥³Ûá¨hEÿù— ?‡CóiÀl|ÏÝ`~3Fç?B>@¹ÀØÅÿ*ú×â>œ¶ßZh>:ò¾ØÅ:ºÈ’j=×gTïÆÚ¬æÃ—•Ê%`ŽPÿ<5Wæê¥[.)ÙЗ•u[Ƨ0žê!ÆQ¡W¹5!j>ÝŸ¤¤pT¿Í1îeÝvÐpŒyÇ8šSe´¥MÜWÃwÝœ%Oœóïñľ¡Œ8D}—%ûrf7:r%Ê2=C=&·";ÂÀ"ÝÔÔÀš :¾Ml‘¢å9j%[ô¢¨HDÍr- £Ò á}ãûMÿü!Ú¦5 ­ÝÕÞŠrËTߨ•¦qÒ‘ïnl½ÝÞèÎw;¼P¾Ì ÉÇz¡½à…ªŒ0«{¡ØB¾9[ëHµEÞj<ÿ'RíZšù_Hµ+æ”vQó:µSªƒûÑ’¡C1’—NJþÖàûü€ó÷§á ²:|¢=ˆi]´£Ë÷Þƒ<Í©~í,1:Õº“|³îI~{C·Œ=»˜ïz½ÑÇ”ž«w©Ÿˆ[ò¸Õ»çë»’«ùžéådÍ»5]^­Ñå.eœµÃùpR»°Ýí_Û}¶5ìm.¿[£Þ…?A(keq͹¢Cú ±*«KM˦ÈÚyYó>ýÛn×zÕ¯ç¬ÿ{©¢þUì=æ~çR§ë³Ñ?xú;íˆ+ëžýÓ#…[z¯&û,4‰-”¢»t[ÏN%â­7Tz²‘CžuCm¤½ß~¹±»ã:Ô=whÙÊ;´xŠ…q¦ç•qª¼mœ"i§kgcÌKòLý®ŠÙÌ_`ž§kñ9øsJ¯…RH9±ƒªs3krs°÷îíÆ°âè£îpzR/R±‚«õ©"¶Ú&[éαÅÖ´ÁV2ϵøšf1_ýUž¬®òèÍŽ»­+ïàEiϽµ{¼}ÐÊ”E=˜ª–zœ#X÷p¶Ë‡µ¾#]7÷þ¦þá×÷}Yõ¬ø èè04¹šÜð —hïð]þ’¿â¯ù÷fçïC~äRïüt:ýYÌ=÷£ìðX-(P{&°þ[¤ÇÞÍ%²#±­mЫwÛÇÿr>*ïqsHÿ—dmŠÈuÓ; е†=®ûħTü‡¨J©5Ñ{o¸X¯Ýë+’â°gó´Ë×5å¡«§WY‡‡³=>?/0ŽÀhFþâf2?+μö`k5cëJÚÄHéÓbÑ­Zn[í¢•©Œß91.:IC¾zû m'ÕXÐ?0ú&]º•0 JRýÕù?ëö‚–AÙ¬{èGox#X¤S¼²²=O} 5¢}¥ÐÕo)üÈÔÐkÛÄͶÀSÑÖ£”R»Ç“ô§Àí“åJDþ¯­˜Œd†uÑ‹Eá ‹EP˜Ã…³JË€ä—ê¥qÌ9梣0Ç|œ zê"ó¡Žé¼©´|“ÕDR'Ý3þ‘ÒÃí…>CÙ¢0Ëjq–Õ-=ènX?]Ëûçƒ ‚fü 
ã÷dü/çlWÙ‚v*üÑ]—J|Îܤ’î›8¸uЇެÁ ÷ˆÞ*r™†­[zFh\Y%ðhUt¡™zøÜ[•¥ó[úíÆzøð¥uêë9 ÐGOÃÃWÌð»¤C¥®–•s‡^¢;EÑþ¾z]šo……QÆ_mfh“ ’=uù¾])øAÙë:þyJ¯t=´Ã$W £6ÚPzF™Õ.Ûd¢guw,¼J.`þôrÂÒÕ¼§þ?ú7¹Ð˜otïü§V«˜IA~Ó…÷Ê)æM/øÍ‰êöA"Öÿ>ïÓŠ¼¡¥›ëQº¡ã–®ø½N_ee=¨®rº•x¡ºµÖ=MÒ¿,£®WXŸH¯Êé¿5œY5ºÓ=A!ÔFÊC±tÏEѪŔ±´f²C£ò<Ö¤¦^î®eÑ©Ÿ(üÑÎ!¢=y`0t$ߟu‡ö^%­¥@,Ý£dJ¸*úßNdš/WYz/eâšÿ|‹t/¸©žÒe«È§Ý‰J)ðÑã™ÔÝ%ûQŠ^0ê9ùá?ÃÃaendstream endobj 140 0 obj << /Filter /FlateDecode /Length 1348 >> stream xÚÍX[oE~÷¯Ø7Ö¢;sæŠ„Š¢ˆ !µE2‰ãâ8Ľ@=çÌe=‹§M"¬¦ªõîž9sîß7 ¤?è@)á¬×QV@Û®güJYÙ¡ðJÛîfÙÏ /ÆC½äÉÉìñ±ï‚mwr^)-Ð)ßœ=︚CAÿ_çëâ2ÿx¿œ¿[ °‹Ü$yYß?-×…÷ÚL<¿h(â‚¶¡«¤¾¦hZ[…©¶2ã¼­2"¢ûTcN8”i ´ŒÇs¯ûÍMË~'¼Å’Ú×­Ô0Å?îÀ+®]ì¡¥Ì ã]±*Ûÿ_\ºŒwùÝÉþ~ŠJÙ"fg±¢g„×J‘c$ŒÙ1r{ æí¿Ý¬¯IôMË]û½ÚÝr³-W‹2SÞrÇåW«îÁ³üf™›1:ºØ6•½›£ãBÿg^ÅêWé%?^n¯c˧_6Â$ Ni»AQÖ=¤’}ß,!«´Jè?Mê5Öt”ué:G±u`¬Ùªj”B;£I¥S!õÀ7,6ûîdœðhÉ@KË´Q%u!Í÷¿fÏ_ÊáéL ZkºwŒÔÕëRcKʃËÙϳŸv˜1ªj3¦æ[k-¤£ŠKÆ´æu‚ hï?á'Iˆ3ÐÝ–„ý6Žã’+Øi g,%]…O;W|c®´E)sÎ?` J°bѢΒ4 5=Ní?š›zoh8Xa Ík$tÓF¢ûþa%£)™úheð ’gé× f¬úÌCídسØeo»îµTl¯=¢øY¦UC%÷ù¹ÔCAHé¸ÎÇLD³ ³ËÏë4ûœ–&ÊažæÇ#ûK¸ª«4Z·üz“¡h½a±byYÔ^?ARhުŒ@uè*T~”,dlІ„9}=}{ O¸‘’}ðvP "Ru›g[6ë&®nØÞÎ%^WØwÄÔm# DS­¹uø; ƒÉ*t&PZ”rõSá©‚ú¯¿f$ÞN©õG=ÖÉötò,rðåu¾É¤oÛBnF9äêIŸòÙÚ5b£à 9J¼96rfè×M¾L,H w¢îÞŠúŽ*ٳ먅4%cä¾S=( ª;ûkc'Ií‡Ôûé”öh³öo ¶F›Å·F‡.¦àާ)8ôyñplb@ÄþË6'{v®pÕDy´ï"†#E½om*][þ—DªÏÆ©ÔbËm™¯ëZjsÞ¬D+´†RÛŸöAš.±Y O9"Þ*{üúQ(kœ%høï÷BjÙ(6ÍûøÏùCÍš ë$±£ÒªÄ¿>_ÎÐQô€xòÝ?ÉÕKö>Éѹ‹˜ñEÔäœÊèV8ÙêMá2©‚NôZ«ô‰¡†ÈsÖ &7_ðé7‰~ŸW¶aùIòÂú˜Ê­67Õ—@îðuæÐÄCyú"ÓRÇ` Ñ¿€±!endstream endobj 141 0 obj << /Filter /FlateDecode /Length1 1136 /Length2 6300 /Length3 532 /Length 7051 >> stream xÚí—gXSk·®)J"‚´ ]Jé )Pz $@( „Ð{ïMÞé]Št©*½£€Ò#½wØYkïÓóíŸçüÚ×ÎÌyw¼Ïxæï¼r…¤Í+A˜CŸ"à(^> €‚ººŠ(sËÏOÄÆ¦€„‚Q0\Œ‚JÄÅEÏí‚ü¢Â1_"6€ÂÁ ³²F8¸þJÈÙC‘0 0 FYCí1`;€6ÂE¹óäììZíphA H(„H@Y æP+œø—%¸% úOâìð¯%(Ò c Àù·M.Æ$·s@ –D@ ¦ãåÿ‡­ÿêlg§¶ÿKþ¯Fý·e°=ÌÎýÿ$ ìœQP$@"áÿ™ª ýÇ›:s¶ÿÏUØf!·²ƒx„øø…þ‰ÃœžÂÜ  ea °Û9AÿŽCáÿt‚ißß>€ J¯ Ô•¹ÿìßk 0 ŽzéîðÿNþ›~3¦EH˜ÀŸŸ_“ˆ¹þugüµ”à n€‘H°;?FJPXà)€Á!P7Ô cÈG 0[˜Æx,H¢¿¦Š<lç` þ+üOD4‡¢~E@¤5âß,,Z ìígˆ`DœìÀNÖ¿#«¿Î/fÿЉ>U~“0¨ú›Ä@“?úM˜}Úÿ&Ì[´s²@ÂP¿-óc6@þ@Œèˆ1cùb*Ãþ@Œ Í( Úþ˜‡µÿ˜Bð?Sñbœ:þå?L 
`¤\ÿh3FÊíÄH¹ÿÿý,ÉË#Ü<ù¼‚Â˜Ñ ˆ `šÇïý¥Y8#‘P8êï—sÿÅ–0̆BÝ D3“ É ›ä!E>JyÃÅ·¹œèS ƒ¾¶צJòmÌKRŒ#ë‡^B÷ɃÉm`%w“¶TË€Ká´ä¼z=µÁÓä½$T\oÓ´ñw,6ŒLpú²{:Éì3Ã×f˜–ê¹wfYŽ\ZoW¨¡Dsi·"nMM^øØúü˜!N°¬$gT{ÕY_ˆûr¾©ŒÈCÌä÷Ý7J;¢¼ž¡íœµýN’*™­Ž;å"¾@!øn’E"£V¥¹0&ÓžÄW —nk(c³‘‚jŸåßÑlFƒ?ÜÂî®(ñ¬t%V°÷ÅÅ‚iÀtn2¸ú^|5Ê ‹+Ö©j6[·ž +U^­ú†¾5Ñ囹î£l{šÌ —`AaüŒ\= >÷}î}AåUþî¯ìüqqÉjøƒ*z^ÔMÁõÒùEÏ®¡=½ KÕ]¿#qœ:hÂÁqÕ{2n¬HãR‰K'Ú‡I…—ã=:[:ÑC™Úž„OZÃ&]Û¸¯u„«›BÔyb244?*+{böUÕòFMë>!®ÞÐüOÉØ,4¿<òR±ú=3Åô—†+•A âÄ{{Í\ß6—6{òz'xè¯ë¯ÐœYÝrˆ6¥Ä*,”*t­}O¤T¼VàH»’¢öJä ÏÓÚ¿ñ³žÖl¨Là»ë ]ªÓQé{à%nQ}\¯†£Æ³íŽä\ê¹¼fÉÁ4ý@ؽš^ºm¤¨Í=Ä!)ŸùHþI¦¹OÎõh0”Ô!»‡`4RI°sÂÖ!Bų–®‘á)³†xÞŒ¸5"·^ÿÜé'6ÿKñVªÓ„‰Ÿ8 ·9øççmî%Wf°ÅÞf§ë:VËrдP-xvép¬ßOÒ.ß>ù!jdAdòëyƒÞ¤AŒý•žôYaš|,K\Nx̆ÛaÝûÌ´Ñüû?DzÞUÆgI¹™ ‘–Ñ 8¯"_‘NZ¤( ·¾{~j8­ÎPË×ÛMÁ5&½¶.I'«æž–:ÐxÍ-o¬*ïtl¸ÝüÈŸ.“¹ÀI¦˜½‰c5$CøA¡lÝËf•òv Êa\ngU’“t4‹ôVõ¨ÇÃï ~î“cšÉ4¹`Ï™–Û<=²?ÿD‰óN¢1€Ñ#–«|3h¾Û«“'5¹O˹²îؾØr&‡VPÿ$§Æ ËÌ]hߊü´Å\1»Ä'‡v«ÿt×GõF-ÝÇ¿ù»v ­‹ÏB­pD:ÉO³n.?÷- b—ÃLC3îˉ^pý²*âZT*Òúž2;Dª«’>g\}Æjüɤ=‡>Fi[|NóU§TqæÁõ¹¯Â²ZEt¹I3Ÿ§<¸q¤ŸxÞ2¼½¶lÉÃþ TIo§ÿ $Ž:ó§ÑQzóñž+î]\¥–w÷kôð±z|ñÚ;÷Mk»åÞVu…¹1úïÒU„.ôò{M-g È›pL™*TZz2!®’~h%å¿ë¯¾x¶,úت]U„î韪„a6$A!Q>cÇð0!*…¤m%çqªËêÛŠÜP…ç«}žò[¾Y$òæ KéÃgp,d@™Ó¥ÎçîçmzæOTeQ ¿Ê(Ÿ!rß öÓT¥?Ò8v*Sx¿ )f™Üì‡Âzò &‚B#´;_ëDš¿H\{›P?¿gÔ¬ rÏÎR¨½à7ÉŠ¬„ê~aZ-6”®8–B,çéL­ÔPÔMâÍ+QÖV[é·Ç†·›4ðI‚ûÔ¯ÁÙc5×»kmõ¢'í,;¶ä­&Örʶ!* êè>+!Ï£V¦XZø/Žf$ÍëLFº ¼'É>Ø-C©Ø¥ù?Ÿòù÷Ë“yiJløq)¥â\ÇsÎêœ]bq,ýæ©4†yýÛ&òT®Ùç{¿ÂÄE^vT¶”ô‰Zåb3ú =¸¬È„ŸÏð…Ü–)<Ñ)tE¥ñª\ã|n­ ¢²I7|:T<¶OÓrâÚªk%¬{ñð ¡Oé¾ÿBŽ!z²Ë‹õ8`ÒF.Àšw¾ ¡‡¢ûò€d©s&w~ z­@ö -:cdgð=Ñàz è*ƒí+$¡" bÒ†³šãÚͼ½:û}…¸ý,‹ìË[ã’WÌB¾(ãÛÿ4¢iÕ$<œî‡î¬ éç]¹W¾Ô¿Úîz7HàðN2’¾Õ’=êÜέq¡0¼ÍÆÌåÀ½®¿oä3¸×Oå›ã8ùʟ²»çËäY áš]9kÂfèÚ”Ú¤{Šwg„öØÇù¦¦K÷“HÀ[†Iðö©…çÞï¢7\Hµ"Š}Yˆèÿº[xZ(!ý˜Ú“gšTüËhL‘åúòÆÑ ÙÙ¤¾¸o|:v„ö;\l¯ 6&Ù w4ÀöQ|*ÀÁéÔÖæ@„e,b/úR^ëƒ#:¯“ÌÒƒÞ–ª;JœŠ¢á–æ÷ãi§î „…†h¨,Ù#Ý«·Û±n¯šÂª°kLi©1?¢ò){$¼”]Í%%_6Ûéö…ö ½B¸éEžäš¬X>$Ч þÄPœPo`”Ýä*F8V`tpž;ž7âÊœ7R$Õö‰Þ4ÿþÚè=U ÷PÍ/áþí¯U†Ùú!r \¢Ç/_Žr8ëg¾ïŸgüù1-åÕ„w0Þ±Á,h4ËýâŠG¡ôõ¤@)ÉC‡ÃT°ÙÒêkib6Æ I j)òˆgæ¶Êk+%Z'ò|ª ŽÓŒ9„àŒ|aÚ›å44Éô¹äЏùÎóyËñY³Ñw•=u§kçÈZÖÑ\}úm›ª 
"*Ù±Õb]œ"Ñ™Yàòr­ŸãHš™«Ù‘Ä-™ÕÛøëÙ‡¦t‹¹µ¦%B¸K-ñ›†Ìáà-fÒ¢.HòwÏÖé#¾òö5.%—וú½þÇe¶²kD!"W.Ë%õ>xX³)Q—÷ýLß¹É6æØ°6WoEÊ>— ­uû<€ë®!Ó«“ªÄ`„Föß»‹ Nöî㟫áÞ¦0oƒkšÉn^©V4×`9ï3ÒþÔ»Ju4²=ìÊz´Š ¸MÊÔŠð—ØxÄtµŒVaizÛ’êÐ{ÛÛxkèÈ~|y0qc §žº«+X%dÝSÇËõ¶C!›þ¬à@@š^$›¸CÀ’•ËÙ7ˆ³M«˜à ¯£äŠõÊÓs°½,x¥\„w'½PÛDªû¨ƒ¸Åc‘†Ÿ’dÅ£ÔA]hÙ©é5oȧ¾ØÜwj˜ß ›ç] ÎÎñÝT5þ|+bt”WçÒê"HP’lıa³¼~ì²èkçø¶[ŽÇ)«À¼¤¸ ø¯éj à+#øU釸ÓOž“bñ³]{sÙdt×?Ø”Xeލ$Káwsâ¿•· J­ØŸÔGˆÔ{H™¨¾0d•ê+n3ì­–Åè¼Z‘ߨ„g|ßÁ§|¡óüñZÃ…>C¬MÌDIÙ®\ï=…Ž|Ū,5”‰!ÖGNÁOãÑósJ¼<èŠkêÔÊZ|×Õ¸1ÙÓF6YrtóÖ[h=ñÆ[”[ò•ÎØpw9°1 ªM(XóJåÛ»´~QjIJЏ÷±úôQ ZJPÐëWbÀÁ:¤nÚÖ¿ž.=¬Öwlqg,™ºÁI]¦ãKðI+ñ·VÎÛB&BÉôdƒmQù€Gô‰ÜÂûxö„ ºöœ²ŒÈ=Š_YÔªtx³áÚ&¬f vh2±h/ljŸµ0ÉÚ¡@§‘ò)~|¨¥[®äÅâÃôduYgù¦áJ¨bù È0ZÆBø)(a·¸ÕRqÑ $‡e´,N,o«Q‘0…/iÁ©h]žŠFœãߊrÞs¤'Ãç'aµàD´¼Œ°.½ÃÅ1zW†ƽ½lzÖ´GsÙ­\1•ß›î턜׿#drfŒ yòĹP¿ýp&o/ÂOîÇiV¥p›‰6O•!²K¥øvhŠ©äâYÖW—+›QG×8 ,ù¡öƒòú¢ûv“Ù&^×F„¶”½lá¢PoXÄÔeƒ‘´ÕÏ3ÉÜGoÞéÆ?ñ*q ÜQ²ŒÂˆ ØåÞf_jËð5(Ò–‹RºÉߜؼ•ý­Äù®ÆZ<.}>aŠ7Ó&ÌlˆCܦMÐ#||™³šÂ)ÙÆi+Vb¯r=˜nWÌèÞómvÙ$ˆ~kêU¹-8cÛV³:N`võåçÅ {­÷°¸-‘EDÚè˜pg ð0ïÑ/s]«èÂÆÖ±»;0ìfk蔋ŸL!Ù÷Ò ˆ–»4ccèl£jÖ‹yÒPü‘¯ºa*©–[©Qv–;°‡xîô;M€±wšFgèróTæÇ…µ2• HqùOC/5j¦†h½c¤ÙÕÈa<;%1AÁÏ'ë-§>¼FŽsô¸•‹Kñ[¡¸DäÁ†^½'Ï㎯ç;` ÒR8ÜŦ_£—Er[Fr;¤-aÑ|[©çÚ¡f¹êÙ¸™•Š‡Æ´Õn ±RV¡7©Ë6’VYuª¤fÔ¡Û|i›Œ[²žBJ©öÕ6/:&Ÿ_n –kF³óY> h/PΣ^å9”Ï[‡r‹Ù]O)ú_í¾¯€€},èá#v™O VýÄ+6žV³¬†÷OüoÖ˜äÄ—vgÓÜèñ”HKäºÎöø¤_0RìöÈ’ÛÜ+¾íé¶AÒή¯/Á&ú>qo#ì凒‹£Bà .9Û7­Wš'㪪LÖmx,9o“ µºó.Ä„¦„Y†úôŸÏO­JЛFW¿Fh9 <Ùèv†OH Œ…IÚ†uí1š?’Ò;i>j Ÿn‚IA!i ”2¤§rœƒýV&¸@Î8ŒæÚŠÂN#¯Õ¡¤N†5Ï;_aK´\«’i‡–‹y›>E›¹S4C4½m‹¥t uÄ6=–/Ôüùœç)÷§<¢È)“äÇi¥æV2 × ¡Ö—=Ó,DÃNr(,+®1óJ¨‡§‹ëÑ|_ù˜Rë¨e~÷%°·*ó‚Ô ö¾ VìºãßȳŸy+çÐä,Ó÷c 'l‘óòRî¼ïÍdçºE d^7Ý‘ú%qT!—åö±¹XçùP ._àÎsÔ¸ÏÀ&vk9â9vï¸ÇÔxñßðÇk÷e¸¡óR?µ¢—_CeAœxÃT³Ñ ›mqkóôGc}$ê™Ûã$$2wg·\k6o4ÙÎ ýxS­ÿ’"à×à—k§AHÐO¸†µþÜ÷ð.#òR´@ðÀÜ W —g+ e-ÌïD¼¸€îXZŒ!HÉ®Ø ¦>tî5’3À·*•c(Ñ{fЖU`j.Ý“)=vÝ^üt ·WÛÍ¿ÿd¹ÕA³Äâý--†Ê×(Â.Ù³ "{ 2­}ƒt’×ý²°eÉ7rª‰fŠ=XÍô”Ö¡œÝ!êÇ"ë7І„ Ã|Xhäsqab¯˜<ÈÚr‘g -û®ñÒËöŸ_oããÞ`)5ãN}{ÇËdfѳ险84µÂ?ÏŽ´ {äzt¤ ®¸-;ü9H{ é£Ï>{¦ÌXû“l®!øP¯ü¸N’·.x³÷žÝ3É!ý¾LÃæÖ©vÑÕÕì«FÀ݉`g:º|Öé¾})Ê1ž÷1’ Ò5°ºÙ“¶qSùª^4)ÛC+*w¸1Ùžá&@Ó1pØ©`¸Þóÿ?~ˆþWà„€…ŒD!ìÁH[¢ÿä3Eendstream endobj 142 0 obj << /Filter /FlateDecode /Length1 
1022 /Length2 2639 /Length3 532 /Length 3305 >> stream xÚí“y<”kÇ“-#[¥“ýAÖl3Æ.eße)ŽÆÌà c†Y0dK¶²/'œdÍ8Y³†”¥’=…,ÇÞÉVÞIç¼§Óùó}ÿz?ïóüóÜ÷÷º®ûwý®ç¶´‘ÕFá]@<Ž$ •ƒªºæúvP*§ Ó%€ÓC@uª¦LÈX¦À ê0˜:\"èâ½(ŒšHêJ} R´=A‰Àæô¤Õ@"°€ ‰I9@‹¬¿dk|@” P$ pÝ08ˆüIÆ8W< òuEöúù€"M ùE¥@ÓˆÂã°ºBä-ð´Ã@š”ÿ†ªï‹±X „ç—ò>ýƒ#<1XÊxO/2 $æxHÀ}zü*ÎDaÈžßSc‹Ajãܰ  ðu C4Àø(K ‰\X"x°âPß‹ 9w AÞÆê²Ù«3ÌôZ"08ÒEŠ×¿Ë~‰>XCÿZÓü!`üš¿PZ íýóËé»ÃôqH< ƒs`JÊ‚@@P ´¿‡¶R ‡ýЦX^‡'ÑRš)€+žù2Q(  ÓD낈9è–†¿e@Þ AqXЕ¤ƒqû ¨ü¦û7¢úMŠ Æí¢ömÎߌvŽ ÿ‘Wø†è|KTi„Höô<¸$šü¿ˆê7…!za”¿T(À¿VôIÿèJAéßìoýs¶::x¿Y…©)Ó̆+ÁXàß"‘d­]ÒÁÕ¡™ûçÚC³ý@$äõ©qÃývUDQ~no1£4Ž[u‚EEóËÖ°áD:l~§©·ôD¹Ý΃Œã쳌³B¾{|Ä›õ׬z VB½Ò?Ïú\Íð¯å³[Ï0§Ìx‰| [äxT5°¹¤Dw¡¢«(Åþ~{æê“ÅKI½‹sÌc‡Z|¶ü|CYÅÎ {)2¼\\QØš3Í»( 9îË™’Ä0þ",È=òHÍ™Ñ5|=ùØNɟ?Ôt>)h¬ìñ™áÚÿEú ç3þ4T ¥÷ZI«èÆÕ¨`Á¾ñ‚¤à ˜}ħ_L>3=?—-¼!Ò8¡v¯ÚÂìÈúç´Iú†áÔ¦šÙ¬EI¥ñe\4ƒ…õB£½Ã3·gf̙Ʈ©×xÏþÝLßÁÈ‹|?ɰSb¨$x½sŠbR„‘AY£o’ ò ¨,5E~š¢?¶,ÛŠóÅW@¯{ëO@p-ÃUŠ¢ ›E ›™y0ÉòI Yj÷¢¡ÙÆ\Œa>Ã+uöQ¦¹Ð‡g¶7ÖC§ìùšûÊïÛKÞ Câ©)1(ÅÕ³FÅéE¢Ã,ç©–¤e## þw%­±1õñg+$Vhÿ’áJ'É[§jrûVš‘:r¯Í®÷mýì§0ö –_GÓü=Õª{ÙÂy1hÃ,Þp[¯WwÆgJX}?¥tz±:s¬m?Ñú4%ª´j’ ´ò¤•IO¹ú´ºœà-ÎMíYOP´×wµÓŒo§?ï~Zì$fò3{ Ѻž·û˜ 2þª®íÊòrϾ¿aafØ,ë²øÉ–L®©3Ûûe‹Él“ úH4ËéáÁj­U j°þík…TâÀ¸þkn?}ÄI3%»qq–§ì?õ=ˆàO²'Å¥ýî-`qšW5g4R¾M¹P½t)¦ìRC‡¦ŒÔ=¥ö];³è +‹Ý« ¥ÏO´'“=0WwepÐM/Ÿ»}’`ÆZ4×ê'ªÌÁ{ʽ¢3TÃG&[@ÙÌŠ(À¡sºIc¦L/³à+»aÐs¡N¯¬9±Ù´›®_„·û¨°:oœ´4ʆ‘~5ž ª¼èe”táô¡U± ´åé µ`rl®òöÒn~8ËQøõ„äÇJ£Àtç`ŽÛ¤ÃŒ@F-¢Ö~”Qs8ïHÆÕ›!ú×û+YAêDcS.‡ÓzÓÂLût ©ÿĨ½_ÇÚ.ø¹)6«YEFUÄÖ0Ú6mêpÌto!E•k®!'=¹Ô5.Wi`L0…»` ÆN%ù'6VÇÇ­‚Ê;å߉±~8ë ƒ"—N÷z>ïl3œ7tþn>‡„籉H&^I6‹áÇ…ƒM¢T=y§R-ˆ{+âlÊ&™"2Zƃ,O—”d*³UfJ3.eÔ÷nI+"ç^„ÅÛb/χ]|>(¡oóºyñSY¤†å‘îÓ¸aº4ƒ@á‡.É<þ…ò=ª}õ5î™Xv˜\3gËáXô¤Ý"Ì1Ÿn.„£¿ŽH·öŸgfóÀ ‰†C¡wl~Nåo‡Dv†}/åoïLŽ–9×\÷NïIkGµpIŠ'7Ys1Cý]3°wî?uK'Ó£M7e%âätدm®¬¶ G×:UïUnó»n»l¶Q0ݱR$ÇÁH„‹m«vì­ªCVVàÙŽéR—kñ>jÚØc_®å(JxûX/É¡CŸWüÇ›s--wWToF²0¸Ü#ë¹<(ÒÎÎïòµ?ïwZXÀlbo¨Y{´}¹\»ôä'ãÖ¹2îwM¯&Ь¸vÆ:7òy[‡ÅÒ£,|·rÏ“´M«.CÎʰJº€ïá Ý=ƒ¤…&«AžF›¤³]É¡Ü7鶉þÝ!ñsRÅ%Š—ù–=ÈtGs—©M`{·à˜èëö2·FÃK¯j¸2ó·vÂÛ"¤{3Ó:=V-ó-©½{ŽË¨ÍZr“*•ž»2‡­Âº*:0S˜O ySwûb‰MŸìàò¢ƒc^ë¹°”nÞ©¥pœ{ôz›³ÉÞôDhþú-òÑ ¾ÜùX,)‡_*Žáä¼o™]›Ý^Q^ݘTabÇ~èÈÆ…²ý¾å¬„ZÍy¡ãòýãÛ‘yÎ÷–Ê˲@G$Š‰ÝŒòÈH}ð!ñ¤vÓúXLgÕ¥˜À±î6•¥F–“ 
§EïQRß÷ Á鼞„ Ê]:÷^Ep¯<}©ƒ¨oZèÏÜà—‰7wIJÖ¼y*8¤/²Òuônë£k5·†[­~ûÍ\ÑzÈxƒ¤Ë] $©~•ß¶kžä§<­lѤÛ$!–)_PÖ¸>Ôý¡¯¹HE%ˆžw£#oQÅUwkø“ôü™Fëx¾›úÛ+«ïvHã®í¤¬ôd¼RZÀ¼øpêanœÝçàÅžy)zV6K!÷äͯ†ݳ‘—[š\¡·¡›ã xZÞ5¸’b0†ÑÄ,3©ØQ»î;^> stream xÚíSi<”}Û¦²=B„KöufdÍ2–Ù“]3ƒaÌ03blÙB)‰H–H²“,Ù’$û¾FB–,‘-[ey§îç¾ëíùø¾Ÿžßs]_®ã8Ïÿqÿólä.%&»ý‚—ÄD©õ·‚d6¦9ª9~Ÿ=ÂÍf]º«!^±ICsW¯Ì%ûUóXo$‘>zžÞ [b®}>¸¹,OiÒ?Ñ‘wÏ&«)m­yé‘©Ø9‹yš÷ vWËR­u“1—"¯—Šœ0gIòÊ‹’‹÷a¹wl¼',È-’¶Jrl÷Âûø·Ç´ñö[Uíµé{˜ÆõÅbÙ ¹¥+3ú Ï ͵b2b÷ 3ñöKƒLLEž¯¢ütøx¸[õŠ'4ïvŽEÃå{=÷!o¾xhÞ×ÑÏ4+Ûæ-ˆ UÒ Ýf‘XX9˜@êY¹}ãâÍ åÈk ±dGÏú–FlŠm‡¯r©Í²¦jˆzeêç7|³GƒÄúʉ,WZ)aSõ:lª»z™L‘٪Ÿ(—ÃóOïS\&†Qj¹»Æ&pöme¯O}µ^çoYc𯭠Ä<’n ?eÕJ§Ûÿz/DÎI?¹ü6³›ÆT¡;G>½/öÕvDc­èÊ7¡ó;u4†sLNn&aÍé‹g/ ±õUCbnw3èO)vµÒÖÌÄ…N»ùÝ:‘’°!Z—™ò4ïÝÒ~SíàRÚ㙬=éK¬2¯“aeuê!ÁéÍâr‘9:—˪Hýåuº÷$Ÿ )%Šèî°aÇÀPu6ÕnÄg’¥ÉlO9õe°'¬/ð°¿b"^¹Ññ,È5ÜÁ_=ÃŒje>ƒ¾ÖfÿÒC·å’ ^ÌUWYïSÛ£ØF¥º"«!ó-9æÂô:n}PÊP-¹R§B†‚,§¥wÊ$»bÌÁ}˳¦ùSë Q‘Ê.W"òBøì“ ‚还¨ [Þ€í2Óª kÜlðLYÄ8…fRA–FŽ9ÉìŽz´ hd×í¨¦à;é•ï#µ¾3~ZÊ}=-f­§2×Ül¸Ê¦OS èôt9û•Ü›né ½;#Þ6+?ŸÅwL¢·&æl×I\fë9£'ìSÓ¥'aœØ”~¬Æ¿NàçûÔ=?>ޏÛ­§®$×XŒÀ"XŠ::$G ?ÔäŠó–óZÝ·aÕò ëäÝ0œçˆ¸˜&ãMEÔ㢇NU]hÎÛ;þÙ^Sž`¸Þ´RIÌb¥ÑìùeÃÒ§¡‘ÇÒpŸ}õ]jÌà^—£À4›”ÀkÜ¥êÓ– ÔþÅÊ«¹ëâŒåfMñmg`ÖöÝ¡."Ôñ“ÏŽqÖ¹æ´?jhµÆ#‰Éì‡Bü\ÀíMžT£œ]6q-cü$þzBQaU‚B,kdA‹OµhFz„wB!þŸcüI´peÿ0äë1³é/Ø ê›Fð[ j Ó /Åö±Æ¹Ìw³'cÇšs^›˜Ȯ|K‹SùT7}üËdIû•^(ÝvΩ;Š,‘^Úq‰7>éP‰&ªsì’ÖoÔª'Ö³‰Ö;X-ûÒvž¬¾õ!¦¹&愤<¸S­˜ù¬ë²–;½Æ Ï]s{éÖÐ¥bOÑìàÑ¥Á¤I/Q!1ª°‡“‘”v\à@û£ÑvÑ`i]8ØÚùÁû›o^èdÖ•|¹ÍËPºl£3S«T7èc\Z¼WqN3C- B0 Õ1ÇýõÎy%Îî8Dö™ðÖÂ}1î©Þùm„„ÑVpòqmòŸw™"£zšrÉëù9±Ly1{Ú|<’¾S÷ ¸BO¹Ãù«ªKÇyÚ¯|ñ7 ¤(cé#c” Ľš(°È—Q£Z–4ÒŸ+ ø>ÌiLè=X|ýÒ“WÎSü‰þ¡ò“ñíÎÃ_ïëò„šž’ ©Õ¨fS©ãå+´ÕþºÝënÍa}'b/\?ÿXRV¹§[9qÒ GÈþ~¥ZÚð¶áµ ͨ;J²N5…>þ¥#(8ø¨ú;ÕîèWOü‡\Ô‹¸ƒT()ÙetSt$g¨Ä‹%Ï5Öùý÷§(žÝµ´JTÇÜ{®5v›;rÓ¢¯yk‡u7ñµKñõ#tW¡l:XÑ)Ç göîRwЏwëÅj)’4²ðÒ3†ô&á7X[{‚÷.«:ì·LJSDÇŠ œ$ÀÉN‘i4öX\¢«|©…T}aMΔN—Í…søûÊ~•N0üÑ\ëÁjs«a;î%kÊ‚~/|•Ê‘Ii¬b!n»­’#·‡åîZ±;fÿ}˸*:?x}F ¯ás“º¼1w‹Öb ‘PÈ—6•³Wd®Dõ\Ö7oMë>õ®»ŒãþÇ·BûM½LŒqœ3<¾¹^ï¯õ-š! 
ÍnåÊ=°ýÒgy,”‰PÂÎ>t‡ÉX³¿và6ïÑ춉mX%Q æØÍÔ­ iT_Y 9#ü¹#¸}°1k„ìeú0õºw;[Ç×´84 ½Y¯?èWCÐÅ€Ö˜iH“)>G·S³¬ÝgóÂ儃§ ½Õ2g¸«ò,?\Kžf“ýú\6ü¡Wd¢¡Ý9Õ.‹Œ;´ ùéÜÈ$šjcÒ‚4C|0ŽCmìf3523"øÅ÷¬®~rFw[Úo1Ôá• ¨Õ¦s¢N'±yd Yv-d6sqˆåPfÏŒQ+Ø,™«Ê‹Ò^]˜âÊ¿)êàÚb•ÌÑÓ`m/%Ù:8+Ùxë|kâ’Ùè\ <¥î»^Æ…z#¼ìöž<1¾dácW±rÛ@«ÝG_ΛqÔe"Aý ´›ô»Š,{v€ÑV ËiÈëo“üB¹âÎ…O·0ÒG¼_¤„ûžNYÄÐp†Yb¹ÜvÊ$8æØœ‘ÝÑæƒ<Ä”³­¬Mfm'&¿?³•0Ð6EsFÚo‚í~g±e ¹¡‡l‹H|}‚Yc憭Ê[}¡NVŽÜ“¯± bÑâ™ñÖ_œU$íO!><°óy‹±âp¡pO8wFòÖjé“,ßÅrÊúåôJ ‡£ëÛ0¹¥ÜKNÈݶ„K‰üåžawÃáz/Ýß&¿ _[€ OÈ*çó×ÕÏ%ª•[Ïm1uÜŒ¹E Ò%âœDÐÞ&?—Řî9ûQtCüÇ:ç˜·Úø¼\5o×-¨€¡ÒáΖ µ”vK™ÅÐÍJ›íp¦Õˆï~ù÷Tì9–J/imzOe:ÍOOHSm›h“¹KpsŠRš.ÜQ=)~¿Ë 1¤;æcší4Œ.î—_î  q»-‹+Œ®xOÙz×À½Oõ¸‚!#õŠdnÕäÌ4lªÈ.im3Iç£e¯£áðg£ ;¿§dšð¡WC„¶UÌîóÝF?HÅN#v<' À×”pï>ÛI×ã×ÿ¼ÔëÎÁÐÉú8ÍoK>ëŒöpò¨Kgpz¡¿Ê}˜u&qí³.ÂtÝ6UÃÉØ4&þN1'ÿ1m%úòPÐÚt£÷–*cåœ:«IêW Öªº¸cXŸibµ×Ç5AYð²ù¾.‹]£,jáÌðšÞ?mÅå’ÿ=CŒY*6QZôÉ,fYÔdréôùÃLš£M ¸dLÆêë|< ’ÚÊ £ÛM¥ÚXÕˆ>›—YÂ¥ £*øþò²å̆ÑòWÉ…³,ƒ‚Üßß µ=ößL*(ÒNy’M¯ÖÍ«Kê½nÛ f+Y÷Öâ„̧٬“ýeãJ+æâèåߌ¦ˆdz÷&Qç©û—Ô›t4Þjiaów0"ÝX ÐÖêÊSªfìÒ©|ºîi1/Wó|=?B˜>BÒî´ø·öktO»Ñž ó)&–¼÷¸jü t† ¬’›_¦fk™,žèÛÔÚ×ɤ’*\;(t,´h£Gß¾Ó6ÏíûFdG•.ÊqI\›+†±¢]Äñ2 É/Y>^õ¹ÏpÑb5Ù&½Uvyô<íZsÆešýZaÛs³ÌÅÆÒU˜‡ƒl9ý á‡n:‘Ok³Ò<ŒÖyx†ïÄŠDSêÕꄊÏé™ð~'«v Ñó—LàÍåëŸ "ò×%ªzN¨dAþè¿ÿ Ž'â<àxwÐÿ¿ó&endstream endobj 144 0 obj << /Filter /FlateDecode /Length1 1057 /Length2 2504 /Length3 532 /Length 3229 >> stream xÚíSy<”ûW¥$[È+Ùcf˜!¤É¾dWö0fÞ£Y4 3YR‡d'DŽBöTRGŽ$si$…ì•,iåNÎÒ=Ý?ïýë~îûþó>ßçûû>ß÷yžŸŠ’³›¶)–ZQÈtm¸Ü0wpó‚ø ¢¢bNÑt…l¦ƒFÜИ2ð À ŒtaFHˆ `N aQ ø : n®ñd˜’@*ƒ&hzHâi`ÐDÀ‚!€t–`J$®ßNÐWRCA¬° ñ2úÍ‘-G ~‡±Œ?S¡ •Æ3¨óLj<‹X ™È° u¤ðj<'ÿ S?Š[1ˆDG4é›üF—þ-&ˆ¬?RƒR ¤’¤z€¿›s±éǬ-M$`LÉx"hÃ:0=äï ÍŠÀ±Î:&À¡‰4pÉØ­ðÚ·aêbêéàá¦õÇ\7’Îh™~”°ïìþ=æu‰J`>0 Î#òÞ?¿ŽÿPÌ’Œ¡` d< ‹ÔÐT*šám/Báp€@Æ‚LdòCuÈ:ïÀkM$€£P!߯  $™Aû†np›‹ɼ‚Ø¿2º<*ƒH'„ðJÿ…ñÎA <ÉàÍó/”'‚ÿ¶¾¼óKèñ4x)aDGÿ EòD@"HâüŽ!(ÄoÜM úž1à±™G'ü‹6Ò‡“Bè,ø]ãU ¤¢1àß*ÂazÀ¿ïéw\Ÿ‡£©ÿlØÀï…àðoÞÐDôôï³73£0õu mC}Þ,àp}ÀÀù7"†A¥ò~`ãzñ6èÏGàm2A dKÁÇgמ«ˆ²,ê½&Ýö±ýÆÌ àϺ¨Ÿ ØH7ä%<½x¶pìnJ‹ô ŽŒÆ8K‚Z#›ì˜ä·|UâúÝ„‚RÏ´hd“ÎonÒ¬Ÿ.Š®i©ÍÚ¶œtÏÉ‹“3j¢ŽçÆ+ï\V>Ý.r;mæ]ôëês®ðNM× š{w – Žl’’fLõ).ÖÎf›`ÞtÙÛ:KÓ¬Ô8¸CÂËè÷ã¸PGËeÒ´¹z,Lûe‡SR“s^áüÞ8•wÜJ™+ Qrá!‚Òÿ8?—\¸á´ 
Ò¤`Ö:ÒÛæF[ÿª`/ß>®ôyw»–ãÅ]xØs½Í·'ëdO«®‚Ç­O î.‰úPŠ>‘w/SÁbþÔɲ“ÕcÒÉ‘:Gjs¼Þ7´s$á¨þAóÔùÛ»¶&è:0X”2•~õÑÅíµ.&ñ‰È^Žp9~’ì„¶ZÛšÜüêybkbáNHsgµbâtîf2˜^ƒ°žRK œ¤l61‹,ºéIÙ¹8¦gZVôªÌ›¦~sôS¬tQ6Ðãã¹¢9£ÌpÄ.sg¢úNªÐFÒž‚Ock[$[Š‘b±N¾=j*äQéÅ)‘Üö•“tv‡øªb+­Ã¹ì¿†xe«ËŽ=Ñf4_:yðLÚ³'ˆ­¿ñ×ïo±‹:iÙÇ1ho / ÖT…“ìßyY|­¸ú1]OV«ùuð|ûÖ®Ùo´ŒƒÚO4:>7ÒïÌþZ"3z‰_µ^Õ“EôQï{îÊ/xM«WHÌi VA…eí´¢‹ %/eX+¡2å4C}âS*µÃÅnöÞÎÎù _ûö†ÁÒuªU>EÏS=²ÌHÒ}(dÌT^Í+­óg_«U3ÝÚøÂZÛ·ßþDÁˆ£ÍŽHXK{†[vÝ¢êäQq>2ZR`#dÙ·É‘ÚÇ{™¡Ñp—­3ÿ.²YÕÕo#r¼{oüé0E\qb˦ýPû|Áʼnºþ<+bøݵÛéôsÖ‡/žH«) a<¯»c(¹ý–¦g|Ôò© 솼-~{Ηôú6¶ãN‘Ó¬;u¿³ÈPiE7ÿÒ1K…ŒÌöA± ë¤óäD»+ó˜g6E“I¡ÖuêOjd÷d˜Ö$Yz{ÇçOí|H ùc…_ƒ¶k6uZÍ÷ä¾OÙ'¹?¶Xbçõ2‚ÿ)Ä ¾ã%Qpè\¶;2/^ wЋÌàðºeqûsk F‘ßwSñ,WóP©urݤ€æ*C’A’ͪn¶c;¬ ¯u„ä])V[ýÛÜí4Ò÷Ä(ï–˜mù-tõéçá­6©y°t5_”âs&a¢êKÜJýÏ(¹·ü+UYÂÙëºv6šá„@ág?4™\*;.¯q¶·1Šð[÷åˆø"«©²dh%1ä©ÀXì¾âÀp‡\NÔŸ÷B<=Rùz›ÊÇ« 9¬1 ­.¡;ºæåj¤þz‰æ¼‹¬mðŽKOö†$<þ»SN‘ùzû—&Aî?íΑš“£2I¡– /W¦ºÎDkµÎ»Ç¤ì³Ö9ÓôÕÎ¥ï– láU÷ÌO9à‰O–xí—¼±ª#m¶Ã‘47ÁË牚—mN>l–ØT¼ãÞc>ÛڵϬØäº0I =@œÝ§pÚÄOÙÁ[a ô{yÌ`‹Éïúµ;Þ)Gv6c|y‡QÖM¥Gë…Ï:YæÉ{–.è• Ø;i&& ßgÜOªôy߸¤sè‘Ù1!··ËFCa“Õ(5d(wFÑɯ`GŠûY‘P±j¿}W¶TÍ{GŸâ~Èþ4·xƪKçý§µc‡M>n:73¶ýšèÑB0F Jš¢EgœQ£Ò¦ÆGåï\§uá ¨Ó ¡P»*–­YËñt6šXhŸÔÚ¼_ ‹6A}ºòt]„Å‹@˜®œù(ºÇ•Yñàì¸J…`ñNÍ«H}¨£ŸpÇq¡SÑŸkêO©ÎÕsž…¦ƒò•äûïåÞ»Þåè¯Д^Ù¼¨X‘šóÐsåáÜEÙùs7OÅ©Çû¦êL-dÒË:È>º˜0¼R°«#/îQ¤n»ùnœ‰y‚Dî)%`)Ò•ÀK>´L·‡ô ïºxnßA­Ñ£Ü›'|,=ùR9±VWX©¾É{Ï›§étMΉòë÷Yª2â cV¹ú«+tù§m 5‚¾ÍÏS~AÙßÖò»¤ì1^ùy5ko ›çzkfÑûœÉãTsjóÙ™Ô“Ñ ùG®óá~¹=’:œd-4s¸óuóz’VÆ$o¡ÞWº§ÍU¹êåÚ½Ñ&GËLûxë5½,¾ðŠ#Dë;5~u× çÎò“!°¦Âÿ–³Ô-·¡Â1uÑkbïPE‰x1fôÛ3!æ¶•ä‚––ÐçD‡¾NË=çWãåev*¨’ÖàRóœkQlð&»{éÚ–V1hâêž~¿·74 ß½w€Hzû·?yZ{xØC6`fG‰zEÌÇnoCñÌC{£,Þ[½ ”¸ªŒµl ½VO0³ŽMÌ(¤rõoNÔÜœ]ì” —X-ôÆ”§zmbÏ} —ßÊì¹ÙQÃUl›Õ=åòk±={,HÑ´o136ÚSÎø‡k`BÊ® ®”AT2ÓS)õâkŠoØÝS –Mo„$Á"Ï;˜tÔë³Q”ßÖ²§ºö¢ÏQ’wKwT³–âÀq´æŒQ_&Jx4l®D£Ò”Š-=äÄ¥í¥1N|´ sÛÛCàqµŸo˜¬2ØHÏæ;©,}Q¯j–9¿Ü…ô˜ß}C‘-ŽÁ¥—ÉA&’?ÕTô[çkÏÂ;Bdº7åBÛ¼ô„ù%êÏ ¸¹©õm,4‹ˆó«N(¨‚¹v~ñ¿Ýø®°âÕ}ŸòŸ^wâõ߹—ËLدØi›xþ¤îlŸ½RZ]$_Ô¶_ž^ìÏ\±ÈY²;ÜŸzjeÓǬí¥goêEO)õØà\ò÷µ ÍæJ´¹¢t„õCž²/ %"’’r²1K© ýO‡®ãcšbôkåл RÌ;«øµƒ5Ãh?[â(n^îø„+ƒÓ©1mÙVäc“rqúÊ ñ«ƒ5ºq馞qåÂc7ä‘ë çßÀœûT—ÌÕ½œ¸ôÉÖK…Û,váEVñ¡…/›—-Nàz TñîžÏ’w<Ò`‡Ÿ‹ÒyC?²m 
`éRïžÖ©”CÊÂ*s¾y†;î”=#Üz7q™ëU,”ìUɵoŽõTû8ü@üRåé%Ï^ß}~‘Jë‹ï½ð‰ÿzâ m£ó[-I©¹.!Øø@þ/ð?!€!‚h*BBSO@þ ˆš endstream endobj 145 0 obj << /Filter /FlateDecode /Length1 735 /Length2 1133 /Length3 532 /Length 1681 >> stream xÚíRkTSW†ÅQ*£b)õE ’ kVDQÀðF±…\ÂÅ›{ÃÍ &*‰„—Ö (”j©ŒÈø€a%Z_¥>xƒ.¨ ¢P"‹ð(Î\pœY£?g~Íšsþœ½÷w¾ýïl'*?Â#Â!†î dÜÿX&Ò=)NN\0†ˆ €,päbÀËloöZÅ àbR%‹“ À…K›1ŽÂa¡BD2$!9„ˆÀ„0D(éA€ð¹2 ’Ax$¢S@ÁBH„Ä0Jñ˜„&aómZ$—¾+¥A¸Œ¸"i)Q„¡ˆAIPŒì‘Jþ¢Þ'çÉ$T ™£Ÿ3郲@#Ê0‰TN@8‚‰ }½Õæ!t ",ä b<ߦ`V@">L“$"ƒæó*z_éÚ¼MÛ¶„Gop{û›ó5¾F‰H¥ôŸ¬sàùüWLzƒÃ Γîé ’@r¿;}ù^¯@Tˆ‰`” Àq’BÎù»AFE€¤`:Šä€t$HÂpÊÜg‚äy(ær”ßàï)v»“Lî^>$©—/ `ú€éÿÊqB‰ùé x'Á¤o¤€„”žNLè·/¥¸^]•XqÿŒ…«©¿ø¢&ôÏ×Ú®X©º M‘Ê¿nNuí«®.µ]4h1¸rçoö²|íž°VÞèÞTMIÇ›Á´„ÁÒ]—ìc ¥!ʧRG£jxqc}ûøKÓ-ûîTmýáÆñ_o—ó]"‡,QMšâÒêš¾ÛÇ`ÆòJ‘¨œìÚ5ÞÔp›’Ԫܵ9ú6E‡Ìõ÷T)9 Ütc˜V¾dºbAÙá7ñf¢Öèãæ‘£JëmÓš»Æ%5™_žÎ—mY§iJè[6©yîg[·£v¼ØAcõþÓ?¼LܨrUÝ °ß›½¡„uþ ÍìdZÚÓYmgoOœ8¦×§z›æÊÊ­¶ôtÙ4ö\ ;PòXè=)u-j^ל°sãL×u­¤*›yë´_kkŽ}‹ÿ€öÑ·¡§ÆÃy ?FÕë?Ó‰éA3»š¿ù8`ÏW¦`U0«Í«sxpHÒaV«–ŒëÄÄV™oNó'„ryøä±çš·,=÷ÈA?C¢-#ï,çÔ¥*kƒ*ßñs¾ø¢g’ÀuñY»†´¶òør±Gå“ðQQu ôZ5¥:]ý³©)sãa£ṉ÷˜/÷Aê¶;£‡ ù/ŸñK–„=xo{¡ <Ä<àlHt¥ M”1ϯ-[‘Síz"•f²òk!ægùdÌ£ý“îÌÅËý…_ø­\}ŽVz#Ñyæ8Úª¿1ù¸Õ®û~Íæñ–:câ{¼°¼ÌèûʹC}ùÜ~ó'ÖqërÁ”6fp˜ˆz0*wD7•d±=ê¹ÂA^Óu©éä욬¡;¹ÓÇ’óöGÙ\Y!jäo¯ž:QxëSËÀÇÚÍù®KiÀ îâWûn>-"šU.[OõG Þyë5‡ŽKœ²o±±õàŽ£&Ìm é.Zоbÿ>bdiætrûÂFÏyº.§÷Ú¦;†UmÊÈ,ãhôé¾0y{Oî±¹ÕnÞÚšÀ‰ŸŽ˜s8šJÍ`Ê ä›ÖŒÀ¢³VÙÕF×^÷sîÏ£;-*÷õæd-wøökö Ý›» Ã쳦~©·‰¿¨éôiþé…n{½sñE;óNËæG‹+¦Á¼ÇŽ¢ò±³â‡ÚÛné›–SCã‚V_îWºÜ®051ß¿cá/3·©tW³Fåeu:p_!k. 2òg ~~ÎW—ˆíê–ìÕV±îFü@Œ,Q§ä,^ìöŠËxBSž0T<+hàpµwåPœ¾ñÚÍsS!A‡_X¹Yíì-Ð×.u^¡ígyh¦Þ<³ù‘ÔyÓ:¶³öúŠG_87¯þ¤Ë–}Am­ŸŽ á]•ÕTÎfX>{õ;žeì. ò©ÀªRoâ‘Ú0y)að›Hi¤b|-I¤jέ7™˜‰ ÚàP!qëX ‡ñøÉþ©?6ÿí³ÛÃiÛ~\ýsH°ƒ³8GÖg~Üd=b”V]0U·~þ=­Ù½½Øjù¬[eÉ•×GO™1–uŸ½6rëæUô¤ZüÛHW¤Ÿ;|=·wÌË‚¥Ë^a°³TÕyþ‡‹ò‚ÿ ! p“ð”¿"Xendstream endobj 146 0 obj << /Filter /FlateDecode /Length1 805 /Length2 2171 /Length3 532 /Length 2757 >> stream xÚí’y<”íÇñJ%!d y"²YÌŒ­ÒزÎX¦„Иy03fŒe¡¬{”Hᔵ&¡)kY“ÐbiñЇ„3é}?Óûç9Ïyžîßuýîßý}®ûQÞcëÁh Ê„Àµàú€± à.a0AeecH`’iTÔàzzhàÀLWׇsM€1ÍÅ {y3Ucµï&ã 2ÈD°!0½A_n‘@hD2Èdi °ÿ¾Ã°ýAF H҄ÙÈ<@/2UúÉ‚êIt~”I~µA†? 
PÝÀT¸$•ÂH § KãžrYþX¿†›P(X‚ï÷øïƒú[›àK¦°þ4Ð|ý˜ °¡‘@õW«#øƒÍ$‘|íZ0 2Cõ¢€ŽÔ‚!ÔÉþfä`dKf½OÅܨƒTÒ¯$Üñmp@±fxKŒÆ‹ÝèÙÈT&žå°Ÿæ ÿ©¹#bƒ˜ ç¹ï_+×_Î2¥i$2Õ @ ÐÁ °aÜ( „Â2•`0ªE¥1¹[î`ÂOCðû­Âa(Jþ^ûS¢èÉ‘:Ôç§„Ã(õ§DÀ(kCþýûŒhÁ¡Ú:âòh#õ,ìß|Ĥ27þ0îÿÒždîÜA0$ >§ ΜÌâÄ”„›t]xûN,ºzíhQö׊@íøW‰U¼ýCfûÌ-OÂ9åCus0Ž}åoÃ}äÎè“R^²îÇó•Æ•†Ï¼šk|¿§|ô”Ã-fÇ?ô¤Òyô¶&ÈD\²zïdÃ2ÑÐLð2 7ËFì0ðÎIQr+æð\g+&ÍâUÔx»–ôZ¼|©÷)ìÍ…ô€kï–Ó^}ÓÍíH IQbÆ%_ž×¹`í¿ïx³¢Åà¢7-öKÆJ(Ç[Ü‘¢é&)õÎ-¶¢Ò¢“4¯°bkÞûLÀ<}¨=_a‹/[áØòª9u>°@Ú^Êæ3áR¡*ÕaAÖ°,S;<éåÓ” 5»§;ÂïYÙ©”7­æ¾®V+—Ë|¤PX´˜G¶½¨t!ªKT R(¤-«ão D=¦%ø-ŒœcüÞ.kH%ð±oš¿):a^w ûûsåæ…5Þ½%l‹T÷]²™=bǘháOrƬµ/S&‚…Øåss†7‰ÇMjÛ¦7ó9º`ª³‡$ß10©.͘úýiÙ:Ô¦ÎÞ.LLï áÜXb£ÞHô\+$uÔëV-íÍs(ðD~©×šžfÚG_Pé’,»~ûÁO©ZÓíÞEtÌ¿D Û9–ÕX ®ðw}hŒ“%Ì ß:Ç%ÉÜ;e¤^¸ØGž`é¶|Ž«¿º©¸Rë¥<¼ÕÏÍE82öô‰øc)Z£nyÍ;+ºÚ°òUÛþàÁµ_”žF. ÑÍêoöRÝËÀ ½ùj-9µ+uû¬1xÒk$¶Ebtþ^#=š‡}7„ÿì–§&³ôóƒ9ͺ¶¦ë|wOªò» ÉÙÝZ¶¦\¦ÖÕêf¸êê™xà‹“0w?KzÝ~ãlyY>9ÿ̰}W¯vw+äÿD^$wlæÄk©Ï,=ýG^rW]äd±ÐyÚñšþMĶŽkx¸Êm‚ûWèÐÂ"âµáêÕ}R³†ý…½ ñ¯–:çŽFÂìùãø ‡víÂ8«^h9‡ 2^–vN4VüÒŸ-S™Ý“ªfq)V8Õü[…±ײ?h¦–x³ šüM¯LèÜßNñDPRïj@è¯Ïµ67Jbáõ@ëê}ãû›µºh*”£Ñ3o›CÙëSÒp“¯ÑÑj±ÛVnLBз«›Ì‹U””,‹tÝJ3“ÒÆ„î‡ÓCqô²iVçí²RÑ£ÈËÅýõ3;VÓèu:•ƒ²)¸ \élwüº;?»8dbÿæÂ*úùë͸¥+í<°þ¬)öŽë‰ãBO —„¦Ž³„O)ɘ$ˆ_zÕ©…a‰‘p#tizDålÃäÎôù}r ²WÏít[ )h‰ïzY$±,Ã7‰z±:t“D¦¼ã¿Îa¯¶ûG¯ «fî„à†ÜŽŸv„Ø•xâÒcâ4 `Iò/S¯Gvc/{g¹€b‡ÓJ·lNé,ÌŠ7Ñ’·Üi™æjH‘Oý@³°ƒ6ˉâR¶ “>0bº#Tgò{ã(H×6ÞÎèg=˜á5aÇùÓïS­û·"17 FxÓ}Ø¢i&MßfG¢Õ­oî噲˜ûMîK.«úêÊhÌÄÜËŠ¸¢£x9õʯœ\AGgõO#äÄL& WpV. Ÿ–(w‹‰p¡„<÷ÊÃê£|¯ve3\Û‹Rzä#íÄ­òêf] ßx§4ôoת´)—ìÛ}P/bp‘üTxQ€Ç¦Êa=Ö'ý¹¼øÎúÅWÏ"ðÍ2š³>.ðs¼[˜(Õº6Åëq«t'•ÔdìpΛŒ«7D†½^´)|J¿Ï©—-™ºc¥}”ña,gE¦æ¤<Ò½=ÿεäLø’dÜ׃Þ×YÎŽÀnw–õí…ã’£ÂOÉJ_A°XâiÛ¢cC–ÏœµjRë —p>—!)@³šÍ=ð¨(é<±ëQµºŒùý9—åÓ­¼ë»6yáï¿¥tLlÍ·oÙo7ÒÆž^o2tî‰Ý‹;« ±róÝ…òdâõ›r®I^îò›X3æc¡î¦›ª@C휃Vyên^Ïï–ôˆ‰,?Ñ~ëÌy*ê§±ð> stream xÚíZY·~ׯØÇ^@Có>èÁ¬$F`ˆ€ 5Û{Ä;ÛʶdIþõ©*MöpW+DR"ÀVÓÃæQ¿¯Xœïž=úæ©? 
,XiOžŸHå˜pÊXxÚBãÙó៧/žýðÍS!ªž;É„Ö?­ ‚úÇ~õ„f á„ÓûŸ¸á±‹ª§RÌ;­SŸwÇsXƵõëât'8çÓô9ÞžŠá¾x1®n’°Æ§<Π³¦ØYæ÷µ°®îÆ,ü;VV}B©VØvg…ª…íØDjcŠ,ßwd¶Ìy.Nv‚éÖÁnïOwÊ¿ïHe™5ÅDïºKš Oª¹~âB¬*6®\ âÍ¢;a]ÏÃÂ0+‚û.®æÙr ?wÌ@˜ÆtÔááŽW°°_½ãÓr¢±˜u<*™1Š:]/2]a*Á¸ñöKºà}r{ì¦eàÙKÿêØH2e­x@–e¾’0lmï˜Ó ?¨YMñÊOv€yÎÙ~³kšÄS’X‚ã½¼Ûñ$cíÒ«Ž¥˜Zµž'£ˆA¯{K ˜ÍàîòŒ[ƒíï§^˜[©áí ¸¦?ø.Áðb˜OwÒ û7ð¸Ä^óM|Kj‰áþÎàï*=_Ä^Óò:5F|šòèóžó­bF_}Nfq ³ D ê¶?-/£b(î%üÜd7Á†”´“"Þ‹²ä&Ié6ãïä™÷Ú4g£«ÎDH ˆ‚ªñ¯íò/2•óü›L„ ¿#˜ßÊZ¹Ž¥×F•@„ÚÂ÷—úMÇ©qw­Ì1ˆ0—òü»ü@²ol-2-˜ýJE`3Çö£¾ëE |¥Í—O݉ í¶ÜVrñ:!ãûU½Ë”0~—Ÿ£M™îçÑ ¼>Ì·¹¯.§$„]"\È£©júHäM‡¦ëÄÞÀ –óáÛ‹1#7«ày?R£&ôº]O^tX…Å9IÙ¬ŒÊnƒŽÏñØ÷">FöM^•ÕHƒç7´f”ãMZÿ6F‹DR¥^ÝpÀ³nƒüçý­®ð`ôe³1¥ôš]Þg½£ªŽ,«©ezŸïâüŽjyÀýT«˜,i“éé,9ØîœÂû²æmLR´þBÛ¤M´:Ú„x'7»{ ÒXÿø‚kíô¤·‡dÌÛO%Ÿ½ÑôS)E9Æ“í¾/Û« Szh2RðU݉žÇ²ÇRíaŽ/éü³'—ŽËf².|öúþôŽ—y'&JçJ€ŒUõça{`§SÞ)Jš„‹Öú¡ÚKûÞ;ï9·¡w=ÐI¢ $4@M¨·Qú·<ékÉ“Ú;8¶H´SöšhУ^ ü2ÿæî¹ûigNð·Š~«üÖUÝÅ«ÐøÝñDÁ1+¬ªãK ®ªU+έÉaŸ¥ÄM{Ùú¢Iã±_…Hƒc†·’ÂhïîÊN,“6da¾p¡W›ú¾KP•*[ÅιÓmS%ÁÃvRŽzm]¯¹d 㫘0¤_ç”ã]m¨­GFQôc{p.GÞ/eNÊkÔðÇ\+9ò?r©±ì›6"á9çêgíÀ”2,bðµ> P]ŠtsëNÁ•ã‘]Þ“ ;8O½ÇTd¶‹á&_AÔE‰f~ÎDp²wM¿) +£×ͧyµŒA«™þ‹-óœ¯lÂ{À—žr%;ó&L™ç¶3‰gŠË…ÞCÊïêþÐs› ĆR ôÕQÜe4wD"A¸è¸·—¹øÚKã°îdµ;²C[Sr¦\©¤‚³²¢9Üb!_Q½ÞQxÛFÒæ…_x,§Îùhdå¶óÝøƒ£ó T¡w›GÇØÃ¨hYf᜙slÌj[Êò$µâ«¯ŠS|›;ÊXÇ……Ê3±ñ{>ãsµ¨yg¾˜inÛÁvZ‚x ½ŒJµz@Rd¬…iSQàu.nÝî!=P÷”Q€¡¥,GÉñuœõ¸ NêœwÂ’s°¤Õ¦=°wœ1j%•¬ƒˆ‘{U_—bc>ЄLBãêÈX‘|øÅÔœ¹ Êm6(®sVxŒª301•Ò[*äü\­Ù)EÛÛÆý~~³¶—õmoR¾½Aù@%|§@Ôú&Gñ¶^#1òŽ]"°0bõ=?å0åruÌÅ(Xí:IUÎ… nŠºðxì xTF¢®ÛèÛO—t`î¨&ðÙo¸‘ß•¾›+xZ¦¸Þ2Á…kÝW*j/ó-Éôî‡ÆŽÙÖÔk—\_œ3ÿó0L°½U„Ä45¡"|^ÐñšbgÌNûul°“»æv(Öh”þD7Bqå{!ÚJH€ €Ê\椚ϮW;`YQsæpq‹xKjëß·iN÷ø±§ Y–ø2L®î, ¤‰MSôÀ…ÈRSûnНöéÚ ã/«“Ê‹Ô=ËužsÛbĪñÊ3Ž…yÐók=Öy·©`B7ÙJ$î8?DHñÙdÓøµ8[÷^ß縙&„<ây&3eO߬¢E‹ó!eÞóͶ`·Õ¾­Ÿ%Gí#3؆<áëÝÙ²–öM2/ÛøÀª {1ßVÔ—t)MóRÉ1FïMœ+U+ñ1“çE®4Oqnq'Õð-ŽK"Lc¦š> stream xÚí“{íŸÏþµ¯çœÎïýýü¾ßÏù~?mŒ«;žÎ‚6¾O&M¦“» H(mm& ²Ä€¿•%M²‰  ‡d*@¢™RȦT J` „‘0"t™zó"@ç0Äfñ'–8ä!9Ø,.à.`C 8’й\Àm~‡pE ¼äPd2ÀØb  †ø(â¼!{~ -bN¸ðsh/‹S€.bR@,r|n$ÀƒPDgR Dœü¦¾NnÎå:³xóéç›ô§0‹q#xÂp1Nó¿–z‹Þœ@Îû:j/fq!6Ìý#ζÇN>#ù;!yH»ÍÂCÉ&joókblFs¼òÆRñ9TõK6·ÍÌÉ5—{{Æ üÔ“¤ ®¥o>šgôØxí@›b÷ê0TÑwDc3×óº»Ê)ð óŒp2›ÃûОØÝ@½è•_!.lJ{ýlØ=:­’¸ó¼{ô 
—eô½A­Ñi#£m•ög3¦\¶U Ãdì%»*ÚÑþž„s9EL£ŒJÐ|åÑèÄÄ‘|«MÌèêõGÝ¢Íø%&#< q+jžÀ:„ºÌ 3sëíjôï*ý3Wªx“¯Ó§~ù¬‰r²b¹j3ŽP¢ ﲺ<»íœ¯|­]SÊ+“§‰…FÃuXúK¯‘†S«ÞL,7  géÅŠEÕnEvùi.²z{4},×þ;ìýÛÄ ¡¥º3½/žÈîrÄŒsé<æ #Ð|x\ÁEè¿"ÓM‰,5 ºˆsÇ"Ó¼-­Ñ-ziÁôäœÒ§3“ë/}(œ­Åq]/|î¹qÕâ hWÖ%¯«Áõá «§ßÝìéõxaçàÔÆì¾äŠyfä30z¿Ã~Řü¯ÚSy—Žt5f'Ø:OLÍ^óÝ¡"8tYʾ‡Oâ.”ÚÄ;D›Èí›­›»o”¼ômšîw¿¤2œ£´–Ú U§¬›ö+ìR_ßǬ¥†Zž¡ã$*«¿õÐJo÷Ð’RoHú¾ÎTsèúÃãoßhëu¥e&Ÿ“ŒÄíÜxºÌ×P‘ô?>¨ÿ'ø[$`sA,ðXp(ê7UWgˆendstream endobj 149 0 obj << /Filter /FlateDecode /Length 2951 >> stream xÚÕ]oãÆñý~…Ð' =1ûEî²AôMŠ1Ї\ Ð-«'Y.)ßÅ÷ë;»Ë]i%;@Ð^d“ÜáÌì|Ï,¸yóÍ{7k«¶QÍìæn&µ®lã̬©m¥t­g7ëŸçã¿rþ¿n€?ýþ,{¼ÄßaX,UÓÎwx=ŽoáÎÉù!åQ‹Å/7ýæ½” VWFÚz&÷³‡³¦j­va*çL=[ÊÊ´†YØéJËÆÍÊÔ·‹¥lšùg•¶Öy¢g:Gè,`b˜_ Œ5•0õŒ™”1wÊW›òôPdcÝü;9yEÉJh%g¶2VÑ+/‡MÔ²Q³¥ªLÃrù‚ÇK[µ­mS–lò¦ªœtÖïì;Y”Oí@ݪjs'B’…ô¢ö2!I“à1•²BåR%‚®B8¦WÐl]µõ+Ôª_R«{•Z]Q­gò¶•5 F]5²fXuŽÐ€[=[&PÞ{ŽàK÷ðÛÂoD£óýksüÓ±çÕÕ!¸c?>¢?Îp³¦÷ÑB–JÿÊÀ&%¬f#ëE§^!2Dÿ¼Xj­÷/µÇ¸õ¨(ìÜx>Žž'¤¸©ˆœ4xn¨I¢vƒ SÜèK£œ'xÝÎ?-ê† ªÃï¡>/”wøç9^;B¸ á }Zà{¼Š”¢ÈèÝ [ÏÙ-îšd°â7î<ªnßo$û,擳˜G¤±Ùôã‘Ã'Ó^bp©æïý®7OA‰øÜÌe…¬;ãã¬ò:£ý Îz4k&ˆBô¾óèÉ-ä\ ‚'žpkã11©§ JÏ䨳¨¯yD·Â=£|$“~ !(ݲßÑ–Ä…Ìã5¦¼rQº%Óòt(‰<2,jLз¬ Ü*$™Ìàà’{Œ¼<ñàØ nýf#©oØÜI×Ïýç Ý“Ì;x-Û4xGò<ìRAÂ[­[dæºÀ?ÑhÕd¬ùÖ n·xÙgV^¿óÈñ•U‚ƒµ´Ê\²Ä4Y‚bKÀ‡ã}p ²æ~̓»cI¿JÞ¾µó›Å‡Ñ´&3ÍÔÅÈ‹dOñ9\§pÁªè’Èd´¸æÉ€4,О·©|r(/[¬‚„¦ÌŸCèå=vÏÛFúO )’žCW`ÚÕKO†k `Ó>‚™ä&ú ÷ûµ*Ôü'IJÊÁjÈ*„-lx\ñ±yy<ì=½Ã?‰±mìK¼g ø|_u§†F¹ r„;m "l˜4TÊ&À™”ªžÿH* j,`ºhãšó Z7Ê—\ ›>ÞóÂzä'Ocúê Ø€ÅãpØñòê‚™‚šBH ¾P iÅeÌñD ¨}‡òÀhàÁ€÷G  ZÕ¢:n—ø¯#žwè×ðÁÚßêðˆ¨.£ ‰ %‚Ó–¶Ç-jJÛ:F ¼Þ2ûýð8ôGbÁÎêƒeÜ”‡N¸$‹úÂ=e<± ÈX”"‚ç8¨1pšÚH!iÒ=Ñ2»ü?y3úÚ§ÓŒ­Åõ`,ÓÈBìÒuîÌ)§3õùÚ'}墻PuÒ˜XÂÀå嘡›Ä¶Ao¼GðÍëqñ„°¸ˆœÒK‘ ùÿmrXfûŒ‘ü¿†¬õãÃÄilY LÑUˆKê­ø–q‚u¶äBž’Ëî¸[ǶbÆïë$ñ'.¬¬çäsªàpœ]°t¿Ç‚,Ž Çù¯0ÌaXOó#-¼.#ú Cj^ûᆘ¸C‹‹™y˜R^Jí§ÀvJ«°ÿXß*}ÙÑ ‡©ñáý¾cÝN‘!Ήf7’]`ai¥ÏA‚ˆdr"D%wá%Bõ1!Э!÷L›ÜøEJ8)…’‰o¨×‹m 6¤SæÖ•4ªÕÕÖZUÑTÊ€—²­T~ü³0ˆXªJãð£Ú+óÓ¶¿}Дâð‰ˆCæ'MÕi#Œ…¡ŽA:n6ñûl˦ÛZ*ˆõäï. 
œTSYQ´_J¬ +õkצVgæßu–4õ×%°Ô.Ø›‚`ÿU ¤*ÛØß¤™Dø§s­©d]ÛtÇrÞdÐhQµ-W³­´ìg?…øÇUT7ðuÌrBž—‡Z¨4¦`VârPApïât°†KYÜ>ÈÆP-¦ª Ö®MÒÄ/Â-ã|òÕ%à»Ë¶“ÉÅ^³˜†•×Ó0‚Ä”®ß•Ê@¨6&Ÿh.™‡1íɬöe7ûR°ÔÌWKÅ™TÐgÂ.×>O¡N;c–ç}¼zmzGÜ>Wãk}4€}wžR–Rו­µÍ5ù®8z¶NÈWÉ_¿Vþú¿*ζiO‡Y÷ÜiµÎº ¸½ÞÅåƒè+]æúà_ðÒ¥æ3Z@(ǘäZP€² :ÁŠ‚øî§îW ¦ùôïTáj{e©ò ·ÔUt!â‡*€0Ç‹7¾*ZÝV¢±õU“é‚R)ëÄù.aïã×Eù(ÁÀ±T¥ÂžO³y\íñ䣤¥l\%\c®Ä_©¹’•Éi ?÷¦ÅTp{5ß²‹ùWyºê§O6JD²/®ðÁº=j)WÌA9y o¡ê•ãÇx¿Íæz!ëA–sgÝb²_6VÍÿè<”L’™BÙ%Óq¦Ÿ-!Ízާ֢r´Ó1J`çÙ ßxñ´&+ Úx´Æœ€á‚‡ 4{ˆ÷7?•†šRj¡53­ 'òüÍÉ·øö³‹öX.Ô.Ÿ§øïv.> stream xÚÕZ[sܶ~÷¯Ðä¥Ü©—Á`3~°;ñ´yˆ3©&éLÝN©]JV#íjÄ•/yé_ï¹$¸WrÒÌ´ö’xpp.ß¹@¯ÎŸ}ù:œ5uã”;;¿<“Z×Þs欯•¶úì|û·js¿’Õ¾ï×ïWÖUí ¼]ÿ-ükñe¿ƒÿêÕÚ(U}·R¡êð£KœÁ‡Ûç7Ýj­\Côüx?U×øõÃ;˜êv«¿Ÿóåk)sÖ‚«}êLOo…²¼*ç?Ô¦1!.Éviñ׋ª¿%îop4ð(ÌÞp@üâ‚æ þ‚çûŸ­eck4<Ⱥ±VÒÞÊæ’ñ$«Žäص´ R¸Â¹ÈáøUmoø“‡Há:2ÄA²Ÿp>$vhíp.‰¬Òñ¸å¥Óé¹l•‘µ´.œ”­­ƒ”..!MÛê/‘ê~¥|õÿKÛÓ^t°]ä=;ñ}²œ®g*ã`°»(DE›l‰òÚxMïãs·éúžÈ'ñ|ÂIUõ›6j—²†×Šåz‹¬^$ÕÒÜþ’ ìï2¹£Üh4©³»Ÿ}²ÎTçïxjXß?_­­ô‰/›·˜½¬)j)TuðhjYÉ’³øZ(iâÂ?Ì))psŸæ*Ó ƒÏ¹ààu&”ù–ÁÔ 8S$y^`^ÖFëfêœH\ ÁÖtJÈ Kc\2 4´ž·p~" Y;£tÜd&3ç#ÔJYŸ–,“Ò|Ú»|l”L{MW*aüQÑÊÖM3p¸cÖ‡LÍèâ¤f x矪f¹Zk=ñ |}‚j|Lµ¢¶J«©j‘8¨VðÓi"ŠÖœ .p âTˆ«N‚ˆ}¼àX¤jµIƒµü°jÀfáûûkü‡ÁE{[ÝwÛ‡ÍáHÓëC{í®ø _.ðÿ–t{ƒ3³ó 3†ZÑО¯WÁTÉ<ºmŒk-èÏ xWB(xD KY¡D)d¦ÀßÐê\ôðÚkÞ ‘YõI6\¤«+Ø6íNvà}À˜À~ø.â,~KPxµ¿¢o˳™%9ŠK·k2ë#8Ê|@`¼¼¢ëyè!Cú1Pfß|È9Øsp£ Љ\»énÇ3¦E²D¦ÍÔzáuÈAÚ¾‹_DX˜ˆ+»~4ò©¨ çÍvAß#øÈ,+ï–tB‘øŽS3^ü©ËÂu’Ò–ç@ºé=e.ðí²Í8¸ ç nØ‘(^qŠqõ…£ŽÝ§XýžR´-SÈrœ#í3˜Í<Ìèx<fâù µg]3œm¡—âÄ×ðL¾TŠePNÙˆ.?—2á刨E* Ä$$D“tÕWŤ±€›N±ïÿ£@k­ji Í\Ë.K\5vÆ“žñ”øþ8§àja²¤ÌJ‚æÇdë|íMHí_…&a`i#ŸmÄÊCk$« ¨–§ÈvÑ4Ñ7q°ˆùsŽ1J*S[#ÃÔÍ0çÒZ‡¤©S£'âƒHqAŸ c\x _| ÿÞ¬€ò+ ¦ýÁßTÛn³Æì.ÑZ)J,¢?ÿÉ/ˆÅZ‹†\"9ó&sKšlñðwû8ƒž´ö¶0‘p|ƒ§U›Ð ž¹~ î)¢´Y™‚ókC”½tHÊÈ(Øê­ÔfÂþz 'PƒzzùíÄ *xÿËQY9g3œúb…ZAÈ’d$—% -ÿ,‹uºü¤8\_‚WpW0ÕZ[eNø­iš'xîg:€ù‹ÈÕ4S—ìÁˆqÆÑú¿ÆUÈ4^fJ°ÿµ’µ’Ê=} –¢öÂZto#½Ë=ðT޳ÉÉåÇ0µl€·lÑSÐõc‘×@îÿKöCÙ)Uý¾ ;³…íÙi@kíHvÆx7_ÛPýT’ ªá5AM˜¢ûã̈Nóy'±S9NèÌù8¿sEšXU*‚æqùO%å4B~®r5l ç*+j«êŸ%A,ñÆÿe¤â£ÿiMù’¦Žˆ«ê3‰DÖ ë“ê7ý' ’YHOžŸ ÚÕØN³'ŠZP’˜¤{²’Cnt¶Ö ¶–ÞãSí¼dnϳÄZ;3éÔ[ëâD,Êðq^`Q%ç8c¡ðÜaûÇB6ùý,¯ÕNsrþÒï¸C䙢cÇCýݤËG{p»NIX"™mKNb1yÉýPu(ι±ãµË«5ð£ UâNo*ÛISTg½EE!{hªz3ª¡¸ÕÓr Ï/M<õ}½Ê’–ÈÛ%ÿÂI¸s¾ 
:›§¦-VͽQêAÆ]dŠâ/$:9ÿÚ˜ÿw§E'( R(|ÍyÇÕ”oÁ¸”üÞk§Hqh—û¦z•Šdn2p%•D¿9.ëÙ±¬û»QHˆyÐÇK3ÛY*óï'©èÕ{ȼ1gP_Æv¼=Ö…r¹õCĸkÌ+7mDùF?7<ŸŸFÄšt{ÞÄà%ÿ>1•űǼ‰-Œ×öp.[.ùˆæzLFwYK –ñnÈå©` bWºÍQ÷ÀŽ>ã=C$Öò5´¢½üMœ8ûj)Ï.Øë']ÈëZpè?‚¼­ÑÆq+)+‘‚«~LÇG'0~ê»þéU%ì✠KB>m܈rTÚãG茋¦ŽƒÓÂzvZ/àŠA¨ ,ðÆ(¼OcûËBjàäC§ã—×°¾Á\ô~¸²$ï,• 2Yßȸùb¾iÿoŠª ¼ Q½·ÜOÖ2@FH- ºæ£÷±cMPßE½1ö—rvU› åo »[Üóê]‚Ñø6¬»Mº¡iT­ ¸˜:dÿtO±+v¥Ø‘2ïãÄR®ô´¶•‰m«ï»¼’8\¿Ç¾U¼Ë¸ÞÁIìC·ãËæS}ª…K e|vI!£êÁm“·Ò )Ÿ}à†/.ØMS%"±ÿ-9•›·ð5%k øÅ’<~ÇD%ds؃ºOðßÎb„i†Ïá¢ãæóL"j!b-díiM›Ô eJÞ!±$á<û&K’øý‚;‘‘ÆRÃÌI&ƒ â)—-Ë·[–C gSCµ=m™ÿ´àWÁº_íž¾¶ÒNZ×ChlL­¥WIJ±‹µÐ`7àøke ëßð:ŸqäQà8©¹s  |CÎÐØÄ\m½[d<Lšæ vN=ýŦF|Í{9[k¨`mãô´©ÑßÝ\<ñ­7¸öŸw¥v•®!s?ºh°¦R3 [ €Žç¾; ë«`C -Œ[¸îÏsnm-éq!Bþ&B,Ý­ÃÑFèLŒ©BWÅ ]ÔR¿ìÎü~Á@U@3O±ù À€Œ·ôÈA„¦u(Yƒ7Š¡'—ÚVLâ‚Å ylH°¨_A¬tï–]q½‹øG=øÅ¬f=®§lÊÓãkj%ú/8° Í$¾R»Þ4Óûb›þ4j?)˜1•ml!­c,¶ 1`@l¾WŒñAÙå`cÇÉúÅÄÜÍ+ ¾wÈÈ´£b(é¹êxõ¤ñ'pgOà4£ ¿ŸÉGaíQ•ê§cr›þ¤/^—¤þÇ<ëÉ;4—~Cl…ï+žÓ8Ý!Ð˧À幄Ø$-ĦµãÌ£#ПÍ<ûúüÙ†˜¦endstream endobj 151 0 obj << /Filter /FlateDecode /Length1 1161 /Length2 7231 /Length3 532 /Length 7970 >> stream xÚí—UT\Û¶®ƒ;„à! .A ×àî(œ*H¨‚ÂÝ ’$¸CàîÜÝÜÝå²Ö>g¯Ü}ï}ºíÎ9Æ7Fýÿ[}¶6'ý+u-6 0Ü" ‡9²ÙB)m 'ÈÎÉ)‰MO/…€˜9Bá0i3Gˆ((È…˜?!^>! 6=@ n熀Z½s0I1ÿİ…  f0€Š™ã;ˆíc 3€Ü qtcHØØ4ÿÚáЄ8@Î0;6C-æ+( ›ã/O 0K8€ÿ_Ó`'»ÿ^r† M˜þ¶É x4 †ÃlÜ`ˆ%6‡*üQ òèåÿ†­ÿL.ëdc£jfûWú¿+õ?ÖÍl¡6nÿ·µsr„ *p0ûÏP=ȿ̩@ÀP'Ûÿ\Up4³ZHÀ¬l 6 ;'Ͽ桲PWXêhñ`ifãù{ÿ§“ÇúýíƒC[MQ]Fõ¿ŽöïEu3(ÌQÛÍàü'úoþÃEB@] ÎÇ*ïÿý‡˜ ̆¬\¼|3ÂÌ û±‰‰à@a`ˆ+âú蘃w|Üx¬ŒÀŽÀþë\\Kè_s#?€CêpHÿ›ø¹ˆÀ¡ðñ8”ÿ¡Ç,*ÿ&G ÍÓc/p˜ýC æÿ& ''€ü>*BþÀ¿¼þ¬þ@^ô|Ôyÿ> Ùü‚Ûðñp8`à£.ü|Ôµûyˆ?ðQ×á|¬„ãøhÃé|´áür=êºþºn࣮ûßø?ûKRîêÁäã°qñrþeŠ È'èõ¿EZ8!˜ãß/ñc›þ7[B;q…X`ÏLÂ-„­ã«ƒ¿yËd 1;¼HÌ ìo ªLfßY~6ލÒ†PZC ŸÆí)s€-y—)ª%<»=´8‚¦ »ñˆ™3“´0,v ‘{Ò»ÚlSC7f¨WíjXfiÏ–œ›ÐJ•·€üY{a¨ß¦&ïN¼ßû ,Ïà$€i×â°Òë•c!º›K‘}×»rðløLN™aÒÑý EÛÂèûy¼DÁðt”‰DÑ[,NfWË•l`ò{ê™·i‚òe•¯Ø{'Õ§UñøóòìÛŸ¾´µF#âÿüj—O #mw¹·P»Ò…Ýè§ÿðxQ÷)GeLjv³"å$úîJ0Ш)å®ï9 ÊæØ&'ã€ÐõgœqËüÍŸøöà! 
®ëqÇOã™}bâ}ÔÆV¬þ¤Ó÷œW‡ CÒ³ª~"l%Œ¸}tíXIŽ:M[Í3©¨²* î£üm˜×Á8„ì:ƒüþÙJm좿ÉtfJÒp#¤jÉv7}ò눃HéFÕøÛ“¯âÙ×wåî-8)ŠBc³/h¡fÞØ³gÚ4Ç,?UT®nG>äXÖÜX¾Dã”QÁ›ìêÄvpêê»Üuž°P‹ù´¶ ~,öº|ßÂÂËÄB†ŠÌ 3H™)œÆîàaÑ X}›Ç­yªý¢¬Y‘6¡Âm‹C‘2RÈP°HcïV'Ì ¡ Õ#ËbyÊ¿ª:î¥×Þ"Æù bkHðMŠè½"XLà –ñ´+*Óݹ" SiÉ©×Y1‚}/5‡ÔËš½°}À'™þTØ{ ì%U ÷/50?’ð'v-‡VML¿Pec•Â]¢¾¦íÿnk¨2Ķ{Ï…ýÝð“ãk8s,—öµölÉ:þ¸Ë/Š†Ñˆ¤*ç²mL?û»'8¦SÂ"zÚ ;½ÍN”^¬\2©‰f5e3%Y›/ષø?÷Colö´~[}rà¶`;|Wrxïu£'(<ûU0÷k{Ü¥)×7_¬í:¯MaáUÛãР„c›ÜX £Kœ·Úø‹ÓÉU"´¤·×:°Î(×*OÒ<‹¡Þ¦]F¨tã6½^è*ÕÀʶÔþ\¬Ñ)ûÂ`‰&Ë˶iFD™Q.h«ŸÁûÒ½öÄîX˜“€FoÈ/f¾›‹œ€'Ìá„Fþ†˜¢¥DÂå _žßÍœ¨ãø~8`yÛ©Vóÿ÷ÞÛ’ZaD_$Ó)¢¸™-ÏYãˆÜ_Ífë ÷%׸µëfÏ|mÙ†¤„°Ù’æ“I¿Ð^ò¶‘_ ËÎúIÇ%_ y{N%ç®GÉ£«òtË«¤}:”æ4llز»ubòkš+Xí/>a'n5V…×3V®„^ ß›$ÈÙ;OvB7 /™ž§ÍKÍÑd­´#”OE6=Éëh,‚–ó5jÍGSS4ó}ë«_â¥jE ^žs®œW¤T™b4{vúë×½`êÅ1u’=QŒyñ ÊÞ(Ï–ÛæØ¡¦%Rku¢ÒëkL¶;s1Åþ£Áí&óº)êm嘯óÏ2g”zEFœ³ø±%yº—4ï>Í2"ÕVßËP:£ã#Š•õ¡^G:E¿?“¾R{êÔÅÕÀ¥:$7¬\TT¤î£p÷YbþÄ:Ý;hn­‘\^‚ø‰üïþÜæ¸.ã6ø¤©ÆÏuojÿþ}~µkd';ÿtÞs}u·*˜-øÛÜ4ŠÈù.è?až•±=ÿ5w|ɬÕ<–ëMþ¸Ú`ãªWm¡ž$±o¯Œ”#`b9zÈõÝ/A¡Ñ­!TÇäüýô,——~„ð}¯É(þêªmçî×ǯ1ô(€°ùé×*’;o"¡—™¸Ô´ »ƒÑ~çÜœ:ýÑ~$²T$$Ýun5¹£Aå¥qËîŽÊÕ– äûmßÏíÛÏ lR¯DOBüj]Å.xnU#)6;ÍÜ(ðpVÞ\³Ârº&q]ßÒ~žÍ«I[Vc™|¸%l|µL=e&-ê­œvCÿâRùµoF®­Ï¨˜ eˆ<²³¨p9ß™RŽŽ«ƒ¢OÇßÿk×Òîµw!ê £»¬‡nÒ¥‘ü™5ÖÊÖàŒVîqøŠrðT)ÙõÒU¿?Ž˜æüDÀk÷Ÿ¤êá½=ïNáÆê(¿¯TkÙº‘£të—õÑ~à¾zUÚi‹Ãvdk3,õÚB‡ó€ºŸ`t¼hbDUY Z”E‡f…™x¼òõ¸mi’¶Ä›ýûI¾tã½ZîÕ„ùKý² c€YÇဃ8)EŽÃ•v}q®*F#+Üã-Tù7;Nèˆ+I%e¿L`¢xà,½;­üÙ$–ìþs‡[«÷ý˜•‹¦Ú›Yg´yŽ ^ÒɽW|c–w٢О0Zûåtò“üÞþÈì%HýîGK†ÉùìE™äŠ‚×0*ãKí¨Åa#Œ×ú-mÂW2B¼ÄôJ$‰”3×ÛÓØß™?šÖËYÏÌ¢ Ö—ÁúÍdr‡[t._n:/°<ù*{슿Íû`TOw“!ܽ+U@-Üf7Ì­ä-•yþ*’ÚŽÂåoµÄSލöp)›*ðm=MO#q²+ “Gæ·ÄÍKÚº¨”â-Á7`Ñ‘üƒÁkמc J=ÚŠÄÒ™S)[ƒ³ …´.’³§ªI Ö|ÿ*n\th$S PåζV%¬oï­In¾¤¥Óé«,ïaÿÁ§¹sÑš¤ô|¥gI¯\èYi†’"kvæ‰Ëx¹[–… sòÁù"g)it‘ïË µ¾‡R#ºÑç´…ëæk2`=,»¡+’NtG³Õøó èÊʪO3ÊFEœ¸®Š¿}Ô™%[õ”c1«šl›®!Åó³Z©\#ý…B‘† ѳUÖ+§³=Èâºp¯õ3ºäªfÞÙ‰ã`±b]ªÕaùD¡Îä÷óß;lðß4¿2«¡•»~€®ËØ ô˜6ôiîÖgZ^AµÖ4k1Ô·taþÛõEtÌ:¿Ëx‘;¬Ä°øƒ7Õ…·†i9ˈ{à÷R¿-­Žr:Æ;üâW*<¿õ _ ãŠ{eÌ]•_Ut.s)?7š ^Þà ¡¡Äw+Ìÿjœz`§¯!ðØúHærü‘¿VE§.½$A8¶xr#Xdí¨}ý_ËXÞ‚›Þç\ú½zúø²R*Õé[éÒ!ÚI%ÅegW);²Ègˆ:WD…µ^ÜÆº]z*¼è µ°á¯…Eö&΀Öj˜Õ…Àï— þúr¢œ'-ìmö¹ZF˜‘VzM_%NÿËÝÂØp^®ÉÉZVuzª.ÿÐ÷bãµÖg á&Óý–‚×b>«Û˜– X´V. 
õ§u½Ý×Ää $'™Ût·(L) ¹Þ‘£CÁ>×+ÚÉÜÓµ+Ì7®#ôàB¨È’¤»Fë\j@ ÞGS÷—J&¹[æÖ˜‘«øwò¹ŠÝ²(ÝE:öAcôóçn oÃóë¤ÖˆÕoäª?™„ ÆóT&ó ó~F#}zÁÙõе84Éþ–D'!Äñç>ºx¡[Ëz+F_RøŽÈ )ÄK{Ý>}'áÿ ÿMq~ºžnW:TZóºy/p)]gLz:0ƒµ±Æòbc‚6ÄÇÓÙ{¹ïÈIêr=Å<šÃú),ñ­+(Äa‚Ò½9‚™~p^p¯îhhÎ1¨’Ê H¢f`{¾`€Ë PuÂ,Kõ "ŠNU£&µÿT‚Ÿò¥—}véHŒ×&˜€V\M>IÞ(ó…¡k¸U}‹Ùê%ê¡b \,™+•”å³µb¨.MOt”‰Gð—Áà´—FFxW ÊÊì6µîÌ óäx …ô‚Bǽ…Ñ1 ·Åiì,s«S¬Ø½¤¡R°xyz¸ÇÒÐ÷Vµ¤^`ü.†ñ6©í #ùµ€äeóœÁôÞ…ŠÁK ®dë _jô‡_û–—;Y÷EL•í¢kC¨×¦‹oÎíâ-ô!²]»Ÿú¤Z±®;MCتœ ¢£Ñ·xË•8vùBr<ŽÜë~È%¸ÕÞÉ  ¨j‡j~/î Ü›¾MÑvf•؉S°èsñy8ÄúÅæœO¹»­KlQÒD“˜?Ñý"§§Á}vYl€§}LY§i[<аžJcr ƒ¢Ú­*&]³ ˜é> žï¬"BøˆÔgr¸ >v0…²šmõí©kZÒKò'YNÿùÔÃr½U?üøãhïƒôîEs™­ý5õkbÍÑ™å˜!ÙºJ6)ÔgDdˆÈÏ#’s*S\­½lÊc>ÈÓGd;Õ‹ir!¿=|šðÏOáéÍxc¾<»HVPîd¶ß Ç4¶ë«€…û™De„|‡(Ø?•Ád‹vAåÆÜN/,‚j²ëûËáÂQ~œ—âQ„YRÇÕG°«ƒOÂìo¿¬Ïù|fëìåkUˆ»·yeVmÌ^onŸ$U¿·m{øÐ²R·&ñ!h«ƒtèLŸÞóÏÏ?×úé\NùwMQ o¿}m/tÀwN”¨\ûð^Ü‚–_ØÖÍšô tÅ7]_.ý·M|‰ÐÄL ë¡ù &:YÖ]ÎöüŠÚtôOã(õͬòG“R Ö+­ÈB£×ZÉŠ?'«}»MbºIxË?;p1K²%ÙR÷òw‘phBà\¾ü)jwÁ z¸‚66oÔÞaïiƬ³©DLfc§VúŸç¡7:jŠÎ$Ԡ怬aƒ²,°li@h)'ïÑFæ–ªÄqÄœ–8µ6¯6°l4ýúT›Í¡"LEj¢–õõ@åù-éöÆÂ¤^7ã@rÞý§¼ÙbWׯÜâ‰#ÝõÍb)á•t I”ŸgSôì*oW‰F*‰2ƒ¾›Œ>±yT^ÈËÈØ!Õd³FÜþJ÷É^ÏÖæ‡¯;Mv@y[üö”*1yÇ;vú¹ÍÖ"óldž@¾iV½Œª‰4i‘¯wÛùšd[¯M§0Q+©% ÆÞD$Šó¦¤ÀˆUãâMÅØœ›ÒVe§ ¾KT‹è ¸ï9v2'QVí@÷¨À¦ŒuÕ>ó“ç^< iÁ-´8‹v•˜ßW ,+¢H¡Ï©ÍÇY„9¢÷C«Ûûr*Ï“ì¡$©x9V¸¦O…AퟓíãÔiÞ ß†dÊ•¤%ùT£¤d2œ`JúV)]>/¢z-‹ýžÝ†‹ŒRË“—Ò÷º¬Æª”x{¬“ì厡·ó¥½G¨…§Š§—SÛ4 )EŒ°+¹ÚFD€¥5†1N`îÜðã0½1ïË¡>+~nì¹O6EÕ,Vüö@ø/Iªœ¯†pZxNš´±-?HSfˆ?Ë|–2ðýÙ¾o[û8ññhËDœàíb— y Ož»QìðøRÔf)N_5 Ú!cù-έ…ô åÞ$Øœ™Ð»Êo¤šTÔ^Ãm#±ëÙ æqz®Úm¯gÌϞ݋zG?DQøî3 Ú%úî#Z…=9¦‰Ã"ú:B"•T9Ò§qB]„º[uî¯+È› SfðÙÇJ„ÍÚVÑÆêô.ÑÑ€M‌A©>¢iF/åiu ´}zŒÐ8Žù,R3`΀Ö"ù•Ò6ÞãØ[ØðxÙ5ŠG(hÓêåïcyå¾Û£"-¥kk=ÁEôÞŒÏë ÛÛ~…ú£Lx®U˜¸TÝÑ˦xeO)†;w$§«›SˆÙÑ´÷›Òßöà ¦˜b$Ïý²âè¦Íh%¬7kÍó.¸šîmîºËa‚C!5Ó‹·í‘Õ&¯œT>Ð4 ¿ ¼FÛ£G"½#†O‹k|¬ÏZð[õ¼/+²¯V5†0xFÃqðäÆìÙÈ+"᩹÷º u”@ÆzM¾V]épK˹‘]ÙXlĶ÷ÑYŸJ¶œ½‹}oÖ’uÜölý hjjyz¯¬Û4PLµŽ´šÈ©p$+‘HÜ›º,žò:Ù°è¯>‘ß 2*£Ù œ ÿü–™ˆ0(—èQ¢Nƒz°² \šÐ¦P͈$¦ê!!}€†"ÎÐÖÙ”=·/>±çø! 
ÇìîF” 8él`™<Á¾˜ÿÍ;w m ¯ä”¦CL Þç7 ›ô¢Ohk…UVT^­_ö«È{-ÜÜ=ûv¥}RáüÒݽ¬¨p@ZcmýlgêµcnPDp›*Û´Œ£|ȯ_Å•K›!>ßP2îÓ“ ŒÖ#,4W<…!\[¨µ˜¨5EncYžÄNžæ>d-u‘k0Eä’'E ši¸eóZë4þ<©é°‡¯—é h4ƒÚK·#ó³›&ÑŽI‚Tr)ë°Á‚Ö× £¾‡;°ueÓ04œÓ‚5ûÇy R|p.ôU,†(ì°¢Ý&‚À@wf©/,*Çõ%áàfǾÎgâ@´´n¯ÿ[Œ^Ƕkö"Rñ“wzJ)?iÔ¸žš`êü2Û‹E¾T×­¹4f§Á`€¹*˜ ~™ ³cœ@ë2‡Öé>cè”6ämùŽŽGãP0{%¶¡‹ª²A`.¦}ÿê%5Q•O=ýâµì_®M2yÞ,Ÿ5h7d¯Y°æÁk¥ÈiÑ@´ÜÃÖ2žÁ^•«õ{·ÛÈf":åáù u<–SQ•›dÜù›£'cA¡dû9½£q®’²C©-sªç|œ6£'´KÌB‹SZÝUñ22 hz¹M<à#ÞåiÀ—¦0¨Ä^]Ûìüض{S.¿nN\Yä’”ä )¥ÝežPs¾0±óY¦új4Û‡4"›fŒ8´‹Ù2=â¬çJÞ Nµ(SAÉIO~frè$+}8L»@$‹Û Õ«u ’Whp¾¢®ã@q;Þ×g­AW-øPÚÔ<–aæ÷ø5o$Ï%\¶=&nÌìõyñ~FÃ]@áe:Ù!™¦Ó]Y–Ðêבt0/Kô—ÿÄvÉä‡Ñò#È~ÊÂÅ¥Š(Õ¦¼F×”*þ$Æ58†`žÁr²DÄÎÚdO"ëCÕô´Ì·5ö±+Ã)Ñ1|™v©F-Ì÷¼Æs’±»7ì°öÄ&íéòùyï¶vNÅZx”ÕÔ-(~ þ=²‰¯¸\ùxùiH*'(ê07¿Í…²<ð«Üs±}—Ë*GÆa¤_l8ó™Ÿ\¦R¤HVêxÜÔWÝ"‡[DàWˆ9PDNE=&ÝWßJ_.µ<íÐwV§nÛ î3èP×sÄPrn•A6¸^zJ­aÜen¸œ·HѓʙG¹ðÕ3=|^†©•ôö{‚M¿¼ÜAÞÛ+ÃüÞóøˆÅBF#Z%l0;,?72¼…—ŽŸN€¦±â…;Ù홀í½Ò°ùÊòާæ­—K׸?µ·xKYÜ–{{ù`ÍŒjß@ààƒÀo•„ñèM?c×£Þ݉2CI2Xé cEf]qf«Ö’&£w8¸x_LÞx£LT~]”a×¼gë+ÖVâ—Èá}£ÄxË_8î¾?X[zàÏ•$œ$Í[.q4­·”—7.R¸pw‹KRv …ÛåÄB£€nmâé.³Èr‘R·X#ʇªQÏ%à±^È <É׎ãx÷ȯL0# W ­fššRïõ’'êYãýŒSN ¸NÖÆ·A¿Œît>™XPÝ«0pÎ.®ýþvÔéwy¾þ¶³ýEüsÉ܇6$\S"Ï'ßw-·{ÛC9ä"d‘<ðåÑÎèô™Ïh  Ðq…¹–PÅ8ѼFŒH(”«Óxf¿ŒÎé®®e1c[!ÒîøþŠó”{¸4k"BïHöµ$=îÙÞGš9p¯:©ó/÷>š=é@¶¹–ßÖGWÓÇÞ#âu¬• ¸m“ð"•&ó&߇?Í A!³“U–Êñòð,43ýŽÖ‰iœ¸rN“’ÜŒz¢7Û1‘ÁoŠ.JɬY2DX½!jœN¿|¯¸ÛÀøÌxìwÄó÷§ºòýÕcaË`¡!—·dŸ¿Ægì5ûžò%‹ok¹;wríi¤yp:­ùvùàSŽûÊ…ë‡Ã¸ƒ±“1ÑèÉObi*ñ$Á=­Y9‰ 4™Ááõ"àyúÌT¬IŸÑ”—v†§èßn¼ä4óÕRkd¼§ † $Ò*ò~2¤ °çs•ŽÚƒ¸­(±•ÅðYèG?s RéîË-‹lv"é’Õׄ²´žvaÑôÙûu‘‹†ß%# œöª©ZãÖþ]‘£"ÀÜŽð&Œ4Á]ÀKÒñ6·)îs‡=WÞ’M™Éü÷#-͹U™Ê­ŸZ®/{8½ð<ߤ¯¯Gй;ýÒŸ_­»·üåˆOÆ–yøºC”aœþþ¹ZòK§éL9%iPΦ`ðíîñ»Âú®’·Þû×êÓ«æ\N2•…“ØDzÜ‚žmE ªh= „¶zÃ=Tz±…»—F¡´/BíÝ/ä~:¸ÎptÓ?Ì2kÅ‰ëæ¶¹Ò™£¾û²úl…áT¹&NóHfIÇe$WÊÖÒ ½Hüz/eÜ”"LBä”PµÿJ(%4–[««@ãÇp@fÀÏña}.á¼dù–òñç¾ÐêBºå˜)Y6]L_¾§w«ÛH3êо pàìa–i¬kŠà`ÿF1­‹UÿüçÍè =$Ì€$Ÿa‹óÿðÂþÿ þŸH`a1C8ÂmÍï±ÿü/ÿ'endstream endobj 152 0 obj << /Filter /FlateDecode /Length 2234 >> stream xÚÍZÛn#Ç}×Wð‘‚=í¾_,à8ðqÃÉ*€ÛhiH)»QkÙûõ9ÕÝÓì¶H–½ÞE§/UÕU§NÕPÌ8þ‹™PŠ9ëõÌ*Î\fv¹>£[Êò™d^i;»ïgË3‘'teFWOùòâì³×~X°ÒÎ.–ÕÂÜ3ï”›]\}?s.æý;úsùpþãÅן½¶¦ž¤YÐFb|'Šs~‚§±õúôIhˆDCçRjN 
o¶Ût±ÄÅ{¼nñÂfb~Cwo馜ÿÀM^Ó†jQÇ×VåU¯ò¤-^´À}¾þ)¯\»ìIh%³Z‡¼v ;¡1Ìà"ë,Ÿ«3í~M–LêÞžK¥ZŸK;içý}Òv³Lï7Yèž4YT6ŽŸ¦í„¨Cæ|yÇ‹}‰gF*±Ó±eUż5ƒØ´« Úç×vqe™Ñò¨ èE8Ùi&¤Óîï²d×cÃõ‹K²jþ¶ˆÞ7Æa4c±ÿÑ0šd:1žc´›ìÇÙXñüȯ÷¼0÷*Û©³'„÷ÌiwĶRX楿նÛë*|HÈ·çÆÎ«|;šdwø°¸ÏC‡)«4¶¸n…ŽHÍ8~±î‹7Ï&p&”–EmiZŠNqémVv¬`:¡U4ÂÞÚ²àÕQó[à‘Sþ7›ÿ}F­íbM¶zwȬ-»Ç´VƒÙîÞÄ” á›ýTÙ‡]äüw™ œF-b>xâ`æ-“Ú‰—SÑ4=È*%Z4 QTY z•×Ò(ì.]ò–SKδö[æTH!ð ;ûê¢ÒZJab¦%>¡¬øßÙ÷?òÙ*’¯Ï8Ã\3{¤ò(˜^ŸI„5'޾xwöæìŸ»R¥,ÙÕkÆRe,^ÙšÜW“%ìŒi¡±â íé?:„ˆ€îØ!ìGq‰MøEžCS*ü±°â°²lVÊ9ÿ µcVpZ©QÍ R¡ò[@û¿ÍM½ßê,³Èæutã@¿}ýd5F1Zx¬_ÀÌà…Jk8‹OG–É™§‘e°ŠÁF»1¤YŒG²²·ŠT÷š«Qj¯5‚ý,Uó]5îϧR†4ã™»:‰\/µËSJ(ŠÁOx™BXf8ÿ×â6WE›5q¿_oÇ­ˆ‘Ÿ{ *,ç®|ŽIYz>Ù‚dú4øÙ7íR(›\.¶}Õ½¸ò6³ÅÕ@×1ã!s_`—VvR«l×…wÊêHñóÊ*e—méàÆ®Ä¥k3¼W’cl‘¸q+^Ï©ª“X¢­‡4B·½Mü™0V°Í¦„³wfXkÕ"§‚õüÛMø· zªŸõÈJêtñk|hø 0Û 5Æ‘`¡ãN çk”Ÿ_\—fU<°ñê|+Q:C)Cš1¥CîÐÙ?D|¶‰G=ƒKn¶}UC 4ü½Ê½•XF?ø9vÆý¨ìÔ°‡”rLÒô)$msW~ëèhYö† âÎÄÓ"wƒ5Fwy¼y&Üš]¯a`Ý5ÂW ÷£ÜÖ"ÂEIФ{ˆ WÕP –ô!Œöl„,…4åÓO mÔÂið@¬*‘ˆ©û5©ÇøTé±o›XT #úkÄØé‰õ!_«6ç×þ2Õ9{ 4D!8ì/ ÓZ&ïú®‘ð:Mq«c[!w²OJ{øö@ÞkÛHY?0§7­½|ˆ}9Æ“OÙhA{!ŠÒ7±ôlÈÁsÏå™ýˆ—ïRóOÚ%,§Ÿ‚À$aQ<ÔÉ®ï ÍÑU9®¨ÜBýL4Ç+›Nþñz€³¾•”+d÷MSudLq< ½€ÇŽìó #Ô•ÿ!}Ø,ga$J9mŸ™å&;,âʹdzÖí°³zÙiÂÙ[n8Š·­£2¼$xJC`]-rƒvĬlrý]º3k?¤bi€rj’ŠŸJ´¿GšeÍ<›pÉä<ûïÔx(œ35M gèÿÌÅØM¦¶ Y/¶±8·Ñ q ¼ÏQôÀ!ËÝhü Mûø ¤G¨óap ÕÓ˜' s4!” >ÔÑ9®]QغÓk×ñ1‰ói *f•0T›ª"å(D5ŸP˜%ýó ÚÛÒ09†?/XÛhYZŽ…â–pߦ5 MÆ÷¿Ðw<=/Õb:ô ¶ ûl»¬œ_lÒŠ×E¬ü VGŒïÆuÉøZÉc\ÝÿUi¯—ñéãC.,Rmܰrüq~ÀÌ ®àUêî¾Ï¸~µÛ)>Šz‚Mú¡åÂ98Rýó™QOÙûe†äžr\¡ 5ã}né'³¬ŠÖ ?%Š ­ÕüËtL›íÃ~{zhiÇkxÀدûrˆC2ºÙ=Í=DÕä)‰w•W®ñʑՎÄÎ)$leAõA' JhºLù2Gü4endstream endobj 153 0 obj << /Filter /FlateDecode /Length1 1286 /Length2 6454 /Length3 532 /Length 7261 >> stream xÚí–gXÓËö¨©R¥#‚”Ð;$ôQJè 4é5B „PÒ›RiÒ;H‘*]P¤HoJ‘Þ;*›½Ï9[ïù¼÷Ó}n’y׬YóÎÌúå 'ë#=Á‡P„ ‚pE ‚„@2E-}}ˆ99‘0káªd‚É@ÒÒ"€‡nH€ˆ”Á|ˆ9Š74nï€ð(òþ•$ xèCÂm­]ZÖ(˜ ¦†­µ3@a ‡¡ÐB€‡ÎÎÝ¿fxta0¤ *D  p[Àfw%þËIÍÕüWêéöŸ!/Ò#àù[“€‘„"\Ñ(ÌŽXXY †qù¿¡õßÅ!žÎÎÚÖ.•ÿû¤þǸµ Üýï „‹›' †h! 
0¤ë§>ýKN …{ºü÷¨ÊÚnûÐÕÞþ+÷€À}`ÐGp”­…ô„ý†¹BÿÛsrêi¨«ñÿûRÿ|d wEé£Ýþ©úWöß ú͘ãAÂ}¦@Ìù‚0‰˜÷¾™ÿ×bÊ®¶(ÜÕ ".°F"­ÑĘöÁ8À€»Ba>˜FXXÈÂL`ÎÄ`‡@ÿu£¢bawO µqþkäï  ìf„¹:ÃìP¿£ Gÿu©ÿ„1l..Ö¿#âa´›ÌõwH3s÷èï@؆DüH„®°X\ l÷úÀÀÜ=­‹J`Ê*üC’˜:ÿ¦Ý„;IcV³ùM˜LÛÄìúþµÎ(‚ñø1Zö fû fïð?#áôb,œÿ@Œ†ËoÄ´ˆ°ëˆÑ@ü ·?£áþb4 FÃãÄœêÄXyþ+¯?cåýE0V>ãÿìr„Ÿ @PDT ) Hþÿ[š­'Ó1¨¿A0OÊØŽy¬`0˜-ñÌ$ÂV6Ì1¥>¢$@9o¸Ÿ[Á¾!^»¶c¬•$t:Û¹°OÃo¾Æè¼,šl ÅûòžGTËÓǃý`÷øÔ‰ë5/«µ4ߦ{FGiZèU7¶_¡ÛäïëÇOvıuFçûK’Œó{2>nç<âQÒ_'˜eÅê4õz×™&!iIs6ˆ ¯áeÕ¥Hu/y&9çM‘ôon(4À1’°‘ÿÛ!¢Å“ê<0;ñÚëÌöVtˆˆBÖtÃ#82M^jÎLÉþl.Æœ€U[Ø.Ìi“^Ùùº2z¼Ùuæý¯¯íËzÍÕh¬ñË%"¾AúõOš<ÃÒOv Ê ·uñ¶e”ʵ(œ¯­õ„j}XT•A/׿H_Àý«n6¶Oªð©ãjrv ®^‘lãà[U{¥6ËͼcÐ8œ¢ \Èq-†»LIÆŸ‚ =\¾âÜîí’#Õëߪþõ!Â/dØ„ðÀöPF\|çy÷hYšÕöçËd;ND^ìÒš]´gžÆí;ØX]ßD›Àd|Í%+ŽùzSþ\L'‹šÆ ^Ù.$fèï:L·FäPú§Ñþ¿i°¯‡Å+U ñv5RVæ¤/“ȹöff £ï&¢6ýÍÈbû^\^ÿçÛ*8C*:;dÓiÖ<$RØg;k®óá^47Ž™Ìøà½d¶ž‡·ÕéÇÌ BùâöKB&¢øVž÷Í0bi=ØUÉzP¶jé¸Ç˜}ìþ.+〳Øl憺 Ú­±ktº÷4€DËì›_1£EÍT<²4e¢ ¬ qoxû™¥ŒJH|iXë:8IF£±T÷Δ «'99ì@‘t¬ßQL@laEã~…”»’6Ž)éûÛ#Ô2{8 §»{Û7¢Þ¯ŽWo,ë;Ê9tÖœëÆõ«Ÿõ«\zoÍ$éõ#›êh¶3%¡·èÇŽéR_=/Õf2~÷²4%éõÚÀ˜6ó×'¦#™ªÎ¯ˆ…t|Πv*Ñ Âëk!-¹6ÌDÂß°T$aÇȾÚòymƒ[wÐÇP#°ËrÓ¦\¶p7[Ã\½CôÉ퇼÷—À&©àÏ̧OÅ_ßõˆÞTAíéᚃ4˜Ëúìbmâ2!s¬vå  ÝÎt»*Á´®Ü—'{ÜšÑÏÕçç"2µžyJ™ÄôŇ ÜÓùiØØFnÈ<*@ž@hªàŶ¼£ÞÒ~NÚøÉ\ì©®³–†¿^1¦aåÍ”€ðpU¡ˆ…SW’õªâCí%ðPÒ'Á4Æëßn, Ÿëz*N¹:;+øÖãb«×Éh’êžtxØ+ÅáP†R•@1·Ï5â9ÌCÌ^œ^T®»±Þã’H©¾È3bÅë˜Ñw1ô6¢ËÐW)Ðù­\õÒ^Ãí¹ÈgÄT>‚o&N©0·ûÖ)èúy®¬Ë.â¢Í@*Û.¥‹O2ƒr1ŽðEóÑaU"Á•QïE->Å¢+‘ö¢‡¢ñžûù5×Rï3ù²dcöO|O͘¥ïS"܉ÃÉSú{O#»ŒÞαä…És×…I|Ѫ17nk ÏE×¶W삉ˆÚÛ¤³fQÅ“Ôqò©ÓÙ7Gdö>½³©)Œì¬p–Xr-vg¼ Úͨ=àìÛ;îþô%¶ª­»ÌeyDdñu†s‹¾Jƒõ¦Ï>z²Çþ˜œ[¬Šw±ïn‰Õ64"Šä˜%yw Ñ V~á#âV\‡=ð¾,FìnâzY]»_ÍupuÇ­T*ò&kƒÏIRQîÍ+ºï¤„Ë ÜVןĥ <‹Å‚¬j¿§'§²¸NPù^¬w²Õ“õf(?¹i»ã¤úÁŠ;†]!ÉåÞ#QI‘ÁXSš©ïT€ß.ªùî@&d°ÖƒÀ·–¡y9£wÅ×Àÿ⌥ٔ´Z ëÑw"›b¡(¶¢0å=óg¨?QãÙúgÍ~Ra-¼9èèË?óÊ­ j*U2/6z1yЋ{ײĒÿ Duº¯¥Ø–Рó§]2ˆ#»}ý³ÜÜ‹L¡4â9±z Y1D,mziÛ‹Òᓲ½Ó36ÙŒù†±ñ¢_s([²?ôhÂ&m_¥EcSé1ãáŠÏÛbÞæäT­ÚI(U‡¿¯™öuãàŸ¦ ùñT«9mJõǾÆ3–ýøHÙ|áe¼µéáŒõˆ›ªE}ÆÂð>sá\ :Àëê±7{Ròi@=ûËÍäÞéÒép è‚sXªŒ¿Ý½ºhaÔt\ MØv…À> ¥f½K9‚ß xtÄlD]½ºîñãÚyø¡sÏÓøÛJLÔ oöé¨ò»Áß [q&zñ\ƒ†ÌŽvšðr²ŒÜ¤Œ?Ø”t8Ü/‡å5MT§æsªoîe=ó )oœt«·{ò£Òè+6ljðžÄ·¾Wüø<…*Í&€4eûpæ6|0½½Œü¬?½ü³GѧÊ@vwò{­@ÿá™4Ôã(@·Þ±‹ 
Þ‘ò`ÔLXy`äxSŽõÜçhböí¬çæúã,1U‚tDf­}{>(š!&³'* LÓ`Óðtð\lñ¤’ô:»øýîi@ÀûNw(YvËQEÝÈk ÔsÚ6*¦XÉ[×늟GöT%ê-üÎ7’=xŸ×ØV¿Á)ܪf¹¢×÷áDj¿@~_"8­òÇcÐ9Xåàe‰®´O ¸Í%  ùoð…èúbm´SaçÁCŠñ/îÝΧüíV¼à~¢ZB–’cyÃ|d¾?â£t¢RÇô¾Ûÿ;‰Ä¦Hè½ Hø'0‹FcËŒ5ãZÙõ”‹ï½p(ö1õm|Ë%üìMRÌI¯8» †¶ZW][™Ft šTv”edÄÓ²ë”_«óR„Ò,`…¯ºvüå»ìã}§s‰À eGèAk†æ5~IºQuf3ß‹H2í¬¯À¹¥äɳáiÜØÛ ãó‚ÂW ¬¯è`/Úyö-î™ôÍæg&>—©_Ž ‡±^×d…} R磋íP**‘Q:••._9ÃW{á pý¯Y:fà<‹€Ä㯰Œéá;%Š“ÉéíuÆ£@µ-°!¤jײå„$;€ Íî!Ïpß!H~_u,…´Vøi¦ÿM} Å.êC ·f3Ãx“á˜çà\–8R¼7úFRhǪ 0þ9$©ûó–1EœØ<"C0 .EJƸä‰K?{À $”•:íÚ¥3wð÷º´jVªyz×*=Ä·A‡¾‰/ાBƒƒÊïUM$*®Ò¼W% ›÷1ïQkl±ZH't¿äJù íF&Þ$óu¾-Qö]£ÝD¢Çg#¦®;Ÿ®Í-‰ê¹ÏNIé†åQxg®b“6D^.\obJ9{œ<†ldy5á©ì.Ûß-,Ï¡.˜q=ÏÑ…"Êà'ÿ74ü³^Û »p7óÃ1Jõ:°r&ó\!]Ðd¬¦rVž×Vv oðjéˆÍ+sˆ¸,ð…~™Ó› ×|•JwiBq'lÞò‹ÓËý£Ú‘Î]ÝQ—øGÕYTJfÈõ/.Ê7ù4dýw””qUË¥–_UK)5†XÄë úRßo[·h‰qÃ_u¹‚lXªÝÜM'¼ï-§é¾rÎ.”\Ƙ1O휭¼}÷ Å;ÑjËKÛ3«/ˆaZÇÅÀpÝX÷kŽoØôi¨b¾>ÚžœwwûM#3Ó«:X¢÷õJ‡u¬°ƒÓhôf}•¢´ }?z*ã²;:âÌgu͉Et¶š*äpH £2ÚRü;!ÀŽ+nµ¦ÆA W<|ߣa§<o”$Ëyæ‹–±¿‰57ÑZ¯?]{¼у¢»½ôoÙ%«Kù~‚CŸpDgaÏ^¸c‘¿àÑÓ-{z¤+÷qü˜Óq)ªuŸâÁœZ}áÛÊ‹AŸÇyí¦oË ÚH,U¤­"‹ª¾´ÈOÐÔÚíÔ5†^XûE,Òkß u¨"|׫âccpþ¤µRZ/k½7ìVWMï7„x³ ø5éØò`¨Ÿ’LôîÞ•Â\×Eé^KAîL˜ÔQyäø…uPØsûãèžjÎ…Ùð½LÉCÁ»†nXSKhD>d’3šÕÜ£å4œ?Å}-Fö*ÕÐ4üÈÛŠ¼dØ*èɘ¿ˆÒö»æ¨/î3œ@©c"nEåª7g²ðôqžá $˜àX é*ë5A1EvJ(!j>iÝWTÁ­s¨Ä»% ½îGîÕÚÓ'ÚÜ.Ûdxø¿b´nŒú £A¼AÝÕk_‘¾CUÀéçl÷ÀÏf\Ô“k ܲãjÜAÀ|ç›§ÒãN/GS8hKm‚.­CA yvâ=)-~vV'Î"¶-ë÷„L%4ëÛȵ” ³ÐÈÓé³OT¶ó¶Œ÷Žû=×p‚Šäµéd›Tº£K üm '‚`ó)¿=˜¥É[MªXƒgTî‚ 4ð9ˆSùª~“l©'y¦•ù»ü [Ç<üÄññƒ—ç$æ kúþŸ#Ë%=ÄÒV¾:ÖYà9 Õ©¦_,„ϪP:ÂA–.µ]fF–\H‡^™fÙ³änA\?ôL#wù£JØÕ`9îú{ýàh{çKNÒÜŠê_¼ø“8†™¿¼YcÃ.ðUË|h§Úï{’§ßiz]dÞÕ6v¾Ä˜rcIrþÈ@˜7ø ·•¾˜î Ñu±ð(*ãD¥¡ p'âtôcÈ’ûÝÏl ûÓÊ…ôÕt"3¤´&šž½v¬u”†*O·•ûP.?EÒÊ[¦ís)ë4iÏ7¿ú|"M¸±ktßCH’ëžD¾ÔÓÚ~âU«?X‡£3Ü\¢ˆœ}ÞºU]«Ÿ“ø’ålÖík~ êÛÊ4¬Œ=nÀ¹’<pÓâÆg‹®K Š·›k´9ĪØ-(9¤4«K¯³R·×§{ëù{–]`×ܬYâÞÞùnà:„;Ó£þ¹HI]^@Ál¸_…Ùû²θHlÍïÊ)Î]Ÿ}yaïøþòýJ_¦ë©n!Åå¤Åw.·my7˜ ˜ºÅ[ó?P’Ò:ØÄ] ·«þïb{œêôÁ´•Ú{È&ƒ}pÍ8ï‹Â÷„{ø·iäó¬õ.†«£Ç% —$¦Þæšì QV¥Þ< õåÑËx˜>ž"z´baïõúèò>o¬=ÝKgüøg¾ÐxyÓ ÒS>ÒÀ™¤ÜúÆÇ×)êŒuT\åÓGßφ®¢æ!¹Îº¢oº£¤pz@MJ 
1r/kæÇNì÷)1á:^î›ÂkU¶)ùh{DÑYÕfàÖÆÕd|Xå—¸±1‘áËo©¯¥O#Ï‹—ñxÏj}õ^jÛõ­x‚h¤"Þ›ªÀÓNþoŒGäx•–§½Mçg¦Ùc<¹|ÜuM“ĖìÙx‚í̳mq+sÎp×Q&ìχ¹S§UçH¢÷ˆ‡§©¤ðbf³¥Ø’´ãKšµàHé§Õ»DsËÄ8ý’vŒü Eö9£h{öY¥ö6þ"NWK}Oêgys_÷wóúÆ™"¹wL»>[•_´dÜ-ÌÕgÒ?<–æ11þfœäÊ6èKÑbvy¾à˜.ki,Ãü?|ÿÿÿO°u†Y#Qk¤ñÿ—Ëh[endstream endobj 154 0 obj << /Filter /FlateDecode /Length 3021 >> stream xÚÅËrã¸ñ¾_á[¤*‹‹AðCvj§²©Í£RÎ)“-ÓÕH¦#ÊãM¾>ý>$;;•ÊA 4~wC?Ü}÷ýÇÒÝhWXçÍÍÝã¶¶¨|p7¾¬ cK{s÷ð÷•]o´6åêCwºmú(Ìñt$‹ô®Á/, nc”+Jʱï×&ÐÚºÎUÐ)µjPŸ=álF T{çV×Á‘àa. Û⪦'úD6£õô,Œëîe›öØ3¢v¿Îlâ´ ã]at¥…‡÷D8£{¾¿$EÇmg2(<Öµ è–6Ñdn‚€4k¶ß sÔšP(W‡ÈQ€î…€.)Èls[AAœ›¤+Õ d|Ir N=gºÆò½°j,½R Dæ\‘î…A;†#›Ø‚ßkŸätí ·ª}&\ d bm9¯ÍŠ‘ÀËKTÑ^xÂÇ;‰h° º;㫜Z|š"A…UCÔ-ÉE¨ÿvÈ x/Ÿ½Œ-Ÿs…3e´¾OªTëÄ0Ø×¯Ž‘IøK/ÿÎØ†÷ÿ”í˜Ä%7{ŠœÄ~n—è>Õ*TBÂAhÖ†ÏÛáè#½© xi­¬ŠÚª‰_?‘ õªA¥í»Cæ¿ÐËÔ*žÊLNvjÂ*±DÔ„7;"Ø‚Xµ£ov ä`b"DÖöÁ1¹ŒÚgVæiË|¿~¬Ø2»G6„ÜRàuÛEÍßOcns`äì¯zú'æmÂØ ÕØ „‰devõl¯ï¢*!–þE¶ú*æöu]úDÐĤ"óñ öO_ÖÞv‚Šxæaj8,y9â·`:1š4øÕÕþ̡ߪÂk`üFÛ¢vUEt¶§¦7Ã’…üÕ(Öd2z² £Æ)½I¹•®9Ë~åÑqzL«ªxN~‹~w¶}ò,B-wD½ê¥t™·›«¡UU¡}ˆzÈÙâ'·òYæ“,'U¬… _®L~§ŠO™¾~Åß-+¾óEl—ƒÒç'd´’qÇeÁ‘M‚üô9 t+'€ý’rPàÊÉ™±4á½?615%-sƒ‡g1µ²ñ"ƒ 3ÇþúsCòkk?É#hó,»­Be'yDGºDñ qÉK^ñ âÆ™c0³ã¨¢ðï# %CBhž}`ø8ÓŽj˜)’,ð).©Ç¶Å°£4›ñ°’oÑÅCÒÖž3§ÄÎìMå½ÄÛȦ‰¾šTìDË % ä£¢‹2¤jdTx‘ {ç¡tm“Fš¤D`ΰbì?ó ÃÑꆎ ® rö²MîDjLùÏ¢©8œÂø¦C!lúæøœR=bK<ÍÈŠß!am®J˜!’-¡s×úrÕ|U]úwÛÌ\t b«,å2÷Ö¯~/–_-íÄ)³Õe^Õêxæ&{†ÔHtËX]è 'IÇs–w6³tÕBæ¹e#íeÏÔ5袣w‡âmC¡’ʔˊç¿ñsJÄ[Áe=«ÎvŸuí(žÀÚÿ‡ÉÍdAÂ$E?íuÖKñ¬¼,6lÇÀoŠÎ^OMÅúQä\Êà ¤‹Ú}ƒ®]зGVfÉM Q±Uó…”.ö…`âBæO‡#ù=1Ü k)è±…/ÄcjQy²Ki •eªŸH³›Y÷²¬Òº<µ@4ö®ëì…ú$”ÉYþOLù–™ñm‰aÜo‹i0P>5#Œà)WA ô®¬Æª ~Ɉ)N BàÒdhÒa”¥!ôŒÍö f˜ ¼~µÂ0hÃ꣎»—LܰÔòˆ¤ð=äB‰Åâa.ÉØ¥|æý†ÄãµOKé{œuÕùßÎÓRX‘ÚPGÜü>®‡9Ï<˜kŠy+‰ot~:6§ ï œëŠy—H–.PslÓ&ÅRâO]쪘IBhÆÆÌôn4<_`Ç7G F_æÌq¥ ß2[c–e,DÄ¡ŽAç¼]ˆ ®pי؎§ÇSV²ù HŽÚ#&]’ࢼ #8†#ò5lÆ Åá7[{m©€p"®E쨮f޲¾QŒ¯[–½rcQfh—T*'ø µ÷‚Æ¢eÒÑT½÷aVJAɽҌ-ß§ý…–¥l…iN³ ûé^# ƒXð³×S~…Qq N.¶ºDjó£Ñà\¯Ñ#‰î¥Uo¸ å\Sö‰tŸy¿h/2,ÆA‘Sp Œ§ñ©ê>Ìc†®¤Šnmb­8-@¥Ó)C‰Õ<0‹æ›ÊÂ:¤>P#ûÚEC2*Õ0·rï‡V/’—d’Ƥ{š×5J-]Jds?^™Îî’¤-§­ƒø ¤Xš[ä¤ÓìÂù¤=Ž*üˆ›å<:Ûž@ŽãF¦“hëü8T#hÊ¡'¹VP¾þÄ`Ôµy™?®ßKׯUƒÝ.{‹y ˜å‚iÅŒÖZþŽ^ßðì1Ê×íUoŒà 
O+ë/DÀ¤(ÓÒY[ð!ŸPúú5±\ÌñjþGºGÛ¹Çö´ã{ù3u傞æü¾ÜÊCÀG¾fÞŸ¯öëâE³ªÙ]Çfv>Amê¸Y›ÒR!a;VN±¢ò x+ðâÚ-Ǽ¹ºâèbˆ‡9:­ÕïP„æqYðR£dÃeÍE;3/¢(Õó³Ä ƒì—kSÀÕÈL<ûRó¹M¶'ѽ¥^(FéVF©…µÐ)³6»i?§Îã⥬QEºšbz sÚó¸'»æ¹\Œ±v/]Q_ÍÙ8üä2Ñ®¨¢ÿ3€ã燅º¶,¼O— ¹•uLµÕÚ…V±qøu!:øØ$Ù²Æ$üS%a½üWcèXÆ[æ'¬H%ü[ Ëóó©Â¹tá1Ü™•Þ ÕyÅÿ}’ÿáf¾m…e¹Éï_X‘¹.|ÎþCð¸xÿ$GUšq±^†Ä¬¨liæ.:3䋿¸èø5ø/¦ÚÙz¬P?Õ>XŒ |c8%?œÏü†Mݸš²“ßýuìêŽ×ôôáܺEt£›zXü.+5®¬ÔHˆFç~‘†ùCèkdMÜJ,jñüñ.Àø<誎æ :#Åh!Ť¤WöTç⟵ÒÌçïÜíš+••‘ÊʼQYaB  !XlïW¶¨ªtOïMš,B’²ž²{½7þs´Ih3s°gv¦ù¿Ì\Ö$KãËuµaaR£ë YÔ’ñ.kOãûÀ^>y5êìÔ…/Ýðß íNc=¹)‚qúÛù¤ÝÅFÐ'¥Ì’+Á›Ü*»á: ÆW¾QÑ#Æ.O¼6™{ܲ¨·ó–«} Ž@XTʳÐ*œüîÇ»ïþú¹endstream endobj 155 0 obj << /BBox [ 0.00000000 0.00000000 559.00000000 432.00000000 ] /Filter /FlateDecode /FormType 1 /Matrix [ 1.00000000 0.00000000 0.00000000 1.00000000 0.00000000 0.00000000 ] /PTEX.FileName (./shrinkage-v-iterations.pdf) /PTEX.InfoDict 88 0 R /PTEX.PageNumber 1 /Resources << /ExtGState << /R4 89 0 R >> /Font << /R10 90 0 R >> /ProcSet [ /PDF /Text ] >> /Subtype /Form /Type /XObject /Length 9034 >> stream xœÅ|MÏ­;RÝüüŠ=„AÞØåïi¤…YÓWbŒ I@§A}!âï§Ö‡Ÿ½\: ZjµnŸ·Ê~üíry­òþÝ«|ÕWÁÿüï_ÿöÛï¾ý÷¿è¯ÿýOß~•B÷ÛüÿÞ¢¼¾ãüø7þø?ßþòõßÖ×xýË·úúóüïïó¿¿û¶fOÝjíkg£ÖùÕ,~ÿöë•,iÎêÔ:[Ï?o²ÅwzŒ2¾âI·øNom¯ù¤[|§÷èãë<éßé?4÷iý;ýw¯Êq»ÿüõo_ÿã§¹k¼~úÛoÏš%|ÕÚ_}}õùúé·ßþ¤üéOÿm­¯‘~ú›o¥P¹SÙ­ë¿ ›ºfݾºöµžŒµXû?zýê[ÎÓWíÞÕJmÖSΞþ˜œú+½SëhÈ~“-¾Ó#ZÍnºÅôŸÅ[|§»97ý£u¿oœ1ÌîÿV1”½~­œâ“½5Ò_õpÆ×Zkxp ¿ òäÝ­}¨1sý×ã«•ÒW)Ù½ƒÅøóo²ýÿ¹Mô^U‘Ýl3^9NSËêýóo~þ«þ»ü‡BÃ~q€j|™ŸÌ¯úè׿û¿õóoþæõ›ŸþÇŸŸ¡ÍŸÊi‰ñûÛŠéSøwÁnÙçz¸JÖ©¯ºFäŽüžÂùZ¯ÚvÍ öýÛ.-Ç®¶5YÕ.\pý ë®+—EY ³ÆÈs|õT´’½gŽÕ³ûÕ"ܬÙÎT´¹¸)Ï©ÙÎTô¬9`.²}­/kÊYàèÚƒµä¨/ìÉÊ~×3?KƒºœÞ°ÚÛ©-ª%þ¼zéÅ¥q¢{ÛòÄhôZ6¥…k»šÒκz Ù̃6çÂ)Nß;+ñBúþŸÞ_X¾?®K-·»Î?%”þK©;4¿þì“ÿð~Ÿíkæ6È‘ÏlÛžýúow›>û‹?Ë&c`Ÿ§Ý3-A}ï§™h¾÷S›ZÞO1¼{péըڸϋúàn1|R?·XË0>·X›“ƒx7YísIám–ŠñÃ6KÅüØdy-–»^+èn±:sšÞ[,»µè!ÜME/ÚÜdP¤ÍéÏ&KÅö1§MÅñò&Ë“d~l±<+Ü o±´…­ð‹ZÇ•sg¼Ì¹Å"«÷‹,Í‹ž,÷³v¸7X´±,sƒEÛ?l°è1-çK©¿7-Òyo°”›7óÁÔFù³²lDÚ'殕5¯zSƒ5¯ÚeHr¢Qóê.½ö¬yݲkgÝé²ÈPÔÁºwª9&u²îÓm 0%žY¿úÇ9³ÒOŠ´‘¹òª£CKÎô¿sjáºy‰ÉNžœ¤—ñÐÉ!_܇‡RžTvz&-¦èíÁmC h8^!›) o. 
Œ [[¯®;lšd kšA§ÒDב–¸½Mt.ÛLtn·ð÷üƒ6à1ÐÍ­×@§5³i‰NkØ>MtZËæã€&:mï²Ñ¥‰ÎªÍ(4]¥#ņÍ&tÊãšh4Àyi¢³öíd¢3ýšpšèL?®‹&:åi™&šuÅÛDW˜èvM´;ø˜è÷×Q'¿žööd²S¾&&;¿wOe²³§ím°sx®§Áæp}˜ì§aO“]q³ßo‹Ãs[³1ª×‹•Á~˲×){EÈ\ç€Ù –µNÙ#+c²gA¶:+×m¦:°¼ítJF:‡+ÔÙè¯>™è‹,t€î!6Ð)û(“}Îñ ‚yÎî{­Ê:§ì£MÆ9åõõa›s@ªz*Óœ¢k”-sˆ}x攽Ód—S¶»³œ­Ë“Öé;Ÿ{Ç&y¾<6Ǹ.N¯¨AO…òº¦¸Böê½°u¬”òaë¶WÉ›­Û^ƒ·¤ gJé‹+>=FË›+>JK3%~;KÐ T·eòª˜Zõí¬Ø6nË!Èô+·ì†›-Æ ÃMit^„§çxæ¦(vœÙ”™Ã¼¨pOgv±Ck(Å+‘®sœ÷Ñý™Í]eâ\Ì!æ\:ksü=Q+‡š¾våL¬¬p„—<ܾN×ÜÙ…޶\‚ÎÙœöaÍ¥ ‡|uzà¬r²…+Çš.›£•Ã=¡Øþ`î²qm½Üøý¦/—ÓbûsŽ«AÕÝ9®á)/—Öá1kTic@²¾gãÐmvn˜œ‘9½Ã÷®¼Mec•sc”ÿpÏ;ܧp¯âïO¼ ‘em«ênUOã:_ÙÉë|Ýó ¾Óª¢Ò×ù¶¥gr¯¡kU= Ë,§`hjÎÆÍ$e¯Ås:[7±¢þ­ŸF¼²mKu¥]"H¸¶Æ9eÕ½uÙÉCUuû‚¥ç×»¨åyÍá:ßµ¸ôÉ•¾kw:N^§¼q?HùXNƒ? kߤ|PÛn¬;O ôsw]Q¢êŒÁT³íiÑÏ=d‡# 1¤¡²+.½€Ìºs˜¨{kÉÌ7Úvª¿ž›m;õlª¸¦nÊXUÅ-•#Q VåäjWçççéš•À]»áš©U©>eWr9µ:+R>„d »¤oE .'Vvaѳ¾eÇâ¡“3-‰Æ+gÕöu î ̲¾žÙ–QëZÜÃw¾”¹1põQnº69áÍun‹aÏ6`˜Q[×ý#6Ýœ~[ÐÜcÚÆm¯œhaÖ±YPÈq¢ŒÜ[þ[^Éuéb yN¶îØÂí5غ},sˆ2]7¹ØÌp­˜~ ¥\ UÎÜÀ ξ$§sÎÜ\òÂsá"µ³e0º(iÊo À(kݼ“`×\žÓCT?—‚¤tÜcL‰P×Ò*XƒM\óVº;[¹›¶nƒ±E3 }«D“¹@Žœ*š8{Sѵ•n©¸ˆWïþ¦ëU*Æ-”g]~r®bààç'®e«­¶Êyšp·í°ÍÍ«·ÛnóæØÜo8õÕ µD5žyЊÛÍJ‹N,2헞Ɠ­XœÌ-ÊX†®[èü¾“„û:¿÷-ƒ®€7 VàÆ…vž§ §qÀTç³I)‚îxxÄ–fêðùÒpÍŠk±‚[à¤Ëâ1‰Ë ;N I•¶Žôu§¹ÎJ¯Ílm6Vš{XýhÜQãëÔ~åÂŽ›ƒû7·§¸@E*ªç¼ÿ.¢·y©Y=ýß\èõ¦ÓN…O›4ühE.ô§„ÎE•+ݦ¼ua£ .d³†‡hæñéš}ݘ̷âÅS±±fsƼ—|Xbî‰p9°Æ¨¿5•7°œ‘»åFð –»àöj—yj|Œ5\Ž64­ÞrxÃhð=i.ÆÀIYF¬™Ã—D½ð”娷±ae<ÓI™xˆ÷ e¸¨!M’—­šm³âÊý³-Ô\S4T°«(ë h“è"v%6©]~¨áf/î$•L²¹6É‚6 ,Ê›¡EË¥ ] ò”ÇVÈT]nFÀóµ8ïØ ì”|ز1-·Í¶Zj«/¶myLu/jaZµ-2°)«t êoiãË}ÛuºÃM®’‰¹çq¢tpn<‚ŽÓôD]†¯ ¬y†pz#ÞÊ“åÁÓP=C›]LYdUÛì·½ê#}ׯ°§0X¡tŽÄÞSåéJÓhÄs°<0lMé¬?ï.d=Ô:Ü\äc¹ Îí­ÚÀ®UÉúšîoZÈíÐûmw”ŸÎokwµò¢"™­r‹Ö-nÑž-0 a2)¹e權¥^8hiYtÕì…K¯õÐ^é ÍdA¯°-XœÎ?È0õ~ÑˬHÝNd˜znË"ÃÔ‡àƒàv˜¢|T;wy Ô½›uÀ¶¨{iÞ:¯)`ˆ¥ö´HuÙµ©nSö]4Wë[Ðj¯dç°4ŽDd·F Oo­á@ƒÝÜfuÀnš¹å}xŠô(\æ„Ó¬8"¬lŽ{0h§PS‘Øè¨sڭ鸠䀌ë°w@¶¨e훃 _ÃÁÕïJ£µî'‹ÜÒ,ã¶&w–sÅ­h…(·‡ÜÊÑ<¤b]oçäÑ–ôpÓºÝJÞâ<˜ÓœW|Û¨™·”În6ø‡]šAÿ$íš­ŠÙ©yªZôƒ.‚¶y2¤æÜÚ/^H5tq†Á£<ÖðâÕ@ͪß9£ˆu×>1©Á$½“}HÃ(¶µwÆ|¥,0¬ÖØo:úù8}Á¥á\’‘†Ó{¯íIÓé½™K´¢€ž ò‘è€u;XFÁ0·— Y O6Íh`äõ}‡çIYé$V?dnºvÙú £Ñ€èAZÜ pÿ$or½ð%r½{ê$ë³íÝË}ÇícJ’Iöî-)Pö‘ý쓤TÛw'Ö×YºË¢»›²Àù>éí¦Ù]þž8|®ö{òØiˆYRi›gêq´CÇ $[všû L7[v9õ´=(«+2 
ÑE]¶Æ}1’#²g“}>eìmÈayÒ‡;¾uEP¤Ñvß0Ýè›vÐ[Ôv¼"Þ¢¶c ·éÊ¦ì‘ØôdÓ¨+Ú ‹Ëµ¯¯éÅ’VjgšWߦ›_þ@ßtaŸ ¤¾éÁ¦ìsd¥såúnOá²£¶y7eºì"›Õ·SëÛÞ:ðÛþAGw¸U9´A5'𥠿AX?Ÿ ´¦ãR¥}æ"¥}‘“®‘ç'ÎÁ ÖArÉÀ¤Ñ~ùÀ`V•°¤¨lN›!E¨a¨hÍE+®gžŽ/>èåI(µãW¥ zÚë TpÇ«EЬtør5 ã(z¶£‚J×¾9¸*ú› ô9»RL9¢Û6Ãvþ×÷i³“ODò§Èˆå_Lÿ£0ïX«e¿rŸ•-V~_¸˜¾B+=¡‡u7WœÇòqHN/Úþ=mîÀC¶8ž®¸Åe·Ä§|ËR8Ïî—{&WsšgWãFŠ+8‚ÜpD;+ü Íÿ`†IDèkqçõÏ`žºŸà qÃËNüå†W¹¡Eâ†g3çIngŠ$qÃãácÅ ÷éÐ"sÃ=²¸á<ÍçŠn¸3>*/3ÃŒ§¹áˆ[úe‡•*n¸š‹¹ÜpvŲ¸aL•Ø]qÃÀÀÅÖŠ® ¯ÖiIÜp&¿ÞÌp-Í<³˜á‡q73\¶¾/ /™ý4/\Öm‰xaDÐC¾Ä0¬Ç”BÌ0Œ²–©áGE¢¸a¤« UpûBl1̾sˆ.Fà^H!¾Še…ÊoòÉ£Ú"…(c(Î'g .>Ic6T)Ä#²A SÇèë0W\4·ó&q•Oö¸8¸Xìñ[{\|4_þø-ϦX IÜVf˜s7cH³½É©•m.tHÙÄp3и3ÌüV²j8wDÜ׉yñÕQ½¢WWl„’–¡U÷ÐÆX…qÓ £¥ì•´ˆ¢UY*­`µZÊn‹9ãÜ)úÚœqõµî²ÆØíõf«¯°—5®7”Ϭqí ²ÆUfæŒët<Œ9ãœ×½éö׺<Ðð;6ަ€ã1¨p°Â Â<3\°äÕ7ERÇ“ŠùæŽÇTôrVä˜-æY“—ÉãNÑÔ>Øc‰\s ÇcS!îZìqÜ3{QÌJÓü¯øãðµûòÇiÜ_oöðªø`±ÇaøKìqørÙã7Uì1 ¼Øa±Ç†ÎëÍR[ÄÇlf›ÅÇô÷æùàâõæq@ˆ¾˜‹?Ç÷_þ8pùãØíþ8ö-]üqøD½üqœpéâqà|òÇáè”Ë·âq7,ÔøòÇ­˜)7}ÜŠ¹³Ç-Ì­˜¬0Õ^M&¦G`N—8Y*ºkš¸â;óÆS!†Í¦‰Ó,•š¸âQ‡«$MÌCYEŠ&®ˆA¾¼ñ¡_2ú%«Z1Ë41ƒñ?ibžÝ.T‘Žý¡\‡Bû¾”ùP´cßý*î˜÷¡[‚âóêòAw)Ü E<އBžÄÎê¸ðÙá Åü ‡\²Ëo§ë&Ñ„.ñ³:î-‡¼pwì’CÚ¹Ì1!4*Dí‹f¨Ëà¶®€Ä4À¢…ëÔm^8]£=&†©PµË/ìžÈPÃåH£®,Uéw\â¬#¯¦©'RÄŒ¤Z7>Ñ; ¦ºí¨h*3m¤Š57Pd1k(þ¥gÄX¹vЬýÒZ¤Œq\Ìbx‘¤ñfìÕ­ûdjîˆã&ÔÜñƒ®Çõj†qãÒɤ阋N&}LßHt1éãLOú¾’‰ZÒÇyÛh‰@NÙd¶d}‰$&\Auˆ`nH"— W\`”—rúV&ÊE §‰·1œò­k„‡ù …\§Ã‚M"§§eG$rÊæD"Óó"’.™²ÒI!§_avE rÊBcL 3ÀŒH½øãt;ŠI뉯Ö\Ùc†Ÿ)•ä1ÞÔ¼æ8%…˜8NÙ¬’xcúyëõÐÆu9ÄÙ¬1ƒÙXºHãŠ'gì›8㔿'†V×ìÎO -eì"¿ò0UêÖånm·Ž§/ªt=2#ª8}J͸™b³qD§—yÓåÜ®Ëïˆ&Î:Y—è,¿Í×C§l~E qÝÆéMÓ'åÈ€® Œ”*×v·riWôu·v©[bjT˜©%¨Vw/7QµTÜàÑÃéÆÞXÐÃ)ž[¡µôD~¤‡ëžO ×Rq‰pÑÃéý>EòâP ŠS=\÷›pUtä¾D¤‡+¶¯EÅGîs‹=œ.n\¾X’§<9"iÚä0P°FRL®y+Ö’è"7Wý©O‘‡Ëp¬š=ˆ¯¥â’éƒøZ=ÏñHRÈ_±‚‹ÿ4Yd„¸/LB¸¶•‚Œp*ÎUàuà¹g†9኷‚¤p~è‡X{³Î‡¥gWƪ˜šáþƒ†]Gh€‚ ÀojæÍÓºJÞ7cv èU—§Ž» ¨WíW\­q­ž¼:Øö“/ÔÜ 0ƃís›C†qZ{à'5w/ñó ©ºÀ# Ñab{˜niô®Œ»ˆYßá€Fó˜âS‡Î%rÈq6˜CNÙûZr_:Ì!GY—#&‡Ì»8_rÈ£,N˜2ï:úžräšCλNýäùZQ39ä¸O%Ì!§l–Wrˆ²|8äÀ…^,.9ä¼|˜#‡ì›ÒÃ!G½qâ£úŒ6‡œò0GìÊfkAé3Ûrà‚.Yñ“uˆ1&ƒu„k§¡Žûƒ rÔËb‰ANÙ ‰ä¨“6ƒ8P”ŸˆbÊ7?dľþ8¥ucôëT³Óä)ë[òÇq/åæóNgVJü±ïtƒŒ;8 MLJL˜£b @!ó·ŠÅ£8ÒK¶ŠDÎí2b‘Sá`ÓÈ»¸ä‘ÇÚ' r]é 
žš¿åÕ4¯ŽýQp­ÇìˆGæsWq›â‘©É!9Êšã%œ7Î~Y`òÈ©°k9âM,“GÞ¼­p$«+%œŠKƈG\­MÔ’GNÅm–xdþ€‰[EJ¶úð¿ uaäë›FF¡÷Ù(YdO-¼§â2½€{½¸ü[kW±õ>øÆ’@ÆáMþ¸Aqg%×ÄsÙdrÇŸ ’Ç9î»Lö˜oÕNÑÇû¼s?Λ¶¯À&ƒ/u¤ ƒÌ»øU0†¿z"9p×b‰¼µS‹œ “W¦‘ùÚ Ùø¼Àß%<g•š20tƒÎeI³¡+rôêøAòÉ83zÜ%6ªžš}[³6¾‘WfÉ7T»û½]æPÜ2ñ€‡YŽÞÍÈŠYŽÞå)“[NirËÑõdn9e[+qËÑõdn9º‰ sË·Gß“]Žîç{f—£ûñ”Ùe†-3]ìrtÃÂd—ã¾'2»ýlç&»L™ölá{šÍ6Ç(âŽÉ5£ ¿+®9ðI¥-—康‡«^±Í);ÖGl3!1Ä œdˆÞëa›cDw~…MŸZ曉ˆ(A“€CÄ/+fr8vÍ|s ÿ…ùæ”i$¾9Í¢¾&Û¸¾‹Û&Û  D_ƒl^å_×Ì7û QÍüQŽ„˜æçGFL4Ç}4ežù É6ÍÌkÖ.–™!Ù*ŸœAÌRœNf*î³*RÌ1‹|3Ì1‹î~&˜S¯‡]æ·çõËŠî™[NI£jjÁÝN%³œ²üË1›v‹ye†¯×C+çYCºWœr`“˜†&|— _\Ì)Ǽ1Óæ”‰Ñ"ýA8es­"§¾øS$Ÿü‹é>á«'æK åÿÏSîª^Æàí‡Q6–×[+l(¯G¿2˜^/". ¯×ûz8=§x=0^/f4Š—â•Ç€ë׃á¥?|ßqÂï¼ü޾¾ÚB,†· ÊBïÚ4sdð®=o„ݵaPN?0Ñ Èéç%z7¶¾üóç–Í7 ø¸òO„E–î7KA¦ ×›b*o:Z]j_t4Pßï{ŽV¶ÇLï9Œ÷|¿ï9Ò¯1Ðä÷ç¾Uñ{ŽãM¥÷ÛFý¾çØؼï9¶C,ï{ŽÅŸ6¹¯9.ps_s,ÿØóšãôûºCï9æ¯9̦ø5Ç8ç ÆÑˆ²e~φW©zÏ1ÊØôž£ÛŒÞ÷ý¾ 䋎î°Úû¢£¿Áñ›Žæ—ð÷M}¬×ÇÑÕkÂ×ô¦›2¤Ð›Žìœhãû¦ƒÏ¸? ¹«Ö¥Ð›°>C ¼é¨hƒD½é )…Þt€)u½é€¡kÐ\ £Zá7ñ|ßtàf# Ëo:Ð!‘ûMG)OÂç H!€hƒs¡;OK ÑÙFåˆÒk)Ÿ0éªüIž}ÇÓ@N^cmBê°ÙU©¡:¾$•BXÐ6õÄ`ÝBðÞ'^‡ýæ2Ø!ÌQhÄ€€Ëd3¬$f·`% ´Ã暑v3W†ºnÐÀqµB¨ôtPA;‚ç7¨ÁóvǼõað|¸!@î¦!w  »F >n9+ ø?š-nx &Àšcºáö¹é_r«Ý4õÀˆJcž¸:ÞMTÃÓèv3Õ@Íü•©jl[õÂÐn¯û¥ôÅðz7øl¯_¨Û^6öõ†ïðC?Jz‡ø[‚ïz¹7„ݵchÄÐ~oàܵŖ¶kv/j‡+”êâSŽÚ†âŒü”£6Ÿf~ÌQ[3ì­Ç)ßômk»¿žsÔV˜¥UƒLzÎQqeOôœ£â!Ã~}¿Ï9jžsWyä=]J>‘Ð1pÉé„èø[;.’]ÅMÂí$HÇ8åJW˵V†éøû<ªE8çÇŸ€«Æ‰ì—"âªËöX§KE¹9†ç)R\5bë--œŽ?ÄãÄéô«@RœÐiø…Ó1ܬZÁP¦] ãtü­ +ˆÓéç|¤ NWk»PâtTh„Ó1ÊÍµè· Þ1~ꂚúhÄ C£X7ÏG€Ñ:$x÷' àºr•§r€×-jîŠif©ì§abGïºì*.¢Jç½E±¯®ƒüz°:þúoî‚êjÅ¥mýT“/?ê ¿”®âÁ#KH§ÐÅ׃Ñ1ØQ©¡(Ä.ÞÒ‰êè˜Îº„ÏQÖû©ÄûbBèÜÇ÷Kñ‡Ý(‚°¹”UÛa[ª_s6ãF‚åjñåǯ;˜Î²ôºƒ2KÓ뎊È/¥wíÿŽŒ_wPVþ©õß<6zßA™}Óûȯï÷uGJóóue¿æ çYükbÆà( E‹-‰s* Ž¿¦Tbpµ8ÔØÜG~y¾ïüÄà ¿Ž©ª›\½?Kfî- cmBÉFUü{±Däúzð¸wÙÂãв׃ƽGEhÜ“:ö­WXÝ\wLTÑ8¦ }Û ¾£ 4®Þ_©5÷Î/4Žå «·v’и;ƒÂâÞµ‹£$dN5f_e‰Ÿ¾ßŠœ~ÖŠ8¬µ×ƒÄqå)÷Ñ*¿oþ€ÄݼÂá)´Æo›„ÂÝZ…Á½Û$ Žý?¯ƒ{÷AÜGikºÿÆÜèU=uŸþn³8§ã—’Ã5/chñÔ¬¯›s÷׃À¹Æß>R§¸^Týpë¼w.¸~ÞH®•ñ€p´¹Þüáxø­A¸8osñ‡á Oy¿> stream xÚ…WKoã6¾ûW{’µ">D‰öТ °=´Xlzêö ØŠm4в–Ó4ýõ)Ê‘Ž(Γ3ÃoFjYÀŸZ*còÊÕviê*÷¾tËM·X~_¹³–xÆP¾/”ÊÆÕçN-î_Ë/‘¶ŽZשڟnW×õÒçÞi·¼¹KŒ—.WªÔË›íŸÙõJeøížàß~íjmM™VkWêìwØBÒ ~ëþÿM÷Ïû‘ù1ÈnEÛæ$‹Vº¹ÖÚe-2¢ž]ó€ìÂq‹Ü/¸®²\£ån¥+ 
ˆ,Ð\~ÏA#M°Ù£Ê5’úíꯛ_!Vk¥r_ÊÁ‡½8"Èõ÷ªtY³ks¬«ì+yw –­à<Èt/AiÞEð‰ì9,ÆÖ—M07F­!Ï»ö”Øè¿Ê„íB|{ ‡t‹d÷~ägNþV>‚Z£!Ü!ëôú½çw4ÖnÈn3„Ãíf@îïÂ.ÄŠD.£k)Ѩ<Ε׺pä¬$”8vi £»‚Dz\ê fÅ[!„,€PL`Ž|¾'3ë,þÖŸFž ÒˆŠÅfBÎm¨´†ß›ãH(”'NGÔó ŒÂó‚g ž.Vì†iÏ–ÿ½Ló¦(ߊ²@á'’®=ָΚŸ›fhiiB^iûy­ h¾ºV*E¥s/ÈÎ ³¤ØásWEú'Ðkm¦ØÔ¦ïZ^IŽá€à¯‚””…²q|`·’¸Z)©WTFw§’ë·e¦X ø‚`¸jù’¯•†âñJOK^c¹·|{ŽRæòKKÈ;¼áÎnÈ¡A$û[*Å#]4¼ÔçàƒL¸ì±¢l™”.„.”ìO Ã+q$Å o›FÇ‹bAûÌm—kbµÉv3°ÐˆŠôä³ëšî;:Õ‹Hj‡p".Ó–UòêiöpûQ ÿ ¨ »ê2„ƒ‚ è-‡ÈWÊpäÅÐ6 Ç3r¬ç® ÒVBê-ƒ³7Ñåƒ$¶w÷! ;mG; 5üذƒÑ9;6E½tz”>½_F (B Œe$'êçr>—> stream xœUU{\מaav“ÝÙ`ª¼ä%(AE`,òX]>peW@—G`AE´&$&pA£ÕH­ ĶŠ]"‚ŠhXQ]QD­VÒž!—þÚYLÓ_ÿ¹¿ùÝ;ç»ß9ßwÎ% K ‚$IÛ°Oâ>‰ŒwÔhó5ºô•ys&ïDòS-øi"Š~)ÿ%Æj!?ðĺj*h­¶ð‡IPð!"ÉeûeeoÊIOMÓI]–&,suw÷øßŽO`` tͦÿžHÃ4¹é©™ÒÂG¾F›•¡ÉÔI kµé)ÒTí¦ì´\©J­Ö¨ÍaJ•V³^‘®MÏÎÎÊ—º,r•úz{ûÌßÅékòr¥ Uf®T.MФæiU9ÿ·I„}f–ZóiN®.]‘¥òóŸè)õöñ%ˆX"Ž'"ˆBA,!–Ñ„¡$܈P‚%&ö„#1…˜D„-aG0„ƒP,Â’#Љ!RF"û-¢-Š,ªE^¢u¢m–Ž–1–µVŽV唘r œ¨ljPg÷ J>¾;­Âõ„ôÃŽ>{æÔ-¾’eþ=ý0tHö§ÌïP½ãùÓu]7Nd.)“Ø@®›wë"o÷‰`·@Ó> À†ÉnI)å‰ŒÎŠŠ²oj$·ÄÛ¾ÞZ²Ñ©Ÿí­çˆm GHrCÄT¥B¬›0VÙGfXÎuî§lF½tù×;dŠ‚—&Ù¹ºCü>¶¦»öáyB*~.ØÁ÷œâyQË­˜ÇOs›Ö&LAêüm^憕ÛâÐ<´ôPZSöñßÿµô¬UidyrmJ³¬?DȈnWœ;¡o¬ëDhPqÝå(þXïÀôÌ®^_Ó6ån§þ%ÐJÒqu'šÙÚ¡>Ø)輑ϟÌWàf1sVª÷‹Rìðâ€1±_§âï=ú£7õfc¨fª « Ñ3([ÏúWÆ3Ê x+¼›÷î1§|ð¸^ðMñç_¡íN™›÷áàªxHÖŒÙùòqc ˆü B×ÚÎÆ¢¨êÔ;«$Ì›Ô)qÁSðä—> éËA`ï§\ l”`¾ïOŽˆJJ^¸0éÌ.ý™»n¶dÞ˜Úsc>þ±—úúÚÛL¿^Ÿ`€kÂX4§Ûsؘþ)d@·Nuœ½YaDÀ"˜°é'U‡úRL]°Ð Nn31‡=áýúêÖï¹¼Pá>Å Õ[ºhxŸ_Ç>¼&óò[ã?Gyex°½óg6ç‰FD a§O¡lÆŸ¡¡ óýõƒ"¨ÄŒB±™k“VÆè<¶§qQ/fÀÜ S| Hòž®øQ²¶9®&ÑÁ–ÃçÜq^¹r–«çŠ— å¹áanÜV£Þ‚§|OíãËØ±2Óhb¼X‹É …xj-7¿'ÈÈ7õ¼½ ò#;öµçòñzã¯.:üíâÝB¿^ÉmU5¨–ŒD>(r½:6G³-¹xm¢vœßU»¯ªêô÷GZÝÛž0_±n¹<•óZ†]殎,ÂÞŽ¼–²ÑUñ•æ‰#¯¢ ïöM0|3qâ£=­ â?i˜Ôendstream endobj 158 0 obj << /BBox [ 0.00000000 0.00000000 348.00000000 215.00000000 ] /Filter /FlateDecode /FormType 1 /Matrix [ 1.00000000 0.00000000 0.00000000 1.00000000 0.00000000 0.00000000 ] /PTEX.FileName (./oobperf2.pdf) /PTEX.InfoDict 94 0 R /PTEX.PageNumber 1 /Resources << /ExtGState << /R4 95 0 R >> /Font << /R10 96 0 R /R13 97 0 R >> /ProcSet [ /PDF /Text ] >> /Subtype /Form /Type /XObject /Length 2497 >> stream xœ­ZKof· Ý¿ân 
´æVêµMÑM¢Mc´ët^I`O03ió÷K‰Qòg{&6‚‰MŠ’Ž)ŠÒõÇÃþpý?þùúîòñò§Âñþóå[ü5Ÿ±µvõ,ḻ¤r¢¢ªævjœ‡3vtRÅ—.?¦’¯ïŽonp2½?[ˆù¸yw!øÿ3ñḹ»üþøÃÍO—¿ÜßlîÌ5 ]Mݺ¹p|zݼ¤ÏpøÂì 8'ŸÞ^ÞýñRÎáømÿŠÿ~zÜþ;\ޝ¨ÿõ);i.ñL*ï[:³*nѤâ‚|míôÝ`H-ôµkkpÈg’Öàb“¾ Ï§½½[ÓdÁ&¢P¥êr©™Ukëˆ\û¥FÂX+œ£¿§ñ*6 {'öƒ“R2ÅF­®K‘’”‹Hd›Ali¬ì¥æì‚%mXSb{^Kb¬ =‹ ƺ¹¼ŒÖ\]fk’Á ižÑ1Z•‹´ÚâÖvY»ÊYÚi|aNæf_]ÐWEKkS?¯Š—ô©¦—Eæ$4÷Êl‚¾['ó z`NϤŠ^=—bÒv‡RÔV”BYmC\Ç nËmXü†Õ+6Z‹ï´N&¬ ÆšXœ£Ës6òÂDC^šhE–Õ—Shk»®]ä´Ž¯ÌñüÊ,ドŒ_xmÓoÆ«ÝË>B§ ÎnLŽÔ³ï–êž05YîiS¶ÀüÞyìy¦¥pVU ll2c8ú±ÚÈ^< Z`¶7Ó3 ÙØ|6–$:æ¡a%«¢¢x[úGižó**zŠö¼Qû´;Z:êàÕbìÜîʲ*¼–Ä,ƒÖ–ì¤=”½U›¢ì {è³9gzÜ‘è¼*`é€åeÀ4Ò‹™23F…”£‚&…YV)®¹´[(¬˜\e&V†oBjü‘`ñ­Ôús=4IÁLÜ]㊸,×u:x0Õ5ÄCi2!{³lþ®%Ë ¬Èeë’Ó6hÖUŒIó•5)Ê:%]4-Ê(ú¢“‚æx˜íi5çx˜ãsÈp1nt7Ñ.Àå½Ú÷I°“‚§ë‰ÂϰªZBW*]” TÙ’i•O¦| tõÈê3ëFH\j¡b\@.CÂ`W,g0ýÖ†èÔ vaV' 躧©…Í«ÚnÜÈ ¯4‡l"Æ&踤Y]EPúx’ÂPQ–œ&ô™.žSõdœ“\Z4a±'pv4ì¡0¹.Ê– ª|Ê´i"%`&6ºúL§^]ý>B¡—³€¦' ·çÅlN\î.Fâžùc³¸½oñÝ%:W–,£Û³ÐÙx¾€d~{A…6™ ‹è!“{ |¹à-Š–Ý}l4m’ãPFEP RÈþEE6—œÙ·$Uë²+Cœ  £d]EÔUð:ƒÂf’§"ûá†ÙeœÁvМ—Is¡ *ï°‹(æÂDѶ.“+t²9&l3¨é †=ýÅ ›]|nÂÀ7à”éAÅ‹‚.Œ0)žo|ê"TõÝ ã¨ðj‘Í™­ídQ¸m¯ >idÂrÒUøÉ…[n©Ì–Udº¥Î.ÛÝlxÄN: \a¦eùöELˆ,Ì´˜f¦ü|f0?•—gÆcÕ…«^˜R]Þ˜©XRGÖå+Ì`mêèe”˜ùVå ¯l/òó™ñX}à­ã¥™Á`õõUÌÔkÌya&ô[±Èú&Àö"¿33ùÕ?rx3XšeæÛ?±›¼=¯7f‚‡Æ™r0!ò­Ê^ÞBØ^äç3º¦½@]釳ð8òÁç1ÜßúpeÖ{]¹W:?líû¡ý†J/·UFOùŸº¯÷å:L=·C•zá¡¥¼&9åuibþãÖýÃÖýýõÚ#?TqïL]©,·~Hüï6ȹ+“ÿ<=ϧ­òÚË¦Ý ì¥|ªvlù‡ãd'ŸÛS Éß?gâ aG¹;èóºkñNø$ÅAhe<$©X=ñû4ÞŠ¿©¤nx¿X¶×Kä1> stream xÚµXKÛ6¾ï¯0 Cñ%©@/i Eº95=(^í&­mm¬Í&ù÷%J–m´@²%j8Î|óÍP/n®ž¿¬Vµªƒ «›»Ua­*CåVÁ—ÊXoW7·dŸß¯‹ì\›µ)³÷ëÜ–!;Às{ ?ŸðEÛãh™=ÂJ·øXѤ=\wpu8c×<® w{žÑ Î§µYƒÃ[¸šwrÓ²ˆÌ¦ei¸9ˆÞ¨ÿ~aõ2Ûu딣‘í–‡z9D«å†ö¢Ö¹/}ö¦o×Þüôüe¨ÇèU^ªö¾ ÜÃ4r—‚ëeEhí[íµ¼Š×µˆ£X´³ƒ ­û®oàúM®ò j 6f¤ZiWY0 my$E¹µ6ëÞ‰ò†ƒfâ× Ç–<òABCn€a·Æ©B;÷mh­ŸÇ |]W6•;²{öEîŒËº·º°-:¢gÓ‹"±½²*T¦ÓŸŽ7çUQ… ïsŠü–¼Êm¶A½]ßóRŒš­ØŽlâf]8!Bïç[MÀƒÚ?Ñ>Ä—>ÌL_úJ[d¯D”R®Ýó|𨓩Gv×S»ë$Õ,ƒÇÈ6Æ+œ—o@ìI {'ÝŠw{ª_[P®ô!dºÔ¦Ï1Ë·ì@SÊ;]Lì–Aí ¬6EAQOPP\@î$/µOl(RHáà`x«ãÓ†ºå¡tòÜ©b†dª#¦»å@ÓôǸmºÝƒ°Aœ §èI"êO¶ëT@ﺄ¯ƒ 5%97{ö©OÑ¿çw©‹ðýè"šÉ= }G¥>N‘Fôˆ'v=cë!N¬û¸Þ)­«B ·ÌF~oåê±jnà sI‰VŠìÔŒIg8b%§·ž&¢CÓ«ì&yI&¦ågÏ唯p™9ƒÈ]X§Ö 3e’2¡ì1\›)ÂkKvI¥TÃ`ƒŽ!‰|E›n‰âDTc"³-[xˆ"XåËÚ,0H¬‰a’Ãq4\‡Å \UJ~í¦h_ô+8óxñ 
UY•LŸ×åݰ®¤Úlí B0u™ "wÛ ÉÏŠas+ ø!!æ.‚‰`¤ }gš ÿ$4“ç$…’¼“¥á¾JÉeDàÏ;´öÿ1J^Õ®ŒÉ¶Œ¡Zw¢…<j]¦^üŸZ©¸¿smTn3„ÿ_¶Q#)ãÓRÆùËÖÕ7WèU½*Ææá ^½Úì®VW…ÒÖyÏRÉ=¼ý8L–ç¯vfõCwõújõzx—šóTõ‹“Ý¿©´*BåÉ1/Åìû¡SFjr™ûñÇ e,gy‡!Ëû†ŠÜv~ˆsc¾l¢7"!±”´M“Þ,\kJéëØ zÞµ#"qöü*Né™Á6³ µNòf^—ŽA[š¤DVû=[rA@™ ýžf¨ ¶´â‰C²Ú†WƒÉ_d¤gÑè]¢ÉaìœóОClYš¹óqþY'È{Éò ËÑô*î¿CW|‘ž˜WGÁ±Áùg,ƒ©vó›$«jØÊA­„Ͱ9¶ I‘îÛÇØ.ÌO{w‘wœí3*xóý+¾!ÍBŒ} >YK‡%…‡%›ýNÆ,ÕQ«•þBº†‹5Lˆ0=#æI³ó ú2Ù•VéÊWC•å³KU)SÔlU §½“ùnÎn²Ž ,ÁéÔïôÔ!ã‰äÀ;%‘ ŠâÃñîÏœÑRÍ¢t!5m}:5-Åxëê™›†ù|…NLa†…åÿ#a¤¡ý‚22Ezöžþx Ù-Ô©“’ˆ$Æ ËO9&l\Õ±"šzj ¼Ù«ÃH +ŸI²±ñ¹»\_bŽ Heןî-Z4ás,íŸÒu7|€:Ì>r`‘H¢?íÈÖôÞçTC«ô&\:§Òax”Þ0îhäqÒd±]$Ÿ:!“°øš _*µSÜ=r½ÄvŸâºÃ‡5:ï៵Ïð† &­ƒ—k$Ôñø‡/Îh2úß$š,i¢™ñxeÅï“£ N[@_ íÒWË •~-¹q“pÜñ.‰°á‚&õ‡=£aó÷¬%—›ÆëERÄLöK\EHHx×îaäFÉŠß~bÃöíspN¨Ùs¡Ê =ùä‚CI}ÁKínàrÇ·_1nЬÑÔM# ~'”/%4r{¨çJR…z$}úlëσÔJǧù~á£AЗï[_ùA #ÐõRâ›]7T¡£úß%ë’–IYbà\ó›¿eÌ#“âf'<‰Â³/µ±ýÃÃ+¥}r,¦üÍõ½µ²Áիܕʺ"Ðæëxìø“Q©·endstream endobj 160 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 298 >> stream xœcd`ab`ddöuóññÐÉÌM-Ö ÊÏMÌ «ýfü!ÃôC–ù·Íﵿfÿ2`•eð™ËÌ»àû¡ïG¿äÿ¾O€™‘qãé»Îù•E™é% ¡AášÚÚ:CKKK…¤J˜Œ‚Kjqfzž‚Q–š“_›šWb­à T““™¬žSYQ¬˜’’šÒ–˜“š­à–™“YP_¦ á¬©`d``¨ $Œ¬üJsS‹òu2óÒ2ó2K*óRüsSÓrSRA¸äf–U*˜dæÁuûeæ&•+€=«à—o©à£”š^š“X„)ÃÀÀÀ¨Ä LŒŒ,ìßûø€¨|þ°ùßCæ³­äzÀ½r2σ¹<¼ }nxendstream endobj 161 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2928 >> stream xœ]V TÇíq˜îV“wYÅ=€²ÈòA6²ˆ0:ÈŽ¢q‹[D â↚î¨ ›{T "aP02¢¨ä›¨?¯I çÿ4?çÿsæTwW×}õú½{oˆ2@‰D"÷¹n ÷¹V^JU†2-!&Z?9A° ##ÅØŸý³àO_ÉHJqàå’†{M¡ÇÃÊ/(±H”•Wè–”œ’Ÿ&Ÿ°ÐÒÊjòß3ö3gΔ/ÉþëÜ]™š—(On2”ª¤dµ21m¶Ü¬V©bäqªìäøTytl¬2V ‰V)—Ëç%¨’““2äÝ,åvvöÖdp˜Ÿ ^’ž*ŒNL•+äʸtUtÊÿLR5.1Æ×5)ÖMé·Ô?%.5>-!=(C¢^â8uÚ k¹Ý8{‡)åG¹S6ÔÊŸò ÆRó(;Ê“²§)/*ˆò¦‚)*„šJ-¤|)Wj:5Ÿr£8ÊŒ2§†QF”1%¢L(š2¥¤Ô—ÔxRVÊ€„ûŽº) íý9`ñ€jñpqxŸ¸Í@eð³Ä\â/I–œ–Ü•ôÐèýôuf“ÌeþÍ&³ïŠšä.øjÐàA‰ƒÞÁ)#¡ šã‹LQ«ó3ØÞa.½Ð$sÒïuÅÏà"#mþ£¾éùƒ‹1 þw‡ g:}êÇ;…ª]#dÒ ZÆHØ’Ö*Ln¡N±°~ã6__û}fi\¥¢Ô±x”-6 mvê’Ãh0ik/wóÓéÕÓG»#Ö6¨¾óZÍ˦ËK\wóF½Qq¯{‘i…Z±oÑHt íUcŽÁñ}ÆXÕk,Á\ŸZ+ 1:õÚ¢vÈo‡œvSÔ mÝó»Í¥B›´Óèú¡sYéDz³EÕ7†#mLM+œ:Sûx8ªÉ¼ªüAy&t¿I´’îÆ9È/¦•n<šu,sŸÅ ØÕª™)Y‰’E^ø9ùÉT 
“k§’ȇÓö.Žâ¿Yžž–žª^‰XRÑ¿*5b8*”qx‚ÃhìŽÝµ£Á&t}„9àíø[ñ¹ÎÜëzgl†8ÙÚ´‚Lohºy}MP± Ð€3éL'T“¾äzÕÜ |„ޝ <¡Ïz„áÙ¼´{tbŒzTuü~…Lš3ï1ƒW Ñ\×ÙØDÿzÈ‚ÙSlç?#0ºóô¥Ì¨wiZ»ÐÓ.BÝâÞIâÁÑVƒ‘%úJìçí7áA–mt¯_ð(ù ‚yèÝÛÃÌZÑB×%ä$ªý|œHãlðÓ£nÕe'䧪öªx5€c«(÷¹XØ k8ôäÛk9çžÏ®¶$ØñÖ„s𜗣`v4]Lè‘6/<Î … È#I—3On<¹­šÍkàvöܺ÷ ±í÷<§nE[·m%t9ô×G…ÝbaXo6‡•x4¶ÁY8 È–jÛŽWÝâÛ^ ‚Á,|ƒy°ÆËd3 ÀáwlˆØ}4¹Ø`Û1`^àõ\lx#8ETáÙÍ¢¦1ì!‚pl§Ï<»s;ÚvHÖÈdå¯Í]‰X爨9¼ƒ‡g“.¼Cïdôt,† í°KŸ¼èƒîrMß@ι|þÐUô…‘³ZñX®íSwÓ‚…ì‚æEÍ¢Ù³C9ퟓí]µŸEi.-oÔ+²T¯È£ j9Ty¾”•6;ttÇÍ\öfuÞÆÜµ(-Y¾È‰•–¿!ª¬Á‰M`¢ça|?.(O;Æ…‘öÜþºÔ˳Ç,vžZâq)”?U¿¢=@WN\}À&3È}ýâŒÄtÕâ쯑%ìN;˜U¸aߦSì4z×ÄV0FôðèÏ_+|€À˜%Á}H`œåËuÕ¸`slì2Å.¨ŸÐõšü}¢ Ö«ðŒ¸mï=oâ!„F_)&ͼ _¨yMÖÍÕ%H9,,l™k¤²àp†lMá·…›ÊÙ)t>Ò°F"†>»ÿ›&êÒ˜£üìCžû£òa×.jxX–”'ûliÀ…8Iq4q‘MŸºƒ¸™ÞËŽ_ŸS$;²rw ZÊ~ò4­¢nÜÜÈ´„Éà$CšŸÖ,LjýD:¿“Tl*ŒÇº{þ¤“Ó!eêûEEyß51k·¬Ù¶ ±qëv—ó€Ÿ` Ù8³|‹D¹;‰`gèŠ;è^µÁôÑÏh½šCõ+LI^ùDÑYB†™P<W1Ò y ‡£wôù{2™¡›È8Þü½µòxc¥LšåJ Äévlìýlh×5΄}Ý$ð–þEpÖ "„b‰5'ꜰ\p"· ÐKúsë÷Ögb˜Lê1½Oý)£&fÁ®UÏ‘‚þŒNWrÒŠ­ë7£M‰« ñp‡éö¬Âœ‹"=6^–š¼N½u!ûŒÞùãÙR b[.&…òé ŠÏÈñÞ€ådo^¾f~Š*y²Ò¬Éüþõ ¦¤ö–lGȱÔZtäßIdžJÚ°2%-AµdUb}”§jêÊO¼(àµ{æŸ(`ûe¤OWa;zÕ’‰4ŽÔõáeBŸd …}jIÿiñùäzÉúŸ¹´RtéeÒÎHŸþ\w§á ÷é'ôwÊb½õÂHæupµ¥Gt†˜LU}Äy ˆ‘ VZù˜ù?÷í€êNâ¾µý,jyÈH+矯Il¶YHÀœ§~Ä2·¯“ýcy(e`<.æºoÏÂ&x°ï,») ž€1ß|¢åû‰ –m‚‚tªZ+†'àÌag-8l-,öap‹.@bÃ@u‡ShH{’þT S¶"ah—XHî åtè™ä)}¡ò‡uˆ}sÛgô8_+çðsO2ø)ôÞ»“ŠWœ~¤zBÄ2æÝ˜r›·xDhÌõþ„Iàý)ôMGñs(÷\"Ñ7‘ò­Ö¥SÈ y(£Qó2æ|Ðå¢âlÀ]é:Xô¼†Úwc©GPòB%Ÿ sëßþ†î¢K±û}váA/d¼–Çú¥(×Fmucµôök;J÷–”\ºr¬±On¸.[¤ˆãmâ‰Ó{mÀvÃÕgº•´@N«i¡ ºæ‘Þ °4ܺ> stream xÚí[Ks9¾ûWè(W¶9$ÀçVÍa³;™Ú9LíÇ­šÌA‘;‰«dË9ãÙüúÈî[MI-[rœ-lu³I€¯/ξ{côDiÚÂäâýD! 
g½žXã ÁÉÅå/S}^)fú—s5ýýÜâtFW þÿnQŸWèÌôòjE¯wŸ®Þ}¾»Zެνøé»7~D°`™¶œT „—>Ò¼øÈ4èoEí®êù]S²¼IE®"?ðÓº­´¼®ÓÓò}úå6L©)¾žué‘IÌg þJ}är.qñ"5yÏ?¥ õl~ŽÚÇ·-<.ÛŽsù§æåý}f¬Ç@à1+%‚1½¼íWI£èXPŸãû¦wÍÏ8]¾• kf´ç•ÆdW§¯ëš0­™b”ÚͺÂ}ÛÅ»æ¯fñB*»I ÿúŠž_¥âØUj;›ÏëUÖÅeSÜkÕ§Šãœ/"j >tï3 óK˜; ¯êÔd¬Œ™@˜þÀsÚý>Ì©2$F³V@ͧëºeOãö—ëOyÏçËëÛDq—æ9Ó{‘HϯÒÓ—Ëô)ªäï ½œ×"ñJÓq¸Ê8²&Cðd¤ˆ³EÓŸÔÂn²ýÀ‚›µ¢¬;4ô#·¦-çI´w©OM3Ö)[=àj·ÍäÔ-2á‚4ZECÈ “JEžnÇ-•¾Üôf£‘,áT õï«Úß™²6=#ÕnZÿÖÈf607M8’f›Èßߤ’wÄÌNëEWÎBÒ~T禥R}‰½^¦âhŸ«ú.½­Ç´jHD ÝFÀa­2*•A#xAZM’ã1½‚§¡¡‡Ð|+LU0§‚Â;ÝÒø£a#'V‡ž+X!µõ$?¡ULs3`…ÕÊ*?•Fwß*ãÇÂ(*TNHô°z…žhá!¨}=1 tyO^Ø6OÏWt"UdOÛŠ……„QÉ):aÈÕ&O+Tãkdïúyu–x ¤46ç,ÑÂxHòþ[R’ý€'±“@­p’Vüå쇋3Eƒ—5‰ÂL M¦§¹œ_Ÿývö˯rry&'?IAMÍäž^¤P!Àäú @X0¾-XœýûìŸÁª£Xe$_sìÑ35_+¼‹ÝûGa¾å,‘sB»4äûÂŒ9¡tiÆrb–h͘.@ÉJìøžvFYÉ …Áùé÷C=¦ÞP´ì#A©°Çxlc<êÖó *òÃ=Pq€CÌ œCœP-J¥Œš†ô'xÜš-É*§9@MT’‡`‰3ap|¢‰¯TLÇ2"¢ÕIá~lâÁA̹Ï|¾'È"î ²¯!Ÿ ïKÜÈ=*«ž~åxŽ#‚J¹ ¬3{@Ÿ¾_œ{˜s!U]µø™’îzU(EÏl ¡üHE=@ù±¥_dèk‹Á¡fô2”‹:GÝTÚísšÃðÐèqv¤t'º@]9Ir¦¸§‰kÖÄd«b—Ÿ¿nrBú×Âö…ë8*\÷ãÂu-)”¸#Æë9Íí»–š%íÿvÍ󆹅ÔÜfH’#ᜠFu–ÛZN/F¬.3n( ­Á¸ÓB’߆HY¨½½S϶XIÌ–< Ûm6ò¯Ö(DOŠÎAå$ò ÷Z`N2Z /Y ÄJ‰~û†y$[³ôƒià‹å‡¡5Ájõ§| Fª3w ¨!z›®áF4IÞ…Ufà—ûº`7‡0i¦+² 4DÉ*•FO#­÷vJXîìWÍL·Cæ7>ùO©_bá¬é9,–—cUmÉG¸Q+•Ê¥«z¾˜†&Α<8Óì2õS-UF}…#ëë úŸv{gc Xiù‰Z6­6K0¯ã¦ûÍòóbqõ¸MÒA蛇Z# º“ Bôi£Rom)äÀ´†ÿø•›–d•Ó,XbÇ«_¶Z·Cµ;Ÿ8©¥¹‡ï`m¨:g~wàÛC^O¨”n²Æ øÿôñ5™qGp™GK€úa(J ¥O‹¿Ý7®¬“¡¼ô¢ÈY/ílÙîY|9DÛûF=ç=Jp„UG1†0·î´$«œfÁZÎÖ G„¾ænÒó]2¡–Þ ô<~e(’/û´Á¡¦‰'ñìrÔ(•@œ@óñ%ÊDÇlzIpëî~G²Êixל‘ÔÜ¢j:Ø[aÂA5ÝÓ¢%a·*öb˜ç‰ËͲh 1¤9“`N·,@±–1¨^6¬ hu[`h„Tð´*æ©<ßhFyK¶L1÷¾$5£¸}O8ÐæÕøq‘±:þ*ô‘%|$/uŠyŸTVìÇC(™“ºÿØž¤.Ÿ˜Î+÷ènE¿¤~‰ƒ rwLJ¯TÜ œöÖáCK²ÊiC[Mñ11 §õ þ)– ˜Èô=ƒU–=ƒdÀN'D~^&|ÿs"H›]HR*ä ‰HCCÔ–ÄGµCxsîu¼@@Qº {w´Öw´Ãü¶W߸[’ßYˆ\øžŒî_ràfÛnˆ0ÏA& ¢5µ7#®ÚÛ«îÒ¡•nîgý\ßßucû×ì¶á¼Š|çÆšb”¾ÌR è6n9¸¾Ïr®o³Å4~Ãf+’êV¨ew±1t½‹âO¾¾¤æ¤ ÔžŽÙRÇz6{ÖWQ-ËÞÃ…á×/‡ªx?"çÅ7úˆ×KóôaŒ&¡ÊÀ²çS²Þ<:Œi V9ÅA³fKø­šk8§Yß{ bº•"64äÅWÊLl'Χ‹b†ÙõÄp ±ç!{K>ÀMlÈÔܨìÖn‰µ[’UNsx|©ãÌ©œ…‡g·xØã>úÖ-4ßfÙyŠÌ4æÀÒ%JäåÞª#W¬šÀQ¶Jñ?·ýEendstream endobj 163 0 obj << /Filter /FlateDecode /Length 2763 >> stream xÚí[Ûr7}×W°ö‰¬d4îØ*çÁÙÍ®“Tj“ÕÃVÙ~ -Z–-‰²hDZ¿~OÏ`H 
#u±”Ý”Ë6©Á4¾œ>€|ó½5"­U“ÃWÒZxÌÄY/”¶zrxôtj„™UDÊNšÑtŽ¿§üïËÅìùáß|&QD§K°"zï&A6Ô¯ÿ C¿áŸ~û<{ÑO†ëPO­‚0D-¢bQx‘xÔÁßh"ñ‡&ÊFá±gHhüäåÙÁ»ƒ§Ïåäè@N~8BÇ`'ñE ŠQMΔڵ?8=ø÷Á/k‰ÕZd•Ë|̦¡lYÙÌ*KU+ø¯f$óu0!ä ¯Lc͸81"Ò¼ÓfRAZt™O’´lZ'$QĶ-·æÌ‡‘ä¬î…ñ*Ÿ²£š¤æ$a¢19M& “zS˜SaÞˆ<àSa6#B`»î˜Í «¥Ïg{&¥,L˜>5¾*zž‰1=&m’¡©³´€]Lc~/( ƒçKJ‡m Å®Î–Š¦Ìæ{ÓT)l ¼'NaOÎS°¼oƒætVi­¦¿Í¬ã/4ýÀ15«œ—qz¶8ÚWÁ!-¢B™î¾ìë ⣻¨ÒFtõ©´ʨÍ>ÔváZ"þo³ žþ:_²Þíç3°ì‚‹#¡©“ØÏEs›½cë°ÁÁtVùhV)¦+žÿ˜-?´l6Xÿ°<ÞŽðw½ËßÃ(Ûþ¾Ù[%¥pž|wwgAO¼¿glÛóÌ‘ùó’í>=jYk ]µþ~6çO‹ªD߯ƒ.Ù]zÒ}·Hù(™llpSÃ~^6Kùk#ÆÙLŽRÇT2Ú3E²?#¢V£æ`?Utá^é š}­·¹ÝJ oàϯӎcÐÇ<`QÞë&ðñ舟Qw•4ý?¦ûL’IÕŽž—'X-ÛdÙºÖò¼yá}ãÝwêm4)Â×Ùbd½®Å»$fžÉéUkˆ •år ;Œ…ñJˆ`DPtÒÙo-$‰Í '}pe౪_·åþu÷9W2Œ,«£¯Rç™T¦˜yóÚlzÖW»à¢Û ²DÒ±ÄZ-ØŒ#° åÜl%V™È>$ä]©=æU"X˜äΠ`fJ K–0:¨ Råê£hžÈ]¢Þ˽ yž1t+°g·\µÍ´ˆ0«TYª‘@#" [‚U¿•ñd 7#4_pië8eЬSÒP±Th_{ù!!‘Ë „Д“ǧYe¬®OS±8—k§)¹ÕOë¢u‘ÞXœµêgYJâ׆ríä¼ú'uî;ÚL²Z}Hé”eÍ›ŒÕ®¥S}ëyæ—)O¶øKPH¼¯›uâ å ò3ž`…²]?k¾¬Ò[ªóÀ£69ÎÓØ¿¾.l™’„îrÚE¯Ò+—éû‹íÜñÿ’Œ]w£Ùç—õBøyoˉÐL¤4«hFtàJsÇ)[èMM—6uÀ¿°NóZ ÍNN¹Òx;½\_.Vi'muȧ—uSÜV”ï–k/+-·®kJ/xЧæóãËÌ9ø?f//Žk×»lž$×_-´zŠ/öy Ô¬Û€,A X‘Œ¸ÅfÝ‘O pq“sóf½Yå2ûz33r+7…wW‰ ͺCàB[ûJ*µª¯Ò†”Ýû2 ·4“º¯§áep–„b>'ã•%¼L: Šfз¯ . 
ÑÎiÕï?Ô6¦Øê·» yǸ¯¿JŸ¬ÛîšUyl€â×`Av¶5"øÔØà‹÷|!)x7óOAïáÜû¥”Ñ:õØ#×z%•ØÊh™[ˆ58RJ^‹®|÷¡AŒ~NNÛ¾{«¸ p^i¬5Ün[7™ —fùhú¹ S¡¢òŒ";Äš0T”È+ É@Å›Ò&û$c—Z‘•ÒC"J ³¥ô˜™¿Ò-±e8[w;m¦¯WKéJÝ¿mHÞBŠ`âݹ{<6H[N°‚*WýË—ÓA+=)y<øgRò3ÿsytõý8w]ÛBòygWn ¼ð&´™M•8jgùúœ×­^'»9—ƒÔâŠèß(¿$}ôà®”Õ’ßž.‘4È ™@ÄÒà_‹Ûl݈ž»¿€G%§aß"}ƒV9kˆLNºdS{>òÇ¡J<Üì° ï xÃ4Hâ?…I­Ð±RHëÕà•O¬ÀK´}|ÉÀ·g %Zô“è1´Ù+Ó”¤/ßqš!‚ö`!Þñ¬uÁBlݱRv§oŒœ±pŸ´¶Àå;¸Âŀ燈qŽR÷:t áFœBè;;…ÈQ;ú#a=Æ)ãjºƒÚ#'@¾È\Dík‘U.³‡Ú³™ óœn¿£Ú·EÓ:E×8Oy[Œ 4'ò{£éë+ºh[Ïs¼Ì¨nrÖB—ÍGùuÚÈNë/#S)¯Ñª¿@,Nµ„tºº."&ÞÖ÷dàÉ*Ùà§E ¡÷´Ür6ò.J¡$$f Çó½E7Eòt;¨F@M¾Dí­[„6ðïŸxß[]6²¾ |á¥Òûeã7ÅÓí`ƒÊ¯–ÜF}[’aù7}ÆË(æ:çu—A…—1)ê‡õ~¼:_äÜg¿,¿…¸âÆ#®]ú±_p™/víc¿;W_ôŽB©ì ¡ÍYÝQçl¿uW¸¢‚ÁŽhnô¨[Éd¬0ܺ Q®If• íS®š8o¢¯qÜàìyh0[è1Ù‚þ ‹†¶î),h(Wä!Õ»+…ž Äf^½GY?.ÚÕ[ %œ¶az‘ÙèûÌsŠ;Ȧýù”n{ä¡t´V︸kÛk»îÆõNß" (¡ÑjöÇ#˜Éá‹&CpÓŽ„›zÜôׄ›[H’øp†a§¶WÞŽÃSÿþŸE9j‰Ö,ÅSh endstream endobj 164 0 obj << /Filter /FlateDecode /Length1 751 /Length2 1072 /Length3 532 /Length 1617 >> stream xÚí’kTeÇ‘r‘Ð/ø"¬ö,ä…ÛšÈ"¸*(¬:̼ËìάÃÂî"·ƒ’ hr3qQ*JHš‚B!ÈJ1ˆ“ˆH‘4`Oú±>ušù2Ïóþßÿó›ÿûÚX <12òIBáÀeqÝ·@°Î‹ËtÁá0ll¼)ˆ(p’ðAÐpÝÜ\€_Œ8:Ž«;ÏÙ}J¼I¹šÂ#$ `çÍœ¹O¤p!€QH Œö@)’(jð”JÁÆ©Ñ`#Œ†T,ÄX .`8ªá0'ì)¨u„˜®/ÚXŒüåR,¤¢i(`7É4$FR5À ˜Á éifù7°^7çÇH¥ˆlÊþETo.Uÿ%!eò¤€€Ä E¼. 
†/è¼HésÖ))ŽzR¸Î,Žó‹>ÍÇU Ĩˆi4œîC{ƒŽoš‚Í÷ñ ÚÌ·y´Ó«N(6©åp^ɧk‰ÂU ”Ãâp¸´~_~‰^›æK $†À‘çŠBÔ úÑìæœÀ  @Ìf¤‚Þè`€˜¤SçêèØ”„œj2Þü //RµÛËŽ<ÚÕ‘ë\yœ„¿ ÑŠ‚„bú¦Ða¼¬Å8„*ˆ2ºï¨ÇÞÈÜ –&úžºyZ½(¿dosujE¾kð“ÛTUÛ&hùØ8Õ8?37gh}óî[\ðŒoØ-d§v7Ícž8*4Föë6jêkçÈ üÔ½ä¼Ê~ø[ëÑ{±Õzçý¸®'-†Òf–vÞùc$1*©å~·afÝ—Ãi¾ôφ[úïeÞøíçµä)²»¨Ñ,ìè¨éón‹ÚïoE}g”ï–®è¬ß#sîϰyÚ½ŸßØÓwԾͯ-ù¤Lµ•å](Ü]wE9dyKöÁmçº`ÍLÛ<}wn-*è7Hdœ>§]ÊRfætõN2LÊ‚‚Ÿî\9´ e$Ètë<'c³Ìäës†keÍαÓ~fÕ¶aQßê~þ¥sÙ)ƒ3?^¶f8íbgku,þ¼Ðžz&¡:ºbüÌO„[Sruv69ϰÞ)|mŠºŽè¢=ëP¥¾3}tćä dpÌÎÿ“{{«Ž%è4û»`ã±qÖTLkoÄ»>%;,qY©~E½D~·?)ÔÏ WUeÕKGÃf­4oéY¸ãò¢È´Ïé U·_»–¾ U^Z.IUß{{¼¨mÖë½Çm—Í%rM®Pn߉a-¢Ú°âœâýÆ[úV=ü_“MÏÖ7hóBK=&(}›µ¡³²“M2*­‹¶5EÆ-Ÿ‘UäÒ—É´ìþFYiÛtÓ•¥WÓdÎ|Þ.vПç·ã·ðŒ¸ …¡fš‰ÖJøÇÕM¶ ŒjŽÙW‘1cl8Wksr?õ¬líÆOÅ÷Uµå`pÛ:þb%j©½5tWG^°kr—®ú59õÑ£Ë3,5¼¯«×‹ wîAF˜•*ÁÙÍÁź½µ33 öÞÍûÈðÌá€}-’°„¢°í†š±æ&Ü*åûL?0×§òw][ÍœvS}¾Ÿ»Ã8ªùB|H‘Ï9yTGÓ›ð^n~ÖÏ5N™æAK8 ÏÚéT-)Wô•|ôžUÙ×õžÏ’Í„uUAžLKæ‡hMâG£–¾µò¶[§¬°ûb‹¯Í…a§_TZ<.°îðâ‰N)ZÐzä÷CIwJÆšÊN?Œd§Íþ Sÿý“çOu˜µÚì7ç©§¼ƒäê_ù4gTâfdÇ«Býy“Ãot!§?EÚÂ.ð°ÚÚ!Úž‚Ü(UkZŽ$©Ó/7‰ÙUUõõÜ0¦Ýô®6ÏDsbr5L»'ˆ$41ûÉk!Å}¢ñ1õê‹+ëòå¢]yªpÃIXlp#]œspÒƒ¹pr$éÇã3œùó÷M„Ö¦ݼ^åðÁúãÌ&¢ÊO·žZ’­É_ú´9kŒôãüÇñ¿Á•B„R2„Šbü 8ögendstream endobj 165 0 obj << /Filter /FlateDecode /Length 3501 >> stream xÚÝ[[s·~ׯà#95ààžN:Ó´M›L’ºŽÞ’<ÐÔŠV,ŠŠ(Yv~}ÏÁ%Ûu&}ÈÝÅçòøåéÉg_¹‰gÞ€™œžO„ÒÌkp£-©åäôìÇ©b³¹ò~úíp;ûùô›Ï¾’¢zGr¦µÆþ+Žh&Nº4àålf:ÌæRÊé"Žõ| ˜“6F¨j„eV¨¼àOœCg–ô­™¤&Ê1'¬MÏ׋ۙ˜Þàßþ½%Ú`zOßéþ«x}–/V›+ú¸Œ·‡K¼ÖÃÕ \¾¥Å&s«€Ò“9îGI­±šÓ»ø)"O ·oßc…½ý ¡™TÚ·b|b˜·Èü°ep Bz¼Šô¬×qY¦¸²“jØi¶zJ]…²Æ<ïLXceØ}‡0Ë„ÕvÑ™M2)Œ¯éú‰ Ó™O1¨@šÏÕó]ÇQ¶šK3.@ㇰz'Âñš’ !eÖ§ñ<–qãMÑYÿxŽ92U8ðÄ/´Â†ðš«š)­p‰†¢MšøX”h̨c8¯`\»8Rt JÐzÅ ø1ƒúƒ0J2K¢Ýc”3jJ1§*¡† hÌõ%þõ-vƒ(#”}ñ"†â¢î‰Æ{Õ1^Í@{ß3^5²^1–x#pŽøôîkáþÿËÂ{š Ì¢“ú˜„_çQ̃òGwŒKºGfÄÑÒæÂ ÷Kdé>|?Ü“ó˜¿X\׊7‚©ÜáÅuˆÈ–‚Kµ­ÊáVªB°„ކ£7Èœœõ,›£˜­·E§žˆo#òåãÈ'“U8{d*ŠßZ†‡Ú!ëg.¤eš2‚¤UúÑòyÑuÚ(ýtù„mÙIº{¥¯‹ß7awa³çqkETÕàõo_ÇÇÛ`Ïz¢Óµª$²-î<*ÀîùßzÁ$;ÍUOû‚ŠÛî¢e,rIB>4»ClýjæTÀEèàbì5 g;üÃt»â8'Üʆ°J‰Ñ:&G<çHc&#mà2ýg…3ÄT*ƃ”%½‹t„ë¾ë]0œÇÏJ ôþCa%=_ÐÜ«MÆùüö:–igŠl€rd¯Sh{“ãÏWQÂ8©òâ‚æõ²L6”ÑY V4@ç Š¾ ”0(óxïû4.PaòÉ—‰=CB_7Ûm¤)°?ËeOËwÿã–DØÒ6‡ÁŠY×\¤Ë«¬ë·t­R™.Jj³M[­6Hïô9@|§î/á—ñ:P°¾Þ 
›n.ƒ\ânÓÊÙWƒ‘ûD^WUº8D&"Wâx+ÛbªÁ2M±0q m¡ù¤`æ€q5&ÒÄ=O±e´, ¼¨˜¾¸¬lDZTî›ô”AºÆ&ð²$P‹xý’/¿{0¥§»@UÓj;ý;q³Öìjµ‹A„g‰›ËaDV0¶¼ÑF ËMPŠ»´¡F8"lB³Eºlâ6¼Þ†š ~9OÛÛ¬{hŠ ªò%Ëú÷X}†y‚[¹ê%æÞú䟣§‡ñ:M2§j>AG‡|Ƀ„Ný^Ѩç{$-n»bXn¶·­œq|œÕeíSŠtxˆ@]« Ex)Ýœ£[+ÚŽÕ‚nÎÇ ´j¬¥˜þõ|‡áýzˆ4ëÊ]_Æ9™Â§˜¦©ôþ3ºãö_¯ ŽÖÉGzV†&ÊwÚÞÁé—ŒfHÊë:Ok3—‹°õLd;XíÇ ››ÂT9jZaw¨×0Ï©|”f¤°£¾Ò„î÷ùŒæ¹ØFìâ“}m Á¶fN¦, cÈbYd§ƒI/:h›‹†YŽš ™¨ªÔ'ÿ8=¡l‹OÄŒc\I9‘†Hëäd¹>ùõäÇŸùäì„O¾9Á»ÞéÉ=^pL5öê-$þÚ£uÓ/èÃN/7«±õIÁŒC` ß-åuè JX1‰UÇ{#âã7Gžby5–JAžDØw4ý#°Tâ̬¥eÊy=çK«• îåïÖXz ®¨N.Šk3FotRô1ô¦n¢Gõq8Êõ‘½– j“—´SÔ9çxl÷ü3gs¹<›ˆÇLè·û’âý«’â`çð‰Ø¯Úúä¶…›?’¾§#Ù¡¨ðC_Ôê¹° ¸8ætæäSÊIö¹‹\ ÀyÐ0ÇÀÔ,! Êüng6Þ´€ÔX ÃÀêÊ=´,ÞĽˆÿòŒózÊqü·[u?÷cÿ7€ß&ˆY7öøaÚ¯?]ñî;‡»çZáŠG&¾Ú¥ˆÏ çBÁa»Í%€%… *zшRÉÊÅY.ÑRÚ™ª˜Ë‡Pm`AÙß° •žœå6»”ûUÌâÆy®Çü0繩J–ªVëô¼íæiŸ(>%}›Ý–^Ûm¬–ô‚¤TjN äXÔÖ0Á¥ÞkÈíõè~³ï&ÌjjúWSnlz&µ…–.û Yê.ª˜Ö”ÑT©u­dT“ªÅ™Ž©QÆü¦* æÔ9 ]Æ‚`¸s[¹Óý“~ã’žÀ¬O [Õô.SÓd‹ìgßÃyäIJ¿óiKW§ß ÚÔÍ}H‡ñ:<ôË¥« )Wå¸. ·úYò²ª¶åB›â±’Zê³í‘¥0½Äÿz¯ÖÏÖâd†4>rsárCºoÞRmyùnØ¥fQ)¦~;Ó(åu _Ãôlñ]¶åÛÚyfé(A|5ØV2K73lk—½îuÓñ«ý9”OæJÛ˜éf¦²Æ6ŽKšx9ƒXy^ÅÛU¯Ù›PÔyƒJ§nhS›PÛv” 㙦É}® ãwÐgq‘ø¿÷b»Éî äëDl{°Õ%¡³l!þ¨ŒÃá-Êþpög à,¢yŒ½òUQK\¨Òª)Yãåv±ßôt €w©òŒ*ÉÃлÐ?;£ YÚyʛ̊«ZKQ~˜Ï‘ü:š¦\¬?ñÔ–ÆwÎóYˆup ÅLÐ _&Ĭ4CªB¢DzG¢ÒìÆ•FÏesüb׺ m ¿Ç‹`XP7M’»ÛDTO3‹9$FÝÄ‹Üê Ùq—1=Œj´¥™NfÀ;/GIŠÅÇ¡±½µ,çHºýRKkJ·ªŽ`èaµ¸³kÇöÎn`HÞ¼('Æ’W1q(¤¾ÚìØ?ìÔF¿Éfý:ìã,WM'Á—ï)zi‚·=ï‚k¿‹ksoðkèÅD‰FÑ_Æ‘A\‹Õ@%fDÇMdox´ÚÄA&£;øÃðBb8[:¨ðÛœ+¤Ïßúî„Ú#²DÍ­XZŒäF”<„ÙJ3cKC=ø˜‹JÇoßuìãsÎtúw9ÎÛkêQá¶Ì¹ÍBcô þ†``ú·|RA< _Ä©j ÷‰[P¤þÎ/g Ê¡·½Îñ²=×%ºgeBo¨…˜/Œ™~N6~”«ô¬³S-†œj¦n _AsŒ ßë d&?Ÿ +†jf¿‡^ÅÓÝßxH«àÉÙº8–±<ŽÙîA^¬ú%vçö8*$>ù¾à“îëóž2v6KÝJYÔ=¼Û‡Š÷í>F‡i·)öËHƒYê߉£0¦Ï™QÍW#H€Qœ*¿TÚœw‹sLÐ艹´½nqN)ïÆG={禜(-àCgž2¿3î_#§zº]—¡_*ï=3Æ[»$Q@¢—¬Ü$¤cë)B8/G4Êy–”Õ•_•5Û0–yQôây79å¾ßnùþÉÝòktk9Cãèî8ž– î5ÁHá„Õ;øð&Ù¸.û—Ø¿èÍI¿ â\÷?ºýØMYŽ¥¨½±r@aT$gcCW@®ÆýoëûTendstream endobj 166 0 obj << /Filter /FlateDecode /Length 3090 >> stream xÚíZms·þ®_Áäć‹w{ÜIâÖf&mªh¦&ù@Ë”LYSŠìüúîâå8ñT«©“©ghyÀX,žgw±_}þRЉgÞ€™œœMœaLŒ¶ ¤–““×ßO_ÌÄtŸ+üœÒÃ;ü¼ÆÏ<ü²x:ûñäëÏ_ºBŽf”q¼œ9:ÍOoðÿ%É»šuRÃt}ÿž®£ôB4¸é 
½”Ӹ橛Átñ„~v©5}Aî%~>$±YÎ"¼¦'"âËk’¿L]7lÖi®â±[hAÒÞPÇ8þæzÖÁQ¨Óü2Íx¾YÐÂqÌk-ÂbÊ$L_¥ÓºæïÒÔðwšåüUHÏ›'ñ÷<&õÙÐO²|\ü„Ï·éõÏ3mh8Á^MIÂ:6/€_¿ Z˜Ç/·iRI=Õà©ýñ ìôoøýEüþâ6iëgnÁ↗†c-R*T0ŵªû›Ê‰aÞJGm…bÀAN:46c£Öޞņ¶hç˜0Ò'*dñ¢dÇMm–Ï.ZRœSz+E4f¤¨ÔÃh&³©Á· † ¥rƒ‹†@ F¤Å*fðßû>6ðØÓ;œK¡…u“Ž3ïM<»Ëý‘PŠÆ¶m ¥ð!¥ü¡1eÁ™óFÌY=hέ9[~`ÒrHEço©ÅÑŸNŽHñ|"& 9ã‚£hPŒ+­&§«£ŸŽ¾ÿ‘O^ñÉ×GœIïôä¿p&¼‡ÉêÈy Owúáò軣¿÷»^dWÊüŠ µš8Ë”õbb„cRà Áî*C)Nø±Æ–%”§Q³ÜIÔ†’ZWrTÑ 7P+›7ÐìÃ8O¯žt½E?ÛïŒP€,`u$$ dBK>ë¼ñnºî1‡€å.`]F–Š“PŽaR) |àÈN$æl|Ü i]Tž×7ølpUÐH× Ï.éN Ósƒ@joãwÂrú[À#(ÛÃôyømCo»¢+AäëØ1ÇzC­ozô¦ôÞôd$:o 뉲ÐËùfCˆ¼Âaö7¿ç£¨¨M ˜Ø ¦¼R%PT[LÚV0)ZéËfü*ÕŽ .=Ï7æμ¶ÅØì2ˆÆth¤³ÝP ðALŸîS<Lj"d•Ö)‘šKh€22•Æ¥w’MIìaŠtØ —Šžp7 SŒfÂ!`JÙ•2¦”ÓÛŽ¬4³ZÄ ®–W-ÒÅ%pnî# uÚÐê­Ø¢=¨†aáÙ“‘iÙ̉žHÇi­7”K¶Z¤ÃÑIy0Q>o©q»DÏ[H©‘?¸(ÑØ·@›iÙ¯j¹oÞ†L%oàÓY§”j<‰^I·¯c1ˆÞíÃ_ 7JÊÕ³Y'Œm"éÀ´ ÇÝ‹8Ï q\\(öFfsjR,8ùJÁîÍâ–“º'‹/¿mqЦuê]N‘Èsr•#¤ 8_Ä߯ûÐ$L#ÅQW‡4‘BJvÚ4aˆ{áB'ƒ;Qùƒ½»/˜ÆóW:#¦8’Ñ”È@8S†» † h(‰gÄž]–ǽußïÖ²!ýmýxW d7V× ä«1Ð$+ ©c<΀á ÊÙ™j;E›!Ø ! r˜4÷‡Pî1Aý¢)Äxo“þcPW$Æ zËM÷LCX"L ¯ãÂ*ç¡»+&ˆU8ô6F88¢¥÷CÞA–Ù•B‡¨o, Ç2ˆ™Й’eÒn'ž4h.E£"EU¹/ÌY%‡Ïž¦\vÖÏgh’SQ䟪à’S¸fkø{W¬€­9 láÞYgƆGàs´mÎÈ»Bd¤Œröý¸Ü3!ÁþÎlK´-õ¸¶¥Ø–øÛ’UšT¤š” hed‘ù¤F,ë(Q–ÒËf¾F R0"¹ã­$pÍO O3ŽGyw=/Z qhâÐBÐKCÇR$ýòý½Š QáÁ¥´ h.$oVŽMWâQ¢¬§Õ·ô®5+°tÄâ¬ÔýrëäA¢(³2àÓ&ô×=éª&(°¾ËÊÌp[¬Ï ¶–ñPë×ÞA÷(®hõÅñZqÖ«¤ht—ãÕ7-3¢ô²2#ÎÃî~ÙíÚµ"UH˜ÂЉôA¿èu«‡X7<Þ¼žîìdÓ•ÄH»púy±C£Äå™aÑ5”ÈW gHÁã2EÄ'9goäˆL ‘³*ÔáPV…Ú¤$}Îò‡ìÊ«íM4I¼#1‹t[°uó}4˜ž‡›ˆÛ$~Eçl?¾é—wùÒz+挺o—Ƙg˜H* ‘€ª9ßQÕ"¯fµ $'¸ðÑ= ·ë¸ñ]q±ÑÚ~Ç@¸ú*ª¦]d,ßó_\J=ßú¼¤zü›VÓ!¶D!ŒN¥µEòíøx ÀE˜<Ò<^pï £»7°™¯zŸ¿!ƒ ™=þ'QA|Ãtïò%Ð))ÿMR Õ„§¸í[ .²åÌO)š‘†i ¬¬عˆ_çÉõËòòþ}cG >êÝÄU¹.±X¯€W´©0aµ¼rb x¯Ûé5v=ϋĚÙÑ5åì+sÌçSÂ6Y&(C©EH¯æz‡Ûm‡Ò^ªÙ:O!°¬o:jñzÿÒz0"x߬0^úC­d’L²h”ŠPÒz7ñ1¬,žaúïÃöÍ/¯ÖŒâ}1ý•¨¤ÕWþ@;&`(Ô°ežãÒm¹ŽÓï= œ.»´]@i2*Ñ(·«t=tôQ´šž®77±"æ,mPHïú„ó²Þ,b§¡iéX³½–­ªzæ×ÕÕ-6yŸ‘, 9ÁIß)ß¹g°X~$w{üå¸UÙ³ZÌ7¹&Ïy*½IþZjô¢@ÏÂw t;Á`Ëitoq(*Éä-(§Á9ïoÝë<„wFâ/Š<êðφë@X.7Ã Ç®Ïø¸y¨€ÆÕ5 5O`è (ü’–Ë­&ö%YŒoõN.题hœ“Ç £ϸ¶®*d(q;`ô»PéŽg¸§Ó†¢€Yc··æ¼ irÈñÄ¿šÑ·ãÜ77egJœîÑŠf ú(]Ô®© ãñtríLvPyë•Û·1Ìþ¬åÏ ué~ìE3ÿl¤£|2îŽÎ:k¸ïU6|Œ¡|²5aƒF¹]øp&æ×[Õ誱biUòÙ’‚•Î%!Ð: 
–ò!²¶¨fbÄÒ奒$]7;QubDÉ>Õø›8¬šYéÕŽƒï+?¼§ŠW|µÎ>ÜM~ãú¯ËÄõÑŒ® zª}N"’¨¸¾LÊ'7<„øe¨ì%þÊÞŒ\ÍÌ›8Ëf6C0ª(Ó÷]Î(Þ»‹eèQÉ!G‹+vb²&+·‘ù}¸q¹u>.cQop÷CÉw Ë»‘*Ѝ.ö›•ÛùjÜOîG¸ *uneö<]ÏIJ¢‡l9ì¦jþr±$rA=¼¨¯…»D¦é£=F²ÿ¢e­NÁ{† Ù˜W¡×Œ“PŒ„(óâǤèñxã®’f‘])3æèë$}ZãQÏa_Ì:­›…y‚YfDÀ£þ[Oˆ)”dÚ)ôÇ„G MâÍÂRˬ(êø8oYEº€ø„£dídi€{¥‚2\— • ºšçrT5€!BzÀjz‘])3XM5¹~d)˜¦j´Qžßñ„ø®'äpϹ´ðsaÔÆ|º.L«Øšp8[c`@…)p¾Äÿú|í^Glã‚mÁîN =4¢¸wĦLi­C é(ÌÇúߦèèendstream endobj 167 0 obj << /Filter /FlateDecode /Length1 758 /Length2 1158 /Length3 532 /Length 1719 >> stream xÚíRkTSW…ªi+¢ ‡—B„@t BPÂ[*%$76¹—„$^•yXlAÁb˜R*òX.«´!jXbdäéÊ# £…´Â ¢ UzÁqf ý9ýÕ5÷ÜgïoŸoçÛÇÞúp¸ ……$BT»°/àK£“‹Çû`ìí}ECÌC`?†òOOW@…Ñ ú{‘H^xWŒ=ðE’d"‡+»}WŠÈ€"€D<&4†˜ P&ƒÂ&˰€Âçúʉd@‡’!‘ba1`ñ˜bqx0·¢(f#€ü&Í’$½…¤(v¯Êt¨Hóe€±1¸Pí¡Z~YkÉ©>?”!X¡G}ú Êðø²ሠI"†D€†° ¼¶4z#±xÁZ4HÌàó˜˜Ã‡þMŠ—Lå¥B¬Ã<1“ Ø ~2´š‡`ÖZ¨q«pQ~ªïžÕ‰®B‡X0ùb SmE¤?tÚJ×þiºö›°â¨MŠqø”#¯‰F1Y ¦p!ínQÂfk¼™C®Í¨M¡iÃè:ÚÃ[ó®aQéFãåýÞ÷ ±™Aº³m+=³:íÓÊ+Ƶ/ò*ÈÐËû"‰3_ }ž\ÀÅJ®f¹é™mI8{IÑÕEn ³?ëgz:Xµ_$õ4ܸ-º¬E4ºhôÕú1ÓøŽáMå6 Õg¯GóÉæà g"F¿GžT'B­ ú!u*Û.cøZåþ®ÀéÇó¹þQœ±WšCýÁÍõ½F·Â×/ÊÝ˾úܳ£DmâlFyy¶·I+ /¨4¥[Î9ž¶<š9kñÐ$ðU [uÌW¢W²¤Ã>Ý!ïóGµBRéu“ëç¶Ò˜Å¥ÿœj~4+Ô~±ßØ#"®£àTas _–ñ-³ñÖã9S¸ÈY’¼ÇÐ[úc³åâW©ßù wLÜ~YYÁÙ%Pþµ±±³@44Ô?_¥"z´TÄß'—ûö·±üzÓ†ôO.,ëMÕ}2XÍÍÓé)k®}¾¹tóX€cðžÏ¸¾j'—¼áóûʘ“¾'%âƒú'Êó÷q†5ØñÌ[–ì~·qؽ vvØtÖ|Š4³.qâ™Ûüµ­ÞÜtÎ2Ñ#_†ÿ?Ìÿ þL>ĉCô1æWî˜Åendstream endobj 168 0 obj << /Filter /FlateDecode /Length1 766 /Length2 737 /Length3 532 /Length 1292 >> stream xÚíRkPW…¶ŽŸ€ˆh±b $Ù<ˆ`I¢´Q^%)RgÖì ,nva³ÁDÄ2 FP R+:¨à8JÁT`P­ÆÁŠ–’bÔB‡ˆÖŠ´ÔVº@§ø³ýÕqïŸý¾sî¹çžïøÆ©CäµFR$‚ð‘p ŒV*_È PÒepŠ\20 aaR"BY¸ GB9@Ie˜h<5‹•¼1’ ÈuÆ5( ¢Q& êX J5¥Á!câ9A€ø±zõ΂Ÿƒ Ã5 XSq’#3¤"µM´1CÆK( ÒzÖXÌšäÖ"F‘„ `PËÄPìYuò_˜š,i ˆT7&?Òk0ªÃ ÓßJ—a` ¢) Òädê8á-b¸A7U1(käd*A"á ÅÒ ×GâFˆÅáŒ& hQBÇûÄ&[aÓ7"ˆVÅDÉWOLu‹Cq’I0e@ |E¯‘W5›ÁZ!_(DX"»^þ}6鬕¤†Âp2ˆ¤¡¥iÔIJR"©d#'1hÐÈðIŠa·6™ ¥hÎØPEB  ^3 ì8Xh¼‹C`=J7^¿›BA³E “H"‘,²0$ç<¦!ÉŒ?6 —µgC…Ð5k¥YšŸ¾·~Ûñ-+«nTO¸ŽXj¬S[’ÎmÙc9jiÅǾ©Ì‘û‡{Ïî¼èµáæ¼Uš¥eôï☢uŽJš³æCG“vçJ/ð»Õ^¦<ó—3^ÞW]ÌLüêÀÖÞáè¾òíþï[þŸ[¦W`÷ÉUçŸÝîšæ_ä7_f=6Õæ<×Ëd³wþ±À×5¯bmüÆáøº…£õ‡‰Áå  5ÙÎ}tgzÛ 
'¯X²bÖÈ‹N‘#"´Ö/Ñ-ÿÛÅ2?ÛïôÖþÀYH¦.cUËÝi‚ôúÌªú£xÏõ—{WyS_ЇN&÷¥g—,ªé²oçL‹ße»·ªlÖå\{Ê=lÀÝMuí\eÓì4?¬8¿ªãùPÙ¯ÄjÇÛM‡[Ÿþøðf¿¢æÒ¶~׌eYîþœ}‘;|+ÓÅíÙÓ9¡_œqêÛV ˆš<ìOÓ_–ùôpƒëœÍÁ)ÇT{U7•ݨHÚÝ>ú0¹eNYw¹ºãjmòÿÎ)3Õµ3{Ú˜öŽ^Þ¦Ù¾CgêN•>vêh-<«ñÙÆéÍD-CºŸ÷{úo ÒÕ¹ü»—J÷å¶T™œ–”>êZWÔzçdvŒ¾©yð |ÀÑ7«®¼¹a7ÿägŠÄ*‹ÍöéÏ"6Ýxâ-Ú¶ÜÝZÚxÞRéçH4 S9 7}Ònýž7rntˆ0…N%Îé*ø¡‘gùU"£K*]:ËwLi}/”¨ièαU5* pÅþ=6uAlTJÝ!ÁÒ¯ó¸±WÜ´ˆçïuËBKN(no6ǺÎ/v:?/‰n_—U&¬Þ®õô›·Ó§Ö£¨a‘^•Q¯á¶ý’°‹·/ÀýZ¹¸oÇ2~CÄ73KìAÓGö˜…¶¹§#ÚvîoÆ\b®'u=–¯-ð.´ü\ô¡hÇ;‚÷VnoÞo¬nr²©‘~ùåËéïŽ>Î~êì±ÒÓbN?q;äŠyÍæÀȃڄÓÁÅJ/wá¿ü8oþ¢4CéPzç/*cÎAendstream endobj 169 0 obj << /Filter /FlateDecode /Length 2561 >> stream xÚíZms·þ®_qÓO䤇àý%žÎÔnã´ži&ô¡­í4y’iK¢CÊQÜ_ŸgÜG‚¦¶glÏP>Þí-»Ïî>øèìäëǾ ,Xi›³óF(Åœõº±Æ1©ŒjÎO'·Ë©˜ÜàórúüìÉ×…(^ IoDÓ Í¬Ñ&¾ñŒ+™eycYpÊ“¬aÂpQ¦ƒÖQrù*É•Væ¼ M!õ§¬L—¶rf„mxóŠqÂ2eœ NŸ%¥©šæ´í›vòíÙ‰À(¼´Žií¡Okæ”Íüêä§“§Ïy³8áÍ“Î0Œinñ…3‚l®N¤K~É7.ONOþ9hl•m©óf4ÛíÐÊ0§mˆ&Vœ'@§·n·x¦8J2ÿ©xÃ2Ïy¨jd•c\XSFªb9Ÿ+Ò©T؆³Ñ’)%\±ëq…(;‡ù¼Ç­eRŠCTî{¼´z;2®9ô$[)åä«ÖÐí|öjWñ*DùÐ(Æ•KêΓ” áß?Ã+ŠÈÔ,óKx3x¡’#`IËY6åðr$h1h+ƒÑz´¨C£‰6ŒB³Ì‡›Õ«Ú¬œ¡>­2¾ˆ#Œr±rán0. 
?žz=Y­§­R2×<`⫚\¬q=[àÄîz*=D¢äê<ýÿŒ ž„‹¢ŸÐ»Ýæ ðc'Ýüf;ÀªZ·4ÒXŽýYZoXÐ!ìû³T¢n­ÇþÜ©y6¨,ßJ1%쨓Cÿ˜&@Îè0iÿ¯£§öíhƒgB$J¡à.Ü[‡Í}6 Ť ¦Ö"Ĩhvº,0®¹ÒkéW ¾WõdÂÂl2XWõ=3FŠCU¿WÙ–:SÕM`Úý܇<‰Ö˜*„`.Hûa {«ƒÁ* UY3 Ð8>ÞÆ†¢mfÿªÔFDßN²!ô¬T´kÅ5¨wÎòAE‹gÞS7?ºbÖ‰²¾o?T‚äЀQšlv\½oÂÔZ5hšâz¯¡”—ÂQqSLìá˜xDÖ|Œ¨¼šêɃe-0ÚûAst\Æ„òòÖ×r©…$1.wzÆ­0‡Y¯cN½ßÇ!4&ˆ/iUá«EZéO›B†Ó =Ê&^]…{WW­³)c-åøW_*ÁoÁÒ¨ü/PRï¥Q)ùâÉß1¬ØÖ–Ã~SãÜ<€mq0%Áë“ÛÎ Š½éæ+â‘‹‚^®3áþyjìdv3|ûÎoÑ÷MÍ( Ò)"ž¨ÆÜ÷õZ˜³õû¤g šXÅ ™,ºóZ¤µ”´Œ°üaF÷ðÓ>%uãÞ¡!„F—õãZ"Õ…T•½Z`ÐÙh#ð.·Ç°W…5êáô ²-uVØë04–ú¾ßMúÄÙëîjÃ)×;T‘h5Pí# Ã…Ðî ߥ½2w_Eý̳ú<³gÖíar˜£g ·áž½Ï|Ň Ùýâ¾]ë Žv‹¼^h§ GÑ ó…NÜA'ôçH'vSÁŸ æwÊ„pœuû>)Pq°YUùB©÷ˆVɇ¾_M¥›Ü‚Úp1Ith³Œœ(’!Üæ“ÙÅl·Ü¢Ðì×—éIܤ\½ÅŸ7‘EÑf$½¾Ù¬¦-F™ÓƒÈ©ºEztKƒwtûâeÚà„6m5xÖvs”t$Õ[2vÜô¤íj™wa_ªš‘ǰÜeÁ˜´~·@É–8¿¼¯HênjHL8ÝCmY=mС7li„‹Ì£k“µywð®ÉÙyʳÞö.SÑþûæ¦×9ï¢_´Ê öz+x“]»MÏX¯’Ÿ÷)ëøT ˆFŒ›¥R9Ç:ß“7e¯*m娲”ÐNe’J5J/÷¥çv²88'B¢OR?WO1¼¤r¿O°v°†åÇIrôÉÛ‰ ‚‘\£¤E5F¢Cbcµô‡Hl¯²-u&;¢l’q:BTRÇ }¤—ÃQ^w{YçåÎppyã½ú0¬‚¯ÐCd®FjÛUð]艴… J„÷öeŸøµSH‚B÷ýúÞ)«ÃùÍ=þîÿ¾ûßÖj¸æÜì±ë•ï Ìk=5²¯­û½Ya‚Ü¿Çò¥º.Ñj`¯ÔUt‡*7ЉÚ>a>6K¯Ïk‡ì¸rïa-Ú¡ZaÝäueõ¦×®²PУàY=`@V\è¨U|\—Wì#áõ!ŽáîhõÂañ'‚w¿¿Çži‘1h*ó©´©C›¡‹Ä|6=" )D½!%m³un4Ü¥sÇ¡¾Ž­o‘ƘÍç«u&½¶‹ôV:˜ŒÊÊCP> stream xÚí·UXœÝ²®[pwwwww în»Ü‚»»;ÁÝ-@p‡ìþæÚk&kîÃÿ?Ú×î>黪FÕ3jÔ;®·)IUÔEͦR@G7FV&V>€¸¢/€•‰’RÜÅÂÄÍè(aâfÁ`ååeˆº[ØX¬\|ì¼|œ¼”q “·‹•µ€FœöŸ n€¨ƒ…‹™‰#@ÑÄÍÚ”ÃÌÄ 4³±pófˆÚÛÔþYá P³pµpñ°0gB`e˜Û˜¹L-¬l˜ÿÑ#ëh pÿ—ÙÜÝé¿].® QHZH¢9ÐÑÞ`na‰À¬Õ²)ùÿCÔ&—r··W2qø'=¨Gÿ‡×ÄÁÆÞûûNîn.E ¹…‹ã†~´ø/iŠæ6îÿé•u3±·1u´²·°ü—ÉÆUÊÆËÂ\ÅÆÍÌ`ibïjñ/»…£ùеí_˜e”$4tÅéÿušÿr©˜Ø8ºix;ý;é?±ÿbÖ? 
ê‹@…‰……úþ÷/ƒÿ(%éh4·q'ÀÄÅÅÄ4 âø²lÍ-¼^ ½ÌLŽ@7Ш%þK  Â?‡ÉÎ`vvºY˜›Úÿ×IÜÿòp°˜L\,í-,ÿ²²þoëƒÒ˜LþX8ÌÖÞNÖŽL\ µ Óšÿ1ñ˜},\€ ¼f £Å¿™¤ÂÍóŸTßÍÚÅâ¯6³%ÐÝåd°ñø+$ÎÔÎ3Hš«…Ç_Ê@­c¶øâä0;Úü-„çŸÚÿ,â¥ýC b´\ü6%ùoâmIê6$ý‡@›‘ùC árTOþê)ü!P=Å?Òªô‡@Õ•ÿM<  jÔ+õ?ª§ñ‡@´þ(‹Î¿ t1›º˜˜ÙY¸ýñàeû3NÿÓÁþïÿsr@WóŸ©á)7ýC šfÿ&VPãÌÿB‹¿ðŸ9ø A­þBÐÖ¬ÿBP/mþB»¿¤Âþ/Épøƒ¬ Ž!Hð/Épú Au]þÂFï/µØí/Épÿ A2<þB Ï?ºþ™½þB ï¿$Ãç_øÞLbb@/_Fv^#èñuŠ 4[,þÿ#ÐÌÝô»ýëÒÝoÿÍ–6 ÛÐÂÂË amhÆÿÉ6£%¼<@²h¶š\̪5Q©±o¡ûCèj¸}É„¼3ÝvƒöKeòô‰ç/×è.?Õ)©«`çÄÌ¥÷#ã£,ŸvíÛ,EïC'²§Ð3”ΖÅûsNpåùíÉòTâáœë‘³ cØMR°~=¦þ/Ÿ¸¸µ¥²ì5#¨ØIÕP3Ë#9"¶ø’Fs×8Ó’‘ŠW í]Ú¤o”\Û%ߨ鬋åV¿²\'«[¼g´;Þ‰Þ0Ûñ5±Vh/C”ÐoJpœ9\z˜}] j×dѱØ$8Œ”ªS„VSH·—Ø ¡€µÏõèIVÉD« :æÏ[;»óÖÖîݰFc¸¬å¢·ÈÃS‰PðLšyDüÈ_ZqØutwOSžÃ¹}Òa>ÜÐ4SN啊¾C-ɱõLi–Yo™¡e‰;ýdc˜<ü2PJ>)‚õK®á‡ë)æ6Cù RÜ«ÅuåÅ[–y§°¢+1aNY½(69‚Ó ïxpº]#~¶vÖ ¼<|$@µZŠ»ß)åÇ𝒝ô§”vâà†pÉ OE½ìk>XÝçlR ƒœ@ÒÕ–ŠF›±¡’æs-i;-\âÉkþ÷×Þ§ÓüìÊ@ù§šn†øRî²æ§Éä=Û¯8{2â‰)¼ÍÇÃz1w²Âü‘r„F¹ íè“û¬Ÿ«s„>rS;«Ù[šø¦a.ß ¥| Êè÷ÂÓ#xû’Á‡GÀþŸ«JŠ*ˆb\è Œ©L·vpÓ¯?\´¶sdkÎVº‹T„gÕr{㎮ܜ›Dî›CV(#.²ãí ]Ü9ÔæŠørçÐižã½ä/^*žonâ1Õ§ñºíæȰò‚;ªˆž¾¼íŽW@]O°£¡pâö|©¾g¼ìæ}£`H)q·¿jN™ˆjÆ}T2%˜:©ð'ÇÇ­ÇÏUœÖ‹esš¬Ý–ø2å«—E9_‚ÅÝ2ùPãß>¾úÍÖŽy Ý(Ø`ýî7ϤâY*¾š\àŸ’5Aš‚Zú=0#³±Œº8þ¦Ã߉“£•zü¦s¨æDGß…ã¦c7…öÍúXí¾6Ú€é!P„-I¶h­1?Ô8ֺܑAU0I6~ˆ¾Uð0ûi¢ôIϲ͸I#h?žDVºB><âÌ‘p iÚ™b5ñV– ½ƒM5A“öÔ'fLlßZ«:y¼ µô‰$fO6_e'ÔÓ‰»îƒyÖWcø7¨±²ÛZd+ù#ôa3Û´A q¦þAt±ësu{U:kdÏõG^ÄŸaÃÆàšºûùú#®‹…mùµ¿p¢µaݨ¸ÞÚr} [€ÄüM‹Á­ß üë :½â‰_hYΤ‡T¨éÉk\s ï"]ëÄ8 ¨šÿØ3—ß&zôZ.YŽ-Nò…ôdG¬xܼÎÍSc@ðÐàüˆŸòU*U" éÇe" ˆAå²=Œa~>–k(ðÕùí<77œ~g¯EÔÄ—jŽ ìµù->Ó‘Ôš”§¸vµ«§¾€»5iõ–R%ÉÖßäÜ4«GzV‹rzÑŸÓ"Ǫô¡1€e³Oås*rßô#Ò{ì¾>Ÿ;±Ójb-j"žoÎ7ø;Sÿˆ³©dGÍYpy‰íP,Òî‹>t»ÍÇÆq’¥Ú90wb4¡oÃKѽ™½¿Ûh¡«ËõùëÒæù_B54Â×q1Lù\¶ó·QUJ$å4ËÞH£½)HèÏ'†õ3§cÙ½?ÿŽ?쀈¯ÓªÖ’èÍߺ«ïŒ%G]ºî¤ªÂ/K+Ho.MS ™‰Z¢OünüžºqùÕnê³çKÁåÚCpä—wƒ@dcx®±˜Jþå™’5¶¸#J„þ¤QÍ_à`N¬>?{’PÏ^]¾æ¾H5R ˜tHbàPž“Ô„Z ½¶…ÁŽÊÃÉ”—]صŠ)öí:gÂázmUqb,¨Ý¿·Èï6ô9éÏ`³ƒ;gxšq²\éFëúæ¶[äþÁ"Ll¤2y‰â3äÃÉlRvª¦1¶>®N [×MµßØ‹l˜Oö-«ó'ßlݳ¸M'‘•_·œé *7¥6‘2¬ ŸÙ3 µ #•ò¼©Iß!º®íu&«üð­îÞ0Rß®üx¦ƒH¢'2»º¦õdyo§bɰ4bíþ#”1ÏñsÁJx‚îäŸWîY›«ö.”¿­-¯{Ès±T]‡û%ïæ GÍÜÏVÍßñ1n.¾»}¸»é‘ˆ:‚IU[Sº±þÞ†é¢Ó¨ª--5;ÃçÆ¹m\%Ï'꺊ÅVõ©ÉÚoÀåvf䯱 
§5t­å³¹ŒšV¸1±»0‡UÄ=ÁøÙèùÍ¥«ÔVg/äúÚÒÓä„•§ÄßR4HÞ%hßÁŸh¾¥šok¸n=òŽõà»=xºCÚºî}CIáéó…-:Ü£½±M{­Ñ#NU‰›\"”eB¤ÙDúp«lŒ<<) ÀÜ”bjûü×…š‘º«¶IaÔˆ Ù ÷óôàÃZ|•ËC2‡åQáA-fY÷½YÉC¸çÆœ§²—‚ ¶˜05näÚÛ¶894Ö¤r½Ò¯_|RøÐNQ{ ÷N.É4%ïèo,%†9Ô¤¤Wu^Ó¶ñØÃ™°3rÍ‚¯Õž 蔄Fòr‰ðD!+Ùl8,,/æÙP ¼þPOAâ¿Ò‰rãQ«e·êK*EíÔ"‹Í¯³XOð¥·_GJm5¼cµš4dâ/‘»ìRÑ|Ü2IÐ{9h‚€»ƒ17£°ë¨Ÿ©›0Ù•—ÄuO?ÝU$ûÒ½¿‹'ûˆIãWrq%l´ÕÒî¤5±vçËî&=ò¾¹» :]rÅE”ÙUݯÝ~#ÜÔ®4SL¾DI£‹á~.¬Ñ5:]9UgšIpø÷ž®Â’rë“®á*е7|ípÃ¥ñúðZ߸¾–S/¨% ÒùÔÜgRWöêY¬¤¡ùe¿ƒ2_NÅz+O·×y8Ž*M†lâZ[`C+%Q<¨×ÙšÓU[YFZ6áxßx<⤾Պø10ûvG`/ISªT}N³Í³t“Ûã[ *¼3–<¶rPrÙ\7Ë >~µèäÞÏ¿7Ïío>*"EAÍH»ˆÏg%Œ¼&ͯB“Ú꟱?}Ç›q¯çwìD:\¿¸Y/–_ÓF¾—Äß7‰z¸U­2žG­¥g‰‚ÝŸewLà-÷Sî$4µý¿6(„m÷ëœq¬Wê˜eªîú^YS Öªv{mù ›Äöç·A¤©2æÊðeèØCÕ¶0xíÙ$Î)MEõ]Þ·!žÙ˜VÃà(µèñ-Â|U½Äëu¦íÓEË\Ü<ó=Îå—~)öó™'ûÙg©Ç» v‘¸7™{å­ÅLX dÓû”W0¢ùëÑç“5DC—¿›ÒGF>ºüó’¢¹w°.»q4¿sµQOh[pg÷¤ƒÿ ‘®#Íó|—¨l#”ð¼,XÛ?9É÷nžDƒýjF—˜L2œ¹“¥p?Ú‚CL> Œö¡Fkô¦€jð‰»ó.ÖvÀP5R9~S jDýüŠ_|îÕVHÌý}1ÿ‹™G¾÷Ï=±Ñ®*»ö®b?iœ2¹ðçZtçÁ½'Ã7i¤‚y#vÜQ´fpP ¬ùfÌå"/+&V¨aÄô'Ôͨ.aùú1Žçg¾ÔÉB”ÚŽ„Õx·EÜU/Ã'Ãé$ 9Á{v@®£nùsNcÅm*ä.#>ý†G™ª­œ-‡Gÿ.Ïת—Úea¼öÒ•&ͦƒ«3©¿¤A‚^®ÑÓìû.`¬@†ì^ v¢Tõ+»­Ÿgô•w£ xñp9!Ô¸‚íÂÈó-@QsØù2¹HoÍB[Î9C9¹y7H9ÔãJñˆ×HÑŽ:‘Á2¼‘2^QÕ‚úfÍŒ.bÛ{P&´Ö[y¸J¹Tâ‡u|8dhŽü°òú~­ÈyèG‘:G¢³½âkn%B.Ä’@$|Öå'7A⾦–³»)'+µþQ‚¡ß«œ«ëXÜyŒªe€‡q=F­ï7qýPæÇ-‚’öÏV³=¥¢¿é?×42ø…»ã¨Ëp1`˜vúa‘ÇŸÐÕãh›Üæ´+Bü~1~/bÖÆ?›½øùkAL ðSÀc#º™¿èVƒÚäÜEx :Jg¥h׽ܬä^ijÊPlf5„¨åYH+fˆª²‚›vž>õË4wÄ;6£wë€>pe¾$V9™ÿÈ>À戠™è´ÑCÿ…!1¾âŒ‡@vdè øçèbyéðh¡Î¦ÕSÿ¡3™ã>ܶ`°¨0Ý7¯žô’áöœ‘…’@˱=ë ÙQ±¸÷D°Qûí^öðà­`ÿö©K! 
*ßœ'& Dª2KKÁƒx%}Å€ªSCÐ3ˆŽ€‘>O+©…`bF…=:5N¸ | 6lHå´dÎÏ€³.•ÃZ“‰ömêézQ¯©ÒLnŠ“ä©tg8 ¿Néð£Zb+ú%†ÃH£jý‰ä²ÑÍh\jûÓ.xt=àéÓ¬j`ÈÄ:ÎÕš"– X ‘loÌ^>l×±¥*ÛÚÁ¦aüÜ£æÓ ¨\°¤˜þü’éët\1Ň‚¼¾fµß†äAZC³7æ%3ªß™ŠÖ¾ÝÉé颠žs›‚=¢›i‚•Ïb%Ÿ¬¥ý^cïUÕ˳†Á–øŽ{qÄî?Àï5 neÛ € ã­[ˆujª!%t=÷Ç{ÐŨzÉ“‡~@Êhèœ`¼%…Ÿ[U쯭wîsùUú¹óZ€>¶¨®7LæÈ›XËK&Eè_1ܰ샮²ëUÕûƒ®üÎÿDåÜ£Q6KRä­XÙ*ÿBf"\¯© b4½º­u›G„†æZðSF£á®R~Ú fáéeÄ$%‘EéP¼f$y]Q{‘XqT­éÉîà»_ˆh¬1Y\p ûGùŸfçMè¼6„F=ñÙ2\c¼ôžêQOÙÄÊÌÏŒ¡(ËZ?©²º?ì¥üª5éM?²»ƒ‡Ê:Uqq{·¿«@Ý6?æ )¥ÕÝæïƒ»sC}vbÀND.+à ?7WÔ‡¹Ò<,cÆÛïu’Q*Úƒ,{TÀ Ô›î¨Ãl‰%Å\ÛÜnøJðtÅâñËÔ$˜f5ã(_´ì{ĘÍ0s5ÀœÀ+$%cn‹:áÃÉj{ŒËø-×J¬Z÷²xŠýMÒ”æè`ÆOJ™ã¥(éú¢‰æt•R á>r+ùv#±²LQo¬9¸ð ŸŽÆy3û÷z°Kk€+0þÄÏS(ÜF` Þ陸–÷úƒ?˜¡Ö*v/Œ½ƒæÁê„pÔP9ŠŸïÏg²¦ÛûÇÍß›gÝ«'Q磊 1ÌE;á;ú¾×`‘k쵸Ú~]b¹à¹çOãj¨Œyxš‰ž9j ™wo¼z}UØ““Q>À®ÙKœ  æ!“(/Á"ŧå¡všÍª 7+eó",Œô‡‹}Œuø”½@²¼‹Ðêú^6+EúõDOÛ¯Ä1“ÌIÔÞÑ v¡6²‚"vCâí{—2Îwfúp©â¶ôª¸2焉£ŸXa!C¾ÚµEhv³æ6%×˜Ì ÝVÔåt[­ˆQ–ƒÎW?™FdmÞõÆ0òϵf=­ò“!`n=,ì8X«—Éï05+ß’³¨m†15d¼M—¼Å˜ò>SI(…» X!- >ÑÝš~>ò7üµþâÁhè/€Oµ¢èêAÊ2#€Yþ!¸¹†¯ÍÙ×^ZšÆ"•C¡…ª´¿XßݬŠêÝ´ÿƒãäÄÆ–c\ë*)c)î5@a—0»Ö!í[ÊBƒ¢øW­ë° #L¹„MoŸp†ì•Ș ÝÎ=·0jöõÀÁÔ¼çÝá´TGïãQó MÅ´$J Y2âá%T:›kʵ™,.É®ªÜDù~k‚>{ §9Þš8C«@‡‰¹½eùý¹n[NÞŒfÞT¢PMãGS]’jÔ<п@_ïešu<÷‡JÍ3â@[O ž|: #ì÷Ö3,ù5Ñý6|ð–··1²ßOY.¢æ-`"|M“]÷.5¬{ÇX¿I’–æ(;óž3+,Æ>§—S5ô ÷êc5Ñv{…l<§†NßlU)ÚÔ_ÏÑøº$e†@.i0^0FAâ× z^"͸¶èýú಼«ç_Ö¡kÛ0jÏía¤Ûe¨ð3Š»ÿ¥-­‡À†xAuïk~Dä0ÊùÕ­;0RÔˆ«"‘ªê ЙÂ*ý¢;ð…Ó½WçÞT¦lw'Ñ'ŽI~~Õ‘®!ËYÜÎŒYS‚—‡Ð„0ßc}P~Èèk/Aå¯Ì‚w.´#³{’òxÆ7•Šo UˆYËYÚyä‹ÐlGŸÑû »£æÚ$PWÀU«üçÔüU£(~ä–B±möN A²›™§r‰è+“ÔØ™ñ{¯"þœ5‹ Z.èá-tÜl¸é…¹»ƒJ0“úº-Õ­*¦¿)Ÿ­o>*tï´ÝÂgX:Öî“K×$1*áÊWÑf‰^B„ÑÃðA×U2<¶Šã!·‡û(æ™jÊäû¨±­íÅT#.–A ¹p—r‹†æ³þ°•=ǡî®þÅß´“m` O«æ¨óŠ8ì+ÊôÙƒÛe‰q×>›CTh¡ÑÀIÒj KìqxwüÇõVËRŒ üS%„—‡­Ùé‘tZËt ?w|3l?è(H,_”¯Ÿs~*)‹gBïGK£òƒ×I ôvû;dj‹6°ÚËoP>‡Ö'Ó^àé(FÌ£1ØŽ•zà;vÔ9W°ŠÚ +&}Rj ¡x¡3Ÿc56DTõK#ï$X”Wï^Á<`®;EËd‰à%ÀÉEBä,¿Žyg³äyÙë/²òÊ `s5¦€’Y‡Dð×ׯ¨¡#Om=™òï êjlÇöÀYâ?K¶­þD Ap`2û{ £øev¶^8 ò¬/йB¿zhh§[’–Ô™Ò­gLØV”,æZAjzO8^ˆæÇ±ž‰`;šbÇrr¢<‰!‹ùD ®8FœºŸÌ—Œ3 ÝGaØž+5t—ïU:+*ò;QÔ$3h€6¸“) \ƒÔ»3ŒêþÄÈýÆûÓ‘v¢²ª—ÔËF[fþ8+rxç¥Oj)t–'i~®Hmf⼇>E¯ÌÔ˜Év@k›âG $ƒýâ’iîÛÊNMÌ6õU¼¨4ú#H‘oiXu$§7Z1¨¨à.hŸÉ MÍpmJߊbßqÛŸ™ 
žÝ[Âcô¶(¯‘|s?›öÇj}'ö¿-kh´Ô#:6ÌhFzôÕîÉîÎõ%úVO0¨tZã0D°U¿q>þ2N·ªÌèåD±ù'©áaßÕ6Ð,‡y Ùó0ü$\¯](êá MË‚·á>˜d&Óa‚8éÙÀh#m¿€d6°W(2¬çÖ|sEËà ¬TÔ®Ùu(ÛPDÑl~ý~QI+rˆÓÉsAåzâÌl/ÂFD>KÊÈv1tH¢ÜXJ|üù¬Yk}ÎòñrqD^ŽÇáÓx}Žúò›ù{8ªTh¥À´Ð„ϳ2SùÖrPP Aklû©Þrʈ缈ùàez‰Y>ü}B%ÒŸµ¹6yô»Ù¨;ÜâóS® C úB~œZÝ»—óà|†9"38³†¹3PÁµo 諜T ¶Œ#>4Ìwà\j¶\ŸÒ^ÛMCF„¸(gt+ßYÇòjÕq©›{6U'ÜH_YX©º‘Õ%è2ÆZÔø³‹ë0¹Ò¡ @Ús\ÏK ýæÁ„K êú+Íé8djÚrÅ­Íúð8IVÉ7<4}·VÑOК¡Vt}a¨jþÅ))wpãQ]BÐÌ¥[G#… ¿Õ$4³¹4™Ãw,i¬â…8HùD<•PÒ¨»’Ï·Ž[XdXa0Ÿ<¹wñÜêÊ¾Š€ç¦ámû z¿üöÅ«o‹ƒþ«‘zÒ;ô¦«“Ûw33§€Ó˜â0ðÒ!çã7]qÇ^êË/A‘±I‘S¡ÁÃ&T{xGa¾4?…Žigɨä˜uß®4²Ž{1Y@”Ͻ0ãGëá@B&=U%1f$ÆÉv¤ hÖÚ¯ãtàK†üE¨€ pGX=qƵU7z«‡Ëøæ~›<°Ê¶W½c>¯5Þ2ïz»|Ž7Qú%Àl*àùl‹¡bÓ·)ªæR$D|æ3I 9„‡ƒ@fö%Ú&ñ4:¯êÃÔÅj°;jüšIìëeHâ9+ô©ñà6–óamwýUðô$¾4LmlGgÜ}ÇP¢.o‚g©$sëñ¦?ôUcã=_úòùqz· üUH_ÂC‹N`OÿpjLŠîïƒàÛ.kÓo9Ó”Áî4ÙWƒÞŸ• r—V6ªšß½è5ù¿sÛ¡±ä¬#´·¹HiÙ=ž~ß1œ…Û¨¶{”» &åÅaƒÅ‡£jX·ªÝy’ÊZ);ÂïÚô ~«°ýd&… ^‰øO(ø®Zkz¤Z²õ=×´˜_—Ñw‹ìdb€r‚ú>-ÈšfýG%¶š[›ºVqzÇqT<@JV#oü“|ç,C¶bmFÜ¥ ~üf#$½q”!ÿ†eÜŽ0º¡åyŽÒñ:yúÛýÄoa+Ó¼´¨ðßH%[}¹•qá¡ï,ËQÚ:ÐLµ©MßÄ´ºûJGê¡öèÑÊj…ц$ ts°ˆµææn»P[ýè­ÚBüðq0hΑ{wÁ fjïv±ªèÖ•VkŽ×Ì*'o¥¡§JBô¡W»g®}ù·)>yä…q•ŸˆECƦÞeˆXû>ÚI÷Ê«hÌ—$ñ}F†ÒÊç§è*ù7°¦LtÓ¢Sh ÿ^/ÿ# u®¯¸lÇ@¡Gì+vŸv´À¶ Rÿb·À%™ R¥ã:üÖ³>êx7íi#DrÕÝ¢îfêºáF"»üV³’Žu¸@ÿE"TëÇi¥Pq“’Ño^¦x+!ÑÑ¡R‡ê Ûª† qø¦.fGf°-Hf)Ü 6^ÐŒ)Ë};V Ò¸±0ÓV€‡Óò[óÍÙ›*­\ÚÑùü¯ÌŸÏ¸ži»!ˆ«¯—/ ‡ˆ0ã„'¯|rfÍ×!4,ÈåK냮௭3!ŸH*q§X>'%*ºábª‡£MmÐ<0žíïsú7.;–A%ë,¹2øÚ“dG3Ñè> bÁÖ†‡^B5Vøz~ó_cª›W-B tñõF¡#Y8X‡BèGB;ØŠ§cˆP` ¿FˆôP}µ™-FV”ÎèÚHÝ8ÿbÕr—zY5ÄJ¹²/æØþ LN;'†hgw5"¤1Òãý 2ÊÐB?/+©4ÝrÁH˜ÄÓr?²X¦ª¥–]|¡Å­ñœ‚y7c™iÜ¿ MÄ«¬“’åxF»[£®+SçÔXZØ)ƒbÒÇžsÁE¨w™Pƒrþg '^@Ø€°ût ZˆmïK¤”Ìoð][‘™þ‰<ɳ¹e­„òs¢ynWÆN‡â_—Š 1"]=øf¹éX©ý²KQ^­EyD‡s¢%èO’Œª,žaΰ¨Q¼Í2“×ò˜%‰b8Ôè*ÚÅZi‘ŽõÂYé³»u]ƒé²ñe u¹æ ­ê!ž|bëÞ<,õÛ¸^ÄVxiC%\ÇVº³ Ñ}þ€È©•2šd‚w9ÿôÌø„æÙ2 &âú~:‡ˆ©eÆìîbŨ¨’ZÊÊ’^Ó {- ‡¥džµ;‰=£¿€*TBÜØ¦ëCݨóŒ8Ö¼N©WgÕñ~Ñ ñ‘gGkä.vú\’ûEü®QëŽlçÜ,þ¦ƒ±ÍKïÇÓÕ¯U¾‰;Ó6r­]g°þœ`ëKC6Ñe¤“¨Öø× ç!͘>ådÄTüèÔÔÏBÍT„_¢qx[Ëp³q¹I8Mr-âêñü|@Ö =„É0>Ë·P1EÉ7^´øÕé²>–ÿ„ÿ—àÿŠfö&.n@;„ÿ¤,$_endstream endobj 171 0 obj << /Filter /FlateDecode /Length1 1193 /Length2 6984 /Length3 532 /Length 7746 >> stream xÚí—eX›[·®qJŠ- '×â^4@â 
îZ¤¸-Å‹;EŠK)xq/RجµÎ÷µçÛ?Ïùµ¯ýæÏ{1æxž9æÌu% ´jv s„)TaçÌæ ¥”5`Nƒ”#Ôư“6q† Á‚‚` „‹%,ä q qò€R{G˜¥•3IŠù¯"~ „-ÔffbT6q¶‚Ú>ô03!3ÔÙƒ(‡5þZáÔ€:A]¡æ0h3sšB-avÐ_†ì,@þÂæ.öÿJ¹BL™L2,š#ìà@s¨¤‚xЂ>8ùÿaê?›ËºÀá*&¶µÿkHÿ-mb ƒ{üŸ„­½‹3Ô¨Œ0‡:Úýgé+è?Þ”¡æ0ÛÿÌ*8›Àafv–p(ÌÃÁÉóOæ$ s‡š«ÁœÍ¬€&p'èßq¨ù:yÞß>@’ÚIu]Öõ ÌÎYÓà äü]ü7ƒóÈaîÀלœœà‡Â‡Ï¿Þ þCKÆÎ a³³rñòMM<œ­¸xy^` ÌÎꄺ?qØ!œ–ã´@8þ:S^ ÈÊÃÞ j÷WüŸä uDüA;è¿™—rvûý3ÐEøºJü&~ Hê7=´—þ7ñóAŠ¿é¡Rùß$Àiü&n ò›Öiþ›®Èä7=ø5û79ìšÿà·àƒˆÅø bù>ÈXýû‚ý[ÿº¶¿ü k÷>è"þÀ]û?ðAÈñ|rú&ìü>l×å7r=töøÿû5””D¸{±sóÙ¹ŽíAFÈÏËéóš¹8:Bíœÿþž?\æ±ìáþC¡îP3ÀÜ4ÂL8È:¹.¤ÈW&w´Ù‰2µ h°5¸6U˜cwY˜pÒ±~DJ}BL` +ÁOÚW*™[ð~#¯“ðþêÏ|Å!bÎNƒ`šíê¢ôfötâÙf„mÍѬÛ׳ÎÓŸ¯¸¶¢W¼ÜóçÍLÿ:õµñú6÷8Åœ~# `ÙôòT{{%vàzO‘‹˜Ëï%ÕO;r7GÞ¹4n³ˆ“*‘©Œ:•*z‹ÅÉìn±:§ÛÐÌé|”¿¯YñºÇïkÉ;ܸ¬Ò€ÅŽñznŽ_vŽ%©ÈÕ|°$Ü {”ûÜШD\Û7þLžH:“:U¨D’MŒj¥ð+ApÐɹßd[naù‡w"ðIK6¼sj‰®"rç÷O,ÔV^’*К·4¯Ð§9·\¤TÊL̼eñwÖ öàƒ«J´¬þº”—¹&oªK…siÆ;Þ]9QýˆJo@±æQ•~ ›wiXj~›}/mXL²¦×ú£‘–\Ëå…´;tBäÉ)nÜÂÍÔ- à¯z»/öðºkƒcU1Óåu6v\òËyS¾Ý ¼IeÛeåt.‹®Æô‰ˆYÞæÂ€÷˜<þ7ÉlíLãdÙÒÃfg7'½ÏÏWí£ç˜0„èýò²ç¬uoŠèi„ðv}NÄ¥äycyv¸òjýQ'?F‹&5à…]Žê1S Ô\váIšw‹ç ˆÁ§¦ØŽŽ6—oå­oªÑ1O;ÃQ-£Tª™£UÙK/Å ^´J5½Ò+S‡£aV ð«óîÕ+÷=q'¶È^Îc_óË”ÜÐ}’Fjº$—^qîÇñ v¶‰ì^#DD‡Þýlñçý_iÔfº$¼æhY5B§¸U Ô×s÷–ú1'·&šAVTpà}úE§é‘e±·Û)ö]smÍæžÕE5ÏÍsÏŒÐܘ̮%ÿÀ…¥íü‘ãS™‹ UKxSH~*£±›ñ˜D øòÜÚ™Ñö2e]CÃÖÆp±ò7F€;ýdßø?÷‡h€q3T;e˜Öi.CãkT»$,è½ç¶†@-/w«‘Ê4ºezèØk¨\¯\Ag³ ÏêË-.ë oL™&sñ÷­¬æâ~=CϲíËwaËWU½»í>àÝöªÖ÷¡ÇÝbä;í ««Ïï4núi`Êdö”8›¤ ÝXwÎ^k ž z­çµ§‡!XŸ VŒ¨t0Š`†œK¢r'Ù¶ù×±^Ú¾.5¥ê!žvÄÅO)2s[l´5‹}Þ:M%åK“#ñæ>š²¯wÐÀ,‘S?ŸWÓ±Åj¦NÀ›]”*ß<ÂàN:7yŽ3î÷¢?MìÚg`:àîåäWASûÒ›µÍÈÕŠtC&pn„S.E¹zÜáö>$5ɽ°ŒCÂÐóm‡·dÃÕ„4^FÕN _›Sð:´|ò7l÷£~ØÎ-~ëÊÏꄦÚ.‹lÜòÎ: " ÷O=‘YÃ$ƒö¯®—GgÙw?œš´ ¹P°;´÷ªóxŸ{­À! 
8½$Æ-… ”†GŸe1}Æ^¸E ˜Ì_ÂcklQzuß)·¶î¤§É’¹(³Q{jI=EgÀå ÆÑ€QUt0sC°ø†NËnµ—g¿S{e»Ù‰?ÚE°q ƒÀvvç+bPkùÊ·ª`W²]^úO„òë©úúi3³>ú“,„¬=9›#§¹ŽåƒJ&€îDŠŽø¯E‰‡,˜fp®ÔôS¥É=ƒâk¯ùz\;yXðD_û†Èª®MrKÕó¢ `MfNVæ$ïÇz剾ƀpñcìê00ªx»fï§#djb$ lnÔ“Y±6 9#ÍöBñ4 °0ÇC!ñ•ejÃ8zZ¬œ!FŸ£G6tßu ôÛ¯¼¨ÂM¯÷«R ~:»€*f¸ÿVh&ÚÀr :sµö÷ü)•½˜âob'¦…í "ë•¿0~¿hßp¸ûx½ܼ#ô­!up:}ÙÑ7©ýÐç@ͪ™mp_BÚcì‚—·JÁÊ­[—Ų)êPÅ `ÏZOp7µ¬RõÐÕKÿD''tŸDÓ¡ü¶7 ¬9,g¥Ã×­»G¿‘ôŒzTŸ€ë ²j0Öz±çþ,Üç­~n„zPÉ'”Íâý!$¯Ñ0_ÐÜÓ¬ò­ÒgåEÌ|S¼ l…§mÄô㻟ï¼ñYÔâõC‡ý¶ˆÔ¤HõÈ"ðI€Qêwì%(¹»†K£ˆp4.AxáiÖ÷Qnöš ôb ·ŒZ{-1a"1š[d.®b2}Ðl<}w²ÈÓä¸!dßqò‰ˆ°FCù›¦¹©¾1f»·îáô)Ë5ƒ/õþVÆ>F^q[4s7­i÷É;¤rk1‡ôsM…/#ÑyJ¯¼IT–KŒGL }ÂO©¹32Øò>^ˆù(xÞÛ3áèÈëhÿLm1ѱiéCwx=@•Aj(ªÄ dvȱNL?Võ`§ )N%+±R¼ À-÷Ïæ[<ÀÕ.ŠGB¾ŸÑHgÏ‹¥¬Ø}9)Uµõ”±BuPŠÓiíÆIÛ¾v£‚Þ4(¦PjO+‚n6€šø]{ÂsÒ8ÿõµð”™Ëéx” ¥çбã¨ìqÇÑžd½ÿxŠoÛbJ:Läj¸7L,¸”øk– ƒ° ø€aÖâLØ¿zÔ»ù‡…’’–æzþóIÔR“.öÀ y>q½‹ ñü}›©-îÔ¯¹›8å>–è´¹CûÄŒJ_™¡ V¤û‚œzªåb,!óY¶G§- $$æËÇØ°õ£®úMœ±„Ï«|KI¬Á"CûUEô(YÙg–9ã?<Ù¯¶ÕÚ¿/pðÎJnÉ®˜%+ž/¿|ª‘²ÃÜêÇòüÈPŸo\¥§d—ju©ÒÜÑí6‡ÉRj<¤ê¿kLòãàÅçÏ[«ÏíÊ»,.Âù‘ÚÕì©7'̨”™=Òú¬1ò÷•cRIW~c£–×Õêõ<¾ÛÇž$©Ÿ8,sZŠínÌKpAó¾ÂæðBË»V.»åsÖZ·XõôùÒì%Ýô¸ÁZoßEñš1’(Vkì BR¨)š®†Cc©‘¾:®|ƒ7ÑjdÌ£EìU!TH*J-9#jº²(Μ.hmüH~D~‰y-H‰_˜ö©–;Vшx5 rl°³Ü‘‡IÒÕ|ÝdxçÖCñYÃ?ýhÍqÏk!-„[áU…£uÁDH²-ÿŠ?üúJdJG,¿1¹©SIf±|þ3OІÛïä¨Áº«¥£c–œ’0®ÇЀÿªÔìÇK)¼_*Zpüy„‹èËôü$w%çL}šFdþ˜‹Í­B² ‚€Ï:›x[y1âý‹Eí8Bš¡aa;ƒ¹~ѰErÕYñd_.žO¯f_—ØÞŸ?Â~1ëKE©aÐshkª¯ËJ>T±R DÔÕ„)Фn§b+áÊ ¤f'°°½Rnú‰„0—¤ibTZ‰÷ºb¸<`UÆ[™ùhYê÷C$šÙñ… š—åÈÎÏûò…n_#¿@ J—*å„y×Ï ¬©– ø–µre×R4²ûP„>–xò¬~[|³©8|£òŽ$WÓù´ø .#<Š2zÞ»Ç,ÎWa§K(tç‹q6#¯ |1’véû9uªO¤fó¥$ëÍ{Y½Jç›"Üp½ô¬çÚo¾{)¸ jmÞÏÍȲ˜;eÊu} ’mL#•ÝM¥àèø”ÞLQ¿Ññô69!¡†)xAë“õm…r‚GNp²£ç#˜Þ—u=Ò8„·úû7rZñÏÛ9;®,ßÅÊV{h¸û̳µ¢Çßa–è颜Y ,E‰*÷ÇÕê*6DòëJÜÐÅëI±S››žš0|è-rÚ¸=ƒ7ÝêùË>^õSÂŽ°0¶wDƒ÷•(à§'>óF—>¯B`¬:û¥Ÿ¬é.ÐN¸½àÕ]7 aR-Ñæˆ\fÇOŹá¬@Ìã*OÃÑ_AzLfúÄ15LOiÐðPËxâœøp|#åª3‘ìÅõ1‘FW{„ˆs[–¼G¢­‚›¿” Æ%I“Ü-û•Ê*”êÓ‚õ/Þ_#½ Ï 3µ½”×ê¨ö-«z¬SÁ4ݱù¢ŒuNK¥L:€ž(ÛýÅZ¶K7sÚæ}O‡¨ ÎpÏ*a¥ÖXTjèNX­?ÂVuýz{aÓø}þ+}K+®ÎõÎû}_B#Îcý$³)dEÿãŽÖ¢9Ó'|ög>ïÑ•h]ëÞïì›"‘âi–ñ‰¿ÞŸ•#¸Í…¢_μ2K´õîÞW¿‹bFÖVÿÀéWÏgÏQ5.q".W=>‚‹†q³Œ*gÙJi'z+“ßo¾E5Ãÿ†´q=aè p›y«'ü4”á –»ùu0qì§ËT.’ïB’ÿȼ¥™Ü›ëÐfd‡û –Ÿ ç5kö@2VÑ];’C|Ÿ7Þ¨·Î õÆsfƒlpLÂ"Jõ¿sÃZºa?& 
2ܤ_KLOvGÊ—˜éµBVØâù³Ÿ\”UõžŽNº×Òڻþ,ÒqF¶Ä{G{º¶v;JkyóöÈ÷—ZpŽ j+׿hvxó³ña7*H׎Wëãë“S§;9ÓO¯žº=üÝ e‘ëû´i‹×>¾ùË©Ï#Ѳ…¹2miø–°š°]=8`}y:æY¿:w8\’g7UqªqεVØs{‹äÙØˆ>tù¬J—k@ÄìN(¾oOËyÉ}‹4yX£AÁ¬øK(ϵμÝ_ÚmpD­ã]îÁéçI:Ïï]h4ÛüëÑ(âØH(æ"]ÝÄñ|&‰V„w’cyÃ=44XQ² | ho·yt”Ë€¼ø1í°!òÉžsç4ÚkÃËe1L¶m˹Y”ù!L˜LýM\t7¹™K`|ª^áDÌá“eycߟœéŸÁ'nÑN@‚ )ïíÌ‘š›ò K×øO¡ë,{§G½ ïŸÝäLFÐÈSÎ}âhŸ”ÄYkrq{+޾Õ\˜G£M ݳBÎ`cæ1´åM’œˆ cœxYËÎØÛÐ[u.žLh–ô‡âeÐÉ¿~iÅ;ΓD9ºè| bxâÀ±;¿=éÿ\ûå(×ë÷íÝH–?†–£•œ¤‰{¶zJarŒ\FxŒjŽ{à‘±}~£Üƒ·©K˜'´£‘Ô–uH®áem YÅ}%NZ9Ê WŒrj›­óö¼d¦ßß}]áš!ýšw;K€^âÆìhKVJÎÚŽ£ö9H‡¡Z_0 …;óðÔI`/®ÿx”\;ûÒrúT².Ï| á*Ía¬ ƒy„çBïäw­š µ/MøÞæNèñTù°.²ƒ¥ß–ï\Ü÷„Î}³¬GË$Ì=É×’öôßóøPË Ot0P!£>Jî\hÍÞàüVfæð%ÞýëóŽ£²ž°OƒËÊϤ';W ­Fob‡eÕ`ÈIZ6yT´O.¶¡Jˆ•nRbµt‚8ëûpcH,*ðÂäc¬Æ‘&0^‘GÏë¦ödFé:ˆƒ}Vÿ‘/«/.ú> òu{°'ýõ3ý˜8Ú:qø.Ž£éDÆ5®¦8jo‰ü,Z[cp)·.»o oÀØ«Ññ1ì hHÙê(IQEšži`j‚ô‡EÙk“]Ü'Ö–YisPó7†7G\qx¾Jn48ºl*馠ƒÓÉ8ÄyáÅ~ãÊkÏw¢Åh±Þ³toáï.'ŒÒOL‹»Šñ®p Ò©ióªÕDvfÈ ×Aô|û¹-bxV%7ëb¤aD‘´¬ßÚíJ¤‰ S:žF…d'qãK ÏýŽ¿Kzn‡GžŒž˜D¯ÁߨZoÔÓîôrÈj‹œ=+ŽSÓžŽßwæíjxvñý`qYüøV*Vq'·)!ð¼IW ŸKyÎ:Žœš+“78H—œ8üu3›†O§ëôiÆiÎÉçüŒ.Ã~_ŸWÅv$!¢ž'©f æôO5Ðf\ £Õ´2’´=+£%%´ÁÐÝu°Ñ%¶À>.ék`íŒ×ÏžªðàÓHË&§Í$t@3RIùF®%,í³ö9¤ç`ìèÜicûÜœ? 
P=õn—Wóâ…„.w~K¤4õÏc”š’îšÐˆ°DEBpv¸°SRNÿLP²<¼yP¢“Ÿ° ›MǯhÒ%^2Òæ™éiyÞ¯€†ÑO”JÔºëÝ0Ÿ¾üQ÷ÊÍáËl>[!Ú¨qyjQD1‘gEãµ@RÈîû¥l|gN›Ú û¼ å!$À“v÷îm‡›qGY„Þ¡ü¯oc;îRó—ÄÉdàäì–ĉöäKµÙÀ–šãyb [*ÝŤZèLóä&Ñ&ž ¤„¯¬ßÓÑÅÇGOã—w½½`¿‰Ò–,ůnZjm{uICHb°*…;[ƒ ‘ýàvϼ`y Öé\ª\…yuIX%a}‰½aþ:ݯ·êBàé›ò^ª·Q3‚î&ZƒB/Ö­¬h•‘«øk+'G¢#DFfòþŽ2TnOJ:*R ”[C0.Ä·8MºbuÝRé?'ËÎ8½ûBL¥A/L%ž",½¿Ÿ£¢tÊÙ&3Q˜IÎ÷R£Dø*ž9kAåÌý»o<ÅZìöÓ\æ­™f׳pícÆü7—³à¥& ˆ—´EÒÐr…0«ieIbÉšÓÒuþ¥j¼Þ"E÷E¼\gïCkv£`×îêæ^î_ À¨ù¦mh«ü ª\Ö— Ûi N™—“Äi+ )ÒŸÙMik’×׋(uJKA-›ôh3©Ñ;.›¤ý÷h”"¯‘MXûš¨óä0.'בg¿ãl bÊš“½Dw´5ôjŸ]å´ôk2Š8¤ËB^±dâ»EüØ?æ+ù°Ÿi«â ¡\J`§'øßéó}2³G—øÞ û‹)hXs<ë> !›ÝgëO¸û~æz©×ñ¦EðQÍwª‹cþ—»ƒYéw'Úa¶×A?Ŷ¡[‚É20©4Þ è[› h®~Œ §,<¨.ÃM-®ê|f£m”Æ8#'•~¿\š9ÛF÷¦Ñ5˰û²‘zß)ÕŸâÍ5ò,I˜¢·ÙsŠï˜‡+œ?õíœêv¤{¹*Ã/Ýä1;ËßÄ'¬qËýx´«òõµÜÍì¦äd2 ´)Uå [‡ìµçÙ*žï¼Õ>K«L ùÐMr´°÷‰>.¬‹ äµhìûþ4*FdJÿÉ.í¸0âò©|(B…À©¦–‰]Îm5=nqzî$Ô·j4•'¦µ’k†8€áí-ÓS†c.ŸÄÚÛ/Æ*÷‰{Ÿn\Éw¥xé¬ÈB¡Qb•[ÖWv]1.©Í·ƒ!£< ¥$?×»ïtŽzõSxg 7”@¬L¨îœÈûÛcÜÝŒ¡+ SÄxWê©¡ñ“ŒrÌ–õB*œ© Í+iT}v­óñW™Å7D!L4±¯šYaõÚÙ_²\¯ö¯Ý7[Lö|\Ý#^ˆ®­¶™ ‘ÜŽk’Ÿã}›:~/×WueûA£ dÐ?¦“ɬ›WŒÎ))gABMöè›­Ø'O«°'bÁ i¯â·¸µó<Ê!^¦$GB©•“7#1ò^±¿ÃB†1nǺ›Åξw»÷‘Ûîô!ÄN¶šî&–õ¤éC’—Ë9ÿÀÿ6øÑÀ 5qtFØš8Úþ 9ÿköendstream endobj 172 0 obj << /Filter /FlateDecode /Length 861 >> stream xÚ•UMoÔ0½÷WDœ‰˜ØŽ¿zk+  ÒnÄ¥pXvSuÙ¢diÅ¿gÆ3ÞͶTЃgf<žyïÙ9m^UkŠöªP&Jk¢/œõRkŠvu).›R‰¯ee­ïä[øeetçepb軘~,à±ñ|F‹–c0]ã}ÇÞzM-Úþú®K-ú!gèÉ÷¥¶5|iõd€YQ ºnÄe°º+}Úé®ÔAt”éÃ-X¾£•ªXbiAÌ:´ Ý8ö·¼õqY5¶†‚¿¶ï ÕH•/*UËhCL`Ì!jË=m§)É´†ÔŸS³÷ôɯhz Ó[¨Ü'#-Ýpq/_à®Àˆ›2¢•ôÑÙ¢N»Ÿ”°t³I;Ò¹]v˜Î·©ªSçÉrä¬Sžƒ–µ†“"ÂÍ€3ÈÇ8<Œj2oR¥U¶ñ`‚ 4¡ti÷’9eÁ8%ÞƒX°Õu7 A‹LùŠüIŽ`ê†2>Ðz¬ã³æY‰ë5e:I /(ÓküÇŽ;¢|õF\×X}¾MœAÞn R![¿¡ŒIYãÓj9áþ°i\ò×1°¹ÒÁ§éO댦ÕaW‡Ó³r*n¸ÃÆCÙëvR%Á-K8×§q‰qN¼I'/HQ[ÚéÙbÕÑÊ&zóW±’XrÄJ ‘H°šäñ}7R9Çe4{i›‰´kʰœ+ÖÚ,–x 2Xïîö ƒ²µ˜õ«Ìõ=ò°@0£oWî'c¢bhUÜfõE†=K¢£}2^àþÆü<‡¥ZÃ%Ÿo”³¬nýWvóîB¥Å|™hØ,‘ˆŽl騮p^ÿ?)€ž—¡1È7ŒJ"Ä3™ŒÀo&bÆ&Dø=g€£ãk#ÿyX2ôg#\ÒâY¾©n>bÊ-Yùt¹n‚E Ö‰’Ó¶_ˆqæ–\ðmV+ÚcZGWÆ“Í':øÃüu^”J;ø?¦¾qÒ7udê/UCi I‡•p•ÄÎCàëÆ| ±¸¾¼¦Ù–>’k‰’ݤ„9r=Íñ3ï2ì³_ÌgHu›Z…ÉCfªè¢6Š]h‡]Ä(¯AFu.:B@YŒ> stream xÚ31Ô3±P0P0Bc3cs…C®B.c46K$çr9yré‡+pé{E¹ô=}JŠJS¹ôœ ¹ô]¢  b¹<]ä00üÿÃÀøÿûÿÿ üÿÿÿÿÿýÿÿ@¸þÿÿ0üÿÿÿ?Ä`d=0s@f‚ÌÙ² 
d'Èn.WO®@.Ê›uiendstream endobj 174 0 obj << /Type /XRef /Length 192 /Filter /FlateDecode /DecodeParms << /Columns 5 /Predictor 12 >> /W [ 1 3 1 ] /Info 66 0 R /Root 65 0 R /Size 175 /ID [] >> stream xœcb&F~0ù‰ $À8JùŸá ›3¢Y Rè8ˆt¬‘jl RõˆóÅš|Ëh¬ 4ùŸáý+P¬±öã‚Qæ(Føb@$ÿu0[D²3ƒHns0i "9Á1Ës,’"¦‚ÅY@¤Ì°¬88ÆMH‘ ¹óÁl0ÉN<§A$×Y°i¢`õ‡Á¶Ë‚Ýî "ÙjÁ*½A¤#ˆ”ß "™—M»ÇjÅí endstream endobj startxref 179152 %%EOF gbm/inst/doc/gbm.Sweave0000644000176200001440000007564512134211007014472 0ustar liggesusers% setwd("c:/dev/gbm/inst/doc") % Sweave("gbm.rnw"); system("texify gbm.tex"); system("c:\\MiKTeX\\texmf\\miktex\\bin\\yap.exe gbm.dvi",wait=FALSE) \documentclass{article} \bibliographystyle{plain} \usepackage[active]{srcltx} \newcommand{\EV}{\mathrm{E}} \newcommand{\Var}{\mathrm{Var}} \newcommand{\aRule}{\begin{center} \rule{5in}{1mm} \end{center}} \title{Generalized Boosted Models:\\A guide to the gbm package} \author{Greg Ridgeway} %\VignetteIndexEntry{Generalized Boosted Models: A guide to the gbm package} \newcommand{\mathgbf}[1]{{\mbox{\boldmath$#1$\unboldmath}}} \begin{document} \maketitle Boosting takes on various forms with different programs using different loss functions, different base models, and different optimization schemes. The gbm package takes the approach described in \cite{Friedman:2001} and \cite{Friedman:2002}. Some of the terminology differs, mostly due to an effort to cast boosting terms into more standard statistical terminology (e.g. deviance). In addition, the gbm package implements boosting for models commonly used in statistics but not commonly associated with boosting. The Cox proportional hazard model, for example, is an incredibly useful model and the boosting framework applies quite readily with only slight modification \cite{Ridgeway:1999}. Also some algorithms implemented in the gbm package differ from the standard implementation. 
The AdaBoost algorithm \cite{FreundSchapire:1997} has a particular loss function and a particular optimization algorithm associated with it. The gbm implementation of AdaBoost adopts AdaBoost's exponential loss function (its bound on misclassification rate) but uses Friedman's gradient descent algorithm rather than the original one proposed. So the main purpose of this document is to spell out in detail what the gbm package implements. \section{Gradient boosting} This section essentially presents the derivation of boosting described in \cite{Friedman:2001}. The gbm package also adopts the stochastic gradient boosting strategy, a small but important tweak on the basic algorithm, described in \cite{Friedman:2002}. \subsection{Friedman's gradient boosting machine} \label{sec:GradientBoostingMachine} \begin{figure} \aRule Initialize $\hat f(\mathbf{x})$ to be a constant, $\hat f(\mathbf{x}) = \arg \min_{\rho} \sum_{i=1}^N \Psi(y_i,\rho)$. \\ For $t$ in $1,\ldots,T$ do \begin{enumerate} \item Compute the negative gradient as the working response \begin{equation} z_i = -\frac{\partial}{\partial f(\mathbf{x}_i)} \Psi(y_i,f(\mathbf{x}_i)) \mbox{\Huge $|$}_{f(\mathbf{x}_i)=\hat f(\mathbf{x}_i)} \end{equation} \item Fit a regression model, $g(\mathbf{x})$, predicting $z_i$ from the covariates $\mathbf{x}_i$. \item Choose a gradient descent step size as \begin{equation} \rho = \arg \min_{\rho} \sum_{i=1}^N \Psi(y_i,\hat f(\mathbf{x}_i)+\rho g(\mathbf{x}_i)) \end{equation} \item Update the estimate of $f(\mathbf{x})$ as \begin{equation} \hat f(\mathbf{x}) \leftarrow \hat f(\mathbf{x}) + \rho g(\mathbf{x}) \end{equation} \end{enumerate} \aRule \caption{Friedman's Gradient Boost algorithm} \label{fig:GradientBoost} \end{figure} Friedman (2001) and the companion paper Friedman (2002) extended the work of Friedman, Hastie, and Tibshirani (2000) and laid the groundwork for a new generation of boosting algorithms.
Using the connection between boosting and optimization, this new work proposes the Gradient Boosting Machine. In any function estimation problem we wish to find a regression function, $\hat f(\mathbf{x})$, that minimizes the expectation of some loss function, $\Psi(y,f)$, as shown in (\ref{NonparametricRegression1}). \begin{eqnarray} \hspace{0.5in} \hat f(\mathbf{x}) &=& \arg \min_{f(\mathbf{x})} \EV_{y,\mathbf{x}} \Psi(y,f(\mathbf{x})) \nonumber \\ \label{NonparametricRegression1} &=& \arg \min_{f(\mathbf{x})} \EV_x \left[ \EV_{y|\mathbf{x}} \Psi(y,f(\mathbf{x})) \Big| \mathbf{x} \right] \end{eqnarray} We will focus on finding estimates of $f(\mathbf{x})$ such that \begin{equation} \label{NonparametricRegression2} \hspace{0.5in} \hat f(\mathbf{x}) = \arg \min_{f(\mathbf{x})} \EV_{y|\mathbf{x}} \left[ \Psi(y,f(\mathbf{x}))|\mathbf{x} \right] \end{equation} Parametric regression models assume that $f(\mathbf{x})$ is a function with a finite number of parameters, $\beta$, and estimates them by selecting those values that minimize a loss function (e.g. squared error loss) over a training sample of $N$ observations on $(y,\mathbf{x})$ pairs as in (\ref{eq:Friedman1}). \begin{equation} \label{eq:Friedman1} \hspace{0.5in} \hat\beta = \arg \min_{\beta} \sum_{i=1}^N \Psi(y_i,f(\mathbf{x}_i;\beta)) \end{equation} When we wish to estimate $f(\mathbf{x})$ non-parametrically the task becomes more difficult. Again we can proceed similarly to \cite{FHT:2000} and modify our current estimate of $f(\mathbf{x})$ by adding a new function $f(\mathbf{x})$ in a greedy fashion. Letting $f_i = f(\mathbf{x}_i)$, we see that we want to decrease the $N$ dimensional function \begin{eqnarray} \label{EQ:Friedman2} \hspace{0.5in} J(\mathbf{f}) &=& \sum_{i=1}^N \Psi(y_i,f(\mathbf{x}_i)) \nonumber \\ &=& \sum_{i=1}^N \Psi(y_i,f_i). \end{eqnarray} The negative gradient of $J(\mathbf{f})$ indicates the direction of the locally greatest decrease in $J(\mathbf{f})$.
Gradient descent would then have us modify $\mathbf{f}$ as \begin{equation} \label{eq:Friedman3} \hspace{0.5in} \hat \mathbf{f} \leftarrow \hat \mathbf{f} - \rho \nabla J(\mathbf{f}) \end{equation} where $\rho$ is the size of the step along the direction of greatest descent. Clearly, this step alone is far from our desired goal. First, it only fits $f$ at values of $\mathbf{x}$ for which we have observations. Second, it does not take into account that observations with similar $\mathbf{x}$ are likely to have similar values of $f(\mathbf{x})$. Both these problems would have disastrous effects on generalization error. However, Friedman suggests selecting a class of functions that use the covariate information to approximate the gradient, usually a regression tree. This line of reasoning produces his Gradient Boosting algorithm shown in Figure~\ref{fig:GradientBoost}. At each iteration the algorithm determines the direction, the gradient, in which it needs to improve the fit to the data and selects a particular model from the allowable class of functions that is in most agreement with the direction. In the case of squared-error loss, $\sum_{i=1}^N \Psi(y_i,f(\mathbf{x}_i)) = \sum_{i=1}^N (y_i-f(\mathbf{x}_i))^2$, this algorithm corresponds exactly to residual fitting. There are various ways to extend and improve upon the basic framework suggested in Figure~\ref{fig:GradientBoost}. For example, Friedman (2001) substituted several choices in for $\Psi$ to develop new boosting algorithms for robust regression with least absolute deviation and Huber loss functions. Friedman (2002) showed that a simple subsampling trick can greatly improve predictive performance while simultaneously reducing computation time. Section~\ref{GBMModifications} discusses some of these modifications.
\section{Improving boosting methods using control of the learning rate, sub-sampling, and a decomposition for interpretation} \label{GBMModifications} This section explores the variations of the previous algorithms that have the potential to improve their predictive performance and interpretability. In particular, by controlling the optimization speed or learning rate, introducing low-variance regression methods, and applying ideas from robust regression we can produce non-parametric regression procedures with many desirable properties. As a by-product some of these modifications lead directly into implementations for learning from massive datasets. All these methods take advantage of the general form of boosting \begin{equation} \hat f(\mathbf{x}) \leftarrow \hat f(\mathbf{x}) + \EV(z(y,\hat f(\mathbf{x}))|\mathbf{x}). \end{equation} So far we have taken advantage of this form only by substituting in our favorite regression procedure for $\EV_w(z|\mathbf{x})$. I will discuss some modifications to estimating $\EV_w(z|\mathbf{x})$ that have the potential to improve our algorithm. \subsection{Decreasing the learning rate} As several authors have phrased slightly differently, ``...boosting, whatever flavor, seldom seems to overfit, no matter how many terms are included in the additive expansion''. This is not true as the discussion to \cite{FHT:2000} points out. In the update step of any boosting algorithm we can introduce a learning rate to dampen the proposed move. \begin{equation} \label{eq:shrinkage} \hat f(\mathbf{x}) \leftarrow \hat f(\mathbf{x}) + \lambda \EV(z(y,\hat f(\mathbf{x}))|\mathbf{x}). \end{equation} By multiplying the gradient step by $\lambda$ as in equation~\ref{eq:shrinkage} we have control on the rate at which the boosting algorithm descends the error surface (or ascends the likelihood surface). When $\lambda=1$ we return to performing full gradient steps. Friedman (2001) relates the learning rate to regularization through shrinkage. 
The optimal number of iterations, $T$, and the learning rate, $\lambda$, depend on each other. In practice I set $\lambda$ to be as small as possible and then select $T$ by cross-validation. Performance is best when $\lambda$ is as small as possible, with decreasing marginal utility for smaller and smaller $\lambda$. Slower learning rates do not necessarily scale the number of optimal iterations. That is, if when $\lambda=1.0$ the optimal $T$ is 100 iterations, it does {\it not} necessarily follow that when $\lambda=0.1$ the optimal $T$ is 1000 iterations. \subsection{Variance reduction using subsampling} Friedman (2002) proposed the stochastic gradient boosting algorithm that simply samples uniformly without replacement from the dataset before estimating the next gradient step. He found that this additional step greatly improved performance. We estimate the regression $\EV(z(y,\hat f(\mathbf{x}))|\mathbf{x})$ using a random subsample of the dataset. \subsection{ANOVA decomposition} Certain function approximation methods are decomposable in terms of a ``functional ANOVA decomposition''. That is a function is decomposable as \begin{equation} \label{ANOVAdecomp} f(\mathbf{x}) = \sum_j f_j(x_j) + \sum_{jk} f_{jk}(x_j,x_k) + \sum_{jk\ell} f_{jk\ell}(x_j,x_k,x_\ell) + \cdots. \end{equation} This applies to boosted trees. Regression stumps (one split decision trees) depend on only one variable and fall into the first term of \ref{ANOVAdecomp}. Trees with two splits fall into the second term of \ref{ANOVAdecomp} and so on. By restricting the depth of the trees produced on each boosting iteration we can control the order of approximation. Often additive components are sufficient to approximate a multivariate function well, generalized additive models, the na\"{\i}ve Bayes classifier, and boosted stumps for example.
When the approximation is restricted to a first order we can also produce plots of $x_j$ versus $f_j(x_j)$ to demonstrate how changes in $x_j$ might affect changes in the response variable. \subsection{Relative influence} Friedman (2001) also develops an extension of a variable's ``relative influence'' for boosted estimates. For tree based methods the approximate relative influence of a variable $x_j$ is \begin{equation} \label{RelInfluence} \hspace{0.5in} \hat J_j^2 = \hspace{-0.1in}\sum_{\mathrm{splits~on~}x_j}\hspace{-0.2in}I_t^2 \end{equation} where $I_t^2$ is the empirical improvement by splitting on $x_j$ at that point. Friedman's extension to boosted models is to average the relative influence of variable $x_j$ across all the trees generated by the boosting algorithm. \begin{figure} \aRule Select \begin{itemize} \item a loss function (\texttt{distribution}) \item the number of iterations, $T$ (\texttt{n.trees}) \item the depth of each tree, $K$ (\texttt{interaction.depth}) \item the shrinkage (or learning rate) parameter, $\lambda$ (\texttt{shrinkage}) \item the subsampling rate, $p$ (\texttt{bag.fraction}) \end{itemize} Initialize $\hat f(\mathbf{x})$ to be a constant, $\hat f(\mathbf{x}) = \arg \min_{\rho} \sum_{i=1}^N \Psi(y_i,\rho)$ \\ For $t$ in $1,\ldots,T$ do \begin{enumerate} \item Compute the negative gradient as the working response \begin{equation} z_i = -\frac{\partial}{\partial f(\mathbf{x}_i)} \Psi(y_i,f(\mathbf{x}_i)) \mbox{\Huge $|$}_{f(\mathbf{x}_i)=\hat f(\mathbf{x}_i)} \end{equation} \item Randomly select $p\times N$ cases from the dataset \item Fit a regression tree with $K$ terminal nodes, $g(\mathbf{x})=\EV(z|\mathbf{x})$. 
This tree is fit using only those randomly selected observations \item Compute the optimal terminal node predictions, $\rho_1,\ldots,\rho_K$, as \begin{equation} \rho_k = \arg \min_{\rho} \sum_{\mathbf{x}_i\in S_k} \Psi(y_i,\hat f(\mathbf{x}_i)+\rho) \end{equation} where $S_k$ is the set of $\mathbf{x}$s that define terminal node $k$. Again this step uses only the randomly selected observations. \item Update $\hat f(\mathbf{x})$ as \begin{equation} \hat f(\mathbf{x}) \leftarrow \hat f(\mathbf{x}) + \lambda\rho_{k(\mathbf{x})} \end{equation} where $k(\mathbf{x})$ indicates the index of the terminal node into which an observation with features $\mathbf{x}$ would fall. \end{enumerate} \aRule \caption{Boosting as implemented in \texttt{gbm()}} \label{fig:gbm} \end{figure} \section{Common user options} This section discusses the options to gbm that most users will need to change or tune. \subsection{Loss function} The first and foremost choice is \texttt{distribution}. This should be easily dictated by the application. For most classification problems either \texttt{bernoulli} or \texttt{adaboost} will be appropriate, the former being recommended. For continuous outcomes the choices are \texttt{gaussian} (for minimizing squared error), \texttt{laplace} (for minimizing absolute error), and quantile regression (for estimating percentiles of the conditional distribution of the outcome). Censored survival outcomes should require \texttt{coxph}. Count outcomes may use \texttt{poisson} although one might also consider \texttt{gaussian} or \texttt{laplace} depending on the analytical goals. \subsection{The relationship between shrinkage and number of iterations} The issues that most new users of gbm struggle with are the choice of \texttt{n.trees} and \texttt{shrinkage}. It is important to know that smaller values of \texttt{shrinkage} (almost) always give improved predictive performance. 
That is, setting \texttt{shrinkage=0.001} will almost certainly result in a model with better out-of-sample predictive performance than setting \texttt{shrinkage=0.01}. However, there are computational costs, both storage and CPU time, associated with setting \texttt{shrinkage} to be low. The model with \texttt{shrinkage=0.001} will likely require ten times as many iterations as the model with \texttt{shrinkage=0.01}, increasing storage and computation time by a factor of 10. Figure~\ref{fig:shrinkViters} shows the relationship between predictive performance, the number of iterations, and the shrinkage parameter. Note that the increase in the optimal number of iterations between two choices for shrinkage is roughly equal to the ratio of the shrinkage parameters. It is generally the case that for small shrinkage parameters, 0.001 for example, there is a fairly long plateau in which predictive performance is at its best. My rule of thumb is to set \texttt{shrinkage} as small as possible while still being able to fit the model in a reasonable amount of time and storage. I usually aim for 3,000 to 10,000 iterations with shrinkage rates between 0.01 and 0.001. \begin{figure}[ht] \begin{center} \includegraphics[width=5in]{shrinkage-v-iterations} \end{center} \caption{Out-of-sample predictive performance by number of iterations and shrinkage. Smaller values of the shrinkage parameter offer improved predictive performance, but with decreasing marginal improvement.} \label{fig:shrinkViters} \end{figure} \subsection{Estimating the optimal number of iterations} gbm offers three methods for estimating the optimal number of iterations after the gbm model has been fit, an independent test set (\texttt{test}), out-of-bag estimation (\texttt{OOB}), and $v$-fold cross validation (\texttt{cv}). The function \texttt{gbm.perf} computes the iteration estimate. 
Like Friedman's MART software, the independent test set method uses a single holdout test set to select the optimal number of iterations. If \texttt{train.fraction} is set to be less than 1, then only the \textit{first} \texttt{train.fraction}$\times$\texttt{nrow(data)} will be used to fit the model. Note that if the data are sorted in a systematic way (such as cases for which $y=1$ come first), then the data should be shuffled before running gbm. Those observations not used in the model fit can be used to get an unbiased estimate of the optimal number of iterations. The downside of this method is that a considerable number of observations are used to estimate the single regularization parameter (number of iterations) leaving a reduced dataset for estimating the entire multivariate model structure. Use \texttt{gbm.perf(...,method="test")} to obtain an estimate of the optimal number of iterations using the held out test set. If \texttt{bag.fraction} is set to be greater than 0 (0.5 is recommended), gbm computes an out-of-bag estimate of the improvement in predictive performance. It evaluates the reduction in deviance on those observations not used in selecting the next regression tree. The out-of-bag estimator underestimates the reduction in deviance. As a result, it almost always is too conservative in its selection for the optimal number of iterations. The motivation behind this method was to avoid having to set aside a large independent dataset, which reduces the information available for learning the model structure. Use \texttt{gbm.perf(...,method="OOB")} to obtain the OOB estimate. Lastly, gbm offers $v$-fold cross validation for estimating the optimal number of iterations. If when fitting the gbm model, \texttt{cv.folds=5} then gbm will do 5-fold cross validation. gbm will fit five gbm models in order to compute the cross validation error estimate and then will fit a sixth and final gbm model with \texttt{n.trees}iterations using all of the data. 
The returned model object will have a component labeled \texttt{cv.error}. Note that \texttt{gbm.more} will do additional gbm iterations but will not add to the \texttt{cv.error} component. Use \texttt{gbm.perf(...,method="cv")} to obtain the cross validation estimate. \begin{figure}[ht] \begin{center} \includegraphics[width=5in]{oobperf2} \end{center} \caption{Out-of-sample predictive performance of four methods of selecting the optimal number of iterations. The vertical axis plots performance relative the best. The boxplots indicate relative performance across thirteen real datasets from the UCI repository. See \texttt{demo(OOB-reps)}.} \label{fig:oobperf} \end{figure} Figure~\ref{fig:oobperf} compares the three methods for estimating the optimal number of iterations across 13 datasets. The boxplots show the methods performance relative to the best method on that dataset. For most datasets the method perform similarly, however, 5-fold cross validation is consistently the best of them. OOB, using a 33\% test set, and using a 20\% test set all have datasets for which the perform considerably worse than the best method. My recommendation is to use 5- or 10-fold cross validation if you can afford the computing time. Otherwise you may choose among the other options, knowing that OOB is conservative. \section{Available distributions} This section gives some of the mathematical detail for each of the distribution options that gbm offers. The gbm engine written in C++ has access to a C++ class for each of these distributions. Each class contains methods for computing the associated deviance, initial value, the gradient, and the constants to predict in each terminal node. In the equations shown below, for non-zero offset terms, replace $f(\mathbf{x}_i)$ with $o_i + f(\mathbf{x}_i)$. 
\subsection{Gaussian} \begin{tabular}{ll} Deviance & $\displaystyle \frac{1}{\sum w_i} \sum w_i(y_i-f(\mathbf{x}_i))^2$ \\ Initial value & $\displaystyle f(\mathbf{x})=\frac{\sum w_i(y_i-o_i)}{\sum w_i}$ \\ Gradient & $z_i=y_i - f(\mathbf{x}_i)$ \\ Terminal node estimates & $\displaystyle \frac{\sum w_i(y_i-f(\mathbf{x}_i))}{\sum w_i}$ \end{tabular} \subsection{AdaBoost} \begin{tabular}{ll} Deviance & $\displaystyle \frac{1}{\sum w_i} \sum w_i\exp(-(2y_i-1)f(\mathbf{x}_i))$ \\ Initial value & $\displaystyle \frac{1}{2}\log\frac{\sum y_iw_ie^{-o_i}}{\sum (1-y_i)w_ie^{o_i}}$ \\ Gradient & $\displaystyle z_i= -(2y_i-1)\exp(-(2y_i-1)f(\mathbf{x}_i))$ \\ Terminal node estimates & $\displaystyle \frac{\sum (2y_i-1)w_i\exp(-(2y_i-1)f(\mathbf{x}_i))} {\sum w_i\exp(-(2y_i-1)f(\mathbf{x}_i))}$ \end{tabular} \subsection{Bernoulli} \begin{tabular}{ll} Deviance & $\displaystyle -2\frac{1}{\sum w_i} \sum w_i(y_if(\mathbf{x}_i)-\log(1+\exp(f(\mathbf{x}_i))))$ \\ Initial value & $\displaystyle \log\frac{\sum w_iy_i}{\sum w_i(1-y_i)}$ \\ Gradient & $\displaystyle z_i=y_i-\frac{1}{1+\exp(-f(\mathbf{x}_i))}$ \\ Terminal node estimates & $\displaystyle \frac{\sum w_i(y_i-p_i)}{\sum w_ip_i(1-p_i)}$ \\ & where $\displaystyle p_i = \frac{1}{1+\exp(-f(\mathbf{x}_i))}$ \\ \end{tabular} Notes: \begin{itemize} \item For non-zero offset terms, the computation of the initial value requires Newton-Raphson. Initialize $f_0=0$ and iterate $\displaystyle f_0 \leftarrow f_0 + \frac{\sum w_i(y_i-p_i)}{\sum w_ip_i(1-p_i)}$ where $\displaystyle p_i = \frac{1}{1+\exp(-(o_i+f_0))}$. 
\end{itemize} \subsection{Laplace} \begin{tabular}{ll} Deviance & $\frac{1}{\sum w_i} \sum w_i|y_i-f(\mathbf{x}_i)|$ \\ Initial value & $\mbox{median}_w(y)$ \\ Gradient & $z_i=\mbox{sign}(y_i-f(\mathbf{x}_i))$ \\ Terminal node estimates & $\mbox{median}_w(z)$ \end{tabular} Notes: \begin{itemize} \item $\mbox{median}_w(y)$ denotes the weighted median, defined as the solution to the equation $\frac{\sum w_iI(y_i\leq m)}{\sum w_i}=\frac{1}{2}$ \item \texttt{gbm()} currently does not implement the weighted median and issues a warning when the user uses weighted data with \texttt{distribution="laplace"}. \end{itemize} \subsection{Quantile regression} Contributed by Brian Kriegler (see \cite{Kriegler:2010}). \begin{tabular}{ll} Deviance & $\frac{1}{\sum w_i} \left(\alpha\sum_{y_i>f(\mathbf{x}_i)} w_i(y_i-f(\mathbf{x}_i))\right. +$ \\ & \hspace{0.5in}$\left.(1-\alpha)\sum_{y_i\leq f(\mathbf{x}_i)} w_i(f(\mathbf{x}_i)-y_i)\right)$ \\ Initial value & $\mathrm{quantile}^{(\alpha)}_w(y)$ \\ Gradient & $z_i=\alpha I(y_i>f(\mathbf{x}_i))-(1-\alpha)I(y_i\leq f(\mathbf{x}_i))$ \\ Terminal node estimates & $\mathrm{quantile}^{(\alpha)}_w(z)$ \end{tabular} Notes: \begin{itemize} \item $\mathrm{quantile}^{(\alpha)}_w(y)$ denotes the weighted quantile, defined as the solution to the equation $\frac{\sum w_iI(y_i\leq q)}{\sum w_i}=\alpha$ \item \texttt{gbm()} currently does not implement the weighted median and issues a warning when the user uses weighted data with \texttt{distribution=list(name="quantile")}. 
\end{itemize} \subsection{Cox Proportional Hazard} \begin{tabular}{ll} Deviance & $-2\sum w_i(\delta_i(f(\mathbf{x}_i)-\log(R_i/w_i)))$\\ Gradient & $\displaystyle z_i=\delta_i - \sum_j \delta_j \frac{w_jI(t_i\geq t_j)e^{f(\mathbf{x}_i)}} {\sum_k w_kI(t_k\geq t_j)e^{f(\mathbf{x}_k)}}$ \\ Initial value & 0 \\ Terminal node estimates & Newton-Raphson algorithm \end{tabular} \begin{enumerate} \item Initialize the terminal node predictions to 0, $\mathgbf{\rho}=0$ \item Let $\displaystyle p_i^{(k)}=\frac{\sum_j I(k(j)=k)I(t_j\geq t_i)e^{f(\mathbf{x}_i)+\rho_k}} {\sum_j I(t_j\geq t_i)e^{f(\mathbf{x}_i)+\rho_k}}$ \item Let $g_k=\sum w_i\delta_i\left(I(k(i)=k)-p_i^{(k)}\right)$ \item Let $\mathbf{H}$ be a $k\times k$ matrix with diagonal elements \begin{enumerate} \item Set diagonal elements $H_{mm}=\sum w_i\delta_i p_i^{(m)}\left(1-p_i^{(m)}\right)$ \item Set off diagonal elements $H_{mn}=-\sum w_i\delta_i p_i^{(m)}p_i^{(n)}$ \end{enumerate} \item Newton-Raphson update $\mathgbf{\rho} \leftarrow \mathgbf{\rho} - \mathbf{H}^{-1}\mathbf{g}$ \item Return to step 2 until convergence \end{enumerate} Notes: \begin{itemize} \item $t_i$ is the survival time and $\delta_i$ is the death indicator. \item $R_i$ denotes the hazard for the risk set, $R_i=\sum_{j=1}^N w_jI(t_j\geq t_i)e^{f(\mathbf{x}_i)}$ \item $k(i)$ indexes the terminal node of observation $i$ \item For speed, \texttt{gbm()} does only one step of the Newton-Raphson algorithm rather than iterating to convergence. No appreciable loss of accuracy since the next boosting iteration will simply correct for the prior iterations inadequacy. \item \texttt{gbm()} initially sorts the data by survival time. Doing this reduces the computation of the risk set from $O(n^2)$ to $O(n)$ at the cost of a single up front sort on survival time. After the model is fit, the data are then put back in their original order. 
\end{itemize} \subsection{Poisson} \begin{tabular}{ll} Deviance & -2$\frac{1}{\sum w_i} \sum w_i(y_if(\mathbf{x}_i)-\exp(f(\mathbf{x}_i)))$ \\ Initial value & $\displaystyle f(\mathbf{x})= \log\left(\frac{\sum w_iy_i}{\sum w_ie^{o_i}}\right)$ \\ Gradient & $z_i=y_i - \exp(f(\mathbf{x}_i))$ \\ Terminal node estimates & $\displaystyle \log\frac{\sum w_iy_i}{\sum w_i\exp(f(\mathbf{x}_i))}$ \end{tabular} The Poisson class includes special safeguards so that the most extreme predicted values are $e^{-19}$ and $e^{+19}$. This behavior is consistent with \texttt{glm()}. \subsection{Pairwise} This distribution implements ranking measures following the \emph{LambdaMart} algorithm \cite{Burges:2010}. Instances belong to \emph{groups}; all pairs of items with different labels, belonging to the same group, are used for training. In \emph{Information Retrieval} applications, groups correspond to user queries, and items to (feature vectors of) documents in the associated match set to be ranked. For consistency with typical usage, our goal is to \emph{maximize} one of the \emph{utility} functions listed below. Consider a group with instances $x_1, \dots, x_n$, ordered such that $f(x_1) \geq f(x_2) \geq \dots f(x_n)$; i.e., the \emph{rank} of $x_i$ is $i$, where smaller ranks are preferable. Let $P$ be the set of all ordered pairs such that $y_i > y_j$. \begin{enumerate} \item[{\bf Concordance:}] Fraction of concordant (i.e, correctly ordered) pairs. For the special case of binary labels, this is equivalent to the Area under the ROC Curve. $$\left\{ \begin{array}{l l}\frac{\|\{(i,j)\in P | f(x_i)>f(x_j)\}\|}{\|P\|} & P \neq \emptyset\\ 0 & \mbox{otherwise.} \end{array}\right. 
$$ \item[{\bf MRR:}] Mean reciprocal rank of the highest-ranked positive instance (it is assumed $y_i\in\{0,1\}$): $$\left\{ \begin{array}{l l}\frac{1}{\min\{1 \leq i \leq n |y_i=1\}} & \exists i: \, 1 \leq i \leq n, y_i=1\\ 0 & \mbox{otherwise.}\end{array}\right.$$ \item[{\bf MAP:}] Mean average precision, a generalization of MRR to multiple positive instances: $$\left\{ \begin{array}{l l} \frac{\sum_{1\leq i\leq n | y_i=1} \|\{1\leq j\leq i |y_j=1\}\|\,/\,i}{\|\{1\leq i\leq n | y_i=1\}\|} & \exists i: \, 1 \leq i \leq n, y_i=1\\ 0 & \mbox{otherwise.}\end{array}\right.$$ \item[{\bf nDCG:}] Normalized discounted cumulative gain: $$\frac{\sum_{1\leq i\leq n} \log_2(i+1) \, y_i}{\sum_{1\leq i\leq n} \log_2(i+1) \, y'_i},$$ where $y'_1, \dots, y'_n$ is a reordering of $y_1, \dots,y_n$ with $y'_1 \geq y'_2 \geq \dots \geq y'_n$. \end{enumerate} The generalization to multiple (possibly weighted) groups is straightforward. Sometimes a cut-off rank $k$ is given for \emph{MRR} and \emph{nDCG}, in which case we replace the outer index $n$ by $\min(n,k)$. The initial value for $f(x_i)$ is always zero. We derive the gradient of a cost function whose gradient locally approximates the gradient of the IR measure for a fixed ranking: \begin{eqnarray*} \Phi & = & \sum_{(i,j) \in P} \Phi_{ij}\\ & = & \sum_{(i,j) \in P} |\Delta Z_{ij}| \log \left( 1 + e^{-(f(x_i) - f(x_j))}\right), \end{eqnarray*} where $|\Delta Z_{ij}|$ is the absolute utility difference when swapping the ranks of $i$ and $j$, while leaving all other instances the same. 
Define \begin{eqnarray*} \lambda_{ij} & = & \frac{\partial\Phi_{ij}}{\partial f(x_i)}\\ & = & - |\Delta Z_{ij}| \frac{1}{1 + e^{f(x_i) - f(x_j)}}\\ & = & - |\Delta Z_{ij}| \, \rho_{ij}, \end{eqnarray*} with $$ \rho_{ij} = - \frac{\lambda_{ij }}{|\Delta Z_{ij}|} = \frac{1}{1 + e^{f(x_i) - f(x_j)}}$$ For the gradient of $\Phi$ with respect to $f(x_i)$, define \begin{eqnarray*} \lambda_i & = & \frac{\partial \Phi}{\partial f(x_i)}\\ & = & \sum_{j|(i,j) \in P} \lambda_{ij} - \sum_{j|(j,i) \in P} \lambda_{ji}\\ & = & - \sum_{j|(i,j) \in P} |\Delta Z_{ij}| \, \rho_{ij}\\ & & \mbox{} + \sum_{j|(j,i) \in P} |\Delta Z_{ji}| \, \rho_{ji}. \end{eqnarray*} The second derivative is \begin{eqnarray*} \gamma_i & \stackrel{def}{=} & \frac{\partial^2\Phi}{\partial f(x_i)^2}\\ & = & \sum_{j|(i,j) \in P} |\Delta Z_{ij}| \, \rho_{ij} \, (1-\rho_{ij})\\ & & \mbox{} + \sum_{j|(j,i) \in P} |\Delta Z_{ji}| \, \rho_{ji} \, (1-\rho_{ji}). \end{eqnarray*} Now consider again all groups with associated weights. For a given terminal node, let $i$ range over all contained instances. Then its estimate is $$-\frac{\sum_i v_i\lambda_{i}}{\sum_i v_i \gamma_i},$$ where $v_i=w(\mbox{\em group}(i))/\|\{(j,k)\in\mbox{\em group}(i)\}\|.$ In each iteration, instances are reranked according to the preliminary scores $f(x_i)$ to determine the $|\Delta Z_{ij}|$. Note that in order to avoid ranking bias, we break ties by adding a small amount of random noise. \begin{thebibliography}{77} % start the bibliography \small % put the bibliography in a small font \bibitem{FreundSchapire:1997} Y. Freund and R.E. Schapire (1997). ``A decision-theoretic generalization of on-line learning and an application to boosting,'' \textit{Journal of Computer and System Sciences}, 55(1):119-139. \bibitem{Friedman:2001} J.H. Friedman (2001). ``Greedy Function Approximation: A Gradient Boosting Machine,'' \textit{Annals of Statistics} 29(5):1189-1232. \bibitem{Friedman:2002} J.H. Friedman (2002). 
``Stochastic Gradient Boosting,'' \textit{Computational Statistics and Data Analysis} 38(4):367-378. \bibitem{FHT:2000} J.H. Friedman, T. Hastie, R. Tibshirani (2000). ``Additive Logistic Regression: a Statistical View of Boosting,'' \textit{Annals of Statistics} 28(2):337-374. \bibitem{Kriegler:2010} B. Kriegler and R. Berk (2010). ``Small Area Estimation of the Homeless in Los Angeles, An Application of Cost-Sensitive Stochastic Gradient Boosting,'' \textit{Annals of Applied Statistics} 4(3):1234-1255. \bibitem{Ridgeway:1999} G. Ridgeway (1999). ``The state of boosting,'' \textit{Computing Science and Statistics} 31:172-181. \bibitem{Burges:2010} C. Burges (2010). ``From RankNet to LambdaRank to LambdaMART: An Overview'', \textit{Microsoft Research Technical Report MSR-TR-2010-82} \end{thebibliography} % end the bibliography \end{document} gbm/demo/0000755000176200001440000000000012143232747011750 5ustar liggesusersgbm/demo/robustReg.R0000644000176200001440000000313612134211007014034 0ustar liggesuserslibrary( MASS ) set.seed( 20090415 ) x <- mvrnorm( 100, mu=rep( 0, 5 ) , Sigma=diag( rep( 1, 5 ) ) ) r <- rnorm( 100 ) r <- ifelse( runif( 100 ) < .25 , r * 4, r ) y <- apply( x, 1, sum ) + r d <- data.frame( y=y , x) gmod <- gbm( y ~ ., data=d, distribution="gaussian", n.tree = 2000, shrinkage = .01 , cv.folds=5, verbose = FALSE, n.cores=1) tmod4 <- gbm( y ~ ., data=d, distribution="tdist", # defaults to 4 df n.tree=2000, shrinkage = .01, cv.folds=5, verbose = FALSE, n.cores=1) tmod6 <- gbm( y ~ ., data=d, distribution=list( name="tdist", df=6 ), n.tree=2000, shrinkage = .01, cv.folds=5, verbose = FALSE, n.cores=1) tmod100 <- gbm( y ~ ., data=d, distribution=list( name="tdist", df=100 ), n.tree=2000, shrinkage = .01, cv.folds=5, verbose = FALSE, n.cores=1) par(mfrow=c( 2, 2 ) ) gbest <- gbm.perf( gmod , method="cv" ) t4best <- gbm.perf( tmod4 , method="cv" ) t6best <- gbm.perf( tmod6 , method="cv" ) t100best <- gbm.perf( tmod100 , method="cv" ) qscale <- function( 
x ){ x / abs( diff( quantile( x , prob=c( .25, .75 ) ) ) ) } rg <- qscale( resid( gmod , n.trees=gbest) ) rt4 <- qscale( resid( tmod4 , n.trees=t4best) ) rt6 <- qscale( resid( tmod6 , n.trees=t6best) ) rt100 <- qscale( resid( tmod100 , n.trees=t100best ) ) ylimits <- range(rg, rt4, rt6, rt100) plot( rg, main="Gaussian", ylim=ylimits ); abline( h=0 ) plot( rt4, main="t(4)", ylim=ylimits ); abline( h=0 ) plot( rt6, main="t(6)", ylim=ylimits ); abline( h=0 ) plot( rt100, main="t(100)", ylim=ylimits ); abline( h=0 ) dev.off() gbm/demo/printExamples.R0000644000176200001440000000401612134211007014711 0ustar liggesusers# Create some output to test the new print function and # to be comparable with any future changes to gbm. library( MASS ) data( birthwt ) data( VA ) data( iris ) data( fgl ) data( cpus ) data( housing ) set.seed( 20090417 ) bMod <- gbm( low ~ ., data=birthwt, n.tree=1000, shrinkage=.01, cv.folds=5, verbose = FALSE, n.cores=1) bMod bwt <- birthwt bwt <- bwt[ sample( 1:nrow( bwt ) ),] aMod <- gbm( low ~ ., data=bwt, distribution="adaboost", n.trees=1000, shrinkage=.01, cv.folds=10, train.fraction=.9, verbose = FALSE , n.cores=1) aMod cMod <- gbm( Surv( stime, status ) ~ treat + age + Karn + diag.time + cell + prior, data = VA, n.tree = 1000, shrinkage=.1, cv.folds = 5, verbose = FALSE, n.cores=1) cMod kMod <- gbm( Species ~ . 
, data=iris , n.tree=1000, shrinkage=.1, cv.folds=5, train.fraction=.9, n.cores=1 ) kMod kMod2 <- gbm( type ~ ., data=fgl, n.tree=1000, shrinkage=.01, cv.folds=5, n.cores=1 ) kMod2 mycpus <- cpus mycpus <- mycpus[, -1 ] gMod <- gbm( log( perf ) ~ ., data = mycpus, distribution="gaussian", cv.folds=5, n.trees=1000, shrinkage=.01, verbose = FALSE, n.cores=1) gMod biMod <- gbm( log(perf) ~ ., data=mycpus, cv.folds=5, n.trees=1000, shrinkage=.01, n.cores=1 ) biMod tMod <- gbm( log(perf) ~ ., data=mycpus, distribution="tdist", cv.folds=5, n.trees=1000, shrinkage=.01, interaction.depth= 3, n.cores=1) tMod lMod <- gbm( log(perf) ~ ., data=mycpus, distribution="laplace", cv.folds=5, n.trees=1000, shrinkage=.01, interaction.depth= 3, n.cores=1) lMod qMod <- gbm( log(perf) ~ ., data=mycpus, distribution=list(name="quantile", alpha=.7 ), cv.folds=5, n.trees=1000, shrinkage=.01, interaction.depth= 3, verbose = FALSE, n.cores=1) qMod pMod <- gbm( Freq ~ ., data=housing , distribution="poisson", n.trees=1000, cv.folds=5 , shrinkage=.01, interaction.depth = 3, n.cores=1) pMod gbm/demo/pairwise.R0000644000176200001440000001605612102666411013720 0ustar liggesusers# RANKING EXAMPLE cat("Running ranking (LambdaMart) example.\n") # Create synthetic data that shows how pairwise training can be better # Note: no claim to represent 'real world' data! 
generate.data <- function(N) { # create query groups, with an average size of 25 items each num.queries <- floor(N/25) query <- sample(1:num.queries, N, replace=TRUE) # X1 is a variable determined by query group only query.level <- runif(num.queries) X1 <- query.level[query] # X2 varies with each item X2 <- runif(N) # X3 is uncorrelated with target X3 <- runif(N) # The target Y <- X1 + X2 # Add some random noise to X2 that is correlated with # queries, but uncorrelated with items X2 <- X2 + scale(runif(num.queries))[query] # Add some random noise to target SNR <- 5 # signal-to-noise ratio sigma <- sqrt(var(Y)/SNR) Y <- Y + runif(N, 0, sigma) data.frame(Y, query=query, X1, X2, X3) } cat('Generating data\n') N=1000 data.train <- generate.data(N) # Now we fit 3 different models to the same data: # * Gaussian # * Pairwise with NDCG ranking metric # * Pairwise with CONC (fraction of concordant pairs) ranking metric cat('Fitting a model with gaussian loss function\n') gbm.gaussian <- gbm(Y~X1+X2+X3, # formula data=data.train, # dataset distribution='gaussian', # loss function: gaussian n.trees=2000, # number of trees shrinkage=0.005, # learning rate interaction.depth=3, # number per splits per tree bag.fraction = 0.5, # subsampling fraction train.fraction = 1, # fraction of data for training n.minobsinnode = 10, # minimum number of obs for split keep.data=TRUE, # store copy of input data in model cv.folds=5, # number of cross validation folds verbose = FALSE, # don't print progress n.cores = 1) # use a single core (to prevent possible problems caused by wronly detecting cores) # estimate number of trees best.iter.gaussian <- gbm.perf(gbm.gaussian, method="cv") title('Training of gaussian model') cat('Fitting a model with pairwise loss function (ranking metric: normalized discounted cumulative gain)\n') gbm.ndcg <- gbm(Y~X1+X2+X3, # formula data=data.train, # dataset distribution=list( # loss function: name='pairwise', # pairwise metric="ndcg", # ranking metric: 
normalized discounted cumulative gain group='query'), # column indicating query groups n.trees=2000, # number of trees shrinkage=0.005, # learning rate interaction.depth=3, # number per splits per tree bag.fraction = 0.5, # subsampling fraction train.fraction = 1, # fraction of data for training n.minobsinnode = 10, # minimum number of obs for split keep.data=TRUE, # store copy of input data in model cv.folds=5, # number of cross validation folds verbose = FALSE, # don't print progress n.cores = 1) # use a single core # estimate number of trees best.iter.ndcg <- gbm.perf(gbm.ndcg, method='cv') title('Training of pairwise model with ndcg metric') cat('Fit a model with pairwise loss function (ranking metric: fraction of concordant pairs)\n') gbm.conc <- gbm(Y~X1+X2+X3, # formula data=data.train, # dataset distribution=list( # loss function: name='pairwise', # pairwise metric="conc", # ranking metric: concordant pairs group='query'), # column indicating query groups n.trees=2000, # number of trees shrinkage=0.005, # learning rate interaction.depth=3, # number per splits per tree bag.fraction = 0.5, # subsampling fraction train.fraction = 1, # fraction of data for training n.minobsinnode = 10, # minimum number of obs for split keep.data=TRUE, # store copy of input data in model cv.folds=5, # number of cross validation folds verbose = FALSE, # don't print progress n.cores = 1) # use a single core # estimate number of trees best.iter.conc <- gbm.perf(gbm.conc, method='cv') title('Training of pairwise model with conc metric') # plot variable importance par.old <- par(mfrow=c(1,3)) summary(gbm.gaussian, n.trees=best.iter.gaussian, main='gaussian') summary(gbm.ndcg, n.trees=best.iter.ndcg, main='pairwise (ndcg)') summary(gbm.conc, n.trees=best.iter.conc, main='pairwise (conc)') par(par.old) cat("Generating some new data\n") data.test <- generate.data(N) cat("Calculating predictions\n") predictions <- data.frame(random=runif(N), X2=data.test$X2, 
gaussian=predict(gbm.gaussian, data.test, best.iter.gaussian), pairwise.ndcg=predict(gbm.ndcg, data.test, best.iter.ndcg), pairwise.conc=predict(gbm.conc, data.test, best.iter.conc)) cat("Computing loss metrics\n") result.table <- data.frame(measure=c('random', 'X2 only', 'gaussian', 'pairwise (ndcg)', 'pairwise (conc)'), squared.loss=sapply(1:length(predictions), FUN=function(i) { gbm.loss(y=data.test$Y, predictions[[i]], w=rep(1,N), offset=NA, dist=list(name="gaussian"), baseline=0) }), ndcg5.loss=sapply(1:length(predictions), FUN=function(i) { gbm.loss(y=data.test$Y, predictions[[i]], w=rep(1,N), offset=NA, dist=list(name='pairwise', metric="ndcg"), baseline=0, group=data.test$query, max.rank=5) }), concordant.pairs.loss=sapply(1:length(predictions), FUN=function(i) { gbm.loss(y=data.test$Y, predictions[[i]], w=rep(1,N), offset=NA, dist=list(name='pairwise', metric="conc"), baseline=0, group=data.test$query, max.rank=0) }), row.names=NULL) cat('Performance measures for the different models on the test set (smaller is better):\n') print(result.table,digits=2) # Brief explanation: Variable X1 is not correlated with the order of items, only # with queries. Variable X2 is the only one that is correlated with the order of # items within queries. However, it has a high query-correlated variance. # Therefore, the 'optimal' possible ranking is just by X2. Of course, the # pairwise models don't know this and don't completely achieve the same # accuracy, due to noise and data limitation. # # The Gaussian model uses mostly X1, due to the high variance of X2; on the # contrary, the pairwise models rely mainly on X2. The loss table shows that # both pairwise models are better in terms of the ranking metrics, but worse in # terms of squared loss. 
gbm/demo/multinomial.R0000644000176200001440000000060312102666411014416 0ustar liggesusersdata( iris ) set.seed( 20090415 ) mod <- gbm(Species ~ ., data = iris, distribution = "multinomial", n.tree = 5000, shrinkage = 0.001, cv.folds = 2, bag.fraction = 0.8, interaction.depth = 3, verbose = FALSE) gbm.perf( mod, method="cv" ) mod gbm/demo/gaussian.R0000644000176200001440000000762212134235103013701 0ustar liggesusers# LEAST SQUARES EXAMPLE cat("Running least squares regression example.\n") # create some data N <- 1000 X1 <- runif(N) X2 <- 2*runif(N) X3 <- factor(sample(letters[1:4],N,replace=T)) X4 <- ordered(sample(letters[1:6],N,replace=T)) X5 <- factor(sample(letters[1:3],N,replace=T)) X6 <- 3*runif(N) mu <- c(-1,0,1,2)[as.numeric(X3)] SNR <- 10 # signal-to-noise ratio Y <- X1**1.5 + 2 * (X2**.5) + mu sigma <- sqrt(var(Y)/SNR) Y <- Y + rnorm(N,0,sigma) # create a bunch of missing values X1[sample(1:N,size=100)] <- NA X3[sample(1:N,size=300)] <- NA # random weights if you want to experiment with them # w <- rexp(N) # w <- N*w/sum(w) w <- rep(1,N) data <- data.frame(Y=Y,X1=X1,X2=X2,X3=X3,X4=X4,X5=X5,X6=X6) # fit initial model gbm1 <- gbm(Y~X1+X2+X3+X4+X5+X6, # formula data=data, # dataset var.monotone=c(0,0,0,0,0,0), # -1: monotone decrease, +1: monotone increase, 0: no monotone restrictions distribution="gaussian", # bernoulli, adaboost, gaussian, poisson, coxph, or # list(name="quantile",alpha=0.05) for quantile regression n.trees=2000, # number of trees shrinkage=0.005, # shrinkage or learning rate, 0.001 to 0.1 usually work interaction.depth=3, # 1: additive model, 2: two-way interactions, etc bag.fraction = 0.5, # subsampling fraction, 0.5 is probably best train.fraction = 0.5, # fraction of data for training, first train.fraction*N used for training n.minobsinnode = 10, # minimum number of obs needed in each node keep.data=TRUE, cv.folds=10, # do 10-fold cross-validation verbose = FALSE) # don't print progress # plot the performance best.iter <- 
gbm.perf(gbm1,method="OOB") # returns out-of-bag estimated best number of trees best.iter <- gbm.perf(gbm1,method="test") # returns test set estimate of best number of trees best.iter <- gbm.perf(gbm1,method="cv") # returns cv estimate of best number of trees # plot variable influence summary(gbm1,n.trees=1) # based on the first tree summary(gbm1,n.trees=best.iter) # based on the estimated best number of trees # print the first and last trees print(pretty.gbm.tree(gbm1,1)) print(pretty.gbm.tree(gbm1,gbm1$n.trees)) print(gbm1$c.splits[1:3]) # make some new data N <- 1000 X1 <- runif(N) X2 <- 2*runif(N) X3 <- factor(sample(letters[1:4],N,replace=TRUE)) X4 <- ordered(sample(letters[1:6],N,replace=TRUE)) X5 <- factor(sample(letters[1:3],N,replace=TRUE)) X6 <- 3*runif(N) mu <- c(-1,0,1,2)[as.numeric(X3)] Y <- X1**1.5 + 2 * (X2**.5) + mu Y <- Y + rnorm(N,0,sigma) data2 <- data.frame(Y=Y,X1=X1,X2=X2,X3=X3,X4=X4,X5=X5,X6=X6) print(data2[1:10,]) # predict on the new data using "best" number of trees f.predict <- predict(gbm1,data2,best.iter) # f.predict will be on the canonical scale (logit,log,etc.) 
print(f.predict[1:10]) # least squares error print(sum((data2$Y-f.predict)^2)) # create marginal plots # plot variable X1,X2,X3 after "best" iterations par(mfrow=c(1,3)) plot(gbm1,1,best.iter) plot(gbm1,2,best.iter) plot(gbm1,3,best.iter) par(mfrow=c(1,1)) plot(gbm1,1:2,best.iter) # contour plot of variables 1 and 2 after "best" number iterations plot(gbm1,2:3,best.iter) # lattice plot of variables 2 and 3 after "best" number iterations plot(gbm1,3:4,best.iter) # lattice plot of variables 2 and 3 after "best" number iterations plot(gbm1,c(1,2,6),best.iter,cont=20) # 3-way plots plot(gbm1,1:3,best.iter) plot(gbm1,2:4,best.iter) plot(gbm1,3:5,best.iter) # check interactions interact.gbm(gbm1,data=data,i.var=1:2,n.trees=best.iter) # get all two way interactions i.var <- subset(expand.grid(x1=1:6,x2=1:6), x1=data2$tt[i])*exp(f.predict) ) } cat("Boosting:",sum( data2$delta*( f.predict - log(risk) ) ),"\n") # linear model coxph1 <- coxph(Surv(tt,delta)~X1+X2+X3,data=data) f.predict <- predict(coxph1,newdata=data2) risk <- rep(0,N) for(i in 1:N) { risk[i] <- sum( (data2$tt>=data2$tt[i])*exp(f.predict) ) } cat("Linear model:",sum( data2$delta*( f.predict - log(risk) ) ),"\n") gbm/demo/bernoulli.R0000644000176200001440000000641312134211007014054 0ustar liggesusers# LOGISTIC REGRESSION EXAMPLE cat("Running logistic regression example.\n") # create some data N <- 1000 X1 <- runif(N) X2 <- runif(N) X3 <- factor(sample(letters[1:4],N,replace=T)) mu <- c(-1,0,1,2)[as.numeric(X3)] p <- 1/(1+exp(-(sin(3*X1) - 4*X2 + mu))) Y <- rbinom(N,1,p) # random weights if you want to experiment with them w <- rexp(N) w <- N*w/sum(w) data <- data.frame(Y=Y,X1=X1,X2=X2,X3=X3) # fit initial model gbm1 <- gbm(Y~X1+X2+X3, # formula data=data, # dataset weights=w, var.monotone=c(0,0,0), # -1: monotone decrease, +1: monotone increase, 0: no monotone restrictions distribution="bernoulli", n.trees=3000, # number of trees shrinkage=0.001, # shrinkage or learning rate, 0.001 to 0.1 usually work 
interaction.depth=3, # 1: additive model, 2: two-way interactions, etc bag.fraction = 0.5, # subsampling fraction, 0.5 is probably best train.fraction = 0.5, # fraction of data for training, first train.fraction*N used for training cv.folds=5, # do 5-fold cross-validation n.minobsinnode = 10, # minimum total weight needed in each node verbose = FALSE) # don't print progress # plot the performance best.iter.oob <- gbm.perf(gbm1,method="OOB") # returns out-of-bag estimated best number of trees print(best.iter.oob) best.iter.cv <- gbm.perf(gbm1,method="cv") # returns 5-fold cv estimate of best number of trees print(best.iter.cv) best.iter.test <- gbm.perf(gbm1,method="test") # returns test set estimate of best number of trees print(best.iter.test) best.iter <- best.iter.test # plot variable influence summary(gbm1,n.trees=1) # based on the first tree summary(gbm1,n.trees=best.iter) # based on the estimated best number of trees # create marginal plots # plot variable X1,X2,X3 after "best" iterations par(mfrow=c(1,3)) plot.gbm(gbm1,1,best.iter) plot.gbm(gbm1,2,best.iter) plot.gbm(gbm1,3,best.iter) par(mfrow=c(1,1)) plot.gbm(gbm1,1:2,best.iter) # contour plot of variables 1 and 2 after "best" number iterations plot.gbm(gbm1,2:3,best.iter) # lattice plot of variables 2 and 3 after "best" number iterations # 3-way plot plot.gbm(gbm1,1:3,best.iter) # print the first and last trees print(pretty.gbm.tree(gbm1,1)) print(pretty.gbm.tree(gbm1,gbm1$n.trees)) # make some new data N <- 1000 X1 <- runif(N) X2 <- runif(N) X3 <- factor(sample(letters[1:4],N,replace=T)) mu <- c(-1,0,1,2)[as.numeric(X3)] p <- 1/(1+exp(-(sin(3*X1) - 4*X2 + mu))) Y <- rbinom(N,1,p) data2 <- data.frame(Y=Y,X1=X1,X2=X2,X3=X3) # predict on the new data using "best" number of trees # f.predict will be on the canonical scale (logit,log,etc.) 
f.predict <- predict.gbm(gbm1,data2, n.trees=c(best.iter.oob,best.iter.cv,best.iter.test)) # transform to probability scale for logistic regression p.pred <- 1/(1+exp(-f.predict)) # calibration plot for logistic regression - well calibrated means a 45 degree line par(mfrow=c(1,1)) calibrate.plot(Y,p.pred[,3]) # logistic error sum(data2$Y*f.predict[,1] - log(1+exp(f.predict[,1]))) sum(data2$Y*f.predict[,2] - log(1+exp(f.predict[,2]))) sum(data2$Y*f.predict[,3] - log(1+exp(f.predict[,3]))) gbm/demo/OOB-reps.R0000644000176200001440000004661112134211007013453 0ustar liggesusersset.seed(06182001) # number of replicates n.reps <- 20 # should data be loaded from the web? If FALSE use alt.path load.from.web <- TRUE run.all <- TRUE # if data not downloaded from the web, give path to datasets alt.path <- "" n.datasets <- 12 # needs to match the number of datasets i.data <- 0 squared.error.loss <- function(y,f.x) { mean((y-f.x)^2) } bernoulli.loglikelihood <- function(y,f.x) { mean(y*f.x - log(1+exp(f.x))) } if(run.all) { dataset <- vector("list",n.datasets) # abalone i.data <- i.data + 1 dataset[[i.data]] <- list(name="Abalone", distribution="gaussian", urlpath="http://ftp.ics.uci.edu/pub/machine-learning-databases/abalone/", filename="abalone.data", var.names=c("sex","length","diameter","height","whole.weight", "shucked.weight","viscera.weight","shell.weight", "Rings"), outcome="Rings", factors="sex", na.strings="", sep=",", shrinkage=0.02) # Adult i.data <- i.data + 1 dataset[[i.data]] <- list(name="Adult", distribution="bernoulli", urlpath="http://ftp.ics.uci.edu/pub/machine-learning-databases/adult/", filename="adult.data", var.names=c("age","workclass","w","education","education.num", "marital.status","occupation","relationship","race", "male","capital.gain","capital.loss", "hours.per.week","native.country","income"), outcome="income", factors=c("workclass","education","marital.status","occupation", "relationship","race","native.country","male"), na.strings="?", 
sep=",", shrinkage=0.04) # Housing i.data <- i.data + 1 dataset[[i.data]] <- list(name="Boston housing", distribution="gaussian", urlpath="http://ftp.ics.uci.edu/pub/machine-learning-databases/housing/", filename="housing.data", var.names=c("CRIM","ZN","INDUS","CHAS","NOX","RM","AGE", "DIS","RAD","TAX","PTRATIO","B","LSTAT","MEDV"), factors=NULL, outcome="MEDV", na.strings="", sep="", shrinkage=0.005) # mushrooms i.data <- i.data + 1 dataset[[i.data]] <- list(name="Mushrooms", distribution="bernoulli", urlpath="http://ftp.ics.uci.edu/pub/machine-learning-databases/mushroom/", filename="agaricus-lepiota.data", var.names=c("poisonous","cap-shape","cap-surface","cap-color", "bruises","odor","gill-attachment", "gill-spacing","gill-size","gill-color", "stalk-shape","stalk-root","stalk-surface-above-ring", "stalk-surface-below-ring","stalk-color-above-ring", "stalk-color-below-ring","veil-type","veil-color", "ring-number","ring-type","spore-print-color", "population","habitat"), factors=c("cap-shape","cap-surface","cap-color", "bruises","odor","gill-attachment", "gill-spacing","gill-size","gill-color", "stalk-shape","stalk-root","stalk-surface-above-ring", "stalk-surface-below-ring","stalk-color-above-ring", "stalk-color-below-ring","veil-type","veil-color", "ring-number","ring-type","spore-print-color", "population","habitat"), outcome="poisonous", drop.vars=c("veil-type"), na.strings="?", sep=",", shrinkage=0.05) # autoprices 1 i.data <- i.data + 1 dataset[[i.data]] <- list(name="Auto Prices", distribution="gaussian", urlpath="http://ftp.ics.uci.edu/pub/machine-learning-databases/autos/", filename="imports-85.data", var.names=c("symboling","normalizedlosses","make","fueltype", "aspiration","ndoors","bodystyle", "drivewheels","enginelocation", "wheelbase", "length", "width", "height", "curbweight", "enginetype", "numerofcylinders", "enginesize", "fuelsystem", "bore", "stroke", "compressionratio", "horsepower", "peakrpm", "citympg", "highwatmpg", "price"), 
factors=c("symboling","make","fueltype","aspiration","ndoors", "bodystyle","drivewheels","enginelocation", "enginetype", "numerofcylinders", "fuelsystem"), outcome="price", na.strings="?", sep=",", shrinkage=0.002) # auto MPG i.data <- i.data + 1 dataset[[i.data]] <- list(name="Auto MPG", distribution="gaussian", urlpath="http://ftp.ics.uci.edu/pub/machine-learning-databases/auto-mpg/", filename="auto-mpg.data", var.names=c("mpg","cylinders","displacement","horsepower","weight", "acceleration","modelyear","origin","carname"), factors=c("cylinders", "modelyear", "origin"), outcome="mpg", drop.vars=c("carname"), na.strings="?", sep="", shrinkage=0.005) # CPU i.data <- i.data + 1 dataset[[i.data]] <- list(name="CPU Performance", distribution="gaussian", urlpath="http://ftp.ics.uci.edu/pub/machine-learning-databases/cpu-performance/", filename="machine.data", var.names=c("vendorname","modelname","myct","mmin","mmax", "cach","chmin","chmax","prp","ERP"), factors=c("vendorname","modelname"), outcome="prp", na.strings="", drop.vars=c("vendorname","modelname"), sep=",", shrinkage=0.01) # credit i.data <- i.data + 1 dataset[[i.data]] <- list(name="Credit rating", distribution="bernoulli", urlpath="http://ftp.ics.uci.edu/pub/machine-learning-databases/credit-screening/", filename="crx.data", var.names=c("A1","A2","A3","A4","A5","A6","A7","A8","A9","A10","A11", "A12", "A13", "A14", "A15","CLASS"), factors=c("A1","A4", "A5", "A6", "A7", "A9", "A10", "A12", "A13","CLASS"), outcome="CLASS", na.strings="?", sep=",", shrinkage=0.005) # Haberman i.data <- i.data + 1 dataset[[i.data]] <- list(name="Haberman", distribution="bernoulli", urlpath="http://ftp.ics.uci.edu/pub/machine-learning-databases/haberman/", filename="haberman.data", var.names=c("age","year","nodes","CLASS"), outcome="CLASS", factors=c("CLASS"), na.strings="", sep=",", shrinkage=0.001) # Diabetes i.data <- i.data + 1 dataset[[i.data]] <- list(name="Diabetes", distribution="bernoulli", 
urlpath="http://ftp.ics.uci.edu/pub/machine-learning-databases/pima-indians-diabetes/", filename="pima-indians-diabetes.data", var.names=c("n_preg","plasma","blood-pre","triceps","serum", "mass-index","pedigree","age","CLASS"), factors=c("CLASS"), outcome="CLASS", na.strings="?", sep=",", shrinkage=0.005) # Ionosphere i.data <- i.data + 1 dataset[[i.data]] <- list(name="Ionosphere", distribution="bernoulli", urlpath="http://ftp.ics.uci.edu/pub/machine-learning-databases/ionosphere/", filename="ionosphere.data", var.names=c("A1","A2","A3","A4","A5","A6","A7","A8","A9","A10","A11", "A12","A13","A14","A15","A16","A17","A18","A19","A20", "A21","A22","A23","A24","A25","A26","A27","A28","A29", "A30","A31","A32","A33","A34","CLASS"), factors=c("CLASS"), outcome="CLASS", na.strings="", sep=",", shrinkage=0.005) # Breast cancer i.data <- i.data + 1 dataset[[i.data]] <- list(name="breast cancer", distribution="bernoulli", urlpath="http://ftp.ics.uci.edu/pub/machine-learning-databases/breast-cancer-wisconsin/", filename="breast-cancer-wisconsin.data", var.names=c("CODE","thickness","cellsize","cellshape","adhension", "singleecell","bnuclei","chromatin","nnucleo","mitoses", "CLASS"), factors=c("CODE","CLASS"), outcome="CLASS", drop.vars=c("CODE"), na.strings="?", sep=",", shrinkage=0.005) if(FALSE) # this dataset is not public, can substitute other datasets { # time in treatment i.data <- i.data + 1 dataset[[i.data]] <- list(name="time in treatment", distribution="gaussian", urlpath="./", filename="txdet.csv", var.names=NULL, factors=c("b1","xsite4","b3new","b8new","s1a1new","m3dnew","e1new","e13anew"), outcome="txdet", drop.vars=c("xpid","xobs","maxcefu","recovfu","nontxdet","s7e5","r2f", "r3a9","e4a6","l5p","v2.4","v2.7","v2.8"), na.strings="NA", sep=",", shrinkage=0.0022) } # Load datasets for(i.data in 1:n.datasets) # for(i.data in which(sapply(dataset,function(x){is.null(x$oob.iter)}))) { # Progress cat("Dataset ",i.data,":",dataset[[i.data]]$name," N = ") filename <- 
paste(switch(load.from.web+1, alt.path, dataset[[i.data]]$url), dataset[[i.data]]$filename, sep="") dataset[[i.data]]$data <- read.table(file=filename, na.strings=dataset[[i.data]]$na.strings, sep=dataset[[i.data]]$sep, header=is.null(dataset[[i.data]]$var.names)) if(!is.null(dataset[[i.data]]$var.names)) { names(dataset[[i.data]]$data) <- dataset[[i.data]]$var.names } # take care of nominal predictors for(j in dataset[[i.data]]$factors) { dataset[[i.data]]$data[,j] <- factor(dataset[[i.data]]$data[,j]) } # take care of factor binary outcomes if( with(dataset[[i.data]], (distribution=="bernoulli") && is.factor(data[,outcome])) ) { dataset[[i.data]]$data[,dataset[[i.data]]$outcome] <- with(dataset[[i.data]], as.numeric(data[,outcome])-1) } # drop observations with missing outcomes i <- with(dataset[[i.data]], !is.na(data[,outcome])) dataset[[i.data]]$data <- dataset[[i.data]]$data[i,] # drop selected predictor variables if(!is.null(dataset[[i.data]]$drop.vars)) { j <- match(dataset[[i.data]]$drop.vars,names(dataset[[i.data]]$data)) dataset[[i.data]]$data <- dataset[[i.data]]$data[,-j] } dataset[[i.data]]$loss <- switch(dataset[[i.data]]$distribution, gaussian=squared.error.loss, bernoulli=bernoulli.loglikelihood) cat(nrow(dataset[[i.data]]$data),"\n") } save(dataset,file="dataset.RData") } # run.all # make sure gbm is installed if(!is.element("gbm",installed.packages()[,1])) { stop("The gbm package is not installed. Use install.packages(\"gbm\") to install. 
On Unix machines this must be executed in an R session started as root or installed to a local library, see help(install.packages)") } library(gbm) # loop over all the datasets i.datasets <- which(sapply(dataset,function(x){is.null(x$oob.loss)})) for(i.data in i.datasets) # for(i.data in which(sapply(dataset,function(x){is.null(x$oob.iter)}))) { N <- nrow(dataset[[i.data]]$data) # Progress cat("Dataset ",i.data,":",dataset[[i.data]]$name," N = ",N,"\n",sep="") # construct model formula for this dataset formula.fit <- formula(paste(dataset[[i.data]]$outcome,"~ .")) # initialize prediction pred.oob <- pred.base <- pred.test33 <- pred.test20 <- pred.cv5 <- rep(0,N) # track iteration estimates dataset[[i.data]]$oob.iter <- rep(NA,n.reps) dataset[[i.data]]$test33.iter <- rep(NA,n.reps) dataset[[i.data]]$test20.iter <- rep(NA,n.reps) dataset[[i.data]]$cv5.iter <- rep(NA,n.reps) # do replicates for(i.rep in 1:n.reps) { cat("rep:",i.rep,"") i.train <- sample(1:N,size=0.75*N,replace=FALSE) i.valid <- (1:N)[-i.train] # use out-of-bag method cat("OOB, ") gbm1 <- gbm(formula.fit, data=dataset[[i.data]]$data[i.train,], distribution=dataset[[i.data]]$distribution, train.fraction=1.0, bag.fraction=0.5, shrinkage=dataset[[i.data]]$shrinkage, n.trees=1000, verbose = FALSE) best.iter.oob <- gbm.perf(gbm1,method="OOB",plot.it=FALSE) while((gbm1$n.trees-best.iter.oob < 1000) && !all(gbm1$oobag.improve[(gbm1$n.trees-100):gbm1$n.trees] < 1e-6)) { gbm1 <- gbm.more(gbm1,1000) best.iter.oob <- gbm.perf(gbm1,method="OOB",plot.it=FALSE) } pred.oob[i.valid] <- predict(gbm1, newdata=dataset[[i.data]]$data[i.valid,], n.trees=best.iter.oob) dataset[[i.data]]$oob.iter[i.rep] <- best.iter.oob # use a 1/3 test set cat("33% test data, ") gbm1 <- gbm(formula.fit, data=dataset[[i.data]]$data[i.train,], distribution=dataset[[i.data]]$distribution, train.fraction=2/3, bag.fraction=0.5, shrinkage=dataset[[i.data]]$shrinkage, n.trees=1000, verbose = FALSE) best.iter.test <- 
gbm.perf(gbm1,method="test",plot.it=FALSE) while((gbm1$n.trees-best.iter.test < 1000) && !all(abs(gbm1$valid.error[(gbm1$n.trees-100):gbm1$n.trees]) < 1e-6)) { gbm1 <- gbm.more(gbm1,1000) best.iter.test <- gbm.perf(gbm1,method="test",plot.it=FALSE) } pred.test33[i.valid] <- predict(gbm1, newdata=dataset[[i.data]]$data[i.valid,], n.trees=best.iter.test) dataset[[i.data]]$test33.iter[i.rep] <- best.iter.test # use a 20% test set cat("20% test data, ") gbm1 <- gbm(formula.fit, data=dataset[[i.data]]$data[i.train,], distribution=dataset[[i.data]]$distribution, train.fraction=0.80, bag.fraction=0.5, shrinkage=dataset[[i.data]]$shrinkage, n.trees=1000, verbose = FALSE) best.iter.test <- gbm.perf(gbm1,method="test",plot.it=FALSE) while((gbm1$n.trees-best.iter.test < 1000) && !all(abs(gbm1$valid.error[(gbm1$n.trees-100):gbm1$n.trees]) < 1e-6)) { gbm1 <- gbm.more(gbm1,1000) best.iter.test <- gbm.perf(gbm1,method="test",plot.it=FALSE) } pred.test20[i.valid] <- predict(gbm1, newdata=dataset[[i.data]]$data[i.valid,], n.trees=best.iter.test) dataset[[i.data]]$test20.iter[i.rep] <- best.iter.test # use 5-fold cross-validation cat("5-fold CV") n.cv <- 5 cv.group <- sample(rep(1:n.cv,length=length(i.train))) max.iters <- round(best.iter.test*1.2) cv.loss <- matrix(0,ncol=n.cv,nrow=max.iters) for(i.cv in 1:n.cv) { cat(".") i <- order(cv.group==i.cv) # used to put the held out obs last gbm1 <- gbm(formula.fit, data=dataset[[i.data]]$data[i.train[i],], distribution=dataset[[i.data]]$distribution, train.fraction=mean(cv.group!=i.cv), bag.fraction=0.5, shrinkage=dataset[[i.data]]$shrinkage, n.trees=max.iters, verbose = FALSE) cv.loss[,i.cv] <- gbm1$valid.error } cat("\n") best.iter.cv <- which.min(apply(cv.loss,1,weighted.mean,w=table(cv.group))) gbm1 <- gbm(formula.fit, data=dataset[[i.data]]$data[i.train,], distribution=dataset[[i.data]]$distribution, train.fraction=1.0, bag.fraction=0.5, shrinkage=dataset[[i.data]]$shrinkage, n.trees=best.iter.cv, verbose = FALSE) pred.cv5[i.valid] 
<- predict(gbm1, newdata=dataset[[i.data]]$data[i.valid,], n.trees=best.iter.cv) dataset[[i.data]]$cv5.iter[i.rep] <- best.iter.cv # baseline prediction pred.base[i.valid] <- gbm1$initF # evalute the methods dataset[[i.data]]$base.loss[i.rep] <- with(dataset[[i.data]], loss(data[i.valid,outcome],pred.base[i.valid])) dataset[[i.data]]$oob.loss[i.rep] <- with(dataset[[i.data]], loss(data[i.valid,outcome],pred.oob[i.valid])) dataset[[i.data]]$test33.loss[i.rep] <- with(dataset[[i.data]], loss(data[i.valid,outcome],pred.test33[i.valid])) dataset[[i.data]]$test20.loss[i.rep] <- with(dataset[[i.data]], loss(data[i.valid,outcome],pred.test20[i.valid])) dataset[[i.data]]$cv5.loss[i.rep] <- with(dataset[[i.data]], loss(data[i.valid,outcome],pred.cv5[i.valid])) with(dataset[[i.data]], cat(oob.iter[i.rep],test33.iter[i.rep],test20.iter[i.rep], cv5.iter[i.rep],"\n")) } save.image(compress=TRUE) } #rm(dataset) save.image(compress=TRUE) results <- data.frame(problem=sapply(dataset,function(x){x$name}), N=sapply(dataset,function(x){nrow(x$data)}), d=sapply(dataset,function(x){ncol(x$data)-1}), loss=sapply(dataset,function(x){x$distribution}), base=sapply(dataset,function(x){mean(x$base.loss)}), oob=sapply(dataset,function(x){mean(x$oob.loss)}), test33=sapply(dataset,function(x){mean(x$test33.loss)}), test20=sapply(dataset,function(x){mean(x$test20.loss)}), cv5=sapply(dataset,function(x){mean(x$cv5.loss)})) j <- match(c("base","oob","test33","test20","cv5"),names(results)) results[results$loss=="bernoulli",j] <- -2*results[results$loss=="bernoulli",j] results$win <- c("base","oob","test33","test20","cv5")[apply(results[,j],1,which.min)] results$oob.rank <- apply(results[,j],1,rank)[2,] results$perf <- (results$base-results$oob)/apply(results$base-results[,j],1,max) plot(0,0,ylim=c(0,14000),xlim=c(0,n.datasets+1), xlab="Dataset",ylab="Number of iterations", type="n",axes=FALSE) lines(sapply(dataset,function(x){mean(x$oob.iter)}), col="blue") 
lines(sapply(dataset,function(x){mean(x$test33.iter)}), col="red") lines(sapply(dataset,function(x){mean(x$test20.iter)}), col="green") lines(sapply(dataset,function(x){mean(x$cv5.iter)}), col="purple") axis(1,at=1:n.datasets,labels=as.character(results$problem)) gbm/demo/00Index0000644000176200001440000000104712134211007013066 0ustar liggesusersgaussian example of boosting for least squares, aka LSBoost bernoulli example of boosting for logistic regression, aka LogitBoost coxph example of boosting the Cox propotional hazards model OOB-reps experiment testing the predictive performance of the OOB estimator multinomial example of multiclass prediction printExamples simple examples used to test gbm updates robustReg comparison of gaussian and t-distribution pairwise comparison of gaussian and pairwise distributions (LambdaMART) for ranking gbm/R/0000755000176200001440000000000012143232747011225 5ustar liggesusersgbm/R/test.gbm.R0000644000176200001440000002431712134211007013064 0ustar liggesuserstest.gbm <- function(){ # Based on example in R package # Gaussian example ############################################################################ ## test Gaussian distribution gbm model set.seed(1) cat("Running least squares regression example.\n") # create some data N <- 1000 X1 <- runif(N) X2 <- 2*runif(N) X3 <- factor(sample(letters[1:4],N,replace=T)) X4 <- ordered(sample(letters[1:6],N,replace=T)) X5 <- factor(sample(letters[1:3],N,replace=T)) X6 <- 3*runif(N) mu <- c(-1,0,1,2)[as.numeric(X3)] SNR <- 10 # signal-to-noise ratio Y <- X1**1.5 + 2 * (X2**.5) + mu sigma <- sqrt(var(Y)/SNR) Y <- Y + rnorm(N,0,sigma) # create a bunch of missing values X1[sample(1:N,size=100)] <- NA X3[sample(1:N,size=300)] <- NA w <- rep(1,N) data <- data.frame(Y=Y,X1=X1,X2=X2,X3=X3,X4=X4,X5=X5,X6=X6) # fit initial model gbm1 <- gbm(Y~X1+X2+X3+X4+X5+X6, # formula data=data, # dataset var.monotone=c(0,0,0,0,0,0), # -1: monotone decrease, +1: monotone increase, 0: no monotone restrictions 
distribution="gaussian", # bernoulli, adaboost, gaussian, poisson, coxph, or # list(name="quantile",alpha=0.05) for quantile regression n.trees=2000, # number of trees shrinkage=0.005, # shrinkage or learning rate, 0.001 to 0.1 usually work interaction.depth=3, # 1: additive model, 2: two-way interactions, etc bag.fraction = 0.5, # subsampling fraction, 0.5 is probably best train.fraction = 0.5, # fraction of data for training, first train.fraction*N used for training n.minobsinnode = 10, # minimum number of obs needed in each node keep.data=TRUE, cv.folds=10) # do 10-fold cross-validation # Get best model best.iter <- gbm.perf(gbm1,method="cv", plot.it=FALSE) # returns cv estimate of best number of trees set.seed(2) # make some new data N <- 1000 X1 <- runif(N) X2 <- 2*runif(N) X3 <- factor(sample(letters[1:4],N,replace=TRUE)) X4 <- ordered(sample(letters[1:6],N,replace=TRUE)) X5 <- factor(sample(letters[1:3],N,replace=TRUE)) X6 <- 3*runif(N) mu <- c(-1,0,1,2)[as.numeric(X3)] # Actual underlying signal Y <- X1**1.5 + 2 * (X2**.5) + mu # Want to see how close predictions are to the underlying signal; noise would just interfere with this # Y <- Y + rnorm(N,0,sigma) data2 <- data.frame(Y=Y,X1=X1,X2=X2,X3=X3,X4=X4,X5=X5,X6=X6) # predict on the new data using "best" number of trees f.predict <- predict(gbm1,data2,best.iter) # f.predict will be on the canonical scale (logit,log,etc.) 
# Base the validation tests on observed discrepancies checkTrue(abs(mean(data2$Y-f.predict)) < 0.01, msg="Gaussian absolute error within tolerance") checkTrue(sd(data2$Y-f.predict) < sigma , msg="Gaussian squared erroor within tolerance") ############################################################################ ## test coxph distribution gbm model ## COX PROPORTIONAL HAZARDS REGRESSION EXAMPLE cat("Running cox proportional hazards regression example.\n") # create some data set.seed(1) N <- 3000 X1 <- runif(N) X2 <- runif(N) X3 <- factor(sample(letters[1:4],N,replace=T)) mu <- c(-1,0,1,2)[as.numeric(X3)] f <- 0.5*sin(3*X1 + 5*X2^2 + mu/10) tt.surv <- rexp(N,exp(f)) tt.cens <- rexp(N,0.5) delta <- as.numeric(tt.surv <= tt.cens) tt <- apply(cbind(tt.surv,tt.cens),1,min) # throw in some missing values X1[sample(1:N,size=100)] <- NA X3[sample(1:N,size=300)] <- NA # random weights if you want to experiment with them w <- rep(1,N) data <- data.frame(tt=tt,delta=delta,X1=X1,X2=X2,X3=X3) # fit initial model gbm1 <- gbm(Surv(tt,delta)~X1+X2+X3, # formula data=data, # dataset weights=w, var.monotone=c(0,0,0), # -1: monotone decrease, +1: monotone increase, 0: no monotone restrictions distribution="coxph", n.trees=3000, # number of trees shrinkage=0.001, # shrinkage or learning rate, 0.001 to 0.1 usually work interaction.depth=3, # 1: additive model, 2: two-way interactions, etc bag.fraction = 0.5, # subsampling fraction, 0.5 is probably best train.fraction = 0.5, # fraction of data for training, first train.fraction*N used for training cv.folds = 5, # do 5-fold cross-validation n.minobsinnode = 10, # minimum total weight needed in each node keep.data = TRUE) best.iter <- gbm.perf(gbm1,method="test", plot.it=FALSE) # returns test set estimate of best number of trees # make some new data set.seed(2) N <- 1000 X1 <- runif(N) X2 <- runif(N) X3 <- factor(sample(letters[1:4],N,replace=T)) mu <- c(-1,0,1,2)[as.numeric(X3)] f <- 0.5*sin(3*X1 + 5*X2^2 + mu/10) # -0.5 <= f <= 0.5 
via sin fn. tt.surv <- rexp(N,exp(f)) tt.cens <- rexp(N,0.5) data2 <- data.frame(tt=apply(cbind(tt.surv,tt.cens),1,min), delta=as.numeric(tt.surv <= tt.cens), f=f, X1=X1,X2=X2,X3=X3) # predict on the new data using "best" number of trees # f.predict will be on the canonical scale (logit,log,etc.) f.predict <- predict(gbm1,data2,best.iter) #plot(data2$f,f.predict) # Use observed sd checkTrue(sd(data2$f - f.predict) < 0.4, msg="Coxph: squared error within tolerance") ############################################################################ ## Test bernoulli distribution gbm model set.seed(1) cat("Running logistic regression example.\n") # create some data N <- 1000 X1 <- runif(N) X2 <- runif(N) X3 <- factor(sample(letters[1:4],N,replace=T)) mu <- c(-1,0,1,2)[as.numeric(X3)] p <- 1/(1+exp(-(sin(3*X1) - 4*X2 + mu))) Y <- rbinom(N,1,p) # random weights if you want to experiment with them w <- rexp(N) w <- N*w/sum(w) data <- data.frame(Y=Y,X1=X1,X2=X2,X3=X3) # fit initial model gbm1 <- gbm(Y~X1+X2+X3, # formula data=data, # dataset weights=w, var.monotone=c(0,0,0), # -1: monotone decrease, +1: monotone increase, 0: no monotone restrictions distribution="bernoulli", n.trees=3000, # number of trees shrinkage=0.001, # shrinkage or learning rate, 0.001 to 0.1 usually work interaction.depth=3, # 1: additive model, 2: two-way interactions, etc bag.fraction = 0.5, # subsampling fraction, 0.5 is probably best train.fraction = 0.5, # fraction of data for training, first train.fraction*N used for training cv.folds=5, # do 5-fold cross-validation n.minobsinnode = 10) # minimum total weight needed in each node best.iter.test <- gbm.perf(gbm1,method="test", plot.it=FALSE) # returns test set estimate of best number of trees best.iter <- best.iter.test # make some new data set.seed(2) N <- 1000 X1 <- runif(N) X2 <- runif(N) X3 <- factor(sample(letters[1:4],N,replace=T)) mu <- c(-1,0,1,2)[as.numeric(X3)] p <- 1/(1+exp(-(sin(3*X1) - 4*X2 + mu))) Y <- rbinom(N,1,p) data2 <- 
data.frame(Y=Y,X1=X1,X2=X2,X3=X3) # predict on the new data using "best" number of trees # f.predict will be on the canonical scale (logit,log,etc.) f.1.predict <- predict.gbm(gbm1,data2, n.trees=best.iter.test) # compute quantity prior to transformation f.new = sin(3*X1) - 4*X2 + mu # Base the validation tests on observed discrepancies checkTrue(sd(f.new - f.1.predict) < 1.0 ) invisible() } ################################################################################ ########################### test.relative.influence() ########################## ########################### ########################## test.relative.influence <- function(){ # Test that relative.influence really does pick out the true predictors set.seed(1234) X1 <- matrix(nrow=1000, ncol=50) X1 <- apply(X1, 2, function(x) rnorm(1000)) # Random noise X2 <- matrix(nrow=1000, ncol=5) X2 <- apply(X2, 2, function(x) c(rnorm(500), rnorm(500, 3))) # Real predictors cls <- rep(c(0, 1), ea=500) # Class X <- data.frame(cbind(X1, X2, cls)) mod <- gbm(cls ~ ., data= X, n.trees=1000, cv.folds=5, shrinkage=.01, interaction.depth=2) ri <- rev(sort(relative.influence(mod))) wh <- names(ri)[1:5] res <- sum(wh %in% paste("V", 51:55, sep = "")) checkEqualsNumeric(res, 5, msg="Testing relative.influence identifies true predictors") } ################################################################################ ################################ validate.gbm() ################################ ################################ ################################ validate.gbm <- function () { check <- "package:RUnit" %in% search() if (!check) { check <- try(library(RUnit)) if (class(check) == "try-error") { stop("You need to attach the RUnit package to validate gbm") } } wh <- (1:length(search()))[search() == "package:gbm"] tests <- objects(wh)[substring(objects(wh), 1, 5) == "test."] # Create temporary directory to put tests into if (.Platform$OS.type == "windows"){ sep <- "\\" } else { sep <- "/" } dir <- 
file.path(tempdir(), "gbm.tests", fsep = sep) dir.create(dir) for (i in 1:length(tests)) { str <- paste(dir, sep, tests[i], ".R", sep = "") dump(tests[i], file = str) } res <- defineTestSuite("gbm", dirs = dir, testFuncRegexp = "^test.+", testFileRegexp = "*.R") cat("Running gbm test suite.\nThis will take some time...\n\n") res <- runTestSuite(res) res } gbm/R/shrink.gbm.pred.R0000644000176200001440000000352212102666411014337 0ustar liggesusersshrink.gbm.pred <- function(object,newdata,n.trees, lambda=rep(1,length(object$var.names)), ...) { if(length(lambda) != length(object$var.names)) { stop("lambda must have the same length as the number of variables in the gbm object.") } if(!is.null(object$Terms)) { x <- model.frame(delete.response(object$Terms), newdata, na.action=na.pass) } else { x <- newdata } cRows <- nrow(x) cCols <- ncol(x) for(i in 1:cCols) { if(is.factor(x[,i])) { j <- match(levels(x[,i]), object$var.levels[[i]]) if(any(is.na(j))) { stop(paste("New levels for variable ", object$var.names[i],": ", levels(x[,i])[is.na(j)],sep="")) } x[,i] <- as.numeric(x[,i])-1 } } x <- as.vector(unlist(x)) if(missing(n.trees) || any(n.trees > object$n.trees)) { n.trees <- n.trees[n.trees<=object$n.trees] if(length(n.trees)==0) n.trees <- object$n.trees warning("n.trees not specified or some values exceeded number fit so far. 
Using ",n.trees,".") } # sort n.trees so that predictions are easier to generate and store n.trees <- sort(n.trees) predF <- .Call("gbm_shrink_pred", X=as.double(x), cRows=as.integer(cRows), cCols=as.integer(cCols), n.trees=as.integer(n.trees), initF=object$initF, trees=object$trees, c.split=object$c.split, var.type=as.integer(object$var.type), depth=as.integer(object$interaction.depth), lambda=as.double(lambda), PACKAGE = "gbm") return(predF) } gbm/R/shrink.gbm.R0000644000176200001440000000265712102666411013416 0ustar liggesusers# evaluates the objective function and gradient with respect to beta # beta = log(lambda/(1-lambda)) shrink.gbm <- function(object,n.trees, lambda=rep(10,length(object$var.names)), ...) { if(length(lambda) != length(object$var.names)) { stop("lambda must have the same length as the number of variables in the gbm object.") } if(is.null(object$data)) { stop("shrink.gbm requires keep.data=TRUE when gbm model is fit.") } y <- object$data$y x <- object$data$x cCols <- length(object$var.names) cRows <- length(x)/cCols if(missing(n.trees) || (n.trees > object$n.trees)) { n.trees <- object$n.trees warning("n.trees not specified or some values exceeded number fit so far. Using ",n.trees,".") } result <- .Call("gbm_shrink_gradient", y=as.double(y), X=as.double(x), cRows=as.integer(cRows), cCols=as.integer(cCols), n.trees=as.integer(n.trees), initF=object$initF, trees=object$trees, c.split=object$c.split, var.type=as.integer(object$var.type), depth=as.integer(object$interaction.depth), lambda=as.double(lambda), PACKAGE = "gbm") names(result) <- c("predF","objective","gradient") return(result) } gbm/R/relative.influence.R0000644000176200001440000000302112134211007015110 0ustar liggesusersrelative.influence <- function(object, n.trees, scale. = FALSE, sort. 
= FALSE ) { if( missing( n.trees ) ){ if ( object$train.fraction < 1 ){ n.trees <- gbm.perf( object, method="test", plot.it=FALSE ) } else if ( !is.null( object$cv.error ) ){ n.trees <- gbm.perf( object, method="cv", plot.it = FALSE ) } else{ # If dist=multinomial, object$n.trees = n.trees * num.classes # so use the following instead. n.trees <- length( object$train.error ) } cat( paste( "n.trees not given. Using", n.trees, "trees.\n" ) ) if (object$distribution == "multinomial"){ n.trees <- n.trees * object$num.classes } } get.rel.inf <- function(obj) { lapply(split(obj[[6]],obj[[1]]),sum) # 6 - Improvement, 1 - var name } temp <- unlist(lapply(object$trees[1:n.trees],get.rel.inf)) rel.inf.compact <- unlist(lapply(split(temp,names(temp)),sum)) rel.inf.compact <- rel.inf.compact[names(rel.inf.compact)!="-1"] # rel.inf.compact excludes those variable that never entered the model # insert 0's for the excluded variables rel.inf <- rep(0,length(object$var.names)) i <- as.numeric(names(rel.inf.compact))+1 rel.inf[i] <- rel.inf.compact names(rel.inf) <- object$var.names if (scale.){ rel.inf <- rel.inf / max(rel.inf) } if (sort.){ rel.inf <- rev(sort(rel.inf)) } return(rel.inf=rel.inf) } gbm/R/reconstructGBMdata.R0000644000176200001440000000240612134234270015076 0ustar liggesusersreconstructGBMdata <- function(x) { if(class(x) != "gbm") { stop( "This function is for use only with objects having class 'gbm'" ) } else if (is.null(x$data)) { stop("Cannot reconstruct data from gbm object. 
gbm() was called with keep.data=FALSE") } else if (x$distribution$name=="multinomial") { y <- matrix(x$data$y, ncol=x$num.classes, byrow=FALSE) yn <- apply(y, 1, function(z,nc) {(1:nc)[z == 1]}, nc = x$num.classes) y <- factor(yn, labels=x$classes) xdat <- matrix(x$data$x, ncol=ncol(x$data$x.order), byrow=FALSE) d <- data.frame(y, xdat) names(d) <- c(x$response.name, x$var.names) } else if (x$distribution$name == "coxph") { xdat <- matrix(x$data$x, ncol=ncol(x$data$x.order), byrow=FALSE) status <- x$data$Misc y <- x$data$y[order(x$data$i.timeorder)] d <- data.frame(y, status, xdat) names(d) <- c(x$response.name[-1], colnames(x$data$x.order)) } else { y <- x$data$y xdat <- matrix(x$data$x, ncol=ncol(x$data$x.order), byrow=FALSE) d <- data.frame(y, xdat) rn <- ifelse(length(x$response.name) > 1, x$response.name[2], x$response.name) names(d) <- c(rn, colnames(x$data$x.order)) } invisible(d) } gbm/R/print.gbm.R0000644000176200001440000000505712131267445013257 0ustar liggesusers# print, show and summary functions for gbm print.gbm <- function(x, ... 
) { if (!is.null(x$call)){ print(x$call) } dist.name <- x$distribution$name if (dist.name == "pairwise") { if (!is.null(x$distribution$max.rank) && x$distribution$max.rank > 0) { dist.name <- sprintf("pairwise (metric=%s, max.rank=%d)", x$distribution$metric, x$distribution$max.rank) } else { dist.name <- sprintf("pairwise (metric=%s)", x$distribution$metric) } } cat( paste( "A gradient boosted model with", dist.name, "loss function.\n" )) cat( paste( length( x$train.error ), "iterations were performed.\n" ) ) best <- length( x$train.error ) if ( !is.null( x$cv.error ) ) { best <- gbm.perf( x, plot.it = FALSE, method="cv" ) cat( paste("The best cross-validation iteration was ", best, ".\n", sep = "" ) ) } if ( x$train.fraction < 1 ) { best <- gbm.perf( x, plot.it = FALSE, method="test" ) cat( paste("The best test-set iteration was ", best, ".\n", sep = "" ) ) } if ( is.null( best ) ) { best <- length( x$train.error ) } ri <- relative.influence( x, n.trees=best ) cat( "There were", length( x$var.names ), "predictors of which", sum( ri > 0 ), "had non-zero influence.\n" ) invisible() } show.gbm <- print.gbm summary.gbm <- function(object, cBars=length(object$var.names), n.trees=object$n.trees, plotit=TRUE, order=TRUE, method=relative.influence, normalize=TRUE, ...) { if(n.trees < 1) { stop("n.trees must be greater than 0.") } if(n.trees > object$n.trees) { warning("Exceeded total number of GBM terms. Results use n.trees=",object$n.trees," terms.\n") n.trees <- object$n.trees } rel.inf <- method(object,n.trees) rel.inf[rel.inf<0] <- 0 if(order) { i <- order(-rel.inf) } else { i <- 1:length(rel.inf) } if(cBars==0) cBars <- min(10,length(object$var.names)) if(cBars>length(object$var.names)) cBars <- length(object$var.names) if(normalize) rel.inf <- 100*rel.inf/sum(rel.inf) if(plotit) { barplot(rel.inf[i[cBars:1]], horiz=TRUE, col=rainbow(cBars,start=3/6,end=4/6), names=object$var.names[i[cBars:1]], xlab="Relative influence",...) 
} return(data.frame(var=object$var.names[i], rel.inf=rel.inf[i])) } gbm/R/pretty.gbm.tree.R0000644000176200001440000000075012102666411014375 0ustar liggesuserspretty.gbm.tree <- function(object,i.tree=1) { if((i.tree<1) || (i.tree>length(object$trees))) { stop("i.tree is out of range. Must be less than ",length(object$trees)) } else { temp <- data.frame(object$trees[[i.tree]]) names(temp) <- c("SplitVar","SplitCodePred","LeftNode", "RightNode","MissingNode","ErrorReduction", "Weight","Prediction") row.names(temp) <- 0:(nrow(temp)-1) } return(temp) } gbm/R/predict.gbm.R0000644000176200001440000000740012142655162013546 0ustar liggesuserspredict.gbm <- function(object,newdata,n.trees, type="link", single.tree = FALSE, ...) { if ( missing( newdata ) ){ newdata <- reconstructGBMdata(object) } if ( missing(n.trees) ) { if ( object$train.fraction < 1 ){ n.trees <- gbm.perf( object, method="test", plot.it = FALSE ) } else if (!is.null(object$cv.error)){ n.trees <- gbm.perf( object, method="cv", plot.it = FALSE ) } else{ best <- length( object$train.error ) } cat( paste( "Using", n.trees, "trees...\n" ) ) } if(!is.element(type, c("link","response" ))) { stop("type must be either 'link' or 'response'") } if(!is.null(object$Terms)) { x <- model.frame(terms(reformulate(object$var.names)), newdata, na.action=na.pass) } else { x <- newdata } cRows <- nrow(x) cCols <- ncol(x) for(i in 1:cCols) { if(is.factor(x[,i])) { if (length(levels(x[,i])) > length(object$var.levels[[i]])) { new.compare <- levels(x[,i])[1:length(object$var.levels[[i]])] } else { new.compare <- levels(x[,i]) } if (!identical(object$var.levels[[i]], new.compare)) { x[,i] <- factor(x[,i], union(object$var.levels[[i]], levels(x[,i]))) } x[,i] <- as.numeric(x[,i])-1 } } x <- as.vector(unlist(x, use.names=FALSE)) if(missing(n.trees) || any(n.trees > object$n.trees)) { n.trees[n.trees>object$n.trees] <- object$n.trees warning("Number of trees not specified or exceeded number fit so far. 
Using ",paste(n.trees,collapse=" "),".") } i.ntree.order <- order(n.trees) # Next if block for compatibility with objects created with version 1.6. if (is.null(object$num.classes)){ object$num.classes <- 1 } predF <- .Call("gbm_pred", X=as.double(x), cRows=as.integer(cRows), cCols=as.integer(cCols), cNumClasses = as.integer(object$num.classes), n.trees=as.integer(n.trees[i.ntree.order]), initF=object$initF, trees=object$trees, c.split=object$c.split, var.type=as.integer(object$var.type), single.tree = as.integer(single.tree), PACKAGE = "gbm") if((length(n.trees) > 1) || (object$num.classes > 1)) { if(object$distribution$name=="multinomial") { predF <- array(predF, dim=c(cRows,object$num.classes,length(n.trees))) dimnames(predF) <- list(NULL, object$classes, n.trees) predF[,,i.ntree.order] <- predF } else { predF <- matrix(predF, ncol=length(n.trees), byrow=FALSE) colnames(predF) <- n.trees predF[,i.ntree.order] <- predF } } if(type=="response") { if(is.element(object$distribution$name, c("bernoulli", "pairwise"))) { predF <- 1/(1+exp(-predF)) } else if(object$distribution$name=="poisson") { predF <- exp(predF) } else if (object$distribution$name == "adaboost"){ predF <- 1 / (1 + exp(-2*predF)) } if(object$distribution$name=="multinomial") { pexp <- exp(predF) psum <- apply(pexp, c(1, 3), function(x) { x / sum(x) }) # Transpose each 2d array predF <- aperm(psum, c(2, 1, 3)) } if((length(n.trees)==1) && (object$distribution$name!="multinomial")) { predF <- as.vector(predF) } } if(!is.null(attr(object$Terms,"offset"))) { warning("predict.gbm does not add the offset to the predicted values.") } return(predF) } gbm/R/plot.gbm.R0000644000176200001440000003242112131264765013076 0ustar liggesusersplot.gbm <- function(x, i.var=1, n.trees=x$n.trees, continuous.resolution=100, return.grid=FALSE, type="link", ...) 
{ if (!is.element(type, c("link", "response"))){ stop( "type must be either 'link' or 'response'") } if(all(is.character(i.var))) { i <- match(i.var,x$var.names) if(any(is.na(i))) { stop("Plot variables not used in gbm model fit: ",i.var[is.na(i)]) } else { i.var <- i } } if((min(i.var)<1) || (max(i.var)>length(x$var.names))) { warning("i.var must be between 1 and ",length(x$var.names)) } if(n.trees > x$n.trees) { warning(paste("n.trees exceeds the number of trees in the model, ",x$n.trees, ". Plotting using ",x$n.trees," trees.",sep="")) n.trees <- x$n.trees } if(length(i.var) > 3) { warning("gbm.int.plot creates up to 3-way interaction plots.\nplot.gbm will only return the plotting data structure.") return.grid = TRUE } # generate grid to evaluate gbm model grid.levels <- vector("list",length(i.var)) for(i in 1:length(i.var)) { # continuous if(is.numeric(x$var.levels[[i.var[i]]])) { grid.levels[[i]] <- seq(min(x$var.levels[[i.var[i]]]), max(x$var.levels[[i.var[i]]]), length=continuous.resolution) } # categorical or ordered else { grid.levels[[i]] <- as.numeric(factor(x$var.levels[[i.var[i]]], levels=x$var.levels[[i.var[i]]]))-1 } } X <- expand.grid(grid.levels) names(X) <- paste("X",1:length(i.var),sep="") # Next if block for compatibility with objects created with 1.6 if (is.null(x$num.classes)){ x$num.classes <- 1 } # evaluate at each data point y <- .Call("gbm_plot", X = as.double(data.matrix(X)), cRows = as.integer(nrow(X)), cCols = as.integer(ncol(X)), n.class = as.integer(x$num.classes), i.var = as.integer(i.var-1), n.trees = as.integer(n.trees) , initF = as.double(x$initF), trees = x$trees, c.splits = x$c.splits, var.type = as.integer(x$var.type), PACKAGE = "gbm") if (x$distribution$name=="multinomial") { ## Put result into matrix form X$y <- matrix(y, ncol = x$num.classes) colnames(X$y) <- x$classes ## Use class probabilities if (type=="response"){ X$y <- exp(X$y) X$y <- X$y / matrix(rowSums(X$y), ncol=ncol(X$y), nrow=nrow(X$y)) } } else 
if(is.element(x$distribution$name, c("bernoulli", "pairwise")) && type=="response") { X$y <- 1/(1+exp(-y)) } else if ((x$distribution$name=="poisson") && (type=="response")){ X$y <- exp(y) } else if (type=="response"){ warning("type 'response' only implemented for 'bernoulli', 'poisson', 'multinomial', and 'pairwise'. Ignoring" ) } else { X$y <- y } # transform categorical variables back to factors f.factor <- rep(FALSE,length(i.var)) for(i in 1:length(i.var)) { if(!is.numeric(x$var.levels[[i.var[i]]])) { X[,i] <- factor(x$var.levels[[i.var[i]]][X[,i]+1], levels=x$var.levels[[i.var[i]]]) f.factor[i] <- TRUE } } if(return.grid) { names(X)[1:length(i.var)] <- x$var.names[i.var] return(X) } # create the plots if(length(i.var)==1) { if(!f.factor) { j <- order(X$X1) if (x$distribution$name == "multinomial") { if ( type == "response" ){ ylabel <- "Predicted class probability" } else { ylabel <- paste("f(",x$var.names[i.var],")",sep="") } plot(range(X$X1), range(X$y), type = "n", xlab = x$var.names[i.var], ylab = ylabel) for (ii in 1:x$num.classes){ lines(X$X1,X$y[,ii], xlab=x$var.names[i.var], ylab=paste("f(",x$var.names[i.var],")",sep=""), col = ii, ...) } } else if (is.element(x$distribution$name, c("bernoulli", "pairwise"))) { if ( type == "response" ){ ylabel <- "Predicted probability" } else { ylabel <- paste("f(",x$var.names[i.var],")",sep="") } plot( X$X1, X$y , type = "l", xlab = x$var.names[i.var], ylab=ylabel ) } else if ( x$distribution$name == "poisson" ){ if (type == "response" ){ ylabel <- "Predicted count" } else{ ylabel <- paste("f(",x$var.names[i.var],")",sep="") } plot( X$X1, X$y , type = "l", xlab = x$var.names[i.var], ylab=ylabel ) } else { plot(X$X1,X$y, type="l", xlab=x$var.names[i.var], ylab=paste("f(",x$var.names[i.var],")",sep=""),...) 
} } else { if (x$distribution$name == "multinomial") { nX <- length(X$X1) dim.y <- dim(X$y) if (type == "response" ){ ylabel <- "Predicted probability" } else{ ylabel <- paste("f(",x$var.names[i.var],")",sep="") } plot(c(0,nX), range(X$y), axes = FALSE, type = "n", xlab = x$var.names[i.var], ylab = ylabel) axis(side = 1, labels = FALSE, at = 0:nX) axis(side = 2) mtext(as.character(X$X1), side = 1, at = 1:nX - 0.5) segments(x1 = rep(1:nX - 0.75, each = dim.y[2]), y1 = as.vector(t(X$y)), x2 = rep(1:nX - 0.25, each = dim.y[2]), col = 1:dim.y[2]) } else if (is.element(x$distribution$name, c("bernoulli", "pairwise")) && type == "response" ){ ylabel <- "Predicted probability" plot( X$X1, X$y, type = "l", xlab=x$var.names[i.var], ylab=ylabel ) } else if ( x$distribution$name == "poisson" & type == "response" ){ ylabel <- "Predicted count" plot( X$X1, X$y, type = "l", xlab=x$var.names[i.var], ylab=ylabel ) } else { plot(X$X1,X$y, type="l", xlab=x$var.names[i.var], ylab=paste("f(",x$var.names[i.var],")",sep=""),...) } } } else if(length(i.var)==2) { if(!f.factor[1] && !f.factor[2]) { if (x$distribution$name == "multinomial") { for (ii in 1:x$num.classes){ X$temp <- X$y[, ii] print(levelplot(temp~X1*X2,data=X, xlab=x$var.names[i.var[1]], ylab=x$var.names[i.var[2]],...)) title(paste("Class:", dimnames(X$y)[[2]][ii])) } X$temp <- NULL } else { print(levelplot(y~X1*X2,data=X, xlab=x$var.names[i.var[1]], ylab=x$var.names[i.var[2]],...)) } } else if(f.factor[1] && !f.factor[2]) { if (x$distribution$name == "multinomial") { for (ii in 1:x$num.classes){ X$temp <- X$y[, ii] print( xyplot(temp~X2|X1,data=X, xlab=x$var.names[i.var[2]], ylab=paste("f(",x$var.names[i.var[1]],",",x$var.names[i.var[2]],")",sep=""), type="l", panel = panel.xyplot, ...) 
) title(paste("Class:", dimnames(X$y)[[2]][ii])) } X$temp <- NULL } else { print(xyplot(y~X2|X1,data=X, xlab=x$var.names[i.var[2]], ylab=paste("f(",x$var.names[i.var[1]],",",x$var.names[i.var[2]],")",sep=""), type="l", panel = panel.xyplot, ...)) } } else if(!f.factor[1] && f.factor[2]) { if (x$distribution$name == "multinomial") { for (ii in 1:x$num.classes){ X$temp <- X$y[, ii] print( xyplot(temp~X1|X2,data=X, xlab=x$var.names[i.var[1]], ylab=paste("f(",x$var.names[i.var[1]],",",x$var.names[i.var[2]],")",sep=""), type="l", panel = panel.xyplot, ...) ) title(paste("Class:", dimnames(X$y)[[2]][ii])) } X$temp <- NULL } else { print(xyplot(y~X1|X2,data=X, xlab=x$var.names[i.var[1]], ylab=paste("f(",x$var.names[i.var[1]],",",x$var.names[i.var[2]],")",sep=""), type="l", panel = panel.xyplot, ...)) } } else { if (x$distribution$name == "multinomial") { for (ii in 1:x$num.classes){ X$temp <- X$y[, ii] print( stripplot(X1~temp|X2,data=X, xlab=x$var.names[i.var[2]], ylab=paste("f(",x$var.names[i.var[1]],",",x$var.names[i.var[2]],")",sep=""), ...) ) title(paste("Class:", dimnames(X$y)[[2]][ii])) } X$temp <- NULL } else { print(stripplot(X1~y|X2,data=X, xlab=x$var.names[i.var[2]], ylab=paste("f(",x$var.names[i.var[1]],",",x$var.names[i.var[2]],")",sep=""), ...)) } } } else if(length(i.var)==3) { i <- order(f.factor) X.new <- X[,i] X.new$y <- X$y names(X.new) <- names(X) # 0 factor, 3 continuous if(sum(f.factor)==0) { X.new$X3 <- equal.count(X.new$X3) if (x$distribution$name == "multinomial") { for (ii in 1:x$num.classes){ X.new$temp <- X.new$y[, ii] print( levelplot(temp~X1*X2|X3,data=X.new, xlab=x$var.names[i.var[i[1]]], ylab=x$var.names[i.var[i[2]]],...) 
) title(paste("Class:", dimnames(X.new$y)[[2]][ii])) } X.new$temp <- NULL } else { print(levelplot(y~X1*X2|X3,data=X.new, xlab=x$var.names[i.var[i[1]]], ylab=x$var.names[i.var[i[2]]],...)) } } # 1 factor, 2 continuous else if(sum(f.factor)==1) { if (x$distribution$name == "multinomial") { for (ii in 1:x$num.classes){ X.new$temp <- X.new$y[, ii] print( levelplot(temp~X1*X2|X3,data=X.new, xlab=x$var.names[i.var[i[1]]], ylab=x$var.names[i.var[i[2]]],...)) title(paste("Class:", dimnames(X.new$y)[[2]][ii]) ) } X.new$temp <- NULL } else { print(levelplot(y~X1*X2|X3,data=X.new, xlab=x$var.names[i.var[i[1]]], ylab=x$var.names[i.var[i[2]]],...)) } } # 2 factors, 1 continuous else if(sum(f.factor)==2) { if (x$distribution$name == "multinomial") { for (ii in 1:x$num.classes){ X.new$temp <- X.new$y[, ii] print( xyplot(temp~X1|X2*X3,data=X.new, type="l", xlab=x$var.names[i.var[i[1]]], ylab=paste("f(",paste(x$var.names[i.var[1:3]],collapse=","),")",sep=""), panel = panel.xyplot, ...) ) title(paste("Class:", dimnames(X.new$y)[[2]][ii]) ) } X.new$temp <- NULL } else { print(xyplot(y~X1|X2*X3,data=X.new, type="l", xlab=x$var.names[i.var[i[1]]], ylab=paste("f(",paste(x$var.names[i.var[1:3]],collapse=","),")",sep=""), panel = panel.xyplot, ...)) } } # 3 factors, 0 continuous else if(sum(f.factor)==3) { if (x$distribution$name == "multinomial") { for (ii in 1:x$num.classes){ X.new$temp <- X.new$y[, ii] print( stripplot(X1~temp|X2*X3,data=X.new, xlab=x$var.names[i.var[i[1]]], ylab=paste("f(",paste(x$var.names[i.var[1:3]],collapse=","),")",sep=""), ...) 
) title(paste("Class:", dimnames(X.new$y)[[2]][ii]) ) } X.new$temp <- NULL } else { print(stripplot(X1~y|X2*X3,data=X.new, xlab=x$var.names[i.var[i[1]]], ylab=paste("f(",paste(x$var.names[i.var[1:3]],collapse=","),")",sep=""), ...)) } } } } gbm/R/permutation.test.gbm.R0000644000176200001440000000320112102666411015427 0ustar liggesuserspermutation.test.gbm <- function(object, n.trees) { # get variables used in the model i.vars <- sort(unique(unlist(lapply(object$trees[1:n.trees], function(x){unique(x[[1]])})))) i.vars <- i.vars[i.vars!=-1] + 1 rel.inf <- rep(0,length(object$var.names)) if(!is.null(object$data)) { y <- object$data$y os <- object$data$offset Misc <- object$data$Misc w <- object$data$w x <- matrix(object$data$x, ncol=length(object$var.names)) object$Terms <- NULL # this makes predict.gbm take x as it is if (object$distribution$name == "pairwise") { # group and cutoff are only relevant for distribution "pairwise" # in this case, the last element specifies the max rank # max rank = 0 means no cut off group <- Misc[1:length(y)] max.rank <- Misc[length(y)+1] } } else { stop("Model was fit with keep.data=FALSE. permutation.test.gbm has not been implemented for that case.") } # the index shuffler j <- sample(1:nrow(x)) for(i in 1:length(i.vars)) { x[ ,i.vars[i]] <- x[j,i.vars[i]] new.pred <- predict.gbm(object,newdata=x,n.trees=n.trees) rel.inf[i.vars[i]] <- gbm.loss(y,new.pred,w,os, object$distribution, object$train.error[n.trees], group, max.rank) x[j,i.vars[i]] <- x[ ,i.vars[i]] } return(rel.inf=rel.inf) } gbm/R/ir.measures.R0000644000176200001440000000644312102666411013606 0ustar liggesusers# Functions to compute IR measures for pairwise loss for # a single group # Notes: # * Inputs are passed as a 2-elemen (y,f) list, to # facilitate the 'by' iteration # * Return the respective metric, or a negative value if # it is undefined for the given group # * For simplicity, we have no special handling for ties; # instead, we break ties randomly. 
This is slightly # inaccurate for individual groups, but should have # a small effect on the overall measure. # Area under ROC curve = ratio of correctly ranking pairs gbm.roc.area <- function(obs, pred) { n1 <- sum(obs) n <- length(obs) if (n==n1) { return(1) } # Fraction of concordant pairs # = sum_{pos}(rank-1) / #pairs with different labels # #pairs = n1 * (n-n1) return ((mean(rank(pred)[obs > 0]) - (n1 + 1)/2)/(n - n1)) } # Concordance Index: # Fraction of all pairs (i,j) with i0) if (length(f) <= 1 || num.pos == 0 || num.pos == length(f)) { return (-1.0) } else { return (gbm.roc.area(obs=y, pred=f)) } } ir.measure.mrr <- function(y.f, max.rank) { y <- y.f[[1]] f <- y.f[[2]] num.pos <- sum(y>0) if (length(f) <= 1 || num.pos == 0 || num.pos == length(f)) { return (-1.0) } ord <- order(f, decreasing=TRUE) min.idx.pos <- min(which(y[ord]>0)) if (min.idx.pos <= max.rank) { return (1.0 / min.idx.pos) } else { return (0.0) } } ir.measure.map <- function(y.f, max.rank=0) { # Note: max.rank is meaningless for MAP y <- y.f[[1]] f <- y.f[[2]] ord <- order(f, decreasing=TRUE) idx.pos <- which(y[ord]>0) num.pos <- length(idx.pos) if (length(f) <= 1 || num.pos == 0 || num.pos == length(f)) { return (-1.0) } # Above and including the rank of the i-th positive result, # there are exactly i positives and rank(i) total results return (sum((1:length(idx.pos))/idx.pos) / num.pos) } ir.measure.ndcg <- function(y.f, max.rank) { y <- y.f[[1]] f <- y.f[[2]] if (length(f) <= 1 || all(diff(y)==0)) { return (-1.0) } num.items <- min(length(f), max.rank) ord <- order(f, decreasing=TRUE) dcg <- sum(y[ord][1:num.items] / log2(2:(num.items+1))) # The best possible DCG: order by target ord.max <- order(y, decreasing=TRUE) dcg.max <- sum(y[ord.max][1:num.items] / log2(2:(num.items+1))) # Normalize return (dcg / dcg.max) } gbm/R/interact.gbm.R0000644000176200001440000000725312142724707013735 0ustar liggesusers# Compute Friedman's H statistic for interaction effects interact.gbm <- function(x, 
data, i.var = 1, n.trees = x$n.trees){ ############################################################### # Do sanity checks on the call if (x$interaction.depth < length(i.var)){ stop("interaction.depth too low in model call") } if (all(is.character(i.var))){ i <- match(i.var, x$var.names) if (any(is.na(i))) { stop("Variables given are not used in gbm model fit: ", i.var[is.na(i)]) } else { i.var <- i } } if ((min(i.var) < 1) || (max(i.var) > length(x$var.names))) { warning("i.var must be between 1 and ", length(x$var.names)) } if (n.trees > x$n.trees) { warning(paste("n.trees exceeds the number of trees in the model, ", x$n.trees,". Using ", x$n.trees, " trees.", sep = "")) n.trees <- x$n.trees } # End of sanity checks ############################################################### unique.tab <- function(z,i.var) { a <- unique(z[,i.var,drop=FALSE]) a$n <- table(factor(apply(z[,i.var,drop=FALSE],1,paste,collapse="\r"), levels=apply(a,1,paste,collapse="\r"))) return(a) } # convert factors for(j in i.var) { if(is.factor(data[,x$var.names[j]])) data[,x$var.names[j]] <- as.numeric(data[,x$var.names[j]])-1 } # generate a list with all combinations of variables a <- apply(expand.grid(rep(list(c(FALSE,TRUE)), length(i.var)))[-1,],1, function(x) as.numeric(which(x))) FF <- vector("list",length(a)) for(j in 1:length(a)) { FF[[j]]$Z <- data.frame(unique.tab(data, x$var.names[i.var[a[[j]]]])) FF[[j]]$n <- as.numeric(FF[[j]]$Z$n) FF[[j]]$Z$n <- NULL FF[[j]]$f <- .Call("gbm_plot", X = as.double(data.matrix(FF[[j]]$Z)), cRows = as.integer(nrow(FF[[j]]$Z)), cCols = as.integer(ncol(FF[[j]]$Z)), n.class = as.integer(x$num.classes), i.var = as.integer(i.var[a[[j]]] - 1), n.trees = as.integer(n.trees), initF = as.double(x$initF), trees = x$trees, c.splits = x$c.splits, var.type = as.integer(x$var.type), PACKAGE = "gbm") # FF[[jj]]$Z is the data, f is the predictions, n is the number of levels for factors # Need to restructure f to deal with multinomial case FF[[j]]$f <- 
matrix(FF[[j]]$f, ncol=x$num.classes, byrow=FALSE) # center the values FF[[j]]$f <- apply(FF[[j]]$f, 2, function(x, w){ x - weighted.mean(x, w, na.rm=TRUE) }, w=FF[[j]]$n) # precompute the sign of these terms to appear in H FF[[j]]$sign <- ifelse(length(a[[j]]) %% 2 == length(i.var) %% 2, 1, -1) } H <- FF[[length(a)]]$f for(j in 1:(length(a)-1)){ i1 <- apply(FF[[length(a)]]$Z[,a[[j]], drop=FALSE], 1, paste, collapse="\r") i2 <- apply(FF[[j]]$Z,1,paste,collapse="\r") i <- match(i1, i2) H <- H + with(FF[[j]], sign*f[i,]) } # Compute H w <- matrix(FF[[length(a)]]$n, ncol=1) f <- matrix(FF[[length(a)]]$f^2, ncol=x$num.classes, byrow=FALSE) top <- apply(H^2, 2, weighted.mean, w = w, na.rm = TRUE) btm <- apply(f, 2, weighted.mean, w = w, na.rm = TRUE) H <- top / btm if (x$distribution$name=="multinomial"){ names(H) <- x$classes } # If H > 1, rounding and tiny main effects have messed things up H[H > 1] <- NaN return(sqrt(H)) } gbm/R/guessDist.R0000644000176200001440000000052712102666411013320 0ustar liggesusersguessDist <- function(y){ # If distribution is not given, try to guess it if (length(unique(y)) == 2){ d <- "bernoulli" } else if (class(y) == "Surv" ){ d <- "coxph" } else if (is.factor(y)){ d <- "multinomial" } else{ d <- "gaussian" } cat(paste("Distribution not specified, assuming", d, "...\n")) list(name=d) } gbm/R/getStratify.R0000644000176200001440000000063212102666411013650 0ustar liggesusersgetStratify <- function(strat, d){ if (is.null(strat)){ if (d$name == "multinomial" ){ strat <- TRUE } else { strat <- FALSE } } else { if (!is.element(d$name, c( "bernoulli", "multinomial"))){ warning("You can only use class.stratify.cv when distribution is bernoulli or multinomial. 
Ignored.") strat <- FALSE } } # Close else strat } gbm/R/getCVgroup.R0000644000176200001440000000202212102666411013423 0ustar liggesusersgetCVgroup <- # Construct cross-validation groups depending on the type of model to be fit function(distribution, class.stratify.cv, y, i.train, cv.folds, group){ if (distribution$name %in% c( "bernoulli", "multinomial" ) & class.stratify.cv ){ nc <- table(y[i.train]) # Number in each class uc <- names(nc) if (min(nc) < cv.folds){ stop( paste("The smallest class has only", min(nc), "objects in the training set. Can't do", cv.folds, "fold cross-validation.")) } cv.group <- vector(length = length(i.train)) for (i in 1:length(uc)){ cv.group[y[i.train] == uc[i]] <- sample(rep(1:cv.folds , length = nc[i])) } } # Close if else if (distribution$name == "pairwise") { # Split into CV folds at group boundaries s <- sample(rep(1:cv.folds, length=nlevels(group))) cv.group <- s[as.integer(group[i.train])] } else { cv.group <- sample(rep(1:cv.folds, length=length(i.train))) } cv.group } gbm/R/gbmDoFold.R0000644000176200001440000000213512134211007013170 0ustar liggesusersgbmDoFold <- # Do specified cross-validation fold - a self-contained function for # passing to individual cores. 
function(X, i.train, x, y, offset, distribution, w, var.monotone, n.trees, interaction.depth, n.minobsinnode, shrinkage, bag.fraction, cv.group, var.names, response.name, group, s){ library(gbm, quietly=TRUE) cat("CV:", X, "\n") set.seed(s[[X]]) i <- order(cv.group == X) x <- x[i.train,,drop=TRUE][i,,drop=FALSE] y <- y[i.train][i] offset <- offset[i.train][i] nTrain <- length(which(cv.group != X)) group <- group[i.train][i] res <- gbm.fit(x, y, offset=offset, distribution=distribution, w=w, var.monotone=var.monotone, n.trees=n.trees, interaction.depth=interaction.depth, n.minobsinnode=n.minobsinnode, shrinkage=shrinkage, bag.fraction=bag.fraction, nTrain=nTrain, keep.data=FALSE, verbose=FALSE, response.name=response.name, group=group) res } gbm/R/gbmCrossVal.R0000644000176200001440000001020512134211007013552 0ustar liggesusers##' Perform gbm cross-validation ##' ##' This function has far too many arguments, but there isn't the ##' abstraction in gbm to lose them. gbmCrossVal <- function(cv.folds, nTrain, n.cores, class.stratify.cv, data, x, y, offset, distribution, w, var.monotone, n.trees, interaction.depth, n.minobsinnode, shrinkage, bag.fraction, var.names, response.name, group) { i.train <- 1:nTrain cv.group <- getCVgroup(distribution, class.stratify.cv, y, i.train, cv.folds, group) ## build the models cv.models <- gbmCrossValModelBuild(cv.folds, cv.group, n.cores, i.train, x, y, offset, distribution, w, var.monotone, n.trees, interaction.depth, n.minobsinnode, shrinkage, bag.fraction, var.names, response.name, group) ## get the errors cv.error <- gbmCrossValErr(cv.models, cv.folds, cv.group, nTrain, n.trees) best.iter.cv <- which.min(cv.error) ## get the predictions predictions <- gbmCrossValPredictions(cv.models, cv.folds, cv.group, best.iter.cv, distribution, data[i.train,], y) list(error=cv.error, predictions=predictions) } ##' Get the gbm cross-validation error gbmCrossValErr <- function(cv.models, cv.folds, cv.group, nTrain, n.trees) { in.group <- 
tabulate(cv.group, nbins=cv.folds) cv.error <- vapply(1:cv.folds, function(index) { model <- cv.models[[index]] model$valid.error * in.group[[index]] }, double(n.trees)) ## this is now a (n.trees, cv.folds) matrix ## and now a n.trees vector rowSums(cv.error) / nTrain } ##' Get the predictions for GBM cross validation ##' ##' This function is not as nice as it could be (leakage of y) gbmCrossValPredictions <- function(cv.models, cv.folds, cv.group, best.iter.cv, distribution, data, y) { ## test cv.group and data match if (nrow(data) != length(cv.group)) { stop("mismatch between data and cv.group") } ## this is a little complicated due to multinomial distribution num.cols <- if (distribution$name == "multinomial") { nlevels(factor(y)) } else { 1 } result <- matrix(nrow=nrow(data), ncol=num.cols) ## there's no real reason to do this as other than a for loop data.names <- names(data) for (ind in 1:cv.folds) { ## these are the particular elements flag <- cv.group == ind model <- cv.models[[ind]] ## the %in% here is to handle coxph my.data <- data[flag, !(data.names %in% model$response.name)] predictions <- predict(model, newdata=my.data, n.trees=best.iter.cv) predictions <- matrix(predictions, ncol=num.cols) result[flag,] <- predictions } if (distribution$name != "multinomial") { result <- as.numeric(result) } result } ##' Perform gbm cross-validation ##' ##' This function has far too many arguments. 
gbmCrossValModelBuild <- function(cv.folds, cv.group, n.cores, i.train, x, y, offset, distribution, w, var.monotone, n.trees, interaction.depth, n.minobsinnode, shrinkage, bag.fraction, var.names, response.name, group) { ## set up the cluster and add a finalizer cluster <- gbmCluster(n.cores) on.exit(stopCluster(cluster)) ## get ourselves some random seeds seeds <- as.integer(runif(cv.folds, -(2^31 - 1), 2^31)) ## now do the cross-validation model builds parLapply(cl=cluster, X=1:cv.folds, gbmDoFold, i.train, x, y, offset, distribution, w, var.monotone, n.trees, interaction.depth, n.minobsinnode, shrinkage, bag.fraction, cv.group, var.names, response.name, group, seeds) } gbm/R/gbmCluster.R0000644000176200001440000000035512134211007013444 0ustar liggesusersgbmCluster <- function(n){ # If number of cores (n) not given, try to work it out from the number # that appear to be available and the number of CV folds. if (is.null(n)){ n <- detectCores() } makeCluster(n) } gbm/R/gbm.perf.R0000644000176200001440000001351612102666411013050 0ustar liggesusersgbm.perf <- function(object, plot.it=TRUE, oobag.curve=FALSE, overlay=TRUE, method) { smoother <- NULL if ( missing( method ) ){ if ( object$train.fraction < 1 ){ method <- "test" } else if ( !is.null( object$cv.error ) ){ method <- "cv" } else { method <- "OOB" } cat( paste( "Using", method, "method...\n" ) ) } if((method == "OOB") || oobag.curve) { if(object$bag.fraction==1) stop("Cannot compute OOB estimate or the OOB curve when bag.fraction=1") if(all(!is.finite(object$oobag.improve))) stop("Cannot compute OOB estimate or the OOB curve. 
No finite OOB estimates of improvement") x <- 1:object$n.trees smoother <- loess(object$oobag.improve~x, enp.target=min(max(4,length(x)/10),50)) smoother$y <- smoother$fitted smoother$x <- x best.iter.oob <- x[which.min(-cumsum(smoother$y))] best.iter <- best.iter.oob } if(method == "OOB") { warning("OOB generally underestimates the optimal number of iterations although predictive performance is reasonably competitive. Using cv.folds>0 when calling gbm usually results in improved predictive performance.") } if(method == "test") { best.iter.test <- which.min(object$valid.error) best.iter <- best.iter.test } if(method == "cv") { if(is.null(object$cv.error)) stop("In order to use method=\"cv\" gbm must be called with cv.folds>1.") if(length(object$cv.error) < object$n.trees) warning("cross-validation error is not computed for any additional iterations run using gbm.more().") best.iter.cv <- which.min(object$cv.error) best.iter <- best.iter.cv } if(!is.element(method,c("OOB","test","cv"))) stop("method must be cv, test, or OOB") if(plot.it) { par(mar=c(5,4,4,4)+.1) if (object$distribution$name !="pairwise") { ylab <- switch(substring(object$distribution$name,1,2), ga="Squared error loss", be="Bernoulli deviance", po="Poisson deviance", ad="AdaBoost exponential bound", co="Cox partial deviance", la="Absolute loss", qu="Quantile loss", mu="Multinomial deviance", td="t-distribution deviance" ) } else # object$distribution$name =="pairwise" { ylab <- switch(object$distribution$metric, conc ="Fraction of concordant pairs", ndcg="Normalized discounted cumulative gain", map ="Mean average precision", mrr ="Mean reciprocal rank" ) } if(object$train.fraction==1) { # HS Next line changed to scale axis to include other error # ylim <- range(object$train.error) if ( method=="cv" ){ ylim <- range(object$train.error, object$cv.error) } else if ( method == "test" ){ ylim <- range( object$train.error, object$valid.error) } else { ylim <- range(object$train.error) } } else { ylim <- 
range(object$train.error,object$valid.error) } plot(object$train.error, ylim=ylim, type="l", xlab="Iteration",ylab=ylab) if(object$train.fraction!=1) { lines(object$valid.error,col="red") } if(method=="cv") { lines(object$cv.error,col="green") } if(!is.na(best.iter)) abline(v=best.iter,col="blue",lwd=2,lty=2) if(oobag.curve) { if(overlay) { par(new=TRUE) plot(smoother$x, cumsum(smoother$y), col="blue", type="l", xlab="",ylab="", axes=FALSE) axis(4,srt=0) at <- mean(range(smoother$y)) mtext(paste("OOB improvement in",ylab),side=4,srt=270,line=2) abline(h=0,col="blue",lwd=2) } plot(object$oobag.improve,type="l", xlab="Iteration", ylab=paste("OOB change in",ylab)) lines(smoother,col="red",lwd=2) abline(h=0,col="blue",lwd=1) abline(v=best.iter,col="blue",lwd=1) } } return(best.iter) } perf.pairwise <- function(y, f, group, metric="ndcg", w=NULL, max.rank=0) { func.name <- switch(metric, conc = "ir.measure.conc", mrr = "ir.measure.mrr", map = "ir.measure.map", ndcg = "ir.measure.ndcg", stop(paste("Metric",metric,"is not supported")) ) # Optimization: for binary targets, # AUC is equivalent but faster than CONC if (metric == "conc" && all(is.element(y, 0:1))) { func.name <- "ir.measure.auc" } # Max rank = 0 means no cut off if (max.rank <= 0) { max.rank <- length(y)+1 } # Random tie breaking in case of duplicate scores. 
# (Without tie breaking, we would overestimate if instances are # sorted descending on target) f <- f + 1E-10 * runif(length(f), min=-0.5, max=0.5) measure.by.group <- as.matrix(by(list(y, f), INDICES=group, FUN=get(func.name), max.rank=max.rank)) # Exclude groups with single result or only negative or positive instances idx <- which((!is.null(measure.by.group)) & measure.by.group >= 0) if (is.null(w)) { return (mean(measure.by.group[idx])) } else { # Assumption: weights are constant per group w.by.group <- tapply(w, group, mean) return (weighted.mean(measure.by.group[idx], w=w.by.group[idx])) } } gbm/R/gbm.more.R0000644000176200001440000002212312102666411013050 0ustar liggesusersgbm.more <- function(object, n.new.trees = 100, data = NULL, weights = NULL, offset = NULL, verbose = NULL) { theCall <- match.call() nTrain <- object$nTrain if (object$distribution$name != "pairwise") { distribution.call.name <- object$distribution$name } else { distribution.call.name <- sprintf("pairwise_%s", object$distribution$metric) } if(is.null(object$Terms) && is.null(object$data)) { stop("The gbm model was fit using gbm.fit (rather than gbm) and keep.data was set to FALSE. 
gbm.more cannot locate the dataset.") } else if(is.null(object$data) && is.null(data)) { stop("keep.data was set to FALSE on original gbm call and argument 'data' is NULL") } else if(is.null(object$data)) { m <- eval(object$m, parent.frame()) Terms <- attr(m, "terms") a <- attributes(Terms) y <- as.vector(model.extract(m, "response")) offset <- model.extract(m,offset) x <- model.frame(delete.response(Terms), data, na.action=na.pass) w <- weights if(length(w)==0) w <- rep(1, nrow(x)) if (object$distribution$name != "pairwise") { w <- w*length(w)/sum(w) # normalize to N } if(is.null(offset) || (offset==0)) { offset <- NA } Misc <- NA if(object$distribution$name == "coxph") { Misc <- as.numeric(y)[-(1:cRows)] y <- as.numeric(y)[1:cRows] # reverse sort the failure times to compute risk sets on the fly i.train <- order(-y[1:nTrain]) i.test <- order(-y[(nTrain+1):cRows]) + nTrain i.timeorder <- c(i.train,i.test) y <- y[i.timeorder] Misc <- Misc[i.timeorder] x <- x[i.timeorder,,drop=FALSE] w <- w[i.timeorder] if(!is.na(offset)) offset <- offset[i.timeorder] object$fit <- object$fit[i.timeorder] } else if(object$distribution$name == "tdist" ){ Misc <- object$distribution$df } else if (object$distribution$name == "pairwise"){ # Check if group names are valid distribution.group <- object$distribution$group i <- match(distribution.group, colnames(data)) if (any(is.na(i))) { stop("Group column does not occur in data: ", distribution.group[is.na(i)]) } # construct group index group <- factor(do.call(paste, c(data[,distribution.group, drop=FALSE], sep=":"))) # Check that weights are constant across groups if ((!missing(weights)) && (!is.null(weights))) { w.min <- tapply(w, INDEX=group, FUN=min) w.max <- tapply(w, INDEX=group, FUN=max) if (any(w.min != w.max)) { stop("For distribution 'pairwise', all instances for the same group must have the same weight") } # Normalize across groups w <- w * length(w.min) / sum(w.min) } # Shuffle groups, to remove bias when splitting into 
train/test set and/or CV folds perm.levels <- levels(group)[sample(1:nlevels(group))] group <- factor(group, levels=perm.levels) # The C function expects instances to be sorted by group and descending by target ord.group <- object$ord.group group <- group[ord.group] y <- y[ord.group] x <- x[ord.group,,drop=FALSE] w <- x[ord.group] object$fit <- object$fit[ord.group] # object$fit is stored in the original order # Split into train and validation set, at group boundary num.groups.train <- max(1, round(object$train.fraction * nlevels(group))) # include all groups up to the num.groups.train nTrain <- max(which(group==levels(group)[num.groups.train])) metric <- object$distribution[["metric"]] if (is.element(metric, c("mrr", "map")) && (!all(is.element(y, 0:1)))) { stop("Metrics 'map' and 'mrr' require the response to be in {0,1}") } # Cut-off rank for metrics # We pass this argument as the last element in the Misc vector # Default of 0 means no cutoff max.rank <- 0 if (!is.null(object$distribution[["max.rank"]]) && object$distribution[["max.rank"]] > 0) { if (is.element(metric, c("ndcg", "mrr"))) { max.rank <- object$distribution[["max.rank"]] } else { stop("Parameter 'max.rank' cannot be specified for metric '", metric, "', only supported for 'ndcg' and 'mrr'") } } Misc <- c(group, max.rank) } # create index upfront... 
subtract one for 0 based order x.order <- apply(x[1:nTrain,,drop=FALSE],2,order,na.last=FALSE)-1 x <- data.matrix(x) cRows <- nrow(x) cCols <- ncol(x) } else { y <- object$data$y x <- object$data$x x.order <- object$data$x.order offset <- object$data$offset Misc <- object$data$Misc w <- object$data$w nTrain <- object$nTrain cRows <- length(y) cCols <- length(x)/cRows if(object$distribution$name == "coxph") { i.timeorder <- object$data$i.timeorder object$fit <- object$fit[i.timeorder] } if (object$distribution$name == "pairwise") { object$fit <- object$fit[object$ord.group] # object$fit is stored in the original order } } if(is.null(verbose)) { verbose <- object$verbose } x <- as.vector(x) gbm.obj <- .Call("gbm", Y = as.double(y), Offset = as.double(offset), X = as.double(x), X.order = as.integer(x.order), weights = as.double(w), Misc = as.double(Misc), cRows = as.integer(cRows), cCols = as.integer(cCols), var.type = as.integer(object$var.type), var.monotone = as.integer(object$var.monotone), distribution = as.character(distribution.call.name), n.trees = as.integer(n.new.trees), interaction.depth = as.integer(object$interaction.depth), n.minobsinnode = as.integer(object$n.minobsinnode), n.classes = as.integer(object$num.classes), shrinkage = as.double(object$shrinkage), bag.fraction = as.double(object$bag.fraction), train.fraction = as.integer(nTrain), fit.old = as.double(object$fit), n.cat.splits.old = as.integer(length(object$c.splits)), n.trees.old = as.integer(object$n.trees), verbose = as.integer(verbose), PACKAGE = "gbm") names(gbm.obj) <- c("initF","fit","train.error","valid.error", "oobag.improve","trees","c.splits") gbm.obj$initF <- object$initF gbm.obj$train.error <- c(object$train.error, gbm.obj$train.error) gbm.obj$valid.error <- c(object$valid.error, gbm.obj$valid.error) gbm.obj$oobag.improve <- c(object$oobag.improve, gbm.obj$oobag.improve) gbm.obj$trees <- c(object$trees, gbm.obj$trees) gbm.obj$c.splits <- c(object$c.splits, gbm.obj$c.splits) # 
cv.error not updated when using gbm.more gbm.obj$cv.error <- object$cv.error gbm.obj$cv.folds <- object$cv.folds gbm.obj$n.trees <- length(gbm.obj$trees) gbm.obj$distribution <- object$distribution gbm.obj$train.fraction <- object$train.fraction gbm.obj$shrinkage <- object$shrinkage gbm.obj$bag.fraction <- object$bag.fraction gbm.obj$var.type <- object$var.type gbm.obj$var.monotone <- object$var.monotone gbm.obj$var.names <- object$var.names gbm.obj$interaction.depth <- object$interaction.depth gbm.obj$n.minobsinnode <- object$n.minobsinnode gbm.obj$num.classes <- object$num.classes gbm.obj$nTrain <- object$nTrain gbm.obj$response.name <- object$response.name gbm.obj$Terms <- object$Terms gbm.obj$var.levels <- object$var.levels gbm.obj$verbose <- verbose if(object$distribution$name == "coxph") { gbm.obj$fit[i.timeorder] <- gbm.obj$fit } if (object$distribution$name == "pairwise") { # Data has been reordered according to queries. # We need to permute the fitted values to correspond # to the original order. 
# Compute the loss (deviance relative to 'baseline') of predictions f for
# response y with observation weights w under the given distribution.
# Used by permutation.test.gbm and related diagnostics.
#
# offset:   scalar NA sentinel for "no offset", otherwise a numeric vector
#           added to f before the loss is computed.
# group, max.rank: only used when dist$name == "pairwise".
gbm.loss <- function(y, f, w, offset, dist, baseline, group=NULL, max.rank=NULL)
{
   # offset is either the scalar NA sentinel or a full-length numeric vector;
   # test only the first element so a vector offset does not produce a
   # length > 1 if() condition (a warning historically, an error in R >= 4.2)
   if (!is.na(offset[1]))
   {
      f <- offset+f
   }

   if (dist$name != "pairwise")
   {
      # each branch returns average deviance minus the supplied baseline
      switch(dist$name,
             gaussian = weighted.mean((y - f)^2,w) - baseline,
             bernoulli = -2*weighted.mean(y*f - log(1+exp(f)),w) - baseline,
             laplace = weighted.mean(abs(y-f),w) - baseline,
             adaboost = weighted.mean(exp(-(2*y-1)*f),w) - baseline,
             poisson = -2*weighted.mean(y*f-exp(f),w) - baseline,
             stop(paste("Distribution",dist$name,"is not yet supported for method=permutation.test.gbm")))
   }
   else # dist$name == "pairwise"
   {
      if (is.null(dist$metric))
      {
         stop("No metric specified for distribution 'pairwise'")
      }
      if (!is.element(dist$metric, c("conc", "ndcg", "map", "mrr")))
      {
         stop("Invalid metric '", dist$metric, "' specified for distribution 'pairwise'")
      }
      if (is.null(group))
      {
         stop("For distribution 'pairwise', parameter 'group' has to be supplied")
      }
      # Loss = 1 - utility
      (1 - perf.pairwise(y, f, group, dist$metric, w, max.rank)) - baseline
   }
}
# Workhorse fitting routine behind gbm().  Takes an already-constructed
# predictor matrix/data.frame `x` and response `y` (a Surv object for
# distribution "coxph"), prepares the data for the compiled "gbm" C routine
# and packages the result as an object of class "gbm".  Argument semantics
# mirror gbm(); `nTrain` is the preferred way to set the training set size.
gbm.fit <- function(x,y,
                    offset = NULL,
                    misc = NULL,
                    distribution = "bernoulli",
                    w = NULL,
                    var.monotone = NULL,
                    n.trees = 100,
                    interaction.depth = 1,
                    n.minobsinnode = 10,
                    shrinkage = 0.001,
                    bag.fraction = 0.5,
                    nTrain = NULL,
                    train.fraction = NULL,
                    keep.data = TRUE,
                    verbose = TRUE,
                    var.names = NULL,
                    response.name = "y",
                    group = NULL)
{
   # promote a bare character distribution to the canonical list form
   if(is.character(distribution)) { distribution <- list(name=distribution) }

   cRows <- nrow(x)
   cCols <- ncol(x)

   # For "coxph" the response is a two-column Surv object, so its size is
   # nrow(y).  Use inherits() rather than comparing class(y) to "Surv":
   # the latter misbehaves when y carries more than one class attribute.
   if(nrow(x) != (if (inherits(y, "Surv")) nrow(y) else length(y))) {
      stop("The number of rows in x does not equal the length of y.")
   }

   # the preferred way to specify the number of training instances is via
   # parameter 'nTrain'. parameter 'train.fraction' is only maintained
   # for backward compatibility.
   if(!is.null(nTrain) && !is.null(train.fraction)) {
      stop("Parameters 'nTrain' and 'train.fraction' cannot both be specified")
   }
   else if(!is.null(train.fraction)) {
      warning("Parameter 'train.fraction' of gbm.fit is deprecated, please specify 'nTrain' instead")
      nTrain <- floor(train.fraction*cRows)
   }
   else if(is.null(nTrain)) {
      # both undefined, use all training data
      nTrain <- cRows
   }

   if (is.null(train.fraction)){
      train.fraction <- nTrain / cRows
   }

   if(is.null(var.names)) {
      var.names <- getVarNames(x)
   }

   # check dataset size: each bagged sample must be able to fill at least
   # two terminal nodes of size n.minobsinnode
   if(nTrain * bag.fraction <= 2*n.minobsinnode+1) {
      stop("The dataset size is too small or subsampling rate is too large: nTrain*bag.fraction <= 2*n.minobsinnode+1")
   }

   if (distribution$name != "pairwise") {
      w <- w*length(w)/sum(w) # normalize to N
   }

   # Do sanity checks
   ch <- checkMissing(x, y)
   interaction.depth <- checkID(interaction.depth)
   w <- checkWeights(w, length(y))
   offset <- checkOffset(offset, y)  # scalar NA means "no offset"

   Misc <- NA

   # setup variable types: 0 = numeric/ordered, k > 0 = unordered factor
   # with k categories; factor levels are recoded as integer codes 0..k-1
   var.type <- rep(0,cCols)
   var.levels <- vector("list",cCols)
   for(i in 1:length(var.type))
   {
      if(all(is.na(x[,i]))) {
         stop("variable ",i,": ",var.names[i]," has only missing values.")
      }
      if(is.ordered(x[,i])) {
         # ordered factors are treated like numeric codes
         var.levels[[i]] <- levels(x[,i])
         x[,i] <- as.numeric(x[,i])-1
         var.type[i] <- 0
      }
      else if(is.factor(x[,i]))
      {
         if(length(levels(x[,i]))>1024) {
            stop("gbm does not currently handle categorical variables with more than 1024 levels. Variable ",i,": ",var.names[i]," has ",length(levels(x[,i]))," levels.")
         }
         var.levels[[i]] <- levels(x[,i])
         x[,i] <- as.numeric(x[,i])-1
         var.type[i] <- max(x[,i],na.rm=TRUE)+1
      }
      else if(is.numeric(x[,i])) {
         # deciles are stored only for later plotting/summary
         var.levels[[i]] <- quantile(x[,i],prob=(0:10)/10,na.rm=TRUE)
      }
      else {
         stop("variable ",i,": ",var.names[i]," is not of type numeric, ordered, or factor.")
      }

      # check for some variation in each variable
      if(length(unique(var.levels[[i]])) == 1) {
         warning("variable ",i,": ",var.names[i]," has no variation.")
      }
   }

   nClass <- 1

   if(!("name" %in% names(distribution))) {
      stop("The distribution is missing a 'name' component, for example list(name=\"gaussian\")")
   }
   supported.distributions <-
      c("bernoulli","gaussian","poisson","adaboost","laplace","coxph","quantile",
        "tdist", "multinomial", "huberized", "pairwise")
   # name handed to the C engine; may get a metric suffix for "pairwise"
   distribution.call.name <- distribution$name

   # check potential problems with the distributions
   if(!is.element(distribution$name,supported.distributions)) {
      stop("Distribution ",distribution$name," is not supported")
   }
   if((distribution$name == "bernoulli") && !all(is.element(y,0:1))) {
      stop("Bernoulli requires the response to be in {0,1}")
   }
   if((distribution$name == "huberized") && !all(is.element(y,0:1))) {
      stop("Huberized square hinged loss requires the response to be in {0,1}")
   }
   if((distribution$name == "poisson") && any(y<0)) {
      stop("Poisson requires the response to be positive")
   }
   if((distribution$name == "poisson") && any(y != trunc(y))) {
      stop("Poisson requires the response to be a positive integer")
   }
   if((distribution$name == "adaboost") && !all(is.element(y,0:1))) {
      stop("This version of AdaBoost requires the response to be in {0,1}")
   }
   if(distribution$name == "quantile") {
      if(length(unique(w)) > 1) {
         stop("This version of gbm for the quantile regression lacks a weighted quantile. For now the weights must be constant.")
      }
      if(is.null(distribution$alpha)) {
         stop("For quantile regression, the distribution parameter must be a list with a parameter 'alpha' indicating the quantile, for example list(name=\"quantile\",alpha=0.95).")
      } else if((distribution$alpha<0) || (distribution$alpha>1)) {
         stop("alpha must be between 0 and 1.")
      }
      Misc <- c(alpha=distribution$alpha)
   }
   if(distribution$name == "coxph") {
      if(!inherits(y, "Surv")) {
         stop("Outcome must be a survival object Surv(time,failure)")
      }
      if(attr(y,"type")!="right") {
         stop("gbm() currently only handles right censored observations")
      }
      Misc <- y[,2]   # censoring indicator
      y <- y[,1]      # observed time

      # reverse sort the failure times to compute risk sets on the fly
      i.train <- order(-y[1:nTrain])
      n.test <- cRows - nTrain
      if(n.test > 0) {
         i.test <- order(-y[(nTrain+1):cRows]) + nTrain
      }
      else {
         i.test <- NULL
      }
      i.timeorder <- c(i.train,i.test)

      y <- y[i.timeorder]
      Misc <- Misc[i.timeorder]
      x <- x[i.timeorder,,drop=FALSE]
      w <- w[i.timeorder]
      # offset is the scalar NA sentinel or a full-length vector; test only
      # the first element so a vector offset does not produce a length > 1
      # if() condition (an error as of R 4.2)
      if(!is.na(offset[1])) offset <- offset[i.timeorder]
   }
   if(distribution$name == "tdist") {
      # degrees of freedom, defaulting to 4
      if (is.null(distribution$df) || !is.numeric(distribution$df)){
         Misc <- 4
      }
      else {
         Misc <- distribution$df[1]
      }
   }
   if (distribution$name == "multinomial") {
      ## Ensure that the training set contains all classes
      classes <- attr(factor(y), "levels")
      nClass <- length(classes)

      if (nClass > nTrain){
         stop(paste("Number of classes (", nClass,
                    ") must be less than the size of the training set (", nTrain, ")",
                    sep = ""))
      }

      # move the first occurrence of every class to the front of the data
      # so the training portion is guaranteed to see all classes
      new.idx <- as.vector(sapply(classes,
                                  function(a,x){ min((1:length(x))[x==a]) },
                                  y))
      all.idx <- 1:length(y)
      new.idx <- c(new.idx, all.idx[!(all.idx %in% new.idx)])

      y <- y[new.idx]
      x <- x[new.idx, ]
      w <- w[new.idx]
      if (!is.null(offset)){
         offset <- offset[new.idx]
      }

      ## Get the factors: one 0/1 indicator column per class, flattened
      y <- as.numeric(as.vector(outer(y, classes, "==")))

      ## Fill out the weight and offset, replicated once per class
      w <- rep(w, nClass)
      if (!is.null(offset)){
         offset <- rep(offset, nClass)
      }
   } # close if (dist... == "multinomial"
   if(distribution$name == "pairwise") {
      distribution.metric <- distribution[["metric"]]
      if (!is.null(distribution.metric)) {
         distribution.metric <- tolower(distribution.metric)
         supported.metrics <- c("conc", "ndcg", "map", "mrr")
         if (!is.element(distribution.metric, supported.metrics)) {
            stop("Metric '", distribution.metric, "' is not supported, use either 'conc', 'ndcg', 'map', or 'mrr'")
         }
         metric <- distribution.metric
      }
      else {
         warning("No metric specified, using 'ndcg'")
         metric <- "ndcg" # default
         distribution[["metric"]] <- metric
      }

      if (any(y<0)) {
         stop("targets for 'pairwise' should be non-negative")
      }
      if (is.element(metric, c("mrr", "map")) && (!all(is.element(y, 0:1)))) {
         stop("Metrics 'map' and 'mrr' require the response to be in {0,1}")
      }

      # Cut-off rank for metrics
      # Default of 0 means no cutoff
      max.rank <- 0
      if (!is.null(distribution[["max.rank"]]) && distribution[["max.rank"]] > 0) {
         if (is.element(metric, c("ndcg", "mrr"))) {
            max.rank <- distribution[["max.rank"]]
         }
         else {
            stop("Parameter 'max.rank' cannot be specified for metric '", distribution.metric, "', only supported for 'ndcg' and 'mrr'")
         }
      }

      # We pass the cut-off rank to the C function as the last element in the Misc vector
      Misc <- c(group, max.rank)

      distribution.call.name <- sprintf("pairwise_%s", metric)
   } # close if (dist... == "pairwise"

   # create index upfront... subtract one for 0 based order
   x.order <- apply(x[1:nTrain,,drop=FALSE],2,order,na.last=FALSE)-1
   x <- as.vector(data.matrix(x))

   predF <- rep(0,length(y))
   train.error <- rep(0,n.trees)
   valid.error <- rep(0,n.trees)
   oobag.improve <- rep(0,n.trees)

   if(is.null(var.monotone)) var.monotone <- rep(0,cCols)
   else if(length(var.monotone)!=cCols) {
      stop("Length of var.monotone != number of predictors")
   }
   else if(!all(is.element(var.monotone,-1:1))) {
      stop("var.monotone must be -1, 0, or 1")
   }
   fError <- FALSE

   gbm.obj <- .Call("gbm",
                    Y=as.double(y),
                    Offset=as.double(offset),
                    X=as.double(x),
                    X.order=as.integer(x.order),
                    weights=as.double(w),
                    Misc=as.double(Misc),
                    cRows=as.integer(cRows),
                    cCols=as.integer(cCols),
                    var.type=as.integer(var.type),
                    var.monotone=as.integer(var.monotone),
                    distribution=as.character(distribution.call.name),
                    n.trees=as.integer(n.trees),
                    interaction.depth=as.integer(interaction.depth),
                    n.minobsinnode=as.integer(n.minobsinnode),
                    n.classes = as.integer(nClass),
                    shrinkage=as.double(shrinkage),
                    bag.fraction=as.double(bag.fraction),
                    nTrain=as.integer(nTrain),
                    fit.old=as.double(NA),
                    n.cat.splits.old=as.integer(0),
                    n.trees.old=as.integer(0),
                    verbose=as.integer(verbose),
                    PACKAGE = "gbm")
   names(gbm.obj) <- c("initF","fit","train.error","valid.error",
                       "oobag.improve","trees","c.splits")

   gbm.obj$bag.fraction <- bag.fraction
   gbm.obj$distribution <- distribution
   gbm.obj$interaction.depth <- interaction.depth
   gbm.obj$n.minobsinnode <- n.minobsinnode
   gbm.obj$num.classes <- nClass
   gbm.obj$n.trees <- length(gbm.obj$trees) / nClass
   gbm.obj$nTrain <- nTrain
   gbm.obj$train.fraction <- train.fraction
   gbm.obj$response.name <- response.name
   gbm.obj$shrinkage <- shrinkage
   gbm.obj$var.levels <- var.levels
   gbm.obj$var.monotone <- var.monotone
   gbm.obj$var.names <- var.names
   gbm.obj$var.type <- var.type
   gbm.obj$verbose <- verbose
   gbm.obj$Terms <- NULL

   if(distribution$name == "coxph") {
      # undo the risk-set reordering so fitted values match the input order
      gbm.obj$fit[i.timeorder] <- gbm.obj$fit
   }

   ## If K-Classification is used then split the fit and tree components
   if (distribution$name == "multinomial"){
      gbm.obj$fit <- matrix(gbm.obj$fit, ncol = nClass)
      dimnames(gbm.obj$fit)[[2]] <- classes
      gbm.obj$classes <- classes

      ## Also get the class estimators (softmax of the fitted scores)
      exp.f <- exp(gbm.obj$fit)
      denom <- matrix(rep(rowSums(exp.f), nClass), ncol = nClass)
      gbm.obj$estimator <- exp.f/denom
   }

   if(keep.data) {
      if(distribution$name == "coxph") {
         # keep the time ordering so gbm.more can reuse it
         gbm.obj$data <- list(y=y,x=x,x.order=x.order,offset=offset,Misc=Misc,w=w,
                              i.timeorder=i.timeorder)
      }
      else if ( distribution$name == "multinomial" ){
         # Restore original order of the data
         new.idx <- order( new.idx )
         gbm.obj$data <- list( y=as.vector(matrix(y, ncol=length(classes),byrow=FALSE)[new.idx,]),
                               x=as.vector(matrix(x, ncol=length(var.names), byrow=FALSE)[new.idx,]),
                               x.order=x.order,
                               offset=offset[new.idx],
                               Misc=Misc, w=w[new.idx] )
      }
      else {
         gbm.obj$data <- list(y=y,x=x,x.order=x.order,offset=offset,Misc=Misc,w=w)
      }
   }
   else {
      gbm.obj$data <- NULL
   }

   class(gbm.obj) <- "gbm"
   return(gbm.obj)
}
# Formula interface to gradient boosting.  Builds the model frame, handles
# the pairwise (LambdaMART) group bookkeeping, optionally runs
# cross-validation, then delegates the actual fitting to gbm.fit().
# Returns an object of class "gbm".
gbm <- function(formula = formula(data),
                distribution = "bernoulli",
                data = list(),
                weights,
                var.monotone = NULL,
                n.trees = 100,
                interaction.depth = 1,
                n.minobsinnode = 10,
                shrinkage = 0.001,
                bag.fraction = 0.5,
                train.fraction = 1.0,
                cv.folds=0,
                keep.data = TRUE,
                verbose = 'CV',
                class.stratify.cv=NULL,
                n.cores=NULL){
   theCall <- match.call()

   # verbose='CV' (the default, non-logical) means "report CV progress only";
   # any non-logical value is treated as FALSE for the final fit
   lVerbose <- if (!is.logical(verbose)) { FALSE }
               else { verbose }

   # construct the model frame call, keeping only the arguments it understands
   mf <- match.call(expand.dots = FALSE)
   m <- match(c("formula", "data", "weights", "offset"), names(mf), 0)
   mf <- mf[c(1, m)]
   mf$drop.unused.levels <- TRUE
   mf$na.action <- na.pass
   mf[[1]] <- as.name("model.frame")
   m <- mf       # unevaluated call, stored on the result for gbm.more
   mf <- eval(mf, parent.frame())
   Terms <- attr(mf, "terms")

   y <- model.response(mf)

   if (missing(distribution)){ distribution <- guessDist(y) }
   else if (is.character(distribution)){ distribution <- list(name=distribution) }

   w <- model.weights(mf)
   offset <- model.offset(mf)

   var.names <- attributes(Terms)$term.labels
   x <- model.frame(terms(reformulate(var.names)),
                    data,
                    na.action=na.pass)

   # get the character name of the response variable
   response.name <- as.character(formula[[2]])

   class.stratify.cv <- getStratify(class.stratify.cv, distribution)

   # groups (for pairwise distribution only)
   group <- NULL

   # determine number of training instances
   if (distribution$name != "pairwise"){
      nTrain <- floor(train.fraction * nrow(x))
   }
   else {
      # distribution$name == "pairwise":
      # Sampling is by group, so we need to calculate them here
      distribution.group <- distribution[["group"]]
      if (is.null(distribution.group)) {
         stop("For pairwise regression, the distribution parameter must be a list with a parameter 'group' for the a list of the column names indicating groups, for example list(name=\"pairwise\",group=c(\"date\",\"session\",\"category\",\"keywords\")).")
      }

      # Check if group names are valid
      i <- match(distribution.group, colnames(data))
      if (any(is.na(i))) {
         stop("Group column does not occur in data: ", distribution.group[is.na(i)])
      }

      # Construct group index
      group <- factor(do.call(paste, c(data[,distribution.group, drop=FALSE], sep=":")))

      # Check that weights are constant across groups
      if ((!missing(weights)) && (!is.null(weights))) {
         w.min <- tapply(w, INDEX=group, FUN=min)
         w.max <- tapply(w, INDEX=group, FUN=max)
         if (any(w.min != w.max)) {
            stop("For distribution 'pairwise', all instances for the same group must have the same weight")
         }
         # Normalize across groups
         w <- w * length(w.min) / sum(w.min)
      }

      # Shuffle groups, to remove bias when splitting into train/test set and/or CV folds
      perm.levels <- levels(group)[sample(1:nlevels(group))]
      group <- factor(group, levels=perm.levels)

      # The C function expects instances to be sorted by group and descending by target
      ord.group <- order(group, -y)
      group <- group[ord.group]
      y <- y[ord.group]
      x <- x[ord.group,,drop=FALSE]
      w <- w[ord.group]

      # Split into train and validation set, at group boundary
      num.groups.train <- max(1, round(train.fraction * nlevels(group)))

      # include all groups up to the num.groups.train
      nTrain <- max(which(group==levels(group)[num.groups.train]))
   } # close if (distribution$name == "pairwise")

   cv.error <- NULL
   if(cv.folds>1) {
      cv.results <- gbmCrossVal(cv.folds, nTrain, n.cores,
                                class.stratify.cv, data,
                                x, y, offset, distribution, w, var.monotone,
                                n.trees, interaction.depth, n.minobsinnode,
                                shrinkage, bag.fraction,
                                var.names, response.name, group)
      cv.error <- cv.results$error
      p <- cv.results$predictions
   } # Close if(cv.folds > 1

   gbm.obj <- gbm.fit(x,y,
                      offset = offset,
                      distribution = distribution,
                      w = w,
                      var.monotone = var.monotone,
                      n.trees = n.trees,
                      interaction.depth = interaction.depth,
                      n.minobsinnode = n.minobsinnode,
                      shrinkage = shrinkage,
                      bag.fraction = bag.fraction,
                      nTrain = nTrain,
                      keep.data = keep.data,
                      verbose = lVerbose,
                      var.names = var.names,
                      response.name = response.name,
                      group = group)

   gbm.obj$train.fraction <- train.fraction
   gbm.obj$Terms <- Terms
   gbm.obj$cv.error <- cv.error
   gbm.obj$cv.folds <- cv.folds
   gbm.obj$call <- theCall
   gbm.obj$m <- m

   # Cross-validated fitted values only exist when CV actually ran.
   # Testing cv.folds > 0 here referenced an undefined 'p' for cv.folds == 1.
   if (cv.folds > 1){
      gbm.obj$cv.fitted <- p
   }

   if (distribution$name == "pairwise") {
      # Data has been reordered according to queries.
      # We need to permute the fitted values to correspond
      # to the original order.
      gbm.obj$ord.group <- ord.group
      gbm.obj$fit <- gbm.obj$fit[order(ord.group)]
   }

   return(gbm.obj)
}
# Input-validation helpers shared by gbm() and gbm.fit().

# Verify that predictors contain no NaN and the response contains no NA.
# Missing predictor values must be encoded as NA; NaN is rejected.
# Returns NULL invisibly.
checkMissing <- function(x, y){
   nms <- getVarNames(x)
   #### Check for NaNs in x and NAs in response
   # Inspect columns without coercing the whole table: apply() on a
   # data.frame with factor columns produces a character matrix, and
   # is.nan() is not defined for character data.
   if (is.data.frame(x)) {
      j <- vapply(x, function(z) is.numeric(z) && any(is.nan(z)), logical(1))
   }
   else {
      j <- apply(x, 2, function(z) is.numeric(z) && any(is.nan(z)))
   }
   if(any(j)) {
      stop("Use NA for missing values. NaN found in predictor variables: ",
           paste(nms[j],collapse=","))
   }
   if(any(is.na(y))) stop("Missing values are not allowed in the response")
   invisible(NULL)
}

# Validate interaction.depth: must lie in [1, 49] (the C engine's node
# stack holds at most 101 terminal nodes).  Returns the value invisibly.
checkID <- function(id){
   # Check for disallowed interaction.depth
   if(id < 1) {
      stop("interaction.depth must be at least 1.")
   }
   else if(id > 49) {
      stop("interaction.depth must be less than 50. You should also ask yourself why you want such large interaction terms. A value between 1 and 5 should be sufficient for most applications.")
   }
   invisible(id)
}

# Default NULL/empty weights to a vector of ones; reject negative weights.
checkWeights <- function(w, n){
   # Logical checks on weights
   if(length(w)==0) {
      w <- rep(1, n)
   }
   else if(any(w < 0)) {
      stop("negative weights not allowed")
   }
   w
}

# Canonicalize the offset: NULL or all-zero collapses to the scalar NA
# sentinel ("no offset"); otherwise the length must match the response.
checkOffset <- function(o, y){
   # Use short-circuit || so o==0 is only evaluated for non-NULL o, and
   # isTRUE() so an NA in the offset cannot crash the if() condition.
   if(is.null(o) || isTRUE(all(o==0))) {
      o <- NA
   }
   else if(length(o) != length(y)) {
      stop("The length of offset does not equal the length of y.")
   }
   o
}

# Extract predictor names from a matrix or data.frame, generating
# "X1", "X2", ... when no names are available.
getVarNames <- function(x){
   if(is.matrix(x)) {
      var.names <- colnames(x)
   }
   else if(is.data.frame(x)) {
      var.names <- names(x)
   }
   else {
      var.names <- paste("X",1:ncol(x),sep="")
   }
   if(is.null(var.names)) {
      # unnamed matrix: fall back to generated names instead of NULL
      var.names <- paste("X",1:ncol(x),sep="")
   }
   var.names
}
# Add a rug of quantiles of x to the current plot, jittered when ties
# collapse distinct quantiles onto the same point.
quantile.rug <- function(x,prob=(0:10)/10,...) {
   quants <- quantile(x[!is.na(x)],prob=prob)
   if(length(unique(quants)) < length(prob)) {
      quants <- jitter(quants)
   }
   rug(quants,...)
}

# Calibration plot of observed outcomes y against predictions p: a
# natural-spline GLM smooth with a +/- 2 SE shaded band and a 45-degree
# reference line.  Spline flexibility is controlled by 'knots' or 'df'.
calibrate.plot <- function(y,p,
                           distribution="bernoulli",
                           replace=TRUE,
                           line.par=list(col="black"),
                           shade.col="lightyellow",
                           shade.density=NULL,
                           rug.par=list(side=1),
                           xlab="Predicted value",
                           ylab="Observed average",
                           xlim=NULL,ylim=NULL,
                           knots=NULL,df=6,
                           ...)
{
   data <- data.frame(y=y,p=p)

   if(is.null(knots) && is.null(df))
      stop("Either knots or df must be specified")
   # Only validate df when it was actually supplied: the unconditional test
   # produced a zero-length if() condition (an error) whenever the user
   # passed knots together with df=NULL.
   if(!is.null(df) && ((df != round(df)) || (df<1)))
      stop("df must be a positive integer")

   # link family for the spline smoother
   if(distribution=="bernoulli") {
      family1 = binomial
   } else if(distribution=="poisson") {
      family1 = poisson
   } else {
      family1 = gaussian
   }
   gam1 <- glm(y~ns(p,df=df,knots=knots),data=data,family=family1)

   # evaluate the smooth on a fine grid over the range of the predictions
   x <- seq(min(p),max(p),length=200)
   yy <- predict(gam1,newdata=data.frame(p=x),se.fit=TRUE,type="response")

   x <- x[!is.na(yy$fit)]
   yy$se.fit <- yy$se.fit[!is.na(yy$fit)]
   yy$fit <- yy$fit[!is.na(yy$fit)]

   if(!is.na(shade.col)) {
      # +/- 2 standard error band, clipped to the valid response range
      se.lower <- yy$fit-2*yy$se.fit
      se.upper <- yy$fit+2*yy$se.fit
      if(distribution=="bernoulli") {
         se.lower[se.lower < 0] <- 0
         se.upper[se.upper > 1] <- 1
      }
      if(distribution=="poisson") {
         se.lower[se.lower < 0] <- 0
      }
      # include x in the ranges so the 45-degree reference line fits
      if(is.null(xlim)) xlim <- range(se.lower,se.upper,x)
      if(is.null(ylim)) ylim <- range(se.lower,se.upper,x)
   } else {
      if(is.null(xlim)) xlim <- range(yy$fit,x)
      if(is.null(ylim)) ylim <- range(yy$fit,x)
   }
   if(replace) {
      plot(0,0,
           type="n",
           xlab=xlab,ylab=ylab,
           xlim=xlim,ylim=ylim,
           ...)
   }
   if(!is.na(shade.col)) {
      polygon(c(x,rev(x),x[1]),
              c(se.lower,rev(se.upper),se.lower[1]),
              col=shade.col,
              border=NA,
              density=shade.density)
   }
   lines(x,yy$fit,col=line.par$col)
   quantile.rug(p,side=rug.par$side)
   abline(0,1,col="red") # perfect-calibration reference line
}
# compute Breslow estimator of the baseline hazard function
#
# t:          survival times
# delta:      event indicator (1 = event, 0 = censored)
# f.x:        fitted log-hazard-ratio scores from the gbm model
# t.eval:     optional times at which to evaluate the hazard (default: t)
# smooth:     smooth the hazard estimates with supsmu()
# cumulative: return the cumulative hazard rather than the increments
basehaz.gbm <- function(t,delta,f.x,
                        t.eval=NULL,
                        smooth=FALSE,
                        cumulative=TRUE) {
   t.unique <- sort(unique(t[delta==1]))

   # Breslow increment at each distinct event time:
   #   (# events at t_i) / sum of exp(f.x) over the risk set {t >= t_i}.
   # Pre-allocate and use seq_along(): the original scalar seeding combined
   # with 1:length(t.unique) iterated over c(1, 0) when there were no
   # events at all.
   alpha <- numeric(length(t.unique))
   for(i in seq_along(t.unique)) {
      alpha[i] <- sum(t[delta==1]==t.unique[i])/
                     sum(exp(f.x[t>=t.unique[i]]))
   }

   if(!smooth && !cumulative) {
      if(!is.null(t.eval)) {
         stop("Cannot evaluate unsmoothed baseline hazard at t.eval.")
      }
      # return the raw increments at the observed times; previously this
      # path crashed on an undefined 'lambda.smooth'
      lambda.smooth <- list(x=t.unique,y=alpha)
   }
   else if(smooth && !cumulative) {
      lambda.smooth <- supsmu(t.unique,alpha)
   }
   else if(smooth && cumulative) {
      lambda.smooth <- supsmu(t.unique,cumsum(alpha))
   }
   else # (!smooth && cumulative) - THE DEFAULT
   {
      lambda.smooth <- list(x=t.unique,y=cumsum(alpha))
   }

   # linearly interpolate at the requested evaluation times
   if(!is.null(t.eval)) {
      obj <- approx(lambda.smooth$x,lambda.smooth$y,xout=t.eval)$y
   } else {
      obj <- approx(lambda.smooth$x,lambda.smooth$y,xout=t)$y
   }

   return(obj)
}
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Copies of the relevant licenses can be found at: http://www.r-project.org/Licenses/ gbm/DESCRIPTION0000644000176200001440000000173312143234277012536 0ustar liggesusersPackage: gbm Version: 2.1 Date: 2013-05-10 Title: Generalized Boosted Regression Models Author: Greg Ridgeway with contributions from others Maintainer: Harry Southworth Depends: R (>= 2.9.0), survival, lattice, splines, parallel Suggests: RUnit Description: This package implements extensions to Freund and Schapire's AdaBoost algorithm and Friedman's gradient boosting machine. Includes regression methods for least squares, absolute loss, t-distribution loss, quantile regression, logistic, multinomial logistic, Poisson, Cox proportional hazards partial likelihood, AdaBoost exponential loss, Huberized hinge loss, and Learning to Rank measures (LambdaMart). License: GPL (>= 2) | file LICENSE URL: http://code.google.com/p/gradientboostedmodels/ Packaged: 2013-05-10 18:01:11 UTC; harry NeedsCompilation: yes Repository: CRAN Date/Publication: 2013-05-10 20:13:19 gbm/CHANGES0000644000176200001440000003330512143200270012005 0ustar liggesusersChanges in version 2.1 - The cross-validation loop is now parallelized. The functions attempt to guess a sensible number of cores to use, or the user can specify how many through new argument n.cores. - A fair amount of code refactoring. - Added type='response' for predict when distribution='adaboost'. - Fixed a bug that caused offset not to be used if the first element of offset was 0. - Updated predict.gbm and plot.gbm to cope with objects created using gbm version 1.6. - Changed default value of verbose to 'CV'. gbm now defaults to letting the user know which block of CV folds it is running. 
If verbose=TRUE is specified, the final run of the model also prints its progress to screen as in earlier versions. - Fixed bug that caused predict to return wrong result when distribution == 'multinomial' and length(n.trees) > 1. - Fixed bug that caused n.trees to be wrong in relative.influence if no CV or validation set was used. - Relative influence was computed wrongly when distribution="multinomial". Fixed. - Cross-validation predictions now included in the output object. - Fixed bug in relative.influence that caused labels to be wrong when sort.=TRUE. - Modified interact.gbm to do additional sanity check, updated help file - Fixed bug in interact.gbm so that it now works for distribution="multinomial" - Modified predict.gbm to improve performance on large datasets Changes in version 2.0 Lots of new features added so it warrants a change to the first digit of the version number. Major changes: - Several new distributions are now available thanks to Harry Southworth and Daniel Edwards: multinomial and tdist. - New distribution 'pairwise' for Learning to Rank Applications (LambdaMART), including four different ranking measures, thanks to Stefan Schroedl. - The gbm package is now managed on R-Forge by Greg Ridgeway and Harry Southworth. Visit http://r-forge.r-project.org/projects/gbm/ to get the latest or to contribute to the package Minor changes: - the "quantile" distribution now handles weighted data - relative.influence changed to give names to the returned vector - Added print.gbm and show.gbm. These give basic summaries of the fitted model - Added support function and reconstructGBMdata() to facilitate reconstituting the data for certain plots and summaries - gbm was not using the weights when using cross-validation due to a bug. That's been fixed (Thanks to Trevor Hastie for catching this) - predict.gbm now tries to guess the number of trees, also defaults to using the training data if no newdata is given. 
- relative.influence has has 2 new arguments, scale. and sort. that default to FALSE. The returned vector now has names. - gbm now tries to guess what distribution you meant if you didn't specify. - gbm has a new argument, class.stratifiy.cv, to control if cross-validation is stratified by class with distribution is "bernoulli" or "multinomial". Defaults to TRUE for multinomial, FALSE for bernoulli. The purpose is to avoid unusable training sets. - gbm.perf now puts a vertical line at the best number of trees when method = "cv" or "test". Tries to guess what method you meant if you don't tell it. - .First.lib had a bug that would crash gbm if gbm was installed as a local library. Fixed. - plot.gbm has a new argument, type, defaulting to "link". For bernoulli, multinomial, poisson, "response" is allowed. - models with large interactions (>24) were using up all the terminal nodes in the stack. The stack has been increased to 101 nodes allowing interaction.depth up to 49. A more graceful error is now issued if interaction.depth exceeds 49. (Thanks to Tom Dietterich for catching this). - gbm now uses the R macro R_NaN in the C++ code rather than NAN, which would not compile on Sun OS. - If covariates marked missing values with NaN instead of NA, the model fit would not be consistent (Thanks to JR Lockwood for noting this) Changes in version 1.6 - Quantile regression is now available thanks to a contribution from Brian Kriegler. Use list(name="quantile",alpha=0.05) as the distribution parameter to construct a predictor of the 5% of the conditional distribution - gbm() now stores cv.folds in the returned gbm object - Added a normalize parameter to summary.gbm that allows one to choose whether or not to normalize the variable influence to sum to 100 or not - Corrected a minor bug in plot.gbm that put the wrong variable label on the x axis when plotting a numeric variable and a factor variable - the C function gbm_plot can now handle missing values. 
This does not affect the R function plot.gbm(), but it makes gbm_plot potentially more useful for computing partial dependence plots - mgcv is no longer a required package, but the splines package is needed for calibrate.plot() - minor changes for compatibility with R 2.6.0 (thanks to Seth Falcon) - corrected a bug in the cox model computation when all terminal nodes had exactly the minimum number of observations permitted, which caused gbm and R to crash ungracefully. This was likely to occur with small datasets (thanks to Brian Ring) - corrected a bug in Laplace that always made the terminal node predictions slightly larger than the median. Corrected again in a minor release due to a bug caught by Jon McAuliffe - corrected a bug in interact.gbm that caused it to crash for factors. Caught by David Carslaw - added a plot of cross-validated error to the plots generated by gbm.perf Changes in version 1.5 - gbm would fail if there was only one x. Now drop=FALSE is set in all data.frame subsetting (thanks to Gregg Keller for noticing this). - Corrected gbm.perf() to check if bag.fraction=1 and skip trying to create the OOB plots and estimates. - Corrected a typo in the vignette specifying the gradient for the Cox model. - Fixed the OOB-reps.R demo. For non-Gaussian cases it was maximizing the deviance rather than minimizing. - Increased the largest factor variable allowed from 256 levels to 1024 levels. gbm stops if any factor variable exceeds 1024. Will try to make this cleaner in the future. - predict.gbm now allows n.trees to be a vector and efficiently computes predictions for each indicated model. Avoids having to call predict.gbm several times for different choices of n.trees. - fixed a bug that occurred when using cross-validation for coxph. Was computing length(y) when y is a Surv object which returns 2*N rather than N. This generated out-of-range indices for the training dataset.
- Changed the method for extracting the name of the outcome variable to work around a change in terms.formula() when using "." in formulas. Changes in version 1.4 - The formula interface now allows for "-x" to indicate not including certain variables in the model fit. - Fixed the formula interface to allow offset(). The offset argument has now been removed from gbm(). - Added basehaz.gbm that computes the Breslow estimate of the baseline hazard. At a later stage this will be substituted with a call to survfit, which is much more general, handling not only left-censored data. - OOB estimator is known to be conservative. A warning is now issued when using method="OOB" and there is no longer a default method for gbm.perf() - cv.folds now an option to gbm and method="cv" is an option for gbm.perf. Performs v-fold cross validation for estimating the optimal number of iterations - There is now a package vignette with details on the user options and the mathematics behind the gbm engine. Changes in version 1.3 - All likelihood based loss functions are now in terms of Deviance (-2*log likelihood). As a result, gbm always minimizes the loss. Previous versions minimized losses for some choices of distribution and maximized a likelihood for other choices. - Fixed the Poisson regression to avoid predicting +/- infinity which occurs when a terminal node has only observations with y=0. The largest predicted value is now +/-19, similar to what glm predicts for these extreme cases for linear Poisson regression. The shrinkage factor will be applied to the -19 predictions so it will take 1/shrinkage gbm iterations locating pure terminal nodes before gbm would actually return a predicted value of +/-19. - Introduces shrink.gbm.pred() that does a lasso-style variable selection. Consider this function as still in an experimental phase. 
- Bug fix in plot.gbm - All calls to ISNAN now call ISNA (avoids using isnan) Changes in version 1.2 - fixed gbm.object help file and updated the function to check for missing values to the latest R standard. - gbm.plot now allows i.var to be the names of the variables to plot or the index of the variables used - gbm now requires "stats" package into which "modreg" has been merged - documentation for predict.gbm corrected Changes in version 1.1 - all calculations of loss functions now compute averages rather than totals. That is, all performance measures (text of progress, gbm.perf) now report average log-likelihood rather than total log-likelihood (e.g. mean squared error rather than sum of squared error). A slight exception applies to distribution="coxph". For these models the averaging pertains only to the uncensored observations. The denominator is sum(w[i]*delta[i]) rather than the usual sum(w[i]). - summary.gbm now has an experimental "method" argument. The default computes the relative influence as before. The option "method=permutation.test.gbm" performs a permutation test for the relative influence. Give it a try and let me know how it works. It currently is not implemented for "distribution=coxph". - added gbm.fit, a function that avoids the model.frame call, which is tragically slow with lots of variables. gbm is now just a formula/model.frame wrapper for the gbm.fit function. (based on a suggestion and code from Jim Garrett) - corrected a bug in the use of offsets. Now the user must pass the offset vector with the offset argument rather than in the formula. Previously, offsets were being used once as offsets and a second time as a predictor. - predict.gbm now has a single.tree option. When set to TRUE the function will return predictions from only that tree. The idea is that this may be useful for reweighting the trees using a post-model fit adjustment. 
- corrected a bug in CPoisson::BagImprovement that incorrectly computed the bagged estimate of improvement - corrected a bug for distribution="coxph" in gbm() and gbm.more(). If there was a single predictor the functions would drop the unused array dimension issuing an error. - corrected gbm() distribution="coxph" when train.fraction=1.0. The program would set two non-existent observations in the validation set and issue a warning. - if a predictor variable has no variation a warning (rather than an error) is now issued - updated the documentation for calibrate.plot to match the implementation - changed some of the default values in gbm(), bag.fraction=0.5, train.fraction=1.0, and shrinkage=0.001. - corrected a bug in predict.gbm. The C code producing the predictions would go into an infinite loop if predicting an observation with a level of a categorical variable not seen in the training dataset. Now the routine uses the missing value prediction. (Feng Zeng) - added a "type" parameter to predict.gbm. The default ("link") is the same as before, predictions are on the canonical scale (gradient scale). The new option ("response") converts back to the same scale as the outcome (probability for bernoulli, mean for gaussian, etc.). - gbm and gbm.more now have verbose options which can be set to FALSE to suppress the progress and performance indicators. (several users requested this nice feature) - gbm.perf no longer prints out verbose information about the best iteration estimate. It simply returns the estimate and creates the plots if requested. - ISNAN, since R 1.8.0, R.h changed declarations for ISNAN(). These changes broke gbm 1.0. I added the following code to buildinfo.h to fix this #ifdef IEEE_754 #undef ISNAN #define ISNAN(x) R_IsNaNorNA(x) #endif seems to work now but I'll look for a more elegant solution. 
Changes in version 0.8 - Additional documentation about the loss functions, graphics, and methods is now available with the package - Fixed the initial value for the adaboost exponential loss. Prior to version 0.8 the initial value was 0.0, now half the baseline log-odds - Changes in some headers and #define's to compile under gcc 3.2 (Brian Ripley) Changes in version 0.7 - gbm.perf, the argument named best.iter.calc has been renamed "method" for greater simplicity - all entries in the design matrix are now coerced to doubles (Thanks to Bonnie Ghosh) - now checks that all predictors are either numeric, ordinal, or factor - summary.gbm now reports the correct relative influence when some variables do not enter the model. (Thanks to Hugh Chipman) - renamed several #define'd variables in buildinfo.h so they do not conflict with standard winerror.h names. Planned future changes 1. Add weighted median functionality to Laplace 2. Automate the fitting process, ie, selecting shrinkage and number of iterations 3. Add overlay factor*continuous predictor plot as an option rather than lattice plots 4. Add multinomial and ordered logistic regression procedures Thanks to RAND for sponsoring the development of this software through statistical methods funding. Kurt Hornik, Brian Ripley, and Jan De Leeuw for helping me get gbm up to the R standard and into CRAN. Dan McCaffrey for testing and evangelizing the utility of this program. Bonnie Ghosh for finding bugs. Arnab Mukherji for testing and suggesting new features. Daniela Golinelli for finding bugs and marrying me. Andrew Morral for suggesting improvements and finding new applications of the method in the evaluation of drug treatment programs. Katrin Hambarsoomians for finding bugs. Hugh Chipman for finding bugs. Jim Garrett for many suggestions and contributions. 
gbm/.Rinstignore0000644000176200001440000000026212102666411013322 0ustar liggesusersinst/doc/gbm.tex inst/doc/srcltx.sty inst/doc/shrinkage-v-iterations.eps inst/doc/shrinkage-v-iterations.pdf inst/doc/oobperf2.eps inst/doc/oobperf2.pdf inst/doc/shrinkageplot.R