NLP/0000755000175100001440000000000014717314035010726 5ustar hornikusersNLP/MD50000644000175100001440000000526414717314035011245 0ustar hornikusersc10d489e7ced35239bc5f164cd3211ec *DESCRIPTION 9552cc52c3c42eecc77e08b6a019973b *NAMESPACE 68230f5b697cb282615c5f24544fb720 *R/annotate.R f79c725657649563c8168706c2ff3a25 *R/annotation.R 3b044be645e56951fbf4e5400333f33d *R/annotators.R 85c822b7ddd467ecfdf7e1fd5e3b1e24 *R/aptd.R cb5569e546faa23d9b4d487d8ca3bc73 *R/cleannlp.R 4d137191ce73c09e783ce10354878782 *R/conll.R 6cc85b24b8c291393929609848b7c018 *R/conllu.R 628ed6e68ce4cd8143e831b89bdfcb33 *R/datetime.R 8719adb9925c8f93aab0b0d86c3dcfe6 *R/generics.R ec80ee5ba8abe5a495770a8ba47a2a07 *R/language.R 8379bdef18713b0ecb99b8ac396450a0 *R/ngram.R c6495d9526f33fda1de5b5e3d7279561 *R/spacyr.R 58bcbd30d5d9a8e395a4fda8d478a767 *R/span.R 35d690b3ed8f2afe80711a6d651678a2 *R/string.R 5b7aef625f3020a507fd8a5f6a62ba75 *R/sysdata.rda 5de59c9b5892f7d8d02528145c59204a *R/tnt.R 765811bd3df5707c9cb346bcc90e03a5 *R/tokenize.R ff6b88b533ee9947d9678898b585b4e1 *R/tree.R 4b2706888e9753775a53ff49a970e36f *R/ttd.R b311a3b5d8975bf6668c2d291285e85c *R/udpipe.R 6af7bebbf00ecd79f85c99187bc7cefa *R/utils.R 0f724ef4a983691cf0685e3d565f62c5 *R/viewers.R c1f351e3ab36cc882b6ae7626f423533 *R/wordlist.R 76161b65639451c966be75488458b3c3 *build/partial.rdb c5b17a0d86d6a55cd79435ecdcb2250b *inst/po/en@quot/LC_MESSAGES/R-NLP.mo 1e811cdef1cdf887c80a60f01c86ac77 *inst/texts/spanish.conllu 708dff54d1cbd38c26083b534ceec207 *inst/texts/stanford.rds 1b0c1fd84503835a5879c263bdf56c2d *man/AnnotatedPlainTextDocument.Rd 53f4422eefea39e1d9d23639393ccae6 *man/Annotation.Rd 6e4f79c7f155ed43628104851db52cb5 *man/Annotator.Rd 66c69ec92efc8911236ec939335fc4e4 *man/CoNLLTextDocument.Rd 8b86c29eace3169bf3491bd1ea81b349 *man/CoNLLUTextDocument.Rd f153bffba3e2259288df2f51564e91c1 *man/Span.Rd 97a707a3526f6a0f2166a2c5f56bf6de *man/String.Rd 68149e3bfd8b13393fdba54447c9fe76 *man/TaggedTextDocument.Rd 
82a5eb65b8fff6ecb1bda2818873dec5 *man/Tagged_Token.Rd bd0514ec3a9f5f18a422c7e5e7113a05 *man/TextDocument.Rd 424e17206654615bf8686fe16939f363 *man/Tokenizer.Rd cf3d71312c15619fb8bcfa90213c8d26 *man/Tree.Rd 87f52a9560fccb3f7d8591d2f90939b0 *man/WordListDocument.Rd 029f8becf569972e5809e7e5917f7314 *man/annotate.Rd cd795f9471a96beac4c836b1d6ea7d19 *man/annotations_in_spans.Rd 4f1067404a36f11ac97cbb38f9e4acde *man/annotators.Rd 5c3f85fbed985be1eba2a91efb1ac76e *man/datetime.Rd b3c864a9f0355de71a82f94c86664026 *man/features.Rd b95f45ebfcda3d640a3045daf4bff61a *man/generics.Rd d486e25e557c2678687fa461a6a4baa2 *man/language.Rd 1c83d8d4b6d39bd74ce34c8c5f7f3516 *man/ngrams.Rd 54cbee6f9391b43ad700d67ae2e909ef *man/tagsets.Rd cde03cad007456c9cbc598543e16e12b *man/tokenizers.Rd 02e1fc6afdcc13025e034272636e061d *man/utils.Rd b0716fb9f48a3b7ffa824dd8fa23b38a *man/viewers.Rd 4d1dfef1a602b0b2cd87100a04c1da7e *po/R-NLP.pot NLP/po/0000755000175100001440000000000013143661406011343 5ustar hornikusersNLP/po/R-NLP.pot0000644000175100001440000000330313143661406012716 0ustar hornikusersmsgid "" msgstr "" "Project-Id-Version: NLP 0.1-10.1\n" "POT-Creation-Date: 2017-08-12 22:20\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=CHARSET\n" "Content-Transfer-Encoding: 8bit\n" msgid "arguments must have the same length" msgstr "" msgid "invalid element name" msgstr "" msgid "replacement must have the same length as object" msgstr "" msgid "'subset' must be logical" msgstr "" msgid "Annotators must have formals 's' and 'a'." msgstr "" msgid "Invalid result from underlying paragraph tokenizer." msgstr "" msgid "Invalid result from underlying sentence tokenizer." msgstr "" msgid "no sentence token annotations found" msgstr "" msgid "Invalid result from underlying word tokenizer." 
msgstr "" msgid "no word token annotations found" msgstr "" msgid "Invalid result from underlying POS tagger." msgstr "" msgid "Invalid result from underlying name finder." msgstr "" msgid "Invalid result from underlying chunker." msgstr "" msgid "all pipeline elements must be annotator objects" msgstr "" msgid "argument 'annotations' must give a positive number of Annotation objects" msgstr "" msgid "content modification is not possible for AnnotatedPlainTextDocument objects" msgstr "" msgid "argument 'x' must be an AnnotatedPlainTextDocument object" msgstr "" msgid "Cannot determine position of '%s'" msgstr "" msgid "Invalid entries:" msgstr "" msgid "Invalid language tag(s):" msgstr "" msgid "Invalid operands." msgstr "" msgid "'%s' not defined for \"Span\" objects" msgstr "" msgid "Need a non-empty string." msgstr "" msgid "end of string" msgstr "" NLP/R/0000755000175100001440000000000014717071006011125 5ustar hornikusersNLP/R/conllu.R0000644000175100001440000001066114660340221012543 0ustar hornikusers## See . read_CoNNLU <- function(con) { lines <- readLines(con, encoding = "UTF-8") ind_b <- lines == "" ind_c <- startsWith(lines, "#") ind <- !ind_b & !ind_c ## Now using scan(text = lines[ind]) to read in the records is ## possible but unbearably slow for large documents: instead, try to ## proceed "directly". 
## records <- strsplit(lines[ind], "\t", fixed = TRUE) ## records <- as.data.frame(do.call(rbind, records), ## stringsAsFactors = FALSE) ## names(records) <- ## c("ID", "FORM", "LEMMA", "UPOSTAG", "XPOSTAG", "FEATS", "HEAD", ## "DEPREL", "DEPS", "MISC") ## sent <- cumsum(ind_b) + 1L ## tab <- cbind(data.frame(sent = sent), ## as.data.frame(do.call(cbind, records), ## stringsAsFactors = FALSE))[ind , ] pos <- which(ind_b) sent <- rep.int(seq_along(pos), diff(c(0L, pos))) tab <- cbind(data.frame(sent[ind]), as.data.frame(do.call(rbind, strsplit(lines[ind], "\t", fixed = TRUE)), stringsAsFactors = FALSE)) names(tab) <- c("sent", "ID", "FORM", "LEMMA", "UPOSTAG", "XPOSTAG", "FEATS", "HEAD", "DEPREL", "DEPS", "MISC") comments <- split(lines[ind_c], sent[ind_c]) attr(tab, "comments") <- comments ## From CoNLL-U v2 on 'sent_id' and 'text' comments are compulsory ## for every sentence. Be defensive and add these as attributes ## only if always available. ind <- startsWith(lines, "# sent_id =") if(all(diff(sent[ind]) == 1)) attr(tab, "sent_id") <- sub("^# sent_id = *", "", lines[ind]) ind <- startsWith(lines, "# text =") if(all(diff(sent[ind]) == 1)) attr(tab, "text") <- sub("^# text = *", "", lines[ind]) class(tab) <- c("CoNNLU_Annotation", "data.frame") tab } CoNLLUTextDocument <- function(con, meta = list(), text = NULL) { tab <- read_CoNNLU(con) doc <- list(content = tab, meta = meta, text = if(is.null(text)) attr(tab, "text") else text) class(doc) <- c("CoNLLUTextDocument", "TextDocument") doc } ## CoNLL-U allows to represent both words and (multiword) tokens, which ## both have FORM entries, with ID single integers for words and integer ## ranges for the tokens. We provide the tokens with as.character() and ## the words with the other "viewers", in particular, words(). format.CoNLLUTextDocument <- function(x, ...) 
{ y <- x$content ind <- !grepl("[.-]", y$ID) c(.format_TextDocument(x), sprintf("Content: words: %d, sents: %d", sum(ind), y[NROW(y), "sent"])) } content.CoNLLUTextDocument <- function(x) x$content as.character.CoNLLUTextDocument <- function(x, ...) { if(!is.null(y <- x$text)) y else otoks(x) } ## ## All methods below could also be provided for CoNNLU_Annotation ## objects. ## otoks.CoNLLUTextDocument <- function(x, ...) { y <- x$content ## Drop empty nodes. y <- y[!grepl(".", y$ID, fixed = TRUE), ] ## Expand ranges to determine forms to be skipped for tokens. ind <- grepl("-", y$ID, fixed = TRUE) ids <- y$ID[ind] skip <- Map(seq, sub("-.*", "", ids), sub(".*-", "", ids)) skip <- paste(rep.int(y$sent[ind], lengths(skip)), unlist(skip), sep = ".") y$FORM[is.na(match(paste(y$sent, y$ID, sep = "."), skip))] } words.CoNLLUTextDocument <- function(x, ...) { ind <- !grepl("[.-]", x$content$ID) x$content$FORM[ind] } sents.CoNLLUTextDocument <- function(x, ...) { ind <- !grepl("[.-]", x$content$ID) split(x$content$FORM[ind], x$content$sent[ind]) } tagged_words.CoNLLUTextDocument <- function(x, which = c("UPOSTAG", "XPOSTAG"), ...) { which <- match.arg(which) ind <- !grepl("[.-]", x$content$ID) Tagged_Token(x$content$FORM[ind], x$content[[which]][ind]) } tagged_sents.CoNLLUTextDocument <- function(x, which = c("UPOSTAG", "XPOSTAG"), ...) { which <- match.arg(which) ind <- !grepl("[.-]", x$content$ID) split(Tagged_Token(x$content$FORM[ind], x$content[[which]][ind]), x$content$sent[ind]) } NLP/R/aptd.R0000644000175100001440000001544114654077116012215 0ustar hornikusersAnnotatedPlainTextDocument <- function(s, a, meta = list()) { s <- as.String(s) ## Be nice. a <- as.Annotation(a) doc <- list(content = s, annotation = a, meta = meta) class(doc) <- c("AnnotatedPlainTextDocument", "PlainTextDocument", "TextDocument") doc } format.AnnotatedPlainTextDocument <- function(x, ...) 
{ c(.format_TextDocument(x), sprintf("Annotations: length: %s", length(x$annotation)), sprintf("Content: chars: %d", nchar(x$content))) } content.AnnotatedPlainTextDocument <- function(x) x$content `content<-.AnnotatedPlainTextDocument` <- function(x, value) stop("content modification is not possible for AnnotatedPlainTextDocument objects") ## meta.AnnotatedPlainTextDocument <- ## function(x, tag = NULL, ...) ## if(is.null(tag)) x$meta else x$meta[[tag]] ## `meta<-.AnnotatedPlainTextDocument` <- ## function(x, tag = NULL, ..., value) ## { ## if(is.null(tag)) ## x$meta <- value ## else ## x$meta[[tag]] <- value ## x ## } as.character.AnnotatedPlainTextDocument <- function(x, ...) x$content annotation <- function(x) { if(!inherits(x, "AnnotatedPlainTextDocument")) stop("argument 'x' must be an AnnotatedPlainTextDocument object") x$annotation } ## NLTK style functions for high level access words.AnnotatedPlainTextDocument <- function(x, ...) { if(!inherits(x, "AnnotatedPlainTextDocument")) stop("argument 'x' must be an AnnotatedPlainTextDocument object") s <- x$content a <- x$annotation a <- a[a$type == "word"] .words_from_annotation_and_text(a, s) } .words_from_annotation_and_text <- function(a, s) { a <- a[a$type == "word"] w <- s[a] ## Use a word feature where available. f <- lapply(a$features, `[[`, "word") i <- (lengths(f) > 0L) w[i] <- unlist(f[i]) w } sents.AnnotatedPlainTextDocument <- function(x, ...) { if(!inherits(x, "AnnotatedPlainTextDocument")) stop("argument 'x' must be an AnnotatedPlainTextDocument object") s <- x$content a <- x$annotation .sents_from_annotation_and_text(a, s) } .sents_from_annotation_and_text <- function(a, s) { a <- annotations_in_spans(a[a$type == "word"], a[a$type == "sentence"]) lapply(a, .words_from_annotation_and_text, s) } paras.AnnotatedPlainTextDocument <- function(x, ...) 
{ if(!inherits(x, "AnnotatedPlainTextDocument")) stop("argument 'x' must be an AnnotatedPlainTextDocument object") s <- x$content a <- x$annotation lapply(annotations_in_spans(a, a[a$type == "paragraph"]), .sents_from_annotation_and_text, s) } tagged_words.AnnotatedPlainTextDocument <- function(x, map = NULL, ...) { if(!inherits(x, "AnnotatedPlainTextDocument")) stop("argument 'x' must be an AnnotatedPlainTextDocument object") s <- x$content a <- x$annotation a <- a[a$type == "word"] if(!is.null(map)) a <- .map_POS_tags_Annotation(a, map) .tagged_words_from_annotation_and_text(a, s) } .tagged_words_from_annotation_and_text <- function(a, s) { pos <- .annotation_features_with_template(a, "POS") Tagged_Token(.words_from_annotation_and_text(a, s), pos) } tagged_sents.AnnotatedPlainTextDocument <- function(x, map = NULL, ...) { if(!inherits(x, "AnnotatedPlainTextDocument")) stop("argument 'x' must be an AnnotatedPlainTextDocument object") s <- x$content a <- x$annotation if(!is.null(map)) a <- .map_POS_tags_Annotation(a, map) .tagged_sents_from_annotation_and_text(a, s) } .tagged_sents_from_annotation_and_text <- function(a, s) { lapply(annotations_in_spans(a[a$type == "word"], a[a$type == "sentence"]), .tagged_words_from_annotation_and_text, s) } tagged_paras.AnnotatedPlainTextDocument <- function(x, map = NULL, ...) { if(!inherits(x, "AnnotatedPlainTextDocument")) stop("argument 'x' must be an AnnotatedPlainTextDocument object") s <- x$content a <- x$annotation if(!is.null(map)) a <- .map_POS_tags_Annotation(a, map) lapply(annotations_in_spans(a, a[a$type == "paragraph"]), .tagged_sents_from_annotation_and_text, s) } parsed_sents.AnnotatedPlainTextDocument <- function(x, ...) 
{ if(!inherits(x, "AnnotatedPlainTextDocument")) stop("argument 'x' must be an AnnotatedPlainTextDocument object") a <- x$annotation .parsed_sents_from_annotation(a) } .parsed_sents_from_annotation <- function(a) { a <- a[a$type == "sentence"] ptexts <- .annotation_features_with_template(a, "parse") lapply(ptexts, Tree_parse) } parsed_paras.AnnotatedPlainTextDocument <- function(x, ...) { if(!inherits(x, "AnnotatedPlainTextDocument")) stop("argument 'x' must be an AnnotatedPlainTextDocument object") a <- x$annotation lapply(annotations_in_spans(a, a[a$type == "paragraph"]), .parsed_sents_from_annotation) } chunked_sents.AnnotatedPlainTextDocument <- function(x, ...) { if(!inherits(x, "AnnotatedPlainTextDocument")) stop("argument 'x' must be an AnnotatedPlainTextDocument object") s <- x$content a <- x$annotation ## Require annotations with POS and chunk_tag features, as obtained ## e.g. with the Apache OpenNLP POS tag and chunk annotators. We ## could alternatively use annotations with parse features and ## flatten the parse trees. lapply(annotations_in_spans(a[a$type == "word"], a[a$type == "sentence"]), function(a) { ptags <- .annotation_features_with_template(a, "POS") ctags <- .annotation_features_with_template(a, "chunk_tag") words <- .words_from_annotation_and_text(a, s) chunk_tree_from_chunk_info(words, ptags, ctags) }) } otoks.AnnotatedPlainTextDocument <- function(x, ...) 
{ if (!inherits(x, "AnnotatedPlainTextDocument")) stop("argument 'x' must be an AnnotatedPlainTextDocument object") s <- x$content a <- x$annotation a <- a[a$type == "word"] a <- a[!duplicated(sprintf("%d-%d", a$start, a$end))] s[a] } .map_POS_tags_Annotation <- function(x, map) { map <- POS_tag_mapper(map, meta(x, "POS_tagset")) x$features <- lapply(x$features, function(e) { if(!is.null(pos <- e$POS)) e$POS <- map(pos) e }) x } .annotation_features_with_template <- function(x, tag, FUN.VALUE = "") { tryCatch(vapply(x$features, function(e) e[[tag]], FUN.VALUE), error = function(e) { stop(sprintf("incomplete or invalid '%s' features", tag), call. = FALSE) }) } NLP/R/wordlist.R0000644000175100001440000000177612521153434013127 0ustar hornikusersWordListDocument <- function(con, encoding = "unknown", meta = list()) { words <- readLines(con, encoding = encoding, warn = FALSE) doc <- list(content = words, meta = meta) class(doc) <- c("WordListDocument", "TextDocument") doc } format.WordListDocument <- function(x, ...) c(.format_TextDocument(x), sprintf("Content: words: %d", length(x$content))) ## print.WordListDocument <- ## function(x, ...) ## { ## writeLines(sprintf("<>", ## length(x$content))) ## invisible(x) ## } content.WordListDocument <- function(x) x$content ## meta.WordListDocument <- ## function(x, tag = NULL, ...) ## if(is.null(tag)) x$meta else x$meta[[tag]] ## `meta<-.WordListDocument` <- ## function(x, tag = NULL, ..., value) ## { ## if(is.null(tag)) ## x$meta <- value ## else ## x$meta[[tag]] <- value ## x ## } as.character.WordListDocument <- words.WordListDocument <- function(x, ...) x$content NLP/R/generics.R0000644000175100001440000000040012314326274013042 0ustar hornikuserscontent <- function(x) UseMethod("content", x) `content<-` <- function(x, value) UseMethod("content<-", x) meta <- function(x, tag = NULL, ...) 
UseMethod("meta", x) `meta<-` <- function(x, tag = NULL, ..., value) UseMethod("meta<-", x) NLP/R/language.R0000644000175100001440000002336114717071006013040 0ustar hornikusersparse_IETF_language_tag <- function(x, expand = FALSE, strict = TRUE) { n <- length(x) y <- rep.int(list(character()), n) names(y) <- x pos <- seq_along(x) ## How nice should we be? ## Allow for empty or missing elements ... ind <- which(!is.na(x) & nzchar(x)) pos <- pos[ind] x <- x[ind] ## See . ## Language tags can be of the form (in ABNF, see ## ): ## langtag / privateuse / grandfathered ## where ## privateuse = ("x"/"X") 1*("-" (1*8alphanum)) ## grandfathered = 1*3ALPHA 1*2("-" (2*8alphanum)) re_privateuse <- "[xX]((-[[:alnum:]]{1,8}){1,})" ## Grandfathered tags must really be determined by exact matching. ind <- !is.na(match(x, IANA_language_subtag_registry_grandfathered_table$Tag)) if(any(ind)) { y[pos[ind]] <- as.list(sprintf("Grandfathered=%s", x[ind])) ind <- which(!ind) pos <- pos[ind] x <- x[ind] } if(length(pos)) { pat <- sprintf("^%s$", re_privateuse) ind <- grepl(pat, x, perl = TRUE) if(any(ind)) { y[pos[ind]] <- as.list(sprintf("Privateuse=%s", substring(x[ind], 3L))) ind <- which(!ind) pos <- pos[ind] x <- x[ind] } } ## Now for the real thing. 
## Remaining tags should now be as follows: ## (language ## ["-" script] ## ["-" region] ## *(["-" variant]) ## *(["-" extension]) ## ["-" privateuse] ## where ## language = (2*3ALPHA [-extlang]) ; shortest ISO 639 code ## / 4ALPHA ; reserved for future use ## / 5*8ALPHA ; registered language subtag ## extlang = *3("-" 3*ALPHA) ; reserved for future use ## script = 4ALPHA ; ISO 15924 code ## region = 2ALPHA ; ISO 3166 code ## / 3DIGIT ; UN M.49 code ## variant = 5*8alphanum ; registered variants ## / (DIGIT 3alphanum) ## extension = singleton 1*("-" (2*8alphanum)) ## singleton = %x41-57 / %x59-5A / %x61-77 / %x79-7A / DIGIT ## ; "a"-"w" / "y"-"z" / "A"-"W" / "Y"-"Z" / "0"-"9" ## We handle language/extlang a bit differently (more generously). re_extlang <- "[[:alpha:]]{3}" re_language <- sprintf("[[:alpha:]]{2,3}(-%s){0,3}|[[:alpha:]]{4,8}", re_extlang) re_script <- "[[:alpha:]]{4}" re_region <- "[[:alpha:]]{2}|[[:digit:]]{3}" re_variant <- "[[:alnum:]]{5,8}|[[:digit:]][[:alnum:]]{3}" re_singleton <- "[abcdefghijklmnopqrstuvwyzABCDEFGHIJKLMNOPQRSTUVWYZ0123456789]" re_extension <- sprintf("(%s)(-[[:alnum:]]{2,8}){1,}", re_singleton) bad <- integer() if(length(pos)) { pat <- sprintf("^(%s)(-.*|$)", re_language) ind <- grepl(pat, x, perl = TRUE) bad <- pos[!ind] pos <- pos[ind] x <- x[ind] y[pos] <- lapply(strsplit(sub(pat, "\\1", x, perl = TRUE), "-", fixed = TRUE), function(e) { c(sprintf("Language=%s", e[1L]), sprintf("Extension=%s", e[-1L])) }) x <- sub(pat, "\\3", x, perl = TRUE) ind <- which(nzchar(x)) pos <- pos[ind] x <- x[ind] } if(length(pos)) { repeat { ## Use a loop so that we can finally stop when done. ## Script. pat <- sprintf("^-(%s)(-.*|$)", re_script) if(any(ind <- grepl(pat, x, perl = TRUE))) { y[pos[ind]] <- Map(c, y[pos[ind]], sprintf("Script=%s", sub(pat, "\\1", x[ind], perl = TRUE))) x[ind] <- sub(pat, "\\2", x[ind], perl = TRUE) ind <- which(nzchar(x)) pos <- pos[ind] x <- x[ind] if(!length(x)) break } ## Region. 
pat <- sprintf("^-(%s)(-.*|$)", re_region) if(any(ind <- grepl(pat, x, perl = TRUE))) { y[pos[ind]] <- Map(c, y[pos[ind]], sprintf("Region=%s", sub(pat, "\\1", x[ind], perl = TRUE))) x[ind] <- sub(pat, "\\2", x[ind], perl = TRUE) ind <- which(nzchar(x)) pos <- pos[ind] x <- x[ind] if(!length(x)) break } ## Variant(s). pat <- sprintf("^-(%s)(-.*|$)", re_variant) while(any(ind <- grepl(pat, x, perl = TRUE))) { y[pos[ind]] <- Map(c, y[pos[ind]], sprintf("Variant=%s", sub(pat, "\\1", x[ind], perl = TRUE))) x[ind] <- sub(pat, "\\2", x[ind], perl = TRUE) ind <- which(nzchar(x)) pos <- pos[ind] x <- x[ind] } if(!length(x)) break ## Extension(s). pat <- sprintf("^-%s(-.*|$)", re_extension) while(any(ind <- grepl(pat, x, perl = TRUE))) { ## ## We keep the singleton prefix: this could be used in ## expansions of registered extensions: currently, ## BCP 47 Extension U ## BCP 47 Extension T y[pos[ind]] <- Map(c, y[pos[ind]], sprintf("Extension=%s", sub(pat, "\\1\\2", x[ind], perl = TRUE))) ## x[ind] <- sub(pat, "\\3", x[ind], perl = TRUE) ind <- which(nzchar(x)) pos <- pos[ind] x <- x[ind] } if(!length(x)) break ## Private use. pat <- sprintf("^-%s(-.*|$)", re_privateuse) if(any(ind <- grepl(pat, x, perl = TRUE))) { y[pos[ind]] <- Map(c, y[pos[ind]], sprintf("Privateuse=%s", substring(sub(pat, "\\1", x[ind], perl = TRUE), 2L))) x[ind] <- sub(pat, "\\4", x[ind], perl = TRUE) } break } } ## Be a nuisance: singletons for extensions must not be duplicated. ind <- vapply(y, function(e) { e <- grep("^Extension=", e, value = TRUE) if(!length(e)) return(FALSE) any(duplicated(sub("^Extension=(.).*", "\\1", e))) }, NA) if(any(ind)) bad <- c(bad, which(ind)) if(any(ind <- nzchar(x))) { bad <- c(bad, pos[ind]) } if(length(bad) && strict) { stop("Invalid language tag(s):", paste("\n ", sQuote(names(y)[bad]), collapse = " "), call. 
= FALSE) } if(!expand) return(y) n <- lengths(y) i <- (n > 0L) x <- tolower(unlist(y, use.names = FALSE)) p <- match(x, IANA_language_subtag_registry$Index) z <- IANA_language_subtag_registry$Description[p] ## Special case private use ranges. if(!all(lengths(z))) { p <- match(x, IANA_language_subtag_registry_private_use_index_table) z[p > 0L | grepl("^privateuse=", x)] <- "Private use" } z <- Map(`names<-`, split(z, rep.int(seq_along(y), n)), y[i]) y[i] <- z y } get_IANA_language_subtag_registry <- function(con = "https://www.iana.org/assignments/language-subtag-registry") { ## This is a collection of records in tag-value format, but ## unfortunately separated by '%%' lines rather than empty lines, so ## we cannot use read.dcf() directly. Let us keep things simple: ## extract the records, write them out as DCF, and call read.dcf(). lines <- readLines(con) ## The first line is something like ## File-Date: 2009-03-13 ## which we drop for reading the records. fdate <- sub(".*: *", "", lines[1L]) pos <- grep("^%%", lines) lines[c(seq_len(pos[1L]), pos[-1L])] <- "" tcon <- textConnection(lines, encoding = "UTF-8") on.exit(close(tcon)) db <- read.dcf(tcon, all = TRUE) ## Add index for lookups. 
subtag <- db$Subtag db$Index <- tolower(sprintf("%s=%s", db$Type, ifelse(is.na(subtag), db$Tag, subtag))) db$Type <- factor(db$Type) attr(db, "File_Date") <- fdate db } IANA_language_subtag_registry_language_private_use_subtags <- outer(letters[1L : 20L], letters, function(u, v) sprintf("q%s%s", u, v)) IANA_language_subtag_registry_script_private_use_subtags <- outer(c("a", "b"), letters[1L : 24L], function(u, v) sprintf("Qa%s%s", u, v)) IANA_language_subtag_registry_region_private_use_subtags <- c(sprintf("Q%s", LETTERS[13L : 26L]), sprintf("X%s", LETTERS)) IANA_language_subtag_registry_private_use_index_table <- tolower(c(sprintf("Language=%s", IANA_language_subtag_registry_language_private_use_subtags), sprintf("Script=%s", IANA_language_subtag_registry_script_private_use_subtags), sprintf("Region=%s", IANA_language_subtag_registry_region_private_use_subtags))) NLP/R/annotation.R0000644000175100001440000002631614144531065013432 0ustar hornikusers## Annotations. ## Conceptually, a single annotation is a quintuple with "slots" id, ## type, start, end and features, and our Annotation objects are ## sequences (to allow positional access) of annotations, i.e., sequence ## of such quintuples. ## The implementation actually uses a "quintuple" (named list of length ## five) with slots giving the respective sequences of slot values. ## The feature slot of a single annotation is a feature map which we ## represent as named lists (at least for now also allowing NULL for an ## empty feature map), hence the sequence of feature values is a list of ## named lists. ## Subscripting via [ extracts subsets of annotations. ## Subscripting via $ extracts one slot value sequence. ## As Annotation objects have all slots of Span objects, we take them to ## have class "Annotation" and also inherit from class "Span". ## We allow for ids to be missing, and Annotation(id = NULL) creates ## missing ids as needed. 
Annotation_classes <- c("Annotation", "Span") Annotation_slot_names <- c("id", "type", "start", "end", "features") Annotation <- function(id = NULL, type = NULL, start, end, features = NULL, meta = list()) { if(nargs() == 0L) { ## Could also provide default values (e.g., NULL) for all ## arguments ... return(.Annotation_from_args(integer(), character(), integer(), integer(), list(), meta)) } start <- as.integer(start) end <- as.integer(end) n <- length(start) id <- if(is.null(id)) rep.int(NA_integer_, n) else as.integer(id) type <- if(is.null(type)) rep.int(NA_character_, n) else as.character(type) features <- if(is.null(features)) rep.int(list(list()), n) else lapply(features, as.list) ## ## Should perhaps check that all elements of 'features' are named or ## empty lists. ## .Annotation_from_args(id, type, start, end, features, meta) } .Annotation_from_args <- function(id, type, start, end, features, meta) { x <- list(id, type, start, end, features) if(any(diff(lengths(x)) != 0L)) stop("arguments must have the same length") names(x) <- Annotation_slot_names .Annotation_from_list_and_meta(x, meta) } .Annotation_from_list_and_meta <- function(x, meta) { class(x) <- Annotation_classes attr(x, "meta") <- meta x } as.Annotation <- function(x, ...) UseMethod("as.Annotation") as.Annotation.Annotation <- function(x, ...) x as.Annotation.Span <- function(x, id = NULL, type = NULL, ...) { ## Call Annotation() so we get coercion and length checking. Annotation(id, type, x$start, x$end, NULL) } is.Annotation <- function(x) inherits(x, "Annotation") `[.Annotation` <- function(x, i) .Annotation_from_list_and_meta(lapply(unclass(x), `[`, i), attr(x, "meta")) ## ## Implement eventually ... `[<-.Annotation` <- function(x, i, value) .NotYetImplemented() ## `[[.Annotation` <- function(x, i) { y <- lapply(unclass(x), `[[`, i) y$features <- list(y$features) .Annotation_from_list_and_meta(y, attr(x, "meta")) } ## ## Implement eventually ... 
`[[<-.Annotation` <- function(x, i, value) .NotYetImplemented() ## ## $.Annotation is not really necessary. `$<-.Annotation` <- function(x, name, value) { n <- length(x) x <- unclass(x) if(is.na(pos <- pmatch(name, Annotation_slot_names))) stop("invalid element name") name <- Annotation_slot_names[pos] value <- if(name == "type") as.character(value) else if(name == "features") as.list(value) else as.integer(value) ## This is not very elegant: we could record the slot modes as ## Annotation_slot_modes <- ## c("integer", "character", "integer", "integer", "list") ## but then coercion via the respective as.MODE functions would need ## some trickery ... maybe do this at a later stage, and modify the ## Annotation() creator accordingly. if(length(value) != n) stop("replacement must have the same length as object") x[[pos]] <- value .Annotation_from_list_and_meta(x, attr(x, "meta")) } as.data.frame.Annotation <- function(x, row.names = NULL, optional = FALSE, ...) { y <- data.frame(id = x$id, type = x$type, start = x$start, end = x$end, stringsAsFactors = FALSE, row.names = row.names) y$features <- x$features y } as.list.Annotation <- function(x, ...) lapply(seq_along(x), function(i) x[i]) c.Annotation <- function(..., recursive = FALSE) { args <- lapply(list(...), as.Annotation) meta <- do.call(c, lapply(args, meta)) args <- lapply(args, unclass) y <- lapply(Annotation_slot_names, function(e) unlist(lapply(args, `[[`, e), recursive = FALSE)) names(y) <- Annotation_slot_names ## Remove *exact* duplicates from metadata: if(length(meta)) { meta <- tapply(meta, names(meta), unique, simplify = FALSE) tags <- rep.int(names(meta), lengths(meta)) meta <- unlist(meta, recursive = FALSE, use.names = FALSE) names(meta) <- tags } .Annotation_from_list_and_meta(y, meta) } ## This is at the mercy of duplicated() working well on lists ... duplicated.Annotation <- function(x, incomparables = FALSE, ...) 
{ Reduce(`&`, lapply(unclass(x), duplicated)) } format.Annotation <- function(x, values = TRUE, ...) { if(!length(x)) return(character()) y <- .format_Annotation_without_features(x) wy <- nchar(y[1L], type = "width") wf <- max(16L, 0.95 * getOption("width") - wy) collapse <- format("\n", width = wy + 2L) features <- lapply(x$features, function(e) { if(!(l <- length(e))) return("") s <- if(values) .format_feature_map(e) else names(e) s <- paste0(s, c(rep_len(",", l - 1L), "")) w <- nchar(strwrap(paste(gsub(".", "X", s), collapse = " "), width = wf)) v <- c(0L, head(cumsum(w + 1L), -1L)) f <- v + 1L t <- v + w paste(substring(paste(s, collapse = " "), f, t), collapse = collapse) }) paste0(y, c("features", features), collapse = "\n") } inspect.Annotation <- function(x) { x$features <- vapply(x$features, function(e) { if(length(s <- .format_feature_map(e))) { paste(sprintf("\n %s", s), collapse = "") } else NA_character_ }, "") write.dcf(x, keep.white = "features") } length.Annotation <- function(x) length(x$start) merge.Annotation <- function(x, y, ...) { pos <- match(paste(y$id, y$type, y$start, y$end, sep = "\r"), paste(x$id, x$type, x$start, x$end, sep = "\r"), nomatch = 0L) ## ## This should really combine the unique tag/value pairs. ## In fact, duplicated tags are a problem, but how should they be ## handled (take the pair from x or from y)? x$features[pos] <- Map(c, x$features[pos], y$features[pos > 0L]) ## c(x, y[pos == 0L]) } ## meta.Annotation <- ## function(x, tag = NULL, ...) ## { ## m <- attr(x, "meta") ## if(is.null(tag)) m else m[[tag]] ## } ## `meta<-.Annotation` <- ## function(x, tag = NULL, ..., value) ## { ## if(is.null(tag)) ## attr(x, "meta") <- value ## else ## attr(x, "meta")[[tag]] <- value ## x ## } names.Annotation <- function(x) NULL ## print.Annotation <- ## function(x, values = TRUE, ...) ## { ## writeLines(format(x, values = values)) ## invisible(x) ## } subset.Annotation <- function(x, subset, ...) 
{ e <- substitute(subset) i <- eval(e, unclass(x), parent.frame()) if(!is.logical(i)) stop("'subset' must be logical") i <- i & !is.na(i) x[i] } unique.Annotation <- function(x, incomparables = FALSE, ...) x[!duplicated(x)] .format_Annotation_without_features <- function(x) { sprintf(" %s %s %s %s ", .format_values_with_header(x$id, "id", "right"), .format_values_with_header(x$type, "type", "left"), .format_values_with_header(x$start, "start", "right"), .format_values_with_header(x$end, "end", "right")) } .format_values_with_header <- function(v, h, justify = c("left", "right")) { justify <- match.arg(justify) width <- max(nchar(h), nchar(v)) len <- length(v) fmt <- sprintf("%%%s%ds", c("-", rep.int(if(justify == "left") "-" else "", len)), rep.int(width, len + 1L)) sprintf(fmt, c(h, v)) } ## Try formatting feature maps nicely. ## Similar to what we do in package 'sets', I guess ... .format_feature_map <- function(x, ...) { if(!length(x)) return(character()) sprintf("%s=%s", names(x), vapply(x, .format_feature_value, "")) } ## Formatter for a single value. .format_feature_value <- function(x) { ## Could also make this a generic, which currently seems an ## overkill, in particular if it is not exported so that no one else ## can register methods. if(inherits(x, "Stanford_typed_dependencies")) sprintf("<<%s,%s>>", class(x)[1L], nrow(x)) else if(is.object(x)) sprintf("<<%s>>", class(x)[1L]) else if(is.array(x)) sprintf("<>", paste(dim(x), collapse = ",")) else if(is.character(x) && (length(x) == 1L)) { if(nchar(x) <= 32L) x else "<>" } else if(is.atomic(x) && (length(x) == 1L)) { ## ## Should this take ... args? ## Also, might want to ensure this does not get too long. format(x) ## } else if(is.vector(x)) sprintf("<<%s,%s>>", typeof(x), length(x)) else if(is.null(x)) "NULL" else "<>" } annotations_in_spans <- function(x, y) { y <- as.Span(y) ## An annotation node is contained in a span if it does not start ## ahead of the span and does not end later than the span. 
ind <- outer(x$start, y$start, `>=`) & outer(x$end, y$end, `<=`) lapply(seq_len(ncol(ind)), function(j) x[ind[, j]]) } features <- function(x, type = NULL, simplify = TRUE) { if(inherits(x, "AnnotatedPlainTextDocument")) x <- x$annotation else if(!is.Annotation(x)) stop("argument 'x' must be an Annotation object") if(!is.null(type)) { types <- unique(x$type) i <- pmatch(type, types) if(any(is.na(i))) stop("incomplete or invalid 'type'") x <- x[x$type %in% types[i]] } features <- x$features tags <- unique(unlist(lapply(features, names))) y <- lapply(tags, function(tag) lapply(features, `[[`, tag)) if(simplify) y <- lapply(y, .simplify) names(y) <- tags class(y) <- "data.frame" attr(y, "row.names") <- .set_row_names(length(features)) y } .simplify <- function(x) { if((length(len <- unique(lengths(x))) == 1L) && (len == 1L)) unlist(x, recursive = FALSE) else x } NLP/R/annotators.R0000644000175100001440000003543314144531116013445 0ustar hornikusers## All annotators should have formals s and a, giving the string to ## annotate and an annotation to start from, and return "their own" ## annotation. Annotator <- function(f, meta = list(), classes = NULL) { if(!identical(names(formals(f)), c("s", "a"))) stop("Annotators must have formals 's' and 'a'.") attr(f, "meta") <- meta class(f) <- .classes_with_default(classes, "Annotator") f } is.Annotator <- function(x) inherits(x, "Annotator") format.Annotator <- function(x, ...) { d <- meta(x, "description") c(sprintf("An annotator inheriting from classes\n %s", paste(class(x), collapse = " ")), if(is.null(d)) { "with no additional description." } else { c("with description", strwrap(d, indent = 2L, exdent = 2L)) }) } ## Annotator generators. 
## Provide annotator generators for composite basic NLP tasks (e.g.,
## obtaining POS tags for the tokens in all sentences) based on
## functions which perform simple tasks (e.g., obtaining POS tags for
## the token in a single sentence) and return spans/features or simple
## annotations (but do not provide ids themselves).

## Wrap a simple paragraph tokenizer f into a full Annotator which adds
## fresh ids and the "paragraph" type to the spans f returns.
Simple_Para_Token_Annotator <- function(f, meta = list(), classes = NULL)
{
    ## f should be a simple paragraph tokenizer, which takes a string s
    ## representing the whole text, and returns the spans of the
    ## paragraphs in s, or a simple annotation with these spans and
    ## (possibly) additional features.

    force(f)

    default <- "Simple_Para_Token_Annotator"
    classes <- .classes_with_default(classes, default)

    g <- function(s, a = Annotation()) {
        s <- as.String(s)
        y <- f(s)
        n <- length(y)
        ## Fresh ids continuing after the maximal id already used in a.
        id <- .seq_id(next_id(a$id), n)
        type <- rep.int("paragraph", n)
        if(is.Annotation(y)) {
            ## Could check whether ids are really missing.
            y$id <- id
            y$type <- type              # Just making sure ...
        } else if(is.Span(y)) {
            y <- as.Annotation(y, id = id, type = type)
        } else
            stop("Invalid result from underlying paragraph tokenizer.")
        y
    }

    Annotator(g, meta, classes)
}

## Wrap a simple sentence tokenizer f into a full Annotator (body
## continues on the next chunk line).
Simple_Sent_Token_Annotator <- function(f, meta = list(), classes = NULL)
{
    ## f should be a simple sentence tokenizer, which takes a string s
    ## representing the whole text, and returns the spans of the
    ## sentences in s, or a simple annotation with these spans and
    ## (possibly) additional features.

    ## Note that in case paragraph annotations are available, we
    ## (currently) do not split the whole text into paragraphs before
    ## performing sentence tokenization.  Instead, we add a sentence
    ## constituents feature for the paragraphs.
force(f) default <- "Simple_Sent_Token_Annotator" classes <- .classes_with_default(classes, default) g <- function(s, a = Annotation()) { s <- as.String(s) y <- f(s) n <- length(y) id <- .seq_id(next_id(a$id), n) type <- rep.int("sentence", n) if(is.Annotation(y)) { ## Could check whether ids are really missing. y$id <- id y$type <- type # Just making sure ... } else if(is.Span(y)) { y <- as.Annotation(y, id = id, type = type) } else stop("Invalid result from underlying sentence tokenizer.") if(length(i <- which(a$type == "paragraph"))) { a <- a[i] a$features <- lapply(annotations_in_spans(y, a), function(e) list(constituents = e$id)) y <- c(y, a) } y } Annotator(g, meta, classes) } Simple_Word_Token_Annotator <- function(f, meta = list(), classes = NULL) { ## f should be a simple "word" tokenizer, which takes a string s ## representing a single sentence, and returns the spans of the word ## tokens in s, or a simple annotation with these spans and ## (possibly) additional features. ## The generated annotator adds the sentence offsets and unique ## word token ids, and constituents features for the sentences. force(f) default <- "Simple_Word_Token_Annotator" classes <- .classes_with_default(classes, default) g <- function(s, a) { s <- as.String(s) ## Use the given annotation to extract the sentences. i <- which(a$type == "sentence") if(!length(i)) stop("no sentence token annotations found") ## Obtain the results of the word tokenizer for these sentences. y <- lapply(substring(s, a$start[i], a$end[i]), f) ## Compute ids for the word tokens, and turn results into ## annotations. ## If m is the maximal id used in a and sentence i has n_i ## tokens, then the ids for these start from ## m + 1 + sum(n_j: j < i) ## and have length n_i, of course. 
if(all(vapply(y, is.Annotation, NA))) { y <- Map(function(u, v) { u$start <- u$start + v u$end <- u$end + v u }, y, a$start[i] - 1L) n <- lengths(y) id <- Map(.seq_id, next_id(a$id) + c(0L, cumsum(head(n, -1L))), n) type <- Map(rep.int, "word", n) y <- Map(function(u, id, type) { u$id <- id u$type <- type # Just making sure ... u }, y, id, type) } else if(all(vapply(y, is.Span, NA))) { y <- Map(`+`, y, a$start[i] - 1L) # Add sentence offsets. n <- lengths(y) id <- Map(.seq_id, next_id(a$id) + c(0L, cumsum(head(n, -1L))), n) type <- Map(rep.int, "word", n) y <- Map(function(u, id, type) as.Annotation(u, id = id, type = type), y, id, type) } else stop("Invalid result from underlying word tokenizer.") ## Constituent features for the sentences. a <- a[i] a$features <- lapply(id, single_feature, "constituents") ## Combine sentence annotation with constituent features and the ## word token annotations. c(a, do.call(c, y)) } Annotator(g, meta, classes) } Simple_POS_Tag_Annotator <- function(f, meta = list(), classes = NULL) { ## f should be a simple POS tagger, which takes a character vector ## giving the word tokens in a sentence, and returns either a ## character vector with the tags, or a list of feature maps with ## the tags as 'POS' feature and possibly other features. ## The generated annotator simply computes an annotation for the ## word tokens with the features obtained from the POS tagger. 
force(f) default <- "Simple_POS_Tag_Annotator" classes <- .classes_with_default(classes, default) g <- function(s, a) { s <- as.String(s) a <- annotations_in_spans(a[a$type == "word"], a[a$type == "sentence"]) if(!length(a)) stop("no sentence token annotations found") if(!any(lengths(a) > 0L)) stop("no word token annotations found") y <- lapply(s[a], f) if(all(vapply(y, is.character, NA))) features <- lapply(unlist(y), single_feature, "POS") else if(all(vapply(y, is.list, NA))) features <- unlist(y, recursive = FALSE) else stop("Invalid result from underlying POS tagger.") a <- do.call(c, a) a$features <- features ## As simple POS taggers do not return annotations, information ## about the POS tagset cannot be passed as annotation metadata. ## Instead, for now we look for a 'POS_tagset' attribute. ## Similarly for 'POS_tagset_URL'. for(tag in c("POS_tagset", "POS_tagset_URL")) { if(!is.null(val <- attr(f, tag))) attr(a, "meta")[[tag]] <- val } a } Annotator(g, meta, classes) } Simple_Entity_Annotator <- function(f, meta = list(), classes = NULL) { ## f should be a simple entity detector ("named entity recognizer") ## which takes a character vector giving the word tokens in a ## sentence, and return a simple annotation containing the word ## token spans and types of the entities found. ## The generated annotator adds ids and transforms word token spans ## to character spans. 
force(f) default <- "Simple_Entity_Annotator" classes <- .classes_with_default(classes, default) g <- function(s, a) { s <- as.String(s) i <- next_id(a$id) a <- annotations_in_spans(a[a$type == "word"], a[a$type == "sentence"]) if(!length(a)) stop("no sentence token annotations found") if(!any(lengths(a) > 0L)) stop("no word token annotations found") y <- lapply(a, function(e) { result <- f(s[e]) if(!inherits(result, "Annotation")) stop("Invalid result from underlying name finder.") result$start <- e$start[result$start] result$end <- e$end[result$end] result }) y <- do.call(c, y) y$id <- .seq_id(i, length(y)) y } Annotator(g, meta, classes) } Simple_Chunk_Annotator <- function(f, meta = list(), classes = NULL) { ## f should be a simple chunker, which takes character vectors ## giving the word tokens and the corresponding POS tags as inputs, ## and returns either a character vector with the chunk tags, or a ## list of feature maps with the tags as 'chunk_tag' feature and ## possibly other features. ## The generated annotator simply extracts the word token ## annotations for the sentences, obtains the chunk features for ## these, and returns the word token annotations with these features ## (only). 
force(f) default <- "Simple_Chunk_Annotator" classes <- .classes_with_default(classes, default) g <- function(s, a) { s <- as.String(s) a <- annotations_in_spans(a[a$type == "word"], a[a$type == "sentence"]) if(!length(a)) stop("no sentence token annotations found") if(!any(lengths(a) > 0L)) stop("no word token annotations found") y <- lapply(a, function(e) f(s[e], .annotation_features_with_template(e, "POS"))) if(all(vapply(y, is.character, NA))) features <- lapply(unlist(y), single_feature, "chunk_tag") else if(all(vapply(y, is.list, NA))) features <- unlist(y, recursive = FALSE) else stop("Invalid result from underlying chunker.") a <- do.call(c, a) a$features <- features a } Annotator(g, meta, classes) } Simple_Stem_Annotator <- function(f, meta = list(), classes = NULL) { ## f should be a simple stemmer, which takes a character vector of ## word tokens and returns the corresponding word stems. ## The generated annotator simply computes an annotation for the ## word tokens with the stem features obtained from the stemmer. force(f) default <- "Simple_Stem_Annotator" classes <- .classes_with_default(classes, default) g <- function(s, a) { s <- as.String(s) a <- a[a$type == "word"] a$features <- lapply(f(s[a]), single_feature, "stem") a } Annotator(g, meta, classes) } sentence_constituents <- function(a) { i <- which(a$type == "sentence") constituents <- lapply(a$features[i], `[[`, "constituents") if(!all(lengths(constituents) > 0L)) { ## Looks like we have an annotation with no constituents ## features for the sentences ... need to compute these. ## Make sure sentences are ordered by character offsets. i <- i[order(a$end[i])] j <- which(a$type == "word") ## Should we also make sure tokens are ordered by character ## offsets? 
k <- rowSums(outer(a$start[j], a$start[i], `>=`)) constituents <- split(a$id[j], k) names(constituents) <- a$id[i][as.integer(names(constituents))] ## Assuming there can not be empty sentences, we could more ## simply do ## names(constituents) <- a$id[i] } else names(constituents) <- a$id[i] constituents } next_id <- function(id) .max_id(id) + 1L single_feature <- function(value, tag) { y <- list(value) names(y) <- tag y } .max_id <- function(id) { id <- id[!is.na(id)] if(!length(id)) 0L else max(id) } .seq_id <- function(f, l) as.integer(seq.int(from = f, length.out = l)) .classes_with_default <- function(classes, default) c(classes[classes != default], default) ## .simple_feature_map <- ## function(x, tag) ## { ## ## Turn a sequence of values x into a list of feature maps with ## ## given tag and respective values in x. ## lapply(x, single_feature, tag) ## } ### * Annotator pipelines Annotator_Pipeline <- function(..., meta = list()) { x <- list(...) if(!all(vapply(x, is.Annotator, FALSE))) stop("all pipeline elements must be annotator objects") .Annotator_Pipeline_from_list_and_meta(x, meta) } ## ## Should we move the is.Annotator checking here, perhaps with a way to ## turn it off? .Annotator_Pipeline_from_list_and_meta <- function(x, meta = list()) { attr(x, "meta") <- meta class(x) <- "Annotator_Pipeline" x } ## as.Annotator_Pipeline <- function(x) UseMethod("as.Annotator_Pipeline") as.Annotator_Pipeline.Annotator_Pipeline <- identity as.Annotator_Pipeline.Annotator <- function(x) .Annotator_Pipeline_from_list_and_meta(list(x)) as.Annotator_Pipeline.list <- function(x) { if(!all(vapply(x, is.Annotator, FALSE))) stop("all pipeline elements must be annotator objects") .Annotator_Pipeline_from_list_and_meta(x) } `[.Annotator_Pipeline` <- function(x, i) .Annotator_Pipeline_from_list_and_meta(unclass(x)[i], meta(x)) as.list.Annotator_Pipeline <- function(x, ...) { x <- unclass(x) attr(x, "meta") <- NULL x } ## No merging of metadata for now. 
c.Annotator_Pipeline <- function(..., recursive = FALSE) { annotators <- unlist(lapply(list(...), as.Annotator_Pipeline), recursive = FALSE) .Annotator_Pipeline_from_list_and_meta(annotators) } format.Annotator_Pipeline <- function(x, ...) sprintf("An annotator pipeline of length %d.", length(x)) NLP/R/annotate.R0000644000175100001440000000053612517716566013102 0ustar hornikusers## annotate() can use a single annotator or an annotator pipeline or ## something coercible to this, such as a list of annotators, and ## recursively calls the given annotators and merges annotations. annotate <- function(s, f, a = Annotation()) { s <- as.String(s) for(e in as.Annotator_Pipeline(f)) a <- merge(a, e(s, a)) a } NLP/R/ttd.R0000644000175100001440000001163713333632544012056 0ustar hornikusersTaggedTextDocument <- function(con, encoding = "unknown", word_tokenizer = whitespace_tokenizer, sent_tokenizer = Regexp_Tokenizer("\n", invert = TRUE), para_tokenizer = blankline_tokenizer, sep = "/", meta = list()) { s <- String(paste(readLines(con, encoding = encoding, warn = FALSE), collapse = "\n")) paras <- if(!is.null(para_tokenizer)) s[para_tokenizer(s)] else as.character(s) x <- lapply(paras, function(para) { ## Avoid as.String() coercion. spans <- sent_tokenizer(para) sents <- substring(para, spans$start, spans$end) lapply(sents, function(sent) { spans <- word_tokenizer(sent) words <- substring(sent, spans$start, spans$end) toks <- strsplit(words, sep, fixed = TRUE) one <- vapply(toks, `[[`, "", 1L) two <- vapply(toks, `[[`, "", 2L) data.frame(word = one, POS = toupper(two), stringsAsFactors = FALSE) }) }) ## Use sentence ids which are unique across paras. 
lens <- lapply(x, length) ids <- Map(function(f, l) as.integer(seq.int(from = f, length.out = l)), c(0L, head(cumsum(lens), -1L)) + 1L, lens) x <- Map(function(u, v) { cbind(data.frame(sent = rep.int(u, vapply(v, nrow, 0L))), do.call(rbind, v)) }, ids, x) doc <- list(content = x, meta = meta) class(doc) <- c("TaggedTextDocument", "TextDocument") doc } format.TaggedTextDocument <- function(x, ...) { content <- x$content len <- length(content) c(.format_TextDocument(x), sprintf("Content: words: %d, sents: %d, paras: %d", sum(vapply(content, NROW, 0L)), tail(content[[len]]$sent, 1L), len)) } ## print.TaggedTextDocument <- ## function(x, ...) ## { ## content <- x$content ## len <- length(content) ## writeLines(sprintf("<>", ## sum(vapply(content, NROW, 0L)), ## tail(content[[len]]$sent, 1L), ## len)) ## invisible(x) ## } content.TaggedTextDocument <- function(x) x$content ## meta.TaggedTextDocument <- ## function(x, tag = NULL, ...) ## if(is.null(tag)) x$meta else x$meta[[tag]] ## `meta<-.TaggedTextDocument` <- ## function(x, tag = NULL, ..., value) ## { ## if(is.null(tag)) ## x$meta <- value ## else ## x$meta[[tag]] <- value ## x ## } ## ## It would be nice if the as.character() method could "suitably" ## detokenize the word tokens into sentences. But this requires ## (a) knowing at least the language of the text ## (b) having code to detokenize when knowing the language ... ## as.character.TaggedTextDocument <- words.TaggedTextDocument <- function(x, ...) { unlist(lapply(x$content, `[[`, "word")) } ## ## Could more simply do ## sents.TaggedTextDocument <- function(x, ...) ## unlist(paras(x), recursive = FALSE) ## sents.TaggedTextDocument <- function(x, ...) { unlist(lapply(x$content, function(e) split(e$word, e$sent)), recursive = FALSE) } paras.TaggedTextDocument <- function(x, ...) { lapply(x$content, function(e) split(e$word, e$sent)) } tagged_words.TaggedTextDocument <- function(x, map = NULL, ...) 
{ if(!is.null(map)) { x <- .map_POS_tags_TaggedTextDocument(x, map) } Tagged_Token(unlist(lapply(x$content, `[[`, "word")), unlist(lapply(x$content, `[[`, "POS"))) } ## ## Could more simply do ## tagged_sents.TaggedTextDocument <- function(x, ...) ## unlist(tagged_paras(x), recursive = FALSE) ## tagged_sents.TaggedTextDocument <- function(x, map = NULL, ...) { if(!is.null(map)) { x <- .map_POS_tags_TaggedTextDocument(x, map) } unlist(lapply(x$content, function(e) split(Tagged_Token(e$word, e$POS), e$sent)), recursive = FALSE) } tagged_paras.TaggedTextDocument <- function(x, map = NULL, ...) { if(!is.null(map)) { x <- .map_POS_tags_TaggedTextDocument(x, map) } lapply(x$content, function(e) split(Tagged_Token(e$word, e$POS), e$sent)) } .map_POS_tags_TaggedTextDocument <- function(x, map) { map <- POS_tag_mapper(map, meta(x, "POS_tagset")) x$content <- lapply(x$content, function(e) { e$POS <- map(e$POS) e }) x } NLP/R/string.R0000644000175100001440000000470413334576423012572 0ustar hornikusers## A simple string class. String <- function(x) { .String_from_string(as.character(x)[[1L]]) } ## Note subscripting by [[: this insists on the first element, and ## hence gives an error instead of NA_character_ if there is none. as.String <- function(x) UseMethod("as.String") as.String.String <- identity as.String.default <- function(x) String(paste(x, collapse = "\n")) is.String <- function(x) inherits(x, "String") print.String <- function(x, ...) writeLines(x) ## Provide a '[' method performing slicing (as we cannot provide S3 ## methods for substr, and clearly substrings of strings should be ## strings. ## Note that we have no distinction between spans and span arrays (same ## issue as having no distinction between strings and string arrays in ## base R). Hence, we take spans to always operate in an array context ## (for now: we could add a drop = FALSE argument to have subscripting ## turn character vectors of length one back to strings again). 
## Substring extraction via [ : a Span index (or a list of Spans)
## yields the corresponding substrings as plain character vectors (or a
## list thereof); otherwise i and j are taken as scalar start/end
## positions and a String is returned.
`[.String` <- function(x, i, j)
{
    mysubstring <- function(x, i, j) {
        ## substring() recycles to max length of args only when this is
        ## positive.
        if(!length(i))
            character()
        else
            substring(x, i, j)
    }

    if(missing(j)) {
        if(is.Span(i))
            return(mysubstring(x, i$start, i$end))
        if(is.list(i)) {
            if(!length(i))
                return(list())
            else if(all(vapply(i, is.Span, NA)))
                return(lapply(i,
                              function(e) mysubstring(x, e$start, e$end)))
        }
    }
    ## Regular slicing operators in a scalar context.
    String(substr(x, i, j))
}

## More methods?

##
## A popular mailing list discussion item is to use a Java style '+'
## operator for concatenating strings (not uniformly liked as the
## corresponding operation is not commutative):
`+.String` <- function(e1, e2)
    .String_from_string(paste0(as.String(e1), as.String(e2)))
## Also provide Python-style string repetition.
`*.String` <- function(e1, e2)
{
    if(is.numeric(e1) && (length(e1) == 1L))
        .String_from_string(paste(rep.int(e2, e1), collapse = ""))
    else if(is.numeric(e2) && (length(e2) == 1L))
        .String_from_string(paste(rep.int(e1, e2), collapse = ""))
    else
        stop("Invalid operands.")
}
## What about c.String?

## Low-level constructor: mark a UTF-8 encoded character scalar as a
## String (no validation performed here).
.String_from_string <- function(x)
{
    y <- enc2utf8(x)
    class(y) <- "String"
    y
}

## NOTE(review): stray tar member header from archive extraction; not R
## code.
NLP/R/cleannlp.R0000644000175100001440000000246013741573436013050 0ustar hornikusers

## Viewer methods for objects of class "cnlp_annotation" as obtained by
## cleanNLP::cnlp_annotate().

## All word tokens, in document order.
words.cnlp_annotation <- function(x, ...)
{
    x$token$token
}

## Word tokens grouped into sentences (split by sentence id 'sid').
sents.cnlp_annotation <- function(x, ...)
{
    x <- x$token
    split(x$token, x$sid)
}

## paras.cnlp_annotation <-
## function(x, ...)
## {
##     x <- x$token
##     if(is.na(match("pid", names(x))))
##         stop("unavailable paragraph ids")
##     lapply(split(x, x$pid),
##            function(e) split(e$token, e$sid))
## }

## Word tokens paired with their universal ("upos") or language-specific
## ("pos") POS tags.
tagged_words.cnlp_annotation <- function(x, which = c("upos", "pos"), ...)
{
    x <- x$token
    which <- match.arg(which)
    Tagged_Token(x$token, x[[which]])
}

## Tagged word tokens grouped into sentences (body continues on the
## next chunk line).
tagged_sents.cnlp_annotation <- function(x, which = c("upos", "pos"), ...)
{ x <- x$token which <- match.arg(which) .tagged_sents_from_cnlp_token_frame(x, which) } .tagged_sents_from_cnlp_token_frame <- function(x, which) { lapply(split(x, x$sid), function(e) Tagged_Token(e$token, e[[which]])) } ## tagged_paras.cnlp_annotation <- ## function(x, which = c("upos", "pos"), ...) ## { ## x <- x$token ## if(is.na(match("pid", names(x)))) ## stop("unavailable paragraph ids") ## which <- match.arg(which) ## lapply(split(x, x$pid), ## .tagged_sents_from_cnlp_token_frame, ## which) ## } NLP/R/viewers.R0000644000175100001440000000213614654064663012751 0ustar hornikuserswords <- function(x, ...) UseMethod("words") sents <- function(x, ...) UseMethod("sents") paras <- function(x, ...) UseMethod("paras") tagged_words <- function(x, ...) UseMethod("tagged_words") tagged_sents <- function(x, ...) UseMethod("tagged_sents") tagged_paras <- function(x, ...) UseMethod("tagged_paras") chunked_sents <- function(x, ...) UseMethod("chunked_sents") parsed_sents <- function(x, ...) UseMethod("parsed_sents") parsed_paras <- function(x, ...) UseMethod("parsed_paras") otoks <- function(x, ...) UseMethod("otoks") chunk_tree_from_chunk_info <- function(words, ptags, ctags) { ind <- grepl("^[BO]", ctags) ## ## Should this also use Tagged_Token()? 
## Tail of chunk_tree_from_chunk_info (header is outside this chunk):
## group word/POS leaves by chunk starts and wrap non-"O" groups into
## chunk subtrees under a common "S" root.
    chunks <- split(sprintf("%s/%s", words, ptags),
                    cumsum(ind))
    ##
    nms <- sub(".*-", "", ctags[ind])
    ind <- nms != "O"
    chunks[ind] <- Map(Tree, nms[ind], chunks[ind])
    Tree("S", chunks)
}

## Turn a POS tag map (a function, or a per-tagset list of lookup
## tables) into a mapping function for the given tagset.
POS_tag_mapper <- function(map, set)
{
    if(is.function(map))
        return(map)
    if(is.list(map))
        map <- map[[set]]
    function(pos) map[pos]
}

## NOTE(review): stray tar member header from archive extraction; not R
## code.
NLP/R/datetime.R0000644000175100001440000000764513741575236013062 0ustar hornikusers

ISO_8601_datetime_components <-
    c("year", "mon", "day", "hour", "min", "sec", "tzd")

## Parse ISO 8601 date/time strings into a component list; entries which
## cannot be parsed are warned about and left empty.
parse_ISO_8601_datetime <- function(x)
{
    x <- as.character(x)
    n <- length(x)
    y <- matrix("", n, 7L)
    dimnames(y) <- list(x, ISO_8601_datetime_components)
    pos <- seq_along(x)
    ## An entry is immediately bad if it is missing or empty, or has a
    ## time part (more than 10 characters) not separated from the date
    ## part by 'T'.
    ## BUG FIX: the original tested nzchar(x) > 10L; nzchar() returns a
    ## logical, so the comparison was always FALSE and strings with a
    ## malformed date/time separator were never flagged.  Use nchar(),
    ## consistent with the time-part test further below.
    bad <- (is.na(x) |
            (x == "") |
            ((nchar(x) > 10L) & (substring(x, 11L, 11L) != "T")))
    if(any(bad)) {
        pos <- pos[!bad]
        x <- x[pos]
    }
    ## Date part: year with optional month and day.
    dates <- substring(x, 1L, 10L)
    pat <- "^([[:digit:]]{4})(-[[:digit:]]{2})?(-[[:digit:]]{2})?$"
    m <- regmatches(dates, regexec(pat, dates))
    ind <- lengths(m) > 0L
    if(!all(ind)) {
        bad[pos[!ind]] <- TRUE
        pos <- pos[ind]
        x <- x[ind]
        m <- m[ind]
    }
    y[pos, 1L : 3L] <- do.call(rbind, m)[, 2L : 4L]
    ## Time part (everything after the 'T'), if any.
    ind <- (nchar(x) > 10L)
    if(any(ind)) {
        if(!all(ind)) {
            pos <- pos[ind]
            x <- x[ind]
        }
        times <- substring(x, 12L)
        pat <- paste("^",
                     "([[:digit:]]{2}):([[:digit:]]{2})",
                     "(:[[:digit:]]{2}([.][[:digit:]]+)?)?",
                     "(Z|[+-][[:digit:]]{2}:[[:digit:]]{2})",
                     "$",
                     sep = "")
        m <- regmatches(times, regexec(pat, times))
        ind <- lengths(m) > 0L
        if(!all(ind))
            bad[pos[!ind]] <- TRUE
        y[pos[ind], 4L : 7L] <-
            do.call(rbind, m[ind])[, c(2L, 3L, 4L, 6L)]
    }
    ## Strip the leading "-" from month/day and ":" from seconds.
    y[, c(2L, 3L, 6L)] <- substring(y[, c(2L, 3L, 6L)], 2L)
    ## Warn about the bad entries.
    if(any(bad)) {
        warning("Invalid entries:",
                paste("\n ", rownames(y)[bad], collapse = " "))
        y[bad, ] <- ""
    }
    ## If we want year to sec as numeric and tzd as character, we need
    ## to do
    ##   y <- as.data.frame(y, stringsAsFactors = FALSE)
    ## and convert variables 1 to 6: note that this would turn empty to
    ## missing ...
x <- rownames(y) w <- which(y != "", arr.ind = TRUE) y <- as.data.frame(y, stringsAsFactors = FALSE) y[, 1L : 5L] <- lapply(y[, 1L : 5L], as.integer) y[[6L]] <- as.numeric(y[[6L]]) y <- Map(function(u, v) as.list(u[v]), split(y, seq_len(n)), split(w[, 2L], factor(w[, 1L], seq_len(n)))) names(y) <- x class(y) <- "ISO_8601_datetime" y } `[.ISO_8601_datetime` <- function(x, i) { y <- unclass(x)[i] class(y) <- class(x) y } `$.ISO_8601_datetime` <- function(x, name) { name <- pmatch(name, ISO_8601_datetime_components) as.data.frame(x)[[name]] } as.matrix.ISO_8601_datetime <- function(x, ...) { y <- matrix("", length(x), 7L, dimnames = list(names(x), ISO_8601_datetime_components)) nms <- lapply(x, names) y[cbind(rep.int(seq_along(x), lengths(nms)), match(unlist(nms), ISO_8601_datetime_components))] <- as.character(unlist(x)) y } as.data.frame.ISO_8601_datetime <- function(x, row.names = NULL, optional = FALSE, ...) { y <- as.matrix(x) y[y == ""] <- NA_character_ y <- as.data.frame(y, stringsAsFactors = FALSE) y[, 1L : 5L] <- lapply(y[, 1L : 5L], as.integer) y[[6L]] <- as.numeric(y[[6L]]) y } as.Date.ISO_8601_datetime <- function(x, ...) { y <- as.matrix(x) y[y == ""] <- NA_character_ as.Date(sprintf("%s-%s-%s", y[, 1L], y[, 2L], y[, 3L]), "%Y-%m-%d") } as.POSIXct.ISO_8601_datetime <- function(x, tz = "", ...) as.POSIXct(as.POSIXlt(x)) as.POSIXlt.ISO_8601_datetime <- function(x, tz = "", ...) { y <- as.matrix(x) y[y == ""] <- NA_character_ offsets <- sub(":", "", y[, 7L]) offsets[offsets == "Z"] <- "+0000" y[, 7L] <- offsets strptime(do.call(paste, split(y, col(y))), "%Y %m %d %H %M %OS %z", tz = "UTC") } print.ISO_8601_datetime <- function(x, ...) 
{ y <- as.matrix(x) y <- as.data.frame(y, stringsAsFactors = FALSE) print(y) invisible(x) } NLP/R/tree.R0000644000175100001440000000671712517657432012233 0ustar hornikusersTree <- function(value, children = list()) { y <- list(value = value, children = as.list(children)) class(y) <- "Tree" y } format.Tree <- function(x, width = 0.9 * getOption("width"), indent = 0, brackets = c("(", ")"), ...) { ffmt <- function(x) { sprintf("%s%s %s%s", brackets[1L], x$value, paste(sapply(x$children, function(e) { if(inherits(e, "Tree")) ffmt(e) else format(e) }), collapse = " "), brackets[2L]) } s <- ffmt(x) if(nchar(s) + indent < width) return(s) y <- sapply(x$children, function(e) { if(inherits(e, "Tree")) format(e, width = width, indent = indent + 2L, brackets = brackets) else format(e) }) y <- sprintf("\n%s%s", paste(rep.int(" ", indent + 2L), collapse = ""), y) sprintf("%s%s%s%s", brackets[1L], x$value, paste(y, collapse = ""), brackets[2L]) } ## print.Tree <- ## function(x, ...) ## { ## writeLines(format(x, ...)) ## invisible(x) ## } Tree_parse <- function(x, brackets = c("(", ")")) { errfmt <- function(token, expected) { sprintf("expected %s but got %s", expected, token) } re_o <- sprintf("\\%s", brackets[1L]) # open re_c <- sprintf("\\%s", brackets[2L]) # close re_n <- sprintf("[^\\s%s%s]+", re_o, re_c) # node re_l <- sprintf("[^\\s%s%s]+", re_o, re_c) # leaf re <- sprintf("%s\\s*(%s)?|%s|(%s)", re_o, re_n, re_c, re_l) m <- gregexpr(re, x, perl = TRUE) stack <- list(list(NULL, list())) for(token in regmatches(x, m)[[1L]]) { if(substring(token, 1L, 1L) == "(") { if((length(stack) == 1L) && (length(stack[[1L]][[2L]]) > 0L)) stop(errfmt(sQuote(token), "end of string")) value <- sub("\\s*", "", substring(token, 2L)) stack <- c(stack, list(Tree(value, list()))) } else if(token == ")") { if((n <- length(stack)) == 1L) { if(!length(stack[[1L]][[2L]])) stop(errfmt(sQuote(token), sQuote(brackets[1L]))) else stop(errfmt(sQuote(token), "end of string")) } elt <- stack[[n]] ## 
class(elt) <- "Tree" stack <- stack[-n] n <- n - 1L stack[[n]][[2L]] <- c(stack[[n]][[2L]], list(elt)) } else { if((n <- length(stack)) == 1L) stop(errfmt(sQuote(token), sQuote(brackets[1L]))) stack[[n]][[2L]] <- c(stack[[n]][[2L]], list(token)) } } if(length(stack) > 1L) stop(errfmt("end of string", sQuote(brackets[2L]))) else if(!length(stack[[1L]][[2L]])) stop(errfmt("end of string", sQuote(brackets[1L]))) stack[[1L]][[2L]][[1L]] } Tree_apply <- function(x, f, recursive = FALSE) { if(!recursive) return(lapply(x$children, f)) g <- function(e) { y <- f(e) if(inherits(e, "Tree")) list(y, lapply(e$children, g)) else y } lapply(x$children, g) } NLP/R/utils.R0000644000175100001440000000145612521153333012411 0ustar hornikusers### Format and print. .format_TextDocument <- function(x, ...) c(sprintf("<<%s>>", class(x)[1L]), sprintf("Metadata: %d", length(meta(x)))) .print_via_format <- function(x, ...) { writeLines(format(x, ...)) invisible(x) } ### Get and set metadata. .get_meta_if_attr <- function(x, tag = NULL, ...) { m <- attr(x, "meta") if(is.null(tag)) m else m[[tag]] } .set_meta_if_attr <- function(x, tag = NULL, ..., value) { if(is.null(tag)) attr(x, "meta") <- value else attr(x, "meta")[[tag]] <- value x } .get_meta_if_slot <- function(x, tag = NULL, ...) if(is.null(tag)) x$meta else x$meta[[tag]] .set_meta_if_slot <- function(x, tag = NULL, ..., value) { if(is.null(tag)) x$meta <- value else x$meta[[tag]] <- value x } NLP/R/span.R0000644000175100001440000000654612503505414012220 0ustar hornikusers## A simple span class for storing start and end integer offsets. ## Apache OpenNLP optionally allows storing types in spans: we use ## Annotation objects (with no ids or features) for this. ## Conceptually, a single span is a start/end pair and our Span objects ## are sequences (to allow positional access) of spans, i.e., sequences ## of pairs. 
## The implementation actually uses a "pair" (named list of length two)
## of "slots" giving the start and end value sequences.
## Subscripting via [ extracts subsets of spans.
## Subscripting via $ extracts one slot.

Span_slot_names <- c("start", "end")

## User-level constructor: integer-coerce the offset sequences and
## require equal lengths.
Span <- function(start, end)
{
    start <- as.integer(start)
    end <- as.integer(end)
    if(length(start) != length(end))
        stop("arguments must have the same length")
    .Span_from_args(start, end)
}

## Internal constructors (no validation).
.Span_from_args <- function(start, end)
    .Span_from_list(list(start = start, end = end))

.Span_from_list <- function(x)
{
    class(x) <- "Span"
    x
}

as.Span <- function(x)
    UseMethod("as.Span")
as.Span.Span <- identity
## Keep only the character offsets, dropping ids/types/features.
as.Span.Annotation <- function(x)
    .Span_from_args(x$start, x$end)

is.Span <- function(x)
    inherits(x, "Span")

## [ subsets both slots in parallel, yielding a Span again.
`[.Span` <- function(x, i)
    .Span_from_list(lapply(unclass(x), `[`, i))

##
## Implement eventually ...
`[<-.Span` <- function(x, i, value)
    .NotYetImplemented()
##

## [[ extracts a single position (still returned as a Span).
`[[.Span` <- function(x, i)
    .Span_from_list(lapply(unclass(x), `[[`, i))

##
## Implement eventually ...
`[[<-.Span` <- function(x, i, value)
    .NotYetImplemented()
##

##
## $.Span is not really necessary.

## Replace one slot ("start" or "end", partially matched), enforcing
## integer storage and unchanged length.
`$<-.Span` <- function(x, name, value)
{
    n <- length(x)
    x <- unclass(x)
    if(is.na(pos <- pmatch(name, Span_slot_names)))
        stop("invalid element name")
    value <- as.integer(value)
    if(length(value) != n)
        stop("replacement must have the same length as object")
    x[[pos]] <- value
    .Span_from_list(x)
}

## Group generic: only "+" with a numeric scalar is supported, shifting
## both start and end offsets by the given amount.
Ops.Span <- function(e1, e2)
{
    ## Allow to add offsets.
    switch(as.character(.Generic),
           "+" = {
               if(is.Span(e1) && is.numeric(e2) && (length(e2) == 1L))
                   return(Span(e1$start + e2, e1$end + e2))
               if(is.Span(e2) && is.numeric(e1) && (length(e1) == 1L))
                   return(Span(e2$start + e1, e2$end + e1))
               stop("Invalid operands.")
           },
           stop(gettextf("'%s' not defined for \"Span\" objects",
                         .Generic),
                domain = NA))
}

## Two-column data frame of start/end offsets ('optional' is accepted
## for the generic's signature but unused).
as.data.frame.Span <- function(x, row.names = NULL, optional = FALSE, ...)
{
    data.frame(start = x$start,
               end = x$end,
               row.names = row.names)
}

## One single-element Span per position (body continues on the next
## chunk line).
as.list.Span <- function(x, ...)
lapply(seq_along(x), function(i) x[i]) c.Span <- function(..., recursive = FALSE) { args <- lapply(list(...), function(e) unclass(as.Span(e))) y <- lapply(Span_slot_names, function(e) unlist(lapply(args, `[[`, e))) names(y) <- Span_slot_names .Span_from_list(y) } duplicated.Span <- function(x, incomparables = FALSE, ...) { do.call(`&`, lapply(unclass(x), duplicated)) } format.Span <- function(x, ...) { format(as.data.frame(x), ...) } length.Span <- function(x) length(x$start) names.Span <- function(x) NULL print.Span <- function(x, ...) { print.data.frame(format(x), ..., row.names = FALSE) invisible(x) } unique.Span <- function(x, incomparables = FALSE, ...) x[!duplicated(x)] NLP/R/sysdata.rda0000644000175100001440000030021014717312552013265 0ustar hornikusersý7zXZi"Þ6!ÏXÌå¦'ïþ])ThänRÊ 3ÅT¬ñ‰ÉhnaD¯ò‚80 ¼t:Mçîÿµ„x£pš‹Mû…H¢‘XM5¹ÃËdŒ½ÿvнPŸ¦³ßK÷!°qæ(J‹± 9ŽHõù=§‹LÓ¥tZ‹ÕŸºÙG43y§Ìv`ãŸïAk,ßCÃÆýúÒMçD‘šÀ"Ü+{‹Ë—†´>îÙE¤]‰™“)‚Sâf1azxdÚ“üx3ÁæÁÆŽ5\¬Ú/Zƒ è¥î¢i™W¦¬ù»t )»=¸£ÎÆ_ÞcÍSé:Z“ö^Ó°ÒTvÃÇl"þ²±âOƒÂ 6ðJaˆÌl êÒD‹ê²ä橬ËevNÏÎU«U¢{£)’õ5„kc? 
Yðp ¡c¿ þöízkÌ1+¦íPŒ(Z¯Ì"Ÿ0B,šëàUúœS>6õa¡¬æ2AýXå)ÙÞc³¤?€áW„ƒ]oQêêoÊp®/‰AÉ{}p5JºdÜ4Å1³>ní¥‚-€(|ggðH… åö×òù~š5ˆDªAå„k1û9›ç#j©ð'ÿ‡NöªÀ“9^Êx¢ x&ã,SòÁ¶älÞ<Õn„" UÜsà1ŠÆwÿ¶nÕØúG ˜²ÜŸRÅ`vÏ»éLý 6 ÉÍÚNR½3ÿ %AT÷ÊlIÊŸÁó{gžëÖÏÕM00ކÀes‘è•õCq‹¡°¤€]0Äž£h-ìUv~êmlàä °&MéW¥ÿ¤;Ä+Ì+>¾0Üš)²ôçƒX|ᘇohÊÎÈ6©jêªùy®æmË˶V8ÅÜ~e»l—).~“ºÍB Œ¼ëÆ–±¹ žURìUÕ r¯%IX+pÙ7wð±pâXËF}¶èl?¶)ÄùrUü϶OÆm˜ÌÈã&xœ£ûàÙæuþ2M­~¶ñIé |~»Á£Üš{ž0gÙCúR@î=Ÿ‹÷6\9á(Hyp z sSîµ':±vÿôA€?ô…eŒgó?Zó2¶G"GARÐÈÅãâTµ»ÙƒZ™jÐ÷ûo;FP5‰ŒgY[ðwÕf1ª8Ä’Òl‚NIz2̦—™ ñè°¹ÌfCŸ¢LT¿ «dEn˜Ö\\nçD£†+Õúa•&ÒæŸÏÐ2§OÛ| @`βRä_éF1G¦éEaÜ‚§G„(\°iöÝÓý50Dì9[‘y2D~ xµÑnM¦° ;ÌðÍÍ’zurC˜è£Ó”¸¢Ïy9¤¯Ž£/%lÕÊ`Dkº·e%çH&Ñy:XÚ:ÆØvãrZ%#!ÎÚt±×~ápoG–næØÁ¥èå¶h8óÓ€„",Rí¾{”nø1šy• ­oO¹´ÞK¥l@ëŒí[ƒ ÌG£ümínâ« ®RVA E9]Ô¤ ŸµØrËÝPUÔÍhsf'ÀN› ('Ñá8Ár7£kú¯Øñ„õ-Øò|ºýzª€p²V”@{be®Bí+¯xUÊ)è¢AL:xCg/^Én¾¹W{m§ÌŸ¬ªP㫸jä¥S\4!’âªÖKq¤W'ƒV9†.»ì ¼GQÒvlW>%UAî­/=|3/Ú£"Œ˜¤Ä˜ª$7<¹ƒ4e³ *À¢¥j1Ç.ÒàÇbÑŠçs'-s´PÚu2† Ú¼"ü3Vÿdt}¦?18MPçÙfD9xÁpho–zž@ qaà!Ö1©XÙZý9¾´zÊû*mÙñÁñ9<€läcƒÅ ïÑï$|Êó„jàx‹+~_’Ù¾²ÏoGtG{y«ºÇ+®§¿{u,Úxf/ vœK!ˆélw­n[ÍfµaãÕwÉy‹ÜKа WÉ&ĘÃýÁÍÛ-¿˜ÞÝýÇý¾71 z] ³`V¿Š;$\[¥‚s G2ÍMH‹{ì$£í¾["3áØ¯ý‹š\dpæ?öUnÞHwÙŸxxa°­¥Jûqúj~“(Q²¢øôHk1l„Ä×U®Ïé¡Í°šqø€šk‘Kañ^øø"h\-È"\d…îIq‚@ ^¤rùé(™5¨É‘ §^ %<éìƒ\õ’ò_*½åÏ' gy¦1”ö©ß9×dhah¦bLM™d§”‚|‚ùÛ˜¹Ëºh ž ìb_t]rä©U¬‹Ðs28Œëþ´ñ´Dd(±¼AzœTÔ‚‡…±4Ê“¼EpZHe^(„™…± 9¾(Ž^‹‡ý‡ÔÿG‰.5q¹xé·ƒOðéÕŠç Ôb¥+MbëÐÜiº«réÿ ‰…¡î/­Ñ’ÜaW(Ú%£WcÂ3Cr(uè =„Ê%¸_bÞ”TğЀ€'AŠDÝ2 |Nõ2’SEïo!~á‘Eä}¬s¥tºH=~3*·ëÒçOÅî:ºYjë¾õÏp@`‡¨Ÿ.ÄhÀÝBbD0Þ%8Ú?jXKï,Ãõ z ûºb¡åI\2Àî°D¶þ¡åP1AÆÛ¶xÞëñhêÕëùCEYôPãD“ ²6)*¼È÷%~D#¶{µIœÛòæýy‡ªˆo¡б΅÷ /‚ŒžÔ%º–n°/ •r;yÜ"Á~v=C½«_G,˜ß/BfŒâÓ ÓÜΑbè%`Ü‘®"7·Ø3VPê@|E{KLæs)?‡Õ½%Zguxѯn¤ ™A¢ZeËÒ—oú„N¢„¸kMŒÍÍÞÕÛÙBÁ†FșДìÍ“‰p‡¦Ãeï¼qÛº*¼‡^¦¡RaÊÀÇéÌþ0zD¦Üu®Œ,òß˱ë\NÛíP~»Ð(RVüKìE}†N£&O€YîÒ‡Ð$)ØäFu·ú…"¼«tb¾5UzŒLj:¾x¶`4¶Ý!{”U<èô/Ñt:Û{ÄMÓI™/‹8º¶§o‰Ý¥ÇQâ8Ëà¤c4…³áxZfÝ@è§yФUƼ¶¬¨™3Y2äž(5Jö*Ë9¹vÂ×êœ-çÍ«ë ’_öd˜UÏn±Ç¥ùð~áþ|âBÔ´,Ó~îõãÐïba÷õÈМh¹f5mN;>Ú›«Rî”ÀÄþœŠ›ŠJ_Âáëžú“†{ÖÔ®¼ i¶¸[B^­ªʸ“IÓV)_áºhçJ*ŒÅ ðCx½€[L=ˆm>t€@ó÷¥]T@¯ï\£C¶4Îû:ÓïPäH­¤¹\'ÓíN³ „²˜¼G®¤Àqðiù3#m\ji‰¹0÷©¨Êj¦ã½‹òe";D^RéÂi´’¥^úÙK»ñ7\¥…Q¹GkûaÛÈÅ‘+Ûá±ûú};®_Tâ +aD „Ìæ‰þ\UEf 
×¢¹1T+˜×/$º\Högf¸¶aU•ÐÅ)¿ 6 ‚B’HÈ€Ç÷Ìr9¤ej?ÅœûzD¬^O)±w”$Å (÷®o‘¼ãWé+bã¸5šÞA±|Š±Ò ãùêV]àDà “pˉž²IvófeÌJ¿”­3ùJᆊâDƒ¯]™U8þM)BÉ^‚º fßÃBglÉ2t;â~K¬Ù}"nRÍ¥8«H//Í ñÃJy]T¾Îï ¾îÓ%Ó/÷“÷ “ª‹Ej{—Re fp˜üë ¿©ú£F¬¡Ü Aÿ÷²/eåk}Ç €éðS…Œ‚·•GX$|müÜœQŒó‚l'´j]¨LŒ>†ŽöÍhŒÛHÕhÏ&]ëÎD+}'Bõ*€ A¶Ø¼QB$ƒ¦^cul£åÓ5Fà-eÆ€˜áÀÕÕ‚ ·Û"l¸q&öô¸ /‘J¨>Ëò[» MG!ÞçÈ#?ÛæÒ‚F¶®•‡ êçK;ð GÝNFŸëÐÃOfLþòzQUNÇçAå|Þ:Du¾@|¼7ª÷ÐÙG® ÷ª—…x̹•¬ŸŸ6Ô³‘ò@Ã÷5ëFåÎ6Èö‘4híûNƒ¶øKû¥?§qF—ÎKÛ·%èZmdçÂ*cÌàÎ90—æ8 _€ò7Þªµ6BK èY$5®6±Ê•i ·dƒ›¸ù’Õ †IGµ= ÐðÁª9‡C¨¶ñiþØâ8zš¦7ˆÁí š*ñ6¨ù|C³`)^ìaÏ2Œ<0@ïÓìùçBTW¡`*•EH=u¢=5>äçOKSûpœÇóÿ4Vª~‚„À¶_Âü½ÿ’³EZa®ìD¿U->©`Ư'Hé¨X ðœ6’ïHî Ð$aY é+ØFNîݬlüm…½èg0]ô@¤ùÜM.íPxÿŠº’ÇÆ½A€,ì±T€ ãÞI`dl±íÉì7I2®È2ÙÑP¤JÌ¢–…€LÌ;öé2hÔ”Nx©$1Øë®¨À9¯a¸è 4®ÛIcˆxÉš‹«C´;8åØêªÛ:EÛYlÒ±º_£7³èïÛHô§¨c~±vpTïo<±®®Âõé‹¿5’oWD4†× Äe6àÇH®3˜^¼¯ |âw/.Û<³¡ž—[{ìJu);DÑÎÿ¼P‹4ÛÍì <娅mi̬R9Ám嵋’ ´>ÃÑDª[ÁëbΖ¡—óˆpíдâ¾Gv2™µOw¢F »þ¶°-sù8é'>¢ª:r!ßfÑŠßzÎ,Œ;Y¹Ã/ o ÚF(:ãnKyÎÝÍÆù‚‡Ò‰R;4|Ý]¿H¯ñÑ¢•ëȈ"¤F¢Ù`ËÚâ´#òP3î¢h<¾2å ß/ÿ\üùÃÄ¡†ÿ ä±]çó™ Âû:¸ØÝnižÖ  ·‰pøJR;ƒ°äÃe£ù«¿£Ì#«êéWÔ ›ÂàßÿŒŠ¾á™X°&ò5W¹>7ö—Œª³øŠ,ñ6m±4y6:dŠ‘°†”hãÉKErü?F8„#õ›L+³'w×¾s3Ê8 ˜oÛ™;ê/«°Aøm]$Η!ÈŠk£!u\RZÏL›DO¸Ìá&S‘‡+¹ŸÐqÏ Èü}ÅÐu0I¦þâqÓ:³ÐÉï‡~¨’ëø–ŠƒnÙ¾ߨ‹$ZÍz2g­ÃÄ¡xË\)ýè,Y‰®s™ïßÙÂÈ»ÏÃ÷çe$ˆ&-AX6ôÔU8J³mÖmÝ;ðÐûpv Ál¨ _¶Â8øBÊOMò½öÔÃÍ›î©è]é^bŸ›ËȺ ”eø…j°]zÿv‹±àPÔì³Â²&1wÈiž¨>Óg¡>1Ž.oa,ñ¥2;”ŠbÁš:J[f“J¤`+*µРê陪ÜNR÷/æô©÷\-ϫȜéü5îê‹{ÜVÞ$ÄhýEøsáþ{Àø‚ø"PÏšɃ„£‡_I ›AE¨GÈ  íuu‰p}>Ô.ÜÖGç»wUëóGá% åÜ„€c/sÜQD+ÏpðX<¢)Úœ·.Ù­ªaUËÍj踚yõÃ9F*ØBÁP^ì†B| xÌÞƒ¨—ú‰Õ$æA:#xá"î3 X ç`þ_‘ØaÂwгeú/‚gvÿuû=I{L’i;ïY_3ØØ¿ëýêÔìÿõ˜‚Ú{Ù ¡!ãÉ¡r+¤¯Ø†.¿ù„B‹ ø¦Öt©ÅÉ5äN¬³¸Â¯7oÀZѵ=0sQ…-žÖñ5ÈõGD^É8iŒ™&3åÕ‰·¬¿ÎC O¬;s@”ÇMi|w‚U,‘V ºòñšèjz?f%…(FfŸ±)gÕjªh™»í=eÆëØ~–eð#¯O±[·Ê…भÓ°ÈÖÈq€?ØkÀ…qÚ· ©_(’}~ij{‰ïÓ ÆÐa–l[1P¿äÎr4¤»íúhžÈÑÃBs^Éß+c2è¯s Ú×yþ²ìey-fÄ¥½æ´‡cžaò þTªýk¿^VZôœ ëîJÃ&3ôòW=c,BœS¥;ÔÄO¯çÖhOk°àð\Ôj.¯‡Ö@¬p¢“Lwg¾æÙ»ÄÊ6é_óŸKtÃÿýb´qùN ÅÊ%sán¦06S3}éS ?vÙ1ÀRË5:'_ x'­‹å‚P$ùVY¯„«nˆÀl3l6†ÅBÍR%k>QÜO‹ˆ3´†…ðŽ‹0qß~$‘}Jål³qï*{ÜKM©€.LJzШMý*þ¯€¶Ÿô¨’K¡Ö8=ÐvsûÜuW£Ô”ï¨Z~BA{Vkû­g×}ÿ_7T|ÁU:œ·lä‚Ä~°J¥¤êìŒS_D*Á©ªÃáм9HXn¥m™µ–Ÿm¿öè{Cq€2 vz–e^ÜžÃ8˜3‡Áï³¾^®Åãsé6À¯øÀ,[rRb–:‹±îdÓ/à€,˜ 
Ó.•*ƒïÔ5‡s–ðÙp>„.PÇ}Ž>PAÃÊ»Xwh7ffùÞž€@[Ñ=ùEÜy¢ÄЋkÚØV’vãuë²Rì6[ P…@‚Õ¬¸ØuXimü…÷°eüûäª] jWÑ®”q•õ'´¸ü˜§=Þ5'bt^Þü‡¼KØ}j}ð…$V^` jÃInt¯.õéû0­ƒ ÆÝÊwð}Ê99>£´“Œ«î„o*š¬ÄRs¹ð'%IBB¯û8Pi?=¡EI¹C6ˆ€y»§O „e¾•‘,Ó¶W‘ïLa²±úÔøGbl;j̲.ï+~Dh!÷rÒØ1¾Xr²ÄTNÔÜW(Å”k¬¢ßL¿6Y‚URb'ÐÎî];_& ó¹åÌZLbõV*·‰Ž¿Z=·›J×V^Íä¬dpL¬Š/…ƒÆqÈÖMïLCquétž“ÐÔ -á‡w7ÅÞù…L…‚82[Û+¶âÇm=13¿ ±1SäfZ}I†ŸãÁatqèÚÍäía<¤¢±Þ‰Dƒ÷¡¼°ÕÿE ìüžJp¸m7£¾pj‹+ÑŠ w^‹§é‡auG Þ5¨³ŠæÅGµC‰ °NÍÏ?Þºýï/Û¥µõ"b3ÿõ÷¥¤7…bGHmOý¯­hÞIíø]K.)ø©¤ïµ(±ð‡¨±–Ø—ïõŸ¤ÀÞí¦Çæ+Íà%òöñ F›þ.ÃU4lºýtH%Lܲ꜈œ2­ïòܬššKDŠæMH1¦®îÝ*Kƒ|ožga¤"ö®ÔZ†®–ëF,å4.! ‡ŠLäîâ‹à=n”SEÃaã…¨ç …ÂïçÄàô÷çv  Ï×E®¯B;Ž­QN}¤"Ý× ùL?w(–°0é©î`·Ína Œäl>°ã_ñ¤pde­ãy š‡¶—üíÔ¸|”5 äA ·uŒÄ‹):Czs(~&•ƒ½¼tÀ©™¨’yª÷ !VÁ)€×¤Æ¤Ú誶´ö^F…c,£N‡«Âeé òÛõâ„j “ÅÏáúà—æNRÞLŽcòÖŸw¥G}|z.Úöh{Ýœ˜ðØ=þÍ­†Fo:W [Êå!ß8šÔڠÞ.ƒf÷ûÁÁFõ?ì!àZ4¹wœXןuWÖÄ|­ÀHJ«1-*Î^Á%U éÅâï 7.ˆö˰LÕç=#äð¶Oˆ¨Ïkƒ=4‚=fMÅ¿ ˜„DÓv˞ѣòƒG-âø¦/ã÷F#ÓÅõæÏ¯Jõ@èßöåÕ¶}w÷ieb?è AK‹)·FBœIÒý…)K~3 ±b~ ˜ºÂ¤0n‡¦z`·” ºËßö)”ÿÉxšå¦»‡æ gVˆv ëMØÂV$²9 wüªe„¸¾Ï½¯¸'"ïtI-„ÐŽOÙG¬ñìh®’\ŠázYA£ïÆÐ@²BNí¡R‡Ê)¢4Ìï±é#Ö.õ&w¨¬ÊLn\¬N² t+ _øO= (AÜåsLÜæ…è%yøÙù@÷±™%¯÷€öóeL/¼ñêtfÙ]‡Àša$¶â*¾QÀþp 'ýkâI{ªåt„Áù;›„ÆÚio`Ø8‚îñEé£?yËÝÝ2¬£rN†£Í¶W¿Þ%K"¸žã 4Cò,v: ºqšªd!­,É|Ô³óF‡îב¸]Vóв$ĹÚó¶¡îò¸+B¶:²:eÔ©¦]Â4 Æ‚7õ(•ØxlbžŸ0l}ü+ûã|{ÿÅ (ïªtX[8_»êI3Q fësÑC*GÇD®öÿª%É”6Tœ,Œ'œ&q+{-^ʪ lÇ*»›1Ì?ˆ÷·„ÐÎpÍO„Uë¶»ë<ÊÜ(››ìtçý†.¬üÉaVÚc-zäJtÀ¯ü±°ŸCá,÷¦,ÐRÒóÆa´J¢‰¶€’ñoäârUtO·ê{Ë}’F¬GÎ2ï¯ÿCÁƒŒ°UÄ€ró:LÎÆáÂå¹ê%3þ»‰ì?]ÄÙ€Š*óóM‚½‰t¤EP?î=PÛã^q Vlp„ýöï‡íü¿'Èw™ä" N(Fñœ‡¤SvÏZ[À›€O^™aÌ®{–`û7AŒfÆÓûúIûyón˜ínc»pMãWœUX—}­`Ñ•a¦Œ‹§JCÔøE„¸ÿÊ\ ž1¼÷d-ÒÕa¿-î4ƒÃ«#dØŸæ 0p¯àxZ¶rün(©AÉ ¡…²@aQ‹£'°!è²¾Õû’ô՘ܟĮò®,Dm1t{käÌ}QK©m–M¥nNTV¯Ú#‡â#½Àô× ;0oâdn<Ñûo£”í¯íŒ³ sXOËx )®þ1µ¦^âlüæ·,öÚX…·ã„<0øW<󞣽bA=`ø )ш©šC=g ýÕ¿ƒôRLÁù ˆP£¼ bÌx¥P;‡±5á•ó“r.jÑgÞ¿Õ H=Ü<ÚŽYª ü¶¼}1FÚ.XÃkãlå§ñÕÚ®‹‰éÉ<׌7 hIg9}À |$ CBY©ÞS™Áç¬ôIõHð|§Š0^ÆZc‰Àܬ…í>Þ.îšd‡ã¶*ši¸•9-K“ûp¤Ñ€{ z«ÂSeâã`w(›ßŒ¼]Ðvã˜+I‘JnW¿dØœ‹, ਴ø;ÖÑ‘]¼sÇtš˜ô-wØÈÈ7"Tܧ.†ùza1#*Zk7ïËNP¨ú?É«êî OYþzkZÅÑÏïpßÁ)9Ÿu]óN´ aÖÒ’g´ñÊÈ'ÊôÁØ>B¶Ø¨U¥É€ù‡rß}Úcîç“Ñ%°iVÒ° ¨žw»3™ñ\l6âFà–Žp4…„º¹›ª/Puçƒbù›ýò™=¡ F% ÅUœ¾iîåíhDØIh§ácÒ\ß¡)…Y¬B 
-:äþoúdFèùq—éP#çÚˆŸúÜA4ž®b¦)X¸ŒJR…Eس;W#Q&"6€ÈÁÆ®«Ác_ ›Áˬø 7abzŸ†£²a{Yø/´JÆå£ÙÐD v4ú@&\Š¡Tø þÎmÿ-BÆÚ›Ãc¬ç™7‚i ÅÄúÆô.äJú èäóé†WB77Hû¤~x´,:ÒÀPð]Oú uS¾Aqµº¡ !’xÑç'WJ ©ÝJ.c_ØCPY`–2ËêK™ø2â™`칫Œ±/ŒÏ\‹‚­×ÕNýØ•Cèùùý£?H¡h¤œªð©ñŠSÿÇ(ïDvØà.-‹ÈÇ/"=sÿ¸´íe:%öýkL_©Ž«P^pÏB|eÈ ~-k¯A)aóöžE a+¡‘Vtã"[ùkT둆qv|éKS…OÂû˜+åU|à<™Ä ±„kj·Nf;–ïS 0úæŽÅ-"}]`‰Ô^€©¸îVC³è*ÕÛñ“áÛ0å®Gùq3qX€0þË=¾ ÁÌ¥,T'Öm†.Ð…o ð$ŽGQ;­ÄÄ'‘<‹²uÙí*‘‹p=D¶ºÑÞW|Œ—n^!ͨÐÄz' ÎæóÅT+§‰èB‡AhqÊ´??ÝJŒÚöuôTh"W`=_ÜnÉw °øINð~œ®w2â¨Òwþ§$˜EÓÜäfR±2I‘u¨û~ú1¢&ûÕVî«ãnº“{»–”1à”]Å-Vîðþ·é¾7MjG”…q¡ÜžùgÛ»K‰+„DÕà$µæ€FéÁÎa%z•ß䌩HØW ÔýaL>)'$Ò=åð@­?ù%Ý)eáK·Œ4ÆC¦µÙŸâTŠ#C =,“ÑÑÖM?ãõ³®?ĨfNØEÇ":øÊÝûø ¬ËZò½[‹>Ô÷€à`¤x7€5bÿ¡f¾[|fЬQ?AêšHþ,ÛÓBÚ`—‚´æz‡?L>%ø¹­ÍuQDG-/ô[â3î4V–3,••à½X×tÝqGõ$Ñ0¿ÊEqÿ*°Ë} ¢¸(å_eG/¹~ކ›3Æ#‚r%ŒŠˆ—‰V1î ¾QÕ»ˆ´á ½%w¾@Qš¤¼(.]”%¶ð“|2qëÝ`4Ò¥C#¨!ÞÔ¬oÁ4þãH6â¼+”,§RTHÛ QóÑñŸŽ÷¯Ó* æhÈ4~Ö›—2),œ—âµø2ϧöŒô”L´ŸÜIù[ÆÑY] "x¶<´÷í¤!Õˆèê…‚ì¢aéÀ-Mºo0ÔsÛ‡M¿;>QáÞ¬sÛ Ú@’}:&íþ¡Rs\Ýá£;Á”A?è ð.ÏæíN,ÆXw8‘RÏ*^`LÂ#¸2¤Æ¶ZðšËjËÒy€dWŒž¥é  KÜÛxö^ÍpR¼6Kw ÅTöô†M·®æjb€H×F65a©hÊ–â˜2%JÓ‹óŠn‰¤´#¨™©†·Q”PŠŠ=öÛïà I»¼{rà4ÉkŒÎÊ­!7âjpe†CªU?ݬ|·CÁ°¤®!¨ùük#eóÖ#±—g+ëSZSúVßI£«•±LðmNwÉ¢„µùê¯ØÉ5}b à’°î'-Ó·NN YŒ¬³œCǡÌsìd $$¶‘¯dîS“ÿΤs‡tF8Ç ;¥@àòÝ‹÷Võ5aäÖå´wTPØ/l"¦‹16&[ZÄ)ƒVªÕÌÔɤ!I…-«ÞÒýÙÍó®HSóE=KB1™WN•>QýÊóäXGÏÐö–†“@à¿ÈëýÊÏ%\!çØB‡ ãÉÓŽ†Žã‹6kõöÿ­M‚çϺü '-|MvÅ)î*¬šøÚ6/Ö22%å»÷ˆ’eÊ·ÌÂÌû ©ði®ç&X°UŸôœ‹@Óõj-"àÙñ£=fè–ýD8õê\a˜tèf¾ g_×Qzq^8Ï(ë7ÓN¦¿¦Ô%Ëi’P*Hn+ȼG(crìS>/¥ØP ™¹œ)ë±]68ÿ$ Æ®Ù=ЭiFâÑ!ñ.1‡Ù%$X»¯8-u£jm½óY{Ã^v<^¥‘Þ¬þ¤'[“l½4É)6go{ÇíïÛ šïz‘'˾š#£ŸÜðL ®r«–C‡}:8¥Þ]{?÷[›XzVàÚÇ+â~ó?ÜÒÀ­{0¦9C®Û'õüÊ[÷®‚NA ý6Nx'øV×û'+ZD†uXuU¢º (*AÐÝ÷ew‘v"0¯Ð×[oôBèg\ŸÐð>ľSÍïîÞd‰²ºÉ^S>Õ®“ËB¢ÕÛ„ë;•$¡ ÄTÇ-‘VñàQ¢Ë -þXàI¡¥—®k¤Øù‡&¹Ép0†­8À'õAûôÌý—ÆfO:ÜÝ=ŠŸŒVŒ pÅàÈm}0lU Êý™×£JI7•º œ¿ùãg^êÿ –¤ªïÜP§Ì{-“Úè(Îð©w•ÃWûNY;Ú/ºÒkˆT-6±µ—.$³šÐ¾ýòê…À`=¾F×Zæ(ºèc ¾´¬¿PõxÙ¼w$»™¦ü·æiQ®Ù#†8²‰O¿­ ‰n2iîtÔ±ÖNU¡UgpÔ±röŸžÇ rðoÊé»)ÈöÎéa–±êxnÉ2·ã¢’ïÐÐhðÊçb†ì{iLÐd,·%äǪì­¢séíÿøéò“)£nGëÙv™¿D©ß®¿Ü+:Àj9ªïó|°¤­ˆÍHBÇø‡{jd÷C9S}Ǹp—º•)ÝkaÇÒõ15ÿ¿â†Bà ±1 s»—©TTƒÈ¹‚QÖ{f°±œý†„Énö¬¹\ O¥a»FS\ô%Q!ŸÅË­¯Ø•N·Óî”rÇg²<¢óáI®íh¨~«gä¢ Äç¶½äôVû"Å`XW–*"Isq*Iìl¨*ç¦á å·•ŸuŸ—íÜC1NBz¾øx 
Œ“ -ë_D–º&/Ìq)Ýxp´Ö$åÑhõÅ©ÝTᑪ;®@j.ÌÀ/Õë5Á“ùÃÀâN,›ÎáÀç šmîÛÿÜO%t²vÊÐ à5 Oð"[‰å¯~ù>eC¼?zû1gj¾§¦l®qwµÂÊ@]QY¤Ô*»Â8ú<³sÎf{›œ*¾Ž‡L|Ë„s5ž‡ yá’O½žCEŠ3(<*Ù‘Të‘£zS ëx ªf¼œå D<Û¹ÖëÉ%c&K˜´„MÉ)x†C?Ótl>eÝ ¢‚Ï€¼¿¨†õ/¬×øR©\•“ƒqù““Ã$¢ØG éèóHD)6Ã,Õˆ€¯Ü‘HK©Ó¿:ºª¸ ¡jh”l†Ê \…ß³(²½¬¿Ãe*õ¸Ih l‡˜•éÏàjY:Bu‰Ò’i‘gõ{-fnpŸ»ÓDÞ¥« ‰žÔ·Å3`+ûÛ?Û÷ý¬p¼Í&ËŽ©aâržÃ…Sú†óiÔvÙ<‘7â%J°Îo‚®šHï]9®žÂÖ ‡÷V+z[³íÁÉŽyH[”¶3Üs§h6­£ñBZ­ ø tÎ’ÞÏÁ “úxé;r¿7)„?IïH­0÷S^¯‘ÄQD¯ÄÎÔç á:U[M…Ó€nÀìþ¥ÿ&¤Ö¥µàrŠtŒ| chèó> û)‘ÉzÏ>nlQèË=eà,RKçzéQ6\hTðð,«LŸ~¸„\}vªÄ&$Â¥¹%nå2¥‡,³§Ž³Î·˜ŠlEÎô}sÕhXÍáiD…´¤²?ŠhYüÚTÌq…‘ûÖ§Jµ‰á|rô7%Eg7>;ŒèÍüåýÕ‹0¨*™¯@cÀò5zpôÄPôk.ú«3 š†à® ^§‰Sìq¤M™jª4¨šaï(Ë” ÂÝÄܶ&³_ù¤™EÝ—ÕçºßrpMcË™e« ’ Œ\–f ª¨S\ÆŠ“S’ô‡E¡-Nìʺç‚ÄfîBçøÝJ=U>ÇÈøJrfoû‹–å¥êÓA>¯pz#º°Ûõƒ¾Áv’‡Â­TðÑ.€hD1;¿!âØËfTºì†JWE¸rÿ¡Ï¬¬ÑG«m9KŸU‚”@ŸQñؘ:ìãî Z¥f~7C¯ÞŠcî}ex“ÁöV|OÒÿ×€#ŽD϶†šéA>¸Üº#›hi£ Î†ýЛ*›"™Ž«’]p–ý+ÃhpÝgðî#¶iOþ—Cy Ì}úrŽ3oSÃ?àgÇeì¨"—²¤ÃbÏY}ðɨwó–E¼;À´¾ã× é w«º«òŠ ’¯Lè/°î¼›e¡<â>zcU^ÜùˆñÑÖ¤Uà° ©.]#dPMyüMõŸn¶¡ÊoÉqKZLÐ[á^•,€Öªk¯0u©- Z³‘ªô·ìWºÌ"f¢…déø Ü†Ltqçá3»&Ey$A4\Õ>-W×äJ •õxâê‘i€(>b ]âihj‘Ϧ¾é¦øÜ ³C#…[›UÓˆ[@Q Ö]]ijx}™RVmÿ°°­Ožn™s¸ñ¬š%nGFN;‘»5õ¶à²Í¥›q>—©GW_ô¢÷§/ý&9óäÖØÅ¡È·\ƒL]`­¾k!«L½Ñ³c—‰bãóÁ sF¨ùCnÕƒ#<ס1„ɘy\¶¨% ÌûœìÈ>µ.€ô²ºþË2â˳äxRÄS”¹'g‚æ+X›A£ž÷þ«ÏgÞIœo¶²êO ™Ó0pÇUq»úqK`Ë£×4‡½¹S«I|ýgx@–Àëͧ³Uûóy…+­zV®7Š6]BQMð´Ïímàú°^\WxŸh³»)ȧ•mÌT’Õkˆ Cª¥M¾û‡˜f…"´eB™&;­KƆ˜’ò"0Kô#ù²·p‚ÏëWÔô&Zêâ"ò(¬N^™<®¼N !ŠŸ 54ê9 Z:,~¥ëÂ4¯ŽL&FÒ0 ⽘õYðòÓ¦f·u¤ó0WñëäGwªÒµUf½ò*Øý=FâÁ‰\ú û•\ÁGìÚÝ™ .W/Ð6û™ò²z½d {måPÆÅ$¬óíƒFÿy8Œ~qÕ}ŒMûëGÝÑñTM£'w^Õfð:KŸ1z_=ˆQïoâ »ÅÕÅ„^ Ì=QïºY‡k’/‡D `\÷§ÃÆ¥4«³Á ìÐ,`|Hš_3€wÎVsuà‘¿‡P+žà©ã [„Ñ¥ÁAöD‹pûNË@ª‘~©q·ž±ÂVO¹½øF‡Øü|š?,{°É+®ÀaKªsê>ŒÀ°óRÁŸ4Ó5F±žðáÖ7ý$PU‡~¨[(Ñ>‡Zš§Íß/î_Â+Ù±ëH³dn•lš›ékiŽ¡UÓÊE²zÏ "ž­Ÿ ´ !OkÕ–š¶ôVÚVI²©Æå8–n{Úžü@;^Eí#"¥"èUV´qòq¨¸k²ßŽ„xÕåFY‹ÈÈ#ñ !H»Ãtu†yÈt™f^‹ÚƒóX OtŽb3F§’cê‘ר ò$nôÒå"ZxÉ?…âÌã 9É…*÷$gKß}£YÀyqÿv!ÑXX‘ÿuÁ8_ k¢ÒìS{Ÿðã|Þ5@žFrðÅA|‰“ñÁµ{ö—5—Æ`o´e2(j2#²³blf™Äìr5é]c ðLå[æ{—Õ¬ø\pZz%`¹4 Q¨†¬bc1…”¶Â`øÝ¹ýt=ò§§Ô÷k·–´x–&÷|§ÖNo/£÷ $¤hÎ ÜhG›ï½:B¸8ç²PÇáÜãµÙu[]¸š@*ι˜õ+ ¥þ{³ŽoGVçÉ0W0ÎñXTm€ä&z_Ã^õ;¯“8ù eÈSÁ7&¤~•ëÜŠ#­›Aa@§nö2·]:º~tLŸôQž]¦,!Ð`_É/-ÇlªöÀ@’gÚHÌT>~ 
å¡;Ó L8ÄB•_²B‡Ýêr‘yp¡i4Œ%ûaÓž ù‚FXÝxóç_lwÞ+fÔô†«ißQyå×m°œqð¼aö‡5éŒût`²wuÞ^«¯â@›œÊÆtÓ“äɃ׻ñlÌ™ýC¨½ŒrÊw÷<1òNPÈÞÓaÍ…˜Ós :ód[ÍÈ(¬sgP\»³Læ–²ûÄ´1d…Ojê§`s£7šæ„;ŒIw¸ ;°EÐë}ïâ|Ƚ ¾ú¢ÐK|ÚÔÂÙÿvA\Á 1GjM.Ýa ‹g)p &aH?“ ttd YÁ®ë´jˆˆGß[ñ²Æ‡8Ìn$ë ñÈwcäYyåµhá?Á“E2?ROØHÑîÜ+\ãìætýš]é3 ëHGÄá•w éd‚yÚç8 Ư×#¹…ŸÕðîU¼Ô%÷k/ÍÚû“opàÒ,΋¼_ø¢àyn!׆òû{ädgl1cÅBÕP“uÇ  Íê[©·Z4¯º~°9êo‚nƒš(žÆfYßCw¾,f¹€…OóÃg$:Pʹ̡üòó–*Ëp‡®Ã„n*ýßµ«;bjç¬ß`öÉ %SïI7Ò3h^ }ht4Âóàì~3]$HÄc–¿A`æêàA²^¨>‹g²+6m*¨®¶]Æà·‰bµ‰ã>²°)½D¶­~ôà_l§uI'…Œ©Œ]úÍ©.L ¯+NÂ4tPÒŽ†°¶•â*dšÕêÌ{v uCÚÒœ¡i"Ç:–—ŽÅC=ADêm»ôQ4%\/P£·G$"!Ç1r@ˆ²h§Dîslª‚ž˜Ë¢ß'PvLC\F؉'9>Z˃ îC@2çë…‰§…bS†©¿Ñ¤(4Ï%9ËPóê_ÈUÙ½+L.öæÙ¨Ë} hÇU®«œzˆ>“œ=¸ÁXrpÑ/¹7­Qj{ÝV×ßÎÚ­q/»|ÄMFL¢Ð÷\½Ü#¦`.jºH9!â?2‚i«ÉŠù314…Ÿ©²ævWä™…).Ao€Z—v‰˜˜¼‘ýxçS‘±–ÚÉÓ,ÙŸAlZ¸7ðˆ¬UCžzŒÃ*é#Ñç|»Ý®€ä ð3 ÁWtN2¦QÛ^îô%ê ;`dÕÓ}l÷Žý{Ž×ïx/ÒòR0ÄŽ3ï´<mA¢õÓ…N öþý!ãÆ;ó{¤ÇF÷½wàe1€hc•%ná>3êG§=鎇‡0k¬Gr%«ˆ,°Y™õ™ŒöÚ |V÷g¤iÎ[ æ¦<h<ÕýgN뱦ǦÕñUhÉK<) “'2ñ8¸·ò%-d•ñae[Õnƒ@G@oä¬ë«‘¹ËZËÖå͇(¤"Ò"Û}n8Áçç§h`ž @ó¸èz›[¼w… ]9 %«¢Ì¸j®{ÇæQºHg»BŠg²¾P¤ËZžq 7cJÊ9d±t6e†ò]÷Ã'Ýs\ðä <¦Ðy-’×××’wÙáŠ~e„”•ጓz8e›? 
WT,xç6§ò‰¶³éPwöOiÃ垆²;›­³ö¨}††ñŠà#°ýx*“Ž5–AËÏ7$é«a>¿w¹ÿáШ ÍÈz~M¶Ìj5‡5¾=°®¹=Ç=èÊ-´€ä1¾û´v³% ›¢[çæCçòØ2b8_ï8¢VpÈ”qµKM©€'Uƒ½Œçìj«ujA@òâûÄT`i â4¨zÜxI²ÁÚJܘî1lÇ&† «Z)wD;«â@_)UPâ´Y î}TMÑæi»îË@AÛü÷aæ\5V~¦‹HÔÜÎêÊûEv DkigªôJs0 ÉûËqÞèêZ¤ÏÄ_$Wƽê ùɆ¥/©Ó½,HëCªÿwŸ·îª Z»Þw4 åƒL:k¯^£bR¯†+­ß©%!|«L½,1%œ, †x¨åô~;ÎZb2žm˜a'¿`!EÜnë*ù|t¼·‹±†}¬~—¯ F—Ý*¼îò&¸y¼2ç6¼š>ûÔ/sÕ¬n‹;±½J]„9cˆ¾dh–!ì Þ{æu7”ἂó¨a|·o:7<‘ÿbÓaÛ .ª(ƒñ·þšýX•â`¼ôgÆÅfs<6/Aa]W2¨Ã¹Úlï|Œ†°Œ],Gr@ñÔeÖý”Æé¯p±K¶Qük¤Ò¢ä »j ]FQô£yjt~bK <^š]®øB†e~7‹rƒ,(ÇãcÅòšÛ]ž§&Žû@Y·ú/4[¬žIp€¾ôÜn24Yâ6g0p× "a³Ô~¡½sZB’9û‰$0&¢±9”•zŠ­úT™Ál®ßgüÁxœÒ ¢õYÙv|u¦—zݽ´tœ1IÎ~QÜæ‰AÍ+jìÊ/²EZÏëáõË殪ÆÓ•}V iØ,îãú4i±C\jõ¢È…à¹íÁ]-¹ß\Ñ €/×—­¤ùãD ꩌÍ|þUU¹)8;ÝÜGÒjÆ€€ð‘–£½UÐӒ̨Fd6€@ fxáüHqà GF“ûÏ›î‚ñli¦OèÝa"8oÍ€õüZ˜\þ !¢ÿ¬›n÷sä7…øæ“á,q´æÈk×\ïð·u}~ô’û‡¢¢òb"<®—Ÿ>¨Þ*ì#µE¢ Þ*@e×(ëM®Ä—"ïqŒohéÇvÈ Ø­?YBpV.s­-k‡ÝÖmÜÕa¶§Ë² aÙªÿködŽ*b¶XˉwÑbDÉœD}ØäùCLl™)(줟¹LØšOç®3EOпBŰš5TÔk4Ôg=§ PÐ…7^ò¯Pt2‡ÐíÕ9/Õ('ÝfN¯F×´ð#QôI~ãØíõx„¢d“õ?sIoR¾ o‘{Wñ¢nYç·5ãuß o¤ÓVEHù>µ†·É£™¯¡\öå×!æ%¿ KN‘Yر ­!ù– m¢­$ð)ò¤ÜFsH½v¦$™¹º5ë0®|-7>§QsìMx­s-AÍIïÅóJ6מOAkSœe,÷y3„CY8ýÙ®ûÒq"M¥À¥ŠÄ)ÜÕ[÷F´è.ùÚ*Õl´? 
µ)¬·ý“ 0;ኈB€] õR 磶sd»1“°fAº ª7ù$›ølj>,Ñq„ ó î¼ÚD…ë/áÓ5}jy]JÑ´lÞ/¬+_¼w º¹e¦¨ÙÊü{i¬Î¦ÚUxú–±Tñ'z‰@ÜŸ^޲™Üé^/5pKá—Z!~ ©·Za¤?¬':ý;¸XMÅ܆`榮a:;¯ŒªcAÀëVŒ°„Ʋ$ð©‘ÿ±Cœ VñN†¾Øóœ›ê®úޝRrŒWO—ˆŸË—M<¯ß°bèôhÔE‘:öKÝSãK n þ~ø±'Ø!Æàn~Sõà°ØFvœ·o„E ‰þ`ÃÜ4œ-·‡êzƽf‰ÓÚqçZQÞ· G«>Ðå=þ¬èuSëd8'JL2Ép,ÔD2U¤ÿ|ጽ+§IPØ(òÿQÐ ¹wÕŠ{HâÑÖ;Šº¿#Ã2U6-ur\?.HÕ<1ãÄ’†’…Ao?ü±¨[|º1¨§±–UŒº k¬ùºqhõaþq^Ö;ÃÈHÉo—ã–Ž¨NÓp9.Ó ]ÇC _lûë"%sÖÛó=çÈDª¥ï{^ûR å’Ž<éR”JŒpV| _Äu5˜˜´VËP¥ckT%¿ ¾§sà,w‰[==¨/î&œƒöÉ¡þ÷}°¬IdK‚œZv›œ±‹Kð£FãÀBßôÞÍ:ï»ê•8"”¥^!‰ ëà„ñ滋]À~:W)pñà$ô\`´9KOLŠì bD›7& ˆbé?ÙFµ¡¥fåÚËK$®!Ó™9Åâ!*tKuf5ëƒÃvKiÙÎÛÇú†Ŧ{Êšc´Ü,žéöw8Ó}z‚¶aãòŸ!–™¶d¿,V]¢i^¥Ÿ{R÷ܾléo¸Ÿ¢ÈZ²B‡ÓPÂÐîÛ‰W±”úÒ!†HQs¤$)W±©\µMý¥ˆ± 6ÄœYÑ•ìÄnpèWüäeF,Óg>=°-@w4aýÓ^Ž¿ý=´µìAvÛ='L‘:2,(ˆ^éùmĸ…áVy\›¸Óyu—Ç- èîõÓWÞ“”Ž%û²Ñ}©WZ"/ž˜ýzM}ó¯êé²^É¡rìDbƒþÕìSX›4{¾–·¡Ö«šÀÁZ‰µïdVcH–ß§&5,{1và¸ö»ÞßÕsùšwBc×a¨ŠÑžËŸOâ!x…ƃ> `]cè¤ (vTBÿÐ?«fÖCTqåñœž(€/Q»WBùèÇ–v6©!úV.]ä¬õнƒ-¶·ë’Z3ØcPÊ Ð–Ÿ…\oÝ>PÎ#˜2nkO:à- ß{Ç”Z½¯ìŠ)qb5®øy€X2Õ(62N²¥2jG}¹Æ_èq $Zo„È3¦´ôÐø×å®Ö_pj ûâ 6Öx1i©‚‰¿Ãæ¨a”Zˆ‘k5æk¦E÷#gO•Ç\íðy* ‘Trðº×ÎFÝõŽCfŒnn/W©Üc(c±#iöŸ—_(lãfì[¡ÞqCw¼›ôòÅÍâû¢:Âðß×êÒ€ÌÑöª›wkn9)«ˆ4‚Ð!l\çÍUHáìʃ¶¥m0ç¸åœ|zñŠý„)ÙæÛ©p1kc¥1r´ÞÔÂû¦¶Št³YNq–Î¥ré_¤ÓuvÞ†kžŒPQÏæ HA dB‹€°ÕA”4vëÚ¶¨+ì3fÌ"§"˜‹T1ó›Òö$ÑÔè8P•êùÜ‘)+ÝÃy#Óõ{°Ýó\'˜wWΓ߄™¨;íŸÓëç[;;ó}pàUÛ 9é|Cµ l®šÑôqu"*éj þÖ^˜°ÈrEÝþø„Ft,QÚ·½CÑ]¤Aüo@Ólõ¾­BÕý2™Ø:ÑNfÎ8gˆA‰ëcXëÿúy•([ŸžÔ)!ñ”ä.S†ºg¾AÞ í™á•P1°EAòšW°äE­ •ïLn“AŠç´ùhhãØë& /{oQ‰iý4¤rVË5DÌSOpõ~ŸÚ‰ª²ãØÌYL¬”‹J¦U¥b0„ϸZó'Z@}° ˜1¹\à`ƒñ+Çg%Öo×G5 >X± ?8>È8¦$ö«ùRZõ öd×ʤ-Äch‘qgmÒGÒøY`gì;ao;&îxÅI‰þ(Vžœ¿¦{“ÉPa¬çDªÃÂ8BRªÝˆ´å aÃ飯øQ¿;âÝÊa¬é;ùÌyušõPüµL¬TSJÉ‚Ðû8gŽ•óíJ3ð·ìä­ Õ£ ÁšVk5 éž¡Ø·Ò®…3”²®Ê*‡ójµ ËG8 ³ |wªG„„ÿ÷˜IjZÙ@ÓÖV¹1wÕøpqCQÌ¢¡á‰‘æ¡‚"¢ŒCÑÛ8£ë4þZC ¨áªÚ›Æh›>5D®»‰Ó”Z¹ÈÙK¨åñ߳Β®]¡¹hæVrä•‚;„Ã$¾åùò÷H\ ‰üŒa¥aß¾dú(ÛÇo†tÉ¢ÓC¥ùó{ ›¤Ù¸–½hˆçNÍžs;­¹‹/öéœÆÌtÃåô Š*R´ ¸Ä8àiÁ5-³q.¬÷ ªIŒ3¥»;¢}[›9ª 1]J,vh!Y}] |¨k¡U©bùœDþ¸Ùb/NÒÐbÓ˜Dç¦êÏ~í) F6O‰Á Öo_¡á’–fT¹"½ ²”—êB„±Ë˜<}ìh½<ç˜P`iòJd*×ßÀ)’ÔŽ‡ß!OƒNl¦@ÃU…’ý<˜ƒ3w¬¯k¾„£]Ó»9¶Þ¾‡]&W*^Òî&ÏØÆ‡ß7‹â• ¯*X°ÇüX¨üÕíL$ex°Ë>l±Ï$i©Õu â!¼ïÓ²D`V^jt0_éz9¦˜oÆKJC1íòÇÅËí5â'€¸é°Åˆj•»âÜÒ7,òöSà«”+¥KÏØýào‰¡¢ÂüìZ (n#ïüâ¹´Q£/‚ê` nŸ|–å 
ù‹¨8íã{'ÐÎïáÜC*.ÛWt¢_.aÿµƒwÇ:þÒmãB? Ø·–\„t?äƒô-í’¹µJ¾ çm/»˜­„™fa£5EŽïú£^Xƒ? –Ì\Zf§×_‚w •Z‚e3~¢¹…Іš³ARŸ)2ºÒno†D <^­x­!ÇH;ªèj‰É#϶—!#4¢Žƒw/o/äq§Yÿy»€-0¥zë.&5Î8ð—´fö*ó}“ª@ÔÇò%Û7.®•?Y&=«Â¼ ®ífh ¸·6„Dz.<é4å±.&í‘õ#[¶-eÆR»›; ð§…ŸšçfnH¸â;ãý¡?ÀÒl|œÖ@påÛ6éMŽqÛH«”CçZ™ÀªX¦}ë¬^kŸ"ý7%Fšè yn©ÒTÀ]OlZ­NRù¿‰¤ ƒæ×NÚ/;ù ¨¨:9Ç« ´•ßfôì{Ü£V)¾ˆ6kRéšhˆ¨Ÿ©”ÉlÓØ«‚@áýHÁÂÖ!:¼LmÙ‹õŽ=ˆûÎ'o ´´tüܬrifäcsH çvZ„CµµÄŽï4\“C)•AKös€•ÁR¬r©Ì"KZ·\2G¶–ÇB/3”äñV\±/Õ€ù÷K˜îҮ̳+_ÿBß9ÙG¶âXßb"¸…ùuU˜+X‘b Ž÷° ñ³« ßmab„FàÉ4( Ϭ"s‚¹HôIž¼ù¨hÚÓVáð9Ž[¼;m1£ˆÕ;þ³‡kìûVîñ8Ï'Uu84|ƒ ‘¿7²ÒE' -›¤7œ ¥µ“u€±Ä4¡÷!ïºP¡ç-‡hÜS®ÓÛÏ Ö·®Š+eù ± •iÄŸ|ÅBtŒÚIËtb7,XN ò c¥õ†°¡%ó®©(É–Kóžt>?µ°™\Ϻ Àë`lÖ÷IêšO·rP¦#øFÁG@t¾6ÖŒÂèÒ=ÓÊÁüÍ´›×õ¤CbÞö3±ñå]n;šxŒîÚ°6&»EH®ÏT:ܰ#1â•ÅD¾cN#ÁÏÉyн¶K)ÉBÎRøT¬/Ûw/®¡“ªÄFZ.â xÎÙ:½]Ѩæ4_kÃ`c¥Öúý¤Sœ€dÏe@z9Ì”£¶`ŠuàØÓûnàH#‹Fß/èi“Á­+¦2Qqp‰’3àw•„?ˆ{ذ—ãt_Ápêî½gÛa0ŠM‰ˆ|&?‘Ê+3Ö‚í/^« uIœ@&sMböÜCß׃æªízäi<Ê1cSFa>ÅÑHVkäu*8€™Á“ç˜~Æü—.{ÜàÌ îšbödÆ6PìCöƒÞ/òHŠ ~~°HQÆ:ûº Z8`PÔ8ž ,ËìðÑíìºÓWVμíÿÈÕÊ ­Ñø(Ü 9ÆÀ-"€Ü7[³á|lõP•hÃû•¾Æ÷.Êë‹Me ÜàU‚t6àÍÒ¨Îñ+§‘øÇýî¨ Òa¥ãÔwo 4]÷ê’æÄ/çUžJű'xþ«ÚD»aç’ Ç0² dpkNØi_y!4`Vý–öòïEi/ RC~00IºÔ‡÷Ý„‚ûütýv%âb>›ßña ;93®§zfQÜ÷ý‘ÅÝÝôúàç™ã-–Œ˜ÔpÕÌXö¸Ä[êíDØ‘€Â –Ù2Þ åI½µ™'ËšM6ˆÒ4þE˜>CÉ»²"Ê9µ¯þ·\•!³ž8V½ŽÒ1ͨ¶ÍÈlù<ëï÷ â>@Ïu¡‰¥–ä™"%N™ƒ@ÏÙ)ð¬óÂÈRIúU"Cçßš­×9òDŒ·(R’Á*‰;¯ÄçÀ‹¬nc¬Õp—1ÕÔr&ÿ´‹I1¢m!´›þöî’@@ËЪbŽÇ>Î\0Þ<ŽÐÂ\¾çØÕˆòè í«€Oa'}ÝÄ©›$~UÕ¨nÏš¦Iý;Ëê;8¹ÆG‰7œI©Ü8èEŠè²S+P|˜òáö·bã¯ÂlW@BØë`ß—þòC¿üo´Ž³¹pŒ™× †Ò-FÑwÊiõ 𩃯v [ÁÙŸ ÂÊ‹TÞ+ì3­€. 
¥ò…¸Pô(wýC¶ÇzµÈÕÄ2ˆ› Û¼r ®ßÞœbxjöd«!JOÎ `ï71Ê SéBÛ–Ió;íд<Û@˜'ë¯%íÑ­Tqè©~[טÛp ÚUîÓOAƹÖÈM¾ zqwêß™ÃXb¨G[,ÒÀÊ+N/å˜"`?ø Ûy‹X­§6ͼ%%·ÕŠUH†Ðj%œv]ÂÀ©-ºjT1ß p8‚¦ ÅhÉ^)Š<¬¾JWU6ç3ù麓j ·WˆÈv8G5@¼¿Ä>µ¿ŒP7í©ÒÈ›<Ÿ´#~>O(@«>FFáÇÆWùâìQttSº$0ÉtëÒ~ëñ|»u2ònzÆÊxÿ—{\¥&ˆw]꪿ޱP½Ø®“*’ëÁÎ ±LrSc"Ýé=ØGV€û TÊêïöv*ÉlX¸1Î>*¿7Š5 ZRª×÷(;ð·"Ù" ‹}al›X^„ƒÿ5ê =Bò¨ßϾ.–®î£X!~ò=š¡þ½ØðÖ¯(/–‚+/Š"Óz%ûǨ€Î1¢ôz›Ù¸ýq8T¨ß¼WôÐç{îÕ¥¬¿Ü›udX'4Š„«†`Pƒ4%‡˜•øÙOÓ~ `”Ù¶jî9ŸÂª ä‹¸IXטR +ÎçMK‰#‡Žx.ízZ ~kW_¡µAÙä) È--/£%›ç²¬ÿvGž·?&àŠœ ™u‘ÃÏ‚‚‰"0.Ì¿’øò«žv Q‚kßÉ:%O–ÜJÑíK[•ø] æŽhA7έÕh/ÑYäY‘½ ɼd5ŸMöé=ΟÊ|øçà š&kÛzÃ,2Ò­ö|Ò3$—âwtŠ­;ºQŸÖà[Xö©H1FdY鎵Ž÷s6EÆSýrý0ã;ÏHbl#3ÌÃ<4î-Y%ázópŸõ ¼„M"ý¸6s‚—äµ’QÀc…GŒÌ|+J5j$ŸŠµ±¸ºt- ½„)¤h‚׿2ÐøÒ!Îà5p¤èI^èJ îoÁL™£·ß KXò¨§êuÊœ{È~“ˆv›G]P´e"ËÈè¸IGˆ{ç"‹ù¼6ÕH¢ô²ÆI‚SÙö3“p3Ò*Éý_Ë­ëÆ¦·üôõ‡Šváæð„χ TÅ}¥Þîg*´ŽŸ·¤1ð'Ríåø%¼‹/CU3ˆµ„3uI,$ì²zº’ýÉ÷eMb:–ÑÞ¾ûË¤š¹§¸1>¯ÈÕº®kÐ$Yã¡Ó` ’ Úè ÓŠ¯½Ž˜Ÿˆ+oúfÜÀpº” -Š XBP¦N#02½^øBü‚z¡00q&ÊO¸ŵìÔã¢Á¬*j—úbn¬OÔP\fè1`ð>ñè<» J³vï¦^<å›k66ܽ1°c‹v –\v‹¨D`ÑYö´å\qçKSåøáU ›mW忌.ÊáUl¥OCbmÌ©2S‡?=òÇ—£"´=²õð©ûM*C&]± 3®ØAùoÙ;¦œ´ý„[3+«wATZ¹wšõ–F3&;Z»O(²$ˆc檪Dš¼C+©HìpìíŒÑYM÷¸QˆYÀL•f§V©×Z~­vÖ\Ÿô Ž|­±žáØp!ÑsÕÚ{w&¢Œ|èåØëG¶füוv¡¶l`åæF÷džX`ÄåÝ96Z)3OŒ­[‹©@cäWòÿSÓ/ŠMä—F˜zdÙê¶¢,Ò¡gÆâ_ö¢&ˆ©d©ÍÙp]­”¢ù“æl™(yÛ 2¯ÈÒ©&P¶¬ãv±VÜ{C˜ÿ³¤ÊºN ŒYÖËs^™cTA‹9©øª¸fNi%ãàäøþhpjÃÔý¦ ¤t ¶ðÖ>;OÒ ò+Xvú@e›r%vS_dä¨]¾WšçäÐyø>WÜSábçãzG±[°ƒÏŒ¢‚‡¡FF)F2á¤ô¥Ö`W>[жˆiÃa/6Än…YÄMn}.ôªÄS¾L!ÀQýéTßü˜¸˜­q£ˆè:%v¨ë!è÷5Î'îâãǦm·#¶Ëü%6]äsd¿Œ\Ük€!àüömâ„$.¹´Žëö?f™áð¶,—dÞªúÉ¬ÜµÔæ­ßéùw§CqÀ°vL^(/ðûo½7€H1ÜØ™Ü,X-”.÷P—Cs+S§×õ[D#Åõ`š‘ r»‘r9› !FtÁŽ^Í9à°±JtêTð‚PÄÃcØïSøí±¡=Z&8Ú¯ú| þšØ¦Åu<ÝdÓËÞ´]¢PûƒTª¸ç7DÝìÀÜþ±òþ×ÙNëŽ'„„aŒt½Ó.ïÖPÑ™0ñdâ§­¡+$|¥ †ÎÿA6l^‡°÷0»Õ;AÂc:ÞœÐÇó~‚ulÿ6šßØälþÞED[pʶóz»3 {®ÅfÌn”ïƒÄë®nÀm3ÉÌQELÓ~ã*-3à©"ƒ¢ðOÁfsÛ—ü3Q‰ÎÀÁŒfûë²U¬LùJq?â7G²iÀýI޳n&›ÝŸ©¢ÚOEÛûê9¯õôòÈÉzj*ª1ÄQðx´ø¢GZÇ¡2¦²”àö%ÆEù=à ö)(ÅÙ•t¨Š<ä!ÝbXÉöcãÜÕõ÷}Õ-µÏ_ñ†-ºš¥Eèúj›D3w4µÐ#K4yp®ƒ”•"ê#ÝMžØýzÆòÂT”9WÍ?XÎtÔWîÁËò¿m½)9×ðÛÞ˜TÎê®+$º°BR$ȳ!XŒ=&F˜ÐÕ§Æ ÎÅg ®@b¡ZžË%-LÌúø–ù¤ºD\âŒEWE­{RrLó‡_:ÓÅç×I$ @‘ÈÌ ÒK&Ó²°Íl~Y‹pâ2c¬'éë‡|c3޵W¼›ïºñ #Ç/X*JÇÿçNwè9°T—íÎ^™ GIjÚÞ 
öCÕQÍ/‡’.ëÕ7}‰}”‘Í#X»ymJ`ë.eˆÔú33xz×Í£¨õj¨3h…/Î[Kº;¾2½[Š7î=+IiÇÀH˰±‹²ЈÁ„Áy„ÓÅQ`¾‡Gi¯R²¿Øe\D ]—°‘ ™L÷'ÅL¶¸¿¡i z¹©F%hˆT8B"XížN¢ÚD) ¶¾u¢Oo†ÒäF0±‘ª¸:_H<âÙ£ù¾Ž–ô¯ –Û«M— ¢òvL®Ñ¢ ‰\ÍyNaüðZð³Û{aJ̳Cͧ]“àÒûåWc”5 ~Ä"hJ—…Ó'¡»™«m;k¬¹±ª< ^²5ý»ê|ðœÀÓ!Ÿ´‰U¤?¢@u É“jS­òîÓùêF"Åâ>™¥ÿZùGðWôø~| ‰ðD-t  l¤j\9¶Fâ€/òD.^5{›‹7Ydò—FSÚtêŽ#] B`J;Âðà¸Øo) ?‚î ‡Äqhˆ ªh¼nºæë¡÷~‰–L½9ÿç°\Ê ç_ïð¼:áƒú}«·êȃðû²ƒÀãy'ÀØŸd¸k?"­Š’6e ö8&¾… å+¡O‡:Nö«ŽY-/)Cÿ}ä&’)‘¨ëi¦Æ®:d¡;•&Þ2=ºL–kh êå¢ ÌkÏVˆDü7eÍšQADðmg¤ Jw#¢;FŒ³]‚Ë@¾ÆG¸¯ã˜‘¯é¸)dÃ÷¶DpN—”‰uä²`í\öëÅ,ÂÞ¹¯1¶ß£6ÀŒmm‰É!äV§9Ä¢ÀQÝœ™Y)ݰ‰EÕÆÞ§–9‘íÖ5ƒShè6@‰/¿Þ‚ 53suú¬$¢ZÚÓ•Ú‰˜æ( ŸH¾}ë"´ †ê ÜH±¼ðõ1ïö §úÝ4! È•(Ý4¡¸ÚHsǦ&b›`íÓsn­Í™Ô d’QOîB\óFÉ—2”Ú¿ê[Pg¹ŸÂm[ÒјI-@n¿~Ä)’ šûá @¿¾÷§q!è‚xpó“^kÊÁdÐ]EK$$(qä«,oXkˆÒ¦Hê)i5³–ó©‡ú}’„Äa˜ˆcÓ¢åZEë(qHá1rëgNPð_6O£§'%÷¨Ù‚ Ñï2\7Dº×ƒàË' €8\C3¿ª‹wŠ‘Vœ1‡æ5jÜÉŠ°p9n¼(]2kgBȻݢi%Cz¿mæÜû‹ñ¸H¹†RXãû\¸Z_•ÜõÅ=`æƒ#…XÐô§²æ‹Q·ÖïµL¹¥dn‚„uþ[Vq»-9'wŠ7oŠv;{‰Ä«|ü±Mî.KÂÌŠð}$–•tÒsK-Ëò×)†o#ÊöEÜâu7ÛI„#r$·,Ö3ë|CÿmpSc$Š%M³§ÿN¢äçÍàãÞ÷éLDº+(Àq ¿7ñ zÅwTù÷+¢Ò™…óÀP.|˜?¶cÆIWûý;h ‡âl3²+èËnƒ12 ŽtîÌ>žóY*jÃ`#ʾöîë郱=õ€lp‚Ÿ€XÕìðN×ÂOß$»ÀsV‡Ð-¡äæ.UQgBîÏ’LZš0w¶ÊSÎàÚž ]2€Œ¯5—.V“8)Ixzw¸]„5¹Lþã#ÓË~ÑOiØMÞ¡Ž¥òÓ©g—å!b1î>Kƒòùq%ãðŃôm'`  óOÙYª4©Ô0Øæ‘þ†we§1ô¼à¬@æM¯>BB/@boSuÈ`QŸâ9‡P#[9ƒjúN{gFä)æL=9'„1òz=)G'ôë „ÿD Ú„¿%‚bß¾#šµã@‡Çž·hEQÛ Ò‘µ˜&q-ý®ã3j„ÍÎʇ̼®Ñ]š;æ.m)åb$ϔ͊1¹›K,º¿´¹ü¾• #wÒÈ‚y¬ã€š²á´À—ör*yŽ2µŽêI=Õlç o£ôƒbÈ¿h5P}Î…ñ<¤˜¼ÎçEoIÓbŸÍdZwœæï€€]ŽeÛhH;‘ëÓWô%ä´8·>µQÇܼL±È÷wŠ|i‚õ{›^¾7‹‡ ÄRÃ…¾Ó/S&èPÔ*šYÌ“ J¨t/I*Ö—§Ól=ؤ½.«`ÃëÃß!>O,¥¸ M«ÖÁTKTO…ö-çö\…ÊÜŒ–*]*œå¶¶Ñóªøó×€ vbGé™1±Î59é ;>s,Û˜tö-õV“ ùEt¬?2¢K¥'½EÊ!)¿Üg“X,z·síhþ7˜?·ã¸½…Ïéð)ÒcЭÊ;šãd|fü×Jb=Ô­µÖûí"1«:ùÌf¨YÞ”ü(ë2„ýæ'bM饠Ť…™lûHïÍ–H)ÃydU¼i› &6w²­×µŽÜs~ |qCß,  ‚ÅI­¼`•"i±‡AòQ¼ø¯$¹‰íW–ä!¸cLÞµpdž¶> }öQÔú=Ó@ð@Õï»éOžq‹!Z“v˜aá-(6Ë È¨DºÅ@+Š\7Ijõiïó/ç©¿í{yÎìë£ý>^Ë¥Áëùýj¢©8L"„øE¬9}F)P+59É©¸,2VïBóàf?§Òva‘ ¬?ö¨ÆÜVE/i4†m‘ —åÊMß-ÒZŽÝ¤’¶ë5$×­ ޏši¥ŒD¢b,êßÈÅ"ëWŰ/zøˆ»ÉXvd^™eƒÆNd [Ý5Ëà¸ÆAhºÑßL4~ÕLÏbª—\z­’4 üŸc?×°éàpâjcïȴK ‘ˆ8v·•rbÆ•|ÂuõÚBZ V-Ó&èÈ~°Oë÷T5h{X݆n«#;ÁABc¿õÙ=À ä#2a ‹ÇLÓ Ð–½ÁÜQ×a람#¬»)2ÐWq‘,£ ˆöœÌs$ÑÆFÔΡvh®ŒžÐ¬ÃUQà9&ö’r"s§}AGõaš³!")¸×ü08Ÿò :Ì †à3;àKF¨„× M&´>ÑO’/›¥EDJ 
ÅÅòÏö§=SETYßæ%t+^ÿ¸ ðóÒ}/À¥X(~acòA¼FC]ºî6Øà«äåk&Ó#ˆ]Ï2ÛXý£³³4ªþŽv[…2窂s„ú åEŽ ÀÊ£È×a9Ü–&âmÚ0´p,”r=Yćƒ[k© ëá'Ä੉K}q$¹îólt¼k¤z@“‚Šó‰¡$^PºÞ)æO‘˜(N‡"zD.êz}¬/ñî§o VÙ²Êtã“ЇD+ÿómÀò±x¤k¦—{D!H e·™·ÈÞ´ÄSÊtÑ×]ý=A“‰KCv0¶ë’,¶7íJåËQÔAK¿šÑT9šBÆMüv¨•} sºb­ySÇ-*‘ó…}§3¾‘ ¥÷œÉËæd¨Á` ‡ƒä#íSú´ €ÞÏÆs.©Ñ06•ÌìÑr[tÊ@­˜å¯M¤ m÷Þ ™Ü2l@ã S©\ ¼g­]ÉÎ~ë™ÆÂ~|¯:ðñ¼I)lÖÄ^@å­e£˜'æÉ°‘äÎHSFÅ©¢L`¨™ÜZÈ÷Ü‚?5XßFôLÿ ær*¢ÒçL‹6¹\Ѹ¾u—"4»é%ïl±Y…Ý:Œ0¸ÄeŸÇæ°wÛa'Qd<-IRš‰²±kÖûÊ€n1ì}I`èt„R«±POŽ(¯Ž±©ï(í‚ØÕ»±¶„ê®øÈJp½s¡´&I“{Ü*%PØá7ÿ7'îkabú~ã 1aÆèßòÞy4…ÊI­â¡Ð­Áj£-ÙrÎ2žg@örc1ÃK VRERþô³°H–§ZàŠ`?ïÆ¹œL°xè~Œ9¨ÿŽÖ¿C8§÷º´¥eç60‰Ö†±‰aGŽqËgš¬Ä6,Ñ5Í¢¬sa¸8Ì‹æÏäSMxO¸ÖHbÄù÷;m>s"è`³‘a6ND²#zŠ¬Ò•Áõ6s8ýSéÔU×â]ùLJ¶Áƒæª¡ yN`Œö…&Ò6ò–ã[iôæoªêÄîè÷"x<äÙÖ@ûM« í­'Ø¢S‚-ŠßîÉáÔº×v†oÝáy×ãL^ŠÂü¡ºió¥/® î±;é*LqÛ€kxú;Ö)*&QãSEÃO3¯ß׽ؠ]Ò.ŽÇué”;“yU¾‡|UûÿjÐF‰.Úãêy–– ¨¶²Àý\cY ¼ž‡s:°)ÝžL¾³´˲!j @=·Êü÷ž ´Ç“ð¼D;Z^ƺ03_æÖ$•™g/M^‹uúž86TW…„Œœ@ß 見\gÃNœD×U{K©gm3À¤7ü»Š1YŒ;­Åäñ;Î,n×s4'1»œè’ü0«ÝVµR£CoøÑ>‡N*±ÆÚŠ®¶=–¯àÑ|ô„ʹÊÛâà߆„*ݽcKbMT-7ušeXÐ(ê Å£ž3ò†ù}™é¢Mut§cËóïnÊÕæ°`ªÞPÚn¾°ó_]­Ÿ÷é/†íÄWþ‚€Qè(d³›K'u–“å&ÂHRÔÜTß®EÆÇ¿÷ÉÂfôLÞn‰F‰w]Ämí@ õâÚÁêÆœÖšâ;,Iü3l½ÒaȳN””ú±ÚÒ—šSÀ*‹vÝ—£lâÙ—Œ¢Ý=‡Û7êÿŒ‚»lÐŒCSÕÅ‹+´&wN \`7@^k=¼IËï]dÆ…ÏÊì5âhw9ƒ/ v˜WÊô~$'s¼¶yZÆv›‡ø`kÀ¡O:!™nqéV“Jô¿Çð;ÚΓÔT¿<°ܸP8îþËUƒ¸>˜h¥‡òÿS6I‡aD¶aöjÌ&¬7T¤WÞ¯ º‰=…ôíÎz‰7ÍVªój×ú× z¦þÊ-˜™ ‡çw{´9øƒœC°Çƒó¤¢ž&ÖŸvðÆ#ÙÔêº2®éjàØcÑlÓgPíÍ„1òsÈíµøãëZ¯(õÏDßñ J_ÙƒäÚ²í=ðq¢õ‡°kŸ)üÐÔÒ”„ú§1Ô`Ì™¢ã}šÂêTTyœ·S•ì:XÓ‚­°uMȵF÷1-$÷ôôlXvÉâï¨VéÞKgrÐwšËʦ §À˜í ¬¯è“³È->6Þ/ õ3Ýš; 뿸µqHlºøøÑN[³Ñðp­! D]î ‰½À|¨‘µ`Ý–„ÿM+PG·­y­‘¦/põyÅô0¹t÷¹gzJEN„-}¤ ÙÍ?¯Vž)_âëÄt)[§±ú¡ë0ħK”À¡ClKo×~ÁÚ¯ˆw‚ª¶UVI’ƒù (G+Û:kaŸ'o½ãÒr*—üÆÔ2ÒK.êE–!! 
Æáöê’&Ͻël,$¾õKœÜØI Î[¯?±.}^á}I?õIîéËézø–Øâ–yõA÷"Üéʘ¸Ä+6Ü– Îy÷WN«Û"ŽTƒŽ¬¹¡À;)=ˆ× Inx1\8Z$oŠg:Ížº?ä4³­s¤ñ ¿´7czêeš ‡ñmÃ[õÃæÓ¯¶SÐòìCUOThÄM)õ‡kö3z²·³ÛŒ¡=cã·] =eùË6ÈsZù1ð"QB4SÑÃxÉð£•héË·b:‹@©Kø?Q—]p–µcz‹¦ºÌûýFV+JGqï°²8ȸ ¥œ(Ïã_ßlÏ-éÄ©±sW•ã^*unà•ǃޞ7ÔÛ@óøÁµÛ x£'1„Øé=µ±U½U~{YN?IƧ§«óõ!tسں̖Ûôö Ç ¾€…R»šÈPYW)=Š/XòUÆ×X+Lb¡}ÿ\ ¨x—þ«øƒÞ ȱ—Ÿæ– 9/<ªlna¨]4ðìÐ׃êMv)&ôNHÅÌŠÝ9Óž¡t'µÁ+Iã?äWöŽv÷M[Œ ¿™åw¯ÅèÎ)HF¦;¯MPQº>ö™ÌtÄ}¡WG£ÐW[ï0)ç$Põ;wIÓe`G!©ÝfL%A 9¬J3PÞ!²œÝùŒïÕø>rÄK¼;òåã• ™ ¶ˆ± ¿ÌQX˜¡Ý¶ú2ý! ž˜ýƒßKt!³¨FšÁÜÀiàÀŒG9Îhåaé”p€Ë{»Ø­*9¬Í4é7ͬèq0ª Ì©(-2¿Bøf=¹ðSòbé|Ü”AÒëó.Õ Ï´”†=ãÏÃu´Çõ_õñùK†1Z¹—jª32#Ma ®çiZôPKxçE°s:_2nëeE%2yA*ÝAݣȬa7ZîW×ÓŸbs¤Nión±$¼=Øáa!¬ H=ÿ_ÄIË[{lŠÝ8”_6Ð&Æ:Ì%¨˜µá÷ Qâ° sºÇŸI;ãßÜ ?á€K/·špeªIbÇcÅâͽl}o~ÈÔû%ÍHÐÿ¾ob!ÂÍD“@·kÓF7’jÆ·¦.õ£Aë«È»—'Ö­Ìér½ø‹–#ãzÚ8šá/d%ֺϣ«$¿ x­MôËßT%÷Aÿœ•KÚS`w¶ÎùlÙMÌ Vv’›¡VМª]cW0møbp18ãßü â¥|~?dE؅ИB´ÃÕZY ø½Áχ0êÊtõ'•­mÁ£Ð sœ`M‚ã¡û’+ƒè•²,ÊÍ(¤D½Ëxû­æœ>÷„œ0å|Úþ-q·IQuÈ.=BôüT.D“nLÚõ&2]›÷Ç#œWj—ò*Té-Á¨d/mðïÝö¯ôŽI‰½?ƒvBªU¢º•SÃÃ[ü¼ VŹ´õ¤’Mæ8xÓ(³CwÔ;c`ËͺÞiG<ùZ2÷×m¸c䤆AÐÅœYiÝeËÈ lþܳ/õ—˜U ”¥°}ˆR6øÊ{©¨$68fÙyíI „Âfß.E'Sj%¼€Š«û?{\ï×pÃù‰ºÀ„ ©/cÏ`ÙÏ+¶²{&üx¤ãëÇŸˆ™±÷ §aΞ¢> Ù3–RYL·,‘äõ²—çÁÓRúb&H˜ín¬cýÝxÏJè4ío×|fri$.Ôû7§*²a«’wÅJ'§-Ö¼xoM¹Lù0ðCVçW]æ€ÝÈbòëWr¸áˆeM¡ª*û„!Y—jàÖk5¡(tÚ ?Ðè·²†ÂžG¢RˆàIÌyzAÖSÿËa©/E`¡­ÊŸŒö‘8OßµÁzÜîýÃ%Óï’$lxîbî»/ùô¸Ì×A ëæÿ3¶ƒâçã­ÑºÏ‘0Þ΂U&ÂþcŠËè¾ ~¯þkŒÂ "§Òú¥¥¥úòPá—v©«ô§¶÷sŽý¡nÀ¾„âÑ¡2„Fà|Fë4—´ö›Ûzì«ÚÁ8—ä+Ì¿êg޹ïyÌ Pàô0ì/úXÑ4Ï 5íVfõmëö*/ͧÃÙÃ$ŽÏM”3å×®»õiÆ©åª&·Ýb¼hF’¢79Tm’RÙQ1?—’mr©ž BìTj45ŸÝ¨âï«áÓç媘rÉ2òoª¦ïbsxG"WÞŸww“†¸krcNWº!7aJIHEåŒâ·5ø?r±¬Ï[G?ˆßx‡„ѺRj¦aâþ4<ìz/ô£’ë®7ñ³þýÏ]«;Ÿ/3üœps¼bkë!zGÇHMÓ zn9=êYÇ2|i9瘟žºÓ„hYv„ª¦ó—G°¶]Þì9£L]*/Ü'Â&«ÈØ÷Åäå$¾•b¢VQÁä ”˜X±+ˆ„Rå‹|ÕQ‡ Oo>ß{!:mjVµÖF„”G“Ä]ÂJ&Ú=éaJN<0Èׯ‹ºYeˆ&t—寮å?¯Ñ)íÈÓû5Ó>eWóÉË*@–¡†ëÒ4ÅWg«àÞ8t“æÚ¦ò‘=‹*ndZÏ~Dò…áQíL“æ6j8«ÆJlbûº\ ùÒú¯nÝVHÍOÕNH‘ÙE[Ø×£÷Ä¥nâ3Ü/çŽB>V‘žý¬£”ßÞB] %p®.¢.AXMîYÿû÷–•ÍÅ/ÿ¤7ÂöÉ£Ýzâ·gWÞS†,òº½‹Ø ÿ‚ „^âQk·Ç4%¼5ñsŠgÈ®Íý+“ZCTÒÑèøœÅ4ç÷2<éÚÍØ¥ˆ;må¿äZ+,˜#¼iWÂà²6 $4P±Ê/fòª@µÂ~PmÈ¢|™|M9ÏÏJ$\gúf+'Úδ„Òf«`‘_ñÚˆ)$­ à•S\[¨p† ¬ab†©ÊÙ›º¬O<4¾D]ò€r¢Meݰ;þ:zÂJK…ÚâŽyƯ¯Œ³Ç¦îÖØ˜x'µý“óì50p)f€½Æ´ù1"è)ޝ&:gšTlSþ€Œ,4j;4 
˜sЮ6xÞÓÐÀIÈú$^‘\¬|ão‘ÌžMIåãÄ;Îl¡euíìgÓd¹,a/˜óp˜7#’×´$®ÂKÖˆuw?€ ¶'âíä÷/“mïŠ*ÿë´‹rD†Ó­ˆjÖo ¡¹—`£RšC‰ Åô÷ji‹ж_jòq#W=À½ã©îϰXV2sj`»!ú·¾µæf_ {”Ù”M=&êÚ-j&¿x´hê'&k< Ó NêÙñV¢Çí8ÑQ¨Jš{N¿4(O]u»‰gNgü·¸î4$r\¸çû/¿¬êKôˆKŸ<9xx†ŽîL­M÷|…±•ìœw´î.×VogF>/èrÛþ“_¥ûÕ«?ºßBŬ—½‘¼‰óTÜD çYIÓ2Æ“À¿mºdl“:Ì0ãCŠàœþ>‡Ù3£Zòpz *ü‰¢Qþ¥¤IW+LňvÛýr* Ií©-Ì¡DÅÂéû%Ùœ+ ø_ )îÎB3c,j}*Fñ+æY\+yJº‘ß\Ä­òkFëq%ÊyÖ‡sd”½”è/:ÝL×ݨý€$¬m„2z®”בRŸR/LrS xÁêjusbÀÑËÞ-L¬Åªr̯Üsò=2Äí›ôÿ¯®hY·Yšbó³½ÛvŠq§©J „ÃÜo–'\…`_öá&N‘`|D/fZ¢HÌ8‡Pu^OÈ<à>ãuÉݨ&h2 îØd©¸â8ÀƒýD¯£?IÝJ ÊÛ6F³+“Ø„×ÄÅl5£Âã<>‰-“ê3lÃ{ ˆÞ­›ˆÔmÔLŸ°¿ßußs“—ÎÜâ•wx–Ze¸4d&U¿…q«!¾œ[½áæ ÇíêfÚê|uÅTHêÚÞf(¥ó¤‰2¢—œ/ú9„? ®h'¹{˜;4‘Hµ8îpvue§þÄŽ&åíóï¢ È¿äØÊÛ®úüñ­%qVõ¶Ê0¯ªáµ¦Ù¦@%§ÜÕ&Î^Oñ·˜`r'ná¬(peÇ&Ä?T ^ýÍH6·Ëòâ•/¹ ÔE°¯„“L&ù”+­ÂCí=ÅHöꦻµßb>?fŒ‘‰™ÈïÛ¯»Ç â ú×8±¿°Ðn²![‚¡“&¬½-…ÖÁJ2XyŠVÁìMþfò¿Ø¡‡"Ég}ñíQ‚³"»(’¯ñÚÞe5àÿÚ_1_ ($ÃBº‘œ&ôX%/Ws»7p’é˜È´•½†Èáá×ahbÊ·̤úÀŒé/2ºÈHlý'è`ài‰%›V&ø„W1Ù‚?-ŽêMé¶°DÑ‚Ä\h54®^gîßÃ.Q£ š¸YA•õ«~?l)ø'bÿ›!ü]š~¿ÉãÆð9!•mqªm­‘ø.ú»$¢;°(têqÅØFõÌp S­–}'¶™N£‚…ˆ $øx¦ó¡x•‘l%×Y¼f*z¥ƒ Ðð)¦ƒð"|Ó ›Y(1æY.Š›£ïï|Öp ]0“åB-}QwÞÊ‹ŒmM'jl§~Z„¥Ÿ»–}©êðîáÉIí®N™œÏ°œ[i×Swƒ‹É¬9€‹Ye¥îpl™Q9T]ÌCοŒ³“ŠzþšbnR7B aݤ¡¦Ó•©*™{d¼¥‰Õ*3Káãû÷7Q“†œáw©_*™¨*ÛFH挾ñR®Ù ”Ì/™¢Y%A 6E±ã+þæS÷Deȳ1·Pˆþ´¹ÁvoJ-+b Fy'|LN&jþÝû/Ú›¾ÅÎ6óDÙXe (áOaex<ßô®ÅîNÔExÙ¯®SOyÊ ·” `½mÍF¾ Ù+ð‰´vr„}ÈÉüÄ{Ù\·.X©Û=k×ñ—½¨À€ŒYOkÞÑD÷ÒQT¾³MOKÁBù<­·˜/£þHá ü¬ëEA „$ÊÅ€ž‘ppöç›nÈtAÏ? 
ÐŰbüù*r}&*ùª]vµª–8š],)Ƥ©y„Õexà ٿ÷ ¢»âº¨ ZHôD>⋼+QŠ}Û÷ {¹~>:ùÕ˜…—I§ˆÎHÉR±>É?Š×b>ËÁ¹ï¦¿'aÊ׆7>D¥È­@_üv˜W6ëH§¢ûî1M&yiR¿Üuz ­y~Ü ç:] &ç€_ÇÌÈQêhS{EoMðÙ° µõu?Ç/¿²Óhøf”@ÃéßöhÛÏšÈ ì“V@'>?‚1}€¥µÞ=sûæþ|)™ß¼çõôÍ1“:¼',Rœ¦gè¢&OŠò–”#[X›†,B[ÂxŸ«Žób'Ç›¨š§aæG Cýà'æø¤•òÁï4{jv²Ñj)ú¯<}ú&“ßÝÅM6î ]vI\`:þþSß®âXÛmÐÒMH.JK+7Jgzñ a(y¬Üf@0¦¤›ÚÏR‘×ZÁ!ˆ ýÒì'¨®"9TC/º1FÎ&F™bÑÔ©†Gü$MPt«\ÇŽòQ}ê8ðM^Ü™'ÏšSů!Vž‰¨ÆÕªX;­„z^܉Ñüü\ÍAËHˆP óPK=b4ÇØ/‘è=ޱÿí „WWœ<Ø3†æáÖj½êé×Ϫ-3õµRÚºÚŸ½(fµ0„ÙQPûYñS}©Æ‡~Ô‡8‘•éÐÎÙ麑ÑmÎR[Dû‹ý!ž«þ;bI†ò+„Ÿvw„9â–בݪäöÒ4„ÌB½èš4‡< Ê$¤–˜cIÜúׂuDª»)2FRf.{ØÎw¬™ëÜé 5qú`W{*&'FÆN"ÎG¯Õ*’ –«ð*zœ¯øvßæ³R]M…œ”áû?`K¾*=›5¾w*xÄÝ 0M(–2bé4µãüi«%/ 2ïò4= )5«`>Òbûæ6Œ2ÊÅòöYX;.ࢰ™ŠQÁžRvÍ1òôs–>*î}J¡æU„q6wõà÷æ…nú;fœe¼¶µh ¡¨(ðZJùCãª@“§]:Ú¨T“; @¸‚ÃñÖR.éë¨Fó\Hnø•>»¿bjŸœÃ©k‹{|Åðĵƒ( lÐu‰P›²ÛâòýÄ6EשNåf‘<£kç×W_yq¢ Þ3bkúÄ.¥%ó‚œHö›÷Á[0©”9­<ˆù|Ûˆ ÌgõXvXŒxÑ5ïu jÌbèúmYp=ÊU¨0óA¥´ßç‡.|~Ù¬ýÑÝÓ-Ü$øŸP?úD¿³W+(DLÕ/œ³3ûÌvÅÞ:¥)[Ö[Šääýe!iÄyöU8å¿]õ¾™\õDû-r¡K®?sòõãÇÌÈ ˆ –;Z˜ç oÔÞŽú5›?`K½[óßE²aâF¼ê|Öëjˆ¦Kc’²Ýâ¾$_cŒ¡ß"áÓÓ—™('VU™¼g9°­çåSÏK fÿ•Òˆ÷VEÍéÜ:ÅìWÕÚ¤oê9N ìð¦¬]£ÖmÕ@ß_ß¿Íh¸kÉ&g}jÀwÛ%ÇA†‹5Î8@ íÔbŸ#Ë2‰x~Ò¨BKá~Œž$¶‹x¤T¨ÍÏÏû»½õ"0¬ädC%ˆèý-Œ¦"­|H'³ü ¯å¼–»#cÑ\Žw¤àv4¹ßÝ¢DážY_[åbŠveœ5Ìa‘ÂÖù‹OOó, ‰±·M&å«iξè£Mf¢$öUÑt™ïr+ ôïÆ’ðRM *ºÈÏUØ•¨N¥|Ìç¿¥¢x—›‰¾ÚÜPâ•ïÕƒ?ÿæùЩ¸*£CÂL™[Úõ&.ÐàIýrS>­­Í¬sÒTA£,¤Èð²ë4k!†±ŒÁ•B”°kï;&ñg ™« &ŸÞDT{!*Ç‘DNb=”LlVÃù± ˜bUÈçµT!#,æÆBã+öú‘³²ê¹¦^A9fÚŽëû§Ø­µK#R«¨éí½å† ‹Àhãìý:¦Ð_/ʼ¯¦™ÇÚ.õLºçƒ¨7ö=øO–LjðÅ› ý¢ny¹“­y:Z•ãW8›>gÏöuX„`0~®Ög¹ Â7AûÎ%ÂÃÃ#Î:Ý4Iî 8 ¡(ã¿ÈX´áÏ8dý¦àæOoo÷¶gŸxŽÝѼ+¶­™¦záF!i“]r6ÄDEËã_ Ó0¿]fÚj´j¾ºÕÊnÐM\«…yè]ö”{òå5ižsvš0¬ë021÷ÁùG.no»’xJ n:dÞ|Èc«½ÒæåÁ«hë\’¨ö‹š]ûBÙ.…D/ €sg´)97<+âxðLSÆÿq1 ¥†öÙ¼Y3ï¹`=eE÷߆â-œ‡Xôoy¥HMŸ'8¬‡eKPŒL´QulmëIèÞ5R B8òS ¸Ÿ3²%>8W”¨s–;hÖ¯VÑõ¼STÀ½´”2gëO2<+õ¥ü©ÛQ/«jŒ°+HtÉ!r§)Yñ—úÂ?ž®”R,޶{Ä*“üÆNFRìΪ:ŽBÉJîj2ãÅVÚÏæ­ê9Ðzïxã~EŒüÅËJCj¢¶’î_)}ùM2ëIý¢Aÿh9.B‹ sÎ,ý±DV1|ìŠjPÊ:p¯mòåä™,Õʲs¢¬Ð6š¦P5@ã;x”%ŽóøJ·¦:S%¢¸J#ƒ‘ÊaŸÞ¯ô;[½g“ ¦i§[ÖOš9>ų,€À,déàgÿˆ~Òù6°+g¥–d(îAèCmg<øÜ‹«jÑ‚<žÓh­¡Ó\>nªM°d†zKÀfü¤œÊóš¹ÏN‡•Pkt;íºŽ’ ê>*]»v©Ÿžá´9óhT¡x¥‰ÖAGE~[bQ ‘¨sx3 ¾¬óP›ŽÀ|›´Ä7¶K,O‚.ùÅ׫Þ~Ë»õ¤}–¿'½+&{Æ5­W'ŸÌn—”·¤B¢ ]¹¡˜(‘¾ q–V*qè4û~åÂ:ˆ…rD! 
šc„“j~G nšÄFü}'ÝjTÚïŽú“^üR]B^cr²!ÈŠý³"ÅšbV·Xm $À]ûŒ©ç£<G—“©ØZb&ùîUQ¢Jx© Ї)í¦\»ö9yzøÈ8ÔÉš¥9—††Ã–¬7ë”ÔºŒ}F@²Ä¾yzÚ20à ®vCKð•õ¦Ç  Ïø®W¹¬qm„t ¸L«lxY\«y¬Êpëá~1î6K6«øc £Ùhr™‰¢HÂLÃô{ƒQå)¾‡kà–/›ûõ+açzœ‡hÑTLN¥(ŽÕ`F '`8ï÷· µîL°ÿ!¢tjùâköÅíž3BÀX'û º~`ˆ–¨÷q§\›1–¹å¥Ä_ñ¡í×oòéóýà%ilâø9ëÞpBÍ(ƒÃ‚­s i9Œ_úù)1©#‡Ç܆@ÏŠg±º×è `>®ð‚x›ë%–”„qJG¾sô™Otý²r ŒÞêv'_utFWëd`ÑF£SlÌt1R­ _ñÞsÿgœ+šl>£²÷щtú}«¤nS¹ØÐOMŠ?Ò¼AÆäOB˜o GòÑÈ´ÚnQ¹Ö¹,[îl¢ÔW/1¸!ñBxHróyb³Ÿƒ¨X4EUsó¾_ 9 ®ÍV ŠLÐY]¨«¿ºŽo(.cî€ÌŽXà]Ü"3_WŒP#wp ›lGÉô%^%d·-“r ÊFþ¹oRÙæsxyãœU&úðuJ–À{5…¼uw›·_ùëµw%„ŒF¸¼ÏËB!rÅìïGG¡+b›³ ×$O”ö5·›Ì+NŸ„_£:ÕhIñÖYÇ/*ì½$Ø®†â³™¿Ø†Š{õCN‘eº¸ü4@#$SíÃ_Æ`AÝOÿN)ôÐÚ%ú–x5ˆš,ŸÁÇ[’õÖI¤½„·ñQH6u|DÞ;fë꿃Óû»²Îº­qëûÏñ1(ý®%:Ÿ@¢ÕËrBT`ÖÎøñݺã"_RâV? ~:)8ðÛhLEUÇ~p'[Î úÜ‹ZåçH:…J¯»> ]ð>à£/ö$€Õ;Ú«™pȤGéöJ¨¢a OCð€àÈÅ“Ý)ów:&îíT¢™L ¦šØ8™€ö?rC”ê#sœpô…’„bò6%_¼,Ãf8nøNù‡”ÜKœÍ”Ô°'L‚i“Þª½€}"/ÿILÀTÄXrmåS¼µšnSJ7âž)cp¿.ùd~ö Ñ/ÛC‚ÈÓ£ºÆ"n3`C¼>>,Òu ø ŒUÙ»²âЏÀ–%Y¸ ŠÇpÍ]•6ŒV‚Ñq¢E vŽNâõÓªríör´âƒ1@Ò)ç?ÃÆiõ¥ý¾E¡[d¸+öJ®öç8T’¸½ÔDWz𲿨½ÞIºr½°´¦ðݺM‚ŸAÌRlâØ\Ð)l*D›¢H*•á9åéü#È,LË)ÖðªCÒÓ5Ɖ.hFÈ9ÁÒ¸¬ŒàDZˆës@ea³¡Èþ?Õ~í6V6o¬y£Ã²…JNdz=\1Û3A£ ÔŒ’ºª3;/X°êT,¥ÌùùßÂ;6¾ÉØp÷)çH&k|ûÀ=ì®jùhàîžq'ÑèpÊ1I#Üé}Ûé/ñ.-Ŭ¨±<ÔKâô#tA{ÃF «i³Æcîð:Y€€>—׀ó>F¢ ðiBB|îçi‰®ã ùDÊüÒ!da÷…–OÇ ¼ Vƒ_ØÒ쥧>2=G²u¾ Å™=J@B¾ŽíÔÞ FG¿áç :-nÔ®r_U‡w‡aGR½~bqtl.¦[D•ÂÎ÷#ÄÀç˜ëRB:´Ç¥“ãþ%Æ …’Óàö $š-*c ¡]ÒŽ^<íº„hç_õëiO„"ñI8ØÛóƒ0å:,¸ ÈqGmé'EU nòàKmVÎC>àÌO&P0AŒ"ì-ê˜,·5dž$Bl¶!?z@o•ci«ä˱¡Ý”íáã«…‚f„¡4<ûÑ.òŸÅ{°¾ÇñÍÏeQc7?ø­hktk=:s·†žZç”@½e:É-’îÐŽ+:S3¬ðÏ ‘GÛ½«’Ý®‚·³×¸M¦üÂ×i¡5^Ý]<%Ü)‚R(¨M¡Ѻ7 ù]‹›PzDŒ)?OÁ„fX -zO#¸“ýàþÿõTþåT ýwA óØHH“\Mˆ£´!Òýj!ñ±­ØÛŒ×®ÏmX~)8òø§°Å•zãHE{Û)LJ«¤j-sÿØ÷y>üFo( bîÐWŒ¼ðtÎäf%NãϤª#„Ô¦k ¿¯ˆWïòyíËKzb«¶K!â4ÁÈ>©JŽlž«–Ðà€qAŠy-•³n­Ÿdvãj°˜Kœ¹±O7Õ”4ƒçŠ«H7b®˜.ýCò?v‰CIQmZ‘‘ëÉ|„Ä=Tò£4O+6<º6ªïUD…:÷™¿–~£LÝÞÍ¿GèëXa4º^­äY„`(Xh¥GO6N“u‚ø<‰Öíª¿Ï×9ÇS§v…sR5ƒ$2-ºîˆu öúH\÷º;)‹ý³·Péf±ßK~¥»äÓQ/á¿>î3y”¬tH‘¹Lîíë ƒÖ·ž2‰3;ÅqsþšÁ/•ù¦ ³7Oú €K)Ù\¥+úÎÎù¨öÐNÑê/8ö Íèà\”‚Õ(aKé´ÙÌQËÄìÄ}¸Ñr–vã ³Ä?û0b²Ÿàr¾Ÿ˜Ð@Qk쫯WÐ@>I ¼áF;mAµ’ùØ5s×™¸ûÐB î´‹h¨.Çör[$T®=sšBC”eåÁÛ Â ³ù,É/cÇJ šxêB"¦®¨¥¼ ´Zú?Fä 
~@ß.ådå_‚cE¦fÜÀ(j~øÅu³Œ«ŒkÉz;àV¬ã"ÄÇU¸´ú‰ÏÀàÐmJZÄ‹rÌÿäøÕšÂ__´¢JTáœX»ƒÚoüUMðä Ì:BÁ8+c.÷hÜSus+­ZWkç…lÑç ®7‘äp‚Æû_9z¡¾yô‚™äþÅùÿÑ…«g¹†.Ь¼ù0l@íUCg?>Woõ%ø€LHó‹·¸îÓY_ÛØ¹±2íìo‘ùYén= éI°ÒµqÄG’þ+½gÉV(Rçdú“b=¢)~Â>Ã$è~²/ݸ»HXH²<ï%½e…É€dFi=_ï*ˆÅ+žºÀ4Ö¢ò@éêÛK–êŽ~›s+ªÃ.×NòÔf´½`B¢èÉé@9©PBGÏfy†ô©^ó­8Ë}!Q’Ôz¬$›ÆAÇ”ß`ÏÝäÝ,•:÷'É+óÍ´_©]¤Æå£³,³¼îZ„:ïN[Ôú…tpÒ»ÃÐ,å1óÚÌڤڤ鑞wáùßÖÄY;é“h¶IÉ te|×àæ ¥rlÉc¶c5|Þ:ÑëœSÃv1APe¼=<ŽL_Òì©ýõ ’ƒïGòšX”sÚü¬–àh6‘À2-<õpˆÃ™Îñ”?†ÕNĪD•‡2µ–¯¿Tc9zcÒΕ`éR¹ýARuü…áF–Ñ&cÜ ˆŸ#ÊšÍ~~V"Uï_—g‘…½áÆe1ée­À °sfOÈ4z't¥ê¤ÇȼHáp„ÈYtŠAl.è³@’C@à¶ùÄ›Ãä²Õ¶[ CVíÛ‰ß Ú'ªà©‹á=a\0MÉB%¸E f·«¿¡±*ÙŒyû˜µÞÃ?¦`.;Bô>é¿5:X)ZDŽæàøâlÂÁÆÝt[-•Æàn/JÀB¡FmR§‚ÐOh_¼˜É© ‡}ŸÙ%ÿ½`JÈúçbìí,w¾Jžƒþ\3N}õ褧qxðæG¿Nš3±_*ÎaL“¸ WÛпôZ2ÇϾ>…Žm”åÙÔTÁUŸ\¶/Z6¤b¡‘Ãö ñÉëÁ{›¬ñ×bW`u H£ö¤RTÓVQœ{ÐNPNÂ:4ç£ýR’„O(‡`„°Vë‘l£ö¦BÔ7ýGô f‰Nƒj%Û½IáÏž´Gë:Z@.rZvÕ1ê!¨»è#uäirSrsêÕü¼OòGƒ´E ±n#TvþEÜFº#)æDj¿žkI›íOÊ[®qêÚõ‡Äýàe?ÅIw<Í» Š^¬ûá8]Ìe¶daãC jŠ[iÈI>rßb{¦•F/ÿô@ £7ýr@¢©L¶5ŒHŒà£Ù”G€þ§«A-!ãíÙ¬<–ŸÍÐ<¿eóàhØ$ƒåzA±VÙ  ‡aßO5+V»±½æV<¦0AFsƒß ­ÿn<oSlP  'RÍ ÄáT¡Ö”Š$êÍ 5OµÏ«QPGʼÁ®À‹:Ó‰+–Ìx´U¦ØC%9èfšJ¼óÝ4ûwå¨IAÎÏŒ¥Ë hu áµ9é6saq¿K9‚$¤o}K‘¤ôQ›ðƬ|vW‰#øù^Y\9HÏ?Í-l†÷µ? 
P㿲?Ñõ‡ ˆ&Ñ#ØÛîíq’¼È[oçÃbÞê¦߯cæË%êZÿæ`ªåEƒèÀ'%€‰_j %=c ?~ex@FØÜyç7JïHª§%÷ÀT«±ÇlœTi»äëÆÇ5ƒæ×ÍcîÒ†žr’f6ÿ‘7.èûÑB,\D¸–)EÕ=½1ý¥FÿW‘—Œ0fD…‡!17“Þ¦B’xMydtbÑø°œ!BC“qœû e‡°Y]ñ§lÿç!gjIžœ²gíåÇ!]M¯âû™™Æ|67J”}ÒªíJØ,5hÆ™¡å¬G()¦¦%Ðx2Ö—Á³QÑòe×Ù˜¯ÞË f- P‚¸S¨ñÊ@FDiüsÊet‘Ó–5nýfÛÝ.´­|2äJÙ¶Þ“R·‘ò1€ú6‰‹à#¸ø¼ ­é>UŒ‘ú¸Ç±}iéÚKf0[»ÆÅQÅ®'VË>d×Û Û–c=Ë)®•º‹¨K¨xns"ä¾Ô—õ¦é·Â4ú47냽‘TÖ³í!ˆ'“áæÅcìCˆ}Û\ ÆtúÎó[t·¶œK·m’$OXËaŒhcºl¾Ì,áo]þzp@» TÒ5³nœoñÀ8Ý̨ý|¶i/i8ÕÞ®ks\°4ÿ~ö+ÝóßWFmÓ+û=ØD­œ\pŽã˜ßø® ›^\BªX¶¿_xM€9Voä†$fÍÚSð9%n–}Ãöû“UvŽe@èauÇWÌ ¥×mÄ:™$Ül–0:‰Ÿæ,Ľ#ç Ô¨¨2ðYæc Ò\0Šé÷£~k‚E—aTAë³ "£JT`¤`rRhE\¨5YwØ:ã4»€)Â{ùºCNᯩ ÇÛé­2¡ªq$eµâ1‘o;¢jz“[߉—3Ø\Ìl8ª¤VÀÒÝ‘‹ Ã6,½ã$‰´Ž¾f g’ÚŽ©áIjÌ"izVXé§°-Aöù‹[m Äï=À‡?šŸvH RÇÌZìfóê`W¦Àkr—ã+ñP ÝBJ5ÆW­ö‘Ô–Hž‘àgÞ¼9Koà.‚¼_vl*Ùõ¸n7V„Äòw 0²ô’¡ò*jÈŽDIb‹ãÜ÷¢Â1-œE¤ ž%ð™hüÍêè^]žÅ\ µ²<^ÒПͤŠÃ3.5¶1 Úâ¢÷Y¬ROk6b· ßÓÃÚƒÛ…z±;Û yùlOÑÇFÚ¸¨¬Í ÀmuáÓ.7ÏkHæqù*«LÉ6·tdSÊ|“£¦¾Æåp¯Œ­âÅšUïn]é¶–w€ßìöÑZÖbJ8%<Še¦†ðmœ5˜ÿ'96x¨:§ðvPq¶J¾¡>Õ¤¨’³…ÅÉÆødüYE¿"7º?rîâ¯y’"Fþ•ƒ*j¦ ý›ÅätìFÂu¦2Í ®˜Ô°ÃvtîPGïäu“M+7ô\m©$  yÁ~Óscʲs"ÃnšøÖTU`†¤o.q¬‹æÕXxÌhA¦>eÿôœ <ÉTQÝÜ´ˆèn¹·:6,ç,cÏ~ÉbC6¶®E‰-dÖýB¯©ï¡ˆ0±—ËÀW,|aBŒæ0ù™œ²éu@ ~Y_6sWš¸y†<7„µ5S"gzÍT?5Y ìõµG+[âuuÌZ¬‹åÃ|F:$)tb(2,U˜Gæ¸ZôìUZUŸ)UìÌÚ¦å{áûÅ&ÚÓ1Hb'>öñL5Kìu3 :Ÿ)Ëkg„û1™ûÆ™¹Á¥KuóŒ8ù¦ähÏú ø•8gíye÷±:S\m6’£¬’dˆVíO:bëÞÊCT×EÚ€]E£òžJ‡ Gž "¤©ÙZé’·½<+õ‘'0Ëø ž#'£Ú†Äô£èiu8iX;èæ ªa6pµß;÷ö°:ñ܉ï‘ÖÇÒ½12ŒÚee8¸‰WG‚Œ°»ûØ“¼â—ÕÁ×g){‰yÓñH@ß×ÊÉ=<É׸ÃÞÕæJ2nXÅ(q-‡›eŽÅ9ÉHA‚œ²D2ÀÉwˆÊ#¯=ƒ³#²ïCìÝ `E0dŠeÐfp2¸±€15"OÕF{WzÓ ò2'u„¹IZ´]æ˜ç:5˜·Z˹N0^ÖP2èùWÅ5A½$¬'3ÄšðÝqŒÚ<•±]£È}iñŠÜÐR¸™Ðçè)çͲi‡¢Þ[DÕµ'ò×WIq"!¦SG=…{ì_Q¥ÓHŽZFÿ8ÞÈXj´R2 â.ãl ³Gƒ¹_ÂÞ%f)faÁèA9„{Ë5÷_PF,k6ŸêÍiÀ‹d;>£óš‹:-@i“)6@V‚‡t"º˜j“/s‹§w#BÞšÌdH³^6B(…ì[WíN;×Gú¾ce^Nƒ¯µKÊŠO\m‡þ— »ÔHp8¶zŠ )í ô¦€¥«ÅecöªÍ¨ÝË6°`›Ý‚Ù ò ƒ©<|P› Ý~ÐÚ>–„··Ù7T•«N›¹¥%EºnÆ—öŽ×7¬P?“É>¬Zy=RuÙ -(åø)8¸ !ºÚeGïLEˆ2Í®ZÊxÙ}|H‘›´=ò¤iÝMÛ0’Ô×P:–©êåïHÈ/½|õPøÔÑù%%³Ê€ÌA+|oÔ²SîSnÈ{k€þOòê]t›¼‰àÝö N·s鸇<¾~p$-jðcv^Å\HᕦÞ(we…??EУåtFS6 ¿½ÿkWîxTúý<+rÂ@1DŸBjìr Z™1¼i}/™2 ÂV\À|~ųè¥eG1QõêõP´×;tmüí@²)Á¶Hë_’ʦP>†pbÍ\BÄ!šœS(‚™ÜÐß2eë<1»Óü{®tÞ§Ö’×Hf´ð:M1-y›÷¿8LXL' Õù“+…‘^K‚ ß*¦uwä—kˆ›VD¶J| 
ƒ&W…fóú.yJ‘X¿¡áhDÓÔžj²CæµØ"ßçf(F€¼lGð•¥3û_}híК¦Ò w0k.ÿCÔÍ­žÊJ¥jÁÀD9º’‘¯²ùÀ¤0”ú›§Ç¶¬FsG×éhYÈä[²ùÍÆªÙ«kî„sõ×¾w£¨Ð%l„2‹IW*ªùH¨ùûˆÌGÄ_Â[Ñ]`Aue[DÎGòJÀRG9 ‘K§$xúï±<ÛQ³§üÙ,E?Á%Ÿª™&A[;ÈdKù·.nð XÕãóCÈ`Ê3í‚_°iYDSŒÓÝÂÑD oïu"Æ*ëá1º;-ø6‹šÕv.'½5Ç—Õ)0\3”ø(¶ã‘Ôùüè´õH„ Š1 îâ¿“ôàña¸ÚRïP}À òá[>:§f¦Ò$’Ð6i¨d|$0ÕüÍøÎç\dSdà •‰çÅÃ8°É†Ûà¿tøZ±U ï•äcœ8dèØ Â$)41ð¥[éOoty~˱ŒY_D‡½·šUI>†s‘XNp¼Ûñ`pK(p]„Wà£$½1lâ§k— ,Jw_dWc²ƒ s&ÆOë ƒÝ}ôÇho`DepUîkAÃcŸŽ=G&Ìmaô‘÷ä'¹?ósn¿("Ñ–“ùð§¹À) 0~!p2ØÒšYEƒ*Ûú@?»ßtñàj‹zÉ ‰YËÆ°î|¤Ôô^;¾QÃo…ÚïNBÞöægÑŠüLi‚ VôQ°j(Ó&§¯ qŸžÖ I’¯Æ ¾ÛÐÇ:úÞ½Ñ6ÚÄ¥!–¸6Á Y¸±µÆˆ"s‚½ŠÁJ¾‡·ÌOL´ŠIÂØů¼åör ¤[-R |2 ÷¦ãj1Ðdùëi÷Àþò|²éÙ²S,x)(½zÒÂD{>„ĘÄTúÌèGj5"Sƺ¦|#C¨èŒ i)š‡†œ²ÆÌ”Myp.áÞÎVVÂq©Ãí8«¯Å~NœKÐ+ ÛE¤ŸÊÕÛ"…?‘FßýIs‰/Á#><ÛŒø°ïƒú“\0uܵ—•é5<ÎÑ4e† IFk øQê#KÖM‰B"Åè9ÝØÜ¾JDÇ–Ëòýüyla ‰š`÷Χ…‚¦ž±>ýÊ;yÑEja<ïÑQ~0ÑðrÜÚ1UhgÖw¯7õ²B[¯BcD‡‡¹N?=Ü“CŒëX‘™pÆßOz?[;´Þ‚ãE#=”µ—ÆMæßƤÐåÖœa¶ eŠ JÓb;–;Bõ ìx†ÃvXyõ='å Ê:u}&ª‚¦²uWn{¦øÑ—ƒX˜‹ãÐiyxãþ´±½fŒXhdI&ÌqÐn„sç¦6 ¨ÎÉ—4pñáÓ|‡¤wî|- Yâ üÄ57ø¤¿ôvΆšÈ…“³h¨h·"9™ÍØe»j­O[,–›HG@@º¤øõkY‘°ãWÒÃõñ¶¿ðrU‘8‘.ŠêJþÏÈ „K£>g}+ÎØŠ Œ¯+ÆŒ)˜ž‡dŠ[îÝÉ{!LÞÆPð¨ë¬Ø ¨tǼér0ã@u_Š-}þËêð;KžË‚§Ô@ÌЉkÛÊ£Q÷Œ,‡fʽMmÔˆ*´¾v\$ì|¶,6MÛ*gJPYŸµÁ™SÚEù€ÈT­''§ÁxSiѲÓ<†)cpÊ+äðu9³gÓrâ€ûD¤òeËø< \¶LPõê[=Ø‘Fá% g.‘,¼¹Ü3» žYf'ö²¦tÝ¡²Ãy±í¢ë»¹•9€_`Ž»L ¨„³jN)3d¹{…ßôÕFD×Gj}ïç$:ø´àMÁ^´«& Ma‘Òö–`Ï¿'µÁ,ËŠQw“þ<… ”pvâ[®³Pní®MrÄÄ1¥uÚº$ÑÿÈ*I*w©uKñ¤c–KûåàgQÿ‹éUóÈCgÖh¦#ã¼®L—Ë.` n%5²ðè]õÍsªåøíŒnN0W¨×¥[ösŠ‚|Mƒ­ôÀ™ÂÐ5áRù—ï;IØ™}xí¸\ù+äöH»¢‡‡ê,™ °Y•`ÚT@üNrbÐ|µ—Hbÿ÷u7»k\¹xm/‚–áIª˜]Œ Aù TJg,,q‹ ùFÁŠ`ÑÏgŽÄ<÷ÇÅIt¥õ‡nqãŸ@øÏÑ.=oµo-̶‹]²hû;ÎK¡q8± •?½,a2<ípÔÜCˆG--9(4½?ŸÐlO÷ Û¨ ¸n÷˜û­Vó΂W}Z×¼#A±QÝvø;m ŸxÝ¡ÄGŒÌoå5Oû²UËØÇçÜœ0Ãú…Æ—¿4]!çÚTB„H¦áG#Òlß ýÙäXfáï:ùTJ©°¬I\€™¦¯ÍÕ–?Ó|M*æ\iÕjÙJQKdUe@78[EI{¼Š¡êU¶ïû9Hælæ8dþùÎØk@Ç]$føV¶dc§4¤-ZÞ¹Ã=il5—‹t|RßøÓH§9AÒqPŸ€Öñæ‘íGäxøÎúøÞaTEoËÆP£*öý¿©gª0pŠ‘ :ZfÏ…çí¢Ò@çO>XRsÇRŸ<í“é"aæ9³pòTã¶`¨¥…Q±Ë^pdØ\#mÆ\ÜÎéÿ›ÐîWÿä´ˆòÎìÖ³øs‚'4>uuäP!>ä£í6âþã„mH‰Éw$@=dõ5id¬Ò%á¶2} ì8Ô>ŽEbAî ÅëL€½ñŸÜÝ]ä$» ÛÝÝ­Pº„þè#Zé/< †A–øo|‚ÛáO´»²+<,~¤¶º¡-©ÙÂ",]Nùü‘N”_œŸØ7ïõ}]SÓqö­µ–yÙÝáëçfž8q± ›F cÇx_8´Ù|6^NK…M—}l´KäÇ’j¨2cê2 A­†Á$_Jš“#Ê@ÉAÝþ¿±†ÐŠ9­l›ëwWEDð `oÒ4 
î‡v&%#àŽZSÅwΩéüatÔÞZp®Ï]'¯å#ä,,2}ö¢–šñ$¦²²‹¬N_ÿ‹-»§óPÍÐ7£=é*Káà« ÄZ(¼(½a «zí±–ÑÄED拤£œŠ~añÒF%%{b à´|„ìÆ´¥åÉâ>jž±wÖ=yE}ÇŸR¥¾êi ž”PÇý!ûÆœ{³.y©=„ÎÀªÜÕÖ3gd×ãI<˜Ìù+À¥ªHIÛltŽØ?ã›à`ðòh…Iü {Ÿ\Ï7«-™Úþ®2Àõÿ¶_YZ¬1@ŒÓ¸Ä‡F} ¦€ßéñçë6•á!S¡ì´\íÀ¼CÞx2¶1äÇßz°Ó–äsšû±èå»DçÀ•‚½ÜŠÛ×äo·‰üÂÐaÓ­ ˆM“G0F'øo'QÙ(k•¦^9 44amP"߯Ç}Ê ,6G·ÃìéLÝú§«4„$äIT‹Ÿ¢ÆºRlé¢ÿ-Êç¥`£ôû[™²œI.<Ž1(ŒÍõ0²³oÆÅ¬\“¹L…y”³û¢7Лݴ`5•…^‰É¥ÕÜ«nÜZ°[Vm:‰¼¯ó2ýzg"cJžA”îÛµ§n‹£bYÿuý³â)GL×vÒ±lzj"îŠ;¸¦/ÒÞú6Ÿ÷x¡/ÊjTzgÓ!׿»úΕ:"‚†FE·fÊÅ» 9CÄ\¤CHß„ŠšçòŠ‹Æh‘`Ä`”*6ps÷¯Ã톳ca7\±¤«Çw_H–ú r†ÄýÓ?ðn†Ô}‹Ô³€½-ÆTµ# GópOv“ÒV-Ò¨’WRÎÈÂf7h‚OO £ÀM¨C?€Jšãm†ÙÒbã‘ÿ¬Ž6=…UÊ„²ÀüÍ— }FVÄG_S¾2î$+[©ž¯ê}¤YU&þ©Žx3W…Xoâò<™`„ÿ«t™óó‚;„£œÀ>ñ,éhø—‹ö u5•߬ýì:æZgò‹7eJ6öÍÅÓ=;Æ ¼³R7„ÆŒžÊ;:Q?’òPûˆ”˜¼^aØ!ðÉk§É:÷RH éœÈêMÞ_Åïä.Ôç«”q2ö9p™ót€ŒKò;çð™Ãº|S®Ñ¬â0’Oðb’]å[˜â8¯Ðߎë/Ð`ºÛ¸Ä@TƒÁƒcÆdKô킪=×ó›]ÿ哾¾x½g—UªfõÂÇ’Ç眑œæªqm‘d&×Ë0´@Ycù°˜N™âq^ÔO´nœq^z ãR‚*îî¸gbÌKJ›`°ŸÃ¢š=„ÄôAñšVž!íÍî³<á5BFIM½FUb†Üú^í­òî MÓ’8mý)¾êž±€‚U ²4ß+ã–|®’¬ØÃÄ©öÚwIy¡”àá?z!ª6HÄ6¢£Pò{ö Át £ãá £KØD‡4/dà? ˆîvŒþC1¯›m‰J7Z"µª,0õ¦0_HE.[¬Ÿ6AÈÓÁ@‚ï‰Ñ ì ¼\o (ûóÄ\ênñßÅs*\'ÜK“¢w#ë¬ æ/®’ËÏðÕc#NMÁæÚÎ<“4óø"¸]Kù'î ”ùdc÷h€®ï„߈‚‰m¸_ðæî ÀªS}ì]“£5{J“)b5`´Pê(ð î¯IþÒ•Pç°Œ´CK ¢Â&¾òÏÉ÷ô•ŽÍGu³}KkÓƒmNŒ¹Õßm.d®6¤ˆåb´Š\xpàÇ`+¾yC¿VÁ‘|ƒ‚€žW·<2á¯üóЧ¢ˆÏ<|Ú—°ÿlòÞ ²ê'‹5öÌÆø¿e ›-\¾ß©·A"U‚ŒÀ/ÿ!—NmV©”oqòf‰^ f5šÒ“l2`@€Hœ¢É þ¤T—¦:‰on˜À—÷D Ô¯Î\` q˜ ê^¶²5÷oɼäÚfÆ_o\؆i»úá VyÊwæÖþOÆoB¿¤¯×ñ[“#Þáê•…‹‹xÞ˜yÒmü}&Ô Pâr§?ªÎžÓ.í\"BÈácd±CüËØšaO¤x[/#&ÿDÅHÀ¿~ü[ÓúÆZ…|Ÿ´¨Xñ¾ØnuUJá ¬¡¨´r(aøæÓ­ÕÇf6”K°È`™Œ¤–›ÏÅdûÀ4jx(ŽbžMz™Ûä+[@öI—ÀNmL¥µÖÉ¢êªëúò)ÔÁG1è“é í|9ïÿdôWJ¾œoù õ¶ X[é9zòò¦òîp}ø+èÀöô]²â§;µ<%bê¸+ è1¥Lc' ÈÙ¾ªÈálqÔ™ºßç²ü<ñŸûx6Œ92Ëÿi€’"†EV±/s-)Òû¦Õ²H}›fÛÜÛÕcù¦#¯°ËÑ!çt'\/ßÎ'~È*ñ´?oHkãMsÝ´q ëçæã" ךwŒÈ,Óüû@_gA¿\Q™´Ù?Žã#°$O(7Èþcž=+¦Íõ‡å*+O0Zðul.å'‡˜è*ôk ¬G]ÑVÜÍ€÷S”Ìmo;ž”\›²f¸fÏ´ùZn3kÛ¶ºÙÔ÷.{n”CŒð.MeÓí{ÕÞdÖœ“†æeÌ…¶?Cón›m0ïÂNQŹo;÷Wd黤®äБSsÃÏ;MbDmF€×$ÐÞpŒãÌ(cÉ=Öëya&/~÷˜K‚Ú³°¸æ½žÆÈ;©œ£´/'TfʵäQӢثöO`u]5äȹö7X„°­§7r9D¬iüKÙÆ]0±§V ’‡2ãpØ-<°`éfAX„çzˆ·ug¨þ?àÄ™ËfzHÖzJŸ˜Káp^]”JÔõ4æ– SÎ…ÔJú@…ôË7ev |ŽÜ[FµËÇ«TØqŒÍhržªÉŠ=øeJKŸÓÕþ^bUµ)ã0pôÝ%yw­afŒ<9P”…GýÈ€7I‚Sšé”õG\K:¯‰>K£üÐ e 
åal(ÔðZYèj±CryaÌo\fNžØ&2›ï™Á²—éø J$®èÐ>D2G‘O€\¼Ôi]Ó†‚5:quîýìI©Sö} ç­ûCàŽÉ>ƒü½1`Néੳ¼véÛ+4ÕnmN9ò~Ÿ¿Š[ŒošÀaË`­V!96tþ²Æl'æòé4Ð,8ÞøA«(¯ðuRÞt>¬Òyv7¯{„D/Zmàûdyži<÷¯Ý€1 :ò"~Ÿc¤fMÊó ¾¹§ÙÞçáKƒq…ôÎÔ2*„ömD½Ì$5 l;Ï’*̈ ÐHGw\ —‹úCPHæP‘ѱ»ý§lÅPTz¥ej8ê¾g…dn ]½p€IÅ++9ƬjðÏA«qBtkŸb~ÆÉõ%Ã^FðCdÑ­ÿ1Ñ9M©¥¤Gx¡Ûfp]ˆ’C¥¤ƒqvwË1tm„ðó?ý·ï=p4þKþª 0ÊŒîòÝ=ä²ì]ïà!õ¨[O—_¬$„õ&Pd¬°ëÑŒ¼ÞÓÿô<›]62`'~˜ãñ•I³A.ýÈnÜ*ìfåñþ|—Ù¾'‰#¥¶6>7µÙ:^tUóÛ¹‹ô%¼Kq[aSýÈY–‚¾]Ÿös¿!Ôcxa'³çPìËõ¡Æýî5× k`ºFy9³wŽÝ\Ö‹õr©«&<ù½àqß'üSCŸ*þlþ:Øÿãx$&#›)>æ(Ø×³²,á;s’â,c(â´G1: R\/hCª¶þööü›£y‘+±X¤ïF›&=mŸg}” •ý#<°’ºŸºÍkiC­‚kŒÑ">.§H4ÀÈÈyÝ{€wN˜¯›Á<¿}ÜË5VÑvéIb­-og2køÄ¾÷ÐižQyë§S؉Š˜˜ºà\»¯@­ti>¤\À$Dk.YÅ…õ]¨x1Ó·2âèΜ©gõ B(û#„µ+ë¬sQµ’w(ìEÔo· ‰›8ô'•šdò[ðÍÐV ÐSu—|ºˆ¼ÄyÌjèTÿµ@ô"ÛÍüÅ}WWvÏ/οKÇBœÊ´§½ódœš§áâZû„¯Ÿ»ŒI_•=lȾø;¿#w‡fMϨ’»Ú7þRËáþì‡Ã$mÕõœXœª «%·“Ú\ãÉ:ÓâL’²9ÚæÜòÇIA\qe÷q±½Bô¬–.Š2c«ÃâîÚŠ›ÀEeS]h¯ßtŽl’Ð)™GhVhVÍâp{Nv8Â8ðc]÷bäfÞÉÑ÷¨ý×!xJµ$LF’À™]“V²Ûp¤V–y£iò¸ð&•ˆr%jS¼\'`“˜9¯ö¢î0Ë új“{Î\´ò¤6vï©+-é1ÇÍhËž}>'sÁ¢gä K†¯Î/Ec]*-"2ƒÖU°FŽŽIØÜAb}åõn·RñÀ‰s;õð”ã «¨ -+¼|¸"øÆšþ<…LÏØYS#¨)â‹ß(ÍìÔ¶û$°$gÄ×ÌàjJ—þòìkÇ©¨j]ëj7@YRfô®íc?çÚ‹aB …½ÿø ¾· 5®o%㓺HjŒ‚…Ku®rè§ë£´J÷‚èþ~DÝÒUaT³“í²Õ¤¿Žû-ÃÜ$ñooÙ°b»%˜ïO–JÆü_|)(móp`¥‰ >³;Ä/ÈK0 Qí š:eöáhØxÞ÷↙5m0çvï>/y®ÞP%KÙ{©83þñÛ†Ýì¥ÜÒæ±`“e~áÖ+âÎ G¢È½åµÔDúB[.Ä0 «¡Ø¬õ ?$wmÁQÊŽ¡V¶¤)i¥’ 0ÇVœå_ÖZïÈ£ç·WÛçÄ#ëW‹Ì$òLjü!%E½í*Nþr7úwöÚ3˜Ób' Kú$D;ÁlM7Ï­×ó“ÀküA—û6 I’lÉ1 lxc»—Ô@ùÙÖoAE]°w²(¼q8ÿß=ô±Ã´ù'ŒO;Ø&¶{àË¥œË¤&…^tfƒ¤x®éÚ¥Te»Í‡öc\…55 ÍkÒÍI–GÂPÃÙ” Åè¢_0™üA;~ 3Ñø=\=LRÃS*“˜Ô©…LŠÔ‚È)UñKÙ$QKQd«¡k ’jOD8&Ȱ9âþZIi­3²&*aµu²;-Kksîß®[Ø‚Ä㥖¸îkeäX1?ëý¾º°%íz;^ÿ†<òÑYHédÙú–‹¦’†:+›ÓŽS–FŽ_¨„䛓Dë’ÖG;ÿ^Ø»ªGnªŒHY>¼ ñnÈKz}ç=?¬’Mm~ƒ¡¥?j®áŸÔï†d3•ÅæÊÝ#À¶c¼eê, ~¢ÊŸðØ’+³gå©{P¿‰¢ÀÀ¢¡0ën/؆¦ÒxʸämÍÿþD¼m©Åw|Ü÷pV"´'˜Æ'üY¬ýUžÉôÜ3‡O~nåÀéOŒ³àn-j¹ 7æÀß§yö$æ]::MLë/èP»®’ïùÉ 1ñ¶iMAÁ‡"cñ@ÊHÑYþµ£ÕuŒ¿ûá®0¥„Oòšh„—K7ÀŠI‘3  .¢*?1,A)wp›"T5Céî_ˆ ì¸!_‹#ÍSYNŽ—ëÚRG7E=)ýÓ{(3À)û“4T»x›¯úeV"âÊ‘x¹¹Ú]'›åG¹€’ã(ôM•Ê%]4Èĵt(%[ì¿SÅöje{ØwÊPU•Í5žsŒ£:] ¡-.¤1žºh1”¸¿Z0þ‚ó[>0W#]¸`0¥ûÜ6Yð†>ôq¿t¶.ï––Pà†’dHô~×D4ÐXº ’=s|Tÿx™¿¯5¹ƒœ›Âmm¾e˜ [zè¯ÆOô«¦h!!j8Ú¥2ºD €Ž"Î'S=£Ð˜•t[FxçÚèÃcŸyoúµEþøZ0‰Us¿q(Æ–- 
“¹³‡5'IÅòa‰t_\²¢©JçnhV2Ü™ÀáY¹Ú‹övþ\5Ï•%+wŸQ‘{"ƒt5^Ù :È0»#œÆ›+_Äû‚x›÷݈=ˆ9Q“àb«&$/$-æŠm}E&ÓÅax_*Q;<üzÙHbÓl¸úaèaU»`E#?]¯9Ùh8N®­/VÊ#Þ¿JdÙæ¡<›¾©e'œ¶u7ó%:8Òÿáðš7TeÉúx}·¹‹=7e¶‘z'î²ü°-HÂÊã½qÀLBä¢ÿ‘“ñz#bÇÞÜÍ–›ÿÔ÷øÄ«5”Xus7‘ùïÝц1ü§rðGZ¿–´âòé ¯ŽŸQv‰Ù»²½³µ®ÁosÝæàn¹w`®ˆìÀ§TTmóò?Ý‹û{Îuu~“(1Ó ‹À… êm7Ç¿½ðö7Õ£·Qùú2Ñ›g„$èŒ*rŒ™0ÕèÊØso×ÇšÛ v|—³vœ¸sĘcû:êsO‹.ºù2à‚-Å;4éPÙC|]éçö­ë'Mô-ܽd°E…?šªú¬OY&?\]pg%휇%èé!×V¿‚ç]Äk=ß/‘d!G†#{1x—ÚI¼ÙXóL•$"z­eÞ8|y[ ø0AFŸ£ÕÈ/B? d?³ï£r4Q§¹áA€o™ö-;íqj„!<=‡p~3ÛðÐV:i™Ûz|×9€³ezÌ™_ Æûx6ºõßGvˆ AC#äVœ9çÖ¼öó0™ã~Q nóôG÷|•¡>Ns½9Ñf7>ÂC]XÙ½~”f/»cVÅHÏY»ÅwÑϪàÄÏ¥öàM¿ÉýÊZ“–5Φ<.ï ›¡î3 ïén Ò|A»îöÞÁ©uT¾JŠVKÚö˜Í /W¸#¡d‰ÃßË—ÂÕJÏ‘é¡k©å@GËšÏþo·€Zö0» t¼5@‘²þüêËd x>šÐT"´]Aø7³ÿÆCݶ¤þCãs\k/‹úûÑí~¯dWïšJ¶o±Ä«ošE¯<¯sîeŠ&šº©BùÍÛGì;a_ÕR¶ŒÚ¨û›„º;¨61"Dà@+Ö |ˆèÒŠ®é#Gšm rwœ\R•e«õàó ÿûHt öFvKý*„ Ž¢Ý–MC–G±ÍxÎõº3þ‡ÿÌ\[¥“¾÷€ñBEI‹àÁ]ýù§ÇT}`‰dÁaK1¥ n78kW ä!ŽÔèÙ_ Ï*_d%£F%OªG=q–×Bû†DÀž8@_hQë¿ßðà ?ijWÇÿŠª=('R™F³€;©éA.÷´œ1ÍDy{ðm`fDšo~€ENLðÂ^éœòu¢¶æ½«¦³ÈÚŽ[Tˆã9ÈTªßܹØóÉèG…á6¾œ+x e’† ºÎÿˆXÇT¹`ŸþçàU” 4$* ¤2¾Àú¬S9ÀlšK{ÿw› 7Á[X¿¢™ÒolÚþaWðÞ+KË=DÔ{ÕiZá“]TœèÝk_@#éU¯‰Ší\úþ ·¨{ÇòXÒbǘ”awã!ùáØÄÝ·MZ5mX°¯“íÍ50σQÈÚeÈîê(@4ÐpÚmÓ‚2M¡ìûîR= {miŸoJ®P̦ï¾<V(u¤òO‘ÏêÐÄ1_:ë£fØn©ÏJZ4k""h6Ô@êÂËΠT¨h_1?m xågx¾ÏÐ7¾ ¶â‹·"èGÝ©®‡ó¿r0²l' 6yOŔ얒Èh­ôÌïªóàV7É=vúIL'Iü´ 0ÇLÛ™Â×A!á•Oa’Dx*äl&Ïnî5Iã ª%Ð~›ú¸‚#ùuá±YK{5“îû¦-Æå(xKnÔ6r¦ßh{>ïVòg‡ÓsHxÝã„®4xQÇ÷ý¥ mj}|ÒMxù™ÒlÂc(þ¸ï+^Ç‘r„#7Â\ÞÊ ¡ùùWN:jŒ[¸¹s¢û¹ŸWΫ¬3ËUêÖ¿¡@çå•áüHY¥7þúrŽ#€6_ñ«¢ŸxT¥lïZ»’Mr“œ£zw'òÃÛÞ¤£(ZÿeÅ¿a zº"¼œÍÛx#’Ò2Îúô3ŸM­VOÁKs,½Ôzú˜¾¯7ÐïÞ—âÈêVfùü©j”PAéËoãò¡.š ¾õ0oW±ûò¦]Qð²9*êÍÆñ\H ΢°;`J£¿Ÿ¬÷AWh–g»îŽ÷ûÌ;3NUø,œ =aDœ¨¯˜ 4{[àóÖw\À²h‘Q€|E.àzÏW¹¸þ[½tWn-§oÂ*àâÏè?< v“ "EG ßàcï(ª¬þù Šltã[Ç:æÿ4¥ÀʱETÖú?hFtÁ¡_¦ÕC†Pz´„4+„©&•ÌaéÑq3ðâ“¿\Ds¼ÒÙDÚl;‰¾Âƒoå3ÃS1†Ã´j‚BˆmMd‡'¦&ü¤LG«¾‚TîAÐ@‡oMÌɶ™ödÕˆÃ}•ÌYîLp 7f’Ç 8~£sQ©vªTWó¡¬Ýæ‹À…([eŠÛo ÍJ§&¡Ã`’â>oÀìúQêEÓd2‘‡ØJëÎk µc7Ð¥Ž5îâúQrFHމkÈãg9w<Ñ\@ÂÛuÔ9ÉÜÕ x©«ç)ã(¡Åì—{H’ã‘S“Ù ×ÎΩ!oð!üÀϵ-Èî±úxÍTVQqz°½ÖÏ9²œÃì²~¼µ T­¡ª‚-Šú¸“mÝù ƒë\.>vÒic÷s¦Ðmr5–·ÐÑ_°»o,=:í5–~&ñ•™Ð¢¤°‡€vvB»º‘5¬Ëi´)Ÿ­VC±r¼7=nЗÊjVý9”S‡¤€ò+'º 6Ùk«óEv!H‡dï¦ù(žAiY²6ˆØõ{¢J54çé®hb\ä7ÈéâW>HMÝ- 
´%µdšdð;6£[ÍèDÔýqÖ¬À¾}•¸¸¯d¨.‘2Ö°¡w[÷<ÆG7d°µ#š oœ[¹@5lsQ·ë¢DNº#™[je쳿û‡º×ÓŽºª˜ª¹I#ÚÉù6­)m@n‚áMFBö¦ u>‡àPíc“ªA>Ux_/ Ma¹`ÞÉ×FcdH%Æè‰4! l˜“F§µŠè¸4dǼ^0Û^c•9VÚä+àŽµ+„¯*ÍRý<¿ ¸åW¹,­Ï½‘æ»c@Çþ{¤ÔPXóBƒìØÖgÃQ¡6c˜¶w°˜Aw…&'ë°øÑ#y_|ÑO«-œœºÝÍ—îN½+YÕu"ÔÎýÛ;Ò¬áaf#“t"µ².®»}QRÇ$ÄžN©Cïô:±²yߢ¹H?£O¥iæ´—(ê­Fƒàº ¤GâÒ¡ÑöpÐ× £”‘ùRº7 ÅFòЗ٧#åþÞÙçŸE:üNÜêÒ|ˆ"¢¤üÎüÊvÓ9ˆ­Y¤;2º®•ÔüŽ·O=a6æem1ÆýÒŸåÞë)ªþw|C°?•öô¨•ŸØÜicÛg*6‚NDÐØYN7*³8dý* \A™Ú±fåƒâL9Ä?6B+ùœƆd„-8¾ž›âþ1\i·LÓù¢õôbî{_ÈŠüăçÓT‡DóÐý†böû^z+‹‰oÉXëÇRÎv)Ïfå´Áºú?#ˆyG’M(\nž`4dSxd@0­"3Øò3¦Âý{þ¤üÖ^29ø’éeÎ~êwºû’ôé ê>ôÔ7´/ª?¬ãTú=ê7WÏšüeÇí Ç@=— ЦW\DÔ§%xi$Jâ¿3«™‹'ÆiØñïIX_E‚¹Ãv“–D¯*’­ý@T8 ¡àÉlGé…QU¸H{6àn±54Ó ÈÓRÀ÷M“j³•Kºï¶Â9ÊaQBZ¦®Tôü?gÀ1R“»°Á·Ã¨l¾u‡1­ ¼/«‚;ñô —P JγN¦` ÏJ°ÒÜs§LèQד–z{¾*Íó´wz‡—¥áµ¶ò@Œþr7Óº&6©²´Lº ¼I]’Ë3¤i âÄÙs¢õµÆx(,ð-kö S¿ÀצU°6>Êê!A¬¬\‹õÀR$þNßu[Z3 Qİ_&_Ó\–ÊÝq ½M±µ&7ª…n˜ ¾ôZ0ÐPÜØl§¥Ûqaêqï/ NÞ˜?“BO²¸¤æ]ckªÂm+Ã]½sF’u¼¹½˜½þ=Õ«M i²4S÷N„E?ñË(½²*tªNZ\{M#Zºj¸éè4àÂÇN€­Ô 7U­ð ‰c f­‡'©(Ú…1–1b£´BsTbpŸ7Rš¼¦?M"k¨~=}%iQƒ>6‘îã¶ u•Kš¶í¢p÷tµd‡ŽXθ)'N@„K”)ÒyçiÅ2ë´HYÛ@Îv 7–qcζþ( ØEE˦T# Iä¬ì^ª`®Cö–™ÙD¸¢Æ.ûªùjÈC¾¾ÄÄ©œ¨»¶@,M©ßª‚ðr†) á÷£qÙŠ® ü7àè e‹WßÛ‰s7µK|ÛÔaþ¾Ýx³1€Òõ™XÓýôÕ,“ªØ¡(é™5yt?P…V‡8?¡1¸?/éD[š8‘:šþ­Yà·Vò™dO HLè¦T'ämYi»ùBaß×Z QhÚé^î3›4ÊqÍ3MdØ"°Óo9cÉÇHB=±b/zJPŸe¿ï€t3•p`ø5ÿ”¼¸’‡g×òÊ3n}n{\`LMU Kþ•ä`ý„tÎy²ÖÑØâ+Àl:æ Páž®¤>7rœ(ì¤âÏìŽùiÀF\jŒ¬|wÉCrmz•¬ÈØ5¡£ ùÙòƒÇˆªµIÉÏl^´ðÆ]4zà{²ØÞ1`x7„ù‹k\ÈúDÕ¬U­ ÓeL˜t¤/'0Õ á æŠ\ Pm¨¿n,SaYÇ÷›šM?BÌ VwÑ0Ý¥ÿ¤·ÈÅH{Q=ÝôBZZ›"Z«#¸ @¹ÎÓò>;ÑŠè=tö)üy(áÝËÃkÊ)­£tKúa¸\…ôµð $QÚOZ¿ ­PW`/|Ó4–ö´ô „©ðszµZôµÉ–aõ?ÿRòd™ÿâ7rJ¥%_ýUvÞÖÅ›e8Ó~×Zéäyi[År¦23cëB<ÙTŸJèbtl«V˜Õ°âX¿Ä4q½ÁÁ|Òœ«ã€zyîÉG1AÛïãK¨6MQTñÃîìÚçκ,/oðµ¿ì+ Äq¦xŸà± œÎÎ,’™¸—¬Ü@‘V?Æu Çbž\Ï%³%#¦%Ž¥"ÇìôâÑkç‚‚’ 7™'ÓŠ SÚe¿ Ð1$•+ Wà‹Åë)EЖkÒ}ñ¶™”JvFªÖŠ„£ÆF´òÛ…‚a}ä½(ÞÒY5á`=eV½éÙ÷´2ƒ‚3訿í2#6);½êðî‚SÄéª}옃™Ü‹¸9ÄlPêçxS™·Féè,v¶ò4¥Qô°¶GsOG~ȸ¹À ð0ñ ÔCyª%}Úî²v°Jz\Á£ß}·“Í*ï¼DjúÝAsJ\ª¨+üðR#åEÿ"ÂñùÜŸÏñö·+(*\ª"ŸL8I®'n—H*å·®‚<3»öÏÓûΤéÔ¹S®š/K†¶GÑŒ'xWgõxÜ<€„›¶)›ö `“PT°P1¦Ž¥žÉáîì]äðŽÇöÆ1n0ƃY:öe™ð*I¦3•‡l?Ztw± 2XdF`ôÈîš%Þõ ÅhõÐ%hú,Jõ\Jÿ"äâÅ+ »PX^PfsÍ€¼¨/2 ‹Ð'¹ýóÍ£Ø~ÒÕ*±î”gÔ®­ìº Q`ÝœŽñÚÞ— 
^7¡M¤(Ž›g™ðÔwÅa.¬ÒëkiSŽïNä’P»zû‘Ðè9H§Ã]ãƒùÞ (À²œ/óºÐ_GˆÆR†éöÛO‡¾TއF=W©Xu¬mùRÇVð©ÉŒŽî¯9XàïµÌº0\É€’Eƒä .ß?9†ƒ„ÀÔ¹ÅÌ»dcÒ´çAvcKù>)íÐuTJ|öÕ_êÔ.g÷÷YºÙØebd½QLNn¨·;Ó¸…·¥µ§cqSÍûdh­›hç9ÀÄfºÔø°ö}m?k‹äfЈߧV¹W´F4Mr™i›ßŠ9>îÊ3øã¥<´tô Jym é½,Šä¬£g§LIMglêáò7»2€Žw–¸]2ƒ¾-h›’'ÎëUzVZ¬»Ê q!¯÷ÁF e-[~øô¥ñߨ„–#W>C ˜–Îôø(¦¶†Vìå!‹‡ç–Üú#;EE”3Ô4¨•u—Ñ|€lN8”,»OÍ61Ê´àïî3F„Ãt…²+ª°Úwÿ:óz*²|Wè9x3?‡‰Ã`¨bF +ž)æm퉤88¬è÷a¨Ú±-Ý4èlµ4MÞìEl‘$Þ}A˜E@nô^×xëÈíˇXSúeô“Ã%:¸ ^…zléQTZÁNÖa_¿LÉ:¦Ž4ÆÆèñ'JLÎ83¹FkÞJÙ»M|oµæf¡€ñ–8ºÉÒä>rNÃIž= ±kp,í ]NdSâæ¤4~Ø ûXç$ûÝh5£\ XâÎ`uSÈ3ßÄ¥þ¿|MÁ˜ E7}dŽ\enK-uÛ5Ò:pOŪÔEL ͦ¦4($t~7Ò ©÷Ég†½ ¼îQe§]H¢áË_9¼}&õŠ3–¬­ë Ü¡‡:Oc»m‰Pƒ+!ƒ¬W=ôœ±÷$÷~!§‹}×ïé|Ñòâ-RˆLάµZ$ŠÚ˜eJyšˆ¨ø9v&y›ózpSÒèЇíh‹”Él!HÄ9v® Ž>ìð–:JÎU@þúpriÂ>Ä-i‹.˜Î­Ã™›ºzbfÿºü˜á!QùnÊá¨äž.IO+=ñ`þÞ§S›eöÈ¢x¡¬ŽøttO`k/ahÒ¬«ØÄÎS˯m¬rxEX4Á%(,Øjt|¶ÈtôÆé§3¥ –>ÂGg¿÷²Õ¦×Þâu+Mïà ­Ðv#‘ÀÑžg 2ê )e* á2Â]…ûáÖÛן·‘!£‡kUž·‡~„–Súº½C‰Ë)îøãrûm3*úHF戴¨X¿r“•ÄÜÀq¤%Ê_UÊáòñ Œ-ðö1Ãé­ºE]”Ý9)!ÑóÅZ¶ÉÃ}&k°¿¿GŸÑ¢Æa³.H»4zŸÐÚÎiÆý™^a¦ý4 4Æ™?™0ÅŠ×Ï71Eåƒ_`®l¦jAõß%?`!ER.µM[ÓJÑ4r§5#:[#hÜóT‘†jíÆ ª§±&'‡ž,ungQZI_•@ο‡ЂÓ]E×¼]3«_'ˆË@F_7ýÑÇ#áGŠŸÿ‰~CÐ3®3åªê¼›~62¼Ÿàzà>ܬÎQ§2öéX¨|ô~: •g6³™~D¾êúJȘ4dªIÅi’ñö¹ HŸ*9V`õq&ßÓõ:ë;&I¯¹ûÓr ãÛË•.ûd­¿!&¼´i})·ŽªŒ—Q+á´ö8È Âºº[ó1å6Ü ]÷Ô.†ÙwW¤w ?þ+&jÒ³ëÆ¹Ò)°7!LËTÐù5*—L†öqÙ"Ž Çm_Iá8» w/ôH.•nN oû®øìï?ŠFZ×SñEC GÙŽ}'`ØÐ« Ö•0Ô#UµÓ‘FT® a¸·¢Ðãø!\eBÒ„LRP(˜UŽ’_gB]·UÜi¹Î‹‡H¶CÊØr¾«¢Õ To¹NÈ,œï&9ø´AµvwÛw7é·u[mˉ¡Â»I)ô*X óL׋›Eg$喇ý²*ÒXª •¬9ìá6ÙÖ’ªÀ¨Ý~È¡ØLR‰àÄFd[7§ªŠ¿fסdîö3ñ[Ädž$·ì¤Þ»RAø¢T÷ özMέÒrvøÕ;ž%ÀA$$;j'4Ÿ÷Ò`zѸÄ8óëV̰!H‚zšÆßܹäÝ)ìiP¨CW8/\É¿kËú]I‹¤“ÿ¬_忆=Ä¢ÁGÀaùraŸ\‹=ÑÕÞ.ÜúŠœˆºö­xáu~€¾+5êDàèAzü¨BžÁEfù#X^ø¡°'¡ã‚ ÐGÌnŽŠv|¿.ŒªfΑc>X%ŽâY­CÔ×ø‰ÔÙ²½GŠNQE~d;È&fâ![©qö°Àœ5Vü_›É ’¯!t6;ÿŸð\­Ò\Z–Ñ•¢ZuÉR6ò*œŽA‚[(UÔþ€ÃyÛ š¶Õñtòb¶79âÈZsÖJ=æ£ã #~ÞC&ùÓ„ýéJ*ß¡¬&ßd4ãèå‹dûø¹gcŸûYïÿ,ó>óz6V™ÌR§-ÛnKbXÖŒOýí—ÈÒ¢uìë:‹ù.¯-Ù­ {¢úJ$~©ìÞFæöfËb%*°ß;Dˆ80vLaÀxØÁHøS&›ð)nzw=Ý((÷2áOõ¦n5<öM.7Eð¹ `ÁŠøÓ‰†T¨ñ:~ºO#-ó>]#fÉ-?R†^Þ¡2oæN>ªuêYJ¼6WØ$àÔûpÝ»i©QªC÷Aiçù[‡:º @¦+´¹^HNj«níÐ|Ë$FeÚæ‰®Eþ™Èè+ǵé*kxÈÌE•E-óÖŠ ›3G‡ Î-þëF»Ú謷q*[¼94«ñTÁãsò¾ÿdà'Š;aÁo5 Ôc˜ }/¤´Ëó¤m5ÙN'äÑÁ¼I‰Eð<ƒ o],Á{¸¹lò *X &H‡¶‰¹ÖÞ)¼6·Ïå) 
=ÉÈ:{ø¨—вp‚»>¹Æ¼#UãÅYÙK_øÖ­c`Á‹¸5I›ÒÓ~“HãÆ`!™ËÉ+Á”Ú Æ YÏêû“‘úô,àz8vpz b-®’Á¨ã6 vàeÍm ÐçšÅÕÓç»ÅŠLµ %ÂeVÇâ#Î`’ývØÔàªê¾9RLï*tû(Þò“¢œŸØHV‚|@-m­U±²ÊRŠQ·´©&Ø4èšùßòŒÆã »™AÇD/ ¢^2†)ïví¬j¢)×ÿW™7j¶ÝX)·ûõ•`›1^õõnÄž»Z}Ä—´´Ïz–ç^»áLJJÇ3§• ®zš)BÔø¦ÛoË?U-t’Öª´ýó'Ûk*ŒœÉÅòLBÅ!baZFEÕà0ü>°;\|úE>SYÕ­ßÒIã9÷;à‰ù)u©Í–7 ³Ü×Ä2Ãß§Îy䝸ˆ©+ÒÃq”(&‹;Ñ-Éá´ª|Ù+±pHë+€=J&컾_ VTÎu‰Œï'é‚~Y•ÆcH¢ÕòcZΚåØtV‚m[I[[naqÊØOÅ}L(Ü 6Ô 5­Tt$Ϲy0HÊ‹³mÄy[…VŸGD*q¢Dâ¸`™šÑ[‚ßÒ¦xìa(k/†¦æ‰.Áa¹µÜ‹j°P/ÛXöœèÑDaÞÁWÈrŸ\ãÿL­ÜvAW³kãç§Ð\]ÊãÛQþƒÔêßd—álJ.hoƒ˜aC'"þéeíI±G…>«Ø,2ÈÞ:vûâFŠT?Åçúªšö–£;ù”IéL™á8¢às÷ÊÉ“a¢ŸA×Ì¥ºPõó§Ìæ$Ô)+ •s7(èÃu8íYš³Q,Xh-À›3‘ ?X˜Jh’>Ã5³‹€lªóÏï!Ïâ?ÃÑk‰¤¡s‡ÅÝœ¾*ßxe¶ÐX%Œ_ü!Ö K81ñ;“Ð/7‹ÌùjÈ ø’… H¹ã—D˜næ‡ûœo4|”…¡ð_ä°nÑA÷š¹‚j~a’ÁÚ€Ã=á‚‘(·Ü 8Èš…%å—°˜xkï„–¡”^˜/0}?Ô_*à:yÀ8ˉÿõØ:4ª¯`ªiï`/€iêi[Ÿ[±§µ«Ä}‡­Ãöâ°ïI#søN†'óÍÌîb×9JõÁÞi¶Æ¤¶±ðAx¹6ÐýVÊ„/„/†)F²Dܼ+¦P8ÿá^1 çvaD¨ãÛÀšpéœKp`÷j|V$ŽžIî:ÈLœæïd³bO8(qôF4¬ÈBÙÌñÓh¡ÍÿI•{Ñ@¡T諃1 ÈB –`¿À7SL«b£;N·óšŽô@ºƒ¦mœÆº5u÷7þIÚóxè¢î#¬‰`[åjÜ›ñQ/š[“DJ;a+Ù ž&=Œ€Çô°­„{{–É?Æ®›µf©èeL ÄÇEG‹×¶…KÍ IœCOX|ÊÝœfú©h: ¼toAfmÝLHN€a„û0ú=²‚é#V#»ú¦9ÀÂôåGû,JŠé²9XñºP)=¬®ÙÁ…Ày.cêwOƒv•Knúá†þK nÏ“5ž[±‡#ÎÆÔ*êÔ@÷ñM ½Úü¼ÀgcÊ¡!¡'8÷Ítd«j^ea3¶e‘ŠÓ>P‹ ·~‚Œêg2¾p@Î¥u’ȱRÂER_†Ê¼:ƒ¯³ƒÈ \È»O\¯ h)‡-šKFÈ£X<eZýöS…Wc,V–­{ÚzªÐŒ¬DèåFçL1qÛ ˆŸk§ª¿”Y¡íø“³'§3&…[cšqt¦Åwa¶¢©Ûγ}Í´U¾`­ÝwuÏn4 Ëþ[ä+Ô>NR¾ÉK‹}v'š•)¼V -bÃo´dkþÂKG<‚·[BŽ ÞŒ®âà}²Jcý?Î߈èè<<÷£/5œ,z4tB:íHsŠÓEŒ"ˆ ó<ÿŒWµ”Çn”} s¤©@Š7îxD"gO}vÇHá}b[AjÌY5®ý÷¶¸>–¼ºpºã!k»mfGñIÏOBHƒM[n€- ©ùOþX?5£­­õ3âHòߟ"\<—åD€&HoU|3úHšr.0Åüx‚â30­¸¤»e1€›U|UnÁàTË/}pOÈI7ʺƒí›l]ùÝ€#S+¡/6—}Ííûu%GΦC“a+™·™”\vyÓÛÜ}j0ï5©zåŠ4᩺òX1«a0H9ž<ÉÖ$Ü,êtnjS‚p¢bÇâúßr­Òî%Bë뺉`çãÍËRܽ[ðí}Áç8s[–\ÖZð‡NZÛ_b()Ô'ËbÈRH˜l·ô­úõü>ë.£ÙÛ_שDžX39ÝG«FÙu¦2?(òó}¥¿ß9Ð|ˆžœ” ±ÛHýôôÝGæH’ ÑSg¡®e€â§Óe'q$/ˆ·.†C ËíÔôÃÑ}0+% 4Âzbµ‚Óƒ„âpž½á´µ& ¿­w›S,'Å›‚¥Z“’C‰»;Òê&Îà¿ ]ÇÁtWZð*ï¿ÜˆÞËD#ìšC|H¿¤9bKÇΫo˜”p¥èÐBø½ج{êÑŧ«ó-+5õ¨ÜzƒpPH(õD(y$O¥ÈA öv÷YðkÁ+öV^z¿¦Ùê^¦õ¼“²ïÝ!6½Îã|Ðò½æFð!R Ý)>Ù[0DÙÒ}šÕX€¾ËwÂé%øÚž\7RŽp%nâ2DòŒÛ¼Ö˽šÊÍW g§)ÃÂêØe‡kˆ<+-”Ì”ÿš®ëŸE~¾žª×C¤NÏB\ ˆò-8#ébÞ´…PÁßPÞOg΀ûh ­LÀÞ H¨£?•¿É9+|‚~V-\6§¤ífšLÝmo!^„*ýà K–Ú•ŸcÊîÞèrŠm AJ뢴ûßs :ãÌn¦çðµd4^Hà=¼ÅÿJ 2¦ožÂ‚¹.Z)zR—£ 
ÃÒö÷ÌŒä÷§«_ú}¹7¨×¦ôð%ÁÿÀ”R´ôÌÓΤ"zÏçJÀKÍK§$"BÜÿ{É™AíDo/€3 GR®ƒ?>~΂ÙVÑ—nË?Å Oò úËØRØ|¤ ÉLÜCYÄoÛŠ÷˜Ÿ'Ûã`Šðt,ê¡ ± ¹×ûõWPÁ^µ¼/¥^¡å¶š|¯OÑê¿tyG§ˆZ|!içýú²Â¦=Ý:ñ9¿‚6£N{}ô—°þ@ï?ÒQÊLIš2â{–Ý£FƒØþn·î½K½ðôŽ®ã½Â'¨’iÛG)%B1iM˜kïŒÁÃŽ"ñšÎ¬†¢ŸýÆ¡Ž Âƒ33=)ùæÃÔ!嬶'oëhüQ0F¡úà ÓÛÉÖnów ΰVçIÄü ~6¥YFfK¯XÈK÷ÌzÎ͈.rt g¸õ²oq¬&i®lªN €Û¿]á0)Træ7”v#4NV—•çSX3¥%ÁÉ•°´õPÑ@¥(2T}òabÚÖž†ìÔ„¡Ýn–íÑ‚–eƒóAœFý5èûº½_¾r×€ñ¤p2QC|ù…4Ì"ÖÍ0·”Ä2 [s»^¤K(“;èrb‹›Þ –“¸æ¢ï©kxü¢Q0A}›£dŽÍ‰×šB6mÎF¶vi‹¼¹éü$Ú[¾%+žcÝÔFiÁÂ^ÈÀçùÉÄR&±æ»cÁÄ:}Óáp:‚ç‰Íjò†nà·ã –ÿ¢uÿ>áò‚×e!ùŽPF¿*p5(kwL =ŽWEE´GõØËD…²9 0Cß‘TŠØOw óŽä–±·ìÝ\/g×ãòšÚus0ŒRÔbƒT€UýÛ6‰#Îô_þNt1ùÌã:§5å¢q{#|¸àâoçPŸ/ â g¶ßmÃ}ü€xûAgJÒ€FLØbG”yª¿jCw”G“'æ’¬’ý7:|‚>CÔÖYµá¨Ñçۢ뭱êu©œAh¥ÜŒ;é8#Q~ÍW$M©t„iüGbÝÆõ_!o'†2)8ÚÑ´{ ïînçÀ}‰•K_ûÞ‘9¥'Ò‰ž§šz&¿5:(³*_˜®²¿HÍ9愳M´á¬Œ”!Èf·ÉíV8” b1¡ƒ¢FÒ«hÏ'*.ÛR‹L\`!A,Åt½AôO •-dûù«¬$;â98æú8ºñ¤ìd·èSøÒ í¼ng[fqúûê§ß„~µ}๠ðåz©Á픀ü6餋BاÞä2[^ªi¾%‹XDÌÝÚÇ)°>&c‡"G6$Hi“òöãXéIÔŒtÖ:zúÀ_Í÷®ÁJ@êð,ªfe§8ÃtŽz“‚¥åž‡Ê™ÓÑ‹ÑÝ…•zØÊýúªvt¥1®â(¢e@ú'EæŠâùV¸P§$6x)Iå~«Z±áÕ¼Ÿ‡-âGŽ/³Ï_¼À—bÔ¤Žýgìgë¢Í¢ìmÊêoÛ_Ëàn² d¦V(ÎŽ\§ó—ߊõ âv4rň ¿®‹îš°ãÉ‹#A\g»30¶/?´ù’™fo;GŒ(Ö\ôÜkgÌ +ɼXtÌ™µ_ %Rõ¾dH"Ãø,“üz¿‘ÜWyßÅlFüÌ­h=rÃŽ ›îÙ,(ó½m_t³C“!Ç1¢Av¨ä@Yè Ü×E\â!ëØˆh:⸈«$Uð–z{yò?4e¦JS ³UÜ¥šóÞ-ÍÛ-Þ½j'dâe³FÓý¶ûž¶à­'z—{‚B:Á€UÊVo%™¨A©1¦JëŸÜáGr(ç„åCgr§më,MÉ(ŒÕ›tÖèE˜î =´SÙûá#ŸÊ3RŽŠ4*-ÏœMD:Š)¿pí¡¶=SÙ•^ÚqVâj¼Ä É„>B[•œµ\%bl¢À/µþªöYN]‚.{ ø¹ ‡† mô½»݈¤W ,0êøÌ$· ,`í¤UO„HÝgèKJ»å%N?T娄¶ÈW͘²ŠÖõÆK®j)ˆ˜Aµµ~ш¯yu’‹4ØÒOt˜¹41-h ËÆT[X h÷^d¿¶ ¥,܈*bW"™cu.»=xR¾uâ9S#xœjeâsÕ‰¸_´|>XâRêâÆŸþ k_Hü=Ž¢¡7S¦œ$ŽÜy05(ædÉuÐV?Ž'ÃðÌŒ2é›9.“L{j‚Ég!ÖÊ(Ä&ÓÉCèþ§…ƒºlkÀ¿Õ·LälP –² É`BýÉÓœWcÕ\N× ÑÀº×;Õô/ÀÜø±‰n-{0èQx»íËðÿpŸçÝS¦ÿq³á(¹Ü&«ÃòÖÉ@4ƒ~Å=VoàÞ$¼ŠQ÷/&Lo+]‚ÐË9Yt˜wÃóQ ¡ñxB`„w æðÍš•¿‘.nûÏàlp>úïVäYrur ™ÝÝX©ò[6Dáa¼iðò›„æ¹XœûéKðF'$o5Ö¹ZʇèêÒ˜C6”Cÿ$ñnyìxq#ö&­ü9É|¯ìﺰǨ‡­¹½”*l‹7 (RÛ¡¡ŒTãì^ Ë©]JEãÑs•wƒº8wQ_@‰ãG4w#䄬ÍÀ…‘gB„}s†ïf¸},!žåæí–öfPv\S¢åŽJ¹ç½)Ïñ)¡¹-Y¡ kŸuM4µ]n±nû_™ß|N ÚeÍŒ¶@ E ¢ñ¯Éšã¿Ë2ü>†R[R r»KXØÈ_ [ˆ-ÿÖX™§]QWáð£VdƒmÅN'òq%ÝïG;Ny3äuø¹i~Æ2WûÄbô×Å4ó~ÀfƒÅ™z(Y”_ØîOuí[&@‹)æ†e„éCEàYî”ðˆw›ØGw¼ µšKS6‹ÁzP~$}B'é 
2c6ZCÕßm¼[pF‚•N?h]°F£Lª»Ic\·ÝЪ0Ãî™Z9r†þqü¯ýÖ•!0N¼ÙÐl²1`ŠÝ«·ZúÃÓMâozûDã܈¨ñk+"—©{Ü`qÅÝY/S5³(Æ6S%5 -]ðÕ¡¬|NБ~“N{‚ôçq±¯~ˆkÕL`PUÞ62sûØ”:Þì2îâ_?F rûEº‘I•G¿ |D+¼Î[Q I]¾ñÖ¯ËFnÉHœÐûMÙ?û•Wð‘3ÿ‡Cî‰ÄÆÆµ8ryjYK¸ 9•¨šÄ‡‰~ý4Á!cIKRš`D¯ÞÖ ˆ†X¤ÿ¶7áZa¡¹½«9t aŸŸô$r7.,% ÏáZEÛ_ˆÊ@û÷ê‹oÞòK‡í—BœMżÛÃ-Èyº+…«_ÝVPÞLímõžK­-¶y€e!ºƒÛ›$ÎúÄ炉ÔE‘L[Í›Ã,”#½e‘1ýŸ¢ÿ†³¬AëÂ<ÜÃß7Za;½j•ON;$Çù"A þÌh˜EPåp×°÷í'xêÔÝ„˜À:ÍcŠØõ.Þµô@ÄÏp.¦÷+Pº±¸î‘†9;«:¾P¥ýu<ñj-£{T?ß]ÛHŸ?à6ROÈžtÊR_ÐMaùàÙlNjÌúø¥b¼k‹·ÙÖʤÿÖH¾Š0Ïq…EØ0ÖßK†rœšXqT÷Š:’Aràuï7L f™òÓvÛÛ'ëæÚ©W“û‘ç÷[ÓѲ|a°‡Yan[>œáàMkþçýD¢|pÓ*Éß}M}°Ðx¡0CÿKVºF’ê¹È¶·¡×`>.²6½;+8D.X{Ù°b3„&ݰ÷lÆÚ µm›‘â£F{—Öì½þ.{I>ÜíLUJG¶½ü Ÿ mÕw gæ­Å2o¾°®Õ†>sÿåmñZ.&=Q•¦gϬÁÓ· ´Aó'p|HǨ’TÒÂ0ÙN•¶,?] &˜%‡[jȈcKÐá¿2Ÿ½tíypÎ_èwÓ,Ù•™iÚ?ÝÒÀÞ#¬@ŠdæVklú#k G‹Q+Öi U—ý%~šÁZÎv_lî…!CMó'ZoÚÑc3Ã7BgÍqK´#OzåL0è‹DÔÎ—Ç –œk µ‹wÍNJê'äïõËDÖQKýwgFRë«æo÷÷¡™NrïH‰ã|iò¢é€ƒ3ªi†Nñß,ê'ð$ÝM+-´¿6¦},K¶²ûÀ¸u€}÷úϸ×Sߞ ¶˜àFdåâòzDîWh¹V™â((Ðé´(BÊÛ}÷©5û¢x0ªC´ÄölØÆS>(L“FÛ‘Ì¹ç ”Z±í”0\£ËâIHt%M‚Êb\ô4íòÖÌ ÁT"²¤ÈЗMó¤÷›ZÆô˜Ó`i“éBò­$‰hJÛöø Õ“}ÝlôF­%7­ Þʰ¾í¡6|³<þó„´]¸ŸVùó(ZÀ¨Ycxåÿ¾?BJÈW§g·×N˜'¬v%“dž¼È½¢£ö´Rl¿:ÉáäÀ,¬ûŽëÿæF¼Ãé¿oßhJGÁs˜ëDŸàÝ;|íuÀ*.‡B猼‡Ø%rP¹­w©ðƒ•@J;Müt[Ù¬þ êØV;r#—5c∃_¨”[ÈÇšH‰ã†Ò{qØVDôŸÚæ>åáBY¢ŽDíûÚÏ„ˆpy šƒæ›²jK¸×KÊõ‰Iè%#ljåÕd» ›KÁZ>Zñ¨V wcÓ—y]ÁûÃW$Áþa3ëÊ Y’ãö÷¸k›ã*¢Qªq\ÌO}g+ÉêÏnÃ3~2]SKmq¬úæÕݰ0ÖõR)£¹MïfÒ{ Ò7yô­›ïH€ïÄ®µNlëñ[ºC³û5ìÖ@0Óg·ùª¶[1¸Z‚GX+"Ÿå9òÙK¢³ m^ox¥Å"E‰‰n"’§à 2JÁjl˜sE [•‘ƒqµÄÔà9!+Ö‘¸% ÜÆÎŠ<£¹d¸Û +`ѬÜe,¼a6ЦPågøÃþãâ¡3%K°¢N³è2#ÿÍtìHîœÉuÝÀbšêßÊ>ϵ4$ÈZüMÃÂútÇî¡``ÂxÚôôÎã~½>§Í9n„Œ?á`j"i¶óFÚk†6ÂEÉPZ˜ì­xu5£V˜ÂzOu°³±vXt€°ï õ· ÑY½bÂí·{¢ª-ÇÃhØ£i §m¼Ð<½sȾ”^t9s F–¤gz¢µF8ÓnT÷Ö#†nmM¶€8O72À©³.C ¯‚k™Å­Ðh2*71V¤]aãÚ#¿÷©Ü÷µédÃúœè•×@jy¤áIŒœÁ`â}‰á ÊÉN°n®ZäܦÍF{Ù“€÷S\¬# &ÿqUÜ]ÚŸ3"‹'3k†èâ–ì‡Ài¸/ú@°¬²†Mõà÷®.sÑ£ñ+º&œ'â° :ÞíÜ ªÍàÑh9ËŸ9@…ð 5ªvAè— õ‚ÅXÇxÕNîOÇ™Ÿ¢ãRaw JóD"bûa3f€QÞ„gÃh:;T¦äKŸPÃ4ƒ· ;¥x>Æ5.P¨™eËö –90z' Y˜+&ÁyߊŽk˜ãR/ Ž #µ{°laH^G¤i+0ΛÆBã²il#›'UØàÇ©³=ªç†þ…Åž£í=sáù(·ŒÜö– jJFYý¸òO‚k90ZEJM?IC ÎRHÕò¤J“§’g·Áli—ÄiJ¨aÈLBoÎ’š¸Ýü±ä¡ûÊ3?dàÛ¡Äø‡S—95h"«€²²£vïz²\ §òUÄgÕàÆpš,ï‹DØ—²ÚnšshñcÇ0¥©`JøÖ"[]w©¾Ubm›¦æ¯$%À]6¯ˆì±š$Pû‹zÿ"à"Â7aÍÊMÓ#Àb¯‚g–ÚD~]0GÏG 
(7eøÏJm¼X@¡ã–¤ÁÝ¥ˆãžçÍÜ"ÿ¿–bf H\ÿ†ÊÛ €ÿÎ¥pê®,„£¶)5€)&µâ}Ý€IÆ’G5@XZ»æãÞLÚ·O©0~aOÜhܱñ ·.CÿDPr"ÄÓ kͦ“Ãm¦ð°n" ‰„4¨zÀí?nysR|ã+ïØìÛl\v-Ûx3®¶ÌÐ+yTÜ×Ó|šŽ.•·œâˆïQ ú’bS«ž,Áðè'ãd%í²Ã¯ð¼ò*­w“ÜÕ7^•Ttë>¯½¦M‹7N!VEEËsˆ‹œÞ` 8.ÄbÒwzwp•…Ð:ÿ ìN˦µÐ›,Ÿ¹5¦³¢Ëm^[¥ÛXxµ¨ §ëŒqñ_JBƒð¿˜€x[HqË g>’û•t 94•Äa:ÄCäBec€Òׯ€Ä^žÔ±¡þ…µqŸŠðÊy"⟎|£ÔÖÅñü¬—Ï#ú@åtðU¤ÕÉ 1³Éå,6yà‡ šÏ^1EgŸx±)ÌÿÉ#º É f±MÔôSÏ(Ã’³ €Å.X9’º»«Ú/p_ÝJò,7|søÈ÷η†l¿îŽBüŸ7ÿõê†ó¼V‚¿ò.‹¼;i­Ž¥« òOâ !”º|ªôoÙàÙ™ÄÚè€_¤Š~_ÏÛŒ-À€ô4ç‰Hw~ÚØÕöw-ŒL…©Úh[t”mh¸ì´ù ŒJ!§ÓÙ6£¹ÐìËìÄDy¸Í"¥¼ &&³œ¸œå\„}•¹‰UÙWØJæ8gKFê.ì72Gç˜,‘»G!zÛ„y(¸Š½$V¾ÅÆÂ¯‡ÊÏ>®º×{ýƒSª0’q÷Er2ûèûÕû vK»R›ƒ÷ó}YP–iš æ_š#ûQ`$T¿4²×5¸¸çÂÆ?qO„*Ãcbl^þÍƒë–Æ½âoT‚›w¤ÈÕ¢[)"%|‡Û©rTJÎkžÕ ! ¹ŠPåªÕLÔ‡@ÎÒÙ‡ÄF=Ägû ØFßfæt!g_ßT܈ËIº‡ y$Uap kƒÞ³œÉ‰Ë\‡Ø“fMp»õÏÂÑ”#1EÅÚn‹`Ûôf*ƒ ©cC »»ðˆ&/Þ ´„gá^zý6[Àý]Ï%TJéQðÛÆ×"E¨¸Ì_\>íHû–O†­I0È„÷ZôEIhæ-‰`ëÿ¥ÄÞ¹¿é&Wâx"DPŽm1ü'Oƒ.ƒæþuöfÀ)S{å÷Ã6‰lwg½™œu›ªÚãf©¹Ç9Ñí9å·º¤#m‘àà tá£Ñˆú€;"3©uz£«UeïXšÁy¬M䣊•¨jœ[ sQéÎC£ÀFP©˜ £v‹NÀ«:1:EðKw¦•ìxbcûš°øï¹õuûp‡Ê5‚˜x{x+ÿ3ËÿöMÙaöè‘xÞ>’¨å-¼üÖ¿]ç~]ÚÃEbÖ÷Ò:În„r!C3#.Åš± ý™?:y 6¢O¶^–~λwqÍtÌŸ? yIãuQ¸³Òe<Þe?Œò÷‹Š$ªë¬@W÷ÃÝtŒ)ýx¨ŽÎx1 ø,CcSÁ[®‰¨œ IRÏR‹'O„ÿ„5LÝä\Çþ)’‹+âŒ7ó/´|’²ðŠ%þa•ÎYÑ3Ìó÷@ xÀµËK7Î|ý©œy€ƒú#QI›\&¸„è\³Þ cƒyIOR2ö(7<ò~ûV…ˆ[Sd\襁Òc2‘cS·¡‹&¼G…©”<«ªgãžA…iâpëC¿»Þ!/JÓcfMj?ܹÊNN*.û©ý3’ #ë<ëý• é>œÜæ-ƒ(‚<)äÞmsLÑùÂÃ,p…`´6È6…P}0­³í/Îâ¡a`Á¤\¦ªëðÀ3gò¯’rTäØP ÿ r¼r ýWb•KQë{¥;7ÿ’›‚ž‹ÄcûZÏ:ÇšGuþi&r.ð¡Û€Ñ\ ^PSIפOÊïÕÑø_©£cèTe¦0ˆK=¥úªw7…ÆèÙÉ Œ,$щg¶ð1Ë»áñ£ÿn€É†d_þ7ù4›Ãˆ“SÿÜãèë½/2(Œ•ÃY0Z ~Jº‰ïâ?`E•¨f…¼XêPâ äÀÇ3Mž @ãËÂè% ƒ‚ä¿F¹¼Ô8½§Ž@÷ó~±#LjnBy0tÖØù(ÀÏIAŽyCyŠ?P¼68»gòyQpï ¦ézű±ÿiv™0eO{§CŒ·Rr–E9ÓYÓ%À0ˆ(fÍ ´9+”7|ysã©U‘ÈUsý7·ùYk;úâZ˜-öÖÁÐŒÀCá?ÄtA8›sˆ‘'`àò· ©Ct ‰ÍGi8òlÓóÈb!·êO/"i¹d£Øæ±H'Ø8àgGÂC~¦ù^¼gs× Ã{dÐÇlÙSp-%á‘_…Ê<(òvÂZÂÃå½×)êdH'‹ªLæÛ BÆÔ0ËŽ ‰’;RFrgÚʽÂY ²­m¥§º˜_™ô^Fnsˆx_rÏÿ]UrhªÑv}žk…r!·ÃUƒh“]…´Œ"]˜ncÂIµÅËantûbãªojܹÌ$Rëh½ù£~Ë'žZ!y˜F~ žƒãŒÑÌA¶ˆºqoùùHB¸]•qìŠÛï®Ø}‰ìï„é“Ã@lò«nÀ1H«%›¹ÌÂ)úÞæà–A9Y³ÚýH­ÙIí¿±6³{l²1WëÓÀfU–Å‹QÛ4«(œ"+Ç3y/²iÞ­ÞôŽ)—½Ú6O÷ƒb¸H:è¨ ,ý“@µô:å"ý¥2¨Oõ‚í‹¡oô sùíÎûYõèæt9DÄÍP1/¤W¬Ý|¢«áÇÎid” "L‚—`e[}ŠÙ¦ÊÚSáüѱP6õ*ü—zØ06? 
öÓaŸ m¸aƒÍF×w² ž—Ùµ`ÁXw(4Z¦ósó—3ìÓ"‘öçÔ3MX¿ ÁçL‘™"ÄòŸ´.6¥ôAp”zô æ ‹õî„Ô¹ó’Ÿcá¯:8šëšs£žú@w¶Í©u?™^ÜÿÒP‡¬ßXÇ`VÁ^,Yή^™ ²¼Éfùqë* fµ¸×øj§D§Y™a &¿ NF¢fKîP nßþiëó1ÆYªE‹³UTÒ5ô˜GÇ«/ ¡’>È}CŽ–«ÎpoR†úîšíÛµµ1xýê'Å!˜¢ˆü3±&¨6È‹1öÄŠvÛüNµëÒ ßP, @$OYñÎÚÁÂÛ$¥Ä,ié!‚ ç¿H~ ÿ°¥*·xçhÐû7ryb«$è¡îŒ‹Â­!Ù&æQ95®HùSÍ• d r‘vvÀb]iš¿‚à(ž–Z“ÜœŸÄ ˆ¬±D¡ß>YÂ\B:^ìâyOV—Ðw.X dRLõ ®ç¶•ðdo•œ¥"ºŠ8(çßÚçÝ8ë­!•æÖ7q ~tâÓ*sgøcKÌdŒÌ‹úmî$g†Œ\ãÐÛð¯‡ô?E}7Š÷$½T3;,= LŠ++0-v2À´Šk¥ã…ï,<ºÈ;cßL.¨ ÚJÙW z"ÐiîÖ5Æ»“½#|› ž¨âÊ«­b˜lPc»·Á’‘s9p_"Á4äÚi`E³» "þ ÚDÔjþ°]% ·b• ø=óˆ9¢F;F?ðó x¯œÌz&ï¹ÉÁ ò!˜–õ¦D2¿™‰gþé—rc7ÎøØø§ËRÞþž™Õ££Þ¿ðÎdoú^9”ùˆ:׃¾ö*š Rè‹x`¢6ÍÌÐèÚ£oÉk‡àl~Æ–khŸš‰©y‚ÿj‰aÿêelÉeðÁÒw©°½*÷`0ýíeêzäã^J‘*H]ãƒgL‡Îdа–Q>¦q÷}¿¿îÊÍÓŠ´7dGÅ6Ã#cÉhâ3a>z àwjdÜõ¤«t€!‚(¾*€Ä$ áýö ÚeÍGl=ßApÉ4nÄ«¢È ë¨òι½ÒÚyVÊ…vk­R-L¢gº' ˆ—V[‰êº´DïdiSÕ.b°Úúè½*¢ýô»8ò¦bŸ„%Àx·zíXx›ëJ„íÙËo¤Þ`Pes…t”9ãåŽÝHÄÝ*}9¯ØšûÇ^ëimÝð}À%xlþÐ÷õš¿UhA>”ÜE6”ÈaB”o¾>ô1‰¢Éº4ÏÿçÌ]Wåä82…4ÿÍá¡ ÖGq¢Q÷æ·À¢Å1ïåpßÚ¤Hj«Å£UíuEé`ϼAúORq2µÕä.”\ú-L?62BŽº/å?øþ"gRŠhú¤+-CçÌ„ºSljšñô)%ç%U“FßXdB-ÈAŽK"=ªkáXñ¤$½÷>aJÄÄ7ÊýÇT!ò–úž†óéòF™í¨KSƒÛ²Ú²‡spÝ&Ÿ" ä}°4³óncÿK;èuyq€]&ü+§XÞɳÃÑã '€IļÑ:nAÖEK¶ð.Ö#›Eê8jÖÓ|GrGÎñþzqÍÜt:8PÛt|E}?ÐêôÑY˜;o›Úi³`&#À¯ÄEaÌF}¥ÇÓO¤òQ½Ñ±ù¬ˆcÚÄQ3P¢,†Ë"ÒÈj“˜EƒšðfZtn\X˜uÚœD¼ï! 
Bï¬ïbj”ˆ®Ñ‹ Çþ"ƒÝ/·„ÙÖŽî«æefôB¤:÷ÄÜ7xÛýùM¦•—;ÄFÆ7âP­œSe’¼iB» R T Ÿ­&4t¥Ó^·Œ‚} }–..Ï¥ýXN½lö:^oeP,8(IÕWÞÁðt\h§1¶<÷8i’EÆb [`¬l<ŽtîÐmA(ÿ‹m[ ˜èBz¢ßÓOüžS$ŵ¡ÉOjy*À›¯¡„”G{³Kp»ˆÃ im; *êß °{F…võ¼Ñ¶u§³>TUf$Þeze¤È³ƒ5s¶Qëìâ>[Q((¨ÝÌS^Æ•]s¢ìþO\Ì$§‰S­–ÕªÍ.©½ÔY¯;?€.‚ó¥juˈU„1„qA×M4 ¸Ho´8ŒÛ˜_=Þ9&2d7É €iÔüA“d.ëoßè”üÊ׈q†_PF€4€Æ/R͈δøþÔ£·æ“zPXY²Šiº\“-°6d¬a¸X(óq­«9!îV“E¢ÃåðHa×ÞQôÖ×^ŒbN2ãz•3NoÜì$.v¦IìÓêí¼ÈýßîØ<±ñô\BNV%pˆ¤mÌÊkš0=ç6d§scïý¡ ËÌ+{=Ò"³ä¯×ý1´aC3‹ç’f¬¢MÕ %ÒvLüdŠÛ¢¸Óí:£ ӟȾ„)PÞÞë¶õ¦xß)è©z£ˆ’ÂXWúšT¸–ÔêÖš„í­8íwœ½Ç:ŒsvséÒöcÇ0j F dÑŽ–Éé\‰F• ­üOòoâRmþ§NÕAGDEY «ô‰ˆR /hö:ŸÎ}Á,‰¬Â×Z?³²r™¸ v-[±ØW@'Ñ7‡© Hµ¤)ÁÖ‘ëpæbøÈ×Bå1ÛÕ@à%G¦Å|+q_.9"} þþy¸¥Òb¯ÛºCŠZÒu%g‡rÏà|‹ñ*6ï‡9Xr@.-F[´Wç¾\æîظôF¶a93µ¬æUªðøÜ l3‹lšÛÿ:wÌùþ‚úZɤñ"ªX9³‡¸$Q¼hՕ憵=œ!^;?VaÞ4…*¯$ÛŸoØ®/`³hGepƦŸK¡$t¬èéJú™Âû#ô ^fÊ÷Œt”Iç™éÇ(Ö—3ļa0æG«èŽWm“¦Õwœ.0ÖaZrÇm³‚9;¨Pp£)$ïãL ’yóKÊ¡`ªû©q—!ìÃÊ,¹‚‚ã’‘MBå÷Mò¡ÿŽÐÙaõ» óŠ_q7•&e $œvY¨gò•¬5)UÄ«ºéò¿V3¸Ø‘džgÀ‘‚‡³FnˆTµñÕ°ŒIfƒ~ƒÚ0Âb‰ú^ÑJ÷ý6Üo@‰q@Ì:¨LéPÃQ”6ò,¦Æj—íQ ¿ñà¯6Hñ5ÔŸG‹$®â¤—· ³Ñl+7½EGÜÄ"æ|5ÃÍ6’ºüJ…%®@ñoIû •äM—rÅËÍ<¬EbðLa¿Êv¸T,r-¢9öñ¥zy;€ÝÇÊã^­øsd‹t#ìÔì„ÌùöΓ%É%ÝsÑY­“ó4HýŸ¨8Iƒ!W·ØôÞÃ3gfsŇlt/ÞïˆàfâZ÷ЬŸÒÏb;d •Š9Ô•·X9 †²0½|ö¬Q††;(t$ ÆÉ¤üƒ€×º&9Q-ÒcôØFfÎP³YÈ®…¾NÓ`}c‘Fa90°àùÞZØ>ñBm~Ü`$gPè>IíeìT·Ôü;<'Š‹^P#1 HY&¢\ -¦D®VÛúOýô]¼¶êl5Ý0Ù&aFlÀTz¤àÄ(Qß @¼ýÁ©û/«æcÄÓU›¹ŸVz7;^Ó¤¡`¡EüOŒÒ«Ïá3%uÓ²‡ëýUwŸÔ]fƒ-ŽÀm%tã̶ûf~ °‰×ÏÖžj9ª3/´¹ýËñõÙ'hïÈð–Ã!蜉Sòä=t.UÖšãºÇМk¥Äʹ¨ó s‹p+w]»7ÛŸýpþ¤ba%ig¨„Ä–4~r§Îv×wœ±‰*€/œVqó»v ªEÆ‹ÐÖ_ãøÆn [æøKå˜ Ç.%eõTÝË›->êRè¤Ç7K…׃m®›×{øˆÜ™g"hÅ«e\¨d·½ˆFKöšLÙ p†°‘–Ø&©¥EmF4»8 ŽE§Ò]{®ÕÐÇFRC‚ýÞ˜aòïUä0#™Öœqa¹«¡ÿN;†­4¬»æÂ7!دl›YÛÑSòÝ´¤å‡ƒÎÆ êÕ´o»ÌWú%éq”»ÙpŒˆ( ÂÀ¿WvE}Iö¦‡?ƒcñÆ™!¶H¢`œ­öð¬ýþÜJŠZY™#š´æº >¥3‹ ¶Ð׊sËø—%%áËXêÃH«Õ- l *ÜLö• Ї¤j¶KZäsêSÅQYô5 ÍýÅR¾Àæç½ ““Y¼˜/9à³{*÷)Cþk…­¥)h¬³ÈôºÁ@DœIch2.×_·ìV¾Mù—Á†e·ž™ äšP®|KUÈÊ'©nˆ…Fò®äÄEI-ÏŸ¶»z¨@ „dÐ{e[Ø¿y„×H]›éÃçQá}(TZ>‘1"FPåeù3ŠŒÔ‚…f_1á4üºX”ˆI¬ýäÝ( ögz,ð‚ɸȇäÒf^ìÝ*û5Õ½ÀùýP«»Gv­Ñˆ =ÁS?-àLÒÐŽÛ„O\ôLäð_ÏF‘ñ_øTE5a„±ÌËÂõ¡à0NîsÌíe¥|†ÓŠºu© :AAeK¾Y½ó^#_è("œÇ¼‚IK‰ÝY—ûÇàt‰Ð( ×_xÀº²C{}d›åÂÊY#¼;Ä×É‹ëÃÌÑ\UlÅ%„Z­ñ=Î1òpŒÀVjs‚Öš|Ì]-º¤(˜%»§™®ø‹˜S`Ì‘‰Q2B@Vã¡b 
+ÿc²©Z1v¡í‹o[nsX+Zvfj猊Ddå]ÚoôP¹ÎÕ—ôÒƒEqgD¤2š)>½S¹`߸ Sz¦‹Êî!mõ{®gG`€º9Œwñ–¬Ë‰9ˆ«ÚŒŒfïÃnú6ü²u0Ê‹Xß/t5¶¶¦<㺞™M2Èyð(ýN†xÅŽ­.‘=k$’xb9é¢î} ½ÉÀX—hu»Œf„d,AEn!¾Æ;¦¶JÙiþË´5Öa*Ì¡âÙû×Gj]±·ÃÎûŒS‚ÎIQÒY¶ª£db=1»–<£LÒ%¹Ê˜"O‡Îs lÃÞ‰­Ðž87¥ Y¯Ú°MñÓï†éÏ3¾ÀÝÉUëGàð>$"  `Õs^7VÎ5ÜØXZMv ¤”Ç"À;¾º>Î^ÐS Fæó0ÕWÄûŽÌàœÿ»D |þGî'F kÉyœÓN4—ã‡,=XÉ },/Å §š± v™F’Méà,;½ât“4ÞB¶Ô«‰û;WŒ„I"R`±ÌþêSZÚèPÀá\4ÒÊ›u"æièÂVȹZ¨®(HJKÛÁ½Ún…œ­È˜ýÀT»3d³æ[ÕàëÀÄ,€Ÿ!îµ;•G桵zx¸åf%˜<3r >ürÞwŸ¾Å¬t²(u# Ï=ìü¦Å;)‡ã“åÐö8A±G¾ÏñŒoÃUY0‰´),ÙšÎMä–¬¸àA-@|æÌŒe_­AŸVþÌ‚YüúJ ›ËU½)éÒ3µz¾Ú»¬³NÿÄŒôí;&DÕ\óP4¦±Œ‘sæ²³ÞÙ/Dߪš‹z+êsÑÖçâY)§Õ„ tcém± …ð»?M/Ú$ÑÒÔš[ŽKþÛ `œ¬d 7ê­XørËpÄZ½"ãÙ§PqŠÃ¤¨÷&†ˆ¶n[’zëÃûÞ2ÓVVåp= GüOã¨>Cñfå°(ƒkѱÞ8aûºª1äUŽœQ´w8¤zˆ=jK¥rp^IÊZ„i•œƒé=ÕgÊ™4&UÙN´Žü…ôC¥jC!<ä3‰‚¾˜Û¢»qëèÞ4ÍŒ/(ÝtTøÛ’±ÙÞ€¯£¡[›[‘õÊý }LJþ|'§,Æl.ö³M"© iE©‚n7×ß4hJ¶}!0¦_¸@öîÇ«V† /¾î>`¨O7|YÖ©Õ¢Üqý&[rf¥øKLÎJp±ÅêïA|anæ˜l>N‘áøElÆB¿²Õ«ü²!¨葇ŸXè³Ë¯É)Œ,ÖÍ׌¾>˜”ÄA hïrÉsðÇÖÅe¤Ø¿ LZm‚÷ë_ýç¦>ìÒ§ gcu!‚.ŽÜE˼k‹{$¦ÄUþ³½™xÿV¼Ó„¨›=6ÌÞËÁþ’ pÚé;HY8Ñ y¹yš#¶Îè¤0¡5V?d¿ƒÆ½H%Û[·œK½C¥ÂÊÎjü#ƒ¸±Fvèc×Á›Vqh„)ÒÏ>µÕŸ5ç¢iññ§ìàçwu-åÏp…qM¨¬L]¿–5š½ì,¬xyÔŠ³Á´óy’®ë\šp×YQÙ?…X—¬ì¹Â‰1Šœ:Âèü±ãµš‹mžrK)®’i -›R'÷l2ŽöQ&¼ °³ JÓáZÁ¯m”&îç•=¢)É•ä»ôD†Ó}$NŸJtÏ Ð£Í>fÅõ¤Æ=¦Ç‘Ê7(Õ{k¡QûX¸s4x%Jx‹æg S#í1ch»ÁýOe¥oÂ\4?ýÒµ²‘vŸ•üeÏ·šƒjW¥+½;ÄY¹˜î‰!]tI&T‘žæ\ûf©uKñÔs›qÒÞ Vìb’´šï9M— .ç²a9ôLl­ÜÑŽ0 »õŒ O]WE,_B»Ué,@'É®B´g±Ûÿ…†=ðóת´fž‹4Õ¿gÿe‘ýŠò:“ÊSËTqQBuð{ùÃt§sùH@6~+KóáÍÝL#+§=€ã+ NS Ëiu½·d3Ø~QDxIfû%M½W2ÈZ2<4g3 /5À&kiù”L âZ`R­=h`e£a)ĪøØF­±¸ø0üv{ó:n‹°.¸ŽvŽâÀá̓”o©€ZÅÅAˆsÖå¶×K } Ø´½ Ï„Œ‚ZíøGQ¾^kÙı½øOˆþO‹é^w™Ž"3kxf_>ÿ\Àôì)<¤"’mö òÖ™‚íSL(¿Òa[þù(ôÐkùÃ@ú‹ÊŽ#롦ƒcã±h~”ðÂTÆišøòU¿?7jÜZÆÇR¦“÷‚а~}‡[Ö•Ô½i†­çE=ªw…½7$5ôöcà¥çT§vï(õÊ4ŒÑßÞG™Œ§ªM\¢SôÿW¨ä´oþtẌsÀlÚ;6Ââ‰4Sð¯j˜†[\=(†ýiVéÀÛã4,ÁðØþR})5N}~$oD¡ŠÉÅm®»q½7—ˬvp¡fË£7õŸ"ŠÝwy gÃyÿ¨«•õ£Ymßy´ð¢XÂZí+<Ð=­Õc*i$ÐhWînW܃÷ŸËo€ÈŇ«£Å1s_×Á…,@ưcJÞ,ÕÏð/ RNPGJ ŽöTµlIlµ½coÙd/ß/èVm_»9–ç,Q²Ë›LÂsÞ7곿MñÝݱƒðep¸9™×C:iã%Óaá¼® w!ÕkVL˜ŒÚa{ÄjÝ÷œ—/¸?‚éy{8I&—rÑ¡ Ã9EñH znCjd èÞ„SÑë{Aø½¥=-ˆ:‡ÐcÌ%Y— fBþ>ŒéZëööüÏ]þ‚ák±`¹™Äù*· ²ò€¾¹76ã/ˆƒÎ}Ó`Ô{ÙÒE5Fk.•Ž]o‡­PÖS¶»æÚÓ'ÙÚ’Gü~XÂŒBÕq%y±¥l }&Ži†ÿ³.MÅ R¾;âµ0Rþ§V 
ù•Ki,ˆtìÃÛ\Ū"Ò`£ÿ®ï7A~¨£nfuÑòž¬•ÆH¹®Év–Â=*ô{©ËlÈ¢sXpÅŸÝÇvçh=v1‚ùH›pœ§$#3V:ã~& ¤ÕÈŸ NcèPܶ—5Zç[®dþÜ"Vk$Joˆì[]HgÒ«¸É¦Ÿ’©¶‘S<¢P“³Š´él¡T§ìoF}qüˆ0ž0Ï4Ûâ± ±ƒƒÚÀK„ú-AQß²äUŠO$² T*ô|Œ*P áÐq¾q}yŒwg¾Ñ¯TpioÒ²3ø2/Ð0¨ôA5§‰œÜ1÷zø))ú\.Ûú^ŠÇœÔ©J9N¢£Bhî†x~5«öÔÿ”ÿæ¸# .º¢ÍÔ²C£Ä^;÷Pˆƒ>U¼ã|Ï… œuS1º÷êCå&å¼&›#Ècï*è£.‘í²t6]ÂF.ádŽËsÎkVŽFr¬ÔøBI1;³Gbã“°€Ä,¿„Á™!–ºšÀ̦5òe"éc}•²Y[“kü²£ 5Ô,;¶P›e`mÔîщ–Þ4ÌØ”®ïD\ ¶„EÃ?u§úoìO,àã,zxªžÈû “–ÙŠÝ' ù½NxEøšŽ È–Øwó’Á]¼[;Šäþ<7u.3élÖ2†iä?åÞá Œ:^àh=ÚK[MÒÈZg1xtéH¯í|r¸¿¬”úB`-eaLŽiºµY«'NßI²ÐŸÓ•–¾·9˜·RK”àÚíí6˜êáþoYêµ;9¸ÁZ=„½UÔ4LVõÃÆ‰}Ñ…RgøU;ÝPÙaó@»ø+ØkÚ½­KИg hÌO> F‹Û¾Ëb®‹s¤ÛJ®Ré04×oEI‡pô¯è¶^ÝPƒ•cñ `"-­þþÀŒ+Z6œ:†~ZÔ|Š{}›°{[ =¿¥r,Ç>_œX¸T¤³u¬Ú±àe2<ðÁÚÙ ºÅÝŽ}MH„²!:Ìêy³ÅÒ~S-à­Ha Í7þÜ@N²µ›”ÓÔâ³æžV„êo/™jƒµN­1ÚZZ-éc“Aç"×qÙ;y±O-çï™Ék6Œ(íøôÒ­šµÿ¬´B^€)âlȬ—cï¸îñ'0H¡  §› ¿VfG*'õ:\ÌãJŽ Ï^‘+¨}Q¥ÈK"át iJޮث-¨×úŠ=‰ëͬe)jdEm4Õ˜*t•HoC. Í[DAˆxnöBÂŒ}T³ôòu7ú‡,Ö”z¨ýXRÜ|}ŒÍ¾§3Q™Cûrtl©ÊÂy„¨9Iøì³«æ)£!§´Ô(ÇÕ‹%¾‡Ù/D ’F}?øšZŒªª®û¯_Ö²¤·O7ÒøeO UUöb°#ŽóÊõ ¨tspMj(jÐÞj#ë T>6/‡L¹%ѰbÃDæë©ðì€4µòÝÈ“c.ûø#z‘q†˜ìD4’f8Õõã—ûgܓܸ!ÅH.h·üø/ûßÍÖ'W&wrÅ2±Ö ¸øi‰©D_ý¿ õ’IÕT—óæÅÀ7õŒ›=ýˆ¸7| y â8Ü0޾´øqI"ðº6%5 ܶá î3Έ’[VÖé$hü)ª•]¥8DdÎòLGnßfŸ{›«òŘ,Dž²¿t-Qa˜7ª‚àïŠÔ*¦RLÿ‰;òsƒ²ÝéükhàšÁ|qЬîï|P¨|{ÞÔãÿŒcä>>Æ%ùMÌ2Tìb•Aå,Š¼Â²5 å‘9@ƒÍ«9¨¥uú¾¤8ŸÖºó¶e¥<Æ\¨1PTË£2’?Ѐ,À™÷<˜Žy74UI_ÙmŽ[ì¿:òù .ü|Érެª†½q·^O˜ ÍBŒ 5ÈÌWÑŽ™xÁS!Ë$áÖµã"¡±€ÔÚGr/imŸÍǼ¿KN³Ó{Ƥ”ä”)n¾Ë4&újÆí(©GâªÍ1Ü`8Ç`Ýó®söŒ†ÌâiÞŠKt¹7븂3lŠ7· éÔrE[ÊÕÒ•n$Ÿ'^fÖ‡‘·±-€bb'çAœ¦×gM¹Ýþ•‚–Ï|CÓÑT…†ÌK‡47—Æ•{®xŒû~–‚;c¬7Õ㓴昦„“+™$øöÍkÆ”ÚÃÎì¶àÃEô¤^ß‹i¶§N¥rU\Ü쉮…ûÔ‚YNsó§¹öñ[u;TŸ"ôò}E¯ê5ì Ó^$álE“»»vÕ¿PŸè0PçÖð ¶?c‰i]°tÇ6¾R—®!YÅ2» ¤ó¿ú§‚Ò¡ycc€^9¼!X¶0Óñ³ I°]mÆ•3P]#;cVùÚɱР?ÉF­˜÷µ½^ÊÏúÆn ¤ŒuÛÔû§§.¡ÌÇI€ØÙ¤Z–Ÿ)ªö&=;­Yu!¼iðAgÁ¬¤·Õ09ªqû ¿¨Q©°¢9²ýþš+‹’áÑ"§õî9Gó™ú¼¦¶Dý¬ñ?•Rˆ ܱç«âïQFF?PÃ!â#4 …êZØ—Ÿ\&ê/UÔqN {R.«{ɼ9sÇ`´kÉühÝ» ,°’Ý)çÝ¥jì¹·9À,—†Šø)ƒ®ãƒßL%=fÛ$ôVr¤TCÖÉÙvoTk:=wªÀa—ĺ·ÞîÑ|äÌ%”6ü÷¹3éü–‘.—ꃞnç—›$×"’Ó¬Ób?Ú“guŽFX[/–œ[ƒzÂ¥³)ó²id¡·Å<ˆÓÒ_³ËC;:¿Y x°kZØÌ³ËA“¢˜‚Fm+>–¦ÐyÌèÞŸÔ&”÷—ÇZDÏXþ¢ÌàÑ’ÏÅZ‹A/šÍ–ÿÑ ³ò-vÀ¥òÒ¨NÈì¿$_1ûÈOè9Õ@¾Â’uµåˆ––´³ÔVªÂƽ®¼˜RŸ-è…KA˜µŠ¢ ë&ÞxÅÿ=A+«m^€»FšQé´áÔ?ƒí‹IÏÃE5×­ó½=xê–0ÜÔŽ¢--x!ôCƒô^×l 
Ë!j#kÝ/ÐaRJ–25»$ëî*KµÒõ•~´Ý‡•{®®Ìrøtˆtß(´}®‹Ÿ€ÿƒ'5¨j âKLvÇ)6±˜–:Gb»·9€™0ª_‹q“"i¸£Æ1¼ö×uªå÷ûÞ’ á,‘oC™wxÚù.U•¸3Ut¬tË[e°r²?èðôÎríMhå?á2v² ]ü›+Ú™_ –`N#²£°ÜÆÎ.T‘8[@ÔkH‰Wð|Žî»îú¯™•oETr£á9cÎæ!AÍhôSì©o½oXèÄâÒ»~¿\Aq§˜¾Ý7Rî°Éò¿í‰œ¤t¼¤dnHçºÅVH$ãž.ÿø˜]‹m²ßIA‘ÅMNvž³oŸÏœvÍ©"$äYKt•“çlï]ÌõÙ%¡>c㘣D‡æ¾Ì0:´:ŒKT•#ÔQίmOú IL59¤áÓ_ÊÑ+Nú×N|_PØd§ä‘{ÃöþñC€~ZqðPa_d8trR 8NëXQ[É[$Itíæîò~˜òS*>c0E1ò{vÉ]×ÝÇ7ˆº°šêZ•,ÌÃÈ*™Zj³jD]ûu–0[®¤F„h8Ü2¬ßk“S„}¨Óö|§U# ¸Ó‰éŽÕ8‰åCµ]}—X_ Ë‘­3`½ãóC”¸[3úÏ E1Dæ ÂPp´buÃ豦•tã¢àï ‡kx`BP)ף첹<Àû_Ä­¿ó¿ª¤Ë kê÷n7ù¹•äÎÍ@ Õ knÛßzy —3Õg#î®å€ñWÇ®¶‡O°â(Õ;By-A65|=i€¯% ü‹t/œ î}*N݈Íë½B(7¾Ï–B;BæórúÊëR‘ë8Þ²×t°ÂÝ ìЂ¶¹nžRÀÆ)r½v¾`$Uã(¨æì C{¼5iX?¥Ð·ùJ_ñè|b`¨¬‰ „v+^Ä ’#÷¢Í•èYP-uºàé¿¢£¿?§l@X°>Àí£´S,³4z8)åÊãoÙ[Rêb3ëô™š,´Ÿ?EP²ÌðIÆØûô^ÉhN¥ß½“Q&±‡§â2£s1š…òÿ|·¹'¯æWi©T³}IFéH™WÉí^>ËvBž2‰"üû´Í«Ví‰ào%æñoω„#ÊsàYW @ìëxVŸì4‡²‚Í1Ž&ÂýWåj, #`–,v¥¼-‰S_U*cÌ}9æ%FÁ)râº\ßÚ÷¿Šíã°Ôf u;ˆË@Ù9—œWƒ#â‡ôáU $z‰lTJìÄ–¢2vÞ·tYúÒìl õ¾S¡¡Ö$ìëç–~É^_Eʼnx:ì,íö“ÄìKpã½@©¡uÚ+k` .¡Õ{Ó3(˜·¦Ä~Uû™BXþ?Ü™i"úzòe¬Ì4ßoPÒEÊ‹%;*üº¹E.3¸ÜäÙ49ù;s°#U{?ÌÎiÓníÆ´‰`K9sk0a¤ÃOè¿Zä‘=*ñ™UbñjB‰¡^%.»fé"ÇÒFÖ^tv<¯p/’ÑÄ<œ `¯¨¾®¼‚Š6Ö¡är¢`Á^*—“<Å}‚¾ðÕãÏ»¥ã€TÀUÁ%ûF¬1n³bßü$ÎÓ”!Î5Œ¯Éì`Cì¯ù%wËljIÛ¾ï£%½ &…/L)¨ƒœWÈÏ¿¢„wÈKß§n0»Ä^Š?'ù¿¾mŒ,ZfF•ýj¤à¡DOpÛ´ìZ¨¡)rÂ2Ú*‹_œÖ¡Á¿ gê^ÊúÑø$¨×W^ƒãÒ!õ—átMSH¾uLÄÞ)ò›Ü ³í»°ïRã‡I½4¢ô­¬Þ.ß=©«ÑÆÝ·÷7lü^†^Ê\¾:(f÷çGù6åÚ[sîá¢1a«O¥„6óeë)ùÑfn÷­x‰ÎIŠ©è qw)N2ëvõ °òîÄK‰ÞH´,`¦¼¼¦jƒH¡µy .éWMþ{ ¸j#Ž›%5á[p²†Ñ  þ½ön˜«…O ÍýI*çAºHG˜X…û&‰Íãõ\å¶èäì,‡éN{û!Ó¼jÛÊ‘EU üñ`ü £°“ð$~¢…uIêi* "Õ6> *Ò‰]ä¦c‡jbÇâ.g¨“ë@ AŸ;õHs#Ѹ`q7.kúºYÌpx´a+ÞhNpÙl||(M2˜CÚº°Æ/6B$Ld‰2¬ªEö­¤HRºQm¿oOóÒ?ïìNÆØÎU¯îNÚ-^Ží¶Ê'ã{ôH0’„¿ïÿZ‡¦®ý®™ú1±­A/R9“Iö$ß2 N×Ñ2l9ï®öµ*.N §ýbã1ðdFí7ÜëhÛùÞB¥¯è ¡ÆU‡Ñõ õÏ‹öàÀ}®ƒ… 89Ô\«Îý "Qá‹<trYïú#:8mCz‚½¼Öc¢2µ“.+ šE&‡TÃÀÚR`)uÓ; e‡Èh,ÅÒöÍk?n.6•ÞþàT‰ThѤÅW¾^j˜¯<ê7‹ZQ|î›^==¢xz°xó±·”º%U’ÝÂ+†´W¿€sGpóܨ¨û08T5"€„k21,)yï„SkzÅßD|ç°P,þKɾSÿ\Üç‹Ôr«DîôB_øña‚¾¬p‹Þ `w´½†(8)_Ôëc¯ ^^ÍWã nõX‚ÅyöÐ12ÓÕLUÚJîûÙɵmÖÅ{ž„ºŸ«]ªí„W¿(?#‰qÇ<xQÓÞÜg³uθhrñ¾Xô‰‰»M’È«©|î¯,£j N]ëè"ƒ5-ÐË] Ò\ ðÈQfЭ(y2¥¼}_”óÎìµtœ§‘  ©B3ÛßM’ëd13<^h 
Õ:×»\bS­éQãyºÏlˆ}èÚÄÞvŽ·v͆éw•´Sø{'`Bù(0hŽEdwFDÕ&t'¾©p¢E†Çºn¾GÀ-ÔE;÷§Ÿ[g]Ê=v²k¥sÆ;©Ð 9 õuý4…µ#C³Øýü3ö÷È‘/\+» û”A÷Ðêlƒ,ÁçiØbìÆ»‡Ø­•Åôªþ¤v&¥§ÔsµÁÔÙõ$êQ阳VF6¨é¾® ´¢;i¬-–úÙW' ²ov mEÝ~-: ±×oퟵw%¸¡FÓë´7 ·‘R°à“†•$ôÔN…¥÷ä Ò°ecîæÈO-ùM“ìyròM—戹lšç»®A‘«‘`ÁDLf÷ä!”ç=—ÓPSy˜2pTô4<Þ«áÓÁÅä25”Ú9Ý­h¨îÒxrè¿ òJÆ;å 7b¬–NœrÎZ³ÌE²ß¯©žÞöèä£Oíû®À7¼é¸&^i•¡éË€-Ô—ÌT}euq•áÌ’ÑwQyªn[/ †óïyBcÈÕ&³GÒ2ý4Ç•ÞÜ~Á@ß÷ÑÕ¶#•ªŒ¯ãɽ•˰ù¹²‚âH‚ïênX…ÿz ËãsªÌLˆ±E¹›ÃŸe mà# Ï?+µÞ‡-Qêt8+R;=Õ`mÞ D~ýAN,b³H­Ñ¬÷ªŽ2PÌåÊí ’·Ì ¶‡õz|âæ¦µ30›ƒ® š‚±åD¡/–vÛû!ƒÒÍ#"0dZRûȵ+:É€®ÙÌ$1L@l­Çúé åE0ö#ÉsN·ÀnS©#ÝÆ?zÌbþ¬(Š–¥‚ÿçÁû3^{…móÆS¯Îzºî,!^¥wÀN…à> RWa$ÿ¬-]$j‹‚«üJ•8ŒmÅzcI…™Ã¢«=od-LÖ¢{ ¸ª ¸] Ê]ða[Ã×8x€ú1kó¸z'Ä ­g¼c’¾?†(éÏF5½ó¶5v¤øƒU-3Á{-ðáÇnUMké6“›"Þs˜Íëà¯#ØèÜø†’µUÓ¹Ã;ÎÆØ{/X~„ W# º]Åq¾ý‰‰’-…ß¹.vC¥ä4(]4~öÕ¶°S;›å½ÏAy_ ûBI ˆ9ãS(mŸº…D.%G3A—ÛuZ=tkÚ¡1“§L(mcJU¼S–ê*ÌÍCäÑ‘~1óÖw4d.—oèDÊòG–fk´× ´àôFóâ² ‘ÜÝÒˆ—x$ô{ñcÊ*d c)Û>k?üJá’†’÷©pj23‹ÿ?cÝMbl¦Fn‰hæ÷yV)ôX¯>t.;·î4>µˆ€E?ŸºYùfª-ääéR±°…#È8øŽã‡ ±X]…\h8é‘p^NÌ=¤{A*«ß«mÉ Mhµ€©ÞÈ}ýf¡Ž$õH ÷Êk[KŠù‰60~Ãã™Üw æ´ä]MhûÖÎ*Ž)OˆÝCE3Š|Q_¥˜ÂÝ—(Oøl3Ü{nŽ™…/ß_MFþp £5à7Þs„&‰ 6h(ûw('4<Âã # _`òq‘i”H=´Ë’Cãóz b´Lܶ šp¿W„¶wmtÖÛ#:ës¼€–™'~¢Á-íˆüS5$ñ†ýfk↠üj­™…ûG3W±¾ÀÀ"g×ϨäMdVòø¿ GŒ”ã‚§'N0œÐYõÜÈ&¯¹‚GÀ_ÞW5ç†8L@ql?U˜»…ÏÀõ„ïŽÞ«»{‰:2ôÚi'BØ„—J$ þ]"ˆÃp1/’arbŸ¯´¾ ß'_V¬Z+¼Âô…¾äðiñÇHô±T"ÅAz¯d?§´í®ˆµI¾è>bó[fö©75.Œ$0~d·À·73FŒG­HQ©OaÇäscÝDýNߎä@ ëßägx4+‚۬𛉼×õ¨ÚÈÿŒÁ5 -âꈱˆÄxýþüN¥˜ÅÒžP§uü”$àÔ­ÓŒqEÜ^ª§R,ŽY]AÛ¸oŸãÛRÍúXÄÃyìTÑŽ´ôÑu gØWZÚ i·aº¼°g­Òâ|¥®ÕÙÙ×bTÃ[º+ˆN5Ìßþ-Ü]0$µÖ(‚;@Á sZ•Åûí û¯/él&Y7 Ÿ0ÎĺJº¤û­²És¨û#¯ˆ±p†fSÁå5p@ƒqDíÒ¤/βÿêT~\êŽ"c…Ÿf¦ž´r™L>VÈKû¿¢~½Ü¬æná‡yõ?˜­(“‘”göa‡Èåò(‰ ñÌŸ0=DÞõˆTòçœÏ‡Ôw­h¶Þ;Ô‚ÑöS¦PÌ™ÇNnܼ_Ýt$TèBZÌûPY—¶ø•`Q£:?œ‘‡'àÜODüèÕ-FTU-ü3p Ô}öËsÈÒ¹=þ øâBJÚÛÐQ%LÍ0r³2ÿ  òâ:‚—S¡°°C£ |paDn‰äØã„úæ­;º ûÄ$?6”á>‰ŠyöWàÐÿ‹¦rÙfóQ­Ð5îöQ,Ú`ÅG=“™éO ,u nc¯‘x ÕÃG'AÞ§{\ø«éʳ!c6¹²|íÆú}^w<tÒ Ï“îN®Íh׃ÁÑù¸~ëmð:Έþ#]û œ˜¤ì>áFµÝÕñ»Øpß,| &›©¥XÕJ(ÖÂñj‚ <´ó"œ;=:År,g–Ã˶vëJPvÕ¡÷¹­TèÑvüHÍ:ŸÄ¼HO§[r¢Ýàf·¬á³ì}ÙÅ% È5LqQ7§e;/!n5‡×¢Çœa}›¢Þ E®SrØ> à“ˆWe`+z)*ð§†ós#z_Á½zJ6!Õ°öÿÈ´s5«m™ËñoÀªyïŒôy3ðG?yv¬JW3%Ç$³5¸3ÆmæÍüjýæü0b'N ·ç5úHd±µ Lf½Çù 
<Øœ{)—€5íÿ5Âüرy¾`p“€ÛB1¹I=k™¦Ätʉ13Ø+=jï´èêýxR^0½ËÄgy²›9êlK~ðeþýÛ×™•æ.&sXN…˜­tZX†hΦÕG²Ê¬jóÎâœÎ¸I3k䟽ÁAxæ½IH(¬õc-À÷é¹®(v份ÞÐz.Ô@„LTÓ®=ä©GÃé³ghaæqiÍé±4.#o~]’{¸gnõ7Õ§yMAq;~|#Áà@_"±×·p7R:s= xèÓâ±ÃÅ쌟ƒs)ÒùXïµ 2! ||ÝüÖØä›-F+ùk†p}Ú{õ¢ä…žXŒ46øO!é4žãñڶ«àÞÂ…ÒíŠc2Þ }ÖƒÖRžsg –íXœ-ƒ6 åhiÞ*ä»ÂP¶]’OÈ´N¦5ßâÇLÎb…ècØÃ.½¾©TÞùe¸¯ÚáD“׿ùÌŒžÏWá¸èŽád ‡Y !épÈñÉ"q½‚ŽÌ^yVü’P˜â©k5QÀjd'`êþl圂‘ °õaÖXüÆA¾O2/{Æ¥*A% Òþž½áê`ʈ1¡™=2|:?‡A{w?WÈE^‡ßã³Æ¾‹þ3ÙÓÖ¯‰w9[1ÈÃÚB~„]W‹ÎœHËZLÆ-[ó¾+ctò²ŒB Uˆþ{Ts?.£ˆ¾?Ø`{¨[jðïøŽ´á» +Ï󾋰}áNÌü‡ME!`óúBc4š-„QºÇø#xñ_âƒô·®@NRïãÞ¥U¥6p¬ŒFõ´ß>z~Üâ!÷]y29œ|ªa~Ø|êÞÕÊ”‚üØ=3¡Ë2œfK Î/({¢SÄÀ誻<ôþ „ßûóʲ›Û¸ØçMðpéV3¼BÉj³”*¢7í‹”ÙlðܯÉZÁHLÈû.þ”f/z ´£vW’±ö ¦0ã€Þ—Èö¥Ÿ5µëGAñ‰jŽäH#cØoóÈs¼&ìÓP G§1Þšh½æß$úíÈñÄΞ3—¤ƒa¯°°¸çÙM‘F`*sAÜØ¯ÌN?Ãr}ŒÀR©´öà°eCÁð †~OÍÏpÙëAàX¬¨;| 5.•Ž‚¾” ¹^ÉÕ¢O(AÛr"d©£PÕ*¥@ÂÔZ™?BȉKwÅ™k¿}pR:©ëAæãÁróÎ`á,H>ªmû‰0ýFdÕŸŒQv¬@}4½M\`…€m˜3ÚI+º+D±s¤®²~¼Co?ìݵ¯Zrb¼«HcÝEqSz’um^œ¹È™rù°Ý2O§9´J+<‚B¦¬S:bãV¥d4Ý78"°7¾©ÞÌ4!\Êòt©œw!»ïØfã_rš)ö[ÄrÖWWÑ‚¿±¸»tß D~fy(G ÿ\aß«l‘|¿/ØZz›0ÝøŒ[Â(Q g;wj “Ÿê"»a­¼œÕ!}쯋µÛß Ù%š47tÄ-:+Ù œýMÇŒÁÿUï­|ë×$Ld»¢Ñ‰¯#Í|ÖpÍ?©û´¯ÐÇߨ æºç]ž<¢R“ëÚØßUž…-_R€z®Š&/$8Þ·+a=i»x$ݦ"ÍD×™l;3¸ÃŠF¯dòh’O¥ôÒPsf=„÷=úÇ=ضÞ3Ÿ^•’ÉâZ&„g.÷3fÑ·•‹€³áMJúd’nÀKFØ—"TnºLnù®%NÀ/€É·1ßzÍ¢úË#–}M¹…1í\Ì]«c©UaÁþù-ÏIå "¼ ÕŒˆ6¼­N”æJ z«OeŸÑ&0K}Z>¹tr sÚ*Ê^?¡é¢ð#·“àÁ• ;ïÕŠÒˆé¹ É;¶åãfxöƒ²šÎÎÍìôQÈæE»¿ ª£bÍô}W¸ö=±”7»Û#H+º¯ï7wñ§³éOJØPƒ(ׇÀ¥ÿo°LúIÔ„Ïðï÷1ö¹õŸ>·G^ÈŸB$°{†¦ ÁP¥œD$õÃQŒ9àHì^Uk/c1î~â‘whÿg²SØ”hîK·$È}í“éalzØ œ°ði'êõ·˜’%ÿ2Š»Eó‘iO)÷µùÞ/¢n4¹FÝŽn+É «®Ò¯À Ó ñ²Àˆ©õmI`~#±‹:*ºÜÈqàzÂ2jˆUˆ b3¬pÙ”4í`…ðÈú¡Lþsö÷÷¹Òú·ÖH¦^~iŸÌêìux× Zq9ŽÌv”lp²…to{bJÛâH½‹ù£ïF"õˆqë°èí~+>ùçÍñ§õ4žo#/d3xw%¢éu¾ GƉ¡Ù ÷UÖ´¹’$²˜eôÌLL8Qµºv‡÷]²qH"Û§¨Å†µ¾ã_=gÃËãø™t퀟à裭§ÕlÃô@N†Zö?²k§Di ˱O7;Q›·‡æ8°ãüÉ5+¶à(8¼Ý]);?µ½ö ² :Û‚¥ºø(zbx1OGƒoÈŽÁ øÏ£‘ú¢ái(NãëMž'òÏy¬j±öäíyIšAÓ_ÔÓ­*Ü”Ô`¶¥/= ¨…òÜ™!‡l?âôá¢Û Yv¼ŒtŸ6˜yxè¥,AÿLUbOÉœy}ø Ì_ ÎrÚF,jÏ•ÑüX&9_'f½TòXHFÄÜ»6ት çÞ˜bœq+IŒ†]\H’Ê~%(£Pc¡!ÛœÀ{ÒÈ@_mêvvûÁª³¿·#•¿šñ±-ÏÀèðäÀ*éf¥ÔloÈ1ªñ!G¤n‘Ïþf¤8Žô¦®'ܤ5ªo7ÒÞVV¾ÐBC‘]®^kZÏ>¢À%„Ö°E¤")j«»œgúZf)?$ 
üì‘Õ%­ý%¦†+ŽÌD$÷×émd8nÉtªdÔåo1ÙI‘äÕ¢¹&û’ˆí‘ð€[âpWvY»Õãû–ïé¼Ë—_T>¦úþ5Ÿ–^Sl5¦?U&&·š÷)ï‚€ŸËZùÎòÊ»€ˆSeIûŸ¿úõQAá1ÚëšÁt¯Þ¹jO«_õ³•Ôëᨘ`j|’0 P%”o…Æ)¦¢i“Ç¡ÂÚ¿{6LR$& Çá úâ:8Hk„T+/HgÉΡWq"~*#úÅLk¶Ì‘r–dKâG¥ÕÀÒ€ɬZz§å} h^ftuW×È";—`(ÑÎÎÞ™ÞñŒ4>Å“&R¤ò§ODGZÂl€ö¢ýÛ9½sTlýƒåǺ 1èÅîRõ&êF¯¦ øH£µÿ¾Í@÷w»@*ðó]ÏBT1¯Ù¶|†® æJºbÕŽé{qç@êƒJÑú¦Ý¾D©sÏèÿ­¨\»þÿw¦<)8‹/–DMü£÷g¨ååÖ^볡ö4,9³–`nzðKúrÈ5hô †üjDUjóz[Ë?²X溱Pá\^L‰h÷£°l6*œG½÷£‚È)5®Ô¬{ sðoÀPáƒCä€àÂ_é8ñ߬…¯`{’â'6QÚÈöØ•õ¾‚v.oøÆ++軡«ŠŸùUUlº‰àפÊl©˜†‘oMŠeéÖt™<˜ FÔ+ÌõŽv#ަUç²ýPwmÅÿáµáv|št€8˜=øSìÍsñ–Ëi¨²mY— 2¢õü½|òTù£3e‚"%¶tÏ`4”K—³ýƒ«³­þ ¬r›uz;ŒÈÝp Ài,¢&6L ÷Õ”bÁ¶p§òPÎÔÒ^,A¬‰ÝíšyC½áøilI¼K9úô$vO¡hÅ9S£¨òFbQ¹Fõìņ cdÔÓË9-‹MˆMÐÌØñN€wþMö'Ç·ÿ8jw.'OÁb[\î’?J¸xÊÆë¸õ’ª$üüi3,Ö߸º¬ Ì¥ƒ:^2¼ÖÎý³nÌž4žk¡öv>NÝÒ`X"ÎHéeKpں݀ŸdQ¼^ùÍu©ãÀa…ÑrîO¨êoMlÆ^/Û–9 ®‘#“Ô LO¨‚®–;Ë|V“BÄ"]›Ek[=¬?"dü¿Š-P@ݘ&V6 ëï8±tòJÌîþ¡/r84l­sê2Írý1#Ô¥4†»â‹Ž,ÿ§§ÇÆq§86¿ôí*íÖJ¾ÿªA2&zÜøglÁ‚›"­^ŠCo$:§„8oþqg¬ÿæÌ)ËÀÏâ•mšøiýÔwæ©ùSÿf#‚tU‰­¦5ݱì=@|¨f.ãë)»m/}#áãLDìz0Å™ÌK\E[ƒ¢2‹-Zrí&LG9k¥žB÷,»ó(ª‘J¤ˆ\2ecç®_ÿ&êÔŠ3.×™õF3J©D@$4h³áKFþö8[gõˆmôíÕ•x]-uèÛüæÂmK…{ê½/¬“ëÖ÷­bû@r\0Ž2Mj… wâY‘aÊ¢—Øùvuz@»°õü™~eáW oS‡¸˜lÞ%™²|›{¬´,¬í$- Gë%¢±6’Îh¹ò q€|÷ÄáAää5=‹GŒ¨ /V0Ò®Yœõ»:Ý€àsT`m @Ê “C¾ÈT “)2½dŠ8j¶#Œð–<Éí¿ s/ã •öP š(Y~òî_ˆ;O×<Œ° ÿ–±‚|–°'¥§"Z¦!ëY»ö KÔ¢Bøß—d‚BúÜG;æÐǬàÚ©ÞÔQùÅN¦ç÷‘ÅrÓ|©œÕݨŸvêt°@"ò—3[» PÐ'“ÓÝЧlhvL-’½¼µïf™\7À.gý>Ú)cÌ)uk¸5bªß‡ásóÚ}¿\”:C²i ŽZ?pA_6¨þ¤Wm9ÿ«€áaÿéD~ÔËóû9®g}(üÇcŒ®™ÄV¶Õvõ›yøiSЧ >IÛ¹Aá@ܰs¥‘Ñ^ò¼Oµì~¼ãQ€PâůB'!L +×àT•§ü½Ï-‚ci®Zàúo_s)X=˜py£²›úÿ©IîQùm>F5Qáp‘)¤aÎçaëX["Vd®½Z¦À´=‹@ÕŒÛ9È@Uü&ºïü¼ÑúdÍÝ‚¤Û…HÕê<„nå²fYÃj »Ú'´kw Ì¿^D‡Z£«:·ìÍÝñ_C¯/a!ÀGaÈXÞ4Óâ°0ÿ£þrˆÀ¬"Þt‘§ä¸ßGÇm†¨(?{+*Ù>è+p½’ÛÏo7®%Dézx‘òT:žšAÌçOO Ȉ¢…b’_ë¦Vî« Ã¥¬7؉’'î,·‰(‰Û 09\w͸ìæÖ§€œ'Ë éÇñ`ŸúËƳ¬üb[û:²a)Hã"îÝ&u‡9áÈÜ]º…¤ t#é Ó‰Q^Ê7ͺ²“Þ Þ”­ÜÑ<î@/| xîåo„{óyÚZû$³ÏLì@*À«Å‘ì¸;h—õ.U&¦º0±¶ÿñ N½´Â’¬P´¿-cjÙ$&L‡9¢ ¤½M‘_ƒ¥=„ÖøpH™]Æ.µ&-‡8“>©Ü6ÁÿO›ÝA*ÃlØ‚ Ô€(¸Áèx¸OÀ÷i…“¬¼%駈çã;Öa½yþ?Ù½Ä ¥*@®0@l¥BLX ¸BŠSfY&FÅÏ(¥ôüK !–Íöõ1LåÉYém‡†–€2}iõGç¬=?Ië òéÇ^'L^ŠØK2 RËì©|—€¢×´®äæÉ"“Ñ{[Kû¤ˆ³Z•I¿íð(Òd/³§Ší–Ñ'Qklþx^WÜTõ\::[Ï530袪îŸÐœ˜¦gÃ;z©ÛZ¢·G8ŸU%oi…Ëh½;ôÚ=½”r LÖÖkìœ|*‡¯•Ü™FùOé˜ÆÍâÍÖÀ ­û 
ÞDæePòÉ]¤,èƒÁô‹1? ˆtÉzÛŒ…2K¦ ïrÒåVÕ~âPYTŒ–µPq¥±8Á%´&‹è¹^ǯ ûó5[.«.y< €ó›¼] ŽW"þÙŽÈ ¹užÑˆGZéè°Ëñ¬¨ü¤³’@tf{Píµ0cT|„îUøæF`m‡óåÔ.É¿îÌwœ@)¿Clä|Œ&Tç ÏpŒe.é°¥­z2Ž·Eò޶ûV*«Ê}. qL úSA±=\o`ýðgõ+mÓºL¢eôæpñ fÁ~xõÁ‚­}sG)ú‡oÎÓ&ÚúͼxÐ^ߤ˜<ñ²š ƒÇŽÞLEŽ¥÷ü¯Ðšž¸å ˆJ€”~aÜŸßM©Z¶gVœ€âÕ{Ô±-0ã1ªâ¸Ô³I-ðÒ z§ß)Ž· â:Á÷Ô…QZö‹b.«€*“&?ßzn:–ý~JÊÓ¸Øm¿\Àæ@3 ±Âó.[zQ€žÝÉÄh‘AS8ýúÛ ±Ü´ü“ùS©˜h˜êËøF\%]zÒ@ ô€Ñ¥AbI¹3î÷(Uç¤&dï`æÆðÙ>wú¯‘øðàpR.Úºãú%í-FÝyGº9+#'þvÁ’øÒå~?/SqÃGŽ?ô¯)›_‚d0Óe6sn¢²äleVÐÖK›U”Oî‡q?Vãï`D6 ­u²T\ÎQŒŠáþ ú½f k' ¾Ì—Fñ•ôlSxóΓ§ô šR¡V²òÍÉ®–Ò[’øîíüu5žD9¸ýV0oû MÕý{ +NmÙ#ž­…}ŽÏÅC¾y—ûò,PeKùÌ•û$Â)Ë]pjáBœ”Pú&`/"çIö#,ô3E+–K"cq°µ–c,JÑrBþS KªÝ˜,³ÔØðC@ßh¦± ¿r!‰¯€Åm·a<@ BÆ›o—f,‹ÂíµÚÖ8ûÉ3S—o¨.BÍ[•"( éݺû¨:ù”­§ja¨LZæ:?,ûÎ;[ÔÎ{ON|„²¯M¾W<%"5âÔõ[‡®5#t1FÂÜ÷.69¹yCKVt‡´lµÆ‚P#'Ðú¼õÜ“V³KSû€ì LÀió0•×Di¼"´ZŒgþV!Ñ„ߤe .f³[É݆º8t‰Ãû`DÅD|ßJ´>Ù­¿þˆ_Ó…³<åð Çß‘yï‰~ÇÎdþ¨?2üžTN½d ˆØpĶ -!-{Òá4+b|¹IRÓpü¯:²o8;íîú“Ÿã_´3êV¢â\¸²@úáPŸ…ËZæ³s~‡ú5;áÜa$àSAGÝ4hÙO ÓõKcvè)·æHÏùÁÌ!ú\¼myž+ùg£RçhkŠëYhv«ÔR”à:{ßSÈGßé¨HÚÒýÓþ}¤Jù:Ô¥5Ûð¦%ùýWíMÀ…V7ØznÁ ЕŶã{òÒQ+;œ1ßý´Ng”³6a@°%júošoÀƒRÝÆe’šnÉa«>ßÌB¸)`fvc¯(ÂÞß÷ǘ‘îí-CvV_ų„Heé …ÿšá-ÍlðIÂ’^'·8‚'¡à“F-)y’{xzXõU)Ã4gÜÛåBü"æ*QçÆ<†%Ïrʪd’5"´1 £HÜ)á÷–—6Ô o;2jeûös.Z`¾»¯kþYwPf[CðÌBLwSwaÌDÊ»% £˜ý[J&áœçÎ?„®ˆ’ðÕ°Íêbž¥óyt™²V]–ÊíºYy"¾¥#2àßÄįðýñHš’\p´|-î×,÷›blÁû‚zß+&¯mƒõÒ²^½MÚÜÿ•Pça°ôýŽ|¡©¬ê7B6ÊÚbÖ¦¾#3ع2¿Ý18nã̧0˜n¸ÖåœGR¢ÁÛ`ˆÕ2¹¿NIRÊÃî?Pb†•ZË4}ÌÔlàu°‡ûƒ .¢¡È_\ÛB<y»¹V¬Ÿo¬™ ïV*(br=ÿUÀ¶6޵-Ar½’™l;}¨h›ŽD|À(>0úÝàACá>C¡Z$¯m$l´:‚¼ÁífÏduã%‚|Ì~¸ê3„Œ!Kj,ÚWt P:ãFâ±øã×-—Ðô\Í…†Tñî–«Îm¡¬8L“­eèþð»É+mÙO|¢9iVðÍæ6´B†d×0ý¸Ü?Oàðß~ÕÀ‘óëöþÕXî&­öîø¬²y't?g6ûÚ¶W·†½äs°Ù«`fôæç`Úq¢«XóSŠ£YK6=ÙÏ\Æ¢±Å5jn~á×±€Véþ›nH°Â²Ä)‡iíiÅÓY`½ÎÈ67‚Ò¤õꌊ5õcÀÆd¼ñlgˆh íh©Agª cù´Ô&…«FJ[ª‚k'èÀSYN÷Ï„èá…mjän 6^Ú<^õ®Zô¼‚P3Ñ5gÀ—£ë¹V`ÝÅ᠉Ⱥôîì‘t-ÆOxfI×ЩJ·‚,Ôæ:HÎ41~ö|NT©ýѵ›.G_Yö7þÛKÇ4iÊØ-~»¿Bž71XÂÝé7ùÌö®Îæ‹'äƒkKàò»³fL”r¬âþ·4CÇâHæ:â4«­aúŽ–—:í)úêô–Èëi¤F Z]:­O£‘7c{poZ»Û™ô‘î½=\¨á=¢f!;yn²B0/#$:.ïÛL–¥/ ñ,%%gŒ >ò=³/ˆAðó·sŽáW©à“1ùA_â?P€B?CúfÄ çÓUÕêeé5øõ8AL™ÝÉã„Õ¢1-P¯a#P²ÿH0’—õf”"«§¢hÇaï?2ë:3Œ|ìÇÜâ×{Ôª‘q ‡ÆÌtú ÓvzØ’¹sÌF0[Òë7‰üÄáþ-•h¾«H=qP® Ë.¸Xã 
f§#Lµn÷œÌ@®Yf3â¶RjíôS´ª[rýëµ4w÷óS£e@,Ȭݽ¿Äñ?‹”-Ôƒjß­_ ˜asZ8mÙ_&ƒIùXëà‚ñ¬aÔâÚâÉ}£ß—Õ7ÚNÙ|Â¥¢€ÉîÓª¬BoË)­ Õ;º¤Uªžz±‰ÁzgÈÀÐé•Á_ð £ö EGYÏKž=ÞêPhBc÷ˉžJq7äÛ!¢†å¶;=Äv¹Âð+ø5‰n=‚±ÎžhÁÆÏ3‚9¼€aÃjµÄÿHmv¡º½™»,~⊺ŠÉ&&†ÃÇOëßÕÊ¡hÞ-- ©¬4µz6` pì^¥ú–¬ÞƵïè‘û&å¾zäfà×*GB.·ócmkU»Œú¡÷‘¬!Fù'€­ —9×;Æ’`HiK¿/“7gú Ñ…ûå®é,E™ù²XÝkkÕüû~šÿ„b^w”Ë$áx[KoAúmìVú@oÃÜóªÞE»Üß~‹Û`É™é'n¤n¼ÿj_¯Ã5UQðÍ{¬¹]ÙŠ\ßšš¾ëÙ€¼v<öÆmT¯ªpP@ÚóX˜Ê(t2ãcPØ ã¼BjôÚ4º9ŒD{×X9n ¢-xcJ%'éõ"¸›s l&:'3n nN›;ð¥L'ªƒ€ ¢ Wº¥“cmX_}„úûÆF³¡ÉLµÇ©è¯ág¼iïR™-¡úHÔœÔ? ôÇð`çÏä%ÁÀ ZˆŠæŒäñ\ë!C]a¿ Oú‰@>~õäMˆVê†Æ¨&G¡+šzK1 Å–Q{ÏøÁÜâ.>þð,ó#•jHêá`aų@‡'U¾Ñ ’ƒú¡›RGþÞGþæ¹­†­—(¤pêuŒ†ƒ˜z!Ë|E#@–õ‹^±žZpL¯-£LE–„H퓌‰CªëF&—ñ%}òÇyIàuÆ“b7[T^\f²’.¸Ùø)è{}{Ūía‘f9a³,wØÆQêCjú5ÃRî+´R&Yãß®¢Þê_Þ®÷[//0ekZÈ%‘õäŒ|¶‡ï¢øY8‘¬þ}àKH]î [×”]Ó'­}©Mu8 !irÚmèzŸ{R_ÝCI¶ñ{€u|ÞåP}–xœ-C·aâ™Þ4‘Bq˜ •囬Ïè[ãä5bdoÐ΄@˜)±»Ð`ª‰óCþ¨hÈI«§]Að_¬ªŠ2®¤_`óî!Tïš'·hC%Û$øL.W'(ê›àlÓ$œ¸UïŒgÿ¡Wÿe cJ(áÉ—cÏí`“ZuðµÝþµøã±™µ©cRXgÚ«l䙕ɵóóÚ”¼Õÿ_$Îv´8ø8ÌŠ#ޤ$“ðR±êŧIrL[ç$ƒA$•3FvÆÛsòÃüjW‘únÖÞëFŽFæk››BߺœI›Ïo nkO ¼2/ؤGïu8OOÃ>>ñ…Ññ(ÀKSí&é–(w‚¹ý×Y0Žž˜®²7µó,义±nRI7…{]¾P9ý|Ud‘#qL«G9ýíë4ï ÍŠ1¦¾ªÿÖ^¹¹ê›™­¼Ç!Q5·ãŠ¿óÄÙÎ]¬hÀ†Ñyݳå‚Û*;n»Þã5º‚î¸-¡›ÚP Û1°€ÿ³>Kaèê ‡WV‘C5hþq{qD‹Ÿ{ù¨Ð®‹É×ä€É×`Hý¸Ç>0 ‹YZNLP/R/ngram.R0000644000175100001440000000061012474110665012354 0ustar hornikusersngrams <- function(x, n) { N <- length(x) n <- n[(n >= 1L) & (n <= N)] lapply(unlist(lapply(n, function(k) { pos <- seq_len(k) lapply(seq.int(0, N - k), `+`, pos) }), recursive = FALSE), function(e) x[e]) } NLP/R/udpipe.R0000644000175100001440000000262413337763133012550 0ustar hornikusers## Viewer methods for objects of class "udpipe_connlu" as obtained by ## udpipe::udpipe_annotate(). ## ## All methods will need the udpipe namespace loaded to use the ## as.data.frame() method for class "udpipe_connlu". ## Should we check for this? ## Perhaps simply call loadNamespace("udpipe") in the methods? ## words.udpipe_connlu <- function(x, ...) { x <- as.data.frame(x) x$token } sents.udpipe_connlu <- function(x, ...) 
{ x <- as.data.frame(x) split(x$token, x$sentence_id) } paras.udpipe_connlu <- function(x, ...) { x <- as.data.frame(x) lapply(split(x, x$paragraph_id), function(e) split(e$token, e$sentence_id)) } tagged_words.udpipe_connlu <- function(x, which = c("upos", "xpos"), ...) { x <- as.data.frame(x) which <- match.arg(which) Tagged_Token(x$token, x[[which]]) } tagged_sents.udpipe_connlu <- function(x, which = c("upos", "xpos"), ...) { x <- as.data.frame(x) which <- match.arg(which) .tagged_sents_from_conllu_frame(x, which) } .tagged_sents_from_conllu_frame <- function(x, which) { lapply(split(x, x$sentence_id), function(e) Tagged_Token(e$token, e[[which]])) } tagged_paras.udpipe_connlu <- function(x, which = c("upos", "xpos"), ...) { x <- as.data.frame(x) which <- match.arg(which) lapply(split(x, x$paragraph_id), .tagged_sents_from_conllu_frame, which) } NLP/R/tnt.R0000644000175100001440000000613112517713330012055 0ustar hornikusers## A simple class for storing tokens and tags ("tagged tokens"). ## Conceptually, a single tagged token is a token/tag pair and our ## Tagged_Token objects are sequences (to allow positional access) of ## tagged tokens, i.e., sequences of pairs. ## The implementation actually uses a "pair" (named list of length two) ## of "slots" giving the token and tag sequences. ## Subscripting via [ extracts subsets of tagged tokens. ## Subscripting via $ extracts one slot. 
Tagged_Token_slot_names <- c("token", "tag") Tagged_Token <- function(token, tag) { token <- as.character(token) tag <- as.character(tag) if(length(token) != length(tag)) stop("arguments must have the same length") .Tagged_Token_from_args(token, tag) } .Tagged_Token_from_args <- function(token, tag) { x <- list(token, tag) names(x) <- Tagged_Token_slot_names .Tagged_Token_from_list(x) } .Tagged_Token_from_list <- function(x) { class(x) <- "Tagged_Token" x } as.Tagged_Token <- function(x) UseMethod("as.Tagged_Token") as.Tagged_Token.Tagged_Token <- identity ## ## Should this get a '...'? (And hence the generic, too?) as.Tagged_Token.TextDocument <- function(x) tagged_words(x) ## is.Tagged_Token <- function(x) inherits(x, "Tagged_Token") `[.Tagged_Token` <- function(x, i) .Tagged_Token_from_list(lapply(unclass(x), `[`, i)) ## ## Implement eventually ... `[<-.Tagged_Token` <- function(x, i, value) .NotYetImplemented() ## `[[.Tagged_Token` <- function(x, i) .Tagged_Token_from_list(lapply(unclass(x), `[[`, i)) ## ## Implement eventually ... `[[<-.Tagged_Token` <- function(x, i, value) .NotYetImplemented() ## ## $.Tagged_Token is not really necessary. `$<-.Tagged_Token` <- function(x, name, value) { n <- length(x) x <- unclass(x) if(is.na(pos <- pmatch(name, Tagged_Token_slot_names))) stop("invalid element name") value <- as.integer(value) if(length(value) != n) stop("replacement must have the same length as object") x[[pos]] <- value .Tagged_Token_from_list(x) } as.data.frame.Tagged_Token <- function(x, row.names = NULL, optional = FALSE, ...) { data.frame(token = x$token, tag = x$tag, row.names = row.names) } as.list.Tagged_Token <- function(x, ...) 
lapply(seq_along(x), function(i) x[i]) c.Tagged_Token <- function(..., recursive = FALSE) { args <- lapply(list(...), function(e) unclass(as.Tagged_Token(e))) y <- lapply(Tagged_Token_slot_names, function(e) unlist(lapply(args, `[[`, e))) names(y) <- Tagged_Token_slot_names .Tagged_Token_from_list(y) } duplicated.Tagged_Token <- function(x, incomparables = FALSE, ...) { do.call(`&`, lapply(unclass(x), duplicated)) } format.Tagged_Token <- function(x, ...) { sprintf("%s/%s", x$token, x$tag) } length.Tagged_Token <- function(x) length(x$token) names.Tagged_Token <- function(x) NULL ## print.Tagged_Token <- ## function(x, ...) ## { ## print(format(x, ...)) ## invisible(x) ## } unique.Tagged_Token <- function(x, incomparables = FALSE, ...) x[!duplicated(x)] NLP/R/conll.R0000644000175100001440000001136513143571336012370 0ustar hornikusersCoNLLTextDocument <- function(con, encoding = "unknown", format = "conll00", meta = list()) { if(length(format) == 1L) { format <- switch(format, conll00 = c(WORD = "WORD", POS = "POS", CHUNK = "CHUNK"), conll01 = c(WORD = "WORD", POS = "POS", CHUNK = "CHUNK", "CLAUSE"), conll02 = c(WORD = "WORD", NE = "NE"), ## conll03 would have different fields for the German ## variant conllx = c("ID", WORD = "FORM", "LEMMA", POS = "CPOSTAG", "POSTAG", "FEATS", "HEAD", "DEPREL", "PHEAD", "PDEPREL"), ## Corresponding to CoNLL X (10) from 2006, also used ## for conll07 conll09 = c("ID", WORD = "FORM", "LEMMA", "PLEMMA", POS = "POS", "PPOS", "FEAT", "PFEAT", "HEAD", "PHEAD", "DEPREL", "PDEPREL", "FILLPRED", "PRED", "APREDs")) } records <- scan(con, what = rep.int(list(""), length(format)), encoding = encoding, quote = NULL, quiet = TRUE, fill = TRUE, blank.lines.skip = FALSE) names(records) <- format ind <- (records[[1L]] == "") tab <- cbind(data.frame(sent = cumsum(ind) + 1L), as.data.frame(do.call(cbind, records), stringsAsFactors = FALSE))[!ind, ] attr(tab, "format") <- c("sent", format) doc <- list(content = tab, meta = meta) class(doc) <- 
c("CoNLLTextDocument", "TextDocument") doc } format.CoNLLTextDocument <- function(x, ...) { content <- x$content nr <- NROW(content) c(.format_TextDocument(x), sprintf("Content: words: %d, sents: %d", nr, content[[nr, "sent"]])) } ## print.CoNLLTextDocument <- ## function(x, ...) ## { ## content <- x$content ## nr <- NROW(content) ## writeLines(sprintf("<>", ## nr, content[[nr, "sent"]])) ## invisible(x) ## } content.CoNLLTextDocument <- function(x) x$content ## meta.CoNLLTextDocument <- ## function(x, tag = NULL, ...) ## if(is.null(tag)) x$meta else x$meta[[tag]] ## `meta<-.CoNLLTextDocument` <- ## function(x, tag = NULL, ..., value) ## { ## if(is.null(tag)) ## x$meta <- value ## else ## x$meta[[tag]] <- value ## x ## } as.character.CoNLLTextDocument <- words.CoNLLTextDocument <- function(x, ...) { fmt <- attr(x$content, "format") pos <- .position_of_field(fmt, "WORD") x$content[[pos]] } sents.CoNLLTextDocument <- function(x, ...) { fmt <- attr(x$content, "format") pos <- .position_of_field(fmt, "WORD") split(x$content[[pos]], x$content$sent) } tagged_words.CoNLLTextDocument <- function(x, map = NULL, ...) { if(!is.null(map)) x <- .map_POS_tags_CoNLLTextDocument(x, map) fmt <- attr(x$content, "format") pos_W <- .position_of_field(fmt, "WORD") pos_P <- .position_of_field(fmt, "POS") Tagged_Token(x$content[[pos_W]], x$content[[pos_P]]) } tagged_sents.CoNLLTextDocument <- function(x, map = NULL, ...) { if(!is.null(map)) x <- .map_POS_tags_CoNLLTextDocument(x, map) fmt <- attr(x$content, "format") pos_W <- .position_of_field(fmt, "WORD") pos_P <- .position_of_field(fmt, "POS") split(Tagged_Token(x$content[[pos_W]], x$content[[pos_P]]), x$content$sent) } chunked_sents.CoNLLTextDocument <- function(x, ...) 
{ fmt <- attr(x$content, "format") pos_W <- .position_of_field(fmt, "WORD") pos_P <- .position_of_field(fmt, "POS") pos_C <- .position_of_field(fmt, "CHUNK") Map(chunk_tree_from_chunk_info, split(x$content[[pos_W]], x$content$sent), split(x$content[[pos_P]], x$content$sent), split(x$content[[pos_C]], x$content$sent)) } .map_POS_tags_CoNLLTextDocument <- function(x, map) { map <- POS_tag_mapper(map, meta(x, "POS_tagset")) fmt <- attr(x$content, "format") pos <- .position_of_field(fmt, "POS") x$content[[pos]] <- map(x$content[[pos]]) x } .position_of_field <- function(fmt, kind) { pos <- which(names(fmt) == kind) if(length(pos) != 1L) stop(gettextf("Cannot determine position of '%s'", kind), call. = FALSE, domain = NA) pos } NLP/R/spacyr.R0000644000175100001440000000110713337763355012564 0ustar hornikusers## Viewer methods for objects of class "spacyr_parsed" as obtained by ## spacyr::spacy_parse(). words.spacyr_parsed <- function(x, ...) { x$token } sents.spacyr_parsed <- function(x, ...) { split(x$token, x$sentence_id) } tagged_words.spacyr_parsed <- function(x, which = c("pos", "tag"), ...) { which <- match.arg(which) Tagged_Token(x$token, x[[which]]) } tagged_sents.spacyr_parsed <- function(x, which = c("pos", "tag"), ...) { which <- match.arg(which) lapply(split(x, x$sentence_id), function(e) Tagged_Token(e$token, e[[which]])) } NLP/R/tokenize.R0000644000175100001440000001365313334313212013100 0ustar hornikusers## Tokenizers break text up into words, phrases, symbols, or other ## meaningful elements called tokens, see e.g. ## . ## This can be accomplished by returning the sequence of tokens, or the ## corresponding spans (character start and end positions). ## Apache OpenNLP provides a Tokenizer interface, with methods ## String[] tokenize() and Span[] tokenizePos() for the two variants. ## See e.g. ## . 
## NLTK provides an interface class nltk.tokenize.api.TokenizerI, for ## which subclasses must define a tokenize() method, and can define a ## span_tokenize() method. ## See e.g. . ## In R, this could be mimicked by having two generics for getting the ## tokens or spans, and have a virtual Tokenizer class for which ## extension classes must provide methods for at least one of the ## generics. ## However, it seems more natural to have tokenizers be *functions* ## (instead of interface classes) which can be called directly (instead ## of calling the respective generics), and have two "kinds" of such ## functions: token tokenizers and span tokenizers. We use the class ## information to indicate the kind, which in turn allows to provide a ## generic mechanism for mapping between the two kinds (straightforward ## when going from spans to tokens, doable for the opposite direction). ## This also allows to "extract" both kinds of tokenizers from suitable ## annotators or annotator pipelines. ## For now, there is no underlying virtual Tokenizer class. ### * Span tokenizers Span_Tokenizer <- function(f, meta = list()) { attr(f, "meta") <- meta class(f) <- "Span_Tokenizer" f } as.Span_Tokenizer <- function(x, ...) UseMethod("as.Span_Tokenizer") as.Span_Tokenizer.Span_Tokenizer <- function(x, ...) x ## For now, pass metadata as is. as.Span_Tokenizer.Token_Tokenizer <- function(x, ...) { f <- function(s) { s <- as.String(s) spans_from_tokens(s, x(s)) } Span_Tokenizer(f, meta(x)) } ## For now, do not pass metadata. as.Span_Tokenizer.Annotator <- as.Span_Tokenizer.Annotator_Pipeline <- function(x, type = "word", ...) { f <- function(s) { a <- x(as.String(s)) as.Span(a[a$type == "word", ]) } Span_Tokenizer(f) } is.Span_Tokenizer <- function(x) inherits(x, "Span_Tokenizer") format.Span_Tokenizer <- function(x, ...) { d <- meta(x, "description") if(is.null(d)) { "A span tokenizer." 
} else { c("A span tokenizer, with description", strwrap(d, indent = 2L, exdent = 2L)) } } ### * Token tokenizers Token_Tokenizer <- function(f, meta = list()) { attr(f, "meta") <- meta class(f) <- "Token_Tokenizer" f } as.Token_Tokenizer <- function(x, ...) UseMethod("as.Token_Tokenizer") as.Token_Tokenizer.Token_Tokenizer <- function(x, ...) x ## For now, pass metadata as is. as.Token_Tokenizer.Span_Tokenizer <- function(x, ...) { f <- function(s) { s <- as.String(s) s[x(s)] } Token_Tokenizer(f, meta(x)) } ## For now, do not pass metadata. as.Token_Tokenizer.Annotator <- as.Token_Tokenizer.Annotator_Pipeline <- function(x, type = "word", ...) { f <- function(s) { s <- as.String(s) a <- x(s) s[a[a$type == "word", ]] } Token_Tokenizer(f) } is.Token_Tokenizer <- function(x) inherits(x, "Token_Tokenizer") format.Token_Tokenizer <- function(x, ...) { d <- meta(x, "description") if(is.null(d)) { "A token tokenizer." } else { c("A token tokenizer, with description", strwrap(d, indent = 2L, exdent = 2L)) } } ### Regexp span tokenizers a la NLTK. Regexp_Tokenizer <- function(pattern, invert = FALSE, ..., meta = list()) { force(pattern) args <- list(...) f <- if(invert) { ## Pattern gives the separators. function(s) { s <- as.String(s) if(is.na(s) || !nchar(s)) stop("Need a non-empty string.") m <- do.call(gregexpr, c(list(pattern = pattern, text = s), args))[[1L]] if((length(m) == 1L) && (m == -1L)) return(Span(1L, nchar(s))) start <- c(1L, m + attr(m, "match.length")) end <- c(m - 1L, nchar(s)) ind <- start <= end Span(start[ind], end[ind]) } } else { ## Pattern gives the tokens. 
function(s) { s <- as.String(s) if(is.na(s) || !nchar(s)) stop("Need a non-empty string.") m <- do.call(gregexpr, c(list(pattern = pattern, text = s), args))[[1L]] Span(m, m + attr(m, "match.length") - 1L) } } Span_Tokenizer(f, meta) } whitespace_tokenizer <- Regexp_Tokenizer("\\s+", invert = TRUE, meta = list(description = "Divides strings into substrings by treating any sequence of whitespace characters as a separator.")) blankline_tokenizer <- Regexp_Tokenizer("\\s*\n\\s*\\n\\s*", invert = TRUE, meta = list(description = "Divides strings into substrings by treating any sequence of blank lines as a separator.")) wordpunct_tokenizer <- Regexp_Tokenizer("\\w+|[^\\w\\s]+", perl = TRUE, meta = list(description = "Divides strings into substrings of alphabetic and (non-whitespace) non-alphabetic characters.")) ### * Utilities spans_from_tokens <- function(x, tokens) { start <- end <- integer(length(tokens)) off <- 0L for(i in seq_along(tokens)) { m <- regexpr(tokens[i], x, fixed = TRUE) pos <- m + attr(m, "match.length") x <- substring(x, pos) start[i] <- off + m end[i] <- off <- off + pos - 1L } Span(start, end) } NLP/NAMESPACE0000644000175100001440000002141114654066676012163 0ustar hornikusersimportFrom("utils", "head", "tail") export("content", "content<-", "meta", "meta<-") export("String", "as.String", "is.String") S3method("as.String", "String") S3method("as.String", "default") S3method("[", "String") S3method("*", "String") S3method("+", "String") S3method("print", "String") export("Span", "as.Span", "is.Span") S3method("as.Span", "Span") S3method("as.Span", "Annotation") S3method("[", "Span") ## S3method("[<-", "Span") S3method("[[", "Span") ## S3method("[[<-", "Span") S3method("$<-", "Span") S3method("Ops", "Span") S3method("as.data.frame", "Span") S3method("as.list", "Span") S3method("c", "Span") S3method("duplicated", "Span") S3method("format", "Span") S3method("length", "Span") S3method("names", "Span") S3method("print", "Span") S3method("unique", "Span") 
export("Annotation", "as.Annotation", "is.Annotation") S3method("as.Annotation", "Annotation") S3method("as.Annotation", "Span") S3method("[", "Annotation") ## S3method("[<-", "Annotation") S3method("[[", "Annotation") ## S3method("[[<-", "Annotation") S3method("$<-", "Annotation") S3method("as.data.frame", "Annotation") S3method("as.list", "Annotation") S3method("c", "Annotation") S3method("duplicated", "Annotation") S3method("format", "Annotation") S3method("length", "Annotation") S3method("merge", "Annotation") S3method("meta", "Annotation", .get_meta_if_attr) S3method("meta<-", "Annotation", .set_meta_if_attr) S3method("names", "Annotation") S3method("print", "Annotation", .print_via_format) S3method("subset", "Annotation") S3method("unique", "Annotation") export("annotations_in_spans", "features") export("annotate") export("Annotator") S3method("format", "Annotator") S3method("meta", "Annotator", .get_meta_if_attr) S3method("meta<-", "Annotator", .set_meta_if_attr) S3method("print", "Annotator", .print_via_format) export("Simple_Para_Token_Annotator", "Simple_Sent_Token_Annotator", "Simple_Word_Token_Annotator", "Simple_POS_Tag_Annotator", "Simple_Entity_Annotator", "Simple_Chunk_Annotator", "Simple_Stem_Annotator") export("Annotator_Pipeline", "as.Annotator_Pipeline") S3method("as.Annotator_Pipeline", "Annotator_Pipeline") S3method("as.Annotator_Pipeline", "Annotator") S3method("as.Annotator_Pipeline", "list") S3method("[", "Annotator_Pipeline") S3method("as.list", "Annotator_Pipeline") S3method("c", "Annotator_Pipeline") S3method("format", "Annotator_Pipeline") S3method("meta", "Annotator_Pipeline", .get_meta_if_attr) S3method("meta<-", "Annotator_Pipeline", .set_meta_if_attr) S3method("print", "Annotator_Pipeline", .print_via_format) export("next_id", "single_feature") export("Regexp_Tokenizer", "blankline_tokenizer", "whitespace_tokenizer", "wordpunct_tokenizer") export("Tree") S3method("format", "Tree") S3method("print", "Tree", .print_via_format) 
export("Tree_parse", "Tree_apply") export("words", "sents", "paras", "tagged_words", "tagged_sents", "tagged_paras", "chunked_sents", "parsed_sents", "parsed_paras", "otoks") S3method("format", "TextDocument", .format_TextDocument) S3method("print", "TextDocument", .print_via_format) export("AnnotatedPlainTextDocument", "annotation") S3method("format", "AnnotatedPlainTextDocument") S3method("print", "AnnotatedPlainTextDocument", .print_via_format) S3method("content", "AnnotatedPlainTextDocument") S3method("content<-", "AnnotatedPlainTextDocument") S3method("meta", "AnnotatedPlainTextDocument", .get_meta_if_slot) S3method("meta<-", "AnnotatedPlainTextDocument", .set_meta_if_slot) S3method("as.character", "AnnotatedPlainTextDocument") S3method("words", "AnnotatedPlainTextDocument") S3method("sents", "AnnotatedPlainTextDocument") S3method("paras", "AnnotatedPlainTextDocument") S3method("tagged_words", "AnnotatedPlainTextDocument") S3method("tagged_sents", "AnnotatedPlainTextDocument") S3method("tagged_paras", "AnnotatedPlainTextDocument") S3method("chunked_sents", "AnnotatedPlainTextDocument") S3method("parsed_sents", "AnnotatedPlainTextDocument") S3method("parsed_paras", "AnnotatedPlainTextDocument") S3method("otoks", "AnnotatedPlainTextDocument") export("CoNLLTextDocument") S3method("format", "CoNLLTextDocument") S3method("print", "CoNLLTextDocument", .print_via_format) S3method("content", "CoNLLTextDocument") S3method("meta", "CoNLLTextDocument", .get_meta_if_slot) S3method("meta<-", "CoNLLTextDocument", .set_meta_if_slot) S3method("as.character", "CoNLLTextDocument") S3method("words", "CoNLLTextDocument") S3method("sents", "CoNLLTextDocument") S3method("tagged_words", "CoNLLTextDocument") S3method("tagged_sents", "CoNLLTextDocument") S3method("chunked_sents", "CoNLLTextDocument") export("CoNLLUTextDocument") S3method("format", "CoNLLUTextDocument") S3method("print", "CoNLLUTextDocument", .print_via_format) S3method("content", "CoNLLUTextDocument") S3method("meta", 
"CoNLLUTextDocument", .get_meta_if_slot) S3method("meta<-", "CoNLLUTextDocument", .set_meta_if_slot) S3method("as.character", "CoNLLUTextDocument") S3method("words", "CoNLLUTextDocument") S3method("sents", "CoNLLUTextDocument") S3method("tagged_words", "CoNLLUTextDocument") S3method("tagged_sents", "CoNLLUTextDocument") S3method("otoks", "CoNLLUTextDocument") export("TaggedTextDocument") S3method("format", "TaggedTextDocument") S3method("print", "TaggedTextDocument", .print_via_format) S3method("content", "TaggedTextDocument") S3method("meta", "TaggedTextDocument", .get_meta_if_slot) S3method("meta<-", "TaggedTextDocument", .set_meta_if_slot) S3method("as.character", "TaggedTextDocument") S3method("words", "TaggedTextDocument") S3method("sents", "TaggedTextDocument") S3method("paras", "TaggedTextDocument") S3method("tagged_words", "TaggedTextDocument") S3method("tagged_sents", "TaggedTextDocument") S3method("tagged_paras", "TaggedTextDocument") export("WordListDocument") S3method("format", "WordListDocument") S3method("print", "WordListDocument", .print_via_format) S3method("content", "WordListDocument") S3method("meta", "WordListDocument", .get_meta_if_slot) S3method("meta<-", "WordListDocument", .set_meta_if_slot) S3method("as.character", "WordListDocument") S3method("words", "WordListDocument") export("Penn_Treebank_POS_tags") export("Brown_POS_tags") export("Universal_POS_tags") export("Universal_POS_tags_map") export("parse_IETF_language_tag") export("parse_ISO_8601_datetime") S3method("[", "ISO_8601_datetime") S3method("$", "ISO_8601_datetime") S3method("as.Date", "ISO_8601_datetime") S3method("as.POSIXct", "ISO_8601_datetime") S3method("as.POSIXlt", "ISO_8601_datetime") S3method("as.data.frame", "ISO_8601_datetime") S3method("as.matrix", "ISO_8601_datetime") S3method("print", "ISO_8601_datetime") export("ngrams") export("Tagged_Token", "as.Tagged_Token", "is.Tagged_Token") S3method("as.Tagged_Token", "Tagged_Token") S3method("as.Tagged_Token", 
"TextDocument") S3method("[", "Tagged_Token") ## S3method("[<-", "Tagged_Token") S3method("[[", "Tagged_Token") ## S3method("[[<-", "Tagged_Token") S3method("$<-", "Tagged_Token") S3method("as.data.frame", "Tagged_Token") S3method("as.list", "Tagged_Token") S3method("c", "Tagged_Token") S3method("duplicated", "Tagged_Token") S3method("format", "Tagged_Token") S3method("length", "Tagged_Token") S3method("names", "Tagged_Token") S3method("print", "Tagged_Token", .print_via_format) S3method("unique", "Tagged_Token") export("Span_Tokenizer", "as.Span_Tokenizer", "is.Span_Tokenizer") S3method("as.Span_Tokenizer", "Span_Tokenizer") S3method("as.Span_Tokenizer", "Token_Tokenizer") S3method("as.Span_Tokenizer", "Annotator") S3method("as.Span_Tokenizer", "Annotator_Pipeline") S3method("format", "Span_Tokenizer") S3method("print", "Span_Tokenizer", .print_via_format) S3method("meta", "Span_Tokenizer", .get_meta_if_attr) S3method("meta<-", "Span_Tokenizer", .set_meta_if_attr) export("Token_Tokenizer", "as.Token_Tokenizer", "is.Token_Tokenizer") S3method("as.Token_Tokenizer", "Token_Tokenizer") S3method("as.Token_Tokenizer", "Span_Tokenizer") S3method("as.Token_Tokenizer", "Annotator") S3method("as.Token_Tokenizer", "Annotator_Pipeline") S3method("format", "Token_Tokenizer") S3method("print", "Token_Tokenizer", .print_via_format) S3method("meta", "Token_Tokenizer", .get_meta_if_attr) S3method("meta<-", "Token_Tokenizer", .set_meta_if_attr) S3method("words", "udpipe_connlu") S3method("sents", "udpipe_connlu") S3method("paras", "udpipe_connlu") S3method("tagged_words", "udpipe_connlu") S3method("tagged_sents", "udpipe_connlu") S3method("tagged_paras", "udpipe_connlu") S3method("words", "spacyr_parsed") S3method("sents", "spacyr_parsed") S3method("tagged_words", "spacyr_parsed") S3method("tagged_sents", "spacyr_parsed") S3method("words", "cnlp_annotation") S3method("sents", "cnlp_annotation") ## S3method("paras", "cnlp_annotation") S3method("tagged_words", "cnlp_annotation") 
S3method("tagged_sents", "cnlp_annotation") ## S3method("tagged_paras", "cnlp_annotation") NLP/inst/0000755000175100001440000000000013143661406011702 5ustar hornikusersNLP/inst/texts/0000755000175100001440000000000014654075367013066 5ustar hornikusersNLP/inst/texts/stanford.rds0000644000175100001440000000204113336510534015377 0ustar hornikusers‹íYÝoÓ0O“¶ÛÊ€ÁB A€—Ub)bš € Ô mÕZâeò¯ Jí(vø/üÛð@9§v¾ÚM+â+S/±ÏöùüóïÜž÷¦a†iXuxZP´Öå>kð®UcÞ/úÑCºö+ê½'!÷Ä'Ûã¶Ï0ĵ=j?C¾=¨‡»#d#²‡!AÂŽ’!ŽaTÍŒÙöçPÈÎUï‹Ð£C(}WÞÔ@΂œ{Ioc¡ÿ¤ \7–@–A¤¹¢3 «jÜ95v-~OfYæ„ B1QõêX݂ˋŸ#CE­õÈä6È&HKÉ=mû åNfll*Ì$Æ—A®ƒÜ¹£Údß» [ÊÆ#çJ¿>±a®+ìϪ÷q{“ÝåÏ{»»{£ooìô@à¡9ÖœTS¦5Aó:½~úx•'í.T»]Í?Põ@ÑÙ"6Sƒ)%›òÏÞplG¾3´Z‹ÝS‡Œ ½u˜Q òèàªX˜z F<Ù^:bIÿ ¢Xdp16U%õL9ç„„E•)J³¤<»ÚHü“þ©_ÕBLe÷,ëWÁÒ²Þ¬¹®šOJt®ƒ•©#¢ÑGBMƒ">ekè%N ÌIY¨›Ü¤iÅ%¡.ÄßÄ~e¬LšªÃ5½€}iÛÝ×ý±G´ß  ä†àPÁÉ•}p´£Ió <ÆãñW#j-’^K’^,K†Œa¦fÀF*k\r¨äPÉ¡’C%‡Jý›Êù¬1ZÅŒrá‰|à P˜äââÞœ1ñeÌ|œ¸³Zo¥­!ÄÏéy‰Ð#ùÙÌÆIcàG|òÈu‹—'@¦‘On,È> ª”y44ÜVo·¯AðÉh„Nf>˨L™VAe6»Ý‚& ²Óïì5K¿îLhž~ÖSŠ[ÕY¸<Ž»Ñ“ òxo¯gwÄTª-›Ú5ã|;¾Ð‰óóÌ­Î1©u}Æ÷żïË%ºKÍɤåÚ e¿(¹ù²ß Qñ°=½&õ¦fLßIÕ ÞLÛ2;"¡¨>o*ú”¯Å(ÿ­Gþ7cúgCIƒ’% J”4(iPÒà¿§Á—Âoë‚ÊôħµMe‰Îtú§ø¼Ó;Åä/“¿?G›¨f€Ù­qBäInÖ–«(”\›£'”2„ÇôRµ ZøçUuDš³Ê:¡›8(h·Ž„øÃV 2uä³¡ã»Ø‰€úÔ!nÔræ­—íg¶÷[ذå‰Ñpξ@CžœççRÍþ«½—°b‘;–µ§…§•2‹¦V>Ye'=øªÂ‰¸=yt@>Š6ÃÑ(µwa^ÃjN~þñד?!NLP/inst/texts/spanish.conllu0000644000175100001440000000032314654075367015747 0ustar hornikusers# sent_id = 1 # text = vámonos al mar 1-2 vámonos _ _ _ _ _ _ _ _ 1 vamos ir _ _ _ _ _ _ _ 2 nos nosotros _ _ _ _ _ _ _ 3-4 al _ _ _ _ _ _ _ _ 3 a a _ _ _ _ _ _ _ 4 el el _ _ _ _ _ _ _ 5 mar mar _ _ _ _ _ _ _ NLP/inst/po/0000755000175100001440000000000013143661406012320 5ustar hornikusersNLP/inst/po/en@quot/0000755000175100001440000000000013143661406013733 5ustar hornikusersNLP/inst/po/en@quot/LC_MESSAGES/0000755000175100001440000000000013143661406015520 5ustar hornikusersNLP/inst/po/en@quot/LC_MESSAGES/R-NLP.mo0000644000175100001440000000535713143661406016717 0ustar hornikusersÞ•ä%¬@#Ae)~!¨ÊÛô*'1+Y3…2¹.ì/4Hd9­#çK  We#zž/¾7î'&N1k%ÃÔí*ÿ'*+R3~2².å /- L] =ª #è K X f 
#{ Ÿ /¿     '%s' not defined for "Span" objects'subset' must be logicalAnnotators must have formals 's' and 'a'.Cannot determine position of '%s'Invalid entries:Invalid language tag(s):Invalid operands.Invalid result from underlying POS tagger.Invalid result from underlying chunker.Invalid result from underlying name finder.Invalid result from underlying paragraph tokenizer.Invalid result from underlying sentence tokenizer.Invalid result from underlying word tokenizer.Need a non-empty string.all pipeline elements must be annotator objectsargument 'annotations' must give a positive number of Annotation objectsargument 'x' must be an AnnotatedPlainTextDocument objectarguments must have the same lengthcontent modification is not possible for AnnotatedPlainTextDocument objectsend of stringinvalid element nameno sentence token annotations foundno word token annotations foundreplacement must have the same length as objectProject-Id-Version: NLP 0.1-10.1 POT-Creation-Date: 2017-08-12 22:20 PO-Revision-Date: 2017-08-12 22:20 Last-Translator: Automatically generated Language-Team: none MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Language: en Plural-Forms: nplurals=2; plural=(n != 1); ‘%s’ not defined for "Span" objects‘subset’ must be logicalAnnotators must have formals ‘s’ and ‘a’.Cannot determine position of ‘%s’Invalid entries:Invalid language tag(s):Invalid operands.Invalid result from underlying POS tagger.Invalid result from underlying chunker.Invalid result from underlying name finder.Invalid result from underlying paragraph tokenizer.Invalid result from underlying sentence tokenizer.Invalid result from underlying word tokenizer.Need a non-empty string.all pipeline elements must be annotator objectsargument ‘annotations’ must give a positive number of Annotation objectsargument ‘x’ must be an AnnotatedPlainTextDocument objectarguments must have the same lengthcontent modification is not possible for 
AnnotatedPlainTextDocument objectsend of stringinvalid element nameno sentence token annotations foundno word token annotations foundreplacement must have the same length as objectNLP/build/0000755000175100001440000000000014717312551012026 5ustar hornikusersNLP/build/partial.rdb0000644000175100001440000000007514717312551014155 0ustar hornikusers‹‹àb```b`aab`b1…ÀÈg``d`aàÒ¬y‰¹©Å@†D’áÝ?M7NLP/man/0000755000175100001440000000000014660157443011506 5ustar hornikusersNLP/man/TextDocument.Rd0000644000175100001440000000302313144533560014410 0ustar hornikusers\name{TextDocument} \alias{TextDocument} \title{Text Documents} \description{ Representing and computing on text documents. } \details{ \emph{Text documents} are documents containing (natural language) text. In packages which employ the infrastructure provided by package \pkg{NLP}, such documents are represented via the virtual S3 class \code{"TextDocument"}: such packages then provide S3 text document classes extending the virtual base class (such as the \code{\link{AnnotatedPlainTextDocument}} objects provided by package \pkg{NLP} itself). All extension classes must provide an \code{\link{as.character}()} method which extracts the natural language text in documents of the respective classes in a \dQuote{suitable} (not necessarily structured) form, as well as \code{\link{content}()} and \code{\link{meta}()} methods for accessing the (possibly raw) document content and metadata. In addition, the infrastructure features the generic functions \code{\link{words}()}, \code{\link{sents}()}, etc., for which extension classes can provide methods giving a structured view of the text contained in documents of these classes (returning, e.g., a character vector with the word tokens in these documents, and a list of such character vectors). 
} \seealso{ \code{\link{AnnotatedPlainTextDocument}}, \code{\link{CoNLLTextDocument}}, \code{\link{CoNLLUTextDocument}}, \code{\link{TaggedTextDocument}}, and \code{\link{WordListDocument}} for the text document classes provided by package \pkg{NLP}. } NLP/man/generics.Rd0000644000175100001440000000273212314546106013570 0ustar hornikusers\name{generics} \alias{content} \alias{content<-} \alias{meta} \alias{meta<-} \title{Access or Modify Content or Metadata} \description{ Access or modify the content or metadata of \R objects. } \usage{ content(x) content(x) <- value meta(x, tag = NULL, ...) meta(x, tag = NULL, ...) <- value } \arguments{ \item{x}{an \R object.} \item{value}{a suitable \R object.} \item{tag}{a character string or \code{NULL} (default), indicating to return the single metadata value for the given tag, or all metadata tag/value pairs.} \item{...}{arguments to be passed to or from methods.} } \details{ These are generic functions, with no default methods. Often, classed \R objects (e.g., those representing text documents in packages \pkg{NLP} and \pkg{tm}) contain information that can be grouped into \dQuote{content}, metadata and other components, where content can be arbitrary, and metadata are collections of tag/value pairs represented as named or empty lists. The \code{content()} and \code{meta()} getters and setters aim at providing a consistent high-level interface to the respective information (abstracting from how classes internally represent the information). } \value{ Methods for \code{meta()} should return a named or empty list of tag/value pairs if no tag is given (default), or the value for the given tag. } \seealso{ \code{\link{TextDocument}} for basic information on the text document infrastructure employed by package \pkg{NLP}. 
} NLP/man/CoNLLUTextDocument.Rd0000644000175100001440000000725414660165046015403 0ustar hornikusers\name{CoNLLUTextDocument} \alias{CoNLLUTextDocument} \alias{read_CoNNLU} \title{ CoNNL-U Text Documents } \description{ Create text documents from CoNNL-U format files. } \usage{ CoNLLUTextDocument(con, meta = list(), text = NULL) read_CoNNLU(con) } \arguments{ \item{con}{a connection object or a character string. See \code{\link{scan}()} for details. } \item{meta}{a named or empty list of document metadata tag-value pairs.} \item{text}{a character vector giving the text of the CoNNL-U annotation. If \code{NULL}, the \code{text} comments of the annotation are used.} } \details{ The CoNLL-U format (see \url{https://universaldependencies.org/format.html}) is a CoNLL-style format for annotated texts popularized and employed by the Universal Dependencies project (see \url{https://universaldependencies.org/}). For each \dQuote{word} in the text, this provides exactly the 10 fields \code{ID}, \code{FORM} (word form or punctuation symbol), \code{LEMMA} (lemma or stem of word form), \code{UPOSTAG} (universal part-of-speech tag, see \url{https://universaldependencies.org/u/pos/index.html}), \code{XPOSTAG} (language-specific part-of-speech tag, may be unavailable), \code{FEATS} (list of morphological features), \code{HEAD}, \code{DEPREL}, \code{DEPS}, and \code{MISC}. \code{read_CoNNLU()} reads the lines with these fields and optional comments from the given connection and splits into fields using \code{\link{scan}()}. This is combined with consecutive sentence ids into a data frame inheriting from class \code{"CoNNLU_Annotation"} used for representing the annotation information, \code{CoNLLUTextDocument()} combines this annotation information with the given metadata (and optionally the original pre-tokenized text) into a CoNLL-U text document inheriting from classes \code{"CoNLLUTextDocument"} and \code{"\link{TextDocument}"}. 
The complete annotation information data frame can be extracted via \code{content()}. CoNLL-U v2 requires providing the complete texts of each sentence (or a reconstruction thereof) in \samp{# text =} comment lines. Where consistently provided, these are made available in the \code{text} attribute of the content data frame. In addition, there are methods for generics \code{\link{as.character}()}, \code{\link{words}()}, \code{\link{sents}()}, \code{\link{tagged_words}()}, and \code{\link{tagged_sents}()} and class \code{"CoNLLUTextDocument"}, which should be used to access the text in such text document objects. The CoNLL-U format allows representing both words and (multiword) tokens (see section \sQuote{Words, Tokens and Empty Nodes} in the format documentation), as distinguished by ids being integers or integer ranges, with the words being annotated further. One can use \code{as.character()} to extract the \emph{tokens}; all other viewers listed above use the \emph{words}. Finally, the viewers incorporating POS tags take a \code{which} argument to specify using the universal or language-specific tags, by giving a substring of \code{"UPOSTAG"} (default) or \code{"XPOSTAG"}. } \value{ For \code{CoNLLUTextDocument()}, an object inheriting from \code{"CoNLLUTextDocument"} and \code{"\link{TextDocument}"}. For \code{read_CoNNLU()}, an object inheriting from \code{"CoNNLU_Annotation"} and \code{"\link{data.frame}"}. } \seealso{ \code{\link{TextDocument}} for basic information on the text document infrastructure employed by package \pkg{NLP}. \url{https://universaldependencies.org/} for access to the Universal Dependencies treebanks, which provide annotated texts in \emph{many} different languages using CoNLL-U format. 
} NLP/man/Tree.Rd0000644000175100001440000000636213741575543012707 0ustar hornikusers\name{Tree} \alias{Tree} \alias{format.Tree} \alias{print.Tree} \alias{Tree_parse} \alias{Tree_apply} \title{Tree objects} \description{Creation and manipulation of tree objects.} \usage{ Tree(value, children = list()) \method{format}{Tree}(x, width = 0.9 * getOption("width"), indent = 0, brackets = c("(", ")"), ...) Tree_parse(x, brackets = c("(", ")")) Tree_apply(x, f, recursive = FALSE) } \arguments{ \item{value}{a (non-tree) node value of the tree.} \item{children}{a list giving the children of the tree.} \item{x}{a tree object for the \code{format()} method and \code{Tree_apply()}; a character string for \code{Tree_parse()}.} \item{width}{a positive integer giving the target column for a single-line nested bracketting.} \item{indent}{a non-negative integer giving the indentation used for formatting.} \item{brackets}{a character vector of length two giving the pair of opening and closing brackets to be employed for formatting or parsing.} \item{...}{further arguments passed to or from other methods.} \item{f}{a function to be applied to the children nodes.} \item{recursive}{a logical indicating whether to apply \code{f} recursively to the children of the children and so forth.} } \details{ Trees give hierarchical groupings of leaves and subtrees, starting from the root node of the tree. In natural language processing, the syntactic structure of sentences is typically represented by parse trees (e.g., \url{https://en.wikipedia.org/wiki/Concrete_syntax_tree}) and displayed using nested brackettings. The tree objects in package \pkg{NLP} are patterned after the ones in NLTK (\url{https://www.nltk.org}), and primarily designed for representing parse trees. A tree object consists of the value of the root node and its children as a list of leaves and subtrees, where the leaves are elements with arbitrary non-tree values (and not subtrees with no children). 
The value and children can be extracted via \code{$} subscripting using names \code{value} and \code{children}, respectively. There is a \code{format()} method for tree objects: this first tries a nested bracketting in a single line of the given width, and if this is not possible, produces a nested indented bracketting. The \code{print()} method uses the \code{format()} method, and hence its arguments to control the formatting. \code{Tree_parse()} reads nested brackettings into a tree object. } \examples{ x <- Tree(1, list(2, Tree(3, list(4)), 5)) format(x) x$value x$children p <- Tree("VP", list(Tree("V", list("saw")), Tree("NP", list("him")))) p <- Tree("S", list(Tree("NP", list("I")), p)) p ## Force nested indented bracketting: print(p, width = 10) s <- "(S (NP I) (VP (V saw) (NP him)))" p <- Tree_parse(s) p ## Extract the leaves by recursively traversing the children and ## recording the non-tree ones: Tree_leaf_gatherer <- function() { v <- list() list(update = function(e) if(!inherits(e, "Tree")) v <<- c(v, list(e)), value = function() v, reset = function() { v <<- list() }) } g <- Tree_leaf_gatherer() y <- Tree_apply(p, g$update, recursive = TRUE) g$value() } NLP/man/viewers.Rd0000644000175100001440000001040714714616132013456 0ustar hornikusers\name{viewers} \alias{sents} \alias{words} \alias{paras} \alias{tagged_sents} \alias{tagged_paras} \alias{tagged_words} \alias{chunked_sents} \alias{parsed_sents} \alias{parsed_paras} \alias{otoks} \title{Text Document Viewers} \description{ Provide suitable \dQuote{views} of the text contained in text documents. } \usage{ words(x, ...) sents(x, ...) paras(x, ...) tagged_words(x, ...) tagged_sents(x, ...) tagged_paras(x, ...) chunked_sents(x, ...) parsed_sents(x, ...) parsed_paras(x, ...) 
} \arguments{ \item{x}{a text document object.} \item{...}{further arguments to be passed to or from methods.} } \details{ Methods for extracting POS tagged word tokens (i.e., for generics \code{tagged_words()}, \code{tagged_sents()} and \code{tagged_paras()}) can optionally provide a mechanism for mapping the POS tags via a \code{map} argument. This can give a function, a named character vector (with names and elements the tags to map from and to, respectively), or a named list of such named character vectors, with names corresponding to POS tagsets (see \code{\link{Universal_POS_tags_map}} for an example). If a list, the map used will be the element with name matching the POS tagset used (this information is typically determined from the text document metadata; see the help pages for text document extension classes implementing this mechanism for details). Text document classes may provide support for representing both (syntactic) words (for which annotations can be provided) and orthographic (word) tokens, e.g., in Spanish \emph{dámelo = da me lo}. For these, \code{words()} gives the syntactic word tokens, and \code{otoks()} the orthographic word tokens. This is currently supported for \link[=CoNLLUTextDocument]{CoNLL-U text documents} (see \url{https://universaldependencies.org/format.html} for more information) and \link[=AnnotatedPlainTextDocument]{annotated plain text documents} (via \code{word} features as used for example for some Stanford CoreNLP annotator pipelines provided by package \pkg{StanfordCoreNLP} available from the repository at \url{https://datacube.wu.ac.at}). 
In addition to methods for the text document classes provided by package \pkg{NLP} itself, (see \link{TextDocument}), package \pkg{NLP} also provides word tokens and POS tagged word tokens for the results of \code{\link[udpipe]{udpipe_annotate}()} from package \CRANpkg{udpipe}, \code{\link[spacyr]{spacy_parse}()} from package \CRANpkg{spacyr}, and \code{\link[cleanNLP]{cnlp_annotate}()} from package \CRANpkg{cleanNLP}. } \value{ For \code{words()}, a character vector with the word tokens in the document. For \code{sents()}, a list of character vectors with the word tokens in the sentences. For \code{paras()}, a list of lists of character vectors with the word tokens in the sentences, grouped according to the paragraphs. For \code{tagged_words()}, a character vector with the POS tagged word tokens in the document (i.e., the word tokens and their POS tags, separated by \samp{/}). For \code{tagged_sents()}, a list of character vectors with the POS tagged word tokens in the sentences. For \code{tagged_paras()}, a list of lists of character vectors with the POS tagged word tokens in the sentences, grouped according to the paragraphs. For \code{chunked_sents()}, a list of (flat) \code{\link{Tree}} objects giving the chunk trees for the sentences in the document. For \code{parsed_sents()}, a list of \code{\link{Tree}} objects giving the parse trees for the sentences in the document. For \code{parsed_paras()}, a list of lists of \code{\link{Tree}} objects giving the parse trees for the sentences in the document, grouped according to the paragraphs in the document. For \code{otoks()}, a character vector with the orthographic word tokens in the document. } \seealso{ \code{\link{TextDocument}} for basic information on the text document infrastructure employed by package \pkg{NLP}. 
} \examples{ ## Example from https://universaldependencies.org/format.html: d <- CoNLLUTextDocument(system.file("texts", "spanish.conllu", package = "NLP")) content(d) ## To extract the syntactic words: words(d) ## To extract the orthographic word tokens: otoks(d) } NLP/man/WordListDocument.Rd0000644000175100001440000000233613333071074015236 0ustar hornikusers\name{WordListDocument} \alias{WordListDocument} \title{Word List Text Documents} \description{ Create text documents from word lists. } \usage{ WordListDocument(con, encoding = "unknown", meta = list()) } \arguments{ \item{con}{a connection object or a character string. See \code{\link{readLines}()} for details. } \item{encoding}{encoding to be assumed for input strings. See \code{\link{readLines}()} for details. } \item{meta}{a named or empty list of document metadata tag-value pairs.} } \details{ \code{WordListDocument()} uses \code{\link{readLines}()} to read collections of words from connections for which each line provides one word, with blank lines ignored, and returns a word list document object which inherits from classes \code{"WordListDocument"} and \code{"\link{TextDocument}"}. The methods for generics \code{\link{words}()} and \code{\link{as.character}()} and class \code{"WordListDocument"} can be used to extract the words. } \value{ A word list document object inheriting from \code{"WordListDocument"} and \code{"\link{TextDocument}"}. } \seealso{ \code{\link{TextDocument}} for basic information on the text document infrastructure employed by package \pkg{NLP}. } NLP/man/String.Rd0000644000175100001440000000440012532125025013224 0ustar hornikusers\name{String} \alias{String} \alias{as.String} \alias{is.String} \title{String objects} \description{ Creation and manipulation of string objects. } \usage{ String(x) as.String(x) is.String(x) } \arguments{ \item{x}{a character vector with the appropriate encoding information for \code{String()}; an arbitrary \R object otherwise. 
} } \details{ String objects provide character strings encoded in UTF-8 with class \code{"String"}, which currently has a useful \code{[} subscript method: with indices \code{i} and \code{j} of length one, this gives a string object with the substring starting at the position given by \code{i} and ending at the position given by \code{j}; subscripting with a single index which is an object inheriting from class \code{"\link{Span}"} or a list of such objects returns a character vector of substrings with the respective spans, or a list thereof. Additional methods may be added in the future. \code{String()} creates a string object from a given character vector, taking the first element of the vector and converting it to UTF-8 encoding. \code{as.String()} is a generic function to coerce to a string object. The default method calls \code{String()} on the result of converting to character and concatenating into a single string with the elements separated by newlines. \code{is.String()} tests whether an object inherits from class \code{"String"}. } \value{ For \code{String()} and \code{as.String()}, a string object (of class \code{"String"}). For \code{is.String()}, a logical. } \examples{ ## A simple text. s <- String(" First sentence. Second sentence. ") ## ****5****0****5****0****5****0****5** ## Basic sentence and word token annotation for the text. 
a <- c(Annotation(1 : 2, rep.int("sentence", 2L), c( 3L, 20L), c(17L, 35L)), Annotation(3 : 6, rep.int("word", 4L), c( 3L, 9L, 20L, 27L), c( 7L, 16L, 25L, 34L))) ## All word tokens (by subscripting with an annotation object): s[a[a$type == "word"]] ## Word tokens according to sentence (by subscripting with a list of ## annotation objects): s[annotations_in_spans(a[a$type == "word"], a[a$type == "sentence"])] } NLP/man/Span.Rd0000644000175100001440000000450012502573125012665 0ustar hornikusers\name{Span} \alias{Span} \alias{as.Span} \alias{is.Span} \alias{[.Span} %% \alias{[<-.Span} \alias{[[.Span} %% \alias{[[<-.Span} \alias{$<-.Span} \alias{Ops.Span} \alias{as.data.frame.Span} \alias{as.list.Span} \alias{c.Span} \alias{duplicated.Span} \alias{format.Span} \alias{length.Span} \alias{names.Span} \alias{print.Span} \alias{unique.Span} \title{Span objects} \description{ Creation and manipulation of span objects. } \usage{ Span(start, end) as.Span(x) is.Span(x) } \arguments{ \item{start, end}{integer vectors giving the start and end positions of the spans.} \item{x}{an \R object.} } \details{ A single span is a pair with \dQuote{slots} \sQuote{start} and \sQuote{end}, giving the start and end positions of the span. Span objects provide sequences (allowing positional access) of single spans. They have class \code{"Span"}. Span objects can be coerced to annotation objects via \code{\link{as.Annotation}()} (which of course is only appropriate provided that the spans are character spans of the natural language text being annotated), and annotation objects can be coerced to span objects via \code{as.Span()} (giving the character spans of the annotations). Subscripting span objects via \code{[} extracts subsets of spans; subscripting via \code{$} extracts integer vectors with the sequence of values of the named slot. 
There are several additional methods for class \code{"Span"}: \code{print()} and \code{format()}; \code{c()} combines spans (or objects coercible to these using \code{as.Span()}), and \code{as.list()} and \code{as.data.frame()} coerce, respectively, to lists (of single span objects) and data frames (with spans and slots corresponding to rows and columns). Finally, one can add a scalar and a span object (resulting in shifting the start and end positions by the scalar). \code{Span()} creates span objects from the given sequences of start and end positions, which must have the same length. \code{as.Span()} coerces to span objects, with a method for annotation objects. \code{is.Span()} tests whether an object inherits from class \code{"Span"} (and hence returns \code{TRUE} for both span and annotation objects). } \value{ For \code{Span()} and \code{as.Span()}, a span object (of class \code{"Span"}). For \code{is.Span()}, a logical. } NLP/man/CoNLLTextDocument.Rd0000644000175100001440000000737214715041203015244 0ustar hornikusers\name{CoNLLTextDocument} \alias{CoNLLTextDocument} \title{CoNLL-Style Text Documents} \description{ Create text documents from CoNLL-style files. } \usage{ CoNLLTextDocument(con, encoding = "unknown", format = "conll00", meta = list()) } \arguments{ \item{con}{a connection object or a character string. See \code{\link{scan}()} for details. } \item{encoding}{encoding to be assumed for input strings. See \code{\link{scan}()} for details. } \item{format}{a character vector specifying the format. See \bold{Details}. } \item{meta}{a named or empty list of document metadata tag-value pairs.} } \details{ CoNLL-style files use an extended tabular format where empty lines separate sentences, and non-empty lines consist of whitespace separated columns giving the word tokens and annotations for these. 
Such formats were popularized through their use for the shared tasks of CoNLL (Conference on Natural Language Learning), the yearly meeting of the Special Interest Group on Natural Language Learning of the Association for Computational Linguistics (see \url{https://www.signll.org/content/conll/} for more information about CoNLL). The precise format can vary according to corpus, and must be specified via argument \code{format}, as either a character string giving a pre-defined format, or otherwise a character vector with elements giving the names of the \sQuote{fields} (columns), and names used to give the field \sQuote{types}, with \sQuote{WORD}, \sQuote{POS} and \sQuote{CHUNK} to be used for, respectively, word tokens, POS tags, and chunk tags. For example, \preformatted{ c(WORD = "WORD", POS = "POS", CHUNK = "CHUNK")} would be a format specification appropriate for the CoNLL-2000 chunking task, as also available as the pre-defined \code{"conll00"}, which serves as default format for reasons of back-compatibility. Other pre-defined formats are \code{"conll01"} (for the CoNLL-2001 clause identification task), \code{"conll02"} (for the CoNLL-2002 language-independent named entity recognition task), \code{"conllx"} (for the CoNLL-X format used in at least the CoNLL-2006 and CoNLL-2007 multilingual dependency parsing tasks), and \code{"conll09"} (for the CoNLL-2009 shared task on syntactic and semantic dependencies in multiple languages). The lines are read from the given connection and split into fields using \code{\link{scan}()}. From this, a suitable representation of the provided information is obtained, and returned as a CoNLL text document object inheriting from classes \code{"CoNLLTextDocument"} and \code{"\link{TextDocument}"}. 
There are methods for class \code{"CoNLLTextDocument"} and generics \code{\link{words}()}, \code{\link{sents}()}, \code{\link{tagged_words}()}, \code{\link{tagged_sents}()}, and \code{\link{chunked_sents}()} (as well as \code{\link{as.character}()}), which should be used to access the text in such text document objects. The methods for generics \code{\link{tagged_words}()} and \code{\link{tagged_sents}()} provide a mechanism for mapping POS tags via the \code{map} argument, see section \bold{Details} in the help page for \code{\link{tagged_words}()} for more information. The POS tagset used will be inferred from the \code{POS_tagset} metadata element of the CoNLL-style text document. } \value{ An object inheriting from \code{"CoNLLTextDocument"} and \code{"\link{TextDocument}"}. } \seealso{ \code{\link{TextDocument}} for basic information on the text document infrastructure employed by package \pkg{NLP}. \url{https://www.clips.uantwerpen.be/conll2000/chunking/} for the CoNLL-2000 chunking task, and training and test data sets which can be read in using \code{CoNLLTextDocument()}. } NLP/man/datetime.Rd0000644000175100001440000000366413333066244013574 0ustar hornikusers\name{datetime} \alias{parse_ISO_8601_datetime} \title{Parse ISO 8601 Date/Time Strings} \description{ Extract date/time components from strings following one of the six formats specified in the NOTE-datetime ISO 8601 profile (\url{https://www.w3.org/TR/NOTE-datetime}). } \arguments{ \item{x}{a character vector.} } \details{ For character strings in one of the formats in the profile, the corresponding date/time components are extracted, with seconds and decimal fractions of seconds combined. Other (malformed) strings are warned about. The extracted components for each string are gathered into a named list with elements of the appropriate type (integer for year to min; double for sec; character for the time zone designator). The object returned is a (suitably classed) list of such named lists. 
This internal representation may change in future versions. One can subscript such ISO 8601 date/time objects using \code{[} and extract components using \code{$} (where missing components will result in \code{NA}s), and convert them to the standard R date/time classes using \code{\link{as.Date}()}, \code{\link{as.POSIXct}()} and \code{\link{as.POSIXlt}()} (incomplete elements will convert to suitably missing elements). In addition, there are \code{print()} and \code{as.data.frame()} methods for such objects. } \value{ An object inheriting from class \code{"ISO_8601_datetime"} with the extracted date/time components. } \examples{ ## Use the examples from https://www.w3.org/TR/NOTE-datetime, plus one ## in UTC. x <- c("1997", "1997-07", "1997-07-16", "1997-07-16T19:20+01:00", "1997-07-16T19:20:30+01:00", "1997-07-16T19:20:30.45+01:00", "1997-07-16T19:20:30.45Z") y <- parse_ISO_8601_datetime(x) y ## Conversions: note that "incomplete" elements are converted to ## "missing". as.Date(y) as.POSIXlt(y) ## Subscripting and extracting components: head(y, 3) y$mon } NLP/man/tokenizers.Rd0000644000175100001440000000415313333064645014170 0ustar hornikusers\name{tokenizers} \alias{Regexp_Tokenizer} \alias{blankline_tokenizer} \alias{whitespace_tokenizer} \alias{wordpunct_tokenizer} \title{Regexp tokenizers} \description{ Tokenizers using regular expressions to match either tokens or separators between tokens. 
} \usage{ Regexp_Tokenizer(pattern, invert = FALSE, ..., meta = list()) blankline_tokenizer(s) whitespace_tokenizer(s) wordpunct_tokenizer(s) } \arguments{ \item{pattern}{a character string giving the regular expression to use for matching.} \item{invert}{a logical indicating whether to match separators between tokens.} \item{...}{further arguments to be passed to \code{\link{gregexpr}()}.} \item{meta}{a named or empty list of tokenizer metadata tag-value pairs.} \item{s}{a \code{\link{String}} object, or something coercible to this using \code{\link{as.String}()} (e.g., a character string with appropriate encoding information).} } \details{ \code{Regexp_Tokenizer()} creates regexp span tokenizers which use the given \code{pattern} and \code{...} arguments to match tokens or separators between tokens via \code{\link{gregexpr}()}, and then transform the results of this into character spans of the tokens found. \code{whitespace_tokenizer()} tokenizes by treating any sequence of whitespace characters as a separator. \code{blankline_tokenizer()} tokenizes by treating any sequence of blank lines as a separator. \code{wordpunct_tokenizer()} tokenizes by matching sequences of alphabetic characters and sequences of (non-whitespace) non-alphabetic characters. } \value{ \code{Regexp_Tokenizer()} returns the created regexp span tokenizer. \code{blankline_tokenizer()}, \code{whitespace_tokenizer()} and \code{wordpunct_tokenizer()} return the spans of the tokens found in \code{s}. } \seealso{ \code{\link{Span_Tokenizer}()} for general information on span tokenizer objects. } \examples{ ## A simple text. s <- String(" First sentence. Second sentence. 
") ## ****5****0****5****0****5****0****5** spans <- whitespace_tokenizer(s) spans s[spans] spans <- wordpunct_tokenizer(s) spans s[spans] } NLP/man/utils.Rd0000644000175100001440000000113212313516303013115 0ustar hornikusers\name{utils} \alias{next_id} \alias{single_feature} \title{Annotation Utilities} \description{Utilities for creating annotation objects.} \usage{ next_id(id) single_feature(value, tag) } \arguments{ \item{id}{an integer vector of annotation ids.} \item{value}{an \R object.} \item{tag}{a character string.} } \details{ \code{next_id()} obtains the next \dQuote{available} id based on the given annotation ids (one more than the maximal non-missing id). \code{single_feature()} creates a single feature from the given value and tag (i.e., a named list with the value named by the tag). } NLP/man/ngrams.Rd0000644000175100001440000000113513333064622013254 0ustar hornikusers\name{ngrams} \alias{ngrams} \title{Compute N-Grams} \description{ Compute the \eqn{n}-grams (contiguous sub-sequences of length \eqn{n}) of a given sequence. } \arguments{ \item{x}{a sequence (vector).} \item{n}{a positive integer giving the length of contiguous sub-sequences to be computed.} } \value{ a list with the computed sub-sequences. 
} \examples{ s <- "The quick brown fox jumps over the lazy dog" ## Split into words: w <- strsplit(s, " ", fixed = TRUE)[[1L]] ## Word tri-grams: ngrams(w, 3L) ## Word tri-grams pasted together: vapply(ngrams(w, 3L), paste, "", collapse = " ") } NLP/man/Annotation.Rd0000644000175100001440000001513013357140760014103 0ustar hornikusers\name{Annotation} \alias{Annotation} \alias{as.Annotation} \alias{as.Annotation.Span} \alias{is.Annotation} \alias{[.Annotation} %% \alias{[<-.Annotation} \alias{[[.Annotation} %% \alias{[[<-.Annotation} \alias{$<-.Annotation} \alias{as.data.frame.Annotation} \alias{as.list.Annotation} \alias{c.Annotation} \alias{duplicated.Annotation} \alias{format.Annotation} \alias{length.Annotation} \alias{merge.Annotation} \alias{meta.Annotation} \alias{meta<-.Annotation} \alias{names.Annotation} \alias{print.Annotation} \alias{subset.Annotation} \alias{unique.Annotation} \title{Annotation objects} \description{ Creation and manipulation of annotation objects. } \usage{ Annotation(id = NULL, type = NULL, start, end, features = NULL, meta = list()) as.Annotation(x, ...) \method{as.Annotation}{Span}(x, id = NULL, type = NULL, ...) 
is.Annotation(x) } \arguments{ \item{id}{an integer vector giving the annotation ids, or \code{NULL} (default) resulting in missing ids.} \item{type}{a character vector giving the annotation types, or \code{NULL} (default) resulting in missing types.} \item{start, end}{integer vectors giving the start and end positions of the character spans the annotations refer to.} \item{features}{a list of (named or empty) feature lists, or \code{NULL} (default), resulting in empty feature lists.} \item{meta}{a named or empty list of annotation metadata tag-value pairs.} \item{x}{an \R object (an object of class \code{"\link{Span}"} for the coercion methods for such objects).} \item{...}{further arguments passed to or from other methods.} } \details{ A single annotation (of natural language text) is a quintuple with \dQuote{slots} \sQuote{id}, \sQuote{type}, \sQuote{start}, \sQuote{end}, and \sQuote{features}. These give, respectively, id and type, the character span the annotation refers to, and a collection of annotation features (tag/value pairs). Annotation objects provide sequences (allowing positional access) of single annotations, together with metadata about these. They have class \code{"Annotation"} and, as they contain character spans, also inherit from class \code{"\link{Span}"}. Span objects can be coerced to annotation objects via \code{as.Annotation()} which allows to specify ids and types (using the default values sets these to missing), and annotation objects can be coerced to span objects using \code{\link{as.Span}()}. The features of a single annotation are represented as named or empty lists. Subscripting annotation objects via \code{[} extracts subsets of annotations; subscripting via \code{$} extracts the sequence of values of the named slot, i.e., an integer vector for \sQuote{id}, \sQuote{start}, and \sQuote{end}, a character vector for \sQuote{type}, and a list of named or empty lists for \sQuote{features}. 
There are several additional methods for class \code{"Annotation"}: \code{print()} and \code{format()} (which both have a \code{values} argument which if \code{FALSE} suppresses indicating the feature map values); \code{c()} combines annotations (or objects coercible to these using \code{as.Annotation()}); \code{merge()} merges annotations by combining the feature lists of annotations with otherwise identical slots; \code{subset()} allows subsetting by expressions involving the slot names; and \code{as.list()} and \code{as.data.frame()} coerce, respectively, to lists (of single annotation objects) and data frames (with annotations and slots corresponding to rows and columns). \code{Annotation()} creates annotation objects from the given sequences of slot values: those not \code{NULL} must all have the same length (the number of annotations in the object). \code{as.Annotation()} coerces to annotation objects, with a method for span objects. \code{is.Annotation()} tests whether an object inherits from class \code{"Annotation"}. } \value{ For \code{Annotation()} and \code{as.Annotation()}, an annotation object (of class \code{"Annotation"} also inheriting from class \code{"Span"}). For \code{is.Annotation()}, a logical. } \examples{ ## A simple text. s <- String(" First sentence. Second sentence. ") ## ****5****0****5****0****5****0****5** ## Basic sentence and word token annotations for the text. 
a1s <- Annotation(1 : 2, rep.int("sentence", 2L), c( 3L, 20L), c(17L, 35L)) a1w <- Annotation(3 : 6, rep.int("word", 4L), c( 3L, 9L, 20L, 27L), c( 7L, 16L, 25L, 34L)) ## Use c() to combine these annotations: a1 <- c(a1s, a1w) a1 ## Subscripting via '[': a1[3 : 4] ## Subscripting via '$': a1$type ## Subsetting according to slot values, directly: a1[a1$type == "word"] ## or using subset(): subset(a1, type == "word") ## We can subscript string objects by annotation objects to extract the ## annotated substrings: s[subset(a1, type == "word")] ## We can also subscript by lists of annotation objects: s[annotations_in_spans(subset(a1, type == "word"), subset(a1, type == "sentence"))] ## Suppose we want to add the sentence constituents (the ids of the ## words in the respective sentences) to the features of the sentence ## annotations. The basic computation is lapply(annotations_in_spans(a1[a1$type == "word"], a1[a1$type == "sentence"]), function(a) a$id) ## For annotations, we need lists of feature lists: features <- lapply(annotations_in_spans(a1[a1$type == "word"], a1[a1$type == "sentence"]), function(e) list(constituents = e$id)) ## Could add these directly: a2 <- a1 a2$features[a2$type == "sentence"] <- features a2 ## Note how the print() method summarizes the features. ## We could also write a sentence constituent annotator ## (note that annotators should always have formals 's' and 'a', even ## though for computing the sentence constituents s is not needed): sent_constituent_annotator <- Annotator(function(s, a) { i <- which(a$type == "sentence") features <- lapply(annotations_in_spans(a[a$type == "word"], a[i]), function(e) list(constituents = e$id)) Annotation(a$id[i], a$type[i], a$start[i], a$end[i], features) }) sent_constituent_annotator(s, a1) ## Can use merge() to merge the annotations: a2 <- merge(a1, sent_constituent_annotator(s, a1)) a2 ## Equivalently, could have used a2 <- annotate(s, sent_constituent_annotator, a1) a2 ## which merges automatically. 
} NLP/man/annotators.Rd0000644000175100001440000002111312520713754014157 0ustar hornikusers\name{annotators} \alias{Simple_Para_Token_Annotator} \alias{Simple_Sent_Token_Annotator} \alias{Simple_Word_Token_Annotator} \alias{Simple_POS_Tag_Annotator} \alias{Simple_Entity_Annotator} \alias{Simple_Chunk_Annotator} \alias{Simple_Stem_Annotator} \alias{Simple annotator generators} \title{Simple annotator generators} \description{ Create annotator objects for composite basic NLP tasks based on functions performing simple basic tasks. } \usage{ Simple_Para_Token_Annotator(f, meta = list(), classes = NULL) Simple_Sent_Token_Annotator(f, meta = list(), classes = NULL) Simple_Word_Token_Annotator(f, meta = list(), classes = NULL) Simple_POS_Tag_Annotator(f, meta = list(), classes = NULL) Simple_Entity_Annotator(f, meta = list(), classes = NULL) Simple_Chunk_Annotator(f, meta = list(), classes = NULL) Simple_Stem_Annotator(f, meta = list(), classes = NULL) } \arguments{ \item{f}{a function performing a \dQuote{simple} basic NLP task (see \bold{Details}).} \item{meta}{an empty or named list of annotator (pipeline) metadata tag-value pairs.} \item{classes}{a character vector or \code{NULL} (default) giving classes to be used for the created annotator object in addition to the default ones (see \bold{Details}).} } \details{ The purpose of these functions is to facilitate the creation of annotators for basic NLP tasks as described below. \code{Simple_Para_Token_Annotator()} creates \dQuote{simple} paragraph token annotators. Argument \code{f} should be a paragraph tokenizer, which takes a string \code{s} with the whole text to be processed, and returns the spans of the paragraphs in \code{s}, or an annotation object with these spans and (possibly) additional features. The generated annotator inherits from the default classes \code{"Simple_Para_Token_Annotator"} and \code{"Annotator"}. 
It uses the results of the simple paragraph tokenizer to create and return annotations with unique ids and type \sQuote{paragraph}. \code{Simple_Sent_Token_Annotator()} creates \dQuote{simple} sentence token annotators. Argument \code{f} should be a sentence tokenizer, which takes a string \code{s} with the whole text to be processed, and returns the spans of the sentences in \code{s}, or an annotation object with these spans and (possibly) additional features. The generated annotator inherits from the default classes \code{"Simple_Sent_Token_Annotator"} and \code{"Annotator"}. It uses the results of the simple sentence tokenizer to create and return annotations with unique ids and type \sQuote{sentence}, possibly combined with sentence constituent features for already available paragraph annotations. \code{Simple_Word_Token_Annotator()} creates \dQuote{simple} word token annotators. Argument \code{f} should be a simple word tokenizer, which takes a string \code{s} giving a sentence to be processed, and returns the spans of the word tokens in \code{s}, or an annotation object with these spans and (possibly) additional features. The generated annotator inherits from the default classes \code{"Simple_Word_Token_Annotator"} and \code{"Annotator"}. It uses already available sentence token annotations to extract the sentences and obtains the results of the word tokenizer for these. It then adds the sentence character offsets and unique word token ids, and word token constituents features for the sentences, and returns the word token annotations combined with the augmented sentence token annotations. \code{Simple_POS_Tag_Annotator()} creates \dQuote{simple} POS tag annotators. Argument \code{f} should be a simple POS tagger, which takes a character vector giving the word tokens in a sentence, and returns either a character vector with the tags, or a list of feature maps with the tags as \sQuote{POS} feature and possibly other features. 
The generated annotator inherits from the default classes \code{"Simple_POS_Tag_Annotator"} and \code{"Annotator"}. It uses already available sentence and word token annotations to extract the word tokens for each sentence and obtains the results of the simple POS tagger for these, and returns annotations for the word tokens with the features obtained from the POS tagger. \code{Simple_Entity_Annotator()} creates \dQuote{simple} entity annotators. Argument \code{f} should be a simple entity detector (\dQuote{named entity recognizer}) which takes a character vector giving the word tokens in a sentence, and return an annotation object with the \emph{word} token spans, a \sQuote{kind} feature giving the kind of the entity detected, and possibly other features. The generated annotator inherits from the default classes \code{"Simple_Entity_Annotator"} and \code{"Annotator"}. It uses already available sentence and word token annotations to extract the word tokens for each sentence and obtains the results of the simple entity detector for these, transforms word token spans to character spans and adds unique ids, and returns the combined entity annotations. \code{Simple_Chunk_Annotator()} creates \dQuote{simple} chunk annotators. Argument \code{f} should be a simple chunker, which takes as arguments character vectors giving the word tokens and the corresponding POS tags, and returns either a character vector with the chunk tags, or a list of feature lists with the tags as \sQuote{chunk_tag} feature and possibly other features. The generated annotator inherits from the default classes \code{"Simple_Chunk_Annotator"} and \code{"Annotator"}. It uses already available annotations to extract the word tokens and POS tags for each sentence and obtains the results of the simple chunker for these, and returns word token annotations with the chunk features (only). \code{Simple_Stem_Annotator()} creates \dQuote{simple} stem annotators. 
Argument \code{f} should be a simple stemmer, which takes as arguments a character vector giving the word tokens, and returns a character vector with the corresponding word stems. The generated annotator inherits from the default classes \code{"Simple_Stem_Annotator"} and \code{"Annotator"}. It uses already available annotations to extract the word tokens, and returns word token annotations with the corresponding stem features (only). In all cases, if the underlying simple processing function returns annotation objects these should not provide their own ids (or use such in the features), as the generated annotators will necessarily provide these (the already available annotations are only available at the annotator level, but not at the simple processing level). } \value{ An annotator object inheriting from the given classes and the default ones. } \seealso{ Package \pkg{openNLP} which provides annotator generators for sentence and word tokens, POS tags, entities and chunks, using processing functions based on the respective Apache OpenNLP MaxEnt processing resources. } \examples{ ## A simple text. s <- String(" First sentence. Second sentence. ") ## ****5****0****5****0****5****0****5** ## A very trivial sentence tokenizer. sent_tokenizer <- function(s) { s <- as.String(s) m <- gregexpr("[^[:space:]][^.]*\\\\.", s)[[1L]] Span(m, m + attr(m, "match.length") - 1L) } ## (Could also use Regexp_Tokenizer() with the above regexp pattern.) sent_tokenizer(s) ## A simple sentence token annotator based on the sentence tokenizer. sent_token_annotator <- Simple_Sent_Token_Annotator(sent_tokenizer) sent_token_annotator a1 <- annotate(s, sent_token_annotator) a1 ## Extract the sentence tokens. s[a1] ## A very trivial word tokenizer. word_tokenizer <- function(s) { s <- as.String(s) ## Remove the last character (should be a period when using ## sentences determined with the trivial sentence tokenizer). s <- substring(s, 1L, nchar(s) - 1L) ## Split on whitespace separators. 
m <- gregexpr("[^[:space:]]+", s)[[1L]] Span(m, m + attr(m, "match.length") - 1L) } lapply(s[a1], word_tokenizer) ## A simple word token annotator based on the word tokenizer. word_token_annotator <- Simple_Word_Token_Annotator(word_tokenizer) word_token_annotator a2 <- annotate(s, word_token_annotator, a1) a2 ## Extract the word tokens. s[subset(a2, type == "word")] ## A simple word token annotator based on wordpunct_tokenizer(): word_token_annotator <- Simple_Word_Token_Annotator(wordpunct_tokenizer, list(description = "Based on wordpunct_tokenizer().")) word_token_annotator a2 <- annotate(s, word_token_annotator, a1) a2 ## Extract the word tokens. s[subset(a2, type == "word")] } NLP/man/TaggedTextDocument.Rd0000644000175100001440000000661114714616161015535 0ustar hornikusers\name{TaggedTextDocument} \alias{TaggedTextDocument} \title{POS-Tagged Word Text Documents} \description{ Create text documents from files containing POS-tagged words. } \usage{ TaggedTextDocument(con, encoding = "unknown", word_tokenizer = whitespace_tokenizer, sent_tokenizer = Regexp_Tokenizer("\n", invert = TRUE), para_tokenizer = blankline_tokenizer, sep = "/", meta = list()) } \arguments{ \item{con}{a connection object or a character string. See \code{\link{readLines}()} for details. } \item{encoding}{encoding to be assumed for input strings. See \code{\link{readLines}()} for details. 
} \item{word_tokenizer}{a function for obtaining the word token spans.} \item{sent_tokenizer}{a function for obtaining the sentence token spans.} \item{para_tokenizer}{a function for obtaining the paragraph token spans, or \code{NULL} in which case no paragraph tokenization is performed.} \item{sep}{the character string separating the word tokens and their POS tags.} \item{meta}{a named or empty list of document metadata tag-value pairs.} } \details{ \code{TaggedTextDocument()} creates documents representing natural language text as suitable collections of POS-tagged words, based on using \code{\link{readLines}()} to read text lines from connections providing such collections. The text read is split into paragraph, sentence and tagged word tokens using the span tokenizers specified by arguments \code{para_tokenizer}, \code{sent_tokenizer} and \code{word_tokenizer}. By default, paragraphs are assumed to be separated by blank lines, sentences by newlines and tagged word tokens by whitespace. Finally, word tokens and their POS tags are obtained by splitting the tagged word tokens according to \code{sep}. From this, a suitable representation of the provided collection of POS-tagged words is obtained, and returned as a tagged text document object inheriting from classes \code{"TaggedTextDocument"} and \code{"\link{TextDocument}"}. There are methods for generics \code{\link{words}()}, \code{\link{sents}()}, \code{\link{paras}()}, \code{\link{tagged_words}()}, \code{\link{tagged_sents}()}, and \code{\link{tagged_paras}()} (as well as \code{\link{as.character}()}) and class \code{"TaggedTextDocument"}, which should be used to access the text in such text document objects. The methods for generics \code{\link{tagged_words}()}, \code{\link{tagged_sents}()} and \code{\link{tagged_paras}()} provide a mechanism for mapping POS tags via the \code{map} argument, see section \bold{Details} in the help page for \code{\link{tagged_words}()} for more information. 
The POS tagset used will be inferred from the \code{POS_tagset} metadata element of the CoNLL-style text document. } \value{ A tagged text document object inheriting from \code{"TaggedTextDocument"} and \code{"\link{TextDocument}"}. } \seealso{ \url{https://www.nltk.org/nltk_data/packages/corpora/brown.zip} which provides the W. N. Francis and H. Kucera Brown tagged word corpus as an archive of files which can be read in using \code{TaggedTextDocument()}. Package \pkg{tm.corpus.Brown} available from the repository at \url{https://datacube.wu.ac.at} conveniently provides this corpus as a \pkg{tm} \link[tm]{VCorpus} of tagged text documents. } NLP/man/tagsets.Rd0000644000175100001440000000401314654102717013442 0ustar hornikusers\name{tagsets} \alias{Penn_Treebank_POS_tags} \alias{Brown_POS_tags} \alias{Universal_POS_tags} \alias{Universal_POS_tags_map} \title{NLP Tag Sets} \description{ Tag sets frequently used in Natural Language Processing. } \usage{ Penn_Treebank_POS_tags Brown_POS_tags Universal_POS_tags Universal_POS_tags_map } \details{ \code{Penn_Treebank_POS_tags} and \code{Brown_POS_tags} provide, respectively, the Penn Treebank POS tags (\url{https://catalog.ldc.upenn.edu/docs/LDC95T7/cl93.html}, Table 2) and the POS tags used for the Brown corpus (\url{https://en.wikipedia.org/wiki/Brown_Corpus}), both as data frames with the following variables: \describe{ \item{entry}{a character vector with the POS tags} \item{description}{a character vector with short descriptions of the tags} \item{examples}{a character vector with examples for the tags} } \code{Universal_POS_tags} provides the universal POS tagset introduced by Slav Petrov, Dipanjan Das, and Ryan McDonald (\doi{10.48550/arXiv.1104.2086}), as a data frame with character variables \code{entry} and \code{description}. 
\code{Universal_POS_tags_map} is a named list of mappings from language and treebank specific POS tagsets to the universal POS tags, with elements named \samp{en-ptb} and \samp{en-brown} giving the mappings, respectively, for the Penn Treebank and Brown POS tags. } \source{ \url{https://catalog.ldc.upenn.edu/docs/LDC95T7/cl93.html}, % As of 2024-07, \url{http://www.hit.uib.no/icame/brown/bcm.html} no % longer works, so use the download info from Wikipedia. \url{http://www.nltk.org/nltk_data/}, \url{https://github.com/slavpetrov/universal-pos-tags}. } \examples{ ## Penn Treebank POS tags dim(Penn_Treebank_POS_tags) ## Inspect first 20 entries: write.dcf(head(Penn_Treebank_POS_tags, 20L)) ## Brown POS tags dim(Brown_POS_tags) ## Inspect first 20 entries: write.dcf(head(Brown_POS_tags, 20L)) ## Universal POS tags Universal_POS_tags ## Available mappings to universal POS tags names(Universal_POS_tags_map) } NLP/man/Tagged_Token.Rd0000644000175100001440000000433712510736106014326 0ustar hornikusers\name{Tagged_Token} \alias{Tagged_Token} \alias{as.Tagged_Token} \alias{is.Tagged_Token} \alias{[.Tagged_Token} %% \alias{[<-.Tagged_Token} \alias{[[.Tagged_Token} %% \alias{[[<-.Tagged_Token} \alias{$<-.Tagged_Token} \alias{as.data.frame.Tagged_Token} \alias{as.list.Tagged_Token} \alias{c.Tagged_Token} \alias{duplicated.Tagged_Token} \alias{format.Tagged_Token} \alias{length.Tagged_Token} \alias{names.Tagged_Token} \alias{print.Tagged_Token} \alias{unique.Tagged_Token} \title{Tagged_Token objects} \description{ Creation and manipulation of tagged token objects. } \usage{ Tagged_Token(token, tag) as.Tagged_Token(x) is.Tagged_Token(x) } \arguments{ \item{token, tag}{character vectors giving tokens and the corresponding tags.} \item{x}{an \R object.} } \details{ A tagged token is a pair with \dQuote{slots} \sQuote{token} and \sQuote{tag}, giving the token and the corresponding tag. Tagged token objects provide sequences (allowing positional access) of single tagged tokens. 
They have class \code{"Tagged_Token"}. Subscripting tagged token objects via \code{[} extracts subsets of tagged tokens; subscripting via \code{$} extracts character vectors with the sequence of values of the named slot. There are several additional methods for class \code{"Tagged_Token"}: \code{print()} and \code{format()} (which concatenate tokens and tags separated by \samp{/}); \code{c()} combines tagged token objects (or objects coercible to these using \code{as.Tagged_Token()}), and \code{as.list()} and \code{as.data.frame()} coerce, respectively, to lists (of single tagged token objects) and data frames (with tagged tokens and slots corresponding to rows and columns). \code{Tagged_Token()} creates tagged token objects from the given sequences of tokens and tags, which must have the same length. \code{as.Tagged_Token()} coerces to tagged token objects, with a method for \code{\link{TextDocument}} objects using \code{\link{tagged_words}()}. \code{is.Tagged_Token()} tests whether an object inherits from class \code{"Tagged_Token"}. } \value{ For \code{Tagged_Token()} and \code{as.Tagged_Token()}, a tagged token object (of class \code{"Tagged_Token"}). For \code{is.Tagged_Token()}, a logical. } NLP/man/annotations_in_spans.Rd0000644000175100001440000000214213333064566016222 0ustar hornikusers\name{annotations_in_spans} \alias{annotations_in_spans} \title{Annotations contained in character spans} \description{ Extract annotations contained in character spans. } \usage{ annotations_in_spans(x, y) } \arguments{ \item{x}{an \code{\link{Annotation}} object.} \item{y}{a \code{\link{Span}} object, or something coercible to this (such as an \code{\link{Annotation}} object).} } \value{ A list with elements the annotations in \code{x} with character spans contained in the respective elements of \code{y}. } \examples{ ## A simple text. s <- String(" First sentence. Second sentence. 
") ## ****5****0****5****0****5****0****5** ## Basic sentence and word token annotation for the text. a <- c(Annotation(1 : 2, rep.int("sentence", 2L), c( 3L, 20L), c(17L, 35L)), Annotation(3 : 6, rep.int("word", 4L), c( 3L, 9L, 20L, 27L), c( 7L, 16L, 25L, 34L))) ## Annotation for word tokens according to sentence: annotations_in_spans(a[a$type == "word"], a[a$type == "sentence"]) } NLP/man/language.Rd0000644000175100001440000000701414716700067013560 0ustar hornikusers\name{language} \alias{parse_IETF_language_tag} \alias{language} \title{Parse IETF Language Tag} \description{ Extract language, script, region and variant subtags from IETF language tags. } \usage{parse_IETF_language_tag(x, expand = FALSE, strict = TRUE)} \arguments{ \item{x}{a character vector with IETF language tags.} \item{expand}{a logical indicating whether to expand subtags into their description(s).} \item{strict}{a logical indicating whether invalid language tags should result in an error (default) or not.} } \details{ Internet Engineering Task Force (IETF) language tags are defined by \href{https://www.rfc-editor.org/info/bcp47}{IETF BCP 47}, which is currently composed by the normative \href{https://datatracker.ietf.org/doc/html/rfc5646}{RFC 5646} and \href{https://datatracker.ietf.org/doc/html/rfc4647}{RFC 4647}, along with the normative content of the \href{https://www.iana.org/assignments/language-subtag-registry/}{IANA Language Subtag Registry} regulated by these RFCs. These tags are used in a number of modern computing standards. Each language tag is composed of one or more \dQuote{subtags} separated by hyphens. Normal language tags have the following subtags: \itemize{ \item a language subtag (optionally, with language extension subtags), \item an optional script subtag, \item an optional region subtag, \item optional variant subtags, \item optional extension subtags, \item an optional private use subtag. 
} Language subtags are mainly derived from ISO 639-1 and ISO 639-2, script subtags from ISO 15924, and region subtags from ISO 3166-1 alpha-2 and UN M.49, see package \pkg{ISOcodes} for more information about these standards. Variant subtags are not derived from any standard. The Language Subtag Registry (\url{https://www.iana.org/assignments/language-subtag-registry}), maintained by the Internet Assigned Numbers Authority (IANA), lists the current valid public subtags, as well as the so-called \dQuote{grandfathered} language tags. See \url{https://en.wikipedia.org/wiki/IETF_language_tag} for more information. } \value{ If \code{expand} is false, a list of character vectors of the form \code{"\var{type}=\var{subtag}"}, where \var{type} gives the type of the corresponding subtag (one of \sQuote{Language}, \sQuote{Extlang}, \sQuote{Script}, \sQuote{Region}, \sQuote{Variant}, or \sQuote{Extension}), or \code{"\var{type}=\var{tag}"} with \var{type} either \sQuote{Privateuse} or \sQuote{Grandfathered}. Otherwise, a list of lists of character vectors obtained by replacing the subtags by their corresponding descriptions (which may be multiple) from the IANA registry. Note that no such descriptions for Extension and Privateuse subtags are available in the registry; on the other hand, empty expansions of the other subtags indicate malformed tags (as these subtags must be available in the registry). 
} \examples{ ## German as used in Switzerland: parse_IETF_language_tag("de-CH") ## Serbian written using Latin script as used in Serbia and Montenegro: parse_IETF_language_tag("sr-Latn-CS") ## Spanish appropriate to the UN Latin American and Caribbean region: parse_IETF_language_tag("es-419") ## All in one: parse_IETF_language_tag(c("de-CH", "sr-Latn-CS", "es-419")) parse_IETF_language_tag(c("de-CH", "sr-Latn-CS", "es-419"), expand = TRUE) ## Two grandfathered tags: parse_IETF_language_tag(c("i-klingon", "zh-min-nan"), expand = TRUE) } \keyword{utilities} NLP/man/Tokenizer.Rd0000644000175100001440000000544413333070646013751 0ustar hornikusers\name{Tokenizer} \alias{Span_Tokenizer} \alias{as.Span_Tokenizer} \alias{is.Span_Tokenizer} \alias{Token_Tokenizer} \alias{as.Token_Tokenizer} \alias{is.Token_Tokenizer} \title{Tokenizer objects} \description{ Create tokenizer objects. } \usage{ Span_Tokenizer(f, meta = list()) as.Span_Tokenizer(x, ...) Token_Tokenizer(f, meta = list()) as.Token_Tokenizer(x, ...) } \arguments{ \item{f}{a tokenizer function taking the string to tokenize as argument, and returning either the tokens (for \code{Token_Tokenizer}) or their spans (for \code{Span_Tokenizer}).} \item{meta}{a named or empty list of tokenizer metadata tag-value pairs.} \item{x}{an \R object.} \item{...}{further arguments passed to or from other methods.} } \details{ Tokenization is the process of breaking a text string up into words, phrases, symbols, or other meaningful elements called tokens. This can be accomplished by returning the sequence of tokens, or the corresponding spans (character start and end positions). We refer to tokenization resources of the respective kinds as \dQuote{token tokenizers} and \dQuote{span tokenizers}. 
\code{Span_Tokenizer()} and \code{Token_Tokenizer()} return tokenizer objects which are functions with metadata and suitable class information, which in turn can be used for converting between the two kinds using \code{as.Span_Tokenizer()} or \code{as.Token_Tokenizer()}. It is also possible to coerce annotator (pipeline) objects to tokenizer objects, provided that the annotators provide suitable token annotations. By default, word tokens are used; this can be controlled via the \code{type} argument of the coercion methods (e.g., use \code{type = "sentence"} to extract sentence tokens). There are also \code{print()} and \code{format()} methods for tokenizer objects, which use the \code{description} element of the metadata if available. } \seealso{ \code{\link{Regexp_Tokenizer}()} for creating regexp span tokenizers. } \examples{ ## A simple text. s <- String(" First sentence. Second sentence. ") ## ****5****0****5****0****5****0****5** ## Use a pre-built regexp (span) tokenizer: wordpunct_tokenizer wordpunct_tokenizer(s) ## Turn into a token tokenizer: tt <- as.Token_Tokenizer(wordpunct_tokenizer) tt tt(s) ## Of course, in this case we could simply have done s[wordpunct_tokenizer(s)] ## to obtain the tokens from the spans. ## Conversion also works the other way round: package 'tm' provides ## the following token tokenizer function: scan_tokenizer <- function(x) scan(text = as.character(x), what = "character", quote = "", quiet = TRUE) ## Create a token tokenizer from this: tt <- Token_Tokenizer(scan_tokenizer) tt(s) ## Turn into a span tokenizer: st <- as.Span_Tokenizer(tt) st(s) ## Checking tokens from spans: s[st(s)] } NLP/man/annotate.Rd0000644000175100001440000000442313333064776013613 0ustar hornikusers\name{annotate} \alias{annotate} \title{Annotate text strings} \description{ Compute annotations by iteratively calling the given annotators with the given text and current annotations, and merging the newly computed annotations with the current ones. 
} \usage{ annotate(s, f, a = Annotation()) } \arguments{ \item{s}{a \code{\link{String}} object, or something coercible to this using \code{\link{as.String}} (e.g., a character string with appropriate encoding information).} \item{f}{an \code{\link{Annotator}} or \code{\link{Annotator_Pipeline}} object, or something coercible to the latter via \code{\link{as.Annotator_Pipeline}()} (such as a list of annotator objects).} \item{a}{an \code{\link{Annotation}} object giving the annotations to start with.} } \value{ An \code{\link{Annotation}} object containing the iteratively computed and merged annotations. } \examples{ ## A simple text. s <- String(" First sentence. Second sentence. ") ## ****5****0****5****0****5****0****5** ## A very trivial sentence tokenizer. sent_tokenizer <- function(s) { s <- as.String(s) m <- gregexpr("[^[:space:]][^.]*\\\\.", s)[[1L]] Span(m, m + attr(m, "match.length") - 1L) } ## (Could also use Regexp_Tokenizer() with the above regexp pattern.) ## A simple sentence token annotator based on the sentence tokenizer. sent_token_annotator <- Simple_Sent_Token_Annotator(sent_tokenizer) ## Annotate sentence tokens. a1 <- annotate(s, sent_token_annotator) a1 ## A very trivial word tokenizer. word_tokenizer <- function(s) { s <- as.String(s) ## Remove the last character (should be a period when using ## sentences determined with the trivial sentence tokenizer). s <- substring(s, 1L, nchar(s) - 1L) ## Split on whitespace separators. m <- gregexpr("[^[:space:]]+", s)[[1L]] Span(m, m + attr(m, "match.length") - 1L) } ## A simple word token annotator based on the word tokenizer. word_token_annotator <- Simple_Word_Token_Annotator(word_tokenizer) ## Annotate word tokens using the already available sentence token ## annotations. 
a2 <- annotate(s, word_token_annotator, a1) a2 ## Can also perform sentence and word token annotations in a pipeline: p <- Annotator_Pipeline(sent_token_annotator, word_token_annotator) annotate(s, p) } NLP/man/Annotator.Rd0000644000175100001440000000634013112000174013720 0ustar hornikusers\name{Annotator} \alias{Annotator} \alias{Annotator_Pipeline} \alias{as.Annotator_Pipeline} \title{Annotator (pipeline) objects} \description{ Create annotator (pipeline) objects. } \usage{ Annotator(f, meta = list(), classes = NULL) Annotator_Pipeline(..., meta = list()) as.Annotator_Pipeline(x) } \arguments{ \item{f}{an annotator function, which must have formals \code{s} and \code{a} giving, respectively, the string with the natural language text to annotate and an annotation object to start from, and return an annotation object with the computed annotations.} \item{meta}{an empty or named list of annotator (pipeline) metadata tag-value pairs.} \item{classes}{a character vector or \code{NULL} (default) giving classes to be used for the created annotator object in addition to \code{"Annotator"}.} \item{...}{annotator objects.} \item{x}{an \R object.} } \details{ \code{Annotator()} checks that the given annotator function has the appropriate formals, and returns an annotator object which inherits from the given classes and \code{"Annotator"}. There are \code{print()} and \code{format()} methods for such objects, which use the \code{description} element of the metadata if available. \code{Annotator_Pipeline()} creates an annotator pipeline object from the given annotator objects. Such pipeline objects can be used by \code{\link{annotate}()} for successively computing and merging annotations, and can also be obtained by coercion with \code{as.Annotator_Pipeline()}, which currently handles annotator objects and lists of such (and of course, annotator pipeline objects). 
} \value{ For \code{Annotator()}, an annotator object inheriting from the given classes and class \code{"Annotator"}. For \code{Annotator_Pipeline()} and \code{as.Annotator_Pipeline()}, an annotator pipeline object inheriting from class \code{"Annotator_Pipeline"}. } \seealso{ \link{Simple annotator generators} for creating \dQuote{simple} annotator objects based on function performing simple basic NLP tasks. Package \pkg{StanfordCoreNLP} available from the repository at \url{https://datacube.wu.ac.at} which provides generators for annotator pipelines based on the Stanford CoreNLP tools. } \examples{ ## Use blankline_tokenizer() for a simple paragraph token annotator: para_token_annotator <- Annotator(function(s, a = Annotation()) { spans <- blankline_tokenizer(s) n <- length(spans) ## Need n consecutive ids, starting with the next "free" ## one: from <- next_id(a$id) Annotation(seq(from = from, length.out = n), rep.int("paragraph", n), spans$start, spans$end) }, list(description = "A paragraph token annotator based on blankline_tokenizer().")) para_token_annotator ## Alternatively, use Simple_Para_Token_Annotator(). ## A simple text with two paragraphs: s <- String(paste(" First sentence. Second sentence. ", " Second paragraph. ", sep = "\n\n")) a <- annotate(s, para_token_annotator) ## Annotations for paragraph tokens. a ## Extract paragraph tokens. s[a] } NLP/man/features.Rd0000644000175100001440000000310114654100232013571 0ustar hornikusers\name{features} \alias{features} \title{Extract Annotation Features} \description{ Conveniently extract features from annotations and annotated plain text documents. } \usage{ features(x, type = NULL, simplify = TRUE) } \arguments{ \item{x}{an object inheriting from class \code{"Annotation"} or \code{"AnnotatedPlainTextDocument"}.} \item{type}{a character vector of annotation types to be used for selecting annotations, or \code{NULL} (default) to use all annotations. 
When selecting, the elements of \code{type} will partially be matched against the annotation types.} \item{simplify}{a logical indicating whether to simplify feature values to a vector.} } \details{ \code{features()} conveniently gathers all feature tag-value pairs in the selected annotations into a data frame with variables the values for all tags found (using a \code{NULL} value for tags without a value). In general, variables will be \emph{lists} of extracted values. By default, variables where all elements are length one atomic vectors are simplified into an atomic vector of values. The values for specific tags can be extracted by suitably subscripting the obtained data frame. } \examples{ ## Use a pre-built annotated plain text document, ## see ? AnnotatedPlainTextDocument. d <- readRDS(system.file("texts", "stanford.rds", package = "NLP")) ## Extract features of all *word* annotations in doc: x <- features(d, "word") ## Could also have abbreviated "word" to "w". x ## Only lemmas: x$lemma ## Words together with lemmas: paste(words(d), x$lemma, sep = "/") } NLP/man/AnnotatedPlainTextDocument.Rd0000644000175100001440000000673214654100175017243 0ustar hornikusers\name{AnnotatedPlainTextDocument} \alias{AnnotatedPlainTextDocument} \alias{annotation} \title{Annotated Plain Text Documents} \description{ Create annotated plain text documents from plain text and collections of annotations for this text. 
} \usage{ AnnotatedPlainTextDocument(s, a, meta = list()) annotation(x) } \arguments{ \item{s}{a \code{\link{String}} object, or something coercible to this using \code{\link{as.String}()} (e.g., a character string with appropriate encoding information).} \item{a}{an \code{\link{Annotation}} object with annotations for \code{s}.} \item{meta}{a named or empty list of document metadata tag-value pairs.} \item{x}{an object inheriting from class \code{"AnnotatedPlainTextDocument"}.} } \details{ Annotated plain text documents combine plain text with annotations for the text. A typical workflow is to use \code{\link{annotate}()} with suitable annotator pipelines to obtain the annotations, and then use \code{AnnotatedPlainTextDocument()} to combine these with the text being annotated. This yields an object inheriting from \code{"AnnotatedPlainTextDocument"} and \code{"\link{TextDocument}"}, from which the text and annotations can be obtained using, respectively, \code{\link{as.character}()} and \code{annotation()}. There are methods for class \code{"AnnotatedPlainTextDocument"} and generics \code{\link{words}()}, \code{\link{sents}()}, \code{\link{paras}()}, \code{\link{tagged_words}()}, \code{\link{tagged_sents}()}, \code{\link{tagged_paras}()}, \code{\link{chunked_sents}()}, \code{\link{parsed_sents}()} and \code{\link{parsed_paras}()} providing structured views of the text in such documents. These all require the necessary annotations to be available in the annotation object used. The methods for generics \code{\link{tagged_words}()}, \code{\link{tagged_sents}()} and \code{\link{tagged_paras}()} provide a mechanism for mapping POS tags via the \code{map} argument, see section \bold{Details} in the help page for \code{\link{tagged_words}()} for more information. The POS tagset used will be inferred from the \code{POS_tagset} metadata element of the annotation object used. 
} \value{ For \code{AnnotatedPlainTextDocument()}, an annotated plain text document object inheriting from \code{"AnnotatedPlainTextTextDocument"} and \code{"\link{TextDocument}"}. For \code{annotation()}, an \code{\link{Annotation}} object. } \seealso{ \code{\link{TextDocument}} for basic information on the text document infrastructure employed by package \pkg{NLP}. } \examples{ ## Use a pre-built annotated plain text document obtained by employing an ## annotator pipeline from package 'StanfordCoreNLP', available from the ## repository at , using the following code: ## require("StanfordCoreNLP") ## s <- paste("Stanford University is located in California.", ## "It is a great university.") ## p <- StanfordCoreNLP_Pipeline(c("pos", "lemma", "parse")) ## d <- AnnotatedPlainTextDocument(s, p(s)) d <- readRDS(system.file("texts", "stanford.rds", package = "NLP")) d ## Extract available annotation: a <- annotation(d) a ## Structured views: sents(d) tagged_sents(d) tagged_sents(d, map = Universal_POS_tags_map) parsed_sents(d) ## Add (trivial) paragraph annotation: s <- as.character(d) a <- annotate(s, Simple_Para_Token_Annotator(blankline_tokenizer), a) d <- AnnotatedPlainTextDocument(s, a) ## Structured view: paras(d) } NLP/DESCRIPTION0000644000175100001440000000122714717314035012436 0ustar hornikusersPackage: NLP Version: 0.3-2 Title: Natural Language Processing Infrastructure Authors@R: person("Kurt", "Hornik", role = c("aut", "cre"), email = "Kurt.Hornik@R-project.org", comment = c(ORCID = "0000-0003-4198-9911")) Description: Basic classes and methods for Natural Language Processing. License: GPL-3 Imports: utils Depends: R (>= 3.5.0) Enhances: udpipe, spacyr, cleanNLP Encoding: UTF-8 NeedsCompilation: no Packaged: 2024-11-20 07:53:13 UTC; hornik Author: Kurt Hornik [aut, cre] () Maintainer: Kurt Hornik Repository: CRAN Date/Publication: 2024-11-20 08:04:45 UTC