blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
327
content_id
stringlengths
40
40
detected_licenses
listlengths
0
91
license_type
stringclasses
2 values
repo_name
stringlengths
5
134
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
46 values
visit_date
timestamp[us]date
2016-08-02 22:44:29
2023-09-06 08:39:28
revision_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
committer_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
github_id
int64
19.4k
671M
star_events_count
int64
0
40k
fork_events_count
int64
0
32.4k
gha_license_id
stringclasses
14 values
gha_event_created_at
timestamp[us]date
2012-06-21 16:39:19
2023-09-14 21:52:42
gha_created_at
timestamp[us]date
2008-05-25 01:21:32
2023-06-28 13:19:12
gha_language
stringclasses
60 values
src_encoding
stringclasses
24 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
7
9.18M
extension
stringclasses
20 values
filename
stringlengths
1
141
content
stringlengths
7
9.18M
f72982c7600c2575ff701726b0318a85f4e912a0
1f27ad535035612c2731e95cbca8e86b43e043c1
/R/inla.climate.mu.R
62a0d0f39f95d29aba8e32bed3804d6b83c82f4f
[]
no_license
eirikmn/INLA.climate
8417e699d97b24b4d408174c03b8a6896e30d9ee
c467e6e2f9fd2e4341cbc1bf15b9629b82eb7089
refs/heads/master
2023-02-08T15:06:14.385088
2023-01-27T09:52:09
2023-01-27T09:52:09
168,170,133
2
1
null
null
null
null
UTF-8
R
false
false
5,780
r
inla.climate.mu.R
# inla.climate.mu
#
# Monte Carlo estimation of the forced temperature response mu(t) for a fitted
# climate model. Hyperparameter configurations are drawn from the INLA
# posterior and, for each draw, a compiled C routine computes the mu
# trajectory; the draws are then summarised into mean, sd and (unless
# quick=TRUE) quantiles.
#
# Arguments:
#   result         - an "inla.climate" object or a raw "inla" fit; anything
#                    else stops with an error.
#   forcing        - numeric forcing series; its length n sets the length of
#                    the returned mean/sd/quantile vectors.
#   quick          - if TRUE, do not keep per-iteration samples and skip the
#                    quantile computation (only mean and sd are produced).
#   T0.corr        - additive offset applied to all mu summaries; defaults to
#                    the T0 stored in the input object, or 0 if absent.
#   nsamples       - number of Monte Carlo draws from the hyperparameter
#                    posterior.
#   seed           - RNG seed for reproducible sampling.
#   print.progress - print status messages when TRUE.
#   model          - "fgn"/"arfima" or "ar1"; forcibly switched to "ar1" when
#                    the hyperparameter sample has more than 4 columns.
#
# Returns: the input inla.climate object augmented with $mu (plus $time and
# $misc bookkeeping), or, for a plain inla input, a list with mean/sd and
# (unless quick=TRUE) quantiles and raw samples.
inla.climate.mu = function(result,forcing,quick=FALSE,T0.corr=NULL,nsamples=100000,seed=1234,
                           print.progress=FALSE,model="fgn"){
  # INLA is not a hard dependency: attach it on the fly and stop with an
  # installation hint when it is missing.
  catch = tryCatch(attachNamespace("INLA"),error=function(x){})
  if(length(find.package("INLA",quiet=TRUE))==0){
    stop("This function requires INLA. Please install at www.R-INLA.org or by calling 'install.packages(\"INLA\", repos=c(getOption(\"repos\"), INLA=\"https://inla.r-inla-download.org/R/testing\"), dep=TRUE)' from R.")
  }
  # Accept either the package's wrapper object or a bare inla fit; extract the
  # underlying inla result and the stored T0 correction (default 0).
  if(class(result)=="inla.climate"){
    climate.res = result$inla.result
    if(is.null(T0.corr)){
      if(!is.null(result$misc$T0)){
        T0.corr = result$misc$T0
      }else{
        T0.corr=0
      }
    }
  }else if(class(result)=="inla"){
    climate.res = result
    if(is.null(T0.corr)){
      if(!is.null(result$climate.misc$T0)){
        T0.corr = result$climate.misc$T0
      }else{
        T0.corr=0
      }
    }
  }else{
    stop("Input 'result' not a valid class.")
  }
  if(print.progress){
    cat("Starting mu Monte Carlo sampling with n=",format(nsamples,scientific=F)," simulations..\n",sep="")
  }
  set.seed(seed)
  inla.seed = as.integer(runif(1)*.Machine$integer.max)
  #n=climate.res$misc$configs$contents$length[1]
  # Number of time steps is taken from the forcing series.
  n=length(forcing)
  #x = inla.posterior.sample(nsamples,r,seed=inla.seed)
  #int.strategy=grid
  # Draw nsamples joint hyperparameter configurations from the posterior.
  x = INLA::inla.hyperpar.sample(nsamples,climate.res)
  # More than 4 hyperparameter columns implies the AR(1)-mixture model.
  if(dim(x)[2]>4){
    model = "ar1"
  }
  if(model %in% c("fgn","arfima")){
    # Map internal INLA scales to natural scales:
    # col 1: H in (0.5, 1) via a shifted logistic; col 2: precision -> sd;
    # col 3: F0 taken directly (the commented line is an older logistic map).
    hyperpars = matrix(NA,nrow=nsamples,ncol=4) #c(H,sf,F0,TCR)
    hyperpars[,1] = 0.5+0.5/(1+exp(-x[,2]))
    hyperpars[,2] = 1/sqrt(exp(x[,3]))
    a=3
    hyperpars[,3] = x[,4]
    #hyperpars[,3] = -a+2*a/(1+exp(-x[,4]))
  }else{
    # AR(1)-mixture model: sigma_f and F0 here, mixture weights (w) and lag
    # parameters (L = p - 1) are sampled via inla.climate.ar1 below.
    hyperpars = matrix(NA,nrow=nsamples,ncol=(2)) #c(H,sf,F0,w1,...,wm,L1,...,Lm)
    hyperpars[,1] = 1/sqrt(exp(x[,2]))
    a=3
    #hyperpars[,2] = -a+2*a/(1+exp(-x[,3]))
    hyperpars[,2] = x[,3]
    m = (dim(x)[2]-2)/2
    ar1.temp= inla.climate.ar1(climate.res,m=m,nsamples=nsamples,seed=seed,print.progress=print.progress)
    ww = matrix(NA,nrow=nsamples,ncol=m)
    LL = matrix(NA,nrow=nsamples,ncol=m)
    if(m == 1){
      # Single component: weight is identically 1, lambda from the p marginal.
      ww = rep(1,nsamples)
      LL = INLA::inla.rmarginal(nsamples,ar1.temp$p$density)-1 #lambda
    }else{
      for(k in 1:m){
        ww[,k] = ar1.temp[[paste0("w",k)]]$samples
        LL[,k] = ar1.temp[[paste0("p",k)]]$samples-1 #lambda
      }
    }
  }
  tid.start = proc.time()[[3]]
  #if(!is.loaded('Rc_mu')){ #print('hallo')
  #  dyn.load(file.path(.Library,"INLA.climate/libs/Rc_mu.so")) #dyn.load('./src/colmeansr.so')
  #}
  if(!quick){
    # Full sample matrix only kept when quantiles/samples are requested.
    mu.samples=matrix(NA,ncol=n,nrow=nsamples)
  }
  meansmc=numeric(n)
  # Running sums of mu and mu^2 for the streaming mean/sd computation.
  xsumvec = numeric(n)
  x2sumvec =numeric(n)
  for(iter in 1:nsamples){
    if(model %in% c("fgn","arfima")){
      # Compiled routine fills 'mumeans' with the mu trajectory for this draw.
      # NOTE(review): kernel semantics live in the C source — not visible here.
      res = .C('Rc_mu',mumeans=as.matrix(meansmc,ncol=1),as.double(forcing),as.integer(length(forcing)),
               as.double(hyperpars[iter,1]),as.double(hyperpars[iter,2]),as.double(hyperpars[iter,3]))
    }else if(model == "ar1"){
      if(!is.loaded('Rc_mu_ar1')){
        #dyn.load(file.path(.Library,"INLA.climate/libs/Rc_Q.so"))
        dyn.load(file.path("Rc_mu_ar1.so"))
      }
      if(m == 1){
        res = .C('Rc_mu_ar1',mumeans=as.matrix(meansmc,ncol=1),as.double(forcing),as.integer(length(forcing)),as.integer(m),
                 as.double(1),as.double(LL[iter]),as.double(hyperpars[iter,1]),
                 as.double(hyperpars[iter,2]))
      }else{
        res = .C('Rc_mu_ar1',mumeans=as.matrix(meansmc,ncol=1),as.double(forcing),as.integer(length(forcing)),as.integer(m),
                 as.double(ww[iter,]),as.double(LL[iter,]),as.double(hyperpars[iter,1]),
                 as.double(hyperpars[iter,2]))
      }
    }
    if(!quick){
      mu.samples[iter,]=res$mumeans
    }
    xsumvec = xsumvec + res$mumeans
    x2sumvec = x2sumvec + res$mumeans^2
  }
  # Sample mean and (n-1)-denominator sd from the accumulated sums.
  mu.mean = as.numeric(xsumvec/nsamples)
  mu.sd=as.numeric(sqrt( 1/(nsamples-1)*( x2sumvec -2*mu.mean*xsumvec + nsamples*mu.mean^2 ) ))
  if(!quick){
    # Pointwise quantiles via a kernel density estimate at each time step.
    mu.quant0.025 = numeric(n)
    mu.quant0.5 = numeric(n)
    mu.quant0.975 = numeric(n)
    for(iter in 1:n){
      dens = density(mu.samples[,iter])
      mu.quant0.025[iter]=INLA::inla.qmarginal(0.025,dens)
      mu.quant0.5[iter]=INLA::inla.qmarginal(0.5,dens)
      mu.quant0.975[iter]=INLA::inla.qmarginal(0.975,dens)
    }
  }
  tid.slutt = proc.time()[[3]]
  tid.mc=tid.slutt-tid.start
  if(print.progress){
    cat("Finished mu Monte Carlo sampling procedure in ",tid.mc," seconds\n",sep="")
  }
  # All mu summaries are shifted by the T0 correction on the way out.
  ret = list(mean=mu.mean+T0.corr, sd = mu.sd)
  if(!quick){
    ret$quant0.025=mu.quant0.025+T0.corr
    ret$quant0.5=mu.quant0.5+T0.corr
    ret$quant0.975=mu.quant0.975+T0.corr
    if(model %in% c("fgn","arfima")){
      ret$samples=list(mu=mu.samples+T0.corr,
                       H=hyperpars[,1],sigmaf=hyperpars[,2],F0=hyperpars[,3])
    }else if(model == "ar1"){
      ret$samples=list(mu=mu.samples+T0.corr,
                       sigmaf=hyperpars[,1],F0=hyperpars[,2])
      if(m==1){
        ret$samples$p = LL+1
      }else{
        for(k in 1:m){
          ret$samples[[paste0("w",k)]] = ww[,k]
        }
        for(k in 1:m){
          ret$samples[[paste0("p",k)]] = LL[,k]+1
        }
      }
    }
  }
  # Wrapper input: fold results and timing back into the inla.climate object;
  # plain inla input: return the standalone result list.
  if(class(result) == "inla.climate"){
    if(print.progress){
      print("Exporting inla.climate object")
    }
    result$mu = ret
    result$time$mu = tid.mc
    result$time$Total = result$time$Total + tid.mc
    result$misc$mu.options$nsamples = nsamples
    result$misc$mu.options$seed = seed
    # compute.mu flag records which mode produced $mu (2 = quick, 1 = full).
    if(quick){
      compute.mu = 2
    }else{
      compute.mu=1
    }
    result$misc$mu.options$compute.mu = compute.mu
    return(result)
  }else{
    if(print.progress){
      print("Exporting list object")
    }
    ret$time = tid.mc
    return(ret)
  }
}
29fdf7e79eaf0a213695cda48b17c7f8889fd820
dc681828c7f3845280e7585574ed376d56b77138
/CAVIAR/ecaviar_eqtl.R
868a4217ce826ff8ae3867464eb7a32ca28187bc
[]
no_license
egeoffroy/Advanced_BIOI_Final
111d2f1185c7a22deced76322beb06aa3d675993
a0c57391744b1e442a77bbd9c8714c49261f104c
refs/heads/master
2021-04-11T17:31:50.819852
2020-04-07T15:51:40
2020-04-07T15:51:40
249,040,864
1
0
null
null
null
null
UTF-8
R
false
false
617
r
ecaviar_eqtl.R
# ecaviar_eqtl.R
#
# Build per-chromosome eQTL Z-score input files for (e)CAVIAR: for each
# chromosome 1..22, keep the significant eQTLs whose variant appears in that
# chromosome's SNP list, compute Z = slope / slope_se, deduplicate to one
# value per variant, and write a headerless two-column file.
library(dplyr)
library(data.table)

# Significant eQTL results (fread reads the .gz directly).
eqtl <- fread('coloc/Sig_Height_eQTL_YRI_Height.txt.gz', header = TRUE, stringsAsFactors = FALSE)
eqtl <- unique(eqtl)

for (i in 1:22) {
  # SNP identifiers for this chromosome (no header, so the column is V1).
  snps_1 <- fread(paste0('snps_', i, '.txt'), header = FALSE, stringsAsFactors = FALSE)

  # Restrict to eQTLs whose variant is present in this chromosome's SNP list.
  eqtl1 <- eqtl %>% filter(variant_id %in% snps_1$V1)

  # Z statistic = effect size / standard error, as CAVIAR expects.
  eqtl1$Z <- as.numeric(eqtl1$slope) / as.numeric(eqtl1$slope_se)

  eqtl2 <- data.frame(eqtl1$variant_id, eqtl1$Z)
  eqtl2 <- unique(eqtl2)
  # One Z value per variant: drop any repeated variant ids.
  eqtl2 <- eqtl2[!duplicated(eqtl2$eqtl1.variant_id), ]

  # BUG FIX: the original call omitted the comma after `i`
  # (paste('caviar/eqtl_chr', i '_one_val.txt', sep='')), which is a syntax
  # error that stopped the whole script from parsing.
  write.table(eqtl2, paste0('caviar/eqtl_chr', i, '_one_val.txt'),
              quote = FALSE, row.names = FALSE, col.names = FALSE)
}
664b83c9e5f1fc72348f09305748796f68dbf7bd
e45c6f36a065b6a44e873a773428105de4d3758e
/r-package/R/metadata.R
59eebcd5b5f53aaf5f0e076c857b529d9e04c267
[ "MIT" ]
permissive
basedosdados/mais
080cef1de14376699ef65ba71297e40784410f12
2836c8cfad11c27191f7a8aca5ca26b94808c1da
refs/heads/master
2023-09-05T20:55:27.351309
2023-09-02T03:21:02
2023-09-02T03:21:02
294,702,369
376
98
MIT
2023-08-30T21:17:28
2020-09-11T13:26:45
SQL
UTF-8
R
false
false
4,508
r
metadata.R
#' Search for a dataset by keyword
#' @param search_term keyword for search
#'
#' @return A tibble with search results
#'
#' @importFrom purrr map_chr pluck
#' @importFrom stringr str_replace_all
#' @importFrom rlang .data
#'
#' @export
#' @examples
#'
#' \dontrun{
#'
#' dataset_search("agua")
#' dataset_search("educação")
#'
#'}
#'
#'
dataset_search <- function(search_term) {
  # Rightward assignment (->) is used throughout this file: the API response
  # lands in `search`.
  bd_request(
    endpoint = "dataset_search",
    query = list(
      resource_type = "bdm_table",
      q = search_term,
      page_size = 100)) -> search

  # One row per dataset: normalised name, its tables, landing-page URL, title.
  tibble::tibble(
    dataset_name = purrr::map_chr(
      .x = search$datasets,
      .f = ~ purrr::pluck(.x, "name") %>% stringr::str_replace_all("-", "_")),
    dataset_tables = purrr::map(
      .x = .data$dataset_name,
      .f = basedosdados::list_dataset_tables),
    url = purrr::map_chr(
      .x = search$datasets,
      .f = ~ glue::glue("https://basedosdados.org/dataset/{purrr::pluck(.x, 'id')}")),
    title = purrr::map_chr(
      .x = search$datasets,
      .f = ~ purrr::pluck(.x, "title")))
}

#' List tables in a dataset
#' @param dataset_id a dataset name e.g. if addressing table "br_sp_alesp.deputado" then table_id is `br_sp_alesp`
#' @export
#' @importFrom purrr pluck map_chr discard
#' @importFrom dplyr bind_rows
#' @return A tibble listing all tables in a given dataset
#' @examples
#' \dontrun{
#' list_dataset_tables("br_sp_alesp")
#' }
list_dataset_tables <- function(dataset_id) {
  bd_request(
    endpoint = "bdm_dataset_show",
    query = list(
      dataset_id = dataset_id)) -> results

  # possibly() guards against malformed resources: on error the element
  # becomes the string "Error" instead of aborting the whole listing.
  fetch_function <- purrr::possibly(
    .f = function(resource) {
      tibble::tibble(
        name = ifelse(rlang::is_null(resource$name), NA_character_, resource$name),
        description = ifelse(rlang::is_null(resource$description), NA_character_, resource$description))
    },
    otherwise = "Error")

  # Keep only bdm_table resources and stack them into one tibble.
  results %>%
    purrr::pluck("resources") %>%
    purrr::keep(~ .x$resource_type == "bdm_table") %>%
    purrr::map(fetch_function) %>%
    purrr::reduce(dplyr::bind_rows)
}

#' Get columns in a table
#' @param dataset_id a dataset name e.g. if addressing table "br_sp_alesp.deputado" then table_id is `br_sp_alesp`
#' @param table_id a table name e.g. if addressing table "br_sp_alesp.deputado" then table_id is `deputado`
#'
#' @export
#' @examples
#' \dontrun{
#' get_table_columns("br_sp_alesp", "deputado")
#' }
#' @importFrom httr content
#' @importFrom purrr pluck map reduce
#' @importFrom dplyr bind_rows
#' @return A tibble describing all columns in a table
get_table_columns <- function(
  dataset_id,
  table_id) {

  # Internal bookkeeping columns are dropped before returning to the user.
  bd_request(
    endpoint = "bdm_table_show",
    query = list(
      table_id = table_id,
      dataset_id = dataset_id)) %>%
    purrr::pluck("columns") %>%
    purrr::map(tibble::as_tibble) %>%
    purrr::reduce(dplyr::bind_rows) %>%
    dplyr::select(- c(.data$is_in_staging, .data$is_partition))
}

#' Describe a dataset
#' @param dataset_id a dataset name e.g. if addressing table "br_sp_alesp.deputado" then table_id is `br_sp_alesp`
#'
#' @export
#' @examples
#'
#' \dontrun{
#'
#' get_dataset_description("br_sp_alesp")
#' }
#' @return A tibble describing the specified dataset
get_dataset_description <- function(dataset_id) {
  bd_request(
    endpoint = "bdm_dataset_show",
    query = list(
      dataset_id = dataset_id)) -> result

  # The dataset's tables are nested as a single list-column element.
  tibble::tibble(
    name = result$name,
    title = result$title,
    tables = list(list_dataset_tables(dataset_id)),
    notes = result$notes)
}

#' Describe a table within a dataset
#'
#' @param dataset_id a dataset name e.g. if addressing table "br_sp_alesp.deputado" then table_id is `br_sp_alesp`
#' @param table_id a table name e.g. if addressing table "br_sp_alesp.deputado" then table_id is `deputado`
#' @export
#' @examples
#' \dontrun{
#' get_table_description("br_sp_alesp", "deputado")
#' }
#' @return A tibble describing the specified table
#'
# The `= ? typed::Character(1)` defaults below are the `typed` package's
# runtime type-annotation syntax (scalar character), not base R.
get_table_description <- function(
  dataset_id = ? typed::Character(1),
  table_id = ? typed::Character(1)) {

  bd_request(
    endpoint = "bdm_table_show",
    query = list(
      dataset_id = dataset_id,
      table_id = table_id)) -> result

  tibble::tibble(
    dataset_id = dataset_id,
    table_id = table_id,
    description = result$description,
    columns = result %>%
      purrr::pluck("columns") %>%
      purrr::map(tibble::as_tibble) %>%
      purrr::reduce(dplyr::bind_rows) %>%
      list())
}
e61ba698430e0ccb002ad62814b43e20db085d74
9a9bdabe6174b493c712e3a9d3a9a00b3ec1027e
/man/make_dada2_fasta_name.Rd
b391ba1ca0ff4a637fd129a6597a889acfc9d278
[ "LicenseRef-scancode-warranty-disclaimer" ]
no_license
griffinp/metabarcodedb
51383f41880280b060affde8b9203bdc6fe82bd3
a42f16b59d7575051a2a7d060428e41b008cde6f
refs/heads/master
2021-01-01T15:37:09.011794
2017-07-21T02:44:06
2017-07-21T02:44:06
97,654,579
1
1
null
null
null
null
UTF-8
R
false
true
732
rd
make_dada2_fasta_name.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/file_naming_functions.R \name{make_dada2_fasta_name} \alias{make_dada2_fasta_name} \title{make_dada2_fasta_name} \usage{ make_dada2_fasta_name(taxon, directory, suffix = "_dada2.fasta") } \arguments{ \item{taxon}{taxon name (character string)} \item{directory}{path to the directory containing the fasta file (character string)} \item{suffix}{desired suffix for the file. Defaults to "_dada2.fasta"} } \description{ This function creates the file path for the dada2-formatted fasta file (fasta format) } \examples{ make_dada2_fasta_name(taxon="Chlorophyta", directory="~/Documents/metabarcoding/ref_datafiles/", suffix="_dada2.fasta") } \keyword{unknown}
6211daaaeb6b8bcc51272211c8b5dbc20b6ed60d
c2a3647efa1093e8a8d72b8ec65f2ead24060336
/R/TargetExperimentList-initialize.R
b201f049c6d7d7cbfd26933a055421e2c8973fcb
[]
no_license
gamerino/TarSeqQC
2363d1ffe1dadcc6f14e552840f90fd79b4a1a30
ebdf8ca544f5d5073966cf0a16c912ceeac7202b
refs/heads/master
2021-07-15T03:28:58.735473
2020-01-31T11:24:13
2020-01-31T11:24:13
101,313,918
1
0
null
2017-08-24T16:06:36
2017-08-24T16:06:36
null
UTF-8
R
false
false
5,052
r
TargetExperimentList-initialize.R
#'TargetExperimentList object constructor.
#'
#'\code{initialize} creates the TargetExperimentList object containing the
#'experiment results of several targeted sequencing experiments carried out
#'using a unique bed file.
#'
#'@param .Object TargetExperimentList class.
#'@param TEList List containing all the TargetExperiment objects corresponding
#'to the experiments that will be compared.
#'@param feature Character indicating the name of the feature that will
#'be explored (e.g 'amplicon', 'exon', 'gene').
#'@param attribute Character indicating the name of the attribute that will
#'be explored. Should be 'coverage' or 'medianCounts'.
#'@return TargetExperimentList object.
#'
#'@include TargetExperimentList-TEList.R
#'@exportMethod initialize
#'@docType methods
#'@rdname TargetExperimentList-initialize
#'@import Rsamtools
#'@import GenomicRanges
#'@importFrom stats IQR
#'@importFrom stats aggregate
#'@importFrom stats median
#'@importFrom stats sd
#'@importMethodsFrom BiocGenerics strand
#'@importClassesFrom S4Vectors Rle
#'@importMethodsFrom S4Vectors Rle
#'@importClassesFrom IRanges IRanges
#'@importFrom IRanges IRanges
#'@aliases TargetExperimentList-method
#'@seealso \code{\link{TargetExperimentList}}
#'@note see full example in \code{\link{TargetExperimentList-class}}
#'@family TargetExperimentList
#'@author Gabriela A. Merino \email{gmerino@@bdmg.com.ar}, Cristobal Fresno
#'\email{cfresno@@bdmg.com.ar}, Yanina Murua \email{ymurua@leloir.org.ar},
#'Andrea S. Llera \email{allera@leloir.org.ar} and Elmer A. Fernandez
#'\email{efernandez@bdmg.com.ar}
#'@examples
#'# Defining the set of TargetExperiment objects
#'data(ampliPanel, package="TarSeqQC")
#'data(ampliPanel2, package="TarSeqQC")
#'ampliList<-list(ampliPanel, ampliPanel2)
#'# Defining feature parameter
#'feature<-"amplicon"
#'# Defining attribute parameter
#'attribute<-"coverage"
#'##Calling the constructor
#'object<-TargetExperimentList(TEList=ampliList, attribute=attribute,
#'feature=feature)
#'
setMethod(f="initialize", signature=signature(.Object="TargetExperimentList"),
definition=function(.Object, TEList,feature=NULL, attribute="coverage"){
    ##Set the different slots
    # nargs() >= 2 means the constructor was called with actual data; the
    # else-branch below builds an empty prototype object instead.
    if(nargs() >=2){
        # Unnamed experiments are labelled subject_1, subject_2, ...
        if(is.null(names(TEList))){
            names(TEList)<-paste("subject", 1:length(TEList), sep="_")
        }
        if (length(TEList) <2){
            stop("The TEList should contain at least two TargetExperiment objects")
        }
        # bedFile slot
        # All experiments must share exactly the same bed file; the first one
        # becomes the reference.
        bed<-getBedFile(TEList[[1]])
        bed_df<-as.data.frame(bed)
        for (i in 2:length(TEList)){
            bed_i<-getBedFile(TEList[[i]])
            if(any(bed != bed_i)){
                stop("The TargetExperiments should have the same bed file")
            }
        }
        if(!(all(c("seqnames", "start","end","gene") %in% names(bed_df)))){
            stop("Error! Bed file should contain at least five columns: 'seqnames', 'start', 'end' and 'gene'")
        }
        if(any(duplicated(rownames(bed_df)))){
            stop("Error! Bed file should one row per feature. The provided file has duplicated 'name' IDs.")
        }
        # Rows duplicated in both start and end are treated as duplicated
        # features and dropped with a warning.
        if (any(duplicated(bed_df[,"start"]) & duplicated(bed_df[,"end"]))){
            warning("Your bed file have duplicated features. 
Removing duplicated ...")
            index<-which(duplicated(bed_df[,"start"])& duplicated(bed_df[,
                "end"]))
            bed_df<-bed_df[-index,]
        }
        .Object@bedFile<-bed
        # panels slot
        # Start from the first experiment's feature panel, stripped of its
        # per-experiment summary columns; then append one attribute column
        # per experiment.
        GRpanel<-as.data.frame(getFeaturePanel(TEList[[1]]))
        panel<-GRpanel[,!(colnames(GRpanel) %in% c("coverage", "sdCoverage",
            "medianCounts", "IQRCounts"))]
        panelNames<-colnames(panel)
        # NOTE(review): the `attribute` argument is overwritten here by the
        # attribute stored in the first TargetExperiment — confirm intended.
        attribute<-getAttribute(TEList[[1]])
        if(!(attribute %in% c("coverage", "medianCounts"))){
            stop("Attribute slot should be defined in order to call the function")
        }
        for (i in 1:length(TEList)){
            panel<-cbind(panel, mcols(getFeaturePanel(TEList[[i]]))[, attribute])
        }
        # colnames(panel)<-c(panelNames, paste(attribute,"subject",
        # 1:length(TEList), sep="_"))
        colnames(panel)<-c(panelNames, paste(attribute,names(TEList), sep="_"))
        # Rebuild a GRanges with the combined attribute columns as metadata.
        finalPanel<-GRanges(seqnames=panel[,"seqnames"], ranges=IRanges(start=
            panel[,"start"], end=panel[,"end"], names=rownames(panel)),
            strand=panel[,"strand"])
        mcols(finalPanel)<-panel[,!(names(panel) %in% c("seqnames", "start",
            "end", "strand", "width")), drop=FALSE]
        .Object@panels <-finalPanel
        # feature slot
        .Object@feature<-getFeature(TEList[[1]])
        # attribute slot
        .Object@attribute<-attribute
    }else{
        # Prototype (no-argument) construction: empty slots.
        .Object@bedFile<-GRanges()
        .Object@panels<-GRanges()
        .Object@feature<-character(0)
        .Object@attribute<-character(0)
    }
    ##Check the object's validity
    validObject(.Object)
    return(.Object)
})
914df6163bf8b22153399f3df8779981089b6d45
1313b6a881934a32be3bc23ca950da1571373ebd
/Twiitter Activites - Share Price/TimeSeries Analysis.R
7382718ed5270a216ae713bef719ee16b5028e28
[ "MIT" ]
permissive
PKarhade/Projects-ML-DL
1690d789de061d79cabb62e551b4c997b8554188
06c71efcaa570925a4aa8c070a9918a31447f730
refs/heads/master
2020-04-22T01:02:11.288272
2019-04-22T05:40:07
2019-04-22T05:40:07
170,001,198
0
0
null
null
null
null
UTF-8
R
false
false
1,452
r
TimeSeries Analysis.R
# Time-series analysis of MSFT monthly closing prices: fit ARIMA, auto-ARIMA
# and ETS models, compare fitted values, and use ACF/PACF to choose orders.
library(xts)
library(tseries)
library(TSA)
library(forecast)

# NOTE(review): hard-coded local Windows path; assumes the CSV has a date
# column in %m/%d/%Y format plus a Close column — confirm against the file.
data1 <- read.zoo(file="C:\\Users\\Srikanth\\Desktop\\old\\Datamining\\MSFT.csv",header=TRUE,format="%m/%d/%Y",sep=",");
# Collapse the index to year-month and build a monthly ts of closing prices.
index(data1) = as.yearmon(index(data1))
msft = ts(data1$Close,start=start(data1), end=end(data1),frequency = 12)
time_series = msft
#plot the data, assuming time_series is a ts object
plot(time_series ,col="black", lwd=3)
plot(decompose(time_series))
#create the ARIMA models (see lecture notes)
arima_fit <- Arima(time_series, order= c(1,1,1), seasonal = c(0,0,0))
auto_arima_fit <- auto.arima(time_series, stepwise=FALSE, seasonal=FALSE, approximation=FALSE, D=1)
ets_fit <- ets(time_series)
#We can force a seasonal ets by explictly specifying the model type as shown below
#ets_fit <- ets(time_series, model="ZZM")
#plot the models. red is the hard-coded arima model, blue is the auto-fit arima model, and green is the exponential smoothing model
plot(arima_fit$x,col="black", lwd=3)
lines(fitted(arima_fit),col="red")
lines(fitted(auto_arima_fit),col="blue")
lines(fitted(ets_fit),col="green")
#create the acf plots to determine the level of differencing
acf(time_series)
#create the differenced time series to account for the seasonal pattern
# NOTE(review): lag 4 on a monthly series looks quarterly; a yearly seasonal
# difference would be lag 12 — confirm which was intended.
diff_time_series <- diff(time_series, 4)
#create the acf and pacf plots to determine the order of the AR and MA terms
acf(diff_time_series)
pacf(diff_time_series)
#error analysis of the models
summary(arima_fit)
edfd3ede40496dd2c42efb0a50d24e8ba44067e8
4d391985cb3d99eb4e0d88f777bf70c4dd8d1327
/06_0_Load_or_compute_species_files.r
e19d507ff5aeb079749e4c85553468c6fa81872e
[]
no_license
naiamh/Predictor_decomposition
f03c1f21967b0bc21dd09b03398e6198957f0748
5f937cd425caeca073aabc0f5d8b322d418b1e54
refs/heads/master
2020-12-24T18:03:50.140827
2015-06-12T23:30:18
2015-06-12T23:30:18
37,349,456
0
0
null
null
null
null
UTF-8
R
false
false
2,952
r
06_0_Load_or_compute_species_files.r
# Scripts to load in species specific data for plotting trailing and leading
# edges.
# If files are missing, relevant models and projections are run
# The script requires that directories have been defined already, as well as species name
#
# NOTE(review): this file is sourced by a driver script and depends on globals
# it does not define (mySpecies, sp.data.dir, out.dir, mxModelType,
# pres.clim.data.dir, fut.clim.data.dir, allScenarios, orig.project,
# ta.project, and the helpers getOccur/runMxModel/getClim) — confirm the
# caller sets all of them.

# Load occurrence data. Create if it doesn't exist.
occurData = paste(sp.data.dir,"/", mySpecies, ".rdata", sep="")
if(!file.exists(occurData)) {
    print(paste("No occurrence data. Extracting from Ecoengine species...",mySpecies))
    getOccur(mySpecies=mySpecies, db="Ecoengine", out.dir=sp.data.dir,
        in.project=orig.project, out.project=ta.project, save=T)
}
occur = readRDS(occurData)
rm(occurData)

# Load maxent model. Run model if it doesn't exist.
mx.obj = paste(out.dir, mySpecies, mxModelType,'ModelObject', sep="/")
if(!file.exists(mx.obj)) {
    print(paste("No maxent model found. Computing for...", mxModelType))
    # Climate layers: all CA.img rasters, minus their .xml sidecar files.
    env.files <- list.files(path=pres.clim.data.dir, pattern='CA.img', full.names=FALSE)
    env.files = env.files[-grep("xml",env.files)]
    runMxModel(mySpecies, mxModelType, env.files, pres.clim.data.dir, sp.data.dir, out.dir)
    rm(env.files)
}
mx = readRDS(mx.obj)
rm(mx.obj)

# Load present projection for Bay Area. Project if it doesn't exist.
proj.dir = paste(out.dir, mySpecies, mxModelType, "Projections", sep="/")
pres.pred = paste(proj.dir, "Present_Bay.img", sep="/")
if(!file.exists(pres.pred)) {
    print(paste("No present projection. Getting climate data..."))
    pres.clim = getClim(env.dir=pres.clim.data.dir, region="Bay", period="HST",mxModelType=mxModelType)
    print("Projecting...")
    names(pres.clim) = paste(names(pres.clim),"1981_2010_ave_HST_CA",sep="") #match names for mx object
    if(!file.exists(proj.dir)) {dir.create(proj.dir, recursive=F)}
    predict(mx, pres.clim, filename=paste(proj.dir, "Present_Bay.img", sep="/"), overwrite=F)
    rm(pres.clim)
}
present.bay = raster(pres.pred)
rm(pres.pred)

# Load future projections (all scenarios) for Bay Area. Project if they don't exist.
# NOTE(review): only the first scenario's file is checked for existence; if it
# exists the remaining scenarios are assumed to exist as well.
fut.preds = paste(proj.dir, "/", allScenarios, "_Suitability_Bay.img",sep="")
if(!file.exists(fut.preds[1])){
    for (i in 1:length(allScenarios)) {
        myScenario = allScenarios[i]
        future.clim = getClim(env.dir=fut.clim.data.dir, period="Future",
            mxModelType=mxModelType, scen=myScenario)
        names(future.clim) = paste(names(future.clim),"1981_2010_ave_HST_CA",sep="") #match names for mx object
        print(paste("Projecting...", myScenario))
        if(!file.exists(proj.dir)) {dir.create(proj.dir, recursive=F)}
        predict(mx, future.clim, file=paste(proj.dir, "/", myScenario, "_Suitability_Bay.img", sep=""), overwrite=F)
        rm(future.clim,myScenario)
    }
}
futures.bay = stack(fut.preds)
rm(fut.preds)

# Set directory to write figures to.
fig.dir = paste(out.dir, mySpecies, "Figures", sep="/")
if(file.exists(fig.dir) == F) {dir.create(fig.dir, recursive=F)}
ce3a1fb61bf479964f9a075f82d8e3c78a9127e4
eab78e955aaee69c14d206c8e5bd76cf246a2372
/man/render.Rd
68fef8a199e5ffd1b152863250608f081f917370
[ "MIT" ]
permissive
rstudio/connectapi
3ad96af4e7935035658bf09aa8082cae4c68ffb5
427ac1fe2eb72d45f6048c376ec94b6c545faf8d
refs/heads/main
2023-08-08T11:55:20.698746
2023-07-10T16:36:27
2023-07-10T16:36:27
167,249,814
34
17
NOASSERTION
2023-08-03T13:26:37
2019-01-23T20:32:57
R
UTF-8
R
false
true
843
rd
render.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/variant.R \name{get_variant_renderings} \alias{get_variant_renderings} \alias{variant_render} \title{Render a Variant} \usage{ get_variant_renderings(variant) variant_render(variant) } \arguments{ \item{variant}{An R6 Variant object. As returned by \code{get_variant()} or \code{get_variant_default()}} } \description{ \lifecycle{experimental} Get details about renderings (i.e. render history) or execute a variant on demand } \details{ \itemize{ \item \code{get_variant_renderings()} returns all renderings / content for a particular variant. Returns a \code{tibble} \item \code{variant_render()} executes a variant on demand. Returns a \code{VariantTask} object } } \seealso{ Other variant functions: \code{\link{get_variants}()} } \concept{variant functions}
745fa4463aea41e478d50cff6d4d65ffc5dc7f80
cc0b3690ac5af79694ab2d78f4c6780a745d7919
/R/arc.write.R
852a4c61da9aec953c0f49298878380f3b685a6b
[ "LicenseRef-scancode-warranty-disclaimer", "Apache-2.0" ]
permissive
amitgajurel/ArcGIS_R_Bridge_raster
06aa337d9de7129d262a7b5ab225886c9e25fee9
763e8d66ec94dcf6a5394c5e7da7aa57c66346fe
refs/heads/master
2020-03-22T08:39:34.883102
2018-04-30T15:32:16
2018-04-30T15:32:16
139,781,749
0
0
null
null
null
null
UTF-8
R
false
false
533
r
arc.write.R
#' Write a dataset to an ArcGIS data source.
#'
#' Resolves an `arc.container` to its path, validates arguments, defaults
#' `overwrite` to FALSE when not supplied, and dispatches to the internal
#' raster or feature writer depending on the class of `data`.
#' @export
arc.write <- function(path, data, ...)
{
  # An arc.container carries its destination in the @path slot.
  if (inherits(path, "arc.container")) {
    path <- path@path
  }
  stopifnot(is.character(path))

  # `data` is optional; normalise a missing argument to NULL.
  if (missing(data)) {
    data <- NULL
  }

  call_args <- list(path = path, data = data, ...)

  # Default overwrite to FALSE; when the caller supplied one, it must be
  # logical.
  ow <- call_args$overwrite
  if (is.null(ow)) {
    call_args$overwrite <- FALSE
  } else {
    stopifnot(is.logical(ow))
  }

  # Raster-like inputs go to the raster writer, everything else is written
  # as features.
  raster_classes <- c("Raster", "arc.raster", "SpatialPixels", "SpatialGrid")
  writer <- if (inherits(data, raster_classes)) .write_raster else .write_feature
  do.call(writer, call_args)
}
672106ad4efa4409ddf70129c40d4b958d5f0809
fb56c48278bb20037275d1c1a53b049d4cdf3ae1
/April_18.R
3d517f7c2e657e881b2c2e9140fbe8b52cfcfa51
[]
no_license
rjcmarkelz/rclub
301306615ecf3852b2dc0b6430ce1b21f3c07c70
1d9da7b9c01fb9751407ff01c071ed813f75883a
refs/heads/master
2021-01-01T19:10:54.366718
2016-01-27T01:04:36
2016-01-27T01:04:36
9,964,173
0
1
null
2016-01-27T01:04:37
2013-05-09T17:12:39
R
UTF-8
R
false
false
551
r
April_18.R
#2013_04_18 #plyr package--- look at hadley library(plyr) setwd("~/Documents/Maloof Lab/R_club/plyr-tutorial/examples") bnames <- read.csv("bnames.csv", stringsAsFactors = FALSE) head(bnames) #Proportion of us children that have a name by in the top 100. head(bnames, n = 100L) names(bnames) ddply(bnames, c("name"), summarise, tot = sum(percent)) bnames.2 <- ddply(bnames, c("year"), head = head(bnames, n = 100L)) bnames.2 <- ddply(bnames[1:100,], ~ year) tail(bnames.2) bnames.2 <- ddply(bnames, c("year", "sex"), summarise, tot = sum(percent))
b0fd6a05feb8b3cbabe13b74d87eb7380d862dbb
9e4df408b72687493cc23144408868a975971f68
/SMS_r_prog/function/msy_batch.r
3343bc16a9e9cf0cfb0ece6cc912605bcfa64c45
[ "MIT" ]
permissive
ices-eg/wg_WGSAM
7402ed21ae3e4a5437da2a6edf98125d0d0e47a9
54181317b0aa2cae2b4815c6d520ece6b3a9f177
refs/heads/master
2023-05-12T01:38:30.580056
2023-05-04T15:42:28
2023-05-04T15:42:28
111,518,540
7
0
null
null
null
null
UTF-8
R
false
false
9,209
r
msy_batch.r
######### end user options ######### do.HCR.batch<-function(do.simulation=T,read.condense=T,do.plots=T,read.detailed=T,cutof.year.detailed=2006,NewPlot=T,stochastich=T,YieldPerRecruit=T) { HCR<-new("FLSMS.predict.control") HCR@years.wsea[1,1]<-min(refYear) HCR@years.wsea[2,1]<-max(refYear) HCR@years.weca<-HCR@years.wsea HCR@years.propmat[1,1]<-min(refYearPropmat) HCR@years.propmat[2,1]<-max(refYearPropmat) HCR@HCR.F.TAC[1]<-0 HCR@rec.noise.input[1]<-0 HCR@inter.year[1]<-1 HCR@rec.noise[1,1]<- recruit.noise.low HCR@rec.noise[2,1]<- recruit.noise.high HCR@last.prediction.year<-2026 control<-read.FLSMS.control(file=file.path(data.path,'SMS.dat')) #recruitment geometric mean s<-Read.summary.data() tmp<-subset(s,Year < (control@last.year.model-1) & Quarter==control@rec.season & Age==control@first.age,select=c(Year,N)) GM.mean.log<-mean(log(tmp$N)) GM.mean<-exp(GM.mean.log) GM.sd.log<-sd(log(tmp$N)) if (stochastich) { HCR@no.MCMC.iterations<-no.MCMC.iterations HCR@read.rec.SSB.parm<-0 include.probability<-T if (YieldPerRecruit) { HCR@read.rec.SSB.parm<-1 pp<-file.path(data.path,'SSB_R.in') cat(paste("#model alfa beta std \n", "3 ",GM.mean.log, 0, GM.sd.log,"\n",sep=' '),file=pp) include.probability<-F } } else { HCR@no.MCMC.iterations<-1 HCR@read.rec.SSB.parm<-1 if (YieldPerRecruit) { pp<-file.path(data.path,'SSB_R.in') cat(paste("#model alfa beta std \n", "3 ",GM.mean.log, 0, GM.sd.log,"\n",sep=' '),file=pp) include.probability<-F } else { p<-Read.SSB.Rec.data() pp<-file.path(data.path,'SSB_R.in') cat("#model alfa beta std \n",file=pp) p<-subset(p,select=c(model,alfa,beta,std)) p$std<-1E-4 write.table(p,file=pp,append=T,row.names=F,col.names=F) include.probability<-F } } # make sms.psv file source(file.path(prog.path,"make_psv.R")) SMS.option<-" " first.year.in.mean<-HCR@last.prediction.year-3 # years where output is independent of initial conditions and used as "equilibrium" last.year.in.mean<-HCR@last.prediction.year-1 #Read refence points from file 
reference_points.in ref<-Read.reference.points() blim<-ref[,"Blim"] bpa<-ref[,"Bpa"] T1<-blim T2<-bpa # files for output ssb.out<-'HCR_SSB.dat' yield.out<-'HCR_yield.dat' F.out<-'HCR_F.dat' prob.out<-'HCR_prob.dat' ssb.out.all<-'mcout_SSB.out.all' yield.out.all<-'mcout_yield.out.all' F.out.all<-'mcout_mean_F.out.all' prob.out.all<-'HCR_prob.dat.all' if (do.simulation) { sp.name<-control@species.names percentiles<-c(0.025,0.05,0.10,0.25,0.50,0.75,0.95,0.975) # headers in output files heading<-"targetF" cat(paste(heading,"Species.n "),file=ssb.out) cat(paste("SSB",formatC(percentiles*1000,width=3,flag='0'),sep=''),file=ssb.out,append=T); cat('\n',file=ssb.out,append=T) cat(paste(heading,"Species.n "),file=yield.out) cat(paste("y",formatC(percentiles*1000,width=3,flag='0'),sep=''),file=yield.out,append=T); cat('\n',file=yield.out,append=T) cat(paste(heading,"Species.n "),file=F.out) cat(paste("F",formatC(percentiles*1000,width=3,flag='0'),sep=''),file=F.out,append=T); cat('\n',file=F.out,append=T) cat(paste(heading,"Species.n "," p.T1 p.T2 \n"),file=prob.out) iter<-0 i<-0 for (targetF in (targetFs)) i<-1+i #print(paste("You have asked for",i,"runs. Do you want to continue? 
(Y/N):")) #a<-readLines(n=1) #if(a=='N') stop("Script stopped'") #print('OK') tot.rep<-i for (targetF in (targetFs)) { iter<-iter+1 print(paste("targetF:",targetF)) print(paste("run no.:",iter, "out of a total of",tot.rep,"runs")) HCR@constant.F[1]<-targetF write.FLSMS.predict.control(HCR,SMS=control,file='HCR_options.dat') #run SMS shell(paste( file.path(data.path,"sms.exe"),"-mceval",SMS.option,sep=" "), invisible = TRUE) condense_file<-function(filename){ file<-file.path(data.path,filename) a<-read.table(file,header=TRUE) a<-subset(a,Year>cutof.year.detailed) a<-data.frame(a,targetF=targetF) file<-paste(file,".all",sep='') if (iter==1) write.table(a, file =file, row.names = F,col.names = T,quote = F) else write.table(a, file =file, row.names =F,col.names =F, quote =F,append=T) } condense_file("mcout_SSB.out") condense_file("mcout_recruit.out") condense_file("mcout_mean_F.out") condense_file("mcout_yield.out") a<-Read.MCMC.SSB.rec.data() a<-subset(a,Year>=first.year.in.mean & Year<=last.year.in.mean ,drop=T) b<-tapply(a$SSB,list(a$Species.n), function(x) quantile(x,probs = percentiles)) for (i in (1:length(b))) { cat(paste(targetF,i,' '),file=ssb.out,append=TRUE) cat(b[[i]],file=ssb.out,append=TRUE) cat('\n',file=ssb.out,append=TRUE) } dummy<-by(a,list(a$Species.n),function(x) { q<-tapply(x$SSB,list(x$Year,x$Repetion,x$Iteration),sum) sp<-x[1,"Species.n"] q[q>T2[sp]]<-0 q[q>0]<-1 p.T2<-sum(q)/(dim(q)[1]*dim(q)[2]*dim(q)[3]) q<-tapply(x$SSB,list(x$Year,x$Repetion,x$Iteration),sum) q[q>T1[sp]]<-0 q[q>0]<-1 p.T1<-sum(q)/(dim(q)[1]*dim(q)[2]*dim(q)[3]) cat(paste(targetF,sp,p.T1,p.T2,'\n'),file=prob.out,append=TRUE) }) a<-Read.MCMC.F.yield.data(dir=data.path) a<-subset(a,Year>=first.year.in.mean & Year<=last.year.in.mean ,drop=T) b<-tapply(a$Yield,list(a$Species.n), function(x) quantile(x,probs = percentiles)) for (i in (1:length(b))) { cat(paste(targetF,i,' '),file=yield.out,append=TRUE) cat(b[[i]],file=yield.out,append=TRUE) cat('\n',file=yield.out,append=TRUE) } 
b<-tapply(a$mean.F,list(a$Species.n), function(x) quantile(x,probs = percentiles)) for (i in (1:length(b))) { cat(paste(targetF,i,' '),file=F.out,append=TRUE) cat(b[[i]],file=F.out,append=TRUE) cat('\n',file=F.out,append=TRUE) } } } # end do.simulations if (read.condense) { # read data and options into FLR objects HCR<-read.FLSMS.predict.control(control=control,file='HCR_options.dat') sp.name<-control@species.names ssb<-read.table(ssb.out,header=TRUE) yield<-read.table(yield.out,header=TRUE) proba<-read.table(prob.out,header=TRUE) fi<-read.table(F.out,header=TRUE) a<-merge(ssb,yield) a<-merge(a,proba) a<-merge(a,fi) condensed<-data.frame(a,targetF.fac=as.factor(a$targetF)) write.csv(condensed,file=file.path(data.path,"condensed_MSY.csv")) } if (read.detailed) { # read data and options into FLR objects control<-read.FLSMS.control() #HCR<-read.FLSMS.predict.control(control=control,file='HCR_options.dat') sp.name<-control@species.names Years<- c(2010,2012,2014,2030) ssb<-read.table(ssb.out.all,header=TRUE) ssb<-subset(ssb,Year %in% Years) cat("Year targetF p.T2 p.T1 Species.n\n",file=prob.out.all) dummy<-by(ssb,list(ssb$Species.n),function(x) { q<-tapply(x$SSB,list(x$Repetion,x$Iteration,x$Year,x$targetF),sum) sp<-x[1,"Species.n"] q[q>T2[sp]]<-0 q[q>0]<-1 p.T2<-apply(q,c(3,4),sum)/ (dim(q)[1]*dim(q)[2]) q<-tapply(x$SSB,list(x$Repetion,x$Iteration,x$Year,x$targetF),sum) q[q>T1[sp]]<-0 q[q>0]<-1 p.T1<-apply(q,c(3,4),sum)/ (dim(q)[1]*dim(q)[2]) a<-arr2dfny(p.T2,name="p.T2") b<-arr2dfny(p.T1,name="p.T1") a<-data.frame(merge(a,b),Species.n=sp) write.table(a,row.names=F,col.names=F,quote=F,file=prob.out.all,append=T) }) #proba<-read.table(prob.out.all,header=TRUE) yield<-read.table(yield.out.all,header=TRUE) yield<-subset(yield,Year %in% Years) fi<-read.table(F.out.all,header=TRUE) fi<-subset(fi,Year %in% Years) a<-merge(ssb,yield) a<-merge(a,fi) detailed<-data.frame(a,Year.fac=as.factor(a$Year),targetF.fac=as.factor(a$targetF)) } if (do.plots) { # read data and options 
into FLR objects control<-read.FLSMS.control() sp.name<-control@species.names ssb<-read.table(ssb.out,header=TRUE) yield<-read.table(yield.out,header=TRUE) prob<-read.table(prob.out,header=TRUE) fi<-read.table(F.out,header=TRUE) a<-merge(ssb,yield) a<-merge(a,prob) a<-merge(a,fi) if (paper) dev<-"wmf" else dev<-"screen" if (NewPlot) newplot(dev,nox=nox,noy=noy,filename=paste("HCR_",sp.name,sep=''),Portrait=T); par(mar=c(4,4,3,5)+.1) # c(bottom, left, top, right) s<-a$SSB500/1000 y<-a$y500/1000 x<-a$targetF x.lab<-'F(1-2)' plot(x,s,ylab='SSB & Yield (1000 t)',xlab=x.lab ,ylim=c(min(s,y,0),max(s,y)),lty=1,type='l',lwd=2,col=1,main=NULL) if (include.probability) { legend(legendPlace, c('SSB','Yield', paste('p(SSB<',round(T1),'t)')), pch=" 1",lty=c(1,2,3),col=c(1,2,4), lwd=rep(2,3)) } else legend(legendPlace,c('SSB','Yield'),pch=" ",lty=c(1,2),lwd=rep(2,2),col=c(1,2)) lines(x,y,lty=2,lwd=2,col=2) if (include.probability) { par(new=T) plot(x,a$p.T1,axes=F,xlab=x.lab, ylab=' ',lty=3,lwd=2,ylim=c(0,1),type='b',pch="1",col=4) abline(h=0.05) axis(side=4) mtext(side=4,line=3.0,"Probability") par(xaxs="r") } #if (paper) cleanup() } #end do.plots }
48d835d5ad4f9f2143c5a7338617c3ba8a2cf1e5
ea5a4eb6717a31b11b78509a672bd6416f825b24
/assignments/assignment_1/HW1.R
00fa0e2b40636cd114f6bdccab944cba37a38c2a
[]
no_license
ChaebinIm/MFE_course_financial_time_series_analysis
5b4d8b110f4ad58a690bd15834a4099042c08494
883d706991e47d7c9a00bf171dc0fd46f41ff92c
refs/heads/main
2023-06-26T12:27:47.390849
2021-07-31T12:43:25
2021-07-31T12:43:25
391,353,382
0
0
null
null
null
null
UTF-8
R
false
false
1,647
r
HW1.R
################# Chapter 2 ########################### # data import install.packages('readxl') install.packages("lmtest") install.packages('fBasics') install.packages("car") library(readxl) library(lmtest) library(fBasics) library(car) setwd('/Users/imchaebin/Desktop/시계열분석/숙제1/data/') # need to be customized data <- read_excel('capm.xls') # add log return # SAP rsandp <- 100*diff(log(data$SANDP)) data$RSANDP <- c(NA, rsandp) # Ford stock ford <- 100*diff(log(data$FORD)) data$RFORD <- c(NA, ford) data # US - TBond : annually to monthly data$USTB3M <- data$USTB3M/12 # Excess return of S&P500 data$ERSANDP <- data$RSANDP - data$USTB3M data$ERFORD <- data$RFORD - data$USTB3M # draw plot (ERSANDP, ERFORD) plot(data$Date, data$ERSANDP, type = 'l', ylim=range(-80, 80), col = 'red') # line # plot(data$Date, data$ERSANDP, ylim=range(-80, 80), col = 'red') # scatter par(new=TRUE) plot(data$Date, data$ERFORD, type = 'l', ylim=range(-80, 80), col = 'blue') # line # plot(data$Date, data$ERFORD, ylim=range(-80, 80), col = 'blue') # scatter # simple linear regression m <- lm(ERFORD ~ ERSANDP, data = data) coef(m) summary(m) plot(ERFORD ~ ERSANDP, data = data) abline(m, col = 'red') #Testing normal distribution and independence assumptions jarqueberaTest(m$resid) #Test residuals for normality #Null Hypothesis: Skewness and Kurtosis are equal to zero dwtest(m) #Test for independence of residuals #Null Hypothesis: Errors are serially UNcorrelated par(mfrow = c(2, 2)) plot(m) # linear regression with constraints linearHypothesis(m, "ERSANDP = 1") linearHypothesis(m, c("(Intercept) = 1", "ERSANDP = 1"))
977e772f319f861215e016a6f74dfa67d5be837f
f5081a73a7e2cae81e35f9c09f9a4b63d402520c
/GlobalVariable/Variable.r
60c020bbac93527de9baf1d422b2063d3e38f887
[]
no_license
Alex-OuYang/FinanceAnalysis
e5099f8b4b6a99671cd1051ee791f37ac2997f18
f8761c56c084399f5c9891d475f1cf9ab6fc76ef
refs/heads/master
2023-07-26T02:03:12.335062
2021-09-05T14:06:35
2021-09-05T14:06:35
393,606,669
0
0
null
null
null
null
UTF-8
R
false
false
1,125
r
Variable.r
# 清除所有環境變數 rm(list=ls()) # 初始系統編碼 encodingStr="utf-8" #Sys.setenv(TZ = "Asia/Taipei") # 設定初始資料夾位置 projectPathStr="C:/Users/alex1/Desktop/FinanceAnalysis" setwd(projectPathStr) # 各檔案列表 # 變數 variablestr=paste0(projectPathStr,"/GlobalVariable/Variable.r") # Func funcDBSelectstr=paste0(projectPathStr,"/Function/FuncDBSelect.r") funcStockInfoToTimeSerialstr=paste0(projectPathStr,"/Function/FuncStockInfoToTimeSerial.r") funcKDCrossstr=paste0(projectPathStr,"/Function/FuncKDCross.r") funcGetStockDatastr=paste0(projectPathStr,"/Function/FuncGetStockData.r") # TradeRule tradeRuleKD=paste0(projectPathStr,"/TradeRule/KD_Golden.r") # Finance 參數 stockno='2330' startDate="2010-01-01" endDate="2021-08-07" dateInterval=paste0(startDate,"/",endDate) stockclass="股票" # 連接 DB 參數 sqlConnString = "driver={SQL Server};server=ALEX-NB\\SQLEXPRESS;database=Finance;;Trusted_Connection=Yes" # SQL 語法 sp_sqlstr_SECURITYINFO="EXEC sp_GET_SECURITYINFO " sp_sqlstr_COMPANYCLASS="EXEC sp_GET_COMPANYCLASS" sp_sqlstr_COMPANYLIST="EXEC sp_GET_COMPANYLIST "
59041dbf13bf24ac64cf4b8d6569cfe16730ffa3
faaa6f8046404a7e8f616fd0f127441403792b9c
/Extraccion_datos.R
32d1e9ef6accfe6bf7b9eef4cc60102fa91d6d37
[ "MIT" ]
permissive
ArrigoCoen/Mexican_vaccination
577c2b10cfec842c5dd1d16e545216385293f67e
15d1bd4b68c1597b433693dbcb4a19bb051e75e3
refs/heads/main
2023-06-29T17:47:28.005835
2021-07-27T14:57:31
2021-07-27T14:57:31
389,700,814
0
0
null
null
null
null
UTF-8
R
false
false
33,418
r
Extraccion_datos.R
#-------------------------------------------------------------------------# # # # NOMBRE DE ARCHIVO # # # #-------------------------------------------------------------------------# # V1 FECHA #-------------------------------------------------------------------------# # OBSERVACIONES: # 1. Los boletines fueron extraidos de: # https://www.gob.mx/salud/acciones-y-programas/historico-boletin-epidemiologico # 2. Parkinson se tiene desde 2014-2 hasta 2019-52 # 3. El Boletin es semanal a partir de 1995 semana 26 # - 2017 tiene muchos archivos extra que fueron borrados # - 2011 HAY QUE HACER LA CORRECIION DE LOS ARCHIVOS ZIP PARA ESTE ANHO # - 2011 HAY QUE HACER LA CORRECIION DE LOS ARCHIVOS ZIP PARA ESTE ANHO # - 2011 HAY QUE HACER LA CORRECIION DE LOS ARCHIVOS ZIP PARA ESTE ANHO # - 2011 HAY QUE HACER LA CORRECIION DE LOS ARCHIVOS ZIP PARA ESTE ANHO # - EN 2003 pasaron a formato electronico # - Faltan cuadros en 2004 semana 7 (falta cuadro 3.2) preguntar a Betty #-------------------------------------------------------------------------# # Packages ---------------------------------------------------------------- library(pdftools) library(manipulate) library(tidyverse) #install.packages("manipulate") # Carpeta del proyecto ---------------------------------------------------- cat("\014") # borra consola rm(list=ls()) # Borra todo # 1. Home # 2. Work # 3. Laptop # 4. Other rutas_prueba <- c("/Users/arrigocoen/Dropbox","/Users/useradmin/Dropbox","","") eleccion_compu <- which(file.exists(rutas_prueba)) # Direcciones de cada computadora source_raiz <- c("/Users/arrigocoen/Dropbox/1 Proyectos/2020/Asthma/R",# 1. Home "",# 2. Work "",# 3. Laptop "")[eleccion_compu]# 4. 
Other setwd(source_raiz) source("Fn_epidemiologic.R") # library() # Function ---------------------------------------------------------------- # Example of extraction --------------------------------------------------- # De # https://rstudio-pubs-static.s3.amazonaws.com/415060_553527fd13ed4f30aae0f1e4483aa970.html cat("\014") # borra consola rm(list=ls()) # Borra todo PDF <- pdf_text("oregon_grass_and_legume_seed_crops_preliminary_estimates_2017.pdf") %>% readr::read_lines() #open the PDF inside your project folder PDF.grass <-PDF[-c(1:3,6:8,20:35)] # remove lines PDF.grass all_stat_lines <- PDF.grass[3:13] %>% str_squish() %>% strsplit(split = " ")# remove empty spaces var_lines <- c("Species", "Acreage", "Yield", "Production", "Price", "Value") # create your variable names var_lines # The next line is for some kind of correction that our file doesn't need, that's why I # commented it # all_stat_lines[[6]] <- c("Orchard", "grass","15,190","1,046","15,889","225.00","35,750") #change the line 6 df <- plyr::ldply(all_stat_lines) #create a data frame head(df) # asoetuh ----------------------------------------------------------------- # My data ----------------------------------------------------------------- cat("\014") # borra consola rm(list=ls()) # Borra todo pdf_name <- "sem01.pdf" pdf_name <- "boletin 2019/sem01.pdf" PDF <- pdf_text(pdf_name) %>% readr::read_lines() #open the PDF inside your project folder PDF length(PDF) # asoetuh ----------------------------------------------------------------- texto <- "Nonsense? kiss off, geek. what I said is true. I'll have your account terminated." 
grep(pattern ="bajo",PDF[1]) # encuentra una palabra en un texto grep(pattern ="Tatata",texto) # encuentra una palabra en un texto grep(pattern ="ru",texto) # encuentra una palabra en un texto i <- 1 linea <- PDF[i] linea grep(pattern ="de",linea) # asueth ------------------------------------------------------------------ i <- 2 linea <- PDF[i] linea grep(pattern ="de",linea) if(length(grep(pattern ="Parkinson",linea))==1) print(i) # asoetuh ----------------------------------------------------------------- cat("\014") # borra consola # rm(list=ls()) # Borra todo intervalo <- 1:1000 intervalo <- 1:length(PDF) intervalo <- 2621+ 0:1000 # CUADRO DE Parkinson en i=2622 for(i in intervalo) { linea <- PDF[i] print(linea) # if(length(grep(pattern ="CUADRO",linea))==1) { # print(linea) # print(i) # } } # asoetuh ----------------------------------------------------------------- anho <- 2018 semana <- 16 semana <- 1 pdf_path <- genera_ruta_boletin(anho,semana) intervalo <- 1:1000 intervalo <- 2621+ 0:1000 # CUADRO DE Parkinson en i=2622 intervalo <- 1:length(PDF) for(i in intervalo) { linea <- PDF[i] # print(linea) if(length(grep(pattern ="Neurológicas",linea))==1) { print(linea) print(i) break } } i text_table <- PDF[i+10:41] text_table # soeuh ------------------------------------------------------------------- all_stat_lines <- text_table %>% str_squish() %>% strsplit(split = " ")# remove empty spaces all_stat_lines i <- 2 n_palabras <- 2 all_stat_lines # nauht ------------------------------------------------------------------- all_stat_lines <- text_table %>% str_squish() %>% strsplit(split = " ")# remove empty spaces # var_lines <- c("Species", "Acreage", "Yield", "Production", "Price", "Value") # create your variable names # var_lines # The next line is for some kind of correction that our file doesn't need, that's why I # commented it # all_stat_lines[[6]] <- c("Orchard", "grass","15,190","1,046","15,889","225.00","35,750") #change the line 6 df <- 
plyr::ldply(all_stat_lines) #create a data frame head(df) # Revision de correccion de espacios en lineas ---------------------------- cat("\014") # borra consola rm(list=ls()) # Borra todo source("Fn_epidemiologic.R") # set.seed(42) # Variables pdf anho <- 2015 anho <- 2017 semana <- 16 semana <- 25 # en 2019 semana 25 BC tiene dos lugares de numeros grandes semana <- sample(1:52,1) # Variables tabla palabra_clave1 <- "Neurológicas" palabra_clave1 <- "CUADRO 17" # el cuadro 17 es de enfermedades neurologicas palabra_clave2 <- "Aguascalientes" longitud_tabla <- 32 # numero de estados semanas <- 21:27 semanas <- 24 semanas <- 1 semanas <- 1:52 semanas <- 40:52 # Error en 2019 sem 24 "PDF error: Invalid Font Weight" pero los valores estan bien salvados # Error: 2016 sem1 = tabla con longitudes incorrectas # Error: 2016 = MUCHAS TABLAS con longitudes incorrectas 24-29 anhos <- 2017:2019 # ya fueron revisados para enfermedades neurologicas anhos <- 2019 anhos <- 2016 # ya fueron revisados para enfermedades neurologicas anhos <- 2015 # ya fueron revisados para enfermedades neurologicas correcciones_a_mano_CUADRO_17 <- function(text_table,semana,anho) { source("Fn_Correcciones_CUADRO_17_anho2016.R") source("Fn_Correcciones_CUADRO_17_anho2015.R") if(anho==2015) text_table <- fun_corrige_CUADRO_17_anho2015(text_table,semana,anho) if(anho==2016) text_table <- fun_corrige_CUADRO_17_anho2016(text_table,semana,anho) return(text_table) } for(anho in anhos) for(semana in semanas) { # Path of pdf pdf_path <- genera_ruta_boletin(anho,semana) cat("Trabajando: ",pdf_path,"\n") # Raw text of pdf PDF <- pdf_to_text(pdf_path) # Extrayendo encabezados text_table <- extrae_text_tabla(PDF,palabra_clave1,palabra_clave2,longitud_tabla) # Correccion cuadro text_table <- correcciones_a_mano_CUADRO_17(text_table,semana,anho) # print(text_table) all_stat_lines <- correccion_espacios_en_lineas(text_table) print(lengths(all_stat_lines)) # Extrayendo encabezados encabezado_table <- 
extrae_encabezado_tabla(PDF,palabra_clave1,palabra_clave2,longitud_tabla) # print(encabezado_table) } # otnh -------------------------------------------------------------------- # Correcciones de extraccion de informacion ------------------------------- cat("\014") # borra consola rm(list=ls()) # Borra todo source("Fn_epidemiologic.R") # Variables tabla palabra_clave1 <- "Neurológicas" palabra_clave1 <- "CUADRO 17" # el cuadro 17 es de enfermedades neurologicas palabra_clave2 <- "Aguascalientes" longitud_tabla <- 32 # numero de estados longitud_esperada <- 13 # longitud de numero de columnas semanas <- 2 semanas <- 24:26 semanas <- c(1:3, 20:28) semanas <- 25:52 semanas <- 1 semanas <- 24 semanas <- 20:30 semanas <- 2:52 anhos <- 2017:2019 # 2017:2019 ya fueron revisados para enfermedades neurologicas f tienen columnas y datos bien anhos <- 2014 anhos <- 2015:2016 # anhos <- 2014 # de 2014 de la semana 25 a 52 no hay problema for(anho in anhos) { if(anho %in% c(2015,2016)) break # Tablas ya corregidas texto <- paste0("#--------------------------------------------------------\n", "#-- fun_corrige_CUADRO_17_anho",anho," --\n", "#--------------------------------------------------------\n", "#-- This function was generated by Fn_epidemiologic", "\n#-- Author = Arrigo Coen \n\n\n", "\nfun_corrige_CUADRO_17_anho",anho," <- function(text_table,semana,anho) {\n") for(semana in semanas) { longitud_esperada <- fun_longitud_esperada(semana,anho) pdf_path <- genera_ruta_boletin(anho,semana) cat("Trabajando: ",pdf_path,"\n") # Raw text of pdf PDF <- pdf_to_text(pdf_path) # Extrayendo encabezados text_table <- extrae_text_tabla(PDF,palabra_clave1,palabra_clave2,longitud_tabla) print(text_table) all_stat_lines <- correccion_espacios_en_lineas(text_table) if(T) { df <- plyr::ldply(all_stat_lines) #create a data frame print(head(df)) } idx_mal <- which(lengths(all_stat_lines)!=longitud_esperada) if(length(idx_mal)!=0) { texto <- paste0(texto,"\tif(semana==",semana," && 
anho==",anho,") {\n") for(i in idx_mal) { for(k in 1:10) text_table[i] <- gsub(' ', ' ', text_table[i]) # elimina el signo $ texto <- paste0(texto,"\t\ti <- ",i, ";\n\t\tx <- '",text_table[i],"'\n\t\ttext_table[i] <- x\n") } texto <- paste0(texto,"\t}\n") } } texto <- paste0(texto,"\treturn(text_table)\n}") name_file <- paste0(c("Fn_Correcciones_CUADRO_17_anho",anho,".R"),collapse = "") fileConn<-file(name_file) writeLines(texto, fileConn) close(fileConn) cat("Se genero el archivo ",name_file,"\n") } # cat(texto) if(F) { df <- plyr::ldply(all_stat_lines) #create a data frame head(df) } # Indices de todos los cuadros -------------------------------------------- cat("\014") # borra consola rm(list=ls()) # Borra todo source("Fn_epidemiologic.R") semanas <- 25:52 semanas <- 52 semanas <- 1:2 semanas <- 1:52 semanas <- 1 anhos <- 2017:2019 anhos <- 2013 imprime_CUADRO_anho_semana(anhos,semanas) # Cuadros un año ---------------------------------------------------------- # Revisando Titulo general ------------------------------------------------ cat("\014") # borra consola rm(list=ls()) # Borra todo source("Fn_epidemiologic.R") # set.seed(42) # Variables pdf anho <- 2015 anho <- 2017 semana <- 16 semana <- 25 # en 2019 semana 25 BC tiene dos lugares de numeros grandes semana <- sample(1:52,1) # Variables tabla palabra_clave1 <- "Neurológicas" palabra_clave1 <- "CUADRO 17" # el cuadro 17 es de enfermedades neurologicas palabra_clave1 <- "CUADRO 3" # el cuadro 17 enfermedades prevenibles palabra_clave2 <- "Aguascalientes" longitud_tabla <- 32 # numero de estados semanas <- 21:27 semanas <- 24 semanas <- 1 semanas <- 1:52 # Error en 2019 sem 24 "PDF error: Invalid Font Weight" pero los valores estan bien salvados # Error: 2016 sem1 = tabla con longitudes incorrectas # Error: 2016 = MUCHAS TABLAS con longitudes incorrectas anhos <- 2015 # ya fueron revisados para enfermedades neurologicas anhos <- 2016 # ya fueron revisados para enfermedades neurologicas anhos <- 2019 anhos 
<- 2015:2018 # ya fueron revisados para enfermedades neurologicas anhos <- 2014:2015 # ya fueron revisados para enfermedades neurologicas for(anho in anhos) for(semana in semanas) { # Path of pdf pdf_path <- genera_ruta_boletin(anho,semana) cat("Trabajando: ",pdf_path,"\n") # Raw text of pdf PDF <- pdf_to_text(pdf_path) # Extrayendo encabezados text_table <- extrae_text_tabla(PDF,palabra_clave1,palabra_clave2,longitud_tabla) # print(text_table) all_stat_lines <- correccion_espacios_en_lineas(text_table) print(lengths(all_stat_lines)) # Extrayendo encabezados # encabezado_table <- extrae_encabezado_tabla(PDF,palabra_clave1,palabra_clave2,longitud_tabla) # print(encabezado_table) } # astohu ------------------------------------------------------------------ # Correccion nombres 2013 ------------------------------------------------- # Revisor de primera linea ------------------------------------------------ cat("\014") # borra consola rm(list=ls()) # Borra todo source("Fn_epidemiologic.R") semanas <- 21:27 semanas <- 24 semanas <- 1 semanas <- 1:52 # Error en 2019 sem 24 "PDF error: Invalid Font Weight" pero los valores estan bien salvados # Error: 2016 sem1 = tabla con longitudes incorrectas # Error: 2016 = MUCHAS TABLAS con longitudes incorrectas anhos <- 2015 # ya fueron revisados para enfermedades neurologicas anhos <- 2018 anhos <- 2016:2019 # ya fueron revisados para enfermedades neurologicas anhos <- 2015 # ya fueron revisados para enfermedades neurologicas anhos <- 2014 revisa_linea_1_sem(anhos,semanas) # asotnu ------------------------------------------------------------------ colnames(df) <- c("Estado","Sem","Acum M","Acum F") # asoetuh ----------------------------------------------------------------- for(i in 1:length(all_stat_lines)) print(length(all_stat_lines[[i]])) # Analisis de encabezados de tablas --------------------------------------- cat("\014") # borra consola rm(list=ls()) # Borra todo source("Fn_epidemiologic.R") # set.seed(42) # Variables pdf 
anho <- 2015 anho <- 2017 semana <- 16 semana <- 25 # en 2019 semana 25 BC tiene dos lugares de numeros grandes semana <- sample(1:52,1) # Variables tabla palabra_clave1 <- "Neurológicas" palabra_clave2 <- "Aguascalientes" longitud_tabla <- 32 # numero de estados # Path of pdf pdf_path <- genera_ruta_boletin(anho,semana) pdf_path # Raw text of pdf PDF <- pdf_to_text(pdf_path) # text_table <- extrae_text_tabla(PDF,palabra_clave1,palabra_clave2,longitud_tabla) encabezado_table <- extrae_encabezado_tabla(PDF,palabra_clave1,palabra_clave2,longitud_tabla) for(semana in 1:52) { # Path of pdf pdf_path <- genera_ruta_boletin(anho,semana) # Raw text of pdf PDF <- pdf_to_text(pdf_path) # Extrayendo encabezados encabezado_table <- extrae_encabezado_tabla(PDF,palabra_clave1,palabra_clave2,longitud_tabla) print(encabezado_table) } # asothe ------------------------------------------------------------------ # Extraccion de un cuadro ------------------------------------------------- cat("\014") # borra consola rm(list=ls()) # Borra todo source("Fn_epidemiologic.R") # set.seed(42) # Variables pdf anho <- 2015 anho <- 2017 semana <- 16 semana <- 25 # en 2019 semana 25 BC tiene dos lugares de numeros grandes semana <- sample(1:52,1) # Variables tabla cuadro <- 2 cuadro <- 3.1 palabra_clave1 <- paste0("CUADRO ",as.character(cuadro)) # el cuadro 17 enfermedades prevenibles palabra_clave2 <- "Aguascalientes" longitud_tabla <- 32 # numero de estados semanas <- 7 semanas <- 1 semanas <- 1:4 # Error en 2019 sem 24 "PDF error: Invalid Font Weight" pero los valores estan bien salvados # Error: 2016 sem1 = tabla con longitudes incorrectas # Error: 2016 = MUCHAS TABLAS con longitudes incorrectas anhos <- 2015 # ya fueron revisados para enfermedades neurologicas anhos <- 2016 # ya fueron revisados para enfermedades neurologicas anhos <- 2019 anhos <- 2015:2018 # ya fueron revisados para enfermedades neurologicas anhos <- 2005 # ya fueron revisados para enfermedades neurologicas anhos <- 
2005:2019 # ya fueron revisados para enfermedades neurologicas # CORRER ESTA PARTE PARA DESCUBRIR CUALES SON LOS CUADROS CON NUMER DE COLUMNAS INCORRECTOS # CORRER ESTA PARTE PARA DESCUBRIR CUALES SON LOS CUADROS CON NUMER DE COLUMNAS INCORRECTOS # CORRER ESTA PARTE PARA DESCUBRIR CUALES SON LOS CUADROS CON NUMER DE COLUMNAS INCORRECTOS # CORRER ESTA PARTE PARA DESCUBRIR CUALES SON LOS CUADROS CON NUMER DE COLUMNAS INCORRECTOS # CORRER ESTA PARTE PARA DESCUBRIR CUALES SON LOS CUADROS CON NUMER DE COLUMNAS INCORRECTOS # CORRER ESTA PARTE PARA DESCUBRIR CUALES SON LOS CUADROS CON NUMER DE COLUMNAS INCORRECTOS # CORRER ESTA PARTE PARA DESCUBRIR CUALES SON LOS CUADROS CON NUMER DE COLUMNAS INCORRECTOS # CORRER ESTA PARTE PARA DESCUBRIR CUALES SON LOS CUADROS CON NUMER DE COLUMNAS INCORRECTOS for(anho in anhos) for(semana in semanas) { n_col_esperado <- n_col_esperado_X_CUADRO(cuadro,semana,anho) # Path of pdf pdf_path <- genera_ruta_boletin(anho,semana) cat("Trabajando: ",pdf_path,"\n") # Raw text of pdf PDF <- pdf_to_text(pdf_path) # Extrayendo encabezados text_table <- extrae_text_tabla(PDF,palabra_clave1,palabra_clave2,longitud_tabla) # print(text_table) all_stat_lines <- correccion_espacios_en_lineas(text_table) # print(lengths(all_stat_lines)) if(any(lengths(all_stat_lines)!=n_col_esperado)) { print("Posible error en el numero columnas -----------------") } # Extrayendo encabezados # encabezado_table <- extrae_encabezado_tabla(PDF,palabra_clave1,palabra_clave2,longitud_tabla) # print(encabezado_table) } # Extraccion un cuadro ---------------------------------------------------- cat("\014") # borra consola rm(list=ls()) # Borra todo source("Fn_epidemiologic.R") set.seed(42) # Variables pdf anho <- 2015 anho <- 2017 semana <- 16 semana <- 25 # en 2019 semana 25 BC tiene dos lugares de numeros grandes semana <- sample(1:52,1) semana <- 1 anho <- 2018 # Variables tabla cuadro <- 2 cuadro <- 3.2 cuadro <- 3.1 palabra_clave1 <- paste0("CUADRO ",as.character(cuadro)) # 
el cuadro 17 enfermedades prevenibles palabra_clave2 <- "Aguascalientes" longitud_tabla <- 32 # numero de estados semanas <- 7 semanas <- 1 semanas <- 1:4 # Error en 2019 sem 24 "PDF error: Invalid Font Weight" pero los valores estan bien salvados # Error: 2016 sem1 = tabla con longitudes incorrectas # Error: 2016 = MUCHAS TABLAS con longitudes incorrectas anhos <- 2015 # ya fueron revisados para enfermedades neurologicas anhos <- 2016 # ya fueron revisados para enfermedades neurologicas anhos <- 2019 anhos <- 2015:2018 # ya fueron revisados para enfermedades neurologicas anhos <- 2005 # ya fueron revisados para enfermedades neurologicas anhos <- 2010:2019 # ya fueron revisados para enfermedades neurologicas # CORRER ESTA PARTE PARA DESCUBRIR CUALES SON LOS CUADROS CON NUMER DE COLUMNAS INCORRECTOS # CORRER ESTA PARTE PARA DESCUBRIR CUALES SON LOS CUADROS CON NUMER DE COLUMNAS INCORRECTOS # CORRER ESTA PARTE PARA DESCUBRIR CUALES SON LOS CUADROS CON NUMER DE COLUMNAS INCORRECTOS # CORRER ESTA PARTE PARA DESCUBRIR CUALES SON LOS CUADROS CON NUMER DE COLUMNAS INCORRECTOS # CORRER ESTA PARTE PARA DESCUBRIR CUALES SON LOS CUADROS CON NUMER DE COLUMNAS INCORRECTOS # CORRER ESTA PARTE PARA DESCUBRIR CUALES SON LOS CUADROS CON NUMER DE COLUMNAS INCORRECTOS # CORRER ESTA PARTE PARA DESCUBRIR CUALES SON LOS CUADROS CON NUMER DE COLUMNAS INCORRECTOS # CORRER ESTA PARTE PARA DESCUBRIR CUALES SON LOS CUADROS CON NUMER DE COLUMNAS INCORRECTOS for(anho in anhos) for(semana in semanas) { (n_col_esperado <- n_col_esperado_X_CUADRO(cuadro,semana,anho)) # Path of pdf pdf_path <- genera_ruta_boletin(anho,semana) cat("Trabajando: ",pdf_path,"\n") # Raw text of pdf PDF <- pdf_to_text(pdf_path) # Extrayendo encabezados text_table <- extrae_text_tabla(PDF,palabra_clave1,palabra_clave2,longitud_tabla) # print(text_table) all_stat_lines <- correccion_espacios_en_lineas(text_table) # print(lengths(all_stat_lines)) if(any(lengths(all_stat_lines)!=n_col_esperado)) { print("Posible error en el 
numero columnas -----------------") } # Extrayendo encabezados encabezado_table <- extrae_encabezado_tabla(PDF,palabra_clave1,palabra_clave2,longitud_tabla) print(encabezado_table) } # asoetuh ----------------------------------------------------------------- all_stat_lines <- correccion_espacios_en_lineas(text_table) zero_value_character <- "-" # text_table <- extrae_text_tabla(PDF,palabra_clave1,palabra_clave2,longitud_tabla) from_text_to_numeric_matrix(all_stat_lines,anho,semana,longitud_tabla,zero_value_character) # satoeh ------------------------------------------------------------------ cat("\014") # borra consola rm(list=ls()) # Borra todo source("Fn_epidemiologic.R") set.seed(42) cuadro <- 4.2; ncol_mat_cuadro <- ncol_CUADRO_X(cuadro,semana=1,anho=2019) cuadro <- 3.1; # cuadro <- 3.6; ncol_mat_cuadro <- 11 palabra_clave1 <- paste0("CUADRO ",as.character(cuadro)) # el cuadro 17 enfermedades prevenibles palabra_clave2 <- "Aguascalientes" longitud_tabla <- 32 # numero de estados semanas <- 1:4 semanas <- 33 semanas <- 1:52 # ERROR EN Boletines/2010/sem36.pdf CUADRO 3.1 anhos <- 2017 anhos <- 2019 anhos <- 2012:2019 zero_value_character <- "-" mat_cuadro <- matrix(0,length(anhos)*length(semanas)*longitud_tabla,ncol_mat_cuadro+2) # +2 because the columns year week anho <- anhos[1] semana <- semanas[1] l_main <- list_useful_var(cuadro,anho,semana,anhos,semanas,palabra_clave1,palabra_clave2) name_file_All_Dat <- paste0(c("Data/All_Dat_",l_main$text_cuadro_guion,".RData"),collapse = "") load(name_file_All_Dat) head(BIG_mat_CUADRO) dim(BIG_mat_CUADRO) idx_mat_cuadro <- 1 for(anho in anhos) for(semana in semanas) { index_key <- find_index_key(anho,semana,BIG_mat_CUADRO) if(length(index_key)!=0) { cat("Datos de ",l_main$text_cuadro_guion,"anho=",anho," semana ",semana," ya obtenidos\n") } else { (n_col_esperado <- ncol_CUADRO_X(cuadro,semana=1,anho=2019)) # Path of pdf pdf_path <- genera_ruta_boletin(anho,semana) cat("Trabajando: ",pdf_path,"\n") # Raw text of pdf PDF 
<- pdf_to_text(pdf_path) # Extrayendo encabezados text_table <- extrae_text_tabla(PDF,palabra_clave1,palabra_clave2,longitud_tabla) # print(text_table) text_table <- Correcting_text_table(text_table,l_main) all_stat_lines <- correccion_espacios_en_lineas(text_table) mat_dat <- from_text_to_numeric_matrix(all_stat_lines,anho,semana,longitud_tabla,zero_value_character) BIG_mat_CUADRO <- rbind(BIG_mat_CUADRO,mat_dat) save(BIG_mat_CUADRO,file=name_file_All_Dat) cat("Datos de anho-semana-cuadro ACTUALIZADOS") } # mat_cuadro[idx_mat_cuadro:(idx_mat_cuadro+longitud_tabla-1),] <- mat_dat # idx_mat_cuadro <- idx_mat_cuadro + longitud_tabla # print(mat_dat) } s # aseuh ------------------------------------------------------------------- cat("\014") # borra consola rm(list=ls()) # Borra todo source("Fn_epidemiologic.R") set.seed(42) cuadro <- 4.2; ncol_mat_cuadro <- ncol_CUADRO_X(cuadro,semana=1,anho=2019) cuadro <- 3.1; # cuadro <- 3.6; ncol_mat_cuadro <- 11 palabra_clave1 <- paste0("CUADRO ",as.character(cuadro)) # el cuadro 17 enfermedades prevenibles palabra_clave2 <- "Aguascalientes" longitud_tabla <- 32 # numero de estados semanas <- 1:52 # ERROR EN Boletines/2010/sem36.pdf CUADRO 3.1 anhos <- 2017 anhos <- 2019 anhos <- 2012:2019 zero_value_character <- "-" mat_cuadro <- matrix(0,length(anhos)*length(semanas)*longitud_tabla,ncol_mat_cuadro+2) # +2 because the columns year week anho <- anhos[1] semana <- semanas[1] l_main <- list_useful_var(cuadro,anho,semana,anhos,semanas,palabra_clave1,palabra_clave2) names(l_main) name_file_All_Dat <- paste0(c("Data/All_Dat_",l_main$text_cuadro_guion,".RData"),collapse = "") load(name_file_All_Dat) cuadro estados_to_plot <- 9 estados_to_plot <- "Todos" col_plot <- 7 # 7 equivale a columna 4, y 11 equivale a 8 col_plot <- 11 # 7 equivale a columna 4, y 11 equivale a 8 head(BIG_mat_CUADRO) plot_state_info(estados_to_plot,col_plot,BIG_mat_CUADRO) # imprime_CUADRO_anho_semana(anhos=2019,semanas=1) # sauhteo 
----------------------------------------------------------------- Unirse a la reunión Zoom https://uammx.zoom.us/j/84954674369 ID de reunión: 849 5467 4369 Código de acceso: 330103 # Relacion estados numero ------------------------------------------------- l_main$state_numbers # saoethu ----------------------------------------------------------------- imprime_CUADRO_anho_semana(anhos=2010,semanas=52) # sutha ------------------------------------------------------------------- # Saving all data for a CUADRO -------------------------------------------- cat("\014") # borra consola rm(list=ls()) # Borra todo source("Fn_epidemiologic.R") cuadro <- 3.1; ncol_mat_cuadro <- ncol_CUADRO_X(cuadro,semana=1,anho=2019) Initialize_All_Dat_RData_file(cuadro) # fun_corrige_CUADRO_4_1_anho2016(text_table,semana,anho) # all cuadros inizialized ------------------------------------------------- cat("\014") # borra consola rm(list=ls()) # Borra todo source("Fn_epidemiologic.R") l_main <- Simple_list_useful_var() for(cuadro in l_main$all_CUADRO_numbers_no_points) { cat("cuadro",cuadro,"\n") Initialize_All_Dat_RData_file(cuadro) } # astoeuh ----------------------------------------------------------------- load("Data/Dat CUADRO 3_6 2012-2019.RData") col_plot <- 8 #posibles valores entre 3 y 11 ncol(mat_cuadro) estados_to_plot <- 7 estados_to_plot <- 1:10 estados_to_plot <- "Todos" estados_to_plot <- c(1,2,7,16) # CORREGIR ERROR AL EMPEZAR AQUI # CORREGIR ERROR AL EMPEZAR AQUI # CORREGIR ERROR AL EMPEZAR AQUI # CORREGIR ERROR AL EMPEZAR AQUI # CORREGIR ERROR AL EMPEZAR AQUI # CORREGIR ERROR AL EMPEZAR AQUI # CORREGIR ERROR AL EMPEZAR AQUI # CORREGIR ERROR AL EMPEZAR AQUI # CORREGIR ERROR AL EMPEZAR AQUI plot_state_info(estados_to_plot,col_plot,mat_cuadro) # Number of columns for 2019 any Cuadro ----------------------------------- cat("\014") # borra consola rm(list=ls()) # Borra todo source("Fn_epidemiologic.R") set.seed(42) cuadro <- 4.1 semana <- 52 anho <- 2019 # 
imprime_CUADRO_anho_semana(anhos,semanas) ncol_CUADRO_X(cuadro,semana,anho) # Number of columns for 2019 any Cuadro ----------------------------------- cat("\014") # borra consola rm(list=ls()) # Borra todo source("Fn_epidemiologic.R") set.seed(42) cuadro <- 3.2 cuadro <- 4.1 anhos <- c(2005:2010,2012:2019) anhos <- 2015:2019 semanas <- 1 # imprime_CUADRO_anho_semana(anhos,semanas) semana <- 52 anho <- 2019 for(anho in anhos) for(semana in semanas) { cat("Anho",anho," semana ",semana,"CUADRO ",cuadro," num. col.", ncol_CUADRO_X(cuadro,semana,anho),"\n") } # Correccion de numeros con espacios -------------------------------------- # Correcciones de extraccion de informacion ------------------------------- cat("\014") # borra consola rm(list=ls()) # Borra todo source("Fn_epidemiologic.R") # Variables tabla cuadro <- 3.1 cuadro <- 4.1 # cuadro <- 3.6; ncol_mat_cuadro <- 11 palabra_clave1 <- paste0("CUADRO ",as.character(cuadro)) palabra_clave2 <- "Aguascalientes" longitud_esperada <- 13 # longitud de numero de columnas longitud_tabla <- 32 # numero de estados remplaza_file <- F semanas <- 2 semana <- 14:20 semanas <- 14:20 anhos <- 2018 genera0_imprime1 <- 0 anho <- anhos[1] for(anho in anhos) { texto <- GEN_file_fun_corrige(cuadro,anho,palabra_clave1,palabra_clave2,remplaza_file) } cat(texto) # MAT cuadros con errores ------------------------------------------------- cat("\014") # borra consola rm(list=ls()) # Borra todo source("Fn_epidemiologic.R") initialize_mat_errores_cuadro() # asuth ------------------------------------------------------------------- semana <- 1 cuadro <- 1 anho <- 1981 load(file = "Data/Mat_errores_cuadro.RData") head(mat_errores_cuadro) mat_errores_cuadro[mat_errores_cuadro[,1]==anho & mat_errores_cuadro[,2]==semana & mat_errores_cuadro[,3]==cuadro, ] <- c(T,"ncol wrong") save(mat_errores_cuadro,file = "Data/Mat_errores_cuadro.RData") # which(mat_errores_cuadro[,3]==cuadro) # astoueh 
----------------------------------------------------------------- load(file = "Data/Mat_errores_cuadro.RData") head(mat_errores_cuadro) which(apply(mat_errores_cuadro[,1:3] == c("2019","4","1"),1,all)) idx <- which(apply(mat_errores_cuadro[,1:2] == c(2019,4),1,all)) idx <- which(apply(mat_errores_cuadro[,1:3] == c(2019,4,13.4),1,all)) idx mat_errores_cuadro[idx,] unique(mat_errores_cuadro[,1]) # sotuh ------------------------------------------------------------------- # Works mat_errores_cuadro <- matrix(1:9,3) apply(mat_errores_cuadro[,1:3] == c(1,4,7),1,all) # Correcciones de extraccion de informacion ------------------------------- cat("\014") # borra consola rm(list=ls()) # Borra todo source("Fn_epidemiologic.R") # Variables tabla cuadro <- 3.1; ncol_mat_cuadro <- 11 cuadro <- 4.1 # cuadro <- 3.6; ncol_mat_cuadro <- 11 palabra_clave1 <- paste0("CUADRO ",as.character(cuadro)) palabra_clave2 <- "Aguascalientes" longitud_esperada <- 13 # longitud de numero de columnas longitud_tabla <- 32 # numero de estados remplaza_file <- T semanas <- 2 semana <- 14:20 semanas <- 14:20 anhos <- 2014 anho <- 2016 genera0_imprime1 <- 0 GEN_file_fun_corrige(anho,remplaza_file) # Revisa correcciones de cuadro ------------------------------------------- review_correcciones_a_mano_CUADRO_17 <- function(text_table,semana,anho) { source("Fn_Correcciones_CUADRO_17_anho2016.R") source("Fn_Correcciones_CUADRO_17_anho2015.R") if(anho==2015) text_table <- fun_corrige_CUADRO_17_anho2015(text_table,semana,anho) if(anho==2016) text_table <- fun_corrige_CUADRO_17_anho2016(text_table,semana,anho) return(text_table) } for(anho in anhos) for(semana in semanas) { # Path of pdf pdf_path <- genera_ruta_boletin(anho,semana) cat("Trabajando: ",pdf_path,"\n") # Raw text of pdf PDF <- pdf_to_text(pdf_path) # Extrayendo encabezados text_table <- extrae_text_tabla(PDF,palabra_clave1,palabra_clave2,longitud_tabla) # Correccion cuadro text_table <- correcciones_a_mano_CUADRO_17(text_table,semana,anho) # 
print(text_table) all_stat_lines <- correccion_espacios_en_lineas(text_table) print(lengths(all_stat_lines)) # Extrayendo encabezados encabezado_table <- extrae_encabezado_tabla(PDF,palabra_clave1,palabra_clave2,longitud_tabla) # print(encabezado_table) } # stoehu ------------------------------------------------------------------ if(F) { df <- plyr::ldply(all_stat_lines) #create a data frame head(df) } # stnheu ------------------------------------------------------------------ # satoeh ------------------------------------------------------------------ cat("\014") # borra consola rm(list=ls()) # Borra todo source("Fn_epidemiologic.R") set.seed(42) cuadro <- 3.1; ncol_mat_cuadro <- 11 cuadro <- 4.1; ncol_mat_cuadro <- ncol_CUADRO_X(cuadro,semana=1,anho=2019) # cuadro <- 3.6; ncol_mat_cuadro <- 11 palabra_clave1 <- paste0("CUADRO ",as.character(cuadro)) # el cuadro 17 enfermedades prevenibles palabra_clave2 <- "Aguascalientes" longitud_tabla <- 32 # numero de estados semanas <- 1:4 semanas <- 33 semanas <- 1:52 # ERROR EN Boletines/2010/sem36.pdf CUADRO 3.1 anhos <- 2012:2019 anhos <- 2017 anhos <- 2017 zero_value_character <- "-" mat_cuadro <- matrix(0,length(anhos)*length(semanas)*longitud_tabla,ncol_mat_cuadro+2) # +2 because the columns year week anho <- anhos[1] semana <- semanas[1] l_main <- list_useful_var(cuadro,anho,semana,anhos,semanas,palabra_clave1,palabra_clave2) idx_mat_cuadro <- 1 for(anho in anhos) for(semana in semanas) { index_key <- find_index_key(anho,semana,BIG_mat_CUADRO) if(length(index_key)!=0) { cat("Datos de anho-semana-cuadro ya obtenidos") } else { (n_col_esperado <- ncol_CUADRO_X(cuadro,semana=1,anho=2019)) # Path of pdf pdf_path <- genera_ruta_boletin(anho,semana) cat("Trabajando: ",pdf_path,"\n") # Raw text of pdf PDF <- pdf_to_text(pdf_path) # Extrayendo encabezados text_table <- extrae_text_tabla(PDF,palabra_clave1,palabra_clave2,longitud_tabla) # print(text_table) text_table <- Correcting_text_table(text_table,l_main) all_stat_lines 
<- correccion_espacios_en_lineas(text_table) mat_dat <- from_text_to_numeric_matrix(all_stat_lines,anho,semana,longitud_tabla,zero_value_character) BIG_mat_CUADRO <- rbind(BIG_mat_CUADRO,mat_dat) } # mat_cuadro[idx_mat_cuadro:(idx_mat_cuadro+longitud_tabla-1),] <- mat_dat # idx_mat_cuadro <- idx_mat_cuadro + longitud_tabla # print(mat_dat) } (correction_text_cuadro <- gsub('\\.', '_', palabra_clave1)) (text_andos <- paste0(as.character(range(anhos)),collapse = "-")) (name_file_save <- paste0(c("Data/Dat ",correction_text_cuadro," ",text_andos,".RData"),collapse = "")) save(mat_cuadro,anhos,cuadro,ncol_mat_cuadro,palabra_clave1,palabra_clave2,longitud_tabla,file=name_file_save) ncol(mat_dat) ncol(mat_cuadro) # nsotauh -----------------------------------------------------------------
ecbf0c82c1f3ef1e1e9c50536c8a60fd013efae9
2ca96e9d3d4d595682a06de876a8af59113779ba
/cachematrix.R
4f2f41d4e90efc3fbe7cd7aa021061a88336e5fd
[]
no_license
JeffreyGonlin/ProgrammingAssignment2
b6b466f56cca51b79114bfd5715c629201f55047
23fcd47f9c1ced6985dbd2c602b760f8db11976d
refs/heads/master
2021-01-16T18:13:19.747397
2016-02-21T12:04:15
2016-02-21T12:04:15
51,803,404
0
0
null
2016-02-16T02:57:09
2016-02-16T02:57:09
null
UTF-8
R
false
false
4,350
r
cachematrix.R
## Assignment2: ## Write the following functions: ## 1. makeCacheMatrix: ## This function creates a special "matrix" object that can cache its inverse. ## 2. cacheSolve: ## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. ## If the inverse has already been calculated (and the matrix has not changed), ## then the cachesolve should retrieve the inverse from the cache. ## To create special "YOUR_MATRIX", just enter "YOUR_MATRIX<-makeCacheMatrix". ## It gets added to a list, along with its inverse. ## YOUR_MATRIX and its inverse can be called up by entering "YOUR_MATRIX$get()" or(once solved) "YOUR_MATRIX$getinverse" ## Any number of matrices can be added to the list, but name them distinctly (e.g., YOUR_MATRIX2, YOUR_MATRIX3, etc.) makeCacheMatrix <- function(x = matrix()) { nvrs <- NULL ## This first part sets inverse to NULL for a clear start and set <- function(y) { ## sets the value for YOUR_MATRIX<-makeCacheMatrix in the list. xp <<- y ## the value of the matrix and its inverse will be held in the nvrs <<- NULL ## parent environment. The various matrices will not show up as data } ## but rather as Values, as a List of 4: set, get, setinverse, getinverse. get <- function() { x } setinverse <- function(solve) { nvrs<<-solve(x) } getinverse <- function() { nvrs } list(set = set, get = get, setinverse = setinverse, getinverse = getinverse) } ## The "cacheSolve" function below is meant to work with "makeCacheMatrix" above. ## First use "makeCacheMatrix" to cache a matrix and its inverse for later return. ## Then enter "cacheSolve(YOUR_MATRIX)" to calculate the inverese or return the cached inverse, if available. ## Cached inverses are returned without the need to repeat the inversion calculations, thus freeng up resources. cacheSolve <- function(x, ...) { nvrs <- x$getinverse() if (!is.null(nvrs)) { ## Checks to see if the inverse has already been solved and is avaiable. 
message("getting cached data") return(nvrs) ## If inverse is available, cached solution gets returned. } data <- x$get() ## If not in cache, i.e., this is first time to solve, the matrix is retrieved, nvrs <- solve(data, ...) ## the inverse calculated and then saved to the list for later recall. x$setinverse(nvrs) nvrs } ## Testing: ## Test using different matrices matrix1 <- makeCacheMatrix(matrix(1:4, 2, 2)) cacheSolve(matrix1) matrix2 <- makeCacheMatrix(matrix(4:1, 2, 2)) cacheSolve(matrix2) ## If we want to get either the matrix or its inverse, we can do that. matrix1$get() matrix1$getinverse() ## Now call again for the inverse of the matrices. cacheSolve(matrix1) cacheSolve(matrix2) ## We see (red message) that the cached solution was used. ## As a check, take the inverse of matrix1, call it matrix1i, and see if we get matrix1 in return. matrix1i <- makeCacheMatrix(matrix(c(-2,1,1.5,-.5), 2, 2)) cacheSolve(matrix1i) ## From further testing, I found that matrix multiplication doesn't work: matrix1%*%matrix1i ##bacause the 'matrices' are now lists. class(matrix1) ## However, one can still multiply them by doing the following: matrix1solution <- cacheSolve(matrix1)%*%cacheSolve(matrix1i) matrix1solution ## This shows that matrix1i is indeed the inverse. The solution matrix is class 'matrix'. class(matrix1solution) ## What if the matrix has changed since the time it was originally "made" with makeCacheMatrix? ## If the matrix changes but is still called the same, cacheSolve won't work. Consider the following example: ## matrix1 is multiplied by 10, then cacheSolve(matrix1) is tried. matrix1 <- matrix1$get()*10 cacheSolve(matrix1) ## matrix1 has become a new object, as seen under Data in Environemnt. ## An Error message gets returned: "Error in x$getinverse : $ operator is invalid for atomic vectors" ## So if a matrix gets changed, that matrix needs to become listed via makeCacheMatrix before cacheSolve will work for it.
1a53edc7c4470fdedec7ee565ef7cb1c0d6c8d2d
7f72ac13d08fa64bfd8ac00f44784fef6060fec3
/RGtk2/man/gtkCListColumnTitlePassive.Rd
e4de273a0c443419b05d88acb76601ad15863640
[]
no_license
lawremi/RGtk2
d2412ccedf2d2bc12888618b42486f7e9cceee43
eb315232f75c3bed73bae9584510018293ba6b83
refs/heads/master
2023-03-05T01:13:14.484107
2023-02-25T15:19:06
2023-02-25T15:20:41
2,554,865
14
9
null
2023-02-06T21:28:56
2011-10-11T11:50:22
R
UTF-8
R
false
false
626
rd
gtkCListColumnTitlePassive.Rd
\alias{gtkCListColumnTitlePassive} \name{gtkCListColumnTitlePassive} \title{gtkCListColumnTitlePassive} \description{ Causes the specified column title button to become passive, i.e., does not respond to events, such as the user clicking on it. \strong{WARNING: \code{gtk_clist_column_title_passive} is deprecated and should not be used in newly-written code.} } \usage{gtkCListColumnTitlePassive(object, column)} \arguments{ \item{\verb{object}}{The \code{\link{GtkCList}} to affect.} \item{\verb{column}}{The column to make passive, counting from 0.} } \author{Derived by RGtkGen from GTK+ documentation} \keyword{internal}
733c1b7ad04ba1f653729ef9afeb2c2b7d00bf61
db17cf7919d13b04253a44c18129a652034b7148
/tests/testthat/test-integrate_primitive.R
06f3c93fdb68968796f9bb6bfa12b3503dc21342
[ "MIT" ]
permissive
hericks/KDE
6b286619567ee1bf534c6bb5428dfaddc00330fa
96fe54291e7a3890c41e00a10b5106813403c7c4
refs/heads/master
2022-12-03T10:56:02.986745
2020-08-21T22:00:24
2020-08-21T22:00:24
null
0
0
null
null
null
null
UTF-8
R
false
false
2,685
r
test-integrate_primitive.R
test_that("integrant should be a function",{ not_fun <- "x" expect_error(integrate_primitive(not_fun, -1, 1)) a_fun <- function(x) x+1 expect_error(integrate_primitive(a_fun, -1, 1), NA) }) test_that("support should make sense",{ fun <- rectangular$fun expect_error(integrate_primitive(fun, rectangular$support[1], rectangular$support[2]), NA) expect_error(integrate_primitive(fun, 1, -1)) expect_error(integrate_primitive(fun, "-1", 1)) expect_error(integrate_primitive(fun, -1, "-1")) expect_error(integrate_primitive(fun, c(-1,0), 1)) expect_error(integrate_primitive(fun, -1, c(1,2144))) }) test_that("support should be finite",{ fun <- rectangular$fun expect_error(integrate_primitive(fun, -Inf, -1)) expect_error(integrate_primitive(fun, -1, Inf)) }) test_that("subdivisions should be numeric value", { fun <- rectangular$fun expect_error(integrate_primitive(fun, -1, 1, subdivisions=1000L),NA) expect_error(integrate_primitive(fun, -1, 1, subdivisions=1000.2342),NA) expect_error(integrate_primitive(fun, -1, 1, subdivisions="1000")) expect_error(integrate_primitive(fun, -1, 1, subdivisions=c(100, 10))) }) test_that("non integrable functions will not converge with more subdivisions",{ non_integrable_fun <- function(x) 1/x lower <- 0 upper <- 1 rel_error <- c() for(i in 1:6){ subdivisions <- 10^i rel_error <- c(rel_error,integrate_primitive(non_integrable_fun, 0, 1, subdivisions, check=TRUE)$relError) } # rel_error does get smaller using more subdivision steps, but it will not get sufficiently small expect_error(stopifnot(res[length(rel_error)] < 0.01)) }) test_that("integrating integrable functions will converge",{ convergence <- function(x){ for(i in seq_along(x)){ for(j in (i:length(x))){ if(x[i] < x[j] && i != j) return(FALSE) } } return(TRUE) } fun <- function(x) x^2 + 2 lower <- 0 upper <- 1 res <- c() for(i in 1:5){ subdivisions <- 10^i res <- c(res,integrate_primitive(fun, 0, 1, subdivisions, check=TRUE)$value) } true_value <- 2+1/3 expect_true(convergence(abs(res - 
true_value))) }) test_that("rel_error has to be boolean value",{ fun <- function(x) 1/x lower <- 0 upper <- 1 subdivisions <- 1000L res <- c() expect_error(integrate_primitive(fun, lower, upper, subdivisions, check=TRUE), NA) expect_error(integrate_primitive(fun, lower, upper, subdivisions, check=c(TRUE, FALSE))) expect_error(integrate_primitive(fun, lower, upper, subdivisions, check="TRUE")) expect_equal(integrate_primitive(fun, lower, upper, subdivisions, check=FALSE)$relError, NULL) })
e10c548f3c0f19cb7b7863df8679438a4959434a
e3c46e15b16f7097c1839dcea45c4a7834062cc2
/SVM traincode.R
ec9716d6d88a7a1bab4fe26ee1e49e171b206be2
[]
no_license
jaychoi4830/SMRIbasemodels
3ae17ac190d4c04730d56c29e69566c91e1a663d
15e4323bfea40fa7c32a1cbce88a874ee3161efb
refs/heads/main
2023-03-27T20:24:41.405988
2021-03-25T20:21:41
2021-03-25T20:21:41
351,565,090
0
0
null
null
null
null
UTF-8
R
false
false
2,437
r
SVM traincode.R
require('e1071') require(caret) require(kernlab) library(e1071) library(caret) library(kernlab) library(pROC) ##SVM Linear Kernal tuning: set.seed(00100) linear.tune <- tune.svm(Labels~., data = traindt1, kernel = "linear", cost = c(0.001,0.01,0.1,1,5,10)) summary(linear.tune) #list of model performance based on 10-fold CV on different cost levels best.linearsvm <- linear.tune$best.model lineartune.test = predict(best.linearsvm, newdata=testdt) table(lineartune.test, testdt$Labels) ##then caculate accuracy by = (TN+TP)/(TN+TP+FN+FP) ##SVM Radial Kernal tuning: set.seed(00101) radial.tune <- tune.svm(Labels~., data = traindt1, kernal = "radial", gamma = c(0.1, 0.3, 0.5, 1, 2, 3, 4, 5), cost = c(0.001, 0.01,0.1, 0.5, 1, 10, 30, 50, 70, 100)) summary(radial.tune) best.radialsvm <- radial.tune$best.model radialtune.test = predict(best.radialsvm, newdata = testdt) table(radialtune.test, testdt$Labels) ## calculate accuracy ##SVM Polynomial Kernal tuning: set.seed(00110) poly.tune <- tune.svm(Labels~., data = traindt1, kernal = "polynomial", degree = c(3,4,5), coef0 = c(0.1,0.5,1,2,3,4,5)) summary(poly.tune) best.polysvm <- poly.tune$best.model polytune.test <- predict(best.polysvm, newdata=testdt) table(polytune.test, testdt$Labels) ## calculate accuracy set.seed(01110) sig.tune <- tune.svm(Labels~., data=traindt1, kernal ="sigmoid", gamma = c(0.1,0.3,0.5,1,2,3,4,5), coef0 = c(0.1,0.5,1,2,3,4,5)) summary(sig.tune) best.sigsvm <- sig.tune$best.model sigtune.test <- predict(best.sigsvm, newdata = testdt) table(sigtune.test, testdt$Labels) ## calculate accuracy ## Confusion Matrix confusionMatrix(sigtune.test, test$labels, positive = "1") #change test ## K-Fold Cross-validation folds <- createFolds(traindt1$Labels, k =5) svm.cv = lapply(folds, function(x){ training_fold = traindt[-x,] test_fold = traindt[x,] classifier = best.radialsvm cv_pred = predict(classifier, newdata = test_fold[-1]) cm = table(test_fold[, 1], cv_pred) accuracy = (cm[1,1] + 
cm[2,2])/(cm[1,1]+cm[1,2]+cm[2,1]+cm[2,2]) return(accuracy) }) #mean of accuracy of k-fold cv accuracy_cv = mean(as.numeric(svm.cv)) accuracy_cv
6589dee7ad95c0e7e784fdb4ad092c81b25a711a
7a95abd73d1ab9826e7f2bd7762f31c98bd0274f
/netrankr/inst/testfiles/checkPairs/libFuzzer_checkPairs/checkPairs_valgrind_files/1612798874-test.R
0c071e9ed6328f7e845d6af29dbffafec5f5e86a
[]
no_license
akhikolla/updatedatatype-list3
536d4e126d14ffb84bb655b8551ed5bc9b16d2c5
d1505cabc5bea8badb599bf1ed44efad5306636c
refs/heads/master
2023-03-25T09:44:15.112369
2021-03-20T15:57:10
2021-03-20T15:57:10
349,770,001
0
0
null
null
null
null
UTF-8
R
false
false
897
r
1612798874-test.R
testlist <- list(x = c(NaN, 2.12196353786585e-314, Inf, 1.39068914879695e-309, -5.49817854063981e+303, 2.78542174921362e+180, 6.08113124126605e-13, NaN, -5.82900682303162e+303, 3.24244610965235e-05, -1.27733779809297e+294, 1.08435642727488e-311, NaN, NaN, 9.56817916472489e-303, NaN, 2.12196366434665e-314, NaN, NaN, 2.78231844119784e-309, 1.65436122203656e-24, NaN, NaN, NaN, NaN, 2.04114723650203e-317, NaN, 1.73833895195875e-307, NaN, 3.56011817360252e-307, 1.82359013286078e-314, NaN, 4.77772868874966e-299, 0), y = c(6.48841590713934e-319, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) result <- do.call(netrankr:::checkPairs,testlist) str(result)
72ad34d0184f48a14555a84223f27d462f526d1e
abc067dec9d3193de799af17da412abc2c4346ed
/plot1.R
a7301e8f55d763b748502d1529e5bfe8773772aa
[]
no_license
hungerpangg/Exploratory_Data_Analysis_Project1
d555554c2448b41924948f74a0e7388ca82cf798
adec65930c17796819fb17e6b987cba7a137c4b8
refs/heads/master
2022-12-02T15:44:52.171704
2020-08-12T16:56:47
2020-08-12T16:56:47
287,066,972
0
0
null
null
null
null
UTF-8
R
false
false
714
r
plot1.R
hpc <- read.table("household_power_consumption.txt", sep = ";", header = TRUE, na.strings = "?") #1 #hpc <- hpc[complete.cases(hpc),] hpc$Date <- as.Date(hpc$Date, format = "%d/%m/%Y") hpc2 <- subset(hpc, Date == "2007-02-01" | Date == "2007-02-02") hist(hpc2$Global_active_power, col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)") dev.copy(png, file = "plot1.png") dev.off() #2 hpc2 <- hpc2 %>% mutate(datetime = paste(Date, Time)) hpc2$datetime <- strptime(hpc2$datetime, format = "%Y-%m-%d %H:%M:%S") plot(hpc2$datetime, hpc2$Global_active_power, type = "s", xlab = "", ylab = "Global Active Power (kilowatts)") dev.copy(png, file = "plot2.png") dev.off()
320edc6dec7c2f2da9c906211bf70d59d3151374
fca927c08250034c15f3dd3c55251c9f7d48d00a
/pkg/man/shiny.app.Rd
fc5ff0a0e6e8607615be547253ef083352774e32
[]
no_license
rn1x0n/SwissMortgage
a773b9216d251d994ce08ba8a66ab5308e698663
4042797aa95a843020416bdaa19d6f057dd5654c
refs/heads/master
2021-01-13T01:57:52.629092
2013-06-10T20:37:10
2013-06-10T20:37:10
null
0
0
null
null
null
null
UTF-8
R
false
false
314
rd
shiny.app.Rd
\name{shiny.app} \alias{shiny.app} \title{Run Shiny application} \usage{ shiny.app() } \description{ Runs a Swiss Mortgage Calculator Shiny application. This function normally does not return; interrupt R to stop the application (usually by pressing Ctrl+C or Esc). } \examples{ \dontrun{ shiny.app() } }
970dc404183241b97fdf6a192022b72f1fe7ea3c
2e6d94697d4349c2b9f4569acc70d5379fd980d7
/d_prepareBase.R
57461bb77a3e0050fd5b3007efae15dfebc904af
[]
no_license
plusy/TitanicLearningR
d5ea50f34cb190376cf7f5e2f10ff6612fae1fdf
18480b9ce50307b660e9c5605df79976f13fb9b4
refs/heads/master
2022-02-26T05:57:45.266933
2019-07-29T04:41:53
2019-07-29T04:41:53
107,499,232
0
0
null
null
null
null
UTF-8
R
false
false
7,083
r
d_prepareBase.R
.VAR_BEFORE <- ls() # Descritpion from https://github.com/IamGianluca/titanic # This is an analysis based on the data made available by Kaggle # (www.kaggle.com) on the Titanic distaster competition. This work aims to # predict what sorts of people were more likely to survive. # # On the folder text you can find the R Markdown file to generate a pdf # version of the final report, which summarises the key findings of our # reseach. # # On the folder R code you can find two sub-folders. Raw scripts # contains all tests we conducted and it is not intended to be a place # where find perfectly formatted and optimised R code. Whereas final # scripts include the final version of R code we wrote to conduct # our analysis. We advice you to give it a read to have a better # overview of how the analysis has been conducted. # # Data contains both the raw dataset we downloaded from Kaggle.com and # a tiny dataset with variables transformed in the format we used when # conducting our analysis. # # # VARIABLES DESCRIPTION: # # survival Survival # (0 = No; 1 = Yes) # pclass Passenger Class # (1 = 1st; 2 = 2nd; 3 = 3rd) # name Name # sex Sex # age Age # sibsp Number of Siblings/Spouses Aboard # parch Number of Parents/Children Aboard # ticket Ticket Number # fare Passenger Fare # cabin Cabin # embarked Port of Embarkation # (C = Cherbourg; Q = Queenstown; S = Southampton) # # # SPECIAL NOTES: # # Pclass is a proxy for socio-economic status (SES) # 1st ~ Upper; 2nd ~ Middle; 3rd ~ Lower # # Age is in Years; Fractional if Age less than One (1) # If the Age is Estimated, it is in the form xx.5 # # With respect to the family relation variables (i.e. sibsp and parch) # some relations were ignored. The following are the definitions used # for sibsp and parch. 
# # Sibling: Brother, Sister, Stepbrother, or Stepsister of Passenger Aboard Titanic # Spouse: Husband or Wife of Passenger Aboard Titanic (Mistresses and Fiances Ignored) # Parent: Mother or Father of Passenger Aboard Titanic # Child: Son, Daughter, Stepson, or Stepdaughter of Passenger Aboard Titanic # # Other family relatives excluded from this study include cousins, # nephews/nieces, aunts/uncles, and in-laws. Some children travelled # only with a nanny, therefore parch=0 for them. As well, some # travelled with very close friends or neighbors in a village, however, # the definitions do not support such relations. # Based on this post: http://wiekvoet.blogspot.com/2015/08/predicting-titanic-deaths-on-kaggle-iv.html # Load raw data ----------------------------------------------------------- df_RawInput_Train <- read_csv(FILE_SRC_DATA_RAW_TRAIN) df_RawInput_Train$DataTag <- DATA_TAG_TRAIN df_RawInput_Test <- read_csv(FILE_SRC_DATA_RAW_TEST) df_RawInput_Test$DataTag <- DATA_TAG_TEST df_RawInput_Test$Survived <- NA df_RawInput <- rbind(df_RawInput_Train,df_RawInput_Test) df_working <- df_RawInput # Factorize ----------------------------------------------------------------- colList <- c('PassengerId', 'Pclass', 'Survived', 'Sex') df_working[colList] <- lapply(df_working[colList], factor) #Embarked ---------------------------------------------------------- df_working%>% mutate(Embarked=factor(coalesce(Embarked,"N/A"))) -> df_working #Fare ---------------------------------------------------------- MadiaOfAll <- median(df_working$Fare,na.rm=TRUE) MadiaOfTrain <- median(df_working$Fare[df_working$DataTag==DATA_TAG_TRAIN],na.rm=TRUE) df_working%>% mutate(Fare_Adj_MedianOfAll=coalesce(Fare, MadiaOfAll), Fare_Adj_MedianOfTrain=coalesce(Fare, MadiaOfTrain)) -> df_working #Cabin ---------------------------------------------------------- df_working%>% mutate(Cabin = coalesce(Cabin, "")) -> df_working for(cab in LETTERS[1:7]){ # 1:7 -> A:G df_working[paste0("Cabin_",cab)] <- 
factor(grepl(cab,df_working$Cabin)) } df_working%>% mutate(Cabin_Length = nchar(Cabin), Cabin_Section = factor(if_else(Cabin_Length==0, 0, str_count(df_working$Cabin, ' ') + 1))) -> df_working df_working%>% separate(Cabin, sep = " ", into = c("Cabin_Number"), extra = "drop", remove = FALSE) %>% mutate(Cabin_Number = coalesce(as.integer(str_replace(Cabin_Number, "[[:alpha:]]", "")), as.integer(0))) %>% mutate(Cabin_NumberOE = factor(if_else(is.na(Cabin_Number), -1, Cabin_Number%%2))) -> df_working #Name ---------------------------------------------------------- df_working%>% #Name_Title = sapply(Name,function(x) strsplit(as.character(x),'[.,]')[[1]][2]), separate(Name, sep = '[.,]', into = c("Name_Last", "Name_Title", "Name_First"), extra = "merge", remove = FALSE) %>% mutate(Name_TitleSimple = str_trim(Name_Title), Name_TitleSimple = ifelse(Name_TitleSimple %in% c('Capt','Col','Don','Sir','Jonkheer','Major'), 'Mr', ifelse(Name_TitleSimple %in% c('Lady','Ms','the Countess','Mlle','Mme','Ms','Dona'), 'Miss', Name_TitleSimple)), Name_TitleSimple = factor(Name_TitleSimple), Name_Title = factor(Name_Title)) -> df_working #Ticket ---------------------------------------------------------- df_working%>% mutate(Ticket_PC = factor(grepl('PC',Ticket)), Ticket_STON = factor(grepl('STON',Ticket))) -> df_working #Age ---------------------------------------------------------- df_working%>% mutate(Age_ImputeBy35 = coalesce(Age, 35)) %>% mutate(Age_ImputeBy35_Bin=cut(Age_ImputeBy35, AGE_BIN_CUTOFF))-> df_working # Save clean data --------------------------------------------------------- saveRDS(df_working, file = FILE_CLEAN_DATA_BASE_RDS) write_csv(df_working, FILE_CLEAN_DATA_BASE_CSV) # Prepare ground Truth ---------------------------------------------------- # Groud truth data from http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic3.csv # Two dup name: Kelly, Mr. James and Connolly, Miss. 
Kate read_csv(FILE_SRC_DATA_RAW_TRUTH) %>% select(name, survived, ticket) %>% mutate(name = str_replace_all(name, '\"', '')) -> df_GroundTruth df_working %>% mutate(Name = str_replace_all(Name, '\"', '')) %>% select(Name, Ticket, PassengerId, DataTag) %>% inner_join(df_GroundTruth, by = c("Name"="name", "Ticket"="ticket" )) %>% select(PassengerId, GTSurvived = survived, Name, Ticket, DataTag) -> df_GroundTruth # if inner join found mismatch stopifnot(nrow(df_GroundTruth)==nrow(df_working)) write_csv(df_GroundTruth, FILE_CLEAN_DATA_GROUND_TRUTH) # Clean up ---------------------------------------------------------------- rm(list = setdiff(ls(), .VAR_BEFORE)) rm(.VAR_BEFORE)
c627f8034d6d30a396bb9e9d45aef6fecb44d0cc
92b0b62ce2a42cf733f3c564d2a7306a86662ba7
/plot2.r
c693d36e13491f66475293da97bf18093e343c6c
[]
no_license
naboldyrev/EDA
1b2c9d3f4a4485d236f7496c1e53a46560133207
9250f4b5378b32c99736161ef0b574993024d7b1
refs/heads/master
2021-01-22T08:06:55.770065
2016-09-19T09:34:26
2016-09-19T09:34:26
68,553,982
0
0
null
null
null
null
UTF-8
R
false
false
1,467
r
plot2.r
# plot2.R -- Global Active Power over 2007-02-01/02 (Exploratory Data Analysis).
#
# Downloads the UCI household power consumption archive, reads the two days of
# interest and writes a line plot of Global_active_power vs. time to plot2.png.

# Column types for read.csv: Date and Time arrive as text, the seven
# measurement columns are numeric ('?' marks missing values in the raw file).
colClss <- c("character", "character", rep("numeric", 7))

# Download the archive only when it is not already present locally.
zip_file <- file.path(getwd(), "household_power_consumption.zip")
if (!file.exists(zip_file)) {
  download.file(
    "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
    destfile = zip_file, method = "curl"
  )
}
# BUG FIX: the data file is read from data/, so extract there explicitly.
# The original unzip() dropped the file into the working directory, which
# never matched the read.csv() path below.
unzip(zip_file, exdir = "data")

consumption <- read.csv("data/household_power_consumption.txt",
                        colClasses = colClss, sep = ";",
                        na.strings = "?", nrows = 2075259)

# Combine date and time into a single POSIXlt timestamp for plots 2-4.
consumption$full_date <- strptime(paste(consumption$Date, consumption$Time),
                                  "%d/%m/%Y %H:%M:%S")
# Keep Date as a Date class for the range subsetting below.
# (The original script also ran as.Date() on the Time column with a
# time-of-day format, which silently produced today's date for every row;
# Time is never used on its own, so that bogus conversion is dropped.)
consumption$Date <- as.Date(consumption$Date, "%d/%m/%Y")

# Restrict to the two days of interest (inclusive).
startDate <- as.Date("2007-02-01")
endDate <- as.Date("2007-02-02")
needed_consumption <- subset(consumption, Date >= startDate & Date <= endDate)

# Render the line plot to a 480x480 PNG.
png(file = "plot2.png", width = 480, height = 480)
with(needed_consumption,
     plot(full_date, Global_active_power, type = "l",
          ylab = "Global Active Power (Kilowatt)", xlab = ""))
dev.off()
e7266be4c8c6d40ae8f33ce20202d90fed888562
7ec0c3344e1bffcaa1c7e7380b0618c6995c73ac
/scripts/08_compile-HTML.r
609d327a0319c6df627234a02ff5aee213917f91
[]
no_license
jennybc/vanNH
03afe89da5ff6315727b6cf692076d5ae13b7794
52d4c2302fd17cddc944402738ef5d781a1ab204
refs/heads/master
2020-06-02T06:25:45.305171
2015-06-09T06:34:45
2015-06-09T06:34:45
18,771,831
2
1
null
2014-04-16T06:30:54
2014-04-14T18:36:08
Python
UTF-8
R
false
false
1,976
r
08_compile-HTML.r
#!/usr/bin/Rscript
## Two jobs:
##  1. read the game identifier from the command line (with a hard-coded
##     default for interactive use),
##  2. knit 09_make-live-stats.rmd to HTML and move the outputs into the
##     game's 09_html directory.
## A Makefile target could handle job 2 alone, e.g.
##   Rscript -e "library(knitr); knit2html('vanNH-nowPlaying.rmd')"
## but job 1 is awkward to express there; hence this script.
library(rmarkdown)

cli_args <- commandArgs(trailingOnly = TRUE)
game <- if (length(cli_args) < 1) "2015-05-23_vanNH-at-wdcCT" else cli_args[1]

## Names of the files render() produces in the working directory.
local_html_file <- paste0(game, "_live-stats.html")
local_md_file <- paste0(game, "_live-stats.md")
local_figure_dir <- file.path(paste0(game, "_live-stats_files"), "figure-html")

render("09_make-live-stats.rmd", output_file = local_html_file, quiet = TRUE)

## Destination layout: ../games/<game>/09_html/...
game_html_dir <- file.path("..", "games", game, "09_html")
game_figure_dir <- file.path(game_html_dir,
                             paste0(game, "_live-stats_files"), "figure-html")

if (!file.exists(game_html_dir)) dir.create(game_html_dir)
if (!file.exists(game_figure_dir)) {
  dir.create(game_figure_dir, recursive = TRUE)
} else {
  ## Figure directory already exists: clear out figures from a previous run.
  invisible(file.remove(list.files(game_figure_dir, full.names = TRUE)))
}

game_md_file <- file.path(game_html_dir, paste0(game, "_live-stats.md"))
game_html_file <- file.path(game_html_dir, paste0(game, "_live-stats.html"))

## Move the freshly knitted outputs into place, then remove the now-empty
## local figure parent directory.
invisible(file.rename(local_md_file, game_md_file))
invisible(file.rename(local_html_file, game_html_file))
invisible(file.rename(local_figure_dir, game_figure_dir))
invisible(file.remove(dirname(local_figure_dir)))
9066a8db9d90ecacb4a04553cc4574a4faaa1e15
65553a50702fb30c40ec78f75ec305a8fab298f8
/R/getNeighborhood.R
9e588fff07fc150e52948f4fb4f5d42ff86b062d
[]
no_license
lzcyzm/Guitar
e22f1b9a34952b48b219a5bfcb8a91a9d1283408
cb1e1146c86ce8cf9caff505d1b22dca2bc37a3e
refs/heads/master
2021-01-10T07:42:46.819016
2015-10-22T02:55:25
2015-10-22T02:55:25
44,791,600
1
0
null
2015-10-23T04:54:56
2015-10-23T04:54:56
null
UTF-8
R
false
false
1,536
r
getNeighborhood.R
# Genomic neighbourhood of the 5' or 3' end of each transcript.
#
# For every transcript in `comp`, locate the transcript start (side = 5) or
# end (side = 3) in genomic coordinates and expand it into a fixed-width
# flanking region.
#
# Args:
#   comp:  named GRangesList of transcript components; names are transcript
#          identifiers.
#   side:  which transcript end to anchor on: 5 (start) or 3 (end).
#   Width: width in bp of the returned neighbourhood region.
# Returns:
#   A GRangesList, one element per transcript in the input order, carrying a
#   `mapped_chr` metadata column with the genomic chromosome of each region.
getNeighborhood <- function(comp, side=5, Width=1000) {
    # BUG FIX: the original silently fell through for side values other than
    # 5 or 3 and later failed with an obscure "object not found" error.
    if (!side %in% c(5, 3)) {
        stop("'side' must be 5 or 3, got: ", side)
    }

    # transcript info
    tx_name <- names(comp)
    granges <- unlist(comp)
    tx <- granges[tx_name]
    strand <- as.character(as.data.frame(strand(tx))[[1]])

    # Transcript-local coordinate of the requested end, and which side of the
    # anchored point resize() keeps fixed.  (This logic was duplicated across
    # the two branches in the original; only these two values differed.)
    if (side == 5) {
        checkpoints <- rep(1, length(tx))  # first base of each transcript
        resize_fix <- "end"                # region ends at the 5' point
    } else {
        checkpoints <- sum(width(comp))    # last base of each transcript
        resize_fix <- "start"              # region starts at the 3' point
    }

    checkpoints_transcript <- GRanges(seqnames=tx_name,
                                      IRanges(start=checkpoints,
                                              end=checkpoints,
                                              names=tx_name),
                                      strand=strand)
    # convert to genomic coordinates, then expand to the requested width
    checkPoints_genomic <- mapFromTranscripts(checkpoints_transcript, comp)
    checkRegion_genomic <- resize(x=checkPoints_genomic, width=Width,
                                  fix=resize_fix)

    # convert to a per-transcript list, preserving the input order
    names(checkRegion_genomic) <- rep("", length(tx))
    sidelist <- split(checkRegion_genomic, tx_name, drop=TRUE)
    sidelist <- sidelist[tx_name]

    mapped_chr <- as.character(as.data.frame(seqnames(checkRegion_genomic))[[1]])
    mcols(sidelist) <- data.frame(mapped_chr)

    sidelist
}
965d40f66cb6bacb0e24e09bac1e62d061321e78
d165abb74a6c882f89cbacc021ae78da018ea38f
/plot_MAF.R
e09d9c183a14c0815531362167fd8e977e1d8c28
[]
no_license
bkiranmayee/Scripts
4fec4f2620b6f86bddb618812e169fa77c9c1c90
03c2169956fabc154d9f2d976a76aa5344b56137
refs/heads/master
2020-04-25T17:48:21.074457
2019-04-17T16:27:22
2019-04-17T16:27:22
172,961,784
0
0
null
null
null
null
UTF-8
R
false
false
1,029
r
plot_MAF.R
#!/usr/bin/env Rscript
# Date:   June 8 2018
# Author: Kiranmayee Bakshy
#
# Plot overlaid density curves of MAF / fraction-missing per variant for two
# datasets (designed for plink output files such as file.frq and file.lmiss,
# pre-saved as RDS).
#
# Usage: plot_MAF.R <combined.rds> <filtered.rds> [output.pdf]
#   args[1], args[2]: RDS files whose second column holds the statistic
#   args[3]:          output PDF (defaults to out.pdf)

args <- commandArgs(trailingOnly = TRUE)

# BUG FIX: the original only stopped when zero arguments were supplied, so a
# single argument slipped past the check (despite the "at least two" message)
# and failed later inside readRDS() with an unhelpful error.
if (length(args) < 2) {
  stop("At least two arguments must be supplied (input file).\n", call. = FALSE)
} else if (length(args) == 2) {
  # default output file
  args[3] <- "out.pdf"
}

library(ggplot2)   # NOTE(review): loaded but unused below -- kept for parity
library(reshape2)  # NOTE(review): loaded but unused below -- kept for parity

combined <- readRDS(args[1])
filtered <- readRDS(args[2])

pdf(file = args[3], onefile = TRUE, paper = "A4r")
# BUG FIX: 'border' is not a plot.density()/lines() argument and only
# produced "not a graphical parameter" warnings, so it has been dropped.
plot(density(combined[[2]], na.rm = TRUE), col = "blue",
     main = "Density plot", xlab = names(filtered[2]))
lines(density(filtered[[2]], na.rm = TRUE), col = "red")
legend("top", c("Combined", "Filtered"), lty = c(1, 1),
       col = c("blue", "red"))
dev.off()
f2cfcbe3b71cff17006ec8d6835ab7a8deb873dd
9dc3d2ba0bc6ee5cea5b9d1ea1d4ca9296acf103
/pract_5.R
aa7a6a4f2f58dca8ee28fdf62235e7a124977bb3
[]
no_license
purva-d/SNA
b043e272c7e4c22d92c67ce2c6f35668babc7f7b
f1ec545ce14504139f672ce2acf339ff93c5038c
refs/heads/master
2022-12-28T02:51:23.765426
2020-10-15T17:51:25
2020-10-15T17:51:25
290,763,545
0
0
null
null
null
null
UTF-8
R
false
false
326
r
pract_5.R
## Practical 5: three representations of the same directed network (igraph).
library(igraph)

## 1. As a graph object: "-+" is a one-way tie, "++" a reciprocated tie.
##    (Term order kept as-is: igraph adds vertices in order of appearance.)
net <- graph.formula(
  Garth-+Angela, Bill-+Elena, Elena++Frank,
  Carol-+Andy, Carol-+Elena, Carol++Dan,
  Carol++Bill, Dan++Bill, Elena-+Daphne,
  Daphne++Garth
)
plot(net)

## 2. As an adjacency matrix.
get.adjacency(net)

## 3. As an edge list.
E(net)
6a16c074e2f3d819993babe89845e117279dfb6b
dc35bc08b2d9eebf783d0a1445d3120b8d83494d
/man/annotate_df.Rd
c4d904d5030afa427f0afd5f29fd2c6dbb7c7d98
[ "MIT" ]
permissive
hbc/CHBUtils
c83272fd84922fc6fa7e5d34e42b2199552afd5c
a226ceeced2353ee2c7ba2eb22f52bf90c023898
refs/heads/master
2021-01-15T15:27:14.308614
2017-10-26T18:01:19
2017-10-26T18:01:19
14,430,820
2
5
null
2017-10-26T18:01:20
2013-11-15T17:35:32
R
UTF-8
R
false
true
1,128
rd
annotate_df.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/annotate_df.R \name{annotate_df} \alias{annotate_df} \title{Annotate Dataframe containing Ensembl IDs with Gene Symbols and Descriptions} \usage{ annotate_df(df, df_ensemblid_header, biomart_ensembl_dataset, biomart_ensemblid_filter, biomart_genesymbol_attribute) } \arguments{ \item{df}{dataframe, required} \item{df_ensemblid_header}{character string containing the name of the dataframe column with ensembl ids, required} \item{biomart_ensembl_dataset}{character string describing the biomart dataset to use, required} \item{biomart_ensemblid_filter}{character string describing the biomart ensembl id filter to use, required} \item{biomart_genesymbol_attribute}{character string describing the biomart gene symbol attribute to obtain, required} } \value{ annotated dataframe } \description{ Annotate Dataframe containing Ensembl IDs with Gene Symbols and Descriptions } \examples{ annotate_df(temp, "id", 'mmusculus_gene_ensembl', "ensembl_gene_id", "mgi_symbol") } \seealso{ \code{\link{biomaRt}} used to annotate the dataframe }
9327d57159b7f33f7336c222e1aaa470ce2d5f2d
616f19c9ecab3330a70d7378f5f73bbbc84ee071
/cpp/ctrlbars/ctlbmac.r
e688d65bd72a8c908917deee76f0c2c4966c0797
[]
no_license
rjking58/development
07dd809da012f736d82fde79c051cc2df328e6b7
9566648c6ecfc8b8cb6d166912dd674346da246b
refs/heads/master
2021-10-11T12:25:31.025767
2021-10-01T16:41:06
2021-10-01T16:41:06
14,648,819
0
3
null
null
null
null
UTF-8
R
false
false
3,174
r
ctlbmac.r
#include "mrc\types.r" #include "mrc\balloons.r" #include "resource.h" #include "systypes.r" ///////////////////////////////////////////////////////////////////////// // WLM resources #include "ftab.r" ///////////////////////////////////////////////////////////////////////// // MFC resources #include "afxaete.r" ///////////////////////////////////////////////////////////////////////// // Code fragment resource #include "CodeFrag.r" ///////////////////////////////////////////////////////////////////////// // CtrlBars resources resource 'SIZE' (-1) { reserved, acceptSuspendResumeEvents, reserved, canBackground, doesActivateOnFGSwitch, backgroundAndForeground, dontGetFrontClicks, ignoreAppDiedEvents, is32BitCompatible, isHighLevelEventAware, localAndRemoteHLEvents, notStationeryAware, dontUseTextEditServices, reserved, reserved, reserved, #ifdef _MPPC_ 2500 * 1024, 2500 * 1024 #else // 68K Mac #ifdef _DEBUG 3000 * 1024, 3000 * 1024 #else 2000 * 1024, 2000 * 1024 #endif #endif }; resource 'vers' (1) { 0x01, 0x00, final, 0x00, verUS, "1.0", "ControlBars 1.0, Copyright \251 Microsoft Corp. 1994-1997" }; resource 'BNDL' (128) { 'CTLB', 0, { 'FREF', { 0, 128 }, 'ICN#', { 0, IDR_MAINFRAME } } }; type 'CTLB' as 'STR '; resource 'CTLB' (0) { "ControlBars 1.0 Copyright \251 1994-1997 Microsoft Corp." }; resource 'FREF' (128) { 'APPL', 0, "" }; /* Balloon help resources */ resource 'hfdr' (-5696) { HelpMgrVersion, hmDefaultOptions, 0, 0, { HMSTRResItem {500} } }; resource 'hovr' (1000) { HelpMgrVersion, hmDefaultOptions, 0, 0, HMStringItem /* missing items override */ { "Miscellaneous part of the Microsoft ControlBars " "Sample Application." }, { HMSkipItem {}, /* title bar */ HMSkipItem {}, /* reserved. always skip item here */ HMStringItem /* close box */ { "Click here to close the Microsoft ControlBars " "Sample Application." }, HMStringItem /* zoom box */ { "Click here to Zoom In or Zoom Out." 
}, HMSkipItem {}, /* active app's inactive window */ HMStringItem /* inactive app's window */ { "This is not part of the Microsoft ControlBars " "Application. It may be part of the Apple " "Finder, or some other application." }, HMSkipItem {} /* outside modal dialog */ } }; #ifdef _MPPC_ resource 'STR ' (500) { "This is the Win32 ControlBars sample application " "ported to the Power Macintosh using Microsoft VC++ " "Edition for the Apple Power Macintosh" }; #else // 68K Mac resource 'STR ' (500) { "This is the Win32 ControlBars sample application " "ported to the Macintosh using Microsoft VC++ Edition " "for the Apple Macintosh" }; #endif #ifdef _MPPC_ resource 'cfrg' (0) { { kPowerPC, kFullLib, kNoVersionNum,kNoVersionNum, 0, 0, kIsApp,kOnDiskFlat,kZeroOffset,kWholeFork, "" } }; #endif
43c95a99331a5fb54d504a2d377c7a26ca78e983
0a906cf8b1b7da2aea87de958e3662870df49727
/grattan/inst/testfiles/IncomeTax/libFuzzer_IncomeTax/IncomeTax_valgrind_files/1610125576-test.R
a21db92b3fd58fd2d3dce1d4b80e0b53990f6b67
[]
no_license
akhikolla/updated-only-Issues
a85c887f0e1aae8a8dc358717d55b21678d04660
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
refs/heads/master
2023-04-13T08:22:15.699449
2021-04-21T16:25:35
2021-04-21T16:25:35
360,232,775
0
0
null
null
null
null
UTF-8
R
false
false
263
r
1610125576-test.R
## libFuzzer-derived regression input for grattan::IncomeTax (valgrind run
## 1610125576): an empty rates vector combined with denormal/tiny thresholds.
fuzz_input <- list(
  rates = numeric(0),
  thresholds = c(4.48309464024909e-120, 4.48309464024909e-120,
                 4.48309464024909e-120, 1.3906710913539e-309,
                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
  x = 5.0758836746313e-116
)
outcome <- do.call(grattan::IncomeTax, fuzz_input)
str(outcome)
826dffb2754a71403f8bdd1b8328e24b2cac2ca2
bdda050a3713f1ac5b996c2f5c600daecbad920b
/man/convertFlowFrame.Rd
b96b47452f92bdc58690b1a33259fbb983ace2d6
[ "MIT" ]
permissive
astrolabediagnostics/orloj
dd466acb293447b33426a22766770323b50fee99
f24b7ef7710e4ba3adaf0d615238bfcd8fe46380
refs/heads/master
2021-06-04T16:20:04.772433
2021-05-19T14:14:23
2021-05-19T14:14:23
110,556,030
5
3
MIT
2020-08-09T14:17:33
2017-11-13T14:07:22
R
UTF-8
R
false
true
892
rd
convertFlowFrame.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fcs.R \name{convertFlowFrame} \alias{convertFlowFrame} \title{Convert a FlowCore flow_frame to Astrolabe sample.} \usage{ convertFlowFrame(experiment, filename, flow_frame) } \arguments{ \item{experiment}{An Astrolabe experiment.} \item{filename}{The name of the FCS file to import.} \item{flow_frame}{FlowCore flow frame.} } \value{ FCS data, in orloj internal FCS list format. } \description{ Convert the flow_frame class into orloj's internal FCS list format. } \details{ The orloj FCS list format will accumulate more fields as analyses are applied to it. For example, pre-processing will add a mask to find the non-bead indices. You can use \code{\link{fcsExprs}} to get the expression data after applying all of the masks that are in the list. } \seealso{ \code{\link{isSample}}, \code{\link{fcsExprs}} }
1252907dc008f1f69d546b324a97245af4263fdf
b28f74d681bb5dfbf34549c82a8c932f77c1b0a8
/man/aggrNums.Rd
c23fe3d05194b0f0f06d74ee179a2a38072175cd
[ "MIT" ]
permissive
sailfish009/proteoQ
b07e179e9fe27a90fd76cde2ed7caa55e793e9d6
e6a4fe79a21f9a9106a35d78c2ce42d59e9d82e2
refs/heads/master
2022-12-25T20:06:40.340740
2020-10-15T20:18:14
2020-10-15T20:18:14
null
0
0
null
null
null
null
UTF-8
R
false
true
477
rd
aggrNums.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{aggrNums} \alias{aggrNums} \title{Summarizes numeric values} \usage{ aggrNums(f) } \arguments{ \item{f}{A function for data summarization.} } \description{ \code{aggrNums} summarizes \code{log2FC} and \code{intensity} by the descriptive statistics of \code{c("mean", "median", "weighted.mean", "top.3")} } \examples{ \donttest{df_num <- aggrNums(median)(df, prot_acc, na.rm = TRUE)} }
ad38a2611a9311087731b87532e364dbe1b1bf8a
9cf86927ef16f81fb62f9661134a3fc82fe34a00
/Scripts/Script3.R
e7cccd972e5c19a926555b2876bfd1cc99070dc6
[]
no_license
esorolla/RepData_PeerAssessment1
97e14d70a90b2cc404323967482099b6a491388d
53436253f7633804ddab5faf88c9f9fea6f9349e
refs/heads/master
2022-11-24T04:31:21.973535
2020-07-15T19:09:09
2020-07-15T19:09:09
279,573,724
0
0
null
2020-07-14T12:04:08
2020-07-14T12:04:08
null
UTF-8
R
false
false
533
r
Script3.R
## Script3.R: average daily activity pattern.
## Expects a data frame `data` with columns `steps` and `interval`
## (Activity Monitoring assignment) to already be in the workspace.

## Mean number of steps per 5-minute interval, averaged across the days.
## NOTE(review): NAs in `steps` would propagate through mean(); this assumes
## `data` was already filtered upstream -- confirm against the calling script.
timeSeries <- aggregate(data$steps, by = list(interval = data$interval),
                        FUN = mean)

## Time-series plot of the average daily pattern.
plot(timeSeries$interval, timeSeries$x, type = "l",
     xlab = "interval (minutes)", ylab = "average number of steps")

## Minute interval with the maximum average step count.
## BUG FIX: which.max() replaces which(x == max(x)), avoiding an exact
## floating-point equality comparison and guaranteeing a single index.
reqInterval <- timeSeries$interval[which.max(timeSeries$x)]
28dd600753a2fa036e271b998dae55f8b11c846e
902037115141ead7b315e7b63e437ec61c01c2c1
/R/removeRedundancy.R
e1b922594b612c39c210be9e905d21de092ecfcd
[]
no_license
cran/scrime
4bdc7e989ba9e648d004ca47cd2d10bb5e78a717
cf0033dbfe2a6fa807593a460ef4bcb0931db96a
refs/heads/master
2021-06-02T21:50:17.706604
2018-12-01T10:00:03
2018-12-01T10:00:03
17,699,500
1
1
null
null
null
null
UTF-8
R
false
false
173
r
removeRedundancy.R
#' Remove redundancy from a vector of interaction encodings.
#'
#' Applies evalRedundancy() to every element of `vec.ia`, replacing each
#' element with its (possibly simplified) non-redundant form.  The exact
#' semantics of the elements are defined by evalRedundancy().
#'
#' @param mat    Matrix or data frame of variables; coerced to a data frame
#'   once up front so evalRedundancy() always receives the same object.
#' @param vec.ia Vector of interaction encodings to be checked one by one.
#' @param red    Redundancy threshold passed through to evalRedundancy().
#' @return `vec.ia` with each element rewritten by evalRedundancy();
#'   an empty input is returned unchanged.
removeRedundancy <- function(mat, vec.ia, red = 0) {
  mat <- as.data.frame(mat)
  # BUG FIX: seq_along() instead of 1:length(vec.ia), which expanded to
  # c(1, 0) and crashed when vec.ia was empty.
  for (i in seq_along(vec.ia)) {
    vec.ia[i] <- evalRedundancy(mat, vec.ia[i], red = red)
  }
  vec.ia
}
df173cc54b9aa3b513727951cf7ac289b14ce2d0
073e4e7c9c2f4822e798f4a56e4ff90b11b5a85c
/Code/baseline_binary_comparison.R
04515bd098f1c40030758334f35dbfd3399a935d
[]
no_license
peteryzheng/RET_ACCESS
2cff82bd261beff926affd24798ac02ef2b8775a
ac4e3544d85c90ef723aa3dc433d468515020133
refs/heads/master
2022-12-13T08:56:32.229201
2020-08-06T04:19:45
2020-08-06T04:19:45
285,464,497
0
0
null
null
null
null
UTF-8
R
false
false
7,547
r
baseline_binary_comparison.R
library(data.table) library(tidyverse) baseline.sample.pairs = fread('/ifs/work/bergerm1/zhengy1/RET_all/Sample_mapping/baseline_pairs_121519.tsv') # pull data for both samples # DMP.maf <- fread('/ifs/work/bergerm1/zhengy1/dmp/mskimpact/data_mutations_extended.txt') %>% # filter(Mutation_Status == 'SOMATIC' & Tumor_Sample_Barcode %in% baseline.sample.pairs$DMP_ID) %>% data.table() maf.dir = '/ifs/work/bergerm1/zhengy1/RET_all/Analysis_files/run_011520/results_combined_permissive/' access.maf = fread(paste0(gsub('_combined','',maf.dir),'all_sample_maf2maf_oncokb.maf')) %>% filter(Tumor_Sample_Barcode %in% c(baseline.sample.pairs$`CMO sample ID.plasma`,baseline.sample.pairs$DMP_ID)) %>% data.table() x = baseline.sample.pairs$Study_ID[1] x = 'EDD_ret_pt013' total.detection.comp = do.call(rbind,lapply(baseline.sample.pairs$Study_ID,function(x){ print(x) # get all possible mutation called in either or both samples all.unique.calls = access.maf[Tumor_Sample_Barcode %in% c(baseline.sample.pairs[Study_ID == x]$`CMO sample ID`, baseline.sample.pairs[Study_ID == x]$DMP_ID)] %>% transmute(Hugo_Symbol,Chromosome,Start_Position,End_Position,Variant_Classification,Reference_Allele, Tumor_Seq_Allele2,HGVSp_Short,oncogenic,call_confidence) %>% filter(call_confidence %in% c('Called','Genotyped')) %>% select(-call_confidence) %>% unique() %>% data.table() reviewed.calls = fread(paste0(maf.dir,'/',gsub('EDD_|pt','',x),'_table.csv'))[call_confidence == 'High', .(Hugo_Symbol,Chromosome = as.character(Chromosome), Start_Position,End_Position,Variant_Classification, Reference_Allele,Tumor_Seq_Allele2,HGVSp_Short)] all.unique.calls = merge(all.unique.calls,reviewed.calls,by = c('Hugo_Symbol','Chromosome','Start_Position','End_Position','Variant_Classification', 'Reference_Allele','Tumor_Seq_Allele2','HGVSp_Short')) # get those all possible calls' genotypes in access sample access.tmp.maf = access.maf[Tumor_Sample_Barcode == baseline.sample.pairs[Study_ID == x]$`CMO sample ID`] %>% 
transmute(Hugo_Symbol,Chromosome,Start_Position,End_Position,Variant_Classification,Reference_Allele, Tumor_Seq_Allele2,HGVSp_Short,t_alt_count,t_ref_count,t_depth,Tumor_Sample_Barcode,oncogenic,call_confidence) %>% merge(all.unique.calls,by = c('Hugo_Symbol','Chromosome','Start_Position','End_Position','Variant_Classification', 'Reference_Allele','Tumor_Seq_Allele2','HGVSp_Short','oncogenic'))%>% data.table() # get those all possible calls' genotypes in DMP sample dmp.tmp.maf = access.maf[Tumor_Sample_Barcode == baseline.sample.pairs[Study_ID == x]$DMP_ID] %>% transmute(Hugo_Symbol,Chromosome,Start_Position,End_Position,Variant_Classification,Reference_Allele, Tumor_Seq_Allele2,HGVSp_Short,t_alt_count,t_ref_count,t_depth,Tumor_Sample_Barcode,oncogenic,call_confidence) %>% merge(all.unique.calls,by = c('Hugo_Symbol','Chromosome','Start_Position','End_Position','Variant_Classification', 'Reference_Allele','Tumor_Seq_Allele2','HGVSp_Short','oncogenic'))%>% data.table() detection.comp = merge(dmp.tmp.maf,access.tmp.maf, by = c('Hugo_Symbol','Chromosome','Start_Position','End_Position','Variant_Classification', 'Reference_Allele','Tumor_Seq_Allele2','HGVSp_Short','oncogenic'), all = T,suffixes = c('.tumor','.plasma')) %>% rowwise() %>% mutate(EDD_Patient_ID = x,detection_status = case_when( call_confidence.tumor != 'Not Called' & call_confidence.plasma != 'Not Called' ~ 'Both', call_confidence.tumor != 'Not Called' ~ 'Tumor', call_confidence.plasma != 'Not Called' ~ 'Plasma', TRUE ~ 'neither' )) %>% data.table() })) # graphing section -------------------------------------------------------- # artifact filtering for plasma artifacts = total.detection.comp[detection_status == 'Plasma',.N,.(Hugo_Symbol,Chromosome,Start_Position,End_Position,Variant_Classification,Reference_Allele,Tumor_Seq_Allele2,oncogenic)] total.detection.comp = merge(total.detection.comp,artifacts,by = c('Hugo_Symbol','Chromosome','Start_Position','End_Position','Variant_Classification', 
'Reference_Allele','Tumor_Seq_Allele2','oncogenic'), all = T) %>% filter(N == 1 | is.na(N)) %>% data.table() # overview of detection status table(total.detection.comp$detection_status) # graphing snippet detection.graph.df = total.detection.comp[,.(oncogenic_mutation = length(which(oncogenic %in% c('Oncogenic','Likely Oncogenic'))), non_oncogenic_mutation = length(which(!oncogenic %in% c('Oncogenic','Likely Oncogenic')))),detection_status] %>% melt(id.vars = 'detection_status',variable.name = 'Oncogenicity',value.name = 'Count') pdf('/ifs/work/bergerm1/zhengy1/RET_all/For_Ezra/RET_Detection/baseline_concordance.pdf',width = 7,height = 7) ggplot(detection.graph.df) + geom_bar(aes(x = detection_status,y = Count,fill = Oncogenicity),stat = 'identity') + scale_fill_brewer(palette = "Pastel2") + theme_classic() dev.off() detection.graph.df = total.detection.comp[,.(Count = .N),.(detection_status,Variant_Classification)] pdf('/ifs/work/bergerm1/zhengy1/RET_all/For_Ezra/RET_Detection/baseline_concordance_class.pdf',width = 7,height = 7) ggplot(detection.graph.df) + geom_bar(aes(x = detection_status,y = Count,fill = Variant_Classification),stat = 'identity') + scale_fill_brewer(palette = "Pastel2") + theme_classic() dev.off() total.detection.comp[,.(tumor_only = nrow(.SD[detection_status == 'Tumor']), plasma_only = nrow(.SD[detection_status == 'Plasma']), both = nrow(.SD[detection_status == 'Both'])),EDD_Patient_ID] colourCount = nrow(unique(total.detection.comp[,.(EDD_Patient_ID)])) getPalette = colorRampPalette(brewer.pal(8, "Set2")) pdf('/ifs/work/bergerm1/zhengy1/RET_all/For_Ezra/RET_Detection/baseline_VAFs.pdf',width = 10,height = 7) ggplot(total.detection.comp[detection_status == 'Both']) + geom_point(aes(x = as.numeric(t_alt_count.tumor)/as.numeric(t_depth.tumor), y = as.numeric(t_alt_count.plasma)/as.numeric(t_depth.plasma), color = EDD_Patient_ID,shape = oncogenic)) + scale_color_manual(values = getPalette(colourCount),name = 'Alteration') + theme_classic() + 
xlab('Tumor_VAF') + ylab('Plasma_VAF') dev.off() pdf('/ifs/work/bergerm1/zhengy1/RET_all/For_Ezra/RET_Detection/baseline_VAFs_all.pdf',width = 10,height = 7) ggplot(total.detection.comp) + geom_point(aes(x = as.numeric(t_alt_count.tumor)/as.numeric(t_depth.tumor), y = as.numeric(t_alt_count.plasma)/as.numeric(t_depth.plasma), color = EDD_Patient_ID,shape = oncogenic)) + scale_color_manual(values = getPalette(colourCount),name = 'Alteration') + theme_classic() + xlab('Tumor_VAF') + ylab('Plasma_VAF') dev.off() # analysis of each category ----------------------------------------------- table(total.detection.comp[detection_status == 'Plasma']$Hugo_Symbol)
0a04660177a936a99589afe92c00016638c2ebe9
3b24d08de0d126d7906c23d6574607ba0dc8281c
/niko.rd
ea1e6cd4278df39755cbde0eeb5b99fcde159c16
[]
no_license
htbrdd/rdworks-files
de6602f90dcc1933f13f4fc970d5ad5315146569
af17e751cf3a31b71de4c748fb1cf9d9032d16ef
refs/heads/master
2021-03-19T08:29:59.037313
2018-01-21T01:06:43
2018-01-21T01:06:43
82,507,071
0
0
null
null
null
null
ISO-8859-1
R
false
false
160,874
rd
niko.rd
қúz‹‰Ò‰p‰‰‰‰‰‰‰‰‰‰p wwww¹‰‰‰‰‰p‰‰Oµ‰‰ƒûÇpى‰‰‰‰‰‰‰‰‰pY‰‰Ï剉ƒûÇp‰ ‰ ‰‰‰‰‰‰‰‰‰‰p ‰B‰‰‰ùÐ9‰¥ïÐI‰¯»Ð»‰¥ïÐˉϻ‰‰‰‰ÄI‰‹pۉwwww¹‰‰‰‰‰p[‰‰‰Oµ‰‰ƒûÇpi‰wwww¹‰‰‰‰‰p뉉‰Oµ‰‰ƒûÇÄ«‰p݉‰‰‰‰‰pÝ ‰‰‰‰‰p]‰‰‰‰‰‰p] ‰‰‰‰‰z ‰‰‰‰‰‰‰‰‰‰z‰‰z ‰ü‰‰ü ‰ü‹ £1•Iã©ü ‰‰‰‰‰‰‰‰‰‰ü‰‰Ïù‰‰ƒûÙü‰‰‰‰‰‰‰‰‰‰ü‰ü ‰ ‰ wwû1™‰‰ƒûÙä‰pé‰p‰‰‰‰‰‰‰‰‰‰p‰‰Ïù‰‰ƒûÙp+‰‰‰‰‰‰‰‰‰‰p­‰p‰ ‰ wwû1™‰‰ƒûÙÄ Ä‹‰Ä ¹Ä ™Ä B‹‰‰ùÐ ¥ïÐ)¯»Ð‹¥ïЫϻЛ‰‰‰‰‰Ð‰‰‰‰‰Ä ‚‰‰  ‰‰‰1ᢉ‰ LJ‰‰‰1ႉ‰Y+‰‰‰1ᢉ‰Û‰1á÷ã‰í¢‰‰Ù‰‰‰³Å‚‰‰ Åñ‰‰‰³Å¢‰‰ g‰‰‰³Å Ÿ‰í¢‰‰ LJ‰‰‰3¹‚‰‰Y+‰‰‰3¹¢‰‰Û‰3¹÷ã‰í¢‰‰Ù‰‰‰µ‚‰‰ Çm‰‰‰µ¢‰‰ g‰‰‰µ Ÿ‰í¢‰‰ Gs‰‰‰µñ‚‰‰Y+‰‰‰µñ¢‰‰Û‰µñ÷ã‰í¢‰‰Ù‰‰‰5Ղ‰‰ Çm‰‰‰5Õ¢‰‰ g‰‰‰5Õ Ÿ‰í¢‰‰ Gs‰‰‰·É‚‰‰Y+‰‰‰·É¢‰‰Û‰·É÷ã‰í¢‰‰Ù‰‰‰7­‚‰‰ Ùۉ‰‰7­¢‰‰ g‰‰‰7­ Ÿ‰í¢‰‰ Y቉‰É‚‰‰Y+‰‰‰É¢‰‰Û‰É÷ã‰í¢‰‰Ù‰‰‰É傉‰ Ùۉ‰‰É墉‰ g‰‰‰Éå Ÿ‰í¢‰‰ []‰‰‰Iق‰‰Y+‰‰‰IÙ¢‰‰Û‰IÙ÷ã‰í¢‰‰Ù‰‰‰Ë½‚‰‰ Û7‰‰‰Ë½¢‰‰ g‰‰‰Ë½ Ÿ‰í¢‰‰ ]K‰‰‰K‘‚‰‰Y+‰‰‰K‘¢‰‰Û‰K‘÷ã‰í¢‰‰Ù‰‰‰Kõ‚‰‰ Ý%‰‰‰Kõ¢‰‰ g‰‰‰Kõ Ÿ‰í¢‰‰ _¹‰‰‰Í邉‰Y+‰‰‰Í颉‰Û‰Íé÷ã‰í¢‰‰Ù‰‰‰M͂‰‰ ߓ‰‰‰MÍ¢‰‰ g‰‰‰MÍ Ÿ‰í¢‰‰ _¹‰‰‰Ï¡‚‰‰Y+‰‰‰Ï¡¢‰‰Û‰Ï¡÷ã‰í¢‰‰Ù‰‰‰O…‚‰‰ ߓ‰‰‰O…¢‰‰ g‰‰‰O… Ÿ‰í¢‰‰ _¹‰‰‰Où‚‰‰Y+‰‰‰Où¢‰‰Û‰Où÷ã‰í¢‰‰Ù‰‰‰Á݂‰‰ щ‰‰ÁÝ¢‰‰ g‰‰‰ÁÝ Ÿ‰í¢‰‰ Q‰‰‰A±‚‰‰Y+‰‰‰A±¢‰‰Û‰A±÷ã‰í¢‰‰Ù‰‰‰Ã•‚‰‰ щ‰‰Ã•¢‰‰ g‰‰‰Ã• Ÿ‰í¢‰‰ Sƒ‰‰‰C‰‚‰‰Y+‰‰‰C‰¢‰‰Û‰C‰÷ã‰í¢‰‰Ù‰‰‰C킉‰ Qý‰‰‰Cí¢‰‰ g‰‰‰Cí Ÿ‰í¢‰‰ Õñ‰‰‰ÅÁ‚‰‰Y+‰‰‰ÅÁ¢‰‰Û‰ÅÁ÷ã‰í¢‰‰Ù‰‰‰E¥‚‰‰ S뉉‰E¥¢‰‰ g‰‰‰E¥ Ÿ‰í¢‰‰ ×m‰‰‰Ç™‚‰‰Y+‰‰‰Ç™¢‰‰Û‰Ç™÷ã‰í¢‰‰Ù‰‰‰Çý‚‰‰ UG‰‰‰Çý¢‰‰ g‰‰‰Çý Ÿ‰í¢‰‰ ×m‰‰‰Gт‰‰Y+‰‰‰GÑ¢‰‰Û‰GÑ÷ã‰í¢‰‰Ù‰‰‰Ùµ‚‰‰ UG‰‰‰Ùµ¢‰‰ g‰‰‰Ùµ Ÿ‰í¢‰‰ ×m‰‰‰Y©‚‰‰Y+‰‰‰Y©¢‰‰Û‰Y©÷ã‰í¢‰‰Ù‰‰‰Û‚‰‰ Wµ‰‰‰Û¢‰‰ g‰‰‰Û Ÿ‰í¢‰‰ éۉ‰‰Ûႉ‰Y+‰‰‰Ûᢉ‰Û‰Ûá÷ã‰í¢‰‰Ù‰‰‰[ł‰‰ i!‰‰‰[Å¢‰‰ g‰‰‰[Å Ÿ‰í¢‰‰ ë7‰‰‰Ý¹‚‰‰Y+‰‰‰Ý¹¢‰‰Û‰Ý¹÷ã‰í¢‰‰Ù‰‰‰]‚‰‰ i!‰‰‰]¢‰‰ g‰‰‰] Ÿ‰í¢‰‰ í%‰‰‰]ñ‚‰‰Y+‰‰‰]ñ¢‰‰Û‰]ñ÷ã‰í¢‰‰Ù‰‰‰ßՂ‰‰ k‰‰‰ßÕ¢‰‰ g‰‰‰ßÕ Ÿ‰í¢‰‰ ‰‰_ɂ‰‰Y+‰‰‰_É¢‰‰Û‰_É÷ã‰í¢‰‰Ù‰‰‰Ñ­‚‰‰ m‰‰‰Ñ­¢‰‰ g‰‰‰Ñ­ Ÿ‰í¢‰‰ ‰‰Q‚‰‰Y+‰‰‰Q¢‰‰Û‰Q÷ã‰í¢‰‰Ù‰‰‰Q傉‰ m‰‰‰Q墉‰ g‰‰‰Qå Ÿ‰í¢‰‰ ‰‰Óق‰‰Y+‰‰‰ÓÙ¢‰‰Û‰ÓÙ÷ã‰í¢‰‰Ù‰‰‰S½‚‰‰ ïy‰‰‰S½¢‰‰ g‰‰‰S½ Ÿ‰í¢‰‰ ቉‰Õ‘‚‰‰Y+‰‰‰Õ‘¢‰‰Û‰Õ‘÷ã‰í¢‰‰Ù‰‰‰Õõ‚‰‰ á׉‰‰Õõ¢‰‰ g‰‰‰Õõ Ÿ‰í¢‰‰ aý‰‰‰U邉‰Y+‰‰‰U颉‰Û‰Ué÷ã‰í¢‰‰Ù‰‰‰×͂‰‰ 
á׉‰‰×Í¢‰‰ g‰‰‰×́C‰í¢‰‰ c뉉‰W¡‚‰‰a'‰‰‰W¡¢‰‰³Ó‰‰‰W¡÷ã‰í¢‰‰á‰‰‰é…‚‰‰ ãʼn‰‰é…¢‰‰ …­‰‰‰é…‡‰í¢‰‰ eG‰‰‰éù‚‰‰ó‰‰‰éù¢‰‰¡Ÿ‰‰‰éù÷ã‰í¢‰‰ñù‰‰‰i݂‰‰ å1‰‰‰iÝ¢‰‰ )‰‰‰iÝ…-‰í¢‰‰ eG‰‰‰ë±‚‰‰ ¡‰‰‰ë±¢‰‰‰‰‰ë±÷ã‰í¢‰‰‹›‰‰‰k•‚‰‰ å1‰‰‰k•¢‰‰ ­¹‰‰‰k• Ÿ‰í¢‰‰ eG‰‰‰í‰‚‰‰ ¡‰‰‰í‰¢‰‰‰‰‰í‰}Á‰í¢‰‰=‰‰‰í킉‰ 築‰‰íí¢‰‰ %ۉ‰‰íí Ÿ‰í¢‰‰ gµ‰‰‰mÁ‚‰‰…C‰‰‰mÁ¢‰‰퉉‰mÁ=‰í¢‰‰™õ‰‰‰ï¥‚‰‰ ù‰‰‰ï¥¢‰‰ ¿}‰‰‰ï¥ Ÿ‰í¢‰‰ y!‰‰‰o™‚‰‰››‰‰‰o™¢‰‰…'‰‰‰o™÷ã‰í¢‰‰™õ‰‰‰oý‚‰‰ ù‰‰‰oý¢‰‰ ¿}‰‰‰oýC‰í¢‰‰ {‰‰‰áт‰‰O‰‰‰áÑ¢‰‰o‰‰‰áÑ÷ã‰í¢‰‰‘9‰‰‰aµ‚‰‰ û ‰‰‰aµ¢‰‰ ·£‰‰‰aµU‰í¢‰‰ }‰‰‰ã©‚‰‰«‰‰‰ã©¢‰‰ ‰‰‰ã©÷ã‰í¢‰‰•…‰‰‰c‚‰‰ {牉‰c¢‰‰ Ky‰‰‰c×‰í¢‰‰ ÿy‰‰‰cႉ‰«ã‰‰‰cᢉ‰ƒsщ‰‰cá÷ã‰í¢‰‰)݉‰‰åł‰‰ }S‰‰‰åÅ¢‰‰ A1‰‰‰åÅ Ÿ‰í¢‰‰ ÿy‰‰‰e¹‚‰‰«ã‰‰‰e¹¢‰‰ƒsщ‰‰e¹s‰í¢‰‰-§‰‰‰ç‚‰‰ }S‰‰‰ç¢‰‰ G ‰‰‰ç Ÿ‰í¢‰‰ ÿy‰‰‰çñ‚‰‰¯Í‰‰‰çñ¢‰‰ƒu‰‰‰çñs™‰í¢‰‰!‰‰‰gՂ‰‰ Á‰‰‰gÕ¢‰‰ ÝA‰‰‰gÕ Ÿ‰í¢‰‰ ñ׉‰‰ùɂ‰‰£‰‰‰ùÉ¢‰‰ƒý+‰‰‰ùÉs‰í¢‰‰¥k‰‰‰y­‚‰‰ q¿‰‰‰y­¢‰‰ Ñ+‰‰‰y­ Ÿ‰í¢‰‰ óʼn‰‰û‚‰‰%q‰‰‰û¢‰‰ƒùÁ‰‰‰û÷ã‰í¢‰‰¥k‰‰‰û傉‰ q¿‰‰‰û墉‰ Ñ+‰‰‰ûåy‰í¢‰‰ õ1‰‰‰{ق‰‰9݉‰‰{Ù¢‰‰ƒå牉‰{Ù÷ã‰í¢‰‰¹·‰‰‰ý½‚‰‰ s+‰‰‰ý½¢‰‰ S÷‰‰‰ý½U‰í¢‰‰ ÷¯‰‰‰}‘‚‰‰;I‰‰‰}‘¢‰‰ƒa‰‰‰}‘÷ã‰í¢‰‰»#‰‰‰}õ‚‰‰ u™‰‰‰}õ¢‰‰ iM‰‰‰}õ Ÿ‰í¢‰‰ ÷¯‰‰‰ÿ邉‰;I‰‰‰ÿ颉‰ƒa‰‰‰ÿéõu‰í¢‰‰¿ ‰‰‰͂‰‰ u™‰‰‰Í¢‰‰ m©‰‰‰Í Ÿ‰í¢‰‰ ÷¯‰‰‰ñ¡‚‰‰?‰‰‰ñ¡¢‰‰ƒo¯‰‰‰ñ¡s‰í¢‰‰?{‰‰‰q…‚‰‰ ÷u‰‰‰q…¢‰‰ á󉉉q… Ÿ‰í¢‰‰‰‰‰‰qù‚‰‰1‰‰‰qù¢‰‰ƒkC‰‰‰qùõu‰í¢‰‰1鉉‰ó݂‰‰‰c‰‰‰óÝ¢‰‰ å]‰‰‰óÝ Ÿ‰í¢‰‰‹ ‰‰‰s±‚‰‰³ÿ‰‰‰s±¢‰‰ƒi׉‰‰s±÷ã‰í¢‰‰1鉉‰õ•‚‰‰‰c‰‰‰õ•¢‰‰ å]‰‰‰õ• ‰í¢‰‰ 牉‰u‰‚‰‰µk‰‰‰u‰¢‰‰ƒWy‰‰‰u‰÷ã‰í¢‰‰3E‰‰‰u킉‰‹Ñ‰‰‰uí¢‰‰ çˉ‰‰uíù‰í¢‰‰ S‰‰‰÷Á‚‰‰É·‰‰‰÷Á¢‰‰ƒÕŸ‰‰‰÷Á÷ã‰í¢‰‰7¡‰‰‰w¥‚‰‰M‰‰‰w¥¢‰‰ û•‰‰‰w¥ Ÿ‰í¢‰‰ S‰‰ ‰™‚‰‰É·‰‰ ‰™¢‰‰ƒÕŸ‰‰ ‰™õu‰í¢‰‰I‰‰ ‰ý‚‰‰M‰‰ ‰ý¢‰‰ }‰‰ ‰ý Ÿ‰í¢‰‰ S‰‰ т‰‰Ë#‰‰ Ñ¢‰‰ƒÓ!‰‰ Ñõu‰í¢‰‰K‹‰‰ ‹µ‚‰‰»‰‰ ‹µ¢‰‰ 퉉 ‹µ Ÿ‰í¢‰‰Á‰‰ ©‚‰‰͑‰‰ ©¢‰‰ƒÑµ‰‰ ©õu‰í¢‰‰Íg‰‰ ‚‰‰©‰‰ ¢‰‰ s7‰‰  Ÿ‰í¢‰‰¿‰‰ á‚‰‰Ï ‰‰ á¢‰‰ƒßG‰‰ á÷ã‰í¢‰‰Íg‰‰ ł‰‰©‰‰ Å¢‰‰ s7‰‰ Å ‰í¢‰‰+‰‰ ¹‚‰‰Ï ‰‰ ¹¢‰‰ƒÝi‰‰ ¹÷ã‰í¢‰‰Íg‰‰ ‚‰‰ƒ‰‰ ¢‰‰ u¥‰‰  ‰í¢‰‰™‰‰ ñ‚‰‰O{‰‰ ñ¢‰‰ƒÛý‰‰ ñ÷ã‰í¢‰‰ÏU‰‰ Õ‚‰‰ó‰‰ Õ¢‰‰ w‰‰ Õy‰í¢‰‰™‰‰ ɂ‰‰A鉉 É¢‰‰ƒÛý‰‰ É÷ã‰í¢‰‰Áɉ ƒ­‚‰‰ó‰‰ ƒ­¢‰‰‹‹ý‰‰ ƒ­ Ÿ‰í¢‰‰™‰‰ ‚‰‰A鉉 ¢‰‰ƒÛý‰‰ õu‰í¢‰‰Ã?‰‰ 傉‰o‰‰ 墉‰‹i‰‰ å Ÿ‰í¢‰‰‡u‰‰ …Ù‚‰‰CE‰‰ …Ù¢‰‰ƒY‰‰ 
…Ùõu‰í¢‰‰Å­‰‰ ½‚‰‰]‰‰ ½¢‰‰‹Ç‰‰ ½ Ÿ‰í¢‰‰™c‰‰ ‡‘‚‰‰E³‰‰ ‡‘¢‰‰ƒG“‰‰ ‡‘÷ã‰í¢‰‰Å­‰‰ ‡õ‚‰‰]‰‰ ‡õ¢‰‰‹Ç‰‰ ‡õ ‰í¢‰‰›Ñ‰‰ 邉‰E³‰‰ 颉‰ƒE¥‰‰ é÷ã‰í¢‰‰Å­‰‰ ™Í‚‰‰ˉ‰ ™Í¢‰‰‹3‰‰ ™Í ‰í¢‰‰M‰‰ ¡‚‰‰G¡‰‰ ¡¢‰‰ƒE¥‰‰ ¡÷ã‰í¢‰‰Ǜ‰‰ ›…‚‰‰'‰‰ ›…¢‰‰‹ƒ!‰‰ ›… ‰í¢‰‰M‰‰ ›ù‚‰‰Y‰‰ ›ù¢‰‰ƒC7‰‰ ›ù÷ã‰í¢‰‰Gw‰‰ ݂‰‰'‰‰ Ý¢‰‰‹…Ÿ‰‰ Ý Ÿ‰í¢‰‰M‰‰ ±‚‰‰Y‰‰ ±¢‰‰ƒC7‰‰ ±õu‰í¢‰‰Gw‰‰ •‚‰‰•‰‰ •¢‰‰‹‡ ‰‰ • Ÿ‰í¢‰‰Ÿ»‰‰ Ÿ‰‚‰‰Y‰‰ Ÿ‰¢‰‰ƒAۉ‰ Ÿ‰÷ã‰í¢‰‰Y剉 Ÿí‚‰‰ƒ‰‰ Ÿí¢‰‰‹ù‰‰ Ÿí Ÿ‰í¢‰‰‘©‰‰ Á‚‰‰[‹‰‰ Á¢‰‰ƒAۉ‰ Áõu‰í¢‰‰Y剉 ‘¥‚‰‰‘‰‰ ‘¥¢‰‰‹׉‰ ‘¥ Ÿ‰í¢‰‰“‰‰ ™‚‰‰[‹‰‰ ™¢‰‰ƒOm‰‰ ™÷ã‰í¢‰‰Y剉 ý‚‰‰‘‰‰ ý¢‰‰‹׉‰ ý ‰í¢‰‰ó‰‰ “Ñ‚‰‰Ýg‰‰ “Ñ¢‰‰ƒOm‰‰ “Ñ÷ã‰í¢‰‰[Q‰‰ µ‚‰‰“퉉 µ¢‰‰‹C‰‰ µ ‰í¢‰‰o‰‰ •©‚‰‰Ýg‰‰ •©¢‰‰ƒM‰‰ •©÷ã‰í¢‰‰[Q‰‰ ‚‰‰•Y‰‰ ¢‰‰‹±‰‰  Ÿ‰í¢‰‰o‰‰ ႉ‰Ýg‰‰ ᢉ‰ƒM‰‰ á÷ã‰í¢‰‰]O‰‰ —Å‚‰‰•Y‰‰ —Å¢‰‰‹-‰‰ —Å Ÿ‰í¢‰‰o‰‰ ¹‚‰‰ßU‰‰ ¹¢‰‰ƒM‰‰ ¹õu‰í¢‰‰]O‰‰ ©‚‰‰—7‰‰ ©¢‰‰‹-‰‰ © Ÿ‰í¢‰‰]‰‰ ©ñ‚‰‰ßU‰‰ ©ñ¢‰‰ƒÍƒ‰‰ ©ñ÷ã‰í¢‰‰_½‰‰ )Ղ‰‰©¥‰‰ )Õ¢‰‰‹‰‰ )Õ Ÿ‰í¢‰‰)ˉ‰ «É‚‰‰Ñɉ «É¢‰‰ƒÍƒ‰‰ «É÷ã‰í¢‰‰_½‰‰ +­‚‰‰©¥‰‰ +­¢‰‰‹‰‰ +­ ‰í¢‰‰+'‰‰ ­‚‰‰Ñɉ ­¢‰‰ƒË‰‰ ­÷ã‰í¢‰‰_½‰‰ ­å‚‰‰«‰‰ ­å¢‰‰‹‰‰‰ ­å ‰í¢‰‰-•‰‰ -ق‰‰Ñɉ -Ù¢‰‰ƒË‰‰ -Ù÷ã‰í¢‰‰_½‰‰ ¯½‚‰‰­‰‰ ¯½¢‰‰‹•e‰‰ ¯½ Ÿ‰í¢‰‰-•‰‰ /‘‚‰‰Ó?‰‰ /‘¢‰‰ƒÉ¹‰‰ /‘÷ã‰í¢‰‰Q)‰‰ /õ‚‰‰­‰‰ /õ¢‰‰‹•e‰‰ /õ Ÿ‰í¢‰‰-•‰‰ ¡é‚‰‰Ó?‰‰ ¡é¢‰‰ƒÉ¹‰‰ ¡é÷ã‰í¢‰‰Q)‰‰ !͂‰‰-ý‰‰ !Í¢‰‰‹—Ó‰‰ !Í Ÿ‰í¢‰‰/ƒ‰‰ £¡‚‰‰Ó?‰‰ £¡¢‰‰ƒÉ¹‰‰ £¡÷ã‰í¢‰‰S‡‰‰ #…‚‰‰/i‰‰ #…¢‰‰‹©Á‰‰ #… Ÿ‰í¢‰‰¡‰‰ #ù‚‰‰Õ­‰‰ #ù¢‰‰ƒÉ¹‰‰ #ù÷ã‰í¢‰‰S‡‰‰ ¥Ý‚‰‰/i‰‰ ¥Ý¢‰‰‹©Á‰‰ ¥Ý Ÿ‰í¢‰‰£í‰‰ %±‚‰‰Õ­‰‰ %±¢‰‰ƒ·Ë‰‰ %±÷ã‰í¢‰‰S‡‰‰ §•‚‰‰!lj‰ §•¢‰‰‹©Á‰‰ §• ‰í¢‰‰¥Y‰‰ '‰‚‰‰Õ­‰‰ '‰¢‰‰ƒ·Ë‰‰ '‰÷ã‰í¢‰‰S‡‰‰ '킉‰#3‰‰ 'í¢‰‰‹«=‰‰ 'í Ÿ‰í¢‰‰¥Y‰‰ ¹Á‚‰‰כ‰‰ ¹Á¢‰‰ƒµ]‰‰ ¹Á÷ã‰í¢‰‰Õõ‰‰ 9¥‚‰‰#3‰‰ 9¥¢‰‰‹«=‰‰ 9¥ Ÿ‰í¢‰‰¥Y‰‰ »™‚‰‰כ‰‰ »™¢‰‰ƒµ]‰‰ »™÷ã‰í¢‰‰Õõ‰‰ »ý‚‰‰%!‰‰ »ý¢‰‰‹­«‰‰ »ý Ÿ‰í¢‰‰§7‰‰ ;т‰‰כ‰‰ ;Ñ¢‰‰ƒµ]‰‰ ;Ñ÷ã‰í¢‰‰Õõ‰‰ ½µ‚‰‰'Ÿ‰‰ ½µ¢‰‰‹¯‰‰ ½µ Ÿ‰í¢‰‰¹¥‰‰ =©‚‰‰כ‰‰ =©¢‰‰ƒµ]‰‰ =©÷ã‰í¢‰‰×a‰‰ ¿‚‰‰9 ‰‰ ¿¢‰‰‹¯‰‰ ¿ Ÿ‰í¢‰‰»‰‰ ¿á‚‰‰Ww‰‰ ¿á¢‰‰ƒµ]‰‰ ¿á÷ã‰í¢‰‰×a‰‰ ?ł‰‰9 ‰‰ ?Å¢‰‰‹¯‰‰ ?Å ‰í¢‰‰½‰‰ ±¹‚‰‰Ww‰‰ ±¹¢‰‰ƒ³á‰‰ ±¹÷ã‰í¢‰‰×a‰‰ 1‚‰‰»ù‰‰ 1¢‰‰‹/u‰‰ 1 Ÿ‰í¢‰‰½‰‰ 1ñ‚‰‰Ww‰‰ 1ñ¢‰‰ƒ³á‰‰ 1ñ÷ã‰í¢‰‰×a‰‰ ³Õ‚‰‰»ù‰‰ ³Õ¢‰‰‹/u‰‰ 
³Õ Ÿ‰í¢‰‰½‰‰ 3ɂ‰‰Ww‰‰ 3É¢‰‰ƒ³á‰‰ 3É÷ã‰í¢‰‰×a‰‰ µ­‚‰‰½×‰‰ µ­¢‰‰‹!㉉ µ­ Ÿ‰í¢‰‰=ý‰‰ 5‚‰‰Ww‰‰ 5¢‰‰ƒ³á‰‰ 5õu‰í¢‰‰é߉‰ 5傉‰¿C‰‰ 5墉‰‹!㉉ 5å Ÿ‰í¢‰‰?i‰‰ ·Ù‚‰‰i剉 ·Ù¢‰‰ƒ±s‰‰ ·Ù÷ã‰í¢‰‰é߉‰ 7½‚‰‰±±‰‰ 7½¢‰‰‹!㉉ 7½ Ÿ‰í¢‰‰1lj‰ ɑ‚‰‰i剉 ɑ¢‰‰ƒ±s‰‰ ɑ÷ã‰í¢‰‰é߉‰ Éõ‚‰‰±±‰‰ Éõ¢‰‰‹!㉉ Éõ ‰í¢‰‰33‰‰ I邉‰i剉 I颉‰ƒ±s‰‰ Ié÷ã‰í¢‰‰é߉‰ Ë͂‰‰³-‰‰ ËÍ¢‰‰‹#_‰‰ ËÍ Ÿ‰í¢‰‰33‰‰ K¡‚‰‰i剉 K¡¢‰‰ƒ±s‰‰ K¡÷ã‰í¢‰‰é߉‰ ͅ‚‰‰³-‰‰ ͅ¢‰‰‹#_‰‰ ͅ ‰í¢‰‰5!‰‰ Íù‚‰‰kQ‰‰ Íù¢‰‰ƒ?‡‰‰ Íù÷ã‰í¢‰‰ëK‰‰ M݂‰‰µ‰‰ MÝ¢‰‰‹%͉‰ MÝ Ÿ‰í¢‰‰5!‰‰ ϱ‚‰‰kQ‰‰ ϱ¢‰‰ƒ?‡‰‰ ϱ÷ã‰í¢‰‰ëK‰‰ O•‚‰‰·‰‰‰ O•¢‰‰‹%͉‰ O• Ÿ‰í¢‰‰7Ÿ‰‰ Á‰‚‰‰kQ‰‰ Á‰¢‰‰ƒ?‡‰‰ Á‰÷ã‰í¢‰‰ëK‰‰ Á킉‰7e‰‰ Áí¢‰‰‹%͉‰ Áí Ÿ‰í¢‰‰I ‰‰ AÁ‚‰‰kQ‰‰ AÁ¢‰‰ƒ?‡‰‰ AÁ÷ã‰í¢‰‰ëK‰‰ Ã¥‚‰‰7e‰‰ Ã¥¢‰‰‹%͉‰ Ã¥ ‰í¢‰‰Ëù‰‰ C™‚‰‰kQ‰‰ C™¢‰‰ƒ?‡‰‰ C™÷ã‰í¢‰‰ëK‰‰ Cý‚‰‰IӉ‰ Cý¢‰‰‹'»‰‰ Cý Ÿ‰í¢‰‰Í׉‰ Åт‰‰mO‰‰ ÅÑ¢‰‰ƒ=©‰‰ ÅÑ÷ã‰í¢‰‰í9‰‰ Eµ‚‰‰KÁ‰‰ Eµ¢‰‰‹'»‰‰ Eµ ‰í¢‰‰Í׉‰ Ç©‚‰‰mO‰‰ Ç©¢‰‰ƒ=©‰‰ Ç©÷ã‰í¢‰‰í9‰‰ G‚‰‰KÁ‰‰ G¢‰‰‹9‰‰ G Ÿ‰í¢‰‰Í׉‰ Gႉ‰mO‰‰ Gᢉ‰ƒ=©‰‰ Gá÷ã‰í¢‰‰í9‰‰ Ùł‰‰M=‰‰ ÙÅ¢‰‰‹9‰‰ ÙÅ Ÿ‰í¢‰‰ÏC‰‰ Y¹‚‰‰mO‰‰ Y¹¢‰‰ƒ=©‰‰ Y¹÷ã‰í¢‰‰í9‰‰ ۝‚‰‰O«‰‰ ۝¢‰‰‹9‰‰ ۝ Ÿ‰í¢‰‰Á±‰‰ Ûñ‚‰‰mO‰‰ Ûñ¢‰‰ƒ=©‰‰ Ûñ÷ã‰í¢‰‰í9‰‰ [Ղ‰‰A‰‰ [Õ¢‰‰‹;…‰‰ [Õ Ÿ‰í¢‰‰Ã-‰‰ Ýɂ‰‰mO‰‰ ÝÉ¢‰‰ƒ=©‰‰ ÝÉ÷ã‰í¢‰‰í9‰‰ ]­‚‰‰A‰‰ ]­¢‰‰‹;…‰‰ ]­ Ÿ‰í¢‰‰Å‰‰ ߁‚‰‰o½‰‰ ߁¢‰‰ƒ=©‰‰ ߁÷ã‰í¢‰‰‰ ß傉‰Ãu‰‰ ß墉‰‹;…‰‰ ßå Ÿ‰í¢‰‰Å‰‰ _ق‰‰o½‰‰ _Ù¢‰‰ƒ;;‰‰ _Ù÷ã‰í¢‰‰‰ ѽ‚‰‰Ãu‰‰ ѽ¢‰‰‹;…‰‰ ѽ Ÿ‰í¢‰‰Å‰‰ Q‘‚‰‰o½‰‰ Q‘¢‰‰ƒ;;‰‰ Q‘÷ã‰í¢‰‰‰ Qõ‚‰‰Å㉉ Qõ¢‰‰‹;…‰‰ Qõ Ÿ‰í¢‰‰Ç‰‰‰ Ó邉‰o½‰‰ Ó颉‰ƒ;;‰‰ Óé÷ã‰í¢‰‰‰ S͂‰‰Ç_‰‰ SÍ¢‰‰‹½q‰‰ SÍ Ÿ‰í¢‰‰Ge‰‰ Õ¡‚‰‰o½‰‰ Õ¡¢‰‰ƒ;;‰‰ Õ¡÷ã‰í¢‰‰‰ U…‚‰‰Ù͉‰ U…¢‰‰‹½q‰‰ U… Ÿ‰í¢‰‰YӉ‰ Uù‚‰‰o½‰‰ Uù¢‰‰ƒ;;‰‰ Uù÷ã‰í¢‰‰‰ ×݂‰‰Ù͉‰ ×Ý¢‰‰‹½q‰‰ ×Ý Ÿ‰í¢‰‰[Á‰‰ W±‚‰‰o½‰‰ W±¢‰‰ƒ;;‰‰ W±÷ã‰í¢‰‰‰ 镂‰‰Û»‰‰ 镢‰‰‹½q‰‰ é• ‰í¢‰‰ ‰)‰‰ i‰„ 碉‰[Á‰‰ i‰‚‰‰o½‰‰ i‰¢‰‰ƒ;;‰‰ i‰÷ã‰í¢‰‰‰ i킉‰Û»‰‰ ií¢‰‰ ‰q‰‰ ií„÷›¢‰‰‹¿o‰‰ ií Ÿ‰í¢‰‰ ‰)‰‰ ëÁ„ S¢‰‰]=‰‰ ëÁ‚‰‰a)‰‰ ëÁ¢‰‰ƒ9ω‰ ëÁ÷ã‰í¢‰‰á‰‰ k¥‚‰‰Ý‰‰ k¥¢‰‰ ‹ï‰‰ k¥„õ-¢‰‰‹¿o‰‰ k¥ Ÿ‰í¢‰‰ ‰)‰‰ 홄 S¢‰‰]=‰‰ 홂‰‰a)‰‰ 홢‰‰ƒ9ω‰ í™÷ã‰í¢‰‰á‰‰ íý‚‰‰ß…‰‰ íý¢‰‰ ‹ï‰‰ íý„õ-¢‰‰‹¿o‰‰ íý Ÿ‰í¢‰‰ ‰)‰‰ mф S¢‰‰_«‰‰ mт‰‰a)‰‰ mÑ¢‰‰ƒ9ω‰ mÑ÷ã‰í¢‰‰á‰‰ ﵂‰‰_q‰‰ ïµ¢‰‰ [‰‰ ﵄󱢉‰‹¿o‰‰ ïµ Ÿ‰í¢‰‰ 
‰)‰‰ o©„ Á¢‰‰Q‰‰ o©‚‰‰a)‰‰ o©¢‰‰ƒ9ω‰ o©÷ã‰í¢‰‰á‰‰ ፂ‰‰_q‰‰ ።‰‰ [‰‰ ፄ󱢉‰‹¿o‰‰ ፠‰í¢‰‰ ‰)‰‰ áá„=¢‰‰Óu‰‰ áႉ‰a)‰‰ áᢉ‰ƒ9ω‰ áá÷ã‰í¢‰‰á‰‰ ał‰‰Qo‰‰ aÅ¢‰‰ É‰‰ ańñC¢‰‰‹±Ý‰‰ aÅ Ÿ‰í¢‰‰ ‰)‰‰ 㹄+¢‰‰Óu‰‰ 㹂‰‰a)‰‰ ã¹¢‰‰ƒ9ω‰ ã¹÷ã‰í¢‰‰á‰‰ c‚‰‰Qo‰‰ c¢‰‰ §‰‰ c„ÿU¢‰‰‹±Ý‰‰ c Ÿ‰í¢‰‰ ‰)‰‰ cñ„™¢‰‰Õ㉉ cñ‚‰‰a)‰‰ cñ¢‰‰ƒ9ω‰ cñ÷ã‰í¢‰‰á‰‰ åՂ‰‰S݉‰ åÕ¢‰‰ ƒ‰‰ åՄýù¢‰‰‹±Ý‰‰ åÕ Ÿ‰í¢‰‰ ‰)‰‰ eɄ™¢‰‰Õ㉉ eɂ‰‰a)‰‰ eÉ¢‰‰ƒ9ω‰ eÉ÷ã‰í¢‰‰á‰‰ 筂‰‰UI‰‰ ç­¢‰‰ …‰‰ 筄{ ¢‰‰‹±Ý‰‰ ç­ Ÿ‰í¢‰‰ ‰)‰‰ g„…u¢‰‰×_‰‰ g‚‰‰a)‰‰ g¢‰‰ƒ9ω‰ g÷ã‰í¢‰‰á‰‰ g傉‰W§‰‰ g墉‰ …‰‰ gå„{ ¢‰‰‹±Ý‰‰ gå Ÿ‰í¢‰‰ ‰)‰‰ ùل…u¢‰‰é͉‰ ùق‰‰a)‰‰ ùÙ¢‰‰ƒ9ω‰ ùÙ÷ã‰í¢‰‰á‰‰ y½‚‰‰W§‰‰ y½¢‰‰ …‰‰ y½„{ ¢‰‰‹±Ý‰‰ y½ ‰í¢‰‰ ‰)‰‰ û‘„‡ã¢‰‰ë»‰‰ û‘‚‰‰c‡‰‰ û‘¢‰‰ƒ9ω‰ û‘÷ã‰í¢‰‰añ‰‰ ûõ‚‰‰i•‰‰ ûõ¢‰‰ }‰‰ ûõ„yŸ¢‰‰‹³I‰‰ ûõ Ÿ‰í¢‰‰ ‰)‰‰ {鄙Ѣ‰‰ë»‰‰ {邉‰c‡‰‰ {颉‰ƒ'Q‰‰ {é÷ã‰í¢‰‰añ‰‰ ý͂‰‰i•‰‰ ýÍ¢‰‰ k‰‰ ý̈́g¡¢‰‰‹³I‰‰ ýÍ Ÿ‰í¢‰‰ ‰)‰‰ }¡„›M¢‰‰í‰‰ }¡‚‰‰c‡‰‰ }¡¢‰‰ƒ'Q‰‰ }¡÷ã‰í¢‰‰añ‰‰ ÿ…‚‰‰k‰‰ ÿ…¢‰‰ ى‰ ÿ…„e3¢‰‰‹³I‰‰ ÿ… Ÿ‰í¢‰‰ ‰)‰‰ ÿù„›M¢‰‰í‰‰ ÿù‚‰‰c‡‰‰ ÿù¢‰‰ƒ'Q‰‰ ÿù÷ã‰í¢‰‰añ‰‰ ݂‰‰íÿ‰‰ Ý¢‰‰ 5‰‰ ݄cÇ¢‰‰‹³I‰‰ Ý Ÿ‰í¢‰‰ ‰)‰‰ ñ±„»¢‰‰ï…‰‰ ñ±‚‰‰c‡‰‰ ñ±¢‰‰ƒ'Q‰‰ ñ±÷ã‰í¢‰‰añ‰‰ q•‚‰‰ïk‰‰ q•¢‰‰ 5‰‰ q•„cÇ¢‰‰‹³I‰‰ q• Ÿ‰í¢‰‰ ‰)‰‰ 󉄝»¢‰‰oq‰‰ 󉂉‰c‡‰‰ 󉢉‰ƒ'Q‰‰ ó‰÷ã‰í¢‰‰añ‰‰ ó킉‰áY‰‰ óí¢‰‰ £‰‰ óí„ai¢‰‰‹µ§‰‰ óí Ÿ‰í¢‰‰ ‰)‰‰ sÁ„Ÿ¢‰‰ao‰‰ sÁ‚‰‰c‡‰‰ sÁ¢‰‰ƒ'Q‰‰ sÁ÷ã‰í¢‰‰añ‰‰ õ¥‚‰‰áY‰‰ õ¥¢‰‰ £‰‰ õ¥„ai¢‰‰‹µ§‰‰ õ¥ Ÿ‰í¢‰‰ ‰)‰‰ u™„‘¢‰‰c݉‰ u™‚‰‰c‡‰‰ u™¢‰‰ƒ'Q‰‰ u™÷ã‰í¢‰‰añ‰‰ uý‚‰‰ã·‰‰ uý¢‰‰ ‘‰‰ uý„o{¢‰‰‹µ§‰‰ uý Ÿ‰í¢‰‰ ‰)‰‰ ÷ф󢉉c݉‰ ÷т‰‰c‡‰‰ ÷Ñ¢‰‰ƒ'Q‰‰ ÷Ñ÷ã‰í¢‰‰añ‰‰ wµ‚‰‰ã·‰‰ wµ¢‰‰  ‰‰ wµ„‰‰‹µ§‰‰ wµ Ÿ‰í¢‰‰ ‰)‰‰‹‰©„󢉉c݉‰‹‰©‚‰‰c‡‰‰‹‰©¢‰‰ƒ'Q‰‰‹‰©÷ã‰í¢‰‰añ‰‰‹ ‚‰‰å#‰‰‹ ¢‰‰ “û‰‰‹ „í¢‰‰‹µ§‰‰‹  Ÿ‰í¢‰‰ ‰)‰‰‹ á„o¢‰‰eI‰‰‹ ႉ‰c‡‰‰‹ ᢉ‰ƒ'Q‰‰‹ á÷ã‰í¢‰‰añ‰‰‹‹Å‚‰‰ç‘‰‰‹‹Å¢‰‰ •W‰‰‹‹Å„륢‰‰‹µ§‰‰‹‹Å Ÿ‰í¢‰‰ ‰)‰‰‹ ¹„Ý¢‰‰g§‰‰‹ ¹‚‰‰c‡‰‰‹ ¹¢‰‰ƒ'Q‰‰‹ ¹÷ã‰í¢‰‰añ‰‰‹‚‰‰ù‰‰‹¢‰‰ •W‰‰‹„륢‰‰‹µ§‰‰‹ Ÿ‰í¢‰‰ ‰)‰‰‹ñ„Ý¢‰‰y•‰‰‹ñ‚‰‰c‡‰‰‹ñ¢‰‰ƒ'Q‰‰‹ñ÷ã‰í¢‰‰añ‰‰‹ Ղ‰‰ù‰‰‹ Õ¢‰‰ •W‰‰‹ Մ륢‰‰‹µ§‰‰‹ Õ Ÿ‰í¢‰‰ ‰)‰‰‹É„Ë¢‰‰{‰‰‹É‚‰‰c‡‰‰‹É¢‰‰ƒ'Q‰‰‹É÷ã‰í¢‰‰añ‰‰‹­‚‰‰y{‰‰‹­¢‰‰ —E‰‰‹­„é·¢‰‰‹µ§‰‰‹­ ‰í¢‰‰ 
‰)‰‰‹„)'¢‰‰{‰‰‹‚‰‰c‡‰‰‹¢‰‰ƒ'Q‰‰‹÷ã‰í¢‰‰añ‰‰‹å‚‰‰y{‰‰‹å¢‰‰ ©³‰‰‹å„×Y¢‰‰‹·•‰‰‹å Ÿ‰í¢‰‰ ‰)‰‰‹ل+•¢‰‰ýÿ‰‰‹ق‰‰åõ‰‰‹Ù¢‰‰ƒ'Q‰‰‹Ù÷ã‰í¢‰‰c‹ƒ½‚‰‰{鉉‹ƒ½¢‰‰ «/‰‰‹ƒ½„Õí¢‰‰‹·•‰‰‹ƒ½ Ÿ‰í¢‰‰ ‰)‰‰‹‘„+•¢‰‰ýÿ‰‰‹‘‚‰‰åõ‰‰‹‘¢‰‰ƒ'Q‰‰‹‘÷ã‰í¢‰‰c‹õ‚‰‰}E‰‰‹õ¢‰‰ ­‰‰‹õ„Ó¢‰‰‹·•‰‰‹õ Ÿ‰í¢‰‰ ‰)‰‰‹…é„-¢‰‰ÿk‰‰‹…邉‰åõ‰‰‹…颉‰ƒ'Q‰‰‹…é÷ã‰í¢‰‰c‹͂‰‰3‰‰‹Í¢‰‰ ­‰‰‹̈́Ó¢‰‰‹·•‰‰‹Í Ÿ‰í¢‰‰ ‰)‰‰‹‡¡„-¢‰‰ñY‰‰‹‡¡‚‰‰åõ‰‰‹‡¡¢‰‰ƒ'Q‰‰‹‡¡÷ã‰í¢‰‰c‹…‚‰‰3‰‰‹…¢‰‰ ­‰‰‹…„Ó¢‰‰‹·•‰‰‹… Ÿ‰í¢‰‰ ‰)‰‰‹ù„¯¢‰‰ó·‰‰‹ù‚‰‰åõ‰‰‹ù¢‰‰ƒ'Q‰‰‹ù÷ã‰í¢‰‰c‹™Ý‚‰‰q¡‰‰‹™Ý¢‰‰ ¯‹‰‰‹™Ý„Q¢‰‰‹·•‰‰‹™Ý Ÿ‰í¢‰‰ ‰)‰‰‹±„¡í¢‰‰ó·‰‰‹±‚‰‰åõ‰‰‹±¢‰‰ƒ%c‰‰‹±÷ã‰í¢‰‰c‹›•‚‰‰q¡‰‰‹›•¢‰‰ /g‰‰‹›•„_•¢‰‰‹·•‰‰‹›• Ÿ‰í¢‰‰ ‰)‰‰‹‰„£Y¢‰‰õ#‰‰‹‰‚‰‰åõ‰‰‹‰¢‰‰ƒ%c‰‰‹‰÷ã‰í¢‰‰c‹킉‰s‰‰‹í¢‰‰ !Չ‰‹í„]'¢‰‰‹·•‰‰‹í Ÿ‰í¢‰‰ ‰)‰‰‹Á„£Y¢‰‰õ#‰‰‹Á‚‰‰åõ‰‰‹Á¢‰‰ƒ%c‰‰‹Á÷ã‰í¢‰‰c‹¥‚‰‰u‹‰‰‹¥¢‰‰ #A‰‰‹¥„[Ë¢‰‰‹·•‰‰‹¥ Ÿ‰í¢‰‰ ‰)‰‰‹Ÿ™„¥·¢‰‰÷‘‰‰‹Ÿ™‚‰‰åõ‰‰‹Ÿ™¢‰‰ƒ%c‰‰‹Ÿ™÷ã‰í¢‰‰c‹Ÿý‚‰‰÷ù‰‰‹Ÿý¢‰‰ #A‰‰‹Ÿý„[Ë¢‰‰‹·•‰‰‹Ÿý Ÿ‰í¢‰‰ ‰)‰‰‹ф¥·¢‰‰ ‰‰‰‹т‰‰åõ‰‰‹Ñ¢‰‰ƒ%c‰‰‹Ñ÷ã‰í¢‰‰c‹‘µ‚‰‰ ‰U‰‰‹‘µ¢‰‰ %?‰‰‹‘µ„YݤÉ÷ Ÿ‰í¤7‹„§¥¢‰‰ {‰‰‹©‚‰‰åõ‰‰‹©¢‰‰ƒ%c‰‰‹©÷ã‰í¢‰‰c‹“‚‰‰ ‰U‰‰‹“¢‰‰ %?‰‰‹“„YݤÉ÷ Ÿ‰í¤7‹„¹¢‰‰ {‰‰‹“ႉ‰åõ‰‰‹“ᢉ‰ƒ%c‰‰‹“á÷ã‰í¢‰‰c‹ł‰‰ ‰U‰‰‹Å¢‰‰ '­‰‰‹ńGo¤É÷ Ÿ‰í¤7‹„»¢‰‰ 鉉‹•¹‚‰‰åõ‰‰‹•¹¢‰‰ƒ%c‰‰‹•¹÷ã‰í¢‰‰c‹‚‰‰ ‹Ã‰‰‹¢‰‰ 9‰‰‹„Eó¤É÷ Ÿ‰í¤7‹„»¢‰‰ 鉉‹ñ‚‰‰åõ‰‰‹ñ¢‰‰ƒ%c‰‰‹ñ÷ã‰í¢‰‰c‹—Õ‚‰‰ ?‰‰‹—Õ¢‰‰ »÷‰‰‹—ՄŤÉ÷ Ÿ‰í¤7‹„;{¢‰‰ E‰‰‹ɂ‰‰åõ‰‰‹É¢‰‰ƒ%c‰‰‹É÷ã‰í¢‰‰c‹©­‚‰‰ -‰‰‹©­¢‰‰ ½å‰‰‹©­„äÉ÷ Ÿ‰í¤7‹„=i¢‰‰ 3‰‰‹)‚‰‰åõ‰‰‹)¢‰‰ƒ%c‰‰‹)÷ã‰í¢‰‰c‹)傉‰ ›‰‰‹)墉‰ ½å‰‰‹)å„äÉ÷ Ÿ‰í¤7‹„=i¢‰‰ ¡‰‰‹«Ù‚‰‰åõ‰‰‹«Ù¢‰‰ƒ%c‰‰‹«Ù÷ã‰í¢‰‰c‹+½‚‰‰ ›‰‰‹+½¢‰‰ ½å‰‰‹+½„äÉ÷ Ÿ‰í¤7‹„?Ç¢‰‰ ¡‰‰‹­‘‚‰‰åõ‰‰‹­‘¢‰‰ƒ%c‰‰‹­‘÷ã‰í¢‰‰c‹­õ‚‰‰ ›‰‰‹­õ¢‰‰ ¿Q‰‰‹­õ„Á»¤É÷ Ÿ‰í¤7‹„13¢‰‰ ‰‰‹-邉‰åõ‰‰‹-颉‰ƒ%c‰‰‹-é÷ã‰í¢‰‰c‹¯Í‚‰‰ w‰‰‹¯Í¢‰‰ ±Ï‰‰‹¯Í„ÏM¤É÷ Ÿ‰í¤7‹„3¡¢‰‰ ‹‰‰‹/¡‚‰‰åõ‰‰‹/¡¢‰‰ƒ%c‰‰‹/¡÷ã‰í¢‰‰c‹¡…‚‰‰ 剉‹¡…¢‰‰ ³;‰‰‹¡…„ÍѤÉ÷ Ÿ‰í¤7‹„3¡¢‰‰ ‹‰‰‹¡ù‚‰‰åõ‰‰‹¡ù¢‰‰ƒ%c‰‰‹¡ù÷ã‰í¢‰‰c‹!݂‰‰ Ӊ‰‹!Ý¢‰‰ µ)‰‰‹!݄Ëã¤É÷ Ÿ‰í¤7‹„5Ÿ¢‰‰ ‡ù‰‰‹£±‚‰‰åõ‰‰‹£±¢‰‰ƒ%c‰‰‹£±÷ã‰í¢‰‰c‹#•‚‰‰ O‰‰‹#•¢‰‰ µ)‰‰‹#•„Ëã¤É÷ Ÿ‰í¤7‹„5Ÿ¢‰‰ ™U‰‰‹¥‰‚‰‰åõ‰‰‹¥‰¢‰‰ƒ%c‰‰‹¥‰÷ã‰í¢‰‰c‹¥í‚‰‰ O‰‰‹¥í¢‰‰ µ)‰‰‹¥í„Ëã¤É÷ 
Ÿ‰í¤7‹„7 ¢‰‰ ›Ã‰‰‹%Á‚‰‰åõ‰‰‹%Á¢‰‰ƒ%c‰‰‹%Á÷ã‰í¢‰‰c‹§¥‚‰‰ ½‰‰‹§¥¢‰‰ ·‡‰‰‹§¥„Éu¤É÷ Ÿ‰í¤7‹‚‰‰ I‰‰‹'™¢‰‰ ›Ã‰‰‹'™‚‰‰åõ‰‰‹'™¢‰‰ƒ%c‰‰‹'™÷ã‰í¢‰‰c‹'ý‚‰‰ ½‰‰‹'ý¢‰‰ 7s‰‰‹'ý‚‰‰‹w‰‰‹'ý¤É÷ Ÿ‰í¤7‹‚‰‰ Ë÷‰‰‹¹Ñ¢‰‰ ?‰‰‹¹Ñ‚‰‰åõ‰‰‹¹Ñ¢‰‰ƒ%c‰‰‹¹Ñ÷ã‰í¢‰‰c‹9µ‚‰‰ )‰‰‹9µ¢‰‰ I቉‹9µ‚‰‰‹w‰‰‹9µ¤É÷ Ÿ‰í¤7‹‚‰‰ Ë÷‰‰‹»©¢‰‰ ?‰‰‹»©‚‰‰åõ‰‰‹»©¢‰‰ƒ%c‰‰‹»©÷ã‰í¢‰‰c‹;‚‰‰ ‰‰‹;¢‰‰ K]‰‰‹;‚‰‰‹w‰‰‹;¤Ëc Ÿ‰í¤5‚‰‰ Íc‰‰‹;ᢉ‰ Ÿ-‰‰‹;ႉ‰åõ‰‰‹;ᢉ‰ƒ%c‰‰‹;á÷ã‰í¢‰‰c‹½Å‚‰‰ Ÿõ‰‰‹½Å¢‰‰ MK‰‰‹½Å‚‰‰‹w‰‰‹½Å¤Ëc Ÿ‰í¤5‚‰‰ ÏQ‰‰‹=¹¢‰‰ ‘›‰‰‹=¹‚‰‰åõ‰‰‹=¹¢‰‰ƒ%c‰‰‹=¹÷ã‰í¢‰‰c‹¿‚‰‰ ‘a‰‰‹¿¢‰‰ MK‰‰‹¿‚‰‰‹w‰‰‹¿¤Ëc Ÿ‰í¤5‚‰‰ ÏQ‰‰‹¿ñ¢‰‰ w‰‰‹¿ñ‚‰‰åõ‰‰‹¿ñ¢‰‰ƒ%c‰‰‹¿ñ÷ã‰í¢‰‰c‹?Ղ‰‰ ‘a‰‰‹?Õ¢‰‰ MK‰‰‹?Ղ‰‰‹w‰‰‹?Õ¤Ëc Ÿ‰í¤5‚‰‰ Áω‰‹±É¢‰‰ w‰‰‹±É‚‰‰åõ‰‰‹±É¢‰‰ƒ%c‰‰‹±É÷ã‰í¢‰‰c‹1­‚‰‰ ‘a‰‰‹1­¢‰‰ O¹‰‰‹1­‚‰‰‹w‰‰‹1­¤Ëc Ÿ‰í¤5‚‰‰ Ã;‰‰‹³¢‰‰ 剉‹³‚‰‰åõ‰‰‹³¢‰‰ƒ%c‰‰‹³÷ã‰í¢‰‰c‹³å‚‰‰ “߉‰‹³å¢‰‰ A‰‰‹³å‚‰‰‹w‰‰‹³å¤Ëc Ÿ‰í¤5‚‰‰ Ã;‰‰‹3Ù¢‰‰ 剉‹3ق‰‰åõ‰‰‹3Ù¢‰‰ƒ%c‰‰‹3Ù÷ã‰í¢‰‰c‹µ½‚‰‰ •͉‰‹µ½¢‰‰ Cƒ‰‰‹µ½‚‰‰‹w‰‰‹µ½¤Ëc Ÿ‰í¤5‚‰‰ Å©‰‰‹5‘¢‰‰ Ӊ‰‹5‘‚‰‰åõ‰‰‹5‘¢‰‰ƒ%c‰‰‹5‘÷ã‰í¢‰‰c‹5õ‚‰‰ —9‰‰‹5õ¢‰‰ Åñ‰‰‹5õ‚‰‰‹w‰‰‹5õ¤Ëc Ÿ‰í¤5‚‰‰ LJ‰‰‹·é¢‰‰ O‰‰‹·é‚‰‰åõ‰‰‹·é¢‰‰ƒ%c‰‰‹·é÷ã‰í¢‰‰c‹7͂‰‰ ©—‰‰‹7Í¢‰‰ Åñ‰‰‹7͂‰‰‹w‰‰‹7ͤËc Ÿ‰í¤5‚‰‰ LJ‰‰‹É¡¢‰‰ )½‰‰‹É¡‚‰‰åõ‰‰‹É¡¢‰‰ƒ%c‰‰‹É¡÷ã‰í¢‰‰c‹I…‚‰‰ ©—‰‰‹I…¢‰‰ Åñ‰‰‹I…‚‰‰‹w‰‰‹I…¤Ëc Ÿ‰í¤5‚‰‰ Gs‰‰‹Iù¢‰‰ )½‰‰‹Iù‚‰‰åõ‰‰‹Iù¢‰‰ƒ%c‰‰‹Iù÷ã‰í¢‰‰c‹Ë݂‰‰ ©—‰‰‹ËÝ¢‰‰ Çm‰‰‹Ë݂‰‰‹w‰‰‹ËݤËc Ÿ‰í¤5‚‰‰ Y቉‹K±¢‰‰ +)‰‰‹K±‚‰‰åõ‰‰‹K±¢‰‰ƒ%c‰‰‹K±÷ã‰í¢‰‰c‹Í•‚‰‰ «‰‰‹Í•¢‰‰ Ùۉ‰‹Í•‚‰‰‹w‰‰‹Í•¤Ëc Ÿ‰í¤5‚‰‰ []‰‰‹M‰¢‰‰ -‡‰‰‹M‰‚‰‰åõ‰‰‹M‰¢‰‰ƒ%c‰‰‹M‰÷ã‰í¢‰‰c‹M킉‰ +ñ‰‰‹Mí¢‰‰ Û7‰‰‹M킉‰‹w‰‰‹Mí¤Ëc Ÿ‰í¤5‚‰‰ []‰‰‹ÏÁ¢‰‰ -‡‰‰‹ÏÁ‚‰‰åõ‰‰‹ÏÁ¢‰‰ƒ%c‰‰‹ÏÁ÷ã‰í¢‰‰c‹O¥‚‰‰ -‹O¥¢‰‰ Ý%‰‰‹O¥‚‰‰‹w‰‰‹O¥¤Ëc Ÿ‰í¤5‚‰‰ ]K‰‰‹Á™¢‰‰ ¯õ‰‰‹Á™‚‰‰åõ‰‰‹Á™¢‰‰ƒ%c‰‰‹Á™÷ã‰í¢‰‰c‹Áý‚‰‰ /[‰‰‹Áý¢‰‰ ߓ‰‰‹Áý‚‰‰‹w‰‰‹Áý¤Ëc Ÿ‰í¤5‚‰‰ _¹‰‰‹AÑ¢‰‰ ¡a‰‰‹Aт‰‰åõ‰‰‹AÑ¢‰‰ƒ%c‰‰‹AÑ÷ã‰í¢‰‰c‹Ãµ‚‰‰ /[‰‰‹Ãµ¢‰‰ ߓ‰‰‹Ãµ‚‰‰‹w‰‰‹Ãµ¤Ëc Ÿ‰í¤5‚‰‰ _¹‰‰‹C©¢‰‰ ¡a‰‰‹C©‚‰‰åõ‰‰‹C©¢‰‰ƒ%c‰‰‹C©÷ã‰í¢‰‰c‹Å‚‰‰ /[‰‰‹Å¢‰‰ ߓ‰‰‹Å‚‰‰‹w‰‰‹Å¤Ëc Ÿ‰í¤5‚‰‰ Q‰‰‹Åᢉ‰ £ß‰‰‹Åႉ‰åõ‰‰‹Åᢉ‰ƒ%c‰‰‹Åá÷ã‰í¢‰‰c‹Eł‰‰ !ɉ‰‹EÅ¢‰‰ щ‰‹Eł‰‰‹w‰‰‹EŤËc Ÿ‰í¤5‚‰‰ Sƒ‰‰‹Ç¹¢‰‰ 
¥K‰‰‹Ç¹‚‰‰åõ‰‰‹Ç¹¢‰‰ƒ%c‰‰‹Ç¹÷ã‰í¢‰‰c‹G‚‰‰ #%‰‰‹G¢‰‰ Qý‰‰‹G‚‰‰‹w‰‰‹G¤Ëc Ÿ‰í¤5‚‰‰ Sƒ‰‰‹Gñ¢‰‰ ¥K‰‰‹Gñ‚‰‰åõ‰‰‹Gñ¢‰‰ƒ%c‰‰‹Gñ÷ã‰í¢‰‰c‹ÙՂ‰‰ %‰‰‹ÙÕ¢‰‰ S뉉‹ÙՂ‰‰‹w‰‰‹ÙÕ¤Ëc Ÿ‰í¤5‚‰‰ Õñ‰‰‹YÉ¢‰‰ §9‰‰‹Yɂ‰‰åõ‰‰‹YÉ¢‰‰ƒ%c‰‰‹YÉ÷ã‰í¢‰‰c‹Û­‚‰‰ '‰‰‹Û­¢‰‰ UG‰‰‹Û­‚‰‰‹w‰‰‹Û­¤Ëc Ÿ‰í¤5‚‰‰ ×m‰‰‹[¢‰‰ ¹—‰‰‹[‚‰‰åõ‰‰‹[¢‰‰ƒ%c‰‰‹[÷ã‰í¢‰‰c‹[傉‰ '‰‰‹[墉‰ UG‰‰‹[傉‰‹w‰‰‹[å¤Ëc Ÿ‰í¤5‚‰‰ ×m‰‰‹ÝÙ¢‰‰ ¹—‰‰‹Ýق‰‰åõ‰‰‹ÝÙ¢‰‰ƒ%c‰‰‹ÝÙ÷ã‰í¢‰‰c‹]½‚‰‰ '‰‰‹]½¢‰‰ UG‰‰‹]½‚‰‰‹w‰‰‹]½¤Ëc Ÿ‰í¤5‚‰‰ éۉ‰‹ß‘¢‰‰ »‰‰‹ß‘‚‰‰åõ‰‰‹ß‘¢‰‰ƒ%c‰‰‹ß‘÷ã‰í¢‰‰c‹ßõ‚‰‰ ¹}‰‰‹ßõ¢‰‰ Wµ‰‰‹ßõ‚‰‰‹w‰‰‹ßõ¤Ëc Ÿ‰í¤5‚‰‰ ë7‰‰‹_颉‰ ;ñ‰‰‹_邉‰åõ‰‰‹_颉‰ƒ%c‰‰‹_é÷ã‰í¢‰‰c‹Ñ͂‰‰ »ë‰‰‹ÑÍ¢‰‰ i!‰‰‹Ñ͂‰‰‹w‰‰‹ÑͤËc Ÿ‰í¤5‚‰‰ ë7‰‰‹Q¡¢‰‰ ;ñ‰‰‹Q¡‚‰‰åõ‰‰‹Q¡¢‰‰ƒ%c‰‰‹Q¡÷ã‰í¢‰‰c‹Ó…‚‰‰ ½Ù‰‰‹Ó…¢‰‰ k‰‰‹Ó…‚‰‰‹w‰‰‹Ó…¤Ëc Ÿ‰í¤5‚‰‰ í%‰‰‹Óù¢‰‰ =‹Óù‚‰‰åõ‰‰‹Óù¢‰‰ƒ%c‰‰‹Óù÷ã‰í¢‰‰c‹S݂‰‰ ¿5‰‰‹SÝ¢‰‰ m‰‰‹S݂‰‰‹w‰‰‹SݤËc Ÿ‰í¤5‚‰‰ ‰‹Õ±¢‰‰ ?[‰‰‹Õ±‚‰‰åõ‰‰‹Õ±¢‰‰ƒ%c‰‰‹Õ±÷ã‰í¢‰‰c‹U•‚‰‰ ±£‰‰‹U•¢‰‰ m‰‰‹U•‚‰‰‹w‰‰‹U•¤Ëc Ÿ‰í¤5‚‰‰ ‰‹×‰¢‰‰ 1ɉ‰‹×‰‚‰‰åõ‰‰‹×‰¢‰‰ƒ%c‰‰‹×‰÷ã‰í¢‰‰c‹×킉‰ ±£‰‰‹×í¢‰‰ m‰‰‹×킉‰‹w‰‰‹×í¤Ëc Ÿ‰í¤5‚‰‰ ቉‹WÁ¢‰‰ 1ɉ‰‹WÁ‚‰‰åõ‰‰‹WÁ¢‰‰ƒ%c‰‰‹WÁ÷ã‰í¢‰‰c‹é¥‚‰‰ ±£‰‰‹é¥¢‰‰ ïy‰‰‹é¥‚‰‰‹w‰‰‹é¥¤Ëc Ÿ‰í¤5‚‰‰ aý‰‰‹i™¢‰‰ 3%‰‰‹i™‚‰‰åõ‰‰‹i™¢‰‰ƒ%c‰‰‹i™÷ã‰í¢‰‰c‹iý‚‰‰ ³‰‰‹iý¢‰‰ á׉‰‹iý‚‰‰‹w‰‰‹iý¤Ëc Ÿ‰í¤5‚‰‰ aý‰‰‹ëÑ¢‰‰ 3%‰‰‹ëт‰‰åõ‰‰‹ëÑ¢‰‰ƒ%c‰‰‹ëÑ÷ã‰í¢‰‰c‹kµ‚‰‰ µ ‰‰‹kµ¢‰‰ ãʼn‰‹kµ‚‰‰‹w‰‰‹kµ¤Ëc Ÿ‰í¤5‚‰‰ c뉉‹í©¢‰‰ 5‰‰‹í©‚‰‰åõ‰‰‹í©¢‰‰ƒ%c‰‰‹í©÷ã‰í¢‰‰c‹m‚‰‰ 5û‰‰‹m¢‰‰ å1‰‰‹m‚‰‰‹w‰‰‹m¤Ëc Ÿ‰í¤5‚‰‰ eG‰‰‹mᢉ‰ 7‰‰‹mႉ‰åõ‰‰‹mᢉ‰ƒ%c‰‰‹má÷ã‰í¢‰‰c‹ïł‰‰ 7W‰‰‹ïÅ¢‰‰ 築‰‹ïł‰‰‹w‰‰‹ïŤËc Ÿ‰í¤5‚‰‰ gµ‰‰‹o¹¢‰‰ É}‰‰‹o¹‚‰‰åõ‰‰‹o¹¢‰‰ƒ%c‰‰‹o¹÷ã‰í¢‰‰c‹á‚‰‰ 7W‰‰‹á¢‰‰ 築‰‹á‚‰‰‹w‰‰‹á¤Ëc Ÿ‰í¤5‚‰‰ gµ‰‰‹áñ¢‰‰ É}‰‰‹áñ‚‰‰åõ‰‰‹áñ¢‰‰ƒ%c‰‰‹áñ÷ã‰í¢‰‰c‹aՂ‰‰ 7W‰‰‹aÕ¢‰‰ 築‰‹aՂ‰‰‹w‰‰‹aÕ¤Ëc Ÿ‰í¤5‚‰‰ y!‰‰‹ãÉ¢‰‰ Ë뉉‹ãɂ‰‰åõ‰‰‹ãÉ¢‰‰ƒ%c‰‰‹ãÉ÷ã‰í¢‰‰c‹c­‚‰‰ Iʼn‰‹c­¢‰‰ ù‰‰‹c­‚‰‰‹w‰‰‹c­¤Ëc Ÿ‰í¤5‚‰‰ {‰‰‹å¢‰‰ Íى‰‹å‚‰‰åõ‰‰‹å¢‰‰ƒ%c‰‰‹å÷ã‰í¢‰‰c‹å傉‰ K³‰‰‹å墉‰ û ‰‰‹å傉‰‹w‰‰‹åå¤Ëc Ÿ‰í¤5‚‰‰ {‰‰‹eÙ¢‰‰ Íى‰‹eق‰‰åõ‰‰‹eÙ¢‰‰ƒ%c‰‰‹eÙ÷ã‰í¢‰‰c‹ç½‚‰‰ M/‰‰‹ç½¢‰‰ {牉‹ç½‚‰‰‹w‰‰‹ç½¤Ëc Ÿ‰í¤5‚‰‰ }‰‰‹g‘¢‰‰ Ï5‰‰‹g‘‚‰‰åõ‰‰‹g‘¢‰‰ƒ%c‰‰‹g‘÷ã‰í¢‰‰c‹gõ‚‰‰ O‰‰‹gõ¢‰‰ 
}S‰‰‹gõ‚‰‰‹w‰‰‹gõ¤Ëc Ÿ‰í¤5‚‰‰ ÿy‰‰‹ù颉‰ Á£‰‰‹ù邉‰åõ‰‰‹ù颉‰ƒ%c‰‰‹ùé÷ã‰í¢‰‰c‹y͂‰‰ O‰‰‹yÍ¢‰‰ }S‰‰‹y͂‰‰‹w‰‰‹yͤËc Ÿ‰í¤5‚‰‰ ÿy‰‰‹û¡¢‰‰ Á£‰‰‹û¡‚‰‰åõ‰‰‹û¡¢‰‰ƒ%c‰‰‹û¡÷ã‰í¢‰‰c‹{…‚‰‰ O‰‰‹{…¢‰‰ }S‰‰‹{…‚‰‰‹w‰‰‹{…¤Ëc Ÿ‰í¤5‚‰‰ ñ׉‰‹{ù¢‰‰ ɉ‹{ù‚‰‰åõ‰‰‹{ù¢‰‰ƒ%c‰‰‹{ù÷ã‰í¢‰‰c‹ý݂‰‰ A ‰‰‹ýÝ¢‰‰ Á‰‰‹ý݂‰‰‹w‰‰‹ýݤËc Ÿ‰í¤5‚‰‰ óʼn‰‹}±¢‰‰ Å ‰‰‹}±‚‰‰åõ‰‰‹}±¢‰‰ƒ%c‰‰‹}±÷ã‰í¢‰‰c‹ÿ•‚‰‰ Ãg‰‰‹ÿ•¢‰‰ q¿‰‰‹ÿ•‚‰‰‹w‰‰‹ÿ•¤Ëc Ÿ‰í¤5‚‰‰ óʼn‰‹‰¢‰‰ Å ‰‰‹‰‚‰‰åõ‰‰‹‰¢‰‰ƒ%c‰‰‹‰÷ã‰í¢‰‰c‹킉‰ ÅՉ‰‹í¢‰‰ s+‰‰‹킉‰‹w‰‰‹í¤Ëc Ÿ‰í¤5‚‰‰ õ1‰‰‹ñÁ¢‰‰ Eû‰‰‹ñÁ‚‰‰åõ‰‰‹ñÁ¢‰‰ƒ%c‰‰‹ñÁ÷ã‰í¢‰‰c‹q¥‚‰‰ ÇA‰‰‹q¥¢‰‰ u™‰‰‹q¥‚‰‰‹w‰‰‹q¥¤Ëc Ÿ‰í¤5‚‰‰ ÷¯‰‰‹ó™¢‰‰ GW‰‰‹ó™‚‰‰åõ‰‰‹ó™¢‰‰ƒ%c‰‰‹ó™÷ã‰í¢‰‰c‹óý‚‰‰ ÇA‰‰‹óý¢‰‰ ÷u‰‰‹óý‚‰‰‹w‰‰‹óý¤Ëc Ÿ‰í¤5‚‰‰‰‰‰‹sÑ¢‰‰ GW‰‰‹sт‰‰åõ‰‰‹sÑ¢‰‰ƒ%c‰‰‹sÑ÷ã‰í¢‰‰c‹õµ‚‰‰ ÇA‰‰‹õµ¢‰‰ ÷u‰‰‹õµ‚‰‰‹w‰‰‹õµ¤Ëc Ÿ‰í¤5‚‰‰‰‰‰‹u©¢‰‰ Yʼn‰‹u©‚‰‰åõ‰‰‹u©¢‰‰ƒ%c‰‰‹u©÷ã‰í¢‰‰c‹÷‚‰‰ Ù¿‰‰‹÷¢‰‰ ÷u‰‰‹÷‚‰‰‹w‰‰‹÷¤Ëc Ÿ‰í¤5‚‰‰‹ ‰‰‹÷ᢉ‰ [³‰‰‹÷ႉ‰åõ‰‰‹÷ᢉ‰ƒ%c‰‰‹÷á÷ã‰í¢‰‰c‹wł‰‰ Û­‰‰‹wÅ¢‰‰‰c‰‰‹wł‰‰‹w‰‰‹wŤËc Ÿ‰í¤5‚‰‰‹ ‰‰ ‰¹¢‰‰ [³‰‰ ‰¹‚‰‰åõ‰‰ ‰¹¢‰‰ƒ%c‰‰ ‰¹÷ã‰í¢‰‰c ‚‰‰ ݉‰ ¢‰‰‹Ñ‰‰ ‚‰‰‹w‰‰ ¤Ëc Ÿ‰í¤5‚‰‰ 牉 ñ¢‰‰ ]/‰‰ ñ‚‰‰åõ‰‰ ñ¢‰‰ƒ%c‰‰ ñ÷ã‰í¢‰‰c ‹Õ‚‰‰ ]÷‰‰ ‹Õ¢‰‰M‰‰ ‹Õ‚‰‰‹w‰‰ ‹Õ¤Ëc Ÿ‰í¤5‚‰‰ S‰‰ É¢‰‰ _‰‰ ɂ‰‰åõ‰‰ É¢‰‰ƒ%c‰‰ É÷ã‰í¢‰‰c ­‚‰‰ ]÷‰‰ ­¢‰‰»‰‰ ­‚‰‰‹w‰‰ ­¤Ëc Ÿ‰í¤5‚‰‰Á‰‰ ¢‰‰ _‰‰ ‚‰‰åõ‰‰ ¢‰‰ƒ%c‰‰ ÷ã‰í¢‰‰c 傉‰ ]÷‰‰ 墉‰»‰‰ 傉‰‹w‰‰ å¤Ëc Ÿ‰í¤5‚‰‰Á‰‰ Ù¢‰‰ Q ‰‰ Ù‚‰‰åõ‰‰ Ù¢‰‰ƒ%c‰‰ Ù÷ã‰í¢‰‰c ½‚‰‰ _c‰‰ ½¢‰‰»‰‰ ½‚‰‰‹w‰‰ ½¤Ëc Ÿ‰í¤5‚‰‰¿‰‰ ‘¢‰‰ Óg‰‰ ‘‚‰‰åõ‰‰ ‘¢‰‰ƒ%c‰‰ ‘÷ã‰í¢‰‰c õ‚‰‰ QQ‰‰ õ¢‰‰©‰‰ õ‚‰‰‹w‰‰ õ¤Ëc Ÿ‰í¤5‚‰‰+‰‰ 颉‰ ÕՉ‰ 邉‰åõ‰‰ 颉‰ƒ%c‰‰ é÷ã‰í¢‰‰c ƒÍ‚‰‰ Sω‰ ƒÍ¢‰‰ƒ‰‰ ƒÍ‚‰‰‹w‰‰ ƒÍ¤Ëc Ÿ‰í¤5‚‰‰+‰‰ ¡¢‰‰ ÕՉ‰ ¡‚‰‰åõ‰‰ ¡¢‰‰ƒ%c‰‰ ¡÷ã‰í¢‰‰c ……‚‰‰ U;‰‰ ……¢‰‰ó‰‰ ……‚‰‰‹w‰‰ ……¤Ëc Ÿ‰í¤5‚‰‰™‰‰ …ù¢‰‰ ×A‰‰ …ù‚‰‰åõ‰‰ …ù¢‰‰ƒ%c‰‰ …ù÷ã‰í¢‰‰c ݂‰‰ W©‰‰ Ý¢‰‰o‰‰ ݂‰‰‹w‰‰ ݤËc Ÿ‰í¤5‚‰‰‡u‰‰ ‡±¢‰‰ 鿉‰ ‡±‚‰‰åõ‰‰ ‡±¢‰‰ƒ%c‰‰ ‡±÷ã‰í¢‰‰c •‚‰‰ W©‰‰ •¢‰‰o‰‰ •‚‰‰‹w‰‰ •¤Ëc Ÿ‰í¤5‚‰‰‡u‰‰ ™‰¢‰‰ 鿉‰ ™‰‚‰‰åõ‰‰ ™‰¢‰‰ƒ%c‰‰ ™‰÷ã‰í¢‰‰c ™í‚‰‰ W©‰‰ ™í¢‰‰o‰‰ ™í‚‰‰‹w‰‰ ™í¤Ëc Ÿ‰í¤5‚‰‰™c‰‰ Á¢‰‰ 뭉‰ Á‚‰‰åõ‰‰ Á¢‰‰ƒ%c‰‰ Á÷ã‰í¢‰‰c ›¥‚‰‰ i‡‰‰ ›¥¢‰‰]‰‰ ›¥‚‰‰‹w‰‰ 
›¥¤Ëc Ÿ‰í¤5‚‰‰›Ñ‰‰ ™¢‰‰ 퉉 ™‚‰‰åõ‰‰ ™¢‰‰ƒ%c‰‰ ™÷ã‰í¢‰‰c ý‚‰‰ ës‰‰ ý¢‰‰ˉ‰ ý‚‰‰‹w‰‰ ý¤Ëc Ÿ‰í¤5‚‰‰›Ñ‰‰ Ñ¢‰‰ 퉉 Ñ‚‰‰åõ‰‰ Ñ¢‰‰ƒ%c‰‰ Ñ÷ã‰í¢‰‰c µ‚‰‰ í቉ µ¢‰‰'‰‰ µ‚‰‰‹w‰‰ µ¤Ëc Ÿ‰í¤5‚‰‰M‰‰ Ÿ©¢‰‰ m÷‰‰ Ÿ©‚‰‰åõ‰‰ Ÿ©¢‰‰ƒ%c‰‰ Ÿ©÷ã‰í¢‰‰c ‚‰‰ ï]‰‰ ¢‰‰•‰‰ ‚‰‰‹w‰‰ ¤Ëc Ÿ‰í¤5‚‰‰Ÿ»‰‰ ᢉ‰ oc‰‰ ႉ‰åõ‰‰ ᢉ‰ƒ%c‰‰ á÷ã‰í¢‰‰c ‘Å‚‰‰ ï]‰‰ ‘Å¢‰‰ƒ‰‰ ‘Å‚‰‰‹w‰‰ ‘ŤËc Ÿ‰í¤5‚‰‰‘©‰‰ ¹¢‰‰ oc‰‰ ¹‚‰‰åõ‰‰ ¹¢‰‰ƒ%c‰‰ ¹÷ã‰í¢‰‰c “‚‰‰ ï]‰‰ “¢‰‰ƒ‰‰ “‚‰‰‹w‰‰ “¤Ëc Ÿ‰í¤5‚‰‰‘©‰‰ “ñ¢‰‰ aQ‰‰ “ñ‚‰‰åõ‰‰ “ñ¢‰‰ƒ%c‰‰ “ñ÷ã‰í¢‰‰c Ղ‰‰ áK‰‰ Õ¢‰‰ƒ‰‰ Ղ‰‰‹w‰‰ Õ¤Ëc Ÿ‰í¤5‚‰‰“‰‰ •É¢‰‰ cω‰ •É‚‰‰åõ‰‰ •É¢‰‰ƒ%c‰‰ •É÷ã‰í¢‰‰c ­‚‰‰ 㹉‰ ­¢‰‰‘‰‰ ­‚‰‰‹w‰‰ ­¤Ëc Ÿ‰í¤5‚‰‰“‰‰ —¢‰‰ cω‰ —‚‰‰åõ‰‰ —¢‰‰ƒ%c‰‰ —÷ã‰í¢‰‰c —傉‰ 剉 —墉‰“퉉 —傉‰‹w‰‰ —å¤Ëc Ÿ‰í¤5‚‰‰ó‰‰ Ù¢‰‰ e;‰‰ ق‰‰åõ‰‰ Ù¢‰‰ƒ%c‰‰ Ù÷ã‰í¢‰‰c ©½‚‰‰ 烉‰ ©½¢‰‰•Y‰‰ ©½‚‰‰‹w‰‰ ©½¤Ëc Ÿ‰í¤5‚‰‰o‰‰ )‘¢‰‰ g©‰‰ )‘‚‰‰åõ‰‰ )‘¢‰‰ƒ%c‰‰ )‘÷ã‰í¢‰‰c )õ‚‰‰ 烉‰ )õ¢‰‰—7‰‰ )õ‚‰‰‹w‰‰ )õ¤Ëc Ÿ‰í¤5‚‰‰]‰‰ «é¢‰‰ g©‰‰ «é‚‰‰åõ‰‰ «é¢‰‰ƒ%c‰‰ «é÷ã‰í¢‰‰c +͂‰‰ 烉‰ +Í¢‰‰—7‰‰ +͂‰‰‹w‰‰ +ͤËc Ÿ‰í¤5‚‰‰]‰‰ ­¡¢‰‰ y‡‰‰ ­¡‚‰‰åõ‰‰ ­¡¢‰‰ƒ%c‰‰ ­¡÷ã‰í¢‰‰c -…‚‰‰ gñ‰‰ -…¢‰‰—7‰‰ -…‚‰‰‹w‰‰ -…¤Ëc Ÿ‰í¤5‚‰‰)ˉ‰ -ù¢‰‰ ûs‰‰ -ù‚‰‰åõ‰‰ -ù¢‰‰ƒ%c‰‰ -ù÷ã‰í¢‰‰c ¯Ý‚‰‰ ym‰‰ ¯Ý¢‰‰©¥‰‰ ¯Ý‚‰‰‹w‰‰ ¯Ý¤Ëc Ÿ‰í¤5‚‰‰+'‰‰ /±¢‰‰ ý቉ /±‚‰‰åõ‰‰ /±¢‰‰ƒ%c‰‰ /±÷ã‰í¢‰‰c ¡•‚‰‰ {ۉ‰ ¡•¢‰‰«‰‰ ¡•‚‰‰‹w‰‰ ¡•¤Ëc Ÿ‰í¤5‚‰‰+'‰‰ !‰¢‰‰ ý቉ !‰‚‰‰åõ‰‰ !‰¢‰‰ƒ%c‰‰ !‰÷ã‰í¢‰‰c !킉‰ }7‰‰ !í¢‰‰­‰‰ !킉‰‹w‰‰ !í¤Ëc Ÿ‰í¤5‚‰‰-•‰‰ £Á¢‰‰ ÿ]‰‰ £Á‚‰‰åõ‰‰ £Á¢‰‰ƒ%c‰‰ £Á÷ã‰í¢‰‰c #¥‚‰‰ }7‰‰ #¥¢‰‰-ý‰‰ #¥‚‰‰‹w‰‰ #¥¤Ëc Ÿ‰í¤5‚‰‰/ƒ‰‰ ¥™¢‰‰ ÿ]‰‰ ¥™‚‰‰åõ‰‰ ¥™¢‰‰ƒ%c‰‰ ¥™÷ã‰í¢‰‰c ¥ý‚‰‰ }7‰‰ ¥ý¢‰‰-ý‰‰ ¥ý‚‰‰‹w‰‰ ¥ý¤Ëc Ÿ‰í¤5‚‰‰¡‰‰ %Ñ¢‰‰ ñK‰‰ %т‰‰åõ‰‰ %Ñ¢‰‰ƒ%c‰‰ %Ñ÷ã‰í¢‰‰c §µ‚‰‰ %‰‰ §µ¢‰‰/i‰‰ §µ‚‰‰‹w‰‰ §µ¤Ëc Ÿ‰í¤5‚‰‰¡‰‰ '©¢‰‰ 󹉉 '©‚‰‰åõ‰‰ '©¢‰‰ƒ%c‰‰ '©÷ã‰í¢‰‰c ¹‚‰‰ q“‰‰ ¹¢‰‰/i‰‰ ¹‚‰‰‹w‰‰ ¹¤Ëc Ÿ‰í¤5‚‰‰£í‰‰ ¹á¢‰‰ õ‰‰ ¹á‚‰‰åõ‰‰ ¹á¢‰‰ƒ%c‰‰ ¹á÷ã‰í¢‰‰c 9ł‰‰ s‰‰ 9Å¢‰‰!lj‰ 9ł‰‰‹w‰‰ 9ŤËc Ÿ‰í¤5‚‰‰£í‰‰ »¹¢‰‰ õ‰‰ »¹‚‰‰åõ‰‰ »¹¢‰‰ƒ%c‰‰ »¹÷ã‰í¢‰‰c ;‚‰‰ õý‰‰ ;¢‰‰#3‰‰ ;‚‰‰‹w‰‰ ;¤Ëc Ÿ‰í¤5‚‰‰¥Y‰‰ ;ñ¢‰‰ ÷ƒ‰‰ ;ñ‚‰‰åõ‰‰ ;ñ¢‰‰ƒ%c‰‰ ;ñ÷ã‰í¢‰‰c ½Õ‚‰‰ õý‰‰ ½Õ¢‰‰%!‰‰ ½Õ‚‰‰‹w‰‰ 
½Õ¤Ëc Ÿ‰í¤5‚‰‰§7‰‰ =É¢‰‰ ÷ƒ‰‰ =ɂ‰‰åõ‰‰ =É¢‰‰ƒ%c‰‰ =É÷ã‰í¢‰‰c ¿­‚‰‰ ÷뉉 ¿­¢‰‰'Ÿ‰‰ ¿­‚‰‰‹w‰‰ ¿­¤Ëc Ÿ‰í¤5‚‰‰¹¥‰‰ ?¢‰‰ wñ‰‰ ?‚‰‰åõ‰‰ ?¢‰‰ƒ%c‰‰ ?÷ã‰í¢‰‰c ?傉‰ ÷뉉 ?墉‰'Ÿ‰‰ ?傉‰‹w‰‰ ?å¤Ëc Ÿ‰í¤5‚‰‰¹¥‰‰ ±Ù¢‰‰ m‰‰ ±Ù‚‰‰åõ‰‰ ±Ù¢‰‰ƒ%c‰‰ ±Ù÷ã‰í¢‰‰c 1½‚‰‰‰G‰‰ 1½¢‰‰'Ÿ‰‰ 1½‚‰‰‹w‰‰ 1½¤Ëc Ÿ‰í¤5‚‰‰»‰‰ ³‘¢‰‰ ۉ‰ ³‘‚‰‰åõ‰‰ ³‘¢‰‰ƒ%c‰‰ ³‘÷ã‰í¢‰‰c ³õ‚‰‰‹µ‰‰ ³õ¢‰‰9 ‰‰ ³õ‚‰‰‹w‰‰ ³õ¤Ëc Ÿ‰í¤5‚‰‰»‰‰ 3颉‰ ۉ‰ 3邉‰åõ‰‰ 3颉‰ƒ%c‰‰ 3é÷ã‰í¢‰‰c µÍ‚‰‰!‰‰ µÍ¢‰‰»ù‰‰ µÍ‚‰‰‹w‰‰ µÍ¤Ëc Ÿ‰í¤5‚‰‰½‰‰ 5¡¢‰‰ 7‰‰ 5¡‚‰‰åõ‰‰ 5¡¢‰‰ƒ%c‰‰ 5¡÷ã‰í¢‰‰c ·…‚‰‰‰‰ ·…¢‰‰½×‰‰ ·…‚‰‰‹w‰‰ ·…¤Ëc Ÿ‰í¤5‚‰‰=ý‰‰ ·ù¢‰‰%‰‰ ·ù‚‰‰åõ‰‰ ·ù¢‰‰ƒ%c‰‰ ·ù÷ã‰í¢‰‰c 7݂‰‰‰‰ 7Ý¢‰‰¿C‰‰ 7݂‰‰‹w‰‰ 7ݤËc Ÿ‰í¤5‚‰‰?i‰‰ ɱ¢‰‰%‰‰ ɱ‚‰‰åõ‰‰ ɱ¢‰‰ƒ%c‰‰ ɱ÷ã‰í¢‰‰c I•‚‰‰‰‰ I•¢‰‰¿C‰‰ I•‚‰‰‹w‰‰ I•¤Ëc Ÿ‰í¤5‚‰‰?i‰‰ ˉ¢‰‰“‰‰ ˉ‚‰‰åõ‰‰ ˉ¢‰‰ƒ%c‰‰ ˉ÷ã‰í¢‰‰c Ë킉‰‰‰ Ëí¢‰‰¿C‰‰ Ë킉‰‹w‰‰ Ëí¤Ëc Ÿ‰í¤5‚‰‰1lj‰ KÁ¢‰‰‰‰ KÁ‚‰‰åõ‰‰ KÁ¢‰‰ƒ%c‰‰ KÁ÷ã‰í¢‰‰c Í¥‚‰‰y‰‰ Í¥¢‰‰±±‰‰ Í¥‚‰‰‹w‰‰ Í¥¤Ëc Ÿ‰í¤5‚‰‰33‰‰ M™¢‰‰…ý‰‰ M™‚‰‰åõ‰‰ M™¢‰‰ƒ%c‰‰ M™÷ã‰í¢‰‰c Mý‚‰‰׉‰ Mý¢‰‰³-‰‰ Mý‚‰‰‹w‰‰ Mý¤Ëc Ÿ‰í¤5‚‰‰33‰‰ ÏÑ¢‰‰…ý‰‰ Ïт‰‰åõ‰‰ ÏÑ¢‰‰ƒ%c‰‰ ÏÑ÷ã‰í¢‰‰c Oµ‚‰‰ʼn‰ Oµ¢‰‰µ‰‰ Oµ‚‰‰‹w‰‰ Oµ¤Ëc Ÿ‰í¤5‚‰‰5!‰‰ Á©¢‰‰‡ë‰‰ Á©‚‰‰åõ‰‰ Á©¢‰‰ƒ%c‰‰ Á©÷ã‰í¢‰‰c A‚‰‰ʼn‰ A¢‰‰·‰‰‰ A‚‰‰‹w‰‰ A¤Ëc Ÿ‰í¤5‚‰‰7Ÿ‰‰ Aᢉ‰‡ë‰‰ Aႉ‰åõ‰‰ Aᢉ‰ƒ%c‰‰ Aá÷ã‰í¢‰‰c Ãł‰‰ʼn‰ ÃÅ¢‰‰·‰‰‰ Ãł‰‰‹w‰‰ ÃŤËc Ÿ‰í¤5‚‰‰I ‰‰ C¹¢‰‰™G‰‰ C¹‚‰‰åõ‰‰ C¹¢‰‰ƒ%c‰‰ C¹÷ã‰í¢‰‰c ŝ‚‰‰1‰‰ ŝ¢‰‰7e‰‰ ŝ‚‰‰‹w‰‰ ŝ¤Ëc Ÿ‰í¤5‚‰‰I ‰‰ Åñ¢‰‰›µ‰‰ Åñ‚‰‰åõ‰‰ Åñ¢‰‰ƒ%c‰‰ Åñ÷ã‰í¢‰‰c EՂ‰‰¯‰‰ EÕ¢‰‰7e‰‰ EՂ‰‰‹w‰‰ EÕ¤Ëc Ÿ‰í¤5‚‰‰Ëù‰‰ ÇÉ¢‰‰!‰‰ Çɂ‰‰åõ‰‰ ÇÉ¢‰‰ƒ%c‰‰ ÇÉ÷ã‰í¢‰‰c G­‚‰‰‰‰ G­¢‰‰IӉ‰ G­‚‰‰‹w‰‰ G­¤Ëc Ÿ‰í¤5‚‰‰Ëù‰‰ ف¢‰‰!‰‰ ف‚‰‰åõ‰‰ ف¢‰‰ƒ%c‰‰ ف÷ã‰í¢‰‰c Ù傉‰ ‰‰ Ù墉‰KÁ‰‰ Ù傉‰‹w‰‰ Ùå¤Ëc Ÿ‰í¤5‚‰‰Í׉‰ YÙ¢‰‰Ÿ‰‰ Yق‰‰åõ‰‰ YÙ¢‰‰ƒ%c‰‰ YÙ÷ã‰í¢‰‰c Û½‚‰‰ ‰‰ Û½¢‰‰M=‰‰ Û½‚‰‰‹w‰‰ Û½¤Ëc Ÿ‰í¤5‚‰‰ÏC‰‰ [‘¢‰‰Ÿ‰‰ [‘‚‰‰åõ‰‰ [‘¢‰‰ƒ%c‰‰ [‘÷ã‰í¢‰‰c [õ‚‰‰ ‰‰ [õ¢‰‰M=‰‰ [õ‚‰‰‹w‰‰ [õ¤Ëc Ÿ‰í¤5‚‰‰Á±‰‰ Ý颉‰‘‰‰ Ý邉‰åõ‰‰ Ý颉‰ƒ%c‰‰ Ýé÷ã‰í¢‰‰c ]͂‰‰Ÿç‰‰ ]Í¢‰‰O«‰‰ ]͂‰‰‹w‰‰ ]ͤËc Ÿ‰í¤5‚‰‰Á±‰‰ ß¡¢‰‰y‰‰ ß¡‚‰‰åõ‰‰ ß¡¢‰‰ƒ%c‰‰ ß¡÷ã‰í¢‰‰c _…‚‰‰‘S‰‰ 
_…¢‰‰O«‰‰ _…‚‰‰‹w‰‰ _…¤Ëc Ÿ‰í¤5‚‰‰Ã-‰‰ _ù¢‰‰׉‰ _ù‚‰‰åõ‰‰ _ù¢‰‰ƒ%c‰‰ _ù÷ã‰í¢‰‰c Ñ݂‰‰“Á‰‰ ÑÝ¢‰‰A‰‰ Ñ݂‰‰‹w‰‰ ÑݤËc Ÿ‰í¤5‚‰‰Ã-‰‰ Q±¢‰‰׉‰ Q±‚‰‰åõ‰‰ Q±¢‰‰ƒ%c‰‰ Q±÷ã‰í¢‰‰c ӕ‚‰‰•¿‰‰ ӕ¢‰‰Ãu‰‰ ӕ‚‰‰‹w‰‰ ӕ¤Ëc Ÿ‰í¤5‚‰‰Å‰‰ S‰¢‰‰ʼn‰ S‰‚‰‰åõ‰‰ S‰¢‰‰ƒ%c‰‰ S‰÷ã‰í¢‰‰c S킉‰•¿‰‰ Sí¢‰‰Å㉉ S킉‰‹w‰‰ Sí¤Ëc Ÿ‰í¤5‚‰‰Ç‰‰‰ ÕÁ¢‰‰ʼn‰ ÕÁ‚‰‰åõ‰‰ ÕÁ¢‰‰ƒ%c‰‰ ÕÁ÷ã‰í¢‰‰c U¥‚‰‰—+‰‰ U¥¢‰‰Ç_‰‰ U¥‚‰‰‹w‰‰ U¥¤Ëc Ÿ‰í¤5‚‰‰Ge‰‰ י¢‰‰1‰‰ י‚‰‰åõ‰‰ י¢‰‰ƒ%c‰‰ י÷ã‰í¢‰‰c ×ý‚‰‰—+‰‰ ×ý¢‰‰Ç_‰‰ ×ý‚‰‰‹w‰‰ ×ý¤Ëc Ÿ‰í¤5‚‰‰YӉ‰ WÑ¢‰‰)¯‰‰ Wт‰‰åõ‰‰ WÑ¢‰‰ƒ%c‰‰ WÑ÷ã‰í¢‰‰c 鵂‰‰©™‰‰ éµ¢‰‰Ù͉‰ 鵂‰‰‹w‰‰ 鵤Ëc Ÿ‰í¤5‚‰‰YӉ‰ i©¢‰‰+‰‰ i©‚‰‰åõ‰‰ i©¢‰‰ƒ%c‰‰ i©÷ã‰í¢‰‰c 덂‰‰)u‰‰ 덢‰‰Ù͉‰ 덂‰‰‹w‰‰ 덤Ëc Ÿ‰í¤5‚‰‰YӉ‰ ëᢉ‰+‰‰ ëႉ‰åõ‰‰ ëᢉ‰ƒ%c‰‰ ëá÷ã‰í¢‰‰c kł‰‰+c‰‰ kÅ¢‰‰Û»‰‰ kł‰‰‹w‰‰ kŤËc Ÿ‰í¤5‚‰‰[Á‰‰ í¹¢‰‰- ‰‰ í¹‚‰‰åõ‰‰ í¹¢‰‰ƒ%c‰‰ í¹÷ã‰í¢‰‰c m‚‰‰+c‰‰ m¢‰‰Ý‰‰ m‚‰‰‹w‰‰ m¤Ëc Ÿ‰í¤5‚‰‰]=‰‰ mñ¢‰‰- ‰‰ mñ‚‰‰åõ‰‰ mñ¢‰‰ƒ%c‰‰ mñ÷ã‰í¢‰‰c ïՂ‰‰-щ‰ ïÕ¢‰‰ß…‰‰ ïՂ‰‰‹w‰‰ ïÕ¤Ëc Ÿ‰í¤5‚‰‰_«‰‰ oÉ¢‰‰¯ç‰‰ oɂ‰‰åõ‰‰ oÉ¢‰‰ƒ%c‰‰ oÉ÷ã‰í¢‰‰c ᭂ‰‰-щ‰ á­¢‰‰ß…‰‰ ᭂ‰‰‹w‰‰ á­¤Ëc Ÿ‰í¤5‚‰‰Q‰‰ a¢‰‰¡S‰‰ a‚‰‰åõ‰‰ a¢‰‰ƒ%c‰‰ a÷ã‰í¢‰‰c a傉‰/M‰‰ a墉‰_q‰‰ a傉‰‹w‰‰ aå¤Ëc Ÿ‰í¤5‚‰‰Q‰‰ ãÙ¢‰‰£Á‰‰ ãق‰‰åõ‰‰ ãÙ¢‰‰ƒ%c‰‰ ãÙ÷ã‰í¢‰‰c c½‚‰‰!»‰‰ c½¢‰‰_q‰‰ c½‚‰‰‹w‰‰ c½¤Ëc Ÿ‰í¤5‚‰‰Óu‰‰ 呢‰‰¥¿‰‰ 呂‰‰åõ‰‰ 呢‰‰ƒ%c‰‰ å‘÷ã‰í¢‰‰c åõ‚‰‰#©‰‰ åõ¢‰‰Qo‰‰ åõ‚‰‰‹w‰‰ åõ¤Ëc Ÿ‰í¤5‚‰‰Óu‰‰ e颉‰¥¿‰‰ e邉‰åõ‰‰ e颉‰ƒ%c‰‰ eé÷ã‰í¢‰‰c ç͂‰‰%‰‰ çÍ¢‰‰S݉‰ ç͂‰‰‹w‰‰ çͤËc Ÿ‰í¤5‚‰‰Õ㉉ g¡¢‰‰§+‰‰ g¡‚‰‰åõ‰‰ g¡¢‰‰ƒ%c‰‰ g¡÷ã‰í¢‰‰c ù…‚‰‰%‰‰ ù…¢‰‰UI‰‰ ù…‚‰‰‹w‰‰ ù…¤Ëc Ÿ‰í¤5‚‰‰×_‰‰ ùù¢‰‰§+‰‰ ùù‚‰‰åõ‰‰ ùù¢‰‰ƒ%c‰‰ ùù÷ã‰í¢‰‰c y݂‰‰%‰‰ yÝ¢‰‰UI‰‰ y݂‰‰‹w‰‰ yݤËc Ÿ‰í¤5‚‰‰é͉‰ û±¢‰‰¹™‰‰ û±‚‰‰åõ‰‰ û±¢‰‰ƒ%c‰‰ û±÷ã‰í¢‰‰c {•‚‰‰§ó‰‰ {•¢‰‰W§‰‰ {•‚‰‰‹w‰‰ {•¤Ëc Ÿ‰í¤5‚‰‰ë»‰‰ ý‰¢‰‰9u‰‰ ý‰‚‰‰åõ‰‰ ý‰¢‰‰ƒ%c‰‰ ý‰÷ã‰í¢‰‰c ý킉‰¹o‰‰ ýí¢‰‰i•‰‰ ý킉‰‹w‰‰ ýí¤Ëc Ÿ‰í¤5‚‰‰ë»‰‰ }Á¢‰‰;c‰‰ }Á‚‰‰åõ‰‰ }Á¢‰‰ƒ%c‰‰ }Á÷ã‰í¢‰‰c ÿ¥‚‰‰»]‰‰ ÿ¥¢‰‰i•‰‰ ÿ¥‚‰‰‹w‰‰ ÿ¥¤Ëc Ÿ‰í¤5‚‰‰ë»‰‰ ™¢‰‰;c‰‰ ™‚‰‰åõ‰‰ ™¢‰‰ƒ%c‰‰ ™÷ã‰í¢‰‰c ý‚‰‰½Ë‰‰ ý¢‰‰k‰‰ ý‚‰‰‹w‰‰ ý¤Ëc Ÿ‰í¤5‚‰‰í‰‰ ñÑ¢‰‰=щ‰ ñт‰‰åõ‰‰ ñÑ¢‰‰ƒ%c‰‰ ñÑ÷ã‰í¢‰‰c qµ‚‰‰½Ë‰‰ 
qµ¢‰‰íÿ‰‰ qµ‚‰‰‹w‰‰ qµ¤Ëc Ÿ‰í¤5‚‰‰ï…‰‰ 󩢉‰=щ‰ 󩂉‰åõ‰‰ 󩢉‰ƒ%c‰‰ ó©÷ã‰í¢‰‰c s‚‰‰¿'‰‰ s¢‰‰ïk‰‰ s‚‰‰‹w‰‰ s¤Ëc Ÿ‰í¤5‚‰‰oq‰‰ sᢉ‰?M‰‰ sႉ‰åõ‰‰ sᢉ‰ƒ%c‰‰ sá÷ã‰í¢‰‰c õł‰‰¿'‰‰ õÅ¢‰‰ïk‰‰ õł‰‰‹w‰‰ õŤËc Ÿ‰í¤5‚‰‰ao‰‰ u¹¢‰‰1»‰‰ u¹‚‰‰åõ‰‰ u¹¢‰‰ƒ%c‰‰ u¹÷ã‰í¢‰‰c ÷‚‰‰±•‰‰ ÷¢‰‰áY‰‰ ÷‚‰‰‹w‰‰ ÷¤Ëc Ÿ‰í¤5‚‰‰ao‰‰ ÷ñ¢‰‰3©‰‰ ÷ñ‚‰‰åõ‰‰ ÷ñ¢‰‰ƒ%c‰‰ ÷ñ÷ã‰í¢‰‰c wՂ‰‰³ƒ‰‰ wÕ¢‰‰áY‰‰ wՂ‰‰‹w‰‰ wÕ¤Ëc Ÿ‰í¤5‚‰‰ao‰‰‰É¢‰‰3©‰‰‰É‚‰‰åõ‰‰‰É¢‰‰ƒ%c‰‰‰É÷ã‰í¢‰‰c ­‚‰‰3‰‰ ­¢‰‰ã·‰‰ ­‚‰‰‹w‰‰ ­¤Ëc Ÿ‰í¤5‚‰‰c݉‰‹¢‰‰5‰‰‹‚‰‰åõ‰‰‹¢‰‰ƒ%c‰‰‹÷ã‰í¢‰‰c‹å‚‰‰3‰‰‹å¢‰‰å#‰‰‹å‚‰‰‹w‰‰‹å¤Ëc Ÿ‰í¤5‚‰‰eI‰‰ Ù¢‰‰5‰‰ ق‰‰åõ‰‰ Ù¢‰‰ƒ%c‰‰ Ù÷ã‰í¢‰‰c½‚‰‰5퉉½¢‰‰ç‘‰‰½‚‰‰‹w‰‰½¤Ëc Ÿ‰í¤5‚‰‰g§‰‰ ‘¢‰‰·ó‰‰ ‘‚‰‰åõ‰‰ ‘¢‰‰ƒ%c‰‰ ‘÷ã‰í¢‰‰c õ‚‰‰5퉉 õ¢‰‰ç‘‰‰ õ‚‰‰‹w‰‰ õ¤Ëc Ÿ‰í¤5‚‰‰y•‰‰é¢‰‰Éo‰‰é‚‰‰åõ‰‰é¢‰‰ƒ%c‰‰é÷ã‰í¢‰‰c͂‰‰7Y‰‰Í¢‰‰ù‰‰͂‰‰‹w‰‰ͤËc Ÿ‰í¤5‚‰‰y•‰‰¡¢‰‰Ë]‰‰¡‚‰‰åõ‰‰¡¢‰‰ƒ%c‰‰¡÷ã‰í¢‰‰c…‚‰‰I7‰‰…¢‰‰ù‰‰…‚‰‰‹w‰‰…¤Ëc Ÿ‰í¤5‚‰‰{‰‰ù¢‰‰Íˉ‰ù‚‰‰åõ‰‰ù¢‰‰ƒ%c‰‰ù÷ã‰í¢‰‰cƒÝ‚‰‰K¥‰‰ƒÝ¢‰‰y{‰‰ƒÝ‚‰‰‹w‰‰ƒÝ¤Ëc Ÿ‰í¤5‚‰‰{‰‰±¢‰‰Íˉ‰±‚‰‰åõ‰‰±¢‰‰ƒ%c‰‰±÷ã‰í¢‰‰c…•‚‰‰K¥‰‰…•¢‰‰{鉉…•‚‰‰‹w‰‰…•¤Ëc Ÿ‰í¤5‚‰‰ýÿ‰‰‰¢‰‰Íˉ‰‰‚‰‰åõ‰‰‰¢‰‰ƒ%c‰‰‰÷ã‰í¢‰‰c킉‰M‰‰í¢‰‰}E‰‰킉‰‹w‰‰í¤Ëc Ÿ‰í¤5‚‰‰ÿk‰‰‡Á¢‰‰Ï'‰‰‡Á‚‰‰åõ‰‰‡Á¢‰‰ƒ%c‰‰‡Á÷ã‰í¢‰‰c¥‚‰‰M‰‰¥¢‰‰}E‰‰¥‚‰‰‹w‰‰¥¤Ëc Ÿ‰í¤5‚‰‰ñY‰‰™™¢‰‰Á•‰‰™™‚‰‰åõ‰‰™™¢‰‰ƒ%c‰‰™™÷ã‰í¢‰‰c™ý‚‰‰O‰‰™ý¢‰‰3‰‰™ý‚‰‰‹w‰‰™ý¤Ëc Ÿ‰í¤5‚‰‰ó·‰‰Ñ¢‰‰Ãƒ‰‰т‰‰åõ‰‰Ñ¢‰‰ƒ%c‰‰Ñ÷ã‰í¢‰‰c›µ‚‰‰Áý‰‰›µ¢‰‰q¡‰‰›µ‚‰‰‹w‰‰›µ¤Ëc Ÿ‰í¤5‚‰‰ó·‰‰©¢‰‰C‰‰©‚‰‰åõ‰‰©¢‰‰ƒ%c‰‰©÷ã‰í¢‰‰c‚‰‰Ãi‰‰¢‰‰q¡‰‰‚‰‰‹w‰‰¤Ëc Ÿ‰í¤5‚‰‰ó·‰‰á¢‰‰C‰‰á‚‰‰åõ‰‰á¢‰‰ƒ%c‰‰á÷ã‰í¢‰‰cł‰‰Ãi‰‰Å¢‰‰s‰‰ł‰‰‹w‰‰ŤËc Ÿ‰í¤5‚‰‰õ#‰‰Ÿ¹¢‰‰C‰‰Ÿ¹‚‰‰åõ‰‰Ÿ¹¢‰‰ƒ%c‰‰Ÿ¹÷ã‰í¢‰‰c‚‰‰Ålj‰¢‰‰u‹‰‰‚‰‰‹w‰‰¤Ëc Ÿ‰í¤5‚‰‰÷‘‰‰ñ¢‰‰E퉉ñ‚‰‰åõ‰‰ñ¢‰‰ƒ%c‰‰ñ÷ã‰í¢‰‰c‘Õ‚‰‰Ç3‰‰‘Õ¢‰‰÷ù‰‰‘Õ‚‰‰‹w‰‰‘Õ¤Ëc Ÿ‰í¤5‚‰‰ ‰‰‰É¢‰‰GY‰‰ɂ‰‰åõ‰‰É¢‰‰ƒ%c‰‰É÷ã‰í¢‰‰c“­‚‰‰Ç3‰‰“­¢‰‰÷ù‰‰“­‚‰‰‹w‰‰“­¤Ëc Ÿ‰í¤5‚‰‰ {‰‰¢‰‰Y7‰‰‚‰‰åõ‰‰¢‰‰ƒ%c‰‰÷ã‰í¢‰‰c傉‰Ù!‰‰墉‰ 
‰U‰‰傉‰‹w‰‰å¤Ëc Ÿ‰í¤5‚‰‰ {‰‰•Ù¢‰‰[¥‰‰•Ù‚‰‰åõ‰‰•Ù¢‰‰ƒ%c‰‰•Ù÷ã‰í¢‰‰c½‚‰‰ÛŸ‰‰½¢‰‰ ‰U‰‰½‚‰‰‹w‰‰½¤Ëc Ÿ‰í¤5‚‰‰ {‰‰—‘¢‰‰[¥‰‰—‘‚‰‰åõ‰‰—‘¢‰‰ƒ%c‰‰—‘÷ã‰í¢‰‰c—õ‚‰‰Ý ‰‰—õ¢‰‰ ‹Ã‰‰—õ‚‰‰‹w‰‰—õ¤Ëc Ÿ‰í¤5‚‰‰ 鉉颉‰]‰‰邉‰åõ‰‰颉‰ƒ%c‰‰é÷ã‰í¢‰‰c©Í‚‰‰Ý ‰‰©Í¢‰‰ ?‰‰©Í‚‰‰‹w‰‰©Í¤Ëc Ÿ‰í¤5‚‰‰ E‰‰)¡¢‰‰]‰‰)¡‚‰‰åõ‰‰)¡¢‰‰ƒ%c‰‰)¡÷ã‰í¢‰‰c«…‚‰‰]ù‰‰«…¢‰‰ -‰‰«…‚‰‰‹w‰‰«…¤Ëc Ÿ‰í¤5‚‰‰ 3‰‰«ù¢‰‰_‰‰«ù‚‰‰åõ‰‰«ù¢‰‰ƒ%c‰‰«ù÷ã‰í¢‰‰c+݂‰‰]ù‰‰+Ý¢‰‰ -‰‰+݂‰‰‹w‰‰+ݤËc Ÿ‰í¤5‚‰‰ ¡‰‰­±¢‰‰Ñý‰‰­±‚‰‰åõ‰‰­±¢‰‰ƒ%c‰‰­±÷ã‰í¢‰‰c-•‚‰‰_׉‰-•¢‰‰ ›‰‰-•‚‰‰‹w‰‰-•¤Ëc Ÿ‰í¤5‚‰‰ ‰‰¯‰¢‰‰Ói‰‰¯‰‚‰‰åõ‰‰¯‰¢‰‰ƒ%c‰‰¯‰÷ã‰í¢‰‰c¯í‚‰‰QC‰‰¯í¢‰‰ w‰‰¯í‚‰‰‹w‰‰¯í¤Ëc Ÿ‰í¤5‚‰‰ ‰‰/Á¢‰‰Õlj‰/Á‚‰‰åõ‰‰/Á¢‰‰ƒ%c‰‰/Á÷ã‰í¢‰‰c¡¥‚‰‰S±‰‰¡¥¢‰‰ w‰‰¡¥‚‰‰‹w‰‰¡¥¤Ëc Ÿ‰í¤5‚‰‰ ‰‰!™¢‰‰Õlj‰!™‚‰‰åõ‰‰!™¢‰‰ƒ%c‰‰!™÷ã‰í¢‰‰c!ý‚‰‰S±‰‰!ý¢‰‰ 剉!ý‚‰‰‹w‰‰!ý¤Ëc Ÿ‰í¤5‚‰‰ ‹‰‰£Ñ¢‰‰Õlj‰£Ñ‚‰‰åõ‰‰£Ñ¢‰‰ƒ%c‰‰£Ñ÷ã‰í¢‰‰c#µ‚‰‰U-‰‰#µ¢‰‰ Ӊ‰#µ‚‰‰‹w‰‰#µ¤Ëc Ÿ‰í¤5‚‰‰ ‡ù‰‰¥©¢‰‰×3‰‰¥©‚‰‰åõ‰‰¥©¢‰‰ƒ%c‰‰¥©÷ã‰í¢‰‰c%‚‰‰U-‰‰%¢‰‰ Ӊ‰%‚‰‰‹w‰‰%¤Ëc Ÿ‰í¤5‚‰‰ ™U‰‰%ᢉ‰é!‰‰%ႉ‰åõ‰‰%ᢉ‰ƒ%c‰‰%á÷ã‰í¢‰‰c§Å‚‰‰W‰‰§Å¢‰‰ O‰‰§Å‚‰‰‹w‰‰§Å¤Ëc Ÿ‰í¤5‚‰‰ ›Ã‰‰'¹¢‰‰ëŸ‰‰'¹‚‰‰åõ‰‰'¹¢‰‰ƒ%c‰‰'¹÷ã‰í¢‰‰c¹‚‰‰i‰‰‰¹¢‰‰ ½‰‰¹‚‰‰‹w‰‰¹¤Ëc Ÿ‰í¤5‚‰‰ ›Ã‰‰¹ñ¢‰‰í ‰‰¹ñ‚‰‰åõ‰‰¹ñ¢‰‰ƒ%c‰‰¹ñ÷ã‰í¢‰‰c9Ղ‰‰ëe‰‰9Õ¢‰‰ ½‰‰9Ղ‰‰‹w‰‰9Õ¤Ëc Ÿ‰í¤5‚‰‰ ›Ã‰‰»É¢‰‰í ‰‰»É‚‰‰åõ‰‰»É¢‰‰ƒ%c‰‰»É÷ã‰í¢‰‰c;­‚‰‰ëe‰‰;­¢‰‰ )‰‰;­‚‰‰‹w‰‰;­¤Ëc Ÿ‰í¤5‚‰‰ ?‰‰½¢‰‰í ‰‰½‚‰‰åõ‰‰½¢‰‰ƒ%c‰‰½÷ã‰í¢‰‰c½å‚‰‰íӉ‰½å¢‰‰ ‰‰½å‚‰‰‹w‰‰½å¤Ëc Ÿ‰í¤5‚‰‰ Ÿ-‰‰=Ù¢‰‰mù‰‰=ق‰‰åõ‰‰=Ù¢‰‰ƒ%c‰‰=Ù÷ã‰í¢‰‰c¿½‚‰‰ïÁ‰‰¿½¢‰‰ Ÿõ‰‰¿½‚‰‰‹w‰‰¿½¤Ëc Ÿ‰í¤5‚‰‰ ‘›‰‰?‘¢‰‰o׉‰?‘‚‰‰åõ‰‰?‘¢‰‰ƒ%c‰‰?‘÷ã‰í¢‰‰c?õ‚‰‰ïÁ‰‰?õ¢‰‰ Ÿõ‰‰?õ‚‰‰‹w‰‰?õ¤Ëc Ÿ‰í¤5‚‰‰ w‰‰±é¢‰‰aC‰‰±é‚‰‰åõ‰‰±é¢‰‰ƒ%c‰‰±é÷ã‰í¢‰‰c1͂‰‰á=‰‰1Í¢‰‰ ‘a‰‰1͂‰‰‹w‰‰1ͤËc Ÿ‰í¤5‚‰‰ w‰‰³¡¢‰‰c±‰‰³¡‚‰‰åõ‰‰³¡¢‰‰ƒ%c‰‰³¡÷ã‰í¢‰‰c3…‚‰‰ã«‰‰3…¢‰‰ ‘a‰‰3…‚‰‰‹w‰‰3…¤Ëc Ÿ‰í¤5‚‰‰ w‰‰3ù¢‰‰c±‰‰3ù‚‰‰åõ‰‰3ù¢‰‰ƒ%c‰‰3ù÷ã‰í¢‰‰cµÝ‚‰‰ã«‰‰µÝ¢‰‰ “߉‰µÝ‚‰‰‹w‰‰µÝ¤Ëc Ÿ‰í¤5‚‰‰ 
剉5±¢‰‰c±‰‰5±‚‰‰åõ‰‰5±¢‰‰ƒ%c‰‰5±÷ã‰í¢‰‰c·•‚‰‰å‰‰·•¢‰‰ •͉‰·•‚‰‰‹w‰‰·•¤Ëc Ÿ‰í¤5‚‰‰ Ӊ‰7‰¢‰‰e-‰‰7‰‚‰‰åõ‰‰7‰¢‰‰ƒ%c‰‰7‰÷ã‰í¢‰‰c7킉‰eu‰‰7í¢‰‰ —9‰‰7킉‰‹w‰‰7í¤Ëc Ÿ‰í¤5‚‰‰ O‰‰ÉÁ¢‰‰g‰‰ÉÁ‚‰‰åõ‰‰ÉÁ¢‰‰ƒ%c‰‰ÉÁ÷ã‰í¢‰‰cI¥‚‰‰eu‰‰I¥¢‰‰ —9‰‰I¥‚‰‰‹w‰‰I¥¤Ëc‚‰‰‰]剉Ë™¢‰‰…W]‰‰Ë™÷ã‰í¢‰‰‰Ý߉‰Ëý Ÿ‰í¢‰‰…W]‰‰KÑ÷ã‰í¢‰‰‰Ý߉‰Íµ Ÿ‰í¢‰‰…W]‰‰M©÷ã‰í¢‰‰‰Ý߉‰Ï Ÿ‰í¢‰‰…W]‰‰Ïá÷ã‰í¢‰‰‰Ý߉‰OÅ Ÿ‰í¢‰‰…W]‰‰Á¹÷ã‰í¢‰‰‰Ý߉‰A Ÿ‰í¢‰‰…W]‰‰Añ÷ã‰í¢‰‰‰Ý߉‰ÃÕ Ÿ‰í¢‰‰…W]‰‰CÉ÷ã‰í¢‰‰‰Ý߉‰Å­ Ÿ‰í¢‰‰…W]‰‰E÷ã‰í¢‰‰‰Ý߉‰Eå Ÿ‰í¢‰‰…W]‰‰ÇÙ÷ã‰í¢‰‰‰Ý߉‰G½ Ÿ‰í¢‰‰…W]‰‰Ù‘÷ã‰í¢‰‰‰Ý߉‰Ùõ Ÿ‰í¢‰‰…W]‰‰YéQi%¡¤eµ‚‰‰39‰‰w¤ai‚‰‰ý‰‰w¤ai‚‰‰Ÿç‰‰w¤ai‚‰‰]‰‰w¤ai Ÿ‰í¤Ÿ‚‰‰ m‰‰w够‚‰‰ƒc‰‰w够‚‰‰¯¡‰‰w够‚‰‰…¡‰‰w夛Í÷ã‰í¤eµ‚‰‰39‰‰ ‰Ù¤ai‚‰‰ý‰‰ ‰Ù¤ai‚‰‰Ÿç‰‰ ‰Ù¤ai‚‰‰]‰‰ ‰Ù¤ai‚‰‰‰Û›‰‰ ½¤o‚‰‰ ŵ‰‰ ½¤o„…u¤-„…õ¤¡í‚‰‰ ŸÉ‰‰ ½¤݄5¤£Y„?Ǥ¯ÿ‚‰‰ ñK‰‰ ½¤£Ù„5Ÿ¤?E‚‰‰ A‰‰ ½¤£Y„5¤¯ÿ„13¤-„«¤¯ÿ‚‰‰‰‰ ½¤£Y‚‰‰…©[‰‰ ½¤+÷ã‰í¤Õm‚‰‰ÏI‰‰ ‹‘¤]'‚‰‰ƒc‰‰ ‹‘¤Qƒ„ÿפÓ„ÏM¤Qƒ„Ëc¤]'‚‰‰Ÿé‰‰ ‹‘¤Á;„Ëã¤]¹‚‰‰q¡‰‰ ‹‘¤Qƒ„Á»¤]'„Ëc¤ë¥‚‰‰‹Ç{‰‰ ‹‘¤_•„{¤Ó„{ ¤í‚‰‰‰åk‰‰ ‹‘¤íƒ±‰í¤»‚‰‰ [y‰‰ ‹õ¤‡ã„‘…¤‘„ݤ‘…‚‰‰ “‰‰ ‹õ¤›Í„7 ¤=é„13¤‘…‚‰‰ ûs‰‰ ‹õ¤=é„7 ¤3/‚‰‰õ ‰‰ ‹õ¤=i„7‹¤‘…‚‰‰G¡‰‰ ‹õ¤‘…„‘…¤‘…‚‰‰·‰‰ ‹õ¤=i‚‰‰…‰‰ ‹õ¤§#÷ã‰í¤Y]‚‰‰Å‰‰ é¤Ã‚‰‰ƒŸ¿‰‰ é¤oý„oý¤oý‚‰‰Ӊ‰ é¤oý„É÷¤Ã‚‰‰)g‰‰ é¤ÍQ„Éu¤Ã©‚‰‰y{‰‰ é¤oý„ÏM¤Ã©„Éu¤eµ‚‰‰‹O·‰‰ é¤oý„륤o{„oý¤yŸ‚‰‰‰çى‰ é¤cǍy‰í¤™_‚‰‰ ]W‰‰ Í¤«„ݤ9„)'¤»‚‰‰ •‰‰ Í¤™„7‹¢‰‰/ƒ‰‰ Í„=餝»‚‰‰ e;‰‰ Í¢‰‰§+‰‰ Í„3¡¤›Í„Ÿ¤Ÿ‚‰‰ÿ³‰‰ Í¢‰‰?!‰‰ Í„3¡¤»‚‰‰Y‰‰ Í¤»„欄»‚‰‰‰‰ Í¢‰‰Û Í‚‰‰…9‰‰ Í¤…õ„›M¤›Í÷ã‰í¤eµ„e3¤{‚‰‰Yى‰ ¡¢‰‰™i‰‰ ¡‚‰‰ƒA‰‰ ¡¤cDŽ퓤cǂ‰‰ e‰‰ ¡¤cDŽÍÑ¢‰‰}­‰‰ ¡‚‰‰/?‰‰ ¡¤ai„ai¤eµ„ÍÑ¢‰‰ 剉 ¡‚‰‰ù‰‰ ¡¤cDŽ颉‰ m‰‰ ¡„É÷¤ýù‚‰‰‹MY‰‰ ¡¤cDŽ×Y¤cG„륤ÿׂ‰‰‰çى‰ ¡¤g! Ÿ‰í¤™_‚‰‰ ]W‰‰ …¤«„ݤ9„)'¤»‚‰‰ •‰‰ …¤™„7‹¢‰‰/ƒ‰‰ …„=餝»‚‰‰ e;‰‰ …¢‰‰§+‰‰ …„3¡¤›Í„Ÿ¤Ÿ‚‰‰ÿ³‰‰ …¢‰‰?!‰‰ …„3¡¤»‚‰‰Y‰‰ …¤»„欄»‚‰‰‰‰ …¢‰‰Û …‚‰‰…9‰‰ …¤…õ„›M¤›Í‰_‰í¤{„é·¤ÿׄÉ÷¤Qƒ„cǤ{ ‚‰‰ƒ›Õ‰‰ ù¤g!„é·¤g!‚‰‰‰‰‰ ù¤g¡„ÏϤQƒ„cǤ{ ‚‰‰#›‰‰ ù¤e3„륤eµ„Á»¤Qƒ„cǤ{‚‰‰ç‘‰‰ ù¤g!„ŤQƒ„cǤ{ „É÷¤ÿׂ‰‰‹Kk‰‰ ù¤eµ„Õí¤g!„é7¤ÿׂ‰‰‰ù5‰‰ ù¤g! 
Ÿ‰í¤™_‚‰‰ ]W‰‰ ݤ«„I¤™_„+•¤›Í‚‰‰ “‰‰ ݤ«„7‹¤…u„»¤¯ÿ„;{¤™_‚‰‰ aQ‰‰ ݤ…õ„»¤¯ÿ„?Ǥ›Í„ݤ›M‚‰‰ûW‰‰ ݤ…u„»¤¯ÿ„1³¤™Ñ‚‰‰[‹‰‰ ݤ™_„ˤ™_‚‰‰‡•‰‰ ݤ…u„»¤¯ÿ„7‹¤«„ˤ…õ÷ã‰í¤ÿׄQƒ¤ñC„ÍѤ×ۄÕí¤ÿׂ‰‰ƒ›Õ‰‰ ±¤g!„é·¤g!‚‰‰‰‰‰ ±¤g¡„Á;¤×ۄÕí¤ÿׂ‰‰¥w‰‰ ±¤g!„×Y¤g!„Á;¤×Y„Õí¤ÿׂ‰‰ç‘‰‰ ±¤g!„Eó¤×ۄÕí¤ÿׄÉ÷¤ÿU‚‰‰‹Kk‰‰ ±¤g!„Ó¤g!„×ۤ󱂉‰‰û£‰‰ ±¤eµ Ÿ‰í¤›Í‚‰‰ _ʼn‰ •¤ Á„)§¤™_„-¤™_‚‰‰ ŸÉ‰‰ •¤+„7‹¤«„+•¤)§„»¤™_‚‰‰ m÷‰‰ •¤«„+•¤)'„?E¤™_„)'¤™_‚‰‰g ‰‰ •¤«„+•¤)§„?E¤™Ñ‚‰‰[‹‰‰ •¤™_„ˤ™_‚‰‰ƒË‰‰ •¤«„+•¤)§„3¡¤=„¯ÿ¤«‰Ñ‰í¤ÿU„]¹¤ñC„é¤é·„[ˤÿׂ‰‰ƒ›Õ‰‰ ƒ‰¤g!„é·¤g!‚‰‰‰‰‰ ƒ‰¤g¡„Ҥ鷄[ˤÿׄÏM¤eµ„Õí¤g!„Ťé7„[ˤÿׂ‰‰ç‘‰‰ ƒ‰¤g!„Y]¤é7„[ˤÿU„Ëc¤ÿׂ‰‰‹Kk‰‰ ƒ‰¤g!„Ó¤g!„×ۤ󱂉‰‰ý‘‰‰ ƒ‰¤e3 Ÿ‰í¤›M‚‰‰ _ʼn‰ ƒí¤ Á„)§¤™_„-¤™_‚‰‰ [‰‰ ƒí¤«„5¤+„¥·¤I„§#¤™_‚‰‰ 뭉‰ ƒí¤«„¥·¤I„;{¤™_„+•¤›Í„13¤«„¥·¤˄;û¤™Ñ‚‰‰[‹‰‰ ƒí¤™_„ˤ™_‚‰‰o‰‰ ƒí¤«„¥·¤˄=é¤=„£Ù¤+÷ã‰í¤ÿU„]¹¤ñC„é¤é·„[ˤÿׂ‰‰ƒ›Õ‰‰ Á¤g!„é·¤g!‚‰‰‰‰‰ Á¤g¡„Ҥ鷄[ˤÿׄÏM¤eµ„Õí¤g!„Ťé7„[ˤÿׂ‰‰ç‘‰‰ Á¤g!„Y]¤é7„[ˤÿU„Ëc¤ÿׂ‰‰‹Kk‰‰ Á¤g!„Ó¤g!„×ۤ󱂉‰‰ý‘‰‰ Á¤e3 Ÿ‰í¤»‚‰‰ _ʼn‰ …¥¤ Á„)§¤™_„-¤™_‚‰‰ ›ï‰‰ …¥¤«„5¤=„;{¤݄¥·¤™_‚‰‰ 鿉‰ …¥¤=„;{¤݄»¤™_„-¤›M„=é¤=„;{¤݄» ¤™Ñ‚‰‰[‹‰‰ …¥¤™_„ˤ™_‚‰‰ó‰‰ …¥¤=„;{¤݄»¤ O„»¤ Á÷ã‰í¤ó±„Eó¤ó1„Eó¤ë¥„ŤñC‚‰‰ƒ›Õ‰‰ ™¤g!„é·¤g!‚‰‰‰‰‰ ™¤g¡„Es¤ë¥„ŤñC„é¤e3„Ó¤g!„Eó¤ë¥„ŤñC‚‰‰ç‘‰‰ ™¤g!„[ˤ륄ŤñC„Ëc¤ÿׂ‰‰‹Kk‰‰ ™¤g!„Ó¤g!„×ۤ󱂉‰‰ÿ ‰‰ ™¤cÇ Ÿ‰í¤Ÿ‚‰‰ _ʼn‰ ý¤ Á„)§¤™_„-¤™_‚‰‰ ‰‰ ý¤+„7‹¤ Á„3¡¤݄¡k¤™_‚‰‰ ×A‰‰ ý¤ Á„3¡¤݄§#¤™_„¯ÿ¤›M„»¤ O„3¡¤݄§#¤™Ñ‚‰‰[‹‰‰ ý¤™_„ˤ™_‚‰‰ ‰‰ ý¤ O„3¡¤݄¥·¤=„;{¤ S÷ã‰í¤õ-„ŤñC„[ˤ륄ÍѤó1‚‰‰ƒ›Õ‰‰ ‡Ñ¤g!„é·¤g!‚‰‰‰‰‰ ‡Ñ¤g¡„Y]¤ë¥„ÍѤó1„Eó¤e3„Qƒ¤g!„Y]¤ë¥„ÍѤ󱂉‰ç‘‰‰ ‡Ñ¤g!„_¤ë¥„ÍѤó±„É÷¤ÿU‚‰‰‹Kk‰‰ ‡Ñ¤g!„Ó¤g!„×ۤ󱂉‰‰û‰‰ ‡Ñ¤ai Ÿ‰í¤Ÿ‚‰‰ _ʼn‰ µ¤ Á„)§¤™_„-¤™_‚‰‰ ‰‰ µ¤+„7‹¤ Á„3¡¤݄¡k¤™_‚‰‰ ×A‰‰ µ¤ Á„3¡¤݄§#¤™_„¯ÿ¤›M„»¤ O„3¡¤݄§#¤™Ñ‚‰‰[‹‰‰ µ¤™_„ˤ™_‚‰‰ ‰‰ µ¤ O„3¡¤݄¥·¤=„;{¤ S‰_‰í¤õ-„Á;¤ñC„_¤í‚‰‰ 퉉 ™©¤ó±‚‰‰ƒ›Õ‰‰ ™©¤g!„é·¤g!‚‰‰‰‰‰ ™©¤g¡„[K¤í‚‰‰á/‰‰ ™©¤ó±„Y]¤g!„_•¤g!„[ˤ킉‰ W©‰‰ ™©¤ó1‚‰‰ç‘‰‰ ™©¤g!„Qƒ¤í‚‰‰ щ‰ ™©¤ó±„Ëc¤ÿׂ‰‰‹Kk‰‰ ™©¤g!„Ó¤g!„×ۤ󱂉‰‰qW‰‰ ™©¤ai Ÿ‰í¤Ÿ‚‰‰ _ʼn‰ ¤ Á„)§¤™_„-¤™_‚‰‰ —‰‰ ¤«„5¤ Á‚‰‰ó‰‰ ¤o„¯ÿ¤™_‚‰‰ Óg‰‰ ¤ O‚‰‰+‰‰ ¤o„¥·¤™_„¡í¤™_„§#¤ Á‚‰‰¥“‰‰ ¤o„¥5¤™Ñ‚‰‰[‹‰‰ ¤™_„ˤ™_‚‰‰ƒw»‰‰ ¤ Á‚‰‰O_‰‰ ¤o„¡k¤=„?E¤ S÷ã‰í¤÷„ÍѤó±„Qƒ¤ï‚‰‰ ‰‰ 
á¤ó±‚‰‰ƒ›Õ‰‰ á¤g!„é·¤g!‚‰‰‰‰‰ á¤g¡„]¹¤ï‚‰‰ﳉ‰ á¤ó±„]'¤g!„]'¤g!„]'¤ï‚‰‰ U;‰‰ á¤ó±‚‰‰ç‘‰‰ á¤g!„Ӥ‰ ߓ‰‰ á¤ó±„Ëc¤ÿׂ‰‰‹Kk‰‰ á¤g!„Ó¤g!„×ۤ󱂉‰‰sE‰‰ á¤o{ Ÿ‰í¤‘‚‰‰ _ʼn‰ ›Å¤ Á„)§¤™_„-¤™_‚‰‰ 9‰‰ ›Å¤«„5¤ Á‚‰‰]‰‰ ›Å¤q„-¤™_‚‰‰ Q ‰‰ ›Å¤ Á‚‰‰¯ç‰‰ ›Å¤q„£Y¤™_„£Y¤™_„£Y¤ Á‚‰‰'ý‰‰ ›Å¤ó„£Ù¤™Ñ‚‰‰[‹‰‰ ›Å¤™_„ˤ™_‚‰‰ƒuM‰‰ ›Å¤ Á‚‰‰C9‰‰ ›Å¤ó„¯ÿ¤ Á„3¡¤ e‚‰‰…‡ï‰‰ ¹¤ó±„Õí¤ï‚‰‰‰‰‰ ¹¤ó1‚‰‰ƒ›Õ‰‰ ¹¤g!„é·¤g!‚‰‰‰‰‰ ¹¤g¡„_¤ï‚‰‰íʼn‰ ¹¤ó1„_•¤g!„]'¤g!„_¤ï‚‰‰ Sω‰ ¹¤ó±‚‰‰ç‘‰‰ ¹¤g!„Õí¤ï‚‰‰ Ý%‰‰ ¹¤ó±„É÷¤ÿU‚‰‰‹Kk‰‰ ¹¤g!„Ó¤yŸ„Õm¤ó±‚‰‰‰u³‰‰ ¹¤ï Ÿ‰í¤󂉉 _ʼn‰ ¤ Á„+¤‡ã„-¤™_‚‰‰ ߉‰ ¤+„7‹¤ Á‚‰‰)ˉ‰ ¤q„+•¤™_‚‰‰ _‰‰ ¤ Á‚‰‰¡S‰‰ ¤ó„¡k¤™_„£Y¤™_„¡í¤ O‚‰‰9뉉 ¤q„¡k¤™Ñ‚‰‰[‹‰‰ ¤™_„ˤ™_‚‰‰ƒsщ‰ ¤ O‚‰‰E‰‰ ¤q„+•¤ Á÷ã‰í¤ó±„Õí¤ï‚‰‰‰‰‰ ñ¤ó1‚‰‰ƒ›Õ‰‰ ñ¤g!„é·¤g!‚‰‰‰‰‰ ñ¤g¡„_¤ï‚‰‰íʼn‰ ñ¤ó1„_•¤g!„]'¤g!„_¤ï‚‰‰ Sω‰ ñ¤ó±‚‰‰ç‘‰‰ ñ¤g!„Õí¤ï‚‰‰ Ý%‰‰ ñ¤ó±„É÷¤ÿU‚‰‰‹Kk‰‰ ñ¤g!„Ó¤yŸ„Õm¤ó±‚‰‰‰u³‰‰ ñ¤ï Ÿ‰í¤o‚‰‰ _ʼn‰ Õ¤ Á„+¤‡ã„-¤™_‚‰‰ a‰‰ Õ¤«„7 ¤ O‚‰‰+'‰‰ Õ¤ó„)§¤™_‚‰‰ ]/‰‰ Õ¤ Á‚‰‰£Á‰‰ Õ¤ó„¯ÿ¤™_„¥·¤™_„-¤ Á‚‰‰;G‰‰ Õ¤q„¯ÿ¤™Ñ‚‰‰[‹‰‰ Õ¤™_„ˤ™_‚‰‰ƒq㉉ Õ¤ Á‚‰‰G…‰‰ Õ¤q„)'¤ Á÷ã‰í¤ó±„×Y¤ï‚‰‰ƒ÷•‰‰ ŸÉ¤ó±‚‰‰ƒ›Õ‰‰ ŸÉ¤g!„é·¤g!‚‰‰‰‰‰ ŸÉ¤g¡„Qƒ¤ï‚‰‰ëW‰‰ ŸÉ¤ó±„Ó¤g!„[ˤg!„Qƒ¤ï‚‰‰ QQ‰‰ ŸÉ¤ó±‚‰‰ç‘‰‰ ŸÉ¤g!„×ۤ‰‰ Û7‰‰ ŸÉ¤ó1„Éu¤ÿׂ‰‰‹Kk‰‰ ŸÉ¤g!„Ó¤yŸ„Õm¤ó±‚‰‰‰w/‰‰ ŸÉ¤í Ÿ‰í¤݂‰‰ _ʼn‰ ­¤ Á„+¤‡ã„-¤™_„7 ¤«„7‹¤ Á‚‰‰/ƒ‰‰ ­¤Ÿ„)§¤™_‚‰‰ [³‰‰ ­¤ O‚‰‰§+‰‰ ­¤Ÿ„¯ÿ¤™_„¥·¤™_„+•¤ Á‚‰‰?!‰‰ ­¤Ÿ„¯ÿ¤™Ñ‚‰‰[‹‰‰ ­¤™_„ˤ™_‚‰‰ƒu‰‰ ­¤ Á‚‰‰Û ­¤Ÿ„ˤ=÷ã‰í¤ñC„é·¤ai‚‰‰ƒõ'‰‰ ‘¤ó±‚‰‰ƒ›Õ‰‰ ‘¤g!„é·¤g!‚‰‰‰‰‰ ‘¤g¡„Qƒ¤ai‚‰‰éû‰‰ ‘¤ó±„Õí¤g!„[ˤg!„Qƒ¤ai‚‰‰ _c‰‰ ‘¤ó1‚‰‰ç‘‰‰ ‘¤g!„×Û¤ai‚‰‰ Ùۉ‰ ‘¤ó±„É÷¤ÿׄÉu¤g!„Ó¤yŸ„Õm¤ó±‚‰‰ ‰‰ ‘¤ë¥ Ÿ‰í¤=„ 礟‚‰‰ _ʼn‰ ‘å¤ Á„+¤‡ã„-¤™_„3¡¤+‚‰‰ Ã;‰‰ ‘å¤ Á‚‰‰¡‰‰ ‘够„I¤™_‚‰‰ Yʼn‰ ‘å¤ Á‚‰‰¹™‰‰ ‘够„-¤™_„§#¤™Ñ„I¤ Á‚‰‰1‰‰ ‘够„-¤™Ñ‚‰‰[‹‰‰ ‘夙_„ˤ™_‚‰‰ƒÿ™‰‰ ‘å¤ Á‚‰‰Ý݉‰ ‘够„ݤ O÷ã‰í¤ó1„륤ai‚‰‰ƒóˉ‰ ٤󱂉‰ƒ›Õ‰‰ Ù¤g!„é·¤g!‚‰‰‰‰‰ Ù¤g¡„Óñ¤ai‚‰‰W ‰‰ Ù¤ó±„é7¤g¡„Y]¤g!„Ó¤ai‚‰‰ ]÷‰‰ ٤󱂉‰ç‘‰‰ Ù¤g!„é7¤ai‚‰‰ Çm‰‰ ٤󱂉‰ §‰‰ Ù¤ÿU„ÍѤg!„Ó¤yŸ„Õm¤ó±‚‰‰ ‹‰‰ Ù¤ai„÷›¤ñC Ÿ‰í¤=„ 礟‚‰‰ _ʼn‰ “½¤ Á„+¤‡ã„-¤™_„3¡¤+‚‰‰ Ã;‰‰ “½¤ Á‚‰‰¡‰‰ “½¤Ÿ„I¤™_‚‰‰ Yʼn‰ “½¤ Á‚‰‰¹™‰‰ “½¤Ÿ„-¤™_„§#¤™Ñ„I¤ 
Á‚‰‰1‰‰ “½¤Ÿ„-¤™Ñ‚‰‰[‹‰‰ “½¤™_„ˤ™_‚‰‰ƒÿ™‰‰ “½¤ Á‚‰‰Ý݉‰ “½¤Ÿ„ݤ O÷ã‰í¤ó1„í¤ai‚‰‰ƒñ݉‰ ‘¤õ¯‚‰‰ƒ›Õ‰‰ ‘¤g!„é·¤g!‚‰‰‰‰‰ ‘¤g¡„Õm¤ai‚‰‰U‰‰ ‘¤õ¯„é7¤g¡„Y]¤g!„Õí¤ai‚‰‰ ݉‰ ‘¤õ-‚‰‰ç‘‰‰ ‘¤g!„륤ai‚‰‰ Åñ‰‰ ‘¤õ-‚‰‰ É‰‰ ‘¤ÿׄÏM¤g!„Ó¤yŸ„Õm¤ó±‚‰‰ g‰‰ ‘¤ai„õ-¤ñC Ÿ‰í¤=„ S¤Ÿ‚‰‰ _ʼn‰ õ¤ Á„+¤‡ã„-¤™_„13¤«‚‰‰ Ã;‰‰ õ¤ S‚‰‰£í‰‰ õ¤Ÿ„ݤ™_‚‰‰ Yʼn‰ õ¤ S‚‰‰9u‰‰ õ¤Ÿ„+•¤™_„§#¤™Ñ„I¤ ӂ‰‰3‰‰ õ¤Ÿ„+¤™Ñ‚‰‰[‹‰‰ õ¤™_„ˤ™_‚‰‰ƒÿ™‰‰ õ¤ ӂ‰‰ßI‰‰ õ¤Ÿ„o¤ O÷ã‰í¤ó1„ai‚‰‰ƒÿo‰‰ •é¤õ¯‚‰‰ƒ›Õ‰‰ •é¤g!„é·¤g!‚‰‰‰‰‰ •é¤g¡„×Û¤ai‚‰‰S£‰‰ •é¤õ¯„륤g¡„Y]¤g!„×Û¤ai‚‰‰ Û­‰‰ •é¤õ-‚‰‰ç‘‰‰ •é¤g!„í¤ai‚‰‰ Cƒ‰‰ •é¤õ¯‚‰‰ ‹ï‰‰ •é¤ñC„Á»¤g!„Ó¤yŸ„Õm¤ó±‚‰‰ Õ‰‰ •é¤ai„ó±¤ó1 Ÿ‰í¤ O„ Á¤Ÿ‚‰‰ _ʼn‰ ͤ Á„+¤‡ã„-¤™_„?Ǥ=‚‰‰ Áω‰ ͤ ӂ‰‰¥Y‰‰ ͤŸ„o¤™_‚‰‰ GW‰‰ ͤ S‚‰‰;c‰‰ ͤŸ„)§¤™_„§#¤™Ñ„ݤ ӂ‰‰µy‰‰ ͤŸ„)§¤™Ñ‚‰‰[‹‰‰ ͤ™_„ˤ™_‚‰‰ƒý+‰‰ ͤ ӂ‰‰ѧ‰‰ ͤŸ„ó¤ O÷ã‰í¤ó1„ai‚‰‰ƒÿo‰‰ —¡¤õ¯‚‰‰ƒ›Õ‰‰ —¡¤g!„é·¤g!‚‰‰‰‰‰ —¡¤g¡„×Û¤ai‚‰‰S£‰‰ —¡¤õ¯„륤g¡„Y]¤g!„×Û¤ai‚‰‰ Û­‰‰ —¡¤õ-‚‰‰ç‘‰‰ —¡¤g!„í¤ai‚‰‰ Cƒ‰‰ —¡¤õ¯‚‰‰ ‹ï‰‰ —¡¤ñC„Á»¤g!„Ó¤yŸ„Õm¤ó±‚‰‰ Õ‰‰ —¡¤ai„ó±¤ó1 Ÿ‰í¤ O„=¤Ÿ‚‰‰ _ʼn‰ …¤ Á„+¤‡ã„-¤™_„;{¤+‚‰‰ ÏQ‰‰ …¤ ӂ‰‰§7‰‰ …¤9„o¤™_‚‰‰ Eû‰‰ …¤ ӂ‰‰=щ‰ …¤»„)§¤™_„§#¤™Ñ„ï¤ S‚‰‰·×‰‰ …¤»„)§¤™Ñ‚‰‰[‹‰‰ …¤™_„ˤ™_‚‰‰ƒû=‰‰ …¤ S‚‰‰Ó‰‰ …¤»„‘…¤=÷ã‰í¤ñC„oý¤cǂ‰‰ƒýó‰‰ ù¤õ-‚‰‰ƒ›Õ‰‰ ù¤g!„é·¤g!‚‰‰‰‰‰ ù¤g¡„×Û¤cǂ‰‰Q5‰‰ ù¤õ-„퓤g¡„Y]¤g!„×Û¤cǂ‰‰ Ù¿‰‰ ù¤õ¯‚‰‰ç‘‰‰ ù¤g!„í¤cG‚‰‰ A‰‰ ù¤õ¯‚‰‰ ‰q‰‰ ù¤ÿU„Ťg!„Ó¤yŸ„Õm¤ó±‚‰‰ A‰‰ ù¤ai„ñC¤ó1 Ÿ‰í¤ O„+¤Ÿ‚‰‰ _ʼn‰ ©Ý¤ Á„+¤‡ã„-¤™_„»¤«‚‰‰ Íc‰‰ ©Ý¤ Á‚‰‰¹¥‰‰ ©Ý¤»„q¤™_‚‰‰ Å ‰‰ ©Ý¤ O‚‰‰?M‰‰ ©Ý¤»„I¤™_„¹¤‡ã„q¤ Á‚‰‰Éʼn‰ ©Ý¤9„I¤™Ñ‚‰‰[‹‰‰ ©Ý¤™_„ˤ™_‚‰‰ƒùÁ‰‰ ©Ý¤ Á‚‰‰Õ‰‰ ©Ý¤9„Ÿ¤=÷ã‰í¤ñC„ai¤cG‚‰‰ƒýó‰‰ )±¤ó±‚‰‰ƒ›Õ‰‰ )±¤g!„é·¤g!‚‰‰‰‰‰ )±¤g¡„é7¤cG‚‰‰Q5‰‰ )±¤ó±„ï¤yŸ„Go¤g!„é7¤cǂ‰‰ Ù¿‰‰ )±¤ó1‚‰‰ç‘‰‰ )±¤g!„ï¤cǂ‰‰ A‰‰ )±¤ó±‚‰‰‹w‰‰ )±¤ÿׄEó¤g!„Ó¤yŸ„Õm¤ó±‚‰‰ ƒ?‰‰ )±¤ai„ÿU¤ó1 Ÿ‰í¤ O„+¤Ÿ‚‰‰ _ʼn‰ «•¤ Á„+¤‡ã„-¤™_„¹¤=‚‰‰ Íc‰‰ «•¤ S‚‰‰»‰‰ «•¤›M„q¤™_‚‰‰ Å ‰‰ «•¤ ӂ‰‰1»‰‰ «•¤›M„I¤™_„¹¤‡ã„q¤ S‚‰‰Ë1‰‰ «•¤›Í„I¤™Ñ‚‰‰[‹‰‰ «•¤™_„ˤ™_‚‰‰ƒùÁ‰‰ «•¤ S‚‰‰Uÿ‰‰ «•¤›Í„Ÿ¤=÷ã‰í¤ñC„ai¤eµ‚‰‰ƒ{‰‰ +‰¤õ-‚‰‰ƒ›Õ‰‰ +‰¤g!„é·¤g!‚‰‰‰‰‰ +‰¤g¡„é7¤eµ‚‰‰_ى‰ +‰¤õ-„ï¤yŸ„Go¤g!„é7¤e3‚‰‰ ÇA‰‰ +‰¤õ¯‚‰‰ç‘‰‰ +‰¤g!„ï¤e3‚‰‰ O¹‰‰ 
+‰¤õ-‚‰‰‹s9‰‰ +‰¤ñC„Go¤g!„Ó¤yŸ„Õm¤ó±‚‰‰ ƒ?‰‰ +‰¤ai„ÿU¤ó1 Ÿ‰í¤ O„+¤Ÿ‚‰‰ _ʼn‰ +í¤ Á„+¤‡ã„-¤™_„¹¤=‚‰‰ Íc‰‰ +í¤ S‚‰‰»‰‰ +í¤›M„q¤™_‚‰‰ Å ‰‰ +í¤ Ó‚‰‰1»‰‰ +í¤›M„I¤™_„¹¤‡ã„q¤ S‚‰‰Ë1‰‰ +í¤›Í„I¤™Ñ‚‰‰[‹‰‰ +í¤™_„ˤ™_‚‰‰ƒùÁ‰‰ +í¤ S‚‰‰Uÿ‰‰ +í¤›Í„Ÿ¤=÷ã‰í¤ñC„cG¤e3‚‰‰ƒ{‰‰ ­Á¤ó±‚‰‰ƒ›Õ‰‰ ­Á¤g!„é·¤g!‚‰‰‰‰‰ ­Á¤g¡„ë%¤e3‚‰‰_ى‰ ­Á¤ó±„oý¤yŸ„Go¤g!„륤eµ‚‰‰ ÇA‰‰ ­Á¤ó±‚‰‰ç‘‰‰ ­Á¤g!„oý¤e3‚‰‰ O¹‰‰ ­Á¤ó±‚‰‰‹q͉‰ ­Á¤ÿU„[ˤg!„Ó¤yŸ„Õm¤ó±‚‰‰ …­‰‰ ­Á¤ai„ýù¤ó1 Ÿ‰í¤ O„™¤Ÿ‚‰‰ _ʼn‰ -¥¤ Á„+¤‡ã„-¤™_„¥·¤+‚‰‰ Ë÷‰‰ -¥¤ Á‚‰‰½‰‰ -¥¤›M„‘…¤™_‚‰‰ ɉ -¥¤ Á‚‰‰3©‰‰ -¥¤›Í„ݤ™_„¹¤‡ã„‘…¤ Á‚‰‰Í¯‰‰ -¥¤›M„[¤™Ñ‚‰‰[‹‰‰ -¥¤™_„ˤ™_‚‰‰ƒçS‰‰ -¥¤ Á‚‰‰Wk‰‰ -¥¤›M„9¤=÷ã‰í¤ñC„cG¤e3‚‰‰ƒy‰‰ ¯™¤õ¯‚‰‰ƒ›Õ‰‰ ¯™¤g!„é·¤g!‚‰‰‰‰‰ ¯™¤g¡„ë%¤e3‚‰‰]뉉 ¯™¤õ¯„oý¤yŸ„Go¤g!„륤eµ‚‰‰ ÅՉ‰ ¯™¤õ-‚‰‰ç‘‰‰ ¯™¤g!„oý¤e3‚‰‰ MK‰‰ ¯™¤õ-‚‰‰‹߉‰ ¯™¤ÿׄ]'¤g!„Ó¤yŸ„Õm¤ó±‚‰‰ ‡‰‰ ¯™¤ai„{ ¤ó1 Ÿ‰í¤ O„…u¤Ÿ‚‰‰ _ʼn‰ ¯ý¤ Á„+¤‡ã„-¤™_„£Y¤«‚‰‰ Ë÷‰‰ ¯ý¤ S‚‰‰½‰‰ ¯ý¤›M„‘…¤™_‚‰‰ ɉ ¯ý¤ S‚‰‰3©‰‰ ¯ý¤›Í„ݤ™_„¹¤‡ã„‘…¤ ӂ‰‰Í¯‰‰ ¯ý¤›M„[¤™Ñ‚‰‰[‹‰‰ ¯ý¤™_„ˤ™_‚‰‰ƒçS‰‰ ¯ý¤ ӂ‰‰Wk‰‰ ¯ý¤›M„9¤=‰Ñ‰í¤ÿU„cG¤g¡‚‰‰ƒy‰‰ /Ѥó1‚‰‰ƒ›Õ‰‰ /Ѥg!„é·¤g!‚‰‰‰‰‰ /Ѥg¡„ë%¤g¡‚‰‰]뉉 /Ѥó1„ai¤yŸ„Go¤g!„륤g!‚‰‰ ÅՉ‰ /Ѥ󱂉‰ç‘‰‰ /Ѥg!„oý¤g!‚‰‰ MK‰‰ /Ѥ󱂉‰‹{õ‰‰ /ѤñC„_•¤g!„Ó¤yŸ„Õm¤ó±‚‰‰ ÷‰‰ /Ѥai„yŸ¤ó1 Ÿ‰í¤ O„‡ã¤Ÿ‚‰‰ _ʼn‰ ¡µ¤ Á„+¤‡ã„-¤™_„¡í¤=‚‰‰ I‰‰ ¡µ¤ Á‚‰‰=ý‰‰ ¡µ¤™_„‘…¤™_‚‰‰ Á£‰‰ ¡µ¤ Á‚‰‰5‰‰ ¡µ¤™_„ݤ™_„¹¤‡ã„Ÿ¤ O‚‰‰Ï‰‰ ¡µ¤™Ñ„[¤™Ñ‚‰‰[‹‰‰ ¡µ¤™_„ˤ™_‚‰‰ƒå牉 ¡µ¤ O‚‰‰iى‰ ¡µ¤™Ñ„9¤+÷ã‰í¤ÿU„cG¤g¡‚‰‰ƒy‰‰ !©¤ó1‚‰‰ƒ›Õ‰‰ !©¤g!„é·¤g!‚‰‰‰‰‰ !©¤g¡„ë%¤g¡‚‰‰]뉉 !©¤ó1„ai¤yŸ„Go¤g!„륤g!‚‰‰ ÅՉ‰ !©¤ó±‚‰‰ç‘‰‰ !©¤g!„oý¤g!‚‰‰ MK‰‰ !©¤ó±‚‰‰‹{õ‰‰ !©¤ñC„_•¤g!„Ó¤yŸ„Õm¤ó±‚‰‰ ÷‰‰ !©¤ai„yŸ¤ó1 Ÿ‰í¤ O„™Ñ¤Ÿ‚‰‰ _ʼn‰ £¤ Á„+¤‡ã„-¤™_„-¤+‚‰‰ I‰‰ £¤ ӂ‰‰?i‰‰ £¤™_„Ÿ¤™_‚‰‰ Á£‰‰ £¤ S‚‰‰·ó‰‰ £¤™_„o¤™_„¹¤‡ã„Ÿ¤ ӂ‰‰Á ‰‰ £¤™_„酪т‰‰[‹‰‰ £¤™_„ˤ™_‚‰‰ƒå牉 £¤ ӂ‰‰k·‰‰ £¤™_„›Í¤+÷ã‰í¤ÿU„eµ¤g!‚‰‰ƒg»‰‰ £á¤õ¯‚‰‰ƒ›Õ‰‰ £á¤g!„é·¤g!‚‰‰‰‰‰ £á¤g¡„퓤g!‚‰‰[}‰‰ £á¤õ¯„ai¤yŸ„Go¤g!„í¤g!‚‰‰ Ãg‰‰ £á¤õ-‚‰‰ç‘‰‰ £á¤g!„ai¤g!‚‰‰ K]‰‰ £á¤õ¯‚‰‰‹û‰‰ £á¤ÿU„Ó¤g!„Ó¤yŸ„Õm¤ó±‚‰‰ 剉 £á¤ai„g¡¤ó1 Ÿ‰í¤ O„›M¤Ÿ‚‰‰ _ʼn‰ #Ť Á„+¤‡ã„-¤™_„+•¤«‚‰‰ 7­‰‰ #Ť O‚‰‰?i‰‰ #Ť™_„Ÿ¤™_‚‰‰ Ï5‰‰ #Ť Á‚‰‰·ó‰‰ #Ť™_„o¤™_„¹¤‡ã„9¤ Á‚‰‰Á 
‰‰ #Ť™_„酪т‰‰[‹‰‰ #Ť™_„ˤ™_‚‰‰ƒc‰‰‰ #Ť Á‚‰‰k·‰‰ #Ť™_„›Í¤™÷ã‰í¤ýù„eµ¤g!‚‰‰ƒg»‰‰ ¥¹¤ó±‚‰‰ƒ›Õ‰‰ ¥¹¤g!„é·¤g!‚‰‰‰‰‰ ¥¹¤g¡„퓤g!‚‰‰[}‰‰ ¥¹¤ó±„cG¤yŸ„Go¤g!„í¤g!‚‰‰ Ãg‰‰ ¥¹¤ó±‚‰‰ç‘‰‰ ¥¹¤g!„ai¤g!‚‰‰ K]‰‰ ¥¹¤ó1‚‰‰‹ù)‰‰ ¥¹¤ÿׄÕí¤g!„Ó¤yŸ„Õm¤ó±‚‰‰ Q‰‰ ¥¹¤ai„e3¤ó1 Ÿ‰í¤ O„›M¤Ÿ‚‰‰ _ʼn‰ %¤ Á„+¤‡ã„-¤™_„+•¤«‚‰‰ 7­‰‰ %¤ O‚‰‰?i‰‰ %¤™_„Ÿ¤™_‚‰‰ Ï5‰‰ %¤ Á‚‰‰·ó‰‰ %¤™_„o¤™_„¹¤‡ã„9¤ Á‚‰‰Á ‰‰ %¤™_„酪т‰‰[‹‰‰ %¤™_„ˤ™_‚‰‰ƒc‰‰‰ %¤ Á‚‰‰k·‰‰ %¤™_„›Í¤™÷ã‰í¤ÿׄcǤyŸ‚‰‰ƒg»‰‰ %ñ¤ó±‚‰‰ƒ›Õ‰‰ %ñ¤g!„é·¤g!‚‰‰‰‰‰ %ñ¤g¡„퓤yŸ‚‰‰[}‰‰ %ñ¤ó±„cG¤yŸ„Go¤g!„í¤yŸ‚‰‰ Ãg‰‰ %ñ¤ó±‚‰‰ç‘‰‰ %ñ¤g!„ai¤yŸ‚‰‰ K]‰‰ %ñ¤ó1‚‰‰‹åO‰‰ %ñ¤ñC„×Y¤g!„Ó¤yŸ„Õm¤ó±‚‰‰ ω‰ %ñ¤ai„cǤó1 Ÿ‰í¤ O„»¤Ÿ‚‰‰ _ʼn‰ §Õ¤ Á„+¤‡ã„-¤™_„)'¤=‚‰‰ 7­‰‰ §Õ¤ O‚‰‰1lj‰ §Õ¤‡ã„Ÿ¤™_‚‰‰ Ï5‰‰ §Õ¤ Á‚‰‰Éo‰‰ §Õ¤‡ã„o¤™_„¹¤‡ã„9¤ Á‚‰‰A牉 §Õ¤‡ã„酪т‰‰[‹‰‰ §Õ¤™_„ˤ™_‚‰‰ƒc‰‰‰ §Õ¤ Á‚‰‰m#‰‰ §Õ¤‡ã„»¤«‰_‰í¤ýy„cǤ{ ‚‰‰ƒeM‰‰ 'ɤ󱂉‰ƒ›Õ‰‰ 'ɤg!„é·¤g!‚‰‰‰‰‰ 'ɤg¡„퓤{ ‚‰‰ہ‰‰ 'ɤó±„cǤyŸ„Y]¤g!„í¤{‚‰‰ A ‰‰ 'ɤó1‚‰‰ç‘‰‰ 'ɤg!„ai¤{ ‚‰‰ I቉ 'ɤó1‚‰‰‹ãӉ‰ 'ɤÿU„륤g!„Ó¤yŸ„Õm¤ó±„Éu¤ai„ai¤ó1 Ÿ‰í¤ O„Ÿ¤Ÿ„7 ¤ Á„+¤‡ã„-¤™_„ݤ+‚‰‰ 5?‰‰ ¹­¤ O‚‰‰33‰‰ ¹­¤…u„Ÿ¤™_‚‰‰ Íى‰ ¹­¤ O‚‰‰Ë]‰‰ ¹­¤…õ„o¤™_„§#¤‡ã„»¤ Á‚‰‰CS‰‰ ¹­¤…u„酪т‰‰[‹‰‰ ¹­¤™_„ˤ™_‚‰‰ƒa‰‰ ¹­¤ Á‚‰‰o‘‰‰ ¹­¤…u„»¤‰_‰í¤{„e3¤yŸ‚‰‰ƒeM‰‰ 9¤ó±‚‰‰ƒ›Õ‰‰ 9¤g!„é·¤g!‚‰‰‰‰‰ 9¤g¡„ï¤yŸ‚‰‰ہ‰‰ 9¤ó±„cǤyŸ„Y]¤g!„ï¤yŸ‚‰‰ A ‰‰ 9¤ó1‚‰‰ç‘‰‰ 9¤g!„cǤyŸ‚‰‰ I቉ 9¤ó1‚‰‰‹á剉 9¤ÿׄí¤g!„Ó¤yŸ„Õm¤ó±„Ëc¤ai„o{¤ó1 Ÿ‰í¤ O„‘¤Ÿ„5¤ Á„+¤‡ã„-¤™_„o¤«‚‰‰ 5?‰‰ 9å¤ O‚‰‰33‰‰ 9备ㄝ»¤™_‚‰‰ Íى‰ 9å¤ O‚‰‰Ë]‰‰ 9备ã„q¤™_„§#¤‡ã„»¤ Á‚‰‰CS‰‰ 9备ã„q¤™Ñ‚‰‰[‹‰‰ 9夙_„ˤ™_‚‰‰ƒa‰‰ 9å¤ Á‚‰‰o‘‰‰ 9备ㄛM¤…õ÷ã‰í¤{„e3¤yŸ‚‰‰ƒeM‰‰ »Ù¤ó±‚‰‰ƒ›Õ‰‰ »Ù¤g!„é·¤g!‚‰‰‰‰‰ »Ù¤g¡„ï¤yŸ‚‰‰ہ‰‰ »Ù¤ó±„cǤyŸ„Y]¤g!„ï¤yŸ‚‰‰ A ‰‰ »Ù¤ó1‚‰‰ç‘‰‰ »Ù¤g!„cǤyŸ‚‰‰ I቉ »Ù¤ó1‚‰‰‹á剉 »Ù¤ÿׄí¤g!„Ó¤yŸ„Õm¤ó±„Ëc¤ai„o{¤ó1 Ÿ‰í¤ O„󤟄3¡¤ Á„+¤‡ã„-¤™_„ó¤=‚‰‰ 3A‰‰ ;½¤=‚‰‰5!‰‰ ;½¤…õ„»¤™_‚‰‰ Ë뉉 ;½¤=‚‰‰Íˉ‰ ;½¤…u„q¤™_„§#¤‡ã„›M¤=‚‰‰EÁ‰‰ ;½¤…u„q¤™Ñ‚‰‰[‹‰‰ ;½¤™_„ˤ™_‚‰‰ƒo¯‰‰ ;½¤=‚‰‰a ‰‰ ;½¤…u„»¤‡ã÷ã‰í¤yŸ„cǤ{ ‚‰‰ƒeM‰‰ ½‘¤ñC‚‰‰ƒ›Õ‰‰ ½‘¤g!„é·¤g!‚‰‰‰‰‰ ½‘¤g¡„ï¤{ ‚‰‰ہ‰‰ ½‘¤ñC„e3¤yŸ„Y]¤g!„ï¤{ ‚‰‰ A ‰‰ ½‘¤ñC‚‰‰ç‘‰‰ ½‘¤g!„cǤ{‚‰‰ I቉ ½‘¤ñC‚‰‰‹m›‰‰ ½‘¤ñC„g!„Ó¤yŸ„Õm¤ó±„ÍѤai„ó1 
Ÿ‰í¤ O„o¤Ÿ„13¤ Á„+¤‡ã„-¤™_„Ÿ¤+‚‰‰ 3A‰‰ ½õ¤=‚‰‰5!‰‰ ½õ¤…õ„»¤™_‚‰‰ Ë뉉 ½õ¤=‚‰‰Íˉ‰ ½õ¤…u„q¤™_„§#¤…u„»¤=‚‰‰EÁ‰‰ ½õ¤…u„q¤™Ñ‚‰‰[‹‰‰ ½õ¤™_„ˤ™_‚‰‰ƒo¯‰‰ ½õ¤=‚‰‰a ‰‰ ½õ¤…u„»¤™_÷ã‰í¤g!„cǤ{ ‚‰‰ƒeM‰‰ =é¤ñC‚‰‰ƒ›Õ‰‰ =é¤g!„é·¤g!‚‰‰‰‰‰ =é¤g¡„ï¤{ ‚‰‰ہ‰‰ =é¤ñC„cǤ{ „Y]¤g!„ï¤{ ‚‰‰ A ‰‰ =é¤ñC‚‰‰ç‘‰‰ =é¤g!„cǤ{‚‰‰ I቉ =é¤ñC‚‰‰‹k-‰‰ =é¤ÿU„ai¤g!„Ó¤yŸ„Õm¤ó±„ÏM¤ai„í¤ó1 Ÿ‰í¤ O„ݤŸ„?Ǥ Á„+¤‡ã„-¤™_„»¤«‚‰‰ 3A‰‰ ¿Í¤=‚‰‰5!‰‰ ¿Í¤…õ„»¤™_‚‰‰ Ë뉉 ¿Í¤=‚‰‰Íˉ‰ ¿Í¤…u„q¤™_„¥·¤‡ã„»¤=‚‰‰EÁ‰‰ ¿Í¤…u„q¤™Ñ‚‰‰[‹‰‰ ¿Í¤™_„ˤ™_‚‰‰ƒo¯‰‰ ¿Í¤=‚‰‰a ‰‰ ¿Í¤…u„Ÿ¤»÷ã‰í¤cDŽai¤{ ‚‰‰ƒeM‰‰ ?¡¤ñC‚‰‰ƒ›Õ‰‰ ?¡¤g!„é·¤g!‚‰‰‰‰‰ ?¡¤g¡„ï¤{ ‚‰‰ہ‰‰ ?¡¤ñC„cǤyŸ„[ˤg!„ï¤{ ‚‰‰ A ‰‰ ?¡¤ñC‚‰‰ç‘‰‰ ?¡¤g!„cǤ{‚‰‰ I቉ ?¡¤ñC‚‰‰‹i?‰‰ ?¡¤ÿׄcǤg!„Ó¤yŸ„Õm¤ó±„Á»¤ai„륤ó1 Ÿ‰í¤ O„ݤŸ„?Ǥ Á„+¤‡ã„-¤™_„»¤«‚‰‰ 3A‰‰ ±…¤=‚‰‰5!‰‰ ±…¤…õ„»¤™_‚‰‰ Ë뉉 ±…¤=‚‰‰Íˉ‰ ±…¤…u„q¤™_„¥·¤‡ã„»¤=‚‰‰EÁ‰‰ ±…¤…u„q¤™Ñ‚‰‰[‹‰‰ ±…¤™_„ˤ™_‚‰‰ƒo¯‰‰ ±…¤=‚‰‰a ‰‰ ±…¤…u„Ÿ¤»‹Í‰í¤oý„ai¤ýy‚‰‰ƒcщ‰ ±ù¤ñC‚‰‰ƒ›Õ‰‰ ±ù¤g!„é·¤g!‚‰‰‰‰‰ ±ù¤g¡„ï¤ýy‚‰‰Ù‰‰ ±ù¤ñC„cǤ{ „[ˤg!„ï¤ýù‚‰‰ O‰‰ ±ù¤ñC‚‰‰ç‘‰‰ ±ù¤g!„cǤýy‚‰‰ 7s‰‰ ±ù¤ñC‚‰‰‹UU‰‰ ±ù¤ñC„e3¤g!„Ó¤yŸ„Õm¤ó±„é¤ai„é·¤ó1 Ÿ‰í¤ O„ˤŸ„=é¤ Á„+¤‡ã„-¤™_„›M¤=‚‰‰ 1Չ‰ 1ݤ=‚‰‰7Ÿ‰‰ 1ݤ„»¤™_‚‰‰ É}‰‰ 1ݤ=‚‰‰Ï'‰‰ 1ݤ™„q¤™_„¥·¤…u„»¤=‚‰‰G¿‰‰ 1ݤ„q¤™Ñ‚‰‰[‹‰‰ 1ݤ™_„ˤ™_‚‰‰ƒm1‰‰ 1ݤ=‚‰‰ã{‰‰ 1ݤ„Ÿ¤‘…»‰í¤í„oý¤ýy‚‰‰ƒcщ‰ ³±¤ñC‚‰‰ƒ›Õ‰‰ ³±¤g!„é·¤g!‚‰‰‰‰‰ ³±¤g¡„ï¤ýy‚‰‰Ù‰‰ ³±¤ñC„ai¤{ „]'¤g!„ï¤ýù‚‰‰ O‰‰ ³±¤ñC‚‰‰ç‘‰‰ ³±¤g!„cǤýy‚‰‰ 7s‰‰ ³±¤ñC‚‰‰‹Sù‰‰ ³±¤ÿU„yŸ¤g!„Ó¤yŸ„Õm¤ó±„Ťai„×Y¤ó1 Ÿ‰í¤ O„)'¤Ÿ„;{¤ Á„+¤‡ã„-¤™_„‡ã¤+‚‰‰ 1Չ‰ 3•¤=‚‰‰7Ÿ‰‰ 3•¤„»¤™_‚‰‰ É}‰‰ 3•¤=‚‰‰Ï'‰‰ 3•¤™„q¤™_„£Y¤…u„Ÿ¤=‚‰‰G¿‰‰ 3•¤„q¤™Ñ‚‰‰[‹‰‰ 3•¤™_„ˤ™_‚‰‰ƒm1‰‰ 3•¤=‚‰‰ã{‰‰ 3•¤„‘…¤o÷ã‰í¤í„oý¤ýy‚‰‰ƒcщ‰ µ‰¤ñC‚‰‰ƒ›Õ‰‰ µ‰¤g!„é·¤g!‚‰‰‰‰‰ µ‰¤g¡„ï¤ýy‚‰‰Ù‰‰ µ‰¤ñC„ai¤{ „]'¤g!„ï¤ýù‚‰‰ O‰‰ µ‰¤ñC‚‰‰ç‘‰‰ µ‰¤g!„cǤýy‚‰‰ 7s‰‰ µ‰¤ñC‚‰‰‹Sù‰‰ µ‰¤ÿU„yŸ¤g!„Ó¤yŸ„Õm¤ó±„Ťai„×Y¤ó1 Ÿ‰í¤ O„+•¤Ÿ„»¤ Á„+¤‡ã„-¤™_„…u¤«‚‰‰ 1Չ‰ µí¤=‚‰‰7Ÿ‰‰ µí¤„»¤™_‚‰‰ É}‰‰ µí¤=‚‰‰Ï'‰‰ µí¤™„q¤™_„¡í¤…õ„‘¤=‚‰‰G¿‰‰ µí¤„q¤™Ñ‚‰‰[‹‰‰ µí¤™_„ˤ™_‚‰‰ƒm1‰‰ µí¤=‚‰‰ã{‰‰ µí¤„‘…¤)'÷ã‰í¤×Y„oý¤ýy‚‰‰ƒcщ‰ 5Á¤ñC‚‰‰ƒ›Õ‰‰ 5Á¤g!„é·¤g!‚‰‰‰‰‰ 5Á¤g¡„ï¤ýy‚‰‰Ù‰‰ 5Á¤ñC„o{¤{„_•¤g!„ï¤ýù‚‰‰ 
O‰‰ 5Á¤ñC‚‰‰ç‘‰‰ 5Á¤g!„cǤýy‚‰‰ 7s‰‰ 5Á¤ñC‚‰‰‹Ó‹‰‰ 5Á¤ÿׄ{ ¤g!„Ó¤yŸ„Õm¤ó±„Eó¤ai„Õí¤ó1 Ÿ‰í¤ O„-¤Ÿ„¹¤ Á„+¤‡ã„-¤™_„™¤=‚‰‰ 1Չ‰ ·¥¤=‚‰‰I ‰‰ ·¥¤«„»¤™_‚‰‰ É}‰‰ ·¥¤=‚‰‰Á•‰‰ ·¥¤+„q¤™_„¯ÿ¤…u„ó¤=‚‰‰Y+‰‰ ·¥¤«„q¤™Ñ‚‰‰[‹‰‰ ·¥¤™_„ˤ™_‚‰‰ƒm1‰‰ ·¥¤=‚‰‰å鉉 ·¥¤«„ó¤-÷ã‰í¤Óñ„ÿׂ‰‰ƒcщ‰ 7™¤ñC‚‰‰ƒ›Õ‰‰ 7™¤g!„é·¤g!‚‰‰‰‰‰ 7™¤g¡„ï¤ÿׂ‰‰Ù‰‰ 7™¤ñC„{ „Qƒ¤g!„ï¤ÿU‚‰‰ O‰‰ 7™¤ñC‚‰‰ç‘‰‰ 7™¤g!„cǤÿׂ‰‰ 7s‰‰ 7™¤ñC‚‰‰‹ß¡‰‰ 7™¤ñC„ýù¤g!„Ó¤yŸ„Õm¤ó±„Go¤ai„Ó¤ó1 Ÿ‰í¤ O„-¤Ÿ„¹¤ Á„+¤‡ã„-¤™_„=¤+‚‰‰ 1Չ‰ 7ý¤=‚‰‰I ‰‰ 7ý¤«„»¤™_‚‰‰ É}‰‰ 7ý¤=‚‰‰Á•‰‰ 7ý¤+„q¤™_„-¤…u„o¤=‚‰‰Y+‰‰ 7ý¤«„q¤™Ñ‚‰‰[‹‰‰ 7ý¤™_„ˤ™_‚‰‰ƒm1‰‰ 7ý¤=‚‰‰å鉉 7ý¤«„o¤¯ÿ÷ã‰í¤Qƒ„í¤ÿׂ‰‰ƒcщ‰ ÉѤñC‚‰‰ƒ›Õ‰‰ ÉѤg!„é·¤g!‚‰‰‰‰‰ ÉѤg¡„ï¤ÿׂ‰‰Ù‰‰ ÉѤñC„í¤{ „Ó¤g!„ï¤ÿU‚‰‰ O‰‰ ÉѤñC‚‰‰ç‘‰‰ ÉѤg!„cǤÿׂ‰‰ 7s‰‰ ÉѤñC‚‰‰‹Ý3‰‰ ÉѤÿU„ñC¤g!„Ó¤yŸ„Õm¤ó±„Go¤ai„Ó¤ó1 Ÿ‰í¤ O„-¤Ÿ„¹¤ Á„+¤‡ã„-¤™_„=¤+‚‰‰ 1Չ‰ Iµ¤=‚‰‰I ‰‰ Iµ¤«„»¤™_‚‰‰ É}‰‰ Iµ¤=‚‰‰Á•‰‰ Iµ¤+„q¤™_„-¤…u„o¤=‚‰‰Y+‰‰ Iµ¤«„q¤™Ñ‚‰‰[‹‰‰ Iµ¤™_„ˤ™_‚‰‰ƒm1‰‰ Iµ¤=‚‰‰å鉉 Iµ¤«„o¤¯ÿ»‰í¤]'„륤ÿׂ‰‰ƒcщ‰ Ë©¤ÿU‚‰‰ƒ›Õ‰‰ Ë©¤g!„é·¤g!‚‰‰‰‰‰ Ë©¤g¡„ï¤ÿׂ‰‰Ù‰‰ Ë©¤ÿU„륤ýy„Õí¤g!„ï¤ÿU‚‰‰ O‰‰ Ë©¤ÿׂ‰‰ç‘‰‰ Ë©¤g!„cǤÿׂ‰‰ 7s‰‰ Ë©¤ÿׂ‰‰‹ÛE‰‰ Ë©¤ÿׄó±¤g!„Ó¤yŸ„Õm¤ó±„Y]¤ai„Q¤ó1 Ÿ‰í¤ O„¯¤Ÿ„§#¤ Á„+¤‡ã„-¤™_„ Á¤«‚‰‰ ?g‰‰ K¤«‚‰‰I ‰‰ K¤«„»¤™_‚‰‰ 7‰‰ K¤«‚‰‰Á•‰‰ K¤+„q¤™_„+•¤„ݤ+‚‰‰Y+‰‰ K¤«„q¤™Ñ‚‰‰[‹‰‰ K¤™_„ˤ™_‚‰‰ƒkC‰‰ K¤+‚‰‰å鉉 K¤«„ݤ£Y‹Í‰í¤]¹„×Y¤ÿׂ‰‰ƒcщ‰ Ká¤ÿU‚‰‰ƒ›Õ‰‰ Ká¤g!„é·¤g!‚‰‰‰‰‰ Ká¤g¡„ï¤ÿׂ‰‰Ù‰‰ Ká¤ÿU„×Û¤ýù„é7¤g!„ï¤ÿU‚‰‰ O‰‰ Ká¤ÿׂ‰‰ç‘‰‰ Ká¤g!„cǤÿׂ‰‰ 7s‰‰ Ká¤ÿׂ‰‰‹Ý3‰‰ Ká¤{ „õ-¤g!„Ó¤yŸ„Õm¤ó±„[ˤai„_•¤ó1 Ÿ‰í¤ O„¡í¤Ÿ„¥·¤ Á„+¤‡ã„-¤™_„ S¤…u‚‰‰ ?g‰‰ ÍŤ«‚‰‰I ‰‰ ÍŤ«„»¤™_‚‰‰ 7‰‰ ÍŤ«‚‰‰Á•‰‰ ÍŤ+„q¤™_„I¤™„)§¤+‚‰‰Y+‰‰ ÍŤ«„q¤™Ñ‚‰‰[‹‰‰ ÍŤ™_„ˤ™_‚‰‰ƒkC‰‰ ÍŤ+‚‰‰å鉉 ÍŤ«„)'¤£Ù÷ã‰í¤]¹„×Y¤ÿׂ‰‰ƒcщ‰ M¹¤ÿU‚‰‰ƒ›Õ‰‰ M¹¤g!„é·¤g!‚‰‰‰‰‰ M¹¤g¡„ï¤ÿׂ‰‰Ù‰‰ M¹¤ÿU„×Û¤ýù„é7¤g!„ï¤ÿU‚‰‰ O‰‰ M¹¤ÿׂ‰‰ç‘‰‰ M¹¤g!„cǤÿׂ‰‰ 7s‰‰ M¹¤ÿׂ‰‰‹Ý3‰‰ M¹¤{ „õ-¤g!„Ó¤yŸ„Õm¤ó±„[ˤai„_•¤ó1 Ÿ‰í¤ O„£Y¤Ÿ„£Y¤ Á„+¤‡ã„-¤-‚‰‰ ?g‰‰ ϝ¤‚‰‰I ‰‰ ϝ¤«„»¤™_‚‰‰ 7‰‰ ϝ¤‚‰‰Á•‰‰ ϝ¤+„q¤™_„ó¤…õ„-¤™‚‰‰Y+‰‰ ϝ¤«„q¤™Ñ‚‰‰[‹‰‰ ϝ¤™_„ˤ™_‚‰‰ƒkC‰‰ ϝ¤™‚‰‰å鉉 ϝ¤«„+•¤¥·÷ã‰í¤[˄Õí¤ÿׂ‰‰ƒeM‰‰ Ïñ¤ýù‚‰‰ƒ›Õ‰‰ Ïñ¤g!„é·¤g!‚‰‰‰‰‰ Ïñ¤g¡„ï¤ÿׂ‰‰ہ‰‰ 
Ïñ¤ýù„Ó¤{„g!„ï¤ÿU‚‰‰ A ‰‰ Ïñ¤ýy‚‰‰ç‘‰‰ Ïñ¤g!„cǤÿׂ‰‰ I቉ Ïñ¤ýy‚‰‰‹Ñ‰‰ Ïñ¤Ó„Ó¤yŸ„Õm¤ó±„]'¤ai„]'¤ó1 Ÿ‰í¤ O„¥·¤Ÿ„¡í¤ Á„+¤‡ã„-¤¯ÿ‚‰‰ ?g‰‰ OÕ¤‚‰‰I ‰‰ OÕ¤«„»¤™_‚‰‰ 7‰‰ OÕ¤‚‰‰Á•‰‰ OÕ¤+„q¤™_„™_¤…u„§#¤™‚‰‰Y+‰‰ OÕ¤«„q¤™Ñ‚‰‰[‹‰‰ OÕ¤™_„ˤ™_‚‰‰ƒkC‰‰ OÕ¤™‚‰‰å鉉 OÕ¤«„¯ÿ¤£Y÷ã‰í¤]'„Qƒ¤ÿׂ‰‰ƒeM‰‰ Áɤýù‚‰‰ƒ›Õ‰‰ Áɤg!„é·¤g!‚‰‰‰‰‰ Áɤg¡„ï¤ÿׂ‰‰ہ‰‰ Áɤýù„Y]¤{ „g!¤g!„ï¤ÿU‚‰‰ A ‰‰ Áɤýy‚‰‰ç‘‰‰ Áɤg!„cǤÿׂ‰‰ I቉ Áɤýy‚‰‰‹Ó‹‰‰ ÁɤQƒ„Ó¤yŸ„Õm¤ó±„_•¤ai„[ˤó1 Ÿ‰í¤ O„§¥¤Ÿ„¯ÿ¤ Á„+¤‡ã„-¤¡í‚‰‰ ?g‰‰ A­¤‚‰‰Ëù‰‰ A­¤=„»¤™_‚‰‰ 7‰‰ A­¤‚‰‰Ãƒ‰‰ A­¤=„q¤™_‚‰‰Á‡‰‰ A­¤™‚‰‰[™‰‰ A­¤=„q¤™Ñ‚‰‰[‹‰‰ A­¤™_„ˤ™_‚‰‰ƒkC‰‰ A­¤™‚‰‰çE‰‰ A­¤=„¥·¤¯ÿ÷ã‰í¤Qƒ„[ˤñC‚‰‰ƒeM‰‰ Á¤ýù‚‰‰ƒ›Õ‰‰ Á¤g!„é·¤g!‚‰‰‰‰‰ Á¤g¡„ï¤ñC‚‰‰ہ‰‰ Á¤ýù‚‰‰óq‰‰ Á¤g!„ï¤ñC‚‰‰ A ‰‰ Á¤ýy‚‰‰ç‘‰‰ Á¤g!„cǤñC‚‰‰ I቉ Á¤ýy‚‰‰‹Sù‰‰ Á¤_•„Ó¤yŸ„Õm¤ó±„Qƒ¤ai„Yݤó1 Ÿ‰í¤ O„§¥¤Ÿ„¯ÿ¤ Á„+¤‡ã„-¤¡í‚‰‰ ?g‰‰ Ã夂‰‰Ëù‰‰ Ãå¤=„»¤™_‚‰‰ 7‰‰ Ã夂‰‰Ãƒ‰‰ Ãå¤=„q¤™_‚‰‰Á‡‰‰ Ã夙‚‰‰[™‰‰ Ãå¤=„q¤™Ñ‚‰‰[‹‰‰ Ã夙_„ˤ™_‚‰‰ƒkC‰‰ Ã夙‚‰‰çE‰‰ Ãå¤=„¥·¤¯ÿ‹M‰í¤Q„Gá¤ñC‚‰‰ƒeM‰‰ CÙ¤ýù‚‰‰ƒ›Õ‰‰ CÙ¤g!„é·¤g!‚‰‰‰‰‰ CÙ¤g¡„ï¤ñC‚‰‰ہ‰‰ CÙ¤ýù‚‰‰óq‰‰ CÙ¤g!„ï¤ñC‚‰‰ A ‰‰ CÙ¤ýy‚‰‰ç‘‰‰ CÙ¤g!„cǤñC‚‰‰ I቉ CÙ¤ýy‚‰‰‹Wɉ CÙ¤[˄Ó¤yŸ„Õm¤ó±„Ó¤ai„Go¤ó1 Ÿ‰í¤ O„¹¤Ÿ„-¤ Á„+¤‡ã„-¤¥·‚‰‰ ?g‰‰ ޤ‚‰‰Ëù‰‰ ޤ=„»¤™_‚‰‰ 7‰‰ ޤ‚‰‰Ãƒ‰‰ ޤ=„q¤™_‚‰‰Á‡‰‰ ޤ™‚‰‰[™‰‰ ޤ=„q¤™Ñ‚‰‰[‹‰‰ ޤ™_„ˤ™_‚‰‰ƒkC‰‰ ޤ™‚‰‰çE‰‰ ޤ=„¹‘¤¯÷ã‰í¤×Y„é¤ñC‚‰‰ƒeM‰‰ E‘¤ýù‚‰‰ƒ›Õ‰‰ E‘¤g!„é·¤g!‚‰‰‰‰‰ E‘¤g¡„ï¤ñC‚‰‰ہ‰‰ E‘¤ýù‚‰‰óq‰‰ E‘¤g!„ï¤ñC‚‰‰ A ‰‰ E‘¤ýy‚‰‰ç‘‰‰ E‘¤g!„cǤñC‚‰‰ I቉ E‘¤ýy‚‰‰‹i?‰‰ E‘¤ï„õ-¤g!„Ó¤yŸ„Õm¤ó±„Õí¤ai„Eó¤ó1 Ÿ‰í¤ O„»¤Ÿ„+•¤ Á„+¤‡ã„-¤™_„ S¤q‚‰‰ ?g‰‰ Eõ¤‚‰‰Ëù‰‰ Eõ¤=„»¤™_‚‰‰ 7‰‰ Eõ¤‚‰‰Ãƒ‰‰ Eõ¤=„q¤™_‚‰‰Á‡‰‰ Eõ¤™‚‰‰[™‰‰ Eõ¤=„q¤™Ñ‚‰‰[‹‰‰ Eõ¤™_„ˤ™_‚‰‰ƒkC‰‰ Eõ¤™‚‰‰çE‰‰ Eõ¤=„=é¤)'‰_‰í¤ë¥„ÍѤñC‚‰‰ƒeM‰‰ Çé¤ýù‚‰‰ƒ›Õ‰‰ Çé¤g!„é·¤g!‚‰‰‰‰‰ Çé¤g¡„ï¤ñC‚‰‰ہ‰‰ Çé¤ýù‚‰‰óq‰‰ Çé¤g!„ï¤ñC‚‰‰ A ‰‰ Çé¤ýy‚‰‰ç‘‰‰ Çé¤g!„cǤñC‚‰‰ I቉ Çé¤ýy‚‰‰‹k-‰‰ Çé¤ï„ó±¤g!„Ó¤yŸ„Õm¤ó±„×Y¤ai„Ťó1 Ÿ‰í¤ O„;{¤Ÿ„)'¤ Á„+¤‡ã„-¤™_„ Á¤󂉉 ?g‰‰ Gͤ‚‰‰Ëù‰‰ Gͤ=„»¤™_‚‰‰ 7‰‰ Gͤ‚‰‰Ãƒ‰‰ Gͤ=„q¤™_‚‰‰Á‡‰‰ Gͤ™‚‰‰[™‰‰ Gͤ=„q¤™Ñ‚‰‰[‹‰‰ Gͤ™_„ˤ™_‚‰‰ƒkC‰‰ Gͤ™‚‰‰çE‰‰ Gͤ=„3¡¤Ý÷ã‰í¤ë¥„ÍѤñC‚‰‰ƒeM‰‰ Ù¡¤ýù‚‰‰ƒ›Õ‰‰ Ù¡¤g!„é·¤g!‚‰‰‰‰‰ 
Ù¡¤g¡„ï¤ñC‚‰‰ہ‰‰ Ù¡¤ýù‚‰‰óq‰‰ Ù¡¤g!„ï¤ñC‚‰‰ A ‰‰ Ù¡¤ýy‚‰‰ç‘‰‰ Ù¡¤g!„cǤñC‚‰‰ I቉ Ù¡¤ýy‚‰‰‹k-‰‰ Ù¡¤ï„ó±¤g!„Ó¤yŸ„Õm¤ó±„×Y¤ai„Ťó1 Ÿ‰í¤ O„=i¤Ÿ„I¤ Á„+¤‡ã„-¤™_„=¤o‚‰‰ ?g‰‰ Y…¤…õ‚‰‰Ëù‰‰ Y…¤ Á„Ÿ¤™_‚‰‰ 7‰‰ Y…¤…u‚‰‰Ãƒ‰‰ Y…¤ O„o¤™_‚‰‰Á‡‰‰ Y…¤…u‚‰‰[™‰‰ Y…¤ Á„酪т‰‰[‹‰‰ Y…¤™_„ˤ™_‚‰‰ƒkC‰‰ Y…¤…u‚‰‰çE‰‰ Y…¤ Á‚‰‰…=‰‰ Y…¤o÷ã‰í¤í‚‰‰ûw‰‰ Yù¤ó±‚‰‰ƒg»‰‰ Yù¤{ ‚‰‰ƒ›Õ‰‰ Yù¤g!„é·¤g!‚‰‰‰‰‰ Yù¤g¡„퓤󱂉‰[}‰‰ Yù¤{ ‚‰‰óq‰‰ Yù¤g!„í¤ó1‚‰‰ Ãg‰‰ Yù¤{ ‚‰‰ç‘‰‰ Yù¤g!„ai¤ó±‚‰‰ K]‰‰ Yù¤{‚‰‰‹ïw‰‰ Yù¤í„ñC¤g!„Ó¤yŸ„Õm¤ó±„é7¤ai„äó1 Ÿ‰í¤ O„?ǤŸ„ݤ Á„+¤‡ã„-¤™_„™¤q‚‰‰ ?g‰‰ Ûݤ…õ‚‰‰Ëù‰‰ Ûݤ Á„Ÿ¤™_‚‰‰ 7‰‰ Ûݤ…u‚‰‰Ãƒ‰‰ Ûݤ O„o¤™_‚‰‰Á‡‰‰ Ûݤ…u‚‰‰[™‰‰ Ûݤ Á„酪т‰‰[‹‰‰ Ûݤ™_„ˤ™_‚‰‰ƒkC‰‰ Ûݤ…u‚‰‰çE‰‰ Ûݤ Á‚‰‰…³Å‰‰ Ûݤ‘…÷ã‰í¤oý‚‰‰ûw‰‰ [±¤ó±‚‰‰ƒg»‰‰ [±¤{ ‚‰‰ƒ›Õ‰‰ [±¤g!„é·¤g!‚‰‰‰‰‰ [±¤g¡„퓤󱂉‰[}‰‰ [±¤{ ‚‰‰óq‰‰ [±¤g!„í¤ó1‚‰‰ Ãg‰‰ [±¤{ ‚‰‰ç‘‰‰ [±¤g!„ai¤ó±‚‰‰ K]‰‰ [±¤{‚‰‰‹á剉 [±¤ï„ýù¤g!„Ó¤yŸ„Õm¤ó±„륤ai„Á»¤ó1 Ÿ‰í¤ O„?ǤŸ„ݤ Á„+¤‡ã„-¤™_„™¤q‚‰‰ ?g‰‰ ݕ¤…õ‚‰‰Ëù‰‰ ݕ¤ Á„Ÿ¤™_‚‰‰ 7‰‰ ݕ¤…u‚‰‰Ãƒ‰‰ ݕ¤ O„o¤™_‚‰‰Á‡‰‰ ݕ¤…u‚‰‰[™‰‰ ݕ¤ Á„酪т‰‰[‹‰‰ ݕ¤™_„ˤ™_‚‰‰ƒkC‰‰ ݕ¤…u‚‰‰çE‰‰ ݕ¤ Á‚‰‰…³Å‰‰ ݕ¤‘…÷ã‰í¤cG‚‰‰ûw‰‰ ]‰¤ó±‚‰‰ƒg»‰‰ ]‰¤{ ‚‰‰ƒ›Õ‰‰ ]‰¤g!„é·¤g!‚‰‰‰‰‰ ]‰¤g¡„퓤󱂉‰[}‰‰ ]‰¤{ ‚‰‰óq‰‰ ]‰¤g!„í¤ó1‚‰‰ Ãg‰‰ ]‰¤{ ‚‰‰ç‘‰‰ ]‰¤g!„ai¤ó±‚‰‰ K]‰‰ ]‰¤{‚‰‰‹ãӉ‰ ]‰¤ï„{ ¤g!„Ó¤yŸ„Õm¤ó±„í¤ai„ÏM¤ó1 Ÿ‰í¤ O„13¤Ÿ„o¤ Á„+¤‡ã„-¤™_„…u¤󂉉 ?g‰‰ ]í¤…õ‚‰‰Ëù‰‰ ]í¤ Á„Ÿ¤™_‚‰‰ 7‰‰ ]í¤…u‚‰‰Ãƒ‰‰ ]í¤ O„o¤™_‚‰‰Á‡‰‰ ]í¤…u‚‰‰[™‰‰ ]í¤ Á„酪т‰‰[‹‰‰ ]í¤™_„ˤ™_‚‰‰ƒkC‰‰ ]í¤…u‚‰‰çE‰‰ ]í¤ Á‚‰‰…·/‰‰ ]í¤9‰Ñ‰í¤e3‚‰‰ûw‰‰ ßÁ¤ñC‚‰‰ƒy‰‰ ßÁ¤yŸ‚‰‰ƒ›Õ‰‰ ßÁ¤g!„é·¤g!‚‰‰‰‰‰ ßÁ¤g¡„퓤ñC‚‰‰]뉉 ßÁ¤yŸ‚‰‰óq‰‰ ßÁ¤g!„í¤ñC‚‰‰ ÅՉ‰ ßÁ¤yŸ‚‰‰ç‘‰‰ ßÁ¤g!„ai¤ñC‚‰‰ MK‰‰ ßÁ¤yŸ‚‰‰‹ç½‰‰ ßÁ¤í„yŸ¤g!„Ó¤yŸ„Õm¤ó±„ai„ÍѤó1 Ÿ‰í¤ O„3¡¤Ÿ„ó¤ Á„+¤‡ã„-¤™_„‡ã¤o‚‰‰ ?g‰‰ _¥¤‡ã‚‰‰I ‰‰ _¥¤=„Ÿ¤™_‚‰‰ 7‰‰ _¥¤‡ã‚‰‰Á•‰‰ _¥¤=„o¤™_‚‰‰Á‡‰‰ _¥¤‡ã‚‰‰Y+‰‰ _¥¤=„酪т‰‰[‹‰‰ _¥¤™_„ˤ™_‚‰‰ƒkC‰‰ _¥¤‡ã‚‰‰å鉉 _¥¤=‚‰‰…Ë ‰‰ _¥¤›M÷ã‰í¤yŸ‚‰‰y›‰‰ љ¤ó±‚‰‰ƒy‰‰ љ¤yŸ‚‰‰ƒ›Õ‰‰ љ¤g!„é·¤g!‚‰‰‰‰‰ љ¤g¡„ë%¤ó±‚‰‰]뉉 љ¤yŸ‚‰‰óq‰‰ љ¤g!„륤󱂉‰ ÅՉ‰ љ¤yŸ‚‰‰ç‘‰‰ љ¤g!„oý¤ó±‚‰‰ MK‰‰ љ¤yŸ‚‰‰‹ù)‰‰ љ¤ï„e3¤g!„Ó¤yŸ„Õm¤ó±„oý¤ai„Ëã¤ó1 Ÿ‰í¤ O„5Ÿ¤Ÿ„‘…¤ Á„+¤‡ã„-¤™_„›M¤q‚‰‰ ?g‰‰ Ñý¤‡ã‚‰‰I ‰‰ Ñý¤ 
Á„‘…¤™_‚‰‰ 7‰‰ Ñý¤‡ã‚‰‰Á•‰‰ Ñý¤ Á„ݤ™_‚‰‰Á‡‰‰ Ñý¤‡ã‚‰‰Y+‰‰ Ñý¤ Á„[¤™Ñ‚‰‰[‹‰‰ Ñý¤™_„ˤ™_‚‰‰ƒkC‰‰ Ñý¤‡ã‚‰‰å鉉 Ñý¤ Á‚‰‰…MՉ‰ Ñý¤‡ã÷ã‰í¤yŸ‚‰‰y›‰‰ QѤ󱂉‰ƒy‰‰ QѤyŸ‚‰‰ƒ›Õ‰‰ QѤg!„é·¤g!‚‰‰‰‰‰ QѤg¡„ë%¤ó±‚‰‰]뉉 QѤyŸ‚‰‰óq‰‰ QѤg!„륤󱂉‰ ÅՉ‰ QѤyŸ‚‰‰ç‘‰‰ QѤg!„oý¤ó±‚‰‰ MK‰‰ QѤyŸ‚‰‰‹ù)‰‰ QѤï„e3¤g!„Ó¤yŸ„Õm¤ó±„oý¤ai„Ëã¤ó1 Ÿ‰í¤ O„7 ¤Ÿ„Ÿ¤ Á„+¤‡ã„-¤™_„»¤󂉉 ?g‰‰ Óµ¤‡ã‚‰‰I ‰‰ Óµ¤ Á„‘…¤™_‚‰‰ 7‰‰ Óµ¤‡ã‚‰‰Á•‰‰ Óµ¤ Á„ݤ™_‚‰‰Á‡‰‰ Óµ¤‡ã‚‰‰Y+‰‰ Óµ¤ Á„[¤™Ñ‚‰‰[‹‰‰ Óµ¤™_„ˤ™_‚‰‰ƒkC‰‰ Óµ¤‡ã‚‰‰å鉉 Óµ¤ Á‚‰‰…OA‰‰ Óµ¤‡ã÷ã‰í¤yŸ‚‰‰y›‰‰ S©¤ó±‚‰‰ƒy‰‰ S©¤yŸ‚‰‰ƒ›Õ‰‰ S©¤g!„é·¤g!‚‰‰‰‰‰ S©¤g¡„ë%¤ó±‚‰‰]뉉 S©¤yŸ‚‰‰óq‰‰ S©¤g!„륤󱂉‰ ÅՉ‰ S©¤yŸ‚‰‰ç‘‰‰ S©¤g!„oý¤ó±‚‰‰ MK‰‰ S©¤yŸ‚‰‰‹û‰‰ S©¤ï„cǤg!„Ó¤yŸ„Õm¤ó±„ai¤ai„Éu¤ó1 Ÿ‰í¤ O‚‰‰ ¥s‰‰ Ս¤Ÿ„»¤ Á„+¤‡ã„-¤™_„‘¤q‚‰‰ ?g‰‰ Ս¤™_‚‰‰I ‰‰ Ս¤ Á„‘…¤™_‚‰‰ 7‰‰ Ս¤™_‚‰‰Á•‰‰ Ս¤ Á„ݤ™_‚‰‰Á‡‰‰ Ս¤™Ñ‚‰‰Y+‰‰ Ս¤ Á„[¤™Ñ‚‰‰[‹‰‰ Ս¤™_„ˤ™_‚‰‰ƒkC‰‰ Ս¤™Ñ‚‰‰å鉉 Ս¤ Á‚‰‰…A¿‰‰ Ս¤…u÷ã‰í¤{ ‚‰‰y›‰‰ Õá¤ó±‚‰‰ƒ{‰‰ Õá¤g¡‚‰‰ƒ›Õ‰‰ Õá¤g!„é·¤g!‚‰‰‰‰‰ Õá¤g¡„ë%¤ó±‚‰‰_ى‰ Õá¤g¡‚‰‰óq‰‰ Õá¤g!„륤󱂉‰ ÇA‰‰ Õá¤g!‚‰‰ç‘‰‰ Õá¤g!„oý¤ó±‚‰‰ O¹‰‰ Õá¤g!‚‰‰‹}a‰‰ Õá¤ï„o{¤g!„Ó¤yŸ„Õm¤ó±„cǤai‚‰‰‰ã}‰‰ Õá¤ó1 Ÿ‰í¤ O‚‰‰ ¥s‰‰ UŤŸ„»¤ Á„+¤‡ã„-¤™_„ó¤q‚‰‰ 1Չ‰ UŤ‡ã‚‰‰I ‰‰ UŤ S„q¤™_‚‰‰ É}‰‰ UŤ‡ã‚‰‰Á•‰‰ UŤ S„I¤™_‚‰‰Aõ‰‰ UŤ‡ã‚‰‰Y+‰‰ UŤ ӄI¤™Ñ‚‰‰[‹‰‰ UŤ™_„ˤ™_‚‰‰ƒm1‰‰ UŤ‡ã‚‰‰å鉉 UŤ ӂ‰‰…C+‰‰ UŤ™÷ã‰í¤ýù‚‰‰g­‰‰ ×¹¤õ¯‚‰‰ƒ{‰‰ ×¹¤yŸ‚‰‰ƒ›Õ‰‰ ×¹¤g!„é·¤g!‚‰‰‰‰‰ ×¹¤g¡„é7¤õ¯‚‰‰_ى‰ ×¹¤yŸ‚‰‰óq‰‰ ×¹¤g!„é7¤õ-‚‰‰ ÇA‰‰ ×¹¤yŸ‚‰‰ç‘‰‰ ×¹¤g!„ï¤õ-‚‰‰ O¹‰‰ ×¹¤yŸ‚‰‰‹߉‰ ×¹¤ï„g!„Ó¤yŸ„Õm¤ó±„cǤai‚‰‰‰ã}‰‰ ×¹¤ó1 Ÿ‰í¤ O‚‰‰ ¥s‰‰ W¤Ÿ„»¤ Á„+¤‡ã„-¤™_„ó¤q‚‰‰ 1Չ‰ W¤‡ã‚‰‰I ‰‰ W¤ S„q¤™_‚‰‰ É}‰‰ W¤‡ã‚‰‰Á•‰‰ W¤ S„I¤™_‚‰‰Aõ‰‰ W¤‡ã‚‰‰Y+‰‰ W¤ ӄI¤™Ñ‚‰‰[‹‰‰ W¤™_„ˤ™_‚‰‰ƒm1‰‰ W¤‡ã‚‰‰å鉉 W¤ ӂ‰‰…C+‰‰ W¤™÷ã‰í¤ÿׂ‰‰g­‰‰ Wñ¤ó1‚‰‰ƒýó‰‰ Wñ¤g!‚‰‰ƒ›Õ‰‰ Wñ¤g!„é·¤g!‚‰‰‰‰‰ Wñ¤g¡„é7¤ó1‚‰‰Q5‰‰ Wñ¤g!‚‰‰óq‰‰ Wñ¤g!„é7¤ó±‚‰‰ Ù¿‰‰ Wñ¤g!‚‰‰ç‘‰‰ Wñ¤g!„ï¤ó±‚‰‰ A‰‰ Wñ¤g!„É÷¤ï„í¤g!„Ó¤yŸ„Õm¤ó±„e3¤ai‚‰‰‰ã}‰‰ Wñ¤ó1 Ÿ‰í¤ O‚‰‰ §á‰‰ éÕ¤Ÿ„›M¤ Á„+¤‡ã„-¤™_„o¤ó„7‹¤™_‚‰‰7Ÿ‰‰ éÕ¤ Á„q¤™_‚‰‰ É}‰‰ éÕ¤™_‚‰‰Ï'‰‰ éÕ¤ Á„I¤™_‚‰‰Aõ‰‰ éÕ¤™_‚‰‰G¿‰‰ éÕ¤ O„I¤™Ñ‚‰‰[‹‰‰ éÕ¤™_„ˤ™_‚‰‰ƒm1‰‰ éÕ¤™_‚‰‰ã{‰‰ éÕ¤ O‚‰‰…E‰‰ 
éÕ¤«‰_‰í¤ýy‚‰‰g­‰‰ iɤó1‚‰‰ƒýó‰‰ iɤg!‚‰‰ƒ›Õ‰‰ iɤg!„é·¤g!‚‰‰‰‰‰ iɤg¡„é7¤ó1‚‰‰Q5‰‰ iɤg!‚‰‰óq‰‰ iɤg!„é7¤ó±‚‰‰ Ù¿‰‰ iɤg!‚‰‰ç‘‰‰ iɤg!„ï¤ó±‚‰‰ A‰‰ iɤg!„ÍѤï„é·¤g!„Ó¤yŸ„Õm¤ó±„g!¤ai‚‰‰‰ã}‰‰ iɤó1 Ÿ‰í¤ O‚‰‰ ¹ß‰‰ ë­¤Ÿ„™_¤ Á„+¤‡ã„-¤™_„ˤq„3¡¤™_‚‰‰7Ÿ‰‰ ë­¤ Á„q¤™_‚‰‰ É}‰‰ ë­¤™_‚‰‰Ï'‰‰ ë­¤ Á„I¤™_‚‰‰Aõ‰‰ ë­¤™_‚‰‰G¿‰‰ ë­¤ O„I¤™Ñ‚‰‰[‹‰‰ ë­¤™_„ˤ™_‚‰‰ƒm1‰‰ ë­¤™_‚‰‰ã{‰‰ ë­¤ O‚‰‰…E‰‰ ë­¤÷ã‰í¤ýy‚‰‰g­‰‰ k¤ó1‚‰‰ƒýó‰‰ k¤g!‚‰‰ƒ›Õ‰‰ k¤g!„é·¤g!‚‰‰‰‰‰ k¤g¡„é7¤ó1‚‰‰Q5‰‰ k¤g!‚‰‰óq‰‰ k¤g!„é7¤ó±‚‰‰ Ù¿‰‰ k¤g!‚‰‰ç‘‰‰ k¤g!„ï¤ó±‚‰‰ A‰‰ k¤g!„ÍѤï„é·¤g!„Ó¤yŸ„Õm¤ó±„g!¤ai‚‰‰‰ã}‰‰ k¤ó1 Ÿ‰í¤ O‚‰‰ »K‰‰ k够„‡ã¤ Á„+¤‡ã„-¤™_„)'¤q„13¤›Í‚‰‰7Ÿ‰‰ kå¤ Ó„o¤™_‚‰‰ É}‰‰ k夛M‚‰‰Ï'‰‰ kå¤ S„)§¤™_‚‰‰Aõ‰‰ k夛͂‰‰G¿‰‰ kå¤ Ó„)§¤™Ñ‚‰‰[‹‰‰ k夙_„ˤ™_‚‰‰ƒm1‰‰ k夛͂‰‰ã{‰‰ kå¤ Ó‚‰‰…Ç÷‰‰ k夫÷ã‰í¤ÿׂ‰‰e?‰‰ íÙ¤õ¯‚‰‰ƒÿo‰‰ íÙ¤eµ‚‰‰ƒ›Õ‰‰ íÙ¤g!„é·¤g!‚‰‰‰‰‰ íÙ¤g¡„×Û¤õ¯‚‰‰S£‰‰ íÙ¤eµ‚‰‰óq‰‰ íÙ¤g!„×Û¤õ-‚‰‰ Û­‰‰ íÙ¤e3‚‰‰ç‘‰‰ íÙ¤g!„í¤õ¯‚‰‰ Cƒ‰‰ íÙ¤eµ„ÏM¤ï„×Y¤g!„Ó¤yŸ„Õm¤ó±„yŸ¤ai‚‰‰‰ã}‰‰ íÙ¤ó1 Ÿ‰í¤ O‚‰‰ ½¹‰‰ m½¤Ÿ„…u¤ Á„+¤‡ã„-¤™_„+•¤ó„1³¤™_‚‰‰5!‰‰ m½¤ O„o¤™_‚‰‰ Ë뉉 m½¤™Ñ‚‰‰Íˉ‰ m½¤ Á„)§¤™_‚‰‰Ca‰‰ m½¤™_‚‰‰EÁ‰‰ m½¤ Á„)§¤™Ñ‚‰‰[‹‰‰ m½¤™_„ˤ™_‚‰‰ƒo¯‰‰ m½¤™_‚‰‰a ‰‰ m½¤ Á‚‰‰…Ç÷‰‰ m½¤«÷ã‰í¤ÿׂ‰‰e?‰‰ 󱂉‰ƒÿo‰‰ g!‚‰‰ƒ›Õ‰‰ g!„é·¤g!‚‰‰‰‰‰ g¡„×ۤ󱂉‰S£‰‰ g!‚‰‰óq‰‰ g!„×ۤ󱂉‰ Û­‰‰ g¡‚‰‰ç‘‰‰ g!„í¤ó1‚‰‰ Cƒ‰‰ g!„ÏϤÕí¤g!„Ó¤yŸ„Õm¤ó±„{ ¤ai‚‰‰‰ã}‰‰ ó1 Ÿ‰í¤ O‚‰‰ ¿‰‰ ïõ¤Ÿ„™¤ Á„+¤‡ã„-¤™_„¯¤q„=餛M‚‰‰5!‰‰ ïõ¤ ӄݤ™_‚‰‰ Ë뉉 ïõ¤›M‚‰‰Íˉ‰ ïõ¤ ӄ+•¤™_‚‰‰Ca‰‰ ïõ¤›Í‚‰‰EÁ‰‰ ïõ¤ S„+¤™Ñ‚‰‰[‹‰‰ ïõ¤™_„ˤ™_‚‰‰ƒo¯‰‰ ïõ¤›Í‚‰‰a ‰‰ ïõ¤ S‚‰‰…Ç÷‰‰ ïõ¤«÷ã‰í¤ÿׂ‰‰cɉ oé¤õ-‚‰‰ƒñ݉‰ oé¤eµ‚‰‰ƒ›Õ‰‰ oé¤g!„é·¤g!‚‰‰‰‰‰ oé¤g¡„Õm¤õ-‚‰‰U‰‰ oé¤eµ‚‰‰óq‰‰ oé¤g!„Õí¤õ¯‚‰‰ ݉‰ oé¤e3‚‰‰ç‘‰‰ oé¤g!„륤õ¯‚‰‰ Åñ‰‰ oé¤e3„é¤ï„Q¤g!„Ó¤yŸ„Õm¤ó±„ýù¤ai‚‰‰‰ã}‰‰ oé¤ó1 Ÿ‰í¤ O‚‰‰ ¿‰‰ áͤŸ„™¤ Á„+¤‡ã„-¤™_„¯¤q„=餛M‚‰‰5!‰‰ áͤ ӄݤ™_‚‰‰ Ë뉉 áͤ›M‚‰‰Íˉ‰ áͤ ӄ+•¤™_‚‰‰Ca‰‰ áͤ›Í‚‰‰EÁ‰‰ áͤ S„+¤™Ñ‚‰‰[‹‰‰ áͤ™_„ˤ™_‚‰‰ƒo¯‰‰ áͤ›Í‚‰‰a ‰‰ áͤ S‚‰‰…Ç÷‰‰ áͤ«÷ã‰í¤ñC‚‰‰cɉ a¡¤ó±‚‰‰ƒóˉ‰ a¡¤cǂ‰‰ƒ›Õ‰‰ a¡¤g!„é·¤g!‚‰‰‰‰‰ a¡¤g¡„Õm¤ó±‚‰‰W ‰‰ a¡¤cǂ‰‰óq‰‰ a¡¤g!„Õí¤ó1‚‰‰ ]÷‰‰ a¡¤cǂ‰‰ç‘‰‰ a¡¤g!„륤󱂉‰ Çm‰‰ a¡¤cDŽÅ¤ï„_•¤g!„Ó¤yŸ„Õm¤ó±„ÿפai‚‰‰‰ã}‰‰ a¡¤ó1 
Ÿ‰í¤ O‚‰‰ ±‰‰ ㅤŸ„«¤ Á„+¤‡ã„-¤™_„¡í¤q„;{¤»‚‰‰33‰‰ ㅤ Á„ݤ™_‚‰‰ Ë뉉 ㅤ»‚‰‰Ë]‰‰ ㅤ O„+•¤™_‚‰‰Ca‰‰ ㅤ»‚‰‰CS‰‰ ㅤ Á„+¤™Ñ‚‰‰[‹‰‰ ㅤ™_„ˤ™_‚‰‰ƒo¯‰‰ ㅤ»‚‰‰o‘‰‰ ㅤ Á‚‰‰…Ùc‰‰ ㅤ=÷ã‰í¤ñC‚‰‰aU‰‰ ãù¤õ-‚‰‰ƒóˉ‰ ãù¤e3‚‰‰ƒ›Õ‰‰ ãù¤g!„é·¤g!‚‰‰‰‰‰ ãù¤g¡„Óñ¤õ-‚‰‰W ‰‰ ãù¤e3‚‰‰óq‰‰ ãù¤g!„Ó¤õ¯‚‰‰ ]÷‰‰ ãù¤eµ‚‰‰ç‘‰‰ ãù¤g!„é7¤õ-‚‰‰ Çm‰‰ ãù¤eµ„Ť]'¤g!„Ó¤yŸ„Õm¤ó±„ñC¤ai‚‰‰‰ã}‰‰ ãù¤ó1 Ÿ‰í¤ O‚‰‰ 1ñ‰‰ cݤŸ„=¤ Á„+¤‡ã„-¤™_„£Y¤ó„;{¤›Í‚‰‰33‰‰ cݤ S„I¤™_‚‰‰ Íى‰ cݤ›Í‚‰‰Ë]‰‰ cݤ ӄ-¤™_‚‰‰E߉‰ cݤ›M‚‰‰CS‰‰ cݤ S„-¤™Ñ‚‰‰[‹‰‰ cݤ™_„ˤ™_‚‰‰ƒa‰‰ cݤ›M‚‰‰o‘‰‰ cݤ S‚‰‰…Ùc‰‰ cݤ=÷ã‰í¤ñC‚‰‰… ߉‰ 層÷„õ¯‚‰‰ƒõ'‰‰ 層cǂ‰‰ƒ›Õ‰‰ 層g!„é·¤g!‚‰‰ e‰‰ 層e3„Qƒ¤õ¯‚‰‰éû‰‰ 層cǂ‰‰óq‰‰ 層g!„Qƒ¤õ-‚‰‰ _c‰‰ 層cG‚‰‰ç‘‰‰ 層g!„×Û¤õ-‚‰‰ Ùۉ‰ 層cG„Go¤ï„Yݤg!„Ó¤yŸ„Õm¤ó±„ó±¤ai‚‰‰‰ã}‰‰ 層ó1 Ÿ‰í¤ O‚‰‰ 3m‰‰ e•¤Ÿ„ Á¤ Á„+¤‡ã„-¤™_„§¥¤q„¹¤9‚‰‰1lj‰ e•¤ S„)§¤™_‚‰‰ Íى‰ e•¤9‚‰‰Éo‰‰ e•¤ S„¯ÿ¤™_‚‰‰E߉‰ e•¤»‚‰‰A牉 e•¤ ӄ¯ÿ¤›M‚‰‰[‹‰‰ e•¤™_„ˤ™_‚‰‰ƒa‰‰ e•¤»‚‰‰m#‰‰ e•¤ ӄó¤ e‚‰‰…Ùc‰‰ e•¤=÷ã‰í¤ñC‚‰‰… ߉‰ 牤÷„õ¯‚‰‰ƒõ'‰‰ 牤cǂ‰‰ƒ›Õ‰‰ 牤g!„é·¤g!‚‰‰ e‰‰ 牤e3„Qƒ¤õ¯‚‰‰éû‰‰ 牤cǂ‰‰óq‰‰ 牤g!„Qƒ¤õ-‚‰‰ _c‰‰ 牤cG‚‰‰ç‘‰‰ 牤g!„×Û¤õ-‚‰‰ Ùۉ‰ 牤cG„Go¤ï„Yݤg!„Ó¤yŸ„Õm¤ó±„ó±¤ai‚‰‰‰ã}‰‰ 牤ó1 Ÿ‰í¤ O‚‰‰ 5ۉ‰ çí¤Ÿ„ S¤ Á„+¤‡ã„-¤™_„¹¤q„¹¤9‚‰‰?i‰‰ çí¤ Á„)§¤™_‚‰‰ Ï5‰‰ çí¤»‚‰‰·ó‰‰ çí¤ Á„¯ÿ¤™_‚‰‰GK‰‰ çí¤»‚‰‰Á ‰‰ çí¤ O„¡í¤™_‚‰‰[‹‰‰ çí¤™_„ˤ™_‚‰‰ƒc‰‰‰ çí¤»‚‰‰k·‰‰ çí¤ O„ó¤ e‚‰‰…Ùc‰‰ çí¤ Á÷ã‰í¤ó±‚‰‰… ߉‰ gÁ¤÷„ó1‚‰‰ƒ÷•‰‰ gÁ¤cǂ‰‰ƒ›Õ‰‰ gÁ¤g!„é·¤g!‚‰‰ e‰‰ gÁ¤g!„_•¤ó1‚‰‰ëW‰‰ gÁ¤cǂ‰‰óq‰‰ gÁ¤g!„Qƒ¤ó±‚‰‰ QQ‰‰ gÁ¤cǂ‰‰ç‘‰‰ gÁ¤g!„×ۤ󱂉‰ Û7‰‰ gÁ¤cG„Go¤ï„Go¤g!„Ó¤yŸ„Õm¤ó±„õ-¤ai‚‰‰‰ã}‰‰ gÁ¤ó1 Ÿ‰í¤ O‚‰‰ 7ɉ‰ ù¥¤Ÿ„ e¤ Á„+¤‡ã„-¤™_„»¤o„¥·¤Ÿ‚‰‰?i‰‰ ù¥¤ ӄ+•¤™_‚‰‰ Ï5‰‰ ù¥¤Ÿ‚‰‰·ó‰‰ ù¥¤ S„¡k¤™_‚‰‰GK‰‰ ù¥¤Ÿ‚‰‰Á ‰‰ ù¥¤ ӄ£Y¤™_‚‰‰[‹‰‰ ù¥¤™_„ˤ™_‚‰‰ƒc‰‰‰ ù¥¤Ÿ‚‰‰k·‰‰ ù¥¤ ӄo¤ e‚‰‰…Ùc‰‰ ù¥¤ Á÷ã‰í¤ó±‚‰‰… ߉‰ y™¤÷„í¤õ¯‚‰‰‰‰‰ y™¤ai‚‰‰ƒ›Õ‰‰ y™¤g!„é·¤g!‚‰‰ e‰‰ y™¤g!„]'¤õ¯‚‰‰íʼn‰ y™¤ai‚‰‰óq‰‰ y™¤g!„_¤õ-‚‰‰ Sω‰ y™¤ai‚‰‰ç‘‰‰ y™¤g!„Õí¤õ¯‚‰‰ Ý%‰‰ y™¤ai„[ˤí„Eó¤g!„Ó¤yŸ„Õm¤ó±„÷¤ai‚‰‰‰ã}‰‰ y™¤ó1 Ÿ‰í¤ O‚‰‰ 7ɉ‰ yý¤Ÿ„ e¤ Á„+¤‡ã„-¤™_„»¤o„¥·¤Ÿ‚‰‰?i‰‰ yý¤ ӄ+•¤™_‚‰‰ Ï5‰‰ yý¤Ÿ‚‰‰·ó‰‰ yý¤ S„¡k¤™_‚‰‰GK‰‰ yý¤Ÿ‚‰‰Á ‰‰ yý¤ ӄ£Y¤™_‚‰‰[‹‰‰ yý¤™_„ˤ™_‚‰‰ƒc‰‰‰ 
yý¤Ÿ‚‰‰k·‰‰ yý¤ ӄo¤ e‚‰‰…Ùc‰‰ yý¤ Á÷ã‰í¤ñC‚‰‰… ߉‰ ûѤ÷„륤õ-‚‰‰ ‰‰ ûѤai„e3¤÷‚‰‰ƒ›Õ‰‰ ûѤg!„é·¤g!‚‰‰Ӊ‰ ûѤeµ„[ˤõ-‚‰‰ﳉ‰ ûѤai‚‰‰óq‰‰ ûѤg!„]'¤õ¯‚‰‰ U;‰‰ ûѤai„eµ¤÷›‚‰‰ç‘‰‰ ûѤg!„Ó¤õ¯‚‰‰ ߓ‰‰ ûѤai„[ˤï„äg!„Ó¤yŸ„Õm¤í‚‰‰‰ã}‰‰ ûѤó1 Ÿ‰í¤ O‚‰‰ I%‰‰ {µ¤o„+¤‡ã„-¤™_„=i¤q„¥·¤Ÿ‚‰‰=ý‰‰ {µ¤ ӄ-¤™_‚‰‰ ;ñ‰‰ {µ¤ 焛ͤŸ‚‰‰5‰‰ {µ¤ ӄ£Y¤™_‚‰‰Y9‰‰ {µ¤Ÿ‚‰‰Ï‰‰ {µ¤ S„¥·¤›Í‚‰‰[‹‰‰ {µ¤™_„ˤ™_‚‰‰ƒÑµ‰‰ {µ¤ e„›M¤Ÿ‚‰‰iى‰ {µ¤ S„ݤ e‚‰‰…Ç÷‰‰ {µ¤=÷ã‰í¤ñC‚‰‰… ߉‰ ý©¤÷„륤󱂉‰ 퉉 ý©¤ai„cǤ÷‚‰‰ƒ›Õ‰‰ ý©¤g!„é·¤g!‚‰‰Ӊ‰ ý©¤eµ„[ˤ󱂉‰á/‰‰ ý©¤ai‚‰‰óq‰‰ ý©¤g!„]'¤ó1‚‰‰ W©‰‰ ý©¤ai„cG¤÷›‚‰‰ç‘‰‰ ý©¤g!„Ӥ󱂉‰ щ‰ ý©¤ai„[ˤï„Á»¤g!„Ó¤yŸ„Õm¤ï‚‰‰‰ã}‰‰ ý©¤ó1 Ÿ‰í¤ O‚‰‰ K“‰‰ }¤ó„+¤‡ã„-¤™_„?Ǥq„¥·¤Ÿ‚‰‰½‰‰ }¤ Á„-¤™_‚‰‰ ;ñ‰‰ }¤ 焝9¤Ÿ‚‰‰3©‰‰ }¤ O„£Y¤™_‚‰‰[—‰‰ }¤Ÿ‚‰‰Í¯‰‰ }¤ Á„¥·¤›Í‚‰‰[‹‰‰ }¤™_„ˤ™_‚‰‰ƒÑµ‰‰ }¤ e„»¤Ÿ‚‰‰Wk‰‰ }¤ Á„ݤ e‚‰‰…Ç÷‰‰ }¤=õu‰í¤ó±‚‰‰… K‰‰ }á¤õ¯„é7¤ó±‚‰‰ Y‰‰ }á¤oý„cǤ÷‚‰‰ƒ›Õ‰‰ }á¤g!„é·¤g!‚‰‰Ӊ‰ }á¤g!„Gá¤ó±‚‰‰㝉‰ }á¤oý‚‰‰óq‰‰ }á¤g!„[ˤ󱂉‰ i‡‰‰ }á¤o{„cG¤÷›‚‰‰ç‘‰‰ }á¤g!„Qƒ¤ó±‚‰‰ Qý‰‰ }á¤oý„_¤í„ÏM¤g!„Ó¤yŸ„Õm¤o{‚‰‰‰ã}‰‰ }á¤ó1 Ÿ‰í¤ O‚‰‰ M‰‰ ÿŤ‘„+¤‡ã„-¤™_„13¤o„¡k¤‘…‚‰‰»‰‰ ÿŤ Á„¯ÿ¤™_‚‰‰ ;ñ‰‰ ÿŤ 焝9¤‘‚‰‰1»‰‰ ÿŤ Á„¥·¤™_‚‰‰[—‰‰ ÿŤ‘…‚‰‰Ë1‰‰ ÿŤ Á„¹‘¤™_‚‰‰[‹‰‰ ÿŤ™_„ˤ™_‚‰‰ƒÑµ‰‰ ÿŤ e„»¤‘…‚‰‰Uÿ‰‰ ÿŤ Á„I¤ ӂ‰‰…Ç÷‰‰ ÿŤ Á÷ã‰í¤ó±‚‰‰… K‰‰ ¹¤õ¯„é7¤ó±‚‰‰ Y‰‰ ¹¤oý„cǤ÷‚‰‰ƒ›Õ‰‰ ¹¤g!„é·¤g!‚‰‰Ӊ‰ ¹¤g!„Gá¤ó±‚‰‰㝉‰ ¹¤oý‚‰‰óq‰‰ ¹¤g!„[ˤ󱂉‰ i‡‰‰ ¹¤o{„cG¤÷›‚‰‰ç‘‰‰ ¹¤g!„Qƒ¤ó±‚‰‰ Qý‰‰ ¹¤oý„_¤í„ÏM¤g!„Ó¤yŸ„Õm¤o{‚‰‰‰ã}‰‰ ¹¤ó1 Ÿ‰í¤ O‚‰‰ Ï}‰‰ ñ¤Ÿ„+¤‡ã„-¤™_„5Ÿ¤q„¡k¤‘‚‰‰¹¥‰‰ ñ¤ Á„¡k¤™_‚‰‰ ;ñ‰‰ ñ¤ 焟¤‘…‚‰‰?M‰‰ ñ¤ Á„§#¤™_‚‰‰]‰‰ ñ¤‘…‚‰‰Éʼn‰ ñ¤ O„»¤›Í‚‰‰Y‰‰ ñ¤›Í„ˤ™_‚‰‰ƒÑµ‰‰ ñ¤ e„Ÿ¤‘…‚‰‰Õ‰‰ ñ¤ O„)'¤ ӂ‰‰…E‰‰ ñ¤=÷ã‰í¤ñC‚‰‰… K‰‰ ññ¤õ¯„×Y¤ó1‚‰‰·‰‰ ññ¤oý„ai¤÷‚‰‰ƒ›Õ‰‰ ññ¤g!„é·¤eµ‚‰‰O‰‰ ññ¤eµ„Eó¤ó1‚‰‰å ‰‰ ññ¤oý‚‰‰óq‰‰ ññ¤g!„Y]¤ó±‚‰‰ ës‰‰ ññ¤oý„ai¤÷›‚‰‰ç‘‰‰ ññ¤g!„_¤ó±‚‰‰ S뉉 ññ¤o{„_¤ï„Ëã¤g!„Ó¤yŸ„Õm¤ai‚‰‰‰ã}‰‰ ññ¤ó1 Ÿ‰í¤ O‚‰‰ Á뉉 qÕ¤»„+¤‡ã„-¤™_„7 ¤q„¡í¤‘…‚‰‰§7‰‰ qÕ¤ O„£Y¤™_‚‰‰ ;ñ‰‰ qÕ¤ 焑…¤‘…‚‰‰=щ‰ qÕ¤ Á„¹‘¤™_‚‰‰ßñ‰‰ qÕ¤‘‚‰‰·×‰‰ qÕ¤ Á„;{¤»‚‰‰G¡‰‰ qÕ¤9„ˤ™_‚‰‰ƒÑµ‰‰ qÕ¤ e„‘…¤‘‚‰‰Ó‰‰ qÕ¤ Á„+•¤ ӂ‰‰…E‰‰ qÕ¤ O÷ã‰í¤ó1‚‰‰… K‰‰ óɤõ¯„Õí¤ó±‚‰‰¥‰‰ óɤo{„oý¤÷‚‰‰ƒ›Õ‰‰ 
óɤg!„é·¤cG‚‰‰=‰‰ óɤcDŽÅ¤ó±‚‰‰eg‰‰ óɤo{‚‰‰óq‰‰ óɤg!„Gá¤ó±‚‰‰ í቉ óɤoý„oý¤÷›‚‰‰ç‘‰‰ óɤg!„]'¤ó1‚‰‰ UG‰‰ óɤoý„_•¤ï„Éu¤g!„Ó¤yŸ„Õm¤cǂ‰‰‰ã}‰‰ óɤó1 Ÿ‰í¤ O‚‰‰ ÃG‰‰ s­¤›M„+¤‡ã„-¤™_‚‰‰ a‰‰ s­¤o„¯ÿ¤‘…‚‰‰¥Y‰‰ s­¤ Á„¥·¤™_‚‰‰ ;ñ‰‰ s­¤ ç„q¤‘…‚‰‰;c‰‰ s­¤ O„»¤™_‚‰‰Ñ s­¤‘…‚‰‰µy‰‰ s­¤ Á„?E¤›M‚‰‰E³‰‰ s­¤ S„ e¤™_„ˤ™_‚‰‰ƒÑµ‰‰ s­¤ e„󤑅‚‰‰ѧ‰‰ s­¤ Á„-¤ Á‚‰‰…C+‰‰ s­¤=÷ã‰í¤ñC‚‰‰…9‰‰ õ¤ó±„Ӥ󱂉‰‰‰ õ¤oý„÷‚‰‰ƒ›Õ‰‰ õ¤g!„é·¤g!„÷¤õ-‚‰‰=‰‰ õ¤e3„Á;¤ó±‚‰‰gՉ‰ õ¤oý‚‰‰óq‰‰ õ¤g!„Eó¤ó1‚‰‰ ï]‰‰ õ¤oý„ï¤÷›‚‰‰ç‘‰‰ õ¤g!„[ˤ󱂉‰ Wµ‰‰ õ¤oý„Qƒ¤í‚‰‰‹Kk‰‰ õ¤g!„Ó¤yŸ„Õm¤e3‚‰‰‰ã}‰‰ õ¤ó1 Ÿ‰í¤ O‚‰‰ ÃG‰‰ õ夛M„+¤‡ã„-¤™_‚‰‰ a‰‰ õå¤o„¯ÿ¤‘…‚‰‰¥Y‰‰ õå¤ Á„¥·¤™_‚‰‰ ;ñ‰‰ õå¤ ç„q¤‘…‚‰‰;c‰‰ õå¤ O„»¤™_‚‰‰Ñ õ夑…‚‰‰µy‰‰ õå¤ Á„?E¤›M‚‰‰E³‰‰ õå¤ S„ e¤™_„ˤ™_‚‰‰ƒÑµ‰‰ õå¤ e„󤑅‚‰‰ѧ‰‰ õå¤ Á„-¤ Á‚‰‰…C+‰‰ õå¤=õu‰í¤ñC„É÷¤ñC„Qƒ¤ó±‚‰‰‰‰ uÙ¤oý„í¤õ¯‚‰‰ƒ›Õ‰‰ uÙ¤g!„é·¤g!„õ¯¤õ-„Ëc¤e3„ÍQ¤ó±‚‰‰yA‰‰ uÙ¤oý‚‰‰óq‰‰ uÙ¤g!„Ť󱂉‰ áK‰‰ uÙ¤o{„퓤õ-‚‰‰ç‘‰‰ uÙ¤g!„Y]¤ó±‚‰‰ i!‰‰ uÙ¤oý„Qƒ¤ï‚‰‰‹Kk‰‰ uÙ¤g!„Ó¤yŸ„Õm¤e3‚‰‰‰ã}‰‰ uÙ¤ó1 Ÿ‰í¤ O‚‰‰ ÃG‰‰ ÷½¤›M„+¤‡ã„-¤™_‚‰‰ ͉‰ ÷½¤q„¯ÿ¤‘…‚‰‰£í‰‰ ÷½¤ Á„§#¤™_‚‰‰ »‰‰ ÷½¤ S„螺‚‰‰9u‰‰ ÷½¤ Á„;{¤™_‚‰‰Ó[‰‰ ÷½¤‘…‚‰‰3‰‰ ÷½¤ Á„3/¤›M„5¤ S„ Ó¤™_„ˤ™_‚‰‰ƒßG‰‰ ÷½¤ ӄo¤‘…‚‰‰ßI‰‰ ÷½¤ Á„¯ÿ¤=„7‹¤=õu‰í¤ó±„É÷¤ñC„_•¤ñC‚‰‰™i‰‰ w‘¤ï„륤õ¯‚‰‰ƒA‰‰ w‘¤eµ„é·¤g!„ó±¤õ¯„ÏM¤cDŽËc¤ñC‚‰‰}­‰‰ w‘¤ï‚‰‰óq‰‰ w‘¤g!„é¤ñC‚‰‰ 剉 w‘¤ï„륤õ-‚‰‰ù‰‰ w‘¤e3„Gá¤ñC‚‰‰ m‰‰ w‘¤ï„Óñ¤í‚‰‰‹Kk‰‰ w‘¤g!„Ó¤g!„×Û¤g¡‚‰‰‰ã}‰‰ w‘¤ó1 Ÿ‰í¤ O‚‰‰ ŵ‰‰ wõ¤™Ñ„)§¤™_„-¤™_‚‰‰ 9‰‰ wõ¤o„-¤󂉉/ƒ‰‰ wõ¤=„¹‘¤›M‚‰‰ »‰‰ wõ¤ S„ݤq‚‰‰§+‰‰ wõ¤=„=餙_‚‰‰Õɉ‰ wõ¤󂉉?!‰‰ wõ¤=„5¤»„13¤ ӄ Á¤™_„ˤ›Í‚‰‰ƒßG‰‰ wõ¤ ӄݤ󂉉Û wõ¤=„¡í¤=„7‹¤ Á÷ã‰í¤ó±„É÷¤ñC„_•¤ñC‚‰‰™i‰‰‰é¤ï„륤õ¯‚‰‰ƒA‰‰‰é¤eµ„é·¤g!„ó±¤õ¯„ÏM¤cDŽËc¤ñC‚‰‰}­‰‰‰é¤ï‚‰‰óq‰‰‰é¤g!„é¤ñC‚‰‰ 剉‰é¤ï„륤õ-‚‰‰ù‰‰‰é¤e3„Gá¤ñC‚‰‰ m‰‰‰é¤ï„Óñ¤í‚‰‰‹Kk‰‰‰é¤g!„Ó¤g!„×Û¤g¡‚‰‰‰ã}‰‰‰é¤ó1 Ÿ‰í¤ O‚‰‰ Ç£‰‰ ͤ‡ã„)§¤™_„-¤™_‚‰‰ —‰‰ ͤo„-¤q„5¤=„»¤›M„7‹¤ Á„I¤q„5Ÿ¤=„?E¤™_‚‰‰×%‰‰ ͤó„5¤=‚‰‰ui‰‰ ͤ9„;{¤ Á„=¤™_„ˤ›Í„7‹¤ 
Á„I¤ó„5¤=„£Y¤«„13¤«÷ã‰í¤ÿׄÏM¤ÿׄ]'¤ñC„Ëc¤ï„é7¤ó±„É÷¤eµ„é·¤g!„ñC¤ó±„ŤcG‚‰‰3S‰‰‹¡¤ñC„Ëc¤ï‚‰‰óq‰‰‹¡¤g!„Á;¤ñC„Ëã¤ï„é7¤ó±„É÷¤e3„Eó¤ñC„Ëc¤ï„Ӥ킉‰‹Kk‰‰‹¡¤g!„Ó¤g!„×Û¤yŸ‚‰‰‰ã}‰‰‹¡¤ó1w¡‰í¤=‚‰‰ Ù‰‰ …¤…u„)§¤™_„-¤™_‚‰‰ ™q‰‰ …¤q„-¤o„=é¤=„=餝»„5¤ Á„)§¤o„=é¤=„3¡¤™_‚‰‰é‰‰ …¤ï„=i¤=‚‰‰wlj‰ …¤Ÿ„§#¤ Á„«¤™_„ˤ9„5¤ Á„)'¤ï„=i¤=„§#¤…u„;û¤+÷ã‰í¤ÿU„Ҥ{ „Y]¤ñC„ä퓄×Y¤ó±„Ëc¤cG„é·¤g!„ÿפó±„Y]¤ai‚‰‰± ‰‰ ù¤ñC„ä퓂‰‰óq‰‰ ù¤g!„ÍѤñC„é¤í„×Û¤ó±„Ëc¤cDŽé¤ñC„é¤í„Ӥ‰‹Kk‰‰ ù¤g!„Ó¤g!„×Û¤{ ‚‰‰‰ã}‰‰ ù¤ñC Ÿ‰í¤=‚‰‰ ۍ‰‰Ý¤™„)§¤™_„-¤™_‚‰‰ ›ï‰‰Ý¤o„+¤݄§#¤+„?E¤Ÿ„13¤=„+¤݄§¥¤«„5¤™_‚‰‰끉‰Ý¤݄§#¤«‚‰‰ 3‰‰Ý¤Ÿ„¡í¤ O„…u¤™_„ˤŸ„1³¤=„+•¤݄§#¤«„¹¤‡ã„¹‘¤«÷ã‰í¤ÿׄGá¤yŸ„Go¤ÿׄY]¤ë¥„Õí¤ñC„ÏϤai„é·¤g!„{ ¤ó1„_•¤ai‚‰‰¿‰‰ ±¤ÿׄY]¤ë¥‚‰‰óq‰‰ ±¤g!„Ëc¤ÿׄYݤ륄Õm¤ñC„ÏM¤ai„Á;¤ÿU„Y]¤ë¥„Õm¤í‚‰‰‹Kk‰‰ ±¤g!„Ó¤g!„×Û¤ýù‚‰‰‰ã}‰‰ ±¤ñC Ÿ‰í¤=‚‰‰ ۍ‰‰•¤™„)§¤™_„-¤™_‚‰‰ ›ï‰‰•¤o„+¤݄§#¤+„?E¤Ÿ„13¤=„+¤݄§¥¤«„5¤™_‚‰‰끉‰•¤݄§#¤«‚‰‰ 3‰‰•¤Ÿ„¡í¤ O„…u¤™_„ˤŸ„1³¤=„+•¤݄§#¤«„¹¤‡ã„¹‘¤«õu‰í¤ýy„_•¤eµ„Ťýù„Õm¤é·„Qƒ¤ÿׄé¤oý„é·¤g!„yŸ¤ÿׄé7¤o{‚‰‰»1‰‰‰¤ýù„Õm¤é·‚‰‰óq‰‰‰¤g!‚‰‰!»‰‰‰¤ýy„Õí¤é7„Qƒ¤ÿU„é¤oý„ÍѤýy„Õí¤é7„Õí¤ë¥‚‰‰‹Kk‰‰‰¤g!„Ó¤g!„×Û¤ÿU‚‰‰‰ã}‰‰‰¤ñC Ÿ‰í¤=‚‰‰ [y‰‰í¤+„)§¤™_„-¤™_‚‰‰ [‰‰í¤݄+•¤I„+•¤„3¡¤‘…„=é¤+„¯ÿ¤I„+•¤‚‰‰c±‰‰í¤™_‚‰‰m뉉í¤˄+¤™‚‰‰ ¡‰‰í¤‘„I¤«„‡ã¤™_„ˤ‘…„=餫„¯ÿ¤˄+¤™„;{¤›Í„¡í¤s™‰í¤yŸ„ï„Á»¤yŸ„ai¤×ۄ]'¤ýy„Eó¤ï„륤e3„eµ¤ýù„cǤ퓂‰‰§×‰‰Á¤yŸ„ai¤×ۂ‰‰óq‰‰Á¤eµ‚‰‰-щ‰Á¤yŸ„ai¤×Y„]¹¤ýù„Es¤ï„É÷¤yŸ„ai¤×Y„Ӥ퓂‰‰‹Kk‰‰Á¤eµ„×Y¤eµ„×Û¤ñC‚‰‰‰ã}‰‰Á¤ñC Ÿ‰í¤=‚‰‰ ]W‰‰¥¤=„)§¤›Í„)'¤›Í‚‰‰ ‘§‰‰¥¤ï„-¤)'„Ÿ¤‡ã„7‹¤ó„» ¤™„£Ù¤)'„Ÿ¤‡ã‚‰‰aC‰‰¥¤›Í‚‰‰a5‰‰¥¤)§„Ÿ¤‡ã‚‰‰ Ÿ‰‰¥¤»¤™„›Í¤›M„ݤq„»¤„£Y¤)§„Ÿ¤‡ã„?Ǥq„ó¤‡ãõõ‰í¢‰‰… a‰‰ƒ™„ÍѤÉu„Y]¤g!„Qƒ¤×Y„퓤e3„ai¤Å‚‰‰#‰‰ƒ™¤Éu‚‰‰óq‰‰ƒ™¤eµ‚‰‰)u‰‰ƒ™¤É÷„Y]¤g¡„Qƒ¤×ۂ‰‰“퉉ƒ™¤É÷„Ӥ륂‰‰‹MY‰‰ƒ™¤cDŽ×Y¤eµ„×ۤ󱂉‰‰åk‰‰ƒ™¤ÿU Ÿ‰í¤+‚‰‰ _ʼn‰ƒý¤ Á„)§¤›Í„)'¤»‚‰‰ “‰‰ƒý¤݄-¤7‹‚‰‰Õ㉉ƒý¤)§„¯ÿ¤™Ñ„§#¤7‹‚‰‰aC‰‰ƒý¤›Í‚‰‰e‰‰ƒý¤7 ‚‰‰ù‰‰ƒý¤;{„Ÿ¤›M„ï¤)'„¯ÿ¤™_„§#¤7 „3¡¢‰‰…K牉ƒý÷ã‰í¢‰‰… a‰‰фÍѤÉu„Y]¤g!„Qƒ¤×Y„퓤e3„ai¤Å‚‰‰#‰‰ѤÉu‚‰‰óq‰‰Ѥeµ‚‰‰)u‰‰ѤÉ÷„Y]¤g¡„Qƒ¤×ۂ‰‰“퉉ѤÉ÷„Ӥ륂‰‰‹MY‰‰ѤcDŽ×Y¤eµ„×ۤ󱂉‰‰åk‰‰ѤÿUuµ‰í¤‡ã‚‰‰ Q1‰‰…µ¤ S„[¤‘„ݤ‘…‚‰‰ 
“‰‰…µ¤+•„+¤=邉‰Óu‰‰…µ¢‰‰ =…µ„» ¤=i‚‰‰mù‰‰…µ¤󂉉ùû‰‰…µ¤=邉‰…C‰‰…µ¤¥·„q¤»„‘…¢‰‰ƒÓ!‰‰…µ„»¤=邉‰…‹w‰‰…µ¤3¡÷ã‰í¤Íт‰‰Éq‰‰©¤Ã©„E󢉉÷£‰‰©„oý¤cDŽï¤[˂‰‰-µ‰‰©¤Ã©‚‰‰÷݉‰©¤ï‚‰‰•¿‰‰©¤Ã„Es¢‰‰Qo‰‰©‚‰‰•‰‰©¤Ã©„Õm¤Õ킉‰‹O·‰‰©¤oý„륤o{„ë%¤õ-‚‰‰‰çى‰©¤yŸsljí¤󂉉 S¯‰‰‡¤ 焑…¤-„…õ¤¡í‚‰‰ [‰‰‡¤¹‘„)'¤£Y‚‰‰[Á‰‰‡¢‰‰ =‡„?E¤£Y‚‰‰é!‰‰‡¤¯ÿ‚‰‰ÿ³‰‰‡¤£Ù‚‰‰™-‰‰‡¤+•„I¤o„«¢‰‰ƒÓ!‰‰‡„?Ǥ£Ù‚‰‰…‹w‰‰‡¤§#÷ã‰í¤Y]‚‰‰39‰‰‡á¤]¹„Á»¢‰‰ÿ}‰‰‡á„ÿפí„é7¤Õ킉‰ý‰‰‡á¤]¹‚‰‰‰‰‡á¤Qƒ‚‰‰Ÿç‰‰‡á¤]'„Á;¢‰‰Û»‰‰‡á‚‰‰]‰‰‡á¤]'„×Y¤Gႉ‰‹Ç{‰‰‡á¤_•„{¤Ó„oý¤÷›‚‰‰‰ý‘‰‰‡á¤ï Ÿ‰í¤󂉉 S¯‰‰Ť 焑…¤-„…õ¤¡í‚‰‰ [‰‰Ť¹‘„)'¤£Y‚‰‰[Á‰‰Å¢‰‰ =ń?E¤£Y‚‰‰é!‰‰Ť¯ÿ‚‰‰ÿ³‰‰Ť£Ù‚‰‰™-‰‰Ť+•„I¤o„«¢‰‰ƒÓ!‰‰ń?Ǥ£Ù‚‰‰…‹w‰‰Ť§#‚‰‰¹«‰‰™¹¤ai‚‰‰£ÿ‰‰™¹¤e3‚‰‰m‰‰™¹¤ai‚‰‰׉‰™¹¤ai‚‰‰M‰‰™¹¤ai Ÿ‰í¤Ÿ‚‰‰ ÿ]‰‰¤Ÿ‚‰‰wՉ‰¤Ÿ‚‰‰O‰‰¤›M‚‰‰‰‰¤Ÿ‚‰‰…×7‰‰Ë¹¢‰‰‰Ý߉‰Ë¹ Ÿ‰í¢‰‰…W]‰‰K÷ã‰í¢‰‰‰Ý߉‰Kñ Ÿ‰í¢‰‰…W]‰‰ÍÕ÷ã‰í¢‰‰‰Ý߉‰MÉ Ÿ‰í¢‰‰…W]‰‰Ï­÷ã‰í¢‰‰‰Ý߉‰O Ÿ‰í¢‰‰…W]‰‰Oå÷ã‰í¢‰‰‰Ý߉‰ÁÙ Ÿ‰í¢‰‰…W]‰‰A½÷ã‰í¢‰‰‰Ý߉‰Ã‘ Ÿ‰í¢‰‰…W]‰‰Ãõ÷ã‰í¢‰‰‰Ý߉‰Cé Ÿ‰í¢‰‰…W]‰‰ÅÍ÷ã‰í¢‰‰‰Ý߉‰E¡ Ÿ‰í¢‰‰…W]‰‰Ç…÷ã‰í¢‰‰‰Ý߉‰Çù Ÿ‰í¢‰‰…W]‰‰GÝ÷ã‰í¢‰‰‰Ý߉‰Ù± Ÿ‰í¢‰‰…W]‰‰Y•÷ã‰í¢‰‰‰Ý߉‰Û‰‚‰‰‹K…‰‰Ûí¤5‚‰‰w㉉Ûí¢‰‰EÁ‰‰Û킉‰åõ‰‰Ûí¢‰‰ƒ%c‰‰Ûí÷ã‰í¢‰‰c[Á‚‰‰Å»‰‰[Á¢‰‰÷݉‰[Á‚‰‰‹w‰‰[Á¤Ëc Ÿ‰í¤5‚‰‰w㉉Ý¥¢‰‰EÁ‰‰Ý¥‚‰‰åõ‰‰Ý¥¢‰‰ƒ%c‰‰Ý¥÷ã‰í¢‰‰c]™‚‰‰Å»‰‰]™¢‰‰÷݉‰]™‚‰‰‹w‰‰]™¤Ëc Ÿ‰í¤5‚‰‰w㉉]ý¢‰‰EÁ‰‰]ý‚‰‰åõ‰‰]ý¢‰‰ƒ%c‰‰]ý÷ã‰í¢‰‰cßт‰‰Ç©‰‰ßÑ¢‰‰‰I‰‰ßт‰‰‹w‰‰ßѤËc Ÿ‰í¤5‚‰‰ _‰‰_µ¢‰‰G¿‰‰_µ‚‰‰åõ‰‰_µ¢‰‰ƒ%c‰‰_µ÷ã‰í¢‰‰cÑ©‚‰‰Ù‰‰Ñ©¢‰‰‹§‰‰Ñ©‚‰‰‹w‰‰Ñ©¤Ëc Ÿ‰í¤5‚‰‰ ͉‰Q¢‰‰Y+‰‰Q‚‰‰åõ‰‰Q¢‰‰ƒ%c‰‰Q÷ã‰í¢‰‰cQႉ‰Ù‰‰Qᢉ‰‹§‰‰Qႉ‰‹w‰‰Qá¤Ëc Ÿ‰í¤5‚‰‰ 9‰‰ÓÅ¢‰‰[™‰‰Ół‰‰åõ‰‰ÓÅ¢‰‰ƒ%c‰‰ÓÅ÷ã‰í¢‰‰cS¹‚‰‰Y󉉏S¹¢‰‰‰‰S¹‚‰‰‹w‰‰S¹¤Ëc Ÿ‰í¤5‚‰‰‰‰Õ¢‰‰Ýu‰‰Õ‚‰‰åõ‰‰Õ¢‰‰ƒ%c‰‰Õ÷ã‰í¢‰‰cÕñ‚‰‰[o‰‰Õñ¢‰‰‰‰Õñ‚‰‰‹w‰‰Õñ¤Ëc Ÿ‰í¤5‚‰‰‰‰UÕ¢‰‰Ýu‰‰UՂ‰‰åõ‰‰UÕ¢‰‰ƒ%c‰‰UÕ÷ã‰í¢‰‰c×ɂ‰‰[o‰‰×É¢‰‰‰‰×ɂ‰‰‹w‰‰×ɤËc Ÿ‰í¤5‚‰‰‰‰W­¢‰‰Ýu‰‰W­‚‰‰åõ‰‰W­¢‰‰ƒ%c‰‰W­÷ã‰í¢‰‰cé‚‰‰]]‰‰é¢‰‰ÿ‰‰é‚‰‰‹w‰‰é¤Ëc 
Ÿ‰í¤5‚‰‰…‰‰é墉‰ßc‰‰é傉‰åõ‰‰é墉‰ƒ%c‰‰éå÷ã‰í¢‰‰ciق‰‰_ˉ‰iÙ¢‰‰k‰‰iق‰‰‹w‰‰iÙ¤Ëc Ÿ‰í¤5‚‰‰ƒq‰‰ë½¢‰‰Ñщ‰ë½‚‰‰åõ‰‰ë½¢‰‰ƒ%c‰‰ë½÷ã‰í¢‰‰ck‘‚‰‰_ˉ‰k‘¢‰‰k‰‰k‘‚‰‰‹w‰‰k‘¤Ëc Ÿ‰í¤5‚‰‰…kõ¢‰‰ÓM‰‰kõ‚‰‰åõ‰‰kõ¢‰‰ƒ%c‰‰kõ÷ã‰í¢‰‰cí邉‰Q'‰‰í颉‰ى‰í邉‰‹w‰‰íé¤Ëc Ÿ‰í¤5‚‰‰‡Ý‰‰mÍ¢‰‰Õ»‰‰m͂‰‰åõ‰‰mÍ¢‰‰ƒ%c‰‰mÍ÷ã‰í¢‰‰cï¡‚‰‰S•‰‰ï¡¢‰‰·‰‰ï¡‚‰‰‹w‰‰ï¡¤Ëc Ÿ‰í¤5‚‰‰™I‰‰o…¢‰‰Õ»‰‰o…‚‰‰åõ‰‰o…¢‰‰ƒ%c‰‰o…÷ã‰í¢‰‰coù‚‰‰S•‰‰où¢‰‰#‰‰où‚‰‰‹w‰‰où¤Ëc Ÿ‰í¤5‚‰‰™I‰‰áÝ¢‰‰Õ»‰‰á݂‰‰åõ‰‰áÝ¢‰‰ƒ%c‰‰áÝ÷ã‰í¢‰‰ca±‚‰‰U‰‰a±¢‰‰#‰‰a±‚‰‰‹w‰‰a±¤Ëc Ÿ‰í¤5‚‰‰™I‰‰ã•¢‰‰×‰‰ã•‚‰‰åõ‰‰ã•¢‰‰ƒ%c‰‰ã•÷ã‰í¢‰‰cc‰‚‰‰×‰‰c‰¢‰‰‘‰‰c‰‚‰‰‹w‰‰c‰¤Ëc Ÿ‰í¤5‚‰‰›§‰‰cí¢‰‰é‰‰c킉‰åõ‰‰cí¢‰‰ƒ%c‰‰cí÷ã‰í¢‰‰cåÁ‚‰‰é퉉åÁ¢‰‰ ‰‰åÁ‚‰‰‹w‰‰åÁ¤Ëc Ÿ‰í¤5‚‰‰‰‰e¥¢‰‰i󉉏e¥‚‰‰åõ‰‰e¥¢‰‰ƒ%c‰‰e¥÷ã‰í¢‰‰cç™‚‰‰é퉉ç™¢‰‰ ‰‰ç™‚‰‰‹w‰‰ç™¤Ëc Ÿ‰í¤5‚‰‰Ÿ‰‰çý¢‰‰ko‰‰çý‚‰‰åõ‰‰çý¢‰‰ƒ%c‰‰çý÷ã‰í¢‰‰cgт‰‰ëY‰‰gÑ¢‰‰{‰‰gт‰‰‹w‰‰gѤËc Ÿ‰í¤5‚‰‰ÿ‰‰ùµ¢‰‰ko‰‰ùµ‚‰‰åõ‰‰ùµ¢‰‰ƒ%c‰‰ùµ÷ã‰í¢‰‰cy©‚‰‰ëY‰‰y©¢‰‰Ÿé‰‰y©‚‰‰‹w‰‰y©¤Ëc Ÿ‰í¤5‚‰‰ÿ‰‰û¢‰‰ko‰‰û‚‰‰åõ‰‰û¢‰‰ƒ%c‰‰û÷ã‰í¢‰‰cûႉ‰í·‰‰ûᢉ‰Ÿé‰‰ûႉ‰‹w‰‰ûá¤Ëc Ÿ‰í¤5‚‰‰ÿ‰‰{Å¢‰‰m݉‰{ł‰‰åõ‰‰{Å¢‰‰ƒ%c‰‰{Å÷ã‰í¢‰‰cý¹‚‰‰ï¥‰‰ý¹¢‰‰‘E‰‰ý¹‚‰‰‹w‰‰ý¹¤Ëc Ÿ‰í¤5‚‰‰k‰‰}¢‰‰oˉ‰}‚‰‰åõ‰‰}¢‰‰ƒ%c‰‰}÷ã‰í¢‰‰c}ñ‚‰‰á‰‰}ñ¢‰‰“³‰‰}ñ‚‰‰‹w‰‰}ñ¤Ëc Ÿ‰í¤5‚‰‰ى‰ÿÕ¢‰‰a'‰‰ÿՂ‰‰åõ‰‰ÿÕ¢‰‰ƒ%c‰‰ÿÕ÷ã‰í¢‰‰cɂ‰‰á‰‰É¢‰‰“³‰‰ɂ‰‰‹w‰‰ɤËc Ÿ‰í¤5‚‰‰·‰‰ñ­¢‰‰c•‰‰ñ­‚‰‰åõ‰‰ñ­¢‰‰ƒ%c‰‰ñ­÷ã‰í¢‰‰cq‚‰‰ã‰‰q¢‰‰•¡‰‰q‚‰‰‹w‰‰q¤Ëc Ÿ‰í¤5‚‰‰#‰‰q墉‰e‰‰q傉‰åõ‰‰q墉‰ƒ%c‰‰qå÷ã‰í¢‰‰cóق‰‰c{‰‰óÙ¢‰‰—‰‰óق‰‰‹w‰‰óÙ¤Ëc Ÿ‰í¤5‚‰‰)‘‰‰s½¢‰‰e‰‰s½‚‰‰åõ‰‰s½¢‰‰ƒ%c‰‰s½÷ã‰í¢‰‰cõ‘‚‰‰c{‰‰õ‘¢‰‰©‹‰‰õ‘‚‰‰‹w‰‰õ‘¤Ëc Ÿ‰í¤5‚‰‰)‘‰‰õõ¢‰‰e‰‰õõ‚‰‰åõ‰‰õõ¢‰‰ƒ%c‰‰õõ÷ã‰í¢‰‰cu邉‰ei‰‰u颉‰©‹‰‰u邉‰‹w‰‰ué¤Ëc Ÿ‰í¤5‚‰‰)‘‰‰÷Í¢‰‰ç‰‰÷͂‰‰åõ‰‰÷Í¢‰‰ƒ%c‰‰÷Í÷ã‰í¢‰‰cw¡‚‰‰glj‰w¡¢‰‰)g‰‰w¡‚‰‰‹w‰‰w¡¤Ëc Ÿ‰í¤5‚‰‰+ ‰‰‰…¢‰‰ù퉉‰…‚‰‰åõ‰‰‰…¢‰‰ƒ%c‰‰‰…÷ã‰í¢‰‰c‰ù‚‰‰glj‰‰ù¢‰‰)g‰‰‰ù‚‰‰‹w‰‰‰ù¤Ëc Ÿ‰í¤5‚‰‰­{‰‰ Ý¢‰‰ûY‰‰ ݂‰‰åõ‰‰ Ý¢‰‰ƒ%c‰‰ Ý÷ã‰í¢‰‰c‹±‚‰‰y3‰‰‹±¢‰‰+U‰‰‹±‚‰‰‹w‰‰‹±¤Ëc Ÿ‰í¤5‚‰‰¯é‰‰ •¢‰‰ý·‰‰ •‚‰‰åõ‰‰ •¢‰‰ƒ%c‰‰ 
•÷ã‰í¢‰‰c‰‚‰‰{¡‰‰‰¢‰‰-ɉ‰‚‰‰‹w‰‰‰¤Ëc Ÿ‰í¤5‚‰‰¡E‰‰í¢‰‰ý·‰‰í‚‰‰åõ‰‰í¢‰‰ƒ%c‰‰í÷ã‰í¢‰‰c Á‚‰‰{¡‰‰ Á¢‰‰/?‰‰ Á‚‰‰‹w‰‰ Á¤Ëc Ÿ‰í¤5‚‰‰¡E‰‰¥¢‰‰ý·‰‰¥‚‰‰åõ‰‰¥¢‰‰ƒ%c‰‰¥÷ã‰í¢‰‰c™‚‰‰}Ÿ‰‰™¢‰‰/?‰‰™‚‰‰‹w‰‰™¤Ëc Ÿ‰í¤5‚‰‰¡E‰‰ý¢‰‰ÿ¥‰‰ý‚‰‰åõ‰‰ý¢‰‰ƒ%c‰‰ý÷ã‰í¢‰‰cÑ‚‰‰ ‰‰Ñ¢‰‰!­‰‰Ñ‚‰‰‹w‰‰Ñ¤Ëc Ÿ‰í¤5‚‰‰£³‰‰µ¢‰‰ñ‰‰µ‚‰‰åõ‰‰µ¢‰‰ƒ%c‰‰µ÷ã‰í¢‰‰cƒ©‚‰‰ñù‰‰ƒ©¢‰‰#›‰‰ƒ©‚‰‰‹w‰‰ƒ©¤Ëc Ÿ‰í¤5‚‰‰¥¡‰‰¢‰‰ó‰‰‚‰‰åõ‰‰¢‰‰ƒ%c‰‰÷ã‰í¢‰‰cႉ‰ñù‰‰ᢉ‰#›‰‰ႉ‰‹w‰‰á¤Ëc Ÿ‰í¤5‚‰‰§‰‰…Å¢‰‰s{‰‰…Å‚‰‰åõ‰‰…Å¢‰‰ƒ%c‰‰…Å÷ã‰í¢‰‰c¹‚‰‰óU‰‰¹¢‰‰¥w‰‰¹‚‰‰‹w‰‰¹¤Ëc Ÿ‰í¤5‚‰‰¹‹‰‰‡¢‰‰s{‰‰‡‚‰‰åõ‰‰‡¢‰‰ƒ%c‰‰‡÷ã‰í¢‰‰c‡ñ‚‰‰óU‰‰‡ñ¢‰‰§å‰‰‡ñ‚‰‰‹w‰‰‡ñ¤Ëc Ÿ‰í¤5‚‰‰¹‹‰‰Õ¢‰‰s{‰‰Ղ‰‰åõ‰‰Õ¢‰‰ƒ%c‰‰Õ÷ã‰í¢‰‰c™É‚‰‰õC‰‰™É¢‰‰§å‰‰™É‚‰‰‹w‰‰™É¤Ëc Ÿ‰í¤5‚‰‰¹‹‰‰­¢‰‰ui‰‰­‚‰‰åõ‰‰­¢‰‰ƒ%c‰‰­÷ã‰í¢‰‰c›‚‰‰÷±‰‰›¢‰‰¹Q‰‰›‚‰‰‹w‰‰›¤Ëc Ÿ‰í¤5‚‰‰9g‰‰›å¢‰‰wlj‰›å‚‰‰åõ‰‰›å¢‰‰ƒ%c‰‰›å÷ã‰í¢‰‰cق‰‰‰-‰‰Ù¢‰‰»O‰‰ق‰‰‹w‰‰Ù¤Ëc Ÿ‰í¤5‚‰‰;U‰‰½¢‰‰ 3‰‰½‚‰‰åõ‰‰½¢‰‰ƒ%c‰‰½÷ã‰í¢‰‰c‘‚‰‰‰-‰‰‘¢‰‰»O‰‰‘‚‰‰‹w‰‰‘¤Ëc Ÿ‰í¤5‚‰‰=ɉõ¢‰‰ ¡‰‰õ‚‰‰åõ‰‰õ¢‰‰ƒ%c‰‰õ÷ã‰í¢‰‰cŸé‚‰‰‹›‰‰Ÿé¢‰‰½½‰‰Ÿé‚‰‰‹w‰‰Ÿé¤Ëc Ÿ‰í¤5‚‰‰??‰‰Í¢‰‰ ¡‰‰͂‰‰åõ‰‰Í¢‰‰ƒ%c‰‰Í÷ã‰í¢‰‰c‘¡‚‰‰‹›‰‰‘¡¢‰‰¿)‰‰‘¡‚‰‰‹w‰‰‘¡¤Ëc Ÿ‰í¤5‚‰‰1­‰‰…¢‰‰ Ÿ‰‰…‚‰‰åõ‰‰…¢‰‰ƒ%c‰‰…÷ã‰í¢‰‰cù‚‰‰‰‰‰ù¢‰‰±‡‰‰ù‚‰‰‹w‰‰ù¤Ëc Ÿ‰í¤5‚‰‰1­‰‰“Ý¢‰‰ Ÿ‰‰“Ý‚‰‰åõ‰‰“Ý¢‰‰ƒ%c‰‰“Ý÷ã‰í¢‰‰c±‚‰‰ e‰‰±¢‰‰±‡‰‰±‚‰‰‹w‰‰±¤Ëc Ÿ‰í¤5‚‰‰1­‰‰••¢‰‰ ‰‰••‚‰‰åõ‰‰••¢‰‰ƒ%c‰‰••÷ã‰í¢‰‰c‰‚‰‰Ӊ‰‰¢‰‰1õ‰‰‰‚‰‰‹w‰‰‰¤Ëc Ÿ‰í¤5‚‰‰3›‰‰í¢‰‰ù‰‰킉‰åõ‰‰í¢‰‰ƒ%c‰‰í÷ã‰í¢‰‰c—Á‚‰‰Ӊ‰—Á¢‰‰1õ‰‰—Á‚‰‰‹w‰‰—Á¤Ëc Ÿ‰í¤5‚‰‰µw‰‰¥¢‰‰ƒU‰‰¥‚‰‰åõ‰‰¥¢‰‰ƒ%c‰‰¥÷ã‰í¢‰‰c©™‚‰‰O‰‰©™¢‰‰3a‰‰©™‚‰‰‹w‰‰©™¤Ëc Ÿ‰í¤5‚‰‰·å‰‰©ý¢‰‰ƒU‰‰©ý‚‰‰åõ‰‰©ý¢‰‰ƒ%c‰‰©ý÷ã‰í¢‰‰c)т‰‰O‰‰)Ñ¢‰‰5߉‰)т‰‰‹w‰‰)ѤËc Ÿ‰í¤5‚‰‰ÉQ‰‰«µ¢‰‰…C‰‰«µ‚‰‰åõ‰‰«µ¢‰‰ƒ%c‰‰«µ÷ã‰í¢‰‰c+©‚‰‰=‰‰+©¢‰‰7K‰‰+©‚‰‰‹w‰‰+©¤Ëc Ÿ‰í¤5‚‰‰ÉQ‰‰­¢‰‰…C‰‰­‚‰‰åõ‰‰­¢‰‰ƒ%c‰‰­÷ã‰í¢‰‰c­á‚‰‰«‰‰­á¢‰‰7K‰‰­á‚‰‰‹w‰‰­á¤Ëc Ÿ‰í¤5‚‰‰ÉQ‰‰-Å¢‰‰‡±‰‰-ł‰‰åõ‰‰-Å¢‰‰ƒ%c‰‰-Å÷ã‰í¢‰‰c¯¹‚‰‰‰‰¯¹¢‰‰I9‰‰¯¹‚‰‰‹w‰‰¯¹¤Ëc Ÿ‰í¤5‚‰‰ËO‰‰/¢‰‰™-‰‰/‚‰‰åõ‰‰/¢‰‰ƒ%c‰‰/÷ã‰í¢‰‰c/ñ‚‰‰‰‰/ñ¢‰‰I9‰‰/ñ‚‰‰‹w‰‰/ñ¤Ëc Ÿ‰í¤5‚‰‰ͽ‰‰¡Õ¢‰‰››‰‰¡Õ‚‰‰åõ‰‰¡Õ¢‰‰ƒ%c‰‰¡Õ÷ã‰í¢‰‰c!ɂ‰‰™õ‰‰!É¢‰‰K—‰‰!ɂ‰‰‹w‰‰!ɤËc 
Ÿ‰í¤5‚‰‰Ï)‰‰£­¢‰‰‰‰‰£­‚‰‰åõ‰‰£­¢‰‰ƒ%c‰‰£­÷ã‰í¢‰‰c#‚‰‰›ã‰‰#¢‰‰M‰‰#‚‰‰‹w‰‰#¤Ëc Ÿ‰í¤5‚‰‰Á‡‰‰#墉‰‰‰‰#傉‰åõ‰‰#墉‰ƒ%c‰‰#å÷ã‰í¢‰‰c¥Ù‚‰‰›ã‰‰¥Ù¢‰‰Ïñ‰‰¥Ù‚‰‰‹w‰‰¥Ù¤Ëc Ÿ‰í¤5‚‰‰Á‡‰‰%½¢‰‰‰‰‰%½‚‰‰åõ‰‰%½¢‰‰ƒ%c‰‰%½÷ã‰í¢‰‰c§‘‚‰‰_‰‰§‘¢‰‰Á§‘‚‰‰‹w‰‰§‘¤Ëc Ÿ‰í¤5‚‰‰Aõ‰‰§õ¢‰‰e‰‰§õ‚‰‰åõ‰‰§õ¢‰‰ƒ%c‰‰§õ÷ã‰í¢‰‰c'邉‰ŸÍ‰‰'颉‰Á'邉‰‹w‰‰'é¤Ëc Ÿ‰í¤5‚‰‰Aõ‰‰¹Í¢‰‰Ӊ‰¹Í‚‰‰åõ‰‰¹Í¢‰‰ƒ%c‰‰¹Í÷ã‰í¢‰‰c9¡‚‰‰‘9‰‰9¡¢‰‰Ã[‰‰9¡‚‰‰‹w‰‰9¡¤Ëc Ÿ‰í¤5‚‰‰Ca‰‰»…¢‰‰O‰‰»…‚‰‰åõ‰‰»…¢‰‰ƒ%c‰‰»…÷ã‰í¢‰‰c»ù‚‰‰‘9‰‰»ù¢‰‰Ã[‰‰»ù‚‰‰‹w‰‰»ù¤Ëc Ÿ‰í¤5‚‰‰E߉‰;Ý¢‰‰=‰‰;݂‰‰åõ‰‰;Ý¢‰‰ƒ%c‰‰;Ý÷ã‰í¢‰‰c½±‚‰‰“‰‰½±¢‰‰Åɉ‰½±‚‰‰‹w‰‰½±¤Ëc Ÿ‰í¤5‚‰‰GK‰‰=•¢‰‰=‰‰=•‚‰‰åõ‰‰=•¢‰‰ƒ%c‰‰=•÷ã‰í¢‰‰c¿‰‚‰‰“‰‰¿‰¢‰‰Ç%‰‰¿‰‚‰‰‹w‰‰¿‰¤Ëc Ÿ‰í¤5‚‰‰GK‰‰¿í¢‰‰=‰‰¿í‚‰‰åõ‰‰¿í¢‰‰ƒ%c‰‰¿í÷ã‰í¢‰‰c?Á‚‰‰•…‰‰?Á¢‰‰Ù‰‰?Á‚‰‰‹w‰‰?Á¤Ëc Ÿ‰í¤5‚‰‰Y9‰‰±¥¢‰‰«‰‰±¥‚‰‰åõ‰‰±¥¢‰‰ƒ%c‰‰±¥÷ã‰í¢‰‰c1™‚‰‰q‰‰1™¢‰‰Ù‰‰1™‚‰‰‹w‰‰1™¤Ëc Ÿ‰í¤5‚‰‰Y9‰‰1ý¢‰‰‰‰1ý‚‰‰åõ‰‰1ý¢‰‰ƒ%c‰‰1ý÷ã‰í¢‰‰c³Ñ‚‰‰³Ñ¢‰‰ہ‰‰³Ñ‚‰‰‹w‰‰³Ñ¤Ëc Ÿ‰í¤5‚‰‰[—‰‰3µ¢‰‰©õ‰‰3µ‚‰‰åõ‰‰3µ¢‰‰ƒ%c‰‰3µ÷ã‰í¢‰‰cµ©‚‰‰µ©¢‰‰ہ‰‰µ©‚‰‰‹w‰‰µ©¤Ëc Ÿ‰í¤5‚‰‰]‰‰5¢‰‰«ã‰‰5‚‰‰åõ‰‰5¢‰‰ƒ%c‰‰5÷ã‰í¢‰‰c5ႉ‰)݉‰5ᢉ‰[}‰‰5ႉ‰‹w‰‰5á¤Ëc Ÿ‰í¤5‚‰‰ßñ‰‰·Å¢‰‰«ã‰‰·Å‚‰‰åõ‰‰·Å¢‰‰ƒ%c‰‰·Å÷ã‰í¢‰‰c7¹‚‰‰)݉‰7¹¢‰‰]뉉7¹‚‰‰‹w‰‰7¹¤Ëc Ÿ‰í¤5‚‰‰Ñɝ¢‰‰­_‰‰ɝ‚‰‰åõ‰‰ɝ¢‰‰ƒ%c‰‰ɝ÷ã‰í¢‰‰cÉñ‚‰‰+I‰‰Éñ¢‰‰_ى‰Éñ‚‰‰‹w‰‰Éñ¤Ëc Ÿ‰í¤5‚‰‰ÑIÕ¢‰‰­_‰‰IՂ‰‰åõ‰‰IÕ¢‰‰ƒ%c‰‰IÕ÷ã‰í¢‰‰cËɂ‰‰-§‰‰ËÉ¢‰‰_ى‰Ëɂ‰‰‹w‰‰ËɤËc Ÿ‰í¤5‚‰‰ÑK­¢‰‰¯Í‰‰K­‚‰‰åõ‰‰K­¢‰‰ƒ%c‰‰K­÷ã‰í¢‰‰c́‚‰‰/‰‰́¢‰‰Q5‰‰́‚‰‰‹w‰‰́¤Ëc Ÿ‰í¤5‚‰‰Ó[‰‰Í墉‰¡9‰‰Í傉‰åõ‰‰Í墉‰ƒ%c‰‰Íå÷ã‰í¢‰‰cMق‰‰/‰‰MÙ¢‰‰Q5‰‰Mق‰‰‹w‰‰MÙ¤Ëc Ÿ‰í¤5‚‰‰Õɉ‰Ͻ¢‰‰£‰‰Ͻ‚‰‰åõ‰‰Ͻ¢‰‰ƒ%c‰‰Ͻ÷ã‰í¢‰‰cO‘‚‰‰!‰‰O‘¢‰‰S£‰‰O‘‚‰‰‹w‰‰O‘¤Ëc Ÿ‰í¤5‚‰‰×%‰‰Oõ¢‰‰£‰‰Oõ‚‰‰åõ‰‰Oõ¢‰‰ƒ%c‰‰Oõ÷ã‰í¢‰‰cÁ邉‰!‰‰Á颉‰U‰‰Á邉‰‹w‰‰Áé¤Ëc Ÿ‰í¤5‚‰‰é‰‰AÍ¢‰‰¥…‰‰A͂‰‰åõ‰‰AÍ¢‰‰ƒ%c‰‰AÍ÷ã‰í¢‰‰cႉ‰£ÿ‰‰ᢉ‰W ‰‰ႉ‰‹w‰‰á¤Ëc Ÿ‰í¤5‚‰‰é‰‰C…¢‰‰¥…‰‰C…‚‰‰åõ‰‰C…¢‰‰ƒ%c‰‰C…÷ã‰í¢‰‰cCù„Éu¢‰‰éû‰‰Cù‚‰‰‹w‰‰Cù¤Ëc Ÿ‰í¤5‚‰‰끉‰ÅÝ¢‰‰%q‰‰Å݄7 ¢‰‰ƒ%c‰‰ÅÝ÷ã‰í¢‰‰cE±„Ë㢉‰éû‰‰E±‚‰‰‹w‰‰E±¤Ëc Ÿ‰í¤5‚‰‰끉‰Ǖ¢‰‰'Ǖ„5Ÿ¢‰‰ƒ%c‰‰Ǖ÷ã‰í¢‰‰cG‰„ÍÑ¢‰‰ëW‰‰G‰‚‰‰‹w‰‰G‰¤Ëc Ÿ‰í¤5‚‰‰k}‰‰Gí¢‰‰9݉‰Gí„3¡¢‰‰ƒ%c‰‰Gí÷ã‰í¢‰‰cÙÁ„ÍÑ¢‰‰ëW‰‰ÙÁ‚‰‰‹w‰‰ÙÁ¤Ëc Ÿ‰í¤5‚‰‰m뉉Y¥¢‰‰9݉‰Y¥„3¡¢‰‰ƒ%c‰‰Y¥÷ã‰í¢‰‰cۙ„ÍÑ¢‰‰íʼn‰ۙ‚‰‰‹w‰‰ۙ¤Ëc 
Ÿ‰í¤5‚‰‰oى‰Ûý¢‰‰;I‰‰Ûý„13¢‰‰ƒ%c‰‰Ûý÷ã‰í¢‰‰c[фÏM¢‰‰ﳉ‰[т‰‰‹w‰‰[ѤËc Ÿ‰í¤5‚‰‰oى‰ݵ¢‰‰;I‰‰ݵ„13¢‰‰ƒ%c‰‰ݵ÷ã‰í¢‰‰c]©„Á»¢‰‰á/‰‰]©‚‰‰‹w‰‰]©¤Ëc Ÿ‰í¤5‚‰‰a5‰‰ߍ¢‰‰=§‰‰ߍ„?Ç¢‰‰ƒ%c‰‰ߍ÷ã‰í¢‰‰cßá„≉á/‰‰ßႉ‰‹w‰‰ßá¤Ëc Ÿ‰í¤5‚‰‰a5‰‰_Å¢‰‰?‰‰_ń=i¢‰‰ƒ%c‰‰_Å÷ã‰í¢‰‰cѹ„Å¢‰‰㝉‰ѹ‚‰‰‹w‰‰ѹ¤Ëc Ÿ‰í¤5‚‰‰c£‰‰Q¢‰‰1‰‰Q„;{¢‰‰ƒ%c‰‰Q÷ã‰í¢‰‰cQñ„Å¢‰‰㝉‰Qñ‚‰‰‹w‰‰Qñ¤Ëc Ÿ‰í¤5‚‰‰e‰‰ÓÕ¢‰‰³ÿ‰‰ÓՄ»¢‰‰ƒ%c‰‰ÓÕ÷ã‰í¢‰‰cSɄE󢉉å ‰‰Sɂ‰‰‹w‰‰SɤËc Ÿ‰í¤5‚‰‰g ‰‰Õ­¢‰‰³ÿ‰‰Õ­„»¢‰‰ƒ%c‰‰Õ­÷ã‰í¢‰‰cU„E󢉉eg‰‰U‚‰‰‹w‰‰U¤Ëc Ÿ‰í¤5‚‰‰ùû‰‰U墉‰µk‰‰U儹¢‰‰ƒ%c‰‰Uå÷ã‰í¢‰‰c×لGo¢‰‰gՉ‰×ق‰‰‹w‰‰×Ù¤Ëc Ÿ‰í¤5‚‰‰ùû‰‰W½¢‰‰µk‰‰W½„¹¢‰‰ƒ%c‰‰W½÷ã‰í¢‰‰c鑄YÝ¢‰‰yA‰‰鑂‰‰‹w‰‰鑤Ëc Ÿ‰í¤5‚‰‰ûW‰‰éõ¢‰‰·Ù‰‰éõ„§¥¢‰‰ƒ%c‰‰éõ÷ã‰í¢‰‰cié„[Ë¢‰‰yA‰‰i邉‰‹w‰‰ié¤Ëc Ÿ‰í¤5‚‰‰ûW‰‰ëÍ¢‰‰É·‰‰ë̈́¥·¢‰‰ƒ%c‰‰ëÍ÷ã‰í¢‰‰ck¡„[Ë¢‰‰yA‰‰k¡‚‰‰‹w‰‰k¡¤Ëc Ÿ‰í¤5‚‰‰ýʼn‰텢‰‰Ë#‰‰텄£Y¢‰‰ƒ%c‰‰í…÷ã‰í¢‰‰cíù„]'¢‰‰{¿‰‰íù‚‰‰‹w‰‰íù¤Ëc Ÿ‰í¤5‚‰‰ÿ³‰‰mÝ¢‰‰Ë#‰‰m݄£Y¢‰‰ƒ%c‰‰mÝ÷ã‰í¢‰‰cﱄ]'¢‰‰}­‰‰ﱂ‰‰‹w‰‰ﱤËc Ÿ‰í¤5‚‰‰ñ/‰‰o•¢‰‰͑‰‰o•„¡í¢‰‰ƒ%c‰‰o•÷ã‰í¢‰‰cቄ_•¢‰‰‰‰ቂ‰‰‹w‰‰ቤËc Ÿ‰í¤5‚‰‰ñ/‰‰áí¢‰‰͑‰‰áí„¡í¢‰‰ƒ%c‰‰áí÷ã‰í¢‰‰caÁ„Q¢‰‰ñ÷‰‰aÁ‚‰‰‹w‰‰aÁ¤Ëc Ÿ‰í¤5‚‰‰󝉉㥢‰‰Ï ‰‰㥄¯¢‰‰ƒ%c‰‰ã¥÷ã‰í¢‰‰cc™„Ó¢‰‰ñ÷‰‰c™‚‰‰‹w‰‰c™¤Ëc Ÿ‰í¤5‚‰‰󝉉cý¢‰‰O{‰‰cý„-¢‰‰ƒ%c‰‰cý÷ã‰í¢‰‰cåфÕí¢‰‰óc‰‰åт‰‰‹w‰‰åѤËc Ÿ‰í¤5‚‰‰õ ‰‰eµ¢‰‰A鉉eµ„+•¢‰‰ƒ%c‰‰eµ÷ã‰í¢‰‰c穄Õí¢‰‰óc‰‰穂‰‰‹w‰‰穤Ëc Ÿ‰í¤5‚‰‰ug‰‰g¢‰‰A鉉g„+•¢‰‰ƒ%c‰‰g÷ã‰í¢‰‰cgá„Õí¢‰‰õQ‰‰gႉ‰‹w‰‰gá¤Ëc Ÿ‰í¤5‚‰‰wՉ‰ùÅ¢‰‰CE‰‰ùń)'¢‰‰ƒ%c‰‰ùÅ÷ã‰í¢‰‰cy¹„×Y¢‰‰÷ω‰y¹‚‰‰‹w‰‰y¹¤Ëc Ÿ‰í¤5‚‰‰wՉ‰û¢‰‰CE‰‰û„)'¢‰‰ƒ%c‰‰û÷ã‰í¢‰‰cûñ„é·¢‰‰‰;‰‰ûñ‚‰‰‹w‰‰ûñ¤Ëc Ÿ‰í¤5‚‰‰ A‰‰{Õ¢‰‰E³‰‰{ՄË¢‰‰ƒ%c‰‰{Õ÷ã‰í¢‰‰cýɄ륢‰‰‹©‰‰ýɂ‰‰‹w‰‰ýɤËc Ÿ‰í¤5‚‰‰ ¿‰‰}­¢‰‰G¡‰‰}­„Ý¢‰‰ƒ%c‰‰}­÷ã‰í¢‰‰cÿ„í¢‰‰‹©‰‰ÿ‚‰‰‹w‰‰ÿ¤Ëc Ÿ‰í¤5‚‰‰ ¿‰‰ÿ墉‰Y‰‰ÿå„o¢‰‰ƒ%c‰‰ÿå÷ã‰í¢‰‰cلí¢‰‰‹©‰‰ق‰‰‹w‰‰Ù¤Ëc Ÿ‰í¤5‚‰‰ ­‰‰ñ½¢‰‰Y‰‰ñ½„o¢‰‰ƒ%c‰‰ñ½÷ã‰í¢‰‰cq‘„í¢‰‰‡‰‰q‘‚‰‰‹w‰‰q‘¤Ëc Ÿ‰í¤5‚‰‰‰‰qõ¢‰‰[‹‰‰qõ„󢉉ƒ%c‰‰qõ÷ã‰í¢‰‰cóé„‰‰ s‰‰ó邉‰‹w‰‰óé¤Ëc Ÿ‰í¤5‚‰‰÷‰‰sÍ¢‰‰Ýg‰‰s̈́‘¢‰‰ƒ%c‰‰sÍ÷ã‰í¢‰‰cõ¡„o{¢‰‰቉õ¡‚‰‰‹w‰‰õ¡¤Ëc Ÿ‰í¤5‚‰‰÷‰‰u…¢‰‰Ýg‰‰u…„‘¢‰‰ƒ%c‰‰u…÷ã‰í¢‰‰cuù„ai¢‰‰]‰‰uù‚‰‰‹w‰‰uù¤Ëc Ÿ‰í¤5‚‰‰ƒc‰‰÷Ý¢‰‰ßU‰‰÷݄Ÿ¢‰‰ƒ%c‰‰÷Ý÷ã‰í¢‰‰cw±„cÇ¢‰‰]‰‰w±‚‰‰‹w‰‰w±¤Ëc Ÿ‰í¤5‚‰‰ƒc‰‰‰•¢‰‰Ñɉ‰•„»¢‰‰ƒ%c‰‰‰•÷ã‰í¢‰‰c 
‰„cÇ¢‰‰]‰‰ ‰‚‰‰‹w‰‰ ‰¤Ëc Ÿ‰í¤5‚‰‰…Q‰‰ í¢‰‰Ñɉ 턝»¢‰‰ƒ%c‰‰ í÷ã‰í¢‰‰c‹Á„cÇ¢‰‰K‰‰‹Á‚‰‰‹w‰‰‹Á¤Ëc Ÿ‰í¤5‚‰‰‡Ï‰‰ ¥¢‰‰Ó?‰‰ ¥„›M¢‰‰ƒ%c‰‰ ¥÷ã‰í¢‰‰c™„e3¢‰‰¹‰‰™‚‰‰‹w‰‰™¤Ëc Ÿ‰í¤5‚‰‰™;‰‰ý¢‰‰Õ­‰‰ý„™Ñ¢‰‰ƒ%c‰‰ý÷ã‰í¢‰‰c фg¡¢‰‰‰‰ т‰‰‹w‰‰ ѤËc Ÿ‰í¤5‚‰‰™;‰‰µ¢‰‰Õ­‰‰µ„™Ñ¢‰‰ƒ%c‰‰µ÷ã‰í¢‰‰c©„yŸ¢‰‰ƒ‰‰©‚‰‰‹w‰‰©¤Ëc Ÿ‰í¤5‚‰‰›©‰‰¢‰‰כ‰‰„‡ã¢‰‰ƒ%c‰‰÷ã‰í¢‰‰cá„{ ¢‰‰ƒ‰‰á‚‰‰‹w‰‰á¤Ëc Ÿ‰í¤5‚‰‰›©‰‰Å¢‰‰Ww‰‰ń…u¢‰‰ƒ%c‰‰Å÷ã‰í¢‰‰cƒ¹„ýù¢‰‰›ñ‰‰ƒ¹‚‰‰‹w‰‰ƒ¹¤Ëc Ÿ‰í¤5‚‰‰‡‰‰¢‰‰i剉„™¢‰‰ƒ%c‰‰÷ã‰í¢‰‰cñ„ýù¢‰‰›ñ‰‰ñ‚‰‰‹w‰‰ñ¤Ëc Ÿ‰í¤5‚‰‰s‰‰…Õ¢‰‰i剉…Õ„™¢‰‰ƒ%c‰‰…Õ÷ã‰í¢‰‰cɄýù¢‰‰m‰‰ɂ‰‰‹w‰‰ɤËc Ÿ‰í¤5‚‰‰቉‡­¢‰‰kQ‰‰‡­„+¢‰‰ƒ%c‰‰‡­÷ã‰í¢‰‰c„ÿU¢‰‰ŸÛ‰‰‚‰‰‹w‰‰¤Ëc Ÿ‰í¤5‚‰‰቉墉‰kQ‰‰å„+¢‰‰ƒ%c‰‰å÷ã‰í¢‰‰c™Ù„ñC¢‰‰‘7‰‰™Ù‚‰‰‹w‰‰™Ù¤Ëc Ÿ‰í¤5‚‰‰]‰‰½¢‰‰mO‰‰½„=¢‰‰ƒ%c‰‰½÷ã‰í¢‰‰c›‘„󱢉‰“%‰‰›‘‚‰‰‹w‰‰›‘¤Ëc Ÿ‰í¤5‚‰‰K‰‰›õ¢‰‰o½‰‰›õ„ Á¢‰‰ƒ%c‰‰›õ÷ã‰í¢‰‰cé„õ-¢‰‰“%‰‰邉‰‹w‰‰é¤Ëc Ÿ‰í¤5‚‰‰K‰‰Í¢‰‰a)‰‰Í„ S¢‰‰ƒ%c‰‰Í÷ã‰í¢‰‰c¡„õ-¢‰‰“%‰‰¡‚‰‰‹w‰‰¡¤Ëc Ÿ‰í¤5‚‰‰¹‰‰Ÿ…¢‰‰a)‰‰Ÿ…„ S¢‰‰ƒ%c‰‰Ÿ…÷ã‰í¢‰‰cŸù„õ-¢‰‰•“‰‰Ÿù‚‰‰‹w‰‰Ÿù¤Ëc Ÿ‰í¤5‚‰‰‰‰Ý¢‰‰c‡‰‰݄ 碉‰ƒ%c‰‰Ý÷ã‰í¢‰‰c‘±„÷›¢‰‰—‰‰‘±‚‰‰‹w‰‰‘±¤Ëc Ÿ‰í¤5‚‰‰)ƒ‰‰•¢‰‰ƒ%c‰‰•÷ã‰í¢‰‰ý‰‰“‰‚‰‰‹w‰‰“‰¤Ëc Ÿ‰í¤5‚‰‰)ƒ‰‰“í¢‰‰ƒ%c‰‰“í÷ã‰í¢‰‰)뉉Á‚‰‰‹w‰‰Á¤Ëc Ÿ‰í¤5‚‰‰«ñ‰‰•¥¢‰‰ƒ%c‰‰•¥÷ã‰í¢‰‰)뉉™‚‰‰‹w‰‰™¤Ëc Ÿ‰í¤5‚‰‰«ñ‰‰ý¢‰‰ƒ%c‰‰ý÷ã‰í¢‰‰)뉉—Ñ‚‰‰‹w‰‰—ѤËc Ÿ‰í¤5‚‰‰­m‰‰µ¢‰‰ƒ%c‰‰µ÷ã‰í¢‰‰+G‰‰©©‚‰‰‹w‰‰©©¤Ëc Ÿ‰í¤5‚‰‰¯Û‰‰)¢‰‰ƒ%c‰‰)÷ã‰í¢‰‰-µ‰‰)ႉ‰‹w‰‰)á¤Ëc Ÿ‰í¤5‚‰‰¡7‰‰«Å¢‰‰ƒ%c‰‰«Å÷ã‰í¢‰‰/!‰‰+¹‚‰‰‹w‰‰+¹¤Ëc Ÿ‰í¤5‚‰‰¡7‰‰­¢‰‰ƒ%c‰‰­÷ã‰í¢‰‰!‰‰­ñ‚‰‰‹w‰‰­ñ¤Ëc Ÿ‰í¤5‚‰‰£%‰‰-Õ¢‰‰ƒ%c‰‰-Õ÷ã‰í¢‰‰#‰‰¯É‚‰‰‹w‰‰¯É¤Ëc Ÿ‰í¤5‚‰‰¥“‰‰/­¢‰‰ƒ%c‰‰/­÷ã‰í¢‰‰#‰‰¡‚‰‰‹w‰‰¡¤Ëc Ÿ‰í¤5‚‰‰¥“‰‰¡å¢‰‰ƒ%c‰‰¡å÷ã‰í¢‰‰#‰‰!ق‰‰‹w‰‰!Ù¤Ëc Ÿ‰í¤5‚‰‰§‰‰£½¢‰‰ƒ%c‰‰£½÷ã‰í¢‰‰¥y‰‰#‘‚‰‰‹w‰‰#‘¤Ëc Ÿ‰í¤5‚‰‰'ý‰‰#õ¢‰‰ƒ%c‰‰#õ÷ã‰í¢‰‰§×‰‰¥é‚‰‰‹w‰‰¥é¤Ëc Ÿ‰í¤5‚‰‰'ý‰‰%Í¢‰‰ƒ%c‰‰%Í÷ã‰í¢‰‰¹Å‰‰§¡‚‰‰‹w‰‰§¡¤Ëc Ÿ‰í¤5‚‰‰9뉉'…¢‰‰ƒ%c‰‰'…÷ã‰í¢‰‰»1‰‰'ù‚‰‰‹w‰‰'ù¤Ëc 
Ÿ‰í¤5‚‰‰;G‰‰¹Ý¢‰‰ƒ%c‰‰¹Ý÷ã‰í¢‰‰»1‰‰9±‚‰‰‹w‰‰9±¤Ëc Ÿ‰í¤5‚‰‰;G‰‰»•¢‰‰ƒ%c‰‰»•÷ã‰í¢‰‰»1‰‰;‰‚‰‰‹w‰‰;‰¤Ëc Ÿ‰í¤5‚‰‰=µ‰‰;í¢‰‰ƒ%c‰‰;í÷ã‰í¢‰‰½¯‰‰½Á‚‰‰‹w‰‰½Á¤Ëc Ÿ‰í¤5‚‰‰?!‰‰=¥¢‰‰ƒ%c‰‰=¥÷ã‰í¢‰‰¿‰‰¿™‚‰‰‹w‰‰¿™¤Ëc Ÿ‰í¤5‚‰‰?!‰‰¿ý¢‰‰ƒ%c‰‰¿ý÷ã‰í¢‰‰± ‰‰?т‰‰‹w‰‰?ѤËc Ÿ‰í¤5‚‰‰1‰‰±µ¢‰‰ƒ%c‰‰±µ÷ã‰í¢‰‰1牉1©‚‰‰‹w‰‰1©¤Ëc Ÿ‰í¤5‚‰‰3‰‰³¢‰‰ƒ%c‰‰³÷ã‰í¢‰‰1牉³á‚‰‰‹w‰‰³á¤Ëc Ÿ‰í¤5‚‰‰3‰‰3Å¢‰‰ƒ%c‰‰3Å÷ã‰í¢‰‰1牉µ¹‚‰‰‹w‰‰µ¹¤Ëc Ÿ‰í¤5‚‰‰µy‰‰5¢‰‰ƒ%c‰‰5÷ã‰í¢‰‰3S‰‰5ñ‚‰‰‹w‰‰5ñ¤Ëc Ÿ‰í¤5‚‰‰·×‰‰·Õ¢‰‰ƒ%c‰‰·Õ÷ã‰í¢‰‰5Á‰‰7ɂ‰‰‹w‰‰7ɤËc Ÿ‰í¤5‚‰‰Éʼn‰É­¢‰‰ƒ%c‰‰É­÷ã‰í¢‰‰7¿‰‰I‚‰‰‹w‰‰I¤Ëc Ÿ‰í¤5‚‰‰Éʼn‰I墉‰ƒ%c‰‰Iå÷ã‰í¢‰‰I+‰‰Ëق‰‰‹w‰‰ËÙ¤Ëc Ÿ‰í¤5‚‰‰Ë1‰‰K½¢‰‰ƒ%c‰‰K½÷ã‰í¢‰‰K™‰‰Í‘‚‰‰‹w‰‰Í‘¤Ëc Ÿ‰í¤5‚‰‰Í¯‰‰Íõ¢‰‰ƒ%c‰‰Íõ÷ã‰í¢‰‰K™‰‰M邉‰‹w‰‰Mé¤Ëc Ÿ‰í¤5‚‰‰Í¯‰‰ÏÍ¢‰‰ƒ%c‰‰ÏÍ÷ã‰í¢‰‰K™‰‰O¡‚‰‰‹w‰‰O¡¤Ëc Ÿ‰í¤5‚‰‰Ï‰‰Á…¢‰‰ƒ%c‰‰Á…÷ã‰í¢‰‰Íu‰‰Áù‚‰‰‹w‰‰Áù¤Ëc Ÿ‰í¤5‚‰‰Á ‰‰AÝ¢‰‰ƒ%c‰‰AÝ÷ã‰í¢‰‰Ïc‰‰Ã±‚‰‰‹w‰‰Ã±¤Ëc Ÿ‰í¤5‚‰‰Á ‰‰C•¢‰‰ƒ%c‰‰C•÷ã‰í¢‰‰Áщ‰Å‰‚‰‰‹w‰‰Å‰¤Ëc Ÿ‰í¤5‚‰‰A牉Åí¢‰‰ƒ%c‰‰Åí÷ã‰í¢‰‰ÃM‰‰EÁ‚‰‰‹w‰‰EÁ¤Ëc Ÿ‰í¤5‚‰‰CS‰‰Ç¥¢‰‰ƒ%c‰‰Ç¥÷ã‰í¢‰‰ÃM‰‰G™‚‰‰‹w‰‰G™¤Ëc Ÿ‰í¤5‚‰‰CS‰‰Gý¢‰‰ƒ%c‰‰Gý÷ã‰í¢‰‰ÃM‰‰Ùт‰‰‹w‰‰ÙѤËc Ÿ‰í¤5‚‰‰EÁ‰‰Yµ¢‰‰ƒ%c‰‰Yµ÷ã‰í¢‰‰Å»‰‰Û©‚‰‰‹w‰‰Û©¤Ëcw!‰í¤7‹‚‰‰G¿‰‰[¢‰‰ƒ%c‰‰[÷ã‰í¢‰‰Ç©‰‰[ႉ‰‹w‰‰[á¤É÷ Ÿ‰í¤7‹‚‰‰G¿‰‰ÝÅ¢‰‰ƒ%c‰‰ÝÅ÷ã‰í¢‰‰Ù‰‰]¹‚‰‰‹w‰‰]¹¤É÷ Ÿ‰í¤7‹‚‰‰Y+‰‰ß¢‰‰ƒ%c‰‰ß÷ã‰í¢‰‰Y󉉁ßñ‚‰‰‹w‰‰ßñ¤É÷ Ÿ‰í¤7‹‚‰‰[™‰‰_Õ¢‰‰ƒ%c‰‰_Õ÷ã‰í¢‰‰[o‰‰Ñɂ‰‰‹w‰‰ÑɤÉ÷ Ÿ‰í¤7‹‚‰‰Ýu‰‰Q­¢‰‰ƒ%c‰‰Q­÷ã‰í¢‰‰[o‰‰Ó‚‰‰‹w‰‰Ó¤É÷ Ÿ‰í¤7‹‚‰‰Ýu‰‰Ó墉‰ƒ%c‰‰Óå÷ã‰í¢‰‰[o‰‰Sق‰‰‹w‰‰SÙ¤É÷ Ÿ‰í¤7‹‚‰‰ßc‰‰Õ½¢‰‰ƒ%c‰‰Õ½÷ã‰í¢‰‰]]‰‰U‘‚‰‰‹w‰‰U‘¤É÷ Ÿ‰í¤7‹‚‰‰Ñщ‰Uõ¢‰‰ƒ%c‰‰Uõ÷ã‰í¢‰‰_ˉ‰×邉‰‹w‰‰×é¤É÷ Ÿ‰í¤7‹‚‰‰Ñщ‰WÍ¢‰‰ƒ%c‰‰WÍ÷ã‰í¢‰‰Q'‰‰é¡‚‰‰‹w‰‰é¡¤É÷ Ÿ‰í¤7‹‚‰‰ÓM‰‰i…¢‰‰ƒ%c‰‰i…÷ã‰í¢‰‰S•‰‰iù‚‰‰‹w‰‰iù¤É÷ Ÿ‰í¤7‹‚‰‰Õ»‰‰ëÝ¢‰‰ƒ%c‰‰ëÝ÷ã‰í¢‰‰S•‰‰k±‚‰‰‹w‰‰k±¤É÷ Ÿ‰í¤7‹‚‰‰Õ»‰‰í•¢‰‰ƒ%c‰‰í•÷ã‰í¢‰‰S•‰‰m‰‚‰‰‹w‰‰m‰¤É÷ Ÿ‰í¤7‹‚‰‰×‰‰mí¢‰‰ƒ%c‰‰mí÷ã‰í¢‰‰U‰‰ïÁ‚‰‰‹w‰‰ïÁ¤É÷ Ÿ‰í¤7‹‚‰‰é‰‰o¥¢‰‰ƒ%c‰‰o¥÷ã‰í¢‰‰×‰‰á™‚‰‰‹w‰‰á™¤É÷ 
Ÿ‰í¤7‹‚‰‰é‰‰áý¢‰‰ƒ%c‰‰áý÷ã‰í¢‰‰é퉉aт‰‰‹w‰‰aѤÉ÷ Ÿ‰í¤7‹‚‰‰i󉉁㵢‰‰ƒ%c‰‰ãµ÷ã‰í¢‰‰ëY‰‰c©‚‰‰‹w‰‰c©¤É÷ Ÿ‰í¤7‹‚‰‰ko‰‰å¢‰‰ƒ%c‰‰å÷ã‰í¢‰‰ëY‰‰åႉ‰‹w‰‰åá¤É÷ Ÿ‰í¤7‹‚‰‰ko‰‰eÅ¢‰‰ƒ%c‰‰eÅ÷ã‰í¢‰‰ëY‰‰ç¹‚‰‰‹w‰‰ç¹¤É÷ Ÿ‰í¤7‹‚‰‰m݉‰g¢‰‰ƒ%c‰‰g÷ã‰í¢‰‰í·‰‰gñ‚‰‰‹w‰‰gñ¤É÷w!‰í¢‰‰ ‰)‰‰ùՂ‰‰oˉ‰ùÕ¢‰‰ƒ%c‰‰ùÕ÷ã‰í¢‰‰ï¥‰‰yɂ‰‰‹w‰‰yÉ¢‰‰‹·•‰‰yÉ Ÿ‰í¢‰‰ ‰)‰‰û­‚‰‰oˉ‰û­¢‰‰ƒ%c‰‰û­÷ã‰í¢‰‰á‰‰{‚‰‰‹w‰‰{¢‰‰‹·•‰‰{ Ÿ‰í¢‰‰ ‰)‰‰{傉‰a'‰‰{墉‰ƒ%c‰‰{å÷ã‰í¢‰‰ã‰‰ýق‰‰ ‰q‰‰ýÙ¢‰‰‹·•‰‰ýÙ Ÿ‰í¢‰‰ ‹‰‰}½‚‰‰c•‰‰}½¢‰‰ƒ%c‰‰}½÷ã‰í¢‰‰c{‰‰ÿ‘‚‰‰ ‰q‰‰ÿ‘¢‰‰‹·•‰‰ÿ‘ Ÿ‰í¢‰‰ ‹‰‰ÿõ‚‰‰e‰‰ÿõ¢‰‰ƒ%c‰‰ÿõ÷ã‰í¢‰‰c{‰‰邉‰ ‰q‰‰颉‰‹·•‰‰é Ÿ‰í¢‰‰ ‹‰‰ñ͂‰‰e‰‰ñÍ¢‰‰ƒ%c‰‰ñÍ÷ã‰í¢‰‰c{‰‰q¡‚‰‰ ‰q‰‰q¡¢‰‰‹·•‰‰q¡ Ÿ‰í¢‰‰ ‹‰‰ó…‚‰‰ç‰‰ó…¢‰‰ƒ%c‰‰ó…÷ã‰í¢‰‰ei‰‰óù‚‰‰ ‰q‰‰óù¢‰‰‹·•‰‰óù Ÿ‰í¢‰‰ ‹‰‰s݂‰‰ù퉉sÝ¢‰‰ƒ%c‰‰sÝ÷ã‰í¢‰‰glj‰õ±‚‰‰ ‰q‰‰õ±¢‰‰‹·•‰‰õ± Ÿ‰í¢‰‰ ‹‰‰u•‚‰‰ù퉉u•¢‰‰ƒ%c‰‰u•÷ã‰í¢‰‰y3‰‰÷‰‚‰‰ ‰q‰‰÷‰¢‰‰‹·•‰‰÷‰ Ÿ‰í¢‰‰ ‹‰‰÷킉‰ûY‰‰÷í¢‰‰ƒ%c‰‰÷í÷ã‰í¢‰‰{¡‰‰wÁ‚‰‰ ‰q‰‰wÁ¢‰‰‹·•‰‰wÁ Ÿ‰í¢‰‰ ‹‰‰‰¥‚‰‰ý·‰‰‰¥¢‰‰ƒ%c‰‰‰¥÷ã‰í¢‰‰{¡‰‰ ™‚‰‰ ‰q‰‰ ™¢‰‰‹·•‰‰ ™ Ÿ‰í¢‰‰ ‹‰‰ ý‚‰‰ý·‰‰ ý¢‰‰ƒ%c‰‰ ý÷ã‰í¢‰‰{¡‰‰‹Ñ‚‰‰ ‰q‰‰‹Ñ¢‰‰‹·•‰‰‹Ñw¡‰í¢‰‰ ‹‰‰ µ‚‰‰ÿ¥‰‰ µ¢‰‰ƒ%c‰‰ µ÷ã‰í¢‰‰}Ÿ‰‰©‚‰‰ ‰q‰‰©¢‰‰‹µ§‰‰© Ÿ‰í¢‰‰ ‹‰‰ ‚‰‰ñ‰‰ ¢‰‰ƒ%c‰‰ ÷ã‰í¢‰‰ ‰‰ ႉ‰ ‰q‰‰ ᢉ‰‹µ§‰‰ á Ÿ‰í¢‰‰ ‹‰‰Å‚‰‰ñ‰‰Å¢‰‰ƒ%c‰‰Å÷ã‰í¢‰‰ñù‰‰¹‚‰‰ ‰q‰‰¹¢‰‰‹µ§‰‰¹ Ÿ‰í¢‰‰ ‹‰‰‚‰‰ó‰‰¢‰‰ƒ%c‰‰÷ã‰í¢‰‰óU‰‰ñ‚‰‰ ‰q‰‰ñ¢‰‰‹µ§‰‰ñ Ÿ‰í¢‰‰ ‹‰‰Ղ‰‰s{‰‰Õ¢‰‰ƒ%c‰‰Õ÷ã‰í¢‰‰õC‰‰ƒÉ‚‰‰ ‹ï‰‰ƒÉ¢‰‰‹µ§‰‰ƒÉ Ÿ‰í¢‰‰ õ‰‰­‚‰‰ui‰‰­¢‰‰ƒ%c‰‰­÷ã‰í¢‰‰õC‰‰…‚‰‰ ‹ï‰‰…¢‰‰‹µ§‰‰… Ÿ‰í¢‰‰ õ‰‰…傉‰ui‰‰…墉‰ƒ%c‰‰…å÷ã‰í¢‰‰õC‰‰ق‰‰ ‹ï‰‰Ù¢‰‰‹µ§‰‰Ù Ÿ‰í¢‰‰ õ‰‰‡½‚‰‰wlj‰‡½¢‰‰ƒ%c‰‰‡½÷ã‰í¢‰‰÷±‰‰‘‚‰‰ ‹ï‰‰‘¢‰‰‹µ§‰‰‘ Ÿ‰í¢‰‰ õ‰‰õ‚‰‰wlj‰õ¢‰‰ƒ%c‰‰õ÷ã‰í¢‰‰‰-‰‰™é‚‰‰ ‹ï‰‰™é¢‰‰‹³I‰‰™é Ÿ‰í¢‰‰ õ‰‰͂‰‰ 3‰‰Í¢‰‰ƒ%c‰‰Í÷ã‰í¢‰‰‹›‰‰›¡‚‰‰ ‹ï‰‰›¡¢‰‰‹³I‰‰›¡ Ÿ‰í¢‰‰ õ‰‰…‚‰‰ ¡‰‰…¢‰‰ƒ%c‰‰…÷ã‰í¢‰‰‰‰‰ù‚‰‰ ‹ï‰‰ù¢‰‰‹³I‰‰ù Ÿ‰í¢‰‰ õ‰‰Ý‚‰‰ Ÿ‰‰Ý¢‰‰ƒ%c‰‰Ý÷ã‰í¢‰‰‰‰‰±‚‰‰ ‹ï‰‰±¢‰‰‹³I‰‰± Ÿ‰í¢‰‰ õ‰‰Ÿ•‚‰‰ Ÿ‰‰Ÿ•¢‰‰ƒ%c‰‰Ÿ•÷ã‰í¢‰‰‰‰‰‰‚‰‰ ‹ï‰‰‰¢‰‰‹³I‰‰‰ Ÿ‰í¢‰‰ a‰‰킉‰ ‰‰í¢‰‰ƒ%c‰‰í÷ã‰í¢‰‰ e‰‰‘Á‚‰‰ [‰‰‘Á¢‰‰‹³I‰‰‘Á Ÿ‰í¢‰‰ a‰‰¥‚‰‰ 
‰‰¥¢‰‰ƒ%c‰‰¥÷ã‰í¢‰‰Ӊ‰“™‚‰‰ [‰‰“™¢‰‰‹³I‰‰“™ Ÿ‰í¢‰‰ a‰‰“ý‚‰‰ù‰‰“ý¢‰‰ƒ%c‰‰“ý÷ã‰í¢‰‰O‰‰т‰‰ [‰‰Ñ¢‰‰‹±Ý‰‰Ñ Ÿ‰í¢‰‰ a‰‰•µ‚‰‰ƒU‰‰•µ¢‰‰ƒ%c‰‰•µ÷ã‰í¢‰‰=‰‰©‚‰‰ [‰‰©¢‰‰‹±Ý‰‰© Ÿ‰í¢‰‰ a‰‰—‚‰‰…C‰‰—¢‰‰ƒ%c‰‰—÷ã‰í¢‰‰=‰‰—ႉ‰ [‰‰—ᢉ‰‹±Ý‰‰—á Ÿ‰í¢‰‰ a‰‰ł‰‰‡±‰‰Å¢‰‰ƒ%c‰‰Å÷ã‰í¢‰‰«‰‰©¹‚‰‰ [‰‰©¹¢‰‰‹±Ý‰‰©¹ Ÿ‰í¢‰‰ a‰‰)‚‰‰‡±‰‰)¢‰‰ƒ%c‰‰)÷ã‰í¢‰‰«‰‰)ñ‚‰‰ [‰‰)ñ¢‰‰‹±Ý‰‰)ñ Ÿ‰í¢‰‰ ߉‰«Õ‚‰‰™-‰‰«Õ¢‰‰ƒ%c‰‰«Õ÷ã‰í¢‰‰‰‰+ɂ‰‰ É‰‰+É¢‰‰‹±Ý‰‰+É Ÿ‰í¢‰‰ ߉‰­­‚‰‰™-‰‰­­¢‰‰ƒ%c‰‰­­÷ã‰í¢‰‰™õ‰‰-‚‰‰ É‰‰-¢‰‰‹¿o‰‰- Ÿ‰í¢‰‰ ߉‰-傉‰››‰‰-墉‰ƒ%c‰‰-å÷ã‰í¢‰‰›ã‰‰¯Ù‚‰‰ É‰‰¯Ù¢‰‰‹¿o‰‰¯Ù Ÿ‰í¢‰‰ ߉‰/½‚‰‰‰‰‰/½¢‰‰ƒ%c‰‰/½÷ã‰í¢‰‰›ã‰‰¡‘‚‰‰ É‰‰¡‘¢‰‰‹¿o‰‰¡‘ Ÿ‰í¢‰‰ ߉‰¡õ‚‰‰e‰‰¡õ¢‰‰ƒ%c‰‰¡õ÷ã‰í¢‰‰_‰‰!邉‰ É‰‰!颉‰‹¿o‰‰!é Ÿ‰í¢‰‰ ͉‰£Í‚‰‰e‰‰£Í¢‰‰ƒ%c‰‰£Í÷ã‰í¢‰‰_‰‰#¡‚‰‰ §‰‰#¡¢‰‰‹¿o‰‰#¡w¡‰í¢‰‰ ͉‰¥…‚‰‰Ӊ‰¥…¢‰‰ƒ%c‰‰¥…÷ã‰í¢‰‰ŸÍ‰‰¥ù‚‰‰ §‰‰¥ù¢‰‰‹½q‰‰¥ù Ÿ‰í¢‰‰ ͉‰%݂‰‰Ӊ‰%Ý¢‰‰ƒ%c‰‰%Ý÷ã‰í¢‰‰‘9‰‰§±‚‰‰ §‰‰§±¢‰‰‹½q‰‰§± Ÿ‰í¢‰‰ ͉‰'•‚‰‰O‰‰'•¢‰‰ƒ%c‰‰'•÷ã‰í¢‰‰“‰‰¹‰‚‰‰ §‰‰¹‰¢‰‰‹½q‰‰¹‰ Ÿ‰í¢‰‰ ͉‰¹í‚‰‰=‰‰¹í¢‰‰ƒ%c‰‰¹í÷ã‰í¢‰‰•…‰‰9Á‚‰‰ ƒ‰‰9Á¢‰‰‹½q‰‰9Á Ÿ‰í¢‰‰ 9‰‰»¥‚‰‰«‰‰»¥¢‰‰ƒ%c‰‰»¥÷ã‰í¢‰‰•…‰‰;™‚‰‰ ƒ‰‰;™¢‰‰‹½q‰‰;™w!‰í¢‰‰ 9‰‰;ý‚‰‰«‰‰;ý¢‰‰ƒ%c‰‰;ý÷ã‰í¢‰‰•…‰‰½Ñ‚‰‰ ƒ‰‰½Ñ¢‰‰‹;…‰‰½Ñ Ÿ‰í¢‰‰ 9‰‰=µ‚‰‰‰‰=µ¢‰‰ƒ%c‰‰=µ÷ã‰í¢‰‰q‰‰¿©‚‰‰ ƒ‰‰¿©¢‰‰‹;…‰‰¿© Ÿ‰í¢‰‰ 9‰‰?‚‰‰‰‰?¢‰‰ƒ%c‰‰?÷ã‰í¢‰‰?ႉ‰ …‰‰?ᢉ‰‹;…‰‰?á Ÿ‰í¢‰‰ —‰‰±Å‚‰‰©õ‰‰±Å¢‰‰ƒ%c‰‰±Å÷ã‰í¢‰‰)݉‰1¹‚‰‰ …‰‰1¹¢‰‰‹;…‰‰1¹ Ÿ‰í¢‰‰ —‰‰³‚‰‰«ã‰‰³¢‰‰ƒ%c‰‰³÷ã‰í¢‰‰+I‰‰³ñ‚‰‰ …‰‰³ñ¢‰‰‹9‰‰³ñ Ÿ‰í¢‰‰ —‰‰3Ղ‰‰­_‰‰3Õ¢‰‰ƒ%c‰‰3Õ÷ã‰í¢‰‰+I‰‰µÉ‚‰‰ …‰‰µÉ¢‰‰‹9‰‰µÉ Ÿ‰í¢‰‰ ‰‰5­‚‰‰¯Í‰‰5­¢‰‰ƒ%c‰‰5­÷ã‰í¢‰‰-§‰‰·‚‰‰ }‰‰·¢‰‰‹9‰‰· Ÿ‰í¢‰‰ ‰‰·å‚‰‰¯Í‰‰·å¢‰‰ƒ%c‰‰·å÷ã‰í¢‰‰-§‰‰7ق‰‰ }‰‰7Ù¢‰‰‹9‰‰7Ùw!‰í¢‰‰ ‰‰ɽ‚‰‰¡9‰‰ɽ¢‰‰ƒ%c‰‰ɽ÷ã‰í¢‰‰/‰‰I‘‚‰‰ }‰‰I‘¢‰‰‹'»‰‰I‘ Ÿ‰í¢‰‰ ‰‰Iõ‚‰‰¡9‰‰Iõ¢‰‰ƒ%c‰‰Iõ÷ã‰í¢‰‰!‰‰Ë邉‰ k‰‰Ë颉‰‹'»‰‰Ëé Ÿ‰í¢‰‰ ™q‰‰K͂‰‰£‰‰KÍ¢‰‰ƒ%c‰‰KÍ÷ã‰í¢‰‰£ÿ‰‰Í¡‚‰‰ k‰‰Í¡¢‰‰‹%͉‰Í¡ Ÿ‰í¢‰‰ ™q‰‰M…‚‰‰¥…‰‰M…¢‰‰ƒ%c‰‰M…÷ã‰í¢‰‰£ÿ‰‰Mù‚‰‰ k‰‰Mù¢‰‰‹%͉‰Mù Ÿ‰í¢‰‰ ›ï‰‰Ï݂‰‰%q‰‰ÏÝ¢‰‰ƒ%c‰‰ÏÝ÷ã‰í¢‰‰¥k‰‰O±‚‰‰ ى‰O±¢‰‰‹%͉‰O± Ÿ‰í¢‰‰ ›ï‰‰Á•‚‰‰%q‰‰Á•¢‰‰ƒ%c‰‰Á•÷ã‰í¢‰‰¥k‰‰A‰‚‰‰ ى‰A‰¢‰‰‹%͉‰A‰w!‰í¢‰‰ [‰‰A킉‰'Aí¢‰‰ƒ%c‰‰Aí÷ã‰í¢‰‰§Ù‰‰ÃÁ‚‰‰ 5‰‰ÃÁ¢‰‰‹#_‰‰ÃÁ Ÿ‰í¢‰‰ [‰‰C¥‚‰‰'C¥¢‰‰ƒ%c‰‰C¥÷ã‰í¢‰‰¹·‰‰ř‚‰‰ 
5‰‰ř¢‰‰‹#_‰‰ř Ÿ‰í¢‰‰ [‰‰Åý‚‰‰9݉‰Åý¢‰‰ƒ%c‰‰Åý÷ã‰í¢‰‰»#‰‰Eт‰‰ £‰‰EÑ¢‰‰‹!㉉EÑ Ÿ‰í¢‰‰ ŸÉ‰‰ǵ‚‰‰;I‰‰ǵ¢‰‰ƒ%c‰‰ǵ÷ã‰í¢‰‰½‘‰‰G©‚‰‰ £‰‰G©¢‰‰‹!㉉G© Ÿ‰í¢‰‰ ŸÉ‰‰ٍ‚‰‰=§‰‰ٍ¢‰‰ƒ%c‰‰ٍ÷ã‰í¢‰‰½‘‰‰Ùႉ‰ £‰‰Ùᢉ‰‹!㉉Ùáw!‰í¢‰‰ ‘§‰‰Ył‰‰?‰‰YÅ¢‰‰ƒ%c‰‰YÅ÷ã‰í¢‰‰¿ ‰‰Û¹‚‰‰ ‘‰‰Û¹¢‰‰‹/u‰‰Û¹ Ÿ‰í¢‰‰ ‘§‰‰[‚‰‰?‰‰[¢‰‰ƒ%c‰‰[÷ã‰í¢‰‰¿ ‰‰[ñ‚‰‰ ‘‰‰[ñ¢‰‰‹/u‰‰[ñ Ÿ‰í¢‰‰ ‘§‰‰ÝՂ‰‰?‰‰ÝÕ¢‰‰ƒ%c‰‰ÝÕ÷ã‰í¢‰‰?{‰‰]ɂ‰‰  ‰‰]É¢‰‰‹¯‰‰]É Ÿ‰í¢‰‰ “‰‰ß­‚‰‰1‰‰ß­¢‰‰ƒ%c‰‰ß­÷ã‰í¢‰‰1鉉_‚‰‰  ‰‰_¢‰‰‹¯‰‰_ Ÿ‰í¢‰‰ “‰‰_傉‰³ÿ‰‰_墉‰ƒ%c‰‰_å÷ã‰í¢‰‰3E‰‰Ñق‰‰ “û‰‰ÑÙ¢‰‰‹­«‰‰ÑÙ Ÿ‰í¢‰‰ •‰‰Q½‚‰‰µk‰‰Q½¢‰‰ƒ%c‰‰Q½÷ã‰í¢‰‰3E‰‰ӑ‚‰‰ “û‰‰ӑ¢‰‰‹­«‰‰ӑ Ÿ‰í¢‰‰ }‰‰Óõ‚‰‰·Ù‰‰Óõ¢‰‰ƒ%c‰‰Óõ÷ã‰í¢‰‰5³‰‰S邉‰ •W‰‰S颉‰‹­«‰‰Séw!‰í¢‰‰ k‰‰Õ͂‰‰·Ù‰‰ÕÍ¢‰‰ƒ%c‰‰ÕÍ÷ã‰í¢‰‰5³‰‰U¡‚‰‰ —E‰‰U¡¢‰‰‹«=‰‰U¡w!‰í¢‰‰ k‰‰ׅ‚‰‰É·‰‰ׅ¢‰‰ƒ%c‰‰ׅ÷ã‰í¢‰‰7¡‰‰×ù‚‰‰ —E‰‰×ù¢‰‰‹©Á‰‰×ù Ÿ‰í¢‰‰ k‰‰W݂‰‰É·‰‰WÝ¢‰‰ƒ%c‰‰WÝ÷ã‰í¢‰‰I‰‰鱂‰‰ ©³‰‰é±¢‰‰‹©Á‰‰é± Ÿ‰í¢‰‰ )ى‰i•‚‰‰Ë#‰‰i•¢‰‰ƒ%c‰‰i•÷ã‰í¢‰‰K‹‰‰뉂‰‰ «/‰‰뉢‰‰‹—Ó‰‰ë‰ Ÿ‰í¢‰‰ +5‰‰ë킉‰͑‰‰ëí¢‰‰ƒ%c‰‰ëí÷ã‰í¢‰‰K‹‰‰kÁ‚‰‰ «/‰‰kÁ¢‰‰‹—Ó‰‰kÁw!‰í¢‰‰ -£‰‰í¥‚‰‰Ï ‰‰í¥¢‰‰ƒ%c‰‰í¥÷ã‰í¢‰‰Íg‰‰m™‚‰‰ ­‰‰m™¢‰‰‹•e‰‰m™w!‰í¢‰‰ /‘‰‰mý‚‰‰Ï ‰‰mý¢‰‰ƒ%c‰‰mý÷ã‰í¢‰‰Íg‰‰ïт‰‰ ¯‹‰‰ïÑ¢‰‰‹‰‰‰ïÑ Ÿ‰í¢‰‰ ! ‰‰oµ‚‰‰O{‰‰oµ¢‰‰ƒ%c‰‰oµ÷ã‰í¢‰‰ÏU‰‰ᩂ‰‰ /g‰‰á©¢‰‰‹‰‰‰á© Ÿ‰í¢‰‰ ! 
‰‰a‚‰‰O{‰‰a¢‰‰ƒ%c‰‰a÷ã‰í¢‰‰Áɉaႉ‰ !Չ‰aᢉ‰‹‰‰aá Ÿ‰í¢‰‰ £û‰‰ãł‰‰A鉉ãÅ¢‰‰ƒ%c‰‰ãÅ÷ã‰í¢‰‰Ã?‰‰c¹‚‰‰ #A‰‰c¹¢‰‰‹-‰‰c¹ Ÿ‰í¢‰‰ ¥W‰‰坂‰‰CE‰‰坢‰‰ƒ%c‰‰å÷ã‰í¢‰‰Å­‰‰åñ‚‰‰ %?‰‰åñ¢‰‰‹±‰‰åñ Ÿ‰í¢‰‰ §E‰‰eՂ‰‰E³‰‰eÕ¢‰‰ƒ%c‰‰eÕ÷ã‰í¢‰‰Å­‰‰çɂ‰‰ %?‰‰çÉ¢‰‰‹±‰‰çÉw!‰í¢‰‰ ¹³‰‰g­‚‰‰G¡‰‰g­¢‰‰ƒ%c‰‰g­÷ã‰í¢‰‰Ǜ‰‰ù‚‰‰ '­‰‰ù¢‰‰‹C‰‰ùw!‰í¢‰‰ »/‰‰ù傉‰G¡‰‰ù墉‰ƒ%c‰‰ùå÷ã‰í¢‰‰Ǜ‰‰yق‰‰ 9‰‰yÙ¢‰‰‹׉‰yÙ Ÿ‰í¢‰‰ »/‰‰û½‚‰‰G¡‰‰û½¢‰‰ƒ%c‰‰û½÷ã‰í¢‰‰Gw‰‰{‘‚‰‰ ½å‰‰{‘¢‰‰‹‡ ‰‰{‘ Ÿ‰í¢‰‰ ¿‹‰‰{õ‚‰‰Y‰‰{õ¢‰‰ƒ%c‰‰{õ÷ã‰í¢‰‰Y剉ý邉‰ ¿Q‰‰ý颉‰‹…Ÿ‰‰ýé Ÿ‰í¢‰‰ ?g‰‰}͂‰‰[‹‰‰}Í¢‰‰ƒ%c‰‰}Í÷ã‰í¢‰‰[Q‰‰ÿ¡‚‰‰ ±Ï‰‰ÿ¡¢‰‰‹ƒ!‰‰ÿ¡ Ÿ‰í¢‰‰ 1Չ‰…‚‰‰Ýg‰‰…¢‰‰ƒ%c‰‰…÷ã‰í¢‰‰[Q‰‰ù‚‰‰ ±Ï‰‰ù¢‰‰‹ƒ!‰‰ùu3‰í¢‰‰ 5?‰‰ñ݂‰‰ßU‰‰ñÝ¢‰‰ƒ%c‰‰ñÝ÷ã‰í¢‰‰]O‰‰q±‚‰‰ µ)‰‰q±¢‰‰‹Ç‰‰q±w!‰í¢‰‰ I‰‰󕂉‰ßU‰‰󕢉‰ƒ%c‰‰ó•÷ã‰í¢‰‰]O‰‰s‰‚‰‰ 7s‰‰s‰¢‰‰‹i‰‰s‰u3‰í¢‰‰ Íc‰‰s킉‰Ñɉsí¢‰‰ƒ%c‰‰sí÷ã‰í¢‰‰_½‰‰õÁ‚‰‰ K]‰‰õÁ¢‰‰‹ ‰‰õÁ Ÿ‰í¢‰‰ Íc‰‰u¥‚‰‰Ñɉu¥¢‰‰ƒ%c‰‰u¥÷ã‰í¢‰‰Q)‰‰÷™‚‰‰ O¹‰‰÷™¢‰‰ u¥‰‰÷™ Ÿ‰í¢‰‰ Áω‰÷ý‚‰‰Ó?‰‰÷ý¢‰‰ƒ%c‰‰÷ý÷ã‰í¢‰‰S‡‰‰wт‰‰ Cƒ‰‰wÑ¢‰‰ qY‰‰wÑ Ÿ‰í¢‰‰ Å©‰‰ƒ‰µ‚‰‰Õ­‰‰ƒ‰µ¢‰‰ƒ%c‰‰ƒ‰µ÷ã‰í¢‰‰S‡‰‰ƒ ©‚‰‰ Cƒ‰‰ƒ ©¢‰‰ qY‰‰ƒ ©uµ‰í¢‰‰ Gs‰‰ƒ‹‚‰‰כ‰‰ƒ‹¢‰‰ƒ%c‰‰ƒ‹÷ã‰í¢‰‰Õõ‰‰ƒ‹á‚‰‰ Çm‰‰ƒ‹á¢‰‰ }‰‰ƒ‹ásÇ‰í¢‰‰ ]K‰‰ƒ ł‰‰Ww‰‰ƒ Å¢‰‰ƒ%c‰‰ƒ Å÷ã‰í¢‰‰×a‰‰ƒ¹‚‰‰ Ý%‰‰ƒ¹¢‰‰ ù'‰‰ƒ¹uµ‰í¢‰‰ Sƒ‰‰ƒ ‚‰‰Ww‰‰ƒ ¢‰‰ƒ%c‰‰ƒ ÷ã‰í¢‰‰×a‰‰ƒ ñ‚‰‰ Qý‰‰ƒ ñ¢‰‰ å]‰‰ƒ ñ Ÿ‰í¢‰‰ Sƒ‰‰ƒÕ‚‰‰Ww‰‰ƒÕ¢‰‰ƒ%c‰‰ƒÕ÷ã‰í¢‰‰é߉‰ƒɂ‰‰ Wµ‰‰ƒÉ¢‰‰ m©‰‰ƒÉ Ÿ‰í¢‰‰ éۉ‰ƒ­‚‰‰i剉ƒ­¢‰‰ƒ%c‰‰ƒ­÷ã‰í¢‰‰ëK‰‰ƒ‚‰‰ ïy‰‰ƒ¢‰‰ Wщ‰ƒ Ÿ‰í¢‰‰ ቉ƒ傉‰kQ‰‰ƒ墉‰ƒ%c‰‰ƒå÷ã‰í¢‰‰ëK‰‰ƒƒÙ‚‰‰ ïy‰‰ƒƒÙ¢‰‰ Wщ‰ƒƒÙý‰í¢‰‰ y!‰‰ƒ½‚‰‰mO‰‰ƒ½¢‰‰ƒ%c‰‰ƒ½÷ã‰í¢‰‰í9‰‰ƒ…‘‚‰‰ ù‰‰ƒ…‘¢‰‰ ß¿‰‰ƒ…‘ý‰í¢‰‰ õ1‰‰ƒ…õ‚‰‰o½‰‰ƒ…õ¢‰‰ƒ%c‰‰ƒ…õ÷ã‰í¢‰‰‰ƒ邉‰ s+‰‰ƒ颉‰ E‰‰ƒéçY‰í¢‰‰‡u‰‰ƒ‡Í‚‰‰o½‰‰ƒ‡Í¢‰‰ƒ%c‰‰ƒ‡Í÷ã‰í¢‰‰‰ƒ¡‚‰‰o‰‰ƒ¡¢‰‰ ³G‰‰ƒ¡ Ÿ‰í¢‰‰‡u‰‰ƒ™…‚‰‰o½‰‰ƒ™…¢‰‰ƒ%c‰‰ƒ™…÷ã‰í¢‰‰á‰‰ƒ™ù‚‰‰-ý‰‰ƒ™ù¢‰‰ ­¹‰‰ƒ™ù Ÿ‰í¢‰‰/ƒ‰‰ƒ݂‰‰a)‰‰ƒÝ¢‰‰ƒ%c‰‰ƒÝ÷ã‰í¢‰‰añ‰‰ƒ›±‚‰‰-ý‰‰ƒ›±¢‰‰ ­¹‰‰ƒ›± Ÿ‰í¢‰‰/ƒ‰‰ƒ•‚‰‰c‡‰‰ƒ•¢‰‰ƒ%c‰‰ƒ•÷ã‰í¢‰‰cƒ‰‚‰‰-ý‰‰ƒ‰¢‰‰ ­¹‰‰ƒ‰ Ÿ‰í¢‰‰/ƒ‰‰ƒí‚‰‰åõ‰‰ƒí¢‰‰ƒ%c‰‰ƒí÷ã‰í¢‰‰cƒÁ‚‰‰-ý‰‰ƒÁ¢‰‰ ­¹‰‰ƒÁ Ÿ‰í¢‰‰/ƒ‰‰ƒŸ¥‚‰‰ça‰‰ƒŸ¥¤7‹÷ã‰í¤É÷‚‰‰-ý‰‰ƒ™¢‰‰ ­¹‰‰ƒ™ Ÿ‰í¢‰‰/ƒ‰‰ƒý‚‰‰ça‰‰ƒý¤7‹÷ã‰í¤É÷‚‰‰-ý‰‰ƒ‘Ñ¢‰‰ ­¹‰‰ƒ‘Ñ 
Ÿ‰í¢‰‰/ƒ‰‰ƒµ‚‰‰ça‰‰ƒµ¤7‹÷ã‰í¤Ëc‚‰‰-ý‰‰ƒ“©¢‰‰ ­¹‰‰ƒ“© Ÿ‰í¢‰‰/ƒ‰‰ƒ‚‰‰ù߉‰ƒ¤5÷ã‰í¤Íт‰‰-ý‰‰ƒᢉ‰ ­¹‰‰ƒá Ÿ‰í¢‰‰/ƒ‰‰ƒ•Å‚‰‰ûK‰‰ƒ•Ť3¡÷ã‰í¤Ïς‰‰-ý‰‰ƒ¹¢‰‰ ­¹‰‰ƒ¹ Ÿ‰í¢‰‰/ƒ‰‰ƒ—‚‰‰ý9‰‰ƒ—¤1³÷ã‰í¤Ïς‰‰-ý‰‰ƒ—ñ¢‰‰ ­¹‰‰ƒ—ñ Ÿ‰í¢‰‰/ƒ‰‰ƒՂ‰‰ÿ—‰‰ƒÕ¤?E÷ã‰í¤Á;‚‰‰-ý‰‰ƒ©É¢‰‰ ­¹‰‰ƒ©É Ÿ‰í¢‰‰/ƒ‰‰ƒ)­‚‰‰ñ‰‰ƒ)­¤=é÷ã‰í¤Ã©‚‰‰-ý‰‰ƒ«¢‰‰ ­¹‰‰ƒ« Ÿ‰í¢‰‰/ƒ‰‰ƒ«å‚‰‰ñ‰‰ƒ«å¤=é÷ã‰í¤Ã©‚‰‰-ý‰‰ƒ+Ù¢‰‰ ­¹‰‰ƒ+Ù Ÿ‰í¢‰‰/ƒ‰‰ƒ­½‚‰‰ñ‰‰ƒ­½¤=é÷ã‰í¤Å‚‰‰-ý‰‰ƒ-‘¢‰‰ ­¹‰‰ƒ-‘ Ÿ‰í¢‰‰/ƒ‰‰ƒ-õ‚‰‰qñ‰‰ƒ-õ¤;{÷ã‰í¤Es‚‰‰-ý‰‰ƒ¯é¢‰‰ ­¹‰‰ƒ¯é Ÿ‰í¢‰‰/ƒ‰‰ƒ/͂‰‰sƒ/ͤ» ÷ã‰í¤Es‚‰‰-ý‰‰ƒ¡¡¢‰‰ ­¹‰‰ƒ¡¡ Ÿ‰í¢‰‰/ƒ‰‰ƒ!…‚‰‰u[‰‰ƒ!…¤¹‘÷ã‰í¤Gႉ‰-ý‰‰ƒ!ù¢‰‰ ­¹‰‰ƒ!ù Ÿ‰í¢‰‰/ƒ‰‰ƒ£Ý‚‰‰wɉ‰ƒ£Ý¤§#÷ã‰í¤Y]‚‰‰-ý‰‰ƒ#±¢‰‰ ­¹‰‰ƒ#± Ÿ‰í¢‰‰/ƒ‰‰ƒ¥•‚‰‰wɉ‰ƒ¥•¤§#÷ã‰í¤Y]‚‰‰-ý‰‰ƒ%‰¢‰‰ ­¹‰‰ƒ%‰ Ÿ‰í¢‰‰/ƒ‰‰ƒ%킉‰wɉ‰ƒ%í¤§#÷ã‰í¤[˂‰‰-ý‰‰ƒ§Á¢‰‰ ­¹‰‰ƒ§Á Ÿ‰í¢‰‰/ƒ‰‰ƒ'¥‚‰‰ƒ %‰‰ƒ'¥¤¥·÷ã‰í¤]¹‚‰‰-ý‰‰ƒ¹™¢‰‰ ­¹‰‰ƒ¹™ Ÿ‰í¢‰‰/ƒ‰‰ƒ¹ý‚‰‰ƒ ‰‰ƒ¹ý¤£Ù÷ã‰í¤_‚‰‰-ý‰‰ƒ9Ñ¢‰‰ ­¹‰‰ƒ9Ñ Ÿ‰í¢‰‰/ƒ‰‰ƒ»µ‚‰‰ƒ ‰‰ƒ»µ¤¡k÷ã‰í¤_‚‰‰-ý‰‰ƒ;©¢‰‰ ­¹‰‰ƒ;© Ÿ‰í¢‰‰/ƒ‰‰ƒ½‚‰‰ƒ}‰‰ƒ½¤¯ÿ÷ã‰í¤Qƒ‚‰‰-ý‰‰ƒ½á¢‰‰ ­¹‰‰ƒ½áp‰dp‰Ô ©‰‰±%‰‰±%`
9d0c085dd06d44ef7956465c854333f503837ccd
27b4436fdbab36fd3d7963df408a2d259e8fd909
/R/internal.R
892096df7a8cbba0843c9637ed6b557043cb5ba6
[]
no_license
dklinges9/mcera5
962833c6b208d892ddb7e1ef1f9e80b9b7fa6140
a37074a74f78439118f90c2ee76ab5bafaf7f2ed
refs/heads/master
2023-07-23T11:09:26.529124
2023-07-10T23:13:41
2023-07-10T23:13:41
260,175,954
10
4
null
2023-09-05T13:51:41
2020-04-30T10:01:45
R
UTF-8
R
false
false
10,960
r
internal.R
#' function to calculate humidity from dew point temperature
#' @param tdew dewpoint temperature (°C)
#' @param tc air temperature (°C)
#' @param p pressure (Pa)
#' @return specific humidity (Kg/Kg)
#' @noRd
humfromdew <- function(tdew, tc, p) {
  pk <- p/1000
  # saturated vapour pressure at dewpoint / air temperature (Tetens formula)
  ea <- 0.6108 * exp(17.27 * tdew / (tdew + 237.3))
  e0 <- 0.6108 * exp(17.27 * tc/(tc + 237.3))
  s <- 0.622 * e0/pk
  hr <- (ea/e0) * 100   # relative humidity (%)
  hs <- (hr/100) * s    # specific humidity
  hs
}
#' function to apply a correction to ERA5 temperatures based on proximity to the
#' coast
#' @param tc air temperature (°C); assumed to be in whole days of hourly
#' values (length a multiple of 24) since the daily mean is taken over rows
#' of a 24-column matrix
#' @param landprop single numeric value indicating proportion of grid cell that
#' is land (0 = all sea, 1 = all land)
#' @param cor_fac correction factor to be applied (default to 1.285 for UK)
#' @return air temperature (°C)
#' @noRd
coastal_correct <- function(tc, landprop, cor_fac = 1.285) {
  # use TRUE rather than the reassignable alias T
  td <- matrix(tc, ncol = 24, byrow = TRUE)
  # daily mean repeated for each hour of that day
  tmean <- rep(apply(td, 1, mean), each = 24)
  # amplify the diurnal deviation more strongly for sea-dominated cells
  m <- (1 - landprop) * cor_fac + 1
  tdif <- (tc - tmean) * m
  tco <- tmean + tdif
  return(tco)
}
#' function to correct radiation for being an average over the hour rather than
#' on the hour
#' @param rad vector of radiation
#' @param tme vector of times
#' @param long longitude
#' @param lat latitude
#' @return vector of radiation
#' @noRd
rad_calc <- function(rad, tme, long, lat) {
  # clamp x into [mn, mx]
  bound <- function(x, mn = 0, mx = 1) {
    x[x > mx] <- mx
    x[x < mn] <- mn
    x
  }
  tme1h <- as.POSIXlt(tme, tz = "UTC", origin = "1970-01-01 00:00")
  # interpolate times to 10 mins
  tme10min <- as.POSIXlt(seq(from = as.POSIXlt(tme1h[1] - 3600),
                             to = as.POSIXlt(tme1h[length(tme1h)]),
                             by = "10 min"),
                         origin = "1970-01-01 00:00", tz = "UTC")
  csr10min <- microclima::clearskyrad(tme10min, lat = lat, long = long,
                                      merid = 0, dst = 0)
  # mean csr per hour (every 6 values)
  csr1h <- tibble::tibble(csr10min) %>%
    dplyr::group_by(group = gl(length(csr10min)/6, 6)) %>%
    dplyr::summarise(csr1h = mean(csr10min)) %>%
    .$csr1h
  # optical depth proxy: observed / clear-sky, bounded to [0, 1]
  # NOTE(review): csr1h can contain NA at night; NAs are zeroed below
  od <- bound(rad / csr1h)
  od[is.na(od)] <- 0
  # hourly time stamps on the half hour
  tme1h_2 <- as.POSIXlt(tme1h - 1800, tz = "UTC", origin = "1970-01-01 00:00")
  # length to interpolate to
  n <- length(tme1h_2) * 2
  # work out od values for every half hour
  od1h <- stats::spline(tme1h_2, od, n = n)$y
  # select just the ones on the hour
  od1h <- od1h[seq(2, length(od1h), 2)]
  # csr on the hour
  csr1h_2 <- microclima::clearskyrad(tme1h, lat = lat, long = long,
                                     merid = 0, dst = 0)
  # calculate corrected rad on the hour
  rad_out <- csr1h_2 * od1h
  rad_out[is.na(rad_out)] <- 0
  return(rad_out)
}
#' function to calculate the position of the 4 nearest neighbours to a point,
#' the xy distance and inverse weight of each.
#' @param long longitude
#' @param lat latitude
#' @return data frame of longitude, latitude, xy distance and inverse weight of
#' each neighbouring point
#' @noRd
focal_dist <- function(long, lat) {
  # round to nearest 0.25 (the ERA5 grid resolution)
  x_r <- plyr::round_any(long, .25)
  y_r <- plyr::round_any(lat, .25)
  # work out locations of the four neighbour points in the ERA5 dataset
  if (long >= x_r) {
    focal_x <- c(x_r, x_r, x_r + 0.25, x_r + 0.25)
  } else {
    focal_x <- c(x_r - 0.25, x_r - 0.25, x_r, x_r)
  }
  if (lat >= y_r) {
    focal_y <- c(y_r, y_r + 0.25, y_r, y_r + 0.25)
  } else {
    focal_y <- c(y_r - 0.25, y_r, y_r - 0.25, y_r)
  }
  # work out weighting based on dist between input & 4 surrounding points
  x_dist <- abs(long - focal_x)
  y_dist <- abs(lat - focal_y)
  xy_dist <- sqrt(x_dist^2 + y_dist^2)
  focal <- data.frame(x = focal_x, y = focal_y, xy_dist) %>%
    dplyr::mutate(., inverse_weight = 1/sum(1/(1/sum(xy_dist) * xy_dist)) *
                    1/(1/sum(xy_dist) * xy_dist))
  return(focal)
}
#' function to process relevant hourly climate data from an ERA5 nc to
#' a data frame
#' @param nc path to nc file downloaded with `request_era5`
#' @param long longitude
#' @param lat latitude
#' @param start_time start time for data required
#' @param end_time end time for data required
#' @param dtr_cor logical value indicating whether to apply a diurnal temperature
#' range correction to air temperature values. Default = `TRUE`.
#' @param dtr_cor_fac numeric value to be used in the diurnal temperature range
#' correction. Default = 1.
#' @return data frame of hourly climate variables
#' @noRd
nc_to_df <- function(nc, long, lat, start_time, end_time, dtr_cor = TRUE,
                     dtr_cor_fac = 1) {
  dat <- tidync::tidync(nc) %>%
    tidync::hyper_filter(longitude = longitude == long,
                         latitude = latitude == lat) %>%
    tidync::hyper_tibble() %>%
    # ERA5 time is hours since 1900-01-01; convert to readable times
    dplyr::mutate(., obs_time = lubridate::ymd_hms("1900:01:01 00:00:00") +
                    (time * 3600),
                  timezone = lubridate::tz(obs_time)) %>%
    dplyr::filter(., obs_time >= start_time & obs_time < end_time + 1) %>%
    dplyr::rename(., pressure = sp) %>%
    dplyr::mutate(., temperature = t2m - 273.15, # kelvin to celcius
                  lsm = dplyr::case_when(
                    lsm < 0 ~ 0,
                    lsm >= 0 ~ lsm),
                  temperature = dplyr::case_when(
                    dtr_cor == TRUE ~ coastal_correct(temperature, lsm,
                                                      dtr_cor_fac),
                    dtr_cor == FALSE ~ temperature),
                  humidity = humfromdew(d2m - 273.15, temperature, pressure),
                  windspeed = sqrt(u10^2 + v10^2),
                  windspeed = microclima::windheight(windspeed, 10, 2),
                  winddir = (atan2(u10, v10) * 180/pi + 180)%%360,
                  cloudcover = tcc * 100,
                  netlong = abs(msnlwrf) * 0.0036,
                  downlong = msdwlwrf * 0.0036,
                  uplong = netlong + downlong,
                  emissivity = downlong/uplong, # converted to MJ m-2 hr-1
                  jd = microclima::julday(lubridate::year(obs_time),
                                          lubridate::month(obs_time),
                                          lubridate::day(obs_time)),
                  si = microclima::siflat(lubridate::hour(obs_time),
                                          lat, long, jd, merid = 0)) %>%
    dplyr::mutate(., rad_dni = fdir * 0.000001,
                  rad_glbl = ssrd * 0.000001,
                  rad_glbl = rad_calc(rad_glbl, obs_time, long, lat), # fix hourly rad
                  rad_dni = rad_calc(rad_dni, obs_time, long, lat), # fix hourly rad
                  rad_dif = rad_glbl - rad_dni * si) %>%
    # converted to MJ m-2 hr-1 from J m-2 hr-1
    dplyr::mutate(., szenith = 90 - microclima::solalt(lubridate::hour(obs_time),
                                                       lat, long, jd,
                                                       merid = 0)) %>%
    dplyr::select(., obs_time, temperature, humidity, pressure, windspeed,
                  winddir, emissivity, cloudcover, netlong, uplong, downlong,
                  rad_dni, rad_dif, szenith, timezone)
  return(dat)
}
#' a function to process relevant precipitation data from an ERA5 nc to
#' a data frame
#' @param nc path to nc file downloaded with `request_era5`
#' @param long longitude
#' @param lat latitude
#' @param start_time start time for data required
#' @param end_time end time for data required
#' @return vector of daily precipitation values
#' @noRd
nc_to_df_precip <- function(nc, long, lat, start_time, end_time) {
  dat <- tidync::tidync(nc) %>%
    tidync::hyper_filter(longitude = longitude == long,
                         latitude = latitude == lat) %>%
    tidync::hyper_tibble() %>%
    # convert hours-since-1900 to readable times
    dplyr::mutate(., obs_time = lubridate::ymd_hms("1900:01:01 00:00:00") +
                    (time * 3600),
                  timezone = lubridate::tz(obs_time)) %>%
    dplyr::filter(., obs_time >= start_time & obs_time < end_time + 1) %>%
    dplyr::rename(., precipitation = tp) %>%
    dplyr::select(., obs_time, precipitation)
  return(dat)
}
#' creates a data frame of unique month and year pairs from input
#' start/end times
#' @param start_time start time
#' @param end_time end time
#' @return data frame of unique months and years
#' @noRd
uni_dates <- function(start_time, end_time) {
  tme <- seq(as.POSIXlt(start_time), as.POSIXlt(end_time), by = 1)
  mon <- lubridate::month(tme)
  yea <- lubridate::year(tme)
  df <- data.frame(mon, yea) %>%
    dplyr::distinct(.)
  return(df)
}
#' Combines a series of netCDFs that all have the same spatial extent and
#' set of variables
#' @param filenames a list of filenames for netCDFs you wish to combine
#' @param combined_name the name of the combined netCDF
#' @noRd
combine_netcdf <- function(filenames, combined_name) {
  files <- lapply(filenames, function(x) {
    ncdf4::nc_open(x)
  })
  # Pull out first file for reference specs
  nc <- files[[1]]
  # Create an empty list to populate
  vars_list <- vector(mode = "list", length = nc$nvars)
  data_list <- vector(mode = "list", length = nc$nvars)
  # One variable at a time (seq_along is safe when there are no variables)
  for (i in seq_along(names(nc$var))) {
    varname <- names(nc$var)[i]
    # Get the variable from each of the netCDFs
    vars_dat <- lapply(files, function(x) {
      ncdf4::ncvar_get(x, varname)
    })
    # Then bind all of the arrays together using abind, flexibly called via do.call
    data_list[[i]] <- do.call(abind::abind, list(... = vars_dat, along = 3))
    # To populate the time dimension, need to pull out the time values from each
    # netCDF
    timevals <- lapply(files, function(x) {
      x$dim$time$vals
    })
    # Create a netCDF variable
    vars_list[[i]] <- ncdf4::ncvar_def(
      name = varname,
      units = nc$var[varname][[varname]]$units,
      # Pull dimension names, units, and values from file1
      dim = list(
        # Longitude
        ncdf4::ncdim_def(nc$dim$longitude$name,
                         nc$dim$longitude$units,
                         nc$dim$longitude$vals),
        # Latitude
        ncdf4::ncdim_def(nc$dim$latitude$name,
                         nc$dim$latitude$units,
                         nc$dim$latitude$vals),
        # Time
        ncdf4::ncdim_def(nc$dim$time$name,
                         nc$dim$time$units,
                         # Combination of values of all files
                         do.call(c, timevals))
      ))
  }
  # Create a new file
  file_combined <- ncdf4::nc_create(
    # Filename from param combined_name
    filename = combined_name,
    # We need to define the variables here
    vars = vars_list)
  # And write to it (must write one variable at a time with ncdf4)
  for (i in seq_along(names(nc$var))) {
    ncdf4::ncvar_put(
      nc = file_combined,
      varid = names(nc$var)[i],
      vals = data_list[[i]])
  }
  # Finally, close the file
  ncdf4::nc_close(file_combined)
}
84f913d72932cb7c68f4a52a5f108b378f83311e
4846b5b3748b6724d7c379dae7572e9fa90a798d
/man/bindingContextDistanceCapR.Rd
32e76b560b88caa21093e6b8490ca984e8dbb5b3
[]
no_license
vbusa1/nearBynding
d225bcbdb1541b65c3f01604a1affd8ff51b068a
9ccf2b0e7fec87c426cf37fe45077d67abef210a
refs/heads/master
2023-04-07T19:01:47.323219
2021-07-30T17:39:58
2021-07-30T17:39:58
278,680,217
1
0
null
null
null
null
UTF-8
R
false
true
3,491
rd
bindingContextDistanceCapR.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bindingContextDistanceCapR.R
\name{bindingContextDistanceCapR}
\alias{bindingContextDistanceCapR}
\title{bindingContextDistanceCapR}
\usage{
bindingContextDistanceCapR(
  dir_stereogene_output = ".",
  CapR_prefix = "",
  protein_file,
  protein_file_input = NULL,
  dir_stereogene_output_2 = NULL,
  CapR_prefix_2 = "",
  protein_file_2,
  protein_file_input_2 = NULL,
  context = "all",
  range = c(-200, 200)
)
}
\arguments{
\item{dir_stereogene_output}{Directory of Stereogene output for first
protein. Default current directory.}

\item{CapR_prefix}{The prefix common to CapR output files of protein_file,
if applicable. Equivalent to output_prefix from runStereogeneOnCapR.
Default ""}

\item{protein_file}{A vector of strings with at least one protein file name
to be averaged for calculation of distance. File names must exclude
extensions such as ".bedGraph". All files in the list should be
experimental/biological replicates. Required.}

\item{protein_file_input}{A protein file name of background input to be
subtracted from protein_file signal. File name must exclude extension. Only
one input file is permitted. Optional.}

\item{dir_stereogene_output_2}{Directory of Stereogene output for second
protein. Default current directory.}

\item{CapR_prefix_2}{The prefix common to CapR output files of
protein_file_2, if applicable. Equivalent to output_prefix from
runStereogeneOnCapR. Default ""}

\item{protein_file_2}{Similar to protein_file. A second vector of at least
one protein file name to be averaged for calculation of distance. File names
must exclude extensions such as ".bedGraph". All files in the list should be
experimental/biological replicates. Required.}

\item{protein_file_input_2}{Similar to protein_file_input. A second protein
file name of background input to be subtracted from protein_file_2 signal.
File name must exclude extension. Only one input file is permitted.
Optional.} \item{context}{The RNA structure context being compared for the two protein file sets. Acceptable contexts include "all", which sums the distance of all six contexts, or any of the contexts individually ("bulge", "hairpin", "stem", "exterior", "multibranch", or "internal"). Default "all"} \item{range}{A vector of two integers denoting the range upstream and downstream of the center of protein binding to consider in the comparison. Ranges that are too small miss the holistic binding context, while large ranges amplify distal noise in the binding data. Cannot exceed wSize/2 from write_config. Default c(-200, 200)} } \value{ Wasserstein distance between the two protein file sets provided for the RNA structure context specified, minus the input binding signal if applicable } \description{ Calculate the Wasserstein distance between two replicates' or two proteins' binding contexts. } \note{ Wasserstein distance calculations are reciprocal, so it does not matter which protein is first or second so long as replicates and input files correspond to one another. } \examples{ ## load example StereoGene output get_outfiles() ## This boring example compares a protein's binding with itself for all ## contexts, therefore the distance is 0 bindingContextDistanceCapR(CapR_prefix = "chr4and5_3UTR", protein_file = "chr4and5_liftOver", CapR_prefix_2 = "chr4and5_3UTR", protein_file_2 = "chr4and5_liftOver") }
3ae1c1d65957cec0ea40ab60337e8d78c379854e
5f90bac8cc2cf136a3478a2290d9abde7dda745a
/R/normalize.GoldenGate.R
423ce1d5ae817c4717f6c4d944070d4ce9644460
[]
no_license
cran/LumiWCluster
277d23a646575b891d5218a5e69633ff052b50b7
a1272d73fd1bc89c29da5845d50c7e634b7f4a12
refs/heads/master
2020-04-22T03:14:46.756019
2010-09-02T00:00:00
2010-09-02T00:00:00
null
0
0
null
null
null
null
UTF-8
R
false
false
1,336
r
normalize.GoldenGate.R
#' Normalise GoldenGate methylation beta values for probe length and GC content.
#'
#' Betas are pulled strictly inside (0, 1), logit-transformed, adjusted with
#' fixed probe-length (L) and GC coefficients, and mapped back to the beta
#' scale with inv_logit().
#'
#' @param Probe_ID vector of probe identifiers, matched against
#'   design.file$Probe_ID (first match is used).
#' @param beta matrix (or vector) of beta values, probes x samples.
#' @param det.p.value matrix (or vector) of detection p-values, same layout
#'   as beta.
#' @param design.file data frame with columns Probe_ID, L and GC.
#' @param plot.option if TRUE, plot raw vs normalised betas via plotBAnorm().
#' @return list with norm.beta, norm.logitbeta, w (sample weights) and
#'   g (probe weights).
normalize.GoldenGate <- function(Probe_ID, beta, det.p.value, design.file,
                                 plot.option = FALSE) {
  # Coerce vector input to one-column matrices so the t() calls below work.
  if (is.vector(beta)) beta <- matrix(beta)
  if (is.vector(det.p.value)) det.p.value <- matrix(det.p.value)

  # Pull betas strictly inside (0, 1) so the logit is finite.
  beta[beta == 0] <- min(beta[beta > 0], 1e-3)/10
  beta[beta == 1] <- max(beta[beta < 1], 0.999) +
    (1 - max(beta[beta < 1], 0.999))/100

  # Vectorised probe lookup; replaces the original O(n*m) which() loop.
  matchID <- match(as.character(Probe_ID), as.character(design.file$Probe_ID))

  L <- design.file$L[matchID]
  GC <- design.file$GC[matchID]

  # Logit-transformed betas, samples x probes.
  y_raw <- log(t(beta)/(1 - t(beta)))

  # Sample (w) and probe (g) weights from median log detection p-values.
  w <- apply(log(t(det.p.value)), 1, median)
  w <- w/sum(w)
  g <- apply(log(t(det.p.value)), 2, median)
  g <- g/sum(g)

  # Winsorise probe length and GC content to the calibration range.
  L_new <- L
  L_new[which(L <= 44)] <- 44
  L_new[which(L >= 57)] <- 57
  GC_new <- GC
  GC_new[which(GC <= 0.4)] <- 0.4
  GC_new[which(GC >= 0.8)] <- 0.8

  # Fixed regression coefficients for the L/GC adjustment.
  L_coef <- 0.04458
  GC_coef <- -3.658

  # Subtract the mean-centred probe-level adjustment from every sample;
  # vectorised equivalent of the original row-by-row loop.
  adj <- L_coef * L_new + GC_coef * GC_new
  y <- sweep(y_raw, 2, adj - mean(adj))

  norm.beta <- t(inv_logit(y))
  if (plot.option) plotBAnorm(beta, norm.beta, design.file)
  return(list(norm.beta = norm.beta, norm.logitbeta = t(y), w = w, g = g))
}
6e7c1bdba722dd0fdba107c7c52e3f9b9d2f643a
eb46831a43d55a348e29989d827401f2eb49e30a
/liquididtyt.R
9107a6506d16dfdd5b84a36165aa8a02d04bea65
[]
no_license
bplloyd/R-risk-mgmt
32a0315bdd95790acb4c98cc87c52993a1cd8f9a
ad0e0f3596e5588fb4c483486e271fb98ad942cc
refs/heads/master
2020-12-13T09:19:32.536834
2016-08-03T22:08:14
2016-08-03T22:08:14
49,309,816
0
0
null
null
null
null
UTF-8
R
false
false
1,386
r
liquididtyt.R
# Pull daily liquidity-level exposures (level >= 3) per sub-advisor for fund
# 784 from SQL Server, then reshape to a wide xts series (one column per
# sub-advisor). Requires a trusted Windows connection to HAT-SQL-01.
qry <- "WITH Levels AS ( select v.DateReported , s.Abbreviation 'Name' , COALESCE(l.Liquidity, 99) 'Level' , COALESCE(v.MarketValue,0)/a.NetAssets 'Exposure' from v_FundSecs_FLASHREPORT AS v LEFT JOIN v_Assets_UFT AS a ON (v.Fund_UID = a.Fund_UID AND v.DateReported = a.DateReported) LEFT JOIN HAMF.Liquidity AS l ON ((v.SubAdvised_UID = l.SubAdvised_UID) AND (v.Security_UID = l.Security_UID) AND (l.DateReported = (SELECT MAX(l2.DateREported) FROM Hamf.Liquidity AS l2 WHERE l2.DateReported <= v.DateReported))) LEFT JOIN HAMF.SubAdvisors AS s ON v.SubAdvised_UID = s.SubAdvised_UID WHERE v.DateReported >= '1-22-2016' AND v.Fund_UID = 784 AND v.Asset_Type IN ('EQ', 'DERV', 'OP', 'FI') AND l.Liquidity >= 3 ) select l.DateReported , l.Name , l.[Level] , SUM(COALESCE(l.Exposure,0)) 'Exposure' from Levels as l GROUP BY l.DateReported , l.Name , l.[Level] ORDER BY l.DateReported , l.Name , l.[Level] DESC"

# library() errors loudly on a missing package, unlike require() which
# silently returns FALSE and lets the script fail later.
library(RODBC)
library(xts)
library(data.table)

# Trusted (Windows-auth) connection to the sandbox database.
cn <- odbcDriverConnect("driver={SQL Server}; server=HAT-SQL-01; database=Hatteras_Sandbox_Tools; trusted_connection=true")

lsd.liq <- data.table(sqlQuery(cn, qry))
# Close the connection as soon as the query is done (was leaked before).
odbcClose(cn)

# sqlQuery returns DateReported as a factor; generic as.Date dispatches to
# as.Date.factor (no need to call the method directly).
lsd.liq$DateReported <- as.Date(lsd.liq$DateReported)

# Sum exposure per date/sub-advisor, cast wide (one column per Name),
# then convert to an xts time series keyed on DateReported.
lsd.liq_xts <- as.xts(dcast.data.table(lsd.liq[, sum(Exposure), by = c("DateReported", "Name")],
                                       formula = DateReported ~ Name,
                                       value.var = "V1",
                                       fun.aggregate = sum))
5418eb7403d03b32b0761206690011f8f14839cb
a70d7e23b94219ac412a38f509e386c0c7a51bfe
/data-raw/app/utils.R
6b05b24bc580aacf0473f9a4eec6dc6f4f238d61
[]
no_license
nverno/moosedata
17a6867b567401972b7e9fb38f06dbca5608a15f
856482d12b4ac3812454969c28aafe7ae21fe576
refs/heads/master
2021-01-10T05:47:56.386212
2016-01-31T07:42:18
2016-01-31T07:42:18
46,290,372
0
0
null
null
null
null
UTF-8
R
false
false
4,053
r
utils.R
### utils.R ---
## Filename: utils.R
## Description: Helpers for authenticating against AFS (klog/tokens/unlog)
##              and loading data once a token is held.
## Author: Noah Peart
## Created: Sat Jan 23 20:37:49 2016 (-0500)
## Last-Updated: Wed Jan 27 23:08:23 2016 (-0500)
## By: Noah Peart
######################################################################

## Null-coalescing operator: return b when a is NULL, otherwise a.
"%||%" <- function(a, b) if (is.null(a)) b else a

################################################################################
##
## Some AFS utils
##
################################################################################

## Store the AFS token information (mutable session cache; fields: tokens, error)
afs_cache <- new.env()

##' Check that tokens and klog are available
##' Returns TRUE only when both programs resolve on the system PATH
##' (system2 exit status 0 for each).
afs_utils <- function() {
    progs <- c('klog', 'tokens')
    ## 'where' on Windows, 'type' elsewhere
    command <- switch(Sys.info()['sysname'], 'Windows'='where', 'type')
    all(unlist(lapply(progs, system2, command=command, stdout=FALSE)) == 0L)
}

##' Check the afs cache for tokens
afs_yes <- function() !is.null(afs_cache$tokens)

##' Load data. If can't connect to data, returns and empty data.table.
##' @title Load data
##' @param data name of dataset
## NOTE(review): `dkey` is not defined in this file -- presumably a
## package/global decryption key; confirm where it is set.
load_data <- function(data) {
    if (!afs_yes()) {
        warning("No AFS tokens, can't load data from AFS.")
        return( data.table(x=numeric()) )
    }
    dat <- sync.afs::get_data(data, sync.afs::get_afs(), dkey)
    ## upper-case all column names before returning
    setnames(dat, names(dat), toupper(names(dat)))
}

##' Parse strings of AFS tokens returned by system call to 'tokens'.
##' @title Parse AFS tokens
##' @param tokens character vector of tokens
##' @return \code{data.table} with token information
## Splits each token line on whitespace/'@' and picks out fixed fields:
## x[[2]] = user, x[[6]] = cell, x[8:10] = expiry date parts (']' stripped).
afs_parse_tokens <- function(tokens) {
    tokens <- strsplit(tokens, '\\s+|@')
    out <- lapply(tokens, function(x) {
        list(user=sub('([[:alnum:]]+).*', '\\1', x[[2]]),
             cell=x[[6]],
             expires=as.POSIXct(
                 paste(c(x[8:9], sub(']', '', x[[10]])), collapse=' '),
                 format="%b %d %H:%M"))
    })
    rbindlist(out)
}

##' Query system for tokens
##' @title Retrieve tokens from system
## Returns the matching 'tokens for afs@...' lines, or '' when none.
## Note: `inds` is assigned inside the && condition and reused below.
afs_tokens <- function() {
    response <- system2("tokens", stdout=TRUE, stderr=TRUE)
    has_token <- is.character(response) && any((inds <- grepl("tokens for afs@", response)))
    if (has_token) response[inds] else ''
}

##' logout of AFS
## Runs 'unlog', clears the cache, and invisibly returns success (exit 0).
afs_logout <- function() {
    res <- system2('unlog')
    afs_cache$tokens <- NULL
    afs_cache$error <- NULL
    invisible(res == 0L)
}

##' Update the AFS token cache. This could be either a new set of tokens, an error, or
##' an initial check for tokens with the system.
##' @title Update AFS cache
##' @param error Optional error message to cache.
##' @return NULL
afs_update_cache <- function(error) {
    token <- afs_tokens()
    if (!missing(error)) {
        ## error path: record the error; drop stale tokens if none remain
        afs_cache$error <- error
        if (!nzchar(token)) afs_cache$tokens <- NULL
        return( invisible() )
    }
    afs_cache$error <- NULL # wipe errors
    if (!nzchar(token)) {
        afs_cache$tokens <- NULL
        return( invisible() )
    }
    afs_cache$tokens <- afs_parse_tokens(token)
    invisible()
}

##' Submit AFS credentials
##' @title Submit AFS creds
##' @param user username
##' @param pwd password
##' @param cell cell
## Runs klog; empty stderr output is treated as success. The cache is
## refreshed either way. Returns TRUE/FALSE for success/failure.
afs_submit <- function(user, pwd, cell='northstar.dartmouth.edu') {
    if (missing(user) || missing(pwd)) return(FALSE)
    res <- system2("klog", args=c("-principal", user, "-password", pwd, "-cell", cell), stderr=TRUE)
    if (!length(res)) {
        afs_update_cache()
        TRUE
    } else {
        afs_update_cache(error=res)
        FALSE
    }
}

##' Format tokens table for html
##' @title Print tokens table
## Returns NULL when no tokens are cached; otherwise an HTML table string.
afs_tokens_table <- function() {
    if (is.null(afs_cache$tokens)) return()
    ## res <- formattable(afs_cache$tokens, list(
    ##     expires = formatter(
    ##         'span',
    ##         style=x ~ style(color = csscolor(gradient(rank(x), 'white', 'red')))
    ##     )
    ## ))
    ## paste(as.character(res))
    as.character(knitr::kable(afs_cache$tokens, format='html', caption='Current AFS Tokens'))
}
b675c7e0c35dd6653fa8e0dd11f4ad91c016987e
fe0cd547bd325d0026607421d334b25df2d6b15d
/inst/doc/neasg.R
f15c3da9eeeb6b5a224514e7deb2bc28ccd9c198
[]
no_license
andrew-loh/neaSG
ee800a4922b4c4c755f62c661f9e560a75ebdaee
2518b9e21163baef559d45c73cf0ba344bca0ab6
refs/heads/master
2020-04-10T01:14:16.113645
2018-12-17T00:02:36
2018-12-17T00:02:36
160,709,884
1
0
null
null
null
null
UTF-8
R
false
false
1,685
r
neasg.R
## neasg.R -- code purled from the neaSG package vignette. Each "## ----"
## marker below is a knitr chunk header; the chunks demonstrate the
## package's weather-data download helpers (network access required).

## ----setup, include = FALSE----------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)

## ---- message=FALSE, warning=FALSE, eval=FALSE---------------------------
## Installation chunk (eval=FALSE in the vignette, hence commented here).
#  library(devtools)
#  install_github("andrew-loh/neaSG", force = TRUE)

## ---- message=FALSE, warning=FALSE---------------------------------------
library(neaSG)
## List the exported objects of the package.
ls(package:neaSG)

## ---- message=FALSE, warning=FALSE---------------------------------------
## Available weather stations.
get_weatherstns()

## ---- message=FALSE, warning=FALSE---------------------------------------
#Dates must be input as characters in a "YYYY-MM-DD" format
data <- get_airtemp(from = "2017-12-31", to = "2018-01-04")
head(data)

## ---- message=FALSE, warning=FALSE---------------------------------------
#Dates must be input as characters in a "YYYY-MM-DD" format
data <- get_humidity(from = "2017-12-31", to = "2018-01-04")
head(data)

## ---- message=FALSE, warning=FALSE---------------------------------------
#Dates must be input as characters in a "YYYY-MM-DD" format
data <- get_rainfall(from = "2017-12-31", to = "2018-01-04")
head(data)

## ---- message=FALSE, warning=FALSE---------------------------------------
#Dates must be input as characters in a "YYYY-MM-DD" format
data <- get_UV(from = "2017-12-31", to = "2018-01-04")
head(data)

## ---- message=FALSE, warning=FALSE---------------------------------------
#Dates must be input as characters in a "YYYY-MM-DD" format
data <- get_pollutants(from = "2017-12-31", to = "2018-01-04")
head(data)

## ---- message=FALSE, warning=FALSE---------------------------------------
data <- get_weathersumm(from = "2017-12-31", to = "2018-01-04")
head(data)
12d9d373fd616efe1d442d2a5cda66a26e865eb7
ee0689132c92cf0ea3e82c65b20f85a2d6127bb8
/23-functions/47c-apply.R
fb592875fc859fbcc430268254c84ad7f8702158
[]
no_license
DUanalytics/rAnalytics
f98d34d324e1611c8c0924fbd499a5fdac0e0911
07242250a702631c0d6a31d3ad8568daf9256099
refs/heads/master
2023-08-08T14:48:13.210501
2023-07-30T12:27:26
2023-07-30T12:27:26
201,704,509
203
29
null
null
null
null
UTF-8
R
false
false
1,754
r
47c-apply.R
# Apply Commands
# apply Functions
#
# Demo script for the apply family. The original referenced an undefined
# object `m`; all such uses now refer to `m2` (the 30x3 random matrix).
#apply ----
#create matrix####
m1 <- matrix(1:10, nrow=5)
m1
# create sample data of Matrix : Use ND data : 3 cols, 30 rows
m2 <- matrix(data=cbind(rnorm(30, 0), rnorm(30, 2), rnorm(30, 5)), nrow=30, ncol=3)
m2

# apply is used for structured data - matrix, dataframe with same number of coln & rows
# https://www.r-bloggers.com/using-apply-sapply-lapply-in-r/
# apply Mean rowwise (MARGIN=1) and columnwise (MARGIN=2)
m1
apply(m1, 1, mean)
apply(m1, 2, mean)

# apply function on particular values : count number of negative values
apply(m2, 2, function(x) length(x[x<0]))
m2
apply(m2, 2, function(x) is.numeric(x))
apply(m2, 1, function(x) is.character(x))
apply(m2, 2, function(x) is.vector(x))
# column means of the positive entries only
apply(m2, 2, function(x) mean(x[x>0]))

# sapply : simplifies the result to a vector/matrix when possible
sapply(1:3, function(x) x^2)
sapply(m2[1,], function(x) x^2)
sapply(m2[,2], function(x) x^2)

# lapply - return list rather than a vector
lapply(1:3, function(x) x^2)
unlist(lapply(1:3, function(x) x^2))

mean(m2[,1])
mean(m2[,3])

#https://www.r-bloggers.com/the-apply-command-101/
# tricks
sapply(1:3, function(x) mean(m2[,x]))
# dataset name pass as y
sapply(1:3, function(x, y) mean(y[,x]), y=m2)

# Means of various colns
meanset <- c(mean(m2[,1]), mean(m2[,2]), mean(m2[,3]))
meanset

# five number summary (minimum, lower-hinge, median, upper-hinge, maximum)
# for each column of the input data
sum1 <- apply(m2, 2, fivenum)
sum1
# ?fivenum   # interactive help call removed from script flow
mean(m2[,2])
sum2 <- apply(m2, 2, summary)
sum2

# rowmeans / colmeans : fast built-in equivalents of apply(..., mean)
rowMeans(m2)
colMeans(m2)

# trimming data : trim=0.5 reduces the trimmed mean to the median
mean(m2[,1])
mean(m2[,1], trim=0.5)

a1 <- array(1:(3*4*2), dim=c(3,4,2))
a1 # 3 rows, 4 cols, 2 matrices
apply(a1, 1, sum)
apply(a1, 2, sum)
apply(a1, c(3), sum)
apply(a1, c(1,2), sum)
apply(a1, c(2,3), sum)
apply(a1, c(1,2,3), sum)
apply(a1, c(1,3), sum)
1cf7c65b14239d68f7f364c0ae5ec46b6a53be1b
3d3dca4a42b1777c13108e03ca0e3bfcd9a93205
/Rfunctions/makeTimeStamp.R
c69635c701fd0057037eac677daef9bf10f14b7a
[]
no_license
BioAimie/AnalyticsWebHub
90afaaf4abf09ef0799ddaa5bd762233975f2e38
4f650049cdbfb2ce41ab072655878a5a7a12d5e7
refs/heads/master
2021-03-27T12:31:24.229879
2017-07-27T16:50:14
2017-07-27T16:50:14
74,378,065
0
0
null
null
null
null
UTF-8
R
false
false
467
r
makeTimeStamp.R
# Stamp the active plot with a timestamp (and optional author credit).
#
# Args:
#   timeStamp: the timestamp to print; defaults to the current system time.
#   author:    optional author name, appended as "Created by <author>".
#   size:      text size multiplier (passed to gpar() as cex).
#   color:     text colour.
#
# Draws the label in the bottom-right corner of the current grid device.
# Called for its side effect; returns NULL invisibly.
makeTimeStamp <- function(timeStamp = Sys.time(), author = NULL, size = 1, color = 'black') {
  library(grid)
  # Plain if/else instead of ifelse(): the condition is scalar, and ifelse()
  # needlessly evaluates both branches and coerces the result.
  if (is.null(author)) {
    stamp <- as.character(timeStamp)
  } else {
    stamp <- paste(timeStamp, paste('Created by', author))
  }
  pushViewport(viewport())
  grid.text(label = stamp,
            x = unit(1, "npc") - unit(2, "mm"),
            y = unit(2, "mm"),
            just = c("right", "bottom"),
            gp = gpar(cex = size, col = color))
  popViewport()
  invisible(NULL)
}
2cc861d3440267e2c1f46c8d8677e6860fa3bf2b
f36b2ad1dc17ec05278f13c7fa72a1fd8343ee19
/tests/testthat/test-chk-atomic.R
45ddbb9d8651545d0f44bc30c4a4adcf6339f69e
[ "MIT" ]
permissive
poissonconsulting/chk
45f5d81df8a967aad6e148f0bff9a9f5b89a51ac
c2545f04b23e918444d4758e4362d20dfaa8350b
refs/heads/main
2023-06-14T19:32:17.452025
2023-05-27T23:53:25
2023-05-27T23:53:25
199,894,184
43
3
NOASSERTION
2023-01-05T18:50:23
2019-07-31T16:42:59
R
UTF-8
R
false
false
460
r
test-chk-atomic.R
# Unit tests for vld_atomic()/chk_atomic() from the chk package.

test_that("vld_atomic", {
  # Atomic inputs: scalars, matrices, zero-length vectors, and NULL.
  expect_true(vld_atomic(1))
  expect_true(vld_atomic(matrix(1:3)))
  expect_true(vld_atomic(character(0)))
  expect_true(vld_atomic(NULL))
  # Lists are not atomic.
  expect_false(vld_atomic(list(1)))
})

test_that("chk_atomic", {
  # On success the value is returned unchanged, and invisibly.
  expect_identical(chk_atomic(1), 1)
  expect_invisible(chk_atomic(1))
  # On failure a chk error is thrown with a precise, anchored message;
  # x_name overrides the deparsed expression in the message.
  expect_chk_error(chk_atomic(list(1)), "^`list[(]1[)]` must be atomic[.]$")
  expect_chk_error(chk_atomic(list(1), x_name = 1), "^1 must be atomic[.]$")
})
0812ac11c1576915c4ae79ae5d086ae1d2238cb1
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
/fuzzedpackages/AlphaSimR/man/varP.Rd
89470d2e87c0126dbbe82b5dc190ae6e894eb8a0
[]
no_license
akhikolla/testpackages
62ccaeed866e2194652b65e7360987b3b20df7e7
01259c3543febc89955ea5b79f3a08d3afe57e95
refs/heads/master
2023-02-18T03:50:28.288006
2021-01-18T13:23:32
2021-01-18T13:23:32
329,981,898
7
1
null
null
null
null
UTF-8
R
false
true
604
rd
varP.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/popSummary.R \name{varP} \alias{varP} \title{Phenotypic variance} \usage{ varP(pop) } \arguments{ \item{pop}{an object of \code{\link{Pop-class}} or \code{\link{HybridPop-class}}} } \description{ Returns phenotypic variance for all traits } \examples{ #Create founder haplotypes founderPop = quickHaplo(nInd=10, nChr=1, segSites=10) #Set simulation parameters SP = SimParam$new(founderPop) SP$addTraitA(10) SP$setVarE(h2=0.5) #Create population pop = newPop(founderPop, simParam=SP) varP(pop) }
d723ee88c075ed21d83f37654885eb727f6f36cf
b47547eae99baa6288cd350d2ef949eaa9faecee
/man/g11_ftest.Rd
7f489b135b16a5cdf70917e6559d94460edff14a
[]
no_license
gabiitokazu/desperation
a75ed7739b386b02384c1d8f7bc92b6103384f95
b0957e34661dd476c3702ebf79eac63855196de0
refs/heads/master
2023-01-21T08:39:36.094367
2020-12-05T05:37:43
2020-12-05T05:37:43
318,705,585
0
0
null
null
null
null
UTF-8
R
false
true
1,334
rd
g11_ftest.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/g11_ftest.R \name{g11_ftest} \alias{g11_ftest} \title{Linear Model Function for Group11 - AU STAT6210} \usage{ g11_ftest(response, covariates) } \arguments{ \item{response}{A \code{vector} with the values for the dependent variable (also called outcome).} \item{covariates}{A \code{matrix} with the values for the independent variable (also called predictors, or explanatory variable).} \item{Beta}{A \code{matrix} that can be calculated using \code{g11_lm} function.!} \item{alpha}{A \code{numeric} (double) that sets the alpha coefficient to be used. Has to be between 0 and 1.} \item{method}{A \code{string} that defines the method used. Options are "Asymptotic" and "Bootstrap", accepts minor misspellings with a warning - which can be both good and bad.} } \value{ A \code{list} containing the following attributes: \describe{ \item{beta}{Estimated coefficients, Linear Regression Model.} \item{sigma2}{explanation} \item{variance_beta}{explanation} \item{ci}{explanation} } } \description{ Function that } \examples{ Using data(hubble) from libary(gamair) g11_lm(hubble$y, hubble$x, alpha = 0.01, method = "Bootstrap") g11_lm(hubble$y, hubble$x, method = "Asymptotic") From here, is the ACTUAL function: } \author{ Group11 }
6e88aaf93c8f8ec2e540e383173ef53e45cc762f
14f5a7238a5e264a22c59d8864b2084d58b3b29e
/man/columnwise.matrix.norm.Rd
a02d85a3b69d5a7c9630822b24efa2418dd0d4d1
[]
no_license
dami82/learningMedicine
b3e8dffea4b35e3d94b26e3c8151178c4fb88553
25e74bbfd2173da60428ae09ccd9e77b7b4d1553
refs/heads/master
2021-01-17T21:01:33.681580
2017-11-27T02:32:25
2017-11-27T02:32:25
84,156,788
0
0
null
null
null
null
UTF-8
R
false
false
422
rd
columnwise.matrix.norm.Rd
\name{columnwise.matrix.norm}
\alias{columnwise.matrix.norm}
\title{Columnwise Matrix Normalization}
\description{Normalize a numeric matrix so that all columns sum up to unity. Requires a numeric matrix as input.}
\usage{
columnwise.matrix.norm(mat)
}
\arguments{
  \item{mat}{numeric matrix (or numeric data.frame)}
}
\examples{
my.mat <- sapply(1:8, function(i) sample(1:100, 15))
my.mat
columnwise.matrix.norm(my.mat)
}
ad4a172efb65e9c6a927c735c5ab7c61f2f21c56
df9b866147cb0235af9e5290690fbf15b16d2cfd
/man/getUCSC.Rd
179d0364b95baea7eb6c354cbb483f2ddb1054f0
[]
no_license
guldenolgun/NoRCE
d3375f46c5ec3810c9cb3033401472993ec3240b
f1ce050b1771c2eb5f33ddc021cf12bdf509b6f0
refs/heads/master
2022-10-28T05:41:19.118562
2022-10-20T01:06:28
2022-10-20T01:06:28
187,480,634
1
2
null
null
null
null
UTF-8
R
false
true
1,287
rd
getUCSC.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getNearest.R
\name{getUCSC}
\alias{getUCSC}
\title{Get nearest genes for the window of the upstream/downstream region.}
\usage{
getUCSC(
  bedfile,
  upstream,
  downstream,
  org_assembly = c("hg19", "hg38", "mm10", "dre10", "rn6", "dm6", "ce11", "sc3")
)
}
\arguments{
\item{bedfile}{Bed formatted input gene regions}

\item{upstream}{Maximum upstream distance from the transcription start region of the input gene}

\item{downstream}{Maximum downstream distance from the transcription end region of the input gene}

\item{org_assembly}{genome assembly of interest for the analysis. Possible assemblies are "mm10" for mouse, "dre10" for zebrafish, "rn6" for rat, "dm6" for fruit fly, "ce11" for worm, "sc3" for yeast, "hg19" and "hg38" for human}
}
\value{
genes
}
\description{
When downstream = 0 / upstream = 0, the function converts bed formatted regions to HUGO genes
}
\examples{
\dontrun{
regions<-system.file("extdata", "ncRegion.txt", package = "NoRCE")
regionNC <- rtracklayer::import(regions, format = "BED")
neighbour <- getUCSC(bedfile = regionNC, upstream = 1000, downstream = 1000, org_assembly = 'hg19')
}
}
6f8c27b676cf0e6cdec1a014c41188bf6bbcfe80
7b102f9c8f2e3f9240090d1d67af50333a2ba98d
/gbd_2017/nonfatal_code/headache/split/01_childsplit.R
36e9048182b9756dd68819255416a59699b2b544
[]
no_license
Nermin-Ghith/ihme-modeling
9c8ec56b249cb0c417361102724fef1e6e0bcebd
746ea5fb76a9c049c37a8c15aa089c041a90a6d5
refs/heads/main
2023-04-13T00:26:55.363986
2020-10-28T19:51:51
2020-10-28T19:51:51
null
0
0
null
null
null
null
UTF-8
R
false
false
4,184
r
01_childsplit.R
###########################################################
### Author:
### Date: 10/20/17
### Project: Split Migraine into asym/sym
### Purpose: GBD 2017 Nonfatal Estimation
###########################################################
# Cluster child job: for one location, splits draw-level migraine estimates
# for the definite and probable modelable entities into symptomatic and
# asymptomatic components, writing one CSV per component.
# NOTE(review): FILEPATH tokens look like redaction placeholders and must be
# filled in before this script can run.

## SET-UP
rm(list=ls())
# Root drives differ between the Linux cluster and Windows workstations.
if (Sys.info()[1] == "Linux"){
  j_root <- "/home/j/"
  h_root <- "~/"
} else if (Sys.info()[1] == "Windows"){
  j_root <- "J:/"
  h_root <- "H:/"
}
library(pacman, lib.loc = FILEPATH)
pacman::p_load(data.table, ggplot2, readr)

## SET OBJECTS
date <- gsub("-", "_", Sys.Date())  # date-stamped output folders
repo_dir <- paste0(h_root, FILEPATH)
functions_dir <- paste0(j_root, FILEPATH)
headache_dir <- FILEPATH
sev_dir <- paste0(headache_dir, "sev_draws/")
dir.create(paste0(headache_dir, "probable_sym/", date, "/"))
dir.create(paste0(headache_dir, "definite_sym/", date, "/"))
dir.create(paste0(headache_dir, "probable_asym/", date, "/"))
dir.create(paste0(headache_dir, "definite_asym/", date, "/"))
save_probsym_dir <- paste0(headache_dir, "probable_sym/", date, "/")
save_defsym_dir <- paste0(headache_dir, "definite_sym/", date, "/")
save_probasym_dir <- paste0(headache_dir, "probable_asym/", date, "/")
save_defasym_dir <- paste0(headache_dir, "definite_asym/", date, "/")
draws <- paste0("draw_", 0:999)  # the 1000 draw column names
prob_me <- 20190  # modelable entity id for "probable"
def_me <- 20191   # modelable entity id for "definite"

## SOURCE FUNCTIONS
source(paste0(functions_dir, "get_draws.R"))
source(paste0(repo_dir, "job_array.R"))
source(paste0(functions_dir, "get_demographics.R"))

## GET TASK INFORMATION
getit <- job.array.child()
print(commandArgs())
loc_id <- getit[[1]] # grab the unique PARAMETERS for this task id
loc_id <- as.numeric(loc_id)
print(loc_id)

## GET IDS
ids <- get_demographics(gbd_team = "epi")
years <- ids$year_id
sexes <- ids$sex_id

## GET DRAWS
# Pull both MEs at once for this location; measure_ids 5 and 6 are requested
# (presumably prevalence and incidence — confirm against the GBD shared docs).
draws_dt <- get_draws(gbd_id_type = "modelable_entity_id", gbd_id = c(def_me, prob_me),
                      source = "epi", measure_id = c(5, 6), location_id = loc_id,
                      year_id = years, age_group_id = c(6:20, 30:32, 235),
                      sex_id = sexes, status = "best")
draws_dt[, id := 1]  # constant key used to cross-join the split proportions

## GET SEVERITY DRAWS AND FORMAT
# Select the most recent severity-split file by its date-stamped filename.
files <- list.files(sev_dir)
dates <- substr(files, 1, 10)
dates <- gsub("_", "-", dates)
last_date <- dates[which.max(as.POSIXct(dates))]
last_date <- gsub("-", "_", last_date)
timesym <- as.data.table(read_rds(paste0(sev_dir, last_date, ".rds")))
timesym[, id := 1]
# Reshape the time-symptomatic proportions into one row of 1000 draw columns
# each, renamed def_*/prob_* so they can sit beside the draw_* columns.
def <- copy(timesym[, .(variable, id, timesymdef)])
def <- dcast(def, id ~ variable, value.var = "timesymdef")
setnames(def, draws, paste0("def_", 0:999))
prob <- copy(timesym[, .(variable, id, timesymprob)])
prob <- dcast(prob, id ~ variable, value.var = "timesymprob")
setnames(prob, draws, paste0("prob_", 0:999))

## MERGE AND CALC DEF
# Symptomatic = draw * proportion; asymptomatic = draw * (1 - proportion),
# matched draw-by-draw (draw_k with def_k).
def_draws <- merge(draws_dt[modelable_entity_id == def_me], def, by = "id")
def_draws[, id := NULL]
defsym_draws <- copy(def_draws)
defsym_draws[, (draws) := lapply(0:999, function(x) get(paste0("draw_", x)) * get(paste0("def_", x)))]
defsym_draws[, c(paste0("def_", 0:999), "model_version_id", "modelable_entity_id") := NULL]
defasym_draws <- copy(def_draws)
defasym_draws[, (draws) := lapply(0:999, function(x) get(paste0("draw_", x)) * (1 - get(paste0("def_", x))))]
defasym_draws[, c(paste0("def_", 0:999), "model_version_id", "modelable_entity_id") := NULL]

## MERGE AND CALC PROB
# Same split for the probable ME, using the prob_* proportion draws.
prob_draws <- merge(draws_dt[modelable_entity_id == prob_me], prob, by = "id")
prob_draws[, id := NULL]
probsym_draws <- copy(prob_draws)
probsym_draws[, (draws) := lapply(0:999, function(x) get(paste0("draw_", x)) * get(paste0("prob_", x)))]
probsym_draws[, c(paste0("prob_", 0:999), "model_version_id", "modelable_entity_id") := NULL]
probasym_draws <- copy(prob_draws)
probasym_draws[, (draws) := lapply(0:999, function(x) get(paste0("draw_", x)) * (1 - get(paste0("prob_", x))))]
probasym_draws[, c(paste0("prob_", 0:999), "model_version_id", "modelable_entity_id") := NULL]

## SAVE FILES
# One CSV per component, named by location id.
write.csv(defsym_draws, paste0(save_defsym_dir, loc_id, ".csv"), row.names = F)
write.csv(defasym_draws, paste0(save_defasym_dir, loc_id, ".csv"), row.names = F)
write.csv(probsym_draws, paste0(save_probsym_dir, loc_id, ".csv"), row.names = F)
write.csv(probasym_draws, paste0(save_probasym_dir, loc_id, ".csv"), row.names = F)
ae900237e80ac30fc080bc63c5fd5cedf907893d
4b5cecbbf56fa2704f2d24c9255adf3dd4df6ff6
/AppliedDataMining/AppliedDataMining/HW2/2.3/sb41train.R
934b1aeb29c7e57f86e156edd9ec16cf576ab714
[]
no_license
keithhickman08/IUH
fa2a8c50eb4ab86f3ea10081a18620e27dc8f021
20bc22bdef9523310e1e1b9b6225e6a3eb039d20
refs/heads/master
2020-04-02T07:40:22.310044
2019-01-04T16:16:45
2019-01-04T16:16:45
154,208,374
0
0
null
null
null
null
UTF-8
R
false
false
1,528
r
sb41train.R
# Homework 4, Problem 4: Support Vector Machines (e1071).
# NOTE(review): `mydata` must already be loaded in the session before this
# script runs; the data-loading step is not part of this file. (The original
# comment mentioned the 150-row Iris data, but the code samples 900 rows.)

# Install e1071 only if it is missing, rather than unconditionally.
if (!requireNamespace("e1071", quietly = TRUE)) {
  install.packages("e1071")
}
library(e1071)

set.seed(1234)

# Train/test split: 900 randomly chosen rows train, the remainder test.
rndSample <- sample(seq_len(nrow(mydata)), 900)
mydata.training <- mydata[rndSample, ]
mydata.test <- mydata[-rndSample, ]

# Baseline SVM with the default (radial) kernel and cost = 1.
s <- svm(Purchase ~ ., mydata.training)
ps <- predict(s, mydata.training)
(cm <- table(ps, mydata.training$Purchase))  # confusion matrix for evaluation
100 * (1 - sum(diag(cm)) / sum(cm))          # training error rate (~14% observed)

# Tuning notes:
# - cost: penalty for margin violations. Small cost -> wide margin with many
#   violating support vectors; large cost -> narrow margin with few.
# - kernel: maps linearly non-separable data to a higher-dimensional space
#   where it may become separable.
# Second model: polynomial kernel of degree 3 with cost = 10.
s2 <- svm(Purchase ~ ., mydata.training, cost = 10, kernel = "polynomial", degree = 3)
ps2 <- predict(s2, mydata.training)
(cm2 <- table(ps2, mydata.training$Purchase))  # confusion matrix for evaluation
100 * (1 - sum(diag(cm2)) / sum(cm2))
# Observed: ~14% error for cost = 10 and cost = 20 alike; modifying the
# polynomial degree did not substantially improve the error rate.
90601fe939e33a8628f8d1754d8c226914118e84
72d9009d19e92b721d5cc0e8f8045e1145921130
/iBATCGH/R/Scenario2.R
f762e6a7bd1e03c7ae818e35921b9df6f39f166f
[]
no_license
akhikolla/TestedPackages-NoIssues
be46c49c0836b3f0cf60e247087089868adf7a62
eb8d498cc132def615c090941bc172e17fdce267
refs/heads/master
2023-03-01T09:10:17.227119
2021-01-25T19:44:44
2021-01-25T19:44:44
332,027,727
1
0
null
null
null
null
UTF-8
R
false
false
2,858
r
Scenario2.R
Scenario2 <- function(sigmak=0.1){
  # Simulate "scenario 2" data for iBATCGH: paired gene-expression (Y) and
  # array-CGH (X) measurements driven by a hidden 4-state chain (xi).
  #
  # Argument sigmak: residual SD of the gene-expression noise (epsilon).
  # Returns a list with Y, X, Xi, the empirical (row-normalized) transition
  # matrix A, the state means/SDs, the coefficient matrix, and probe
  # distances.
  # NOTE(review): relies on the package-internal Tran() for transition
  # counts, and draws from the RNG without setting a seed, so results vary
  # between calls unless the caller seeds the RNG.
  g=100 #number of gene expression probes
  m=1000 # number of CGH probes
  s=100 # number of samples
  #State specific mean and variance (one pair per hidden state)
  mu1=-0.65
  mu2=0
  mu3=0.65
  mu4=1.5
  mu=c(mu1,mu2,mu3,mu4)
  sigma1=0.1
  sigma2=0.1
  sigma3=0.1
  sigma4=0.2
  sigma=c(sigma1,sigma2,sigma3,sigma4)
  #Generate xi
  # A: baseline transition matrix; AC: its row-wise cumulative sums, so the
  # next state can be drawn from a single uniform variate per step.
  A=matrix(c(0.3,0.6,0.095,0.005,0.09,0.818,0.09,0.002,0.095,0.6,0.3,0.005,0.005,0.71,0.005,0.28),nrow=4,byrow=T)
  AC=matrix(nrow=4,ncol=4)
  for (i in 1:4){
    AC[i,1]=A[i,1]
    for (j in 2:4){
      AC[i,j]=AC[i,(j-1)]+A[i,j]
    }
  }
  # A1/AC1: alternative transition matrix applied at "change" positions.
  A1=A
  A1[1,]=c(0.7500, 0.1800, 0.0500, 0.020)
  A1[2,]=c(0.4955, 0.0020, 0.4955, 0.007)
  A1[3,]=c(0.0200, 0.1800, 0.7000, 0.100)
  A1[4,]=c(0.0001, 0.3028, 0.1001, 0.597)
  AC1=matrix(nrow=4,ncol=4)
  for (i in 1:4){
    AC1[i,1]=A1[i,1]
    for (j in 2:4){
      AC1[i,j]=AC1[i,(j-1)]+A1[i,j]
    }
  }
  # Every sample starts in state 2 at every probe.
  xi=matrix(2,nrow=s,ncol=m)
  # change.complete flags each probe: 1 = all samples may transition,
  # 2 = only a random subset of q samples may transition, 0 = no change.
  change=c(4:8,100:109,250:259,300,306,380,390,420:426,490:495,500:503,505,525:530,sample(531:1000,197))
  change.complete=rep(0,m)
  change.complete[change]=1
  change.pos.two=which(change.complete==0)
  change.partial=sample(change.pos.two[-1],375)
  change.complete[change.partial]=2
  q=10  # number of samples affected at partial-change positions
  for(j in 2:m){
    if(change.complete[j]==1){
      # Complete change: every sample transitions using the AC1 cumulative
      # probabilities, conditional on its state at the previous probe.
      for(i in 1:s){
        temp2=runif(1,0,1)
        if(temp2<AC1[xi[i,j-1],1]){
          xi[i,j]=1
        }
        if(AC1[xi[i,j-1],1]<=temp2 && temp2<AC1[xi[i,j-1],2]){
          xi[i,j]=2
        }
        if(AC1[xi[i,j-1],2]<=temp2 && temp2<AC1[xi[i,j-1],3]){
          xi[i,j]=3
        }
        if(AC1[xi[i,j-1],3]<=temp2){
          xi[i,j]=4
        }
      }
    }
    if(change.complete[j]==2){
      # Partial change: only q randomly chosen samples transition; the rest
      # keep the default state 2 at this probe.
      samples.to.change=sample(1:s,q)
      for(i in 1:q){
        temp2=runif(1,0,1)
        if(temp2<AC1[xi[samples.to.change[i],j-1],1]){
          xi[samples.to.change[i],j]=1
        }
        if(AC1[xi[samples.to.change[i],j-1],1]<=temp2 && temp2<AC1[xi[samples.to.change[i],j-1],2]){
          xi[samples.to.change[i],j]=2
        }
        if(AC1[xi[samples.to.change[i],j-1],2]<=temp2 && temp2<AC1[xi[samples.to.change[i],j-1],3]){
          xi[samples.to.change[i],j]=3
        }
        if(AC1[xi[samples.to.change[i],j-1],3]<=temp2){
          xi[samples.to.change[i],j]=4
        }
      }
    }
  }
  #Generate X: each entry drawn from the normal for its hidden state.
  X=matrix(nrow=s,ncol=m)
  for (i in 1:s){
    for(j in 1:m){
      X[i,j]=rnorm(1,mean=mu[xi[i,j]],sd=sigma[xi[i,j]])
    }
  }
  #Generate beta: only genes 4 and 10 depend on CGH probes; each receives 10
  #nonzero coefficients sharing a single random sign (one sign per gene).
  beta=matrix(0,nrow=g,ncol=m)
  beta[4,change[6:15]]=((-1)^(floor(runif(1,0,2))))*rnorm(10,mean=0.5,sd=0.3)
  beta[10,change[16:25]]=((-1)^(floor(runif(1,0,2))))*rnorm(10,mean=0.5,sd=0.3)
  #Generate epsilon: s x g residual noise with SD sigmak.
  epsilon=NULL
  for(i in 1:s){
    epsilon=rbind(epsilon,rnorm(g,mean=0,sd=sigmak))
  }
  #Generate intercept (one per gene)
  mu.g=rnorm(g,0,sd=0.1)
  #Generate Y: expression = states x coefficients + intercept + noise.
  Y=xi%*%t(beta)+mu.g+epsilon
  ##Empirical transition matrix: Tran() counts, then rows normalized.
  realA=Tran(xi)
  realA[1,]=realA[1,]/sum(realA[1,])
  realA[2,]=realA[2,]/sum(realA[2,])
  realA[3,]=realA[3,]/sum(realA[3,])
  realA[4,]=realA[4,]/sum(realA[4,])
  ##Beta different from zero (indices of the true signal)
  signbeta=which(beta!=0)
  #Generate distances between probes
  distance=rexp(m-1)
  disfix=2*sum(distance)
  return(list(Y=Y,X=X,Xi=xi,A=realA,mu=mu,Sd=sigma,coeff=beta,distance=distance,disfix=disfix))
}
021b991c29dafc0ba7d9b69d66af463067658f4b
b2f61fde194bfcb362b2266da124138efd27d867
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Letombe/Abduction/aim-50-3_4-yes1-3-90/aim-50-3_4-yes1-3-90.R
4d5fc2d7dde295bd77f02c07315b1c2b3e197c15
[]
no_license
arey0pushpa/dcnf-autarky
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
a6c9a52236af11d7f7e165a4b25b32c538da1c98
refs/heads/master
2021-06-09T00:56:32.937250
2021-02-19T15:15:23
2021-02-19T15:15:23
136,440,042
0
0
null
null
null
null
UTF-8
R
false
false
628
r
aim-50-3_4-yes1-3-90.R
c DCNF-Autarky [version 0.0.1]. c Copyright (c) 2018-2019 Swansea University. c c Input Clause Count: 696 c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 696 c c Input Parameter (command line, file): c input filename QBFLIB/Letombe/Abduction/aim-50-3_4-yes1-3-90.qdimacs c output filename /tmp/dcnfAutarky.dimacs c autarky level 1 c conformity level 0 c encoding type 2 c no.of var 280 c no.of clauses 696 c no.of taut cls 0 c c Output Parameters: c remaining no.of clauses 696 c c QBFLIB/Letombe/Abduction/aim-50-3_4-yes1-3-90.qdimacs 280 696 E1 [] 0 50 230 696 NONE
cca35e68bf07c83ddcf6e47e86656e58cb6e502c
918943c4b8b38bee65be66822ecddb6083220fee
/scripts/geno_annot_to_RDS.R
71df74219a98a6de0f52869de85f7ec87b40aba9
[]
no_license
drveera/PredictDBPipeline
ad4c0b8e606069663e7a8c699a6249957eba4c9b
664c1919e3d865841fa6aaf286bb634a42f4c0d1
refs/heads/master
2021-04-12T12:22:56.843820
2016-11-08T20:16:50
2016-11-08T20:16:50
94,552,057
1
0
null
2017-06-16T14:31:25
2017-06-16T14:31:25
null
UTF-8
R
false
false
338
r
geno_annot_to_RDS.R
# Turns the parsed genotype annotation file into an RDS file.
# Run from the command line:
#   Rscript geno_annot_to_RDS.R <parsed_annotation.txt> <output.RDS>
argv <- commandArgs(trailingOnly = TRUE)

# Fail fast with a usage message instead of an opaque read.table() error.
if (length(argv) < 2) {
  stop("Usage: geno_annot_to_RDS.R <gene_annot.txt> <out.RDS>", call. = FALSE)
}

gene_annot <- read.table(argv[1], stringsAsFactors = FALSE, header = TRUE)
# Index rows by gene id so downstream code can look genes up by name.
rownames(gene_annot) <- gene_annot$gene_id
saveRDS(gene_annot, argv[2])
c3255d748e6b2042d71613d88f00959339a7885c
420cac816c739b8f6a3581c1628d706f7d398beb
/R/intg1.TMLW.r
1dab80422aff5f43e6bff536d69a5af294ac94db
[]
no_license
cran/RobustAFT
d80a89efb8ffcc80b604d5959893210aab0ae31b
357b7400ae0a4d0be157b6a46970eb04d8b9ea51
refs/heads/master
2023-08-31T10:42:53.415730
2023-08-21T16:40:02
2023-08-21T17:30:23
17,693,388
0
0
null
null
null
null
UTF-8
R
false
false
55
r
intg1.TMLW.r
# Integrand (exp(x) - 1) * f(x), where f is the log-Weibull density
# dlweibul() defined elsewhere in this package.
#
# expm1(x) computes exp(x) - 1 without catastrophic cancellation for
# small |x|; mathematically identical to the original expression.
intg1.TMLW <- function(x){ expm1(x)*dlweibul(x)}
d30b950d4d81861d7b0f3269e07c32b0f9e3a7c0
9bd21f21a7e6150e539072ea8405d3a03b4c54d0
/R/sappp.R
73ba8ea871b6a6e934c43920f19f900e7d9e7281
[]
no_license
lucasnell/sappp
dda9c9d1c21721b872890e6cca6166188eced21b
10d5df07b38303d6e1839bc035681b61a72a08b5
refs/heads/master
2022-03-30T04:22:14.862948
2020-01-13T17:41:39
2020-01-13T17:41:39
100,888,574
0
0
null
null
null
null
UTF-8
R
false
false
252
r
sappp.R
#' sappp: Simulating Aphid and Parasitoid Populations... with Predation.
#'
#' Package-level documentation stub; the exported simulation functions are
#' documented individually. The Rcpp tags below register the compiled code.
#'
#' @section sappp functions:
#' See the package index for the full list of exported functions.
#'
#' @importFrom Rcpp evalCpp cpp_object_initializer
#' @useDynLib sappp, .registration = TRUE
#'
#' @docType package
#' @name sappp
NULL
7bef2f0d456422234a5caaf54b350dbb9c376ba6
4d6eae864f95140e6f605c1886245b2a6e1936f6
/plot3.R
ab72850ea828456cb858e64180c3faeaa993f2cf
[]
no_license
jarrodshingleton/datasciencecoursera
24ea504dba86e3d256bafe1785260a5e44b8a448
dbdbaf0b91f754a9954c306c1fc14c5f0a554c28
refs/heads/master
2020-07-11T12:56:47.268226
2019-09-16T23:45:57
2019-09-16T23:45:57
204,545,009
0
0
null
null
null
null
UTF-8
R
false
false
1,033
r
plot3.R
# plot3.png: energy sub-metering for 2007-02-01/02 from the household power
# consumption data set (Exploratory Data Analysis course assignment).
library(dplyr)
library(lubridate)
library(tidyr)

# Reset to a single-panel layout in case an earlier script changed it.
par(mfrow = c(1, 1))

data1 <- read.table('Course4/household_power_consumption.txt', sep = ';', header = TRUE)

# Keep only 2007-02-01 and 2007-02-02.
data1$Date <- dmy(data1$Date)
data1 <- data1 %>%
  filter(Date >= as.Date("2007-02-01")) %>%
  filter(Date <= as.Date("2007-02-02"))
data1$DateTime <- paste(data1$Date, data1$Time)
data1$DateTime <- ymd_hms(data1$DateTime)

png("plot3.png", width = 480, height = 480)

## plot3
plot3 <- data1[, c("DateTime", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")]
# Convert all three sub-metering columns to numeric. The original converted
# only the first two; if the columns arrive as character/factor (as they do
# when the raw file's '?' NA markers are not declared), the third series
# would plot wrongly or fail.
plot3$Sub_metering_1 <- as.numeric(as.character(plot3$Sub_metering_1))
plot3$Sub_metering_2 <- as.numeric(as.character(plot3$Sub_metering_2))
plot3$Sub_metering_3 <- as.numeric(as.character(plot3$Sub_metering_3))

with(plot3, plot(DateTime, Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = ""))
with(plot3, points(DateTime, Sub_metering_2, type = "l", col = "red"))
with(plot3, points(DateTime, Sub_metering_3, type = "l", col = "blue"))
legend('topright', col = c('black', 'red', 'blue'), lty = 1,
       legend = c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'))

dev.off()
7b313fd078eab3632116645e3d88198c6063675c
c6106b0c33904555119fa42477203ad915e5626d
/man/df.Rd
37048a5ba0d025827e050a0bf7498b97e4414ec9
[]
no_license
fanymiles/coindesk
ba8d3f0f869319cb8fff28e5d7f09875d437f2bf
488b9b0cd804368e7e239c89be5b491640e0bad0
refs/heads/master
2021-08-30T12:30:30.424211
2017-12-18T00:13:56
2017-12-18T00:13:56
114,314,914
0
0
null
null
null
null
UTF-8
R
false
true
767
rd
df.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{df}
\alias{df}
\title{Bitcoin Price Index presented in CNY from 2017-10-01 to 2017-12-01}
\format{The df data frame contains 62 observations on 1 variable.
\describe{
  \item{price}{The Bitcoin Price Index, in CNY}
}}
\source{
\url{ https://www.coindesk.com/price/}
}
\usage{
{"df"}
}
\description{
This data provides a small sample of the query result from the get_history()
function. The original query was: get_history(currency = 'CNY',
start = '2017-10-01', end = '2017-12-01'), where the currency is specified as
CNY and the start and end dates are specified as above. The rownames of the
data frame are the respective times.
}
\keyword{datasets}
63ac7b982a5787e51135b27f2e7fd87984c99261
985217126eedbf793a4a292c488dc6f84096e6ca
/pkg/man/COC.Rd
7293bb7d598c89ac6d8d76d573251bd5c8f468d8
[ "MIT" ]
permissive
SNStatComp/GenericValidationRules
fb6bea4c82f604ddd989dc1fe84576fc72377b76
e80bbf61b067998601c1ed213dde1bc4f5a6b9e1
refs/heads/master
2020-06-15T05:47:59.841867
2020-06-09T08:19:07
2020-06-09T08:19:07
195,218,366
2
0
null
null
null
null
UTF-8
R
false
true
2,075
rd
COC.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/COC.R \name{COC} \alias{COC} \title{Code consistency} \description{ Check that the codes used in fields are consistent with other codes used in another field of the same record, the same field in different records of the same file or in different datasets from the same country. } \section{Note}{ The interface proposed in the original document contains redundancies, and it is easier to express this rule type directly in \pkg{validate} syntax as shown in the examples below. } \examples{ # First example: consistency of TABLE and FREW library(validate) data(COCdat) rules <- validator( if ( TABLE == "T01" ) FREQ == "A" , if ( TABLE == "T02" ) FREQ == "Q") result <- confront(COCdat, rules) summary(result) values(result) as.data.frame(result) # Second example: consistency of TABLE and FREQ data(COC2dat) result <- confront(COC2dat, rules) summary(result) values(result) as.data.frame(result) # Third example: country must be EL. Envelope data can be passed # as a single-row data frame data(COC3dat) rules <- validator(REPORTING == envelope$Country) env <- data.frame(Country="EL", stringsAsFactors=FALSE) result <- confront(COC3dat, rules, ref=list(envelope=env)) summary(result) values(result) as.data.frame(result) # Fourth example: REPORTING country and PARTNER country cannot be the same data(COC4dat) # we convert to character as in the original data, these variables are # different types of 'factor' (categorical) variables. 
rules <- validator(as.character(REPORTING) != as.character(PARTNER) ) result <- confront(COC3dat, rules) summary(result) values(result) as.data.frame(result) } \references{ \href{../doc/20180202_maintypes.pdf}{Main types of validation rules for ESS data}: COC } \seealso{ Other validation-functions: \code{\link{COV}}, \code{\link{FDL}}, \code{\link{FDM}}, \code{\link{FDT}}, \code{\link{RNR}}, \code{\link{RTS}}, \code{\link{RWD}}, \code{\link{VAD}}, \code{\link{VCO}}, \code{\link{VIR}}, \code{\link{VSA}} } \concept{validation-functions}
e14776f346fbe05c8995a1f408bac1e638300e33
a1d75e1fb878f2fa43218e78b1361b4f1e125e2d
/House-Price-Prediction-Project.R
3779db5352373dc672a3a76c2c30d3bc757fb863
[]
no_license
jesslynne73/Machine-Learning
4075f5ade1887c12d49936071107d66734309ef6
ed7a6bd487967c73a653bd2d0cea6fd96725b745
refs/heads/main
2023-07-09T02:55:52.610753
2021-08-05T21:08:07
2021-08-05T21:08:07
329,099,558
1
0
null
null
null
null
UTF-8
R
false
false
8,919
r
House-Price-Prediction-Project.R
# Author: Jess Strait
# House price prediction: feature engineering, penalized regression (glmnet)
# and gradient boosting (xgboost), ending in Kaggle submission files.

# ---- Stage 1: feature engineering ----
rm(list = ls())  # NOTE(review): kept from the original; wipes the session
library(data.table)
library(Metrics)

train <- fread("Stat_380_train.csv")
test <- fread("Stat_380_test.csv")
# (Removed interactive View()/class() calls from the original script.)

# LotFrontage: coerce to numeric, then replace NAs with 0.
train$LotFrontage <- as.numeric(train$LotFrontage)
test$LotFrontage <- as.numeric(test$LotFrontage)
train[, LotFrontage := replace(LotFrontage, is.na(LotFrontage), 0)]
test[, LotFrontage := replace(LotFrontage, is.na(LotFrontage), 0)]

# Feature engineering: age of the house at sale time.
train$age <- as.numeric(train$YrSold - train$YearBuilt)
test$age <- as.numeric(test$YrSold - test$YearBuilt)

# Write engineered data to the interim folder.
fwrite(train, "trainengineer.csv")
fwrite(test, "testengineer.csv")

# ---- Stage 2: modelling ----
rm(list = ls())
library(data.table)
library(Metrics)
library(caret)
library(glmnet)
library(xgboost)

train <- fread("trainengineer.csv")
test <- fread("testengineer.csv")

# Target vector; give the test set a placeholder SalePrice so dummyVars can
# apply the same formula to both sets.
train_y <- train$SalePrice
test$SalePrice <- 0
test_y <- test$SalePrice

# Save ID columns before dropping them from the model matrices.
trainid <- train$Id
testid <- test$Id
train$Id <- NULL
test$Id <- NULL

# One-hot encode and store as matrices for glmnet/xgboost.
dummies <- dummyVars(SalePrice ~ ., data = train)
x <- as.matrix(predict(dummies, newdata = train))
x_test <- as.matrix(predict(dummies, newdata = test))

# Fit a cross-validated elastic net at a given alpha, refit at lambda.min,
# and return the training RMSE. This helper replaces five copy-pasted
# blocks in the original script.
eval_glmnet_alpha <- function(alpha, x, y) {
  cv_fit <- cv.glmnet(x, y, alpha = alpha, family = "gaussian")
  lambda_best <- cv_fit$lambda.min
  fit <- glmnet(x, y, alpha = alpha, lambda = lambda_best, family = "gaussian")
  preds <- predict(fit, s = lambda_best, newx = x)
  list(rmse = rmse(y, preds), lambda = lambda_best)
}

# Explore alpha values (1 = lasso ... 0 = ridge). Training RMSEs from the
# original run: alpha 1: 25218.7, 0.8: 25218.9, 0.6: 25220.32,
# 0.2: 25218.8, 0 (ridge): 2244.33.
for (a in c(1, 0.8, 0.6, 0.2, 0)) {
  res <- eval_glmnet_alpha(a, x, train_y)
  print(sprintf("alpha = %.1f: training RMSE = %.2f", a, res$rmse))
}

# Alpha = 1 was chosen in the original analysis; refit it as the final
# penalized-regression model and predict the test set.
final_reg <- cv.glmnet(x, train_y, alpha = 1, family = "gaussian")
lambda_best <- final_reg$lambda.min
final_model <- glmnet(x, train_y, alpha = 1, lambda = lambda_best, family = "gaussian")
predictions_test <- predict(final_model, s = lambda_best, newx = x_test, type = "response")

# Save the regression model.
summary(final_model)
saveRDS(final_model, "master_final.model")

# Submission file from the regression model.
test$Id <- testid
test$SalePrice <- predictions_test
submit <- test[, .(Id, SalePrice)]
fwrite(submit, "submission_final.csv")

# ---- XGBoost ----
boosttrain <- xgb.DMatrix(x, label = train_y, missing = NA)
boosttest <- xgb.DMatrix(x_test, missing = NA)

# Tuning log (5-fold CV test RMSE over the combinations tried by hand):
# best found was gamma = 0.002, eta = 0.01, max_depth = 15,
# min_child_weight = 2, subsample = 0.95, colsample_bytree = 0.7
# -> test-rmse = 15083.91; other combinations ranged ~15116-15917.
parameters <- list(
  objective = "reg:squarederror",
  gamma = 0.002,
  booster = "gbtree",
  eval_metric = "rmse",
  eta = 0.01,
  max_depth = 15,
  min_child_weight = 2,
  subsample = .95,
  colsample_bytree = .7,
  tree_method = 'hist'
)

tuning <- NULL
XGBm <- xgb.cv(params = parameters, nfold = 5, nrounds = 10000, missing = NA,
               data = boosttrain, print_every_n = 1, early_stopping_rounds = 25)

# Record the best cross-validation result.
iter_results <- data.table(t(parameters),
                           best_iter = XGBm$best_iteration,
                           rmse = XGBm$evaluation_log$test_rmse_mean[XGBm$best_iteration])
tuning <- rbind(tuning, iter_results)
fwrite(tuning, "besttuning.csv")

# Fit the final booster on the full training data at the chosen round count.
watchlist <- list(train = boosttrain)
XGBm <- xgb.train(params = parameters, nrounds = 143, missing = NA,
                  data = boosttrain, watchlist = watchlist, print_every_n = 1)

# Generate and evaluate test predictions. The regression model performed
# better in the original run, so its submission was the one uploaded.
pred <- predict(XGBm, newdata = boosttest)
rmse(test_y, pred)

# Save the XGBoost model.
summary(XGBm)
saveRDS(XGBm, "boost.model")

# XGBoost submission file, kept for comparison. (Removed the dead
# `testlast` object from the original, which was built and never used.)
test$SalePrice <- pred
submitlast <- test[, .(Id, SalePrice)]
fwrite(submitlast, "submission_XGB.csv")
94f39d3bb1200f13445ff2e6f01204693b77856a
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
/cran/paws.analytics/man/glue_get_workflow.Rd
2dea060d1ca405f4b79beb671dcf363ab828bce8
[ "Apache-2.0" ]
permissive
paws-r/paws
196d42a2b9aca0e551a51ea5e6f34daca739591b
a689da2aee079391e100060524f6b973130f4e40
refs/heads/main
2023-08-18T00:33:48.538539
2023-08-09T09:31:24
2023-08-09T09:31:24
154,419,943
293
45
NOASSERTION
2023-09-14T15:31:32
2018-10-24T01:28:47
R
UTF-8
R
false
true
600
rd
glue_get_workflow.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glue_operations.R \name{glue_get_workflow} \alias{glue_get_workflow} \title{Retrieves resource metadata for a workflow} \usage{ glue_get_workflow(Name, IncludeGraph = NULL) } \arguments{ \item{Name}{[required] The name of the workflow to retrieve.} \item{IncludeGraph}{Specifies whether to include a graph when returning the workflow resource metadata.} } \description{ Retrieves resource metadata for a workflow. See \url{https://www.paws-r-sdk.com/docs/glue_get_workflow/} for full documentation. } \keyword{internal}
e4ca3d197bf01b2164b11cb30349d6f4f662540c
0363e9059653e5ce2a8fd4dfa1bcfe981072ea82
/man/rowMads.Rd
d781a696d41242cff34accf43da07e196ba079f6
[]
no_license
mwrowe/microRutils
7725bd4d5e2ac60337932f384562ed39abcf86a1
654cd867bafe126593089441f63c88906ecf60ed
refs/heads/master
2021-07-07T19:59:43.732449
2021-06-10T16:59:33
2021-06-10T16:59:33
245,310,935
0
0
null
null
null
null
UTF-8
R
false
true
614
rd
rowMads.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rowZs.R \name{rowMads} \alias{rowMads} \title{rowMads: Fast Calculation of MADs by Row.} \usage{ rowMads(X, na.rm = T, constant = 1.4826) } \arguments{ \item{X}{Numeric matrix, or object that can be coerced to matrix.} \item{na.rm}{Logical; should NA's be omitted?} \item{constant}{Scale factor; numeric constant for asymptotically normal consistency.} } \description{ rowMads(X) returns median absolute deviation (MAD) of each row of matrix x. } \seealso{ \code{\link[stats]{mad}}. } \author{ M.W.Rowe, \email{mwr.stats@gmail.com} }
363d89367f0850684cc146da8e5b5351b0cee4a1
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/rAmCharts/examples/AmStockChart.Rd.R
5331f9733b62e78a05fbd697303276fdaf710bb0
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
4,310
r
AmStockChart.Rd.R
library(rAmCharts) ### Name: initialize,AmStockChart-method ### Title: Initializes an AmStockChart ### Aliases: initialize,AmStockChart-method amStockChart ### setBalloon,AmStockChart,AmBalloonOrMissing-method ### setCategoryAxesSettings setCategoryAxesSettings,AmStockChart-method ### setChartCursorSettings setChartCursorSettings,AmStockChart-method ### setChartScrollbarSettings ### setChartScrollbarSettings,AmStockChart,ChartScrollbarOrMissing-method ### setComparedDataSets setComparedDataSets,AmStockChart-method ### addComparedDataSet ### addComparedDataSet,AmStockChart,DataSetOrMissing-method setDataSets ### setDataSets,AmStockChart-method addDataSet ### addDataSet,AmStockChart,DataSetOrMissing-method setDataSetSelector ### setDataSetSelector,AmStockChart-method setLegendSettings ### setLegendSettings,AmStockChart-method setMainDataSet ### setMainDataSet,AmStockChart,DataSetOrMissing-method setPanels ### setPanels,AmStockChart,list-method addPanel ### addPanel,AmStockChart,StockPanelOrMissing-method setPanelsSettings ### setPanelsSettings,AmStockChart-method setPeriodSelector ### setPeriodSelector,AmStockChart,PeriodSelectorOrMissing-method ### setStockEventsSettings setStockEventsSettings,AmStockChart-method ### setValueAxesSettings setValueAxesSettings,AmStockChart-method ### ** Examples ## No test: # --- method 'initialize' new("AmStockChart", theme = "dark") ## End(No test) ## No test: # --- constructor amStockChart() ## End(No test) library(pipeR) ## No test: # Dummy example amStockChart() %>>% setBalloon(gridPosition = "start") ## End(No test) ## No test: # Dummy example setCategoryAxesSettings(.Object = amStockChart(), gridPosition = "start") ## End(No test) ## No test: # Dummy example setChartCursorSettings(.Object = amStockChart(), oneBallOnly = TRUE) ## End(No test) ## No test: # Dummy example amchart <- setChartScrollbarSettings(.Object = amStockChart(), enabled = TRUE) print(amchart) # equivalent to: chartScrollbarSettings_obj <- 
chartScrollbarSettings() setChartScrollbarSettings(.Object = amStockChart(), chartScrollbarSettings = chartScrollbarSettings_obj) ## End(No test) ## No test: # Dummy example comparedDataSets_ls <- list(dataSet(compared = TRUE), dataSet(compared = TRUE)) setComparedDataSets(.Object = amStockChart(), comparedDataSets = comparedDataSets_ls) ## End(No test) ## No test: # Dummy example addComparedDataSet(.Object = amStockChart(), compared = TRUE) ## End(No test) ## No test: # Dummy example dataSets_ls <- list(dataSet(compared = FALSE), dataSet(compared = FALSE)) setDataSets(.Object = amStockChart(), dataSets = dataSets_ls) ## End(No test) ## No test: # Dummy example addDataSet(.Object = amStockChart(), compared = FALSE) # equivalent to: dataSet_obj <- dataSet(compared = FALSE) addDataSet(.Object = amStockChart(), dataSet = dataSet_obj) ## End(No test) ## No test: # Dummy example print(setDataSetSelector(.Object = amStockChart(), width = 180)) # equivalent to: dataSetSelector_obj <- dataSetSelector(width = 180) print(setDataSetSelector(.Object = amStockChart(), dataSetSelector = dataSetSelector_obj)) ## End(No test) ## No test: # Dummy example setLegendSettings(.Object = amStockChart(), equalWidths = TRUE) ## End(No test) ## No test: # Dummy example setMainDataSet(.Object = amStockChart(), showInCompare = TRUE) ## End(No test) ## No test: # Dummy example panels_ls <- list(stockPanel(compared = TRUE), stockPanel(compared = TRUE)) setPanels(.Object = amStockChart(), panels = panels_ls) ## End(No test) ## No test: # Dummy example chart <- addPanel(.Object = amStockChart(), allowTurningOff = TRUE); print(chart) # equivalent to: panel_obj <- panel(allowTurningOff = TRUE) addPanel(.Object = amStockChart(), panel = panel_obj) ## End(No test) ## No test: # Dummy example setPanelsSettings(.Object = amStockChart(), backgroundAlpha = 0) ## End(No test) ## No test: # Dummy example setPeriodSelector(.Object = amStockChart(), dateFormat = "DD-MM-YYYY") ## End(No test) ## No test: # 
Dummy example setStockEventsSettings(.Object = amStockChart(), backgroundAlpha = 1) ## End(No test) ## No test: # Dummy example setValueAxesSettings(.Object = amStockChart(), autoGridCount = "TRUE") ## End(No test)
e209a623af5fa157496295021bb9d83352796262
528f588e92ec54c34f29bad39e25b833b6b11366
/plot1.R
99632e2a9a594d0867bd1ee930d219c9aff42ad4
[]
no_license
iuribonna/ExData_Plotting1
c071ec7cd4aa405d06b2aff1248e20fd605bd558
e7d6438cf9047fc3e90365d64f4090785f09a261
refs/heads/master
2022-12-05T11:44:13.970892
2020-08-04T21:19:36
2020-08-04T21:19:36
285,077,076
0
0
null
2020-08-04T19:20:21
2020-08-04T19:20:20
null
UTF-8
R
false
false
1,365
r
plot1.R
## Plot 1 ## Reads the file in a table power_data <- read.table("household_power_consumption.txt",skip=1,sep=";") ## Renames the columns in a descrpitive way names(power_data) <- c("Date","Time","Global_active_power","Global_reactive_power" ,"Voltage","Global_intensity","Sub_metering_1","Sub_metering_2" ,"Sub_metering_3") ## Subset the dataset for the required 2 dates power_data <- subset(power_data, power_data$Date=="1/2/2007" | power_data$Date =="2/2/2007") ## Sets the dates as variables to format them later date1 <- "2007-02-01" date2 <- "2007-02-02" ## Formats the dates as Date class and Time in the posixt format power_data$Date <- as.Date(power_data$Date, format="%d/%m/%Y") power_data$Time <- strptime(power_data$Time, format = "%H:%M:%S") power_data[power_data$Date == date1, "Time"] <- format(power_data[power_data$Date == date1, "Time"], "2007-02-01 %H:%M:%S") power_data[power_data$Date == date1, "Time"] <- format(power_data[power_data$Date == date1, "Time"], "2007-02-02 %H:%M:%S") ## ------------------------------------------------ ## Creates the first plot hist(as.numeric(power_data$Global_active_power), col="red", main = "Global Active Power", xlab= "Global Active Power (kilowatts)") ## Export to png dev.copy(png, file="plot1.png", width=480, height=480) dev.off() ## Close device
921f9d427b2a865cc1e04ac91ee03b1ebe19a501
960e994f0ba2f7db9821cbad3490a579aaaba136
/man/probe_profile.Rd
4f7da525e5f1693ce27f1c868991f1590e1982c1
[ "MIT" ]
permissive
ouzlim/vjsim
1a3aaabf5d93bc1be72c9fe069d80f00eb5d8755
456d771193463ef00efb91085ef8782ca57f9f21
refs/heads/master
2022-11-26T22:14:19.172622
2020-08-03T22:39:53
2020-08-03T22:39:53
null
0
0
null
null
null
null
UTF-8
R
false
true
3,530
rd
probe_profile.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probing.R \name{probe_profile} \alias{probe_profile} \title{Probe Profile} \usage{ probe_profile( mass = 75, push_off_distance = 0.4, max_force = 3000, max_velocity = 4, time_to_max_activation = 0.3, change_ratio = seq(0.9, 1.1, length.out = 3), aggregate = "raw", external_load = c(-40, -20, 0, 20, 40, 60, 80, 100), profile_func = get_all_profiles, ... ) } \arguments{ \item{mass}{Numeric value. Initial parameter value to be changed using \code{change_ratio}.} \item{push_off_distance}{Numeric value. Initial parameter value to be changed using \code{change_ratio}} \item{max_force}{Numeric value. Initial parameter value to be changed using \code{change_ratio}} \item{max_velocity}{Numeric value. Initial parameter value to be changed using \code{change_ratio}} \item{time_to_max_activation}{Numeric value. Initial parameter value to be changed using \code{change_ratio}} \item{change_ratio}{Numeric vector indicating probing change ratios} \item{aggregate}{How should \code{\link{get_all_profiles}} output be aggregated? Default is "raw". Other options involve "ratio" and "diff" which use initial output values} \item{external_load}{Numeric vector. Default is \code{c(-40, -20, 0, 20, 40, 60, 80)}. Forwarded to \code{\link{vj_profile}}} \item{profile_func}{Profiling function. Default is \code{\link{get_all_profiles}}. Also use \code{\link{get_FV_profile}}, \code{\link{get_power_profile}}, and \code{\link{get_all_samozino_profiles}}} \item{...}{Extra argument forwarded to \code{\link{vj_profile}}} } \value{ Probing data frame } \description{ \code{probe_profile} simulates the vertical jump profiling using \code{\link{vj_profile}} over \code{external_load} loads, but estimate which parameter brings biggest change in the profile summary metric returned by \code{profile_func}. This is done by keeping all parameters at initial value, while changing only one parameter. 
This is then repeated for all parameters. This way we can answer by changing what parameter for standardize change (\code{change_ratio}) yield biggest change in profile summary metric (e.g. jump height) } \examples{ require(tidyverse) # You call also use get get_all_samozino_profiles() function in the profile_func parameter profile_probe_data <- probe_profile( mass = 75, max_force = 3000, max_velocity = 3, time_to_max_activation = 0.3, time_step = 0.001, external_load = c(-40, -20, 0, 20, 40, 60, 80, 100), profile_func = get_all_profiles # Can also use get_all_samozino_profiles ) plot_data <- gather(profile_probe_data, key = "variable", value = "value", -(1:8)) \%>\% filter(variable \%in\% c( "profile_mean_FV.F0", "profile_mean_FV.V0", "profile_mean_FV.Pmax", "profile_mean_FV.Sfv", "profile_mean_power.Pmax", "profile_mean_power.Pmax_location", "profile_mean_power.F0_perc" )) ggplot(plot_data, aes(x = change_ratio, y = value, color = probing)) + theme_minimal() + geom_line() + facet_wrap(~variable, scales = "free_y") + xlab("Normalized parameter change") + ylab(NULL) # ----------------------------------- # When probing using get_FV_profile or get_power_profile use the following power_probe_data <- probe_profile( mass = 75, max_force = 3000, max_velocity = 3, time_to_max_activation = 0.3, time_step = 0.001, external_load = c(-40, -20, 0, 20, 40, 60, 80, 100), profile_func = function(...) list(list = get_power_profile(...)) ) }
ac1a47b6e64104ffa85d7b124eaa5f7bbc0342e7
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/vegan/examples/anova.cca.Rd.R
e6b58a4f17e6d6e12e954354718b2021b2bb821a
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
542
r
anova.cca.Rd.R
library(vegan) ### Name: anova.cca ### Title: Permutation Test for Constrained Correspondence Analysis, ### Redundancy Analysis and Constrained Analysis of Principal Coordinates ### Aliases: anova.cca permutest permutest.cca ### Keywords: multivariate htest ### ** Examples data(varespec, varechem) mod <- cca(varespec ~ Al + P + K, varechem) ## overall test anova(mod) ## tests for individual terms anova(mod, by="term") anova(mod, by="margin") ## test for adding all environmental variables anova(mod, cca(varespec ~ ., varechem))
d99c0b2bbe2400f922f0023824b1182e68593f65
29585dff702209dd446c0ab52ceea046c58e384e
/RHRV/R/BuildTakensVector.R
02c7bb8a5953b3ce166f1264c013dcaa5c042094
[]
no_license
ingted/R-Examples
825440ce468ce608c4d73e2af4c0a0213b81c0fe
d0917dbaf698cb8bc0789db0c3ab07453016eab9
refs/heads/master
2020-04-14T12:29:22.336088
2016-07-21T14:01:14
2016-07-21T14:01:14
null
0
0
null
null
null
null
UTF-8
R
false
false
602
r
BuildTakensVector.R
BuildTakensVector <- function(HRVData, Data, m, tau) { # ------------------------------------- # Calculates Takens expanded vectors # ------------------------------------- if (HRVData$Verbose) { cat("** Creating Takens expanded vectors **\n") cat(" m: ", m, " Tau: ", tau, "\n", sep="") } N = length(Data) jump = tau maxjump = (m-1)*jump jumpsvect = seq(0,maxjump,jump) numjumps = length(jumpsvect) numelem = N-maxjump DataExp = matrix(nrow=numelem,ncol=numjumps) for (i in 1:numelem) { DataExp[i,1:numjumps] = Data[jumpsvect+i] } return(DataExp) }
6621db2b3e8fbda4783349d44bc37a021866bd30
15ce65a306373ce210f8904bb2e3a0697030ffc6
/man/createAnalysisSettings.Rd
90a05304607778014273bf9d9920a8f5643bc6c0
[ "Apache-2.0" ]
permissive
OHDSI/RiskStratifiedEstimation
dd19c1562dcc88078b0b963fe5d49d192deb036e
f4476f8296e5444f35502e5c1e60429a9b0acd19
refs/heads/main
2023-07-08T15:41:31.377724
2023-03-08T13:19:29
2023-03-08T13:19:29
136,479,509
9
5
Apache-2.0
2023-03-14T07:31:53
2018-06-07T13:12:58
R
UTF-8
R
false
true
2,784
rd
createAnalysisSettings.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/CreateArgFuns.R \name{createAnalysisSettings} \alias{createAnalysisSettings} \title{Create a parameter defining the performed risk stratified analysis} \usage{ createAnalysisSettings( analysisId = NULL, description = "", databaseName, treatmentCohortId, comparatorCohortId, outcomeIds, analysisMatrix = diag(length(outcomeIds)), mapTreatments, mapOutcomes, negativeControlOutcomes = c(), balanceThreads = 1, negativeControlThreads = 1, verbosity = NULL, saveDirectory = NULL ) } \arguments{ \item{analysisId}{The analysis ID.} \item{description}{Text describing the analysis.} \item{databaseName}{The name of the database.} \item{treatmentCohortId}{The cohort definition id of the treatment cohort in the cohortTable.} \item{comparatorCohortId}{The cohort definition id of the comparator cohort in the cohortTable.} \item{outcomeIds}{The cohort definition ids of the outcome cohorts in the outcomeTable.} \item{analysisMatrix}{Boolean matrix defining the outcomes to be assessed (rows) within risk strata (columns). The order in columns should match the the order of \code{outcomeIds}. Default is the diagonal matrix, which leads to the risk stratified assessment of only the outcome for which the risk strata were defined.} \item{mapTreatments}{Dataframe containing 2 columns: "exposure_id" with the id numbers of the treatment and comparator cohorts and "exposure_name" the cohort names.} \item{mapOutcomes}{Dataframe containing 2 columns: "outcome_id" with the cohort names of the outcomes of interest and "outcome_name" with their names.} \item{negativeControlOutcomes}{The outcome Ids to be used as negative controls} \item{balanceThreads}{The number of threads to be used for the estimation of covariate balance} \item{negativeControlThreads}{The number of threads to be used for running the negative control analyses} \item{verbosity}{Sets the level of the verbosity. 
If the log level is at or higher in priority than the logger threshold, a message will print. The levels are: \itemize{ \item {DEBUG}{Highest verbosity showing all debug statements} \item {TRACE}{Showing information about start and end of steps} \item {INFO}{Show informative information (Default)} \item {WARN}{Show warning messages} \item {ERROR}{Show error messages} \item {FATAL}{Be silent except for fatal errors} }} \item{saveDirectory}{The directory name where the results of the analyses will be stored.} } \value{ An analysisSettings object providing the identification information of the analysis. } \description{ Create a parameter defining the performed risk stratified analysis }
d6ba3e12baec294fdb56456236e9ba82111251bf
dcf0a19d6beee75169234d35b563a27ce4fb0542
/R/allele_methods.R
ae5fcfceb89fc4c3506be1d84dd4936ccd37a9dd
[ "MIT" ]
permissive
andremrsantos/htspop
01ddda7ca4bc217cdd1c9c575eb75120bb8c1dcb
48df4f3519da0504693a12e3ed0e23be48e0edb7
refs/heads/master
2023-07-22T01:41:22.192915
2020-05-11T17:37:01
2020-05-11T17:37:01
154,565,177
2
0
null
null
null
null
UTF-8
R
false
false
1,051
r
allele_methods.R
#' Allele Count Manipulation #' #' Functions to explore and manipule the data from allele count matrices. #' #' @param ac Allele count matrix to analyze #' #' @name allele_methods #' @aliases NULL NULL #> NULL #' @rdname allele_methods #' @export count_allele <- function(ac) { if (class(ac) != "allele_count") stop("`ac` must be an `allele_count`.") return(ac$count) } #' @rdname allele_methods #' @export total_allele <- function(ac) { if (class(ac) != "allele_count") stop("`ac` must be an `allele_count`.") return(ac$n) } #' @rdname allele_methods #' @export invert_allele <- function(ac) { if (class(ac) != "allele_count") stop("`ac` must be an `allele_count`.") new_mtx <- list(count = ac$n - ac$count, n = ac$n) return(structure(new_mtx, class = "allele_count")) } #' @rdname allele_methods #' @export population_names <- function(ac) { if (class(ac) != "allele_count") stop("`ac` must be an `allele_count`.") names <- colnames(ac) if (is.null(names)) return(seq_len(ncol(ac))) return(names) }
60dec3f9c692c1df43e9e55098efb832968fef21
ac755a2e6f8e956b75a658f5682847b48025049c
/R/print.catpredi.survival.R
a419abf6d8067fa60cfcc11e3b63d085b7583d62
[]
no_license
cran/CatPredi
f6dee6f07df42ce07f6269b759fcc1292059b736
99c37302aaec362bb3df7dae0b942aee89876753
refs/heads/master
2022-01-14T05:28:04.646840
2022-01-10T11:43:01
2022-01-10T11:43:01
106,293,023
0
0
null
null
null
null
UTF-8
R
false
false
5,057
r
print.catpredi.survival.R
print.catpredi.survival <- function(x, digits = 4, ...) { cat("\nCall:\n") print(x$call) cat("\n\n*************************************************\n") method <- switch(x$method, "addfor" = cat("Addfor Search Algorithm"), "genetic" = cat("Genetic Search Algorithm")) conc.index <- switch(x$conc.index, "cindex" = cat("Concordance C-index"), "cpe" = cat("Concordance Probability Estimator - CPE")) cat("\n*************************************************\n\n") if(x$method == "addfor" & x$conc.index == "cindex") { if(x$correct.index == TRUE){ cutpoints <- format(x$results$cutpoints, digits = digits, justify = "right") Cindex <- format(x$results$Cindex, digits = digits, justify = "left") Cindex.corrected <- format(x$results$Cindex.cor, digits = digits, justify = "left") p.table <- cbind(cutpoints, Cindex,c(rep("NA",length(x$results$cutpoints)-1),Cindex.corrected)) dimnames(p.table) <- list(rep("", l = length(x$results$Cindex)), c("Optimal cutpoints", "Optimal C-index", "Corrected C-index")) print(p.table, quote = FALSE, justify = "right") } else { cutpoints <- format(x$results$cutpoints, digits = digits, justify = "right") Cindex <- format(x$results$Cindex, digits = digits, justify = "left") p.table <- cbind(cutpoints, Cindex) dimnames(p.table) <- list(rep("", l = length(x$results$Cindex)), c("Optimal cutpoints", "Optimal C-index")) print(p.table, quote = FALSE, justify = "right") } } else if(x$method == "addfor" & x$conc.index == "cpe") { if(x$correct.index == TRUE){ cutpoints <- format(x$results$cutpoints, digits = digits, justify = "right") CPE <- format(x$results$CPE, digits = digits, justify = "left") CPE.corrected <- format(x$results$CPE.cor, digits = digits, justify = "left") p.table <- cbind(cutpoints, CPE,c(rep("NA",length(x$results$cutpoints)-1),CPE.corrected)) dimnames(p.table) <- list(rep("", l = length(x$results$CPE)), c("Optimal cutpoints", "Optimal CPE", "Corrected CPE")) print(p.table, quote = FALSE, justify = "right") } else { cutpoints <- 
format(x$results$cutpoints, digits = digits, justify = "right") CPE <- format(x$results$CPE, digits = digits, justify = "left") p.table <- cbind(cutpoints, CPE) dimnames(p.table) <- list(rep("", l = length(x$results$CPE)), c("Optimal cutpoints", "Optimal CPE")) print(p.table, quote = FALSE, justify = "right") } } else if(x$method == "genetic" & x$conc.index == "cindex") { if(x$correct.index == TRUE){ cutpoints <- cbind(format(sort(x$results$cutpoints), digits = digits, justify = "right")) dimnames(cutpoints) <- list(rep("", l = length(x$results$cutpoints)), "Optimal cutpoints") print(cutpoints, quote = FALSE, justify = "right") cat("\n") Cindex <- cbind(format(x$results$Cindex, digits = digits, justify = "right")) dimnames(Cindex) <- list(rep("", l = length(x$results$Cindex)), "Optimal Cindex") print(Cindex, quote = FALSE, justify = "right") cat("\n") Cindex.corrected <- cbind(format(x$results$Cindex.cor, digits = digits, justify = "right")) dimnames(Cindex.corrected) <- list(rep("", l = length(x$results$Cindex.cor)), "Corrected Cindex") print(Cindex.corrected, quote = FALSE, justify = "right") } else { cutpoints <- cbind(format(sort(x$results$cutpoints), digits = digits, justify = "right")) dimnames(cutpoints) <- list(rep("", l = length(x$results$cutpoints)), "Optimal cutpoints") print(cutpoints, quote = FALSE, justify = "right") cat("\n") Cindex <- cbind(format(x$results$Cindex, digits = digits, justify = "right")) dimnames(Cindex) <- list(rep("", l = length(x$results$Cindex)), "Optimal Cindex") print(Cindex, quote = FALSE, justify = "right") } } else { if(x$correct.index == TRUE) { cutpoints <- cbind(format(sort(x$results$cutpoints), digits = digits, justify = "right")) dimnames(cutpoints) <- list(rep("", l = length(x$results$cutpoints)), "Optimal cutpoints") print(cutpoints, quote = FALSE, justify = "right") cat("\n") CPE <- cbind(format(x$results$CPE, digits = digits, justify = "right")) dimnames(CPE) <- list(rep("", l = length(x$results$CPE)), "Optimal CPE") 
print(CPE, quote = FALSE, justify = "right") cat("\n") CPE.corrected <- cbind(format(x$results$CPE.cor, digits = digits, justify = "right")) dimnames(CPE.corrected) <- list(rep("", l = length(x$results$CPE.cor)), "Corrected CPE") print(CPE.corrected, quote = FALSE, justify = "right") } else { cutpoints <- cbind(format(sort(x$results$cutpoints), digits = digits, justify = "right")) dimnames(cutpoints) <- list(rep("", l = length(x$results$cutpoints)), "Optimal cutpoints") print(cutpoints, quote = FALSE, justify = "right") cat("\n") CPE <- cbind(format(x$results$CPE, digits = digits, justify = "right")) dimnames(CPE) <- list(rep("", l = length(x$results$CPE)), "Optimal CPE") print(CPE, quote = FALSE, justify = "right") } } invisible(x) }
961a1a9956b40a38a763917dc31c9397728ca853
2f5816f413d45a75914a2a5f4dc5d2b9ac2457da
/Daten/Scleractinia.R
9ec6429ef74d7a3f7a13f2d28da2ec58377c394a
[]
no_license
FAU-Paleo/Analytical-Palaeobiology
a7d13a18b6ea51e7f3534f776f4ce51a50b38402
d20639c040203538735b68490f330763cb441470
refs/heads/master
2022-11-28T14:40:19.827525
2020-08-05T16:54:11
2020-08-05T16:54:11
null
0
0
null
null
null
null
UTF-8
R
false
false
7,347
r
Scleractinia.R
# Import einer Datei und Auswertung remove(list=ls()) # PBDB raw data myData <- read.table(file="C:/Daten/nceas/PBDB/Scleractinia.csv", header=TRUE, sep=",", as.is=TRUE) attach(myData) # Distinct genera in dataset genus <- occurrences.genus_name f <- factor(genus) levels(f) length(levels(f)) detach(myData) # PBDB ouput raw data myData <- read.table(file="C:/Daten/nceas/PBDB/Scleractinia_rawcurve.csv", header=TRUE, sep=",", as.is=TRUE) # edit(myData) attach(myData) age1 <- Base..Ma. age2 <- Midpoint..Ma. sg <- Sampled.genera bg <- Boundary.crosser.genera rg <- Range.through.genera e <- Extinction.rate o <- Origination.rate co <- Collections oc <- Occurrences hist(e, col="red", xlab="Extinction rate", main="Histogram of extinction rates") # Number of collections plot(age2, co, xlab="Age (Ma)", ylab="Number of collections", pch=16, type="b", xlim=c(250,0), ylim<-c(-5, 500), frame=F, axes=F) axis(1, pos=-25, at=seq(0, 250, by=50), labels=c("0", "50", "100", "150", "200", "250")) # for collections axis(2, pos=251) # Definition of legend boxes bt <- -25 # Bottom of rectangle to <- 5 # Top of rectangle bt2 <- -5 # Level of text # Number of occurrences plot(age2, oc, xlab="Age (Ma)", ylab="Number of occurrences", log="y", pch=16, type="b", xlim=c(250,0), ylim<-c(50, 5000), frame=F, axes=F) axis(1, pos=42, at=seq(0, 250, by=50), labels=c("0", "50", "100", "150", "200", "250")) # for occurrences axis(2, pos=251) # Definition of legend boxes bt <- 42 # Bottom of rectangle to <- 60 # Top of rectangle bt2 <- 50 # Level of text # Boundary crosser and SIB diversity lt1=2 lt2=1 plot(age1, bg, type="b", col="blue", xlim=c(250,0), ylim<-c(-5, 160), xlab="Age (Ma)", pch=21, lty=lt1, ylab="Number of genera", frame=F) points(age2, sg, pch=16, type="b", lty=lt2) abline(v=199.6, col="red") # Mark Triassic-Jurassic boundary abline(v=65.5, col="red") # Mark KT boundary legend(150, 35, bty="n", legend = c("Boundary crossers", "SIB"), lty = lt1:lt2, col = c("blue", "black"), pch=c(21, 16), 
title ="Counting methods") # Definition of legend boxes bt <- -25 # Bottom of rectangle to <- 0 # Top of rectangle bt2 <- -6 # Level of text # PBDB ouput subsampled data # Two timers myData2 <- read.table(file="C:/Daten/nceas/PBDB/Scler_subs_tt.csv", header=TRUE, sep=",", as.is=TRUE) attach(myData2) age1 <- Base..Ma. age2 <- Midpoint..Ma. tt <- Mean.two.timers.diversity sg <- Raw.SIB.diversity cg <- Corrected.SIB.diversity e <- Extinction.rate o <- Origination.rate pairs(myData2) # Boundary crossers myData3 <- read.table(file="C:/Daten/nceas/PBDB/Scler_subs_bc.csv", header=TRUE, sep=",", as.is=TRUE) attach(myData3) bg <- Mean.boundary.crossers.diversity age2 <- Midpoint..Ma. e <- Extinction.rate o <- Origination.rate t= e+o d = o-e # Turnover rates lt1=2 lt2=1 plot(age2[t>0], e[t>0],xlim=c(250,0), ylim<-c(0, 0.55), col="red", xlab="Age (Ma)", type="b", pch=21, ylab="Per genus rate") points(age2[t>0], o[t>0], pch=16, col="green", type="b", lty=lt2) legend(65, 0.12, bty="n", legend = c("Extinction rate", "Origination rate"), lty = lt2, col = c("red", "green"), pch=c(21, 16)) abline(v=199.6, col="red") # Mark Triassic-Jurassic boundary abline(v=65.5, col="red") # Mark KT boundary # Definition of legend boxes bt <- -0.25 # Bottom of rectangle to <- 0.02 # Top of rectangle bt2 <- 0 # Level of text # Boundary crosser and SIB diversity windows(height=5, width=7) lt1=2 lt2=1 plot(age1, bg, type="b", col="blue", xlim=c(250,0), ylim<-c(-2, 60), xlab="Age (Ma)", pch=21, lty=lt1, ylab="Number of genera", frame=F) points(age2, sg, pch=16, type="b", lty=lt2) abline(v=199.6, col="red") # Mark Triassic-Jurassic boundary abline(v=65.5, col="red") # Mark KT boundary legend(150, 15, bty="n", legend = c("Boundary crossers", "SIB"), lty = lt1:lt2, col = c("blue", "black"), pch=c(21, 16), title ="Counting methods") # Definition of legend boxes bt <- -10 # Bottom of rectangle to <- 0 # Top of rectangle bt2 <- -2 # Level of text # New plot windows(height=5, width=7) plot(age2, sg, 
xlim=c(250,0), ylim<-c(-2, 60), xlab="Age (Ma)", ylab="Number of genera", type="n") abline(v=199.6, col="red") # Mark Triassic-Jurassic boundary abline(v=65.5, col="red") # Mark KT boundary # Definition of equilibrium states s1 <- mean(sg[age2<250&age2>160]) s2 <- mean(sg[age2<160&age2>66]) s3 <- mean(sg[age1<66]) ser1 <- sd(sg[age2<250&age2>160])/sqrt(length(sg[age2<250&age2>160])) ser2 <- sd(sg[age2<160&age2>66])/sqrt(length(sg[age2<160&age2>66])) ser3 <- sd(sg[age1<66])/sqrt(length(sg[age1<66])) rect(xleft=240, ybottom=s1-ser1, xright=160, ytop=s1+ser1, col="gray") rect(xleft=160, ybottom=s2-ser2, xright=66, ytop=s2+ser2, col="gray") rect(xleft=66, ybottom=s3-ser3, xright=0, ytop=s3+ser3, col="gray") segments(240, s1, 160, s1) segments(160, s2, 65.5, s2) segments(65.5, s3, 0, s3) points(age2, sg, pch=16, type="b", lty=lt2) # Definition of legend boxes bt <- -10 # Bottom of rectangle to <- 0 # Top of rectangle bt2 <- -2 # Level of text # Fetch combined bivalve-coral data for comparison bdat <- read.table(file="C:/Daten/nceas/PBDB/Combined_curve.csv", header=TRUE, sep=";", as.is=TRUE) attach(bdat) lt1=2 lt2=1 plot(age2, Occ_biv, type="b", col="blue", xlim=c(250,0), ylim=c(10, 20000), log="y", xlab="Age (Ma)", pch=21, lty=lt1, ylab="Number of occurrences", frame=F) points(age2, Occ_cora, pch=16, type="b", log="y", lty=lt2) abline(v=199.6, col="red") # Mark Triassic-Jurassic boundary abline(v=65.5, col="red") # Mark KT boundary legend(150, 35, bty="n", legend = c("Bivalves", "Corals"), lty = lt1:lt2, col = c("blue", "black"), pch=c(21, 16)) # Definition of legend boxes bt <- 5 # Bottom of rectangle to <- 12 # Top of rectangle bt2 <- 9 # Level of text # Z-value plot plot(age2, cor_biv_z, type="b", col="blue", xlim=c(250,0), xlab="Age (Ma)", ylab="Standardized difference corals-bivalves" ) abline(v=199.6, col="red") # Mark Triassic-Jurassic boundary abline(v=65.5, col="red") # Mark KT boundary abline(h=0, lty=2) abline(h=-1.96, lty=2) # Definition of legend boxes bt 
<- -3.32 # Bottom of rectangle to <- -3 # Top of rectangle bt2 <- -3.17 # Level of text # Z-value plot plot(age2, cor_reef_z, type="b", col="blue", xlim=c(250,0), xlab="Age (Ma)", ylab="Standardized difference corals-reefs" ) abline(v=199.6, col="red") # Mark Triassic-Jurassic boundary abline(v=65.5, col="red") # Mark KT boundary abline(h=0, lty=2) abline(h=-1.96, lty=2) # Definition of legend boxes bt <- -1.22 # Bottom of rectangle to <- -1.05 # Top of rectangle bt2 <- -1.12 # Level of text # Draw boxes rect(xleft=251, ybottom=bt, xright=199.6, ytop=to) # Triassic rect(xleft=145.5, ybottom=bt, xright=199.6, ytop=to) # Jurassic rect(xleft=145.5, ybottom=bt, xright=65.5, ytop=to) # Cretaceous rect(xleft=23, ybottom=bt, xright=65.5, ytop=to) # Paleogene rect(xleft=23, ybottom=bt, xright=0, ytop=to) # Neogene # Add Text for legend of time text(x=226, y=bt2, labels="Tr") text(x=175, y=bt2, labels="J") text(x=105, y=bt2, labels="K") text(x=45, y=bt2, labels="Pg") text(x=10, y=bt2, labels="N")
1d1945f45834582177fed14d56bfe5d8adbc1151
183caf378df099da122f65ea9b75002b1e12b774
/projFocus/ceRNA/processData/step1-2_expVoomNormed.r
25912d5f38d161c2dd09bd010843d5b9f3836088
[]
no_license
cwt1/scripts-1
f58e476ddb2c83e0480856a95a95a644ad3c001c
061d6592aa6ab11c93363fcb40305a57db05e3f2
refs/heads/master
2021-05-28T01:31:30.896133
2014-08-25T19:02:37
2014-08-25T19:02:37
null
0
0
null
null
null
null
UTF-8
R
false
false
3,182
r
step1-2_expVoomNormed.r
#!/usr/bin/Rscript #Author: Jing He #Date:24 Oct,2013 #Last Updated: #COMMENTS: need edgeR installed; #input: <string:path you wnat your results to be> # <string:name of your design file(4 cols, tab delimite:example) # <string:name of count matrix file> # <string:name of your output files> #output: <file:pdf of 2 plots> <file: txt of differetial expresssed genes> ####TODO: need more development sysInfo = Sys.info() if(sysInfo['sysname']=="Darwin" ){ source("/Volumes/ifs/home/c2b2/ac_lab/jh3283/scripts/projFocus/ceRNA/projFocusCernaFunctions.R") rootd = "/Volumes/ifs/data/c2b2/ac_lab/jh3283/projFocus/" }else if(sysInfo['sysname']=="Linux" ){ source("/ifs/home/c2b2/ac_lab/jh3283/scripts/projFocus/ceRNA/projFocusCernaFunctions.R") print("working from Linux") rootd = "/ifs/data/c2b2/ac_lab/jh3283/projFocus/" } args = getArgs() usage = "Usage: Rscript step1-2_expVoomNormed.r --tumor <tumor.mat file> --normal <normal.mat file> " example = "Example: " # if(length(args) < 3 || is.null(args)){ # print(usage) # print(example) # print(args) # stop("Input parameter error!") # }else{ # print(args) # } CDT = as.character(paste(unlist(strsplit(date(),split=" "))[c(2,3,5)],collapse="-")) setwd(system("pwd",intern=T)) cwd = getwd() # tumor = args['tumor'] # normal = args['normal'] # outExp = paste(tumor, "_", CDT, ".voomNormed.matrix", sep="") print(paste("current working directory:",cwd)) ##-----test # cwd = getwd() tumor = "/Volumes//ifs/data/c2b2/ac_lab/jh3283/projFocus/result/03102014/exp/brca_exp_l3_tumor_Mar-21-2014.matrix" normal = "/Volumes//ifs/data/c2b2/ac_lab/jh3283/projFocus/result/03102014/exp/brca_exp_l3_normal_Mar-21-2014.matrix" outExp = paste(tumor, "_", CDT, ".voomNormed.matrix", sep="") outExpN = paste(normal, "_", CDT, ".voomNormed.matrix", sep="") ##---------------------------- getData = function(file,type="T"){ data = read.delim(file,header=T) gene = unique(unlist(data[,1])) data = data[-16274,] data = data[,-1] rownames(data)=gene # colnames(data) = 
vapply(colnames(data),FUN=function(x){substr(x,start=9,stop=16)},'a') sample = colnames(data) design = rep(type,ncol(data)) names(design) = sample return(list(data=data,design=design,gene=gene)) } ##--------#load data dataT = getData(tumor,type='tumor') dataN = getData(normal,type='normal') cntSampleT = ncol(dataT$data) cntSampleN = ncol(dataN$data) cntGene = nrow(dataT$data) dataMat = cbind(dataT$data,dataN$data) gene = dataT$gene row.names(dataMat) = gene design = c(dataT$design,dataN$design) condition <- factor( design ) ##-------------voom transformation require(limma) designMat = model.matrix(~design) dataMatVoom = voom(as.matrix(dataMat),designMat,plot=TRUE) dataMatVoomTumor = dataMatVoom$E[,which(as.character(design)=="tumor")] dataMatVoomNormal = dataMatVoom$E[,which(as.character(design)=="normal")] ##---------output dev.off() write.table(round(dataMatVoomTumor,digits=4), outExp,sep="\t",col.names=T,row.names=T,quote=F) write.table(round(dataMatVoomNormal,digits=4), outExpN,sep="\t",col.names=T,row.names=T,quote=F) #--------------------------------------------
32351769250220c5d6b03272933a767b4c9a0c2a
0365e239275b3e08473bef7ae68cbba75137a92f
/5A_CAR_OLS_data_prep_similarity.R
37ab3a29447c2149374cb5557af7d20f08d35e1d
[]
no_license
KateMMiller/RegenDebtCode
4a9aacc95c62a742099965a1a63cec00541f7c59
2fa9705bf61c3f913932d0da3a29d3f10a90c7b0
refs/heads/master
2021-04-26T23:34:17.736231
2018-11-03T15:31:54
2018-11-03T15:31:54
123,821,663
0
1
null
null
null
null
UTF-8
R
false
false
18,611
r
5A_CAR_OLS_data_prep_similarity.R
#-------------------------------------------- # Spatial Regression on beta diversity #-------------------------------------------- setwd('C:/temp/GIS') library(rgdal) library(raster) #library(sp) #library(maptools) library(dplyr) options("scipen"=100, "digits"=4) rasterOptions(timer=TRUE,progress='text', tmpdir='G:/raster_temp') #shows a timer for processes run from raster package file=('C:/temp/GIS') #----------------------------------- # Relate response shapefile point locations to explanatory variables #----------------------------------- # Seedling/canopy similarity seedbeta<-readOGR(dsn=file,layer="Tree_seedling_stems_m2_beta") seeds.bdf<-data.frame(seedbeta)[,-9] names(seeds.bdf) # read in raster and shapefiles to join data with prec<-raster('Pct_Change_Precip.tif') # raster files not kernel smoothed tmax<-raster('Pct_Change_Tmax.tif') deer.den<-readOGR(dsn=file, layer='deer_density_UTMalbers') deer.den$deer.index<-ifelse(deer.den$deer_dnsty=="red",4,ifelse(deer.den$deer_dnsty=="orange",3,ifelse(deer.den$deer_dnsty=="yellow",2,ifelse(deer.den$deer_dnsty=="green",1,NA)))) deer.sm<-raster('Browse_Impact_kern10k.tif') inv.sm<-raster('Invasive_Cover_kern10k.tif') hmod.lc<-raster('nlcd300_utm_clip.tif') plots<-readOGR(dsn=file, layer="FIA_plots_for_analysis") relabund.tr<-readOGR(dsn=file, layer="relabund_trees") relabund.sd<-readOGR(dsn=file, layer="relabund_seedlings") plots.df<-data.frame(plots)[,-13] relab.tr.df<-data.frame(relabund.tr)[,-(17:19)] #remove coordinates and optional to keep from getting double copies in merged df relab.sd.df<-data.frame(relabund.sd)[,-(17:19)] #remove coordinates and optional fields colnames(relab.sd.df)<-c("PLT_CN","INVYR","ACERUB.sd","ACESAC.sd","CARYA.sd", "EARSUC.sd","FAGGRA.sd","FRAX.sd", "INVASIVE.sd","LIRTUL.sd","OTHER_SPP.sd","PICEA.sd","PINUS.sd","QUERCUS.sd","SHORT.sd","ULMUS.sd") # extract values of rasters for each point location in seeds and join point data prec.ex<-extract(prec,seedbeta,method='bilinear') 
tmax.ex<-extract(tmax,seedbeta,method='bilinear') deer.sm.ex<-extract(deer.sm,seedbeta,method='bilinear') inv.sm.ex<-extract(inv.sm,seedbeta,method='bilinear') hmod.ex<-extract(hmod.lc,seedbeta,method='simple') deer.ex<-sp::over(seedbeta, deer.den) seeds2<-left_join(seeds.bdf,plots.df[,c("PLT_CN","INVYR","STDSZCD","CANOPY_")], by=c("PLT_CN")) seeds3<-left_join(seeds2,relab.tr.df, by=c("PLT_CN","INVYR")) seeds4<-left_join(seeds3,relab.sd.df, by=c("PLT_CN","INVYR")) # combine datasets seeds5<-data.frame(seeds4,prec.ex,tmax.ex, deer.sm.ex, inv.sm.ex,hmod.ex, deer.ex[,2]) head(seeds5) values<-c("train","test") seeds5$type<-sample(values, nrow(seeds5), TRUE, prob = c(.95,.05)) # split data into training and testing dataset table(seeds5$type) coordinates(seeds5)<-~coords.x1+coords.x2 proj4string(seeds5)=CRS('+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs') file<-'C:/temp/GIS' writeOGR(seeds5, dsn=file, layer="Seedlings_m2_beta_with_predictors", driver="ESRI Shapefile", overwrite_layer=T) #------------------------ # Seedling/canopy similarity sapbeta<-readOGR(dsn=file,layer="Tree_sapling_stems_m2_beta") bbox<-readOGR(dsn=file, layer="Bounding_Box") saps.bdf<-data.frame(sapbeta)[,-9] #remove optional names(saps.bdf) # read in sapling relative abundance shapefile relabund.sap<-readOGR(dsn=file, layer="relabund_saplings") relab.sap.df<-data.frame(relabund.sap)[,-(17:19)] colnames(relab.sap.df)<-c("PLT_CN","INVYR","ACERUB.sap","ACESAC.sap","CARYA.sap", "EARSUC.sap","FAGGRA.sap","FRAX.sap", "INVASIVE.sap","LIRTUL.sap","OTHER_SPP.sap","PICEA.sap","PINUS.sap","QUERCUS.sap","SHORT.sap","ULMUS.sap") names(relab.sap.df) # extract values of rasters for each point location in seeds and join point data prec.ex<-extract(prec,sapbeta,method='bilinear') tmax.ex<-extract(tmax,sapbeta,method='bilinear') deer.sm.ex<-extract(deer.sm,sapbeta,method='bilinear') inv.sm.ex<-extract(inv.sm,sapbeta,method='bilinear') 
hmod.ex<-extract(hmod.lc,sapbeta,method='simple') deer.ex<-sp::over(sapbeta, deer.den) saps2<-left_join(saps.bdf,plots.df[,c("PLT_CN","INVYR","STDSZCD","CANOPY_")], by=c("PLT_CN")) saps3<-left_join(saps2,relab.tr.df, by=c("PLT_CN","INVYR")) saps4<-left_join(saps3,relab.sap.df, by=c("PLT_CN","INVYR")) # combine datasets saps5<-data.frame(saps4,prec.ex,tmax.ex, deer.sm.ex, inv.sm.ex,hmod.ex, deer.ex[,2]) values<-c("train","test") saps5$type<-sample(values, nrow(saps5), TRUE, prob = c(.95,.05)) # split data into training and testing dataset table(saps5$type) coordinates(saps5)<-~coords.x1+coords.x2 proj4string(saps5)=CRS('+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs') file<-'C:/temp/GIS' writeOGR(saps5, dsn=file, layer="Saplings_m2_beta_with_predictors", driver="ESRI Shapefile", overwrite_layer=T) #--------------------------------------- # Clip datasets to mid-Atlantic region for analysis #--------------------------------------- # Regen without deer impacts #--------------------------------------- # Clip seedling data to primary area of interest in analysis seeds<-readOGR(dsn=file,layer="Seedlings_m2_beta_with_predictors") bbox<-readOGR(dsn=file, layer="Bounding_Box") seeds.c<-raster::crop(seeds,bbox, snap='in') plot(seeds.c, col='blue', pch=16) plot(bbox,add=T) names(seeds.c) #View(seeds2) names(seeds.c) seeds2<-seeds.c[,c("PLT_CN","INVYR","Sor","Hor","STDSZCD","CANOPY_","ACERUB","ACESAC","CARYA","EARSUC","FAGGRA","FRAX","INVASIVE","LIRTUL", "OTHER_SPP","PICEA","PINUS","QUERCUS","SHORT","ULMUS","ACERUB_","ACESAC_", "CARYA_s","EARSUC_", "FAGGRA_","FRAX_sd","INVASIVE_","LIRTUL_","OTHER_SPP_","PICEA_s","PINUS_s","QUERCUS_","SHORT_s","ULMUS_s", "prec_ex","tmax_ex","inv_sm_","hmod_ex","d____2_","type" )] names(seeds2)[names(seeds2)=="d____2_"]<-"deer_den" seeds2$X<-seeds2$coords.x1 seeds2$Y<-seeds2$coords.x2 seeds3<-na.omit(seeds2@data) nrow(seeds2);nrow(seeds3) coordinates(seeds3)<-~X+Y 
proj4string(seeds3)=CRS('+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs') writeOGR(seeds3, dsn=file, layer='Seedlings_m2_beta_with_pred_clip',driver="ESRI Shapefile",overwrite_layer=T) seeds.df<-as.data.frame(seeds3) write.csv(seeds.df, "Seedlings_m2_beta_with_pred_df.csv") # Clip sapling data to primary area of interest in analysis saps<-readOGR(dsn=file,layer="Saplings_m2_beta_with_predictors") bbox<-readOGR(dsn=file, layer="Bounding_Box") saps.c<-raster::crop(saps,bbox, snap='in') plot(saps.c, col='blue', pch=16) plot(bbox,add=T) names(saps.c) saps2<-saps.c[,c("PLT_CN","INVYR","Sor","Hor","STDSZCD","CANOPY_","ACERUB","ACESAC","CARYA","EARSUC","FAGGRA","FRAX","INVASIVE","LIRTUL", "OTHER_SPP","PICEA","PINUS","QUERCUS","SHORT","ULMUS","ACERUB_","ACESAC_", "CARYA_s","EARSUC_", "FAGGRA_","FRAX_sp","INVASIVE_","LIRTUL_","OTHER_SPP_","PICEA_s","PINUS_s","QUERCUS_","SHORT_s","ULMUS_s", "prec_ex","tmax_ex","inv_sm_","hmod_ex","d____2_","type" )] names(saps2)[names(saps2)=="d____2_"]<-"deer_den" saps2$X<-saps2$coords.x1 saps2$Y<-saps2$coords.x2 saps3<-na.omit(saps2@data) nrow(saps2)-nrow(saps3) # only losing 23 records coordinates(saps3)<-~X+Y proj4string(saps3)=CRS('+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs') writeOGR(saps3, dsn=file, layer='Saplings_m2_beta_with_pred_clip',driver="ESRI Shapefile",overwrite_layer=T) saps.df<-as.data.frame(saps3) write.csv(saps.df, "Saplings_m2_beta_with_pred_df.csv") #--------------------------------------- # Regen with deer impacts #--------------------------------------- # Clip seedling data to primary area of interest in analysis seeds<-readOGR(dsn=file,layer="Seedlings_m2_beta_with_predictors") bbox<-readOGR(dsn=file, layer="Bounding_Box") seeds.c<-raster::crop(seeds,bbox, snap='in') plot(seeds.c, col='blue', pch=16) plot(bbox,add=T) names(seeds.c) names(seeds.c) 
seeds2d<-seeds.c[,c("PLT_CN","INVYR","Sor","Hor","STDSZCD","CANOPY_","ACERUB","ACESAC","CARYA","EARSUC","FAGGRA","FRAX","INVASIVE","LIRTUL", "OTHER_SPP","PICEA","PINUS","QUERCUS","SHORT","ULMUS","ACERUB_","ACESAC_", "CARYA_s","EARSUC_", "FAGGRA_","FRAX_sd","INVASIVE_","LIRTUL_","OTHER_SPP_","PICEA_s","PINUS_s","QUERCUS_","SHORT_s","ULMUS_s", "prec_ex","tmax_ex","inv_sm_","hmod_ex","type", "dr_sm_x", "d____2_")] names(seeds2d)[names(seeds2d)=="d____2_"]<-"deer_den" seeds2d$X<-seeds2d$coords.x1 seeds2d$Y<-seeds2d$coords.x2 seeds3d<-na.omit(seeds2d@data) nrow(seeds2)-nrow(seeds3d) #1400 plots lost with deer data included coordinates(seeds3d)<-~X+Y proj4string(seeds3d)=CRS('+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs') writeOGR(seeds3d, dsn=file, layer='Seedlings_m2_beta_with_deer_pred_clip',driver="ESRI Shapefile",overwrite_layer=T) seeds.dfd<-as.data.frame(seeds3d) write.csv(seeds.dfd, "Seedlings_m2_beta_with_deer_pred_df.csv") # Clip sapling data to primary area of interest in analysis saps<-readOGR(dsn=file,layer="Saplings_m2_beta_with_predictors") bbox<-readOGR(dsn=file, layer="Bounding_Box") saps.c<-raster::crop(saps,bbox, snap='in') plot(saps.c, col='blue', pch=16) plot(bbox,add=T) names(saps.c) saps2d<-saps.c[,c("PLT_CN","INVYR","Sor","Hor","STDSZCD","CANOPY_","ACERUB","ACESAC","CARYA","EARSUC","FAGGRA","FRAX","INVASIVE","LIRTUL", "OTHER_SPP","PICEA","PINUS","QUERCUS","SHORT","ULMUS","ACERUB_","ACESAC_", "CARYA_s","EARSUC_", "FAGGRA_","FRAX_sp","INVASIVE_","LIRTUL_","OTHER_SPP_","PICEA_s","PINUS_s","QUERCUS_","SHORT_s","ULMUS_s", "prec_ex","tmax_ex","inv_sm_","hmod_ex","type", "dr_sm_x", "d____2_")] names(saps2d)[names(saps2d)=="d____2_"]<-"deer_den" saps2d$X<-saps2d$coords.x1 saps2d$Y<-saps2d$coords.x2 saps3d<-na.omit(saps2d@data) nrow(saps2d)-nrow(saps3d) #1391 coordinates(saps3d)<-~X+Y proj4string(saps3d)=CRS('+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 
+datum=NAD83 +units=m +no_defs') writeOGR(saps3d, dsn=file, layer='Saplings_m2_beta_with_deer_pred_clip',driver="ESRI Shapefile",overwrite_layer=T) saps.dfd<-as.data.frame(saps3d) write.csv(saps.dfd, "Saplings_m2_beta_with_deer_pred_df.csv") #----------------------- # Close R and reopen to clear RAM and temporary files #---------------------- # START HERE FOR Spatial Regress #------------------ setwd('C:/temp/GIS') library(rgdal) library(raster) library(spdep) library(car) #library(GWmodel) options("scipen"=100, "digits"=4) rasterOptions(timer=TRUE,progress='text', tmpdir='G:/raster_temp') #shows a timer for processes run from raster package file=('C:/temp/GIS') #----------------------------- # Seedling vs. Canopy similarity without deer impacts #----------------------------- seeds<-read.csv('Seedlings_m2_beta_with_pred_df.csv')[,-c(1)] seeds2<-subset(seeds,type=="train" ) # select points that are only in the training dataset names(seeds2) seeds2$meso.tr<-seeds2$ACERUB+seeds2$ACESAC+seeds2$FAGGRA seeds2$maple.tr<-seeds2$ACERUB+seeds2$ACESAC seeds2$oakhick.tr<-seeds2$QUERCUS+seeds2$CARYA seeds2$meso.sd<-seeds2$ACERUB_+seeds2$ACESAC_+seeds2$FAGGRA_ seeds2$maple.sd<-seeds2$ACERUB_+seeds2$ACESAC_ seeds2$oakhick.sd<-seeds2$QUERCUS_+seeds2$CARYA_s seeds2$oakhick.sub<-seeds2$oakhick.tr-seeds2$oakhick.sd seeds2$Sor.lgt<-logit(seeds2$Sor) seeds2$Hor.lgt<-logit(seeds2$Hor) seeds2$cancov<-seeds2$CANOPY_/100 #View(seeds2) # Run OLS model first names(seeds2) #View(seeds2) Sor.lm<-lm(Sor.lgt~SHORT_s+maple.sd+QUERCUS+QUERCUS_+FAGGRA+FAGGRA_+PINUS+PINUS_s+INVASIVE_+deer_den, data=seeds2) summary(Sor.lm) #r2=0.113 AIC(Sor.lm) #19326 par(mfrow = c(2, 2), oma = c(0, 0, 2, 0)) plot(Sor.lm) par(mfrow=c(1,1)) Hor.lm<-lm(Hor.lgt~tmax_ex+SHORT_s+maple.sd+QUERCUS+QUERCUS_+FAGGRA+FAGGRA_+PINUS+PINUS_s, data=seeds2) summary(Hor.lm) #r2=0.254 AIC(Hor.lm) #20886 par(mfrow = c(2, 2), oma = c(0, 0, 2, 0)) plot(Hor.lm) par(mfrow=c(1,1)) library(car) vif(Sor.lm) vif(Hor.lm) # all are under 2, so 
no issues of colinearity library(gstat) #for bubble plot names(seeds2) autocor<-data.frame(seeds2$X,seeds2$Y,resids=Sor.lm$residuals) names(autocor) coordinates(autocor)<-~seeds2.X+seeds2.Y proj4string(autocor)=CRS('+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs') bubble(autocor,zcol='resids') var.mod<-variogram(resids~1, data=autocor, alpha=c(0,90,180,270)) plot(var.mod) autocorH<-data.frame(seeds2$X,seeds2$Y,resids=Hor.lm$residuals) coordinates(autocorH)<-~seeds2.X+seeds2.Y proj4string(autocorH)=CRS('+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs') bubble(autocorH,zcol='resids') var.modH<-variogram(resids~1, data=autocorH, alpha=c(0,90,180,270)) plot(var.modH) #Calc Moran.I library(ape) plt.dist<-as.matrix(dist(cbind(seeds2$X,seeds2$Y))) plt.dist.inv<-1/plt.dist diag(plt.dist.inv)<-0 Moran.I(autocorH$resids,plt.dist.inv) #checking if residuals are autocorrelated # significant p-value #++++++++++++++++++++++++++++++++ #----------------------------- # Seedling data with deer impacts #----------------------------- #++++++++++++++++++++++++++++++++ seedsd<-read.csv('Seedlings_m2_beta_with_deer_pred_df.csv')[,-c(1)] seeds2d<-subset(seedsd,type=="train" ) # select points that are only in the training dataset names(seeds2d) seeds2d$meso.tr<-seeds2d$ACERUB+seeds2d$ACESAC+seeds2d$FAGGRA seeds2d$maple.tr<-seeds2d$ACERUB+seeds2d$ACESAC seeds2d$oakhick.tr<-seeds2d$QUERCUS+seeds2d$CARYA seeds2d$meso.sd<-seeds2d$ACERUB_+seeds2d$ACESAC_+seeds2d$FAGGRA_ seeds2d$maple.sd<-seeds2d$ACERUB_+seeds2d$ACESAC_ seeds2d$oakhick.sd<-seeds2d$QUERCUS_+seeds2d$CARYA_s seeds2d$Sor.lgt<-logit(seeds2d$Sor) seeds2d$Hor.lgt<-logit(seeds2d$Hor) # Run OLS model for Sorenson similarity Sor.lmd<-lm(Sor.lgt~SHORT_s+maple.sd+QUERCUS+QUERCUS_+FAGGRA+FAGGRA_+PINUS+INVASIVE_+dr_sm_x+deer_den, data=seeds2d) summary(Sor.lmd) #r2=0.117 AIC(Sor.lmd) #15336 par(mfrow = c(2, 2), oma 
= c(0, 0, 2, 0)) plot(Sor.lmd) par(mfrow=c(1,1)) # OLS model for Horn similarity Hor.lmd<-lm(Hor.lgt~tmax_ex+SHORT_s+maple.sd+QUERCUS+QUERCUS_+FAGGRA+FAGGRA_+PINUS+PINUS_s+deer_den, data=seeds2d) #deer dens better than impact summary(Hor.lmd) #r2=0.265 AIC(Hor.lmd) #16503 par(mfrow = c(2, 2), oma = c(0, 0, 2, 0)) plot(Hor.lm) par(mfrow=c(1,1)) library(gstat) #for bubble plot names(seeds2d) length(glob.lm$residuals) autocor<-data.frame(seeds2d$X,seeds2d$Y,resids=glob.lm$residuals) names(autocor) coordinates(autocor)<-~seeds2d.X+seeds2d.Y proj4string(autocor)=CRS('+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs') names(autocor) bubble(autocor,zcol='resids') var.mod<-variogram(resids~1, data=autocor, alpha=c(0,90,180,270)) plot(var.mod) #Calc Moran.I library(ape) plt.dist<-as.matrix(dist(cbind(seeds2d$X,seeds2d$Y))) plt.dist.inv<-1/plt.dist diag(plt.dist.inv)<-0 Moran.I(autocor$resids,plt.dist.inv) #checking if residuals are autocorrelated # significant p-value #---------------------------------- # Sapling data without deer impacts #---------------------------------- saps<-read.csv('Saplings_m2_beta_with_pred_df.csv')[,-c(1)] saps2<-subset(saps,type=="train" ) # select points that are only in the training dataset names(saps2) saps2$meso.tr<-saps2$ACERUB+saps2$ACESAC+saps2$FAGGRA saps2$maple.tr<-saps2$ACERUB+saps2$ACESAC saps2$oakhick.tr<-saps2$QUERCUS+saps2$CARYA saps2$meso.sd<-saps2$ACERUB_+saps2$ACESAC_+saps2$FAGGRA_ saps2$maple.sap<-saps2$ACERUB_+saps2$ACESAC_ saps2$oakhick.sd<-saps2$QUERCUS_+saps2$CARYA_s saps2$oakhick.sub<-saps2$oakhick.tr-saps2$oakhick.sd saps2$Sor.lgt<-logit(saps2$Sor) saps2$Hor.lgt<-logit(saps2$Hor) saps2$cancov<-saps2$CANOPY_/100 # OLS model for Sorenson similarity names(saps2) Sor.sap.lm<-lm(Sor.lgt~tmax_ex+INVASIVE_+SHORT_s+QUERCUS+QUERCUS_+CARYA+maple.sap+FAGGRA+PINUS_s+STDSZCD+cancov, data=saps2) summary(Sor.sap.lm) #r2=0.153 AIC(Sor.sap.lm) #18397 par(mfrow = c(2, 2), oma = 
c(0, 0, 2, 0)) plot(Sor.sap.lm) par(mfrow=c(1,1)) library(gstat) #for bubble plot autocor<-data.frame(saps2$X,saps2$Y,resids=Sor.sap.lm$residuals) names(autocor) coordinates(autocor)<-~saps2.X+saps2.Y proj4string(autocor)=CRS('+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs') names(autocor) library(car) bubble(autocor,zcol='resids') var.mod<-variogram(resids~1, data=autocor, alpha=c(0,90,180,270)) plot(var.mod) #Calc Moran.I library(ape) names(saps2) plt.dist<-as.matrix(dist(cbind(saps2$X,saps2$Y))) plt.dist.inv<-1/plt.dist diag(plt.dist.inv)<-0 Moran.I(autocor$resids,plt.dist.inv) #checking if residuals are autocorrelated # significant p-value # OLS model for Horn similarity names(saps2) Hor.sap.lm<-lm(Hor.lgt~tmax_ex+INVASIVE_+SHORT_s+QUERCUS+QUERCUS_+CARYA+maple.sap+FAGGRA+FAGGRA_+PINUS+PINUS_s+STDSZCD+cancov, data=saps2) summary(Hor.sap.lm) #r2=0.274 AIC(Hor.sap.lm) #20773 vif(Hor.sap.lm) #---------------------------------- # Sapling data with deer impacts #---------------------------------- saps<-read.csv('Saplings_m2_beta_with_deer_pred_df.csv')[,-c(1)] saps2<-subset(saps,type=="train" ) # select points that are only in the training dataset saps2$maple.tr<-saps2$ACERUB+saps2$ACESAC saps2$maple.sap<-saps2$ACERUB_+saps2$ACESAC_ saps2$Sor.lgt<-logit(saps2$Sor) saps2$Hor.lgt<-logit(saps2$Hor) saps2$cancov<-saps2$CANOPY_/100 # OLS model for Sorenson similarity names(saps2) Sor.sap.lm<-lm(Sor.lgt~tmax_ex+INVASIVE_+SHORT_s+QUERCUS+QUERCUS_+CARYA+maple.sap+FAGGRA+FAGGRA_+PINUS_s+STDSZCD+cancov, data=saps2) summary(Sor.sap.lm) #r2=0.168 AIC(Sor.sap.lm) #14234 par(mfrow = c(2, 2), oma = c(0, 0, 2, 0)) plot(Sor.sap.lm) par(mfrow=c(1,1)) Hor.sap.lm<-lm(Hor.lgt~tmax_ex+INVASIVE_+SHORT_s+QUERCUS+QUERCUS_+CARYA+maple.sap+FAGGRA+FAGGRA_+PINUS+PINUS_s+STDSZCD+cancov, data=saps2) summary(Hor.sap.lm) #r2=0.277 AIC(Hor.sap.lm) #16273
b47813daf12b15e147c908cff022694573ee2091
1a7f5a2318c3a1e76c2916cb9637509f373dccf0
/Problem_1.R
c4a5fcdc7ab53809521b45b4eab5d22618734b18
[]
no_license
feb-uni-sofia/homework-1-r-basics-magivelikova
539d504c4c2b574d4c6d6415f8c119686c971e1c
e03d3b5a6798068fd1ffc02d268de12e750b02b5
refs/heads/master
2021-04-15T18:57:26.111997
2018-03-22T18:04:21
2018-03-22T18:04:21
126,330,392
0
0
null
null
null
null
UTF-8
R
false
false
368
r
Problem_1.R
#a) x<-c(4,1,1,4) #b) y<-c(1,4) # c) - since y is shorter than x, the elements of y are repeated until y becomes the same length as x and this can only happen if the longer # vector's length is a multiple of the shorter vector's legth. We get x(4,1,1,4)-y(1,4,1,4)=z(3,-3,0,0) z <- x-y #d) s<-c(x,y) #e) rep(s,10) #f) rep(s,3) #g) seq(7,21) 7:21 #h) length(seq(7,21))
9d2f9bceff2a20169b14d2b30ab0a748c19da630
f67ac5fcafe4cb26ebc800e6f839de9df402e4d6
/gather_data.R
90b61c7fd2e087aa359be4d743b9cd32300b2e2b
[]
no_license
billzichos/Bag-of-Words-Meets-Bags-of-Popcorn
58c63f2d457eff20f85a78540219349b69c811e2
29ff12b8700b27272fc65c1e315edc9a9a06d9aa
refs/heads/master
2021-01-21T00:52:49.889803
2016-05-21T21:31:21
2016-05-21T21:31:21
36,864,346
1
1
null
null
null
null
UTF-8
R
false
false
402
r
gather_data.R
wd <- "~/GitHub/Bag-of-Words-Meets-Bags-of-Popcorn" setwd(wd) source("~/GitHub/Get-Raw-Data/download.R") downloadSingleKaggleZip("word2vec-nlp-tutorial","unlabeledTrainData.tsv.zip", "unlabeledTrainData.tsv") downloadSingleKaggleZip("word2vec-nlp-tutorial","testData.tsv.zip", "testData.tsv") downloadSingleKaggleZip("word2vec-nlp-tutorial","labeledTrainData.tsv.zip", "labeledTrainData.tsv")
aa17c0c2603b5dd3fc3fe2465521f05362712980
86e5242d859ab04bc40e330f075b71c9ef636a91
/HRAnalytics/hranalytics.r
dd26f47ed143cc732782721cbf3808dbf8412232
[]
no_license
ujwalk93/Data-Science
17ba6fb1c1f0d19c96b099eeeb6c2d4383332f70
4e84c9f253e5f93f46a1a61d50f62d26f9b634e3
refs/heads/master
2020-03-26T10:35:56.134518
2019-07-01T12:51:43
2019-07-01T12:51:43
144,806,509
0
0
null
null
null
null
UTF-8
R
false
false
38,864
r
hranalytics.r
setwd("D:\\PG_Diploma\\HR Analytics") #loading all the required libraries library(dplyr) library(tidyr) library(stringr) library(ggplot2) library(reshape2) library(car) library(caret) library(caTools) library(MASS) library(lubridate) library(e1071) library(cowplot) #understanding the data based on business perspective #there are 5 .csv data files used for analysis in this case study #1.employee_survey_data.csv - contains details from employees about how well they are satisfied with their job and work-life balance #2.manager_survey_data.csv - contains details from managers about how well employees perform under them #3.general_data.csv - contains details about employee location and other aspects like employee behaviour and their involvement in job #4.in_time.csv - contains details regarding in time of employees in calendar year 2015 #5.out_time.csv - contains details regarding out time of employees in calendar year 2015 #business understanding and objective of case study #A large company XYZ having around 4000 employees has a very high level of attrition. #This has a significant negative impact on the company. #The main objective of this case study is to model the probability of attrition using #logistic regression to understand what are the main factors that cause the high rate of attrition #The company management can use these results to determine how the high rate of attrition can be reduced #by making appropriate changes in the workplace. #1. 
Data preparation, cleaning, analysis and EDA #loading all the required dataset files employee <- read.csv("employee_survey_data.csv",stringsAsFactors=F) manager <- read.csv("manager_survey_data.csv",stringsAsFactors=F) general <- read.csv("general_data.csv",stringsAsFactors=F) intime <- read.csv("in_time.csv",stringsAsFactors=F) outtime <- read.csv("out_time.csv",stringsAsFactors=F) #display the file details str(employee) str(manager) str(general) str(intime) str(outtime) #renaming first columns in intime and outtime as EmployeeID to ensure consistency and ease of analysis colnames(intime)[1]<-"EmployeeID" colnames(outtime)[1]<-"EmployeeID" #check for duplicate employee ID values in each data frame sum(duplicated(employee$EmployeeID)) sum(duplicated(manager$EmployeeID)) sum(duplicated(general$EmployeeID)) sum(duplicated(intime$EmployeeID)) sum(duplicated(outtime$EmployeeID)) #check for missing values sapply(employee, function(x) length(which(x == ''))) sapply(manager, function(x) length(which(x == ''))) sapply(general, function(x) length(which(x == ''))) sapply(intime, function(x) length(which(x == ''))) sapply(outtime, function(x) length(which(x == ''))) #check for NA values sapply(employee, function(x) length(which(is.na(x)))) sapply(manager, function(x) length(which(is.na(x)))) sapply(general, function(x) length(which(is.na(x)))) sapply(intime, function(x) length(which(is.na(x)))) sapply(outtime, function(x) length(which(is.na(x)))) #replace NA values in individual columns of the employee dataset with median for ease of analysis employee$EnvironmentSatisfaction[which(is.na(employee$EnvironmentSatisfaction))]<-median(employee$EnvironmentSatisfaction,na.rm = TRUE) employee$JobSatisfaction[which(is.na(employee$JobSatisfaction))]<-median(employee$JobSatisfaction,na.rm = TRUE) employee$WorkLifeBalance[which(is.na(employee$WorkLifeBalance))]<-median(employee$WorkLifeBalance,na.rm = TRUE) #replace NA values in individual columns of the general dataset with mean for 
ease of analysis general$NumCompaniesWorked[which(is.na(general$NumCompaniesWorked))]<-mean(general$NumCompaniesWorked,na.rm = TRUE) general$TotalWorkingYears[which(is.na(general$TotalWorkingYears))]<-mean(general$TotalWorkingYears,na.rm = TRUE) #Remove the columns with all the row values as NA employee <- employee[,colSums(is.na(employee))<nrow(employee)] manager <- manager[,colSums(is.na(manager))<nrow(manager)] general <- general[,colSums(is.na(general))<nrow(general)] #convert all character type columns in general dataframe into factor variables general[,sapply(general, is.character)] <- lapply(general[,sapply(general, is.character)], as.factor) #remove EmployeeCount and Over18 columns from general dataframe as all the row values are equal to 1 general <- general[ , -c(8,16)] #remove StandardHours column from general dataframe as all the row values are equal to 8 general <- general[ , -c(16)] #check whether EmployeeID values are consistent across different dataframes setdiff(employee$EmployeeID,general$EmployeeID) setdiff(manager$EmployeeID,general$EmployeeID) setdiff(intime$EmployeeID,general$EmployeeID) setdiff(outtime$EmployeeID,general$EmployeeID) #convert some numeric variables into categorical (factor) variables as per format given in the data dictionary general$Education[which(general$Education==1)] <- "Below College" general$Education[which(general$Education==2)] <- "College" general$Education[which(general$Education==3)] <- "Bachelor" general$Education[which(general$Education==4)] <- "Master" general$Education[which(general$Education==5)] <- "Doctor" general$Education <- as.factor(general$Education) employee$EnvironmentSatisfaction[which(employee$EnvironmentSatisfaction==1)] <- "Low" employee$EnvironmentSatisfaction[which(employee$EnvironmentSatisfaction==2)] <- "Medium" employee$EnvironmentSatisfaction[which(employee$EnvironmentSatisfaction==3)] <- "High" employee$EnvironmentSatisfaction[which(employee$EnvironmentSatisfaction==4)] <- "Very High" 
employee$EnvironmentSatisfaction <- as.factor(employee$EnvironmentSatisfaction) employee$JobSatisfaction[which(employee$JobSatisfaction==1)] <- "Low" employee$JobSatisfaction[which(employee$JobSatisfaction==2)] <- "Medium" employee$JobSatisfaction[which(employee$JobSatisfaction==3)] <- "High" employee$JobSatisfaction[which(employee$JobSatisfaction==4)] <- "Very High" employee$JobSatisfaction <- as.factor(employee$JobSatisfaction) employee$WorkLifeBalance[which(employee$WorkLifeBalance==1)] <- "Bad" employee$WorkLifeBalance[which(employee$WorkLifeBalance==2)] <- "Good" employee$WorkLifeBalance[which(employee$WorkLifeBalance==3)] <- "Better" employee$WorkLifeBalance[which(employee$WorkLifeBalance==4)] <- "Best" employee$WorkLifeBalance <- as.factor(employee$WorkLifeBalance) manager$JobInvolvement[which(manager$JobInvolvement==1)] <- "Low" manager$JobInvolvement[which(manager$JobInvolvement==2)] <- "Medium" manager$JobInvolvement[which(manager$JobInvolvement==3)] <- "High" manager$JobInvolvement[which(manager$JobInvolvement==4)] <- "Very High" manager$JobInvolvement <- as.factor(manager$JobInvolvement) manager$PerformanceRating[which(manager$PerformanceRating==1)] <- "Low" manager$PerformanceRating[which(manager$PerformanceRating==2)] <- "Good" manager$PerformanceRating[which(manager$PerformanceRating==3)] <- "Excellent" manager$PerformanceRating[which(manager$PerformanceRating==4)] <- "Outstanding" manager$PerformanceRating <- as.factor(manager$PerformanceRating) #merging dataframes for analysis using common attribute (EmployeeID) hrdata <- merge(general, employee, by="EmployeeID") hrdata <- merge(hrdata, manager, by="EmployeeID") sum(is.na(employee$JobSatisfaction)) sum(is.na(employee$WorkLifeBalance)) sum(is.na(manager$JobInvolvement)) sum(is.na(hrdata)) #removing the columns which have na more than 15% in intime and outtime datasets missing_values_intime <- intime %>% summarise_all(funs(sum(is.na(.))/n())) missing_values_intime <- 
gather(missing_values_intime,key='feature',value = 'missing_percentage') missing_values_outtime <- outtime %>% summarise_all(funs(sum(is.na(.))/n())) missing_values_outtime <- gather(missing_values_outtime,key='feature',value = 'missing_percentage') intime_clean_cols <- filter(missing_values_intime,missing_percentage<0.15) outtime_clean_cols <- filter(missing_values_outtime,missing_percentage<0.15) intime_clean <- intime[,(colnames(intime) %in% intime_clean_cols$feature)] outtime_clean <- outtime[,(colnames(outtime) %in% outtime_clean_cols$feature)] sum(is.na(intime_clean)) sum(is.na(outtime_clean)) ncol(intime_clean) ncol(intime) ncol(outtime) ncol(outtime_clean) #Convert all the date columns from character to DateTime (POSIX_ct) format intime_clean[,2:ncol(intime_clean)] <- lapply(intime_clean[,2:ncol(intime_clean)], function(x) as_datetime(x)) outtime_clean[,2:ncol(outtime_clean)] <- lapply(outtime_clean[,2:ncol(outtime_clean)], function(x) as_datetime(x)) x<-outtime_clean[,2:ncol(outtime_clean)]-intime_clean[,2:ncol(intime_clean)] x[,2:ncol(x)] <- lapply(x[,2:ncol(x)],function(x) as.numeric(x)) avg_hours <- rowMeans(x[,2:ncol(x)],na.rm = T) #removing prefix X from all columns in intime_clean and outtime_clean colnames(intime_clean) <- gsub(pattern="X","",colnames(intime_clean)) colnames(outtime_clean) <- gsub(pattern="X","",colnames(outtime_clean)) #convert data from wide to long format using gather() function in.time_long <- gather(intime_clean, date, timein, EmployeeID) out.time_long <- gather(outtime_clean, date, timein, EmployeeID) nrow(x) length(avg_hours) hrdata_mergd <- cbind(hrdata,avg_hours) hrdata_mergd$overtime <- ifelse(hrdata_mergd$avg_hours>8,1,0) hrdata_mergd$undertime <- ifelse(hrdata_mergd$avg_hours<7,1,0) #the derived metrics used are avg_hours, leaves, overtime and undertime #calculating number of leaves for each employee for(i in 1:ncol(x)){ hrdata_mergd$leaves[i] <- sum(is.na(x[i,])) } #collate data into one single file hrdata_mergd 
#masterData
str(hrdata_mergd)

##################################################################
# ---- Exploratory data analysis -------------------------------------------
# Barcharts for categorical features with stacked Attrition information
bar_theme1<- theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5),
                   legend.position="none")
plot_grid(ggplot(hrdata, aes(x=EducationField,fill=Attrition))+ geom_bar(),
          ggplot(hrdata, aes(x=BusinessTravel,fill=Attrition))+ geom_bar()+bar_theme1,
          ggplot(hrdata, aes(x=Department,fill=Attrition))+ geom_bar()+bar_theme1,
          ggplot(hrdata, aes(x=Education,fill=Attrition))+ geom_bar()+bar_theme1,
          ggplot(hrdata, aes(x=Gender,fill=Attrition))+ geom_bar()+bar_theme1,
          ggplot(hrdata, aes(x=factor(JobLevel),fill=Attrition))+ geom_bar()+bar_theme1,
          align = "h")
ggplot(hrdata, aes(x=EducationField,fill=Attrition))+ geom_bar()
plot_grid(ggplot(hrdata, aes(x=JobRole,fill=Attrition))+ geom_bar()+bar_theme1,
          ggplot(hrdata, aes(x=MaritalStatus,fill=Attrition))+ geom_bar()+bar_theme1,
          align = "h")
plot_grid(ggplot(hrdata, aes(x=EnvironmentSatisfaction,fill=Attrition))+ geom_bar()+bar_theme1,
          ggplot(hrdata, aes(x=JobSatisfaction,fill=Attrition))+ geom_bar()+bar_theme1,
          ggplot(hrdata, aes(x=WorkLifeBalance,fill=Attrition))+ geom_bar()+bar_theme1,
          ggplot(hrdata, aes(x=JobInvolvement,fill=Attrition))+ geom_bar()+bar_theme1,
          ggplot(hrdata, aes(x=PerformanceRating,fill=Attrition))+ geom_bar()+bar_theme1,
          align = "h")

# Histogram and Boxplots for numeric variables
box_theme<- theme(axis.line=element_blank(),axis.title=element_blank(),
                  axis.ticks=element_blank(), axis.text=element_blank())
box_theme_y<- theme(axis.line.y=element_blank(),axis.title.y=element_blank(),
                    axis.ticks.y=element_blank(), axis.text.y=element_blank(),
                    legend.position="none")
# (kept commented out by the original author)
#plot_grid(ggplot(hrdata, aes(YearsAtCompany))+ geom_histogram(binwidth = 10),
#          ggplot(hrdata, aes(x="",y=YearsAtCompany))+ geom_boxplot(width=0.1)+coord_flip()+box_theme,
#          align = "v",ncol = 1)
#plot_grid(ggplot(hrdata, aes(YearsWithCurrManager))+ geom_histogram(binwidth = 20),
#          ggplot(hrdata, aes(x="",y=JobInvolvement))+ geom_boxplot(width=0.1)+coord_flip()+box_theme,
#          align = "v",ncol = 1)
#plot_grid(ggplot(hrdata, aes(MonthlyIncome))+ geom_histogram(),
#          ggplot(hrdata, aes(x="",y=TotalWorkingYears))+ geom_boxplot(width=0.1)+coord_flip()+box_theme,
#          align = "v",ncol = 1)
#No outliers in numeric variables

# Boxplots of numeric variables relative to attrition status
plot_grid(ggplot(hrdata, aes(x=Attrition,y=TotalWorkingYears, fill=Attrition))+ geom_boxplot(width=0.2)+
            coord_flip() +theme(legend.position="none"),
          ggplot(hrdata, aes(x=Attrition,y=PercentSalaryHike, fill=Attrition))+ geom_boxplot(width=0.2)+
            coord_flip() + box_theme_y,
          ggplot(hrdata, aes(x=Attrition,y=NumCompaniesWorked, fill=Attrition))+ geom_boxplot(width=0.2)+
            coord_flip() + box_theme_y,
          align = "v",nrow = 1)
#From the plots obtained, it is clear that as the work experience of employees increases the attrition rate decreases
#Also, as the years with current manager increase, attrition rate decreases.
#outliers are not removed from the numeric variables since it may affect the accuracy of final model

######################################################################@@@@@@
#2. Model preparation
# ---- Dummy-variable encoding ---------------------------------------------
# Each categorical column is expanded via model.matrix() (n-1 dummies for an
# n-level factor); the original column is then dropped, either by setting it
# to NULL or by the POSITIONAL cbind(hrdata_mergd[,-k], dummy_1) pattern.
# NOTE(review): the [,-k] drops are position-based, so this section only works
# if executed in exactly this order on a freshly built hrdata_mergd.
#converting all categorical variables (two-level and multi-level) into dummy variables for model building
#converting attrition to dummy data
dummy_1 <- data.frame(model.matrix( ~Attrition, data = hrdata_mergd))
hrdata_mergd$AttritionYes <- dummy_1$AttritionYes
hrdata_mergd$Attrition <- NULL
#Here we are concerned about Attrition rate, so only AttritionYes is considered
#converting BusinessTravel to dummy data
str(hrdata_mergd)
dummy_1 <- data.frame(model.matrix( ~BusinessTravel, data = hrdata_mergd))
dummy_1 <- dummy_1[,-1]
hrdata_mergd <- cbind(hrdata_mergd[,-3],dummy_1)
#converting Department to dummy data
str(hrdata_mergd)
dummy_1 <- data.frame(model.matrix( ~Department, data = hrdata_mergd))
dummy_1 <- dummy_1[,-1]
hrdata_mergd <- cbind(hrdata_mergd[,-3],dummy_1)
#converting Education to dummy data
str(hrdata_mergd)
dummy_1 <- data.frame(model.matrix( ~Education, data = hrdata_mergd))
dummy_1 <- dummy_1[,-1]
str(hrdata_mergd)
hrdata_mergd <- cbind(hrdata_mergd[,-4],dummy_1)
#converting EducationField to dummy data
str(hrdata_mergd)
dummy_1 <- data.frame(model.matrix( ~EducationField, data = hrdata_mergd))
dummy_1 <- dummy_1[,-1]
str(hrdata_mergd)
hrdata_mergd <- cbind(hrdata_mergd[,-4],dummy_1)
#converting Gender to dummy data
str(hrdata_mergd)
dummy_1 <- data.frame(model.matrix( ~Gender, data = hrdata_mergd))
str(hrdata_mergd)
hrdata_mergd$GenderMale <- dummy_1$GenderMale
hrdata_mergd$Gender <- NULL
#converting JobRole to dummy data
str(hrdata_mergd)
dummy_1 <- data.frame(model.matrix( ~JobRole, data = hrdata_mergd))
dummy_1 <- dummy_1[,-1]
str(hrdata_mergd)
hrdata_mergd <- cbind(hrdata_mergd[,-5],dummy_1)
#converting MaritalStatus to dummy data
dummy_1 <- data.frame(model.matrix( ~MaritalStatus, data = hrdata_mergd))
dummy_1 <- dummy_1[,-1]
str(hrdata_mergd)
hrdata_mergd <- cbind(hrdata_mergd[,-5],dummy_1)
#converting EnvironmentSatisfaction to dummy data
str(hrdata_mergd)
dummy_1 <- data.frame(model.matrix( ~EnvironmentSatisfaction, data = hrdata_mergd))
dummy_1 <- dummy_1[,-1]
str(hrdata_mergd)
hrdata_mergd <- cbind(hrdata_mergd[,-14],dummy_1)
#converting JobSatisfaction to dummy data
str(hrdata_mergd)
dummy_1 <- data.frame(model.matrix( ~JobSatisfaction, data = hrdata_mergd))
dummy_1 <- dummy_1[,-1]
str(hrdata_mergd)
hrdata_mergd <- cbind(hrdata_mergd[,-14],dummy_1)
#converting WorkLifeBalance to dummy data
str(hrdata_mergd)
dummy_1 <- data.frame(model.matrix( ~WorkLifeBalance, data = hrdata_mergd))
dummy_1 <- dummy_1[,-1]
str(hrdata_mergd)
hrdata_mergd <- cbind(hrdata_mergd[,-14],dummy_1)
#converting JobInvolvement to dummy data
str(hrdata_mergd)
dummy_1 <- data.frame(model.matrix( ~JobInvolvement, data = hrdata_mergd))
dummy_1 <- dummy_1[,-1]
str(hrdata_mergd)
hrdata_mergd <- cbind(hrdata_mergd[,-14],dummy_1)
#converting PerformanceRating to dummy data
str(hrdata_mergd)
dummy_1 <- data.frame(model.matrix( ~PerformanceRating, data = hrdata_mergd))
str(hrdata_mergd)
hrdata_mergd$PerformanceRatingOutstanding <- dummy_1$PerformanceRatingOutstanding
hrdata_mergd$PerformanceRating <- NULL
#converting JobLevel to dummy data
str(hrdata_mergd)
hrdata_mergd$JobLevel <- as.factor(hrdata_mergd$JobLevel)
dummy_1 <- data.frame(model.matrix( ~JobLevel, data = hrdata_mergd))
dummy_1 <- dummy_1[,-1]
str(hrdata_mergd)
hrdata_mergd <- cbind(hrdata_mergd[,-4],dummy_1)
#data with dummy variables
str(hrdata_mergd)
ncol(hrdata_mergd)
#EnvironmentSatisfaction,JobSatisfaction,WorkLifeBalance,Education
#JobInvolvement,JobLevel,PerformanceRating,Attrition and Gender
#Above variables have been converted into dummy variables.
#Same approach is used for both the two-level and multi-level categorical variables
#This is because for each n-level categorical variable, n-1 dummy variables are created.
#So, it does not matter w.r.t. two-level categorical variables as only one dummy variable is created for each of them.
#Hence, the NULL values obtained as a result (if any) can be ignored
#Here, AttritionYes is used as the dependent variable for modelling as we are
#only concerned about employees who are actually leaving the company.

################################Check which columns needs to be scaled############
# ---- Feature standardisation ---------------------------------------------
# Normalising continuous features (columns 2:16 hold the numeric measures)
ind <- c(2:16)
hr_analytics_scaled <- hrdata_mergd
for (i in ind)
{
  hr_analytics_scaled[,i] <- scale(x=hrdata_mergd[,i],center = TRUE,scale = TRUE)
}
summary(hr_analytics_scaled)

# separate training and testing data
# 70% data for training and remaining for testing
set.seed(100)
indices = sample.split(hrdata_mergd$EmployeeID, SplitRatio = 0.7)
train = hr_analytics_scaled[indices,]
# BUG FIX: sample.split() returns a LOGICAL vector, so the test rows are the
# logical complement `!indices`. The original `-indices` coerced the logicals
# to c(-1, 0, ...), which merely dropped row 1 and made the "test" set overlap
# the training set almost entirely.
test = hr_analytics_scaled[!indices,]

########################################################################
#3. Model building
# ---- Initial logistic model on all predictors ----------------------------
model_1<-glm(AttritionYes~.,data=train,family = 'binomial')
summary(model_1)

#variable selection using stepAIC() function
model_2<-stepAIC(model_1, direction="both")
summary(model_2)
vif(model_2)

#build model based on variables selected based on last AIC call
model_3<-glm(formula = AttritionYes ~ YearsAtCompany+JobRoleSales.Executive+PerformanceRatingOutstanding+
               MaritalStatusMarried+JobInvolvementVery.High+EnvironmentSatisfactionVery.High+JobLevel5+
               PercentSalaryHike+EducationCollege+DepartmentResearch...Development+JobInvolvementLow+
               DepartmentSales+JobRoleResearch.Director+BusinessTravelTravel_Rarely+TrainingTimesLastYear+
               Age+JobRoleManufacturing.Director+WorkLifeBalanceGood+JobSatisfactionLow+WorkLifeBalanceBest+
               JobSatisfactionVery.High+NumCompaniesWorked+YearsWithCurrManager+WorkLifeBalanceBetter+BusinessTravelTravel_Frequently+
               YearsSinceLastPromotion+EnvironmentSatisfactionLow+TotalWorkingYears+MaritalStatusSingle+overtime,
             family = "binomial", data = train)
summary(model_3)
vif(model_3)
#Remove variables based on VIF
#Remove YearsAtCompany due to high VIF (4.77) and high p-value (0.12910)
# ---- Manual backward elimination (models 4-20) ---------------------------
# At each step one predictor is dropped, chosen first by high VIF (with low
# significance), then by highest p-value, until every remaining coefficient
# has p < 0.001. The dropped variable is named in the comment above each fit.
model_4<-glm(formula = AttritionYes ~ JobRoleSales.Executive+PerformanceRatingOutstanding+
               MaritalStatusMarried+JobInvolvementVery.High+EnvironmentSatisfactionVery.High+JobLevel5+
               PercentSalaryHike+EducationCollege+DepartmentResearch...Development+JobInvolvementLow+
               DepartmentSales+JobRoleResearch.Director+BusinessTravelTravel_Rarely+TrainingTimesLastYear+
               Age+JobRoleManufacturing.Director+WorkLifeBalanceGood+JobSatisfactionLow+WorkLifeBalanceBest+
               JobSatisfactionVery.High+NumCompaniesWorked+YearsWithCurrManager+WorkLifeBalanceBetter+BusinessTravelTravel_Frequently+
               YearsSinceLastPromotion+EnvironmentSatisfactionLow+TotalWorkingYears+MaritalStatusSingle+overtime,
             family = "binomial", data = train)
summary(model_4)
vif(model_4)
#Remove BusinessTravelTravel_Rarely due to high VIF (3.762446)
model_5<-glm(formula = AttritionYes ~ JobRoleSales.Executive+PerformanceRatingOutstanding+
               MaritalStatusMarried+JobInvolvementVery.High+EnvironmentSatisfactionVery.High+JobLevel5+
               PercentSalaryHike+EducationCollege+DepartmentResearch...Development+JobInvolvementLow+
               DepartmentSales+JobRoleResearch.Director+TrainingTimesLastYear+
               Age+JobRoleManufacturing.Director+WorkLifeBalanceGood+JobSatisfactionLow+WorkLifeBalanceBest+
               JobSatisfactionVery.High+NumCompaniesWorked+YearsWithCurrManager+WorkLifeBalanceBetter+BusinessTravelTravel_Frequently+
               YearsSinceLastPromotion+EnvironmentSatisfactionLow+TotalWorkingYears+MaritalStatusSingle+overtime,
             family = "binomial", data = train)
summary(model_5)
vif(model_5)
#Remove DepartmentResearch...Development due to high VIF (4.201548)
model_6<-glm(formula = AttritionYes ~ JobRoleSales.Executive+PerformanceRatingOutstanding+
               MaritalStatusMarried+JobInvolvementVery.High+EnvironmentSatisfactionVery.High+JobLevel5+
               PercentSalaryHike+EducationCollege+JobInvolvementLow+
               DepartmentSales+JobRoleResearch.Director+TrainingTimesLastYear+
               Age+JobRoleManufacturing.Director+WorkLifeBalanceGood+JobSatisfactionLow+WorkLifeBalanceBest+
               JobSatisfactionVery.High+NumCompaniesWorked+YearsWithCurrManager+WorkLifeBalanceBetter+BusinessTravelTravel_Frequently+
               YearsSinceLastPromotion+EnvironmentSatisfactionLow+TotalWorkingYears+MaritalStatusSingle+overtime,
             family = "binomial", data = train)
summary(model_6)
vif(model_6)
#Remove PercentSalaryHike due to high VIF (2.408234) and high p-value (0.066460)
model_7<-glm(formula = AttritionYes ~ JobRoleSales.Executive+PerformanceRatingOutstanding+
               MaritalStatusMarried+JobInvolvementVery.High+EnvironmentSatisfactionVery.High+JobLevel5+
               EducationCollege+JobInvolvementLow+
               DepartmentSales+JobRoleResearch.Director+TrainingTimesLastYear+
               Age+JobRoleManufacturing.Director+WorkLifeBalanceGood+JobSatisfactionLow+WorkLifeBalanceBest+
               JobSatisfactionVery.High+NumCompaniesWorked+YearsWithCurrManager+WorkLifeBalanceBetter+BusinessTravelTravel_Frequently+
               YearsSinceLastPromotion+EnvironmentSatisfactionLow+TotalWorkingYears+MaritalStatusSingle+overtime,
             family = "binomial", data = train)
summary(model_7)
vif(model_7)
#Remove MaritalStatusMarried due to high VIF (2.132230) and high p-value (0.082892)
model_8<-glm(formula = AttritionYes ~ JobRoleSales.Executive+PerformanceRatingOutstanding+
               JobInvolvementVery.High+EnvironmentSatisfactionVery.High+JobLevel5+
               EducationCollege+JobInvolvementLow+
               DepartmentSales+JobRoleResearch.Director+TrainingTimesLastYear+
               Age+JobRoleManufacturing.Director+WorkLifeBalanceGood+JobSatisfactionLow+WorkLifeBalanceBest+
               JobSatisfactionVery.High+NumCompaniesWorked+YearsWithCurrManager+WorkLifeBalanceBetter+BusinessTravelTravel_Frequently+
               YearsSinceLastPromotion+EnvironmentSatisfactionLow+TotalWorkingYears+MaritalStatusSingle+overtime,
             family = "binomial", data = train)
summary(model_8)
vif(model_8)
#Now, remove variables based on p-value as there are quite a few variables having high p-value
#Remove PerformanceRatingOutstanding due to high p-value (0.777756)
model_9<-glm(formula = AttritionYes ~ JobRoleSales.Executive+
               JobInvolvementVery.High+EnvironmentSatisfactionVery.High+JobLevel5+
               EducationCollege+JobInvolvementLow+DepartmentSales+
               JobRoleResearch.Director+TrainingTimesLastYear+
               Age+JobRoleManufacturing.Director+WorkLifeBalanceGood+JobSatisfactionLow+WorkLifeBalanceBest+
               JobSatisfactionVery.High+NumCompaniesWorked+YearsWithCurrManager+WorkLifeBalanceBetter+BusinessTravelTravel_Frequently+
               YearsSinceLastPromotion+EnvironmentSatisfactionLow+TotalWorkingYears+MaritalStatusSingle+overtime,
             family = "binomial", data = train)
summary(model_9)
vif(model_9)
#Remove DepartmentSales due to high p-value (0.2873)
model_10<-glm(formula = AttritionYes ~ JobRoleSales.Executive+
                JobInvolvementVery.High+EnvironmentSatisfactionVery.High+JobLevel5+
                EducationCollege+JobInvolvementLow+
                JobRoleResearch.Director+TrainingTimesLastYear+
                Age+JobRoleManufacturing.Director+WorkLifeBalanceGood+JobSatisfactionLow+WorkLifeBalanceBest+
                JobSatisfactionVery.High+NumCompaniesWorked+YearsWithCurrManager+WorkLifeBalanceBetter+BusinessTravelTravel_Frequently+
                YearsSinceLastPromotion+EnvironmentSatisfactionLow+TotalWorkingYears+MaritalStatusSingle+overtime,
              family = "binomial", data = train)
summary(model_10)
vif(model_10)
#Remove JobInvolvementVery.High due to high p-value (0.154716)
model_11<-glm(formula = AttritionYes ~ JobRoleSales.Executive+
                EnvironmentSatisfactionVery.High+JobLevel5+
                EducationCollege+JobInvolvementLow+
                JobRoleResearch.Director+TrainingTimesLastYear+
                Age+JobRoleManufacturing.Director+WorkLifeBalanceGood+JobSatisfactionLow+WorkLifeBalanceBest+
                JobSatisfactionVery.High+NumCompaniesWorked+YearsWithCurrManager+WorkLifeBalanceBetter+BusinessTravelTravel_Frequently+
                YearsSinceLastPromotion+EnvironmentSatisfactionLow+TotalWorkingYears+MaritalStatusSingle+overtime,
              family = "binomial", data = train)
summary(model_11)
vif(model_11)
#Remove JobLevel5 due to high p-value (0.115706)
model_12<-glm(formula = AttritionYes ~ JobRoleSales.Executive+
                EnvironmentSatisfactionVery.High+
                EducationCollege+JobInvolvementLow+
                JobRoleResearch.Director+TrainingTimesLastYear+
                Age+JobRoleManufacturing.Director+WorkLifeBalanceGood+JobSatisfactionLow+WorkLifeBalanceBest+
                JobSatisfactionVery.High+NumCompaniesWorked+YearsWithCurrManager+WorkLifeBalanceBetter+BusinessTravelTravel_Frequently+
                YearsSinceLastPromotion+EnvironmentSatisfactionLow+TotalWorkingYears+MaritalStatusSingle+overtime,
              family = "binomial", data = train)
summary(model_12)
vif(model_12)
#Remove JobRoleSales.Executive due to high p-value (0.116878)
model_13<-glm(formula = AttritionYes ~ EnvironmentSatisfactionVery.High+
                EducationCollege+JobInvolvementLow+
                JobRoleResearch.Director+TrainingTimesLastYear+
                Age+JobRoleManufacturing.Director+WorkLifeBalanceGood+JobSatisfactionLow+WorkLifeBalanceBest+
                JobSatisfactionVery.High+NumCompaniesWorked+YearsWithCurrManager+WorkLifeBalanceBetter+BusinessTravelTravel_Frequently+
                YearsSinceLastPromotion+EnvironmentSatisfactionLow+TotalWorkingYears+MaritalStatusSingle+overtime,
              family = "binomial", data = train)
summary(model_13)
vif(model_13)
#Remove EnvironmentSatisfactionVery.High due to high p-value (0.085664)
model_14<-glm(formula = AttritionYes ~ EducationCollege+JobInvolvementLow+
                JobRoleResearch.Director+TrainingTimesLastYear+
                Age+JobRoleManufacturing.Director+WorkLifeBalanceGood+JobSatisfactionLow+WorkLifeBalanceBest+
                JobSatisfactionVery.High+NumCompaniesWorked+YearsWithCurrManager+WorkLifeBalanceBetter+BusinessTravelTravel_Frequently+
                YearsSinceLastPromotion+EnvironmentSatisfactionLow+TotalWorkingYears+MaritalStatusSingle+overtime,
              family = "binomial", data = train)
summary(model_14)
vif(model_14)
#Remove EducationCollege due to high p-value (0.085664)
model_15<-glm(formula = AttritionYes ~ JobInvolvementLow+
                JobRoleResearch.Director+TrainingTimesLastYear+
                Age+JobRoleManufacturing.Director+WorkLifeBalanceGood+JobSatisfactionLow+WorkLifeBalanceBest+
                JobSatisfactionVery.High+NumCompaniesWorked+YearsWithCurrManager+WorkLifeBalanceBetter+BusinessTravelTravel_Frequently+
                YearsSinceLastPromotion+EnvironmentSatisfactionLow+TotalWorkingYears+MaritalStatusSingle+overtime,
              family = "binomial", data = train)
summary(model_15)
vif(model_15)
#All variables have p-value less than 0.05
#But the number of variables is still too high.
#Hence, variables are removed one by one until all of them have p-value less than 0.001
#Remove JobRoleResearch.Director due to high p-value (0.015714)
model_16<-glm(formula = AttritionYes ~ JobInvolvementLow+TrainingTimesLastYear+
                Age+JobRoleManufacturing.Director+WorkLifeBalanceGood+JobSatisfactionLow+WorkLifeBalanceBest+
                JobSatisfactionVery.High+NumCompaniesWorked+YearsWithCurrManager+WorkLifeBalanceBetter+BusinessTravelTravel_Frequently+
                YearsSinceLastPromotion+EnvironmentSatisfactionLow+TotalWorkingYears+MaritalStatusSingle+overtime,
              family = "binomial", data = train)
summary(model_16)
vif(model_16)
#Remove JobInvolvementLow due to high p-value (0.007243)
model_17<-glm(formula = AttritionYes ~ TrainingTimesLastYear+
                Age+JobRoleManufacturing.Director+WorkLifeBalanceGood+JobSatisfactionLow+WorkLifeBalanceBest+
                JobSatisfactionVery.High+NumCompaniesWorked+YearsWithCurrManager+WorkLifeBalanceBetter+BusinessTravelTravel_Frequently+
                YearsSinceLastPromotion+EnvironmentSatisfactionLow+TotalWorkingYears+MaritalStatusSingle+overtime,
              family = "binomial", data = train)
summary(model_17)
vif(model_17)
#Remove TrainingTimesLastYear due to high p-value (0.003009)
model_18<-glm(formula = AttritionYes ~
                Age+JobRoleManufacturing.Director+WorkLifeBalanceGood+JobSatisfactionLow+WorkLifeBalanceBest+
                JobSatisfactionVery.High+NumCompaniesWorked+YearsWithCurrManager+WorkLifeBalanceBetter+BusinessTravelTravel_Frequently+
                YearsSinceLastPromotion+EnvironmentSatisfactionLow+TotalWorkingYears+MaritalStatusSingle+overtime,
              family = "binomial",data = train)
summary(model_18)
vif(model_18)
#Remove Age due to high p-value (0.00163)
model_19<-glm(formula = AttritionYes ~
                JobRoleManufacturing.Director+WorkLifeBalanceGood+JobSatisfactionLow+WorkLifeBalanceBest+
                JobSatisfactionVery.High+NumCompaniesWorked+YearsWithCurrManager+WorkLifeBalanceBetter+BusinessTravelTravel_Frequently+
                YearsSinceLastPromotion+EnvironmentSatisfactionLow+TotalWorkingYears+MaritalStatusSingle+overtime,
              family = "binomial",data = train)
summary(model_19)
vif(model_19)
#Remove JobRoleManufacturing.Director due to high p-value (0.00109)
model_20<-glm(formula = AttritionYes ~
                WorkLifeBalanceGood+JobSatisfactionLow+WorkLifeBalanceBest+
                JobSatisfactionVery.High+NumCompaniesWorked+YearsWithCurrManager+WorkLifeBalanceBetter+BusinessTravelTravel_Frequently+
                YearsSinceLastPromotion+EnvironmentSatisfactionLow+TotalWorkingYears+MaritalStatusSingle+overtime,
              family = "binomial",data = train)
summary(model_20)
vif(model_20)
#now all variables have p-values less than 0.001
#therefore, model_20 is deemed to be the final model
#The major assumption is that p-values of each variable in the final model are less than 0.001
#the method for eliminating variables after each model, is as follows:
#1. If the variables have high VIF and low signifance(p-value>0.05), the variable can be removed
#2. If the variables all have p-values less than 0.05 (or 0.01 if applicable), the variable having the highest VIF is considered (if VIF>3)
#3. If the variables all have VIF less than 2, the variable having the highest p-value is considered if the p-value is more than 0.05
#4. Still, if variables are remaining, they are eliminated in order of p-values until all have p-value<0.001
#5. This means finally variable selection is based on high VIF and then p-values
#From the final model, it is clear that following are the major factors that influence the attrition rate
#1.WorkLifeBalance
#2.JobSatisfaction
#3.NumCompaniesWorked
#4.YearsWithCurrManager
#5.BusinessTravel_Frequently
#6.YearsSinceLastPromotion
#7.EnvironmentSatisfaction
#8.MaritalStatus
#9.overtime

#4. Model evaluation
# ---- Evaluate the final model on the hold-out set ------------------------
Predict_1 <- predict(model_20,type = "response",test)
summary(Predict_1)
# (note: "predcit_attrition" is a typo kept as-is; it is the column name used
# consistently below)
test$predcit_attrition <- Predict_1
r <- cor(test$AttritionYes,test$predcit_attrition)
rsquared <- cor(test$AttritionYes,test$predcit_attrition)^2
rsquared
ggplot(test, aes(avg_hours, AttritionYes)) + geom_line(aes(colour = "blue" )) +
  geom_line(aes(x=avg_hours, y=predcit_attrition, colour="red"))
ggplot(test, aes(EmployeeID, AttritionYes)) + geom_line(aes(colour = "blue" )) +
  geom_line(aes(x=EmployeeID, y=predcit_attrition, colour="red"))
test_actual_attrition<-factor(ifelse(test$AttritionYes==1,"Yes","No"))
#P_Cutoff>=0.5
test_predict_attrition<-factor(ifelse(Predict_1 >= 0.50, "Yes", "No"))
test_conf <- confusionMatrix(test_predict_attrition, test_actual_attrition, positive = "Yes")
test_conf
#P_Cutoff>=0.4
test_predict_attrition<-factor(ifelse(Predict_1 >= 0.40, "Yes", "No"))
test_conf <- confusionMatrix(test_predict_attrition, test_actual_attrition, positive = "Yes")
test_conf
#Finding the Optimal Probability Cutoff
# Sweep 100 candidate cutoffs in [0.01, 0.80]; OUT collects
# (sensitivity, specificity, accuracy) per cutoff.
s = seq(.01,.80,length=100)
OUT = matrix(0,100,3)
# perform_fn: for one probability cutoff, return a 1x3 matrix of
# sensitivity, specificity and accuracy on the test set.
perform_fn <- function(cutoff)
{
  predicted_attrition <- factor(ifelse(Predict_1 >= cutoff, "Yes", "No"))
  conf <- confusionMatrix(predicted_attrition, test_actual_attrition, positive = "Yes")
  acc <- conf$overall[1]
  sens <- conf$byClass[1]
  spec <- conf$byClass[2]
  out <- t(as.matrix(c(sens, spec, acc)))
  colnames(out) <- c("sensitivity", "specificity", "accuracy")
  return(out)
}
for(i in 1:100)
{
  OUT[i,] = perform_fn(s[i])
}
dev.off()
# Optimal cutoff: where sensitivity and specificity (nearly) cross.
# NOTE(review): which(...) may match MORE than one cutoff, making
# op_p_cutoff a vector; confirm a single value (e.g. take the first match).
op_p_cutoff <- s[which(abs(OUT[,1]-OUT[,2])<0.02)]
op_p_cutoff #0.1616
# ---- Final confusion matrix at the optimal cutoff ------------------------
test_predict_attrition<-factor(ifelse(Predict_1 >= op_p_cutoff, "Yes", "No"))
conf_final <- confusionMatrix(test_predict_attrition, test_actual_attrition, positive = "Yes")
conf_final
# Confusion Matrix and Statistics
#
# Reference
# Prediction No Yes
# No 2848 171
# Yes 850 540
#
# Accuracy : 0.7684
# 95% CI : (0.7557, 0.7808)
# No Information Rate : 0.8387
# P-Value [Acc > NIR] : 1
#
# Kappa : 0.3822
# Mcnemar's Test P-Value : <2e-16
#
# Sensitivity : 0.7595
# Specificity : 0.7701
# Pos Pred Value : 0.3885
# Neg Pred Value : 0.9434
# Prevalence : 0.1613
# Detection Rate : 0.1225
# Detection Prevalence : 0.3153
# Balanced Accuracy : 0.7648
#
# 'Positive' Class : Yes
#

# Plot sensitivity / specificity / accuracy against the cutoff sweep.
plot(s, OUT[,1],xlab="Cutoff",ylab="Value",cex.lab=1.5,cex.axis=1.5,ylim=c(0,1),type="l",lwd=2,axes=FALSE,col=2)
axis(1,seq(0,1,length=5),seq(0,1,length=5),cex.lab=1.5)
axis(2,seq(0,1,length=5),seq(0,1,length=5),cex.lab=1.5)
lines(s,OUT[,2],col="darkgreen",lwd=2)
lines(s,OUT[,3],col=4,lwd=2)
box()
legend(0,.50,col=c(2,"darkgreen",4,"darkred"),lwd=c(2,2,2,2),c("Sensitivity","Specificity","Accuracy"))

# Let's choose the optimum cutoff value of 0.1616 for final model
test_cutoff_churn <- factor(ifelse(Predict_1 >=op_p_cutoff, "Yes", "No"))
conf_final <- confusionMatrix(test_cutoff_churn, test_actual_attrition, positive = "Yes")
acc <- conf_final$overall[1]
sens <- conf_final$byClass[1]
spec <- conf_final$byClass[2]
acc #0.7684
sens #0.7595
spec #0.7701

##################################################################################################
### KS -statistic - Test Data ######
# BUG FIX: the two conversions below were swapped in the original --
# "test_cutoff_churn" (the PREDICTION vector fed to ROCR) was built from the
# actual labels, and "test_actual_churn" (the LABEL vector) was built from the
# predictions. ROCR's prediction() expects prediction(predictions, labels).
test_cutoff_churn <- ifelse(test_predict_attrition=="Yes",1,0)
test_actual_churn <- ifelse(test_actual_attrition=="Yes",1,0)
decile<-c(1:10)
library(ROCR)
#on testing data
pred_object_test<- prediction(test_cutoff_churn, test_actual_churn)
performance_measures_test<- performance(pred_object_test, "tpr", "fpr")
ks_table_test <- attr(performance_measures_test, "y.values")[[1]] -
  (attr(performance_measures_test, "x.values")[[1]])
# KS statistic (the 0.3318479 previously reported here was computed with the
# swapped inputs; re-run to refresh)
max(ks_table_test)

####################################################################
# Lift & Gain Chart
# plotting the lift chart
# Loading dplyr package
require(dplyr)
library(dplyr)
# lift: bucket observations into `groups` deciles by descending predicted
# probability and compute cumulative response, gain (%) and cumulative lift.
lift <- function(labels , predicted_prob,groups=10) {
  if(is.factor(labels)) labels <- as.integer(as.character(labels ))
  if(is.factor(predicted_prob)) predicted_prob <- as.integer(as.character(predicted_prob))
  helper = data.frame(cbind(labels , predicted_prob))
  helper[,"bucket"] = ntile(-helper[,"predicted_prob"], groups)
  gaintable = helper %>% group_by(bucket) %>%
    summarise_at(vars(labels ), funs(total = n(), totalresp=sum(., na.rm = TRUE))) %>%
    mutate(Cumresp = cumsum(totalresp),
           Gain=Cumresp/sum(totalresp)*100,
           Cumlift=Gain/(bucket*(100/groups)))
  return(gaintable)
}
Churn_decile = lift(test_actual_churn, Predict_1, groups = 10)
Churn_decile
Churn_decile$Gain
Churn_decile$Cumlift
Churn_decile$total
#each bucket indicates the respective decile
plot(x=Churn_decile$Cumlift,y=Churn_decile$bucket,type = 'o')
plot(x=Churn_decile$Gain,y=Churn_decile$bucket,type ='o')
plot(x=Churn_decile$Gain,y=Churn_decile$Cumlift,type ='o')
plot(x=max(decile),y=max(ks_table_test),type = 'o')
#From the above plots obtained, it is clear that the model predicts attrition more accurately as most of the values are accurately predicted by the 4th decile.
#From the optimum probability cut-off (0.1616) and the ROC curve obtained, it is clear that the model is the best fit when it comes to accuracy.
#From the gain and lift charts obtained, it is clear that the model has increasing gain and decreasing lift.
#It is clear that the model predicts attrition more accurately as most of the values are accurately predicted by the 4th decile.
6459a267104c2d1dc3f7b8bc3f07f7a25008e993
bb34abb58df1d4d9ef8609a050dffad197256bb4
/R/mboost_diff_Rsq.R
467fd2794a6271695c69309781e377b5297264f1
[]
no_license
hanfu-bios/varsel
6537cdb12a1dc65ac7d20ed73f62021ba4ccc413
1890077d8cbe607a1142b1dd83c9aca264a1f05a
refs/heads/master
2021-09-07T15:32:52.249826
2018-02-25T04:18:32
2018-02-25T04:18:32
103,572,631
0
0
null
null
null
null
UTF-8
R
false
false
2,233
r
mboost_diff_Rsq.R
#' Variable-importance statistic based on drop-one R-squared differences
#' for a model-based boosting (mboost) fit.
#'
#' @param Xaug Augmented design matrix/data frame (original + knockoff
#'   columns, so ncol(Xaug) = 2 * p).
#' @param y Response vector.
#' @param max.mstop Upper bound on boosting iterations searched by cvrisk.
#' @param bl Base learner passed to mboost (match.arg-style choices).
#' @param cv.fold Number of cross-validation folds.
#' @param family mboost family object; its @name slot is inspected to decide
#'   how to coerce y.
#' @return Numeric vector Z of length 2*p: |R^2(full) - R^2(without col j)|.
stat.mboost_diff_Rsq <- function(Xaug, y, max.mstop = 100,
                                 bl = c("bbs", "bols", "btree"),
                                 cv.fold = 5, family = Gaussian()){
  p = ncol(Xaug) / 2
  Xaug = as.data.frame(Xaug)
  # Classify the response type from the family's name string.
  check.dist <- function(x) grepl(x,family@name)
  if (any(unlist(lapply(c("Ada","Binomial","AUC"),check.dist)))) response.type = "binary"
  else if (any(unlist(lapply(c("Squared Error","Huber","Absolute Err"),check.dist)))) response.type = "continuous"
  else if (any(unlist(lapply(c("Poisson","Gamma","Multinomial"),check.dist)))) response.type = "discrete"
  else if (any(unlist(lapply(c("Cox","Weibull","Log Logistic","Lognormal","Gehan","Concordance Probability"),check.dist)))) response.type = "survival"
  else stop("unknown family")
  if (response.type %in% c("continuous","discrete")){
    Xaug$y = as.vector(y)
  } else if (response.type == "binary"){
    Xaug$y = as.factor(y)
  } else { # survival
    # BUG FIX: error() is not an R function -- signal with stop().
    stop("R-square for survival data still under development")
  }
  # Fit once on the full augmented data to pick the CV-optimal mstop.
  model = mboost(y ~ ., data = Xaug,
                 control = boost_control(mstop = max.mstop),
                 baselearner = bl, family = family)
  cv10f = cv(model.weights(model), type="kfold", B = cv.fold)
  cvm = cvrisk(model, folds = cv10f, papply = mclapply)
  best.mstop = mstop(cvm)
  # Cross-validated predictions of the full model -> baseline R^2.
  best.fit = cv.mboost(Xaug, y, best.mstop, family = family,
                       baselearner = bl, cv.fold = cv.fold)
  mboost.mse = mean((y-best.fit)^2)
  Rsq = mse2Rsq(mboost.mse, y)
  # Drop each of the 2p predictor columns in turn (y is the last column of
  # Xaug, so columns 1:(2p) are the predictors) and record the R^2 loss.
  mse_j = NULL
  Rsq_j = NULL
  for (j in 1:(2*p)){
    best.pred1 = cv.mboost(Xaug[,-j], y, best.mstop, family = family,
                           baselearner = bl, cv.fold = cv.fold)
    mse_j[j] = mean((best.pred1 - y)^2)
    Rsq_j[j] = mse2Rsq(mse_j[j], y)
  }
  # Explicit final expression so the statistic is returned visibly
  # (the original ended with an assignment, which returns invisibly).
  abs(Rsq - Rsq_j)
}

#' Convert a mean squared error into an R-squared value.
#' BUG FIX: the original referenced a free global `n`; the sample size is
#' length(y).
mse2Rsq <- function(mse, y) 1 - mse*length(y)/sum((y-mean(y))^2)

#' Cross-validated out-of-fold predictions from an mboost fit.
#'
#' @param Xaug Data frame that already contains the response column `y`.
#' @param y Response vector (used only for its length via Xaug).
#' @param mstop Number of boosting iterations.
#' @param family mboost family object.
#' @param baselearner Base learner name.
#' @param cv.fold Number of folds.
#' @return Numeric vector of out-of-fold predictions, one per row of Xaug.
cv.mboost <- function(Xaug, y, mstop, family, baselearner = "bbs", cv.fold = 5){
  # BUG FIX: the original used an undefined global `n`; derive it locally.
  n <- nrow(Xaug)
  # Random fold assignment (note: not seeded here; callers control the seed).
  folds_i = sample(rep(1:cv.fold, length.out = n))
  X.pred2 = rep(0,n)
  for (k2 in 1:cv.fold) {
    test_i = which(folds_i == k2)
    Xtr = Xaug[-test_i, ]
    Xt = Xaug[test_i, ]
    fit1 = mboost(y ~ ., data = Xtr,
                  control = boost_control(mstop = mstop),
                  baselearner = baselearner, family = family)
    X.pred2[test_i] = predict(fit1, newdata = subset(Xt,select=-y), type = "response")
  }
  X.pred2
}
e89e6a291ded54559e571b81ff6ab4908551ab8d
7331b465dbed2459478e719a81eac3e15179aa7f
/ejemplo2.R
40282007047ea736806b023b2c34b65ae8d37f90
[]
no_license
920052859/Clases_Progra_R
572d6b73e0b15e872c75ab97263f11b31acc0e0e
80dc895cc13a074e9809e14e9d4fa0687490e1fb
refs/heads/master
2022-10-18T15:19:23.500925
2020-06-08T18:36:56
2020-06-08T18:36:56
null
0
0
null
null
null
null
UTF-8
R
false
false
444
r
ejemplo2.R
# Teaching script: working directory, help pages, vectors and random draws.
# NOTE(review): the setwd() path is machine-specific and will fail elsewhere.
getwd()
setwd("C:/Users/DANIPZA/Desktop/pylinR/Rclass")
dir.create("corian")
dir()
help("log")
log(8,base = 2)
# build vectors and combine them
vec1 <- 1:5
vec2 <- c(1,4,5,5.7)
vec3 <- c(vec1,vec2)
help("rnorm")
# five normal draws, then their mean computed by hand (sum / 5)
vect4 <- rnorm(5,mean = 5,sd = 2)
vect5 <- sum(vect4)
vect5 <- vect5/5
help("runif")
vec6 <- runif(5,min = 4,max = 7)
help("rchisq")
help("seq")
seq(8,100) # sequence
help("rep")
rep(9,9) # replicate
getwd()
setwd("C:/Users/DANIPZA/Desktop/pylinR/Rclass")
24fd6551d3824d7bde0928bbe7e220ac6415f208
26e61f22191fb048d9e1eb1074afe13e2a3cbf69
/getData.R
5adee4ee2ec51d909c264e786667957aea751824
[]
no_license
waleedayoub/nhl
14e3edfb223cf16987b6731655ac2c8eb7934740
8975b394097cab2ebfb4dd6318b232997aecd3a3
refs/heads/master
2020-05-28T04:06:24.969981
2015-06-05T12:07:15
2015-06-05T12:07:15
35,517,938
0
0
null
2015-06-05T13:14:23
2015-05-12T23:36:07
R
UTF-8
R
false
false
1,110
r
getData.R
# Exploratory script: pull NHL play-by-play data with nhlscrapr, isolate the
# 2014/2015 playoff games and inspect goal events.
setwd('./development/R/nhl/')
library(nhlscrapr)
library(dplyr)

all.games <- full.game.database()
table(all.games$season,all.games$session)
distinct(select(all.games,session))
distinct(select(all.games,season))

# select only the playoff games for 2014/2015 season
filter(all.games, session=='Playoffs', season==20142015)
playoffgames <- filter(all.games, season == 20142015, session=='Playoffs')
head(playoffgames)

# download all the game info for playoff games
compile.all.games(new.game.table=playoffgames)

# figure out how to isolate just the info required for goals/assists
# then iterate through each file and process
load("./nhlr-data/20142015-30216-processed.RData")
game.info$date
summary(game.info)
head(game.info$playbyplay)
# BUG FIX: the original line `game.info$playbyplay[[:3,4]]` was a syntax error
# that prevented this file from being sourced at all; peek at the first rows
# of column 4 instead (presumed intent -- confirm against the data).
game.info$playbyplay[1:3, 4]
summary(game.info$playbyplay)
head(game.info$playbyplay['etype'])
distinct(game.info$playbyplay['etype'])

goaldata <- filter(game.info$playbyplay,etype=='GOAL')
head(goaldata)
goaldata[1,]
summary(goaldata)
goaldata

# create a table with pool participant to player lookup
# merge the 2 tables
# plot the cumulative point haul by day
c549b4618a51fef24a513a013bc0432ac9d88a05
02b127b8e92f7c2af1917c722045b083142d802e
/helpers.R
25639c2ed441c925823010463c3e1f5adf97c720
[]
no_license
beegieb/BF4StatsApp
769b3a28bcad58dc160210ec56791ad3674b3b63
eeb53cd01bf526a40e0293bf7b65716ded593dc6
refs/heads/master
2021-01-19T06:27:20.213286
2014-06-23T05:39:07
2014-06-23T05:39:07
null
0
0
null
null
null
null
UTF-8
R
false
false
9,914
r
helpers.R
library(caret) library(shiny) library(rjson) platforms <- c("PS3", "Xbox 360") ovoPlatforms <- c("PC", "PS4", "Xbox One", "PS3", "Xbox 360") mapNames <- read.csv("data/mapNames.csv")$name platMap <- function(plat){ switch(plat, "PS3" = "ps3", "PS4" = "ps4", "Xbox 360" = "xbox", "Xbox One" = "xone", "PC" = "pc") } getPlayerStats <- function(name, plat) { bf4statsurl <- "http://api.bf4stats.com/api/playerInfo" opt <- "stats,extra,vehicleCategory,weaponCategory" name <- gsub(" ", "+", name) query <- paste0("?plat=", plat, "&name=", name, "&opt=", opt) player.json <- tryCatch( readLines(paste0(bf4statsurl, query)), error=function(e) "failed") player.json } genPlayerStatus <- function(formInput, plat, playerData){ if (formInput == "") { NULL } else if (is.null(playerData)) { p(paste("Failed to find", formInput), style="color:red") } else if (is.null(playerData$stats)) { name <- gsub(" ", "%20", formInput) url <- paste("http://bf4stats.com", plat, name, sep="/") p("Player stats are stale, please browse to ", a(href=url, url), " to refresh the players stats", style="color:orange") } else { p("Found!", style="color:green") } } genPlayerInput <- function(playerName, platform){ if (playerName == "") { NULL } else { pj <- getPlayerStats(playerName, platform) if (pj == "failed") { NULL } else { fromJSON(pj) } } } nullPlayer <- data.frame(rank=0, skill=0, kdr=0, wlr=0, spm=0, gspm=0, kpm=0, sfpm=0, hkp=0, khp=0, accuracy=0, timePlayed=0, kills=0, deaths=0, headshots=0, shotsFired=0, shotsHit=0, suppressionAssists=0, avengerKills=0, saviorKills=0, nemesisKills=0, numRounds=0, roundsFinished=0, numWins=0, numLosses=0, killStreakBonus=0, nemesisStreak=0, resupplies=0, repairs=0, heals=0, revives=0, longestHeadshot=0, flagDefend=0, flagCaptures=0, killAssists=0, vehicleDestroyed=0, vehicleDamage=0, dogtagsTaken=0, score.total=0, score.conquest=0, score.award=0, score.bonus=0, score.unlock=0, score.vehicle=0, score.team=0, score.squad=0, score.general=0, score.rank=0, 
score.combat=0, score.assault=0, score.engineer=0, score.support=0, score.recon=0, score.commander=0, time.assault=0, time.engineer=0, time.support=0, time.recon=0, time.commander=0, time.vehicle=0, time.vehicle.precent=0, time.weapon=0, time.weapon.percent=0, stars.assault=0, stars.engineer=0, stars.support=0, stars.recon=0, stars.commander=0, spm.assault=0, spm.engineer=0, spm.support=0, spm.recon=0, spm.commander=0, kills.weapon=0, kills.weapon.percent=0, kills.vehicle=0, kills.vehicle.percent=0, kpm.weapon=0, kpm.vehicle=0, ribbons=0, medals=0 ) playerDataToDF <- function(pj) { player <- pj$player stats <- pj$stats scores <- stats$scores kits <- stats$kits extra <- stats$extra weap <- pj$weaponCategory vehi <- pj$vehicleCategory data.frame( rank = stats$rank, skill = stats$skill, kdr = extra$kdr, wlr = extra$wlr, spm = extra$spm, gspm = extra$gspm, kpm = extra$kpm, sfpm = extra$sfpm, hkp = extra$hkp, khp = extra$khp, accuracy = extra$accuracy, timePlayed = stats$timePlayed, kills = stats$kills, deaths = stats$deaths, headshots = stats$headshots, shotsFired = stats$shotsFired, shotsHit = stats$shotsHit, suppressionAssists = stats$suppressionAssists, avengerKills = stats$avengerKills, saviorKills = stats$saviorKills, nemesisKills = stats$nemesisKills, numRounds = stats$numRounds, roundsFinished = extra$roundsFinished, numWins = stats$numWins, numLosses = stats$numLosses, killStreakBonus = stats$killStreakBonus, nemesisStreak = stats$nemesisStreak, resupplies = stats$resupplies, repairs = stats$repairs, heals = stats$heals, revives = stats$revives, longestHeadshot = stats$longestHeadshot, flagDefend = stats$flagDefend, flagCaptures = stats$flagCaptures, killAssists = stats$killAssists, vehicleDestroyed = stats$vehiclesDestroyed, vehicleDamage = stats$vehicleDamage, dogtagsTaken = stats$dogtagsTaken, score.total = scores$score, score.conquest = stats$mode[[1]]$score, score.award = scores$award, score.bonus = scores$bonus, score.unlock = scores$unlock, score.vehicle 
= scores$vehicle, score.team = scores$team, score.squad = scores$squad, score.general = scores$general, score.rank = scores$rankScore, score.combat = scores$combatScore, score.assault = kits$assault$score, score.engineer = kits$engineer$score, score.support = kits$support$score, score.recon = kits$recon$score, score.commander = kits$commander$score, time.assault = kits$assault$time, time.engineer = kits$engineer$time, time.support = kits$support$time, time.recon = kits$recon$time, time.commander = kits$commander$time, time.vehicle = extra$vehicleTime, time.vehicle.precent = extra$vehTimeP, time.weapon = extra$weaponTime, time.weapon.percent = extra$weaTimeP, stars.assault = kits$assault$stars, stars.engineer = kits$engineer$stars, stars.support = kits$support$stars, stars.recon = kits$recon$stars, stars.commander = kits$commander$stars, spm.assault = kits$assault$spm, spm.engineer = kits$engineer$spm, spm.support = kits$support$spm, spm.recon = kits$recon$spm, spm.commander = kits$commander$spm, kills.weapon = extra$weaKills, kills.weapon.percent = extra$weaKillsP, kills.vehicle = extra$vehKills, kills.vehicle.percent = extra$vehKillsP, kpm.weapon = extra$weaKpm, kpm.vehicle = extra$vehKpm, ribbons = extra$ribbons, medals = extra$medals ) } getPlayerDF <- function(pj) { if (is.null(pj)) { nullPlayer } else if (is.null(pj$stats)) { nullPlayer } else { playerDataToDF(pj) } } getTeamAvg <- function(team) { tot <- nullPlayer for (p in team) { tot <- tot + p } tot / 12 } predictResult <- function(team1, team2, map, model) { d <- getTeamAvg(team1) - getTeamAvg(team2) d$map <- map predict(model, d, type="response") } getOVOResult <- function(p1, p2, pred, p1Name, p2Name) { if (is.null(p1) | is.null(p2)) { return() } rp <- round(pred*100, 2) if (pred < 0.5) { winner <- p1Name scrub <- p2Name rp <- 100 - rp } else if (pred > 0.5) { winner <- p2Name scrub <- p1Name } else { return("WTF ... netcode!? 
You kill trade!") } getWinMSG(winner, scrub, rp) } getWinMSG <- function(winner, loser, confidence) { msgs <- c( paste0(loser, " was boned by ", winner, ".... no lube and ", confidence, "% hard"), paste0(loser, " is such a scrub... ", winner, " is MLG PRO! I'm ", confidence, "% certain"), paste0(loser, " is ", confidence, "% lady and ", winner, " is taking dat ass"), paste0(round(confidence), " out of 100 DICE employees agree, ", loser, " gets rekt by ", winner), paste0(winner, " quenched the thirst of ", round(confidence), " African children", " with the tears of ", loser), paste0(loser, " calls the whaaambulance ", confidence, "% percent of the time while ", winner, " eats a carrot."), paste0("Maybe if ", loser, " spent ", confidence, "% less time playing, ", "'just for fun' they could avoid getting wrecked by ", winner), paste0(round(confidence), " alzheimers patients remembered how bad ", loser, " is after that crushing defeat from ", winner), paste0("Getting dragged through ", round(confidence), "m of fire is less painful", " than the beatdown ", winner, " gave ", loser), paste0(round(confidence), " atheists turned to religion after ", winner, "'s ", "godly performance when beating ", loser), paste0(loser, " is like Leonardo DiCrapio in Titanic, while ", winner, " swims away and marries a $", round(confidence), "million richman"), paste0("According to Wikipedia ", loser, " died at the majestic hands of ", winner, "... 
a total of ", round(confidence), " sources agree"), paste0(loser, " is hearing ", round(confidence), " fat ladies sing because he got owned by ", winner), paste0(loser, " changed sexes after being worked over by ", winner), paste0("Mommy told ", loser, " there is a ", confidence, "% chance ", winner, " is his daddy"), paste0("There is a ", confidence, "% chance ", loser, " is pregnant after ", winner, " was done with them"), paste0("The results are in: God favours ", winner, " ", confidence, "% more than ", loser), paste0(loser, "'s tight end got loosened by ", confidence, "% after ", winner, " got done with them"), paste0(confidence, "% of lesbians said they would gladly chow down on ", loser, "'s box after that extremely feminine display against ", winner), paste0("There's a ", confidence, "% chance ", loser, "'s girlfriend will cheat ", "after witnessing ", winner, "'s glorious performance"), paste0("There is a ", confidence, "% chance ", loser, " is the taker and ", winner, " is the giver"), paste0(loser, "'s life insurance fee just increased by ", confidence, "% after ", winner, " exposed how much ", loser, " is prone to disaster"), paste0("Someone call an ambulance because ", winner, " just put ", round(confidence), " bullets into ", loser, "'s chest") ) sample(msgs, 1) }
3f5249c430684102cc2e6f0aee2a8853cb248dc3
e01763b11db2185c5c3f233dea1802f8f84c21c5
/script-prepadonnees.R
1f47cfca0423bac9aed9e386bc08f7aaae936271
[]
no_license
rxlacroix/masterthesis
de118ff9bf14113a0dfbd8595591cba57370fe94
8c6bd4e322a7bb355060c1b5b06e0dabd2ef35b2
refs/heads/master
2021-01-19T17:38:22.327208
2017-08-22T15:31:01
2017-08-22T15:31:01
101,077,914
0
0
null
null
null
null
UTF-8
R
false
false
23,684
r
script-prepadonnees.R
setwd("D:/Switchdrive/Mémoire/DATA") list.files() library(stringr) ############################################################################################### ############################################################################################### # 1. Fichier des applicants par ville via subset et correspondance NUTS3-FUA ############################################################################################### ############################################################################################### # charger REGPAT-Inventeurs inventeurs <- read.csv("201602_EPO_Inv_reg.txt", sep= "|") inventeursEPO_RA <- subset(inventeurs, Reg_code=="FR711"| Reg_code=="FR712" | Reg_code=="FR713"| Reg_code=="FR714" | Reg_code=="FR715" | Reg_code=="FR716") write.csv(file="inventeursEPO_RA.csv", inventeursEPO_RA) patipc <- read.csv("201307_Patents_IPC.txt", sep= "|") # API Google ne gère pas les adresses avec des accents, il faut donc les enlever # fonction qui supprime les accents inventeursEPO_RA$Address <- gsub("é","e",inventeursEPO_RA$Address) inventeursEPO_RA$Address <- gsub("è","e",inventeursEPO_RA$Address) inventeursEPO_RA$Address <- gsub("â","a",inventeursEPO_RA$Address) inventeursEPO_RA$Address <- gsub("ô","o",inventeursEPO_RA$Address) inventeursEPO_RA$Address <- gsub("ê","e",inventeursEPO_RA$Address) inventeursEPO_RA$Address <- gsub("û","u",inventeursEPO_RA$Address) inventeursEPO_RA$Address <- gsub("ï","i",inventeursEPO_RA$Address) inventeursEPO_RA$Address <- gsub("î","i",inventeursEPO_RA$Address) Unaccent <- function(text) { text <- gsub("['`^~\"]", " ", text) text <- iconv(text, to="ASCII//TRANSLIT//IGNORE") text <- gsub("['`^~\"]", "", text) return(text) } # execution de la fonction sur la colonne address inventeursEPO_RA$adress2 <- Unaccent(inventeursEPO_RA$Address) # on enleve l'identifiant du pays du code postal qui gene souvent inventeursEPO_RA$adress2 <- gsub("F-", "", inventeursEPO_RA$adress2) # fonction de geolocalisation par l'adresse via 
API Google geocodeAdddress <- function(address) { require(RJSONIO) url <- "http://maps.google.com/maps/api/geocode/json?address=" url <- URLencode(paste(url, address, sep = "", key="AIzaSyCsjDAVV5C2cVMbGHlOVLT1JXcMI029g30")) x <- fromJSON(url, simplify = FALSE) if (x$status == "OK") { out <- c(x$results[[1]]$geometry$location$lng, x$results[[1]]$geometry$location$lat) } else { out <- NA } Sys.sleep(0.20) # API ne peut gérer que 5 requetes par seconde out } # pour i in 1:nombre d'observations, on applique la fonction pour la longitude for (i in 1001:1500) { inventeursEPO_RA$lng[i] <- (as.matrix(geocodeAdddress(inventeursEPO_RA$Address[i])))[1] } # puis pour la latitude for (i in 1000:1005){ inventeursEPO_RA$lat[i] <- (as.matrix(geocodeAdddress(inventeursEPO_RA$Address[i])))[2] } write.csv(file="inventeursEPO_RA.csv", inventeursEPO_RA) # ne garder que les adresses européennes regeuroapp <- subset(regapplicants, Ctry_code == "FI"|Ctry_code == "CH"|Ctry_code == "ES"| Ctry_code == "IT"|Ctry_code == "FR"|Ctry_code == "GB"| Ctry_code == "DE"|Ctry_code == "BE"|Ctry_code == "PT"| Ctry_code == "SE"|Ctry_code == "NO"|Ctry_code == "IE"| Ctry_code == "GR"|Ctry_code == "DK"|Ctry_code == "AT"| Ctry_code == "BG"|Ctry_code == "CZ"|Ctry_code == "EE"| Ctry_code == "HR"|Ctry_code == "HU"|Ctry_code == "LT"| Ctry_code == "NL"|Ctry_code == "PL"|Ctry_code == "RO"| Ctry_code == "SI"|Ctry_code == "SK") # charger correspondance FUA-NUTS3 nutsaero <- read.csv("final_nuts_aero_3.csv", sep = ",") # renomme la deuxième colonne pour jointure colnames(nutsaero)[2] <- "Reg_code" # fusionner sur l'attribut Reg_code regeuroappvilles <- merge(regeuroapp, nutsaero, by="Reg_code") # supprimer les colonnes inutiles regeuroappvilles <- regeuroappvilles[,- c(9:10)] # statistiques descriptives summary(regeuroappvilles, maxsum = 11) # test sur une seule ville (Lyon, code=LYS) brevetslyon <- subset(regeuroappvilles, CAERO == "LYS") summary(brevetslyon, maxsum = 11) 
############################################################################################### ############################################################################################### # 2. Fichier des inventeurs par ville via subset et correspondance NUTS3-FUA ############################################################################################### ############################################################################################### # charger REGPAT-Inventeurs reginv<- read.csv("201602_EPO_Inv_reg.txt", sep= "|") # ne garder que les adresses européennes regeuroinv <- subset(reginv, Ctry_code == "FI"|Ctry_code == "CH"|Ctry_code == "ES"| Ctry_code == "IT"|Ctry_code == "FR"|Ctry_code == "GB"| Ctry_code == "DE"|Ctry_code == "BE"|Ctry_code == "PT"| Ctry_code == "SE"|Ctry_code == "NO"|Ctry_code == "IE"| Ctry_code == "GR"|Ctry_code == "DK"|Ctry_code == "AT"| Ctry_code == "BG"|Ctry_code == "CZ"|Ctry_code == "EE"| Ctry_code == "HR"|Ctry_code == "HU"|Ctry_code == "LT"| Ctry_code == "NL"|Ctry_code == "PL"|Ctry_code == "RO"| Ctry_code == "SI"|Ctry_code == "SK") # charger correspondance FUA-NUTS3 nutsaero <- read.csv("final_nuts_aero_3.csv", sep = ";") # renomme la deuxième colonne pour jointure colnames(nutsaero)[2] <- "Reg_code" # fusionner sur l'attribut Reg_code regeuroinvvilles <- merge(regeuroinv, nutsaero, by="Reg_code") # supprimer les colonnes inutiles regeuroinvvilles <- regeuroinvvilles[,- c(9:10)] # statistiques descriptives summary(regeuroinvvilles, maxsum = 11) # test sur une seule ville (Lyon, code=LYS) invlyon <- subset(regeuroinvvilles, CAERO == "LYS") summary(brevetslyon, maxsum = 11) ############################################################################################### ############################################################################################### # 3. 
Fichier des localisations des inventeurs via géolocalisation API Google # test sur la région Rhône-Alpes ############################################################################################### ############################################################################################### regfranceinv <- subset(reginv, Ctry_code == "FR") regRAinv <- subset(regfranceinv, Reg_code == "FR711"|Reg_code == "FR712"|Reg_code == "FR713"|Reg_code == "FR714" |Reg_code == "FR715"|Reg_code == "FR716"|Reg_code == "FR717"|Reg_code == "FR718") # si besoin autre fichier # inventeurgeo <- subset(reginv.....) # API Google ne gère pas les adresses avec des accents, il faut donc les enlever # fonction qui supprime les accents Unaccent <- function(text) { text <- gsub("['`^~\"]", " ", text) text <- iconv(text, to="ASCII//TRANSLIT//IGNORE") text <- gsub("['`^~\"]", "", text) return(text) } # exécution de la fonction sur la colonne address regRAinv$adress2 <- Unaccent(regRAinv$Address) # on enlève l'identifiant du pays du code postal qui gene souvent regRAinv$adress2 <- gsub("F-", "", regRAinv$adress2) # fonction de géolocalisation par l'adresse via API Google geocodeAdddress <- function(address) { require(RJSONIO) url <- "http://maps.google.com/maps/api/geocode/json?address=" url <- URLencode(paste(url, address, sep = "")) x <- fromJSON(url, simplify = FALSE) if (x$status == "OK") { out <- c(x$results[[1]]$geometry$location$lng, x$results[[1]]$geometry$location$lat) } else { out <- NA } Sys.sleep(0.20) # API ne peut gérer que 5 requetes par seconde out } # pour i in 1:nombre d'observations, on applique la fonction pour la longitude for (i in 1:20) { regRAinv$lng[i] <- (as.matrix(geocodeAdddress(regRAinv$adress2[i])))[1] } # puis pour la latitude for (i in 1:77384){ regRAinv$lat[i] <- (as.matrix(geocodeAdddress(regRAinv$adress2[i])))[2] } #################################################################################### # travail sur les citations citationsEPO <- 
read.csv("201602_EP_Citations.txt", sep= "|") regeuroapp <- read.csv ("regeuroapp.csv") nutsaero <- read.csv("final_nuts_aero_3.csv", sep = ",") colnames(nutsaero)[2] <- "Reg_code" regeuroappvilles <- merge(regeuroapp, nutsaero, by="Reg_code") regeuroappvilles <- regeuroappvilles[,- c(9:10)] write.csv(regeuroappvilles,"regeuroappvilles.csv") colnames (regeuroappvilles)[3] <- "Cited_Appln_id" citedreg <- merge(citationsEPO, regeuroappvilles, by="Cited_Appln_id", suffixes = TRUE) colnames (regeuroappvilles)[3] <- "Citing_appln_id" citreg <- merge(citedreg, regeuroappvilles, by="Citing_appln_id", suffixes = TRUE) colnames (citreg)[25] <- "Target" colnames (citreg)[37] <- "Source" citreg <- citreg[,c("Citing_appln_id","Cited_Appln_id","Source","Target")] write.csv(citreg,"citreg.csv") citreg <- read.csv("citreg.csv") links <- citreg[,c(4:5)] links <- count(links,c("Source","Target")) net <- graph_from_data_frame(d=links, directed=T) plot(net) l <- layout_with_fr(net) linksAalborg <- subset(citreg, Source=="Aalborg") linksAarhus <- subset(citreg, Source=="Aarhus") linksHelsinki <- subset(citreg, Source=="Helsinki") c <- unique(links$Source) # fusion data techfield list.files() Qualityp <- read.csv("201602_OECD_PATENT_QUALITY_EPO.txt", sep="|") colnames(Qualityp)[1] <- "Citing_appln_id" citregtf <- merge(citreg, Qualityp, by="Citing_appln_id") citregtf <- citregtf[,-c(9:28)] citregtf <- citregtf[,-2] citregtf <- citregtf[,-5] # création de chaque fichier de liens totaux, non filtrés for (i in c){ x <- subset(citregtf, Source== i) write.csv(x, file = paste("citstf",i,".csv", sep="")) } # pour chaque fichier, liens totaux for (i in c) { x <-read.csv(file = paste("citstf",i,".csv", sep="")) x <- x[,c(4:5)] x <- count(x,c("Source","Target")) write.csv(x, file=paste("linksTot",i,".csv", sep="")) } # creation de chaque fichier général pour les techfield d <- unique(citregtf$tech_field) for (i in 1:35) { x <- subset(citregtf, tech_field==i) write.csv(x, paste("citregtf", 
i,".csv", sep = "")) } # creation des links généraux pour les techfields for (i in 1:35){ x<- read.csv(paste("citregtf", i,".csv", sep = "")) x <- count(x,c("Source","Target")) write.csv(x,file=paste("linksTot",i,".csv", sep="")) } # creation pour techfield du fichier pour histogramme for (i in 1:35){ x <- read.csv(paste("citregtf", i,".csv", sep = "")) y <- count(x, "Source") y <- y[order(-y$freq),] colnames(y)[2]<-"number" colnames(y)[1]<-"letter" y$frequency <- (y$number/sum(y$number)) y$frequency <- round(y$frequency, digits=4) y$name <- y$letter y <- y[1:15,] write.table(y, file=paste("tffreq", i,".tsv", sep = ""), quote=FALSE, sep='\t', row.names = FALSE) } # creation pour techfield du fichier pour histogramme / ville + spécialisation library("plyr") library("car") for (i in c){ x <-read.csv(file = paste("citstf",i,".csv", sep="")) y <- count(x,"tech_field") colnames (y)[1]<-"letter" y$name=y$letter y$letter<-recode(y$letter,"1='A1'") y$letter<-recode(y$letter,"2='A2'") y$letter<-recode(y$letter,"3='A3'") y$letter<-recode(y$letter,"4='A4'") y$letter<-recode(y$letter,"5='A5'") y$letter<-recode(y$letter,"6='A6'") y$letter<-recode(y$letter,"7='A7'") y$letter<-recode(y$letter,"8='A8'") y$letter<-recode(y$letter,"9='B1'") y$letter<-recode(y$letter,"10='B2'") y$letter<-recode(y$letter,"11='B3'") y$letter<-recode(y$letter,"12='B4'") y$letter<-recode(y$letter,"13='B5'") y$letter<-recode(y$letter,"14='C1'") y$letter<-recode(y$letter,"15='C2'") y$letter<-recode(y$letter,"16='C3'") y$letter<-recode(y$letter,"17='C4'") y$letter<-recode(y$letter,"18='C5'") y$letter<-recode(y$letter,"19='C6'") y$letter<-recode(y$letter,"20='C7'") y$letter<-recode(y$letter,"21='C8'") y$letter<-recode(y$letter,"22='C9'") y$letter<-recode(y$letter,"23='C10'") y$letter<-recode(y$letter,"24='C11'") y$letter<-recode(y$letter,"25='D1'") y$letter<-recode(y$letter,"26='D2'") y$letter<-recode(y$letter,"27='D3'") y$letter<-recode(y$letter,"28='D4'") y$letter<-recode(y$letter,"29='D5'") 
y$letter<-recode(y$letter,"30='D6'") y$letter<-recode(y$letter,"31='D7'") y$letter<-recode(y$letter,"32='D8'") y$letter<-recode(y$letter,"33='E1'") y$letter<-recode(y$letter,"34='E2'") y$letter<-recode(y$letter,"35='E3'") y$name<-recode(y$name,"1='Electrical Machinery, apparatus, energy'") y$name<-recode(y$name,"2='Audio-Visual Technology'") y$name<-recode(y$name,"3='Telecommunications'") y$name<-recode(y$name,"4='Digital Communication'") y$name<-recode(y$name,"5='Basic Communication Processes'") y$name<-recode(y$name,"6='Computer Technology'") y$name<-recode(y$name,"7='IT Methods for Management'") y$name<-recode(y$name,"8='Semiconductors'") y$name<-recode(y$name,"9='Optics'") y$name<-recode(y$name,"10='Measurement'") y$name<-recode(y$name,"11='Analysis of Biological Materials'") y$name<-recode(y$name,"12='Control'") y$name<-recode(y$name,"13='Medical Technology'") y$name<-recode(y$name,"14='Organic Fine Chemistery'") y$name<-recode(y$name,"15='Biotechnology'") y$name<-recode(y$name,"16='Pharmaceuticals'") y$name<-recode(y$name,"17='Macromolecular chemistery, polymers'") y$name<-recode(y$name,"18='Food chemistery'") y$name<-recode(y$name,"19='Basic materials chemistery'") y$name<-recode(y$name,"20='Materials, metallurgy'") y$name<-recode(y$name,"21='Surface technology, coating'") y$name<-recode(y$name,"22='Micro-structural and nano-technology'") y$name<-recode(y$name,"23='Chemical Engineering'") y$name<-recode(y$name,"24='Environmental Technology'") y$name<-recode(y$name,"25='Handling'") y$name<-recode(y$name,"26='Machine Tools'") y$name<-recode(y$name,"27='Engines, Pumps, Turbines'") y$name<-recode(y$name,"28='Textile and Paper Machines'") y$name<-recode(y$name,"29='Other Special Machines'") y$name<-recode(y$name,"30='Thermal Processes and Apparatus'") y$name<-recode(y$name,"31='Mechanical Elements'") y$name<-recode(y$name,"32='Transport'") y$name<-recode(y$name,"33='Furniture, Games'") y$name<-recode(y$name,"34='Other Consumer Goods'") 
y$name<-recode(y$name,"35='Civil Engineering'") y <- y[order(-y$freq),] colnames (y)[2]<-"number" y$frequency <- (y$number/sum(y$number)) y$frequency <- round(y$frequency, digits=4) y$specialisation <- y$frequency y <- y[order(y$letter),] y$specialisation[y$letter=='A1']=y$specialisation[y$letter=='A1']/0.0601 y$specialisation[y$letter=='A2']=y$specialisation[y$letter=='A2']/0.0265 y$specialisation[y$letter=='A3']=y$specialisation[y$letter=='A3']/0.0258 y$specialisation[y$letter=='A4']=y$specialisation[y$letter=='A4']/0.0351 y$specialisation[y$letter=='A5']=y$specialisation[y$letter=='A5']/0.0098 y$specialisation[y$letter=='A6']=y$specialisation[y$letter=='A6']/0.0324 y$specialisation[y$letter=='A7']=y$specialisation[y$letter=='A7']/0.0042 y$specialisation[y$letter=='A8']=y$specialisation[y$letter=='A8']/0.0151 y$specialisation[y$letter=='B1']=y$specialisation[y$letter=='B1']/0.0189 y$specialisation[y$letter=='B2']=y$specialisation[y$letter=='B2']/0.0497 y$specialisation[y$letter=='B3']=y$specialisation[y$letter=='B3']/0.0079 y$specialisation[y$letter=='B4']=y$specialisation[y$letter=='B4']/0.0158 y$specialisation[y$letter=='B5']=y$specialisation[y$letter=='B5']/0.0462 y$specialisation[y$letter=='C1']=y$specialisation[y$letter=='C1']/0.0459 y$specialisation[y$letter=='C2']=y$specialisation[y$letter=='C2']/0.0283 y$specialisation[y$letter=='C3']=y$specialisation[y$letter=='C3']/0.0431 y$specialisation[y$letter=='C4']=y$specialisation[y$letter=='C4']/0.0263 y$specialisation[y$letter=='C5']=y$specialisation[y$letter=='C5']/0.0115 y$specialisation[y$letter=='C6']=y$specialisation[y$letter=='C6']/0.0283 y$specialisation[y$letter=='C7']=y$specialisation[y$letter=='C7']/0.0204 y$specialisation[y$letter=='C8']=y$specialisation[y$letter=='C8']/0.0147 y$specialisation[y$letter=='C9']=y$specialisation[y$letter=='C9']/0.0006 y$specialisation[y$letter=='C10']=y$specialisation[y$letter=='C10']/0.0306 y$specialisation[y$letter=='C11']=y$specialisation[y$letter=='C11']/0.0152 
y$specialisation[y$letter=='D1']=y$specialisation[y$letter=='D1']/0.0426 y$specialisation[y$letter=='D2']=y$specialisation[y$letter=='D2']/0.0324 y$specialisation[y$letter=='D3']=y$specialisation[y$letter=='D3']/0.0337 y$specialisation[y$letter=='D4']=y$specialisation[y$letter=='D4']/0.0268 y$specialisation[y$letter=='D5']=y$specialisation[y$letter=='D5']/0.0399 y$specialisation[y$letter=='D6']=y$specialisation[y$letter=='D6']/0.0204 y$specialisation[y$letter=='D7']=y$specialisation[y$letter=='D7']/0.0409 y$specialisation[y$letter=='D8']=y$specialisation[y$letter=='D8']/0.0575 y$specialisation[y$letter=='E1']=y$specialisation[y$letter=='E1']/0.0239 y$specialisation[y$letter=='E2']=y$specialisation[y$letter=='E2']/0.0217 y$specialisation[y$letter=='E3']=y$specialisation[y$letter=='E3']/0.0476 y$specialisation <- round(y$specialisation, digits=4) write.table(y, file=paste("tffreq",i,".tsv", sep=""), quote=FALSE, sep='\t', row.names = FALSE) } ############## Memoire ############ setwd("C:/Users/rlx/Desktop/memoire_tmp") list.files() inventeurs <- read.csv("inventeursEPO_AML.csv", sep=";") annees <- as.data.frame(table(inventeurs$Année)) colnames(annees)[1]<-"Année" colnames(annees)[2]<-"Brevets" annees$Année <- as.numeric(as.character(annees$Année)) plot(annees) lines(annees) codes_ipc <- read.csv("201602_EPO_IPC.txt", sep="|") # jointure des codes IPC pour chaque brevet dont un inventeur est localisé dans AML brevets_ipc <- merge(inventeurs, codes_ipc, by="Appln_id", all.x = TRUE) plot(brevets_ipc$IPC, ylim=c(0,1400)) write.csv(brevets_ipc, file="brevets_ipc.csv",row.names = FALSE) brevets_ipc <- read.csv("brevets_ipc_AML.csv", sep=";", quote="") ipc <- count(brevets_ipc$IPC) # on ne garde que les paires id-IPC uniques (pour éviter les doublons dues au co-brevetage) graph_brevets_ipc <- unique(brevets_ipc[c("Appln_id", "IPC")]) write.csv(graph_brevets_ipc, file="graph_brevets_ipc.csv",row.names = FALSE) graph_brevets_ipc <- read.csv("graph_brevets_ipc.csv", sep=";") 
graph_brevets_ipc$Appln_id <- as.factor(graph_brevets_ipc$Appln_id) # réseau full-digit library(igraph) bip <- graph.data.frame(graph_brevets_ipc) V(bip)$type <- V(bip)$name %in% graph_brevets_ipc[,2] ## sparse=TRUE is a good idea if you have a large matrix here v <- get.adjacency(bipartite.projection(bip)[[2]], attr="weight", sparse=FALSE) ## Need to reorder if you want it alphabetically v[order(rownames(v)), order(colnames(v))] g <- graph_from_adjacency_matrix(v, mode="undirected", weighted = TRUE, diag = TRUE) e <- cbind( get.edgelist(g) , round( E(g)$weight, 3 )) e <- as.data.frame(e) summary(g) transitivity(g) average.path.length(g) graph.density(g) summary(degree(g)) summary(graph.strength(g)) edge.betweenness.community(g) multilevel.community(g) leading.eigenvector.community(g) optimal.community(g) colnames(e)[1]<-"Source" colnames(e)[2]<-"Target" colnames(e)[3]<-"Weight" write.csv(e, "cooc_pat_fulldigits.csv") # réseau 6 digits graph_brevets_ipc$IPC <- substr(graph_brevets_ipc$IPC, 0, 6) graph_brevets_ipc$Appln_id <- as.factor(graph_brevets_ipc$Appln_id) library(igraph) bip <- graph.data.frame(graph_brevets_ipc) V(bip)$type <- V(bip)$name %in% graph_brevets_ipc[,2] ## sparse=TRUE is a good idea if you have a large matrix here v <- get.adjacency(bipartite.projection(bip)[[2]], attr="weight", sparse=FALSE) ## Need to reorder if you want it alphabetically v[order(rownames(v)), order(colnames(v))] g <- graph_from_adjacency_matrix(v, mode="undirected", weighted = TRUE, diag = TRUE) e <- cbind( get.edgelist(g) , round( E(g)$weight, 3 )) e <- as.data.frame(e) colnames(e)[1]<-"Source" colnames(e)[2]<-"Target" colnames(e)[3]<-"Weight" summary(g) transitivity(g) average.path.length(g) graph.density(g) summary(degree(g)) summary(graph.strength(g)) diameter(g) summary(clusters(g)) edge.betweenness.community(g) multilevel.community(g) leading.eigenvector.community(g) optimal.community(g) write.csv(e, "cooc_pat_6digits.csv") # réseau 4 digits graph_brevets_ipc$IPC <- 
substr(graph_brevets_ipc$IPC, 0, 4) graph_brevets_ipc$Appln_id <- as.factor(graph_brevets_ipc$Appln_id) library(igraph) bip <- graph.data.frame(graph_brevets_ipc) V(bip)$type <- V(bip)$name %in% graph_brevets_ipc[,2] ## sparse=TRUE is a good idea if you have a large matrix here v <- get.adjacency(bipartite.projection(bip)[[2]], attr="weight", sparse=FALSE) ## Need to reorder if you want it alphabetically v[order(rownames(v)), order(colnames(v))] g <- graph_from_adjacency_matrix(v, mode="undirected", weighted = TRUE, diag = TRUE) e <- cbind( get.edgelist(g) , round( E(g)$weight, 3 )) e <- as.data.frame(e) colnames(e)[1]<-"Source" colnames(e)[2]<-"Target" colnames(e)[3]<-"Weight" write.csv(e, "cooc_pat_4digits.csv") # réseau 3 digits graph_brevets_ipc$IPC <- substr(graph_brevets_ipc$IPC, 0, 3) graph_brevets_ipc$Appln_id <- as.factor(graph_brevets_ipc$Appln_id) library(igraph) bip <- graph.data.frame(graph_brevets_ipc) V(bip)$type <- V(bip)$name %in% graph_brevets_ipc[,2] ## sparse=TRUE is a good idea if you have a large matrix here v <- get.adjacency(bipartite.projection(bip)[[2]], attr="weight", sparse=FALSE) ## Need to reorder if you want it alphabetically v[order(rownames(v)), order(colnames(v))] g <- graph_from_adjacency_matrix(v, mode="undirected", weighted = TRUE, diag = TRUE) e <- cbind( get.edgelist(g) , round( E(g)$weight, 3 )) e <- as.data.frame(e) colnames(e)[1]<-"Source" colnames(e)[2]<-"Target" colnames(e)[3]<-"Weight" library(EconGeo) relatedness.cosine <- relatedness(v, method="Jaccard") summary(relatedness.cosine) relatedness.density <- (relatedness.density.int.avg(v, relatedness.cosine)) summary(relatedness.density) transitivity(g) average.path.length(g) graph.density(g) summary(degree(g)) summary(graph.strength(g)) diameter(g) clusters(g) dist <- distance_table(g) barplot(dist$res, names.arg = seq_along(dist$res)) edge.betweenness.community(g) multilevel.community(g) leading.eigenvector.community(g) optimal.community(g) write.csv(e, 
"cooc_pat_3digits.csv") # to clear now unused memory gc() # to remove all files in memory rm(list = ls(all = TRUE))
114308586d067a0f659095577bfd3abeb3527ca2
9b0051af4477ce26be267877fbc31f32f01a3a21
/cachematrix.R
e8ddb45e5ecbe62b03b20679980703248c230695
[]
no_license
jsl416/ProgrammingAssignment2
8c3cdc1a096181a6994481d57d0b30e6e13ee787
1835d0b401f6b019fb2d141502102731a38137bb
refs/heads/master
2021-01-18T09:42:14.956767
2014-06-21T19:30:35
2014-06-21T19:30:35
null
0
0
null
null
null
null
UTF-8
R
false
false
2,167
r
cachematrix.R
## --------------------------------------------------------------------------------------------------------------------- ## 2014-06-22 04:27 ## Written By: JS Lee ## Purpose of this R code: This R code is to calculate an inverse of a sqaure matrix. ## When the inverse is calculated alreadly, the cached inverse will be returned which saves caculation time. ##---------------------------------------------------------------------------------------------------------------------- ## Firstly, "makecacheMatrix" function will create a matrix, which will be saved into a matrix variable. ## Then, it will calculate the inverse of the matrix using "Sovle" function. The result will be set(cached) it as m. ## Finally, The list of four calculation results will be returned, which are set, get, setsolve, getsolve. ## (Input matrix should only be a square matrix.) makeCacheMatrix <- function(x = matrix()) { m <- NULL # initialize with NULL set <- function(y) { x <<- y m <<- NULL } get <- function() x # return x, which is the matrix setsolve <- function(solve) m <<- solve # solve function is applied to get the inverse getsolve <- function() m # return calculated m list(set = set, get = get, setsolve = setsolve, getsolve = getsolve) } ## "cacheSolve" function checks getsolve to see if there is any value. ## If there is, it will return m value, which is the inverse of the matrix. ## Otherwise, it will calculate the inverse of the matrix. cacheSolve <- function(x, ...) { m <- x$getsolve() ## Return a matrix that is the inverse of 'x' if(!is.null(m)) { message("getting cached data") return(m) } data <- x$get() #This will retrieve the matirx. m <- solve(data, ...) #It solves to get the inverse of the matrix. x$setsolve(m) #Set the inverse matrix as m m #Return m value } ## End
b91dfc1c8a0029a51e2de9f447323ef8538d9c58
a893f063e4fb685c6d959882b74f35cfc17686ee
/exercises/models01.R
2c51eaf3b5a71721884ebccacbb557e9a97580e7
[]
no_license
jmarshallnz/intro_to_r
e8ebe29fe4df3d32f9848e79eb75ead3a46bce4c
35065a9255915f5f9eec4248972560fcbe7ff991
refs/heads/main
2023-06-16T10:09:57.485226
2021-06-21T04:13:12
2021-06-27T23:29:42
332,910,494
0
0
null
null
null
null
UTF-8
R
false
false
266
r
models01.R
# Modelling Donkey bodyweights in R # Load our libraries library(tidyverse) # read in the data donkeys <- read_csv("data/donkeys/donkeys.csv") # fit a model model1 <- lm(Bodywt ~ Heartgirth + Umbgirth + Length + Height, data=donkeys) # assess it summary(model1)
6c6e40e3a9b2109c48e526588e1dddbefd35e381
e25cd2cce4a5b069406fb87520b7ef9f14a20438
/phc6711/moves/read.R
e893c060402619baa3dc6994a430f70dac2375da
[]
no_license
joebrew/uf
415ecd5bb8bad16ba3d93237d7061cbaf6c8dd9c
7ced5fe71813469c3b2ab4f5ae862664c87e36dd
refs/heads/master
2016-09-01T23:14:28.734112
2015-08-27T14:43:14
2015-08-27T14:43:14
21,254,912
0
1
null
null
null
null
UTF-8
R
false
false
2,110
r
read.R
zipfile <- "exportfeb20.zip" location <- "/home/joebrew/Documents/uf/phc6711/moves" # SET WD setwd(location) # UNZIP MASTER FILE unzip(zipfile = zipfile) # UNZIP CSV unzip(zipfile = 'csv.zip') # CD INTO THE CSV DAILY FOLDER setwd('csv/daily') # READ JUST SUMMARY setwd('summary') for (i in dir()){ assign(gsub(".csv", "", i), read.csv(i)) } # LOOP THROUGH EACH DATAFRAME AND BIND # Get list of current dataframes dfs <- names(which(sapply(.GlobalEnv, is.data.frame))) # Make master dataframe to populate master <- data.frame(Date = NA, Activity = NA, Group = NA, Duration = NA, Distance = NA, Steps = NA, Calories = NA) for (i in 1:length(dfs)){ df <- get(dfs[i]) master <- rbind(master, df) } # FORMAT MASTER # make date object master$Date <- as.Date(master$Date, format = "%m/%d/%y") # order master <- master[order(master$Date),] ######################################## # END OF DATA READ IN ######################################### # BEGINNING OF DATA VIS library(dplyr) # Get overall calories by day temp <- master %>% group_by(Date) %>% summarise(Distance = sum(Distance, na.rm = TRUE)) barplot(temp$Distance, names.arg = temp$Date, las = 3, cex.names = 0.6) # Plot duration just by type temp <- master %>% filter(Activity == "walking") %>% group_by(Date, Activity) %>% summarise(walk = sum(Duration, na.rm = TRUE)) temp$Activity <- NULL temp2 <- master %>% filter(Activity == "cycling") %>% group_by(Date, Activity) %>% summarise(bike = sum(Duration, na.rm = TRUE)) temp2$Activity <- NULL temp3 <- master %>% filter(Activity == "running") %>% group_by(Date, Activity) %>% summarise(run = sum(Duration, na.rm = TRUE)) temp3$Activity <- NULL tt <- left_join(left_join(temp, temp2), temp3) mat <- as.matrix(tt[,2:4]) mat <- t(mat) / 60 barplot(mat, beside = TRUE, legend = TRUE, col = c("red", "blue", "green"), names.arg = tt$Date, las = 3) title(main = "Minutes per day") box("plot")
f5f8b8fd799a6790c5b66d1785fc01515e12af54
7a95abd73d1ab9826e7f2bd7762f31c98bd0274f
/metacoder/inst/testfiles/centroid/AFL_centroid/centroid_valgrind_files/1615765243-test.R
59e35f965db8b7a98233c186265bfbaa626bb3f1
[ "MIT" ]
permissive
akhikolla/updatedatatype-list3
536d4e126d14ffb84bb655b8551ed5bc9b16d2c5
d1505cabc5bea8badb599bf1ed44efad5306636c
refs/heads/master
2023-03-25T09:44:15.112369
2021-03-20T15:57:10
2021-03-20T15:57:10
349,770,001
0
0
null
null
null
null
UTF-8
R
false
false
427
r
1615765243-test.R
testlist <- list(b = c(1.78388675173214e+127, -40358544454836.7, 5.34324542977894e-305, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) result <- do.call(metacoder:::centroid,testlist) str(result)
e6251ad03873ad53e1b34c00b07985b79abd4d70
02b25b1ed401fc57f4945a970d2b57f2622e7bf7
/dose_response_curve/dose_response_curve.R
128c0c5e247f9e61d020edc76cc691906fd8c939
[]
no_license
xuyanli2016spring/R_scripts_islj_project
bb361dc865e63def1061bb5dd3e1d7376b5e592c
20d9742977e0f5f31ae30af67fba8428f1c93668
refs/heads/master
2021-01-18T18:27:58.849883
2017-04-04T22:14:23
2017-04-04T22:14:23
86,855,939
0
1
null
2017-04-04T22:14:24
2017-03-31T20:01:57
R
UTF-8
R
false
false
2,472
r
dose_response_curve.R
# Name: Yanli Xu # Date: Apr 4th, 2017 # Note: This script creates a dose response curve of mouse and culture experiments. # Note: ggplot2 library is needed culture_doses <- c(0.038, 0.045, 0.050, 0.170, 0.200, 0.220, 0.270, 0.300, 0.340, 0.350, 0.360, 0.370, 0.380, 0.390, 0.400, 0.440, 0.450, 0.460, 0.500, 0.600, 0.700, 0.800, 0.900, 1.000) culture_response <- c(0.005119048, 0.006023810, 0.008464286, 0.051809524, 0.065309524, 0.071726190, 0.106851190, 0.140535714, 0.162523810, 0.177630952, 0.181666667, 0.182047619, 0.191488095, 0.207047619, 0.214642857, 0.240476190, 0.252059524, 0.268297619, 0.287845238, 0.372708333, 0.384726190, 0.408797619, 0.405619048, 0.435285714) data2 <- data.frame(culture_doses, culture_response) # for adding minor trick mark insert_minor <- function(major_labs, n_minor) {labs <- c( sapply( major_labs, function(x) c(x, rep("", 3) ) ) ) labs[1:(length(labs)-n_minor)]} ggplot() + geom_line(data = data1, aes(x = data1$mouse_doses, y = data1$mouse_response, color = "red"), linetype = 6) + geom_line(data = data2, aes(x = data2$culture_doses, y = data2$culture_response, color = "blue"), linetype = 6) + xlab("doses") + ylab("nectrig percentage") + ggtitle("mouse and culture dose response curve") + theme(axis.text.x = element_text(face = "bold", color = "red", size = 12, angle = 45), axis.text.y = element_text(face = "bold", color = "red", size = 12, angle = 45) ) + theme(axis.line = element_line(color = "black", size = 1, linetype = "solid")) + theme(legend.position="top", panel.background = element_blank(), panel.grid.minor = element_blank()) + theme(plot.title = element_text(family = "Trebuchet MS", color="black", face="bold", size=18, hjust=0)) + theme(axis.title = element_text(family = "Trebuchet MS", color="black", face="bold", size=14)) + scale_x_continuous(breaks = seq(0,1,by=0.025), labels = insert_minor(seq(0, 1, 0.1), 3), limits = c(0, 1), expand = c(0.01,0.01)) + scale_y_continuous(breaks = seq(0,0.5,by=0.025), labels = 
insert_minor(seq(0, 0.5, 0.1), 3), limits = c(0, 0.5), expand = c(0.01,0.01)) + theme(legend.title = element_text(colour="blue", size=10, face="bold"), legend.text = element_text(colour="blue", size=10, face="bold"),legend.background = element_rect(fill="lightblue", size=0.5, linetype="solid"), legend.title = element_blank()) + scale_color_manual(labels = c("mouse", "culture"), values = c("red", "blue")) + guides(color=guide_legend("Exp_Type"))
65b93e133c1879820d0ca1a49e1c60fb4fb890dc
ae29d98e457098c8241dc4ca533911927b3fe47f
/man/zoom.Rd
c29aee730b4ec7ee52139dcadd561958e66f7b38
[]
no_license
great-northern-diver/loon.ggplot
1a10c6303f05aca8d9e18431233143a1295f069c
d06a9ad6d53e9a7372745ec4c25ea24e6efc451e
refs/heads/master
2023-04-14T09:07:03.520916
2023-03-15T13:05:44
2023-03-15T13:05:44
172,743,501
23
3
null
2023-03-15T13:05:46
2019-02-26T15:59:58
R
UTF-8
R
false
true
2,518
rd
zoom.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/interactive-ggproto.R \name{zoom} \alias{zoom} \title{Zoom Plot Region} \usage{ zoom(layerId = NULL, scaleToFun = NULL) } \arguments{ \item{layerId}{numerical; which layer to scale the plot by.} \item{scaleToFun}{scale to function. See details.} } \value{ a \code{ggproto} object } \description{ Change the visible plot region by scaling to different elements of the display. } \details{ Argument \code{layerId} is used for additional plot region settings. If the \code{layerId} is set as \code{NULL} (default), the region of the interactive graphics \code{loon} will be determined by the \code{ggplot} object (i.e. \code{coord_cartesian}, \code{xlim}, etc); else one can use \code{scaleToFun} to modify the region of the layer. The \code{scaleToFun} is a function to scale the region. If it is \code{NULL} (default), based on different layers, different scale functions will be applied. For example, if the layer is the main graphic model, i.e. \code{l_plot} \code{l_hist}, then the default \code{scaleToFun} is \code{\link{l_scaleto_plot}}; else if the layer is a general \code{l_layer} widget, the default \code{scaleToFun} would be \code{\link{l_scaleto_layer}} (see \code{\link{get_activeGeomLayers}}). If it is not \code{NULL}, users can select one that precisely tailor their own problems. The table shows the available \code{scaleToFun} functions \tabular{ll}{\strong{scale to} \tab \strong{Subfunction}\cr plot \tab \code{\link{l_scaleto_plot}}\cr world \tab \code{\link{l_scaleto_world}}\cr active \tab \code{\link{l_scaleto_active}}\cr selected \tab \code{\link{l_scaleto_selected}}\cr layer \tab \code{\link{l_scaleto_layer}}\cr} Users can also supply their own function, providing its arguments match those of the functions shown in the above table. 
} \examples{ if(interactive()) { p <- l_ggplot(mtcars, mapping = aes(x = hp, y = mpg)) + geom_point(mapping = aes(color = factor(gear))) + geom_smooth(data = mtcars[mtcars$gear == 4, ], method = "lm") # a scatter plot with a fitted line on 4 gear cars p # scale to the second layer (smooth line) p + zoom(layerId = 2) # highlight the 3 gear cars # scale to the selected points p + selection(mtcars$gear == 3) + zoom(layerId = 1, scaleToFun = loon::l_scaleto_selected) } } \seealso{ \code{\link{active}}, \code{\link{linking}}, \code{\link{selection}}, \code{\link{hover}}, \code{\link{interactivity}} }
bf60f90223b21f305759bc9f5adc010c15607d09
b37f4d89507ad02ec69a38cece7fb83acf851757
/decisions.R
da712897661744712b81ae54d0ecb14f8f4dacdf
[]
no_license
ginahixx/sd3
4d58e8200c40883566abeb4d44a2d9edfbd73507
a020dd38acb83ecbd221bf369d9a76e1d879e7b3
refs/heads/master
2021-01-25T07:54:56.939514
2017-06-07T22:30:10
2017-06-07T22:30:10
93,684,888
0
0
null
null
null
null
UTF-8
R
false
false
5,161
r
decisions.R
#functions for Design Decisions section settingstablename <- "DDSettings" controlTableName <- "DesignDecisions" versionflag <- "version" appflag <- "app" #functions --------------------------------- GetMeetingIDs <- function() { # Connect to the database db <- dbConnect(MySQL(), dbname = databaseName, host = options()$mysql$host, port = options()$mysql$port, user = options()$mysql$user, password = options()$mysql$password) query <- paste0("SELECT MeetingID FROM ", settingstablename, " Group By MeetingID") #group by in case table structure changes from one line per data <- dbGetQuery(db, query) dbDisconnect(db) return (data) } GetSettingsTable <- function(id) { # Connect to the database db <- dbConnect(MySQL(), dbname = databaseName, host = options()$mysql$host, port = options()$mysql$port, user = options()$mysql$user, password = options()$mysql$password) query <- paste0("SELECT * FROM ", settingstablename, " where MeetingID = ", id) data <- dbGetQuery(db, query) dbDisconnect(db) return (data) } LoadControlData <- function(id){ db <- dbConnect(MySQL(), dbname = databaseName, host = options()$mysql$host, port = options()$mysql$port, user = options()$mysql$user, password = options()$mysql$password) #select * from DesignDecisions inner join MeetingGroups on DDID = DecisionID where MeetingID = 1 query <- paste0("SELECT * FROM ", controlTableName, " inner join MeetingGroups on DDID = DecisionID WHERE MeetingID = ", id) data <- dbGetQuery(db, query) dbDisconnect(db) return (data) } LoadComments <- function(tablename, meetingid){ db <- dbConnect(MySQL(), dbname = databaseName, host = options()$mysql$host, port = options()$mysql$port, user = options()$mysql$user, password = options()$mysql$password) query <- paste0("SELECT * FROM ", tablename, " WHERE MeetingID = ", id) data <- dbGetQuery(db, query) dbDisconnect(db) return (data) } GetCommentData <- function(tablename, selecteditem, meetingid){ # Connect to the database db <- dbConnect(MySQL(), dbname = databaseName, host = 
options()$mysql$host, port = options()$mysql$port, user = options()$mysql$user, password = options()$mysql$password) #cat(file=stderr(), "GetCommentData data has myemail as ", email, ".\n") downloadfields <- paste0(" comment, PickChoice, concat_ws(', ', firstname, organization) as 'Submitted by' ") if (selecteditem==""){ query <- paste0("SELECT ", downloadfields, " FROM users INNER JOIN (", desisiontablename, " INNER JOIN ", tablename, " ON DDID = DecisionID) ON users.email = ", tablename, ".email WHERE MeetingID = ", meetingid) }else if (selecteditem=="%"){ query <- sprintf(paste0("SELECT %s FROM users INNER JOIN (", desisiontablename, " INNER JOIN ", tablename, " ON DDID = DecisionID) ON users.email = ", tablename, ".email Where Title like '%s' and MeetingID = ", meetingid), downloadfields, selecteditem, meetingid) }else{ query <- sprintf(paste0("SELECT %s FROM users INNER JOIN (", desisiontablename, " INNER JOIN ", tablename, " ON DDID = DecisionID) ON users.email = ", tablename, ".email Where Title = '%s' and MeetingID = ", meetingid), downloadfields, selecteditem, meetingid) } if(debugmode){ cat(file=stderr(), "in GetCommentData, query is ", query, ".\n") } data <- dbGetQuery(db, query) downloadtable <<- data dbDisconnect(db) data } #AssembleCommentData(commentsTableName(), MeetingID(), topic, userInfo$user$email, input$comment) AssembleCommentData <- function(tablename, meetingid, decisiontitle, decisionid, email, comment, pickchoice){ #lookup up commentID commentCheck <- LoadUserComment(tablename, meetingid, decisiontitle, email) if(debugmode){ cat(file=stderr(), "in AssembleCommentData nrow(commentCheck) is ", nrow(commentCheck), ", length is ",length(commentCheck), ".\n") } #build data if(nrow(commentCheck)> 0){ if(debugmode){ cat(file=stderr(), "in AssembleCommentData nrow >0 section, nrow(commentCheck) is ", nrow(commentCheck), ".\n") } data <- as.list(commentCheck$DCID) }else { data <- as.list("") } names(data) <- "DCID" data$DecisionID <- decisionid 
data$MeetingID <- meetingid data$email <-email data$Comment <- comment pickoptions <- pickchoice pickstr <- paste(pickoptions, collapse = ", ") data$PickChoice <- pickstr if(debugmode){ cat(file=stderr(), "leaving AssembleCommentData.\n") } return(data) } # Text retrieval functions ---------------------------------------- #pull together parts for modal message getmodalHTML <- function(data){ c(data$ModalIntro, data$ContactInfo, "<br><br>") } GetInstructionsHTML <- function(data){ c(data$InstructionsText, data$ContactInfo, "<br><br>") } GetIntroHTML <- function(data){ c(data$IntroText, "<br>") } GetExcelLinkHTML <- function(data){ c(data$ExcelFileLink, "<br>") }
d4a0d9a623ea38b588c5b074004d71bd477388bb
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
/cran/paws.security.identity/man/wafregional_get_change_token.Rd
69c31884357cb22187c64c1d938ec4d11ab98254
[ "Apache-2.0" ]
permissive
paws-r/paws
196d42a2b9aca0e551a51ea5e6f34daca739591b
a689da2aee079391e100060524f6b973130f4e40
refs/heads/main
2023-08-18T00:33:48.538539
2023-08-09T09:31:24
2023-08-09T09:31:24
154,419,943
293
45
NOASSERTION
2023-09-14T15:31:32
2018-10-24T01:28:47
R
UTF-8
R
false
true
596
rd
wafregional_get_change_token.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/wafregional_operations.R \name{wafregional_get_change_token} \alias{wafregional_get_change_token} \title{This is AWS WAF Classic documentation} \usage{ wafregional_get_change_token() } \description{ This is \strong{AWS WAF Classic} documentation. For more information, see \href{https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html}{AWS WAF Classic} in the developer guide. See \url{https://www.paws-r-sdk.com/docs/wafregional_get_change_token/} for full documentation. } \keyword{internal}
99d22a17a414c3c8c311235d088929ccdbf5ea4d
b8f853e88d5c9f843e3ef0edc940ec1ef206e8ef
/server.R
e2b615f4741ac7904c32af54f01e1da3e2964ae0
[]
no_license
DBAJAJ89/Text_Analytics
28af6bbd0717ae9b58c921de2d4081973b70d097
bebd94013364741e65844bd5253fe43b2c12fab8
refs/heads/master
2020-03-16T06:03:10.314544
2018-06-02T11:17:27
2018-06-02T11:17:27
132,546,258
0
0
null
null
null
null
UTF-8
R
false
false
4,504
r
server.R
#---------------------------------------------------------------------# #Title: Text Analytics Assignment 2 #Authors: Deepak Bajaj, Richa Kansal, Soumya Panigrahi #Shiny App: Server.r code #---------------------------------------------------------------------# #Start of server code shinyServer(function(input, output) { #Module to input data from user # Input from user is in rective mode since the data should change with every change in input data Dataset <- reactive({ if (is.null(input$file1)) { return(NULL) } #here we have used file1 since the UI.r has the same name else{ text<- readLines(input$file1$datapath) #readline function to read the input from user text = str_replace_all(text, "<.*?>","") # stringr library has the function str_replace_all to clean the corpus text=text[text!=""] #selecting not null text return(text) # return cleaned corpus } }) # uploading the input module model = reactive({ if(input$radio==1) {model = udpipe_load_model("english-ud-2.0-170801.udpipe")} if(input$radio==2) {model = udpipe_load_model("spanish-ud-2.0-170801.udpipe")} if(input$radio==3) {model = udpipe_load_model("hindi-ud-2.0-170801.udpipe")} return(model) }) #passing the input data and english model to the UDpipe annotate function annot.obj =reactive({ x<-udpipe_annotate(model(),x=Dataset()) x<-as.data.frame(x) return(x) }) #--------------tab panel 2 outputs--------------------# # module to let the user download the annotated data as a csv input output$downloadData <- downloadHandler( filename=function(){ "annonated_data.csv" #name of the downloaded file }, content = function(file) { write.csv(annot.obj()[,-4],file,row.names=FALSE) #writing the output as a csv after removing the sentence column }) # module to display the top 100 rows of the annotated corpus. 
we have intentionally hidden the sentence field output$dout1 = renderDataTable({ if(is.null(input$file1)) {return (NULL)} #exception handler in case the file is empty else{ out=annot.obj ()[,-4] return(head(out,100)) } }) #--------------tab panel 3 outputs--------------------# #module to print a wordcloud for nouns output$plot1 = renderPlot({ if(is.null(input$file1)) {return (NULL)} #exception handler in case the file is empty else { all_nouns=annot.obj() %>% subset(.,upos %in% "NOUN") #filtering the corpus for nouns top_nouns =txt_freq(all_nouns$lemma) # count of each noun terms wordcloud(top_nouns$key,top_nouns$freq,min.freq = input$freq, max.words=input$max,colors =brewer.pal(8,"Dark2")) # plotting on a word cloud } }) #module to print a wordcloud for verbs output$plot2 = renderPlot({ if(is.null(input$file1)) {return (NULL)} #exception handler in case the file is empty else { all_verbs=annot.obj() %>% subset(.,upos %in% "VERB") #filtering the corpus for verbs top_verbs =txt_freq(all_verbs$lemma) # count of each verbs terms #head(top_verbs,10) wordcloud(top_verbs$key,top_verbs$freq,min.freq = input$freq, max.words=input$max,colors =brewer.pal(8,"Dark2")) # plotting on a word cloud } }) #--------------tab panel 4 output--------------------# output$plot3 = renderPlot({ if(is.null(input$file1)) {return (NULL)} #exception handler in case the file is empty else { data_cooc<-cooccurrence( x=subset(annot.obj(),upos %in% input$upos), #collecting required upos from user input and filtering the annonated corpus term="lemma", #paramerter to specify the extraction terms as lemma group=c("doc_id","paragraph_id","sentence_id")) # creation of co-occurrence graph wordnetwork<- head(data_cooc,50) wordnetwork<-igraph::graph_from_data_frame(wordnetwork) #plotting the graph ggraph(wordnetwork,layout="fr") + geom_edge_link(aes(width=cooc,edge_alpha=cooc),edge_colour="orange")+ geom_node_text(aes(label=name),col="darkgreen", size=4)+ theme_graph(base_family="Arial Narrow")+ 
theme(legend.position="none")+ labs(title= "Cooccurrences Plot",subtitle="Nouns & Adjectives") } }) })
00f3e22c097f8b8fe083f27e94874df5eb71ec3e
ef4b390f70ba2fbdb78c4e68ef7a8dee0eb4c917
/analysis.R
d69d53fa984c45a8ee01592eeab0eb8c698aaa9b
[]
no_license
sahNarek/CSE_270_FINAL_PROJECT
a4c30fc03e9f0107dd277638afc6e5fca81823de
b650f3a06a7ebfde0d6ae34d7a4236131d0feb2c
refs/heads/master
2021-07-12T15:28:58.884826
2020-11-21T23:58:31
2020-11-21T23:58:31
220,856,820
0
0
null
null
null
null
UTF-8
R
false
false
10,845
r
analysis.R
load("data/no_et_games.rda") load("data/et_agr_94_18.rda") load("data/legs_info.rda") load("data/et_minutes.rda") load("data/po_elos.rda") packages_list <- c("ggplot2","dplyr", "scales", "DescTools", "plotly", "stringr", "elo", "caret", "e1071") install_or_call <- function(list = packages_list){ installed <- installed.packages()[,"Package"] for( package in packages_list ){ if(!(package %in% installed)){ install.packages(package) } do.call(library, list(package)) } } install_or_call() by_id <- function(data, var, id){ return(data[data[var] == id,]) } all_games <- rbind(games, et_agr) legs_summary <- legs_info %>% group_by(TYPE) %>% summarise(COUNT = n()) pie <- plot_ly(legs_summary, labels = ~TYPE, values = ~COUNT, type = 'pie', textposition = 'inside', textinfo = 'label+percent', insidetextfont = list(color = '#FFFFFF'), hoverinfo = 'text', text = ~paste(TYPE), marker = list(line = list(color = '#FFFFFF', width = 1)), showlegend = F) %>% layout(title = "DETERMINING LEG'S WINNER IN UCL PO ROUNDS", xaxis = list(showgrid = F, zeroline = F, showticklabels = F), yaxis = list(showgrid = F, zeroline = F, showticklabels = F)) pie et_minutes <- et_minutes %>% mutate(ROUND = by_id(all_games, "GAME_ID", GAME_ID)$ROUND) box <- plot_ly(et_minutes, x = ~MINUTE, color = ~ROUND, type = "box") %>% layout(title = "GOAL SCORING MINUTES IN ALL ROUNDS OF PO", yaxis = list(showgrid = F, zeroline = F, showticklabels = F), xaxis = list(showgrid = T, zeroline = F, showticklabels = T)) box all_games <- all_games %>% mutate(FTR = ifelse(FTHG > FTAG, "H", ifelse(FTHG < FTAG, "A", "D")), HTG = (FTHG + ETHG + PTHG), ATG = (FTAG + ETAG + PTAG), GR = ifelse(HTG > ATG, "H", ifelse(HTG < ATG, "A", "D"))) games_by_team <- function(data = all_games, team, field) { var <- "HOMETEAM" if(field == "A"){ var <- "AWAYTEAM" } games <- data[data[var] == team,] return(length(games)) } games_in_et <- function(data, team){ h_games <- data %>% filter(HOMETEAM == team && TYPE == "ET" && LEG == 2) a_games <- data 
%>% filter(AWAYTEAM == team && TYPE == "ET" && LEG == 2) return(nrow(h_games) + nrow(a_games)) } standings <- function(data) { as_ht <- data %>% group_by(HOMETEAM) %>% summarise(W = sum(FTR == "H"), L = sum(FTR == "A"), D = sum(FTR == "D"), HTGF = sum(HTHG), HTGA = sum(HTAG), FTGF = sum(FTHG), FTGA = sum(FTAG), ETGF = sum(ETHG), ETGA = sum(ETAG)) as_at <- data %>% group_by(AWAYTEAM) %>% summarise(W = sum(FTR == "A"), L = sum(FTR == "H"), D = sum(FTR == "D"), HTGF = sum(HTAG), HTGA = sum(HTHG), FTGF = sum(FTAG), FTGA = sum(FTHG), ETGF = sum(ETAG), ETGA = sum(ETHG)) table <- data.frame(TEAM = as_ht$HOMETEAM, W = as_ht$W + as_at$W, D = as_ht$D + as_at$D, L = as_ht$L + as_at$L, FTGF = as_ht$FTGF + as_at$FTGF, FTGA = as_ht$FTGA + as_at$FTGA, ETGF = as_ht$ETGF + as_at$ETGF, ETGA = as_ht$ETGA + as_at$ETGA, HTGF = as_ht$HTGF + as_at$HTGF, HTGA = as_ht$HTGA + as_at$HTGA) table$M <- table$W + table$D + table$L table$POINTS <- 3 * table$W + table$D table$STGF <- table$FTGF - table$HTGF table$STGA <- table$FTGA - table$HTGA table$EGM <- sapply(table$TEAM, games_in_et, data = et_agr) return(table %>% select(TEAM, M, POINTS ,everything()) %>% arrange(desc(POINTS))) } cl_po_k <- function(table){ table <- table %>% filter(!(W == 0)) table$FTGD <- table$FTGF - table$FTGA table$ETGD <- table$ETGF - table$ETGA table$WPCT <- (table$W + round(table$D / 3)) / (table$M) table$RATIO <- table$FTGF / table$FTGA k_model <- lm(log(WPCT) ~ 0 + log(RATIO), data = table) k <- coefficients(k_model) table$EWPCT <- table$FTGF ^ k / ((table$FTGF)^k + (table$FTGA)^k) return(table) } table <- standings(data = all_games) box_1 <- plot_ly(table, x = ~M, type = "box") %>% layout(title = "NUMBER OF PO MATCHES BY TEAMS", yaxis = list(showgrid = F, zeroline = F, showticklabels = F), xaxis = list(showgrid = T, zeroline = F, showticklabels = T)) box_1 wpct_table <- cl_po_k(table) cor.test(wpct_table$WPCT, wpct_table$FTGD) # model <- lm(log(WPCT) ~ 0 + log(RATIO), data = table) # coefficients(model) # 
save(table, file = "data/po_table.rda") # write.csv(table, file = "data/po_table.csv") p <- ggplot(wpct_table, aes(x = EWPCT, y = WPCT, text = TEAM)) + geom_point() + geom_abline(intercept = 0, slope = 1, col = "red") p <- ggplotly(p)%>% layout(title = "EXPECTED WIN PERCENTAGE VS ACTUAL", yaxis = list(showgrid = F, zeroline = F, showticklabels = F), xaxis = list(showgrid = T, zeroline = F, showticklabels = T)) lucky_teams <- wpct_table %>% filter(WPCT > EWPCT) unlucky_teams <- wpct_table %>% filter(WPCT < EWPCT) expected_teams <- wpct_table %>% filter(WPCT == EWPCT) wpct_table_1 <- wpct_table %>% filter(M >= 4) p1 <- ggplot(wpct_table_1, aes(x = EWPCT, y = WPCT, text = TEAM)) + geom_point() + geom_abline(intercept = 0, slope = 1, col = "red") p1 <- ggplotly(p1)%>% layout(title = "EXPECTED WIN PERCENTAGE VS ACTUAL", yaxis = list(showgrid = F, zeroline = F, showticklabels = F), xaxis = list(showgrid = T, zeroline = F, showticklabels = T)) p1 hist <- ggplot(data=et_minutes, aes(x = MINUTE)) + geom_histogram(breaks=seq(0, 120, by=10), col="red", fill="green", alpha = .2) + labs(title="Histogram for Goal Minutes", x="Minutes", y="Number of Goals") + xlim(c(0,120)) hist_ly <- ggplotly(hist) hist_ly lucky_teams_1 <- wpct_table_1 %>% filter(WPCT > EWPCT) unlucky_teams_1 <- wpct_table_1 %>% filter(WPCT < EWPCT) expected_teams_1 <- wpct_table_1 %>% filter(WPCT == EWPCT) lucky_teams %>% anti_join(lucky_teams_1) all_time_elos <- elo.run(score(FTHG, FTAG) ~ adjust(HOMETEAM, 200) + AWAYTEAM + k(30 * (all_games$FTHG - all_games$FTAG)), data = all_games) sort(final.elos(all_time_elos), decreasing = T) make_final_elos_df <- function(final_elos){ final_elos <- as.data.frame(final_elos) final_elos$TEAM <- rownames(final_elos) colnames(final_elos) <- c("ELO", "TEAM") rownames(final_elos) <- 1:length(final_elos$TEAM) final_elos <- final_elos[c("TEAM", "ELO")] return(final_elos) } get_elos <- function(final_elos, def_elo = 1500){ starting_elos <- c() if(length(final_elos) == 0){ 
starting_elos = def_elo } else{ starting_elos <- (def_elo) + (final_elos - def_elo)/2 } return(starting_elos) } calculate_relative_elos <- function(data){ relative_df <- c() final_elos <- c() for(season in unique(data$SEASON)){ starting_elos <- get_elos(final_elos, def_elo = 1500) season_data <- data[data$SEASON == season,] season_elos <- elo.run(score(FTHG, FTAG) ~ adjust(HOMETEAM, 200) + AWAYTEAM + k(30 * (all_games$FTHG - all_games$FTAG)), data = all_games, initial.elos = starting_elos) season_final_elos <- final.elos(season_elos) season_elos <- data.frame(SEASON = season,season_elos) relative_df <- rbind(relative_df, season_elos) final_elos <- season_final_elos } final_elos <- make_final_elos_df(final_elos) return(list(relative = relative_df, elos_df = final_elos)) } games_no_rares <- all_games %>% filter(HOMETEAM %in% unique(po_elos$team),AWAYTEAM %in% unique(po_elos$team)) custom_elos <- calculate_relative_elos(data = games_no_rares) custom_elos$elos_df <- custom_elos$elos_df %>% filter(TEAM %in% wpct_table_1$TEAM) %>% arrange(desc(ELO)) custom_elos$elos_df gold_minutes <- et_minutes %>% filter(MINUTE > 90) %>% group_by(LEG_ID) %>% summarise(MINUTE = min(MINUTE)) et_minutes %>% inner_join(gold_minutes, by = c("MINUTE", "LEG_ID")) gold_goal <- function(data) { gold_minutes <- data %>% filter(MINUTE > 90) %>% group_by(LEG_ID) %>% summarise(MINUTE = min(MINUTE)) winners <- data %>% inner_join(gold_minutes, by = c("MINUTE", "LEG_ID")) return(winners) } get_elos_by_date <- function(source, game_date, game_team) { elos <- source %>% filter(team == game_team , date <= game_date ) %>% arrange(desc(date)) return(elos[1,]$rating) } neutral_games <- function(data) { agr_legs <- data %>% filter(TYPE == "AGR") elo_teams <- unique(po_elos$team) result <- c() for(i in 1:nrow(agr_legs)){ leg <- agr_legs[i,] team1 <- as.character(leg$TEAM1) team2 <- as.character(leg$TEAM2) l2date <- leg$L2D if(team1 %in% elo_teams && team2 %in% elo_teams){ elo1 <- get_elos_by_date(source = 
po_elos, game_date = l2date, game_team = team1) elo2 <- get_elos_by_date(source = po_elos, game_date = l2date, game_team = team2) wins.1 = elo.prob(elo.A = elo1, elo.B = elo2) wins.2 = 1 - wins.1 game <- data.frame(TEAM1 = team1, TEAM2 = team2, WINS.1 = wins.1, WINS.2 = wins.2, LEG_ID = leg$LEG_ID, ACTUAL_WINNER = leg$WINNER) result <- rbind(result, game) } } result$TEAM1 <- as.character(result$TEAM1) result$TEAM2 <- as.character(result$TEAM2) result <- result %>% mutate(ACTUAL.1 = ifelse(ACTUAL_WINNER == TEAM1, 1, 0), ACTUAL.2 = ifelse(ACTUAL_WINNER == TEAM2, 1, 0), NG_WINNER = ifelse(WINS.1 > WINS.2, TEAM1, TEAM2)) return(result) } rand_0_1 <- function() { return(round(runif(n = 1, min = 0, max = 1))) } simulated_games <- neutral_games(data = legs_info) simulated_games <- simulated_games %>% mutate(PREDICTED.1 = ifelse(WINS.1 > 0.5,1, ifelse(WINS.1 < 0.5, 0, rand_0_1())), PREDICTED.2 = ifelse(WINS.2 > 0.5,1, ifelse(WINS.2 < 0.5, 0, rand_0_1()))) agr_brier_1 <- BrierScore(pred = simulated_games$WINS.1, resp = simulated_games$ACTUAL.1) agr_brier_1 agr_conf_1 <- confusionMatrix(data = factor(simulated_games$ACTUAL.1), reference = factor(simulated_games$PREDICTED.1)) agr_conf_1 golden_goal_winners <- gold_goal(data = et_minutes) actual_winners <- legs_info %>% filter(LEG_ID %in% golden_goal_winners$LEG_ID) et_vs <- games_no_rares %>% group_by(SEASON) %>% summarise(HTG = sum(HTHG) + sum(HTAG), STG = (sum(FTHG) + sum(FTAG)) - HTG, ETG = sum(ETHG) + sum(ETAG)) %>% arrange(desc(HTG + STG + ETG))
a6524e79a68e47674e3ac94dcbd353dd84deebb9
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/kappalab/examples/card.set.func.gen.Rd.R
84c6f12a6f4300ad35ac43f0c00124becc2a017d
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
367
r
card.set.func.gen.Rd.R
library(kappalab) ### Name: card.set.func ### Title: Create objects representing cardinal set functions. ### Aliases: card.set.func card.game card.capacity lower.capacity ### upper.capacity uniform.capacity ### Keywords: math ### ** Examples card.set.func(4:-2) card.game(c(0,-2:2)) card.capacity(0:5) lower.capacity(3) upper.capacity(4) uniform.capacity(5)
136b6f05607feda62ea02ae6644c2ddea69586ba
f026d81ea0042c1b88f5f58f0ceff979d6020664
/day6/solution.R
fb926dddea561c7d86fb38aa828bcec74b238f0b
[]
no_license
apostaremczak/advent-of-code-2017
c4832385f122dabbbfeec11c7a9076d7bd449d81
36e0f7cfc3379e86f3b08700e269f83010c8a530
refs/heads/master
2021-08-31T20:59:32.578569
2017-12-22T18:36:44
2017-12-22T18:36:44
null
0
0
null
null
null
null
UTF-8
R
false
false
1,173
r
solution.R
# --- Day 6: Memory Reallocation --- puzzle_input <- read.csv("input.csv", sep = "\t", header = FALSE) len <- ncol(puzzle_input) row_to_string <- function(row) { return(paste(as.character(row), collapse = ",")) } next_ind <- function(i) { if (i + 1 <= len) return(i + 1) else return(1) } redistribute <- function(row) { bank <- max(row[1:len]) bank_ind <- which(row == bank)[1] row[,bank_ind] <- 0 current_ind <- next_ind(bank_ind) while(bank) { row[,current_ind] <- row[,current_ind] + 1 bank <- bank - 1 current_ind <- next_ind(current_ind) } return(row) } puzzle_input[1, len + 1] <- row_to_string(puzzle_input[1,]) redistribution_count <- 0 while(!any(duplicated(puzzle_input[, len + 1]))) { last_row <- tail(puzzle_input, n=1) new_row <- redistribute(last_row[1:len]) redistribution_count <- redistribution_count + 1 puzzle_input[nrow(puzzle_input) + 1,] <- c(new_row, row_to_string(new_row)) } # --- Part Two --- last_conf <- puzzle_input[nrow(puzzle_input), len + 1] duplicates <- which(puzzle_input[,len + 1] == last_conf) how_many_loops <- duplicates[2] - duplicates[1]
e5648961297634c5108630b4dff934ccd63fdbad
2192489e86e8993ac9de7f5c32583c2546e31129
/plot1.R
0abec5fd9c1a063b9f301fbac837c1e981a5d625
[]
no_license
KimMonks/ExData_Plotting1
e2d4063c5b2d2bd1863498d69993f23d216ad223
dbecd4b04b01ee9cb021f74c1547f0b37204eedb
refs/heads/master
2021-01-21T16:31:20.245254
2015-07-07T15:09:16
2015-07-07T15:09:16
38,661,173
0
0
null
2015-07-07T03:16:57
2015-07-07T03:16:56
null
UTF-8
R
false
false
468
r
plot1.R
# Load and extract data path <- "./household_power_consumption.txt" #data <- read.table(path, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".") data <- read.table(path, header=TRUE, sep=";") data <- subset(data, Date %in% c("1/2/2007","2/2/2007")) # Trasnform and plot to file power <- as.numeric(data$Global_active_power) png("plot1.png", width=480, height=480) hist(power, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)") dev.off()
03218a86bda8a7e49a1775936c349c1795247640
81e80f2198a64b8ab94ee962489bbc114534ed09
/plot4.R
c33e80a3630c621ad7974faf9a9731397501b474
[]
no_license
rajus74/ExData_Plotting1
c964738a798b794eaced8c246b513b5bc1b8ecae
163431684d158435f75526efb21c673049bea7fd
refs/heads/master
2021-01-18T18:57:12.832834
2016-02-05T09:55:02
2016-02-05T09:55:02
51,138,252
0
0
null
2016-02-05T09:44:27
2016-02-05T09:44:27
null
UTF-8
R
false
false
793
r
plot4.R
source("loadSubsetData.R") png("plot4.png",width = 480,height=480) par(mfrow=c(2,2)) plot(subsetData$Time, subsetData$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power") plot(subsetData$Time, subsetData$Voltage, type = "l", xlab = "datetime", ylab = "Voltage") plot(subsetData$Time, subsetData$Sub_metering_1, type = "l", col = "black", xlab = "", ylab = "Energy sub metering") lines(subsetData$Time, subsetData$Sub_metering_2, type="l", col="red") lines(subsetData$Time, subsetData$Sub_metering_3, type="l", col="blue") legend("topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lwd=2, col=c("black", "red", "blue")) plot(subsetData$Time, subsetData$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power") dev.off()
3ff7819cd37ea2d272e87e681eebe6386cde554e
6dcf636b5872580402c291079e8ec1152c5c85d3
/GeneticsHaplotype/R/simulationModels.R
20b3e242feb774cf6260b7e86a7be43201c20631
[]
no_license
sboehringer/GeneticsHaplotype
f9175d66529d0b753dcf7cd3ca9c9dd50f46cabc
ffb217614a70fbb5f46134f5aa71dd1455c9161f
refs/heads/master
2020-05-16T09:52:50.506237
2015-07-31T16:05:28
2015-07-31T16:05:28
24,718,518
0
0
null
null
null
null
UTF-8
R
false
false
2,427
r
simulationModels.R
# # simulationModels.R #Wed Apr 1 18:16:36 CEST 2015 simulationModelComparisonSingle = function( # genetic parameters htfs = rev(vector.std(1:8)), N = 2e2, pedTemplate, # phenotypes simulatePhenotypes, useCors = F, usePedIdcs = F, # models to compare models, #list(mcmcClass, passPhenotype, coerceFounders) # other parameters Nburnin = 1e3L, Nchain = 1e4L, missingness = 0, ... ) { # diplotypes d = simulateFromTemplate(pedTemplate, N = N, hfs = htfs); # phenotypes phenotypePars = list(...); cors = NULL; if (usePedIdcs) phenotypePars = c(phenotypePars, list(pedIdcs = pedsIdcs(d$peds))); if (useCors) { cors = pedsCoeffOfRel(d$peds); phenotypePars = c(phenotypePars, list(cors = cors)); } y = do.call(simulatePhenotypes, c(list(gts = d$gts[, 1]), phenotypePars)); if (is.matrix(y)) y = y[, 1]; # Covariates X = model.matrix(~ 1, data.frame(dummy = rep(1, length(y)))); # induce missingness dMiss = if (missingness > 0) createMissing(d, missing = missingness) else d; r = lapply(models, function(model) with(model, { Logs('Analyzing with class: %{mcmcClass}s', 4); # <p> data set d0 = dMiss; cors0 = cors; if (coerceFounders) { d0 = createIndependent(dMiss); cors0 = pedsCoeffOfRel(d0$peds); } # <p> reconstructor, instantiate object R = new(DiplotypeReconstructor, d0$gts, pedsItrios2rcpp(d0$peds)); # <p> arguments for mcmc instantiation mcmcArgs = list(peds = d0$peds, reconstructor = R, Nburnin = Nburnin, Nchain = Nchain); if (passPhenotype) mcmcArgs = c(mcmcArgs, list(y = y, X = X)); if (nif(model$passCors)) mcmcArgs = c(mcmcArgs, list(cors = cors0)); mcmc = do.call(new, c(list(Class = mcmcClass), mcmcArgs)); # <p> run chain mcmc$run(); # <p> summary r = c(mcmc$summary(), R2 = mcmc$R2(d$gts[, 1])); r })); r } simulationModelComparison = function(..., Nrepetition = 5e2L) { print(date()); print(Nrepetition); r = Lapply(1:Nrepetition, function(i, ...) 
simulationModelComparisonSingle(...), ...); print(date()); r } simulationModelComparisonSpec = function(spec, htfs, N, pedTemplate, beta, Nburnin = 1e2L, Nchain = 1e3L, Nrepetition, missingness = 0) { args = c(list(htfs = htfs, N = N, pedTemplate = pedTemplate, missingness = missingness, simulatePhenotype = get(spec$phenotypeFunction), beta = beta, models = spec$models, Nrepetition = Nrepetition ), spec$phenotype); sim = do.call(simulationModelComparison, args); }
e36233b486b9a2a8a2ca2edd881c6be279161bdb
d7607d73cd5cc231b44dcb9f6c9c1591b71ed789
/R/conn.R
c52c3a628394ad3f300a649915ec6e3849ab622d
[]
no_license
cran/msgpack
f595ed6d9ba97c53420462e6d4659367a5d9f59c
4742bb3a4efdda1b28f432db80c0e0694ee0ac48
refs/heads/master
2021-09-07T11:28:41.219445
2018-02-22T08:15:35
2018-02-22T08:15:35
122,456,129
0
0
null
null
null
null
UTF-8
R
false
false
10,185
r
conn.R
#' Read and write msgpack formatted messages over R connections. #' #' A `msgConnection` object decodes msgpack messages from an #' underlying R raw connection. #' #' @param con A [connection] object open in binary mode. #' @param max_size The largest partial message to store, in #' bytes. `NA` means do not enforce a limit. #' @param read_size How many bytes to read at a time. #' @param ... Unpacking options (see [unpackMsg]). #' @return `msgConnection()` returns an object of class #' `msgConnection`. #' #' @examples #' out <- rawConnection(raw(0), open="wb") #' apply(quakes, 1, function(x) writeMsg(x, out)) #' length(rawConnectionValue(out)) #' inn <- msgConnection(rawConnection(rawConnectionValue(out), open="rb")) #' readMsg(inn) #' readMsgs(inn, 3) #' @export msgConnection <- function(con, read_size=2^16, max_size=NA, ...) { partial <- raw(0) status <- "ok" bread <- 0 bwrite <- 0 reader <- function(desired) { # ignore "desired" and just read non-blockingly. readRaw(con, read_size) } readMsgs <- function(n=NA, read_size = parent.env(environment())$read_size) { if (is.na(n)) n <- .Machine$integer.max msgs_bytes <- unpackMsgs(partial, n, max_size = max_size, reader = reader) partial <<- msgs_bytes$remaining status <<- msgs_bytes$status bread <<- bread + msgs_bytes$bytes_read msgs_bytes$msgs } doClose <- function(...) { close(con, ...) } #msgConnection object is just the orig object with this #environment dangled off it. structure(addClass(con, "msgConnection"), reader = environment()) } #' @export summary.msgConnection <- function(object, ...) { s <- NextMethod("summary") c(s, list(status = status(object))) } #' @rdname msgConnection #' @export close.msgConnection <- function(con, ...) { attr(con, "reader")$doClose(...) } catenator <- function(val=c()) { # An in-memory FIFO type object. 
#tracemem(val) start <- 0 end <- length(val) function(x, action="store", ..., opts) { switch(action, store = { lx <- length(x) l <- length(val) if (lx > 0) { #check for overflow if (end + lx > l && start > 0) { # rotate back to start if (start > 0 && end != start) { val[1:(end-start)] <- val[(start+1):end] } end <<- end - start start <<- 0 } if (end + lx > l) { # double array length length(val) <<- max(end + lx, 2 * l); } #inject new values val[ (end + 1):(end + lx) ] <<- x end <<- end + lx } dbg("lx", lx, "start", start, "end", end, "\n") x }, read = { if (end > start) { val[(start+1):end] } else val[c()] }, buf = { val }, start = start, length = end - start, end = end, contents = { list(val, start, end) }, reset = { val <<- x start <<- 0 end <<- length(x) }, drop = { if (x <= end - start && x >= 0) { start <<- start + x } else { stop("have ", end - start, ", can't drop ", x) } }) } } lister <- function(val = list()) { n <- length(val) function(x, action="store") { switch(action, store = { if (n > length(val)) length(val) <<- max(1, 2 * length(val)) n <<- n + 1 val[[n]] <<- x }, read = { length(val) <<- n val }, length = { n }, clear = { n <<- 0 length(val) <<- 0 } ) } } addClass <- function(x, classes) structure(x, class = c(classes, class(x))) #' @return `partial(con)` returns any data that has been read ahead of #' the last decoded message. #' @rdname msgConnection #' @export partial <- function(con) UseMethod("partial") #' @rdname msgConnection #' @export partial.msgConnection <- function(con) { attr(con, "reader")$partial } #' @rdname msgConnection #' @export #' @param n The maximum number of messages to read. A value of NA #' means to parse all available messages until end of input. #' @return `readMsgs(con, n)` returns a list of up to `n` decoded messages. readMsgs <- function(con, n = NA, ...) { UseMethod("readMsgs") } #' @export readMsgs.msgConnection <- function(con, n = NA, ...) { attr(con, "reader")$readMsgs(n, ...) 
} #' @rdname msgConnection #' @return `status(con)` returns the status of msgpack decoding on the #' connection. A value of `"ok"` indicates all requested messages #' were read, `"buffer underflow"` for a non-blocking connection #' indicates that only part of a message has been received, and #' `"end of input"` means the last available message has been read. #' Other values indicate errors encountered in decoding, which will #' effectively halt reading. #' @export status <- function(con) UseMethod("status") #' @rdname msgConnection #' @export status.msgConnection <- function(con) { attr(con, "reader")$status } #' @rdname msgConnection #' @return `seek(con)` returns the number of bytes that have been #' successfully read or written, depending on the mode of the #' connection. (Repositioning is not supported.) #' @param rw See [seek()]. #' @export seek.msgConnection <- function(con, rw = summary(con)$mode, ...) { rw <- pmatch(rw, c("read", "write"), 0L) switch(rw, attr(con, "reader")$bread, attr(con, "reader")$bwrite, ) } #' `readMsg(con)` reads exactly one message from a #' msgConnection, or throws an error. #' #' @rdname msgConnection #' @return `readMsg(con)` returns one decoded message. #' @export readMsg <- function(con, ...) { UseMethod("readMsg", con) } #' @export readMsg.msgConnection <- function(con, ...) { x <- readMsgs(con, 1, ...) if (length(x) < 1) { stop(status(con)) } x[[1]] } #' `writeMsg(x, con)` writes a single message to a msgConnection. #' #' @rdname msgConnection #' @param obj An R object. #' @export writeMsg <- function(obj, con, ...) { UseMethod("writeMsg", con) } #' @export writeMsg.connection <- function(obj, con, ...) { writeMsgs(list(obj), con, ...) } #' @export writeMsg.msgConnection <- function(obj, con, ...) { writeMsgs(list(obj), con, ...) } #' `writeMsgs(l, conn)` writes a list of #' messages to a connection. 
That is, `writeMsg(1:10, conn)` writes one #' message containing an array, while `writeMsgs(1:10, conn)` writes #' ten consecutive messages each containing one integer. #' #' `writeMsg` will work with any R connection in raw mode, but reading #' requires a msgConnection object. #' #' Because msgpack messages have unpredictable length, the decoder #' reads ahead in chunks, then finds the boundaries between messages. #' Therefore when reading over a socket or a fifo it is best to use a #' nonblocking connection, and it will not work to mix readMsg and #' readBin on the same connection. #' #' If you are reading data from a not completely trusted source you #' should specify options `max_size` and `max_depth` (see #' [unpackOpts]). Without it, some deeply nested or cleverly designed #' messages can cause a stack overflow or out-of-memory error. With #' these options set, you will get an R exception instead. #' @rdname msgConnection #' @param objs A list of R objects. #' @export writeMsgs <- function(objs, con, ...) { UseMethod("writeMsgs", con) } #' @export writeMsgs.connection <- function(objs, con, ...) { writeRaw(packMsgs(objs, ...), con) } #' @export writeMsgs.msgConnection <- function(objs, con, ...) { buf <- packMsgs(objs, ...) result <- writeRaw(buf, con) attr(con, "reader")$bwrite <- attr(con, "reader")$bwrite + length(buf) invisible(result) } ## To support test harness, we use "readRaw" and "writeRaw" ## internally, instead of "readBin" which is not an S3 method readRaw <- function(con, n, ...) { UseMethod("readRaw") } writeRaw <- function(object, con, ...) { UseMethod("writeRaw", con) } readRaw.connection <- function(con, n) { # I get errors thrown (sometimes) when reading at the end of # a nonblocking fifo. tryCatch({ readBin(con, 'raw', n) }, error = function(e) { # warning("Ignoring ", e) raw(0) }) } writeRaw.connection <- function(object, con, ...) { writeBin(object, con, ...) 
} ## An inefficient double ended byte buffer for test harness purposes rawBuffer <- function(object = raw(0)) { open <- "r" bytes <- length(object) buf <- rawConnection(object, open = "r") object <- NULL write <- function(object) { switch(open, "w" = { writeBin(object, buf) }, "r" = { data <- readBin(buf, 'raw', bytes - seek(buf)) close(buf) buf <<- rawConnection(data, open = "w") open <<- "w" writeBin(data, buf) write(object) } ) } read <- function(n) { switch(open, "r" = { readBin(buf, 'raw', n) }, "w" = { ##convert a write buffer into a read buffer val <- rawConnectionValue(buf) close(buf) buf <<- rawConnection(val, open = "r") bytes <<- length(val) open <<- "r" read(n) } ) } doClose <- function(n) { close(buf) buf <- NULL } structure(list(environment()), class = "rawBuffer") } writeRaw.rawBuffer <- function(object, con) { con[[1]]$write(object) } readRaw.rawBuffer <- function(con, n) { con[[1]]$read(n) } close.rawBuffer <- function(con) { con[[1]]$doClose() }
395fa01ccb03b0be67978f4ee0769ad3aa0132ca
b2f61fde194bfcb362b2266da124138efd27d867
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query30_query07_1344n/query30_query07_1344n.R
e55088654c65668efd062c434ef16e1245914e81
[]
no_license
arey0pushpa/dcnf-autarky
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
a6c9a52236af11d7f7e165a4b25b32c538da1c98
refs/heads/master
2021-06-09T00:56:32.937250
2021-02-19T15:15:23
2021-02-19T15:15:23
136,440,042
0
0
null
null
null
null
UTF-8
R
false
false
703
r
query30_query07_1344n.R
c DCNF-Autarky [version 0.0.1]. c Copyright (c) 2018-2019 Swansea University. c c Input Clause Count: 362 c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 362 c c Input Parameter (command line, file): c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query30_query07_1344n.qdimacs c output filename /tmp/dcnfAutarky.dimacs c autarky level 1 c conformity level 0 c encoding type 2 c no.of var 161 c no.of clauses 362 c no.of taut cls 0 c c Output Parameters: c remaining no.of clauses 362 c c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query30_query07_1344n.qdimacs 161 362 E1 [] 0 69 92 362 NONE
a9d62e1275c250b246937c87e1b6497ea183cc85
1bff670706a93f577eff6af8ed52dd3275e1534d
/scripts/R/QC/config_db.R
5100d5a175d62668325c83393ecea4e8ce2b4ca2
[]
no_license
aodn/aatams
f426d308c553bf5098cbf566f04ba13a9aa3a515
f21b94497935130f8006e6ef33bce718d197dca0
refs/heads/master
2021-05-16T18:24:16.238708
2021-05-12T01:18:58
2021-05-12T01:18:58
10,501,618
7
3
null
2021-04-28T01:22:42
2013-06-05T12:04:53
Groovy
UTF-8
R
false
false
234
r
config_db.R
#file:config_db.R server_address <- 'server_url'; # e.g. '183.24.245.102' db_name <- 'db_name'; # e.g. 'aatams' db_port <- 'db_port'; # e.g. '5432' db_user <- 'db_username'; # e.g. 'username' db_password <- 'db_pwd'; # e.g. 'password'
464437012c3fe3216bc66babc54c20c55231af81
b2f61fde194bfcb362b2266da124138efd27d867
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#124.A#48.c#.w#5.s#53.asp/ctrl.e#1.a#3.E#124.A#48.c#.w#5.s#53.asp.R
c95ddac8d1f5f19eaa1e6d88b9da0d637085e816
[]
no_license
arey0pushpa/dcnf-autarky
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
a6c9a52236af11d7f7e165a4b25b32c538da1c98
refs/heads/master
2021-06-09T00:56:32.937250
2021-02-19T15:15:23
2021-02-19T15:15:23
136,440,042
0
0
null
null
null
null
UTF-8
R
false
false
91
r
ctrl.e#1.a#3.E#124.A#48.c#.w#5.s#53.asp.R
786622ce009c8d70f9104dcd2c86838e ctrl.e#1.a#3.E#124.A#48.c#.w#5.s#53.asp.qdimacs 5131 14878