blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
327
content_id
stringlengths
40
40
detected_licenses
listlengths
0
91
license_type
stringclasses
2 values
repo_name
stringlengths
5
134
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
46 values
visit_date
timestamp[us]date
2016-08-02 22:44:29
2023-09-06 08:39:28
revision_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
committer_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
github_id
int64
19.4k
671M
star_events_count
int64
0
40k
fork_events_count
int64
0
32.4k
gha_license_id
stringclasses
14 values
gha_event_created_at
timestamp[us]date
2012-06-21 16:39:19
2023-09-14 21:52:42
gha_created_at
timestamp[us]date
2008-05-25 01:21:32
2023-06-28 13:19:12
gha_language
stringclasses
60 values
src_encoding
stringclasses
24 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
7
9.18M
extension
stringclasses
20 values
filename
stringlengths
1
141
content
stringlengths
7
9.18M
dc2b1323d7835fd835c6d75062cf9e07b0560b04
f47256310805e64cc08b57233a94c6416ffbe3d8
/man/ttbGreedyModel.Rd
c284c9e9ae65dff0d6edfae1e04e5779fe6f7989
[]
no_license
jeanimal/heuristica
1fed5ccee936cf2b23d7142a5f7e0b97829baab0
0e4933f3f263a92aa3c7deb3fe7b4ba0b8f899bb
refs/heads/master
2021-11-28T09:37:51.809141
2021-09-08T14:36:14
2021-09-08T14:36:14
36,178,661
5
2
null
null
null
null
UTF-8
R
false
true
2,453
rd
ttbGreedyModel.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/heuristics.R \name{ttbGreedyModel} \alias{ttbGreedyModel} \title{Greedy Take The Best} \usage{ ttbGreedyModel( train_data, criterion_col, cols_to_fit, fit_name = "ttbGreedyModel" ) } \arguments{ \item{train_data}{Training/fitting data as a matrix or data.frame.} \item{criterion_col}{The index of the column in train_data that has the criterion.} \item{cols_to_fit}{A vector of column indices in train_data, used to fit the criterion.} \item{fit_name}{Optional The name other functions can use to label output. It defaults to the class name. It is useful to change this to a unique name if you are making multiple fits, e.g. "ttb1", "ttb2", "ttbNoReverse."} } \value{ An object of \code{\link[base]{class}} ttbGreedyModel, which can be passed in to \code{\link{predictPair}}. } \description{ A variant of the Take The Best heuristic with a different cue order, namely using conditional cue validity, where the validity of a cue is judged only on row pairs not already decided by prior cues. Specifically, it uses the cue ranks returned by \code{\link{conditionalCueValidityComplete}}. } \examples{ ## A data set where Take the Best and Greedy Take the Best disagree. matrix <- cbind(y=c(3:1), x1=c(1,0,0), x2=c(1,0,1)) ttb <- ttbModel(matrix, 1, c(2,3)) ttb$cue_validities # Returns # x1 x2 # 1.0 0.5 ttbG <- ttbGreedyModel(matrix, 1, c(2:3)) ttbG$cue_validities # Returns # x1 x2 # 1 1 # because after using x1, only decisions between row 2 and 3 are left, # and x2 gets 100\% right on those (after reversal). However, these # cue_validities depend on using x1, first, so cue_rank is key. ttbG$cue_ranks # Returns # x1 x2 # 1 2 # Now see how this affects predictions on row 2 vs. 3. # Take the best guesses (output 0). predictPair(oneRow(matrix, 2), oneRow(matrix, 3), ttb) # Greedy Take The Best selects row 2 (output 1). 
predictPair(oneRow(matrix, 2), oneRow(matrix, 3), ttbG) } \references{ Martignon, L., & Hoffrage, U. (2002). Fast, frugal, and fit: Simple heuristics for paired comparisons. Theory and Decision, 52: 29-71. } \seealso{ \code{\link{conditionalCueValidityComplete}} for the metric used to sort cues. \code{\link{ttbModel}} for the original version of Take The Best. \code{\link{predictPair}} for predicting whether row1 is greater. \code{\link{predictPairProb}} for predicting the probability row1 is greater. }
9060b16abc534d808bc1ffe4ac9a2a93f0ce5759
7d075c000b055160f43d6ac81d98e0b67ff41e92
/scripts/plotpredict.R
cd112536893b4b11bca7ab58d8b7a70f6f59d131
[]
no_license
lerouzic/dalechampia
24eb740af0a99b9a7608086af0ea213d7950ab6e
5b4ad284db85e5cea2e5ba84b6911563729e9bae
refs/heads/master
2023-05-06T15:01:26.242804
2021-03-19T11:13:51
2021-03-19T11:13:51
226,364,280
0
0
null
null
null
null
UTF-8
R
false
false
10,933
r
plotpredict.R
################ Plotting selection time series ##################

# Return the input colour(s) with the given alpha (0-255) applied.
makeTransparent<-function(someColor, alpha=100)
{
  # From https://stackoverflow.com/questions/8047668/transparent-equivalent-of-given-color
  # Author: Nick Sabbe
  # Licence : CC-attribution-SA from the conditions of the website
  newColor<-col2rgb(someColor)
  apply(newColor, 2, function(curcoldata){rgb(red=curcoldata[1], green=curcoldata[2], blue=curcoldata[3],alpha=alpha, maxColorValue=255)})
}

# Draw an axis for data stored on the log scale, labelled on one of:
#   "log"        : the log values themselves
#   "natural"    : natural (back-transformed) units
#   "percent0"   : percent change relative to the reference (exp(x)*100 - 100)
#   "percent100" : percent of the reference (exp(x)*100)
custom.axis <- function(side, loglim, type=c("log", "natural", "percent0", "percent100")[1]) {
  # Convert the log-scale limits to the chosen label scale
  lim <- loglim
  if (type == "natural") {
    lim <- exp(loglim)
  } else if (type == "percent0") {
    lim <- exp(loglim)*100 - 100
  } else if (type == "percent100") {
    lim <- exp(loglim)*100
  }
  # Pretty tick labels on the label scale, mapped back to log coordinates
  lab <- pretty(lim)
  at <- if (type == "log") {
    lab
  } else if (type == "natural") {
    log(lab)
  } else if (type == "percent0") {
    log((lab+100)/100)
  } else if (type == "percent100") {
    log(lab/100)
  }
  lab.string <- as.character(lab)
  # Positive percent changes get an explicit "+" sign
  if (type == "percent0") lab.string <- ifelse(lab > 0, paste0("+", lab.string), paste0(lab.string))
  if (type == "percent100") lab.string <- paste0(lab.string)
  axis(side, at=at, lab.string)
}

# Axis label (plotmath expression) for a trait on the chosen scale.
custom.label <- function(trait, type=c("log", "natural", "percent0", "percent100")[1]) {
  if (type == "natural") {
    bquote(.(trait)*" (mm"^2*")")
  } else if (type == "log") {
    bquote("log "*.(trait)*" (mm"^2*")")
  } else if (type == "percent0" || type == "percent100") {
    bquote(.(trait)*"(%)")
  }
}

# Estimate means and variances in raw and centered datasets
# Three ways to analyse the data:
# * raw: phenotype as observed experimentally
# * control-centered: phenotype centered on the control line
# * updown-centered: phenotype centered on the average between up and down
#   selection lines (symmetric response)
# Returns a list with the drift/va/env variance components and one data frame
# per line (Control/Up/Down) holding generation, prediction, observed
# phenotype, standard error, and prediction-error variance.
recenter <- function(data, G, Gv, P, N, Np, target="mu.x", normalization=c("raw", "control", "updown")[1], G0.boost=FALSE) {
  # data: summary statistics (from summarypop script)
  # G: G matrix
  # Gv: Estimated variance of each element of the G matrix
  # P: P matrix
  # N: Number of individuals measured
  # Np: Number of selected parents
  # target: Either "mu.x" (selected trait) or "mu.y" (correlated trait)
  # G0.boost: Whether the G matrix at the first generation should be x1.5 due to inbreeding
  stopifnot(target %in% c("mu.x", "mu.y"))
  stopifnot(normalization %in% c("raw","control","updown"))

  se.target <- if(target=="mu.x") "se.x" else "se.y"

  # The different variances to account for depend on the target
  Gsel <- if(target == "mu.x") G[1,1] else G[2,1]
  Gdrift <- if(target == "mu.x") G[1,1] else G[2,2]
  Gerr <- if(target == "mu.x") Gv[1,1] else Gv[2,1]
  Eerr <- if(target == "mu.x") P[1,1]-G[1,1] else P[2,2]-G[2,2]
  h2 <- G[1,1]/P[1,1] # heritability of the selected trait

  # Selection gradient (always on the selected trait, the gradient on the
  # correlated trait is 0 by definition)
  beta <- data$S/data$sig2.x
  # mean gradients to compute prediction errors: a bit sloppy, but this
  # should not really matter
  mean.beta <- mean(abs(beta[data$Rep=="Up" | data$Rep == "Down"]), na.rm=TRUE)

  gen <- seq(1, max(data$Gen))
  # Optional x1.5 inflation of G in the first generation (inbreeding)
  G.correct <- rep(1, length(gen))
  if (G0.boost) G.correct[1] <- 1.5

  # Predicted trajectories: cumulative selection response R = beta * Gsel,
  # expressed in the coordinate system chosen by `normalization`.
  pred.control <- if (normalization == "raw") {
    rep(data[data$Rep=="Control" & data$Gen == 1, target], length(gen))
  } else if (normalization == "control") {
    rep(0, length(gen) )
  } else if (normalization == "updown") {
    c(0, cumsum(0.5*(beta[data$Rep=="Down"] + beta[data$Rep=="Up"]))[-length(gen)]*Gsel*G.correct[-length(gen)])
  }
  pred.up <- if (normalization == "raw") {
    data[data$Rep=="Up" & data$Gen == 1, target] + c(0, cumsum(beta[data$Rep=="Up"])[-length(gen)]*Gsel*G.correct[-length(gen)])
  } else if (normalization == "control") {
    c(0, cumsum(beta[data$Rep=="Up"])[-length(gen)]*Gsel*G.correct[-length(gen)])
  } else if (normalization == "updown") {
    c(0, cumsum(0.5*(beta[data$Rep=="Up"] - beta[data$Rep=="Down"]))[-length(gen)]*Gsel*G.correct[-length(gen)])
  }
  pred.down <- if (normalization == "raw") {
    data[data$Rep=="Down" & data$Gen == 1, target] + c(0, cumsum(beta[data$Rep=="Down"])[-length(gen)]*Gsel*G.correct[-length(gen)])
  } else if (normalization == "control") {
    c(0, cumsum(beta[data$Rep=="Down"])[-length(gen)]*Gsel*G.correct[-length(gen)])
  } else if (normalization == "updown") {
    c(0, cumsum(0.5*(beta[data$Rep=="Down"] - beta[data$Rep=="Up"]))[-length(gen)]*Gsel*G.correct[-length(gen)])
  }

  # Calculation of prediction variances
  # Three terms : environmental sampling error + drift + error in the
  # additive variance estimate
  # Environmental sampling error : at the current generation, always Ve/N
  # Drift : cumulative over generations, + Va / N every generation
  #   (+ Va/N for generation 0)
  #   in selected lines, there are only Np parents, but the offspring number
  #   is normalized; theory shows that the increase in variance is
  #   1/Np - 1/2N every generation
  # Error on the estimate of Va :
  #   this error cancels for drift (assuming overestimation is as likely as
  #   underestimation) but not for selected lines. A term in
  #   Var(Va) * t^2 * beta^2 needs to be considered.
  #   this error cumulates (quadratically) over generations
  erf <- function(x) 2 * pnorm(x * sqrt(2)) - 1
  erfinv <- function (x) qnorm((1 + x)/2)/sqrt(2)
  tmp.X <- erfinv(2*(N-Np)/N-1)
  tmp.H <- sqrt((1-h2)/(2*h2))
  tmp.X1 <- erf(tmp.X + tmp.H)
  tmp.X2 <- erf(tmp.X - tmp.H)
  # varW corrects the drift variance of selected lines for truncation
  # selection among the Np parents
  varW <- (N^2-2*Np^2)/(2*Np^2) - ((N^2-Np^2)/(2*Np^2))*tmp.X1 - 0.5*tmp.X2
  verr.drift <- Gdrift*G.correct*(gen-1)*(1/Np - 1/(2*N))
  verr.drift.sel <- Gdrift*G.correct*(gen-1)*(1/Np - 1/(2*N) - varW/N)
  verr.va <- Gerr * (gen-1)^2 * mean.beta^2
  verr.env <- Eerr/N
  # print(cbind(verr.drift, verr.drift.sel))

  # If the data is control or up-down centered, the error variance is redistributed:
  verr.control <- if (normalization == "raw") {
    verr.drift + verr.env
  } else if (normalization == "control") {
    rep(0, length(gen))
  } else if (normalization == "updown") {
    verr.drift + 0.5*verr.drift.sel + 1.5*verr.env
  }
  verr.sel <- if (normalization == "raw") {
    verr.drift.sel + verr.va + verr.env
  } else if (normalization == "control") {
    verr.drift + verr.drift.sel + verr.va + 2*verr.env
  } else if (normalization == "updown") {
    0.5*verr.drift.sel + verr.va + 0.5*verr.env
  }

  # Scaling the phenotype
  phen.control <- if (normalization == "raw") {
    data[data$Rep=="Control",target]
  } else if (normalization == "control") {
    rep(0, length(gen))
  } else if (normalization == "updown") {
    data[data$Rep=="Control",target] - 0.5*data[data$Rep=="Up",target] - 0.5*data[data$Rep=="Down",target]
  }
  phen.up <- if (normalization == "raw") {
    data[data$Rep=="Up",target]
  } else if (normalization == "control") {
    data[data$Rep=="Up",target] - data[data$Rep=="Control",target]
  } else if (normalization == "updown") {
    0.5*data[data$Rep=="Up",target] - 0.5*data[data$Rep=="Down",target]
  }
  phen.down <- if (normalization == "raw") {
    data[data$Rep=="Down",target]
  } else if (normalization == "control") {
    data[data$Rep=="Down",target] - data[data$Rep=="Control",target]
  } else if (normalization == "updown") {
    0.5*data[data$Rep=="Down",target] - 0.5*data[data$Rep=="Up",target]
  }

  # Scaling the std errors
  se.control <- if (normalization == "raw") {
    data[data$Rep=="Control",se.target]
  } else if (normalization == "control") {
    rep(0, length(gen))
  } else if (normalization == "updown") {
    sqrt(data[data$Rep=="Control",se.target]^2 + (1/4)*(data[data$Rep=="Up",se.target]^2 + data[data$Rep=="Down",se.target]^2))
  }
  se.up <- if (normalization == "raw") {
    data[data$Rep=="Up",se.target]
  } else if (normalization == "control") {
    sqrt(data[data$Rep=="Up",se.target]^2 + data[data$Rep=="Control",se.target]^2)
  } else if (normalization == "updown") {
    sqrt((1/4)*(data[data$Rep=="Up",se.target]^2 + data[data$Rep=="Down",se.target]^2))
  }
  se.down <- if (normalization == "raw") {
    data[data$Rep=="Down",se.target]
  } else if (normalization == "control") {
    sqrt(data[data$Rep=="Down",se.target]^2 + data[data$Rep=="Control",se.target]^2)
  } else if (normalization == "updown") {
    sqrt((1/4)*(data[data$Rep=="Up",se.target]^2 + data[data$Rep=="Down",se.target]^2))
  }

  return(list(
    drift = verr.drift,
    va = verr.va,
    env = verr.env,
    Control = data.frame(gen=gen, pred=pred.control, phen=phen.control, se=se.control, verr=verr.control),
    Up = data.frame(gen=gen, pred=pred.up, phen=phen.up, se=se.up, verr=verr.sel),
    Down = data.frame(gen=gen, pred=pred.down, phen=phen.down, se=se.down, verr=verr.sel)))
}

# Plots a single time series (must be called after a plot device/axes exist:
# it only adds polygon/lines/points to the current plot).
plot.ts.common <- function(pred, gen=seq_along(pred), verr=NULL, data, data.se=NULL, CI.factor=1.96, col.line="black", col.point=col.line, col.err=makeTransparent(col.line, alpha=50), pch=1, prediction=TRUE, data.lty=if(prediction) 2 else 1, ...) {
  # pred: the vector of predicted phenotype
  # gen : generation numbers (default: seq_along(pred))
  # verr: vector of prediction error variance
  # data: data points
  # data.se: standard errors for the observed phenotypic means
  # CI.factor: factor by which sqrt(verr) should be multiplied to figure
  #   prediction intervals
  # ... : graphical line, points, color options

  # Prediction error (shaded area)
  if (prediction && !is.null(verr)) {
    polygon(c(gen, rev(gen)), c(pred-CI.factor*sqrt(verr), rev(pred+CI.factor*sqrt(verr))), border=NA, col=col.err)
  }
  # Prediction (plain line)
  if (prediction) lines(gen, pred, col=col.line, lwd=3)
  # Data points (NA observations are dropped)
  points(gen[!is.na(data)], data[!is.na(data)], pch=pch, lty=data.lty, type="b", col=col.point)
  # Data error (error bars)
  if(!is.null(data.se) && data.se[1] > 0) {
    arrows(x0=gen, y0=data-CI.factor*data.se, y1=data+CI.factor*data.se, length=0.1, angle=90, code=3, col=col.point)
  }
}

# Call the plot routine on the recentered data (output of recenter()):
# sets up the frame/axes, then overlays Control, Up, and Down series.
plot.data.recenter <- function(data.recenter, col.data=c(Control="gray50", Up="black", Down="black"), pch=18, CI.factor=1.96, ylab="Phenotype", xlab="Generations", ylim=NULL, prediction=TRUE, G0=0, axis.type="log", ...) {
  # Default y-range: observed phenotypes across all three lines, padded
  if(is.null(ylim)) ylim <- 0.2*c(-1,1) + range(do.call(c, lapply(data.recenter, function(x) x$phen)), na.rm=TRUE)
  plot(NULL, xlim=range(data.recenter$Control$gen), ylim=ylim, xlab=xlab, ylab=if(ylab == "") "" else custom.label(ylab, axis.type), xaxt="n", yaxt="n", ...)
  # x axis: generations relabelled starting at G0
  axis(1, at=data.recenter$Control$gen, labels=as.character(G0:(G0-1+length(data.recenter$Control$gen))))
  # y axis on the requested scale (see custom.axis)
  custom.axis(2, ylim, axis.type)
  for (ll in c("Control","Up","Down")) {
    plot.ts.common(data.recenter[[ll]]$pred, data.recenter[[ll]]$gen, data.recenter[[ll]]$verr, data.recenter[[ll]]$phen, data.recenter[[ll]]$se, CI.factor=CI.factor, col.line=col.data[ll], prediction=prediction)
  }
}
41f07b6cbacbf8361c2bd45756f2d0dbf81cfdb7
a593d96a7f0912d8dca587d7fd54ad96764ca058
/R/ml_model_bisecting_kmeans.R
e1158a9c9af559fd1c0086f49ad679e13e16beeb
[ "Apache-2.0" ]
permissive
sparklyr/sparklyr
98f3da2c0dae2a82768e321c9af4224355af8a15
501d5cac9c067c22ad7a9857e7411707f7ea64ba
refs/heads/main
2023-08-30T23:22:38.912488
2023-08-30T15:59:51
2023-08-30T15:59:51
59,305,491
257
68
Apache-2.0
2023-09-11T15:02:52
2016-05-20T15:28:53
R
UTF-8
R
false
false
1,101
r
ml_model_bisecting_kmeans.R
new_ml_model_bisecting_kmeans <- function(pipeline_model, formula, dataset, features_col) { m <- new_ml_model_clustering( pipeline_model = pipeline_model, formula = formula, dataset = dataset, features_col = features_col, class = "ml_model_bisecting_kmeans" ) model <- m$model m$summary <- model$summary m$centers <- model$cluster_centers() %>% do.call(rbind, .) %>% as.data.frame() %>% rlang::set_names(m$feature_names) m$cost <- suppressWarnings( possibly_null( ~ pipeline_model %>% ml_stage(1) %>% ml_transform(dataset) %>% model$compute_cost() )() ) m } #' @export print.ml_model_bisecting_kmeans <- function(x, ...) { preamble <- sprintf( "K-means clustering with %s %s", nrow(x$centers), if (nrow(x$centers) == 1) "cluster" else "clusters" ) cat(preamble, sep = "\n") print_newline() ml_model_print_centers(x) print_newline() cat( "Within Set Sum of Squared Errors = ", if (is.null(x$cost)) "not computed." else x$cost ) }
c78fee39558554f1baf7d0c364dede3c2947d754
5d3deb2b60727315f2ec162b2be22f5202dd9e29
/V-code/Chemical_data.R
997aaf48703347cec839f392a3860617ce640c3b
[]
no_license
people-r-strange/spacey
7f4a5003ed9e27db75caf1938361d0af6ea8f8ba
cd350573c9af12d8b0930a4fa23a4efcc8c2c324
refs/heads/main
2023-03-20T21:32:26.276748
2021-03-23T03:18:48
2021-03-23T03:18:48
349,056,563
0
0
null
2021-03-23T03:18:48
2021-03-18T12:00:30
HTML
UTF-8
R
false
false
1,944
r
Chemical_data.R
library(tidyverse) library(readxl) library(knitr) library(ggridges) library(wesanderson) #load sensor data chemical_data <- read_excel("data/Sensor Data.xlsx") #renaming columnns names(chemical_data)[3] <- "DateTime" #look at the dates when the trucks were there... #from suspsicious rangers 3 table the dates in question are: dates <- c("2016-02", "2016-03", "2016-05", "2016-05", "2016-05") chemical_dates <- chemical_data %>% mutate(date = format(DateTime, "%Y-%m")) chemical_means <- chemical_dates %>% group_by(Chemical, Monitor, date) %>% mutate(average_reading = mean(Reading)) daily_chemical_means <- chemical_means %>% select(Chemical, Monitor, date, average_reading) daily_chemical_means <- daily_chemical_means %>% as.factor(Monitor) #visualize the chemical data ggplot( daily_chemical_means, aes(x=date, y= average_reading, fill=Chemical)) + geom_bar(stat="identity", position="dodge") + labs(title = "Average Chemical Reading", y = "Average Reading", x = "Date") + scale_fill_discrete( name = "Chemical") + theme_light() #focus on 2016-12 ggplot(daily_chemical_means, aes(x=as.factor(Monitor), y= average_reading, fill = Chemical)) + facet_wrap( ~Chemical, ncol=2) + geom_bar(stat="identity", position="dodge") + labs(title = "Average Chemical Reading for December 2016", y = "Average Reading", x = "Monitor") + scale_fill_brewer(palette="Dark2") #read in wind data Meteorological_Data <- read_excel("~/Documents/Spring2021/Visual Analytics/Spacey/DC3-data/Sensor Data/Meteorological Data.xlsx") #select first three columns Meteorological_Data <- Meteorological_Data %>% select(1:3) #separate date and time Meteorological_Data_sep <- separate(Meteorological_Data, Date, c("date", "time"), sep = " ") Meteorological_Data_sep <- as.yearmon(date) # wind <- filter(Meteorological_Data_sep, date %in% c('2016-02', '2016-03', '2016-05', '2016-05', '2016-05'))
c1f95a28b3e09aed07e17fbaaac79226142b2a11
3810f013ef1bb6da62ae44849f04575ee8daf2f7
/R/get_week.R
2612553d11f5592338741b196a3be87457b5a18a
[ "MIT" ]
permissive
minghao2016/incidence2
72dac5797bb44a8df537e3212710247ec8365a82
5aa52edf3526def57cff1458dfad026940674723
refs/heads/master
2023-01-11T11:30:00.512052
2020-11-12T14:21:25
2020-11-12T14:21:25
null
0
0
null
null
null
null
UTF-8
R
false
false
2,108
r
get_week.R
#' Translate user input to the weekday a week starts on
#'
#' @param weekday Week specification: ISOweek, MMWRweek, EPIweek, Mon-week,
#'   Tue-week, "week: Saturday", "2 weeks: Saturday", "epiweek: Saturday", ...
#'
#' @return The name of the corresponding weekday.
#'
#' @examples
#' get_week_start("ISOweek")
#' get_week_start("MMWRweek")
#' get_week_start("EPIweek")
#'
#' # weeks that start on saturday
#' get_week_start("Sat-week")
#' get_week_start("week: Saturday")
#' get_week_start("2 weeks: Saturday")
#' get_week_start("epiweek: Saturday")
#'
#' @noRd
get_week_start <- function(weekday) {
  # Strip the word "week"/"weeks", then punctuation, blanks and digits, so
  # only the weekday or scheme name remains.
  day <- tolower(weekday)
  day <- gsub("weeks?", "", day)
  day <- gsub("[[:punct:][:blank:][:digit:]]*", "", day)
  if (day == "") {
    day <- "monday" # the input was a bare "week(s)": default to Monday
  }
  start <- switch(day,
    mmwr = "sunday", # MMWR == CDC epiweek
    epi  = "sunday", # CDC epiweek
    iso  = "monday", # ISOweek == WHO epiweek
    day              # anything else is already a weekday name
  )
  # handles inputs such as "epiweek:saturday" -> "saturday"
  gsub("epi", "", start)
}

# -------------------------------------------------------------------------
# -------------------------------------------------------------------------

#' Translate a custom interval to a valid interval
#'
#' @param the_interval An interval like `"2 epiweeks"` or `"1 ISOweek"`,
#'   or the number 7 (plain weekly).
#'
#' @return An interval compatible with `seq.Date()`.
#'
#' @examples
#' get_week_duration("2 weeks (wednesday)") # 2 weeks
#' get_week_duration("2 epiweeks")          # 2 weeks
#'
#' @noRd
get_week_duration <- function(the_interval) {
  # Plain weekly interval: nothing to normalise.
  if (the_interval == 7) {
    return(the_interval)
  }
  lowered <- tolower(the_interval)
  # Keep only the leading count and the word "week(s)".
  trimws(sub("^(\\d*) ?.*(weeks?).*$", "\\1 \\2", lowered, perl = TRUE))
}

# -------------------------------------------------------------------------
# -------------------------------------------------------------------------

# Describe the week scheme an incidence object uses, read from the
# "week_start" attribute of its date-group column: "ISO" (Monday), "MMWR"
# (Sunday), or the weekday name in parentheses otherwise.
get_type_of_week <- function(x) {
  dates_col <- get_dates_name(x)
  weeks_col <- get_date_group_names(x)
  start_day <- as.character(attr(x[[weeks_col]], "week_start"))
  if (start_day == "1") {
    "ISO"
  } else if (start_day == "7") {
    "MMWR"
  } else {
    sprintf("(%s)", weekdays(x[[dates_col]][1]))
  }
}

# -------------------------------------------------------------------------
48860f89ef9e942b4400f0b0b5a0ff58b495b0bb
df88c9ad1ff85f827381290a1d98aee51e51f3be
/primary/figures/spillover.R
c233fc1a8da656b7b88ffbccc2f35fcca5f967b6
[]
no_license
jadebc/WBK-primary-outcomes
02c2a722b99c1201361c64ac23c765fb7dd3a68a
3d93265bc32baaacb15cf5dd3d6bf770917af39d
refs/heads/master
2022-04-09T23:22:48.235396
2018-03-29T08:13:17
2018-03-29T08:13:17
75,782,104
0
0
null
null
null
null
UTF-8
R
false
false
2,815
r
spillover.R
##############################################
# WASH Benefits Kenya
# Primary outcome analysis
# Generate plots of each primary outcome by
# the proportion of treated households within
# 2km of each control compound for sets of treatments
#
# by Jade Benjamin-Chung (jadebc@berkeley.edu)
##############################################

# NOTE(review): clears the workspace and relies on absolute home-directory
# paths; as written the script is only runnable on the author's machine.
rm(list=ls())
library(ggplot2)

# Sourced scripts supply the helpers used below (comp2km, preprocess.anthro,
# preprocess.diarr) -- TODO confirm exact definitions live there.
source("~/Documents/CRG/wash-benefits/kenya/src/primary/analysis/10-btw-clus-spill/10a-distance-functions.R")
source("~/documents/crg/wash-benefits/kenya/src/primary/analysis/0-base-programs.R")

#----------------------------------------------------
# Read in distance matrices
#----------------------------------------------------
# presumably provides the anyW.mat/anyS.mat/anyH.mat/anyN.mat/C.mat distance
# matrices used below -- verify object names saved in this .RData
load("~/Dropbox/WBK-primary-analysis/Results/Jade/washb-dist-sub.RData")

#----------------------------------------------------
# Read in outcome data - HAZ
#----------------------------------------------------
# load child length endline dataset
e=read.csv("~/Dropbox/WBK-primary-analysis/Data/final/jade/endline-anthro.csv",stringsAsFactors=TRUE)
e=preprocess.anthro(e, "haz")

# subset to control compounds only
e = subset(e,e$tr=="Control")
e=e[order(e$hhid),]

#----------------------------------------------------
# Read in outcome data - diarrhea
#----------------------------------------------------
# load child diarrhea dataset
data=read.csv("~/Dropbox/WBK-primary-analysis/Data/final/jade/diarrhea.csv")
d=preprocess.diarr(data)

# subset to control compounds only
d = subset(d,d$tr=="Control")

#----------------------------------------------------
# Count the number of treated compounds within a control
# compound for a given set of treatments
#----------------------------------------------------
W.comp2km=comp2km(anyW.mat)
S.comp2km=comp2km(anyS.mat)
H.comp2km=comp2km(anyH.mat)
N.comp2km=comp2km(anyN.mat)

# get the number of compounds within 2 km - any tr
C.comp2km=comp2km(C.mat)

# proportion of nearby compounds with each treatment set, out of all
# compounds within 2 km
W.comp2km$prop=W.comp2km$comp2km / C.comp2km$comp2km
S.comp2km$prop=S.comp2km$comp2km / C.comp2km$comp2km
H.comp2km$prop=H.comp2km$comp2km / C.comp2km$comp2km
N.comp2km$prop=N.comp2km$comp2km / C.comp2km$comp2km

#----------------------------------------------------
# Merge datasets
#----------------------------------------------------
# Merge the treated-proportion data with outcome column `y` by household id,
# then drop households with a missing outcome or a missing proportion.
mymerge=function(trNdata,ydata,y){
  y.ncomp=merge(trNdata,ydata[,c("hhid",y)],by="hhid",all.x=TRUE,all.y=TRUE)
  y.ncomp=y.ncomp[!is.na(y.ncomp[[y]]),]
  y.ncomp=y.ncomp[!is.na(y.ncomp$prop),]
  return(y.ncomp)
}

W.dat.haz=mymerge(W.comp2km,e,"haz")
S.dat.haz=mymerge(S.comp2km,e,"haz")
H.dat.haz=mymerge(H.comp2km,e,"haz")
N.dat.haz=mymerge(N.comp2km,e,"haz")

# Scatterplots: HAZ vs the proportion of treated compounds within 2 km
# NOTE(review): the diarrhea dataset `d` loaded above is never plotted here
# -- confirm whether that was intended.
ggplot(W.dat.haz,aes(x=prop,y=haz))+geom_point()
ggplot(S.dat.haz,aes(x=prop,y=haz))+geom_point()
ggplot(H.dat.haz,aes(x=prop,y=haz))+geom_point()
ggplot(N.dat.haz,aes(x=prop,y=haz))+geom_point()
b1308e93e88e11d67d24c1d4866302a7197d6b9e
8f1be5778fce0622c8026aa219a995361f723c8c
/BBMRIomics/man/runQuery.Rd
359c8941f457842a6f9fc8832fa486d83e1b492e
[]
no_license
bbmri-nl/BBMRIomics
aa5112e9f20aafa9ae506332ba0db556e544f7b7
1c7d9a6ef966365be2b95e2066e8f2fd2006c757
refs/heads/master
2023-05-31T13:21:37.130878
2023-04-28T17:29:31
2023-04-28T17:29:31
95,667,968
3
0
null
null
null
null
UTF-8
R
false
true
892
rd
runQuery.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Sql_MDb_Helpers.R \name{runQuery} \alias{runQuery} \title{send a query to the database} \usage{ runQuery( query, usrpwd = "guest:guest", url = "localhost", port = 5432, db = "rp3_rp4_meta", verbose = T ) } \arguments{ \item{query}{SQL query to be sent to the database} \item{usrpwd}{Username and password concatenated by a colon, defaults to "guest:guest".} \item{url}{URL through which the database can be accessed, defaults to "localhost".} \item{port}{port to be used to connect to the database, defaults to 5432.} \item{db}{name of the database to be connected to, defaults to "rp3_rp4_meta".} } \value{ A data.frame with the query results. } \description{ send a query to the database } \examples{ \dontrun{ visits <- runQuery("SELECT * FROM visit;", RP3_MDB_USRPWD) } } \author{ Davy Cats }
2bdd4838ab50097aab5b1e665f669d57164c984d
3578a4578f61435c25aea39f1c7a7c6968f1517e
/tables.R
8382456280459b11b50aec26f64f9a4abdb768f2
[]
no_license
dushoff/TZ_clinics
ea40addedecac40d5b33492b4b8cc7bae1d3cc63
c7d533f88ec94c973af99b8afde40e7b6ee959d4
refs/heads/master
2020-06-12T20:34:04.819676
2016-02-26T23:39:36
2016-02-26T23:39:36
42,281,791
0
1
null
null
null
null
UTF-8
R
false
false
1,849
r
tables.R
library(dplyr) library(tidyr) year <- (c_visits %>% select(patientid,arvstatuscode,visitdate,visitnum) %>% group_by(patientid) %>% mutate(startyear = as.numeric(format(min(visitdate),"%Y"))) ) arvyear <- (year %>% group_by(patientid) %>% filter(arvstatuscode == "Start ARV") %>% mutate(arvyear = as.numeric(format(min(visitdate),"%Y"))) ) tmpd <- (arvyear %>% select(c(startyear,arvyear))) ### Collapse columns into key/value pairs. JD does not like it. gather_by_year <- (gather(tmpd[,2:3],startyear,arvyear)) all <- (year %>% filter(visitnum == 1) %>% select(startyear) %>% group_by(startyear)) allcount <- (count(all,startyear)) ## Is this counting all patients by their visit number one per startyear? newtable <- (count(newdat2,startyear,arvyear) %>% ungroup %>% arrange(startyear)) yeartotal <- (count(newdat2,startyear) %>% ungroup %>% arrange(startyear)) newtable2 <- (matrix(NA,nrow=4,ncol=5)) newtable2[1,1] <- (newtable$n[1]) #are [1,1] these number stands for row number and column number for a table? newtable2[1,2] <- (newtable$n[2]) newtable2[1,3] <- (newtable$n[3]) newtable2[1,4] <- (newtable$n[4]) newtable2[1,5] <- (allcount$n[1] - yeartotal$n[1]) newtable2[2,2] <- (newtable$n[5]) newtable2[2,3] <- (newtable$n[6]) newtable2[2,4] <- (newtable$n[7]) newtable2[2,5] <- (allcount$n[2] - yeartotal$n[2]) newtable2[3,3] <- (newtable$n[8]) newtable2[3,4] <- (newtable$n[9]) newtable2[3,5] <- (allcount$n[3] - yeartotal$n[3]) newtable2[4,4] <- (newtable$n[10]) newtable2[4,5] <- (allcount$n[4] - yeartotal$n[4]) rownames(newtable2) <- (c("2011","2012","2013","2014")) colnames(newtable2) <- (c("2011","2012","2013","2014","NA")) print(newtable2) library(ggplot2) g <- (ggplot(newtable, aes(x=arvyear, y=n, colour=factor(startyear), group = startyear) ) + geom_line() + geom_point() + theme_bw()) print(g)
64b3b3131fa47e132ef022c0f9898b5279e45a99
1577e1cf4e89584a125cffb855ca50a9654c6d55
/tcl/tcl_ext/quicktimetcl/quicktimetcl/QuickTimeTcl.r
db46fd1ee57a81f4c63b7bcb950125dd5954a020
[ "BSD-3-Clause" ]
permissive
apple-open-source/macos
a4188b5c2ef113d90281d03cd1b14e5ee52ebffb
2d2b15f13487673de33297e49f00ef94af743a9a
refs/heads/master
2023-08-01T11:03:26.870408
2023-03-27T00:00:00
2023-03-27T00:00:00
180,595,052
124
24
null
2022-12-27T14:54:09
2019-04-10T14:06:23
null
MacCentralEurope
R
false
false
2,220
r
QuickTimeTcl.r
/*
 * QuickTimeTcl.r --
 *
 *	This file creates resources used by the QuickTimeTcl package.
 *	(Mac Rez resource-description source, not R.)
 *
 * Copyright (c) 1998 Jim Ingham and Bruce O'Neel
 *
 * $Id: QuickTimeTcl.r,v 1.1.1.1 2003/04/04 16:24:54 matben Exp $
 */

#include <Types.r>
#include <SysTypes.r>

#define RESOURCE_INCLUDED
#define RC_INVOKED
#include "tcl.h"

/*
 * The following include and defines help construct
 * the version string for Tcl.
 */

#define SCRIPT_MAJOR_VERSION 3		/* Major number */
#define SCRIPT_MINOR_VERSION 1		/* Minor number */
#define SCRIPT_RELEASE_SERIAL 0		/* Really minor number! */
#define RELEASE_LEVEL alpha		/* alpha, beta, or final */
#define SCRIPT_VERSION "3.1"
#define SCRIPT_PATCH_LEVEL "3.1"
#define FINAL 0				/* Change to 1 if final version. */

#if FINAL
# define MINOR_VERSION (SCRIPT_MINOR_VERSION * 16) + SCRIPT_RELEASE_SERIAL
#else
# define MINOR_VERSION SCRIPT_MINOR_VERSION * 16
#endif

#define RELEASE_CODE 0x00

/* Version resources shown by the Finder's Get Info window. */
resource 'vers' (1) {
	SCRIPT_MAJOR_VERSION, MINOR_VERSION,
	RELEASE_LEVEL, 0x00, verUS,
	SCRIPT_PATCH_LEVEL,
	SCRIPT_PATCH_LEVEL ", © 1998 Bruce O'Neel, © 2000-2003 Mats Bengtsson"
};

resource 'vers' (2) {
	SCRIPT_MAJOR_VERSION, MINOR_VERSION,
	RELEASE_LEVEL, 0x00, verUS,
	SCRIPT_PATCH_LEVEL,
	"QuickTimeTcl " SCRIPT_PATCH_LEVEL " © 1998-2003"
};

/*
 * The -16397 string will be displayed by Finder when a user
 * tries to open the shared library. The string should
 * give the user a little detail about the library's capabilities
 * and enough information to install the library in the correct location.
 * A similar string should be placed in all shared libraries.
 */
resource 'STR ' (-16397, purgeable) {
	"QuickTimeTcl Library\n\n"
	"This library provides the ability to run QuickTime "
	" commands from Tcl/Tk programs. To work properly, it "
	"should be placed in the ‘Tool Command Language’ folder "
	"within the Extensions folder."
};

/*
 * We now load the Tk library into the resource fork of the library.
 * NOTE(review): despite the comment above, the resource below is the Tcl
 * package-index script (pkgIndex.tcl contents), not the Tk library --
 * it tells Tcl how to load the QuickTimeTcl shared library.
 */
data 'TEXT' (4000, "pkgIndex", purgeable, preload) {
	"# Tcl package index file, version 1.0\n"
	"if {[info tclversion] != "TCL_VERSION"} return\n"
	"package ifneeded QuickTimeTcl 3.1 [list load [file join $dir QuickTimeTcl"TCL_VERSION".shlb]]\n"
};
1fbefc9ec0c3243170d94c61cd5b0f300712d196
058e7ab0f39e470d82a9a2b78b2758d478f598a1
/R/otherTools.R
24d3692f6849dfa8764097d2c7d9ab356b16c9e8
[]
no_license
sanadamakomi/kfltCNV
2e0b9cd7d30adaa573572bbc176ecd74e2ea6620
b3a0acd18191db782de738e9c8b8851679a5967b
refs/heads/master
2020-04-16T22:57:47.166881
2019-07-29T08:01:37
2019-07-29T08:01:37
165,992,232
0
0
null
null
null
null
UTF-8
R
false
false
5,128
r
otherTools.R
#' @title Call gender from BAM file.
#' @description Call gender by read depth of chromosome X and Y. Compare the
#' chi-squared values obtained to infer whether the male or female assumption
#' fits read depth better.
#' @param bam A character string of BAM file path.
#' @param lower A non-negative integer. Position of which coverage is lower than
#' the integer will not be count.
#' @return A character string, \emph{Unknow}, \emph{Female} or \emph{Male}.
#' @export
#' @import IRanges
#' @import GenomicRanges
#' @import GenomicAlignments
#' @import GenomeInfoDb
#' @import S4Vectors
#' @importFrom stats chisq.test
#' @importFrom Rsamtools idxstatsBam
#' @importFrom Rsamtools ScanBamParam
#' @author Zhan-Ni Chen
#' @examples
#' ####### Call gender from BAM file #######
#' callGenderByBam(system.file("extdata", 'testSample.bam', package ="kfltCNV"))
callGenderByBam <- function(bam, lower = 20) {
  # Per-contig mapping statistics; keep the first 24 entries.
  # NOTE(review): assumes chr1-22 + X + Y are the first 24 contigs of the
  # reference -- TODO confirm for non-standard/reordered references.
  df <- idxstatsBam(bam)
  df <- df[1:24,]
  # Use the contig with the most mapped reads as the autosomal reference
  auto_chr <- df[which(df[,'mapped'] == max(df[,'mapped'])), 'seqnames']
  auto_chr <- auto_chr[1]
  bamSeqinfo <- seqinfo(BamFile(bam))
  seqn <- seqnames(bamSeqinfo)
  # Sequence names containing 'X' and 'Y'.
  # NOTE(review): grep by letter may match other contigs too, and
  # `start = c(1, 1, 1)` below assumes exactly one X and one Y match.
  sexn <- seqn[sapply(c('X', 'Y'), grep, seqn)]
  # Whole-chromosome ranges for the autosome plus the sex chromosomes
  which <- GRanges(Rle(c(auto_chr,sexn)), ranges = IRanges(start = c(1, 1, 1), end = seqlengths(bamSeqinfo)[c(auto_chr, sexn)]), seqinfo = bamSeqinfo)
  param <- ScanBamParam(which = which)
  # NOTE(review): `aln` is assigned but never used afterwards.
  aln <- readGAlignments(file = bam, index = bam, param = param)
  cov <- coverage(bam, param = param)
  # Per chromosome: number of positions with coverage >= lower, and the
  # total depth summed over those positions
  df <- sapply(cov[c(auto_chr, sexn)], function(c, lower) {
    v <- Views(c, c >= lower)
    return(c(sum(width(v)), sum(sum(v))))
  }, lower = lower )
  # Mean depth over sufficiently covered positions, per chromosome
  ratio <- df[2,] / df[1,]
  ratio_a <- ratio[grep(auto_chr, names(ratio))]
  ratio_x <- ratio[grep('X', names(ratio))]
  ratio_y <- ratio[grep('Y', names(ratio))]
  if (is.na(ratio_x) & is.na(ratio_y)) return('Unknow')
  # Female assumption: X depth comparable to autosome depth (2:2 split)
  f_a_x <- chisq.test(c(ratio_a, ratio_x), p = c(2/4, 2/4))$p.value
  # Male assumption: Y depth about half the autosome depth (2:1 split)
  m_a_y <- chisq.test(c(ratio_a, ratio_y), p = c(2/3, 1/3))$p.value
  if (ratio_x == 0 & ratio_y == 0) return('Unknow')
  if (ratio_x == 0) {
    if (m_a_y > 5E-2) return('Male')
    return('Female')
  } else if (ratio_y == 0) {
    if (f_a_x > 5E-2) return('Female')
    return('Male')
  } else {
    # Both sex chromosomes covered: pick the better-fitting assumption,
    # requiring its chi-squared p-value to exceed 0.05
    if (f_a_x > m_a_y & f_a_x > 5E-2) return('Female')
    if (f_a_x < m_a_y & m_a_y > 5E-2) return('Male')
    return('Unknow')
  }
}

#' @title Call gender from coverage file.
#' @description Call gender by read depth of chromosome X and Y. Compare the
#' chi-squared values obtained to infer whether the male or female assumption
#' fits read depth better.
#' @param x A character string of coverage file (<fileName>.cov) path.
#' @return A character string, \emph{Unknow}, \emph{Female} or \emph{Male}.
#' @export
#' @import IRanges
#' @import GenomicRanges
#' @import S4Vectors
#' @importFrom stats chisq.test
#' @author Zhan-Ni Chen
#' @examples
#' ####### Call gender from coverage file #######
#' callGenderByCov(system.file("extdata", 'testSample.cov', package = "kfltCNV"))
callGenderByCov <- function (x) {
  gr <- readCovFile(x)
  seqn <- as.character(as.vector(runValue(seqnames(gr))))
  # Sequence names containing 'X' or 'Y'
  sexn <- lapply(c('X', 'Y'), function(x) {seqn[grep(x, seqn)]})
  sexn <- unlist(sexn)
  if (length(sexn) == 0) return('Unknow')
  # Mean depth per sex chromosome
  sexgr <- split(gr, seqnames(gr))
  sexgr <- sexgr[sexn]
  ratio <- sapply(sexgr, function(x) {mean(x$depth, na.rm = TRUE)})
  # NOTE(review): unlike callGenderByBam, the "autosomal" mean here is the
  # mean over ALL intervals (including X/Y) -- confirm this is intended.
  ratio_a <- mean(gr$depth, na.rm = TRUE )
  ratio_x <- 0
  ratio_y <- 0
  # NOTE(review): grepl() over names(ratio) returns a vector when several
  # names match; if() then uses only the first element -- TODO confirm.
  if (grepl('X', names(ratio))) ratio_x <- ratio[grep('X', names(ratio))]
  if (grepl('Y', names(ratio))) ratio_y <- ratio[grep('Y', names(ratio))]
  # Female assumption: X depth comparable to overall depth (2:2 split)
  f_a_x <- chisq.test(c(ratio_a, ratio_x), p = c(2/4, 2/4))$p.value
  # Male assumption: Y depth about half the overall depth (2:1 split)
  m_a_y <- chisq.test(c(ratio_a, ratio_y), p = c(2/3, 1/3))$p.value
  if (ratio_x == 0 & ratio_y == 0) return('Unknow')
  if (ratio_x == 0) {
    if (m_a_y > 5E-2) return('Male')
    return('Female')
  } else if (ratio_y == 0) {
    if (f_a_x > 5E-2) return('Female')
    return('Male')
  } else {
    if (f_a_x > m_a_y & f_a_x > 5E-2) return('Female')
    if (f_a_x < m_a_y & m_a_y > 5E-2) return('Male')
    return('Unknow')
  }
}

#' @title Check BAM file.
#' @description It will stop if BAM file is illegal.
#' @param x A character string or vector of BAM File path. #' @export #' @import IRanges #' @import GenomicRanges #' @import S4Vectors #' @importFrom Rsamtools BamFile #' @author Zhan-Ni Chen #' @examples #' ####### Check BAM file ####### #' checkBam(system.file("extdata", 'testSample.bam', package = "kfltCNV")) checkBam <- function(x) { a <- sapply(x, function(bam) { if (! file.exists(bam)) stop(paste0(bam, ' file is missing.')) bai <- BamFile(bam)$index if (is.na(bai)) stop(paste0(bam, '.bai index file is missing.')) bam_info <- file.info(bam) bai_info <- file.info(bai) dt <- difftime(bai_info$mtime, bam_info$mtime, units = 'secs') dt <- as.numeric(dt) if (dt < 0) stop(paste0(bam, ' index file is older than BAM file.')) }) }
f9198e81df3cf9cd52ee72249aee50aef4adc7a0
1a111b0a16d39f1387e189687b116698262688d1
/simulator.R
e0d2e5e333888b16d66ba69864e2669ffe27cfa1
[]
no_license
wisus/spatial_autoreg_code
e0a3afa92227f6ae8ffabeae09991a710bbb816e
746b647ef2696bf344760394a79abb35233080d8
refs/heads/master
2022-01-09T03:39:48.480132
2019-06-17T15:25:31
2019-06-17T15:25:31
null
0
0
null
null
null
null
UTF-8
R
false
false
8,308
r
simulator.R
# Simulation utilities for the autoregressive model with spatial dependence
# (AMSD). These functions rely on cov.exp() and cov.quad() -- defined
# elsewhere in this project -- to build a spatial covariance matrix from the
# matrix of squared pairwise distances `dist_loc2`.
library(RSpectra)

### Simulation: for bootstrap used in estimation

### format
# Round x to k decimal places and return it as a character string with all
# whitespace stripped (format() pads with spaces).
specify_decimal <- function(x, k) gsub('\\s+', '', format(round(x, k), nsmall = k))

# simu.loc<-function(N, cov.type = "exp")
# {
#   X_loc = runif(N, 0, sqrt(N))
#   Y_loc = runif(N, 0, sqrt(N))
#   loc = cbind(X_loc, Y_loc)
#   if (cov.type=="quad")
#     loc = loc*sqrt(10)*2
#   return(loc)
# }

### for data generation

# Simulate the spatially-correlated autoregressive coefficients rho.
# Draw a Gaussian field X ~ N(0, cov(beta, sig2)) via an eigendecomposition of
# the covariance, then map X + alpha through a scaled logistic so that every
# rho lies in (-1, 1).
#   beta, sig2 : spatial covariance parameters
#   N          : number of locations
#   alpha      : shift applied before the logistic transform
#   dist_loc2  : squared pairwise distance matrix
#   cov.type   : "exp" for the exponential model, otherwise quadratic
simu.rho <- function(beta, sig2, N, alpha, dist_loc2, cov.type = "exp")
{
  if (cov.type == "exp")
    cov_X <- cov.exp(dist_loc2, beta, sig2)
  else
    cov_X <- cov.quad(dist_loc2, beta, sig2)
  #### simulate X
  eig_X <- eigen(cov_X)
  # FIX: clamp tiny negative eigenvalues (numerical noise) to zero before
  # taking square roots, exactly as simu.mu() already does; otherwise sqrt()
  # emits NaN and corrupts the simulated field.
  eig_X$values[eig_X$values < 0] <- 0
  sqrt_value <- sqrt(eig_X$values)
  X <- eig_X$vectors %*% (sqrt_value * rnorm(N, 0, 1))  # X follows cov_X
  #rhos = 2*pnorm(X+alpha)-1
  rhos <- 2 * exp(X + alpha) / (1 + exp(X + alpha)) - 1  # logistic map to (-1, 1)
  return(rhos)
}

# Simulate spatially-correlated location means: a Gaussian field with
# covariance cov(beta, sig2) shifted by the scalar mean `mu`.
simu.mu <- function(beta, sig2, N, mu, dist_loc2, cov.type = "exp")
{
  if (cov.type == "exp")
    cov_X <- cov.exp(dist_loc2, beta, sig2)
  else
    cov_X <- cov.quad(dist_loc2, beta, sig2)
  #### simulate X
  eig_X <- eigen(cov_X)
  eig_X$values[eig_X$values < 0] <- 0  # guard against negative eigenvalues
  sqrt_value <- sqrt(eig_X$values)
  mus <- eig_X$vectors %*% (sqrt_value * rnorm(N, 0, 1)) + mu  # field centered at mu
  return(mus)
}

# Simulate an N x Time matrix Y following the autoregression model with
# spatial dependence: Y[, t] = rhos * Y[, t-1] + eps[, t] + mu, where every
# column of eps shares the spatial covariance cov(beta, sigy2).
simu.Y <- function(beta, sigy2, N, Time, rhos, dist_loc2, mu, cov.type = "exp")
{
  #### simulate covariance of epsilon
  if (cov.type == "exp")
    cov_e <- cov.exp(dist_loc2, beta, sigy2)
  else
    cov_e <- cov.quad(dist_loc2, beta, sigy2)
  #### simulate epsilon
  eig_e <- eigen(cov_e)
  eig_e$values[eig_e$values < 0] <- 0  # FIX: same clamp as simu.mu()
  sqrt_value <- sqrt(eig_e$values)
  # epsilon follows the same distribution across time; draw all Time columns
  # in one matrix multiplication.
  eps_mat <- eig_e$vectors %*% (sqrt_value * matrix(rnorm(N * Time, 0, 1), nrow = N))
  Y <- matrix(0, nrow = N, ncol = Time)
  Y[, 1] <- eps_mat[, 1]
  for (t in 2:Time)
  {
    Y[, t] <- Y[, t - 1] * rhos + eps_mat[, t] + mu  # Yt according to AMSD model
  }
  return(Y)
}

### for bootstrap estimation

# As simu.rho(), but draws R independent replicates at once and returns them
# stacked column-after-column in a single vector of length N*R.
simu.rho.rep <- function(beta, sig2, N, R, alpha, dist_loc2, cov.type = "exp")
{
  if (cov.type == "exp")
    cov_X <- cov.exp(dist_loc2, beta, sig2)
  else
    cov_X <- cov.quad(dist_loc2, beta, sig2)
  #### simulate X
  eig_X <- eigen(cov_X)
  eig_X$values[eig_X$values < 0] <- 0  # FIX: clamp before sqrt (as simu.mu)
  sqrt_value <- sqrt(eig_X$values)
  X <- eig_X$vectors %*% (sqrt_value * matrix(rnorm(N * R, 0, 1), nrow = N))
  #rhos = 2*pnorm(X+alpha)-1
  rhos <- 2 * exp(X + alpha) / (1 + exp(X + alpha)) - 1  # logistic map to (-1, 1)
  return(as.vector(rhos))
}

# As simu.mu(), but draws R replicates. Uses RSpectra::eigs_sym() to compute
# only the top nrow-1 eigenpairs, which is cheaper than a full eigen() for
# large covariance matrices.
simu.mu.rep <- function(beta, sig2, N, R, mu, dist_loc2, cov.type = "exp")
{
  if (cov.type == "exp")
    cov_X <- cov.exp(dist_loc2, beta, sig2)
  else
    cov_X <- cov.quad(dist_loc2, beta, sig2)
  #### simulate X
  eig_X <- eigs_sym(cov_X, nrow(cov_X) - 1)
  #eig_X = eigen(cov_X)
  eig_X$values[eig_X$values < 0] <- 0
  sqrt_value <- sqrt(eig_X$values)
  X <- eig_X$vectors %*%
    (sqrt_value * matrix(rnorm(length(eig_X$values) * R, 0, 1),
                         nrow = length(eig_X$values)))  # X follows cov_X
  mus <- X + mu
  return(as.vector(mus))
}

# As simu.Y(), but simulates R replicates stacked row-wise: the result is an
# (N*R) x Time matrix, with `rhos` and `mus` expected to be vectors of length
# N*R (as produced by simu.rho.rep()/simu.mu.rep()).
simu.Y.rep <- function(beta, sigy2, N, R, Time, rhos, dist_loc2, mus, cov.type = "exp")
{
  #### simulate covariance of epsilon
  if (cov.type == "exp")
    cov_e <- cov.exp(dist_loc2, beta, sigy2)
  else
    cov_e <- cov.quad(dist_loc2, beta, sigy2)
  #### simulate epsilon
  eig_e <- eigen(cov_e)
  eig_e$values[eig_e$values < 0] <- 0  # FIX: clamp before sqrt
  sqrt_value <- sqrt(eig_e$values)
  # One N x Time noise matrix per replicate, stacked row-wise.
  eps_mat_list <- lapply(seq_len(R), function(r)
    eig_e$vectors %*% (sqrt_value * matrix(rnorm(N * Time, 0, 1), nrow = N)))
  eps_mat_R <- do.call(rbind, eps_mat_list)
  Y <- matrix(0, nrow = N * R, ncol = Time)
  Y[, 1] <- eps_mat_R[, 1]
  for (t in 2:Time)
  {
    Y[, t] <- Y[, t - 1] * rhos + eps_mat_R[, t] + mus  # AMSD recursion, all replicates at once
  }
  return(Y)
}

### Estimation functions
# (Legacy least-squares estimation code, retained commented-out for reference.)
# ## estimate rho parameters
# # estimate theta_X
# estThetaRho<-function(rhos, dist_loc2)
# {
#   #X1 = qnorm((rhos+1)/2)
#   rhos[which(abs(rhos)>1)] = sign(rhos[which(abs(rhos)>1)])*rep(0.999, sum(abs(rhos)>1))
#   y1 = (rhos+1)/2
#   X1 = log(y1/(1-y1))
#   alpha = mean(X1)
#   thetaX = lse.X(X1 - alpha, dist_loc2)
#   return(c(alpha, thetaX))
# }
#
# estThetaMu<-function(mus, dist_loc2)
# {
#   #X1 = qnorm((rhos+1)/2)
#   mu = mean(mus)
#   thetaMu = lse.X(mus-mu, dist_loc2)
#   return(c(mu, thetaMu))
# }
#
# # estimate the parameters in rhos (theta_X)
# lse.X<-function(Y, dist_loc2) # Y here is X-alpha, which is centered
# {
#   sig_hat = tcrossprod(Y)
#
#   theta = c(1, var(as.vector(Y)))
#   iter = 1; del = 1
#   while(mean(abs(del))>10^-3&iter<1000)
#   {
#     #cat(mean(abs(del)), " ", theta, "\n")
#     del = lse.step(sig_hat, beta = theta[1], sigy2 = theta[2], dist_loc2)
#     theta = theta - del*0.5
#     if (any(theta<0|theta>10))
#       theta = runif(2, 0.1,10)
#     iter = iter+1
#   }
#   return(abs(theta)) # since exponential covariance model is symmetric of beta, we restrict theta to be positive
# }
#
# # estimate rho and mu by iterations
# ### Estimation functions
#
# estRhoMu<-function(Y)
# {
#   Time = ncol(Y)
#   rhosmu = apply(Y, 1, function(y){
#     x = cbind(1, y[-Time])
#     return(solve(crossprod(x))%*%crossprod(x, y[-1])) # estimate rho for each location
#   })
#   #mu = mean(rhosmu[1,])
#   return(rhosmu = rhosmu)
# }
#
# ## estimate epsilon parameters
# # filter to obtain residuals
# filter<-function(Ymat, rhos,mu)
# {
#   Time = ncol(Ymat)
#   #Ymat1 = cbind(mu, rhos, Ymat)
#   #eps = apply(Ymat1, 1, function(x) x[-(1:3)]-x[2]*x[-c(1,2,ncol(Ymat1))] - x[1])
#   eps = Ymat[,-1] - rhos*Ymat[,-Time] - mu
#   return(eps)
# }
#
# # estimate the parameters epsilon theta_e
# lse.theta<-function(Y, dist_loc2, rhos, mu)
# {
#   Time = ncol(Y)
#   Y0 = Y
#   Y = Y[,-1] - rhos*Y[,-Time] - mu # residuals
#   if (is.null(dim(Y)))
#     sig_hat = tcrossprod(Y)
#   else
#     sig_hat = tcrossprod(Y)/ncol(Y)
#
#   theta = c(1, 0.5) # the initial value
#   iter = 1; del = 1
#   while(mean(abs(del))>10^-3&iter<1000)
#   {
#     #cat(mean(abs(del)), " ", theta, "\n")
#     del = lse.step(sig_hat, beta = theta[1], sigy2 = theta[2], dist_loc2) # each step
#     theta = theta - del*0.5
#     if (any(theta<0|theta>5)) # if the value is beyond this, it might not converge due to the initial values
#       theta = runif(2, 0.1,5)
#     iter = iter+1
#   }
#   return(abs(theta))
# }
#
#
# # for each newton-raphson iteration, the following function gives each step
# # only works for exponential model
# lse.step <- function(sig_hat, beta, sigy2, dist_loc2)
# {
#   exp_loc = exp(-beta^2*dist_loc2)
#   sig = sigy2*exp_loc
#   sig_del = sig_hat - sig
#   sig_del2 = sig_hat - 2*sig
#
#   ### first order derivative: gradient
#   grad_beta = 4*beta*sum(sig_del*sig*dist_loc2)
#   grad_sigy2 = -2*sum(sig_del*exp_loc)
#   grad_para = c(grad_beta, grad_sigy2)
#
#   ### hessian matrix
#   hmat = matrix(0,2,2)
#   hmat[1,1] = (-8*sum(sig_del2*sig*beta^2*dist_loc2^2) + 4*sum(sig_del*sig*dist_loc2))
#   hmat[1,2] = 4*beta*sum(sig_del2*exp_loc*dist_loc2)
#   hmat[2,1] = hmat[1,2]
#   hmat[2,2] = 2*sum(exp_loc^2)
#   if (any(!is.finite(hmat)))
#     return(grad_para)
#   eig_hmat = eigen(hmat)
#   if (any(eig_hmat$values<0))
#     hmat = (eig_hmat$vectors)%*%(abs(eig_hmat$values)*t(eig_hmat$vectors))
#
#   del = solve(hmat)%*%grad_para #grad_beta/hmat
#   return(del)
# }
#
# # to obtain the kriging surface
# trans2mat<-function(Yt_pred, lattice_num)
# {
#   Yt_pred_mat = t(matrix(Yt_pred, nrow = lattice_num))
#   colnames(Yt_pred_mat) = round(seq(116, 117.1, length.out = lattice_num), 3)
#   rownames(Yt_pred_mat) = round(seq(39.52, 40.53, length.out = lattice_num), 3)
#   return(Yt_pred_mat)
# }
679147bf1e12533d609da5ab94d78de15de672c7
1d4dc3bb6d4f8404c42778fa8e76f521dadc0b29
/demographic.R
a44cd41601fd925702fdbecbe3b85e377a1933b2
[]
no_license
lash1937/stochasticity_structure
79b97a0c2b98c923b4274b84e3318f36572c23ce
ce6d68b427c5aa12ef235154b5c2a6f446383cff
refs/heads/master
2020-05-23T18:07:51.972256
2019-05-25T14:38:24
2019-05-25T14:38:24
186,882,233
3
2
null
null
null
null
UTF-8
R
false
false
7,491
r
demographic.R
# code to create figure 3,
# diving into the effects of demographic stochasticity on population and community structure
#
# NOTE(review): the simulation kernels do.pop.dem.BH() and do.com.dem.BH() are
# presumably Beverton-Holt update functions defined in model_functions.R --
# verify there. No set.seed() is called, so each run of this script produces a
# different realization of the figures. quartz() opens a plotting window on
# macOS only; on other platforms dev.new() would be needed -- TODO confirm the
# intended platform.

# source the functions to run each model
source("model_functions.R")

# ----------------------------------------------------------------------------------------------------------
# single run population results (panel B)

# time to run the model
time <- 50

# density independent growth rates
R1 <- 1.6 # species 1
R2 <- 1.6 # species 2

# intraspecific competition coefficients
alpha1 <- .02
alpha2 <- .1

# set up vector to hold results (both species start at abundance 20)
results1 <- results2 <- rep(NA, time)
results1[1] <- results2[1] <- 20

# run model: one stochastic trajectory per species
for (t in 1:(time-1)) {
  results1[t+1] <- do.pop.dem.BH(R=R1, alpha=alpha1, N=results1[t])
  results2[t+1] <- do.pop.dem.BH(R=R2, alpha=alpha2, N=results2[t])
}

# Create figure (panel B): abundance trajectories; dashed line marks t* = 40
quartz(width=5, height=5)
par(mar=c(3,3,2,2))
plot(results2, col="darkgoldenrod2", type="l", lwd=2, lty=1, ylab="", yaxt="n", xaxt="n", xlab="", cex.axis=1.25, xlim=c(0, 50), ylim=c(0,50), main="")
abline(v=40, lty=2, col="black")
axis(side=1, at=c(0, 25, 50), labels=c(0, 25, 50), cex.axis=1.25)
mtext("Time", side=1, line=2, outer=FALSE, col="black", cex=1.25)
mtext("Abundance", side=2, line=2, outer=FALSE, col="black", cex=1.25)
axis(side=2, at=c(0, 25, 50), labels=c(0, 25, 50), cex.axis=1.25)
lines(results1, col="firebrick4", lwd=2)

# ----------------------------------------------------------------------------------------------------------
# across runs population distributions (panel C)

# time point to create the distribution
t_star <- 40

# number of runs for creating the distributions
runs <- 1000

# create matrix to hold results (one row per run, one column per time step)
results1 <- results2 <- matrix(NA, nrow=runs, ncol=time)
results1[,1] <- results2[,1] <- 20

# run model: replicate the single-run simulation `runs` times
for (counter in 1:runs) {
  for (t in 1:(time-1)) {
    results1[counter,t+1] <- do.pop.dem.BH(R=R1, alpha=alpha1, N=results1[counter,t])
    results2[counter,t+1] <- do.pop.dem.BH(R=R2, alpha=alpha2, N=results2[counter,t])
  }
}

# extract abundance at time=t_star for each run
dist_results1 <- results1[,t_star]
dist_results2 <- results2[,t_star]

# create distributions of expected abundance (kernel density estimates)
density_results1 <- density(dist_results1, from=0)
density_results2 <- density(dist_results2, from=0)

# Create figure (panel C): across-run abundance distributions at t*
quartz(width=5, height=5)
par(mar=c(3,3,2,2))
plot(density_results2, col="darkgoldenrod2", lwd=2, ylab="", lty=1, yaxt="n", xaxt="n", xlab="", cex.axis=1.25, xlim=c(0, 50), ylim=c(0,.4), main="")
axis(side=1, at=c(0, 25, 50), labels=c(0, 25, 50), cex.axis=1.25)
mtext("Abundance (time=t*)", side=1, line=2, outer=FALSE, col="black", cex=1.25)
mtext("Probability", side=2, line=2, outer=FALSE, col="black", cex=1.25)
axis(side=2, at=c(0, .2, .4), labels=c(0, .2, .4), cex.axis=1.25)
lines(density_results1, col="firebrick4", lwd=2)

# ----------------------------------------------------------------------------------------------------------
# single run community results (panel D)

# number of species
species <- 20

# set parameters for model run
# parameters for the stabilizing case: species-specific growth rates and
# competition coefficients drawn at random
R_stabilizing <- runif(species, 2, 2.5)
alphas_stabilizing <- matrix(runif(species*species, .002, .005), ncol=species, nrow=species)
# set intraspecific competition (i.e. all of the diagonal elements) higher than interspecific competition
diag(alphas_stabilizing) <- .03

# parameters for the neutral case: every species gets the mean of the
# stabilizing parameters, so species are demographically identical
R_neutral <- rep(mean(R_stabilizing), species)
alphas_neutral <- matrix(rep(mean(alphas_stabilizing), species*species), ncol=species, nrow=species)

# set up matrix to hold results (one row per species)
results_stabilizing <- results_neutral <- matrix(NA, nrow=species, ncol=time)
results_stabilizing[,1] <- results_neutral[,1] <- 20

# run model: full community dynamics, each species updated against all others
for (t in 1:(time-1)) {
  for (s in 1:species) {
    results_stabilizing[s,t+1] <- do.com.dem.BH(R=R_stabilizing[s], alphas=alphas_stabilizing[s,], N=results_stabilizing[s,t], Nall=results_stabilizing[,t])
    results_neutral[s,t+1] <- do.com.dem.BH(R=R_neutral[s], alphas=alphas_neutral[s,], N=results_neutral[s,t], Nall=results_neutral[,t])
  }
}

# determine diversity through time (number of species with abundance > 0)
diversity_stabilizing <- diversity_neutral <- rep(NA, time)
for (t in 1:time) {
  diversity_stabilizing[t] <- sum(results_stabilizing[,t] > 0)
  diversity_neutral[t] <- sum(results_neutral[,t] > 0)
}

# Create figure (panel D): diversity trajectories; dashed line marks t* = 40
quartz(width=5, height=5)
par(mar=c(3,3,2,2))
plot(diversity_stabilizing, col="darkorchid3", type="l", lwd=2, lty=1, ylab="", yaxt="n", xaxt="n", xlab="", cex.axis=1.25, xlim=c(0, 50), ylim=c(0,20), main="")
abline(v=40, lty=2, col="black")
axis(side=1, at=c(0, 25, 50), labels=c(0, 25, 50), cex.axis=1.25)
mtext("Time", side=1, line=2, outer=FALSE, col="black", cex=1.25)
mtext("Diversity", side=2, line=2, outer=FALSE, col="black", cex=1.25)
axis(side=2, at=c(0, 10, 20), labels=c(0, 10, 20), cex.axis=1.25)
lines(diversity_neutral, col="plum2", lwd=2)

# ----------------------------------------------------------------------------------------------------------
# distribution for community results (panel E)

# set up arrays to hold results (run x species x time)
dist_stabilizing <- dist_neutral <- array(NA, c(runs, species, time))
dist_stabilizing[,,1] <- dist_neutral[,,1] <- 20

# model run
for (counter in 1:runs) {
  # redraw parameters for each run
  R_stabilizing <- runif(species, 2, 2.5)
  alphas_stabilizing <- matrix(runif(species*species, .002, .005), ncol=species, nrow=species)
  diag(alphas_stabilizing) <- .03
  R_neutral <- rep(mean(R_stabilizing), species)
  alphas_neutral <- matrix(rep(mean(alphas_stabilizing), species*species), ncol=species, nrow=species)
  for (t in 1:(time-1)) {
    for (s in 1:species) {
      dist_stabilizing[counter,s,t+1] <- do.com.dem.BH(R=R_stabilizing[s], alphas=alphas_stabilizing[s,], N=dist_stabilizing[counter,s,t], Nall=dist_stabilizing[counter,,t])
      dist_neutral[counter,s,t+1] <- do.com.dem.BH(R=R_neutral[s], alphas=alphas_neutral[s,], N=dist_neutral[counter,s,t], Nall=dist_neutral[counter,,t])
    }
  }
}

# extract results for t=t_star
t_star_stabilizing <- dist_stabilizing[,,t_star]
t_star_neutral <- dist_neutral[,,t_star]

# determine diversity at t=t_star for each run
diversity_stabilizing <- diversity_neutral <- rep(NA, runs)
for(counter in 1:runs) {
  diversity_stabilizing[counter] <- sum(t_star_stabilizing[counter,]>0)
  diversity_neutral[counter] <- sum(t_star_neutral[counter,]>0)
}

# create distributions of expected diversity (kernel density estimates)
density_stabilizing <- density(diversity_stabilizing, from=0, adjust=1.5)
density_neutral <- density(diversity_neutral, from=0, adjust=1.5)

# Create figure (panel E): across-run diversity distributions at t*
quartz(width=5, height=5)
par(mar=c(3,3,2,2))
plot(density_stabilizing, col="darkorchid3", lwd=2, ylab="", lty=1, yaxt="n", xaxt="n", xlab="", cex.axis=1.25, xlim=c(0, 20), ylim=c(0,.3), main="")
axis(side=1, at=c(0, 10, 20), labels=c(0, 10, 20), cex.axis=1.25)
mtext("Diversity (time=t*)", side=1, line=2, outer=FALSE, col="black", cex=1.25)
mtext("Probability", side=2, line=2, outer=FALSE, col="black", cex=1.25)
axis(side=2, at=c(0, .15, .3), labels=c(0, .15, .3), cex.axis=1.25)
lines(density_neutral, col="plum2", lwd=2)
9346641cc443de684cef3656918317bc9bb46a5e
f8955345f2129d214ae932a6199a2cd480fb8771
/cachematrix.R
8e73b5d028a28310726bbf39f91b6ba13aea52ea
[]
no_license
funnyletter/ProgrammingAssignment2
b44ee9e01c1ec6922c3180c55356ac07d0037fb4
4e33563e82bcbb199be0388e13f6aa8d54ec9757
refs/heads/master
2021-01-17T01:08:50.771131
2016-02-05T22:35:51
2016-02-05T22:35:51
50,875,795
0
0
null
2016-02-01T22:21:43
2016-02-01T22:21:43
null
UTF-8
R
false
false
1,669
r
cachematrix.R
## These functions create an object that can contain a matrix and also its inverse, so you can calculate the ## inverse once, cache it, and retrieve it again later. This only works on invertable matrices. If you give it ## a non-square matrix it will fail loudly. ## Makes a special matrix object that can store the matrix and its own inverse. It does not set the inverse ## until the inverse is asked for using cacheSolve. makeCacheMatrix <- function(x = matrix()) { # If there's already something cached, get rid of it myCache <- NULL # Set the value of the matrix, and since we're doing a new matrix, clear any cached inverse set <- function(y) { x <<- y myCache <<- NULL } # Retrieve the value of the matrix. So all this does is return x from when we made the matrix. get <- function () x #Set the inverse matrix setInverse <- function(theInverse) myCache <<- theInverse # Retrieve the inverse matrix getInverse <- function() myCache # Make a list to retrieve stuff from in other functions list(set = set, get = get, setInverse = setInverse, getInverse = getInverse) } ## This function takes an object made with makeCacheMatrix and returns the inverse from cache if it exists. ## If it does not exist, it calculates the inverse, caches it, and returns it. cacheSolve <- function(x, ...) { m <- x$getInverse() #Is there something cached? If so, return it. if(!is.null(m)) { message("Getting cached data...") return(m) } # If there's nothing cached, calculate the inverse, cache it, and return it. data <- x$get() m <- solve(data) x$setInverse(m) return(m) }
8bae8e6dd7a5100264281fa1b651be6eb6d8993f
83fec32b0e9f9f113f105271adb8d49e062d9909
/geologyGeometry/tutorials/4orientations/5ignoringSymmetry.R
b355e8a2a8e16baa34943de3d83033fe7e5307b0
[ "Apache-2.0" ]
permissive
nicolasmroberts/InternalStructureMtEdgar_PR2021
e0982fa25ad8247573649ff780381742ce461163
01ebb627aedb2b8f7dab6ce8f3bc53b545c6df4d
refs/heads/master
2023-03-17T14:18:24.883512
2021-03-16T19:59:57
2021-03-16T19:59:57
348,474,436
0
0
null
null
null
null
UTF-8
R
false
false
6,549
r
5ignoringSymmetry.R
# Copyright 2017 Joshua R. Davis
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.



### INTRODUCTION ###

# In the preceding tutorial we learned the 'right' way to compute the mean and
# dispersion of an orientational data set. But tools for orientations are so
# scarce that we must sometimes resort to a less-than-ideal way.
# (All geo*/ori*/rot* functions below come from the geologyGeometry library,
# which must be loaded before running this tutorial.)



### THE WRONG WRONG WAY ###

# Load a set of foliation-lineation orientations from the western Idaho shear
# zone (Giorgis and Tikoff, 2004). Also compute and illustrate the Frechet
# mean. Because of the 4-fold symmetry, everything appears four times in a
# rotation plot.
wiszData <- geoDataFromFile("data/wiszFollins.tsv")
wiszMeanVar <- oriMeanVariance(wiszData$rotation, group=oriLineInPlaneGroup)
wiszCurves <- lapply(
  oriNearestRepresentatives(wiszData$rotation, wiszMeanVar$mean, group=oriLineInPlaneGroup),
  rotGeodesicPoints, wiszMeanVar$mean)
oriEqualAnglePlot(wiszData$rotation, curves=wiszCurves, group=oriLineInPlaneGroup)

# What if we weren't being careful, and we treated our orientations as mere
# rotations? First, here's the plot. The data set looks bimodal, even though
# it's unimodal.
rotEqualAnglePlot(wiszData$rotation)

# Here's the rotational mean as minimizing the size of a spider in that plot.
wiszMeanVarRot <- rotMeanVariance(wiszData$rotation)
wiszCurvesRot <- lapply(wiszData$rotation, rotGeodesicPoints, wiszMeanVarRot$mean)
rotEqualAnglePlot(wiszData$rotation, curves=wiszCurvesRot)

# The spider looks much bigger than in the orientational treatment. The
# variances confirm it.
wiszMeanVar$variance
wiszMeanVarRot$variance



### THE RIGHT WRONG WAY ###

# Remembering now that we should be dealing with orientations, let's view all
# four symmetric copies of that spider. This plot reveals the problem: By
# ignoring symmetry, the rotation-only treatment 'crosses between symmetric
# copies' of the data when it shouldn't.
oriEqualAnglePlot(wiszData$rotation, curves=wiszCurvesRot, group=oriLineInPlaneGroup)

# In this example, we can take another tack: At the start, ensure that all of
# the orientations are represented by rotations in a single symmetric copy.
# Then treat them as rotations.
wiszRots <- oriNearestRepresentatives(wiszData$rotation, wiszData$rotation[[1]], group=oriLineInPlaneGroup)
wiszMeanVarOkay <- rotMeanVariance(wiszRots)
wiszCurvesOkay <- lapply(
  wiszRots,
  rotGeodesicPoints, wiszMeanVarOkay$mean)
rotEqualAnglePlot(wiszRots, curves=wiszCurvesOkay)

# When we symmetrize that plot, we get the same plot as we did in the true,
# orientational treatment.
oriEqualAnglePlot(wiszData$rotation, curves=wiszCurvesOkay, group=oriLineInPlaneGroup)

# And the variances confirm that we're getting the right answer.
wiszMeanVar$variance
wiszMeanVarOkay$variance

# The lesson here is: If your data are tightly concentrated enough, then
# isolating the symmetric copies is not difficult, and working within one
# symmetric copy often yields good results.

# This strategy is common in electron backscatter diffraction (EBSD), for
# example. Let's take another look at the Moine thrust intra-grain quartz
# orientations of (Strine and Wojtal, 2004; Michels et al., 2015). Six tight,
# beautiful symmetric copies.
michelsData <- geoDataFromFile("data/moine_one_grainABCxyz.tsv")
oriEqualVolumePlot(michelsData$rotation, group=oriTrigonalTrapezohedralGroup, simplePoints=TRUE)

# But structural orientations are often so dispersed that we can't easily
# discern the symmetric copies. Here are some faults with slip from Cyprus
# (Davis and Titus, 2017). Where are the two symmetric copies?
slickData <- geoDataFromFile("data/cyprusSlicks2008005.tsv")
oriEqualVolumePlot(slickData$rotation, group=oriRayInPlaneGroup)

# So the other part of the lesson is: Structural geologists cannot always
# ignore symmetry in orientational data. Our default approach should be one
# that handles symmetry. Only sometimes can we cheat on the symmetry.



### SO LET'S CHEAT THEN ###

# Let's return to our western Idaho shear zone foliation-lineation data set.
oriEqualVolumePlot(wiszRots, group=oriLineInPlaneGroup)

# Remember that we've pre-processed the data to choose representative
# rotations lying in one symmetric copy. So intrinsic methods will give the
# same results for these rotations as for the corresponding orientations.
rotEqualVolumePlot(wiszRots)

# Maximum likelihood estimation (MLE) of the matrix Fisher distribution
# parameters is a method for rotations. And it's not intrinsic, so we're not
# allowed to use it on orientations. But these orientations are concentrated
# enough that treating them as rotations is approximately okay. So let's do
# the MLE. Among other things, it gives a concentration matrix (kHat) whose
# eigenvalues quantify the dispersion in the data set.
wiszFisher <- rotFisherMLE(wiszRots)
eigen(wiszFisher$kHat, symmetric=TRUE)$values

# In the next tutorial we will learn why this specific calculation is useful.



### SOMETHING THAT RESEMBLES CHEATING ###

# When your data are tightly concentrated, you can approximate them as points
# in the tangent space at the mean. Then principal component analysis (PCA) in
# that tangent space gives you yet another measure of anisotropic dispersion.
wiszPCA <- rotLeftPrincipalComponentAnalysis(wiszRots, wiszMeanVarOkay$mean, numPoints=5)
wiszPCA$magnitudes
rotEqualAnglePlot(wiszRots, curves=wiszPCA$curves, simplePoints=TRUE)

# Theoretical aside: We're using rotation methods, but this PCA concept is
# actually intrinsic to the geometry of the space of orientations, so it is a
# legitimate orientation technique. We're not actually cheating.

# Anyway, here is the symmetrized version of that last plot.
oriEqualAnglePlot(wiszRots, group=oriLineInPlaneGroup, curves=wiszPCA$curves, simplePoints=TRUE)



### CONCLUSION ###

# For tightly concentrated orientation data, intrinsic orientation methods
# work exactly as intrinsic rotation methods do. Sometimes we cheat and use
# non-intrinsic rotation methods on orientations too.
188db04b5ff1b36d2cd0f5d546c70e2495898b43
4ce576791c7b3bc0154889143ea4f390417b6411
/TextMiningfilesort_rscript/rhadoop_test_glm_20170105.R
16a4839e947139896884f1b39ec7e558d78ced3e
[]
no_license
wagaman/MachineLearning
847b3b83cc986b26499f31335363d21727e2711b
1fac4f973a75d0207864087dfd01be684d79dc8e
refs/heads/master
2021-01-25T12:49:46.417404
2017-12-21T12:30:45
2017-12-21T12:30:45
null
0
0
null
null
null
null
UTF-8
R
false
false
1,494
r
rhadoop_test_glm_20170105.R
###test1——glm论文测试,理解glm的概念和应用 install.packages("faraway") library(faraway) #载入数据集 data(pima,package="faraway") #pima 数据集如下 #提取出数据集的 # test 变量 b<-factor(pima$test) #test 变量值的分布范围为 # 0-1 分布 #构建模型,汇总信息存入变量 m m<-glm(b~diastolic+bmi,family=binomial,data=pima) #q 其中变量 # diastolic 表示 # 舒张血压,变量 b # mi 表示身体重量指数。 #汇总模型信息 >summary(m) #结果如下: # 由上图可知,d # iastolic 的值为 # 0.805, # bmi 的值为 # 1.95e-14,接近为 # 0,所以只有变 # 量 b # mi 是显著的,对我们的模型有重要的影响,简化模型如下: m.reduce<-glm(b~bmi,family=binomial,data=pima) # 简化后的模型依然可以用 s # ummary 查看汇总信息: summary(m.reduce) # 然后,用这个模型来计算一个中等体重指数(例如 B # MI 的值为 # 35)的人糖尿病检 # 查为阳性的概率。 newdata<-data.frame(bmi=35) # #调用 # predict 函数 predict(m.reduce,type="response",newdata=newdata) # 返回结果如下: ###test2——glm测试用例 utils::data(anorexia, package = "MASS") anorex.1 <- glm(Postwt ~ Prewt + Treat + offset(Prewt), family = gaussian, data = anorexia) summary(anorex.1) anorexia # Treat Prewt Postwt # 1 Cont 80.7 80.2 # 2 Cont 89.4 80.1 # 3 Cont 91.8 86.4 # 4 Cont 74.0 86.3 anorex.1
f1d2649e84c4fe38bb1c32570ea4401a8c029db9
8a723b30751071eb6f7084523e961ed0efd1b11e
/LTEE_mutations/CellMorphologyMutations.R
7d364efd8c107bb36bd8cfeb717f2b8e5f2df5b7
[]
no_license
NkrumahG/LTEE-cell-size-shape
1aa3e61e8b8999e534960838a855bbbc1bf80394
d20572359e904f9b13db1e0bf3890036f828035a
refs/heads/main
2023-06-16T08:39:27.110569
2021-07-14T17:36:20
2021-07-14T17:36:20
385,993,206
0
0
null
null
null
null
UTF-8
R
false
false
3,166
r
CellMorphologyMutations.R
# Build a tile plot of mutations in rod-shape (Mre/Mrd family) genes across
# LTEE populations and generations, from per-sample CSV exports in this
# script's directory.
# NOTE(review): rm(list=ls()) and setwd() at the top of a script are
# interactive-session conveniences; they wipe the caller's workspace and
# change global state -- confirm this is intended before sourcing elsewhere.
rm(list=ls())
library(dplyr)
library(ggplot2)
library(tidyverse)
library(grid)
library(reshape2)

dirname(rstudioapi::getActiveDocumentContext()$path) # Finds the directory where this script is located
setwd(dirname(rstudioapi::getActiveDocumentContext()$path)) # Sets the working directory to where the script is located
getwd()
PrimaryDirectory <- getwd()
PrimaryDirectory

# Read and row-bind every CSV in the working directory, then drop columns not
# needed for plotting.
ShinyData <- list.files(pattern = "*.csv")
Data <- as.data.frame(do.call(rbind,Map('cbind', lapply(ShinyData, read.csv)))) %>%
  select(-starts_with("html"), -gene_name, -snp_type, -X)

# Keep only mutations in the rod-shape gene families (Mre and Mrd).
RodMain <- Data %>% filter(gene_list %in% c("mreB", "mreC", "mreD", "mrdA", "mrdB"))
#View(RodMain)
#Arc <- Data %>% filter(grepl(pattern="arc", tolower(gene_list))) #related to anaerobic project

# Restrict to clone A of each population.
CloneA <- RodMain %>% filter(clone == "A")
#NonMutators <- CloneA %>% filter(mutator_status == "IS-mutator"| mutator_status == "non-mutator" )
##by_population <- CloneA %>% group_by(population) %>% arrange(population)

# Fix the plotting order of the twelve LTEE populations.
CloneA$population <- factor(CloneA$population, c("Ara-1","Ara-2","Ara-3","Ara-4","Ara-5","Ara-6","Ara+1","Ara+2","Ara+3","Ara+4","Ara+5","Ara+6"))

#Using true minus symbol (relabel the ASCII hyphen levels with U+2212 for display)
levels(CloneA$population) <- c("Ara−1","Ara−2","Ara−3","Ara−4","Ara−5","Ara−6","Ara+1","Ara+2","Ara+3","Ara+4","Ara+5","Ara+6")

# Manual levels: order genes by how often they are mutated (least to most)
gene_list_table <- table(CloneA$gene_list)
gene_list_levels <- names(gene_list_table)[order(gene_list_table)]
CloneA$gene_list_2 <- factor(CloneA$gene_list, levels = gene_list_levels)

#gene_list_table <- table(NonMutators$gene_list)
#NonMutators$gene_list_2 <- factor(NonMutators$gene_list, levels = gene_list_levels)

#Change labels for facet
CloneA$time <- factor(CloneA$time)
levels(CloneA$time) <- c("Generation 2k", "Generation 10k", "Generation 50k")

# Tile plot: population x gene, filled by mutation category, faceted by
# generation. Export size used for the paper figure:
#7.85 x 5.35
# NOTE(review): legend.justification/legend.position set early in the chain
# are overridden by the later theme(legend.position = "bottom", ...), and
# theme(axis.text.x = ...) and theme(axis.title.y = element_blank()) each
# appear twice -- the last setting wins; harmless but redundant.
ggplot(CloneA, aes(x=population, y=gene_list, height = .5, width = 1)) +
  geom_tile(aes(fill = mutation_category)) +
  scale_x_discrete(drop = F)+
  facet_grid (~time) +
  theme_bw() +
  scale_fill_discrete(name = "Mutation", labels = c("Indel","Nonsynonymous", "Synonymous")) +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5)) +
  theme(axis.title.x = element_blank(),axis.title.y = element_blank()) +
  theme(legend.justification=c(0,1), legend.position=c(0.005,.995)) +
  theme(axis.text.y = element_text(face = "italic", colour = "black", size = 12, margin = (margin(l = 5, r=5)))) +
  theme(axis.title.y = element_blank()) +
  theme(axis.ticks = element_blank())+
  theme(legend.position = "bottom", legend.margin = margin (10,0,0,100), legend.box.margin = margin(-20,0,0,10))+
  theme(axis.text.x = element_text(colour = "black", size = 12, margin = (margin(t = 5, b=5)))) +
  theme(axis.title.x = element_text(size = 12)) +
  theme(legend.text = element_text(size = 12)) +
  theme(legend.title = element_text(size = 12)) +
  theme(strip.text = element_text(size=12)) +
  coord_fixed(ratio=1.5)

#binomial tests. Treating as two families, Mre and Mrd.
# binom.test(5,8,p = 0.40, alternative = "g") #p=0.1737
# binom.test(6,12,p=0.40, alternative = "g") #p=0.3348
# binom.test(8,20,p=0.40, alternative = "g") #p=0.5841
b76ebdde29e1bbe37be7ef64fb323e2086e6ef7f
960d8eee1a4b3b968cd044b7c8cb3a27e9ca3757
/jeerthiliza/mousehuman/analysis/analysis.R
c347b3ac889bafcee129f6103549b0485bdff765
[]
no_license
shouguog/hematopoiesis
db4c5238a721724730fd8ce9cb9b27b8ba18c35d
1c80665bb1ff1a63a18e97d21e5ba9786bc75d8b
refs/heads/master
2023-01-14T09:29:47.941646
2023-01-07T17:52:53
2023-01-07T17:52:53
171,172,140
0
1
null
null
null
null
UTF-8
R
false
false
2,195
r
analysis.R
# Cross-species (human vs mouse) scRNA-seq integration using Seurat v2 CCA:
# normalize both objects, pick the union of top variable genes, run and align
# canonical correlation analysis, then cluster and t-SNE the combined object.
# NOTE(review): hard-coded setwd() and rm(list=ls()) make this non-portable.
setwd("/data/gaos2/tmp/mousehuman/analysis")
rm(list=ls())
library(Seurat)
# Loads Seurat objects `human` and `mouse` (and apparently human.data /
# mouse.data, removed below) -- confirm against humanmouse.RData.
load("humanmouse.RData")
human@meta.data<-human@meta.data[,c(1:3,8)] #keep only necessary column
mouse@meta.data<-mouse@meta.data[,c(1:3,13)] #keep only necessary column
# Set up human object; "speice" (sic) is the grouping column used throughout.
human@meta.data$speice <- "human"
human <- NormalizeData(human)
# Set up mouse object
mouse@meta.data$speice <- "mouse"
mouse <- NormalizeData(mouse)
# Gene selection for input to CCA: top 1000 variable genes from each species.
human <- FindVariableGenes(human, do.plot = F)
mouse <- FindVariableGenes(mouse, do.plot = F)
g.1 <- head(rownames(human@hvg.info), 1000)
g.2 <- head(rownames(mouse@hvg.info), 1000)
genes.use <- unique(c(g.1, g.2))
human <- ScaleData(human, display.progress = F, genes.use = genes.use)
mouse <- ScaleData(mouse, display.progress = F, genes.use = genes.use)
# Canonical correlation analysis across the two species, 30 components.
speices.combined <- RunCCA(human, mouse, genes.use = genes.use, num.cc = 30)
# visualize results of CCA plot CC1 versus CC2 and look at a violin plot
p1 <- DimPlot(object = speices.combined, reduction.use = "cca", group.by = "speice", pt.size = 0.5, do.return = TRUE)
p2 <- VlnPlot(object = speices.combined, features.plot = "CC1", group.by = "speice", do.return = TRUE)
plot_grid(p1, p2)
# Align the CCA subspaces so the two species become comparable (ACC1/ACC2).
speices.combined <- AlignSubspace(speices.combined, reduction.type = "cca", grouping.var = "speice",dims.align = 1:20)
p1 <- VlnPlot(object = speices.combined, features.plot = "ACC1", group.by = "speice", do.return = TRUE)
p2 <- VlnPlot(object = speices.combined, features.plot = "ACC2", group.by = "speice", do.return = TRUE)
plot_grid(p1, p2)
# Drop the raw data objects before checkpointing to keep the .RData small.
rm(human.data, mouse.data)
save(list=ls(), file="humanmouse.aligned.RData")
# t-SNE and Clustering on the aligned CCA space.
speices.combined <- RunTSNE(speices.combined, reduction.use = "cca.aligned", dims.use = 1:20, do.fast = T)
speices.combined <- FindClusters(speices.combined, reduction.type = "cca.aligned", resolution = 0.6, dims.use = 1:20)
# Visualization: side-by-side t-SNE colored by species and by cluster.
p1 <- TSNEPlot(speices.combined, do.return = T, pt.size = 0.5, group.by = "speice")
p2 <- TSNEPlot(speices.combined, do.label = T, do.return = T, pt.size = 0.5)
png("tSNE_group_mouse_human.png", width=2000, height=1000, res=100)
plot_grid(p1, p2)
dev.off()
save(list=ls(), file="humanmouse.aligned.2.RData")
8b0c062b1faa7692ff73390b1755daaff263a4f8
41648c813bb2dec678ba5b82a8a3a2dcee45cbaf
/R/test.r
1b4b687ea0661c9dd7d8b5bf60beb8ba2790a777
[]
no_license
GokulGeo/TaalStarPlot
ba8efeeb94ebe1e5f959efca63a59b2a2194fd2e
a48d3f1cc701f31fb473b7cc084f5c27bd828b24
refs/heads/master
2022-12-18T18:08:34.523244
2020-09-27T08:34:53
2020-09-27T08:34:53
298,985,730
0
0
null
null
null
null
UTF-8
R
false
false
1,543
r
test.r
# Plot Sentinel-1 acquisition metadata: perpendicular baseline (Bperp) per
# acquisition date, with a segment drawn from the first (reference) row to
# every other acquisition -- a classic SBAS/InSAR baseline star plot.
library(tidyverse)
library(readxl)
library(scales)
# NOTE(review): machine-specific path; assumes the sheet has columns
# `Date of acquisition` and `Bperp(m)` -- confirm against the file.
data <-read_excel("~/Desktop/Sentinel-1 Metadata info.xls")
# First row serves as the reference acquisition for all segments.
centroid <- slice(data,1)
data %>% ggplot(aes(`Date of acquisition`, `Bperp(m)`)) + geom_point() + geom_segment(aes(x = centroid$`Date of acquisition`, y = centroid$`Bperp(m)`, xend = `Date of acquisition`, yend = `Bperp(m)`)) + theme_minimal()
my_data_compressed <-data
# Hard-coded dput() copy of the data so the plot can be reproduced without
# the original spreadsheet (dates are POSIXct seconds, UTC).
reconstruct <- structure(list(`Date of acquisition` = structure(c(1580256000, 1581292800, 1582329600, 1579219200, 1577664000, 1575590400, 1576627200, 1578700800, 1579737600, 1581811200, 1580774400, 1582848000), class = c("POSIXct", "POSIXt"), tzone = "UTC"), `Bperp(m)` = c(0, -23.22, 15.03, 8.85, -26.13, 7.35, -31.04, 19.4, 12.44, -25.21, -6.45, 70.35)), row.names = c(NA, -12L), class = c("tbl_df", "tbl", "data.frame"))
59e09618657ad32a27b351b6ca532c834432d890
6e32987e92e9074939fea0d76f103b6a29df7f1f
/googleidentitytoolkitv2.auto/man/GoogleIamV1TestIamPermissionsResponse.Rd
ef52b35a2d75dfd893e1ed1da24e9f69e65744c0
[]
no_license
justinjm/autoGoogleAPI
a8158acd9d5fa33eeafd9150079f66e7ae5f0668
6a26a543271916329606e5dbd42d11d8a1602aca
refs/heads/master
2023-09-03T02:00:51.433755
2023-08-09T21:29:35
2023-08-09T21:29:35
183,957,898
1
0
null
null
null
null
UTF-8
R
false
true
695
rd
GoogleIamV1TestIamPermissionsResponse.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/identitytoolkit_objects.R \name{GoogleIamV1TestIamPermissionsResponse} \alias{GoogleIamV1TestIamPermissionsResponse} \title{GoogleIamV1TestIamPermissionsResponse Object} \usage{ GoogleIamV1TestIamPermissionsResponse(permissions = NULL) } \arguments{ \item{permissions}{A subset of `TestPermissionsRequest`} } \value{ GoogleIamV1TestIamPermissionsResponse object } \description{ GoogleIamV1TestIamPermissionsResponse Object } \details{ Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}} Response message for \code{TestIamPermissions} method. } \concept{GoogleIamV1TestIamPermissionsResponse functions}
a9724224b5e8af792174a7da586832a766e945fe
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/QuantumClone/examples/multiplot_trees.Rd.R
da9d3269e7b1f693085f0f175318acc803fd6956
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
215
r
multiplot_trees.Rd.R
# Example script extracted from the QuantumClone package documentation:
# renders the bundled example phylogeny object with multiplot_trees().
library(QuantumClone)

### Name: multiplot_trees
### Title: Plots multiple trees
### Aliases: multiplot_trees
### Keywords: Clonal inference phylogeny

### ** Examples

# Plot the packaged Tree object; d = 4 is passed through to the plotter.
multiplot_trees(QuantumClone::Tree, d= 4)
5d4d3ecf6d13413605bcbb9bdaff09fcf69fd2c1
514ff5c8a94ba290d9de186149271838275f728d
/man/geneCount.Rd
2b84fd0bec93e7e08edf97e51526e6df1fe5b295
[]
no_license
DingailuM/ribiosNGS
eac90a15346db52aebdc3298e950e4cb26199f66
d2d69c044d89a3ccbcb05becf9b33901681e4694
refs/heads/master
2022-11-30T09:52:08.561350
2020-08-04T11:07:22
2020-08-04T11:07:22
284,919,686
0
0
null
2020-08-04T10:57:26
2020-08-04T08:18:28
null
UTF-8
R
false
true
291
rd
geneCount.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/edgeR-funcs.R \name{geneCount} \alias{geneCount} \title{Return gene count} \usage{ geneCount(edgeResult) } \arguments{ \item{edgeResult}{An EdgeResult object} } \value{ Integer } \description{ Return gene count }
8235c63e36e4edaa3687fb2413e4d8a5d2636b7f
0d2190a6efddb7167dee3569820724bfeed0e89c
/R3.0.2 Package Creation/PBTools/R/QTLDataPrep.R
922a83d3d325b0d52270e510f08f8a6da82b45ed
[]
no_license
djnpisano/RScriptLibrary
6e186f33458396aba9f4151bfee0a4517d233ae6
09ae2ac1824dfeeca8cdea62130f3c6d30cb492a
refs/heads/master
2020-12-27T10:02:05.719000
2015-05-19T08:34:19
2015-05-19T08:34:19
null
0
0
null
null
null
null
UTF-8
R
false
false
4,924
r
QTLDataPrep.R
####################################################################
# QTLDataPrep
#' Function for preparing data file(s) for QTL analysis.
#' Cross-checks the genotype ids and marker ids shared by the phenotype,
#' genotype and map data sets, reduces each data set to the common subset,
#' and writes reduced files to the working directory when anything changed.
#
# Parameters:
#' @param P_data name of phenotypic data set (R data format)
#' @param G_data genotypic data set (first column = genotype id, remaining
#'   columns = markers)
#' @param M_data map data set (first column = marker id)
#' @param P_geno name of genotype variable in P_data
#'
#' @return list with the elements G_diffGid, P_diffGid, M_diffMid, G_diffMid, isNewPhenoCreated, isNewMapCreated, isNewGenoCreated
# where:
# G_diffGid - list of genotypes in G_data w/c are not in P_data
# P_diffGid - list of genotypes in P_data w/c are not in G_data
# M_diffMid - list of markers in M_data w/c are not in G_data
# G_diffMid - list of markers in G_data w/c are not in M_data
# isNewPhenoCreated - logical; whether a new phenotype file is created
# isNewMapCreated - logical; whether a new map file is created
# isNewGenoCreated - logical; whether a new genotype file is created
#
#' @author Author: Rose Imee Zhella Morantte
#-------------------------------------------------
# S3 generic; only the default method exists in this file.
QTLDataPrep <- function(P_data, G_data, M_data, P_geno) UseMethod("QTLDataPrep")

QTLDataPrep.default <- function(P_data, G_data, M_data, P_geno) {
  # Trim whitespace from genotype ids and marker ids so that set comparisons
  # below are not defeated by stray spaces.
  # NOTE(review): trimStrings() is assumed to be a package helper -- confirm.
  P_data[,match(P_geno, names(P_data))] <- trimStrings(as.matrix(P_data[match(P_geno, names(P_data))]))
  G_data[,1] <- trimStrings(G_data[,1])
  M_data[,1] <- trimStrings(M_data[,1])
  ###################################
  # Part 1: reconcile P_data vs G_data on genotype ids.
  # Get genotype and marker "variable" in the data sets.
  colnames(G_data)[1] <- P_geno
  P_gid <- unique(P_data[,match(P_geno,names(P_data))])
  M_mid <- M_data[1]
  colnames(M_mid) <- c("1")
  G_mid <- colnames(G_data)[-1] #marker names live in G_data's column names
  G_midt <- as.data.frame(G_mid)
  rownames(G_midt) <- NULL
  G_gidt <- data.frame(G_data[,1])
  #G_gidt <-data.frame(I(G_data[,1]))# G_gidt <- as.data.frame(t(G_dataMat)[1,])
  colnames(G_gidt) <- P_geno #"G.id" #replaced "V1"
  ##check if there are genotypes in G_data w/c are not in P_data; for displaying (if any)
  G_diffGid <- as.character(as.matrix(setdiff(paste(G_gidt[,1]), P_data[,match(P_geno, names(P_data))])))
  # *NoSpace variants drop empty-string ids before deciding whether to rewrite files.
  G_diffGidNoSpace<-G_diffGid[which(G_diffGid!="")]
  ##check if there are genotypes in P_data w/c are not in G_data; for displaying (if any)
  P_diffGid <- as.character(as.matrix(setdiff(P_data[,match(P_geno, names(P_data))], paste(G_gidt[,1]))))
  P_diffGidNoSpace <- P_diffGid[which(P_diffGid!="")]
  ##reduce (if needed) P_data to genotypes present in G_data, sorted as in G_data
  P_dataRed <- merge(P_data, G_gidt, by = P_geno, sort = FALSE)
  ##reduce (if needed) G_data to genotypes present in P_data
  G_dataRed <- merge(G_data, P_gid, by = P_geno, sort = FALSE)
  isNewPhenoCreated<-FALSE
  if (length(P_diffGidNoSpace)!=0) {
    ##save new (reduced) P_data as csv file in the working directory
    write.table(P_dataRed,file=paste(getwd(),"/newPhenoData.csv", sep=""), quote = FALSE, sep = ",", row.names = FALSE, col.names=TRUE)
    isNewPhenoCreated<-TRUE
  }
  ###################################
  # Part 2: reconcile G_data vs M_data on marker ids.
  colnames(M_data) <- c("V1","V2_1","V3_1")
  # Transpose the reduced genotype data so markers become rows ("mID" column).
  G_datat <- as.data.frame(t(G_dataRed)) # t(G_dataRed) ###no -1 row?
  G_datat <- cbind(G_datat, rownames(G_datat))
  ncolGdatat <- dim(G_datat)[2]
  colnames(G_datat)[ncolGdatat] <- "mID"
  ##check if there are markers in M_data w/c are not in G_data; for displaying (if any)
  M_diffMid <- as.character(as.matrix(setdiff(M_data[,1], G_datat[,"mID"])))
  M_diffMidNoSpace<-M_diffMid[which(M_diffMid!="")]
  #reduce, if needed, M_data to markers present in G_data
  M_dataRed <- merge(M_data, G_midt, by.x = "V1", by.y = names(G_midt)[1], sort = FALSE)
  ##check if there are markers in G_data w/c are not in M_data; for displaying (if any)
  G_diffMid<-as.character(as.matrix(setdiff(G_datat[-1,"mID"], M_data[,1])))
  G_diffMidNoSpace<-G_diffMid[which(G_diffMid!="")]
  #reduce G_data to markers present in M_data, then transpose back
  G_dataRed <- merge(M_mid, G_datat[-1,], by.x = "1", by.y = "mID", sort = FALSE)
  G_dataRed2 <- as.data.frame(t(G_dataRed))
  rownames(G_dataRed2)[1] <- P_geno
  rownames(G_dataRed2)[2:ncolGdatat] <- t(G_datat[1,c(1:ncolGdatat-1)])
  isNewMapCreated<-FALSE
  if (length(M_diffMidNoSpace)!=0) {
    ##save new (reduced) M_data as tab-delimited file
    write.table(M_dataRed,file=paste(getwd(), "/newMapData.txt", sep=""), quote = FALSE, sep = "\t", row.names = FALSE, col.names=FALSE)
    isNewMapCreated<-TRUE
  }
  isNewGenoCreated<-FALSE
  # Genotype file is rewritten if either the genotype ids or marker ids changed.
  if (length(G_diffMidNoSpace)!=0 || length(G_diffGidNoSpace)!=0) {
    #save new (reduced) G_data
    write.table(G_dataRed2,file=paste(getwd(), "/newGenoData.txt", sep=""), quote = FALSE, sep = "\t", row.names = FALSE, col.names=FALSE)
    isNewGenoCreated<-TRUE
  }
  return(list(G_diffGid = G_diffGid, P_diffGid = P_diffGid, M_diffMid = M_diffMid, G_diffMid = G_diffMid, isNewPhenoCreated =isNewPhenoCreated, isNewMapCreated =isNewMapCreated, isNewGenoCreated =isNewGenoCreated))
}
428f64de6be29030fa41504bbbe37be14c1186bf
08c6e8b8087244561878460b2b1abe8ecc83bd2c
/herrstein RL smoothed.R
c277860b078c3d964b6e6f094fb256441e0f7d30
[]
no_license
KasiaO/SemanticsGames
d298169bc5c047a4da06baad52c9ecc7016d852d
7d3aa33ee5cc90994dd5c586da6c2ec3869644fa
refs/heads/master
2021-01-12T03:00:46.048017
2017-08-28T17:30:28
2017-08-28T17:30:28
78,149,456
0
0
null
null
null
null
UTF-8
R
false
false
3,087
r
herrstein RL smoothed.R
# Herrnstein reinforcement-learning agent with smoothed (exponentially
# decaying) memory for a signalling game: each figure has an "urn" of
# propensities over dictionary words; successful communication reinforces
# the word drawn, and the word->figure split is resampled after each update.
# source main methods and functions (Agent, Figure, playGame, runSimulation, plotRes)
source('base.R')

#####
## learning agents - basic Herrnstein RL, smoothed memory
#####
hrlLearner <- setRefClass(
  "hrlLearner",
  fields = list(
    urns = "list",     # one urn per figure: list(figure, propensity per word)
    delta = "numeric"  # smoothing rate: weight given to the newest payoff
  ),
  contains = "Agent",
  methods = list(
    # Build one urn per figure with uniform propensities over the dictionary.
    initUrns = function(figures, dict) {
      urns <- list()
      for(i in 1:length(figures)) {
        # initialize equal propensities
        # figure is stored in the first position of the list (cannot be a name)
        urns[[i]] <- c(figures[[i]], rep(1/length(dict), length = length(dict)))
        names(urns[[i]]) <- c("figure", dict)
      }
      return(urns)
    },
    # Linear scan for the urn whose stored figure matches; returns its index.
    # NOTE(review): returns NULL implicitly if no urn matches -- callers assume a hit.
    findUrn = function(figure) {
      for(i in 1:length(urns)) {
        if(identical(urns[[i]][[1]], figure)) {
          return(i)
        }
      }
    },
    # Exponential smoothing of the propensity for `communicate` on this
    # figure's urn: new = old*(1-delta) + delta*point. `<<-` writes back to
    # the `urns` field of the reference class instance. No penalty term.
    updateUrns = function(figure, communicate, point) {
      # no penalty
      urns[[findUrn(figure)]][[communicate]] <<- ({
        urns[[findUrn(figure)]][[communicate]]*(1-delta) + delta*point
      })
    },
    # Apply the payoff update, then resample a fresh word->figures split by
    # drawing one word per figure proportionally to its urn propensities.
    # NOTE(review): `dict` here is resolved from the calling environment
    # (a global in this script), not a field -- confirm intent.
    updateSplit = function(figure, communicate, point) {
      updateUrns(figure, communicate, point)
      split <- list()
      for(i in 1:length(urns)) {
        urn <- unlist(urns[[i]][-1])
        figure <- urns[[i]][[1]]
        probs <- urn/sum(urn)
        drawn <- sample(x = dict, size = 1, prob = probs)[[1]]
        split[[drawn]] <- c(split[[drawn]], figure)
      }
      return(split)
    }
  )
)

# override setEnvironment: build the figure space and two learner agents.
setEnvironment <- function(figDims, dict) {
  # input:
  # figDims - list - values for each dimension of the figure description (col, size, shape)
  # check configuration
  stopifnot(c("color", "size", "shape") %in% names(figDims))
  # initialize figures: one Figure per combination of dimension values
  combs <- expand.grid(figDims$color, figDims$size, figDims$shape)
  colnames(combs) <- c("color", "size", "shape")
  figures <- c()
  for(i in 1:nrow(combs)) {
    set <- combs[i,]
    newFig <- Figure$new(color = set$color, size = set$size, shape = set$shape)
    figures <- c(figures, newFig)
  }
  # initialize agents with different smoothing rates (0.7 vs 0.4)
  player1 <- hrlLearner$new(
    split = list(),
    score = 0,
    urns = list(),
    delta = 0.7
  )
  player1$split <- player1$makeSplit(figures, dict)
  player1$urns <- player1$initUrns(figures, dict)
  player2 <- hrlLearner$new(
    split = list(),
    score = 0,
    urns = list(),
    delta = 0.4
  )
  player2$split <- player2$makeSplit(figures, dict)
  player2$urns <- player2$initUrns(figures, dict)
  env <- list()
  env$figures <- figures
  env$player1 <- player1
  env$player2 <- player2
  env$dict <- dict
  return(env)
}

#####
## run experiment: 2x2x2 figure space, two-word dictionary, 500 rounds
#####
figDims <- list(
  "color" = c("white", "red"),
  "size" = c("small", "big"),
  "shape" = c("square", "triangle")
)
dict <- c("A", "B")
res <- playGame(500, figDims, dict, 1)
plotRes(res)

######
## run simulation: 10 repetitions of 500 rounds
######
sim <- runSimulation(10, 500, figDims, dict, 0)
plotRes(sim)
5bbdfa06a85ef130b96d565aba8ad5dc4b900ea0
a978ee2ce4d399cdd35a9407a04ce874d674505a
/R/DataPrep.R
a108192a29fcdbf50ee28440083477cb7c2c07a6
[]
no_license
shambam/cellexalvrR
1f1f8c5ed3653fa3105405d6d3ff3194c22f548c
cbfd62b99a52fcbf91033478f1a91ffd5289b785
refs/heads/master
2023-04-06T23:21:06.365196
2018-05-03T14:26:56
2018-05-03T14:26:56
95,549,018
3
1
null
2021-06-16T11:48:04
2017-06-27T10:57:58
R
UTF-8
R
false
false
635
r
DataPrep.R
#'Creates a meta cell matrix from a supplied dataframe from required fields
#'@param metad A dataframe of per cell metadata
#'@param rq.fields A vector of names specifying which columns should be made into metadata
#'@return A matrix with one indicator column per (field, level) pair, columns
#'  named "<field>.<level>"; NULL when rq.fields is empty.
#'@keywords metadata cell
#'@export make.cell.meta.from.df
make.cell.meta.from.df <- function(metad,rq.fields){
	# Build one indicator matrix per requested column, then bind them once.
	# lapply + do.call(cbind, ...) avoids growing the result inside a loop and,
	# unlike the previous 1:length() loop, is safe for length-0 rq.fields
	# (returns NULL instead of erroring).
	meta.parts <- lapply(rq.fields, function(field){
		# to.matrix() (package helper) expands a factor-like column into one
		# 0/1 column per unique level.
		tmp.met <- to.matrix(metad[, field], unique(metad[, field]))
		# Prefix each level column with its source field, e.g. "celltype.B".
		colnames(tmp.met) <- paste(field, colnames(tmp.met), sep = ".")
		tmp.met
	})
	do.call(cbind, meta.parts)
}
ba1dce12151e97a4edd5ac98b35da95de2f2aa98
e24c2715cee33c751b6cb325ef8692c037a94846
/R/prostate.R
e84c6b1817b26457be19f98606b4b47f01870267
[]
no_license
cran/MultNonParam
993e9551b760fcb593a1c25f1bfefc684154bf2e
71f6fb719bb000f3ae1dc4ac6e16f4b4a16bdefe
refs/heads/master
2022-12-07T12:08:00.225538
2022-11-30T17:20:07
2022-11-30T17:20:07
23,304,602
0
0
null
null
null
null
UTF-8
R
false
false
1,052
r
prostate.R
#' @title prostate #' @name prostate #' @description 221 prostate cancer patients are collected in this data set. #' @docType data #' @format #' \itemize{ #' \item hosp : Hospital in which the patient is hospitalized. #' \item stage : stage of the cancer. #' \item gleason score : used to help evaluate the prognosis of the cancer. #' \item psa : prostate-specific antigen. #' \item age : age of the patient. #' \item advanced : boolean. \code{TRUE} if the cancer is advanced. #'} #' @references #' A. V. D'Amico, R. Whittington, S. B. Malkowicz, D. Schultz, K. Blank, G. A. Broderick, J. E. Tomaszewski, A. A. Renshaw, I. Kaplan, C. J. Beard, A. Wein (1998) , \emph{Biochemical outcome after radical prostatectomy, external beam radiation therapy, or interstitial radiation therapy for clinically localized prostate cancer}, JAMA : the journal of the American Medical Association 280 969-74. #' #' @examples #' data(prostate) #' attach(prostate) #' plot(age,psa,main="Age and PSA",sub="Prostate Cancer Data", #' xlab="Age (years)",ylab="PSA") NULL
39696fd4cdcb8648d39a5582822d364fe9a84c3b
3e740bfc1105cc42db18ed2f894568d068608d02
/server.R
c5654887e261f354216d0ff34415e106713a0286
[]
no_license
MPacho/NextWordPrediction
c5f48581619db27e3c60a834c841d078e882e6fd
981e0c6b92e27a995c324bbc1ea3e04bdeb75360
refs/heads/master
2021-05-06T14:34:06.386079
2017-12-06T23:55:06
2017-12-06T23:55:06
113,378,012
1
0
null
null
null
null
UTF-8
R
false
false
487
r
server.R
library(shiny)
source("04_stupid_backoff_model.R")

# Shiny server: reads the typed text, runs the stupid-backoff predictor,
# and exposes the top three candidate words as separate text outputs.
shinyServer(function(input, output) {

  typed_text <- reactive({
    input$typeText
  })

  suggested_words <- reactive({
    if (input$typeText == "") {
      # Nothing typed yet: show three blank suggestions.
      c("", "", "")
    } else {
      predictStupidBackoff(typed_text())
    }
  })

  output$wordPrediction1 <- renderText({
    suggested_words()[1]
  })
  output$wordPrediction2 <- renderText({
    suggested_words()[2]
  })
  output$wordPrediction3 <- renderText({
    suggested_words()[3]
  })
})
d4cf15cc7d2c9215fb0337bb2b337aadf99686f7
8175788715e6344aeaa0ab4f82612e4245b03957
/file1.R
192db30d0ec47d1ec312ba7af2d6aea2de26385b
[]
no_license
rohilagarwal/analytics1
2aceb92dd50a528cf003b4713ce33fb040d30eb1
c7b19022b8b5084a37b55576cb157cbe4c87e33f
refs/heads/master
2020-04-08T01:56:38.584340
2018-12-09T10:13:44
2018-12-09T10:13:44
158,915,523
0
0
null
null
null
null
UTF-8
R
false
false
580
r
file1.R
# Basic R practice script: vectors, sorting, histograms, and matrix indexing.
# Fixes vs original: `<-` instead of `=`, TRUE/FALSE instead of T/F, and the
# invalid expression m1[(1,3),] (a syntax error that aborted the script) is
# commented out with an explanation.

# Vectors ----
x <- c(1:10)
(x)                        # wrapping in () prints the assigned value
x <- c(2,5,3,6,7,10)
(x)
x[2]                       # 1-based single-element indexing
sort(x)
x[-4]                      # negative index drops the 4th element
x <- sort(x)
(x)
plot(density(x))
plot(hist(x))

# 100 draws from a standard normal distribution
x <- rnorm(100)
(x)
plot(hist(x))
plot(hist(x), freq = FALSE)   # density scale instead of raw counts
(x)
plot(hist(x))

# Matrix ----
100:111
length(100:111)
matrix(1, ncol = 3, nrow = 4)
matrix(100:111, nrow = 4, byrow = TRUE)
(m1 <- matrix(100:111, nrow = 4))               # filled column-wise (default)
(m2 <- matrix(100:111, ncol = 3, byrow = TRUE)) # filled row-wise
class(m1)
attributes(m1)
dim(m1)
m1
m1[1]
m1[1]; m1[1]
m1
m1[1,]                     # first row (drops to a vector)
m1[,1]                     # first column (drops to a vector)
m1[,1, drop = FALSE]       # first column kept as a 4x1 matrix
m1[c(1,3),]                # rows 1 and 3
# m1[(1,3),] is a SYNTAX ERROR: row selections must be a vector, e.g. c(1, 3)
paste('c', 'd', sep = '-')
colnames(m1) <- paste0('c', 1:3)
m1
m1[,c('c1','c3')]
#matrix----
#array----
0e325c90c4de28192676540e435ed733ee10b68c
92b41bf11e58d671a1fab6224a9b17afb2b5050e
/sac1.R
8a49128d8a4de93acbbd23d7c4bc564dfd107429
[]
no_license
MaggieFang/MarketSegmentation-CourseADBI
6fe081cfc7aaef039c1dfdba13fb2ffa2d54822e
4e09a9a63dd205e0b5d3ae14c5c31745b5093777
refs/heads/master
2020-04-28T02:34:11.581404
2019-03-11T01:33:34
2019-03-11T01:33:34
174,903,652
0
0
null
null
null
null
UTF-8
R
false
false
3,690
r
sac1.R
# SAC-1 attributed-graph community detection (Facebook Caltech data):
# combines Newman modularity gain with cosine similarity of vertex
# attributes, weighted by alpha, in a Louvain-style two-phase loop.
library(igraph)
library(lsa)
# read data (hard-coded local path; edge list + per-vertex attribute CSV)
folder <- "/Users/xfang7/Google Drive/Courses/CSC591-603/hw/MarketSegmentation/"
graph = read_graph(paste(folder,"fb_caltech_small_edgelist.txt",sep = ''),format =c("edgelist"))
attribute_data <- read.csv(paste(folder,"fb_caltech_small_attrlist.csv", sep=''),header = T)

# Function to compute the attribute part of the modularity gain: mean cosine
# similarity between vertex h and the current members of community `membership`.
get_delta_attr <- function(attrList, h, membership, values) {
  indices <- which(values == membership)
  similar <- 0
  for(i in indices) {
    similar <- similar + cosine(as.numeric(attrList[h,]), as.numeric(attrList[i,]))
  }
  # The final assignment is also the (invisible) return value.
  similar <- similar/length(indices)
}

# Phase 1 of the SAC-1 algorithm: greedily move each vertex to the
# neighbouring community with the largest positive composite gain.
phase1 <- function(graph,attributes, mapped_communities, alpha){
  # limit to a maximum of 15 sweeps over all vertices
  for(it in 1:15) {
    x <- mapped_communities
    for(i in 1:vcount(graph)) {
      # index: community of the best candidate move for vertex i (0 = none)
      index <- 0
      # maxQ: the largest composite gain seen so far for vertex i
      maxQ <- 0
      # candidate communities come from i's neighbours only
      n <- neighbors(graph, i)
      for(j in unique(mapped_communities[n])) {
        tmp <- mapped_communities
        old_modularity <- modularity(graph,tmp)
        # Tentatively move i into j's community
        tmp[i] <- j
        new_modularity <- modularity(graph,tmp)
        # Composite gain: (1-alpha)*attribute similarity + alpha*Newman gain
        delta_Q_newman <- new_modularity - old_modularity
        delta_Q_attr <- get_delta_attr(attributes, i, j, mapped_communities)
        delta_Q <- (1-alpha)*delta_Q_attr + (alpha)*delta_Q_newman
        # keep the best strictly-positive move
        if(i!=j && delta_Q > maxQ){
          index <- j
          maxQ <- delta_Q
        }
      }
      # commit the best move, if any community improved the objective
      if(index !=0){
        mapped_communities[i] <- index
      }
    }
    # stop early once a full sweep changes nothing
    if(isTRUE(all.equal(x, mapped_communities))) {
      break
    }
    x <- mapped_communities
  }
  mapped_communities
}

# Phase 2 of the SAC-1 algorithm: contract communities into super-vertices
# and re-run phase 1 until the assignment stabilises.
phase2 <- function(graph,attributes, mapped_communities, alpha){
  x <- mapped_communities
  for(i in 1:15) {
    combined_graph <- contract.vertices(graph, mapped_communities)
    # drop duplicate edges and self-loops created by the contraction
    new_graph <- simplify(combined_graph, remove.multiple = TRUE, remove.loops = TRUE)
    # reapply phase 1 on the contracted graph
    mapped_communities <- phase1(new_graph, attributes,mapped_communities, alpha)
    # no further improvement -> stop
    if(isTRUE(all.equal(x, mapped_communities))) {
      break
    }
    x <- mapped_communities
  }
  mapped_communities
}

# Full SAC-1 pipeline: phase 1 from singleton communities (324 vertices),
# then phase 2 on the contracted graph.
sac1 <- function(alpha, attributes = attribute_data){
  r1 <- phase1(graph, attributes,alpha=alpha, mapped_communities = c(1:324))
  communities <- phase2(graph, attributes,alpha=alpha, mapped_communities = r1)
  return(communities)
}

# Save one community per line (0-based vertex ids, comma separated) to
# "communities_<alpha>.txt"; alpha 0.5 is written as "communities_5.txt".
save_file <- function(communities, alpha){
  if(alpha == 0.5){
    alpha = 5
  }
  file_name<-paste("communities",alpha,sep="_")
  file_name<-paste(file_name,"txt",sep=".")
  f<-file(file_name,"w")
  for(i in 1:length(unique(communities))) {
    community <- vector("numeric")
    for(j in 1:324) {
      if(communities[j]==unique(communities)[i]){
        # vertices are written 0-based for the grader
        community <- append(community, j-1, after = length(community))
      }
    }
    cat(as.character(community), file=f, sep = ",")
    cat("\n", file=f)
  }
  close(f)
}

# read parameter (alpha) from the command line
args <- commandArgs(trailingOnly = TRUE)
alpha = as.numeric(args[1])
# run the SAC-1 algorithm and save the result to file
result <- sac1(alpha = alpha)
save_file(result, alpha = alpha)
1dbbfce0489400e77194cce7f4de2a6200b36bc5
78858583954c6cba8490d9e629f56b2f67b4578b
/ui.R
1bed5d2ec5bc19fea14ebbbc6f7de517fdfa4e81
[]
no_license
VictimOfMaths/COVID_LA_Plots
9ed06f02f27ad7f3e017f85891cdac3468c77c33
4d82fdb58391abba0f5a32b31e5315cff90cad6a
refs/heads/master
2023-08-22T17:36:08.895032
2021-10-08T10:14:37
2021-10-08T10:14:37
279,406,562
6
1
null
2020-08-30T23:03:35
2020-07-13T20:37:19
HTML
UTF-8
R
false
false
1,704
r
ui.R
# Shiny UI for the COVID-19 Local Authority age-pattern explorer.
# Relies on a data frame `shortdata` (columns: areaName, areaType, date)
# being defined elsewhere in the app (e.g. global.R) -- confirm.
library(shiny)
library(lubridate)
# Remove the blue fill from the date slider via custom CSS on the irs widget.
ui <- fluidPage(
  tags$head(tags$style(HTML('* {font-family: "Lato"};'))),
  tags$style(
    ".irs-bar {",
    "  border-color: transparent;",
    "  background-color: transparent;",
    "}",
    ".irs-bar-edge {",
    "  border-color: transparent;",
    "  background-color: transparent;",
    "}"
  ),
  titlePanel("Visualising age patterns in English Local Authority COVID-19 case data"),
  sidebarPanel(
    # Area picker: England, its regions, then all lower-tier LAs alphabetically.
    selectInput('LA', 'Select Area',
                c("England", "East of England", "East Midlands", "London",
                  "North East", "North West", "South East", "South West",
                  "West Midlands", "Yorkshire and The Humber",
                  sort(as.character(unique(shortdata$areaName[shortdata$areaType=="ltla"])))),
                multiple=FALSE, selected="England"),
    # Plot type is passed to the server as a numeric code 1-5.
    selectInput('plottype', 'Select plot',
                c("Heatmap of case numbers"=1,
                  "Heatmap of case rates"=2,
                  "Line chart of case rates (detailed ages)"=3,
                  "Line chart of case rates (broad ages)"=4,
                  "Streamgraph of case numbers"=5)),
    # Slider bounds trim a few days at each end of the available data.
    sliderInput('StartDate', 'Select start date for plot',
                min=min(shortdata$date)+days(3),
                max=max(shortdata$date)-days(4),
                value=as.Date("2020-08-01")),
    radioButtons('scale', "Select y-axis scale for line charts",
                 choices=c("Linear", "Log"), inline=TRUE),
    checkboxInput('fix', "Select to fix y-axis scales to be the same for all plots", FALSE)),
  mainPanel(
    plotOutput('plot')
  )
)
9e238474ddc4e33bc3836ef2c871f7de901de400
0c1c9fce8a615de52819f530edce2a9a83824d99
/R/fbGetUserAdAccounts.R
3d73f87f8ed08da1c58336baa55654b2af4635cd
[]
no_license
IgorZakrevskiy/rfacebookstat_ks
2ce059f656d5d5c43d2a54359dcc86c3ed3efde6
35d98bd741e4de0d000e3e56792f5ad0e6579d0b
refs/heads/master
2023-05-08T04:38:21.895060
2021-06-04T09:30:11
2021-06-04T09:30:11
null
0
0
null
null
null
null
UTF-8
R
false
false
2,468
r
fbGetUserAdAccounts.R
#' Fetch the ad accounts visible to a Facebook user via the Graph API.
#'
#' @param user_id Facebook user id; "me" (default) means the token owner.
#' @param api_version Graph API version string, e.g. "v10.0".
#' @param username Login used when interactive authorisation is required.
#' @param token_path Directory holding cached auth tokens.
#' @param access_token Explicit token; when NULL it is resolved from the
#'   RFB_API_TOKEN environment variable, else via fbAuth().
#' @return A tibble of ad accounts (id, name, account_id, account_status,
#'   amount_spent, balance, business_name, currency, owner) with attributes
#'   "request_ids" (HTTP status per trace id) and "headers" (response
#'   headers per trace id).
fbGetUserAdAccounts <- function(user_id = "me",
                                api_version = getOption("rfacebookstat.api_version"),
                                username = getOption("rfacebookstat.username"),
                                token_path = fbTokenPath(),
                                access_token = getOption("rfacebookstat.access_token")) {
  # Resolve a token: explicit option > environment variable > interactive auth.
  if ( is.null(access_token) ) {
    if ( Sys.getenv("RFB_API_TOKEN") != "" ) {
      access_token <- Sys.getenv("RFB_API_TOKEN")
    } else {
      access_token <- fbAuth(username   = username,
                             token_path = token_path)$access_token
    }
  }
  # Token objects wrap the actual string; inherits() is the robust class test.
  if ( inherits(access_token, "fb_access_token") ) {
    access_token <- access_token$access_token
  }
  # Per-request trace attributes collected alongside the data.
  rq_ids      <- list()
  out_headers <- list()
  # First page of /adaccounts.
  link <- paste0("https://graph.facebook.com/",
                 api_version,"/",
                 user_id,
                 "/adaccounts",
                 "?fields=id,name,account_id,account_status,amount_spent,balance,business_name,currency,owner&limit=5000&access_token=",
                 access_token)
  answer <- GET(link)
  rq_ids      <- append(rq_ids, setNames(list(status_code(answer)), answer$headers$`x-fb-trace-id`))
  out_headers <- append(out_headers, setNames(list(headers(answer)), answer$headers$`x-fb-trace-id`))
  user_account <- content(answer)
  if ( !is.null(user_account$error) ) {
    stop(user_account$error$message)
  }
  result <- bind_rows(user_account$data)
  # Follow paging cursors until exhausted.
  while ( !is.null(user_account$paging$`next`) ) {
    link   <- user_account$paging$`next`
    answer <- GET(link)
    rq_ids      <- append(rq_ids, setNames(list(status_code(answer)), answer$headers$`x-fb-trace-id`))
    out_headers <- append(out_headers, setNames(list(headers(answer)), answer$headers$`x-fb-trace-id`))
    user_account <- content(answer)
    if ( !is.null(user_account$error) ) {
      stop(user_account$error$message)
    }
    # BUGFIX: accumulate pages; previously each page overwrote the result,
    # so only the final page was ever returned.
    result <- bind_rows(result, user_account$data)
  }
  # Attach request metadata for debugging/audit.
  attr(result, "request_ids") <- rq_ids
  attr(result, "headers")     <- out_headers
  return(result)
}
784b769a8f8c982cfc6629842a396583e7dadcd6
994f419867322d603f47d9a5e0d10e4147973ba1
/R/TSPred-package.R
24d5a9d668b560c651f18856cf95a2a28cf952bb
[]
no_license
cran/TSPred
3d6bdb853b92c02a387e5898f886b192603bbe43
01e9a907bb272f6e61f4bf1a2a0617ee6bbb9d73
refs/heads/master
2021-06-04T20:14:18.278665
2021-01-21T10:30:03
2021-01-21T10:30:03
33,107,476
0
0
null
null
null
null
UTF-8
R
false
false
660
r
TSPred-package.R
#' Functions for Benchmarking Time Series Prediction #' #' Functions for time series pre(post)processing, decomposition, modelling, prediction and accuracy assessment. The generated models and its yielded prediction errors can be used for benchmarking other time series prediction methods and for creating a demand for the refinement of such methods. For this purpose, benchmark data from prediction competitions may be used. #' #' @docType package #' @name TSPred-package #' @author Rebecca Pontes Salles #' #' Maintainer: rebeccapsalles@acm.org #' @keywords package NULL # Instead of "_PACKAGE" to remove inclusion of \alias{forecast} # "_PACKAGE"
ef61a34950258f56781aab47c119f476240f1ead
dfa8d36c361dae6df3f037019205c5415bd483e8
/cachematrix.R
99163f7b52cf11198351145e80e092bcf919cea2
[]
no_license
olusal/ProgrammingAssignment2
481435fe14a1f9783dc07de9e0246a54ce4b8ef6
98f3df581d0007644d49440172a108451fda140b
refs/heads/master
2021-01-18T08:56:19.707695
2015-01-25T21:38:12
2015-01-25T21:38:12
29,826,895
0
0
null
2015-01-25T19:15:04
2015-01-25T19:15:04
null
UTF-8
R
false
false
989
r
cachematrix.R
## Caching the inverse of a matrix: makeCacheMatrix() wraps a matrix in a
## closure-backed object with getters/setters, and cacheSolve() computes the
## inverse lazily, reusing the cached value on subsequent calls.

## Build a special "matrix" object: a list of four closures sharing the
## matrix `x` and its cached inverse. Setting a new matrix invalidates
## the cache.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL   # matrix changed, so the old inverse is stale
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}

## Return the inverse of the matrix wrapped by `x`. On a cache hit the
## stored inverse is returned (with a message); otherwise it is computed
## with solve(), stored, and returned. Extra arguments go to solve().
cacheSolve <- function(x, ...) {
  inverse <- x$getinverse()
  if (is.null(inverse)) {
    inverse <- solve(x$get(), ...)
    x$setinverse(inverse)
    return(inverse)
  }
  message("getting cached data")
  inverse
}
e14acfbc5ea1694134554190a7a11761332b4e31
799e535f5be2ea8e9b238b092bd4c1fbffd56841
/programs/Supplementary_analyses/table_simulations_under_H0.R
cf6b8a368784916d419bee3c0961df969a7b3319
[]
no_license
andreaarfe/Bayesian-optimal-tests-non-PH
f16ff152d30c0bd075080af0a44ef6fb839c0d5f
fd8cc488b07f770ce11b184c76a6cdfbe899d67a
refs/heads/master
2021-07-14T12:31:29.632474
2020-08-21T14:13:34
2020-08-21T14:13:34
167,343,902
0
0
null
null
null
null
UTF-8
R
false
false
442
r
table_simulations_under_H0.R
# Summarise supplementary simulations under H0 (no treatment effect):
# empirical type-I error per test with a 95% CI, emitted as a LaTeX table.
library(xtable)
library(tidyverse)
# Loads the simulation results; `out` is used below as a matrix of p-values
# with one column per simulated trial -- confirm against the .Rdata file.
load('./datasets/Supplementary_simulations/sim_no_trt_eff.Rdata')
# Long format: one row per (test name, simulated p-value).
sims <- as.data.frame(t(out)) %>% gather(key=pval)
tab <- sims %>%
  group_by(pval) %>%
  # alpha = proportion of simulations rejecting at the 5% level
  summarise(alpha = mean(value<=0.05), N = n()) %>%
  # normal-approximation 95% confidence interval for the rejection rate
  mutate(SE = sqrt(alpha*(1-alpha)/N), LCL95 = alpha - 1.96*SE, UCL95 = alpha + 1.96*SE) %>%
  select(pval, alpha, LCL95, UCL95) %>%
  xtable(digits=3)
print(tab, include.rownames=FALSE)
fe484fd535c878d557fee69bdb4849617dd1334d
1c5cb67f169bca8dcdd5bc38a358fb45ea532305
/cachematrix.R
fbe36f6da6cf1b40e90b716d8edb533f8840bbb6
[]
no_license
abhinavgoel95/ProgrammingAssignment2
280704602e9d75ddd04f57bd6f8e1b25292ef3c0
75e30b9191fc1fce2d82732618a19a6c43e77ecf
refs/heads/master
2021-01-15T23:53:43.667176
2016-05-30T11:21:40
2016-05-30T11:21:40
60,002,587
2
0
null
2016-05-30T10:46:29
2016-05-30T10:46:29
null
UTF-8
R
false
false
1,316
r
cachematrix.R
## Caching the inverse of a matrix (lexical scoping demo).
## makeCacheMatrix() builds a closure-backed "matrix" object; cacheSolve()
## computes the inverse once and serves it from the cache afterwards.
## Fix vs original: cacheSolve no longer does `matinv <<- inv`, which leaked
## a `matinv` variable into the global environment on every cache hit.

## makeCacheMatrix returns a list of four closures over the matrix `x` and
## its cached inverse: set/get the matrix, setinv/getinv the inverse.
makeCacheMatrix <- function(x = matrix()) {
  matinv <- NULL
  set <- function(y) {
    x <<- y
    matinv <<- NULL   # invalidate the cache when the matrix changes
  }
  get <- function() x                    # retrieve the matrix
  setinv <- function(inv) matinv <<- inv # store the inverse in the cache
  getinv <- function() matinv            # retrieve the inverse from the cache
  list(set = set, get = get,
       setinv = setinv,
       getinv = getinv)
}

## cacheSolve returns the inverse of the matrix wrapped by `x`, computing it
## with solve() only when no cached value exists. Extra args go to solve().
cacheSolve <- function(x, ...) {
  inv <- x$getinv()         # read the inverse from the cache
  if (!is.null(inv)) {      # cache hit: no computation needed
    print("Available in cache")
    return(inv)
  }
  mat <- x$get()            # obtain the matrix
  inv <- solve(mat, ...)    # compute the inverse
  x$setinv(inv)             # store it for next time
  inv
}
35d6b35e1e36998ddf9114f5f8c0ccf6ff52864c
7e5e5139f817c4f4729c019b9270eb95978feb39
/Intermediate R/Chapter 2-Loops/8.R
e25d0d99c4e277cb1ec8e7fdfe1f9ffe8fc91a03
[]
no_license
Pranav-Polavarapu/Datacamp-Data-Scientist-with-R-Track-
a45594a8a9078076fe90076f675ec509ae694761
a50740cb3545c3d03f19fc79930cb895b33af7c4
refs/heads/main
2023-05-08T19:45:46.830676
2021-05-31T03:30:08
2021-05-31T03:30:08
366,929,815
1
2
null
null
null
null
UTF-8
R
false
false
933
r
8.R
# Mix it up with control flow
# DataCamp exercise: loop over the daily LinkedIn profile-view counts and,
# for each day, print "You're popular!" when views exceed 10 and
# "Be more visible!" otherwise, followed by the count itself.

# The linkedin vector has already been defined for you
linkedin <- c(16, 9, 13, 5, 2, 17, 14)

# Walk the vector, classifying each day's view count before echoing it
for (views in linkedin) {
  message_text <- if (views > 10) "You're popular!" else "Be more visible!"
  print(message_text)
  print(views)
}
500eb3f5851ad3a18c3d5eee4c27561c41813368
454cd9e49518d421264943fddf65bd4130b31eb0
/BLRM-Randomised-AGILE-SIMULATIONS.R
1c4349c8b7ac94195575b70c227ccb386acf63d4
[]
no_license
dose-finding/agile-implement
994e6307d130ab5877e916e1c5100ca09376b824
aaeecfdcbdff8d319f64903608430999b3f8b42f
refs/heads/main
2023-04-06T06:27:32.109276
2021-04-16T12:15:40
2021-04-16T12:15:40
358,584,971
0
0
null
null
null
null
UTF-8
R
false
false
4,741
r
BLRM-Randomised-AGILE-SIMULATIONS.R
# ---------------------------------------------------------------------------
# Simulation study of a Bayesian Logistic Regression Model (BLRM) design for
# a randomised dose-finding trial (AGILE). For each simulated trial, cohorts
# are split between control (dose 1 = standard of care, SoC) and one
# experimental dose; after every cohort the dose-toxicity curve is
# re-estimated by MCMC (JAGS) and the next experimental dose maximises the
# posterior probability of hitting the target toxicity increase over control,
# subject to an overdose-control rule and a no-dose-skipping constraint.
# Requires the rjags package and a working JAGS installation.
# ---------------------------------------------------------------------------

# Defining the MCMC model: two-parameter logistic dose-toxicity curve with a
# bivariate normal prior on theta = (intercept, log-slope)
library("rjags")
model1.string <-"
model {
for (i in 1:m){
logit(p[i]) <- alpha0[1] + alpha1[1] * sdose[i]
s[i] ~ dbin(p[i], n[i])
}
theta[1:2] ~ dmnorm(priorMean[1:2], priorPrec[1:2, 1:2])
## extract actual coefficients
alpha0<- theta[1]
alpha1 <- exp(theta[2])
}
"
model1.spec<-textConnection(model1.string)
# Defining function to find standarditised doses for the given skeleton ptox and parameters alpha
# (inverts the logistic curve: x such that plogis(a0 + a1*x) = ptox)
find.x <- function(ptox, alpha ) {
alpha<-matrix(alpha,ncol=2)
x <- (qlogis(ptox)-(alpha[,1]))/alpha[,2]
return( x )
}
# Defining doses (1 stands for SoC and 2:5 stand for experimental doses)
doses<-c(1,2,3,4,5)
# SoC
D<-doses[1]
# Defining Scenarios in Table 2 (true toxicity probability at each dose;
# uncomment one line to switch scenario)
true<-c(0.10,0.30,0.45,0.60,0.70) # Sc 1
# true<-c(0.10,0.15,0.30,0.45,0.60) # Sc 2
# true<-c(0.10,0.12,0.15,0.30,0.45) # Sc 3
# true<-c(0.10,0.11,0.12,0.15,0.30) # Sc 4
# Number of MCMC Samples used to approximate the posterior distribution
iter<-10000
# Number of Simulations used to produce OC (operating characteristics)
nsims<-2000
# Cohort size for experimental group
cohort<-4
# Cohort size for SoC/Control group
cohort.control<-2
# Total number of patients
N<-30
# Starting dose
firstdose<-2
# Target increase in the toxicity (over the control)
target.increase<-0.20
# Half-width of the tolerance interval around target.increase
delta<-0.05
# Prior Probability of AE at the SoC
p0.control<-0.10
# Overdosing Threshold
overdose<-0.25
# Calibrated prior parameters (prior variances, prior mean log-slope, and
# skeleton spacing)
var1<-1.10
var2<-0.30
slope<-(-0.05)
spacing<-0.075
#Defining Skeleton and standartised dose levels corresponding to this skeleton
p.tox0<-c(p0.control,p0.control + spacing* seq(1,length(doses)-1)) # finding the skeleton
priorMean<-c(log(p0.control/(1-p0.control)),slope)
priorVar<-matrix(c(var1,0.0,0.0,var2),2,2)
priorPrec<-solve(priorVar)
# plug-in prior point estimate of (alpha0, alpha1); the slope uses the
# lognormal mean exp(mu + sigma^2/2)
alpha.prior.plug<-c(priorMean[1],exp(priorMean[2]+diag(priorVar)[2]/2))
sdose<-find.x(p.tox0,alpha=alpha.prior.plug) # standartised dose levels
# Defining matrices to store the results
ss<-mat.or.vec(nsims,1)
selection<-mat.or.vec(nsims,length(doses))
p<-mat.or.vec(iter,length(doses))
# Running Simulations
for (z in 1:nsims){
  nextdose<-firstdose
  counter<-0
  stop<-0
  n<-rep(0,length(doses))   # patients assigned per dose
  s<-rep(0,length(doses))   # toxicities observed per dose
  while(sum(n)<N){
    n[1]<-n[1]+cohort.control
    n[nextdose]<-n[nextdose]+cohort
    #Assigning the patients and evaluating DLTs (Bernoulli draws at the true rates)
    s[1]<-s[1]+sum(rbinom(cohort.control,1,true[1]))
    s[nextdose]<-s[nextdose]+sum(rbinom(cohort,1,true[nextdose]))
    #Fitting the Bayesian model (a fresh textConnection is needed per fit)
    model1.spec<-textConnection(model1.string)
    mydata <- list(n=n,s=s,m=length(doses),sdose=sdose,priorMean=priorMean,priorPrec=priorPrec)
    jags <- jags.model(model1.spec,data =mydata,n.chains=1,n.adapt=iter,quiet=TRUE)
    update(jags, iter,progress.bar="none")
    tt<-jags.samples(jags,c('alpha0','alpha1'),iter,progress.bar="none")
    # Extracting vectors of posterior samples of the model parameters
    a0<-tt$alpha0[1,,]
    a1<-tt$alpha1[1,,]
    #Fitting the model with these parameters: posterior toxicity curve per dose
    for (j in 1:length(doses)){
      logit <- a0 + a1 * sdose[j]
      p[,j]<-exp(logit)/(1+exp(logit))
    }
    # Finding the probability of being in the target interval and overdosing probability
    # (y is the posterior toxicity increase of dose j over control)
    prob.next<-mat.or.vec(length(doses),1)
    for (j in 2:length(doses)){
      y<-p[,j]-p[,1]
      prob.next[j]<-mean(y <=(target.increase+delta) & (y>=target.increase-delta))
      if(mean(y>=(target.increase+2*delta))>overdose){
        prob.next[j]<-0   # overdose control: dose j ruled out as unsafe
      }
    }
    # If all unsafe - stop the trial, otherwise assign to the max Prob of Target dose (subject to no skipping constraint)
    if(all(prob.next==0)){
      stop<-1
      break()
    }else{
      nextdose<-min(nextdose+1,which.max(prob.next))
    }
  }
  # Storing results of the simulation (selection indicator and sample size)
  if(stop==0){
    selection[z,nextdose]<-1
    ss[z]<-sum(n)
  }else{
    counter<-counter+1
    ss[z]<-sum(n)
  }
  cat(z,"\n")   # progress indicator
}
# Proportion of Each Dose Selection
colMeans(selection)
# Mean Sample size
mean(ss)
ea319b3f1a6e800acfc3e075dd9994f9800d8531
32bba96a36c3783f7634ec99e75fc2153f7e3d3d
/donneesdatabase2020.R
d08781740ee094e9c6826d32e6266bcf534ee222
[]
no_license
julienvu/TravailPerso_R_Python_autres_tous_domaines
f2879db106d60cda7af4d5e5c6b883380c66a573
2144268029f8c13e6302111627b20472c4206b96
refs/heads/main
2023-04-08T17:43:21.938840
2021-04-21T20:27:20
2021-04-21T20:27:20
341,634,941
0
0
null
null
null
null
UTF-8
R
false
false
8,082
r
donneesdatabase2020.R
# Source data: FRED (Federal Reserve Economic Data) https://fred.stlouisfed.org/
# Start of R code
# Data: Excel file database-reaction_function2020.xlsx
# Load the readxl, dplyr and stats libraries
library(readxl)
library(dplyr, quietly = TRUE)
library(stats)
# Clear all variables from the workspace
remove(list = ls())
# Import the source-data Excel file
database_reaction_function2020 <- read_excel("Master_Info_Dauphine/M2_ID/database-reaction_function2020.xlsx")
View(database_reaction_function2020)
head(database_reaction_function2020)
df1 <-database_reaction_function2020[,-10] # drop the columns after the ib column
df1 <-df1[,-11]
#df1 <-df1[,-12]
df1 <-df1[,-11]
df1 <-df1[,-10]
names(df1)[1]
names(df1)[3]
names(df1)
View(df1)
# Rename column ...1 to "date"
dfinal <-df1%>%
  rename(date = ...1)
names(dfinal)[1]#"date"
names(dfinal)[3]#"rb"
names(dfinal)[8]#"if"
# Rename column 8 ("if") to "inflafrance"
names(dfinal)[8]<-"inflafrance"
# Inspect the modified table
View(dfinal)
# Plot the interest-rate time series (BDF, Bundesbank, Fed and general)
plot(dfinal$date,dfinal$rf,col="purple",type="l",
     main="Taux d'intérêt en fonction du temps",
     xlab = "date",ylab="taux d'intêret rf/rb/ru/général")
lines(dfinal$date,dfinal$rb,col="orange")
lines(dfinal$date,dfinal$ru,col="blue")
lines(dfinal$date,dfinal$rdb, col="green")
# Add a legend for the interest rates
legend("right",
       legend=c("rf", "rb","ru","rdb"),
       col=c("purple", "orange","blue","green"), lty=1:2, cex=0.8)
# Plot the exchange-rate time series
# 1x2 plotting window
par(mfrow=c(1,2))
plot(dfinal$date,dfinal$ff,col="brown",type="l",main="Tchange ff",
     xlab = "date",ylab="taux de change ff")
# Add a legend for the ff exchange rate
legend("topleft",
       legend=c("ff"),
       col=c("brown"), lty=1:2, cex=0.8)
# FIX: removed a stray second comma after col="orange"; the original
# plot(...,col="orange",,type="l",...) passed an empty argument to plot(),
# which raises an "argument is empty" error.
plot(dfinal$date,dfinal$dm,col="orange",type="l",main="Tchange dm",
     xlab = "date",ylab="taux de change dm")
# Add a legend for the dm exchange rate
legend("topleft",
       legend=c("dm"),
       col=c("orange"), lty=1:2, cex=0.8)
# Plot the inflation-rate time series
# NOTE(review): the original comment said "back to the normal (1x1) window",
# but the code sets a 1x2 window -- confirm which was intended
par(mfrow=c(1,2))
plot(dfinal$date,dfinal$ib,col="blue",type="l",main="Tinflation en fonction du temps",
     xlab = "date",ylab="taux d'inflation")
legend("topright",
       legend=c("ib"),
       col=c("blue"), lty=1:2, cex=0.8)
plot(dfinal$date,dfinal$inflafrance,col="pink",type="l",main="Tinflation en fonction du temps",
     xlab = "date",ylab="taux d'inflation")
# Add a legend for the inflation rates
legend("topright",
       legend=c("inflafrance"),
       col=c("pink"), lty=1:2, cex=0.8)
# Correlations between selected variables of the dataset
# Pearson correlation coefficient with use="complete.obs", which drops rows
# containing missing values
cor(dfinal$rf,dfinal$inflafrance,use="complete.obs")#
# interpretation: very strong positive correlation between the two variables
cor(dfinal$rb,dfinal$ib,use="complete.obs")#0.946854 very close to 1
# interpretation: very strong positive correlation between the two variables
# German interest rate and German inflation rate
cor(dfinal$rf,dfinal$ff,use="complete.obs")#0.04 very close to 0
# no relationship between the French interest rate and the French exchange rate
cor(dfinal$rb,dfinal$dm,use="complete.obs")#0.06382578 very close to 0
# no relationship between the German interest rate and the German exchange rate
cor(dfinal$rf,dfinal$rdb,use="complete.obs")#0.7694166 close to 1
# interpretation: strong positive correlation between the two variables
# French interest rate and general interest rate
cor(dfinal$rb,dfinal$rdb,use="complete.obs")#0.7694166 close to 1
# interpretation: very strong positive correlation between the two variables
# German interest rate and general interest rate
cor(dfinal$ff,dfinal$rdb,use="complete.obs")#-0.3348911 negative
# negative coefficient; no linear relationship between the two variables
# French exchange rate and general interest rate
cor(dfinal$dm,dfinal$rdb,use="complete.obs")#0.06976173 close to 0
# coefficient close to 0; no linear relationship between the two variables
# German exchange rate and general interest rate
cor(dfinal$inflafrance,dfinal$rdb,use="complete.obs")#0.8801829 close to 1
# interpretation: very strong positive correlation between the two variables
# general interest rate and French inflation rate
cor(dfinal$inflafrance,dfinal$ff,use="complete.obs")#-0.376904 negative
# no linear relationship between the variables
# French inflation rate and French exchange rate
cor(dfinal$ib,dfinal$rdb,use="complete.obs")#0.8920401 close to 1
# interpretation: very strong positive correlation between the two variables
# general interest rate and German inflation rate
cor(dfinal$ib,dfinal$dm,use="complete.obs") #0.03744707 very close to 0 but positive
# value very close to 0; no linear relationship between the two variables
# German exchange rate and German inflation rate
# Range (max - min) of the dataset variables
# French interest rate: max - min
print('étendue taux intérêt france: ')
max(dfinal$rf)-min(dfinal$rf)#12.375
# German interest rate: max - min
print('étendue taux intérêt allemagne: ')
max(dfinal$rb)-min(dfinal$rb)#8.5
# Fed interest rate: max - min
print('étendue taux intérêt fed: ')
max(dfinal$ru)-min(dfinal$ru)#13.25
# French exchange rate: max - min
print('étendue taux de change france: ')
max(dfinal$ff)-min(dfinal$ff)#6.0528
# German exchange rate: max - min
print('étendue taux de change allemagne: ')
max(dfinal$dm)-min(dfinal$dm)#1.669
# General interest rate: max - min
print('étendue taux intérêt général: ')
max(dfinal$rdb)-min(dfinal$rdb)#5
# French inflation rate: max - min
# NOTE(review): the printed label says "allemagne" but the variable is
# inflafrance (France) -- label kept as-is since it is a runtime string
print('étendue taux d inflation allemagne: ')
max(dfinal$inflafrance)-min(dfinal$inflafrance)#12.43
# German inflation rate: max - min
print('étendue taux d inflation allemagne: ')
max(dfinal$ib)-min(dfinal$ib)#8.46
# Multiple linear regression with lm: French interest rate explained by the
# German interest rate, the US interest rate and French inflation
tauxintfrreg<-lm(dfinal$rf ~ dfinal$rb +dfinal$ru+dfinal$inflafrance, data=dfinal)
# Regression output
summary(tauxintfrreg)
# The closer the adjusted R-squared is to 1, the better the fit between the
# model and the observed data. Its value is, however, strongly influenced by,
# among other things, the number of explanatory variables in the regression.
# Fitted equation:
# frenchrate = 4.73 + 0.57*germanrate - 0.21*usrate + 0.35*frenchinflation
# gamma = 4.73
# Coefficient on ru is not significant (no significance symbol)
# rb and inflafrance are significant (at 90% and 99.999%)
# A 1-point increase in the German interest rate raises the French rate by 0.57
# A 1-point increase in French inflation raises the French rate by 0.35
# Alternative reading: for a constant inflation rate, since the coefficient on
# the German rate is positive, raising the German rate raises the French rate
# (European strategy)
# Extract the coefficients
coef(tauxintfrreg)
# Confidence intervals (at 95%) for the coefficients
confint(tauxintfrreg)
# Plot: observed values and regression line
plot(dfinal$rf ~ dfinal$rb + dfinal$ru + dfinal$inflafrance, data=dfinal)
abline(tauxintfrreg, col = "orange")
# Prediction of the French interest rate at the 99% level from the German
# rate, the US rate and French inflation
# NOTE(review): the newdata columns are unnamed, so predict() cannot match
# them to the model terms and falls back to in-sample fitted values --
# confirm this is the intended behaviour
valeurspredites <- predict(tauxintfrreg,data.frame(4,3.4,7), level= 0.99)
# Plot the predicted French interest rate over time
plot(dfinal$date,valeurspredites,col="brown",type="l",
     main="Prédiction du taux d'intérêt francais")
# End of R code
8b61ea0fd66bc15515d51d9a79cd2802179053d2
f25f19454371c545fb69ccb7da1a4ef0baf6acb8
/man/getMainEffects.Rd
f5c57c101127c7ef73e89e2f24a8954ac5f789e1
[]
no_license
Sandy4321/npdr
00e6ab9fd7db2a6465b39bb320afeee76db5f7ce
b02e08577c58a6fddb0b157f9870740795747b04
refs/heads/master
2020-12-06T07:34:02.878253
2020-01-06T04:54:47
2020-01-06T04:54:47
null
0
0
null
null
null
null
UTF-8
R
false
true
1,703
rd
getMainEffects.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/inbixGAIN.R
\name{getMainEffects}
\alias{getMainEffects}
\title{Get main effects from generalized linear model regression (parallel).}
\usage{
getMainEffects(labelledDataFrame, regressionFamily = "binomial",
  numCovariates = 0, writeBetas = FALSE, useBetas = FALSE,
  transformMethod = "", numCores = 2, verbose = FALSE)
}
\arguments{
\item{labelledDataFrame}{\code{data.frame} with variables in columns and samples in rows.}

\item{regressionFamily}{\code{string} glm regression family name.}

\item{numCovariates}{\code{numeric} number of included covariates.}

\item{writeBetas}{\code{logical} indicating whether to write beta values to a separate file.}

\item{useBetas}{\code{logical} indicating that raw betas are used rather than standardized betas.}

\item{transformMethod}{\code{string} optional transform method.}

\item{numCores}{\code{numeric} number of processor cores to use in mclapply.}

\item{verbose}{\code{logical} to send verbose messages to stdout.}
}
\value{
mainEffectValues \code{vector} of main effect values.
}
\description{
\code{getMainEffects}
}
\seealso{
\code{\link{rankUnivariateRegression}}

Other GAIN functions: \code{\link{dcgain}},
  \code{\link{dmgain}}, \code{\link{fitInteractionModel}},
  \code{\link{fitMainEffectModel}},
  \code{\link{gainToSimpleSIF}},
  \code{\link{getInteractionEffects}},
  \code{\link{regainParallel}}, \code{\link{regain}}

Other inbix synonym functions: \code{\link{dcgain}},
  \code{\link{regainParallel}}, \code{\link{regain}}
}
\concept{GAIN functions}
\concept{inbix synonym functions}
\keyword{array}
\keyword{internal}
\keyword{models}
\keyword{regression}
\keyword{univar}
664453d5a3c8cc17b22a552777980ea6b9f8f7c9
49b8ff57b4184c137dde8ed358b3372f3020d9b0
/RStudioProjects/mbDiscoveryR/testing/parseMRMR.R
5a78a8f532f19e763826967ec13c57e04426e0cc
[]
no_license
kelvinyangli/PhDProjects
c70bad5df7e4fd2b1803ceb80547dc9750162af8
db617e0dbb87e7d5ab7c5bfba2aec54ffa43208f
refs/heads/master
2022-06-30T23:36:29.251628
2019-09-08T07:14:42
2019-09-08T07:14:42
59,722,411
0
0
null
null
null
null
UTF-8
R
false
false
802
r
parseMRMR.R
# Parse the selected feature names out of the captured console output of an
# mRMR (minimum Redundancy Maximum Relevance) run.
#
# Args:
#   output:    character vector of printed lines from the mRMR program.
#   nFeatures: number of features mRMR was asked to return; it must be known
#              in advance because mRMR prints exactly that many feature rows.
#
# Returns: character vector of the nFeatures selected feature names, in the
#          order mRMR ranked them.
parseMRMR = function(output, nFeatures) {
  startString = "*** mRMR features ***" # marker preceding the feature table
  # Locate the marker line in the full printout. The feature rows start two
  # lines after it because the line right after the marker holds the mRMR
  # parameter settings.
  markerIdx = pmatch(startString, output)
  if (is.na(markerIdx)) {
    stop("mRMR feature marker '", startString, "' not found in output")
  }
  indexStart = markerIdx + 2
  mrmrOutput = output[indexStart:(indexStart + nFeatures - 1)]
  # Each feature row quotes the feature name; take the text between the first
  # pair of double quotes. (The original grew a logical vector via
  # vector(length=...); vapply preallocates with the right type.)
  mb = vapply(mrmrOutput,
              function(line) strsplit(line, "\"")[[1]][2],
              character(1), USE.NAMES = FALSE)
  return(mb)
}
5f197afefac4d74d49baf2e6db6cd691f18e9d73
29c2e5531fb0a5095df2d5a02a08d4429acdb280
/cachematrix.R
df95d63e0ee891b90adb708c682b7d29dd44ec03
[]
no_license
andrewdziedzic25/Andrew-Dziedzic
5d358c492a015ad44efb2574bdede57222b3e9af
79d56ec46262edae5a462eee2eba9f3499b36226
refs/heads/master
2022-12-13T18:00:30.171030
2022-12-04T22:17:44
2022-12-04T22:17:44
244,030,725
0
0
null
null
null
null
UTF-8
R
false
false
1,169
r
cachematrix.R
## Creates a special "MATRIX" object that can cache its inverse.
## Returns a list of four closures sharing one environment:
##   SET/GET               - store / retrieve the underlying matrix
##   SETinverse/GETinverse - store / retrieve the cached inverse
makeCacheMatrix <- function(a = matrix()) {
  cached <- NULL
  SET <- function(c) {
    a <<- c
    cached <<- NULL  # replacing the matrix invalidates any cached inverse
  }
  GET <- function() {
    a
  }
  SETinverse <- function(inverse) {
    cached <<- inverse
  }
  GETinverse <- function() {
    cached
  }
  list(SET = SET,
       GET = GET,
       SETinverse = SETinverse,
       GETinverse = GETinverse)
}

## Computes the inverse of the special "MATRIX" returned by makeCacheMatrix,
## reusing the cached value when one is available. Inversion is done with
## solve(); the matrix supplied is assumed to always be invertible.
## Extra arguments in ... are forwarded to solve().
cacheSolve <- function(b, ...) {
  inv <- b$GETinverse()
  if (is.null(inv)) {
    # first request: compute the inverse and store it for next time
    inv <- solve(b$GET(), ...)
    b$SETinverse(inv)
  } else {
    message("Cached data is:")
  }
  ## Return a matrix that is the inverse of 'b'
  inv
}
1b6699cee852d24a6ed4ff1e01eb9e6b5d6c7648
98615162d2e147060cf2a8fd7f20cb756c6d6b41
/R/restrict.minimal.hits.R
f2207d2eb09870e2d9cc94e536f1a82c1c893085
[]
no_license
axrt/gbra
9796f993d8b457d21cc1213f0d9768907e63cfa5
dde797f8c2ae9777094b5b05ee4d4e5023f0ad31
refs/heads/master
2021-01-19T05:49:11.562580
2018-02-18T18:02:15
2018-02-18T18:02:15
24,808,334
0
0
null
null
null
null
UTF-8
R
false
false
924
r
restrict.minimal.hits.R
#' Use to restrict a given data.frame with RAW(!) bitscores to a certain number of
#' non-zero bitscores per ORF.
#'
#' Assumes the first two columns are identifiers and the bitscore columns
#' start at column 3 (as produced by read.bhs()).
#'
#' @param \code{df} data.frame, presumably from read.bhs().
#' @param \code{minhit} minimal number of non-zero hits that an ORF must have in order remain in the table, default is 10.
#' @return a data.frame with only those ORFs that had non-zero hits of \code{minhit} threshold and greater.
#' @examples
#' master.table.raw<-read.bhs(bh.folder = "gBLASTer/bh")
#' master.table.raw<-as.data.frame(master.table.raw)
#' # FIX: example previously redefined the function instead of calling it
#' master.table.raw.10hcut<-restrict.minimal.hits(df=master.table.raw)
restrict.minimal.hits<-function(df, minhit=10){
  # TRUE when a row of bitscores has at least minhit non-zero entries
  # (FIX: direct comparison; the original wrapped a scalar test in ifelse())
  enough<-function(i,minhit){
    return(sum(i>0)>=minhit)
  }
  # filters out poor ORF rows; columns 1:2 are identifiers, scores start at 3
  enoughs<-apply(df[,3:ncol(df)],1,enough,minhit=minhit)
  return(df[enoughs,])
}
7c200df63cf7a6711d37182aa05c133be45fd303
1b901d2fae21c0fcbe7799682af9611403832bf9
/RforgeCode/Meucci/man/PlotCompositionEfficientFrontier.Rd
70c9ac5ad39528c398bf42ba0b522ace167f4cc7
[]
no_license
dsnaveen/symmys
ca5ad4c47de4ab4315389c04bc62e752b30aadbc
c227e905452779425a08c5304ac7a0df7dbb5247
refs/heads/master
2021-05-30T01:18:59.261392
2015-12-07T20:32:25
2015-12-07T20:32:25
null
0
0
null
null
null
null
UTF-8
R
false
false
726
rd
PlotCompositionEfficientFrontier.Rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/PlotCompositionEfficientFrontier.R \name{PlotCompositionEfficientFrontier} \alias{PlotCompositionEfficientFrontier} \title{Plots the efficient frontier} \usage{ PlotCompositionEfficientFrontier(Portfolios) } \arguments{ \item{Portfolios}{: [matrix] (M x N) M portfolios of size N (weights)} } \description{ Plot the efficient frontier, as described in A. Meucci, "Risk and Asset Allocation", Springer, 2005. } \author{ Xavier Valls \email{flamejat@gmail.com} } \references{ A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}. See Meucci's script for "PlotCompositionEfficientFrontier.m" }
800fac66c5f0e5b57ffd2b772ef5cba16dc43cf8
fe7bdff75402c7bcff45082fb384f8918a23811d
/my_first_r.R
635252572210a7f6d3bab33e1aabf17cb6637e8c
[]
no_license
Jiyounglee78/project_nottoGIT
cc810f1528716eed6d4ebf5577dfe13047adf5d7
e19a514421482570c6a6a8b34f48480a5c99424c
refs/heads/master
2022-04-22T10:43:47.753066
2020-04-16T19:20:30
2020-04-16T19:20:30
256,309,228
0
0
null
null
null
null
UTF-8
R
false
false
281
r
my_first_r.R
# Demo script: build a small matrix and install packages from the three main
# package sources (CRAN, Bioconductor, GitHub).

# A 4x2 matrix filled column-wise with the values 1..8
example <- matrix(c(1, 2, 3, 4, 5, 6, 7, 8), nrow = 4, ncol = 2)

#install from CRAN
install.packages(c("ggplot2", "devtools", "lme4"))

#Installing from Bioconductor
# NOTE(review): biocLite is the legacy Bioconductor installer; current
# Bioconductor releases use BiocManager::install() instead
source("https://bioconductor.org/biocLite.R")
biocLite()

#Installing from GitHub
# FIX: install_github() lives in the devtools package, which is never
# attached in this script, so the bare call would fail with
# "could not find function"; qualify it with the package namespace.
devtools::install_github("author/package")
6ed89b78796c679f11370998ce46c516abb269f9
3e70c711647c48e41231a44125236ec2c706ecb0
/Assignment3/LogitReg.R
71c0fad1facea04f76c4cd814c943718570a0b53
[]
no_license
laurencehendry/Collaborative-Data-Analysis-Assignment2
2715a702284905e7f5d3a8e22af33a33ce853bef
fb9818050a160be46f91e385aa069427369550f7
refs/heads/master
2021-01-20T15:57:41.070713
2015-12-03T15:35:23
2015-12-03T15:35:23
47,351,803
1
0
null
2015-12-03T18:30:41
2015-12-03T18:30:41
null
UTF-8
R
false
false
3,458
r
LogitReg.R
### logit models for assignment 3 ###
### Claire & Noriko ###
### Fits four nested logistic regressions of interest in electric vehicles
### (EVinterest) on demographics, presents them side by side, and plots
### predicted probabilities via Zelig simulations.

### set working directory
setwd("C:/Users/noriko/Desktop/Collaborative-Data-Analysis-Assignment2/Assignment3")

library(stargazer)
library(knitr)
library(Zelig)
library(rms)

# load the cleaned-up dataframe (provides the EVINTEREST data)
load("EVdata1.rda")

### step-wise logistic regression ###

# Estimate model-1 (AGE & SEX & INCOME & Education)
# Categorical income (4 classes, low is the reference)
L1 <- lrm(EVinterest ~ RAGE + Male + lowermiddle + highermiddle + high + degree, EVINTEREST)
# NOTE(review): lrm() is being called on an already-fitted lrm object here and
# below; presumably print(L1) was intended -- confirm
lrm(L1)

# Estimate model-2 (AGE & SEX & INCOME & Education & Licence & # of Cars)
L2 <- lrm(EVinterest ~ RAGE + Male + lowermiddle + highermiddle + high + degree + licence + NumCar, EVINTEREST)
lrm(L2)

# Estimate model-3 (AGE & SEX & INCOME & Education & Licence & # of Cars & # of children)
L3 <- lrm(EVinterest ~ RAGE + Male + lowermiddle + highermiddle + high + degree + licence + NumCar + DVHsize + havechildren, EVINTEREST)
lrm(L3)

# Estimate model-4 (AGE & SEX & INCOME & Education & Licence & # of Cars & # of children & Region)
L4 <- lrm(EVinterest ~ RAGE + Male + lowermiddle + highermiddle + high + degree + licence + NumCar + DVHsize + havechildren + Scotland, EVINTEREST)
lrm(L4)

### present results
# Create cleaner covariate labels (order must match the covariates of L4)
labels <- c('Age', 'Male', 'Income: low-middle', 'Income: high-middle',
            'Income: high', 'College degree', 'Drivers licence', '# of cars',
            "Size of household", 'Having dependent children', 'Scotland',
            '(Intercept)')

stargazer::stargazer(L1, L2, L3, L4,
                     covariate.labels = labels,
                     title = 'Interests in EVs',
                     digits = 2, type = 'text')

### predicted probabilities by income (other covariates are fixed)
# NOTE(review): this glm refit uses NumDepCh where L4 used
# DVHsize + havechildren -- confirm which specification is intended
L4Pred <- glm(EVinterest ~ RAGE + Male + lowermiddle + highermiddle + high + degree + licence + NumCar + NumDepCh + Scotland, data = EVINTEREST, family = 'binomial')

# Profiles for the four income classes; age at its mean, other covariates fixed
# (inccat is only a row label, it is not a model covariate)
fittedP <- with(EVINTEREST,
                data.frame(RAGE=mean(RAGE),
                           Male=c(1,1,1,1),
                           inccat=c(1,2,3,4),
                           low=c(1,0,0,0),
                           lowermiddle=c(0,1,0,0),
                           highermiddle=c(0,0,1,0),
                           high=c(0,0,0,1),
                           licence=1,
                           degree =1,
                           NumCar=1,
                           NumDepCh=1,
                           Scotland=0))
fittedP$predicted <- predict(L4Pred, newdata = fittedP, type = 'response')
fittedPselected <- subset(fittedP, select= c(inccat, predicted))
kable(fittedPselected, align = 'c', digits = 2,
      caption = 'Predicted Probabilities for Fitted Values')

### Zelig plot
# Refit the same specification with Zelig so that setx()/sim() can simulate
# predicted probabilities over ranges of single covariates
ZP <- zelig(EVinterest ~ RAGE + Male + lowermiddle + highermiddle + high + degree + licence + NumCar + NumDepCh + Scotland, cite = FALSE, data = EVINTEREST, model = 'logit')

# Predicted probability as age varies from 20 to 80
setZP1 <- setx(ZP, RAGE = 20:80)
simZP1 <- sim(ZP, x = setZP1)
plot(simZP1, xlab="Age", ylab="Predicted Probability", main="Predicted Probability of Having an Interest in EV by Age")

# Predicted probability as the number of household cars varies from 0 to 3
setZP2 <- setx(ZP, NumCar = 0:3)
simZP2 <- sim(ZP, x = setZP2)
plot(simZP2, xlab="Number of Cars in Household", ylab="Predicted Probability", main="Predicted Probability of Having an Interest in EV by # of Cars")

# Predicted probability as the number of dependent children varies from 0 to 7
# (the "Childen" typo below is inside a runtime string, so it is left as-is)
setZP3 <- setx(ZP, NumDepCh = 0:7)
simZP3 <- sim(ZP, x = setZP3)
plot(simZP3, xlab="Number of Dependent Childen", ylab="Predicted Probability", main="Predicted Probability of Having an Interest in EV by # of Children")
210af23717f662ccf05c4887923b7612448e1252
2cd8f80c7a1016129c57e85d729d655fefc01070
/tests/readIDAT_gs_wg6v2.R
1dc865f2280c18c9a17c25ca87f9d8350b85f971
[]
no_license
HenrikBengtsson/illuminaio
44f5f2295839c4132139d98cdc6d7e5d366705ff
4a79a1f7b0348a331ac79d2abc8599e50a25d6c5
refs/heads/master
2023-04-29T15:38:56.887547
2023-04-26T19:56:09
2023-04-26T19:56:09
24,728,987
6
5
null
2020-07-13T01:01:16
2014-10-02T17:29:13
R
UTF-8
R
false
false
1,144
r
readIDAT_gs_wg6v2.R
# Regression test for illuminaio::readIDAT(): the values decoded from a raw
# .idat file must match the probe summary exported by Illumina GenomeStudio
# for the same array (WG-6 v2 sample 4343238080_A, green channel).
library(illuminaio)
library(IlluminaDataTestFiles)
idatFile <- system.file("extdata", "idat", "4343238080_A_Grn.idat",
                        package = "IlluminaDataTestFiles")
idatData <- readIDAT(idatFile)$Quants
gsFile <- system.file("extdata", "gs", "4343238080_A_ProbeSummary.txt.gz",
                      package = "IlluminaDataTestFiles")
gStudio <- read.delim(gzfile(gsFile, open = "r"), sep = "\t", header = TRUE)
## not all probes are present in GenomeStudio output, so only select those that are
idatData <- idatData[which(idatData[,"CodesBinData"] %in% gStudio[,"ProbeID"]),]
## the orders are also different (numeric vs alphabetical)
gStudio <- gStudio[match(idatData[,"CodesBinData"], gStudio[,"ProbeID"]),]
# check each value in GenomeStudio output
## there are some rounding differences, so we allow slight differences
## summarised bead intensities
RUnit::checkEqualsNumeric(idatData[, "MeanBinData"], gStudio[, 3],
                          tolerance = 10e-7)
## number of beads (integer counts, so exact equality is required)
RUnit::checkEquals(idatData[, "NumGoodBeadsBinData"], gStudio[, 5])
## standard errors (GenomeStudio reports SE = SD / sqrt(n))
RUnit::checkEqualsNumeric(idatData[, "DevBinData"] / sqrt(idatData[, "NumGoodBeadsBinData"]),
                          gStudio[, 4], tolerance = 10e-7)
9cf51268acd9c255a1d17374220c300a02c2e994
20ff998b9ab0992bd595de0b8446238338042201
/play.R
a61d78ea7e30dce89fbf90cf160ca65ca5bb4ed8
[]
no_license
faisal-samin/visnetwork-demo
796e0b0f0750343a769d92fb3c631f17542d7b2c
47e9e7441a2ab2cd152b7111f13e9ac40f8950a2
refs/heads/master
2020-04-19T17:17:49.030059
2019-01-31T10:44:13
2019-01-31T10:44:13
168,330,669
0
0
null
null
null
null
UTF-8
R
false
false
1,622
r
play.R
# Demo: build an interactive Game of Thrones character network with visNetwork,
# then progressively style nodes (house colours, sizes, an image) and edges.

# Read in packages
library(tidyverse)
library(visNetwork)

# Read in data ------------------------------------------------------------
# got-nodes.csv / got-edges.csv are expected in the working directory
nodes = read_csv(
  "got-nodes.csv",
  col_names = c("id", "label"), # set name of columns
  skip = 1 # skip column headers
)

edges = read_csv(
  "got-edges.csv",
  col_names = c("from", "to", "weight"),
  skip = 1
)

# preview network (unstyled)
visNetwork(nodes, edges, height = "500px", width = "100%")

# Add features to nodes ---------------------------------------------------
# add houses
# Stark - Arya, Bran, Jon, Rickon, Catelyn, Robb, Sansa, Eddard
# Lannister - Tywin, Tyrion, Jaime, Cersei
# (characters in neither list get house = NA; note Rickon is listed in the
# comment above but not in the Stark vector below)
nodes_h = nodes %>%
  mutate(house = case_when(
    id %in% c("Arya", "Bran", "Jon", "Catelyn", "Robb", "Sansa", "Eddard") ~ "Stark",
    id %in% c("Tywin", "Tyrion", "Jaime", "Cersei") ~ "Lannister"
  ))

# Colour and scale nodes by house; NA-house characters fall through to the
# grey/small defaults
nodes_c = nodes_h %>%
  mutate(color = case_when(
    house == "Stark" ~ "darkred",
    house == "Lannister" ~ "gold",
    TRUE ~ "lightgrey"
  ),
  size = case_when(
    house %in% c("Stark", "Lannister") ~ 30,
    TRUE ~ 10
  ),
  font.size = case_when(
    house %in% c("Stark", "Lannister") ~ 50,
    TRUE ~ 15
  )
  )

# NOTE(review): shape = "image" is set for EVERY node, but only Arya gets an
# image URL (others are NA) -- presumably the non-Arya nodes should keep their
# default shape; confirm how visNetwork renders image nodes with a NA URL
nodes_i = nodes_c %>%
  mutate(
    shape = "image"
    ,
    image = case_when(
      id == "Arya" ~ "https://pngimage.net/wp-content/uploads/2018/05/arya-stark-png-2.png"
    )
  )

# Edge width proportional to interaction weight
edges_l = edges %>%
  mutate(width = weight/5)

visNetwork(nodes_i, edges_l, width = "100%") %>%
  visNodes(shapeProperties = list(useBorderWithImage = TRUE))
c36ea601587784f3b6c2c6e66cb5dc3b7de6b957
a5e49e9b3e7892ce476bab528cde3f686d5a5e3d
/inst/shiny_apps/Histogram/app.R
fdf680f745d13579d7ccc854642983dc112399c2
[]
no_license
cran/lessR
a7af34480e88c5b9bf102ab45fa6464a22ffbe3b
562f60e6688622d8b8cede7f8d73d790d0b55e27
refs/heads/master
2023-05-29T07:57:09.544619
2023-05-14T20:20:02
2023-05-14T20:20:02
17,697,039
6
3
null
null
null
null
UTF-8
R
false
false
21,200
r
app.R
# --------- # Histogram # --------- library(shiny) library(lessR) clr.one <- list( "#96AAC3", "dodgerblue3", "cornflowerblue", "steelblue", "darkblue", "pink2", "red3", "firebrick2", "darkred", "violetred", "mediumorchid", "purple3", "darkorange2", "salmon", "orange3", "sienna", "rosybrown", "wheat3", "goldenrod2", "khaki", "yellow2", "darkseagreen2", "springgreen3", "seagreen4", "darkgreen", "black", "gray45", "slategray4", "gray75", "snow3", "gray95", "lavender", "ivory2", "aliceblue", "white") clr.edge <- list("off", "black", "gray50", "gray75", "white", "ivory", "darkblue", "darkred", "darkgreen", "rosybrown2", "bisque", "slategray2", "aliceblue", "thistle1", "coral", "gold") clr.qual <- c("hues", "Okabe-Ito", "viridis") clr.seq <- list("reds", "rusts", "browns", "olives", "greens", "emeralds", "turquoises", "aquas", "blues", "purples", "violets", "magentas", "grays") clr.den_g <- c("steelblue3", clr.one[2:length(clr.one)]) clr.den_n <- c("pink1", clr.one[2:length(clr.one)]) addResourcePath("shiny_dir", system.file("shiny_apps", package="lessR")) ui <- fluidPage( tags$head(tags$link(rel="stylesheet", href="shiny_dir/styles.css")), tabsetPanel( tabPanel("Data", titlePanel(div("Upload a text (.csv, .txt) or Excel file", id="hp")), sidebarLayout( sidebarPanel( radioButtons("fType", HTML("<h5 class='soft'>Format</h5>"), c("Excel"="Excel", "Text"="Text")), conditionalPanel(condition="input.fType == 'Text'", radioButtons("sep", HTML("<h5 class='soft'>Separator</h5>"), c(Comma=",", Semicolon=";", Tab="\t"), ","), radioButtons("decimal", HTML("<h5 class='soft'>Decimal</h5>"), c("Point"=".", "Comma"=",")), ), radioButtons("fSource", HTML("<h5 class='soft'>Source</h5>"), c("Local"="local", "Web"="web")), conditionalPanel(condition="input.fSource == 'local'", fileInput("myFile", "Locate your data file", accept=c(".csv", ".txt", ".xlsx", ".xlsm")), ), conditionalPanel(condition="input.fSource == 'web'", textInput("myURL", "Web address of data file"), 
actionButton("submitURL", "Submit") ), textOutput("ncols"), textOutput("nrows"), uiOutput("d.show"), ), # end sidbarPanel mainPanel( tableOutput("d.table"), tags$style(type="text/css", "#d.table {font-size: .95em;}") ) ) # end sidbarLayout ), # end tabPanel 1 tabPanel("Histogram", pageWithSidebar( titlePanel(""), sidebarPanel( selectInput('x.col', 'x Variable', ""), tags$hr(), checkboxInput("myBins", div("Bins", class="view"), FALSE), conditionalPanel(condition="input.myBins == true", uiOutput("slider_bw"), uiOutput("slider_bs") ), tags$hr(), checkboxInput("myGeom", div("Colors", class="view"), FALSE), conditionalPanel(condition="input.myGeom == true", selectInput("myFill", "fill", choices=list("Constant"=clr.one, "Qualitative"=clr.qual, "Sequential"=clr.seq)), selectInput("myColor", "color", choices=clr.edge), sliderInput("myTrans", "transparency", min=0, max=1, value=0) ), tags$hr(), checkboxInput("myValCm", div("Values, Cumulate", class="view"), FALSE), conditionalPanel(condition="input.myValCm == true", checkboxInput("myValues", "values", value=FALSE), selectInput("myCumlt", "cumulate", choices=list("off", "on", "both")) ), tags$hr(), checkboxInput("mySmooth", div("Smooth", class="view"), FALSE), conditionalPanel(condition="input.mySmooth == true", checkboxInput("myDens", "density", TRUE), checkboxInput("myHist", "show_histogram", TRUE), checkboxInput("myRug", "rug", FALSE), radioButtons("myType", "type", c("general"="general", "normal"="normal", "both"="both")), uiOutput("slider_bndwd"), selectInput("myFill_gen", "fill_general", choices=clr.den_g), selectInput("myFill_nrm", "fill_normal", choices=clr.den_n) ), tags$hr(), checkboxInput("do_pdf", div("Save", class="view"), FALSE), conditionalPanel(condition="input.do_pdf == true", sliderInput("w", "width (inches):", min=3, max=20, value=8), sliderInput("h", "height (inches):", min=3, max=20, value=6), checkboxInput("do_cmt", "include comments in R file", TRUE), actionButton(inputId="btn_pdf", "Save"), 
tags$p(div("Save pdf file and R code file", style="margin-top:.25em;")) ), tags$hr(), checkboxInput("do_help", div("Help", class="view"), FALSE), ), # end sidebarPanel mainPanel( plotOutput('myPlot'), verbatimTextOutput("summary"), plotOutput("saved_plot"), textOutput("help") ) ) # end pageWithSidebar ) # end tabPanel 2 ) # end tabsetPanel ) # end fluidPage server <- function(input, output, session) { options(shiny.maxRequestSize=50*1024^2) # max upload file size is 50MB v <- reactiveValues() v$x.new <- FALSE # ------- Read and Display Data ----------- # ----------------------------------------- # process the URL for reading from the web theURL <- eventReactive(input$submitURL, { input$myURL }) data <- reactive({ if (input$fSource == "local") { shiny::req("input$myFile") myPath <- input$myFile$datapath theRead <- input$myFile$name } if (input$fSource == "web") { url <- theURL() if (!(grepl("http://", url))) url <- paste("http://", url, sep="") myPath <- url theRead <- myPath } shiny::req(myPath) if (input$fType == "Excel") { library(openxlsx) if (grepl(".xlsx", myPath, fixed=TRUE)) { d <- read.xlsx(myPath) } else { message("\n>>> Excel file must have file type of .xlsx <<<\n\n") stopApp() } } if (input$fType == "Text") { if ((grepl(".csv", myPath, fixed=TRUE)) || (grepl(".txt", myPath, fixed=TRUE))) { d <- read.csv(myPath, sep=input$sep, dec=input$decimal, na.strings="") # default is NOT a blank char missing } else { message("\n>>> Text file must have file type of .csv or .txt <<<\n\n") stopApp() } } # end fType is "Text" updateSelectInput(session, inputId="x.col", label="x variable", choices=c("Select a numerical variable" = "", names(d)[sapply(d, is.numeric)])) return(d) }) # end reactive() output$d.show <- renderUI({ shiny::req(data()) output$nrows <- renderText({paste("Number of data rows:", nrow(data()))}) output$ncols <- renderText({paste("Number of variables:", ncol(data()))}) if (nrow(data()) > 10) radioButtons("d.show", HTML("<h5 class='soft'>Rows to 
display</h5>"), c("First 10"="head", "Last 10"="tail", "Random 10"="random", "All"="all")) }) output$d.table <- renderTable({ if (is.null(input$d.show)) data() else { nr <- min(11, nrow(data())) if (nr == 11) { if (input$d.show == "all") data() else if (input$d.show == "head") head(data(), n=10) else if (input$d.show == "tail") tail(data(), n=10) else if (input$d.show == "random") { dd <- data() dd[.(random(10)), ] } } } }, striped=TRUE) # end renderTable # ----------------------------------------- # ------- Bin Width and Bin Start --------- # ----------------------------------------- # get default bin_width and bin_start values for initial histogram # and bin width and start sliders # default bin width is v$bw observeEvent(input$x.col, { # processed while still on Get Data tab v$x.new <- TRUE x.name <- input$x.col x <- data()[, x.name] v$min.x <- min(x, na.rm=TRUE) max.x <- max(x, na.rm=TRUE) h <- suppressWarnings(hist(x, plot=FALSE, breaks="Sturges")) v$bw <- h$breaks[2]-h$breaks[1] if (v$bw == 0.5) v$bw <- 1 v$rng <- max.x - v$min.x v$bw1 <- v$rng/45 if (v$min.x > 1) v$bw1 <- floor(v$bw1) if (v$bw1 == 0) v$bw1 <- 0.5 v$bw2 <- v$rng/2.5 if (v$bw2 > 5) v$bw2 <- ceiling(v$bw2) v$bw1 <- round(v$bw1, 3) v$bw2 <- round(v$bw2, 3) pret <- pretty(c((v$min.x-(.01*v$min.x)), max.x))[1] v$bs1 <- pret - (v$bw) v$bs <- pret v$bs2 <- v$min.x if(v$bs1 > v$bs2) v$bs2 <- v$bs1 if (abs(v$min.x) > 1) { v$bs1 <- floor(v$bs1) v$bs2 <- floor(v$bs2) } updateSliderInput(inputId="slider_bw", label="bin_width", min=NA, max=NA, value=NA) updateSliderInput(inputId="slider_bs", label="bin_start", min=NA, max=NA, value=NA) }) # ---------- Density Bandwidth ------------ # ----------------------------------------- get_bw <- function(x) { bw <- bw.nrd0(x) irep <- 0 repeat { # iterated value of bw irep <- irep + 1 d.gen <- suppressWarnings(density(x, bw)) # no missing data xd <- diff(d.gen$y) flip <- 0 for (j in 2:length(xd)) if (sign(xd[j-1]) != sign(xd[j])) flip <- flip + 1 if (flip > 1 && irep 
<= 25) bw <- 1.1 * bw else break; } # end repeat v$bndwd <- bw # cutoff of 7 to keep bw*.15 > 1 v$bndwd1 <- ifelse (bw>7, floor(bw*0.15), round(bw*0.15, 2)) if (v$bndwd1 == 0) v$bndwd1 <- 0.00001 v$bndwd2 <- ifelse (bw>7, ceiling(bw*1.5), round(bw*1.5, 2)) } observeEvent(input$x.col, { # if switch variable if (input$myDens) { x.name <- input$x.col shiny::req(x.name) x <- na.omit(data()[, x.name]) get_bw(x) sliderInput(inputId="slider_bndwd", label="bandwidth", min=v$bndwd1, max=v$bndwd2, value=v$bndwd) } }) # get default band width and min, max for input slider when # "parameters" button is checked observeEvent(input$myDens, { if (input$myDens) { x.name <- input$x.col shiny::req(x.name) x <- na.omit(data()[, x.name]) get_bw(x) } }) # band width slider, only activates for a variable change # runs whenever an input$ in the function changes output$slider_bndwd <- renderUI({ if (!is.null(v$bndwd)) { sliderInput(inputId="slider_bndwd", label="bandwidth", min=v$bndwd1, max=v$bndwd2, value=v$bndwd) } }) # ------------ The Histogram -------------- # ----------------------------------------- output$myPlot <- renderPlot({ if (input$myBins) { if (is.null(input$slider_bw)) { # bin_width slider, only activates for a variable change output$slider_bw <- renderUI({ req(!is.null(v$bw1)) sliderInput(inputId="slider_bw", label="bin_width", min=v$bw1, max=v$bw2, value=v$bw) }) } # bin_start slider, only activates for a variable change if (is.null(input$slider_bs)) { output$slider_bs <- renderUI({ req(!is.null(v$bs1)) sliderInput(inputId="slider_bs", label="bin_start", min=v$bs1, max=v$bs2, value=v$bs) }) } } # the sliders update later, so the old value can be current for a new var # update to avoid the extra computation and distracting intermediate plot if (!is.null(input$slider_bw)) { if (input$slider_bw < v$bw1 || input$slider_bw > v$bw2) { if (!input$myBins) updateSliderInput(session,"slider_bw", min=v$bw1, max=v$bw2, value=v$bw) req(input$slider_bw >= v$bw1) # takes a while for 
the update req(input$slider_bw <= v$bw2) } } if (!is.null(input$slider_bs)) { if (input$slider_bs < v$bs1 || input$slider_bs > v$bs2) { if (!input$myBins) updateSliderInput(session,"slider_bs", min=v$bs1, max=v$bs2, value=v$bs) req(input$slider_bs >= v$bs1) # takes a while for the update req(input$slider_bs <= v$bs2) } } # switching to a new var, v$x.new is TRUE, initiates the histogram # computations twice, so skip the first histogram and take the second go.new <- v$x.new if (go.new) { v$x.new <- FALSE req(!go.new) } # when Bins button clicked do not want any new re-renderings, but get two # this hack stops the first re-rendering for the first instance # slider values are only null before any click on the Bins button if (input$myBins) req(!is.null(input$slider_bw)) shiny::req(input$x.col) x.name <- input$x.col x <- data()[, x.name] y.name <- paste("Count of", x.name) # when beginning, sliders will be NULL in.bw <- ifelse (is.null(input$slider_bw), v$bw, input$slider_bw) in.bs <- ifelse (is.null(input$slider_bs), v$bs, input$slider_bs) v$in.den <- ifelse (input$mySmooth, TRUE, FALSE) out <- paste("Histogram(", x.name, sep="") if (!v$in.den) { v$h <- Histogram(x, data=NULL, bin_width=in.bw, bin_start=in.bs, bin_end=NULL, fill=input$myFill, color=input$myColor, transparency=input$myTrans, values=input$myValues, cumulate=input$myCumlt, xlab=x.name, ylab=y.name, quiet=TRUE) p_bin_width <- in.bw == v$bw p_bin_start <- in.bs == v$bs p_fill <- input$myFill == "#96AAC3" p_color <- input$myColor == "off" p_trans <- input$myTrans == 0 p_values <- input$myValues == FALSE p_cumul <- input$myCumlt == "off" if (!p_bin_width) out <- paste(out, ", bin_width=", in.bw, sep="") if (!p_bin_start) out <- paste(out, ", bin_start=", in.bs, sep="") if (!p_fill) out <- paste(out, ", fill=\"", input$myFill, "\"", sep="") if (!p_color) out <- paste(out, ", color=\"", input$myColor, "\"", sep="") if (!p_trans) out <- paste(out, ", transparency=", input$myTrans, sep="") if (!p_values) out <- 
paste(out, ", values=", input$myValues, sep="") if (!p_cumul) out <- paste(out, ", cumulate=\"", input$myCumlt, "\"", sep="") } else { # density plot shiny::req(input$slider_bndwd) fg.rgb <- col2rgb(input$myFill_gen) v$fg.trns <- rgb(fg.rgb[1], fg.rgb[2], fg.rgb[3], alpha=80, maxColorValue=255) fn.rgb <- col2rgb(input$myFill_nrm) v$fn.trns <- rgb(fn.rgb[1], fn.rgb[2], fn.rgb[3], alpha=80, maxColorValue=255) v$h <- Histogram(x, data=NULL, bin_width=in.bw, bin_start=in.bs, density=input$myDens, rug=input$myRug, type=input$myType, bandwidth=input$slider_bndwd, show_histogram=input$myHist, fill_general=v$fg.trns, fill_normal=v$fn.trns, xlab=x.name, ylab=y.name, quiet=TRUE) p_dens <- input$myDens == FALSE p_rug <- input$myRug == FALSE p_type <- input$myType == "general" p_bw <- (abs(input$slider_bndwd-v$bndwd) < 1) p_hist <- input$myHist == TRUE p_fill_general <- input$myFill_gen == "steelblue3" p_fill_normal <- input$myFill_nrm == "pink1" if (!p_dens) out <- paste(out, ", density=", input$myDens, sep="") if (!p_rug) out <- paste(out, ", rug=", input$myRug, sep="") if (!p_type) out <- paste(out, ", type=\"", input$myType, "\"", sep="") if (!p_bw) out <- paste(out, ", bandwidth=", input$slider_bndwd, sep="") if (!p_hist) out <- paste(out, ", show_histogram=",input$myHist, sep="") if (!p_fill_general) out <- paste(out, ", fill_general=\"", input$myFill_gen, "\"", sep="") if (!p_fill_normal) out <- paste(out, ", fill_normal=\"", input$myFill_nrm, "\"", sep="") } # end dens out <- paste(out, ")", sep="") cat(out, "\n") v$code <- out # save the code for a pdf file # print stats output$summary <- renderPrint({ shiny::req(v$h) h <- v$h # v$go <- TRUE # if (v$go) { if (!v$in.den) out2 <- c(h$out_summary, " ", h$out_outliers, " ", h$out_freq) else out2 <- c(h$out_stats, " ", h$out_ss, " ", h$out_outliers) for (i in 1:length(out2)) cat(out2[i], "\n") # } }) v$x.new <- FALSE }) # end renderPlot # clicking on the Save button generates a pdf file plotInput <- 
eventReactive(input$btn_pdf, { code <- v$code x.name <- input$x.col shiny::req(x.name) x <- data()[, x.name] y.name <- paste("Count of", x.name) pdf.fname <- paste("hs_", x.name, ".pdf", sep="") pdf.path <- file.path(path.expand("~"), pdf.fname) # styles before re-set in interact() were saved style(lab_cex=getOption("l.cex")) style(axis_cex=getOption("l.axc")) if (!v$in.den) Histogram(x, data=NULL, bin_width=input$slider_bw, bin_start=input$slider_bs, fill=input$myFill, color=input$myColor, transparency=input$myTrans, values=input$myValues, cumulate=input$myCumlt, xlab=x.name, ylab=y.name, quiet=TRUE, pdf_file=pdf.path, width=as.numeric(input$w), height=as.numeric(input$h)) else # density Histogram(x, data=NULL, bin_width=input$slider_bw, bin_start=input$slider_bs, density=input$myDens, rug=input$myRug, type=input$myType, bandwidth=input$slider_bndwd, show_histogram=input$myHist, fill_general=v$fg.trns, fill_normal=v$fn.trns, xlab=x.name, ylab=y.name, quiet=TRUE, pdf_file=pdf.path, width=as.numeric(input$w), height=as.numeric(input$h)) # reset back to shiny setting style(lab_cex=1.201, axis_cex=1.011, suggest=FALSE) # R code r.fname <- paste("hs_", x.name, ".r", sep="") r.path <- file.path(path.expand("~"), r.fname) cat("\n") message("---------------------------------------------") cat("Files written to folder:", path.expand("~"), "\n") message("---------------------------------------------") cat("pdf file: ", pdf.fname, "\n") cat("R code file: ", r.fname, "\n") message("---------------------------------------------") cat("\n") if (input$fSource == "web") { url <- theURL() if (!(grepl("http://", url))) url <- paste("http://", url, sep="") } read.path <- ifelse (input$fSource == "local", input$myFile$name, url) read.code <- paste("d <- Read(\"", read.path, "\")", sep="") is.local <- !grepl("http://", read.path, fixed=TRUE) if (input$do_cmt) cat("# The # symbol indicates a comment rather than an R instruction\n\n", "# Begin the R session by loading the lessR 
functions ", "from the library\n", sep="", file=r.path) cat("library(\"lessR\")\n\n", file=r.path, append=TRUE) if (input$do_cmt) { cat("# Read your data into an R data table, the data frame, here d", "\n", sep="", file=r.path, append=TRUE) if (is.local) cat("# To browse for the data file, include nothing between the quotes", "\n", sep="", file=r.path, append=TRUE) } if (is.local && input$do_cmt) cat("d <- Read(\"\")\n\n", file=r.path, append=TRUE) if (is.local && input$do_cmt) { cat("# For security, the path to your data file is not available\n", "# Can replace PATHtoFILE in the following with the path\n", "# Remove the # sign in the first column and delete the previous ", "Read()\n", sep="", file=r.path, append=TRUE) read.path <- file.path("PATHtoFILE", read.path) read.code <- paste("# d <- Read(\"", read.path, "\")", sep="") } cat(read.code, "\n\n", file=r.path, append=TRUE) if (input$do_cmt) cat("# When you have your data table, do the histogram analysis of a\n", "# continuous variable in the data table\n", "# d is the default data frame name, so no need to specify\n", sep="", file=r.path, append=TRUE) cat(code, "\n\n", file=r.path, append=TRUE) anlys <- "Histogram()" if (input$do_cmt) cat("# If accessing data with a name other than d, must add data=NAME\n", paste("# to the", anlys, "call, where NAME is the name of your", "data frame"), "\n", sep="", file=r.path, append=TRUE) }) output$saved_plot <- renderPlot({ plotInput() }) # access web page help file output$help <- eventReactive(input$do_help, { shiny::req(input$do_help) fp <- system.file("shiny_apps/help/Histogram.html", package="lessR") browseURL(fp) }) } # end server shinyApp(ui, server)
14f73c4dac3f52c971dc210b6a4bdf0a7ea86868
80c5446c7fc608b0c2fffb4087c7610b7e40e9a6
/src/model.R
62f7e55be3b26a8196ee1778f857bcec333f1c02
[]
no_license
wikimedia/wikidata-analytics-dashboard
b2a54be3aa07767020641830147f8a59871c5e2f
428a8602f9dc6e8a6c5f261190e01f854124f795
refs/heads/master
2023-08-26T15:50:00.979846
2015-11-21T10:44:31
2015-11-21T10:44:31
41,689,407
3
1
null
null
null
null
UTF-8
R
false
false
4,551
r
model.R
get_local_datasets <- function(){ wikidata_social_media <<- get_local_set("wikidata_eng_social_media.tsv") wikidata_mailing_lists <<-get_local_set("wikidata_eng_mailing_lists.tsv") wikidata_mailing_lists_messages <<-get_local_set("wikidata_eng_mailing_lists_messages.tsv") wikidata_references_overview <<- get_local_set("wikidata_content_references_overview.tsv") wikidata_content_items <<- get_local_set("wikidata_content_items.tsv") wikidata_properties <<- get_local_set("wikidata_content_properties.tsv") wikidata_content_refstmts <<-get_local_set("wikidata_content_refstmts.tsv") wikidata_content_refstmts_wikipedia <<- get_local_set("wikidata_content_refstmts_wikipedia.tsv") wikidata_content_refstmts_other <<- get_local_set("wikidata_content_refstmts_other.tsv") wikidata_content_references <<-get_local_set("wikidata_content_references.tsv") wikidata_content_statement_ranks <<- get_local_set("wikidata_content_statement_ranks.tsv") wikidata_content_statement_item <<- get_local_set("wikidata_content_statement_item.tsv") wikidata_content_labels_item <<- get_local_set("wikidata_content_labels_item.tsv") wikidata_content_descriptions_item <<- get_local_set("wikidata_content_descriptions_item.tsv") wikidata_content_wikilinks_item <<- get_local_set("wikidata_content_wikimedia_links_item.tsv") wikidata_kpi_active_editors <<- get_local_set("wikidata_kpi_active_editors.tsv") return(invisible()) } get_local_sparql_results <- function(){ sparql1 <<- get_local_set("spql1.tsv", sparql_data_uri) sparql2 <<- get_local_set("spql2.tsv", sparql_data_uri) sparql3 <<- get_local_set("spql3.tsv", sparql_data_uri) sparql13 <<- get_local_set("spql13.tsv", sparql_data_uri) property_usage_counts <<- get_local_set("property_usage.tsv", sparql_data_uri) return(invisible()) } get_graphite_datasets <- function(){ out <- tryCatch({ con <- curl(agg_data_uri) readLines(con) }, warning = function(cond){ message(paste("URL caused a warning:", agg_data_uri)) message("Warning message:") message(cond) 
return(NULL) }, error = function(cond){ message(paste("URL does not exist:", agg_data_uri)) message("Error message:") message(cond) return(NA) }, finally = { wikidata_addUsagesForPage <<- get_csv_from_api("jobrunner.pop.wikibase-addUsagesForPage.ok.mw1004.count&format=csv",graphite_api_uri) }) return(out) } get_remote_datasets <- function(){ out <- tryCatch({ con <- curl(agg_data_uri) readLines(con) }, warning = function(cond){ message(paste("URL caused a warning:", agg_data_uri)) message("Warning message:") message(cond) return(NULL) }, error = function(cond){ message(paste("URL does not exist:", agg_data_uri)) message("Error message:") message(cond) return(NA) }, finally = { wikidata_edits <<- download_set("site_stats_total_edits.tsv", agg_data_uri) wikidata_active_users <<- download_set("site_stats_active_users.tsv", agg_data_uri) wikidata_pages <<- download_set("site_stats_total_pages.tsv", agg_data_uri) wikidata_gooditems <<- download_set("site_stats_good_articles.tsv", agg_data_uri) wikidata_daily_getclaims_property_use <<- download_set("getclaims_property_use.tsv", agg_data_uri) wikidata_facebook <<- download_set("social_facebook.tsv", agg_data_uri) wikidata_googleplus <<- download_set("social_googleplus.tsv", agg_data_uri) wikidata_twitter <<- download_set("social_twitter.tsv", agg_data_uri) wikidata_identica <<- download_set("social_identica.tsv", agg_data_uri) wikidata_irc <<- download_set("social_irc.tsv", agg_data_uri) }) return(out) } load_rdf_model <-function(){ metrics_model <<- load.rdf(metrics_rdf) } get_rdf_objects <- function(){ engagement_obj <<- get_rdf_individuals("<http://wikiba.se/metrics#Engagement>") content_obj <<- get_rdf_individuals("<http://wikiba.se/metrics#Content>") community_health_obj <<- get_rdf_individuals("<http://wikiba.se/metrics#Community_Health>") quality_obj <<- get_rdf_individuals("<http://wikiba.se/metrics#Quality>") partnerships_obj <<- get_rdf_individuals("<http://wikiba.se/metrics#Partnerships>") external_use_obj <<- 
get_rdf_individuals("<http://wikiba.se/metrics#External_Use>") internal_use_obj <<- get_rdf_individuals("<http://wikiba.se/metrics#Internal_Use>") daily_obj <<- get_rdf_individuals("<http://wikiba.se/metrics#Daily>") return(invisible()) }
9fde7b66d1c05612dac4e6eab69ff8d779b27a93
539bc13246703b33fd27135dbcaebcc2f2a8e432
/man/post_plan.Rd
e4c5c93924568b78fae2398ab634e16133d64f3c
[ "MIT" ]
permissive
emilyriederer/projmgr
26f420eaf8e93155e9373c0083b81aaa8b78757c
92017f8d02c65060d648c08059859f0ff03687b0
refs/heads/master
2023-02-07T07:14:22.471549
2023-01-21T11:56:32
2023-01-21T11:56:32
163,583,327
116
9
NOASSERTION
2023-01-21T11:47:52
2018-12-30T11:47:51
R
UTF-8
R
false
true
1,327
rd
post_plan.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plan-todo.R \name{post_plan} \alias{post_plan} \title{Post plan (milestones + issues) to GitHub repository} \usage{ post_plan(ref, plan, distinct = TRUE) } \arguments{ \item{ref}{Repository reference (list) created by \code{create_repo_ref()}} \item{plan}{Plan list as read with \code{read_plan()}} \item{distinct}{Logical value to denote whether issues with the same title as a current open issue should be allowed. Passed to \code{get_issues()}} } \value{ Dataframe with numbers (identifiers) of posted milestones and issues and issue title } \description{ Post custom plans (i.e. create milestones and issues) based on yaml read in by \code{read_plan}. Please see the "Building Custom Plans" vignette for details. } \examples{ \dontrun{ # This example uses example file included in pkg # You should be able to run example as-is after creating your own repo reference file_path <- system.file("extdata", "plan.yml", package = "projmgr", mustWork = TRUE) my_plan <- read_plan(file_path) post_plan(ref, my_plan) } } \seealso{ Other plans and todos: \code{\link{post_todo}()}, \code{\link{read_plan}()}, \code{\link{read_todo}()}, \code{\link{report_plan}()}, \code{\link{report_todo}()}, \code{\link{template_yaml}()} } \concept{plans and todos}
43adbdbb93eb6ddf101f32dd65a7e2f1df06fa20
1665f39a8fb6e36f9169b8778349a6e28874b0cc
/Second_Assisgnment/HW_2_UB50291058/ques_2.R
9c2029d939c146d8d157279a383ff81d27e53a6e
[]
no_license
kartik1611/Statistical-Data-Mining
8fb2a19803102672e1467281b41bdfcebfdd94ae
ec2d33f8823a2568e714dd9a6a3008532a260cc5
refs/heads/master
2020-04-30T19:53:09.763932
2019-04-05T17:57:07
2019-04-05T17:57:07
177,046,007
0
0
null
null
null
null
UTF-8
R
false
false
725
r
ques_2.R
rm(list = ls(all=T)) setwd("F:\\buffalo\\R\\sdma spring\\r\\1st_assignment_sdma\\Second_Assisgnment") #a) loading data data <- read.csv('Ch10Ex11.csv',header =TRUE) #b) Apply hierarchical clustering to the samples using correlation-based distance comp_model <- hclust(as.dist(1 - cor(data)), method = "complete") plot(comp_model) single_clus_model = hclust(as.dist(1 - cor(data)), method = "single") plot(single_clus_model) avg_clus_model = hclust(as.dist(1 - cor(data)),method = "average") plot(avg_clus_model) #c) using pca data_transpose = t(data) pca_data= prcomp(data_transpose) head(pca_data$rotation) load_overall = apply(pca_data$rotation, 1, sum) row= order(abs(load_overall), decreasing = TRUE) row[1:10]
959327ab5d2063dc3d0e86e1f3ee710abc1b2892
3582a4b32de3059cd2adc81a4f3074b63426ae1f
/EjercicioGGplot.R
fbafb5f4b38ac8016d1f1f053cd23d9c8f8bbffb
[]
no_license
annalawrenc/R
059c9ce463629fa7b264fc4918ddf457db006def
e835c36a098c4c85a15f754ae5296e3f0959aa6d
refs/heads/master
2020-04-02T12:45:56.563737
2016-07-17T20:31:56
2016-07-17T20:31:56
60,811,464
0
0
null
null
null
null
ISO-8859-1
R
false
false
597
r
EjercicioGGplot.R
library(ggplot2) # voy a esoger aleatoriamente 100 registros para que la gráfica se parezca a la del ejemplo: index_filtro <- sample(1:length(diamonds[[1]]), 100) ciendiamonds <- diamonds[index_filtro,] # también se puede hacerlo con sample_n y con dplyr library(dplyr) ciendiamonds <- sample_n (diamonds, 100, replace = FALSE) q<- ggplot(ciendiamonds, aes( x=carat, y= price)) # creamos gráfico vacio q c <- q + geom_point(aes(color=color)) # pintamos la capa de puntos c r <- c+ geom_smooth(method=lm, formula= y~x) # pintamos la capa de la linea de regresion r
17123348fec3182996769b1ecd2d2be3d47fd02e
7f3f667f127b7355d61af8b90df8ca6365fa9817
/Main/mainPredict.R
ff42bd8776b0061136ec60d70333f6060432a9ae
[]
no_license
jcombari/Rcode-for-recovery
8162253884734dcd10396c3d7d58f01350aa41ed
7655a03150e829485d2bdd84becec63cbac6115b
refs/heads/master
2022-12-12T15:22:53.109778
2020-08-29T16:54:14
2020-08-29T16:54:14
291,306,594
0
0
null
null
null
null
UTF-8
R
false
false
5,458
r
mainPredict.R
args = commandArgs(trailingOnly=TRUE) source("/home/mck/bpop-analytics-cobranzas/configuration/configEnv.R") configEnvironment() auxJson <- jsonlite::read_json(paste0(rootPath,"configuration/configJson.json")) predictPeriodo <<- "20180207" # Comprobar que se haya recibido al menos, un argumento. En caso contrario, devolver un error if (length(args)!=1) { stop("Es necesario el argumento: Periodo a predecir", call.=FALSE) } else { predictPeriodo = args[1] } # Leer funciones a ejecutar del JSON de configuracion processFunctionsDaily <- auxJson[["DataPreparationConfig"]][["processFunctionsDaily"]] processFunctionsMonthly <- auxJson[["DataPreparationConfig"]][["processFunctionsMonthly"]] expandFunctionsDaily <- auxJson[["DataPreparationConfig"]][["expandFunctionsDaily"]] %>% unlist expandFunctionsMonthly <- auxJson[["DataPreparationConfig"]][["expandFunctionsMonthly"]] %>% unlist expandFunctionsDaily <- setdiff(expandFunctionsDaily, "createTargetMaster") # TBD - Validar existencia de archivos ------------------------------------ dailyPeriodToExpand <- predictPeriodo dailyPeriodsToProcess <- seqDays(dayAddInteger(min(dailyPeriodToExpand), -120), dailyPeriodToExpand) monthlyPeriodsToProcess <- unique(str_sub(seqMonth(dayAddInteger(min(dailyPeriodToExpand), -120), dayAddInteger(max(dailyPeriodToExpand), -30)), 1, 6)) monthlyPeriodsToExpand <- c(unique(monthAddInteger(str_sub(dailyPeriodToExpand, 1, 6), -2)), unique(monthAddInteger(str_sub(dailyPeriodToExpand, 1, 6), -1))) # Procesando tablas ------------------------------------------------- # Ejecutar los scripts de procesamiento de datos originales for(auxFunction in processFunctionsMonthly){ cat("Ejecutando funcion de proceso de datos:", auxFunction, fill = TRUE) nonGeneratedPeriods <- returnNonExistentFilesS3(fun = auxFunction, periods = monthlyPeriodsToProcess, json = auxJson) for(auxPeriodo in nonGeneratedPeriods){ tryCatch(do.call(eval(auxFunction), list(period = auxPeriodo)), error = function(err){cat("Error: ", 
err[['message']], fill = TRUE)}) } } for(auxFunction in processFunctionsDaily){ cat("Ejecutando funcion de proceso de datos:", auxFunction, fill = TRUE) nonGeneratedPeriods <- returnNonExistentFilesS3(fun = auxFunction, periods = dailyPeriodsToProcess, json = auxJson) for(auxPeriodo in nonGeneratedPeriods){ cat("Ejecutando funcion de proceso de datos:", auxFunction, fill = TRUE) tryCatch(do.call(eval(auxFunction), list(period = auxPeriodo)), error = function(err){cat("Error: ", err[['message']], fill = TRUE)}) } } cat("Finalizando procesamiento de datos", fill = TRUE) # Expand Functions -------------------------------------------------- # Ejecutar los scripts de creacion de variable sinteticas for(auxFunction in expandFunctionsMonthly){ cat("Ejecutando funcion de creacion de sinteticas:", auxFunction, fill = TRUE) nonGeneratedPeriods <- returnNonExistentFilesS3(fun = auxFunction, periods = monthlyPeriodsToExpand, json = auxJson) for(auxPeriodo in nonGeneratedPeriods){ tryCatch(do.call(eval(auxFunction), list(period = auxPeriodo)), error = function(err){cat("Error: ", err[['message']], fill = TRUE)}) } } for(auxFunction in expandFunctionsDaily){ cat("Ejecutando funcion de creacion de sinteticas:", auxFunction, fill = TRUE) nonGeneratedPeriods <- returnNonExistentFilesS3(fun = auxFunction, periods = dailyPeriodToExpand, json = auxJson) for(auxPeriodo in nonGeneratedPeriods){ tryCatch(do.call(eval(auxFunction), list(period = auxPeriodo)), error = function(err){cat("Error: ", err[['message']], fill = TRUE)}) } } cat("Finalizando creacion de variables sinteticas", fill = TRUE) cat("Creando dataset", fill = TRUE) dataset <- createDataset(periodsToSelect = predictPeriodo) # Save dataset, in case there was a dataset already in the folder, move old dataset # to control version folder, with the date of creation of the dataset datasetFile <- osPathJoin(datasetPreparedPath, "Dataset_predict.csv") if(file.exists_s3(datasetFile) == TRUE){ auxDataset <- fread_s3(datasetFile) 
timeStamp <- file.info_s3(datasetFile) fwrite_s3(auxDataset, osPathJoin(oldDatasetPreparedPath, paste0("Dataset_predict_", timeStamp, ".csv"))) } fwrite_s3(dataset, datasetFile) # Entrenamos Modelo # Seleccionamos variables que no van a entrar al modelo # Estas variables o bien son la target, o pueden contener informacion de futuro varsTarget <- names(dataset)[str_detect(names(dataset), "TARG")] # Seleccionando columnas para no incluir en la modelizacion varsToRemove <- unique(c(varsTarget, "YEAR_MONTH", "ID_CLIENTE", "ID_CONTRATO", "YEAR_MONTH_DAY", colnames(dataset)[dataset[, lapply(.SD, class)]== "character"])) # Ejecutamos el modelo con los datos anteriores predictModel(dataset = dataset, modelName = "Collections_vPrueba", varsToRemove = varsToRemove, productIDColName = "ID_CONTRATO", clientIDColName = "ID_CLIENTE", periodColName = "YEAR_MONTH_DAY", periodo = predictPeriodo) # TBD - Output validation ------------------------------------------------- # TBD - Model Performance para el nuevo mes -------------------------------
fae60b29ec69ddbdd82f0e04129dee1fd5ae0dd9
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
/cran/paws.internet.of.things/man/iotanalytics_describe_dataset.Rd
80287b53631e62ff535b30d8aa4c26558dab6dfd
[ "Apache-2.0" ]
permissive
paws-r/paws
196d42a2b9aca0e551a51ea5e6f34daca739591b
a689da2aee079391e100060524f6b973130f4e40
refs/heads/main
2023-08-18T00:33:48.538539
2023-08-09T09:31:24
2023-08-09T09:31:24
154,419,943
293
45
NOASSERTION
2023-09-14T15:31:32
2018-10-24T01:28:47
R
UTF-8
R
false
true
2,917
rd
iotanalytics_describe_dataset.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/iotanalytics_operations.R \name{iotanalytics_describe_dataset} \alias{iotanalytics_describe_dataset} \title{Retrieves information about a dataset} \usage{ iotanalytics_describe_dataset(datasetName) } \arguments{ \item{datasetName}{[required] The name of the data set whose information is retrieved.} } \value{ A list with the following syntax:\preformatted{list( dataset = list( name = "string", arn = "string", actions = list( list( actionName = "string", queryAction = list( sqlQuery = "string", filters = list( list( deltaTime = list( offsetSeconds = 123, timeExpression = "string" ) ) ) ), containerAction = list( image = "string", executionRoleArn = "string", resourceConfiguration = list( computeType = "ACU_1"|"ACU_2", volumeSizeInGB = 123 ), variables = list( list( name = "string", stringValue = "string", doubleValue = 123.0, datasetContentVersionValue = list( datasetName = "string" ), outputFileUriValue = list( fileName = "string" ) ) ) ) ) ), triggers = list( list( schedule = list( expression = "string" ), dataset = list( name = "string" ) ) ), contentDeliveryRules = list( list( entryName = "string", destination = list( iotEventsDestinationConfiguration = list( inputName = "string", roleArn = "string" ), s3DestinationConfiguration = list( bucket = "string", key = "string", glueConfiguration = list( tableName = "string", databaseName = "string" ), roleArn = "string" ) ) ) ), status = "CREATING"|"ACTIVE"|"DELETING", creationTime = as.POSIXct( "2015-01-01" ), lastUpdateTime = as.POSIXct( "2015-01-01" ), retentionPeriod = list( unlimited = TRUE|FALSE, numberOfDays = 123 ), versioningConfiguration = list( unlimited = TRUE|FALSE, maxVersions = 123 ), lateDataRules = list( list( ruleName = "string", ruleConfiguration = list( deltaTimeSessionWindowConfiguration = list( timeoutInMinutes = 123 ) ) ) ) ) ) } } \description{ Retrieves information about a dataset. 
} \section{Request syntax}{ \preformatted{svc$describe_dataset( datasetName = "string" ) } } \keyword{internal}
f7b497215dba21107a8a0254e8065d516a913777
9a05746937795ddc6af174473f37b6cc159bdf57
/src/features/declare_factors.R
83aace789ccf9b3f2e2e00baaa96609fbc85467e
[]
no_license
abrahamalex13/carvana-lemon
1429c9c86d99216d524498e0a76f32aa7c714599
e2e3315b0c0acdceb8fa2fc676abe14b94d67547
refs/heads/master
2023-02-18T11:37:54.801153
2021-01-22T16:47:43
2021-01-22T16:47:43
240,712,560
0
1
null
null
null
null
UTF-8
R
false
false
2,059
r
declare_factors.R
#declare_factors.R
#ordering also guides modeling treatment.
#
# declare_factors_custom: convert selected columns of `df` to factors.
# Where an explicit level vector is supplied, the first level acts as the
# reference level for downstream modeling (e.g. "NULL" for WheelType,
# "HONDA" for Make_consol, "MEDIUM" for Size_consol, "TX" for VNST_consol).
# `unique(c(ref, unique(col)))` puts the chosen reference first while keeping
# every observed value; droplevels() then discards levels absent from `df`.
# Returns the modified data frame.
declare_factors_custom <- function(df) {

  # Wheel type: fixed level order with "NULL" (the string) as reference.
  df[["WheelType"]] <- factor(df[["WheelType"]], c("NULL", "Special", "Covers", "Alloy")) %>% droplevels()

  df[["Color_consol"]] <- factor(df[["Color_consol"]]) %>% droplevels()

  # Vehicle year: force 1 to the front of the level vector as reference.
  df[["VehYear_consol"]] <- factor(df[["VehYear_consol"]], unique(c(1, unique(df[["VehYear_consol"]])))) %>% droplevels()

  # Make: "HONDA" first so it is the reference level.
  df[["Make_consol"]] <- factor(df[["Make_consol"]], unique( c("HONDA", unique(df[["Make_consol"]])))) %>% droplevels()

  df[["Make_Model_consol"]] <- factor(df[["Make_Model_consol"]]) %>% droplevels()
  df[["Make_Model_SubModel_consol"]] <- factor(df[["Make_Model_SubModel_consol"]]) %>% droplevels()

  # Size: "MEDIUM" first so it is the reference level.
  df[["Size_consol"]] <- factor(df[["Size_consol"]], unique( c("MEDIUM", unique(df[["Size_consol"]]))) ) %>% droplevels()

  # Auction house: fixed level order with "MANHEIM" as reference.
  df[["Auction"]] <- factor(df[["Auction"]], c("MANHEIM", "OTHER", "ADESA")) %>% droplevels()

  df[["AUCGUART"]] <- factor(df[["AUCGUART"]])
  df[["PRIMEUNIT"]] <- factor(df[["PRIMEUNIT"]])

  # State: "TX" first so it is the reference level.
  df[["VNST_consol"]] <- factor(df[["VNST_consol"]], unique( c("TX", unique(df[["VNST_consol"]])))) %>% droplevels()

  df[["VNZIP1_consol"]] <- factor(df[["VNZIP1_consol"]])
  df[["engine_type_consol"]] <- factor(df[["engine_type_consol"]])
  df[["engine_vol_consol"]] <- factor(df[["engine_vol_consol"]])
  df[["engine_type_engine_vol_consol"]] <- factor(df[["engine_type_engine_vol_consol"]])
  df[["Make_engine_type_engine_vol_consol"]] <- factor(df[["Make_engine_type_engine_vol_consol"]])

  df[["BYRNO_consol"]] <- factor(df[["BYRNO_consol"]]) %>% droplevels()

  # Purchase-date parts: explicitly unordered factors (ordered = FALSE) so
  # models treat them as nominal categories rather than ordinal.
  df[["PurchDate_month"]] <- factor(df[["PurchDate_month"]], ordered = FALSE) %>% droplevels()
  df[["PurchDate_day"]] <- factor(df[["PurchDate_day"]])
  df[["PurchDate_wday"]] <- factor(df[["PurchDate_wday"]], ordered = FALSE) %>% droplevels()
  df[["PurchDate_month_PurchDate_wday_consol"]] <- factor(df[["PurchDate_month_PurchDate_wday_consol"]])

  return(df)
}
89b773c6fe33e9a7d09d80a03b89b408c263750d
26b29791c5827a146d4dee7a397acb0dbd7eca2d
/SNG/size of next gift.R
d0d0b5976e71ed12fa83d3c46b4bfbd0272d054d
[]
no_license
wangy63/Leverage-Subsampling
eb56cb846733f503debdd30e81b0839da3f68206
db9e51feb47d367903d8533ce8ce665746cd8dfc
refs/heads/master
2020-03-14T20:56:15.622939
2018-09-07T18:24:39
2018-09-07T18:24:39
122,065,097
1
0
null
null
null
null
UTF-8
R
false
false
3,442
r
size of next gift.R
# size of next gift.R
# Builds a per-donor, per-year giving history from several raw CSV extracts,
# then derives lagged-gift features (previous gift amount, gave-last-year).
#
# NOTE(review): this script uses `%>%`, group_by/summarise/mutate/filter/
# arrange/row_number and dcast but only attaches data.table here — presumably
# dplyr (and a dcast provider) are attached elsewhere in the session; confirm.
library(data.table)

# --- Entity master table: keep selected columns (by position) ----
entity<-read.csv("1_Entity.csv")
names(entity)
entity_clean<-entity[, c(1, 3, 6, 7)]

# --- Athletics extract: flag entities that appear in it ----
athletics <- read.csv("2_Athletics.csv")
names(athletics)
athletics_clean<-athletics[, c(1, 2)]
entity_clean$athlete<-0
entity_clean$athlete[entity$Entity.ID %in% athletics$Entity.ID] <- 1

# --- Giving transactions: keep id/date/amount; pull out a 4-digit year ----
givingdata<-read.csv("3_givingdata.csv")
names(givingdata)
giving_clean<-givingdata[, c(2, 3, 5)]
# assumes Transaction.Date puts the year in characters 7-10 (e.g.
# dd/mm/yyyy) — TODO confirm against the raw file.
giving_clean$Transaction.year <- substr(giving_clean$Transaction.Date,7,10)

# --- Degrees: collapse to one row per entity (count, latest year, school) ----
Degree<-read.csv("4_Degree.csv")
names(Degree)
degree_clean<-Degree[, c(1, 2, 3, 4)]
degree_clean<-cbind(degree_clean, d=rep(1, nrow(degree_clean)))%>%
  group_by(Entity.ID)%>%
  summarise(count.Degree=sum(d, na.rm=T),
            year=max(Degree.Year, na.rm=T),
            School=School.of.Graduation[1])

# --- Join degrees with gifts; aggregate gift amounts per entity-year ----
donate_year<-merge(degree_clean, giving_clean, by="Entity.ID")
donate_year<-donate_year[, c(1, 2, 3, 4, 5, 7)]
donate_year<-cbind(donate_year, d=rep(1, nrow(donate_year)))%>%
  group_by(Entity.ID, Transaction.year)%>%
  summarise(Amount=sum(Legal.Amount, na.rm=T),
            count.Degree=count.Degree[1],
            School=School[1],
            year=year[1])
donate_year<-merge(donate_year, entity_clean, by="Entity.ID", all=T)

# --- Contact info: keep preferred records with a non-empty id ----
contact <- read.csv("5_ContactInformation.csv")
names(contact)
contact_clean<-contact[, c(1, 2, 3, 6)]
contact_clean <- contact_clean %>% filter(Preferred.Indicator == 1 & Entity.ID != "")
donate_year<-merge(donate_year, contact_clean, by="Entity.ID", all=T)

# --- Participation: one column per category; derive Greek / overall level ----
part <- read.csv("7_ParticipationHistory.csv") # participation in activities, including dates
part<-dcast(part, Entity.ID ~ Participation.Category) # turns raw values into column names
part<-part%>%mutate(Part.Level=apply(part[,-1], 1, sum), Greek=FRTTY+SOROR) # Part.Level includes all not just these four/five
part<-filter(part, Greek<4)%>%select(Entity.ID, SRVCE, ALUEV, CHAPT, REUN, FRTTY, SOROR, Greek, Part.Level)
donate_year<-merge(donate_year, part, by="Entity.ID", all=T)

# Zero-fill all remaining NAs, then drop rows without a transaction year.
donate_year[is.na(donate_year)] <- 0
donate_year<-donate_year[which(donate_year$Transaction.year!=0), ]

# State lookup table (currently disabled).
#us.state <- read.csv("state_table.csv")
#us.state <- us.state %>% select(name, abbreviation, census_region_name, census_division_name)
#colnames(us.state)<- c("State.Full", "State", "Region", "SubRegion")
#us.state<-data.table(us.state[, c(1,2)])
#colnames(us.state)<-c("State", "Acronym")

# --- Feature engineering ----
final<-donate_year
final$Transaction.year<-as.numeric(final$Transaction.year)
final$year<-as.numeric(final$year)
# Years between (latest) graduation and each gift.
final$GradToGive<-final$Transaction.year-final$year

###################################################
# Rank each donor's gift-years chronologically (rank 1 = first gift-year).
groupfinal <- final %>%
  arrange(Entity.ID,Transaction.year)%>%
  group_by(Entity.ID) %>%
  mutate(rank=row_number())

# lastGave: amount of the donor's previous gift (0 for the first gift).
# Relies on the arrange() above having ordered rows by donor then year.
groupfinal$lastGave<-NA
for (i in 1: nrow(groupfinal)){
  if (groupfinal$rank[i]!=1){
    print(i)
    k<-i-1
    groupfinal$lastGave[i]<-groupfinal$Amount[k]
  } else {
    groupfinal$lastGave[i]<-0
  }
}

# GaveLastYear: "Yes" when the previous row for the same donor is the
# immediately preceding calendar year; first gifts are "No".
groupfinal$GaveLastYear<-NA
ptm <- proc.time()
for (i in 1: nrow(groupfinal)){
  print(i)
  if (groupfinal$rank[i]!=1){
    k<-i-1
    if (groupfinal$Transaction.year[i]-1==groupfinal$Transaction.year[k]){
      groupfinal$GaveLastYear[i]="Yes"
    } else {
      groupfinal$GaveLastYear[i]="No"
    }
  } else {
    groupfinal$GaveLastYear[i]<- "No"
  }
}
proc.time() - ptm

# --- Export ----
# NOTE(review): SNGdata is never created in this script — presumably it is
# derived from groupfinal in a missing step; confirm before running.
SNGdata<-SNGdata[, c(-1, -24)]
SNGdata$Greek <- ifelse(SNGdata$Greek == 0,0,1)
write.csv(SNGdata, "SNGdata.csv")
8715e815e186f1b35924ffe6e5ad5860e4bc18ac
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/Compounding/examples/pgfpoisson.Rd.R
b213d4cc7e2eed2f13c632b9ed3e54810e6b75d4
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
536
r
pgfpoisson.Rd.R
# Extracted example for pgfpoisson() from the Compounding package docs.
library(Compounding)

### Name: pgfpoisson
### Title: Function pgfpoisson
### Aliases: pgfpoisson

### ** Examples

params<-5
pgfpoisson(.2,params)

## The function is currently defined as
# Probability generating function of the Poisson distribution, evaluated
# elementwise: exp(theta * (s - 1)) with theta = params[1] (theta > 0).
pgfpoisson <- function(s, params) {
  # Warn once if any evaluation point falls outside [-1, 1].
  out_of_range <- s[abs(s) > 1]
  if (length(out_of_range) > 0) {
    warning("At least one element of the vector s are out of interval [-1,1]")
  }
  # The parameter vector is mandatory.
  if (missing(params)) {
    stop("Distribution parameters are not defined")
  }
  theta <- params[1]
  # A Poisson rate must be strictly positive.
  if (theta <= 0) {
    stop("Parameter of Poisson distribution must be positive")
  }
  exp(theta * (s - 1))
}
a2362d70c35da4e6e11adf279e57f4844151aa68
d746fef241f9a0e06ae48cc3b1fe72693c43d808
/tesseract/rotate/d7t59p-002.r
9f7245dc5a4b872cf9bf11da3ff529ec45246478
[ "MIT" ]
permissive
ucd-library/wine-price-extraction
5abed5054a6e7704dcb401d728c1be2f53e05d78
c346e48b5cda8377335b66e4a1f57c013aa06f1f
refs/heads/master
2021-07-06T18:24:48.311848
2020-10-07T01:58:32
2020-10-07T01:58:32
144,317,559
5
0
null
2019-10-11T18:34:32
2018-08-10T18:00:02
JavaScript
UTF-8
R
false
false
195
r
d7t59p-002.r
r=0.06 https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d7t59p/media/images/d7t59p-002/svc:tesseract/full/full/0.06/default.jpg Accept:application/hocr+xml
f4f01ec984439a9f7df7bfa1741efb8150dc75d9
056c5b033f1749218fdc8582c0be1e7876c1ba5f
/man/unnest_recursive.Rd
2527dccf41102a0b7e3045e0bb31dbd0a137a75d
[]
no_license
kippchicago/teachboostr
efddba0ce4c489fc24ea624a73d1215fe0ffb24c
0b4bf24ef26ed70e2e6d95e9b7e2ce98ae7cf9f6
refs/heads/master
2021-01-21T10:33:33.349146
2017-03-21T20:16:14
2017-03-21T20:16:14
83,447,315
1
0
null
null
null
null
UTF-8
R
false
true
533
rd
unnest_recursive.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/unnest_cols.R
\name{unnest_recursive}
\alias{unnest_recursive}
\title{Unnests a data frame of list-columns}
\usage{
unnest_recursive(x, id_col = "id")
}
\arguments{
\item{x}{data.frame with \code{N} columns, where one column is the \code{id_col} and the other \code{N-1} are all list-columns}

\item{id_col}{id column which will be used to join unnested lists back together}
}
\value{
a data frame
}
\description{
Unnests a data frame of list-columns
}
fcd1d1ee81ff37be903c0c382e5e31a1c45dd4f2
7e5e5139f817c4f4729c019b9270eb95978feb39
/Introduction to Tidyverse/Chapter 2-Data visualization/8.R
4d989b98835a01b7efa6637c5000fea276b395e6
[]
no_license
Pranav-Polavarapu/Datacamp-Data-Scientist-with-R-Track-
a45594a8a9078076fe90076f675ec509ae694761
a50740cb3545c3d03f19fc79930cb895b33af7c4
refs/heads/main
2023-05-08T19:45:46.830676
2021-05-31T03:30:08
2021-05-31T03:30:08
366,929,815
1
2
null
null
null
null
UTF-8
R
false
false
697
r
8.R
# Creating a subgraph for each continent
#
# Exercise (100 XP): facet a gapminder scatter plot so that each continent
# gets its own panel.
#
# Task: plot the 1952 gapminder data with population (pop) on a log-10
# x-axis and life expectancy (lifeExp) on the y-axis, one subplot per
# continent.

library(gapminder)
library(dplyr)
library(ggplot2)

# Restrict the data to the 1952 observations.
gapminder_1952 <- filter(gapminder, year == 1952)

# Population vs. life expectancy, faceted by continent, log-scaled x-axis.
ggplot(gapminder_1952, aes(x = pop, y = lifeExp)) +
  facet_wrap(~ continent) +
  geom_point() +
  scale_x_log10()
558ffec3a23cb96f72bcfd893a13608620b85617
fa6349ff93efd0670d7db0a441810670661738e8
/FizzBuzz/R/fizzbuzz.R
73c4d6255397263e9f3a3d04867832cc5d93e1b2
[]
no_license
petervegh/katas
5a1dec4c26d3084d9c474e1da2640b501ba61b56
c8e624c5f26a37c8f31638b76b2f4790b9262c2f
refs/heads/master
2020-12-11T21:21:37.193373
2017-05-11T20:16:35
2017-05-11T20:16:35
50,564,684
1
0
null
2017-05-11T20:22:03
2016-01-28T07:26:28
Clojure
UTF-8
R
false
false
338
r
fizzbuzz.R
# FizzBuzz kata.
# fizzBuzz: map one number to 'fizzBuzz' (divisible by 3 and 5), 'fizz'
# (by 3), 'buzz' (by 5), or the number itself otherwise.
fizzBuzz <- function(number) {
  by_three <- number %% 3 == 0
  by_five <- number %% 5 == 0
  if (by_three && by_five) {
    'fizzBuzz'
  } else if (by_three) {
    'fizz'
  } else if (by_five) {
    'buzz'
  } else {
    number
  }
}

# applyFizzBuzz: apply fizzBuzz over every element of `array`, print each
# result followed by a space, and return the list of results.
applyFizzBuzz <- function(array) {
  results <- lapply(array, fizzBuzz)
  for (item in results) {
    cat(item)
    cat(' ')
  }
  results
}

applyFizzBuzz(x <- 1:100)
17bab9fc5e47d0ccff26a96d36f4d0fdf4c097eb
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/turner/examples/meanlist.Rd.R
46f3e4f90acea0259371e9cdebc0fe8daf76456b
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
398
r
meanlist.Rd.R
# Extracted example for meanlist() from the turner package documentation.
library(turner)

### Name: meanlist
### Title: Mean of all elements in a list
### Aliases: meanlist

### ** Examples

# say you have some list
# (mixes an integer sequence with random uniform and normal draws)
list1 = list(1:5, runif(3), rnorm(4))

# get the mean of all elements in list1
meanlist(list1)

# say you have missing data
list2 = list(c(1:4, NA), runif(3), rnorm(4))

# get the mean of all elements in list2 removing NAs
# (na.rm=TRUE drops the NA before averaging)
meanlist(list2, na.rm=TRUE)
85578c215199b600d6b368c8d7012d805c39e83f
c88ca9dc733d68a694e64c6fccc15c138ee3b2db
/vis/vis_moletable.R
60cc7d812ba9912da9c16e7f8deaf897a887e05e
[ "MIT" ]
permissive
med-material/Whack_A_Mole_RShiny
4f4eb23278ae2899f7af53cc069b4d260a38776e
ef0c90197fc1b4037cd30d42a763c16c22879d4f
refs/heads/master
2023-01-28T17:24:50.251104
2023-01-25T08:21:58
2023-01-25T08:21:58
213,659,401
1
2
null
2020-12-08T09:38:26
2019-10-08T14:05:43
null
UTF-8
R
false
false
641
r
vis_moletable.R
library(dplyr)

# Summarise mole-related events from a Whack-A-Mole event log.
# Returns a data frame with one row per known event type, columns "Event"
# and "Count"; event types absent from `df` keep an NA count.
vis_moleTable <- function(df) {
  event_levels <- c("Mole Spawned", "Fake Mole Spawned", "Pointer Shoot",
                    "Mole Hit", "Mole Missed", "Fake Mole Hit")

  # Template holding every known event so missing ones still appear.
  template <- data.frame(Event = event_levels,
                         Count = rep(NA, length(event_levels)))

  # Count occurrences of each known event, in canonical level order.
  counts <- df %>%
    group_by(Event) %>%
    dplyr::summarise(Count = n()) %>%
    filter(Event %in% event_levels) %>%
    mutate(Event = factor(Event, levels = event_levels)) %>%
    arrange(Event)

  # Merge counts onto the template; keep only the joined count column.
  result <- template %>%
    left_join(counts, by = "Event") %>%
    mutate(Count = as.integer(Count.y), Count.x = NULL, Count.y = NULL)

  return(result)
}
1b56771431df40e8abadb42585ac71b3bf88006e
1a9ef448017a28bfffdfb78887022b46a6169507
/R/text-to-sentences.R
1c49e96871a4ba7982ade113ce85941b36d24775
[ "BSD-3-Clause", "BSD-2-Clause" ]
permissive
rtelmore/RDSTK
4ae28abbb12c937141c5a834dc46a010799d0f15
cdddc4d3647281155067bb434f54f12c27fdd3aa
refs/heads/master
2021-01-21T11:45:00.044495
2017-11-20T20:47:30
2017-11-20T20:47:30
1,686,614
19
12
null
2017-03-16T18:52:10
2011-05-01T02:08:32
R
UTF-8
R
false
false
1,363
r
text-to-sentences.R
#' @title Identifies sentences in a text string.
#'
#' @description
#' Posts a text string to the Data Science Toolkit text2sentences service
#' and returns the legitimate sentences (if they exist) found in it.
#'
#' @param text A string (hopefully) containing sentences.
#' @param session The CURLHandle object giving the structure for the options
#' and that will process the command. For curlMultiPerform, this is an object
#' of class code MultiCURLHandle-class.
#'
#' @return
#' A list containing
#' \item{sentences}{A string identifying the sentences in the text.}
#'
#' @seealso
#' \code{\link{curlPerform}},
#' \code{\link{getCurlHandle}},
#' \code{\link{dynCurlReader}}
#'
#' @references
#' http://www.datasciencetoolkit.org/developerdocs#text2sentences
#'
#' @examples
#' \dontrun{
#' sentences <- "But this does, it contains enough words. So does this
#'               one, it appears correct. This is long and complete enough
#'               too."
#' text2sentences(sentences)
#' }
#'
#' @export
text2sentences <- function(text, session = RCurl::getCurlHandle()) {
  # Endpoint built from the package-level base-URL option.
  endpoint <- paste(getOption("RDSTK_api_base"), "/text2sentences/", sep = "")
  # Dynamic reader accumulates the HTTP response body.
  reader <- RCurl::dynCurlReader()
  # POST the raw text to the service.
  RCurl::curlPerform(postfields = text,
                     url = endpoint,
                     post = 1L,
                     writefunction = reader$update,
                     curl = session)
  # Parse the JSON payload into an R list.
  rjson::fromJSON(reader$value())
}
7263d5ed9bd8047be0080d756c698ce897fc0db3
18f4631b4b312215877e5a65fff966089b993c8d
/LIB_RHESSys_outputFormat.r
a178cabc5adddfe0260a1bfbd32e65538d5798ee
[]
no_license
kkyong77/R-coded-scripts-for-RHESSys-calibration
42c93f00d8e6dcb7027141f3dc0527897e84be3b
7fc7c222c0afe36cde7a88abde8357d7bc1d68c9
refs/heads/master
2020-03-20T10:07:16.627513
2018-06-12T13:57:57
2018-06-12T13:57:57
null
0
0
null
null
null
null
UTF-8
R
false
false
80,948
r
LIB_RHESSys_outputFormat.r
source("~/Dropbox/LIB_Rscript/LIB_misc.r") source("~/Dropbox/LIB_Rscript/LIB_dailytimeseries2.r") arg=commandArgs(T) ##---------------------------------------- # option A single basin file # 1) convert a series of rhessys basin output at the same watershed into WSC format # 2) convert flow mm/day to volumn (maybe scale up) ) # 3) bias correction # 4) combine different single basin files into one # 2) convert a series of rhessys basin output at the same watershed (need to combine multiple sub-basin) into WSC format ## very importance assumption of subbasin.csv # it must have column names: id, area, grid # each row is a subbasin # the format used in the past is very confusing for the propose of this # suggest the past format should be used for developing GRASS extract and by running GRASS extract it should also yield "subbasin.csv" in the curremt format ##----------------------------------------- combineSubbasin2Basin=function(prefix, suffix, subbasin, label='', st=NA, ed=NA){ #prefix = 'output/rhessys' #suffix = '_param1' tryCatch({ # read the first file i=1 rhessysFile = read.table(paste(prefix,'_sub',subbasin[i,'id'],suffix,'_basin.daily',sep=''),skip=1,header=F ) rhessys.date = as.Date(paste(rhessysFile[,1], rhessysFile[,2], rhessysFile[,3],sep="-"),format="%d-%m-%Y") rhessysCol = ncol(rhessysFile) subArea = as.numeric(subbasin[,'area']); totalArea = 1/sum(subArea) subGrid = as.numeric(subbasin[,'grid']); totalGrid = 1/sum(subGrid) if( is.na(st) | is.na(ed)){ period = rhessys.date period.dailytimeSeriesMatch = rep(T,length(period)) rhessys.dailytimeSeriesMatch = rep(T,length(period)) }else{ period=seq.Date(from=as.Date(st), to=as.Date(ed) ,by="day") tmp = match3DailyTimeSeries(rhessys.date, period) ### assume period is the most narrow band rhessys.dailytimeSeriesMatch = tmp$xSelect period.dailytimeSeriesMatch = tmp$ySelect } holding = array(NA, dim=c( sum(rhessys.dailytimeSeriesMatch), ifelse(rhessysCol>=43,25,22), nrow(subbasin) ))# subbasin#, time, vars for(i 
in 1:nrow(subbasin)){ if(i>1){ rhessysFile = read.table(paste(prefix,'_sub',subbasin[i,'id'],suffix,'_basin.daily',sep=''),skip=1,header=F ) } # 1) 7 = sat def z # 2) 8 = sat def # 3) 9 = rz storage # 4) 10 = unsat storage # 5) 13 = cap # 6) 14 = evap # 7) 16 = trans # 8) 17 = baseflow # 9) 18 = return # 10) 19 = streamflow # 11) 20 = psn # 12) 21 = lai # 13) 22 = gw out # 14) 23 = gw storage # 15) 24 = detention storage # 16) 25 = % sat area # 17) 26 = litter store # 18) 27 = canopy storage # 19) 33 = pet # 20) 35 = precip # 21) 38 = tmax (37) # 22) 39 = tmin (38) # 23) 40 = tavg (NA) # 24) 41 = vpd (NA) # 25) 43 = recharge (NA) if(rhessysCol>=43){ # 5.20 holding[,,i]=as.matrix(rhessysFile[rhessys.dailytimeSeriesMatch,c(7,8,9,10,13,14,16,17,18,19,20,21,22,23,24,25,26,27,33,35,38,39,40,41,43)]) }else{ # 5.18 holding[,,i]=as.matrix(rhessysFile[rhessys.dailytimeSeriesMatch,c(7,8,9,10,13,14,16,17,18,19,20,21,22,23,24,25,26,27,33,35,37,38)]) } system(paste("rm ",prefix,'_sub',subbasin[i,'id'],suffix,"*params",sep='')) system(paste("rm ",prefix,'_sub',subbasin[i,'id'],suffix,"*monthly",sep='')) system(paste("rm ",prefix,'_sub',subbasin[i,'id'],suffix,"*yearly",sep='')) system(paste("rm ",prefix,'_sub',subbasin[i,'id'],suffix,"*hourly",sep='')) }#i if(rhessysCol>=43){ basin = cbind( as.numeric(format(period[period.dailytimeSeriesMatch],"%d")),#1 as.numeric(format(period[period.dailytimeSeriesMatch],"%m")),#2 as.numeric(format(period[period.dailytimeSeriesMatch],"%Y")),#3 rep(0,length(period[period.dailytimeSeriesMatch])),#4 rep(0,length(period[period.dailytimeSeriesMatch])),#5 rep(0,length(period[period.dailytimeSeriesMatch])),#6 (holding[,1,]%*% subArea)*totalArea,#7 satz (holding[,2,]%*% subArea)*totalArea,#8 satdef (holding[,3,]%*% subArea)*totalArea,#9 rz (holding[,4,]%*% subArea)*totalArea,#10 unsat rep(0,length(period[period.dailytimeSeriesMatch])),#11 rep(0,length(period[period.dailytimeSeriesMatch])),#12 (holding[,5,]%*% subArea)*totalArea,#13 cap 
(holding[,6,]%*% subArea)*totalArea,#14 evap rep(0,length(period[period.dailytimeSeriesMatch])),#15 (holding[,7,]%*% subArea)*totalArea,#16 trans (holding[,8,]%*% subArea)*totalArea,#17 baseflow (holding[,9,]%*% subArea)*totalArea,#18 returnflow (holding[,10,]%*% subArea)*totalArea,#19 flow (holding[,11,]%*% subArea)*totalArea,#20 psn (holding[,12,]%*% subArea)*totalArea,#21 LAI (holding[,13,]%*% subArea)*totalArea,#22 gwq (holding[,14,]%*% subArea)*totalArea,#23 gw store (holding[,15,]%*% subArea)*totalArea,#24 detention store (holding[,16,]%*% subArea)*totalArea,#25 sat area (holding[,17,]%*% subArea)*totalArea,#26 litter store (holding[,18,]%*% subArea)*totalArea,#27 canopy store rep(0,length(period[period.dailytimeSeriesMatch])),#28 rep(0,length(period[period.dailytimeSeriesMatch])),#29 rep(0,length(period[period.dailytimeSeriesMatch])),#30 rep(0,length(period[period.dailytimeSeriesMatch])),#31 rep(0,length(period[period.dailytimeSeriesMatch])),#32 (holding[,19,]%*% subArea)*totalArea,#33 pet rep(0,length(period[period.dailytimeSeriesMatch])),#34 (holding[,20,]%*% subArea)*totalArea,#35 rain rep(0,length(period[period.dailytimeSeriesMatch])),#36 rep(0,length(period[period.dailytimeSeriesMatch])),#37 (holding[,21,]%*% subArea)*totalArea,#38 tmax (holding[,22,]%*% subArea)*totalArea,#39 tmin (holding[,23,]%*% subArea)*totalArea,#40 tavg (holding[,24,]%*% subArea)*totalArea,#41 vpd rep(0,length(period[period.dailytimeSeriesMatch])),#42 (holding[,25,]%*% subArea)*totalArea #43 recharge ) colnames(basin)=c( "day",#1 "month",#2 "year",#3 '',#4 '',#5 '',#6 'satz',#7 'satdef',#8 'rz',#9 'unsat',#10 '',#11 '',#12 'cap',#13 'evap',#14 '',#15 'trans',#16 'baseflow',#17 'returnflow',#18 'streamflow',#19 'psn',#20 'lai',#21 'gwq',#22 'gwstore',#23 'detentionstore',#24 'satarea',#25 'litterstore',#26 'canopystore',#27 '',#28 '',#29 '',#30 '',#31 '',#32 'pet',#33 '',#34 'precip',#35 '',#36 '',#37 'tmax',#38 'tmin',#39 'tavg',#40 'vpd',#41 '',#42 'rechargre'#43 ) }else{ basin 
= cbind( as.numeric(format(period,"%d")),#1 as.numeric(format(period,"%m")),#2 as.numeric(format(period,"%Y")),#3 rep(0,length(period[period.dailytimeSeriesMatch])),#4 rep(0,length(period[period.dailytimeSeriesMatch])),#5 rep(0,length(period[period.dailytimeSeriesMatch])),#6 (holding[,1,]%*% subArea)*totalArea,#7 satz (holding[,2,]%*% subArea)*totalArea,#8 satdef (holding[,3,]%*% subArea)*totalArea,#9 rz (holding[,4,]%*% subArea)*totalArea,#10 unsat rep(0,length(period[period.dailytimeSeriesMatch])),#11 rep(0,length(period[period.dailytimeSeriesMatch])),#12 (holding[,5,]%*% subArea)*totalArea,#13 cap (holding[,6,]%*% subArea)*totalArea,#14 evap rep(0,length(period[period.dailytimeSeriesMatch])),#15 (holding[,7,]%*% subArea)*totalArea,#16 trans (holding[,8,]%*% subArea)*totalArea,#17 baseflow (holding[,9,]%*% subArea)*totalArea,#18 returnflow (holding[,10,]%*% subArea)*totalArea,#19 flow (holding[,11,]%*% subArea)*totalArea,#20 psn (holding[,12,]%*% subArea)*totalArea,#21 LAI (holding[,13,]%*% subArea)*totalArea,#22 gwq (holding[,14,]%*% subArea)*totalArea,#23 gw store (holding[,15,]%*% subArea)*totalArea,#24 detention store (holding[,16,]%*% subArea)*totalArea,#25 sat area (holding[,17,]%*% subArea)*totalArea,#26 litter store (holding[,18,]%*% subArea)*totalArea,#27 canopy store rep(0,length(period[period.dailytimeSeriesMatch])),#28 rep(0,length(period[period.dailytimeSeriesMatch])),#29 rep(0,length(period[period.dailytimeSeriesMatch])),#30 rep(0,length(period[period.dailytimeSeriesMatch])),#31 rep(0,length(period[period.dailytimeSeriesMatch])),#32 (holding[,19,]%*% subArea)*totalArea,#33 pet rep(0,length(period[period.dailytimeSeriesMatch])),#34 (holding[,20,]%*% subArea)*totalArea#35 rain ) colnames(basin)=c( "day",#1 "month",#2 "year",#3 '',#4 '',#5 '',#6 'satz',#7 'satdef',#8 'rz',#9 'unsat',#10 '',#11 '',#12 'cap',#13 'evap',#14 '',#15 'trans',#16 'baseflow',#17 'returnflow',#18 'streamflow',#19 'psn',#20 'lai',#21 'gwq',#22 'gwstore',#23 'detentionstore',#24 
'satarea',#25 'litterstore',#26 'canopystore',#27 '',#28 '',#29 '',#30 '',#31 '',#32 'pet',#33 '',#34 'precip'#35 ) } write.table(basin,paste(prefix,suffix, label,"_basin.daily",sep=""),row.names=F,col.names=T) }, error = function(e){ print(paste(subbasin[i,'id']," is not here.",e,sep="")) })#try blocks }#function SingleBasinSeries2WSC=function( prefix, Jindex, outputPrefix, sitename=NA, period=NA){ # first step (everything is in terms of mm/day ) # 1) convert a series of rhessys basin output at the same watershed into WSC format outputname = paste(prefix,Jindex[1],'_basin.daily',sep='' ) tmp = read.table(outputname,header=F,skip=1) if(is.na(period)){ print('use RHESSys output period') period = as.Date(paste(tmp[,1], tmp[,2], tmp[,3],sep="-"),format="%d-%m-%Y") rhessys.dailytimeSeriesMatch = rep(T,length(period)) period.dailytimeSeriesMatch = rep(T,length(period)) print(range(period)) }else{ rhessys.date = as.Date(paste(tmp[,1], tmp[,2], tmp[,3],sep="-"),format="%d-%m-%Y") tmp = match2DailyTimeSeries(rhessys.date, period) ### assume period is the most narrow band rhessys.dailytimeSeriesMatch = tmp$xSelect period.dailytimeSeriesMatch = tmp$ySelect } tmp = unlist(strsplit(prefix,split='/')) if(length(tmp)>1){location = paste(tmp[1:(length(tmp)-1)],collapse='/')}else{location='.'} if(is.na(sitename)){sitename = tmp[length(tmp)]} jMax = length(Jindex) holding = matrix(NA,sum(period.dailytimeSeriesMatch), jMax) holding_et = matrix(NA, sum(period.dailytimeSeriesMatch), jMax) holding_pet = matrix(NA, sum(period.dailytimeSeriesMatch), jMax) holding_precip = matrix(NA, sum(period.dailytimeSeriesMatch), jMax) holding_tmax = matrix(NA, sum(period.dailytimeSeriesMatch), jMax) holding_tmin = matrix(NA, sum(period.dailytimeSeriesMatch), jMax) holding_tavg = matrix(NA, sum(period.dailytimeSeriesMatch), jMax) holding_vpd = matrix(NA, sum(period.dailytimeSeriesMatch), jMax) holding_recharge = matrix(NA, sum(period.dailytimeSeriesMatch), jMax) NewVersion=F for(j in 1:jMax){ 
outputname = paste(prefix,Jindex[j],'_basin.daily',sep='' ) tmp = read.table(outputname,header=F,skip=1) if(ncol(tmp)>=43){ # 5.20 holding[,j]=tmp[rhessys.dailytimeSeriesMatch,19] holding_et[,j]=tmp[rhessys.dailytimeSeriesMatch,14]+tmp[rhessys.dailytimeSeriesMatch,16] holding_pet[,j]=tmp[rhessys.dailytimeSeriesMatch,33] holding_precip[,j] = tmp[rhessys.dailytimeSeriesMatch,35]# rain mm tmax_ = tmp[rhessys.dailytimeSeriesMatch,38]# tmax C tmin_ = tmp[rhessys.dailytimeSeriesMatch,39]# tmin C holding_tmax[,j] = colMaxs( rbind(tmax_,tmin_)) holding_tmin[,j] = colMins( rbind(tmax_,tmin_)) holding_tavg[,j] = tmp[rhessys.dailytimeSeriesMatch,40]# tavg holding_vpd[,j] = tmp[rhessys.dailytimeSeriesMatch,41]# vpd holding_recharge[,j] = tmp[rhessys.dailytimeSeriesMatch,43]# recharge NewVersion=T }else{ # 5.18 holding[,j]=tmp[rhessys.dailytimeSeriesMatch,19] holding_et[,j]=tmp[rhessys.dailytimeSeriesMatch,14]+tmp[rhessys.dailytimeSeriesMatch,16] holding_pet[,j]=tmp[rhessys.dailytimeSeriesMatch,33] holding_precip[,j] = tmp[rhessys.dailytimeSeriesMatch,35]# rain mm tmax_ = tmp[rhessys.dailytimeSeriesMatch,37]# tmax C tmin_ = tmp[rhessys.dailytimeSeriesMatch,38]# tmin C holding_tmax[,j] = colMaxs( rbind(tmax_,tmin_)) holding_tmin[,j] = colMins( rbind(tmax_,tmin_)) } }#j result = cbind( format(period[period.dailytimeSeriesMatch],format='%Y'), format(period[period.dailytimeSeriesMatch],format='%m'), format(period[period.dailytimeSeriesMatch],format='%d'), holding ) colnames(result) = c('year','month','day', paste('streamflowmm',1:jMax,sep='_') ) write.csv(result,paste(location,'/',outputPrefix, sitename,'_streamflowmm.csv',sep=''),row.names=F) result = cbind( format(period,format='%Y'), format(period,format='%m'), format(period,format='%d'), holding_et ) colnames(result) = c('year','month','day', paste('et',1:jMax,sep='_') ) write.csv(result,paste(location,'/',outputPrefix, sitename,'_et.csv',sep=''),row.names=F) result = cbind( format(period,format='%Y'), 
format(period,format='%m'), format(period,format='%d'), holding_pet ) colnames(result) = c('year','month','day', paste('pet',1:jMax,sep='_') ) write.csv(result,paste(location,'/',outputPrefix, sitename,'_pet.csv',sep=''),row.names=F) result = cbind( format(period,format='%Y'), format(period,format='%m'), format(period,format='%d'), holding_precip ) colnames(result) = c('year','month','day', paste('precip',1:jMax,sep='_') ) write.csv(result,paste(location,'/',outputPrefix, sitename,'_precip.csv',sep=''),row.names=F) result = cbind( format(period,format='%Y'), format(period,format='%m'), format(period,format='%d'), holding_tmax ) colnames(result) = c('year','month','day', paste('tmax',1:jMax,sep='_') ) write.csv(result,paste(location,'/',outputPrefix, sitename,'_tmax.csv',sep=''),row.names=F) result = cbind( format(period,format='%Y'), format(period,format='%m'), format(period,format='%d'), holding_tmin ) colnames(result) = c('year','month','day', paste('tmin',1:jMax,sep='_') ) write.csv(result,paste(location,'/',outputPrefix, sitename,'_tmin.csv',sep=''),row.names=F) if(NewVersion){ result = cbind( format(period,format='%Y'), format(period,format='%m'), format(period,format='%d'), holding_tavg ) colnames(result) = c('year','month','day', paste('tavg',1:jMax,sep='_') ) write.csv(result,paste(location,'/',outputPrefix, sitename,'_tavg.csv',sep=''),row.names=F) result = cbind( format(period,format='%Y'), format(period,format='%m'), format(period,format='%d'), holding_vpd ) colnames(result) = c('year','month','day', paste('vpd',1:jMax,sep='_') ) write.csv(result,paste(location,'/',outputPrefix, sitename,'_vpd.csv',sep=''),row.names=F) result = cbind( format(period,format='%Y'), format(period,format='%m'), format(period,format='%d'), holding_recharge ) colnames(result) = c('year','month','day', paste('recharge',1:jMax,sep='_') ) write.csv(result,paste(location,'/',outputPrefix, sitename,'_recharge.csv',sep=''),row.names=F) } }#function WSC_combineUSGS2Lake=function( 
prefix, dailytimeSeriesMatch, replication, arealweight, outPrefix, scaler=1, NewVersion=F){ # assume usgs files num = length(prefix) time = matrix(NA,sum(unlist(dailytimeSeriesMatch[1])),3) hold_flow = array(NA,dim=c(sum(unlist(dailytimeSeriesMatch[1])),num, replication) ) hold_pet= array(NA,dim=c(sum(unlist(dailytimeSeriesMatch[1])),num, replication) ) hold_et= array(NA,dim=c(sum(unlist(dailytimeSeriesMatch[1])),num, replication) ) hold_rain= array(NA,dim=c(sum(unlist(dailytimeSeriesMatch[1])),num, replication) ) hold_tmax= array(NA,dim=c(sum(unlist(dailytimeSeriesMatch[1])),num, replication) ) hold_tmin= array(NA,dim=c(sum(unlist(dailytimeSeriesMatch[1])),num, replication) ) hold_satz= array(NA,dim=c(sum(unlist(dailytimeSeriesMatch[1])),num, replication) ) hold_unsat= array(NA,dim=c(sum(unlist(dailytimeSeriesMatch[1])),num, replication) ) hold_evap= array(NA,dim=c(sum(unlist(dailytimeSeriesMatch[1])),num, replication) ) hold_snow= array(NA,dim=c(sum(unlist(dailytimeSeriesMatch[1])),num, replication) ) hold_return= array(NA,dim=c(sum(unlist(dailytimeSeriesMatch[1])),num, replication) ) hold_psn= array(NA,dim=c(sum(unlist(dailytimeSeriesMatch[1])),num, replication) ) hold_lai= array(NA,dim=c(sum(unlist(dailytimeSeriesMatch[1])),num, replication) ) hold_tavg= array(NA,dim=c(sum(unlist(dailytimeSeriesMatch[1])),num, replication) ) hold_vpd= array(NA,dim=c(sum(unlist(dailytimeSeriesMatch[1])),num, replication) ) hold_recharge= array(NA,dim=c(sum(unlist(dailytimeSeriesMatch[1])),num, replication) ) ## missing tavg, VPD, recharge for(i in 1:num){ tmp = as.matrix(read.csv( paste(prefix[i],'_streamflowmm.csv',sep=''))) if(i==1){time = tmp[unlist(dailytimeSeriesMatch[i]),1:3] } hold_flow[,i,] = tmp[unlist(dailytimeSeriesMatch[i]),4:(replication+3)] tmp = as.matrix(read.csv( paste(prefix[i],'_pet.csv',sep=''))) hold_pet[,i,] = tmp[unlist(dailytimeSeriesMatch[i]),4:(replication+3)] tmp = as.matrix(read.csv( paste(prefix[i],'_et.csv',sep=''))) hold_et[,i,] = 
# NOTE(review): continuation of a combiner routine whose header is above this
# chunk; the first expression below completes an assignment begun on the
# previous line. Read phase: for each prefix[i] (presumably one per subbasin --
# confirm), load the per-variable replicate CSVs and keep the rows selected by
# dailytimeSeriesMatch[i], columns 4:(replication+3), in the 3-D hold_* arrays
# indexed [day, i, replicate].
tmp[unlist(dailytimeSeriesMatch[i]),4:(replication+3)] tmp = as.matrix(read.csv( paste(prefix[i],'_precip.csv',sep=''))) hold_rain[,i,] = tmp[unlist(dailytimeSeriesMatch[i]),4:(replication+3)] tmp = as.matrix(read.csv( paste(prefix[i],'_tmax.csv',sep=''))) hold_tmax[,i,] = tmp[unlist(dailytimeSeriesMatch[i]),4:(replication+3)] tmp = as.matrix(read.csv( paste(prefix[i],'_tmin.csv',sep=''))) hold_tmin[,i,] = tmp[unlist(dailytimeSeriesMatch[i]),4:(replication+3)] tmp = as.matrix(read.csv( paste(prefix[i],'_satz.csv',sep=''))) hold_satz[,i,] = tmp[unlist(dailytimeSeriesMatch[i]),4:(replication+3)] tmp = as.matrix(read.csv( paste(prefix[i],'_unsat.csv',sep=''))) hold_unsat[,i,] = tmp[unlist(dailytimeSeriesMatch[i]),4:(replication+3)] tmp = as.matrix(read.csv( paste(prefix[i],'_evap.csv',sep=''))) hold_evap[,i,] = tmp[unlist(dailytimeSeriesMatch[i]),4:(replication+3)] tmp = as.matrix(read.csv( paste(prefix[i],'_snow.csv',sep=''))) hold_snow[,i,] = tmp[unlist(dailytimeSeriesMatch[i]),4:(replication+3)] tmp = as.matrix(read.csv( paste(prefix[i],'_return.csv',sep=''))) hold_return[,i,] = tmp[unlist(dailytimeSeriesMatch[i]),4:(replication+3)] tmp = as.matrix(read.csv( paste(prefix[i],'_psn.csv',sep=''))) hold_psn[,i,] = tmp[unlist(dailytimeSeriesMatch[i]),4:(replication+3)] tmp = as.matrix(read.csv( paste(prefix[i],'_lai.csv',sep=''))) hold_lai[,i,] = tmp[unlist(dailytimeSeriesMatch[i]),4:(replication+3)] if(NewVersion){ tmp = as.matrix(read.csv( paste(prefix[i],'_tavg.csv',sep=''))) hold_tavg[,i,] = tmp[unlist(dailytimeSeriesMatch[i]),4:(replication+3)] tmp = as.matrix(read.csv( paste(prefix[i],'_vpd.csv',sep=''))) hold_vpd[,i,] = tmp[unlist(dailytimeSeriesMatch[i]),4:(replication+3)] tmp = as.matrix(read.csv( paste(prefix[i],'_recharge.csv',sep=''))) hold_recharge[,i,] = tmp[unlist(dailytimeSeriesMatch[i]),4:(replication+3)] } }#i comb_flow = matrix(NA,sum(unlist(dailytimeSeriesMatch[1])), replication) comb_pet = matrix(NA,sum(unlist(dailytimeSeriesMatch[1])), replication)
# Combine phase: allocate (matched-days x replication) comb_* matrices, then
# for each replicate j compute the areal-weighted mean across subbasins,
# hold_*[,,j] %*% arealweight / sum(arealweight). Flow alone is additionally
# multiplied by scaler*0.001 -- presumably a unit conversion toward the
# 'flowcmd' column name below (TODO confirm target unit).
comb_et = matrix(NA,sum(unlist(dailytimeSeriesMatch[1])), replication) comb_rain = matrix(NA,sum(unlist(dailytimeSeriesMatch[1])), replication) comb_tmax = matrix(NA,sum(unlist(dailytimeSeriesMatch[1])), replication) comb_tmin = matrix(NA,sum(unlist(dailytimeSeriesMatch[1])), replication) comb_satz = matrix(NA,sum(unlist(dailytimeSeriesMatch[1])), replication) comb_unsat = matrix(NA,sum(unlist(dailytimeSeriesMatch[1])), replication) comb_evap = matrix(NA,sum(unlist(dailytimeSeriesMatch[1])), replication) comb_snow = matrix(NA,sum(unlist(dailytimeSeriesMatch[1])), replication) comb_return = matrix(NA,sum(unlist(dailytimeSeriesMatch[1])), replication) comb_psn = matrix(NA,sum(unlist(dailytimeSeriesMatch[1])), replication) comb_lai = matrix(NA,sum(unlist(dailytimeSeriesMatch[1])), replication) comb_tavg = matrix(NA,sum(unlist(dailytimeSeriesMatch[1])), replication) comb_vpd = matrix(NA,sum(unlist(dailytimeSeriesMatch[1])), replication) comb_recharge = matrix(NA,sum(unlist(dailytimeSeriesMatch[1])), replication) for(j in 1: replication){ comb_flow[,j] = hold_flow[,,j]%*% arealweight/sum(arealweight)*scaler*0.001 comb_pet[,j] = hold_pet[,,j]%*% arealweight/sum(arealweight) comb_et[,j] = hold_et[,,j]%*% arealweight/sum(arealweight) comb_rain[,j] = hold_rain[,,j]%*% arealweight/sum(arealweight) comb_tmax[,j] = hold_tmax[,,j]%*% arealweight/sum(arealweight) comb_tmin[,j] = hold_tmin[,,j]%*% arealweight/sum(arealweight) comb_satz[,j] = hold_satz[,,j]%*% arealweight/sum(arealweight) comb_unsat[,j] = hold_unsat[,,j]%*% arealweight/sum(arealweight) comb_evap[,j] = hold_evap[,,j]%*% arealweight/sum(arealweight) comb_snow[,j] = hold_snow[,,j]%*% arealweight/sum(arealweight) comb_return[,j] = hold_return[,,j]%*% arealweight/sum(arealweight) comb_psn[,j] = hold_psn[,,j]%*% arealweight/sum(arealweight) comb_lai[,j] = hold_lai[,,j]%*% arealweight/sum(arealweight) if(NewVersion){ comb_tavg[,j] = hold_tavg[,,j]%*% arealweight/sum(arealweight) comb_vpd[,j] = hold_vpd[,,j]%*%
# Output phase (continues the NewVersion branch above): cbind each comb_*
# matrix with the year/month/day 'time' columns and write one
# '<outPrefix>_<var>.csv' per variable, columns named '<var>_1..<replication>'.
arealweight/sum(arealweight) comb_recharge[,j] = hold_recharge[,,j]%*% arealweight/sum(arealweight) } }#j result = cbind(time,comb_flow) colnames(result) = c('year','month','day', paste('flowcmd',1:replication,sep='_') ) write.csv(result,paste(outPrefix,'_flowcmd.csv',sep=''),row.names=F) result = cbind(time,comb_pet) colnames(result) = c('year','month','day', paste('pet',1:replication,sep='_') ) write.csv(result,paste(outPrefix,'_pet.csv',sep=''),row.names=F) result = cbind(time,comb_et) colnames(result) = c('year','month','day', paste('et',1:replication,sep='_') ) write.csv(result,paste(outPrefix,'_et.csv',sep=''),row.names=F) result = cbind(time,comb_rain) colnames(result) = c('year','month','day', paste('precip',1:replication,sep='_') ) write.csv(result,paste(outPrefix,'_precip.csv',sep=''),row.names=F) result = cbind(time,comb_tmax) colnames(result) = c('year','month','day', paste('tmax',1:replication,sep='_') ) write.csv(result,paste(outPrefix,'_tmax.csv',sep=''),row.names=F) result = cbind(time,comb_tmin) colnames(result) = c('year','month','day', paste('tmin',1:replication,sep='_') ) write.csv(result,paste(outPrefix,'_tmin.csv',sep=''),row.names=F) result = cbind(time,comb_satz) colnames(result) = c('year','month','day', paste('satz',1:replication,sep='_') ) write.csv(result,paste(outPrefix,'_satz.csv',sep=''),row.names=F) result = cbind(time,comb_unsat) colnames(result) = c('year','month','day', paste('unsat',1:replication,sep='_') ) write.csv(result,paste(outPrefix,'_unsat.csv',sep=''),row.names=F) result = cbind(time,comb_evap) colnames(result) = c('year','month','day', paste('evap',1:replication,sep='_') ) write.csv(result,paste(outPrefix,'_evap.csv',sep=''),row.names=F) result = cbind(time,comb_snow) colnames(result) = c('year','month','day', paste('snow',1:replication,sep='_') ) write.csv(result,paste(outPrefix,'_snow.csv',sep=''),row.names=F) result = cbind(time,comb_return) colnames(result) = c('year','month','day',
# Tail of the combiner: write the remaining combined series (return, psn, lai;
# plus tavg/vpd/recharge when NewVersion) and close the function.
#
# WSC_cutPeriod(prefix, period, label, prefixBias_='', NewVersion=F) follows.
# Purpose: trim the previously written WSC csv series to a common 'period'.
# It reads the bias-corrected flow ('_flowcmd_bias', from prefixBias which
# defaults to prefix) and the raw flow ('_flowcmd'), builds Date vectors from
# their day/month/year columns, and aligns both with 'period' via
# match3DailyTimeSeries -- 'period' is assumed to be the narrowest band.
paste('return',1:replication,sep='_') ) write.csv(result,paste(outPrefix,'_return.csv',sep=''),row.names=F) result = cbind(time,comb_psn) colnames(result) = c('year','month','day', paste('psn',1:replication,sep='_') ) write.csv(result,paste(outPrefix,'_psn.csv',sep=''),row.names=F) result = cbind(time,comb_lai) colnames(result) = c('year','month','day', paste('lai',1:replication,sep='_') ) write.csv(result,paste(outPrefix,'_lai.csv',sep=''),row.names=F) if(NewVersion){ result = cbind(time,comb_tavg) colnames(result) = c('year','month','day', paste('tavg',1:replication,sep='_') ) write.csv(result,paste(outPrefix,'_tavg.csv',sep=''),row.names=F) result = cbind(time,comb_vpd) colnames(result) = c('year','month','day', paste('vpd',1:replication,sep='_') ) write.csv(result,paste(outPrefix,'_vpd.csv',sep=''),row.names=F) result = cbind(time,comb_recharge) colnames(result) = c('year','month','day', paste('recharge',1:replication,sep='_') ) write.csv(result,paste(outPrefix,'_recharge.csv',sep=''),row.names=F) } }#function WSC_cutPeriod=function( prefix, period, label, prefixBias_ ='', NewVersion=F){ # after bias correction namelist = c( '_flowcmd_bias', '_flowcmd', '_et', '_pet', '_precip', '_tmax', '_tmin', '_satz', '_unsat', '_evap', '_snow', '_return', '_psn', '_lai' ) if(prefixBias_ == ''){ prefixBias = prefix; }else{ prefixBias = prefixBias_; } hold = as.matrix(read.csv( paste(prefixBias, namelist[1],'.csv',sep=''))) time = as.Date(paste(hold[,3], hold[,2], hold[,1],sep="-"),format="%d-%m-%Y") hold2 = as.matrix(read.csv( paste(prefix, namelist[2],'.csv',sep=''))) time2 = as.Date(paste(hold2[,3], hold2[,2], hold2[,1],sep="-"),format="%d-%m-%Y") tmp = match3DailyTimeSeries(time, time2, period) ### assume period is the most narrow band hold.dailytimeSeriesMatch = tmp$xSelect hold2.dailytimeSeriesMatch = tmp$ySelect #write.csv(hold[hold.dailytimeSeriesMatch,],paste(prefix, namelist[1],'_',label,'.csv',sep=''),row.names=F)
# Write the trimmed rows back out as '<prefix><label>_<var>.csv'. The namelist
# entries carry their own leading underscore, so output names contain a double
# underscore (e.g. '<label>__et.csv'); the raw-flow row mask
# (hold2.dailytimeSeriesMatch) is reused for every non-flow variable. The
# NewVersion branch repeats the same trim for tavg/vpd/recharge.
# The closing '}#function' is followed by the header of
# MultipleBasinSeries2WSC, whose body continues on the next line.
#write.csv(hold2[hold2.dailytimeSeriesMatch,],paste(prefix, namelist[2],'_',label,'.csv',sep=''),row.names=F) write.csv(hold[hold.dailytimeSeriesMatch,],paste(prefixBias, label,'_',namelist[1],'.csv',sep=''),row.names=F) write.csv(hold2[hold2.dailytimeSeriesMatch,],paste(prefix, label,'_',namelist[2],'.csv',sep=''),row.names=F) for(i in 3:length(namelist)){ hold = as.matrix(read.csv( paste(prefix, namelist[i],'.csv',sep=''))) write.csv(hold[hold2.dailytimeSeriesMatch,],paste(prefix, label,'_',namelist[i],'.csv',sep=''),row.names=F) }#i if(NewVersion){ hold = as.matrix(read.csv( paste(prefix, '_tavg','.csv',sep=''))) write.csv(hold[hold2.dailytimeSeriesMatch,],paste(prefix, label,'_','_tavg','.csv',sep=''),row.names=F) hold = as.matrix(read.csv( paste(prefix, '_vpd','.csv',sep=''))) write.csv(hold[hold2.dailytimeSeriesMatch,],paste(prefix, label,'_','_vpd','.csv',sep=''),row.names=F) hold = as.matrix(read.csv( paste(prefix, '_recharge','.csv',sep=''))) write.csv(hold[hold2.dailytimeSeriesMatch,],paste(prefix, label,'_','_recharge','.csv',sep=''),row.names=F) } }#function MultipleBasinSeries2WSC=function( prefix, sub, Jindex, outputPrefix, sitename=NA, period=NULL, subPrefix=NA, toLake=F,lakeArea=NA,label=''){ # first step (everything is in terms of mm/day ) # convert a series of rhessys basin output at the same watershed (need to combine multiple sub-basin) into WSC format # ls -l world_subbasin_???
# Combine RHESSys per-subbasin "*_basin.daily" outputs (one file per subbasin
# per replicate in `Jindex`) into single basin-wide WSC-format CSVs, one file
# per variable, with an area-weighted average across subbasins.
#
# prefix   : run-directory prefix; its last path component doubles as the
#            default `sitename`.
# sub      : CSV listing subbasins; columns used: 'id', 'grid' (cell-count
#            weights), 'area'.
# Jindex   : vector of replicate indices embedded in the basin.daily file names.
# outputPrefix, sitename, label: pieces of the output CSV names.
# period   : optional Date vector to subset the simulation to; when NULL the
#            full RHESSys output period is used.
# toLake   : if TRUE, also write a 'flowcmd' file (mm/day -> m^3/day over
#            lakeArea, which defaults to the summed subbasin area).
#
# Everything is in terms of mm/day.  Subbasin file list can be produced via:
#   ls -l world_subbasin_??? | awk '{print $9}'
MultipleBasinSeries2WSC = function(prefix, sub, Jindex, outputPrefix, sitename = NA,
                                   period = NULL, subPrefix = NA, toLake = FALSE,
                                   lakeArea = NA, label = '') {
  subFile = read.csv(sub, stringsAsFactors = FALSE)  # paste(sub, sep = '') was a no-op

  # Split `prefix` into directory ("location") and basename.
  parts = unlist(strsplit(prefix, split = '/'))
  if (length(parts) > 1) {
    location = paste(parts[1:(length(parts) - 1)], collapse = '/')
  } else {
    location = '.'
  }
  if (is.na(sitename)) sitename = parts[length(parts)]
  if (is.na(subPrefix)) subPrefix = '_world_subbasin_'
  if (is.na(lakeArea)) lakeArea = sum(subFile[, 'area'])

  # Read the first subbasin/first replicate once, only to establish the dates.
  outputname = paste(prefix, sitename, subPrefix, subFile[1, 'id'], '_',
                     Jindex[1], '_basin.daily', sep = '')
  tmp = read.table(outputname, header = FALSE, skip = 1)
  if (is.null(period)) {
    print('use RHESSys output period')
    period = as.Date(paste(tmp[, 1], tmp[, 2], tmp[, 3], sep = "-"), format = "%d-%m-%Y")
    rhessys.dailytimeSeriesMatch = rep(TRUE, length(period))
    period.dailytimeSeriesMatch = rep(TRUE, length(period))
    print(range(period))
  } else {
    rhessys.date = as.Date(paste(tmp[, 1], tmp[, 2], tmp[, 3], sep = "-"), format = "%d-%m-%Y")
    # Project helper; assume `period` is the most narrow band.
    matched = match2DailyTimeSeries(rhessys.date, period)
    rhessys.dailytimeSeriesMatch = matched$xSelect
    period.dailytimeSeriesMatch = matched$ySelect
  }

  jMax = length(Jindex)
  nDay = sum(period.dailytimeSeriesMatch)
  nSub = nrow(subFile)
  # Areal (grid-cell-count) weights, hoisted out of the loops.
  w = subFile[, 'grid']
  wSum = sum(w)

  # One accumulator matrix (day x replicate) per output variable.
  vars = c('flow', 'et', 'pet', 'precip', 'tmax', 'tmin', 'satz', 'unsat',
           'evap', 'snow', 'return', 'psn', 'lai', 'tavg', 'vpd', 'recharge')
  holding = lapply(vars, function(v) matrix(NA, nDay, jMax))
  names(holding) = vars

  # Initialized here too so the write step below is defined even when jMax == 0.
  NewVersion = FALSE
  for (j in seq_len(jMax)) {  # replicates
    second = lapply(vars, function(v) matrix(NA, nDay, nSub))
    names(second) = vars
    NewVersion = FALSE
    for (kk in seq_len(nSub)) {  # subbasins
      tryCatch({
        outputname = paste(prefix, sitename, subPrefix, subFile[kk, 'id'], '_',
                           Jindex[j], '_basin.daily', sep = '')
        tmp = read.table(outputname, header = FALSE, skip = 1)
        sel = rhessys.dailytimeSeriesMatch
        # Columns shared by the 5.18 and 5.20 output layouts.
        second$flow[, kk]   = tmp[sel, 19]
        second$et[, kk]     = tmp[sel, 14] + tmp[sel, 16]
        second$pet[, kk]    = tmp[sel, 33]
        second$precip[, kk] = tmp[sel, 35]  # rain mm
        second$satz[, kk]   = tmp[sel, 7]
        second$unsat[, kk]  = tmp[sel, 10]
        second$evap[, kk]   = tmp[sel, 14]
        second$snow[, kk]   = tmp[sel, 15]  # snowpack
        second$return[, kk] = tmp[sel, 18]  # return flow
        second$psn[, kk]    = tmp[sel, 20]
        second$lai[, kk]    = tmp[sel, 21]
        if (ncol(tmp) >= 43) {
          # 5.20 layout: shifted temperature columns plus tavg/vpd/recharge.
          tmax_ = tmp[sel, 38]
          tmin_ = tmp[sel, 39]
          second$tavg[, kk]     = tmp[sel, 40]
          second$vpd[, kk]      = tmp[sel, 41]
          second$recharge[, kk] = tmp[sel, 43]
          NewVersion = TRUE
        } else {
          # 5.18 layout.
          tmax_ = tmp[sel, 37]
          tmin_ = tmp[sel, 38]
        }
        # Guard against tmax/tmin being swapped in the raw output.  pmax/pmin
        # are the base-R elementwise equivalent of colMaxs/colMins(rbind(...)).
        second$tmax[, kk] = pmax(tmax_, tmin_)
        second$tmin[, kk] = pmin(tmax_, tmin_)
      }, warning = function(cond) {
        print(kk); print(outputname); print(cond)
      }, error = function(cond) {
        print(kk); print(outputname); print(cond)
      })#tryCatch
    }#kk
    # Area-weighted average across subbasins -> one basin series per replicate.
    # (tavg/vpd/recharge stay NA for 5.18 output, matching the old behavior.)
    for (v in vars) {
      holding[[v]][, j] = second[[v]] %*% w / wSum
    }
  }#j

  dateCols = cbind(
    format(period[period.dailytimeSeriesMatch], format = '%Y'),
    format(period[period.dailytimeSeriesMatch], format = '%m'),
    format(period[period.dailytimeSeriesMatch], format = '%d'))
  # Single writer for every output CSV: WSC layout is year/month/day plus one
  # column per replicate.  Some products carry `label` in the file name
  # (matching the historical naming), others do not.
  writeVar = function(mat, varname, useLabel) {
    result = cbind(dateCols, mat)
    colnames(result) = c('year', 'month', 'day', paste(varname, Jindex, sep = '_'))
    lab = if (useLabel) label else ''
    write.csv(result,
              paste(location, '/', outputPrefix, sitename, lab, '_', varname, '.csv', sep = ''),
              row.names = FALSE)
  }

  writeVar(holding$flow, 'streamflowmm', useLabel = TRUE)
  if (toLake) {
    # mm/day over the lake/basin area -> cubic meters per day.
    writeVar(holding$flow * 0.001 * lakeArea, 'flowcmd', useLabel = FALSE)
  }
  writeVar(holding$et,     'et',     useLabel = FALSE)
  writeVar(holding$pet,    'pet',    useLabel = FALSE)
  writeVar(holding$precip, 'precip', useLabel = FALSE)
  writeVar(holding$tmax,   'tmax',   useLabel = FALSE)
  writeVar(holding$tmin,   'tmin',   useLabel = FALSE)
  writeVar(holding$satz,   'satz',   useLabel = TRUE)
  writeVar(holding$unsat,  'unsat',  useLabel = TRUE)
  writeVar(holding$evap,   'evap',   useLabel = TRUE)
  writeVar(holding$snow,   'snow',   useLabel = TRUE)
  writeVar(holding$return, 'return', useLabel = TRUE)
  writeVar(holding$psn,    'psn',    useLabel = TRUE)
  writeVar(holding$lai,    'lai',    useLabel = TRUE)
  if (NewVersion) {
    writeVar(holding$tavg,     'tavg',     useLabel = TRUE)
    writeVar(holding$vpd,      'vpd',      useLabel = TRUE)
    writeVar(holding$recharge, 'recharge', useLabel = TRUE)
  }
}#function

# lakeID name area_m2 grid simulation percent
# 4 657 buttner 72981900 81091 70761 0.87261225
# 1 865 michie 432610200 480678 472280 0.982528845
# 7 948 westfor 23639400 26266 16277 0.61969847
# 6 1009 orange 22968000
# 25520 21294 0.834404389
# 5 1127 rogers 44847900 49831 46498 0.933113925
# 2 1146 little 249788700 277543 267050 0.962193246
# 3 1439 fall 1099539900 1221711 1089327 0.891640494
# 8 2046 cane 80128800 89032 78132 0.877572109
# 9 2317 university 76376700 84863 80021 0.942943332
# 10 2435 jordan 946452600 1051614 945619 0.899207314 (include newhope)
# total jordan lake with HAW 4369799700 4855333
# total falls lake 1996320600 2218134
# swift 1.7172e+8 m2
# 101 flat 385145100 427939
# 102 little 202600800 225112
# 103 mtn 20677500 22975
# 104 morgan 21401100 23779
# 105 cane 19687500 21875
# 106 newhope 198703800 220782 --> 198868500 (model)
# 107 eno 365411700 406013
# 108 ellerbeClub 13273200 14748
# 109 ellerbeGorman 41607900 46231
# 110 lick 9989100 11099
# 111 northeast 55437300 61597
# 112 swift 54394200 60438

##-------------------------- outdated functions

# Turn a USGS-style WSC streamflow file (mm/day) into a lake-inflow file in
# cubic meters per day, and copy the matching pet/precip/tmax/tmin CSVs to the
# new output name.
# scaler: multiplier applied with 0.001 to convert mm/day to m^3/day
#         (presumably the contributing area in m^2 — confirm with callers).
WSC_usgs2lake = function(inputname, scaler, outputname) {
  parts = unlist(strsplit(inputname, split = '/'))
  if (length(parts) > 1) {
    location = paste(parts[1:(length(parts) - 1)], collapse = '/')
  } else {
    location = '.'
  }
  inputnamePrefix = parts[length(parts)]
  print(location)
  print(inputnamePrefix)

  flow = read.csv(paste(inputname, '_streamflowmm.csv', sep = ''))
  # Columns 1:3 are year/month/day; the rest are flow series in mm/day.
  flow[, 4:ncol(flow)] = flow[, 4:ncol(flow)] * 0.001 * scaler
  write.csv(flow, paste(location, '/', outputname, '_flowcmd.csv', sep = ''),
            row.names = F)

  for (v in c('_pet', '_precip', '_tmax', '_tmin')) {
    system(paste('cp ', inputname, v, '.csv ', location, '/', outputname, v, '.csv', sep = ''))
  }#v
}#function

# Like MultipleBasinSeries2WSC, but each subbasin can come from a different
# run directory depending on a condition:
#   CONDS : T/F matrix (nrow = subbasins in `sub`, ncol = conditions) saying
#           which condition each subbasin belongs to.
#   prefix: vector of run prefixes, one per condition (matching CONDS columns).
#   Jindex: replicate-index matrix (nrow = replicate, ncol = condition).
# Everything is in terms of mm/day.  Subbasin list can be made via:
#   ls -l world_subbasin_??? | awk '{print $9}'
ConditionMultipleBasinSeries2WSC = function(CONDS, prefix, sub, Jindex, outputPrefix,
                                            sitename = NA, period = NULL, subPrefix = NA,
                                            toLake = F, lakeArea = NA, label = '') {
  subFile = read.csv(paste(sub, sep = ''), stringsAsFactors = F)

  # One output directory per prefix; outputs are written under location[1].
  location = rep(NA, length(prefix))
  for (i in 1:length(location)) {
    tmp = unlist(strsplit(prefix[i], split = '/'))
    if (length(tmp) > 1) {
      location[i] = paste(tmp[1:(length(tmp) - 1)], collapse = '/')
    } else {
      location[i] = '.'
    }
  }#i
  # NOTE: `tmp` below is whatever the last loop iteration left behind, i.e. the
  # default sitename comes from the LAST prefix (original behavior, preserved).
  if (is.na(sitename)) { sitename = tmp[length(tmp)] }
  if (is.na(subPrefix)) { subPrefix = '_world_subbasin_' }
  if (is.na(lakeArea)) { lakeArea = sum(subFile[, 'area']) }

  # Establish dates from the first subbasin's file (under its own condition).
  prefixuseIndex = which(CONDS[1, ] == T)
  outputname = paste(prefix[prefixuseIndex], sitename, subPrefix, subFile[1, 'id'],
                     '_', Jindex[1, prefixuseIndex], '_basin.daily', sep = '')
  tmp = read.table(outputname, header = F, skip = 1)
  if (is.null(period)) {
    print('use RHESSys output period')
    period = as.Date(paste(tmp[, 1], tmp[, 2], tmp[, 3], sep = "-"), format = "%d-%m-%Y")
    rhessys.dailytimeSeriesMatch = rep(T, length(period))
    period.dailytimeSeriesMatch = rep(T, length(period))
    print(range(period))
  } else {
    rhessys.date = as.Date(paste(tmp[, 1], tmp[, 2], tmp[, 3], sep = "-"), format = "%d-%m-%Y")
    # Project helper; assume `period` is the most narrow band.
    tmp = match2DailyTimeSeries(rhessys.date, period)
    rhessys.dailytimeSeriesMatch = tmp$xSelect
    period.dailytimeSeriesMatch = tmp$ySelect
  }

  jMax = nrow(Jindex)
  nDay = sum(period.dailytimeSeriesMatch)
  nSub = nrow(subFile)
  vars = c('flow', 'et', 'pet', 'precip', 'tmax', 'tmin', 'satz', 'unsat',
           'evap', 'snow', 'return', 'psn', 'lai')
  holding = lapply(vars, function(v) matrix(NA, nDay, jMax))
  names(holding) = vars

  for (j in 1:jMax) {  # replicates
    second = lapply(vars, function(v) matrix(NA, nDay, nSub))
    names(second) = vars
    for (kk in 1:nSub) {  # subbasins, each read from its condition's prefix
      tryCatch({
        prefixuseIndex = which(CONDS[kk, ] == T)
        outputname = paste(prefix[prefixuseIndex], sitename, subPrefix, subFile[kk, 'id'],
                           '_', Jindex[j, prefixuseIndex], '_basin.daily', sep = '')
        tmp = read.table(outputname, header = F, skip = 1)
        sel = rhessys.dailytimeSeriesMatch
        # Columns shared by the 5.18 / 5.20 layouts.
        second$flow[, kk]   = tmp[sel, 19]
        second$et[, kk]     = tmp[sel, 14] + tmp[sel, 16]
        second$pet[, kk]    = tmp[sel, 33]
        second$precip[, kk] = tmp[sel, 35]  # rain mm
        second$satz[, kk]   = tmp[sel, 7]
        second$unsat[, kk]  = tmp[sel, 10]
        second$evap[, kk]   = tmp[sel, 14]
        second$snow[, kk]   = tmp[sel, 15]  # snowpack
        second$return[, kk] = tmp[sel, 18]  # return flow
        second$psn[, kk]    = tmp[sel, 20]
        second$lai[, kk]    = tmp[sel, 21]
        if (ncol(tmp) > 70) {  # 5.20 layout
          tmaxRaw = tmp[sel, 38]
          tminRaw = tmp[sel, 39]
        } else {               # 5.18 layout
          tmaxRaw = tmp[sel, 37]
          tminRaw = tmp[sel, 38]
        }
        # Guard against swapped tmax/tmin in the raw output.
        second$tmax[, kk] = colMaxs(rbind(tmaxRaw, tminRaw))
        second$tmin[, kk] = colMins(rbind(tmaxRaw, tminRaw))
      }, warning = function(cond) {
        print(kk); print(outputname); print(cond)
      }, error = function(cond) {
        print(kk); print(outputname); print(cond)
      })#tryCatch
    }#kk
    # Area-weighted (grid-cell-count) average across subbasins.
    for (v in vars) {
      holding[[v]][, j] = second[[v]] %*% subFile[, 'grid'] / sum(subFile[, 'grid'])
    }
  }#j

  dateCols = cbind(
    format(period[period.dailytimeSeriesMatch], format = '%Y'),
    format(period[period.dailytimeSeriesMatch], format = '%m'),
    format(period[period.dailytimeSeriesMatch], format = '%d'))
  # WSC layout: year/month/day + one column per replicate (named var_1..var_jMax).
  writeOut = function(mat, varname, useLabel) {
    out = cbind(dateCols, mat)
    colnames(out) = c('year', 'month', 'day', paste(varname, 1:jMax, sep = '_'))
    lab = if (useLabel) label else ''
    write.csv(out,
              paste(location[1], '/', outputPrefix, sitename, lab, '_', varname, '.csv', sep = ''),
              row.names = F)
  }

  writeOut(holding$flow, 'streamflowmm', useLabel = TRUE)
  if (toLake) {
    # mm/day over the lake/basin area -> cubic meters per day.
    writeOut(holding$flow * 0.001 * lakeArea, 'flowcmd', useLabel = FALSE)
  }
  writeOut(holding$et,     'et',     useLabel = FALSE)
  writeOut(holding$pet,    'pet',    useLabel = FALSE)
  writeOut(holding$precip, 'precip', useLabel = FALSE)
  writeOut(holding$tmax,   'tmax',   useLabel = FALSE)
  writeOut(holding$tmin,   'tmin',   useLabel = FALSE)
  writeOut(holding$satz,   'satz',   useLabel = TRUE)
  writeOut(holding$unsat,  'unsat',  useLabel = TRUE)
  writeOut(holding$evap,   'evap',   useLabel = TRUE)
  writeOut(holding$snow,   'snow',   useLabel = TRUE)
  writeOut(holding$return, 'return', useLabel = TRUE)
  writeOut(holding$psn,    'psn',    useLabel = TRUE)
  writeOut(holding$lai,    'lai',    useLabel = TRUE)
}#function

# Like the above, but `prefix` is a matrix indexed [replicate, condition].
# NOTE: this definition continues beyond the visible chunk.
ConditionII_MultipleBasinSeries2WSC = function(CONDS, prefix, sub, Jindex, outputPrefix,
                                               sitename = NA, period = NULL, subPrefix = NA,
                                               toLake = F, lakeArea = NA, label = '') {
  # assuming more than 1 condition
  # CONDS: multiple conditions based on sub.csv (T/F-matrix: nrow = sub, col=conds)
  # prefix: multiple prefixes (should correspond to CONDS)
  # Jindex: index matrix: nrow=index, and col=cond
  # first step (everything is in terms of mm/day)
  # convert a series of rhessys basin output at the same watershed (need to
  # combine multiple sub-basin) into WSC format
  # ls -l world_subbasin_??? | awk '{print $9}'
  subFile = read.csv(paste(sub, sep = ''), stringsAsFactors = F)
  # location = rep(NA, length(prefix))
  # for(i in 1:length(location)){
  #   tmp = unlist(strsplit(prefix[i],split='/'))
  #   if(length(tmp)>1){location[i] = paste(tmp[1:(length(tmp)-1)],collapse='/')}else{location[i]='.'}
  # }#i
  # NOTE(review): `tmp` is not defined at this point (the location loop above is
  # commented out) — this line relies on a leftover/global `tmp`; confirm.
  if (is.na(sitename)) { sitename = tmp[length(tmp)] }
  if (is.na(subPrefix)) { subPrefix = '_world_subbasin_' }
  if (is.na(lakeArea)) { lakeArea = sum(subFile[, 'area']) }
  prefixuseIndex = which(CONDS[1, ] == T)  # find out which cond the first file is on
  outputname = paste(prefix[1, prefixuseIndex], sitename, subPrefix, subFile[1, 'id'],
                     '_', Jindex[1, prefixuseIndex], '_basin.daily', sep = '')
  tmp = read.table(outputname, header = F, skip = 1)
  if (is.null(period)) {
    print('use RHESSys output period')
    period = as.Date(paste(tmp[, 1], tmp[, 2], tmp[, 3], sep = "-"), format = "%d-%m-%Y")
    rhessys.dailytimeSeriesMatch = rep(T, length(period))
    period.dailytimeSeriesMatch = rep(T, length(period))
    print(range(period))
  } else {
    rhessys.date = as.Date(paste(tmp[, 1], tmp[, 2], tmp[, 3], sep = "-"), format = "%d-%m-%Y")
    tmp = match2DailyTimeSeries(rhessys.date, period) ###
assume period is the most narrow band rhessys.dailytimeSeriesMatch = tmp$xSelect period.dailytimeSeriesMatch = tmp$ySelect } jMax = nrow(Jindex) holding = matrix(NA,sum(period.dailytimeSeriesMatch), jMax) #flow holding_et = matrix(NA, sum(period.dailytimeSeriesMatch), jMax) holding_pet = matrix(NA, sum(period.dailytimeSeriesMatch), jMax) holding_precip = matrix(NA, sum(period.dailytimeSeriesMatch), jMax) holding_tmax = matrix(NA, sum(period.dailytimeSeriesMatch), jMax) holding_tmin = matrix(NA, sum(period.dailytimeSeriesMatch), jMax) holding_satz = matrix(NA, sum(period.dailytimeSeriesMatch), jMax) holding_unsat = matrix(NA, sum(period.dailytimeSeriesMatch), jMax) holding_evap = matrix(NA, sum(period.dailytimeSeriesMatch), jMax) holding_snow = matrix(NA, sum(period.dailytimeSeriesMatch), jMax) holding_return = matrix(NA, sum(period.dailytimeSeriesMatch), jMax) holding_psn = matrix(NA, sum(period.dailytimeSeriesMatch), jMax) holding_lai = matrix(NA, sum(period.dailytimeSeriesMatch), jMax) for(j in 1:jMax){ # multiple sub-catchment secondholding = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile)) secondholding_et = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile)) secondholding_pet = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile)) secondholding_rain = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile)) secondholding_tmax = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile)) secondholding_tmin = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile)) secondholding_satz = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile)) secondholding_unsat = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile)) secondholding_evap = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile)) secondholding_snow = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile)) secondholding_return = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile)) secondholding_psn = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile)) 
secondholding_lai = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile)) for(kk in 1:nrow(subFile)){ tryCatch({ prefixuseIndex = which(CONDS[kk,]==T) outputname = paste(prefix[j,prefixuseIndex], sitename, subPrefix,subFile[kk,'id'],'_',Jindex[j, prefixuseIndex],'_basin.daily',sep='' ) tmp = read.table(outputname,header=F,skip=1) if(ncol(tmp)>70){ # 5.20 secondholding[,kk]=tmp[rhessys.dailytimeSeriesMatch,19] secondholding_et[,kk]=tmp[rhessys.dailytimeSeriesMatch,14]+tmp[rhessys.dailytimeSeriesMatch,16] secondholding_pet[,kk]=tmp[rhessys.dailytimeSeriesMatch,33] secondholding_rain[,kk]=tmp[rhessys.dailytimeSeriesMatch,35]# rain mm tmax_ = tmp[rhessys.dailytimeSeriesMatch,38]# tmax C tmin_ = tmp[rhessys.dailytimeSeriesMatch,39]# tmin C secondholding_tmax[,kk]=colMaxs( rbind(tmax_,tmin_)) secondholding_tmin[,kk]=colMins( rbind(tmax_,tmin_)) secondholding_satz[,kk]=tmp[rhessys.dailytimeSeriesMatch,7]# satz secondholding_unsat[,kk]=tmp[rhessys.dailytimeSeriesMatch,10]# unsat secondholding_evap[,kk]=tmp[rhessys.dailytimeSeriesMatch,14]# evap secondholding_snow[,kk]=tmp[rhessys.dailytimeSeriesMatch,15]# snowpack secondholding_return[,kk]=tmp[rhessys.dailytimeSeriesMatch,18]# return flow secondholding_psn[,kk]=tmp[rhessys.dailytimeSeriesMatch,20]# psn secondholding_lai[,kk]=tmp[rhessys.dailytimeSeriesMatch,21]# lai }else{ # 5.18 secondholding[,kk]=tmp[rhessys.dailytimeSeriesMatch,19] secondholding_et[,kk]=tmp[rhessys.dailytimeSeriesMatch,14]+tmp[rhessys.dailytimeSeriesMatch,16] secondholding_pet[,kk]=tmp[rhessys.dailytimeSeriesMatch,33] secondholding_rain[,kk]=tmp[rhessys.dailytimeSeriesMatch,35]# rain mm tmax_ = tmp[rhessys.dailytimeSeriesMatch,37]# tmax C tmin_ = tmp[rhessys.dailytimeSeriesMatch,38]# tmin C secondholding_tmax[,kk]=colMaxs( rbind(tmax_,tmin_)) secondholding_tmin[,kk]=colMins( rbind(tmax_,tmin_)) secondholding_satz[,kk]=tmp[rhessys.dailytimeSeriesMatch,7]# satz secondholding_unsat[,kk]=tmp[rhessys.dailytimeSeriesMatch,10]# unsat 
secondholding_evap[,kk]=tmp[rhessys.dailytimeSeriesMatch,14]# evap secondholding_snow[,kk]=tmp[rhessys.dailytimeSeriesMatch,15]# snowpack secondholding_return[,kk]=tmp[rhessys.dailytimeSeriesMatch,18]# return flow secondholding_psn[,kk]=tmp[rhessys.dailytimeSeriesMatch,20]# psn secondholding_lai[,kk]=tmp[rhessys.dailytimeSeriesMatch,21]# lai }}, warning = function(w) { print(kk) print(outputname) print(w) }, error = function(e) { print(kk) print(outputname) print(e) } )#tryCatch }#kk holding[,j] = secondholding %*%subFile[,'grid']/sum(subFile[,'grid']) holding_et[,j] = secondholding_et %*%subFile[,'grid']/sum(subFile[,'grid']) holding_pet[,j] = secondholding_pet %*%subFile[,'grid']/sum(subFile[,'grid']) holding_precip[,j] = secondholding_rain%*%subFile[,'grid']/sum(subFile[,'grid'])# rain mm holding_tmax[,j] = secondholding_tmax%*%subFile[,'grid']/sum(subFile[,'grid'])# tmax C holding_tmin[,j] = secondholding_tmin%*%subFile[,'grid']/sum(subFile[,'grid'])# tmin C holding_satz[,j] = secondholding_satz%*%subFile[,'grid']/sum(subFile[,'grid'])# holding_unsat[,j] = secondholding_unsat%*%subFile[,'grid']/sum(subFile[,'grid'])# holding_evap[,j] = secondholding_evap%*%subFile[,'grid']/sum(subFile[,'grid'])# holding_snow[,j] = secondholding_snow%*%subFile[,'grid']/sum(subFile[,'grid'])# holding_return[,j] = secondholding_return%*%subFile[,'grid']/sum(subFile[,'grid'])# holding_psn[,j] = secondholding_psn%*%subFile[,'grid']/sum(subFile[,'grid'])# holding_lai[,j] = secondholding_lai%*%subFile[,'grid']/sum(subFile[,'grid'])# }#j result = cbind( format(period[period.dailytimeSeriesMatch],format='%Y'), format(period[period.dailytimeSeriesMatch],format='%m'), format(period[period.dailytimeSeriesMatch],format='%d'), holding ) colnames(result) = c('year','month','day', paste('streamflowmm',1:jMax,sep='_') ) write.csv(result,paste(outputPrefix, sitename,label,'_streamflowmm.csv',sep=''),row.names=F) if(toLake){ result = cbind( format(period[period.dailytimeSeriesMatch],format='%Y'), 
format(period[period.dailytimeSeriesMatch],format='%m'), format(period[period.dailytimeSeriesMatch],format='%d'), holding*0.001*lakeArea ) colnames(result) = c('year','month','day', paste('flowcmd',1:jMax,sep='_') ) write.csv(result,paste(outputPrefix, sitename,'_flowcmd.csv',sep=''),row.names=F) } result = cbind( format(period[period.dailytimeSeriesMatch],format='%Y'), format(period[period.dailytimeSeriesMatch],format='%m'), format(period[period.dailytimeSeriesMatch],format='%d'), holding_et ) colnames(result) = c('year','month','day', paste('et',1:jMax,sep='_') ) write.csv(result,paste(outputPrefix, sitename,'_et.csv',sep=''),row.names=F) result = cbind( format(period[period.dailytimeSeriesMatch],format='%Y'), format(period[period.dailytimeSeriesMatch],format='%m'), format(period[period.dailytimeSeriesMatch],format='%d'), holding_pet ) colnames(result) = c('year','month','day', paste('pet',1:jMax,sep='_') ) write.csv(result,paste(outputPrefix, sitename,'_pet.csv',sep=''),row.names=F) result = cbind( format(period[period.dailytimeSeriesMatch],format='%Y'), format(period[period.dailytimeSeriesMatch],format='%m'), format(period[period.dailytimeSeriesMatch],format='%d'), holding_precip ) colnames(result) = c('year','month','day', paste('precip',1:jMax,sep='_') ) write.csv(result,paste(outputPrefix, sitename,'_precip.csv',sep=''),row.names=F) result = cbind( format(period[period.dailytimeSeriesMatch],format='%Y'), format(period[period.dailytimeSeriesMatch],format='%m'), format(period[period.dailytimeSeriesMatch],format='%d'), holding_tmax ) colnames(result) = c('year','month','day', paste('tmax',1:jMax,sep='_') ) write.csv(result,paste(outputPrefix, sitename,'_tmax.csv',sep=''),row.names=F) result = cbind( format(period[period.dailytimeSeriesMatch],format='%Y'), format(period[period.dailytimeSeriesMatch],format='%m'), format(period[period.dailytimeSeriesMatch],format='%d'), holding_tmin ) colnames(result) = c('year','month','day', paste('tmin',1:jMax,sep='_') ) 
write.csv(result,paste(outputPrefix, sitename,'_tmin.csv',sep=''),row.names=F) result = cbind( format(period[period.dailytimeSeriesMatch],format='%Y'), format(period[period.dailytimeSeriesMatch],format='%m'), format(period[period.dailytimeSeriesMatch],format='%d'), holding_satz ) colnames(result) = c('year','month','day', paste('satz',1:jMax,sep='_') ) write.csv(result,paste(outputPrefix, sitename,label,'_satz.csv',sep=''),row.names=F) result = cbind( format(period[period.dailytimeSeriesMatch],format='%Y'), format(period[period.dailytimeSeriesMatch],format='%m'), format(period[period.dailytimeSeriesMatch],format='%d'), holding_unsat ) colnames(result) = c('year','month','day', paste('unsat',1:jMax,sep='_') ) write.csv(result,paste(outputPrefix, sitename,label,'_unsat.csv',sep=''),row.names=F) result = cbind( format(period[period.dailytimeSeriesMatch],format='%Y'), format(period[period.dailytimeSeriesMatch],format='%m'), format(period[period.dailytimeSeriesMatch],format='%d'), holding_evap ) colnames(result) = c('year','month','day', paste('evap',1:jMax,sep='_') ) write.csv(result,paste(outputPrefix, sitename,label,'_evap.csv',sep=''),row.names=F) result = cbind( format(period[period.dailytimeSeriesMatch],format='%Y'), format(period[period.dailytimeSeriesMatch],format='%m'), format(period[period.dailytimeSeriesMatch],format='%d'), holding_snow ) colnames(result) = c('year','month','day', paste('snow',1:jMax,sep='_') ) write.csv(result,paste(outputPrefix, sitename,label,'_snow.csv',sep=''),row.names=F) result = cbind( format(period[period.dailytimeSeriesMatch],format='%Y'), format(period[period.dailytimeSeriesMatch],format='%m'), format(period[period.dailytimeSeriesMatch],format='%d'), holding_return ) colnames(result) = c('year','month','day', paste('return',1:jMax,sep='_') ) write.csv(result,paste(outputPrefix, sitename,label,'_return.csv',sep=''),row.names=F) result = cbind( format(period[period.dailytimeSeriesMatch],format='%Y'), 
format(period[period.dailytimeSeriesMatch],format='%m'), format(period[period.dailytimeSeriesMatch],format='%d'), holding_psn ) colnames(result) = c('year','month','day', paste('psn',1:jMax,sep='_') ) write.csv(result,paste(outputPrefix, sitename,label,'_psn.csv',sep=''),row.names=F) result = cbind( format(period[period.dailytimeSeriesMatch],format='%Y'), format(period[period.dailytimeSeriesMatch],format='%m'), format(period[period.dailytimeSeriesMatch],format='%d'), holding_lai ) colnames(result) = c('year','month','day', paste('lai',1:jMax,sep='_') ) write.csv(result,paste(outputPrefix, sitename,label,'_lai.csv',sep=''),row.names=F) }#function ##-------------------------------------------------------------------------------------------------------------------- ##--------- below are exmaples ##-------------------------------------------------------------------------------------------------------------------- if(F){ #---------------------------------------------------- new hope Bias MultipleBasinSeries2WSC( prefix=paste("output_newhopeBias/FIAnlcdlocal_2010",sep=''), sub='newhope_sub.csv', Jindex = paste('param', c(1,2,3,4),sep=''), outputPrefix=paste('regionalNewhopeBias',sep=''), sitename='', period=seq.Date(from=as.Date('1940-1-1'), to=as.Date('2010-10-1'), by="day") , subPrefix='_sub', toLake=T, lakeArea=198868500 ) MultipleBasinSeries2WSC( prefix=paste("output_jordanBias/FIAnlcdlocal_2010",sep=''), sub='jordan_sub.csv', Jindex = paste('param', c(1,2,3,4),sep=''), outputPrefix=paste('regionalJordanBias',sep=''), sitename='', period=seq.Date(from=as.Date('1940-1-1'), to=as.Date('2010-10-1'), by="day") , subPrefix='_sub', toLake=T, lakeArea=747584100 #area excluding newhope ) #---------------------------------------------------- michie regional simulation # MultipleBasinSeries2WSC( # prefix='output_michie_FIAnlcdlocal_proj2_s3/FIAnlcdlocal_2060', # sub='flat_region_sub.csv', # Jindex = paste('param', c(1,2,3,4),sep=''), # outputPrefix='regionalFlat2060', # 
sitename='', # period=NULL, # subPrefix='_sub', # toLake=T, # lakeArea=432610200 # ) projhh = c(1) #c(2,3,12,13) allperiod = matrix(c( '2020','1990-10-1','2051-9-30', '2030','2000-10-1','2061-9-30', '2040','2010-10-1','2071-9-30', '2050','2020-10-1','2081-9-30', '2060','2030-10-1','2090-9-30' ),nrow=5,ncol=3,byrow=T) for(jj in 1:1){ #nrow(allperiod) for(hh in projhh){ MultipleBasinSeries2WSC( prefix=paste("output_michie_FIAnlcdlocal_proj",hh,"_csiroRCP6r7/FIAnlcdlocal_",allperiod[jj,1],sep=''), sub='flat_region_sub.csv', Jindex = paste('param', c(1,2,3,4),sep=''), outputPrefix=paste('regionalFlat',allperiod[jj,1],sep=''), sitename='', period=seq.Date(from=as.Date(allperiod[jj,2]), to=as.Date(allperiod[jj,3]), by="day") , subPrefix='_sub', toLake=T, lakeArea=432610200 ) }#hh }#jj # period set to avoid 5 spin up years # flatsub = read.csv('flat_region_sub.csv') # combineSubbasin2Basin( # prefix='output_michieBias/FIAnlcdlocal_2010', # suffix='_param3', # subbasin=flatsub # ) #---------------------------------------------------- owasa regional simulation # allperiod = matrix(c( # '2010','1980-10-1','2041-9-30', # '2020','1990-10-1','2051-9-30', # '2030','2000-10-1','2061-9-30', # '2040','2010-10-1','2071-9-30', # '2050','2020-10-1','2081-9-30', # '2060','2030-10-1','2090-9-30' # ),nrow=6,ncol=3,byrow=T) allperiod = matrix(c( '2060','1980-10-1','2041-9-30' #'2010','1980-10-1','2040-9-30' ),nrow=1,ncol=3,byrow=T) for(jj in 1:nrow(allperiod)){ MultipleBasinSeries2WSC( prefix=paste('output_owasa_FIAnlcdlocal_proj1_s3_nonForest_climateperiod2010/FIAnlcdlocal_',allperiod[jj,1],sep=''), sub='cane_regional_sub.csv', Jindex = paste('param', c(1),sep=''), outputPrefix=paste('regionalCane',allperiod[jj,1],sep=''), sitename='', period=seq.Date(from=as.Date(allperiod[jj,2]), to=as.Date(allperiod[jj,3]), by="day"), subPrefix='_sub', toLake=T, lakeArea=80128800 ) MultipleBasinSeries2WSC( 
prefix=paste('output_owasa_FIAnlcdlocal_proj1_s3_nonForest_climateperiod2010/FIAnlcdlocal_',allperiod[jj,1],sep=''), sub='morgan_regional_sub.csv', Jindex = paste('param', c(1),sep=''), outputPrefix=paste('regionalMorgan',allperiod[jj,1],sep=''), sitename='', period=seq.Date(from=as.Date(allperiod[jj,2]), to=as.Date(allperiod[jj,3]), by="day"), subPrefix='_sub', toLake=T, lakeArea=76376700 ) ww=read.csv(paste("output_owasa_FIAnlcdlocal_proj1_s3_nonForest_climateperiod2010/regionalCane",allperiod[jj,1],"_streamflowmm.csv",sep='')) ww.date = as.Date(paste(ww[,3], ww[,2], ww[,1],sep="-"),format="%d-%m-%Y") tmp = match2DailyTimeSeries( ww.date, seq.Date(from=as.Date(allperiod[jj,2]), to=as.Date(allperiod[jj,3]), by="day") ) period.dailytimeSeriesMatch = tmp$xSelect WSC_combineUSGS2Lake( prefix = c( paste('output_owasa_FIAnlcdlocal_proj1_s3_nonForest_climateperiod2010/regionalCane',allperiod[jj,1],sep=''), paste('output_owasa_FIAnlcdlocal_proj1_s3_nonForest_climateperiod2010/regionalMorgan',allperiod[jj,1],sep='') ), dailytimeSeriesMatch = list( period.dailytimeSeriesMatch, period.dailytimeSeriesMatch ), replication = 1, arealweight = c(70318800, 72018900), outPrefix = paste('output_owasa_FIAnlcdlocal_proj1_s3_nonForest_climateperiod2010/lake_owasa',allperiod[jj,1],sep=''), scaler = 80128800+ 76376700 ) }#jj ##----------- landuse projhh = c(1) allperiod = matrix(c( '2020','1990-10-1','2051-9-30', '2030','2000-10-1','2061-9-30', '2040','2010-10-1','2071-9-30', '2050','2020-10-1','2081-9-30', '2060','2030-10-1','2090-9-30' ),nrow=5,ncol=3,byrow=T) for(jj in 1:nrow(allperiod)){ for(hh in projhh){ MultipleBasinSeries2WSC( prefix=paste("output_owasa_FIAnlcdlocal_proj",hh,"_csiroRCP6r7/FIAnlcdlocal_",allperiod[jj,1],sep=''), sub='cane_regional_sub.csv', Jindex = paste('param', c(1,2,3,4),sep=''), outputPrefix=paste('regionalCane',allperiod[jj,1],sep=''), sitename='', period=seq.Date(from=as.Date(allperiod[jj,2]), to=as.Date(allperiod[jj,3]), by="day"), subPrefix='_sub', 
toLake=T, lakeArea=80128800 ) MultipleBasinSeries2WSC( prefix=paste("output_owasa_FIAnlcdlocal_proj",hh,"_csiroRCP6r7/FIAnlcdlocal_",allperiod[jj,1],sep=''), sub='morgan_regional_sub.csv', Jindex = paste('param', c(1,2,3,4),sep=''), outputPrefix=paste('regionalMorgan',allperiod[jj,1],sep=''), sitename='', period=seq.Date(from=as.Date(allperiod[jj,2]), to=as.Date(allperiod[jj,3]), by="day"), subPrefix='_sub', toLake=T, lakeArea=76376700 ) ww=read.csv(paste("output_owasa_FIAnlcdlocal_proj",hh,"_csiroRCP6r7/regionalCane",allperiod[jj,1],"_streamflowmm.csv",sep='')) ww.date = as.Date(paste(ww[,3], ww[,2], ww[,1],sep="-"),format="%d-%m-%Y") tmp = match2DailyTimeSeries( ww.date, seq.Date(from=as.Date(allperiod[jj,2]), to=as.Date(allperiod[jj,3]), by="day") ) period.dailytimeSeriesMatch = tmp$xSelect WSC_combineUSGS2Lake( prefix = c( paste('output_owasa_FIAnlcdlocal_proj',hh,'_csiroRCP6r7/regionalCane',allperiod[jj,1],sep=''), paste('output_owasa_FIAnlcdlocal_proj',hh,'_csiroRCP6r7/regionalMorgan',allperiod[jj,1],sep='') ), dailytimeSeriesMatch = list( period.dailytimeSeriesMatch, period.dailytimeSeriesMatch ), replication = 4, arealweight = c(70318800, 72018900), outPrefix = paste('output_owasa_FIAnlcdlocal_proj',hh,'_csiroRCP6r7/lake_owasa',allperiod[jj,1],sep=''), scaler = 80128800+ 76376700 ) }#hh }#jj canesub = read.csv('cane_regional_sub.csv') combineSubbasin2Basin( prefix='output_owasaTesting/FIAnlcdlocal_2010', suffix='_param1', subbasin=canesub ) canesub = read.csv('morgan_regional_sub.csv') combineSubbasin2Basin( prefix='output_owasaTesting/FIAnlcdlocal_2010', suffix='_param1', label='morgan', subbasin=canesub ) #------------------------------------------------ #"-st 1980 10 1 1 -ed 2041 9 30 1", #<<--- 2010 #"-st 1990 10 1 1 -ed 2051 9 30 1", #<<--- 2020 #"-st 2000 10 1 1 -ed 2061 9 30 1", #<<--- 2030 #"-st 2010 10 1 1 -ed 2071 9 30 1", #<<--- 2040 #"-st 2020 10 1 1 -ed 2081 9 30 1", #<<--- 2050 #"-st 2030 10 1 1 -ed 2090 9 30 1", #<<--- 2060 #WSC_cutPeriod0 # 
problem here is that "WSC_cutPeriod" produced a different name scheme WSC_cutPeriod( prefix='output_owasa_csiroRCP6r7/lake_owasa', period=seq.Date(from=as.Date('1980-10-1'), to=as.Date('2041-9-30') ,by="day"), label='2010') WSC_cutPeriod( prefix='output_owasa_csiroRCP6r7/lake_owasa', period=seq.Date(from=as.Date('1990-10-1'), to=as.Date('2051-9-30') ,by="day"), label='2020') WSC_cutPeriod( prefix='output_owasa_csiroRCP6r7/lake_owasa', period=seq.Date(from=as.Date('2000-10-1'), to=as.Date('2061-9-30') ,by="day"), label='2030') WSC_cutPeriod( prefix='output_owasa_csiroRCP6r7/lake_owasa', period=seq.Date(from=as.Date('2010-10-1'), to=as.Date('2071-9-30') ,by="day"), label='2040') WSC_cutPeriod( prefix='output_owasa_csiroRCP6r7/lake_owasa', period=seq.Date(from=as.Date('2020-10-1'), to=as.Date('2081-9-30') ,by="day"), label='2050') WSC_cutPeriod( prefix='output_owasa_csiroRCP6r7/lake_owasa', period=seq.Date(from=as.Date('2030-10-1'), to=as.Date('2090-9-30') ,by="day"), label='2060') #"-st 1980 10 1 1 -ed 2041 9 30 1", #<<--- 2010 #"-st 1990 10 1 1 -ed 2051 9 30 1", #<<--- 2020 #"-st 2000 10 1 1 -ed 2061 9 30 1", #<<--- 2030 #"-st 2010 10 1 1 -ed 2071 9 30 1", #<<--- 2040 #"-st 2020 10 1 1 -ed 2081 9 30 1", #<<--- 2050 #"-st 2030 10 1 1 -ed 2090 9 30 1", #<<--- 2060 #---------------------------------------------------- morgan SingleBasinSeries2WSC('morgan_',c(5,6,8,10),'usgs_','morgan' ) WSC_usgs2lake('usgs_morgan', 21401100,'lake_university' ) #wrong 76376700 SingleBasinSeries2WSC('../output_cmip5/morgan_',c(5,6,8,10),'usgs_', 'morgan' ) WSC_usgs2lake('../output_cmip5/usgs_morgan', 21401100,'lake_university' ) ww=read.csv("usgs_morgan_pet.csv") WSC_combineUSGS2Lake( prefix = c('usgs_morgan', '~/Dropbox/Myself/UNC/WSC/cane_testingMCMC_sept15/cane_FIA_nlcd_local/rhessys/needed_coop/usgs_cane'), dailytimeSeriesMatch = list( rep(T,nrow(ww)), rep(T,nrow(ww)) ), replication = 4, arealweight = c(21401100, 19687500), outPrefix = 'lake_owasa', scaler = 80128800+ 76376700 ) 
WSC_combineUSGS2Lake( prefix = c('../output_cmip5/usgs_morgan', '~/Dropbox/Myself/UNC/WSC/cane_testingMCMC_sept15/cane_FIA_nlcd_local/rhessys/needed_cmip5/usgs_cane'), dailytimeSeriesMatch = list( rep(T,nrow(ww)), rep(T,nrow(ww)) ), replication = 4, arealweight = c(21401100, 19687500), outPrefix = '../output_cmip5/lake_owasa', scaler = 80128800+ 76376700 ) #---------------------------------------------------- cane SingleBasinSeries2WSC( '~/Dropbox/Myself/UNC/WSC/cane_testingMCMC_sept15/cane_FIA_nlcd_local/rhessys/needed_coop/rhessys', c(16,20,41,42), 'usgs_','cane' ) WSC_usgs2lake( '~/Dropbox/Myself/UNC/WSC/cane_testingMCMC_sept15/cane_FIA_nlcd_local/rhessys/needed_coop/usgs_cane', 19687500, 'lake_cane') SingleBasinSeries2WSC( '~/Dropbox/Myself/UNC/WSC/cane_testingMCMC_sept15/cane_FIA_nlcd_local/rhessys/needed_cmip5/rhessys', c(16,20,41,42), 'usgs_' ,'cane') WSC_usgs2lake( '~/Dropbox/Myself/UNC/WSC/cane_testingMCMC_sept15/cane_FIA_nlcd_local/rhessys/needed_cmip5/usgs_cane', 19687500, 'lake_cane') #---------------------------------------------------- flat MultipleBasinSeries2WSC( prefix='/Users/laurencelin/Desktop/master_FIA/rhessys/output_green_cmip/case05_', sub='flat_sub.csv', Jindex =c(1,2,3,4,5), outputPrefix='usgs_flat', sitename='flat', period=NULL ) WSC_usgs2lake('/Users/laurencelin/Desktop/master_FIA/rhessys/output_green_cmip/usgs_flatflat', 432610200,'lake_michie') MultipleBasinSeries2WSC( prefix='/Users/laurencelin/Desktop/master_FIA/rhessys/output_green_coop/case05_', sub='flat_sub.csv', Jindex =c(1,2,3,4,5), outputPrefix='usgs_flat', sitename='flat', period=NULL ) WSC_usgs2lake('/Users/laurencelin/Desktop/master_FIA/rhessys/output_green_coop/usgs_flatflat', 432610200,'lake_michie') ## still working }# not exe
a30f14824d490214b290396adb5f8bc9444ea99e
5e207545b85ae719b2cbde944db2aa4601e49ab7
/Statistical Analysis and Visualisation.R
2e461df2e3a271437b42abd931ab6c1647e4dc19
[]
no_license
ShaneBrennan8/Data-wrangling-and-calculations
dddef3a601848396ae2c877e9f8b630ac7cf12cb
73304b9f2dc41cf24b58636983031b31a573d35c
refs/heads/main
2023-04-15T16:24:12.723037
2021-05-04T11:21:53
2021-05-04T11:21:53
336,409,707
0
0
null
null
null
null
UTF-8
R
false
false
2,490
r
Statistical Analysis and Visualisation.R
# ---------------------------------------------------------------------------
# Faecal egg count device comparison: McMaster vs Mini-FLOTAC vs Manual.
# Tests the strongyle counts for normality, then (because the data are not
# normally distributed) compares the three counting devices with Spearman
# rank correlations, and visualises all pairwise comparisons on one plot.
# ---------------------------------------------------------------------------

# Install any missing dependencies, then attach them.
# (Guarded installs avoid re-downloading on every run; the original called
# the non-existent install.package() and the non-existent package "ggplot".)
pkgs <- c(
  "MASS",         # glm model with negative binomial distribution
  "basicPlotteR", # alpha colours in plotting
  "broom",        # simple table from model outputs
  "car",          # companion to applied regression (maths utilities)
  "tidyverse",    # data wrangling; also attaches ggplot2
  "ggpubr",       # publication-ready plots (ggqqplot)
  "rstatix",      # pipe-friendly statistical tests (shapiro_test)
  "ggplot2"       # plotting (replaces the invalid "ggplot")
)
for (pkg in pkgs) {
  if (!requireNamespace(pkg, quietly = TRUE)) {
    install.packages(pkg)
  }
  library(pkg, character.only = TRUE)
}

#### Strongyles ####
# Testing for normality: determine whether the data are parametric or
# non-parametric so the correct comparison-of-means test can be chosen.

# Import the egg-count data (one column per counting device).
# NOTE: original used head=TRUE, which only worked via partial matching.
trial_data <- read.csv("HSI - McM, MF, Manual 26 Feb.csv", header = TRUE, sep = ",")

### Normality testing for all datasets: McMaster vs Mini-FLOTAC
# Build the linear model relating the two devices' counts.
model1 <- lm(McMaster.Strongyles ~ Mini.FLOTAC.Strongyles, data = trial_data)
# Create a QQ plot of the residuals (visual normality check).
ggqqplot(residuals(model1))
# Compute the Shapiro-Wilk test of normality on the residuals.
shapiro_test(residuals(model1))

# Wilcoxon tests (optional, kept for reference):
#wilcox.test(trial_data$McMaster.Strongyles, trial_data$Mini.FLOTAC.Strongyles)
#wilcox.test(trial_data$Manual.Strongyles, trial_data$Mini.FLOTAC.Strongyles)
#wilcox.test(trial_data$McMaster.Strongyles, trial_data$Manual.Strongyles)

# Scatter plots of each device pair to visualise the joint distributions.
plot(trial_data$McMaster.Strongyles, trial_data$Manual.Strongyles)
plot(trial_data$Manual.Strongyles, trial_data$Mini.FLOTAC.Strongyles)
# Original read trial_data$Manual.Strongyle (missing "s"), which only worked
# through $ partial matching; spelled out here.
plot(trial_data$McMaster.Strongyles, trial_data$Manual.Strongyles)

#### Correlation testing using Spearman's rank correlation
#### (chosen because the datasets are not normally distributed).
cor.test(trial_data$McMaster.Strongyles, trial_data$Manual.Strongyles, method = "spearman")
cor.test(trial_data$Manual.Strongyles, trial_data$Mini.FLOTAC.Strongyles, method = "spearman")
cor.test(trial_data$McMaster.Strongyles, trial_data$Mini.FLOTAC.Strongyles, method = "spearman")

### Visualise all three devices and how they compare on a single ggplot.
ggplot(trial_data, aes(Manual.Strongyles, Mini.FLOTAC.Strongyles, colour = "Manual V MinifFLOTAC")) +
  geom_point(colour = alpha("black", 1)) +
  geom_point(aes(Mini.FLOTAC.Strongyles, McMaster.Strongyles, colour = "MiniFLOTAC V McMaster")) +
  geom_point(aes(Manual.Strongyles, McMaster.Strongyles, colour = "Manual V McMaster")) +
  ggtitle("Manual V MiniFLOTAC similarity (Black) compared to other devices")
737683175c1c71d7e2829c147a7a68a9676d2bb9
1df1ea7f7a8f8a4cd876f0d643d355a54ddfbe14
/maps and histograms SST and tracking_ESFGR.R
80957253586eeaaef02d917cbe0ccd6f3fab2420
[]
no_license
ESFGR/R-Scripts-for-analysis-of-SST-from-Copernicus-Marine-Service-prodcuts-and-electronic-tagging-data
d87aea27cc932bc7af237e9317eef81c42789bfa
44f43cf7701233d7efab97f75f1e074e4b34f85c
refs/heads/main
2023-06-28T06:30:50.140369
2021-07-30T17:53:48
2021-07-30T17:53:48
null
0
0
null
null
null
null
WINDOWS-1250
R
false
false
26,402
r
maps and histograms SST and tracking_ESFGR.R
####Mapping daily SST + distibution #### library(stringr) library(ncdf4) library(reshape2) library(ggplot2) library(mapdata) library(maptools) library(rnaturalearth) library(gdata) library(raster) library(scatterpie) library (openxlsx) library(dplyr) library(lubridate) #SST data from IBI model ---- setwd("C:/Users/Usuario/Desktop/TFM/SST and Taggging") tracking_data <-read.csv("Tracking data.csv") table(tracking_data$inout) dates<-read.csv("dates_inout.csv") #dates of tagging dates<-arrange(dates, dates$inout, dates$df_dates) #order data by direction (in/out) #dates<-arrange(dates, dates$df_dates) #order data by date #View(dates) #observe the dates of tagging --> dates for which SST values will be obtained dates2011 <-subset(dates, dates$Year=="2011") dates2012 <-subset(dates, dates$Year=="2012") dates2013 <-subset(dates, dates$Year=="2013") f1<- function(x){ t1<- nc_open(x) t2<-list() t2$lat<-ncvar_get(t1, "latitude") t2$lon<-ncvar_get(t1, "longitude") t2$time<-ncvar_get(t1, "time") t2$SST<-ncvar_get(t1, "thetao") t2<-na.omit(t2) nc_close (t1) dimnames(t2$SST) <- list(long = t2$lon, lat = t2$lat, date = t2$time) t3 <- melt(t2$SST, value.name = "sst") }#funcion para crear una tabla a partir del NC con valores de SST, coordenadas y fechas #colourss- col.pal<- rainbow(150, end=0.8, rev=TRUE) ##preparando mapa de la zona de estudio---- mapBuffer <-0.1 worldMap <- ne_download(scale = "medium", returnclass = "sp") worldMapCroped <- crop(worldMap,extent(-9,-5,35,37.5)) #coordenadas del área de estudio mainMap <- ggplot() + geom_polygon(data = worldMapCroped, aes(x = long, y = lat, group = group)) + coord_fixed() + theme(axis.ticks=element_blank()) + ylab("Latitude") + xlab("Longitude")#mapa del area de estudio ##### 2013 #### ####track_in_2013 ###IN : #May: 21-28th, 29th, 31st (in) June: 4th (in)---- ##subsetting tracking data per day trackData2013<- subset(tracking_data, Year=="2013") arrange(trackData2013,trackData2013$id, trackData2013$yday) ?arrange track_in_2013 <- 
subset(trackData2013, inout == "in") head(track_in_2013) arrange(track_in_2013,yday) range(track_in_2013$Obs.SST) range(track_in_2013$yday) # IN 141 = 21/05 trackmay21<- subset(track_in_2013,track_in_2013$yday=="141") #1 trackmay22<- subset(track_in_2013,track_in_2013$yday=="142") #2 trackmay23<- subset(track_in_2013, track_in_2013$yday=="143") #3 trackmay24<- subset(track_in_2013, track_in_2013$yday=="144") #1 trackmay25<- subset(track_in_2013, track_in_2013$yday=="145") #3 trackmay26<- subset(track_in_2013, track_in_2013$yday=="146") #2 trackmay27<- subset(track_in_2013, track_in_2013$yday=="147") #2 trackmay28<- subset(track_in_2013, track_in_2013$yday=="148") #0 trackmay29<- subset(track_in_2013, track_in_2013$yday=="149") #2 trackmay31<- subset(track_in_2013, track_in_2013$yday=="151") #2 trackjune4<- subset(track_in_2013, track_in_2013$yday=="153") #1 #IBI model data #IN #May: 21-28th, 29th, 31st (in) may1 <- "datos SST/21-28may2013.nc" may29 <- "datos SST/29may2013.nc" may31 <- "datos SST/31may2013.nc" #june <-"datos SST/2june2013.nc" may1<-f1(may1)#need to subset per day (days of interes = 21, 22, 23, 25, 26, 27) sstmay29<- f1(may29) sstmay31<-f1(may31) #june4 <-f1(june) sstin2013 <- c(may1, sstmay29, sstmay31) #21-28 May may1$ddmmyy<-as.Date(as.POSIXct(may1$date*60*60, origin="1950-01-01")) may1$yday<-yday(may1$ddmmyy) may21<-subset(may1, may1$yday=="141") may22<-subset(may1, may1$yday=="142") may23<-subset(may1, may1$yday=="143") may25<-subset(may1, may1$yday=="145") may26<-subset(may1, may1$yday=="146") may27<-subset(may1, may1$yday=="147") #mapping SST and tracks + plotting SST distribution---- ?stat_contour #may 21 mainMap + geom_raster(data =may21, aes(x = long, y = lat, fill = sst), interpolate = TRUE) + stat_contour(data =may21, aes(x = long, y = lat, z= sst), binwidth= 1, color="black") + scale_fill_gradientn(limits=c(14, 19.5), colours = col.pal, na.value = NA) + theme_bw() + geom_point(data = trackmay21, aes(x = Long, y = Lat, color 
=factor(trackmay21$id)), size=3 , show.legend=F) + geom_point(data = trackmay21, aes(x = Long, y = Lat, color = factor(trackmay21$id)), size=12, alpha=0.5, show.legend=F) + labs(title = "SST and BF tuna locations", fill = "SST")+labs(subtitle = " 21/05/2013 | Direction: IN") #con tracks hist(may21$sst, col = "red", breaks = 50, xlim = c(14,19), main = "2013 SST May 21th", xlab= "SST") rug(trackmay21$Obs.SST, ticksize = 0.05, side = 1,lwd = 5, col = "blue") legend("topright", c("Model", "BF tracks"), fill=c("red", "blue")) #may 22 mainMap + geom_raster(data =may22, aes(x = long, y = lat, fill = sst), interpolate = TRUE) + stat_contour(data =may22, aes(x = long, y = lat, z= sst), binwidth= 1, color="black") + scale_fill_gradientn(limits=c(14, 19.5), colours = col.pal, na.value = NA) + theme_bw() + geom_point(data = trackmay22, aes(x = Long, y = Lat, color =factor(trackmay22$id)), size=3 , show.legend=F) + geom_point(data = trackmay22, aes(x = Long, y = Lat, color = factor(trackmay22$id)), size=12, alpha=0.5, show.legend=F) + labs(title = "SST and BF tuna locations", fill = "SST")+labs(subtitle = " 22/05/2013 | Direction: IN") #con tracks hist(may22$sst, col = "red", breaks = 50, xlim = c(14,20), main = "2013 SST May 22th", xlab= "SST") rug(trackmay22$Obs.SST, ticksize = 0.05, side = 1,lwd = 5, col = "blue") legend("topright", c("Model", "BF tracks"), fill=c("red", "blue")) #may 23 mainMap + geom_raster(data =may23, aes(x = long, y = lat, fill = sst), interpolate = TRUE) + stat_contour(data =may23, aes(x = long, y = lat, z= sst), binwidth= 1, color="black") + scale_fill_gradientn(limits=c(14, 19.5), colours = col.pal, na.value = NA) + theme_bw() + geom_point(data = trackmay23, aes(x = Long, y = Lat, color =factor(trackmay23$id)), size=3 , show.legend=F) + geom_point(data = trackmay23, aes(x = Long, y = Lat, color = factor(trackmay23$id)), size=12, alpha=0.5, show.legend=F) + geom_text(data = trackmay23, aes(x = Long, y = Lat, label= trackmay23$id) , color= "black") + 
labs(title = "SST and BF tuna locations", fill = "SST")+labs(subtitle = " 23/05/2013 | Direction: IN") #con tracks hist(may23$sst, col = "red", breaks = 50, xlim = c(14,20), main = "2013 SST May 23th", xlab= "SST") rug(trackmay23$Obs.SST, ticksize = 0.05, side = 1,lwd = 5, col = "blue") legend("topright", c("Model", "BF tracks"), fill=c("red", "blue")) #may 25 mainMap + geom_raster(data =may25, aes(x = long, y = lat, fill = sst), interpolate = TRUE) + stat_contour(data =may25, aes(x = long, y = lat, z= sst), color="black", binwidth= 1) + scale_fill_gradientn(limits=c(14, 19.5), colours = col.pal, na.value = NA) + theme_bw() + geom_point(data = trackmay25, aes(x = Long, y = Lat, color =factor(trackmay25$id)), size=3 , show.legend=F) + geom_point(data = trackmay25, aes(x = Long, y = Lat, color = factor(trackmay25$id)), size=12, alpha=0.5, show.legend=F) + geom_text(data = trackmay25, aes(x = Long, y = Lat, label= trackmay25$id) , color= "black") + labs(title = "SST and BF tuna locations", fill = "SST")+labs(subtitle = " 25/05/2013 | Direction: IN") #con tracks hist(may25$sst, col = "red", breaks = 50, xlim = c(14,20), main = "2013 SST May 25th", xlab= "SST") rug(trackmay25$Obs.SST, ticksize = 0.05, side = 1,lwd = 5, col = "blue") legend("topright", c("Model", "BF tracks"), fill=c("red", "blue")) #may 26 mainMap + geom_raster(data =may26, aes(x = long, y = lat, fill = sst), interpolate = TRUE) + scale_fill_gradientn(limits=c(14, 19.5), colours = col.pal, na.value = NA) + theme_bw() + stat_contour(data =may26, aes(x = long, y = lat, z= sst), binwidth= 1, color="black") + geom_point(data = trackmay26, aes(x = Long, y = Lat, color =factor(trackmay26$id)), size=3 , show.legend=F) + geom_point(data = trackmay26, aes(x = Long, y = Lat, color = factor(trackmay26$id)), size=12, alpha=0.5, show.legend=F) + labs(title = "SST and BF tuna locations", fill = "SST")+labs(subtitle = " 26/05/2013 | Direction: IN") #con tracks hist(may26$sst, col = "red", breaks = 50, xlim = c(14,20), 
main = "2013 SST May 26th", xlab= "SST") rug(trackmay26$Obs.SST, ticksize = 0.05, side = 1,lwd = 5, col = "blue") legend("topright", c("Model", "BF tracks"), fill=c("red", "blue")) #may 27 mainMap + geom_raster(data =may27, aes(x = long, y = lat, fill = sst), interpolate = TRUE) + scale_fill_gradientn(limits=c(14, 19.5), colours = col.pal, na.value = NA) + theme_bw() + stat_contour(data =may27, aes(x = long, y = lat, z= sst), binwidth= 1, color="black") + geom_point(data = trackmay27, aes(x = Long, y = Lat, color =factor(trackmay27$id)), size=3 , show.legend=F) + geom_point(data = trackmay27, aes(x = Long, y = Lat, color = factor(trackmay27$id)), size=12, alpha=0.5, show.legend=F) + labs(title = "SST and BF tuna locations", fill = "SST") + labs(subtitle = " 27/05/2013 | Direction: IN") #con tracks hist(may27$sst, col = "red", breaks = 50, xlim = c(14,20), main = "2013 SST May 27th", xlab= "SST") rug(trackmay27$Obs.SST, ticksize = 0.05, side = 1,lwd = 5, col = "blue") legend("topright", c("Model", "BF tracks"), fill=c("red", "blue")) #may 29 mainMap + geom_raster(data =sstmay29, aes(x = long, y = lat, fill = sstmay29$sst), interpolate = TRUE) + stat_contour(data =sstmay29, aes(x = long, y = lat, z= sst), binwidth= 1, color="black") + scale_fill_gradientn(limits=c(14, 19.5), colours = col.pal, na.value = NA) + theme_bw() + geom_point(data = trackmay29, aes(x = Long, y = Lat), size=3 , color = trackmay29$id, alpha=3) + geom_point(data = trackmay29, aes(x = Long, y = Lat), size=12, alpha=0.5 , color = trackmay29$id) + labs(title = "SST and BF tuna locations", fill = "SST")+labs(subtitle = " 29/05/2013 | Direction: IN") #con tracks hist(sstmay29$sst, col = "red", breaks = 50, xlim = c(14,20), main = "2013 SST May 29th", xlab= "SST") rug(trackmay29$Obs.SST, ticksize = 0.05, side = 1,lwd = 5, col = "blue") legend("topright", c("Model", "BF tracks"), fill=c("red", "blue")) #may 31 mainMap + geom_raster(data =sstmay31, aes(x = long, y = lat, fill = sstmay31$sst), 
interpolate = TRUE) + stat_contour(data =sstmay31, aes(x = long, y = lat, z= sst), binwidth= 1, color="black") + scale_fill_gradientn(limits=c(14, 19.5), colours = col.pal, na.value = NA) + theme_bw() + geom_point(data = trackmay31, aes(x = Long, y = Lat), size=3 , color = trackmay31$id) + geom_point(data = trackmay31, aes(x = Long, y = Lat), size=12, alpha=0.5 , color = trackmay31$id) + labs(title = "SST and BF tuna locations", fill = "SST")+labs(subtitle = " 31/05/2013 | Direction: IN") #con tracks hist(sstmay31$sst, col = "red", breaks = 50, xlim = c(14,20), main = "2013 SST May 31st", xlab= "SST") rug(trackmay29$Obs.SST, ticksize = 0.05, side = 1,lwd = 5, col = "blue") legend("topright", c("Model", "BF tracks"), fill=c("red", "blue")) #### total 2013 in sstin2013<-data.frame(sstin2013) mainMap + geom_raster(data =sstin2013, aes(x = long, y = lat, fill = sstin2013$sst), interpolate = TRUE) + scale_fill_gradientn(limits=c(14, 19.5), colours = col.pal, na.value = NA) + stat_contour(data =sstin2013, aes(x = long, y = lat, z= sst), binwidth= 1, color="black") + theme_bw() + geom_point(data = track_in_2013, aes(x = Long, y = Lat), size=3, colour = track_in_2013$id, alpha=5) + geom_point(data = track_in_2013, aes(x = Long, y = Lat), size=7, alpha=0.5 , color = track_in_2013$id) + labs(title = "SST and BF tuna locations", fill = "SST")+labs(subtitle = " 2013 | Direction: IN") #con tracks hist(sstin2013$sst, col = "red", breaks = 100, xlim = c(14,20), main = "SST 2013", xlab= "SST") rug(track_in_2013$Obs.SST, ticksize = 0.05, side = 1,lwd = 5, col = "blue") legend("topright", c("Model", "BF tracks"), fill=c("red", "blue")) ### c1 <- rgb(216,238,192, max = 255, alpha = 120, names = "lt.green") c2 <- rgb(255,100,100, max = 255, alpha =100, names = "lt.red") par(mar=c(4, 4, 2, 4)) hist(sstin2013$sst, col = "red", breaks = 70, xlim = c(14,26), main = "SST 2013 | Direction: IN", xlab= "") par(new=TRUE) ## Allow a second plot on the same graph hist(track_in_2013$Obs.SST, 
breaks =50, xlab="", ylab="", ylim=c(0,10), xlim=c(14, 26), main = "", axes=FALSE, col = "blue") mtext("Tuna Obs.",side=4,col="blue",line=2) axis(4, col="blue",col.axis="blue",las=1) mtext("SST",side=1,col="black",line=2.5) legend("topleft", c("Model", "BF tracks"), fill=c("red", "blue")) #### OUT ---- subset(dates2013, dates2013$inout=="out") #subsetting tracking data per day trackData2013<- subset(tracking_data, Year=="2013") track_out_2013 <- subset(trackData2013, inout == "out") head(track_out_2013) arrange(track_out_2013,yday) ##May: 21-28th, 29th, 31st #June: 16, 18, 20-21, 23-24 (out) #june2 <- "datos SST/6june2013.nc" #june3 <- "datos SST/8june2013.nc" #june4 <- "datos SST/14june2013.nc" june5 <- "datos SST/16june2013.nc" june6 <- "datos SST/18june2013.nc" june7 <- "datos SST/20&21june2013.nc" june8 <- "datos SST/23&24june2013.nc" #june6 <- f1(june2) #june8 <- f1(june3) #june14 <- f1(june4) june16 <- f1(june5) june18 <- f1(june6) june2021 <- f1(june7) june2324 <- f1(june8) #July: 5, 7, 9, 13, 17, 19, 21 (out) july1 <- "datos SST/5july2013.nc" july2 <- "datos SST/7july2013.nc" july3 <- "datos SST/9july2013.nc" july4 <- "datos SST/13july2013.nc" july5 <- "datos SST/17july2013.nc" july6 <- "datos SST/19july2013.nc" july7 <- "datos SST/21july2013.nc" july5<- f1(july1) july7<- f1(july2) july9<- f1(july3) july13<- f1(july4) july17<- f1(july5) july19<- f1(july6) july21<- f1(july7) out_2013 <- c(may1, june16, june18, june2021, june2324, july5, july7, july9, july13, july17, july19, july21) range(out_2013$sst) head(out_2013) total_2013 <- c(out_2013, sstin2013) #histogram with all dates and tracks out out_2013 <- data.frame(out_2013) mainMap + geom_raster(data =out_2013, aes(x = long, y = lat, fill = out_2013$sst), interpolate = TRUE) + scale_fill_gradientn(limits=c(14, 19.5), colours = col.pal, na.value = NA) + stat_contour(data =out_2013, aes(x = long, y = lat, z= sst), binwidth= 1, color="black") + theme_bw() + geom_point(data = track_out_2013, aes(x = Long, y = 
Lat), size=3, colour = track_out_2013$id, alpha=5) + geom_point(data = track_out_2013, aes(x = Long, y = Lat), size=7, alpha=0.5 , color = track_out_2013$id) + labs(title = "SST and BF tuna locations", fill = "SST")+labs(subtitle = " 2013 | Direction: OUT") #con tracks ### c1 <- rgb(216,238,192, max = 255, alpha = 120, names = "lt.green") c2 <- rgb(255,100,100, max = 255, alpha =100, names = "lt.red") par(mar=c(4, 4, 2, 4)) hist(out_2013$sst, col = "red", breaks = 100, xlim = c(14,26), main = "SST 2013 | Direction: OUT", xlab= "") par(new=TRUE) ## Allow a second plot on the same graph hist(track_out_2013$Obs.SST, breaks =50, xlab="", ylab="", ylim=c(0,10), xlim=c(14,26), main = "", axes=FALSE, col = "blue") mtext("Tuna Obs.",side=4,col="blue",line=2) axis(4, col="blue",col.axis="blue",las=1) mtext("SST",side=1,col="black",line=2.5) legend("topleft", c("Model", "BF tracks"), fill=c("red", "blue")) #. hist(out_2013$sst, col = "red", breaks = 1000, xlim = c(14,24), main = "2013 SST | OUT", xlab= "SST") rug(track_out_2013$Obs.SST, ticksize = 0.05, side = 1,lwd = 5, col = "blue") legend("topright", c("Model", "BF tracks"), fill=c("red", "blue")) ### total 2013---- par(mar=c(4, 4, 2, 4)) hist(total_2013$sst, col = "red", breaks = 100, xlim = c(14,26), main = "SST 2013", xlab= "") par(new=TRUE) ## Allow a second plot on the same graph hist(trackData2013$Obs.SST, breaks =50, xlab="", ylab="", ylim=c(0,10), xlim=c(14,26), main = "", axes=FALSE, col = "blue") mtext("Tuna Obs.",side=4,col="blue",line=2) axis(4, col="blue",col.axis="blue",las=1) mtext("SST",side=1,col="black",line=2.5) arrange(trackData2013, by_group= df_dates) # 2012 ---- ## IN ---- #15-19 May (in) #tracks trackData2012<- subset(tracking_data, Year=="2012") track_in_2012 <- subset(trackData2012, inout == "in") head(track_in_2012) arrange(track_in_2012,yday) # 2012-05-15 = 136 track136 <- subset(track_in_2012, track_in_2012$yday=="136") #6 track137 <- subset(track_in_2012, track_in_2012$yday=="137") #9 
track138 <- subset(track_in_2012, track_in_2012$yday=="138") #5 track139 <- subset(track_in_2012, track_in_2012$yday=="139") #4 track140 <- subset(track_in_2012, track_in_2012$yday=="140") #1 #SST #15-19 May (in) may2012<- "datos SST/15-19may2012.nc" may12<-f1(may2012) may12$ddmmyy<-as.Date(as.POSIXct(may12$date*60*60, origin="1950-01-01")) may12$yday<-yday(may12$ddmmyy) may15<-subset(may12, may12$yday=="136") may16<-subset(may12, may12$yday=="137") may17<-subset(may12, may12$yday=="138") may18<-subset(may12, may12$yday=="139") may19<-subset(may12, may12$yday=="140") # SST distribution for 2012 + sst from tuna tracks par(mar=c(4, 4, 2, 4)) hist(may12$sst, col = "red", breaks = 50, xlim = c(14,26), main = "SST 2012 | Direction: IN", xlab= "") par(new=TRUE) ## Allow a second plot on the same graph hist(track_in_2012$Obs.SST, breaks =50, xlab="", ylab="", ylim=c(0,10), xlim=c(14, 26), main = "", axes=FALSE, col = "blue") mtext("Tuna Obs.",side=4,col="blue",line=2) axis(4, col="blue",col.axis="blue",las=1) mtext("SST",side=1,col="black",line=2.5) legend("topleft", c("Model", "BF tracks"), fill=c("red", "blue")) #mapping SST and tracks + plotting SST distribution for 2012 ---- #may 15 mainMap + geom_raster(data =may15, aes(x = long, y = lat, fill = sst), interpolate = TRUE) + scale_fill_gradientn(limits=c(16, 22), colours = col.pal, na.value = NA) + theme_bw() + stat_contour(data =may15, aes(x = long, y = lat, z= sst), binwidth= 1, color="black") + geom_point(data = track136, aes(x = Long, y = Lat, color =factor(track136$id)), size=3 , show.legend=F) + geom_point(data = track136, aes(x = Long, y = Lat, color = factor(track136$id)), size=12, alpha=0.5, show.legend=F) + labs(title = "SST and BF tuna locations", fill = "SST")+ labs(subtitle = " 15/05/2012 | Direction: IN") #con tracks hist(may15$sst, col = "red", breaks = 50, xlim = c(16,22), main = "2012 SST May 15th", xlab= "SST") rug(track136$Obs.SST, ticksize = 0.05, side = 1,lwd = 5, col = "blue") legend("topright", 
c("Model", "BF tracks"), fill=c("red", "blue")) #may 16 mainMap + geom_raster(data =may16, aes(x = long, y = lat, fill = sst), interpolate = TRUE) + scale_fill_gradientn(limits=c(16, 22), colours = col.pal, na.value = NA) + theme_bw() + stat_contour(data =may16, aes(x = long, y = lat, z= sst), binwidth= 1, color="black") + geom_point(data = track137, aes(x = Long, y = Lat, color =factor(track137$id)), size=3 , show.legend=F) + geom_point(data = track137, aes(x = Long, y = Lat, color = factor(track137$id)), size=12, alpha=0.5, show.legend=F) + labs(title = "SST and BF tuna locations", fill = "SST")+ labs(subtitle = " 16/05/2012 | Direction: IN") #con tracks hist(may16$sst, col = "red", breaks = 50, xlim = c(16,22), main = "2012 SST May 16th", xlab= "SST") rug(track137$Obs.SST, ticksize = 0.05, side = 1,lwd = 5, col = "blue") legend("topright", c("Model", "BF tracks"), fill=c("red", "blue")) #may 17 mainMap + geom_raster(data =may17, aes(x = long, y = lat, fill = sst), interpolate = TRUE) + scale_fill_gradientn(limits=c(16, 22), colours = col.pal, na.value = NA) + theme_bw() + stat_contour(data =may17, aes(x = long, y = lat, z= sst), binwidth= 1, color="black") + geom_point(data = track138, aes(x = Long, y = Lat, color =factor(track138$id)), size=3 , show.legend=F) + geom_point(data = track138, aes(x = Long, y = Lat, color = factor(track138$id)), size=12, alpha=0.5, show.legend=F) + labs(title = "SST and BF tuna locations", fill = "SST")+ labs(subtitle = " 17/05/2012 | Direction: IN") #con tracks hist(may17$sst, col = "red", breaks = 50, xlim = c(16,22), main = "2012 SST May 17th", xlab= "SST") rug(track138$Obs.SST, ticksize = 0.05, side = 1,lwd = 5, col = "blue") legend("topright", c("Model", "BF tracks"), fill=c("red", "blue")) #may 18 mainMap + geom_raster(data =may18, aes(x = long, y = lat, fill = sst), interpolate = TRUE) + scale_fill_gradientn(limits=c(16, 22), colours = col.pal, na.value = NA) + theme_bw() + stat_contour(data =may18, aes(x = long, y = lat, z= 
sst), binwidth= 1, color="black") + geom_point(data = track139, aes(x = Long, y = Lat, color =factor(track139$id)), size=3 , show.legend=F) + geom_point(data = track139, aes(x = Long, y = Lat, color = factor(track139$id)), size=12, alpha=0.5, show.legend=F) + labs(title = "SST and BF tuna locations", fill = "SST")+ labs(subtitle = " 18/05/2012 | Direction: IN") #con tracks hist(may18$sst, col = "red", breaks = 50, xlim = c(16,22), main = "2012 SST May 18th", xlab= "SST") rug(track139$Obs.SST, ticksize = 0.05, side = 1,lwd = 5, col = "blue") legend("topright", c("Model", "BF tracks"), fill=c("red", "blue")) #may 19 mainMap + geom_raster(data =may19, aes(x = long, y = lat, fill = sst), interpolate = TRUE) + scale_fill_gradientn(limits=c(16, 22), colours = col.pal, na.value = NA) + theme_bw() + stat_contour(data =may19, aes(x = long, y = lat, z= sst), binwidth= 1, color="black") + geom_point(data = track140, aes(x = Long, y = Lat, color =factor(track140$id)), size=3 , show.legend=F) + geom_point(data = track140, aes(x = Long, y = Lat, color = factor(track140$id)), size=12, alpha=0.5, show.legend=F) + labs(title = "SST and BF tuna locations", fill = "SST")+ labs(subtitle = " 19/05/2012 | Direction: IN") #con tracks hist(may19$sst, col = "red", breaks = 50, xlim = c(16,22), main = "2012 SST May 19th", xlab= "SST") rug(track140$Obs.SST, ticksize = 0.05, side = 1,lwd = 5, col = "blue") legend("topright", c("Model", "BF tracks"), fill=c("red", "blue")) #all days mainMap + geom_raster(data =may12, aes(x = long, y = lat, fill = sst), interpolate = TRUE) + scale_fill_gradientn(limits=c(16, 22), colours = col.pal, na.value = NA) + theme_bw() + stat_contour(data =may12, aes(x = long, y = lat, z= sst), binwidth= 1, color="black") + geom_point(data = track_in_2012, aes(x = Long, y = Lat, color =factor(track_in_2012$id)), size=3 , show.legend=F) + geom_point(data = track_in_2012, aes(x = Long, y = Lat, color = factor(track_in_2012$id)), size=12, alpha=0.5, show.legend=F) + 
labs(title = "SST and BF tuna locations", fill = "SST")+ labs(subtitle = " 19/05/2012 | Direction: IN") #con tracks hist(may12$sst, col = "red", breaks = 50, xlim = c(16,22), main = "2012 SST May 2012", xlab= "SST") rug(track_in_2012$Obs.SST, ticksize = 0.05, side = 1,lwd = 5, col = "blue") legend("topright", c("Model", "BF tracks"), fill=c("red", "blue")) range(track_in_2012$Obs.SST) # OUT ---- #July: 10, 14-17 (out) july_10_12<- "datos SST/10july2012.nc" july_2012<- "datos SST/14-17july2012.nc" j10<-f1(july_10_12) j2012<- f1(july_2012) july2012out <- c(j10, j2012 ) track_out_2012 <- subset(trackData2012, inout == "out") head(track_out_2012) arrange(track_out_2012,yday) # track192 <- subset(track_in_2012, track_in_2012$yday=="192") track196 <- subset(track_in_2012, track_in_2012$yday=="196") track197 <- subset(track_in_2012, track_in_2012$yday=="197") track198 <- subset(track_in_2012, track_in_2012$yday=="198") track199 <- subset(track_in_2012, track_in_2012$yday=="199") # distributions out par(mar=c(4, 4, 2, 4)) hist(july2012out$sst, col = "red", breaks = 100, xlim = c(14,26), main = "SST 2012 | Direction: OUT", xlab= "") par(new=TRUE) ## Allow a second plot on the same graph hist(track_out_2012$Obs.SST, breaks =50, xlab="", ylab="", ylim=c(0,10), xlim=c(14, 26), main = "", axes=FALSE, col = "blue") mtext("Tuna Obs.",side=4,col="blue",line=2) axis(4, col="blue",col.axis="blue",las=1) mtext("SST",side=1,col="black",line=2.5) legend("topleft", c("Model", "BF tracks"), fill=c("red", "blue")) # 2011 ---- trackData2011<- subset(tracking_data, Year=="2011") track_in_2011 <- subset(trackData2011, inout == "in") track_out_2011 <- subset(trackData2011, inout == "out") SST_fun<-function(x){ # x= dataset in format nc including SST values (variable name = "thetao") SST<- ncvar_get(x, "thetao") SST<- as.vector(SST) SST<- na.omit(SST) return(SST) } may2011<- nc_open("datos SST/SST_2011_05_26-29.nc") may2011_IN<-SST_fun(may2011) #in #|2011| July 23rd, 28th and 30th | OUT | 
july_23_11<- nc_open("datos SST/23july2011.nc") july_28_11<- nc_open("datos SST/28july2011.nc") july_30_11<- nc_open("datos SST/30july2011.nc") SST_july_23_11<-SST_fun(july_23_11) SST_july_28_11<-SST_fun(july_28_11) SST_july_30_11<-SST_fun(july_30_11) july2011_OUT<- c(SST_july_23_11, SST_july_28_11, SST_july_30_11) #### Histrograms #IN par(mar=c(4, 4, 2, 4)) hist(may2011_IN, col = "red", breaks = 50, xlim = c(14,26), main = "SST 2011 | Direction: IN", xlab= "") par(new=TRUE) ## Allow a second plot on the same graph hist(track_in_2011$Obs.SST, breaks =50, xlab="", ylab="", ylim=c(0,10), xlim=c(14, 24), main = "", axes=FALSE, col = "blue") mtext("Tuna Obs.",side=4,col="blue",line=2) axis(4, col="blue",col.axis="blue",las=1) mtext("SST",side=1,col="black",line=2.5) legend("topleft", c("Model", "BF tracks"), fill=c("red", "blue")) #???OUT par(mar=c(4, 4, 2, 4)) hist(july2011_OUT, col = "red", breaks = 100, xlim = c(14,26), main = "SST 2011 | Direction: OUT", xlab= "") par(new=TRUE) ## Allow a second plot on the same graph hist(track_out_2011$Obs.SST, breaks =25, xlab="", ylab="", ylim=c(0,10), xlim=c(14, 26), main = "", axes=FALSE, col = "blue") mtext("Tuna Obs.",side=4,col="blue",line=2) axis(4, col="blue",col.axis="blue",las=1) mtext("SST",side=1,col="black",line=2.5) legend("topleft", c("Model", "BF tracks"), fill=c("red", "blue"))
580d274ddb5999be01225a914c3d2c48b2c2cc5d
84af362583c9562a8a5225986d877b6a5b26ef0a
/tests/testthat/test-yf_collection.R
e82efec45e41ef6b56268770648b3c1a73863301
[ "MIT" ]
permissive
ropensci/yfR
945ef3f23a6f04560072d7436dd5c28e0f809462
b2f2c5c9f933e933821b5a92763c98f155b401bd
refs/heads/main
2023-05-23T19:12:44.048601
2023-02-16T10:48:15
2023-02-16T10:48:15
375,024,106
19
6
NOASSERTION
2023-01-30T17:18:11
2021-06-08T13:44:11
HTML
UTF-8
R
false
false
2,446
r
test-yf_collection.R
# Tests for yfR's index and collection helpers.
#
# Network-dependent tests are skipped when offline or on CRAN; the
# covr::in_covr() guard keeps them running during coverage builds.

library(testthat)
library(yfR)

test_that("Test of yf_index_list()", {
  available_indices <- yf_index_list()

  # is.character() is the idiomatic type check; comparing class(x) to a
  # string breaks for objects carrying more than one class attribute.
  expect_true(is.character(available_indices))
})

# Shared expectations for an index-composition result: it must be a
# non-empty tibble. (Name kept as-is -- note the historical "testhat"
# typo -- so any external references remain valid.)
testhat_index_comp <- function(df_in) {
  expect_true(tibble::is_tibble(df_in))
  expect_true(nrow(df_in) > 0)
}

test_that("Test of yf_index_composition() -- using web", {
  if (!covr::in_covr()) {
    skip_if_offline()
    skip_on_cran() # too heavy for cran
  }

  available_indices <- yf_index_list()

  for (i_index in available_indices) {
    df_index <- yf_index_composition(i_index, force_fallback = FALSE)
    testhat_index_comp(df_index)
  }
})

test_that("Test of yf_index_composition() -- using fallback files", {
  available_indices <- yf_index_list()

  for (i_index in available_indices) {
    df_index <- yf_index_composition(i_index, force_fallback = TRUE)
    testhat_index_comp(df_index)
  }
})

test_that("Test of yf_collection_get() -- single session", {
  if (!covr::in_covr()) {
    skip_if_offline()
    skip_on_cran() # too heavy for cran
  }

  # parallel test for collections
  to_test_collection <- "testthat-collection"

  df <- yf_collection_get(
    collection = to_test_collection,
    first_date = Sys.Date() - 30,
    last_date = Sys.Date(),
    do_parallel = FALSE,
    be_quiet = TRUE
  )

  expect_true(nrow(df) > 0)
})

test_that("Test of yf_collection_get() -- multi-session", {
  # 20220501 yf now sets api limits, which invalidates any parallel computation
  skip(
    paste0(
      "Skipping since parallel is not supported due to YF api limits, ",
      "and collections are large datasets for single session download."
    )
  )

  # NOTE: everything below is unreachable until the skip() above is lifted;
  # it is kept so the parallel path can be re-enabled easily.
  n_workers <- floor(parallel::detectCores() / 2)
  future::plan(future::multisession, workers = n_workers)

  available_collections <- yf_get_available_collections()

  if (!covr::in_covr()) {
    skip_if_offline()
    skip_on_cran() # too heavy for cran
  }

  for (i_collection in available_collections) {
    df <- yf_collection_get(
      collection = i_collection,
      first_date = Sys.Date() - 30,
      last_date = Sys.Date(),
      do_parallel = TRUE,
      be_quiet = TRUE
    )

    expect_true(nrow(df) > 0)
  }
})
a547cea7b379b3e6dcb0d8280f82e864e9ed2e31
60668dba3bda50b082e200e8ae6c150e0c9f7bc3
/man/tapering.Rd
885e1204425cf86954dd1e5573b48707781cf678
[]
no_license
qizhu21/CVTuningCov
894b698876bd497fcadfc2e8c8aa1fe0304e67d6
c9f4785734dafd35c5d82ac2ffa57ea9be2fc12d
refs/heads/master
2021-01-04T03:09:20.856365
2014-07-31T00:00:00
2014-07-31T00:00:00
null
0
0
null
null
null
null
UTF-8
R
false
false
1,053
rd
tapering.Rd
\name{tapering}
\alias{tapering}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ A Tapering Operator on A Matrix }
\description{
Generate a tapering operator with a given dimension and tuning parameter.
Multiplying it with a covariance matrix componentwise can provide a
regularized estimator with the tapering method.
}
\usage{
tapering(p, k = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{p}{ the dimension of a covariance matrix. }
  \item{k}{ the tuning parameter of the tapering method. The default value is 1. }
}
\value{ A \code{p*p} matrix. }
\references{
Cai, T, Zhang, CH and Zhou, H, Optimal rates of convergence for covariance
matrix estimation, Annals of Statistics, 38, 2118-2144 (2010).
}
\author{ Binhuan Wang }
\examples{ p <- 5; W <- tapering(p,k=2) ; W; }
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ tapering }
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
1cd92ed7038f3041a333ee5f74f8ffbf46561e74
d79d64ae23f7005eb76ca48f66527d7d0cb0917b
/analysis.R
9f041c2b5a03ceddd39b2128edc35f111446742d
[]
no_license
adamwilkinsonJCU/workshop1
e03acbfa6372bf1c814bf9a885c58ef0553864ff
f2c4e3c89fe61c958bb64fc253a5fe015e910cd4
refs/heads/master
2020-04-28T02:31:00.550781
2019-03-11T01:27:20
2019-03-11T01:27:20
174,901,213
1
0
null
null
null
null
UTF-8
R
false
false
77
r
analysis.R
# Workshop demo: simulate noisy data and summarise it.

# 10 evenly spaced x values on [1, 10]. The original `len = 1` produced a
# single point, which made plot(x, y) fail on mismatched lengths (1 vs 10).
x <- seq(1, 10, length.out = 10)

# Simulated response: constant 40 * 2 plus Gaussian noise (mean 0, sd 5).
y <- 40 * 2 + rnorm(10, 0, 5)

# Scatterplot of the simulated data.
plot(x, y)

# Numeric summaries (fixed typo: `sumary` -> `summary`; also removed the
# stray trailing token `Change`, which raised "object not found" at runtime).
summary(x)
mean(x)
cdafed3a87a434059761331a13ab582ec3b17f61
b0d0234d31bd230cca36b3789414ccfa2a54f7d7
/R/parttree.R
679f9eac6120390a6e2277f66f272e67cf453be1
[ "MIT" ]
permissive
ClinicoPath/parttree
31cd67a02a2d9d90b946fcfa8b1cf272e8ef4300
64c148d47074b66140bc63a1c54179d4f5569e95
refs/heads/master
2022-12-11T18:04:21.511776
2020-08-27T23:52:42
2020-08-27T23:52:42
null
0
0
null
null
null
null
UTF-8
R
false
false
5,639
r
parttree.R
#' @title Convert a decision tree into a data frame of partition coordinates
#'
#' @description Extracts the terminal leaf nodes of a decision tree with one or
#'   two predictor variables. These leaf nodes are then converted into a data
#'   frame, where each row represents a partition that can easily be plotted in
#'   coordinate space.
#' @param tree An \code{\link[rpart]{rpart.object}}, or an object of compatible
#'   type (e.g. a decision tree constructed via the `parsnip` or `mlr3`
#'   front-ends).
#' @param keep_as_dt Logical. The function relies on `data.table` for internal
#'   data manipulation. But it will coerce the final return object into a
#'   regular data frame (default behaviour) unless the user specifies `TRUE`.
#' @param flipaxes Logical. The function will automatically set the yaxis
#'   variable as the first split variable in the tree provided unless
#'   the user specifies `TRUE`.
#' @details This function can be used with a regression or classification tree
#'   containing one or (at most) two continuous predictors.
#' @seealso \code{\link{geom_parttree()}}, \code{\link[rpart]{rpart}}.
#' @return A data frame comprising seven columns: the leaf node, its path, a set
#'   of coordinates understandable to `ggplot2` (i.e. xmin, xmax, ymin, ymax),
#'   and a final column corresponding to the predicted value for that leaf.
#' @importFrom data.table :=
#' @export
#' @examples
#' library(rpart)
#' parttree(rpart(Species ~ Petal.Length + Petal.Width, data=iris))
parttree = function(tree, keep_as_dt = FALSE, flipaxes = FALSE) {
  # Accept raw rpart fits plus the wrapped forms produced by the parsnip
  # ("_rpart") and mlr3 ("Learner*Rpart") front-ends; reject anything else.
  if (!(inherits(tree, "rpart") || inherits(tree, "_rpart") ||
        inherits(tree, "LearnerClassifRpart") || inherits(tree, "LearnerRegrRpart"))) {
    stop("The parttree() function only accepts rpart objects.\n",
         "The object that you provided is of class type: ", class(tree)[1])
  }

  ## parsnip front-end: unwrap the underlying rpart fit from $fit.
  if (inherits(tree, "_rpart")) {
    if (is.null(tree$fit)) {
      stop("No model detected.\n",
           "Did you forget to fit a model? See `?parsnip::fit`.")
    }
    tree = tree$fit
  }

  ## mlr3 front-end: unwrap the underlying rpart fit from $model.
  if (inherits(tree, "LearnerClassifRpart") || inherits(tree, "LearnerRegrRpart")) {
    if (is.null(tree$model)) {
      stop("No model detected.\n",
           "Did you forget to assign a learner? See `?mlr3::lrn`.")
    }
    tree = tree$model
  }

  # A root-only tree has no splits, hence no partitions to extract.
  if (nrow(tree$frame)<=1) {
    stop("Cannot plot single node tree.")
  }

  # Split variables actually used by the tree (rows other than leaves).
  vars = unique(as.character(tree$frame[tree$frame$var != "<leaf>", ]$var))
  if (length(vars)>2) {
    stop("Tree can only have one or two predictors.")
  }

  # rpart stores node numbers as the frame's row names; leaves are "<leaf>".
  nodes = rownames(tree$frame[tree$frame$var == "<leaf>", ])

  ## Get details about y variable for later
  ### y variable string (i.e. name)
  y_var = attr(tree$terms, "variables")[[2]]
  ### y values (per-leaf predictions from the frame)
  yvals = tree$frame[tree$frame$var == "<leaf>", ]$yval
  y_factored = attr(tree$terms, "dataClasses")[paste(y_var)] == "factor"
  ## factor equivalents (if factor): map integer codes back to level labels
  if (y_factored) {
    yvals = attr(tree, "ylevels")[yvals]
  }

  # For each leaf, turn its root-to-leaf rule path into a small data frame of
  # (node, var, split, side) rows.
  part_list = lapply(
    nodes,
    function(n) {
      # Character rules for this leaf, e.g. "Petal.Length< 2.45"; print.it
      # suppressed so nothing is echoed to the console.
      pv = rpart::path.rpart(tree, node=n, print.it = FALSE)
      node = as.integer(paste0(names(pv)))
      pv = unlist(pv)
      # One row per rule; the first element of pv is the "root" entry, so the
      # data frame has length(pv) - 1 rows.
      pd = data.frame(node = rep(node, times = length(pv)-1))
      # Drop the "root" entry, keeping only the actual split rules.
      pv = sapply(2:length(pv), function(i) pv[i])
      # pd$var = gsub("[[:punct:]].+", "", pv) ## Causes problems when punctuation mark in name, so use below
      # Variable name: everything before the comparison operator.
      pd$var = gsub("<.+|<=.+|>.+|>=.+", "", pv)
      # pd$split = gsub(".+[[:punct:]]", "", pv) ## Use below since we want to keep - and . in split values (e.g. -2.5)
      # Split value: trailing number, preserving a leading minus sign and
      # decimal point.
      pd$split = as.numeric(gsub(".+[^[:alnum:]\\-\\.\\s]", "", pv))
      # Comparison side: strip word characters and dots, leaving "<", ">=", etc.
      pd$side = gsub("\\w|\\.", "", pv)
      pd$yvals = yvals[nodes==node]
      return(pd)
    }
  )
  part_dt = data.table::rbindlist(part_list)

  ## Trim irrelevant parts of tree
  data.table::setorder(part_dt, node)
  # Human-readable rule path per node (all rules joined with " --> ").
  part_dt[, path := paste(var, side, split, collapse = " --> "), by = node]
  # Keep only the binding constraint per (node, var, side): the largest
  # lower bound (">" rules) and the smallest upper bound ("<" rules).
  part_dt = part_dt[, .SD[(grepl(">", side) & split == max(split)) | (grepl("<", side) & split == min(split))], keyby = .(node, var, side)]

  ## Get the coords data frame
  # First split variable maps to x by default; flipaxes swaps the roles.
  if (flipaxes) vars = rev(vars)
  part_coords = part_dt[, `:=`(split = as.double(split))][
    # Flag which axis each rule's variable belongs to (..vars reaches the
    # `vars` vector in the calling scope, per data.table's ..prefix rule).
    , `:=`(xvar = var == ..vars[1], yvar = var == ..vars[2])][
    # Rule -> rectangle edge: ">" rules set a min edge, "<" rules a max edge.
    , `:=`(xmin = ifelse(xvar, ifelse(grepl(">", side), split, NA), NA),
           xmax = ifelse(xvar, ifelse(grepl("<", side), split, NA), NA),
           ymin = ifelse(yvar, ifelse(grepl(">", side), split, NA), NA),
           ymax = ifelse(yvar, ifelse(grepl("<", side), split, NA), NA))][
    # Collapse to one row per leaf; mean() with na.rm just picks out the
    # single non-NA edge, yielding NaN where a leaf has no bound on a side.
    , .(xmin = mean(xmin, na.rm = TRUE), xmax = mean(xmax, na.rm = TRUE),
        ymin = mean(ymin, na.rm = TRUE), ymax = mean(ymax, na.rm = TRUE)),
    keyby = .(node, yvals, path)][
    # Unbounded sides (NaN, caught by is.na) extend to +/-Inf so ggplot2
    # fills the whole panel.
    , `:=`(xmin = ifelse(is.na(xmin), -Inf, xmin),
           xmax = ifelse(is.na(xmax), Inf, xmax),
           ymin = ifelse(is.na(ymin), -Inf, ymin),
           ymax = ifelse(is.na(ymax), Inf, ymax))]

  if (y_factored) {
    part_coords$yvals = as.factor(part_coords$yvals)
  }
  # Rename the prediction column after the model's actual response variable.
  colnames(part_coords) = gsub("yvals", y_var, colnames(part_coords))

  if (!keep_as_dt) {
    part_coords = as.data.frame(part_coords)
  }
  return(part_coords)
}
3c4940461a5b374c69153b7d3239a34efdca5e5f
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/BiBitR/examples/bibit.Rd.R
6267d207ab760705859d479076d3e9a91122ea01
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
472
r
bibit.Rd.R
# Auto-extracted example code for BiBitR::bibit (the BiBit biclustering
# algorithm). The example body is wrapped in a "Not run" guard (##D lines),
# so sourcing this file only attaches the package.
library(BiBitR)

### Name: bibit
### Title: The BiBit Algorithm
### Aliases: bibit

### ** Examples

## Not run: 
##D data <- matrix(sample(c(0,1),100*100,replace=TRUE,prob=c(0.9,0.1)),nrow=100,ncol=100)
##D data[1:10,1:10] <- 1 # BC1
##D data[11:20,11:20] <- 1 # BC2
##D data[21:30,21:30] <- 1 # BC3
##D data <- data[sample(1:nrow(data),nrow(data)),sample(1:ncol(data),ncol(data))]
##D result <- bibit(data,minr=5,minc=5)
##D result
##D MaxBC(result)
## End(Not run)
027fce138c288e707e22c4cc0b8b96ba24a4d7c9
3b24d08de0d126d7906c23d6574607ba0dc8281c
/tallpalmettoandmoon.rd
9828a8c09c7401403150bccc97a5e7a997a85834
[]
no_license
htbrdd/rdworks-files
de6602f90dcc1933f13f4fc970d5ad5315146569
af17e751cf3a31b71de4c748fb1cf9d9032d16ef
refs/heads/master
2021-03-19T08:29:59.037313
2018-01-21T01:06:43
2018-01-21T01:06:43
82,507,071
0
0
null
null
null
null
ISO-8859-1
R
false
false
112,996
rd
tallpalmettoandmoon.rd
қúz‹‰Ò‰p‰‰‰‰‰‰‰‰‰‰p wwww³‰‰‰‰‰p‰‰߉‰ýýpى‰‰‰‰‰‰‰‰‰pY‰‰™‰‰ýýp‰ ‰ ‰‰‰‰‰‰‰‰‰‰p ‰B‰‰‰ùÐ9‰ÐI‰¯»Ð»‰ÐˉWw‰‰‰‰ÄI‰‹pۉwwww³‰‰‰‰‰p[‰‰‰߉‰ýýpi‰wwww³‰‰‰‰‰p뉉‰߉‰ýýÄ«‰p݉‰‰‰‰‰pÝ ‰‰‰‰‰p]‰‰‰‰‰‰p] ‰‰‰‰‰z ‰‰‰‰‰‰‰‰‰‰z‰‰z ‰ü‰‰ü ‰ü‹ £1•Iã©ü ‰‰‰‰‰‰‰‰‰‰ü‰‰‰‰ýÿü‰‰‰‰‰‰‰‰‰‰ü‰ü ‰ ‰ ‰‰‰‰ýÿä‰pé‰p‰‰‰‰‰‰‰‰‰‰p‰‰‰‰ýÿp+‰‰‰‰‰‰‰‰‰‰p­‰p‰ ‰ ‰‰‰‰ýÿÄ Ä‹‰Ä ¹Ä ™Ä B‹‰‰ùÐ Ð)¯»Ð‹ЫWwЛ‰‰‰‰‰Ð‰‰‰‰‰Ä ‚‰‰‰é‰ᤠ?÷ã‰í¤÷A‰3‰í¤ Ó÷ã‰í¤ó¯ Ÿ‰í¤O _‰í¤ýÅ Ÿ‰í¤½‰s‰í¤{Ÿ y‰í¤‡)÷ã‰í¤yW û‰í¤_÷ã‰í¤ù!‹Å‰í¤_‰õ‰í¤çû Ÿ‰í¤‡‰©‰í¤ç‘ Ÿ‰í¤á‰)‰í¤eµ y‰í¤÷ã‰í¤åi y‰í¤s÷ã‰í¤å  û‰í¤]wM‰í¤c# Ÿ‰í¤]‰)‰í¤ãG y‰í¤9÷ã‰í¤ãG y‰í¤Ÿ…÷ã‰í¤aý û‰í¤Ÿ…wM‰í¤aý Ÿ‰í¤Ÿ…‰©‰í¤a Ÿ‰í¤ŸowM‰í¤a û‰í¤Ÿo÷ã‰í¤a y‰í¤ËwM‰í¤ok Ÿ‰í¤‘wωí¤oë Ÿ‰í¤‘—wM‰í¤oë y‰í¤‘—÷ã‰í¤oë Ÿ‰í¤‘q÷ã‰í¤o û‰í¤‘qwM‰í¤o Ÿ‰í¤‘qwM‰í¤ï¥ Ÿ‰í¤ÝwM‰í¤ï¥ y‰í¤Ý÷ã‰í¤ï¥ û‰í¤ÝwM‰í¤mY Ÿ‰í¤“'wM‰í¤mY Ÿ‰í¤“'wM‰í¤íÿ y‰í¤÷ã‰í¤í} y‰í¤“¹÷ã‰í¤mÙ Ÿ‰í¤wM‰í¤í} Ÿ‰í¤wM‰í¤í“ û‰í¤ƒ÷ã‰í¤íÿ Ÿ‰í¤m÷ã‰í¤í y‰í¤ïwM‰í¤k7 Ÿ‰í¤•I÷ã‰í¤í“ Ÿ‰í¤ïwM‰í¤k7 y‰í¤•I÷ã‰í¤k7 Ÿ‰í¤•I÷ã‰í¤k7 û‰í¤•I÷ã‰í¤k7 Ÿ‰í¤•IwM‰í¤k7 Ÿ‰í¤•÷ã‰í¤ëí y‰í¤•I÷ã‰í¤k7 Ÿ‰í¤•÷ã‰í¤ëí Ÿ‰í¤•wM‰í¤ëí Ÿ‰í¤•÷ã‰í¤ëí Ÿ‰í¤ñ÷ã‰í¤ë y‰í¤÷ã‰í¤ëk Ÿ‰í¤ñ÷ã‰í¤ëí Ÿ‰í¤•÷ã‰í¤ëí Ÿ‰í¤÷ã‰í¤ë Ÿ‰í¤÷ã‰í¤ë y‰í¤÷ã‰í¤ë Ÿ‰í¤÷ã‰í¤ë Ÿ‰í¤wωí¤ë Ÿ‰í¤ñ÷ã‰í¤ë Ÿ‰í¤—[÷ã‰í¤i% Ÿ‰í¤—[÷ã‰í¤i% Ÿ‰í¤—[wM‰í¤i% Ÿ‰í¤—[÷ã‰í¤i% Ÿ‰í¤—[÷ã‰í¤i% Ÿ‰í¤—[wM‰í¤éÛ Ÿ‰í¤§÷ã‰í¤éÛ Ÿ‰í¤§÷ã‰í¤éÛ û‰í¤—Û÷ã‰í¤i§ Ÿ‰í¤%÷ã‰í¤é[ Ÿ‰í¤%÷ã‰í¤é[ Ÿ‰í¤%÷ã‰í¤é[ Ÿ‰í¤%÷ã‰í¤é[ Ÿ‰í¤©÷ã‰í¤W Ÿ‰í¤©÷ã‰í¤W Ÿ‰í¤©÷ã‰í¤W Ÿ‰í¤©÷ã‰í¤W Ÿ‰í¤©í÷ã‰í¤W• Ÿ‰í¤©í÷ã‰í¤W• Ÿ‰í¤©í÷ã‰í¤×É Ÿ‰í¤)É÷ã‰í¤×É Ÿ‰í¤)É÷ã‰í¤×É Ÿ‰í¤)É÷ã‰í¤×É Ÿ‰í¤)É÷ã‰í¤×É Ÿ‰í¤)É÷ã‰í¤×É Ÿ‰í¤)É÷ã‰í¤×ɉ3‰í¤«ÿ÷ã‰í¤Uƒ Ÿ‰í¤«ÿ÷‰í¤Um Ÿ‰í¤«÷ã‰í¤Um Ÿ‰í¤«÷ã‰í¤Um‰3‰í¤«ÿ÷ã‰í¤Uƒ Ÿ‰í¤«ÿ÷ã‰í¤Uƒ Ÿ‰í¤«ÿwM‰í¤SÝ Ÿ‰í¤­¥÷ã‰í¤SÝ Ÿ‰í¤+Y÷ã‰í¤Õ'‰³‰í¤­%÷ã‰í¤S[ Ÿ‰í¤­%÷ã‰í¤S[ Ÿ‰í¤­%÷ã‰í¤Óñ Ÿ‰í¤-÷ã‰í¤Óñ Ÿ‰í¤-‚‰‰‹1³‰‰ ©ñ¤ ?÷ã‰í¤÷A‚‰‰ )‰‰ )Õ¤Óñ‰3‰í¤-k‚‰‰‹±W‰‰ «É¤‹ewM‰í¤u‚‰‰ )‰‰ +­¤Ó‚‰‰‰…‰‰ +­¤÷à Ÿ‰í¤ ¿‚‰‰‰õʼn‰ ­¤-k‚‰‰‹1³‰‰ ­¤‹e÷ã‰í¤u‚‰‰ )‰‰ ­å¤Q˂‰‰‰…ó‰‰ ­å¤u Ÿ‰í¤‹e‚‰‰‰sy‰‰ -Ù¤¯·‚‰‰‹1³‰‰ -Ù¤‹ewωí¤õ?‚‰‰ )‰‰ ¯½¤Q˂‰‰‰]‰‰ ¯½¤õ± Ÿ‰í¤ Á‚‰‰‰sŸ‰‰ /‘¤¯·‚‰‰‹1³‰‰ /‘¤­÷ã‰í¤sՂ‰‰ ©·‰‰ /õ¤Q˂‰‰‰]‰‰ /õ¤õ± û‰í¤ Á‚‰‰‰sŸ‰‰ ¡é¤¯·‚‰‰‹1³‰‰ 
¡é¤­÷ã‰í¤sՂ‰‰ ©·‰‰ !ͤQ˂‰‰‰‡9‰‰ !ͤõ± Ÿ‰í¤+‚‰‰‰ó³‰‰ £¡¤/“‚‰‰‹³Ÿ‰‰ £¡¤ ÁwM‰í¤sU„ýŤw-‚‰‰ ©·‰‰ #…¤Ñ‰‰…‰‰ #…¤sU Ÿ‰í¤+‚‰‰‰ó³‰‰ #ù¤/“‚‰‰‹§‰‰ #ù¤‰S„½¤+÷ã‰í¤sU„}/¤ug‚‰‰ ©·‰‰ ¥Ý¤Ñ‚‰‰‰o‰‰ ¥Ý¤s‹ Ÿ‰í¤÷‚‰‰‰qW‰‰ %±¤/}‚‰‰‹%¥‰‰ %±¤‹„ƒQ¤+wM‰í¤s‹„ÿ ¤u‚‰‰ 뉉 §•¤Ñ…‚‰‰‰™Ë‰‰ §•¤ó/ y‰í¤÷‚‰‰‰q‰‰ '‰¤/ý‚‰‰‹§‰‰ '‰¤‹e„«¤÷÷ã‰í¤s‹„ÿפu‚‰‰ 뉉 'í¤Ñ…‚‰‰‰™Ë‰‰ 'í¤s‹ Ÿ‰í¤ ӂ‰‰‰ñ!‰‰ ¹Á¤¡G‚‰‰‹§‰‰ ¹Á¤‹e„«¤÷wM‰í¤ó/„1¤õ±‚‰‰ 뉉 9¥¤ß]‚‰‰‰q‰‰ 9¥¤qC Ÿ‰í¤=‚‰‰‰E‰‰ »™¤!#‚‰‰‹§‰‰ »™¤ Á„O¤ Q÷ã‰í¤s‹„1¤õ±‚‰‰ 뉉 »ý¤ß]‚‰‰‰›Ý‰‰ »ý¤ñù Ÿ‰í¤™‚‰‰‰E‰‰ ;Ѥ!#‚‰‰‹§ë‰‰ ;Ѥ Á„O¤÷÷ã‰í¤s‹„1¤õ±‚‰‰ ‰‰ ½µ¤ß]‚‰‰‰›Ý‰‰ ½µ¤ñù y‰í¤™‚‰‰‰ÿû‰‰ =©¤!#‚‰‰‹§ë‰‰ =©¤+„å¤ Ó÷ã‰í¤ó¯„ñ¤sU‚‰‰ ‰‰ ¿¤ß]‚‰‰‰'‰‰ ¿¤ñù Ÿ‰í¤傉‰‰ÿ‰‰ ¿á¤£‚‰‰‹ç‰‰ ¿á¤+„+Y¤+„å¤ Ó÷ã‰í¤ó¯„ñ¤sU„Õ'¤sU‚‰‰ ‰‰ ?Ť]󂉉‰‰‰ ?Ťñ Ÿ‰í¤O‚‰‰‰}µ‰‰ ±¹¤£i„—[¤‹›‚‰‰‹ç‰‰ ±¹¤ Q„«ÿ¤­„™¤ Ó÷ã‰í¤ó¯„ñù¤sՄ×I¤qC‚‰‰ I«‰‰ 1¤õ±„éÛ¤]‚‰‰‰I‰‰ 1¤ÿ× Ÿ‰í¤«‚‰‰‰ýi‰‰ 1ñ¤£i„§¤ Á‚‰‰‹A‰‰ 1ñ¤=„)7¤­„™¤ Ó÷ã‰í¤ó¯„qC¤s „W¤ñù‚‰‰ Iu‰‰ ³Õ¤õ±„W¤ÝK‚‰‰‰Ÿ•‰‰ ³Õ¤ÿ× Ÿ‰í¤u‚‰‰‰ý ‰‰ 3ɤ#5„©í¤ Á‚‰‰‹w‰‰ 3ɤ™„©ƒ¤+„=¤=÷ã‰í¤qC„qC¤sU„Wÿ¤ñù‚‰‰ Ëщ‰ µ­¤õ±„W•¤ÝK‚‰‰‰Ÿ‰‰ µ­¤ÿ  Ÿ‰í¤ƒQ‚‰‰‰{£‰‰ 5¤¥‘„©í¤­‚‰‰‹Ӊ‰ 5¤儗[¤÷„ Ó¤=÷ã‰í¤qC„ó¯¤s‹„i%¤ñ‚‰‰ K½‰‰ 5å¤sՄW•¤[ႉ‰‰[‰‰ 5å¤}/ y‰í¤ƒQ‚‰‰‰ûG‰‰ ·Ù¤¥‘„«¤+‚‰‰‹ƒ¿‰‰ ·Ù¤c„ñ¤÷„ Ó¤=„9Ÿ¤‹›wM‰í¤õ±„GM¤qC„s ¤ó/„ëk¤ñù‚‰‰ Í㉉ 7½¤sU„Uƒ¤[‚‰‰‰‰‰ 7½¤ýÅ Ÿ‰í¤½‚‰‰‰yý‰‰ ɑ¤¥{„«ÿ¤+‚‰‰‹剉 ɑ¤™„¤ Q„w¤=„¹3¤ Á÷ã‰í¤sU„Ù©¤qC„s ¤s‹„k7¤ñ‚‰‰ Í㉉ Éõ¤sU„Uƒ¤Û9‚‰‰‰퉉 Éõ¤{y Ÿ‰í¤…‚‰‰‰y‘‰‰ Ié¤%G„«ÿ¤+‚‰‰‹…O‰‰ Ié¤儕I¤÷„w¤=„'餍+÷ã‰í¤sU„ó¯¤w-„_¹¤ñù„s ¤s‹„퓤ñ„ËÓ¤w-„1¤sU„SݤÛ9‚‰‰‰“ɉ‰ Ëͤ{y Ÿ‰í¤…ら‰‰g뉉 K¡¤§£„-¤­„™¤‹e„µÃ¤c„M¤‰Õ„…㤠ӄ+¤™„/}¤ ¿„w¤÷÷ã‰í¤s‹„s ¤÷ÄѤñù„sU¤ó¯„{Ÿ¤w­„û3¤ñ„K¿¤u„ñù¤sՄÓq¤Y߂‰‰‰‰‰ ͅ¤{Ÿ Ÿ‰í¤M‚‰‰‰g‰‰ Íù¤§!„-k¤w„=¤ Á„™»¤ ?„£i¤c„…㤋›„½¤ ӄ÷¤=„/¤‹›„w¤÷÷ã‰í¤s‹„s ¤uç„Ño¤qC„s‹¤ó¯„ýŤuç„{Ÿ¤ñ„]¤÷A„gǤõ±„qC¤s „Ó¤Y_‚‰‰‰ÿ‰‰ Mݤû3 y‰í¤M‚‰‰‰ç¥‰‰ ϱ¤'„¯·¤­„™¤ Á„_¤‹e„£¤O„;¤ A„u¤ ӄ+¤™„/¤‹›„+¤ Ó÷ã‰í¤qC„õ±¤u„QˤqC„sU¤ó¯„ÿ ¤õ±„ýŤ1„]ó¤u„ù!¤sU„qC¤s „Qˤ٩‚‰‰‰¥‰‰ O•¤yé Ÿ‰í¤‡©‚‰‰‰eى‰ Á‰¤'鄯·¤w„=¤+„_¤‹e„£¤O„½¤ Á„u¤ ӄ+¤=„¯·¤‹e„ Á¤=÷‰í¤ó¯„sU¤uç„Qˤñù„õ±¤ó¯„ÿפsU„}/¤³„]q¤õ±„y ¤s „ó¯¤s‹„Ѥ٩‚‰‰‰—k‰‰ Áí¤y Ÿ‰í¤_‚‰‰‰c7‰‰ 
AÁ¤9Ÿ„/}¤÷„=¤w„‡©¤­„£¤O„ƒÑ¤+„O¤ ӄ Á¤™„-k¤‹e„ Á¤=÷ã‰í¤qC„õ±¤u„Ó¤ñù„õ±¤ó¯„1¤sU„}¡¤1„]ó¤sՄyé¤s „qC¤s‹„ѤÇら‰‰·‰‰ Ã¥¤ù! Ÿ‰í¤™»‚‰‰‰ãk‰‰ C™¤9„!#¤÷„ Ó¤ ӄ‡)¤+„!#¤«„u¤w„c¤ ӄ Á¤™„¤‰S„u¤‹e„ Á¤=÷ã‰í¤qC„õ±¤u„ÿ ¤w-„í}¤ñù„õ±¤ó¯„ñ¤s „ÿ ¤ÿׄß]¤sU„yW¤ó¯„ó¯¤s‹„ß]¤Ça‚‰‰‰©‰‰ Cý¤gÇ y‰í¤™;‚‰‰‰ã‰‰ ÅѤ9û„!#¤ ӄ÷¤ ӄ‡)¤÷„!#¤«„u¤­„c¤=„‹›¤Ï„ݤ‹›„O¤‹e„ Á¤™÷ã‰í¤ñ„u¤u„1¤u„o¤³„uç¤qC„ñù¤s „ÿפÿׄß]¤s‹„yW¤ó¯„ó/¤ó¯„ß]¤Åт‰‰‰«#‰‰ Eµ¤ç Ÿ‰í¤a‚‰‰‰áۉ‰ Ç©¤;¡„!#¤ ӄ Q¤ ӄ‡)¤÷„!#¤«„«¤w„™¤=„‹›¤Ï„‘q¤‹e„O¤‹e„‹e¤c÷‰í¤ñù„u¤õ±„ñ¤õ±„oë¤ç{„qC¤s „ÿפÿW„ß]¤ó¯„û³¤qC„s‹¤ó¯„]¤Åт‰‰‰+‰‰ G¤ç‘ Ÿ‰í¤ႉ‰‰o‰‰ Gá¤;¡„£i¤ ӄ÷¤=„Ϥ ӄ!#¤)„«¤w„=¤„‘—¤ Á„å¤ Á„‹e¤™÷ã‰í¤ñ„u¤õ±„ñù¤sU„ýŤuç„ÿ ¤gDŽñù¤s „³¤ÿׄ]ó¤ó¯„{Ÿ¤ñg„sU¤qC„]¤C)‚‰‰‰­5‰‰ ÙŤåi y‰í¤ó„·Õ¤½×„¥‘¤ ӄ+¤„M¤ ӄ£¤«„c¤ ӄ=¤™»„«¤‹e„½¤ Á„c¤ Á„‹e¤å÷ã‰í¤ñ„u¤õ±„ñ¤õ±„ýŤu„ÿפgDŽqC¤ó¯„ñ¤ÿׄ]ó¤ó¯„û3¤ñg„sU¤ó¯„[á¤C«„I­¤å Ÿ‰í¤]„µÃ¤¿„¥‘¤=„ Á¤儅㤏=„!#¤u„å¤ Q„ Ó¤™»„«¤‹e„ƒQ¤+„™¤+„‹›¤O÷‰í¤1„uç¤õ±„ñù¤s „ÿ ¤õ±„1¤gDŽó¯¤ó/„ñg¤ÿ „ë¤÷A„ÿ ¤ñù„{Ÿ¤ñù„sÕ¤ó/„Û9¤A™„Ï+¤ãG Ÿ‰í¤9„1U¤¿ù„%G¤ Q„­¤™„…㤙„u¤ ?„¤u„¤ Q„ Ó¤™»„O¤ Á„u¤w„™¤ Á„‹›¤O¹‰í¤u„ó/¤1„uç¤sU„ñù¤s „ÿפsU„ñ¤gDŽs‹¤qC„qC¤ÿ‹„i§¤÷A„ÿפñ„{y¤ñ„õ±¤qC„Û9¤Á=„OQ¤aý Ÿ‰í¤Ÿ…„±/¤?C„%G¤=„ Á¤c„…¤c„«¤ ?„—Û¤÷„=¤=„÷¤™»„复+„«¤w„™¤+„‹›¤O„ Q¤‹e÷ã‰í¤s‹„õ±¤ÿׄuç¤õ±„ñù¤ó¯„1¤s‹„ñg¤ç{„sU¤qC„ó¯¤ÿ „i%¤uç„ÿפñù„{Ÿ¤ñ„u¤ñù„YߤOQ„ÃϤa Ÿ‰í¤‘—„»E¤1U„' ¤=„‹e¤Ï„…¤儁Ϥ‹›„—[¤ƒÑ„ Ó¤=„ Á¤a„=¤+„O¤=„=¤+„ ¿¤«„‹e¤ Ó÷ã‰í¤ó¯„u¤ÿׄ÷äsU„qC¤qC„1¤sU„qC¤ç„õ±¤qC„ó¯¤}¡„i%¤u焳¤ñ„{y¤³„u¤qC„Ùs¤Ï+„E;¤oë û‰í¤݄'¤³1„'餙„‹›¤Ï„…¤O„c¤‹›„§¤ƒÑ„w¤=„‹e¤›Í„ Q¤w„O¤ Q„=¤w„ ¿¤™;÷ã‰í¤gE„÷äs „qC¤ó/„1¤s „ó/¤eµ„u¤qC„s ¤}¡„éÛ¤uç„ñ¤1„{y¤³„uç¤ñù„Ù©¤MO„Ùõ¤ï¥ Ÿ‰í¤ƒ„#5¤µÃ„'餙„‹›¤«„+¤ ¿„ Ó¤O„c¤‹e„§¤ƒÑ„­¤÷¤ ӄc¤=„=¤w„‰S¤™»÷‰í¤aý„qC¤qC„ñù¤qC„sU¤í“„õ±¤}/„W¤u„ñù¤ÿׄs‹¤uç„sU¤ÿׄ÷A¤ñù„Çã¤Ëӄ_¹¤í Ÿ‰í¤m„¡Ù¤5¯„9Ÿ¤™„ ?¤«„+¤‹›„÷¤«„™¤‹e„©¤ƒQ„ Á¤+¤=„™¤=„=¤Ÿ…÷‰í¤aý„ó¯¤ñù„ñù¤qC„õ±¤í“„u¤ýńW¤u„ñù¤ÿׄsU¤u„õ±¤eµ„LJ¤I-„S[¤ëí Ÿ‰í¤•„­%¤·S„9û¤›Í„ Á¤‹e„+¤«„™¤‹e„©¤½„‹e¤ï„ Á¤=„™¤™„ Ó¤Ÿ…÷‰í¤ãG„qC¤ñù„qC¤ñg„u¤í}„õ±¤}¡„Wÿ¤sU„ó¯¤ÿ „õ±¤u„sU¤eµ„LJ¢‰‰‰Ýñ‰‰ ×݄W¤éÛ y‰í¤©í„¢‰‰ Ÿ©‰‰ W±„»E¤„ Á¤ Á„‹e¤u„ Ó¤+„©ƒ¤;„‹e¤„‹¤å„ Ó¤c„ 
Ó¤¹÷ã‰í¤ãلó¯¤ñ„ó¯¤ñ„ug¤í}„u¤ýE„Wÿ¤sU„ó¯¤ÿ „u¤õ±„õ±¤åi„E;¢‰‰‰Û‰‰ 镄í}¤W• Ÿ‰í¤+Y„ß¢‰‰ M‰‰ i‰„½ ¤›Í„ Á¤ Á„‹e¤ƒQ„ Q¤­„©¤;„‹›¤S¤c„ Ó¤O„ Ó¤ó÷‰í¤å„ó¯¤1„s ¤S݄÷äýńW•¤sՄó/¤}/„ug¤sՄu¤åi„Cu¢‰‰‰A͉‰ ií„ýE¤Ñï Ÿ‰í¤/“„;¢‰‰ M‰‰ ëÁ„½ ¤„‹e¤­„‹¤ƒQ„ Q¤­„©í¤½„ ¿¤­¥„w¤O„ Ó¤ó÷‰í¤å „s‹¤ÿׄs ¤GM„W•¤s „s‹¤}/„ug¤sՄu¤åi„C«¢‰‰‰[‰‰ k¥ Ÿ‰í¢‰‰ ã‰‰ 홄½×¤„‹e¤­„‹¤ƒQ„÷¤w„©í¤¹3„w¤«„÷¤s÷‡‰í¤åi„ó/¤1„s ¤Ù©„W•¤s „s‹¤ýń÷äs „ug¤å „ÃÏ¢‰‰‰‘§‰‰ íý Ÿ‰í¢‰‰ ‡‰‰ mф£ë¤‹›„Ï¤s„‹¤w„ ¿¤½„+¤ ӄ©í¤'鄍­¤Ï„ Ó¤÷ã‰í¤åi„󯤳„sÕ¤Ù©„W•¤ó¯„sU¤ýń÷äs „ug¤å „³¤uç„]—¢‰‰‰‘§‰‰ ïµ y‰í¢‰‰ ;‰‰ o©„#5¤‹e„Ï¤©„‹¤ ӄ‰S¤½„+¤ ӄ)7¤§£„+¤«„w¤„‘q¤‹e÷ã‰í¤u„o¤åi„s ¤ÿׄsU¤Y߄×I¤ó¯„sU¤ýńw-¤ó¯„ug¤å鄳¤u„ÝK¢‰‰‰‰‰ ፠y‰í¢‰‰ ›Ñ‰‰ áᄥ{¤ Á„™¤s„ ¿¤ ӄ‰S¤½„+¤ ӄc¤ ?„‘¤§£„ Á¤u„w¤›Í„‘¤­÷ã‰í¤s „oë¤e5„s ¤ÿ „õ±¤[„oë¤u„ñù¤ó¯„õ±¤Ñ„qC¤õ±„Y_¢‰‰‰“ɉ‰ aÅ Ÿ‰í¢‰‰ u‰‰ 㹄§!¤ Á„=¤/}„ Á¤ ӄ™¤‹e„‘—¤¥{„ Á¤u„w¤›K„‘—¤w÷‡‰í¤s‹„oë¤eµ„s‹¤ÿ „u¤[á„o¤u„qC¤qC„õ±¤Ñ„ó/¤õ±„Ùs¢‰‰‰“ɉ‰ c y‰í¢‰‰ «‰‰ cñ„'餍+„÷¤/}„ Á¤=„=¤‹e„‘q¤#5„‹å¤ƒQ„÷¤a„…¤‹›„)¤ Ó÷ã‰í¤ó¯„ÿW¤uç„{y¤ç„s‹¤}/„u¤ÝK„o¤u„qC¤qC„õ±¤Ñ„s‹¤sU„Ù©¢‰‰‰‰‰ åÕ y‰í¢‰‰ ™Ï‰‰ eɄ9¤÷„ Á¤/}„ Á¤=„=¤‹e„ݤ£ë„‹¤ƒQ„÷¤a„;¤ A„Ï¤ ӄ)ɤ…‡÷ã‰í¤{û„×ɤ󯄳¤õ?„ýE¤ç„s‹¤}/„ug¤]—„磻u„qC¤qC„õ±¤Ñ„õ±¤s‹„Ça¢‰‰‰ÿ‰‰ ç­ y‰í¢‰‰ c‰‰ g„¤‹e„•I¤w„‹å¤/}„‹e¤™„ Ó¤ Á„ݤ£ë„ ¿¤½„+¤a„;¤­„™¤=„•I¤Ë‰)‰í¤k7„磻qC„ñg¤sU„ýŤ瑄sÕ¤A™„y¤ug„1¤õ±„ó¯¤ñù„u¤Ñ„ug¤ó¯„퓤õ±„叢‰‰‰¥‰‰ gå Ÿ‰í¢‰‰ ™‰‰ ùلó¤ Á„ï¤ Ó„‹¤/}„‹e¤™„ Ó¤ Á„O¤‹„‡õ¤¿ù„­¤á„½¤+„¤=„ݤ•IwM‰í¤W„a¤ñg„qC¤s‹„}/¤e5„sÕ¤Ãτù!¤ug„ñ¤sU„ó¯¤ñù„u¤Ñ„÷äó¯„íÿ¤s „叢‰‰‰—‰‰ y½ Ÿ‰í¢‰‰ ™‰‰ û‘„ó¤w„ƒ¤ ӄ ¿¤/}„‹e¤™„ Ó¤+„夋„_¤=³„­¤›K„ƒQ¤÷„=¤„Ÿo¤©÷ã‰í¤Um„ãG¤ñ„ó¯¤ó¯„ÿ ¤ç‘„sÕ¤Ãτy ¤u„ñ¤sU„s ¤³„uç¤Ùs„mY¤s „ãÙ¢‰‰‰—k‰‰ ûõ Ÿ‰í¢‰‰ Q‰‰ {鄟ᤍ÷„“'¤§£„‹›¤Ï„w¤+„夋e„_¤½ „+¤a„u¤=„w¤c„ó¤­%÷ã‰í¤S[„古ñ„s ¤qC„ÿ ¤ç„sU¤Cu„ù!¤u„ñ¤sU„s ¤³„uç¤Y߄mY¤s‹„a‘¢‰‰‰—k‰‰ ýÍ y‰í¢‰‰ …÷‰‰ }¡„K¤=„‘ñ¤§£„‹›¤Ï„w¤+„夋e„_¤½ „ Á¤a„u¤™„+¤儛ͤ­¥÷ã‰í¤S݄eµ¤ñ„sU¤ñù„ÿ ¤ç„õ±¤Cu„ù!¤u„ñ¤sU„s ¤³„uç¤Y߄o¤qC„á5¢‰‰‰·‰‰ ÿ… y‰í¢‰‰ …+‰‰ ÿù„‘q¤™„ˤ§£„ ¿¤«„w¤+„夋e„_¤;¡„ Á¤á„÷¤™„ Á¤O„ᤕ„‰Õ¤‹Ӊí¤mY„çû¤³„u¤ñ„ÿ ¤ç‘„õ±¤Ç‡„ù!¤õ±„ñ¤s‹„sÕ¤ÿׄ÷äۻ„ó¯¤w-„çû¤ñù„mY¢‰‰‰©}‰‰ Ý Ÿ‰í¢‰‰ Á‰‰ ñ±„“'¤™„‡¤‰S„ Ó¤%DŽ ¿¤«„­¤÷„å¤ 
Á„_¤9û„ Á¤á„u¤儋e¤Ï„‡¤“'su‰í¤o„gǤ叄ÿפç„u¤Ç‡„ù!¤sU„qC¤s „s ¤Oӄs ¤÷ÄgE¤ñ„íÿ¢‰‰‰)ى‰ q•‹Å‰í¢‰‰ ƒ‰‰ 󉄕I¤O„‡õ¤‹„­¤±¯„w¤w„=¤+„_¤9Ÿ„ Á¤‡„«¤ó„_¤‘—÷ã‰í¤oë„ù!¤å„ÿפçû„õ±¤Çã„ù!¤sU„qC¤s „s ¤OӄsÕ¤ug„y¤1„k7¢‰‰‰+‰‰ óí y‰í¢‰‰ S‰‰ sÁ„—Û¤«„‡)¤‹›„ Á¤±¯„w¤w„=¤+„™»¤'é„ Á¤a„Ï¤s„‡s¤K÷ã‰í¤á5„y ¤å „³¤ç„õ±¤Ù©„gǤsU„qC¤s „s ¤Oӄõ±¤uç„yW¤ÿׄi§¢‰‰‰+i‰‰ õ¥ û‰í¢‰‰ ‰‰‰ u™„©¤u„…㤋e„‹e¤±¯„w¤w„=¤+„™»¤' „ Á¤a„Ï¤s„‡©¤Ÿou½‰í¤a„û³¤c#„ñ¤ç‘„u¤Ùs„gǤsU„qC¤s „s ¤Oӄ÷A¤õ±„{y¤ýE„W¢‰‰‰-‘‰‰ uý Ÿ‰í¢‰‰ ­‰‰ ÷ф©m¤;„…¤ Á„ ?¤±¯„w¤w„=¤+„™»¤' „‹e¤á„夝]„ϤŸou;‰í¤aý„{Ÿ¤ãلñ¤ç‘„u¤Û9„ç{¤s „s‹¤ó¯„s ¤ËS„}¡¤{y„Um¢‰‰‰-{‰‰ wµ Ÿ‰í¢‰‰ A‰‰‹‰©„«¤…„ƒÑ¤5-„w¤ ӄ÷¤w„¤%G„‹e¤á„夹„…㤟…õQ‰í¤ãG„{y¤ãG„ñ¤ç„u¤[„ç{¤s „s‹¤ó¯„sU¤Iw„ÿפ{Ÿ„Õ'¢‰‰‰¯Ç‰‰‹ ‹E‰í¢‰‰ ?‰‰‹ á„-k¤‡)„c¤5¯„+¤ ӄ÷¤w„¤¥{„‹›¤a„c¤9„;¤9÷ã‰í¤ãG„ýE¤ãG„ñ¤ç„uç¤[„ç{¤s „s‹¤ó¯„sU¤Ëӄñ¤yW„Ó¢‰‰‰¡ ‰‰‹‹Å y‰í¢‰‰ Չ‰‹ ¹„/“¤‡s„=¤5¯„+¤ ӄ÷¤w„ Ó¤‹„«¤¥{„‰Õ¤„™¤9„ƒÑ¤9÷ã‰í¤ãG„}¡¤ãG„ñù¤åi„w­¤[„ÿפug„ó¯¤s „s‹¤ó¯„sU¤ËӄqC¤y „Ñ‰‰¡é‰‰‹ y‰í¢‰‰ ‰‰‹ñ„¡Ù¤™»„+¤5¯„+¤=„+¤w„÷¤ A„Ï¢‰‰‹ëm‰‰‹ñ„¤Ÿ…„u¤]u½‰í¤c£„³¤aý„ñ¤I­„ÿW¤õ?„s‹¤ó¯„õ±¤qC„sU¤Ëӄ÷äeµ„]¢‰‰‰£û‰‰‹ Õ Ÿ‰í¢‰‰ C‰‰‹É„£i¤›Í„ ¿¤5¯„+¤=„ Á¤ ӄ÷¤ A„)¤·Õ„够…„Ï¤ß÷‰í¤c£„ñ¤a‘„ñ¤Iw„³¤sՄs‹¤ó¯„õ±¤qC„sU¢‰‰ #y‰‰‹­„[ᢉ‰‰#E‰‰‹­‹Å‰í¢‰‰‰w1‰‰‹„§!¢‰‰ uʼn‰‹„+¤=„ Á¤ ӄ+¤w„Ï¤· „™¤‘—„ Ó¤]÷ã‰í¤c#„ó¯¤oë„ñù¤Iw„³¤s „sU¤ó¯„õ±¤qC„sU¢‰‰ ¥Å‰‰‹å„Y_¢‰‰‰% ‰‰‹å û‰í¢‰‰‰÷׉‰‹ل'颉‰ uʼn‰‹ل+¤™„‹e¤ ӄ+¤w„Ï¤5¯„™¤݄+¤s÷ã‰í¤å „sU¤ï¥„ñù¤Ëӄ³¤s „sU¤ó¯„u¤ñù„sU¢‰‰ %¡‰‰‹ƒ½„Ù©¢‰‰‰%W‰‰‹ƒ½‹Å‰í¢‰‰‰u/‰‰‹‘„9û¢‰‰ uʼn‰‹‘„+¤™„‹e¤ ӄ+¤w„Ï¤µC„™¤9Ÿ÷‰í¤Çã„qC¤K=„³¤s „sU¤ó¯„u¤ñù„sU¢‰‰ §×‰‰‹õ„ÅÑ¢‰‰‰¹C‰‰‹õ Ÿ‰í¢‰‰‰õʼn‰‹…é„;¡¢‰‰ uʼn‰‹…鄍+¤™„‹e¤ ӄ+¤w„Ï¤µC„=¤9Ÿu½‰í¤G̈́qC¤Í„³¤s „sU¤qC„uç¤ñù„sU¢‰‰ ¹‰‰‹̈́Ac¢‰‰‰9/‰‰‹Í Ÿ‰í¢‰‰‰sŸ‰‰‹‡¡„¿¢‰‰ uʼn‰‹‡¡„+¤™„‹›¤=„+¤w„Ï¤3g„=¤¹µu;‰í¤Ù©„ó¯¤Í„ñ¤qC„õ±¤qC„÷A¤ñù„s‹¢‰‰ ¹ù‰‰‹…„OQ¢‰‰‰»U‰‰‹…‹E‰í¢‰‰‰ñ!‰‰‹ù„3¢‰‰ uʼn‰‹ù„÷¤™„ ?¤=„ Á¤=„c¤3g„w¤'é÷ã‰í¤Ù©„s ¤Í„ñ¤qC„õ±¤qC„÷A¤ñù„s‹¢‰‰ »¯‰‰‹™Ý„Íe¢‰‰‰½‰‰‹™Ý‹Å‰í¢‰‰‰ÿû‰‰‹±„5¯¢‰‰ uʼn‰‹±„÷¤™„‰Õ¤™„ Á¤=„O¤3„­¤'÷‰í¤Ù©„u¤Í„ñ¤ñù„u¤y„s‹¢‰‰ ;U‰‰‹›•„ÉA¤ËÓ Ÿ‰í¤5¯„7?¢‰‰ 
uʼn‰‹‰„÷¤‡õ„‹e¤™„c¤3g„‹e¤'éu½‰í¢‰‰‹«‘‰‰‹í„ñ¤ñù„u¤y„s‹¢‰‰ =‰‰‹킉‰‰û‰‰‹í¤Ï÷ Ÿ‰í¤1‹‚‰‰ ¿!‰‰‹Á¢‰‰ uʼn‰‹Á„÷¤‡õ„‹e¤™„c¢‰‰ ™½‰‰‹Á÷‰í¢‰‰‹«ý‰‰‹¥„1¤ñ„u¤yW„s‹¢‰‰ =‰‰‹¥‚‰‰‰ù׉‰‹¥¤Ac /‰í¤9û‚‰‰ ¿!‰‰‹Ÿ™¢‰‰ uʼn‰‹Ÿ™„÷¤‡)„‹e¤c„c¢‰‰ ‡«‰‰‹Ÿ™÷ã‰í¢‰‰‹«‘‰‰‹Ÿý„ñ¤ñ„u¤yW„s‹¢‰‰ =‰‰‹Ÿý‚‰‰‰çʼn‰‹Ÿý¤Ç‡ ¡‰í¤%ǂ‰‰ =lj‰‹Ñ¢‰‰ õù‰‰‹ф Ó¤‡)„‹¤O„c¢‰‰ O‰‰‹Ñ÷ã‰í¢‰‰‹«‘‰‰‹‘µ„ñ¤1„ug¤yW„󯢉‰ ½±‰‰‹‘µ‚‰‰‰å³‰‰‹‘µ¤Û» ¡‰í¤!#‚‰‰ ½{‰‰‹©¢‰‰ õù‰‰‹©„ Ó¤‡)„‹¤O„c¢‰‰ …剉‹©¡‰í¤ñg„Õ'¢‰‰‹«‘‰‰‹“„ñ¤ÿׄ÷äû³„qC¢‰‰ 9C‰‰‹“‚‰‰‰oµ‰‰‹“¤Õ' Ÿ‰í¤+Y‚‰‰ »i‰‰‹“ᢉ‰ õù‰‰‹“ᄏ=¤τ ¿¤«„c¢‰‰ =‰‰‹“á„+Y¤K‰í¤åi„뢉‰‹«‘‰‰‹ńñù¤o„qC¢‰‰ '1‰‰‹ł‰‰‰íG‰‰‹Ťë Ÿ‰í¤ñ‚‰‰ ¹G‰‰‹•¹¢‰‰ õù‰‰‹•¹„=¤‘q„™¢‰‰ ƒÓ‰‰‹•¹„¤‹;‰í¤mلíÿ¢‰‰‹«‘‰‰‹„ñù¤o„qC¢‰‰ § ‰‰‹‚‰‰‰i‰‰‹¤aý¯‰í¤™‚‰‰ %‰‰‹ñ¢‰‰ õù‰‰‹ñ„=¤‘—„c¢‰‰ Á‰‰‹ñ„ݤ§÷ã‰í¤éۄ異‰‰‹«‘‰‰‹—Õ„ñ¤oë„qC¢‰‰ #y‰‰‹—Õ‚‰‰‰Q‰‰‹—Õ¤ñù‚‰‰ #Y‰‰‹É¢‰‰ õù‰‰‹Ʉ=¤‘—„c¢‰‰ e‰‰‹ɄK¤­¥‰©‰í¤_9„ãG¢‰‰‹«‘‰‰‹©­„ñ¤oë„qC¢‰‰ ¡!‰‰‹©­ Ÿ‰í¢‰‰ õù‰‰‹)„=¤‘—„c¢‰‰ ›‰‰‹)„9¤¡G‰)‰í¤[„åi¢‰‰‹)5‰‰‹)å„ñù¤oë„ñù¢‰‰ ¯{‰‰‹)å Ÿ‰í¢‰‰ õ‰‰‹«Ù„™¤‘—„™¢‰‰ ?‰‰‹«Ù„¤¥{wM‰í¤Ù©„碉‰‹)5‰‰‹+½„ñù¤oë„ñù¢‰‰ -µ‰‰‹+½wé‰í¢‰‰ õ‰‰‹­‘„™¤‘—„™¢‰‰ -‰‰‹­‘„¤»E÷ã‰í¤E;„ç{¢‰‰‹)5‰‰‹­õ„ñù¤oë„ñù¢‰‰ ­‰‰‹­õwW‰í¤s„_¤½×„™¤‘—„™¢‰‰ ɉ‹-é„_¤=³÷ã‰í¤Ãτù!¢‰‰‹)5‰‰‹¯Í„ñù¤oë„ñù¤C«„ù!¤å wé‰í¤„9¤»E„c¤‘—„=¢‰‰ ‹g‰‰‹/¡„‡©¤?Åñ½‰í¤w-„w-¤Çã„û³¢‰‰‹©ë‰‰‹¡…„ñù¤á5„ñ¤Ç‡„磻y Ÿ‰í¤‡õ„ݤ9û„c¤K„™¢‰‰ ‹‰‰‹¡ù„Ϥ9Ÿ„‰S¤‰SóO‰í¤GM„{y¢‰‰‹©ë‰‰‹!݄ñù¤á5„ñ¤Ça„k7¤yéwé‰í¤M„—Û¤¹µ„Ï¤K„=¢‰‰ ‰U‰‰‹£±„;¤9÷ã‰í¤Ça„ýE¢‰‰‹©‰‰‹#•„qC¤á5„³¤G̈́i§¤û3wW‰í¤…ã„©¤9„Ï¤K„=¤_„ ¿¢‰‰ ‰‹‰‰‹¥‰„ƒÑ¤»E÷ã‰í¤E;„}¡¢‰‰‹9‰‰‹¥í„÷äù!„qC¤á5„³¤Ça„W¤{Ÿ‰3‰í¤;„)ɤ9û„Ï¤K„=¤_„‹¢‰‰ ‰‹‰‰‹%Á„Ï¤?Å‹»‰í¤K¿„ñ¢‰‰‹9g‰‰‹§¥„u¤ù!„qC¤a‘„ÿפÅф×I¤}/ Ÿ‰í¤ƒQ„)7¤;¡„«¤Ÿá„=¤_„‹å¢‰‰‹w¯‰‰‹'™„夵É©‰í¤ÉA„ñù¢‰‰‹9g‰‰‹'ý„u¤ù!„qC¤a‘„ÿפA™„éÛ¤ÿ  Ÿ‰í¤u„§¤¿ù„«¤Ÿá„=¤_„‹å¢‰‰‹÷C‰‰‹¹Ñ„™¤7?wÏ‰í¢‰‰ ㉉‹9µ„ñù¢‰‰‹»C‰‰‹9µ„õ±¤gE„ó/¤a‘„ÿפÍe„ëí¤1 Ÿ‰í¤=„“'¢‰‰ óW‰‰‹»©„Ï¤Ÿá„ Q¤™;„ Á¢‰‰‹u‰‰‹»©„=¢‰‰ M牉‹»©÷ã‰í¢‰‰ ‹½‰‰‹;„qC¢‰‰‹»C‰‰‹;„õ±¤gE„ó/¤a‘„³¢‰‰ ±S‰‰‹;„mY¤qC Ÿ‰í¤+„‘q¢‰‰ óW‰‰‹;ᄁϤŸ…„ Ó¤‡„ Á¢‰‰‹u‰‰‹;ᄍ÷¢‰‰ M牉‹;á÷ã‰í¢‰‰ ‰u‰‰‹½Å„s‹¢‰‰‹»C‰‰‹½Å„õ±¤çû„ó¯¤aý„³¢‰‰ =‰‰‹½Å„o¤sU“ã‰í¢‰‰ s³‰‰‹=¹„c¤Ÿ…„ Ó¤‡„ Á¢‰‰‹u‰‰‹=¹„ 
Á¤5-óO‰í¤MO„ug¢‰‰‹»C‰‰‹¿„sÕ¤ç{„ó¯¤ãG„ñù¢‰‰ '1‰‰‹¿ Ÿ‰í¢‰‰ õ‰‰‹¿ñ„™¤9„ Ó¤„­¢‰‰‹u‰‰‹¿ñ„‹¤³1u;‰í¢‰‰‹»C‰‰‹?Մsդ瑄s ¤ãG„ó/¢‰‰ ¥Å‰‰‹?Õwé‰í¢‰‰ ÷/‰‰‹±É„÷¤9„w¤ᄍ­¢‰‰ ?E‰‰‹±É÷ã‰í¢‰‰‹»C‰‰‹1­„sդ瑄s ¤ãG„s‹¢‰‰ #Ÿ‰‰‹1­w‰í¢‰‰ wU‰‰‹³„‹e¤9„÷¤›Í„ Á¢‰‰ ±¡‰‰‹³÷ã‰í¢‰‰‹9g‰‰‹³å„õ±¤eµ„s‹¤ãG„u¢‰‰ !‰‰‹³åwé‰í¢‰‰‹‰±‰‰‹3ل‹›¤9„ Á¤ó„ Á¢‰‰ 3ù‰‰‹3Ù _‰í¢‰‰‹9g‰‰‹µ½„õ±¤c#„u¢‰‰ ³›‰‰‹µ½„sÕ¤y  Ÿ‰í¤‡s„­¢‰‰‹s‰‰‹5‘„‹e¤]„ Á¢‰‰ ·U‰‰‹5‘‰õ‰í¢‰‰‹9g‰‰‹5õ„õ±¢‰‰ µ+‰‰‹5õ„{û¤}/ Ÿ‰í¤ƒQ„…‡¢‰‰‹'5‰‰‹·é„ Á¢‰‰ Ég‰‰‹·é _‰í¢‰‰‹9g‰‰‹7̈́õ±¢‰‰ aû‰‰‹7̈́w-¤Û9„yé¤1 Ÿ‰í¤ ӄ_¤%DŽw¢‰‰‹'5‰‰‹É¡„ Á¢‰‰ M牉‹É¡÷ã‰í¢‰‰‹9g‰‰‹I…„õ±¢‰‰ åW‰‰‹I…„s ¤Û»„ù!¤ó¯ Ÿ‰í¤‹›„á¤' „=¤7?„+¢‰‰ Ow‰‰‹Iù‰õ‰í¢‰‰‹9g‰‰‹Ë݄sU¤Ë‰„ñg¤Çã Ÿ‰í¤9Ÿ„¤·‰„+¢‰‰ ɉ‹K±‰õ‰í¢‰‰‹9g‰‰‹Í•„sU¤K¿„1¤ç{„÷A¤W Ÿ‰í¤©„ ?¤„O¤µÃ„+¢‰‰ Å+‰‰‹M‰‰©‰í¢‰‰‹9g‰‰‹Mí„sU¤Íå„ÿW¤yW„s ¤W•wé‰í¤«ÿ„=¤…„ƒÑ¤1U„ Á¤…„ ¿¢‰‰ Gc‰‰‹ÏÁ÷ã‰í¢‰‰‹·Q‰‰‹O¥„÷ä{y„õ±¤Ï+„}¡¤{y„qC¤UƒwW‰í¤-k„™¤ƒQ„;¤±/„ Á¤ƒQ„+¢‰‰ Y)‰‰‹Á™÷ã‰í¢‰‰‹É™‰‰‹Áý„sU¤}/„õ±¤OQ„ýE¤}/„ñù¤Ówé‰í¤/ý„å¤u„½¤¿ù„+¤u„w¢‰‰ [;‰‰‹Aщ©‰í¢‰‰‹Éc‰‰‹Ãµ„qC¤ÿׄsU¤Ac„{y¤1„ñ¤]ó Ÿ‰í¤£„c¤O„…¤¿„+¤«„=¢‰‰ Ýa‰‰‹C©‰)‰í¢‰‰‹Éc‰‰‹Å„ñù¤1„sU¤C)„û3¤ñù„1¤[á Ÿ‰í¤¥‘„O¤™„M¤½W„+¤O„™¢‰‰ ß©‰‰‹Åቩ‰í¢‰‰‹Éc‰‰‹Eńñ¤ñ„sU¤Cõ„û3¤ñù„1¤Y_‰³‰í¤'i„)¤ ӄ‡)¤;¡„ Á¤c„墉‰ Q‰‰‹Ç¹÷ã‰í¢‰‰‹É™‰‰‹G„ñ¤ñ„õ±¤ÅфyW¤ó¯„ÿW¤Ùwé‰í¤»E„«¤+„‡õ¤»E„ Á¤™„墉‰ ÓK‰‰‹Gñ‰)‰í¢‰‰‹5÷‰‰‹ÙՄñù¤qC„sU¤Ça„y¤sU„ÿפCu Ÿ‰í¤½ „«¤+„‡õ¤9„+¤=„™¢‰‰ S󉉋Yɉ©‰í¢‰‰‹5+‰‰‹Û­„ñù¤ó¯„sU¤G̈́ù!¤õ±„ÿפAc Ÿ‰í¤¿„«¤ Á„_¤¹µ„+¤ ӄ™¢‰‰ U¹‰‰‹[wM‰í¢‰‰‹µÁ‰‰‹[å„ñù¤s „sU¤G̈́ù!¤ug„ÿ ¤Á½‰3‰í¤1‹„ƒ¤¹µ„ Á¤÷„¢‰‰ Wˉ‰‹ÝÙ÷ã‰í¢‰‰‹3e‰‰‹]½„ñg¤s‹„õ±¤G̈́íÿ¤Ï÷‰3‰í¤³±„¤'é„ Á¤÷„=¢‰‰ éñ‰‰‹ß‘÷ã‰í¢‰‰‹3‰‰‹ßõ„qC¤s‹„õ±¤Ù©„í}¤MÁwW‰í¤ƒQ„ ¿¤¹3„“¹¤'„­¤+„=¢‰‰ i[‰‰‹_é‰)‰í¢‰‰‹1S‰‰‹Ñ̈́qC¤õ±„sÕ¤Ù©„o¤Ça„õ±¤ÿ× Ÿ‰í¤«„ Á¤9„‘ñ¤'鄍­¤ Á„=¢‰‰ kƒ‰‰‹Q¡wM‰í¢‰‰‹1‰‰‰‹Ó…„qC¤u„sÕ¤Ù©„ok¤}/„u¤S݄s ¤³ Ÿ‰í¤Ï„w¤­¥„‹e¤ƒQ„‘¤'鄍­¤‹e„=¢‰‰ km‰‰‹ÓùwM‰í¢‰‰‹±­‰‰‹S݄qC¤ug„sÕ¤Ù©„ok¤ó¯„ñ¤Óq„ó¯¤ñ‰³‰í¤„™¤-„÷¤ Á„ˤ'é„ Ó¤‰S„=¤1U„‹e¢‰‰ Ë-‰‰‹Õ±„…c¤Ÿo÷ã‰í¤a„{¢‰‰‹ý‰‰‹U•„u¤Ï+„qC¤w-„ó¯¤Ù©„á·¤õ±„ÿ‹¤Óq„ñù¤ñg‰3‰í¤™„¤¹„‰Õ¤߄…¤ ?„Ÿo¤¹3„…¤³±„ Ó¢‰‰ ·U‰‰‹×‰„s¤ówM‰í¤eµ„ok¢‰‰‹q›‰‰‹×í„ñù¤Í„ýŤGM„Õ'¤ýńýE¤gE„ñ¤qC 
Ÿ‰í¤=„c¤™;„;¤½„+Y¤¹3„½¤3g„™¢‰‰ µÅ‰‰‹WÁ„‘¤›Í‰)‰í¤ç„í}¤û3„ug¤Ù©„ñ¤Ë‰„}/¤Çã„Uƒ¤ÿ „yW¤y„³¤ó¯ Ÿ‰í¤ ӄÏ¤‡õ„‡)¤u„«ÿ¤9Ÿ„ƒQ¤·‰„å¤'鄋¤M„¤awM‰í¤gDŽ끤{y„ug¤Ùs„ñù¢‰‰‹=±‰‰‹iý„ýŤÇa„Uï¤1„ç{¤yW„1¤ó/‰³‰í¤w„«¤‡©„›Í¤å„)7¤9û„ƒÑ¢‰‰‹ñŸ‰‰‹ëфå¤%DŽ‹e¤u„©¤_÷ã‰í¤ù!„W¤ÿ „u¤Û»„ñ¢‰‰‹½Õ‰‰‹kµ„}¡¤Ç‡„×I¤ñ„eµ¤yé„ÿפs  Ÿ‰í¤­„u¤…ㄝ]¤儩¤»E„u¢‰‰‹ó¡‰‰‹í©„™¤¥‘„ Á¤O„«ÿ¤‡©÷ã‰í¤yé„Uƒ¤1„õ±¤[á„ñù¢‰‰‹½ ‰‰‹m„ÿ ¤E;„W¤ñ„c#¤{Ÿ„ÿ ¤sÕ‰3‰í¤­„u¤…ㄟ…¤™„§¤;¡„«¢‰‰‹s׉‰‹mᄤ£i„+¤儭¥¤Mwωí¤û³„Óq¤ñù„sÕ¤]ó„ñù¢‰‰‹»C‰‰‹ïńÿפCõ„ë¤qC„oë¤{y„}¡¤u Ÿ‰í¤‹e„ƒÑ¤…„‘—¤=„¤½„«¢‰‰‹u‰‰‹o¹„™¤£„­¤™„-¤ÏwM‰í¤{Ÿ„QˤqC„s ¤_¹„ñ¢‰‰‹9g‰‰‹á„³¤Ac„퓤qC„o¤ýńýE¤uç Ÿ‰í¤ ?„;¤ƒQ„ƒ¤ ӄݤ±/„c¢‰‰‹w¯‰‰‹áñ„å¤/ý„w¤ ӄ/ý¤…÷ã‰í¤{y„Ñ…¤ó¯„s ¤Ñ…„ñ¢‰‰‹9‰‰‹aՄñ¤OQ„磻ó¯„íÿ¤}/„ýE¤÷A‡‰‰í¤u„ï¤ Ó„ˤ1ׄc¢‰‰ ‰U‰‰‹ãɄ™¤/„ Ó¤w„!#¤;÷ã‰í¤ýE„ß]¤s „ó¯¤Ño„ñù¢‰‰‹¹1‰‰‹c­„ñ¤Ï«„á·¤ó¯„퓤ÿ ‰3‰í¤u„•¤w„Ÿ…¤3„c¢‰‰ ±‰‰‹å„å¤-ë„ Ó¤w„£¤ƒÑwωí¤ÿ‹„W¤÷Äÿ ¤sՄqC¤S݄ñ¤Ãτuç¤mY„ñ¤K¿„古s „i§¤1 Ÿ‰í¤O„—Û¤w„ó¤µÃ„夓'„‹›¤=³„c¤­¥„=¤­„u¤ ¿„©¤÷wM‰í¤ÿׄoë¤sU„s ¤u„³¤sՄó/¤S[„ñ¤A„u¤ï¥„ñg¤Ë‰„eµ¤sU„W¤1 Ÿ‰í¤O„©¤+„›Í¤·‰„¤݄‹å¤¿y„c¤­%„ Q¤­„Ï¤‹e„w¤+„‘—¤«wM‰í¤ÿׄo¤ó/„õ?¤sU„ñù¤sՄó/¤Uƒ„ñ¤Ï÷„õ±¤o„qC¤I-„ç¤sU„W¤ñ„i%¤{yuljí¤„¤™„ß¤‹„½¤‹e„¤7?„=¤‘q„ Á¤³±„夫„=¤ Á„=¤+„ Á¤ ӄ“'¤O÷ã‰í¤1„mY¤ó¯„õ±¤sU„qC¤õ±„qC¤Um„ñ¤MÁ„õ±¤o„qC¤ÉA„ç{¤u„ýŤug„c£¤ñù„í}¤åiw ‰í¤‘„K¤™„¤ ӄu¤‹e„_¢‰‰‹¡%‰‰‹û¡„=¤˄­¤3g„c¤‘—„÷¤­„=¤ Á„ Ó¤+„‹e¤=„•I¤c÷ã‰í¤ñ„k7¤qC„u¤sU„ó¯¤õ±„qC¤sՄs‹¤oë„ñ¤Í„sդ᷄qC¢‰‰ ‰‹{…„ù!¤u„ÿ ¤ó¯„åi¤ñù„á5¤okwW‰í¤ï„9¤=„›K¤å„«¤‹e„‡s¤‡„+¤#5„=¤˄­¤5-„O¤ᄃQ¤ Á„=¤‹e„ Q¤w„‹›¤=„¤åwM‰í¤ñg„é[¤ñg„ug¤s „s‹¤u„qC¤õ±„}/¤ç{„1¤I­„s‹¤a„ó¯¤[á„ó¯¤gDŽû3¤uç„ÿ ¤ÿ „çû¤ó/„å ¤i§ Ÿ‰í¤—Û„s¤ Q„‡¤u„u¤‹›„M¤™»„ Ó¤¥‘„ Ó¤Ÿo„÷¤·Õ„O¤„ƒQ¤ Á„=¤‹e„÷¤w„‹¤„%¤÷ã‰í¤qC„W¤û3„s‹¤u„qC¤u„ýŤgDŽ1¢‰‰‹O©‰‰‹ÿ•„s‹¤{y„w-¤ÿ‹„s‹¤[„ñg¤y „û³¤÷Äÿ ¤ýńgǤs‹„ç¤Umwé‰í¤-„_¤w„_¤…ã„u¤ ¿„…¤‡õ„c¤¥ý„ Q¤O„‹›¤;„w¢‰‰ ­‰‰‹‰„)¤‡õ„½¤‹e„=¤‹e„+¤M„«¤=÷ã‰í¤qC„Um¤û3„sU¤u„qC¤u„ýŤy„ÿW¢‰‰‹ÏM‰‰‹í„s ¤ýE„uç¤1„ó/¤[…„ñ¤y„{y¤÷Äÿ ¤{Ÿ„ù!¤s „ù!¤Óq‰3‰í¤/„‡©¤w„‡©¤τˤ‡)„Ï¤%G„÷¤O„‹e¤ƒÑ„w¢‰‰ w‰‰‹ñÁ„«¤‡©„½¤‹„‡)¤M„ Ó¤+„‘q¤ Ó÷ã‰í¤ó¯„o¤sU„ó¯¤û3„yW¤ug„ýŤyé„ÿ×¢‰‰‹M㉉‹q¥„s ¤}¡„u¤1„s‹¤Û9„³¤yW„á·¤û³„yé¤s 
„yé¤Ño‰³‰í¤!#„…㤍­„M¤‡©„K¤…ã„«¤§!„w¤O„‹e¤ƒÑ„÷¢‰‰ ƒÓ‰‰‹ó™„«¤‡©„½¤ ¿„Ϥ‡©„ Á¤=„ݤ Ó÷ã‰í¤s‹„í}¤ñù„u¤û3„mÙ¤û3„ÿ×¢‰‰‹Í;‰‰‹óý„s‹¤ÿ „õ?¤³„sÕ¤Ùõ„ÿפ{Ÿ„á5¤yé„û³¤õ±„{û¤ÝK Ÿ‰í¤#5„…‡¤ Á„Ϥ‡©„K¤…ã„«¤'„­¤Ï„ A¤u„÷¢‰‰ =‰‰‹sф«¤M„“¹¤M„‹e¤™„¤÷÷‰í¤õ±„끤aý„o¤û³„ÿ ¢‰‰‹Kщ‰‹õµ„s ¤ÿ „õ±¤ñ„s ¤Ùs„ÿ ¤{y„á·¤yé„û³¤u„}¡¤[ Ÿ‰í¤¥{„ƒÑ¤‹e„Ϥ‡©„ˤ…„u¤' „w¤c„ Á¤u„w¢‰‰ =‰‰‹u©„u¤τ‘ñ¤Ÿ…„ñ¤ Á÷ã‰í¤u„W¤c£„ok¤û3„ÿ‹¢‰‰‹Ëu‰‰‹÷„s ¤ÿ „sU¤ñù„s ¤Ù©„}¡¤}/„á·¤y„û3¤w-„ÿ‹¤Õ'„ÿפw--‰í¤«„Ÿo¤_„Ÿo¤u„½¤¹3„+¤„+¤u„w¢‰‰ …剉‹÷á„«¤‡©„Ÿo¤ß÷ã‰í¤c£„a¤yé„ÿ×¢‰‰‹Ë«‰‰‹wńs ¤ÿ „sU¤ñg„sU¤GM„ýŤÿ „a¤ù!„a¤Um‹Å‰í¤)Ʉ]¤™;„Ÿo¤u„;¤¹µ„+¤™„w¤«„÷¢‰‰ …剉 ‰¹„u¤M„Ÿo¤s÷‡‰í¤å„ãG¤û3„}¡¢‰‰‹Iω‰ „ó/¤ÿׄs ¤qC„sU¤Ça„{y¤ÿW„a‘¤gDŽeµ¤×I Ÿ‰í¤)7„›Í¤™»„Ÿá¤)„…¤9„+¤=„w¤«„ Q¢‰‰ …剉 ñ„ƒÑ¤M„9¤ó÷‰í¤å„ñù¤÷A„ýE¤yW„ÿ ¤MÁ„uç¤y„󯤳„ó¯¤qC„sդDŽ{y¤1„a¤gDŽç{¤Um Ÿ‰í¤«„¤™»„Ÿo¤O„…¤9y„­¤=„ Ó¤Ï„ Ó¤‡õ„‹›¤³±„u¤‡)„;¤ ?„™¤ó÷‰í¤å„u¤ÿׄqC¤yW„}¡¤Íå„u¤yé„ó¯¤1„ó/¤ó¯„s ¤Ç„{Ÿ¤ñ„a¤gDŽù!¤Uƒ‰3‰í¤­¥„‡)¤™»„Ÿo¤c„…ã¤9û„w¤ Q„=¤å„ Ó¤M„ Á¤3g„ƒÑ¤‡õ„‹›¤‡©„‰S¤ß÷ã‰í¤c£„w-¤yé„uç¤y„}¡¤Í„õ±¤û3„ó¯¤ñ„qC¤ó/„s ¤Ç‡„{Ÿ¤ñ„a¤gDŽyW¤SÝwW‰í¤/„…¤‡„Ÿ…¤™„M¤»E„+¤ ӄ=¤å„ Q¤τ+¤3g„;¤3g÷ã‰í¤Í„ýE¤Í„sU¤û³„ó/¤ñ„qC¤ó¯„sU¤E;„û3¤ñù„aý¤çû„{y¤Ño‰3‰í¤/ý„½¤„…¤ ?„Ï¤=„‡)¤»E„÷¤w„™¤™„=¤…ㄍ­¤µÃ„;¢‰‰ ÓK‰‰ ‘‹»‰í¢‰‰ 퉉 õ„ýE¤ËS„s ¤{y„qC¤qC„ñ¤s „s‹¤E;„yW¤ó¯„ñ¤u„{y¤ç{„ÿ ¤ß] Ÿ‰í¤!#„u¤„…¤‹e„c¤ ӄ‡)¤»E„÷¤w„c¤=„=¤…„w¤5-„;¢‰‰ ׉‰ é‰õ‰í¢‰‰ ‰‰ ƒÍ„ýE¤K¿„ó¯¤{y„qC¤qC„ñ¤s „s ¤E;„y ¤s „ñù¤sU„ýŤç{„ñ¤[á Ÿ‰í¤¥‘„处½¤+„™¤w„‡s¤»E„w¤w„c¤=„=¤…„ Ó¤µÃ„;¢‰‰ 鉉 ¡‰õ‰í¢‰‰ ‰‰ ……„û3¤Íå„ó/¤ýńqC¤qC„ñ¤s „ó¯¤Åфyé¤sՄñù¤s „}¡¤eµ„ó¯¤Û»‰3‰í¤'鄋e¤„ƒÑ¤ ӄ™¤‹e„‡õ¤;¡„ Ó¤÷„O¤ ӄ=¤;„ Ó¤³1„M¢‰‰ km‰‰ …ù÷ã‰í¢‰‰ [‰‰ ݄û3¤MO„ó¯¤ýE„qC¤ó¯„1¤s‹„ó¯¤Åфy¤u„ñù¤ó¯„}¡¤åi„u¤Ù©‰3‰í¢‰‰ I‹‰‰ ‡±„ƒÑ¤=„‘—¤;¡„=¤+„«¤w„=¤;„=¤1‹„M¢‰‰ km‰‰ ‡±÷ã‰í¢‰‰ ›ï‰‰ •„û3¤OQ„ñù¤ÿ „qC¤ó¯„ÿפsՄó/¤Cõ„á·¤ñù„ÿ ¢‰‰‰ñA‰‰ • Ÿ‰í¢‰‰ I‹‰‰ ™‰„u¤™„ˤ½„ Q¤­„«¤ ӄ=¤u„™¤±/„M¢‰‰ km‰‰ ™‰}Á‰í¢‰‰ '‰‰ ™í„û3¤Á=„ñg¤ÿ „qC¤s‹„ÿ ¤sՄó¯¤Cu„a¤ñ„ÿ ¢‰‰‰牉 ™í Ÿ‰í¢‰‰ ɯ‰‰ Á„u¤償o¤½ „ Ó¤­„u¤÷„=¤u„¤?C„M¢‰‰ ÓK‰‰ Áqå‰í¢‰‰ ™Ý‰‰ ›¥„yé¤Ac„ñ¤ÿׄñù¤s‹„ÿ‹¤õ±„qC¤Cu„aý¤1„ÿ×¢‰‰‰‰‰ ›¥‰³‰í¢‰‰ 7C‰‰ ™„«¤u„ß¤½ 
„™¤‹e„ƒQ¤÷„=¤«„c¤¿Ÿ„M¢‰‰ Yõ‰‰ ™÷ã‰í¢‰‰ —‰‰ ý„û3¤Aã„ñ¤ÿׄqC¤s‹„}/¤u„ñù¤Cu„c£¤ÿ „ÿ×¢‰‰‰ÿ?‰‰ ý‰3‰í¢‰‰ 7C‰‰ Ñ„å¤;„󤽍„™¤‹e„½¤+„™¤O„Ï¤½W„M¢‰‰ Gc‰‰ Ñ÷ã‰í¢‰‰ o‰‰ µ„û3¤C)„³¤1„ñù¤sU„ýŤu„ñù¤Cõ„古ýE„ñ¢‰‰‰}Չ‰ µ‰3‰í¢‰‰ ·ù‰‰ Ÿ©„c¤…„¤™»„‹›¤)Ʉc¤‹›„…¤+„=¤O„Ï¤½W„M¢‰‰ ǽ‰‰ Ÿ©÷ã‰í¢‰‰ ߉‰ „{Ÿ¤Cõ„ÿפñ„ñù¤sՄ{y¤÷ÄÿפW„sU¤yW„åi¤{Ÿ„ñù¢‰‰‰ý¯‰‰  Ÿ‰í¢‰‰ ·ù‰‰ á„™¤…ㄤ‡)„+¤©„«¤ ¿„…¤­„™¤c„«¤½„…㢉‰ ǽ‰‰ áñ‰í¢‰‰ ƒó‰‰ ‘Å„{Ÿ¤Cu„1¤³„qC¤sՄ{Ÿ¤w-„ÿ ¤W•„ó¯¤{y„eµ¤û3„qC¢‰‰‰ûg‰‰ ‘ʼn3‰í¢‰‰ ·‰‰ ¹„=¤‡©„a¤ƒÑ„=¤)Ʉ‘¤­„=¤c„«¤;¡„…㢉‰ ë'‰‰ ¹÷ã‰í¢‰‰ ƒ‰‰ “„{Ÿ¤Åфÿפñ„qC¤sՄok¤×ɄqC¤}¡„ç¤yé„qC¢‰‰‰û‰‰ “‰3‰í¢‰‰ ·‰‰ “ñ„ Q¤‡õ„‡¤u„=¤«“„‘q¤ Á„=¤å„«¤;¡„…¢‰‰ km‰‰ “ñ÷ã‰í¢‰‰ a‰‰ Մ{y¤Åфÿפñ„qC¤õ±„o¤Uï„qC¤ÿ „çû¤y„ó/¢‰‰‰y1‰‰ Õ‰3‰í¢‰‰ ·‰‰ •É„÷¤™»„_¤O„c¤«„ݤ Á„=¤„u¤]„‰Õ¤•„…¢‰‰ km‰‰ •É÷‰í¢‰‰ »‰‰ ­„{û¤ëk„w-¤c#„ÿ‹¤ñù„qC¤u„磻Õ'„ñ¤ñ„y¤ç{„sU¢‰‰‰ù‹‰‰ ­ Ÿ‰í¢‰‰ ·‰‰ —„+¤„‡õ¤å„c¤+Y„ݤ‹e„=¤™„÷¤]„‰S¤„…‡¢‰‰ kƒ‰‰ —qc‰í¢‰‰ _‰‰ —å„{y¤k7„÷ä叄}/¤qC„qC¤u„o¤Óq„1¤qC„y¤ç‘„sÕ¢‰‰‰g/‰‰ —å Ÿ‰í¢‰‰ 51‰‰ ل­¤ᄇõ¤=„O¤-„‘q¤‹e„=¤=„ƒQ¤ó„ ¿¤•I„…¢‰‰ ׉‰ Ùqå‰í¢‰‰ õ‰‰ ©½„{y¤í“„ug¤å „}¡¤ó¯„ñù¤uç„gǤÙs„³¤ó¯„yW¤e5„õ?¢‰‰‰çʼn‰ ©½‰3‰í¢‰‰ 51‰‰ )‘„‹e¤„‡)¤÷„O¤݄‹›¤9„‡©¤‹e„=¤ ӄ;¤„‹e¤ï„½¢‰‰ Ýa‰‰ )‘÷ã‰í¢‰‰ )‰‰ )õ„ýŤ퓄u¤åi„ýE¤ó¯„qC¤u„yé¤ãG„uç¤ï¥„1¤s‹„yW¤åi„u¢‰‰‰ey‰‰ )õ Ÿ‰í¢‰‰ ·‰‰ «é„ ¿¤ß„…㤍+„«¤‘—„+¤˄…¤‹›„™¤ ӄƒÑ¤󄋛¤ï„;¢‰‰ Y)‰‰ «é÷ã‰í¢‰‰ M‰‰ +̈́ýE¤í“„uç¤å„}¡¤ó¯„ñù¤uç„{y¤á·„sU¤oë„ÿפsU„{Ÿ¤c£„÷≉‰ey‰‰ +͉3‰í¢‰‰ Ý)‰‰ ­¡„…㤋e„u¤˄ Ó¤‘q„ƒÑ¤‹›„™¤w„;¤„‹e¤„;¢‰‰ Gc‰‰ ­¡m‰í¤w-„÷≉ ‰‰ -…„}¡¤í}„u¤åi„{y¤s‹„qC¤uç„ÿ ¤mY„qC¤á5„ÿפu„{û¢‰‰‰å³‰‰ -… Ÿ‰í¢‰‰ Ý)‰‰ -ù„…‡¤‹e„«¤K„=¤“'„u¤‹›„=¤÷„…¤„‹e¤„ƒÑ¢‰‰ Õ]‰‰ -ù„ ¿¤‰S…‰í¢‰‰ ‹½‰‰ ¯Ý„}/¤íÿ„u¤qC„÷äýń{y¤sU„ñù¤÷A„1¤ëí„ó¯¤á·„ÿפuç„{û¢‰‰‰cW‰‰ ¯Ý‰3‰í¢‰‰ Ýõ‰‰ /±„…¤‰S„«¤˄ Ó¤§„_¤+„…¤ƒÑ„ ?¤™„‹e¤ƒ„÷¢‰‰ 奉‰ /±÷ã‰í¢‰‰ Q‰‰ ¡•„ÿ‹¤íÿ„u¤ñù„÷A¤}¡„{y¤sU„ù!¤éۄó¯¤á·„ÿפw-„{y¢‰‰‰c‰‰ ¡•‰3‰í¢‰‰ Ýõ‰‰ !‰„9¤‡s„‹›¤=„ Ó¤©í„‡)¤+„…¤u„‹e¤=„ Á¤“'„ƒQ¢‰‰ g‰‰ !‰÷ã‰í¢‰‰ Q‰‰ !í„}/¤mY„õ±¤qC„u¤ÿ „{y¤sU„yW¤W•„ó¯¤qC„uç¤y „ãG¢‰‰‰ã!‰‰ !퉳‰í¢‰‰ Ýõ‰‰ £Á„]¤‡õ„‹e¤ ӄ Ó¤«“„Ϥ Á„…㤫„ Á¤=„ Á¤“'„u¢‰‰ ùG‰‰ £Á‰õ‰í¢‰‰ ‰«‰‰ #¥„ÿפíÿ„u¤qC„sÕ¤ÿׄ{û¤õ±„{y¤Óñ„s ¤ó/„u¤y„åi¢‰‰‰áû‰‰ #¥ Ÿ‰í¢‰‰ ]_‰‰ 
¥™„¤‡õ„‹e¤ Q„w¤-„…¤ Á„…‡¤«„­¤=„‹e¤ƒ„«¢‰‰ ûi‰‰ ¥™‰©‰í¢‰‰ ‰«‰‰ ¥ý„ÿ ¤mY„õ±¤ó¯„s ¤1„{Ÿ¤õ±„}¡¤Q˄s ¤s‹„sÕ¤û3„åi¢‰‰‰û‰‰ ¥ý„÷äñ Ÿ‰í¤å„ ¿¢‰‰ ]_‰‰ %ф¤M„­¤÷„w¤¯·„ƒÑ¤ Á„…㤁O„w¤ ӄ Á¤“'„u¢‰‰ ý‰‰ %щ)‰í¢‰‰‹wO‰‰ §µ„ÿפmY„õ±¤qC„s ¤ñ„û3¤u„ÿ ¤Ñ…„s ¤s „sU¤qC„uç¤s‹„碉‰‰û‰‰ §µ„u¤ñù‰3‰í¤™„ Á¢‰‰ ß»‰‰ '©„‡¤÷„‹›¤=„÷¤­„÷¤!#„«¤‹e„…ã¤å„ Ó¤=„ Á¤™»„‰S¤«„«¢‰‰ ‰‰ '©÷ã‰í¢‰‰‹wO‰‰ ¹„ÿפÿׄw-¤gDŽõ±¤qC„ó¯¤ñ„{Ÿ¤u„ÿפß]„s‹¤sՄs‹¤qC„uç¤s‹„çû¢‰‰‰û‰‰ ¹„õ±¤ñù‰3‰í¤™„ Á¢‰‰ ß»‰‰ ¹á„™»¤w„‹e¤ ӄ÷¤­„÷¤£ë„Ï¤‹›„M¤™„¿¤ Q„ Á¤™;„ ¿¤O„«¢‰‰ W‰‰ ¹á÷ã‰í¢‰‰‹÷剉 9ńÿפ1„÷ägE„õ±¤ó/„qäñù„û3¤u焳¤]—„s‹¤sՄs‹¤ó¯„u¤s „gÇ¢‰‰‰y1‰‰ 9ńõ±¤ñù Ÿ‰í¤=„ Á¢‰‰ _‰‰ »¹„_¤­„‹e¤ Q„ Ó¤ Á„w¤¥‘„™¤‹e„M¤=„=¤=„ Á¤_„‹›¤O„Ï¢‰‰ qŸ‰‰ »¹wM‰í¢‰‰‹÷™‰‰ ;„³¤1„uç¤ù!„õ±¤qC„ñù¤qC„û3¤uç„qC¤Û»„sÕ¤sU„ó¯¤s‹„õ±¤sՄy¢‰‰‰ù‹‰‰ ;„õ±¤qC Ÿ‰í¤=„ Á¢‰‰ _‰‰ ;ñ„‡õ¤­„ Á¤÷„ Ó¤+„­¤%DŽ=¤‹›„M¤=„™¤=„ Á¤_„‹›¤O„Ï¢‰‰ qy‰‰ ;ñ‰©‰í¢‰‰‹u=‰‰ ½Õ„1¤ñ„u¤y„sU¤ó¯„ñù¤qC„û3¤uç„ó¯¤Ùs„sU¤õ±„qC¤s „õ±¤sU„û3¢‰‰‰g/‰‰ ½Õ„sU¤ó¯‰3‰í¤ ӄ+¤‡©„‹›¤;¡„ ?¤«ÿ„M¤ Á„+¤­„™¤‹e„+¤¹µ„÷¤ ?„‡©¤ ӄc¤ ӄ+¤‡õ„‹e¤c„O¢‰‰ õ ‰‰ =É÷ã‰í¢‰‰‹u=‰‰ ¿­„1¤ñ„u¤y„sU¤ó¯„ñ¤ó¯„yé¤÷A„s‹¤G̈́sU¤u„ñù¤sՄsU¤õ±„û3¤Uƒ„÷A¤Åфuç¤yé„sU¤ó¯ Ÿ‰í¤÷„w¤…ã„ Á¤;¡„ ?¤+Y„…¤ Á„÷¤­„=¤‹e„+¤9û„¤ ӄc¤=„ Á¤‡õ„‹e¤Ï„墉‰ õ׉‰ ?÷ã‰í¢‰‰‹õӉ‰ ?å„ñ¤³„u¤y„õ±¤qC„ñ¤ó¯„åi¤Ç‡„sU¤u„qC¤sՄs‹¤õ±„{y¤Õ'„÷A¤Åфõ±¤{Ÿ„s ¤s‹‰3‰í¤+„ Ó¤;„ Ó¤;¡„‹›¤«ÿ„…¤‹e„÷¤­„™¤‹„­¤;¡„›Í¤ ӄc¤ ӄ+¤‡õ„ Á¤™„O¢‰‰ u1‰‰ ±ÙwM‰í¢‰‰‹sw‰‰ 1½„ñ¤ñù„sU¤yW„sU¤ó¯„1¤s‹„ç¤C«„sÕ¤÷Äñ¤õ±„ó¯¤u„}/¤ãG„ug¤{y„÷äCõ„qC¤ýE„ó¯¤õ± Ÿ‰í¤ Á„ Ó¤;„=¤½„ ¿¤…„‹¤9„ƒQ¤‹å„ Ó¤ Á„å¤ ¿„­¤½×„a¤÷„O¤ ӄ+¤‡)„+¤™„墉‰ ÷‰‰ ³‘wÏ‰í¢‰‰‹sw‰‰ ³õ„ñ¤ñù„sU¤yW„sU¤ó¯„ÿפs‹„çû¤C«„yW¤õ±„ó¯¤ug„}/¤c#„sÕ¤ÿ „ug¤Cõ„ñù¤ýńó/¤õ? 
Ÿ‰í¤ A„ Q¤½„™¤½„‹¤u„­¤]„ƒQ¤‹„ Ó¤ Á„‡)¤½×„‡¤÷„«¤ ӄ+¤‡)„+¤™„墉‰ ÷ù‰‰ 3éwM‰í¢‰‰‹sw‰‰ µÍ„ñ¤ñù„sU¤qC„w­¤ñù„s‹¤ó¯„1¤s‹„gE¤Ac„û3¤õ±„qC¤÷Äÿ‹¤aý„sU¤1„u¤Ãτñg¤ÿ „ñù¤uç Ÿ‰í¤ ?„™¤u„™¤¿Ÿ„‹å¤儍+¤K„«¤‰S„™¤‹e„M¤?ń_¤+„«¤ ӄ+¤™„‹›¤÷„w¤™„¢‰‰‰¯‰‰ 5¡÷ã‰í¢‰‰‹s­‰‰ ·…„ñg¤ñù„s ¤s‹„uç¤ñù„sU¤ó¯„ÿפsU„ù!¤Á½„û3¤u„ñù¤w-„ÿפá5„sU¤ñ„u¤Aã„ñù¤ÿ „ñù¤÷A‹‰í¤u„™¤?ń‹e¤=„÷¤K„™»¤ Á„…㤱/„_¤+„«¤ ӄ+¤™„ ?¤ Q„w¤=„墉‰ ‰‰ ·ùwM‰í¢‰‰‹óÁ‰‰ 7݄ñù¤qC„s ¤ó/„uç¤qC„sU¤ó¯„ÿ ¤sՄyé¤Ï÷„û3¤u„ù!¤o„s ¤s‹„õ±¤OQ„ñ¤1 Ÿ‰í¤O„c¤±/„ Á¤÷„w¤‘q„_¤‹e„M¤1‹„‡©¤­„u¤ ӄ+¤=„‹›¤ Q„w¤=„™¢‰‰ Չ‰ ɱwÏ‰í¢‰‰‹óÁ‰‰ I•„ñù¤qC„ó¯¤s‹„uç¤qC„s ¤s‹„ÿ ¤sՄû3¤MÁ„û³¤ug„y¤ï¥„ó¯¤sU„õ±¤Ï÷„ñ¤³ Ÿ‰í¤Ï„å¤1‹„ Á¤+„ Ó¤݄‡õ¤‹„Ϥ³±„M¤­„u¤÷„w¤=„‹›¤÷„ Ó¤=„™¢‰‰‹±‰‰ ˉ÷ã‰í¢‰‰‹óÁ‰‰ Ëí„ñù¤qC„ó¯¤s‹„u¤ó¯„s ¤ó/„ÿ‹¤õ±„{Ÿ¤Íe„û³¤u„yW¤mY„ó¯¤u„sU¤Ï+„ñ¤1 Ÿ‰í¤„Ï¤³1„+¤‹›„=¤ƒ„Ϥ‹„M¤3g„…㤠Á„÷¤ Q„w¤ ӄ‹e¤÷„=¤ ӄ=¢‰‰ W‰‰ KÁ„‹›¤«÷ã‰í¤ÿׄu碉‰‹qe‰‰ Í¥„qC¤ó¯„qC¤s‹„u¤ó¯„s ¤ó/„ÿ‹¤õ±„{Ÿ¤Í„û3¤ug„û³¤íÿ„qC¤uç„sU¤MO„³¤ñg‰3‰í¤™„O¤3g„½¤•I„…㤋„…ã¤5¯„;¤ Á„ƒQ¤ Q„­¤ ӄ Á¤÷„=¤ ӄ=¢‰‰ ñ3‰‰ M™„ Á¤™÷ã‰í¤ñù„õ±¢‰‰‹qe‰‰ Mý„qC¤ó¯„qC¤s‹„õ±¤ó¯„sÕ¤ó/„}/¤õ±„ýE¤Ëӄ{Ÿ¤ug„{Ÿ¤k7„ýŤÍ„1¤ñù Ÿ‰í¤=„O¤µÃ„½¤•„ƒ¤5¯„;¤ Á„ƒQ¤÷„w¤ ӄ Á¤÷„=¤ ӄ=¢‰‰ qŸ‰‰ Ïф+¤ ÓwM‰í¤s‹„sÕ¢‰‰‹q›‰‰ Oµ„qC¤s „qC¤s‹„õ±¤ó¯„s ¤s‹„}/¤õ±„}¡¤Iw„íÿ¤ëí„ýŤËS„1¤qC Ÿ‰í¤=„O¤5-„½¤•„ƒ¤· „ƒÑ¤ Á„ƒQ¤÷„w¤ ӄ Á¤÷„=¤w„=¢‰‰ óʼn‰ Á©„­¤÷÷ã‰í¤õ±„s ¢‰‰‹q›‰‰ A„qC¤sՄñù¤ó¯„õ±¤s‹„s ¤s‹„ýŤu„ÿ ¤ÉA„磻ë„ýŤIw„³¤qC Ÿ‰í¤=„Ï¤· „½¤„ݤ7?„u¤‹e„½¤÷„w¤÷„ Á¤ ӄ™¤­„=¢‰‰ s/‰‰ Aᄍw¤ Á÷ã‰í¤uç„ó/¢‰‰‹q›‰‰ ÃńqC¤sՄñ¤s „õ±¤s‹„s ¤s‹„ýŤõ±„ÿפÉA„o¤éۄÿ ¤å„w-¤]—„³¤ó¯‰3‰í¤ Q„«¤£„ ?¤ó„u¤©„K¢‰‰‹±W‰‰ C¹„O¤ Á„½¤÷„w¤÷„­¤+„c¤­„=¢‰‰ õ׉‰ C¹÷ã‰í¢‰‰‹q›‰‰ ŝ„qC¤sՄñ¤sU„sÕ¤s‹„s ¤s‹„ýŤõ±„1¢‰‰ ÿՉ‰ ŝ„á5¤W„ÿ ¤å„÷A¤]ó„ÿפó/ Ÿ‰í¤÷„«¤£„‹›¤ó„u¤©m„Ÿo¢‰‰‹±W‰‰ Åñ„O¤‹e„…¤+„ Ó¤+„w¤+„O¤ Á„=¢‰‰ u1‰‰ ÅñwM‰í¢‰‰‹q›‰‰ EՄqC¤õ±„1¤sU„s ¤sU„ó¯¤sU„{y¤u„ñ¢‰‰ ÿՉ‰ EՄaý¤Um„ÿפ叄uç¤]„ÿפs  Ÿ‰í¤w„«¤£i„‹›¤ó„«¤«„Ÿ…¢‰‰‹1³‰‰ ÇɄ夋e„…¤+„ Ó¤+„w¤+„O¤ Á„=¢‰‰ ÷‰‰ ÇÉwÏ‰í¢‰‰‹ñ?‰‰ G­„qC¤u„1¤sU„s ¤s‹„s ¤sU„{y¤u„ñ¢‰‰ ÿ ‰‰ G­„c£¤Uƒ„ÿפc#„uç¤]„ÿפs  Ÿ‰í¤w„«¤£i„‹›¤]„«¤«ÿ„ß¢‰‰‹1³‰‰ 
ف„夋e„…¤+„w¤÷„w¤+„O¤‹e„=¢‰‰ ÷ù‰‰ فwM‰í¢‰‰‹ñ?‰‰ Ùå„qC¤uç„ÿפsU„ó¯¤sU„s ¤sU„{Ÿ¤uç„ñù¢‰‰ ÿ ‰‰ Ùå„å ¤Õ'„ÿפc#„uç¤]„ÿ‹¤sU‰3‰í¤÷„«¤£ë„‹e¤¹„O¤­%„¢‰‰‹³Ÿ‰‰ Yل™¤‹›„…㤍+„w¤+„ Ó¤+„a¢‰‰ wC‰‰ YÙ÷ã‰í¢‰‰‹ñ?‰‰ Û½„ç¤sU„ó¯¤sU„s ¤sU„{Ÿ¤uç„ñù¢‰‰ ÿ ‰‰ Û½„åi¤S[„1¤ãلu¤]—„ÿפs‹ Ÿ‰í¤+„u¤£ë„‹e¤¹„O¤-„ᢉ‰‹³y‰‰ [‘„ Ó¤‹e„…㤠Á„ Ó¤+„=¤ Á„a¢‰‰‰¯‰‰ [‘÷ã‰í¢‰‰‹ñ?‰‰ [õ„ç¤õ±„qC¤sU„ó¯¤õ±„{Ÿ¤u„󯢉‰ }-‰‰ [õ„瑤Óñ„1¤ãلu¤]—„ÿ ¤sU‰3‰í¤+„u¤£ë„ Á¤¹„夯·„¢‰‰‹³y‰‰ Ý鄏=¤ ¿„Ϥ Á„ Ó¤ Á„™¤ Á„a¢‰‰ ‰‰ Ýé÷ã‰í¢‰‰‹ñ?‰‰ ]̈́ç¤u„ñ¤õ±„ó¯¤õ±„û³¤÷Ä󯢉‰ }-‰‰ ]̈́gǤÑo„ñ¤aý„u¤y„uç¤mY„ÿ ¤õ± Ÿ‰í¤ Á„u¤“'„‹›¤‡õ„‹å¤Ÿ…„å¤/„™»¢‰‰‹3ʼn‰ ß¡„ Ó¤ ¿„Ϥ Á„ Ó¤ Á„c¤‹e„a¢‰‰ ‰‰ ß¡wM‰í¢‰‰‹S‰‰ _…„瑤uç„ñ¤õ±„ó¯¤u„yW¤÷Ä󯢉‰ }-‰‰ _…„ù!¤W„÷äqC„ñù¤a‘„u¤yW„sU¤o„}¡¤u‰³‰í¤ A„u¤c„‹›¤‡õ„÷¤M„ Á¤Ÿá„™¤ ӄ‹e¤©í„M¢‰‰‹3ʼn‰ _ù„w¤ ¿„‡õ¤‹e„ Ó¤‹å„•I¢‰‰‹±‰‰ _ù÷ã‰í¢‰‰‹S‰‰ Ñ݄k7¤u„ó¯¤u„y¤÷Äs ¢‰‰ ýɉ Ñ݄û3¤W•„u¤ó¯„ñù¤a‘„õ±¤û3„s‹¤y„uç¤ñ„ÿ ¤õ? Ÿ‰í¤‹›„ƒÑ¤„+¤M„ Ó¤M„ Á¤Ÿá„™¤÷„‹e¤«„…㢉‰‹µ/‰‰ Q±„w¤‰S„_¤‹›„ Ó¤‹å„“¹¢‰‰‹±‰‰ Q±÷ã‰í¢‰‰‹}ɉ ӕ„mÙ¤u„ó¯¤uç„ù!¤w-„s ¢‰‰ ýɉ ӕ„{Ÿ¤Um„u¤s‹„ñù¤a‘„õ±¤û3„ó¯¤û3„sU¤ñg„}¡¤uç Ÿ‰í¤ ?„;¤ ӄ Ó¤…ã„ Ó¤M„+¤Ÿá„=¤÷„ Á¤«ÿ„…¢‰‰‹µ/‰‰ S‰„Ÿ…¤‰Õ„=¤‹å„K¢‰‰‹±‰‰ S‰wM‰í¢‰‰‹gù‰‰ Sí„ãG¤ug„k7¢‰‰ {g‰‰ Sí„ÿ ¤Óñ„u¤s‹„qC¤á5„õ±¤yé„ó¯¤{Ÿ„qC¤s  Ÿ‰í¤w„=¤…ã„ Ó¤‡©„ Á¤K„=¤÷„‹e¤-„u¢‰‰‹5 ‰‰ ÕÁ„•I¤‹„9¢‰‰ ‰‰ ÕÁ÷ã‰í¢‰‰‹e ‰‰ U¥„eµ¤ug„k7¢‰‰ {g‰‰ U¥„ÿפQ˄u¤sU„qC¤á5„õ±¤yé„qC¤{y„ñù¤s  Ÿ‰í¤w„™¤…„=¤‡©„ Á¤K„=¤+„‹e¤¯·„«¢‰‰‹5 ‰‰ י„•I¤‹„›Í¢‰‰ ‰‰ יwM‰í¢‰‰‹ãŸ‰‰ ×ý„Y_¢‰‰ {‰‰ ×ý„ñ¤Ño„õ±¤õ±„ó¯¤oë„sU¤yW„qC¤ýE„ñ¤sU‰3‰í¤ Á„«¤ƒÑ„=¤‡õ„+¤‘—„ Ó¤ Á„ Á¤¡Ù„ Q¢‰‰‹5׉‰ Wф#µ¢‰‰ ‰‰ Wф‰S¤…÷ã‰í¤{y„w-¢‰‰‹á‰‰ 鵄ÝÍ¢‰‰ û±‰‰ 鵄ó/¤_¹„õ±¤õ±„ó¯¤oë„sU¤y„qC¤}¡„ÿפõ±‰3‰í¤ Á„«¤ƒÑ„=¤_„+¤‘—„÷¤ A„ Á¤#µ„‹›¢‰‰‹5׉‰ i©„¡Ù¢‰‰ W‰‰ i©„ ?¤;÷ã‰í¤ýE„÷A¢‰‰‹m‘‰‰ 덄_¹¢‰‰ y‹‰‰ 덄uç¤Ý̈́õ±¤õ?„s‹¤oë„sU¤ù!„qC¤}¡„ÿפõ± Ÿ‰í¤‹å„÷¤u„=¤_„÷¤‘—„÷¤ A„ Á¢‰‰‹5׉‰ ëá„-k¤¯·„ ¿¢‰‰ W‰‰ ëᄋe¤u÷ã‰í¤ÿׄu¢‰‰ ߉‰ kńu¤ß]„Um¢‰‰ Ëщ‰ kńõ±¤u„s‹¤o„sU¤ù!„ñg¤ÿW„ýŤ÷à Ÿ‰í¤ ¿„½¤)„¤_„+¤‘q„÷¤‹e„ Á¢‰‰‹·1‰‰ í¹„«¤!#„‹e¢‰‰ ñ3‰‰ í¹„‹å¤«÷ã‰í¤³„õ±¢‰‰ …‰‰ m„sU¤ÝK„W¢‰‰ Ëщ‰ m„sU¤uç„s‹¤o„sU¤gDŽñg¤³‰³‰í¤«„=¤‡„+¤݄+¤‹›„+¢‰‰‹·1‰‰ mñ„¤§£„+¢‰‰ qŸ‰‰ 
mñ„+¤å÷ã‰í¤ñ„sU¢‰‰ o‰‰ ïՄsU¤Y߄뢉‰ Ëщ‰ ïՄsU¤uç„sU¤ï¥„sU¤çû„qC¤ÿ× Ÿ‰í¤O„™¤™»„w¤݄+¤‹›„ Á¢‰‰‹·1‰‰ oɄï¤9Ÿ„w¢‰‰ qŸ‰‰ oɄ÷¤÷ã‰í¤ñg„s‹¢‰‰ —‰‰ ᭄s ¤Çã„í“¢‰‰ Iu‰‰ ᭄õ±¤uç„sU¤ï¥„s ¤gDŽñù¤1 Ÿ‰í¤å„™¤„w¤݄+¤ ?„+¢‰‰‹7‰‰ a„ݤ;¡„w¢‰‰ {µ‰‰ a„‰S¤ ӄw¤=÷ã‰í¤ó¯„ó¯¤sU„u碉‰ '‰‰ aå„ó/¤Ãτo뢉‰ Iu‰‰ aå„s‹¤w­„sU¤mY„sդ瑄ñù¤ñg Ÿ‰í¤„™¤ᄍ­¤“'„+¤‰Õ„÷¢‰‰‹7‰‰ ãل‘—¤=³„ Q¢‰‰ {µ‰‰ ãل‹›¤+„ Ó¤ ÓwM‰í¤ó¯„qC¤õ±„u¢‰‰ ›‰‰ c½„qC¤A„a¢‰‰ I«‰‰ c½„ÿפmY„s ¤ó¯„÷äÿ „ñù¤ñ Ÿ‰í¤å„™¤u„ ¿¤ ӄw¤“'„«¢‰‰‹7‰‰ 呄Ÿo¤¿y„=¢‰‰ {µ‰‰ 呄‹e¤ Á„=¤ Ó÷ã‰í¤s „ñù¤u„õ±¢‰‰ I‰‰ åõ„ñù¤OQ„aý¢‰‰ I«‰‰ åõ„ÿפmY„s ¤ó¯„ug¤ÿׄñ¤ñù Ÿ‰í¤=„夫„‹å¤ ӄw¤„‰S¤O„«¢‰‰‹7g‰‰ eé„s¤³±„™¢‰‰ ý‰‰ eé„ Á¤‹e„™¤­÷ã‰í¤sՄñù¤u„õ±¢‰‰ •‰‰ ç̈́ñù¤MÁ„å ¢‰‰ I«‰‰ ç̈́ÿפ1„w-¤ç{„s ¤ó¯„u¤ÿׄñ¤qC Ÿ‰í¤=„夁τ Á¤ ӄw¤™»„‹›¤c„«¢‰‰‹7g‰‰ g¡„©¤3ç„¢‰‰ ý‰‰ g¡„­¤‹„™¤­÷ã‰í¤õ±„ñ¤ug„õ?¢‰‰ [‰‰ ù…„ñ¤ËS„碉‰ ÉO‰‰ ù…„1¤ñ„u¤ñ„w-¤ñù„ó¯¤ó¯„õ±¤1„ñ¤qC Ÿ‰í¤=„c¤O„ Á¤ ӄ Ó¤™„‰S¤儋e¤c„O¢‰‰‹7g‰‰ ùù„a¤5-„墉‰ ýû‰‰ ùù„ A¤‹„å¤ Á÷ã‰í¤u„1¤÷ÄsÕ¢‰‰ Ÿ§‰‰ y݄ñ¤I­„çû¢‰‰ ÉO‰‰ y݄1¤ñ„u¤ñg„ug¤qC„ó¯¤ó¯„sU¤ñ„³¤ó¯ Ÿ‰í¤ ӄÏ¤儍+¤ ӄ Ó¤=„‹¤„‹e¤c„O¢‰‰‹7g‰‰ û±„‡¤·Õ„c¢‰‰ ýû‰‰ û±„­¤ ¿„O¤‹e÷ã‰í¤u„û3¢‰‰ ‰‰ {•„³¤ÉA„gE¢‰‰ Aa‰‰ {•„w­¤³„1¤ñù„õ±¤ñg„u¤qC„s ¤ó¯„s‹¤ñ„ñ¤ó¯ Ÿ‰í¤÷„O¤™„ Ó¤w„ Ó¤=„‹å¤„ Á¤™„O¤c„ ?¢‰‰‹7g‰‰ ý‰„‡õ¢‰‰ ™½‰‰ ý‰„Ï¤©ƒ„‰S¤µÃ„M¤‹e÷ã‰í¤u„û3¤K¿„w-¤Wÿ„³¢‰‰‹E]‰‰ ýí„y¢‰‰ Aa‰‰ ýí„÷A¤ñ„1¤ñù„õ±¤ñg„u¤qC„ó¯¤s „ó¯¤ñù„1¤s‹ Ÿ‰í¤÷„夙„ Ó¤ ӄ Ó¤ Q„ Á¤=„­¤™„å¤c„‹›¢‰‰‹;i‰‰ }Á„ ¿¤ƒÑ„‡)¢‰‰ ‰‰ }Á„O¤_„ ¿¤‡)„ ¿¤3g„M¤‹e÷ã‰í¤u„û3¤Í„÷äyW„÷äù!„1¢‰‰‹Åó‰‰ ÿ¥„yW¤}¡„÷≉ Aa‰‰ ÿ¥„uç¤ñ„ñ¤ñù„sÕ¤qC„õ±¤ó/„ó¯¤ó¯„ó¯¤ñù„ñ¤s‹ Ÿ‰í¤+„O¤=„=¤ ӄ Ó¤ Q„ Á¤=„­¤™„å¤c„‹›¢‰‰‹;i‰‰ ™„‹¤ƒQ„…㢉‰ ‰‰ ™„«¤‡õ„ ¿¤‡)„ ¿¤µÃ„M¤‹›÷ã‰í¤÷ÄyW¤K¿„÷äyW„uç¤û3„ÿפY_„qC¤gE„{û¤ÿ „u¢‰‰ A‰‰ ý„ug¤ñù„ñ¤ñù„sÕ¤ó/„s ¤s‹„ó¯¤ó¯„ñ¤s „1¤s‹ Ÿ‰í¤÷„O¤w„c¤ ӄ Ó¤÷„w¤ Q„­¤™„夙„‹¢‰‰‹;i‰‰ ñф‹e¤u„…‡¤™;„=¤§!„«¤M„‹›¤‡)„ ¿¤µÃ„‡)¤ ¿gç‰í¤ËS„uç¤û3„uç¤{Ÿ„ÿ ¤Y_„ÿ ¤û³„ýE¤ÿׄõ±¢‰‰ A‰‰ qµ„ug¤ñù„ñ¤qC„s ¤ó/„s ¤s‹„qC¤s „1¤sU„ÿפsU Ÿ‰í¤+„«¤+„O¤w„=¤÷„w¤ Q„w¤=„夙„‹¢‰‰‹;i‰‰ ó©„ Á¤«„;¤τu¤§!„u¤…ã„‹›¤M„‹›¤5-÷ã‰í¤ËS„uç¤û3„uç¤{Ÿ„ÿפÙõ„{y¤{y„}¡¤1„sU¢‰‰ A‰‰ s„u¤ñg„ñù¤qC„s ¤ó/„s ¤s‹„qC¤ó¯„1¤õ±„ÿפsU 
Ÿ‰í¤ Á„u¤‹›„u¤ ӄ=¤÷„w¤÷„ Ó¤=„™¤„‹å¤‡„‰S¢‰‰‹½µ‰‰ sᄠÁ¤O„u¤ƒÑ„M¤'é„÷¤…‡„‹›¤M„‹›¤· ÷ã‰í¤Iw„uç¤û3„uç¤{û„ÿ‹¤Ù©„û3¤}¡„ÿ ¤1„õ±¢‰‰ Óÿ‰‰ õńw-¤çû„u¤ñg„ñù¤qC„ó¯¤s‹„s ¤s‹„qC¤ó¯„ÿ ¤uç„ÿ ¤õ± Ÿ‰í¤ Á„Ÿ…¤ ӄ Q¤w„ Ó¤÷„ Ó¤=„™¤=„ Á¤™;„‹¢‰‰‹½µ‰‰ u¹„+¤儁Ϥ÷„ߤ'鄃Q¤;„‹e¤…„‹e¤· wM‰í¤Iw„õ±¤{y„u¤ýE„ÿ ¤Ça„gǤÿׄñ¤ñg„s‹¢‰‰ SY‰‰ ÷„ug¤çû„u¤qC„ñù¤qC„ó¯¤s‹„ó¯¤s „qC¤s‹„a¤u Ÿ‰í¤‹e„Ÿo¤÷„=¤w„ Ó¤÷„ Ó¤=„™¤=„‹e¤‡„‹¢‰‰‹½µ‰‰ ÷ñ„÷¤„c¤«„™»¤9„u¤;„‹e¤…„ Á¤· ÷ã‰í¤Iw„õ±¤{y„õ±¤ÿ „}¡¤E;„gǤñ„ñ¤qC„󯢉‰ Õ%‰‰ wՄõ?¤gDŽõ±¤ó¯„ñù¤qC„ó¯¤s‹„ó¯¤s „qC¤s‹„a¤u Ÿ‰í¤‹e„Ÿo¤÷„=¤w„ Ó¤÷„ Ó¤=„™¤ ӄ Á¤™»„ A¢‰‰‹½µ‰‰‰É„ Ó¤=„c¤儙»¤»E„ƒÑ¤u„ Á¤…„ Á¤· wM‰í¤I­„sU¤ýńõ±¤ÿׄýE¤ñ„ug¤]—„ù!¤ñù„ñù¤qC„s ¢‰‰ Õ%‰‰ ­„õ?¤gDŽõ±¤ó¯„ñ¤ó¯„ó¯¤s‹„qC¤sՄqC¤s‹„a¤u Ÿ‰í¤‹„K¤+„™¤­„=¤+„=¤ ӄ™¤ ӄ+¤™»„ A¢‰‰‹=‰‰‹„ Ó¤ ӄ=¤™„‡õ¤¥{„‹¤儃Ѥ«„ Á¤½„+¤·Õ÷ã‰í¤I­„sU¤ýńõ±¤ÿׄ}¡¤ñ„ug¤[„y¤ñù„qC¤ó¯„󯢉‰ Õ%‰‰‹å„õ?¤gDŽsU¤ó¯„ñù¤ó¯„qC¤sU„qC¤sՄñù¤sU„á5¤ug Ÿ‰í¤‹„Ÿá¤÷„™¤­„=¤+„=¤ ӄ™¤ ӄ+¤™»„ A¢‰‰‹=‰‰ ل=¤w„ Q¤=„‡)¤' „‹e¤™„½¤Ï„ Á¤½„+¤—[„‰S¤§÷ã‰í¤éۄw-¤i%„sU¤ýńõ±¤³„ýŤñù„u¤Ùs„yW¤qC„ó/¤s „qC¢‰‰ Õ%‰‰½„õ?¤gDŽsU¤ó¯„ñù¤ó¯„qC¤sU„qC¤sՄñù¤s‹„a‘¤ug“‰í¤÷„™¤­„=¤+„=¤ ӄ™¤w„÷¤=„‰S¤«„ A¢‰‰‹=û‰‰ ‘„ Ó¤w„÷¤=„‡)¤'é„ Á¤=„½¤c„+¤ƒQ„÷¤ñ„‹¤9„‰S¤«÷ã‰í¤³„÷A¤c#„u¤ë„s‹¤}/„sU¤ñù„{y¤qC„õ±¤Ça„yé¤s „s‹¤sՄqC¢‰‰ Õ%‰‰ õ„õ?¤³„uç¤ó¯„s‹¤s „ñù¤s „ñù¤sU„ñg¤õ±„ñù¤s‹ Ÿ‰í¤÷„™¤ Á„¤+„™¤w„™¤w„÷¤ ӄ‹›¤Ï„ A¢‰‰‹=û‰‰é„=¤­„÷¤w„‡©¤9„ Á¤=„…¤™„+¤ƒQ„÷¤ñ„‹e¤]„ ?¤Ï÷ã‰í¤ñù„u¤ÿׄw-¤ÿ „õ±¤ë„ó¯¤ÿ „sU¤ñù„ýŤqC„õ±¤ÿ „uç¤Ño„û3¤s „sU¤sՄqC¢‰‰ Õ%‰‰̈́õ?¤³„uç¤qC„sU¤s „ñ¤sՄñù¤sU„ñg¤õ±„ñù¤s‹ Ÿ‰í¤÷„™¤ Á„¤+„™¤­„c¤w„+¤=„‹›¤Ï„ A¢‰‰‹=û‰‰¡„=¤­„+¤w„M¤/„‹›¤u„ Á¤=„½¤™„+¤u„ Ó¤ñ„ Á¤u„‰S¤«„‹e¤™wM‰í¤ñù„õ±¤1„ug¤ÿׄõ±¤å „w-¤ÿׄó¯¤ÿ „sU¤qC„{y¤qC„õ±¤ÿׄu¤_¹„{Ÿ¤sU„sU¤õ±„qC¢‰‰ Õ%‰‰…„sÕ¤ñù„õ±¤ó¯„s ¤sU„ñ¤sՄñù¤sU„ñg¤õ±„ñ¤sU Ÿ‰í¤+„c¤ Á„¤ Á„™¤w„™¤÷„w¤ ӄ Á¤™„­¤µÃ„ ¿¤‡)„‹¤ƒQ„=¤ Á„ Á¤+„…㤣„‹e¤O„+¤ ӄ…¤ ӄ÷¤«„™¤O„ ?¤ó„ Á¤O„‹e¤Ï„ Á¤=÷ã‰í¤qC„õ±¤³„u¤1„õ±¤å„÷A¤1„ñù¤ÿׄs‹¤ó¯„{y¤ó¯„sU¤1„u¤]ó„{Ÿ¤sU„õ±¤õ±„qC¤}/„ug¤yW„÷äK¿„sÕ¤ñù„õ±¤ó¯„s ¤s‹„ñù¤s „ñù¤õ±„ñg¤õ±„ñ¤sU y‰í¤ Á„c¤ Á„¤ 
Á„™¤w„™¤÷„w¤w„+¤™„­¤+„‰S¤¿ù„‹e¤τ‹å¤÷„™¤‹„­¤ Á„…¤¥‘„‹e¤c„+¤ ӄ…㤍w„÷¤«„™¤儋›¤ó„ Á¤O„ Á¤c„­¤ Q÷ã‰í¤s‹„sU¤ñ„õ±¤1„sU¤å„uç¤ñ„ñ¤1„s‹¤s „{Ÿ¤s‹„s ¤ñù„õ±¤[„{y¤u„sÕ¤ug„qC¤ÿ „õ±¤{„sU¤Ac„w-¤sU„sÕ¤ñù„sU¤s „ó¯¤sU„ñ¤sU„ñù¤sՄqC¤õ±„ñ¤õ± Ÿ‰í¤ Á„c¤ Á„=¤­„™¤+„复+„ Ó¤w„+¤™„­¤+„‰S¤¿„+¤…c„ Á¤u„=¤‹„­¤‹e„…¤¥{„ Á¤™„w¤÷„…㤍w„÷¤O„c¤儋›¤ó„+¤O„ Á¤儍+¤÷wωí¤s „s‹¤ñg„sU¤1„sU¤åi„u¤ñ„ñ¤1„s‹¤s „{Ÿ¤s‹„s ¤qC„õ±¤Ùs„ýŤu„{y¤ÿׄsU¤{y„s ¤Ac„÷äõ±„sÕ¤ñù„sU¤s „ó¯¤sU„ñ¤õ±„ñ¤sՄñù¤u„³¤u Ÿ‰í¤‹e„Ï¤‹e„™¤­„c¤ Á„复+„ Ó¤w„+¤™„­¤ Á„ ¿¤¿„w¤…„+¤«„…¤‹å„½¤' „ Á¤=„w¤÷„…㤍w„÷¤O„c¤儋e¤„+¤O„+¤„÷¤w÷ã‰í¤sՄó/¤qÄs‹¤ñ„s‹¤åi„u¤ñ„³¤ñ„ó/¤õ?„û3¤s‹„s ¤qC„õ±¤Ù©„ýŤug„{y¤ÿׄsU¤ýńó¯¤Ac„÷äsՄõ±¤ñù„s ¤sU„ó¯¤sU„1¤u„³¤õ?„ñù¤u„³¤u Ÿ‰í¤‹e„Ï¤‹e„™¤ A„Ï¤‹e„O¤ Á„=¤+„w¤™„ Á¤ Á„ ?¤¿ù„ Ó¤½„÷¤«„½¤ ¿„½¤9Ÿ„ Á¤ ӄ Ó¤+„M¤ A„ Q¤儁Ϥ儋e¤„÷¤å„ Ó¤=„÷¤­÷ã‰í¤sՄs‹¤qC„ó¯¤ñ„s‹¤åi„u¤ñ„³¤ñ„ó/¤õ?„û3¤sU„ó¯¤ó¯„õ±¤Çã„ýŤ÷ÄýŤÿׄs‹¤ýńó¯¤A™„÷A¤õ±„õ±¤ñù„s ¤sU„qC¤õ±„1¤u„³¤õ?„ñù¤u„³¤u Ÿ‰í¤‹e„Ï¤‹e„™¤ A„Ï¤‹e„O¤ Á„=¤+„ Ó¤=„ Á¤ Á„ ?¤?C„ Ó¤½„÷¤O„Ÿo¤;¡„ Á¤w„ Ó¤+„‡)¤‹e„ Q¤„)¤„ Á¤„÷¤O„ Ó¤ ӄ Ó¤ Á÷ã‰í¤õ±„ó¯¤ó¯„ó¯¤1„s‹¤åi„õ±¤ñg„ÿW¤ñg„ó/¤u„yW¤sU„ó¯¤s „õ±¤Åфa¤1„s‹¤ýńó¯¤Á=„÷A¤õ±„õ±¤qC„ó¯¤sU„qC¤õ±„1¤u„³¤õ?„ñù¤u„³¤u y‰í¤ ¿„÷¤ ¿„c¤ A„c¤‹e„«¤ Á„=¤ Á„=¤=„‹e¤+„ ?¤?C„=¤ƒQ„÷¤O„Ÿ…¤½ „ Á¤w„ Ó¤+„‡)¤‹„=¤=„u¤„+¤›Í„ Ó¤c„=¤ ӄ Ó¤‹e÷ã‰í¤uç„qC¤s „ñù¤ñ„ó¯¤eµ„sU¤ñg„ÿ ¤qC„qC¤ug„yW¤õ±„qC¤sU„õ±¤Ãτc£¤³„ó¯¤}/„ó/¤Ï÷„÷A¤õ±„u¤qC„qC¤sU„ó¯¤õ±„ÿפu„ñ¤õ? 
Ÿ‰í¤ A„c¤‹e„«¤ Á„ Ó¤+„=¤=„‹e¤ Á„ ?¤1‹„ Q¤ƒQ„ Ó¤Ï„ß¤=³„ Á¤+„=¤ Á„‡)¤‹„=¤=„u¤„+¤›Í„ Ó¤c„™¤w„=¤‹›÷ã‰í¤uç„ó¯¤ó¯„qC¤ñ„qC¤eµ„sU¤ñg„}¡¤ó¯„qC¤÷Äy¤õ±„qC¤õ±„sU¤Aã„古ñ„ó¯¤}¡„qC¤Ï÷„÷A¤õ±„u¤qC„qC¤õ±„ñù¤õ±„ÿפuç„1¤u‰3‰í¤ Á„ß¤‹e„=¤ Á„™¤=„‹›¤ Á„‹›¤1U„ Ó¤ƒÑ„ Ó¤c„›Í¤±/„ Á¤ Á„=¤ Á„Ÿ…¤ ӄ;¤=„w¤„ Ó¤c„¤÷„=¤‰S÷ã‰í¤w-„qC¤s‹„ñg¤ñ„ó¯¤åi„s ¤qC„ýE¤ó¯„aý¤õ±„qC¤õ±„õ±¤OQ„eµ¤ñ„ó¯¤}¡„ó¯¤Ï+„uç¤õ±„uç¤qC„ñù¤õ±„qC¤u„c£¤õ± Ÿ‰í¤ Á„s¤ Á„=¤ Á„™¤=„‹›¤ Á„ ¿¤³1„=¤ƒÑ„ Ó¤c„‡¤1‹„+¤‹›„™¤ Á„Ÿ…¤ ӄ;¤=„ Ó¤›Í„ Ó¤c„复+÷ã‰í¤sU„ñ¤ñ„ó¯¤eµ„ó¯¤qC„ýE¤ó¯„aý¤õ±„ñù¤uç„sU¤Ï÷„çû¤ñ„ó¯¤}¡„qC¤MO„÷äõ±„uç¤qC„ñù¤õ±„qC¤õ±„å ¤õ±]Ó‰í¤‰S„£i¤‹e„ß¤ Á„=¤ Á„™¤=„‹›¤‹e„‹¤3„ Ó¤ƒÑ„ Ó¤c„™;¤1U„+¤‹›„c¤‹›„Ÿo¤w„…‡¤=„ Ó¤›Í„=¤™„复÷÷ã‰í¤sU„ñ¤ñ„qC¤eµ„ó¯¤qC„{Ÿ¤sU„a¤uç„û3¤ñù„w-¤Å_„ù!¤ñù„qC¤ÿ „qC¤Í„ug¤uç„u¤ó¯„ñù¤õ±„qC¤õ±„c£¤u„×I¤ýÅ Ÿ‰í¤½„)7¤‹e„ß¤ Á„=¤ Á„™¤ ӄ‹e¤‹›„‹¤3g„=¤u„=¤™„_¤;!„‰S¤™„M¤‹›„Ÿo¤+„…㤏=„ Ó¤›Í„=¤å„c¤+wωí¤s „ñ¤ñ„qC¤ýE„w­¤ó/„qäqC„û3¤õ±„a¤uç„û3¤ó¯„ug¤ëk„ó/¤çû„y¤ñù„ó¯¤}¡„ó¯¤K¿„ug¤u„uç¤ó¯„ñù¤õ±„ñù¤u„c£¤u„o¤å Ÿ‰í¤ó„‘q¤‹e„ß¤‹e„™¤ Á„™¤ ӄ‹›¤‹e„‹¤µÃ„ Ó¤ƒÑ„ Ó¤™„‡õ¤‡„ Q¤„‹¤ ӄM¤‹›„Ÿo¤ Á„M¤=„¿¤ Q„‰Õ¤;„=¤å„c¤w÷ã‰í¤sՄ³¤ñg„ñù¤}¡„uç¤s‹„ñg¤ó¯„yé¤õ±„Ó¤s „u¤ëk„³¤y„û3¤ñg„qC¤ÿ „ó¯¤Oӄ÷A¤õ±„u¤÷Äu¤ó¯„ñù¤õ±„ñù¤u„ãG¤÷A„古o ‰í¤„¯·¤‹›„™¤‹e„c¤ ӄ=¤ Á„ ?¤1 „ Ó¤u„=¤„…㤅ã„;¤•I„­¤+„-k¤ Á„‡©¤ ӄ¤÷„‹›¤ƒÑ„™¤„Ï¤­÷ã‰í¤sՄ³¤ñg„ñù¤}¡„uç¤s‹„ñg¤ó¯„yé¤õ±„Ó¤sU„sÕ¤k7„ýE¤{Ÿ„{Ÿ¤ñg„qC¤ÿ „ó¯¤Ïw„÷A¤õ±„qC¤ó¯„ñ¤u„ñù¤uç„Qˤí} ¡‰í¤„£ë¤‹e„c¤ ӄ=¤‹e„‹›¤1 „=¤«„¤=„…‡¤½„M¤•I„w¤ Á„¯·¤ ?„ߤ ӄ¤ Q„‹›¤u„™¤„«¤+÷ã‰í¤sU„ÿפñg„ñù¤ÿ „uç¤ó/„ñg¤ó¯„ù£¤÷A„Qˤõ±„s ¤k7„û3¤ýń{û¤qC„ñg¤ÿׄqC¤Ïw„uç¤u„qC¤ó¯„ñ¤u„]—¤ë ¡‰í¤©„¯·¤‹e„™¤ ӄ=¤‹e„‹e¤1U„ Q¤÷„=¤=„;¤u„_¤•I„ Ó¤ Á„¿ù¤ ӄ复+„‹e¤u„™¤儁O¤+÷ã‰í¤õ±„ÿפñù„ñ¤ÿ „u¤sU„1¤sU„Á½¤u„qC¤ëí„gǤÿׄ}¡¤ó¯„ñù¤ÿׄqC¤Ï+„u¤u„qC¤ó¯„ñù¤u„Um¤Uƒ Ÿ‰í¤«ÿ„«¤‹e„™¤ ӄ=¤‹e„‹e¤1U„=¤«„™¤ ӄƒÑ¤«„™»¤•„=¤‹e„?Ť+„O¤+„‹e¤u„夙„«¤ ÁwM‰í¤sU„ÿפñù„ñ¤ÿׄõ±¤sU„1¤sU„Á½¤ug„ñg¤ëí„礳„ÿ ¤ó¯„ñù¤ÿׄqC¤MO„u¤u„ó¯¤ó¯„ñù¤u„끤Óq‹Å‰í¤/„“¹¤‹e„™¤÷„¿¤‹„ Á¤³1„=¤u„™¤ ӄO¤c„¤•„å¤ ¿„?Ť+„O¤+„ Á¤«„夙„u¤ Á÷ã‰í¤õ±„ÿ ¤ñù„ñ¤ÿׄõ±¤sU„1¤sU„Á½¤÷Äñ¤ëí„åi¤ñ„1¤ó¯„ñù¤ÿ 
„qC¤MO„õ±¤ug„qäs‹„ñù¤u„mÙ¤Ño‹E‰í¤!#„ˤ‹›„™¤÷„ Ó¤‹›„+¤³1„™¤«„™¤ ӄO¤=„]¤•‚‰‰ µÅ‰‰¯‰¤+„O¤+„ Á¤«„夙„u¤ Á÷ã‰í¤õ±„ÿ ¤ñù„ñ¤ÿׄõ±¤sU„1¤sU‚‰‰‹óÁ‰‰¯í¤ëí„c#¤qC„1¤ó¯„ñù¤ÿׄñù¤MO„sU¤uç„ó¯¤s‹„ñù¤uç„á·¤ß]‹Å‰í¤£ë„]¤ ¿„复+„=¤‹›„+¤3„=¤«„c¤w„c¤ ӄŸ…¤•I‚‰‰ µÅ‰‰/Á¤ Á„u¤ Á„ Á¤«„O¤=„ƒÑ¤‹ewM‰í¤õ±„}¡¤qC„1¤1„sU¤õ±„ÿ ¤õ±„W¤qC„mÙ¤ë„aý¤ó¯„ñù¤s „ñ¤1„ñ¤Åфw­¤qC„s‹¤÷A„qC¤sU„mY¤Û9 Ÿ‰í¤%G„“'¤+„=¤ ?„÷¤=„‰Õ¤;¡„c¤O„c¤w„™¤ ӄŸ…¤„“¹¤=„©¤ Á„u¤ Á„+¤O„O¤=„ƒÑ¤ Á÷ã‰í¤õ±„}¡¤qC„1¤1„sU¤õ±„ÿ ¤õ±„磻oë„÷äw­„{Ÿ¤ë„á5¤sU„ñù¤s „ñ¤1„ñ¤Åф÷A¤qC„{y¤sU„a¤Ù© Ÿ‰í¤'鄟o¤+„…¤=„ ?¤;¡„c¤O„c¤w„™¤+„K¤„…㤉Մ ¿¤‘—„ݤ Á„u¤ Á„+¤O„O¤=„ƒÑ¤ Á÷ã‰í¤u„ýE¤qC„1¤1„sU¤õ±„ÿ ¤õ±„aý¢‰‰‹Sÿ‰‰#µ„á5¤õ±„ñù¤sU„ñ¤ÿׄñ¤yW„w-¤Õ'„u¤ó¯„{Ÿ¤sU„åi¤Ç‡‹Å‰í¤»E„‡¤ Á„M¤ ӄ‹e¤+Y„ ¿¤τÏ¤O„复+„™¤‹›„Ÿ…¢‰‰ !%‰‰¥©„¤‹e„ƒÑ¤ Á„+¤O„O¤=„;¤‹e÷ã‰í¤u„ýE¤qC„1¤1„sU¤õ±„}¡¤u„åi¢‰‰‹Q‰‰%„aý¤uç„ñù¤sU„ñ¤1„³¤û³„÷äÕ'„u¤ó¯„û3¤õ±„çû¤E; Ÿ‰í¤=³„‡)¤ Á„M¤w„ Á¤+Y„‹›¤…㄁ϤO„O¤ Á„“'¢‰‰ #·‰‰%ᄇ¤‹e„ƒÑ¤ Á„+¤O„O¤=„;¤‹e÷ã‰í¤ug„{y¤ó¯„ÿפñ„s ¤u„ýE¤u„ù!¢‰‰‹Ùˉ‰§Å„a¤õ±„1¤1„³¤{y„u¤Õ'„õ±¤s „yé¤uç„{Ÿ¤Á½ Ÿ‰í¤?ń…㤋›„‡©¤w„ Á¤+Y„‹e¤…„Ï¤O„O¤ Á„Ÿo¢‰‰ ¥}‰‰'¹„_¤‹e„;¤‹e„w¤c„«¤ ӄ…¤‹÷ã‰í¤ug„{y¤ó¯„ÿפñ„s ¤u„ýE¤u„û³¢‰‰‹E]‰‰¹„古u„ÿפñ„ÿפ{y„u¤Uƒ„s ¤sU„í“¢‰‰ u‰‰¹ Ÿ‰í¢‰‰ ÏS‰‰¹ñ„懶+„w¤«ÿ„‹e¤…„«¤c„«¤‹e„󢉉 '‰‰¹ñ„Ϥ‹e„;¤‹e„w¤c„«¤ ӄ…¤‹÷ã‰í¤ug„{y¤ó¯„ÿפñ„s ¤u„{y¤ug„{y¢‰‰‹Ã቉9Մç{¤uç„ÿ ¤ñ„ÿפýńu¤Õ'„s ¤s‹„o¢‰‰ ‹Ï‰‰9Õ Ÿ‰í¢‰‰ Ã+‰‰»É„K¤+„ Ó¤+Y„ Á¤„‹¤‰S„÷¤c„¢‰‰ 9{‰‰»É„ƒQ¤‹„…¤‹e„w¤c„«¤ ӄ…㤠¿÷ã‰í¤÷Ä{Ÿ¤ó¯„ÿפñ„s ¤u„{y¤ug„}/¢‰‰‹Á_‰‰;­„í}¤ñ„ÿ‹¤w-„ug¤ñg„õ±¤Õ'„ó¯¤sU„á5¢‰‰ ‹Ï‰‰;­ y‰í¢‰‰ CӉ‰½„Ÿ…¤+„ Ó¤+Y„­¤=„M¤™„‘q¢‰‰ ;£‰‰½„Ï¤‹›„…㤋e„w¤c„«¤ ӄ…㤉S÷ã‰í¤w-„{Ÿ¤ó¯„ÿפñ„s ¤u„{Ÿ¤u焳¢‰‰‹ÏM‰‰½å„o¤ñù„û3¤qC„sÕ¤Õ'„ó¯¤sU„aý¢‰‰ )‰‰½å‹E‰í¢‰‰ E™‰‰=لß¤ Á„=¤­¥„ A¤™„…ã¤c„Ÿo¢‰‰ ½é‰‰=ل変S„M¤‹e„w¤c„«¤ Ó÷ã‰í¤ó¯„ÿפñù„ó¯¤u„c#¢‰‰‹Ëu‰‰¿½„c£¤ñ„{Ÿ¤qC„sÕ¤Óq„ó¯¤õ±„åi¢‰‰ ‡‰‰¿½ Ÿ‰í¢‰‰ ÇO‰‰?‘„¤ Á„ Ó¤-„­¤=„…ã¤c„ß¢‰‰ ¿‰‰?‘„]¤‹e„ Ó¤™„«¤ Ó÷ã‰í¤ó¯„ÿפñù„ó¯¤uç„c#¢‰‰‹Iω‰?õ„åi¤ñù„{Ÿ¤ñù„õ±¤Ó„qC¤u„碉‰ M‰‰?õ Ÿ‰í¢‰‰ Gu‰‰±é„a¤‹e„=¤-k„ Á¤™„…㤙„¢‰‰ ¿û‰‰±é„]¤‹›„ Ó¤™„«¤ Ó÷ã‰í¤s „ÿ 
¤ñù„ó¯¤uç„åi¢‰‰‹É™‰‰1̈́ç¤ñù„{Ÿ¤qC„sU¤Ó„qC¤u„gÇ¢‰‰ s‰‰1Í‹E‰í¢‰‰ Û‰‰³¡„_¤‹›„™¤-k„÷¤=„…¤™„™»¢‰‰ 1‰‰³¡„›K¤ ?„=¤=„ƒÑ¤w÷ã‰í¤s „}¡¤qC„qC¤÷A„e5¢‰‰‹·Q‰‰3…„gǤñù„{y¤qC„s‹¤Ó„ñù¤uç„ù!¢‰‰ ƒ»‰‰3…‹Å‰í¢‰‰ Û㉉3ù„‡õ¤‹›„™¤¯·„+¤=„…¤™„‡õ¢‰‰ ³³‰‰3ù„¤ ?„=¤=„ƒÑ¤w÷ã‰í¤s „}¡¤qC„qC¤÷A„gÇ¢‰‰‹µÁ‰‰µÝ„yW¤qC„{y¤qC„s‹¤Q˄ñ¤÷Äû3¢‰‰ …K‰‰µÝ Ÿ‰í¢‰‰ Ý)‰‰5±„M¤ ¿„夯·„÷¤=„…¤=„‡)¢‰‰ 3‰‰5±„™»¤ ?„=¤=„ƒÑ¤w÷ã‰í¤s „}¡¤qC„a¢‰‰‹3‰‰·•„{Ÿ¤qC„{y¤qC„s‹¤Q˄1¤w-„{Ÿ¢‰‰ 󉉍·• Ÿ‰í¢‰‰ Ýõ‰‰7‰„…㤉S„O¤¯·„÷¤=„…¤=„…㢉‰ µÅ‰‰7‰„Ÿo¤=„ƒÑ¤w÷‰í¤sՄ}¡¤qC„aý¢‰‰‹³¿‰‰7í„{Ÿ¤ó¯„{y¤ó/„s ¤Ñ…„c£¢‰‰ ‡]‰‰7í y‰í¢‰‰ ß»‰‰ÉÁ„s¤/ý„w¤=„;¤ ӄ…¢‰‰ ·‹‰‰ÉÁ„Ÿ…¤ ӄƒÑ¤­÷ã‰í¤sՄ}¡¤ó¯„aý¢‰‰‹1S‰‰I¥„{y¤ó¯„ýE¤qC„s ¤Ñ…„å ¢‰‰ ¹‰‰I¥ Ÿ‰í¢‰‰ _a‰‰Ë™„›Í¤/ý„ Ó¤ ӄ…¤÷„ƒÑ¢‰‰ ·U‰‰Ë™„9¤ ӄƒÑ¤­÷ã‰í¤sՄ}¡¤ó¯„ãG¢‰‰‹±­‰‰Ëý„}¡¤s‹„{y¤ó¯„ó¯¤Ñ…„eµ¢‰‰ ¹‰‰Ëýwé‰í¢‰‰ Ñ͉‰Kфa¤¡G„w¤=„½¤÷„u¢‰‰ ɝ‰‰Kфó¤ ӄƒÑ¤­÷‡‰í¤õ±„}¡¤ó¯„åi¢‰‰‹¿ç‰‰Íµ„ÿפs‹„ýŤó¯„ó¯¤ß]„gÇ¢‰‰ ©‰‰Íµ Ÿ‰í¢‰‰ Q‰‰M©„™»¤!#„ Ó¤ ӄ½¤÷„«¢‰‰ Ég‰‰M©„¤ ӄƒÑ¤ Á÷ã‰í¤õ±„}¡¤ó¯„eµ¢‰‰‹¿‰‰Ï„1¤sU„{y¤ó¯„ó¯¤ß]„gÇ¢‰‰ ;‰‰Ï Ÿ‰í¢‰‰ Q‰‰Ïᄙ»¤!#„ Ó¤ ӄ…¤+„O¢‰‰ IɉÏᄛͤ ӄƒÑ¤ Á÷ã‰í¤sU„ÿ ¤ó¯„碉‰‹=±‰‰Ońñ¤sU„ýŤó¯„ó/¤]q„ù!¢‰‰ ‹Ï‰‰OÅw ‰í¢‰‰ Ó߉‰Á¹„‡©¤£ë„ Q¤ ӄ…¤‹e„c¢‰‰ K ‰‰Á¹„¤w„;¤‹e÷ã‰í¤u„ýE¤s „ç{¢‰‰‹½Õ‰‰A„ñ¤u„{y¤ó¯„ó/¤]—„y颉‰ ‰=‰‰Aw‰í¢‰‰ S9‰‰Añ„M¤£ë„=¤÷„…¤‹e„™¢‰‰ KՉ‰Añ„™»¤w„ƒÑ¤ Á÷‰í¤uç„ýE¤s „ù!¢‰‰‹;¯‰‰ÃՄãG¤s‹„ó¯¤[á„{y¢‰‰‰õ‰‰ÃÕ Ÿ‰í¢‰‰ Յ‰‰CɄ…¤¥‘„ Ó¤÷„9¢‰‰ Í?‰‰CɄ_¤w„;¤‹›÷‰í¤÷A„ýE¤s „y¢‰‰‹»C‰‰Å­„å ¤s‹„qC¤[„}/¢‰‰‰óS‰‰Å­ Ÿ‰í¢‰‰ Õo‰‰E„ƒQ¤¥{„=¤÷„s¢‰‰ M›‰‰E„‡õ¤w„;¤ ?û‰‰í¤s „y颉‰‹9‰‰Eå„eµ¤s‹„qC¤Y߄³¢‰‰‰q­‰‰Eåwé‰í¢‰‰ ח‰‰Çل™¤' „™¤+„a¢‰‰ M牉Çل‡s¤­÷ã‰í¤sՄy ¢‰‰‹¹1‰‰G½„ç¤sU„ñù¤Ùs„ñù¢‰‰‰牉G½wW‰í¢‰‰ ×q‰‰Ù‘„ Ó¤¹3„=¤+„a¢‰‰ ÏA‰‰Ù‘„‡©¤ Á÷ã‰í¤õ±„y颉‰‹¹1‰‰Ùõ„ç¤sU„qC¤GM„󯢉‰‰ÿ?‰‰Ùõ‰3‰í¢‰‰ W݉‰Yé„ Á¤9û„=¤w„™»¢‰‰ O­‰‰Yé„M¤ Á÷ã‰í¤õ±„{Ÿ¢‰‰‹' ‰‰Û̈́ù!¤sՄñù¤Åф÷A¢‰‰‰ý¯‰‰ÛÍ Ÿ‰í¢‰‰ é'‰‰[¡„ ?¤;¡„™¤­„_¢‰‰ Ow‰‰[¡„…㤠Á÷ã‰í¤õ±„{û¢‰‰‹§/‰‰Ý…„y ¤sՄñù¢‰‰‰{C‰‰Ý…‰³‰í¢‰‰ ù뉉Ýù„‹¤«ÿ„™¤ A„‡©¢‰‰ ÁS‰‰Ýù„;¤+÷ã‰í¤sU„ýE¢‰‰‹%ʼn‰]݄yé¤õ?„ñù¤Uƒ„ug¢‰‰‰ûg‰‰]݉3‰í¢‰‰ ù‰‰ß±„‹e¤«ÿ„™¤­„…㢉‰ A¿‰‰ß±„ƒÑ¤ 
Á÷ã‰í¤õ±„}¡¢‰‰‹¥y‰‰_•„{Ÿ¤sՄñù¤Uƒ„u¢‰‰‰û‰‰_•‰3‰í¢‰‰ ù‰‰Ñ‰„ Á¤«„™¤­„…¢‰‰ ɉÑ‰„u¤ Á÷ã‰í¤õ±„ÿ×¢‰‰‹#³‰‰Ñí„ýE¤õ±„ñ¤Um„sU¢‰‰‰ù׉‰Ñí Ÿ‰í¢‰‰ g#‰‰QÁ„+¤«„å¤ Á„;¢‰‰ Ã剉QÁ„«¤ Á÷‰í¤u„ÿ×¢‰‰‹ÿ-‰‰Ó¥„u碉‰‹£W‰‰Ó¥„}¡¤õ±„ñg¤Õ'„õ±¢‰‰‰õ剉Ó¥„sU¤1 Ÿ‰í¤O„+¢‰‰ g#‰‰S™„ Á¤+Y„¤ Á„ƒÑ¢‰‰‹}!‰‰S™„‹›¢‰‰ Ã剉S™„«¤‹e÷‰í¤uç„1¢‰‰‹‰‰‰Sý„õ±¢‰‰‹£W‰‰Sý„}¡¤õ±„ñg¤Õ'„sU¢‰‰‰õ‰‰Sý„qC¤ó¯ Ÿ‰í¤‹›„O¢‰‰ e}‰‰Õф÷¤a„ ¿¤™»„夋e„u¢‰‰‹ýlj‰Õф Á¢‰‰ Å+‰‰ÕфÏ¤‰Õ÷ã‰í¤w­„³¢‰‰‹‰‰‰Uµ„õ±¢‰‰‹£‰‰Uµ„ÿ ¤u„ñ¤gDŽ÷äç„s‹¢‰‰‰óS‰‰Uµ„1¤uçU‰í¢‰‰ e‰‰×©„w¤a„ Á¤‡s„å¤ Á„O¢‰‰‹}!‰‰×©„ Á¢‰‰ Å+‰‰×©÷ã‰í¢‰‰‹S‰‰W„õ±¢‰‰‹!!‰‰W„1¤õ±„ñ¤y „õ±¤ç„s ¢‰‰‰ñA‰‰Wwé‰í¢‰‰ e‰‰Wᄍw¤›Í„ Á¤‡©„O¤‹e„O¢‰‰‹}!‰‰WᄠÁ¢‰‰ Å÷‰‰Wá÷ã‰í¢‰‰‹ñ?‰‰éńõ?¢‰‰‹¡Ç‰‰éńñ¤u„1¤û3„sU¤eµ„󯢉‰‰ÿ?‰‰éÅ Ÿ‰í¢‰‰ 巉‰i¹„ Ó¤›Í„+¤M„O¤‹e„墉‰‹ÿ‰‰i¹„ A¢‰‰ Å÷‰‰i¹wM‰í¢‰‰‹ñ?‰‰ë„õ?¢‰‰‹/û‰‰ë„ñ¤÷A„ÿפ{Ÿ„s‹¤åi„󯢉‰‰} ‰‰ë‰3‰í¢‰‰ c‰‰ëñ„=¤„÷¤τá¤å„ ¿¢‰‰‹ÿ鉉ëñ„ Á¢‰‰ ǽ‰‰ëñ÷ã‰í¢‰‰‹q›‰‰kՄõ±¢‰‰‹9‰‰kՄ÷äñ„瑤û³„s‹¤åi„qC¢‰‰‰ý¯‰‰kÕ‰3‰í¢‰‰ c‰‰íɄ=¤„ Ó¤…ã„¤å„ ¿¢‰‰‹ÿ鉉íɄ Á¢‰‰ ǽ‰‰íÉ÷ã‰í¢‰‰‹q›‰‰m­„õ±¢‰‰‹¹1‰‰m­„÷äñ„ç{¤{Ÿ„ó¯¤åi„qC¢‰‰‰{C‰‰m­wW‰í¢‰‰ ã%‰‰ï„=¤ó„ Ó¤…ã„™»¤c„‹›¢‰‰‹3‰‰ï„ Á¢‰‰ G™‰‰ï÷ã‰í¢‰‰‹qe‰‰ïå„õ±¢‰‰‹'׉‰ïå„uç¤ñù„gǤ{y„ó¯¤c#„ñg¢‰‰‰y1‰‰ïå Ÿ‰í¢‰‰ aY‰‰oل¤]„ Ó¤…„™»¤™„‹›¢‰‰‹3‰‰oل Á¢‰‰ G™‰‰oÙwM‰í¢‰‰‹óÁ‰‰á½„õ±¢‰‰‹'׉‰á½„u¤ñù„ù!¤{y„qC¤c£„ñù¢‰‰‰ù׉‰á½ Ÿ‰í¢‰‰ áÿ‰‰a‘„™¤ß„=¤…„_¤™„‹e¢‰‰‹ñŸ‰‰a‘„ Á¢‰‰ Gc‰‰a‘÷ã‰í¢‰‰‹óÁ‰‰aõ„õ±¢‰‰‹' ‰‰aõ„uç¤ñù„ù!¤ýńqC¤ãG„ñ¢‰‰‰ù‹‰‰aõ‰3‰í¢‰‰ ቉ãé„™¤Ÿ…„=¤½„‡s¤„‹e¢‰‰‹ñŸ‰‰ãé„ Á¢‰‰ Ùω‰ãé÷ã‰í¢‰‰‹óÁ‰‰c̈́õ±¢‰‰‹' ‰‰c̈́u¤ñg„y ¤ýńqC¤aý„ñù¢‰‰‰g/‰‰cÍ Ÿ‰í¢‰‰ ቉å¡„=¤Ÿo„=¤½„‡s¤=„‹e¢‰‰‹ñy‰‰å¡„ A¢‰‰ Y)‰‰å¡÷ã‰í¢‰‰‹s­‰‰e…„õ?¢‰‰‹§/‰‰e…„õ±¤qC„yé¤}/„ñù¤y„u¤s‹„qC¢‰‰‰çʼn‰e… Ÿ‰í¢‰‰ ቉eù„=¤÷„‹e¤‡õ„™¤ƒQ„‡©¤=„ Á¢‰‰‹ñy‰‰eù„ A¢‰‰ Y)‰‰eùwM‰í¢‰‰‹s­‰‰ç݄õ?¢‰‰‹§/‰‰ç݄sU¤ó¯„yé¤}/„ñù¤y„s‹¤u„qC¢‰‰‰ey‰‰çÝ Ÿ‰í¢‰‰ ቉g±„=¤‹e„÷¤‡õ„™¤ƒQ„‡©¤ ӄ+¢‰‰‹ñy‰‰g±„ A¢‰‰ Yõ‰‰g±÷ã‰í¢‰‰‹sw‰‰ù•„sÕ¢‰‰‹%ʼn‰ù•„sU¤s „yé¤}/„ñù¤ÿׄ÷äs „s‹¤uç„󯢉‰‰eŸ‰‰ù• Ÿ‰í¢‰‰ =lj‰y‰„‹›¤9y„…¤w„‹¤O„™¤ƒQ„M¤w„÷¢‰‰‹qE‰‰y‰„ Á¢‰‰ Ûщ‰y‰÷ã‰í¢‰‰‹sw‰‰yí„õ±¢‰‰‹%ʼn‰yí„s‹¤s 
„û3¤}/„ñù¤1„ug¤s „{y¤Ç„u碉‰‰eŸ‰‰y퉳‰í¢‰‰ ;µ‰‰ûÁ„ Á¤;!„;¤+„‹e¤O„å¤u„M¤w„÷¢‰‰‹qE‰‰ûÁ„ Á¢‰‰ Ûщ‰ûÁ÷ã‰í¢‰‰‹sw‰‰{¥„õ±¢‰‰‹%ʼn‰{¥„s‹¤s „û3¤ÿ „ñ¤1„u¤sU„ýE¤Å_„õ±¢‰‰‰å³‰‰{¥‰3‰í¢‰‰ »‰‰ý™„+¤½W„ƒÑ¤ Á„ Á¤O„å¤u„…㤍÷„w¢‰‰‹qE‰‰ý™„+¤½„‰S¢‰‰ [;‰‰ý™÷ã‰í¢‰‰ ͉‰ýý„uç¤}¡„sU¢‰‰‹¥y‰‰ýý„ó¯¤sU„û³¤ÿW„1¤ñù„s ¤õ±„ÿפAc„s ¢‰‰‰cW‰‰ýý Ÿ‰í¢‰‰ ¹G‰‰}фw¤¿„«¤ Á„w¤™„O¤)„Ϥ+„ Ó¢‰‰‹qE‰‰}ф+¤ƒÑ„‹›¢‰‰ [;‰‰}ÑwM‰í¢‰‰ ͉‰ÿµ„u¤ÿ „sU¢‰‰‹¥y‰‰ÿµ„ó¯¤sU„û³¤ÿW„1¤ñù„s ¤õ±„1¤Oӄs ¤Cu„uç¤{Ÿ Ÿ‰í¤…ã„‹›¤½ „w¤±¯„O¤ Á„w¤™„O¤)„Ϥ+„ Ó¢‰‰‹qE‰‰©„+¤u„‹e¢‰‰ ݇‰‰©÷ã‰í¢‰‰ a‰‰ñ„u¤1„sU¢‰‰‹¥Ÿ‰‰ñ„ó¯¤õ±„û³¤ÿW„1¤ñù„ó¯¤u„ñ¤Ï+„ó¯¤Åфõ±¤}¡‰3‰í¤u„+¤»E„ Ó¤3g„™¤‹e„ Ó¤™„O¤)„…㤠A„=¢‰‰‹ó¡‰‰ñᄍ+¤å„ Á¢‰‰ Ýa‰‰ñá÷ã‰í¢‰‰ a‰‰qńõ±¤ñ„sU¢‰‰‹¥Ÿ‰‰qńqC¤õ?„{Ÿ¤ÿW„1¤ñù„ó¯¤u„ñù¤Í„ó¯¤E;„sU¤ÿ  Ÿ‰í¤Ï„w¤9û„ Q¤· „=¤‹›„=¤™„O¤)„…㤠A„=¢‰‰‹ó¡‰‰ó¹„+¤™„ A¢‰‰ Ýa‰‰ó¹wÏ‰í¢‰‰ »‰‰s„õ±¤ó¯„s‹¢‰‰‹#³‰‰s„qC¤u„{Ÿ¤ÿW„1¤qC„ñ¤÷Äó¯¤ÉA„qC¤Çã„ó¯¤ñ Ÿ‰í¤å„ Ó¤9Ÿ„=¤7?„ Ó¤ ¿„夏=„O¤)„…㤋e„=¢‰‰‹ó¡‰‰sñ„÷¤ ӄ Á¢‰‰ ]M‰‰sñ÷ã‰í¢‰‰ »‰‰õՄsU¤s „s‹¢‰‰‹#³‰‰õՄqC¤u„{Ÿ¤ÿW„ÿפó¯„yW¢‰‰ %¡‰‰õՄqC¤G̈́qC¤qC Ÿ‰í¤=„=¤¹µ„=¢‰‰ ù‰‰uɄ‡)¤ ӄ«¤)„…㤋e„=¢‰‰‹ó¡‰‰uɄ÷¤w„+¢‰‰ ]M‰‰uÉ÷ã‰í¢‰‰ _‰‰÷­„sU¤sՄs‹¢‰‰‹#³‰‰÷­„ñù¤u„{Ÿ¤³„ÿפó¯„yW¢‰‰ #y‰‰÷­„ó¯¤GM„qC¤qC Ÿ‰í¤ ӄ™¤' „=¢‰‰ ù뉉w„M¤ ӄ«¤Ï„…㤋e„=¢‰‰‹s ‰‰w„+¤­„ Á¢‰‰ ß©‰‰w÷ã‰í¢‰‰ õ‰‰wå„õ±¤sՄsU¢‰‰‹£W‰‰wå„qC¤u„{Ÿ¤³„ÿפó¯„û3¢‰‰ #Ÿ‰‰wå„qC¤Ùs„ñù¤ó¯ Ÿ‰í¤­„c¤ ӄ ¿¤/“„™¢‰‰ y5‰‰ ‰Ù„…㤍÷„÷¤Ï„M¤ ?„™¢‰‰‹s ‰‰ ‰Ù„w¤‹e„+¢‰‰ ß©‰‰ ‰Ù÷ã‰í¢‰‰ õ‰‰ ½„sU¤u„s ¢‰‰‹£W‰‰ ½„ñù¤÷A„û3¤³„ÿ‹¤s‹„{Ÿ¢‰‰ £3‰‰ ½„ñù¤Ñï„÷äó¯„ñ¤sÕ‰3‰í¤+„O¤+„‹e¤¯·„=¢‰‰ û‘‰‰ ‹‘„…¤÷„÷¤Ï„Ÿ…¢‰‰‹s ‰‰ ‹‘„w¤‹„­¢‰‰ ßs‰‰ ‹‘÷ã‰í¢‰‰ )‰‰ ‹õ„sÕ¤ug„s ¢‰‰‹£W‰‰ ‹õ„aý¤³„}/¤sU„ýÅ¢‰‰ ¡!‰‰ ‹õ„ñù¤Óñ„sU¤õ±„1¤õ± Ÿ‰í¤ Á„O¤ Á„+¤-„™¢‰‰ û{‰‰ 鄽¤+„ƒQ¤Ï„Ÿ…¢‰‰‹s ‰‰ 鄍w¤‹„­¢‰‰ ßs‰‰ é÷ã‰í¢‰‰ M‰‰ Í„sU¤÷Äs ¢‰‰‹§/‰‰ Í„w-¤õ±„aý¤ñ„}/¤sU„}/¢‰‰ /lj‰ Í„ñù¤Óñ„sU¤u„ÿפu‰3‰í¤‹e„u¤ ¿„w¤-„¢‰‰ ý£‰‰ ¡„«¤+„½¤c„Ÿ…¤‹e„ ¿¤›Í„‰S¤±/„ƒÑ¢‰‰ _߉‰ ¡÷ã‰í¢‰‰ M‰‰ …„}¡¤OQ„w-¤eµ„÷äu„aý¤ñ„ýŤsU„ÿ×¢‰‰ ¯{‰‰ …„ñg¤Óq„s ¤÷Äÿ ¤u Ÿ‰í¤‹„ƒQ¤‰S„w¤­¥„™¢‰‰ ý£‰‰ ù„«¤+„½¤c„Ÿ…¤‹›„‹¤a„ ¿¤±/„u¤“¹„‰S¤?C÷ã‰í¤Á=„w-¤mلÿ 
¤OQ„÷äç„ug¤uç„aý¤ñ„ýŤsU„ÿ×¢‰‰ -µ‰‰ ݄ñù¤S݄s ¤w-„}/¤ug Ÿ‰í¤ ¿„™»¤­%„™¢‰‰ } ‰‰ ±„O¤ Á„…¤c„Ÿ…¤‹›„‹¤‡„‹¤±/„u¤“¹„ ¿¤?C÷ã‰í¤Á=„uç¤mY„ÿפOQ„ug¤çû„u¤÷Äa¤ñù„{¤u„ñ¢‰‰ ­‰‰ •„ñù¤S[ Ÿ‰í¤­%„™¢‰‰ }鉉 ƒ‰„夋儅c¤™„Ÿo¤ ¿„‹e¤‡„‹¤±/„«¤“'„‹›¤?C÷ã‰í¤Á=„uç¤mY„ÿפÓ„÷äçû„u¤gDŽíÿ¤ñù„û³¤ug„ñ¢‰‰ +#‰‰ ƒí„ñ¤Õ' Ÿ‰í¤+Y„墉‰ }鉉 Á„夋„Ϥ™„ƒ¤™»„‹e¤‡„ ¿¤-k„«¤“'„‹›¤?Cwωí¤qC„uç¤ýńug¤Um„uç¤mY„ÿפÓ„ug¤ù!„õ?¤gDŽíÿ¤ñù„yW¤÷Äñg¢‰‰ «G‰‰ …¥„ñ¤Õ'‰³‰í¤+ۄc¢‰‰ ‰‰ ™„Ÿ…¤=„m¤_„ Á¤_„ Á¤-„O¤ƒ„‹e¤©í„ A¤ƒÑ„‹›¤=÷ã‰í¤qC„uç¤}¡„õ?¤W•„u¤íÿ„1¤Óñ„õ±¤ù!„õ±¤ù!„í¤qC„aý¢‰‰ )ý‰‰ ý„ñ¤Õ§‰3‰í¤+Y„O¢‰‰ ‰‰ ‡Ñ„Ÿ…¤=„m¤_„­¤‡s„ Á¤-„O¤ƒ„‹e¤©í„­¤ƒÑ„‹›¤ Ó÷ã‰í¤ó¯„uç¤}¡„sÕ¤W•„u¤íÿ„1¤Óñ„õ±¤y „sÕ¤ù!„í¤qC„aý¢‰‰ )ý‰‰ µ„1¤Õ' Ÿ‰í¤«ÿ„O¢‰‰ û‰‰ ™©„9¤=„m¤‡õ„w¤‡s„ Á¤­%„«¤“'„ Á¤)Ʉ+¤u„‹›¤=÷ã‰í¤ó¯„u¤ÿׄs‹¤×Ʉõ±¤íÿ„1¤Õ'„s ¤yé„s ¤y„k7¤ó/„c£¢‰‰ ©·‰‰ „1¤Uƒ Ÿ‰í¤«ÿ„O¢‰‰ ñE‰‰ ᄝߤ Q„•I¤‡õ„w¤‡©„w¤+Y„O¤ƒ„ Á¤)Ʉ÷¤«„‹e¤ Ó÷ã‰í¤s „u¤ÿW„ó¯¤×Ʉõ±¤íÿ„1¤Õ'„s ¤yé„ó¯¤yW„k7¤qC„叢‰‰ 뉉 ›Å„ÿW¤Um‰3‰í¤«“„«¢‰‰ ñE‰‰ ¹„ó¤=„•I¤ ӄ ?¤™„ Ó¤M„=¤«ÿ„c¤ï„ Á¤«„w¤)„‹e¤w÷ã‰í¤s „u¤ÿW„s ¤Um„õ±¤í“„ñ¤Uƒ„qC¤û3„ó¯¤ñù„÷A¤ó¯„k7¤qC„叢‰‰ ‰‰ „ÿפUï Ÿ‰í¤)7„u¢‰‰ q¡‰‰ ñ„¤ ӄ•¤w„‹›¤™„ Ó¤M„=¤«ÿ„c¤ï„ Á¤«„ Ó¤Ï„ A¤÷÷ã‰í¤s‹„õ?¤³„ó¯¤Um„õ±¤í“„ñ¤Uƒ„qC¤û3„ó¯¤ñù„uç¤s „ëí¤ó¯„åi¢‰‰ ‰‰ Մÿ ¤×I‰³‰í¤)Ʉu¢‰‰ q¡‰‰ ŸÉ„¤ ӄñ¤+„‹›¤=„=¤M„=¤«ÿ„c¤ï„ Á¤«ÿ„w¤Ï„ A¤÷÷ã‰í¤sU„õ±¤1„ó¯¤Uƒ„sU¤í“„ñù¤Um„ñù¤û3„qC¤qC„u¤õ±„i%¤s „eµ¢‰‰ Y‰‰ ­„ÿ ¤×É Ÿ‰í¤)Ʉu¢‰‰ ó ‰‰ ‘„›Í¤w„—[¤ Á„‹e¤=„=¤M„™¤«„™¤+¤«ÿ„ Ó¤O„ Á¤+wM‰í¤s‹„õ±¤1„s ¤Õ'„sU¤í“„ñù¤å„÷A¤{Ÿ„ñ¤{Ÿ„qC¤qC„u¤õ±„ù!¤uç„{Ÿ¤s „eµ¢‰‰ •ÿ‰‰ ‘å„ÿ‹¤×I Ÿ‰í¤)7„÷¢‰‰ ó ‰‰ ل›Í¤w„…㤋›„_¤ Á„‹e¤=„=¤…ã„c¤…ã„ ?¤ó„™¤+¤+Y„w¤O„ Á¤÷÷ã‰í¤sU„sU¤ñ„ó¯¤Õ'„sU¤í“„ñù¤åi„u¤{y„ñ¤{Ÿ„ñg¤ó/„u¤õ±„y¤sՄýE¤s „eµ¢‰‰ •ÿ‰‰ “½„}/¤W•‰3‰í¤©í„ƒÑ¢‰‰ óW‰‰ ‘„ᤍ­„;¤=„Ϥ‹å„ A¤ Q„¤…㄁Ϥ½„‹e¤›Í„™¤•I„+¤­%„ Q¤儍+¤ Á÷ã‰í¤õ±„sU¤ñ„ó/¤S[„sU¤k7„ñù¤eµ„u¤ýń³¤{Ÿ„ñg¤ó/„õ?¤u„û³¤qC„ýE¤sՄ瑢‰‰ •“‰‰ õ„}¡¤W• Ÿ‰í¤©í„u¢‰‰ óW‰‰ •é„ᤍ­„ƒÑ¤c„…c¤‹å„ A¤ Q„å¤ Ó„ ¿¤w„Ï¤½„ Á¤a„™¤•I„ Á¤-„ Q¤儍+¤ Á÷ã‰í¤õ±„sU¤ñ„ó/¤Óñ„õ±¤k7„ñù¤ç„õ±¤ýń³¤s „÷äó¯„ñ¤ó/„õ?¤u„{¤ñ„}¡¤sՄ瑢‰‰ 7‰‰ ̈́ÿ ¤W•‰³‰í¤«„‹›¤9„u¢‰‰ óW‰‰ —¡„ᤠÁ„ƒQ¤u„…¤‹„ A¤ Q„å¤ Ó„ 
¿¤+„«¤½„+¤‡„™¤+¤-„=¤™„÷¤‹ewωí¤õ?„s‹¤qC„qC¤Ó„sU¤çû„w-¤ÿ „ñù¤çû„sU¤}¡„ÿ‹¤sU„uç¤s‹„1¤s‹„õ?¤ug„ýŤ}¡„ýŤu„ç{¢‰‰ “퉉 …„}¡¤ãG„u¤ñ Ÿ‰í¤儋e¤9„ƒÑ¢‰‰ s³‰‰ ù„¤‹e„½¤ƒÑ„½¤‹„ A¤÷„O¤÷„‹›¤+„÷¤ƒÑ„+¤‡„™¤u„‰S¤‡„+¤-k„=¤=„÷¤ A÷ã‰í¤u„ó¯¤qC„qC¤Ó„sU¤çû„÷äÿ „qC¤gE„s‹¤}¡„ÿ‹¤sU„uç¤s‹„1¤sU„s ¤÷Ä}/¤{û„i%¤ñ„÷≉ “‰‰ ©Ý„}/¤qC„÷ä{Ÿ„sÕ¤ñù Ÿ‰í¤ ӄw¤…‡„‹e¤ ӄ½¢‰‰ y5‰‰ )±„‹e¤™„¤τ™»¤+„O¤÷„‹›¤+„÷¤ƒÑ„÷¤™;„=¤«„‹¤‡„+¤¯·„=¤=„ Ó¤‹›÷ã‰í¤uç„ó¯¤qC„qC¤Q˄sU¤çû„ug¤ÿׄqC¤gE„s‹¤}¡„ÿ‹¤sU„uç¤s‹„1¤sU„gǤû³„ë¤ñù„u¢‰‰ “‰‰ «•„ýŤó¯„u¤{û„s ¤ó¯‰3‰í¤ ӄ÷¤½„+¤w„;¤©ƒ„‰S¤¿ù„‹e¤=„¤‡)„_¤+„O¤+„‹e¤+„÷¤ƒÑ„ Ó¤‡s„¤Ï„‹e¤™»„w¤¯·„™¤ ӄ Ó¤‹›÷ã‰í¤uç„ó¯¤ó¯„ñù¤Q˄s ¤gDŽu¤³„ñg¤y „ó¯¤}¡„ÿ‹¤sU„u¤sU„1¤sU„ù!¤yW„ë¤qC„u¤A™„w-¤Wÿ„ýE¤s „sU¤ýńs‹¤ó¯ Ÿ‰í¤+„=¤u„w¤w„;¤©m„ ¿¤¿„‹e¤=„•¤™»„‡)¤ Á„«¤+„ Á¤ Á„ƒQ¤u„ Ó¤‡s„=¤O„ Á¤™»„w¤¯·„™¤ ӄ=¤‰SӉí¤s‹„ñg¤ãل÷A¤gDŽs ¤gDŽõ±¤1„qC¤yé„qC¤ÿ „ýŤu„õ±¤õ±„ÿ ¤õ±„û3¤çû„ëí¤ó¯„õ±¤Ac„uç¤W„ýE¤s „qC¤ÿׄqC¤õ± Ÿ‰í¤ Á„=¤«„=¤w„;¤©„‹›¤¿„ Á¤ ӄ•¤‡„M¤ Á„u¤ Á„ Á¤‹e„½¤u„=¤‡©„=¤O„ Á¤™»„w¤™»„ ?¤¹„¤÷÷ã‰í¤s‹„ñg¤c#„uç¤gDŽs ¤gDŽsU¤ñ„qC¤yé„qC¤ÿ „ýŤu„õ±¤õ±„ÿ ¤u„û3¤ç„k7¤ó¯„sU¤Ac„uç¤W•„ýŤsU„ñù¤ÿׄqC¤u Ÿ‰í¤‹e„=¤«„™¤+„½¤©í„‹›¤¿„+¤ ӄ•I¤a„M¤‹e„u¤ Á„ Á¤‹e„½¤u„=¤‡©„=¤儍+¤™»„w¤™»„‹›¤]„¤÷wM‰í¤ó/„ñg¤c#„uç¤gDŽs ¤ñ„w-¤ñ„s‹¤ñg„ñù¤û3„ñù¤ÿׄýŤu„sU¤u„ÿ ¤u„{Ÿ¤ç„ëí¤ó¯„sU¤Ac„uç¤W•„{y¤sU„ñù¤ÿׄqC¤u Ÿ‰í¤‹„¤c„O¤ Á„…¤©í„‹e¤¿ù„­¤÷„ñ¤ᄅ¤‹e„u¤‹e„+¤‹e„…¤O„™¤M„™¤„÷¤™„‹›¤=„w¤„‹e¤ó„复÷÷ã‰í¤s‹„ñ¤å„u¤ç{„s ¤qC„uç¤ñù„s‹¤ñg„ñù¤û3„ñù¤1„{y¤u„sU¤u„ÿ ¤u„{y¤ç‘„끤s‹„sÕ¤A™„u¤W•„{y¤õ±„1¤ñ„ñg¤ugՉí¤儁O¤+„…¤©í„‹e¤¿ù„­¤÷„—[¤á„½¤‹e„u¤‹e„+¤‹e„…¤O„c¤…ã„™¤=„w¤™„‹e¤=„w¤„‹e¤ó„复÷÷ã‰í¤sU„ñ¤å „õ±¤ç{„s ¤qC„u¤ñù„s ¤ñù„qC¤{Ÿ„ñ¤1„{y¤uç„s‹¤uç„ýE¤÷A„ýE¤ç„éÛ¤s‹„sÕ¤ñù„w-¤Ùs„u¤×I„{y¤õ±„ÿפñ Ÿ‰í¤å„«¤ Á„…¤)7„‹e¤' „‰S¤™„­¤÷„§¤a„;¤ ?„;¤‹›„÷¤‹›„…¤O„c¤…ㄏ=¤™„w¤™„‹e¤=„w¤„ Á¤s„c¤+÷ã‰í¤sU„ñ¤å „õ±¤ç{„s ¤qC„õ±¤qC„ó¯¤qC„ñù¤{y„ñ¤1„{Ÿ¤÷A„s‹¤uç„ýE¤÷A„}¡¤ç„W¤sU„sÕ¤ñù„ug¤Y߄õ±¤W•„{y¤õ±„ÿ‹¤ñ Ÿ‰í¤c„÷¤ Á„…¤©í„ Á¤§£„‹¤™„­¤+„©¤a„ƒÑ¤ ?„;¤‹›„÷¤ ?„…㤁O„c¤…„™¤=„ Ó¤=„ Á¤=„w¤„ Á¤s„c¤+÷ã‰í¤sU„ñ¤ñg„ug¤ÿ‹„õ±¤ç‘„s 
¤qC„sU¤ó¯„ó¯¤qC„ñù¤{y„ñ¤1„{Ÿ¤÷A„s‹¤uç„a¤ç„W•¤sU„sÕ¤qC„u¤Y߄õ±¤×Ʉ{û¤õ?„}¡¤qC‰3‰í¤=„;¤‹e„…¤«„+¤%G„‹å¤=„w¤ Á„)ɤá„•¤ ?„M¤™„«¤½„™¤ ӄ=¤ ӄ+¤ ӄ Ó¤ᄠÁ¤«„ Á¤¿„Ï¤­÷ã‰í¤sՄ³¤qÄõ±¤ÿׄõ±¤ç‘„ó¯¤ó¯„sU¤ó¯„qC¤ó¯„ñù¤ýńÿפñù„û3¤÷A„ëí¤ç‘„×ɤõ±„s ¤qC„u¤Û9„sU¤Um„{y¤u„ýE¤qC‰3‰í¤=„;¤‹e„…ã¤)Ʉ÷¤%G„ Á¤ Q„w¤ Á„«¤a„#µ¤™„«¤ƒQ„c¤ ӄ=¤ ӄ+¤ ӄ Ó¤ᄍ­¤Ï„­¤ ӄÏ¤­÷ã‰í¤sՄ³¤ó¯„sÕ¤³„sդ瑄ó¯¤ó¯„sU¤ó¯„qC¤ó¯„ñ¤}/„ÿפñù„Ýͤç„Um¤õ±„s ¤ó/„õ±¤Û9„s‹¤×Ʉ{Ÿ¤u„ýE¤qC Ÿ‰í¤ Q„…¤‹e„…¤«„÷¤%G„ Á¤=„+¤ Á„«ÿ¤a„£i¤™„«¤ƒQ„c¤w„=¤ ӄ÷¤ ӄ Ó¤ᄍ­¤Ï„w¤ ӄc¤­÷ã‰í¤õ?„³¤s „󯤳„sÕ¤e5„ó¯¤s „s‹¤ó¯„qC¤s „ñ¤}/„ÿפñù„]ó¤ç„Õ'¤u„s‹¤ó¯„sU¤ï#„÷äeµ„s‹¤Um„{Ÿ¤uç„{Ÿ¤ó¯ Ÿ‰í¤ ӄ…㤋›„…㤫„÷¤›Í„ ¿¤]„+¤ ӄ÷¤‹e„+Y¤a„£¤™„«¤ƒQ„c¤w„=¤ ӄ÷¤w„ Ó¤›K„­¤Ï„ Ó¤w„Ï¤ A÷ã‰í¤õ?„³¤sՄqC¤ñ„s ¤e5„ó¯¤s „ó¯¤s‹„ñù¤sՄ1¤ÿ „ÿפñù„ß]¤ç‘„Óñ¤uç„s‹¤ó¯„sU¤ï#„ug¤ç„s‹¤Uƒ„{y¤uç„û3¤ó¯ Ÿ‰í¤ ӄM¤‹›„…¤«ÿ„÷¤a„‹¤]„+¤ ӄ÷¤‹›„-¤á„!#¤™„«¤u„O¤­„™¤÷„ Ó¤w„ Ó¤›K„w¤c„=¤­„Ï¤ A÷ã‰í¤õ?„³¤sՄqC¤ñ„s ¤}¡„uç¤ó/„ó¯¤s „ó¯¤s‹„ñù¤s „ñ¤ÿ „ÿפñù„_¹¤ç‘„[¤s „s‹¤o„uç¤ýń÷äó¯„ó¯¤Uï„{Ÿ¤÷A„yé¤s ‰3‰í¤÷„‡õ¤ ?„…㤫“„ Ó¤w„‹e¤u„‹e¤‘q„÷¤ ӄ¥{¤a„/ý¤=„u¤«„«¤ Á„复÷„ Ó¤w„ Ó¤ Q„‹›¤ƒÑ„w¤c„¤+„O¤‹e÷ã‰í¤u„1¤sU„ñg¤ñ„s ¤}¡„uç¤ó/„ó¯¤s „ó¯¤s‹„ñ¤õ±„ÿפÿׄÿ ¤qC„Ñ…¤ç„[¤ó¯„s‹¤o„u¤ÿ „u¤s „ó¯¤Uï„{Ÿ¤÷A„y¤s‹ Ÿ‰í¤+„•¤«ÿ„ Ó¤w„‹e¤u„ Á¤‘q„÷¤w„%Ǥa„/ý¤ ӄu¤«„«¤ Á„复+„=¤+„¿¤ Q„‹e¤ƒQ„÷¤™„å¤ Á„O¤‹e÷ã‰í¤u„1¤õ±„ñ¤ñù„s‹¤}/„õ±¤s‹„qäs‹„ó¯¤s‹„ñg¤õ±„ÿ ¤1„ÿ ¤ó¯„ok¤uç„{Ÿ¤ç„Y_¤s „s‹¤o„õ±¤ÿׄsU¤s „s ¤Uƒ„ëí¤s‹ Ÿ‰í¤÷„•¤«ÿ„w¤w„+¤«„ Á¤‘q„÷¤w„§!¤a„…㤋›„‘¤ ӄu¤O„u¤ Á„¤÷„ Ó¤÷„¿¤÷„ Á¤ƒQ„÷¤™„å¤ Á„O¤‹e÷ã‰í¤u„1¤u„ÿפqC„s‹¤ÿ‹„sU¤s‹„qäs‹„qC¤sU„ñ¤u„ÿ ¤1„ÿ ¤ó¯„a¤sU„ýŤ瑄Ùs¤s „s‹¤oë„sU¤ÿׄsU¤s „ó¯¤Uƒ„ëí¤sU Ÿ‰í¤+„•¤«ÿ„ Ó¤w„+¤«„+¤‘—„÷¤w„' ¤á„½¤+„Ÿo¤ ӄu¤O„u¤‹e„复+„=¤÷„¿¤÷„+¤÷„÷¤=„«¤‹e„O¤‹e÷ã‰í¤uç„ÿפu„ÿפqC„s‹¤ÿ‹„sU¤ó¯„ó¯¤s‹„qC¤sU„ñ¤õ±„ÿפ1„}¡¤s „aý¤qC„ÿW¤ç„Ù©¤sU„ó¯¤oë„sU¤ÿׄs ¤sU„ó¯¤Uƒ„ë¤sÕ Ÿ‰í¤ A„—Û¤«ÿ„ Ó¤+„w¤«„+¤‘—„ Ó¤+„¹3¤a„c¤O„]¤w„ƒÑ¤å„u¤ Á„复+„=¤+„=¤ ӄ+¤÷„÷¤=„u¤‹›„u¤ ?÷ã‰í¤÷A„ÿ ¤uç„ÿ ¤qC„s‹¤ÿ‹„sU¤ó¯„qC¤sU„qC¤sU„ñ¤õ±„ÿ ¤ñ„}¡¤s „c#¤1„ñ¤ç„GM¤sU„ó¯¤oë„sU¤ÿׄs ¤sU„ó¯¤Uƒ„i§¤õ?‰3‰í¤ Á„—[¤O„ ¿¤݄ Ó¤ Á„=¤O„÷¤™;„ ¿¤ ӄ 
Ó¤÷„¹3¤‡„c¤«„ó¤w„ƒÑ¤儃Ѥ‹›„O¤ Á„™¤+„=¤ ӄw¤«„ Q¤ ӄƒQ¤‹„«¤ ?÷ã‰í¤÷A„ÿפug„}/¤ó¯„ó/¤ÿׄs ¤ó¯„qC¤sU„ñù¤õ±„1¤uç„}¡¤ñ„}¡¤s „古ÿׄñ¤çû„GM¤s‹„ó¯¤ó¯„÷ägE„s‹¤1„qC¤õ±„ó¯¤ï¥„÷ä1„i%¤õ± Ÿ‰í¤ Á„—[¤c„‹›¤݄ Ó¤ Á„=¤O„÷¤™;„‹¤w„ Ó¤÷„9¤„=¤ƒÑ„¤­„;¤„;¤‹›„c¤­„™¤+„=¤w„ Ó¤«„ Q¤ ӄƒQ¤‹÷‰í¤w-„ýŤó¯„ó/¤1„qC¤s „ñù¤õ±„ñù¤sՄñ¤uç„{y¤qC„ýE¤sՄe5¤{y„s ¤ç{„LJ¤sU„qC¤s „ug¤ù!„ó¯¤1„ñù¤u„qC¤o„uç¤ñ„éÛ¤u Ÿ‰í¤‹e„§¤c„‹›¤‘q„=¤‹e„™¤O„ Ó¤_„‹¤w„=¤+„9û¤„w¤…„›K¤­„;¤=„…¤‹›„c¤­„™¤ Á„™¤w„=¤O„ Q¤ ӄ½¤‰SûS‰í¤ó¯„ó/¤1„qC¤s „ñù¤sU„ñg¤õ±„aý¤qC„ýE¤sՄe5¤{Ÿ„õ?¤ç{„ÅѤõ±„qC¤s „ug¤ù!„ó¯¤ñ„ñ¤u„qC¤o„u¤ñ„éÛ¤u Ÿ‰í¤‹e„§¤™„‹e¤݄=¤‹›„Ï¤å„ Ó¤‡s„ Á¤­„=¤ Á„½ ¤‡„‹e¤…ã„›K¤ A„…‡¤ ӄŸo¤ Á„¤+„=¤w„™¤O„ Q¤ Ó÷ã‰í¤ó¯„ó/¤1„ñù¤s „qC¤sU„ñg¤õ±„a¤ó¯„{û¤õ?„e5¤{Ÿ„u¤çû„Cu¤õ±„qC¤sՄõ±¤y „ó¯¤ñ„³¤uç„qC¤ï¥„u¤ñù„éÛ¤u Ÿ‰í¤‹›„©¤=„ Á¤݄=¤‹›„Ï¤儏=¤c„ ?¤÷„ Á¤­„=¤ Á„½W¤„ ¿¤…ㄤ A„…‡¤ ӄŸo¤ Á„¤ Á„™¤w„™¤O„ Q¤w÷ã‰í¤s „ó/¤1„ñù¤s „ñù¤õ±„ñg¤õ±„a¤ó¯„{û¤õ?„åi¤{Ÿ„÷äç{„C)¤õ±„qC¤sՄõ±¤s‹„÷A¤ñ„qC¤ñ„³¤uç„qC¤ï¥„õ±¤qC„W¤uç Ÿ‰í¤ ¿„©ƒ¤™„‹e¤“'„=¤‹›„Ï¤儏=¤c„ ?¤÷„ Á¤­„™¤‹e„=³¤„ß¤‹„…¤ ӄˤ Á„¤ Á„™¤w„c¤å„ Q¤w÷ã‰í¤s „ó/¤ñ„ñ¤s „ñ¤u„ñ¤u„á·¤ó¯„{y¤ug„ãG¤k7„Ac¤u„ñù¤õ±„sÕ¤sU„uç¤ñ„qC¤ñ„ÿפ÷ÄqC¤mY„õ±¤qC„Wÿ¤÷à Ÿ‰í¤ ¿„©ƒ¤=„ Á¤“'„=¤ ¿„«¤儏=¤c„‹›¤+„­¤ Á„™¤‹e„¿¤•I„9¤‹„…¤ ӄˤ‹e„夋e„c¤w„c¤å„ Q¤w÷ã‰í¤sU„qC¤ñ„1¤sU„ñ¤õ?„ñù¤õ±„á·¤s „{Ÿ¤÷Äaý¤í}„Á=¤uç„ñù¤õ±„sÕ¤sU„uç¤ñù„ñù¤ñù„ÿ‹¤÷ÄqC¤ñù„÷A¤eµ„õ±¤ó/ Ÿ‰í¤ Q„ Á¤›Í„ ?¤™„=¤ ¿„÷¤™„™¤™„‹›¤+„­¤ Á„™¤‹›„?C¤„Ÿ…¤ ¿„…㤍w„ˤ Á„™¤ A„c¤+„O¤儏=¤+÷ã‰í¤sU„qC¤ñg„ÿפsU„1¤u„ñù¤õ±„oë¤sU„Óñ¤ï¥„OÓ¤uç„ñù¤õ±„sÕ¤sU„uç¤ñù„ñù¤ñù„eµ¤qC„uç¤ç„õ±¤ó/‰³‰í¤ ӄ+¤=„‰Õ¤ƒÑ„‹›¤=„›Í¤™„™¤™„‹e¤ Á„­¤ Á‚‰‰‹%¥‰‰ CÙ¤‘„-k¤+„‘—¤ Á„™¤‹e„复÷„«¤„=¤ Á÷ã‰í¤õ±„qC¤ñg„ÿפs‹„ñ¤u„ñù¤õ±„oë¤sU„Ó¤ok‚‰‰ aû‰‰ ޤõ±„sÕ¤õ±„u¤ñù„ñù¤ñù„eµ¤qC„uç¤}¡„w­¤qC„sU¤ó¯ Ÿ‰í¤w„+¤=„‹›¤«„‹e¤=„›Í¤™„™¤™„‹e¤ Á„w¤‹e„½ ¤ Á„½¤Ÿo„¯·¤ Á„‘q¤‹e„O¤ ¿„O¤+„u¤„=¤ Á÷ã‰í¤õ±„qC¤ñg„ÿ ¤sU„1¤÷Ä1¤u„o¤õ±„Qˤa„ýŤõ±„Cu¤u„s ¤õ±„u¤ñù„ñù¤ñù„eµ¤qC„u¤ÿׄuç¤qC„sU¤s  Ÿ‰í¤w„+¤ ӄ‹e¤O„+¤ ӄ›Í¤™„c¤=„‹e¤ Á„w¤‹e„;¡¤÷„ƒQ¤Ÿ…„/¤‹e„ݤ‹e„›Í¤+„u¤„=¤ Á÷ã‰í¤õ±„qC¤qC„}/¤sU„eµ¤u„磻u„Ñ…¤c£„}¡¤qC„ÅѤuç„s ¤u„õ±¤qC„ñ¤ñù„eµ¤ó¯„s 
¤ñ„u¤qC„sU¤sÕ Ÿ‰í¤­„+¤=„‹e¤c„w¤ ӄ›Í¤™„c¤=„ Á¤‹e„w¤‹›„;¡¤=„ƒÑ¤ß„/ý¤‹e„ݤ‹e„›Í¤+„ƒQ¤=„=¤ Á÷‡‰í¤u„qC¤qC„ýŤõ±„GM¤w­„_9¤å „}¡¤ñ„E½¤uç„s ¤u„sU¤ó¯„ñ¤ñù„eµ¤ó¯„s ¤³„u¤ó¯„sU¤sÕ‰3‰í¤+„w¤w„+¤™„ Ó¤ ӄ¤=„Ï¤w„+¤‹e„1U¤å„½¤›K‚‰‰ •K‰‰ Yù¤+„½¤=„=¤‹å÷ã‰í¤u„qC¤qC„ýŤsU‚‰‰‹Iω‰ Ûݤe5„ýŤñ„Ï+¤u„sU¤s „³¤qC„åi¤ó¯„ó¯¤ñù„sU¤s „s ¤sU Ÿ‰í¤ Á„w¤ ӄ+¤™„ Ó¤ ӄ¤ ӄ)¤w„w¤‹„1‹¤«„½¤ႉ‰ •K‰‰ [±¤ Á„…¤=„=¤‹å÷ã‰í¤u„qC¤qC„{y¤õ±‚‰‰‹Iω‰ ݕ¤ç‘„ýŤÿׄÏ÷¤ug„s ¤s „ÿW¤ó¯„åi¤ó¯„ó¯¤ñù„sU¤ó¯„s ¤õ± Ÿ‰í¤ Á„w¤÷„w¤=„™¤ ӄ›Í¤ ӄ)¤w„w¤‹„1‹¤u„ƒQ¤‚‰‰ •K‰‰ ]‰¤ Á„…¤=„=¤‹÷ã‰í¤ug„qC¤ó¯„û3¤u‚‰‰‹Éc‰‰ ]í¤gDŽ}¡¤}/„Ï÷¤÷Äó¯¤s „ÿW¤ó¯„eµ¤ó¯„ñù¤qC„s ¤s‹„ó¯¤u Ÿ‰í¤‹e„ Ó¤÷„w¤=„™¤ ӄ›Í¤ ӄ)¤w„ Ó¤ ¿„1‹¤ƒQ„ƒÑ¤™»‚‰‰ •K‰‰ ßÁ¤‹e„M¤ ӄ=¤‹÷ã‰í¤ug„qC¤ó¯„û3¤u‚‰‰‹Éc‰‰ _¥¤ù!„ýE¤}/‚‰‰ Óÿ‰‰ _¥¤sU„ÿ ¤ó¯„eµ¤ó¯„ñù¤qC„ó¯¤s‹„s ¤u Ÿ‰í¤‹e„w¤÷„ Ó¤=„™¤ ӄ›Í¤ ӄu¤+‚‰‰‹•—‰‰ љ¤ƒQ„;¤_‚‰‰ •K‰‰ љ¤‹e„M¤ ӄ=¤‹ÿ­‰í¤ó¯„yW¤ug‚‰‰‹É™‰‰ Ñý¤yW„ýE¤ýł‰‰ Óÿ‰‰ Ñý¤sU„ÿ ¤ó¯„eµ¤s „³¤ó¯„qC¤sU„ó¯¤uç Ÿ‰í¤‹›„ Ó¤+„=¤ ӄÏ¤w„›Í¤ ӄu¤ Á‚‰‰‹•—‰‰ QѤ…„½¤M‚‰‰ •K‰‰ QѤ‹„‡)¤ Ó÷ã‰í¤ó¯„yW¤ug‚‰‰‹É™‰‰ Óµ¤û3„ýŤ{y‚‰‰ Ó‰‰ Óµ¤õ±„ÿ ¤ó¯„eµ¤s „³¤ó¯„qC¤sU„ó¯¤uç Ÿ‰í¤ ?„=¤ Á„™¤÷„«¤w„›Í¤ ӄu¤ Á‚‰‰‹•—‰‰ S©¤…„…¤…ら‰ •K‰‰ S©¤ ¿„‡õ¤ Ó÷ã‰í¤s ‚‰‰‹É™‰‰ Ս¤{y„{y¤{y‚‰‰ Ó‰‰ Ս¤u„ýŤs‹„eµ¤sU„ÿ‹¤sU„1¤u Ÿ‰í¤‹å„O¤+„÷¤+„›Í¤÷„½¤‹e‚‰‰‹•q‰‰ Õᤅ„…¤…‚‰‰ %ى‰ Õᤍw÷ã‰í¤s ‚‰‰‹Éc‰‰ UŤ{Ÿ„ýE¤{‚‰‰ Ó‰‰ UŤu„ýŤs‹„ç¤sU„}/¤sU„1¤õ± Ÿ‰í¤ Á„O¤+„ƒQ¤+„a¤÷„½¤‹e‚‰‰‹•q‰‰ ×¹¤…c„;¤…ら‰ %ى‰ ×¹¤w÷ã‰í¤s ‚‰‰‹Éc‰‰ W¤{y„ýE¤{Ÿ‚‰‰ Qɉ‰ W¤uç„ýŤs‹„ç¤sU„ýŤu„ÿ ¤u Ÿ‰í¤‹›„ƒÑ¤‹e„…¤ Á„a¤÷‚‰‰‹—¹‰‰ Wñ¤…„…¤…‡‚‰‰ %ى‰ Wñ¤w÷ã‰í¤s ‚‰‰‹Iω‰ éÕ¤{û„{y¤{y‚‰‰ Có‰‰ éÕ¤s‹„ç¤õ±„{y¤u„}¡¤uç û‰í¤ ¿„½¤‹„…㤋e„a¤÷‚‰‰‹‰‰ iɤ…„…¤;‚‰‰ %ى‰ iɤw÷ã‰í¤s ‚‰‰‹Iω‰ ë­¤ýE„{y¤{y‚‰‰ Có‰‰ ë­¤s‹„ç¤u„{Ÿ¤ug„ýŤ÷Ã9Y‰í¤+‚‰‰‹‰‰ k¤…ã„…¤½‚‰‰ %ى‰ k¤w÷ã‰í¤sՂ‰‰‹Ë«‰‰ kå¤ýńýŤ{Ÿ‚‰‰ C‰‰ kå¤sU Ÿ‰í¤+‚‰‰‹ íÙ¤…ㄽ¤½‚‰‰ §#‰‰ íÙ¤­÷ã‰í¤sՂ‰‰‹Ëu‰‰ m½¤ýńýE¤{Ÿ‚‰‰ C‰‰ m½¤sU Ÿ‰í¤+‚‰‰‹©Ë‰‰ …ã„;¤½‚‰‰ §#‰‰ ­÷ã‰í¤sՂ‰‰‹Ëu‰‰ ïõ¤}/„ýE¤{Ÿ‚‰‰ Ã͉‰ ïõ¤õ±‰3‰í¤+‚‰‰‹)‰‰ o餅ã„;¤½‚‰‰ §#‰‰ o餍­÷ã‰í¤sՂ‰‰‹Kщ‰ áͤýńýE¤{Ÿ‚‰‰ Ã͉‰ áͤsU Ÿ‰í¤ Á‚‰‰‹)ñ‰‰ a¡¤…ㄽ¤ƒÑ‚‰‰ §#‰‰ a¡¤­÷ã‰í¤sՂ‰‰‹Í;‰‰ ㅤýE„}/¤{Ÿ‚‰‰ Aa‰‰ ㅤõ± 
Ÿ‰í¤ Á‚‰‰‹«[‰‰ ãù¤…㄃Q¤;‚‰‰ §#‰‰ ãù¤­÷ã‰í¤sՂ‰‰‹Í;‰‰ cݤýE„ÿ ¤û³‚‰‰ A‰‰ cݤu Ÿ‰í¤‹å‚‰‰‹«[‰‰ 層τu¤;‚‰‰ §#‰‰ 層­÷ã‰í¤õ?‚‰‰‹M‡‰‰ e•¤{û„ÿפû³‚‰‰ A‰‰ e•¤u‚‰‰‹­ƒ‰‰ 牤M„O¤…‡„ A¤ ¿‚‰‰ '‰‰ 牤 A÷ã‰í¤õ?‚‰‰‹Ã‰‰ çí¤÷Äõ?¤{û„1¤û3 y‰í¤…㄁O¤M„ ¿¤‹e‚‰‰ '‰‰ gÁ¤‹e÷ã‰í¤u‚‰‰‹Ã቉ ù¥¤u„÷äû3„1¤{Ÿ y‰í¤…ã„™¤›Í‚‰‰ '‰‰ y™¤‹ewωí¤õ?‚‰‰‹Ã቉ yý¤åi„ó¯¤û3 Ÿ‰í¤M„ Ó¤‚‰‰ '‰‰ ûѤ A÷ã‰í¤õ?‚‰‰‹CK‰‰ {µ¤c#„sU¤û³ Ÿ‰í¤τ+¤]‚‰‰ '‰‰ ý©¤ A÷‡‰í¤u‚‰‰‹CK‰‰ }¤ãلu¤û3 y‰í¤‡©„‰Õ¤Ÿ‚‰‰ '‰‰ }ᤋe÷ã‰í¤u‚‰‰‹CK‰‰ ÿŤa}„w­¤yé Ÿ‰í¤-÷ã‰í¤SÝ Ÿ‰í¤­¥÷ã‰í¤Õ' Ÿ‰í¤+Y÷ã‰í¤U Ÿ‰í¤«}÷ã‰í¤U y‰í¤«“÷ã‰í¤Uï y‰í¤©í÷‰í¤éÛ Ÿ‰í¤§÷‰í¤i%y7‰í¤‹e„M¤÷ã‰í¤ëk„û3¤u‰3‰í¤w„…㤕I÷ã‰í¤k7„{Ÿ¤s ‰3‰í¤=„…㤃÷‰í¤mY„{y¤ñ Ÿ‰í¤c„…¤“'÷‰í¤o„{Ÿ¤1 Ÿ‰í¤O„…㤑q÷‰í¤oë„{y¤ÿ× Ÿ‰í¤u„…¤Ÿo÷ã‰í¤a„{y¤ÿ  Ÿ‰í¤ƒQ„…¤¹÷ã‰í¤ãل{y¤}/ Ÿ‰í¤ƒQ„…¤]÷‰í¤åi„{y¤}¡ Ÿ‰í¤ƒÑ„…¤÷‰í¤eµ„ýŤýE Ÿ‰í¤…‡„÷¤›K÷ã‰í¤e5„ÿ‹¤{û y‰í¤;„«¤›Í÷ã‰í¤eµ„ÿפýE Ÿ‰í¤…„c¤›Ío‰í¤w-„ÿ ¤ç„ñù¤{Ÿ Ÿ‰í¤…ã„™¤a„u¤‰S÷ã‰í¤÷Äÿפeµ„ó¯¤{Ÿ Ÿ‰í¤…ã„ Ó¤›Í„«¤ ¿÷ã‰í¤uç„ÿפe5„sÕ¤û3 y‰í¤…ã„ Á¤›Í„«¤‹e÷ã‰í¤u„ÿפeµ„õ±¤{Ÿ Ÿ‰í¤τ‹å¤›Í„O¤ Á÷ã‰í¤sU„1¤eµ„÷äyW Ÿ‰í¤‡)„ ¿¤›Í„O¤+÷ã‰í¤s „ñ¤×É Ÿ‰í¤)Ʉc¤w÷ã‰í¤ó¯„ñù¤×É û‰í¤©í„=¤=÷ã‰í¤qC„qC¤W• Ÿ‰í¤)7„w¤™÷ã‰í¤ñù„s ¤×I y‰í¤©í„­¤c÷‰í¤ñ„õ±¤W• Ÿ‰í¤©í„ Á¤å÷ã‰í¤1„uç¤W• Ÿ‰í¤©í„‹›¤O÷ã‰í¤ÿׄ÷A¤W• y‰í¤£÷ã‰í¤]q Ÿ‰í¤£÷‡‰í¤_9 Ÿ‰í¤¡G÷‰í¤Ñ… Ÿ‰í¤/ý÷‰í¤QË Ÿ‰í¤-k÷ã‰í¤Ó y‰í¤O„ ¿¤“'÷ã‰í¤mY„÷ä1 Ÿ‰í¤™„ Á¤‘q÷ã‰í¤ok„s ¤ó¯ Ÿ‰í¤ ӄw¤‘÷‰í¤á·„ó¯¤s  y‰í¤ A„=¤Ÿo÷ã‰í¤a„qC¤õ?‰í¤Ÿ…÷ã‰í¤aý y‰í¤]÷ã‰í¤c# Ÿ‰í¤]÷‰í¤å Ÿ‰í¤ó÷ã‰í¤å Ÿ‰í¤]÷ã‰í¤c# Ÿ‰í¤]÷ã‰í¤c#‰3‰í¤¹wωí¤a‘ Ÿ‰í¤Ÿá÷ã‰í¤á5‰3‰í¤‘q÷ã‰í¤o‰3‰í¤“'÷ã‰í¤mY‰3‰í¤ƒwM‰í¤k7 Ÿ‰í¤•IwM‰í¤ëí Ÿ‰í¤•wωí¤i%‰3‰í¤‡õ„ ¿¤‡õ÷ã‰í¤y„÷äy‰3‰í¤‡s„‹e¤‡õ÷ã‰í¤y„u¤y ‰³‰í¤‡õ„ Á¤_÷ã‰í¤ù!„s‹¤yW Ÿ‰í¤‡)„÷¤_wM‰í¤ù!„ó/¤y‰3‰í¤‡s„=¤_÷ã‰í¤ù!„qC¤y ‰³‰í¤‡õ„™¤‡õ÷ã‰í¤y„ñù¤y‰3‰í¤‡õ„O¤‡©÷ã‰í¤yé„1¤y Ÿ‰í¤‡õ„O¤‡©÷‰í¤{Ÿ„ÿפù! Ÿ‰í¤_„«¤…ã÷ã‰í¤{y„ÿ ¤ù! Ÿ‰í¤_„u¤½÷ã‰í¤ýńÿ ¤ù! 
Ÿ‰í¤™»„u¤ƒQ÷‡‰í¤ÿW„}/¤gÇ Ÿ‰í¤™»„ƒQ¤)÷ã‰í¤³„}/¤gÇ Ÿ‰í¤™»„ƒQ¤Ï÷‰í¤ñ„}/¤gÇ y‰í¤™;„ƒÑ¤™÷ã‰í¤ñù„}¡¤gE û‰í¤™»„ƒÑ¤ Ó÷ã‰í¤ó¯„}¡¤gÇ y‰í¤™»„ƒÑ¤­÷ã‰í¤õ±„}/¤ç{ Ÿ‰í¤„ƒQ¤ Á÷‰í¤uç„}¡¤çû Ÿ‰í¤‡„ƒÑ¤‹›ûS‰í¤çû Ÿ‰í¤a÷ã‰í¤ç û‰í¤áwM‰í¤ç‘ Ÿ‰í¤áwωí¤eµ Ÿ‰í¤›ÍwM‰í¤eµ y‰í¤÷ã‰í¤åi û‰í¤÷ã‰í¤åi y‰í¤wM‰í¤åi„û3¤w­ Ÿ‰í¤‰Õ„M¤wM‰í¤åi„{Ÿ¤u‰3‰í¤+„…ã¤÷ã‰í¤åi„{Ÿ¤sU Ÿ‰í¤w„…㤛K÷ã‰í¤e5„{Ÿ¤s ‰3‰í¤=„…ã¤ᄁO¤ ?wM‰í¤u„ñ¤gDŽû3¤ñù Ÿ‰í¤™„M¤™»„夋ewM‰í¤sU„ñ¤y „û³¤ñ Ÿ‰í¤c„Ϥ‡s„复+÷ã‰í¤s „ñù¤û3„yW¤³‰3‰í¤u„‡©¤…ã„™¤ Ó÷ã‰í¤ó¯„ñù¤{Ÿ„yé¤ÿ  Ÿ‰í¤ƒÑ„‡©¤½„™¤=÷ã‰í¤qC„ñù¤ýńyé¤}¡ Ÿ‰í¤;„‡)¤ƒÑ„=¤™÷ã‰í¤ñù„ñù¤}/„{û¤{Ÿ Ÿ‰í¤…ã„…‡¤ƒQ„™¤™÷ã‰í¤ñ„qC¤}/„ýE¤û3 Ÿ‰í¤‡©„ƒÑ¤ƒQ„=¤c÷ã‰í¤ñ„qC¤}/„}¡¤yé y‰í¤‡õ„Ï¤ƒQ„=¤Ï÷ã‰í¤³„qC¤}/„³¤y Ÿ‰í¤_„c¤ƒQ„=¤Ï÷ã‰í¤ÿׄqC¤}¡„qC¤ù! Ÿ‰í¤_„=¤ƒÑ„=¤«÷‰í¤1„qC¤ýE„sÕ¤gÇ Ÿ‰í¤™»„­¤;„=¤O÷ã‰í¤ÿׄqC¤ýE„u¤çû y‰í¤—[„=¤O÷ã‰í¤1„qC¤i% û‰í¤„ Ó¤«÷‰í¤1„qC¤k7 Ÿ‰í¤•I„=¤O÷ã‰í¤ÿׄó¯¤í“ Ÿ‰í¤ï„ Ó¤«÷ã‰í¤ÿׄqC¤mÙ û‰í¤݄ Q¤«÷ã‰í¤ÿׄó/¤ï¥ y‰í¤‘q„ Q¤«÷ã‰í¤ÿׄó/¤o y‰í¤˄ Ó¤«÷ã‰í¤ÿׄó¯¤a Ÿ‰í¤Ÿo„ Ó¤«÷‰í¤ÿׄs ¤aý Ÿ‰í¤Ÿ…„w¤«÷ã‰í¤ÿׄó¯¤c# y‰í¤ó„w¤«÷ã‰í¤ÿׄs ¤å y‰í¤„w¤«÷‡‰í¤ÿW„s ¤ç Ÿ‰í¤a„w¤)÷ã‰í¤ÿW„s ¤ç{ Ÿ‰í¤„w¤)÷‰í¤ÿׄsU¤ç{ y‰í¤™»„+¤«÷ã‰í¤ÿׄsU¤gÇ y‰í¤_„ Á¤«„c¤‹e÷ã‰í¤u„ñ¤ÿׄõ±¤ù! 
y‰í¤_„‹›¤u„™¤­÷ã‰í¤sՄñ¤í} Ÿ‰í¤„c¤­wM‰í¤ó¯„ñ¤ï¥„û³¤w- Ÿ‰í¤‰S„Ϥ݄c¤ Ó÷ã‰í¤qC„ñù¤ï¥„{¤ug Ÿ‰í¤‹e„…㤑„夏=÷ã‰í¤qC„ñ¤ok„{Ÿ¤u Ÿ‰í¤‹e„…㤑„¤™÷ã‰í¤ñù„ñ¤á·„{y¤õ± Ÿ‰í¤ Á„…¤˄夙÷ã‰í¤ñ„ñù¤á·„ýŤsU Ÿ‰í¤+„½¤˄™¤å÷‰í¤ñg„ñ¤aý„{y¤sU Ÿ‰í¤÷„½¤Ÿ…„™¤å÷ã‰í¤ñ„ñù¤aý„ýŤs‹ Ÿ‰í¤÷„½¤9„c¤å÷ã‰í¤ñ„ñ¤ãG„ýŤs‹ Ÿ‰í¤ ӄƒÑ¤9„™¤c÷ã‰í¤³„ñg¤c#„}¡¤ó¯ Ÿ‰í¤ ӄƒÑ¤]„¤Ï÷ã‰í¤³„ñg¤c#„ÿ ¤qC Ÿ‰í¤=„u¤ó„™¤«÷ã‰í¤ÿׄñù¤å„ÿ ¤qC Ÿ‰í¤™„«¤„c¤O÷ã‰í¤1„ñ¤åi„ÿפñù Ÿ‰í¤™„«¤„™¤«÷ã‰í¤ÿׄñ¤eµ„1¤ñ Ÿ‰í¤c„O¤›Í„c¤«÷‰í¤ÿׄñ¤ç„1¤ñ Ÿ‰í¤c„O¤a„c¤«÷ã‰í¤ÿ „ñù¤ç„ñ¤³ Ÿ‰í¤Ï„处å¤u÷ã‰í¤ÿ „ñ¤ç{„ñ¤³ Ÿ‰í¤«„™¤„¤u÷ã‰í¤ÿ „ñg¤ç{„ñù¤ÿ× Ÿ‰í¤«„™¤„=¤ƒQ÷‡‰í¤}¡„ó¯¤ç{„qC¤ÿ  Ÿ‰í¤u„=¤„ Ó¤ƒÑ÷ã‰í¤}¡„qC¤gDŽqC¤ÿ  Ÿ‰í¤ƒÑ„ Ó¤™»„ Ó¤;÷ã‰í¤ýE„ó¯¤gDŽó¯¤}¡ Ÿ‰í¤ƒÑ„w¤„w¤;÷ã‰í¤ýE„s ¤ç{„s ¤}¡ Ÿ‰í¤;„­¤„+¤…÷ã‰í¤{Ÿ„õ±¤ç{„sÕ¤ýE Ÿ‰í¤;„­¤„ Á¤…ã÷‰í¤{Ÿ„u¤ç{„õ±¤{y Ÿ‰í¤…„ Á¤„‹e¤…ã÷ã‰í¤yé„w­¤ç‘„u¤{y Ÿ‰í¤…„‹›¤©í÷ã‰í¤W•„uç¤{y Ÿ‰í¤…ㄉS¤)É÷ã‰í¤Ùs Ÿ‰í¤' ÷‰í¤Û9 Ÿ‰í¤%G÷ã‰í¤Û9 y‰í¤¥‘÷ã‰í¤[á Ÿ‰í¤¥‘÷ã‰í¤[á û‰í¤£i÷ã‰í¤] Ÿ‰í¤£i÷‰í¤ß] Ÿ‰í¤!#÷ã‰í¤ß] y‰í¤/}÷ã‰í¤Ñ Ÿ‰í¤/}÷ã‰í¤Ñï Ÿ‰í¤/“÷‡‰í¤QË Ÿ‰í¤¯·÷‰í¤Óq Ÿ‰í¤-÷ã‰í¤Óq y‰í¤+Y÷ã‰í¤Õ' Ÿ‰í¤+Y÷‰í¤ó¯„uç¤mY Ÿ‰í¤“'„‹›¤ Ó÷ã‰í¤s „u¤ï¥ Ÿ‰í¤݄­¤‹e÷ã‰í¤u„sÕ¤ï¥ Ÿ‰í¤Ý÷ã‰í¤ï¥ y‰í¤‘qɉí¤uç„eµ¤o Ÿ‰í¤‘q„›Í¤‹›÷ã‰í¤õ±„çû¤ok Ÿ‰í¤‘„‡¤ Á÷ã‰í¤sՄgǤá· Ÿ‰í¤˄_¤w÷ã‰í¤s „ù!¤á· Ÿ‰í¤˄‡õ¤ Ó÷ã‰í¤ó¯„y¤á· y‰í¤K„M¤=÷ã‰í¤ñù„{y¤oë Ÿ‰í¤‘—„…¤™÷ã‰í¤ñ„}¡¤oë Ÿ‰í¤‘q„«¤å÷ã‰í¤ñ„ÿפoñÙ‰í¤‰S„c¤݄夁O÷ã‰í¤1„ñ¤ï¥„ñ¤w-‰³‰í¤‹›„å¤݄™¤O÷ã‰í¤ÿׄó¯¤mY„qC¤sÕ Ÿ‰í¤­„=¤“'„ Ó¤«÷ã‰í¤ÿ‹„sU¤íÿ„ó¯¤s  Ÿ‰í¤w„ Ó¤ƒ„+¤÷÷‡‰í¤ÿ „u¤í„s ¤qC Ÿ‰í¤™„ Á¤„ ¿¤ƒÑ÷ã‰í¤}¡„÷äëk„õ±¤ñù Ÿ‰í¤c„‹e¤!#÷ã‰í¤ß]„u¤ñ‰3‰í¤Ï„‹e¤!#÷‰í¤íÿ„÷A¤{y„ug¤ÿ× Ÿ‰í¤«„‹¤…„ ?¤ƒ÷ã‰í¤ï¥„ó¯¤ÿ „w-¤ÿ  Ÿ‰í¤ó„™¤‘q÷ã‰í¤o„ñù¤å y‰í¤„夑÷ã‰í¤ok„ñ¤åi y‰í¤›Í„«¤Ÿ…÷ã‰í¤ãG„ÿ ¤eµ Ÿ‰í¤›Í„u¤9÷ã‰í¤c£„}¡¤ç Ÿ‰í¤a„ƒÑ¤ß÷‰í¤åi„ýŤç{ Ÿ‰í¤á„½¤›Í÷ã‰í¤eµ„ýŤç‘ y‰í¤„½¤›Í÷‡‰í¤ç{„ýE¤çû Ÿ‰í¤‡„;¤÷ã‰í¤gDŽýE¤ç Ÿ‰í¤a„;¤™»÷ã‰í¤ù£„ýŤç û‰í¤„…¤‡©÷ã‰í¤yé„{y¤ç{ Ÿ‰í¤ᄅ¤M÷ã‰í¤û3„{y¤ç‘ y‰í¤ᄅ¤…÷ã‰í¤{y„ýE¤ç Ÿ‰í¤a„;¤…÷ã‰í¤ýńýE¤eµ Ÿ‰í¤›Í„;¤½÷‰í¤ÿ „{y¤ç y‰í¤a„½¤u÷ã‰í¤ÿ „ýŤç û‰í¤ᄃQ¤«÷ã‰í¤1„ýŤç‘ Ÿ‰í¤á„½¤O÷‡‰í¤ñ„}¡¤ç Ÿ‰í¤a„ƒÑ¤c÷ã‰í¤ñ„ÿ ¤eµ y‰í¤›Í„«¤™÷ã‰í¤ñù„ÿפeµ 
y‰í¤›Í„O¤™÷ã‰í¤ñù„1¤eµ}‰í¤‹„u¤›Í„O¤=÷ã‰í¤ñù„ñ¤ç‘„ÿ‹¤u Ÿ‰í¤‹å„÷¤á„夙÷‰í¤qC„ñ¤ç{„ÿ ¤sÕ Ÿ‰í¤­„u¤„™¤÷ã‰í¤ñg„ñù¤ç{„ÿ ¤sÕ Ÿ‰í¤w„u¤™»„™¤=÷ã‰í¤qC„ñù¤gDŽÿ ¤s  Ÿ‰í¤ ӄ«¤_„c¤ Ó÷ã‰í¤qC„ñ¤yW„ÿ ¤ó¯ Ÿ‰í¤ ӄu¤‡)„c¤=÷‰í¤ó¯„ñ¤yW„ÿפqC Ÿ‰í¤=„«¤‡)„c¤ Ó÷ã‰í¤qC„ñ¤{Ÿ„ÿפñg Ÿ‰í¤„«¤…‡„O¤ Ó÷ã‰í¤ó¯„1¤{û„ÿפñg Ÿ‰í¤å„«¤;„夏=÷ã‰í¤qC„ñ¤ýE„ÿפñ û‰í¤c„O¤ƒÑ„O¤ Ó÷‡‰í¤sU„ÿפÿ „ÿפñ Ÿ‰í¤c„«¤u„«¤+÷ã‰í¤sU„ÿפÿ „1¤³ Ÿ‰í¤Ï„O¤ƒÑ„u¤‹›÷ã‰í¤uç„ÿ ¤}¡„1¤³ Ÿ‰í¤)„O¤u÷ã‰í¤ÿ „1¤ÿW y‰í¤«„c¤uɉí¤w-„åi¤}/„ñ¤ÿ× Ÿ‰í¤«„c¤ƒQ„¤‰S‰s‰í¤õ±„瑤ýńñù¤ÿ  Ÿ‰í¤u„™¤½„ᤠÁwωí¤s „gǤ{Ÿ„qC¤ÿ× Ÿ‰í¤u„w¤M„_¤=÷ã‰í¤qC„ù!¤û3„s ¤ÿ  Ÿ‰í¤ƒÑ„­¤‡©„‡)¤cwM‰í¤ÿׄ{Ÿ¤gDŽug¤}/ Ÿ‰í¤ƒQ„‹¤™»„…㤫÷ã‰í¤ÿ „ýE¤ë Ÿ‰í¤ñ„;¤uwωí¤}/„ýE¤ëk Ÿ‰í¤•I„½¤½÷ã‰í¤ýńýŤk7 û‰í¤ƒ„½¤…÷ã‰í¤{y„ýŤíÿ Ÿ‰í¤“'„…¤…÷‡‰í¤{û„ýŤï¥ Ÿ‰í¤݄½¤…‡÷ã‰í¤{û„{y¤o Ÿ‰í¤‘q„…¤…‡÷ã‰í¤{Ÿ„ýE¤oë y‰í¤Ÿá„…‡¤…÷ã‰í¤{y„{û¤a‘ Ÿ‰í¤Ÿá„;¤…ã÷‰í¤{y„{y¤ãÙ Ÿ‰í¤¹„…¤…÷ã‰í¤{Ÿ„}¡¤ãG Ÿ‰í¤9„ƒÑ¤…ã÷ã‰í¤{Ÿ„}¡¤ãG Ÿ‰í¤9„u¤…ã÷ã‰í¤{Ÿ„ÿ ¤ãG y‰í¤ß„u¤…ã÷ã‰í¤{Ÿ„ÿ ¤c£ Ÿ‰í¤9„Ï¤Ï÷‡‰í¤{Ÿ„ñ¤aý Ÿ‰í¤Ÿ…„c¤…ã÷ã‰í¤û3„ó¯¤ok Ÿ‰í¤‘„ Ó¤M÷ã‰í¤û3„s‹¤o Ÿ‰í¤݄ Á¤M÷ã‰í¤û3„õ±¤ï¥ y‰í¤‘q„ Á¤M÷‰í¤û3„uç¤ï¥ Ÿ‰í¤݄‹›¤M÷ã‰í¤ß] Ÿ‰í¤!#÷ã‰í¤ß] y‰í¤/}÷ã‰í¤Ñ Ÿ‰í¤/}÷ã‰í¤Ñ y‰í¤/“÷‰í¤QI Ÿ‰í¤¯7÷ã‰í¤Ó y‰í¤­¥÷ã‰í¤SÝ Ÿ‰í¤­¥÷ã‰í¤SÝ y‰í¤+Y÷‰í¤Um Ÿ‰í¤«÷ã‰í¤×I„qC¤uç Ÿ‰í¤‹›„=¤)7÷ã‰í¤W•„ó¯¤s ‰3‰í¤=„ Ó¤§÷ã‰í¤éۄó¯¤qC Ÿ‰í¤™„ Ó¤_„‰S¤‡)÷ã‰í¤yW„w-¤ù!„ó¯¤ñù Ÿ‰í¤å„ Ó¤‡©„‹›¤…ã÷ã‰í¤{Ÿ„u¤û3„s ¤1 Ÿ‰í¤O„w¤M„‹e¤…ã÷ã‰í¤{y„sU¤{y„s ¤1 Ÿ‰í¤O„ Ó¤ƒÑ„w¤;÷ã‰í¤ýE„s ¤}¡„ó¯¤1 Ÿ‰í¤«„ Ó¤u„ Ó¤ƒÑ÷ã‰í¤}¡„ó¯¤ÿ „ó¯¤ÿ× û‰í¤«„÷¤«„=¤u÷ã‰í¤ÿׄñù¤ÿׄsU¤ÿ  Ÿ‰í¤u„+¤«„™¤«÷‰í¤1„ñù¤ÿ „u¤ÿ  Ÿ‰í¤u„‹e¤u„™¤O÷ã‰í¤ñ„ñ¤ÿ „uç¤}¡ y‰í¤ƒQ„‰S¤;„¤c÷ã‰í¤ñ„ñg¤ýE„w-¤}/ Ÿ‰í¤Ÿo„夏=÷ã‰í¤qC„ñù¤a‘ Ÿ‰í¤Ÿá„™¤=÷‰í¤s „ñ¤aý Ÿ‰í¤Ÿ…„c¤w÷ã‰í¤s „ñ¤aý y‰í¤Ÿ…„c¤­÷ã‰í¤sՄñ¤aý y‰í¤9„c¤ Á„¤‰S÷ã‰í¤w-„ñg¤õ±„ñ¤ãG Ÿ‰í¤Ÿ…„c¤‹e„=¤‹e÷ã‰í¤õ±„qC¤uç„ñ¤ãG Ÿ‰í¤9„c¤‹›„=¤ ÁwM‰í¤sU„gǤãG Ÿ‰í¤9„™»¤+÷ã‰í¤s‹„y¤ãG û‰í¤]„‡õ¤ Ó÷ã‰í¤ó¯„y¤c#ÿ5‰í¤‹›„O¤¹„Ϥ Q÷ã‰í¤ó/„û³¤c#„1¤u Ÿ‰í¤‹e„O¤]„Ϥ Q÷ã‰í¤qC„{Ÿ¤å„1¤õ± Ÿ‰í¤ Á„O¤ó„…㤏=÷‰í¤ó¯„{û¤c£„ñ¤s  Ÿ‰í¤ ӄc¤s„…‡¤ Ó÷ã‰í¤ó¯„{û¤å „ñ¤ó¯ Ÿ‰í¤=„处;¤=÷ã‰í¤qC„ýE¤åi„ñ¤qC Ÿ‰í¤=„å¤ó„ƒÑ¤ Ó÷ã‰í¤qC„ÿ ¤åi„ñ¤ñù 
Ÿ‰í¤™„处u¤=÷ã‰í¤qC„ÿפåi„ñ¤ñ Ÿ‰í¤å„c¤„«¤=÷‰í¤qC„1¤åi„ñ¤ñ Ÿ‰í¤O„c¤›Í„O¤=÷ã‰í¤qC„1¤eµ„ñ¤1 Ÿ‰í¤«„™¤„¤™÷ã‰í¤ñù„ñg¤e5„ñ¤ÿ× Ÿ‰í¤«„夛K„¤™÷‰í¤ñg„ó/¤åi„ñg¤ÿ× Ÿ‰í¤«„¤„ Q¤÷ã‰í¤ñg„ó/¤eµ„ñg¤ÿ  Ÿ‰í¤u„¤›Í„÷¤å÷ã‰í¤ñ„s‹¤eµ„ñg¤ÿ  Ÿ‰í¤ƒQ„=¤„+¤å÷ã‰í¤ñ„sU¤åi„qC¤}/ Ÿ‰í¤ƒQ„=¤„ Á¤c÷ã‰í¤ñ„u¤å„ó¯¤}¡ Ÿ‰í¤ƒÑ„ Ó¤ó„‹e¤c÷ã‰í¤³„uç¤åi„ó¯¤ýE y‰í¤;„­¤]„‰S¤«÷ã‰í¤ÿׄw-¤c#„sÕ¤ýE Ÿ‰í¤;„­¤„ Á¤ ?÷ã‰í¤÷A„õ±¤ë„sÕ¤ýE y‰í¤½„ Á¤„‹›¤ Á÷ã‰í¤sU„÷A¤ë„u¤{y Ÿ‰í¤…„‹e¤„ ?¤+÷ã‰í¤Õ'„uç¤{y Ÿ‰í¤…„‹›¤+Y÷ã‰í¤S݄w­¤{y Ÿ‰í¤¹µ÷ã‰í¤GÍ û‰í¤'é÷ã‰í¤Ù© y‰í¤§!÷ã‰í¤Û» Ÿ‰í¤%Ç÷‰í¤[ y‰í¤¥‘÷ã‰í¤[á û‰í¤£i÷ã‰í¤] y‰í¤£÷ã‰í¤]ó Ÿ‰í¤£÷‰í¤_¹„{y¤õ± Ÿ‰í¤ Á„…¤¡Ù÷ã‰í¤Ñ„ýE¤s  Ÿ‰í¤ ӄƒÑ¤/}÷ã‰í¤Ñ„}¡¤ó¯ Ÿ‰í¤=„ƒÑ¤¯7„…㤠Á÷ã‰í¤ó¯„}¡¤Ó„}/¤ñù„1¤qC Ÿ‰í¤=„O¤™„ƒQ¤-k„ƒÑ¤ Ó÷ã‰í¤ñù„ÿפÓñ„}¡¤ñg„ñù¤ÿ  Ÿ‰í¤u„™¤„ƒÑ¤-„«¤™÷ã‰í¤ñ„³¤Óñ„ÿ ¤ñ„s‹¤{Ÿ Ÿ‰í¤‡©„ Á¤O„u¤­%„™¤O÷ã‰í¤1„ñù¤S[„ÿ ¤1„õ±¤yéq£‰í¤‹e„ Ó¤_„ ¿¤«„u¤+ۄ=¤Ï÷ã‰í¤³„qC¤Õ§„ÿ ¤ÿׄ÷äù!„ó¯¤u‰3‰í¤+„w¤“'„u¤+ۄ÷¤«÷‰í¤ÿ „u¤Õ'„ÿ‹¤mY„sU¤qC Ÿ‰í¤=„+¤“'„÷¤+Y„‹e¤u÷‰í¤ÿ‹„÷äS݄ÿפmلõ±¤ñ Ÿ‰í¤c„ Á¤“¹„«¤­¥„ ¿¤÷÷ã‰í¤Ù©„1¤mY„õ?¤ñói‰í¤‹e„ Á¤«„‹e¤“'„夹3„¤‹e÷ã‰í¤u„ç{¤GM„ñ¤mY„u¤ÿׄõ±¤u‰3‰í¤+„‹e¤¯·„™¤½W„…¤+÷ã‰í¤ó¯„ÿפA™„ó¯¤Ño„÷äqC Ÿ‰í¤=„ ¿¤/„ Ó¤¿ù„«¤ Óƒ—‰í¤sՄñù¤ñù„ñù¤Ï÷„õ±¤GÍ Ÿ‰í¤¹µ„ Á¤1‹„™¤™„™¤­÷ã‰í¤s „qC¤1„s‹¤Íå„÷äLJ Ÿ‰í¢‰‰‹Uo‰‰Mù„ Á¤ƒÑ„­¤ Q÷ã‰í¤ó/„sÕ¤}¡„õ±¢‰‰ 牉ÏÝ Ÿ‰í¢‰‰‹×ˉ‰O±„‹›¤;„‹e¤™÷ã‰í¤ñù„u¤ýE„u碉‰ 牉Á•‰3‰í¢‰‰‹ÿ‰‰A‰ s‰í¤w-„q≉ e³‰‰Aí Ÿ‰í¢‰‰‹ÿ‰‰ÃÁ„¿¤‰SwM‰í¤sU„õ?¢‰‰ e³‰‰C¥„uç¤s‹wW‰í¢‰‰‹uù‰‰ř÷ã‰í¢‰‰ í{‰‰Åýwé‰í¢‰‰‹uù‰‰EÑ÷ã‰í¢‰‰ k5‰‰ǵ Ÿ‰í¢‰‰‹uù‰‰G©÷ã‰í¢‰‰ ë뉉ٍ Ÿ‰í¢‰‰‹uù‰‰Ùá÷‰í¢‰‰ ë뉉YÅ Ÿ‰í¢‰‰‹u‰‰Û¹÷ã‰í¢‰‰ ë뉉[‹Å‰í¢‰‰‹uù‰‰[ñ÷ã‰í¢‰‰ 푉‰ÝÕ‹Å‰í¢‰‰‹uù‰‰]É÷ã‰í¢‰‰ mlj‰ß­ Ÿ‰í¢‰‰‹w¯‰‰_‰)‰í¢‰‰ k5‰‰_å Ÿ‰í¢‰‰ ‰U‰‰ÑÙwM‰í¢‰‰ i#‰‰Q½wé‰í¢‰‰ ‹g‰‰ӑ÷ã‰í¢‰‰ W}‰‰ÓõwW‰í¢‰‰ ɉSé÷ã‰í¢‰‰ ×·‰‰Õ͉3‰í¢‰‰ ɉU¡÷ã‰í¢‰‰ ×·‰‰ׅ Ÿ‰í¢‰‰ ɉ×ù÷ã‰í¢‰‰ ×·‰‰WÝ Ÿ‰í¢‰‰ ɉé±÷ã‰í¢‰‰ W}‰‰i• /‰í¢‰‰ ‹‰‰ë‰÷ã‰í¢‰‰ 돉‰ëí ‰í¢‰‰ ‰U‰‰kÁ÷ã‰í¢‰‰ í{‰‰í¥‰í¢‰‰‹uù‰‰m™su‰í¢‰‰ yU‰‰mý Ÿ‰í¢‰‰‹s ‰‰ïÑå¯‰í¢‰‰‹Ÿm‰‰oµ„÷A¤åip‰dp‰Ô ©‰‰ [‰‰ [`
653ecd2daa67f36c0f2f5ca0fcc459daec83c61c
492e6a532c132cc616b9419d280147699f3a8412
/man/rowCoxTests.Rd
524ae6a9056e36d813716d882221cefc1330ee36
[]
no_license
zhangyuqing/simulatorZ
161b61029e7ef46af650ee7db650cc88a254692b
019e59514d56caebcde6affa34f9caee224518a1
refs/heads/master
2021-06-05T00:38:59.760802
2020-10-18T18:22:46
2020-10-18T18:22:46
22,471,700
4
3
null
2014-09-14T02:56:57
2014-07-31T14:14:52
R
UTF-8
R
false
false
1,681
rd
rowCoxTests.Rd
\name{rowCoxTests}
\alias{rowCoxTests}
\title{rowCoxTests}
\description{method for performing Cox regression}
\usage{rowCoxTests(X, y, option = c("fast", "slow"), ...)}
\arguments{
\item{X}{Gene expression data. The following formats are available:
matrix Rows correspond to observations, columns to variables.
data.frame Rows correspond to observations, columns to variables.
ExpressionSet rowCoxTests will extract the expressions using exprs().}
\item{y}{Survival Response, an object of class:
Surv if X is of type data.frame or matrix
character if X is of type ExpressionSet. In this case y is the name of the
survival response in the phenoData of X. If survival time and indicator are
stored separately in the phenoData, one can specify a two-element character
vector, the first element representing the survival time variable and the
second the event indicator.}
\item{option}{"fast" loops over rows in C, "slow" calls coxph directly in R.
The latter method may be used if something goes wrong with the "fast"
method.}
\item{\dots}{currently unused}
}
\value{dataframe with two columns: coef = Cox regression coefficients,
p.value = Wald Test p-values. Rows correspond to the rows of X.}
\author{Yuqing Zhang, Christoph Bernau, Levi Waldron}
\examples{
#test
##regressor-matrix (gene expressions)
X<-matrix(rnorm(1e6),nrow=10000)
#seed
set.seed(123)
#times
time<-rnorm(n=ncol(X),mean=100)
#censoring(1->death)
status<-rbinom(n=ncol(X),size=1, prob=0.8)
##survival object
y<-Surv(time,status)
## Do 10,000 Cox regressions:
system.time(output <- rowCoxTests(X=X,y=y, option="fast"))
}
5a4512d36de5340f2208255de4ad4a18022f1030
a5ea9d5ec0d70bfa722cfd5e49ce08119e339dda
/man/grasp.pred.export.Rd
b7c9b6fcf2df4d638d9c693b0a41ba558d3507d1
[]
no_license
cran/grasp
c46f16a28babb6cbed65aadbe2ddecc1a7214fd2
d57d11504ee99616e55a1a9c49e337cf1caf139d
refs/heads/master
2021-01-23T16:35:38.670044
2008-10-10T00:00:00
2008-10-10T00:00:00
null
0
0
null
null
null
null
UTF-8
R
false
false
684
rd
grasp.pred.export.Rd
\name{grasp.pred.export}
\alias{grasp.pred.export}
\title{ Internal GRASP function }
\description{
This function will export the predictions made in grasp.pred() and stored in
gr.predmat into an ASCII file ready to be read by Import Grid in ArcView or
ArcGIS.
}
\usage{
grasp.pred.export(gr.Yi)
}
\arguments{
\item{gr.Yi}{A vector containing the selected responses}
}
\details{
The resolution of the exported GRID can be set on page R of the GUI. When
observations are merged into a new cell, a mean prediction is calculated
(agglomeration).
}
\author{ Anthony.Lehmann@unige.ch }
\seealso{ grasp \code{\link{grasp}}, grasp.in \code{\link{grasp.in}}}
\keyword{models}
c1e4da6ba01f0cbbfec40ea0d670f1ac8a2f195a
29585dff702209dd446c0ab52ceea046c58e384e
/EcoGenetics/R/eco.2geneland.R
7c37d78e9988f5b2b5119763d6ddac3588c2eec2
[]
no_license
ingted/R-Examples
825440ce468ce608c4d73e2af4c0a0213b81c0fe
d0917dbaf698cb8bc0789db0c3ab07453016eab9
refs/heads/master
2020-04-14T12:29:22.336088
2016-07-21T14:01:14
2016-07-21T14:01:14
null
0
0
null
null
null
null
UTF-8
R
false
false
1,474
r
eco.2geneland.R
#' Create Geneland input files from an ecogen object
#'
#' @description Writes four plain-text tables (XY.txt, NAMES.txt, P.txt,
#' G.txt) into the working directory, in a format that can be loaded
#' in Geneland.
#' @param eco Object of class "ecogen".
#' @param ncod Number of digits coding each allele
#' (e.g., 1: x, 2: xx, 3: xxx, etc.).
#' @param ploidy Ploidy of the data.
#' @return XY.txt Matrix with coordinates.
#' @return NAMES.txt Matrix with row names.
#' @return P.txt Matrix with phenotypic data.
#' @return G.txt Matrix with genotypic data.
#' @examples
#'
#' \dontrun{
#'
#' data(eco.test)
#' eco.2geneland(eco, 1)
#'
#' }
#'
#' @author Leandro Roser \email{leandroroser@@ege.fcen.uba.ar}
#' @export

setGeneric("eco.2geneland", function(eco, ncod = NULL, ploidy = 2) {
  # All four output files share the same write.table() settings, so route
  # every write through one local helper.
  dump_txt <- function(what, file) {
    write.table(what, file, quote = FALSE,
                row.names = FALSE, col.names = FALSE)
  }

  dump_txt(eco@XY, "XY.txt")               # coordinates
  dump_txt(rownames(eco@XY), "NAMES.txt")  # individual labels
  dump_txt(eco@P, "P.txt")                 # phenotypic data
  # Genotypes are expanded from locus format to allele format before export.
  dump_txt(int.loc2al(eco@G, ncod = ncod, ploidy = ploidy), "G.txt")

  return("done!")
})
50b23cd7a287a017daccde0bb4261ad50e2a1e9b
03d77a50c862638cae0a60fcd0eb4e415cabc933
/Fish 558 Workshop/Workshop Day 3/ex3 Class.R
11c6c7baff6a3dacd832e02a870eb14f64f5a3c5
[]
no_license
DanOvando/FISH-558
66ef5135449edddf1d025aefec0db3c1a55be4e0
fd889bdae6425f06991f5019f5caf27f50b82ec0
refs/heads/master
2021-01-02T09:02:00.004504
2015-12-19T01:27:55
2015-12-19T01:27:55
42,471,204
0
0
null
null
null
null
UTF-8
R
false
false
4,180
r
ex3 Class.R
# ex3 Class.R --------------------------------------------------------------
# SIR (sampling-importance-resampling) fit of a simple whale population
# model to survey abundance data.  Two process models are supported:
# 'Schaefer' (logistic) and 'ExponModel' (exponential growth).
#
# Function definitions come first so the whole file can be source()d
# top-to-bottom; the analysis calls are at the bottom.
# NOTE(review): the original ended with a call to Ex3(), but no Ex3()
# function is defined (its wrapper was commented out), so that call
# errored and has been removed.

# =========================================================================
ReadData <- function() {
  # Read the survey series (Ex3a.csv: year, estimate, CV) and the catch
  # series (Ex3b.csv: year, catch) and return them as a named list.
  # NOTE(review): `lecture` is not defined in this file; it is assumed to
  # be set in the calling environment -- confirm before sourcing.
  TheData1 <- read.csv(paste('Fish 558 Workshop/', lecture, '/Ex3a.csv', sep = ''),
                       header = TRUE, stringsAsFactors = FALSE)
  TheData2 <- read.csv(paste('Fish 558 Workshop/', lecture, '/Ex3b.csv', sep = ''),
                       header = TRUE, stringsAsFactors = FALSE)
  Outs <- NULL
  Outs$SurveyYr  <- TheData1[, 1]
  Outs$SurveyEst <- TheData1[, 2]
  Outs$SurveyCV  <- TheData1[, 3]
  Outs$CatchYr   <- TheData2[, 1]
  Outs$CatchVal  <- TheData2[, 2]
  return(Outs)
}

# =========================================================================
PopModel <- function(Catch, r, K, years, InitPop, ExponModel) {
  # Project the population forward one year at a time from InitPop,
  # removing the previous year's catch.  Abundance is floored at 1e-5 so
  # the log-likelihood never sees a non-positive population.
  #
  # Catch      : vector of annual catches (same length as `years`)
  # r          : intrinsic growth rate
  # K          : carrying capacity (used by the Schaefer model only)
  # years      : vector of calendar years
  # InitPop    : population size in the first year
  # ExponModel : 'ExponModel' or 'Schaefer' -- selects the dynamics
  #
  # Returns a data frame with columns year, catch, n.
  time <- length(years)
  output <- as.data.frame(matrix(NA, nrow = time, ncol = 3))
  colnames(output) <- c('year', 'catch', 'n')
  output$catch <- Catch
  output$n[1] <- InitPop
  output$year <- years

  if (ExponModel == 'ExponModel') {
    # seq_len(time)[-1] is safe when time == 1 (2:time would count down).
    for (t in seq_len(time)[-1]) {
      output$n[t] <- max(1e-5, (1 + r) * output$n[t - 1] - output$catch[t - 1])
    }
  } else if (ExponModel == 'Schaefer') {
    for (t in seq_len(time)[-1]) {
      output$n[t] <- max(1e-5,
                         output$n[t - 1] +
                           (output$n[t - 1] * r) * (1 - output$n[t - 1] / K) -
                           output$catch[t - 1])
    }
  } else {
    # Previously an unknown model string silently left n as NA.
    stop("Unknown model: ", ExponModel, call. = FALSE)
  }
  return(output)
}

# =========================================================================
Likelihood <- function(Pop, SurveyYr, SurveyEst, SurveyCV, AddCV) {
  # Negative log-likelihood of the survey estimates given the predicted
  # populations, assuming lognormal observation error.
  #
  # Pop      : predicted population trajectory
  # SurveyYr : indices into Pop matching the survey observations
  # AddCV    : additional CV folded into the survey CV in quadrature
  UseCV <- sqrt(SurveyCV^2 + AddCV^2)
  Preds <- Pop[SurveyYr]
  Residuals <- log(UseCV) + 0.5 * (log(Preds) - log(SurveyEst))^2 / UseCV^2
  # Explicit return (the original ended with a bare assignment).
  return(sum(Residuals))
}

# =========================================================================
DoSir <- function(Nout = 1000, Model) {
  # Run the SIR algorithm: draw parameter vectors from uniform priors,
  # weight each by its likelihood, and resample until Nout vectors are
  # accepted.  Returns a data frame of accepted (K, r, Pop1965, AddCV,
  # NegLogLike) rows plus the average likelihood over all draws.

  # Read in the basic data
  TheData <- ReadData()
  Yr1 <- TheData$CatchYr[1]
  Catch <- TheData$CatchVal
  SurveyEst <- TheData$SurveyEst
  SurveyCV <- TheData$SurveyCV
  SurveyYr <- TheData$SurveyYr
  years <- TheData$CatchYr

  # Storage for the accepted parameter vectors
  Vals <- as.data.frame(matrix(0, ncol = 5, nrow = Nout))
  colnames(Vals) <- c('K', 'r', 'Pop1965', 'AddCV', 'NegLogLike')

  # Running total of likelihood over all first-stage draws, and draw count
  AveLike <- 0
  Ntest <- 0

  # Reset parameters for SIR
  Threshold <- exp(0)
  Cumu <- 0
  Ndone <- 0

  while (Ndone < Nout) {
    # Generate from (uniform) priors
    r <- runif(1, 0, 0.15)
    Pop1965 <- runif(1, 10000, 15000)
    AddCV <- runif(1, 0.1, 0.2)
    K <- runif(1, 20000, 50000)

    # Project the population under these parameters
    Pop <- PopModel(Catch = Catch, r = r, K = K, years = years,
                    InitPop = Pop1965, ExponModel = Model)

    # Attach the survey series by year (plyr::join preserves row order).
    survey <- data.frame(TheData$SurveyYr, TheData$SurveyEst, TheData$SurveyCV)
    colnames(survey) <- c('year', 'SurveyEst', 'SurveyCV')
    Pop <- join(Pop, survey, by = 'year')
    # ggplot(Pop, aes(year, n)) + geom_point() + geom_line(aes(year, SurveyEst))

    # Negative log-likelihood and hence the likelihood.
    # NOTE(review): -32.19 looks like a scaling constant tuned to keep the
    # likelihoods O(1) for the acceptance step -- confirm for other data.
    NegLogLike <- Likelihood(Pop = Pop$n, SurveyYr - Yr1 + 1,
                             SurveyEst, SurveyCV, AddCV)
    TheLike <- exp(-1 * NegLogLike - 32.19)

    # Determine if this parameter vector is to be saved
    Cumu <- Cumu + TheLike
    AveLike <- AveLike + TheLike
    Ntest <- Ntest + 1
    # Scalar condition: && (the original used elementwise &).
    while (Cumu > Threshold && Ndone < Nout) {
      Ndone <- Ndone + 1
      Cumu <- Cumu - Threshold
      Vals[Ndone, ] <- data.frame(K, r, Pop1965, AddCV, NegLogLike)
    }
  }

  Vals$AveLike <- AveLike / Ntest
  return(Vals)
}

# === Run the analysis ====================================================
set.seed(443)

# Call for the logistic (Schaefer) model
SchModel <- DoSir(Nout = 100, Model = 'Schaefer')
head(SchModel)
quartz()  # NOTE(review): macOS-only graphics device
ggplot(SchModel, aes(K, r)) + geom_hex()
ggplot(SchModel, aes(x = K, y = r)) +
  geom_point(aes(fill = NegLogLike), shape = 21, size = 2) +
  scale_fill_gradient(low = 'green', high = 'red')
ggplot(SchModel, aes(NegLogLike)) + geom_histogram()

# Call for the exponential model
ExpoModel <- DoSir(Nout = 100, Model = 'ExponModel')
cdc09ddd67bcaf42fdde26beb8f27dc6fa7782c0
3b5479d2035b0955de9e9240d65c65ffb560a131
/3_analysisi/frequency_table/plot.R
f5ae717cb92b843c8dd4bbf488e03b5650d4d840
[]
no_license
elara7/Application-of-Topic-Model-in-Evolution-of-Financial-Texts
b7106c5f69f2a125865ab3f6ae2517c43f9af29e
49452f89f3a4c4ca75cfffd40ab9cc8bab2fd69f
refs/heads/master
2020-03-17T03:48:02.274349
2018-05-13T08:29:26
2018-05-13T08:29:26
133,250,919
0
0
null
null
null
null
UTF-8
R
false
false
1,236
r
plot.R
# plot.R -- word-frequency plot and word clouds for the merged corpus and
# for each individual stock's corpus.  Word clouds are rendered to an HTML
# widget and screenshotted to PNG via webshot.

# library() (not require()) so a missing package errors immediately.
library(stringr)
library(data.table)
library(wordcloud2)
library(htmlwidgets)

# NOTE(review): absolute Windows paths; adjust before running elsewhere.
file_path <- 'C:\\Elara\\Documents\\paper\\3_analysisi\\frequency_table\\'
pic_path <- 'C:\\Elara\\Documents\\paper\\3_analysisi\\frequency_table\\pic\\'

# Overall corpus ----
all_file <- str_c(file_path, 'merged_frequency_all.csv')
all_data <- fread(all_file, encoding = 'UTF-8')
names(all_data) <- c('word', 'cnt')

# Frequency curve for the top-100 words (counts in thousands).
plot(all_data$cnt[1:100] / 1000, type = 'o',
     ylab = '词频(千次)', xlab = '词编号(按词频降序排列)')

# Word cloud of the top-100 words.
al <- wordcloud2(all_data[1:100, ], minRotation = 0, maxRotation = 0)
saveWidget(al, "1.html", selfcontained = FALSE)
webshot::webshot("1.html", str_c(pic_path, "0.png"),
                 vwidth = 1400, vheight = 900, delay = 10)

# Per-stock corpora ----
stockfiles <- dir(str_c(file_path, 'stocks\\'))
write.csv(as.data.frame(stockfiles),
          'C:\\Elara\\Documents\\paper\\3_analysisi\\frequency_table\\pic\\name.csv')

# seq_along() instead of 1:length() so an empty directory is a no-op
# (1:length() would iterate over c(1, 0)).
for (n in seq_along(stockfiles)) {
  stock_file <- str_c(file_path, 'stocks\\', stockfiles[n])
  stock_data <- readr::read_csv(stock_file, col_names = c('word', 'cnt'))
  st <- wordcloud2(stock_data[1:100, ], minRotation = 0, maxRotation = 0)
  saveWidget(st, "1.html", selfcontained = FALSE)
  webshot::webshot("1.html", str_c(pic_path, n, ".png"),
                   vwidth = 1400, vheight = 900, delay = 10)
}
3e2875c53e96261a78a0cac81ddac124d528a727
3fee6d185198ef39917b4fa30c643a4ecee4df15
/standings3pt.r
ad005f0f8a3d52c1db80de6ef97d25e1018ad95a
[ "Apache-2.0" ]
permissive
zzuum/3ptNHLstandings
f67216ce3f18b23a782515dd32b2453b8c8acf49
4162dad354d273e6e1ab3831ccdc6e582d74eaa0
refs/heads/master
2020-03-09T08:55:56.144037
2018-04-09T04:01:29
2018-04-09T04:01:29
128,700,269
0
0
null
null
null
null
UTF-8
R
false
false
1,950
r
standings3pt.r
# standings3pt.r -- recompute NHL standings under a 3-point system:
# 3 points for a regulation win, 2 for an OT or SO win, 1 for an OT or
# SO loss.  ROW (regulation + overtime wins) is the tie-breaker.

# NOTE(review): setwd() in a script is fragile; kept so the CSV resolves
# from the same directory as before.
setwd('~/Projects/NHL/')
standings.all <- read.csv('2018standings.csv')

# Pull the idx-th piece (wins = 1, losses = 2) out of a "W-L" record
# column and return it as a numeric vector.  vapply() instead of sapply()
# guarantees a character vector before conversion.
record_part <- function(record, idx) {
  as.numeric(vapply(strsplit(as.character(record), '-'), `[`, character(1), idx))
}

# Overtime and shootout wins/losses
standings.all$OTW <- record_part(standings.all$Overtime, 1)
standings.all$OTL <- record_part(standings.all$Overtime, 2)
standings.all$SOW <- record_part(standings.all$Shootout, 1)
standings.all$SOL <- record_part(standings.all$Shootout, 2)

# The Overall record's first field counts every win; subtract OT and SO
# wins to leave regulation wins only.
standings.all$RW <- record_part(standings.all$Overall, 1) -
  standings.all$OTW - standings.all$SOW

# Calculations! 3 points for regulation wins, 2 points for OT win,
# 2 points for SO win, 1 point for OT loss, 1 point for SO loss.
# Also, ROW are used for points tie breakers.
standings.all$ROW <- standings.all$RW + standings.all$OTW
standings.all$points <- 3 * standings.all$RW +
  2 * standings.all$OTW + 2 * standings.all$SOW +
  1 * standings.all$OTL + 1 * standings.all$SOL

# Order the data frame by points, then ROW, descending.
standings.all <- standings.all[with(standings.all, order(-points, -ROW)), ]

# Looking at standings division by division (column X holds the team label)
colnames(standings.all)[2] <- 'Team'
print('Pacific Standings:')
standings.all[standings.all$Division == 'P', c('X', 'points')]
print('Central Standings:')
standings.all[standings.all$Division == 'C', c('X', 'points')]
print('Metro Standings:')
standings.all[standings.all$Division == 'M', c('X', 'points')]
print('Atlantic Standings:')
standings.all[standings.all$Division == 'A', c('X', 'points')]
492d8a7f671f97f77c66c0371eb6b39704b18661
ffb4618297e98c856e7f3c51e64b3481455ab3bf
/Brier_analysis.R
00fd84079d1d767bf730b950cc0dbec4d162b3ce
[]
no_license
tristinb/nfl-reporter-predictions
3b62005c1ed34c72897319bc90fd385cfda35350
f05d5f64a965ef343aea8ecb2c4be36ece6174dd
refs/heads/master
2021-05-12T07:33:53.173942
2018-01-12T14:29:49
2018-01-12T14:29:49
117,249,111
0
0
null
null
null
null
UTF-8
R
false
false
2,143
r
Brier_analysis.R
# Brier-score analysis of NFL team reporters' predictions.
# (Set the working directory to the folder holding the CSVs before running.)
pundit_data <- read.csv('pundit_data.csv', header = TRUE)

# No-intercept regression: one coefficient per team = that reporter's
# average squared error (Brier score).
mod <- lm(squared_error ~ -1 + team, data = pundit_data)
results <- coef(summary(mod))[1:32, ]                  # 32 teams
coef_labels <- row.names(coef(summary(mod)))
row.names(results) <- substring(coef_labels[1:32], 5)  # drop the "team" prefix
results <- results[, 1]                                # estimates only
results <- results[order(results)]                     # best (lowest) first

# Bar chart of the raw scores.
pdf('Brier.pdf')
barplot(results, las = 2, ylab = 'Brier Score',
        main = 'Brier Score for Each Team Reporter \n (Lower Scores Indicate Better Predictions)')
dev.off()

# Adjusted model: also control for the team's total wins and the week.
mod_comp <- lm(squared_error ~ -1 + team + tot_wins + as.factor(week), data = pundit_data)

# Eyeball the standardised residuals for rough normality.
mod_stnd <- rstandard(mod_comp)
hist(mod_stnd)
qqnorm(mod_stnd)
qqline(mod_stnd)

# Same coefficient extraction for the adjusted model.
results_comp <- coef(summary(mod_comp))[1:32, ]
names_comp <- row.names(coef(summary(mod_comp)))
row.names(results_comp) <- substring(names_comp[1:32], 5)
results_comp <- results_comp[, 1]
results_comp <- results_comp[order(results_comp)]

pdf('adjusted_Brier.pdf')
barplot(results_comp, las = 2, ylab = 'Adjusted Brier Score',
        main = 'Adjusted Brier Score for Each Team Reporter')
dev.off()

# F-test: does controlling for wins/week improve the fit?
anova(mod, mod_comp, test = 'F')

# Build the summary table.
data2 <- read.csv('pundit_standings.csv', header = TRUE)
data2 <- data2[order(data2$pundit_team), ]  # alphabetical by team

# Re-derive the adjusted coefficients WITHOUT sorting so they stay in
# alphabetical team order and line up row-for-row with data2.
mod_comp <- lm(squared_error ~ -1 + team + tot_wins + as.factor(week), data = pundit_data)
results_comp <- coef(summary(mod_comp))[1:32, ]
names_comp <- row.names(coef(summary(mod_comp)))
row.names(results_comp) <- substring(names_comp[1:32], 5)
results_comp <- results_comp[, 1]

data2 <- cbind(data2, results_comp)          # both alphabetical, so this aligns
data2 <- data2[order(data2$results_comp), ]  # rank by adjusted Brier score
data2$standing_adj <- c(1:32)                # ranking 1 through 32
data_sub <- data2[, c(11, 2, 1, 3, 5, 10)]   # reorder the display columns
colnames(data_sub) <- c('Standing', 'Team', 'Reporter', 'Average Spread',
                        'Brier Score', 'Adjusted Brier Score')

library(xtable)
print(xtable(data_sub, digits = 3), include.rownames = FALSE)
1c46a68f30acfed3442bf9cd273860efa63df83b
9db8386eb77a7ce2d3ddfd18c6ad3bab2240b96e
/man/Compare_Plot_Function.Rd
883f6d50ea67a78ed01f0c7d9d4abe2c756f892a
[]
no_license
azavez/azavezHW6
447cfae0851c6c5f828e46a225659ca6e3f8a065
9583de0b55eae0ad939755b4da13bfd97cf5d663
refs/heads/master
2020-06-17T14:28:10.255877
2016-12-16T17:40:46
2016-12-16T17:40:46
74,995,204
0
0
null
null
null
null
UTF-8
R
false
false
1,905
rd
Compare_Plot_Function.Rd
\name{compare.plot}
\alias{compare.plot}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Comparison Barplot Function
}
\description{
This function is specifically designed for the URMC fitness dataset. It compares the averages for one group to the averages in a second group.
}
\usage{
compare.plot(x, compare_column, group1, group2, group1.label = "Group 1", group2.label = "Group 2", ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{x}{
The data frame containing the data}
  \item{compare_column}{
The column of the data frame that contains the data that will be compared.
}
  \item{group1}{
The first comparison group. It should be in the form of a list of character strings.
}
  \item{group2}{
The second comparison group. It should be in the form of a list of character strings.
}
  \item{group1.label}{
Legend text for the first comparison group. The default is "Group 1".
}
  \item{group2.label}{
Legend text for the second comparison group. The default is "Group 2".
}
  \item{...}{
This function passes additional arguments on to the barplot function.
}
}
\details{
%%  ~~ If necessary, more details than the description above ~~
}
\value{
%%  ~Describe the value returned
%%  If it is a LIST, use
%%  \item{comp1 }{Description of 'comp1'}
%%  \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
Alexis Zavez
}
\note{
%%  ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
data(sample.data)
compare.plot(sample.data, sample.data$Day, group1 = "Wednesday", group2 = "Monday")
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of  RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
3e922f0e722d523bcd695e7cfa90c33300fc4f3b
e2ca393f7aec100bd648505925cb2e9d6c725473
/01_plotly.R
5fcecbdb92b9f4ff1aa039e51e94f165de023e67
[]
no_license
ComunidadBioInfo/minicurso_mayo_2021
9a067f2b6beb0d73febb80796ddf30a23184a264
b7305b56636bad0a147b4147981f81ccd47c4818
refs/heads/main
2023-05-31T00:23:34.618062
2021-06-01T00:14:08
2021-06-01T00:14:08
328,855,753
0
2
null
null
null
null
UTF-8
R
false
false
790
r
01_plotly.R
## ----"cargar_librerias_plotly"----------------------------------------------------------------------------------------------------- ## Carguemos las librerías que vamos a usar para esta parte del mini curso library("palmerpenguins") library("ggplot2") library("plotly") ## ----"plotly_ejemplo"-------------------------------------------------------------------------------------------------------------- ## De ?plotly::ggplotly ggpenguins <- qplot(bill_length_mm, body_mass_g, data = palmerpenguins::penguins, color = species ) ## Versión estática creada con ggplot2 ggpenguins ## ----"plotly_ejemplo_parte2"------------------------------------------------------------------------------------------------------- ## Ahora en versión interactiva ggplotly(ggpenguins)
3402bce3d0a1ed72b9fa0d6e6dad1642587de8b9
a82ebc7c1dcc3eb671542f10645ab3d457853565
/r_modular/classifier_libsvm_modular.R
7e752f314b8676b16818536783a68f086ed1cefe
[]
no_license
joobog/shogun-eval
dac24f629744521760061c7979aa579129daa666
12b1ba2a67d5c661c6a11580634fb1a036e61af2
refs/heads/master
2021-03-12T23:24:41.686252
2016-11-23T10:04:04
2016-11-23T10:04:04
31,391,835
3
0
null
null
null
null
UTF-8
R
false
false
1,399
r
classifier_libsvm_modular.R
# Two-class SVM example: train a LIBSVM classifier on a toy data set, then
# predict labels for the test examples.
#
# Solver: LIBSVM (http://www.csie.ntu.edu.tw/~cjlin/libsvm/) with a Gaussian
# kernel of width 2.1, regularization C = 1.017 and precision epsilon = 1e-5.
# Also shows retrieving predictions from the trained model.
library(shogun)

# Toy features (transposed so examples are columns) and two-class labels.
fm_train_real <- t(as.matrix(read.table('../data/fm_train_real.dat')))
fm_test_real <- t(as.matrix(read.table('../data/fm_test_real.dat')))
label_train_twoclass <- as.double(read.table('../data/label_train_twoclass.dat')$V1)

# libsvm
print('LibSVM')

# Wrap the raw matrices in shogun feature objects.  The `ignored <-`
# assignments just swallow accessor return values so nothing auto-prints.
feats_train <- RealFeatures()
ignored <- feats_train$set_feature_matrix(fm_train_real)
feats_test <- RealFeatures()
ignored <- feats_test$set_feature_matrix(fm_test_real)

# Gaussian (RBF) kernel over the training features.
width <- 2.1
kernel <- GaussianKernel(feats_train, feats_train, width)

# SVM hyper-parameters.
C <- 1.017
epsilon <- 1e-5
num_threads <- as.integer(2)

labels <- BinaryLabels()
print(label_train_twoclass)
ignored <- labels$set_labels(label_train_twoclass)

# Train the classifier.
svm <- LibSVM(C, kernel, labels)
ignored <- svm$set_epsilon(epsilon)
ignored <- svm$parallel$set_num_threads(num_threads)
ignored <- svm$train()

# Re-initialise the kernel against the test features and predict.
ignored <- kernel$init(feats_train, feats_test)
lab <- svm$apply()
out <- lab$get_labels()
f3fbf1cab89ac290642acaf385f5b45bf93c37c7
cb7eaa28fbe6e970fe6d564d2d53bc8d8a333568
/Rcode_func.spec.R
13297d716fa500745f0e58e2b30eecbae2ab8b6c
[]
no_license
MarlenF/repertoire-orang
7531a606e8c9cae7ffe94b4071b31e63a1cc7b6e
500d59072156fdb0c43dd961623085072c586510
refs/heads/main
2023-04-11T20:27:22.190379
2021-05-05T10:12:18
2021-05-05T10:12:18
320,832,060
0
0
null
null
null
null
ISO-8859-1
R
false
false
4,912
r
Rcode_func.spec.R
## R code for analysis of functional specificity (3)
# Binomial GLMM: is a signal's functional specificity (max_cases out of
# no_cases) predicted by orang-utan species, research setting, and their
# interaction, with signal as a random effect?
# NOTE(review): rm(list=ls()) / setwd() in a script are discouraged -- they
# wipe the caller's session and hard-code a local path.
rm(list=ls())
setwd("C:/Users/Marlen Fröhlich/Documents/R")

# Load the raw data, keeping only complete cases for the columns the
# models below actually use.
fun <- read.table ("dataset_func.spec.csv", header=TRUE, sep=",", stringsAsFactors=TRUE)
xx=as.data.frame(na.omit(fun[, c("species","signal", "setting","dominance_con", "max_cases", "context", "no_cases", "no_subjects")]))

library(arm)
library(car)

test.data=xx
# z-standardise the covariates so coefficients are on comparable scales.
test.data$z.subjects=as.vector(scale(test.data$no_subjects))
test.data$z.sample=as.vector(scale(test.data$no_cases))
# Dummy: 1 when context is the 4th factor level, 0 otherwise.
test.data$context_pl=as.numeric(test.data$context==levels(test.data$context)[4])
contr=glmerControl(optimizer="bobyqa", optCtrl=list(maxfun=10000000))
# Two-column response (successes, failures) for the binomial GLMM.
dom_bin = cbind(test.data$max_cases, test.data$no_cases-test.data$max_cases)
# NOTE(review): this repeats the glmerControl() call above verbatim.
contr=glmerControl(optimizer="bobyqa", optCtrl=list(maxfun=10000000))

# collinearity: max vif = 2.7
vif(lm(rnorm(nrow(test.data)) ~ species + setting + log(no_cases) + log(no_subjects) , data = test.data))

#run the full model
mod.fun = glmer(formula = dom_bin ~ species * setting + context_pl + z.subjects + z.sample + +(1|signal), family =binomial, control=contr, data = test.data)
#run the null model
null.fun = glmer(formula = dom_bin ~ context_pl + z.subjects + z.sample + +(1|signal), family =binomial, control=contr, data = test.data)

# Sanity check: both models were fit on the same 128 observations.
length(residuals(mod.fun)) #128
length(residuals(null.fun)) #128

#Likelihood ratio test (full vs null)
as.data.frame(anova(null.fun, mod.fun, test="Chisq"))

# get chi square and p-values for each term
drop1(mod.fun, test ="Chisq")

#post hoc sidak test of interaction effect
require(lsmeans)
lsm <- lsmeans(mod.fun, list(pairwise ~ setting|species, pairwise ~ species|setting))
lsm

#get estimates and SE
round(summary(mod.fun)$coefficients, 3)

# ---------------------------------------------------------------------
# dummycoding for plot: mean-centred context dummy, used only in the
# plotting model below.
context_pl.c= test.data$context_pl- mean(test.data$context_pl)
# ---------------------------------------------------------------------
path <- "C:/Users/Marlen Fröhlich/Documents/R/"
#plot model (same fixed effects, centred context dummy)
plot.fun = glmer(formula = dom_bin ~ species * setting + context_pl.c + z.subjects + z.sample + +(1|signal), family =binomial, control=contr, data = test.data)

# Helper that spreads overlapping points horizontally (external script).
setwd("C:/Users/Marlen Fröhlich/Documents/R/roger/")
source("local_jitter_AlexHausmann.R")
require(effects)

# x positions: Bornean at 1, Sumatran at 2.
test.data$XPos <- ifelse(test.data$species=="Bor",1,2)
# Model-predicted effects (fit + CI) for each species x setting cell.
EF <- Effect(c("species","setting"),plot.fun,se=TRUE)
dat.EF <- as.data.frame(EF)

# Add colour columns: orange tones = wild, blue tones = captive.
test.data$colourBG <- ifelse(test.data$setting=="wild",rgb(255, 210, 128, maxColorValue=255),rgb(128, 128, 255, maxColorValue=255))
test.data$colourL <- ifelse(test.data$setting=="wild",rgb(255, 192, 77, maxColorValue=255),rgb(77, 77, 255, maxColorValue=255))

# Open empty plot (IMPORTANT: THE PLOT HAS TO BE OPEN BEFORE RUNNING THE FUNCTION)
# NOTE(review): `path` is reassigned here (backslash form); the forward-slash
# assignment above is never used.
path <- "C:\\Users\\Marlen Fröhlich\\Documents\\R\\"
# NOTE(review): paste0() has no `sep` argument; sep="" is pasted as an extra
# (empty) string here -- harmless but redundant.
svg(filename=paste0(path,"FunSpecN2.svg",sep=""), height=90/25.4, width=90/25.4, family="Arial", pointsize=9)
# Horizontal offset separating captive/wild model estimates within a species.
OF <- 0.1
par(mar=c(2.7, 3.2, 0.2, 0.2), mgp=c(1.3, 0.2, 0), tcl=-0.25, cex=1)
# Two overlaid empty plots: first sets the x label, second the y label.
plot(c(0.5,2.5),c(0.35,1), type="n", axes=FALSE, xlab="Orang-utan species", ylab="") ; par(new=TRUE)
plot(c(0.5,2.5),c(0.35,1), type="n", axes=FALSE, xlab="", ylab="Functional specificity", mgp=c(2.2, 0.2, 0))
# Jittered x coordinates; point size scales with sqrt of the sample size.
X0 <- local_jitter(fact_coord = test.data$XPos, gradual_coord = test.data$dominance_con, categories = as.character(test.data$setting), factorial_axis = 1, buffer = 0.45, sizes = sqrt(test.data$no_cases)/4, verbose=F, iterations=1000)
points(X0,test.data$dominance_con,cex=sqrt(test.data$no_cases)/4, pch=21, bg=test.data$colourBG, col=test.data$colourL)
# Model estimates (diamonds) with CI whiskers: rows of dat.EF are
# indexed 1..4; blue = captive, orange = wild.
arrows(1-OF,dat.EF$lower[1],1-OF,dat.EF$upper[1],code=3,length=0.1,angle=90)
points(x=1-OF,y=dat.EF$fit[1], pch=23, col="black", bg="blue", cex=3)
arrows(1+OF,dat.EF$lower[3],1+OF,dat.EF$upper[3],code=3,length=0.1,angle=90)
points(x=1+OF,y=dat.EF$fit[3], pch=23, col="black", bg="orange", cex=3)
arrows(2-OF,dat.EF$lower[2],2-OF,dat.EF$upper[2],code=3,length=0.1,angle=90)
points(x=2-OF,y=dat.EF$fit[2], pch=23, col="black", bg="blue", cex=3)
arrows(2+OF,dat.EF$lower[4],2+OF,dat.EF$upper[4],code=3,length=0.1,angle=90)
points(x=2+OF,y=dat.EF$fit[4], pch=23, col="black", bg="orange", cex=3)
axis(1,at=c(1,2), label=c("Bornean","Sumatran"), tcl=-0.25)
axis(2,at=seq(0,1,by=0.2), label=c("0.0","0.2","0.4","0.6","0.8","1.0"), tcl=-0.25, las=2, mgp=c(1.2, 0.4, 0))
legend("topright", pt.bg=c("blue","orange"), pch=23, legend=c("captive","wild"), bty="n", pt.cex=2)
box()
dev.off()

# Leftover alternative legend styling (kept from the original):
#border=c(rgb(77, 77, 255, maxColorValue=255),rgb(255, 192, 77, maxColorValue=255)), fill=c(rgb(128, 128, 255, maxColorValue=255), rgb(255, 210, 128, maxColorValue=255))

# Mean functional specificity per species x setting cell.
domrep=aggregate(x=test.data$dominance_con, by=test.data[, c("species", "setting")], FUN=mean)
domrep
ff12232e2b384c642f559b19aa682da8d8ec57d0
456f00cd7e3d19f5be19313b4c1d0bd35b80d626
/plants.R
6d588ac9919b8867adfd0aa597d3e7a26470f17c
[]
no_license
pearselab/r-world-aspri951
af47fb158149b55d059c0ba85ce2858637778c42
4988abe28d5c9d3835eebfb56a1e152264827466
refs/heads/master
2020-04-06T04:28:53.809611
2017-03-17T22:56:51
2017-03-17T22:56:51
72,656,588
0
0
null
null
null
null
UTF-8
R
false
false
34,501
r
plants.R
# plants
# setup.plants: validates the reproduction, survival and competition
# parameters for the simulation and returns them, named, as one list.
#  - repro and surv vectors must be the same length as the number of species
#  - the competition argument must be a square matrix with dim equal to
#    the number of species
#  - stop() if any condition is not met
#
# How to set names of a matrix?  Test:
bekkeh <- matrix(1:16, nrow = 4, ncol = 4)
rownames(bekkeh) <- c("You", "want", "sum", "blue?")
colnames(bekkeh) <- c("learnin'", "to jump", "for", "bekkeh")
bekkeh["sum", "for"]
# Cool. Can call an INDEX by NAME. USEFUL.

#' Validate and bundle the per-species simulation parameters.
#'
#' @param reproduce Numeric vector of per-species reproduction probabilities.
#' @param survive Numeric vector of per-species survival probabilities.
#' @param compete.matrix Square matrix of pairwise competition parameters.
#' @param names Optional character vector of species names; defaults to
#'   letters "a", "b", "c", ...
#' @return A list with named elements reproduce, survive, compete.matrix,
#'   and names; the vectors and matrix dimnames carry the species names.
setup.plants <- function(reproduce, survive, compete.matrix, names = NULL){
  if(is.null(names)){
    names <- letters[1:length(reproduce)]
  }
  if(length(names) != length(reproduce)){
    stop("Each plant must have a name.")
  }
  if(length(reproduce) != length(survive)){
    stop("Reproduction and survival parameters needed for each species.")
  }
  if(nrow(compete.matrix) != length(reproduce) || ncol(compete.matrix) != length(reproduce)){
    stop("Competition matrix must have competition parameters for each pairwise combination of species.")
  }
  # FIX: the original wrote `any(reproduce) > 1` etc., which coerces the
  # probabilities to logical and never detects out-of-range values; the
  # comparison must be inside any().
  if(any(reproduce > 1) || any(reproduce < 0) || any(survive > 1) || any(survive < 0)){
    stop("Reproduction and survival probabilities must be values between zero and one!")
  }
  # FIX: the original assigned setNames(reproduction, names) to
  # `reproduction`, an undefined variable, so every call errored; the
  # parameter is `reproduce`.
  reproduce <- setNames(reproduce, names)
  survive <- setNames(survive, names)
  rownames(compete.matrix) <- names
  colnames(compete.matrix) <- names
  return(list(reproduce = reproduce, survive = survive, compete.matrix = compete.matrix, names = names))
}
# Open question (original note): for the competition matrix, is an entry the
# probability that the ROW species survives, or the COLUMN species?
#Will need to define #survive: function that determines whether an individual survives to the next time step #this function will do something to ONE CELL #THis function will also need to match probabilities for EACH SPECIES to the species in the cell #So will need survive vector in function #for loop... for i in 1: length(names), if cell = name i, then run if-statement about survival #No, not needed! #Can check PROPER SPECIES by calling the NAME as the INDEX (see bekkeh["blue?", "for"]) #So if cell <- plant A, then run statement: if(runif(weifj) <= probability plant A survives), put "plant A" in cell #else, put "" in the cell (blank, NOT NA) survival <- function(cell, setup.plants){ if(is.na(cell) == TRUE){ cell <- NA } if(runif(1) <= setup.plants$survive[cell]){ cell <- cell } else { cell <- "" } return(cell) } #test: survive <- c(0.5, 0.9, 0.01) survive <- setNames(survive, c("coin", "super", "sucky")) blahblah <- list(survive = survive, etc = "blah", blargh = "fatcat") cell_1 <- "coin" cell_2 <- "super" cell_3 <- "sucky" cell_4 <- NA survival(cell_1, blahblah) survival(cell_2, blahblah) survival(cell_3, blahblah) survival(cell_4, blahblah) #problem, not calling survival because list stuff NOT NAMED. #Need NAMES when MAKING LIST or LIST HAS NO NAMES and CANNOT CALL PARTS OF LIST #New problem: doesn't work for NA. Says missing value where T/F needed. Probably trying to run BOTH if-statements? #Try making else-statement to fix this? survival <- function(cell, setup.plants){ if(is.na(cell) == TRUE){ cell <- NA } else if(runif(1) <= setup.plants$survive[cell]){ cell <- cell } else { cell <- "" } return(cell) } #Problem solved. Final test: cell_5 <- "" survival(cell_5, blahblah) #Nope, same problem. 
# (continuation of the note above) ...the previous draft reaches the second
# if-statement and finds the empty-string "name" is not in the survive vector.
#
# survival, take 3: explicitly handle NA (water), "" (empty cell), and
# unknown species names before drawing the survival lottery.
survival <- function(cell, setup.plants){
  if(is.na(cell) == TRUE){
    cell <- NA
  }
  else if(cell == ""){
    cell <- ""
  }
  else if(is.na(setup.plants$survive[cell]) == TRUE){
    stop("You just discovered a new species of plant! Whatever is in this cell shouldn't exist... try again.")
  }
  else if(runif(1) <= setup.plants$survive[cell]){
    cell <- cell
  }
  else if(runif(1) > setup.plants$survive[cell]){
    cell <- ""
  }
  else {
    stop("How have you even done this...")
  }
  return(cell)
}

# Test: "" passes through; an unknown species name triggers the stop().
# NOTE(review): survival(cell_6, ...) therefore halts a non-interactive
# source() of this script on purpose.
cell_5 <- ""
cell_6 <- "George"
survival(cell_5, blahblah)
survival(cell_6, blahblah)
#Done!

# plant.timestep: apply survival() to every cell of a plant matrix and
# return the updated matrix (the input matrix is left untouched).
plant.timestep <- function(plant.matrix, setup.plants){
  new.plant.matrix <- plant.matrix
  # FIX: seq_len() instead of 1:nrow()/1:ncol(), so a 0-row or 0-col
  # matrix does not accidentally iterate over c(1, 0).
  for(i in seq_len(nrow(plant.matrix))){
    for(j in seq_len(ncol(plant.matrix))){
      new.plant.matrix[i, j] <- survival(plant.matrix[i,j], setup.plants)
    }
  }
  return(new.plant.matrix)
}

# A 4x4 test matrix whose rows cycle coin / super / sucky / coin.
test.plants <- matrix(nrow = 4, ncol = 4, c("coin", "super", "sucky", "coin"))
survive <- c(0.5, 0.9, 0.01)
survive <- setNames(survive, c("coin", "super", "sucky"))
blahblah <- list(survive = survive, etc = "blah", blargh = "fatcat")
plant.timestep(test.plants, blahblah)

# Take 3 drew runif(1) twice, so a plant could fall below the threshold on
# the first draw but above it on the second.  Final version draws the
# random number exactly once and reuses it.
survival <- function(cell, setup.plants){
  if(is.na(cell) == TRUE){
    cell <- NA
  }
  else if(cell == ""){
    cell <- ""
  }
  else if(is.na(setup.plants$survive[cell]) == TRUE){
    stop("You just discovered a new species of plant!
 Whatever is in this cell shouldn't exist... try again.")
  }
  else {
    random.draw <- runif(1)
    if(random.draw <= setup.plants$survive[cell]){
      cell <- cell
    }
    else if(random.draw > setup.plants$survive[cell]){
      cell <- ""
    }
  }
  return(cell)
}
#Problem solved. Plant.timestep now works.

# run.plant.ecosystem: seeds plants into an initial matrix (same size as
# the terrain), then builds an array by applying plant.timestep each step.
# The 3rd dimension is timesteps + 1 so slice 1 holds the t = 0 state.
run.plant.ecosystem <- function(terrain, setup.plants, plant.timestep, numb.plants.per.sp, timesteps){
  # FIX: the original guard was left unfinished -- `if(numb.plants.per.sp != )`
  # is a parse error.  Completed to the check the surrounding notes describe:
  # one initial population count is required per species.
  if(length(numb.plants.per.sp) != length(setup.plants$names)){
    stop("An initial population size is needed for every plant species.")
  }
  plant.array <- array(dim = c(nrow(terrain), ncol(terrain), timesteps + 1)) #Creates plant array to put stuff into.
}

# Seeding plan (original notes): shuffle the vector of plant names so there
# is no bias towards the first or last species when filling, and never cull
# a random number of plants that happen to land on water -- the exact number
# of plants seeded should be known and chosen up front.
# --- Notes: linear (single-index) matrix addressing ------------------------
# R matrices accept a single subscript that counts down columns, left to
# right: m[6] of a 4x4 matrix is row 2, column 2, and which(m == "coin")
# returns exactly those linear indices.  seed.plants() leans on this to place
# plants without row/column bookkeeping, guaranteeing that no two plants are
# seeded onto the same cell and none land on water.

# seed.plants(): build the initial (t = 0) plant matrix.
#
# terrain        -- matrix the same shape as the world; NA cells are water.
# setup.plants   -- list with (at least) $names, the vector of species names.
# number.plants  -- vector of initial population sizes, one per species, in
#                   the same order as setup.plants$names.
#
# Returns a character matrix: "" for bare ground, NA for water, and species
# names scattered uniformly at random (shuffled, so no species is biased by
# seeding order).
seed.plants <- function(terrain, setup.plants, number.plants) {
  if (length(number.plants) != length(setup.plants$names)) {
    stop("There must be an initial population size given for every plant species in the 'number of plants per species' vector!")
  }
  # BUG FIX: the draft used nrow(terrain)^2, silently assuming a square
  # terrain; nrow * ncol works for any shape.
  all.terrain.locations <- seq_len(nrow(terrain) * ncol(terrain))
  water.locations <- which(is.na(terrain))
  # BUG FIX: x[-integer(0)] returns an EMPTY vector, so only drop water
  # cells when there actually are some.
  if (length(water.locations) > 0) {
    terrain.locations.minus.water <- all.terrain.locations[-water.locations]
  } else {
    terrain.locations.minus.water <- all.terrain.locations
  }
  total.number.plants <- sum(number.plants)
  if (total.number.plants > length(terrain.locations.minus.water)) {
    stop("There are more plants to seed than there are locations on the terrain to put plants!", "\n",
         " You currently have ", total.number.plants, " plants, and only ",
         length(terrain.locations.minus.water), " places to put them!")
  }
  # sample() draws without replacement, so every plant gets a distinct cell.
  locations.for.plants.to.go <- sample(terrain.locations.minus.water,
                                       total.number.plants)
  number.plants <- setNames(number.plants, setup.plants$names)
  plants.to.add <- rep(names(number.plants), times = number.plants)
  # Shuffle so the assignment of species to drawn locations is unbiased.
  shuffled.plants.to.add <- sample(plants.to.add, length(plants.to.add))
  plant.matrix <- matrix("", nrow = nrow(terrain), ncol = ncol(terrain))
  plant.matrix[water.locations] <- NA
  plant.matrix[locations.for.plants.to.go] <- shuffled.plants.to.add
  return(plant.matrix)
}

# run.plant.ecosystem(): seed the world, then run `timesteps` survival passes.
# Sheet 1 of the returned array is the initial (t = 0) state, so the array
# has timesteps + 1 sheets in total.
run.plant.ecosystem <- function(terrain, setup.plants, numb.plants.per.sp,
                                timesteps) {
  plant.array <- array(dim = c(nrow(terrain), ncol(terrain), timesteps + 1))
  plant.array[, , 1] <- seed.plants(terrain, setup.plants, numb.plants.per.sp)
  for (i in seq_len(timesteps)) {
    plant.array[, , i + 1] <- plant.timestep(plant.array[, , i], setup.plants)
  }
  return(plant.array)
}

# --- Manual smoke test -----------------------------------------------------
terrain <- matrix(nrow = 9, ncol = 9, sample(27, 27))
terrain[which(terrain < 10)] <- NA
names <- c("coin", "super", "sucky")
survive <- setNames(c(0.5, 0.9, 0.01), names)
setup.plants <- list(survive = survive, etc = "blah", blargh = "fatcat",
                     names = names)
numb.plants.per.sp <- c(5, 2, 7)
# Guarded so the demo is skipped if the helpers above were not sourced.
# As observed: "sucky" plants die out first, then "coin" (a 50/50 flip per
# step), while "super" plants persist longest.
if (exists("plant.timestep")) {
  run.plant.ecosystem(terrain, setup.plants, numb.plants.per.sp, 8)
}

# (Exploratory drafts of the offspring-placement logic that previously lived
# here were folded into offspring.location(), defined below.)
# offspring.location(): pick a random cell adjacent (8-neighbourhood) to the
# parent at (F0.row, F0.col), staying on the grid.  Returns c(row, col).
#
# BUG FIXES over the debug drafts previously kept here:
#  * the parent's own cell is always row 1 of the expand.grid result (both
#    offset vectors start with 0), so it is dropped directly instead of via a
#    `for(k ...)` loop whose brace accidentally enclosed -- and returned
#    from -- the rest of the function body on the first iteration;
#  * the off-grid guard read `length(col.to.remove > 0)`, i.e. the length of
#    a logical vector, so the intended `> 0` comparison never happened.
offspring.location <- function(F0.row, F0.col, plant.matrix) {
  candidates <- as.matrix(expand.grid(F0.row + c(0, -1, 1),
                                      F0.col + c(0, -1, 1)))
  candidates <- candidates[-1, , drop = FALSE]  # row 1 is the parent itself
  candidate.rows <- candidates[, 1]
  candidate.cols <- candidates[, 2]
  off.grid <- which(candidate.rows < 1 | candidate.rows > nrow(plant.matrix) |
                    candidate.cols < 1 | candidate.cols > ncol(plant.matrix))
  if (length(off.grid) > 0) {
    candidate.rows <- candidate.rows[-off.grid]
    candidate.cols <- candidate.cols[-off.grid]
  }
  # Even a corner cell keeps 3 valid neighbours, so at least 3 candidates
  # always remain (no length-1 sample() trap).
  pick <- sample(seq_along(candidate.rows), 1)
  return(c(candidate.rows[pick], candidate.cols[pick]))
}

# plant.timestep(), extended: survival pass first, then one reproduction
# pass over the survivors.  Survival writes into a copy so no cell's fate
# depends on neighbours processed earlier in the same step.
# (Debug reproduction drafts that lived here were superseded; see the final
# reproduction() below.)
plant.timestep <- function(plant.matrix, setup.plants) {
  new.plant.matrix <- plant.matrix
  for (i in seq_len(nrow(plant.matrix))) {
    for (j in seq_len(ncol(plant.matrix))) {
      new.plant.matrix[i, j] <- survival(plant.matrix[i, j], setup.plants)
    }
  }
  repro.plant.matrix <- reproduction(new.plant.matrix, setup.plants)
  return(repro.plant.matrix)
}
# reproduction(), pre-competition version: each plant gets one chance
# (prob setup.plants$reproduce[species]) to drop an offspring onto a random
# neighbouring cell chosen by offspring.location().  Offspring are written
# into a copy (repro.plant.matrix) while parents are read from plant.matrix,
# so new offspring never reproduce within the same timestep.
#
# BUG FIX: an earlier draft assigned into `new.plant.matrix`, a variable
# that does not exist in this function; all writes now go to
# repro.plant.matrix.  (A first competition draft also passed a single bare
# probability to sample(), which errors for a two-element draw -- the
# corrected compete() appears later in the file with prob = c(p, 1 - p).)
reproduction <- function(plant.matrix, setup.plants) {
  repro.plant.matrix <- plant.matrix
  for (i in seq_len(nrow(plant.matrix))) {
    for (j in seq_len(ncol(plant.matrix))) {
      cell <- plant.matrix[i, j]
      if (is.na(cell) || cell == "") {
        next  # water and bare ground do not reproduce
      }
      if (is.na(setup.plants$reproduce[cell])) {
        stop("You just discovered a new species of plant! Whatever is in this cell shouldn't exist... try again.")
      }
      if (runif(1) <= setup.plants$reproduce[cell]) {
        target <- offspring.location(i, j, plant.matrix)
        if (is.na(plant.matrix[target[1], target[2]])) {
          repro.plant.matrix[target[1], target[2]] <- NA  # water stays water
        } else {
          repro.plant.matrix[target[1], target[2]] <- cell
        }
      }
    }
  }
  return(repro.plant.matrix)
}
# compete(): resolve an offspring landing on an occupied cell.
# setup.plants$compete.matrix[parent, occupant] is the probability that the
# PARENT's offspring takes the cell; sample() draws the winner with
# prob = c(p, 1 - p).
#
# (An earlier debug duplicate of this function left cat()/print() output in
# place; removed here.)
compete <- function(parent.cell, potential.offspring.cell, setup.plants) {
  p.parent.wins <- setup.plants$compete.matrix[parent.cell,
                                               potential.offspring.cell]
  winner <- sample(c(parent.cell, potential.offspring.cell), 1,
                   prob = c(p.parent.wins, 1 - p.parent.wins))
  return(winner)
}

# reproduction(), final version with competition.  Each plant in
# plant.matrix gets one chance (prob setup.plants$reproduce[species]) to
# drop an offspring onto a random neighbouring cell:
#   * water (NA)  -> stays water;
#   * empty ("")  -> the offspring takes the cell;
#   * occupied    -> compete() decides who keeps it.
# Offspring are written into a copy (repro.plant.matrix) while parents are
# read from plant.matrix, so new offspring never reproduce within the same
# timestep.  Note the occupied check reads repro.plant.matrix: an offspring
# placed earlier in this pass can itself be competed against.
reproduction <- function(plant.matrix, setup.plants) {
  repro.plant.matrix <- plant.matrix
  for (i in seq_len(nrow(plant.matrix))) {
    for (j in seq_len(ncol(plant.matrix))) {
      cell <- plant.matrix[i, j]
      if (is.na(cell) || cell == "") {
        next  # water and bare ground do not reproduce
      }
      if (is.na(setup.plants$reproduce[cell])) {
        stop("You just discovered a new species of plant! Whatever is in this cell shouldn't exist... try again.")
      }
      if (runif(1) <= setup.plants$reproduce[cell]) {
        target <- offspring.location(i, j, plant.matrix)
        target.row <- target[1]
        target.col <- target[2]
        if (is.na(plant.matrix[target.row, target.col])) {
          repro.plant.matrix[target.row, target.col] <- NA  # water stays water
        } else if (repro.plant.matrix[target.row, target.col] == "") {
          repro.plant.matrix[target.row, target.col] <- cell
        } else {
          repro.plant.matrix[target.row, target.col] <-
            compete(cell, repro.plant.matrix[target.row, target.col],
                    setup.plants)
        }
      }
    }
  }
  return(repro.plant.matrix)
}
da5f888fa9f023c3fa1f51c537eb6b93679caea4
de004591ab9dcc1053a7fae7a1b695bf25bdddb8
/R/superResolutionUtilities.R
9e8ad87cedacfff7526218a71379a5b79c34761a
[]
no_license
msharrock/ANTsRNet
1aea8a5afcebf70f13e7b80b70e912a676901f8b
0836f4faafa96e7a7b29497e56a48129c47292d0
refs/heads/master
2020-03-28T12:50:46.610970
2018-09-11T01:31:14
2018-09-11T01:31:14
null
0
0
null
null
null
null
UTF-8
R
false
false
7,091
r
superResolutionUtilities.R
#' Peak-signal-to-noise ratio for super-resolution models.
#'
#' Keras-backend implementation of PSNR, suitable for use as a metric or
#' (negated) as a loss.  Based on the keras loss functions (losses.R):
#'
#' \url{https://github.com/rstudio/keras/blob/master/R/losses.R}
#'
#' @param y_true True labels (Tensor)
#' @param y_pred Predictions (Tensor of the same shape as \code{y_true})
#'
#' @details Loss functions are to be supplied in the loss parameter of the
#' \code{compile()} function.
#'
#' @export
peak_signal_to_noise_ratio <- function( y_true, y_pred )
{
  K <- keras::backend()
  # PSNR = -10 * log10( MSE )
  mse <- K$mean( K$square( y_pred - y_true ) )
  -10.0 * K$log( mse ) / K$log( 10.0 )
}
attr( peak_signal_to_noise_ratio, "py_function_name" ) <-
  "peak_signal_to_noise_ratio"

#' Negated peak-signal-to-noise ratio, usable as a loss.
#'
#' @param y_true true encoded labels
#' @param y_pred predicted encoded labels
#'
#' @rdname loss_peak_signal_to_noise_ratio_error
#' @export
loss_peak_signal_to_noise_ratio_error <- function( y_true, y_pred )
{
  -peak_signal_to_noise_ratio( y_true, y_pred )
}
attr( loss_peak_signal_to_noise_ratio_error, "py_function_name" ) <-
  "peak_signal_to_noise_ratio_error"

#' Extract 2-D or 3-D image patches.
#'
#' @param image Input ANTs image
#' @param patchSize Width, height, and depth (if 3-D) of patches.
#' @param maxNumberOfPatches Maximum number of patches returned.  If
#' "all" is specified, then all overlapping patches are extracted.
#' @param randomSeed integer seed that allows reproducible patch extraction
#' across runs.
#'
#' @return a randomly selected list of patches.
#' @author Tustison NJ
#' @examples
#'
#' library( ANTsR )
#' i = ri( 1 )
#' patchSet1 = extractImagePatches( i, c( 32, 32 ), 10, randomSeed = 0 )
#' patchSet2 = extractImagePatches( i, c( 32, 32 ), 10, randomSeed = 1 )
#' patchSet3 = extractImagePatches( i, c( 32, 32 ), 10, randomSeed = 0 )
#'
#' @export
extractImagePatches <- function( image, patchSize, maxNumberOfPatches = 'all',
  randomSeed )
{
  # Seed only when the caller asked for reproducibility.
  if( !missing( randomSeed ) ) {
    set.seed( randomSeed )
  }

  dims <- dim( image )
  nDims <- length( dims )

  if( nDims != length( patchSize ) ) {
    stop( "Mismatch between the image size and the specified patch size.\n" )
  }
  if( any( patchSize > dims ) ) {
    stop( "Patch size is greater than the image size.\n" )
  }

  voxels <- as.array( image )

  # Pull out the patch whose lowest-index corner is `corner`.
  grab <- function( corner ) {
    last <- corner + patchSize - 1
    if( nDims == 2 ) {
      voxels[corner[1]:last[1], corner[2]:last[2]]
    } else {
      voxels[corner[1]:last[1], corner[2]:last[2], corner[3]:last[3]]
    }
  }

  patchList <- list()
  if( tolower( maxNumberOfPatches ) == 'all' ) {
    # Every overlapping patch position, in row-major (outer-to-inner) order.
    strides <- dims - patchSize + 1
    if( nDims == 2 ) {
      idx <- 1
      for( r in seq_len( strides[1] ) ) {
        for( s in seq_len( strides[2] ) ) {
          patchList[[idx]] <- grab( c( r, s ) )
          idx <- idx + 1
        }
      }
    } else if( nDims == 3 ) {
      idx <- 1
      for( r in seq_len( strides[1] ) ) {
        for( s in seq_len( strides[2] ) ) {
          for( u in seq_len( strides[3] ) ) {
            patchList[[idx]] <- grab( c( r, s, u ) )
            idx <- idx + 1
          }
        }
      }
    } else {
      stop( "Unsupported dimensionality.\n" )
    }
  } else {
    # Uniformly random patch positions (one sample.int() call per dimension
    # per patch, matching the documented reproducible-seed behaviour).
    for( n in seq_len( maxNumberOfPatches ) ) {
      corner <- vapply( seq_len( nDims ),
        function( d ) sample.int( dims[d] - patchSize[d] + 1, 1 ),
        numeric( 1 ) )
      if( nDims == 2 || nDims == 3 ) {
        patchList[[n]] <- grab( corner )
      } else {
        stop( "Unsupported dimensionality.\n" )
      }
    }
  }
  return( patchList )
}

#' Reconstruct image from a list of patches.
#'
#' @param patchList list of overlapping patches defining an image.
#' @param domainImage Image to define the geometric information of the
#' reconstructed image.
#'
#' @return an ANTs image.
#' @author Tustison NJ
#' @examples
#' \dontrun{
#' }
#' @importFrom ANTsRCore as.antsImage
#' @export
reconstructImageFromPatches <- function( patchList, domainImage )
{
  imageSize <- dim( domainImage )
  dimensionality <- length( imageSize )
  patchSize <- dim( patchList[[1]] )

  # Every overlapping patch position must be present.
  numberOfPatches <- prod( imageSize - patchSize + 1 )
  if( numberOfPatches != length( patchList ) )
    {
    stop( "Not the right number of patches.\n" )
    }

  imageArray <- array( data = 0, dim = imageSize )

  # Number of patches covering pixel i along a dimension of size S with
  # patch width p: valid start positions run from max(1, i - p + 1) to
  # min(i, S - p + 1), i.e. min(i, p, S - i + 1, S - p + 1).
  # BUG FIX: the draft used min(i, p, S - i + 1), omitting the S - p + 1
  # term, which over-counts (and so under-scales) whenever S < 2 * p - 1.
  coverage <- function( i, d )
    {
    min( i, patchSize[d], imageSize[d] - i + 1,
         imageSize[d] - patchSize[d] + 1 )
    }

  count <- 1
  if( dimensionality == 2 )
    {
    # Accumulate every patch into its footprint ...
    for( i in seq_len( imageSize[1] - patchSize[1] + 1 ) )
      {
      for( j in seq_len( imageSize[2] - patchSize[2] + 1 ) )
        {
        startIndex <- c( i, j )
        endIndex <- startIndex + patchSize - 1
        imageArray[startIndex[1]:endIndex[1], startIndex[2]:endIndex[2]] <-
          imageArray[startIndex[1]:endIndex[1], startIndex[2]:endIndex[2]] +
          patchList[[count]]
        count <- count + 1
        }
      }
    # ... then divide each pixel by the number of patches that covered it.
    for( i in seq_len( imageSize[1] ) )
      {
      for( j in seq_len( imageSize[2] ) )
        {
        imageArray[i, j] <- imageArray[i, j] /
          ( coverage( i, 1 ) * coverage( j, 2 ) )
        }
      }
    } else if( dimensionality == 3 ) {
    for( i in seq_len( imageSize[1] - patchSize[1] + 1 ) )
      {
      for( j in seq_len( imageSize[2] - patchSize[2] + 1 ) )
        {
        for( k in seq_len( imageSize[3] - patchSize[3] + 1 ) )
          {
          startIndex <- c( i, j, k )
          endIndex <- startIndex + patchSize - 1
          imageArray[startIndex[1]:endIndex[1], startIndex[2]:endIndex[2],
            startIndex[3]:endIndex[3]] <-
            imageArray[startIndex[1]:endIndex[1], startIndex[2]:endIndex[2],
              startIndex[3]:endIndex[3]] + patchList[[count]]
          count <- count + 1
          }
        }
      }
    # BUG FIX: the draft also incremented `count` inside this normalization
    # loop -- a dead leftover from the accumulation loop above; removed.
    for( i in seq_len( imageSize[1] ) )
      {
      for( j in seq_len( imageSize[2] ) )
        {
        for( k in seq_len( imageSize[3] ) )
          {
          imageArray[i, j, k] <- imageArray[i, j, k] /
            ( coverage( i, 1 ) * coverage( j, 2 ) * coverage( k, 3 ) )
          }
        }
      }
    } else {
    stop( "Unsupported dimensionality.\n" )
    }
  return( as.antsImage( imageArray, reference = domainImage ) )
}
e595e1057e7abe41fc1ca227ba2448f752f03461
e970f01992264a2ade46e44c115741b20a108fe6
/Social_Networks/Twitter_analysis.R
86a631ed05948c19f3cf9000e1d14619e5406f5d
[]
no_license
ovdavid28/Advanced_Data_Analysis
9b0b2a26e7253b2336f82bc0ad176edfd89a4bb9
378864d49da74185f2048e9c6c8b892f06f3db23
refs/heads/master
2020-05-02T04:41:03.358925
2019-04-06T05:49:38
2019-04-06T05:49:38
177,755,700
0
0
null
null
null
null
UTF-8
R
false
false
1,302
r
Twitter_analysis.R
# Build and plot a retweet network for the 'climatechange' search term using
# the twitteR package (tweet retrieval) and igraph (network construction/plot).
install.packages(c('twitteR','igraph','dplyr'))
library(twitteR)
library(igraph)
library(dplyr)

# Twitter API credentials (redacted -- fill in before running).
api_key<-''
api_secret<-''
access_token<-'-'
access_token_secret<-''
setup_twitter_oauth(api_key,api_secret,access_token,access_token_secret)

# Pull up to 500 recent tweets for the search term and flatten to a data frame.
alltweets<-searchTwitter('climatechange', n = 500)
alltweets<-twListToDF(alltweets)
tweets<-alltweets[1:500,]
tweets

# Keep only retweets; the retweeted account's handle sits between "RT @"
# (position 5) and the first colon of the tweet text.
split_point = split(tweets, tweets$isRetweet)
reTweets = mutate(split_point[['TRUE']],
                  sender = substr(text, 5, regexpr(':', text) - 1))

# Edge list: sender (retweeted account) -> receiver (retweeting account),
# with n = how often each pair occurs (edge weight).
edge_list = as.data.frame(cbind(sender = tolower(reTweets$sender),
                                receiver = tolower(reTweets$screenName)))
edge_list = count(edge_list, sender, receiver)
edge_list[1:5,]

# Directed retweet graph, saved for reuse.
reTweets_graph <- graph_from_data_frame(d=edge_list, directed=T)
save(reTweets_graph, file = "retweet-graph.Rdata")

par(bg="white", mar=c(1,1,1,1))
plot(reTweets_graph,
     layout=layout.fruchterman.reingold,
     vertex.color="blue",
     vertex.size=(degree(reTweets_graph, mode = "in")), #sized by in-degree centrality
     vertex.label = NA,
     edge.arrow.size=0.8,
     edge.arrow.width=0.5,
     edge.width=edge_attr(reTweets_graph)$n/10, #sized by edge weight
     edge.color=hsv(h=.95, s=1, v=.7, alpha=0.5))
title("Retweet Climate Change Network", cex.main=1, col.main="black")
c280656899502618cebc1915e2c420579d228f68
99b84703a5df130a2b5f83a8f0c0fb1771d30cac
/CodeFiles/Feb03.2020.R
157c2cb996a1ce9b8a6461ead923a362aa36b6ff
[]
no_license
jakelawlor/TidyTuesday_JL
83b582991ead0782310bae4d25f3e2353b2082ef
fe07240d549d8d51278151af5b868cf1d9745d55
refs/heads/master
2021-07-01T14:33:27.309313
2021-03-02T20:21:30
2021-03-02T20:21:30
227,494,516
25
6
null
null
null
null
UTF-8
R
false
false
728
r
Feb03.2020.R
## Tidy Tuesday Feb 3, 2020
## NFL attendance / standings / games data from the rfordatascience
## tidytuesday repository (2020-02-04 data set).

# Raw data pulled straight from GitHub.
attendance <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-02-04/attendance.csv')
standings <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-02-04/standings.csv')
games <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-02-04/games.csv')

# NOTE(review): theme_set()/theme_bw() come from ggplot2, which is only
# attached below -- this call works only if ggplot2 is already loaded.
theme_set(theme_bw())

# libraries
library(tidyverse)
library(ggplot2)
library(dplyr)

# NOTE(review): errors if no `tuesdata` object exists in the workspace.
rm(tuesdata)

## view dataset
attendance %>% glimpse()
standings %>% glimpse()
games %>% glimpse()

# Weekly home attendance over the years, one panel per team.
attendance %>%
  ggplot(aes(x=year,y=home)) +
  geom_line() +
  facet_wrap(~team_name)

?distinct()
1a078d45159808e9c604c39b6089439f932fa008
e4ff3a5fc17302d8d4fd86b38072e67ffe1aedec
/R/mtt2.R
c9767c3f62da3e084ad594b7a72f204860543674
[]
no_license
cran/robeth
5782cfcb86e6931152dc8a0b9b8f71e97068e449
5b60aabc7c1b21f15d73d1246ab40c1defdf5f7f
refs/heads/master
2023-08-31T11:44:10.694653
2023-08-22T08:00:05
2023-08-22T09:31:11
17,699,284
0
1
null
null
null
null
UTF-8
R
false
false
248
r
mtt2.R
"mtt2" <- function(a,n) { if (missing(a)) messagena("a") if (missing(n)) messagena("n") nn <- length(a) b <- single(nn) f.res <- .Fortran("mtt2z", a=to.single(a), b=to.single(b), n=to.integer(n), nn=to.integer(nn)) list(b=f.res$b) }
2c5757750027484c28451174de77b5b9ee090dda
ea2360f5466ba3f1cc92aebe34189a66f529698b
/sparkHardDrive/R/trialAndError.R
204cb4cdbe0ef169eb65909112f872ec82dce6a7
[ "MIT" ]
permissive
kklamsi/nsdb
538ff123de2314eba9030a4081073d97a0297829
298576a6320273867496d7810d9b40d27a3de272
refs/heads/master
2020-04-29T02:21:36.865364
2019-06-05T08:37:11
2019-06-05T08:37:11
175,764,138
0
0
null
null
null
null
UTF-8
R
false
false
1,311
r
trialAndError.R
# Exploratory script: load hard-drive SMART data alongside a local Spark
# session and build a balanced failure/non-failure sample.
library(sparklyr)
library(tidyverse)
library(dplyr)

# Local Spark connection (kept for interactive work; not used further below).
sc <- spark_connect(master = "local")

#' Read every CSV contained in a zip archive.
#'
#' @param zipfile Path to the zip archive.
#' @param ...     Additional arguments forwarded to read.csv().
#' @return The imported CSV files, named by file name.  NOTE(review):
#'   sapply() may simplify the result when all CSVs have the same shape;
#'   switch to lapply() if a plain list is always required.
read.csv.zip <- function(zipfile, ...) {
  # Create a name for the dir where we'll unzip
  zipdir <- tempfile()
  # Create the dir using that name
  dir.create(zipdir)
  # Unzip the file into the dir
  unzip(zipfile, exdir=zipdir)
  # Get a list of csv files in the dir
  files <- list.files(zipdir)
  # BUG FIX: the original pattern "//.csv$" never matched anything -- the
  # dot was unescaped and extracted file names contain no slashes.
  # "\\.csv$" matches names ending in a literal ".csv".
  files <- files[grep("\\.csv$", files)]
  # Create a list of the imported csv files
  csv.data <- sapply(files, function(f) {
    fp <- file.path(zipdir, f)
    return(read.csv(fp, ...))
  })
  return(csv.data)
}

# Quarterly data (absolute paths from the original analysis environment).
data_Q1_2016 <- read.csv.zip("/data_Q1_2016.zip")
data_Q1_2016 <- read.csv("/data_Q1_2016.csv")

## harddriveKaggle ##
harddriveKaggle <- read.csv("/harddriveKaggle.csv")
# Keep only the failure label and the normalized SMART attributes of interest.
harddriveKaggle <- select(harddriveKaggle,
                          failure,
                          smart_1_normalized,
                          smart_3_normalized,
                          smart_5_normalized,
                          smart_7_normalized,
                          smart_9_normalized,
                          smart_192_normalized,
                          smart_193_normalized,
                          smart_194_normalized)

# Balanced 1000/1000 sample; failures are rare, so they are sampled with
# replacement while healthy drives are sampled without.
sampleFailure <- sample_n(filter(harddriveKaggle, failure == 1), 1000, replace = T)
sampleRunning <- sample_n(filter(harddriveKaggle, failure == 0), 1000, replace = F)
sample <- bind_rows(sampleFailure, sampleRunning)
b3718b0f71aa5424aed52f6283888d7cef1d2a8e
63299b8fd41d02dfb2f7187bf9f8b07b037a07df
/2 WAY ANOVA.R
0b0607d8979c0015fef3093a3a720700de6f3119
[]
no_license
shrddha-p-jain/Statistics-Practicals
c971c7307b6e79e16598ad6c2bb1d44baa548698
d4f4c2075a9d74b5b60aed6bfe5b94e001416872
refs/heads/main
2023-07-17T00:50:25.601213
2021-08-26T14:46:51
2021-08-26T14:46:51
400,203,710
1
0
null
null
null
null
UTF-8
R
false
false
3,645
r
2 WAY ANOVA.R
# Two-way ANOVA practical.
#
# Q1: An expanding company wants to know how to introduce a new machine:
# transfer staff who worked on the old machine, or employ new staff who had
# not worked on any machine before?  12 experienced and 12 new staff were
# selected; half of each group was allocated to the new machine and half to
# the old one, and the number of errors made over a set period was recorded.
#
# Hypotheses:
#   H01 (person):      no difference between new and experienced staff.
#   H02 (machine):     no difference between the old and the new machine.
#   H03 (interaction): no person x machine interaction.

# Factor layout: 6 replicates per person type, repeated for both machines.
# NOTE(review): `time=` relies on partial matching of rep()'s `times=` argument.
person=rep(rep(c('newly','experience'),each=6),time=2)
person
machine=rep(c('old','new'),each=12)
errors=c(4,5,7,6,8,5,1,2,2,3,2,3,5,6,5,6,5,6,8,9,8,8,7,9)
data1=data.frame(person,machine,errors)
data1
str(data1)

# Additive (main-effects only) model.
data1.aov=aov(errors~person+machine,data=data1)
summary(data1.aov)
# H01: accepted (no person effect); H02: rejected (machine effect).

# Model including the interaction term (tests H03).
data2.aov=aov(errors~person*machine,data=data1)
summary(data2.aov)
# H03: rejected (significant interaction).

model.tables(data2.aov,type="means")
plot.design(data1) # main-effect (design) plot
# interaction plot
interaction.plot(person,machine,errors)

# Tukey honest significant differences for all pairwise comparisons.
TukeyHSD(data2.aov)
TukeyHSD(data2.aov,ordered=T)
plot(TukeyHSD(data2.aov))
op=par(mar=c(5,8,4,2)) # widen the left margin for the long comparison labels
op
plot(TukeyHSD(data2.aov,ordered=T),cex.axis=1,las=0)
plot(TukeyHSD(data2.aov,ordered=T),cex.axis=1,las=1)
plot(TukeyHSD(data2.aov,ordered=T),cex.axis=1,las=2)
plot(TukeyHSD(data2.aov,ordered=T),cex.axis=1,las=3)

# Q2: Effect of observing violent acts on subsequent aggressive behaviour.
# Factor 1 (kind): violent cartoon vs video of real action.
# Factor 2 (time): exposure of 10 or 30 minutes.
# 8 children per cell; after watching, each child plays a Tetris-like game
# for 30 minutes that provides 100 opportunities to make an aggressive
# choice, and the number of aggressive choices is recorded.
# Hypotheses as above: H01 kind, H02 time, H03 interaction.
time=rep(rep(c('10 mins','30 mins'),each=8),times=2)
e=c(47,56,48,51,46,44,50,51,67,69,65,62,67,69,59,72,52,62,57,49,64,39,50,48,
    81,92,82,92,82,94,86,83)
kind=rep(c('cartoon','real action'),each=16)
kov=data.frame(time,kind,e)
kov # aggression scores by exposure time and kind of violence
str(kov)

kov.aov=aov(e~kind+time,data=kov) # additive model
summary(kov.aov)
kov.aov1=aov(e~kind*time,data=kov) # model with interaction
summary(kov.aov1)

model.tables(kov.aov1,type="means")
plot.design(kov) # main-effect (design) plot
# interaction plot
interaction.plot(time,kind,e)

TukeyHSD(kov.aov1)
TukeyHSD(kov.aov1,ordered=T)
plot(TukeyHSD(kov.aov1))
op=par(mar=c(5,8,4,2))
op
plot(TukeyHSD(kov.aov1,ordered=T),cex.axis=1,las=0)
plot(TukeyHSD(kov.aov1,ordered=T),cex.axis=1,las=1)
plot(TukeyHSD(kov.aov1,ordered=T),cex.axis=1,las=2)
plot(TukeyHSD(kov.aov1,ordered=T),cex.axis=1,las=3)
95eb38163386b754ca9468a6d1148b01987c983d
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/export/examples/rgl2bitmap.Rd.R
8bee6661027740237aa691077f03c54d50ad6a26
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
516
r
rgl2bitmap.Rd.R
# Extracted example code for export::rgl2png (save the active rgl scene as a
# bitmap).
library(export)

### Name: rgl2bitmap
### Title: Save currently active rgl 3D graph to bitmap format
### Aliases: rgl2bitmap rgl2png rgl2png

### ** Examples

# Create a file name
filen <- tempfile(pattern = "rgl")
# or
# filen <- paste("YOUR_DIR/rgl")

# Generate a 3D plot using 'rgl'
x = y = seq(-10, 10, length = 20)
z = outer(x, y, function(x, y) x^2 + y^2) # paraboloid surface over the grid
rgl::persp3d(x, y, z, col = 'lightblue')

# Save the plot as a png
rgl2png(file = filen)
# Note that omitting 'file' will save in current directory
0a6086845f9875b836d34764f99cba9ada3ad209
a62befa320d9d10aaa8815899d76bd138add029a
/final_analysis/Sample_Analysis.R
a2967c4db8f4d4bba60274522492714c1e62618c
[]
no_license
sfpacman/MSc-Project
7e84dd887abfc36bb1da6c5bc439dce24863ad95
d87cea5a35435bf385b975bbc8a4be61c28218b7
refs/heads/master
2021-01-20T12:34:48.425715
2017-09-05T09:47:31
2017-09-05T09:47:31
90,382,245
1
1
null
null
null
null
UTF-8
R
false
false
3,065
r
Sample_Analysis.R
library(Matrix)
library(ggplot2)
library(dplyr)
# Defines the Danaher marker gene sets and related objects used below
# (cell_list, use_gene, ... are expected to come from this file).
source("danaher_def.R")

# Five combinations of normalization and clustering are applied and analyzed
# by the following code: correlation, UMI+kmeans, computeSumFactors+kmeans,
# UMI+SNN-cliq, computeSumFactors+SNN-cliq.
# computeSumFactors+SNN-cliq has two sets of data (k = 11 and k = 5).

### This is the example of correlation (Figure 3A and Supp. table 1)
sample_analysis <-readRDS("sample_analysis.rds")
sample <- readRDS("sample.rds")
# Predicted identity per cell vs the annotated (actual) identity.
rcell_assign <- sample_analysis$X$cls_id
act <- unlist(sample$summary[,2])
result <- data.frame(pre=rcell_assign,act =act)
# Per (actual, predicted) pair: cell count and percentage within each actual class.
x<- result %>% group_by(act,pre) %>% summarise (n = n()) %>% mutate(Precentage = n / sum(n)*100)
# This produces supplementary table 1
kable(x,"latex",col.names=c("Actual Identity", "Prediction","cell count","% cell count" ))
freq <- as.numeric(table(result$pre))
# This produces figure 3A
# NOTE(review): cluster_type and clu are not defined at this point in the
# script -- presumably supplied by danaher_def.R or an earlier session.
pie(freq, labels = paste(cluster_type,signif(clu$S_s_11[,2]/sum(clu$S_s_11[,2])*100,2),"%",sep=" "))

### This is the example UMI+kmeans (Figure 3B and Supp. table 3)
### The rest of Danaher's marker results are produced by this method by
### changing m_n (expression matrix) and cluster_assign (cluster assignment).
### Necessary for getting the gene names for Danaher's markers.
pbmc68k <- readRDS("pbmc68k.rds")
gen <- pbmc68k$all_data[[1]]$hg19$gene_symbols
# NOTE(review): use_gene is not defined in this file.
ngn <- sapply(use_gene, function(x){gen[x]})
m_n <- sample_analysis$X$m_n

### mean score computing ###
# For each marker set, average the expression matrix over the set's genes,
# producing one score column per candidate cell type (rows = cells).
mean_score <- list()
for(i in 1:length(cell_list)){
  type= cell_list[i]
  type_gene_select <- match(unlist(cell_list[i]),ngn)
  type_gene_select <- type_gene_select[!is.na(type_gene_select)]
  type_expr <- m_n[,type_gene_select]
  type_mean <-colMeans(t(type_expr))
  mean_score <- cbind(unlist(mean_score), type_mean)
}
colnames(mean_score) <- names(cell_list)
cluster_assign <- sample_analysis$X$k$cluster
cluster_mean <- data.frame(Cluster= cluster_assign,mean_score)
# Average marker score per cluster, rounded to 3 decimals.
score_by_cluster <- round(cluster_mean %>% group_by(Cluster) %>% summarise_all(funs(mean)),3)
score_by_cluster <- score_by_cluster[,-which(names(score_by_cluster) %in% c("Cluster"))]

### selecting cell identity for each cluster ###
# Each cluster is labelled with the marker set having the highest mean score;
# clusters whose scores are all zero stay "unknown" (code 0).
cluster_type <- list()
for( i in 1:nrow(score_by_cluster)){
  x <- as.numeric(score_by_cluster[i,])
  if( mean(x) == 0 ) {cluster_type = c(unlist(cluster_type),0) } else {cluster_type = c(unlist(cluster_type), which(x== max(x)))}
}
# Map every cell's cluster to its label and attach readable level names.
rcell_assign <-sapply(cluster_mean$Cluster,function(x){cluster_type[x]})
rcell_assign <- factor(rcell_assign,levels=c(0:length(cell_list)),labels= c("unknown",names(cell_list)))
act <- unlist(sample$summary[,2])
result <- data.frame(pre=rcell_assign,act =act)
x<- result %>% group_by(act,pre) %>% summarise (n = n()) %>% mutate(Precentage = n / sum(n)*100)
# This produces supplementary table 3
kable(x,"latex",col.names=c("Actual Identity", "Prediction","cell count","% cell count" ))
# This produces figure 3B
freq <- as.numeric(table(result$pre))
pie(freq, labels = paste(cluster_type,signif(clu$S_s_11[,2]/sum(clu$S_s_11[,2])*100,2),"%",sep=" "))
474c615cfa0aeb0aaa4bbbe0193626cff8f875dd
3b8730002a61a6296f5e1394b3a49e0e487bba1e
/testing.r
ee3f84278c8f8888f992ad7c380dfa6cc4cb1565
[]
no_license
AliShahzad07/Hello-R
827602fc27bbd9941e5d617d10e0b4ceddab6ff6
39d61f823782cf7b8ade2cc06c73715a6fe50f86
refs/heads/main
2023-02-24T19:31:05.278170
2021-02-08T06:24:14
2021-02-08T06:24:14
336,978,972
0
0
null
null
null
null
UTF-8
R
false
false
35
r
testing.r
#This is from git "Hello From git"
43c247924f4edf431d00a6fba0bb208008fb765b
4b90b3de3c3e819e424774bb1b90826917747e6d
/shujuzhunbei1().R
204cea22e9d8e8202533acf9f7b80dd4b1e0c227
[]
no_license
leivenwong/RFiles-64
f7f375df57aced0c9e3139e682cf2588ff9332d4
8847c8c8e13561269804cc9de73d491613e97e56
refs/heads/master
2020-03-28T10:48:03.263988
2018-09-10T11:38:51
2018-09-10T11:38:51
148,146,193
0
0
null
null
null
null
UTF-8
R
false
false
3,103
r
shujuzhunbei1().R
# Data-preparation helper ("shuju zhunbei" = data preparation): loads daily
# IF index-futures bars from the "ctp_merged_mq" ODBC source, computes the
# daily percent change (ZF) and a JC-day moving average of the lows (MALOW),
# then classifies every day into a two-letter market state (MS) plus the
# next day's state (NEXTMS).  Returns the enriched data frame.
#
# Side effects: assigns IF.1D and IF.1D.1 in the global environment (<<-).
# NOTE(review): the `id` and `cy` arguments are accepted but never used --
# the query is hard-coded to the if_1d table.
shujuzhunbei1<-function(id="SH000001",cy="1D",JC=10)
{
  library("RODBC")
  library("xts")
  # Pull all daily bars, ordered by timestamp.
  channel<-odbcConnect("ctp_merged_mq",uid="ctp_user",pwd="ctp_password")
  IF.1D<<-sqlQuery(channel,"select * from if_1d order by utc_string")
  odbcClose(channel)
  IFTIME <- IF.1D$utc_string
  IFOPEN <- IF.1D$open_price
  IFHIGH <- IF.1D$high_price
  IFLOW <- IF.1D$low_price
  IFCLOSE <- IF.1D$close_price
  # Rebuild a plain OHLC data frame with canonical column names.
  PINZHONG <- list()
  PINZHONG$TIME<-IFTIME
  PINZHONG$OPEN<-IFOPEN
  PINZHONG$HIGH<-IFHIGH
  PINZHONG$LOW<-IFLOW
  PINZHONG$CLOSE<-IFCLOSE
  PINZHONG <- as.data.frame(PINZHONG)
  IF.1D <<- PINZHONG
  DATETIME<-as.Date(PINZHONG$TIME)
  # Daily percent change: (close - previous close) / close * 100.
  CLOSE<-as.xts(PINZHONG$CLOSE,DATETIME)
  REFCLOSE<-lag(CLOSE,k = 1,na.pad=TRUE)
  ZF<-xts((as.numeric(CLOSE)-as.numeric(REFCLOSE))/as.numeric(CLOSE)*100,DATETIME)
  # JC-period moving averages of close and low.
  MACLOSE<-rollapply(CLOSE,JC,mean)
  REFMACLOSE<-lag(MACLOSE,k = 1,na.pad=TRUE) # NOTE(review): computed but never used
  LOW<-as.xts(PINZHONG$LOW,DATETIME)
  MALOW<-rollapply(LOW,JC,mean)
  MS<-NA
  # Work with plain numeric vectors from here on.
  ZF<-as.numeric(ZF)
  CLOSE<-as.numeric(CLOSE)
  MALOW<-as.numeric(MALOW)
  # State classification, starting after the JC-day warm-up period:
  # first letter Q = close at/above the moving-average low, R = below it;
  # second letter S..Z bins the percent change from >= +5 down to <= -5.
  i<- (JC+1)
  bl<-length(DATETIME)
  while(i<=bl)
  {
    if(ZF[i]>=5 & CLOSE[i]>=MALOW[i]) MS[i]<-"QS"
    else if(ZF[i]>=3 & ZF[i]<5 & CLOSE[i]>=MALOW[i]) MS[i]<-"QT"
    else if(ZF[i]<3 & ZF[i]>1 & CLOSE[i]>=MALOW[i]) MS[i]<-"QU"
    else if(ZF[i]<=1 & ZF[i]>=0 & CLOSE[i]>=MALOW[i]) MS[i]<-"QV"
    else if(ZF[i]<0 & ZF[i]>=-1 & CLOSE[i]>=MALOW[i]) MS[i]<-"QW"
    else if(ZF[i]<0-1 & ZF[i]>-3 & CLOSE[i]>=MALOW[i]) MS[i]<-"QX"
    else if(ZF[i]<=0-3 & ZF[i]>-5 & CLOSE[i]>=MALOW[i]) MS[i]<-"QY"
    else if(ZF[i]<=-5 & CLOSE[i]>=MALOW[i]) MS[i]<-"QZ"
    else if(ZF[i]>=5 & CLOSE[i]<MALOW[i]) MS[i]<-"RS"
    else if(ZF[i]>=3 & ZF[i]<5 & CLOSE[i]<MALOW[i]) MS[i]<-"RT"
    else if(ZF[i]<3 & ZF[i]>1 & CLOSE[i]<MALOW[i]) MS[i]<-"RU"
    else if(ZF[i]<=1 & ZF[i]>=0 & CLOSE[i]<MALOW[i]) MS[i]<-"RV"
    else if(ZF[i]<0 & ZF[i]>=-1 & CLOSE[i]<MALOW[i]) MS[i]<-"RW"
    else if(ZF[i]<0-1 & ZF[i]>-3 & CLOSE[i]<MALOW[i]) MS[i]<-"RX"
    else if(ZF[i]<=-3 & ZF[i]>-5 & CLOSE[i]<MALOW[i]) MS[i]<-"RY"
    else if(ZF[i]<=-5 & CLOSE[i]<MALOW[i]) MS[i]<-"RZ"
    else MS[i]<-NA;
    i<-i+1
  }
  # NEXTMS[t] = state of day t+1; the final day gets the placeholder "WAIT".
  NEXTMS<-0
  for(i in 1:(length(MS)-1)) NEXTMS[i]<-MS[i+1]
  NEXTMS[length(MS)]<-"WAIT"
  # Attach the derived columns and drop warm-up rows containing NAs.
  PINZHONG$ZF<-as.numeric(ZF)
  PINZHONG$MS<-MS
  PINZHONG$NEXTMS<-NEXTMS
  PINZHONG$DATETIME<-DATETIME
  PINZHONG$DATE<-as.character(DATETIME)
  PINZHONG$LOW<-as.numeric(LOW)
  PINZHONG$MALOW<-as.numeric(MALOW)
  PINZHONG<-na.omit(PINZHONG)
  IF.1D.1 <<- as.data.frame(PINZHONG)
  return(as.data.frame(PINZHONG))
}
0e53c2b32150f83b7868e587607e55068dbeaa55
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
/cran/paws.storage/man/s3control_get_multi_region_access_point_routes.Rd
a1a79ebcf9b9d89c3f9077d5ef864bc9634928e9
[ "Apache-2.0" ]
permissive
paws-r/paws
196d42a2b9aca0e551a51ea5e6f34daca739591b
a689da2aee079391e100060524f6b973130f4e40
refs/heads/main
2023-08-18T00:33:48.538539
2023-08-09T09:31:24
2023-08-09T09:31:24
154,419,943
293
45
NOASSERTION
2023-09-14T15:31:32
2018-10-24T01:28:47
R
UTF-8
R
false
true
857
rd
s3control_get_multi_region_access_point_routes.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/s3control_operations.R \name{s3control_get_multi_region_access_point_routes} \alias{s3control_get_multi_region_access_point_routes} \title{Returns the routing configuration for a Multi-Region Access Point, indicating which Regions are active or passive} \usage{ s3control_get_multi_region_access_point_routes(AccountId, Mrap) } \arguments{ \item{AccountId}{[required] The Amazon Web Services account ID for the owner of the Multi-Region Access Point.} \item{Mrap}{[required] The Multi-Region Access Point ARN.} } \description{ Returns the routing configuration for a Multi-Region Access Point, indicating which Regions are active or passive. See \url{https://www.paws-r-sdk.com/docs/s3control_get_multi_region_access_point_routes/} for full documentation. } \keyword{internal}
be7d0f8c1883b5db42520ce2baea6e84614d8381
884677dd48325c8314489ce0cfe0770b0d97e5c0
/man/grpimpperm.rf.Rd
709cc934120c43dc62f27ed3fd182d38352b3a79
[]
no_license
dtrfgv/dtrfgv
1ff5657c39933ae14f5057dbdd570c6235648461
a736537579ecddaa124898c3683eb209aad7537e
refs/heads/master
2020-03-28T12:36:31.603482
2019-04-26T20:37:56
2019-04-26T20:37:56
148,315,133
1
0
null
null
null
null
UTF-8
R
false
true
1,188
rd
grpimpperm.rf.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Functions_variant_CARTGV_for_RFGV.R
\name{grpimpperm.rf}
\alias{grpimpperm.rf}
\title{grpimpperm.rf}
\usage{
grpimpperm.rf(num_group, data, oobsamples, group, tree, impurityacc)
}
\arguments{
\item{num_group}{index of the considered group}

\item{data}{a data frame containing all observations included in the training data set.}

\item{oobsamples}{a vector containing the indices of the observations that are not included in the sample used to build the modified CARTGV tree.}

\item{group}{a vector with the group number of each variable. (WARNING : if there are "\code{p}" groups, the groups must be numbers from "\code{1}" to "\code{p}" in increasing order. The group label of the response variable is missing (i.e. NA)).}

\item{tree}{the output of the function \code{cartgv.rf}.}

\item{impurityacc}{the accuracy value of the modified CARTGV tree.}
}
\value{
DecreaseAcc the decrease in accuracy from permuting the values of the considered group.
}
\description{
Compute the permutation importance of a group from a modified CARTGV tree. Used only by the function \code{rfgv} when grp.importance=TRUE.
}
44283c9a9d0f40d361f60da52063a6ca75598b96
c4b195fe15bd50be87eee1d9461221ae4dfdebb7
/R/calculateStats.R
27f2fa6d6d6dbea78c4019914104f6f5f2a0faf1
[]
no_license
ischeller/FraseR
1c3f758d220109efc33afa09d5ebc7ff4ad0ef22
34dc4fb9713c37edda30a2ea06efc2c0777e8ab7
refs/heads/master
2020-08-09T00:28:56.164226
2019-10-08T14:24:42
2019-10-08T14:24:42
null
0
0
null
null
null
null
UTF-8
R
false
false
5,290
r
calculateStats.R
##
## @author Christian Mertes \email{mertes@@in.tum.de}
##
## This file contains all functions for calculating the statistics
## First it starts with calculating the Z-score for each
## site and then the p-values are calculated dependend on the
## given method in the setting file
##

#'
#' Calculate the zscore for each PSI value.
#'
#' @export
#' @examples
#' fds <- countRNAData(createTestFraseRSettings())
#' fds <- calculatePSIValues(fds)
#' fds <- calculateZScores(fds)
calculateZScores <- function(fds, type=psiTypes){
    # check input
    stopifnot(class(fds) == "FraseRDataSet")

    # calculate zscore for each psi type
    for(pt in type){
        fds <- calculateZScorePerDataSet(fds, pt)
    }

    return(fds)
}

#'
#' calculates the zscore for a given data type and a given psi type
#' and adds it directly to the dataset itself
#'
#' @param fds     the FraseRDataSet to annotate
#' @param psiType name of the PSI assay to standardize
#' @noRd
calculateZScorePerDataSet <- function(fds, psiType){
    message(date(), ": Calculate the Zscore for ", psiType, " values ...")

    # get raw data and replace NA's with zeros
    # NOTE(review): NAs are not actually replaced here; they are skipped via
    # na.rm=TRUE in rowMeans/sd below -- confirm the comment vs. behavior.
    psiVal <- assay(fds, psiType)

    # z = ( x - mean ) / sd, computed per row (splice site) across samples
    rowmean <- rowMeans(psiVal, na.rm = TRUE)
    rowsd <- apply(psiVal, 1, sd, na.rm = TRUE)
    zscores <- (psiVal - rowmean) / rowsd

    # use as.matrix to rewrite it as a new hdf5 array
    zScores(fds, type=psiType) <- as.matrix(zscores)

    return(fds)
}

#'
#' calculates the P-Value for the given FraseR dataset object
#' The P-Value calculation is based on the given method in the
#' FraseRSettings object
#'
#' @export
#' @examples
#' fds <- countRNAData(createTestFraseRSettings())
#' fds <- calculatePSIValues(fds)
#' fds <- calculatePValues(fds)
calculatePValues <- function(fds, type=psiTypes, internBPPARAM=bpparam(), ...){
    # check input
    stopifnot(class(fds) == "FraseRDataSet")
    enforceHDF5 <- FALSE

    # get correct method: each branch yields a list whose first element is
    # the per-type worker function and whose remaining elements are extra
    # arguments forwarded to it via do.call below.
    FUN <- switch(method(fds),
        betaBin = {
            enforceHDF5 <- TRUE
            list(FUN=pvalueByBetaBinomialPerType, pvalFun=betabinVglmTest)
        },
        Fisher = {
            list(FUN=pvalueByFisherPerType, internBPPARAM=internBPPARAM)
        },
        stop("The provided method is not present for
                this package.",
                "Please set the method to one of the following:",
                "Fisher, betaBin, DESeq2, Martin"
        )
    )

    # check, that the object is stored as HDF5 array if requested
    aIsHDF5 <- sapply(assays(fds), function(x) any("DelayedArray" == is(x)))
    if(enforceHDF5 & !all(aIsHDF5)){
        message(date(), ": The data is not stored in a HDF5Array. ",
                "To improve the performance we will store now ",
                "the data in HDF5 format."
        )
        # fds <- saveFraseRDataSet(fds)
    }

    # test all 3 different types
    for(psiType in type){
        # NOTE(review): `aname` is not defined in this function nor visible
        # anywhere in this file -- this will fail at runtime unless it
        # exists in an enclosing environment; verify.
        fds <- do.call(FUN[[1]], c(fds=fds, aname=aname, psiType=psiType,
                FUN[-1], ...)
        )
        fds <- saveFraseRDataSet(fds)
        gc()
    }

    # return the new datasets
    return(fds)
}

#'
#' calculates the pvalue per type (psi3,psi5,spliceSite) with fisher
#'
#' @param dataset        the FraseR dataset to test
#' @param psiType        PSI type whose counts are tested
#' @param internBPPARAM  BiocParallel backend for the inner per-site loop
#' @noRd
pvalueByFisherPerType <- function(dataset, psiType, internBPPARAM){
    # go over each group but no NA's
    group <- sampleGroup(dataset)

    # reads to test for abberent splicing (eg: nonSplicedReads)
    rawCounts <- counts(dataset, type=psiType, side="ofIn")
    rawOtherCounts <- counts(dataset, type=psiType, side="other")

    # one p-value vector per test group, computed in parallel
    pvalues <- bplapply(unique(na.omit(group)), dataset=dataset,
            rawCounts=rawCounts, rawOtherCounts=rawOtherCounts,
            BPPARAM=parallel(dataset), internBPPARAM=internBPPARAM,
            FUN=.testPsiWithFisherPerGroup
    )
    names(pvalues) <- as.character(unique(na.omit(group)))

    # expand per-group results back to one column per sample
    pvalues_full <- pvalues[as.character(group)]

    # add NA's to the non tested ones
    pvalues_full[is.na(group)] <- list(
            rep(as.numeric(NA), length(pvalues[[1]]))
    )

    # transform it to a DataFrame and return it
    return(.asDataFrame(pvalues_full, samples(dataset)))
}

#'
#' calculates the pvalue per group with fisher
#'
#' @param dataset         the FraseR dataset under test
#' @param groupID         the sample group to test against all other samples
#' @param rawCounts       counts supporting the event (side="ofIn")
#' @param rawOtherCounts  counts not supporting the event (side="other")
#' @param internBPPARAM   BiocParallel backend for the per-site Fisher tests
#' @noRd
.testPsiWithFisherPerGroup <- function(dataset, groupID, rawCounts,
                    rawOtherCounts, internBPPARAM){
    # get group to test
    # NOTE(review): here sampleGroup() is called on dataset@settings while
    # pvalueByFisherPerType calls it on the dataset itself -- confirm both
    # resolve to the same grouping.
    group <- sampleGroup(dataset@settings)
    group2Test <- group == groupID
    group2Test[is.na(group2Test)] <- FALSE

    # 2x2 contingency counts per splice site: test group vs rest,
    # supporting (TP/FP) vs non-supporting (FN/TN) reads.
    fullFisherTable <- data.table(
        TP=rowSums(rawCounts[ , group2Test,with=FALSE]),
        FP=rowSums(rawCounts[ ,!group2Test,with=FALSE]),
        FN=rowSums(rawOtherCounts[, group2Test,with=FALSE]),
        TN=rowSums(rawOtherCounts[,!group2Test,with=FALSE])
    )

    # test only where at least the group has one read
    fisherTableToTest <- fullFisherTable[TP+FN > 0]
    pvalues <- unlist(bplapply(1:nrow(fisherTableToTest),
        BPPARAM=internBPPARAM,
        fisherTableToTest=fisherTableToTest,
        FUN=function(idx, fisherTableToTest){
            fisher.test(matrix(as.integer(
                fisherTableToTest[idx]), nrow=2
            ))$p.value
        }
    ))

    # add NAs wher the test group did not had any read
    fullFisherTable[,pvalue:=as.numeric(NA)]
    fullFisherTable[TP+FN>0,pvalue:=pvalues]

    return(fullFisherTable[,pvalue])
}
fc95f64b79ee3f8389967c72f135b83f977631a3
71129b1c03eed2abdd67fc2b52b57874bae49f45
/collapsibleTree/tests/testthat/test-root.R
95d06b7c83c0e7e6836924f67ee6a8f8802c01e1
[]
no_license
Bastiaanspanjaard/LINNAEUS
0eb880d8e581f870b58d69cea7060822baf8564a
6c86288e8e684d77f5499249023e7157d0c440dc
refs/heads/master
2022-11-13T10:48:40.584477
2020-07-03T14:56:07
2020-07-03T14:56:07
255,870,978
4
0
null
null
null
null
UTF-8
R
false
false
1,023
r
test-root.R
# Tests that collapsibleTree()/collapsibleTreeSummary() build valid
# htmlwidgets whether or not an explicit root label is supplied.
library(collapsibleTree)
context("Root labelling")

test_that("unlabelled root works for collapsibleTree", {
  wb <- collapsibleTree(warpbreaks, c("wool", "tension", "breaks"))
  # The widget, its data payload and the hierarchy option must all have
  # the expected classes.
  expect_is(wb,"htmlwidget")
  expect_is(wb$x$data,"list")
  expect_is(wb$x$options$hierarchy,"character")
})

test_that("unlabelled root works for collapsibleTreeSummary", {
  wb <- collapsibleTreeSummary(warpbreaks, c("wool", "tension", "breaks"))
  expect_is(wb,"htmlwidget")
  expect_is(wb$x$data,"list")
  expect_is(wb$x$options$hierarchy,"character")
})

test_that("labeled root works for collapsibleTree", {
  # Third positional argument supplies the root label.
  wblabeled <- collapsibleTree(warpbreaks, c("wool", "tension", "breaks"), "a label")
  expect_is(wblabeled$x$data,"list")
  expect_is(wblabeled$x$options$hierarchy,"character")
})

test_that("labeled root works for collapsibleTreeSummary", {
  wblabeled <- collapsibleTreeSummary(warpbreaks, c("wool", "tension", "breaks"), "a label")
  expect_is(wblabeled$x$data,"list")
  expect_is(wblabeled$x$options$hierarchy,"character")
})
7ab9dcb2eb91618bb64934736015cadbac3caa87
57b6bc2896092a29cbd829199359f96be3da7572
/2019/test.R
3af42ba4b83e9b46526cd55b7d70e76b6ab397df
[]
no_license
azambranog/hash_code
1a6b43c8af3388df1bba1e4750e092825eca6886
0e0787d72712d7084bb8c397e3e70c372ee1e3f0
refs/heads/master
2022-03-13T15:20:22.978505
2022-02-24T21:39:54
2022-02-24T21:39:54
239,148,802
0
0
null
null
null
null
UTF-8
R
false
false
668
r
test.R
# Benchmark: how the data-preparation step scales with the number of cores
# used by mclapply (Google Hash Code 2019 photo-slideshow input).
source('format_as_tables.R') # provides format_as_table()
library(parallel)

message('####', 'prepare data')
t <- system.time({
  data <- format_as_table('b_lovely_landscapes.txt')
})
print(t)

# Expand each slide's tag list into one (slide, tag) row per tag, timing the
# whole job for 10 cores down to 1.
# NOTE(review): data.table()/rbindlist() are assumed to be attached by the
# sourced file above.
for (cores in 10:1) {
  message('##' , cores)
  t <- system.time( {
    all_tags <- mclapply(as.list(1:nrow(data)), function(i) {
      x <- data.table(
        slide = data[i, slide],
        tag = unlist(data[i, tags][[1]])
      )
      return(x)
    }, mc.cores = cores)
    all_tags <- rbindlist(all_tags)
  })
  print(t)
}
52dafb5284e7deb8c1cf633e82bfc3085c33be1f
e48dc77b5684cd3f1b2d2081808a93575692e89f
/week3.R
f123774f03250ada471f4b968b25d4dfd8ec4d37
[]
no_license
mindmui/CleanData-Coursera
7deabdc16e1468c7f22369fa909c7a6093d25b6e
815023b244581128c80c334b55307a49c89f7e08
refs/heads/master
2021-01-10T06:35:10.347878
2016-02-26T17:58:50
2016-02-26T17:58:50
51,161,243
0
0
null
null
null
null
UTF-8
R
false
false
6,658
r
week3.R
# Week 3 - Subsetting and summarizing data (course notes)

# a quick review: build a small data frame, shuffle its rows, and knock out
# a couple of values so the NA-handling examples below have something to do.
set.seed(12345)
x <- data.frame("var1"=sample(1:5),"var2"=sample(6:10),"var3"=sample(11:15))
x<- x[sample(1:5),]; x$var2[c(1,3)] = NA
# x[x$var1<=3,] # subsetting where var1 is less than 3

# Dealing with missing values: which() drops NAs, a plain logical index keeps them
x[which(x$var2 > 8),]

# sort
sort(x$var1, decreasing =TRUE, na.last = TRUE)

# ordering
x[order(x$var1),] # reorder the rows such that variable 1 is in increasing value

# ordering with plyr
install.packages("plyr")
library(plyr)
arrange(x,var1) # is the same as the above command
arrange(x,desc(var1)) # wrap with desc for descending order

# adding rows and columns
x$var4 <- rnorm(5)
x # added variable
y <- cbind(x,rnorm(5)) # bind to the right of x
y

# Summarizing data
# Setting up
setwd("/Users/Mind/Desktop/Cleandata-Coursera")
if(!file.exists("./data")){
  dir.create("data")
}
fileUrl <- "http://data.baltimorecity.gov/api/views/k5ry-ef3g/rows.csv?accessType=DOWNLOAD"
download.file(fileUrl,destfile="./data/restaurants.csv",method="curl")
restData <- read.csv("./data/restaurants.csv")

# look at a bit of data
head(restData)
tail(restData, n=2) # shows bottom 2
summary(restData)
# for qualitative shows the count
# for quantitative shows the distribution
str(restData)
quantile(restData$councilDistrict,na.rm=TRUE)
quantile(restData$councilDistrict,probs = c(0.5,0.75,0.9)) # different percentiles

# make table
table(restData$zipCode,useNA = "ifany") # shows the missing value (if any)

# check for missing values
sum(is.na(restData$councilDistrict))
any(restData$zipCode > 0)
all(restData$zipCode > 0)
colSums(is.na(restData)) # none is any

# values with specific characteristics
table(restData$zipCode %in% c("21212","21213")) # how many rows have zipCode 21212 or 21213
# subset the data set
restData[restData$zipCode %in% c("21212","21213"),]

# Cross tabs
data(UCBAdmissions)
DF = as.data.frame(UCBAdmissions)
xt <- xtabs(Freq ~ Gender + Admit, data=DF )
# take data from DF, Freq is the data value
# Gender and Admit are the row and column field
# flat tables: ftable()

# size of the data set
fakeData = rnorm(1e5)
object.size(fakeData) # shows the size (in bytes)
print(object.size(fakeData),units="Mb") # in Megabytes

# Create new variables:
# - missingness indicators
# - applying transformation
setwd("/Users/Mind/Desktop/Cleandata-Coursera")
if(!file.exists("./data")){
  dir.create("data")
}
fileUrl <- "http://data.baltimorecity.gov/api/views/k5ry-ef3g/rows.csv?accessType=DOWNLOAD"
download.file(fileUrl,destfile="./data/restaurants.csv",method="curl")
restData <- read.csv("./data/restaurants.csv")

# create sequences
s1 <- seq(1,10,by=2) # tell min=1 , max=10, increasing by 2
s2 <- seq(1,10,length=3)

# create binary variable:
restData$nearMe = restData$neighborhood %in% c("Roland Park","Home land")
restData$zipWrong = ifelse(restData$zipCode <0,TRUE,FALSE)
table(restData$zipWrong, restData$zipCode <0)

# create categorical variables
restData$zipGroups = cut(restData$zipCode, breaks= quantile(restData$zipCode))
table(restData$zipGroups)
table(restData$zipGroups,restData$zipCode)

### Easier cutting
library(Hmisc)
restData$zipGroups = cut2(restData$zipCode, g=4) # cut into 4 groups according to quantiles
table(restData$zipGroups)

# create factor variables
restData$zcf <- factor(restData$zipCode)
class(restData$zcf)

# common quantitative transforms:
x <- 10
abs(x)
sqrt(x)
ceiling(x)
floor(x)
round(x, digits = 2) # round to 2 decimal places
signif(x, digits = 2)
cos(x) # or sin(x), ... etc.
log(x) # natural log
log2(x) # or log10(x)
exp(x)

# Reshaping the data
install.packages("reshape2")
library(reshape2)
head(mtcars)

# melting data frames
mtcars$carname <- rownames(mtcars)
# variable melt mpg and hp on the same column
carMelt <- melt(mtcars, id=c("carname","gear","cyl"),measure.vars = c("mpg","hp"))
head(carMelt)
tail(carMelt)

# Casting data frames
cylData <- dcast(carMelt,cyl~variable) # cyl is the row, variable is the column
cylData
# adding a function
cylData <- dcast(carMelt,cyl~variable,mean)
cylData # resummarizing

# Averaging values
head(InsectSprays)
tapply(InsectSprays$count, InsectSprays$spray,sum) # sum along the count for each spray
# or a nice way using plyr package
library(plyr)
ddply(InsectSprays,.(spray),summarize,sum=sum(count))

# Creating a new variable - sum
spraySums <- ddply(InsectSprays,.(spray),summarize,sum=ave(count, FUN=sum))
spraySums

# dplyr -- working with data frames in R
# basic assumptions
#   one observation per row
#   each column represents a variable or measure or characteristics
# dplyr verbs
#   select
#   filter
#   arrange
#   rename
#   mutate
#   summarize
# dplyr properties:
#   note: first argument is always a data frame
#   the result is a new data frame

# Managing data frames:
chicago <- readRDS("chicago.rds")
dim(chicago)
names(chicago)
head(select(chicago, city:dptp)) # very handy way to select only city ="dptp"
head(select(chicago, -(city:dptp))) # very handy way to select all cities except "dptp"
chicago.f <- filter(chicago, pm25 > 30) # filter data
chicago <- arrange(chicago, date) # arrange by the date
chicago <- rename(chicago, pm25 = pm25tmeans2) # change column name

# centralised the pm25 using mutate
chicago <- mutate(chicago, pm25detrend = pm25-mean(pm25))
# transform / create new variable using mutate
chicago <- mutate(chicago, year = as.POSIXlt(date)$year + 1900)
years <- group_by(chicago, year)
summarise(years, pm25 = mean(pm25, na.rm = TRUE))
# pipeline : %>%

# Merging data
# data from peer review experiments
reviews = read.csv("file1.csv")
solutions <- read.csv("file2.csv")
names(reviews) # explore the columns to merge
names(solutions)
mergedData = merge(reviews,solutions,by.x="solution_id",by.y="id",all=TRUE)
# for reviews data frame use "solution_id"
# for solutions use "id" column
# all=TRUE implies add new row if doesn't exist

# Using join commands from dplyr packages*
# NOTE(review): join()/join_all() actually come from plyr, and join_all()
# expects a list of data frames, e.g. join_all(list(df1, df2, df3)) -- verify.
join(df1,df2)
join_all(df1,df2,df3)
# the joining df MUST have the SAME column name! (drawback compared to merge)
# however, join is easier and faster when you have more than 1 data frame.