blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dc2b1323d7835fd835c6d75062cf9e07b0560b04 | f47256310805e64cc08b57233a94c6416ffbe3d8 | /man/ttbGreedyModel.Rd | c284c9e9ae65dff0d6edfae1e04e5779fe6f7989 | [] | no_license | jeanimal/heuristica | 1fed5ccee936cf2b23d7142a5f7e0b97829baab0 | 0e4933f3f263a92aa3c7deb3fe7b4ba0b8f899bb | refs/heads/master | 2021-11-28T09:37:51.809141 | 2021-09-08T14:36:14 | 2021-09-08T14:36:14 | 36,178,661 | 5 | 2 | null | null | null | null | UTF-8 | R | false | true | 2,453 | rd | ttbGreedyModel.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/heuristics.R
\name{ttbGreedyModel}
\alias{ttbGreedyModel}
\title{Greedy Take The Best}
\usage{
ttbGreedyModel(
train_data,
criterion_col,
cols_to_fit,
fit_name = "ttbGreedyModel"
)
}
\arguments{
\item{train_data}{Training/fitting data as a matrix or data.frame.}
\item{criterion_col}{The index of the column in train_data that has the
criterion.}
\item{cols_to_fit}{A vector of column indices in train_data, used to fit
the criterion.}
\item{fit_name}{Optional The name other functions can use to label output.
It defaults to the class name. It is useful to change this to a unique name
if you are making multiple fits, e.g. "ttb1", "ttb2", "ttbNoReverse."}
}
\value{
An object of \code{\link[base]{class}} ttbGreedyModel, which can
be passed in to \code{\link{predictPair}}.
}
\description{
A variant of the Take The Best heuristic with a different cue order, namely
using conditional cue validity, where the validity of a cue is judged only
on row pairs not already decided by prior cues. Specifically, it uses the
cue ranks returned by \code{\link{conditionalCueValidityComplete}}.
}
\examples{
## A data set where Take the Best and Greedy Take the Best disagree.
matrix <- cbind(y=c(3:1), x1=c(1,0,0), x2=c(1,0,1))
ttb <- ttbModel(matrix, 1, c(2,3))
ttb$cue_validities
# Returns
# x1 x2
# 1.0 0.5
ttbG <- ttbGreedyModel(matrix, 1, c(2:3))
ttbG$cue_validities
# Returns
# x1 x2
# 1 1
# because after using x1, only decisions between row 2 and 3 are left,
# and x2 gets 100\% right on those (after reversal). However, these
# cue_validities depend on using x1, first, so cue_rank is key.
ttbG$cue_ranks
# Returns
# x1 x2
# 1 2
# Now see how this affects predictions on row 2 vs. 3.
# Take the best guesses (output 0).
predictPair(oneRow(matrix, 2), oneRow(matrix, 3), ttb)
# Greedy Take The Best selects row 2 (output 1).
predictPair(oneRow(matrix, 2), oneRow(matrix, 3), ttbG)
}
\references{
Martignon, L., & Hoffrage, U. (2002). Fast, frugal, and fit: Simple
heuristics for paired comparisons. Theory and Decision, 52: 29-71.
}
\seealso{
\code{\link{conditionalCueValidityComplete}} for the metric used to sort cues.
\code{\link{ttbModel}} for the original version of Take The Best.
\code{\link{predictPair}} for predicting whether row1 is greater.
\code{\link{predictPairProb}} for predicting the probability row1 is
greater.
}
|
9060b16abc534d808bc1ffe4ac9a2a93f0ce5759 | 7d075c000b055160f43d6ac81d98e0b67ff41e92 | /scripts/plotpredict.R | cd112536893b4b11bca7ab58d8b7a70f6f59d131 | [] | no_license | lerouzic/dalechampia | 24eb740af0a99b9a7608086af0ea213d7950ab6e | 5b4ad284db85e5cea2e5ba84b6911563729e9bae | refs/heads/master | 2023-05-06T15:01:26.242804 | 2021-03-19T11:13:51 | 2021-03-19T11:13:51 | 226,364,280 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,933 | r | plotpredict.R |
################ Plotting selection time series ##################
makeTransparent<-function(someColor, alpha=100)
{
    # From https://stackoverflow.com/questions/8047668/transparent-equivalent-of-given-color
    # Author: Nick Sabbe
    # Licence : CC-attribution-SA from the conditions of the website
    #
    # Rebuild each input colour with the requested alpha channel
    # (0 = fully transparent, 255 = fully opaque).
    channels <- col2rgb(someColor)
    out <- rgb(channels[1, ], channels[2, ], channels[3, ],
               alpha=alpha, maxColorValue=255)
    names(out) <- colnames(channels)
    out
}
custom.axis <- function(side, loglim, type=c("log", "natural", "percent0", "percent100")[1]) {
    # Draw an axis for data stored on a log scale, labelled on a chosen
    # display scale:
    #   "log"        : log units as-is
    #   "natural"    : back-transformed (exp) units
    #   "percent0"   : percent change relative to 0  (exp(x)*100 - 100)
    #   "percent100" : percent of the reference      (exp(x)*100)
    # Returns (invisibly) the tick positions, as axis() does.
    lim <- switch(type,
        "natural"    = exp(loglim),
        "percent0"   = exp(loglim)*100 - 100,
        "percent100" = exp(loglim)*100,
        loglim)
    lab <- pretty(lim)
    # Map the pretty display-scale labels back onto log-scale coordinates.
    at <- switch(type,
        "log"        = lab,
        "natural"    = log(lab),
        "percent0"   = log((lab+100)/100),
        "percent100" = log(lab/100))
    lab.string <- as.character(lab)
    if (type == "percent0") {
        # Prefix positive percent changes with an explicit "+".
        lab.string <- ifelse(lab > 0, paste0("+", lab.string), paste0(lab.string))
    }
    if (type == "percent100") {
        lab.string <- paste0(lab.string)
    }
    axis(side, at=at, lab.string)
}
custom.label <- function(trait, type=c("log", "natural", "percent0", "percent100")[1]) {
    # Build an axis-label expression for the trait on the requested scale.
    # Both percent scales share the same "(%)" label; an unknown type
    # yields NULL, as in the original if/else chain.
    switch(type,
        "natural"    = bquote(.(trait)*" (mm"^2*")"),
        "log"        = bquote("log "*.(trait)*" (mm"^2*")"),
        "percent0"   = ,
        "percent100" = bquote(.(trait)*"(%)"))
}
# Estimate means and variances in raw and centered datasets
# Three ways to analyse the data:
# * raw: phenotype as observed experimentally
# * control-centered: phenotype centered on the control line
# * updown-centered: phenotype centered on the average between up and down selection lines (symmetric response)
#
# Returns a list with the per-generation error-variance components (drift,
# va, env) plus one data frame per line (Control, Up, Down) holding the
# predicted mean, the observed mean, its standard error and the prediction
# error variance, all expressed in the chosen normalization.
recenter <- function(data, G, Gv, P, N, Np, target="mu.x", normalization=c("raw", "control", "updown")[1], G0.boost=FALSE) {
    # data: summary statistics (from summarypop script)
    # G: G matrix
    # Gv: Estimated variance of each element of the G matrix
    # P: P matrix
    # N: Number of individuals measured
    # Np: Number of selected parents
    # target: Either "mu.x" (selected trait) or "mu.y" (correlated trait)
    # normalization: "raw", "control" or "updown" (see header comment)
    # G0.boost: Whether the G matrix at the first generation should be x1.5 due to inbreeding
    stopifnot(target %in% c("mu.x", "mu.y"))
    stopifnot(normalization %in% c("raw","control","updown"))
    se.target <- if(target=="mu.x") "se.x" else "se.y"
    # The different variances to account for depend on the target
    Gsel <- if(target == "mu.x") G[1,1] else G[2,1]
    Gdrift <- if(target == "mu.x") G[1,1] else G[2,2]
    Gerr <- if(target == "mu.x") Gv[1,1] else Gv[2,1]
    Eerr <- if(target == "mu.x") P[1,1]-G[1,1] else P[2,2]-G[2,2]
    h2 <- G[1,1]/P[1,1] # heritability of the selected trait
    # Selection gradient (always on the selected trait, the gradient on the correlated trait is 0 by definition)
    beta <- data$S/data$sig2.x
    # mean gradients to compute prediction errors: a bit sloppy, but this should not really matter
    mean.beta <- mean(abs(beta[data$Rep=="Up" | data$Rep == "Down"]), na.rm=TRUE)
    gen <- seq(1, max(data$Gen))
    # Per-generation multiplicative correction on G (1.5 at generation 1
    # when G0.boost is requested, 1 everywhere else).
    G.correct <- rep(1, length(gen))
    if (G0.boost) G.correct[1] <- 1.5
    # Predicted means: cumulative selection response (beta * Gsel),
    # accumulated over generations, expressed in the chosen normalization.
    pred.control <-
        if (normalization == "raw") { rep(data[data$Rep=="Control" & data$Gen == 1, target], length(gen)) }
        else if (normalization == "control") { rep(0, length(gen) ) }
        else if (normalization == "updown") { c(0, cumsum(0.5*(beta[data$Rep=="Down"] + beta[data$Rep=="Up"]))[-length(gen)]*Gsel*G.correct[-length(gen)]) }
    pred.up <-
        if (normalization == "raw") { data[data$Rep=="Up" & data$Gen == 1, target] + c(0, cumsum(beta[data$Rep=="Up"])[-length(gen)]*Gsel*G.correct[-length(gen)]) }
        else if (normalization == "control") { c(0, cumsum(beta[data$Rep=="Up"])[-length(gen)]*Gsel*G.correct[-length(gen)]) }
        else if (normalization == "updown") { c(0, cumsum(0.5*(beta[data$Rep=="Up"] - beta[data$Rep=="Down"]))[-length(gen)]*Gsel*G.correct[-length(gen)]) }
    pred.down <-
        if (normalization == "raw") { data[data$Rep=="Down" & data$Gen == 1, target] + c(0, cumsum(beta[data$Rep=="Down"])[-length(gen)]*Gsel*G.correct[-length(gen)]) }
        else if (normalization == "control") { c(0, cumsum(beta[data$Rep=="Down"])[-length(gen)]*Gsel*G.correct[-length(gen)]) }
        else if (normalization == "updown") { c(0, cumsum(0.5*(beta[data$Rep=="Down"] - beta[data$Rep=="Up"]))[-length(gen)]*Gsel*G.correct[-length(gen)]) }
    # Calculation of prediction variances
    # Three terms : environmental sampling error + drift + error in the additive variance estimate
    # Environmental sampling error : at the current generation, always Ve/N
    # Drift : cumulative over generations, + Va / N every generation (+ Va/N for generation 0)
    # in selected lines, there are only Np parents, but the offspring number is normalized
    # theory shows that the increase in variance is 1/Np - 1/2N every generation
    # Error on the estimate of Va :
    # this error cancels for drift (assuming overestimation is as likely as underestimation)
    # but not for selected lines. A term in Var(Va) * t^2 * beta^2 needs to be considered.
    # this error cumulates (quadratically) over generations
    # erf / erfinv: error function and its inverse, written from the normal
    # quantile/CDF functions.
    erf <- function(x) 2 * pnorm(x * sqrt(2)) - 1
    erfinv <- function (x) qnorm((1 + x)/2)/sqrt(2)
    # varW: correction term for the variance among selected parents under
    # truncation selection; depends on the selected proportion (Np/N) and
    # on the heritability h2 through tmp.H.
    tmp.X <- erfinv(2*(N-Np)/N-1)
    tmp.H <- sqrt((1-h2)/(2*h2))
    tmp.X1 <- erf(tmp.X + tmp.H)
    tmp.X2 <- erf(tmp.X - tmp.H)
    varW <- (N^2-2*Np^2)/(2*Np^2) - ((N^2-Np^2)/(2*Np^2))*tmp.X1 - 0.5*tmp.X2
    # Per-generation variance components (vectors over gen).
    verr.drift <- Gdrift*G.correct*(gen-1)*(1/Np - 1/(2*N))
    verr.drift.sel <- Gdrift*G.correct*(gen-1)*(1/Np - 1/(2*N) - varW/N)
    verr.va <- Gerr * (gen-1)^2 * mean.beta^2
    verr.env <- Eerr/N
    # print(cbind(verr.drift, verr.drift.sel))
    # If the data is control or up-down centered, the error variance is redistributed:
    verr.control <-
        if (normalization == "raw") { verr.drift + verr.env }
        else if (normalization == "control") { rep(0, length(gen)) }
        else if (normalization == "updown") { verr.drift + 0.5*verr.drift.sel + 1.5*verr.env }
    verr.sel <-
        if (normalization == "raw") { verr.drift.sel + verr.va + verr.env }
        else if (normalization == "control") { verr.drift + verr.drift.sel + verr.va + 2*verr.env }
        else if (normalization == "updown") { 0.5*verr.drift.sel + verr.va + 0.5*verr.env }
    # Scaling the phenotype
    phen.control <-
        if (normalization == "raw") { data[data$Rep=="Control",target] }
        else if (normalization == "control") { rep(0, length(gen)) }
        else if (normalization == "updown") { data[data$Rep=="Control",target] - 0.5*data[data$Rep=="Up",target] - 0.5*data[data$Rep=="Down",target] }
    phen.up <-
        if (normalization == "raw") { data[data$Rep=="Up",target] }
        else if (normalization == "control") { data[data$Rep=="Up",target] - data[data$Rep=="Control",target] }
        else if (normalization == "updown") { 0.5*data[data$Rep=="Up",target] - 0.5*data[data$Rep=="Down",target] }
    phen.down <-
        if (normalization == "raw") { data[data$Rep=="Down",target] }
        else if (normalization == "control") { data[data$Rep=="Down",target] - data[data$Rep=="Control",target] }
        else if (normalization == "updown") { 0.5*data[data$Rep=="Down",target] - 0.5*data[data$Rep=="Up",target] }
    # Scaling the std errors (squared standard errors of the combined
    # quantities add, hence the sqrt of the weighted sums below)
    se.control <-
        if (normalization == "raw") { data[data$Rep=="Control",se.target] }
        else if (normalization == "control") { rep(0, length(gen)) }
        else if (normalization == "updown") { sqrt(data[data$Rep=="Control",se.target]^2 + (1/4)*(data[data$Rep=="Up",se.target]^2 + data[data$Rep=="Down",se.target]^2)) }
    se.up <-
        if (normalization == "raw") { data[data$Rep=="Up",se.target] }
        else if (normalization == "control") { sqrt(data[data$Rep=="Up",se.target]^2 + data[data$Rep=="Control",se.target]^2) }
        else if (normalization == "updown") { sqrt((1/4)*(data[data$Rep=="Up",se.target]^2 + data[data$Rep=="Down",se.target]^2)) }
    se.down <-
        if (normalization == "raw") { data[data$Rep=="Down",se.target] }
        else if (normalization == "control") { sqrt(data[data$Rep=="Down",se.target]^2 + data[data$Rep=="Control",se.target]^2) }
        else if (normalization == "updown") { sqrt((1/4)*(data[data$Rep=="Up",se.target]^2 + data[data$Rep=="Down",se.target]^2)) }
    return(list(
        drift = verr.drift,
        va = verr.va,
        env = verr.env,
        Control = data.frame(gen=gen, pred=pred.control, phen=phen.control, se=se.control, verr=verr.control),
        Up = data.frame(gen=gen, pred=pred.up, phen=phen.up, se=se.up, verr=verr.sel),
        Down = data.frame(gen=gen, pred=pred.down, phen=phen.down, se=se.down, verr=verr.sel)))
}
# Plots a single time series
plot.ts.common <- function(pred, gen=seq_along(pred), verr=NULL, data, data.se=NULL, CI.factor=1.96, col.line="black", col.point=col.line, col.err=makeTransparent(col.line, alpha=50), pch=1, prediction=TRUE, data.lty=if(prediction) 2 else 1, ...) {
    # pred: the vector of predicted phenotypes
    # gen: generation numbers (defaults to 1..length(pred))
    # verr: vector of prediction error variances (NULL = no shaded band)
    # data: observed data points
    # data.se: standard errors for the observed phenotypic means
    # CI.factor: factor by which sqrt(verr) / data.se are multiplied to draw intervals
    # prediction: if FALSE, only the data points and error bars are drawn
    # remaining arguments: graphical line, point and color options
    if (prediction) {
        # Shaded prediction interval first, then the prediction line on top.
        if (!is.null(verr)) {
            half.width <- CI.factor*sqrt(verr)
            polygon(c(gen, rev(gen)),
                    c(pred - half.width, rev(pred + half.width)),
                    border=NA, col=col.err)
        }
        lines(gen, pred, col=col.line, lwd=3)
    }
    # Observed means (generations with NA observations are dropped).
    observed <- !is.na(data)
    points(gen[observed], data[observed], pch=pch, lty=data.lty, type="b", col=col.point)
    # Error bars on the observed means (skipped when no/zero SE supplied).
    if (!is.null(data.se) && data.se[1] > 0) {
        arrows(x0=gen, y0=data - CI.factor*data.se, y1=data + CI.factor*data.se,
               length=0.1, angle=90, code=3, col=col.point)
    }
}
# Call the plot routine on the recentered data.
plot.data.recenter <- function(data.recenter, col.data=c(Control="gray50", Up="black", Down="black"), pch=18, CI.factor=1.96, ylab="Phenotype", xlab="Generations", ylim=NULL, prediction=TRUE, G0=0, axis.type="log", ...) {
    # data.recenter: list returned by recenter() (elements Control/Up/Down,
    #   each a data frame with columns gen, pred, phen, se, verr)
    # col.data: named vector of colors, one per selection line
    # pch: plotting symbol for the observed means
    # CI.factor: multiplier for standard errors / prediction intervals
    # G0: label of the first generation on the x axis
    # axis.type: display scale passed to custom.axis()/custom.label()
    # ...: forwarded to plot()
    gens <- data.recenter$Control$gen
    if (is.null(ylim)) {
        # Default y range: span of all observed phenotypes, padded by 0.2
        # (non-data-frame list elements contribute NULL and are ignored).
        all.phen <- do.call(c, lapply(data.recenter, function(x) x$phen))
        ylim <- 0.2*c(-1,1) + range(all.phen, na.rm=TRUE)
    }
    plot(NULL, xlim=range(gens), ylim=ylim, xlab=xlab,
         ylab=if(ylab == "") "" else custom.label(ylab, axis.type),
         xaxt="n", yaxt="n", ...)
    axis(1, at=gens, labels=as.character(G0:(G0-1+length(gens))))
    custom.axis(2, ylim, axis.type)
    for (ll in c("Control","Up","Down")) {
        series <- data.recenter[[ll]]
        # Bug fix: `pch` used to be accepted but never forwarded, so the
        # symbol silently fell back to plot.ts.common()'s default.
        plot.ts.common(series$pred, series$gen, series$verr, series$phen,
                       series$se, CI.factor=CI.factor, col.line=col.data[ll],
                       pch=pch, prediction=prediction)
    }
}
|
41f07b6cbacbf8361c2bd45756f2d0dbf81cfdb7 | a593d96a7f0912d8dca587d7fd54ad96764ca058 | /R/ml_model_bisecting_kmeans.R | e1158a9c9af559fd1c0086f49ad679e13e16beeb | [
"Apache-2.0"
] | permissive | sparklyr/sparklyr | 98f3da2c0dae2a82768e321c9af4224355af8a15 | 501d5cac9c067c22ad7a9857e7411707f7ea64ba | refs/heads/main | 2023-08-30T23:22:38.912488 | 2023-08-30T15:59:51 | 2023-08-30T15:59:51 | 59,305,491 | 257 | 68 | Apache-2.0 | 2023-09-11T15:02:52 | 2016-05-20T15:28:53 | R | UTF-8 | R | false | false | 1,101 | r | ml_model_bisecting_kmeans.R | new_ml_model_bisecting_kmeans <- function(pipeline_model, formula, dataset,
features_col) {
  # Wrap the fitted bisecting-k-means pipeline in the shared clustering
  # model container, tagged with its own S3 class for method dispatch.
  m <- new_ml_model_clustering(
    pipeline_model = pipeline_model,
    formula = formula,
    dataset = dataset,
    features_col = features_col,
    class = "ml_model_bisecting_kmeans"
  )
  model <- m$model
  # Expose the Spark-side training summary directly on the wrapper.
  m$summary <- model$summary
  # cluster_centers() returns one numeric vector per cluster; bind them
  # into a data frame with one named column per input feature.
  m$centers <- model$cluster_centers() %>%
    do.call(rbind, .) %>%
    as.data.frame() %>%
    rlang::set_names(m$feature_names)
  # Within-set sum of squared errors on the training data. Warnings are
  # suppressed and any failure leaves `cost` NULL (later printed as
  # "not computed." by the print method).
  m$cost <- suppressWarnings(
    possibly_null(
      ~ pipeline_model %>%
        ml_stage(1) %>%
        ml_transform(dataset) %>%
        model$compute_cost()
    )()
  )
  m
}
#' @export
print.ml_model_bisecting_kmeans <- function(x, ...) {
  # S3 print method: header with the number of clusters, the matrix of
  # cluster centers, then the within-set sum of squared errors.
  # NOTE(review): the header says "K-means" although this is the bisecting
  # variant -- confirm whether the wording is intentional before changing
  # user-visible output.
  n_clusters <- nrow(x$centers)
  cluster_noun <- if (n_clusters == 1) "cluster" else "clusters"
  cat(sprintf("K-means clustering with %s %s", n_clusters, cluster_noun),
      sep = "\n")
  print_newline()
  ml_model_print_centers(x)
  print_newline()
  cost_text <- if (is.null(x$cost)) "not computed." else x$cost
  cat("Within Set Sum of Squared Errors = ", cost_text)
}
|
c78fee39558554f1baf7d0c364dede3c2947d754 | 5d3deb2b60727315f2ec162b2be22f5202dd9e29 | /V-code/Chemical_data.R | 997aaf48703347cec839f392a3860617ce640c3b | [] | no_license | people-r-strange/spacey | 7f4a5003ed9e27db75caf1938361d0af6ea8f8ba | cd350573c9af12d8b0930a4fa23a4efcc8c2c324 | refs/heads/main | 2023-03-20T21:32:26.276748 | 2021-03-23T03:18:48 | 2021-03-23T03:18:48 | 349,056,563 | 0 | 0 | null | 2021-03-23T03:18:48 | 2021-03-18T12:00:30 | HTML | UTF-8 | R | false | false | 1,944 | r | Chemical_data.R | library(tidyverse)
library(readxl)
library(knitr)
library(ggridges)
library(wesanderson)
#load sensor data
chemical_data <- read_excel("data/Sensor Data.xlsx")
#renaming columnns
names(chemical_data)[3] <- "DateTime"
#look at the dates when the trucks were there...
#from suspsicious rangers 3 table the dates in question are:
dates <- c("2016-02", "2016-03", "2016-05", "2016-05",
"2016-05")
chemical_dates <- chemical_data %>%
mutate(date = format(DateTime, "%Y-%m"))
chemical_means <- chemical_dates %>%
group_by(Chemical, Monitor, date) %>%
mutate(average_reading = mean(Reading))
daily_chemical_means <- chemical_means %>%
select(Chemical, Monitor, date, average_reading)
daily_chemical_means <- daily_chemical_means %>% as.factor(Monitor)
#visualize the chemical data
ggplot( daily_chemical_means, aes(x=date, y= average_reading, fill=Chemical)) +
geom_bar(stat="identity", position="dodge") +
labs(title = "Average Chemical Reading", y = "Average Reading", x = "Date") +
scale_fill_discrete(
name = "Chemical") +
theme_light()
#focus on 2016-12
ggplot(daily_chemical_means, aes(x=as.factor(Monitor), y= average_reading, fill = Chemical)) +
facet_wrap( ~Chemical, ncol=2) +
geom_bar(stat="identity", position="dodge") +
labs(title = "Average Chemical Reading for December 2016", y = "Average Reading", x = "Monitor") +
scale_fill_brewer(palette="Dark2")
# Read in wind data.
# NOTE(review): absolute home-directory path, unlike the project-relative
# path used for the chemical data above -- consider making it relative too.
Meteorological_Data <- read_excel("~/Documents/Spring2021/Visual Analytics/Spacey/DC3-data/Sensor Data/Meteorological Data.xlsx")

# Keep the first three columns only.
Meteorological_Data <- Meteorological_Data %>%
  select(1:3)

# Separate date and time.
Meteorological_Data_sep <- separate(Meteorological_Data, Date, c("date", "time"), sep = " ")

# Reduce the date to a year-month string ("YYYY-MM") so it can be matched
# against the truck months below.
# (Bug fix: the original `Meteorological_Data_sep <- as.yearmon(date)`
# discarded the data frame entirely -- it applied zoo::as.yearmon (zoo is
# never loaded) to the base function `date` instead of the column.
# Assumes the `date` column is "YYYY-MM-DD..." after separate(); confirm
# against the raw file.)
Meteorological_Data_sep <- Meteorological_Data_sep %>%
  mutate(date = substr(date, 1, 7))

# Keep only the months when the trucks were present.
wind <- filter(Meteorological_Data_sep, date %in% c('2016-02', '2016-03', '2016-05', '2016-05',
                                                    '2016-05'))
c1f95a28b3e09aed07e17fbaaac79226142b2a11 | 3810f013ef1bb6da62ae44849f04575ee8daf2f7 | /R/get_week.R | 2612553d11f5592338741b196a3be87457b5a18a | [
"MIT"
] | permissive | minghao2016/incidence2 | 72dac5797bb44a8df537e3212710247ec8365a82 | 5aa52edf3526def57cff1458dfad026940674723 | refs/heads/master | 2023-01-11T11:30:00.512052 | 2020-11-12T14:21:25 | 2020-11-12T14:21:25 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,108 | r | get_week.R | #' Translate user input to the start date of the week
#'
#' @param a Weekday specification: ISOweek, MMWRweek, EPIweek, Mon-week,
#' Tue-week, etc.
#'
#' @return the corresponding weekday
#'
#' @examples
#' get_week_start("ISOweek")
#' get_week_start("MMWRweek")
#' get_week_start("EPIweek")
#'
#' # weeks that start on saturday
#' get_week_start("Sat-week")
#' get_week_start("week: Saturday")
#' get_week_start("2 weeks: Saturday")
#' get_week_start("epiweek: Saturday")
#'
#' @noRd
get_week_start <- function(weekday) {
wkdy <- gsub("weeks?", "", tolower(weekday))
wkdy <- gsub("[[:punct:][:blank:][:digit:]]*", "", wkdy)
wkdy <- if (wkdy == "") "monday" else wkdy # the input was "weeks"
res <- switch(wkdy,
"mmwr" = "sunday", # MMWR == CDC epiweek
"epi" = "sunday", # CDC epiweek
"iso" = "monday", # ISOweek == WHO epiweek
wkdy # all others
)
gsub("epi", "", res) # if they specify something like "epiweek:saturday"
}
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
#' Translate a custom interval to a valid interval
#'
#' @param the_interval An interval like `"2 epiweeks"` or `"1 ISOweek"`.
#'
#' @return An interval compatible with `seq.Date()`.
#' @examples
#' get_week_duration("2 weeks (wednesday)") # 2 weeks
#' get_week_duration("2 epiweeks") # 2 weeks
#'
#' @noRd
get_week_duration <- function(the_interval) {
if (the_interval == 7) {
return(the_interval)
}
res <- gsub("^(\\d*) ?.*(weeks?).*$", "\\1 \\2", tolower(the_interval), perl = TRUE)
trimws(res)
}
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
get_type_of_week <- function(x) {
  # Classify the weekly grouping of `x` from the `week_start` attribute of
  # its date-group column: 1 -> "ISO", 7 -> "MMWR"; any other start day is
  # reported as the literal weekday of the first date, e.g. "(Saturday)".
  dates <- x[[get_dates_name(x)]]
  groups <- x[[get_date_group_names(x)]]
  start <- as.character(attr(groups, "week_start"))
  if (start == "1") {
    "ISO"
  } else if (start == "7") {
    "MMWR"
  } else {
    sprintf("(%s)", weekdays(dates[1]))
  }
}
# -------------------------------------------------------------------------
|
48860f89ef9e942b4400f0b0b5a0ff58b495b0bb | df88c9ad1ff85f827381290a1d98aee51e51f3be | /primary/figures/spillover.R | c233fc1a8da656b7b88ffbccc2f35fcca5f967b6 | [] | no_license | jadebc/WBK-primary-outcomes | 02c2a722b99c1201361c64ac23c765fb7dd3a68a | 3d93265bc32baaacb15cf5dd3d6bf770917af39d | refs/heads/master | 2022-04-09T23:22:48.235396 | 2018-03-29T08:13:17 | 2018-03-29T08:13:17 | 75,782,104 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,815 | r | spillover.R | ##############################################
# WASH Benefits Kenya
# Primary outcome analysis
# Generate plots of each primray outcome by
# the proportion of treated households within
# 2km of each control compound for sets of treatments
#
# by Jade Benjamin-Chung (jadebc@berkeley.edu)
##############################################
rm(list=ls())
library(ggplot2)
source("~/Documents/CRG/wash-benefits/kenya/src/primary/analysis/10-btw-clus-spill/10a-distance-functions.R")
source("~/documents/crg/wash-benefits/kenya/src/primary/analysis/0-base-programs.R")
#----------------------------------------------------
# Read in distance matrices
#----------------------------------------------------
load("~/Dropbox/WBK-primary-analysis/Results/Jade/washb-dist-sub.RData")
#----------------------------------------------------
# Read in outcome data - HAZ
#----------------------------------------------------
# load child length endline dataset
e=read.csv("~/Dropbox/WBK-primary-analysis/Data/final/jade/endline-anthro.csv",stringsAsFactors=TRUE)
e=preprocess.anthro(e, "haz")
# subset to control compounds only
e = subset(e,e$tr=="Control")
e=e[order(e$hhid),]
#----------------------------------------------------
# Read in outcome data - diarrhea
#----------------------------------------------------
# load child length endline dataset
data=read.csv("~/Dropbox/WBK-primary-analysis/Data/final/jade/diarrhea.csv")
d=preprocess.diarr(data)
# subset to control compounds only
d = subset(d,d$tr=="Control")
#----------------------------------------------------
# Count the number of treated compounds within a control
# compound for a given set of treatments
#----------------------------------------------------
W.comp2km=comp2km(anyW.mat)
S.comp2km=comp2km(anyS.mat)
H.comp2km=comp2km(anyH.mat)
N.comp2km=comp2km(anyN.mat)
# get the number of compounds within 2 km - any tr
C.comp2km=comp2km(C.mat)
W.comp2km$prop=W.comp2km$comp2km / C.comp2km$comp2km
S.comp2km$prop=S.comp2km$comp2km / C.comp2km$comp2km
H.comp2km$prop=H.comp2km$comp2km / C.comp2km$comp2km
N.comp2km$prop=N.comp2km$comp2km / C.comp2km$comp2km
#----------------------------------------------------
# Merge datasets
#----------------------------------------------------
mymerge=function(trNdata,ydata,y){
y.ncomp=merge(trNdata,ydata[,c("hhid",y)],by="hhid",all.x=TRUE,all.y=TRUE)
y.ncomp=y.ncomp[!is.na(y.ncomp[[y]]),]
y.ncomp=y.ncomp[!is.na(y.ncomp$prop),]
return(y.ncomp)
}
W.dat.haz=mymerge(W.comp2km,e,"haz")
S.dat.haz=mymerge(S.comp2km,e,"haz")
H.dat.haz=mymerge(H.comp2km,e,"haz")
N.dat.haz=mymerge(N.comp2km,e,"haz")
ggplot(W.dat.haz,aes(x=prop,y=haz))+geom_point()
ggplot(S.dat.haz,aes(x=prop,y=haz))+geom_point()
ggplot(H.dat.haz,aes(x=prop,y=haz))+geom_point()
ggplot(N.dat.haz,aes(x=prop,y=haz))+geom_point()
|
b1308e93e88e11d67d24c1d4866302a7197d6b9e | 8f1be5778fce0622c8026aa219a995361f723c8c | /BBMRIomics/man/runQuery.Rd | 359c8941f457842a6f9fc8832fa486d83e1b492e | [] | no_license | bbmri-nl/BBMRIomics | aa5112e9f20aafa9ae506332ba0db556e544f7b7 | 1c7d9a6ef966365be2b95e2066e8f2fd2006c757 | refs/heads/master | 2023-05-31T13:21:37.130878 | 2023-04-28T17:29:31 | 2023-04-28T17:29:31 | 95,667,968 | 3 | 0 | null | null | null | null | UTF-8 | R | false | true | 892 | rd | runQuery.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Sql_MDb_Helpers.R
\name{runQuery}
\alias{runQuery}
\title{send a query to the database}
\usage{
runQuery(
query,
usrpwd = "guest:guest",
url = "localhost",
port = 5432,
db = "rp3_rp4_meta",
verbose = T
)
}
\arguments{
\item{query}{SQL query to be send to the database}
\item{usrpwd}{Username and password concatenated by a colon,
defaults to "guest:guest".}
\item{url}{URL through which the database can be accessed,
defaults to "localhost".}
\item{port}{port to be used to connect to the database, defaults
to 5432.}
\item{db}{name of the database to be connected to, defaults to
"rp3_rp4_meta".}
}
\value{
A data.frame with the query results.
}
\description{
send a query to the database
}
\examples{
\dontrun{
visits <- runQuery("SELECT * FROM visit;", RP3_MDB_USRPWD)
}
}
\author{
Davy Cats
}
|
2bdd4838ab50097aab5b1e665f669d57164c984d | 3578a4578f61435c25aea39f1c7a7c6968f1517e | /tables.R | 8382456280459b11b50aec26f64f9a4abdb768f2 | [] | no_license | dushoff/TZ_clinics | ea40addedecac40d5b33492b4b8cc7bae1d3cc63 | c7d533f88ec94c973af99b8afde40e7b6ee959d4 | refs/heads/master | 2020-06-12T20:34:04.819676 | 2016-02-26T23:39:36 | 2016-02-26T23:39:36 | 42,281,791 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,849 | r | tables.R | library(dplyr)
library(tidyr)
year <- (c_visits
%>% select(patientid,arvstatuscode,visitdate,visitnum)
%>% group_by(patientid)
%>% mutate(startyear = as.numeric(format(min(visitdate),"%Y")))
)
arvyear <- (year
%>% group_by(patientid)
%>% filter(arvstatuscode == "Start ARV")
%>% mutate(arvyear = as.numeric(format(min(visitdate),"%Y")))
)
tmpd <- (arvyear %>% select(c(startyear,arvyear)))
### Collapse columns into key/value pairs. JD does not like it.
gather_by_year <- (gather(tmpd[,2:3],startyear,arvyear))
all <- (year %>% filter(visitnum == 1) %>% select(startyear) %>% group_by(startyear))
allcount <- (count(all,startyear)) ## Is this counting all patients by their visit number one per startyear?
newtable <- (count(newdat2,startyear,arvyear) %>% ungroup %>% arrange(startyear))
yeartotal <- (count(newdat2,startyear) %>% ungroup %>% arrange(startyear))
newtable2 <- (matrix(NA,nrow=4,ncol=5))
newtable2[1,1] <- (newtable$n[1]) #are [1,1] these number stands for row number and column number for a table?
newtable2[1,2] <- (newtable$n[2])
newtable2[1,3] <- (newtable$n[3])
newtable2[1,4] <- (newtable$n[4])
newtable2[1,5] <- (allcount$n[1] - yeartotal$n[1])
newtable2[2,2] <- (newtable$n[5])
newtable2[2,3] <- (newtable$n[6])
newtable2[2,4] <- (newtable$n[7])
newtable2[2,5] <- (allcount$n[2] - yeartotal$n[2])
newtable2[3,3] <- (newtable$n[8])
newtable2[3,4] <- (newtable$n[9])
newtable2[3,5] <- (allcount$n[3] - yeartotal$n[3])
newtable2[4,4] <- (newtable$n[10])
newtable2[4,5] <- (allcount$n[4] - yeartotal$n[4])
rownames(newtable2) <- (c("2011","2012","2013","2014"))
colnames(newtable2) <- (c("2011","2012","2013","2014","NA"))
print(newtable2)
library(ggplot2)
g <- (ggplot(newtable, aes(x=arvyear, y=n, colour=factor(startyear), group = startyear) ) + geom_line() + geom_point() + theme_bw())
print(g)
|
64b3b3131fa47e132ef022c0f9898b5279e45a99 | 1577e1cf4e89584a125cffb855ca50a9654c6d55 | /tcl/tcl_ext/quicktimetcl/quicktimetcl/QuickTimeTcl.r | db46fd1ee57a81f4c63b7bcb950125dd5954a020 | [
"BSD-3-Clause"
] | permissive | apple-open-source/macos | a4188b5c2ef113d90281d03cd1b14e5ee52ebffb | 2d2b15f13487673de33297e49f00ef94af743a9a | refs/heads/master | 2023-08-01T11:03:26.870408 | 2023-03-27T00:00:00 | 2023-03-27T00:00:00 | 180,595,052 | 124 | 24 | null | 2022-12-27T14:54:09 | 2019-04-10T14:06:23 | null | MacCentralEurope | R | false | false | 2,220 | r | QuickTimeTcl.r | /*
* QuickTimeTcl.r --
*
* This file creates resources used by the QuickTimeTcl package.
*
* Copyright (c) 1998 Jim Ingham and Bruce O'Neel
*
* $Id: QuickTimeTcl.r,v 1.1.1.1 2003/04/04 16:24:54 matben Exp $
*/
#include <Types.r>
#include <SysTypes.r>
#define RESOURCE_INCLUDED
#define RC_INVOKED
#include "tcl.h"
/*
* The folowing include and defines help construct
* the version string for Tcl.
*/
#define SCRIPT_MAJOR_VERSION 3 /* Major number */
#define SCRIPT_MINOR_VERSION 1 /* Minor number */
#define SCRIPT_RELEASE_SERIAL 0 /* Really minor number! */
#define RELEASE_LEVEL alpha /* alpha, beta, or final */
#define SCRIPT_VERSION "3.1"
#define SCRIPT_PATCH_LEVEL "3.1"
#define FINAL 0 /* Change to 1 if final version. */
#if FINAL
# define MINOR_VERSION (SCRIPT_MINOR_VERSION * 16) + SCRIPT_RELEASE_SERIAL
#else
# define MINOR_VERSION SCRIPT_MINOR_VERSION * 16
#endif
#define RELEASE_CODE 0x00
/* 'vers' (1): the file's own version resource, shown in Finder's Get Info. */
resource 'vers' (1) {
    SCRIPT_MAJOR_VERSION, MINOR_VERSION,
    RELEASE_LEVEL, 0x00, verUS,
    SCRIPT_PATCH_LEVEL,
    SCRIPT_PATCH_LEVEL ", © 1998 Bruce O'Neel, © 2000-2003 Mats Bengtsson"
};
/* 'vers' (2): the version of the product suite this file belongs to. */
resource 'vers' (2) {
    SCRIPT_MAJOR_VERSION, MINOR_VERSION,
    RELEASE_LEVEL, 0x00, verUS,
    SCRIPT_PATCH_LEVEL,
    "QuickTimeTcl " SCRIPT_PATCH_LEVEL " © 1998-2003"
};
/*
 * The -16397 string will be displayed by Finder when a user
 * tries to open the shared library. The string should
 * give the user a little detail about the library's capabilities
 * and enough information to install the library in the correct location.
 * A similar string should be placed in all shared libraries.
 */
resource 'STR ' (-16397, purgeable) {
    "QuickTimeTcl Library\n\n"
    "This library provides the ability to run QuickTime "
    " commands from Tcl/Tk programs. To work properly, it "
    "should be placed in the ‘Tool Command Language’ folder "
    "within the Extensions folder."
};
/*
 * Embed the package index in the resource fork so Tcl can locate and load
 * the library.  (NOTE(review): the original comment said "Tk library", but
 * the 'TEXT' resource below is actually the pkgIndex.tcl script.)
 */
data 'TEXT' (4000, "pkgIndex", purgeable, preload) {
    "# Tcl package index file, version 1.0\n"
    "if {[info tclversion] != "TCL_VERSION"} return\n"
    "package ifneeded QuickTimeTcl 3.1 [list load [file join $dir QuickTimeTcl"TCL_VERSION".shlb]]\n"
};
|
1fbefc9ec0c3243170d94c61cd5b0f300712d196 | 058e7ab0f39e470d82a9a2b78b2758d478f598a1 | /R/otherTools.R | 24d3692f6849dfa8764097d2c7d9ab356b16c9e8 | [] | no_license | sanadamakomi/kfltCNV | 2e0b9cd7d30adaa573572bbc176ecd74e2ea6620 | b3a0acd18191db782de738e9c8b8851679a5967b | refs/heads/master | 2020-04-16T22:57:47.166881 | 2019-07-29T08:01:37 | 2019-07-29T08:01:37 | 165,992,232 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,128 | r | otherTools.R | #' @title Call gender from BAM file.
#' @description Call gender by read depth of chromosome X and Y. Compare the
#' chi-squared values obtained to infer whether the male or female assumption
#' fits read depth better.
#' @param bam A character string of BAM file path.
#' @param lower A non-negative integer. Position of which coverage is lower than
#' the integer will not be count.
#' @return A character string, \emph{Unknow}, \emph{Female} or \emph{Male}.
#' @export
#' @import IRanges
#' @import GenomicRanges
#' @import GenomicAlignments
#' @import GenomeInfoDb
#' @import S4Vectors
#' @importFrom stats chisq.test
#' @importFrom Rsamtools idxstatsBam
#' @importFrom Rsamtools ScanBamParam
#' @author Zhan-Ni Chen
#' @examples
#' ####### Call gender from BAM file #######
#' callGenderByBam(system.file("extdata", 'testSample.bam', package ="kfltCNV"))
callGenderByBam <- function(bam, lower = 20) {
  # Per-chromosome mapped-read counts from the BAM index; keep the first 24
  # rows (assumes a human-style header ordering chr1-22, X, Y -- TODO confirm).
  df <- idxstatsBam(bam)
  df <- df[1:24,]
  # Reference autosome = the chromosome with the most mapped reads
  # (take the first in case of ties).
  auto_chr <- df[which(df[,'mapped'] == max(df[,'mapped'])), 'seqnames']
  auto_chr <- auto_chr[1]
  # Sequence names containing "X" or "Y" are treated as the sex chromosomes.
  bamSeqinfo <- seqinfo(BamFile(bam))
  seqn <- seqnames(bamSeqinfo)
  sexn <- seqn[sapply(c('X', 'Y'), grep, seqn)]
  # Restrict scanning to the full lengths of the autosome plus X and Y.
  which <- GRanges(Rle(c(auto_chr,sexn)), ranges = IRanges(start = c(1, 1, 1), end = seqlengths(bamSeqinfo)[c(auto_chr, sexn)]), seqinfo = bamSeqinfo)
  param <- ScanBamParam(which = which)
  # NOTE(review): `aln` is computed but never used below -- candidate for removal.
  aln <- readGAlignments(file = bam, index = bam, param = param)
  cov <- coverage(bam, param = param)
  # Per chromosome: [1] number of bases with coverage >= `lower`,
  # [2] summed coverage over those bases.
  df <- sapply(cov[c(auto_chr, sexn)], function(c, lower) {
    v <- Views(c, c >= lower)
    return(c(sum(width(v)), sum(sum(v))))
  }, lower = lower )
  # Mean depth over sufficiently covered bases, per chromosome.
  ratio <- df[2,] / df[1,]
  ratio_a <- ratio[grep(auto_chr, names(ratio))]
  ratio_x <- ratio[grep('X', names(ratio))]
  ratio_y <- ratio[grep('Y', names(ratio))]
  if (is.na(ratio_x) & is.na(ratio_y)) return('Unknow')
  # Chi-squared fit of the (autosome, sex-chromosome) depth pair against the
  # female (X ~ autosome; p = 2/4, 2/4) and male (Y ~ autosome/2; p = 2/3,
  # 1/3) expectations; a larger p-value means a better fit.
  f_a_x <- chisq.test(c(ratio_a, ratio_x), p = c(2/4, 2/4))$p.value
  m_a_y <- chisq.test(c(ratio_a, ratio_y), p = c(2/3, 1/3))$p.value
  if (ratio_x == 0 & ratio_y == 0) return('Unknow')
  if (ratio_x == 0) {
    # No X coverage: accept "Male" only if the male model is not rejected.
    if (m_a_y > 5E-2) return('Male')
    return('Female')
  } else if (ratio_y == 0) {
    if (f_a_x > 5E-2) return('Female')
    return('Male')
  } else {
    # Both present: pick the hypothesis with the better, non-rejected fit.
    if (f_a_x > m_a_y & f_a_x > 5E-2) return('Female')
    if (f_a_x < m_a_y & m_a_y > 5E-2) return('Male')
    return('Unknow')
  }
}
#' @title Call gender from coverage file.
#' @description Call gender by read depth of chromosome X and Y. Compare the
#' chi-squared values obtained to infer whether the male or female assumption
#' fits read depth better.
#' @param x A character string of coverage file (<fileName>.cov) path.
#' @return A character string, \emph{Unknow}, \emph{Female} or \emph{Male}.
#' @export
#' @import IRanges
#' @import GenomicRanges
#' @import S4Vectors
#' @importFrom stats chisq.test
#' @author Zhan-Ni Chen
#' @examples
#' ####### Call gender from coverage file #######
#' callGenderByCov(system.file("extdata", 'testSample.cov', package = "kfltCNV"))
callGenderByCov <- function (x) {
  # Read the coverage file into a GRanges with a `depth` metadata column.
  gr <- readCovFile(x)
  seqn <- as.character(as.vector(runValue(seqnames(gr))))
  # Sequence names containing "X" or "Y" are treated as the sex chromosomes.
  sexn <- unlist(lapply(c('X', 'Y'), function(chr) seqn[grep(chr, seqn)]))
  if (length(sexn) == 0) return('Unknow')
  # Mean depth per sex chromosome, and over the whole file (autosome proxy).
  sexgr <- split(gr, seqnames(gr))
  sexgr <- sexgr[sexn]
  ratio <- sapply(sexgr, function(g) mean(g$depth, na.rm = TRUE))
  ratio_a <- mean(gr$depth, na.rm = TRUE)
  ratio_x <- 0
  ratio_y <- 0
  # BUG FIX: the original tested `if (grepl('X', names(ratio)))`; grepl()
  # returns one logical per name, so `if` received a multi-element vector
  # (an error since R 4.2) and only the first element was consulted, which
  # could silently miss chrY. `any()` makes each condition a proper scalar.
  if (any(grepl('X', names(ratio)))) ratio_x <- ratio[grep('X', names(ratio))][1]
  if (any(grepl('Y', names(ratio)))) ratio_y <- ratio[grep('Y', names(ratio))][1]
  # Chi-squared fit: female expects depth(X) ~ depth(autosome) (p = 2/4, 2/4);
  # male expects depth(Y) ~ depth(autosome)/2 (p = 2/3, 1/3).
  f_a_x <- chisq.test(c(ratio_a, ratio_x), p = c(2/4, 2/4))$p.value
  m_a_y <- chisq.test(c(ratio_a, ratio_y), p = c(2/3, 1/3))$p.value
  if (ratio_x == 0 && ratio_y == 0) return('Unknow')
  if (ratio_x == 0) {
    # No X coverage: accept "Male" only if the male model is not rejected.
    if (m_a_y > 5E-2) return('Male')
    return('Female')
  } else if (ratio_y == 0) {
    if (f_a_x > 5E-2) return('Female')
    return('Male')
  } else {
    # Both present: pick the hypothesis with the better, non-rejected fit.
    if (f_a_x > m_a_y && f_a_x > 5E-2) return('Female')
    if (f_a_x < m_a_y && m_a_y > 5E-2) return('Male')
    return('Unknow')
  }
}
#' @title Check BAM file.
#' @description It will stop if BAM file is illegal.
#' @param x A character string or vector of BAM File path.
#' @export
#' @import IRanges
#' @import GenomicRanges
#' @import S4Vectors
#' @importFrom Rsamtools BamFile
#' @author Zhan-Ni Chen
#' @examples
#' ####### Check BAM file #######
#' checkBam(system.file("extdata", 'testSample.bam', package = "kfltCNV"))
checkBam <- function(x) {
  # Validate each BAM path for existence, presence of a .bai index, and index
  # freshness. Stops with an informative error on the first problem found.
  # FIX: the original used `a <- sapply(...)` purely for side effects, leaving
  # an unused binding and a type-unstable result; lapply + invisible() makes
  # the intent (side effects only, nothing returned) explicit.
  invisible(lapply(x, function(bam) {
    if (! file.exists(bam)) stop(paste0(bam, ' file is missing.'))
    # Index file registered for this BAM (NA when no index exists).
    bai <- BamFile(bam)$index
    if (is.na(bai)) stop(paste0(bam, '.bai index file is missing.'))
    # The index must be at least as new as the BAM, otherwise it may be stale.
    bam_info <- file.info(bam)
    bai_info <- file.info(bai)
    dt <- as.numeric(difftime(bai_info$mtime, bam_info$mtime, units = 'secs'))
    if (dt < 0) stop(paste0(bam, ' index file is older than BAM file.'))
  }))
}
|
f9198e81df3cf9cd52ee72249aee50aef4adc7a0 | 1a111b0a16d39f1387e189687b116698262688d1 | /simulator.R | e0d2e5e333888b16d66ba69864e2669ffe27cfa1 | [] | no_license | wisus/spatial_autoreg_code | e0a3afa92227f6ae8ffabeae09991a710bbb816e | 746b647ef2696bf344760394a79abb35233080d8 | refs/heads/master | 2022-01-09T03:39:48.480132 | 2019-06-17T15:25:31 | 2019-06-17T15:25:31 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,308 | r | simulator.R | library(RSpectra)
### Simulation: for bootstrap used in estimation
### format specify
### Format a number with exactly k digits after the decimal point, stripping
### any alignment whitespace that format() inserts for vectors.
specify_decimal <- function(x, k) {
  padded <- format(round(x, k), nsmall = k)
  gsub("\\s+", "", padded)
}
# simu.loc<-function(N, cov.type = "exp")
# {
# X_loc = runif(N, 0, sqrt(N))
# Y_loc = runif(N, 0, sqrt(N))
# loc = cbind(X_loc, Y_loc)
# if (cov.type=="quad")
# loc = loc*sqrt(10)*2
# return(loc)
# }
### for data generation
# Simulate the vector of N spatial autocoefficients rho in (-1, 1):
# a Gaussian field with covariance cov_X, shifted by alpha and passed through
# a scaled logistic link.
simu.rho <- function(beta, sig2, N, alpha, dist_loc2, cov.type = "exp")
{
  # Spatial covariance of the latent field X under the chosen kernel.
  if (cov.type=="exp")
    cov_X = cov.exp(dist_loc2, beta, sig2)
  else
    cov_X = cov.quad(dist_loc2, beta, sig2)
  #### simulate X
  eig_X = eigen(cov_X)
  # FIX: clamp numerically negative eigenvalues (round-off noise in a
  # nominally positive semi-definite covariance) to 0 before sqrt(),
  # matching the sibling simu.mu; otherwise sqrt() yields NaN and corrupts
  # the whole sample.
  eig_X$values[eig_X$values < 0] = 0
  sqrt_value = sqrt(eig_X$values)
  X = eig_X$vectors%*%(sqrt_value*rnorm(N, 0, 1)) # draw X ~ N(0, cov_X)
  # Scaled logistic link maps the latent field into (-1, 1).
  rhos = 2*exp(X+alpha)/(1+exp(X+alpha))-1
  return(rhos)
}
# Simulate N spatial intercepts mu_i: a mean-`mu` Gaussian field whose
# covariance is the exponential or quadratic kernel over dist_loc2.
simu.mu <- function(beta, sig2, N, mu, dist_loc2, cov.type = "exp")
{
  # Covariance of the latent field under the chosen kernel.
  cov_X = if (cov.type == "exp") {
    cov.exp(dist_loc2, beta, sig2)
  } else {
    cov.quad(dist_loc2, beta, sig2)
  }
  # Draw the field via the eigendecomposition, clamping numerically negative
  # eigenvalues to zero so the square root stays real.
  decomp = eigen(cov_X)
  root_vals = sqrt(pmax(decomp$values, 0))
  mus = decomp$vectors %*% (root_vals * rnorm(N, 0, 1)) + mu
  return(mus)
}
# Simulate the N x Time response matrix under the spatial autoregression:
# Y_t = rho * Y_{t-1} + eps_t + mu, with spatially correlated innovations.
simu.Y <- function(beta, sigy2, N, Time, rhos, dist_loc2, mu, cov.type = "exp")
{
  # Covariance of the innovations epsilon under the chosen kernel.
  cov_e = if (cov.type == "exp") {
    cov.exp(dist_loc2, beta, sigy2)
  } else {
    cov.quad(dist_loc2, beta, sigy2)
  }
  # Draw all N x Time innovations at once: eps = V %*% (sqrt(lambda) * Z).
  decomp = eigen(cov_e)
  root_vals = sqrt(decomp$values)
  eps_mat = decomp$vectors %*% (root_vals * matrix(rnorm(N*Time, 0, 1), nrow = N))
  # Recursion across time, seeded with Y_1 = eps_1.
  Y = matrix(0, nrow = N, ncol = Time)
  Y[, 1] = eps_mat[, 1]
  for (t in 2:Time) {
    Y[, t] = Y[, t - 1] * rhos + eps_mat[, t] + mu
  }
  return(Y = Y)
}
### for bootstrap estimation
# Simulate R replicate draws of the N autocoefficients rho, returned as one
# flattened vector of length N*R (column r holds replicate r before flattening).
simu.rho.rep <- function(beta, sig2, N, R, alpha, dist_loc2, cov.type = "exp")
{
  # Covariance of the latent field under the chosen kernel.
  cov_X = if (cov.type == "exp") {
    cov.exp(dist_loc2, beta, sig2)
  } else {
    cov.quad(dist_loc2, beta, sig2)
  }
  # All R replicate fields generated in a single N x R Gaussian draw.
  decomp = eigen(cov_X)
  root_vals = sqrt(decomp$values)
  X = decomp$vectors %*% (root_vals * matrix(rnorm(N*R, 0, 1), nrow = N))
  # Scaled logistic link maps the latent field into (-1, 1).
  rhos = 2*exp(X+alpha)/(1+exp(X+alpha))-1
  return(as.vector(rhos))
}
# Simulate R replicate draws of the spatial intercepts, flattened to a vector.
simu.mu.rep <- function(beta, sig2, N, R, mu, dist_loc2, cov.type = "exp")
{
  if (cov.type=="exp")
    cov_X = cov.exp(dist_loc2, beta, sig2)
  else
    cov_X = cov.quad(dist_loc2, beta, sig2)
  #### simulate X
  # Truncated eigendecomposition via RSpectra: only the top N-1 eigenpairs
  # are computed (presumably the smallest eigenvalue is negligible for this
  # covariance -- TODO confirm), which is cheaper than a full eigen().
  eig_X = eigs_sym(cov_X, nrow(cov_X)-1)
  #eig_X = eigen(cov_X)
  # Clamp numerically negative eigenvalues so sqrt() below stays real.
  eig_X$values[eig_X$values<0] = 0
  sqrt_value = sqrt(eig_X$values)
  # Note the Gaussian draw is sized by the number of retained eigenvalues
  # (N-1 per replicate), not N, to match the truncated basis.
  X = eig_X$vectors%*%(sqrt_value*matrix(rnorm(length(eig_X$values)*R, 0, 1), nrow = length(eig_X$values))) # generate X following cov_X
  #rhos = 2*pnorm(X+alpha)-1
  mus = X + mu # shift the zero-mean latent field by the global mean
  return(as.vector(mus))
}
# Simulate R stacked replicates of the autoregression: an (N*R) x Time matrix
# with Y_t = rho * Y_{t-1} + eps_t + mu, innovations spatially correlated
# within each replicate.
simu.Y.rep <- function(beta, sigy2, N, R, Time, rhos, dist_loc2, mus, cov.type = "exp")
{
  # Innovation covariance under the chosen kernel.
  cov_e = if (cov.type == "exp") {
    cov.exp(dist_loc2, beta, sigy2)
  } else {
    cov.quad(dist_loc2, beta, sigy2)
  }
  decomp = eigen(cov_e)
  root_vals = sqrt(decomp$values)
  # One N x Time innovation matrix per replicate, stacked row-wise.
  draw_block = function(r) {
    decomp$vectors %*% (root_vals * matrix(rnorm(N*Time, 0, 1), nrow = N))
  }
  eps_mat_R = do.call(rbind, lapply(1:R, draw_block))
  # Recursion across time for all replicates simultaneously.
  Y = matrix(0, nrow = N*R, ncol = Time)
  Y[, 1] = eps_mat_R[, 1]
  for (t in 2:Time) {
    Y[, t] = Y[, t - 1] * rhos + eps_mat_R[, t] + mus
  }
  return(Y = Y)
}
### Estimation functions
# ## estimate rho parameters
# # estimate theta_X
# estThetaRho<-function(rhos, dist_loc2)
# {
# #X1 = qnorm((rhos+1)/2)
# rhos[which(abs(rhos)>1)] = sign(rhos[which(abs(rhos)>1)])*rep(0.999, sum(abs(rhos)>1))
# y1 = (rhos+1)/2
# X1 = log(y1/(1-y1))
# alpha = mean(X1)
# thetaX = lse.X(X1 - alpha, dist_loc2)
# return(c(alpha, thetaX))
# }
#
# estThetaMu<-function(mus, dist_loc2)
# {
# #X1 = qnorm((rhos+1)/2)
# mu = mean(mus)
# thetaMu = lse.X(mus-mu, dist_loc2)
# return(c(mu, thetaMu))
# }
#
# # estimate the parameters in rhos (theta_X)
# lse.X<-function(Y, dist_loc2) # Y here is X-alpha, which is centered
# {
# sig_hat = tcrossprod(Y)
#
# theta = c(1, var(as.vector(Y)))
# iter = 1; del = 1
# while(mean(abs(del))>10^-3&iter<1000)
# {
# #cat(mean(abs(del)), " ", theta, "\n")
# del = lse.step(sig_hat, beta = theta[1], sigy2 = theta[2], dist_loc2)
# theta = theta - del*0.5
# if (any(theta<0|theta>10))
# theta = runif(2, 0.1,10)
# iter = iter+1
# }
# return(abs(theta)) # since exponential covariance model is symmetric of beta, we restrict theta to be positive
# }
#
# # estimate rho and mu by iterations
# ### Estimation functions
#
# estRhoMu<-function(Y)
# {
# Time = ncol(Y)
# rhosmu = apply(Y, 1, function(y){
# x = cbind(1, y[-Time])
# return(solve(crossprod(x))%*%crossprod(x, y[-1])) # estimate rho for each location
# })
# #mu = mean(rhosmu[1,])
# return(rhosmu = rhosmu)
# }
#
# ## estimate epsilon parameters
# # filter to obtain residuals
# filter<-function(Ymat, rhos,mu)
# {
# Time = ncol(Ymat)
# #Ymat1 = cbind(mu, rhos, Ymat)
# #eps = apply(Ymat1, 1, function(x) x[-(1:3)]-x[2]*x[-c(1,2,ncol(Ymat1))] - x[1])
# eps = Ymat[,-1] - rhos*Ymat[,-Time] - mu
# return(eps)
# }
#
# # estimate the parameters epsilon theta_e
# lse.theta<-function(Y, dist_loc2, rhos, mu)
# {
# Time = ncol(Y)
# Y0 = Y
# Y = Y[,-1] - rhos*Y[,-Time] - mu # residuals
# if (is.null(dim(Y)))
# sig_hat = tcrossprod(Y)
# else
# sig_hat = tcrossprod(Y)/ncol(Y)
#
# theta = c(1, 0.5) # the initial value
# iter = 1; del = 1
# while(mean(abs(del))>10^-3&iter<1000)
# {
# #cat(mean(abs(del)), " ", theta, "\n")
# del = lse.step(sig_hat, beta = theta[1], sigy2 = theta[2], dist_loc2) # each step
# theta = theta - del*0.5
# if (any(theta<0|theta>5)) # if the value is beyond this, it might not converge due to the initial values
# theta = runif(2, 0.1,5)
# iter = iter+1
# }
# return(abs(theta))
# }
#
#
# # for each newton-raphson iteration, the following function gives each step
# # only works for exponential model
# lse.step <- function(sig_hat, beta, sigy2, dist_loc2)
# {
# exp_loc = exp(-beta^2*dist_loc2)
# sig = sigy2*exp_loc
# sig_del = sig_hat - sig
# sig_del2 = sig_hat - 2*sig
#
# ### first order derivative: gradient
# grad_beta = 4*beta*sum(sig_del*sig*dist_loc2)
# grad_sigy2 = -2*sum(sig_del*exp_loc)
# grad_para = c(grad_beta, grad_sigy2)
#
# ### hessian matrix
# hmat = matrix(0,2,2)
# hmat[1,1] = (-8*sum(sig_del2*sig*beta^2*dist_loc2^2) + 4*sum(sig_del*sig*dist_loc2))
# hmat[1,2] = 4*beta*sum(sig_del2*exp_loc*dist_loc2)
# hmat[2,1] = hmat[1,2]
# hmat[2,2] = 2*sum(exp_loc^2)
# if (any(!is.finite(hmat)))
# return(grad_para)
# eig_hmat = eigen(hmat)
# if (any(eig_hmat$values<0))
# hmat = (eig_hmat$vectors)%*%(abs(eig_hmat$values)*t(eig_hmat$vectors))
#
# del = solve(hmat)%*%grad_para #grad_beta/hmat
# return(del)
# }
#
# # to obtain the kriging surface
# trans2mat<-function(Yt_pred, lattice_num)
# {
# Yt_pred_mat = t(matrix(Yt_pred, nrow = lattice_num))
# colnames(Yt_pred_mat) = round(seq(116, 117.1, length.out = lattice_num), 3)
# rownames(Yt_pred_mat) = round(seq(39.52, 40.53, length.out = lattice_num), 3)
# return(Yt_pred_mat)
# }
|
679147bf1e12533d609da5ab94d78de15de672c7 | 1d4dc3bb6d4f8404c42778fa8e76f521dadc0b29 | /demographic.R | a44cd41601fd925702fdbecbe3b85e377a1933b2 | [] | no_license | lash1937/stochasticity_structure | 79b97a0c2b98c923b4274b84e3318f36572c23ce | ce6d68b427c5aa12ef235154b5c2a6f446383cff | refs/heads/master | 2020-05-23T18:07:51.972256 | 2019-05-25T14:38:24 | 2019-05-25T14:38:24 | 186,882,233 | 3 | 2 | null | null | null | null | UTF-8 | R | false | false | 7,491 | r | demographic.R | # code to create figure 3,
# diving into the effects of demographic stochasticity on population and community structure
# source the functions to run each model
source("model_functions.R")
# ----------------------------------------------------------------------------------------------------------
# single run population results (panel B)
# time to run the model
time <- 50
# density independent growth rates
R1 <- 1.6 # species 1
R2 <- 1.6 # species 2
# intraspecific competition coefficients
alpha1 <- .02
alpha2 <- .1
# set up vector to hold results
results1 <- results2 <- rep(NA, time)
results1[1] <- results2[1] <- 20
# run model: one stochastic Beverton-Holt trajectory per species
# (do.pop.dem.BH comes from model_functions.R -- presumably it draws the next
# abundance with demographic stochasticity; confirm there)
for (t in 1:(time-1)) {
  results1[t+1] <- do.pop.dem.BH(R=R1, alpha=alpha1, N=results1[t])
  results2[t+1] <- do.pop.dem.BH(R=R2, alpha=alpha2, N=results2[t])
}
# Create figure (panel B)
# NOTE(review): quartz() is macOS-only; dev.new() would be portable
quartz(width=5, height=5)
par(mar=c(3,3,2,2))
plot(results2, col="darkgoldenrod2", type="l", lwd=2, lty=1, ylab="", yaxt="n", xaxt="n",
     xlab="", cex.axis=1.25, xlim=c(0, 50), ylim=c(0,50), main="")
# dashed vertical line marks the census time t* = 40 used in panel C
abline(v=40, lty=2, col="black")
axis(side=1, at=c(0, 25, 50), labels=c(0, 25, 50), cex.axis=1.25)
mtext("Time", side=1, line=2, outer=FALSE, col="black", cex=1.25)
mtext("Abundance", side=2, line=2, outer=FALSE, col="black", cex=1.25)
axis(side=2, at=c(0, 25, 50), labels=c(0, 25, 50), cex.axis=1.25)
lines(results1, col="firebrick4", lwd=2)
# ----------------------------------------------------------------------------------------------------------
# across runs population distributions (panel C)
# time point to create the distribution
t_star <- 40
# number of runs for creating the distributions
runs <- 1000
# create matrix to hold results (one row per replicate run)
results1 <- results2 <- matrix(NA, nrow=runs, ncol=time)
results1[,1] <- results2[,1] <- 20
# run model: replicate the panel-B simulation `runs` times per species
for (counter in 1:runs) {
  for (t in 1:(time-1)) {
    results1[counter,t+1] <- do.pop.dem.BH(R=R1, alpha=alpha1, N=results1[counter,t])
    results2[counter,t+1] <- do.pop.dem.BH(R=R2, alpha=alpha2, N=results2[counter,t])
  }
}
# extract abundance at time=t_star for each run
dist_results1 <- results1[,t_star]
dist_results2 <- results2[,t_star]
# kernel density estimates of the abundance distribution at t* (truncated at 0)
density_results1 <- density(dist_results1, from=0)
density_results2 <- density(dist_results2, from=0)
# Create figure (panel C)
quartz(width=5, height=5)
par(mar=c(3,3,2,2))
plot(density_results2, col="darkgoldenrod2", lwd=2, ylab="", lty=1, yaxt="n", xaxt="n",
     xlab="", cex.axis=1.25, xlim=c(0, 50), ylim=c(0,.4), main="")
axis(side=1, at=c(0, 25, 50), labels=c(0, 25, 50), cex.axis=1.25)
mtext("Abundance (time=t*)", side=1, line=2, outer=FALSE, col="black", cex=1.25)
mtext("Probability", side=2, line=2, outer=FALSE, col="black", cex=1.25)
axis(side=2, at=c(0, .2, .4), labels=c(0, .2, .4), cex.axis=1.25)
lines(density_results1, col="firebrick4", lwd=2)
# ----------------------------------------------------------------------------------------------------------
# single run community results (panel D)
# number of species
species <- 20
# set parameters for model run
# parameters for the stabilizing case: species differ in growth rates
R_stabilizing <- runif(species, 2, 2.5)
alphas_stabilizing <- matrix(runif(species*species, .002, .005), ncol=species, nrow=species)
# set intraspecific competition (i.e. all of the diagonal elements) higher than interspecific competition
diag(alphas_stabilizing) <- .03
# parameters for the neutral case: every species gets the mean growth rate
# and the mean competition coefficient
R_neutral <- rep(mean(R_stabilizing), species)
alphas_neutral <- matrix(rep(mean(alphas_stabilizing), species*species), ncol=species, nrow=species)
# set up matrix to hold results (rows = species, columns = time)
results_stabilizing <- results_neutral <- matrix(NA, nrow=species, ncol=time)
results_stabilizing[,1] <- results_neutral[,1] <- 20
# run model (do.com.dem.BH is the community analogue in model_functions.R)
for (t in 1:(time-1)) {
  for (s in 1:species) {
    results_stabilizing[s,t+1] <- do.com.dem.BH(R=R_stabilizing[s], alphas=alphas_stabilizing[s,],
                                                N=results_stabilizing[s,t], Nall=results_stabilizing[,t])
    results_neutral[s,t+1] <- do.com.dem.BH(R=R_neutral[s], alphas=alphas_neutral[s,],
                                            N=results_neutral[s,t], Nall=results_neutral[,t])
  }
}
# determine diversity (number of extant species) through time
diversity_stabilizing <- diversity_neutral <- rep(NA, time)
for (t in 1:time) {
  diversity_stabilizing[t] <- sum(results_stabilizing[,t] > 0)
  diversity_neutral[t] <- sum(results_neutral[,t] > 0)
}
# Create figure (panel D)
quartz(width=5, height=5)
par(mar=c(3,3,2,2))
plot(diversity_stabilizing, col="darkorchid3", type="l", lwd=2, lty=1, ylab="", yaxt="n", xaxt="n",
     xlab="", cex.axis=1.25, xlim=c(0, 50), ylim=c(0,20), main="")
abline(v=40, lty=2, col="black")
axis(side=1, at=c(0, 25, 50), labels=c(0, 25, 50), cex.axis=1.25)
mtext("Time", side=1, line=2, outer=FALSE, col="black", cex=1.25)
mtext("Diversity", side=2, line=2, outer=FALSE, col="black", cex=1.25)
axis(side=2, at=c(0, 10, 20), labels=c(0, 10, 20), cex.axis=1.25)
lines(diversity_neutral, col="plum2", lwd=2)
# ----------------------------------------------------------------------------------------------------------
# distribution for community results (panel E)
# set up arrays to hold results (replicate x species x time)
dist_stabilizing <- dist_neutral <- array(NA, c(runs, species, time))
dist_stabilizing[,,1] <- dist_neutral[,,1] <- 20
# model run
for (counter in 1:runs) {
  # redraw parameters for each run, so the resulting distribution integrates
  # over parameter draws as well as demographic stochasticity
  R_stabilizing <- runif(species, 2, 2.5)
  alphas_stabilizing <- matrix(runif(species*species, .002, .005), ncol=species, nrow=species)
  diag(alphas_stabilizing) <- .03
  R_neutral <- rep(mean(R_stabilizing), species)
  alphas_neutral <- matrix(rep(mean(alphas_stabilizing), species*species), ncol=species, nrow=species)
  for (t in 1:(time-1)) {
    for (s in 1:species) {
      dist_stabilizing[counter,s,t+1] <- do.com.dem.BH(R=R_stabilizing[s], alphas=alphas_stabilizing[s,],
                                                       N=dist_stabilizing[counter,s,t],
                                                       Nall=dist_stabilizing[counter,,t])
      dist_neutral[counter,s,t+1] <- do.com.dem.BH(R=R_neutral[s], alphas=alphas_neutral[s,],
                                                   N=dist_neutral[counter,s,t],
                                                   Nall=dist_neutral[counter,,t])
    }
  }
}
# extract results for t=t_star
t_star_stabilizing <- dist_stabilizing[,,t_star]
t_star_neutral <- dist_neutral[,,t_star]
# determine diversity at t=t_star for each run
diversity_stabilizing <- diversity_neutral <- rep(NA, runs)
for(counter in 1:runs) {
  diversity_stabilizing[counter] <- sum(t_star_stabilizing[counter,]>0)
  diversity_neutral[counter] <- sum(t_star_neutral[counter,]>0)
}
# create distributions of expected diversity
density_stabilizing <- density(diversity_stabilizing, from=0, adjust=1.5)
density_neutral <- density(diversity_neutral, from=0, adjust=1.5)
# Create figure (panel E)
quartz(width=5, height=5)
par(mar=c(3,3,2,2))
plot(density_stabilizing, col="darkorchid3", lwd=2, ylab="", lty=1, yaxt="n", xaxt="n",
     xlab="", cex.axis=1.25, xlim=c(0, 20), ylim=c(0,.3), main="")
axis(side=1, at=c(0, 10, 20), labels=c(0, 10, 20), cex.axis=1.25)
mtext("Diversity (time=t*)", side=1, line=2, outer=FALSE, col="black", cex=1.25)
mtext("Probability", side=2, line=2, outer=FALSE, col="black", cex=1.25)
axis(side=2, at=c(0, .15, .3), labels=c(0, .15, .3), cex.axis=1.25)
lines(density_neutral, col="plum2", lwd=2)
|
9346641cc443de684cef3656918317bc9bb46a5e | f8955345f2129d214ae932a6199a2cd480fb8771 | /cachematrix.R | 8e73b5d028a28310726bbf39f91b6ba13aea52ea | [] | no_license | funnyletter/ProgrammingAssignment2 | b44ee9e01c1ec6922c3180c55356ac07d0037fb4 | 4e33563e82bcbb199be0388e13f6aa8d54ec9757 | refs/heads/master | 2021-01-17T01:08:50.771131 | 2016-02-05T22:35:51 | 2016-02-05T22:35:51 | 50,875,795 | 0 | 0 | null | 2016-02-01T22:21:43 | 2016-02-01T22:21:43 | null | UTF-8 | R | false | false | 1,669 | r | cachematrix.R | ## These functions create an object that can contain a matrix and also its inverse, so you can calculate the
## inverse once, cache it, and retrieve it again later. This only works on invertable matrices. If you give it
## a non-square matrix it will fail loudly.
## Construct a cache-aware matrix wrapper: a list of closures sharing the
## matrix `x` and a lazily-filled slot for its inverse. The inverse slot is
## only populated when cacheSolve() asks for it.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(value) {
    # Replacing the matrix invalidates any previously cached inverse.
    x <<- value
    cached_inverse <<- NULL
  }
  # Accessor for the wrapped matrix.
  get <- function() {
    x
  }
  # Store a computed inverse in the shared cache slot.
  setInverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  # Retrieve the cached inverse (NULL when nothing has been cached yet).
  getInverse <- function() {
    cached_inverse
  }
  list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## Return the inverse of a matrix created with makeCacheMatrix(), computing
## it on the first call and serving the cached copy afterwards. Extra
## arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  m <- x$getInverse()
  # Cached result available: return it without recomputing.
  if(!is.null(m)) {
    message("Getting cached data...")
    return(m)
  }
  # Cache miss: compute the inverse, store it, then return it.
  data <- x$get()
  # BUG FIX: `...` was accepted by the signature but silently dropped;
  # forward it so e.g. a tolerance argument actually reaches solve().
  m <- solve(data, ...)
  x$setInverse(m)
  return(m)
}
|
8bae8e6dd7a5100264281fa1b651be6eb6d8993f | 83fec32b0e9f9f113f105271adb8d49e062d9909 | /geologyGeometry/tutorials/4orientations/5ignoringSymmetry.R | b355e8a2a8e16baa34943de3d83033fe7e5307b0 | [
"Apache-2.0"
] | permissive | nicolasmroberts/InternalStructureMtEdgar_PR2021 | e0982fa25ad8247573649ff780381742ce461163 | 01ebb627aedb2b8f7dab6ce8f3bc53b545c6df4d | refs/heads/master | 2023-03-17T14:18:24.883512 | 2021-03-16T19:59:57 | 2021-03-16T19:59:57 | 348,474,436 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,549 | r | 5ignoringSymmetry.R |
# Copyright 2017 Joshua R. Davis
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
### INTRODUCTION ###
# In the preceding tutorial we learned the 'right' way to compute the mean and
# dispersion of an orientational data set. But tools for orientations are so
# scarce that we must sometimes resort to a less-than-ideal way.
### THE WRONG WRONG WAY ###
# Load a set foliation-lineation orientations from the western Idaho shear
# zone (Giorgis and Tikoff, 2004). Also compute and illustrate the Frechet
# mean. Because of the 4-fold symmetry, everything appears four times in a
# rotation plot.
wiszData <- geoDataFromFile("data/wiszFollins.tsv")
wiszMeanVar <- oriMeanVariance(wiszData$rotation, group=oriLineInPlaneGroup)
# For each datum, pick the symmetric representative nearest the mean, then
# build the geodesic ("spider leg") from the mean to that representative.
wiszCurves <- lapply(
  oriNearestRepresentatives(wiszData$rotation, wiszMeanVar$mean, group=oriLineInPlaneGroup),
  rotGeodesicPoints, wiszMeanVar$mean)
oriEqualAnglePlot(wiszData$rotation, curves=wiszCurves, group=oriLineInPlaneGroup)
# What if we weren't being careful, and we treated our orientations as mere
# rotations? First, here's the plot. The data set looks bimodal, even though
# it's unimodal.
rotEqualAnglePlot(wiszData$rotation)
# Here's the rotational mean as minimizing the size of a spider in that plot.
wiszMeanVarRot <- rotMeanVariance(wiszData$rotation)
wiszCurvesRot <- lapply(wiszData$rotation, rotGeodesicPoints, wiszMeanVarRot$mean)
rotEqualAnglePlot(wiszData$rotation, curves=wiszCurvesRot)
# The spider looks much bigger than in the orientational treatment. The
# variances confirm it.
# (A smaller variance means a tighter fit about the mean.)
wiszMeanVar$variance
wiszMeanVarRot$variance
### THE RIGHT WRONG WAY ###
# Remembering now that we should be dealing with orientations, let's view all
# four symmetric copies of that spider. This plot reveals the problem: By
# ignoring symmetry, the rotation-only treatment 'crosses between symmetric
# copies' of the data when it shouldn't.
oriEqualAnglePlot(wiszData$rotation, curves=wiszCurvesRot, group=oriLineInPlaneGroup)
# In this example, we can take another tack: At the start, ensure that all of
# the orientations are represented by rotations in a single symmetric copy.
# Then treat them as rotations.
# (oriNearestRepresentatives picks, for each datum, the symmetric copy
# nearest the given reference -- here the first datum.)
wiszRots <- oriNearestRepresentatives(wiszData$rotation, wiszData$rotation[[1]], group=oriLineInPlaneGroup)
wiszMeanVarOkay <- rotMeanVariance(wiszRots)
wiszCurvesOkay <- lapply(
  wiszRots,
  rotGeodesicPoints, wiszMeanVarOkay$mean)
rotEqualAnglePlot(wiszRots, curves=wiszCurvesOkay)
# When we symmetrize that plot, we get the same plot as we did in the true,
# orientational treatment.
oriEqualAnglePlot(wiszData$rotation, curves=wiszCurvesOkay, group=oriLineInPlaneGroup)
# And the variances confirm that we're getting the right answer.
wiszMeanVar$variance
wiszMeanVarOkay$variance
# The lesson here is: If your data are tightly concentrated enough, then
# isolating the symmetric copies is not difficult, and working within one
# symmetric copy often yields good results.
# This strategy is common in electron backscatter diffraction (EBSD), for
# example. Let's take another look at the Moine thrust intra-grain quartz
# orientations of (Strine and Wojtal, 2004; Michels et al., 2015). Six tight,
# beautiful symmetric copies.
michelsData <- geoDataFromFile("data/moine_one_grainABCxyz.tsv")
# Tightly clustered quartz orientations: six clear symmetric copies.
oriEqualVolumePlot(michelsData$rotation, group=oriTrigonalTrapezohedralGroup, simplePoints=TRUE)
# But structural orientations are often so dispersed that we can't easily
# discern the symmetric copies. Here are some faults with slip from Cyprus
# (Davis and Titus, 2017). Where are the two symmetric copies?
slickData <- geoDataFromFile("data/cyprusSlicks2008005.tsv")
oriEqualVolumePlot(slickData$rotation, group=oriRayInPlaneGroup)
# So the other part of the lesson is: Structural geologists cannot always
# ignore symmetry in orientational data. Our default approach should be one
# that handles symmetry. Only sometimes can we cheat on the symmetry.
### SO LET'S CHEAT THEN ###
# Let's return to our western Idaho shear zone foliation-lineation data set.
oriEqualVolumePlot(wiszRots, group=oriLineInPlaneGroup)
# Remember that we've pre-processed the data to choose representative
# rotations lying in one symmetric copy. So intrinsic methods will give the
# same results for these rotations as for the corresponding orientations.
rotEqualVolumePlot(wiszRots)
# Maximum likelihood estimation (MLE) of the matrix Fisher distribution
# parameters is a method for rotations. And it's not intrinsic, so we're not
# allowed to use it on orientations. But these orientations are concentrated
# enough that treating them as rotations is approximately okay. So let's do
# the MLE. Among other things, it gives a concentration matrix (kHat) whose
# eigenvalues quantify the dispersion in the data set.
wiszFisher <- rotFisherMLE(wiszRots)
# Larger eigenvalues of kHat indicate tighter concentration about the mean.
eigen(wiszFisher$kHat, symmetric=TRUE)$values
# In the next tutorial we will learn why this specific calculation is useful.
### SOMETHING THAT RESEMBLES CHEATING ###
# When your data are tightly concentrated, you can approximate them as points
# in the tangent space at the mean. Then principal component analysis (PCA) in
# that tangent space gives you yet another measure of anisotropic dispersion.
wiszPCA <- rotLeftPrincipalComponentAnalysis(wiszRots, wiszMeanVarOkay$mean, numPoints=5)
# Principal-component magnitudes: anisotropic spread along each tangent axis.
wiszPCA$magnitudes
rotEqualAnglePlot(wiszRots, curves=wiszPCA$curves, simplePoints=TRUE)
# Theoretical aside: We're using rotation methods, but this PCA concept is
# actually intrinsic to the geometry of the space of orientations, so it is a
# legitimate orientation technique. We're not actually cheating.
# Anyway, here is the symmetrized version of that last plot.
oriEqualAnglePlot(wiszRots, group=oriLineInPlaneGroup, curves=wiszPCA$curves, simplePoints=TRUE)
### CONCLUSION ###
# For tightly concentrated orientation data, intrinsic orientation methods
# work exactly as intrinsic rotation methods do. Sometimes we cheat and use
# non-intrinsic rotation methods on orientations too.
|
188db04b5ff1b36d2cd0f5d546c70e2495898b43 | 4ce576791c7b3bc0154889143ea4f390417b6411 | /TextMiningfilesort_rscript/rhadoop_test_glm_20170105.R | 16a4839e947139896884f1b39ec7e558d78ced3e | [] | no_license | wagaman/MachineLearning | 847b3b83cc986b26499f31335363d21727e2711b | 1fac4f973a75d0207864087dfd01be684d79dc8e | refs/heads/master | 2021-01-25T12:49:46.417404 | 2017-12-21T12:30:45 | 2017-12-21T12:30:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,494 | r | rhadoop_test_glm_20170105.R |
### Test 1 -- glm walk-through on the Pima diabetes data: understand the
### concept and use of generalized linear models.
# FIX: guard the installation so the package is not re-installed on every
# run (the original called install.packages() unconditionally).
if (!requireNamespace("faraway", quietly = TRUE)) install.packages("faraway")
library(faraway)
# Load the data set.
data(pima, package = "faraway")
# Extract the `test` variable (0/1 diabetes test result) as a factor.
b <- factor(pima$test)
# Fit the logistic model; diastolic = diastolic blood pressure,
# bmi = body mass index.
# (BUG FIX: the original had a stray bare `m` before this assignment and a
# pasted console prompt `>summary(m)` afterwards -- a syntax error when the
# script is sourced.)
m <- glm(b ~ diastolic + bmi, family = binomial, data = pima)
# Summarize the model: diastolic has p = 0.805 while bmi has p = 1.95e-14,
# so only bmi is significant; refit with bmi alone.
summary(m)
m.reduce <- glm(b ~ bmi, family = binomial, data = pima)
# Summary of the reduced model.
summary(m.reduce)
# Use the reduced model to predict the probability of a positive diabetes
# test for a person with a moderate body mass index (BMI = 35).
newdata <- data.frame(bmi = 35)
predict(m.reduce, type = "response", newdata = newdata)

### Test 2 -- a second glm example on the MASS anorexia data.
utils::data(anorexia, package = "MASS")
anorex.1 <- glm(Postwt ~ Prewt + Treat + offset(Prewt),
                family = gaussian, data = anorexia)
summary(anorex.1)
# Inspect the raw data (Treat / Prewt / Postwt columns) and the fitted model.
anorexia
anorex.1
|
f1d2649e84c4fe38bb1c32570ea4401a8c029db9 | 8a723b30751071eb6f7084523e961ed0efd1b11e | /LTEE_mutations/CellMorphologyMutations.R | 7d364efd8c107bb36bd8cfeb717f2b8e5f2df5b7 | [] | no_license | NkrumahG/LTEE-cell-size-shape | 1aa3e61e8b8999e534960838a855bbbc1bf80394 | d20572359e904f9b13db1e0bf3890036f828035a | refs/heads/main | 2023-06-16T08:39:27.110569 | 2021-07-14T17:36:20 | 2021-07-14T17:36:20 | 385,993,206 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,166 | r | CellMorphologyMutations.R | rm(list=ls())
library(dplyr)
library(ggplot2)
library(tidyverse)
library(grid)
library(reshape2)
# Heatmap of rod-shape (Mre/Mrd family) gene mutations across the twelve LTEE
# populations, faceted by sampled generation.  Requires RStudio, because
# rstudioapi is used to locate this script on disk.
dirname(rstudioapi::getActiveDocumentContext()$path) # Finds the directory where this script is located
setwd(dirname(rstudioapi::getActiveDocumentContext()$path)) # Sets the working directory to where the script is located
getwd()
PrimaryDirectory <- getwd()
PrimaryDirectory
# Read and row-bind every CSV in the working directory, then drop columns not
# needed for plotting.
ShinyData <- list.files(pattern = "*.csv")
Data <- as.data.frame(do.call(rbind,Map('cbind', lapply(ShinyData, read.csv)))) %>%
  select(-starts_with("html"), -gene_name, -snp_type, -X)
# Keep only mutations in the mre/mrd rod-shape genes.
RodMain <- Data %>% filter(gene_list %in% c("mreB", "mreC", "mreD", "mrdA", "mrdB"))
#View(RodMain)
#Arc <- Data %>% filter(grepl(pattern="arc", tolower(gene_list))) #related to anaerobic project
# Restrict to clone "A" of each population.
CloneA <- RodMain %>% filter(clone == "A")
#NonMutators <- CloneA %>% filter(mutator_status == "IS-mutator"| mutator_status == "non-mutator" )
##by_population <- CloneA %>% group_by(population) %>% arrange(population)
# Fix the population order on the x axis (all Ara- before all Ara+).
CloneA$population <- factor(CloneA$population, c("Ara-1","Ara-2","Ara-3","Ara-4","Ara-5","Ara-6","Ara+1","Ara+2","Ara+3","Ara+4","Ara+5","Ara+6"))
#Using true minus symbol
levels(CloneA$population) <- c("Ara−1","Ara−2","Ara−3","Ara−4","Ara−5","Ara−6","Ara+1","Ara+2","Ara+3","Ara+4","Ara+5","Ara+6")
# Manual levels
# Order genes on the y axis by how often they are mutated in this subset.
gene_list_table <- table(CloneA$gene_list)
gene_list_levels <- names(gene_list_table)[order(gene_list_table)]
CloneA$gene_list_2 <- factor(CloneA$gene_list, levels = gene_list_levels)
#gene_list_table <- table(NonMutators$gene_list)
#NonMutators$gene_list_2 <- factor(NonMutators$gene_list, levels = gene_list_levels)
#Change labels for facet
CloneA$time <- factor(CloneA$time)
levels(CloneA$time) <- c("Generation 2k", "Generation 10k", "Generation 50k")
#7.85 x 5.35
# Tile heatmap: population x gene, filled by mutation category, one facet per
# generation.  Note axis.text.x / axis.title.y are set by more than one
# theme() call below; later calls merge with (and override) earlier ones.
ggplot(CloneA, aes(x=population, y=gene_list, height = .5, width = 1)) +
  geom_tile(aes(fill = mutation_category)) +
  scale_x_discrete(drop = F)+
  facet_grid (~time) +
  theme_bw() +
  scale_fill_discrete(name = "Mutation", labels = c("Indel","Nonsynonymous", "Synonymous")) +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5)) +
  theme(axis.title.x = element_blank(),axis.title.y = element_blank()) +
  theme(legend.justification=c(0,1), legend.position=c(0.005,.995)) +
  theme(axis.text.y = element_text(face = "italic", colour = "black", size = 12, margin = (margin(l = 5, r=5)))) +
  theme(axis.title.y = element_blank()) +
  theme(axis.ticks = element_blank())+
  theme(legend.position = "bottom",
        legend.margin = margin (10,0,0,100),
        legend.box.margin = margin(-20,0,0,10))+
  theme(axis.text.x = element_text(colour = "black", size = 12, margin = (margin(t = 5, b=5)))) +
  theme(axis.title.x = element_text(size = 12)) +
  theme(legend.text = element_text(size = 12)) +
  theme(legend.title = element_text(size = 12)) +
  theme(strip.text = element_text(size=12)) +
  coord_fixed(ratio=1.5)
#binomial tests. Treating as two families, Mre and Mrd.
# binom.test(5,8,p = 0.40, alternative = "g") #p=0.1737
# binom.test(6,12,p=0.40, alternative = "g") #p=0.3348
# binom.test(8,20,p=0.40, alternative = "g") #p=0.5841
|
b76ebdde29e1bbe37be7ef64fb323e2086e6ef7f | 960d8eee1a4b3b968cd044b7c8cb3a27e9ca3757 | /jeerthiliza/mousehuman/analysis/analysis.R | c347b3ac889bafcee129f6103549b0485bdff765 | [] | no_license | shouguog/hematopoiesis | db4c5238a721724730fd8ce9cb9b27b8ba18c35d | 1c80665bb1ff1a63a18e97d21e5ba9786bc75d8b | refs/heads/master | 2023-01-14T09:29:47.941646 | 2023-01-07T17:52:53 | 2023-01-07T17:52:53 | 171,172,140 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,195 | r | analysis.R | setwd("/data/gaos2/tmp/mousehuman/analysis")
rm(list=ls())
library(Seurat)
# Cross-species alignment of human and mouse single-cell data using Seurat's
# CCA workflow (Seurat v2-era API: FindVariableGenes/RunCCA/AlignSubspace).
# The .RData file is expected to supply the `human` and `mouse` Seurat
# objects (assumed -- confirm against how humanmouse.RData was produced).
load("humanmouse.RData")
human@meta.data<-human@meta.data[,c(1:3,8)] #keep only necessary column
mouse@meta.data<-mouse@meta.data[,c(1:3,13)] #keep only necessary column
# Set up human object
# ("speice" is a misspelling of "species" kept consistently throughout)
human@meta.data$speice <- "human"
human <- NormalizeData(human)
# Set up mouse object
mouse@meta.data$speice <- "mouse"
mouse <- NormalizeData(mouse)
# Gene selection for input to CCA: union of the top 1000 variable genes of
# each species.
human <- FindVariableGenes(human, do.plot = F)
mouse <- FindVariableGenes(mouse, do.plot = F)
g.1 <- head(rownames(human@hvg.info), 1000)
g.2 <- head(rownames(mouse@hvg.info), 1000)
genes.use <- unique(c(g.1, g.2))
human <- ScaleData(human, display.progress = F, genes.use = genes.use)
mouse <- ScaleData(mouse, display.progress = F, genes.use = genes.use)
speices.combined <- RunCCA(human, mouse, genes.use = genes.use, num.cc = 30)
# visualize results of CCA plot CC1 versus CC2 and look at a violin plot
p1 <- DimPlot(object = speices.combined, reduction.use = "cca", group.by = "speice", pt.size = 0.5, do.return = TRUE)
p2 <- VlnPlot(object = speices.combined, features.plot = "CC1", group.by = "speice", do.return = TRUE)
plot_grid(p1, p2)
# Align the CCA subspaces so the two species become directly comparable.
speices.combined <- AlignSubspace(speices.combined, reduction.type = "cca", grouping.var = "speice",dims.align = 1:20)
p1 <- VlnPlot(object = speices.combined, features.plot = "ACC1", group.by = "speice", do.return = TRUE)
p2 <- VlnPlot(object = speices.combined, features.plot = "ACC2", group.by = "speice", do.return = TRUE)
plot_grid(p1, p2)
# NOTE(review): human.data/mouse.data are not created in this script; this
# rm() only succeeds if they arrived via humanmouse.RData -- confirm.
rm(human.data, mouse.data)
save(list=ls(), file="humanmouse.aligned.RData")
# t-SNE and Clustering
speices.combined <- RunTSNE(speices.combined, reduction.use = "cca.aligned", dims.use = 1:20, do.fast = T)
speices.combined <- FindClusters(speices.combined, reduction.type = "cca.aligned", resolution = 0.6, dims.use = 1:20)
# Visualization
p1 <- TSNEPlot(speices.combined, do.return = T, pt.size = 0.5, group.by = "speice")
p2 <- TSNEPlot(speices.combined, do.label = T, do.return = T, pt.size = 0.5)
# Write the side-by-side t-SNE plots to a PNG, then checkpoint the workspace.
png("tSNE_group_mouse_human.png", width=2000, height=1000, res=100)
plot_grid(p1, p2)
dev.off()
save(list=ls(), file="humanmouse.aligned.2.RData")
|
8b0c062b1faa7692ff73390b1755daaff263a4f8 | 41648c813bb2dec678ba5b82a8a3a2dcee45cbaf | /R/test.r | 1b4b687ea0661c9dd7d8b5bf60beb8ba2790a777 | [] | no_license | GokulGeo/TaalStarPlot | ba8efeeb94ebe1e5f959efca63a59b2a2194fd2e | a48d3f1cc701f31fb473b7cc084f5c27bd828b24 | refs/heads/master | 2022-12-18T18:08:34.523244 | 2020-09-27T08:34:53 | 2020-09-27T08:34:53 | 298,985,730 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,543 | r | test.r | library(tidyverse)
library(readxl)
library(scales)
# Baseline/temporal plot of Sentinel-1 acquisitions: perpendicular baseline
# (Bperp) vs acquisition date, with a segment joining each acquisition to the
# reference image (assumed to be the first row of the sheet -- confirm).
data <-read_excel("~/Desktop/Sentinel-1 Metadata info.xls")
centroid <- slice(data,1)
data %>%
  ggplot(aes(`Date of acquisition`, `Bperp(m)`)) +
  geom_point() +
  geom_segment(aes(x = centroid$`Date of acquisition`, y = centroid$`Bperp(m)`,
                   xend = `Date of acquisition`, yend = `Bperp(m)`)) +
  theme_minimal()
my_data_compressed <-data
# Hard-coded (dput-style) snapshot of the acquisition table, apparently kept
# so the plot can be reproduced without the Excel file; not used above.
reconstruct <- structure(list(`Date of acquisition` = structure(c(1580256000,
1581292800, 1582329600, 1579219200, 1577664000, 1575590400, 1576627200,
1578700800, 1579737600, 1581811200, 1580774400, 1582848000), class = c("POSIXct",
"POSIXt"), tzone = "UTC"), `Bperp(m)` = c(0, -23.22, 15.03, 8.85,
-26.13, 7.35, -31.04, 19.4, 12.44, -25.21, -6.45, 70.35)), row.names = c(NA,
-12L), class = c("tbl_df", "tbl", "data.frame"))
59e09618657ad32a27b351b6ca532c834432d890 | 6e32987e92e9074939fea0d76f103b6a29df7f1f | /googleidentitytoolkitv2.auto/man/GoogleIamV1TestIamPermissionsResponse.Rd | ef52b35a2d75dfd893e1ed1da24e9f69e65744c0 | [] | no_license | justinjm/autoGoogleAPI | a8158acd9d5fa33eeafd9150079f66e7ae5f0668 | 6a26a543271916329606e5dbd42d11d8a1602aca | refs/heads/master | 2023-09-03T02:00:51.433755 | 2023-08-09T21:29:35 | 2023-08-09T21:29:35 | 183,957,898 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 695 | rd | GoogleIamV1TestIamPermissionsResponse.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/identitytoolkit_objects.R
\name{GoogleIamV1TestIamPermissionsResponse}
\alias{GoogleIamV1TestIamPermissionsResponse}
\title{GoogleIamV1TestIamPermissionsResponse Object}
\usage{
GoogleIamV1TestIamPermissionsResponse(permissions = NULL)
}
\arguments{
\item{permissions}{A subset of \code{TestPermissionsRequest.permissions} that the caller is allowed}
}
\value{
GoogleIamV1TestIamPermissionsResponse object
}
\description{
GoogleIamV1TestIamPermissionsResponse Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Response message for \code{TestIamPermissions} method.
}
\concept{GoogleIamV1TestIamPermissionsResponse functions}
|
a9724224b5e8af792174a7da586832a766e945fe | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/QuantumClone/examples/multiplot_trees.Rd.R | da9d3269e7b1f693085f0f175318acc803fd6956 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 215 | r | multiplot_trees.Rd.R | library(QuantumClone)
### Name: multiplot_trees
### Title: Plots multiple trees
### Aliases: multiplot_trees
### Keywords: Clonal inference phylogeny
### ** Examples
# Example call on the package's bundled Tree data with d = 4
# (see ?QuantumClone::multiplot_trees for the meaning of `d`).
multiplot_trees(QuantumClone::Tree, d= 4)
|
5d4d3ecf6d13413605bcbb9bdaff09fcf69fd2c1 | 514ff5c8a94ba290d9de186149271838275f728d | /man/geneCount.Rd | 2b84fd0bec93e7e08edf97e51526e6df1fe5b295 | [] | no_license | DingailuM/ribiosNGS | eac90a15346db52aebdc3298e950e4cb26199f66 | d2d69c044d89a3ccbcb05becf9b33901681e4694 | refs/heads/master | 2022-11-30T09:52:08.561350 | 2020-08-04T11:07:22 | 2020-08-04T11:07:22 | 284,919,686 | 0 | 0 | null | 2020-08-04T10:57:26 | 2020-08-04T08:18:28 | null | UTF-8 | R | false | true | 291 | rd | geneCount.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/edgeR-funcs.R
\name{geneCount}
\alias{geneCount}
\title{Return gene count}
\usage{
geneCount(edgeResult)
}
\arguments{
\item{edgeResult}{An EdgeResult object}
}
\value{
Integer
}
\description{
Return gene count
}
|
8235c63e36e4edaa3687fb2413e4d8a5d2636b7f | 0d2190a6efddb7167dee3569820724bfeed0e89c | /R3.0.2 Package Creation/PBTools/R/QTLDataPrep.R | 922a83d3d325b0d52270e510f08f8a6da82b45ed | [] | no_license | djnpisano/RScriptLibrary | 6e186f33458396aba9f4151bfee0a4517d233ae6 | 09ae2ac1824dfeeca8cdea62130f3c6d30cb492a | refs/heads/master | 2020-12-27T10:02:05.719000 | 2015-05-19T08:34:19 | 2015-05-19T08:34:19 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,924 | r | QTLDataPrep.R | ####################################################################
# QTLDataPrep
#' Function for preparing data file(s) for QTL analysis.
#'
#' Cross-checks the genotype ids shared by the phenotypic (P_data) and
#' genotypic (G_data) data sets, and the marker ids shared by the genotypic
#' and map (M_data) data sets.  Whenever one data set contains entries missing
#' from its counterpart, a reduced copy is written to the working directory
#' (newPhenoData.csv, newMapData.txt, newGenoData.txt).
#
# Parameters:
#' @param P_data name of phenotypic data set (R data format)
#' @param G_data genotypic data set (column 1 = genotype id, remaining
#'   columns = markers)
#' @param M_data map data set (column 1 = marker id)
#' @param P_geno name of genotype variable in P_data
#'
#' @return list with the elements G_diffGid, P_diffGid, M_diffMid, G_diffMid, isNewPhenoCreated, isNewMapCreated, isNewGenoCreated
# where:
# G_diffGid - list of genotypes in G_data w/c are not in P_data
# P_diffGid - list of genotypes in P_data w/c are not in G_data
# M_diffMid - list of markers in M_data w/c are not in G_data
# G_diffMid - list of markers in G_data w/c are not in M_data
# isNewPhenoCreated - logical; whether a new phenotype file is created
# isNewMapCreated - logical; whether a new map file is created
# isNewGenoCreated - logical; whether a new genotype file is created
#
#' @author Author: Rose Imee Zhella Morantte
#-------------------------------------------------
QTLDataPrep <- function(P_data, G_data, M_data, P_geno) UseMethod("QTLDataPrep")
QTLDataPrep.default <- function(P_data, G_data, M_data, P_geno) {
  #trim the strings of genotype and Markers ID
  # (trimStrings is a package-internal helper -- assumed to strip whitespace)
  P_data[,match(P_geno, names(P_data))] <- trimStrings(as.matrix(P_data[match(P_geno, names(P_data))]))
  G_data[,1] <- trimStrings(G_data[,1])
  M_data[,1] <- trimStrings(M_data[,1])
  ###################################
  #P_data vs G_data
  #get genotype and marker "variable" in the data sets
  colnames(G_data)[1] <- P_geno
  P_gid <- unique(P_data[,match(P_geno,names(P_data))])
  M_mid <- M_data[1]
  colnames(M_mid) <- c("1")
  G_mid <- colnames(G_data)[-1] #G_dataMat[1,]
  G_midt <- as.data.frame(G_mid) #t(G_dataMat)[,1])
  rownames(G_midt) <- NULL
  G_gidt <- data.frame(G_data[,1]) #G_gidt <-data.frame(I(G_data[,1]))# G_gidt <- as.data.frame(t(G_dataMat)[1,])
  colnames(G_gidt) <- P_geno #"G.id" #replaced "V1"
  ##check if there are genotypes in G_data w/c are not in P_data; for displaying (if any)
  G_diffGid <- as.character(as.matrix(setdiff(paste(G_gidt[,1]), P_data[,match(P_geno, names(P_data))])))
  G_diffGidNoSpace<-G_diffGid[which(G_diffGid!="")]
  ##check if there are genotypes in P_data w/c are not in G_data; for displaying (if any)
  P_diffGid <- as.character(as.matrix(setdiff(P_data[,match(P_geno, names(P_data))], paste(G_gidt[,1]))))
  P_diffGidNoSpace <- P_diffGid[which(P_diffGid!="")]
  ##reduce (if needed) P_data, sort genotypes as in G_data
  P_dataRed <- merge(P_data, G_gidt, by = P_geno, sort = FALSE)
  ##reduce (if needed) G_data
  G_dataRed <- merge(G_data, P_gid, by = P_geno, sort = FALSE)
  isNewPhenoCreated<-FALSE
  if (length(P_diffGidNoSpace)!=0) {
    ##save new P_data as csv file
    write.table(P_dataRed,file=paste(getwd(),"/newPhenoData.csv", sep=""), quote = FALSE, sep = ",", row.names = FALSE, col.names=TRUE)
    isNewPhenoCreated<-TRUE
  }
  ###################################
  #G_data vs M_data
  # Transpose the reduced genotype table so markers become rows; the marker
  # ids (previously column names) are carried in a new "mID" column.
  colnames(M_data) <- c("V1","V2_1","V3_1")
  G_datat <- as.data.frame(t(G_dataRed)) # t(G_dataRed) ###no -1 row?
  G_datat <- cbind(G_datat, rownames(G_datat))
  ncolGdatat <- dim(G_datat)[2]
  colnames(G_datat)[ncolGdatat] <- "mID"
  ##check if there are markers in M_data w/c are not in G_data; for displaying (if any)
  M_diffMid <- as.character(as.matrix(setdiff(M_data[,1], G_datat[,"mID"])))
  M_diffMidNoSpace<-M_diffMid[which(M_diffMid!="")]
  #reduce, if needed, M_data
  M_dataRed <- merge(M_data, G_midt, by.x = "V1", by.y = names(G_midt)[1], sort = FALSE)
  ##check if there are markers in G_data w/c are not in M_data; for displaying (if any)
  G_diffMid<-as.character(as.matrix(setdiff(G_datat[-1,"mID"], M_data[,1])))
  G_diffMidNoSpace<-G_diffMid[which(G_diffMid!="")]
  #reduce G_data
  G_dataRed <- merge(M_mid, G_datat[-1,], by.x = "1", by.y = "mID", sort = FALSE)
  G_dataRed2 <- as.data.frame(t(G_dataRed))
  rownames(G_dataRed2)[1] <- P_geno
  # NOTE(review): c(1:ncolGdatat-1) parses as (1:ncolGdatat)-1, i.e.
  # 0:(ncolGdatat-1); the 0 index is silently dropped so this selects columns
  # 1..(ncolGdatat-1).  It works, but only via that precedence quirk.
  rownames(G_dataRed2)[2:ncolGdatat] <- t(G_datat[1,c(1:ncolGdatat-1)])
  isNewMapCreated<-FALSE
  if (length(M_diffMidNoSpace)!=0) {
    ##save new M_data
    write.table(M_dataRed,file=paste(getwd(), "/newMapData.txt", sep=""), quote = FALSE, sep = "\t", row.names = FALSE, col.names=FALSE)
    isNewMapCreated<-TRUE
  }
  isNewGenoCreated<-FALSE
  if (length(G_diffMidNoSpace)!=0 || length(G_diffGidNoSpace)!=0) {
    #save new G_data
    write.table(G_dataRed2,file=paste(getwd(), "/newGenoData.txt", sep=""), quote = FALSE, sep = "\t", row.names = FALSE, col.names=FALSE)
    isNewGenoCreated<-TRUE
  }
  return(list(G_diffGid = G_diffGid,
              P_diffGid = P_diffGid,
              M_diffMid = M_diffMid,
              G_diffMid = G_diffMid,
              isNewPhenoCreated =isNewPhenoCreated,
              isNewMapCreated =isNewMapCreated,
              isNewGenoCreated =isNewGenoCreated))
} |
428f64de6be29030fa41504bbbe37be14c1186bf | 08c6e8b8087244561878460b2b1abe8ecc83bd2c | /herrstein RL smoothed.R | c277860b078c3d964b6e6f094fb256441e0f7d30 | [] | no_license | KasiaO/SemanticsGames | d298169bc5c047a4da06baad52c9ecc7016d852d | 7d3aa33ee5cc90994dd5c586da6c2ec3869644fa | refs/heads/master | 2021-01-12T03:00:46.048017 | 2017-08-28T17:30:28 | 2017-08-28T17:30:28 | 78,149,456 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,087 | r | herrstein RL smoothed.R | # source main methods and functions
source('base.R')
#####
## learning agents - basic Herrnstein RL, smoothed memory
#####
hrlLearner <- setRefClass(
  "hrlLearner",
  fields = list(
    urns = "list",
    delta = "numeric"
  ),
  contains = "Agent",
  methods = list(
    # One urn per figure: element 1 is the figure itself, the remaining
    # elements (named after the words in `dict`) hold equal initial
    # propensities summing to 1.
    initUrns = function(figures, dict) {
      urns <- list()
      for(i in seq_along(figures)) {
        # initialize equal propensities
        # figure is stored in the first position of the list (cannot be a name)
        urns[[i]] <- c(figures[[i]], rep(1/length(dict), length = length(dict)))
        names(urns[[i]]) <- c("figure", dict)
      }
      return(urns)
    },
    # Index of the urn holding `figure`.  Stops with a clear message for an
    # unknown figure; previously this fell through and returned NULL, which
    # produced a cryptic subscript error inside updateUrns.
    findUrn = function(figure) {
      for(i in seq_along(urns)) {
        if(identical(urns[[i]][[1]], figure)) {
          return(i)
        }
      }
      stop("findUrn: figure not found in urns")
    },
    # Smoothed (exponential moving average) Herrnstein update; no penalty.
    updateUrns = function(figure, communicate, point) {
      # no penalty
      urns[[findUrn(figure)]][[communicate]] <<- ({
        urns[[findUrn(figure)]][[communicate]]*(1-delta) + delta*point
      })
    },
    # Apply the reinforcement update, then redraw the figure->word split by
    # sampling one word per figure proportionally to its propensities.
    updateSplit = function(figure, communicate, point) {
      updateUrns(figure, communicate, point)
      split <- list()
      for(i in seq_along(urns)) {
        urn <- unlist(urns[[i]][-1])
        figure <- urns[[i]][[1]]
        probs <- urn/sum(urn)
        # Sample from the words stored as the urn's own names rather than
        # from the global `dict` the original silently relied on (initUrns
        # names the urn entries after dict, in order, so the draw is
        # identical -- but the hidden global dependency is gone).
        drawn <- sample(x = names(urn), size = 1, prob = probs)[[1]]
        split[[drawn]] <- c(split[[drawn]], figure)
      }
      return(split)
    }
  )
)
# override setEnvironment
setEnvironment <- function(figDims, dict) {
  # Build the game environment: every colour/size/shape combination becomes a
  # Figure, and two hrlLearner agents (deltas 0.7 and 0.4) are initialised
  # with a split over `dict` and equal-propensity urns.
  # input:
  # figDims - list - values for each dimension of the figure description (col, size, shape)
  stopifnot(c("color", "size", "shape") %in% names(figDims))
  # Every figure description, one per row.
  combs <- expand.grid(figDims$color, figDims$size, figDims$shape)
  colnames(combs) <- c("color", "size", "shape")
  # One Figure object per row of `combs`.
  figures <- lapply(seq_len(nrow(combs)), function(k) {
    spec <- combs[k, ]
    Figure$new(color = spec$color, size = spec$size, shape = spec$shape)
  })
  # Helper: a freshly initialised learner with the given smoothing rate.
  makePlayer <- function(delta) {
    player <- hrlLearner$new(
      split = list(),
      score = 0,
      urns = list(),
      delta = delta
    )
    player$split <- player$makeSplit(figures, dict)
    player$urns <- player$initUrns(figures, dict)
    player
  }
  # Same element names/order as before: figures, player1, player2, dict.
  list(
    figures = figures,
    player1 = makePlayer(0.7),
    player2 = makePlayer(0.4),
    dict = dict
  )
}
#####
## run experiment
#####
# Two colours x two sizes x two shapes = eight figures; a two-word lexicon.
figDims <- list(
  "color" = c("white", "red"),
  "size" = c("small", "big"),
  "shape" = c("square", "triangle")
)
dict <- c("A", "B")
# playGame, plotRes and runSimulation come from base.R (sourced at the top).
res <- playGame(500, figDims, dict, 1)
plotRes(res)
######
## run simulation
######
sim <- runSimulation(10, 500, figDims, dict, 0)
plotRes(sim)
|
5bbdfa06a85ef130b96d565aba8ad5dc4b900ea0 | a978ee2ce4d399cdd35a9407a04ce874d674505a | /R/DataPrep.R | a108192a29fcdbf50ee28440083477cb7c2c07a6 | [] | no_license | shambam/cellexalvrR | 1f1f8c5ed3653fa3105405d6d3ff3194c22f548c | cbfd62b99a52fcbf91033478f1a91ffd5289b785 | refs/heads/master | 2023-04-06T23:21:06.365196 | 2018-05-03T14:26:56 | 2018-05-03T14:26:56 | 95,549,018 | 3 | 1 | null | 2021-06-16T11:48:04 | 2017-06-27T10:57:58 | R | UTF-8 | R | false | false | 635 | r | DataPrep.R | #'Creates a meta cell matrix from a supplied dataframe from required fields
#'@param metad A dataframe of per cell metadata
#'@param rq.fields A vector of names specifying which columns should be made into metadata
#'@return A matrix with one column per (field, level) pair, columns named
#'"<field>.<level>"; NULL when rq.fields is empty
#'@keywords metadata cell
#'@export make.cell.meta.from.df
make.cell.meta.from.df <- function(metad,rq.fields){
	# One block per requested field: to.matrix expands the column against its
	# observed levels, and the columns are prefixed with the field name.
	blocks <- lapply(rq.fields, function(field){
		tmp.met <- to.matrix(metad[,field],unique(metad[,field]) )
		colnames(tmp.met) <- paste(field,colnames(tmp.met),sep=".")
		tmp.met
	})
	# Bind all blocks at once instead of growing the matrix with cbind inside
	# a loop; this also returns NULL cleanly when rq.fields is empty, where
	# the original 1:length(rq.fields) loop errored.
	do.call(cbind, blocks)
}
|
ba1dce12151e97a4edd5ac98b35da95de2f2aa98 | e24c2715cee33c751b6cb325ef8692c037a94846 | /R/prostate.R | e84c6b1817b26457be19f98606b4b47f01870267 | [] | no_license | cran/MultNonParam | 993e9551b760fcb593a1c25f1bfefc684154bf2e | 71f6fb719bb000f3ae1dc4ac6e16f4b4a16bdefe | refs/heads/master | 2022-12-07T12:08:00.225538 | 2022-11-30T17:20:07 | 2022-11-30T17:20:07 | 23,304,602 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,052 | r | prostate.R | #' @title prostate
#' @name prostate
#' @description 221 prostate cancer patients are collected in this data set.
#' @docType data
#' @format
#' \itemize{
#' \item hosp : Hospital in which the patient is hospitalized.
#' \item stage : stage of the cancer.
#' \item gleason score : used to help evaluate the prognosis of the cancer.
#' \item psa : prostate-specific antigen.
#' \item age : age of the patient.
#' \item advanced : boolean. \code{TRUE} if the cancer is advanced.
#'}
#' @references
#' A. V. D'Amico, R. Whittington, S. B. Malkowicz, D. Schultz, K. Blank, G. A. Broderick, J. E. Tomaszewski, A. A. Renshaw, I. Kaplan, C. J. Beard, A. Wein (1998) , \emph{Biochemical outcome after radical prostatectomy, external beam radiation therapy, or interstitial radiation therapy for clinically localized prostate cancer}, JAMA : the journal of the American Medical Association 280 969-74.
#'
#' @examples
#' data(prostate)
#' attach(prostate)
#' plot(age,psa,main="Age and PSA",sub="Prostate Cancer Data",
#' xlab="Age (years)",ylab="PSA")
NULL
|
39696fd4cdcb8648d39a5582822d364fe9a84c3b | 3e740bfc1105cc42db18ed2f894568d068608d02 | /server.R | c5654887e261f354216d0ff34415e106713a0286 | [] | no_license | MPacho/NextWordPrediction | c5f48581619db27e3c60a834c841d078e882e6fd | 981e0c6b92e27a995c324bbc1ea3e04bdeb75360 | refs/heads/master | 2021-05-06T14:34:06.386079 | 2017-12-06T23:55:06 | 2017-12-06T23:55:06 | 113,378,012 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 487 | r | server.R |
library(shiny)
source("04_stupid_backoff_model.R")  # expected to define predictStupidBackoff()
# Server: reacts to the typed text and exposes the top-3 next-word
# predictions from the stupid-backoff model as three text outputs.
shinyServer(function(input, output) {
  # Raw text currently typed by the user.
  text <- reactive({
    input$typeText
  })
  # Vector of predicted words; empty strings while the input box is empty.
  prediction <- reactive({
    if (input$typeText == "")
      c("","","")
    else
      predictStupidBackoff(text())
  })
  output$wordPrediction1 <- renderText({
    prediction()[1]
  })
  output$wordPrediction2 <- renderText({
    prediction()[2]
  })
  output$wordPrediction3 <- renderText({
    prediction()[3]
  })
})
|
d4cf15cc7d2c9215fb0337bb2b337aadf99686f7 | 8175788715e6344aeaa0ab4f82612e4245b03957 | /file1.R | 192db30d0ec47d1ec312ba7af2d6aea2de26385b | [] | no_license | rohilagarwal/analytics1 | 2aceb92dd50a528cf003b4713ce33fb040d30eb1 | c7b19022b8b5084a37b55576cb157cbe4c87e33f | refs/heads/master | 2020-04-08T01:56:38.584340 | 2018-12-09T10:13:44 | 2018-12-09T10:13:44 | 158,915,523 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 580 | r | file1.R | x=c(1:10)
# Basic-R practice script: vectors, sorting, plotting, matrices.
# Wrapping an expression in parentheses, e.g. (x), prints its value.
(x)
x=c(2,5,3,6,7,10)
(x)
x[2]
sort(x)
x[-4]
x=sort(x)
(x)
plot(density(x))
plot(hist(x))
x = rnorm(100)
(x)
plot(hist(x))
plot(hist(x), freq = F)
(x)
plot(hist(x))
#matrix
100:111
length(100:111)
matrix(1,ncol = 3, nrow = 4)
matrix(100:111, nrow = 4, byrow = T)
(m1 = matrix(100:111, nrow = 4))
(m2 = matrix(100:111, ncol = 3, byrow = T))
class(m1)
attributes(m1)
dim(m1)
m1
m1[1]
m1[1];m1[1]
m1
m1[1,]
m1[,1]
m1[,1, drop = F]
m1[c(1,3),]
# m1[(1,3),]  # original line was a syntax error: row indices must be
#             # combined with c(), as on the previous line.  Commented out
#             # so the whole file parses and can be sourced.
paste('c', 'd', sep = '-')
colnames(m1) = paste('c', 1:3, sep = '')
m1
m1[,c('c1','c3')]
#matrix----
#array----
|
0e325c90c4de28192676540e435ed733ee10b68c | 92b41bf11e58d671a1fab6224a9b17afb2b5050e | /sac1.R | 8a49128d8a4de93acbbd23d7c4bc564dfd107429 | [] | no_license | MaggieFang/MarketSegmentation-CourseADBI | 6fe081cfc7aaef039c1dfdba13fb2ffa2d54822e | 4e09a9a63dd205e0b5d3ae14c5c31745b5093777 | refs/heads/master | 2020-04-28T02:34:11.581404 | 2019-03-11T01:33:34 | 2019-03-11T01:33:34 | 174,903,652 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,690 | r | sac1.R | library(igraph)
library(lsa)
# read data
# NOTE(review): absolute, user-specific path -- the script only runs from
# this machine/layout unless `folder` is adjusted.
folder <- "/Users/xfang7/Google Drive/Courses/CSC591-603/hw/MarketSegmentation/"
graph = read_graph(paste(folder,"fb_caltech_small_edgelist.txt",sep = ''),format =c("edgelist"))
attribute_data <- read.csv(paste(folder,"fb_caltech_small_attrlist.csv", sep=''),header = T)
# Function to compute the attribute part of the modularity gain: the average
# cosine similarity between vertex h's attribute vector and the attribute
# vectors of every member of community `membership` (as recorded in the
# community-assignment vector `values`).
get_delta_attr <- function(attrList, h, membership, values)
{
  indices <- which(values == membership)
  sims <- vapply(
    indices,
    function(i) as.numeric(cosine(as.numeric(attrList[h,]), as.numeric(attrList[i,]))),
    numeric(1)
  )
  # mean(numeric(0)) is NaN, matching the original 0/0 for an empty
  # community; the mean is now returned explicitly (visibly) rather than as
  # the value of a trailing assignment.
  mean(sims)
}
#implement phase 1 of Sac1 algorithm: greedily move single vertices between
#neighbouring communities while the composite (alpha-weighted) gain improves.
phase1 <- function(graph,attributes, mapped_communities, alpha){
  #limit maximum of 15 iterations
  for(it in 1:15)
  {
    x <- mapped_communities
    for(i in 1:vcount(graph))
    {
      # index: store the index of vertex with max deltaQ
      index <- 0
      # maxQ:store the maximum of DeltaW
      maxQ <- 0
      # neighbors of vertex i
      n <- neighbors(graph, i)
      # candidate target communities: those of i's neighbours
      for(j in unique(mapped_communities[n]))
      {
        tmp <- mapped_communities
        old_modularity <- modularity(graph,tmp)
        #Remove i from its community, place to j's community
        tmp[i] <- j
        new_modularity <- modularity(graph,tmp)
        #Compute the composite modularity gain
        delta_Q_newman <- new_modularity - old_modularity
        delta_Q_attr <- get_delta_attr(attributes, i, j, mapped_communities)
        delta_Q <- (1-alpha)*delta_Q_attr + (alpha)*delta_Q_newman
        # update the vertex index and corresponding max deltaQ
        # NOTE(review): `i != j` compares a vertex id with a community label;
        # this only means "not my own community" because communities are
        # labelled by vertex ids initially -- verify this invariant still
        # holds when phase2 calls back into phase1 on the contracted graph.
        if(i!=j && delta_Q > maxQ){
          index <- j
          maxQ <- delta_Q
        }
      }
      # move i to j's comunity, where j with maximum positive gain (if exists)
      if(index !=0){
        mapped_communities[i] <- index
      }
    }
    # if no further improvement in modularity,break
    if(isTRUE(all.equal(x, mapped_communities)))
    {
      break
    }
    x <- mapped_communities
  }
  mapped_communities
}
#Phase2 of sac1 algorithm: contract each community into a meta-vertex,
#simplify the contracted graph, and re-run phase 1 on it until stable.
phase2 <- function(graph,attributes, mapped_communities, alpha){
  x <- mapped_communities
  for(i in 1:15)
  {
    combined_graph <- contract.vertices(graph, mapped_communities)
    new_graph <- simplify(combined_graph, remove.multiple = TRUE, remove.loops = TRUE)
    #reapply phase1
    # NOTE(review): mapped_communities still has one entry per ORIGINAL
    # vertex, while phase1 iterates over vcount(new_graph) meta-vertices and
    # indexes mapped_communities/attributes by those ids -- verify that this
    # index correspondence is intended.
    mapped_communities <- phase1(new_graph, attributes,mapped_communities, alpha)
    # no futher improvement,break
    if(isTRUE(all.equal(x, mapped_communities)))
    {
      break
    }
    x <- mapped_communities
  }
  mapped_communities
}
#sac1 algorithm: phase 1 on the raw graph, then phase 2 on the contraction.
#Relies on the file-level `graph` and `attribute_data` objects loaded above.
sac1 <- function(alpha, attributes = attribute_data){
  # Start with every vertex in its own community; the count is derived from
  # the graph instead of the hard-coded c(1:324), so other edge lists work.
  r1 <- phase1(graph, attributes, alpha=alpha, mapped_communities = seq_len(vcount(graph)))
  communities <- phase2(graph, attributes, alpha=alpha, mapped_communities = r1)
  return(communities)
}
# save result to file: one community per line (0-based vertex ids, comma
# separated) in communities_<alpha>.txt; alpha 0.5 is encoded as "5" to match
# the expected grader file name.
save_file <- function(communities, alpha){
  if(alpha == 0.5){
    alpha = 5
  }
  file_name <- paste(paste("communities", alpha, sep="_"), "txt", sep=".")
  f <- file(file_name, "w")
  # Close the connection even if a write below fails.
  on.exit(close(f), add = TRUE)
  for(label in unique(communities))
  {
    # 0-based member ids of this community, in vertex order.  which() replaces
    # the original O(n^2) nested loop over the hard-coded range 1:324, which
    # errored (NA condition) for any graph without exactly 324 vertices.
    members <- which(communities == label) - 1
    cat(as.character(members), file=f, sep = ",")
    cat("\n", file=f)
  }
}
# read parameter(alpha) from command line, e.g.:  Rscript sac1.R 0.5
args <- commandArgs(trailingOnly = TRUE)
alpha = as.numeric(args[1])
# run sac1 algorithm and save result to file
result <- sac1(alpha = alpha)
save_file(result, alpha = alpha)
|
1dbbfce0489400e77194cce7f4de2a6200b36bc5 | 78858583954c6cba8490d9e629f56b2f67b4578b | /ui.R | 1bed5d2ec5bc19fea14ebbbc6f7de517fdfa4e81 | [] | no_license | VictimOfMaths/COVID_LA_Plots | 9ed06f02f27ad7f3e017f85891cdac3468c77c33 | 4d82fdb58391abba0f5a32b31e5315cff90cad6a | refs/heads/master | 2023-08-22T17:36:08.895032 | 2021-10-08T10:14:37 | 2021-10-08T10:14:37 | 279,406,562 | 6 | 1 | null | 2020-08-30T23:03:35 | 2020-07-13T20:37:19 | HTML | UTF-8 | R | false | false | 1,704 | r | ui.R | library(shiny)
library(lubridate)
#Remove blue fill from date slider
# UI: a sidebar of controls (area, plot type, start date, y-axis scale,
# fixed-scales toggle) plus a main panel holding the single plot rendered by
# the server.  `shortdata` must already exist in the app environment.
ui <- fluidPage(
  tags$head(tags$style(HTML('* {font-family: "Lato"};'))),
  # CSS that makes the slider's filled bar and edge transparent.
  tags$style(
    ".irs-bar {",
    "  border-color: transparent;",
    "  background-color: transparent;",
    "}",
    ".irs-bar-edge {",
    "  border-color: transparent;",
    "  background-color: transparent;",
    "}"
  ),
  titlePanel("Visualising age patterns in English Local Authority COVID-19 case data"),
  sidebarPanel(
    # Area choices: England, its regions, then every lower-tier LA in the data.
    selectInput('LA', 'Select Area',
                c("England", "East of England", "East Midlands", "London", "North East",
                  "North West", "South East", "South West", "West Midlands",
                  "Yorkshire and The Humber",
                  sort(as.character(unique(shortdata$areaName[shortdata$areaType=="ltla"])))),
                multiple=FALSE, selected="England"),
    selectInput('plottype', 'Select plot', c("Heatmap of case numbers"=1,
                                             "Heatmap of case rates"=2,
                                             "Line chart of case rates (detailed ages)"=3,
                                             "Line chart of case rates (broad ages)"=4,
                                             "Streamgraph of case numbers"=5)),
    # Slider bounds are trimmed by a few days at each end of the data range.
    sliderInput('StartDate', 'Select start date for plot', min=min(shortdata$date)+days(3),
                max=max(shortdata$date)-days(4), value=as.Date("2020-08-01")),
    radioButtons('scale', "Select y-axis scale for line charts", choices=c("Linear", "Log"), inline=TRUE),
    checkboxInput('fix', "Select to fix y-axis scales to be the same for all plots", FALSE)),
  mainPanel(
    plotOutput('plot')
  )
)
9e238474ddc4e33bc3836ef2c871f7de901de400 | 0c1c9fce8a615de52819f530edce2a9a83824d99 | /R/fbGetUserAdAccounts.R | 3d73f87f8ed08da1c58336baa55654b2af4635cd | [] | no_license | IgorZakrevskiy/rfacebookstat_ks | 2ce059f656d5d5c43d2a54359dcc86c3ed3efde6 | 35d98bd741e4de0d000e3e56792f5ad0e6579d0b | refs/heads/master | 2023-05-08T04:38:21.895060 | 2021-06-04T09:30:11 | 2021-06-04T09:30:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,468 | r | fbGetUserAdAccounts.R | fbGetUserAdAccounts <- function(user_id = "me",
api_version = getOption("rfacebookstat.api_version"),
username = getOption("rfacebookstat.username"),
token_path = fbTokenPath(),
access_token = getOption("rfacebookstat.access_token")) {
# auth
if ( is.null(access_token) ) {
if ( Sys.getenv("RFB_API_TOKEN") != "" ) {
access_token <- Sys.getenv("RFB_API_TOKEN")
} else {
access_token <- fbAuth(username = username,
token_path = token_path)$access_token
}
}
if ( class(access_token) == "fb_access_token" ) {
access_token <- access_token$access_token
}
# attributes
rq_ids <- list()
out_headers <- list()
#Create result data frame
result <- tibble()
link <- paste0("https://graph.facebook.com/", api_version,"/", user_id, "/adaccounts", "?fields=id,name,account_id,account_status,amount_spent,balance,business_name,currency,owner&limit=5000&access_token=", access_token)
answer <- GET(link)
# attr
rq_ids <- append(rq_ids, setNames(list(status_code(answer)), answer$headers$`x-fb-trace-id`))
out_headers <- append(out_headers, setNames(list(headers(answer)), answer$headers$`x-fb-trace-id`))
user_account <- content(answer)
if ( !is.null(user_account$error) ) {
stop(user_account$error$message)
}
result <- bind_rows(user_account$data)
#Paging
while ( !is.null(user_account$paging$`next`) ) {
link <- user_account$paging$`next`
answer <- GET(link)
# attr
rq_ids <- append(rq_ids, setNames(list(status_code(answer)), answer$headers$`x-fb-trace-id`))
out_headers <- append(out_headers, setNames(list(headers(answer)), answer$headers$`x-fb-trace-id`))
user_account <- content(answer)
if ( !is.null(user_account$error) ) {
stop(user_account$error$message)
}
result <- bind_rows(user_account$data)
}
# set attributes
attr(result, "request_ids") <- rq_ids
attr(result, "headers") <- out_headers
return(result)
}
|
784b769a8f8c982cfc6629842a396583e7dadcd6 | 994f419867322d603f47d9a5e0d10e4147973ba1 | /R/TSPred-package.R | 24d5a9d668b560c651f18856cf95a2a28cf952bb | [] | no_license | cran/TSPred | 3d6bdb853b92c02a387e5898f886b192603bbe43 | 01e9a907bb272f6e61f4bf1a2a0617ee6bbb9d73 | refs/heads/master | 2021-06-04T20:14:18.278665 | 2021-01-21T10:30:03 | 2021-01-21T10:30:03 | 33,107,476 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 660 | r | TSPred-package.R | #' Functions for Benchmarking Time Series Prediction
#'
#' Functions for time series pre(post)processing, decomposition, modelling, prediction and accuracy assessment. The generated models and its yielded prediction errors can be used for benchmarking other time series prediction methods and for creating a demand for the refinement of such methods. For this purpose, benchmark data from prediction competitions may be used.
#'
#' @docType package
#' @name TSPred-package
#' @author Rebecca Pontes Salles
#'
#' Maintainer: rebeccapsalles@acm.org
#' @keywords package
NULL # Instead of "_PACKAGE" to remove inclusion of \alias{forecast}
# "_PACKAGE" |
ef61a34950258f56781aab47c119f476240f1ead | dfa8d36c361dae6df3f037019205c5415bd483e8 | /cachematrix.R | 99163f7b52cf11198351145e80e092bcf919cea2 | [] | no_license | olusal/ProgrammingAssignment2 | 481435fe14a1f9783dc07de9e0246a54ce4b8ef6 | 98f3df581d0007644d49440172a108451fda140b | refs/heads/master | 2021-01-18T08:56:19.707695 | 2015-01-25T21:38:12 | 2015-01-25T21:38:12 | 29,826,895 | 0 | 0 | null | 2015-01-25T19:15:04 | 2015-01-25T19:15:04 | null | UTF-8 | R | false | false | 989 | r | cachematrix.R | ## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix creates a matrix using four other functions (set,get (values of matrix)
## setinverse, and getinverse (set/get inverse of matrix))
## Create a special "matrix" wrapper that can cache its inverse.
## Returns a list of four closures sharing the stored matrix `x` and
## its cached inverse:
##   set/get             - replace / read the stored matrix
##   setinverse/getinverse - store / read the cached inverse
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL means "not computed yet".
  inv_cache <- NULL

  # Replace the stored matrix and invalidate any cached inverse.
  set <- function(y) {
    x <<- y
    inv_cache <<- NULL
  }

  # Return the stored matrix.
  get <- function() {
    x
  }

  # Record a computed inverse in the cache.
  setinverse <- function(solve) {
    inv_cache <<- solve
  }

  # Retrieve the cached inverse (NULL if not yet computed).
  getinverse <- function() {
    inv_cache
  }

  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## cacheSolve checks to see if an inverse of a matrix has already
## been calculated previously and gets the result(inverse of the matrix)
## before any computation is made
## Return the inverse of the special "matrix" produced by makeCacheMatrix,
## computing it only when no cached value is available.
##
## x   : list with get/set/getinverse/setinverse accessors.
## ... : further arguments passed through to solve().
cacheSolve <- function(x, ...) {
  # Cache hit: report and return the stored inverse immediately.
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Cache miss: compute the inverse, remember it, and return it.
  inverse <- solve(x$get(), ...)
  x$setinverse(inverse)
  inverse
}
|
e14acfbc5ea1694134554190a7a11761332b4e31 | 799e535f5be2ea8e9b238b092bd4c1fbffd56841 | /programs/Supplementary_analyses/table_simulations_under_H0.R | cf6b8a368784916d419bee3c0961df969a7b3319 | [] | no_license | andreaarfe/Bayesian-optimal-tests-non-PH | f16ff152d30c0bd075080af0a44ef6fb839c0d5f | fd8cc488b07f770ce11b184c76a6cdfbe899d67a | refs/heads/master | 2021-07-14T12:31:29.632474 | 2020-08-21T14:13:34 | 2020-08-21T14:13:34 | 167,343,902 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 442 | r | table_simulations_under_H0.R |
# Tabulate the empirical type-I error under H0 (no treatment effect) for each
# test, from the simulated p-values stored in `out` inside the .Rdata file.
library(xtable)
library(tidyverse)
load('./datasets/Supplementary_simulations/sim_no_trt_eff.Rdata')
# Reshape to long format: one row per (test name, simulated p-value).
sims <- as.data.frame(t(out)) %>% gather(key=pval)
tab <- sims %>%
  group_by(pval) %>%
  # alpha: rejection rate at the 5% level; N: number of simulations per test.
  summarise(alpha = mean(value<=0.05),
            N = n()) %>%
  # Normal-approximation 95% confidence limits for the rejection rate.
  mutate(SE = sqrt(alpha*(1-alpha)/N),
         LCL95 = alpha - 1.96*SE,
         UCL95 = alpha + 1.96*SE) %>%
  select(pval, alpha, LCL95, UCL95) %>%
  xtable(digits=3)
# Emit the LaTeX table without row names.
print(tab, include.rownames=FALSE)
|
fe484fd535c878d557fee69bdb4849617dd1334d | 1c5cb67f169bca8dcdd5bc38a358fb45ea532305 | /cachematrix.R | fbe36f6da6cf1b40e90b716d8edb533f8840bbb6 | [] | no_license | abhinavgoel95/ProgrammingAssignment2 | 280704602e9d75ddd04f57bd6f8e1b25292ef3c0 | 75e30b9191fc1fce2d82732618a19a6c43e77ecf | refs/heads/master | 2021-01-15T23:53:43.667176 | 2016-05-30T11:21:40 | 2016-05-30T11:21:40 | 60,002,587 | 2 | 0 | null | 2016-05-30T10:46:29 | 2016-05-30T10:46:29 | null | UTF-8 | R | false | false | 1,316 | r | cachematrix.R | ## Put comments here that give an overall description of what your
## functions do
##The functions are used to depict the use of Lexical Scoping in R.
## Write a short comment describing this function
##makeCacheMatrix is a function which returns a list of functions.
##These functions are: set, get, setinv and getinv
##makeCacheMatrix wraps a matrix together with a cache for its inverse.
##It returns a list of closures: set/get for the matrix itself and
##setinv/getinv for the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL                  # cached inverse; NULL = not computed yet
  set <- function(y) {
    # Replacing the matrix invalidates the cached inverse.
    x <<- y
    cached <<- NULL
  }
  get <- function() x             # Retrieve the matrix
  setinv <- function(inv) {
    cached <<- inv                # Store the inverse in the cache
  }
  getinv <- function() cached     # Retrieve the inverse from the cache
  # Return the list of accessor functions.
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## Write a short comment describing this function
##cacheSolve is for finding the inverse of the matrix (defined in x$set()).
##It will check if the inverse exits in the cache. If not it will find the inverse.
##cacheSolve returns the inverse of the special "matrix" built by
##makeCacheMatrix, reusing the cached inverse when one is available.
##
## x   : list with get/set/getinv/setinv accessors (from makeCacheMatrix).
## ... : further arguments passed on to solve().
## Returns the inverse of the matrix stored in x.
cacheSolve <- function(x, ...) {
  inv <- x$getinv() #Read inverse from cache
  if (!is.null(inv)) { #Check if inverse exists in cache
    print("Available in cache")
    # BUGFIX: previously the cached value was also written to a GLOBAL
    # variable via `matinv <<- inv`, polluting the caller's environment
    # as a hidden side effect; the cached inverse is now returned directly.
    return(inv)
  }
  mat <- x$get() #Obtain the matrix
  inv <- solve(mat, ...) #Find the inverse
  x$setinv(inv) #Store inverse in cache
  inv #return the inverse
}
|
35d6b35e1e36998ddf9114f5f8c0ccf6ff52864c | 7e5e5139f817c4f4729c019b9270eb95978feb39 | /Intermediate R/Chapter 2-Loops/8.R | e25d0d99c4e277cb1ec8e7fdfe1f9ffe8fc91a03 | [] | no_license | Pranav-Polavarapu/Datacamp-Data-Scientist-with-R-Track- | a45594a8a9078076fe90076f675ec509ae694761 | a50740cb3545c3d03f19fc79930cb895b33af7c4 | refs/heads/main | 2023-05-08T19:45:46.830676 | 2021-05-31T03:30:08 | 2021-05-31T03:30:08 | 366,929,815 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 933 | r | 8.R | # Mix it up with control flow
# Let's return to the LinkedIn profile views data, stored in a vector linkedin. In the first exercise on for loops you already did a simple printout of each element in this vector. A little more in-depth interpretation of this data wouldn't hurt, right? Time to throw in some conditionals! As with the while loop, you can use the if and else statements inside the for loop.
#
# Instructions
# 100 XP
# Add code to the for loop that loops over the elements of the linkedin vector:
#
# If the vector element's value exceeds 10, print out "You're popular!".
# If the vector element's value does not exceed 10, print out "Be more visible!"
# The linkedin vector has already been defined for you
# LinkedIn profile views for seven consecutive days.
linkedin <- c(16, 9, 13, 5, 2, 17, 14)
# For each day, print a popularity verdict followed by the view count:
# more than 10 views counts as "popular".
for (views in linkedin) {
  verdict <- if (views > 10) "You're popular!" else "Be more visible!"
  print(verdict)
  print(views)
}
500eb3f5851ad3a18c3d5eee4c27561c41813368 | 454cd9e49518d421264943fddf65bd4130b31eb0 | /BLRM-Randomised-AGILE-SIMULATIONS.R | 1c4349c8b7ac94195575b70c227ccb386acf63d4 | [] | no_license | dose-finding/agile-implement | 994e6307d130ab5877e916e1c5100ca09376b824 | aaeecfdcbdff8d319f64903608430999b3f8b42f | refs/heads/main | 2023-04-06T06:27:32.109276 | 2021-04-16T12:15:40 | 2021-04-16T12:15:40 | 358,584,971 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,741 | r | BLRM-Randomised-AGILE-SIMULATIONS.R | # Defining the MCMC model
# rjags provides the interface to JAGS used for MCMC sampling below.
library("rjags")
# JAGS model: Bayesian logistic regression of the DLT count s[i] out of n[i]
# patients on the standardised dose sdose[i], with a bivariate normal prior
# on theta = (intercept, log-slope); alpha1 = exp(theta[2]) keeps the slope
# positive. NOTE: the string below is JAGS code, not R — do not edit casually.
model1.string <-"
model {
for (i in 1:m){
logit(p[i]) <- alpha0[1] + alpha1[1] * sdose[i]
s[i] ~ dbin(p[i], n[i])
}
theta[1:2] ~ dmnorm(priorMean[1:2],
priorPrec[1:2,
1:2])
## extract actual coefficients
alpha0<- theta[1]
alpha1 <- exp(theta[2])
}
"
# A fresh textConnection is also re-opened inside the simulation loop before
# each jags.model() call; this one only serves the first use.
model1.spec<-textConnection(model1.string)
# Defining function to find standarditised doses for the given skeleton ptox and parameters alpha
# Invert the two-parameter logistic dose-toxicity model: given toxicity
# probabilities `ptox` and parameters `alpha` = (intercept, slope), return
# the standardised doses x solving logit(ptox) = intercept + slope * x.
find.x <- function(ptox, alpha ) {
  coefs <- matrix(alpha, ncol = 2)
  (qlogis(ptox) - coefs[, 1]) / coefs[, 2]
}
# Defining doses (1 stands for SoC and 2:5 stand for experimental doses)
doses<-c(1,2,3,4,5)
# SoC
D<-doses[1]
# Defining Scenarios in Table 2
# (uncomment exactly one `true` vector; element k is the true toxicity
# probability of dose level k, element 1 being the SoC/control arm)
true<-c(0.10,0.30,0.45,0.60,0.70) # Sc 1
# true<-c(0.10,0.15,0.30,0.45,0.60) # Sc 2
# true<-c(0.10,0.12,0.15,0.30,0.45) # Sc 3
# true<-c(0.10,0.11,0.12,0.15,0.30) # Sc 4
# Number of MCMC Samples used to approximate the posterior distribution
iter<-10000
# Number of Simulations used to produce OC
nsims<-2000
# Cohort size for experimental group
cohort<-4
# Cohort size for SoC/Control group
cohort.control<-2
# Total number of patients
N<-30
# Starting dose
firstdose<-2
# Target increase in the toxicity (over the control)
target.increase<-0.20
# Half-width of the tolerance interval around target.increase
delta<-0.05
# Prior Probability of AE at the SoC
p0.control<-0.10
# Overdosing Threshold
overdose<-0.25
# Calibrated prior parameters
var1<-1.10
var2<-0.30
slope<-(-0.05)
spacing<-0.075
#Defining Skeleton and standartised dose levels corresponding to this skeleton
# Skeleton: prior toxicity guesses, equally spaced `spacing` above control.
p.tox0<-c(p0.control,p0.control + spacing* seq(1,length(doses)-1)) # finding the skeleton
# Prior mean of theta = (logit of the control toxicity rate, log-slope).
priorMean<-c(log(p0.control/(1-p0.control)),slope)
priorVar<-matrix(c(var1,0.0,0.0,var2),2,2)
priorPrec<-solve(priorVar)
# Plug-in prior (intercept, slope): exp(mu + var/2) is the lognormal mean
# of the slope implied by the normal prior on theta[2].
alpha.prior.plug<-c(priorMean[1],exp(priorMean[2]+diag(priorVar)[2]/2))
sdose<-find.x(p.tox0,alpha=alpha.prior.plug) # standartised dose levels
# Defining matrices to store the results:
# ss = realised sample size per simulation; selection = one-hot indicator of
# the finally selected dose; p = posterior toxicity samples, one col per dose.
ss<-mat.or.vec(nsims,1)
selection<-mat.or.vec(nsims,length(doses))
p<-mat.or.vec(iter,length(doses))
# Running Simulations
for (z in 1:nsims){
  nextdose<-firstdose
  counter<-0
  stop<-0
  # n = patients assigned per dose so far; s = observed DLTs per dose.
  n<-rep(0,length(doses))
  s<-rep(0,length(doses))
  while(sum(n)<N){
    # Each cohort randomises cohort.control patients to SoC (dose 1) and
    # `cohort` patients to the current experimental dose.
    n[1]<-n[1]+cohort.control
    n[nextdose]<-n[nextdose]+cohort
    #Assigning the patients and evaluating DLTs (Bernoulli draws at the
    # scenario's true toxicity probabilities)
    s[1]<-s[1]+sum(rbinom(cohort.control,1,true[1]))
    s[nextdose]<-s[nextdose]+sum(rbinom(cohort,1,true[nextdose]))
    #Fitting the Bayesian model (fresh connection each fit: textConnection
    # objects are consumed by jags.model)
    model1.spec<-textConnection(model1.string)
    mydata <- list(n=n,s=s,m=length(doses),sdose=sdose,priorMean=priorMean,priorPrec=priorPrec)
    jags <- jags.model(model1.spec,data =mydata,n.chains=1,n.adapt=iter,quiet=TRUE)
    update(jags, iter,progress.bar="none")
    tt<-jags.samples(jags,c('alpha0','alpha1'),iter,progress.bar="none")
    # Extracting vectors of posterior samples of the model parameters
    a0<-tt$alpha0[1,,]
    a1<-tt$alpha1[1,,]
    # Posterior toxicity probability samples for every dose level.
    for (j in 1:length(doses)){
      logit <- a0 + a1 * sdose[j]
      p[,j]<-exp(logit)/(1+exp(logit))
    }
    # Finding the probability of being in the target interval and overdosing probability
    prob.next<-mat.or.vec(length(doses),1)
    for (j in 2:length(doses)){
      # y = posterior samples of the toxicity increase over control.
      y<-p[,j]-p[,1]
      prob.next[j]<-mean(y <=(target.increase+delta) & (y>=target.increase-delta))
      # Zero out any dose whose overdosing probability exceeds the threshold,
      # so it can never be selected.
      if(mean(y>=(target.increase+2*delta))>overdose){
        prob.next[j]<-0
      }
    }
    # If all unsafe - stop the trial, otherwise assign to the max Prob of Target dose (subject to no skipping constraint)
    if(all(prob.next==0)){
      stop<-1
      break()
    }else{
      nextdose<-min(nextdose+1,which.max(prob.next))
    }
  }
  # Storing results of the simulation (stopped trials record sample size only)
  if(stop==0){
    selection[z,nextdose]<-1
    ss[z]<-sum(n)
  }else{
    counter<-counter+1
    ss[z]<-sum(n)
  }
  cat(z,"\n")
}
# Proportion of Each Dose Selection
colMeans(selection)
# Mean Sample size
mean(ss)
|
ea319b3f1a6e800acfc3e075dd9994f9800d8531 | 32bba96a36c3783f7634ec99e75fc2153f7e3d3d | /donneesdatabase2020.R | d08781740ee094e9c6826d32e6266bcf534ee222 | [] | no_license | julienvu/TravailPerso_R_Python_autres_tous_domaines | f2879db106d60cda7af4d5e5c6b883380c66a573 | 2144268029f8c13e6302111627b20472c4206b96 | refs/heads/main | 2023-04-08T17:43:21.938840 | 2021-04-21T20:27:20 | 2021-04-21T20:27:20 | 341,634,941 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,082 | r | donneesdatabase2020.R |
# Data source: FRED (Federal Reserve Economic Data), https://fred.stlouisfed.org/
# Start of R code
# Data come from the Excel file database-reaction_function2020.xlsx
# Load the readxl, dplyr and stats libraries
library(readxl)
library(dplyr, quietly = TRUE)
library(stats)
# Clear all variables from the environment
# NOTE(review): rm(list = ls()) in scripts is discouraged — it silently wipes
# the user's workspace.
remove(list = ls())
# Import the source Excel file
database_reaction_function2020 <- read_excel("Master_Info_Dauphine/M2_ID/database-reaction_function2020.xlsx")
View(database_reaction_function2020)
head(database_reaction_function2020)
df1 <-database_reaction_function2020[,-10]
# Drop the columns that come after column ib
df1 <-df1[,-11]
#df1 <-df1[,-12]
df1 <-df1[,-11]
df1 <-df1[,-10]
names(df1)[1]
names(df1)[3]
names(df1)
View(df1)
# Rename column ...1 to "date"
dfinal <-df1%>% rename(date = ...1)
names(dfinal)[1]#"date"
names(dfinal)[3]#"rb"
names(dfinal)[8]#"if"
# Rename column 8 from "if" to "inflafrance"
names(dfinal)[8]<-"inflafrance"
# Inspect the modified table
View(dfinal)
#tracer des séries temporelles pour taux d'intérêt(BDF, bundesbank, Fed et général)
plot(dfinal$date,dfinal$rf,col="purple",type="l", main="Taux d'intérêt en fonction du temps", xlab = "date",ylab="taux d'intêret rf/rb/ru/général")
lines(dfinal$date,dfinal$rb,col="orange")
lines(dfinal$date,dfinal$ru,col="blue")
lines(dfinal$date,dfinal$rdb, col="green")
# Add a legend pour taux d'intérêt
legend("right", legend=c("rf", "rb","ru","rdb"),
col=c("purple", "orange","blue","green"), lty=1:2, cex=0.8)
#tracer des séries temporelles pour taux de change
#fenêtre 1*2 graphique
par(mfrow=c(1,2))
plot(dfinal$date,dfinal$ff,col="brown",type="l",main="Tchange ff", xlab = "date",ylab="taux de change ff")
# Add a legend pour taux de change ff
legend("topleft", legend=c("ff"),
col=c("brown"), lty=1:2, cex=0.8)
plot(dfinal$date,dfinal$dm,col="orange",,type="l",main="Tchange dm", xlab = "date",ylab="taux de change dm")
# Add a legend pour taux de change dm
legend("topleft", legend=c("dm"),
col=c("orange"), lty=1:2, cex=0.8)
#tracer des séries temporelles pour taux d'inflation
#retour à la fenêtre normal (1*1)
par(mfrow=c(1,2))
plot(dfinal$date,dfinal$ib,col="blue",type="l",main="Tinflation en fonction du temps", xlab = "date",ylab="taux d'inflation")
legend("topright", legend=c("ib"),
col=c("blue"), lty=1:2, cex=0.8)
plot(dfinal$date,dfinal$inflafrance,col="pink",type="l",main="Tinflation en fonction du temps", xlab = "date",ylab="taux d'inflation")
# Add a legend pour taux d'inflation
legend("topright", legend=c("inflafrance"),
col=c("pink"), lty=1:2, cex=0.8)
#voir les corrélations entre certaines variables du jeu de données
#coefficient de corrélation de pearson avec complete.obs qui supprime les lignes
#contenant des valeurs manquantes
cor(dfinal$rf,dfinal$inflafrance,use="complete.obs")#
#interprétation :corrélation positive très forte entre les deux variables
cor(dfinal$rb,dfinal$ib,use="complete.obs")#0.946854 très proche de 1
#interprétation :corrélation positive très forte entre les deux variables
#taux intérêt allemagne et taux d'inflation allemagne
cor(dfinal$rf,dfinal$ff,use="complete.obs")#0.04 très proche de 0
#aucune relation entre le taux d'intérêt france et le taux de change france
cor(dfinal$rb,dfinal$dm,use="complete.obs")#0.06382578 très proche de 0
#aucune relation entre le taux d'intérêt allemagne et le taux de change allemagne
cor(dfinal$rf,dfinal$rdb,use="complete.obs")#0.7694166 très proche de 1
#interprétation :corrélation positive forte entre les deux variables
#taux intérêt france et taux d'intérêt général
cor(dfinal$rb,dfinal$rdb,use="complete.obs")#0.7694166 très proche de 1
#interprétation :corrélation positive très forte entre les deux variables
#taux intérêt allemagne et taux d'intérêt général
cor(dfinal$ff,dfinal$rdb,use="complete.obs")#-0.3348911 négatif
#valeur négative du coefficient et relation non linéaire entre les deux variables
#taux de change france et taux d'intérêt général
cor(dfinal$dm,dfinal$rdb,use="complete.obs")#0.06976173 proche de 0
#valeur du coefficient et relation non linéaire entre les deux variables
#taux de change allemagne et taux d'intérêt général
cor(dfinal$inflafrance,dfinal$rdb,use="complete.obs")#0.8801829 valeur proche de 1
#interprétation :corrélation positive très forte entre les deux variables
#taux d'intérêt général et taux d'inflation france
cor(dfinal$inflafrance,dfinal$ff,use="complete.obs")#-0.376904 négatif
#sens de relation non linéaire entre les variables
#taux d'inflration france et taux de change france
cor(dfinal$ib,dfinal$rdb,use="complete.obs")#0.8920401valeur proche de 1
#interprétation :corrélation positive très forte entre les deux variables
#taux d'intérêt général et taux d'inflation allemagne
cor(dfinal$ib,dfinal$dm,use="complete.obs")
#0.03744707 très proche de 0 mais positif
#valeur très proche de 0 et relation non linéaire entre les deux variables
#taux de change allemagne et taux d'inflation allemagne
#Amplitude des variables du jeu de données
#taux intérêt france gap max min
print('étendue taux intérêt france: ')
max(dfinal$rf)-min(dfinal$rf)#12.375
#taux intérêt allemagne gap max min
print('étendue taux intérêt allemagne: ')
max(dfinal$rb)-min(dfinal$rb)#8.5
#taux intérêt fed gap max min
print('étendue taux intérêt fed: ')
max(dfinal$ru)-min(dfinal$ru)#13.25
#taux change france gap max min
print('étendue taux de change france: ')
max(dfinal$ff)-min(dfinal$ff)#6.0528
#taux change allemagne gap max min
print('étendue taux de change allemagne: ')
max(dfinal$dm)-min(dfinal$dm)#1.669
#taux intérêt général gap max min
print('étendue taux intérêt général: ')
max(dfinal$rdb)-min(dfinal$rdb)#5
#taux inflation allemagne gap max min
print('étendue taux d inflation allemagne: ')
max(dfinal$inflafrance)-min(dfinal$inflafrance)#12.43
#taux inflation allemagne gap max min
print('étendue taux d inflation allemagne: ')
max(dfinal$ib)-min(dfinal$ib)#8.46
#regression lineaire multiple avec l'instruction lm entre taux intérêt france en fonction
#du taux intérêt allemand, taux intérêt américain et taux inflation france
tauxintfrreg<-lm(dfinal$rf ~ dfinal$rb +dfinal$ru+dfinal$inflafrance, data=dfinal)
#output regression tauxintffreg
summary(tauxintfrreg)
#Plus la valeur du R ajusté est proche de 1,
#et plus l'adéquation entre le modèle et les données observées va être forte. Cependant, cette valeur est fortement influencée, entre autres
#par le nombre de variables explicatives incluses dans la regression.
#equation droite
#tauxinteretfrancais= 4.73 +0.57*tauxinteretallemagne-0.21*tauxinteretamericain+ 0.35*tauxinflationfrance
#gamma= 4.73
#coefficients non significatifs pour ru car pas de symbole
#pour rb et inflafrance: significatifs( 90 % et 99,999%)
#avec le taux d'intérêt allemand augmenté de 1% augmente le taux d'intérêt francais de 0.57
#avec le taux d'inflation français augmenté de 1% augmente le taux d'intérêt francais de 0.35
#autre interprétation: pour un taux d'inflation constant, vu que coefficient associé à taux intérêt
#allemand positive, augmenter taux d'intérêt allemand revient donc à augmenter à augmenter taux d'intérêt francais
# stratégie européenne
# Extraction des coefficients
coef(tauxintfrreg)
# Intervalle de confiance (à 95%) des coefficients
confint(tauxintfrreg)
# plot : "vraies" valeurs et droite de regression
plot(dfinal$rf ~ dfinal$rb + dfinal$ru + dfinal$inflafrance, data=dfinal)
abline(tauxintfrreg, col = "orange")
# Prédiction du taux d'intérêt francais à 99% en fonction du taux d'intérêt allemand, du taux d'intérêt américain et du taux d'inflation francais
valeurspredites <- predict(tauxintfrreg,data.frame(4,3.4,7), level= 0.99)
#Affichage des valeurs prédites du taux d'intérêt francais en fonction du temps
plot(dfinal$date,valeurspredites,col="brown",type="l", main="Prédiction du taux d'intérêt francais")
#fin code R
|
8b61ea0fd66bc15515d51d9a79cd2802179053d2 | f25f19454371c545fb69ccb7da1a4ef0baf6acb8 | /man/getMainEffects.Rd | f5c57c101127c7ef73e89e2f24a8954ac5f789e1 | [] | no_license | Sandy4321/npdr | 00e6ab9fd7db2a6465b39bb320afeee76db5f7ce | b02e08577c58a6fddb0b157f9870740795747b04 | refs/heads/master | 2020-12-06T07:34:02.878253 | 2020-01-06T04:54:47 | 2020-01-06T04:54:47 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,703 | rd | getMainEffects.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/inbixGAIN.R
\name{getMainEffects}
\alias{getMainEffects}
\title{Get main effects from generalized linear model regression (parallel).}
\usage{
getMainEffects(labelledDataFrame, regressionFamily = "binomial",
numCovariates = 0, writeBetas = FALSE, useBetas = FALSE,
transformMethod = "", numCores = 2, verbose = FALSE)
}
\arguments{
\item{labelledDataFrame}{\code{data.frame} with variables in columns and samples in rows.}
\item{regressionFamily}{\code{string} glm regression family name.}
\item{numCovariates}{\code{numeric} of included covariates.}
\item{writeBetas}{\code{logical} indicating whether to write beta values to a separate file.}
\item{useBetas}{\code{logical} indicating betas rather than standardized betas used.}
\item{transformMethod}{\code{string} optional transform method.}
\item{numCores}{\code{numeric} number of processor cores to use in mclapply}
\item{verbose}{\code{logical} to send verbose messages to stdout.}
}
\value{
mainEffectValues \code{vector} of main effect values.
}
\description{
\code{getMainEffects}
}
\seealso{
\code{\link{rankUnivariateRegression}}
Other GAIN functions: \code{\link{dcgain}},
\code{\link{dmgain}}, \code{\link{fitInteractionModel}},
\code{\link{fitMainEffectModel}},
\code{\link{gainToSimpleSIF}},
\code{\link{getInteractionEffects}},
\code{\link{regainParallel}}, \code{\link{regain}}
Other inbix synonym functions: \code{\link{dcgain}},
\code{\link{regainParallel}}, \code{\link{regain}}
}
\concept{GAIN functions}
\concept{inbix synonym functions}
\keyword{array}
\keyword{internal}
\keyword{models}
\keyword{regression}
\keyword{univar}
|
664453d5a3c8cc17b22a552777980ea6b9f8f7c9 | 49b8ff57b4184c137dde8ed358b3372f3020d9b0 | /RStudioProjects/mbDiscoveryR/testing/parseMRMR.R | 5a78a8f532f19e763826967ec13c57e04426e0cc | [] | no_license | kelvinyangli/PhDProjects | c70bad5df7e4fd2b1803ceb80547dc9750162af8 | db617e0dbb87e7d5ab7c5bfba2aec54ffa43208f | refs/heads/master | 2022-06-30T23:36:29.251628 | 2019-09-08T07:14:42 | 2019-09-08T07:14:42 | 59,722,411 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 802 | r | parseMRMR.R | # nFeatures is the number of features to be returned
# it needs to be pre-determined for mrmr
# allNodes depends on each dataset that is feed to mrmr, since we need to move each column to the 1st position
# Parse the feature names selected by mRMR from its captured console output.
#
# output    : character vector of printed mRMR output lines.
# nFeatures : number of selected features to extract; must be chosen in
#             advance to match the mRMR run (mRMR returns a fixed count).
# Returns a character vector with the nFeatures selected feature names.
parseMRMR <- function(output, nFeatures) {
  # Locate the header line that precedes the feature table.
  startString <- "*** mRMR features ***"
  # +2 skips the header itself and the following parameter-settings line.
  indexStart <- pmatch(startString, output) + 2
  mrmrOutput <- output[indexStart:(indexStart + nFeatures - 1)]
  # Each feature line quotes the feature name, so the name is the second
  # token after splitting on double quotes. vapply replaces the original
  # index loop into a type-churning vector() accumulator.
  vapply(mrmrOutput, function(line) strsplit(line, "\"")[[1]][2],
         character(1), USE.NAMES = FALSE)
}
5f197afefac4d74d49baf2e6db6cd691f18e9d73 | 29c2e5531fb0a5095df2d5a02a08d4429acdb280 | /cachematrix.R | df95d63e0ee891b90adb708c682b7d29dd44ec03 | [] | no_license | andrewdziedzic25/Andrew-Dziedzic | 5d358c492a015ad44efb2574bdede57222b3e9af | 79d56ec46262edae5a462eee2eba9f3499b36226 | refs/heads/master | 2022-12-13T18:00:30.171030 | 2022-12-04T22:17:44 | 2022-12-04T22:17:44 | 244,030,725 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,169 | r | cachematrix.R | ##The particular function creates a specific "MATRIX" object which can cache its inverse
## makeCacheMatrix creates a list containing a function to:
## Get & Set value of MATRIX
## Get & Set value of inv MATRIX
## Build a special "MATRIX" wrapper that can cache its inverse.
## Returns a list of four closures sharing the matrix `a` and the cached
## inverse:
##   SET/GET               - replace / read the stored matrix
##   SETinverse/GETinverse - store / read the cached inverse
makeCacheMatrix <- function(a = matrix()) {
  # NULL marks the cache as empty.
  inv <- NULL
  SET <- function(c) {
    # Storing a new matrix invalidates any cached inverse.
    a <<- c
    inv <<- NULL
  }
  GET <- function() {
    a
  }
  SETinverse <- function(inverse) {
    inv <<- inverse
  }
  GETinverse <- function() {
    inv
  }
  list(
    SET = SET,
    GET = GET,
    SETinverse = SETinverse,
    GETinverse = GETinverse
  )
}
##The particular function computes inverse of a special MATRIX returned by
##makeCacheMatrix (*which is the function ABOVE*)
##Computing inverse of square MATRIX is done with the solve
##function in R .... if X = square invertible MATRIX,
##solve(X) returns inv
##For the specific assignment.... assume the MATRIX supplied is always invertible!!!
## Compute the inverse of the special "MATRIX" object returned by
## makeCacheMatrix, reusing the cached inverse when one is present.
##
## b   : list produced by makeCacheMatrix (SET/GET/SETinverse/GETinverse).
## ... : extra arguments forwarded to solve().
## Returns the inverse of the matrix stored in `b`.
## Assumes the stored matrix is always invertible.
cacheSolve <- function(b, ...) {
  inv <- b$GETinverse()
  if (!is.null(inv)) {
    message("Cached data is:")
    return(inv)
  }
  data <- b$GET()
  # BUGFIX: forward ... to solve(); previously the extra arguments accepted
  # by cacheSolve(b, ...) were silently ignored (inv <- solve(data)).
  inv <- solve(data, ...)
  b$SETinverse(inv)
  ## Return a matrix that is the inverse of 'b'
  inv
}
|
1b6699cee852d24a6ed4ff1e01eb9e6b5d6c7648 | 98615162d2e147060cf2a8fd7f20cb756c6d6b41 | /R/restrict.minimal.hits.R | f2207d2eb09870e2d9cc94e536f1a82c1c893085 | [] | no_license | axrt/gbra | 9796f993d8b457d21cc1213f0d9768907e63cfa5 | dde797f8c2ae9777094b5b05ee4d4e5023f0ad31 | refs/heads/master | 2021-01-19T05:49:11.562580 | 2018-02-18T18:02:15 | 2018-02-18T18:02:15 | 24,808,334 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 924 | r | restrict.minimal.hits.R | #' Use to restrict a given data.frame with RAW(!) bitscores to a certain number of
#' non-zero bitscores per ORF.
#' @param \code{df} data.frame, presumably from read.bhs().
#' @param \code{minhit} minimal number of non-zero hits that an ORF must have in order remain in the table, default is 10.
#' @return a data.frame with only those ORFs that had non-zero hits of \code{minhit} threshold and greater.
#' @examples
#' master.table.raw<-read.bhs(bh.folder = "gBLASTer/bh")
#' master.table.raw<-as.data.frame(master.table.raw)
#' master.table.raw.10hcut<-restrict.minimal.hits(df=master.table.raw)
#'
# Keep only the rows (ORFs) of `df` that have at least `minhit` non-zero
# bitscores. Columns 1:2 are treated as identifiers; bitscores are assumed
# to start at column 3 (matching the original layout — TODO confirm).
#
# df     : data.frame of raw bitscores, presumably from read.bhs().
# minhit : minimal number of non-zero hits required to keep a row (default 10).
# Returns the filtered data.frame.
restrict.minimal.hits <- function(df, minhit = 10) {
  # TRUE when a row has enough non-zero hits. The comparison is already
  # logical, so the original ifelse(cond, TRUE, FALSE) wrapper was redundant.
  enough <- function(i, minhit) {
    sum(i > 0) >= minhit
  }
  # Apply row-wise over the bitscore columns only.
  enoughs <- apply(df[, 3:ncol(df)], 1, enough, minhit = minhit)
  df[enoughs, ]
}
7c200df63cf7a6711d37182aa05c133be45fd303 | 1b901d2fae21c0fcbe7799682af9611403832bf9 | /RforgeCode/Meucci/man/PlotCompositionEfficientFrontier.Rd | 70c9ac5ad39528c398bf42ba0b522ace167f4cc7 | [] | no_license | dsnaveen/symmys | ca5ad4c47de4ab4315389c04bc62e752b30aadbc | c227e905452779425a08c5304ac7a0df7dbb5247 | refs/heads/master | 2021-05-30T01:18:59.261392 | 2015-12-07T20:32:25 | 2015-12-07T20:32:25 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 726 | rd | PlotCompositionEfficientFrontier.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/PlotCompositionEfficientFrontier.R
\name{PlotCompositionEfficientFrontier}
\alias{PlotCompositionEfficientFrontier}
\title{Plots the efficient frontier}
\usage{
PlotCompositionEfficientFrontier(Portfolios)
}
\arguments{
\item{Portfolios}{: [matrix] (M x N) M portfolios of size N (weights)}
}
\description{
Plot the efficient frontier, as described in A. Meucci,
"Risk and Asset Allocation", Springer, 2005.
}
\author{
Xavier Valls \email{flamejat@gmail.com}
}
\references{
A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170}.
See Meucci's script for "PlotCompositionEfficientFrontier.m"
}
|
800fac66c5f0e5b57ffd2b772ef5cba16dc43cf8 | fe7bdff75402c7bcff45082fb384f8918a23811d | /my_first_r.R | 635252572210a7f6d3bab33e1aabf17cb6637e8c | [] | no_license | Jiyounglee78/project_nottoGIT | cc810f1528716eed6d4ebf5577dfe13047adf5d7 | e19a514421482570c6a6a8b34f48480a5c99424c | refs/heads/master | 2022-04-22T10:43:47.753066 | 2020-04-16T19:20:30 | 2020-04-16T19:20:30 | 256,309,228 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 281 | r | my_first_r.R | example <- matrix(c(1, 2, 3, 4, 5, 6, 7, 8), nrow = 4, ncol = 2)
#install from CRAN
install.packages(c("ggplot2", "devtools", "lme4"))
#Installing from Bioconductor
# NOTE(review): biocLite.R / biocLite() is the legacy Bioconductor installer;
# current Bioconductor releases use BiocManager::install() instead — confirm
# before running.
source("https://bioconductor.org/biocLite.R")
biocLite()
#Installing from GitHub
# install_github() requires devtools (or remotes) to be attached first.
install_github("author/package")
6ed89b78796c679f11370998ce46c516abb269f9 | 3e70c711647c48e41231a44125236ec2c706ecb0 | /Assignment3/LogitReg.R | 71c0fad1facea04f76c4cd814c943718570a0b53 | [] | no_license | laurencehendry/Collaborative-Data-Analysis-Assignment2 | 2715a702284905e7f5d3a8e22af33a33ce853bef | fb9818050a160be46f91e385aa069427369550f7 | refs/heads/master | 2021-01-20T15:57:41.070713 | 2015-12-03T15:35:23 | 2015-12-03T15:35:23 | 47,351,803 | 1 | 0 | null | 2015-12-03T18:30:41 | 2015-12-03T18:30:41 | null | UTF-8 | R | false | false | 3,458 | r | LogitReg.R | ### logit models for assignment 3 ###
### Claire & Noriko ###
### set working directory
# Stepwise logistic regression of interest in electric vehicles (EVs).
# Fits four nested lrm() models, tabulates them with stargazer, computes
# predicted probabilities by income class, and draws Zelig simulation plots.
# NOTE(review): hard-coded working directory -- adjust for your machine.
setwd("C:/Users/noriko/Desktop/Collaborative-Data-Analysis-Assignment2/Assignment3")
library(stargazer)
library(knitr)
library(Zelig)
library(rms)
# load the cleaned-up data frame
# (presumably restores the EVINTEREST object used below -- confirm)
load("EVdata1.rda")
### step-wise logistic regression ###
# Estimate model-1 (AGE & SEX & INCOME & Education)
# Categorical income (4 classes, low is the reference)
L1 <- lrm(EVinterest ~ RAGE + Male + lowermiddle + highermiddle + high + degree,
          EVINTEREST)
# BUG FIX: the original called lrm(L1), which passes a fitted model where a
# formula is expected and errors; print() displays the fit summary instead.
print(L1)
# Estimate model-2 (AGE & SEX & INCOME & Education & Licence & # of Cars)
L2 <- lrm(EVinterest ~ RAGE + Male + lowermiddle + highermiddle + high + degree
          + licence + NumCar, EVINTEREST)
print(L2)
# Estimate model-3 (AGE & SEX & INCOME & Education & Licence & # of Cars & # of children)
L3 <- lrm(EVinterest ~ RAGE + Male + lowermiddle + highermiddle + high + degree
          + licence + NumCar + DVHsize + havechildren, EVINTEREST)
print(L3)
# Estimate model-4 (AGE & SEX & INCOME & Education & Licence & # of Cars & # of children & Region)
L4 <- lrm(EVinterest ~ RAGE + Male + lowermiddle + highermiddle + high + degree
          + licence + NumCar + DVHsize + havechildren + Scotland, EVINTEREST)
print(L4)
### present results
# Create cleaner covariate labels (order must match the model-4 terms)
labels <- c('Age', 'Male', 'Income: low-middle', 'Income: high-middle', 'Income: high',
            'College degree', 'Drivers licence', '# of cars', "Size of household", 'Having dependent children',
            'Scotland', '(Intercept)')
stargazer::stargazer(L1, L2, L3, L4, covariate.labels = labels,
                     title = 'Interests in EVs',
                     digits = 2, type = 'text')
### predicted probabilities by income (other covariates are fixed)
# Refit model-4 with glm() so predict(type = 'response') is available;
# note it uses NumDepCh in place of DVHsize + havechildren.
L4Pred <- glm(EVinterest ~ RAGE + Male + lowermiddle + highermiddle + high + degree
              + licence + NumCar + NumDepCh + Scotland,
              data = EVINTEREST, family = 'binomial')
# One row per income class; extra columns (inccat, low) are ignored by predict()
fittedP <- with(EVINTEREST,
                data.frame(RAGE=mean(RAGE), Male=c(1,1,1,1), inccat=c(1,2,3,4),
                           low=c(1,0,0,0), lowermiddle=c(0,1,0,0), highermiddle=c(0,0,1,0),
                           high=c(0,0,0,1), licence=1, degree =1, NumCar=1,
                           NumDepCh=1, Scotland=0))
fittedP$predicted <- predict(L4Pred, newdata = fittedP,
                             type = 'response')
fittedPselected <- subset(fittedP, select= c(inccat, predicted))
kable(fittedPselected, align = 'c', digits = 2,
      caption = 'Predicted Probabilities for Fitted Values')
### Zelig plot: simulate predicted probabilities over covariate ranges
ZP <- zelig(EVinterest ~ RAGE + Male + lowermiddle + highermiddle + high
            + degree + licence + NumCar + NumDepCh + Scotland,
            cite = FALSE, data = EVINTEREST, model = 'logit')
setZP1 <- setx(ZP, RAGE = 20:80)
simZP1 <- sim(ZP, x = setZP1)
plot(simZP1, xlab="Age", ylab="Predicted Probability",
     main="Predicted Probability of Having an Interest in EV by Age")
setZP2 <- setx(ZP, NumCar = 0:3)
simZP2 <- sim(ZP, x = setZP2)
plot(simZP2, xlab="Number of Cars in Household", ylab="Predicted Probability",
     main="Predicted Probability of Having an Interest in EV by # of Cars")
setZP3 <- setx(ZP, NumDepCh = 0:7)
simZP3 <- sim(ZP, x = setZP3)
plot(simZP3, xlab="Number of Dependent Childen", ylab="Predicted Probability",
     main="Predicted Probability of Having an Interest in EV by # of Children")
|
210af23717f662ccf05c4887923b7612448e1252 | 2cd8f80c7a1016129c57e85d729d655fefc01070 | /tests/readIDAT_gs_wg6v2.R | 1dc865f2280c18c9a17c25ca87f9d8350b85f971 | [] | no_license | HenrikBengtsson/illuminaio | 44f5f2295839c4132139d98cdc6d7e5d366705ff | 4a79a1f7b0348a331ac79d2abc8599e50a25d6c5 | refs/heads/master | 2023-04-29T15:38:56.887547 | 2023-04-26T19:56:09 | 2023-04-26T19:56:09 | 24,728,987 | 6 | 5 | null | 2020-07-13T01:01:16 | 2014-10-02T17:29:13 | R | UTF-8 | R | false | false | 1,144 | r | readIDAT_gs_wg6v2.R | library(illuminaio)
# Regression check: bead-level summaries decoded by readIDAT() must agree
# with the GenomeStudio probe-summary export for the same array.
library(IlluminaDataTestFiles)

idat_path <- system.file("extdata", "idat", "4343238080_A_Grn.idat",
                         package = "IlluminaDataTestFiles")
quants <- readIDAT(idat_path)$Quants

gs_path <- system.file("extdata", "gs", "4343238080_A_ProbeSummary.txt.gz",
                       package = "IlluminaDataTestFiles")
genome_studio <- read.delim(gzfile(gs_path, open = "r"), sep = "\t", header = TRUE)

# Not all probes are present in the GenomeStudio output, so keep only the
# IDAT rows whose probe codes appear there.
quants <- quants[quants[, "CodesBinData"] %in% genome_studio[, "ProbeID"], ]
# The two tables are ordered differently (numeric vs alphabetical), so
# realign the GenomeStudio rows to the IDAT probe order.
genome_studio <- genome_studio[match(quants[, "CodesBinData"],
                                     genome_studio[, "ProbeID"]), ]

# Check each value against the GenomeStudio output; rounding differs
# slightly, so allow a small numeric tolerance.
# summarised bead intensities
RUnit::checkEqualsNumeric(quants[, "MeanBinData"], genome_studio[, 3],
                          tolerance = 10e-7)
# number of beads (exact match expected)
RUnit::checkEquals(quants[, "NumGoodBeadsBinData"], genome_studio[, 5])
# standard errors: SD / sqrt(bead count)
RUnit::checkEqualsNumeric(quants[, "DevBinData"] / sqrt(quants[, "NumGoodBeadsBinData"]),
                          genome_studio[, 4], tolerance = 10e-7)
|
9cf51268acd9c255a1d17374220c300a02c2e994 | 20ff998b9ab0992bd595de0b8446238338042201 | /play.R | a61d78ea7e30dce89fbf90cf160ca65ca5bb4ed8 | [] | no_license | faisal-samin/visnetwork-demo | 796e0b0f0750343a769d92fb3c631f17542d7b2c | 47e9e7441a2ab2cd152b7111f13e9ac40f8950a2 | refs/heads/master | 2020-04-19T17:17:49.030059 | 2019-01-31T10:44:13 | 2019-01-31T10:44:13 | 168,330,669 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,622 | r | play.R | # Read in packages
# Demo: build a Game of Thrones character network with visNetwork,
# styling House Stark and House Lannister nodes differently.
library(tidyverse)
library(visNetwork)

# Read in data ------------------------------------------------------------
nodes = read_csv(
  "got-nodes.csv",
  col_names = c("id", "label"),
  # set name of columns
  skip = 1 # skip column headers
)
edges = read_csv(
  "got-edges.csv",
  col_names = c("from", "to", "weight"),
  skip = 1
)

# preview network (default styling, no features yet)
visNetwork(nodes,
           edges,
           height = "500px",
           width = "100%")

# Add features to nodes ---------------------------------------------------
# add houses
# Stark - Arya, Bran, Jon, Rickon, Catelyn, Robb, Sansa, Eddard
# Lannister - Tywin, Tyrion, Jaime, Cersei
# characters not listed get house = NA and fall through to the defaults below
nodes_h = nodes %>%
  mutate(house = case_when(
    id %in% c("Arya", "Bran", "Jon", "Catelyn", "Robb", "Sansa", "Eddard") ~ "Stark",
    id %in% c("Tywin", "Tyrion", "Jaime", "Cersei") ~ "Lannister"
  ))

# per-house color, plus larger node size and label font for the two houses
nodes_c = nodes_h %>%
  mutate(color = case_when(
    house == "Stark" ~ "darkred",
    house == "Lannister" ~ "gold",
    TRUE ~ "lightgrey"
  ),
  size = case_when(
    house %in% c("Stark", "Lannister") ~ 30,
    TRUE ~ 10
  ),
  font.size = case_when(
    house %in% c("Stark", "Lannister") ~ 50,
    TRUE ~ 15
  )
  )

# render every node as an image
# NOTE(review): only Arya gets an image URL; all other nodes end up with
# image = NA while shape = "image" -- confirm visNetwork renders those
# as intended (they may show as broken images).
nodes_i = nodes_c %>%
  mutate(
    shape = "image"
    ,
    image = case_when(
      id == "Arya" ~ "https://pngimage.net/wp-content/uploads/2018/05/arya-stark-png-2.png"
    )
  )

# scale edge width by relationship weight
edges_l = edges %>%
  mutate(width = weight/5)

visNetwork(nodes_i, edges_l, width = "100%") %>%
  visNodes(shapeProperties = list(useBorderWithImage = TRUE))
c36ea601587784f3b6c2c6e66cb5dc3b7de6b957 | a5e49e9b3e7892ce476bab528cde3f686d5a5e3d | /inst/shiny_apps/Histogram/app.R | fdf680f745d13579d7ccc854642983dc112399c2 | [] | no_license | cran/lessR | a7af34480e88c5b9bf102ab45fa6464a22ffbe3b | 562f60e6688622d8b8cede7f8d73d790d0b55e27 | refs/heads/master | 2023-05-29T07:57:09.544619 | 2023-05-14T20:20:02 | 2023-05-14T20:20:02 | 17,697,039 | 6 | 3 | null | null | null | null | UTF-8 | R | false | false | 21,200 | r | app.R | # ---------
# ---------
# Histogram
# ---------
# Shiny front end for lessR's Histogram(): upload a data file, pick a
# numeric variable, then interactively tune bins, colors, values, and
# density options; the equivalent lessR call is echoed and saveable.
library(shiny)
library(lessR)

# constant fill colors offered in the "fill" dropdown
clr.one <- list(
  "#96AAC3", "dodgerblue3", "cornflowerblue", "steelblue", "darkblue",
  "pink2", "red3", "firebrick2", "darkred",
  "violetred", "mediumorchid", "purple3",
  "darkorange2", "salmon", "orange3", "sienna", "rosybrown",
  "wheat3", "goldenrod2", "khaki", "yellow2",
  "darkseagreen2", "springgreen3", "seagreen4", "darkgreen",
  "black", "gray45", "slategray4", "gray75", "snow3", "gray95",
  "lavender", "ivory2", "aliceblue", "white")

# bar edge (border) colors; "off" means no border
clr.edge <- list("off", "black", "gray50", "gray75", "white", "ivory",
  "darkblue", "darkred", "darkgreen", "rosybrown2", "bisque",
  "slategray2", "aliceblue", "thistle1", "coral", "gold")

# qualitative palette names understood by lessR
clr.qual <- c("hues", "Okabe-Ito", "viridis")

# sequential palette names understood by lessR
clr.seq <- list("reds", "rusts", "browns", "olives", "greens",
  "emeralds", "turquoises", "aquas", "blues", "purples", "violets",
  "magentas", "grays")

# default-first color lists for the general and normal density curves
clr.den_g <- c("steelblue3", clr.one[2:length(clr.one)])
clr.den_n <- c("pink1", clr.one[2:length(clr.one)])

# expose the package's shiny_apps folder (styles.css) to the browser
addResourcePath("shiny_dir", system.file("shiny_apps", package="lessR"))
# Two-tab UI: tab 1 uploads and previews the data; tab 2 configures and
# displays the histogram. Input IDs (fType, sep, x.col, myBins, ...) are
# read by the matching server function.
ui <- fluidPage(
  tags$head(tags$link(rel="stylesheet", href="shiny_dir/styles.css")),
  tabsetPanel(

    # --- Tab 1: upload a local or web data file and preview it ---
    tabPanel("Data",
      titlePanel(div("Upload a text (.csv, .txt) or Excel file", id="hp")),
      sidebarLayout(
        sidebarPanel(
          radioButtons("fType", HTML("<h5 class='soft'>Format</h5>"),
                       c("Excel"="Excel", "Text"="Text")),
          # text files additionally need separator and decimal conventions
          conditionalPanel(condition="input.fType == 'Text'",
            radioButtons("sep", HTML("<h5 class='soft'>Separator</h5>"),
                         c(Comma=",", Semicolon=";", Tab="\t"), ","),
            radioButtons("decimal", HTML("<h5 class='soft'>Decimal</h5>"),
                         c("Point"=".", "Comma"=",")),
          ),
          radioButtons("fSource", HTML("<h5 class='soft'>Source</h5>"),
                       c("Local"="local", "Web"="web")),
          conditionalPanel(condition="input.fSource == 'local'",
            fileInput("myFile", "Locate your data file",
                      accept=c(".csv", ".txt", ".xlsx", ".xlsm")),
          ),
          conditionalPanel(condition="input.fSource == 'web'",
            textInput("myURL", "Web address of data file"),
            actionButton("submitURL", "Submit")
          ),
          textOutput("ncols"),
          textOutput("nrows"),
          # server-rendered radio buttons for choosing which rows to show
          uiOutput("d.show"),
        ), # end sidbarPanel
        mainPanel(
          tableOutput("d.table"),
          tags$style(type="text/css", "#d.table {font-size: .95em;}")
        )
      ) # end sidbarLayout
    ), # end tabPanel 1

    # --- Tab 2: histogram controls (collapsible option groups) + plot ---
    tabPanel("Histogram",
      pageWithSidebar(
        titlePanel(""),
        sidebarPanel(
          selectInput('x.col', 'x Variable', ""),
          tags$hr(),
          # bin width/start sliders, rendered server-side per variable
          checkboxInput("myBins", div("Bins", class="view"), FALSE),
          conditionalPanel(condition="input.myBins == true",
            uiOutput("slider_bw"),
            uiOutput("slider_bs")
          ),
          tags$hr(),
          checkboxInput("myGeom", div("Colors", class="view"), FALSE),
          conditionalPanel(condition="input.myGeom == true",
            selectInput("myFill", "fill",
                        choices=list("Constant"=clr.one,
                                     "Qualitative"=clr.qual,
                                     "Sequential"=clr.seq)),
            selectInput("myColor", "color", choices=clr.edge),
            sliderInput("myTrans", "transparency", min=0, max=1, value=0)
          ),
          tags$hr(),
          checkboxInput("myValCm", div("Values, Cumulate", class="view"), FALSE),
          conditionalPanel(condition="input.myValCm == true",
            checkboxInput("myValues", "values", value=FALSE),
            selectInput("myCumlt", "cumulate", choices=list("off", "on", "both"))
          ),
          tags$hr(),
          # density-curve overlay options
          checkboxInput("mySmooth", div("Smooth", class="view"), FALSE),
          conditionalPanel(condition="input.mySmooth == true",
            checkboxInput("myDens", "density", TRUE),
            checkboxInput("myHist", "show_histogram", TRUE),
            checkboxInput("myRug", "rug", FALSE),
            radioButtons("myType", "type",
                         c("general"="general", "normal"="normal", "both"="both")),
            uiOutput("slider_bndwd"),
            selectInput("myFill_gen", "fill_general", choices=clr.den_g),
            selectInput("myFill_nrm", "fill_normal", choices=clr.den_n)
          ),
          tags$hr(),
          # save the current plot as pdf plus a reproducible R script
          checkboxInput("do_pdf", div("Save", class="view"), FALSE),
          conditionalPanel(condition="input.do_pdf == true",
            sliderInput("w", "width (inches):", min=3, max=20, value=8),
            sliderInput("h", "height (inches):", min=3, max=20, value=6),
            checkboxInput("do_cmt", "include comments in R file", TRUE),
            actionButton(inputId="btn_pdf", "Save"),
            tags$p(div("Save pdf file and R code file",
                       style="margin-top:.25em;"))
          ),
          tags$hr(),
          checkboxInput("do_help", div("Help", class="view"), FALSE),
        ), # end sidebarPanel
        mainPanel(
          plotOutput('myPlot'),
          verbatimTextOutput("summary"),
          plotOutput("saved_plot"),
          textOutput("help")
        )
      ) # end pageWithSidebar
    ) # end tabPanel 2
  ) # end tabsetPanel
) # end fluidPage
# Server logic: reads the uploaded data, manages per-variable defaults for
# the bin and bandwidth sliders (held in reactiveValues v), renders the
# lessR Histogram() with the chosen options, echoes the equivalent lessR
# call, and writes a pdf + R script on request.
server <- function(input, output, session) {

  options(shiny.maxRequestSize=50*1024^2)  # max upload file size is 50MB

  # cross-reactive state: slider defaults/ranges, last Histogram() result
  # (v$h), generated code string (v$code), and the new-variable flag v$x.new
  v <- reactiveValues()
  v$x.new <- FALSE

  # ------- Read and Display Data -----------
  # -----------------------------------------
  # process the URL for reading from the web
  theURL <- eventReactive(input$submitURL, {
    input$myURL
  })

  # read the data file and populate the x-variable dropdown with the
  # numeric columns
  data <- reactive({
    if (input$fSource == "local") {
      # BUG FIX: the original required the literal string "input$myFile",
      # which is always truthy; require the actual upload object so the
      # reactive waits until a file is chosen.
      shiny::req(input$myFile)
      myPath <- input$myFile$datapath
    }
    if (input$fSource == "web") {
      url <- theURL()
      if (!(grepl("http://", url)))
        url <- paste("http://", url, sep="")
      myPath <- url
    }
    shiny::req(myPath)

    if (input$fType == "Excel") {
      library(openxlsx)
      if (grepl(".xlsx", myPath, fixed=TRUE)) {
        d <- read.xlsx(myPath)
      }
      else {
        message("\n>>> Excel file must have file type of .xlsx <<<\n\n")
        stopApp()
      }
    }

    if (input$fType == "Text") {
      if ((grepl(".csv", myPath, fixed=TRUE)) ||
          (grepl(".txt", myPath, fixed=TRUE))) {
        d <- read.csv(myPath, sep=input$sep, dec=input$decimal,
                      na.strings="")  # default is NOT a blank char missing
      }
      else {
        message("\n>>> Text file must have file type of .csv or .txt <<<\n\n")
        stopApp()
      }
    }  # end fType is "Text"

    updateSelectInput(session, inputId="x.col", label="x variable",
                      choices=c("Select a numerical variable" = "",
                                names(d)[sapply(d, is.numeric)]))
    return(d)
  })  # end reactive()

  # row-display chooser, only shown when there are more than 10 rows;
  # also reports the data dimensions
  output$d.show <- renderUI({
    shiny::req(data())
    output$nrows <- renderText({paste("Number of data rows:", nrow(data()))})
    output$ncols <- renderText({paste("Number of variables:", ncol(data()))})
    if (nrow(data()) > 10)
      radioButtons("d.show", HTML("<h5 class='soft'>Rows to display</h5>"),
                   c("First 10"="head", "Last 10"="tail", "Random 10"="random",
                     "All"="all"))
  })

  # preview table honoring the head/tail/random/all choice
  output$d.table <- renderTable({
    if (is.null(input$d.show))
      data()
    else {
      nr <- min(11, nrow(data()))
      if (nr == 11) {
        if (input$d.show == "all")
          data()
        else if (input$d.show == "head")
          head(data(), n=10)
        else if (input$d.show == "tail")
          tail(data(), n=10)
        else if (input$d.show == "random") {
          dd <- data()
          dd[.(random(10)), ]  # lessR row sampling
        }
      }
    }
  }, striped=TRUE)  # end renderTable

  # -----------------------------------------
  # ------- Bin Width and Bin Start ---------
  # -----------------------------------------
  # get default bin_width and bin_start values for initial histogram
  # and bin width and start sliders
  # default bin width is v$bw
  observeEvent(input$x.col, {  # processed while still on Get Data tab
    v$x.new <- TRUE
    x.name <- input$x.col
    x <- data()[, x.name]
    v$min.x <- min(x, na.rm=TRUE)
    max.x <- max(x, na.rm=TRUE)
    # Sturges breaks give the default bin width
    h <- suppressWarnings(hist(x, plot=FALSE, breaks="Sturges"))
    v$bw <- h$breaks[2]-h$breaks[1]
    if (v$bw == 0.5) v$bw <- 1
    # slider range: roughly range/45 up to range/2.5
    v$rng <- max.x - v$min.x
    v$bw1 <- v$rng/45
    if (v$min.x > 1) v$bw1 <- floor(v$bw1)
    if (v$bw1 == 0) v$bw1 <- 0.5
    v$bw2 <- v$rng/2.5
    if (v$bw2 > 5) v$bw2 <- ceiling(v$bw2)
    v$bw1 <- round(v$bw1, 3)
    v$bw2 <- round(v$bw2, 3)
    # bin_start default and slider range from a pretty lower bound
    pret <- pretty(c((v$min.x-(.01*v$min.x)), max.x))[1]
    v$bs1 <- pret - (v$bw)
    v$bs <- pret
    v$bs2 <- v$min.x
    if(v$bs1 > v$bs2) v$bs2 <- v$bs1
    if (abs(v$min.x) > 1) {
      v$bs1 <- floor(v$bs1)
      v$bs2 <- floor(v$bs2)
    }
    # blank the sliders; they are re-rendered with the new defaults later
    updateSliderInput(inputId="slider_bw", label="bin_width",
                      min=NA, max=NA, value=NA)
    updateSliderInput(inputId="slider_bs", label="bin_start",
                      min=NA, max=NA, value=NA)
  })

  # ---------- Density Bandwidth ------------
  # -----------------------------------------
  # starting from the nrd0 rule of thumb, widen the bandwidth (up to 25
  # iterations) until the density estimate is unimodal; stores the default
  # and slider range in v$bndwd, v$bndwd1, v$bndwd2
  get_bw <- function(x) {
    bw <- bw.nrd0(x)
    irep <- 0
    repeat {  # iterated value of bw
      irep <- irep + 1
      d.gen <- suppressWarnings(density(x, bw))  # no missing data
      xd <- diff(d.gen$y)
      flip <- 0  # number of sign changes, i.e., local extrema
      for (j in 2:length(xd))
        if (sign(xd[j-1]) != sign(xd[j])) flip <- flip + 1
      if (flip > 1 && irep <= 25)
        bw <- 1.1 * bw
      else
        break;
    }  # end repeat
    v$bndwd <- bw  # cutoff of 7 to keep bw*.15 > 1
    v$bndwd1 <- ifelse (bw>7, floor(bw*0.15), round(bw*0.15, 2))
    if (v$bndwd1 == 0) v$bndwd1 <- 0.00001
    v$bndwd2 <- ifelse (bw>7, ceiling(bw*1.5), round(bw*1.5, 2))
  }

  # recompute the bandwidth default when the variable changes
  observeEvent(input$x.col, {  # if switch variable
    if (input$myDens) {
      x.name <- input$x.col
      shiny::req(x.name)
      x <- na.omit(data()[, x.name])
      get_bw(x)
      sliderInput(inputId="slider_bndwd", label="bandwidth",
                  min=v$bndwd1, max=v$bndwd2, value=v$bndwd)
    }
  })

  # get default band width and min, max for input slider when
  # "parameters" button is checked
  observeEvent(input$myDens, {
    if (input$myDens) {
      x.name <- input$x.col
      shiny::req(x.name)
      x <- na.omit(data()[, x.name])
      get_bw(x)
    }
  })

  # band width slider, only activates for a variable change
  # runs whenever an input$ in the function changes
  output$slider_bndwd <- renderUI({
    if (!is.null(v$bndwd)) {
      sliderInput(inputId="slider_bndwd", label="bandwidth",
                  min=v$bndwd1, max=v$bndwd2, value=v$bndwd)
    }
  })

  # ------------ The Histogram --------------
  # -----------------------------------------
  output$myPlot <- renderPlot({

    if (input$myBins) {
      if (is.null(input$slider_bw)) {
        # bin_width slider, only activates for a variable change
        output$slider_bw <- renderUI({
          req(!is.null(v$bw1))
          sliderInput(inputId="slider_bw", label="bin_width",
                      min=v$bw1, max=v$bw2, value=v$bw)
        })
      }
      # bin_start slider, only activates for a variable change
      if (is.null(input$slider_bs)) {
        output$slider_bs <- renderUI({
          req(!is.null(v$bs1))
          sliderInput(inputId="slider_bs", label="bin_start",
                      min=v$bs1, max=v$bs2, value=v$bs)
        })
      }
    }

    # the sliders update later, so the old value can be current for a new var
    # update to avoid the extra computation and distracting intermediate plot
    if (!is.null(input$slider_bw)) {
      if (input$slider_bw < v$bw1 || input$slider_bw > v$bw2) {
        if (!input$myBins)
          updateSliderInput(session,"slider_bw", min=v$bw1, max=v$bw2,
                            value=v$bw)
        req(input$slider_bw >= v$bw1)  # takes a while for the update
        req(input$slider_bw <= v$bw2)
      }
    }
    if (!is.null(input$slider_bs)) {
      if (input$slider_bs < v$bs1 || input$slider_bs > v$bs2) {
        if (!input$myBins)
          updateSliderInput(session,"slider_bs", min=v$bs1, max=v$bs2,
                            value=v$bs)
        req(input$slider_bs >= v$bs1)  # takes a while for the update
        req(input$slider_bs <= v$bs2)
      }
    }

    # switching to a new var, v$x.new is TRUE, initiates the histogram
    # computations twice, so skip the first histogram and take the second
    go.new <- v$x.new
    if (go.new) {
      v$x.new <- FALSE
      req(!go.new)
    }

    # when Bins button clicked do not want any new re-renderings, but get two
    # this hack stops the first re-rendering for the first instance
    # slider values are only null before any click on the Bins button
    if (input$myBins) req(!is.null(input$slider_bw))

    shiny::req(input$x.col)
    x.name <- input$x.col
    x <- data()[, x.name]
    y.name <- paste("Count of", x.name)

    # when beginning, sliders will be NULL
    in.bw <- ifelse (is.null(input$slider_bw), v$bw, input$slider_bw)
    in.bs <- ifelse (is.null(input$slider_bs), v$bs, input$slider_bs)
    v$in.den <- ifelse (input$mySmooth, TRUE, FALSE)

    # build the equivalent lessR call, adding only non-default parameters
    out <- paste("Histogram(", x.name, sep="")

    if (!v$in.den) {
      v$h <- Histogram(x, data=NULL,
        bin_width=in.bw, bin_start=in.bs, bin_end=NULL,
        fill=input$myFill, color=input$myColor, transparency=input$myTrans,
        values=input$myValues, cumulate=input$myCumlt,
        xlab=x.name, ylab=y.name,
        quiet=TRUE)
      # p_* flags are TRUE when the option is still at its default
      p_bin_width <- in.bw == v$bw
      p_bin_start <- in.bs == v$bs
      p_fill <- input$myFill == "#96AAC3"
      p_color <- input$myColor == "off"
      p_trans <- input$myTrans == 0
      p_values <- input$myValues == FALSE
      p_cumul <- input$myCumlt == "off"
      if (!p_bin_width) out <- paste(out, ", bin_width=", in.bw, sep="")
      if (!p_bin_start) out <- paste(out, ", bin_start=", in.bs, sep="")
      if (!p_fill) out <- paste(out, ", fill=\"", input$myFill, "\"", sep="")
      if (!p_color) out <- paste(out, ", color=\"", input$myColor, "\"", sep="")
      if (!p_trans) out <- paste(out, ", transparency=", input$myTrans, sep="")
      if (!p_values) out <- paste(out, ", values=", input$myValues, sep="")
      if (!p_cumul) out <- paste(out, ", cumulate=\"", input$myCumlt, "\"", sep="")
    }

    else {  # density plot
      shiny::req(input$slider_bndwd)
      # semi-transparent versions of the chosen curve fills
      fg.rgb <- col2rgb(input$myFill_gen)
      v$fg.trns <- rgb(fg.rgb[1], fg.rgb[2], fg.rgb[3],
                       alpha=80, maxColorValue=255)
      fn.rgb <- col2rgb(input$myFill_nrm)
      v$fn.trns <- rgb(fn.rgb[1], fn.rgb[2], fn.rgb[3],
                       alpha=80, maxColorValue=255)
      v$h <- Histogram(x, data=NULL,
        bin_width=in.bw, bin_start=in.bs,
        density=input$myDens, rug=input$myRug, type=input$myType,
        bandwidth=input$slider_bndwd, show_histogram=input$myHist,
        fill_general=v$fg.trns, fill_normal=v$fn.trns,
        xlab=x.name, ylab=y.name, quiet=TRUE)
      p_dens <- input$myDens == FALSE
      p_rug <- input$myRug == FALSE
      p_type <- input$myType == "general"
      p_bw <- (abs(input$slider_bndwd-v$bndwd) < 1)
      p_hist <- input$myHist == TRUE
      p_fill_general <- input$myFill_gen == "steelblue3"
      p_fill_normal <- input$myFill_nrm == "pink1"
      if (!p_dens) out <- paste(out, ", density=", input$myDens, sep="")
      if (!p_rug) out <- paste(out, ", rug=", input$myRug, sep="")
      if (!p_type) out <- paste(out, ", type=\"", input$myType, "\"", sep="")
      if (!p_bw) out <- paste(out, ", bandwidth=", input$slider_bndwd, sep="")
      if (!p_hist) out <- paste(out, ", show_histogram=",input$myHist, sep="")
      if (!p_fill_general) out <- paste(out, ", fill_general=\"",
                                        input$myFill_gen, "\"", sep="")
      if (!p_fill_normal) out <- paste(out, ", fill_normal=\"",
                                       input$myFill_nrm, "\"", sep="")
    }  # end dens

    out <- paste(out, ")", sep="")
    cat(out, "\n")
    v$code <- out  # save the code for a pdf file

    # print stats
    output$summary <- renderPrint({
      shiny::req(v$h)
      h <- v$h
      # v$go <- TRUE
      # if (v$go) {
      if (!v$in.den)
        out2 <- c(h$out_summary, " ", h$out_outliers, " ", h$out_freq)
      else
        out2 <- c(h$out_stats, " ", h$out_ss, " ", h$out_outliers)
      for (i in 1:length(out2)) cat(out2[i], "\n")
      # }
    })

    v$x.new <- FALSE
  })  # end renderPlot

  # clicking on the Save button generates a pdf file
  # plus an R script that reproduces the analysis
  plotInput <- eventReactive(input$btn_pdf, {
    code <- v$code
    x.name <- input$x.col
    shiny::req(x.name)
    x <- data()[, x.name]
    y.name <- paste("Count of", x.name)
    pdf.fname <- paste("hs_", x.name, ".pdf", sep="")
    pdf.path <- file.path(path.expand("~"), pdf.fname)

    # styles before re-set in interact() were saved
    style(lab_cex=getOption("l.cex"))
    style(axis_cex=getOption("l.axc"))

    if (!v$in.den)
      Histogram(x, data=NULL,
        bin_width=input$slider_bw, bin_start=input$slider_bs,
        fill=input$myFill, color=input$myColor, transparency=input$myTrans,
        values=input$myValues, cumulate=input$myCumlt,
        xlab=x.name, ylab=y.name, quiet=TRUE,
        pdf_file=pdf.path,
        width=as.numeric(input$w), height=as.numeric(input$h))
    else  # density
      Histogram(x, data=NULL,
        bin_width=input$slider_bw, bin_start=input$slider_bs,
        density=input$myDens, rug=input$myRug, type=input$myType,
        bandwidth=input$slider_bndwd, show_histogram=input$myHist,
        fill_general=v$fg.trns, fill_normal=v$fn.trns,
        xlab=x.name, ylab=y.name, quiet=TRUE,
        pdf_file=pdf.path,
        width=as.numeric(input$w), height=as.numeric(input$h))

    # reset back to shiny setting
    style(lab_cex=1.201, axis_cex=1.011, suggest=FALSE)

    # write the companion R code file to the home directory
    r.fname <- paste("hs_", x.name, ".r", sep="")
    r.path <- file.path(path.expand("~"), r.fname)
    cat("\n")
    message("---------------------------------------------")
    cat("Files written to folder:", path.expand("~"), "\n")
    message("---------------------------------------------")
    cat("pdf file: ", pdf.fname, "\n")
    cat("R code file: ", r.fname, "\n")
    message("---------------------------------------------")
    cat("\n")

    if (input$fSource == "web") {
      url <- theURL()
      if (!(grepl("http://", url)))
        url <- paste("http://", url, sep="")
    }
    read.path <- ifelse (input$fSource == "local", input$myFile$name, url)
    read.code <- paste("d <- Read(\"", read.path, "\")", sep="")
    is.local <- !grepl("http://", read.path, fixed=TRUE)

    if (input$do_cmt)
      cat("# The # symbol indicates a comment rather than an R instruction\n\n",
          "# Begin the R session by loading the lessR functions ",
          "from the library\n", sep="", file=r.path)
    cat("library(\"lessR\")\n\n", file=r.path, append=TRUE)
    if (input$do_cmt) {
      cat("# Read your data into an R data table, the data frame, here d",
          "\n", sep="", file=r.path, append=TRUE)
      if (is.local)
        cat("# To browse for the data file, include nothing between the quotes",
            "\n", sep="", file=r.path, append=TRUE)
    }
    if (is.local && input$do_cmt)
      cat("d <- Read(\"\")\n\n", file=r.path, append=TRUE)
    if (is.local && input$do_cmt) {
      # the browser does not expose the real local path, so the generated
      # Read() with a path is emitted commented-out with a placeholder
      cat("# For security, the path to your data file is not available\n",
          "# Can replace PATHtoFILE in the following with the path\n",
          "# Remove the # sign in the first column and delete the previous ",
          "Read()\n", sep="", file=r.path, append=TRUE)
      read.path <- file.path("PATHtoFILE", read.path)
      read.code <- paste("# d <- Read(\"", read.path, "\")", sep="")
    }
    cat(read.code, "\n\n", file=r.path, append=TRUE)
    if (input$do_cmt)
      cat("# When you have your data table, do the histogram analysis of a\n",
          "# continuous variable in the data table\n",
          "# d is the default data frame name, so no need to specify\n",
          sep="", file=r.path, append=TRUE)
    cat(code, "\n\n", file=r.path, append=TRUE)
    anlys <- "Histogram()"
    if (input$do_cmt)
      cat("# If accessing data with a name other than d, must add data=NAME\n",
          paste("# to the", anlys, "call, where NAME is the name of your",
                "data frame"), "\n", sep="", file=r.path, append=TRUE)
  })

  output$saved_plot <- renderPlot({ plotInput() })

  # access web page help file
  # NOTE(review): eventReactive is used here purely for the browseURL()
  # side effect -- confirm an observeEvent was not intended.
  output$help <- eventReactive(input$do_help, {
    shiny::req(input$do_help)
    fp <- system.file("shiny_apps/help/Histogram.html", package="lessR")
    browseURL(fp)
  })

}  # end server

shinyApp(ui, server)
|
14f73c4dac3f52c971dc210b6a4bdf0a7ea86868 | 80c5446c7fc608b0c2fffb4087c7610b7e40e9a6 | /src/model.R | 62f7e55be3b26a8196ee1778f857bcec333f1c02 | [] | no_license | wikimedia/wikidata-analytics-dashboard | b2a54be3aa07767020641830147f8a59871c5e2f | 428a8602f9dc6e8a6c5f261190e01f854124f795 | refs/heads/master | 2023-08-26T15:50:00.979846 | 2015-11-21T10:44:31 | 2015-11-21T10:44:31 | 41,689,407 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,551 | r | model.R | get_local_datasets <- function(){
wikidata_social_media <<- get_local_set("wikidata_eng_social_media.tsv")
wikidata_mailing_lists <<-get_local_set("wikidata_eng_mailing_lists.tsv")
wikidata_mailing_lists_messages <<-get_local_set("wikidata_eng_mailing_lists_messages.tsv")
wikidata_references_overview <<- get_local_set("wikidata_content_references_overview.tsv")
wikidata_content_items <<- get_local_set("wikidata_content_items.tsv")
wikidata_properties <<- get_local_set("wikidata_content_properties.tsv")
wikidata_content_refstmts <<-get_local_set("wikidata_content_refstmts.tsv")
wikidata_content_refstmts_wikipedia <<- get_local_set("wikidata_content_refstmts_wikipedia.tsv")
wikidata_content_refstmts_other <<- get_local_set("wikidata_content_refstmts_other.tsv")
wikidata_content_references <<-get_local_set("wikidata_content_references.tsv")
wikidata_content_statement_ranks <<- get_local_set("wikidata_content_statement_ranks.tsv")
wikidata_content_statement_item <<- get_local_set("wikidata_content_statement_item.tsv")
wikidata_content_labels_item <<- get_local_set("wikidata_content_labels_item.tsv")
wikidata_content_descriptions_item <<- get_local_set("wikidata_content_descriptions_item.tsv")
wikidata_content_wikilinks_item <<- get_local_set("wikidata_content_wikimedia_links_item.tsv")
wikidata_kpi_active_editors <<- get_local_set("wikidata_kpi_active_editors.tsv")
return(invisible())
}
# Load cached SPARQL query results from local TSV files into global
# variables (side effects only; returns invisible()).
# Assumes get_local_set() reads a TSV relative to sparql_data_uri, both
# defined elsewhere -- TODO confirm.
get_local_sparql_results <- function(){
  sparql1 <<- get_local_set("spql1.tsv", sparql_data_uri)
  sparql2 <<- get_local_set("spql2.tsv", sparql_data_uri)
  sparql3 <<- get_local_set("spql3.tsv", sparql_data_uri)
  sparql13 <<- get_local_set("spql13.tsv", sparql_data_uri)
  property_usage_counts <<- get_local_set("property_usage.tsv", sparql_data_uri)
  return(invisible())
}
# Probe the aggregate-data endpoint for reachability, then (in `finally`,
# so it runs regardless of the probe's outcome) fetch the Graphite job
# counter into the global wikidata_addUsagesForPage.
# Returns the lines read from agg_data_uri on success, NULL after a
# warning, NA after an error.
get_graphite_datasets <- function(){
  out <- tryCatch({
    con <- curl(agg_data_uri)
    # Release the connection on function exit even if readLines() fails
    # part-way through (the original leaked it on error); try() guards
    # against the connection already being invalid.
    on.exit(try(close(con), silent = TRUE), add = TRUE)
    readLines(con)
  },
  warning = function(cond){
    message(paste("URL caused a warning:", agg_data_uri))
    message("Warning message:")
    message(cond)
    return(NULL)
  },
  error = function(cond){
    message(paste("URL does not exist:", agg_data_uri))
    message("Error message:")
    message(cond)
    return(NA)
  },
  finally = {
    # global side effect consumed by the dashboard
    wikidata_addUsagesForPage <<- get_csv_from_api("jobrunner.pop.wikibase-addUsagesForPage.ok.mw1004.count&format=csv",graphite_api_uri)
  })
  # NOTE(review): warning path returns NULL but error path returns NA --
  # callers must handle both; confirm the asymmetry is intended.
  return(out)
}
# Probe the aggregate-data endpoint for reachability, then (in `finally`,
# so it runs regardless of the probe's outcome) download each remote TSV
# into its global variable via download_set().
# Returns the lines read from agg_data_uri on success, NULL after a
# warning, NA after an error (same contract as get_graphite_datasets).
get_remote_datasets <- function(){
  out <- tryCatch({
    con <- curl(agg_data_uri)
    # Release the connection on function exit even if readLines() fails
    # part-way through (the original leaked it on error); try() guards
    # against the connection already being invalid.
    on.exit(try(close(con), silent = TRUE), add = TRUE)
    readLines(con)
  },
  warning = function(cond){
    message(paste("URL caused a warning:", agg_data_uri))
    message("Warning message:")
    message(cond)
    return(NULL)
  },
  error = function(cond){
    message(paste("URL does not exist:", agg_data_uri))
    message("Error message:")
    message(cond)
    return(NA)
  },
  finally = {
    # global side effects consumed by the dashboard
    wikidata_edits <<- download_set("site_stats_total_edits.tsv", agg_data_uri)
    wikidata_active_users <<- download_set("site_stats_active_users.tsv", agg_data_uri)
    wikidata_pages <<- download_set("site_stats_total_pages.tsv", agg_data_uri)
    wikidata_gooditems <<- download_set("site_stats_good_articles.tsv", agg_data_uri)
    wikidata_daily_getclaims_property_use <<- download_set("getclaims_property_use.tsv", agg_data_uri)
    wikidata_facebook <<- download_set("social_facebook.tsv", agg_data_uri)
    wikidata_googleplus <<- download_set("social_googleplus.tsv", agg_data_uri)
    wikidata_twitter <<- download_set("social_twitter.tsv", agg_data_uri)
    wikidata_identica <<- download_set("social_identica.tsv", agg_data_uri)
    wikidata_irc <<- download_set("social_irc.tsv", agg_data_uri)
  })
  return(out)
}
# Parse the metrics RDF file (path in global `metrics_rdf`) into the
# global `metrics_model` consumed by get_rdf_objects().
# NOTE(review): load.rdf presumably comes from the rrdf package -- confirm.
load_rdf_model <-function(){
  metrics_model <<- load.rdf(metrics_rdf)
}
# Look up the individuals of each metric category in the RDF metrics
# model and publish each result as a global variable (side effects only;
# returns invisible()). Assumes get_rdf_individuals() queries the model
# loaded by load_rdf_model() -- TODO confirm.
get_rdf_objects <- function(){
  engagement_obj <<- get_rdf_individuals("<http://wikiba.se/metrics#Engagement>")
  content_obj <<- get_rdf_individuals("<http://wikiba.se/metrics#Content>")
  community_health_obj <<- get_rdf_individuals("<http://wikiba.se/metrics#Community_Health>")
  quality_obj <<- get_rdf_individuals("<http://wikiba.se/metrics#Quality>")
  partnerships_obj <<- get_rdf_individuals("<http://wikiba.se/metrics#Partnerships>")
  external_use_obj <<- get_rdf_individuals("<http://wikiba.se/metrics#External_Use>")
  internal_use_obj <<- get_rdf_individuals("<http://wikiba.se/metrics#Internal_Use>")
  daily_obj <<- get_rdf_individuals("<http://wikiba.se/metrics#Daily>")
  return(invisible())
}
|
9fde7b66d1c05612dac4e6eab69ff8d779b27a93 | 539bc13246703b33fd27135dbcaebcc2f2a8e432 | /man/post_plan.Rd | e4c5c93924568b78fae2398ab634e16133d64f3c | [
"MIT"
] | permissive | emilyriederer/projmgr | 26f420eaf8e93155e9373c0083b81aaa8b78757c | 92017f8d02c65060d648c08059859f0ff03687b0 | refs/heads/master | 2023-02-07T07:14:22.471549 | 2023-01-21T11:56:32 | 2023-01-21T11:56:32 | 163,583,327 | 116 | 9 | NOASSERTION | 2023-01-21T11:47:52 | 2018-12-30T11:47:51 | R | UTF-8 | R | false | true | 1,327 | rd | post_plan.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plan-todo.R
\name{post_plan}
\alias{post_plan}
\title{Post plan (milestones + issues) to GitHub repository}
\usage{
post_plan(ref, plan, distinct = TRUE)
}
\arguments{
\item{ref}{Repository reference (list) created by \code{create_repo_ref()}}
\item{plan}{Plan list as read with \code{read_plan()}}
\item{distinct}{Logical value to denote whether issues with the same title
as a current open issue should be allowed. Passed to \code{get_issues()}}
}
\value{
Dataframe with numbers (identifiers) of posted milestones and issues and issue title
}
\description{
Post custom plans (i.e. create milestones and issues) based on yaml read in by
\code{read_plan}. Please see the "Building Custom Plans" vignette for details.
}
\examples{
\dontrun{
# This example uses example file included in pkg
# You should be able to run example as-is after creating your own repo reference
file_path <- system.file("extdata", "plan.yml", package = "projmgr", mustWork = TRUE)
my_plan <- read_plan(file_path)
post_plan(ref, my_plan)
}
}
\seealso{
Other plans and todos:
\code{\link{post_todo}()},
\code{\link{read_plan}()},
\code{\link{read_todo}()},
\code{\link{report_plan}()},
\code{\link{report_todo}()},
\code{\link{template_yaml}()}
}
\concept{plans and todos}
|
43adbdbb93eb6ddf101f32dd65a7e2f1df06fa20 | 1665f39a8fb6e36f9169b8778349a6e28874b0cc | /Second_Assisgnment/HW_2_UB50291058/ques_2.R | 9c2029d939c146d8d157279a383ff81d27e53a6e | [] | no_license | kartik1611/Statistical-Data-Mining | 8fb2a19803102672e1467281b41bdfcebfdd94ae | ec2d33f8823a2568e714dd9a6a3008532a260cc5 | refs/heads/master | 2020-04-30T19:53:09.763932 | 2019-04-05T17:57:07 | 2019-04-05T17:57:07 | 177,046,007 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 725 | r | ques_2.R | rm(list = ls(all=T))
# Question 2: hierarchical clustering and PCA on the gene-expression data
# (samples are the columns of the CSV).
setwd("F:\\buffalo\\R\\sdma spring\\r\\1st_assignment_sdma\\Second_Assisgnment")

# a) load the data
gene_data <- read.csv('Ch10Ex11.csv', header = TRUE)

# b) hierarchical clustering of the samples with correlation-based
#    distance, under three linkage rules
corr_dist <- as.dist(1 - cor(gene_data))

fit_complete <- hclust(corr_dist, method = "complete")
plot(fit_complete)

fit_single <- hclust(corr_dist, method = "single")
plot(fit_single)

fit_average <- hclust(corr_dist, method = "average")
plot(fit_average)

# c) PCA on the transposed matrix, so samples become rows
pca_fit <- prcomp(t(gene_data))
head(pca_fit$rotation)

# total loading of each gene across all principal components,
# then the ten genes with the largest absolute total loading
total_loading <- apply(pca_fit$rotation, 1, sum)
top_genes <- order(abs(total_loading), decreasing = TRUE)
top_genes[1:10]
959327ab5d2063dc3d0e86e1f3ee710abc1b2892 | 3582a4b32de3059cd2adc81a4f3074b63426ae1f | /EjercicioGGplot.R | fbafb5f4b38ac8016d1f1f053cd23d9c8f8bbffb | [] | no_license | annalawrenc/R | 059c9ce463629fa7b264fc4918ddf457db006def | e835c36a098c4c85a15f754ae5296e3f0959aa6d | refs/heads/master | 2020-04-02T12:45:56.563737 | 2016-07-17T20:31:56 | 2016-07-17T20:31:56 | 60,811,464 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 597 | r | EjercicioGGplot.R | library(ggplot2)
# Randomly choose 100 records so the plot resembles the one in the example:
index_filtro <- sample(1:length(diamonds[[1]]), 100)
ciendiamonds <- diamonds[index_filtro,]
# It can also be done with sample_n and with dplyr
library(dplyr)
ciendiamonds <- sample_n (diamonds, 100, replace = FALSE)
q<- ggplot(ciendiamonds, aes( x=carat, y= price)) # create an empty plot
q
# NOTE(review): `c` shadows base::c for the rest of this script.
c <- q + geom_point(aes(color=color)) # draw the points layer
c
r <- c+ geom_smooth(method=lm, formula= y~x) # draw the regression-line layer
r
|
17123348fec3182996769b1ecd2d2be3d47fd02e | 7f3f667f127b7355d61af8b90df8ca6365fa9817 | /Main/mainPredict.R | ff42bd8776b0061136ec60d70333f6060432a9ae | [] | no_license | jcombari/Rcode-for-recovery | 8162253884734dcd10396c3d7d58f01350aa41ed | 7655a03150e829485d2bdd84becec63cbac6115b | refs/heads/master | 2022-12-12T15:22:53.109778 | 2020-08-29T16:54:14 | 2020-08-29T16:54:14 | 291,306,594 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,458 | r | mainPredict.R | args = commandArgs(trailingOnly=TRUE)
# mainPredict.R - scoring-pipeline driver.
# Loads the environment configuration, (re)builds any missing processed
# and synthetic data files for the periods implied by the requested
# prediction date, assembles the prediction dataset, archives a previous
# dataset if present, and finally calls predictModel().
# NOTE(review): seqDays/dayAddInteger/returnNonExistentFilesS3/
# createDataset/predictModel/fwrite_s3 etc. are defined by the sourced
# configEnv.R environment, not visible here; their behavior is assumed
# from their names - confirm against that file.
source("/home/mck/bpop-analytics-cobranzas/configuration/configEnv.R")
configEnvironment()
auxJson <- jsonlite::read_json(paste0(rootPath,"configuration/configJson.json"))
# default prediction period; overwritten below when a CLI argument is given
predictPeriodo <<- "20180207"
# Check that exactly one argument was received; otherwise raise an error
if (length(args)!=1) {
  stop("Es necesario el argumento: Periodo a predecir", call.=FALSE)
} else {
  predictPeriodo = args[1]
}
# Read the functions to execute from the configuration JSON
processFunctionsDaily <- auxJson[["DataPreparationConfig"]][["processFunctionsDaily"]]
processFunctionsMonthly <- auxJson[["DataPreparationConfig"]][["processFunctionsMonthly"]]
expandFunctionsDaily <- auxJson[["DataPreparationConfig"]][["expandFunctionsDaily"]] %>% unlist
expandFunctionsMonthly <- auxJson[["DataPreparationConfig"]][["expandFunctionsMonthly"]] %>% unlist
expandFunctionsDaily <- setdiff(expandFunctionsDaily, "createTargetMaster")
# TBD - Validate file existence ------------------------------------
# Periods: daily inputs reach 120 days back from the prediction date;
# monthly inputs cover the months in that window up to 30 days before it.
dailyPeriodToExpand <- predictPeriodo
dailyPeriodsToProcess <- seqDays(dayAddInteger(min(dailyPeriodToExpand), -120), dailyPeriodToExpand)
monthlyPeriodsToProcess <- unique(str_sub(seqMonth(dayAddInteger(min(dailyPeriodToExpand), -120),
                                     dayAddInteger(max(dailyPeriodToExpand), -30)), 1, 6))
monthlyPeriodsToExpand <- c(unique(monthAddInteger(str_sub(dailyPeriodToExpand, 1, 6), -2)),
                       unique(monthAddInteger(str_sub(dailyPeriodToExpand, 1, 6), -1)))
# Processing tables -------------------------------------------------
# Run the scripts that process the original data; only periods whose
# output files are missing from S3 are regenerated.
for(auxFunction in processFunctionsMonthly){
  cat("Ejecutando funcion de proceso de datos:", auxFunction, fill = TRUE)
  nonGeneratedPeriods <- returnNonExistentFilesS3(fun = auxFunction, periods = monthlyPeriodsToProcess, json = auxJson)
  for(auxPeriodo in nonGeneratedPeriods){
    tryCatch(do.call(eval(auxFunction), list(period = auxPeriodo)),
             error = function(err){cat("Error: ", err[['message']], fill = TRUE)})
  }
}
for(auxFunction in processFunctionsDaily){
  cat("Ejecutando funcion de proceso de datos:", auxFunction, fill = TRUE)
  nonGeneratedPeriods <- returnNonExistentFilesS3(fun = auxFunction, periods = dailyPeriodsToProcess, json = auxJson)
  for(auxPeriodo in nonGeneratedPeriods){
    cat("Ejecutando funcion de proceso de datos:", auxFunction, fill = TRUE)
    tryCatch(do.call(eval(auxFunction), list(period = auxPeriodo)),
             error = function(err){cat("Error: ", err[['message']], fill = TRUE)})
  }
}
cat("Finalizando procesamiento de datos", fill = TRUE)
# Expand Functions --------------------------------------------------
# Run the scripts that create synthetic variables (same missing-file
# logic as above).
for(auxFunction in expandFunctionsMonthly){
  cat("Ejecutando funcion de creacion de sinteticas:", auxFunction, fill = TRUE)
  nonGeneratedPeriods <- returnNonExistentFilesS3(fun = auxFunction, periods = monthlyPeriodsToExpand, json = auxJson)
  for(auxPeriodo in nonGeneratedPeriods){
    tryCatch(do.call(eval(auxFunction), list(period = auxPeriodo)),
             error = function(err){cat("Error: ", err[['message']], fill = TRUE)})
  }
}
for(auxFunction in expandFunctionsDaily){
  cat("Ejecutando funcion de creacion de sinteticas:", auxFunction, fill = TRUE)
  nonGeneratedPeriods <- returnNonExistentFilesS3(fun = auxFunction, periods = dailyPeriodToExpand, json = auxJson)
  for(auxPeriodo in nonGeneratedPeriods){
    tryCatch(do.call(eval(auxFunction), list(period = auxPeriodo)),
             error = function(err){cat("Error: ", err[['message']], fill = TRUE)})
  }
}
cat("Finalizando creacion de variables sinteticas", fill = TRUE)
cat("Creando dataset", fill = TRUE)
dataset <- createDataset(periodsToSelect = predictPeriodo)
# Save dataset, in case there was a dataset already in the folder, move old dataset
# to control version folder, with the date of creation of the dataset
datasetFile <- osPathJoin(datasetPreparedPath, "Dataset_predict.csv")
if(file.exists_s3(datasetFile) == TRUE){
  auxDataset <- fread_s3(datasetFile)
  timeStamp <- file.info_s3(datasetFile)
  fwrite_s3(auxDataset, osPathJoin(oldDatasetPreparedPath, paste0("Dataset_predict_", timeStamp, ".csv")))
}
fwrite_s3(dataset, datasetFile)
# Train the model (note: the call below actually runs predictModel)
# Select the variables that will not enter the model
# These variables are either the target or may contain future information
varsTarget <- names(dataset)[str_detect(names(dataset), "TARG")]
# Selecting columns to exclude from the modeling (ids, period keys and
# any character-typed columns)
varsToRemove <- unique(c(varsTarget,
                         "YEAR_MONTH",
                         "ID_CLIENTE",
                         "ID_CONTRATO",
                         "YEAR_MONTH_DAY",
                         colnames(dataset)[dataset[, lapply(.SD, class)]== "character"]))
# Run the model with the data above
predictModel(dataset = dataset,
             modelName = "Collections_vPrueba",
             varsToRemove = varsToRemove,
             productIDColName = "ID_CONTRATO",
             clientIDColName = "ID_CLIENTE",
             periodColName = "YEAR_MONTH_DAY",
             periodo = predictPeriodo)
# TBD - Output validation -------------------------------------------------
# TBD - Model performance for the new month -------------------------------
|
fae60b29ec69ddbdd82f0e04129dee1fd5ae0dd9 | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.internet.of.things/man/iotanalytics_describe_dataset.Rd | 80287b53631e62ff535b30d8aa4c26558dab6dfd | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 2,917 | rd | iotanalytics_describe_dataset.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iotanalytics_operations.R
\name{iotanalytics_describe_dataset}
\alias{iotanalytics_describe_dataset}
\title{Retrieves information about a dataset}
\usage{
iotanalytics_describe_dataset(datasetName)
}
\arguments{
\item{datasetName}{[required] The name of the data set whose information is retrieved.}
}
\value{
A list with the following syntax:\preformatted{list(
dataset = list(
name = "string",
arn = "string",
actions = list(
list(
actionName = "string",
queryAction = list(
sqlQuery = "string",
filters = list(
list(
deltaTime = list(
offsetSeconds = 123,
timeExpression = "string"
)
)
)
),
containerAction = list(
image = "string",
executionRoleArn = "string",
resourceConfiguration = list(
computeType = "ACU_1"|"ACU_2",
volumeSizeInGB = 123
),
variables = list(
list(
name = "string",
stringValue = "string",
doubleValue = 123.0,
datasetContentVersionValue = list(
datasetName = "string"
),
outputFileUriValue = list(
fileName = "string"
)
)
)
)
)
),
triggers = list(
list(
schedule = list(
expression = "string"
),
dataset = list(
name = "string"
)
)
),
contentDeliveryRules = list(
list(
entryName = "string",
destination = list(
iotEventsDestinationConfiguration = list(
inputName = "string",
roleArn = "string"
),
s3DestinationConfiguration = list(
bucket = "string",
key = "string",
glueConfiguration = list(
tableName = "string",
databaseName = "string"
),
roleArn = "string"
)
)
)
),
status = "CREATING"|"ACTIVE"|"DELETING",
creationTime = as.POSIXct(
"2015-01-01"
),
lastUpdateTime = as.POSIXct(
"2015-01-01"
),
retentionPeriod = list(
unlimited = TRUE|FALSE,
numberOfDays = 123
),
versioningConfiguration = list(
unlimited = TRUE|FALSE,
maxVersions = 123
),
lateDataRules = list(
list(
ruleName = "string",
ruleConfiguration = list(
deltaTimeSessionWindowConfiguration = list(
timeoutInMinutes = 123
)
)
)
)
)
)
}
}
\description{
Retrieves information about a dataset.
}
\section{Request syntax}{
\preformatted{svc$describe_dataset(
datasetName = "string"
)
}
}
\keyword{internal}
|
f7b497215dba21107a8a0254e8065d516a913777 | 9a05746937795ddc6af174473f37b6cc159bdf57 | /src/features/declare_factors.R | 83aace789ccf9b3f2e2e00baaa96609fbc85467e | [] | no_license | abrahamalex13/carvana-lemon | 1429c9c86d99216d524498e0a76f32aa7c714599 | e2e3315b0c0acdceb8fa2fc676abe14b94d67547 | refs/heads/master | 2023-02-18T11:37:54.801153 | 2021-01-22T16:47:43 | 2021-01-22T16:47:43 | 240,712,560 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,059 | r | declare_factors.R | #declare_factors.R
#ordering also guides modeling treatment.
declare_factors_custom <- function(df) {
  # Declare the (consolidated) feature columns of `df` as factors.
  # For several columns an explicit reference value is forced to the
  # front of the level set ("NULL" for WheelType, 1 for VehYear_consol,
  # "HONDA" for Make_consol, "MEDIUM" for Size_consol, "MANHEIM" for
  # Auction, "TX" for VNST_consol) so that it becomes the baseline
  # level in downstream modeling; droplevels() then removes any levels
  # not actually present in the data.
  df[["WheelType"]] <-
    factor(df[["WheelType"]], c("NULL", "Special", "Covers", "Alloy")) %>%
    droplevels()
  df[["Color_consol"]] <- factor(df[["Color_consol"]]) %>% droplevels()
  df[["VehYear_consol"]] <-
    factor(df[["VehYear_consol"]], unique(c(1, unique(df[["VehYear_consol"]])))) %>%
    droplevels()
  df[["Make_consol"]] <-
    factor(df[["Make_consol"]], unique( c("HONDA", unique(df[["Make_consol"]])))) %>%
    droplevels()
  df[["Make_Model_consol"]] <- factor(df[["Make_Model_consol"]]) %>% droplevels()
  df[["Make_Model_SubModel_consol"]] <- factor(df[["Make_Model_SubModel_consol"]]) %>% droplevels()
  df[["Size_consol"]] <-
    factor(df[["Size_consol"]], unique( c("MEDIUM", unique(df[["Size_consol"]]))) ) %>% droplevels()
  df[["Auction"]] <-
    factor(df[["Auction"]], c("MANHEIM", "OTHER", "ADESA")) %>% droplevels()
  df[["AUCGUART"]] <- factor(df[["AUCGUART"]])
  df[["PRIMEUNIT"]] <- factor(df[["PRIMEUNIT"]])
  df[["VNST_consol"]] <-
    factor(df[["VNST_consol"]], unique( c("TX", unique(df[["VNST_consol"]])))) %>%
    droplevels()
  df[["VNZIP1_consol"]] <- factor(df[["VNZIP1_consol"]])
  df[["engine_type_consol"]] <- factor(df[["engine_type_consol"]])
  df[["engine_vol_consol"]] <- factor(df[["engine_vol_consol"]])
  df[["engine_type_engine_vol_consol"]] <- factor(df[["engine_type_engine_vol_consol"]])
  df[["Make_engine_type_engine_vol_consol"]] <- factor(df[["Make_engine_type_engine_vol_consol"]])
  df[["BYRNO_consol"]] <- factor(df[["BYRNO_consol"]]) %>% droplevels()
  # purchase-date parts are treated as unordered categorical features
  df[["PurchDate_month"]] <- factor(df[["PurchDate_month"]], ordered = FALSE) %>% droplevels()
  df[["PurchDate_day"]] <- factor(df[["PurchDate_day"]])
  df[["PurchDate_wday"]] <- factor(df[["PurchDate_wday"]], ordered = FALSE) %>% droplevels()
  df[["PurchDate_month_PurchDate_wday_consol"]] <- factor(df[["PurchDate_month_PurchDate_wday_consol"]])
  # return the data frame with all factor columns declared
  return(df)
}
89b773c6fe33e9a7d09d80a03b89b408c263750d | 26b29791c5827a146d4dee7a397acb0dbd7eca2d | /SNG/size of next gift.R | d0d0b5976e71ed12fa83d3c46b4bfbd0272d054d | [] | no_license | wangy63/Leverage-Subsampling | eb56cb846733f503debdd30e81b0839da3f68206 | db9e51feb47d367903d8533ce8ce665746cd8dfc | refs/heads/master | 2020-03-14T20:56:15.622939 | 2018-09-07T18:24:39 | 2018-09-07T18:24:39 | 122,065,097 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,442 | r | size of next gift.R | library(data.table)
# "Size of next gift" feature-building script.
# Joins entity, athletics, giving, degree, contact and participation
# extracts into one donor-year table, derives per-donor giving-history
# features (lastGave, GaveLastYear), and writes the modeling table.
entity<-read.csv("1_Entity.csv")
names(entity)
entity_clean<-entity[, c(1, 3, 6, 7)]
athletics <- read.csv("2_Athletics.csv")
names(athletics)
athletics_clean<-athletics[, c(1, 2)]
# flag entities that appear in the athletics extract
entity_clean$athlete<-0
entity_clean$athlete[entity$Entity.ID %in% athletics$Entity.ID] <- 1
givingdata<-read.csv("3_givingdata.csv")
names(givingdata)
giving_clean<-givingdata[, c(2, 3, 5)]
# transaction year is characters 7-10 of the date string
# (assumes a dd/mm/yyyy-style format - TODO confirm against the raw file)
giving_clean$Transaction.year <- substr(giving_clean$Transaction.Date,7,10)
Degree<-read.csv("4_Degree.csv")
names(Degree)
degree_clean<-Degree[, c(1, 2, 3, 4)]
# one row per entity: number of degrees, latest degree year, first school
degree_clean<-cbind(degree_clean, d=rep(1, nrow(degree_clean)))%>%
  group_by(Entity.ID)%>%
  summarise(count.Degree=sum(d, na.rm=T),
            year=max(Degree.Year, na.rm=T),
            School=School.of.Graduation[1])
donate_year<-merge(degree_clean, giving_clean, by="Entity.ID")
donate_year<-donate_year[, c(1, 2, 3, 4, 5, 7)]
# aggregate giving to one row per entity per transaction year
donate_year<-cbind(donate_year, d=rep(1, nrow(donate_year)))%>%
  group_by(Entity.ID, Transaction.year)%>%
  summarise(Amount=sum(Legal.Amount, na.rm=T),
            count.Degree=count.Degree[1],
            School=School[1],
            year=year[1])
donate_year<-merge(donate_year, entity_clean, by="Entity.ID", all=T)
contact <- read.csv("5_ContactInformation.csv")
names(contact)
contact_clean<-contact[, c(1, 2, 3, 6)]
# keep only the preferred contact rows with a non-empty id
contact_clean <- contact_clean %>%
  filter(Preferred.Indicator == 1 & Entity.ID != "")
donate_year<-merge(donate_year, contact_clean, by="Entity.ID", all=T)
part <- read.csv("7_ParticipationHistory.csv") # participation in activities, including dates
part<-dcast(part, Entity.ID ~ Participation.Category) # turns raw values into column names
part<-part%>%mutate(Part.Level=apply(part[,-1], 1, sum), Greek=FRTTY+SOROR)
# Part.Level includes all not just these four/five
part<-filter(part, Greek<4)%>%select(Entity.ID, SRVCE, ALUEV, CHAPT, REUN, FRTTY, SOROR, Greek, Part.Level)
donate_year<-merge(donate_year, part, by="Entity.ID", all=T)
donate_year[is.na(donate_year)] <- 0
donate_year<-donate_year[which(donate_year$Transaction.year!=0), ]
#us.state <- read.csv("state_table.csv")
#us.state <- us.state %>% select(name, abbreviation, census_region_name, census_division_name)
#colnames(us.state)<- c("State.Full", "State", "Region", "SubRegion")
#us.state<-data.table(us.state[, c(1,2)])
#colnames(us.state)<-c("State", "Acronym")
final<-donate_year
final$Transaction.year<-as.numeric(final$Transaction.year)
final$year<-as.numeric(final$year)
# years elapsed between graduation and this gift
final$GradToGive<-final$Transaction.year-final$year
###################################################
# rank = position of each gift-year within a donor's giving history
groupfinal <- final %>%
  arrange(Entity.ID,Transaction.year)%>%
  group_by(Entity.ID) %>%
  mutate(rank=row_number())
# lastGave = amount given in the donor's previous gift-year (0 for the
# first one). NOTE(review): this row loop is O(n) work done slowly in R;
# it could be vectorized with a grouped lag, kept as-is here.
groupfinal$lastGave<-NA
for (i in 1: nrow(groupfinal)){
  if (groupfinal$rank[i]!=1){
    print(i)
    k<-i-1
    groupfinal$lastGave[i]<-groupfinal$Amount[k]
  } else {
    groupfinal$lastGave[i]<-0
  }
}
# GaveLastYear = "Yes" when the previous row for the same donor is the
# immediately preceding calendar year
groupfinal$GaveLastYear<-NA
ptm <- proc.time()
for (i in 1: nrow(groupfinal)){
  print(i)
  if (groupfinal$rank[i]!=1){
    k<-i-1
    if (groupfinal$Transaction.year[i]-1==groupfinal$Transaction.year[k]){
      groupfinal$GaveLastYear[i]="Yes"
    } else {
      groupfinal$GaveLastYear[i]="No"
    }
  } else {
    groupfinal$GaveLastYear[i]<- "No"
  }
}
proc.time() - ptm
# NOTE(review): SNGdata is not defined earlier in this script -
# presumably groupfinal (perhaps after further steps) was assigned to it
# interactively; confirm before rerunning end-to-end.
SNGdata<-SNGdata[, c(-1, -24)]
SNGdata$Greek <- ifelse(SNGdata$Greek == 0,0,1)
write.csv(SNGdata, "SNGdata.csv")
|
8715e815e186f1b35924ffe6e5ad5860e4bc18ac | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/Compounding/examples/pgfpoisson.Rd.R | b213d4cc7e2eed2f13c632b9ed3e54810e6b75d4 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 536 | r | pgfpoisson.Rd.R | library(Compounding)
### Name: pgfpoisson
### Title: Function pgfpoisson
### Aliases: pgfpoisson
### ** Examples
params<-5
pgfpoisson(.2,params)
## The function is currently defined as
pgfpoisson <- function(s, params) {
  # Probability generating function of the Poisson distribution,
  # evaluated elementwise at s: G(s) = exp(theta * (s - 1)),
  # where theta = params[1] > 0 is the Poisson rate.
  outside <- s[abs(s) > 1]
  if (length(outside) > 0) {
    # the PGF is only guaranteed to converge on [-1, 1]
    warning("At least one element of the vector s are out of interval [-1,1]")
  }
  if (missing(params)) {
    stop("Distribution parameters are not defined")
  }
  theta <- params[1]
  if (theta <= 0) {
    stop("Parameter of Poisson distribution must be positive")
  }
  exp(theta * (s - 1))
}
|
a2362d70c35da4e6e11adf279e57f4844151aa68 | d746fef241f9a0e06ae48cc3b1fe72693c43d808 | /tesseract/rotate/d7t59p-002.r | 9f7245dc5a4b872cf9bf11da3ff529ec45246478 | [
"MIT"
] | permissive | ucd-library/wine-price-extraction | 5abed5054a6e7704dcb401d728c1be2f53e05d78 | c346e48b5cda8377335b66e4a1f57c013aa06f1f | refs/heads/master | 2021-07-06T18:24:48.311848 | 2020-10-07T01:58:32 | 2020-10-07T01:58:32 | 144,317,559 | 5 | 0 | null | 2019-10-11T18:34:32 | 2018-08-10T18:00:02 | JavaScript | UTF-8 | R | false | false | 195 | r | d7t59p-002.r | r=0.06
https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d7t59p/media/images/d7t59p-002/svc:tesseract/full/full/0.06/default.jpg Accept:application/hocr+xml
|
f4f01ec984439a9f7df7bfa1741efb8150dc75d9 | 056c5b033f1749218fdc8582c0be1e7876c1ba5f | /man/unnest_recursive.Rd | 2527dccf41102a0b7e3045e0bb31dbd0a137a75d | [] | no_license | kippchicago/teachboostr | efddba0ce4c489fc24ea624a73d1215fe0ffb24c | 0b4bf24ef26ed70e2e6d95e9b7e2ce98ae7cf9f6 | refs/heads/master | 2021-01-21T10:33:33.349146 | 2017-03-21T20:16:14 | 2017-03-21T20:16:14 | 83,447,315 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 533 | rd | unnest_recursive.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/unnest_cols.R
\name{unnest_recursive}
\alias{unnest_recursive}
\title{Unnests a data frame of list-columns}
\usage{
unnest_recursive(x, id_col = "id")
}
\arguments{
\item{x}{data.frame with \code{N} columns, where one column is the \code{id_col} and the
other \code{N-1} are all list-columns}
\item{id_col}{id column which will be used to join unnested lists back together}
}
\value{
a data frame
}
\description{
Unnests a data frame of list-columns
}
|
fcd1d1ee81ff37be903c0c382e5e31a1c45dd4f2 | 7e5e5139f817c4f4729c019b9270eb95978feb39 | /Introduction to Tidyverse/Chapter 2-Data visualization/8.R | 4d989b98835a01b7efa6637c5000fea276b395e6 | [] | no_license | Pranav-Polavarapu/Datacamp-Data-Scientist-with-R-Track- | a45594a8a9078076fe90076f675ec509ae694761 | a50740cb3545c3d03f19fc79930cb895b33af7c4 | refs/heads/main | 2023-05-08T19:45:46.830676 | 2021-05-31T03:30:08 | 2021-05-31T03:30:08 | 366,929,815 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 697 | r | 8.R | # Creating a subgraph for each continent
# You've learned to use faceting to divide a graph into subplots based on one of its variables, such as the continent.
#
# Instructions
# 100 XP
# Create a scatter plot of gapminder_1952 with the x-axis representing population (pop), the y-axis representing life expectancy (lifeExp), and faceted to have one subplot per continent (continent). Put the x-axis on a log scale.
library(gapminder)
library(dplyr)
library(ggplot2)
# keep only the 1952 observations
gapminder_1952 <- gapminder %>%
  filter(year == 1952)
# Scatter plot comparing pop and lifeExp, faceted by continent
ggplot(gapminder_1952, aes(x = pop, y = lifeExp)) + geom_point() + scale_x_log10() + facet_wrap(~ continent)
558ffec3a23cb96f72bcfd893a13608620b85617 | fa6349ff93efd0670d7db0a441810670661738e8 | /FizzBuzz/R/fizzbuzz.R | 73c4d6255397263e9f3a3d04867832cc5d93e1b2 | [] | no_license | petervegh/katas | 5a1dec4c26d3084d9c474e1da2640b501ba61b56 | c8e624c5f26a37c8f31638b76b2f4790b9262c2f | refs/heads/master | 2020-12-11T21:21:37.193373 | 2017-05-11T20:16:35 | 2017-05-11T20:16:35 | 50,564,684 | 1 | 0 | null | 2017-05-11T20:22:03 | 2016-01-28T07:26:28 | Clojure | UTF-8 | R | false | false | 338 | r | fizzbuzz.R | fizzBuzz <- function(number) {
# Classic FizzBuzz for a single number: multiples of both 3 and 5 map
# to 'fizzBuzz', multiples of 3 to 'fizz', multiples of 5 to 'buzz';
# anything else is returned unchanged.
# The combined-divisor check must come first or it would be shadowed
# by the single-divisor branches below.
if(number %% 5 == 0 && number %% 3 == 0)
return('fizzBuzz')
if(number %% 3 == 0)
return('fizz')
if(number %% 5 == 0)
return('buzz')
# fall through: not a multiple of 3 or 5
number
}
applyFizzBuzz <- function(array) {
  # Run fizzBuzz() over every element of `array`, echo each result to
  # the console followed by a single space, and return the full list
  # of results (visibly, as the last evaluated expression).
  results <- lapply(array, fizzBuzz)
  for (item in results) {
    cat(item, ' ', sep = '')
  }
  results
}
applyFizzBuzz(x<-1:100)
|
17bab9fc5e47d0ccff26a96d36f4d0fdf4c097eb | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/turner/examples/meanlist.Rd.R | 46f3e4f90acea0259371e9cdebc0fe8daf76456b | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 398 | r | meanlist.Rd.R | library(turner)
### Name: meanlist
### Title: Mean of all elements in a list
### Aliases: meanlist
### ** Examples
# (extracted example script; requires the `turner` package loaded above)
# say you have some list
list1 = list(1:5, runif(3), rnorm(4))
# get the mean of all elements in list1
meanlist(list1)
# say you have missing data
list2 = list(c(1:4, NA), runif(3), rnorm(4))
# get the mean of all elements in list2 removing NAs
meanlist(list2, na.rm=TRUE)
|
85578c215199b600d6b368c8d7012d805c39e83f | c88ca9dc733d68a694e64c6fccc15c138ee3b2db | /vis/vis_moletable.R | 60cc7d812ba9912da9c16e7f8deaf897a887e05e | [
"MIT"
] | permissive | med-material/Whack_A_Mole_RShiny | 4f4eb23278ae2899f7af53cc069b4d260a38776e | ef0c90197fc1b4037cd30d42a763c16c22879d4f | refs/heads/master | 2023-01-28T17:24:50.251104 | 2023-01-25T08:21:58 | 2023-01-25T08:21:58 | 213,659,401 | 1 | 2 | null | 2020-12-08T09:38:26 | 2019-10-08T14:05:43 | null | UTF-8 | R | false | false | 641 | r | vis_moletable.R | library(dplyr)
vis_moleTable <- function(df) {
  # Build a summary table of Whack-A-Mole event counts.
  # `df` must have an `Event` column; occurrences of each mole-related
  # event are tallied and joined onto a fixed, ordered list of event
  # names so every event appears in the output even when it never
  # occurred (its Count is then NA).
  moleEvents = c("Mole Spawned", "Fake Mole Spawned", "Pointer Shoot", "Mole Hit", "Mole Missed","Fake Mole Hit")
  # skeleton: one row per known event, Count initially NA
  table = data.frame(moleEvents, rep(NA,length(moleEvents)))
  names(table) = c("Event", "Count")
  # tally occurrences of each mole event, ordered as in moleEvents
  stats = df %>%
    group_by(Event) %>%
    dplyr::summarise(Count = n()) %>%
    filter(Event %in% moleEvents) %>%
    mutate(Event = factor(Event, levels = moleEvents),
           ) %>%
    arrange(Event)
  # merge tallies into the skeleton; the join yields Count.x (all NA)
  # and Count.y (the real tally), which are collapsed into an integer
  # Count column
  table <- table %>%
    left_join(stats, by = "Event") %>%
    mutate(Count = as.integer(Count.y),
           Count.x = NULL,
           Count.y = NULL)
  return(table)
}
|
1b56771431df40e8abadb42585ac71b3bf88006e | 1a9ef448017a28bfffdfb78887022b46a6169507 | /R/text-to-sentences.R | 1c49e96871a4ba7982ade113ce85941b36d24775 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | rtelmore/RDSTK | 4ae28abbb12c937141c5a834dc46a010799d0f15 | cdddc4d3647281155067bb434f54f12c27fdd3aa | refs/heads/master | 2021-01-21T11:45:00.044495 | 2017-11-20T20:47:30 | 2017-11-20T20:47:30 | 1,686,614 | 19 | 12 | null | 2017-03-16T18:52:10 | 2011-05-01T02:08:32 | R | UTF-8 | R | false | false | 1,363 | r | text-to-sentences.R | #' @title Identifies sentences in a text string.
#'
#' @description
#' This function returns the legitimate sentences (if they exist) from a text
#' string.
#'
#' @param text A string (hopefully) containing sentences.
#' @param session The CURLHandle object giving the structure for the options
#' and that will process the command. For curlMultiPerform, this is an object
#' of class code MultiCURLHandle-class.
#'
#' @return
#' A list containing
#' \item{sentences}{A string identifying the sentences in the text.}
#'
#' @seealso
#' \code{\link{curlPerform}},
#' \code{\link{getCurlHandle}},
#' \code{\link{dynCurlReader}}
#'
#' @references
#' http://www.datasciencetoolkit.org/developerdocs#text2sentences
#'
#' @examples
#' \dontrun{
#' sentences <- "But this does, it contains enough words. So does this
#' one, it appears correct. This is long and complete enough
#' too."
#' text2sentences(sentences)
#' }
#'
#' @export
text2sentences <- function(text, session = RCurl::getCurlHandle()) {
  # Build the endpoint URL from the package-level API base option.
  api <- paste(getOption("RDSTK_api_base"), "/text2sentences/", sep="")
  # Reader that accumulates the HTTP response body as it streams in.
  r = RCurl::dynCurlReader()
  # POST the raw text to the /text2sentences/ endpoint.
  RCurl::curlPerform(postfields = text, url = api, post = 1L,
                     writefunction = r$update,
                     curl = session)
  # The service replies with JSON; decode it into an R list.
  result <- rjson::fromJSON(r$value())
  return(result)
}
|
7263d5ed9bd8047be0080d756c698ce897fc0db3 | 18f4631b4b312215877e5a65fff966089b993c8d | /LIB_RHESSys_outputFormat.r | a178cabc5adddfe0260a1bfbd32e65538d5798ee | [] | no_license | kkyong77/R-coded-scripts-for-RHESSys-calibration | 42c93f00d8e6dcb7027141f3dc0527897e84be3b | 7fc7c222c0afe36cde7a88abde8357d7bc1d68c9 | refs/heads/master | 2020-03-20T10:07:16.627513 | 2018-06-12T13:57:57 | 2018-06-12T13:57:57 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 80,948 | r | LIB_RHESSys_outputFormat.r |
# Library dependencies for the RHESSys output-formatting helpers below.
source("~/Dropbox/LIB_Rscript/LIB_misc.r")
source("~/Dropbox/LIB_Rscript/LIB_dailytimeseries2.r")
# capture any trailing command-line arguments into `arg`
arg=commandArgs(T)
##----------------------------------------
# option A single basin file
# 1) convert a series of rhessys basin output at the same watershed into WSC format
# 2) convert flow mm/day to volume (maybe scale up)
# 3) bias correction
# 4) combine different single basin files into one
# 2) convert a series of rhessys basin output at the same watershed (need to combine multiple sub-basin) into WSC format
## very important assumption about subbasin.csv
# it must have column names: id, area, grid
# each row is a subbasin
# the format used in the past is very confusing for the purpose of this
# suggestion: the past format should be used for developing the GRASS extract, and running the GRASS extract should also yield "subbasin.csv" in the current format
##-----------------------------------------
# combineSubbasin2Basin(): area-weighted aggregation of per-subbasin
# RHESSys "<prefix>_sub<id><suffix>_basin.daily" outputs into a single
# "<prefix><suffix><label>_basin.daily" table in the WSC-style column
# layout written below.
#   prefix/suffix - path fragments used to locate the per-subbasin files
#   subbasin      - data.frame with columns id, area, grid (one row per
#                   subbasin; see the file-format note above)
#   label         - extra tag appended to the combined output file name
#   st, ed        - optional start/end dates used to clip the series;
#                   when either is NA the full simulated period is used
# Side effects: deletes auxiliary per-subbasin outputs via `rm` and
# writes the combined table with write.table().
combineSubbasin2Basin=function(prefix, suffix, subbasin, label='', st=NA, ed=NA){
  #prefix = 'output/rhessys'
  #suffix = '_param1'
  tryCatch({
    # read the first file
    i=1
    rhessysFile = read.table(paste(prefix,'_sub',subbasin[i,'id'],suffix,'_basin.daily',sep=''),skip=1,header=F )
    rhessys.date = as.Date(paste(rhessysFile[,1], rhessysFile[,2], rhessysFile[,3],sep="-"),format="%d-%m-%Y")
    # column count distinguishes RHESSys 5.20 (>=43 cols) from 5.18 output
    rhessysCol = ncol(rhessysFile)
    subArea = as.numeric(subbasin[,'area']); totalArea = 1/sum(subArea)
    # NOTE(review): totalGrid is computed but never used below
    subGrid = as.numeric(subbasin[,'grid']); totalGrid = 1/sum(subGrid)
    # either take the full simulated period or clip to [st, ed]
    if( is.na(st) | is.na(ed)){
      period = rhessys.date
      period.dailytimeSeriesMatch = rep(T,length(period))
      rhessys.dailytimeSeriesMatch = rep(T,length(period))
    }else{
      period=seq.Date(from=as.Date(st), to=as.Date(ed) ,by="day")
      tmp = match3DailyTimeSeries(rhessys.date, period) ### assume period is the most narrow band
      rhessys.dailytimeSeriesMatch = tmp$xSelect
      period.dailytimeSeriesMatch = tmp$ySelect
    }
    # holding collects the selected daily columns for every subbasin
    holding = array(NA, dim=c(
      sum(rhessys.dailytimeSeriesMatch),
      ifelse(rhessysCol>=43,25,22),
      nrow(subbasin)
    ))# dims: time, vars, subbasin
    # read each subbasin's daily basin file and keep the columns below
    for(i in 1:nrow(subbasin)){
      if(i>1){ rhessysFile = read.table(paste(prefix,'_sub',subbasin[i,'id'],suffix,'_basin.daily',sep=''),skip=1,header=F ) }
      # 1) 7 = sat def z
      # 2) 8 = sat def
      # 3) 9 = rz storage
      # 4) 10 = unsat storage
      # 5) 13 = cap
      # 6) 14 = evap
      # 7) 16 = trans
      # 8) 17 = baseflow
      # 9) 18 = return
      # 10) 19 = streamflow
      # 11) 20 = psn
      # 12) 21 = lai
      # 13) 22 = gw out
      # 14) 23 = gw storage
      # 15) 24 = detention storage
      # 16) 25 = % sat area
      # 17) 26 = litter store
      # 18) 27 = canopy storage
      # 19) 33 = pet
      # 20) 35 = precip
      # 21) 38 = tmax (37)
      # 22) 39 = tmin (38)
      # 23) 40 = tavg (NA)
      # 24) 41 = vpd (NA)
      # 25) 43 = recharge (NA)
      if(rhessysCol>=43){
        # 5.20
        holding[,,i]=as.matrix(rhessysFile[rhessys.dailytimeSeriesMatch,c(7,8,9,10,13,14,16,17,18,19,20,21,22,23,24,25,26,27,33,35,38,39,40,41,43)])
      }else{
        # 5.18
        holding[,,i]=as.matrix(rhessysFile[rhessys.dailytimeSeriesMatch,c(7,8,9,10,13,14,16,17,18,19,20,21,22,23,24,25,26,27,33,35,37,38)])
      }
      # delete this subbasin's params/monthly/yearly/hourly side outputs
      system(paste("rm ",prefix,'_sub',subbasin[i,'id'],suffix,"*params",sep=''))
      system(paste("rm ",prefix,'_sub',subbasin[i,'id'],suffix,"*monthly",sep=''))
      system(paste("rm ",prefix,'_sub',subbasin[i,'id'],suffix,"*yearly",sep=''))
      system(paste("rm ",prefix,'_sub',subbasin[i,'id'],suffix,"*hourly",sep=''))
    }#i
    # area-weighted average of each variable across subbasins, written
    # in the 43-column (RHESSys 5.20) or 35-column (5.18) layout
    if(rhessysCol>=43){
      basin = cbind(
        as.numeric(format(period[period.dailytimeSeriesMatch],"%d")),#1
        as.numeric(format(period[period.dailytimeSeriesMatch],"%m")),#2
        as.numeric(format(period[period.dailytimeSeriesMatch],"%Y")),#3
        rep(0,length(period[period.dailytimeSeriesMatch])),#4
        rep(0,length(period[period.dailytimeSeriesMatch])),#5
        rep(0,length(period[period.dailytimeSeriesMatch])),#6
        (holding[,1,]%*% subArea)*totalArea,#7 satz
        (holding[,2,]%*% subArea)*totalArea,#8 satdef
        (holding[,3,]%*% subArea)*totalArea,#9 rz
        (holding[,4,]%*% subArea)*totalArea,#10 unsat
        rep(0,length(period[period.dailytimeSeriesMatch])),#11
        rep(0,length(period[period.dailytimeSeriesMatch])),#12
        (holding[,5,]%*% subArea)*totalArea,#13 cap
        (holding[,6,]%*% subArea)*totalArea,#14 evap
        rep(0,length(period[period.dailytimeSeriesMatch])),#15
        (holding[,7,]%*% subArea)*totalArea,#16 trans
        (holding[,8,]%*% subArea)*totalArea,#17 baseflow
        (holding[,9,]%*% subArea)*totalArea,#18 returnflow
        (holding[,10,]%*% subArea)*totalArea,#19 flow
        (holding[,11,]%*% subArea)*totalArea,#20 psn
        (holding[,12,]%*% subArea)*totalArea,#21 LAI
        (holding[,13,]%*% subArea)*totalArea,#22 gwq
        (holding[,14,]%*% subArea)*totalArea,#23 gw store
        (holding[,15,]%*% subArea)*totalArea,#24 detention store
        (holding[,16,]%*% subArea)*totalArea,#25 sat area
        (holding[,17,]%*% subArea)*totalArea,#26 litter store
        (holding[,18,]%*% subArea)*totalArea,#27 canopy store
        rep(0,length(period[period.dailytimeSeriesMatch])),#28
        rep(0,length(period[period.dailytimeSeriesMatch])),#29
        rep(0,length(period[period.dailytimeSeriesMatch])),#30
        rep(0,length(period[period.dailytimeSeriesMatch])),#31
        rep(0,length(period[period.dailytimeSeriesMatch])),#32
        (holding[,19,]%*% subArea)*totalArea,#33 pet
        rep(0,length(period[period.dailytimeSeriesMatch])),#34
        (holding[,20,]%*% subArea)*totalArea,#35 rain
        rep(0,length(period[period.dailytimeSeriesMatch])),#36
        rep(0,length(period[period.dailytimeSeriesMatch])),#37
        (holding[,21,]%*% subArea)*totalArea,#38 tmax
        (holding[,22,]%*% subArea)*totalArea,#39 tmin
        (holding[,23,]%*% subArea)*totalArea,#40 tavg
        (holding[,24,]%*% subArea)*totalArea,#41 vpd
        rep(0,length(period[period.dailytimeSeriesMatch])),#42
        (holding[,25,]%*% subArea)*totalArea #43 recharge
      )
      colnames(basin)=c(
        "day",#1
        "month",#2
        "year",#3
        '',#4
        '',#5
        '',#6
        'satz',#7
        'satdef',#8
        'rz',#9
        'unsat',#10
        '',#11
        '',#12
        'cap',#13
        'evap',#14
        '',#15
        'trans',#16
        'baseflow',#17
        'returnflow',#18
        'streamflow',#19
        'psn',#20
        'lai',#21
        'gwq',#22
        'gwstore',#23
        'detentionstore',#24
        'satarea',#25
        'litterstore',#26
        'canopystore',#27
        '',#28
        '',#29
        '',#30
        '',#31
        '',#32
        'pet',#33
        '',#34
        'precip',#35
        '',#36
        '',#37
        'tmax',#38
        'tmin',#39
        'tavg',#40
        'vpd',#41
        '',#42
        # NOTE(review): 'rechargre' below is probably a typo for
        # 'recharge'; kept as-is since downstream readers may match it
        'rechargre'#43
      )
    }else{
      basin = cbind(
        # NOTE(review): unlike the 43-column branch above, the d/m/Y
        # columns here use the full `period`, not
        # period[period.dailytimeSeriesMatch] - these differ whenever
        # st/ed clip the series; confirm which is intended.
        as.numeric(format(period,"%d")),#1
        as.numeric(format(period,"%m")),#2
        as.numeric(format(period,"%Y")),#3
        rep(0,length(period[period.dailytimeSeriesMatch])),#4
        rep(0,length(period[period.dailytimeSeriesMatch])),#5
        rep(0,length(period[period.dailytimeSeriesMatch])),#6
        (holding[,1,]%*% subArea)*totalArea,#7 satz
        (holding[,2,]%*% subArea)*totalArea,#8 satdef
        (holding[,3,]%*% subArea)*totalArea,#9 rz
        (holding[,4,]%*% subArea)*totalArea,#10 unsat
        rep(0,length(period[period.dailytimeSeriesMatch])),#11
        rep(0,length(period[period.dailytimeSeriesMatch])),#12
        (holding[,5,]%*% subArea)*totalArea,#13 cap
        (holding[,6,]%*% subArea)*totalArea,#14 evap
        rep(0,length(period[period.dailytimeSeriesMatch])),#15
        (holding[,7,]%*% subArea)*totalArea,#16 trans
        (holding[,8,]%*% subArea)*totalArea,#17 baseflow
        (holding[,9,]%*% subArea)*totalArea,#18 returnflow
        (holding[,10,]%*% subArea)*totalArea,#19 flow
        (holding[,11,]%*% subArea)*totalArea,#20 psn
        (holding[,12,]%*% subArea)*totalArea,#21 LAI
        (holding[,13,]%*% subArea)*totalArea,#22 gwq
        (holding[,14,]%*% subArea)*totalArea,#23 gw store
        (holding[,15,]%*% subArea)*totalArea,#24 detention store
        (holding[,16,]%*% subArea)*totalArea,#25 sat area
        (holding[,17,]%*% subArea)*totalArea,#26 litter store
        (holding[,18,]%*% subArea)*totalArea,#27 canopy store
        rep(0,length(period[period.dailytimeSeriesMatch])),#28
        rep(0,length(period[period.dailytimeSeriesMatch])),#29
        rep(0,length(period[period.dailytimeSeriesMatch])),#30
        rep(0,length(period[period.dailytimeSeriesMatch])),#31
        rep(0,length(period[period.dailytimeSeriesMatch])),#32
        (holding[,19,]%*% subArea)*totalArea,#33 pet
        rep(0,length(period[period.dailytimeSeriesMatch])),#34
        (holding[,20,]%*% subArea)*totalArea#35 rain
      )
      colnames(basin)=c(
        "day",#1
        "month",#2
        "year",#3
        '',#4
        '',#5
        '',#6
        'satz',#7
        'satdef',#8
        'rz',#9
        'unsat',#10
        '',#11
        '',#12
        'cap',#13
        'evap',#14
        '',#15
        'trans',#16
        'baseflow',#17
        'returnflow',#18
        'streamflow',#19
        'psn',#20
        'lai',#21
        'gwq',#22
        'gwstore',#23
        'detentionstore',#24
        'satarea',#25
        'litterstore',#26
        'canopystore',#27
        '',#28
        '',#29
        '',#30
        '',#31
        '',#32
        'pet',#33
        '',#34
        'precip'#35
      )
    }
    # write the combined, area-weighted basin table
    write.table(basin,paste(prefix,suffix, label,"_basin.daily",sep=""),row.names=F,col.names=T)
  }, error = function(e){
    print(paste(subbasin[i,'id']," is not here.",e,sep=""))
  })#try blocks
}#function
SingleBasinSeries2WSC=function( prefix, Jindex, outputPrefix, sitename=NA, period=NA){
  # Convert a series of RHESSys basin daily outputs into WSC-format csv tables
  # (everything is in terms of mm/day). One input file per replicate:
  #   <prefix><Jindex[j]>_basin.daily
  # and one output csv per variable:
  #   <dir(prefix)>/<outputPrefix><sitename>_<variable>.csv
  #
  # prefix       : path prefix of the RHESSys outputs (directory + site stem)
  # Jindex       : vector of replicate indices (one _basin.daily file each)
  # outputPrefix : prefix prepended to the output csv names
  # sitename     : site label in output names; default = last path part of prefix
  # period       : optional Date vector to cut the series to; NA = full output period
  outputname = paste(prefix,Jindex[1],'_basin.daily',sep='' )
  tmp = read.table(outputname,header=F,skip=1)
  # BUGFIX: `is.na(period)` is not a scalar when callers pass a Date vector
  # (a length>1 `if` condition is an error since R 4.2); test the length first.
  if(length(period)==1 && is.na(period[1])){
    print('use RHESSys output period')
    period = as.Date(paste(tmp[,1], tmp[,2], tmp[,3],sep="-"),format="%d-%m-%Y")
    rhessys.dailytimeSeriesMatch = rep(T,length(period))
    period.dailytimeSeriesMatch = rep(T,length(period))
    print(range(period))
  }else{
    rhessys.date = as.Date(paste(tmp[,1], tmp[,2], tmp[,3],sep="-"),format="%d-%m-%Y")
    tmp = match2DailyTimeSeries(rhessys.date, period) ### assume period is the most narrow band
    rhessys.dailytimeSeriesMatch = tmp$xSelect
    period.dailytimeSeriesMatch = tmp$ySelect
  }
  tmp = unlist(strsplit(prefix,split='/'))
  if(length(tmp)>1){location = paste(tmp[1:(length(tmp)-1)],collapse='/')}else{location='.'}
  if(is.na(sitename)){sitename = tmp[length(tmp)]}
  jMax = length(Jindex)
  nDay = sum(period.dailytimeSeriesMatch)
  holding          = matrix(NA, nDay, jMax) # streamflow mm
  holding_et       = matrix(NA, nDay, jMax) # evap + transpiration
  holding_pet      = matrix(NA, nDay, jMax)
  holding_precip   = matrix(NA, nDay, jMax)
  holding_tmax     = matrix(NA, nDay, jMax)
  holding_tmin     = matrix(NA, nDay, jMax)
  holding_tavg     = matrix(NA, nDay, jMax) # new-format outputs only
  holding_vpd      = matrix(NA, nDay, jMax) # new-format outputs only
  holding_recharge = matrix(NA, nDay, jMax) # new-format outputs only
  NewVersion=F
  for(j in 1:jMax){
    outputname = paste(prefix,Jindex[j],'_basin.daily',sep='' )
    tmp = read.table(outputname,header=F,skip=1)
    sel = rhessys.dailytimeSeriesMatch
    # columns shared by both output layouts
    holding[,j]        = tmp[sel,19]               # streamflow
    holding_et[,j]     = tmp[sel,14] + tmp[sel,16] # evap + trans
    holding_pet[,j]    = tmp[sel,33]               # pet
    holding_precip[,j] = tmp[sel,35]               # rain mm
    if(ncol(tmp)>=43){
      # RHESSys 5.20 column layout (has tavg/vpd/recharge)
      tmax_ = tmp[sel,38] # tmax C
      tmin_ = tmp[sel,39] # tmin C
      holding_tavg[,j]     = tmp[sel,40] # tavg
      holding_vpd[,j]      = tmp[sel,41] # vpd
      holding_recharge[,j] = tmp[sel,43] # recharge
      NewVersion=T
    }else{
      # RHESSys 5.18 column layout
      tmax_ = tmp[sel,37] # tmax C
      tmin_ = tmp[sel,38] # tmin C
    }
    # guard against swapped tmax/tmin in the raw output; pmax/pmin is the
    # base-R elementwise equivalent of colMaxs/colMins over rbind(tmax_,tmin_),
    # removing the hidden matrixStats dependency
    holding_tmax[,j] = pmax(tmax_, tmin_)
    holding_tmin[,j] = pmin(tmax_, tmin_)
  }#j
  # BUGFIX: always subset the dates with period.dailytimeSeriesMatch -- the
  # original used the full `period` for every variable except streamflow,
  # which breaks cbind() whenever a narrower `period` is supplied.
  dateCols = cbind(
    format(period[period.dailytimeSeriesMatch],format='%Y'),
    format(period[period.dailytimeSeriesMatch],format='%m'),
    format(period[period.dailytimeSeriesMatch],format='%d')
  )
  # write one csv per variable: year/month/day + one column per replicate
  writeVar = function(mat, varname){
    result = cbind(dateCols, mat)
    colnames(result) = c('year','month','day', paste(varname,1:jMax,sep='_') )
    write.csv(result,paste(location,'/',outputPrefix, sitename,'_',varname,'.csv',sep=''),row.names=F)
  }
  writeVar(holding, 'streamflowmm')
  writeVar(holding_et, 'et')
  writeVar(holding_pet, 'pet')
  writeVar(holding_precip, 'precip')
  writeVar(holding_tmax, 'tmax')
  writeVar(holding_tmin, 'tmin')
  if(NewVersion){
    # tavg / vpd / recharge only exist in the 5.20 output layout
    writeVar(holding_tavg, 'tavg')
    writeVar(holding_vpd, 'vpd')
    writeVar(holding_recharge, 'recharge')
  }
}#function
WSC_combineUSGS2Lake=function( prefix, dailytimeSeriesMatch, replication, arealweight, outPrefix, scaler=1, NewVersion=F){
  # Combine WSC-format csv series from several USGS sub-catchments into one
  # areal-weighted lake series (assumes usgs-style files written by the
  # *Series2WSC converters).
  #
  # prefix               : vector of input csv prefixes, one per sub-catchment
  # dailytimeSeriesMatch : list of logical row selectors, one per sub-catchment
  # replication          : number of replicate columns in each csv
  # arealweight          : weights for the areal average across sub-catchments
  # outPrefix            : prefix for the combined output csv files
  # scaler               : multiplied together with 0.001 into the flow
  #                        (presumably the lake drainage area in m^2 -> volume;
  #                        TODO confirm against callers)
  # NewVersion           : also process tavg / vpd / recharge files
  nGauge = length(prefix)
  nDay = sum(unlist(dailytimeSeriesMatch[1]))
  dataCols = 4:(replication + 3)
  # read one variable (file suffix) for every sub-catchment -> [day, gauge, replicate]
  readVar = function(suffix){
    hold = array(NA, dim=c(nDay, nGauge, replication))
    for(i in 1:nGauge){
      tmp = as.matrix(read.csv( paste(prefix[i], suffix, '.csv', sep='') ))
      hold[,i,] = tmp[unlist(dailytimeSeriesMatch[i]), dataCols]
    }
    hold
  }
  # variables present in every file set; tavg/vpd/recharge only in new outputs
  suffixes = c('_streamflowmm','_pet','_et','_precip','_tmax','_tmin',
               '_satz','_unsat','_evap','_snow','_return','_psn','_lai')
  if(NewVersion){ suffixes = c(suffixes, '_tavg','_vpd','_recharge') }
  # read everything up front so nothing is written if an input is missing
  raw = list()
  for(s in suffixes){ raw[[s]] = readVar(s) }
  # date columns come from the first sub-catchment's flow file
  tmp = as.matrix(read.csv( paste(prefix[1], '_streamflowmm.csv', sep='') ))
  time = tmp[unlist(dailytimeSeriesMatch[1]), 1:3]
  # areal-weighted average across gauges for each replicate -> [day, replicate]
  combineVar = function(hold, factor=1){
    comb = matrix(NA, nDay, replication)
    for(j in 1:replication){
      comb[,j] = hold[,,j] %*% arealweight / sum(arealweight) * factor
    }
    comb
  }
  # write year/month/day + one column per replicate to <outPrefix>_<label>.csv
  writeVar = function(comb, label){
    result = cbind(time, comb)
    colnames(result) = c('year','month','day', paste(label, 1:replication, sep='_') )
    write.csv(result, paste(outPrefix, '_', label, '.csv', sep=''), row.names=F)
  }
  # flow depth (mm) is scaled by scaler*0.001 into the flowcmd output
  writeVar(combineVar(raw[['_streamflowmm']], scaler*0.001), 'flowcmd')
  # all other variables keep their own name (suffix without the underscore)
  for(s in suffixes[-1]){
    writeVar(combineVar(raw[[s]]), substring(s, 2))
  }
}#function
WSC_cutPeriod=function( prefix, period, label, prefixBias_ ='', NewVersion=F){
  # Cut a (bias-corrected) WSC csv file set down to `period`, writing the
  # trimmed copies back with `label` inserted into the file names
  # (e.g. <prefix><label>__et.csv -- the double underscore is the existing
  # naming convention and is kept as-is).
  #
  # prefix     : csv prefix of the raw WSC files
  # period     : Date vector defining the window to keep
  # label      : tag inserted into the trimmed output file names
  # prefixBias_: optional separate prefix of the bias-corrected flow file;
  #              '' means it lives under `prefix` too
  # NewVersion : also trim the tavg / vpd / recharge files
  suffixList = c(
    '_flowcmd_bias','_flowcmd','_et','_pet','_precip','_tmax','_tmin',
    '_satz','_unsat','_evap','_snow','_return','_psn','_lai'
  )
  if(NewVersion){ suffixList = c(suffixList, '_tavg','_vpd','_recharge') }
  prefixBias = if(prefixBias_ == '') prefix else prefixBias_
  readCsv = function(pfx, suffix){ as.matrix(read.csv( paste(pfx, suffix, '.csv', sep='') )) }
  # csv columns are year/month/day, so day-month-year comes from cols 3,2,1
  asDate = function(m){ as.Date(paste(m[,3], m[,2], m[,1], sep="-"), format="%d-%m-%Y") }
  biasFlow = readCsv(prefixBias, suffixList[1])
  rawFlow  = readCsv(prefix, suffixList[2])
  sel = match3DailyTimeSeries(asDate(biasFlow), asDate(rawFlow), period) ### assume period is the most narrow band
  # the bias file keeps its own row selection; every other file shares the
  # raw-flow selection
  write.csv(biasFlow[sel$xSelect,], paste(prefixBias, label,'_',suffixList[1],'.csv',sep=''), row.names=F)
  write.csv(rawFlow[sel$ySelect,], paste(prefix, label,'_',suffixList[2],'.csv',sep=''), row.names=F)
  for(suffix in suffixList[-(1:2)]){
    hold = readCsv(prefix, suffix)
    write.csv(hold[sel$ySelect,], paste(prefix, label,'_',suffix,'.csv',sep=''), row.names=F)
  }
}#function
# Convert a watershed built from multiple RHESSys sub-basin outputs into
# WSC-format csv series, one file per variable (everything in mm/day).
# For every replicate J in Jindex, each sub-basin file
#   <prefix><sitename><subPrefix><id>_<J>_basin.daily
# is read and the sub-basins are averaged with weights subFile[,'grid']
# (grid-cell counts from the `sub` csv). Files with >= 43 columns use the
# newer column layout (adds tavg/vpd/recharge); otherwise the older layout.
# Missing/unreadable sub-basin files are caught, reported, and left as NA,
# which then propagates NA through the weighted average for that replicate.
#
# NOTE(review): output file names are inconsistent -- streamflowmm, satz,
# unsat, evap, snow, return, psn, lai (and the NewVersion tavg/vpd/recharge)
# include `label`, while et, pet, precip, tmax, tmin and the toLake flowcmd
# file do not. Confirm whether this is intentional before relying on the
# names downstream; left unchanged here to avoid breaking consumers.
MultipleBasinSeries2WSC=function( prefix, sub, Jindex, outputPrefix, sitename=NA, period=NULL, subPrefix=NA, toLake=F,lakeArea=NA,label=''){
# first step (everything is in terms of mm/day )
# convert a series of rhessys basin output at the same watershed (need to combine multiple sub-basin) into WSC format
# ls -l world_subbasin_??? | awk '{print $9}'
# sub csv is expected to have at least 'id', 'area' and 'grid' columns
subFile = read.csv(paste(sub,sep=''),stringsAsFactors=F)
tmp = unlist(strsplit(prefix,split='/'))
if(length(tmp)>1){location = paste(tmp[1:(length(tmp)-1)],collapse='/')}else{location='.'}
if(is.na(sitename)){sitename = tmp[length(tmp)]}
if(is.na(subPrefix)){subPrefix = '_world_subbasin_'}
if(is.na(lakeArea)){lakeArea=sum(subFile[,'area'])}
# read the first sub-basin/first replicate only to establish the date range
outputname = paste(prefix, sitename, subPrefix,subFile[1,'id'],'_',Jindex[1],'_basin.daily',sep='' )
tmp = read.table(outputname,header=F,skip=1)
if(is.null(period)){
print('use RHESSys output period')
period = as.Date(paste(tmp[,1], tmp[,2], tmp[,3],sep="-"),format="%d-%m-%Y")
rhessys.dailytimeSeriesMatch = rep(T,length(period))
period.dailytimeSeriesMatch = rep(T,length(period))
print(range(period))
}else{
rhessys.date = as.Date(paste(tmp[,1], tmp[,2], tmp[,3],sep="-"),format="%d-%m-%Y")
tmp = match2DailyTimeSeries(rhessys.date, period) ### assume period is the most narrow band
rhessys.dailytimeSeriesMatch = tmp$xSelect
period.dailytimeSeriesMatch = tmp$ySelect
}
# one column per replicate, one row per matched day
jMax = length(Jindex)
holding = matrix(NA,sum(period.dailytimeSeriesMatch), jMax) #flow
holding_et = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_pet = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_precip = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_tmax = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_tmin = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_satz = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_unsat = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_evap = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_snow = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_return = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_psn = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_lai = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_tavg = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_vpd = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_recharge = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
for(j in 1:jMax){
# multiple sub-catchment
# per-replicate scratch: one column per sub-basin, averaged at the bottom of the j loop
secondholding = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_et = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_pet = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_rain = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_tmax = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_tmin = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_satz = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_unsat = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_evap = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_snow = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_return = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_psn = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_lai = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_tavg = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_vpd = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_recharge = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
# NOTE(review): NewVersion is reset for every replicate j, so after the loop
# it reflects only the LAST replicate's column count -- verify all replicates
# share one RHESSys version, otherwise tavg/vpd/recharge output is inconsistent.
NewVersion=F
for(kk in 1:nrow(subFile)){
tryCatch({
outputname = paste(prefix, sitename, subPrefix,subFile[kk,'id'],'_',Jindex[j],'_basin.daily',sep='' )
tmp = read.table(outputname,header=F,skip=1)
if(ncol(tmp)>=43){
# 5.20
secondholding[,kk]=tmp[rhessys.dailytimeSeriesMatch,19]
secondholding_et[,kk]=tmp[rhessys.dailytimeSeriesMatch,14]+tmp[rhessys.dailytimeSeriesMatch,16]
secondholding_pet[,kk]=tmp[rhessys.dailytimeSeriesMatch,33]
secondholding_rain[,kk]=tmp[rhessys.dailytimeSeriesMatch,35]# rain mm
tmax_ = tmp[rhessys.dailytimeSeriesMatch,38]# tmax C
tmin_ = tmp[rhessys.dailytimeSeriesMatch,39]# tmin C
# re-order so tmax >= tmin even if the raw columns are swapped
secondholding_tmax[,kk]=colMaxs( rbind(tmax_,tmin_))
secondholding_tmin[,kk]=colMins( rbind(tmax_,tmin_))
secondholding_satz[,kk]=tmp[rhessys.dailytimeSeriesMatch,7]# satz
secondholding_unsat[,kk]=tmp[rhessys.dailytimeSeriesMatch,10]# unsat
secondholding_evap[,kk]=tmp[rhessys.dailytimeSeriesMatch,14]# evap
secondholding_snow[,kk]=tmp[rhessys.dailytimeSeriesMatch,15]# snowpack
secondholding_return[,kk]=tmp[rhessys.dailytimeSeriesMatch,18]# return flow
secondholding_psn[,kk]=tmp[rhessys.dailytimeSeriesMatch,20]# psn
secondholding_lai[,kk]=tmp[rhessys.dailytimeSeriesMatch,21]# lai
secondholding_tavg[,kk]=tmp[rhessys.dailytimeSeriesMatch,40]# tavg
secondholding_vpd[,kk]=tmp[rhessys.dailytimeSeriesMatch,41]# vpd
secondholding_recharge[,kk]=tmp[rhessys.dailytimeSeriesMatch,43]# recharge
NewVersion=T
}else{
# 5.18 (older layout: tmax/tmin shifted by one column, no tavg/vpd/recharge)
secondholding[,kk]=tmp[rhessys.dailytimeSeriesMatch,19]
secondholding_et[,kk]=tmp[rhessys.dailytimeSeriesMatch,14]+tmp[rhessys.dailytimeSeriesMatch,16]
secondholding_pet[,kk]=tmp[rhessys.dailytimeSeriesMatch,33]
secondholding_rain[,kk]=tmp[rhessys.dailytimeSeriesMatch,35]# rain mm
tmax_ = tmp[rhessys.dailytimeSeriesMatch,37]# tmax C
tmin_ = tmp[rhessys.dailytimeSeriesMatch,38]# tmin C
secondholding_tmax[,kk]=colMaxs( rbind(tmax_,tmin_))
secondholding_tmin[,kk]=colMins( rbind(tmax_,tmin_))
secondholding_satz[,kk]=tmp[rhessys.dailytimeSeriesMatch,7]# satz
secondholding_unsat[,kk]=tmp[rhessys.dailytimeSeriesMatch,10]# unsat
secondholding_evap[,kk]=tmp[rhessys.dailytimeSeriesMatch,14]# evap
secondholding_snow[,kk]=tmp[rhessys.dailytimeSeriesMatch,15]# snowpack
secondholding_return[,kk]=tmp[rhessys.dailytimeSeriesMatch,18]# return flow
secondholding_psn[,kk]=tmp[rhessys.dailytimeSeriesMatch,20]# psn
secondholding_lai[,kk]=tmp[rhessys.dailytimeSeriesMatch,21]# lai
}},
warning = function(w) {
# missing/short sub-basin files are only reported; their column stays NA
print(kk)
print(outputname)
print(w)
}, error = function(e) {
print(kk)
print(outputname)
print(e)
}
)#tryCatch
}#kk
# grid-cell-count weighted average of the sub-basins for replicate j
holding[,j] = secondholding %*%subFile[,'grid']/sum(subFile[,'grid'])
holding_et[,j] = secondholding_et %*%subFile[,'grid']/sum(subFile[,'grid'])
holding_pet[,j] = secondholding_pet %*%subFile[,'grid']/sum(subFile[,'grid'])
holding_precip[,j] = secondholding_rain%*%subFile[,'grid']/sum(subFile[,'grid'])# rain mm
holding_tmax[,j] = secondholding_tmax%*%subFile[,'grid']/sum(subFile[,'grid'])# tmax C
holding_tmin[,j] = secondholding_tmin%*%subFile[,'grid']/sum(subFile[,'grid'])# tmin C
holding_satz[,j] = secondholding_satz%*%subFile[,'grid']/sum(subFile[,'grid'])#
holding_unsat[,j] = secondholding_unsat%*%subFile[,'grid']/sum(subFile[,'grid'])#
holding_evap[,j] = secondholding_evap%*%subFile[,'grid']/sum(subFile[,'grid'])#
holding_snow[,j] = secondholding_snow%*%subFile[,'grid']/sum(subFile[,'grid'])#
holding_return[,j] = secondholding_return%*%subFile[,'grid']/sum(subFile[,'grid'])#
holding_psn[,j] = secondholding_psn%*%subFile[,'grid']/sum(subFile[,'grid'])#
holding_lai[,j] = secondholding_lai%*%subFile[,'grid']/sum(subFile[,'grid'])#
if(NewVersion){
holding_tavg[,j] = secondholding_tavg%*%subFile[,'grid']/sum(subFile[,'grid'])#
holding_vpd[,j] = secondholding_vpd%*%subFile[,'grid']/sum(subFile[,'grid'])#
holding_recharge[,j] = secondholding_recharge%*%subFile[,'grid']/sum(subFile[,'grid'])#
}
}#j
# write one csv per variable: year/month/day + one column per replicate
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding
)
colnames(result) = c('year','month','day', paste('streamflowmm', Jindex,sep='_') )
write.csv(result,paste(location,'/',outputPrefix, sitename,label,'_streamflowmm.csv',sep=''),row.names=F)
if(toLake){
# optional volume series: depth (mm) * 0.001 * lakeArea
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding*0.001*lakeArea
)
colnames(result) = c('year','month','day', paste('flowcmd', Jindex,sep='_') )
write.csv(result,paste(location,'/',outputPrefix, sitename,'_flowcmd.csv',sep=''),row.names=F)
}
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_et
)
colnames(result) = c('year','month','day', paste('et', Jindex,sep='_') )
write.csv(result,paste(location,'/',outputPrefix, sitename,'_et.csv',sep=''),row.names=F)
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_pet
)
colnames(result) = c('year','month','day', paste('pet', Jindex,sep='_') )
write.csv(result,paste(location,'/',outputPrefix, sitename,'_pet.csv',sep=''),row.names=F)
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_precip
)
colnames(result) = c('year','month','day', paste('precip', Jindex,sep='_') )
write.csv(result,paste(location,'/',outputPrefix, sitename,'_precip.csv',sep=''),row.names=F)
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_tmax
)
colnames(result) = c('year','month','day', paste('tmax', Jindex,sep='_') )
write.csv(result,paste(location,'/',outputPrefix, sitename,'_tmax.csv',sep=''),row.names=F)
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_tmin
)
colnames(result) = c('year','month','day', paste('tmin', Jindex,sep='_') )
write.csv(result,paste(location,'/',outputPrefix, sitename,'_tmin.csv',sep=''),row.names=F)
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_satz
)
colnames(result) = c('year','month','day', paste('satz', Jindex,sep='_') )
write.csv(result,paste(location,'/',outputPrefix, sitename,label,'_satz.csv',sep=''),row.names=F)
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_unsat
)
colnames(result) = c('year','month','day', paste('unsat', Jindex,sep='_') )
write.csv(result,paste(location,'/',outputPrefix, sitename,label,'_unsat.csv',sep=''),row.names=F)
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_evap
)
colnames(result) = c('year','month','day', paste('evap', Jindex,sep='_') )
write.csv(result,paste(location,'/',outputPrefix, sitename,label,'_evap.csv',sep=''),row.names=F)
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_snow
)
colnames(result) = c('year','month','day', paste('snow', Jindex,sep='_') )
write.csv(result,paste(location,'/',outputPrefix, sitename,label,'_snow.csv',sep=''),row.names=F)
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_return
)
colnames(result) = c('year','month','day', paste('return', Jindex,sep='_') )
write.csv(result,paste(location,'/',outputPrefix, sitename,label,'_return.csv',sep=''),row.names=F)
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_psn
)
colnames(result) = c('year','month','day', paste('psn', Jindex,sep='_') )
write.csv(result,paste(location,'/',outputPrefix, sitename,label,'_psn.csv',sep=''),row.names=F)
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_lai
)
colnames(result) = c('year','month','day', paste('lai', Jindex,sep='_') )
write.csv(result,paste(location,'/',outputPrefix, sitename,label,'_lai.csv',sep=''),row.names=F)
if(NewVersion){
# tavg / vpd / recharge only exist for the newer output layout
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_tavg
)
colnames(result) = c('year','month','day', paste('tavg', Jindex,sep='_') )
write.csv(result,paste(location,'/',outputPrefix, sitename,label,'_tavg.csv',sep=''),row.names=F)
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_vpd
)
colnames(result) = c('year','month','day', paste('vpd', Jindex,sep='_') )
write.csv(result,paste(location,'/',outputPrefix, sitename,label,'_vpd.csv',sep=''),row.names=F)
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_recharge
)
colnames(result) = c('year','month','day', paste('recharge', Jindex,sep='_') )
write.csv(result,paste(location,'/',outputPrefix, sitename,label,'_recharge.csv',sep=''),row.names=F)
}
}#function
# lakeID name area_m2 grid simulation percent
# 4 657 buttner 72981900 81091 70761 0.87261225
# 1 865 michie 432610200 480678 472280 0.982528845
# 7 948 westfor 23639400 26266 16277 0.61969847
# 6 1009 orange 22968000 25520 21294 0.834404389
# 5 1127 rogers 44847900 49831 46498 0.933113925
# 2 1146 little 249788700 277543 267050 0.962193246
# 3 1439 fall 1099539900 1221711 1089327 0.891640494
# 8 2046 cane 80128800 89032 78132 0.877572109
# 9 2317 university 76376700 84863 80021 0.942943332
# 10 2435 jordan 946452600 1051614 945619 0.899207314 (include newhope)
# total jordan lake with HAW 4369799700 4855333
# total falls lake 1996320600 2218134
# swift 1.7172e+8 m2
# 101 flat 385145100 427939
# 102 little 202600800 225112
# 103 mtn 20677500 22975
# 104 morgan 21401100 23779
# 105 cane 19687500 21875
# 106 newhope 198703800 220782 --> 198868500 (model)
# 107 eno 365411700 406013
# 108 ellerbeClub 13273200 14748
# 109 ellerbeGorman 41607900 46231
# 110 lick 9989100 11099
# 111 northeast 55437300 61597
# 112 swift 54394200 60438
##-------------------------- outdated functions
WSC_usgs2lake = function(inputname, scaler, outputname){
  # Convert a WSC-format USGS streamflow series (mm/day) into lake inflow
  # volume (m^3/day) and copy the companion climate files under a new prefix.
  #
  # Args:
  #   inputname : path prefix of the input WSC files, e.g. 'dir/usgs_site';
  #               expects '<inputname>_streamflowmm.csv' plus the companion
  #               '_pet.csv', '_precip.csv', '_tmax.csv', '_tmin.csv' files.
  #   scaler    : drainage/lake area in m^2; mm/day * 0.001 * area = m^3/day.
  #   outputname: output file prefix; files are written into the same
  #               directory that inputname lives in.
  #
  # Side effects: writes '<outputname>_flowcmd.csv' and copies of the four
  # climate files. Returns NULL invisibly.
  tmp = unlist(strsplit(inputname, split='/'))
  # Directory part of inputname ('.' when there is no '/').
  if(length(tmp) > 1){location = paste(tmp[1:(length(tmp)-1)], collapse='/')}else{location = '.'}
  inputnamePrefix = tmp[length(tmp)]
  print(location)
  print(inputnamePrefix)

  # Scale streamflow columns (4:ncol) from mm/day to m^3/day; columns 1-3
  # are year/month/day and stay untouched.
  flow = read.csv(paste0(inputname, '_streamflowmm.csv'))
  flow[, 4:ncol(flow)] = flow[, 4:ncol(flow)] * 0.001 * scaler
  write.csv(flow, paste0(location, '/', outputname, '_flowcmd.csv'), row.names=F)

  # Copy the climate companions unchanged under the new prefix.
  # file.copy() is portable and reports failures, unlike shelling out to 'cp'.
  for(suffix in c('_pet.csv', '_precip.csv', '_tmax.csv', '_tmin.csv')){
    file.copy(paste0(inputname, suffix),
              paste0(location, '/', outputname, suffix),
              overwrite=TRUE)
  }
  invisible(NULL)
}#function
ConditionMultipleBasinSeries2WSC =function(CONDS,prefix, sub, Jindex, outputPrefix, sitename=NA, period=NULL, subPrefix=NA, toLake=F,lakeArea=NA,label=''){
# Combine per-sub-basin RHESSys daily outputs -- possibly produced under
# different run conditions -- into basin-wide, WSC-format daily CSV series
# (streamflowmm, et, pet, precip, tmax/tmin, satz, unsat, evap, snow,
# return flow, psn, lai), area-weighted by each sub-basin's grid-cell count.
#
# CONDS       : T/F matrix (nrow = sub-basins in `sub`, ncol = conditions);
#               row kk marks which condition -- and hence which `prefix`
#               entry -- sub-basin kk was simulated under.
# prefix      : vector of path prefixes, one per condition (column of CONDS).
# sub         : path to the sub-basin table CSV; needs columns 'id',
#               'grid' (cell count, used as areal weight) and 'area'.
# Jindex      : label matrix (nrow = parameter sets, ncol = conditions);
#               Jindex[j, cond] is pasted into the input file name.
# outputPrefix: prefix of the written CSV file names.
# sitename    : site name in file names; if NA, derived from the last path
#               piece of the final prefix.
# period      : Date vector restricting the output window; NULL = use the
#               full RHESSys output period.
# subPrefix   : file-name fragment before the sub-basin id
#               (default '_world_subbasin_').
# toLake      : if T, also write flow in m^3/day ('flowcmd') using lakeArea.
# lakeArea    : area in m^2 for the mm -> m^3 conversion; NA = sum of the
#               sub-basin 'area' column.
# label       : optional suffix inserted in most output file names.
# Called for its side effects: writes CSVs into the directory of prefix[1];
# there is no useful return value.
	# assuming more than 1 condition
	# CONDS: multiple conditions based on sub.csv (T/F-matrix: nrow = sub, col=conds)
	# prefix: multiple prefixes (should correspond to CONDS)
	# Jindex: index matrix: nrow=index, and col=cond
	# first step (everything is in terms of mm/day )
	# convert a series of rhessys basin output at the same watershed (need to combine multiple sub-basin) into WSC format
	# ls -l world_subbasin_??? | awk '{print $9}'
# Sub-basin table: one row per sub-basin.
subFile = read.csv(paste(sub,sep=''),stringsAsFactors=F)
# Output directory of each prefix = text before its last '/'; '.' if none.
location = rep(NA, length(prefix))
for(i in 1:length(location)){
tmp = unlist(strsplit(prefix[i],split='/'))
if(length(tmp)>1){location[i] = paste(tmp[1:(length(tmp)-1)],collapse='/')}else{location[i]='.'}
}#i
# Fallback sitename comes from the last path piece of the final prefix
# (tmp still holds the last loop iteration's split).
if(is.na(sitename)){sitename = tmp[length(tmp)]}
if(is.na(subPrefix)){subPrefix = '_world_subbasin_'}
if(is.na(lakeArea)){lakeArea=sum(subFile[,'area'])}
# Read one reference output file (first sub-basin, first parameter set)
# just to establish the simulated date range.
prefixuseIndex = which(CONDS[1,]==T)
outputname = paste(prefix[prefixuseIndex], sitename, subPrefix,subFile[1,'id'],'_',Jindex[1, prefixuseIndex],'_basin.daily',sep='' )
tmp = read.table(outputname,header=F,skip=1)
if(is.null(period)){
print('use RHESSys output period')
# Columns 1-3 of RHESSys daily output are day, month, year -- hence %d-%m-%Y.
period = as.Date(paste(tmp[,1], tmp[,2], tmp[,3],sep="-"),format="%d-%m-%Y")
rhessys.dailytimeSeriesMatch = rep(T,length(period))
period.dailytimeSeriesMatch = rep(T,length(period))
print(range(period))
}else{
rhessys.date = as.Date(paste(tmp[,1], tmp[,2], tmp[,3],sep="-"),format="%d-%m-%Y")
tmp = match2DailyTimeSeries(rhessys.date, period) ### assume period is the most narrow band
rhessys.dailytimeSeriesMatch = tmp$xSelect
period.dailytimeSeriesMatch = tmp$ySelect
}
# One holding matrix per output variable:
# rows = matched days, columns = parameter sets (rows of Jindex).
jMax = nrow(Jindex)
holding = matrix(NA,sum(period.dailytimeSeriesMatch), jMax) #flow
holding_et = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_pet = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_precip = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_tmax = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_tmin = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_satz = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_unsat = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_evap = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_snow = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_return = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_psn = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_lai = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
for(j in 1:jMax){
	# multiple sub-catchment
# Per-sub-basin scratch matrices for parameter set j
# (rows = matched days, columns = sub-basins).
secondholding = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_et = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_pet = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_rain = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_tmax = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_tmin = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_satz = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_unsat = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_evap = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_snow = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_return = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_psn = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_lai = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
for(kk in 1:nrow(subFile)){
# tryCatch keeps the loop going (columns stay NA) when a file is missing
# or unreadable; the offending sub-basin and file name are printed.
tryCatch({
# Pick the prefix matching sub-basin kk's condition.
prefixuseIndex = which(CONDS[kk,]==T)
outputname = paste(prefix[prefixuseIndex], sitename, subPrefix,subFile[kk,'id'],'_',Jindex[j, prefixuseIndex],'_basin.daily',sep='' )
tmp = read.table(outputname,header=F,skip=1)
# RHESSys 5.20 output has >70 columns; 5.18 has fewer. The two layouts
# differ here only in the tmax/tmin column positions (38/39 vs 37/38).
if(ncol(tmp)>70){
	# 5.20
secondholding[,kk]=tmp[rhessys.dailytimeSeriesMatch,19]
secondholding_et[,kk]=tmp[rhessys.dailytimeSeriesMatch,14]+tmp[rhessys.dailytimeSeriesMatch,16]
secondholding_pet[,kk]=tmp[rhessys.dailytimeSeriesMatch,33]
secondholding_rain[,kk]=tmp[rhessys.dailytimeSeriesMatch,35]# rain mm
tmax_ = tmp[rhessys.dailytimeSeriesMatch,38]# tmax C
tmin_ = tmp[rhessys.dailytimeSeriesMatch,39]# tmin C
# Element-wise max/min guards against swapped tmax/tmin values.
# colMaxs/colMins are presumably from matrixStats -- TODO confirm.
secondholding_tmax[,kk]=colMaxs( rbind(tmax_,tmin_))
secondholding_tmin[,kk]=colMins( rbind(tmax_,tmin_))
secondholding_satz[,kk]=tmp[rhessys.dailytimeSeriesMatch,7]# satz
secondholding_unsat[,kk]=tmp[rhessys.dailytimeSeriesMatch,10]# unsat
secondholding_evap[,kk]=tmp[rhessys.dailytimeSeriesMatch,14]# evap
secondholding_snow[,kk]=tmp[rhessys.dailytimeSeriesMatch,15]# snowpack
secondholding_return[,kk]=tmp[rhessys.dailytimeSeriesMatch,18]# return flow
secondholding_psn[,kk]=tmp[rhessys.dailytimeSeriesMatch,20]# psn
secondholding_lai[,kk]=tmp[rhessys.dailytimeSeriesMatch,21]# lai
}else{
	# 5.18
secondholding[,kk]=tmp[rhessys.dailytimeSeriesMatch,19]
secondholding_et[,kk]=tmp[rhessys.dailytimeSeriesMatch,14]+tmp[rhessys.dailytimeSeriesMatch,16]
secondholding_pet[,kk]=tmp[rhessys.dailytimeSeriesMatch,33]
secondholding_rain[,kk]=tmp[rhessys.dailytimeSeriesMatch,35]# rain mm
tmax_ = tmp[rhessys.dailytimeSeriesMatch,37]# tmax C
tmin_ = tmp[rhessys.dailytimeSeriesMatch,38]# tmin C
secondholding_tmax[,kk]=colMaxs( rbind(tmax_,tmin_))
secondholding_tmin[,kk]=colMins( rbind(tmax_,tmin_))
secondholding_satz[,kk]=tmp[rhessys.dailytimeSeriesMatch,7]# satz
secondholding_unsat[,kk]=tmp[rhessys.dailytimeSeriesMatch,10]# unsat
secondholding_evap[,kk]=tmp[rhessys.dailytimeSeriesMatch,14]# evap
secondholding_snow[,kk]=tmp[rhessys.dailytimeSeriesMatch,15]# snowpack
secondholding_return[,kk]=tmp[rhessys.dailytimeSeriesMatch,18]# return flow
secondholding_psn[,kk]=tmp[rhessys.dailytimeSeriesMatch,20]# psn
secondholding_lai[,kk]=tmp[rhessys.dailytimeSeriesMatch,21]# lai
}},
warning = function(w) {
print(kk)
print(outputname)
print(w)
}, error = function(e) {
print(kk)
print(outputname)
print(e)
}
)#tryCatch
}#kk
# Grid-cell-count-weighted average across sub-basins -> basin mean series.
holding[,j] = secondholding %*%subFile[,'grid']/sum(subFile[,'grid'])
holding_et[,j] = secondholding_et %*%subFile[,'grid']/sum(subFile[,'grid'])
holding_pet[,j] = secondholding_pet %*%subFile[,'grid']/sum(subFile[,'grid'])
holding_precip[,j] = secondholding_rain%*%subFile[,'grid']/sum(subFile[,'grid'])# rain mm
holding_tmax[,j] = secondholding_tmax%*%subFile[,'grid']/sum(subFile[,'grid'])# tmax C
holding_tmin[,j] = secondholding_tmin%*%subFile[,'grid']/sum(subFile[,'grid'])# tmin C
holding_satz[,j] = secondholding_satz%*%subFile[,'grid']/sum(subFile[,'grid'])#
holding_unsat[,j] = secondholding_unsat%*%subFile[,'grid']/sum(subFile[,'grid'])#
holding_evap[,j] = secondholding_evap%*%subFile[,'grid']/sum(subFile[,'grid'])#
holding_snow[,j] = secondholding_snow%*%subFile[,'grid']/sum(subFile[,'grid'])#
holding_return[,j] = secondholding_return%*%subFile[,'grid']/sum(subFile[,'grid'])#
holding_psn[,j] = secondholding_psn%*%subFile[,'grid']/sum(subFile[,'grid'])#
holding_lai[,j] = secondholding_lai%*%subFile[,'grid']/sum(subFile[,'grid'])#
}#j
# Write each variable as year/month/day columns plus one column per
# parameter set, into the directory of prefix[1].
# NOTE(review): some output names include `label` and some (flowcmd, et,
# pet, precip, tmax, tmin) do not -- confirm this asymmetry is intended.
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding
)
colnames(result) = c('year','month','day', paste('streamflowmm',1:jMax,sep='_') )
write.csv(result,paste(location[1],'/',outputPrefix, sitename,label,'_streamflowmm.csv',sep=''),row.names=F)
if(toLake){
# mm/day -> m^3/day over the lake area.
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding*0.001*lakeArea
)
colnames(result) = c('year','month','day', paste('flowcmd',1:jMax,sep='_') )
write.csv(result,paste(location[1],'/',outputPrefix, sitename,'_flowcmd.csv',sep=''),row.names=F)
}
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_et
)
colnames(result) = c('year','month','day', paste('et',1:jMax,sep='_') )
write.csv(result,paste(location[1],'/',outputPrefix, sitename,'_et.csv',sep=''),row.names=F)
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_pet
)
colnames(result) = c('year','month','day', paste('pet',1:jMax,sep='_') )
write.csv(result,paste(location[1],'/',outputPrefix, sitename,'_pet.csv',sep=''),row.names=F)
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_precip
)
colnames(result) = c('year','month','day', paste('precip',1:jMax,sep='_') )
write.csv(result,paste(location[1],'/',outputPrefix, sitename,'_precip.csv',sep=''),row.names=F)
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_tmax
)
colnames(result) = c('year','month','day', paste('tmax',1:jMax,sep='_') )
write.csv(result,paste(location[1],'/',outputPrefix, sitename,'_tmax.csv',sep=''),row.names=F)
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_tmin
)
colnames(result) = c('year','month','day', paste('tmin',1:jMax,sep='_') )
write.csv(result,paste(location[1],'/',outputPrefix, sitename,'_tmin.csv',sep=''),row.names=F)
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_satz
)
colnames(result) = c('year','month','day', paste('satz',1:jMax,sep='_') )
write.csv(result,paste(location[1],'/',outputPrefix, sitename,label,'_satz.csv',sep=''),row.names=F)
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_unsat
)
colnames(result) = c('year','month','day', paste('unsat',1:jMax,sep='_') )
write.csv(result,paste(location[1],'/',outputPrefix, sitename,label,'_unsat.csv',sep=''),row.names=F)
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_evap
)
colnames(result) = c('year','month','day', paste('evap',1:jMax,sep='_') )
write.csv(result,paste(location[1],'/',outputPrefix, sitename,label,'_evap.csv',sep=''),row.names=F)
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_snow
)
colnames(result) = c('year','month','day', paste('snow',1:jMax,sep='_') )
write.csv(result,paste(location[1],'/',outputPrefix, sitename,label,'_snow.csv',sep=''),row.names=F)
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_return
)
colnames(result) = c('year','month','day', paste('return',1:jMax,sep='_') )
write.csv(result,paste(location[1],'/',outputPrefix, sitename,label,'_return.csv',sep=''),row.names=F)
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_psn
)
colnames(result) = c('year','month','day', paste('psn',1:jMax,sep='_') )
write.csv(result,paste(location[1],'/',outputPrefix, sitename,label,'_psn.csv',sep=''),row.names=F)
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_lai
)
colnames(result) = c('year','month','day', paste('lai',1:jMax,sep='_') )
write.csv(result,paste(location[1],'/',outputPrefix, sitename,label,'_lai.csv',sep=''),row.names=F)
}#function
ConditionII_MultipleBasinSeries2WSC =function(CONDS,prefix, sub, Jindex, outputPrefix, sitename=NA, period=NULL, subPrefix=NA, toLake=F,lakeArea=NA,label=''){
# Variant of ConditionMultipleBasinSeries2WSC in which `prefix` is a MATRIX
# indexed [parameter set j, condition] rather than a vector, so each
# parameter set can live under its own output directory. Output files are
# written directly under `outputPrefix` (no per-prefix directory lookup).
#
# CONDS       : T/F matrix (nrow = sub-basins, ncol = conditions); row kk
#               marks which condition sub-basin kk was simulated under.
# prefix      : matrix of path prefixes, indexed [j, condition].
# sub         : sub-basin table CSV with columns 'id', 'grid', 'area'.
# Jindex      : label matrix (nrow = parameter sets, ncol = conditions).
# outputPrefix: prefix (including any directory) for the written CSV files.
# sitename    : site name in file names.
#               NOTE(review): the fallback `sitename = tmp[length(tmp)]`
#               below references `tmp` before it is defined (its defining
#               loop is commented out), so calling with sitename=NA would
#               error -- always pass sitename explicitly.
# period      : Date vector restricting the output window; NULL = full period.
# subPrefix   : file-name fragment before the sub-basin id.
# toLake      : if T, also write flow in m^3/day using lakeArea.
# lakeArea    : m^2 for the mm -> m^3 conversion; NA = sum of 'area' column.
# label       : optional suffix inserted in most output file names.
# Called for its side effects (writes CSVs); no useful return value.
	# assuming more than 1 condition
	# CONDS: multiple conditions based on sub.csv (T/F-matrix: nrow = sub, col=conds)
	# prefix: multiple prefixes (should correspond to CONDS)
	# Jindex: index matrix: nrow=index, and col=cond
	# first step (everything is in terms of mm/day )
	# convert a series of rhessys basin output at the same watershed (need to combine multiple sub-basin) into WSC format
	# ls -l world_subbasin_??? | awk '{print $9}'
subFile = read.csv(paste(sub,sep=''),stringsAsFactors=F)
	# location = rep(NA, length(prefix))
	# for(i in 1:length(location)){
		# tmp = unlist(strsplit(prefix[i],split='/'))
		# if(length(tmp)>1){location[i] = paste(tmp[1:(length(tmp)-1)],collapse='/')}else{location[i]='.'}
	# }#i
if(is.na(sitename)){sitename = tmp[length(tmp)]}
if(is.na(subPrefix)){subPrefix = '_world_subbasin_'}
if(is.na(lakeArea)){lakeArea=sum(subFile[,'area'])}
# Read one reference output (first sub-basin, first parameter set) to
# establish the simulated date range.
prefixuseIndex = which(CONDS[1,]==T) #find out which cond the first file is on
outputname = paste(prefix[1,prefixuseIndex], sitename, subPrefix,subFile[1,'id'],'_',Jindex[1, prefixuseIndex],'_basin.daily',sep='' )
tmp = read.table(outputname,header=F,skip=1)
if(is.null(period)){
print('use RHESSys output period')
# Columns 1-3 of RHESSys daily output are day, month, year -- hence %d-%m-%Y.
period = as.Date(paste(tmp[,1], tmp[,2], tmp[,3],sep="-"),format="%d-%m-%Y")
rhessys.dailytimeSeriesMatch = rep(T,length(period))
period.dailytimeSeriesMatch = rep(T,length(period))
print(range(period))
}else{
rhessys.date = as.Date(paste(tmp[,1], tmp[,2], tmp[,3],sep="-"),format="%d-%m-%Y")
tmp = match2DailyTimeSeries(rhessys.date, period) ### assume period is the most narrow band
rhessys.dailytimeSeriesMatch = tmp$xSelect
period.dailytimeSeriesMatch = tmp$ySelect
}
# One holding matrix per output variable:
# rows = matched days, columns = parameter sets (rows of Jindex).
jMax = nrow(Jindex)
holding = matrix(NA,sum(period.dailytimeSeriesMatch), jMax) #flow
holding_et = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_pet = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_precip = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_tmax = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_tmin = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_satz = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_unsat = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_evap = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_snow = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_return = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_psn = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
holding_lai = matrix(NA, sum(period.dailytimeSeriesMatch), jMax)
for(j in 1:jMax){
	# multiple sub-catchment
# Per-sub-basin scratch matrices for parameter set j.
secondholding = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_et = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_pet = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_rain = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_tmax = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_tmin = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_satz = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_unsat = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_evap = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_snow = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_return = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_psn = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
secondholding_lai = matrix(NA,sum(period.dailytimeSeriesMatch), nrow(subFile))
for(kk in 1:nrow(subFile)){
# tryCatch keeps the loop going (columns stay NA) when a file is missing
# or unreadable; the offending sub-basin and file name are printed.
tryCatch({
prefixuseIndex = which(CONDS[kk,]==T)
# prefix is indexed by [parameter set j, condition] in this variant.
outputname = paste(prefix[j,prefixuseIndex], sitename, subPrefix,subFile[kk,'id'],'_',Jindex[j, prefixuseIndex],'_basin.daily',sep='' )
tmp = read.table(outputname,header=F,skip=1)
# RHESSys 5.20 output has >70 columns; 5.18 has fewer. The two layouts
# differ here only in the tmax/tmin column positions (38/39 vs 37/38).
if(ncol(tmp)>70){
	# 5.20
secondholding[,kk]=tmp[rhessys.dailytimeSeriesMatch,19]
secondholding_et[,kk]=tmp[rhessys.dailytimeSeriesMatch,14]+tmp[rhessys.dailytimeSeriesMatch,16]
secondholding_pet[,kk]=tmp[rhessys.dailytimeSeriesMatch,33]
secondholding_rain[,kk]=tmp[rhessys.dailytimeSeriesMatch,35]# rain mm
tmax_ = tmp[rhessys.dailytimeSeriesMatch,38]# tmax C
tmin_ = tmp[rhessys.dailytimeSeriesMatch,39]# tmin C
# Element-wise max/min guards against swapped tmax/tmin values.
# colMaxs/colMins are presumably from matrixStats -- TODO confirm.
secondholding_tmax[,kk]=colMaxs( rbind(tmax_,tmin_))
secondholding_tmin[,kk]=colMins( rbind(tmax_,tmin_))
secondholding_satz[,kk]=tmp[rhessys.dailytimeSeriesMatch,7]# satz
secondholding_unsat[,kk]=tmp[rhessys.dailytimeSeriesMatch,10]# unsat
secondholding_evap[,kk]=tmp[rhessys.dailytimeSeriesMatch,14]# evap
secondholding_snow[,kk]=tmp[rhessys.dailytimeSeriesMatch,15]# snowpack
secondholding_return[,kk]=tmp[rhessys.dailytimeSeriesMatch,18]# return flow
secondholding_psn[,kk]=tmp[rhessys.dailytimeSeriesMatch,20]# psn
secondholding_lai[,kk]=tmp[rhessys.dailytimeSeriesMatch,21]# lai
}else{
	# 5.18
secondholding[,kk]=tmp[rhessys.dailytimeSeriesMatch,19]
secondholding_et[,kk]=tmp[rhessys.dailytimeSeriesMatch,14]+tmp[rhessys.dailytimeSeriesMatch,16]
secondholding_pet[,kk]=tmp[rhessys.dailytimeSeriesMatch,33]
secondholding_rain[,kk]=tmp[rhessys.dailytimeSeriesMatch,35]# rain mm
tmax_ = tmp[rhessys.dailytimeSeriesMatch,37]# tmax C
tmin_ = tmp[rhessys.dailytimeSeriesMatch,38]# tmin C
secondholding_tmax[,kk]=colMaxs( rbind(tmax_,tmin_))
secondholding_tmin[,kk]=colMins( rbind(tmax_,tmin_))
secondholding_satz[,kk]=tmp[rhessys.dailytimeSeriesMatch,7]# satz
secondholding_unsat[,kk]=tmp[rhessys.dailytimeSeriesMatch,10]# unsat
secondholding_evap[,kk]=tmp[rhessys.dailytimeSeriesMatch,14]# evap
secondholding_snow[,kk]=tmp[rhessys.dailytimeSeriesMatch,15]# snowpack
secondholding_return[,kk]=tmp[rhessys.dailytimeSeriesMatch,18]# return flow
secondholding_psn[,kk]=tmp[rhessys.dailytimeSeriesMatch,20]# psn
secondholding_lai[,kk]=tmp[rhessys.dailytimeSeriesMatch,21]# lai
}},
warning = function(w) {
print(kk)
print(outputname)
print(w)
}, error = function(e) {
print(kk)
print(outputname)
print(e)
}
)#tryCatch
}#kk
# Grid-cell-count-weighted average across sub-basins -> basin mean series.
holding[,j] = secondholding %*%subFile[,'grid']/sum(subFile[,'grid'])
holding_et[,j] = secondholding_et %*%subFile[,'grid']/sum(subFile[,'grid'])
holding_pet[,j] = secondholding_pet %*%subFile[,'grid']/sum(subFile[,'grid'])
holding_precip[,j] = secondholding_rain%*%subFile[,'grid']/sum(subFile[,'grid'])# rain mm
holding_tmax[,j] = secondholding_tmax%*%subFile[,'grid']/sum(subFile[,'grid'])# tmax C
holding_tmin[,j] = secondholding_tmin%*%subFile[,'grid']/sum(subFile[,'grid'])# tmin C
holding_satz[,j] = secondholding_satz%*%subFile[,'grid']/sum(subFile[,'grid'])#
holding_unsat[,j] = secondholding_unsat%*%subFile[,'grid']/sum(subFile[,'grid'])#
holding_evap[,j] = secondholding_evap%*%subFile[,'grid']/sum(subFile[,'grid'])#
holding_snow[,j] = secondholding_snow%*%subFile[,'grid']/sum(subFile[,'grid'])#
holding_return[,j] = secondholding_return%*%subFile[,'grid']/sum(subFile[,'grid'])#
holding_psn[,j] = secondholding_psn%*%subFile[,'grid']/sum(subFile[,'grid'])#
holding_lai[,j] = secondholding_lai%*%subFile[,'grid']/sum(subFile[,'grid'])#
}#j
# Write each variable as year/month/day columns plus one column per
# parameter set, directly under outputPrefix.
# NOTE(review): some output names include `label` and some (flowcmd, et,
# pet, precip, tmax, tmin) do not -- confirm this asymmetry is intended.
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding
)
colnames(result) = c('year','month','day', paste('streamflowmm',1:jMax,sep='_') )
write.csv(result,paste(outputPrefix, sitename,label,'_streamflowmm.csv',sep=''),row.names=F)
if(toLake){
# mm/day -> m^3/day over the lake area.
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding*0.001*lakeArea
)
colnames(result) = c('year','month','day', paste('flowcmd',1:jMax,sep='_') )
write.csv(result,paste(outputPrefix, sitename,'_flowcmd.csv',sep=''),row.names=F)
}
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_et
)
colnames(result) = c('year','month','day', paste('et',1:jMax,sep='_') )
write.csv(result,paste(outputPrefix, sitename,'_et.csv',sep=''),row.names=F)
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_pet
)
colnames(result) = c('year','month','day', paste('pet',1:jMax,sep='_') )
write.csv(result,paste(outputPrefix, sitename,'_pet.csv',sep=''),row.names=F)
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_precip
)
colnames(result) = c('year','month','day', paste('precip',1:jMax,sep='_') )
write.csv(result,paste(outputPrefix, sitename,'_precip.csv',sep=''),row.names=F)
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_tmax
)
colnames(result) = c('year','month','day', paste('tmax',1:jMax,sep='_') )
write.csv(result,paste(outputPrefix, sitename,'_tmax.csv',sep=''),row.names=F)
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_tmin
)
colnames(result) = c('year','month','day', paste('tmin',1:jMax,sep='_') )
write.csv(result,paste(outputPrefix, sitename,'_tmin.csv',sep=''),row.names=F)
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_satz
)
colnames(result) = c('year','month','day', paste('satz',1:jMax,sep='_') )
write.csv(result,paste(outputPrefix, sitename,label,'_satz.csv',sep=''),row.names=F)
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_unsat
)
colnames(result) = c('year','month','day', paste('unsat',1:jMax,sep='_') )
write.csv(result,paste(outputPrefix, sitename,label,'_unsat.csv',sep=''),row.names=F)
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_evap
)
colnames(result) = c('year','month','day', paste('evap',1:jMax,sep='_') )
write.csv(result,paste(outputPrefix, sitename,label,'_evap.csv',sep=''),row.names=F)
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_snow
)
colnames(result) = c('year','month','day', paste('snow',1:jMax,sep='_') )
write.csv(result,paste(outputPrefix, sitename,label,'_snow.csv',sep=''),row.names=F)
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_return
)
colnames(result) = c('year','month','day', paste('return',1:jMax,sep='_') )
write.csv(result,paste(outputPrefix, sitename,label,'_return.csv',sep=''),row.names=F)
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_psn
)
colnames(result) = c('year','month','day', paste('psn',1:jMax,sep='_') )
write.csv(result,paste(outputPrefix, sitename,label,'_psn.csv',sep=''),row.names=F)
result = cbind(
format(period[period.dailytimeSeriesMatch],format='%Y'),
format(period[period.dailytimeSeriesMatch],format='%m'),
format(period[period.dailytimeSeriesMatch],format='%d'),
holding_lai
)
colnames(result) = c('year','month','day', paste('lai',1:jMax,sep='_') )
write.csv(result,paste(outputPrefix, sitename,label,'_lai.csv',sep=''),row.names=F)
}#function
##--------------------------------------------------------------------------------------------------------------------
##--------- below are examples
##--------------------------------------------------------------------------------------------------------------------
if(F){
#---------------------------------------------------- new hope Bias
MultipleBasinSeries2WSC(
prefix=paste("output_newhopeBias/FIAnlcdlocal_2010",sep=''),
sub='newhope_sub.csv',
Jindex = paste('param', c(1,2,3,4),sep=''),
outputPrefix=paste('regionalNewhopeBias',sep=''),
sitename='',
period=seq.Date(from=as.Date('1940-1-1'), to=as.Date('2010-10-1'), by="day") ,
subPrefix='_sub',
toLake=T,
lakeArea=198868500
)
MultipleBasinSeries2WSC(
prefix=paste("output_jordanBias/FIAnlcdlocal_2010",sep=''),
sub='jordan_sub.csv',
Jindex = paste('param', c(1,2,3,4),sep=''),
outputPrefix=paste('regionalJordanBias',sep=''),
sitename='',
period=seq.Date(from=as.Date('1940-1-1'), to=as.Date('2010-10-1'), by="day") ,
subPrefix='_sub',
toLake=T,
lakeArea=747584100 #area excluding newhope
)
#---------------------------------------------------- michie regional simulation
# MultipleBasinSeries2WSC(
# prefix='output_michie_FIAnlcdlocal_proj2_s3/FIAnlcdlocal_2060',
# sub='flat_region_sub.csv',
# Jindex = paste('param', c(1,2,3,4),sep=''),
# outputPrefix='regionalFlat2060',
# sitename='',
# period=NULL,
# subPrefix='_sub',
# toLake=T,
# lakeArea=432610200
# )
projhh = c(1) #c(2,3,12,13)
allperiod = matrix(c(
'2020','1990-10-1','2051-9-30',
'2030','2000-10-1','2061-9-30',
'2040','2010-10-1','2071-9-30',
'2050','2020-10-1','2081-9-30',
'2060','2030-10-1','2090-9-30'
),nrow=5,ncol=3,byrow=T)
for(jj in 1:1){ #nrow(allperiod)
for(hh in projhh){
MultipleBasinSeries2WSC(
prefix=paste("output_michie_FIAnlcdlocal_proj",hh,"_csiroRCP6r7/FIAnlcdlocal_",allperiod[jj,1],sep=''),
sub='flat_region_sub.csv',
Jindex = paste('param', c(1,2,3,4),sep=''),
outputPrefix=paste('regionalFlat',allperiod[jj,1],sep=''),
sitename='',
period=seq.Date(from=as.Date(allperiod[jj,2]), to=as.Date(allperiod[jj,3]), by="day") ,
subPrefix='_sub',
toLake=T,
lakeArea=432610200
)
}#hh
}#jj
# period set to avoid 5 spin up years
# flatsub = read.csv('flat_region_sub.csv')
# combineSubbasin2Basin(
# prefix='output_michieBias/FIAnlcdlocal_2010',
# suffix='_param3',
# subbasin=flatsub
# )
#---------------------------------------------------- owasa regional simulation
# allperiod = matrix(c(
# '2010','1980-10-1','2041-9-30',
# '2020','1990-10-1','2051-9-30',
# '2030','2000-10-1','2061-9-30',
# '2040','2010-10-1','2071-9-30',
# '2050','2020-10-1','2081-9-30',
# '2060','2030-10-1','2090-9-30'
# ),nrow=6,ncol=3,byrow=T)
allperiod = matrix(c(
'2060','1980-10-1','2041-9-30'
#'2010','1980-10-1','2040-9-30'
),nrow=1,ncol=3,byrow=T)
for(jj in 1:nrow(allperiod)){
MultipleBasinSeries2WSC(
prefix=paste('output_owasa_FIAnlcdlocal_proj1_s3_nonForest_climateperiod2010/FIAnlcdlocal_',allperiod[jj,1],sep=''),
sub='cane_regional_sub.csv',
Jindex = paste('param', c(1),sep=''),
outputPrefix=paste('regionalCane',allperiod[jj,1],sep=''),
sitename='',
period=seq.Date(from=as.Date(allperiod[jj,2]), to=as.Date(allperiod[jj,3]), by="day"),
subPrefix='_sub',
toLake=T,
lakeArea=80128800
)
MultipleBasinSeries2WSC(
prefix=paste('output_owasa_FIAnlcdlocal_proj1_s3_nonForest_climateperiod2010/FIAnlcdlocal_',allperiod[jj,1],sep=''),
sub='morgan_regional_sub.csv',
Jindex = paste('param', c(1),sep=''),
outputPrefix=paste('regionalMorgan',allperiod[jj,1],sep=''),
sitename='',
period=seq.Date(from=as.Date(allperiod[jj,2]), to=as.Date(allperiod[jj,3]), by="day"),
subPrefix='_sub',
toLake=T,
lakeArea=76376700
)
ww=read.csv(paste("output_owasa_FIAnlcdlocal_proj1_s3_nonForest_climateperiod2010/regionalCane",allperiod[jj,1],"_streamflowmm.csv",sep=''))
ww.date = as.Date(paste(ww[,3], ww[,2], ww[,1],sep="-"),format="%d-%m-%Y")
tmp = match2DailyTimeSeries(
ww.date,
seq.Date(from=as.Date(allperiod[jj,2]), to=as.Date(allperiod[jj,3]), by="day")
)
period.dailytimeSeriesMatch = tmp$xSelect
WSC_combineUSGS2Lake(
prefix = c(
paste('output_owasa_FIAnlcdlocal_proj1_s3_nonForest_climateperiod2010/regionalCane',allperiod[jj,1],sep=''),
paste('output_owasa_FIAnlcdlocal_proj1_s3_nonForest_climateperiod2010/regionalMorgan',allperiod[jj,1],sep='')
),
dailytimeSeriesMatch = list( period.dailytimeSeriesMatch, period.dailytimeSeriesMatch ),
replication = 1,
arealweight = c(70318800, 72018900),
outPrefix = paste('output_owasa_FIAnlcdlocal_proj1_s3_nonForest_climateperiod2010/lake_owasa',allperiod[jj,1],sep=''),
scaler = 80128800+ 76376700
)
}#jj
##----------- landuse
projhh = c(1)
allperiod = matrix(c(
'2020','1990-10-1','2051-9-30',
'2030','2000-10-1','2061-9-30',
'2040','2010-10-1','2071-9-30',
'2050','2020-10-1','2081-9-30',
'2060','2030-10-1','2090-9-30'
),nrow=5,ncol=3,byrow=T)
for(jj in 1:nrow(allperiod)){
for(hh in projhh){
MultipleBasinSeries2WSC(
prefix=paste("output_owasa_FIAnlcdlocal_proj",hh,"_csiroRCP6r7/FIAnlcdlocal_",allperiod[jj,1],sep=''),
sub='cane_regional_sub.csv',
Jindex = paste('param', c(1,2,3,4),sep=''),
outputPrefix=paste('regionalCane',allperiod[jj,1],sep=''),
sitename='',
period=seq.Date(from=as.Date(allperiod[jj,2]), to=as.Date(allperiod[jj,3]), by="day"),
subPrefix='_sub',
toLake=T,
lakeArea=80128800
)
MultipleBasinSeries2WSC(
prefix=paste("output_owasa_FIAnlcdlocal_proj",hh,"_csiroRCP6r7/FIAnlcdlocal_",allperiod[jj,1],sep=''),
sub='morgan_regional_sub.csv',
Jindex = paste('param', c(1,2,3,4),sep=''),
outputPrefix=paste('regionalMorgan',allperiod[jj,1],sep=''),
sitename='',
period=seq.Date(from=as.Date(allperiod[jj,2]), to=as.Date(allperiod[jj,3]), by="day"),
subPrefix='_sub',
toLake=T,
lakeArea=76376700
)
ww=read.csv(paste("output_owasa_FIAnlcdlocal_proj",hh,"_csiroRCP6r7/regionalCane",allperiod[jj,1],"_streamflowmm.csv",sep=''))
ww.date = as.Date(paste(ww[,3], ww[,2], ww[,1],sep="-"),format="%d-%m-%Y")
tmp = match2DailyTimeSeries(
ww.date,
seq.Date(from=as.Date(allperiod[jj,2]), to=as.Date(allperiod[jj,3]), by="day")
)
period.dailytimeSeriesMatch = tmp$xSelect
WSC_combineUSGS2Lake(
prefix = c(
paste('output_owasa_FIAnlcdlocal_proj',hh,'_csiroRCP6r7/regionalCane',allperiod[jj,1],sep=''),
paste('output_owasa_FIAnlcdlocal_proj',hh,'_csiroRCP6r7/regionalMorgan',allperiod[jj,1],sep='')
),
dailytimeSeriesMatch = list( period.dailytimeSeriesMatch, period.dailytimeSeriesMatch ),
replication = 4,
arealweight = c(70318800, 72018900),
outPrefix = paste('output_owasa_FIAnlcdlocal_proj',hh,'_csiroRCP6r7/lake_owasa',allperiod[jj,1],sep=''),
scaler = 80128800+ 76376700
)
}#hh
}#jj
canesub = read.csv('cane_regional_sub.csv')
combineSubbasin2Basin(
prefix='output_owasaTesting/FIAnlcdlocal_2010',
suffix='_param1',
subbasin=canesub
)
canesub = read.csv('morgan_regional_sub.csv')
combineSubbasin2Basin(
prefix='output_owasaTesting/FIAnlcdlocal_2010',
suffix='_param1',
label='morgan',
subbasin=canesub
)
#------------------------------------------------
#"-st 1980 10 1 1 -ed 2041 9 30 1", #<<--- 2010
#"-st 1990 10 1 1 -ed 2051 9 30 1", #<<--- 2020
#"-st 2000 10 1 1 -ed 2061 9 30 1", #<<--- 2030
#"-st 2010 10 1 1 -ed 2071 9 30 1", #<<--- 2040
#"-st 2020 10 1 1 -ed 2081 9 30 1", #<<--- 2050
#"-st 2030 10 1 1 -ed 2090 9 30 1", #<<--- 2060
#WSC_cutPeriod0
# problem here is that "WSC_cutPeriod" produced a different name scheme
WSC_cutPeriod(
prefix='output_owasa_csiroRCP6r7/lake_owasa',
period=seq.Date(from=as.Date('1980-10-1'), to=as.Date('2041-9-30') ,by="day"),
label='2010')
WSC_cutPeriod(
prefix='output_owasa_csiroRCP6r7/lake_owasa',
period=seq.Date(from=as.Date('1990-10-1'), to=as.Date('2051-9-30') ,by="day"),
label='2020')
WSC_cutPeriod(
prefix='output_owasa_csiroRCP6r7/lake_owasa',
period=seq.Date(from=as.Date('2000-10-1'), to=as.Date('2061-9-30') ,by="day"),
label='2030')
WSC_cutPeriod(
prefix='output_owasa_csiroRCP6r7/lake_owasa',
period=seq.Date(from=as.Date('2010-10-1'), to=as.Date('2071-9-30') ,by="day"),
label='2040')
WSC_cutPeriod(
prefix='output_owasa_csiroRCP6r7/lake_owasa',
period=seq.Date(from=as.Date('2020-10-1'), to=as.Date('2081-9-30') ,by="day"),
label='2050')
WSC_cutPeriod(
prefix='output_owasa_csiroRCP6r7/lake_owasa',
period=seq.Date(from=as.Date('2030-10-1'), to=as.Date('2090-9-30') ,by="day"),
label='2060')
#"-st 1980 10 1 1 -ed 2041 9 30 1", #<<--- 2010
#"-st 1990 10 1 1 -ed 2051 9 30 1", #<<--- 2020
#"-st 2000 10 1 1 -ed 2061 9 30 1", #<<--- 2030
#"-st 2010 10 1 1 -ed 2071 9 30 1", #<<--- 2040
#"-st 2020 10 1 1 -ed 2081 9 30 1", #<<--- 2050
#"-st 2030 10 1 1 -ed 2090 9 30 1", #<<--- 2060
#---------------------------------------------------- morgan
SingleBasinSeries2WSC('morgan_',c(5,6,8,10),'usgs_','morgan' )
WSC_usgs2lake('usgs_morgan', 21401100,'lake_university' ) #wrong 76376700
SingleBasinSeries2WSC('../output_cmip5/morgan_',c(5,6,8,10),'usgs_', 'morgan' )
WSC_usgs2lake('../output_cmip5/usgs_morgan', 21401100,'lake_university' )
ww=read.csv("usgs_morgan_pet.csv")
WSC_combineUSGS2Lake(
prefix = c('usgs_morgan', '~/Dropbox/Myself/UNC/WSC/cane_testingMCMC_sept15/cane_FIA_nlcd_local/rhessys/needed_coop/usgs_cane'),
dailytimeSeriesMatch = list( rep(T,nrow(ww)), rep(T,nrow(ww)) ),
replication = 4,
arealweight = c(21401100, 19687500),
outPrefix = 'lake_owasa',
scaler = 80128800+ 76376700
)
WSC_combineUSGS2Lake(
prefix = c('../output_cmip5/usgs_morgan', '~/Dropbox/Myself/UNC/WSC/cane_testingMCMC_sept15/cane_FIA_nlcd_local/rhessys/needed_cmip5/usgs_cane'),
dailytimeSeriesMatch = list( rep(T,nrow(ww)), rep(T,nrow(ww)) ),
replication = 4,
arealweight = c(21401100, 19687500),
outPrefix = '../output_cmip5/lake_owasa',
scaler = 80128800+ 76376700
)
#---------------------------------------------------- cane
SingleBasinSeries2WSC(
'~/Dropbox/Myself/UNC/WSC/cane_testingMCMC_sept15/cane_FIA_nlcd_local/rhessys/needed_coop/rhessys',
c(16,20,41,42),
'usgs_','cane' )
WSC_usgs2lake(
'~/Dropbox/Myself/UNC/WSC/cane_testingMCMC_sept15/cane_FIA_nlcd_local/rhessys/needed_coop/usgs_cane',
19687500,
'lake_cane')
SingleBasinSeries2WSC(
'~/Dropbox/Myself/UNC/WSC/cane_testingMCMC_sept15/cane_FIA_nlcd_local/rhessys/needed_cmip5/rhessys',
c(16,20,41,42),
'usgs_' ,'cane')
WSC_usgs2lake(
'~/Dropbox/Myself/UNC/WSC/cane_testingMCMC_sept15/cane_FIA_nlcd_local/rhessys/needed_cmip5/usgs_cane',
19687500,
'lake_cane')
#---------------------------------------------------- flat
MultipleBasinSeries2WSC(
prefix='/Users/laurencelin/Desktop/master_FIA/rhessys/output_green_cmip/case05_',
sub='flat_sub.csv',
Jindex =c(1,2,3,4,5),
outputPrefix='usgs_flat',
sitename='flat',
period=NULL
)
WSC_usgs2lake('/Users/laurencelin/Desktop/master_FIA/rhessys/output_green_cmip/usgs_flatflat', 432610200,'lake_michie')
MultipleBasinSeries2WSC(
prefix='/Users/laurencelin/Desktop/master_FIA/rhessys/output_green_coop/case05_',
sub='flat_sub.csv',
Jindex =c(1,2,3,4,5),
outputPrefix='usgs_flat',
sitename='flat',
period=NULL
)
WSC_usgs2lake('/Users/laurencelin/Desktop/master_FIA/rhessys/output_green_coop/usgs_flatflat', 432610200,'lake_michie')
## still working
}# not exe
|
a30f14824d490214b290396adb5f8bc9444ea99e | 5e207545b85ae719b2cbde944db2aa4601e49ab7 | /Statistical Analysis and Visualisation.R | 2e461df2e3a271437b42abd931ab6c1647e4dc19 | [] | no_license | ShaneBrennan8/Data-wrangling-and-calculations | dddef3a601848396ae2c877e9f8b630ac7cf12cb | 73304b9f2dc41cf24b58636983031b31a573d35c | refs/heads/main | 2023-04-15T16:24:12.723037 | 2021-05-04T11:21:53 | 2021-05-04T11:21:53 | 336,409,707 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,490 | r | Statistical Analysis and Visualisation.R | # Load libraries
# Install (if missing) and load the packages used below.
# BUG FIXES vs. original: install.package() was missing the final "s" and
# errored at runtime; "ggplot" is not a real package (the plotting package is
# ggplot2, loaded as part of the tidyverse); ggpubr was installed and loaded
# twice.
install.packages("MASS")          # glm model with negative binomial distribution
library(MASS)
install.packages("basicPlotteR")  # alpha colours in plotting
library(basicPlotteR)
install.packages("broom")         # simple tables from model outputs
library(broom)
install.packages("car")           # companion to applied regression (maths helpers)
library(car)
install.packages("tidyverse")     # dplyr, ggplot2, tidyr, ...
library(tidyverse)
install.packages("ggpubr")        # publication-ready ggplot helpers (ggqqplot)
library(ggpubr)
install.packages("rstatix")       # pipe-friendly statistical tests (shapiro_test)
library(rstatix)
#### Strongyles #####
# Normality testing: determine whether the count data are parametric or
# non-parametric, so the correct test for comparing means can be chosen.
# Import the data.
# NOTE(review): uses "=" for assignment; the rest of the script uses "<-".
trial_data = read.csv("HSI - McM, MF, Manual 26 Feb.csv", head=TRUE, sep=",")
### Normality testing for all datasets: McMaster vs MiniFLOTAC.
# Fit a linear model between the two devices' counts; normality is assessed
# on its residuals.
model1 <- lm(McMaster.Strongyles ~ Mini.FLOTAC.Strongyles, data = trial_data)
# QQ plot of the residuals (ggpubr).
ggqqplot(residuals(model1))
# Shapiro-Wilk test of normality on the residuals (rstatix).
shapiro_test(residuals(model1))
# Wilcoxon tests comparing paired devices (optional, kept commented out).
#wilcox.test(trial_data$McMaster.Strongyles, trial_data$Mini.FLOTAC.Strongyles)
#wilcox.test(trial_data$Manual.Strongyles, trial_data$Mini.FLOTAC.Strongyles)
#wilcox.test(trial_data$McMaster.Strongyles, trial_data$Manual.Strongyles)
# Scatterplots of each pairwise device comparison, mirroring the three
# Spearman correlations computed below (McMaster-Manual, Manual-MiniFLOTAC,
# McMaster-MiniFLOTAC).
# BUG FIX: the third plot referenced trial_data$Manual.Strongyle (missing the
# final "s"); data.frame "$" partial matching silently resolved it to
# Manual.Strongyles, duplicating the first plot.  The missing comparison,
# judging by the correlation tests below, is McMaster vs MiniFLOTAC.
plot(trial_data$McMaster.Strongyles, trial_data$Manual.Strongyles)
plot(trial_data$Manual.Strongyles, trial_data$Mini.FLOTAC.Strongyles)
plot(trial_data$McMaster.Strongyles, trial_data$Mini.FLOTAC.Strongyles)
#### Correlation testing using Spearman's correlation (datasets are not normally distributed)
cor.test(trial_data$McMaster.Strongyles, trial_data$Manual.Strongyles, method=c("spearman"))
cor.test(trial_data$Manual.Strongyles, trial_data$Mini.FLOTAC.Strongyles, method=c("spearman"))
cor.test(trial_data$McMaster.Strongyles, trial_data$Mini.FLOTAC.Strongyles, method=c("spearman"))
### Visualise how the three device pairs compare on one ggplot: the x/y axes
### are reused for each pair's counts and each pair gets its own legend colour
### (the Manual-vs-MiniFLOTAC layer is forced to black by the second geom_point).
ggplot(trial_data, aes(Manual.Strongyles, Mini.FLOTAC.Strongyles, colour = "Manual V MinifFLOTAC")) +
  geom_point(colour=alpha("black",1)) +
  geom_point(aes(Mini.FLOTAC.Strongyles, McMaster.Strongyles, colour = "MiniFLOTAC V McMaster"))+
  geom_point(aes(Manual.Strongyles, McMaster.Strongyles, colour = "Manual V McMaster")) +
  ggtitle("Manual V MiniFLOTAC similarity (Black) compared to other devices")
|
737683175c1c71d7e2829c147a7a68a9676d2bb9 | 1df1ea7f7a8f8a4cd876f0d643d355a54ddfbe14 | /maps and histograms SST and tracking_ESFGR.R | 80957253586eeaaef02d917cbe0ccd6f3fab2420 | [] | no_license | ESFGR/R-Scripts-for-analysis-of-SST-from-Copernicus-Marine-Service-prodcuts-and-electronic-tagging-data | d87aea27cc932bc7af237e9317eef81c42789bfa | 44f43cf7701233d7efab97f75f1e074e4b34f85c | refs/heads/main | 2023-06-28T06:30:50.140369 | 2021-07-30T17:53:48 | 2021-07-30T17:53:48 | null | 0 | 0 | null | null | null | null | WINDOWS-1250 | R | false | false | 26,402 | r | maps and histograms SST and tracking_ESFGR.R |
####Mapping daily SST + distribution ####
library(stringr)
library(ncdf4)
library(reshape2)
library(ggplot2)
library(mapdata)
library(maptools)
library(rnaturalearth)
library(gdata)
library(raster)
library(scatterpie)
library (openxlsx)
library(dplyr)
library(lubridate)
#SST data from IBI model ----
# NOTE(review): hard-coded absolute path; adjust before running elsewhere.
setwd("C:/Users/Usuario/Desktop/TFM/SST and Taggging")
tracking_data <-read.csv("Tracking data.csv")
table(tracking_data$inout)  # count of track locations per direction (in/out)
dates<-read.csv("dates_inout.csv") #dates of tagging
dates<-arrange(dates, dates$inout, dates$df_dates) #order data by direction (in/out), then date
#dates<-arrange(dates, dates$df_dates) #order data by date
#View(dates) #observe the dates of tagging --> dates for which SST values will be obtained
# One table of tagging dates per study year.
dates2011 <-subset(dates, dates$Year=="2011")
dates2012 <-subset(dates, dates$Year=="2012")
dates2013 <-subset(dates, dates$Year=="2013")
# Read a NetCDF SST file and return a long-format table.
#
# x: path to a .nc file containing variables "latitude", "longitude",
#    "time" and "thetao" (sea surface temperature).
# Returns a data frame with columns long, lat, date (the file's raw time
# values) and sst; rows with missing SST are dropped.
f1 <- function(x){
  nc <- nc_open(x)
  # Ensure the connection is closed even if a read below fails.
  on.exit(nc_close(nc), add = TRUE)
  lat  <- ncvar_get(nc, "latitude")
  lon  <- ncvar_get(nc, "longitude")
  time <- ncvar_get(nc, "time")
  sst  <- ncvar_get(nc, "thetao")
  dimnames(sst) <- list(long = lon, lat = lat, date = time)
  # Melt the 3-D array (lon x lat x time) into one row per cell/time step.
  out <- melt(sst, value.name = "sst")
  # BUG FIX: the original called na.omit() on the intermediate LIST, which is
  # a no-op (na.omit.default returns non-atomic objects unchanged), so NA
  # cells were never removed.  Drop them from the melted table as intended.
  na.omit(out)
}# function: build a table from the NC file with SST values, coordinates and dates
# Colour palette for the SST fill scale (reversed rainbow truncated at 0.8,
# so colour runs from blue (cold) to red (warm)).
col.pal<- rainbow(150, end=0.8, rev=TRUE)
## Prepare the base map of the study area ----
mapBuffer <-0.1  # NOTE(review): defined but not used below
worldMap <- ne_download(scale = "medium", returnclass = "sp")
worldMapCroped <- crop(worldMap,extent(-9,-5,35,37.5)) # study-area bounding box (lon -9..-5, lat 35..37.5)
mainMap <- ggplot() + geom_polygon(data = worldMapCroped, aes(x = long, y = lat, group = group)) + coord_fixed() + theme(axis.ticks=element_blank()) + ylab("Latitude") + xlab("Longitude")# base map of the study area
##### 2013 ####
#### track_in_2013: tuna track locations for 2013, direction IN,
#### split into one data frame per tagging day.
### IN dates: May 21-28th, 29th, 31st; plus one early-June day ----
## subsetting tracking data per day
trackData2013<- subset(tracking_data, Year=="2013")
# NOTE(review): the arrange() calls below are not assigned, so the sorted
# results are discarded (console output only).
arrange(trackData2013,trackData2013$id, trackData2013$yday)
?arrange  # help lookup left in during development
track_in_2013 <- subset(trackData2013, inout == "in")
head(track_in_2013)
arrange(track_in_2013,yday)
range(track_in_2013$Obs.SST)
range(track_in_2013$yday)
# yday 141 = 21 May 2013.  Trailing numbers presumably count the fish
# positions available that day -- TODO confirm.
trackmay21<- subset(track_in_2013,track_in_2013$yday=="141") #1
trackmay22<- subset(track_in_2013,track_in_2013$yday=="142") #2
trackmay23<- subset(track_in_2013, track_in_2013$yday=="143") #3
trackmay24<- subset(track_in_2013, track_in_2013$yday=="144") #1
trackmay25<- subset(track_in_2013, track_in_2013$yday=="145") #3
trackmay26<- subset(track_in_2013, track_in_2013$yday=="146") #2
trackmay27<- subset(track_in_2013, track_in_2013$yday=="147") #2
trackmay28<- subset(track_in_2013, track_in_2013$yday=="148") #0
trackmay29<- subset(track_in_2013, track_in_2013$yday=="149") #2
trackmay31<- subset(track_in_2013, track_in_2013$yday=="151") #2
# NOTE(review): yday 153 is 2 June 2013, not 4 June as the name suggests
# (the commented-out SST file for this day is also "2june2013.nc") -- confirm.
trackjune4<- subset(track_in_2013, track_in_2013$yday=="153") #1
# IBI model data -- direction IN, 2013 (May 21-28, 29 and 31).
may1 <- "datos SST/21-28may2013.nc"
may29 <- "datos SST/29may2013.nc"
may31 <- "datos SST/31may2013.nc"
#june <-"datos SST/2june2013.nc"
# Read each NetCDF into a long data frame (long, lat, date, sst).
may1<-f1(may1)# multi-day file: subset per day below (days of interest = 21, 22, 23, 25, 26, 27)
sstmay29<- f1(may29)
sstmay31<-f1(may31)
#june4 <-f1(june)
# BUG FIX: the original used c(may1, sstmay29, sstmay31), which concatenates
# the three data frames' COLUMNS into one flat list, so sstin2013$sst later
# returned only the first file's values.  Row-bind the daily frames instead.
sstin2013 <- bind_rows(may1, sstmay29, sstmay31)
# 21-28 May: convert the file's time axis (treated here as hours since
# 1950-01-01) to dates and day-of-year, then split into one frame per day.
may1$ddmmyy<-as.Date(as.POSIXct(may1$date*60*60, origin="1950-01-01"))
may1$yday<-yday(may1$ddmmyy)
may21<-subset(may1, may1$yday=="141")
may22<-subset(may1, may1$yday=="142")
may23<-subset(may1, may1$yday=="143")
may25<-subset(may1, may1$yday=="145")
may26<-subset(may1, may1$yday=="146")
may27<-subset(may1, may1$yday=="147")
#mapping SST and tracks + plotting SST distribution----
# Repeated per-day pattern: (1) raster map of modelled SST with 1-degree
# contours and that day's tuna positions overlaid (small solid dot plus a
# large semi-transparent halo per fish id); (2) histogram of the modelled
# SST field with the SSTs observed along the tracks added as a blue rug.
?stat_contour  # help lookup left in during development
#may 21
mainMap + geom_raster(data =may21, aes(x = long, y = lat, fill = sst), interpolate = TRUE) + stat_contour(data =may21, aes(x = long, y = lat, z= sst), binwidth= 1, color="black") +
  scale_fill_gradientn(limits=c(14, 19.5), colours = col.pal, na.value = NA) +
  theme_bw() +
  geom_point(data = trackmay21, aes(x = Long, y = Lat, color =factor(trackmay21$id)), size=3 , show.legend=F) +
  geom_point(data = trackmay21, aes(x = Long, y = Lat, color = factor(trackmay21$id)), size=12, alpha=0.5, show.legend=F) +
  labs(title = "SST and BF tuna locations", fill = "SST")+labs(subtitle = " 21/05/2013 | Direction: IN") # with tracks
hist(may21$sst, col = "red", breaks = 50, xlim = c(14,19), main = "2013 SST May 21th", xlab= "SST")
rug(trackmay21$Obs.SST, ticksize = 0.05, side = 1,lwd = 5, col = "blue")
legend("topright", c("Model", "BF tracks"), fill=c("red", "blue"))
#may 22
mainMap + geom_raster(data =may22, aes(x = long, y = lat, fill = sst), interpolate = TRUE) + stat_contour(data =may22, aes(x = long, y = lat, z= sst), binwidth= 1, color="black") +
  scale_fill_gradientn(limits=c(14, 19.5), colours = col.pal, na.value = NA) + theme_bw() +
  geom_point(data = trackmay22, aes(x = Long, y = Lat, color =factor(trackmay22$id)), size=3 , show.legend=F) +
  geom_point(data = trackmay22, aes(x = Long, y = Lat, color = factor(trackmay22$id)), size=12, alpha=0.5, show.legend=F) +
  labs(title = "SST and BF tuna locations", fill = "SST")+labs(subtitle = " 22/05/2013 | Direction: IN") # with tracks
hist(may22$sst, col = "red", breaks = 50, xlim = c(14,20), main = "2013 SST May 22th", xlab= "SST")
rug(trackmay22$Obs.SST, ticksize = 0.05, side = 1,lwd = 5, col = "blue")
legend("topright", c("Model", "BF tracks"), fill=c("red", "blue"))
#may 23 (this panel also labels each position with its fish id via geom_text)
mainMap + geom_raster(data =may23, aes(x = long, y = lat, fill = sst), interpolate = TRUE) + stat_contour(data =may23, aes(x = long, y = lat, z= sst), binwidth= 1, color="black") +
  scale_fill_gradientn(limits=c(14, 19.5), colours = col.pal, na.value = NA) +
  theme_bw() +
  geom_point(data = trackmay23, aes(x = Long, y = Lat, color =factor(trackmay23$id)), size=3 , show.legend=F) +
  geom_point(data = trackmay23, aes(x = Long, y = Lat, color = factor(trackmay23$id)), size=12, alpha=0.5, show.legend=F) + geom_text(data = trackmay23, aes(x = Long, y = Lat, label= trackmay23$id) , color= "black") +
  labs(title = "SST and BF tuna locations", fill = "SST")+labs(subtitle = " 23/05/2013 | Direction: IN") # with tracks
hist(may23$sst, col = "red", breaks = 50, xlim = c(14,20), main = "2013 SST May 23th", xlab= "SST")
rug(trackmay23$Obs.SST, ticksize = 0.05, side = 1,lwd = 5, col = "blue")
legend("topright", c("Model", "BF tracks"), fill=c("red", "blue"))
#may 25
mainMap + geom_raster(data =may25, aes(x = long, y = lat, fill = sst), interpolate = TRUE) + stat_contour(data =may25, aes(x = long, y = lat, z= sst), color="black", binwidth= 1) +
  scale_fill_gradientn(limits=c(14, 19.5), colours = col.pal, na.value = NA) + theme_bw() +
  geom_point(data = trackmay25, aes(x = Long, y = Lat, color =factor(trackmay25$id)), size=3 , show.legend=F) +
  geom_point(data = trackmay25, aes(x = Long, y = Lat, color = factor(trackmay25$id)), size=12, alpha=0.5, show.legend=F) + geom_text(data = trackmay25, aes(x = Long, y = Lat, label= trackmay25$id) , color= "black") +
  labs(title = "SST and BF tuna locations", fill = "SST")+labs(subtitle = " 25/05/2013 | Direction: IN") # with tracks
hist(may25$sst, col = "red", breaks = 50, xlim = c(14,20), main = "2013 SST May 25th", xlab= "SST")
rug(trackmay25$Obs.SST, ticksize = 0.05, side = 1,lwd = 5, col = "blue")
legend("topright", c("Model", "BF tracks"), fill=c("red", "blue"))
#may 26
mainMap + geom_raster(data =may26, aes(x = long, y = lat, fill = sst), interpolate = TRUE) +
  scale_fill_gradientn(limits=c(14, 19.5), colours = col.pal, na.value = NA) +
  theme_bw() + stat_contour(data =may26, aes(x = long, y = lat, z= sst), binwidth= 1, color="black") +
  geom_point(data = trackmay26, aes(x = Long, y = Lat, color =factor(trackmay26$id)), size=3 , show.legend=F) +
  geom_point(data = trackmay26, aes(x = Long, y = Lat, color = factor(trackmay26$id)), size=12, alpha=0.5, show.legend=F) +
  labs(title = "SST and BF tuna locations", fill = "SST")+labs(subtitle = " 26/05/2013 | Direction: IN") # with tracks
hist(may26$sst, col = "red", breaks = 50, xlim = c(14,20), main = "2013 SST May 26th", xlab= "SST")
rug(trackmay26$Obs.SST, ticksize = 0.05, side = 1,lwd = 5, col = "blue")
legend("topright", c("Model", "BF tracks"), fill=c("red", "blue"))
#may 27
mainMap + geom_raster(data =may27, aes(x = long, y = lat, fill = sst), interpolate = TRUE) +
  scale_fill_gradientn(limits=c(14, 19.5), colours = col.pal, na.value = NA) + theme_bw() + stat_contour(data =may27, aes(x = long, y = lat, z= sst), binwidth= 1, color="black") +
  geom_point(data = trackmay27, aes(x = Long, y = Lat, color =factor(trackmay27$id)), size=3 , show.legend=F) +
  geom_point(data = trackmay27, aes(x = Long, y = Lat, color = factor(trackmay27$id)), size=12, alpha=0.5, show.legend=F) +
  labs(title = "SST and BF tuna locations", fill = "SST") +
  labs(subtitle = " 27/05/2013 | Direction: IN") # with tracks
hist(may27$sst, col = "red", breaks = 50, xlim = c(14,20), main = "2013 SST May 27th", xlab= "SST")
rug(trackmay27$Obs.SST, ticksize = 0.05, side = 1,lwd = 5, col = "blue")
legend("topright", c("Model", "BF tracks"), fill=c("red", "blue"))
#may 29 (single-day file, so the full sstmay29 frame is plotted directly)
mainMap + geom_raster(data =sstmay29, aes(x = long, y = lat, fill = sstmay29$sst), interpolate = TRUE) + stat_contour(data =sstmay29, aes(x = long, y = lat, z= sst), binwidth= 1, color="black") +
  scale_fill_gradientn(limits=c(14, 19.5), colours = col.pal, na.value = NA) +
  theme_bw() + geom_point(data = trackmay29, aes(x = Long, y = Lat), size=3 , color = trackmay29$id, alpha=3) +
  geom_point(data = trackmay29, aes(x = Long, y = Lat), size=12, alpha=0.5 , color = trackmay29$id) +
  labs(title = "SST and BF tuna locations", fill = "SST")+labs(subtitle = " 29/05/2013 | Direction: IN") # with tracks
hist(sstmay29$sst, col = "red", breaks = 50, xlim = c(14,20), main = "2013 SST May 29th", xlab= "SST")
rug(trackmay29$Obs.SST, ticksize = 0.05, side = 1,lwd = 5, col = "blue")
legend("topright", c("Model", "BF tracks"), fill=c("red", "blue"))
#may 31: SST raster + contours with the 31 May tuna positions, then the
# histogram of the modelled field with the track SSTs as a blue rug.
mainMap + geom_raster(data =sstmay31, aes(x = long, y = lat, fill = sstmay31$sst), interpolate = TRUE) + stat_contour(data =sstmay31, aes(x = long, y = lat, z= sst), binwidth= 1, color="black") +
  scale_fill_gradientn(limits=c(14, 19.5), colours = col.pal, na.value = NA) +
  theme_bw() + geom_point(data = trackmay31, aes(x = Long, y = Lat), size=3 , color = trackmay31$id) +
  geom_point(data = trackmay31, aes(x = Long, y = Lat), size=12, alpha=0.5 , color = trackmay31$id) +
  labs(title = "SST and BF tuna locations", fill = "SST")+labs(subtitle = " 31/05/2013 | Direction: IN") # with tracks
hist(sstmay31$sst, col = "red", breaks = 50, xlim = c(14,20), main = "2013 SST May 31st", xlab= "SST")
# BUG FIX: the rug originally used trackmay29$Obs.SST (copy-paste from the
# 29 May panel); this panel must show the 31 May track observations.
rug(trackmay31$Obs.SST, ticksize = 0.05, side = 1,lwd = 5, col = "blue")
legend("topright", c("Model", "BF tracks"), fill=c("red", "blue"))
#### total 2013 in
# NOTE(review): if sstin2013 was built with c() on the daily data frames,
# data.frame() below silently recycles mismatched columns and $sst only
# contains the first file's values; build sstin2013 by row-binding instead.
sstin2013<-data.frame(sstin2013)
mainMap + geom_raster(data =sstin2013, aes(x = long, y = lat, fill = sstin2013$sst), interpolate = TRUE) +
  scale_fill_gradientn(limits=c(14, 19.5), colours = col.pal, na.value = NA) + stat_contour(data =sstin2013, aes(x = long, y = lat, z= sst), binwidth= 1, color="black") +
  theme_bw() + geom_point(data = track_in_2013, aes(x = Long, y = Lat), size=3, colour = track_in_2013$id, alpha=5) + geom_point(data = track_in_2013, aes(x = Long, y = Lat), size=7, alpha=0.5 , color = track_in_2013$id) +
  labs(title = "SST and BF tuna locations", fill = "SST")+labs(subtitle = " 2013 | Direction: IN") # with tracks
hist(sstin2013$sst, col = "red", breaks = 100, xlim = c(14,20), main = "SST 2013", xlab= "SST")
rug(track_in_2013$Obs.SST, ticksize = 0.05, side = 1,lwd = 5, col = "blue")
legend("topright", c("Model", "BF tracks"), fill=c("red", "blue"))
### Dual-axis version: modelled SST (red, left axis) vs SST observed on the
### tuna tracks (blue, right axis).  c1/c2 below are defined but not used.
c1 <- rgb(216,238,192, max = 255, alpha = 120, names = "lt.green")
c2 <- rgb(255,100,100, max = 255, alpha =100, names = "lt.red")
par(mar=c(4, 4, 2, 4))
hist(sstin2013$sst, col = "red", breaks = 70, xlim = c(14,26), main = "SST 2013 | Direction: IN", xlab= "")
par(new=TRUE) ## Allow a second plot on the same graph
hist(track_in_2013$Obs.SST, breaks =50, xlab="", ylab="", ylim=c(0,10), xlim=c(14, 26), main = "", axes=FALSE, col = "blue")
mtext("Tuna Obs.",side=4,col="blue",line=2)
axis(4, col="blue",col.axis="blue",las=1)
mtext("SST",side=1,col="black",line=2.5)
legend("topleft", c("Model", "BF tracks"), fill=c("red", "blue"))
#### OUT ----
# Tagging dates and tuna tracks for 2013, direction OUT.
subset(dates2013, dates2013$inout=="out")  # printed for reference only
#subsetting tracking data per day
trackData2013<- subset(tracking_data, Year=="2013")
track_out_2013 <- subset(trackData2013, inout == "out")
head(track_out_2013)
arrange(track_out_2013,yday)  # NOTE(review): result not assigned; sort is discarded
##May: 21-28th, 29th, 31st
#June: 16, 18, 20-21, 23-24 (out)
#june2 <- "datos SST/6june2013.nc"
#june3 <- "datos SST/8june2013.nc"
#june4 <- "datos SST/14june2013.nc"
june5 <- "datos SST/16june2013.nc"
june6 <- "datos SST/18june2013.nc"
june7 <- "datos SST/20&21june2013.nc"
june8 <- "datos SST/23&24june2013.nc"
#june6 <- f1(june2)
#june8 <- f1(june3)
#june14 <- f1(june4)
june16 <- f1(june5)
june18 <- f1(june6)
june2021 <- f1(june7)
june2324 <- f1(june8)
#July: 5, 7, 9, 13, 17, 19, 21 (out)
# BUG FIX: the original reused the names july5 and july7 first for file paths
# and then for the data frames returned by f1(), so the later calls
# f1(july5) / f1(july7) received a data frame instead of the intended paths
# ("17july2013.nc" / "21july2013.nc") and failed.  Distinct path names avoid
# the shadowing.
path_july5  <- "datos SST/5july2013.nc"
path_july7  <- "datos SST/7july2013.nc"
path_july9  <- "datos SST/9july2013.nc"
path_july13 <- "datos SST/13july2013.nc"
path_july17 <- "datos SST/17july2013.nc"
path_july19 <- "datos SST/19july2013.nc"
path_july21 <- "datos SST/21july2013.nc"
july5  <- f1(path_july5)
july7  <- f1(path_july7)
july9  <- f1(path_july9)
july13 <- f1(path_july13)
july17 <- f1(path_july17)
july19 <- f1(path_july19)
july21 <- f1(path_july21)
# BUG FIX: c() on data frames concatenates their COLUMNS into one flat list
# (out_2013$sst would then hold only the first frame's values); row-bind the
# daily frames instead.  may1 is kept in the OUT pool as in the original --
# NOTE(review): confirm the May (IN-period) field really belongs here.
out_2013 <- bind_rows(may1, june16, june18, june2021, june2324, july5, july7, july9, july13, july17, july19, july21)
range(out_2013$sst)
head(out_2013)
# OUT + IN combined.  Built from the raw daily frames (rather than sstin2013)
# so this line does not depend on how sstin2013 was constructed above; it
# reproduces the original c(out_2013, sstin2013) contents, including may1
# appearing in both halves.
total_2013 <- bind_rows(out_2013, may1, sstmay29, sstmay31)
#histogram with all dates and tracks out
# NOTE(review): data.frame() here assumes out_2013 is already a (row-bound)
# data frame; if it was built with c() on data frames this silently recycles
# mismatched columns.
out_2013 <- data.frame(out_2013)
mainMap + geom_raster(data =out_2013, aes(x = long, y = lat, fill = out_2013$sst), interpolate = TRUE) +
  scale_fill_gradientn(limits=c(14, 19.5), colours = col.pal, na.value = NA) + stat_contour(data =out_2013, aes(x = long, y = lat, z= sst), binwidth= 1, color="black") +
  theme_bw() + geom_point(data = track_out_2013, aes(x = Long, y = Lat), size=3, colour = track_out_2013$id, alpha=5) + geom_point(data = track_out_2013, aes(x = Long, y = Lat), size=7, alpha=0.5 , color = track_out_2013$id) +
  labs(title = "SST and BF tuna locations", fill = "SST")+labs(subtitle = " 2013 | Direction: OUT") # with tracks
### Dual-axis histogram: modelled SST (red, left axis) vs tuna-track SST
### (blue, right axis).  c1/c2 are defined but not used.
c1 <- rgb(216,238,192, max = 255, alpha = 120, names = "lt.green")
c2 <- rgb(255,100,100, max = 255, alpha =100, names = "lt.red")
par(mar=c(4, 4, 2, 4))
hist(out_2013$sst, col = "red", breaks = 100, xlim = c(14,26), main = "SST 2013 | Direction: OUT", xlab= "")
par(new=TRUE) ## Allow a second plot on the same graph
hist(track_out_2013$Obs.SST, breaks =50, xlab="", ylab="", ylim=c(0,10), xlim=c(14,26), main = "", axes=FALSE, col = "blue")
mtext("Tuna Obs.",side=4,col="blue",line=2)
axis(4, col="blue",col.axis="blue",las=1)
mtext("SST",side=1,col="black",line=2.5)
legend("topleft", c("Model", "BF tracks"), fill=c("red", "blue"))
# Rug variant of the same comparison.
hist(out_2013$sst, col = "red", breaks = 1000, xlim = c(14,24), main = "2013 SST | OUT", xlab= "SST")
rug(track_out_2013$Obs.SST, ticksize = 0.05, side = 1,lwd = 5, col = "blue")
legend("topright", c("Model", "BF tracks"), fill=c("red", "blue"))
### total 2013----
par(mar=c(4, 4, 2, 4))
hist(total_2013$sst, col = "red", breaks = 100, xlim = c(14,26), main = "SST 2013", xlab= "")
par(new=TRUE) ## Allow a second plot on the same graph
hist(trackData2013$Obs.SST, breaks =50, xlab="", ylab="", ylim=c(0,10), xlim=c(14,26), main = "", axes=FALSE, col = "blue")
mtext("Tuna Obs.",side=4,col="blue",line=2)
axis(4, col="blue",col.axis="blue",las=1)
mtext("SST",side=1,col="black",line=2.5)
# NOTE(review): result discarded; "by_group" is also not a standard arrange()
# argument (dplyr uses .by_group) -- confirm intent.
arrange(trackData2013, by_group= df_dates)
# 2012 ----
## IN ----
# Tagging days 15-19 May 2012 (yday 136-140).
#tracks
trackData2012<- subset(tracking_data, Year=="2012")
track_in_2012 <- subset(trackData2012, inout == "in")
head(track_in_2012)
arrange(track_in_2012,yday) # 2012-05-15 = yday 136; result not assigned (console only)
# One track subset per day; trailing numbers presumably count the positions
# available that day -- TODO confirm.
track136 <- subset(track_in_2012, track_in_2012$yday=="136") #6
track137 <- subset(track_in_2012, track_in_2012$yday=="137") #9
track138 <- subset(track_in_2012, track_in_2012$yday=="138") #5
track139 <- subset(track_in_2012, track_in_2012$yday=="139") #4
track140 <- subset(track_in_2012, track_in_2012$yday=="140") #1
#SST for 15-19 May (in): one multi-day NetCDF, split per day below
may2012<- "datos SST/15-19may2012.nc"
may12<-f1(may2012)
# Convert the file's time axis (treated as hours since 1950-01-01) to dates
# and day-of-year.
may12$ddmmyy<-as.Date(as.POSIXct(may12$date*60*60, origin="1950-01-01"))
may12$yday<-yday(may12$ddmmyy)
may15<-subset(may12, may12$yday=="136")
may16<-subset(may12, may12$yday=="137")
may17<-subset(may12, may12$yday=="138")
may18<-subset(may12, may12$yday=="139")
may19<-subset(may12, may12$yday=="140")
# SST distribution for 2012 + SST from tuna tracks (dual-axis histogram:
# modelled field in red on the left axis, track observations in blue on the
# right axis).
par(mar=c(4, 4, 2, 4))
hist(may12$sst, col = "red", breaks = 50, xlim = c(14,26), main = "SST 2012 | Direction: IN", xlab= "")
par(new=TRUE) ## Allow a second plot on the same graph
hist(track_in_2012$Obs.SST, breaks =50, xlab="", ylab="", ylim=c(0,10), xlim=c(14, 26), main = "", axes=FALSE, col = "blue")
mtext("Tuna Obs.",side=4,col="blue",line=2)
axis(4, col="blue",col.axis="blue",las=1)
mtext("SST",side=1,col="black",line=2.5)
legend("topleft", c("Model", "BF tracks"), fill=c("red", "blue"))
#mapping SST and tracks + plotting SST distribution for 2012 ----
# Same per-day pattern as 2013: SST raster + 1-degree contours + that day's
# tuna positions, then a histogram of the modelled field with the track SSTs
# as a blue rug.  Note the warmer colour-scale limits (16-22) for 2012.
#may 15
mainMap + geom_raster(data =may15, aes(x = long, y = lat, fill = sst), interpolate = TRUE) +
  scale_fill_gradientn(limits=c(16, 22), colours = col.pal, na.value = NA) +
  theme_bw() + stat_contour(data =may15, aes(x = long, y = lat, z= sst), binwidth= 1, color="black") +
  geom_point(data = track136, aes(x = Long, y = Lat, color =factor(track136$id)), size=3 , show.legend=F) +
  geom_point(data = track136, aes(x = Long, y = Lat, color = factor(track136$id)), size=12, alpha=0.5, show.legend=F) +
  labs(title = "SST and BF tuna locations", fill = "SST")+ labs(subtitle = " 15/05/2012 | Direction: IN") # with tracks
hist(may15$sst, col = "red", breaks = 50, xlim = c(16,22), main = "2012 SST May 15th", xlab= "SST")
rug(track136$Obs.SST, ticksize = 0.05, side = 1,lwd = 5, col = "blue")
legend("topright", c("Model", "BF tracks"), fill=c("red", "blue"))
#may 16
mainMap + geom_raster(data =may16, aes(x = long, y = lat, fill = sst), interpolate = TRUE) +
  scale_fill_gradientn(limits=c(16, 22), colours = col.pal, na.value = NA) +
  theme_bw() + stat_contour(data =may16, aes(x = long, y = lat, z= sst), binwidth= 1, color="black") +
  geom_point(data = track137, aes(x = Long, y = Lat, color =factor(track137$id)), size=3 , show.legend=F) +
  geom_point(data = track137, aes(x = Long, y = Lat, color = factor(track137$id)), size=12, alpha=0.5, show.legend=F) +
  labs(title = "SST and BF tuna locations", fill = "SST")+ labs(subtitle = " 16/05/2012 | Direction: IN") # with tracks
hist(may16$sst, col = "red", breaks = 50, xlim = c(16,22), main = "2012 SST May 16th", xlab= "SST")
rug(track137$Obs.SST, ticksize = 0.05, side = 1,lwd = 5, col = "blue")
legend("topright", c("Model", "BF tracks"), fill=c("red", "blue"))
#may 17
mainMap + geom_raster(data =may17, aes(x = long, y = lat, fill = sst), interpolate = TRUE) +
  scale_fill_gradientn(limits=c(16, 22), colours = col.pal, na.value = NA) +
  theme_bw() + stat_contour(data =may17, aes(x = long, y = lat, z= sst), binwidth= 1, color="black") +
  geom_point(data = track138, aes(x = Long, y = Lat, color =factor(track138$id)), size=3 , show.legend=F) +
  geom_point(data = track138, aes(x = Long, y = Lat, color = factor(track138$id)), size=12, alpha=0.5, show.legend=F) +
  labs(title = "SST and BF tuna locations", fill = "SST")+ labs(subtitle = " 17/05/2012 | Direction: IN") # with tracks
hist(may17$sst, col = "red", breaks = 50, xlim = c(16,22), main = "2012 SST May 17th", xlab= "SST")
rug(track138$Obs.SST, ticksize = 0.05, side = 1,lwd = 5, col = "blue")
legend("topright", c("Model", "BF tracks"), fill=c("red", "blue"))
#may 18
mainMap + geom_raster(data =may18, aes(x = long, y = lat, fill = sst), interpolate = TRUE) +
  scale_fill_gradientn(limits=c(16, 22), colours = col.pal, na.value = NA) +
  theme_bw() + stat_contour(data =may18, aes(x = long, y = lat, z= sst), binwidth= 1, color="black") +
  geom_point(data = track139, aes(x = Long, y = Lat, color =factor(track139$id)), size=3 , show.legend=F) +
  geom_point(data = track139, aes(x = Long, y = Lat, color = factor(track139$id)), size=12, alpha=0.5, show.legend=F) +
  labs(title = "SST and BF tuna locations", fill = "SST")+ labs(subtitle = " 18/05/2012 | Direction: IN") # with tracks
hist(may18$sst, col = "red", breaks = 50, xlim = c(16,22), main = "2012 SST May 18th", xlab= "SST")
rug(track139$Obs.SST, ticksize = 0.05, side = 1,lwd = 5, col = "blue")
legend("topright", c("Model", "BF tracks"), fill=c("red", "blue"))
#may 19
mainMap + geom_raster(data =may19, aes(x = long, y = lat, fill = sst), interpolate = TRUE) +
  scale_fill_gradientn(limits=c(16, 22), colours = col.pal, na.value = NA) +
  theme_bw() + stat_contour(data =may19, aes(x = long, y = lat, z= sst), binwidth= 1, color="black") +
  geom_point(data = track140, aes(x = Long, y = Lat, color =factor(track140$id)), size=3 , show.legend=F) +
  geom_point(data = track140, aes(x = Long, y = Lat, color = factor(track140$id)), size=12, alpha=0.5, show.legend=F) +
  labs(title = "SST and BF tuna locations", fill = "SST")+ labs(subtitle = " 19/05/2012 | Direction: IN") # with tracks
hist(may19$sst, col = "red", breaks = 50, xlim = c(16,22), main = "2012 SST May 19th", xlab= "SST")
rug(track140$Obs.SST, ticksize = 0.05, side = 1,lwd = 5, col = "blue")
legend("topright", c("Model", "BF tracks"), fill=c("red", "blue"))
#all days
mainMap + geom_raster(data =may12, aes(x = long, y = lat, fill = sst), interpolate = TRUE) +
  scale_fill_gradientn(limits=c(16, 22), colours = col.pal, na.value = NA) +
  theme_bw() + stat_contour(data =may12, aes(x = long, y = lat, z= sst), binwidth= 1, color="black") +
  geom_point(data = track_in_2012, aes(x = Long, y = Lat, color =factor(track_in_2012$id)), size=3 , show.legend=F) +
  geom_point(data = track_in_2012, aes(x = Long, y = Lat, color = factor(track_in_2012$id)), size=12, alpha=0.5, show.legend=F) +
  # NOTE(review): subtitle still says 19/05/2012 although this panel shows all days
  labs(title = "SST and BF tuna locations", fill = "SST")+ labs(subtitle = " 19/05/2012 | Direction: IN") # with tracks
hist(may12$sst, col = "red", breaks = 50, xlim = c(16,22), main = "2012 SST May 2012", xlab= "SST")
rug(track_in_2012$Obs.SST, ticksize = 0.05, side = 1,lwd = 5, col = "blue")
legend("topright", c("Model", "BF tracks"), fill=c("red", "blue"))
range(track_in_2012$Obs.SST)
# OUT ----
#July: 10, 14-17 (out)
july_10_12<- "datos SST/10july2012.nc"
july_2012<- "datos SST/14-17july2012.nc"
j10<-f1(july_10_12)
j2012<- f1(july_2012)
# BUG FIX: c(j10, j2012) concatenated the two data frames' COLUMNS into a
# flat list, so july2012out$sst below only held the 10 July values; row-bind
# instead so the histogram covers all OUT days.
july2012out <- bind_rows(j10, j2012)
track_out_2012 <- subset(trackData2012, inout == "out")
head(track_out_2012)
arrange(track_out_2012,yday) # NOTE(review): result not assigned; sort is discarded
# BUG FIX: the per-day subsets below originally filtered track_in_2012;
# ydays 192-199 are the July (OUT) days, so they must come from
# track_out_2012 (the IN data has no rows for these days).
track192 <- subset(track_out_2012, track_out_2012$yday=="192")
track196 <- subset(track_out_2012, track_out_2012$yday=="196")
track197 <- subset(track_out_2012, track_out_2012$yday=="197")
track198 <- subset(track_out_2012, track_out_2012$yday=="198")
track199 <- subset(track_out_2012, track_out_2012$yday=="199")
# Dual-axis histogram: modelled SST (red, left axis) vs SST observed on the
# tuna tracks (blue, right axis).
par(mar=c(4, 4, 2, 4))
hist(july2012out$sst, col = "red", breaks = 100, xlim = c(14,26), main = "SST 2012 | Direction: OUT", xlab= "")
par(new=TRUE) ## Allow a second plot on the same graph
hist(track_out_2012$Obs.SST, breaks =50, xlab="", ylab="", ylim=c(0,10), xlim=c(14, 26), main = "", axes=FALSE, col = "blue")
mtext("Tuna Obs.",side=4,col="blue",line=2)
axis(4, col="blue",col.axis="blue",las=1)
mtext("SST",side=1,col="black",line=2.5)
legend("topleft", c("Model", "BF tracks"), fill=c("red", "blue"))
# 2011 ----
# Tuna track locations for 2011, split by migration direction.
trackData2011<- subset(tracking_data, Year=="2011")
track_in_2011 <- subset(trackData2011, inout == "in")
track_out_2011 <- subset(trackData2011, inout == "out")
# Extract the "thetao" (SST) variable from an already-opened NetCDF dataset
# and return it as a flat numeric vector with missing cells removed.
#
# x: an open ncdf4 dataset (as returned by nc_open()), not a file path --
#    unlike f1() above, which opens the file itself.
SST_fun <- function(x){
  na.omit(as.vector(ncvar_get(x, "thetao")))
}
# 2011 SST: open the NetCDF files and extract the SST values as flat vectors.
# NOTE(review): these connections are not nc_close()d in this section.
may2011<- nc_open("datos SST/SST_2011_05_26-29.nc")
may2011_IN<-SST_fun(may2011) #in (26-29 May 2011)
#|2011| July 23rd, 28th and 30th | OUT |
july_23_11<- nc_open("datos SST/23july2011.nc")
july_28_11<- nc_open("datos SST/28july2011.nc")
july_30_11<- nc_open("datos SST/30july2011.nc")
SST_july_23_11<-SST_fun(july_23_11)
SST_july_28_11<-SST_fun(july_28_11)
SST_july_30_11<-SST_fun(july_30_11)
# These are plain numeric vectors, so c() is the correct way to pool them.
july2011_OUT<- c(SST_july_23_11, SST_july_28_11, SST_july_30_11)
#### Histograms: modelled SST (red, left axis) vs tuna-track SST (blue, right axis)
#IN
par(mar=c(4, 4, 2, 4))
hist(may2011_IN, col = "red", breaks = 50, xlim = c(14,26), main = "SST 2011 | Direction: IN", xlab= "")
par(new=TRUE) ## Allow a second plot on the same graph
hist(track_in_2011$Obs.SST, breaks =50, xlab="", ylab="", ylim=c(0,10), xlim=c(14, 24), main = "", axes=FALSE, col = "blue")
mtext("Tuna Obs.",side=4,col="blue",line=2)
axis(4, col="blue",col.axis="blue",las=1)
mtext("SST",side=1,col="black",line=2.5)
legend("topleft", c("Model", "BF tracks"), fill=c("red", "blue"))
# OUT
par(mar=c(4, 4, 2, 4))
hist(july2011_OUT, col = "red", breaks = 100, xlim = c(14,26), main = "SST 2011 | Direction: OUT", xlab= "")
par(new=TRUE) ## Allow a second plot on the same graph
hist(track_out_2011$Obs.SST, breaks =25, xlab="", ylab="", ylim=c(0,10), xlim=c(14, 26), main = "", axes=FALSE, col = "blue")
mtext("Tuna Obs.",side=4,col="blue",line=2)
axis(4, col="blue",col.axis="blue",las=1)
mtext("SST",side=1,col="black",line=2.5)
legend("topleft", c("Model", "BF tracks"), fill=c("red", "blue"))
580d274ddb5999be01225a914c3d2c48b2c2cc5d | 84af362583c9562a8a5225986d877b6a5b26ef0a | /tests/testthat/test-yf_collection.R | e82efec45e41ef6b56268770648b3c1a73863301 | [
"MIT"
] | permissive | ropensci/yfR | 945ef3f23a6f04560072d7436dd5c28e0f809462 | b2f2c5c9f933e933821b5a92763c98f155b401bd | refs/heads/main | 2023-05-23T19:12:44.048601 | 2023-02-16T10:48:15 | 2023-02-16T10:48:15 | 375,024,106 | 19 | 6 | NOASSERTION | 2023-01-30T17:18:11 | 2021-06-08T13:44:11 | HTML | UTF-8 | R | false | false | 2,446 | r | test-yf_collection.R | library(testthat)
library(yfR)
## yf_index_list() must return a character vector of index names.
test_that("Test of yf_index_list()", {
available_indices <- yf_index_list()
# is.character() is the idiomatic check; comparing class(x) == 'character'
# misbehaves whenever class() returns more than one element.
expect_true(is.character(available_indices))
})
## Shared expectations for an index-composition table: it must be a
## tibble and contain at least one row. (Function name kept exactly as
## spelled -- the other tests in this file call it by this name.)
testhat_index_comp <- function(composition_df) {
  expect_true(tibble::is_tibble(composition_df))
  row_count <- nrow(composition_df)
  expect_true(row_count > 0)
}
## Live-web path: fetch every available index composition from the web
## and validate each with the shared helper. Skipped when offline or on
## CRAN, except under covr (skips are bypassed so coverage still runs).
test_that("Test of yf_index_composition() -- using web", {
if (!covr::in_covr()) {
skip_if_offline()
skip_on_cran() # too heavy for cran
}
available_indices <- yf_index_list()
for (i_index in available_indices) {
df_index <- yf_index_composition(i_index,
force_fallback = FALSE)
testhat_index_comp(df_index)
}
})
## Offline path: force the bundled fallback files instead of the web, so
## this test needs no network access and is never skipped.
test_that("Test of yf_index_composition() -- using fallback files", {
available_indices <- yf_index_list()
for (i_index in available_indices) {
df_index <- yf_index_composition(i_index,
force_fallback = TRUE)
testhat_index_comp(df_index)
}
})
## Download a small test collection over the last 30 days in a single
## session (do_parallel = FALSE) and check that some rows come back.
## Skipped when offline or on CRAN, except under covr.
test_that("Test of yf_collection_get() -- single session", {
if (!covr::in_covr()) {
skip_if_offline()
skip_on_cran() # too heavy for cran
}
# parallel test for collections
to_test_collection <- "testthat-collection"
df <- yf_collection_get(collection = to_test_collection,
first_date = Sys.Date() - 30,
last_date = Sys.Date(),
do_parallel = FALSE,
be_quiet = TRUE)
expect_true(nrow(df) > 0)
})
## Parallel (multisession) collection download. The unconditional skip()
## at the top fires on every run, so everything after it is effectively
## dead code, retained in case Yahoo Finance lifts its API rate limits.
test_that("Test of yf_collection_get() -- multi-session", {
# 20220501 yf now sets api limits, which invalidates any parallel computation
skip(
paste0("Skipping since parallel is not supported due to YF api limits, ",
"and collections are large datasets for single session download.")
)
# parallel test for collections
## Use half the available cores as future workers.
n_workers <- floor(parallel::detectCores()/2)
future::plan(future::multisession, workers = n_workers)
available_collections <- yf_get_available_collections()
if (!covr::in_covr()) {
skip_if_offline()
skip_on_cran() # too heavy for cran
}
for (i_collection in available_collections) {
df <- yf_collection_get(collection = i_collection,
first_date = Sys.Date() - 30,
last_date = Sys.Date(),
do_parallel = TRUE,
be_quiet = TRUE)
expect_true(nrow(df) > 0)
}
})
a547cea7b379b3e6dcb0d8280f82e864e9ed2e31 | 60668dba3bda50b082e200e8ae6c150e0c9f7bc3 | /man/tapering.Rd | 885e1204425cf86954dd1e5573b48707781cf678 | [] | no_license | qizhu21/CVTuningCov | 894b698876bd497fcadfc2e8c8aa1fe0304e67d6 | c9f4785734dafd35c5d82ac2ffa57ea9be2fc12d | refs/heads/master | 2021-01-04T03:09:20.856365 | 2014-07-31T00:00:00 | 2014-07-31T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,053 | rd | tapering.Rd | \name{tapering}
\alias{tapering}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
A Tapering Operator on A Matrix
}
\description{
Generate a tapering operator with a given dimension and tuning parameter. Multiplying
it elementwise (componentwise product) with a covariance matrix yields a regularized
estimator obtained by the tapering method.
}
\usage{
tapering(p, k = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{p}{
the dimension of a covariance matrix.
}
\item{k}{
the tuning parameter of the tapering method. The default value is 1.
}
}
\value{
a \code{p*p} matrix.
}
\references{
Cai, T, Zhang, CH and Zhou, H, Optimal rates of convergence for covariance
matrix estimation, Annals of Statistics, 38, 2118-2144 (2010).
}
\author{
Binhuan Wang
}
\examples{
p <- 5;
W <- tapering(p,k=2) ;
W;
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ tapering }
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
1cd92ed7038f3041a333ee5f74f8ffbf46561e74 | d79d64ae23f7005eb76ca48f66527d7d0cb0917b | /analysis.R | 9f041c2b5a03ceddd39b2128edc35f111446742d | [] | no_license | adamwilkinsonJCU/workshop1 | e03acbfa6372bf1c814bf9a885c58ef0553864ff | f2c4e3c89fe61c958bb64fc253a5fe015e910cd4 | refs/heads/master | 2020-04-28T02:31:00.550781 | 2019-03-11T01:27:20 | 2019-03-11T01:27:20 | 174,901,213 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 77 | r | analysis.R | x=seq(1, 10, len=1)
y=40*2 + rnorm(10,0,5)
plot(x,y)
sumary(x)
mean(x)
Change |
cdafed3a87a434059761331a13ab582ec3b17f61 | b0d0234d31bd230cca36b3789414ccfa2a54f7d7 | /R/parttree.R | 679f9eac6120390a6e2277f66f272e67cf453be1 | [
"MIT"
] | permissive | ClinicoPath/parttree | 31cd67a02a2d9d90b946fcfa8b1cf272e8ef4300 | 64c148d47074b66140bc63a1c54179d4f5569e95 | refs/heads/master | 2022-12-11T18:04:21.511776 | 2020-08-27T23:52:42 | 2020-08-27T23:52:42 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,639 | r | parttree.R | #' @title Convert a decision tree into a data frame of partition coordinates
#'
#' @description Extracts the terminal leaf nodes of a decision tree with one or
#' two predictor variables. These leaf nodes are then converted into a data
#' frame, where each row represents a partition that can easily be plotted in
#' coordinate space.
#' @param tree An \code{\link[rpart]{rpart.object}}, or an object of compatible
#' type (e.g. a decision tree constructed via the `parsnip` or `mlr3`
#' front-ends).
#' @param keep_as_dt Logical. The function relies on `data.table` for internal
#' data manipulation. But it will coerce the final return object into a
#' regular data frame (default behaviour) unless the user specifies `TRUE`.
#' @param flipaxes Logical. The function will automatically set the yaxis
#' variable as the first split variable in the tree provided unless
#' the user specifies `TRUE`.
#' @details This function can be used with a regression or classification tree
#' containing one or (at most) two continuous predictors.
#' @seealso \code{\link{geom_parttree}}, \code{\link[rpart]{rpart}}.
#' @return A data frame comprising seven columns: the leaf node, its path, a set
#' of coordinates understandable to `ggplot2` (i.e. xmin, xmax, ymin, ymax),
#' and a final column corresponding to the predicted value for that leaf.
#' @importFrom data.table :=
#' @export
#' @examples
#' library(rpart)
#' parttree(rpart(Species ~ Petal.Length + Petal.Width, data=iris))
parttree =
function(tree, keep_as_dt = FALSE, flipaxes = FALSE) {
## Accept rpart objects directly, plus the parsnip ("_rpart") and mlr3
## (Learner*Rpart) wrappers, which are unwrapped below.
if (!(inherits(tree, "rpart") || inherits(tree, "_rpart") ||
inherits(tree, "LearnerClassifRpart") || inherits(tree, "LearnerRegrRpart"))) {
stop("The parttree() function only accepts rpart objects.\n",
"The object that you provided is of class type: ", class(tree)[1])
}
## parsnip front-end: the fitted rpart model lives in $fit
if (inherits(tree, "_rpart")) {
if (is.null(tree$fit)) {
stop("No model detected.\n",
"Did you forget to fit a model? See `?parsnip::fit`.")
}
tree = tree$fit
}
## mlr3 front-end: the fitted rpart model lives in $model
if (inherits(tree, "LearnerClassifRpart") || inherits(tree, "LearnerRegrRpart")) {
if (is.null(tree$model)) {
stop("No model detected.\n",
"Did you forget to assign a learner? See `?mlr3::lrn`.")
}
tree = tree$model
}
## A root-only tree has no partitions to draw.
if (nrow(tree$frame)<=1) {
stop("Cannot plot single node tree.")
}
## Split variables actually used by the tree; partitions can only be
## drawn in 2-D coordinate space, hence the two-predictor limit.
vars = unique(as.character(tree$frame[tree$frame$var != "<leaf>", ]$var))
if (length(vars)>2) {
stop("Tree can only have one or two predictors.")
}
## Row names of tree$frame identify the terminal (leaf) nodes.
nodes = rownames(tree$frame[tree$frame$var == "<leaf>", ])
## Get details about y variable for later
### y variable string (i.e. name)
y_var = attr(tree$terms, "variables")[[2]]
### y values
yvals = tree$frame[tree$frame$var == "<leaf>", ]$yval
y_factored = attr(tree$terms, "dataClasses")[paste(y_var)] == "factor"
## factor equivalents (if factor): map numeric codes back to level labels
if (y_factored) {
yvals = attr(tree, "ylevels")[yvals]
}
## For each leaf, parse its root-to-leaf path (strings like "x>=2.5")
## into one data frame row per split: node id, variable, split value,
## comparison side, and the leaf's predicted value.
part_list =
lapply(
nodes,
function(n) {
pv = rpart::path.rpart(tree, node=n, print.it = FALSE)
node = as.integer(paste0(names(pv)))
pv = unlist(pv)
## First element of the path is "root", hence length(pv)-1 rows.
pd = data.frame(node = rep(node, times = length(pv)-1))
pv = sapply(2:length(pv), function(i) pv[i])
# pd$var = gsub("[[:punct:]].+", "", pv) ## Causes problems when punctuation mark in name, so use below
pd$var = gsub("<.+|<=.+|>.+|>=.+", "", pv)
# pd$split = gsub(".+[[:punct:]]", "", pv) ## Use below since we want to keep - and . in split values (e.g. -2.5)
pd$split = as.numeric(gsub(".+[^[:alnum:]\\-\\.\\s]", "", pv))
## Keep only the comparison symbols (< <= > >=)
pd$side = gsub("\\w|\\.", "", pv)
pd$yvals = yvals[nodes==node]
return(pd)
}
)
part_dt = data.table::rbindlist(part_list)
## Trim irrelevant parts of tree: for repeated splits on the same
## variable/side, only the tightest bound matters (max of ">" splits,
## min of "<" splits).
data.table::setorder(part_dt, node)
part_dt[, path := paste(var, side, split, collapse = " --> "), by = node]
part_dt = part_dt[,
.SD[(grepl(">", side) & split == max(split)) | (grepl("<", side) & split == min(split))],
keyby = .(node, var, side)]
## Get the coords data frame: vars[1] maps to the x axis, vars[2] to y
## (swapped when flipaxes = TRUE); ">" splits set lower bounds, "<"
## splits upper bounds, and unbounded sides become -Inf/Inf.
if (flipaxes) vars = rev(vars)
part_coords =
part_dt[, `:=`(split = as.double(split))][
, `:=`(xvar = var == ..vars[1], yvar = var == ..vars[2])][
, `:=`(xmin = ifelse(xvar, ifelse(grepl(">", side), split, NA), NA),
xmax = ifelse(xvar, ifelse(grepl("<", side), split, NA), NA),
ymin = ifelse(yvar, ifelse(grepl(">", side), split, NA), NA),
ymax = ifelse(yvar, ifelse(grepl("<", side), split, NA), NA))][
, .(xmin = mean(xmin, na.rm = TRUE),
xmax = mean(xmax, na.rm = TRUE),
ymin = mean(ymin, na.rm = TRUE),
ymax = mean(ymax, na.rm = TRUE)),
keyby = .(node, yvals, path)][
, `:=`(xmin = ifelse(is.na(xmin), -Inf, xmin),
xmax = ifelse(is.na(xmax), Inf, xmax),
ymin = ifelse(is.na(ymin), -Inf, ymin),
ymax = ifelse(is.na(ymax), Inf, ymax))]
if (y_factored) {
part_coords$yvals = as.factor(part_coords$yvals)
}
## Rename the prediction column to the response variable's real name.
colnames(part_coords) = gsub("yvals", y_var, colnames(part_coords))
if (!keep_as_dt) {
part_coords = as.data.frame(part_coords)
}
return(part_coords)
}
|
3c4940461a5b374c69153b7d3239a34efdca5e5f | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/BiBitR/examples/bibit.Rd.R | 6267d207ab760705859d479076d3e9a91122ea01 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 472 | r | bibit.Rd.R | library(BiBitR)
### Name: bibit
### Title: The BiBit Algorithm
### Aliases: bibit
### ** Examples
## Not run:
##D data <- matrix(sample(c(0,1),100*100,replace=TRUE,prob=c(0.9,0.1)),nrow=100,ncol=100)
##D data[1:10,1:10] <- 1 # BC1
##D data[11:20,11:20] <- 1 # BC2
##D data[21:30,21:30] <- 1 # BC3
##D data <- data[sample(1:nrow(data),nrow(data)),sample(1:ncol(data),ncol(data))]
##D result <- bibit(data,minr=5,minc=5)
##D result
##D MaxBC(result)
## End(Not run)
|
027fce138c288e707e22c4cc0b8b96ba24a4d7c9 | 3b24d08de0d126d7906c23d6574607ba0dc8281c | /tallpalmettoandmoon.rd | 9828a8c09c7401403150bccc97a5e7a997a85834 | [] | no_license | htbrdd/rdworks-files | de6602f90dcc1933f13f4fc970d5ad5315146569 | af17e751cf3a31b71de4c748fb1cf9d9032d16ef | refs/heads/master | 2021-03-19T08:29:59.037313 | 2018-01-21T01:06:43 | 2018-01-21T01:06:43 | 82,507,071 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 112,996 | rd | tallpalmettoandmoon.rd | ÒúzÒppwwww³pßýýpÙpYýýp p
BùÐ9ÐI¯»Ð»ÐËWwÄÄIpÛwwww³p[ßýýpiwwww³pëßýýÄ«pÝpÝ p]p] zzz üü ü
£1Iã©üüýÿüüü
ýÿäpéppýÿp+pp ýÿÄ ÄÄ ¹Ä Ä BùÐ Ð)¯»ÐЫWwÐÐÄ éïᤠ?÷ãí¤÷A3í¤
Ó÷ãí¤ó¯ í¤O _í¤ýÅ í¤½sí¤{ yí¤)÷ãí¤yW ûí¤_÷ãí¤ù!Åí¤_õí¤çû í¤©í¤ç í¤á)í¤eµ yí¤÷ãí¤åi yí¤s÷ãí¤å
ûí¤]wMí¤c# í¤])í¤ãG yí¤9÷ãí¤ãG yí¤
÷ãí¤aý ûí¤
wMí¤aý í¤
©í¤a í¤owMí¤a ûí¤o÷ãí¤a yí¤ËwMí¤ok í¤wÏí¤oë í¤wMí¤oë yí¤÷ãí¤oë í¤q÷ãí¤o ûí¤qwMí¤o í¤qwMí¤ï¥ í¤ÝwMí¤ï¥ yí¤Ý÷ãí¤ï¥ ûí¤ÝwMí¤mY í¤'wMí¤mY í¤'wMí¤íÿ yí¤÷ãí¤í} yí¤¹÷ãí¤mÙ í¤wMí¤í} í¤wMí¤í ûí¤÷ãí¤íÿ í¤m÷ãí¤í yí¤ïwMí¤k7 í¤I÷ãí¤í í¤ïwMí¤k7 yí¤I÷ãí¤k7 í¤I÷ãí¤k7 ûí¤I÷ãí¤k7 í¤IwMí¤k7 í¤÷ãí¤ëí yí¤I÷ãí¤k7 í¤÷ãí¤ëí í¤wMí¤ëí í¤÷ãí¤ëí í¤ñ÷ãí¤ë yí¤÷ãí¤ëk í¤ñ÷ãí¤ëí í¤÷ãí¤ëí í¤÷ãí¤ë í¤÷ãí¤ë yí¤÷ãí¤ë í¤÷ãí¤ë í¤wÏí¤ë í¤ñ÷ãí¤ë í¤[÷ãí¤i% í¤[÷ãí¤i% í¤[wMí¤i% í¤[÷ãí¤i% í¤[÷ãí¤i% í¤[wMí¤éÛ í¤§÷ãí¤éÛ í¤§÷ãí¤éÛ ûí¤Û÷ãí¤i§ í¤%÷ãí¤é[ í¤%÷ãí¤é[ í¤%÷ãí¤é[ í¤%÷ãí¤é[ í¤©÷ãí¤W í¤©÷ãí¤W í¤©÷ãí¤W í¤©÷ãí¤W í¤©í÷ãí¤W í¤©í÷ãí¤W í¤©í÷ãí¤×É í¤)É÷ãí¤×É í¤)É÷ãí¤×É í¤)É÷ãí¤×É í¤)É÷ãí¤×É í¤)É÷ãí¤×É í¤)É÷ãí¤×É3í¤«ÿ÷ãí¤U í¤«ÿ÷í¤Um í¤«÷ãí¤Um í¤«÷ãí¤Um3í¤«ÿ÷ãí¤U í¤«ÿ÷ãí¤U í¤«ÿwMí¤SÝ í¤¥÷ãí¤SÝ í¤+Y÷ãí¤Õ'³í¤%÷ãí¤S[ í¤%÷ãí¤S[ í¤%÷ãí¤Óñ í¤-÷ãí¤Óñ í¤-1³ ©ñ¤ ?÷ãí¤÷A ) )Õ¤Óñ3í¤-k±W «É¤ewMí¤u ) +¤Ó
+¤÷Ã í¤ ¿õÅ ¤-k1³ ¤e÷ãí¤u ) å¤QË
ó å¤u í¤esy -Ù¤¯·1³ -Ù¤ewÏí¤õ? ) ¯½¤QË] ¯½¤õ± í¤Ás /¤¯·1³ /¤÷ãí¤sÕ ©· /õ¤QË] /õ¤õ± ûí¤Ás ¡é¤¯·1³ ¡é¤÷ãí¤sÕ ©· !ͤQË9 !ͤõ± í¤+ó³ £¡¤/³ £¡¤ÁwMí¤sUýŤw- ©· #
¤Ñï
#
¤sU í¤+ó³ #ù¤/§ #ù¤S½¤+÷ãí¤sU}/¤ug ©· ¥Ý¤Ño ¥Ý¤s í¤÷qW %±¤/}%¥ %±¤Q¤+wMí¤sÿ¤u ë §¤Ñ
Ë §¤ó/ yí¤÷q '¤/ý§ '¤e«¤÷÷ãí¤sÿפu ë 'í¤Ñ
Ë 'í¤s í¤
Óñ! ¹Á¤¡G§ ¹Á¤e«¤÷wMí¤ó/1¤õ± ë 9¥¤ß]q 9¥¤qC í¤=E »¤!#§ »¤ÁO¤
Q÷ãí¤s1¤õ± ë »ý¤ß]Ý »ý¤ñù í¤E ;Ѥ!#§ë ;ѤÁO¤÷÷ãí¤s1¤õ± ½µ¤ß]Ý ½µ¤ñù yí¤ÿû =©¤!#§ë =©¤+å¤
Ó÷ãí¤ó¯ñ¤sU ¿¤ß]' ¿¤ñù í¤åÿ ¿á¤£ç ¿á¤++Y¤+å¤
Ó÷ãí¤ó¯ñ¤sUÕ'¤sU ?Ť]ó ?Ťñ í¤O}µ ±¹¤£i[¤ç ±¹¤
Q«ÿ¤¤
Ó÷ãí¤ó¯ñù¤sÕ×I¤qC I« 1¤õ±éÛ¤]I 1¤ÿ× í¤«ýi 1ñ¤£i§¤ÁA 1ñ¤=)7¤¤
Ó÷ãí¤ó¯qC¤s W¤ñù Iu ³Õ¤õ±W¤ÝK ³Õ¤ÿ× í¤uý
3ɤ#5©í¤Áw 3ɤ©¤+=¤=÷ãí¤qCqC¤sUWÿ¤ñù ËÑ µ¤õ±W¤ÝK µ¤ÿ í¤Q{£ 5¤¥©í¤Ó 5¤å[¤÷
Ó¤=÷ãí¤qCó¯¤si%¤ñ K½ 5å¤sÕW¤[á[ 5å¤}/ yí¤QûG ·Ù¤¥«¤+¿ ·Ù¤cñ¤÷
Ó¤=9¤wMí¤õ±GM¤qCs ¤ó/ëk¤ñù Íã 7½¤sUU¤[ 7½¤ýÅ í¤½yý ɤ¥{«ÿ¤+å ɤ¤
Qw¤=¹3¤Á÷ãí¤sUÙ©¤qCs ¤sk7¤ñ Íã Éõ¤sUU¤Û9í Éõ¤{y í¤
y Ié¤%G«ÿ¤+
O Ié¤åI¤÷w¤='é¤+÷ãí¤sUó¯¤w-_¹¤ñùs ¤sí¤ñËÓ¤w-1¤sUSݤÛ9É Ëͤ{y í¤
ãgë K¡¤§£-¤¤eµÃ¤cM¤Õ
ã¤
Ó+¤/}¤ ¿w¤÷÷ãí¤ss ¤÷ÃѤñùsU¤ó¯{¤wû3¤ñK¿¤uñù¤sÕÓq¤Yß Í
¤{ í¤Mg Íù¤§!-k¤w=¤Á»¤ ?£i¤c
㤽¤
Ó÷¤=/¤w¤÷÷ãí¤ss ¤uçÑo¤qCs¤ó¯ýŤuç{¤ñ]¤÷AgǤõ±qC¤s Ó¤Y_ÿ Mݤû3 yí¤Mç¥ Ï±¤'¯·¤¤Á_¤e£¤O;¤Au¤
Ó+¤/¤+¤
Ó÷ãí¤qCõ±¤uQˤqCsU¤ó¯ÿ¤õ±ýŤ1]ó¤uù!¤sUqC¤s Qˤ٩¥ O¤yé í¤©eÙ Á¤'鯷¤w=¤+_¤e£¤O½¤Áu¤
Ó+¤=¯·¤eÁ¤=÷í¤ó¯sU¤uçQˤñùõ±¤ó¯ÿפsU}/¤³]q¤õ±y
¤s ó¯¤sѤ٩k Áí¤y í¤_c7 AÁ¤9/}¤÷=¤w©¤£¤OѤ+O¤
ÓÁ¤-k¤eÁ¤=÷ãí¤qCõ±¤uÓ¤ñùõ±¤ó¯1¤sU}¡¤1]ó¤sÕyé¤s qC¤sѤÇ㷠å¤ù! í¤»ãk C¤9!#¤÷
Ó¤
Ó)¤+!#¤«u¤wc¤
ÓÁ¤¤Su¤eÁ¤=÷ãí¤qCõ±¤uÿ¤w-í}¤ñùõ±¤ó¯ñ¤s ÿ¤ÿ×ß]¤sUyW¤ó¯ó¯¤sß]¤Ça© Cý¤gÇ yí¤;ã ÅѤ9û!#¤
Ó÷¤
Ó)¤÷!#¤«u¤c¤=¤ÏݤO¤eÁ¤÷ãí¤ñu¤u1¤uo¤³uç¤qCñù¤s ÿפÿ×ß]¤syW¤ó¯ó/¤ó¯ß]¤ÅÑ«# Eµ¤ç í¤aáÛ Ç©¤;¡!#¤
Ó
Q¤
Ó)¤÷!#¤««¤w¤=¤Ïq¤eO¤ee¤c÷í¤ñùu¤õ±ñ¤õ±oë¤ç{qC¤s ÿפÿWß]¤ó¯û³¤qCs¤ó¯]¤ÅÑ+ G¤ç í¤áo Gá¤;¡£i¤
Ó÷¤=Ϥ
Ó!#¤)«¤w=¤¤Áå¤Áe¤÷ãí¤ñu¤õ±ñù¤sUýŤuçÿ¤gÇñù¤s ³¤ÿ×]ó¤ó¯{¤ñgsU¤qC]¤C)5 ÙŤåi yí¤ó·Õ¤½×¥¤
Ó+¤M¤
Ó£¤«c¤
Ó=¤»«¤e½¤Ác¤Áe¤å÷ãí¤ñu¤õ±ñ¤õ±ýŤuÿפgÇqC¤ó¯ñ¤ÿ×]ó¤ó¯û3¤ñgsU¤ó¯[á¤C«I¤å í¤]µÃ¤¿¥¤=Á¤å
ã¤=!#¤uå¤
Q
Ó¤»«¤eQ¤+¤+¤O÷í¤1uç¤õ±ñù¤s ÿ¤õ±1¤gÇó¯¤ó/ñg¤ÿë¤÷Aÿ¤ñù{¤ñùsÕ¤ó/Û9¤AÏ+¤ãG í¤91U¤¿ù%G¤
Q¤
ã¤u¤ ?¤u¤
Q
Ó¤»O¤Áu¤w¤Á¤O¹í¤uó/¤1uç¤sUñù¤s ÿפsUñ¤gÇs¤qCqC¤ÿi§¤÷Aÿפñ{y¤ñõ±¤qCÛ9¤Á=OQ¤aý í¤
±/¤?C%G¤=Á¤c
¤c«¤ ?Û¤÷=¤=÷¤»å¤+«¤w¤+¤O
Q¤e÷ãí¤sõ±¤ÿ×uç¤õ±ñù¤ó¯1¤sñg¤ç{sU¤qCó¯¤ÿi%¤uçÿפñù{¤ñu¤ñùYߤOQÃϤa í¤»E¤1U'
¤=e¤Ï
¤åϤ[¤Ñ
Ó¤=Á¤a=¤+O¤==¤+ ¿¤«e¤
Ó÷ãí¤ó¯u¤ÿ×÷äsUqC¤qC1¤sUqC¤çõ±¤qCó¯¤}¡i%¤u糤ñ{y¤³u¤qCÙs¤Ï+E;¤oë ûí¤Ý'¤³1'餤Ï
¤Oc¤§¤Ñw¤=e¤Í
Q¤wO¤
Q=¤w ¿¤;÷ãí¤gE÷äs qC¤ó/1¤s ó/¤eµu¤qCs ¤}¡éÛ¤uçñ¤1{y¤³uç¤ñùÙ©¤MOÙõ¤ï¥ í¤#5¤µÃ'餤«+¤ ¿
Ó¤Oc¤e§¤Ñ¤ï÷¤
Óc¤==¤wS¤»÷í¤aýqC¤qCñù¤qCsU¤íõ±¤}/W¤uñù¤ÿ×s¤uçsU¤ÿ×÷A¤ñùÇã¤ËÓ_¹¤í í¤m¡Ù¤5¯9¤ ?¤«+¤÷¤«¤e©¤QÁ¤ï+¤=¤==¤
÷í¤aýó¯¤ñùñù¤qCõ±¤íu¤ýÅW¤uñù¤ÿ×sU¤uõ±¤eµÇ¤I-S[¤ëí í¤%¤·S9û¤ÍÁ¤e+¤«¤e©¤½e¤ïÁ¤=¤
Ó¤
÷í¤ãGqC¤ñùqC¤ñgu¤í}õ±¤}¡Wÿ¤sUó¯¤ÿõ±¤usU¤eµÇ¢Ýñ ×ÝW¤éÛ yí¤©í¢ © W±»E¤Á¤Áe¤u
Ó¤+©¤;e¤¤å
Ó¤c
Ó¤¹÷ãí¤ãÙó¯¤ñó¯¤ñug¤í}u¤ýEWÿ¤sUó¯¤ÿu¤õ±õ±¤åiE;¢Û éí}¤W í¤+Yߢ M i½¤ÍÁ¤Áe¤Q
Q¤©¤;¤ïS¤c
Ó¤O
Ó¤ó÷í¤åó¯¤1s ¤SÝ÷äýÅW¤sÕó/¤}/ug¤sÕu¤åiCu¢AÍ iíýE¤Ñï í¤/;¢ M ëÁ½¤e¤¤Q
Q¤©í¤½ ¿¤¥w¤O
Ó¤ó÷í¤å
s¤ÿ×s ¤GMW¤s s¤}/ug¤sÕu¤åiC«¢[ k¥ í¢ ã í½×¤e¤¤Q÷¤w©í¤¹3w¤«÷¤s÷í¤åió/¤1s ¤Ù©W¤s s¤ýÅ÷äs ug¤å
ÃÏ¢§ íý í¢ mÑ£ë¤Ï¤s¤w ¿¤½+¤
Ó©í¤'é¤Ï
Ó¤÷ãí¤åi󯤳sÕ¤Ù©W¤ó¯sU¤ýÅ÷äs ug¤å
³¤uç]¢§ ïµ yí¢ ; o©#5¤eϤ©¤
ÓS¤½+¤
Ó)7¤§£+¤«w¤q¤e÷ãí¤uo¤åis ¤ÿ×sU¤Yß×I¤ó¯sU¤ýÅw-¤ó¯ug¤å鳤uÝK¢ á yí¢ Ñ áá¥{¤Á¤s ¿¤
ÓS¤½+¤
Óc¤ ?¤§£Á¤uw¤Í¤÷ãí¤s oë¤e5s ¤ÿõ±¤[oë¤uñù¤ó¯õ±¤ÑqC¤õ±Y_¢É aÅ í¢ u ã¹§!¤Á=¤/}Á¤
Ó¤e¤¥{Á¤uw¤K¤w÷í¤soë¤eµs¤ÿu¤[áo¤uqC¤qCõ±¤Ñó/¤õ±Ùs¢É c yí¢ « cñ'é¤+÷¤/}Á¤==¤eq¤#5å¤Q÷¤a
¤)¤
Ó÷ãí¤ó¯ÿW¤uç{y¤çs¤}/u¤ÝKo¤uqC¤qCõ±¤Ñs¤sUÙ©¢ åÕ yí¢ Ï eÉ9¤÷Á¤/}Á¤==¤eݤ£ë¤Q÷¤a;¤AϤ
Ó)ɤ
÷ãí¤{û×ɤ󯳤õ?ýE¤çs¤}/ug¤]磻uqC¤qCõ±¤Ñõ±¤sÇa¢ÿ ç yí¢ c g¤eI¤wå¤/}e¤
Ó¤Áݤ£ë ¿¤½+¤a;¤¤=I¤Ë)í¤k7磻qCñg¤sUýŤçsÕ¤Ay¤ug1¤õ±ó¯¤ñùu¤Ñug¤ó¯í¤õ±å¢¥ gå í¢ ùÙó¤Áï¤
Ó¤/}e¤
Ó¤ÁO¤õ¤¿ù¤á½¤+¤=ݤIwMí¤Wa¤ñgqC¤s}/¤e5sÕ¤ÃÏù!¤ugñ¤sUó¯¤ñùu¤Ñ÷äó¯íÿ¤s å¢ y½ í¢ ûó¤w¤
Ó ¿¤/}e¤
Ó¤+å¤_¤=³¤KQ¤÷=¤o¤©÷ãí¤UmãG¤ñó¯¤ó¯ÿ¤çsÕ¤ÃÏy
¤uñ¤sUs ¤³uç¤ÙsmY¤s ãÙ¢k ûõ í¢ Q {éá¤÷'¤§£¤Ïw¤+å¤e_¤½+¤au¤=w¤có¤%÷ãí¤S[å¤ñs ¤qCÿ¤çsU¤Cuù!¤uñ¤sUs ¤³uç¤YßmY¤sa¢k ýÍ yí¢
÷ }¡K¤=ñ¤§£¤Ïw¤+å¤e_¤½Á¤au¤+¤åͤ¥÷ãí¤SÝeµ¤ñsU¤ñùÿ¤çõ±¤Cuù!¤uñ¤sUs ¤³uç¤Yßo¤qCá5¢· ÿ
yí¢
+ ÿùq¤Ë¤§£ ¿¤«w¤+å¤e_¤;¡Á¤á÷¤Á¤Oá¤Õ¤Óí¤mYçû¤³u¤ñÿ¤çõ±¤Çù!¤õ±ñ¤ssÕ¤ÿ×÷äۻó¯¤w-çû¤ñùmY¢©} Ý í¢ Á ñ±'¤¤S
Ó¤%Ç ¿¤«¤÷å¤Á_¤9ûÁ¤áu¤åe¤Ï¤'suí¤ogǤåÿפçu¤Çù!¤sUqC¤s s ¤OÓs ¤÷ÃgE¤ñíÿ¢)Ù qÅí¢ óI¤Oõ¤¤±¯w¤w=¤+_¤9Á¤«¤ó_¤÷ãí¤oëù!¤åÿפçûõ±¤Çãù!¤sUqC¤s s ¤OÓsÕ¤ugy¤1k7¢+ óí yí¢ S sÁÛ¤«)¤Á¤±¯w¤w=¤+»¤'éÁ¤aϤss¤K÷ãí¤á5y
¤å
³¤çõ±¤Ù©gǤsUqC¤s s ¤OÓõ±¤uçyW¤ÿ×i§¢+i õ¥ ûí¢ u©¤u
ã¤ee¤±¯w¤w=¤+»¤'
Á¤aϤs©¤ou½í¤aû³¤c#ñ¤çu¤ÙsgǤsUqC¤s s ¤OÓ÷A¤õ±{y¤ýEW¢- uý í¢ ÷Ñ©m¤;
¤Á ?¤±¯w¤w=¤+»¤'
e¤áå¤]Ϥou;í¤aý{¤ãÙñ¤çu¤Û9ç{¤s s¤ó¯s ¤ËS}¡¤{yUm¢-{ wµ í¢ A©«¤
Ѥ5-w¤
Ó÷¤w¤%Ge¤á夹
ã¤
õQí¤ãG{y¤ãGñ¤çu¤[ç{¤s s¤ó¯sU¤Iwÿפ{Õ'¢¯Ç Eí¢ ? á-k¤)c¤5¯+¤
Ó÷¤w¤¥{¤ac¤9;¤9÷ãí¤ãGýE¤ãGñ¤çuç¤[ç{¤s s¤ó¯sU¤ËÓñ¤yWÓ¢¡
Å yí¢ Õ¹/¤s=¤5¯+¤
Ó÷¤w
Ó¤«¤¥{Õ¤¤9Ѥ9÷ãí¤ãG}¡¤ãGñù¤åiw¤[ÿפugó¯¤s s¤ó¯sU¤ËÓqC¤y
Ñé yí¢ ñ¡Ù¤»+¤5¯+¤=+¤w÷¤AÏ¢ëmñ¤
u¤]u½í¤c£³¤aýñ¤IÿW¤õ?s¤ó¯õ±¤qCsU¤ËÓ÷äeµ]¢£û
Õ í¢ CÉ£i¤Í ¿¤5¯+¤=Á¤
Ó÷¤A)¤·Õå¤
Ϥß÷í¤c£ñ¤añ¤Iw³¤sÕs¤ó¯õ±¤qCsU¢ #y[á¢#EÅí¢w1§!¢ uÅ+¤=Á¤
Ó+¤wϤ· ¤
Ó¤]÷ãí¤c#ó¯¤oëñù¤Iw³¤s sU¤ó¯õ±¤qCsU¢ ¥ÅåY_¢%å ûí¢÷×Ù'é¢ uÅÙ+¤e¤
Ó+¤wϤ5¯¤Ý+¤s÷ãí¤å
sU¤ï¥ñù¤ËÓ³¤s sU¤ó¯u¤ñùsU¢ %¡½Ù©¢%W½Åí¢u/9û¢ uÅ+¤e¤
Ó+¤wϤµC¤9÷í¤ÇãqC¤K=³¤s sU¤ó¯u¤ñùsU¢ §×õÅÑ¢¹Cõ í¢õÅ
é;¡¢ uÅ
é+¤e¤
Ó+¤wϤµC=¤9u½í¤GÍqC¤Í³¤s sU¤qCuç¤ñùsU¢ ¹ÍAc¢9/Í í¢s¡¿¢ uÅ¡+¤¤=+¤wϤ3g=¤¹µu;í¤Ù©ó¯¤Íñ¤qCõ±¤qC÷A¤ñùs¢ ¹ù
OQ¢»U
Eí¢ñ!ù3¢ uÅù÷¤ ?¤=Á¤=c¤3gw¤'é÷ãí¤Ù©s ¤Íñ¤qCõ±¤qC÷A¤ñùs¢ »¯ÝÍe¢½ÝÅí¢ÿû±5¯¢ uű÷¤Õ¤Á¤=O¤3¤'÷í¤Ù©u¤Íñ¤ñùu¤ys¢ ;UÉA¤ËÓ í¤5¯7?¢ uÅ÷¤õe¤c¤3ge¤'éu½í¢«íñ¤ñùu¤ys¢ =íûí¤Ï÷ í¤1 ¿!Á¢ uÅÁ÷¤õe¤c¢½Á÷í¢«ý¥1¤ñu¤yWs¢ =¥ù×¥¤Ac/í¤9û ¿!¢ uÅ÷¤)e¤cc¢«÷ãí¢«ýñ¤ñu¤yWs¢ =ýçÅý¤Ç¡í¤%Ç =ÇÑ¢ õùÑ
Ó¤)¤Oc¢OÑ÷ãí¢«µñ¤1ug¤yWó¯¢ ½±µå³µ¤Û»¡í¤!# ½{©¢ õù©
Ó¤)¤Oc¢
å©¡í¤ñgÕ'¢«ñ¤ÿ×÷äû³qC¢ 9Coµ¤Õ' í¤+Y »iᢠõùá=¤Ï ¿¤«c¢=á+Y¤Kí¤åi뢫Åñù¤oqC¢ '1ÅíGŤë í¤ñ ¹G¹¢ õù¹=¤q¢Ó¹¤;í¤mÙíÿ¢«ñù¤oqC¢ §i¤aý¯í¤ %ñ¢ õùñ=¤c¢Áñݤ§÷ãí¤éÛ異«Õñ¤oëqC¢ #yÕQÕ¤ñù #YÉ¢ õùÉ=¤c¢eÉK¤¥©í¤_9ãG¢«©ñ¤oëqC¢ ¡!© í¢ õù)=¤c¢)9¤¡G)í¤[åi¢)5)åñù¤oëñù¢ ¯{)å í¢ õ«Ù¤¢?«Ù¤¥{wMí¤Ù©ç¢)5+½ñù¤oëñù¢ -µ+½wéí¢ õ¤¢-¤»E÷ãí¤E;ç{¢)5õñù¤oëñù¢ õwWí¤s_¤½×¤¢Ã-é_¤=³÷ãí¤ÃÏù!¢)5¯Íñù¤oëñù¤C«ù!¤å
wéí¤9¤»Ec¤=¢g/¡©¤?Åñ½í¤w-w-¤Çãû³¢©ë¡
ñù¤á5ñ¤Ç磻y í¤õݤ9ûc¤K¢¡ùϤ9S¤SóOí¤GM{y¢©ë!Ýñù¤á5ñ¤Çak7¤yéwéí¤MÛ¤¹µÏ¤K=¢U£±;¤9÷ãí¤ÇaýE¢©#qC¤á5³¤GÍi§¤û3wWí¤
㩤9ϤK=¤_ ¿¢¥Ñ¤»E÷ãí¤E;}¡¢9¥í÷äù!qC¤á5³¤ÇaW¤{3í¤;)ɤ9ûϤK=¤_¢%ÁϤ?Å»í¤K¿ñ¢9g§¥u¤ù!qC¤aÿפÅÑ×I¤}/ í¤Q)7¤;¡«¤á=¤_å¢w¯'夵éí¤ÉAñù¢9g'ýu¤ù!qC¤aÿפAéÛ¤ÿ í¤u§¤¿ù«¤á=¤_å¢÷C¹Ñ¤7?wÏí¢ã9µñù¢»C9µõ±¤gEó/¤aÿפÍeëí¤1 í¤='¢ óW»©Ï¤á
Q¤;Á¢u»©=¢M绩÷ãí¢½;qC¢»C;õ±¤gEó/¤a³¢ ±S;mY¤qC í¤+q¢ óW;áϤ
Ó¤Á¢u;á÷¢Mç;á÷ãí¢u½Ås¢»C½Åõ±¤çûó¯¤aý³¢ =½Åo¤sUãí¢ s³=¹c¤
Ó¤Á¢u=¹Á¤5-óOí¤MOug¢»C¿sÕ¤ç{ó¯¤ãGñù¢ '1¿ í¢ õ¿ñ¤9
Ó¤¢u¿ñ¤³1u;í¢»C?ÕsÕ¤çs ¤ãGó/¢ ¥Å?Õwéí¢ ÷/±É÷¤9w¤á¢?E±É÷ãí¢»C1sÕ¤çs ¤ãGs¢ #1wí¢ wU³e¤9÷¤ÍÁ¢±¡³÷ãí¢9g³åõ±¤eµs¤ãGu¢ !³åwéí¢±3Ù¤9Á¤óÁ¢3ù3Ù _í¢9gµ½õ±¤c#u¢ ³µ½sÕ¤y
í¤s¢s5e¤]Á¢·U5õí¢9g5õõ±¢ µ+5õ{û¤}/ í¤Q
¢'5·éÁ¢Ég·é _í¢9g7Íõ±¢ aû7Íw-¤Û9yé¤1 í¤
Ó_¤%Çw¢'5É¡Á¢MçÉ¡÷ãí¢9gI
õ±¢ åWI
s ¤Û»ù!¤ó¯ í¤á¤'
=¤7?+¢OwIùõí¢9gËÝsU¤Ëñg¤Çã í¤9¤·+¢ÃK±õí¢9gÍsU¤K¿1¤ç{÷A¤W í¤© ?¤O¤µÃ+¢Å+M©í¢9gMísU¤ÍåÿW¤yWs ¤Wwéí¤«ÿ=¤
Ѥ1UÁ¤
¿¢GcÏÁ÷ãí¢·QO¥÷ä{yõ±¤Ï+}¡¤{yqC¤UwWí¤-k¤Q;¤±/Á¤Q+¢Y)Á÷ãí¢ÉÁýsU¤}/õ±¤OQýE¤}/ñù¤Ówéí¤/ýå¤u½¤¿ù+¤uw¢[;AÑ©í¢ÉcõqC¤ÿ×sU¤Ac{y¤1ñ¤]ó í¤£c¤O
¤¿+¤«=¢ÝaC©)í¢ÉcÅñù¤1sU¤C)û3¤ñù1¤[á í¤¥O¤M¤½W+¤O¢ß©Åá©í¢ÉcEÅñ¤ñsU¤Cõû3¤ñù1¤Y_³í¤'i)¤
Ó)¤;¡Á¤cå¢Qǹ÷ãí¢ÉGñ¤ñõ±¤ÅÑyW¤ó¯ÿW¤Ùwéí¤»E«¤+õ¤»EÁ¤å¢ÓKGñ)í¢5÷ÙÕñù¤qCsU¤Çay¤sUÿפCu í¤½«¤+õ¤9+¤=¢SóYÉ©í¢5+Ûñù¤ó¯sU¤GÍù!¤õ±ÿפAc í¤¿«¤Á_¤¹µ+¤
Ó¢U¹[wMí¢µÁ[åñù¤s sU¤GÍù!¤ugÿ¤Á½3í¤1¤¹µÁ¤÷¢WËÝÙ÷ãí¢3e]½ñg¤sõ±¤GÍíÿ¤Ï÷3í¤³±¤'éÁ¤÷=¢éñß÷ãí¢3ßõqC¤sõ±¤Ù©í}¤MÁwWí¤Q ¿¤¹3¹¤'¤+=¢i[_é)í¢1SÑÍqC¤õ±sÕ¤Ù©o¤Çaõ±¤ÿ× í¤«Á¤9ñ¤'é¤Á=¢kQ¡wMí¢1Ó
qC¤usÕ¤Ù©ok¤}/u¤SÝs ¤³ í¤Ïw¤¥e¤Q¤'é¤e=¢kmÓùwMí¢±SÝqC¤ugsÕ¤Ù©ok¤ó¯ñ¤Óqó¯¤ñ³í¤¤-÷¤Áˤ'é
Ó¤S=¤1Ue¢Ë-Õ±
c¤o÷ãí¤a{¢ýUu¤Ï+qC¤w-ó¯¤Ù©á·¤õ±ÿ¤Óqñù¤ñg3í¤¤¹Õ¤ß
¤ ?o¤¹3
¤³±
Ó¢·U×s¤ówMí¤eµok¢q×íñù¤ÍýŤGMÕ'¤ýÅýE¤gEñ¤qC í¤=c¤;;¤½+Y¤¹3½¤3g¢µÅWÁ¤Í)í¤çí}¤û3ug¤Ù©ñ¤Ë}/¤ÇãU¤ÿyW¤y³¤ó¯ í¤
ÓϤõ)¤u«ÿ¤9Q¤·å¤'é¤M¤awMí¤gÇë¤{yug¤Ùsñù¢=±iýýŤÇaUï¤1ç{¤yW1¤ó/³í¤w«¤©Í¤å)7¤9ûÑ¢ñëÑå¤%Çe¤u©¤_÷ãí¤ù!W¤ÿu¤Û»ñ¢½Õkµ}¡¤Ç×I¤ñeµ¤yéÿפs í¤u¤
ã]¤å©¤»Eu¢ó¡í©¤¥Á¤O«ÿ¤©÷ãí¤yéU¤1õ±¤[áñù¢½ mÿ¤E;W¤ñc#¤{ÿ¤sÕ3í¤u¤
ã
¤§¤;¡«¢s×mᤣi+¤å¥¤MwÏí¤û³Óq¤ñùsÕ¤]óñù¢»CïÅÿפCõë¤qCoë¤{y}¡¤u í¤eѤ
¤=¤½«¢uo¹¤£¤-¤ÏwMí¤{QˤqCs ¤_¹ñ¢9g᳤Ací¤qCo¤ýÅýE¤uç í¤ ?;¤Q¤
Óݤ±/c¢w¯áñå¤/ýw¤
Ó/ý¤
÷ãí¤{yÑ
¤ó¯s ¤Ñ
ñ¢9aÕñ¤OQ磻ó¯íÿ¤}/ýE¤÷Aí¤uï¤
Óˤ1×c¢Uãɤ/
Ó¤w!#¤;÷ãí¤ýEß]¤s ó¯¤Ñoñù¢¹1cñ¤Ï«á·¤ó¯í¤ÿ3í¤u¤w
¤3c¢ ±åå¤-ë
Ó¤w£¤ÑwÏí¤ÿW¤÷Ãÿ¤sÕqC¤SÝñ¤ÃÏuç¤mYñ¤K¿å¤s i§¤1 í¤OÛ¤wó¤µÃå¤'¤=³c¤¥=¤u¤ ¿©¤÷wMí¤ÿ×oë¤sUs ¤u³¤sÕó/¤S[ñ¤Au¤ï¥ñg¤Ëeµ¤sUW¤1 í¤O©¤+ͤ·¤Ý夿yc¤%
Q¤Ï¤ew¤+¤«wMí¤ÿ×o¤ó/õ?¤sUñù¤sÕó/¤Uñ¤Ï÷õ±¤oqC¤I-ç¤sUW¤ñi%¤{yuÇí¤¤ß¤½¤e¤7?=¤qÁ¤³±å¤«=¤Á=¤+Á¤
Ó'¤O÷ãí¤1mY¤ó¯õ±¤sUqC¤õ±qC¤Umñ¤MÁõ±¤oqC¤ÉAç{¤uýŤugc£¤ñùí}¤åiw
í¤K¤¤
Óu¤e_¢¡%û¡=¤Ë¤3gc¤÷¤=¤Á
Ó¤+e¤=I¤c÷ãí¤ñk7¤qCu¤sUó¯¤õ±qC¤sÕs¤oëñ¤ÍsÕ¤á·qC¢ ï£{
ù!¤uÿ¤ó¯åi¤ñùá5¤okwWí¤ï9¤=K¤å«¤es¤+¤#5=¤Ë¤5-O¤áQ¤Á=¤e
Q¤w¤=¤åwMí¤ñgé[¤ñgug¤s s¤uqC¤õ±}/¤ç{1¤Is¤aó¯¤[áó¯¤gÇû3¤uçÿ¤ÿçû¤ó/å
¤i§ í¤Ûs¤
Q¤uu¤M¤»
Ó¤¥
Ó¤o÷¤·ÕO¤Q¤Á=¤e÷¤w¤%¤÷ãí¤qCW¤û3s¤uqC¤uýŤgÇ1¢O©ÿs¤{yw-¤ÿs¤[ñg¤y
û³¤÷Ãÿ¤ýÅgǤsç¤Umwéí¤-_¤w_¤
ãu¤ ¿
¤õc¤¥ý
Q¤O¤;w¢)¤õ½¤e=¤e+¤M«¤=÷ãí¤qCUm¤û3sU¤uqC¤uýŤyÿW¢ÏMís ¤ýEuç¤1ó/¤[
ñ¤y{y¤÷Ãÿ¤{ù!¤s ù!¤Óq3í¤/©¤w©¤Ïˤ)Ϥ%G÷¤Oe¤Ñw¢wñÁ«¤©½¤)¤M
Ó¤+q¤
Ó÷ãí¤ó¯o¤sUó¯¤û3yW¤ugýŤyéÿ×¢Mãq¥s ¤}¡u¤1s¤Û9³¤yWá·¤û³yé¤s yé¤Ño³í¤!#
ã¤M¤©K¤
㫤§!w¤Oe¤Ñ÷¢Ó󫤩½¤ ¿Ï¤©Á¤=ݤ
Ó÷ãí¤sí}¤ñùu¤û3mÙ¤û3ÿ×¢Í;óýs¤ÿõ?¤³sÕ¤Ùõÿפ{á5¤yéû³¤õ±{û¤ÝK í¤#5
¤ÁϤ©K¤
㫤'¤ÏA¤u÷¢=sÑ«¤M¹¤Me¤¤÷÷í¤õ±ë¤aýo¤û³ÿ¢KÑõµs ¤ÿõ±¤ñs ¤Ùsÿ¤{yá·¤yéû³¤u}¡¤[ í¤¥{ѤeϤ©Ë¤
u¤'
w¤cÁ¤uw¢=u©u¤Ïñ¤
ñ¤Á÷ãí¤uW¤c£ok¤û3ÿ¢Ëu÷s ¤ÿsU¤ñùs ¤Ù©}¡¤}/á·¤yû3¤w-ÿ¤Õ'ÿפw--í¤«o¤_o¤u½¤¹3+¤+¤uw¢
å÷᫤©o¤ß÷ãí¤c£a¤yéÿע˫wÅs ¤ÿsU¤ñgsU¤GMýŤÿa¤ù!a¤UmÅí¤)É]¤;o¤u;¤¹µ+¤w¤«÷¢
å¹u¤Mo¤s÷í¤åãG¤û3}¡¢IÏ ó/¤ÿ×s ¤qCsU¤Ça{y¤ÿWa¤gÇeµ¤×I í¤)7ͤ»á¤)
¤9+¤=w¤«
Q¢
å ñѤM9¤ó÷í¤åñù¤÷AýE¤yWÿ¤MÁuç¤y󯤳ó¯¤qCsÕ¤Ç{y¤1a¤gÇç{¤Um í¤«¤»o¤O
¤9y¤=
Ó¤Ï
Ó¤õ¤³±u¤);¤ ?¤ó÷í¤åu¤ÿ×qC¤yW}¡¤Íåu¤yéó¯¤1ó/¤ó¯s ¤Ç{¤ña¤gÇù!¤U3í¤¥)¤»o¤c
ã¤9ûw¤
Q=¤å
Ó¤MÁ¤3gѤõ¤©S¤ß÷ãí¤c£w-¤yéuç¤y}¡¤Íõ±¤û3ó¯¤ñqC¤ó/s ¤Ç{¤ña¤gÇyW¤SÝwWí¤/
¤
¤M¤»E+¤
Ó=¤å
Q¤Ï+¤3g;¤3g÷ãí¤ÍýE¤ÍsU¤û³ó/¤ñqC¤ó¯sU¤E;û3¤ñùaý¤çû{y¤Ño3í¤/ý½¤
¤ ?Ϥ=)¤»E÷¤w¤=¤
㤵Ã;¢ÓK»í¢íõýE¤ËSs ¤{yqC¤qCñ¤s s¤E;yW¤ó¯ñ¤u{y¤ç{ÿ¤ß] í¤!#u¤
¤ec¤
Ó)¤»E÷¤wc¤==¤
w¤5-;¢×éõí¢ÍýE¤K¿ó¯¤{yqC¤qCñ¤s s ¤E;y
¤s ñù¤sUýŤç{ñ¤[á í¤¥å¤½¤+¤ws¤»Ew¤wc¤==¤
Ó¤µÃ;¢é¡õí¢
û3¤Íåó/¤ýÅqC¤qCñ¤s ó¯¤ÅÑyé¤sÕñù¤s }¡¤eµó¯¤Û»3í¤'ée¤Ñ¤
Ó¤eõ¤;¡
Ó¤÷O¤
Ó=¤;
Ó¤³1M¢km
ù÷ãí¢[Ýû3¤MOó¯¤ýEqC¤ó¯1¤só¯¤ÅÑy¤uñù¤ó¯}¡¤åiu¤Ù©3í¢ I±Ñ¤=¤;¡=¤+«¤w=¤;=¤1M¢km±÷ãí¢ïû3¤OQñù¤ÿqC¤ó¯ÿפsÕó/¤Cõá·¤ñùÿ¢ñA í¢ Iu¤Ë¤½
Q¤«¤
Ó=¤u¤±/M¢km}Áí¢'íû3¤Á=ñg¤ÿqC¤sÿ¤sÕó¯¤Cua¤ñÿ¢çí í¢ É¯Áu¤åo¤½
Ó¤u¤÷=¤u¤?CM¢ÓKÁqåí¢Ý¥yé¤Acñ¤ÿ×ñù¤sÿ¤õ±qC¤Cuaý¤1ÿ×¢¥³í¢ 7C«¤uߤ½¤eQ¤÷=¤«c¤¿M¢Yõ÷ãí¢ýû3¤Aãñ¤ÿ×qC¤s}/¤uñù¤Cuc£¤ÿÿ×¢ÿ?ý3í¢ 7CÑå¤;󤽤e½¤+¤OϤ½WM¢GcÑ÷ãí¢oµû3¤C)³¤1ñù¤sUýŤuñù¤Cõå¤ýEñ¢}Õµ3í¢ ·ù©c¤
¤»¤)Éc¤
¤+=¤OϤ½WM¢Ç½©÷ãí¢ß{¤Cõÿפññù¤sÕ{y¤÷ÃÿפWsU¤yWåi¤{ñù¢ý¯ í¢ ·ùá¤
ã¤)+¤©«¤ ¿
¤¤c«¤½
ã¢Ç½áñí¢óÅ{¤Cu1¤³qC¤sÕ{¤w-ÿ¤Wó¯¤{yeµ¤û3qC¢ûgÅ3í¢ ·¹=¤©a¤Ñ=¤)ɤ=¤c«¤;¡
ã¢ë'¹÷ãí¢{¤ÅÑÿפñqC¤sÕok¤×ÉqC¤}¡ç¤yéqC¢û3í¢ ·ñ
Q¤õ¤u=¤«q¤Á=¤å«¤;¡
¢kmñ÷ãí¢aÕ{y¤ÅÑÿפñqC¤õ±o¤UïqC¤ÿçû¤yó/¢y1Õ3í¢ ·É÷¤»_¤Oc¤«Ý¤Á=¤u¤]Õ¤
¢kmÉ÷í¢»{û¤ëkw-¤c#ÿ¤ñùqC¤u磻Õ'ñ¤ñy¤ç{sU¢ù í¢ ·+¤õ¤åc¤+Yݤe=¤÷¤]S¤
¢kqcí¢_å{y¤k7÷äå}/¤qCqC¤uo¤Óq1¤qCy¤çsÕ¢g/å í¢ 51Ù¤áõ¤=O¤-q¤e=¤=Q¤ó ¿¤I
¢×Ùqåí¢
õ©½{y¤íug¤å
}¡¤ó¯ñù¤uçgǤÙs³¤ó¯yW¤e5õ?¢çÅ©½3í¢ 51)e¤)¤÷O¤Ý¤9©¤e=¤
Ó;¤e¤ï½¢Ýa)÷ãí¢
))õýŤíu¤åiýE¤ó¯qC¤uyé¤ãGuç¤ï¥1¤syW¤åiu¢ey)õ í¢ ·«é ¿¤ß
ã¤+«¤+¤Ë
¤¤
ÓѤó¤ï;¢Y)«é÷ãí¢M+ÍýE¤íuç¤å}¡¤ó¯ñù¤uç{y¤á·sU¤oëÿפsU{¤c£÷âey+Í3í¢ Ý)¡
ã¤eu¤Ë
Ó¤qѤ¤w;¤e¤;¢Gc¡mí¤w-÷â-
}¡¤í}u¤åi{y¤sqC¤uçÿ¤mYqC¤á5ÿפu{û¢å³-
í¢ Ý)-ù
¤e«¤K=¤'u¤=¤÷
¤e¤Ñ¢Õ]-ù ¿¤S
í¢½¯Ý}/¤íÿu¤qC÷äýÅ{y¤sUñù¤÷A1¤ëíó¯¤á·ÿפuç{û¢cW¯Ý3í¢ Ýõ/±
¤S«¤Ë
Ó¤§_¤+
¤Ñ ?¤e¤÷¢å¥/±÷ãí¢ Q¡ÿ¤íÿu¤ñù÷A¤}¡{y¤sUù!¤éÛó¯¤á·ÿפw-{y¢c¡3í¢ Ýõ!9¤s¤=
Ó¤©í)¤+
¤ue¤=Á¤'Q¢g!÷ãí¢ Q!í}/¤mYõ±¤qCu¤ÿ{y¤sUyW¤Wó¯¤qCuç¤y
ãG¢ã!!í³í¢ Ýõ£Á]¤õe¤
Ó
Ó¤«Ï¤Á
㤫Á¤=Á¤'u¢ùG£Áõí¢«#¥ÿפíÿu¤qCsÕ¤ÿ×{û¤õ±{y¤Óñs ¤ó/u¤yåi¢áû#¥ í¢ ]_¥¤õe¤
Qw¤-
¤Á
¤«¤=e¤«¢ûi¥©í¢«¥ýÿ¤mYõ±¤ó¯s ¤1{¤õ±}¡¤QËs ¤ssÕ¤û3åi¢û¥ý÷äñ í¤å ¿¢ ]_%ѤM¤÷w¤¯·Ñ¤Á
ã¤Ow¤
ÓÁ¤'u¢ý%Ñ)í¢wO§µÿפmYõ±¤qCs ¤ñû3¤uÿ¤Ñ
s ¤s sU¤qCuç¤sç¢û§µu¤ñù3í¤Á¢ ß»'©¤÷¤=÷¤÷¤!#«¤e
ã¤å
Ó¤=Á¤»S¤««¢'©÷ãí¢wO¹ÿפÿ×w-¤gÇõ±¤qCó¯¤ñ{¤uÿפß]s¤sÕs¤qCuç¤sçû¢û¹õ±¤ñù3í¤Á¢ ß»¹á»¤we¤
Ó÷¤÷¤£ëϤM¤¿¤
QÁ¤; ¿¤O«¢W¹á÷ãí¢÷å9Åÿפ1÷ägEõ±¤ó/qäñùû3¤u糤]s¤sÕs¤ó¯u¤s gÇ¢y19Åõ±¤ñù í¤=Á¢ _»¹_¤e¤
Q
Ó¤Áw¤¥¤eM¤==¤=Á¤_¤OÏ¢q»¹wMí¢÷;³¤1uç¤ù!õ±¤qCñù¤qCû3¤uçqC¤Û»sÕ¤sUó¯¤sõ±¤sÕy¢ù;õ±¤qC í¤=Á¢ _;ñõ¤Á¤÷
Ó¤+¤%Ç=¤M¤=¤=Á¤_¤OÏ¢qy;ñ©í¢u=½Õ1¤ñu¤ysU¤ó¯ñù¤qCû3¤uçó¯¤ÙssU¤õ±qC¤s õ±¤sUû3¢g/½ÕsU¤ó¯3í¤
Ó+¤©¤;¡ ?¤«ÿM¤Á+¤¤e+¤¹µ÷¤ ?©¤
Óc¤
Ó+¤õe¤cO¢õ=É÷ãí¢u=¿1¤ñu¤ysU¤ó¯ñ¤ó¯yé¤÷As¤GÍsU¤uñù¤sÕsU¤õ±û3¤U÷A¤ÅÑuç¤yésU¤ó¯ í¤÷w¤
ãÁ¤;¡ ?¤+Y
¤Á÷¤=¤e+¤9û¤
Óc¤=Á¤õe¤Ïå¢õ×?÷ãí¢õÓ?åñ¤³u¤yõ±¤qCñ¤ó¯åi¤ÇsU¤uqC¤sÕs¤õ±{y¤Õ'÷A¤ÅÑõ±¤{s ¤s3í¤+
Ó¤;
Ó¤;¡¤«ÿ
¤e÷¤¤¤;¡Í¤
Óc¤
Ó+¤õÁ¤O¢u1±ÙwMí¢sw1½ñ¤ñùsU¤yWsU¤ó¯1¤sç¤C«sÕ¤÷Ãñ¤õ±ó¯¤u}/¤ãGug¤{y÷äCõqC¤ýEó¯¤õ± í¤Á
Ó¤;=¤½ ¿¤
¤9Q¤å
Ó¤Áå¤ ¿¤½×a¤÷O¤
Ó+¤)+¤å¢÷³wÏí¢sw³õñ¤ñùsU¤yWsU¤ó¯ÿפsçû¤C«yW¤õ±ó¯¤ug}/¤c#sÕ¤ÿug¤Cõñù¤ýÅó/¤õ? í¤A
Q¤½¤½¤u¤]Q¤
Ó¤Á)¤½×¤÷«¤
Ó+¤)+¤å¢÷ù3éwMí¢swµÍñ¤ñùsU¤qCw¤ñùs¤ó¯1¤sgE¤Acû3¤õ±qC¤÷Ãÿ¤aýsU¤1u¤ÃÏñg¤ÿñù¤uç í¤ ?¤u¤¿å¤å+¤K«¤S¤eM¤?Å_¤+«¤
Ó+¤¤÷w¤¢¯5¡÷ãí¢s·
ñg¤ñùs ¤suç¤ñùsU¤ó¯ÿפsUù!¤Á½û3¤uñù¤w-ÿפá5sU¤ñu¤Aãñù¤ÿñù¤÷Aí¤u¤?Åe¤=÷¤K»¤Á
㤱/_¤+«¤
Ó+¤ ?¤
Qw¤=å¢ ·ùwMí¢óÁ7Ýñù¤qCs ¤ó/uç¤qCsU¤ó¯ÿ¤sÕyé¤Ï÷û3¤uù!¤os ¤sõ±¤OQñ¤1 í¤Oc¤±/Á¤÷w¤q_¤eM¤1©¤u¤
Ó+¤=¤
Qw¤=¢ ÕɱwÏí¢óÁIñù¤qCó¯¤suç¤qCs ¤sÿ¤sÕû3¤MÁû³¤ugy¤ï¥ó¯¤sUõ±¤Ï÷ñ¤³ í¤Ïå¤1Á¤+
Ó¤Ýõ¤Ï¤³±M¤u¤÷w¤=¤÷
Ó¤=¢±Ë÷ãí¢óÁËíñù¤qCó¯¤su¤ó¯s ¤ó/ÿ¤õ±{¤Íeû³¤uyW¤mYó¯¤usU¤Ï+ñ¤1 í¤Ï¤³1+¤=¤Ï¤M¤3g
ã¤Á÷¤
Qw¤
Óe¤÷=¤
Ó=¢WKÁ¤«÷ãí¤ÿ×uç¢qeÍ¥qC¤ó¯qC¤su¤ó¯s ¤ó/ÿ¤õ±{¤Íû3¤ugû³¤íÿqC¤uçsU¤MO³¤ñg3í¤O¤3g½¤I
ã¤
ã¤5¯;¤ÁQ¤
Q¤
ÓÁ¤÷=¤
Ó=¢ñ3MÁ¤÷ãí¤ñùõ±¢qeMýqC¤ó¯qC¤sõ±¤ó¯sÕ¤ó/}/¤õ±ýE¤ËÓ{¤ug{¤k7ýŤÍ1¤ñù í¤=O¤µÃ½¤¤5¯;¤ÁQ¤÷w¤
ÓÁ¤÷=¤
Ó=¢qÏÑ+¤
ÓwMí¤ssÕ¢qOµqC¤s qC¤sõ±¤ó¯s ¤s}/¤õ±}¡¤Iwíÿ¤ëíýŤËS1¤qC í¤=O¤5-½¤¤· ѤÁQ¤÷w¤
ÓÁ¤÷=¤w=¢óÅÁ©¤÷÷ãí¤õ±s ¢qAqC¤sÕñù¤ó¯õ±¤ss ¤sýŤuÿ¤ÉA磻ëýŤIw³¤qC í¤=Ϥ· ½¤Ý¤7?u¤e½¤÷w¤÷Á¤
Ó¤=¢s/Aáw¤Á÷ãí¤uçó/¢qÃÅqC¤sÕñ¤s õ±¤ss ¤sýŤõ±ÿפÉAo¤éÛÿ¤åw-¤]³¤ó¯3í¤
Q«¤£ ?¤óu¤©K¢±WC¹O¤Á½¤÷w¤÷¤+c¤=¢õ×C¹÷ãí¢qÅqC¤sÕñ¤sUsÕ¤ss ¤sýŤõ±1¢ ÿÕÅá5¤Wÿ¤å÷A¤]óÿפó/ í¤÷«¤£¤óu¤©mo¢±WÅñO¤e
¤+
Ó¤+w¤+O¤Á=¢u1ÅñwMí¢qEÕqC¤õ±1¤sUs ¤sUó¯¤sU{y¤uñ¢ ÿÕEÕaý¤Umÿפåuç¤]ÿפs í¤w«¤£i¤ó«¤«
¢1³ÇÉå¤e
¤+
Ó¤+w¤+O¤Á=¢÷ÇÉwÏí¢ñ?GqC¤u1¤sUs ¤ss ¤sU{y¤uñ¢ ÿ Gc£¤Uÿפc#uç¤]ÿפs í¤w«¤£i¤]«¤«ÿߢ1³Ùå¤e
¤+w¤÷w¤+O¤e=¢÷ùÙwMí¢ñ?ÙåqC¤uçÿפsUó¯¤sUs ¤sU{¤uçñù¢ ÿ Ùåå
¤Õ'ÿפc#uç¤]ÿ¤sU3í¤÷«¤£ëe¤¹O¤%¢³YÙ¤
ã¤+w¤+
Ó¤+a¢wCYÙ÷ãí¢ñ?Û½ç¤sUó¯¤sUs ¤sU{¤uçñù¢ ÿ Û½åi¤S[1¤ãÙu¤]ÿפs í¤+u¤£ëe¤¹O¤-ᢳy[
Ó¤e
ã¤Á
Ó¤+=¤Áa¢¯[÷ãí¢ñ?[õç¤õ±qC¤sUó¯¤õ±{¤uó¯¢ }-[õç¤Óñ1¤ãÙu¤]ÿ¤sU3í¤+u¤£ëÁ¤¹å¤¯·¢³yÝé=¤ ¿Ï¤Á
Ó¤Á¤Áa¢ Ýé÷ãí¢ñ?]Íç¤uñ¤õ±ó¯¤õ±û³¤÷Ãó¯¢ }-]ÍgǤÑoñ¤aýu¤yuç¤mYÿ¤õ± í¤Áu¤'¤õå¤
å¤/»¢3Åß¡
Ó¤ ¿Ï¤Á
Ó¤Ác¤ea¢ ß¡wMí¢S_
ç¤uçñ¤õ±ó¯¤uyW¤÷Ãó¯¢ }-_
ù!¤W÷äqCñù¤au¤yWsU¤o}¡¤u³í¤Au¤c¤õ÷¤MÁ¤á¤
Óe¤©íM¢3Å_ùw¤ ¿õ¤e
Ó¤åI¢±_ù÷ãí¢SÑÝk7¤uó¯¤uy¤÷Ãs ¢ ýÃÑÝû3¤Wu¤ó¯ñù¤aõ±¤û3s¤yuç¤ñÿ¤õ? í¤Ñ¤+¤M
Ó¤MÁ¤á¤÷e¤«
㢵/Q±w¤S_¤
Ӥ幢±Q±÷ãí¢}ÃÓmÙ¤uó¯¤uçù!¤w-s ¢ ýÃÓ{¤Umu¤sñù¤aõ±¤û3ó¯¤û3sU¤ñg}¡¤uç í¤ ?;¤
Ó
Ó¤
ã
Ó¤M+¤á=¤÷Á¤«ÿ
¢µ/S
¤Õ=¤åK¢±SwMí¢gùSíãG¤ugk7¢ {gSíÿ¤Óñu¤sqC¤á5õ±¤yéó¯¤{qC¤s í¤w=¤
ã
Ó¤©Á¤K=¤÷e¤-u¢5ÕÁI¤9¢ÕÁ÷ãí¢eU¥eµ¤ugk7¢ {gU¥ÿפQËu¤sUqC¤á5õ±¤yéqC¤{yñù¤s í¤w¤
=¤©Á¤K=¤+e¤¯·«¢5×I¤Í¢×wMí¢ã×ýY_¢ {×ýñ¤Ñoõ±¤õ±ó¯¤oësU¤yWqC¤ýEñ¤sU3í¤Á«¤Ñ=¤õ+¤
Ó¤ÁÁ¤¡Ù
Q¢5×WÑ#µ¢WÑS¤
÷ãí¤{yw-¢áéµÝÍ¢ û±éµó/¤_¹õ±¤õ±ó¯¤oësU¤yqC¤}¡ÿפõ±3í¤Á«¤Ñ=¤_+¤÷¤AÁ¤#µ¢5×i©¡Ù¢Wi© ?¤;÷ãí¤ýE÷A¢më_¹¢ yëuç¤ÝÍõ±¤õ?s¤oësU¤ù!qC¤}¡ÿפõ± í¤å÷¤u=¤_÷¤÷¤AÁ¢5×ëá-k¤¯· ¿¢Wëáe¤u÷ãí¤ÿ×u¢ßkÅu¤ß]Um¢ ËÑkÅõ±¤us¤osU¤ù!ñg¤ÿWýŤ÷Ã í¤ ¿½¤)¤_+¤q÷¤eÁ¢·1í¹«¤!#e¢ñ3í¹å¤«÷ãí¤³õ±¢
msU¤ÝKW¢ ËÑmsU¤uçs¤osU¤gÇñg¤³³í¤«=¤+¤Ý+¤+¢·1mñ¤§£+¢qmñ+¤å÷ãí¤ñsU¢oïÕsU¤Yßë¢ ËÑïÕsU¤uçsU¤ï¥sU¤çûqC¤ÿ× í¤O¤»w¤Ý+¤Á¢·1oÉï¤9w¢qoÉ÷¤÷ãí¤ñgs¢ás ¤Çãí¢ Iuáõ±¤uçsU¤ï¥s ¤gÇñù¤1 í¤å¤w¤Ý+¤ ?+¢7aݤ;¡w¢{µaS¤
Ów¤=÷ãí¤ó¯ó¯¤sUuç¢'aåó/¤ÃÏoë¢ Iuaås¤wsU¤mYsÕ¤çñù¤ñg í¤¤á¤'+¤Õ÷¢7ãÙ¤=³
Q¢{µãÙ¤+
Ó¤
ÓwMí¤ó¯qC¤õ±u¢c½qC¤Aa¢ I«c½ÿפmYs ¤ó¯÷äÿñù¤ñ í¤å¤u ¿¤
Ów¤'«¢7åo¤¿y=¢{µåe¤Á=¤
Ó÷ãí¤s ñù¤uõ±¢Iåõñù¤OQaý¢ I«åõÿפmYs ¤ó¯ug¤ÿ×ñ¤ñù í¤=夫å¤
Ów¤S¤O«¢7geés¤³±¢ýeéÁ¤e¤÷ãí¤sÕñù¤uõ±¢çÍñù¤MÁå
¢ I«çÍÿפ1w-¤ç{s ¤ó¯u¤ÿ×ñ¤qC í¤=å¤ÏÁ¤
Ów¤»¤c«¢7gg¡©¤3ç¢ýg¡¤¤÷ãí¤õ±ñ¤ugõ?¢[ù
ñ¤ËSç¢ ÉOù
1¤ñu¤ñw-¤ñùó¯¤ó¯õ±¤1ñ¤qC í¤=c¤OÁ¤
Ó
Ó¤S¤åe¤cO¢7gùùa¤5-å¢ýûùùA¤å¤Á÷ãí¤u1¤÷ÃsÕ¢§yÝñ¤Içû¢ ÉOyÝ1¤ñu¤ñgug¤qCó¯¤ó¯sU¤ñ³¤ó¯ í¤
ÓϤå+¤
Ó
Ó¤=¤e¤cO¢7gû±¤·Õc¢ýûû±¤ ¿O¤e÷ãí¤uû3¢{³¤ÉAgE¢ Aa{w¤³1¤ñùõ±¤ñgu¤qCs ¤ó¯s¤ññ¤ó¯ í¤÷O¤
Ó¤w
Ó¤=å¤Á¤O¤c ?¢7gýõ¢½ýϤ©S¤µÃM¤e÷ãí¤uû3¤K¿w-¤Wÿ³¢E]ýíy¢ Aaýí÷A¤ñ1¤ñùõ±¤ñgu¤qCó¯¤s ó¯¤ñù1¤s í¤÷å¤
Ó¤
Ó
Ó¤
QÁ¤=¤å¤c¢;i}Á ¿¤Ñ)¢}ÁO¤_ ¿¤) ¿¤3gM¤e÷ãí¤uû3¤Í÷äyW÷äù!1¢Åóÿ¥yW¤}¡÷â Aaÿ¥uç¤ññ¤ñùsÕ¤qCõ±¤ó/ó¯¤ó¯ó¯¤ñùñ¤s í¤+O¤==¤
Ó
Ó¤
QÁ¤=¤å¤c¢;i¤Q
㢫¤õ ¿¤) ¿¤µÃM¤÷ãí¤÷ÃyW¤K¿÷äyWuç¤û3ÿפY_qC¤gE{û¤ÿu¢ Aýug¤ñùñ¤ñùsÕ¤ó/s ¤só¯¤ó¯ñ¤s 1¤s í¤÷O¤wc¤
Ó
Ó¤÷w¤
Q¤å¤¢;iñÑe¤u
¤;=¤§!«¤M¤) ¿¤µÃ)¤ ¿gçí¤ËSuç¤û3uç¤{ÿ¤Y_ÿ¤û³ýE¤ÿ×õ±¢ Aqµug¤ñùñ¤qCs ¤ó/s ¤sqC¤s 1¤sUÿפsU í¤+«¤+O¤w=¤÷w¤
Qw¤=夢;ió©Á¤«;¤Ïu¤§!u¤
ã¤M¤5-÷ãí¤ËSuç¤û3uç¤{ÿפÙõ{y¤{y}¡¤1sU¢ Asu¤ñgñù¤qCs ¤ó/s ¤sqC¤ó¯1¤õ±ÿפsU í¤Áu¤u¤
Ó=¤÷w¤÷
Ó¤=¤å¤S¢½µsáÁ¤Ou¤ÑM¤'é÷¤
¤M¤· ÷ãí¤Iwuç¤û3uç¤{ûÿ¤Ù©û3¤}¡ÿ¤1õ±¢ ÓÿõÅw-¤çûu¤ñgñù¤qCó¯¤ss ¤sqC¤ó¯ÿ¤uçÿ¤õ± í¤Á
¤
Ó
Q¤w
Ó¤÷
Ó¤=¤=Á¤;¢½µu¹+¤åϤ÷ߤ'éQ¤;e¤
e¤· wMí¤Iwõ±¤{yu¤ýEÿ¤ÇagǤÿ×ñ¤ñgs¢ SY÷ug¤çûu¤qCñù¤qCó¯¤só¯¤s qC¤sa¤u í¤eo¤÷=¤w
Ó¤÷
Ó¤=¤=e¤¢½µ÷ñ÷¤c¤«»¤9u¤;e¤
Á¤· ÷ãí¤Iwõ±¤{yõ±¤ÿ}¡¤E;gǤññ¤qCó¯¢ Õ%wÕõ?¤gÇõ±¤ó¯ñù¤qCó¯¤só¯¤s qC¤sa¤u í¤eo¤÷=¤w
Ó¤÷
Ó¤=¤
ÓÁ¤»A¢½µÉ
Ó¤=c¤å»¤»EѤuÁ¤
Á¤· wMí¤IsU¤ýÅõ±¤ÿ×ýE¤ñug¤]ù!¤ñùñù¤qCs ¢ Õ% õ?¤gÇõ±¤ó¯ñ¤ó¯ó¯¤sqC¤sÕqC¤sa¤u í¤K¤+¤=¤+=¤
Ó¤
Ó+¤»A¢=
Ó¤
Ó=¤õ¤¥{¤åѤ«Á¤½+¤·Õ÷ãí¤IsU¤ýÅõ±¤ÿ×}¡¤ñug¤[y¤ñùqC¤ó¯ó¯¢ Õ%åõ?¤gÇsU¤ó¯ñù¤ó¯qC¤sUqC¤sÕñù¤sUá5¤ug í¤á¤÷¤=¤+=¤
Ó¤
Ó+¤»A¢=Ù=¤w
Q¤=)¤'
e¤½¤ÏÁ¤½+¤[S¤§÷ãí¤éÛw-¤i%sU¤ýÅõ±¤³ýŤñùu¤ÙsyW¤qCó/¤s qC¢ Õ%½õ?¤gÇsU¤ó¯ñù¤ó¯qC¤sUqC¤sÕñù¤sa¤ugí¤÷¤=¤+=¤
Ó¤w÷¤=S¤«A¢=û
Ó¤w÷¤=)¤'éÁ¤=½¤c+¤Q÷¤ñ¤9S¤«÷ãí¤³÷A¤c#u¤ës¤}/sU¤ñù{y¤qCõ±¤Çayé¤s s¤sÕqC¢ Õ%
õõ?¤³uç¤ó¯s¤s ñù¤s ñù¤sUñg¤õ±ñù¤s í¤÷¤Á¤+¤w¤w÷¤
Ó¤ÏA¢=ûé=¤÷¤w©¤9Á¤=
¤+¤Q÷¤ñe¤] ?¤Ï÷ãí¤ñùu¤ÿ×w-¤ÿõ±¤ëó¯¤ÿsU¤ñùýŤqCõ±¤ÿuç¤Ñoû3¤s sU¤sÕqC¢ Õ%Íõ?¤³uç¤qCsU¤s ñ¤sÕñù¤sUñg¤õ±ñù¤s í¤÷¤Á¤+¤c¤w+¤=¤ÏA¢=û¡=¤+¤wM¤/¤uÁ¤=½¤+¤u
Ó¤ñÁ¤uS¤«e¤wMí¤ñùõ±¤1ug¤ÿ×õ±¤å
w-¤ÿ×ó¯¤ÿsU¤qC{y¤qCõ±¤ÿ×u¤_¹{¤sUsU¤õ±qC¢ Õ%
sÕ¤ñùõ±¤ó¯s ¤sUñ¤sÕñù¤sUñg¤õ±ñ¤sU í¤+c¤Á¤Á¤w¤÷w¤
ÓÁ¤¤µÃ ¿¤)¤Q=¤ÁÁ¤+
㤣e¤O+¤
Ó
¤
Ó÷¤«¤O ?¤óÁ¤Oe¤ÏÁ¤=÷ãí¤qCõ±¤³u¤1õ±¤å÷A¤1ñù¤ÿ×s¤ó¯{y¤ó¯sU¤1u¤]ó{¤sUõ±¤õ±qC¤}/ug¤yW÷äK¿sÕ¤ñùõ±¤ó¯s ¤sñù¤s ñù¤õ±ñg¤õ±ñ¤sU yí¤Ác¤Á¤Á¤w¤÷w¤w+¤¤+S¤¿ùe¤Ïå¤÷¤¤Á
¤¥e¤c+¤
Ó
ã¤w÷¤«¤å¤óÁ¤OÁ¤c¤
Q÷ãí¤ssU¤ñõ±¤1sU¤åuç¤ññ¤1s¤s {¤ss ¤ñùõ±¤[{y¤usÕ¤ugqC¤ÿõ±¤{sU¤Acw-¤sUsÕ¤ñùsU¤s ó¯¤sUñ¤sUñù¤sÕqC¤õ±ñ¤õ± í¤Ác¤Á=¤¤+å¤+
Ó¤w+¤¤+S¤¿+¤
cÁ¤u=¤¤e
¤¥{Á¤w¤÷
ã¤w÷¤Oc¤å¤ó+¤OÁ¤å+¤÷wÏí¤s s¤ñgsU¤1sU¤åiu¤ññ¤1s¤s {¤ss ¤qCõ±¤ÙsýŤu{y¤ÿ×sU¤{ys ¤Ac÷äõ±sÕ¤ñùsU¤s ó¯¤sUñ¤õ±ñ¤sÕñù¤u³¤u í¤eϤe¤c¤Áå¤+
Ó¤w+¤¤Á ¿¤¿w¤
+¤«
¤å½¤'
Á¤=w¤÷
ã¤w÷¤Oc¤åe¤+¤O+¤÷¤w÷ãí¤sÕó/¤qÃs¤ñs¤åiu¤ñ³¤ñó/¤õ?û3¤ss ¤qCõ±¤Ù©ýŤug{y¤ÿ×sU¤ýÅó¯¤Ac÷äsÕõ±¤ñùs ¤sUó¯¤sU1¤u³¤õ?ñù¤u³¤u í¤eϤe¤AϤeO¤Á=¤+w¤Á¤Á ?¤¿ù
Ó¤½÷¤«½¤ ¿½¤9Á¤
Ó
Ó¤+M¤A
Q¤åϤåe¤÷¤å
Ó¤=÷¤÷ãí¤sÕs¤qCó¯¤ñs¤åiu¤ñ³¤ñó/¤õ?û3¤sUó¯¤ó¯õ±¤ÇãýŤ÷ÃýŤÿ×s¤ýÅó¯¤A÷A¤õ±õ±¤ñùs ¤sUqC¤õ±1¤u³¤õ?ñù¤u³¤u í¤eϤe¤AϤeO¤Á=¤+
Ó¤=Á¤Á ?¤?C
Ó¤½÷¤Oo¤;¡Á¤w
Ó¤+)¤e
Q¤)¤Á¤÷¤O
Ó¤
Ó
Ó¤Á÷ãí¤õ±ó¯¤ó¯ó¯¤1s¤åiõ±¤ñgÿW¤ñgó/¤uyW¤sUó¯¤s õ±¤ÅÑa¤1s¤ýÅó¯¤Á=÷A¤õ±õ±¤qCó¯¤sUqC¤õ±1¤u³¤õ?ñù¤u³¤u yí¤ ¿÷¤ ¿c¤Ac¤e«¤Á=¤Á=¤=e¤+ ?¤?C=¤Q÷¤O
¤½Á¤w
Ó¤+)¤=¤=u¤+¤Í
Ó¤c=¤
Ó
Ó¤e÷ãí¤uçqC¤s ñù¤ñó¯¤eµsU¤ñgÿ¤qCqC¤ugyW¤õ±qC¤sUõ±¤ÃÏc£¤³ó¯¤}/ó/¤Ï÷÷A¤õ±u¤qCqC¤sUó¯¤õ±ÿפuñ¤õ? í¤Ac¤e«¤Á
Ó¤+=¤=e¤Á ?¤1
Q¤Q
Ó¤Ïߤ=³Á¤+=¤Á)¤=¤=u¤+¤Í
Ó¤c¤w=¤÷ãí¤uçó¯¤ó¯qC¤ñqC¤eµsU¤ñg}¡¤ó¯qC¤÷Ãy¤õ±qC¤õ±sU¤Aãå¤ñó¯¤}¡qC¤Ï÷÷A¤õ±u¤qCqC¤õ±ñù¤õ±ÿפuç1¤u3í¤Áߤe=¤Á¤=¤Á¤1U
Ó¤Ñ
Ó¤cͤ±/Á¤Á=¤Á
¤
Ó;¤=w¤
Ó¤c¤÷=¤S÷ãí¤w-qC¤sñg¤ñó¯¤åis ¤qCýE¤ó¯aý¤õ±qC¤õ±õ±¤OQeµ¤ñó¯¤}¡ó¯¤Ï+uç¤õ±uç¤qCñù¤õ±qC¤uc£¤õ± í¤Ás¤Á=¤Á¤=¤Á ¿¤³1=¤Ñ
Ó¤c¤1+¤¤Á
¤
Ó;¤=
Ó¤Í
Ó¤cå¤+÷ãí¤sUñ¤ñó¯¤eµó¯¤qCýE¤ó¯aý¤õ±ñù¤uçsU¤Ï÷çû¤ñó¯¤}¡qC¤MO÷äõ±uç¤qCñù¤õ±qC¤õ±å
¤õ±]Óí¤S£i¤eߤÁ=¤Á¤=¤e¤3
Ó¤Ñ
Ó¤c;¤1U+¤c¤o¤w
¤=
Ó¤Í=¤å¤÷÷ãí¤sUñ¤ñqC¤eµó¯¤qC{¤sUa¤uçû3¤ñùw-¤Å_ù!¤ñùqC¤ÿqC¤Íug¤uçu¤ó¯ñù¤õ±qC¤õ±c£¤u×I¤ýÅ í¤½)7¤eߤÁ=¤Á¤
Óe¤¤3g=¤u=¤_¤;!S¤M¤o¤+
ã¤=
Ó¤Í=¤åc¤+wÏí¤s ñ¤ñqC¤ýEw¤ó/qäqCû3¤õ±a¤uçû3¤ó¯ug¤ëkó/¤çûy¤ñùó¯¤}¡ó¯¤K¿ug¤uuç¤ó¯ñù¤õ±ñù¤uc£¤uo¤å í¤óq¤eߤe¤Á¤
Ó¤e¤µÃ
Ó¤Ñ
Ó¤õ¤
Q¤¤
ÓM¤o¤ÁM¤=¿¤
QÕ¤;=¤åc¤w÷ãí¤sÕ³¤ñgñù¤}¡uç¤sñg¤ó¯yé¤õ±Ó¤s u¤ëk³¤yû3¤ñgqC¤ÿó¯¤OÓ÷A¤õ±u¤÷Ãu¤ó¯ñù¤õ±ñù¤uãG¤÷Aå¤oí¤¯·¤¤ec¤
Ó=¤Á ?¤1
Ó¤u=¤
ã¤
ã;¤I¤+-k¤Á©¤
Ó¤÷¤Ñ¤Ï¤÷ãí¤sÕ³¤ñgñù¤}¡uç¤sñg¤ó¯yé¤õ±Ó¤sUsÕ¤k7ýE¤{{¤ñgqC¤ÿó¯¤Ïw÷A¤õ±qC¤ó¯ñ¤uñù¤uçQˤí}¡í¤£ë¤ec¤
Ó=¤e¤1 =¤«¤=
¤½M¤Iw¤Á¯·¤ ?ߤ
Ó¤
Q¤u¤«¤+÷ãí¤sUÿפñgñù¤ÿuç¤ó/ñg¤ó¯ù£¤÷AQˤõ±s ¤k7û3¤ýÅ{û¤qCñg¤ÿ×qC¤Ïwuç¤uqC¤ó¯ñ¤u]¤ë¡í¤©¯·¤e¤
Ó=¤ee¤1U
Q¤÷=¤=;¤u_¤I
Ó¤Á¿ù¤
Óå¤+e¤u¤åO¤+÷ãí¤õ±ÿפñùñ¤ÿu¤sU1¤sUÁ½¤uqC¤ëígǤÿ×}¡¤ó¯ñù¤ÿ×qC¤Ï+u¤uqC¤ó¯ñù¤uUm¤U í¤«ÿ«¤e¤
Ó=¤ee¤1U=¤«¤
ÓѤ«»¤=¤e?Ť+O¤+e¤u夫¤ÁwMí¤sUÿפñùñ¤ÿ×õ±¤sU1¤sUÁ½¤ugñg¤ëí礳ÿ¤ó¯ñù¤ÿ×qC¤MOu¤uó¯¤ó¯ñù¤uë¤ÓqÅí¤/¹¤e¤÷¿¤Á¤³1=¤u¤
ÓO¤c¤å¤ ¿?Ť+O¤+Á¤«å¤u¤Á÷ãí¤õ±ÿ¤ñùñ¤ÿ×õ±¤sU1¤sUÁ½¤÷Ãñ¤ëíåi¤ñ1¤ó¯ñù¤ÿqC¤MOõ±¤ugqäsñù¤umÙ¤ÑoEí¤!#ˤ¤÷
Ó¤+¤³1¤«¤
ÓO¤=]¤µÅ¯¤+O¤+Á¤«å¤u¤Á÷ãí¤õ±ÿ¤ñùñ¤ÿ×õ±¤sU1¤sUóÁ¯í¤ëíc#¤qC1¤ó¯ñù¤ÿ×ñù¤MOsU¤uçó¯¤sñù¤uçá·¤ß]Åí¤£ë]¤ ¿å¤+=¤+¤3=¤«c¤wc¤
Ó
¤IµÅ/Á¤Áu¤ÁÁ¤«O¤=ѤewMí¤õ±}¡¤qC1¤1sU¤õ±ÿ¤õ±W¤qCmÙ¤ëaý¤ó¯ñù¤s ñ¤1ñ¤ÅÑw¤qCs¤÷AqC¤sUmY¤Û9 í¤%G'¤+=¤ ?÷¤=Õ¤;¡c¤Oc¤w¤
Ó
¤¹¤=©¤Áu¤Á+¤OO¤=ѤÁ÷ãí¤õ±}¡¤qC1¤1sU¤õ±ÿ¤õ±ï¥¤oë÷äw{¤ëá5¤sUñù¤s ñ¤1ñ¤ÅÑ÷A¤qC{y¤sUa¤Ù© í¤'éo¤+
¤= ?¤;¡c¤Oc¤w¤+K¤
ã¤Õ ¿¤Ý¤Áu¤Á+¤OO¤=ѤÁ÷ãí¤uýE¤qC1¤1sU¤õ±ÿ¤õ±aý¢Sÿ#µá5¤õ±ñù¤sUñ¤ÿ×ñ¤yWw-¤Õ'u¤ó¯{¤sUåi¤ÇÅí¤»E¤ÁM¤
Óe¤+Y ¿¤ÏϤOå¤+¤
¢!%¥©¤eѤÁ+¤OO¤=;¤e÷ãí¤uýE¤qC1¤1sU¤õ±}¡¤uåi¢Q%aý¤uçñù¤sUñ¤1³¤û³÷äÕ'u¤ó¯û3¤õ±çû¤E; í¤=³)¤ÁM¤wÁ¤+Y¤
ãϤOO¤Á'¢#·%á¤eѤÁ+¤OO¤=;¤e÷ãí¤ug{y¤ó¯ÿפñs ¤uýE¤uù!¢Ù˧Åa¤õ±1¤1³¤{yu¤Õ'õ±¤s yé¤uç{¤Á½ í¤?Å
㤩¤wÁ¤+Ye¤
ϤOO¤Áo¢¥}'¹_¤e;¤ew¤c«¤
Ó
¤÷ãí¤ug{y¤ó¯ÿפñs ¤uýE¤uû³¢E]¹å¤uÿפñÿפ{yu¤Us ¤sUí¢ u¹ í¢ ÏS¹ñï¤+w¤«ÿe¤
«¤c«¤eó¢'¹ñϤe;¤ew¤c«¤
Ó
¤÷ãí¤ug{y¤ó¯ÿפñs ¤u{y¤ug{y¢Ãá9Õç{¤uçÿ¤ñÿפýÅu¤Õ's ¤so¢ Ï9Õ í¢ Ã+»ÉK¤+
Ó¤+YÁ¤¤S÷¤c¢9{»ÉQ¤
¤ew¤c«¤
Ó
㤠¿÷ãí¤÷Ã{¤ó¯ÿפñs ¤u{y¤ug}/¢Á_;í}¤ñÿ¤w-ug¤ñgõ±¤Õ'ó¯¤sUá5¢ Ï; yí¢ CÓ½
¤+
Ó¤+Y¤=M¤q¢;£½Ï¤
ã¤ew¤c«¤
Ó
ã¤S÷ãí¤w-{¤ó¯ÿפñs ¤u{¤uç³¢ÏM½åo¤ñùû3¤qCsÕ¤Õ'ó¯¤sUaý¢ )½åEí¢ E=ÙߤÁ=¤¥A¤
ã¤co¢½é=Ùå¤SM¤ew¤c«¤
Ó÷ãí¤ó¯ÿפñùó¯¤uc#¢Ëu¿½c£¤ñ{¤qCsÕ¤Óqó¯¤õ±åi¢ ¿½ í¢ ÇO?¤Á
Ó¤-¤=
ã¤cߢ¿?]¤e
Ó¤«¤
Ó÷ãí¤ó¯ÿפñùó¯¤uçc#¢IÏ?õåi¤ñù{¤ñùõ±¤ÓqC¤uç¢ M?õ í¢ Gu±éa¤e=¤-kÁ¤
㤢¿û±é]¤
Ó¤«¤
Ó÷ãí¤s ÿ¤ñùó¯¤uçåi¢É1Íç¤ñù{¤qCsU¤ÓqC¤ugÇ¢ s1ÍEí¢ Û³¡_¤¤-k÷¤=
¤»¢1³¡K¤ ?=¤=Ѥw÷ãí¤s }¡¤qCqC¤÷Ae5¢·Q3
gǤñù{y¤qCs¤Óñù¤uçù!¢ »3
Åí¢ Ûã3ùõ¤¤¯·+¤=
¤õ¢³³3ù¤ ?=¤=Ѥw÷ãí¤s }¡¤qCqC¤÷AgÇ¢µÁµÝyW¤qC{y¤qCs¤QËñ¤÷Ãû3¢
KµÝ í¢ Ý)5±M¤ ¿å¤¯·÷¤=
¤=)¢35±»¤ ?=¤=Ѥw÷ãí¤s }¡¤qCa¢3·{¤qC{y¤qCs¤QË1¤w-{¢ ó· í¢ Ýõ7
ã¤SO¤¯·÷¤=
¤=
㢵Å7o¤=Ѥw÷í¤sÕ}¡¤qCaý¢³¿7í{¤ó¯{y¤ó/s ¤Ñ
c£¢ ]7í yí¢ ß»ÉÁs¤/ýw¤=;¤
Ó
¢·ÉÁ
¤
ÓѤ÷ãí¤sÕ}¡¤ó¯aý¢1SI¥{y¤ó¯ýE¤qCs ¤Ñ
å
¢ ¹I¥ í¢ _aËͤ/ý
Ó¤
Ó
¤÷Ñ¢·UË9¤
ÓѤ÷ãí¤sÕ}¡¤ó¯ãG¢±Ëý}¡¤s{y¤ó¯ó¯¤Ñ
eµ¢ ¹Ëýwéí¢ ÑÍKÑa¤¡Gw¤=½¤÷u¢ÉKÑó¤
ÓѤ÷í¤õ±}¡¤ó¯åi¢¿ç͵ÿפsýŤó¯ó¯¤ß]gÇ¢ ©Íµ í¢ QM©»¤!#
Ó¤
Ó½¤÷«¢ÉgM©¤
ÓѤÁ÷ãí¤õ±}¡¤ó¯eµ¢¿Ï1¤sU{y¤ó¯ó¯¤ß]gÇ¢
;Ï í¢ QÏỤ!#
Ó¤
Ó
¤+O¢IÃÏáͤ
ÓѤÁ÷ãí¤sUÿ¤ó¯ç¢=±OÅñ¤sUýŤó¯ó/¤]qù!¢ ÏOÅw
í¢ ÓßÁ¹©¤£ë
Q¤
Ó
¤ec¢K Á¹¤w;¤e÷ãí¤uýE¤s ç{¢½ÕAñ¤u{y¤ó¯ó/¤]yé¢ =Awí¢ S9AñM¤£ë=¤÷
¤e¢KÕAñ»¤wѤÁ÷í¤uçýE¤s ù!¢;¯ÃÕãG¤só¯¤[á{y¢õÃÕ í¢ Õ
CÉ
¤¥
Ó¤÷9¢Í?CÉ_¤w;¤÷í¤÷AýE¤s y¢»CÅå
¤sqC¤[}/¢óSÅ í¢ ÕoEQ¤¥{=¤÷s¢MEõ¤w;¤ ?ûí¤s yé¢9Eåeµ¤sqC¤Yß³¢qEåwéí¢ ×ÇÙ¤'
¤+a¢MçÇÙs¤÷ãí¤sÕy
¢¹1G½ç¤sUñù¤Ùsñù¢çG½wWí¢ ×qÙ
Ó¤¹3=¤+a¢ÏAÙ©¤Á÷ãí¤õ±y颹1Ùõç¤sUqC¤GMó¯¢ÿ?Ùõ3í¢ WÝYéÁ¤9û=¤w»¢OYéM¤Á÷ãí¤õ±{¢'ÛÍù!¤sÕñù¤ÅÑ÷A¢ý¯ÛÍ í¢ é'[¡ ?¤;¡¤_¢Ow[¡
ã¤Á÷ãí¤õ±{û¢§/Ý
y
¤sÕñù¢{CÝ
³í¢ ùëÝù¤«ÿ¤A©¢ÁSÝù;¤+÷ãí¤sUýE¢%Å]Ýyé¤õ?ñù¤Uug¢ûg]Ý3í¢ ùß±e¤«ÿ¤
ã¢A¿ß±Ñ¤Á÷ãí¤õ±}¡¢¥y_{¤sÕñù¤Uu¢û_3í¢ ùÑÁ¤«¤
¢ÃÑu¤Á÷ãí¤õ±ÿ×¢#³ÑíýE¤õ±ñ¤UmsU¢ù×Ñí í¢ g#QÁ+¤«å¤Á;¢ÃåQÁ«¤Á÷í¤uÿ×¢ÿ-Ó¥u碣WÓ¥}¡¤õ±ñg¤Õ'õ±¢õåÓ¥sU¤1 í¤O+¢ g#SÁ¤+Y¤ÁÑ¢}!S¢ÃåS«¤e÷í¤uç1¢Sýõ±¢£WSý}¡¤õ±ñg¤Õ'sU¢õSýqC¤ó¯ í¤O¢ e}ÕÑ÷¤a ¿¤»å¤eu¢ýÇÕÑÁ¢Å+ÕÑϤÕ÷ãí¤w³¢Uµõ±¢£Uµÿ¤uñ¤gÇ÷äçs¢óSUµ1¤uçUí¢ eשw¤aÁ¤så¤ÁO¢}!שÁ¢Å+ש÷ãí¢SWõ±¢!!W1¤õ±ñ¤y
õ±¤çs ¢ñAWwéí¢ eWáw¤ÍÁ¤©O¤eO¢}!WáÁ¢Å÷Wá÷ãí¢ñ?éÅõ?¢¡ÇéÅñ¤u1¤û3sU¤eµó¯¢ÿ?éÅ í¢ å·i¹
Ó¤Í+¤MO¤eå¢ÿi¹A¢Å÷i¹wMí¢ñ?ëõ?¢/ûëñ¤÷Aÿפ{s¤åió¯¢} ë3í¢ cëñ=¤÷¤Ïá¤å ¿¢ÿéëñÁ¢Ç½ëñ÷ãí¢qkÕõ±¢9kÕ÷äñç¤û³s¤åiqC¢ý¯kÕ3í¢ cíÉ=¤
Ó¤
ã¤å ¿¢ÿéíÉÁ¢Ç½íÉ÷ãí¢qmõ±¢¹1m÷äñç{¤{ó¯¤åiqC¢{CmwWí¢ ã%ï=¤ó
Ó¤
㻤c¢3ïÁ¢Gï÷ãí¢qeïåõ±¢'×ïåuç¤ñùgǤ{yó¯¤c#ñg¢y1ïå í¢ aYoÙ¤]
Ó¤
»¤¢3oÙÁ¢GoÙwMí¢óÁá½õ±¢'×á½u¤ñùù!¤{yqC¤c£ñù¢ù×á½ í¢ áÿa¤ß=¤
_¤e¢ñaÁ¢Gca÷ãí¢óÁaõõ±¢'aõuç¤ñùù!¤ýÅqC¤ãGñ¢ùaõ3í¢ áãé¤
=¤½s¤e¢ñãéÁ¢ÙÏãé÷ãí¢óÁcÍõ±¢'cÍu¤ñgy
¤ýÅqC¤aýñù¢g/cÍ í¢ áå¡=¤o=¤½s¤=e¢ñyå¡A¢Y)å¡÷ãí¢se
õ?¢§/e
õ±¤qCyé¤}/ñù¤yu¤sqC¢çÅe
í¢ áeù=¤÷e¤õ¤Q©¤=Á¢ñyeùA¢Y)eùwMí¢sçÝõ?¢§/çÝsU¤ó¯yé¤}/ñù¤ys¤uqC¢eyçÝ í¢ ág±=¤e÷¤õ¤Q©¤
Ó+¢ñyg±A¢Yõg±÷ãí¢swùsÕ¢%ÅùsU¤s yé¤}/ñù¤ÿ×÷äs s¤uçó¯¢eù í¢ =Çy¤9y
¤w¤O¤QM¤w÷¢qEyÁ¢ÛÑy÷ãí¢swyíõ±¢%Åyís¤s û3¤}/ñù¤1ug¤s {y¤Çuç¢eyí³í¢ ;µûÁÁ¤;!;¤+e¤Oå¤uM¤w÷¢qEûÁÁ¢ÛÑûÁ÷ãí¢sw{¥õ±¢%Å{¥s¤s û3¤ÿñ¤1u¤sUýE¤Å_õ±¢å³{¥3í¢ »ý+¤½WѤÁÁ¤Oå¤u
ã¤÷w¢qEý+¤½S¢[;ý÷ãí¢Íýýuç¤}¡sU¢¥yýýó¯¤sUû³¤ÿW1¤ñùs ¤õ±ÿפAcs ¢cWýý í¢ ¹G}Ñw¤¿«¤Áw¤O¤)Ϥ+
Ó¢qE}Ñ+¤Ñ¢[;}ÑwMí¢Íÿµu¤ÿsU¢¥yÿµó¯¤sUû³¤ÿW1¤ñùs ¤õ±1¤OÓs ¤Cuuç¤{ í¤
㤽w¤±¯O¤Áw¤O¤)Ϥ+
Ó¢qE©+¤ue¢Ý©÷ãí¢añu¤1sU¢¥ñó¯¤õ±û³¤ÿW1¤ñùó¯¤uñ¤Ï+ó¯¤ÅÑõ±¤}¡3í¤u+¤»E
Ó¤3g¤e
Ó¤O¤)
ã¤A=¢ó¡ñá+¤åÁ¢Ýañá÷ãí¢aqÅõ±¤ñsU¢¥qÅqC¤õ?{¤ÿW1¤ñùó¯¤uñù¤Íó¯¤E;sU¤ÿ í¤Ïw¤9û
Q¤· =¤=¤O¤)
ã¤A=¢ó¡ó¹+¤A¢Ýaó¹wÏí¢»sõ±¤ó¯s¢#³sqC¤u{¤ÿW1¤qCñ¤÷Ãó¯¤ÉAqC¤Çãó¯¤ñ í¤å
Ó¤9=¤7?
Ó¤ ¿å¤=O¤)
ã¤e=¢ó¡sñ÷¤
ÓÁ¢]Msñ÷ãí¢»õÕsU¤s s¢#³õÕqC¤u{¤ÿWÿפó¯yW¢ %¡õÕqC¤GÍqC¤qC í¤==¤¹µ=¢ ùuÉ)¤
Ó«¤)
ã¤e=¢ó¡uÉ÷¤w+¢]MuÉ÷ãí¢_÷sU¤sÕs¢#³÷ñù¤u{¤³ÿפó¯yW¢ #y÷ó¯¤GMqC¤qC í¤
Ó¤'
=¢ ùëwM¤
Ó«¤Ï
ã¤e=¢sw+¤Á¢ß©w÷ãí¢
õwåõ±¤sÕsU¢£WwåqC¤u{¤³ÿפó¯û3¢ #wåqC¤Ùsñù¤ó¯ í¤c¤
Ó ¿¤/¢ y5
Ù
ã¤÷÷¤ÏM¤ ?¢s
Ùw¤e+¢ß©
Ù÷ãí¢
õ
½sU¤us ¢£W
½ñù¤÷Aû3¤³ÿ¤s{¢ £3
½ñù¤Ñï÷äó¯ñ¤sÕ3í¤+O¤+e¤¯·=¢ û
¤÷÷¤Ï
¢s
w¤¢ßs
÷ãí¢
)
õsÕ¤ugs ¢£W
õaý¤³}/¤sUýÅ¢ ¡!
õñù¤ÓñsU¤õ±1¤õ± í¤ÁO¤Á+¤-¢ û{
齤+Q¤Ï
¢s
éw¤¢ßs
é÷ãí¢M
ÍsU¤÷Ãs ¢§/
Íw-¤õ±aý¤ñ}/¤sU}/¢ /Ç
Íñù¤ÓñsU¤uÿפu3í¤eu¤ ¿w¤-¢ ý£
¡«¤+½¤c
¤e ¿¤ÍS¤±/Ñ¢_ß
¡÷ãí¢M
}¡¤OQw-¤eµ÷äuaý¤ñýŤsUÿ×¢ ¯{
ñg¤Óqs ¤÷Ãÿ¤u í¤Q¤Sw¤¥¢ ý£
ù«¤+½¤c
¤¤a ¿¤±/u¤¹S¤?C÷ãí¤Á=w-¤mÙÿ¤OQ÷äçug¤uçaý¤ñýŤsUÿ×¢ -µ
Ýñù¤SÝs ¤w-}/¤ug í¤ ¿»¤%¢ }
±O¤Á
¤c
¤¤¤±/u¤¹ ¿¤?C÷ãí¤Á=uç¤mYÿפOQug¤çûu¤÷Ãa¤ñù{¤uñ¢
ñù¤S[ í¤%¢ }é
å¤å
c¤o¤ ¿e¤¤±/«¤'¤?C÷ãí¤Á=uç¤mYÿפÓ÷äçûu¤gÇíÿ¤ñùû³¤ugñ¢ +#
íñ¤Õ' í¤+Yå¢ }é
Áå¤Ï¤¤»e¤ ¿¤-k«¤'¤?CwÏí¤qCuç¤ýÅug¤Umuç¤mYÿפÓug¤ù!õ?¤gÇíÿ¤ñùyW¤÷Ãñg¢ «G
¥ñ¤Õ'³í¤+Ûc¢
¤=m¤_Á¤_Á¤-O¤e¤©íA¤Ñ¤=÷ãí¤qCuç¤}¡õ?¤Wu¤íÿ1¤Óñõ±¤ù!õ±¤ù!í¤qCaý¢ )ý
ýñ¤Õ§3í¤+YO¢
Ñ
¤=m¤_¤sÁ¤-O¤e¤©í¤Ñ¤
Ó÷ãí¤ó¯uç¤}¡sÕ¤Wu¤íÿ1¤Óñõ±¤y
sÕ¤ù!í¤qCaý¢ )ý
µ1¤Õ' í¤«ÿO¢ û
©9¤=m¤õw¤sÁ¤%«¤'Á¤)É+¤u¤=÷ãí¤ó¯u¤ÿ×s¤×Éõ±¤íÿ1¤Õ's ¤yés ¤yk7¤ó/c£¢ ©·
1¤U í¤«ÿO¢ ñE
áߤ
QI¤õw¤©w¤+YO¤Á¤)É÷¤«e¤
Ó÷ãí¤s u¤ÿWó¯¤×Éõ±¤íÿ1¤Õ's ¤yéó¯¤yWk7¤qCå¢ ë
ÅÿW¤Um3í¤««¢ ñE
¹ó¤=I¤
Ó ?¤
Ó¤M=¤«ÿc¤ïÁ¤«w¤)e¤w÷ãí¤s u¤ÿWs ¤Umõ±¤íñ¤UqC¤û3ó¯¤ñù÷A¤ó¯k7¤qCå¢
ÿפUï í¤)7u¢ q¡
ñ¤
Ó¤w¤
Ó¤M=¤«ÿc¤ïÁ¤«
Ó¤ÏA¤÷÷ãí¤sõ?¤³ó¯¤Umõ±¤íñ¤UqC¤û3ó¯¤ñùuç¤s ëí¤ó¯åi¢
Õÿ¤×I³í¤)Éu¢ q¡
ɤ
Óñ¤+¤==¤M=¤«ÿc¤ïÁ¤«ÿw¤ÏA¤÷÷ãí¤sUõ±¤1ó¯¤UsU¤íñù¤Umñù¤û3qC¤qCu¤õ±i%¤s eµ¢ Y
ÿ¤×É í¤)Éu¢ ó
ͤw[¤Áe¤==¤M¤«¤ï+¤«ÿ
Ó¤OÁ¤+wMí¤sõ±¤1s ¤Õ'sU¤íñù¤å÷A¤{ñ¤{qC¤qCu¤õ±ù!¤uç{¤s eµ¢ ÿ
åÿ¤×I í¤)7÷¢ ó
Ùͤw
ã¤_¤Áe¤==¤
ãc¤
ã ?¤ó¤ï+¤+Yw¤OÁ¤÷÷ãí¤sUsU¤ñó¯¤Õ'sU¤íñù¤åiu¤{yñ¤{ñg¤ó/u¤õ±y¤sÕýE¤s eµ¢ ÿ
½}/¤W3í¤©íÑ¢ óW
á¤;¤=ϤåA¤
Q¤
ãϤ½e¤Í¤I+¤%
Q¤å+¤Á÷ãí¤õ±sU¤ñó/¤S[sU¤k7ñù¤eµu¤ýų¤{ñg¤ó/õ?¤uû³¤qCýE¤sÕç¢
õ}¡¤W í¤©íu¢ óW
éá¤Ñ¤c
c¤åA¤
Qå¤
Ó ¿¤wϤ½Á¤a¤IÁ¤-
Q¤å+¤Á÷ãí¤õ±sU¤ñó/¤Óñõ±¤k7ñù¤çõ±¤ýų¤s ÷äó¯ñ¤ó/õ?¤u{¤ñ}¡¤sÕç¢ 7
Íÿ¤W³í¤«¤9u¢ óW
¡á¤ÁQ¤u
¤A¤
Qå¤
Ó ¿¤+«¤½+¤¤ï+¤-=¤÷¤ewÏí¤õ?s¤qCqC¤ÓsU¤çûw-¤ÿñù¤çûsU¤}¡ÿ¤sUuç¤s1¤sõ?¤ugýŤ}¡ýŤuç{¢ í
}¡¤ãGu¤ñ í¤åe¤9Ñ¢ s³
ù¤e½¤Ñ½¤A¤÷O¤÷¤+÷¤Ñ+¤¤uS¤+¤-k=¤=÷¤A÷ãí¤uó¯¤qCqC¤ÓsU¤çû÷äÿqC¤gEs¤}¡ÿ¤sUuç¤s1¤sUs ¤÷Ã}/¤{ûi%¤ñ÷â
©Ý}/¤qC÷ä{sÕ¤ñù í¤
Ów¤
e¤
Ó½¢ y5
)±e¤¤Ï»¤+O¤÷¤+÷¤Ñ÷¤;=¤«¤+¤¯·=¤=
Ó¤÷ãí¤uçó¯¤qCqC¤QËsU¤çûug¤ÿ×qC¤gEs¤}¡ÿ¤sUuç¤s1¤sUgǤû³ë¤ñùu¢
«ýŤó¯u¤{ûs ¤ó¯3í¤
Ó÷¤½+¤w;¤©S¤¿ùe¤=¤)_¤+O¤+e¤+÷¤Ñ
Ó¤s¤Ïe¤»w¤¯·¤
Ó
Ó¤÷ãí¤uçó¯¤ó¯ñù¤QËs ¤gÇu¤³ñg¤y
ó¯¤}¡ÿ¤sUu¤sU1¤sUù!¤yWë¤qCu¤Aw-¤WÿýE¤s sU¤ýÅs¤ó¯ í¤+=¤uw¤w;¤©m ¿¤¿e¤=¤»)¤Á«¤+Á¤ÁQ¤u
Ó¤s=¤OÁ¤»w¤¯·¤
Ó=¤SÓí¤sñg¤ãÙ÷A¤gÇs ¤gÇõ±¤1qC¤yéqC¤ÿýŤuõ±¤õ±ÿ¤õ±û3¤çûëí¤ó¯õ±¤Acuç¤WýE¤s qC¤ÿ×qC¤õ± í¤Á=¤«=¤w;¤©¤¿Á¤
Ó¤M¤Áu¤ÁÁ¤e½¤u=¤©=¤OÁ¤»w¤» ?¤¹¤÷÷ãí¤sñg¤c#uç¤gÇs ¤gÇsU¤ñqC¤yéqC¤ÿýŤuõ±¤õ±ÿ¤uû3¤çk7¤ó¯sU¤Acuç¤WýŤsUñù¤ÿ×qC¤u í¤e=¤«¤+½¤©í¤¿+¤
ÓI¤aM¤eu¤ÁÁ¤e½¤u=¤©=¤å+¤»w¤»¤]¤÷wMí¤ó/ñg¤c#uç¤gÇs ¤ñw-¤ñs¤ñgñù¤û3ñù¤ÿ×ýŤusU¤uÿ¤u{¤çëí¤ó¯sU¤Acuç¤W{y¤sUñù¤ÿ×qC¤u í¤¤cO¤Á
¤©íe¤¿ù¤÷ñ¤á
¤eu¤e+¤e
¤O¤M¤÷¤¤=w¤e¤óå¤÷÷ãí¤sñ¤åu¤ç{s ¤qCuç¤ñùs¤ñgñù¤û3ñù¤1{y¤usU¤uÿ¤u{y¤çë¤ssÕ¤Au¤W{y¤õ±1¤ññg¤ugÕí¤åO¤+
¤©íe¤¿ù¤÷[¤á½¤eu¤e+¤e
¤Oc¤
ã¤=w¤e¤=w¤e¤óå¤÷÷ãí¤sUñ¤å
õ±¤ç{s ¤qCu¤ñùs ¤ñùqC¤{ñ¤1{y¤uçs¤uçýE¤÷AýE¤çéÛ¤ssÕ¤ñùw-¤Ùsu¤×I{y¤õ±ÿפñ í¤å«¤Á
¤)7e¤'
S¤¤÷§¤a;¤ ?;¤÷¤
¤Oc¤
ã=¤w¤e¤=w¤Á¤sc¤+÷ãí¤sUñ¤å
õ±¤ç{s ¤qCõ±¤qCó¯¤qCñù¤{yñ¤1{¤÷As¤uçýE¤÷A}¡¤çW¤sUsÕ¤ñùug¤Yßõ±¤W{y¤õ±ÿ¤ñ í¤c÷¤Á
¤©íÁ¤§£¤¤+©¤aѤ ?;¤÷¤ ?
ã¤Oc¤
¤=
Ó¤=Á¤=w¤Á¤sc¤+÷ãí¤sUñ¤ñgug¤ÿõ±¤çs ¤qCsU¤ó¯ó¯¤qCñù¤{yñ¤1{¤÷As¤uça¤çW¤sUsÕ¤qCu¤Yßõ±¤×É{û¤õ?}¡¤qC3í¤=;¤e
¤«+¤%Gå¤=w¤Á)ɤᤠ?M¤«¤½¤
Ó=¤
Ó+¤
Ó
Ó¤áÁ¤«Á¤¿Ï¤÷ãí¤sÕ³¤qÃõ±¤ÿ×õ±¤çó¯¤ó¯sU¤ó¯qC¤ó¯ñù¤ýÅÿפñùû3¤÷Aëí¤ç×ɤõ±s ¤qCu¤Û9sU¤Um{y¤uýE¤qC3í¤=;¤e
ã¤)É÷¤%GÁ¤
Qw¤Á«¤a#µ¤«¤Qc¤
Ó=¤
Ó+¤
Ó
Ó¤á¤Ï¤
ÓϤ÷ãí¤sÕ³¤ó¯sÕ¤³sÕ¤çó¯¤ó¯sU¤ó¯qC¤ó¯ñ¤}/ÿפñùÝͤçUm¤õ±s ¤ó/õ±¤Û9s¤×É{¤uýE¤qC í¤
Q
¤e
¤«÷¤%GÁ¤=+¤Á«ÿ¤a£i¤«¤Qc¤w=¤
Ó÷¤
Ó
Ó¤á¤Ïw¤
Óc¤÷ãí¤õ?³¤s 󯤳sÕ¤e5ó¯¤s s¤ó¯qC¤s ñ¤}/ÿפñù]ó¤çÕ'¤us¤ó¯sU¤ï#÷äeµs¤Um{¤uç{¤ó¯ í¤
Ó
ã¤
㤫÷¤Í ¿¤]+¤
Ó÷¤e+Y¤a£¤«¤Qc¤w=¤
Ó÷¤w
Ó¤K¤Ï
Ó¤wϤA÷ãí¤õ?³¤sÕqC¤ñs ¤e5ó¯¤s ó¯¤sñù¤sÕ1¤ÿÿפñùß]¤çÓñ¤uçs¤ó¯sU¤ï#ug¤çs¤U{y¤uçû3¤ó¯ í¤
ÓM¤
¤«ÿ÷¤a¤]+¤
Ó÷¤-¤á!#¤«¤uO¤¤÷
Ó¤w
Ó¤Kw¤c=¤Ï¤A÷ãí¤õ?³¤sÕqC¤ñs ¤}¡uç¤ó/ó¯¤s ó¯¤sñù¤s ñ¤ÿÿפñù_¹¤ç[¤s s¤ouç¤ýÅ÷äó¯ó¯¤Uï{¤÷Ayé¤s 3í¤÷õ¤ ?
㤫
Ó¤we¤ue¤q÷¤
Ó¥{¤a/ý¤=u¤««¤Áå¤÷
Ó¤w
Ó¤
Q¤Ñw¤c¤+O¤e÷ãí¤u1¤sUñg¤ñs ¤}¡uç¤ó/ó¯¤s ó¯¤sñ¤õ±ÿפÿ×ÿ¤qCÑ
¤ç[¤ó¯s¤ou¤ÿu¤s ó¯¤Uï{¤÷Ay¤s í¤+¤«ÿ
Ó¤we¤uÁ¤q÷¤w%Ǥa/ý¤
Óu¤««¤Áå¤+=¤+¿¤
Qe¤Q÷¤å¤ÁO¤e÷ãí¤u1¤õ±ñ¤ñùs¤}/õ±¤sqäsó¯¤sñg¤õ±ÿ¤1ÿ¤ó¯ok¤uç{¤çY_¤s s¤oõ±¤ÿ×sU¤s s ¤Uëí¤s í¤÷¤«ÿw¤w+¤«Á¤q÷¤w§!¤a
㤤
Óu¤Ou¤Á¤÷
Ó¤÷¿¤÷Á¤Q÷¤å¤ÁO¤e÷ãí¤u1¤uÿפqCs¤ÿsU¤sqäsqC¤sUñ¤uÿ¤1ÿ¤ó¯a¤sUýŤçÙs¤s s¤oësU¤ÿ×sU¤s ó¯¤Uëí¤sU í¤+¤«ÿ
Ó¤w+¤«+¤÷¤w'
¤á½¤+o¤
Óu¤Ou¤eå¤+=¤÷¿¤÷+¤÷÷¤=«¤eO¤e÷ãí¤uçÿפuÿפqCs¤ÿsU¤ó¯ó¯¤sqC¤sUñ¤õ±ÿפ1}¡¤s aý¤qCÿW¤çÙ©¤sUó¯¤oësU¤ÿ×s ¤sUó¯¤Uë¤sÕ í¤AÛ¤«ÿ
Ó¤+w¤«+¤
Ó¤+¹3¤ac¤O]¤wѤåu¤Áå¤+=¤+=¤
Ó+¤÷÷¤=u¤u¤ ?÷ãí¤÷Aÿ¤uçÿ¤qCs¤ÿsU¤ó¯qC¤sUqC¤sUñ¤õ±ÿ¤ñ}¡¤s c#¤1ñ¤çGM¤sUó¯¤oësU¤ÿ×s ¤sUó¯¤Ui§¤õ?3í¤Á[¤O ¿¤Ý
Ó¤Á=¤O÷¤; ¿¤
Ó
Ó¤÷¹3¤c¤«ó¤wѤåѤO¤Á¤+=¤
Ów¤«
Q¤
ÓQ¤«¤ ?÷ãí¤÷Aÿפug}/¤ó¯ó/¤ÿ×s ¤ó¯qC¤sUñù¤õ±1¤uç}¡¤ñ}¡¤s å¤ÿ×ñ¤çûGM¤só¯¤ó¯÷ägEs¤1qC¤õ±ó¯¤ï¥÷ä1i%¤õ± í¤Á[¤c¤Ý
Ó¤Á=¤O÷¤;¤w
Ó¤÷9¤=¤Ñ¤;¤;¤c¤¤+=¤w
Ó¤«
Q¤
ÓQ¤÷í¤w-ýŤó¯ó/¤1qC¤s ñù¤õ±ñù¤sÕñ¤uç{y¤qCýE¤sÕe5¤{ys ¤ç{ǤsUqC¤s ug¤ù!ó¯¤1ñù¤uqC¤ouç¤ñéÛ¤u í¤e§¤c¤q=¤e¤O
Ó¤_¤w=¤+9û¤w¤
K¤;¤=
¤c¤¤Á¤w=¤O
Q¤
Ó½¤SûSí¤ó¯ó/¤1qC¤s ñù¤sUñg¤õ±aý¤qCýE¤sÕe5¤{õ?¤ç{ÅѤõ±qC¤s ug¤ù!ó¯¤ññ¤uqC¤ou¤ñéÛ¤u í¤e§¤e¤Ý=¤Ï¤å
Ó¤sÁ¤=¤Á½¤e¤
ãK¤A
¤
Óo¤Á¤+=¤w¤O
Q¤
Ó÷ãí¤ó¯ó/¤1ñù¤s qC¤sUñg¤õ±a¤ó¯{û¤õ?e5¤{u¤çûCu¤õ±qC¤sÕõ±¤y
ó¯¤ñ³¤uçqC¤ï¥u¤ñùéÛ¤u í¤©¤=Á¤Ý=¤Ï¤å=¤c ?¤÷Á¤=¤Á½W¤ ¿¤
ã¤A
¤
Óo¤Á¤Á¤w¤O
Q¤w÷ãí¤s ó/¤1ñù¤s ñù¤õ±ñg¤õ±a¤ó¯{û¤õ?åi¤{÷äç{C)¤õ±qC¤sÕõ±¤s÷A¤ñqC¤ñ³¤uçqC¤ï¥õ±¤qCW¤uç í¤ ¿©¤e¤'=¤Ï¤å=¤c ?¤÷Á¤¤e=³¤ß¤
¤
ÓˤÁ¤Á¤wc¤å
Q¤w÷ãí¤s ó/¤ññ¤s ñ¤uñ¤uá·¤ó¯{y¤ugãG¤k7Ac¤uñù¤õ±sÕ¤sUuç¤ñqC¤ñÿפ÷ÃqC¤mYõ±¤qCWÿ¤÷Ã í¤ ¿©¤=Á¤'=¤ ¿«¤å=¤c¤+¤Á¤e¿¤I9¤
¤
Óˤeå¤ec¤wc¤å
Q¤w÷ãí¤sUqC¤ñ1¤sUñ¤õ?ñù¤õ±á·¤s {¤÷Ãaý¤í}Á=¤uçñù¤õ±sÕ¤sUuç¤ñùñù¤ñùÿ¤÷ÃqC¤ñù÷A¤eµõ±¤ó/ í¤
QÁ¤Í ?¤=¤ ¿÷¤¤¤+¤Á¤?C¤
¤ ¿
ã¤wˤÁ¤Ac¤+O¤å=¤+÷ãí¤sUqC¤ñgÿפsU1¤uñù¤õ±oë¤sUÓñ¤ï¥OÓ¤uçñù¤õ±sÕ¤sUuç¤ñùñù¤ñùeµ¤qCuç¤çõ±¤ó/³í¤
Ó+¤=դѤ=ͤ¤e¤Á¤Á%¥
CÙ¤-k¤+¤Á¤eå¤÷«¤=¤Á÷ãí¤õ±qC¤ñgÿפsñ¤uñù¤õ±oë¤sUÓ¤ok aû
ޤõ±sÕ¤õ±u¤ñùñù¤ñùeµ¤qCuç¤}¡w¤qCsU¤ó¯ í¤w+¤=¤«e¤=ͤ¤e¤Áw¤e½¤Á½¤o¯·¤Áq¤eO¤ ¿O¤+u¤=¤Á÷ãí¤õ±qC¤ñgÿ¤sU1¤÷Ã1¤uo¤õ±QˤaýŤõ±Cu¤us ¤õ±u¤ñùñù¤ñùeµ¤qCu¤ÿ×uç¤qCsU¤s í¤w+¤
Óe¤O+¤
Óͤc¤=e¤Áw¤e;¡¤÷Q¤
/¤eݤeͤ+u¤=¤Á÷ãí¤õ±qC¤qC}/¤sUeµ¤u磻uÑ
¤c£}¡¤qCÅѤuçs ¤uõ±¤qCñ¤ñùeµ¤ó¯s ¤ñu¤qCsU¤sÕ í¤+¤=e¤cw¤
Óͤc¤=Á¤ew¤;¡¤=Ѥß/ý¤eݤeͤ+Q¤==¤Á÷í¤uqC¤qCýŤõ±GM¤w_9¤å
}¡¤ñE½¤uçs ¤usU¤ó¯ñ¤ñùeµ¤ó¯s ¤³u¤ó¯sU¤sÕ3í¤+w¤w+¤
Ó¤
Ó¤=Ϥw+¤e1U¤å½¤KK
Yù¤+½¤==¤å÷ãí¤uqC¤qCýŤsUIÏ
Ûݤe5ýŤñÏ+¤usU¤s ³¤qCåi¤ó¯ó¯¤ñùsU¤s s ¤sU í¤Áw¤
Ó+¤
Ó¤
Ó¤
Ó)¤ww¤1¤«½¤áK
[±¤Á
¤==¤å÷ãí¤uqC¤qC{y¤õ±IÏ
ݤçýŤÿ×Ï÷¤ugs ¤s ÿW¤ó¯åi¤ó¯ó¯¤ñùsU¤ó¯s ¤õ± í¤Áw¤÷w¤=¤
Óͤ
Ó)¤ww¤1¤uQ¤K
]¤Á
¤==¤÷ãí¤ugqC¤ó¯û3¤uÉc
]í¤gÇ}¡¤}/Ï÷¤÷Ãó¯¤s ÿW¤ó¯eµ¤ó¯ñù¤qCs ¤só¯¤u í¤e
Ó¤÷w¤=¤
Óͤ
Ó)¤w
Ó¤ ¿1¤QѤ»K
ßÁ¤eM¤
Ó=¤÷ãí¤ugqC¤ó¯û3¤uÉc
_¥¤ù!ýE¤}/ Óÿ
_¥¤sUÿ¤ó¯eµ¤ó¯ñù¤qCó¯¤ss ¤u í¤ew¤÷
Ó¤=¤
Óͤ
Óu¤+
ѤQ;¤_K
ѤeM¤
Ó=¤ÿí¤ó¯yW¤ugÉ
Ñý¤yWýE¤ýÅ Óÿ
Ñý¤sUÿ¤ó¯eµ¤s ³¤ó¯qC¤sUó¯¤uç í¤
Ó¤+=¤
ÓϤwͤ
Óu¤Á
QѤ
½¤MK
QѤ)¤
Ó÷ãí¤ó¯yW¤ugÉ
Óµ¤û3ýŤ{y Ó
Óµ¤õ±ÿ¤ó¯eµ¤s ³¤ó¯qC¤sUó¯¤uç í¤ ?=¤Á¤÷«¤wͤ
Óu¤Á
S©¤
¤
ãK
S©¤ ¿õ¤
Ó÷ãí¤s É
Õ¤{y{y¤{y Ó
Õ¤uýŤseµ¤sUÿ¤sU1¤u í¤åO¤+÷¤+ͤ÷½¤eq
Õá¤
¤
%Ù
Õá¤w÷ãí¤s Éc
UŤ{ýE¤{ Ó
UŤuýŤsç¤sU}/¤sU1¤õ± í¤ÁO¤+Q¤+a¤÷½¤eq
×¹¤
c;¤
ã%Ù
×¹¤w÷ãí¤s Éc
W¤{yýE¤{ QÉ
W¤uçýŤsç¤sUýŤuÿ¤u í¤Ñ¤e
¤Áa¤÷¹
Wñ¤
¤
%Ù
Wñ¤w÷ãí¤s IÏ
éÕ¤{û{y¤{y Có
éÕ¤sç¤õ±{y¤u}¡¤uç ûí¤ ¿½¤
ã¤ea¤÷
iɤ
¤;%Ù
iɤw÷ãí¤s IÏ
ë¤ýE{y¤{y Có
ë¤sç¤u{¤ugýŤ÷Ã9Yí¤+
k¤
ã
¤½%Ù
k¤w÷ãí¤sÕË«
kå¤ýÅýŤ{ C
kå¤sU í¤+ï
íÙ¤
㽤½§#
íÙ¤÷ãí¤sÕËu
m½¤ýÅýE¤{ C
m½¤sU í¤+©Ë
ï¤
ã;¤½§#
ï¤÷ãí¤sÕËu
ïõ¤}/ýE¤{ ÃÍ
ïõ¤õ±3í¤+)
oé¤
ã;¤½§#
oé¤÷ãí¤sÕKÑ
áͤýÅýE¤{ ÃÍ
áͤsU í¤Á)ñ
a¡¤
㽤ѧ#
a¡¤÷ãí¤sÕÍ;
ã
¤ýE}/¤{ Aa
ã
¤õ± í¤Á«[
ãù¤
ãQ¤;§#
ãù¤÷ãí¤sÕÍ;
cݤýEÿ¤û³ A
cݤu í¤å«[
層Ïu¤;§#
層÷ãí¤õ?M
e¤{ûÿפû³ A
e¤u
ç¤MO¤
A¤ ¿'
ç¤A÷ãí¤õ?Ã
çí¤÷Ãõ?¤{û1¤û3 yí¤
ãO¤M ¿¤e'
gÁ¤e÷ãí¤uÃá
ù¥¤u÷äû31¤{ yí¤
ã¤Í'
y¤ewÏí¤õ?Ãá
yý¤åió¯¤û3 í¤M
Ó¤'
ûѤA÷ãí¤õ?CK
{µ¤c#sU¤û³ í¤Ï+¤]'
ý©¤A÷í¤uCK
}¤ãÙu¤û3 yí¤©Õ¤'
}á¤e÷ãí¤uCK
ÿŤa}w¤yé í¤-÷ãí¤SÝ í¤¥÷ãí¤Õ' í¤+Y÷ãí¤U í¤«}÷ãí¤U yí¤«÷ãí¤Uï yí¤©í÷í¤éÛ í¤§÷í¤i%y7í¤eM¤÷ãí¤ëkû3¤u3í¤w
ã¤I÷ãí¤k7{¤s 3í¤=
ã¤÷í¤mY{y¤ñ í¤c
¤'÷í¤o{¤1 í¤O
ã¤q÷í¤oë{y¤ÿ× í¤u
¤o÷ãí¤a{y¤ÿ í¤Q
¤¹÷ãí¤ãÙ{y¤}/ í¤Q
¤]÷í¤åi{y¤}¡ í¤Ñ
¤÷í¤eµýŤýE í¤
÷¤K÷ãí¤e5ÿ¤{û yí¤;«¤Í÷ãí¤eµÿפýE í¤
c¤Íoí¤w-ÿ¤çñù¤{ í¤
ã¤au¤S÷ãí¤÷Ãÿפeµó¯¤{ í¤
ã
Ӥͫ¤ ¿÷ãí¤uçÿפe5sÕ¤û3 yí¤
ãÁ¤Í«¤e÷ãí¤uÿפeµõ±¤{ í¤Ïå¤ÍO¤Á÷ãí¤sU1¤eµ÷äyW í¤) ¿¤ÍO¤+÷ãí¤s ñ¤×É í¤)Éc¤w÷ãí¤ó¯ñù¤×É ûí¤©í=¤=÷ãí¤qCqC¤W í¤)7w¤÷ãí¤ñùs ¤×I yí¤©í¤c÷í¤ñõ±¤W í¤©íÁ¤å÷ãí¤1uç¤W í¤©í¤O÷ãí¤ÿ×÷A¤W yí¤£÷ãí¤]q í¤£÷í¤_9 í¤¡G÷í¤Ñ
í¤/ý÷í¤QË í¤-k÷ãí¤Ó yí¤O ¿¤'÷ãí¤mY÷ä1 í¤Á¤q÷ãí¤oks ¤ó¯ í¤
Ów¤÷í¤á·ó¯¤s yí¤A=¤o÷ãí¤aqC¤õ?í¤
÷ãí¤aý yí¤]÷ãí¤c# í¤]÷í¤å í¤ó÷ãí¤å í¤]÷ãí¤c# í¤]÷ãí¤c#3í¤¹wÏí¤a í¤á÷ãí¤á53í¤q÷ãí¤o3í¤'÷ãí¤mY3í¤wMí¤k7 í¤IwMí¤ëí í¤wÏí¤i%3í¤õ ¿¤õ÷ãí¤y÷äy3í¤se¤õ÷ãí¤yu¤y
³í¤õÁ¤_÷ãí¤ù!s¤yW í¤)÷¤_wMí¤ù!ó/¤y3í¤s=¤_÷ãí¤ù!qC¤y
³í¤õ¤õ÷ãí¤yñù¤y3í¤õO¤©÷ãí¤yé1¤y í¤õO¤©÷í¤{ÿפù! í¤_«¤
ã÷ãí¤{yÿ¤ù! í¤_u¤½÷ãí¤ýÅÿ¤ù! í¤»u¤Q÷í¤ÿW}/¤gÇ í¤»Q¤)÷ãí¤³}/¤gÇ í¤»Q¤Ï÷í¤ñ}/¤gÇ yí¤;Ѥ÷ãí¤ñù}¡¤gE ûí¤»Ñ¤
Ó÷ãí¤ó¯}¡¤gÇ yí¤»Ñ¤÷ãí¤õ±}/¤ç{ í¤Q¤Á÷í¤uç}¡¤çû í¤Ñ¤ûSí¤çû í¤a÷ãí¤ç ûí¤áwMí¤ç í¤áwÏí¤eµ í¤ÍwMí¤eµ yí¤÷ãí¤åi ûí¤÷ãí¤åi yí¤wMí¤åiû3¤w í¤ÕM¤wMí¤åi{¤u3í¤+
ã¤÷ãí¤åi{¤sU í¤w
ã¤K÷ãí¤e5{¤s 3í¤=
ã¤áO¤ ?wMí¤uñ¤gÇû3¤ñù í¤M¤»å¤ewMí¤sUñ¤y
û³¤ñ í¤cϤså¤+÷ãí¤s ñù¤û3yW¤³3í¤u©¤
ã¤
Ó÷ãí¤ó¯ñù¤{yé¤ÿ í¤Ñ©¤½¤=÷ãí¤qCñù¤ýÅyé¤}¡ í¤;)¤Ñ=¤÷ãí¤ñùñù¤}/{û¤{ í¤
ã
¤Q¤÷ãí¤ñqC¤}/ýE¤û3 í¤©Ñ¤Q=¤c÷ãí¤ñqC¤}/}¡¤yé yí¤õϤQ=¤Ï÷ãí¤³qC¤}/³¤y í¤_c¤Q=¤Ï÷ãí¤ÿ×qC¤}¡qC¤ù! í¤_=¤Ñ=¤«÷í¤1qC¤ýEsÕ¤gÇ í¤»¤;=¤O÷ãí¤ÿ×qC¤ýEu¤çû yí¤[=¤O÷ãí¤1qC¤i% ûí¤
Ó¤«÷í¤1qC¤k7 í¤I=¤O÷ãí¤ÿ×ó¯¤í í¤ï
Ó¤«÷ãí¤ÿ×qC¤mÙ ûí¤Ý
Q¤«÷ãí¤ÿ×ó/¤ï¥ yí¤q
Q¤«÷ãí¤ÿ×ó/¤o yí¤Ë
Ó¤«÷ãí¤ÿ×ó¯¤a í¤o
Ó¤«÷í¤ÿ×s ¤aý í¤
w¤«÷ãí¤ÿ×ó¯¤c# yí¤ów¤«÷ãí¤ÿ×s ¤å yí¤w¤«÷í¤ÿWs ¤ç í¤aw¤)÷ãí¤ÿWs ¤ç{ í¤w¤)÷í¤ÿ×sU¤ç{ yí¤»+¤«÷ãí¤ÿ×sU¤gÇ yí¤_Á¤«c¤e÷ãí¤uñ¤ÿ×õ±¤ù! yí¤_¤u¤÷ãí¤sÕñ¤í} í¤c¤wMí¤ó¯ñ¤ï¥û³¤w- í¤SϤÝc¤
Ó÷ãí¤qCñù¤ï¥{¤ug í¤e
ã¤å¤=÷ãí¤qCñ¤ok{¤u í¤e
㤤÷ãí¤ñùñ¤á·{y¤õ± í¤Á
¤Ëå¤÷ãí¤ññù¤á·ýŤsU í¤+½¤Ë¤å÷í¤ñgñ¤aý{y¤sU í¤÷½¤
¤å÷ãí¤ññù¤aýýŤs í¤÷½¤9c¤å÷ãí¤ññ¤ãGýŤs í¤
ÓѤ9¤c÷ãí¤³ñg¤c#}¡¤ó¯ í¤
ÓѤ]¤Ï÷ãí¤³ñg¤c#ÿ¤qC í¤=u¤ó¤«÷ãí¤ÿ×ñù¤åÿ¤qC í¤«¤c¤O÷ãí¤1ñ¤åiÿפñù í¤«¤¤«÷ãí¤ÿ×ñ¤eµ1¤ñ í¤cO¤Íc¤«÷í¤ÿ×ñ¤ç1¤ñ í¤cO¤ac¤«÷ãí¤ÿñù¤çñ¤³ í¤Ïå¤å¤u÷ãí¤ÿñ¤ç{ñ¤³ í¤«¤¤u÷ãí¤ÿñg¤ç{ñù¤ÿ× í¤«¤=¤Q÷í¤}¡ó¯¤ç{qC¤ÿ í¤u=¤
Ó¤Ñ÷ãí¤}¡qC¤gÇqC¤ÿ í¤Ñ
Ó¤»
Ó¤;÷ãí¤ýEó¯¤gÇó¯¤}¡ í¤Ñw¤w¤;÷ãí¤ýEs ¤ç{s ¤}¡ í¤;¤+¤
÷ãí¤{õ±¤ç{sÕ¤ýE í¤;¤Á¤
ã÷í¤{u¤ç{õ±¤{y í¤
Á¤e¤
ã÷ãí¤yéw¤çu¤{y í¤
¤©í÷ãí¤Wuç¤{y í¤
ãS¤)É÷ãí¤Ùs í¤'
÷í¤Û9 í¤%G÷ãí¤Û9 yí¤¥÷ãí¤[á í¤¥÷ãí¤[á ûí¤£i÷ãí¤] í¤£i÷í¤ß] í¤!#÷ãí¤ß] yí¤/}÷ãí¤Ñ í¤/}÷ãí¤Ñï í¤/÷í¤QË í¤¯·÷í¤Óq í¤-÷ãí¤Óq yí¤+Y÷ãí¤Õ' í¤+Y÷í¤ó¯uç¤mY í¤'¤
Ó÷ãí¤s u¤ï¥ í¤Ý¤e÷ãí¤usÕ¤ï¥ í¤Ý÷ãí¤ï¥ yí¤qÉí¤uçeµ¤o í¤qͤ÷ãí¤õ±çû¤ok í¤¤Á÷ãí¤sÕgǤᷠí¤Ë_¤w÷ãí¤s ù!¤á· í¤Ëõ¤
Ó÷ãí¤ó¯y¤á· yí¤KM¤=÷ãí¤ñù{y¤oë í¤
¤÷ãí¤ñ}¡¤oë í¤q«¤å÷ãí¤ñÿפoñÙí¤Sc¤Ýå¤O÷ãí¤1ñ¤ï¥ñ¤w-³í¤å¤Ý¤O÷ãí¤ÿ×ó¯¤mYqC¤sÕ í¤=¤'
Ó¤«÷ãí¤ÿsU¤íÿó¯¤s í¤w
Ó¤+¤÷÷í¤ÿu¤ís ¤qC í¤Á¤ ¿¤Ñ÷ãí¤}¡÷äëkõ±¤ñù í¤ce¤!#÷ãí¤ß]u¤ñ3í¤Ïe¤!#÷í¤íÿ÷A¤{yug¤ÿ× í¤«¤
?¤÷ãí¤ï¥ó¯¤ÿw-¤ÿ í¤ó¤q÷ãí¤oñù¤å yí¤å¤÷ãí¤okñ¤åi yí¤Í«¤
÷ãí¤ãGÿ¤eµ í¤Íu¤9÷ãí¤c£}¡¤ç í¤aѤß÷í¤åiýŤç{ í¤á½¤Í÷ãí¤eµýŤç yí¤½¤Í÷í¤ç{ýE¤çû í¤;¤÷ãí¤gÇýE¤ç í¤a;¤»÷ãí¤ù£ýŤç ûí¤
¤©÷ãí¤yé{y¤ç{ í¤á
¤M÷ãí¤û3{y¤ç yí¤á
¤
÷ãí¤{yýE¤ç í¤a;¤
÷ãí¤ýÅýE¤eµ í¤Í;¤½÷í¤ÿ{y¤ç yí¤a½¤u÷ãí¤ÿýŤç ûí¤áQ¤«÷ãí¤1ýŤç í¤á½¤O÷í¤ñ}¡¤ç í¤aѤc÷ãí¤ñÿ¤eµ yí¤Í«¤÷ãí¤ñùÿפeµ yí¤ÍO¤÷ãí¤ñù1¤eµ}í¤u¤ÍO¤=÷ãí¤ñùñ¤çÿ¤u í¤å÷¤áå¤÷í¤qCñ¤ç{ÿ¤sÕ í¤u¤¤÷ãí¤ñgñù¤ç{ÿ¤sÕ í¤wu¤»¤=÷ãí¤qCñù¤gÇÿ¤s í¤
Ó«¤_c¤
Ó÷ãí¤qCñ¤yWÿ¤ó¯ í¤
Óu¤)c¤=÷í¤ó¯ñ¤yWÿפqC í¤=«¤)c¤
Ó÷ãí¤qCñ¤{ÿפñg í¤«¤
O¤
Ó÷ãí¤ó¯1¤{ûÿפñg í¤å«¤;å¤=÷ãí¤qCñ¤ýEÿפñ ûí¤cO¤ÑO¤
Ó÷í¤sUÿפÿÿפñ í¤c«¤u«¤+÷ãí¤sUÿפÿ1¤³ í¤ÏO¤Ñu¤÷ãí¤uçÿ¤}¡1¤³ í¤)O¤u÷ãí¤ÿ1¤ÿW yí¤«c¤uÉí¤w-åi¤}/ñ¤ÿ× í¤«c¤Q¤Ssí¤õ±ç¤ýÅñù¤ÿ í¤u¤½á¤ÁwÏí¤s gǤ{qC¤ÿ× í¤uw¤M_¤=÷ãí¤qCù!¤û3s ¤ÿ í¤Ñ¤©)¤cwMí¤ÿ×{¤gÇug¤}/ í¤Q¤»
㤫÷ãí¤ÿýE¤ë í¤ñ;¤uwÏí¤}/ýE¤ëk í¤I½¤½÷ãí¤ýÅýŤk7 ûí¤½¤
÷ãí¤{yýŤíÿ í¤'
¤
÷í¤{ûýÅ¤ï¥ í¤Ý½¤
÷ãí¤{û{y¤o í¤q
¤
÷ãí¤{ýE¤oë yí¤á
¤
÷ãí¤{y{û¤a í¤á;¤
ã÷í¤{y{y¤ãÙ í¤¹
¤
÷ãí¤{}¡¤ãG í¤9Ѥ
ã÷ãí¤{}¡¤ãG í¤9u¤
ã÷ãí¤{ÿ¤ãG yí¤ßu¤
ã÷ãí¤{ÿ¤c£ í¤9ϤÏ÷í¤{ñ¤aý í¤
c¤
ã÷ãí¤û3ó¯¤ok í¤
Ó¤M÷ãí¤û3s¤o í¤ÝÁ¤M÷ãí¤û3õ±¤ï¥ yí¤qÁ¤M÷í¤û3uç¤ï¥ í¤Ý¤M÷ãí¤ß] í¤!#÷ãí¤ß] yí¤/}÷ãí¤Ñ í¤/}÷ãí¤Ñ yí¤/÷í¤QI í¤¯7÷ãí¤Ó yí¤¥÷ãí¤SÝ í¤¥÷ãí¤SÝ yí¤+Y÷í¤Um í¤«÷ãí¤×IqC¤uç í¤=¤)7÷ãí¤Wó¯¤s 3í¤=
Ó¤§÷ãí¤éÛó¯¤qC í¤
Ó¤_S¤)÷ãí¤yWw-¤ù!ó¯¤ñù í¤å
Ó¤©¤
ã÷ãí¤{u¤û3s ¤1 í¤Ow¤Me¤
ã÷ãí¤{ysU¤{ys ¤1 í¤O
Ó¤Ñw¤;÷ãí¤ýEs ¤}¡ó¯¤1 í¤«
Ó¤u
Ó¤Ñ÷ãí¤}¡ó¯¤ÿó¯¤ÿ× ûí¤«÷¤«=¤u÷ãí¤ÿ×ñù¤ÿ×sU¤ÿ í¤u+¤«¤«÷í¤1ñù¤ÿu¤ÿ í¤ue¤u¤O÷ãí¤ññ¤ÿuç¤}¡ yí¤QS¤;¤c÷ãí¤ññg¤ýEw-¤}/ í¤oå¤=÷ãí¤qCñù¤a í¤á¤=÷í¤s ñ¤aý í¤
c¤w÷ãí¤s ñ¤aý yí¤
c¤÷ãí¤sÕñ¤aý yí¤9c¤Á¤S÷ãí¤w-ñg¤õ±ñ¤ãG í¤
c¤e=¤e÷ãí¤õ±qC¤uçñ¤ãG í¤9c¤=¤ÁwMí¤sUgǤãG í¤9»¤+÷ãí¤sy¤ãG ûí¤]õ¤
Ó÷ãí¤ó¯y¤c#ÿ5í¤O¤¹Ï¤
Q÷ãí¤ó/û³¤c#1¤u í¤eO¤]Ϥ
Q÷ãí¤qC{¤å1¤õ± í¤ÁO¤ó
ã¤=÷í¤ó¯{û¤c£ñ¤s í¤
Óc¤s
¤
Ó÷ãí¤ó¯{û¤å
ñ¤ó¯ í¤=å¤;¤=÷ãí¤qCýE¤åiñ¤qC í¤=å¤óѤ
Ó÷ãí¤qCÿ¤åiñ¤ñù í¤å¤u¤=÷ãí¤qCÿפåiñ¤ñ í¤åc¤«¤=÷í¤qC1¤åiñ¤ñ í¤Oc¤ÍO¤=÷ãí¤qC1¤eµñ¤1 í¤«¤¤÷ãí¤ñùñg¤e5ñ¤ÿ× í¤«å¤K¤÷í¤ñgó/¤åiñg¤ÿ× í¤«¤
Q¤÷ãí¤ñgó/¤eµñg¤ÿ í¤u¤Í÷¤å÷ãí¤ñs¤eµñg¤ÿ í¤Q=¤+¤å÷ãí¤ñsU¤åiqC¤}/ í¤Q=¤Á¤c÷ãí¤ñu¤åó¯¤}¡ í¤Ñ
Ó¤óe¤c÷ãí¤³uç¤åió¯¤ýE yí¤;¤]S¤«÷ãí¤ÿ×w-¤c#sÕ¤ýE í¤;¤Á¤ ?÷ãí¤÷Aõ±¤ësÕ¤ýE yí¤½Á¤¤Á÷ãí¤sU÷A¤ëu¤{y í¤
e¤ ?¤+÷ãí¤Õ'uç¤{y í¤
¤+Y÷ãí¤SÝw¤{y í¤¹µ÷ãí¤GÍ ûí¤'é÷ãí¤Ù© yí¤§!÷ãí¤Û» í¤%Ç÷í¤[ yí¤¥÷ãí¤[á ûí¤£i÷ãí¤] yí¤£÷ãí¤]ó í¤£÷í¤_¹{y¤õ± í¤Á
¤¡Ù÷ãí¤ÑýE¤s í¤
ÓѤ/}÷ãí¤Ñ}¡¤ó¯ í¤=Ѥ¯7
ã¤Á÷ãí¤ó¯}¡¤Ó}/¤ñù1¤qC í¤=O¤Q¤-kѤ
Ó÷ãí¤ñùÿפÓñ}¡¤ñgñù¤ÿ í¤u¤Ñ¤-«¤÷ãí¤ñ³¤Óñÿ¤ñs¤{ í¤©Á¤Ou¤%¤O÷ãí¤1ñù¤S[ÿ¤1õ±¤yéq£í¤e
Ó¤_ ¿¤«u¤+Û=¤Ï÷ãí¤³qC¤Õ§ÿ¤ÿ×÷äù!ó¯¤u3í¤+w¤'u¤+Û÷¤«÷í¤ÿu¤Õ'ÿ¤mYsU¤qC í¤=+¤'÷¤+Ye¤u÷í¤ÿ÷äSÝÿפmÙõ±¤ñ í¤cÁ¤¹«¤¥ ¿¤÷÷ãí¤Ù©1¤mYõ?¤ñóií¤eÁ¤«e¤'夹3¤e÷ãí¤uç{¤GMñ¤mYu¤ÿ×õ±¤u3í¤+e¤¯·¤½W
¤+÷ãí¤ó¯ÿפAó¯¤Ño÷äqC í¤= ¿¤/
Ó¤¿ù«¤
Óí¤sÕñù¤ñùñù¤Ï÷õ±¤GÍ í¤¹µÁ¤1¤¤÷ãí¤s qC¤1s¤Íå÷Ã¤Ç í¢UoMùÁ¤Ñ¤
Q÷ãí¤ó/sÕ¤}¡õ±¢ çÏÝ í¢×ËO±¤;e¤÷ãí¤ñùu¤ýEuç¢ çÁ3í¢ÿA
sí¤w-qâ e³Aí í¢ÿÃÁ¿¤SwMí¤sUõ?¢ e³C¥uç¤swWí¢uùÅ÷ãí¢ í{Åýwéí¢uùEÑ÷ãí¢ k5ǵ í¢uùG©÷ãí¢ ëëÙ í¢uùÙá÷í¢ ëëYÅ í¢uÛ¹÷ãí¢ ëë[Åí¢uù[ñ÷ãí¢ íÝÕÅí¢uù]É÷ãí¢ mÇß í¢w¯_)í¢ k5_å í¢UÑÙwMí¢ i#Q½wéí¢gÓ÷ãí¢ W}ÓõwWí¢ÃSé÷ãí¢ ×·ÕÍ3í¢ÃU¡÷ãí¢ ×·×
í¢Ã×ù÷ãí¢ ×·WÝ í¢Ãé±÷ãí¢ W}i/í¢ë÷ãí¢ ëëíí¢UkÁ÷ãí¢ í{í¥í¢uùmsuí¢ yUmý í¢sïÑå¯í¢moµ÷A¤åipdpÔ © [ [` |
653ecd2daa67f36c0f2f5ca0fcc459daec83c61c | 492e6a532c132cc616b9419d280147699f3a8412 | /man/rowCoxTests.Rd | 524ae6a9056e36d813716d882221cefc1330ee36 | [] | no_license | zhangyuqing/simulatorZ | 161b61029e7ef46af650ee7db650cc88a254692b | 019e59514d56caebcde6affa34f9caee224518a1 | refs/heads/master | 2021-06-05T00:38:59.760802 | 2020-10-18T18:22:46 | 2020-10-18T18:22:46 | 22,471,700 | 4 | 3 | null | 2014-09-14T02:56:57 | 2014-07-31T14:14:52 | R | UTF-8 | R | false | false | 1,681 | rd | rowCoxTests.Rd | \name{rowCoxTests}
\alias{rowCoxTests}
\title{rowCoxTests}
\description{Method for performing a separate Cox regression for each row of X}
\usage{rowCoxTests(X, y, option = c("fast", "slow"), ...)}
\arguments{
\item{X}{Gene expression data. The following formats are available:
matrix Rows correspond to observations, columns to variables.
data.frame Rows correspond to observations, columns to variables.
ExpressionSet rowCoxTests will extract the expressions using exprs().}
\item{y}{Survival Response, an object of class:
Surv if X is of type data.frame or matrix
character if X is of type ExpressionSet.
In this case y is the name of the survival
response in the phenoData of X. If survival
time and indicator are stored separately
in the phenoData one can specify a two-element
character vector the first element representing
the survival time variable.}
\item{option}{"fast" loops over rows in C, "slow" calls coxph
directly in R. The latter method may be used if
something goes wrong with the "fast" method.}
\item{\dots}{currently unused}
}
\value{dataframe with two columns: coef = Cox regression
coefficients, p.value =
Wald Test p-values. Rows correspond to the rows of X.}
\author{Yuqing Zhang, Christoph Bernau, Levi Waldron}
\examples{
#test
##regressor-matrix (gene expressions)
X<-matrix(rnorm(1e6),nrow=10000)
#seed
set.seed(123)
#times
time<-rnorm(n=ncol(X),mean=100)
#censoring(1->death)
status<-rbinom(n=ncol(X),size=1, prob=0.8)
##survival object
y<-Surv(time,status)
## Do 10,000 Cox regressions:
system.time(output <- rowCoxTests(X=X,y=y, option="fast"))
}
|
5a4512d36de5340f2208255de4ad4a18022f1030 | a5ea9d5ec0d70bfa722cfd5e49ce08119e339dda | /man/grasp.pred.export.Rd | b7c9b6fcf2df4d638d9c693b0a41ba558d3507d1 | [] | no_license | cran/grasp | c46f16a28babb6cbed65aadbe2ddecc1a7214fd2 | d57d11504ee99616e55a1a9c49e337cf1caf139d | refs/heads/master | 2021-01-23T16:35:38.670044 | 2008-10-10T00:00:00 | 2008-10-10T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 684 | rd | grasp.pred.export.Rd | \name{grasp.pred.export}
\alias{grasp.pred.export}
\title{ Internal GRASP function }
\description{
This function will export the predictions made in grasp.pred() and stored in gr.predmat into an ASCII file ready to be read by Import Grid in ArcView or ArcGIS.
}
\usage{
grasp.pred.export(gr.Yi)
}
\arguments{
\item{gr.Yi}{A vector containing the selected responses}
}
\details{
The resolution of the exported GRID can be set on page R of the GUI. When several observations merge into a single new cell, a mean prediction is calculated (agglomeration).
}
\author{ Anthony.Lehmann@unige.ch }
\seealso{ grasp \code{\link{grasp}}, grasp.in \code{\link{grasp.in}}}
\keyword{models} |
c1e4da6ba01f0cbbfec40ea0d670f1ac8a2f195a | 29585dff702209dd446c0ab52ceea046c58e384e | /EcoGenetics/R/eco.2geneland.R | 7c37d78e9988f5b2b5119763d6ddac3588c2eec2 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,474 | r | eco.2geneland.R | #' Creating input data for Geneland with an ecogen object
#'
#' @description This function creates four data frames in the working
#' directory (XY.txt, NAMES.txt, P.txt, G.txt) which can be loaded
#' in Geneland.
#' @param eco Object of class "ecogen".
#' @param ncod Number of digits coding each allele
#' (e.g., 1: x, 2: xx, 3: xxx, etc.).
#' @param ploidy Ploidy of the data.
#' @return XY.txt Matrix with coordinates.
#' @return NAMES.txt Matrix with row names.
#' @return P.txt Matrix with phenotypic data.
#' @return G.txt Matrix with genotypic data.
#' @examples
#'
#' \dontrun{
#'
#' data(eco.test)
#' eco.2geneland(eco, 1)
#'
#' }
#'
#' @author Leandro Roser \email{leandroroser@@ege.fcen.uba.ar}
#' @export
# Register eco.2geneland as a generic whose default method writes the four
# plain-text input files Geneland expects into the current working directory.
# NOTE(review): the def contains no standardGeneric() call, so setGeneric()
# promotes this function to the generic's default method.
setGeneric("eco.2geneland",
           function(eco, ncod = NULL, ploidy = 2) {
             # XY.txt: spatial coordinates, one row per individual, no header
             write.table(eco@XY, "XY.txt", quote = FALSE,
                         row.names = FALSE, col.names = FALSE)
             # NAMES.txt: individual identifiers taken from the XY row names
             write.table(rownames(eco@XY), "NAMES.txt", quote = FALSE,
                         row.names = FALSE, col.names = FALSE)
             # P.txt: phenotypic data matrix, values only
             write.table(eco@P,"P.txt", quote = FALSE, row.names = FALSE,
                         col.names = FALSE)
             # G.txt: genotypes converted to a one-column-per-allele layout
             # by the package helper int.loc2al()
             write.table(int.loc2al(eco@G, ncod = ncod, ploidy = ploidy), "G.txt",
                         quote = FALSE, row.names = FALSE, col.names = FALSE)
             # Sentinel return value signalling the files were written
             return("done!")
           })
|
50b23cd7a287a017daccde0bb4261ad50e2a1e9b | 03d77a50c862638cae0a60fcd0eb4e415cabc933 | /Fish 558 Workshop/Workshop Day 3/ex3 Class.R | 11c6c7baff6a3dacd832e02a870eb14f64f5a3c5 | [] | no_license | DanOvando/FISH-558 | 66ef5135449edddf1d025aefec0db3c1a55be4e0 | fd889bdae6425f06991f5019f5caf27f50b82ec0 | refs/heads/master | 2021-01-02T09:02:00.004504 | 2015-12-19T01:27:55 | 2015-12-19T01:27:55 | 42,471,204 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,180 | r | ex3 Class.R | # Ex3 <- function()
# Driver statements: run the SIR for both population models and inspect the
# Schaefer posterior sample graphically.
# NOTE(review): quartz() is macOS-only, and ggplot2 must already be attached;
# the Ex3() wrapper these statements replace is commented out above.
# Call for the logistic (Schaefer) model
set.seed(443)
SchModel <- DoSir(Nout=100,Model='Schaefer')
head(SchModel)
quartz()
# Joint posterior sample of K and r, then the same points coloured by
# negative log-likelihood, then the marginal NegLogLike distribution
ggplot(SchModel,aes(K,r)) + geom_hex()
ggplot(SchModel,aes(x = K,y = r)) + geom_point(aes(fill = NegLogLike), shape = 21, size = 2) + scale_fill_gradient(low = 'green',high = 'red')
ggplot(SchModel,aes(NegLogLike)) + geom_histogram()
# Call for the exponential model
ExpoModel <- DoSir(Nout=100,Model= 'ExponModel')
# =================================================================================
# Sampling-Importance-Resampling (SIR) over uniform priors of a
# surplus-production model, conditioned on the survey series loaded by
# ReadData().
#
# Args:
#   Nout:  number of parameter vectors to retain (rows of the output).
#   Model: dynamics variant forwarded to PopModel(); 'Schaefer' or
#          'ExponModel'.
#
# Returns: data frame with one retained draw per row (columns K, r, Pop1965,
#   AddCV, NegLogLike) plus AveLike, the mean first-stage likelihood over all
#   tested draws (identical in every row).
#
# NOTE(review): uses join(), presumably plyr::join -- confirm the package is
# attached before calling.
DoSir <- function(Nout=1000,Model)
{
  # Read in the basic data
  TheData <- ReadData()
  Yr1 <- TheData$CatchYr[1]   # first catch year; anchors survey year indices
  Catch <- TheData$CatchVal
  SurveyEst <- TheData$SurveyEst
  SurveyCV <- TheData$SurveyCV
  SurveyYr <- TheData$SurveyYr
  Nyears <- length(Catch)     # NOTE(review): assigned but never used below
  years <- TheData$CatchYr
  # Storage for the retained parameter vectors and their likelihoods
  Vals <- as.data.frame(matrix(0,ncol=5,nrow=Nout))
  colnames(Vals) <- c('K','r','Pop1965','AddCV','NegLogLike')
  # Running total of first-stage likelihood and number of prior draws tested
  AveLike <- 0
  Ntest <- 0
  # SIR bookkeeping: one vector is saved each time the cumulative likelihood
  # (Cumu) passes Threshold (= exp(0) = 1)
  Threshold <- exp(0)
  Cumu <- 0
  Ndone <- 0
  while (Ndone < Nout)
  {
    # Draw one parameter vector from the (uniform) priors
    r <- runif(1,0,.15)
    Pop1965 <- runif(1,10000,15000)
    AddCV <- runif(1,.1,.2)
    K <- runif(1,20000,50000)
    # Project the population forward under this draw
    Pop <- PopModel(Catch = Catch,r = r,K = K,years = years,InitPop = Pop1965,ExponModel = Model)
    # Attach the survey observations to the projected trajectory by year
    survey <- data.frame(TheData$SurveyYr,TheData$SurveyEst,TheData$SurveyCV)
    colnames(survey) <- c('year','SurveyEst','SurveyCV')
    Pop <- join(Pop,survey, by = 'year')
    # Negative log-likelihood of the survey data, then the likelihood itself;
    # the -32.19 offset rescales the likelihood, presumably to keep it in a
    # numerically convenient range for the Threshold comparison -- TODO
    # confirm the constant against the data
    NegLogLike <- Likelihood(Pop = Pop$n,SurveyYr-Yr1+1,SurveyEst,SurveyCV,AddCV)
    TheLike <- exp(-1*NegLogLike-32.19)
    # Accumulate and decide how many copies of this draw to keep
    Cumu <- Cumu + TheLike
    AveLike <- AveLike + TheLike
    Ntest <- Ntest +1
    while (Cumu > Threshold & Ndone < Nout)
    {
      Ndone <- Ndone + 1
      Cumu <- Cumu - Threshold
      Vals[Ndone,] <- data.frame(K,r,Pop1965,AddCV,NegLogLike)
    }
  }
  # Average first-stage likelihood over all tested draws (same in every row)
  Vals$AveLike <- AveLike/Ntest
  return(Vals)
}
# =================================================================================
# Negative log-likelihood of lognormal survey observations given a predicted
# population trajectory.
#
# Args:
#   Pop:       numeric vector of predicted population sizes, one per model year.
#   SurveyYr:  indices into Pop for the years with survey observations.
#   SurveyEst: survey abundance estimates for those years.
#   SurveyCV:  sampling CVs of the survey estimates.
#   AddCV:     additional CV combined with SurveyCV in quadrature.
#
# Returns: scalar negative log-likelihood (additive constants independent of
#   the parameters are omitted).
Likelihood <- function(Pop, SurveyYr, SurveyEst, SurveyCV, AddCV)
{
  # Total CV: survey sampling CV plus additional CV, combined in quadrature
  UseCV <- sqrt(SurveyCV^2 + AddCV^2)
  # Predictions for the surveyed years only
  Preds <- Pop[SurveyYr]
  # Lognormal negative log-likelihood contribution of each survey point
  Residuals <- log(UseCV) + 0.5 * (log(Preds) - log(SurveyEst))^2 / UseCV^2
  # Return the total explicitly (the original relied on the invisible value
  # of a trailing assignment)
  sum(Residuals)
}
# =================================================================================
# Project a population trajectory forward under a chosen dynamics model.
#
# Args:
#   Catch:      numeric vector of annual catches (same length as years).
#   r:          intrinsic growth rate.
#   K:          carrying capacity (used only by the Schaefer model).
#   years:      vector of calendar years; its length sets the projection span.
#   InitPop:    population size in the first year.
#   ExponModel: model name: 'ExponModel' (exponential growth) or 'Schaefer'
#               (logistic surplus production); any other value leaves the
#               trajectory NA after the first year.
#
# Returns: data frame with columns year, catch and n (projected population,
#   floored at 1e-5 so it never goes non-positive).
PopModel <- function(Catch,r,K,years,InitPop,ExponModel)
{
  time <- length(years)
  output <- as.data.frame(matrix(NA,nrow = (time),ncol = 3))
  colnames(output) <- c('year','catch','n')
  output$catch <- Catch
  output$n[1] <- InitPop
  output$year <- years
  # seq_len(time)[-1] is empty when time == 1; the original 2:time would
  # have iterated over c(2, 1) and indexed out of bounds in that case
  if (ExponModel == 'ExponModel')
  {
    for (t in seq_len(time)[-1])
    {
      output$n[t] <- pmax(1e-5,(1+r)*output$n[t-1] - output$catch[t-1])
    }
  }
  if (ExponModel == 'Schaefer')
  {
    for (t in seq_len(time)[-1])
    {
      output$n[t] <- pmax(1e-5,output$n[t-1] + (output$n[t-1]*r)*(1-output$n[t-1]/K) - output$catch[t-1])
    }
  }
  return(output)
}
# =================================================================================
# Load the survey series (Ex3a.csv) and catch series (Ex3b.csv) for the
# exercise.
# NOTE(review): depends on a global variable `lecture` naming the
# sub-directory under 'Fish 558 Workshop/'.
#
# Returns: named list with SurveyYr, SurveyEst, SurveyCV (from Ex3a.csv)
#   and CatchYr, CatchVal (from Ex3b.csv).
ReadData <- function()
{
  survey_path <- paste0('Fish 558 Workshop/', lecture, '/Ex3a.csv')
  catch_path <- paste0('Fish 558 Workshop/', lecture, '/Ex3b.csv')
  survey_df <- read.csv(survey_path, header = TRUE, stringsAsFactors = FALSE)
  catch_df <- read.csv(catch_path, header = TRUE, stringsAsFactors = FALSE)
  # Columns are positional: year, estimate, CV (survey) and year, catch
  list(SurveyYr = survey_df[[1]],
       SurveyEst = survey_df[[2]],
       SurveyCV = survey_df[[3]],
       CatchYr = catch_df[[1]],
       CatchVal = catch_df[[2]])
}
# =================================================================================
# NOTE(review): Ex3 is never defined in this file -- its definition at the top
# is commented out -- so this call errors at run time; confirm intent.
Ex3()
|
cdc09ddd67bcaf42fdde26beb8f27dc6fa7782c0 | 3b5479d2035b0955de9e9240d65c65ffb560a131 | /3_analysisi/frequency_table/plot.R | f5ae717cb92b843c8dd4bbf488e03b5650d4d840 | [] | no_license | elara7/Application-of-Topic-Model-in-Evolution-of-Financial-Texts | b7106c5f69f2a125865ab3f6ae2517c43f9af29e | 49452f89f3a4c4ca75cfffd40ab9cc8bab2fd69f | refs/heads/master | 2020-03-17T03:48:02.274349 | 2018-05-13T08:29:26 | 2018-05-13T08:29:26 | 133,250,919 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,236 | r | plot.R | require(stringr)
# Build word-frequency plots and word clouds for the merged corpus and for
# each per-stock frequency table, saving PNG snapshots via webshot.
# NOTE(review): paths are hard-coded Windows locations; stringr (attached in
# the line above), data.table, wordcloud2, htmlwidgets, readr and webshot
# must all be installed. require() only warns if a package is missing.
require(data.table)
require(wordcloud2)
require(htmlwidgets)
# Input directory with the frequency CSVs and output directory for PNGs
file_path <- 'C:\\Elara\\Documents\\paper\\3_analysisi\\frequency_table\\'
pic_path <- 'C:\\Elara\\Documents\\paper\\3_analysisi\\frequency_table\\pic\\'
all_file <- str_c(file_path,'merged_frequency_all.csv')
all_data <- fread(all_file,encoding = 'UTF-8')
names(all_data) <- c('word','cnt')
# Overall corpus: frequency curve of the top 100 words (counts in thousands;
# axis labels are intentionally in Chinese)
plot(all_data$cnt[1:100]/1000,type='o',ylab='词频(千次)',xlab='词编号(按词频降序排列)')
# Word cloud of the top 100 words, rendered to HTML then captured as PNG
al <- wordcloud2(all_data[1:100,],minRotation = 0, maxRotation = 0)
saveWidget(al,"1.html",selfcontained = FALSE)
webshot::webshot("1.html",str_c(pic_path,"0.png"),vwidth = 1400, vheight = 900, delay =10)
# One word cloud per stock frequency file; record the file list for reference
stockfiles <- dir(str_c(file_path,'stocks\\'))
write.csv(as.data.frame(stockfiles),'C:\\Elara\\Documents\\paper\\3_analysisi\\frequency_table\\pic\\name.csv')
st <- NULL
# seq_along() is safe when no stock files are found, unlike 1:length()
for (n in seq_along(stockfiles))
{
  stock_file <- str_c(file_path,'stocks\\',stockfiles[n])
  stock_data <- readr::read_csv(stock_file,col_names = c('word','cnt'))
  st <- wordcloud2(stock_data[1:100,],minRotation = 0, maxRotation = 0)
  saveWidget(st,"1.html",selfcontained = FALSE)
  webshot::webshot("1.html",str_c(pic_path,n,".png"),vwidth = 1400, vheight = 900, delay =10)
}
|
3e2875c53e96261a78a0cac81ddac124d528a727 | 3fee6d185198ef39917b4fa30c643a4ecee4df15 | /standings3pt.r | ad005f0f8a3d52c1db80de6ef97d25e1018ad95a | [
"Apache-2.0"
] | permissive | zzuum/3ptNHLstandings | f67216ce3f18b23a782515dd32b2453b8c8acf49 | 4162dad354d273e6e1ab3831ccdc6e582d74eaa0 | refs/heads/master | 2020-03-09T08:55:56.144037 | 2018-04-09T04:01:29 | 2018-04-09T04:01:29 | 128,700,269 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,950 | r | standings3pt.r | setwd('~/Projects/NHL/')
# Compute alternative NHL standings where a regulation win is worth 3 points.
# Expects 2018standings.csv (with Overtime, Shootout, Overall record columns
# as "W-L" strings and a Division column) in the working directory set above.
standings.all <- read.csv('2018standings.csv')

# Extract one side of a "W-L" record string as a numeric vector.
# record: character (or factor) vector of records such as "10-3".
# part:   1 for the wins component, 2 for the losses component.
record_part <- function(record, part) {
  as.numeric(vapply(strsplit(as.character(record), '-', fixed = TRUE),
                    `[`, character(1), part))
}

# Overtime wins and losses
standings.all$OTW <- record_part(standings.all$Overtime, 1)
standings.all$OTL <- record_part(standings.all$Overtime, 2)
# Shootout wins and losses
standings.all$SOW <- record_part(standings.all$Shootout, 1)
standings.all$SOL <- record_part(standings.all$Shootout, 2)
# Total wins from the overall record; corrected to regulation-only below
standings.all$RW <- record_part(standings.all$Overall, 1)
# Regulation wins = total wins minus OT and shootout wins
standings.all$RW <- standings.all$RW - standings.all$OTW - standings.all$SOW
# Calculations! 3 points for a regulation win, 2 for an OT or SO win,
# 1 for an OT or SO loss. ROW (regulation + overtime wins) is the points
# tie breaker.
standings.all$ROW <- standings.all$RW + standings.all$OTW
standings.all$points <- 3 * standings.all$RW + 2 * standings.all$OTW +
  2 * standings.all$SOW + 1 * standings.all$OTL + 1 * standings.all$SOL
# Order the dataframe by points, then ROW, both descending
standings.all <- standings.all[with(standings.all, order(
  -points, -ROW
)), ]
# Looking at standings team by team
colnames(standings.all)[2] <- 'Team'
print('Pacific Standings:')
standings.all[standings.all$Division == 'P', c('X', 'points')]
print('Central Standings:')
standings.all[standings.all$Division == 'C', c('X', 'points')]
print('Metro Standings:')
standings.all[standings.all$Division == 'M', c('X', 'points')]
print('Atlantic Standings:')
standings.all[standings.all$Division == 'A', c('X', 'points')]
492d8a7f671f97f77c66c0371eb6b39704b18661 | ffb4618297e98c856e7f3c51e64b3481455ab3bf | /Brier_analysis.R | 00fd84079d1d767bf730b950cc0dbec4d162b3ce | [] | no_license | tristinb/nfl-reporter-predictions | 3b62005c1ed34c72897319bc90fd385cfda35350 | f05d5f64a965ef343aea8ecb2c4be36ece6174dd | refs/heads/master | 2021-05-12T07:33:53.173942 | 2018-01-12T14:29:49 | 2018-01-12T14:29:49 | 117,249,111 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,143 | r | Brier_analysis.R |
#set wd
data <- read.csv('pundit_data.csv', header=TRUE)
# Remove intercept; just want averages
mod <- lm(squared_error~ -1+team, data= data)
results <- coef(summary(mod))[c(1:32),] # 32 teams
names <- row.names((coef(summary(mod)))) # Get names
row.names(results) <- substring(names[1:32], 5) # Remove first 4 chars
results <- results[,1] # Only care about coefs
# Order results from lowest to highest
results <- results[order(results)]
pdf('Brier.pdf') # Save image
barplot(results,las=2, ylab='Brier Score', main='Brier Score for Each Team Reporter \n (Lower Scores Indicate Better Predictions)')
dev.off()
# More complex model (control for wins, week)
mod_comp <- lm(squared_error~ -1+team+tot_wins+as.factor(week), data= data)
# Check residuals for normality
mod_stnd <- rstandard(mod_comp)
hist(mod_stnd)
qqnorm(mod_stnd)
qqline(mod_stnd)
results_comp <- coef(summary(mod_comp))[1:32,]
names_comp <- row.names((coef(summary(mod_comp))))
row.names(results_comp) <- substring(names_comp[1:32], 5)
results_comp <- results_comp[,1]
results_comp <- results_comp[order(results_comp)]
pdf('adjusted_Brier.pdf')
barplot(results_comp,las=2, ylab='Adjusted Brier Score', main='Adjusted Brier Score for Each Team Reporter')
dev.off()
anova(mod,mod_comp,test='F')
# Put table together
data2 <- read.csv('pundit_standings.csv',header=T)
data2 <- data2[order(data2$pundit_team),] # Alphabetical order by team
# Get results of comp in alphabetical order
mod_comp <- lm(squared_error~ -1+team+tot_wins+as.factor(week), data= data)
results_comp <- coef(summary(mod_comp))[1:32,]
names_comp <- row.names((coef(summary(mod_comp))))
row.names(results_comp) <- substring(names_comp[1:32], 5)
results_comp <- results_comp[,1]
data2 <- cbind(data2,results_comp) # Now combine (both in alph order)
data2 <- data2[order(data2$results_comp),] # Order by adj_brier score
data2$standing_adj <- c(1:32) #ranking 1 through 32
data_sub <- data2[,c(11,2,1,3,5,10)] # Reorder
colnames(data_sub) <- c('Standing','Team','Reporter','Average Spread','Brier Score','Adjusted Brier Score')
library(xtable)
print(xtable(data_sub,digits=3),include.rownames=FALSE)
|
1c46a68f30acfed3442bf9cd273860efa63df83b | 9db8386eb77a7ce2d3ddfd18c6ad3bab2240b96e | /man/Compare_Plot_Function.Rd | 883f6d50ea67a78ed01f0c7d9d4abe2c756f892a | [] | no_license | azavez/azavezHW6 | 447cfae0851c6c5f828e46a225659ca6e3f8a065 | 9583de0b55eae0ad939755b4da13bfd97cf5d663 | refs/heads/master | 2020-06-17T14:28:10.255877 | 2016-12-16T17:40:46 | 2016-12-16T17:40:46 | 74,995,204 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,905 | rd | Compare_Plot_Function.Rd | \name{compare.plot}
\alias{compare.plot}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Comparison Barplot Function
}
\description{
This function is specifically designed for the URMC fitness dataset. It compares the averages for one group to the averages in a second group.
}
\usage{
compare.plot(x, compare_column, group1, group2, group1.label = "Group 1", group2.label = "Group 2", ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
The data frame containting the data}
\item{compare_column}{
The column of the data frame that contains the data that will be compared.
}
\item{group1}{
The first comparison group. It should be in the form of a list of character strings.
}
\item{group2}{
The second comparison group. It should be in the form of a list of character strings.
}
\item{group1.label}{
Legend text for first comparison group. The default is "Group 1".
}
\item{group2.label}{
Legend text for second comparison group. The default is "Group 2".
}
\item{...}{
This function takes additional arguments for barplot function.
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
Alexis Zavez
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
data(sample.data)
compare.plot(sample.data, sample.data$Day, group1 = "Wednesday", group2 = "Monday")
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
3e922f0e722d523bcd695e7cfa90c33300fc4f3b | e2ca393f7aec100bd648505925cb2e9d6c725473 | /01_plotly.R | 5fcecbdb92b9f4ff1aa039e51e94f165de023e67 | [] | no_license | ComunidadBioInfo/minicurso_mayo_2021 | 9a067f2b6beb0d73febb80796ddf30a23184a264 | b7305b56636bad0a147b4147981f81ccd47c4818 | refs/heads/main | 2023-05-31T00:23:34.618062 | 2021-06-01T00:14:08 | 2021-06-01T00:14:08 | 328,855,753 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 790 | r | 01_plotly.R | ## ----"cargar_librerias_plotly"-----------------------------------------------------------------------------------------------------
## Carguemos las librerías que vamos a usar para esta parte del mini curso
library("palmerpenguins")
library("ggplot2")
library("plotly")
## ----"plotly_ejemplo"--------------------------------------------------------------------------------------------------------------
## De ?plotly::ggplotly
ggpenguins <- qplot(bill_length_mm,
body_mass_g,
data = palmerpenguins::penguins,
color = species
)
## Versión estática creada con ggplot2
ggpenguins
## ----"plotly_ejemplo_parte2"-------------------------------------------------------------------------------------------------------
## Ahora en versión interactiva
ggplotly(ggpenguins)
|
3402bce3d0a1ed72b9fa0d6e6dad1642587de8b9 | a82ebc7c1dcc3eb671542f10645ab3d457853565 | /r_modular/classifier_libsvm_modular.R | 7e752f314b8676b16818536783a68f086ed1cefe | [] | no_license | joobog/shogun-eval | dac24f629744521760061c7979aa579129daa666 | 12b1ba2a67d5c661c6a11580634fb1a036e61af2 | refs/heads/master | 2021-03-12T23:24:41.686252 | 2016-11-23T10:04:04 | 2016-11-23T10:04:04 | 31,391,835 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,399 | r | classifier_libsvm_modular.R | # In this example a two-class support vector machine classifier is trained on a
# toy data set and the trained classifier is used to predict labels of test
# examples. As training algorithm the LIBSVM solver is used with SVM
# regularization parameter C=1 and a Gaussian kernel of width 2.1 and the
# precision parameter epsilon=1e-5. The example also shows how to retrieve the
# support vectors from the train SVM model.
#
# For more details on LIBSVM solver see http://www.csie.ntu.edu.tw/~cjlin/libsvm/
library(shogun)
fm_train_real <- t(as.matrix(read.table('../data/fm_train_real.dat')))
fm_test_real <- t(as.matrix(read.table('../data/fm_test_real.dat')))
label_train_twoclass <- as.double(read.table('../data/label_train_twoclass.dat')$V1)
# libsvm
print('LibSVM')
feats_train <- RealFeatures()
dummy <- feats_train$set_feature_matrix(fm_train_real)
feats_test <- RealFeatures()
dummy <- feats_test$set_feature_matrix(fm_test_real)
width <- 2.1
kernel <- GaussianKernel(feats_train, feats_train, width)
C <- 1.017
epsilon <- 1e-5
num_threads <- as.integer(2)
labels <- BinaryLabels()
print(label_train_twoclass)
dump <- labels$set_labels(label_train_twoclass)
svm <- LibSVM(C, kernel, labels)
dump <- svm$set_epsilon(epsilon)
dump <- svm$parallel$set_num_threads(num_threads)
dump <- svm$train()
dump <- kernel$init(feats_train, feats_test)
lab <- svm$apply()
out <- lab$get_labels()
|
f3fbf1cab89ac290642acaf385f5b45bf93c37c7 | cb7eaa28fbe6e970fe6d564d2d53bc8d8a333568 | /Rcode_func.spec.R | 13297d716fa500745f0e58e2b30eecbae2ab8b6c | [] | no_license | MarlenF/repertoire-orang | 7531a606e8c9cae7ffe94b4071b31e63a1cc7b6e | 500d59072156fdb0c43dd961623085072c586510 | refs/heads/main | 2023-04-11T20:27:22.190379 | 2021-05-05T10:12:18 | 2021-05-05T10:12:18 | 320,832,060 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 4,912 | r | Rcode_func.spec.R | ## R code for analysis of functional specificity (3)
rm(list=ls())
setwd("C:/Users/Marlen Fröhlich/Documents/R")
fun <- read.table ("dataset_func.spec.csv", header=TRUE, sep=",", stringsAsFactors=TRUE)
xx=as.data.frame(na.omit(fun[, c("species","signal", "setting","dominance_con", "max_cases", "context", "no_cases", "no_subjects")]))
library(arm)
library(car)
test.data=xx
test.data$z.subjects=as.vector(scale(test.data$no_subjects))
test.data$z.sample=as.vector(scale(test.data$no_cases))
test.data$context_pl=as.numeric(test.data$context==levels(test.data$context)[4])
contr=glmerControl(optimizer="bobyqa", optCtrl=list(maxfun=10000000))
dom_bin = cbind(test.data$max_cases, test.data$no_cases-test.data$max_cases)
contr=glmerControl(optimizer="bobyqa", optCtrl=list(maxfun=10000000))
# collinearity: max vif = 2.7
vif(lm(rnorm(nrow(test.data)) ~ species + setting + log(no_cases) + log(no_subjects) , data = test.data))
#run the full model
mod.fun = glmer(formula = dom_bin ~ species * setting + context_pl + z.subjects + z.sample +
+(1|signal), family =binomial, control=contr,
data = test.data)
#run the null model
null.fun = glmer(formula = dom_bin ~ context_pl + z.subjects + z.sample +
+(1|signal), family =binomial, control=contr,
data = test.data)
length(residuals(mod.fun)) #128
length(residuals(null.fun)) #128
#Likelihood ratio test
as.data.frame(anova(null.fun, mod.fun, test="Chisq"))
# get chi square and p-values
drop1(mod.fun, test ="Chisq")
#post hoc sidak test of interaction effect
require(lsmeans)
lsm <- lsmeans(mod.fun, list(pairwise ~ setting|species, pairwise ~ species|setting))
lsm
#get estimates and SE
round(summary(mod.fun)$coefficients, 3)
####################3
# dummycoding for plot
context_pl.c= test.data$context_pl- mean(test.data$context_pl)
######################################################
path <- "C:/Users/Marlen Fröhlich/Documents/R/"
#plot model
plot.fun = glmer(formula = dom_bin ~ species * setting + context_pl.c + z.subjects + z.sample +
+(1|signal), family =binomial, control=contr,
data = test.data)
setwd("C:/Users/Marlen Fröhlich/Documents/R/roger/")
source("local_jitter_AlexHausmann.R")
require(effects)
test.data$XPos <- ifelse(test.data$species=="Bor",1,2)
EF <- Effect(c("species","setting"),plot.fun,se=TRUE)
dat.EF <- as.data.frame(EF)
# Add colour column
test.data$colourBG <- ifelse(test.data$setting=="wild",rgb(255, 210, 128, maxColorValue=255),rgb(128, 128, 255, maxColorValue=255))
test.data$colourL <- ifelse(test.data$setting=="wild",rgb(255, 192, 77, maxColorValue=255),rgb(77, 77, 255, maxColorValue=255))
# Open empty plot (IMPORTANT: THE PLOT HAS TO BE OPEN BEFORE RUNNING THE FUNCTION)
path <- "C:\\Users\\Marlen Fröhlich\\Documents\\R\\"
svg(filename=paste0(path,"FunSpecN2.svg",sep=""), height=90/25.4, width=90/25.4, family="Arial", pointsize=9)
OF <- 0.1
par(mar=c(2.7, 3.2, 0.2, 0.2), mgp=c(1.3, 0.2, 0), tcl=-0.25, cex=1)
plot(c(0.5,2.5),c(0.35,1), type="n", axes=FALSE, xlab="Orang-utan species", ylab="") ; par(new=TRUE)
plot(c(0.5,2.5),c(0.35,1), type="n", axes=FALSE, xlab="", ylab="Functional specificity", mgp=c(2.2, 0.2, 0))
X0 <-local_jitter(fact_coord = test.data$XPos, gradual_coord = test.data$dominance_con, categories = as.character(test.data$setting), factorial_axis = 1, buffer = 0.45, sizes = sqrt(test.data$no_cases)/4, verbose=F, iterations=1000)
points(X0,test.data$dominance_con,cex=sqrt(test.data$no_cases)/4, pch=21, bg=test.data$colourBG, col=test.data$colourL)
arrows(1-OF,dat.EF$lower[1],1-OF,dat.EF$upper[1],code=3,length=0.1,angle=90)
points(x=1-OF,y=dat.EF$fit[1], pch=23, col="black", bg="blue", cex=3)
arrows(1+OF,dat.EF$lower[3],1+OF,dat.EF$upper[3],code=3,length=0.1,angle=90)
points(x=1+OF,y=dat.EF$fit[3], pch=23, col="black", bg="orange", cex=3)
arrows(2-OF,dat.EF$lower[2],2-OF,dat.EF$upper[2],code=3,length=0.1,angle=90)
points(x=2-OF,y=dat.EF$fit[2], pch=23, col="black", bg="blue", cex=3)
arrows(2+OF,dat.EF$lower[4],2+OF,dat.EF$upper[4],code=3,length=0.1,angle=90)
points(x=2+OF,y=dat.EF$fit[4], pch=23, col="black", bg="orange", cex=3)
axis(1,at=c(1,2), label=c("Bornean","Sumatran"), tcl=-0.25)
axis(2,at=seq(0,1,by=0.2), label=c("0.0","0.2","0.4","0.6","0.8","1.0"), tcl=-0.25, las=2, mgp=c(1.2, 0.4, 0))
legend("topright", pt.bg=c("blue","orange"), pch=23, legend=c("captive","wild"), bty="n", pt.cex=2)
box()
dev.off()
#border=c(rgb(77, 77, 255, maxColorValue=255),rgb(255, 192, 77, maxColorValue=255)), fill=c(rgb(128, 128, 255, maxColorValue=255), rgb(255, 210, 128, maxColorValue=255))
domrep=aggregate(x=test.data$dominance_con, by=test.data[, c("species", "setting")], FUN=mean)
domrep
|
ff12232e2b384c642f559b19aa682da8d8ec57d0 | 456f00cd7e3d19f5be19313b4c1d0bd35b80d626 | /plants.R | 6d588ac9919b8867adfd0aa597d3e7a26470f17c | [] | no_license | pearselab/r-world-aspri951 | af47fb158149b55d059c0ba85ce2858637778c42 | 4988abe28d5c9d3835eebfb56a1e152264827466 | refs/heads/master | 2020-04-06T04:28:53.809611 | 2017-03-17T22:56:51 | 2017-03-17T22:56:51 | 72,656,588 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 34,501 | r | plants.R | #plants
#setup.plants = a function that checks that all the inputs for reproduction, survival, and competition parameters are correct
#checks to see if repro and surv vectors are of the same length as # species in simulation
#checks to see if competition arguemnt is a matrix with dim equal to length of repro/surv
#stop function if conditions aren't met
#how to set names of matrix? Test:
bekkeh <- matrix(1:16, nrow = 4, ncol = 4)
rownames(bekkeh) <- c("You", "want", "sum", "blue?")
colnames(bekkeh) <- c("learnin'", "to jump", "for", "bekkeh")
bekkeh["sum", "for"]
#Cool. Can call an INDEX by NAME. USEFUL.
#Validate and bundle the per-species simulation parameters.
#reproduce / survive: numeric vectors of per-species probabilities (one per species).
#compete.matrix: square matrix of pairwise competition parameters.
#names: optional species names; defaults to letters a, b, c, ...
#Returns a named list used by the other simulation functions.
setup.plants <- function(reproduce, survive, compete.matrix, names = NULL){
  if(is.null(names)){
    names <- letters[1:length(reproduce)]
  }
  if(length(names) != length(reproduce)){
    stop("Each plant must have a name.")
  }
  if(length(reproduce) != length(survive)){
    stop("Reproduction and survival parameters needed for each species.")
  }
  if(nrow(compete.matrix) != length(reproduce) | ncol(compete.matrix) != length(reproduce)){
    stop("Competition matrix must have competition parameters for each pairwise combination of species.")
  }
  #BUG FIX: the original wrote any(reproduce) > 1 etc., which tests whether
  #any() of the whole vector is "truthy" and then compares that logical to 1.
  #The element-wise comparison must happen INSIDE any().
  if(any(reproduce > 1) | any(reproduce < 0) | any(survive > 1) | any(survive < 0)){
    stop("Reproduction and survival probabilities must be values between zero and one!")
  }
  #BUG FIX: the original referenced an undefined object 'reproduction';
  #name the 'reproduce' vector that was actually passed in.
  reproduce <- setNames(reproduce, names)
  survive <- setNames(survive, names)
  rownames(compete.matrix) <- names
  colnames(compete.matrix) <- names
  return(list(reproduce = reproduce, survive = survive, compete.matrix = compete.matrix, names = names))
}
#For the competition matrix, what probability is what? Is it probability ROW will survive, or COL will survive?
#Will need to define
#survive: function that determines whether an individual survives to the next time step
#this function will do something to ONE CELL
#THis function will also need to match probabilities for EACH SPECIES to the species in the cell
#So will need survive vector in function
#for loop... for i in 1: length(names), if cell = name i, then run if-statement about survival
#No, not needed!
#Can check PROPER SPECIES by calling the NAME as the INDEX (see bekkeh["blue?", "for"])
#So if cell <- plant A, then run statement: if(runif(weifj) <= probability plant A survives), put "plant A" in cell
#else, put "" in the cell (blank, NOT NA)
#First draft of the per-cell survival rule.
#NOTE(review): when cell is NA the first if() sets cell to NA but execution
#falls through to the second if(), whose condition runif(1) <= survive[NA]
#evaluates to NA and errors with "missing value where TRUE/FALSE needed"
#(exactly as recorded in the test notes below). Superseded by later drafts.
survival <- function(cell, setup.plants){
  if(is.na(cell) == TRUE){
    cell <- NA
  }
  if(runif(1) <= setup.plants$survive[cell]){
    cell <- cell
  } else {
    cell <- ""
  }
  return(cell)
}
#test: exercise the first draft of survival() with each species and with NA (water)
survive <- c(0.5, 0.9, 0.01)
survive <- setNames(survive, c("coin", "super", "sucky"))
#Minimal stand-in for the setup.plants list; only $survive is used by survival()
blahblah <- list(survive = survive, etc = "blah", blargh = "fatcat")
cell_1 <- "coin"
cell_2 <- "super"
cell_3 <- "sucky"
cell_4 <- NA
survival(cell_1, blahblah)
survival(cell_2, blahblah)
survival(cell_3, blahblah)
survival(cell_4, blahblah)
#problem, not calling survival because list stuff NOT NAMED.
#Need NAMES when MAKING LIST or LIST HAS NO NAMES and CANNOT CALL PARTS OF LIST
#New problem: doesn't work for NA. Says missing value where T/F needed. Probably trying to run BOTH if-statements?
#Try making else-statement to fix this?
#Second draft: chaining else-if fixes the NA (water) case, but an empty
#cell ("") still reaches setup.plants$survive[""], which returns NA, so the
#runif() comparison errors again (see the note after the next test).
survival <- function(cell, setup.plants){
  if(is.na(cell) == TRUE){
    cell <- NA
  } else if(runif(1) <= setup.plants$survive[cell]){
    cell <- cell
  } else {
    cell <- ""
  }
  return(cell)
}
#Problem solved. Final test: an empty cell should pass straight through
cell_5 <- ""
survival(cell_5, blahblah)
#Nope, same problem. Tries to run second if-statement and finds the name is NOT in the list of plant names
#Third draft: explicitly handles NA (water), "" (empty), and species names
#that are not in the survive vector.
#NOTE(review): runif(1) is called TWICE (once per comparison branch), so the
#two independent draws can straddle the probability and neither branch fires,
#falling through to the final stop(). This is the bug fixed in the final
#version further down (which draws once into random.draw).
survival <- function(cell, setup.plants){
  if(is.na(cell) == TRUE){
    cell <- NA
  } else if(cell == ""){
    cell <- ""
  } else if(is.na(setup.plants$survive[cell]) == TRUE){
    stop("You just discovered a new species of plant! Whatever is in this cell shouldn't exist... try again.")
  } else if(runif(1) <= setup.plants$survive[cell]){
    cell <- cell
  } else if(runif(1) > setup.plants$survive[cell]){
    cell <- ""
  } else {
    stop("How have you even done this...")
  }
  return(cell)
}
#Test: empty cells and unknown species names should now be handled cleanly
cell_5 <- ""
cell_6 <- "George"
survival(cell_5, blahblah)
survival(cell_6, blahblah)
#Done!
#plant.timestep: a function that takes a matrix of plants, loops over it and applies "survival" function to each cell
#Will need two for loops, nested: for (i in 1:nrow(matrix)) and then for(j in 1:ncol(matrix)) to hit each cell
#matrix[i, j] <- survival(matrix[i, j], setup.plants)
#want to return new matrix at end
#Advance the plant matrix one timestep by applying the survival rule to
#every cell. Reads from the incoming matrix and writes into a copy, so all
#cells are judged against the same (pre-step) state. Returns the new matrix.
plant.timestep <- function(plant.matrix, setup.plants){
  updated.matrix <- plant.matrix
  for(row.index in 1:nrow(plant.matrix)){
    for(col.index in 1:ncol(plant.matrix)){
      current.cell <- plant.matrix[row.index, col.index]
      updated.matrix[row.index, col.index] <- survival(current.cell, setup.plants)
    }
  }
  return(updated.matrix)
}
#Scratch test for plant.timestep(): matrix() recycles the four names down
#each column, so each row holds a single species.
test.plants <- matrix(nrow = 4, ncol = 4, c("coin", "super", "sucky", "coin"))
#a matrix with coin in the first row, super in the second, sucky in the third, coin in the last
survive <- c(0.5, 0.9, 0.01)
survive <- setNames(survive, c("coin", "super", "sucky"))
blahblah <- list(survive = survive, etc = "blah", blargh = "fatcat")
plant.timestep(test.plants, blahblah)
#A problem in survival function: it is possible to get BELOW the p-value on the first run, but ABOVE it in the second
#I have accidentally drawn TWO random values, not just one...
#Fix:
#Decide the fate of a single cell for one timestep.
#Water (NA) stays water; empty ("") stays empty; an unknown species name is
#an error. A living plant survives when a SINGLE uniform draw falls at or
#below its species' survival probability, otherwise the cell empties.
survival <- function(cell, setup.plants){
  if(is.na(cell)){
    return(NA)
  }
  if(cell == ""){
    return("")
  }
  survival.prob <- setup.plants$survive[cell]
  if(is.na(survival.prob)){
    stop("You just discovered a new species of plant! Whatever is in this cell shouldn't exist... try again.")
  }
  #Exactly one draw per cell, so the survive/die outcomes cannot disagree.
  if(runif(1) <= survival.prob){
    return(cell)
  }
  return("")
}
#Problem solved. Plant.timestep now works.
#run.plant.ecosystem: function that seeds plants into an initial matrix (of same size as terrain),
#then builds an array using survive function each timestep
#need function that takes terrain, #plants of each species you want, plant.timestep, setup.plants, and timesteps+1
#Need timesteps + 1 because the first "sheet" in the array is time t = 0. Need timesteps after that, so timesteps plus one(initial conditions)
#How to draw plants? If you put in first set, then second, then third, and just replace ones that fall on top of another, bias towards last plant species run
#Fix this by creating places for the TOTAL number of plants, then assigning a place for each?
#No, better option: sample function: samples WITHOUT REPLACEMENT BY DEFAULT
#Problem of having plants land on top of one another solved?
#If too many plants, cannot fit in matrix.
#if sum(plants blah) > #cells in matrix, problem: too many plants
#Early draft of the ecosystem driver: validates inputs and pre-allocates
#the (rows x cols x time) array; seeding and timesteps are added in the
#later versions below.
#BUG FIX: the original input check was left half-written as
#"if(numb.plants.per.sp != )", which is a PARSE ERROR and stopped this whole
#file from being sourced. Completed here as the intended sanity check: one
#initial population size per plant species.
run.plant.ecosystem <- function(terrain, setup.plants, plant.timestep, numb.plants.per.sp, timesteps){
  if(length(numb.plants.per.sp) != length(setup.plants$names)){
    stop("There must be an initial population size given for every plant species!")
  }
  #Slice 1 will hold t = 0, hence timesteps + 1 slices in total.
  plant.array <- array(dim = c(nrow(terrain), ncol(terrain), timesteps + 1))
  return(plant.array)
}
#I want to randomly shuffle a vector of plant names so there is no bias towards the first or last plants for filling
#I also REFUSE to build a matrix that culls a random number of plants that land on water.
#I want to know EXACTLY how many plants are initially put in the matrix, and I want to DECIDE that number
#I want to call cells of a matrix WITHOUT using row/col:
#Scratch exploration: R matrices can be indexed by a single (column-major)
#cell number, which is how seed.plants() places plants without row/col pairs.
test.plants <- matrix(nrow = 4, ncol = 4, c("coin", "super", "sucky", "coin"))
test.plants
#[,1] [,2] [,3] [,4]
#[1,] "coin" "coin" "coin" "coin"
#[2,] "super" "super" "super" "super"
#[3,] "sucky" "sucky" "sucky" "sucky"
#[4,] "coin" "coin" "coin" "coin"
which(test.plants == "coin")
#[1] 1 4 5 8 9 12 13 16
#So each cell MUST have a call number... just need to figure out how
#Oh well that's simple... if you don't use a comma, it calls the CELL, not the ROW/COL
test.plants[3]
#[1] "sucky"
test.plants[,3]
#[1] "coin" "super" "sucky" "coin"
#top to bottom, left to right... fills columns
#Same check with numbers: single-index ASSIGNMENT also works (note the whole
#matrix is coerced to character once "bob" is inserted).
bob <- matrix(nrow = 4, ncol = 4, 1:16)
#[,1] [,2] [,3] [,4]
#[1,] 1 5 9 13
#[2,] 2 6 10 14
#[3,] 3 7 11 15
#[4,] 4 8 12 16
bob[6] <- "bob"
#[,1] [,2] [,3] [,4]
#[1,] "1" "5" "9" "13"
#[2,] "2" "bob" "10" "14"
#[3,] "3" "7" "11" "15"
#[4,] "4" "8" "12" "16"
#So now I just need to make a sequence from one to length nrow(terrain) ^2 in vector form...
#Then make a vector where NAs are using which(terrain, NA)
#Then remove everything in the NA vector from the first vector (sequence of numbers)
#and finally, sample a certain number of values from this "corrected" vector
#and for every i in 1:length(corrected vector), put shuffled.plant.vector[i] in matrix[i]
#Will need a stop function that indicates that there are length(NA vector) water cells in your terrain.
#You cannot seed more than nrow(terrain) ^2 - length(NA vector) plants into your matrix
#How to call names of a vector? names function?
survive <- c(0.5, 0.9, 0.01)
survive <- setNames(survive, c("coin", "super", "sucky"))
names(survive[3])
#[1] "sucky"
#Can use this to create vector of plant names to seed into the matrix
#Build the initial plant matrix from the terrain.
#Water cells (NA in terrain) stay NA; number.plants[i] individuals of
#species i are scattered uniformly at random over distinct land cells; all
#remaining land cells are left empty ("").
#terrain: matrix, NA marking water. setup.plants: list with $names.
#number.plants: integer vector, one initial population size per species.
seed.plants <- function(terrain, setup.plants, number.plants){
  if(length(number.plants) != length(setup.plants$names)){
    stop("There must be an initial population size given for every plant species in the 'number of plants per species' vector!")
  }
  #BUG FIX: seq(1, nrow(terrain)^2) silently assumed a SQUARE terrain; use
  #the true number of cells so rectangular terrains work too.
  all.terrain.locations <- seq_len(length(terrain))
  water.locations <- which(is.na(terrain))
  #BUG FIX: x[-integer(0)] selects NOTHING in R, so on a water-free terrain
  #the original threw away every candidate location. Only drop cells when
  #there is actually water to drop.
  if(length(water.locations) > 0){
    terrain.locations.minus.water <- all.terrain.locations[-water.locations]
  } else {
    terrain.locations.minus.water <- all.terrain.locations
  }
  total.number.plants <- sum(number.plants)
  if(total.number.plants > length(terrain.locations.minus.water)){
    stop("There are more plants to seed than there are locations on the terrain to put plants!", "\n",
         " You currently have ", total.number.plants, " plants, and only ", length(terrain.locations.minus.water), " places to put them!")
  }
  #sample.int avoids sample()'s scalar trap (sample(x, n) samples from 1:x
  #when x is a single number, e.g. when only one land cell remains).
  locations.for.plants.to.go <- terrain.locations.minus.water[sample.int(length(terrain.locations.minus.water), total.number.plants)]
  number.plants <- setNames(number.plants, setup.plants$names)
  #rep() with a count vector builds the pool of individuals in one call, and
  #sample() with no size argument shuffles it, so there is no bias from the
  #order in which species were listed.
  shuffled.plants.to.add <- sample(rep(names(number.plants), number.plants))
  plant.matrix <- matrix(nrow = nrow(terrain), ncol = ncol(terrain), "")
  plant.matrix[water.locations] <- NA
  plant.matrix[locations.for.plants.to.go] <- shuffled.plants.to.add
  return(plant.matrix)
}
#Hooray! Now to put into the whole ecosystem function:
#Intermediate version of the ecosystem driver: allocates the
#(rows x cols x time) array and fills only the t = 0 slice with the seeded
#terrain. Timesteps are not run yet; that is added in the next version.
run.plant.ecosystem <- function(terrain, setup.plants, numb.plants.per.sp, timesteps){
  simulation <- array(dim = c(nrow(terrain), ncol(terrain), timesteps + 1))
  initial.state <- seed.plants(terrain, setup.plants, numb.plants.per.sp)
  simulation[, , 1] <- initial.state
  return(simulation)
}
#Test: seed a 9x9 terrain (values < 10 become water) and check both the
#success path and the too-many-plants error message.
terrain <- matrix(nrow = 9, ncol = 9, sample(27, 27))
terrain[which(terrain < 10)] <- NA
survive <- c(0.5, 0.9, 0.01)
names <- c("coin", "super", "sucky")
survive <- setNames(survive, names)
setup.plants <- list(survive = survive, etc = "blah", blargh = "fatcat", names = names)
numb.plants.per.sp <- c(5, 2, 7)
run.plant.ecosystem(terrain, setup.plants, numb.plants.per.sp, 4)
#Oh hell yes. Works on first try (minus the copy errors in object names)
#Test error message:
numb.plants.per.sp <- c(5, 50, 40)
run.plant.ecosystem(terrain, setup.plants, numb.plants.per.sp, 4)
#Cool.Now add timesteps
#Full ecosystem driver. Slice 1 of the array holds the seeded initial state
#(t = 0); each subsequent slice is produced by applying plant.timestep() to
#the slice before it. Returns the whole (rows x cols x timesteps + 1) array.
run.plant.ecosystem <- function(terrain, setup.plants, numb.plants.per.sp, timesteps){
  simulation <- array(dim = c(nrow(terrain), ncol(terrain), timesteps + 1))
  simulation[, , 1] <- seed.plants(terrain, setup.plants, numb.plants.per.sp)
  for(step in 1:timesteps){
    previous.state <- simulation[, , step]
    simulation[, , step + 1] <- plant.timestep(previous.state, setup.plants)
  }
  return(simulation)
}
#IT WORRRRKS... SEE TEST: run the full driver for 8 timesteps
terrain <- matrix(nrow = 9, ncol = 9, sample(27, 27))
terrain[which(terrain < 10)] <- NA
survive <- c(0.5, 0.9, 0.01)
names <- c("coin", "super", "sucky")
survive <- setNames(survive, names)
setup.plants <- list(survive = survive, etc = "blah", blargh = "fatcat", names = names)
numb.plants.per.sp <- c(5, 2, 7)
run.plant.ecosystem(terrain, setup.plants, numb.plants.per.sp, 8)
#Just as expected, sucky plants die out first, then coin (50/50 coin flip) plants, and lastly, super plants remain
#reproduction function
#want to determine the possible cells around the reproducing plant that can house an offspring
#make a vector of options, will be length 8 (three to left, three to right, one above, one below)
#the cell to the left is cell # minus ncol or nrow of matrix
#cell to the right is cell # plus ncol
#so c(cell#-nrow, cell#+nrow, cell#+1, cell#-1, cell#-nrow+1, cell#-nrow-1, cell#+nrow+1, cell#+nrow-1)
#then draw one from this vector to place the new plant in
#sample(potential.baby.location, 1)
#matrix[sample(potential.baby.location, 1)] <- matrix[i]
#test potential for-loop:
#Scratch: first attempts at enumerating candidate offspring cells.
#Attempt 1 works with single (column-major) cell indices; the offsets wrap
#around the matrix edges, which is why this approach is abandoned.
plant.matrix <- matrix(nrow = 4, ncol = 4)
for(i in 1:nrow(plant.matrix)^2){
  potential.offspring.locations <- c((i - nrow(plant.matrix)), (i + nrow(plant.matrix)), (i + 1), (i - 1), (i - nrow(plant.matrix) + 1),
                                     (i - nrow(plant.matrix) - 1), (i + nrow(plant.matrix) + 1), (i + nrow(plant.matrix) + 1))
  print(potential.offspring.locations)
}
#complicated to filter out those off the matrix because it wraps around edges
#Attempt 2: expand.grid() builds the 3x3 neighbourhood in row/col coordinates
for(i in 1:nrow(plant.matrix)){
  for(j in 1:ncol(plant.matrix)){
    potential.offspring.locations <- as.matrix(expand.grid(i + c(-1, 0, 1), j + c(-1, 0, 1)))
    print(potential.offspring.locations)
  }
}
#Now, need to remove any row with the original plant inside (i + 0, j + 0)
#and remove any row with values less than 1 or greater than nrow(plant.matrix)
#Attempt 3: drop the parent's own cell from the candidate matrix.
#NOTE(review): errors with "subscript out of bounds" (see comment below)
#because deleting a row shrinks potential.offspring.locations while k still
#runs up to 9.
for(i in 1:nrow(plant.matrix)){
  for(j in 1:ncol(plant.matrix)){
    potential.offspring.locations <- as.matrix(expand.grid(i + c(-1, 0, 1), j + c(-1, 0, 1)))
    for(k in 1:9){
      if(potential.offspring.locations[k, 1] == i & potential.offspring.locations[k, 2] == j){
        potential.offspring.locations <- potential.offspring.locations[-k,]
      }
    }
    print(potential.offspring.locations)
  }
}
#error, subscripts out of bounds?
#Debug pass: print each candidate row/col pair to locate the failing index
for(i in 1:nrow(plant.matrix)){
  for(j in 1:ncol(plant.matrix)){
    potential.offspring.locations <- as.matrix(expand.grid(i + c(-1, 0, 1), j + c(-1, 0, 1)))
    for(k in 1:9){
      print(potential.offspring.locations[k, 1])
      print(potential.offspring.locations[k, 2])
    }
  }
}
#But this works...
plant.matrix <- matrix(nrow = 4, ncol = 4)
#matrix doesn't like changing sizes in the middle of a loop, perhaps?
#Working fix: delete the centre point from a COPY (.two) so the matrix the
#loop indexes into keeps all 9 rows and the bounds stay valid.
for(i in 1:nrow(plant.matrix)){
  for(j in 1:ncol(plant.matrix)){
    potential.offspring.locations <- as.matrix(expand.grid(i + c(0, -1, 1), j + c(0, -1, 1)))
    potential.offspring.locations.two <- potential.offspring.locations
    for(k in 1:nrow(potential.offspring.locations)){
      if(potential.offspring.locations[k, 1] == i & potential.offspring.locations[k, 2] == j){
        potential.offspring.locations.two <- potential.offspring.locations[-k,]
      }
    }
    cat(i,j)
    print(potential.offspring.locations.two)
  }
}
#cool, working (and cat confirms that it's deleting the center point as it should)
#Next step: also drop candidates that fall off the grid, chaining removals
#through .two/.three copies.
#NOTE(review): this does not work (see "Doesn't work" below); the logic is
#reworked with separate row/col vectors inside offspring.location() instead.
for(i in 1:nrow(plant.matrix)){
  for(j in 1:ncol(plant.matrix)){
    potential.offspring.locations <- as.matrix(expand.grid(i + c(0, -1, 1), j + c(0, -1, 1)))
    potential.offspring.locations.two <- potential.offspring.locations
    potential.offspring.locations.three <- potential.offspring.locations.two
    for(k in 1:nrow(potential.offspring.locations)){
      if(potential.offspring.locations[k, 1] == i & potential.offspring.locations[k, 2] == j){
        potential.offspring.locations.two <- potential.offspring.locations[-k,]
      }
      if(potential.offspring.locations[k, 1] > nrow(plant.matrix) | potential.offspring.locations[k, 2] > nrow(plant.matrix)
         | potential.offspring.locations[k, 1] < 1 | potential.offspring.locations[k, 2] < 1){
        potential.offspring.locations.three <- potential.offspring.locations.two[-k,]
      }
    }
    cat(i,j)
    print(potential.offspring.locations.three)
  }
}
#Doesn't work. Try splitting into vectors individually
#First full draft of offspring placement, kept here with its print() tracing.
#Given the parent's row/column, picks one valid neighbouring cell at random.
#NOTE(review): the closing brace of the centre-removal if() is followed by the
#rest of the body INSIDE for(k ...), so return() fires on the very first
#iteration. That only works because expand.grid() with c(0, -1, 1) offsets
#puts the parent's own cell in row 1, so it has already been removed when
#k = 1 completes. Superseded by the "Final, de-bugged version" below.
offspring.location <- function(F0.row, F0.col, plant.matrix){
  potential.F1.locations <- as.matrix(expand.grid(F0.row + c(0, -1, 1), F0.col + c(0, -1, 1)))
  #Matrix storing all possible locations for plant offspring, INCLUDING the location of the parent
  potential.F1.locations.minus.center <- potential.F1.locations
  #Matrix storing potential locations for plant offspring MINUS the location of the parent
  for(k in 1:nrow(potential.F1.locations)){
    if(potential.F1.locations[k, 1] == F0.row & potential.F1.locations[k, 2] == F0.col){
      potential.F1.locations.minus.center <- potential.F1.locations[-k,]
    }
    #Loop that takes the list of all possible locations and removes the center point (the location of the parent plant)
    potential.F1.row <- potential.F1.locations.minus.center[,1]
    potential.F1.col <- potential.F1.locations.minus.center[,2]
    #Vector from the possible offspring location matrix storing the indices of potential rows and columns for offspring
    #Debug tracing (removed in the final version):
    print(potential.F1.row)
    print(potential.F1.col)
    print(which(potential.F1.row < 1))
    print(nrow(plant.matrix))
    rows.to.remove <- c(which(potential.F1.row > nrow(plant.matrix)), which(potential.F1.row < 1))
    col.to.remove <- c(which(potential.F1.col > ncol(plant.matrix)), which(potential.F1.col < 1))
    #Vectors determining which row and column locations are off the grid (terrain), and need to be removed
    print(rows.to.remove)
    print(col.to.remove)
    potential.F1.row <- potential.F1.row[-c(rows.to.remove, col.to.remove)]
    potential.F1.col <- potential.F1.col[-c(rows.to.remove, col.to.remove)]
    #corrected vectors storing potential row/col locations for offspring, all invalid locations removed
    print(potential.F1.row)
    print(potential.F1.col)
    potential.location.index <- seq(from = 1, to = length(potential.F1.row), by = 1)
    offspring.location.index <- sample(potential.location.index, 1)
    #draws a random sample from the vector of potential F1 locations
    offspring.location <- c(potential.F1.row[offspring.location.index], potential.F1.col[offspring.location.index])
    return(offspring.location)
  }
}
#Final, de-bugged version below:
offspring.location <- function(F0.row, F0.col, plant.matrix){
  # Return one randomly chosen cell adjacent to the parent at
  # (F0.row, F0.col), as a length-2 numeric vector c(row, col).  Only the
  # (up to 8) neighbouring cells that lie inside `plant.matrix` are
  # candidates; the parent's own cell is never returned.
  #
  # Fixes over the previous version:
  #  * `length(col.to.remove > 0)` (the length of a logical vector) is
  #    replaced by a direct logical subset, so no removal-index bookkeeping
  #    or emptiness guard is needed at all;
  #  * `seq(from = 1, to = length(x), by = 1)` is replaced by seq_len(),
  #    which is empty instead of erroring when there are zero candidates,
  #    and that degenerate case now raises an explicit error;
  #  * the `return()` no longer sits inside the centre-removal loop -- the
  #    parent cell is dropped with one vectorised comparison.
  candidates <- as.matrix(expand.grid(F0.row + c(0, -1, 1), F0.col + c(0, -1, 1)))
  # Drop the parent's own cell.
  is.parent <- candidates[, 1] == F0.row & candidates[, 2] == F0.col
  candidates <- candidates[!is.parent, , drop = FALSE]
  # Keep only cells that fall inside the grid.
  in.grid <- candidates[, 1] >= 1 & candidates[, 1] <= nrow(plant.matrix) &
    candidates[, 2] >= 1 & candidates[, 2] <= ncol(plant.matrix)
  candidates <- candidates[in.grid, , drop = FALSE]
  if (nrow(candidates) == 0) {
    stop("No valid offspring location: the parent cell has no in-grid neighbours.")
  }
  # Uniform draw over the remaining candidate cells.
  chosen <- sample(seq_len(nrow(candidates)), 1)
  as.numeric(candidates[chosen, ])
}
#need to determine whether randomly drawn prob is less than repro prob. If so, then run the above stuff
#reproduction function should go AFTER the survival function is complete, this seems simpler
#if two plants are next to each other, don't want one to reproduce over the top of its neighbor...
#then have the loop move to the neighbor's cell (now filled with the offspring of the first cell)...
#And then have the offspring "reproduce" into another cell...
#need to make sure offspring can't reproduce, so need to put offspring into NEW matrix but draw from the ORIGINAL each time
#This will prevent mixing of generations
# First reproduction() sketch (linear-index neighbourhood; abandoned).
# NOTE(review): `reproduce` is taken as an argument but the body reads the
# global `setup.plants$reproduce` instead; `potential.offspring.locations`
# is computed but never used, lists (i + nrow + 1) twice and omits
# (i + nrow - 1).  `1:nrow(plant.matrix)^2` is 1:(n^2) because ^ binds
# tighter than `:`, i.e. all cells by linear index.
reproduction <- function(plant.matrix, reproduce){
new.plant.matrix <- plant.matrix
#creates a new matrix for the "next generation" to be seeded into without messing up the original matrix I'm drawing from
for(i in 1:nrow(plant.matrix)^2){
potential.offspring.locations <- c((i - nrow(plant.matrix)), (i + nrow(plant.matrix)), (i + 1), (i - 1), (i - nrow(plant.matrix) + 1),
(i - nrow(plant.matrix) - 1), (i + nrow(plant.matrix) + 1), (i + nrow(plant.matrix) + 1))
random.draw <- runif(1)
if(random.draw <= setup.plants$reproduce[plant.matrix[i]]){
#stuff to determine where thing goes
}
}
}
#Try again
# Second reproduction() attempt: operates on a single parent cell.
# NOTE(review): `matrix[offspring.location]` refers to the base function
# `matrix`, not to `plant.matrix` -- that line errors if reached.  Also,
# `new.plant.matrix[offspring.location]` indexes with a length-2 vector,
# which selects two elements LINEARLY rather than addressing (row, col);
# later versions use [loc[1], loc[2]].
reproduction <- function(F0.row, F0.col, plant.matrix, setup.plants){
new.plant.matrix <- plant.matrix
#creates a new matrix for the "next generation" to be seeded into without messing up the original matrix I'm drawing from
cell <- plant.matrix[F0.row, F0.col]
# NOTE(review): `plant.matrix[cell]` indexes the matrix BY the cell's own
# content; later versions use is.na(cell) directly.
if(is.na(plant.matrix[cell]) == TRUE){
cell <- NA
} else if(cell == ""){
cell <- ""
} else if(is.na(setup.plants$reproduce[cell]) == TRUE){
stop("You just discovered a new species of plant! Whatever is in this cell shouldn't exist... try again.")
} else {
if(runif(1) <= setup.plants$reproduce[cell]){
offspring.location <- offspring.location(F0.row, F0.col, plant.matrix)
if(is.na(matrix[offspring.location] == FALSE)){
new.plant.matrix[offspring.location] <- cell
}
}
}
return(new.plant.matrix)
}
#New plant.timestep function with reproduction inside
# plant.timestep draft pairing survival() with the per-cell reproduction().
# NOTE(review): each call in the second loop rebuilds repro.plant.matrix
# from `new.plant.matrix`, so every iteration discards the offspring placed
# by the previous one -- only the last (i, j) call's result is returned.
plant.timestep <- function(plant.matrix, setup.plants){
new.plant.matrix <- plant.matrix
repro.plant.matrix <- new.plant.matrix
# Survival pass: resolve every cell of the current generation first.
for(i in 1:nrow(plant.matrix)){
for(j in 1:ncol(plant.matrix)){
new.plant.matrix[i, j] <- survival(plant.matrix[i,j], setup.plants)
}
}
# Reproduction pass over the survivors (see NOTE above).
for(i in 1:nrow(new.plant.matrix)){
for(j in 1:ncol(new.plant.matrix)){
repro.plant.matrix <- reproduction(i, j, new.plant.matrix, setup.plants)
}
}
return(repro.plant.matrix)
}
#new test:
# Whole-matrix reproduction() attempt.
# NOTE(review): `for(i in nrow(plant.matrix))` (and the j counterpart)
# iterate over the SINGLE value nrow/ncol, not 1:nrow -- only the
# bottom-right cell is visited (the "RANGE OF I,J" realisation below).
# `F0.row`/`F0.col` and `new.plant.matrix` are also undefined here.
reproduction <- function(plant.matrix, setup.plants){
repro.plant.matrix <- plant.matrix
#creates a new matrix for the "next generation" to be seeded into without messing up the original matrix I'm drawing from
for(i in nrow(plant.matrix)){
for(j in ncol(plant.matrix)){
cell <- plant.matrix[i, j]
print(c(i,j))
if(is.na(plant.matrix[cell]) == TRUE){
cell <- NA
} else if(cell == ""){
cell <- ""
} else if(is.na(setup.plants$reproduce[cell]) == TRUE){
stop("You just discovered a new species of plant! Whatever is in this cell shouldn't exist... try again.")
} else {
if(runif(1) <= setup.plants$reproduce[cell]){
offspring.location <- offspring.location(F0.row, F0.col, plant.matrix)
print(offspring.location)
if(is.na(matrix[offspring.location] == FALSE)){
new.plant.matrix[offspring.location] <- cell
}
}
}
}
}
print(repro.plant.matrix)
return(repro.plant.matrix)
}
plant.timestep <- function(plant.matrix, setup.plants){
  # Advance the simulation one generation: resolve survival for every cell
  # of the current grid, then let the survivors reproduce into a new grid.
  survivors <- plant.matrix
  for (row in seq_len(nrow(plant.matrix))) {
    for (col in seq_len(ncol(plant.matrix))) {
      survivors[row, col] <- survival(plant.matrix[row, col], setup.plants)
    }
  }
  # reproduction() returns the next-generation matrix.
  reproduction(survivors, setup.plants)
}
#New new test: OH MY GOD. UGHHHHHHHHHHHHHHHHHHH STUPIDITY.PERHAPS A RANGEEEE OF I,J VALUES WOULD HELP.............
# Reproduction over the whole grid with debugging prints (loop bounds fixed
# to 1:nrow / 1:ncol).
# NOTE(review): `new.plant.matrix` on the innermost assignment is undefined
# in this version (the result matrix is `repro.plant.matrix`), and
# `is.na(x == FALSE)` tests the wrong thing -- see the "FUNCTIONAL" version
# below for the corrected branch.
reproduction <- function(plant.matrix, setup.plants){
repro.plant.matrix <- plant.matrix
#creates a new matrix for the "next generation" to be seeded into without messing up the original matrix I'm drawing from
for(i in 1:nrow(plant.matrix)){
for(j in 1:ncol(plant.matrix)){
cell <- plant.matrix[i, j]
if(is.na(cell) == TRUE){
cell <- NA
print("first works")
} else if(cell == ""){
cell <- ""
print("second works")
} else if(is.na(setup.plants$reproduce[cell]) == TRUE){
stop("You just discovered a new species of plant! Whatever is in this cell shouldn't exist... try again.")
} else {
if(runif(1) <= setup.plants$reproduce[cell]){
print("third works")
offspring.location <- offspring.location(i, j, plant.matrix)
print(offspring.location)
if(is.na(plant.matrix[offspring.location[1], offspring.location[2]] == FALSE)){
new.plant.matrix[offspring.location] <- cell
}
}
}
}
}
return(repro.plant.matrix)
}
#Without all the print stuff, FUNCTIONAL version:
# "FUNCTIONAL" reproduction: visits every cell; plants that pass the
# reproduction draw place an offspring of their own type in a random
# neighbouring cell of repro.plant.matrix.
# NOTE(review): `is.na(x == TRUE)` works only by accident -- `NA == TRUE`
# evaluates to NA, so is.na() of it is TRUE exactly when x is NA.  Also,
# the final `else` writes `cell` over ANY non-NA target, including occupied
# cells (no competition yet) -- competition is added in the versions below.
reproduction <- function(plant.matrix, setup.plants){
repro.plant.matrix <- plant.matrix
#creates a new matrix for the "next generation" to be seeded into without messing up the original matrix I'm drawing from
for(i in 1:nrow(plant.matrix)){
for(j in 1:ncol(plant.matrix)){
cell <- plant.matrix[i, j]
if(is.na(cell) == TRUE){
cell <- NA
} else if(cell == ""){
cell <- ""
} else if(is.na(setup.plants$reproduce[cell]) == TRUE){
stop("You just discovered a new species of plant! Whatever is in this cell shouldn't exist... try again.")
} else {
if(runif(1) <= setup.plants$reproduce[cell]){
offspring.location <- offspring.location(i, j, plant.matrix)
if(is.na(plant.matrix[offspring.location[1], offspring.location[2]] == TRUE)){
repro.plant.matrix[offspring.location[1], offspring.location[2]] <- NA
} else {
repro.plant.matrix[offspring.location[1], offspring.location[2]] <- cell
}
}
}
}
}
return(repro.plant.matrix)
}
#Working, but why filling only the NA spots with a baby plant?!
#Needed an else-statement. Doesn't work with if statement only. For some reason doesn't check true/false properly.
#Competition function:
#Should go in reproduction function within the reproduction function at the end where offspring location is drawn
#only need to run the function if the cell location is NOT NA or "", so should go after that
#Thus, need 1) if NA, do this, else if "" do this, else compete function
#function will need an input of cell and plant.matrix[potential offspring location]
#Will return the content of the cell
#so plant.matrix[potential offspring location] <- compete(cell, plant.matrix[potential offspring location])
# First compete() sketch: decide which plant keeps a contested cell.
# NOTE(review): reads the global `comp.matrix` (later versions take
# setup.plants as an argument), and passes a SINGLE probability to sample()
# for TWO items -- sample() requires one probability per item, so this call
# errors with "incorrect number of probabilities"; fixed below by passing
# c(p, 1 - p).
compete <- function(parent.cell, potential.offspring.cell){
cat(parent.cell, "parent cell", "\n")
cat(potential.offspring.cell, "potential offspring cell", "\n")
print(comp.matrix[parent.cell, potential.offspring.cell])
winner <- sample(c(parent.cell, potential.offspring.cell), 1, prob = comp.matrix[parent.cell, potential.offspring.cell])
return(winner)
}
#Test stuff:
# Test version wiring compete() into reproduction(): NA targets stay NA,
# empty targets stay empty, occupied targets are contested.
# NOTE(review): this version calls the TWO-argument compete() defined just
# above, which errors on its sample() probabilities -- the working pairing
# uses the three-argument compete() defined further down.
reproduction <- function(plant.matrix, setup.plants){
repro.plant.matrix <- plant.matrix
#creates a new matrix for the "next generation" to be seeded into without messing up the original matrix I'm drawing from
for(i in 1:nrow(plant.matrix)){
for(j in 1:ncol(plant.matrix)){
cell <- plant.matrix[i, j]
if(is.na(cell) == TRUE){
cell <- NA
} else if(cell == ""){
cell <- ""
} else if(is.na(setup.plants$reproduce[cell]) == TRUE){
stop("You just discovered a new species of plant! Whatever is in this cell shouldn't exist... try again.")
} else if(is.na(setup.plants$reproduce[cell]) == FALSE){
if(runif(1) <= setup.plants$reproduce[cell]){
print(cell)
offspring.location <- offspring.location(i, j, plant.matrix)
if(is.na(plant.matrix[offspring.location[1], offspring.location[2]] == TRUE)){
repro.plant.matrix[offspring.location[1], offspring.location[2]] <- NA
} else if(plant.matrix[offspring.location[1], offspring.location[2]] == ""){
repro.plant.matrix[offspring.location[1], offspring.location[2]] <- ""
} else {
repro.plant.matrix[offspring.location[1], offspring.location[2]] <- compete(cell, repro.plant.matrix[offspring.location[1], offspring.location[2]])
}
}
}
}
}
return(repro.plant.matrix)
}
#problem: cell is somehow being "" sometimes, rather than always a plant name... messing up the competition function
#Another try....
# Reworked reproduction(): the branch tests are collapsed into a single
# "is the reproduce rate defined for this cell" check, and the
# three-argument compete() is used.
# NOTE(review): in this version empty ("") target cells never receive an
# offspring -- only targets that themselves hold a known plant type are
# contested; the final version below restores seeding into empty cells.
reproduction <- function(plant.matrix, setup.plants){
repro.plant.matrix <- plant.matrix
#creates a new matrix for the "next generation" to be seeded into without messing up the original matrix I'm drawing from
for(i in 1:nrow(plant.matrix)){
for(j in 1:ncol(plant.matrix)){
if(is.na(setup.plants$reproduce[plant.matrix[i, j]]) == FALSE){
if(runif(1) <= setup.plants$reproduce[plant.matrix[i, j]]){
cat(plant.matrix[i, j], "repro function", "\n")
offspring.location <- offspring.location(i, j, plant.matrix)
if(is.na(setup.plants$reproduce[plant.matrix[offspring.location[1], offspring.location[2]]]) == FALSE){
cat("part of loop", plant.matrix[i, j])
repro.plant.matrix[offspring.location[1], offspring.location[2]] <- compete(plant.matrix[i, j], repro.plant.matrix[offspring.location[1], offspring.location[2]], setup.plants)
}
}
}
}
}
return(repro.plant.matrix)
}
#Problemmmmmm I don't have setup.plants in my function....... also comp.matrix is compete.matrix in setup.plants, so...
# compete() with setup.plants passed in and the probability pair fixed:
# the parent wins with probability compete.matrix[parent, occupant], the
# current occupant wins with the complement.  Debug cat()/print() retained;
# the final version below strips them.
compete <- function(parent.cell, potential.offspring.cell, setup.plants){
cat(parent.cell, "parent cell", "\n")
cat(potential.offspring.cell, "potential offspring cell", "\n")
print(setup.plants$compete.matrix[parent.cell, potential.offspring.cell])
winner <- sample(c(parent.cell, potential.offspring.cell), 1, prob = c(setup.plants$compete.matrix[parent.cell, potential.offspring.cell], (1 - setup.plants$compete.matrix[parent.cell, potential.offspring.cell])))
return(winner)
}
#Okay. Mostly working. Re-try with regular reproduction function.
# Final reproduction(): one generation of seeding.
#
# plant.matrix : character matrix; each cell is NA (unusable ground), ""
#                (empty ground) or a plant-type name.
# setup.plants : list with $reproduce (per-type reproduction probability,
#                indexed by type name) and $compete.matrix (used by compete()).
# Returns the next-generation matrix.  For every occupied cell that passes
# its reproduction draw, a neighbouring target is picked with
# offspring.location(); NA targets stay NA, empty targets receive the
# offspring, occupied targets are resolved by compete().
# NOTE(review): `is.na(plant.matrix[...] == TRUE)` only works because
# `NA == TRUE` evaluates to NA; `is.na(plant.matrix[...])` is the intended
# test.
reproduction <- function(plant.matrix, setup.plants){
repro.plant.matrix <- plant.matrix
#creates a new matrix for the "next generation" to be seeded into without messing up the original matrix I'm drawing from
for(i in 1:nrow(plant.matrix)){
for(j in 1:ncol(plant.matrix)){
cell <- plant.matrix[i, j]
if(is.na(cell) == TRUE){
cell <- NA
} else if(cell == ""){
cell <- ""
} else if(is.na(setup.plants$reproduce[cell]) == TRUE){
stop("You just discovered a new species of plant! Whatever is in this cell shouldn't exist... try again.")
} else if(is.na(setup.plants$reproduce[cell]) == FALSE){
if(runif(1) <= setup.plants$reproduce[cell]){
offspring.location <- offspring.location(i, j, plant.matrix)
if(is.na(plant.matrix[offspring.location[1], offspring.location[2]] == TRUE)){
repro.plant.matrix[offspring.location[1], offspring.location[2]] <- NA
} else if(repro.plant.matrix[offspring.location[1], offspring.location[2]] == ""){
repro.plant.matrix[offspring.location[1], offspring.location[2]] <- cell
} else {
repro.plant.matrix[offspring.location[1], offspring.location[2]] <- compete(cell, repro.plant.matrix[offspring.location[1], offspring.location[2]], setup.plants)
}
}
}
}
}
return(repro.plant.matrix)
}
compete <- function(parent.cell, potential.offspring.cell, setup.plants){
  # Resolve a contested cell between a reproducing parent and the current
  # occupant.  The parent's offspring wins with probability
  # compete.matrix[parent, occupant]; otherwise the occupant keeps the cell.
  # Returns the winning plant-type name.
  p.parent <- setup.plants$compete.matrix[parent.cell, potential.offspring.cell]
  contenders <- c(parent.cell, potential.offspring.cell)
  sample(contenders, 1, prob = c(p.parent, 1 - p.parent))
}
da5f888fa9f023c3fa1f51c537eb6b93679caea4 | de004591ab9dcc1053a7fae7a1b695bf25bdddb8 | /R/superResolutionUtilities.R | 9e8ad87cedacfff7526218a71379a5b79c34761a | [] | no_license | msharrock/ANTsRNet | 1aea8a5afcebf70f13e7b80b70e912a676901f8b | 0836f4faafa96e7a7b29497e56a48129c47292d0 | refs/heads/master | 2020-03-28T12:50:46.610970 | 2018-09-11T01:31:14 | 2018-09-11T01:31:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,091 | r | superResolutionUtilities.R | #' Model loss function for super-resolution---peak-signal-to-noise ratio.
#'
#' Based on the keras loss function (losses.R):
#'
#' \url{https://github.com/rstudio/keras/blob/master/R/losses.R}
#'
#' @param y_true True labels (Tensor)
#' @param y_pred Predictions (Tensor of the same shape as \code{y_true})
#'
#' @details Loss functions are to be supplied in the loss parameter of the
#' \code{compile()} function.
#'
#' @export
# Negative-MSE-based PSNR computed with Keras backend ops so it can be used
# directly as a Keras metric: -10 * log10(mean((y_pred - y_true)^2)).
# Inputs are Keras tensors, not R arrays; requires an active Keras backend.
peak_signal_to_noise_ratio <- function( y_true, y_pred )
{
K <- keras::backend()
# log(x) / log(10) == log10(x); the backend exposes only natural log.
return( -10.0 * K$log( K$mean( K$square( y_pred - y_true ) ) ) / K$log( 10.0 ) )
}
# Keras inspects this attribute to recover the metric's Python-side name.
attr( peak_signal_to_noise_ratio, "py_function_name" ) <-
"peak_signal_to_noise_ratio"
#' Peak-signal-to-noise ratio.
#'
#' @param y_true true encoded labels
#' @param y_pred predicted encoded labels
#'
#' @rdname loss_peak_signal_to_noise_ratio_error
#' @export
loss_peak_signal_to_noise_ratio_error <- function( y_true, y_pred )
{
  # Negated PSNR: minimising this loss maximises peak_signal_to_noise_ratio().
  # Operates on Keras tensors, like the metric it wraps.
  -peak_signal_to_noise_ratio( y_true, y_pred )
}
attr( loss_peak_signal_to_noise_ratio_error, "py_function_name" ) <-
"peak_signal_to_noise_ratio_error"
#' Extract 2-D or 3-D image patches.
#'
#' @param image Input ANTs image
#' @param patchSize Width, height, and depth (if 3-D) of patches.
#' @param maxNumberOfPatches Maximum number of patches returned. If
#' "all" is specified, then all overlapping patches are extracted.
#' @param randomSeed integer seed that allows reproducible patch extraction
#' across runs.
#'
#' @return a randomly selected list of patches.
#' @author Tustison NJ
#' @examples
#'
#' library( ANTsR )
#' i = ri( 1 )
#' patchSet1 = extractImagePatches( i, c( 32, 32 ), 10, randomSeed = 0 )
#' patchSet2 = extractImagePatches( i, c( 32, 32 ), 10, randomSeed = 1 )
#' patchSet3 = extractImagePatches( i, c( 32, 32 ), 10, randomSeed = 0 )
#'
#' @export
extractImagePatches <- function( image, patchSize, maxNumberOfPatches = 'all', randomSeed )
{
  # Extract 2-D or 3-D patches of size `patchSize` from `image`.  With
  # maxNumberOfPatches = 'all', every overlapping patch is returned in scan
  # order (first axis slowest); otherwise `maxNumberOfPatches` patches are
  # drawn with uniformly random start corners, reproducible via `randomSeed`.
  if (!missing(randomSeed)) {
    set.seed(randomSeed)
  }
  imageSize <- dim(image)
  dimensionality <- length(imageSize)
  if (dimensionality != length(patchSize)) {
    stop("Mismatch between the image size and the specified patch size.\n")
  }
  if (any(patchSize > imageSize)) {
    stop("Patch size is greater than the image size.\n")
  }
  imageArray <- as.array(image)
  # Number of valid patch start positions along each axis.
  validStarts <- imageSize - patchSize + 1
  # Cut one patch whose top-left(-front) corner is `start`.
  cutPatch <- function(start) {
    end <- start + patchSize - 1
    if (dimensionality == 2) {
      imageArray[start[1]:end[1], start[2]:end[2]]
    } else if (dimensionality == 3) {
      imageArray[start[1]:end[1], start[2]:end[2], start[3]:end[3]]
    } else {
      stop("Unsupported dimensionality.\n")
    }
  }
  patches <- list()
  if (tolower(maxNumberOfPatches) == 'all') {
    count <- 1
    if (dimensionality == 2) {
      for (i in seq_len(validStarts[1])) {
        for (j in seq_len(validStarts[2])) {
          patches[[count]] <- cutPatch(c(i, j))
          count <- count + 1
        }
      }
    } else if (dimensionality == 3) {
      for (i in seq_len(validStarts[1])) {
        for (j in seq_len(validStarts[2])) {
          for (k in seq_len(validStarts[3])) {
            patches[[count]] <- cutPatch(c(i, j, k))
            count <- count + 1
          }
        }
      }
    } else {
      stop("Unsupported dimensionality.\n")
    }
  } else {
    for (i in seq_len(maxNumberOfPatches)) {
      start <- integer(dimensionality)
      # One random draw per axis, in the same order as before, so a given
      # randomSeed reproduces the same patches as previous versions.
      for (d in seq_len(dimensionality)) {
        start[d] <- sample.int(validStarts[d], 1)
      }
      patches[[i]] <- cutPatch(start)
    }
  }
  return(patches)
}
#' Reconstruct image from a list of patches.
#'
#' @param patchList list of overlapping patches defining an image.
#' @param domainImage Image to define the geometric information of the
#' reconstructed image.
#'
#' @return an ANTs image.
#' @author Tustison NJ
#' @examples
#' \dontrun{
#' }
#' @importFrom ANTsRCore as.antsImage
#' @export
# Invert extractImagePatches(..., maxNumberOfPatches = 'all'): overlapping
# patches (in the same scan order) are summed back into place, then every
# pixel/voxel is divided by the number of patches covering it, i.e. the
# reconstruction averages the overlaps.
reconstructImageFromPatches <- function( patchList, domainImage )
{
imageSize <- dim( domainImage )
dimensionality <- length( imageSize )
# All patches are assumed to share the size of the first one.
patchSize <- dim( patchList[[1]] )
numberOfPatches <- 1
for( d in 1:dimensionality )
{
numberOfPatches <- numberOfPatches *
( imageSize[d] - patchSize[d] + 1 )
}
if( numberOfPatches != length( patchList ) )
{
stop( "Not the right number of patches.\n" )
}
imageArray <- array( data = 0, dim = imageSize )
count <- 1
if( dimensionality == 2 )
{
# Accumulation pass: add each patch at its start position.
for( i in seq_len( imageSize[1] - patchSize[1] + 1 ) )
{
for( j in seq_len( imageSize[2] - patchSize[2] + 1 ) )
{
startIndex <- c( i, j )
endIndex <- startIndex + patchSize - 1
imageArray[startIndex[1]:endIndex[1], startIndex[2]:endIndex[2]] <-
imageArray[startIndex[1]:endIndex[1], startIndex[2]:endIndex[2]] +
patchList[[count]]
count <- count + 1
}
}
# Normalisation pass: `factor` is the number of patches overlapping (i, j).
for( i in seq_len( imageSize[1] ) )
{
for( j in seq_len( imageSize[2] ) )
{
factor <- min( i, patchSize[1], imageSize[1] - i + 1 ) *
min( j, patchSize[2], imageSize[2] - j + 1 )
imageArray[i, j] <- imageArray[i, j] / factor
}
}
} else if( dimensionality == 3 ) {
# 3-D accumulation pass, mirroring the 2-D case above.
for( i in seq_len( imageSize[1] - patchSize[1] + 1 ) )
{
for( j in seq_len( imageSize[2] - patchSize[2] + 1 ) )
{
for( k in seq_len( imageSize[3] - patchSize[3] + 1 ) )
{
startIndex <- c( i, j, k )
endIndex <- startIndex + patchSize - 1
imageArray[startIndex[1]:endIndex[1],
startIndex[2]:endIndex[2], startIndex[3]:endIndex[3]] <-
imageArray[startIndex[1]:endIndex[1],
startIndex[2]:endIndex[2], startIndex[3]:endIndex[3]] +
patchList[[count]]
count <- count + 1
}
}
}
# 3-D normalisation pass.
for( i in seq_len( imageSize[1] ) )
{
for( j in seq_len( imageSize[2] ) )
{
for( k in seq_len( imageSize[3] ) )
{
factor <- min( i, patchSize[1], imageSize[1] - i + 1 ) *
min( j, patchSize[2], imageSize[2] - j + 1 ) *
min( k, patchSize[3], imageSize[3] - k + 1 )
imageArray[i, j, k] <- imageArray[i, j, k] / factor
# NOTE(review): this increment looks vestigial -- `count` is not read
# again after the accumulation pass.
count <- count + 1
}
}
}
} else {
stop( "Unsupported dimensionality.\n" )
}
# Re-attach the spatial metadata of domainImage (returns an ANTs image).
return( as.antsImage( imageArray, reference = domainImage ) )
}
|
e595e1057e7abe41fc1ca227ba2448f752f03461 | e970f01992264a2ade46e44c115741b20a108fe6 | /Social_Networks/Twitter_analysis.R | 86a631ed05948c19f3cf9000e1d14619e5406f5d | [] | no_license | ovdavid28/Advanced_Data_Analysis | 9b0b2a26e7253b2336f82bc0ad176edfd89a4bb9 | 378864d49da74185f2048e9c6c8b892f06f3db23 | refs/heads/master | 2020-05-02T04:41:03.358925 | 2019-04-06T05:49:38 | 2019-04-06T05:49:38 | 177,755,700 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,302 | r | Twitter_analysis.R | install.packages(c('twitteR','igraph','dplyr'))
# Build and plot a retweet network for the 'climatechange' search term.
# Requires a Twitter API app; credentials are left blank here -- never
# commit real keys to version control.
library(twitteR)
library(igraph)
library(dplyr)
api_key<-''
api_secret<-''
access_token<-'-'
access_token_secret<-''
setup_twitter_oauth(api_key,api_secret,access_token,access_token_secret)
# Fetch up to 500 matching tweets and flatten to a data frame.
alltweets<-searchTwitter('climatechange', n = 500)
alltweets<-twListToDF(alltweets)
tweets<-alltweets[1:500,]
tweets
# Keep only retweets; `sender` is parsed out of the tweet text between
# character 5 and the first ':' (presumably the "RT @name:" prefix --
# confirm against the twitteR text format).
split_point = split(tweets, tweets$isRetweet)
reTweets = mutate(split_point[['TRUE']], sender = substr(text, 5, regexpr(':', text) - 1))
# Directed, weighted edge list: sender -> retweeter, with count column n.
edge_list = as.data.frame(cbind(sender = tolower(reTweets$sender), receiver = tolower(reTweets$screenName)))
edge_list = count(edge_list, sender, receiver)
edge_list[1:5,]
reTweets_graph <- graph_from_data_frame(d=edge_list, directed=T)
save(reTweets_graph, file = "retweet-graph.Rdata")
par(bg="white", mar=c(1,1,1,1))
plot(reTweets_graph, layout=layout.fruchterman.reingold,
vertex.color="blue",
vertex.size=(degree(reTweets_graph, mode = "in")), #sized by in-degree centrality
vertex.label = NA,
edge.arrow.size=0.8,
edge.arrow.width=0.5,
edge.width=edge_attr(reTweets_graph)$n/10, #sized by edge weight
edge.color=hsv(h=.95, s=1, v=.7, alpha=0.5))
title("Retweet Climate Change Network", cex.main=1, col.main="black")
|
c280656899502618cebc1915e2c420579d228f68 | 99b84703a5df130a2b5f83a8f0c0fb1771d30cac | /CodeFiles/Feb03.2020.R | 157c2cb996a1ce9b8a6461ead923a362aa36b6ff | [] | no_license | jakelawlor/TidyTuesday_JL | 83b582991ead0782310bae4d25f3e2353b2082ef | fe07240d549d8d51278151af5b868cf1d9745d55 | refs/heads/master | 2021-07-01T14:33:27.309313 | 2021-03-02T20:21:30 | 2021-03-02T20:21:30 | 227,494,516 | 25 | 6 | null | null | null | null | UTF-8 | R | false | false | 728 | r | Feb03.2020.R | ## Tidy Tuesday Feb 3, 2020
attendance <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-02-04/attendance.csv')
standings <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-02-04/standings.csv')
games <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-02-04/games.csv')
theme_set(theme_bw())
# libraries
library(tidyverse)
library(ggplot2)
library(dplyr)
rm(tuesdata)
## view dataset
attendance %>% glimpse()
standings %>% glimpse()
games %>% glimpse()
attendance %>%
ggplot(aes(x=year,y=home)) +
geom_line() +
facet_wrap(~team_name)
?distinct()
|
1a078d45159808e9c604c39b6089439f932fa008 | e4ff3a5fc17302d8d4fd86b38072e67ffe1aedec | /R/mtt2.R | c9767c3f62da3e084ad594b7a72f204860543674 | [] | no_license | cran/robeth | 5782cfcb86e6931152dc8a0b9b8f71e97068e449 | 5b60aabc7c1b21f15d73d1246ab40c1defdf5f7f | refs/heads/master | 2023-08-31T11:44:10.694653 | 2023-08-22T08:00:05 | 2023-08-22T09:31:11 | 17,699,284 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 248 | r | mtt2.R | "mtt2" <-
function(a,n) {
if (missing(a)) messagena("a")
if (missing(n)) messagena("n")
nn <- length(a)
b <- single(nn)
f.res <- .Fortran("mtt2z",
a=to.single(a),
b=to.single(b),
n=to.integer(n),
nn=to.integer(nn))
list(b=f.res$b)
}
|
2c5757750027484c28451174de77b5b9ee090dda | ea2360f5466ba3f1cc92aebe34189a66f529698b | /sparkHardDrive/R/trialAndError.R | 204cb4cdbe0ef169eb65909112f872ec82dce6a7 | [
"MIT"
] | permissive | kklamsi/nsdb | 538ff123de2314eba9030a4081073d97a0297829 | 298576a6320273867496d7810d9b40d27a3de272 | refs/heads/master | 2020-04-29T02:21:36.865364 | 2019-06-05T08:37:11 | 2019-06-05T08:37:11 | 175,764,138 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,311 | r | trialAndError.R | library(sparklyr)
library(tidyverse)
library(dplyr)
sc <- spark_connect(master = "local")
read.csv.zip <- function(zipfile, ...) {
# Create a name for the dir where we'll unzip
zipdir <- tempfile()
# Create the dir using that name
dir.create(zipdir)
# Unzip the file into the dir
unzip(zipfile, exdir=zipdir)
# Get a list of csv files in the dir
files <- list.files(zipdir)
files <- files[grep("//.csv$", files)]
# Create a list of the imported csv files
csv.data <- sapply(files, function(f) {
fp <- file.path(zipdir, f)
return(read.csv(fp, ...))
})
return(csv.data)}
data_Q1_2016 <- read.csv.zip("/data_Q1_2016.zip")
data_Q1_2016 <- read.csv("/data_Q1_2016.csv")
## harddriveKaggle ##
harddriveKaggle <- read.csv("/harddriveKaggle.csv")
harddriveKaggle <- select(harddriveKaggle, failure, smart_1_normalized, smart_3_normalized,
smart_5_normalized, smart_7_normalized, smart_9_normalized,
smart_192_normalized, smart_193_normalized, smart_194_normalized)
sampleFailure <- sample_n(filter(harddriveKaggle, failure == 1), 1000, replace = T)
sampleRunning <- sample_n(filter(harddriveKaggle, failure == 0), 1000, replace = F)
sample <- bind_rows(sampleFailure, sampleRunning)
|
b3718b0f71aa5424aed52f6283888d7cef1d2a8e | 63299b8fd41d02dfb2f7187bf9f8b07b037a07df | /2 WAY ANOVA.R | 0b0607d8979c0015fef3093a3a720700de6f3119 | [] | no_license | shrddha-p-jain/Statistics-Practicals | c971c7307b6e79e16598ad6c2bb1d44baa548698 | d4f4c2075a9d74b5b60aed6bfe5b94e001416872 | refs/heads/main | 2023-07-17T00:50:25.601213 | 2021-08-26T14:46:51 | 2021-08-26T14:46:51 | 400,203,710 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,645 | r | 2 WAY ANOVA.R | #2 WAY ANOVA
#AN EXPANDING COMPANY WANTED TO KNOW HOW TO INTRODUCE A NEW TYPE OF MACHINE INTO THE FACTORY.
#SHOULD IT TRANSFER STAFF WORKING ON THE OLD MACHINE TO OPEARTE IF OR EMPLOY NEW STAFF WHO HAD NOT WORKED ON ANY MACHINE BEFORE?
# A RESEARCHER SELECTED 12 STAFF WHO HAD EXPERIENCE OF THE OLD MACHINE AND 12 STAFF WHO HAD NO SUCH EXPERIENCE. HALF
#THE PARTICIOANTS FROM EACH GROUP WERE ALLOCATED TO THE NEW Mchine and half to ther old machine.
#the number of errors made by the praticipamnts over a set period was measured.
#H01:PERSOM:THERE IS NO SIGNIFICSNCE DIFFERENCE BETWEEN NEW AND EXPERIENCED PERSON.
#H02:MACHINE:THERE IS NO SIGNIFICSNCE DIFFERENCE BETWEEN OLD AND NEW MACHINE.
#H03:INTERACTIOPN:THERE IS NO SIGNIFICSNCE DIFFERENCE BETWEEN PERSON AND MACHINE.
person=rep(rep(c('newly','experience'),each=6),time=2)
person
machine=rep(c('old','new'),each=12)
errors=c(4,5,7,6,8,5,1,2,2,3,2,3,5,6,5,6,5,6,8,9,8,8,7,9)
data1=data.frame(person,machine,errors)
data1
str(data1)
data1.aov=aov(errors~person+machine,data=data1)
summary(data1.aov)
#h01:accepted (no diff)
#h02:rejected (diff)
data2.aov=aov(errors~person*machine,data=data1)#3rd
summary(data2.aov)
#h03:rejected (diff)
model.tables(data2.aov,type="means")
plot.design(data1)#single
#interaction
interaction.plot(person,machine,errors)
TukeyHSD(data2.aov)
TukeyHSD(data2.aov,ordered=T)
plot(TukeyHSD(data2.aov))
op=par(mar=c(5,8,4,2))
op
plot(TukeyHSD(data2.aov,ordered=T),cex.axis=1,las=0)
plot(TukeyHSD(data2.aov,ordered=T),cex.axis=1,las=1)
plot(TukeyHSD(data2.aov,ordered=T),cex.axis=1,las=2)
plot(TukeyHSD(data2.aov,ordered=T),cex.axis=1,las=3)
#2 WAY ANOVA -- worked example 2.
# Study of the effect of observing violent acts on subsequent aggressive
# behaviour.  Factor 1: kind of violence watched (cartoon vs. real-action
# video).  Factor 2: exposure time (10 vs. 30 minutes).  8 children per
# group; after watching, each child plays a computer game offering 100
# opportunities to make an aggressive choice, and the number of aggressive
# choices is recorded.
# H01 (kind):        no difference between cartoon and real-action video.
# H02 (time):        no difference between 10 and 30 minutes of exposure.
# H03 (interaction): no kind x time interaction.
time=rep(rep(c('10 mins','30 mins'),each=8),times=2)
e=c(47,56,48,51,46,44,50,51,67,69,65,62,67,69,59,72,52,62,57,49,64,39,50,48,
81,92,82,92,82,94,86,83)
kind=rep(c('cartoon','real action'),each=16)
kov=data.frame(time,kind,e)
kov#print the assembled data frame
str(kov)
# Main-effects model, then the full model with the kind:time interaction.
kov.aov=aov(e~kind+time,data=kov)
summary(kov.aov)
kov.aov1=aov(e~kind*time,data=kov)
summary(kov.aov1)
model.tables(kov.aov1,type="means")
plot.design(kov)#single-factor effect plot
#interaction plot
interaction.plot(time,kind,e)
# Tukey post-hoc comparisons and plots with varying label orientation.
TukeyHSD(kov.aov1)
TukeyHSD(kov.aov1,ordered=T)
plot(TukeyHSD(kov.aov1))
op=par(mar=c(5,8,4,2))
op
plot(TukeyHSD(kov.aov1,ordered=T),cex.axis=1,las=0)
plot(TukeyHSD(kov.aov1,ordered=T),cex.axis=1,las=1)
plot(TukeyHSD(kov.aov1,ordered=T),cex.axis=1,las=2)
plot(TukeyHSD(kov.aov1,ordered=T),cex.axis=1,las=3)
|
95eb38163386b754ca9468a6d1148b01987c983d | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/export/examples/rgl2bitmap.Rd.R | 8bee6661027740237aa691077f03c54d50ad6a26 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 516 | r | rgl2bitmap.Rd.R | library(export)
### Name: rgl2bitmap
### Title: Save currently active rgl 3D graph to bitmap format
### Aliases: rgl2bitmap rgl2png rgl2png
### ** Examples
# Create a file name
filen <- tempfile(pattern = "rgl") # or
# filen <- paste("YOUR_DIR/rgl")
# Generate a 3D plot using 'rgl'
x = y = seq(-10, 10, length = 20)
z = outer(x, y, function(x, y) x^2 + y^2)
rgl::persp3d(x, y, z, col = 'lightblue')
# Save the plot as a png
rgl2png(file = filen)
# Note that omitting 'file' will save in current directory
|
0a6086845f9875b836d34764f99cba9ada3ad209 | a62befa320d9d10aaa8815899d76bd138add029a | /final_analysis/Sample_Analysis.R | a2967c4db8f4d4bba60274522492714c1e62618c | [] | no_license | sfpacman/MSc-Project | 7e84dd887abfc36bb1da6c5bc439dce24863ad95 | d87cea5a35435bf385b975bbc8a4be61c28218b7 | refs/heads/master | 2021-01-20T12:34:48.425715 | 2017-09-05T09:47:31 | 2017-09-05T09:47:31 | 90,382,245 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,065 | r | Sample_Analysis.R |
library(Matrix)
library(ggplot2)
library(dplyr)
source("danaher_def.R")
# Five combination of normalization and clustering are applied and analyzed by the following codes:correlation, UMI+kmean, computeSumFactors+kmeans, UMI+kmeans, UMI+SNN-cliq, computeSumFactors+ SNN-cliq
# computeSumFactors+ SNN-cliq have two set of data (k = 11 and k = 5)
### This is the example of correlation (Figure. 3A and Supp.table 1)
sample_analysis <-readRDS("sample_analysis.rds")
sample <- readRDS("sample.rds")
rcell_assign <- sample_analysis$X$cls_id
act <- unlist(sample$summary[,2])
result <- data.frame(pre=rcell_assign,act =act)
x<- result %>% group_by(act,pre) %>% summarise (n = n()) %>% mutate(Precentage = n / sum(n)*100)
# This produces supplmentary table 1
kable(x,"latex",col.names=c("Actual Identity", "Prediction","cell count","% cell count" ))
freq <- as.numeric(table(result$pre))
# This produces figure 3A
pie(freq, labels = paste(cluster_type,signif(clu$S_s_11[,2]/sum(clu$S_s_11[,2])*100,2),"%",sep=" "))
### This is the example UMI+kmean (Figure 3B and Supp. table 3)
### The rest of Danher's marker results are produced in this methods by change m_n (expression matrix) and cluster_assign (cluster assignment)
### necessary for getting the gene name for danaher's markers
pbmc68k <- readRDS("pbmc68k.rds")
gen <- pbmc68k$all_data[[1]]$hg19$gene_symbols
ngn <- sapply(use_gene, function(x){gen[x]})
m_n <- sample_analysis$X$m_n
### mean score computing ###
mean_score <- list()
for(i in 1:length(cell_list)){
type= cell_list[i]
type_gene_select <- match(unlist(cell_list[i]),ngn)
type_gene_select <- type_gene_select[!is.na(type_gene_select)]
type_expr <- m_n[,type_gene_select]
type_mean <-colMeans(t(type_expr))
mean_score <- cbind(unlist(mean_score), type_mean)
}
colnames(mean_score) <- names(cell_list)
cluster_assign <- sample_analysis$X$k$cluster
cluster_mean <- data.frame(Cluster= cluster_assign,mean_score)
score_by_cluster <- round(cluster_mean %>% group_by(Cluster) %>% summarise_all(funs(mean)),3)
score_by_cluster <- score_by_cluster[,-which(names(score_by_cluster) %in% c("Cluster"))]
### selecting cell identity for each cluster ###
cluster_type <- list()
for( i in 1:nrow(score_by_cluster)){
x <- as.numeric(score_by_cluster[i,])
if( mean(x) == 0 ) {cluster_type = c(unlist(cluster_type),0) }
else {cluster_type = c(unlist(cluster_type), which(x== max(x)))} }
rcell_assign <-sapply(cluster_mean$Cluster,function(x){cluster_type[x]})
rcell_assign <- factor(rcell_assign,levels=c(0:length(cell_list)),labels= c("unknown",names(cell_list)))
act <- unlist(sample$summary[,2])
result <- data.frame(pre=rcell_assign,act =act)
x<- result %>% group_by(act,pre) %>% summarise (n = n()) %>% mutate(Precentage = n / sum(n)*100)
# This produces supplmentary table 3
kable(x,"latex",col.names=c("Actual Identity", "Prediction","cell count","% cell count" ))
#This produces figure 3B
freq <- as.numeric(table(result$pre))
pie(freq, labels = paste(cluster_type,signif(clu$S_s_11[,2]/sum(clu$S_s_11[,2])*100,2),"%",sep=" "))
|
474c615cfa0aeb0aaa4bbbe0193626cff8f875dd | 3b8730002a61a6296f5e1394b3a49e0e487bba1e | /testing.r | ee3f84278c8f8888f992ad7c380dfa6cc4cb1565 | [] | no_license | AliShahzad07/Hello-R | 827602fc27bbd9941e5d617d10e0b4ceddab6ff6 | 39d61f823782cf7b8ade2cc06c73715a6fe50f86 | refs/heads/main | 2023-02-24T19:31:05.278170 | 2021-02-08T06:24:14 | 2021-02-08T06:24:14 | 336,978,972 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 35 | r | testing.r | #This is fron git
"Hello From git"
|
43c247924f4edf431d00a6fba0bb208008fb765b | 4b90b3de3c3e819e424774bb1b90826917747e6d | /shujuzhunbei1().R | 204cea22e9d8e8202533acf9f7b80dd4b1e0c227 | [] | no_license | leivenwong/RFiles-64 | f7f375df57aced0c9e3139e682cf2588ff9332d4 | 8847c8c8e13561269804cc9de73d491613e97e56 | refs/heads/master | 2020-03-28T10:48:03.263988 | 2018-09-10T11:38:51 | 2018-09-10T11:38:51 | 148,146,193 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,103 | r | shujuzhunbei1().R | shujuzhunbei1<-function(id="SH000001",cy="1D",JC=10)
{
# NOTE(review): fetches daily "IF" bars over ODBC, computes a daily change
# percentage and a JC-day moving average of the lows, and labels every day
# with a two-letter market-state code. The arguments `id` and `cy` are never
# used in the body -- presumably meant to select instrument/cycle; verify.
library("RODBC")
library("xts")
# pull all daily bars ordered by timestamp; the raw query result is also
# published into the global environment as IF.1D (overwritten again below)
channel<-odbcConnect("ctp_merged_mq",uid="ctp_user",pwd="ctp_password")
IF.1D<<-sqlQuery(channel,"select * from if_1d order by utc_string")
odbcClose(channel)
IFTIME <- IF.1D$utc_string
IFOPEN <- IF.1D$open_price
IFHIGH <- IF.1D$high_price
IFLOW <- IF.1D$low_price
IFCLOSE <- IF.1D$close_price
# rebuild a plain OHLC data frame with normalized column names
PINZHONG <- list()
PINZHONG$TIME<-IFTIME
PINZHONG$OPEN<-IFOPEN
PINZHONG$HIGH<-IFHIGH
PINZHONG$LOW<-IFLOW
PINZHONG$CLOSE<-IFCLOSE
PINZHONG <- as.data.frame(PINZHONG)
IF.1D <<- PINZHONG
DATETIME<-as.Date(PINZHONG$TIME)
CLOSE<-as.xts(PINZHONG$CLOSE,DATETIME)
# previous day's close (NA for the first row)
REFCLOSE<-lag(CLOSE,k = 1,na.pad=TRUE)
# daily change in percent; NOTE(review): the denominator is today's close,
# not the previous close -- confirm this is the intended definition
ZF<-xts((as.numeric(CLOSE)-as.numeric(REFCLOSE))/as.numeric(CLOSE)*100,DATETIME)
MACLOSE<-rollapply(CLOSE,JC,mean)
REFMACLOSE<-lag(MACLOSE,k = 1,na.pad=TRUE)
LOW<-as.xts(PINZHONG$LOW,DATETIME)
# JC-day moving average of the daily lows, used as the regime threshold
MALOW<-rollapply(LOW,JC,mean)
MS<-NA
ZF<-as.numeric(ZF)
CLOSE<-as.numeric(CLOSE)
MALOW<-as.numeric(MALOW)
# classify each day from JC+1 on: the first letter of the code is
# Q (close >= MA of lows) or R (close below it); the second letter
# buckets the daily change ZF: S >=5, T [3,5), U (1,3), V [0,1],
# W [-1,0), X (-3,-1), Y (-5,-3], Z <=-5
i<- (JC+1)
bl<-length(DATETIME)
while(i<=bl)
{
if(ZF[i]>=5
& CLOSE[i]>=MALOW[i])
MS[i]<-"QS"
else if(ZF[i]>=3
& ZF[i]<5
& CLOSE[i]>=MALOW[i])
MS[i]<-"QT"
else if(ZF[i]<3
& ZF[i]>1
& CLOSE[i]>=MALOW[i])
MS[i]<-"QU"
else if(ZF[i]<=1
& ZF[i]>=0
& CLOSE[i]>=MALOW[i])
MS[i]<-"QV"
else if(ZF[i]<0
& ZF[i]>=-1
& CLOSE[i]>=MALOW[i])
MS[i]<-"QW"
else if(ZF[i]<0-1
& ZF[i]>-3
& CLOSE[i]>=MALOW[i])
MS[i]<-"QX"
else if(ZF[i]<=0-3
& ZF[i]>-5
& CLOSE[i]>=MALOW[i])
MS[i]<-"QY"
else if(ZF[i]<=-5
& CLOSE[i]>=MALOW[i])
MS[i]<-"QZ"
else if(ZF[i]>=5
& CLOSE[i]<MALOW[i])
MS[i]<-"RS"
else if(ZF[i]>=3
& ZF[i]<5
& CLOSE[i]<MALOW[i])
MS[i]<-"RT"
else if(ZF[i]<3
& ZF[i]>1
& CLOSE[i]<MALOW[i])
MS[i]<-"RU"
else if(ZF[i]<=1
& ZF[i]>=0
& CLOSE[i]<MALOW[i])
MS[i]<-"RV"
else if(ZF[i]<0
& ZF[i]>=-1
& CLOSE[i]<MALOW[i])
MS[i]<-"RW"
else if(ZF[i]<0-1
& ZF[i]>-3
& CLOSE[i]<MALOW[i])
MS[i]<-"RX"
else if(ZF[i]<=-3
& ZF[i]>-5
& CLOSE[i]<MALOW[i])
MS[i]<-"RY"
else if(ZF[i]<=-5
& CLOSE[i]<MALOW[i])
MS[i]<-"RZ"
else MS[i]<-NA;
i<-i+1
}
# NEXTMS holds the following day's state; the last day gets "WAIT".
# NOTE(review): 1:(length(MS)-1) misbehaves when length(MS) <= 1.
NEXTMS<-0
for(i in 1:(length(MS)-1))
NEXTMS[i]<-MS[i+1]
NEXTMS[length(MS)]<-"WAIT"
PINZHONG$ZF<-as.numeric(ZF)
PINZHONG$MS<-MS
PINZHONG$NEXTMS<-NEXTMS
PINZHONG$DATETIME<-DATETIME
PINZHONG$DATE<-as.character(DATETIME)
PINZHONG$LOW<-as.numeric(LOW)
PINZHONG$MALOW<-as.numeric(MALOW)
# na.omit drops the warm-up rows (first JC days) whose MA/state is NA;
# the labelled table is also published globally as IF.1D.1
PINZHONG<-na.omit(PINZHONG)
IF.1D.1 <<- as.data.frame(PINZHONG)
return(as.data.frame(PINZHONG))
}
|
0e53c2b32150f83b7868e587607e55068dbeaa55 | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.storage/man/s3control_get_multi_region_access_point_routes.Rd | a1a79ebcf9b9d89c3f9077d5ef864bc9634928e9 | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 857 | rd | s3control_get_multi_region_access_point_routes.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/s3control_operations.R
\name{s3control_get_multi_region_access_point_routes}
\alias{s3control_get_multi_region_access_point_routes}
\title{Returns the routing configuration for a Multi-Region Access Point,
indicating which Regions are active or passive}
\usage{
s3control_get_multi_region_access_point_routes(AccountId, Mrap)
}
\arguments{
\item{AccountId}{[required] The Amazon Web Services account ID for the owner of the Multi-Region
Access Point.}
\item{Mrap}{[required] The Multi-Region Access Point ARN.}
}
\description{
Returns the routing configuration for a Multi-Region Access Point, indicating which Regions are active or passive.
See \url{https://www.paws-r-sdk.com/docs/s3control_get_multi_region_access_point_routes/} for full documentation.
}
\keyword{internal}
|
be7d0f8c1883b5db42520ce2baea6e84614d8381 | 884677dd48325c8314489ce0cfe0770b0d97e5c0 | /man/grpimpperm.rf.Rd | 709cc934120c43dc62f27ed3fd182d38352b3a79 | [] | no_license | dtrfgv/dtrfgv | 1ff5657c39933ae14f5057dbdd570c6235648461 | a736537579ecddaa124898c3683eb209aad7537e | refs/heads/master | 2020-03-28T12:36:31.603482 | 2019-04-26T20:37:56 | 2019-04-26T20:37:56 | 148,315,133 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,188 | rd | grpimpperm.rf.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Functions_variant_CARTGV_for_RFGV.R
\name{grpimpperm.rf}
\alias{grpimpperm.rf}
\title{grpimpperm.rf}
\usage{
grpimpperm.rf(num_group, data, oobsamples, group, tree, impurityacc)
}
\arguments{
\item{num_group}{index of the considered group}
\item{data}{a data frame containing all observation included in the training data set.}
\item{oobsamples}{a vector containing the indices of the observations that are not included in the sample used to build the modified CARTGV tree.}
\item{group}{a vector with the group number of each variable. (WARNING : if there are "\code{p}" goups, the groups must be numbers
from "\code{1}" to "\code{p}" in increasing order. The group label of the response variable is missing (i.e. NA)).}
\item{tree}{the output of the function \code{cartgv.rf}.}
\item{impurityacc}{the accuracy value of the modified CARTGV tree.}
}
\value{
DecreaseAcc the decrease in accuracy from permuting the values of the considered group.
}
\description{
Compute the permutation importance of a group from a modified CARTGV tree. Used only by the function \code{rfgv} when grp.importance=TRUE.
}
|
44283c9a9d0f40d361f60da52063a6ca75598b96 | c4b195fe15bd50be87eee1d9461221ae4dfdebb7 | /R/calculateStats.R | 27f2fa6d6d6dbea78c4019914104f6f5f2a0faf1 | [] | no_license | ischeller/FraseR | 1c3f758d220109efc33afa09d5ebc7ff4ad0ef22 | 34dc4fb9713c37edda30a2ea06efc2c0777e8ab7 | refs/heads/master | 2020-08-09T00:28:56.164226 | 2019-10-08T14:24:42 | 2019-10-08T14:24:42 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,290 | r | calculateStats.R | ##
## @author Christian Mertes \email{mertes@@in.tum.de}
##
## This file contains all functions for calculating the statistics
## First it starts with calculating the Z-score for each
## site and then the p-values are calculated depending on the
## given method in the setting file
##
#'
#' Calculate the zscore for each PSI value.
#'
#' For every requested PSI metric the per-junction z-score across samples
#' is computed and stored back into the object.
#'
#' @param fds A FraseRDataSet containing the PSI assays.
#' @param type The PSI type(s) to compute z-scores for; defaults to all
#'     types in \code{psiTypes}.
#' @return The FraseRDataSet with one z-score assay per requested type.
#' @export
#' @examples
#' fds <- countRNAData(createTestFraseRSettings())
#' fds <- calculatePSIValues(fds)
#' fds <- calculateZScores(fds)
calculateZScores <- function(fds, type=psiTypes){
# check input; is() also accepts S4 subclasses of FraseRDataSet, which
# the previous class(fds) == "FraseRDataSet" comparison rejected
stopifnot(is(fds, "FraseRDataSet"))
# calculate zscore for each psi type
for(pt in type){
fds <- calculateZScorePerDataSet(fds, pt)
}
return(fds)
}
#'
#' calculates the zscore for a given data type and a given psi type
#' and adds it directly to the dataset itself
#'
#' @param fds The FraseRDataSet to read the PSI assay from and to write
#'     the z-score assay back into.
#' @param psiType Name of the PSI assay to standardize.
#' @return The FraseRDataSet with the z-score assay for psiType set.
#' @noRd
calculateZScorePerDataSet <- function(fds, psiType){
message(date(), ": Calculate the Zscore for ", psiType, " values ...")
# get the raw PSI matrix; NAs are NOT replaced here -- they are only
# skipped via na.rm below and therefore propagate into the z-scores
psiVal <- assay(fds, psiType)
# per-junction standardization: z = ( x - mean ) / sd, computed row-wise
# across samples (rowmean/rowsd recycle column-wise over the matrix;
# a zero-variance row yields NaN z-scores from the 0/0 division)
rowmean <- rowMeans(psiVal, na.rm = TRUE)
rowsd <- apply(psiVal, 1, sd, na.rm = TRUE)
zscores <- (psiVal - rowmean) / rowsd
# as.matrix materializes a possibly delayed/HDF5-backed result before
# storing it through the zScores<- setter
zScores(fds, type=psiType) <- as.matrix(zscores)
return(fds)
}
#'
#' calculates the P-Value for the given FraseR dataset object
#' The P-Value calculation is based on the given method in the
#' FraseRSettings object
#'
#' @param fds The FraseRDataSet to compute p-values for.
#' @param type The PSI type(s) to test; defaults to all types in psiTypes.
#' @param internBPPARAM BiocParallel backend forwarded to the per-type test
#'     function (used by the Fisher method).
#' @param ... Further arguments forwarded to the per-type test function.
#' @export
#' @examples
#' fds <- countRNAData(createTestFraseRSettings())
#' fds <- calculatePSIValues(fds)
#' fds <- calculatePValues(fds)
calculatePValues <- function(fds, type=psiTypes, internBPPARAM=bpparam(), ...){
# check input
stopifnot(class(fds) == "FraseRDataSet")
enforceHDF5 <- FALSE
# pick the per-type test function (plus its fixed extra arguments) based
# on the method stored in the settings; unknown methods abort here
FUN <- switch(method(fds),
betaBin = {
# beta-binomial testing requires HDF5-backed assays (checked below)
enforceHDF5 <- TRUE
list(FUN=pvalueByBetaBinomialPerType, pvalFun=betabinVglmTest)
},
Fisher = {
list(FUN=pvalueByFisherPerType, internBPPARAM=internBPPARAM)
},
stop("The provided method is not present for this package.",
"Please set the method to one of the following:",
"Fisher, betaBin, DESeq2, Martin"
)
)
# check, that the object is stored as HDF5 array if requested
aIsHDF5 <- sapply(assays(fds), function(x) any("DelayedArray" == is(x)))
if(enforceHDF5 & !all(aIsHDF5)){
# NOTE(review): the message announces a conversion to HDF5, but the
# actual save call below is commented out -- confirm this is intended
message(date(), ": The data is not stored in a HDF5Array. ",
"To improve the performance we will store now ",
"the data in HDF5 format.")
# fds <- saveFraseRDataSet(fds)
}
# run the chosen test once per requested psi type, saving after each one.
# NOTE(review): `aname` is not defined in this function or its arguments;
# it must resolve from an enclosing/global scope -- verify it is not a
# leftover from a refactoring
for(psiType in type){
fds <- do.call(FUN[[1]],
c(fds=fds, aname=aname, psiType=psiType, FUN[-1], ...)
)
fds <- saveFraseRDataSet(fds)
gc()
}
# return the new datasets
return(fds)
}
#'
#' calculates the pvalue per type (psi3,psi5,spliceSite) with fisher
#'
#' Runs the per-group Fisher test for every non-NA sample group and then
#' expands the per-group result back to one p-value vector per sample.
#'
#' @param dataset The FraseRDataSet holding counts and sample groups.
#' @param psiType The PSI type whose counts are tested.
#' @param internBPPARAM BiocParallel backend for the inner per-row loop.
#' @return A DataFrame with one p-value column per sample (all-NA columns
#'     for samples whose group assignment is NA).
#' @noRd
pvalueByFisherPerType <- function(dataset, psiType, internBPPARAM){
# go over each group, skipping NA group assignments
group <- sampleGroup(dataset)
# reads supporting the event vs reads on the other side, used to test for
# aberrant splicing (eg: nonSplicedReads)
rawCounts <- counts(dataset, type=psiType, side="ofIn")
rawOtherCounts <- counts(dataset, type=psiType, side="other")
# one Fisher run per unique group, parallelized over groups
pvalues <- bplapply(unique(na.omit(group)), dataset=dataset,
rawCounts=rawCounts, rawOtherCounts=rawOtherCounts,
BPPARAM=parallel(dataset),
internBPPARAM=internBPPARAM,
FUN=.testPsiWithFisherPerGroup
)
names(pvalues) <- as.character(unique(na.omit(group)))
# replicate each group's p-value vector for every sample of that group
pvalues_full <- pvalues[as.character(group)]
# add NA's for the samples that were not tested (NA group)
pvalues_full[is.na(group)] <- list(
rep(as.numeric(NA), length(pvalues[[1]]))
)
# transform it to a DataFrame and return it
return(.asDataFrame(pvalues_full, samples(dataset)))
}
#'
#' calculates the pvalue per group with fisher
#'
#' Builds one 2x2 contingency table per junction (in-group vs out-group,
#' supporting vs other read sums) and runs Fisher's exact test on it.
#'
#' @param dataset The FraseRDataSet; sample grouping is taken from its
#'     settings object.
#' @param groupID The group whose samples are tested against all others.
#' @param rawCounts Per-junction counts supporting the tested event.
#' @param rawOtherCounts Per-junction counts on the other side.
#' @param internBPPARAM BiocParallel backend for the per-row tests.
#' @return Numeric vector of p-values, NA where the group has no reads.
#' @noRd
.testPsiWithFisherPerGroup <- function(dataset, groupID, rawCounts,
        rawOtherCounts, internBPPARAM){
    # get group to test
    # NOTE(review): the sibling function uses sampleGroup(dataset) while
    # this one uses sampleGroup(dataset@settings) -- confirm both resolve
    # to the same grouping.
    group <- sampleGroup(dataset@settings)
    group2Test <- group == groupID
    group2Test[is.na(group2Test)] <- FALSE

    # per-junction 2x2 table: rows = in-group/out-group samples,
    # columns = supporting/other reads
    fullFisherTable <- data.table(
        TP=rowSums(rawCounts[ , group2Test,with=FALSE]),
        FP=rowSums(rawCounts[ ,!group2Test,with=FALSE]),
        FN=rowSums(rawOtherCounts[, group2Test,with=FALSE]),
        TN=rowSums(rawOtherCounts[,!group2Test,with=FALSE])
    )

    # test only where the group of interest has at least one read
    fisherTableToTest <- fullFisherTable[TP+FN > 0]

    # seq_len() instead of 1:nrow(): with zero testable rows 1:nrow()
    # would yield c(1, 0) and run the test on non-existing rows
    pvalues <- unlist(bplapply(seq_len(nrow(fisherTableToTest)),
        BPPARAM=internBPPARAM,
        fisherTableToTest=fisherTableToTest,
        FUN=function(idx, fisherTableToTest){
            fisher.test(matrix(as.integer(
                fisherTableToTest[idx]), nrow=2
            ))$p.value
        }
    ))

    # keep NA where the tested group had no reads at all
    fullFisherTable[,pvalue:=as.numeric(NA)]
    fullFisherTable[TP+FN>0,pvalue:=pvalues]
    return(fullFisherTable[,pvalue])
}
|
fc95f64b79ee3f8389967c72f135b83f977631a3 | 71129b1c03eed2abdd67fc2b52b57874bae49f45 | /collapsibleTree/tests/testthat/test-root.R | 95d06b7c83c0e7e6836924f67ee6a8f8802c01e1 | [] | no_license | Bastiaanspanjaard/LINNAEUS | 0eb880d8e581f870b58d69cea7060822baf8564a | 6c86288e8e684d77f5499249023e7157d0c440dc | refs/heads/master | 2022-11-13T10:48:40.584477 | 2020-07-03T14:56:07 | 2020-07-03T14:56:07 | 255,870,978 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,023 | r | test-root.R | library(collapsibleTree)
context("Root labelling")
test_that("unlabelled root works for collapsibleTree", {
wb <- collapsibleTree(warpbreaks, c("wool", "tension", "breaks"))
expect_is(wb,"htmlwidget")
expect_is(wb$x$data,"list")
expect_is(wb$x$options$hierarchy,"character")
})
test_that("unlabelled root works for collapsibleTreeSummary", {
wb <- collapsibleTreeSummary(warpbreaks, c("wool", "tension", "breaks"))
expect_is(wb,"htmlwidget")
expect_is(wb$x$data,"list")
expect_is(wb$x$options$hierarchy,"character")
})
test_that("labeled root works for collapsibleTree", {
wblabeled <- collapsibleTree(warpbreaks, c("wool", "tension", "breaks"), "a label")
expect_is(wblabeled$x$data,"list")
expect_is(wblabeled$x$options$hierarchy,"character")
})
test_that("labeled root works for collapsibleTreeSummary", {
wblabeled <- collapsibleTreeSummary(warpbreaks, c("wool", "tension", "breaks"), "a label")
expect_is(wblabeled$x$data,"list")
expect_is(wblabeled$x$options$hierarchy,"character")
})
|
7ab9dcb2eb91618bb64934736015cadbac3caa87 | 57b6bc2896092a29cbd829199359f96be3da7572 | /2019/test.R | 3af42ba4b83e9b46526cd55b7d70e76b6ab397df | [] | no_license | azambranog/hash_code | 1a6b43c8af3388df1bba1e4750e092825eca6886 | 0e0787d72712d7084bb8c397e3e70c372ee1e3f0 | refs/heads/master | 2022-03-13T15:20:22.978505 | 2022-02-24T21:39:54 | 2022-02-24T21:39:54 | 239,148,802 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 668 | r | test.R | source('format_as_tables.R')
library(parallel)

# Time how long parsing the input file into the slide/tags table takes.
message('####', 'prepare data')
t <- system.time({
  data <- format_as_table('b_lovely_landscapes.txt')
})
print(t)

# Benchmark the tag-expansion step for decreasing worker counts (10 .. 1).
for (cores in 10:1) {
  message('##' , cores)
  t <- system.time( {
    # seq_len() instead of as.list(1:nrow(data)): safe when the table is
    # empty (1:0 would yield c(1, 0)), and mclapply accepts a plain
    # integer vector directly.
    all_tags <- mclapply(seq_len(nrow(data)), function(i) {
      # one (slide, tag) row per tag of slide i
      x <- data.table(
        slide = data[i, slide], tag = unlist(data[i, tags][[1]])
      )
      return(x)
    }, mc.cores = cores)
    # collapse the per-slide tables into one long slide/tag table
    all_tags <- rbindlist(all_tags)
  })
  print(t)
}
|
52dafb5284e7deb8c1cf633e82bfc3085c33be1f | e48dc77b5684cd3f1b2d2081808a93575692e89f | /week3.R | f123774f03250ada471f4b968b25d4dfd8ec4d37 | [] | no_license | mindmui/CleanData-Coursera | 7deabdc16e1468c7f22369fa909c7a6093d25b6e | 815023b244581128c80c334b55307a49c89f7e08 | refs/heads/master | 2021-01-10T06:35:10.347878 | 2016-02-26T17:58:50 | 2016-02-26T17:58:50 | 51,161,243 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,658 | r | week3.R | # Week 3 - Subsetting
# NOTE(review): lecture-notes script; it installs packages, changes the
# working directory and downloads data when sourced.
# a quick review:
set.seed(12345)
x <- data.frame("var1"=sample(1:5),"var2"=sample(6:10),"var3"=sample(11:15))
# shuffle the rows and knock out two var2 values to demo NA handling
x<- x[sample(1:5),]; x$var2[c(1,3)] = NA
#
x[x$var1<=3,] # subsetting where var1 is less than or equal to 3
# Dealing with missing values: which() drops NAs from a logical index
x[which(x$var2 > 8),]
# sort
sort(x$var1, decreasing =TRUE, na.last = TRUE)
# ordering
x[order(x$var1),] # reorder the rows such that variable 1 is in increasing value
# ordering with plyr
install.packages("plyr")
library(plyr)
arrange(x,var1) # is the same as the above command
arrange(x,desc(var1)) # wrap with desc for descending order
# adding rows and columns
x$var4 <- rnorm(5)
x # added variable
y <- cbind(x,rnorm(5)) # bind to the right of x
y
# Summarizing data
# Setting up
setwd("/Users/Mind/Desktop/Cleandata-Coursera")
if(!file.exists("./data")){
dir.create("data")
}
fileUrl <- "http://data.baltimorecity.gov/api/views/k5ry-ef3g/rows.csv?accessType=DOWNLOAD"
download.file(fileUrl,destfile="./data/restaurants.csv",method="curl")
restData <- read.csv("./data/restaurants.csv")
# look at a bit of data
head(restData)
tail(restData, n=2) # shows bottom 2
summary(restData) # for qualitative shows the count
# for quantitative shows the distribution
str(restData)
quantile(restData$councilDistrict,na.rm=TRUE)
quantile(restData$councilDistrict,probs = c(0.5,0.75,0.9)) # different percentiles
# make table
table(restData$zipCode,useNA = "ifany") # shows the missing value (if any)
# check for missing values
sum(is.na(restData$councilDistrict))
any(restData$zipCode > 0)
all(restData$zipCode > 0)
colSums(is.na(restData)) # NA count per column; original note: none present
# values with specific characteristics
table(restData$zipCode %in% c("21212","21213"))
# this shows how many rows have zipCode 21212 or 21213
# subset the data set
restData[restData$zipCode %in% c("21212","21213"),]
# Cross tabs
data(UCBAdmissions)
DF = as.data.frame(UCBAdmissions)
xt <- xtabs(Freq ~ Gender + Admit, data=DF ) # take data from DF, Freq is the data value
# Gender and Admit are the row and column field
# flat tables: ftable() collapses a multi-dimensional table for printing.
# NOTE(review): called with no arguments here, so this line errors if run.
ftable()
# size of the data set
fakeData = rnorm(1e5)
object.size(fakeData) # shows the size (in bytes)
print(object.size(fakeData),units="Mb") # in Megabytes
# Create new variables:
# - missingness indicators
# - applying transformation
setwd("/Users/Mind/Desktop/Cleandata-Coursera")
if(!file.exists("./data")){
dir.create("data")
}
fileUrl <- "http://data.baltimorecity.gov/api/views/k5ry-ef3g/rows.csv?accessType=DOWNLOAD"
download.file(fileUrl,destfile="./data/restaurants.csv",method="curl")
restData <- read.csv("./data/restaurants.csv")
# create sequences
s1 <- seq(1,10,by=2) # tell min=1 , max=10, increasing by 2
s2 <- seq(1,10,length=3)
# create binary variable:
# NOTE(review): "Home land" looks like a typo for "Homeland"; left as-is
# because changing the string changes the result.
restData$nearMe = restData$neighborhood %in% c("Roland Park","Home land")
# the ifelse() is redundant -- the comparison is already TRUE/FALSE
restData$zipWrong = ifelse(restData$zipCode <0,TRUE,FALSE)
table(restData$zipWrong, restData$zipCode <0)
# create categorical variables
restData$zipGroups = cut(restData$zipCode, breaks= quantile(restData$zipCode))
table(restData$zipGroups)
table(restData$zipGroups,restData$zipCode)
### Easier cutting
library(Hmisc)
restData$zipGroups = cut2(restData$zipCode, g=4) # cut into 4 groups according to quantiles
table(restData$zipGroups)
# create factor variables
restData$zcf <- factor(restData$zipCode)
class(restData$zcf)
# common quantitative transforms:
x <- 10
abs(x)
sqrt(x)
ceiling(x)
floor(x)
round(x, digits = 2) # round to 2 decimal places
signif(x, digits = 2) # round to 2 significant digits
cos(x) # or sin(x), ... etc.
log(x) # natural log
log2(x) # or log10(x)
exp(x)
# Reshaping the data
install.packages("reshape2")
library(reshape2)
head(mtcars)
# melting data frames
mtcars$carname <- rownames(mtcars)
# melt stacks mpg and hp into one "variable"/"value" column pair
carMelt <- melt(mtcars, id=c("carname","gear","cyl"),measure.vars = c("mpg","hp"))
head(carMelt)
tail(carMelt)
# Casting data frames
cylData <- dcast(carMelt,cyl~variable) # cyl is the row, variable is the column
cylData
# adding an aggregation function (without one dcast defaults to a count)
cylData <- dcast(carMelt,cyl~variable,mean)
cylData
# re-summarizing
# Averaging values
head(InsectSprays)
tapply(InsectSprays$count, InsectSprays$spray,sum)
# sum the count column within each spray
# or a nicer way using the plyr package
library(plyr)
ddply(InsectSprays,.(spray),summarize,sum=sum(count))
# Creating a new variable - sum (ave keeps one value per input row)
spraySums <- ddply(InsectSprays,.(spray),summarize,sum=ave(count, FUN=sum))
spraySums
# dplyr -- working with data frames in R
# basic assumptions
# one observation per row
# each column represents a variable or measure or characteristic
# dplyr verbs
# select
# filter
# arrange
# rename
# mutate
# summarize
# dplyr properties:
# note: first argument is always a data frame
# the result is a new data frame
# Managing data frames:
chicago <- readRDS("chicago.rds")
dim(chicago)
names(chicago)
head(select(chicago, city:dptp)) # keep the columns from city through dptp
head(select(chicago, -(city:dptp))) # drop the columns from city through dptp
chicago.f <- filter(chicago, pm25 > 30) # filter data
chicago <- arrange(chicago, date) # arrange by the date
chicago <- rename(chicago, pm25 = pm25tmeans2) # change column name
# de-mean (center) pm25 using mutate
# NOTE(review): mean(pm25) without na.rm=TRUE yields NA if pm25 has NAs
chicago <- mutate(chicago, pm25detrend = pm25-mean(pm25)) # transform / create new variable using mutate
chicago <- mutate(chicago, year = as.POSIXlt(date)$year + 1900)
years <- group_by(chicago, year)
summarise(years, pm25 = mean(pm25, na.rm = TRUE))
# pipeline : %>%
# Merging data
# data from peer review experiments (file1.csv/file2.csv must exist locally)
reviews = read.csv("file1.csv")
solutions <- read.csv("file2.csv")
names(reviews) # explore the columns to merge on
names(solutions)
mergedData = merge(reviews,solutions,by.x="solution_id",by.y="id",all=TRUE)
# for reviews data frame use "solution_id"
# for solutions use "id" column
# all=TRUE keeps unmatched rows from both sides (full outer join)
# Using join commands (from the plyr package, not dplyr)
# NOTE(review): df1/df2/df3 are not defined here -- these lines are
# illustrative only; also plyr::join_all expects a list of data frames:
# join_all(list(df1, df2, df3))
join(df1,df2)
join_all(df1,df2,df3)
# the joining df MUST have the SAME column name! (drawback compared to merge)
# however, join is easier and faster when you have more than 1 data frame.
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.