blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
78a93a0aaa63a2c0e84e493173b250de472bc420
|
2e8a0f83c5a27cfd1977eb7b94863d12bee7bc5f
|
/barplotPercentClasse.R
|
76a4030610c7335b58677a5547a6fb93d442bd2b
|
[] |
no_license
|
ABorrel/saltbridges
|
611036cfa101da4c0e390de3c12b9cae04e3b5b0
|
5b8a0bb15ab6876082891f2afc2d3ce0b4c03c7a
|
refs/heads/master
| 2020-06-17T21:51:16.843534
| 2016-11-28T10:54:37
| 2016-11-28T10:54:37
| 74,966,556
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,534
|
r
|
barplotPercentClasse.R
|
#!/usr/bin/env Rscript
source("tool.R")
source("AFC.R")
# Read one row of a per-type proportion table.
#
# Args:
#   path: directory prefix for the data files (must end with the path separator).
#   type: type suffix; the file read is "<path>proportionType<type>".
#   line: 1-based index of the row to extract.
#
# Returns: a one-row data frame (the requested line of the table).
openFile <- function(path, type, line) {
  target_file <- paste0(path, "proportionType", type)
  proportions <- read.csv(target_file, sep = "\t", header = TRUE)
  proportions[line, ]
}
# Convert a matrix/data frame of counts into row-wise frequencies.
#
# Each row is divided by its own total so it sums to 1; rows whose total
# is 0 are left untouched (avoids division by zero and keeps the all-zero
# row as-is, matching the original behavior).
#
# Args:
#   data: numeric matrix or data frame of counts.
#
# Returns: an object of the same shape with each nonzero row rescaled.
frequencyMatrix = function (data){
  # seq_len() is safe for zero-row input, where the original 1:nbLine
  # would incorrectly iterate over c(1, 0) and index a nonexistent row.
  for (i in seq_len(nrow(data))) {
    sumLine = sum(data[i, ])
    if (sumLine != 0) {
      # Vectorized over the whole row; replaces the per-cell inner loop.
      data[i, ] = data[i, ] / sumLine
    }
  }
  return (data)
}
# Draw a stacked barplot of type proportions (one bar per type) for a given
# distance and save it as "<path>porportionAllType<distance>.png".
# NOTE(review): "porportion" looks like a typo for "proportion" in the output
# filename -- kept as-is because downstream tooling may expect this name.
# `defColor()` comes from tool.R (sourced at the top of this script);
# presumably it maps column names to a fixed color palette -- confirm there.
#
# Args:
#   data: row-frequency matrix/data frame (rows = types, cols = categories).
#   distance: distance label used in the filename and plot title.
#   listType: labels for the bars (one per row of `data`).
#   path: output directory prefix (must end with the path separator).
plotGlobal = function (data, distance, listType, path){
#print (data)
color = defColor(colnames(data))
# NOTE(review): legendHisto is assigned but never used below.
legendHisto = listType
png(filename=paste(path,"porportionAllType",distance,".png",sep = ""),width=as.integer(600))
#par(mar = c(6,6,6,10))
# Allow drawing outside the plot region (xpd) and widen the right margin
# so the legend fits beside the bars.
par(xpd=T, mar=par()$mar+c(0,0,0,10))
barplot(t(data), ylim = c(0,1),main=paste("Space arround type structure\n",distance), ylab="Frequencies", xlab = "", col=color, space=0.6, cex.axis=1, las=2, names.arg=listType, cex=1, axes = TRUE)
# Negative inset pushes the legend into the extra right margin.
legend("right",legend=colnames(data), fill=color,inset=c(-0.2,0))
dev.off()
}
# Save pie charts of the proportion values in the one-row data frame `d`:
# once as a large PNG ("<path_out>.png", 4000x4000 px) and once as an SVG
# ("<path_out>.svg"). Each pie() call is wrapped in try() so an invalid row
# (e.g. all zeros) does not abort the caller's loop before dev.off() runs.
# NOTE(review): par(lwd = 1000) is applied before png() opens a new device,
# so it affects the previously active device (if any), not the PNG -- confirm
# intent. Also, pie()'s argument is `labels`; `label` only works via R's
# partial argument matching.
pieType = function (d, path_out){
#print (data)
# defColor() comes from tool.R (sourced by this script).
colors = defColor(colnames(d))
par (lwd = 1000)
png(filename=paste(path_out,".png",sep = ""),4000, 4000)
try(pie(as.double(d), col = colors, label = colnames(d), cex = 10))
dev.off()
svg(filename=paste(path_out,".svg",sep = ""))
try(pie(as.double(d), col = colors, label = colnames(d)))
dev.off()
}
#######################
# Main #
#######################
# Usage: Rscript barplotPercentClasse.R <pathData>
# For every distance (one table row) found in the "proportionType<type>"
# files under pathData: builds a type-by-category contingency table, writes
# it to "<distance>_contingence.csv", runs correspondence analysis (AFC,
# defined in AFC.R), draws per-type pie charts, and draws a global stacked
# barplot of row frequencies.
args <- commandArgs(TRUE)
pathData = args[1]
####Retrieve list distance####
#listType = c("Primary", "Secondary", "Tertiary", "Diamine", "Guanidium","Imidazole","Pyridine", "AcidCarboxylic", "Global")
listType = c("I", "II", "III", "GAI", "IMD", "COO", "Global")
# Distances are taken from the row names of the first type's table; all
# types are assumed to share the same row order -- TODO confirm upstream.
list_distance = rownames(read.table(paste(pathData, "proportionType", listType[1], sep = "")))
i_line_distance = 0
print (list_distance)
for (distance in list_distance){
data = NULL
i_line_distance = i_line_distance + 1
for (type in listType){
#print (paste(type, i_line_distance))
# NOTE(review): each type's file is read twice per distance (here and in
# the rbind below); could be read once -- kept as-is.
d_temp = openFile(pathData, type,i_line_distance)
pieType (d_temp, paste(pathData, type, distance, sep = ""))
data = rbind(data,openFile(pathData, type,i_line_distance))
#print (data)
}
rownames (data) = listType
print (data)
write.csv (data, file = paste(pathData, distance ,"_contingence.csv", sep = ""))
AFC (data, pathData)
# Convert counts to row frequencies before plotting proportions.
data = frequencyMatrix (data)
#print (data)
plotGlobal(data,distance, listType, pathData)
}
# Surface any warnings accumulated during the run (useful under Rscript).
warnings()
|
7b23730a01e65740bcf89dad88391ccb218efdf1
|
268e84cedc0d48cc1b59d153ec093729b323cb90
|
/Exercise 2.R
|
e39a60131d2f55e2732bf3cae0ff38c650bbf43d
|
[] |
no_license
|
dashdalvi/Data-Analysis-Decision-Making-Coursework
|
33e738e06d5a73956310dc2ae40f8651cd26c486
|
1a81c5d967833316010079d830d9652301fc8666
|
refs/heads/master
| 2020-04-24T12:46:21.403455
| 2019-02-22T00:24:06
| 2019-02-22T00:24:06
| 171,966,048
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,075
|
r
|
Exercise 2.R
|
# Exercise 2: simulated simple linear regressions with different error variances
library(ggplot2)
library(grid)
library(gridExtra)
# Set the state for random number generation in R (reproducibility).
# NOTE: the rnorm() calls below must stay in this order to reproduce
# the original random draws.
set.seed(123)
# Number of observations
n <- 100
# True intercept and slope
beta0 <- 10
beta1 <- 2
# Errors are normally distributed with mean mu; only the standard
# deviation changes between scenarios.
mu <- 0
# Scenario 1: sd = 2.7
sigma1 <- 2.7
err1 <- rnorm(n, mean = mu, sd = sigma1)
# Observed inputs, evenly spaced in (0, 1)
x <- 1:n/(n+1)
# Observed outputs from y = beta0 + beta1*x + error
yobs1 <- beta0 + beta1*x + err1
# Scenario 2: smaller error sd
sigma2 <- 0.7
err2 <- rnorm(n, mean = mu, sd = sigma2)
yobs2 <- beta0 + beta1*x + err2
# Scenario 3: smallest error sd
sigma3 <- 0.27
err3 <- rnorm(n, mean = mu, sd = sigma3)
yobs3 <- beta0 + beta1*x + err3
# Scatterplots of each scenario with the true regression line overlaid
p1 <- qplot(x, yobs1) +
  geom_abline(intercept = beta0, slope = beta1)
p2 <- qplot(x, yobs2) +
  geom_abline(intercept = beta0, slope = beta1)
p3 <- qplot(x, yobs3) +
  geom_abline(intercept = beta0, slope = beta1)
# Scenario 4: same sd as scenario 1 (2.7) but inputs on a much wider range,
# so the error looks small relative to the y scale.
x1 <- seq(10,80,70/(n-1))
sigma4 <- 2.7
err4 <- rnorm(n, mean = mu, sd = sigma4)
yobs4 <- beta0 + beta1*x1 + err4
# Collect all simulated data in one frame.
# (The original script built an intermediate frame from x/yobs1-3 and then
# immediately overwrote it, and re-attached grid/gridExtra already loaded
# above; both redundancies removed.)
my_data <- data.frame(x1, yobs1, yobs2, yobs3, yobs4)
p4 <- qplot(x1, yobs4) +
  geom_abline(intercept = beta0, slope = beta1)
grid.arrange(p1, p2, p3, p4,
             ncol = 4,
             top = "Plots with different error variances")
# OBSERVATIONS
# First Graph
# 1. In the first graph we take a standard deviation of 2.7, which covers roughly the 95%-99.7% area of the bell curve.
# 2. So when the errors are drawn from a normal distribution with the chosen number of observations, mean, and standard deviation, we get many scattered errors.
# 3. Based on the standard linear model equation, the generated observations are therefore widely scattered.
# 4. So when the graph is plotted we can see many observations scattered above and below the expected line.
# Second Graph
# 5. In the same way, when we plot the second graph with the standard deviation reduced to 0.7, the error is in turn reduced.
# 6. That is why the observations are less scattered around the expected value line compared to the first graph.
# Third Graph
# 7. Similarly, the third graph uses a standard deviation of 0.27, reducing the error even more.
# 8. That is why the points are less scattered and lie close to the expected value line.
# Fourth Graph
# 9. In the fourth graph the x coordinate is modified to span 100 evenly spaced values between 10 and 80.
# 10. Increasing the x range increases the y-axis scale compared to the previous three graphs.
# 11. Interestingly, even though the standard deviation is again 2.7 (the largest error margin), all the points appear to lie on the expected value line.
# 12. This is because the errors are small relative to the enlarged y scale, so the points look like they sit on the line.
|
330b666b08b71639bbe39dd71b3821c89ada716c
|
d43ac1bb61f96e970fb96d1f52cd2afef79b73be
|
/man/users.Rd
|
0dbf4a69cc4a6af8a08ae8691b6563bf90248eeb
|
[] |
no_license
|
debruine/demopsydata
|
faf3079561bbb7d36964e84a1257485ca618d8d8
|
1b5d2a179edd873162a54dec11faff2163afe6c0
|
refs/heads/master
| 2021-04-06T03:57:48.554015
| 2018-03-08T16:32:41
| 2018-03-08T16:32:41
| 124,418,673
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 484
|
rd
|
users.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{users}
\alias{users}
\title{Participant data.}
\format{A data frame with 53999 observations and 3 variables:
\code{user_id}, \code{sex}, and \code{birthday}.}
\usage{
users
}
\description{
A dataset containing the user_id, sex and birthdate of participants in the
\code{disgust}, \code{senseek}, and \code{motivation} datasets.
Join using \code{user_id}.
}
\keyword{datasets}
|
283f7c58e135cb192978e9163141dd7ba716478e
|
357c61695c0b2885916745226b5d0dc7408766c0
|
/BAMMtools/man/testTimeVariableBranches.Rd
|
88c960355462af86c287d7f8af88c4c72a805ff4
|
[] |
no_license
|
macroevolution/bammtools
|
62b4f9c6dd20ea37d1df6b7dd75d10967a8f3e75
|
07a17d8260a9e17419ca4bbc27687b4b6a7164be
|
refs/heads/master
| 2022-11-22T15:11:11.336582
| 2022-11-11T17:08:43
| 2022-11-11T17:08:43
| 17,520,404
| 7
| 7
| null | 2016-05-05T21:09:28
| 2014-03-07T16:42:06
|
R
|
UTF-8
|
R
| false
| true
| 5,228
|
rd
|
testTimeVariableBranches.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/testTimeVariableBranches.R
\name{testTimeVariableBranches}
\alias{testTimeVariableBranches}
\title{Evaluate evidence for temporal rate variation across tree}
\usage{
testTimeVariableBranches(ephy, prior_tv = 0.5, return.type = "posterior")
}
\arguments{
\item{ephy}{An object of class \code{bammdata}.}
\item{prior_tv}{The prior probability that rate shifts lead to a new
time-varying rate process (versus a time-constant process).}
\item{return.type}{Either \code{"posterior"} or \code{"bayesfactor"},
depending on which form of evidence you would like.}
}
\value{
An object of class \code{phylo}, but where branch lengths are
replaced with the desired evidence (posterior probability or Bayes
factor) that each branch is governed by a time-varying rate dynamic.
}
\description{
For each branch in a phylogenetic tree, evaluates the
evidence (posterior probability or Bayes factor) that
macroevolutionary rates have varied through time.
}
\details{
In \code{BAMM 2.0}, rate shifts on trees can lead to time-varying
or constant-rate diversification processes. In other words, the model
will incorporate temporal variation in rates only if there is
sufficient evidence in the data to favor it. The function
\code{testTimeVariableBranches} enables the user to extract the
evidence in favor of time-varying rates on any branch of a
phylogenetic tree from a \code{bammdata} object.
The function returns a copy of the original phylogenetic tree, but
where branch lengths have been replaced by either the posterior
probability (\code{return.type = "posterior"}) or the Bayes factor
evidence (\code{return.type = "bayesfactor"}) that the
macroevolutionary rate regime governing each branch is time-variable.
Consider a particular branch X on a phylogenetic tree. If the length
of this branch is 0.97 and \code{return.type = "posterior"}, this
implies that branch X was governed by a time-varying rate dynamic in
97\% of all samples in the posterior. Alternatively, only 3\% of
samples specified a constant rate dynamic on this branch.
The function also provides an alternative measure of support if
\code{return.type = "bayesfactor"}. In this case, the Bayes factor
evidence for temporal rate variation is computed for each branch. We
simply imagine that diversification rates on each branch can be
explained by one of two models: either rates vary through time, or
they do not. In the above example (branch X), the Bayes factor would
be computed as follows, letting \emph{Prob_timevar} and
\emph{Prior_timevar} be the posterior and prior probabilities that a
particular branch is governed by a time-varying rate process:
\emph{(Prob_timevar) / (1 - Prob_timevar)} * \emph{(1 -
Prior_timevar) / (Prior_timevar)}
The Bayes factor is not particularly useful under uniform prior odds
(e.g., \code{prior_tv = 0.5}), since this simply reduces to the ratio
of posterior probabilities. Note that the prior must correspond to
whatever you used to analyze your data in \code{BAMM}. By default,
time-variable and time-constant processes are assumed to have equal
prior odds.
This function can be used several ways, but this function allows the
user to quickly evaluate which portions of a phylogenetic tree have
"significant" evidence for rate variation through time (see Examples
below).
}
\examples{
# Load whale data:
data(whales, events.whales)
ed <- getEventData(whales, events.whales, burnin=0.1, nsamples=200)
# compute the posterior probability of
# time-varying rates on each branch
tree.pp <- testTimeVariableBranches(ed)
# Plot tree, but color all branches where the posterior
# probability of time-varying rates exceeds 95\\%:
colvec <- rep("black", nrow(whales$edge))
colvec[tree.pp$edge.length >= 0.95] <- 'red'
plot.phylo(whales, edge.color=colvec, cex=0.5)
# now, compute Bayes factors for each branch:
tree.bf <- testTimeVariableBranches(ed, return.type = "bayesfactor")
# now, assume that our prior was heavily stacked in favor
# of a time-constant process:
tree.bf2 <- testTimeVariableBranches(ed, prior_tv = 0.1,
return.type = "bayesfactor")
# Plotting the branch-specific Bayes factors against each other:
plot.new()
par(mar=c(5,5,1,1))
plot.window(xlim=c(0, 260), ylim=c(0, 260))
points(tree.bf2$edge.length, tree.bf$edge.length, pch=21, bg='red',
cex=1.5)
axis(1)
axis(2, las=1)
mtext(side=1, text="Bayes factor: prior_tv = 0.1", line=3, cex=1.5)
mtext(side = 2, text = "Bayes factor: uniform prior odds", line=3,
cex=1.5)
# and you can see that if your prior favors CONSTANT RATE dynamics
# you will obtain much stronger Bayes factor support for time varying
# rates.
# IF the evidence is present in your data to support time variation.
# To be clear, the Bayes factors in this example were computed from the
# same posterior probabilities: it is only the prior odds that differed.
}
\references{
\url{http://bamm-project.org/}
}
\seealso{
\code{\link{getRateThroughTimeMatrix}}
}
\author{
Dan Rabosky
}
|
01833603ea95c4e8518e82702eee17b092830169
|
c3eb3703ce4dd401cc37b112f751aa49b6b1e0ef
|
/man/wiggleplotr.Rd
|
ac281a11e892b9cf17ffb13737ce895c5f83b329
|
[
"Apache-2.0"
] |
permissive
|
js29/wiggleplotr
|
6a2b17e3e424d9bae0f06dbb13807a3b8fe0e4e0
|
24b5a5af6ade2bf6ff91903f4496ea3d49d50f98
|
refs/heads/master
| 2020-12-24T23:55:05.900753
| 2016-09-08T12:12:51
| 2016-09-08T12:12:51
| 67,697,987
| 0
| 0
| null | 2016-09-08T11:40:17
| 2016-09-08T11:40:17
| null |
UTF-8
|
R
| false
| true
| 236
|
rd
|
wiggleplotr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wiggleplotr-package.r
\docType{package}
\name{wiggleplotr}
\alias{wiggleplotr}
\alias{wiggleplotr-package}
\title{wiggleplotr.}
\description{
wiggleplotr.
}
|
b177b9c070b4887430176f5df8796a1e8c2f6177
|
1017ba10076f8b16baf35156427ab046d7077743
|
/Analysis/toy_model.R
|
18526f7c09a1e425713f4a6ad69e7abac8e4a7c4
|
[] |
no_license
|
sumitsrv/metaphors
|
254bfb1ca404e2160b983b9117e4c38e28a037a0
|
e2abdc134a035844cf7f4f03ec035c10d7998639
|
refs/heads/master
| 2021-05-19T12:58:34.629277
| 2016-07-25T15:34:53
| 2016-07-25T15:34:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,406
|
r
|
toy_model.R
|
# Toy model plots: compare metaphor-interpretation model output under the
# prior, metaphor, and QUD+metaphor conditions.
# Attach ggplot2 explicitly: the original script used ggplot() without
# loading it, so it only ran if ggplot2 happened to be attached already.
library(ggplot2)
# One-feature lion model
d1 <- read.csv("../Model/Output/lion-1feature.csv", header=FALSE)
colnames(d1) <- c("category", "feature1", "prob")
ggplot(d1, aes(x=category, y=prob, fill=feature1)) +
  geom_bar(stat="identity", color="black", position=position_dodge()) +
  theme_bw() +
  scale_fill_brewer(palette="Accent")
# Two-feature lion model, faceted by feature1
d2 <- read.csv("../Model/Output/lion-2feature.csv", header=FALSE)
colnames(d2) <- c("category", "feature1", "feature2", "prob")
feature2 <- aggregate(data=d2, prob ~ category + feature2, FUN=sum)
ggplot(d2, aes(x=category, y=prob, fill=feature2)) +
  geom_bar(stat="identity", color="black", position=position_dodge()) +
  theme_bw() +
  facet_grid(.~feature1) +
  scale_fill_manual(values=c("gray", "white"))
# Two-feature lion model with combined feature labels
d3 <- read.csv("../Model/Output/lion-2feature.csv", header=FALSE)
colnames(d3) <- c("category", "feature1", "feature2", "prob")
d3$features <- paste(d3$feature1, d3$feature2)
ggplot(d3, aes(x=category, y=prob, fill=features)) +
  geom_bar(stat="identity", color="black", position=position_dodge()) +
  theme_bw() +
  scale_fill_brewer(palette="Accent")
# Prior over three binary features (size / strength / wildness)
d.prior <- read.csv("../Model/Output/animal-test-prior.csv", header=FALSE)
colnames(d.prior) <- c("category", "feature1", "feature2", "feature3", "prob")
d.prior$feature1 <- factor(d.prior$feature1, labels=c("small", "big"))
d.prior$feature2 <- factor(d.prior$feature2, labels=c("weak", "strong"))
d.prior$feature3 <- factor(d.prior$feature3, labels=c("tame", "wild"))
d.prior$features <- paste(d.prior$feature1, d.prior$feature2, d.prior$feature3, sep="\n")
d.prior.person <- subset(d.prior, category=="person")
d.prior.animal <- subset(d.prior, category=="animal")
ggplot(d.prior.person, aes(x=features, y=prob, fill=feature1)) +
  geom_bar(stat="identity", color="black", position=position_dodge()) +
  theme_bw() +
  scale_fill_brewer(palette="Accent") +
  xlab("")
# Posterior after the metaphor ("What is he like?" / "He is a buffalo.")
d4 <- read.csv("../Model/Output/animal-test.csv", header=FALSE)
colnames(d4) <- c("category", "feature1", "feature2", "feature3", "prob")
d4$feature1 <- factor(d4$feature1, labels=c("small", "big"))
d4$feature2 <- factor(d4$feature2, labels=c("weak", "strong"))
d4$feature3 <- factor(d4$feature3, labels=c("tame", "wild"))
d4$features <- paste(d4$feature1, d4$feature2, d4$feature3, sep="\n")
d4.person <- subset(d4, category=="person")
d4.animal <- subset(d4, category=="animal")
ggplot(d4.person, aes(x=features, y=prob, fill=feature1)) +
  geom_bar(stat="identity", color="black", position=position_dodge()) +
  theme_bw() +
  scale_fill_brewer(palette="Accent") +
  xlab("")
# Posterior after QUD + metaphor ("Is he big?" / "He is a buffalo.")
d5 <- read.csv("../Model/Output/animal-test-qud.csv", header=FALSE)
colnames(d5) <- c("category", "feature1", "feature2", "feature3", "prob")
d5$feature1 <- factor(d5$feature1, labels=c("small", "big"))
d5$feature2 <- factor(d5$feature2, labels=c("weak", "strong"))
d5$feature3 <- factor(d5$feature3, labels=c("tame", "wild"))
# BUG FIX: the original pasted d4$feature1 here, so d5's labels came from a
# different table; use d5's own column so each row is labeled with its own
# feature values.
d5$features <- paste(d5$feature1, d5$feature2, d5$feature3, sep="\n")
d5.person <- subset(d5, category=="person")
d5.animal <- subset(d5, category=="animal")
ggplot(d5.person, aes(x=features, y=prob, fill=feature1)) +
  geom_bar(stat="identity", color="black", position=position_dodge()) +
  theme_bw() +
  scale_fill_brewer(palette="Accent") +
  xlab("")
## combine prior, metaphor, and qud+metaphor
d.prior.person$type <- "prior"
d4.person$type <- "'What is he like?'\n'He is a buffalo.'"
d5.person$type <- "'Is he big?'\n'He is a buffalo.'"
d.prior.animal$type <- "prior"
d4.animal$type <- "'What is he like?'\n'He is a buffalo.'"
d5.animal$type <- "'Is he big?'\n'He is a buffalo.'"
d.comp.person <- rbind(d.prior.person, d4.person, d5.person)
d.comp.person$type <- factor(d.comp.person$type, levels=c("prior", "'What is he like?'\n'He is a buffalo.'", "'Is he big?'\n'He is a buffalo.'"))
ggplot(d.comp.person, aes(x=features, y=prob, fill=feature1)) +
  geom_bar(stat="identity", color="black", position=position_dodge()) +
  theme_bw() +
  facet_grid(.~type) +
  scale_fill_brewer(palette="Accent") +
  xlab("")
d.comp.animal <- rbind(d.prior.animal, d4.animal, d5.animal)
d.comp.animal$type <- factor(d.comp.animal$type, levels=c("prior", "'What is he like?'\n'He is a buffalo.'", "'Is he big?'\n'He is a buffalo.'"))
ggplot(d.comp.animal, aes(x=features, y=prob, fill=feature1)) +
  geom_bar(stat="identity", color="black", position=position_dodge()) +
  theme_bw() +
  facet_grid(.~type) +
  scale_fill_brewer(palette="Accent") +
  xlab("")
|
d2b1aae5133a4412b8c62890f57a1b3f07f4e959
|
a83cf31dc97cb34639e9962e43cc8e40dd79dfa3
|
/bobby1.R
|
35b55843ec00287376f10f1525e78e7f4d53afec
|
[] |
no_license
|
jnownes/ds202_project
|
9cf0d5c78701ad4ed428b8c34654d003d96f2059
|
f7b4ec97a794d9f7036e7df45b3df00e6f4c8c13
|
refs/heads/master
| 2021-05-23T16:57:45.958877
| 2020-05-05T16:59:22
| 2020-05-05T16:59:22
| 253,391,008
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 954
|
r
|
bobby1.R
|
# Daily US state-level COVID-19 trends: new cases, new deaths, and
# cumulative cases over time, stacked by state.
library(lubridate)
library(dplyr)
library(ggplot2)
library(plotly)

dat <- read.csv("us_states_covid19_daily.csv", stringsAsFactors = FALSE)
#View(dat)
str(dat)

# Parse the yyyymmdd integer column into Date objects.
dat$date <- ymd(dat$date)

trend <- dat %>%
  group_by(date)

# New reported cases per day (one stacked bar per day, legend suppressed
# because there are too many states to list).
ggplot(trend, aes(x = date, y = positiveIncrease, fill = state)) +
  geom_bar(stat = 'identity') +
  xlab("Date") +
  ylab("Cases") +
  ggtitle("New reported cases by day in the United States") +
  theme(legend.position = 'none')

# New reported deaths per day, y-axis capped at 3000.
ggplot(trend, aes(x = date, y = deathIncrease, fill = state)) +
  geom_bar(stat = 'identity') +
  ylim(0, 3000) +
  xlab("Date") +
  ylab("Deaths") +
  ggtitle("New reported deaths by day in the United States") +
  theme(legend.position = 'none')

# Cumulative reported cases per day.
ggplot(trend, aes(x = date, y = positive, fill = state)) +
  geom_bar(stat = 'identity') +
  xlab("Date") +
  ylab("Total Cases") +
  ggtitle("Total Cases by day in the United States") +
  theme(legend.position = 'none')

# Same data as a scatterplot.
ggplot(trend, aes(x = date, y = positive, fill = state)) +
  geom_point()
|
dd0038cad0af312afe17744150dfdccfc70997ee
|
fa31080db4ae795e124286a54da1468428cde439
|
/man/fracMake.Rd
|
806f816f6493298180b2946286a4aca5c3396107
|
[] |
no_license
|
cran/Rfractran
|
59d2d46f11d1ed911a87001300c71a0adab2c621
|
271ea36f804b3b09f0e0dc3208eef3fc1f5893f0
|
refs/heads/master
| 2022-11-09T23:51:48.719714
| 2020-06-25T15:20:11
| 2020-06-25T15:20:11
| 276,704,632
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 796
|
rd
|
fracMake.Rd
|
\name{fracMake}
\alias{fracMake}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Function to Create a Sequence of \code{bigq} Fractions
}
\description{
Feed this function a collection of numerator and denominator values;
get a vector of \code{bigq} fractions suitable for use as a FRACTRAN program.
}
\usage{
fracMake(n, d = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{n}{
Vector of integers to be used as numerators, or a Nx2 array of integers
where each row contains a num, denom pair. If this is an array, the input
\code{d} is ignored.
}
\item{d}{
Vector of integers to be used as denominators. Ignored if \code{n} is an array
}
}
\value{
Vector of \code{bigq} fractions.
}
\author{
Carl Witthoft <carl@witthoft.com>
}
|
1bba0047efcbf7a870a98297551710399cd6f32d
|
ce7a1122d6b1e21733f6ac0ae572336c7063134f
|
/Capstone_Two_Script.r
|
234eafe011c0ab474da1a49964dc542e291228f0
|
[] |
no_license
|
Thom-J-H/Capstone2_Harvard_edX
|
88700ace0cc58e6f0d2cd80ddff8f8a944354b57
|
0cad8d90843254a78b162998c16e2510b8be7924
|
refs/heads/master
| 2020-04-28T07:31:52.205830
| 2019-11-19T02:04:45
| 2019-11-19T02:04:45
| 175,096,029
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 52,178
|
r
|
Capstone_Two_Script.r
|
# Capstone Project Two: 21 models tested on the WDBC data
# R Script: All Code in RMD and more
# Submitted on 2019-03-12, by Thomas J. Haslam
# In partial fulfillment of the requirements for the Harvard edX:
# Data Science Professional Certificate
# Revised (after graded) on 2019-03-14, based on peer-review suggestions.
# Thank you, reviewers.
# Data Wrangle_EDA_Prep
## Libraries
# For RMD use this instead of standard library()
if (!require(tidyverse)) install.packages("tidyverse",
repos = "http://cran.us.r-project.org")
if (!require(caret)) install.packages("caret",
repos = "http://cran.us.r-project.org")
if (!require(matrixStats)) install.packages("matrixStats",
repos = "http://cran.us.r-project.org")
if (!require(readr)) install.packages("readr",
repos = "http://cran.us.r-project.org")
if (!require(cluster)) install.packages("cluster",
repos = "http://cran.us.r-project.org")
if (!require(fpc)) install.packages("fpc",
repos = "http://cran.us.r-project.org")
if (!require(utils)) install.packages("utils",
repos = "http://cran.us.r-project.org")
#library(tidyverse)
#library(caret)
#library(readr)
#library(matrixStats)
#library(utils)
options(scipen = 999) # no natural log, please
# Quick exploratory summary of a data frame: print a glimpse of its
# structure, then return skimr's per-column summary statistics.
skim_glim <- function(df) {
  tibble::glimpse(df)  # side effect: prints column types and previews
  skimr::skim(df)      # value returned to the caller
}
#######################################################################################
# Data Import & Wrangle(1)
# First Problem: Set Data Variable Ids (column names) derived from wdbc.names.txt
name_cols <- c("id","diagnosis","radius_mean","texture_mean",
"perimeter_mean","area_mean","smoothness_mean",
"compactness_mean","concavity_mean","concave_points_mean",
"symmetry_mean","fractal_dimension_mean","radius_se","texture_se",
"perimeter_se","area_se","smoothness_se","compactness_se",
"concavity_se","concave_points_se","symmetry_se","fractal_dimension_se",
"radius_worst","texture_worst","perimeter_worst",
"area_worst","smoothness_worst","compactness_worst",
"concavity_worst","concave_points_worst","symmetry_worst",
"fractal_dimension_worst")
# Read in UCI data with column names (local copy of the WDBC csv)
wdbc_data <- read_csv("wdbc.data.csv", col_names = name_cols )
str(wdbc_data) # Check import
## NOTE: for RMD
# Read in UCI data with column names
#csv_url <- "https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data"
#wdbc_data <- read_csv(csv_url, col_names = name_cols)
# as.factor and set levels
# NOTE(review): mutate_if/mutate_at are superseded by across() in newer
# dplyr; kept as-is for compatibility with the original submission.
wdbc_data <- mutate_if(wdbc_data, is.character, as.factor) %>%
mutate_at("diagnosis", factor, levels = c("M", "B")) # Set malignant as POSITIVE
str(wdbc_data) # Check result
skim_glim(wdbc_data) # Quick EDA overview
#
# All good on data import: note that skim does not display properly in RMD
#
###############################################################################
# ML EDA, using old school and Tidyverse methods
#
table(wdbc_data$diagnosis) #check distribution: confirm M/B ratio 212/357
# Convert wdbc_data to matrix
wdbc_mx <- as.matrix(wdbc_data[, 3:32]) # remove id & diagnosis
# Set the row names so cases stay identifiable after matrix conversion
row.names(wdbc_mx) <- wdbc_data$id
#
# Ways of checking data variability
#
# colMeans2/colSds/colMins/colMaxs/colMedians come from matrixStats.
colMeans2(wdbc_mx) # old school
colSds(wdbc_mx) # old school
apply(wdbc_mx, 2, mean) # old school
apply(wdbc_mx, 2, sd) # old school
# Recapture as df using Tidyverse: one summary row per variable
tidy_2 <- bind_cols(enframe(names(wdbc_data[, 3:32]),
name = NULL, value = "Variable"),
enframe(colMeans2(wdbc_mx),
name = NULL, value = "Avg") ,
enframe(colSds(wdbc_mx),
name = NULL, value = "SD"),
enframe(colMins(wdbc_mx),
name = NULL, value = "Min"),
enframe(colMaxs(wdbc_mx),
name = NULL, value = "Max"),
enframe(colMedians(wdbc_mx),
name = NULL, value = "Median"))
tidy_2 %>% head() %>%
knitr::kable(caption = "wdbc: Summary Stats [first 6 variables shown] ")
# Summarise the summary table -- another Tidyverse advantage
tidy_2 %>%
summarize(avg_mean = mean(Avg), sd_mean = sd(Avg),
min_mean = min(Avg), max_mean = max(Avg),
avg_sd = mean(SD), sd_sd = sd(SD),
min_sd = min(SD), max_sd = max(SD)) %>%
knitr::kable(caption = "wdbc: Range of mean & sd values")
# Strong case for centering and scaling the data
####################
# Explore centering and scaling data
true_values <- as.factor(wdbc_data$diagnosis) %>%
relevel("M") %>% set_names(wdbc_data$id) # Set malignant as POSITIVE
#check for `id` match
head(true_values)
t(wdbc_mx[1:6, 1:2]) # all good
##
## Unsupervised Learning for EDA
##
library(cluster)
library(fpc)
## We know that we have B M 357 212.
# The task of kmeans here, unsupervised learning, is just to identify which belong to which centers.
# Unscaled
set.seed(2019)
unscaled_K <- kmeans(wdbc_mx, centers = 2, nstart = 20)
table(unscaled_K$cluster) #also unscaled_K$size
# Viz (plotcluster comes from fpc)
plotcluster(wdbc_mx, unscaled_K$cluster, main = "Unscaled K Results",
ylab = "", xlab = "Cluster 1: 131 assigned; Cluster 2: 438")
# Center and Scale
wdbc_mx_sc <- scale(sweep(wdbc_mx, 2, colMeans(wdbc_mx))) # center & scale
set.seed(2019)
scaled_K <- kmeans(wdbc_mx_sc, centers = 2, nstart = 20)
table(scaled_K$cluster) #also scaled_K$size
# Viz
plotcluster(wdbc_mx, scaled_K$cluster,
main = "Centered & Scaled K Results",
ylab = "", xlab = "Cluster1: 380 assigned; Cluster 2: 189")
## Test Assumptions about Clusters per model
# NOTE(review): kmeans cluster numbers are arbitrary; the cluster->class
# mappings below (1 = "M" unscaled, 1 = "B" scaled) were chosen by
# inspecting the cluster sizes above, and would need re-checking if the
# seed or data changed.
# unscaled
unscaled_k_pred <- if_else(unscaled_K$cluster == 1, "M", "B") %>%
as.factor() %>% relevel("M") %>% set_names(wdbc_data$id)
rbind(unscaled_k_pred[60:64], true_values[60:64]) # check
mean(unscaled_k_pred[60:79] == true_values[60:79]) # check
# scaled
scaled_k_pred <- if_else(scaled_K$cluster == 1, "B", "M") %>%
as.factor() %>% relevel("M") %>% set_names(wdbc_data$id)
rbind(scaled_k_pred[60:64], true_values[60:64]) # check
mean(scaled_k_pred[60:79] == true_values[60:79]) # check
ID_check <- rbind(
rbind("True_Values" = true_values[90:97]),
rbind("Unscaled_K" = unscaled_k_pred[90:97]),
rbind("Scaled_K" = scaled_k_pred[90:97])
) # for RMD
ID_check %>%
knitr::kable(caption = "IDs and Indexs Match: sample[90:97]" )
## Confusion Matrix Test (confusionMatrix comes from caret)
cfm_unscaled_k <- confusionMatrix(unscaled_k_pred, true_values)
cfm_unscaled_k #check in console -- not raw print to RMD
cfm_scaled_k <- confusionMatrix(scaled_k_pred, true_values)
cfm_scaled_k #check
# key values as table output
key_values_K_cluster <- bind_cols(
enframe(cfm_unscaled_k$overall["Accuracy"], name = NULL, value = "unK_Acc" ),
enframe(cfm_unscaled_k$byClass["F1"], name = NULL, value = "unK_F1" ) ,
enframe(cfm_scaled_k$overall["Accuracy"], name = NULL, value = "scalK_Acc " ),
enframe(cfm_scaled_k$byClass["F1"], name = NULL, value = "scalK_F1" ) ) %>%
knitr::kable(caption = "Unscaled and Scaled K: Accuracy and F Measure results")
key_values_K_cluster # print out in RMD
# quick and dirty comp table
dirty_comp_table <- cbind(cbind(TrV = table(true_values)),
cfm_unscaled_k$table,
cfm_scaled_k$table) %>%
knitr::kable(caption = "L-R: True Values, Unscaled K, Scaled K" )
dirty_comp_table #print out
# Raw vs. C-S: Clear example of preprocessing effect
t(wdbc_mx)[1:8, 1:5] %>%
as.data.frame() %>%
rownames_to_column("Variable") %>%
knitr::kable(caption = "Raw Data: wdbc_mx: First 8 Vars. for 5 Cases")
t(wdbc_mx_sc)[1:8, 1:5] %>%
as.data.frame() %>%
rownames_to_column("Variable") %>%
knitr::kable(caption = "C-S Prep: wdbc_mx_sc: First 8 Vars. for 5 Cases")
# Centering and scaling the data shows
# a meaningful improvement in Accuracy and F Measure.
#The improvement in predicting malignant cancer cells
#is particularly important for this ML task.
#We know both from the kmeans model test above, and from the professional
#literature, that many ML algorithms will perform better with centered & scaled data.
#So our first preprocessing preparation will be to center and scale the data.
# Next question. Would the data set benefit from PCAA/
diagno <- as.numeric(wdbc_data$diagnosis == "M") # for plotting
wdbc_PCA <- prcomp(wdbc_mx, center = TRUE, scale = TRUE)
importance_df <- data.frame(Sum_Exp = summary(wdbc_PCA)$importance[3,]) %>%
rownames_to_column("PCA") # Cumulative Proportion
PCA_sum <- summary(wdbc_PCA) # PCA list: SD, Prop Var., Cum Prop.
# PCA_sum$importance[3,] == summary(wdbc_PCA)$importance[3,]
biplot(wdbc_PCA, cex = 0.45) # explain in RMD
plot_PCA1_2 <- data.frame(PC1 = wdbc_PCA$x[,1], PC2 = wdbc_PCA$x[,2],
label = factor(wdbc_data$diagnosis )) %>%
ggplot(aes(PC1, PC2, fill = label)) +
geom_point(cex = 3, pch = 21) +
labs(fill = "Class", title = "True Value Groupings: PC1 / PC2",
subtitle = "63% of variance explained") +
theme(legend.position = c(0.88, 0.14)) # inside graph blank area
plot_PCA4_5 <- data.frame(PC4 = wdbc_PCA$x[,4], PC5 = wdbc_PCA$x[,5],
label = factor(wdbc_data$diagnosis )) %>%
ggplot(aes(PC4, PC5, fill = label)) +
geom_point(cex = 3, pch = 21) +
labs(fill = "Class", title = "True Value Groupings: PC4 / PC5",
subtitle = "12% of variance explained") +
theme(legend.position = c(0.88, 0.14)) # as plot_PCA1_2
#########
# http://www.cookbook-r.com/Graphs/Multiple_graphs_on_one_page_(ggplot2)/
# Winston Chang
# Third-party utility reproduced verbatim: arranges several ggplot objects
# on one page using grid viewports.
#   ...      ggplot objects passed directly
#   plotlist optional list of ggplot objects (appended to ...)
#   file     unused here (kept for compatibility with the original)
#   cols     number of columns when `layout` is NULL
#   layout   optional index matrix; overrides `cols` when supplied
# Side effects: draws to the current graphics device via grid.newpage().
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
  library(grid)
  # Make a list from the ... arguments and plotlist
  plots <- c(list(...), plotlist)
  numPlots = length(plots)
  # If layout is NULL, then use 'cols' to determine layout
  if (is.null(layout)) {
    # Make the panel
    # ncol: Number of columns of plots
    # nrow: Number of rows needed, calculated from # of cols
    layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
                     ncol = cols, nrow = ceiling(numPlots/cols))
  }
  if (numPlots == 1) {
    print(plots[[1]])
  } else {
    # Set up the page
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
    # Make each plot, in the correct location
    for (i in 1:numPlots) {
      # Get the i,j matrix positions of the regions that contain this subplot
      matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
      print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
                                      layout.pos.col = matchidx$col))
    }
  }
}
# Draw the two PC scatter plots side by side.
multiplot(plot_PCA1_2,plot_PCA4_5 , cols = 2)
#########
# Kable summaries of the PCA: variance explained and loadings.
PCA_sum[["importance"]][, 1:5] %>%
  as.data.frame() %>%
  rownames_to_column("Results") %>%
  knitr::kable(caption = "PCA Summary: First 5 Components")
wdbc_PCA$rotation[1:8, 1:5] %>%
  as.data.frame() %>%
  rownames_to_column("Variable") %>%
  knitr::kable(caption = "PCA Matrix: First 8 Variables for First 5 PC")
# Scree-style plot of cumulative variance for the first 12 components;
# the orange dashed line marks the default caret PCA threshold (95%).
graph_PCA <- importance_df[1:12, ] %>%
  ggplot( aes(reorder(PCA, Sum_Exp), Sum_Exp)) +
  geom_point() +
  labs(title = "PCA Results: 10 components explain over 95% variance",
       x = "Principal Component Analysis: 1-12 of 30" ,
       y = "Variance Explained", subtitle = "WDBC (Wisconsin Diagnostic Breast Cancer) data set") +
  theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
  geom_hline(aes(yintercept = 0.95), color = "orange", linetype = 2)
graph_PCA
# No near-zero-variance predictors expected; included as a formality.
nearZeroVar(wdbc_data[, 3:32])
## Strong case for PCA; not so for nzv, but include as formality
# standard stack: nzv, center, scale, pca OR zv, center, scale, pca
#
# END EDA on to modelling
#
# Create first stage train and test sets 50% for model training; 50% for testing
# Second stage: 82% train; 18 % test
# createDataPartition stratifies on Y, so class balance is preserved in
# both splits. The *_id versions keep the `id` column for later drill-down.
############################# 50/50 train/test split
Y <- wdbc_data$diagnosis
set.seed(2019)
test_index <- createDataPartition(Y, times = 1, p = 0.5, list = FALSE)
# Apply index
test_set_id <- wdbc_data[test_index, ] # will use ID later
train_set_id <- wdbc_data[-test_index, ]
# Remove id for testing
test_set <- test_set_id %>% select(-id)
train_set <- train_set_id %>% select(-id)
############################# 82/18 train/test split
# Heuristic test fraction: 1/sqrt(p) with p = 30 predictors (~0.1826).
(test_ratio <- 1/sqrt(30)) # round down to 18%
# Which models have an ML advantage?
set.seed(2019)
test_index2 <- createDataPartition(Y, times = 1, p = 0.18, list = FALSE)
# Apply index
test2_id <- wdbc_data[test_index2, ]
train2_id <- wdbc_data[-test_index2, ]
# Remove id variable
test_2 <- test2_id %>% select(-id)
train_2 <- train2_id %>% select(-id)
######## trainControl and Data Preparations for all models
## NOTE: To have reproducible results for ensemble modeling, must use seeds argument in trainControl
# One seed vector per resample; 1000 x 800 is deliberately oversized so it
# covers every model's tuning grid. (caret only consumes what it needs —
# TODO confirm oversupply is accepted by the installed caret version.)
set.seed(2019)
seeds <- vector(mode = "list", length = 1000)
for (i in 1:1000) seeds[[i]] <- sample.int(1000, 800)
### Re-usable trainControl for consistent comparison across models
#
### Does NOT change: same for all models
# NOTE(review): twoClassSummary reports ROC/Sens/Spec only, and the train()
# calls below never set metric = "ROC" — caret will warn and switch the
# selection metric. Confirm this is intended.
myControl <- trainControl(
  method = "cv", number = 10,
  summaryFunction = twoClassSummary,
  classProbs = TRUE, # IMPORTANT!
  verboseIter = TRUE,
  seeds = seeds
)
##
# Experiement with preprocessing: one NULL, two typical
##
prep_0 <- NULL
prep_1 <- c("center", "scale")
prep_2 <- c("nzv", "center", "scale", "pca")
###
## Select Models for Ensemble: 21
###
models <- c("adaboost", "avNNet", "gamboost", "gamLoess", "glm",
            "gbm", "knn", "kknn", "lda", "mlp", "monmlp", "naive_bayes",
            "qda", "ranger", "Rborist", "rf", "rpart", "svmLinear", "svmRadial",
            "svmRadialCost", "svmRadialSigma")
# Tibble of model names, reused for all results tables.
mod_names <- enframe(models, value = "Model", name = NULL)
###############################################################################
## SAVE POINT:
## NEXT: Modelling and Results
###############################################################################
#
#
######################################## First Run : 50/50 ############################
# Fit all 21 models on the 50/50 training split with NO preprocessing.
set.seed(2019)
garbage_0 <- capture.output(
  fits_0 <- lapply(models, function(model){
    print(model)
    train(diagnosis ~ ., data = train_set, method = model,
          trControl = myControl, preProcess = prep_0)
  })
) # NOTE: this will capture the usual console output for train
# to see train at work REMOVE the function capture.output()
# but for the RMD report, this would result in over 800 pages!
# setting "verbose = FALSE" or "trace = FALSE" will NOT work --
# these options are only supported by some models, will crash others
names(fits_0) <- models
# Predictions 0
# One column of class predictions per model on the held-out test set.
predictions_0 <- sapply(fits_0, function(object)
  predict(object, newdata = test_set))
# Predictions for CFM & F Measure
# Re-level to c("M", "B") so confusionMatrix treats malignant as POSITIVE.
pred_ft_0 <- predictions_0 %>% as.data.frame() %>%
  mutate_if(., is.character, as.factor) %>%
  mutate_all(., factor, levels = c("M", "B")) # Set malignant as POSITIVE
# Confusion Matrix for Prep_0
# Named list: one confusionMatrix object per model.
CFM_Prep_0 <- sapply(pred_ft_0 , function(object) {
  CFM <- confusionMatrix(data = object, reference = test_set$diagnosis)
  list(CFM)
})
########### Quick and Dirty extract for all CFM Lists!
# When a list of 21 confusionMatrix objects is unlisted into a single
# column, each model occupies a fixed stride of 24 rows; within each
# stride, Accuracy is entry 6 and F1 is entry 19. Deriving the index
# vectors with seq() removes the hand-typed magic-number lists (same
# values: 6, 30, 54, ..., 486 and 19, 43, 67, ..., 499).
ACC_dex <- seq(6, by = 24, length.out = 21)  # Accuracy score positions
F1_dex <- seq(19, by = 24, length.out = 21)  # F1 score positions
############
# Flatten the confusionMatrix list into one column, then pull Accuracy/F1
# by position; values arrive as factors, hence as.numeric(as.character()).
CFM_mess_0 <- CFM_Prep_0 %>% unlist() %>% as.data.frame() # create an ordered mess
CFM_0_Keys <- bind_cols(mod_names,
                        Accuracy = round(as.numeric(as.character(CFM_mess_0[ACC_dex,])),4) ,
                        F_Measure = round(as.numeric(as.character(CFM_mess_0[F1_dex,])),4)
) %>%
  mutate(Total = Accuracy + F_Measure) # grab values: convert from factor to numeric; round
CFM_0_Keys %>% arrange(desc(Total)) %>% head(n = 7) %>%
  knitr::kable(caption = "Run One: NULL prep: Top Seven Models")
#
#
## Prep_1 center, scale
# Same 21-model fit/predict/score pipeline as prep_0, now with
# center+scale preprocessing inside each train() call.
set.seed(2019)
garbage_1 <- capture.output(
  fits_1 <- lapply(models, function(model){
    print(model)
    train(diagnosis ~ ., data = train_set, method = model,
          trControl = myControl, preProcess = prep_1)
  })
)
names(fits_1) <- models
##
# Predictions
predictions_1 <- sapply(fits_1, function(object)
  predict(object, newdata = test_set))
# Predictions for CFM & F Measure
# Level order c("M", "B") keeps malignant as the positive class.
pred_ft_1 <- predictions_1 %>% as.data.frame() %>%
  mutate_if(., is.character, as.factor) %>%
  mutate_all(., factor, levels = c("M" , "B"))
# Confusion Matrix List for Prep_1
CFM_Prep_1 <- sapply(pred_ft_1 , function(object) {
  CFM <- confusionMatrix(data = object, reference = test_set$diagnosis)
  list(CFM)
})
# Positional extraction of Accuracy/F1 (see ACC_dex/F1_dex above).
CFM_mess_1 <- CFM_Prep_1 %>% unlist() %>% as.data.frame() # mess!
CFM_1_Keys <- bind_cols(mod_names,
                        Accuracy = round(as.numeric(as.character(CFM_mess_1[ACC_dex,])), 4 ) ,
                        F_Measure = round(as.numeric(as.character(CFM_mess_1[F1_dex,])), 4 )
) %>%
  mutate(Total = Accuracy + F_Measure)
CFM_1_Keys %>% arrange(desc(Total)) %>% head(n = 7) %>%
  knitr::kable(caption = "Run One: Prep_1: Top Seven Models")
#
#
## Prep 2: nzv, center, scale, pca
# Third variant on the 50/50 split: full preprocessing stack
# (near-zero-variance filter, center, scale, PCA).
set.seed(2019)
garbage_2 <- capture.output(
  fits_2 <- lapply(models, function(model){
    print(model)
    train(diagnosis ~ ., data = train_set, method = model,
          trControl = myControl, preProcess = prep_2)
  })
)
names(fits_2) <- models
# Predictions
predictions_2 <- sapply(fits_2, function(object)
  predict(object, newdata = test_set))
pred_ft_2 <- predictions_2 %>% as_tibble() %>%
  mutate_if(., is.character, as.factor) %>%
  mutate_all(., factor, levels = c("M" , "B"))
# Confusion Matrix for Prep_2
CFM_Prep_2 <- sapply(pred_ft_2 , function(object) {
  CFM <- confusionMatrix(data = object, reference = test_set$diagnosis)
  list(CFM)
})
# Positional extraction of Accuracy/F1 (see ACC_dex/F1_dex above).
CFM_mess_2 <- CFM_Prep_2 %>% unlist() %>% as.data.frame()
CFM_2_Keys <- bind_cols(mod_names,
                        Accuracy = round(as.numeric(as.character(CFM_mess_2[ACC_dex,])), 4),
                        F_Measure = round(as.numeric(as.character(CFM_mess_2[F1_dex,])), 4)
) %>%
  mutate(Total = Accuracy + F_Measure)
CFM_2_Keys %>% arrange(desc(Total)) %>% head(n = 7) %>%
  knitr::kable(caption = "Run One: Prep_2: Top Seven Models")
#
#
######################################## Second Run : 82/18 ############################
# Repeat the NULL-prep pipeline on the larger 82/18 training split.
set.seed(2019)
garbage_3 <- capture.output(
  fits_3.0 <- lapply(models, function(model){
    print(model)
    train(diagnosis ~ ., data = train_2, method = model,
          trControl = myControl, preProcess = prep_0)
  })
)
names(fits_3.0) <- models
# Predictions
predictions_3.0 <- sapply(fits_3.0, function(object)
  predict(object, newdata = test_2))
pred_ft_3.0 <- predictions_3.0 %>% as_tibble() %>%
  mutate_if(., is.character, as.factor) %>%
  mutate_all(., factor, levels = c("M", "B"))
# Confusion Matrix for Prep_0
CFM_Prep_3.0 <- sapply(pred_ft_3.0 , function(object) {
  CFM <- confusionMatrix(data = object, reference = test_2$diagnosis)
  list(CFM)
})
# Positional extraction of Accuracy/F1 (see ACC_dex/F1_dex above).
CFM_mess_3.0 <- CFM_Prep_3.0 %>% unlist() %>% as.data.frame()
CFM_3.0_Keys <- bind_cols(mod_names,
                          Accuracy = round(as.numeric(as.character(CFM_mess_3.0[ACC_dex,])), 4),
                          F_Measure = round(as.numeric(as.character(CFM_mess_3.0[F1_dex,])), 4)
) %>%
  mutate(Total = Accuracy + F_Measure)
CFM_3.0_Keys %>% arrange(desc(Total)) %>% head(n = 7) %>%
  knitr::kable(caption = "Run Two: NULL prep: Top Seven Models")
#
## Prep_1 model center, scale
# 82/18 split with center+scale preprocessing.
#
set.seed(2019)
garbage_3.1 <- capture.output(
  fits_3.1 <- lapply(models, function(model){
    print(model)
    train(diagnosis ~ ., data = train_2, method = model,
          trControl = myControl, preProcess = prep_1)
  })
)
names(fits_3.1) <- models
# Predictions Prep_1
predictions_3.1 <- sapply(fits_3.1, function(object)
  predict(object, newdata = test_2))
pred_ft_3.1 <- predictions_3.1 %>% as_tibble() %>%
  mutate_if(., is.character, as.factor) %>%
  mutate_all(., factor, levels = c("M", "B"))
# Confusion Matrix for Prep_1
CFM_Prep_3.1 <- sapply(pred_ft_3.1 , function(object) {
  CFM <- confusionMatrix(data = object, reference = test_2$diagnosis)
  list(CFM)
})
# Positional extraction of Accuracy/F1 (see ACC_dex/F1_dex above).
CFM_mess_3.1 <- CFM_Prep_3.1 %>% unlist() %>% as.data.frame()
CFM_3.1_Keys <- bind_cols(mod_names,
                          Accuracy = round(as.numeric(as.character(CFM_mess_3.1[ACC_dex,])), 4) ,
                          F_Measure = round(as.numeric(as.character(CFM_mess_3.1[F1_dex,])), 4)
) %>%
  mutate(Total = Accuracy + F_Measure)
CFM_3.1_Keys %>% arrange(desc(Total)) %>% head(n = 7) %>%
  knitr::kable(caption = "Run Two: Prep_1: Top Seven Models" )
#
## Prep_2 model nzv, center, scale, pca
# 82/18 split with the full preprocessing stack. The garbage_* capture
# buffers are dropped at the end once all six runs are complete.
#
set.seed(2019)
garbage_3.2 <- capture.output(
  fits_3.2 <- lapply(models, function(model){
    print(model)
    train(diagnosis ~ ., data = train_2, method = model,
          trControl = myControl, preProcess = prep_2)
  })
)
names(fits_3.2) <- models
# Predictions Prep_2
predictions_3.2 <- sapply(fits_3.2, function(object)
  predict(object, newdata = test_2))
pred_ft_3.2 <- predictions_3.2 %>% as_tibble() %>%
  mutate_if(., is.character, as.factor) %>%
  mutate_all(., factor, levels = c("M", "B"))
# Confusion Matrix for Prep_2
CFM_Prep_3.2 <- sapply(pred_ft_3.2 , function(object) {
  CFM <- confusionMatrix(data = object, reference = test_2$diagnosis)
  list(CFM)
})
# Positional extraction of Accuracy/F1 (see ACC_dex/F1_dex above).
CFM_mess_3.2 <- CFM_Prep_3.2 %>% unlist() %>% as.data.frame()
CFM_3.2_Keys <- bind_cols(mod_names,
                          Accuracy = round(as.numeric(as.character(CFM_mess_3.2[ACC_dex,])), 4) ,
                          F_Measure = round(as.numeric(as.character(CFM_mess_3.2[F1_dex,])), 4)
) %>%
  mutate(Total = Accuracy + F_Measure)
#
CFM_3.2_Keys %>% arrange(desc(Total)) %>% head(n = 7) %>%
  knitr::kable(caption = "Run Two: Prep_2: Top Seven Models")
#
# Free the captured train() console output (no longer needed).
rm(garbage_0, garbage_1, garbage_2, garbage_3, garbage_3.1, garbage_3.2)
################ END Modelling ####################################################
############################### Results #########################################
#
# Run One: 50/50
# Wide accuracy/F1 table across all three preps, plus per-model averages.
Accuracy_Table_1 <- bind_cols(Model = mod_names,
                              Acc_0 = CFM_0_Keys$Accuracy,
                              F1_0 = CFM_0_Keys$F_Measure,
                              Acc_1 = CFM_1_Keys$Accuracy,
                              F1_1 = CFM_1_Keys$F_Measure,
                              Acc_2 = CFM_2_Keys$Accuracy,
                              F1_2 = CFM_2_Keys$F_Measure) %>%
  mutate(Top_PreProcess = (Acc_1 + Acc_2) / 2,
         Top_Overall = (Acc_0 + Acc_1 + Acc_2) / 3)
## Averages
# Per-prep mean accuracy, drawn as reference h-lines on the plots below.
h_line_Acc_0 <- mean(Accuracy_Table_1$Acc_0)
h_line1_Acc_1 <- mean(Accuracy_Table_1$Acc_1)
h_line2_Acc_2 <- mean(Accuracy_Table_1$Acc_2)
# NOTE(review): height = -0.1 below is likely a typo for 0 (all other
# jitter calls use height = 0; ggplot2 only jitters when height > 0) —
# confirm and normalize.
Accuracy_Run_One_Viz <- Accuracy_Table_1 %>%
  ggplot(aes(Model, Acc_0)) +
  geom_jitter(color = "red", alpha = 0.6, width = 0.44, height = -0.1) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5)) +
  geom_jitter(aes(y = Acc_1), color = "blue",
              alpha = 0.6, width = 0.5, height = 0) +
  geom_jitter(aes(y = Acc_2), color = "green" , alpha = 0.6, width = 0.44, height = 0) +
  geom_hline(yintercept = h_line1_Acc_1, linetype = 2, color = "blue", alpha = 0.3) +
  geom_hline(yintercept = h_line_Acc_0, linetype = 2, color = "red", alpha = 0.3) +
  geom_hline(yintercept = h_line2_Acc_2, linetype = 2, color = "green", alpha = 0.5) +
  labs(title = "All Models: Accuracy Scores: 50/50 Split",
       subtitle = "Prep by color: Red 0; Blue 1; Green 2",
       y = "Accuracy Rate", caption = "H-lines = Prep avg.")
Accuracy_Run_One_Viz # includes outlier mlp prep_0
## Replot
# Drop the mlp/prep_0 outlier (row 10) via an induced NA so the y-axis is
# readable; the orange h-line shows the prep_0 mean without mlp.
Accuracy_Table_1a <- Accuracy_Table_1
Accuracy_Table_1a$Acc_0[10] <- NA # induce NA to remove mlp outlier
h_line_Acc_0_check <- mean(Accuracy_Table_1a$Acc_0, na.rm = TRUE) # without MLP_0
Accuracy_Run_One_reViz <- Accuracy_Table_1a %>%
  ggplot(aes(Model, Acc_0)) +
  geom_jitter(color = "red", alpha = 0.6, width = 0.4, height = 0) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5)) +
  geom_jitter(aes(y = Acc_1), color = "blue",
              alpha = 0.6, width = 0.5, height = 0) +
  geom_jitter(aes(y = Acc_2), color = "green", alpha = 0.6, width = 0.4, height = 0) +
  geom_hline(yintercept = h_line_Acc_0, linetype = 2, color = "red", alpha = 0.3) +
  geom_hline(yintercept = h_line1_Acc_1, linetype = 2, color = "blue", alpha = 0.3) +
  geom_hline(yintercept = h_line2_Acc_2, linetype = 2, color = "green", alpha = 0.5) +
  geom_hline(yintercept = h_line_Acc_0_check , linetype = 2, color = "orange", alpha = 0.5) +
  labs(title = "All Models: Accuracy Scores: 50/50 Split",
       subtitle = "Prep by color: Red 0; Blue 1; Green 2",
       y = "Accuracy Rate",
       caption = "H-lines = Prep avg.; Prep_0 for MLP not plotted: 0.6281")
Accuracy_Run_One_reViz # removes outlier mlp prep_0
## Top Seven Models Per Prep, Run One
# Leaderboards: top seven by accuracy for each prep and overall average.
Top_Seven_0 <- Accuracy_Table_1 %>% arrange(desc(Acc_0)) %>%
  select(Model_0 = Model, Prep_Null = Acc_0) %>% slice(1:7)
Top_Seven_1 <- Accuracy_Table_1 %>% arrange(desc(Acc_1)) %>%
  select(Model_1 = Model, Prep_1 = Acc_1) %>% slice(1:7)
Top_Seven_2 <- Accuracy_Table_1 %>% arrange(desc(Acc_2)) %>%
  select(Model_2 = Model, Prep_2 = Acc_2) %>% slice(1:7)
Top_Overall <- Accuracy_Table_1 %>% arrange(desc(Top_Overall)) %>%
  select(Model_Overall = Model, Avg_Acc = Top_Overall) %>% slice(1:7)
Top_Seven_50_Split <- bind_cols(Top_Seven_0, Top_Seven_1,
                                Top_Seven_2, Top_Overall)
Top_Seven_50_Split %>%
  knitr::kable(caption = "Run One: Top Seven Models by Accuracy per Prep")
Top_Seven_50_Split %>%
  summarize(Avg_Prep_0 = mean(Prep_Null),
            Avg_Prep_1 = mean(Prep_1),
            Avg_Prep_2 = mean(Prep_2)) %>%
  knitr::kable(caption = "Run One: Mean Accuracy by Prep for Top Seven Models")
###################### END Run One
#
# Run Two: 82/18
# Same results layout as Run One, for the 82/18 split (preps 3.0-3.2).
Accuracy_Table_2 <- bind_cols(Model = mod_names,
                              Acc_3.0 = CFM_3.0_Keys$Accuracy,
                              F1_3.0 = CFM_3.0_Keys$F_Measure,
                              Acc_3.1 = CFM_3.1_Keys$Accuracy,
                              F1_3.1 = CFM_3.1_Keys$F_Measure,
                              Acc_3.2 = CFM_3.2_Keys$Accuracy,
                              F1_3.2 = CFM_3.2_Keys$F_Measure) %>%
  mutate(Top_PreProcess = (Acc_3.1 + Acc_3.2) / 2,
         Top_Overall = (Acc_3.0 + Acc_3.1 + Acc_3.2) / 3)
# Remove mlp NULL for chart
# Row 10 is mlp; induced NA keeps the outlier off the plot only.
Accuracy_Table_2a <- Accuracy_Table_2
Accuracy_Table_2a$Acc_3.0[10] <- NA # induce NA
h_line_Acc_3.0 <- mean(Accuracy_Table_2$Acc_3.0)
h_line_Acc_3.1 <- mean(Accuracy_Table_2$Acc_3.1)
h_line_Acc_3.2 <- mean(Accuracy_Table_2$Acc_3.2)
h_line_check_3.0 <- mean(Accuracy_Table_2a$Acc_3.0,
                         na.rm = TRUE ) # remove mlp
Accuracy_Run_Two_reViz <- Accuracy_Table_2a %>%
  ggplot(aes(Model, Acc_3.0)) +
  geom_jitter(color = "red", alpha = 0.6, width = 0.4, height = 0) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5)) +
  geom_jitter(aes(y = Acc_3.1), color = "blue",
              alpha = 0.6, width = 0.5, height = 0) +
  geom_jitter(aes(y = Acc_3.2), color = "green", alpha = 0.6, width = 0.4, height = 0) +
  geom_hline(yintercept = h_line_Acc_3.0, linetype = 2, color = "red", alpha = 0.3) +
  geom_hline(yintercept = h_line_Acc_3.1, linetype = 2, color = "blue", alpha = 0.3) +
  geom_hline(yintercept = h_line_Acc_3.2, linetype = 2, color = "green", alpha = 0.5) +
  geom_hline(yintercept = h_line_check_3.0, linetype = 2, color = "orange", alpha = 0.5) +
  labs(title = "All Models: Accuracy Scores: 82/18 Split",
       subtitle = "Prep by color: Red 3.0; Blue 3.1; Green 3.2",
       y = "Accuracy Rate",
       caption = "H-lines = Prep avg.; Prep_3.0 for MLP not plotted: 0.6281")
Accuracy_Run_Two_reViz # removes outlier mlp prep_0
# Top Seven Models Run Two
# Leaderboards mirroring Run One's Top_Seven_* tables.
Top_Seven_3.0 <- Accuracy_Table_2 %>% arrange(desc(Acc_3.0)) %>%
  select(Model_3.0 = Model, Prep_Null = Acc_3.0) %>% slice(1:7)
Top_Seven_3.1 <- Accuracy_Table_2 %>% arrange(desc(Acc_3.1)) %>%
  select(Model_3.1 = Model, Prep_1 = Acc_3.1) %>% slice(1:7)
Top_Seven_3.2 <- Accuracy_Table_2 %>% arrange(desc(Acc_3.2)) %>%
  select(Model_3.2 = Model, Prep_2 = Acc_3.2) %>% slice(1:7)
Top_Overall_82 <- Accuracy_Table_2 %>% arrange(desc(Top_Overall)) %>%
  select(Model_Overall = Model, Avg_Acc = Top_Overall) %>% slice(1:7)
Top_Seven_82_Split <- bind_cols(Top_Seven_3.0, Top_Seven_3.1,
                                Top_Seven_3.2, Top_Overall_82 )
Top_Seven_82_Split %>%
  knitr::kable(caption = "Run Two: Top Seven Models by Accuracy per Prep")
Top_Seven_82_Split %>%
  summarize(Avg_3.0 = mean(Prep_Null), Avg_3.1 = mean(Prep_1), Avg_3.2 = mean(Prep_2)) %>%
  knitr::kable(caption = "Run Two: Mean Accuracy by Prep for Top Seven Models")
###################### END Run Two
#
#
# Comparing Run Results
# Combine both runs' accuracy columns; average across the four
# preprocessed runs (Top_PreProcess) and across all six (Top_Overall).
Overall_Accuracy_Table <- bind_cols(
  Accuracy_Table_1 %>%
    select(Model, starts_with("Acc")),
  Accuracy_Table_2 %>%
    select(starts_with("Acc"))
) %>%
  mutate(Top_PreProcess = (Acc_1 + Acc_2 + Acc_3.1 + Acc_3.2) / 4,
         Top_Overall = (Acc_0 + Acc_1 + Acc_2 + Acc_3.0 + Acc_3.1 + Acc_3.2) / 6) %>%
  arrange(desc(Top_Overall))
Overall_Accuracy_Table %>% slice(1:10) %>% rename("CS_to_PCA" = Top_PreProcess, "All_Preps" = Top_Overall) %>%
  knitr::kable(caption = "All Runs/Preps: Top 10 Models for Accuracy")
#
###############################################################################
## SAVE POINT:
##
## NEXT: Failure Tables
###############################################################################
# Create Tables Holding ALL Prediction for each RUN.
# This will allow drill down per model for all conditions
###
# Percent = fraction of the 63 model/prep columns that matched the true
# diagnosis for each observation (rowMeans2 is from matrixStats,
# presumably loaded earlier — TODO confirm).
T_Set_ID <- test_set_id %>% select(id, diagnosis) # grab id and true values
T2_ID <- test2_id %>% select(id, diagnosis) # ditto
All_Predictions_Run_One <- bind_cols(T_Set_ID, pred_ft_0, pred_ft_1, pred_ft_2)
All_Predictions_Run_One <- All_Predictions_Run_One %>%
  mutate(Percent = rowMeans2(as.matrix(All_Predictions_Run_One[3:65]) == test_set_id$diagnosis)) %>%
  select(id, diagnosis, Percent, everything() ) # observations ordered as in the test_set: 50/50
All_Predictions_Run_Two <- bind_cols(T2_ID , pred_ft_3.0, pred_ft_3.1, pred_ft_3.2)
All_Predictions_Run_Two <- All_Predictions_Run_Two %>%
  mutate(Percent = rowMeans2(as.matrix(All_Predictions_Run_Two[3:65]) == T2_ID$diagnosis)) %>%
  select(id, diagnosis, Percent, everything() ) # observations ordered as in test2: 82/18
# Explicit column names: model_prep suffixes (_0/_1/_2 for Run One).
names_vector_one <- c("id", "diagnosis", "Percent", "adaboost_0", "avNNet_0", "gamboost_0", "gamLoess_0", "glm_0",
                      "gbm_0", "knn_0", "kknn_0", "lda_0", "mlp_0", "monmlp_0", "naive_bayes_0", "qda_0", "ranger_0",
                      "Rborist_0", "rf_0", "rpart_0", "svmLinear_0", "svmRadial_0", "svmRadialCost_0", "svmRadialSigma_0",
                      "adaboost_1", "avNNet_1", "gamboost_1", "gamLoess_1", "glm_1", "gbm_1", "knn_1", "kknn_1", "lda_1",
                      "mlp_1", "monmlp_1", "naive_bayes_1", "qda_1", "ranger_1", "Rborist_1", "rf_1", "rpart_1" ,"svmLinear_1",
                      "svmRadial_1", "svmRadialCost_1", "svmRadialSigma_1", "adaboost_2", "avNNet_2", "gamboost_2", "gamLoess_2",
                      "glm_2", "gbm_2", "knn_2", "kknn_2", "lda_2", "mlp_2", "monmlp_2", "naive_bayes_2", "qda_2", "ranger_2" ,
                      "Rborist_2", "rf_2", "rpart_2", "svmLinear_2", "svmRadial_2", "svmRadialCost_2", "svmRadialSigma_2")
names(All_Predictions_Run_One)[1:66] <- names_vector_one
# Share of observations every column got right / most columns got right.
mean(All_Predictions_Run_One$Percent == 1)
mean(All_Predictions_Run_One$Percent > 0.625)
names_vector_two <- c("id", "diagnosis", "Percent","adaboost_3.0", "avNNet_3.0", "gamboost_3.0", "gamLoess_3.0", "glm_3.0", "gbm_3.0", "knn_3.0", "kknn_3.0",
                      "lda_3.0", "mlp_3.0", "monmlp_3.0", "naive_bayes_3.0", "qda_3.0", "ranger_3.0", "Rborist_3.0", "rf_3.0",
                      "rpart_3.0", "svmLinear_3.0", "svmRadial_3.0", "svmRadialCost_3.0", "svmRadialSigma_3.0", "adaboost_3.1",
                      "avNNet_3.1", "gamboost_3.1", "gamLoess_3.1", "glm_3.1", "gbm_3.1", "knn_3.1", "kknn_3.1", "lda_3.1",
                      "mlp_3.1", "monmlp_3.1", "naive_bayes_3.1", "qda_3.1", "ranger_3.1", "Rborist_3.1", "rf_3.1", "rpart_3.1",
                      "svmLinear_3.1", "svmRadial_3.1", "svmRadialCost_3.1", "svmRadialSigma_3.1", "adaboost_3.2", "avNNet_3.2",
                      "gamboost_3.2", "gamLoess_3.2", "glm_3.2", "gbm_3.2", "knn_3.2", "kknn_3.2", "lda_3.2", "mlp_3.2",
                      "monmlp_3.2", "naive_bayes_3.2", "qda_3.2", "ranger_3.2", "Rborist_3.2", "rf_3.2", "rpart_3.2",
                      "svmLinear_3.2", "svmRadial_3.2", "svmRadialCost_3.2", "svmRadialSigma_3.2")
names(All_Predictions_Run_Two)[1:66] <- names_vector_two
mean(All_Predictions_Run_Two$Percent == 1)
mean(All_Predictions_Run_Two$Percent > 0.625)
# Cases every model/prep in both runs classified correctly.
Obvious_Cases <- full_join(All_Predictions_Run_One %>%
                             filter(Percent == 1) %>% select(id),
                           All_Predictions_Run_Two %>%
                             filter(Percent == 1) %>% select(id), by = "id") %>%
  left_join(wdbc_data, by = "id")
table(Obvious_Cases$diagnosis) # 168 uncontroversial: all benign
# Same computation with the mlp outlier columns removed.
# NOTE(review): filter uses the precomputed Percent column, which still
# includes mlp — dropping mlp_* here does not change the result; verify
# Percent should be recomputed without mlp, and confirm the count.
Obvious_Cases2 <- full_join(All_Predictions_Run_One %>% select(-mlp_0) %>%
                              filter(Percent == 1) %>% select(id),
                            All_Predictions_Run_Two %>% select(-mlp_3.0) %>%
                              filter(Percent == 1) %>% select(id), by = "id") %>%
  left_join(wdbc_data, by = "id")
table(Obvious_Cases2$diagnosis) # count with mlp excluded -- verify vs. 168 above
############# First Run 50/50 Split ##################################
#
## Do the individual Preps step by step
# For each prep: find observations at least one model got wrong
# (row success rate < 1), then build a table of id / true diagnosis /
# success percent / every model's prediction, sorted hardest-first.
################################
### Prep = NULL
# identify failure points
Fail_0 <- rowMeans2(predictions_0 == test_set_id$diagnosis)
# create index and success percentage
Fail_Dex_0 <- bind_cols(dex = which(Fail_0 < 1),
                        percent = Fail_0[Fail_0 < 1] )
# create table
Fail_Table_0 <- bind_cols(Fail_Dex_0, pred_ft_0[Fail_Dex_0$dex, ],
                          T_Set_ID[Fail_Dex_0$dex, ] ) %>%
  select(id, diagnosis, percent, everything() , -dex) %>%
  arrange(percent)
names_fail_0 <- c("id", "diagnosis", "Percent", "adaboost_0", "avNNet_0", "gamboost_0", "gamLoess_0", "glm_0",
                  "gbm_0", "knn_0", "kknn_0", "lda_0", "mlp_0", "monmlp_0", "naive_bayes_0", "qda_0", "ranger_0",
                  "Rborist_0", "rf_0", "rpart_0", "svmLinear_0", "svmRadial_0", "svmRadialCost_0", "svmRadialSigma_0")
names(Fail_Table_0)[1:24] <- names_fail_0
str(Fail_Table_0)
Fail_Table_0[,1:8] %>% head(8) %>%
  knitr::kable(caption = "Fail Table: 50/50 split: Null Prep: First 8 rows/cols" )
# Prep_1 = center, scale
Fail_1 <- rowMeans2(predictions_1 == test_set_id$diagnosis)
# create index and success percentage
Fail_Dex_1 <- bind_cols(dex = which(Fail_1 < 1),
                        percent = Fail_1[Fail_1 < 1] )
# create table
Fail_Table_1 <- bind_cols(Fail_Dex_1, pred_ft_1[Fail_Dex_1$dex, ],
                          T_Set_ID[Fail_Dex_1$dex, ] ) %>%
  select(id, diagnosis, percent, everything() , -dex) %>%
  arrange(percent)
names_fail_1 <- c("id", "diagnosis", "Percent", "adaboost_1", "avNNet_1", "gamboost_1", "gamLoess_1", "glm_1",
                  "gbm_1", "knn_1", "kknn_1", "lda_1", "mlp_1", "monmlp_1", "naive_bayes_1", "qda_1", "ranger_1",
                  "Rborist_1", "rf_1", "rpart_1", "svmLinear_1", "svmRadial_1", "svmRadialCost_1", "svmRadialSigma_1")
names(Fail_Table_1)[1:24] <- names_fail_1
str(Fail_Table_1)
Fail_Table_1[,1:8] %>% head(8) %>%
  knitr::kable(caption = "Fail Table: 50/50 split: Prep_1: First 8 rows/cols" )
# Prep_2 = nzv, center, scale, pca
Fail_2 <- rowMeans2(predictions_2 == test_set_id$diagnosis)
# create index and success percentage
Fail_Dex_2 <- bind_cols(dex = which(Fail_2 < 1),
                        percent = Fail_2[Fail_2 < 1] )
# create table
Fail_Table_2 <- bind_cols(Fail_Dex_2, pred_ft_2[Fail_Dex_2$dex, ],
                          T_Set_ID[Fail_Dex_2$dex, ] ) %>%
  select(id, diagnosis, percent, everything() , -dex) %>%
  arrange(percent)
names_fail_2 <- c("id", "diagnosis", "Percent", "adaboost_2", "avNNet_2", "gamboost_2", "gamLoess_2", "glm_2",
                  "gbm_2", "knn_2", "kknn_2", "lda_2", "mlp_2", "monmlp_2", "naive_bayes_2", "qda_2", "ranger_2",
                  "Rborist_2", "rf_2", "rpart_2", "svmLinear_2", "svmRadial_2", "svmRadialCost_2", "svmRadialSigma_2")
names(Fail_Table_2)[1:24] <- names_fail_2
str(Fail_Table_2)
Fail_Table_2[,1:8] %>% head(8) %>%
  knitr::kable(caption = "Fail Table: 50/50 split: Prep_2: First 8 rows/cols" )
#
################### 82 / 18 Split #####################
# Same per-prep failure tables as Run One, built on the 82/18 test set.
# Prep = NULL
Fail_3.0 <- rowMeans2(predictions_3.0 == test2_id$diagnosis)
Fail_Dex_3.0 <- bind_cols(dex = which(Fail_3.0 < 1),
                          percent = Fail_3.0[Fail_3.0 < 1] )
Fail_Table_3.0 <- bind_cols(Fail_Dex_3.0, pred_ft_3.0[Fail_Dex_3.0$dex, ],
                            T2_ID[Fail_Dex_3.0$dex, ] ) %>%
  select(id, diagnosis, percent, everything() , -dex) %>%
  arrange(percent)
names_fail_3.0 <- c("id", "diagnosis", "Percent", "adaboost_3.0", "avNNet_3.0", "gamboost_3.0", "gamLoess_3.0", "glm_3.0",
                    "gbm_3.0", "knn_3.0", "kknn_3.0", "lda_3.0", "mlp_3.0", "monmlp_3.0", "naive_bayes_3.0", "qda_3.0", "ranger_3.0",
                    "Rborist_3.0", "rf_3.0", "rpart_3.0", "svmLinear_3.0", "svmRadial_3.0", "svmRadialCost_3.0", "svmRadialSigma_3.0")
names(Fail_Table_3.0)[1:24] <- names_fail_3.0
str(Fail_Table_3.0)
Fail_Table_3.0[,1:8] %>% head(8) %>%
  knitr::kable(caption = "Fail Table: 82/18 split: Prep_3.0: First 8 rows/cols" )
# Prep_1 = center, scale
Fail_3.1 <- rowMeans2(predictions_3.1 == test2_id$diagnosis)
Fail_Dex_3.1 <- bind_cols(dex = which(Fail_3.1 < 1),
                          percent = Fail_3.1[Fail_3.1 < 1] )
Fail_Table_3.1 <- bind_cols(Fail_Dex_3.1, pred_ft_3.1[Fail_Dex_3.1$dex, ],
                            T2_ID[Fail_Dex_3.1$dex, ] ) %>%
  select(id, diagnosis, percent, everything() , -dex) %>%
  arrange(percent)
names_fail_3.1 <- c("id", "diagnosis", "Percent", "adaboost_3.1", "avNNet_3.1", "gamboost_3.1", "gamLoess_3.1", "glm_3.1",
                    "gbm_3.1", "knn_3.1", "kknn_3.1", "lda_3.1", "mlp_3.1", "monmlp_3.1", "naive_bayes_3.1", "qda_3.1", "ranger_3.1",
                    "Rborist_3.1", "rf_3.1", "rpart_3.1", "svmLinear_3.1", "svmRadial_3.1", "svmRadialCost_3.1", "svmRadialSigma_3.1")
names(Fail_Table_3.1)[1:24] <- names_fail_3.1
str(Fail_Table_3.1)
Fail_Table_3.1[,1:8] %>% head(8) %>%
  knitr::kable(caption = "Fail Table: 82/18 split: Prep_3.1: First 8 rows/cols" )
# Prep_2 = nzv, center, scale, pca
Fail_3.2 <- rowMeans2(predictions_3.2 == test2_id$diagnosis)
Fail_Dex_3.2 <- bind_cols(dex = which(Fail_3.2 < 1),
                          percent = Fail_3.2[Fail_3.2 < 1] )
Fail_Table_3.2 <- bind_cols(Fail_Dex_3.2, pred_ft_3.2[Fail_Dex_3.2$dex, ],
                            T2_ID[Fail_Dex_3.2$dex, ] ) %>%
  select(id, diagnosis, percent, everything() , -dex) %>%
  arrange(percent)
names_fail_3.2 <- c("id", "diagnosis", "Percent", "adaboost_3.2", "avNNet_3.2", "gamboost_3.2", "gamLoess_3.2", "glm_3.2",
                    "gbm_3.2", "knn_3.2", "kknn_3.2", "lda_3.2", "mlp_3.2", "monmlp_3.2", "naive_bayes_3.2", "qda_3.2", "ranger_3.2",
                    "Rborist_3.2", "rf_3.2", "rpart_3.2", "svmLinear_3.2", "svmRadial_3.2", "svmRadialCost_3.2", "svmRadialSigma_3.2")
names(Fail_Table_3.2)[1:24] <- names_fail_3.2
str(Fail_Table_3.2)
Fail_Table_3.2[,1:8] %>% head(8) %>%
  knitr::kable(caption = "Fail Table: 82/18 split: Prep_3.2: First 8 rows/cols" )
## Clean up environment
# The intermediate Fail_* vectors/indexes are consumed; keep only tables.
rm(Fail_0, Fail_Dex_0, Fail_1, Fail_Dex_1, Fail_2, Fail_Dex_2,
   Fail_3.0, Fail_Dex_3.0, Fail_3.1, Fail_Dex_3.1, Fail_3.2, Fail_Dex_3.2,
   names_fail_0, names_fail_1, names_fail_2, names_fail_3.0, names_fail_3.1, names_fail_3.2)
#
# SAVE POINT
#
#
########### Fails Tables Per Entire Run, and Preps Across Runs ####
#### 50/50 Split Run : all preps
# Observations every prep's fail table shares within a run: these ids
# failed under all three preprocessing strategies.
Common_Run_One <- intersect(Fail_Table_0$id, Fail_Table_1$id) %>% intersect(Fail_Table_2$id)
# bind_cols auto-suffixes duplicate names (id1, diagnosis1, ...); drop
# the duplicates and the per-prep Percent columns, then recompute one
# Percent across all 63 model/prep columns.
Common_One_Fail_Table <- bind_cols(Fail_Table_0 %>% filter(id %in% Common_Run_One),
                                   Fail_Table_1 %>% filter(id %in% Common_Run_One),
                                   Fail_Table_2 %>% filter(id %in% Common_Run_One)) %>%
  select(everything(), -c(Percent, id1, diagnosis1, Percent1,id2, diagnosis2, Percent2) )
Common_One_Fail_Table <- Common_One_Fail_Table %>%
  mutate(Percent = rowMeans(as.matrix(Common_One_Fail_Table[3:65]) == Common_One_Fail_Table$diagnosis)) %>%
  select(id, diagnosis, Percent, everything() ) %>% arrange(Percent)
Common_One_Fail_Table[,1:8] %>% head(8) %>%
  knitr::kable(caption = "50/50 Split: Failures Common to All Preps: First 8 rows/cols" )
#### 82/18 Split: all preps
# Same construction for Run Two.
Common_Run_Two <- intersect(Fail_Table_3.0$id, Fail_Table_3.1$id) %>% intersect(Fail_Table_3.2$id)
Common_Two_Fail_Table <- bind_cols(Fail_Table_3.0 %>% filter(id %in% Common_Run_Two),
                                   Fail_Table_3.1 %>% filter(id %in% Common_Run_Two),
                                   Fail_Table_3.2 %>% filter(id %in% Common_Run_Two)) %>%
  select(everything(), -c(Percent, id1, diagnosis1, Percent1,
                          id2, diagnosis2, Percent2) )
Common_Two_Fail_Table <- Common_Two_Fail_Table %>%
  mutate(Percent = rowMeans(as.matrix(Common_Two_Fail_Table[3:65]) == Common_Two_Fail_Table$diagnosis)) %>%
  select(id, diagnosis, Percent, everything() ) %>% arrange(Percent)
Common_Two_Fail_Table[,1:8] %>% head(8) %>%
  knitr::kable(caption = "82/18 Split: Failures Common to All Preps: First 8 rows/cols" )
# Class balance of the commonly-failed cases, per run.
table(Common_One_Fail_Table$diagnosis)
table(Common_Two_Fail_Table$diagnosis)
##### Tables per prep across splits
# Same-prep failure overlap ACROSS the two splits: ids that failed under
# a given prep in both the 50/50 and 82/18 runs; *_percent averages the
# two runs' success rates.
# Prep = NULL
Null_Run_Fails <- intersect(Fail_Table_0$id, Fail_Table_3.0$id)
Prep_Null_Fail_Table <- bind_cols(Fail_Table_0 %>% filter(id %in% Null_Run_Fails ),
                                  Fail_Table_3.0 %>% filter(id %in% Null_Run_Fails ) ) %>%
  select(everything(), -c(id1, diagnosis1) ) %>%
  mutate(Null_percent = (Percent + Percent1) / 2 ) %>%
  select(id, diagnosis, Null_percent, everything(), -c(Percent, Percent1) ) %>%
  arrange(Null_percent)
Prep_Null_Fail_Table[,1:8] %>% head(8) %>%
  knitr::kable(caption = "Null Prep Fails: Both Splits: First 8 rows/cols" )
# Prep_1 = center, scale
Prep1_Run_Fails <- intersect(Fail_Table_1$id, Fail_Table_3.1$id)
Prep1_Fail_Table <- bind_cols(Fail_Table_1 %>% filter(id %in% Prep1_Run_Fails),
                              Fail_Table_3.1 %>% filter(id %in% Prep1_Run_Fails) ) %>%
  select(everything(), -c(id1, diagnosis1) ) %>%
  mutate(Prep1_percent = (Percent + Percent1) / 2 ) %>%
  select(id, diagnosis, Prep1_percent, everything(), -c(Percent, Percent1) ) %>%
  arrange(Prep1_percent)
Prep1_Fail_Table[,1:8] %>% head(8) %>%
  knitr::kable(caption = "Prep_1 Fails: Both Splits: First 8 rows/cols" )
# Prep_2 = nzv, center, scale, pca
Prep2_Run_Fails <- intersect(Fail_Table_2$id, Fail_Table_3.2$id)
Prep2_Fail_Table <- bind_cols(Fail_Table_2 %>% filter(id %in% Prep2_Run_Fails ),
                              Fail_Table_3.2 %>% filter(id %in% Prep2_Run_Fails ) ) %>%
  select(everything(), -c(id1, diagnosis1) ) %>%
  mutate(Prep2_percent = (Percent + Percent1) / 2 ) %>%
  select(id, diagnosis, Prep2_percent, everything(), -c(Percent, Percent1) ) %>%
  arrange(Prep2_percent)
Prep2_Fail_Table[,1:8] %>% head(8) %>%
  knitr::kable(caption = "Prep_2 Fails: Both Splits: First 8 rows/cols" )
############ Relist results
# Re-emit the three kables together for the RMD report.
Prep_Null_Fail_Table[,1:8] %>% head(8) %>%
  knitr::kable(caption = "Null Prep Fails: Both Splits: First 8 rows/cols" )
Prep1_Fail_Table[,1:8] %>% head(8) %>%
  knitr::kable(caption = "Prep_1 Fails: Both Splits: First 8 rows/cols" )
Prep2_Fail_Table[,1:8] %>% head(8) %>%
  knitr::kable(caption = "Prep_2 Fails: Both Splits: First 8 rows/cols" )
######
### Failures common to all runs, all preps (but not all models!): the intersection
######
Overall_Run_Fails <- intersect(Common_Run_One , Common_Run_Two )
# Bind the two splits' common-failure tables side by side for the overall
# ids; drop the duplicated columns from the second table.
Overall_Fail_Table_Long <- bind_cols(Common_One_Fail_Table %>% filter(id %in% Overall_Run_Fails ),
                                     Common_Two_Fail_Table %>% filter(id %in% Overall_Run_Fails )) %>%
  select(everything(), -c(Percent, id1,diagnosis1, Percent1))
# Percent = share of all 126 model predictions (columns 3:128) that agree
# with the true diagnosis.
# Fix: base R has rowMeans(), not rowMeans2() (rowMeans2 is from the
# matrixStats package, which is never loaded in this script); every other
# Percent column here is computed with rowMeans().
Overall_Fail_Table_Long <- Overall_Fail_Table_Long %>%
  mutate(Percent = rowMeans(as.matrix(Overall_Fail_Table_Long[3:128]) == Overall_Fail_Table_Long$diagnosis)) %>%
  select(id, diagnosis, Percent, everything())
# Column names for the long table: 3 key columns then 126 model columns
# (21 models x 3 preps x 2 splits); the suffix encodes prep/split:
# _0/_1/_2 are the first split's preps, _3.0/_3.1/_3.2 the second's.
names_vector_long <- c("id", "diagnosis", "Percent", "adaboost_0", "avNNet_0", "gamboost_0", "gamLoess_0", "glm_0",
                       "gbm_0", "knn_0", "kknn_0", "lda_0", "mlp_0", "monmlp_0", "naive_bayes_0", "qda_0", "ranger_0",
                       "Rborist_0", "rf_0", "rpart_0", "svmLinear_0", "svmRadial_0", "svmRadialCost_0", "svmRadialSigma_0",
                       "adaboost_1", "avNNet_1", "gamboost_1", "gamLoess_1", "glm_1", "gbm_1", "knn_1", "kknn_1", "lda_1",
                       "mlp_1", "monmlp_1", "naive_bayes_1", "qda_1", "ranger_1", "Rborist_1", "rf_1", "rpart_1" ,"svmLinear_1",
                       "svmRadial_1", "svmRadialCost_1", "svmRadialSigma_1", "adaboost_2", "avNNet_2", "gamboost_2", "gamLoess_2",
                       "glm_2", "gbm_2", "knn_2", "kknn_2", "lda_2", "mlp_2", "monmlp_2", "naive_bayes_2", "qda_2", "ranger_2" ,
                       "Rborist_2", "rf_2", "rpart_2", "svmLinear_2", "svmRadial_2", "svmRadialCost_2", "svmRadialSigma_2",
                       "adaboost_3.0", "avNNet_3.0", "gamboost_3.0", "gamLoess_3.0", "glm_3.0", "gbm_3.0", "knn_3.0", "kknn_3.0",
                       "lda_3.0", "mlp_3.0", "monmlp_3.0", "naive_bayes_3.0", "qda_3.0", "ranger_3.0", "Rborist_3.0", "rf_3.0",
                       "rpart_3.0", "svmLinear_3.0", "svmRadial_3.0", "svmRadialCost_3.0", "svmRadialSigma_3.0", "adaboost_3.1",
                       "avNNet_3.1", "gamboost_3.1", "gamLoess_3.1", "glm_3.1", "gbm_3.1", "knn_3.1", "kknn_3.1", "lda_3.1",
                       "mlp_3.1", "monmlp_3.1", "naive_bayes_3.1", "qda_3.1", "ranger_3.1", "Rborist_3.1", "rf_3.1", "rpart_3.1",
                       "svmLinear_3.1", "svmRadial_3.1", "svmRadialCost_3.1", "svmRadialSigma_3.1", "adaboost_3.2", "avNNet_3.2",
                       "gamboost_3.2", "gamLoess_3.2", "glm_3.2", "gbm_3.2", "knn_3.2", "kknn_3.2", "lda_3.2", "mlp_3.2",
                       "monmlp_3.2", "naive_bayes_3.2", "qda_3.2", "ranger_3.2", "Rborist_3.2", "rf_3.2", "rpart_3.2",
                       "svmLinear_3.2", "svmRadial_3.2", "svmRadialCost_3.2", "svmRadialSigma_3.2" )
# Rename all 129 columns in one shot.
names(Overall_Fail_Table_Long)[1:129] <- names_vector_long
# Example
Overall_Fail_Table_Long[1:8 ,1:8 ] %>%
  knitr::kable(caption = "Failures Common to All Ensemble Runs [but not all models!]")
# Data information
# Join the common-failure ids back to the raw data so the hardest cases can
# be inspected feature by feature.
Overall_Fail_Table_Short <- bind_cols(Common_One_Fail_Table %>%
                                        filter(id %in% Overall_Run_Fails ),
                                      Common_Two_Fail_Table %>%
                                        filter(id %in% Overall_Run_Fails ) ) %>%
  mutate(Percent = Overall_Fail_Table_Long$Percent ) %>%  # reuse the 126-model Percent
  select(id, Percent) %>% left_join(wdbc_data, by = "id" ) %>%
  select(everything()) %>%
  arrange(Percent)
Overall_Fail_Table_Short[1:8, 1:8] %>%
  knitr::kable(caption = "Common Failures by ID: The Data in Detail")
#
#
#
############################ Individual Models #########################################
## Case study: gamLoess across both splits and all three preps
GamLoess_Run_One <- All_Predictions_Run_One %>%
  select(id, diagnosis, starts_with("gamLoess"))
GamLoess_Run_Two <- All_Predictions_Run_Two %>%
  select(id, diagnosis, starts_with("gamLoess"))
# percent = share of the three prep columns (3:5) matching the diagnosis;
# keep only ids that at least one prep misclassified.
GamLoess_Fails_One <- GamLoess_Run_One %>%
  mutate(percent = rowMeans(as.matrix(GamLoess_Run_One[3:5]) == GamLoess_Run_One$diagnosis)) %>%
  select(id, diagnosis, percent, everything() ) %>%
  filter(percent < 1) %>%
  arrange(percent)
GamLoess_Fails_Two <- GamLoess_Run_Two %>%
  mutate(percent = rowMeans(as.matrix(GamLoess_Run_Two[3:5]) == GamLoess_Run_Two$diagnosis)) %>%
  select(id, diagnosis, percent, everything() ) %>%
  filter(percent < 1) %>%
  arrange(percent)
GamLoess_Fails_One %>% head(8) %>%
  knitr::kable(caption = "gamLoess Fails: Run One: [First 8]")
GamLoess_Fails_Two %>% head(8) %>%
  knitr::kable(caption = "gamLoess Fails: Run Two: [3 total]")
# Accuracy columns for gamLoess from both runs, side by side.
gamLoess_Accuracy_Table <- bind_cols(Accuracy_Table_1 %>%
                                       filter(Model == "gamLoess") %>%
                                       select(Model, starts_with("Acc")),
                                     Accuracy_Table_2 %>%
                                       filter(Model == "gamLoess") %>%
                                       select(starts_with("Acc")) )
gamLoess_Accuracy_Table %>% rename( "Model/Prep" = "Model") %>%
  knitr::kable(caption = "gamLoess Accuracy Scores by Prep & ID")
gamLoess_names <- names(gamLoess_Accuracy_Table)[2:7]
by_Prep <- c("Fail Count") # create rowname column
# Convert accuracies back into raw failure counts; 285 and 104 are the test
# set sizes of the two runs — TODO confirm against the split definitions.
gamLoess_failures_by_prep <- bind_cols(enframe(by_Prep, name = NULL, value = "Model"),
                                       abs(round((gamLoess_Accuracy_Table[,2:4] * 285) - 285)),
                                       abs(round((gamLoess_Accuracy_Table[,5:7] * 104) - 104))
) # do not re-run unless restarting
gamLoess_failures_by_prep %>%
  knitr::kable(caption = "gamLoess Failure Count by Prep")
# lead the gamLoess example with this
gamLoess_Accuracy_Table[1,1] <- "Accuracy" # for RMD, see next
bind_rows(gamLoess_Accuracy_Table, gamLoess_failures_by_prep) %>%
  rename( "gamLoess" = "Model") %>%
  knitr::kable(caption = "gamLoess Accuracy & Fail Counts by Prep")
|
83c95a4c923cfa3be73dde5e52576869cc546064
|
3fe3c4c2e05c1f60ebba273d53e413607684ff92
|
/man/print.nb.test.Rd
|
aa37dec75fbb1b6da8db57ca7ceff108956204c8
|
[] |
no_license
|
cran/NBPSeq
|
60282cf216bd83793780d5f276a4ef7df88c08d1
|
0b30098f016d10c514e7942402dd405c1e18a363
|
refs/heads/master
| 2022-06-29T05:26:20.184281
| 2022-06-09T10:02:06
| 2022-06-09T10:02:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 370
|
rd
|
print.nb.test.Rd
|
\name{print.nb.test}
\alias{print.nb.test}
\title{Print output from \code{\link{test.coefficient}}}
\usage{
\method{print}{nb.test}(x, ...)
}
\arguments{
\item{x}{output from \code{\link{test.coefficient}}}
\item{...}{currently not used}
}
\description{
We simply print out the structure of \code{x}. (Currently
the method is equivalent to \code{print(str(x))}.)
}
|
e2910a1b45ccdea607c890486e3b81f883bdd29d
|
cad7ac48b067c7f10c4890dccb41c68f12368c2c
|
/1_4.R
|
7a4ec6bdd3780b11c0e8bb582c3397b6a3d04cf1
|
[] |
no_license
|
AnguillaJaponica/rctr
|
2a595d16c29501021b9ffad7d92d596897717210
|
37d1bdda97d8d58ecccc15dee8e8fd359c37faa8
|
refs/heads/master
| 2023-04-15T22:45:45.531240
| 2021-04-29T13:23:30
| 2021-04-29T13:23:30
| 362,815,054
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,219
|
r
|
1_4.R
|
# RCT-style analysis of the MineThatData e-mail campaign data (Hillstrom
# challenge), plus construction of a deliberately biased sample to
# illustrate selection bias.
install.packages("tidyverse")  # NOTE(review): installing inside a script is a side effect; usually done once interactively
library("tidyverse")
email_data <- read.csv("http://www.minethatdata.com/Kevin_Hillstrom_MineThatData_E-MailAnalytics_DataMiningChallenge_2008.03.20.csv")
# Keep only the men's-mail and no-mail arms; treatment = 1 for men's e-mail.
male_df <- email_data %>%
  filter(segment != "Womens E-Mail") %>%
  mutate(treatment = ifelse(segment == "Mens E-Mail", 1, 0))
# Per-arm summary: conversion rate, average spend, and group size.
summary_by_segment <- male_df %>%
  group_by(treatment) %>%
  summarise(conversion_rate = mean(conversion),
            spend_mean = mean(spend),
            count = n())
mens_mail <- male_df %>%
  filter(treatment == 1) %>%
  pull(spend)
no_mail <- male_df %>%
  filter(treatment == 0) %>%
  pull(spend)
# Two-sample Student's t-test (equal variances) on spend.
rct_test <- t.test(mens_mail, no_mail, var.equal = T)
set.seed(1)
obs_rate_c <- 0.5
obs_rate_t <- 0.5
# Biased sample: "engaged" customers (history > 300, recent purchase, or
# multichannel) are kept with probability 1 in treatment but only 0.5 in
# control, and vice versa for everyone else — simulating selection bias.
biased_data <- male_df %>%
  mutate(obs_rate_c =
           ifelse( (history > 300) | (recency < 6) |
                     (channel == "Multichannel"), obs_rate_c, 1),
         obs_rate_t =
           ifelse( (history > 300) | (recency < 6) |
                     (channel == "Multichannel"), 1, obs_rate_t),
         random_number = runif(n = NROW(male_df))) %>%
  filter( (treatment == 0 & random_number < obs_rate_c ) |
            (treatment == 1 & random_number < obs_rate_t) )
|
d5aece887083d6864876e5ccf169296f2b2f0983
|
59aa5552391ef722b9a7fea03b42d7d588c00688
|
/Intervals_PMRF.R
|
b45abb7ea47b85894ccf6aef4bb72360d594a2a9
|
[] |
no_license
|
cascadiaresearch/PMRF_Feb2020_Report
|
a0255ae347b35bc682ab0b13606eabc0cfd5488c
|
53efa5d504bd2a88e933396f5b2cf88a54298be7
|
refs/heads/main
| 2023-01-04T04:23:16.450276
| 2020-11-02T18:29:13
| 2020-11-02T18:29:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,411
|
r
|
Intervals_PMRF.R
|
## PMRF Feb 2020 Report:
## Calculate intervals b/t locations
## Michaela A. Kratofil
## 05 OCT 2020
#######################################################
# load packages
library(tidyverse)
library(lubridate)
# read in location data
# NOTE(review): the second read.csv overwrites the first — only the
# FastGPS file is actually used below.
locs <- read.csv('Douglas Filtered/TtTag002-035_DouglasFiltered_KS_r20d3lc2_2020MAYv2.csv', header = T)
locs <- read.csv("Fastloc-GPS/Raw files/GmTag231-180166-1-FastGPS.csv")
# review data
str(locs)
summary(locs)
locs$date <- as.POSIXct(locs$date, tz = 'UTC') # format date
locs$animal <- as.factor(locs$animal) # make animal/tag a factor
## Take each tag through one at a time; easier to check for inconsistencies ##
# subset out 1 tag
# NOTE(review): the second assignment overwrites the subset with ALL tags.
sub <- filter(locs, animal %in% c("TtTag034", "TtTag035"))
sub <- locs
# NOTE(review): `g231` is not defined anywhere in this script — presumably
# left over from a previous interactive session; this line errors as-is.
l <- bind_rows(g231, sub) %>%
  arrange(animal, date)
write.csv(l, "PMRF Feb 2020 Report/KauaiFeb2020_GmTtTags_DouglasFiltered_Argos_2020Oct05.csv", row.names = F)
# calculate delta t (change in time), in hours, between consecutive rows
sub$deltaT <- as.numeric(NA) # deltaT
sub$deltaT_r <- as.numeric(NA) # rounded deltaT
# On the last iteration sub[i + 1, "date"] indexes past the end and yields
# NA, so the final deltaT is NA (dropped below via sub.nons).
for (i in 1:nrow(sub)) {
  Start = sub[i, "date"]
  End = sub[i + 1, "date"]
  sub[i, "deltaT"] = difftime(End, Start, units = 'hours')
  sub[i, "deltaT_r"] = round(difftime(End, Start, units = 'hours'), 0) # round digits
}
sub.nons <- filter(sub, !is.na(deltaT))
median(sub.nons$deltaT)
max(sub.nons$deltaT)
|
eaa64ad29e19141ee9c8ac6ccbe6b55ce105884a
|
1ccb6df12f53fe2e387bf624b65da4f6af78f203
|
/notes/shiny/tutorialUI/lesson2/App-2a/ui.R
|
e19a751ffc738d27c87eceb4fc59f132c1f7baf9
|
[] |
no_license
|
raffled/DSCI504
|
960963720cad8c5b4f6c4bea449beaec7ef7ff91
|
0aebb9331b0cfabe40db4977c117df4c58c6297b
|
refs/heads/master
| 2016-09-05T09:52:07.153963
| 2015-05-04T03:01:34
| 2015-05-04T03:01:34
| 34,693,548
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 185
|
r
|
ui.R
|
# ui.R — minimal Shiny layout demo: a title panel over a sidebar layout
# with the sidebar placed on the right-hand side.
shinyUI(fluidPage(
  titlePanel("title panel"),
  sidebarLayout(
    position = "right",
    sidebarPanel("sidebar panel"),
    mainPanel("main panel")
  )
))
|
99d79b9222b86b326e7a5d8793a8afa35b1865c7
|
f72562681d88b12b1e0c76e2f41878a44729650e
|
/D3playing.R
|
ff94e548924d5cdee72f94e820bf5fddd2d6da91
|
[] |
no_license
|
vargovargo/CHVIr
|
1706cfd35e0779cfebfd56f759bd1ded2a503dbb
|
56e2b295a78a733f4fcf5f140097327f314d107b
|
refs/heads/master
| 2018-09-17T18:09:44.056721
| 2018-08-13T21:31:49
| 2018-08-13T21:31:49
| 106,874,655
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,704
|
r
|
D3playing.R
|
# Exploratory script: build a county-level heat-vulnerability table from
# CHVI data, then try several JS charting libraries (rCharts, c3, plotly).
library(shiny)
library(tidyverse)
library(leaflet)
library(shinythemes)
library(ggthemes)
library(sf)
library(DT)
library(plotly)
CHVIdata <- readRDS("~/GitHub/CHVIr/CHVIz/chviCountyTidyRace.RDS")
# Exposure: projected extreme heat days (2085 scenario) in tertiles, joined
# with sensitivity: percent of population aged 65+, also in tertiles, plus
# the county population (denmntr column).
tri <- {CHVIdata %>%
    filter(def == "Projected number of extreme heat days" & strata %in% c("2085", 2085, "none") & race == "Total") %>%
    mutate(expTer = ntile(est, 3)) %>%
    select(county, climReg, COUNTYFI_1, def, est, expTer) %>%
    spread(key = def, value = round(est,2))
} %>% left_join({
  CHVIdata %>%
    filter(def == "Percent of population aged 65 years or older" & strata %in% c("Overall","ViolentCrime","total","2006-2010","2009-2013","All Non-English","none", "population-weighted") & race =="Total") %>%
    mutate(sensTer = ntile(est, 3)) %>%
    select(county, climReg, COUNTYFI_1, def, est, sensTer) %>%
    spread(key = def, value = round(est,2)) %>%
    left_join({
      CHVIdata %>%
        filter(def == "Percent of population aged 65 years or older" & race == "Total") %>%
        select(county, denmntr)%>%
        rename(Population = denmntr)
    })
}) %>%
  # Combined vulnerability class from the sum of the two tertiles (2..6).
  mutate(Population = as.numeric(as.character(Population)),
         vulnerability = factor(ifelse(sensTer + expTer == 2, "lowest",
                                       ifelse(sensTer + expTer == 3, "low",
                                              ifelse(sensTer + expTer == 4, "medium",
                                                     ifelse(sensTer + expTer == 5, "high","highest")))),
                                levels = c("lowest","low","medium","high","highest")))
# RGBA marker colors per vulnerability class (opacity rises with risk).
tri[["sign"]] <- ifelse(tri[["vulnerability"]] == "lowest", "rgba(26,152,80, 0.5)",
                        ifelse(tri[["vulnerability"]] == "low", "rgba(166,217,106, 0.6)",
                               ifelse(tri[["vulnerability"]] == "medium", "rgba(253,174,97, 0.7)",
                                      ifelse(tri[["vulnerability"]] == "high", "rgba(244,109,67, 0.9)", "rgba(215,48,39, 1)"))))
# Marker size = population quintile.
tri[["size"]] <- ntile(tri[["Population"]],5)
tri <- na.omit(tri)
#devtools::install_github("rCharts", "ramnathv")
library(rCharts)
# names(tri)[5] <- "Exposure"
# names(tri)[7] <- "Sensitivity"
# NOTE(review): nPlot() references the renamed columns Exposure/Sensitivity,
# but the renames above are commented out — this call fails unless they are
# run first.
p1 <- nPlot(Sensitivity ~ Exposure, group = 'vulnerability', data = tri, type = 'scatterChart')
p1$xAxis(axisLabel = "Projected number of extreme heat days")
p1$yAxis(axisLabel = "Percent of population aged 65 years or older")
p1
# dimple bubble chart: x = exposure, y = sensitivity, bubble size = size.
d8 <- dPlot(x = names(tri)[5],
            y = "Percent of population aged 65 years or older",
            z = "size",
            groups = "vulnerability",
            data = tri,
            type = "bubble"
)
d8$xAxis()
d8$zAxis(type = "addMeasureAxis")
d8
#devtools::install_github("mrjoh3/c3")
library(c3)
# NOTE(review): RColorBrewer is a package, not a function — the final pipe
# step below errors; a palette call (e.g. brewer.pal) was presumably meant.
tri %>%
  c3(x = "Projected number of extreme heat days",
     y = "Percent of population aged 65 years or older",
     group = 'vulnerability') %>%
  point_options(r = 4,
                expand.r = 2) %>%
  c3_scatter() %>% RColorBrewer(pal="YlOrRd", reverse =TRUE)
RColorBrewer::brewer.pal.info
# Commented-out ggplot2 version of the same bubble chart:
# tri %>% ggplot(aes_q(x = as.name(names(tri)[5]),
#                     y = as.name(names(tri)[7]),
#                     size = as.name(names(tri)[8]),
#                     color = as.name(names(tri)[8])
#                     )) +
#   geom_point(alpha = 0.9) +
#   guides(alpha = FALSE, color = FALSE, size = FALSE) +
#   scale_color_brewer(palette = "Spectral", direction = -1) +
#   scale_size_continuous(range = c(3,15)) +
#   geom_text(aes_q(x = as.name(names(tri)[5]),
#                   y = as.name(names(tri)[7]),
#                   label = as.name(names(tri)[1])), size= 3, color="black") +
#   ggtitle("This plot displays the vulnerability to two factors. Counties in the top right corner (red) are in the top third of all counties for each.")
#
# Plotly version: bubble chart of exposure (col 5) vs sensitivity (col 7),
# colored by vulnerability class, sized by population, labeled by county.
# Fix: removed a stray trailing "})" after the config() call — it matched
# no opener and made the whole file unparseable (presumably left over from
# a shiny renderPlotly({ ... }) wrapper this chain was copied out of).
plot_ly(
  data = tri,
  x = ~ round(tri[[5]],2),
  y = ~ round(tri[[7]],2),
  hoverinfo = 'text',
  text = ~paste('</br> County:',tri[["county"]],
                '</br> Population:',format(tri[["Population"]], big.mark = ","),
                '</br> Exposure:', round(tri[[5]],2), names(tri)[5],
                '</br> Sensitivity:', round(tri[[7]],2), names(tri)[7]),
  showlegend = FALSE
) %>%
  add_markers(type = 'scatter',
              mode = 'markers',
              size = ~tri[["Population"]]+50,
              marker = list(color = tri[["sign"]],
                            size = tri[["size"]]*25,
                            line = list(color = 'rgba(99,99,99, .8)',width = 0.5))) %>%
  add_text(type = 'scatter',mode = 'text', text = tri[["county"]], textposition = 'top right',
           textfont = list(
             size = 10,
             color = toRGB("grey40"))) %>%
  layout(title = paste0('Combined Vulnerabiltity from Exposure (',names(tri)[5], ') \n and Sensitivity (',names(tri)[7],")") ,
         margin = list(l = 50,
                       t = 70),
         xaxis = list(
           title = names(tri)[5],
           autotick = TRUE,
           ticks = "outside",
           tick0 = 0,
           dtick = 0.25,
           ticklen = 5,
           tickwidth = 2,
           tickcolor = toRGB("black")
         ),
         yaxis = list(title = names(tri)[7],
                      autotick = TRUE,
                      ticks = "outside",
                      tick0 = 0,
                      dtick = 0.25,
                      ticklen = 5,
                      tickwidth = 2,
                      tickcolor = toRGB("black"))
  ) %>%
  config(collaborate = FALSE,
         displaylogo = FALSE,
         modeBarButtonsToRemove = list(
           'toggleSpikelines',
           'sendDataToCloud',
           'hoverCompareCartesian',
           'hoverClosestCartesian'
         )
  )
|
d1bc560336ed0394bd81773c52721683c63e3bdc
|
044d3102f385d400d921ef094f32622037bc8f85
|
/Basic Programs/Input_Output/app.R
|
556c209bbb6672c7f429aa1289dceccf99e1e5bb
|
[] |
no_license
|
chidcrushev/RShiny
|
6d2ce453d73b46b518166463010567595ef11ec4
|
b6a7010d71aa0126ec33c0f08a9afd2de12f41d3
|
refs/heads/master
| 2020-05-07T11:27:35.423219
| 2019-04-09T23:05:52
| 2019-04-09T23:05:52
| 180,462,378
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,599
|
r
|
app.R
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
# Demonstrates basic Shiny input widgets (text, password, checkbox group,
# radio buttons) and echoing their values back via renderText().
library(shiny)
? sliderInput()  # opens the help page for sliderInput (interactive use only)
# Define the UI: one widget of each basic input type, each followed by a
# textOutput slot for its echoed value.
ui <- fluidPage(
  #textInput
  textInput(
    inputId = "text",
    label = "Enter the value",
    value = "",
    width = NULL
  ),
  textOutput("value"),
  #passwordInput
  passwordInput(
    inputId = "password",
    label = "Enter the password",
    value = "",
    width = NULL
  ),
  textOutput("pwd"),
  #checkboxGroupInput
  checkboxGroupInput(
    inputId = "checkbox",
    label = "Are you sure you want to continue?",
    choices = c("Yes" = "Y",
                "No" =
                  "N"),
    selected = NULL,
    inline = FALSE,
    width = NULL
  ),
  # NOTE(review): no renderText() exists for output$checkbox in the server,
  # so this output slot stays empty at runtime.
  textOutput("checkbox"),
  #radiobuttons
  radioButtons(
    inputId = "radio",
    label = "Do you like Football?",
    choices = c("Yup" = "Yes", "Nope" = "No") ,
    selected = NULL,
    inline = FALSE,
    width = NULL
  ),
  #This just adds space for the object to be printed
  #You need to put the code in server function if you want the output to be printed
  textOutput("radioButton")
)
# Define server logic: echo each input value back to its text output.
server <- function(input, output) {
  output$value <- renderText({
    input$text
  })
  output$pwd <- renderText({
    input$password
  })
  output$radioButton <- renderText({
    paste("You have chosen:", input$radio)
  })
}
# Run the application
shinyApp(ui = ui, server = server)
|
59846157cd4a5dae5ea0cec6d7a2831281dc6ce8
|
38a5a35e74e487f400fccb327749a1a97e0309a8
|
/code/downsampling_code/run_archr_geneactivity.R
|
1e3e882bfa44c3c4056b84f3ab71aa0b10f4c6b0
|
[] |
no_license
|
timoast/signac-paper
|
1d0f303f20ab018aa69e8929f6a66cc110e1c81f
|
1cdbb6dd6a5ad817bd23bb7d65319d5bf802455f
|
refs/heads/master
| 2023-08-25T10:59:33.951761
| 2021-11-01T21:05:01
| 2021-11-01T21:05:01
| 309,495,686
| 7
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 739
|
r
|
run_archr_geneactivity.R
|
# Benchmark ArchR's gene-activity (gene score) computation.
# Usage: Rscript run_archr_geneactivity.R <ncore> <arrowfile> <nrep> <timefile> <genome>
library(ArchR)
args = commandArgs(trailingOnly = TRUE)
ncore <- as.numeric(args[1])   # number of ArchR worker threads
arrowfile <- args[2]           # path to the saved ArchR project directory
nrep <- args[3]                # number of timing repetitions
timefile <- args[4]            # output file: one elapsed time (seconds) per line
genome <- args[5]              # genome build identifier passed to ArchR
# load archr project for downsampling level
proj <- loadArchRProject(path = arrowfile)
# set threads
addArchRThreads(threads = ncore)
addArchRGenome(genome)
# run gene activity n times, timing each run; gc() between runs to reduce
# memory-pressure interference with the timings
invisible(gc())
timings <- c()
for (i in seq_len(length.out = as.numeric(x = nrep))) {
  start.time <- Sys.time()
  proj <- addGeneScoreMatrix(input = proj, force = TRUE)
  elapsed <- as.numeric(Sys.time() - start.time, units = "secs")
  timings <- c(timings, elapsed)
  invisible(gc())
}
# save timings
writeLines(text = sapply(X = timings, FUN = as.character), con = timefile, sep = "\n")
|
8dbc259a69df6f34f8694ccc66c831c94e216558
|
fcebca7c5725c44796d90a7158350e52aa61cc72
|
/man/RSiena-package.Rd
|
8e6b8ae0a0f8d977c8b290c80e71ac741102b698
|
[] |
no_license
|
kkc-krish/RSiena
|
c082a0e1c3698bffd68734387347c4de7981698f
|
4f9d65392367703150e6285291a9b41d23e647c6
|
refs/heads/master
| 2020-12-24T19:59:58.649070
| 2013-06-18T00:00:00
| 2013-06-18T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,757
|
rd
|
RSiena-package.Rd
|
\name{RSiena-package}
\alias{RSiena-package}
\alias{RSiena}
\docType{package}
\title{
Simulation Investigation for Empirical Network Analysis
}
\description{
Fits statistical models to longitudinal sets of networks, and to
longitudinal sets of networks and behavioral variables.
Not only one-mode networks but also two-mode networks
and multivariate networks are allowed.
The models are stochastic actor-oriented models.
Package \code{"RSienaTest"} is the development version, and
is distributed through R-Forge, see
\url{http://r-forge.r-project.org/R/?group_id=461}.
Package \code{"RSiena"} is the official release.
}
\details{
Use \code{\link{siena07}} to fit a model.
Data objects can be created from matrices and
vectors using \code{\link{sienaDependent}}, \code{\link{coCovar}} etc.,
and finally \code{\link{sienaDataCreate}}.
Another possibility (but with less flexibility) is via the
Gui displayed by \code{\link{siena01Gui}}, or via
\code{\link{sienaDataCreateFromSession}}.
Effects are selected using an \emph{effects} object, which can be created
using \code{\link{getEffects}}.
Control of the estimation algorithm requires a
\code{sienaAlgorithm} object that
defines the settings (parameters) of the algorithm,\cr
and which can be created by \code{\link{sienaAlgorithmCreate}}
(alias \code{\link{sienaModelCreate}}).
More detailed help is available in the manual which you can display
using \cr
\code{RShowDoc("RSiena_Manual", package="RSiena")}
\tabular{ll}{
Package: \tab RSiena\cr
Type: \tab Package\cr
Version: \tab 1.1-232\cr
Date: \tab 2013-06-18\cr
Depends: \tab R (>= 2.15.0)\cr
Imports: \tab Matrix\cr
Suggests: \tab tcltk, network, codetools, lattice, MASS, parallel,
xtable, tools\cr
SystemRequirements: \tab GNU make, tcl/tk 8.5, Tktable\cr
License: \tab GPL-2 \cr
LazyData: \tab yes\cr
NeedsCompilation: \tab yes\cr
BuildResaveData: \tab no\cr
}
}
\author{
Ruth Ripley, Krists Boitmanis, Tom Snijders.
Contributions by Josh Lospinoso, Charlotte Greenan, Christian Steglich,
Johan Koskinen, Mark Ortmann, and Nynke Niezink.
Maintainer: Tom A.B. Snijders <tom.snijders@nuffield.ox.ac.uk>
}
\references{
\itemize{
\item Schweinberger, Michael, and Snijders,
Tom A.B. (2007). Markov models for digraph panel data:
Monte Carlo-based derivative estimation.
\emph{Computational Statistics and Data Analysis} 51, 4465-4483.
\item Snijders, Tom A.B. (2001).
The statistical evaluation of social network dynamics.
\emph{Sociological Methodology}, 31, 361-395.
\item Snijders, Tom A.B., Steglich, Christian E.G., and Schweinberger,
Michael (2007). Modeling the co-evolution of networks and behavior.
Pp. 41-71 in \emph{Longitudinal models in the behavioral
and related sciences},
edited by Kees van Montfort, Han Oud and Albert Satorra; Lawrence Erlbaum.
\item Steglich, Christian E. G., Snijders, Tom A. B., and
Pearson, Michael A. (2010). Dynamic networks and behavior:
Separating selection from influence. Sociological Methodology, 40, 329-393.
\item Further see the extensive manual accessible by the command\cr
\code{RShowDoc("RSiena_Manual", package="RSiena")}\cr
and the website \url{http://www.stats.ox.ac.uk/~snijders/siena/} .
}
}
\seealso{\code{\link{siena07}}}
\examples{
mynet1 <- sienaDependent(array(c(tmp3, tmp4), dim=c(32, 32, 2)))
mydata <- sienaDataCreate(mynet1)
myeff <- getEffects(mydata)
myeff <- includeEffects(myeff, transTrip)
myeff
myalgorithm <- sienaAlgorithmCreate(nsub=3, n3=200)
ans <- siena07(myalgorithm, data=mydata, effects=myeff, batch=TRUE)
summary(ans)
}
\keyword{ package }
|
3876639b7f2bee6aa8ced406e1d6bb97634cdc2c
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/exuber/R/tidiers-radf.R
|
1927943cbbbda6688cf6ff89da3c583f142105ed
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,246
|
r
|
tidiers-radf.R
|
#' Tidy a `radf_obj` object
#'
#' Summarizes information about `radf_obj` object.
#'
#' @param x An object of class `radf_obj`.
#' @param format Long or wide format (default = "wide").
#' @param panel If TRUE then returns the panel statistics
#' @param ... Further arguments passed to methods. Not used.
#'
#' @importFrom purrr keep map reduce pluck
#' @importFrom dplyr full_join arrange
#' @importFrom rlang set_names
#' @importFrom tidyr gather
#' @importFrom tibble enframe
#'
#' @return A [tibble::tibble()]
#'
#' @export
#' @examples
#' \donttest{
#' dta <- data.frame(psy1 = sim_psy1(n = 100), psy2 = sim_psy2(n = 100))
#'
#' rfd <- radf(dta)
#'
#' # Get the test statistic
#' tidy(rfd)
#'
#' # Get the test statistic sequences
#' augment(rfd)
#'
#' # Get the panel test statistic
#' tidy(rfd, panel = TRUE)
#' }
tidy.radf_obj <- function(x, format = c("wide", "long"), panel = FALSE, ...) {
  # Resolve `format` once up front. (Fix: the original re-ran
  # match.arg(format) a second time inside the panel branch — redundant.)
  format <- match.arg(format)
  if (panel) {
    # Panel case: a single gsadf statistic for the whole panel.
    tbl_radf <- x %>%
      pluck("gsadf_panel") %>%
      enframe(name = NULL, value = "gsadf_panel")
    if (format == "long") {
      tbl_radf <-
        tbl_radf %>%
        gather(name, tstat) %>%
        mutate(
          id = factor("panel"),
          name = factor(name)) %>%
        select(id, name, tstat)
    }
  }else{
    # Series case: one row per series with its adf/sadf/gsadf statistics.
    tbl_radf <-
      x %>%
      keep(names(.) %in% c("adf", "sadf", "gsadf")) %>%
      map(enframe) %>%
      reduce(full_join, by = "name") %>%
      set_names(c("id", "adf", "sadf", "gsadf")) %>%
      mutate(id = factor(id, series_names(x)))
    if (format == "long") {
      tbl_radf <-
        tbl_radf %>%
        gather(name, tstat, -id) %>%
        mutate(name = factor(name, levels = c("adf", "sadf", "gsadf"))) %>%
        arrange(id)
    }
  }
  tbl_radf
}
#' @rdname tidy.radf_obj
#'
#' @importFrom dplyr rename as_tibble everything
#' @importFrom tidyr gather
#' @export
augment.radf_obj <- function(x, format = c("wide", "long"), panel = FALSE, ...) {
  format <- match.arg(format)
  stopifnot(is.logical(panel))
  if (panel) {
    # Panel case: one bsadf_panel sequence indexed by the truncated index.
    tbl_radf <- tibble(
      index = index(x, trunc = TRUE),
      bsadf_panel = pluck(x, "bsadf_panel")
    ) %>%
      add_key(x) %>%
      select(key, index, bsadf_panel)
    if (format == "long") {
      tbl_radf <-
        tbl_radf %>%
        gather(name, tstat, -index, -key, factor_key = TRUE) %>%
        mutate(id = factor("panel")) %>%
        select(key, index, id, name, tstat)
    }
  }else{
    # Series case: gather the badf sequences into long form, then bind the
    # bsadf values column-wise (relies on both matrices sharing the same
    # series order and length).
    tbl_radf <- x %>%
      pluck("badf") %>%
      as_tibble() %>%
      add_key(x) %>%
      mutate(
        index = index(x, trunc = TRUE)
      ) %>%
      gather(id, badf, -index, -key, factor_key = TRUE) %>%
      bind_cols(
        x %>%
          pluck("bsadf") %>%
          as_tibble() %>%
          gather(name, bsadf) %>%
          select(bsadf)
      ) %>%
      select(key, index, id, everything())
    if (format == "long") {
      tbl_radf <-
        tbl_radf %>%
        gather(name, tstat, -index, -id, -key) %>%
        mutate(name = factor(name, levels = c("badf", "bsadf"))) %>%
        arrange(id, name)
    }
  }
  tbl_radf
}
|
8f8c4d3874a2c07f77e39fb9c31da9036004d4c3
|
90b3ff028fc6fb3729933bac757d999454be96d9
|
/06_data.R
|
1b2db78b5947dff0e457cc57758b4788ae4dc62e
|
[] |
no_license
|
bacoon23/polyreg_build_tmp
|
bd986aa658b4c2c6d9427413114a7549c7d98640
|
3aed0fe604ff134bddf08f8d868c03614c214ccf
|
refs/heads/master
| 2020-03-16T08:54:44.795896
| 2018-05-08T12:26:52
| 2018-05-08T12:26:52
| 132,604,863
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 798
|
r
|
06_data.R
|
# Data Workflow, same.
# Simulate the "beerbowl" example dataset (cubic polynomial with Gaussian
# noise) and bundle it into the polyreg package with documentation.
install.packages("triangle")  # NOTE(review): installing inside a script is a side effect
library("triangle")
# 250 triangular-distributed x values plus 250 zeros; response is a cubic
# in x1 with N(0, 18) noise; rows are shuffled before returning.
gen_beerbowl <- function() {
  gen_dat <- data.frame(x1=c(rtriangle(250,0,7,01.2),rep(0,250)))
  gen_dat$x2 <- gen_dat$x1^2
  gen_dat$x3 <- gen_dat$x1^3
  gen_dat$y <- 191.6 + 11.55*gen_dat$x1 + -6*gen_dat$x2 + 0.4*gen_dat$x3 + rnorm(500,0,18)
  return(gen_dat[sample(nrow(gen_dat)),c("x1","x2","x3","y")])
}
beerbowl <- gen_beerbowl()
# NOTE(review): devtools::use_data() moved to usethis::use_data() in newer
# devtools releases — confirm the pinned devtools version still exports it.
devtools::use_data(beerbowl,pkg="R/polyreg")
# Add some documentation
if (tail(strsplit(getwd(),"/")[[1]],1)=="polyreg_build") {
  download.file("http://github.mtv.cloudera.com/raw/mlop/polyreg/master/R/polyreg/R/beerbowl.R",
                "R/polyreg/R/beerbowl.R",quiet = TRUE)
}
devtools::document("R/polyreg")
devtools::load_all("R/polyreg")
head(polyreg::beerbowl)
?beerbowl
|
fd4705d5b6345d738e2fad5685fc7eaa1c9ffa79
|
aea96af9c73c07791e509a65c28fe0f76053007d
|
/describe/R/missingness_functions.R
|
2350dce67801fe524af8d055eab407108044c5e1
|
[] |
no_license
|
jgutman/eduanalytics
|
cb0f4e933575c3cc4eec7fc69b75aa36a3846a97
|
d4cc37bd80fb820350f72a93511b33218c1393b6
|
refs/heads/master
| 2021-11-23T15:37:59.928954
| 2017-10-06T19:50:58
| 2017-10-06T19:50:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,678
|
r
|
missingness_functions.R
|
## FUNCTIONS EXPLORING MISSINGNESS
#' Function to create a matrix with the percent of observations missing in each variable
#' in a data frame or tibble grouped by another variable (generally appl_year)
#'
#' @param dat tibble or data frame
#' @param varname grouping variable (unquoted; captured via tidy evaluation)
#' @param round_digits number of digits for rounding
#'
#' @return a matrix with one row per variable and one column per level of
#'   the grouping variable; entries are percent missing
#' @export
#'
pct_miss_table_single <- function(dat, varname, round_digits = 1) {
  quo_group_by <- enquo(varname)
  # Name of the grouping column; becomes the column labels after transpose.
  colname <- dat %>% select(!!quo_group_by) %>% colnames()
  dat %>%
    group_by(!!quo_group_by) %>%
    summarize_all(funs(mean(is.na(.)))) %>%   # proportion missing per column
    column_to_rownames(colname) %>%
    t() %>% multiply_by(100) %>% round(round_digits)
}
#' Function to create a matrix with the percent of observations missing in each variable
#' in a list of data frames or tibbles grouped by another variable (generally appl_year)
#'
#' @param df_list list of tibbles or data frames
#' @param varname grouping variable (unquoted; forwarded via tidy-eval
#'   unquoting to \code{pct_miss_table_single})
#' @param round_digits number of digits for rounding
#'
#' @return a list of matrices, one per input data frame
#' @export
#'
pct_miss_table <- function(df_list, varname, round_digits = 1) {
  quo_group_by <- enquo(varname)
  df_list %>%
    map(., function(df)
      # Fix: round_digits was accepted but never forwarded, so a caller's
      # rounding choice was silently ignored (default behavior unchanged).
      df %>% pct_miss_table_single(varname = !!quo_group_by,
                                   round_digits = round_digits)
    )
}
## VISUALIZATION FUNCTIONS
#' Function to create an aggr object for a single tibble or data frame
#'
#' @param dat tibble or data frame
#' @param plot a logical indicating whether the results should be plotted
#'
#' @return an aggr object (presumably VIM::aggr — no library() call is
#'   visible in this file; confirm the package is attached by the caller).
#'   if plot=T plots missingness patterns and barplot of missingness proportions.
#' @export
#'
explore_missingness_single <- function(dat, plot = TRUE) {
  miss_obj <- aggr(dat, bars = FALSE, sortVars = TRUE, numbers = TRUE, plot = plot, cex.axis=.8)
  #summary(miss_obj)
}
#' Function to create a list of aggr objects from a list of tibbles
#'
#' @param df_list a list of tibbles or data frames
#' @param plot a logical indicating whether the results should be plotted. if
#'   plot = TRUE, plots missing proportions and patterns and prints
#'   proportions of missings in each var for each tibble in list. if plot = FALSE outputs
#'   only the number of observations missing for each variable. if plot = FALSE and output
#'   is saved to an object, nothing automatically prints
#'
#' @return a list of aggr objects
#' @export
#'
explore_missingness <- function(df_list, plot = TRUE) {
  df_list %>%
    map(., function(df)
      explore_missingness_single(df, plot = plot))
}
## COMPLETE CASES FUNCTIONS ##
#' Function to get the proportion of observations with complete data in a tibble
#'
#' @param dat a tibble or data frame
#' @param varname an optional grouping variable (unquoted); defaults to
#'   \code{appl_year}, which the previous version had hard-coded.
#' @return a data frame with one row per group: the grouping variable and
#'   the percentage of complete cases in that group.
#' @export
get_complete_cases_single <- function(dat, varname = appl_year) {
  # Fix: the original captured `varname` with enquo() but then ignored it
  # and always grouped/selected by the literal appl_year column. Defaulting
  # varname to appl_year preserves existing callers' behavior while making
  # a supplied grouping variable actually take effect.
  quo_group_by <- enquo(varname)
  dat %>% mutate(complete = complete.cases(.)) %>%
    group_by(!!quo_group_by) %>%
    summarize(n = n(), c = sum(complete), pct_complete = c/n * 100) %>%
    select(!!quo_group_by, pct_complete) %>%
    as.data.frame()
}
#' Function to get the proportion of complete observations for each tibble in a
#' list of tibbles or data frames
#'
#' @param df_list a list of tibbles or data frames
#' @param varname an optional grouping variable (unquoted); re-spliced into
#'   each get_complete_cases_single call with `!!` so enquo() inside the
#'   single-table helper recovers the original quosure
#'
#' @return a list of data frames
#' @export
#'
get_complete_cases <- function(df_list, varname) {
  quo_group_by <- enquo(varname)
  #print(quo_group_by)
  df_list %>%
    map(., function(df)
      df %>%
        get_complete_cases_single(varname = !!quo_group_by) %>%
        as.data.frame()
    )
}
|
aafb9282d30ffe5078f5885a441c6f5aea946fca
|
2f5ed17ace2ae9c7a1102617ca1dcc91ae1f2466
|
/R/calc_streak.R
|
1eaba07cf855746523b7004c78b30138be4ab2f2
|
[] |
no_license
|
jbryer/DATA606
|
0b9f79590d257040e997b48a78c3b0c9ce0b006c
|
3c702d4b08af2e2258d54dc31b13ae61a8e29bcd
|
refs/heads/master
| 2023-08-17T04:27:03.710532
| 2023-08-11T14:59:38
| 2023-08-11T14:59:38
| 39,025,976
| 6
| 15
| null | 2022-11-11T22:27:03
| 2015-07-13T17:09:52
|
HTML
|
UTF-8
|
R
| false
| false
| 193
|
r
|
calc_streak.R
|
#' Calculate a streak.
#'
#' @export
calc_streak <- function(x) {
  # Encode each trial as 1 (hit "H") or 0 (anything else), then pad with a
  # zero on both ends so every run of hits is bounded by zeros.
  indicator <- rep(0, length(x))
  indicator[x == "H"] <- 1
  padded <- c(0, indicator, 0)
  # The gap between consecutive zeros, minus one, is the streak length
  # (adjacent zeros -> gap 1 -> streak of length 0).
  zero_positions <- which(padded == 0)
  diff(zero_positions) - 1
}
|
aad2a3692145f63c1fef0790d5af0b2072c5dc8a
|
7b82068433efacf8840c57e2c05b613dbe13d31c
|
/man/get_execution_role.Rd
|
0f51182a93345b976f68f99354089dcc6376b5dc
|
[
"Apache-2.0"
] |
permissive
|
OwenGarrity/sagemaker-r-sdk
|
d25f0d264dcddcb6e0fa248af22d47fc22c159ce
|
3598b789af41ed21bb0bf65bd1b4dfe1469673c9
|
refs/heads/master
| 2022-12-09T04:50:07.412057
| 2020-09-19T13:02:38
| 2020-09-19T13:02:38
| 285,834,692
| 0
| 0
|
NOASSERTION
| 2020-09-19T13:02:39
| 2020-08-07T13:23:16
|
R
|
UTF-8
|
R
| false
| true
| 455
|
rd
|
get_execution_role.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/session.R
\name{get_execution_role}
\alias{get_execution_role}
\title{Return the role ARN whose credentials are used to call the API.}
\usage{
get_execution_role(sagemaker_session = NULL)
}
\arguments{
\item{sagemaker_session}{(Session): Current sagemaker session}
}
\value{
(str): The role ARN
}
\description{
Return the role ARN whose credentials are used to call the API.
}
|
4c4409748da7458b0627019ccfaa8c4e4fad4944
|
742987e658baec8f280792b07253b8e1d7d00bf4
|
/R/LNM.Sim.R
|
276c686bbd74be96f558fa0d84d403228b95f7e7
|
[] |
no_license
|
ZRChao/LRTT
|
ab083de0a8d64f688ac68ff7e2cd2cff39ff10ae
|
47ea5c46adf326e101b86b80a95019182980f178
|
refs/heads/master
| 2020-03-06T14:47:49.748541
| 2018-09-12T11:52:33
| 2018-09-12T11:52:33
| 126,942,320
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,292
|
r
|
LNM.Sim.R
|
#############################################################################
### logistical normal multinomial distribution simulation #####
### Given one tree structure and sample depth, on leafs it will follow#####
### logistical normal multinomial distribution. Correspond to the tree, #####
### choose the different OTU with different mu in case and control, and #####
### then take logit transform as the probability on the leafs count #####
### here, depth is depth is unifrom p*10, p*2000 both case and control #####
### N is sample size for case and control. #####
### The results is the count data on leafs and combine internal node #####
### counts which is the sum of his childs, the colname is correspond to #####
### the tree structure. #####
#############################################################################
#' @importFrom dirmult simPop
#-----------------------------------------------------------------------#####
# Simulate a logistic-normal multinomial OTU count table: N control samples
# (rows 1..N) and N case samples (rows N+1..2N) over p leaf taxa. Case
# samples get shifted means on the taxa indexed by `dif`.
# NOTE(review): rmvnorm() comes from the mvtnorm package and the default for
# `dif` references a global `diff_leaf`; the roxygen header above imports
# dirmult::simPop, which is never used here -- confirm the intended imports.
LNM.Sim = function(p, seed, N, dif = diff_leaf){
  lnm_otutab <- matrix(NA, 2*N, p)
  set.seed(seed)
  # Control group: near-zero means on the p-1 free log-ratio coordinates.
  control_mu <- runif(p-1, -0.00001, 0.00001)
  # need to import mvtnorm
  for(y in 1:N){
    set.seed(seed*y+5)
    control_y <- rmvnorm(1, mean = control_mu, sigma = diag(length(control_mu)))
    # Inverse additive-logistic transform: leaf probabilities, with the p-th
    # leaf serving as the reference category.
    control_z <- c(exp(control_y)/(sum(exp(control_y))+1), 1/(sum(exp(control_y))+1))
    # NOTE(review): this seed does not depend on `seed`, so control
    # sequencing depths repeat across runs with different seeds -- confirm.
    set.seed(y + 1)
    # Sequencing depth drawn uniformly between p*10 and p*2000.
    control_M <- round(runif(1, p*10, p*2000))
    # Controls use deterministic rounding of expected counts.
    lnm_otutab[y, ] <- round(control_z * control_M)
  }
  # Case group: same means except the differential taxa in `dif`, which get
  # effect sizes in [-4,-1] (first half) or [1,4] (second half).
  case_mu <- control_mu
  diff_num <- length(dif)
  set.seed(p)
  dif_mu <- c(runif(round(diff_num/2) ,-4, -1),runif(diff_num -round(diff_num/2) ,1, 4))
  case_mu[dif] <- dif_mu
  for(x in 1:N){
    set.seed(seed*x+3)
    case_y <- rmvnorm(1, mean = case_mu, sigma = diag(length(case_mu)))
    case_z <- c(exp(case_y) / (sum(exp(case_y))+1), 1/(sum(exp(case_y))+1))
    set.seed(seed*x + 1)
    case_M <- round(runif(1, p*10, p*2000))
    set.seed(seed*x + 2)
    # Cases, unlike controls, draw counts from a multinomial distribution.
    lnm_otutab[x+N, ] <- rmultinom(1, case_M, case_z)
  }
  colnames(lnm_otutab) <- as.character(1:p)
  #taxa <- lnm_otutab%*%taxa_index
  #lnm_alltab <- cbind(taxa, lnm_otutab)
  return(lnm_otutab)
}
#############################################################################
|
3d835198de9540570017c85ab70fdf5c5d06d214
|
56a6b27413fc7e5d04c584675a9fbf599467428c
|
/CSV2GIFT.R
|
6dfb9b159afb6fd349648825a84ecc784ce4a39f
|
[] |
no_license
|
dominikfroehlich/CSV2GIFT.R
|
b3336c544d3ba371a26393c8d1a746d1192b4108
|
b2a75e06fb950e9bf0ba3defa2fafa61cf00621e
|
refs/heads/master
| 2020-04-08T19:25:06.252688
| 2018-11-29T11:18:36
| 2018-11-29T11:18:36
| 159,654,387
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,709
|
r
|
CSV2GIFT.R
|
# Convert a CSV of five-option multiple-choice questions into Moodle's GIFT
# import format. Expected columns: Einheit (unit/category), MATNR,
# Question, Answer1..Answer5, TF1..TF5 (1 = correct), FB1..FB5 (feedback).
full <- read.csv2(file = file.choose(new = FALSE))
# NOTE: `cat` shadows base::cat() for the rest of the session.
cat <- "StudentMC1"
output <- paste0(cat, ".gift")
write("\n", file = paste(output), append = FALSE) # deletes file!
for (i in 1:dim(full)[1]) {
  df <- full[i, ]
  attach(df)
  subcat <- ifelse(is.na(Einheit), "uncategorized", Einheit) # currently unused
  # Number of answers flagged correct determines the GIFT credit weights.
  numT <- sum(df[grep(pattern = "TF", x = names(df))])
  if (numT == 5) {
    Tpercent <- "~%20%"
    Fpercent <- "~%0%"
  }
  if (numT == 4) {
    Tpercent <- "~%25%"
    Fpercent <- "~%-100%"
  }
  if (numT == 3) {
    Tpercent <- "~%33.33333%"
    Fpercent <- "~%-50%"
  }
  if (numT == 2) {
    Tpercent <- "~%50%"
    Fpercent <- "~%-33.33333%"
  }
  if (numT == 1) {
    Tpercent <- "~%100%"
    Fpercent <- "~%-25%"
  }
  write(paste("$CATEGORY: ", cat, "/", Einheit, sep = ""), file = paste(output), append = TRUE)
  write("\n", file = paste(output), append = TRUE)
  # Question id: unit + 4 digits of the matriculation number + random suffix.
  qid <- paste(Einheit,
               substr(MATNR, start = 2, stop = 5),
               round(runif(n = 1, min = 1000, max = 9999), 0), sep = "")
  q <- paste("::", qid, "::",
             df$Question, " {",
             ifelse(TF1 == 1, Tpercent, Fpercent), Answer1, "#", FB1,
             ifelse(TF2 == 1, Tpercent, Fpercent), Answer2, "#", FB2,
             ifelse(TF3 == 1, Tpercent, Fpercent), Answer3, "#", FB3,
             ifelse(TF4 == 1, Tpercent, Fpercent), Answer4, "#", FB4,
             ifelse(TF5 == 1, Tpercent, Fpercent), Answer5, "#", FB5,
             "####", "Falls du mehr Informationen benoetigst oder andere Antworten als richtig identifizierst, frage doch im Forum nach! Nenne dabei die FragenID: ", qid, ".",
             "}",
             sep = "")
  write(q, file = paste(output), append = TRUE)
  write("\n", file = paste(output), append = TRUE)
  # Bug fix: the original never called detach(), so each iteration stacked
  # another copy of `df` on the search path, masking earlier rows' columns
  # and leaking attached environments.
  detach(df)
}
|
55f77afc012c3159596fccd911257816fb71d192
|
df1e8f192926f8a38ce24948bc5297d380466d2c
|
/RGCCA/R/define_M_regularisation.R
|
4de92b603b32c2ef13ae9b140e7ce3086335f484
|
[] |
no_license
|
AGloaguen/-MGCCA---Reproducible-Code
|
0f622230e3b0869ab390a9d8a3e8200509e7bdfe
|
2ed68908d416a83aa9aa741477eba737bfcb587f
|
refs/heads/master
| 2020-05-17T08:29:28.548106
| 2019-04-26T12:59:09
| 2019-04-26T12:59:09
| 183,607,206
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,470
|
r
|
define_M_regularisation.R
|
# Build the regularisation operator used by (M)GCCA for one data block.
# For tensor blocks (n_way = TRUE) a projected/whitened matrix P is returned;
# for matrix blocks, the (generalised) inverse regularisation matrix M_inv.
# NOTE(review): relies on helpers defined elsewhere in the package --
# sqrtMatrice() (matrix square root and its inverse), tau.estimate()
# (shrinkage intensity), and ginv() (presumably MASS) -- confirm imports.
define_M_regularisation <- function(M_regularisation, n_way, tau, A, A_m, n, p = NULL, K = NULL, J = NULL, M_K = NULL, M_J = NULL, Proj_K = NULL, Proj_J = NULL) {
  if (n_way){
    switch(M_regularisation,
      ###############################
      ## non_kronecker_RGCCA ##
      ###############################
      "non_kronecker_RGCCA" =
      {
        if (tau == 1){
          # tau = 1: pure "mode B"-style scaling, no covariance whitening.
          P = (n^(-1/2)) * A_m
          return(list(P = P, M_inv = NULL, tau_l = 1))
        }else{
          #Tau optimal or already define
          if (!is.numeric(tau)){
            # One shrinkage estimate per slice (third array dimension).
            tau_l = apply(A, 3, tau.estimate)
          }else{
            tau_l = rep(tau, K)
          }
          # Per-slice inverse square root of tau*I + (1-tau)/n * A'A.
          M_J_sqrt_inv = sapply(1:K, function(x) sqrtMatrice(tau_l[x] * diag(J) + ((1 - tau_l[x])/(n)) * (t(A[, , x]) %*% A[, , x]))$Minv_sqrt, simplify = "array")
          # Whiten each slice, then flatten slices side by side (matricise).
          P = (n^(-1/2)) * sapply(1:K, function(x) A[, , x] %*% M_J_sqrt_inv[, , x], simplify = "array")
          P = t(apply(P, 1, c))
        }
        return(list(P = P, M_inv = NULL, tau_l = tau_l, M_J_sqrt_inv = M_J_sqrt_inv))
      },
      ###############################
      ## kronecker_RGCCA ##
      ###############################
      "kronecker_Identity_RGCCA" =
      {
        # Identity Kronecker regularisation: scaling only.
        P = (n^(-1/2)) * A_m
        return(list(P = P, M_inv = NULL, tau_l = 1))
      },
      ###############################
      ## kronecker_RGCCA ##
      ###############################
      "kronecker_specification" =
      {
        # User-specified mode-wise regularisation matrices M_J and M_K,
        # optionally projected through Proj_J / Proj_K first.
        if (!is.null(Proj_J) && !is.null(Proj_K)){
          M_J_sqrt_inv = sqrtMatrice(t(Proj_J) %*% M_J %*% Proj_J)$Minv_sqrt
          M_K_sqrt_inv = sqrtMatrice(t(Proj_K) %*% M_K %*% Proj_K)$Minv_sqrt
        }else{
          M_J_sqrt_inv = sqrtMatrice(M_J)$Minv_sqrt
          M_K_sqrt_inv = sqrtMatrice(M_K)$Minv_sqrt
        }
        # Whiten along mode J (per slice), then along mode K (per column),
        # restoring the original axis order with aperm, then matricise.
        P = sapply(1:K, function(x) A[, , x] %*% M_J_sqrt_inv, simplify = "array")
        P = aperm(a = sapply(1:J, function(x) P[, x, ] %*% M_K_sqrt_inv, simplify = "array"), perm = c(1, 3, 2))
        P = (n^(-1/2)) * t(apply(P, 1, c))
        return(list(P = P, M_inv = NULL, tau_l = NULL, M_J_sqrt_inv = M_J_sqrt_inv, M_K_sqrt_inv = M_K_sqrt_inv))
      }
    )
  }else{
    # Two-way (matrix) block: return the inverse regularisation matrix.
    if(tau != 1){
      if (!is.numeric(tau)) tau = tau.estimate(A)
      M_inv = ginv(tau * diag(p) + ((1 - tau)/(n)) * (t(A) %*% A))
    }else{
      # tau = 1 needs no inversion (identity regularisation).
      M_inv = NULL
    }
    return(list(P = NULL, M_inv = M_inv, tau_l = tau))
  }
}
|
1ef0b2093d7b4ac224ae06aca3d42d591def3cb0
|
9a808268700a7ddf02c3b11b3820eed269acd9b9
|
/functions.R
|
2b46c025d1c55102c450677f958cdfc4df2951df
|
[] |
no_license
|
wmattbrown/dndhelper
|
92c532b9e2fbbe5d4351df401ea8e18ca9a67de0
|
1ab4b25d83dc90939c9bbb9dc2bd03f6e5a2900c
|
refs/heads/master
| 2022-05-29T12:41:48.356959
| 2020-05-02T20:36:36
| 2020-05-02T20:36:36
| 259,116,249
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,205
|
r
|
functions.R
|
# functions!
# roll <number> <sided> dice
# e.g. roll 3 8 sided dice: roll(8, 3)
# e.g. roll 1 20 sided die: roll(20)
roll <- function(sided = 20, number = 1) {
  # Guard: `number` must be a whole, positive count and `sided` must be one
  # of the standard polyhedral dice sizes.
  stopifnot(
    is.numeric(number),
    number == round(number),
    sided %in% c(4, 6, 8, 10, 12, 20, 100),
    number > 0
  )
  # Draw `number` independent rolls of a `sided`-sided die.
  sample(1:sided, number, replace = TRUE)
}
# given a stat like strength or charisma, get the associated modifier
get_ability_modifier <- function(stat) {
  # An ability score must be a whole number between 1 and 20 inclusive.
  stopifnot(
    is.numeric(stat),
    stat >= 1,
    stat <= 20,
    stat == round(stat)
  )
  # D&D 5e rule: modifier = floor((score - 10) / 2), as an integer.
  as.integer(floor((stat - 10) / 2))
}
get_skill_modifier <- function(character, skill) {
  # Look up which ability backs this skill (global `skill_abilities` map),
  # then add the proficiency bonus when the character is proficient.
  backing_ability <- character[["abilities"]][skill_abilities[skill]]
  prof_bonus <- character[["proficiencies"]][skill] * character[["proficiency"]]
  as.integer(get_ability_modifier(backing_ability) + prof_bonus)
}
# this is NOT vectorized (can't use it in mutate as is, need to use sapply)
# NOTE: not vectorized -- wrap in sapply() when using inside mutate().
get_saving_throw_modifier <- function(character, ability) {
  stopifnot(ability %in% ability_list)
  # Base modifier from the raw ability score, plus the proficiency bonus
  # when the character is proficient in this saving throw.
  base_mod <- get_ability_modifier(character[["abilities"]][[ability]])
  prof_bonus <- character[["proficiency"]] * character[["proficiencies"]][[ability]]
  as.integer(base_mod + prof_bonus)
}
roll_saving_throw <- function(character, ability, roll_value = NA) {
  stopifnot(ability %in% ability_list)
  # Roll a d20 unless the caller supplied a physical roll.
  if (is.na(roll_value)) {
    roll_value <- roll()
  }
  modifier <- get_saving_throw_modifier(character, ability)
  total <- roll_value + modifier
  # HTML-formatted breakdown, e.g. "12 + 3 = <b>15</b>".
  message <- sprintf("%d %s %d = <b>%d</b>",
                     roll_value,
                     if (modifier >= 0) "+" else "-",
                     abs(modifier),
                     total)
  list(message = message,
       value = total)
}
# for use with anything - just need the relevant ability stat and the
# proficiency modifier for the ability
# Generic check usable for anything: takes the raw ability score and the
# proficiency bonus to apply, rolls a d20, and reports the total.
roll_save_check <- function(ability_stat, proficiency) {
  die <- roll()
  modifier <- get_ability_modifier(ability_stat) + proficiency
  total <- die + modifier
  # HTML-formatted breakdown, e.g. "12 + 3 = <b>15</b>".
  msg <- sprintf("%d %s %d = <b>%d</b>",
                 die,
                 if (modifier >= 0) "+" else "-",
                 abs(modifier),
                 total)
  list(message = msg,
       value = total)
}
roll_skill_check <- function(character, skill, roll_value = NA) {
  stopifnot(skill %in% names(skill_abilities))
  # A supplied roll must be a valid d20 result.
  stopifnot(roll_value <= 20 | is.na(roll_value))
  stopifnot(roll_value >= 1 | is.na(roll_value))
  # Roll a d20 unless the caller supplied a physical roll.
  if (is.na(roll_value)) {
    roll_value <- roll()
  }
  modifier <- get_skill_modifier(character, skill)
  # HTML-formatted breakdown, e.g. "12 + 3 = <b>15</b>".
  msg <- sprintf("%s %s %s = <b>%s</b>",
                 roll_value,
                 if (modifier >= 0) "+" else "-",
                 abs(modifier),
                 roll_value + modifier)
  list(message = msg,
       value = as.integer(roll_value + modifier))
}
|
678f8df82b9b093326c2aea4c515ac79cf564b8c
|
435ee8ff8c0f06de1f77edb787d4bd019f4c4b98
|
/Watson-CONVERSATION-Code-Snippet.R
|
bff0940da7a91e371814710c028785727ad51196
|
[
"Apache-2.0"
] |
permissive
|
RudyMartin/R_Scripts_for_Watson
|
8e01734037d404c8f9228146a739723605fda249
|
50c7fdd643e51ecc5461f75a29e57512d8938968
|
refs/heads/master
| 2021-01-14T13:21:30.958126
| 2016-08-27T20:56:59
| 2016-08-27T20:56:59
| 67,254,718
| 1
| 0
| null | 2016-09-02T21:05:05
| 2016-09-02T21:05:04
| null |
UTF-8
|
R
| false
| false
| 3,400
|
r
|
Watson-CONVERSATION-Code-Snippet.R
|
######################################################
### IBM Watson - Code Snippet --- VERY EARLY CODE - API"S NOT FULLY AVAILALBLE YET
### Experimental Code. R Interface for IBM Watson Services
### DOCS: http://www.ibm.com/watson/developercloud/doc/conversation/
### Before you begin you will need (1) An IBM Bluemix demo account (2) CONVERSATION Service tile created on bluemix and (3) Credentials for service
######################################################
library(RCurl) # install.packages("RCurl") # if the package is not already installed
library(httr)
######### Housekeeping And Authentication
# NOTE(review): setwd() with a machine-specific path makes this script
# non-portable; fine for a personal exploration script.
setwd("/Users/ryan/Documents/Service - Conversation") # Set working Directory
getwd()
source("keys.r") ## keys.r defines the actual username/password (and workspace id) for each IBM service.
## Base URLs for IBM Watson APIs
base_url_CONV <- "https://gateway.watsonplatform.net/conversation/api/v1"
workspace_CONV # will look something like "2ded4293-XXXX-4b24-b080-XXXXXXXXXX"
#### AS OF JULY 2016 - RIGHT AFTER LAUNCH - YOU NEED TO USE GUI / BLUEMIX TO LAUNCH UI/UX TOOLING FIRST
# http://www.ibm.com/watson/developercloud/doc/conversation/convo_getstart.shtml
# Preview the message-endpoint URL that will be POSTed to:
paste(base_url_CONV,"/workspaces/",workspace_CONV,"/message?version=2016-07-11",sep="")
# "https://gateway.watsonplatform.net/conversation/api/v1/workspaces/2ded4293-XXXX-4b24-b080-XXXXXXXXXX/message?version=2016-07-11"
# Smoke test: expect HTTP 200
POST(url=paste(base_url_CONV,"/workspaces/",workspace_CONV,"/message?version=2016-07-11",sep=""),authenticate(username_CONV,password_CONV))
## OK - Let's put the response somewhere and take a look:
response <- POST(url=paste(base_url_CONV,"/workspaces/",workspace_CONV,"/message?version=2016-07-11",sep=""),
                 authenticate(username_CONV,password_CONV))
reply <- content(response)
# First text line of the bot's greeting, e.g.:
reply$output$text[1]
# [1] "Welcome to the Tasty Helper! \nWhat are you looking for?"
# (THIS IS WHAT I CREATED WITH THE UX I LAUNCHED FROM TOOLING IN BLUEMIX
# HERE IS WHAT COMES BACK
# content(response)
#
# $context
# $context$conversation_id
# [1] "c7a11109-672d-XXXX-a3f4-XXXXXXXXXXX"
#
# $context$system
# $context$system$dialog_stack
# $context$system$dialog_stack[[1]]
# [1] "node_6_1468893593485"
#
#
# $context$system$dialog_turn_counter
# [1] 1
#
# $context$system$dialog_request_counter
# [1] 1
#
#
#
# $intents
# list()
#
# $entities
# list()
#
# $input
# named list()
#
# $output
# $output$log_messages
# list()
#
# $output$text
# $output$text[[1]]
# [1] "Welcome to the Tasty Helper! \nWhat are you looking for?"
#
#
# $output$nodes_visited
# $output$nodes_visited[[1]]
# [1] "node_6_1468893593485"
### CODE INCOMPLETE - AWAITING MORE API FUNCTIONALITY
### CODE INCOMPLETE - AWAITING MORE API FUNCTIONALITY
# once Python / CURL examples of interacting from code - can engage more....
# https://watson-api-explorer.mybluemix.net/apis/conversation-v1#!/message/post_v1_workspaces_workspace_id_message
# http://www.ibm.com/watson/developercloud/conversation/api/v1/?node#send_input
## Send an input message this time and inspect the raw response:
response <- POST(url=paste(base_url_CONV,"/workspaces/",workspace_CONV,"/message?version=2016-07-11",sep=""),
                 authenticate(username_CONV,password_CONV),
                 add_headers("input"="text"),
                 body="I like coffee" )
content(response) #gives a 200, but not really useful yet.
|
ee1188a33b3cbe7a68011de86ceb432fbb8590f4
|
fd194cce7c398cddba8dfc7dfaf75dc3a10f1bb7
|
/data-raw/process_data_sets_maggie.R
|
693cf3ee1a90ab8fc54a6177ee6e6cc3c0098c5a
|
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
Starryz/fivethirtyeight
|
16241e7f3fc5939ebb01b0676e89a05c1bb08f5c
|
1c1cb1f92caa9802fc6086bd1af0d45bebb5a10f
|
refs/heads/master
| 2020-05-31T12:23:33.192568
| 2019-09-20T07:06:10
| 2019-09-20T07:06:10
| 190,279,423
| 1
| 0
|
NOASSERTION
| 2019-09-20T07:06:12
| 2019-06-04T21:07:20
|
R
|
UTF-8
|
R
| false
| false
| 11,415
|
r
|
process_data_sets_maggie.R
|
library(tidyverse)
library(janitor)
library(usethis)
# nba-carmelo---------------------------------------------------------------------
# FiveThirtyEight NBA Elo/CARMELO game data: factor the team codes and turn
# the playoff and neutral-site flags into logicals.
nba_carmelo <- read_csv("https://projects.fivethirtyeight.com/nba-model/nba_elo.csv") %>%
  clean_names() %>%
  mutate(
    team1 = as.factor(team1),
    team2 = as.factor(team2),
    # Bug fix: the original followed `ifelse(playoff == "t", TRUE, FALSE)`
    # with `ifelse(is.na(playoff), FALSE, TRUE)`, which mapped every non-NA
    # value (including FALSE) to TRUE, making the == "t" test dead code.
    # TRUE only when the raw flag is "t"; NA (regular season) becomes FALSE.
    playoff = !is.na(playoff) & playoff == "t",
    neutral = ifelse(neutral == 1, TRUE, FALSE)
  ) %>%
  # Given that data frame is large, only include preview of data in package:
  slice(1:10)
usethis::use_data(nba_carmelo, overwrite = TRUE)
# nfl-elo---------------------------------------------------------------------
# FiveThirtyEight NFL Elo game data: clean the names, factor the team codes,
# and turn the neutral-site indicator into a logical.
nfl_elo <-
  "https://projects.fivethirtyeight.com/nfl-api/nfl_elo.csv" %>%
  read_csv() %>%
  clean_names() %>%
  mutate(
    team1 = as.factor(team1),
    team2 = as.factor(team2),
    neutral = ifelse(neutral == 1, TRUE, FALSE)
  ) %>%
  # Given that data frame is large, only include preview of data in package:
  slice(1:10)
usethis::use_data(nfl_elo, overwrite = TRUE)
# nfl-fandom---------------------------------------------------------------------
# Google Trends data: sports search interest plus 2016 Trump vote share.
# "%" signs are stripped and the columns converted to numeric.
nfl_fandom_google <- read_csv("data-raw/nfl-fandom/NFL_fandom_data-google_trends.csv", skip=1) %>%
  clean_names() %>%
  rename(
    trump_2016_vote = 'trump_2016_votepercent'
  ) %>%
  mutate(
    nfl = as.numeric(str_replace_all(nfl, "%", "")),
    nba = as.numeric(str_replace_all(nba, "%", "")),
    mlb = as.numeric(str_replace_all(mlb, "%", "")),
    nhl = as.numeric(str_replace_all(nhl, "%", "")),
    nascar = as.numeric(str_replace_all(nascar, "%", "")),
    cbb = as.numeric(str_replace_all(cbb, "%", "")),
    cfb = as.numeric(str_replace_all(cfb, "%", "")),
    trump_2016_vote = as.numeric(str_replace_all(trump_2016_vote, "%", ""))
  )
usethis::use_data(nfl_fandom_google, overwrite = TRUE)
# SurveyMonkey data: fan demographics by NFL team. Note the *_percent
# columns are left as character after stripping "%" (not converted to
# numeric) -- presumably intentional; TODO confirm.
nfl_fandom_surveymonkey <- read_csv(
  "data-raw/nfl-fandom/NFL_fandom_data-surveymonkey.csv",
  skip=1
) %>%
  clean_names() %>%
  rename(
    total_respondents = 'tot_respondents',
    gop_percent = 'goppercent',
    dem_percent = 'dempercent',
    ind_percent = 'indpercent',
    white_percent = 'whitepercent',
    nonwhite_percent = 'nonwhitepercent',
    asian_dem = 'asian',
    black_dem = 'black',
    hispanic_dem = 'hispanic',
    other_dem = 'other',
    white_dem = 'white',
    total_dem = 'total'
  ) %>%
  mutate(
    team = as.factor(team),
    gop_percent = str_replace_all(gop_percent, "%", ""),
    dem_percent = str_replace_all(dem_percent, "%", ""),
    ind_percent = str_replace_all(ind_percent, "%", ""),
    white_percent = str_replace_all(white_percent, "%", ""),
    nonwhite_percent = str_replace_all(nonwhite_percent, "%", "")
  )
# Remap the numeric suffixes (apparently added to disambiguate duplicate
# column names) to meaningful ones: "_1" -> "_ind", "_2" -> "_gop".
colnames(nfl_fandom_surveymonkey) <- colnames(nfl_fandom_surveymonkey) %>%
  str_replace_all(pattern="_1", replacement="_ind") %>%
  str_replace_all(pattern="_2", replacement="_gop")
usethis::use_data(nfl_fandom_surveymonkey, overwrite = TRUE)
# puerto-rico-media---------------------------------------------------------------------
# NOTE(review): the date format strings below contain spaces around "/"
# ("%m / %d / %y"); confirm the raw CSVs really use that layout.
# Data on Google trend searches for hurricanes Harvey, Irma, Jose, and Maria
google_trends <- read_csv("data-raw/puerto-rico-media/google_trends.csv") %>%
  clean_names() %>%
  rename(
    date = day,
    hurricane_harvey_us = hurricane_harvey_united_states,
    hurricane_irma_us = hurricane_irma_united_states,
    hurricane_maria_us = hurricane_maria_united_states,
    hurricane_jose_us = hurricane_jose_united_states)
usethis::use_data(google_trends, overwrite = TRUE)
# Data on the number of sentences per day that mention Hurricanes Harvey, Irma,
# Jose, and Maria in online news
mediacloud_hurricanes <- read_csv("data-raw/puerto-rico-media/mediacloud_hurricanes.csv") %>%
  clean_names() %>%
  mutate(
    date = as.Date(date, format= "%m / %d / %y")
  )
usethis::use_data(mediacloud_hurricanes, overwrite = TRUE)
# Data on the number of sentences per day
# that mention Puerto Rico, Texas, and Florida in online news
mediacloud_states <- read_csv("data-raw/puerto-rico-media/mediacloud_states.csv") %>%
  clean_names() %>%
  mutate(
    date = as.Date(date, format= "%m / %d / %y")
  )
usethis::use_data(mediacloud_states, overwrite = TRUE)
# A list of sources included in Media Cloud's "U.S. Top Online News" collection
mediacloud_online_news <- read_csv("data-raw/puerto-rico-media/mediacloud_top_online_news.csv")
usethis::use_data(mediacloud_online_news, overwrite = TRUE)
# Data on the number of headlines that mention Puerto Rico, Texas, and Florida,
# as well as headlines that mention each location and 'President' or 'Trump'.
mediacloud_trump <- read_csv("data-raw/puerto-rico-media/mediacloud_trump.csv") %>%
  clean_names() %>%
  rename(
    puerto_rico = title_puerto_rico,
    puerto_rico_and_trump = title_puerto_rico_and_title_trump_or_title_president,
    florida = title_florida,
    florida_and_trump = title_florida_and_title_trump_or_title_president,
    texas = title_texas,
    texas_and_trump = title_texas_and_title_trump_or_title_president
  )
usethis::use_data(mediacloud_trump, overwrite = TRUE)
# Data on the percent of sentences per day in TV News that mention Hurricanes
# Harvey, Irma, Jose, and Maria.
tv_hurricanes <- read_csv("data-raw/puerto-rico-media/tv_hurricanes.csv") %>%
  clean_names() %>%
  mutate(
    date = as.Date(date, format= "%m / %d / %y")
  )
usethis::use_data(tv_hurricanes, overwrite = TRUE)
# Data on the percent of sentences per day in TV News that mention Hurricanes
# Harvey, Irma, Jose, and Maria by network.
tv_hurricanes_by_network <- read_csv("data-raw/puerto-rico-media/tv_hurricanes_by_network.csv") %>%
  clean_names() %>%
  mutate(
    date = as.Date(date, format= "%m / %d / %y"),
    query = as.factor(query)
  )
usethis::use_data(tv_hurricanes_by_network, overwrite = TRUE)
# Data on the percent of sentences per day in TV News that mention Puerto Rico,
# Texas, and Florida.
tv_states <- read_csv("data-raw/puerto-rico-media/tv_states.csv") %>%
  clean_names() %>%
  mutate(date = as.Date(date, format= "%m / %d / %y"))
usethis::use_data(tv_states, overwrite = TRUE)
# riddler-pick-lowest---------------------------------------------------------------------
# Riddler "pick the lowest unique number" submissions, names cleaned only.
riddler_pick_lowest <- read_csv("data-raw/riddler-pick-lowest/low_numbers.csv") %>%
  clean_names()
usethis::use_data(riddler_pick_lowest, overwrite = TRUE)
# sandy-311-calls---------------------------------------------------------------------
# Daily 311 call volumes around Hurricane Sandy; rename the awkward
# clean_names() output to readable column names.
# NOTE(review): date format contains spaces around "/" -- confirm raw CSV.
sandy_311 <- read_csv("data-raw/sandy-311-calls/sandy-311-calls-by-day.csv") %>%
  clean_names() %>%
  rename(
    nyc_311 = nyc_3_1_1,
    nyc_service = nycservice,
    nys_emergency_mg = nysemergencymg
  ) %>%
  mutate(date = as.Date(date, format= "%m / %d / %y"))
usethis::use_data(sandy_311, overwrite = TRUE)
# trump-approval-ratings---------------------------------------------------------------------
# Individual approval polls: convert flags to logical, parse dates, order
# the pollster grades, then rename to snake_case and drop internal columns.
trump_approval_poll <- read_csv("https://projects.fivethirtyeight.com/trump-approval-data/approval_polllist.csv") %>%
  mutate(
    # Bug fix: the original followed this with
    #   multiversions = ifelse(is.na(multiversions), FALSE, TRUE)
    # which mapped every non-NA value (including FALSE) to TRUE, making the
    # `== "*"` test dead code. TRUE only when the raw flag is "*".
    multiversions = !is.na(multiversions) & multiversions == "*",
    tracking = ifelse(is.na(tracking), FALSE, TRUE),
    subgroup = as.factor(subgroup),
    # NOTE(review): format strings contain spaces around "/" -- confirm the
    # raw CSV really uses that layout.
    modeldate = as.Date(modeldate, format = "%m / %d / %Y"),
    startdate = as.Date(startdate, format = "%m / %d / %Y"),
    enddate = as.Date(enddate, format = "%m / %d / %Y"),
    pollster = as.factor(pollster),
    # Ordered factor with A+ as the highest grade.
    grade = factor(grade, levels = rev(c("A+", "A", "A-", "B+", "B", "B-", "C+", "C", "C-")), ordered = TRUE),
    population = as.factor(population),
    url = as.factor(url),
    createddate = as.Date(createddate, format = "%m / %d / %Y"),
    timestamp = as.POSIXct(timestamp, tz = "GMT", format = "%H:%M:%S %d %b %Y")
  ) %>%
  rename(
    model_date = modeldate,
    start_date = startdate,
    end_date = enddate,
    sample_size = samplesize,
    created_date = createddate
  ) %>%
  select(-c(president, model_date, influence))
usethis::use_data(trump_approval_poll, overwrite = TRUE)
# Smoothed approval-rating topline series published alongside the polls.
trump_approval_trend <- read_csv("https://projects.fivethirtyeight.com/trump-approval-data/approval_topline.csv") %>%
  clean_names() %>%
  mutate(
    president = as.factor(president),
    subgroup = as.factor(subgroup),
    # NOTE(review): format string contains spaces around "/" -- confirm.
    modeldate = as.Date(modeldate, format = "%m / %d / %Y"),
    timestamp = as.POSIXct(timestamp, tz = "GMT", format = "%H:%M:%S %d %b %Y")
  ) %>%
  rename(
    approve_high = approve_hi,
    approve_low = approve_lo,
    disapprove_high = disapprove_hi,
    disapprove_low = disapprove_lo
  ) %>%
  # Drop `president` (presumably constant for this dataset -- TODO confirm).
  select(-c(president))
usethis::use_data(trump_approval_trend, overwrite = TRUE)
# trump-world-trust---------------------------------------------------------------------
# Each TRUMPWORLD-issue-N.csv holds one survey issue. Read all five, tag
# each with its issue number, and stack them. (Replaces five copy-pasted
# stanzas with a single helper -- output is identical.)
read_trumpworld_issue <- function(issue_number) {
  read_csv(paste0("data-raw/trump-world-trust/TRUMPWORLD-issue-", issue_number, ".csv")) %>%
    clean_names() %>%
    mutate(
      country = as.factor(country),
      # as.numeric keeps the column a double, matching the original literals.
      issue = as.numeric(issue_number)
    )
}
trumpworld_issues <- bind_rows(map(1:5, read_trumpworld_issue))
usethis::use_data(trumpworld_issues, overwrite = TRUE)
# NOTE(review): the question labels below look swapped relative to the file
# names ("pres" -> "Favorable view of US", "us" -> "Trust President");
# preserved as-is, but worth confirming against the raw data.
trumpworld_pres <- read_csv("data-raw/trump-world-trust/TRUMPWORLD-pres.csv") %>%
  clean_names() %>%
  mutate(question = "Favorable view of US")
trumpworld_us <- read_csv("data-raw/trump-world-trust/TRUMPWORLD-us.csv") %>%
  clean_names() %>%
  mutate(question = "Trust President")
trumpworld_polls <- bind_rows(trumpworld_pres, trumpworld_us)
usethis::use_data(trumpworld_polls, overwrite = TRUE)
# twitter-ratio---------------------------------------------------------------------
# Tweets by Obama and Trump: timestamps parsed as GMT, and non-ASCII
# characters (emoji etc.) stripped from the tweet text.
barack_obama <- read_csv("data-raw/twitter-ratio/BarackObama.csv") %>%
  mutate(
    created_at = as.POSIXct(created_at, tz = "GMT", format = "%m/%d/%Y %H:%M"),
    text = gsub("[^\x01-\x7F]", "", text)
  )
real_donald_trump <- read_csv("data-raw/twitter-ratio/realDonaldTrump.csv") %>%
  mutate(
    created_at = as.POSIXct(created_at, tz = "GMT", format = "%m/%d/%Y %H:%M"),
    text = gsub("[^\x01-\x7F]", "", text)
  )
twitter_presidents <- bind_rows(barack_obama, real_donald_trump) %>%
  select(created_at, user, everything())
usethis::use_data(twitter_presidents, overwrite=TRUE)
# Senators' tweets; only a 10-row preview is shipped in the package.
senators <- read_csv("data-raw/twitter-ratio/senators.csv") %>%
  mutate(
    party = as.factor(party),
    state = as.factor(state),
    created_at = as.POSIXct(created_at, tz = "GMT", format = "%m/%d/%Y %H:%M"),
    text = gsub("[^\x01-\x7F]", "", text)
  ) %>%
  select(created_at, user, everything()) %>%
  slice(1:10)
usethis::use_data(senators, overwrite = TRUE)
# undefeated-boxers---------------------------------------------------------------------
undefeated <- read_csv("data-raw/undefeated-boxers/undefeated.csv")
usethis::use_data(undefeated, overwrite = TRUE)
|
c56386485865ca3d193ebb5b00f83d6cab2280b7
|
54054ff32cea5b78942ec2e259a630067c1aa2cd
|
/cachematrix.R
|
30c3d5cebc6a4703d10bf1b056d7aacc75cc0dff
|
[] |
no_license
|
JLovr/ProgrammingAssignment2
|
4fd9a5315dfa90a31b0e3d7887fd069ac692c8c7
|
ab6f87198dfe2d604b935524bc3d3e506975f9eb
|
refs/heads/master
| 2021-01-21T06:02:23.094706
| 2015-01-16T06:07:57
| 2015-01-16T06:07:57
| 29,264,092
| 0
| 0
| null | 2015-01-14T20:25:37
| 2015-01-14T20:25:37
| null |
UTF-8
|
R
| false
| false
| 997
|
r
|
cachematrix.R
|
## creates a cached list that contains a matrix and its inverse
## which are employed by cachSolve.
## Create a cache object wrapping a matrix and (lazily) its inverse.
## Returns a list of accessors used by cacheSolve().
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; reset to NULL whenever the matrix is replaced.
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
## finds the inverse of an invertible matrix, using cached
## values if available, else calculating and caching the new value
## in a cached list managed by makeCacheMatrix.
## Generates an error if matrix is not invertible.
## Return the inverse of the matrix wrapped by `x` (a makeCacheMatrix
## object), computing and caching it on first use. Errors if the matrix
## is not invertible (propagated from solve()).
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: compute, store, and return the fresh inverse.
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
    return(inv)
  }
  message("getting cached data")
  cached
}
|
0b9156fe7d6baafc586178b4ac6f8e288695ed9b
|
863aa7e71911423a9096c82a03ef755d1cf34654
|
/man/get_type.Rd
|
927144ef983a9012d244dda896c811fb9f031092
|
[] |
no_license
|
BioSystemsUM/specmine
|
8bd2d2b0ee1b1db9133251b80724966a5ee71040
|
13b5cbb73989e1f84e726dab90ff4ff34fed68df
|
refs/heads/master
| 2023-08-18T05:51:53.650469
| 2021-09-21T13:35:11
| 2021-09-21T13:35:11
| 313,974,923
| 1
| 1
| null | 2021-09-21T13:35:12
| 2020-11-18T15:22:49
|
R
|
UTF-8
|
R
| false
| false
| 721
|
rd
|
get_type.Rd
|
\name{get_type}
\alias{get_type}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Get type of data
}
\description{
Get the type of the data from the dataset
}
\usage{
get_type(dataset)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dataset}{
list representing the dataset from a metabolomics experiment.
}
}
\value{
Returns a string with the type of the data.
}
\examples{
## Example of getting the type of the data
library(specmine.datasets)
data(cachexia)
type = get_type(cachexia)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ type }
\keyword{ dataset }% __ONLY ONE__ keyword per line
|
a9632fac1518ecf88f06826d0f47868900a90dc1
|
db72f30a7e160279bda10af408ce82d7bb905d0e
|
/R/ALLOCATION.R
|
2bbbb3021c483cae56613a68b846d640ffa18e5d
|
[] |
no_license
|
NeveTong/mySIDES
|
3fe563cc73f4903eb4f5a4e6fefce26b30b74360
|
ab3c4acf6bebcfd9db03bb1cc0f2a010eb2c4c56
|
refs/heads/main
| 2023-05-28T21:34:09.971860
| 2021-06-14T02:36:39
| 2021-06-14T02:36:39
| 376,684,419
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,979
|
r
|
ALLOCATION.R
|
library(nnet)
allocation_procedure = function(H, pct_random, Xcov, type_var, prop_gpe, alloc_hp=TRUE, overall_imb=FALSE, seed=NA){
if(is.na(seed)==FALSE){
set.seed(seed)
}
nb_patients = nrow(Xcov)
if(H > 1){
X = Xcov
ind_cont = which(type_var=="continuous")
nb_cont = length(ind_cont)
if(nb_cont>0){
for(j in 1:nb_cont){
quant = quantile(Xcov[,ind_cont[j]], probs = seq(0, 1, 0.33))
q33 = as.numeric(quant[2])
q66 = as.numeric(quant[3])
for(i in 1:nb_patients){
if(Xcov[i,ind_cont[j]] < q33){
X[i,ind_cont[j]] = 0
}
else if(Xcov[i,ind_cont[j]] >= q66){
X[i,ind_cont[j]] = 2
}
else{
X[i,ind_cont[j]] = 1
}
}
}
}
nb_pat_max_gpe = round(prop_gpe*nb_patients)
J = ncol(X)
set_alloc = rep(NA,nb_patients)
nb_pat_random = max(round(nb_patients*pct_random),1)
nb_pat_remain = nb_patients-nb_pat_random
# Allocation of "pct_random" of the sample size randomly between the H sets
sets = 1:H
set_full = numeric(0)
for(s in 1:nb_pat_random){
set_cur = 1:H
alloc_cur = table(set_alloc)
set_allocated = as.numeric(names(alloc_cur))
pat_allocated = numeric(H)
pat_allocated[set_allocated] = as.numeric(alloc_cur[paste(set_allocated)])
prob_alloc = prop_gpe
if(length(set_allocated)>0){
set_full = which(pat_allocated >= nb_pat_max_gpe)
if(length(set_full)>0){
set_cur = sets[-set_full]
prob_alloc = prop_gpe[-set_full]
}
}
if(length(set_cur)>1){
set_alloc[s] = sample(set_cur, 1, replace=TRUE, prob=prob_alloc)
}
else{
set_alloc[s] = set_cur
}
}
# Allocation based on imbalanced score
if(nb_pat_remain>0){
for(s in (nb_pat_random+1):nb_patients){
covariates_cur = as.numeric(X[s,])
alloc_cur = table(set_alloc)
set_allocated = as.numeric(names(alloc_cur))
set_cur = 1:H
set_full = numeric(0)
pat_allocated = numeric(H)
pat_allocated[set_allocated] = as.numeric(alloc_cur[paste(set_allocated)])
if(length(set_allocated)>0){
set_full = which(pat_allocated >= nb_pat_max_gpe)
}
if(length(set_full) == H){
set_full = which(pat_allocated >= nb_pat_max_gpe+1)
}
if(length(set_full)>0){
set_cur = set_cur[-set_full]
}
H_cur = length(set_cur)
if(H_cur > 1){
f_hij = array(0, dim=c(H_cur,H_cur,J))
for(i in 1:H_cur){
n_i_cur = length(which(set_alloc==set_cur[i]))
if(n_i_cur > 0){
for(j in 1:J){
n_ij_cur = length(which(X[,j]==covariates_cur[j] & set_alloc==set_cur[i]))
f_hij[,i,j] = (n_ij_cur + (set_cur[i]==set_cur)) / (n_i_cur + (set_cur[i]==set_cur))
}
}
else if(n_i_cur == 0){
for(h in 1:H_cur){
f_hij[h,h,] = rep(1,J)
}
}
}
d_jh_cur = matrix(NA, nrow=H_cur, ncol=J)
for(h in 1:H_cur){
for(j in 1:J){
d_jh_cur[h,j] = max(f_hij[h,,j])-min(f_hij[h,,j])
}
}
d_h = rowSums(d_jh_cur)
d_tot = sum(d_h)
f_h = 1/(H_cur-1) * (1 - d_h/d_tot)
if(alloc_hp==TRUE){
set_alloc[s] = set_cur[which.is.max(f_h)]
}
else{
set_alloc[s] = sample(set_cur, 1, replace=TRUE, prob=f_h)
}
}
else{
set_alloc[s] = set_cur
}
}
}
# Overall imbalanced score
if(overall_imb){
nb_levels_cov = numeric(J)
for(j in 1:J){
nb_levels_cov[j] = length(unique(X[,j]))
}
f_end_ijl = array(NA, dim=c(H, J, max(nb_levels_cov)))
for(i in 1:H){
for(j in 1:J){
levels_cov = unique(X[,j])
for(l in 1:nb_levels_cov[j]){
f_end_ijl[i,j,l] = length(which(X[,j]==levels_cov[l] & set_alloc==i))/length(which(set_alloc==i))
}
}
}
djl = matrix(NA, nrow=J, ncol=max(nb_levels_cov))
for(j in 1:J){
for(l in 1:nb_levels_cov[j]){
djl[j,l] = max(f_end_ijl[,j,l],na.rm=TRUE)-min(f_end_ijl[,j,l],na.rm=TRUE)
}
}
d_j = numeric(J)
for(j in 1:J){
d_j[j] = max(djl[j,],na.rm=TRUE)
}
overall_score = 1/J*sum(d_j)
return(list(set_alloc,overall_score))
}
else{
return(set_alloc)
}
}
else{
return(rep(1, nb_patients))
}
}
|
f94e86bb2b92d27f6ae09c8c83e565d7a1addabe
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ptstem/examples/stem_modified_hunspell.Rd.R
|
551090fb49a50f4ac2828635e13b547d31c0f890
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 278
|
r
|
stem_modified_hunspell.Rd.R
|
library(ptstem)
### Name: stem_modified_hunspell
### Title: Stemming with small modification of Hunspell
### Aliases: stem_modified_hunspell
### ** Examples
words <- c("balões", "aviões", "avião", "gostou", "gosto", "gostaram")
ptstem:::stem_modified_hunspell(words)
|
a6af62a8298d9395f8ed85fba5ba3b884a9cd53d
|
6a28ba69be875841ddc9e71ca6af5956110efcb2
|
/Numerical_Methods_In_Finance_And_Economics:_A_Matlab-Based_Introduction_by_Paolo_Brandimarte/CH8/EX8.8/Page_445_ExchangeMC.R
|
b3b170bc90200695664986fdf3ea489135bf16d0
|
[] |
permissive
|
FOSSEE/R_TBC_Uploads
|
1ea929010b46babb1842b3efe0ed34be0deea3c0
|
8ab94daf80307aee399c246682cb79ccf6e9c282
|
refs/heads/master
| 2023-04-15T04:36:13.331525
| 2023-03-15T18:39:42
| 2023-03-15T18:39:42
| 212,745,783
| 0
| 3
|
MIT
| 2019-10-04T06:57:33
| 2019-10-04T05:57:19
| null |
UTF-8
|
R
| false
| false
| 907
|
r
|
Page_445_ExchangeMC.R
|
require(fBasics)
norm.interval = function(data, variance = var(data), conf.level = 0.95) {
z = qnorm((1 - conf.level)/2, lower.tail = FALSE)
xbar = mean(data)
sdx = sqrt(variance/length(data))
c(xbar - z * sdx, xbar + z * sdx)
}
f <- function(r,T,VT,UT) {
exp(-r*T)*max(VT-UT, 0)
}
ExchangeMC <- function(V0,U0,sigmaV,sigmaU,rho,T,r,NRepl) {
eps1 = rnorm(NRepl)
eps2 = rho*eps1 + sqrt(1-rho^2)*rnorm(NRepl)
VT = V0*exp((r - 0.5*sigmaV^2)*T + sigmaV*sqrt(T)*eps1)
UT = U0*exp((r - 0.5*sigmaU^2)*T + sigmaU*sqrt(T)*eps2)
DiscPayoff = matrix()
for(i in 1:length(VT)){
DiscPayoff[i] = f(r,T,VT[i],UT[i])
}
parameter_estimation<-.normFit(DiscPayoff)
ci<-norm.interval(DiscPayoff)
return(c(parameter_estimation,ci))
}
VO = 50
UO = 60
sigmaV = 0.3
sigmaU = 0.4
rho = 0.7
T = 5/12
r = 0.05
NRepl = 200000
ExchangeMC(VO,UO,sigmaV,sigmaU,rho,T,r,NRepl)
|
1762d759f2b4153784cdb8af0b69520ac3c76842
|
685adb82b0ef76319c7d0e5fe4cb9aabae82367a
|
/man/MLD.Rd
|
f3facf0b25d36b23d463b2da479444c5d073f2ed
|
[] |
no_license
|
scarpino/binequality
|
6474cc7a520b414dd622437582fe1d8e8fcbc3b7
|
c810d3e5f066bfa8e1b67edbe8b06bc289f380b0
|
refs/heads/master
| 2021-01-19T02:19:01.663501
| 2018-11-05T14:04:13
| 2018-11-05T14:04:13
| 37,625,137
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 486
|
rd
|
MLD.Rd
|
\name{MLD}
\alias{MLD}
\title{
A function to calculate the MLD
}
\description{
This fuction calculates MLD
}
\usage{
MLD(samps)
}
\arguments{
\item{samps}{
a (non-empty) numeric vector of values to calculate MLD over, for example, bin mid points or samples take from a fitted distribution.
}
}
\details{
FIXME - equations
}
\value{
returns a numeric value representing the MLD
}
\references{
FIXME - references
}
\examples{
MLD(qnorm(seq(0.001,0.999,length.out = 10), mean = 100))
}
|
97b990f2837350a31df32ae61bead64e2b5e8222
|
15fe81660db70c06d112157ae21fed7303ea9194
|
/dataGeneration/Pic_Selector.R
|
1475dc6628c10518d068ad73062eae93eb324997
|
[] |
no_license
|
bStrangerman/ie332group11
|
d7dcf3ed7cc694b20545a9cd31560f9787165667
|
4febd1e850b2fa09f437df6f76008cc9444e4209
|
refs/heads/master
| 2020-03-29T16:59:25.070299
| 2018-12-18T18:01:27
| 2018-12-18T18:01:27
| 150,137,731
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 405
|
r
|
Pic_Selector.R
|
# This script assigns a random number of pictures to each warehouse in the database
num_of_warehouse <- 932
num_of_pictures <- 150
warehouse <- 1:num_of_warehouse
pictures <- 1:num_of_pictures
warehouse_pic <- matrix(0,1,200)
for (i in 1:length(warehouse)){
z <- sample(seq(1,150),1, replace = TRUE)
warehouse_pic[i] <- pictures[z]
print(paste0("(",i,",",z,")")) # print out the sql insert into values
}
|
992987c297f874f7b91d35080a1f73466a81e0e8
|
eb6eaba44c1d54cb301ec351898ecb1a71591a11
|
/man/webchem-deprecated.Rd
|
a53248e48584cf418cda379b266ef68c8650e032
|
[
"MIT"
] |
permissive
|
ropensci/webchem
|
b93a02272b134c2e22586ba04a9be4941cc33039
|
7cb91fd7ca653a176f5acf0e127eb27dc55bc7ab
|
refs/heads/master
| 2023-08-25T17:47:31.939888
| 2023-08-15T16:49:20
| 2023-08-15T16:49:20
| 31,718,339
| 150
| 62
|
NOASSERTION
| 2023-08-03T20:01:00
| 2015-03-05T14:41:50
|
R
|
UTF-8
|
R
| false
| true
| 708
|
rd
|
webchem-deprecated.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/webchem-deprecated.R
\name{webchem-deprecated}
\alias{webchem-deprecated}
\alias{cid_compinfo}
\alias{aw_query}
\title{Deprecated function(s) in the webchem package}
\usage{
cid_compinfo(...)
aw_query(...)
}
\arguments{
\item{...}{Parameters to be passed to the modern version of the function}
}
\description{
These functions are provided for compatibility with older version of
the webchem package. They may eventually be completely
removed.
}
\details{
Deprecated functions are:
\tabular{rl}{
\code{pc_prop} \tab was formerly \code{\link{cid_compinfo}}\cr
\code{bcpc_query} \tab was formerly \code{\link{aw_query}}\cr
}
}
|
8a1023b5bfe304e23acb159d2f556a157950e7c7
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/genepop/inst/genepop-shiny/opts/opt3ct.R
|
a37dcdbfcfcc40d34f9d6f06102e26456238addd
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 610
|
r
|
opt3ct.R
|
opt33 <- eventReactive(input$RunOpt33, {
cat("Opt33\n")
dem = input$Dememo33
nbatchs = input$Nbatches33
niters = input$Niters33
ficout = tempfile()
ficin = GenepopFile()$datapath
cat(ficin)
setRandomSeed(getSeed(input$randomSeed))
RPDGenotypicAllPopulationDifferentiation(ficin, outputFile = ficout, dememorization = dem, batches = nbatchs, iterations = niters)
file.rename("cmdline.txt", "cmdline.old")
ficout
})
output$Opt33out <- renderText({
filePath <- opt33()
fileText <- paste(readLines(filePath), collapse = "\n")
fileText
})
|
30e5d41ae54dc246d2ed654fc3fb112f00fda3c1
|
2bc59a2d2a9b7562e66b1108b7ff87c2aee1a506
|
/ch08/ch08_1_적합도.R
|
910a55ba225fdc0f22c4a602f870344ea6062d13
|
[] |
no_license
|
ckiekim/R-Statistics
|
4bb78296b9e59761bdfac63433a44abf19c4e386
|
d7b6f1bb32a15b310254e524ab4cf277a124a6f0
|
refs/heads/master
| 2020-06-05T03:35:42.388304
| 2019-07-05T08:35:39
| 2019-07-05T08:35:39
| 192,299,631
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,190
|
r
|
ch08_1_적합도.R
|
# 8장. 범주형 자료분석
# 8-1. 적합도 검정
# 그림 8-1
x <- seq(0, 15, by=0.01)
dc <- dchisq(x, df=3)
alpha <- 0.05
tol <- qchisq(0.95, df=3)
par(mar=c(0,1,1,1))
plot(x, dc, type="l", axes=F, ylim=c(-0.03, 0.25), xlab="", ylab="")
abline(h=0)
tol.g <- round(tol, 2)
polygon(c(tol.g, x[x>tol.g], 15), c(0, dc[x>tol.g], 0), col="red")
text(0, -0.03, "0", cex=0.8)
text(tol, -0.03, expression(chi[0.05]^{2}==2.14), cex=0.8)
# 예제-1. 멘델의 법칙
x <- c(315, 101, 108, 32)
chisq.test(x, p=c(9, 3, 3, 1)/16)
# 그래프에 표시
x <- seq(0, 15, by=0.01)
dc <- dchisq(x, df=3)
par(mar=c(0,1,1,1))
plot(x, dc, type="l", axes=F, ylim=c(-0.03, 0.25), xlab="", ylab="")
abline(h=0)
tol.g <- round(tol, 2)
polygon(c(tol.g, x[x>tol.g], 15), c(0, dc[x>tol.g], 0), col="red")
text(0, -0.01, "0", cex=0.8)
text(tol, -0.03, expression(chi[0.05]^{2}==2.14), cex=0.8)
tol2 <- qchisq(1-0.9254, df=3)
tol2.g <- round(tol2, 2)
polygon(c(tol2.g, x[x>tol2.g], 15), c(0, dc[x>tol2.g], 0), col="blue", density=20, angle=305)
text(tol2, -0.03, expression(chi[0.9254]^{2}==0.47), cex=0.8)
arrows(7, 0.17, 4, 0.12, length=0.05)
text(7, 0.177, expression(plain(P)(F>0.47) == 0.9254), cex=0.8)
|
a97f10a71a5e5259752aa03b7376e6efb5a80280
|
cdbdfa2809213938a9fefd8bdd304a2cb5ad6278
|
/tests/testthat/test-alma-in.R
|
1fa02dfdc0eff798d5c39c9290b890f1e3c3de7e
|
[
"MIT"
] |
permissive
|
DavisVaughan/almanac
|
49491a478e3bcdfae801111e5263efc86c33a3fb
|
7b14f6e8f1e685975231e5dadb40bb5bb8f2a9c8
|
refs/heads/main
| 2023-04-27T20:31:58.281595
| 2023-04-14T17:29:53
| 2023-04-14T17:29:53
| 208,673,066
| 74
| 4
|
NOASSERTION
| 2023-04-19T19:08:04
| 2019-09-15T23:45:27
|
R
|
UTF-8
|
R
| false
| false
| 1,424
|
r
|
test-alma-in.R
|
test_that("can check if a date is in a runion", {
rrule <- monthly(since = "2019-01-01")
expect_true(alma_in("2019-01-01", rrule))
expect_false(alma_in("2019-01-02", rrule))
})
test_that("is vectorized", {
rrule <- monthly(since = "2019-01-01")
expect_identical(alma_in(c("2019-01-01", "2019-01-02"), rrule), c(TRUE, FALSE))
})
test_that("`alma_in()`ness can be determined when NA values are present", {
x <- as.Date(c(NA, "2019-01-01", "2019-01-02"))
rrule <- daily(since = "2019-01-01")
expect_identical(alma_in(x, rrule), c(FALSE, TRUE, TRUE))
})
test_that("`alma_in()`ness can be silently determined when all values are NA", {
x <- as.Date(c(NA, NA, NA))
rrule <- daily(since = "2019-01-01")
expect_warning(
expect_identical(alma_in(x, rrule), c(FALSE, FALSE, FALSE)),
NA
)
})
test_that("`alma_in()` can be called even with infinite dates", {
rrule <- daily(since = "1970-01-01")
x <- vec_c(almanac_global_inf_date, as.Date("1970-01-01"))
expect_identical(alma_in(x, rrule), c(FALSE, TRUE))
})
test_that("`alma_in()`ness is defined in the extreme cases", {
rrule <- daily()
expect_identical(alma_in(almanac_global_inf_date, rrule), FALSE)
expect_identical(alma_in(almanac_global_neg_inf_date, rrule), FALSE)
})
test_that("can handle size zero input without warnings", {
expect_warning(
expect_identical(alma_in(new_date(), runion()), logical()),
NA
)
})
|
ab1fbdbb1c7d8d9ed6b89456f83d5cbb17f4263e
|
fce53c0e4a1d45c9c4d3470684f6b0fde6c939eb
|
/Chapter_5/xtendShinyjs/ui.R
|
e3d59952fa7fe07b6fe46efe40e9210b1203756c
|
[] |
no_license
|
himynameismarcel/Web-Application-Development-with-R-Using-Shiny
|
579ff710e977519cf1b33256f8165f586aa68d35
|
7dff0ddcb992725c892c65c44edafd94c4458faa
|
refs/heads/master
| 2020-06-02T03:52:12.919588
| 2019-06-16T12:03:07
| 2019-06-16T12:03:07
| 191,026,674
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,009
|
r
|
ui.R
|
## Marcel Kropp
## 09.06.2019
## Shiny Application, Gapminder
## Following the book: Web Application with Shiny R (Breeley, 2018)
library(leaflet)
library(DT)
library(shinyjs)
fluidPage(
# we need to define the CSS in the head of the HTML using tabs$head:
tags$head(
tags$style(HTML(".redText {
color: red;
}"
))
),
# we need to add useShinyjs() anywhere within fluidPage() in ui.R
useShinyjs(),
# the last addition is a reference to the JavaScript file that will
# make everything happen. We add this with the extendShinyjs()
# function
extendShinyjs(script = "appendText.js"),
titlePanel("Gapminder"),
sidebarLayout(
sidebarPanel(
div(id = "yearPanel",
sliderInput(inputId = "year",
label = "Years included",
min = 1952,
max = 2007,
value = c(1952, 2007),
sep = "",
step = 5
)
),
# checkboxInput("linear", label = "Add trend line?", value = FALSE),
conditionalPanel(
condition = "input.theTabs == 'trend'",
checkboxInput("linear", label = "Add trend line?",
value = FALSE)
),
uiOutput("yearSelectorUI"),
# Modal (elements from Bootstrap, pop-up messages)
actionButton("showModal", "Launch loyalty test"),
# we add a button
checkboxInput("redText", "Red text?"),
# we add a button for the user to reset everything
actionButton("reset", "Reset year"),
# then we add a button to add the text and some controls to the sidebar
actionButton("buttonClick", "Add inputs"),
selectInput("color", "Text colour",
c("Red" = "red",
"Blue" = "blue",
"Black" = "black")),
selectInput("size", "Text size",
c("Extremely small" = "xx-small",
"Very small" = "x-small",
"Small" = "small",
"Medium" = "medium",
"Large" = "large",
"Extra large" = "x-large",
"Super size" = "xx-large"))
),
mainPanel(
tabsetPanel(id = "theTabs",
tabPanel("Summary", value = "text", div(id = "theText",
textOutput("summary"))),
tabPanel("Trend", plotOutput("trend"),
h3("User selection history"),
p(id = "selection", "")),
tabPanel("Map", leafletOutput("map"),
p("Map data is from the most recent year in the selected range;
radius of circles is scaled to life expectancy"),
value = "map"),
tabPanel("Table", dataTableOutput("countryTable"),
value = "table")
)
)
)
)
|
ff43c0d91a260f88afe8db0d9aaaad1df3421a7f
|
4247ac14240060b5f1ea8897e69e2dbfa56b6656
|
/man/extractData.Rd
|
86bf552302e338d900aafcb15107927b87c65aaa
|
[] |
no_license
|
bussejoh/eatGADS
|
980f4d12aefa63315e6c7265e604a1d39ca65008
|
dfaa7108069be4480ec2ceff44c589a0333364c7
|
refs/heads/master
| 2022-12-04T17:18:11.162390
| 2020-08-11T12:12:43
| 2020-08-11T12:12:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,497
|
rd
|
extractData.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extractData.R
\name{extractData}
\alias{extractData}
\title{Extract Data}
\usage{
extractData(
GADSdat,
convertMiss = TRUE,
convertLabels = "character",
dropPartialLabels = TRUE,
convertVariables
)
}
\arguments{
\item{GADSdat}{A \code{GADSdat} object.}
\item{convertMiss}{Should values coded as missing values be recoded to \code{NA}?}
\item{convertLabels}{If \code{"numeric"}, values remain as numerics. If \code{"factor"} or \code{"character"}, values are recoded to their labels. Corresponding variable type is applied.}
\item{dropPartialLabels}{Should value labels for partially labeled variables be dropped? If \code{TRUE}, the partial labels will be dropped. If \code{FALSE}, the variable will be converted to the class specified in \code{convertLabels}.}
\item{convertVariables}{Character vector of variables names, which labels should be applied to. If not specified (default), value labels are applied to all variables for which labels are available. Variable names not in the actual GADS are silently dropped.}
}
\value{
Returns a data frame.
}
\description{
Extract \code{data.frame} from a \code{GADSdat} object for analyses in \code{R}. For extracting meta data see \code{\link{extractMeta}}.
}
\details{
A \code{GADSdat} object includes actual data (\code{GADSdat$dat}) and the corresponding meta data information
(\code{GADSdat$labels}). \code{extractData} extracts the data and applies relevant meta data (missing conversion, value labels),
so the data can be used for analyses in \code{R}.
If \code{factor} are extracted via \code{convertLabels == "factor"}, the underlying integers will
be tried to preserved. If this is not possible, a warning is issued. As \code{SPSS} has almost no limitations regarding the underlying values of labeled
integers and \code{R}'s \code{factor} format is very strict (no \code{0}, only integers increasing by \code{+ 1}), this procedure can lead to
frequent problems.
}
\examples{
\dontrun{
gads10 <- getGADS(vSelect = c("idstud", "wgt", "jkzone", "jkrep", "imp", "domain", "score"),
filePath = "t:/_R_Tutorials/R_Workshops/04_eatPakete/minigads_2010.db")
# Extract Data for Analysis
dat <- extractData(gads10)
# convert labeled variables to factors
dat <- extractData(gads10, convertLabels = "factor")
# convert only some variables to factor
dat <- extractData(gads10, convertLabels = "factor", convertVariables = c("domain"))
}
}
|
716e07166c8d72d5785973d18266d174ff8f07cd
|
d12216651e80028f584efce9e87a9e6ee9ac8d71
|
/man/grapes-tail_while-grapes.Rd
|
210340d9c5d6ca9837568ade8b427d5420a010d5
|
[
"MIT"
] |
permissive
|
d0rj/pido
|
e2774a727719143747c2c4fa0468bb5c1cd7b2e2
|
4da06b8b70a17f0dc55a7cd538702ba80bdd0bd4
|
refs/heads/main
| 2023-06-30T06:40:56.643640
| 2021-07-14T12:08:03
| 2021-07-14T12:08:03
| 354,776,985
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 455
|
rd
|
grapes-tail_while-grapes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functional.R
\name{\%tail_while\%}
\alias{\%tail_while\%}
\title{tail_while combinator which returns tail elements while predicate is TRUE (infix version)}
\usage{
x \%tail_while\% p
}
\arguments{
\item{x}{vector to filter}
\item{p}{predicate}
}
\value{
tail filtered
}
\description{
tail_while combinator which returns tail elements while predicate is TRUE (infix version)
}
|
c624b3f36ade75ab34fbadc9719937e3de6b571d
|
1005478508bac2fe3b259c0bfc9270c85328c5d4
|
/man/mzrtsim.Rd
|
b25ffba4ddcb5817e6de685b7674bfd53b8b9969
|
[] |
no_license
|
yufree/mzrtsim
|
bdf288efe7ccb2b257c692eb41472887505f8d5d
|
07918656844c42cb9f84932dccc443c457ebf45e
|
refs/heads/master
| 2023-08-31T03:54:31.689131
| 2023-08-29T02:12:06
| 2023-08-29T02:12:06
| 129,319,854
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,751
|
rd
|
mzrtsim.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mzrtsim.R
\name{mzrtsim}
\alias{mzrtsim}
\title{Generate simulated count data with batch effects for npeaks}
\usage{
mzrtsim(
ncomp = 100,
fc = NULL,
ncond = 2,
ncpeaks = 0.1,
nbatch = 3,
nbpeaks = 0.1,
npercond = 10,
nperbatch = c(8, 5, 7),
batchtype = "mb",
rtsim = TRUE,
db = NULL,
seed = 42
)
}
\arguments{
\item{ncomp}{compound numbers to be selected, default 100}
\item{fc}{fold change of compounds with the same length of compounds numbers, default NULL}
\item{ncond}{Number of conditions to simulate}
\item{ncpeaks}{percentage of compounds influenced by conditions}
\item{nbatch}{Number of batches to simulate}
\item{nbpeaks}{percentage of peaks influenced by batchs}
\item{npercond}{Number of samples per condition to simulate}
\item{nperbatch}{Number of samples per batch to simulate}
\item{batchtype}{type of batch. 'm' means monotonic, 'b' means block, 'r' means random error, 'mb' means mixed mode of monotonic and block, default 'mb'}
\item{rtsim}{logical, simulate retention time or not}
\item{db}{compound database with MS1 data. e.g. hmdbcms or monams1}
\item{seed}{Random seed for reproducibility}
}
\value{
list with rtmz data matrix, row index of peaks influenced by conditions, row index of peaks influenced by batchs, column index of conditions, column of batchs, raw condition matrix, raw batch matrix, peak mean across the samples, peak rsd across the samples
}
\description{
Generate simulated count data with batch effects for npeaks
}
\details{
the numbers of batch columns should be the same with the condition columns.
}
\examples{
data(hmdbcms)
sim <- mzrtsim(db=hmdbcms)
}
\seealso{
\code{\link{simdata}}
}
|
450764220f8dc6fb805769a1a1976573c904cdb9
|
c6b0c684b665af00cd61b2c5e72d595f917ca25a
|
/functions/old/calc_parents_TGV.R
|
5f133049ffbcf1aa14d8878559b2a3f2069dfb65
|
[
"MIT"
] |
permissive
|
arfesta/SimBreeder_Project
|
861acb0678bce76a49f08f90c38b709934b3692f
|
000337e18be4501d49839b1b213e1025d3ef6fa2
|
refs/heads/master
| 2023-07-09T04:56:59.737463
| 2023-06-26T20:10:39
| 2023-06-26T20:10:39
| 64,675,861
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,520
|
r
|
calc_parents_TGV.R
|
#' Cacluate Parents Total Genetic Value
#'
#' This function esimates the total genetic value of parents produced from the create_parents function
#' @param parents The object that is returned from the create_parents function
#' @param map.info The object returned from create_map function
#' @param A Value assigned to the Major SNPQTL allele
#' @param a Value assigned to the Minor SNPQTL allele
#' @param dom.coeff The dominance coeffcient used for SNPQTLs
#' @param usesnpeffect logical. Set to TRUE if specificying user provided snp effects. Default: FALSE
#' @param snp.effects The vector of SNP effects to use. Only use if usesnpeffect = TRUE.
#' @param prefix Name prefix to add to ouptut if save = TRUE. Default does not write output
#' @param save logical. Saves the output of genetic map (Default: FALSE)
#' @keywords parent genetic value
#' @export
#' @examples
#' parent.TGV <- calc_parents_TGV(parents = the.parents, map.info = the.map, A = 1, a = -100, dom.coeff = 1)
####Calc Parent TGV Genetic Value####
calc_parents_TGV <- function(parents, map.info,
A, a, dom.coeff,
usesnpeffect = F,snp.effects = NULL,
save = F, prefix = NULL) {
locus.names <- map.info$genetic.map$loci # The locus names pulled from the mab object
QTLSNPs <- map.info$QTLSNP.loci # vector of the loci which are snpqtl
QTLSNP.num <- parents$genos.3d[QTLSNPs,,] # genotypes of both alleles pulled from the current generation
markers <- map.info$available.Markers# a list of all the markers pulled from map object
num.markers <- length(markers)
marker.loci <- sort(sample(markers,num.markers,replace=F),decreasing=F)
num.markers <- length(marker.loci) # length of markers that were selected
marker.select.genos <- parents$genos.3d[marker.loci,,] # genotypes of the markers pulled from the current generation
marker.map <- map.info$genetic.map[marker.loci,c(1,6)]
num.QTL <- length(map.info$rQTL.loci) # the number of additive qtl
par.IDs <- parents$parent.IDs
# Create 2 matrices: One to hold snpqtl values for all parents and the other to hold marker marker values for blup analysis
num.SNPQTL <- map.info$total.SNPQTL.num # the number of loci which are snpqtl
num.parents <- length(par.IDs) # the number of parents
QTLSNP.values <-matrix(NA,nrow=num.SNPQTL,ncol=num.parents) # matrix to hold snpqtl values
marker.matrix <- matrix(NA,nrow=num.markers,ncol=num.parents) # matrix to hold marker values
capital.genotypes <- vector()
lowercase.genotypes <- vector()
for (i in 1:26){
capital.genotypes <- c(capital.genotypes,paste(LETTERS[i],LETTERS, sep=""))
lowercase.genotypes <- c(lowercase.genotypes,paste(letters[i],letters, sep=""))}
for (i in 1:length(par.IDs)){
QTLSNPaa <- which(QTLSNP.num[,i,1]=="a" & QTLSNP.num[,i,2]=="a")
QTLSNPcc <- which(QTLSNP.num[,i,1]=="c" & QTLSNP.num[,i,2]=="c")
QTLSNPac <- which(QTLSNP.num[,i,1]=="a" & QTLSNP.num[,i,2]=="c")
QTLSNPca <- which(QTLSNP.num[,i,1]=="c" & QTLSNP.num[,i,2]=="a")
if (dom.coeff==0){
QTLSNP.values[QTLSNPaa,i] <- A*2
QTLSNP.values[QTLSNPcc,i] <- a*2
QTLSNP.values[QTLSNPac,i] <- (A+a+dom.coeff)
QTLSNP.values[QTLSNPca,i] <- (A+a+dom.coeff) } else {
QTLSNP.values[QTLSNPaa,i] <- A
if(usesnpeffect){
QTLSNP.values[QTLSNPcc,i] <- snp.effects[a]
} else{
QTLSNP.values[QTLSNPcc,i] <- a}
QTLSNP.values[QTLSNPac,i] <- (A*dom.coeff)
QTLSNP.values[QTLSNPca,i] <- (A*dom.coeff)
}
markers.aa <- which(marker.select.genos[,i,1] %in% capital.genotypes & marker.select.genos[,i,2] %in% capital.genotypes)
markers.cc <- which(marker.select.genos[,i,1] %in% lowercase.genotypes & marker.select.genos[,i,2] %in% lowercase.genotypes)
markers.ac <-which(marker.select.genos[,i,1] %in% capital.genotypes & marker.select.genos[,i,2] %in% lowercase.genotypes)
markers.ca <- which(marker.select.genos[,i,1] %in% lowercase.genotypes & marker.select.genos[,i,2] %in% capital.genotypes)
marker.matrix[markers.aa,i] <- "0"
marker.matrix[markers.cc,i] <- "2"
marker.matrix[markers.ac,i] <- "1"
marker.matrix[markers.ca,i] <- "1"
}
marker.matrix <- t(marker.matrix)
colnames(marker.matrix) <- marker.loci
rownames(marker.matrix) <- par.IDs
# Convert the 'invisible' rQTL genotypes to numeric matrices, merge the alleles to paired values also
par.QTL.allele1 <- matrix(as.integer(parents$genos.3d[map.info$rQTL,,1]),nrow=num.QTL,ncol=num.parents)
colnames(par.QTL.allele1) <- c(par.IDs)
par.QTL.allele2 <- matrix(as.integer(parents$genos.3d[map.info$rQTL,,2]),nrow=num.QTL,ncol=num.parents)
colnames(par.QTL.allele2) <- c(par.IDs)
QTL.values <- matrix(paste(par.QTL.allele1,par.QTL.allele2,sep=","),nrow=num.QTL,ncol=num.parents)
dimnames(QTL.values)<-list(locus.names[map.info$rQTL],par.IDs)
genetic.values <- colSums(QTLSNP.values) + colSums(par.QTL.allele1) + colSums(par.QTL.allele2)
#parQTLalleles <- par.QTL.allele2 + rqtldom
# Genetic values of progeny
#geneticvals <- colSums(QTLSNP.values) + colSums(final.qtl)
if(save) {
pedfilename=paste(prefix,".txt", sep="")
write.table(genetic.values,pedfilename, quote = F, row.names = F, col.names = T, sep="\t")}
TGV <- list(genetic.values=genetic.values, SNP.value.matrix=QTLSNP.values, marker.matrix=marker.matrix, marker.loci=marker.loci, marker.map=marker.map)
return(TGV)
}
|
2ca84c95982462c3e023713f64880e04a07fa887
|
32ed69ea8721f9913b704a1223e13a6fe398374b
|
/tests/testthat/test-mNIX_NLL.R
|
513f268eeb6cf9ed1024a64c18d7968b061624c8
|
[] |
no_license
|
mlysy/losmix
|
1957a388845bafba673c41c609ef1aee32aaefc9
|
16de5dbeb18d89a45f658e93e3c6b276e9ac1f13
|
refs/heads/master
| 2021-06-22T23:05:57.787587
| 2021-01-13T14:38:26
| 2021-01-13T17:35:15
| 180,701,572
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,929
|
r
|
test-mNIX_NLL.R
|
## require(losmix)
## require(testthat)
## require(numDeriv)
source("losmix-testfunctions.R")
context("Single mNIX")
test_that("Sufficient statistics are the same in R and TMB", {
  # Compare the mNIX sufficient statistics (yy, Xy, XX, N) computed by the
  # R helper get_suff() with those returned by the TMB model's simulate()
  # method, over randomly-sized problems.
  ntests <- 10
  suff_names <- c("yy", "Xy", "XX", "N")
  for(ii in 1:ntests) {
    N <- sample(10:20, 1) # number of observations
    p <- sample(3:5, 1)   # number of covariates
    X <- sim_X(N, p)
    y <- sim_y(N)
    odata <- list(Xtr = t(X), y = as.matrix(y))
    opars <- Phi2par(Phi = sim_Phi(p))
    obj <- TMB::MakeADFun(data = c(list(model = "mNIX_NLL"), odata),
                          parameters = opars,
                          DLL = "losmix_TMBExports", silent = TRUE)
    suff_r <- get_suff(y, X)
    suff_tmb <- obj$simulate(par = vec_phi(sim_Phi(p)))[suff_names]
    expect_equal(suff_r, suff_tmb)
  }
})
test_that("Conjugate posterior hyperparameters are the same in R and TMB", {
  # The posterior hyperparameters (lambda, Omega, nu, tau) from the R
  # helper get_post() must match the *_hat values simulated by TMB.
  ntests <- 10
  Phi_names <- c("lambda", "Omega", "nu", "tau")
  names(Phi_names) <- paste0(Phi_names, "_hat")
  for(ii in 1:ntests) {
    N <- sample(10:20, 1)
    p <- sample(3:5, 1)
    X <- sim_X(N, p)
    y <- sim_y(N)
    Phi <- sim_Phi(p)
    odata <- list(Xtr = t(X), y = as.matrix(y))
    opars <- Phi2par(sim_Phi(p))
    obj <- TMB::MakeADFun(data = c(list(model = "mNIX_NLL"), odata),
                          parameters = opars,
                          DLL = "losmix_TMBExports", silent = TRUE)
    Phi_hat_r <- get_post(suff = get_suff(y, X), Phi = Phi)
    # TMB returns lambda as a matrix; align the R version before comparing.
    Phi_hat_r$lambda <- as.matrix(Phi_hat_r$lambda)
    Phi_hat_tmb <- obj$simulate(par = vec_phi(Phi))[names(Phi_names)]
    names(Phi_hat_tmb) <- as.character(Phi_names)
    expect_equal(Phi_hat_r, Phi_hat_tmb)
  }
})
test_that("R(loglik + lprior) = TMB(lmarg + lcond)", {
  # Bayes-rule identity check: loglik(theta) + logprior(theta) should equal
  # logcond(theta | posterior hyperparameters) + logmarginal, where the
  # posterior hyperparameters and the marginal come from the TMB model.
  ntests <- 10
  Phi_names <- c("lambda", "Omega", "nu", "tau")
  names(Phi_names) <- paste0(Phi_names, "_hat")
  # Gaussian regression log-likelihood of y | X, theta.
  loglik <- function(theta, y, X) {
    sum(dnorm(y, mean = X %*% theta$beta, sd = sqrt(theta$sig2), log = TRUE))
  }
  # mNIX log-density of (beta, sig2) under hyperparameters Phi.
  logpi <- function(theta, Phi) {
    dmnix(x = theta$beta, v = theta$sig2, Phi = Phi, log = TRUE)
  }
  ans <- rep(NA, ntests)
  for(ii in 1:ntests) {
    N <- sample(10:20, 1)
    p <- sample(3:5, 1)
    X <- sim_X(N, p)
    y <- sim_y(N)
    theta <- sim_theta(p)
    Phi <- sim_Phi(p)
    odata <- list(Xtr = t(X), y = as.matrix(y))
    opars <- Phi2par(sim_Phi(p))
    obj <- TMB::MakeADFun(data = c(list(model = "mNIX_NLL"), odata),
                          parameters = opars,
                          DLL = "losmix_TMBExports", silent = TRUE)
    Phi_hat <- obj$simulate(par = vec_phi(Phi)) # conjugate posterior
    Phi_hat <- setNames(Phi_hat[names(Phi_names)], as.character(Phi_names))
    Phi_hat$lambda <- c(Phi_hat$lambda)
    ll <- loglik(theta = theta, y = y, X = X)
    lpi <- logpi(theta = theta, Phi = Phi)
    lc <- logpi(theta, Phi = Phi_hat)
    # TMB returns NLL up to factor of N/2 * log(2*pi)
    lm <- -obj$fn(vec_phi(Phi)) - N/2 * log(2*pi)
    expect_equal(ll + lpi, lc + lm)
  }
})
|
32b3d3ae6a98ccd1d38587de1c3c5bbb9653337a
|
a4a5aa44832be7058fb253eb0e1fe57f1c796c6b
|
/R/bootstats.R
|
759a0bbc68b9c9cab530cf3792690f65a91fba54
|
[] |
no_license
|
AkselA/R-confintplot
|
03bcbcab7edebbb6fb429a8d58b5d5768f8ef5a3
|
ff21fe40b25f51d0a7ff93d9805b92819703f364
|
refs/heads/master
| 2021-01-01T17:38:40.638186
| 2018-01-05T22:09:14
| 2018-01-05T22:09:14
| 98,118,494
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,654
|
r
|
bootstats.R
|
#' Calculate mean and median with bootstrapped confidence intervals.
#'
#' This function returns the bootstrapped mean and median,
#' and the respective confidence intervals, of the supplied numeric vector.
#' @param x A vector of numerical values.
#' @param p.level A number giving the level of confidence for the intervals
#' @param nrep The number of repetitions, or resamples, to perform. Defaults to
#' \code{max(500, 2*(10^5)/length(x))}, except if \code{length(x) <= exact.thrs}.
#' @param exact.thrs Upper level of \code{length(x)} at which all possible subsamples
#' are used
#' @keywords univar
#' @export
#' @examples
#' bootstats(rnorm(1000), p.level=0.9, smooth=FALSE)
#'
#' bootstats(c(1, 3, 4, 4, 6, 7, 9), exact.thrs=7, smooth=FALSE)
#'
#' bootstats(c(1, 3, 4, 4, 6, 7, 8), exact.thrs=1, smooth=FALSE)
#'
#' ## Simple smooth function based on jitter()
#' bootstats(1:5, smooth=function(b=b) jitter(b))
#'
#' ## Alternative entropy based smooth function
#' x <- round(rnorm(15, 10, 2))
#'
#' entropy <- function(x, base=2) {
#' freqs <- table(x) / length(x)
#' -sum(freqs * log(freqs, base=base))
#' }
#'
#' H5 <- entropy(x, base=5)
#'
#' bootstats(x,
#' smooth=function(b=b, x=x, H5=H5) {
#' b + rnorm(length(b), 0, H5/sqrt(length(x)))
#' })
#'
#' bootstats(x)
#'
#' ## Return resamples and plot distribution of means
#' bst <- bootstats(c(1, 2, 2, 4), return_resamples=TRUE)
#' t(bst[[2]])
#' plot(density(colMeans(bst[[2]])))
bootstats <- function(x, p.level=0.95, nrep=NULL, exact.thrs=8,
                      smooth=TRUE, return_resamples=FALSE) {
    x <- na.omit(x)
    if (!is.function(smooth)) {
        if (is.logical(smooth)) {
            # BUG FIX: the original used ifelse(smooth, f, g); ifelse()
            # calls rep() on its branch values and errors on closures
            # ("attempt to replicate an object of type 'closure'").
            # A plain if/else selects between the two smoothers.
            smooth <- if (smooth) {
                # Default smoother: Gaussian jitter shrinking with sample size.
                function(b=b, x=x) {
                    b + rnorm(length(b), 0, 1/sqrt(length(x)))
                }
            } else {
                # Identity smoother: resamples are used as-is.
                function(b=b) b
            }
        } else {
            stop("smooth needs to be a function or logical")
        }
    }
    # && instead of &: scalar condition in if().
    if (length(x) <= exact.thrs && is.null(nrep)) {
        # Exhaustive resampling for small samples (one column per subsample).
        b <- t(multisubsets(x))
        nrep <- ncol(b)
        # Evaluate smooth()'s default arguments in this frame, so user
        # supplied smoothers can refer to the local `b` and `x`.
        b <- do.call(smooth, as.list(formals(smooth)))
    } else {
        if (is.null(nrep)) {
            nrep <- 2*(10^5)/length(x)
            nrep <- floor(max(500, nrep))
        }
        # Random bootstrap: one resample of x per column.
        b <- replicate(nrep, sample(x, length(x), replace=TRUE))
        b <- do.call(smooth, as.list(formals(smooth)))
    }
    # Bootstrap distribution of the mean and its percentile CI.
    mn <- colMeans(b)
    b.mean <- mean(mn)
    mean.upper <- quantile(mn, 1-((1-p.level)/2))
    mean.lower <- quantile(mn, (1-p.level)/2)
    # Bootstrap distribution of the median and its percentile CI.
    md <- apply(b, 2, median)
    b.median <- mean(md)
    median.upper <- quantile(md, 1-((1-p.level)/2))
    median.lower <- quantile(md, (1-p.level)/2)
    stats <- data.frame(b.mean, mean.upper, mean.lower,
                        b.median, median.upper, median.lower,
                        nrep, p.level)
    rownames(stats) <- NULL
    # Transpose so the statistics become row names of a one-column matrix.
    stats <- t(stats)
    if (return_resamples) {
        stats <- list(stats, resamples=b)
    } else {
        stats <- list(stats)
    }
    class(stats) <- c("bootlist", class(stats))
    stats
}
#' Print method for class bootlist.
#'
#' @export
#' @examples
#' x <- c(1, 3, 4, 2, 8)
#' bst <- bootstats(x, nrep=5, return_resamples=TRUE)
#' bst
#' str(bst)
#' bst$resamples
print.bootlist <- function(x, ...){
    # Round the statistics matrix to 5 decimals; NextMethod() then
    # dispatches the default print on the *modified* value of `x`
    # (a plain matrix), so the resamples element is never printed.
    x <- round(x[[1]], 5)
    NextMethod()
}
#' Leave-one-out jackknife of a statistic.
#'
#' Applies \code{FUN} to \code{x} with each element removed in turn.
#'
#' @param x A vector.
#' @param FUN Function computing the statistic of interest.
#' @param ... Further arguments passed on to \code{FUN}.
#' @return A vector (simplified by \code{sapply}) of \code{length(x)}
#'   leave-one-out statistics; empty input yields an empty result.
#' @export
jackknife <- function(x, FUN, ...) {
    # seq_along() is safe for empty input, unlike 1:length(x) which
    # yields c(1, 0) when x has length zero.
    sapply(seq_along(x), function(id) {
        FUN(x[-id], ...)
    })
}
# data <- c(rep(1, 6), rep(2, 5))
# jk.theta <- jackknife(data, median)
# theta.hat <- median(data)
# a <- sum((theta.hat-jk.theta)^3)/(6*sum((theta.hat-jk.theta)^2)^(3/2))
# a
|
2ca642e5a090ff1fb51c74427bdd360a44272ef1
|
ed67c7d9b3860901a9b2feb68914fcee3c3de736
|
/Scripts/4_Figures_betadiversity_S_ENSPIE_All_Native_Scen2_10plots.R
|
37398904fae7171c7b3d0c820b2b04c88cea046a
|
[
"CC-BY-2.0"
] |
permissive
|
dylancraven/Hawaii_diversity
|
f5ebaef8ebba6bd652aef33efa6d668374107d61
|
270a61cd7723b662f395eeaf04f4bc7b124fa79d
|
refs/heads/master
| 2022-01-23T15:37:17.950429
| 2019-07-15T12:12:29
| 2019-07-15T12:12:29
| 192,880,471
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,259
|
r
|
4_Figures_betadiversity_S_ENSPIE_All_Native_Scen2_10plots.R
|
################
## beta_S_PIE #
################
# 10 plots #####
###############
# only Scenario II: "Het + Age"
# just Beta S
require(tidyr)
require(dplyr)
require(ggplot2)
#require(ggridges)
#require(grid)
require(reshape2)
#############
# Data #####
#############
# Load upstream ANOVA results (provides the Scen2_*_BetaS_modelpred objects
# used further below).
load("Cleaned_Data/Scen2_All_Native_BetaS_anova_summary.RData")
# --- All species: per-iteration mean beta_S by island ----
Beta_All<-read.csv("Cleaned_Data/Scen2_Total_10plots_BetaPIE.csv",sep=",",header=T)
Beta_All$geo_entity2<-as.character(Beta_All$geo_entity2)
# Harmonise the island labels to four short names.
Beta_All$geo_entity2<-ifelse(Beta_All$geo_entity2=="O'ahu Island (incl. Mokoli'i Islet)","O'ahu",Beta_All$geo_entity2)
Beta_All$geo_entity2<-ifelse(Beta_All$geo_entity2=="O'ahu Island","O'ahu",Beta_All$geo_entity2)
Beta_All$geo_entity2<-ifelse(Beta_All$geo_entity2=="Hawai'i Island","Hawai'i",Beta_All$geo_entity2)
Beta_All$geo_entity2<-ifelse(Beta_All$geo_entity2=="Kaua'i Island","Kaua'i",Beta_All$geo_entity2)
Beta_All$geo_entity2<-as.factor(Beta_All$geo_entity2)
# Fix the island ordering used on the x axis of the figure.
Beta_All$geo_entity2<-factor(Beta_All$geo_entity2,levels=c("Hawai'i","Maui Nui","O'ahu","Kaua'i"))
# take average of beta diversity per iteration
Beta_All<- Beta_All %>% group_by(Iteration,geo_entity2, index) %>%
                summarize(beta=mean(value))
Beta_All<-filter(Beta_All, index=="beta_S")
Beta_All$Scenario<-"All species"
# --- Native species: identical processing of the natives-only file ----
Beta_N<-read.csv("Cleaned_Data/Scen2_Natives_10plots_BetaPIE.csv",sep=",",header=T)
Beta_N$geo_entity2<-as.character(Beta_N$geo_entity2)
Beta_N$geo_entity2<-ifelse(Beta_N$geo_entity2=="O'ahu Island (incl. Mokoli'i Islet)","O'ahu",Beta_N$geo_entity2)
Beta_N$geo_entity2<-ifelse(Beta_N$geo_entity2=="O'ahu Island","O'ahu",Beta_N$geo_entity2)
Beta_N$geo_entity2<-ifelse(Beta_N$geo_entity2=="Hawai'i Island","Hawai'i",Beta_N$geo_entity2)
Beta_N$geo_entity2<-ifelse(Beta_N$geo_entity2=="Kaua'i Island","Kaua'i",Beta_N$geo_entity2)
Beta_N$geo_entity2<-as.factor(Beta_N$geo_entity2)
Beta_N$geo_entity2<-factor(Beta_N$geo_entity2,levels=c("Hawai'i","Maui Nui","O'ahu","Kaua'i"))
# take average of beta diversity per iteration
Beta_N<- Beta_N %>% group_by(Iteration,geo_entity2, index) %>%
                summarize(beta=mean(value))
Beta_N<-filter(Beta_N, index=="beta_S")
Beta_N$Scenario<-"Native species"
# --- Combine scenarios and assign dodged x positions ----
Beta_SS<-rbind.data.frame(Beta_All, Beta_N)
Beta_SS<- Beta_SS%>%
  unite( "Isl_Scen", c("geo_entity2","Scenario"),remove=FALSE)
Beta_SS$Scenario<-factor(Beta_SS$Scenario,levels=c("All species","Native species"))
Beta_SS$geo_entity2<-as.character(Beta_SS$geo_entity2)
# x_axis: island position (1-4) offset by -0.1 for "All species" and
# +0.1 for "Native species", so the two scenarios sit side by side.
Beta_SS$x_axis<-ifelse(Beta_SS$geo_entity2=="Hawai'i" & Beta_SS$Scenario=="All species", 0.90, NA)
Beta_SS$x_axis<-ifelse(Beta_SS$geo_entity2=="Hawai'i" & Beta_SS$Scenario=="Native species", 1.10, Beta_SS$x_axis)
Beta_SS$x_axis<-ifelse(Beta_SS$geo_entity2=="Maui Nui" & Beta_SS$Scenario=="All species", 1.90, Beta_SS$x_axis)
Beta_SS$x_axis<-ifelse(Beta_SS$geo_entity2=="Maui Nui" & Beta_SS$Scenario=="Native species", 2.10, Beta_SS$x_axis)
Beta_SS$x_axis<-ifelse(Beta_SS$geo_entity2=="O'ahu" & Beta_SS$Scenario=="All species", 2.90, Beta_SS$x_axis)
Beta_SS$x_axis<-ifelse(Beta_SS$geo_entity2=="O'ahu" & Beta_SS$Scenario=="Native species", 3.10, Beta_SS$x_axis)
Beta_SS$x_axis<-ifelse(Beta_SS$geo_entity2=="Kaua'i" & Beta_SS$Scenario=="All species", 3.90, Beta_SS$x_axis)
Beta_SS$x_axis<-ifelse(Beta_SS$geo_entity2=="Kaua'i" & Beta_SS$Scenario=="Native species", 4.10, Beta_SS$x_axis)
# join models
Scen2_AllSpp_BetaS_modelpred$Scenario<-"All species"
Scen2_AllSpp_BetaS_modelpred<-select(Scen2_AllSpp_BetaS_modelpred, Scenario,geo_entity2=x, beta=predicted,
                                     beta.LCL=conf.low, beta.UCL=conf.high)
Scen2_NativesSpp_BetaS_modelpred$Scenario<-"Native species"
Scen2_NativesSpp_BetaS_modelpred<-select(Scen2_NativesSpp_BetaS_modelpred,Scenario,geo_entity2=x, beta=predicted,
                                         beta.LCL=conf.low, beta.UCL=conf.high)
BetaS_f<-rbind.data.frame(Scen2_AllSpp_BetaS_modelpred,Scen2_NativesSpp_BetaS_modelpred)
BetaS_f$geo_entity2<-as.character(BetaS_f$geo_entity2)
# Same dodged x positions for the model predictions.
BetaS_f$x_axis<-ifelse(BetaS_f$geo_entity2=="Hawai'i" & BetaS_f$Scenario=="All species", 0.90, NA)
BetaS_f$x_axis<-ifelse(BetaS_f$geo_entity2=="Hawai'i" & BetaS_f$Scenario=="Native species", 1.10, BetaS_f$x_axis)
BetaS_f$x_axis<-ifelse(BetaS_f$geo_entity2=="Maui Nui" & BetaS_f$Scenario=="All species", 1.90, BetaS_f$x_axis)
BetaS_f$x_axis<-ifelse(BetaS_f$geo_entity2=="Maui Nui" & BetaS_f$Scenario=="Native species", 2.10, BetaS_f$x_axis)
BetaS_f$x_axis<-ifelse(BetaS_f$geo_entity2=="O'ahu" & BetaS_f$Scenario=="All species", 2.90, BetaS_f$x_axis)
BetaS_f$x_axis<-ifelse(BetaS_f$geo_entity2=="O'ahu" & BetaS_f$Scenario=="Native species", 3.10, BetaS_f$x_axis)
BetaS_f$x_axis<-ifelse(BetaS_f$geo_entity2=="Kaua'i" & BetaS_f$Scenario=="All species", 3.90, BetaS_f$x_axis)
BetaS_f$x_axis<-ifelse(BetaS_f$geo_entity2=="Kaua'i" & BetaS_f$Scenario=="Native species", 4.10, BetaS_f$x_axis)
BetaS_f$Scenario<-factor(BetaS_f$Scenario,levels=c("All species","Native species"))
BetaS_f<- BetaS_f%>%
  unite( "Isl_Scen", c("geo_entity2","Scenario"),remove=FALSE)
################
## figure #
################
# Points = per-iteration beta_S values; pointranges = model predictions
# with their confidence intervals, dodged by scenario.
p_BetaS<-ggplot(Beta_SS) +
  # geom_vridgeline(aes(x = x_axis, y = qD, width = ..density.., group=Isl_Scen,
  #                     fill=Scenario,colour=Scenario),
  #                stat = "ydensity",trim = FALSE, alpha = 0.2, scale = 1) +
  geom_point(data=Beta_SS,aes(x = x_axis, y = beta, group=Isl_Scen, colour=Scenario),
             position=position_dodge(0.2),alpha=0.05, shape=20, size=0.5) +
  geom_pointrange(data=BetaS_f, aes(x=x_axis,y = beta,ymin=beta.LCL,ymax=beta.UCL,
                                    group=Isl_Scen, colour=Scenario),fatten=0.25,size=1)+
  scale_colour_manual(name="",values=c("All species"="#460B6AFF","Native species"="#F8870EFF"))+
  scale_fill_manual(name="",values=c("All species"="#460B6AFF","Native species"="#F8870EFF"))+
  scale_x_continuous( breaks=c(1,2,3,4),labels=c("Hawai'i","Maui Nui","O'ahu","Kaua'i"))+
  labs(x="",y=expression(bold(beta["S"])))+
  guides(colour = guide_legend(override.aes = list(size = 0.5,fill="transparent",linetype=0)))+
  theme_bw()+theme(plot.title = element_text(colour="black",face="bold",size=7,hjust=0.5,vjust=0),
                   axis.title.x=element_text(colour="black",face="bold",size=8,family="sans"),
                   axis.title.y=element_text(colour="black",face="bold",size=7,family="sans"),
                   axis.text.x=element_text(colour="black",face="bold",size=8,family="sans"),
                   axis.text.y=element_text(colour=c("black"),face="bold",size=6,family="sans"),
                   legend.text=element_text(colour=c("black"),face="bold",size=7,family="sans"),
                   legend.title = element_text(colour=c("black"),face="bold",size=7,family="sans"),
                   legend.title.align = 0.5,
                   legend.margin=margin(t=0.00, r=0, b=0, l=0, unit="cm"),
                   legend.position=c("top"),
                   panel.grid.major = element_blank(),
                   panel.grid.minor = element_blank())
# write out
# NOTE(review): ggsave() runs before p_BetaS is printed and with no `plot`
# argument, so it saves ggplot2's "last plot"; confirm this actually captures
# p_BetaS, and whether the trailing dev.off() is needed (no device is
# explicitly opened here).
ggsave(filename = file.path("Figures", "FigS5_Beta_S_all_natives_Scenario2_10plots.png"),
       width = 8.7,
       height = 6.7,
       units = "cm", dpi=900)
p_BetaS
dev.off()
|
dcb742a42ce26ed40e3fac04e2c0175686cc42f8
|
84ff0aa8aed2eab8635c3823e8f4c2f364e0e192
|
/Examan_2.R
|
70a12011d4c5cd89b79a2b2d07f6ee701c5500fd
|
[] |
no_license
|
pmtempone/AID
|
1843415711abcad8d1aee4704291dc78643e58cb
|
b6292e97c31ae4a9abf281fe7f06e25a1e7fd271
|
refs/heads/master
| 2021-01-20T19:15:55.635664
| 2016-07-28T15:18:59
| 2016-07-28T15:18:59
| 64,361,103
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,674
|
r
|
Examan_2.R
|
library(readxl)
suppressPackageStartupMessages(library(xlsx))
library(ggplot2)
#library(Stuff)
library(knitr)
library(reshape)
library(biotools)
suppressPackageStartupMessages(library(psych))
suppressPackageStartupMessages(library(caret))
library(MVN)
library(klaR)
library(Rmisc)
library(Hotelling)
library(profileR)
library(MASS)
library(lattice)
# Antibodies data set: first column dropped, grouping variable made a factor.
anticuerpos <- read.csv2("C:/Users/Pablo/Google Drive/Maestria/AID/Segundo Examen/anticuerpos.csv")
anticuerpos <- anticuerpos[,-1]
anticuerpos$Grupo <- factor(anticuerpos$Grupo)
# a- State the hypotheses of interest and run the corresponding test
#    (Hotelling's T2 for equality of mean vectors between groups).
fit <- hotelling.test(.~Grupo,data = anticuerpos)
fit
'Este contraste rechaza la hipótesis de igualdad del vector de medias, para verificar que el análisis es correcto, se procede a verificar las hipótesis:
Normalidad multivariada (Se usa el test de Royston):'
roystonTest(anticuerpos[, -1])
'No se satisface el supuesto de normalidad multivariada, por lo tanto no es posible hacer un análisis discriminante a menos que se encuentre una transformación que permita normalizar los datos.
Homocedasticidad multivariada (Se usa la prueba M de Box):'
boxM(anticuerpos[, -1], anticuerpos[,1])
# Since the p-value exceeds 0.05, the null is not rejected: homogeneity of
# the covariance matrices across groups can be assumed.
# Train/test split (67/33) and linear discriminant analysis.
set.seed(12345)
intrain <- createDataPartition(y=anticuerpos$Grupo,p=0.67,list = FALSE)
anticuerpos_train <- anticuerpos[intrain,]
anticuerpos_test <- anticuerpos[-intrain,]
fit.anticuerpos <- lda(Grupo ~.,data = anticuerpos_train)
fit.anticuerpos
'Call:
lda(Grupo ~ ., data = anticuerpos_train)
Prior probabilities of groups:
  1   2
0.5 0.5
Group means:
  Colesterol Albúmina    Calcio   Aurico
1   237.4444 42.17048 100.16063 48.74000
2   243.9841 40.17460  99.13492 47.57143
Coefficients of linear discriminants:
                    LD1
Colesterol  0.005915743
Albúmina   -0.271777595
Calcio     -0.019302855
Aurico     -0.021523867'
# c- Assess the discriminant rule's power in two ways
#    (confusion matrices on the test and on the training partitions).
pred.anticuerpos_test <- predict(fit.anticuerpos,anticuerpos_test)
confusionMatrix(anticuerpos_test$Grupo,pred.anticuerpos_test$class)
pred.anticuerpos_train <- predict(fit.anticuerpos,anticuerpos_train)
confusionMatrix(anticuerpos_train$Grupo,pred.anticuerpos_train$class)
# c- To which group would a patient be assigned with: Colesterol=240,
#    Albumina=39, Calcio=101 and Aurico=49?
pred_c <- predict(fit.anticuerpos,data.frame("Colesterol"=240,"Albúmina"=39,"Calcio"=101,"Aurico"=49))
pred_c
# Group 2
|
5ca6bdb4e0f71ef6a1f707d740843fc992b39ca6
|
6fb04083c9d4ee38349fc04f499a4bf83f6b32c9
|
/R/utilities.R
|
574d02a23ada6aea2cb40cbb802bb6765b8736ad
|
[] |
no_license
|
phani-srikar/AdapteR
|
39c6995853198f01d17a85ac60f319de47637f89
|
81c481df487f3cbb3d5d8b3787441ba1f8a96580
|
refs/heads/master
| 2020-08-09T10:33:28.096123
| 2017-09-07T09:39:25
| 2017-09-07T09:39:25
| 214,069,176
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 24,516
|
r
|
utilities.R
|
# Contains the support functions
#' @include platforms.R
NULL
setOldClass("RODBC")
setOldClass("FLConnection")
cleanNames <- function(x){
    ## Normalise a value read back from the database: factors and Dates
    ## become character, and character values lose leading/trailing blanks.
    ##
    ## inherits() replaces class(x)=="Date": class() can return a vector
    ## (e.g. POSIXct), which makes the scalar `||` comparison error on
    ## R >= 4.3 and silently use only the first class before that.
    if(is.factor(x) || inherits(x, "Date"))
        x <- as.character(x)
    if(is.character(x))
        x <- gsub("^ +| +$","",x)
    x
}
sqlError <- function(e){
warning(e)
sys.call()
}
################################################################################
###### provide methods for JDBC with same signature as ODBC methods
################################################################################
#' Send a query to database
#'
#' No result is returned
#' @param channel JDBC connection object
#' @param query SQLQuery to be sent
#' @export
sqlSendUpdate <- function(connection,query,...) UseMethod("sqlSendUpdate")
#' @export
sqlSendUpdate.FLConnection <- function(connection,query,...)
    # Unwrap the FLConnection and dispatch on the underlying driver object.
    sqlSendUpdate(connection$connection,query,...)
#' Send a query to database
#' Result is returned as data.frame
#' @param channel ODBC/JDBC connection object
#' @param query SQLQuery to be sent
#' @export
sqlQuery <- function(connection,query,...) UseMethod("sqlQuery")
#' @export
sqlQuery.FLConnection <- function(connection,query,...)
    # Unwrap the FLConnection and dispatch on the underlying driver object.
    sqlQuery(connection$connection,query,...)
#' Send a query to database
#'
#' No result is returned
#' @param channel JDBC connection object
#' @param query SQLQuery to be sent
#' @export
sqlSendUpdate.JDBCConnection <- function(connection,query,warn=TRUE,...) {
    # Execute each statement in `query` (a character vector of SQL) and
    # return a logical vector: TRUE where the statement succeeded.
    verrflag<-sapply(query, function(q){
        ##browser()
        if(getOption("debugSQL")) cat(paste0("SENDING SQL: \n",gsub(" +"," ",q),"\n"))
        tryCatch({
            # Aster/Hadoop JDBC drivers go through RJDBC's dbSendUpdate;
            # other platforms use DBI and must clear the result explicitly.
            if(is.TDAster() || is.Hadoop())
                res <- RJDBC::dbSendUpdate(connection,q,...)
            else{
                res <- DBI::dbSendQuery(connection, q, ...)
                ##dbCommit(connection)
                dbClearResult(res)
            }
            return(TRUE)
        },
        error=function(e){
            # Failures are reported via sqlError() (when warn=TRUE) rather
            # than stopping, so any remaining statements still run.
            if(warn) sqlError(e)
            return(FALSE)
        })
    })
    return(verrflag)
}
#' Send a query to database
#'
#' No result is returned
#' @param channel ODBC connection object
#' @param query SQLQuery to be sent
#' @export
#' Execute SQL statements over an RODBC connection.
#'
#' Each statement runs in its own transaction: it is committed when the
#' driver reports no error (or only "No Data"), and rolled back otherwise.
#'
#' @param connection RODBC connection object.
#' @param query Character vector of SQL statements.
#' @param warn Report failures via \code{sqlError}?
#' @param ... Unused; kept for method-signature compatibility.
#' @return Logical vector, TRUE where the statement committed.
sqlSendUpdate.RODBC <- function(connection,query,warn=FALSE,...){
    if(!is.TDAster())
        RODBC::odbcSetAutoCommit(connection, autoCommit = FALSE)
    else RODBC::odbcSetAutoCommit(connection, autoCommit = TRUE)
    verrflag <- sapply(query, function(q){
        if(getOption("debugSQL")) cat(paste0("SENDING SQL: \n",gsub(" +"," ",q),"\n"))
        err<-RODBC::sqlQuery(connection,q,errors=FALSE)
        errmsg<- RODBC::odbcGetErrMsg(connection)
        # BUG FIX: errmsg can have length > 1; the old
        # `as.character(errmsg)=="No Data"` put a vector on the RHS of `||`,
        # which errors on R >= 4.3. Test length and content explicitly.
        if(length(errmsg) == 0 ||
           (length(errmsg) == 1 && as.character(errmsg) == "No Data"))
        {
            RODBC::odbcEndTran(connection, commit = TRUE)
            verrflag <- TRUE
        }
        else
        {
            RODBC::odbcEndTran(connection, commit = FALSE)
            ##print(sys.calls())
            if(warn) sqlError(errmsg)
            verrflag <- FALSE
        }
        RODBC::odbcClearError(connection)
        return(verrflag)
    })
    RODBC::odbcSetAutoCommit(connection, autoCommit = TRUE)
    return(verrflag)
    #cat("DONE...\n")
}
sqlSendUpdate.ODBCConnection <- function(connection, query , warn = TRUE){
    # odbc/DBI connections have no separate update path here: reuse
    # sqlQuery() and silence its warnings (no result set is expected).
    suppressWarnings(sqlQuery(connection, query))
}
#' @export
constructStoredProcArgs <- function(query,
                                    outputParameter,
                                    ...){
    # Normalise the ... arguments of a stored-procedure call and remap
    # them (and the procedure name) through the platform's stored-proc
    # mapping, when one exists.
    args <- list(...)
    # A single `pInputParams` list, or a single bare list, is treated as
    # the complete argument set.
    if("pInputParams" %in% names(args))
        args <- args[["pInputParams"]]
    else if(length(args)==1 && is.list(args[[1]]))
        args <- args[[1]]
    ## print("stored PROC Arguments:")
    ## print(args)
    spMap <- getStoredProcMapping(query)
    ## print("stored PROC Mapping:")
    ## print(spMap)
    if(!is.null(spMap)){
        # Replace the generic name/arguments with their platform-specific
        # equivalents from the mapping.
        query <- spMap$funcNamePlatform
        if(length(spMap$argsPlatform)>0){
            args <- args[spMap$argsPlatform]
            names(args) <- names(spMap$argsPlatform)
        }
    }
    ## print("remapped stored PROC Arguments:")
    ## print(args)
    return(list(args=args,
                query=query))
}
#' Call a database stored procedure
#'
#' Result is returned as a data.frame.
#' @param connection ODBC/JDBC connection object
#' @param query Name of the stored procedure to call
#' @param outputParameter Named template of expected output parameters
#' @export
sqlStoredProc <- function(connection,
                          query,
                          outputParameter,
                          ...)
    UseMethod("sqlStoredProc")
#' @export
sqlStoredProc.FLConnection <- function(connection,
                                       query,
                                       outputParameter=NULL,
                                       ...) {
    # Aster/Hadoop over JDBC need the dedicated JDBCTDAster dispatch below;
    # tag the wrapped connection class accordingly before unwrapping it.
    if((is.TDAster(connection=connection)||is.Hadoop(connection=connection)) &&
       class(getRConnection(connection))=="JDBCConnection")
        class(connection$connection) <- "JDBCTDAster"
    sqlStoredProc(connection=getRConnection(connection),
                  query=query,
                  outputParameter=outputParameter,
                  ...)
}
#' @export
sqlStoredProc.JDBCTDAster <- function(connection,
                                      query,
                                      outputParameter,
                                      ...) {
    # Aster/Hadoop path: build the call as plain SQL text and run it
    # through DBI rather than a JDBC CallableStatement.
    vlist <- constructStoredProcArgs(query=query,
                                     outputParameter=outputParameter,
                                     ...)
    args <- vlist$args
    query <- vlist$query
    sqlstr <- do.call("constructStoredProcSQL",
                      append(list(pConnection="string",
                                  pFuncName=query,
                                  pOutputParameter=outputParameter),
                             args))
    if(getOption("debugSQL")) {
        cat(paste0("CALLING Stored Proc: \n",
                   gsub(" +"," ", sqlstr),"\n"))
    }
    # returnQuery=TRUE returns the SQL text instead of executing it.
    if("returnQuery" %in% names(list(...)) && list(...)[["returnQuery"]])
        return(sqlstr)
    class(connection) <- "JDBCConnection"
    retobj <- DBI::dbGetQuery(connection,sqlstr)
    return(retobj)
}
#' @export
sqlStoredProc.RODBC <- function(connection, query,
                                outputParameter,
                                ...) {
    # ODBC path: build the full call SQL and execute it with sqlQuery().
    vlist <- constructStoredProcArgs(query=query,
                                     outputParameter=outputParameter,
                                     ...)
    args <- vlist$args
    query <- vlist$query
    sqlstr <- do.call("constructStoredProcSQL",
                      append(list(pConnection=connection,
                                  pFuncName=query,
                                  pOutputParameter=outputParameter),
                             args))
    # returnQuery=TRUE returns the SQL text instead of executing it.
    if("returnQuery" %in% names(list(...)) && list(...)[["returnQuery"]])
        return(sqlstr)
    retobj <- sqlQuery(connection,sqlstr)
    return(retobj)
}
sqlStoredProc.ODBCConnection <- function(connection,
                                         query,
                                         outputParameter, ...)
{
    # odbc/DBI connections reuse the RODBC implementation directly.
    return(sqlStoredProc.RODBC(connection, query, outputParameter, ...))
}
#' @export
sqlStoredProc.JDBCConnection <- function(connection, query,
                                         outputParameter=NULL,
                                         ...) {
    ## Call a stored procedure through a JDBC PreparedStatement, binding the
    ## inputs from ..., registering `outputParameter` slots by type, and
    ## returning the retrieved output parameters as a one-row data.frame.
    ## http://developer.teradata.com/doc/connectivity/jdbc/reference/current/jdbcug_chapter_2.html
    vlist <- constructStoredProcArgs(query=query,
                                     outputParameter=outputParameter,
                                     ...)
    args <- vlist$args
    query <- vlist$query
    if(getOption("debugSQL")) {
        sqlstr <- do.call("constructStoredProcSQL",
                          append(list(pConnection="string",
                                      pFuncName=query,
                                      pOutputParameter=outputParameter),
                                 args))
        cat(paste0("CALLING Stored Proc: \n",
                   gsub(" +"," ", sqlstr),"\n"))
    }
    query <- do.call("constructStoredProcSQL",
                     append(list(pConnection=connection,
                                 pFuncName=query,
                                 pOutputParameter=outputParameter),
                            args))
    cStmt = .jcall(connection@jc,"Ljava/sql/PreparedStatement;","prepareStatement",query)
    ##CallableStatement cStmt = con.prepareCall(sCall);
    ## Bind input parameter values; JDBC setter is chosen from the R type.
    ## The literal string "NULL" and R NULL both bind SQL NULL.
    ai <- 1L
    for(a in args){
        if(is.character(a)){
            if(a=="NULL")
                .jcall(cStmt,"V","setNull",ai,.jfield("java/sql/Types",,"VARCHAR"))
            else
                .jcall(cStmt,"V","setString",ai,a)
        } else if(is.integer(a))
            .jcall(cStmt,"V","setInt",ai,as.integer(a))
        else if(is.numeric(a))
            .jcall(cStmt,"V","setFloat",ai,.jfloat(a))
        else if(is.null(a))
            .jcall(cStmt,"V","setNull",ai,.jfield("java/sql/Types",,"VARCHAR"))
        ai <- ai+1L
    }
    ## Register output parameters (positions continue after the inputs),
    ## declaring each JDBC type from the R type of the template value.
    for(a in outputParameter){
        if(is.character(a))
            a <- .jfield("java/sql/Types",,"VARCHAR")
        else if(is.integer(a))
            a <- .jfield("java/sql/Types",,"BIGINT")
        else if(is.numeric(a))
            a <- .jfield("java/sql/Types",,"FLOAT")
        .jcall(cStmt,"V","registerOutParameter",ai,a) ## Error Hadoop:- method registerOutParameter with signature (II)V not found
        ai <- ai+1L
    }
    ## Execute the procedure call.
    exR <- .jcall(cStmt,"I","executeUpdate")
    ## Retrieve the output parameters back into a named list.
    argOffset <- length(args)
    ai <- 1L
    result <- list()
    for(a in outputParameter){
        if(is.character(a))
            a <- .jcall(cStmt,"S","getString",argOffset+ai)
        else if(is.integer(a))
            a <- .jcall(cStmt,"I","getInt",argOffset+ai)
        else if(is.numeric(a))
            a <- .jcall(cStmt,"F","getFloat",argOffset+ai)
        result[[names(outputParameter)[[ai]]]] <- a
        # BUG FIX: `ai` was never incremented here, so with more than one
        # output parameter the same JDBC index was read repeatedly and the
        # first name overwritten.
        ai <- ai+1L
    }
    .jcall(cStmt,"V","close")
    return(as.data.frame(result))
}
#' @export
sqlQuery.JDBCConnection <- function(connection,query, AnalysisIDQuery=NULL, ...) {
    # Run `query` (one statement or a character vector of them) and fetch
    # the result as a data.frame. AnalysisIDQuery is a deprecated
    # follow-up query kept for backwards compatibility.
    if(length(query)==1){
        if(getOption("debugSQL")) cat(paste0("QUERY SQL: \n",query,"\n"))
        if(is.null(AnalysisIDQuery))
            tryCatch({
                resd <- DBI::dbGetQuery(connection, query, ...)
                return(resd)
            },
            error=function(e) cat(paste0(sqlError(e))))
        else {
            # Deprecated path: run `query` for its side effects, then fetch
            # the result of AnalysisIDQuery.
            tryCatch({
                warning(paste0("Use of AnalysisIDQuery is deprecated. Please use sqlStoredProc!\n",query))
                res <- DBI::dbSendQuery(connection, query, ...)
                dbClearResult(res)
            },
            error=function(e) cat(paste0(sqlError(e))))
            resd <- DBI::dbGetQuery(connection,AnalysisIDQuery,...)
            return(resd)
        }
    } else
        # Vector of statements: process each one recursively.
        lapply(query, function(q){
            sqlQuery(connection, q, AnalysisIDQuery,...)
        })
}
#' @export
sqlQuery.RODBC <- function(connection,query,AnalysisIDQuery=NULL, ...) {
    if(is.TDAster())
        RODBC::odbcSetAutoCommit(connection, autoCommit = TRUE)
    if(!is.null(AnalysisIDQuery))
        warning(paste0("Use of AnalysisIDQuery is deprecated. Please use sqlStoredProc!\n",query))
    if(length(query)==1){
        if(getOption("debugSQL")) cat(paste0("QUERY SQL: \n",query,"\n"))
        resd <- RODBC::sqlQuery(connection, query, stringsAsFactors = FALSE,...)
        # checkSqlQueryOutput() normalises driver-specific error returns.
        resd <- checkSqlQueryOutput(resd)
        return(resd)
    }
    # Vector of statements: run each and collect the results in a list.
    lapply(query, function(q){
        if(getOption("debugSQL")) cat(paste0("QUERY SQL: \n",q,"\n"))
        resd <- RODBC::sqlQuery(connection, q, stringsAsFactors = FALSE,...)
        resd <- checkSqlQueryOutput(resd)
        return(resd)
    })
}
#' @export
sqlQuery.ODBCConnection <- function(connection, query, ...){
    resd <- DBI::dbGetQuery(connection,query )
    # dbGetQuery() yields NULL for statements with no result set; report
    # those as success (TRUE) to mirror the other methods.
    if(is.null(resd)){
        return(TRUE)
    }
    else
        return(resd)
}
sqlQuery.NULL <- function(connection, query, ...){
    # Dispatched when no connection object has been created yet.
    stop("please connect to the database before using AdapteR")
}
##' drop a table
##'
##' @param object FLTable object
##' @return message if the table is dropped
##' @export
dbDrop <- function(object)
{
    # Issue DROP TABLE for the table backing `object` and report the drop.
    vSqlStr <- paste0(" DROP TABLE ",getTableNameSlot(object))
    sqlSendUpdate(getFLConnection(object), vSqlStr)
    return(paste0(getTableNameSlot(object)," DROPPED"))
}
# Build a SQL WHERE clause from a named list: list(a = 1, b = "x") becomes
# "a='1' AND b='x'".  An empty list yields the always-true clause "1=1".
list_to_where_clause <- function(x) {
    clause <- paste0(paste(names(x), x, sep = "=\'", collapse = "\' AND "), "\'")
    if (nchar(clause) <= 1) {
        clause <- "1=1"
    }
    clause
}
# /**
# * Converts List to class Spec. Used for Data Prep
# * @param {list} x e.g. list(Varx="a",Vary="b")
# * @return {string} "Varx(x), Vary(y)"
# */
# Convert a named list to a class specification string for data prep:
# list(Varx = "a", Vary = "b") becomes "Varx(a), Vary(b)".
# An empty list yields "".
list_to_class_spec <- function(x) {
    spec <- paste0(names(x), "(", x, ")", collapse = ", ")
    if (nchar(spec) <= 1) {
        spec <- ""
    }
    spec
}
# /**
# * Converts List to class Spec. Used for Data Prep
# * @param {list} x e.g. list(Varx="a",Vary="b")
# * @return {string} "Varx(x), Vary(y)"
# */
# Join the elements of `x` into a comma-separated exclusion list,
# e.g. list("a", "b") becomes "a, b".
list_to_exclude_clause <- function(x) {
    paste(x, collapse = ", ")
}
# Flag (0/1) indicating whether categorical-to-dummy conversion is needed:
# 1 when a class specification is supplied, 0 for an empty one.
calc_cat_to_dummy <- function(ClassSpec) {
    as.numeric(length(ClassSpec) > 0)
}
# Validate a list of arguments against expected typeof() strings
# (`type_list`) and expected classes (`class_list`), both keyed by
# argument name.  Stops with a descriptive error on the first mismatch.
validate_args <- function(arg_list, type_list, class_list = list()) {
    # typeof() checks
    for (nm in names(type_list)) {
        if (typeof(arg_list[[nm]]) != type_list[[nm]]) {
            stop(paste("Argument Type Mismatch:", nm, "should be of type", type_list[[nm]]))
        }
    }
    # inherits() checks
    for (nm in names(class_list)) {
        if (!inherits(arg_list[[nm]], class_list[[nm]])) {
            stop(paste("Argument Type Mismatch:", nm, "should inherit class", class_list[[nm]]))
        }
    }
}
# TRUE when scalar `x` equals its own rounding, i.e. is a whole number.
is_integer <- function(x) {
    x == floor(x) || x == ceiling(x)
}
# TRUE for whole numbers that are at least 1 (valid counts/indices).
is_number <- function(x) {
    (x == floor(x) || x == ceiling(x)) && x >= 1
}
FLGenTableName <- function(pTableName,
                           pCode){
    # Build a quasi-unique object name: "ARBase" + alias-stripped base
    # name + a one-letter kind code + the current epoch time in seconds.
    pTableName <- removeAlias(pTableName)
    vtbl <- paste0("ARBase",pTableName,pCode,round(as.numeric(Sys.time())))
    #options("FLTempTables"=c(getOption("FLTempTables"),vtbl))
    vtbl
}
# Name-generation helpers.  Each wraps FLGenTableName() with a one-letter
# code identifying the object kind: D = deep table, S = score table,
# W = wide table, U = generic unique table, V = view.

gen_deep_table_name <- function(TableName) {
    FLGenTableName(pTableName = TableName, pCode = "D")
}

# Strip leading and trailing whitespace from each element of `x`.
trim <- function(x) {
    gsub("(^[[:space:]]+|[[:space:]]+$)", "", x)
}

gen_score_table_name <- function(TableName) {
    scoreTable <- FLGenTableName(pTableName = TableName, pCode = "S")
    # Score tables are registered in the metadata table as wide tables.
    updateMetaTable(pTableName = scoreTable, pType = "widetable")
    scoreTable
}

gen_wide_table_name <- function(TableName) {
    FLGenTableName(pTableName = TableName, pCode = "W")
}

gen_unique_table_name <- function(TableName) {
    FLGenTableName(pTableName = TableName, pCode = "U")
}

gen_view_name <- function(TableName = "") {
    FLGenTableName(pTableName = TableName, pCode = "V")
}
# Generate a quasi-unique identifier of the form <letter><num><letter><time>.
genRandVarName <- function() {
    squared <- round(rnorm(1)^2 * 1000)
    stamp <- round(as.numeric(Sys.time()))
    paste0(sample(letters, 1), squared, sample(letters, 1), stamp)
}

# Create a session identifier and remember it in options("FLSessionID").
genSessionID <- function() {
    sessionID <- paste0("ARBase", round(as.numeric(Sys.time())))
    options(FLSessionID = sessionID)
    sessionID
}

# SQL fetching the most recent ANALYSISID recorded for `pNote` in `pTable`.
genAnalysisIDQuery <- function(pTable, pNote)
{
    paste0("SELECT top 1 ANALYSISID from ", pTable,
           " where Note=", fquote(pNote), " order by RUNENDTIME DESC")
}

# Tag `pFunction` with the current session ID, for use as an audit note.
genNote <- function(pFunction) {
    paste0(pFunction, " from ", getOption("FLSessionID"))
}
#' Build a table name from a prefix and an optional suffix.
#'
#' @param prefix Base name.
#' @param suffix Optional suffix; when supplied the result is "<prefix>_<suffix>".
#' @return A character table name.
gen_table_name <- function(prefix, suffix = NULL) {
    # Plain if/else instead of ifelse(): the condition is scalar, and
    # ifelse() would truncate the result to the first element.
    vtbl <- if (is.null(suffix)) {
        paste0(prefix)
    } else {
        paste0(prefix, "_", suffix)
    }
    #options("FLTempTables"=c(getOption("FLTempTables"),vtbl))
    vtbl
}
## Compute MAX(vcolName)+1 in-database; returns 0 when the table is empty
## (MAX over no rows is NULL/NA).  Note: unlike getMaxValue() below, the
## "+1" here is done inside the SQL itself.
getMaxId <- function(vdatabase,vtable,vcolName,
vconnection=getFLConnection(),...){
## build "SELECT MAX(col)+1 FROM db.table"
sqlstr <- paste0(" SELECT MAX(",vcolName,
" )+1 FROM ",vdatabase,".",vtable)
t <- sqlQuery(vconnection,sqlstr)[1,1]
## empty table -> NA -> treat next id as 0
if(is.na(t)) return(0)
else return(t)
}
#' Get Max Matrix ID+1 from result Matrix table
#'
#' used to know ID of next entry in table
#' @param vconnection ODBC/JDBC connection object
#' @param vtable name of the result matrix table (option "ResultMatrixTableFL")
#' @return current maximum MATRIX_ID plus one (1 for an empty table)
getMaxMatrixId <- function(vconnection=getFLConnection(),
vtable=getOption("ResultMatrixTableFL"),
...)
getMaxValue(vtable=vtable,
vcolName="MATRIX_ID",
vconnection=vconnection)+1
#' Get Max ID from given table
#'
#' used to know ID of last entry in table
#' @param vconnection ODBC/JDBC connection object
#' @param vtable name of the table
#' @param vcolName name of the primary index column in table
#' @return maximum value of vcolName, or 0 when the table is empty
getMaxValue <- function(vtable=getOption("ResultVectorTableFL"),
vcolName="vectorIdColumn",
vconnection=getFLConnection())
{
R <- sqlQuery(vconnection,
paste0("SELECT max(",
vcolName,")",
" FROM ",
vtable))[1,1]
## empty table -> NA -> 0
if(is.na(R)) return(0)
else return(R)
}
#' Get Max Vector ID+1 from result Vector table
#'
#' used to know ID of next entry in table
#' @param vconnection ODBC/JDBC connection object
#' @return current maximum vectorIdColumn plus one (1 for an empty table)
getMaxVectorId <- function(vconnection = getFLConnection(),
vtable=getOption("ResultVectorTableFL"),
...)
getMaxValue(vtable=vtable,
vcolName="vectorIdColumn",
vconnection=vconnection)+1
#' Ensure sqlQuery constructed meets limits
#' namely max size(1MB) and max nestings(140-147)
#'
#' If the result's SQL exceeds the platform limits, the bulkiest FL input is
#' materialised with store() and the operation is recomputed on that input.
#' @param pResult object whose constructSelect
#' needs to be within limits
#' @param pInput list of input objects
#' @param pOperator function which generated the pResult
#' @param pStoreResult Flag whether to store the pResult
#' @return pResult after storing transparently inputs
#' and recomputing the operation
#' @examples
#' cat("Below Example shows how expressions with number of nested queries exceeding the limit are handled:")
#' flm <- FLMatrix("tblmatrixMulti",3,"Matrix_id","ROW_ID","COL_ID","CELL_VAL")
#' flv <- as.FLVector(rnorm(25))
#' vexpression <- paste0(rep("flm+flv",17),collapse="+")
#' cat(vexpression)
#' cat("no.of Nested Queries: ",length(gregexpr("FROM",constructSelect(eval(parse(text=vexpression))))[[1]]))
#' vResult <- eval(parse(text=vexpression))
#' cat("no.of Nested Queries in Result: ",length(gregexpr("FROM",constructSelect(vResult))[[1]]))
ensureQuerySize <- function(pResult,
pInput,
pOperator,
pStoreResult=FALSE,
...)
{
##browser()
if(checkQueryLimits(pResult))
{
## size in bytes of each input's generated SQL; 0 for non-FL inputs
vQuerySizes <- sapply(pInput,
FUN=function(x)
ifelse(is.FL(x),
object.size(constructSelect(x,...)),
0))
## NOTE(review): which.max() returns integer(0) -- never 0 -- for an empty
## vector, so the ==0 guard below would error on an empty pInput; presumably
## callers always pass at least one input -- confirm.
vbulkyInput <- which.max(vQuerySizes)
if(vbulkyInput==0)
return(pResult)
## materialise the largest input, then redo the whole operation
pInput[[vbulkyInput]] <- store(pInput[[vbulkyInput]])
return(do.call(pOperator,pInput))
}
else
{
if(pStoreResult) return(store(pResult,...))
else return(pResult)
}
}
#' Interpret a y/n user response (case-insensitive).
#' @param pInput character scalar, "y" or "n".
#' @return TRUE for "y", FALSE for "n"; errors on anything else.
checkYorN <- function(pInput)
{
    pInput <- toupper(pInput)
    if(pInput=="N") return(FALSE)
    else if(pInput=="Y") return(TRUE)
    else stop("invalid input. Expected y or n")
}
#' Pass through a sqlQuery() result, converting the length-2 character
#' vector that RODBC returns on failure into a proper R error.
checkSqlQueryOutput <- function(pObject)
{
    if(!is.data.frame(pObject) && length(pObject)==2 && is.character(pObject))
        stop("Error in Query:-",pObject[1])
    else return(pObject)
}
#' Wrap a name in single quotes for use as a SQL string literal.
fquote <- function(pname) return(paste0("'",pname,"'"))
#' Validate that pObject is a formula whose variables all occur in pData.
#' Fixes: inherits() replaces the fragile class(x)!="formula" comparison,
#' and a setdiff() check replaces the side-effect-only sapply() loop while
#' keeping the same error message for the first missing variable.
checkValidFormula <- function(pObject,pData)
{
    if(!inherits(pObject,"formula"))
        stop("invalid formula object")
    vallVars <- base::all.vars(pObject)
    vmissing <- setdiff(vallVars, colnames(pData))
    if(length(vmissing) > 0)
        stop(vmissing[1]," not in colnames of data\n")
    invisible(NULL)
}
#' @export
## TRUE/FALSE: does the given table exist on the remote platform?
## tableName may be qualified as "db.table"; an embedded db name must agree
## with an explicitly supplied databaseName.  On Teradata the dbc.tables
## catalog is queried; on other platforms existence is probed by running a
## LIMIT-1 SELECT and checking whether it errors.
checkRemoteTableExistence <- function(databaseName=getOption("ResultDatabaseFL"),
tableName)
{
## shortcut in case of a results table -- setup during session start, assumed to not having been dropped
# if(tableName %in% getOption("resultTablesFL")) return(TRUE)
## check if tableName has database
if(grepl(".",tableName,fixed=TRUE)){
vdb <- strsplit(tableName,".",fixed=TRUE)[[1]][1]
vtbl <- strsplit(tableName,".",fixed=TRUE)[[1]][2]
if(!missing(databaseName) && vdb!=databaseName)
stop("databaseName and database included in tableName dont match \n ")
databaseName <- vdb
tableName <- vtbl
}
if(is.TD()){
## Teradata: look the table up in the system catalog
vtemp <- sqlQuery(getFLConnection(),paste0(
"SELECT 1 FROM dbc.tables \n ",
" WHERE databaseName = ",fquote(databaseName),
" AND tablename = ",fquote(tableName)))
if(!is.na(vtemp[1,1]) && vtemp[1,1]==1)
return(TRUE)
else return(FALSE)
}
else{
### No table in Aster that stores MetaInfo!
if(is.Hadoop())
tableName <- paste0(databaseName,".",tableName)
## probe: a 1-row SELECT succeeds iff the table exists
vsqlstr <- limitRowsSQL(paste0("SELECT * FROM \n ",
tableName," \n "),1)
vtemp <- tryCatch(sqlQuery(getFLConnection(),
vsqlstr),error=function(e)FALSE)
if(is.data.frame(vtemp))
return(TRUE)
else return(FALSE)
}
}
#' @export
## Argument-order-swapped convenience wrapper around checkRemoteTableExistence().
existsRemoteTable <- function(tableName,
                              databaseName = getOption("ResultDatabaseFL")) {
    checkRemoteTableExistence(databaseName, tableName)
}
## Reorder/select input columns by the given index vector.
rearrangeInputCols <- function(pInputCols,
                               pIndex) {
    pInputCols[pIndex]
}
## Split an optionally qualified "db.table" name into its two parts,
## defaulting the database to option "ResultDatabaseFL".
separateDBName <- function(vtableName) {
    names(vtableName) <- NULL
    if (grepl(".", vtableName, fixed = TRUE)) {
        vparts <- strsplit(vtableName, ".", fixed = TRUE)[[1]]
        vdatabase <- vparts[1]
        vtableName <- vparts[2]
    } else {
        vdatabase <- getOption("ResultDatabaseFL")
    }
    vres <- c(vdatabase = vdatabase, vtableName = vtableName)
    names(vres) <- c("vdatabase", "vtableName")
    vres
}
## Strip any alias decoration from a name via changeAlias().
removeAlias <- function(pName) {
    changeAlias(pName, "", "")
}
## TRUE when the object's WHERE clause has any non-empty condition.
hasWhereClause <- function(pObject) {
    length(setdiff(constructWhere(pObject), "")) > 0
}
## TRUE when the object's select slot is a table-function query.
hasSQLSelect <- function(pObject) {
    inherits(pObject@select, "FLTableFunctionQuery")
}
|
978fe00482b0d953bde367ecba9d261cab4ad6c1
|
51c56c6dd891f2f5899bc8ddb0082b8dc56e0230
|
/cachematrix.R
|
f298529610b790b41b57e0a96d9362813578f9d2
|
[] |
no_license
|
nsamady/ProgrammingAssignment2
|
6f5f03f07388459d5a42dc2b46d5bb9342f42a60
|
17d6b1228931792862477a4a3447ad7c17cf9f53
|
refs/heads/master
| 2021-04-29T11:33:08.756903
| 2017-01-02T08:49:02
| 2017-01-02T08:49:02
| 77,819,390
| 0
| 0
| null | 2017-01-02T08:08:19
| 2017-01-02T08:08:18
| null |
UTF-8
|
R
| false
| false
| 1,153
|
r
|
cachematrix.R
|
## makeCacheMatrix creates a special matrix object, and then cacheSolve
## will calculate the inverse of the matrix.
## If the matrix inverse has already been calculated, it will instead
## find it in the cache and return it instead of calculating it again
## Create a cache-aware matrix wrapper: a list of four closures sharing the
## matrix `x` and its memoised inverse.  set() replaces the matrix and
## invalidates the cache; setinv()/getinv() store and read the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
    cached_inverse <- NULL
    set <- function(y) {
        x <<- y
        cached_inverse <<- NULL
    }
    get <- function() x
    setinv <- function(inverse) cached_inverse <<- inverse
    getinv <- function() cached_inverse
    list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## The function cacheSolve returns the inverse of a matrix A created with
## the makeCacheMatrix function.
## If the cached inverse is available, cacheSolve will retrieve it
## while if the is inverse is not availiable, the function will
##compute it, cache it , and then return it
## Return the inverse of the special matrix `x` made by makeCacheMatrix().
## A previously computed inverse is served from the cache (with a message);
## otherwise the inverse is computed with solve(), cached, and returned.
cacheSolve <- function(x, ...) {
    cached <- x$getinv()
    if (!is.null(cached)) {
        message("getting cached data")
        return(cached)
    }
    inverse <- solve(x$get(), ...)
    x$setinv(inverse)
    inverse
}
|
8ac6c470bfe01da99396d8e3f42a56ef8b600040
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/StatDA/R/treesold.R
|
f5630f124811010c7fdccd573f95b09d7cd91080
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 15,258
|
r
|
treesold.R
|
# plot trees as multivariate graphics
#
## S4 class "branch": one node of a binary tree used to draw an observation
## as a tree glyph.  Slots:
##   LR          - 0 = branch points left, 1 = points right
##   w           - branch angle in degrees
##   h           - branch height
##   El          - indices of the variables collected in this subtree
##   LeafL/LeafR - child branches (NULL at the leaves)
##   Bole        - position code 0..5; 2 and 3 mark the trunk ("Bole") itself
## NOTE(review): the "branch" slot type is self-referential and the prototype
## uses NULL for those slots; setClass() warns about the not-yet-defined class
## when this file is first sourced.
setClass("branch",representation(LR="numeric",
w="numeric",h="numeric",El="numeric",LeafL="branch",LeafR="branch",Bole="numeric"),
prototype(LR=0,w=0,h=1,El=0,LeafL=NULL,LeafR=NULL,Bole=2))
## show() method for "branch": print a summary of this node and, one level
## deep, of its two children.
setMethod("show", "branch",function(object){
cat("Tree with ",length(object@El)," Elements \n")
# number of elements in this subtree
if(object@LR==0) cat("shows to the left \n")
# direction of the branch
else cat("shows to the rigth \n")
cat("Bole: ",object@Bole," \n")
# branch or part of the trunk
cat("angle: ",object@w," \n") # angle
cat("heigh: ",object@h," \n") # height
if((object@Bole==2)|(object@Bole==3))
("Tree is part of the Bole")
# NOTE(review): the bare string above is not auto-printed inside a function;
# its value is discarded, so this branch produces no output.
if(class(object@LeafL)!="NULL") {
# same display for the left branch
print("Left Branch")
cat("Tree with ",length(object@LeafL@El)," Elements \n")
if(object@LeafL@LR==0) cat("shows to the left \n")
else cat("shows to the rigth \n")
cat("Bole: ",object@LeafL@Bole," \n")
cat("angle: ",object@LeafL@w," \n")
cat("heigh: ",object@LeafL@h," \n")
if((object@LeafL@Bole==2)|(object@LeafL@Bole==3))
("Tree is part of the Bole")
}
if(class(object@LeafR)!="NULL") {
# same display for the right branch
# NOTE(review): the label below says "Left Branch" although this section
# prints the RIGHT branch -- looks like a copy-paste slip in the original.
print("Left Branch")
cat("Tree with ",length(object@LeafR@El)," Elements \n")
if(object@LeafR@LR==0) cat("shows to the left \n")
else cat("shows to the rigth \n")
cat("Bole: ",object@LeafR@Bole," \n")
cat("angle: ",object@LeafR@w," \n")
cat("heigh: ",object@LeafR@h," \n")
if((object@LeafR@Bole==2)|(object@LeafR@Bole==3))
("Tree is part of the Bole")
}
if(class(object@LeafL)=="NULL") ("Tree is Leaf") # single-element branch?
})
## plot() method for "branch": draw the branch as a parallelogram whose width
## is the number of elements and whose slant comes from the h (height) and
## w (angle, degrees) slots, then recurse into both children at the tip.
setMethod("plot", "branch",function(a,x,y,len=1,lh=1,leg=NULL,...){
# a: branch, x/y: base coordinates, len: global zoom factor,
# lh: multiplier applied to the branch height
points=matrix(0,nrow=4,ncol=2)
# matrix holding the four corner points of the branch polygon
if(a@LR==0) { # branch pointing to the left
points[1,1]=x
points[1,2]=y
points[2,1]=points[1,1]-lh*len*a@h*sin(a@w*2*pi/360)
points[2,2]=points[1,2]+lh*len*a@h*cos(a@w*2*pi/360)
points[3,1]=points[2,1]+len*length(a@El)
points[3,2]=points[2,2]
points[4,1]=points[1,1]+len*length(a@El)
points[4,2]=points[1,2]
polygon(points)
# label single-element leaves with the variable name from leg
if((class(leg)!="NULL")&length(a@El)==1) {
text(xy.coords(points[2,1],points[2,2]),colnames(leg)[a@El],cex=0.8,adj=c(0.3,-0.2))
}
}
if(a@LR==1) { # branch pointing to the right
points[1,1]=x
points[1,2]=y
points[2,1]=points[1,1]+lh*len*a@h*sin(a@w*2*pi/360)
points[2,2]=points[1,2]+lh*len*a@h*cos(a@w*2*pi/360)
points[3,1]=points[2,1]+len*length(a@El)
points[3,2]=points[2,2]
points[4,1]=points[1,1]+len*length(a@El)
points[4,2]=points[1,2]
polygon(points)
if((class(leg)!="NULL")&length(a@El)==1) {
text(xy.coords(points[2,1],points[2,2]),colnames(leg)[a@El],cex=0.8,adj=c(0,-0.2))
}
}
# recursive call of plot for both children, shifted to the branch tip
if(class(a@LeafL)!="NULL") {
if(a@LR==0) plot(a@LeafL,x-lh*len*a@h*sin(a@w*2*pi/360),y+lh*len*a@h*cos(a@w*2*pi/360),len,lh,leg)
if(a@LR==1) plot(a@LeafL,x+lh*len*a@h*sin(a@w*2*pi/360),y+lh*len*a@h*cos(a@w*2*pi/360),len,lh,leg)
}
if(class(a@LeafR)!="NULL") {
if(a@LR==0) plot(a@LeafR,x-lh*len*a@h*sin(a@w*2*pi/360)+len*length(a@LeafL@El),y+lh*len*a@h*cos(a@w*2*pi/360),len,lh,leg)
if(a@LR==1) plot(a@LeafR,x+lh*len*a@h*sin(a@w*2*pi/360)+len*length(a@LeafL@El),y+lh*len*a@h*cos(a@w*2*pi/360),len,lh,leg)
}
})
## Convert a matrix of cutree() memberships (one row per cut level, one
## column per variable) into a recursive "branch" tree.
conv<-function(x,i=1){
#x: matrix of cluster memberships from cutree() on an hclust object
#i: current cut level (row index)
a<-new("branch") # create a new branch node
a@El<-x[length(as.matrix(x[,1])),]
# store the element (variable) indices taken from the last row
if(length(x[1,])==1) return(a) # single column: leaf reached, stop
repeat{ # find the first level at which this cluster splits
i=i+1
if(prod(x[i,]==x[i,1])!=1) break
}
# partition the columns into the two sub-clusters present at level i
xl<-as.matrix(x[i:(length(x)/length(x[1,])),x[i,1]==x[i,]])
xr<-as.matrix(x[i:(length(x)/length(x[1,])),x[i,1]!=x[i,]])
# recursive call of conv for both children
a@LeafL<-conv(xl)
a@LeafR<-conv(xr)
return(a)
}
## Recursively assign left/right orientation (LR) and trunk-position codes
## (Bole) to every node, swapping children so that the larger subtree stays
## on the trunk side.  Bole codes: 0/1 = left of trunk, 2/3 = trunk itself
## (pointing left/right), 4/5 = right of trunk.
## NOTE(review): when x@LeafL is not a "branch" the outer if has no else and
## the function returns NULL rather than x -- callers only invoke it on
## nodes with children, so this path appears unused; confirm.
setlr<-function(x) {
if(class(x@LeafL)=="branch") { # make sure there are child branches
if((x@Bole==0)|(x@Bole==1)){ # node lies left of the trunk
if(length(x@LeafL@El)<length(x@LeafR@El)) {
y<-x@LeafL # if the right continuation
x@LeafL<-x@LeafR # is larger than the left one:
x@LeafR<-y # swap the children
}
x@LeafL@Bole=0 # left of trunk, points left
x@LeafR@Bole=1 # left of trunk, points right
}
if(x@Bole==2){ # trunk pointing to the left
if(length(x@LeafL@El)>length(x@LeafR@El)) {
y<-x@LeafL # if the left continuation
x@LeafL<-x@LeafR # is larger than the right one:
x@LeafR<-y # swap the children
}
x@LeafL@Bole=0 # left of trunk, points left
x@LeafR@Bole=3 # continuation of trunk, points right
}
if(x@Bole==3){ # trunk pointing to the right
if(length(x@LeafL@El)<length(x@LeafR@El)) {
y<-x@LeafL # if the right continuation
x@LeafL<-x@LeafR # is larger than the left one:
x@LeafR<-y # swap the children
}
x@LeafL@Bole=2 # continuation of trunk, points left
x@LeafR@Bole=5 # right of trunk, points right
}
if((x@Bole==4)|(x@Bole==5)){ # node lies right of the trunk
if(length(x@LeafL@El)>length(x@LeafR@El)) {
y<-x@LeafL # if the left continuation
x@LeafL<-x@LeafR # is larger than the right one:
x@LeafR<-y # swap the children
}
x@LeafL@Bole=4 # right of trunk, points left
x@LeafR@Bole=5 # right of trunk, points right
}
x@LeafL@LR=0 # left child points left
x@LeafR@LR=1 # right child points right
# recurse only into children that are not single-element leaves
if(length(x@LeafL@El)>1) x@LeafL<-setlr(x@LeafL)
if(length(x@LeafR@El)>1) x@LeafR<-setlr(x@LeafR)
return(x)
}
}
## Recursively assign branch angles (slot w) from the dissimilarity matrix:
## the total angle w at a node interpolates (on a log scale) between wmin for
## the most homogeneous cluster and wmax for the most heterogeneous one, and
## is then split between the children proportionally to their element counts
## (inversely so on the trunk).
setw<-function(x,unm,wmax=0,wmin=0) {
# x: branch, unm: dissimilarity matrix
# wmax: angle for the cluster containing all elements
# wmin: angle for the most homogeneous 2-element cluster
if((class(x@LeafL)=="branch") & (class(x@LeafR)=="branch")) {
gX=max(unm[names(x@El),x@El[1]])
# cluster maximum in the complete-linkage sense:
# from the first element to all others
if(length(x@El)>1) {
# NOTE(review): length(x@El[1]) is always 1, so this loop runs 2:1;
# presumably length(x@El) was intended -- confirm against upstream usage.
for(i in 2:length(x@El[1])) {
gX<-c(gX,max(unm[names(x@El),x@El[i]]))
}
# from the remaining elements to all others
}
gX<-max(gX) # maximum heterogeneity of the elements in this cluster
gA<-max(unm) # maximum over all variables
gmin<-min(unm[unm!=0]) # minimum over all variables
w<-(wmin*(log(gA+1)-log(gX+1))+wmax*(log(gX+1)-log(gmin+1)))/(log(gA+1)-log(gmin+1))
if((x@LeafL@Bole!=2)&(x@LeafL@Bole!=3)&(x@LeafR@Bole!=2)&(x@LeafR@Bole!=3)) {
x@LeafL@w<-w*length(x@LeafL@El)/length(x@El)
x@LeafR@w<-w*length(x@LeafR@El)/length(x@El)
# ordinary branch: split proportionally to element counts
}
if((x@LeafL@Bole==2)|(x@LeafL@Bole==3)|(x@LeafR@Bole==2)|(x@LeafR@Bole==3)) {
x@LeafL@w<-w*length(x@LeafR@El)/length(x@El)
x@LeafR@w<-w*length(x@LeafL@El)/length(x@El)
# trunk: split inversely proportionally
}
# recursive call for children that themselves have children
if((class(x@LeafL@LeafL)=="branch") & (class(x@LeafL@LeafR)=="branch")) {
x@LeafL<-setw(x@LeafL,unm,wmax,wmin)
}
if((class(x@LeafR@LeafL)=="branch") & (class(x@LeafR@LeafR)=="branch")) {
x@LeafR<-setw(x@LeafR,unm,wmax,wmin)
}
return(x)
}
}
## Recursively fill the h slot of every node with the mean of observation
## i's values (row i of a) over the variables belonging to that subtree.
## x: "branch" object; a: numeric value matrix; i: row (observation) index.
seth <- function(x, a, i) {
    if (class(x) == "branch") {
        # average of row i over the variable indices stored in this node
        vmean <- sum(a[i, x@El]) / length(x@El)
        x@h <- vmean
        # descend only when both children are branches
        if ((class(x@LeafL) == "branch") & (class(x@LeafR) == "branch")) {
            x@LeafL <- seth(x@LeafL, a, i)
            x@LeafR <- seth(x@LeafR, a, i)
        }
    }
    x
}
#tree<-function (x,locations=NULL,wmax=0,wmin=0,len=1,lh=1,legco=NULL,leglen=1, ...)
## Main entry point: cluster the columns of x by absolute correlation and
## draw one tree glyph per observation (like stars()), plus an optional key.
tree <-
function(x,wmax=0,wmin=0,lh=1,
labels = dimnames(x)[[1]], locations = NULL, nrow = NULL, ncol = NULL,
key.loc = NULL, key.labels = dimnames(x)[[2]], key.xpd = TRUE, xlim = NULL,
ylim = NULL, flip.labels = NULL, len=1, leglen=1, leglh=1,
axes = FALSE, frame.plot = axes, main = NULL,
sub = NULL, xlab = "", ylab = "", cex = 0.8, lwd = 0.25,
lty = par("lty"), xpd = FALSE, mar = pmin(par("mar"), 1.1 +
c(2 * axes + (xlab != ""), 2 * axes + (ylab != ""), 1,
0)), add = FALSE, plot = TRUE, ...)
{
# draw trees as multivariate graphics
#
# x ... multivariate data in form of matrix or data frame
# wmax, wmin ... maximum and minimum angle for the leaves of the tree
# lh ... multiplier for the branch heights
# labels ... vector of character strings for labeling the plots
# locations ... locations for the boxes on the plot (e.g. X/Y coordinates)
# nrow, ncol ... integers giving the number of rows and columns to use when
# 'locations' is 'NULL'. By default, 'nrow == ncol', a square
# layout will be used.
# key.loc ... vector with x and y coordinates of the unit key.
# key.labels: vector of character strings for labeling the segments of
# the unit key. If omitted, the second component of
# 'dimnames(x)' is used, if available.
# key.xpd: clipping switch for the unit key (drawing and labeling), see
# 'par("xpd")'.
# xlim: vector with the range of x coordinates to plot.
# ylim: vector with the range of y coordinates to plot.
# flip.labels: logical indicating if the label locations should flip up
# and down from diagram to diagram. Defaults to a somewhat
# smart heuristic.
# len, leglen, leglh: multiplicative values for the space of the labels on the legend
# axes: logical flag: if 'TRUE' axes are added to the plot.
# frame.plot: logical flag: if 'TRUE', the plot region is framed.
# main: a main title for the plot.
# sub: a sub title for the plot.
# xlab: a label for the x axis.
# ylab: a label for the y axis.
# cex: character expansion factor for the labels.
# lwd: line width used for drawing.
# lty: line type used for drawing.
# xpd: logical or NA indicating if clipping should be done, see
# 'par(xpd = .)'.
# mar: argument to 'par(mar = *)', typically choosing smaller
# margings than by default.
# add: logical, if 'TRUE' _add_ boxes to current plot.
# plot: logical, if 'FALSE', nothing is plotted.
# ...: further arguments, passed to the first call of 'plot()', see
# 'plot.default' and to 'box()' if 'frame.plot' is true.
if (is.data.frame(x))
x <- data.matrix(x)
else if (!is.matrix(x))
stop("'x' must be a matrix or a data frame")
if (!is.numeric(x))
stop("data in 'x' must be numeric")
n.loc <- nrow(x)
n.seg <- ncol(x)
## derive a grid of glyph locations when none are given (stars()-style)
if (is.null(locations)) {
if (is.null(nrow))
nrow <- ceiling(if (!is.numeric(ncol)) sqrt(n.loc) else n.loc/ncol)
if (is.null(ncol))
ncol <- ceiling(n.loc/nrow)
if (nrow * ncol < n.loc)
stop("nrow * ncol < number of observations")
ff <- if (!is.null(labels)) 2.3
else 2.1
locations <- expand.grid(ff * 1:ncol, ff * nrow:1)[1:n.loc, ]
if (!is.null(labels) && (missing(flip.labels) || !is.logical(flip.labels)))
flip.labels <- ncol * mean(nchar(labels, type = "c")) > 30
}
else {
if (is.numeric(locations) && length(locations) == 2) {
locations <- cbind(rep.int(locations[1], n.loc),
rep.int(locations[2], n.loc))
if (!missing(labels) && n.loc > 1)
warning("labels do not make sense for a single location")
else labels <- NULL
}
else {
if (is.data.frame(locations))
locations <- data.matrix(locations)
if (!is.matrix(locations) || ncol(locations) != 2)
stop("'locations' must be a 2-column matrix.")
if (n.loc != nrow(locations))
stop("number of rows of 'locations' and 'x' must be equal.")
}
if (missing(flip.labels) || !is.logical(flip.labels))
flip.labels <- FALSE
}
xloc <- locations[, 1] # extract the plotting coordinates
yloc <- locations[, 2]
x[is.na(x)] <- 0
mx <- max(x <- x * len)
if (is.null(xlim))
xlim <- range(xloc) + c(-mx, mx)
if (is.null(ylim))
ylim <- range(yloc) + c(-mx, mx)
op <- par(mar = mar, xpd = xpd)
on.exit(par(op))
if (!add)
plot(0, type = "n", ..., xlim = xlim, ylim = ylim, main = main,
sub = sub, xlab = xlab, ylab = ylab, asp = 1, axes = axes)
if (!plot)
return()
corm = cor(x) # correlation-based dissimilarity matrix
unm = 1-abs(corm)
unm[upper.tri(unm)]<-0
hcl = hclust(dist(unm)) # build the hierarchical clustering of the columns
bcl=matrix(0,nrow=ncol(x),ncol=ncol(x))
# record the cluster memberships at every cut level
for(i in 1:length(x[1,])) bcl[i,]=cutree(hcl,k=i)
colnames(bcl)<-c(names(cutree(hcl,k=length(x[1,]))))
a<-conv(bcl) # convert into an object of class "branch"
a<-setlr(a) # fix the left/right branching layout
a<-setw(a,unm,wmax=wmax,wmin=wmin) # assign the branch angles
dmin=apply(x,2,min)
dmax=apply(x,2,max)
q=(x-rep(1,nrow(x))%*%t(dmin))/(rep(1,nrow(x))%*%t(dmax-dmin))
# normalise the variable values to [0, 1]
for(i in 1:length(locations[,1])) {
# draw one tree glyph per observation/location
a<-seth(a,q,i)
plot(a,xloc[i]-len*lh,yloc[i],len,lh)
}
if (!is.null(labels)) {
y.off <- mx
if (flip.labels)
y.off <- y.off + cex * par("cxy")[2] * ((1:n.loc)%%2 - 0.4)
text(xloc, yloc - y.off/2, labels, cex = cex, adj = c(0.5, 1))
}
## optional unit key: a reference tree with linearly decreasing heights
if (!is.null(key.loc)) {
par(xpd = key.xpd)
xleg=key.loc[1]
yleg=key.loc[2]
legh=matrix(seq(from=length(x[1,]),to=1,length=length(x[1,])),nrow=1,ncol=length(x[1,]))
a<-seth(a,legh,1)
plot(a,xleg,yleg,leglen,leglh,bcl)
}
if (frame.plot)
box(...)
invisible(locations)
}
|
f2aff405ac96995b204519acae92b9722a51e1a8
|
55e042f05ee3da0db86ecfb806c0e695382a843d
|
/R/pkgdepends.R
|
cd5fa65ce2aee438362a3e9f68a3a47b21311ea8
|
[
"MIT"
] |
permissive
|
r-lib/pkgdepends
|
f507dfe031e34c994311ca9a139dda9a6d7e016a
|
a0f5132320498780c8b87ce8eb4f66e754906376
|
refs/heads/main
| 2023-08-03T15:56:48.339228
| 2023-07-19T09:13:42
| 2023-07-19T09:13:42
| 102,942,545
| 86
| 23
|
NOASSERTION
| 2023-09-11T20:44:59
| 2017-09-09T09:17:38
|
R
|
UTF-8
|
R
| false
| false
| 378
|
r
|
pkgdepends.R
|
# nocov start
#' @description
#' pkgdepends is a toolkit for package dependencies, downloads and
#' installations, to be used in other packages. If you are looking for a
#' package manager, see [pak](https://github.com/r-lib/pak).
#'
#' @includeRmd tools/doc/README-body.Rmd
"_PACKAGE"
## Reference one object from each Imports-only dependency so R CMD check
## does not flag R6 and processx as unused; never called for its value.
fix_check <- function() {
R6::R6Class
processx::process
invisible()
}
# nocov end
|
caea8843209fc602297f1fb0fc0f8043d194f624
|
823829085704d2c5fa88be0b2a1092eee4678b8c
|
/scripts/Rscripts/utilities.R
|
87f0385a96c4edecdc5b143a5bdb28c18e755003
|
[] |
no_license
|
amunzur/CHIP_project
|
a90e0ec5bc8741a44f318d89f2672badfb64eb70
|
8401f2a59b91a0bf7548c09db0d24337e4acf245
|
refs/heads/main
| 2023-07-18T05:51:15.959496
| 2021-09-07T21:26:53
| 2021-09-07T21:26:53
| 379,702,512
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 852
|
r
|
utilities.R
|
## Shared ggplot2 theme used by the plotting scripts: blank panel border and
## grid, black axis lines/ticks, bold axis titles, larger legend text, and
## extra margins around the axis titles.  ggplot2 must be attached before
## this file is sourced, since theme() is evaluated immediately.
cool_theme <-
theme(panel.border = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.line = element_line(colour = "black", size = 1),
axis.ticks = element_line(colour = "black", size = 2),
axis.text = element_text(size=10),
axis.text.x = element_text(vjust=0.5, colour = "black", size=8),
axis.text.y = element_text(vjust=0.5, colour = "black", size=6),
axis.title = element_text(size=10,face="bold"),
legend.title = element_text(color = "black", size = 12),
legend.text = element_text(color = "black", size = 12),
axis.ticks.length=unit(0.15, "cm"),
axis.title.x = element_text(margin = margin(t = 20, r = 0, b = 0, l = 20)),
axis.title.y = element_text(margin = margin(t = 0, r = 20, b = 0, l = 0)))
|
84d5ef490b0ba93967625c71cf36466507b0d049
|
4e335d85c9210de018e0c4c057b4f00b2e9246a4
|
/Day03/Session01.R
|
eeeb3cbbba43ea1a026f773001546ef6315f3bd3
|
[] |
no_license
|
isrt09/Statistical_Analysis_with_R
|
0b226de7da9b06a2235d59a7de98d7edf2a75daf
|
65047800890fa29130e111373234d8c3247aa615
|
refs/heads/main
| 2023-06-24T06:45:15.707458
| 2021-07-24T14:55:42
| 2021-07-24T14:55:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 640
|
r
|
Session01.R
|
# Sequence numbers in R
## Tutorial script: regular sequences with seq()/rep().
x <- seq(1,10)
x
x<- seq(1,10,length=100)
x
x<- c(seq(1,10,length=5),30,40)
x
## NOTE(review): length=.5 is an odd length.out value -- presumably a typo
## for 5 in the course material.
x<- rep(seq(1,3,length=.5),3)
x
x<-rep(seq(1,3,0.5),each=3)
x
# Random number
## sampling with and without replacement, and draws from common distributions
x <- sample(100,12)
x
y<- 1:100
n<- sample(y,5)
n
m<- sample(10,50, replace = TRUE)
m
n<- sample(10,50, replace = TRUE)
n
p<- sample(10,50, replace = TRUE)
p
q<- rnorm(100,10)
q
r<- rnorm(100,10,5)
r
s<- rnorm(100)
s
u<- runif(100,1,5)
u
e<- rexp(100,1/50)
e
## empty vectors, NULL, and how c() combines/coerces values
emp<- c()
emp
class(emp)
typeof(emp)
null<- NULL
null
class(null)
typeof(null)
z<- vector()
z
class(z)
typeof(z)
a<-c(1,2,3)
a
b<- c(1,2,1,1,1,2,1)
a<- c(1,2,3,b)
a
typeof(a)
|
86e5a6014449820d751752936fd4f24a9472e07e
|
86ab62ed514326b49a142335a1f0a88dbcf1d962
|
/rt-crowd-forecast/update-rt.R
|
2d0698fb6c8bf38d9d8bffec8d4e3a420aeb1f9b
|
[
"MIT"
] |
permissive
|
5l1v3r1/covid.german.forecasts
|
f29b53ac135eab90772f20d4960a19f76a59bdeb
|
fb93ec15fadb08350a6b508a4ee960d8210c600e
|
refs/heads/master
| 2023-05-08T17:47:04.222681
| 2021-05-24T05:48:45
| 2021-05-24T05:48:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 682
|
r
|
update-rt.R
|
# update-rt-data by copying it from the Rt folder
## Copies the latest EpiNow2 Rt-estimation outputs for each location from the
## rt-forecast results tree into the rt-crowd-forecast raw-data tree.
library(here)
library(covid.german.forecasts)
locations <- c("Germany", "Poland")
## dated results folder: the most recent weekday (helper from the package)
date <- latest_weekday()
for (location in locations) {
file_names <- c("summarised_estimates.rds", "estimate_samples.rds",
"model_fit.rds", "model_args.rds", "reported_cases.rds")
target_dir <- here("rt-crowd-forecast", "data-raw", "rt-epinow-data",
location)
## make sure the destination directory exists (package helper)
check_dir(target_dir)
for (file_name in file_names) {
file.copy(from = here("rt-forecast", "data", "samples", "cases",
location, date, file_name),
to = here(target_dir, file_name))
}
}
|
fe4b03a48974e7e7a48ea9d7b11a427d37b5f458
|
eff7ef52692c5fc92667f9627ff621ce43843aa2
|
/man/ec.theme.Rd
|
4e80499d9a7a03921db69b9b53face3ddacef7d7
|
[
"Apache-2.0"
] |
permissive
|
statunizaga/echarty
|
9e718e18dd74bab0fde39932a9a92255a30547ec
|
7e5f794e8e86c3363b8df2cf455022d9d99b6f57
|
refs/heads/main
| 2023-06-24T02:45:40.669501
| 2021-07-29T17:57:18
| 2021-07-29T17:57:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,066
|
rd
|
ec.theme.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/echarty.R
\name{ec.theme}
\alias{ec.theme}
\title{Themes}
\usage{
ec.theme(wt, name, code = NULL)
}
\arguments{
\item{wt}{An \code{echarty} widget as returned by \link{ec.init}}
\item{name}{Name of existing theme file (without extension), or name of custom theme defined in \code{code}.}
\item{code}{Custom theme as JSON formatted string, default NULL.}
}
\value{
An \code{echarty} widget.
}
\description{
Apply a pre-built or custom coded theme to a chart
}
\details{
Just a few built-in themes are included in folder \code{inst/themes}. The entire collection could be found \href{https://github.com/apache/echarts/tree/master/theme}{here} and copied if needed.\cr
To create custom themes or view predefined ones, visit \href{https://echarts.apache.org/en/theme-builder.html}{this site}.
}
\examples{
mtcars \%>\% ec.init() \%>\% ec.theme('dark-mushroom')
cars \%>\% ec.init() \%>\% ec.theme('mine', code=
'{"color": ["green","#eeaa33"],
"backgroundColor": "lemonchiffon"}')
}
|
9971ef9eea486312587a0e10d0eae13b5155f18a
|
45079c348da4eac1f6916b1a082c801e14fea0fe
|
/ch4.R
|
337695b798ce18a17aaf040a1b114cad1defc9f7
|
[] |
no_license
|
guhjy/ggplot2_book
|
e5780d4c0314d231835cb840508e6abc1ea5d1bd
|
65c5e1d3bdcd32f58a2d78ac229c927d4bdc4c5d
|
refs/heads/master
| 2021-04-28T20:11:17.711546
| 2012-04-12T02:49:14
| 2012-04-12T02:49:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,161
|
r
|
ch4.R
|
require(ggplot2)
data(diamonds)
#*************************************************************************
# 4.2 Creating a Plot *
#*************************************************************************
# when you use qplot(), it does a lot of things automatically for you.
# to create the plot objects themselves, use ggplot(). It has two arguments:
# data and aesthetic mapping.
p <- ggplot(diamonds, aes(carat, price, colour = cut))
#*************************************************************************
# 4.3 LAYERS *
#*************************************************************************
# a minimal layer may do nothing more than specify a geom
p <- p + layer(geom = 'point')
# no layers currently
p <- ggplot(diamonds, aes(carat, price, colour = cut))
p <- p + layer(geom = 'point')
# layer can take any of these arguments
layer(geom, geom_params, stats, stat_params, data, mapping, position)
# a more complicated ggplot call
p <- ggplot(diamonds, aes(x = carat))
p <- p + layer(
geom = 'bar',
geom_params = list(fill = 'steelblue'),
stat = 'bin',
stat_params = list(binwidth = 0.5)
)
p
# it's specific but verbose
# Can simplify it by using shortcuts that rely on the fact that every geom
# is associated wiht a default statistic and position, and every statistic with
# a default geom.
# This means you that you only need to specify one of stat or geom to get a
# completely specificied layer, with parameters passed on to the geom or stat
# this is the same thing
p <- p + geom_histogram(binwidth = 2, fill = 'steelblue')
p
# All the shortcut functions have the same basic form, beginning with geom_ or
# stat_
geom_XXX(mapping, data, ... , geom, position)
stat_XXX(mapping, data, ... , stat, position)
Their common parameters:
mapping (optional): a set or aesthetic mappings, specified using the aes()
function and combined with the plot defaults as described in 4.5
data(optional): a dataset which overrides the default plot dataset. It is most
commonly omitted, in which case the layer will use the default plot data. See
Section 4.4
...: parameters for the geom or stat, such as bin width in the histogram or
bandwidth for a loess smoother. You can also use aesthetic properties as
parameters. When you do this you set the property to a fixed value, not map it
to a variable in the dataset. The example above showed setting the fill colour
of the histogram to 'steelblue'. See section 4.5.2 for more examples
geom or stat (optional): You can override the default stat for a geom, or the
default geom for a stat. This is a text string containing the name of the geom
to use. Using the default will give you a standard plot; overriding the default
allows you to achieve something more exotic; as shown in section 4.9.1
position (optional): Choose a method for adjusting overlapping objects, as shown
described in section 4.8
# The order of data and mapping arguments is switched between ggplot() and the
# layer functions. THis is because you almost always specify data for the plot,
# and almost always specify aesthetics - but not data - for the layers.
# Explicitly name all other arguments for readability
# Equivalents
# scatter plot, msleep is data, % of sleep on x, awake on y
ggplot(msleep, aes(sleep_rem / sleep_total, awake)) + geom_point()
qplot(sleep_rem / sleep_total, awake, data = msleep)
qplot(sleep_rem / sleep_total, awake, data = msleep) + geom_smooth()
qplot(sleep_rem / sleep_total, awake, data = msleep, geom = c('point','smooth'))
ggplot(msleep, aes(sleep_rem / sleep_total, awake)) + geom_point() +
geom_smooth()
# To explore a plot do summary(plot_object)
p <- ggplot(msleep, aes(sleep_rem / sleep_total, awake))
summary(p)
p <- p + geom_point()
# Layers are regular R objects that can be stored as variables, so you can write
# clean code. Create the layer once, add it many times.
bestfit <- geom_smooth(method = 'lm', se = T, colour = alpha('steelblue', 0.5),
size = 2)
qplot(sleep_rem, sleep_total, data=msleep) + bestfit
q <- qplot(awake, brainwt, data = msleep, log = 'y') + bestfit
q <- qplot(bodywt, brainwt, colour = genus, data = msleep, log = 'xy') + bestfit
#*************************************************************************
# 4.4 DATA *
#*************************************************************************
# The data must be a dataframe. That's the only restriction
p <- ggplot(mtcars, aes(mpg, wt, colour = cyl)) + geom_point()
mtcars <- transform(mtcars, mpg = mpg ^ 2)
# can change the data and update p
p %+% mtcars
# the data is stored in the plot object as a copy, not as a reference.
# If your data changes, the plot will not. Also plots are entirely
# self-contained so they can be save()d to disk and later load()ed and plotted
# without needing anyting else from that session
#*****************************************************************************
# 4.5 Aestetic Mappings *
#*****************************************************************************
aes(x = weigth, y = height, colour = age)
# dont refer to variables outside of dataset by like diamonds$carat
aes(x = weight, y = height, colour = sqrt(age))
# functions of variables can be used
# any varibale in aes() must be contained in plot or layer data
#********************** 4.5.1 Plots and Layers *****************************
# can add the aesthetics after creating plot
data(mtcars)
p <- ggplot(mtcars)
p <- p + aes(wt, hp)
# or at same time
p <- ggplot(mtcars, aes(wt, hp))
p <- ggplot(mtcars, aes(x = mpg, y = wt))
p + geom_point()
p + geom_point(aes(colour = factor(cyl)))
p + geom_point(aes(y = disp))
# Aesthetic mappings specified in a layer affect only that layer. For that reason,
# unless you modify the default scales, axis labels and legend titles will be
# based on the plot defaults. See section 6.5 to see how to change these.
#********************** 4.5.2 Setting v Mapping ******************************
# Instead of mapping an aesthetic property to a variable, you can set it to a
# single value by specifying it in the layer parameters.
# Aesthetics can vary for each observation being plotted, while parameters do
# not. We map an aesthetic to a variable (eg (aes(colour=cut))) or set it to a
# constant (eg colour='red').
p <- ggplot(mtcars, aes(mpg, wt))
p + geom_point(colour = 'darkblue') # This sets the point colour to be dark blue
# instead of black. This is quite different than
p + geom_point(aes(colour = 'darkblue')) # This maps (not sets) the colour to
# the value 'darkblue'. This effectively creates a new variable containing only
# the value 'darkblue' and then maps colour to that new variable.
# You can map with qplot by doing colour = I('darkblue')
#*********************** 4.5.3 Grouping *************************************
# in ggplot2, geoms can be roughly divided into invidual and collective geoms.
# point geoms has a single object for each observation
# polygons have multile
# lines and paths fall somewhere in between
# By default gropu is set to the interaction of all discrete variables in the
# plot. This often partitions the data correctly, but when it does not, or when
# no discrete variable is used in the plot, you will need to explicitly define
# the grouping structure, by mapping group to a variable that has a different
# value for each group. The intersection() function is a useful if a single
# pre-existing variable doesn't cleanly separate groups, but a combination does.
# Tutorial notes on ggplot2 grouping, geoms, stats and position adjustments
# (working through "ggplot2: Elegant Graphics for Data Analysis", ch. 4).
library(nlme)  # library() errors if the package is missing, unlike require()
data(Oxboys)
# There are three common cases where the default is not enough.
# 1. Multiple Groups, one aesthetic
# You want to separate your data into groups, but render them in the same way.
# When looking at the data in aggregate you want to be able to distinguish
# individual subjects, but not identify them. This is common in longitudinal
# studies with many subjects, where the plots are often descriptively called
# spaghetti plots.
p <- ggplot(Oxboys, aes(age, height)); p # this is gibberish
p <- ggplot(Oxboys, aes(age, height, group = Subject)) + geom_line()
# 2. Different groups on different layers
p + geom_smooth(aes(group = Subject), method = 'lm', se = FALSE) #smooths each line
p + geom_smooth(aes(group = 1), method = 'lm', se = FALSE, size = 2) #one smoothing
# line
# 3. Overriding the default grouping
# The plot has a discrete scale but you want to draw lines that connect across
# groups. This is a strategy used in interaction plots, profile plots, and
# parallel coordinate plots, among others. For example, we draw boxplots of
# height at each measurement occasion, as shown in the first figure in 4.5.
boysbox <- ggplot(Oxboys, aes(Occasion, height)) + geom_boxplot()
boysbox + geom_line(aes(group = Subject), colour = '#3366FF')
#*********************** 4.5.4 Matching aesthetics to graphic objects *******
# Another important issue with collective geom is how the aesthetics of the
# individual observations are mapped to the aesthetics of the complete entity.
# For individual geoms, this isn't a problem. However, high data densities can
# make it difficult or impossible to distinguish between individual points and
# in some sense the point geom becomes a collective geom, a single blob of
# points
# NOTE(review): 'df' is not defined anywhere in this chunk -- presumably
# created earlier in the tutorial; confirm before running this line.
xgrid <- with(df, seq(min(x), max(x), length = 50))
data(diamonds)
ggplot(diamonds, aes(color)) + geom_histogram()
ggplot(diamonds, aes(color, fill = cut)) + geom_histogram()
#Nice!
#*****************************************************************************
# 4.6 Geoms
#*****************************************************************************
# Geoms perform the actual rendering of the layer, controlling the type of plot
# you create. For example, using a point_geom will create a scatter plot, while
# using a line geom will create a line plot.
#*****************************************************************************
# 4.7 Stat
#*****************************************************************************
# A statistical transformation, or stat, transforms the data, typically by
# summarising it in some manner. For example, a useful stat is the smoother,
# which calculates the mean of y, conditional on x, subject to some restriction
# that ensures smoothness.
# To make sense in a graphic context a stat must be location-scale invariant.
# f(x+a) = f(x) + a and f(b * x) = b * f(x). This ensures that the
# transformation stays the same when you change the scales of the plot.
# A stat takes a dataset as input and returns a dataset as output, and so a stat
# can add new variables to the original dataset.
# like here where we map the new stat aesthetic ..density..
# the names of the generated variable must be surrounded with .. which prevents
# confusion in case the original dataset has the same name, and makes it clear
# to anyone else reading the code
ggplot(diamonds, aes(carat)) + geom_histogram(aes(y = ..density..), binwidth =
0.1)
# same thing in qplot
qplot(x = carat, y = ..density.., data = diamonds, geom='histogram', binwidth = 0.1)
#*****************************************************************************
# 4.8 Position Adjustments
#*****************************************************************************
# Position adjustments apply minor tweaks to the position of elements within a
# layer
# dodge - Adjust position by dodging overlaps to the side
# fill - stack overlapping objects and standardize have equal height
# identity - Don't adjust position
# jitter - jitter points to avoid overplotting
# stack - Stack overlapping objects on top of one another
#*****************************************************************************
# 4.9 Putting it all together
#*****************************************************************************
data(diamonds)
fig4.8a <- ggplot(diamonds, aes(clarity, fill = cut)) + geom_histogram()
fig4.8b <- ggplot(diamonds, aes(clarity, fill = cut)) +
  geom_bar(position = 'fill')
fig4.8c <- ggplot(diamonds, aes(clarity, fill = cut)) + geom_bar(position =
  'dodge')
# nice!
############################ 4.9.1 Combining Geoms and Stats ###############
# by connecting geoms with different statistics, you can easily create new
# graphics.
# variations on a histogram
d <- ggplot(diamonds, aes(carat)) + xlim(0, 3)
d + stat_bin(aes(ymax = ..count..), binwidth = 0.1, geom = 'area')
d + stat_bin(
  aes(size = ..density..), binwidth = 0.1,
  geom = 'point', position = 'identity')
d + stat_bin(
  aes(y = 1, fill = ..count..), binwidth = 0.1, geom = 'tile', position =
  'identity')
############################ 4.9.2 Displaying precomputed statistics ########
# If you have data which has already been summarised, and you just want to use
# it, you'll need to use stat_identity(), which leaves the data unchanged, and
# then map the appropriate variables to the right aesthetics
############################ 4.9.3 Varying aesthetics and data ##############
# You can plot two different datasets on different layers.
# One reason to do this: show actual values and predicted values
require(nlme, quiet = TRUE, warn.conflicts = FALSE)
model <- lme(height ~ age, data = Oxboys, random = ~1 + age | Subject)
oplot <- ggplot(Oxboys, aes(age, height, group = Subject)) + geom_line()
age_grid <- seq(-1, 1, length = 10)
subjects <- unique(Oxboys$Subject)
# expand.grid creates a dataframe from all combinations of supplied vectors or
# factors
preds <- expand.grid(age = age_grid, Subject = subjects)
preds$height <- predict(model, preds)
oplot + geom_line(data = preds, colour = '#3366FF', size = 0.4)
Oxboys$fitted <- predict(model)
Oxboys$resid <- with(Oxboys, fitted - height)
# BUG FIX: was aes(greuup=1) -- a misspelled aesthetic name that ggplot2
# silently ignores, leaving the default per-Subject grouping. The intent
# (one smooth across all subjects) requires group = 1.
oplot %+% Oxboys + aes(y = resid) + geom_smooth(aes(group = 1))
|
604c2b3af2fb5803bb41f079a34c711485d2a8a2
|
6151cde12944c1a41b263be4fe2745ba15fa0137
|
/R/zzz.R
|
b2c797e079ef0720b51c21e0f6b454e82557e890
|
[] |
no_license
|
cran/crq
|
31b8009817c95cd03a765926d478bda585eb66c2
|
4c61f0c4e6e8849e8c098a531638b4639f2b33f0
|
refs/heads/master
| 2016-09-06T14:43:09.870751
| 2007-10-04T00:00:00
| 2007-10-04T00:00:00
| 17,718,605
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 82
|
r
|
zzz.R
|
# Legacy package load hook: R calls .First.lib(libname, pkgname) when a
# package WITHOUT a namespace is attached via library(). Modern packages
# use .onLoad()/.onAttach() with a NAMESPACE file instead.
#
# lib: path of the library tree the package is installed in
# pkg: name of the package being attached
.First.lib<-function(lib,pkg) {
require(survival)            # attach survival; crq's functions depend on it
library.dynam("crq",pkg,lib) # load crq's compiled (shared object) code
}
|
4f38732c7f155055090edd324fd1588cedf06521
|
898a10dfdfa3b065e33d2b2557439cb94ffd3e1d
|
/parse_sql.R
|
4c561d8c0b97358003ce6fbc27661060a4945787
|
[] |
no_license
|
fmalmeida/rscripts
|
e9a783a9eac6dbf3691b2a789f4e9c3ba9dbf8e1
|
fd26423fccdc40c26da42d412db6e9cdb9dd9d7f
|
refs/heads/master
| 2021-07-21T02:30:33.166493
| 2021-07-14T13:21:39
| 2021-07-14T13:21:39
| 144,742,354
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,030
|
r
|
parse_sql.R
|
#!/usr/bin/Rscript
# Command-line front end: load dependencies, define the docopt usage string,
# parse arguments, validate them, and set outdir/prefix defaults.
suppressMessages(library(RSQLite))
suppressMessages(library(glue))
suppressMessages(library(stringr))
suppressMessages(library(DataCombine))
# Setting Help
'usage: parse_sql.R [--input=<file> --start=<int> --end=<int> --fofn=<file> --regex=<chr> --type=<chr> --prefix=<chr> --outdir=<chr>]
options:
-i, --input=<file> sqlite database outputed
-s, --start=<int> OPTIONAL: retrieve elements from this start position.
-e, --end=<int> OPTIONAL: retrieve elements until this end position.
-f, --fofn=<file> OPTIONAL: retrieve elements based on ids from fofn file.
-r, --regex=<chr> OPTIONAL: retrieve elements based on searching a pattern in specific column of GFF file. Example: "feature|resistance" will search for elements that \
have the pattern resistance in the feature column. GFF Columns: chr|source|feature|attributes. This input must be given inside double quotes.
-t, --type=<chr> Type of FASTA to output: nt|aa|both [default: both]
-p, --prefix=<chr> Output prefix
-d, --outdir=<chr> Output directory. Default: Current directory
At least one of the OPTIONAL parameters must be supplied. The others are required.
The script will treat the OPTIONAL arguments as follows:
* Start position alone will retrieve all elements which start position is greater than the value given;
* End position alone will retrieve all elements which end position is less than the value given;
* Start and end together will retrieve all elements located between that genomic range;
* The fofn file containing one element id (the first attribute of the last gff column) per line will retrieve the elements that have this id.' -> doc
# Parse parameters
suppressMessages(library(docopt))
opt <- docopt(doc)
# At least one subsetting criterion is required, otherwise nothing is selected.
if (is.null(opt$start) && is.null(opt$end) && is.null(opt$fofn) && is.null(opt$regex)){
  stop("At least one of the OPTIONAL parameters (start|end|fofn) must be supplied\n", call.=FALSE)
}
if (is.null(opt$input)){
  stop("At least one argument must be supplied (input file)\n", call.=FALSE)
}
# Default output directory: current directory.
if (is.null(opt$outdir)) {
  outdir <- "."
} else { outdir <- opt$outdir }
# BUG FIX: the original assigned outdir <- "output" here, clobbering the
# output directory and leaving `prefix` undefined, so every later
# glue("{outdir}/{prefix}...") call failed. Default the PREFIX instead.
if (is.null(opt$prefix)) {
  prefix <- "output"
} else { prefix <- opt$prefix }
## Helper functions
### Extract the value of a named attribute (e.g. "ID", "Name") from GFF
### attribute strings of the form "key1=val1;key2=val2;...".
### Returns NA_character_ for records that lack the requested field.
getAttributeField <- function (x, field, attrsep = ";") {
  records <- strsplit(x, split = attrsep, fixed = TRUE)
  vapply(records, function(attrs) {
    pairs <- strsplit(attrs, split = "=", fixed = TRUE)
    keys <- vapply(pairs, `[`, character(1), 1)
    hit <- match(field, keys)
    if (is.na(hit)) NA_character_ else pairs[[hit]][2]
  }, character(1))
}
### To extract FASTA based on IDs
# Fetch all rows whose ID column equals `y` from table `x` (expected to be
# "NucleotideFasta" or "ProteinFasta") using the global connection `con`.
# NOTE(review): the SQL is built by string pasting, so ids containing a
# single quote would break the query (injection-style) -- acceptable here
# since ids come from the GFF itself, but worth confirming.
get_from_ids <- function(y, x) {
  dbGetQuery(con, paste("SELECT * FROM ", x, " WHERE ID='", y, "'", sep=""))
}
### Fetch GFF rows whose `attributes` column contains the id `y`
### (substring match via SQL LIKE), again using the global `con`.
get_gff_from_ids <- function(y) {
  dbGetQuery(con, paste("SELECT * FROM FinalGFF WHERE attributes LIKE '%", y, "%'", sep=""))
}
## Loading SQL database driver
# Open a connection to the SQLite database given by --input; `con` is used
# globally by the helper functions above.
drv <- dbDriver("SQLite")
dbname <- file.path(opt$input)
con <- dbConnect(drv, dbname=dbname)
## Getting Data Out
#head(dbListTables(con)) # Lists tables in database
#dbListFields(con, "FinalGFF") # Lists table fields
#dbListFields(con, "NucleotideFasta") # Lists table fields
#dbListFields(con, "ProteinFasta") # Lists table fields
### Searching elements that have the pattern resistance in feature column
# --regex is "column|pattern": the column name and pattern are interpolated
# directly into the SQL. Each branch below writes the selected GFF subset
# and leaves the result in `out` for the FASTA section at the bottom.
if (!is.null(opt$regex)) {
  #### Subset GFF
  # NOTE(review): `list` shadows base::list inside this script.
  list <- strsplit(opt$regex, "|", fixed = TRUE)
  string <- glue("SELECT * FROM FinalGFF WHERE {list[[1]][1]} LIKE '%{list[[1]][2]}%'")
  out <- suppressWarnings(dbGetQuery(con, string))
  outname <- glue("{outdir}/{prefix}.gff")
  write.table(out, file = outname, quote = FALSE, sep = "\t",
              col.names = TRUE, row.names = FALSE)
}
### Searching elements by genomic region
#### Only with start position
# NOTE(review): opt$start / opt$end arrive as character strings; SQLite's
# type coercion makes the numeric comparison work -- confirm if the
# database is changed.
if (!is.null(opt$start) && is.null(opt$end)) {
  ##### Subset GFF
  start <- opt$start
  string <- glue("SELECT * FROM FinalGFF WHERE start > {start}")
  out <- suppressWarnings(dbGetQuery(con, string))
  outname <- glue("{outdir}/{prefix}.gff")
  write.table(out, file = outname, quote = FALSE, sep = "\t",
              col.names = TRUE, row.names = FALSE)
} else if (is.null(opt$start) && !is.null(opt$end)) {
  ##### Subset GFF  (end position only)
  end <- opt$end
  string <- glue("SELECT * FROM FinalGFF WHERE end < {end}")
  out <- suppressWarnings(dbGetQuery(con, string))
  outname <- glue("{outdir}/{prefix}.gff")
  write.table(out, file = outname, quote = FALSE, sep = "\t",
              col.names = TRUE, row.names = FALSE)
} else if (!is.null(opt$start) && !is.null(opt$end)) {
  ##### Subset GFF  (both bounds: elements fully inside the range)
  start <- opt$start
  end <- opt$end
  string <- glue("SELECT * FROM FinalGFF WHERE start > {start} AND end < {end}")
  out <- suppressWarnings(dbGetQuery(con, string))
  outname <- glue("{outdir}/{prefix}.gff")
  write.table(out, file = outname, quote = FALSE, sep = "\t",
              col.names = TRUE, row.names = FALSE)
}
### Searching elements by ids
# Read one id per line from the fofn file and query FinalGFF for each.
if (!is.null(opt$fofn)) {
  fil <- file(opt$fofn)
  ids <- readLines(fil, n = -1)
  ids <- toupper(ids)
  suppressWarnings(res <- lapply(ids, get_gff_from_ids))
  # Combine the per-id result list into one table of rows.
  out <- suppressWarnings(data.frame(t(sapply(res,c))))
  df <- apply(out,2,as.character)
  outname <- glue("{outdir}/{prefix}.gff")
  write.table(df, file = outname, quote = FALSE, sep = "\t",
              col.names = TRUE, row.names = FALSE)
}
### Subset FASTA
# Uses `out` produced by whichever branch ran above: extract the "id"
# attribute of each selected GFF record, then pull the matching sequences
# from the protein and/or nucleotide tables and write FASTA files.
# NOTE(review): in the 'both' branch `out` is overwritten by the protein
# query before the nucleotide lookup -- it works because `ids` was captured
# first, but it is fragile.
if (opt$type == 'both') {
  # Get aa FASTA
  ids <- getAttributeField(as.character(out$attributes), "id", ";")
  ids <- toupper(ids)
  suppressWarnings(res <- lapply(ids, get_from_ids, x = "ProteinFasta"))
  out <- suppressWarnings(data.frame(t(sapply(res,c))))
  out_fasta <-
    glue('>{out$ID} {out$Comment}\n{out$Sequence}')
  outname <- glue("{outdir}/{prefix}_aa.fasta")
  write(out_fasta, file = outname, sep = "\n")
  # Get nt FASTA
  suppressWarnings(res <- lapply(ids, get_from_ids, x = "NucleotideFasta"))
  out <- suppressWarnings(data.frame(t(sapply(res,c))))
  out_fasta <-
    glue('>{out$ID} {out$Comment}\n{out$Sequence}')
  outname <- glue("{outdir}/{prefix}_nt.fasta")
  write(out_fasta, file = outname, sep = "\n")
} else if (opt$type == 'nt') {
  # Get nt FASTA
  ids <- getAttributeField(out$attributes, "id", ";")
  ids <- toupper(ids)
  suppressWarnings(res <- lapply(ids, get_from_ids, x = "NucleotideFasta"))
  out <- suppressWarnings(data.frame(t(sapply(res,c))))
  out_fasta <-
    glue('>{out$ID} {out$Comment}\n{out$Sequence}')
  outname <- glue("{outdir}/{prefix}_nt.fasta")
  write(out_fasta, file = outname, sep = "\n")
} else if (opt$type == 'aa') {
  # Get aa FASTA
  ids <- getAttributeField(out$attributes, "id", ";")
  ids <- toupper(ids)
  suppressWarnings(res <- lapply(ids, get_from_ids, x = "ProteinFasta"))
  out <- suppressWarnings(data.frame(t(sapply(res,c))))
  out_fasta <-
    glue('>{out$ID} {out$Comment}\n{out$Sequence}')
  outname <- glue("{outdir}/{prefix}_aa.fasta")
  write(out_fasta, file = outname, sep = "\n")
}
|
8abccccf4950947ad2a2bb29b4d843ebaf6a2e25
|
dc9aaff85df4cb5bc681454a96cd6bac3a236732
|
/R-Derivative.r
|
eb69e04434f9a45303500bccf2b35c4d1fe3e57d
|
[] |
no_license
|
17523185/R_Function
|
d3fb4b7cd4013540511533d799452e91ca34ae78
|
6a96394e41c9af07a47b0b8981de1cfd485ca638
|
refs/heads/master
| 2020-03-30T08:28:09.240988
| 2018-12-18T16:47:12
| 2018-12-18T16:47:12
| 151,017,641
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 216
|
r
|
R-Derivative.r
|
# Problem 1: derivative of sqrt(x), i.e. d/dx x^(1/2) = 1 / (2 * sqrt(x)).
rule11 <- function(x) {
  denom <- 2 * sqrt(x)
  1 / denom
}
rule11(4)  # expect 0.25
# Problem 2: symbolic differentiation via the Ryacas computer-algebra bridge.
library(Ryacas)
# Declare a symbolic variable x for yacas expressions.
x <- Sym("x")
#2.1  d/dx 2x^5  -> 10x^4
Simplify(deriv(2*x^5, x))
#2.2  d/dx (x^2 + 4)  -> 2x
Simplify(deriv(x^2 + 4, x))
#2.3  d/dx (x^5 - 6x^7)  -> 5x^4 - 42x^6
Simplify(deriv(x^5 - 6*x^7, x))
|
9293b00885b1e93acf94c79834525f8c11453006
|
deb24e0574d7833c16ead96596de1451c76194bf
|
/MSD_scaled_g1_revised.R
|
83c874a2ca1f77901324349dba9eac4c02db2965
|
[] |
no_license
|
edwardcooper/lammps
|
e5b371b239e74fe05853d0b6a1d8ac0b764c703d
|
3c9f3f932a19243a042f14892943a0d8c2968a1e
|
refs/heads/master
| 2021-01-25T15:43:54.419938
| 2019-04-18T03:58:44
| 2019-04-18T03:58:44
| 100,535,812
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,781
|
r
|
MSD_scaled_g1_revised.R
|
# MSD calculation for scaled position coordinates xs, ix .
### First define a function to do the calculation for one temeprature.
MSD_scaled_g1_one_temp=function(path="~/Dropbox/lammps/PMMA_long/atom300",filename="atom.300_long2",polymer="PMMA_long"
,num_mol=64,molecule_atoms=602,molecule_monomers=40 ,monomer_atoms=15
,atom_type=1:10,atom_type_mass=c(1.0079,12.011,12.011,12.011,15.9999,15.9999,12.011,12.011,1.0079,12.011)
){
setwd(path)
# record the time
timeRecordB()
# load all library
library(data.table)
library(foreach)
library(dplyr)
library(matrixStats)
library(magrittr)
### Load the data with fread
atom.300.1_fread=fread(input=filename, sep=" ", stringsAsFactors = FALSE, fill=TRUE
#,col.names = c("atom-id","type","xs","ys","zs","ix","iy","iz")
,colClasses = c("numeric","numeric","numeric","numeric","numeric","numeric","numeric","numeric","character","character")
,na.strings = c("ITEM:","TIMESTEP","NUMBER","OF","ATOMS","BOX","BOUNDS", "pp","id","type","mol","xs","ys","zs","ix","iy","iz")
)
## first give the bounds of box on the x,y,z direction.
xhigh=atom.300.1_fread[6,V2]
xlow=atom.300.1_fread[6,V1]
yhigh=atom.300.1_fread[7,V2]
ylow=atom.300.1_fread[7,V1]
zhigh=atom.300.1_fread[8,V2]
zlow=atom.300.1_fread[8,V1]
paste("The box bounds are:\n","x:",xlow,xhigh,"\n","y:",ylow,yhigh,"\n","z:",zlow,zhigh,"\n")%>%message
timeRecordB(output_message = "Load in data fread")
# select the non-NA columns
atom.300.1_fread=atom.300.1_fread[,.(V1,V2,V3,V4,V5,V6,V7,V8)]
# clear the rows that have NA values, V6 contains the most NAs.
atom.300.1_fread=atom.300.1_fread[complete.cases(atom.300.1_fread[,V6])]
colnames(atom.300.1_fread)=c("atom.id","type","xs","ys","zs","ix","iy","iz")
########################################################################
# calculate total atom number and timestep.
tot_atom_num=atom.300.1_fread[,.(atom.id)]%>%max()
paste("The number of atoms:",tot_atom_num,sep="")%>%message()
timestep=dim(atom.300.1_fread)[1]/tot_atom_num
paste("The number of timestep:",timestep,sep="")%>%message()
########################################################################
# add time variable for easier data handling and safe guarding any unwanted ordering to change the ordering of time in the data.
timeRecordB()
atom.300.1_fread[,time_step:=seq(1,timestep,by=1)%>%rep(times=tot_atom_num)%>%sort()]
timeRecordB(output_message = "add time variable data.table")
# calculate the xu,yu,zu unscaled position
## first give the bounds of box on the x,y,z direction.
xlength=xhigh-xlow
ylength=yhigh-ylow
zlength=zhigh-zlow
atom.300.1_fread[,`:=`(xu=xs*xlength+xlow+ix*xlength,yu=ys*ylength+ylow+iy*ylength,zu=zs*zlength+zlow+iz*zlength)]
# select only part of the data to release some memory.
atom.300.1_fread=atom.300.1_fread[,.(atom.id,type,time_step,xu,yu,zu)]
########################################################################
# Define MSD function
MSD=function(data){
(data$xu-data$xu[1])^2+ (data$yu-data$yu[1])^2+ (data$zu-data$zu[1])^2
}
#define a function for replacement
decode = function(x, search, replace, default = NULL) {
# build a nested ifelse function by recursion
decode.fun <- function(search, replace, default = NULL)
if (length(search) == 0) {
function(x) if (is.null(default)) x else rep(default, length(x))
} else {
function(x) ifelse(x == search[1], replace[1],
decode.fun(tail(search, -1),
tail(replace, -1),
default)(x))
}
return(decode.fun(search, replace, default)(x))
}
# Define a function to assign mol number according to the molecule size
mol_id_gen=function(atom_id,molecule_atoms){
# the minimum number for mol id is 0.
# minus 1 from the atom.id to get the last atom of each molecule to be on the correct molecule.
return(floor((atom_id-1)/molecule_atoms))
}
##########
# define a function to generate correct monomer id depending on the polymer option in the function.
if(polymer %in% c("PMMA_big","PMMA_long")){
monomer_gen=function(atom_id,molecule_atoms,molecule_monomers,monomer_atoms,edge_atoms=c(0,1)){
# if it is on the either end of polymer, the monomer is defined as monomer 0.
# if the atom is not on the end, then first calculate the molecule number and multiply it by 40 since there is 40 monomers in each molecule.
# then add the the monomer number it has in this molecule. Need to deduct the first atom off the molecule, divided by the number of atoms in a monomer and get a integer
# then you have the monomer id.
monomer.id=ifelse(atom_id%%molecule_atoms %in% edge_atoms
,0
,floor(atom_id/molecule_atoms)*molecule_monomers+ceiling((atom_id%%molecule_atoms-1)/monomer_atoms) )
return(monomer.id)
}
}else if(polymer %in% c("PS","PS_20") ){
monomer_gen=function(atom_id,molecule_atoms,molecule_monomers,monomer_atoms,edge_atoms=c(17,642,643,644,0)){
monomer.id=ifelse((atom_id%%molecule_atoms )%in% edge_atoms
,0
,floor(atom_id/molecule_atoms)*molecule_monomers
+ceiling((atom_id%%molecule_atoms+ifelse(atom_id%%molecule_atoms>16,-1,0))/monomer_atoms)
)
return(monomer.id)
}
}
##########
#####################################################################
# add mol variable to original data frame
timeRecordB()
atom.300.1_fread[,mol:=mol_id_gen(atom_id = atom.id,molecule_atoms=molecule_atoms)]
timeRecordB(output_message = "add mol variable data.table")
#####################################################################
# add mass variable to original data frame
timeRecordB()
atom.300.1_fread[,mass:=decode(x = type, search = atom_type
, replace = atom_type_mass)]
timeRecordB(output_message = "add mass variable data.table")
gc()
#####################################################################
# add monomer id
timeRecordB()
atom.300.1_fread[,monomer.id:=monomer_gen(atom_id=atom.id,molecule_atoms=molecule_atoms,molecule_monomers=molecule_monomers,monomer_atoms=monomer_atoms)]
timeRecordB(output_message = "add monomer id variable data.table")
gc()
# mass of monomer
monomer_mass=atom.300.1_fread[time_step==1,]%>%.[monomer.id==1,.(mass)]%>%sum
MSD_g1_matrix=function(data,timestep,num_monomer,monomer_mass){
MSD_g1_empty_matrix=matrix(NA,ncol=timestep,nrow=num_monomer)
###############################################################
# calculate the center of mass for every timestep and every monomer
center_of_mass=data[,.(xu=sum(xu*mass)/monomer_mass,yu=sum(yu*mass)/monomer_mass,zu=sum(zu*mass)/monomer_mass),by=.(monomer.id,time_step)]
###############################################################
for(j in 1:num_monomer){
MSD_g1_empty_matrix[j,]=center_of_mass[monomer.id==j,]%>%MSD
}
return(MSD_g1_empty_matrix)
}
timeRecordB()
MSD.matrix=MSD_g1_matrix(atom.300.1_fread,timestep=timestep,num_monomer= num_mol*molecule_monomers,monomer_mass=monomer_mass)
timeRecordB(output_message = "MSD for center of mass")
gc()
# Calculate the averaged MSD over all molecules.
MSD.matrix%>%colMeans()%>%write.table(file="MSD.g1.colmean.1.txt", sep=",")
## Add NGP calculation here.
timeRecordB()
NGP.COM=(0.6)*colMeans(MSD.matrix^2)/(colMeans(MSD.matrix))^2-1
NGP.COM%>%write.table(file="NGP.g1.1.txt", sep=",")
timeRecordB(output_message = "NGP for center of mass of monomer")
return( timeRecordR(ignore=0.1)%>%filter(output_message!="None")%>%select(output_message,run_time) )
}
# example use # put the timeRecord function loading outside the function in case internet conenction is down during the calculation.
# source("https://raw.githubusercontent.com/edwardcooper/mlmodel_select/master/timeRecord_functions.R")
#
# MSD_scaled_g1_one_temp(path="~/Dropbox/lammps/PS_20/atom300",filename="atom.300_1"
# ,molecule_atoms=645,num_mol=40,molecule_monomers=40
# ,monomer_atoms=16,atom_type=1:6,atom_type_mass=c(12.011,1.0079,12.011,12.011,12.011,1.0079)
# ,polymer="PS_20"
# )
## echo the current calculation and percentage of entire calculation.
# Driver: run the per-temperature monomer center-of-mass MSD/NGP calculation
# (MSD_scaled_g1_one_temp) for every temperature of one polymer system.
# Data for temperature T is expected at {Path}/{polymer}/atom{T}/atom.{T}_1.
# Returns a timing summary (output files are written by the worker).
MSD_scaled_g1=function(Path="~/Dropbox/lammps/",polymer="PMMA_long",temperatures=seq(300,620,by=20)
                       ,num_mol=64,molecule_atoms=602,molecule_monomers=40 ,monomer_atoms=15
                       ,atom_type=1:10,atom_type_mass=c(1.0079,12.011,12.011,12.011,15.9999,15.9999,12.011,12.011,1.0079,12.011)
                       ){
  # load the timeRecord functions from my github account.
  source("https://raw.githubusercontent.com/edwardcooper/mlmodel_select/master/timeRecord_functions.R")
  library(magrittr)
  # the loop to calculate the same thing in all temperatures defined above.
  for (i in seq_along(temperatures)){
    # echo beginning of calculation
    paste("Begin calculation of temperature:",temperatures[i],sep="")%>%message
    # set correct path for the data file
    path=paste(Path,"/", polymer,"/atom",temperatures[i], sep='')
    # find the correct file to read and calculate.
    filename=paste("atom.",temperatures[i],"_1",sep="")
    # calculation for MSD
    # BUG FIX: `polymer` is now forwarded to the worker. Previously it was
    # omitted, so the worker always used its default ("PMMA_long") and
    # applied the PMMA monomer-id scheme even for PS/PS_20 systems.
    MSD_scaled_g1_one_temp(path=path,filename =filename,polymer=polymer,
                           num_mol=num_mol,molecule_atoms=molecule_atoms,
                           molecule_monomers=molecule_monomers,monomer_atoms=monomer_atoms
                           ,atom_type=atom_type,atom_type_mass=atom_type_mass
                           )
    # echo end of calculation
    paste("End calculation of temperature:",temperatures[i],sep="")%>%message
    paste(i,"/",length(temperatures))%>%message
    gc()
  }
  # NOTE(review): filter()/select() are dplyr verbs; dplyr is attached as a
  # side effect of the worker above -- confirm if the worker call is removed.
  return( timeRecordR(ignore=0.1)%>%filter(output_message!="None")%>%select(output_message,run_time) )
}
# example use
# MSD_scaled_g1(Path="~/Dropbox/lammps/",polymer="PMMA_long",temperatures=seq(300,620,by=20)
# ,num_mol=64,molecule_atoms=602,molecule_monomers=40 ,monomer_atoms=15
# ,atom_type=1:10,atom_type_mass=c(1.0079,12.011,12.011,12.011,15.9999,15.9999,12.011,12.011,1.0079,12.011)
# )
|
edb7760299d52e3f1d768382a1aafbe2288f860f
|
6eebf6c5eb3700a0f7ff463aea4083511e8ca0e1
|
/Project_Prob2.R
|
b87b1e4901e47b4a9af06b99a0b88b034e446cea
|
[] |
no_license
|
jingtian808/Machine-Learning-in-Stock-Price-Prediction
|
70e53d6f60b7a4c5ce619bd3cdb11d885e0fe78e
|
c12ec799108435fafba5baf0a24340b3657aa817
|
refs/heads/master
| 2020-04-22T08:28:10.073728
| 2019-02-12T04:43:14
| 2019-02-12T04:43:14
| 170,243,280
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,244
|
r
|
Project_Prob2.R
|
# Prediction script: load one pre-trained (ranger) model per GICS sector,
# route each company to its sector's model via its Industry, predict the
# next-month return, and classify it as "grow"/"fall".
library(randomForest)
library(ranger)
library(caret)
#---------------- Set location--------------------
# NOTE(review): hard-coded user-specific path; adjust before running elsewhere.
setwd("C:/Users/xieli/Desktop/850 project")
# read our sector CVS
# GICS.csv maps Industry -> Sector; used to pick the right model per company.
data_sectors <- read.csv('GICS.csv',header=TRUE,stringsAsFactors=FALSE)
sector_names_list <- unique(data_sectors$Sector)
#------ load all models for different sector-----------------
# model_j corresponds to sector_names_list[j]; model_other is the fallback
# for industries not covered by any of the 11 sectors.
model_1 = readRDS("Communication Services.rds")
model_2 = readRDS("Consumer Discretionary.rds")
model_3 = readRDS("Consumer Staples.rds")
model_4 = readRDS("Energy.rds")
model_5 = readRDS("Financials.rds")
model_6 = readRDS("Health Care.rds")
model_7 = readRDS("Industrials.rds")
model_8 = readRDS("Information Technology.rds")
model_9 = readRDS("Materials.rds")
model_10 = readRDS("Real Estate.rds")
model_11 = readRDS("Utilities.rds")
model_other = readRDS("Other.rds")
# Communication_Services = readRDS("Communication Services.rds")
# Consumer_Discretionary = readRDS("Consumer Discretionary.rds")
# Consumer_Staples = readRDS("Consumer Staples.rds")
# Energy = readRDS("Energy.rds")
# Financials = readRDS("Financials.rds")
# Health_Care = readRDS("Health Care.rds")
# Industrials = readRDS("Industrials.rds")
# Information_Technology = readRDS("Information Technology.rds")
# Materials = readRDS("Materials")
# Real_Estate = readRDS("Real Estate.rds")
# Utilities = readRDS("Utilities.rds")
# Other = readRDS("Other.rds")
# sector_nameJ holds the Industry names that belong to sector J.
sector_name1 = data_sectors$Industry[data_sectors$Sector==sector_names_list[1]]
sector_name2 = data_sectors$Industry[data_sectors$Sector==sector_names_list[2]]
sector_name3 = data_sectors$Industry[data_sectors$Sector==sector_names_list[3]]
sector_name4 = data_sectors$Industry[data_sectors$Sector==sector_names_list[4]]
sector_name5 = data_sectors$Industry[data_sectors$Sector==sector_names_list[5]]
sector_name6 = data_sectors$Industry[data_sectors$Sector==sector_names_list[6]]
sector_name7 = data_sectors$Industry[data_sectors$Sector==sector_names_list[7]]
sector_name8 = data_sectors$Industry[data_sectors$Sector==sector_names_list[8]]
sector_name9 = data_sectors$Industry[data_sectors$Sector==sector_names_list[9]]
sector_name10 = data_sectors$Industry[data_sectors$Sector==sector_names_list[10]]
sector_name11 = data_sectors$Industry[data_sectors$Sector==sector_names_list[11]]
# ------------- Load 2018 data --------------
#data<-read.csv("testing_data.csv",header = TRUE)
#data<-read.csv("data_sec.csv",header = TRUE,stringsAsFactors=FALSE)
#
data<-read.csv('mf850-finalproject-data.csv',header = TRUE)
# data= data[4000:4500,]
# data<-read.csv('test_data.csv',header = TRUE)
# data= data[6000:6020,]
# i=4;
# sector_name = data_sectors$Industry[data_sectors$Sector==sector_names_list[i]]
# data<- data[data$Industry %in% sector_name,]
#------------------------------ Process data----------------------------------
# Drop identifier/target columns; keep everything else as features.
unlist<-c("Date","compid","Industry","RETMONTH")
names_list <- which(!(names(data) %in% unlist))
r_month <- data$RETMONTH
r_month_spx <- data$retmonth_spx
industry_name <- data$Industry
my_data <- data[,names_list]
# Column-wise normalisation: center by mean, scale by range.
# NOTE(review): no na.rm -- a single NA in a column makes the whole column
# NA, which the next line then zeroes out; confirm that is intended.
neutralize<-function(list){
  return ((list - mean(list))/(max(list)-min(list)))
}
my_data<-apply(my_data,2,neutralize)
my_data[is.na(my_data)]<-0
# NOTE(review): `length` shadows base::length below (still works because the
# function is looked up separately, but it is confusing).
length = dim(my_data)[1]
#----------------------make predictions------------------------------------------
my_predictions <- c()
for (i in (1:length)){
  for (j in (1:11)){
    if (industry_name[i] %in% get(paste("sector_name",j,sep = ''))){
      # if this company's industry belongs to a sector, choose the corresponding model
      my_model = get(paste("model",j,sep="_"))
      break
    }else if(j==11 &(!industry_name[i] %in% sector_name11)){
      # if the industry does not belong to any existed sector, we are using a general model here
      my_model = model_other
      break
    }
  }
  # Predict on a two-row window so the matrix subset keeps its dimensions
  # (a single-row `my_data[i,]` would drop to a vector), then keep only the
  # prediction for row i.
  if (i != length){
    res <- predict(my_model,data = my_data[i:(i+1),])
    my_predictions = c(my_predictions,res$predictions[1])
  }else {
    res <- predict(my_model,data = my_data[(i-1):i,])
    my_predictions = c(my_predictions,res$predictions[2])
  }
}
length_pred = length(my_predictions)
# my_predictions = as.data.frame(my_predictions)
# Classify each predicted return by its sign (note: exactly 0 maps to "fall").
predictions_trend<-rep('s',length_pred)
for (i in 1:length_pred){
  if (sign(my_predictions[i] )> 0){
    predictions_trend[i] = "grow"
  }else{
    predictions_trend[i] = "fall"
  }
}
#----------------------------------------------------------------
# predictions_trend contains strings "grow" & "fall"
#--------------------------------------------------------------------------------
#--------------------Test code-----------------------------------------
# r_month_trend<-rep('s',length_pred)
# for (i in 1:length_pred){
#   if (sign(r_month[i]) > 0){
#
#     r_month_trend[i] = "grow"
#   }else{
#
#     r_month_trend[i] = "fall"
#   }
# }
# count = 0
# for (i in 1:length_pred){
#   if (r_month_trend[i]== predictions_trend[i]){
#
#     count = count +1
#   }
# }
#
# correct_rate = count/ length_pred
|
a8cfa0b8e70b2846905269e54f581399b79a54ab
|
de310f0eda436964b08550e6f6de59ae186eec5c
|
/R/modify.R
|
1ce1bb7c4762afbb2a4cb44c5893e8b227f9fc80
|
[] |
no_license
|
jimsforks/dplyrExtras
|
582c9c1c3eed5aa857a5830fb1ba2d54b6a58dd3
|
8b05cf2a3957167484ec31add14fdb3b534b6995
|
refs/heads/master
| 2022-07-17T19:56:30.044058
| 2020-05-18T19:58:27
| 2020-05-18T19:58:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,851
|
r
|
modify.R
|
# Interactive usage examples for modify()/s_modify(); not called by package
# code. The arguments (a, K, n, x, y) presumably exist only to silence
# "no visible binding" notes for the NSE expressions below -- confirm.
examples.modify = function(a=0,K=0,n=0,x=0,y=0) {
  library(microbenchmark)
  K = 3
  n = 10
  # Small random data.table / data.frame pair to demonstrate both paths.
  dt = data.table(a= sample(1:3,n,replace=TRUE),
                  b= sample(1:100,n,replace=TRUE),
                  x=rnorm(n))
  df = as.data.frame(dt)
  # Set x to 100 where a==2
  modify(dt,a==2, y=x+100, z=y+K)
  modify(df,a==2,y=200,z=y*5+K)
  dt[,y:=x+2]
  # Set x to the mean value of b*100 in each group of a
  modify(dt,.by=c("a"), x=mean(b)*100)
  dt
  # Call with strings
  com = "x=200"
  s_modify(dt,"a==2", com)
  dt
}
EmptySymbol = function() (quote(f(,)))[[2]]
# Build the unevaluated data.table call(s) implementing a modify() request:
#   .data[filter.call, name := expr, by = by]
# args        : named list of quoted RHS expressions (one per target column)
# filter.call : quoted row-filter expression, or NULL for all rows
# by          : character vector of grouping columns, or NULL
# dat.quote   : symbol/expression standing in for the data.table
# With several args it recurses one arg at a time and returns an expression
# vector of calls (so each assignment can see columns created by the
# previous one); with a single arg it returns one call.
get.data.table.modify.call = function(args=NULL, filter.call=NULL, by=NULL, dat.quote=quote(.data)) {
  if (length(args)==1) {
    # One column: build  name := expr
    com = call(":=",names(args)[1],args[[1]])
  } else {
    # Several columns: one call per arg, evaluated in order by the caller.
    return(as.expression(lapply(seq_along(args),function(i) {
      get.data.table.modify.call(args=args[i], filter.call = filter.call, by=by, dat.quote=dat.quote)
    })))
    #com = as.call(c(list(quote(`:=`)),args))
  }
  # Wrap into  .data[i, j]  -- i is the filter or the empty symbol (all rows).
  if (is.null(filter.call)) {
    ca = call('[',dat.quote, EmptySymbol(),com )
  } else {
    ca = call('[',dat.quote, filter.call, com)
  }
  # Optional grouped computation:  .data[i, j, by = by]
  if (!is.null(by)) {
    ca$by = by
  }
  ca
}
#' In place modification of data tables
#'
#' If dt is a data table, then modify is essentially just a wrapper for data.table syntax that allows modification or creation of new columns. If dt is not a data table, it will by default be converted to a data table and then transformed and returned as the original data frame. Unlike mutate from dplyr, one can use the .SD argument in function calls, which can quite useful sometimes.
#'
#' @param .dt a data.table
#' @param .if optional a boolean conditions that specifies the rows that shall be modifed
#' @param .by optional a vector of column names used for computations that are splitted by groups
#' @param ... formulas for columns that are modified or newly created
#' @param .envir optional an environment in which the expressions shall be evaluated if variables are not found in .dt
#' @param .inplace allows .dt inplace modification (TRUE if .dt is a data table)
#' @param .as.data.table shall result be a data.table (only true if .dt is a data.table)
#' @export
modify = function(.dt,.if,.by=NULL,..., .envir=parent.frame(), .inplace=is.data.table(.dt), .as.data.table=is.data.table(.dt)) {
  # Capture the ... expressions unevaluated (NSE): names are target columns,
  # values are quoted RHS expressions.
  args = eval(substitute(alist(...)))
  # Capture the row filter unevaluated, if supplied.
  if (missing(.if)) {
    filter.call=NULL
  } else {
    filter.call=substitute(.if)
  }
  if (.inplace) {
    # In-place path: build dt[i, name := expr, by] against the CALLER's
    # object (substitute(.dt) keeps the original symbol) and evaluate it
    # in .envir, mutating by reference. Returned invisibly.
    .dt = substitute(.dt)
    ca = get.data.table.modify.call(args=args, by=.by, filter.call=filter.call, dat.quote=.dt)
    return(invisible(eval(ca,envir=.envir)))
  } else {
    # Copy path: work on a copy (or a data.table conversion) held in a child
    # environment of .envir, so free variables in the expressions resolve.
    env = new.env(parent=.envir)
    old.class = class(.dt)
    if (is.data.table(.dt)) {
      assign(".dt", copy(.dt),env)
    } else {
      assign(".dt", as.data.table(.dt),env)
    }
    ca = get.data.table.modify.call(args=args, by=.by, filter.call=filter.call, dat.quote=quote(.dt))
    eval(ca,envir=env)
    ret = get(".dt",env)
    if (.as.data.table) {
      return(ret)
    } else {
      #try(return(as(ret, old.class)), silent=TRUE)
      return(as.data.frame(ret))
    }
  }
}
#' Modified version of modify that uses string arguments
#' @param .dt the data.table / similar object that shall be modified
#' @param ... string version of arguments of modify (see example)
#' @param .envir environment in which arguments will be evaluated (relevant if variables outside .dt are used)
#' @export
s_modify = function(.dt, ..., .envir=parent.frame()) {
  # Recover the caller's expression for .dt as text so the generated
  # modify() call refers to the original object (enables in-place mutation).
  .dt = substitute(.dt)
  data.str = paste0(deparse(.dt, width.cutoff=500), collapse="")
  args = list(...)
  args = unlist(args)
  #restore.point("s_modify")
  # Assemble and evaluate "modify(<dt>, <arg1>, <arg2>, ..., .envir=.envir)".
  # parse/eval on strings is deliberate here: it is the whole point of the
  # string-based API.
  code = paste0("modify(",data.str,",", paste0(args, collapse=","), ", .envir=.envir)")
  invisible(eval(parse(text=code,srcfile=NULL)))
}
|
f0e254d4e7abf5ebcab4ebb318d29c6128a43822
|
caf356fd6c1fda3935d492763639e4d80485b02f
|
/man/simulate.mpt.Rd
|
738cfa1328b1e12cfbaaa8911cdb044f3ba5cc8a
|
[] |
no_license
|
cran/mpt
|
2bddd9800c22c31c6364d583e5edb8b209b07439
|
700e976dff2ede16f170dfcfcf828c42830a7ccc
|
refs/heads/master
| 2022-05-11T11:46:00.202450
| 2022-03-23T06:50:02
| 2022-03-23T06:50:02
| 17,697,663
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,464
|
rd
|
simulate.mpt.Rd
|
\name{simulate.mpt}
\alias{simulate.mpt}
\title{Simulate Responses from MPT Models}
\description{
Simulates responses from the distribution corresponding to a fitted
\code{mpt} model object.
}
\usage{
\method{simulate}{mpt}(object, nsim, seed, pool = TRUE, \dots)
}
\arguments{
\item{object}{an object of class \code{mpt}, typically the result of a
call to \code{\link{mpt}}.}
\item{nsim, seed}{currently not used.}
\item{pool}{logical, if \code{TRUE} (default), pooled responses (summed
across respondents) are returned.}
\item{\dots}{further arguments passed to or from other methods.
None are used in this method.}
}
\details{
Responses are simulated by (repeatedly) applying \code{\link{rmultinom}}
with sizes taken from the original sample and probabilities computed from
the model object.
}
\value{
A named vector of (pooled) responses. Names identify the tree from which
responses were simulated.
}
\seealso{
\code{\link{mpt}}, \code{\link{rmultinom}}.
}
\examples{
data(retroact)
m <- mpt(mptspec(
c*r,
(1 - c)*u^2,
2*(1 - c)*u*(1 - u),
c*(1 - r) + (1 - c)*(1 - u)^2,
u,
1 - u
), retroact[retroact$lists == 1, ])
simulate(m)
## Parametric bootstrap of goodness-of-fit test
LR.stat <- replicate(200, deviance(mpt(m$spec, simulate(m))))
hist(LR.stat, border="white", freq=FALSE, breaks=20,
main="Parametric bootstrap")
curve(dchisq(x, df=1), add=TRUE)
abline(v=deviance(m), lty=2)
}
\keyword{models}
|
826f9b58fa745ccb4fb5edb48d4c8183a436072b
|
47d61e0421b5ee045f40f9b174d0cfb6819a7849
|
/R/hcrPlot.r
|
634fe64ee03e7239aedb8f9752e1bd23fe165064
|
[] |
no_license
|
gomezcatalina/bio.lobster
|
912844380f8c122f97190cb0eb2b00c93d32f56c
|
586e11a58319925a65d0639509a1e23603947c17
|
refs/heads/master
| 2021-01-09T06:50:18.206316
| 2017-01-20T16:27:30
| 2017-01-20T16:27:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,260
|
r
|
hcrPlot.r
|
#' Plot a harvest control rule (HCR) phase diagram.
#'
#' Plots fishing mortality against fishable biomass as a yearly trajectory on
#' top of coloured status zones: healthy (right of USR), cautious (between LRP
#' and USR) and critical (left of LRP).
#'
#' B: numeric vector of fishable biomass (t), one value per year.
#' mF: numeric vector of fishing mortality, same length as B.
#' USR: upper stock reference point (biomass).
#' LRP: limit reference point (biomass).
#' RR: removal reference (fishing mortality).
#' yrs: year labels for the trajectory points.
#' ylims/xlims: optional axis limits; default to 1.05 * the data/reference maxima.
#' @export
hcrPlot <- function(B,mF,USR,LRP,RR,yrs,ylims=NULL,xlims=NULL) {
if(is.null(ylims)) ylims = c(0, (max(mF,RR)*1.05))
if(is.null(xlims)) xlims = c(0, (max(B,USR)*1.05))
# First plot() call just sets up the axes; the trajectory it draws is covered
# by the zone polygons and re-drawn afterwards with lines().
plot( B, mF, type="b", xlim=xlims, ylim=ylims, col="darkorange", cex=0.8, lwd=2, xlab="", ylab="", pch=20,yaxs='i',xaxs='i' )
title( xlab="Fishable biomass (t)" )
title( ylab="Fishing mortality" )
# Status zones, drawn oversized so they reach the plot edges
polygon(x=c(USR,max(xlims)*1.3,max(xlims)*1.3, USR),y=c(-0.1,-0.1,max(ylims)*1.3,max(ylims)*1.3),col='lightgreen',border=NA)
polygon(x=c(LRP,USR,USR, LRP),y=c(-0.1,-0.1,max(ylims)*1.3,max(ylims)*1.3),col='lightgoldenrod',border=NA)
polygon(x=c(-100,LRP,LRP, -100),y=c(-0.1,-0.1,max(ylims)*1.3,max(ylims)*1.3),col='darksalmon',border=NA)
# Re-draw the biomass/mortality trajectory on top of the zones
lines( B, mF, col="darkblue", cex=0.8, lwd=2, xlab="", ylab="", pch=20 ,lty=1)
# Reference lines and their labels
abline (h=RR, lty="solid", col="gray", lwd=2 )
abline (v=USR, lty="dotted")
abline (v=LRP, lty="dotted")
text( USR-0.01*USR, RR+RR*0.1, "USR" , srt=90, pos=3)
text( LRP-0.01*USR, RR+RR*0.1, "LRP" , srt=90, pos=3)
text( USR+USR*0.2, RR+RR*0.1, "RR", lwd=2 )
# Annotate each trajectory point with its year
text( B, mF, labels=yrs, pos=3, cex= 0.8 )
}
|
92ac3f69cbdefe18b72f02e64b71986cd6c59365
|
a9dcc6d36e928267e6ac9b3d8de324afd7030a72
|
/WoodchesterPark/PhylogeneticTree/PlotMLTreeIncPoorAndRef_13-10-16.R
|
2dfb12f6be59b974b49718c83f5348c6643565df
|
[] |
no_license
|
xulijunji/GeneralTools
|
e5778d2da6e64264a26027a713e577d88391007e
|
758c769ba10cde1c02e74d5dec70d978d9b6675d
|
refs/heads/master
| 2021-08-23T20:14:33.238693
| 2017-12-06T10:47:49
| 2017-12-06T10:47:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,899
|
r
|
PlotMLTreeIncPoorAndRef_13-10-16.R
|
# Build and plot an ML phylogeny of M. bovis isolates (cattle + badgers),
# colour clades, and map isolate sampling locations.
#
# NOTE(review): the helper functions used below (getIsolateIDFromFileNames,
# defineBranchColoursOfClades, etc.) are defined at the BOTTOM of this file -
# source the whole file before running these top-level statements, otherwise
# the calls fail with "could not find function".
# Load the ape package
library(ape)
library(geiger) # For the tips function
library(plotrix)
# Set the path (absolute, machine-specific)
path <- "C:/Users/Joseph Crisp/Desktop/UbuntuSharedFolder/Woodchester_CattleAndBadgers/NewAnalyses_13-07-17/"
###################################
# Get the Maximum Likelihood Tree #
###################################
# Read in the newick tree
file <- paste(path, "vcfFiles/",
"mlTree_29-09-2017.tree", sep="")
tree <- read.tree(file=file)
# Drop badger beside reference - WB129 - THIS WILL MESS UP NODES DEFINING CLADES
# tree <- drop.tip(tree, "WB129")
# Convert Branch lengths to SNPs (branch lengths are per-site; multiply by
# alignment length to get SNP counts)
fastaLength <- 8893
tree$edge.length <- tree$edge.length * fastaLength
##################################
# Get the Isolate Coverage Table #
##################################
# NOTE(review): `table` shadows base::table for the rest of the script
file <- paste(path,"vcfFiles/",
"IsolateVariantPositionCoverage_RESCUED_29-09-2017.txt", sep="")
table <- read.table(file, header=TRUE, stringsAsFactors=FALSE)
# Reduce file names to isolate IDs (text before the first "_")
table$Isolate <- getIsolateIDFromFileNames(table$Isolate)
##############################
# Plot the Phylogenetic Tree #
##############################
file <- paste(path, "vcfFiles/", "mlTree_CladesAndLocations_02-10-17.pdf", sep="")
pdf(file, height=10, width=10)
# Set the margins
par(mfrow=c(1,1))
par(mar=c(0,0,0,0)) # Bottom, Left, Top, Right
plotType <- "fan" # "phylogram", "cladogram", "fan", "unrooted", "radial"
# Plot initial tree to find nodes defining clades
#pdf(paste(path, "vcfFiles/", "test.pdf", sep=""), height=40, width=40)
#
#plot.phylo(tree, "fan")
#nodelabels()
#
#dev.off()
# Define branch colours by clade; node numbers were read off an interactive
# plot of THIS tree, so they are only valid for this exact tree file
nodesDefiningClades <- c(521, 305, 332, 382) # use nodelabels() to show node numbers
cladeColours <- c("cyan", "pink", "green", "darkorchid4")
branchColours <- defineBranchColoursOfClades(tree, nodesDefiningClades,
cladeColours, "lightgrey")
# Get each isolate's quality (variant position coverage)
isolateQuality <- getIsolateQuality(table)
# Plot the phylogenetic tree
plot.phylo(tree, show.tip.label=FALSE, plotType,
edge.color=branchColours, edge.width=3,
show.node.label=TRUE)
# Add node labels - in ape, nodes 1..Ntip are the tips, so this decorates the
# tips: size = sequencing coverage, shape = species, fill = species colour
nodelabels(node=1:length(tree$tip.label),
cex=defineTipSizeBySequencingQuality(tree$tip.label, isolateQuality),
pch=defineTipShapesForSpecies(tree$tip.label, 24, 21),
bg=defineTipColourBySpecies(tree, "blue", "red", "lightgrey", nodesDefiningClades),
col="dimgrey")
# Add Legends
#text(x=140, y=-130, labels="Variant Position Coverage:", col="black", cex=1)
#addLegendForQuality("bottomright", 1)
legend("bottomleft", legend=c("Cow", "Badger"),
pch=c(17, 16), cex=1, col=c("blue", "red"),
text.col=c("blue", "red"), bty='n')
# Mark the M. bovis reference genome tip
text(x=20, y=0, labels="AF2122/97")
# Add Scale bar (coordinates are in the fan plot's user space)
points(x=c(-20, 30), y=c(-130, -130), type="l", lwd=3)
text(x=5, y=-135, labels="50 SNPs", cex=1)
# Add Clade labels (hand-placed)
text(x=92.5, y=-82, labels="0", col=cladeColours[1], cex=2)
text(x=90, y=80, labels="1", col=cladeColours[2], cex=2)
text(x=19.5, y=128.5, labels="2", col=cladeColours[3], cex=2)
text(x=-82, y=-95, labels="3", col=cladeColours[4], cex=2)
################################
# Get the sampling information #
################################
# Read in the badger sampling information
fileName <- paste(path, "IsolateData/", "BadgerInfo_08-04-15_LatLongs_XY_Centroids.csv",
sep="")
metadata <- read.table(fileName, header=TRUE, stringsAsFactors=FALSE, sep=",")
# Get the locations of each of the isolates (isolate ID -> c(X, Y))
badgerIsolateLocations <- noteBadgerIsolateSamplingLocations(metadata)
# Cattle Isolates
file <- paste(path, "IsolateData/",
"CattleIsolateInfo_LatLongs_plusID_outbreakSize_Coverage_AddedStrainIDs.csv", sep="")
cattleInfo <- read.table(file, header=TRUE, sep=",", stringsAsFactors=FALSE)
# Get the locations of each of the isolates
cattleIsolateLocations <- noteCattleIsolateSamplingLocations(cattleInfo)
####################################################
# Plot the spatial locations of isolates in clades #
####################################################
# Create the clade colours - apply alpha so overlapping points stay visible
cladeColoursRGB <- getRGBsOfColours(cladeColours, alpha=0.75)
cex=2
# Note the isolates in each clade (clade index "1".."4" -> tip labels)
isolatesInClades <- findIsolatesInClades(tree, nodesDefiningClades)
# Note the centre of the badger territories (projected X/Y coordinates)
badgerCentre <- c(381761.7, 200964.3)
expand <- 7000
# Create an empty plot
par(mar=c(0,0,0,0))
plot(x=NULL, y=NULL, yaxt="n", xaxt="n", bty="n", ylab="",
xlim=c(badgerCentre[1] - expand, badgerCentre[1] + expand),
ylim=c(badgerCentre[2] - expand, badgerCentre[2] + expand), asp=1,
xlab="")
# Plot a minimum convex polygon around the
# cattle and badger sampling locations for each cluster
for(i in 1:length(cladeColours)){
# Get the isolates associated with the current clade
isolates <- isolatesInClades[[as.character(i)]]
# Get the coordinates of each isolate
isolateCoordinates <- getXandYCoordinatesOfIsolates(isolates, cattleIsolateLocations,
badgerIsolateLocations)
# Remove NA rows - where couldn't find coordinates for isolates
isolateCoordinates <- isolateCoordinates[is.na(isolateCoordinates$X) == FALSE, ]
# Plot the points (circle = badger, triangle = cow)
points(isolateCoordinates,
pch=ifelse(isolateCoordinates$Species == "BADGER", 19, 17),
col=cladeColoursRGB[i], cex=cex)
# Add a convex hull around the points
addPolygon(isolateCoordinates$X, isolateCoordinates$Y, cladeColours[i])
}
# Add inner circle from BASTA deme assignment diagram
thresholdDistance <- 3500
draw.circle(x=badgerCentre[1], y=badgerCentre[2], radius=thresholdDistance,
border="black", lty=2)
text(x=badgerCentre[1], y=badgerCentre[2] - (thresholdDistance + 500),
labels=paste(round(thresholdDistance/1000, digits=2), "km radius"))
# Add legend
legend("bottomleft", legend=c("CATTLE", "BADGERS"),
pch=c(17, 16), col="black", pt.cex=cex,
text.col="black", bty='n')
# Add the cluster numbers
legend("bottomright", legend=addTextToArray("Cluster ", 0:3, ""),
text.col=cladeColours, bty="n", cex=2)
dev.off()
##########################################################
# Print file noting which isolates are in which clusters #
##########################################################
# Note the clades of isolates in clades (tip label -> 0-based cluster index)
isolateClades <- noteCladesOfIsolates(tree, nodesDefiningClades)
# Print out table
file <- paste(path, "vcfFiles/", "clusters_02-10-17.csv", sep="")
write.table(isolateClades, file, quote=FALSE, sep=",", row.names=FALSE)
#############
# FUNCTIONS #
#############
noteCladesOfIsolates <- function(tree, nodesDefiningClades){
  # Build a two-column table mapping each tip (isolate ID) in any clade to a
  # 0-based cluster index (clade i gets cluster i - 1).
  # Replaces the old grow-in-a-loop construction with a vectorised build.
  tipsPerClade <- lapply(nodesDefiningClades, function(node) tips(tree, node))
  output <- data.frame(
    ID = unlist(tipsPerClade, use.names = FALSE),
    # lengths() gives the tip count per clade, so each cluster index is
    # repeated once per member tip
    Cluster = rep(seq_along(nodesDefiningClades) - 1,
                  times = lengths(tipsPerClade)),
    stringsAsFactors = FALSE)
  return(output)
}
addTextToArray <- function(text, array, sep){
  # Prefix every element of `array` with `text`, joined by `sep`.
  # paste() is vectorised over `array`, so a single call suffices; the old
  # for-loop re-assigned the full vector on every iteration (a no-op loop).
  output <- paste(text, array, sep=sep)
  return(output)
}
getRGBsOfColours <- function(colours, alpha){
  # Apply the given alpha (0-1) to each named colour, returning an unnamed
  # character vector of "#RRGGBBAA" strings.
  # vapply over the input also fixes the old `1:length(colours)` loop, which
  # crashed on empty input; empty input now yields character(0).
  vapply(colours, convertToRGB, character(1), alpha = alpha, USE.NAMES = FALSE)
}
convertToRGB <- function(colour, alpha){
  # Translate an R colour name into an "#RRGGBBAA" string with the requested
  # alpha (0 = transparent, 1 = opaque).
  channels <- col2rgb(colour)
  rgb(channels["red", 1], channels["green", 1], channels["blue", 1],
      alpha = alpha * 255, maxColorValue = 255)
}
getXandYCoordinatesOfIsolates <- function(isolates, cattleIsolateLocations, badgerIsolateLocations){
  # Look up the X/Y sampling coordinates of each isolate. Badger isolates
  # (labels containing "WB") are looked up in badgerIsolateLocations, all
  # others in cattleIsolateLocations; both are lists of c(X, Y) keyed by ID.
  # Rows stay NA when no coordinates are found.
  # seq_len() (instead of 1:length) makes empty input return a 0-row frame
  # rather than erroring.
  n <- length(isolates)
  coords <- data.frame(X = rep(NA, n), Y = rep(NA, n), Species = rep(NA, n),
                       stringsAsFactors = FALSE)
  for(row in seq_len(n)){
    if(grepl(x = isolates[row], pattern = "WB")){
      loc <- badgerIsolateLocations[[isolates[row]]]
      if(!is.null(loc)){
        coords[row, c(1, 2)] <- loc
        coords[row, "Species"] <- "BADGER"
      }
    }else{
      loc <- cattleIsolateLocations[[isolates[row]]]
      if(!is.null(loc)){
        coords[row, c(1, 2)] <- loc
        coords[row, "Species"] <- "COW"
      }
    }
  }
  return(coords)
}
addPolygon <- function(xValues, yValues, borderColour){
  # Draw an unfilled minimum convex polygon (convex hull) around the points
  # on the currently active plot.
  idx <- chull(xValues, yValues)
  # Close the ring by repeating the first hull vertex
  idx <- c(idx, idx[1])
  polygon(xValues[idx], yValues[idx], col = NA, border = borderColour)
}
noteCattleIsolateSamplingLocations <- function(cattleInfo){
  # Map each cattle strain ID to its sampling coordinates c(Mapx, Mapy).
  # Rows missing coordinates or a strain ID are skipped.
  # seq_len() makes an empty data frame return an empty list instead of
  # erroring (the old 1:nrow loop indexed row 1 of an empty frame).
  isolates <- list()
  for(row in seq_len(nrow(cattleInfo))){
    if(!is.na(cattleInfo[row, "Mapx"]) && !is.na(cattleInfo[row, "StrainId"])){
      isolates[[cattleInfo[row, "StrainId"]]] <-
        c(cattleInfo[row, "Mapx"], cattleInfo[row, "Mapy"])
    }
  }
  return(isolates)
}
noteBadgerIsolateSamplingLocations <- function(metadata){
  # Map each badger isolate ID (WB_id) to sampling coordinates. Prefer the
  # social-group centroid; fall back to the sampled-group location; skip rows
  # with neither.
  # seq_len() makes an empty data frame return an empty list instead of
  # erroring (old 1:nrow loop bug).
  isolates <- list()
  for(row in seq_len(nrow(metadata))){
    coords <- c()
    if(!is.na(metadata[row, "GroupCentroidX"])){
      coords <- c(metadata[row, "GroupCentroidX"], metadata[row, "GroupCentroidY"])
    }else if(!is.na(metadata[row, "SampledGrpX"])){
      coords <- c(metadata[row, "SampledGrpX"], metadata[row, "SampledGrpY"])
    }
    # Store sampling coordinates if found
    if(length(coords) > 0){
      isolates[[metadata[row, "WB_id"]]] <- coords
    }
  }
  return(isolates)
}
findIsolatesInClades <- function(tree, nodesDefiningClades){
  # List the tip labels of each clade, keyed by the clade's 1-based position
  # ("1", "2", ...) in nodesDefiningClades.
  # seq_along() (instead of 1:length) keeps empty input safe.
  isolatesInClades <- list()
  for(i in seq_along(nodesDefiningClades)){
    isolatesInClades[[as.character(i)]] <- tips(tree, nodesDefiningClades[i])
  }
  return(isolatesInClades)
}
getTipsInClades <- function(tree, nodesDefiningClades){
  # List the tip labels of each clade, keyed by the node id (as character)
  # that defines the clade.
  out <- list()
  for(nodeId in nodesDefiningClades){
    out[[as.character(nodeId)]] <- tips(tree, nodeId)
  }
  out
}
defineTipColourBySpecies <- function(tree, cow, badger, defaultColour, nodesDefiningClades){
# Colour every tip by species, but only for tips that fall inside one of the
# given clades; all other tips keep defaultColour.
# Species is encoded in the tip label: "TB" = cattle, "WB" = badger.
tipColours <- rep(defaultColour, length(tree$tip.label))
tipsInClades <- getTipsInClades(tree, nodesDefiningClades)
for(tipIndex in 1:length(tree$tip.label)){
for(nodeIndex in 1:length(nodesDefiningClades)){
if(tree$tip.label[tipIndex] %in% tipsInClades[[as.character(nodesDefiningClades[nodeIndex])]] == TRUE){
if(grepl(pattern="TB", x=tree$tip.label[tipIndex]) == TRUE){
tipColours[tipIndex] <- cow
}else if(grepl(pattern="WB", x=tree$tip.label[tipIndex]) == TRUE){
tipColours[tipIndex] <- badger
}else{
tipColours[tipIndex] <- defaultColour
}
# A tip can belong to at most one of these clades - stop searching once found
break
}
}
}
return(tipColours)
}
addLegendForQuality <- function(position, cex){
  # Point-size legend: coverage values 0.6-1 (step 0.05), each drawn at the
  # matching symbol size so the legend explains the tip-size scale.
  sizes <- seq(from = 0.6, to = 1, by = 0.05)
  legend(position, legend = sizes, pch = 24, col = "black",
         pt.cex = sizes, bty = 'n', cex = cex)
}
getIsolateQuality <- function(table){
  # Build a named list mapping isolate ID -> variant-position coverage from a
  # data frame with "Isolate" and "Coverage" columns.
  # NOTE: the parameter name `table` shadows base::table (kept for
  # backward compatibility with existing callers).
  # seq_len() makes an empty data frame return an empty list instead of
  # erroring (the old 1:nrow loop indexed row 1 of an empty frame).
  isolateQuality <- list()
  for(i in seq_len(nrow(table))){
    isolateQuality[[table[i, "Isolate"]]] <- table[i, "Coverage"]
  }
  return(isolateQuality)
}
defineTipSizeBySequencingQuality <- function(tipLabels, isolateQuality){
  # One plotting size (cex) per tip: the isolate's coverage value, or 1 for
  # the reference genome tip "Ref-1997" which has no coverage entry.
  # Preallocates the result and uses seq_along() so empty input returns
  # numeric(0) instead of erroring (old 1:length loop bug).
  tipQuality <- numeric(length(tipLabels))
  for(i in seq_along(tipLabels)){
    if(tipLabels[i] != "Ref-1997"){
      tipQuality[i] <- isolateQuality[[tipLabels[i]]]
    }else{
      tipQuality[i] <- 1
    }
  }
  return(tipQuality)
}
defineBranchColoursOfClades <- function(tree, nodesDefiningClades,
                                        CladeColours, defaultColour){
  # One colour per tree edge: edges inside clade i get CladeColours[i],
  # every other edge gets defaultColour.
  branchColours <- rep(defaultColour, dim(tree$edge)[1])
  for(i in seq_along(nodesDefiningClades)){
    clade <- tips(tree, node = nodesDefiningClades[i])
    branchesInClades <- which.edge(tree, clade)
    # BUG FIX: the body previously read the *global* `cladeColours` instead
    # of the CladeColours argument, silently ignoring the caller's colours.
    branchColours[branchesInClades] <- CladeColours[i]
  }
  return(branchColours)
}
getUpperBoundsFromCuts <- function(cuts){
  # Extract the numeric upper bound from cut() interval labels of the form
  # "(low,high]": take the part after the comma and strip the trailing "]".
  # vapply (instead of the old 1:length loop) makes empty input return
  # numeric(0) rather than erroring.
  vapply(cuts, function(lab) {
    as.numeric(sub("\\]$", "", strsplit(lab, ",")[[1]][2]))
  }, numeric(1), USE.NAMES = FALSE)
}
addLegend <- function(position, colours, nBreaks, cex){
# Colour-scale legend: bins the coverage values into nBreaks intervals and
# shows each bin's upper bound next to its ramp colour.
# NOTE(review): reads the *global* `table` data frame (and a
# "PercentageCoverage" column, while the rest of the script uses "Coverage") -
# possibly a leftover; confirm before reuse.
colourPalette <- colorRampPalette(colours)
cuts <- levels(cut(table$PercentageCoverage, breaks=nBreaks))
bounds <- getUpperBoundsFromCuts(cuts)
bounds <- round(bounds, 2)
legend(position, legend=bounds, col=colourPalette(nBreaks), pch=20, bty='n',
cex=cex)
}
getIsolateIDFromFileNames <- function(fileNames){
  # The isolate ID is everything before the first "_" in each file name
  # (names without "_" are returned unchanged).
  # sub() is vectorised, which also fixes the old 1:length loop that
  # misbehaved on empty input; empty input now yields character(0).
  sub("_.*", "", fileNames)
}
assignIsolatesContinuousColoursByCoverage <- function(table, colours, nBreaks){
  # Map each isolate (first column of `table`) to a colour taken from a ramp
  # over `colours`, according to which of nBreaks coverage bins its
  # PercentageCoverage value falls into.
  ramp <- colorRampPalette(colours)(nBreaks)
  bin <- as.numeric(cut(table$PercentageCoverage, breaks = nBreaks))
  coloursPerRow <- ramp[bin]
  isolateColours <- list()
  for(i in 1:nrow(table)){
    isolateColours[[table[i, 1]]] <- coloursPerRow[i]
  }
  return(isolateColours)
}
returnTipColoursForIsolates <- function(tipLabels, assignedColours){
  # One colour per tip: look the label up in assignedColours (a named list),
  # except the reference tip "Ref-1997" which is always black.
  # Preallocates and uses seq_along() so empty input returns character(0)
  # instead of erroring (old 1:length loop bug).
  tipColours <- character(length(tipLabels))
  for(i in seq_along(tipLabels)){
    if(tipLabels[i] != "Ref-1997"){
      tipColours[i] <- assignedColours[[tipLabels[i]]]
    }else{
      tipColours[i] <- "black"
    }
  }
  return(tipColours)
}
defineTipShapesForSpecies <- function(tipLabels, cow, badger, ...){
  # One plotting symbol (pch) per tip: `badger` for labels containing "WB",
  # `cow` for labels containing "TB" and for anything else (e.g. the
  # reference tip). A label containing "TB" takes priority, matching the
  # original if/else-if ordering.
  # Vectorised; empty input now returns a length-0 vector instead of the
  # spurious single element produced by the old 1:length loop.
  shapes <- rep(cow, length(tipLabels))
  shapes[grepl("WB", tipLabels) & !grepl("TB", tipLabels)] <- badger
  return(shapes)
}
|
810d28b02aa5d34efc56d9c5bd9cdcb487de78d2
|
34131c61655635da412ea7474ba22a172f17f6ff
|
/man/ov_video_player.Rd
|
69147cc130752059cc28fa51121fedcd38982a1e
|
[
"MIT"
] |
permissive
|
openvolley/ovideo
|
8cfbe226050de88dbec2c3e7016e419708f37ea6
|
c9380b9dcc4be3669dda086949aab421164d14ff
|
refs/heads/master
| 2023-05-11T23:44:39.775596
| 2023-05-02T22:17:54
| 2023-05-02T22:17:54
| 244,466,765
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,597
|
rd
|
ov_video_player.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/player.R
\name{ov_video_player}
\alias{ov_video_player}
\title{Video player tag element}
\usage{
ov_video_player(
id,
type,
controls = FALSE,
version = 1,
controller_var = paste0(id, "_controller"),
with_js = FALSE,
...
)
}
\arguments{
\item{id}{string: the id of the tag}
\item{type}{string: either "youtube", "twitch" (only with \code{version = 2}), or "local"}
\item{controls}{logical: if \code{TRUE}, add "previous", "next", "pause", "stop", and "fullscreen" buttons. If \code{controls} is an object of class \code{shiny.tag} (created by \code{\link[htmltools:builder]{htmltools::tags()}}) or \code{shiny.tag.list} (\code{\link[htmltools:tagList]{htmltools::tagList()}}) then those controls will added with this tag or tag list appended}
\item{version}{numeric: code version. Default = 1, sort-of-experimental = 2. Version 2 supports multiple players on a single page, as well as \code{type = "twitch"}}
\item{controller_var}{string: (for version 2 only) the js variable name to use for the controller object that controls this video player}
\item{with_js}{logical: if \code{TRUE}, also include the supporting javascript libraries. If \code{with_js = FALSE}, you must make a separate call to \code{\link[=ov_video_js]{ov_video_js()}} (e.g. in your Shiny ui.R function)}
\item{...}{: other attributes of the player element (passed to the player \code{tags$div} call for youtube/twitch or \code{tags$video} for local)}
}
\value{
HTML tags. The outermost element is a div with id \code{paste0(id, "_container")}, with the player and optionally buttons nested within it.
}
\description{
Video player tag element
}
\examples{
\dontrun{
library(shiny)
## hand-crafted playlist for this example
playlist <- data.frame(video_src = "NisDpPFPQwU",
start_time = c(589, 1036, 1163, 2731, 4594),
duration = 8,
type = "youtube")
shinyApp(
ui = fluidPage(
ov_video_js(youtube = TRUE, version = 2),
ov_video_player(id = "yt_player", type = "youtube",
version = 2, controller_var = "my_dv",
style = "height: 480px; background-color: black;",
controls = tags$button("Go",
onclick = ov_playlist_as_onclick(playlist, "yt_player",
controller_var = "my_dv")))
),
server = function(input, output) {},
)
}
}
|
8e2e3c858aa6c3957c34ada401b1f86a65fe0d9f
|
7f4976dc84dc1a97237a52f2ed4fb1fd42f1980d
|
/Analysis/testing_ideas_about_analysis.R
|
8ed14542515e82d5caeea09c8706f9ae0dbe73f1
|
[] |
no_license
|
punctuationmarks/IMPD-Data-Analysis
|
3b8d997b4a0bfeb322b6cfed6a7e9bef5f95cb34
|
2bc2f806fdd2e219f5920d9d3ee094671e5c1362
|
refs/heads/master
| 2021-07-03T23:25:24.470081
| 2020-12-28T00:26:06
| 2020-12-28T00:26:06
| 210,629,797
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,234
|
r
|
testing_ideas_about_analysis.R
|
# testing the idea of having unique individual UOF on citizens
#
# NOTE(review): exploratory/scratch analysis of IMPD use-of-force (UOF) data.
# It relies on View() throughout, so it is meant to be run interactively in
# RStudio rather than via Rscript.
library(tidyverse)
library(ggrepel) # for some ggplot visual improvements
testing_uof <- read_csv("../CleanData/UOF/cleanedUOF_withGeoLocation_andFormattedDate.csv")
View(testing_uof)
# colnames(testing_uof)
# One row per distinct incident number, keeping every descriptive column as a
# grouping key
uof_ <- testing_uof %>%
group_by(
lon,
lat,
OBJECTID,
INCNUM,
YMD_TM,
OCCURRED_DT,
OCCURRED_TM,
UDTEXT24A,
UDTEXT24B,
UDTEXT24C,
UDTEXT24D,
DISPOSITION,
FULL_ADD,
UOF_FORCE_TYPE,
UOF_REASON ,
SERVICE_TYPE,
CIT_ARRESTED,
CITCHARGE_TYPE,
CIT_WEAPON_TYPE,
CIT_INJURED,
CIT_HOSPITAL,
OFF_INJURED,
OFF_HOSPITAL,
CITNUM,
CIT_RACE,
CIT_SEX,
CIT_AGE,
CIT_COND_TYPE,
OFF_NUM,
OFF_RACE,
OFF_SEX,
OFF_AGE,
OFF_YR_EMPLOY,
OFF_COND_TYPE,
OCCURRED_WEEK_DAY,
OCCURRED_QUARTER
) %>%
# group_by(INCNUM, FULL_ADD, UOF_FORCE_TYPE, UOF_REASON) %>%
distinct(INCNUM) %>%
ungroup()
View(uof_)
# if else statement of if the INCNUM is the same then combine the officer's numbers and the uof type
# or switch statement? something to make sure we can combine all of the information, that way it won't be lost on the distinct INCNUM
# Scratch check: relabel the force type for one incident to inspect grouping
blah <- testing_uof %>%
mutate(UOF_FORCE_TYPE = ifelse(INCNUM == 5098, "BILLY OAT", UOF_FORCE_TYPE))
View(blah)
# NOTE(review): stray call - unique() with no argument errors if executed
unique()
# just a thought process of using which statements
# blah$UOF_FORCE_TYPE[which(
# !distint(blah$INCNUM) == "Pennsylvania" &
# wrangledUOF.df$STREET_N == 2900 &
# wrangledUOF.df$STREET_G == "S"
# )] = "St"
# Per-officer and per-citizen incident counts
officerOccurances <- testing_uof %>%
count(OFF_NUM)
View(officerOccurances)
citizenOccurances <- testing_uof %>%
count(CITNUM)
View(citizenOccurances)
# Spot-check a single citizen number
testing <- testing_uof %>%
filter(CITNUM == 11890) %>%
select(UOF_FORCE_TYPE, UOF_REASON, OCCURRED_DT, CIT_SEX, CIT_AGE, CIT_COND_TYPE, OFF_COND_TYPE)
View(testing)
# THIS DATA MAKES LITTLE SENSE, HIGHEST UOF ON SINGLE CITIZEN IS 900+,
# AND SOME COPS HAVE ~500 UOF OVER THE LAST 4 YEARS
# THIS SHIT IS WILD, MIGHT NEED TO LOOK AT THIS, MIGHT LITERALLY BE A FEW BAD APPLES
count_of_cit_num <- testing_uof %>%
group_by(OCCURRED_YEAR, UOF_FORCE_TYPE, UOF_REASON, OFF_NUM) %>%
dplyr::count(CITNUM)
count_of_off_num <- testing_uof %>%
group_by(OCCURRED_YEAR, UOF_FORCE_TYPE, UOF_REASON, CITNUM) %>%
dplyr::count(OFF_NUM)
# View(count_of_cit_num)
# View(count_of_off_num)
# NOTE(review): `counting_cit_off_nums` is never defined - this View() fails
View(counting_cit_off_nums)
# how to graph this? maybe look at how you did UCR data?
# NOTE(review): `OFF_NUM == factor(OFF_NUM)` inside mutate() is a comparison,
# not an assignment - it creates an unnamed logical column; `=` was likely
# intended
testingGraphOnIncNum <- count_of_off_num %>%
mutate(OFF_NUM == factor(OFF_NUM)) %>%
ggplot(aes(x = OFF_NUM, fill = OCCURRED_YEAR)) +
geom_bar()
testingGraphOnIncNum
testingOff_Num <- testing_uof %>%
filter(OFF_NUM == 1814)
View(testingOff_Num)
# Collapse the force types used per citizen/officer/reason into one string
blah <- testing_uof %>%
group_by(CITNUM, OFF_NUM, UOF_REASON) %>%
summarise(UOF_FORCE_TYPE = paste0(UOF_FORCE_TYPE, collapse = " ")) %>%
ungroup()
View(blah)
graphing.1814 <- testingOff_Num %>%
ggplot(aes(x = CIT_RACE)) +
geom_bar()
graphing.1814
resistingArrest <- testing_uof %>%
filter(UOF_REASON == "Resisting Arrest") %>%
group_by(UOF_REASON, UOF_FORCE_TYPE, INCNUM) %>%
# mutate(UOF_FORCE_TYPE == ) %>%
distinct(INCNUM)
View(resistingArrest)
TESTING_ <- testing_uof %>%
filter(INCNUM == 5098)
View(TESTING_)
# RColorBrewer::display.brewer.all(colorblindFriendly = TRUE)
# RColorBrewer::display.brewer.all()
# palette_by_year <- RColorBrewer::brewer.pal(n = 8, name = "Paired")
View(grDevices::colors())
# One colour per occurrence year plus an "Unreported" category
color_palette_by_years_2014_to_unreported <- c("indianred", "wheat", "cornflowerblue", "seagreen2", "chocolate2", "purple", "darkorange4")
# Scatter: how often each incident number appears (= UOF events per incident)
groupingByNumberOfINC <-
testing_uof %>%
arrange(desc(OCCURRED_YEAR)) %>%
mutate(OCCURRED_YEAR = ifelse(OCCURRED_YEAR == 2000, "Unreported", OCCURRED_YEAR)) %>% # was used as a placeholder
replace_na(list(OCCURRED_YEAR = "Unreported")) %>%
group_by(OCCURRED_YEAR, UOF_FORCE_TYPE, UOF_REASON) %>%
# NOTE(review): `==` is a comparison, not an assignment - this mutate adds an
# unnamed logical column instead of converting OCCURRED_YEAR to a factor
mutate(OCCURRED_YEAR == as.factor(OCCURRED_YEAR)) %>%
count(INCNUM) %>%
rename("NUM.OF.OCCURANCES.INCNUM" = n) %>%
# filter(NUM.OF.OCCURANCES.INCNUM > 50) %>%
ggplot(aes(x = INCNUM, y = NUM.OF.OCCURANCES.INCNUM, color = OCCURRED_YEAR)) +
geom_point() +
labs(title = "Number of Occurances of specific Incident Numbers",
subtitle = "(meaning, how many times use of force was used in one incident)",
caption = paste0("Measuring all of the INCNUMs together,\nwe can assume the 'Unreported' years are \ngrouped together with their respective year.")) +
scale_color_manual(values=color_palette_by_years_2014_to_unreported) +
# changing the color fill of the OCCURRED_YEAR
# theme_bw() +
theme_classic() +
theme(legend.position="bottom") +
coord_flip()
groupingByNumberOfINC
# Same idea, but counting UOF events per citizen number
groupingByCitizenNum <-
testing_uof %>%
arrange(desc(OCCURRED_YEAR)) %>%
mutate(OCCURRED_YEAR = ifelse(OCCURRED_YEAR == 2000, "Unreported", OCCURRED_YEAR)) %>% # was used as a placeholder
replace_na(list(OCCURRED_YEAR = "Unreported")) %>%
group_by(OCCURRED_YEAR, UOF_FORCE_TYPE, UOF_REASON) %>%
# NOTE(review): same `==` vs `=` issue as above
mutate(OCCURRED_YEAR == as.factor(OCCURRED_YEAR)) %>%
count(CITNUM) %>%
rename("NUM.OF.OCCURANCES.ON.CITNUM" = n) %>%
# filter(NUM.OF.OCCURANCES.ON.CITNUM > 50) %>%
ggplot(aes(x = CITNUM, y = NUM.OF.OCCURANCES.ON.CITNUM, color = OCCURRED_YEAR)) +
geom_point() +
labs(title = "Number of Occurances on Single Citizen Number",
subtitle = "(meaning, how many times use of force was used on one 'Citizen')",
caption = "Measuring all of the CITNUMs together, we can assume the 'Unreported' years are grouped together") +
scale_color_manual(values=color_palette_by_years_2014_to_unreported) +
# changing the color fill of the OCCURRED_YEAR
coord_flip()
groupingByCitizenNum
testing3524 <- testing_uof %>%
filter(OFF_NUM == 3524)
View(testing3524)
# NOTE(review): the pipe chain ends at filter() - the ggplot()/labs()/theme()
# lines below are detached (missing %>% after filter), so only the filtered
# counts are stored and the ggplot expression evaluates on its own and errors
# (ggplot(aes(...)) has no data)
howManyOfficersInvolved <- testing_uof %>%
arrange(desc(OCCURRED_YEAR)) %>%
mutate(OCCURRED_YEAR = ifelse(OCCURRED_YEAR == 2000, "Unreported", OCCURRED_YEAR)) %>% # was used as a placeholder
mutate(OFF_NUM = ifelse(OFF_NUM == 9999, "Unreported", OFF_NUM)) %>% # was used as a placeholder
group_by(OCCURRED_YEAR) %>%
count(OFF_NUM) %>%
filter(n > 500)
ggplot(aes(x = OFF_NUM, y = n)) +
geom_point(aes(color = OCCURRED_YEAR)) +
labs(title = "Number of Occurances on Single Citizen Number",
subtitle = "(meaning, how many times use of force was used on one 'Citizen')") +
theme(axis.text.x = element_text(face="bold", color="#993333",
size=14, angle=45),
axis.text.y = element_text(face="bold", color="#993333",
size=14, angle=45))
howManyOfficersInvolved
View(howManyOfficersInvolved)
# this returns a very weird observation
# Scatter of officers with > 50 UOF events, labelled with count/year/officer
groupingByOfficerNumber <-
testing_uof %>%
arrange(desc(OCCURRED_YEAR)) %>%
mutate(OCCURRED_YEAR = ifelse(OCCURRED_YEAR == 2000, "Unreported", OCCURRED_YEAR)) %>% # was used as a placeholder
mutate(OFF_NUM = ifelse(OFF_NUM == 9999, "Unreported", OFF_NUM)) %>% # was used as a placeholder
replace_na(list(OCCURRED_YEAR = "Unreported")) %>%
replace_na(list(OFF_NUM = "Unreported")) %>%
group_by(OCCURRED_YEAR, UOF_FORCE_TYPE, UOF_REASON) %>%
# mutate(OCCURRED_YEAR == as.factor(OCCURRED_YEAR)) %>%
count(OFF_NUM) %>%
rename("NUM.OF.OCCURANCES.ON.OFF_NUM" = n) %>%
filter(NUM.OF.OCCURANCES.ON.OFF_NUM > 50) %>%
ggplot(aes(x = OFF_NUM, y = NUM.OF.OCCURANCES.ON.OFF_NUM, color = OCCURRED_YEAR)) +
geom_point() +
ggrepel::geom_label_repel(aes(label = paste0(NUM.OF.OCCURANCES.ON.OFF_NUM, " occurances in ", OCCURRED_YEAR, " by Officer ", OFF_NUM)), label.size = 0.2) +
coord_flip() +
labs(title = "Number of Occurances on Single Citizen Number",
subtitle = "(meaning, how many times use of force was used on one 'Citizen')") +
scale_color_manual(values = color_palette_by_years_2014_to_unreported) +
theme(axis.text.x = element_text(angle = 45),
axis.text.y = element_text(angle = 45))
groupingByOfficerNumber
whatIs1982 <- testing_uof %>%
filter(OFF_NUM == 1982)
View(whatIs1982)
|
658078ddbc06e8e10db59572dfdec1a24d71631e
|
f2eadee083a58efffd3928f7feffae3d76f99b99
|
/plot3.R
|
1ab3223b818c27a71b3665b225046ac86ba5e8bb
|
[] |
no_license
|
tianwenlan/4.Exploratory_Data_Analysis_Week1
|
fe9900342f21621871c1e9f31f410e38f0d8ece4
|
e4f12e7452f17354e54e92d7a6f30e118dd310b6
|
refs/heads/master
| 2020-12-26T23:44:40.939320
| 2020-02-02T01:10:19
| 2020-02-02T01:10:19
| 237,692,041
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,341
|
r
|
plot3.R
|
# plot3: energy sub-metering for the household power consumption data set over
# 1-2 Feb 2007, written to plot3.png (Exploratory Data Analysis, week 1).
library(dplyr)
#step 0: download files
zipUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
zipFile <- "household_power_consumption.zip"
if (!file.exists(zipFile)) {
download.file(zipUrl, zipFile, mode = "wb")
}
# unzip zip file containing data if data directory doesn't already exist
dataPath <- "household_power_consumption"
if (!file.exists(dataPath)) {
unzip(zipFile)
}
#read the file to data, subset the data, convert date and time
data <-read.table('household_power_consumption.txt', sep=';', header=T, na.strings = "?")
#data$Date <- as.Date(as.character(data$Date), "%d/%m/%Y")
#print(as.character(data$Date))
data <-subset(data, as.character(Date) == '1/2/2007' | as.character(Date) == '2/2/2007')
# NOTE(review): paste() with a ' ' argument plus the default sep inserts extra
# spaces ("date   time"); strptime appears tolerant of this - confirm the
# parsed times are not all NA before relying on the plot
data$Time <- paste(data$Date, ' ', data$Time)
data$Time <- strptime(data$Time, '%d/%m/%Y %H:%M:%S')
#make the plot: all three sub-metering series on one set of axes
png(filename="plot3.png", width = 480, height = 480)
colors = c('black', 'red', 'blue')
plot(data$Time, data$Sub_metering_1, xlab= '', ylab='Energy sub metering', type="l", col = colors[1])
points(data$Time, data$Sub_metering_2, xlab= '', type="l", col = colors[2])
points(data$Time, data$Sub_metering_3, xlab= '', type="l", col = colors[3])
legend('topright', lty =1, legend = c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'), col=colors)
dev.off()
|
28a921680a013453e90e0d41c469aeabe94783f4
|
ad89addeb61f00bdf8a18275055c99eb22cfc0b2
|
/graphs/citation.R
|
abe27683ecb4033df36085024fb4fe2d95b35529
|
[] |
no_license
|
techtronics/ESOF522
|
850d9f987713dc6f18aba013edf27360caeb4c59
|
7b7b03165c12f6142a9958d5e09b04df928457ac
|
refs/heads/master
| 2017-12-07T11:23:15.938041
| 2015-05-21T23:54:31
| 2015-05-21T23:54:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,678
|
r
|
citation.R
|
# Three-way ANOVA of average citation counts by journal rank, years since
# publication, and paper (title) index, with all interactions.
# NOTE(review): rank/year/title are numeric codes, so aov() treats them as
# continuous covariates (1 df each); wrap them in factor() if categorical
# effects were intended.
myDF <- data.frame(rank = c(1,1,1,1,1,2,2,2,2,2,3,3,3,3,3,1,1,1,1,1,2,2,2,2,2,3,3,3,3,3,1,1,1,1,1,2,2,2,2,2,3,3,3,3,3,1,1,1,1,1,2,2,2,2,2,3,3,3,3,3,1,1,1,1,1,2,2,2,2,2,3,3,3,3,3),
year = c(1,2,3,4,5,1,2,3,4,5,1,2,3,4,5,1,2,3,4,5,1,2,3,4,5,1,2,3,4,5,1,2,3,4,5,1,2,3,4,5,1,2,3,4,5,1,2,3,4,5,1,2,3,4,5,1,2,3,4,5,1,2,3,4,5,1,2,3,4,5,1,2,3,4,5),
title = c(1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5),
citations = c(494.2,297.2,155.5,48.8,27.6,95.5,106.3,30.1,18.4,9.5,75.2,24.7,26.5,16.4,7.8,57.3,43.3,50.9,21.5,17.6,15.4,16.4,19.8,15.9,5.8,10.5,12.1,7.8,7,4.5,31,29.3,39.7,15.6,15.5,12.1,14.2,9.1,6.8,5.6,14.5,9.8,7.2,5.8,3.7,42.8,61.6,67.3,38.3,15.7,4.7,45.9,27.3,6.3,4.7,15.2,9.8,9.5,6,3.5,67.4,75,46.9,25.6,19.4,12.8,10.2,12.8,6,5.7,9.6,7.7,8.2,5.5,4.3)
)
aov.out = aov(myDF$citations ~ myDF$rank + myDF$year + myDF$title + myDF$rank * myDF$year + myDF$rank * myDF$title + myDF$year * myDF$title + myDF$rank * myDF$year * myDF$title, data = myDF)
summary(aov.out)
#mediun_citations = c(49,170,54,27,17.5,95,27,16,12,6,37,22,15.5,16.5,5,22,12,18,5,9,9,9,10,6,3,5,6,4,4,3,11,17.5,28,7,9,10,6.5,4,4,4,5,5,4,3,3,35.5,24.5,11,16,7,4,8,12,4,2.5,10,6,5,5,3,17.5,20.5,21,12,11,5,6,7,3,3,5,5,5,3.5,2)
#average_citations = c(494.2,297.2,155.5,48.8,27.6,95.5,106.3,30.1,18.4,9.5,75.2,24.7,26.5,16.4,7.8,57.3,43.3,50.9,21.5,17.6,15.4,16.4,19.8,15.9,5.8,10.5,12.1,7.8,7,4.5,31,29.3,39.7,15.6,15.5,12.1,14.2,9.1,6.8,5.6,14.5,9.8,7.2,5.8,3.7,42.8,61.6,67.3,38.3,15.7,4.7,45.9,27.3,6.3,4.7,15.2,9.8,9.5,6,3.5,67.4,75,46.9,25.6,19.4,12.8,10.2,12.8,6,5.7,9.6,7.7,8.2,5.5,4.3)
340675
|
97f3df05c2edb441d7f937ce6d7a002d303d5caf
|
e42721b2bf31675a294e14b2d59111a1b83de58b
|
/man/GSE33335.Rd
|
2f5121f16123262092ecde9d86d5f91c0766ddce
|
[] |
no_license
|
szymczak-lab/DataPathwayGuidedRF
|
d60fdd2a07cf69359f147b7956a27b03d1b9dd02
|
8af8869378e6aae27b727f1417e51c30564f4a34
|
refs/heads/master
| 2020-12-11T06:42:48.292639
| 2020-01-27T09:16:50
| 2020-01-27T09:16:50
| 233,790,509
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 842
|
rd
|
GSE33335.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GSE33335.R
\docType{data}
\name{GSE33335}
\alias{GSE33335}
\title{GSE33335}
\format{A Summarized Experiment object with 17025 genes and 50 samples (25 cases and 25 controls).
The column outcome in the colData corresponds to the outcome that was used in the paper.}
\usage{
data(GSE33335)
}
\description{
This is a preprocessed hallmark data set with GLYCOLYSIS as target pathway.
An Exon 1.0 ST Array is used to analyze gastric cancer in gastric tissue. The study was performed in a paired design.
}
\references{
Cheng, L., Wang, P., Yang, S., Yang, Y., Zhang, Q., Zhang, W., Xiao, H., Gao, H., and Zhang, Q. (2012). Identification of genes with a correlation between copy number and expression in gastric cancer. BMC Med Genomics, 5, 14.
}
\keyword{datasets}
|
ae274cbba57598b559630d43223cad69d4fe2b5a
|
0a1172cc3ee12f60bf998d79bee902131d45e58e
|
/How to join a shapefile with a csv/join_csv_with_shapefile.R
|
ebd7ca126ce91be5a5f528aa5d25154af4d2d8ca
|
[] |
no_license
|
ft-interactive/R-tutorials
|
2ae76472e1d18cb61ff97fdb3f456c8c16da5775
|
5ccc42d67ba211e43649df0e25c1eb1271edf788
|
refs/heads/master
| 2021-01-21T14:40:50.954977
| 2017-07-10T17:01:35
| 2017-07-10T17:01:35
| 95,441,282
| 20
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,615
|
r
|
join_csv_with_shapefile.R
|
library(rgdal)
library(dplyr)
library(rgeos)
library(ggplot2)

# Join a CSV onto an ESRI shapefile and write the merged layer back out.
#
# Configuration:
#  - fileNameShapefile: path to the .shp (sidecar files must sit alongside it).
#  - fileNameCsv: CSV in the working directory holding the attributes to join.
#  - joinCols: c(<shapefile column>, <csv column>) used as the join key; check
#    the attribute table (e.g. in QGIS) for the exact shapefile column name.
fileNameShapefile <- 'constit/constituencies.shp'
fileNameCsv <- 'ukip_candidate_seats.csv'
joinCols <- c('CODE','PCON15CD')

########################### you shouldn't need to fiddle below here
# Load the attribute CSV (keep strings as character, not factors).
csv <- read.csv(fileNameCsv, stringsAsFactors = FALSE)

# The default OGR layer name is the shapefile's base name without the
# directory prefix or the extension.
layerName <- gsub('.+\\/|\\..+', '', fileNameShapefile)

# Read the shapefile, attach the CSV attributes on the join key, and save.
shapeData <- readOGR(dsn = fileNameShapefile, layer = layerName)
mapData <- merge(shapeData, csv, by.x = joinCols[1], by.y = joinCols[2])
writeOGR(obj = mapData, dsn = "newMap", layer = "newMap", driver = "ESRI Shapefile")
|
72197b2394426150780498c60d4867b6b210cb7f
|
e621d01f3584196d7977f25ebe57af5549ca7600
|
/man/create_NMEA_files.Rd
|
1e477f54bf90e350f4395792e45f4dc765dbfd7f
|
[] |
no_license
|
Lewis-Barnett-NOAA/gapctd
|
0b739094971a7b29f5973faffc17ebc7ba465550
|
1868f5cc0e583a0ab68387da2f8f51c0200b528f
|
refs/heads/main
| 2023-04-16T23:41:49.621428
| 2021-04-23T23:14:25
| 2021-04-23T23:14:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,016
|
rd
|
create_NMEA_files.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_NMEA_files.R
\name{create_NMEA_files}
\alias{create_NMEA_files}
\title{Create NMEA files with LAT/LON for Derive TEOS-10}
\usage{
create_NMEA_files(rodbc_channel = NA, haul_csv = NA, vessel, region, year)
}
\arguments{
\item{rodbc_channel}{Required if haul_csv is not provided. Must provide an open RODBC channel (this parameter) or path to haul_csv. Default = NA.}
\item{haul_csv}{Required if rodbc_channel is not provided. Path to a csv file that contains VESSEL, CRUISE, HAUL, START_TIME, START_LONGITUDE, START_LATITUDE, END_LONGITUDE, END_LATITUDE.}
\item{vessel}{Required. Vessel number as a numeric vector.}
\item{region}{Required. Region as a character vector. Either "bs", "ai", or "goa".}
\item{year}{Required. Year as a numeric vector.}
}
\description{
Function to retrieve haul data from RACEBASE or a csv file and generate NMEA (.txt) files that match .cnv file names. Function dependencies: getPass, RODBC, oce
}
|
c15b44711411f398d65fb8edb315a226fe97db06
|
1c01ed7a5e79c5e281c0ede3406f702f79766882
|
/man/l.cap.Rd
|
a7c56991d902d0624870fe6a6e0b5447f0c4efff
|
[] |
no_license
|
christiantillich/AnaliTools
|
19e738e4084be1678ff7aeda45aa9f146de5ac1d
|
cab56ef7729f1d9692af5241ac5eca60060c3045
|
refs/heads/master
| 2020-04-06T05:12:48.950283
| 2019-02-25T22:09:03
| 2019-02-25T22:09:03
| 47,645,937
| 0
| 1
| null | 2019-02-25T22:09:04
| 2015-12-08T19:53:20
|
R
|
UTF-8
|
R
| false
| true
| 453
|
rd
|
l.cap.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.tools.R
\name{l.cap}
\alias{l.cap}
\title{l.cap}
\usage{
l.cap(v, lbnd = -Inf, ubnd = Inf)
}
\arguments{
\item{v}{- A list input.}
\item{lbnd}{- The lower bound. Defaults to -Inf.}
\item{ubnd}{- The upper bound. Defaults to Inf}
}
\value{
Returns the list, where values are replaced by the bound if they
exceed that limit.
}
\description{
An lapply wrapper for cap.
}
|
34d36b63ff59b34959ef217bdc3254131a3ef819
|
1061216c2c33c1ed4ffb33e6211565575957e48f
|
/r/tests/testthat/test_bot.R
|
3c037460377a909b8e662b4d3a56153e1ab87b7e
|
[] |
no_license
|
MSurfer20/test2
|
be9532f54839e8f58b60a8e4587348c2810ecdb9
|
13b35d72f33302fa532aea189e8f532272f1f799
|
refs/heads/main
| 2023-07-03T04:19:57.548080
| 2021-08-11T19:16:42
| 2021-08-11T19:16:42
| 393,920,506
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,859
|
r
|
test_bot.R
|
# Automatically generated by openapi-generator (https://openapi-generator.tech)
# Please update as you see appropriate
# Skeleton testthat suite for the generated `Bot` model class: one placeholder
# test per model property, with the expectation commented out until a concrete
# expected value is filled in. `Bot` is defined elsewhere in the generated
# package.
context("Test Bot")
model.instance <- Bot$new()
test_that("user_id", {
# tests for the property `user_id` (AnyType)
# uncomment below to test the property
#expect_equal(model.instance$`user_id`, "EXPECTED_RESULT")
})
test_that("full_name", {
# tests for the property `full_name` (AnyType)
# uncomment below to test the property
#expect_equal(model.instance$`full_name`, "EXPECTED_RESULT")
})
test_that("api_key", {
# tests for the property `api_key` (AnyType)
# uncomment below to test the property
#expect_equal(model.instance$`api_key`, "EXPECTED_RESULT")
})
test_that("default_sending_stream", {
# tests for the property `default_sending_stream` (AnyType)
# uncomment below to test the property
#expect_equal(model.instance$`default_sending_stream`, "EXPECTED_RESULT")
})
test_that("default_events_register_stream", {
# tests for the property `default_events_register_stream` (AnyType)
# uncomment below to test the property
#expect_equal(model.instance$`default_events_register_stream`, "EXPECTED_RESULT")
})
test_that("default_all_public_streams", {
# tests for the property `default_all_public_streams` (AnyType)
# uncomment below to test the property
#expect_equal(model.instance$`default_all_public_streams`, "EXPECTED_RESULT")
})
test_that("avatar_url", {
# tests for the property `avatar_url` (AnyType)
# uncomment below to test the property
#expect_equal(model.instance$`avatar_url`, "EXPECTED_RESULT")
})
test_that("owner_id", {
# tests for the property `owner_id` (AnyType)
# uncomment below to test the property
#expect_equal(model.instance$`owner_id`, "EXPECTED_RESULT")
})
test_that("services", {
# tests for the property `services` (AnyType)
# uncomment below to test the property
#expect_equal(model.instance$`services`, "EXPECTED_RESULT")
})
test_that("email", {
# tests for the property `email` (character)
# The email of the bot.
# uncomment below to test the property
#expect_equal(model.instance$`email`, "EXPECTED_RESULT")
})
test_that("bot_type", {
# tests for the property `bot_type` (integer)
# An integer describing the type of bot: * `1` for a `Generic` bot. * `2` for an `Incoming webhook` bot. * `3` for an `Outgoing webhook` bot. * `4` for an `Embedded` bot.
# uncomment below to test the property
#expect_equal(model.instance$`bot_type`, "EXPECTED_RESULT")
})
test_that("is_active", {
# tests for the property `is_active` (character)
# A boolean describing whether the user account has been deactivated.
# uncomment below to test the property
#expect_equal(model.instance$`is_active`, "EXPECTED_RESULT")
})
|
b1bd9cacca27441864e4ab789b3b3af85ae69142
|
3f476a051eb22af77130ee485d0cbac40ccde03c
|
/lynx.vitals.r
|
4d95e12542e4f0ffc6c9933fa89201c44f732195
|
[] |
no_license
|
ranalut/Scripts
|
a62d44f809c460319cdd1e2ad249f32aec9ce4b3
|
d863122f53e3a22d23c87a1105c6de46280e2ad0
|
refs/heads/master
| 2020-12-29T02:38:27.728016
| 2017-01-23T07:00:16
| 2017-01-23T07:00:16
| 8,935,875
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 735
|
r
|
lynx.vitals.r
|
# Vital rates taken from Carroll 2007
# Builds HexSim vitals input: for each of 9 cycle years, writes the stationary
# (mean-cycle) survival vector followed by that year's cycled survival vector.
# Multiplier for demographic cycling
fecund <- c(1,0.8,0.2,0.25,0.25,0.25,0.25,0.4,0.6)
surv <- c(1,0.89,0.67,0.56,0.56,0.56,0.56,0.67,0.89)
mean.fecund <- mean(fecund)
mean.surv <- mean(surv)
# Max values (fecund.max is kept for reference; it is not used below)
surv.max <- c(0.77,0.77,0.99,0.99,0.99,0.44)
fecund.max <- c(0,2.4,2.4,2.9,2.2,2.2)
# Survival Matrices
# Stationary vector: max survival scaled by mean survival; the first entry is
# additionally scaled by mean fecundity.
# (Dead preallocations `s.surv <- rep(NA,6)` / `n.surv <- rep(NA,6)` removed:
# both were overwritten before first use.)
s.surv <- round(surv.max * mean.surv,2)
s.surv[1] <- round(s.surv[1] * mean.fecund,2)
# NOTE(review): output path is machine-specific (F: drive); consider making it
# configurable before running elsewhere.
sink('F:/PNWCCVA_Data2/HexSim/Workspaces/lynx_v1/Analysis/vitals.v1.txt')
for (i in 1:9)
{
	# Year-i cycled survival vector, first entry scaled by year-i fecundity.
	n.surv <- round(surv.max * surv[i],2)
	n.surv[1] <- round(n.surv[1] * fecund[i],2)
	# Write
	cat(s.surv,sep='\n')
	cat(n.surv,sep='\n')
	cat('\n')
}
sink()
|
f961727bb4c84ed9a621da30a57c7003357dcf8d
|
37ff9d489e5663fba507851294d1c5161b9482bc
|
/r-misc.R
|
285b5018a778b83a3b630b4c4ccb2339f826c11d
|
[] |
no_license
|
artyomovlab/seurat_docker
|
c820ed83c810175e217d16d0e05043e7a6b919f7
|
f2419a3180a6033391fca3b5f805d5109a777f4b
|
refs/heads/main
| 2023-05-07T23:31:17.001027
| 2021-05-21T17:29:42
| 2021-05-21T17:29:42
| 369,606,307
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 298
|
r
|
r-misc.R
|
source("https://bioconductor.org/biocLite.R")
install.packages(c("tidyverse","ggplot2","Hmisc","plotrix","png", "Matrix", "RJSONIO", "cowplot", "devtools","foreign","latticeExtra","car","rstatix", "ggpubr"),repo=paste0("https://mran.microsoft.com/snapshot/",format(Sys.Date(), format="%Y-%m-%d")))
|
5425003fd4a61d2e4d4338c5c1e333609a523f1f
|
01f8a1aadcd95197914d7fcf496667ee756a29be
|
/data preprocess.R
|
d8529d8fceeeafb77fbf302a0c50b1cff961895c
|
[] |
no_license
|
sahilsingh2110/Prediction-for-VC-investment-in-startups-Python-R
|
9996b55fd13f067245557d38686a9d26a488e62b
|
2321c431ee4be205a1771ee91fc0ee468302763f
|
refs/heads/master
| 2020-03-23T02:19:34.157334
| 2018-08-22T17:03:18
| 2018-08-22T17:03:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 925
|
r
|
data preprocess.R
|
# Classic preprocessing pipeline: impute missing values, encode categoricals,
# split into train/test, and scale the numeric columns.
dataset <- read.csv('Data.csv')
dataset

# Impute missing Age/Salary with the column mean (equivalent to the
# ifelse(is.na(...), ave(..., FUN = mean), ...) construction).
dataset$Age[is.na(dataset$Age)] <- mean(dataset$Age, na.rm = TRUE)
dataset$Salary[is.na(dataset$Salary)] <- mean(dataset$Salary, na.rm = TRUE)

# Encoding categorical data as numeric-labelled factors.
dataset$Country <- factor(dataset$Country, levels = c("France","Spain", "Germany" ), labels = c(1,2,3) )
dataset$Purchased <- factor(dataset$Purchased , levels = c("Yes","No"), labels = c(1,0) )

# Splitting the dataset into the Training set and Test set
# (install.packages('caTools') if not yet installed).
library(caTools)
set.seed(123)
split <- sample.split(dataset$Purchased, SplitRatio = 0.8 )
training_set <- subset(dataset, split == TRUE)
test_set <- subset(dataset, split == FALSE)

# Feature Scaling on the numeric columns (2:3 = Age, Salary).
training_set[, 2:3] <- scale(training_set[, 2:3])
test_set[, 2:3] <- scale(test_set[, 2:3])
|
46ed009f14273d2b50ea6c6dc654de39f677bf15
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/MixMatrix/man/rmatrixinvt.Rd
|
0dcdc039bbc07b38924ba02f62f9f4e3c07612d1
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,817
|
rd
|
rmatrixinvt.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/matrixt.R
\name{rmatrixinvt}
\alias{rmatrixinvt}
\alias{dmatrixinvt}
\title{Distribution functions for matrix variate inverted t distributions}
\usage{
rmatrixinvt(n, df, mean, L = diag(dim(as.matrix(mean))[1]),
R = diag(dim(as.matrix(mean))[2]), U = L \%*\% t(L), V = t(R) \%*\%
R, list = FALSE, array = NULL)
dmatrixinvt(x, df, mean = matrix(0, p, n), L = diag(p), R = diag(n),
U = L \%*\% t(L), V = t(R) \%*\% R, log = FALSE)
}
\arguments{
\item{n}{number of observations for generation}
\item{df}{degrees of freedom (\eqn{>0}, may be non-integer),
\code{df = 0, Inf} is allowed and will return a normal distribution.}
\item{mean}{\eqn{p \times q}{p * q} This is really a 'shift' rather than a
mean, though the expected value will be equal to this if
\eqn{df > 2}}
\item{L}{\eqn{p \times p}{p * p} matrix specifying relations among the rows.
By default, an identity matrix.}
\item{R}{\eqn{q \times q}{q * q} matrix specifying relations among the
columns. By default, an identity matrix.}
\item{U}{\eqn{LL^T} - \eqn{p \times p}{p * p} positive definite matrix for
rows, computed from \eqn{L} if not specified.}
\item{V}{\eqn{R^T R} - \eqn{q \times q}{q * q} positive definite matrix for
columns, computed from \eqn{R} if not specified.}
\item{list}{Defaults to \code{FALSE} . If this is \code{TRUE} , then the
output will be a list of matrices.}
\item{array}{If \eqn{n = 1} and this is not specified and \code{list} is
\code{FALSE} , the function will return a matrix containing the one
observation. If \eqn{n > 1} , should be the opposite of \code{list} .
If \code{list} is \code{TRUE} , this will be ignored.}
\item{x}{quantile for density}
\item{log}{logical; in \code{dmatrixt}, if \code{TRUE}, probabilities
\code{p} are given as \code{log(p)}.}
}
\value{
\code{rmatrixinvt} returns either a list of \eqn{n}
\eqn{p \times q}{p * q} matrices or
a \eqn{p \times q \times n}{p * q * n} array.
\code{dmatrixinvt} returns the density at \code{x}.
}
\description{
Generate random samples from the inverted matrix
variate t distribution or compute densities.
}
\examples{
# an example of drawing from the distribution and computing the density.
A<-rmatrixinvt(n = 2, df = 10, diag(4))
dmatrixinvt(A[,,1], df = 10, mean = diag(4))
}
\references{
Gupta, Arjun K, and Daya K Nagar. 1999. Matrix Variate Distributions.
Vol. 104. CRC Press. ISBN:978-1584880462
Dickey, James M. 1967. “Matricvariate Generalizations of the Multivariate t
Distribution and the Inverted Multivariate t
Distribution.” Ann. Math. Statist. 38 (2): 511–18. \doi{10.1214/aoms/1177698967}
}
\seealso{
\code{\link{rmatrixnorm}}, \code{\link{rmatrixt}},
and \code{\link[stats]{Distributions}}.
}
|
733906e313f753189982de7afbad7c8fa35f58bc
|
53528ae8feec89209d99660d4bc6cd0d7f4c79d9
|
/run_analysis.R
|
e7a33af625ed55d4c4b69e6f569ac9af6d13a726
|
[] |
no_license
|
fenom/GetData_PeerAssessment
|
5ee99e47f470de57295d7f593a0d09910e6d1e24
|
20959f0bad92d6efcb00c47c0b25e9af2c48502d
|
refs/heads/master
| 2016-08-11T06:19:37.909032
| 2015-12-26T04:20:54
| 2015-12-26T04:20:54
| 48,567,984
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,174
|
r
|
run_analysis.R
|
# Getting & Cleaning Data assignment: merge the UCI HAR train/test sets, label
# activities, and write per-subject/activity means of the mean()/std() features.
features <- read.table("UCI HAR Dataset/features.txt")
labels <- read.table("UCI HAR Dataset/activity_labels.txt")

# --- Training set ---
train <- read.table("UCI HAR Dataset/train/X_train.txt")
train.labels <- read.table("UCI HAR Dataset/train/y_train.txt")
train.subjects <- read.table("UCI HAR Dataset/train/subject_train.txt")
names(train) <- features$V2
# BUG FIX: the original used merge(train.labels, labels, sort = FALSE), but
# merge() does not guarantee row order is preserved (even with sort = FALSE),
# which can mis-align activity names with their measurement rows. Index the
# lookup table directly instead, which keeps rows in their original order.
train.activities <- labels$V2[match(train.labels$V1, labels$V1)]
train <- cbind(subject = train.subjects$V1, activity = train.activities, train)

# --- Test set (same treatment) ---
test <- read.table("UCI HAR Dataset/test/X_test.txt")
test.labels <- read.table("UCI HAR Dataset/test/y_test.txt")
test.subjects <- read.table("UCI HAR Dataset/test/subject_test.txt")
names(test) <- features$V2
test.activities <- labels$V2[match(test.labels$V1, labels$V1)]
test <- cbind(subject = test.subjects$V1, activity = test.activities, test)

# Combine, average every feature per (subject, activity), then keep only the
# mean()/std() feature columns (+2 offsets past the subject/activity columns).
complete <- rbind(train, test)
mean <- aggregate(complete[, 3:length(complete)], list(subject = complete$subject,activity = complete$activity), mean)
subset.mean <- mean[, c(1, 2, sort(union(grep('mean()', features$V2, fixed = TRUE), grep('std()', features$V2, fixed = TRUE))) + 2)]
write.table(subset.mean, "table.txt", row.names = FALSE)
|
bcd96798cdb33eb7dedc238681a34049a9ba3082
|
086b3d93a0d22a0beadea74150404a7919a28e66
|
/QE_Functions/n_uptake_root_biomass/M_constraint_root_ocn.R
|
ce24da3838859ed0a03fe6ba4c441565c6f77ec2
|
[] |
no_license
|
mingkaijiang/QEframework
|
443a9b0c662f44843c8395f0090be8c78363565d
|
16a3860877bf8c4815b4ad0bce6e460ab3aec36a
|
refs/heads/master
| 2021-06-24T05:13:25.654618
| 2019-04-26T04:03:36
| 2019-04-26T04:03:36
| 118,394,762
| 4
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,371
|
r
|
M_constraint_root_ocn.R
|
### Medium-term nutrient-N constraint on NPP (passive pool, leaching and wood
### included), with N uptake modelled as a saturating function of root mass
### following the O-CN approach.
### Relies on globals defined elsewhere in the project: soil_coef(), ncp, ncs,
### leachn, k, vmax, sr.
M_constraint_root_ocn <- function(df, a, C_pass, C_slow, Nin_L) {
    sc <- soil_coef(df, a)

    # Litter fractions routed into the passive and slow soil pools.
    to_pass <- a$af * sc$omega_af_pass + a$ar * sc$omega_ar_pass + a$aw * sc$omega_aw_pass
    to_slow <- a$af * sc$omega_af_slow + a$ar * sc$omega_ar_slow + a$aw * sc$omega_aw_slow

    # N released by decomposition of the passive and slow pools.
    n_from_pass <- (1 - sc$qq_pass) * sc$decomp_pass * C_pass * ncp
    n_from_slow <- (1 - sc$qq_slow) * sc$decomp_slow * C_slow * ncs

    # Total N supply: external input + leaching term + decomposition release.
    n_supply <- Nin_L + n_from_pass + n_from_slow

    # N sinks per unit NPP: wood allocation and burial into soil pools.
    nwood <- a$aw * a$nw
    nburial <- to_pass * ncp + to_slow * ncs

    # N leaching (saturating root-uptake form).
    plant_n <- a$af * a$nf + a$aw * a$nw + a$ar * a$nr
    nleach <- leachn * k * plant_n / (vmax * (a$ar / sr) - plant_n)

    # N balance gives NPP in g C m-2 yr-1; report it in kg C m-2 yr-1.
    npp_gc <- (n_supply - nleach) / (nwood + nburial)
    data.frame(NPP = npp_gc * 10^-3, nwood = nwood, nburial = nburial, nleach = nleach)
}
|
1ecc029cc37620844c56b3fa0d136b41294c3c2f
|
825074ec285e262936790f313648a2844876b179
|
/data-raw/ML-Desafio.R
|
f26c722328422ee4cf29efe35d130bd51acbd63d
|
[] |
no_license
|
ViniciusJacobs/CreditoXGBoost
|
0b341361fcdac067302953af9b720cb473f6e165
|
207ae71f1bba17e413cbc3ee47c2894866a47e7a
|
refs/heads/master
| 2023-07-17T02:55:29.436825
| 2021-08-31T23:45:43
| 2021-08-31T23:45:43
| 305,163,302
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,139
|
r
|
ML-Desafio.R
|
# Adult-income classification with XGBoost via tidymodels: data preparation.
library(tidyverse)
library(tidymodels)
library(ggplot2)
library(skimr)
library(stats)
library(doParallel)
# Number of xgboost threads used by every model spec below.
cores = 5
base_adult <- read_rds("data/adult.rds")
glimpse(base_adult)
# Convert the categorical columns to factors.
base_adult$workclass <- as.factor(base_adult$workclass)
base_adult$education <- as.factor(base_adult$education)
base_adult$marital_status <- as.factor(base_adult$marital_status)
base_adult$occupation <- as.factor(base_adult$occupation)
base_adult$relationship <- as.factor(base_adult$relationship)
base_adult$race <- as.factor(base_adult$race)
base_adult$sex <- as.factor(base_adult$sex)
# Fill missing values with the (assumed) most common category.
base_adult <- base_adult %>%
tidyr::replace_na(replace = list(native_country = "United-States")) %>%
tidyr::replace_na(replace = list(workclass = "Private")) %>%
tidyr::replace_na(replace = list(occupation = "Other-service"))
skimr::skim(base_adult)
questionr::freq.na(base_adult)
#ML
# 50/50 train/test split (id column dropped), preprocessing recipe
# (center/scale numerics, dummy-encode nominals), 5-fold CV resamples.
set.seed(55)
base_adult_split <- initial_split(base_adult %>% select(-id), prop = 0.50)
base_adult_train <- training(base_adult_split)
base_adult_test <- testing(base_adult_split)
base_adult_recipe <- recipe(resposta ~ ., base_adult_train) %>%
step_center(all_numeric()) %>%
step_scale(all_numeric()) %>%
step_dummy(all_nominal(), -all_outcomes())
base_adult_resamples <- vfold_cv(base_adult_train, v = 5)
# Sequential hyperparameter tuning in five passes, each fixing the winners of
# the previous pass (roc_auc on the CV resamples as the selection metric).
#### Pass 1: tune trees and learn_rate, everything else fixed.
base_adult_model <- boost_tree(
mtry = 0.8,
trees = tune(),
min_n = 5,
tree_depth = 4,
loss_reduction = 0,
learn_rate = tune(),
sample_size = 0.8
) %>%
set_mode("classification") %>%
set_engine("xgboost", nthread = cores)
base_adult_model
base_adult_wf <- workflow() %>%
add_model(base_adult_model) %>%
add_recipe(base_adult_recipe)
base_adult_grid <- expand.grid(
learn_rate = c(0.05, 0.1, 0.2, 0.3),
trees = c(100, 250, 500, 1000, 1500, 2000)
)
base_adult_grid
base_adult_grid <- base_adult_wf %>%
tune_grid(
resamples = base_adult_resamples,
grid = base_adult_grid,
control = control_grid(save_pred = TRUE, verbose = FALSE, allow_par = TRUE),
metrics = metric_set(roc_auc)
)
autoplot(base_adult_grid)
base_adult_grid %>% show_best(metric = "roc_auc", n = 5)
base_adult_select_best_passo1 <- base_adult_grid %>% select_best(metric = "roc_auc")
base_adult_select_best_passo1
#### Pass 2: fix pass-1 winners; tune min_n and tree_depth.
base_adult_model <- boost_tree(
mtry = 0.8,
trees = base_adult_select_best_passo1$trees,
min_n = tune(),
tree_depth = tune(),
loss_reduction = 0,
learn_rate = base_adult_select_best_passo1$learn_rate,
sample_size = 0.8
) %>%
set_mode("classification") %>%
set_engine("xgboost", nthread = cores)
#### Workflow
base_adult_wf <- workflow() %>%
add_model(base_adult_model) %>%
add_recipe(base_adult_recipe)
#### Grid
base_adult_grid <- expand.grid(
tree_depth = c(3, 4, 6, 8, 10),
min_n = c(5, 15, 30, 60, 90)
)
base_adult_grid <- base_adult_wf %>%
tune_grid(
resamples = base_adult_resamples,
grid = base_adult_grid,
control = control_grid(save_pred = TRUE, verbose = FALSE, allow_par = TRUE),
metrics = metric_set(roc_auc)
)
#### Best hyperparameters
autoplot(base_adult_grid)
base_adult_grid %>% show_best(metric = "roc_auc", n = 5)
base_adult_select_best_passo2 <- base_adult_grid %>% select_best(metric = "roc_auc")
base_adult_select_best_passo2
#### Pass 3: tune loss_reduction.
base_adult_model <- boost_tree(
mtry = 0.8,
trees = base_adult_select_best_passo1$trees,
min_n = base_adult_select_best_passo2$min_n,
tree_depth = base_adult_select_best_passo2$tree_depth,
loss_reduction = tune(),
learn_rate = base_adult_select_best_passo1$learn_rate,
sample_size = 0.8
) %>%
set_mode("classification") %>%
set_engine("xgboost", nthread = cores)
#### Workflow
base_adult_wf <- workflow() %>%
add_model(base_adult_model) %>%
add_recipe(base_adult_recipe)
#### Grid
base_adult_grid <- expand.grid(
loss_reduction = c(0, 0.05, 0.1, 0.15, 0.25, 0.35, 0.45, 0.5, 1, 2)
)
base_adult_grid <- base_adult_wf %>%
tune_grid(
resamples = base_adult_resamples,
grid = base_adult_grid,
control = control_grid(save_pred = TRUE, verbose = FALSE, allow_par = TRUE),
metrics = metric_set(roc_auc)
)
#### Best hyperparameters
autoplot(base_adult_grid)
base_adult_grid %>% show_best(metric = "roc_auc", n = 5)
base_adult_select_best_passo3 <- base_adult_grid %>% select_best(metric = "roc_auc")
base_adult_select_best_passo3
#### Pass 4: tune mtry and sample_size.
base_adult_model <- boost_tree(
mtry = tune(),
trees = base_adult_select_best_passo1$trees,
min_n = base_adult_select_best_passo2$min_n,
tree_depth = base_adult_select_best_passo2$tree_depth,
loss_reduction = base_adult_select_best_passo3$loss_reduction,
learn_rate = base_adult_select_best_passo1$learn_rate,
sample_size = tune()
) %>%
set_mode("classification") %>%
set_engine("xgboost", nthread = cores)
#### Workflow
base_adult_wf <- workflow() %>%
add_model(base_adult_model) %>%
add_recipe(base_adult_recipe)
#### Grid
base_adult_grid <- expand.grid(
sample_size = seq(0.5, 1.0, length.out = 10),
mtry = seq(0.1, 1.0, length.out = 10)
)
base_adult_grid <- base_adult_wf %>%
tune_grid(
resamples = base_adult_resamples,
grid = base_adult_grid,
control = control_grid(save_pred = TRUE, verbose = FALSE, allow_par = TRUE),
metrics = metric_set(roc_auc)
)
#### Best hyperparameters
autoplot(base_adult_grid)
base_adult_grid %>% show_best(metric = "roc_auc", n = 5)
base_adult_select_best_passo4 <- base_adult_grid %>% select_best(metric = "roc_auc")
base_adult_select_best_passo4
#### Pass 5: re-tune trees and learn_rate with the other winners fixed.
base_adult_model <- boost_tree(
mtry = base_adult_select_best_passo4$mtry,
trees = tune(),
min_n = base_adult_select_best_passo2$min_n,
tree_depth = base_adult_select_best_passo2$tree_depth,
loss_reduction = base_adult_select_best_passo3$loss_reduction,
learn_rate = tune(),
sample_size = base_adult_select_best_passo4$sample_size
) %>%
set_mode("classification") %>%
set_engine("xgboost", nthread = cores)
#### Workflow
base_adult_wf <- workflow() %>%
add_model(base_adult_model) %>%
add_recipe(base_adult_recipe)
#### Grid
base_adult_grid <- expand.grid(
learn_rate = c(0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.2, 0.3),
trees = c(100, 250, 500, 1000, 1500, 2000, 3000)
)
base_adult_grid <- base_adult_wf %>%
tune_grid(
resamples = base_adult_resamples,
grid = base_adult_grid,
control = control_grid(save_pred = TRUE, verbose = FALSE, allow_par = TRUE),
metrics = metric_set(roc_auc)
)
#### Best hyperparameters
autoplot(base_adult_grid)
base_adult_grid %>% show_best(metric = "roc_auc", n = 5)
base_adult_select_best_passo5 <- base_adult_grid %>% select_best(metric = "roc_auc")
base_adult_select_best_passo5
# Final model performance: refit with hard-coded hyperparameters
# (NOTE(review): presumably the winners of the tuning passes above — confirm
# they match the selected passo values before reuse).
base_adult_model <- boost_tree(
mtry = 0.3,
trees = 3000,
min_n = 5,
tree_depth = 4,
loss_reduction = 0.05,
learn_rate = 0.01,
sample_size = 0.9444444
) %>%
set_mode("classification") %>%
set_engine("xgboost", nthread = cores)
#### Workflow
base_adult_wf <- workflow() %>%
add_model(base_adult_model) %>%
add_recipe(base_adult_recipe)
# Fit on the training half and evaluate once on the held-out half.
base_adult_last_fit <- base_adult_wf %>%
last_fit(
split = base_adult_split,
control = control_grid(save_pred = TRUE, verbose = FALSE, allow_par = TRUE),
metrics = metric_set(roc_auc, f_meas)
)
#### Metrics
collect_metrics(base_adult_last_fit)
#### Top-20 important variables
base_adult_last_fit %>%
pluck(".workflow", 1) %>%
pull_workflow_fit() %>%
vip::vip(num_features = 20)
#### ROC curve
base_adult_last_fit %>%
collect_predictions() %>%
roc_curve(resposta, ".pred_>50K") %>%
autoplot()
# Confusion matrix at a 0.6 probability threshold for the "<=50K" class.
adult_test_preds <- collect_predictions(base_adult_last_fit)
adult_test_preds %>%
mutate(
resposta_class = factor(if_else(`.pred_<=50K` > 0.6, "<=50K", ">50K"))
) %>%
conf_mat(resposta, resposta_class)
# Refit on the full data and score the validation set for submission.
adult_modelo_final <- base_adult_wf %>% fit(base_adult)
adult_val <- read_rds("data/adult_val.rds")
adult_val_sumbissao <- adult_val %>%
mutate(
more_than_50k = predict(adult_modelo_final, new_data = adult_val, type = "prob")$`.pred_>50K`
) %>%
select(id, more_than_50k)
questionr::freq.na(adult_val_sumbissao)
write_csv(adult_val_sumbissao, "data/adult_val_sumbissao.csv")
|
5785b9b277b7e5f37359734e1861df17698d3efb
|
d56f0073431b98da11187c472b60e5951fb727ed
|
/R/Meta_Permutation.R
|
435b012ecf6d0134c050207c0f8f51ab5e8f433e
|
[] |
no_license
|
cran/MetaSKAT
|
b5e68aa2f115449d915552cfb7eef8965fc5d9a5
|
43921edb6bbbfa27bb59d34669653bfc5ccfae43
|
refs/heads/master
| 2022-08-05T01:59:38.838570
| 2022-07-21T08:20:02
| 2022-07-21T08:20:02
| 17,680,913
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,717
|
r
|
Meta_Permutation.R
|
# Read the 10-slot header (8-byte integers) of a Meta-Permutation binary file
# and return the open connection plus the decoded fields:
#   slot 1: file-format version (must be 1)
#   slot 2: n.permu, slot 3: n.all, slot 4: n, slot 5: nSets,
#   slot 6: nSNPs.unique (slots 7-10 unused here)
# The connection is left open, positioned just past the header, so callers can
# stream scores with GetPermu_Score(); the caller is responsible for closing it.
#
# Fixes vs. original: error message typo ("Verion" -> "Version"); the version
# check now runs before the result list is built.
ReadPermu_Header<-function(File.MPermu){
	con = file(File.MPermu, "rb")
	header<-readBin(con, integer(), n = 10, size = 8)
	# Reject unknown file versions, releasing the connection first.
	if(header[1] != 1){
		close(con)
		stop("Version information in File.MPermu is not correct!")
	}
	re = list(con=con, n.permu=header[2], n.all=header[3], n=header[4], nSets=header[5], nSNPs.unique=header[6])
	return(re)
}
# Read one SNP-set's score block from an open Meta-Permutation connection.
# Seeks to the absolute byte offset StartPosPermu and reads
# nSNP * (nPermu + 1) doubles, returned as an nSNP-row matrix filled row-wise;
# per the callers, column 1 holds observed scores and the remaining nPermu
# columns hold permuted scores.
GetPermu_Score<-function(con, nSNP, nPermu, StartPosPermu){
	seek(con, where = StartPosPermu, origin = "start")
	vals <- readBin(con, double(), n = nSNP * (nPermu + 1), size = 8)
	matrix(vals, nrow = nSNP, byrow = TRUE)
}
# Open paired Meta-Info / Meta-Permutation files for every cohort.
# Validates that the two file vectors have equal length and that every file
# exists, then reads each cohort's info file (Read_Info_File) and permutation
# header (ReadPermu_Header, which leaves a binary connection open).
# Returns a list with: n.cohort, the union of all cohorts' SNP-set ids
# (Set_unique), EachInfo (per-cohort info), and EachInfo.Permu (per-cohort
# permutation headers + open connections).
Open_MPermu_File_2Read<-function(File.MPermu.vec, File.MInfo.vec){
	n.cohort<-length(File.MInfo.vec)
	if(length(File.MInfo.vec) != length(File.MPermu.vec)){
		stop("Different numbers of Meta Info and Permu files!")
	}
	message("Number of cohorts = ", n.cohort, "\n")

	# Normalize paths and verify that every file exists.
	for(i in seq_len(n.cohort)){
		File.MPermu.vec[i]<-normalizePath(File.MPermu.vec[i], mustWork =FALSE)
		File.MInfo.vec[i]<-normalizePath(File.MInfo.vec[i], mustWork =FALSE)
		Check_File_Exists(File.MPermu.vec[i])
		Check_File_Exists(File.MInfo.vec[i])
	}

	# Read info file first, then the permutation header, per cohort.
	re<-vector("list", n.cohort)
	re.Permu<-vector("list", n.cohort)
	for(i in seq_len(n.cohort)){
		re[[i]]<-Read_Info_File(File.MInfo.vec[i])
		re.Permu[[i]]<-ReadPermu_Header(File.MPermu.vec[i])
	}

	# Union of SNP-set ids across cohorts.
	Set_unique<-NULL
	for(i in seq_len(n.cohort)){
		Set_unique<-union(Set_unique, re[[i]]$set_unique)
	}

	list(n.cohort=n.cohort, Set_unique = Set_unique, EachInfo=re, EachInfo.Permu=re.Permu)
}
# Assemble the per-cohort observed and permuted score matrices for one SNP set,
# aligned to a common SNP index across cohorts.
# For each cohort: look up the set's SNP rows (hash_set), read the score block
# from the open permutation connection (column 1 = observed, rest = permuted),
# then use Get_META_Data_OneSet_Align (project helper) to map each cohort's
# SNPs onto the union index (IDX/IDX1) and flip allele signs (Sign.list).
# Cohorts lacking the set contribute all-zero rows.
GetPermu_obj<-function(Permu.Info, SetID){
n.cohort = Permu.Info$n.cohort
IsExistSNV<-rep(0, n.cohort)
Info.list<-list()
Permu.list<-list()
Score.list<-list()
N.Permu<-rep(0, n.cohort)
# Pass 1: read raw scores per cohort (skipping cohorts without this set).
for(i in 1:n.cohort){
idx<-Permu.Info$EachInfo[[i]]$hash_set[[SetID]]
if(is.null(idx)){
IsExistSNV[i]<-0
} else {
IsExistSNV[i]<-1
nSNP = length(idx)
N.Permu[i] = Permu.Info$EachInfo.Permu[[i]]$n.permu
Info.list[[i]]<-Permu.Info$EachInfo[[i]]$Info[idx,]
StartPosPermu = Info.list[[i]]$StartPOSPermu[1]
out.m=GetPermu_Score(Permu.Info$EachInfo.Permu[[i]]$con, nSNP , N.Permu[i], StartPosPermu)
#out.m1<<-out.m
#score1<<-Info.list[[i]]$Score
# Column 1 = observed scores; remaining columns = permuted scores.
Permu.list[[i]] = out.m[,-1]
Score.list[[i]] = out.m[,1]
}
}
#Info.list1<<-Info.list
# Align cohorts onto a common SNP index (n.all SNPs in the union).
obj.oneset = Get_META_Data_OneSet_Align(SMat.list=NULL, Info.list=Info.list, IsExistSNV=IsExistSNV, n.cohort=n.cohort, Is.SMat=FALSE)
n.all = obj.oneset$n.all
Permu.list.new<-list()
Score.list.new<-list()
# Pass 2: scatter each cohort's rows into the aligned (zero-filled) matrices
# and apply the per-SNP sign correction.
for(i in 1:n.cohort){
n1 = N.Permu[i]
Permu.list.new[[i]]<-matrix(rep(0, n1*n.all), ncol=n1)
Score.list.new[[i]]<-rep(0, n.all)
if(IsExistSNV[i] == 1){
IDX<-obj.oneset$IDX.list[[i]]
IDX1<-obj.oneset$IDX1.list[[i]]
Permu.list.new[[i]][IDX,]<-Permu.list[[i]][IDX1,]
Permu.list.new[[i]]<-Permu.list.new[[i]]* obj.oneset$Sign.list[[i]]
Score.list.new[[i]][IDX]<-Score.list[[i]][IDX1]
Score.list.new[[i]]<-Score.list.new[[i]]* obj.oneset$Sign.list[[i]]
}
}
#A2<<-obj.oneset
re=list(Info.list=obj.oneset$Info.list, Permu.list.new=Permu.list.new, Score.list.new=Score.list.new, n.cohort=n.cohort, N.Permu = N.Permu)
return(re)
}
# Permutation-based meta-analysis p-value for one SNP set.
# Combines per-cohort observed scores into a weighted test statistic
# (r.corr == 0: SKAT-style sum of squared scores; otherwise burden-style
# squared sum), then builds a null distribution by resampling columns of each
# cohort's stored permutation matrix and computes an empirical p-value with
# the +1/+1 correction.
# NOTE(review): n.Resampling.item is fixed at 50000 regardless of the
# n.Resampling argument, so the actual number of null draws is
# ceiling(n.Resampling/50000) * 50000 (e.g. 50000 for the default 10000) —
# confirm whether this chunking is intentional.
MetaSKAT_MPermu_OneSet<-function(Permu.Info, SetID, n.Resampling=10000, r.corr=0, weights.beta=c(1,25), MAF.cutoff=1){
n.cohort = Permu.Info$n.cohort
n1=rep(1, n.cohort)
obj=GetPermu_obj(Permu.Info, SetID)
# Zero out SNPs with missing MAF and collect per-cohort sample sizes.
for(i in 1:n.cohort){
idx_miss<-which(is.na(obj$Info.list[[i]]$MAF))
if(length(idx_miss) > 0){
obj$Info.list[[i]]$Score[idx_miss] = 0
obj$Info.list[[i]]$MAF[idx_miss] = 0
}
n1[i]<-Permu.Info$EachInfo[[i]]$header$N.Sample
}
##########################
# Get Combined MAF
# Sample-size-weighted average MAF across cohorts; every cohort then gets the
# same Beta(weights.beta) SNP weights computed from the combined MAF.
MAF.Combine=0
weight.list<-list()
MAF.list<-list()
for(i in 1:n.cohort){
MAF.list[[i]]<-obj$Info.list[[i]]$MAF
}
for(i in 1:n.cohort){
MAF.Combine = MAF.Combine + MAF.list[[i]] * n1[i] / sum(n1)
}
for(i in 1:n.cohort){
weight.list[[i]]<-Beta.Weights(MAF.Combine,weights.beta, MAF.cutoff, Is.MAF=TRUE)
}
#
# Resampling
#
#A1<<-obj
# Observed statistic: weighted scores summed over cohorts.
Score1<-NULL
Score.mat<-NULL
for(i in 1:n.cohort){
Score.temp = obj$Score.list.new[[i]]
if(i==1){
Score1 = Score.temp *weight.list[[i]]
} else {
Score1 = Score1 + Score.temp *weight.list[[i]]
}
}
if(r.corr==0){
TestStat = sum(Score1^2)
} else {
TestStat = sum(Score1)^2
}
# Null distribution: draw permutation columns independently per cohort
# (with replacement) and recompute the statistic, in chunks of 50000.
nRun<-ceiling(n.Resampling /50000)
n.Resampling.item = 50000
TestStat.RA<-NULL
for(k in 1:nRun){
for(i in 1:n.cohort){
id<-sample.int(obj$N.Permu[i] ,n.Resampling.item, replace = TRUE)
if(i==1){
Score.mat<-obj$Permu.list.new[[i]][,id] *weight.list[[i]]
} else {
Score.mat = Score.mat + obj$Permu.list.new[[i]][,id] *weight.list[[i]]
}
}
if(r.corr==0){
TestStat.R<-colSums(Score.mat^2)
} else {
TestStat.R = colSums(Score.mat)^2
}
TestStat.RA = c(TestStat.RA, TestStat.R)
}
#TestStat1<<-TestStat
#TestStat.R1<<-TestStat.R
# Empirical p-value with add-one correction so it is never exactly zero.
pval<-(length(which(TestStat.RA >= TestStat))+1) /(length(TestStat.RA)+1)
return(list(p.value=pval))
}
# Release the open permutation-file connection of every cohort held by
# `Permu.Info`. Called purely for its side effect; the return value (that of
# the for loop, i.e. NULL) is not meaningful.
Permu_Close<-function(Permu.Info){
  total.cohorts <- Permu.Info$n.cohort
  for (k in 1:total.cohorts) {
    close(Permu.Info$EachInfo.Permu[[k]]$con)
  }
}
|
43976dec67fada5d3e9b0c2991c9a9a66718f27e
|
65b2d8f88199970ca6b83e658760dfb66c5d951d
|
/GenAlgVarSelection/R/select.R
|
7a3174acd1a9885041fbbfc77ce08d16e8beaea8
|
[] |
no_license
|
ClayCampaigne/stat243_Project
|
8776a1572701f9fb261b3ab1b81186fbd11fddcb
|
b3483b486a39b66c00f3dac96495265335f7dea9
|
refs/heads/master
| 2021-12-04T01:24:46.989193
| 2014-12-11T10:15:11
| 2014-12-11T10:15:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,295
|
r
|
select.R
|
############################ Project: Main Code #################################
##### Description: #####
### This is the main code of the genetic algorithm that attempts to solve the variable selection problem.
##### Input: #####
### Training Set: (X, y) (User Defined) #####
### genePool: size/# of individuals of the entire population
### mutation_prob: probability of mutation within each iteration
### crossover_prob: probability of crossover within each iteration
##### Output: #####
### result <- the best model based on the evaluation criterion
#X <- mtcars[,2:11]
#y <- mtcars[,1]
############################ Project: Main Code #################################
##### Description: #####
### This is the main code of the genetic algorithm that attempts to solve the variable selection problem.
##### Input: #####
### Training Set: (X, y) (User Defined) #####
### genePool: size/# of individuals of the entire population
### mutation_prob: probability of mutation within each iteration
### crossover_prob: probability of crossover within each iteration
### max_iterations: maximum iterations to go through
### min_iterations
##### Output: #####
### result <- the best model based on the evaluation criterion
### Test Case (To be deleted)
##### Auxilary Functions #####
##### popInitialize #####
##### Input #####
# 1. popSize: The population size in each generation
# 2. geneLength: the number of genes in the chromosome
# 3. zeroToOneRatio: the chance of a zero relative to a one for mutations and initialization. (This option allows us to control the number of set genes in the chromosome. Since we are conducting variable selection, this parameter may be higher than usual.)
##### Output #####
# The output is a matrix of size (popSize, geneLength) with the values initiated
# We do not want to any individual with all zeros hence we guarantee that at least one element is 1.
##### Reference: genalg package
# The code is an adaptation of the basic genetic algorithm implementation in the genalg package in R
##### Implementation #####
# Randomly initialize the first generation of chromosomes.
#
# Args:
#   popSize        number of individuals (rows).
#   geneLength     number of genes per chromosome (columns).
#   zeroToOneRatio number of zeros per one in the sampling alphabet; higher
#                  values produce sparser chromosomes. NA is treated as 0
#                  (all-ones chromosomes).
#
# Returns: a popSize x geneLength 0/1 matrix in which every row has at
# least one gene set (all-zero rows are resampled).
popInitialize <- function(popSize = 0, geneLength = 0, zeroToOneRatio) {
  if (is.na(zeroToOneRatio)) {
    # Fix: the original assigned to a typo'd name ("zeroToOneRation"), so an
    # NA ratio still crashed rep(0, NA) below.
    zeroToOneRatio <- 0
  }
  pop <- matrix(nrow = popSize, ncol = geneLength)
  # Sampling alphabet: `zeroToOneRatio` zeros for every one.
  alphabet <- c(rep(0, zeroToOneRatio), 1)
  ##### Randomly initialize the first generation #####
  for (child in seq_len(popSize)) {
    pop[child, ] <- sample(alphabet, geneLength, replace = TRUE)
    # No individual may be all zeros: resample until at least one gene is set.
    while (sum(pop[child, ]) == 0) {
      pop[child, ] <- sample(alphabet, geneLength, replace = TRUE)
    }
  }
  return(pop)
}
##### Auxilary Function 2: EvalFunction #####
library(parallel)
library(doParallel)
library(foreach)
library(iterators)
nCores <- 4
registerDoParallel(nCores)
# to addd
##### Evaluation Function #####
##### Descritption ######
### Here we create two evaluation functions - evalLm and evalGlm
### that allows us evaluate the fitness of the current generation. ###
##### Input #####
### genePool: a matrix holding the current individuals of size population size by
### X: the independent variables (training data)
### y: the dependent varaibles (training data)
### family: indicating the model for the general linear model function
### criterion: a character string specifying the function used as the evaluation criteria(default: "AIC")
### criFun: the user-provided function to use as evaluation criteria if "criterion" is not AIC or BIC
##### Output #####
### result: a matrix which holds the value from the criterion function & rank (assume
### that the smaller criterion value, the smaller the rank, the higher weights)
### First row of result: evaluation criterion value
### Second row of result rank
#library(data.table)
# Fit the model encoded by one chromosome and return its criterion value.
#
# Args:
#   singleGene 0/1 vector selecting columns of X.
#   X, y       predictors (data.frame/matrix) and response.
#   type       "lm" or "glm".
#   criterion  "AIC" or "BIC" (used only when criFun is NULL).
#   criFun     optional user-supplied criterion function of a fitted model;
#              must return a single numeric value.
#   family     glm family (used only when type == "glm").
#
# Returns: the (scalar, numeric) criterion value of the fitted model.
singleEval <- function(singleGene, X, y, type, criterion, criFun, family) {
  # Columns whose gene is set form the candidate design matrix.
  selected <- as.matrix(X[, which(singleGene != 0)])
  if (type == "lm")
    fit <- lm(y ~ selected)
  if (type == "glm")
    fit <- glm(as.vector(y) ~ selected, family)
  if (is.null(criFun)) {
    # No user criterion supplied: look up the built-in one ("AIC"/"BIC") by
    # name. match.fun replaces the original eval(parse(text = ...)).
    criFunBuilt <- match.fun(criterion)
    criValue <- criFunBuilt(fit)
  } else {
    criValue <- try(criFun(fit), silent = TRUE)
    if (inherits(criValue, "try-error"))
      # Fix: original wrapped this message in cat(), which prints and returns
      # NULL, so stop() raised an empty error.
      stop(paste("criFun is not compatible. The following error occured:\n",
                 geterrmessage()))
    if (length(criValue) != 1)
      stop("Dimension of output for criFun greater than 1.")
    # Fix: the original test `!is.numeric(criValue)!=1` was inverted and
    # rejected every valid numeric result.
    if (!is.numeric(criValue))
      stop("Output for criFun is not numeric.")
  }
  criValue
}
# Score every individual in the pool and derive selection weights.
#
# Fits one model per chromosome (row of currentGenePool) in parallel via
# foreach/%dopar% and returns a 3-row matrix:
#   row 1 (named after `criterion`): criterion value of each individual;
#   row 2 "ranks":                   rank of that value;
#   row 3 "samplingProbs":           fitness-proportional selection weights
#                                    (smaller criterion => larger weight).
evalFunction <- function(X, y, currentGenePool, popSize, type = "lm", family = "gaussian", criterion = "AIC", criFun = NULL){
  # An odd population cannot be paired up for crossover later; bump it by one.
  if (popSize %% 2 != 0) {
    message("Warning: The size of the population has been rounded to the largest even numer")
    popSize <- popSize + 1
  }
  # Guard clauses: validate criterion, custom function, model type and
  # response before doing any fitting.
  if (criterion != "AIC" & criterion != "BIC")
    stop(paste(criterion, "is not a valid criterion. Please use AIC or BIC."))
  if (!is.null(criFun) & !is.function(criFun))
    stop("criFun input is not a function.")
  if (type != "lm" & type != "glm")
    stop("Regression must be of type 'lm' or 'glm'")
  if (family == "binomial" & length(unique(na.omit(y))) != 2)
    stop("Logistic regression requires 'y' to be binary")
  # Evaluate each chromosome's model in parallel (one task per individual).
  scores <- foreach(i = 1:popSize, .combine = c) %dopar% {
    singleEval(currentGenePool[i, ], X, y, type, criterion, criFun, family)
  }
  fitness <- rbind(scores, rank(scores), rank(-scores) / sum(1:popSize))
  row.names(fitness) <- c(criterion, "ranks", "samplingProbs")
  return(fitness)
}
##### Auxilary Function: UpdateSam #####
# Fitness-proportional selection: draw popSize/2 parent pairs (indices
# sampled with probability `weights`) and return the corresponding rows of
# `x` stacked into a popSize-row matrix, paired rows adjacent.
#
# Args:
#   x       current gene pool (popSize x geneLength matrix).
#   popSize number of individuals; assumed even.
#   weights per-individual sampling probabilities (length popSize).
#
# Returns: matrix of the sampled individuals, two consecutive rows per pair.
updateSamp <- function(x, popSize, weights) {
  pairs <- matrix(NA, ncol = popSize / 2, nrow = 2) # form sampled pairs
  # Fix: the original loop ran over `1:popSize/2`, which parses as
  # (1:popSize)/2 -- the fractional sequence 0.5, 1, 1.5, ... -- truncating
  # to invalid/duplicate column indices. seq_len(popSize / 2) iterates over
  # the intended columns.
  # The original also called set.seed(0) here (author-flagged "take out
  # later"), which froze the RNG so every generation drew identical pairs;
  # removed.
  for (i in seq_len(popSize / 2)) {
    pairs[, i] <- sample(1:popSize, 2, prob = weights)
  }
  xSamp <- x[as.vector(pairs), ]
  return(xSamp)
}
##### Auxilary Function: Crossover #####
# One-point crossover between two parent chromosomes.
#
# With probability `crossRate`, pick a random cut point in 1..geneLength-1
# and exchange the tails of the parents. If crossover is skipped, or either
# child would be all zeros, the parents are returned unchanged.
#
# Returns: a 2-row matrix holding the resulting pair.
crossover <- function(v1, v2, geneLength, crossRate = 1){
  do.cross <- sample(c(TRUE, FALSE), 1, prob = c(crossRate, 1 - crossRate))
  if (!do.cross) {
    # No crossover this round: pass the parents through untouched.
    return(rbind(v1, v2))
  }
  cut.point <- sample(geneLength - 1, 1)
  head.idx <- 1:cut.point
  tail.idx <- (cut.point + 1):geneLength
  new1 <- c(v1[head.idx], v2[tail.idx])
  new2 <- c(v2[head.idx], v1[tail.idx])
  if (sum(new1) == 0 | sum(new2) == 0) {
    # A degenerate (all-zero) child is not allowed; keep the parents instead.
    return(rbind(v1, v2))
  }
  rbind(new1, new2)
}
##### Auxilary Function: Mutation #####
# Bit-flip mutation applied only at loci where the two chromosomes agree.
#
# Each shared locus flips independently with probability `mRate` in each
# chromosome. A mutant that would end up all zeros is reverted to its
# original parent.
#
# Returns: a 2-row matrix holding the (possibly mutated) pair.
mutation <- function(v1, v2, mRate){
  shared <- which(v1 == v2)
  n.shared <- length(shared)
  # Independent flip decisions for each chromosome at every shared locus.
  flip1 <- sample(c(TRUE, FALSE), n.shared, replace = TRUE, prob = c(mRate, 1 - mRate))
  flip2 <- sample(c(TRUE, FALSE), n.shared, replace = TRUE, prob = c(mRate, 1 - mRate))
  if (sum(flip1) == 0 & sum(flip2) == 0) {
    # Nothing selected for mutation: return the pair unchanged.
    return(rbind(v1, v2))
  }
  v1Copy <- v1
  v2Copy <- v2
  v1Copy[shared][flip1] <- as.numeric(!v1Copy[shared][flip1])
  v2Copy[shared][flip2] <- as.numeric(!v2Copy[shared][flip2])
  # Disallow all-zero chromosomes: fall back to the unmutated parent.
  if (sum(v1Copy) == 0) v1Copy <- v1
  if (sum(v2Copy) == 0) v2Copy <- v2
  rbind(v1Copy, v2Copy)
}
##### Clear out later #####
#X <- mtcars[,2:11]
#y <- mtcars[,1]
# Genetic-algorithm variable selection.
#
# Evolves a population of 0/1 chromosomes (one bit per column of X) for
# `max_iterations` generations using fitness-proportional selection
# (updateSamp), one-point crossover and bit-flip mutation, then refits and
# returns the best model found (via best()).
#
# Side effects: plots the per-generation average criterion value.
#
# NOTE(review): the default crossRate = NA is never resolved (unlike mRate),
# so calling select() without an explicit crossRate makes crossover()'s
# sample(prob = c(NA, NA)) fail -- confirm callers always pass crossRate.
# NOTE(review): min_iterations is currently unused (the convergence
# while-loop sketched below is commented out).
# NOTE(review): the is.null(X) check below runs after as.matrix(X), so a
# NULL X errors earlier than intended -- verify.
select <- function(X = NULL, y = NULL, popSize = 200, criterion = "AIC", type = "lm", family = "gaussian", criFun = NULL, max_iterations = 500, min_iterations = 50, crossRate = NA, mRate = NA, zeroToOneRatio = 2){
##### Defense coding #####
X <- as.matrix(X);
y <- as.vector(y);
# Default mutation rate: one expected flip per observation count.
if(is.na(mRate)){
mRate = 1/(dim(X)[1]);
}
if((popSize%%2)!=0){
#warning("The number of models has ")
#print("Warning: The number of models has been incremented to the nearest even number")
warning("The number of models has been incremented to the nearest even number")
popSize <- popSize + 1
}
if(is.null(X)){
stop("Please provide the predictors! Exiting from the function")
}
# if(is.null(y)){
# stop("Please provide the independent variable/outcome! Exiting from the function")
# }
##### Beginning of the genetic algorithm #####
geneLength <- dim(X)[2];
##### Initializing the first generation of individuals/ models
initialPopulation <- popInitialize(popSize, geneLength, zeroToOneRatio);
currentGenePool <- initialPopulation;
### Calculating the sampling probabilities for the first generations of individuals/models
#samplingProb <- evalFunction(currenGenePool, type, criterion, family, criFun)[3,]
samplingProb <- evalFunction(X, y, currentGenePool, popSize, type, family, criterion, criFun)[3,];
avgCriterion <- mean(evalFunction(X, y, currentGenePool, popSize, type, family, criterion, criFun)[1,]);
### While loop to handle convergence/ exceedance of min iteration/ capped by max iteration
#iter = 0;
### Condition to be satisfied
### if iter < min_iteration
#while((iter <= min_iterations)&& !(iter >= max_iterations))
# Main generation loop: select -> crossover -> mutate -> re-evaluate.
for(i in 1:max_iterations){
# really we will have predetermined # of iterations
#xSamp <- updateSamp(x, popSize, weights)
geneSample <- updateSamp(currentGenePool, popSize, samplingProb);
#xCrossed = matrix(NA, nrow = popSize, ncol = geneLength)
crossedSample <- matrix(NA, nrow = popSize, ncol = geneLength);
#for(i in seq(1, popSize, by = 2))
# xCrossed[i:(i+1),] <- crossover(xSamp[i,], xSamp[i+1,], popSize, geneLength, crossRate)
# Cross over consecutive pairs (rows j and j+1).
for(j in seq(1, popSize, by = 2)){
#print(i)
crossedSample[j:(j+1),] <- crossover(geneSample[j,], geneSample[j+1, ], geneLength, crossRate)
}
#
#xMut = matrix(NA, nrow = popSize, ncol = geneLength)
mutatedSample <- matrix(NA, nrow = popSize, ncol = geneLength)
#for(i in seq(1, popSize, by = 2))
# xMut[i:(i+1),] <- mutation(xCrossed[i,], xCrossed[i+1,], mRate)
# Mutate the same consecutive pairs.
for (k in seq(1, popSize, by = 2)){
#mutatedSample <- mutation(crossedSample[i,], crossedSample[i+1,], popSize, mRate)
mutatedSample[k:(k+1),] <- mutation(crossedSample[k,], crossedSample[k+1,], mRate)
}
### Here we would add the evaluation function ###
# weights = AIC( )
currentGenePool <- mutatedSample
samplingProb <- evalFunction(X, y, currentGenePool, popSize, type, family, criterion, criFun)[3,]
# Track the generation's mean criterion for the diagnostic plot below.
avgCriterion <- rbind(avgCriterion, mean(evalFunction(X, y, currentGenePool, popSize, type, family, criterion, criFun)[1,]))
#x = xMut # Update x-matrix with our new one!
#print(x) # take out later
}
##### After a fixed number of iterations, we return the best model #####
#return(currentGenePool)
final <- best(X, y, currentGenePool, popSize, type, criterion)
#print(avgAIC)
plot(avgCriterion)
##### Print the best model #####
return(final)
}
# Refit, print and return the best individual of `pool` per `criterion`.
#
# Re-evaluates the whole pool, picks the individual with the minimum rank
# (row 2 of evalFunction's output), refits the corresponding lm/glm on the
# selected columns of X, prints its summary and criterion value, and returns
# the fitted model object (or 0 for an unknown `type`).
#
# NOTE(review): the "[1]" tie-breaker is applied to `index` (the best
# individual) in the lm branch but to `index2` (the selected predictors!) in
# the glm branch; the branches look unintentionally asymmetric -- verify
# which placement is intended.
best <- function(X, y, pool, popSize, type, criterion, family = "gaussian", criFun = NULL){
#print('In best')
tmp <- evalFunction(X, y, pool, popSize, type, family, criterion, criFun)
#print(result)
final <- 0
if(type == "lm"){
#print('lm flow')
# Best individual = minimum rank; [1] breaks ties toward the first.
index <- which(tmp[2,] == min(tmp[2,]), arr.ind = T)[1]
#print(index)
# Columns of X whose gene is set in the winning chromosome.
index2 <- which(pool[index,] != 0, arr.ind = T)
#print(index2)
final <- lm(y~as.matrix(X[,index2]))
#print('success')
}
else if (type == "glm"){
#print('glm flow')
index <- which(tmp[2,] == min(tmp[2,]), arr.ind = T)
index2 <- which(pool[index,] != 0, arr.ind = T)[1]
final <- glm(as.vector(y)~as.matrix(X[,index2]),family)
}
### TO be fixed here
else{
final <- 0
}
print(summary(final))
# Look up the criterion function ("AIC"/"BIC") by name and report its value.
criFunBuilt <- eval(parse(text = criterion))
criValue <- criFunBuilt(final)
print(paste("The resulting criterion is: ", criterion, criValue))
return(final)
}
### test code
#result <- select(X, y, popSize = 19, max_iterations = 50, crossRate = 0.95, mRate = 0.0001)
# Synthetic regression fixture: 200 observations, 2 informative predictors
# (v1, v2) plus 20 noise predictors. NOTE(review): these statements run at
# source() time and draw from the global RNG without a seed, so the fixture
# differs between runs.
v1 <- matrix(runif(200)*500,nrow = 200)
v2 <- matrix(runif(200)*10,nrow = 200)
error <- matrix(rnorm(200), nrow = 200)
n <- rep(200,20)
# 200 x 20 matrix of uniform noise columns, scaled to [0, 500].
v3_22 <- sapply(n, runif)
v3_22 <- (v3_22)*500
# Design matrix: 200 x 22 (v1, v2, then the 20 noise columns).
X24 <- cbind(v1,v2,v3_22)
# True model depends only on v1 and v2 (plus Gaussian noise).
y1 <- 0.5*v1 + 30*v2 +error
|
a7c1489dede567d70dcdb08ad16d701df94ef03c
|
6e520bb1eec1e3cfd543f994949e28565cdd6c54
|
/server.R
|
567fa070106d0b8f0c9cce979afd3bf8da8aae53
|
[] |
no_license
|
yusagi0603/Service-Time-Series-Analysis
|
653413f1b7ac47b6710c29f187d276e5e9ce8cef
|
dcef8489bad5a63f40eb5097a8feef73ef4d3cca
|
refs/heads/main
| 2023-04-04T01:01:34.032865
| 2021-04-04T12:56:06
| 2021-04-04T12:56:06
| 354,541,924
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,735
|
r
|
server.R
|
## load packages
library(zoo)
library(xts)
library(ggplot2)
library(reshape2)
library(forecast)
library(DT)
library(plotly)
library(lubridate)
library(dplyr)
library(tidyr)
# library(highcharter)
# input$[variable] to use variable in ui.R
# EX: input$store -> to use a variable "store" in ui.R
###############
source("./data_source.R")
source("./feature_engineering.R")
###############
# Shiny server function (returned as the file's last expression).
# Renders the text summaries, the two plotly charts and the demand table
# from the data frames and helpers loaded by data_source.R /
# feature_engineering.R above (find_group, agg_service_store_user_df,
# generate_long_forecast_df, total_long_store_service_df,
# total_forecast_df -- defined in those sourced files, not visible here).
function(input, output, session) {
##### PARSE VARIABLE FROM ui.R #####
# input$date and others are Date objects. When outputting
# text, we need to convert to character; otherwise it will
# print an integer rather than a date.
##### Output text & plot ######
# Selected date range, e.g. "Time Range:  2021-01-01 to 2021-02-01".
output$dateRangeText <- renderText({
paste("Time Range: ",
paste(as.character(input$dateRange), collapse = " to ")
)
})
# Comma-separated list of the (up to three) selected stores.
output$storeText <- renderText({
paste0("Store: ",
paste0(c(input$store1, input$store2, input$store3), collapse=", ")
)
})
output$serviceText <- renderText({
paste0("Service: ", input$service)
})
# Group membership of the selected stores/service, one line per group
# (find_group is defined in a sourced helper file).
output$groupText <- renderText({
paste0(
paste0("group1: ", find_group(
c(input$store1, input$store2, input$store3), input$service, "1")),
paste0("\ngroup2: ", find_group(
c(input$store1, input$store2, input$store3), input$service, "2"))
)
})
# Customer profile: age-group bar chart faceted by gender, restricted to
# the selected invoice-date range.
output$profilePlot <- renderPlotly({
print(
ggplotly(
agg_service_store_user_df %>%
filter(invoice_date >= as.Date(as.character(input$dateRange)[1]),
invoice_date <= as.Date(as.character(input$dateRange)[2])) %>%
filter(gender %in% c("男", "女")) %>%
ggplot(., aes(age_group, fill = age_group)) +
geom_bar() +
facet_wrap(gender~.)
)
)
})
# Demand time series per store/service, including forecast lines produced
# by generate_long_forecast_df (sourced helper).
output$timeSeriesPlot <- renderPlotly({
print(
ggplotly(
ggplot(data = generate_long_forecast_df(
raw_long_df=total_long_store_service_df,
start_dt=as.character(input$dateRange)[1],
end_dt=as.character(input$dateRange)[2],
store=c(input$store1, input$store2, input$store3),
service=input$service
),
aes(x=date, y=demand, colour=store, line_shape=algorithm)) +
geom_line() +
facet_wrap(~service) +
ylim(0, 100)
)
)
})
# Forecast table, filterable by service ("All" disables the filter).
# NOTE(review): the trailing comma after the datatable() call leaves an
# empty argument to renderDT -- confirm this is harmless in the deployed
# shiny/DT versions.
output$demandTable <- renderDT(DT::datatable({
data <- total_forecast_df
if (input$service != "All") {
data <- data[data$service == input$service,]
}
# data <- data %>%
# spread(algorithm, demand) # g1 & g2 has different column
data
}, filter = list(
position = 'top', clear = FALSE
),
options = list(
pageLength = 15
)),
)
}
|
5797752824337e3785d0737870bc8fbdffae96e7
|
373f2abd88834acc0d1ab21ba13ce335600ceb0f
|
/R/debut.r
|
a2eb548f263d589bb09a74f88bc07535894cd97d
|
[] |
no_license
|
ClementCalenge/adehabitat
|
fa26e43fba432c29a5757fcd4b5f9ffd972bdd44
|
23ba023d5a57eec861fb6d3d07772cb9d2db6968
|
refs/heads/master
| 2021-01-22T05:20:32.012511
| 2018-01-28T12:27:32
| 2018-01-28T12:27:32
| 81,652,118
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,185
|
r
|
debut.r
|
##### Basic package-load hook
# Emitted when the package is attached: warns that classic adehabitat is
# deprecated and points users to its four successor packages. The banner
# text is assembled from fixed fragments and emitted via
# packageStartupMessage() so it can be suppressed by the usual mechanisms.
.onAttach <- function(libname, pkgname)
{
  banner.parts <- c(
    "\n************************************************\n",
    "************************************************\n",
    "THE PACKAGE adehabitat IS NOW DEPRECATED!!!!!!!\n It is dangerous to use it, as bugs will no longer be corrected.\n",
    "It is now recommended to use the packages adehabitatMA, adehabitatLT, adehabitatHR, and adehabitatHS.\n",
    "These 4 packages are the future of adehabitat.\n They have a vignette explaining in detail how they can be used.\nThey implement more methods than adehabitat\nThey are based on the more common and more clever spatial classes implemented in sp.\nBugs are corrected frequently.\nReally, avoid to use the classical adehabitat, unless you have a very good reason for it.\n\n",
    "*****THIS IS THE VERY LAST WARNING*****\n This is the last version of adehabitat submitted to CRAN (at the time of writing: 2015-03-27).\n THE NEXT VERSION OF adehabitat WILL JUST BE A VIRTUAL PACKAGE LOADING ALL THE PACKAGES DESCRIBED ABOVE.\n"
  )
  packageStartupMessage(paste(banner.parts, collapse = ""))
}
|
3f2ff8fea96ddcdc294b36ef8a6749ac64403993
|
0afd4c95ea233454ad43437ccc6478f0f4c7e27e
|
/run_analysis.R
|
c8edb65a0793a0722b5dbb9872443303d7b6c5b0
|
[] |
no_license
|
tristanmarkwell/GettingCleaningProject
|
6df6dd5af0e3784f459ae8b348d232bbab6c46a5
|
9adbf4c2fcf9f89dda51d2e141a047897e9b1fdb
|
refs/heads/master
| 2020-05-17T10:39:22.749155
| 2014-05-25T05:06:27
| 2014-05-25T05:06:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,795
|
r
|
run_analysis.R
|
# Getting & Cleaning Data course project, steps 0-5 (part 1 of 2):
# download (commented out), load, label and merge the UCI HAR datasets,
# then keep only the mean/std feature columns.
## Part 0 - This code downloads the data if you're starting from scratch
# if (!file.exists('data')) dir.create('data')
# download.file('https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip',
# 'rawDatasets.zip')
# dateDownloaded <- date()
#
# ## unzip files
# unzip('.\\data\\rawDatasets.zip',exdir='.\\data')
## Part 1 - load data - uncommented are simplified assuming all files are in the working directory
#activity_labels <- read.table('.\\data\\UCI HAR Dataset\\activity_labels.txt', col.names=c('ID','Activity'))
activity_labels <- read.table('activity_labels.txt', col.names=c('ID','Activity'))
#features <- read.table('.\\data\\UCI HAR Dataset\\features.txt')
features <- read.table('features.txt')
#subject_test <- read.table('.\\data\\UCI HAR Dataset\\test\\subject_test.txt', col.names='subject')
subject_test <- read.table('subject_test.txt', col.names='subject')
#y_test <- read.table('.\\data\\UCI HAR Dataset\\test\\y_test.txt', col.names='Activity')
y_test <- read.table('y_test.txt', col.names='Activity')
#X_test <- read.table('.\\data\\UCI HAR Dataset\\test\\X_test.txt',col.names=features$V2)
X_test <- read.table('X_test.txt',col.names=features$V2)
#subject_train <- read.table('.\\data\\UCI HAR Dataset\\train\\subject_train.txt', col.names='subject')
subject_train <- read.table('subject_train.txt', col.names='subject')
#y_train <- read.table('.\\data\\UCI HAR Dataset\\train\\y_train.txt', col.names='Activity')
y_train <- read.table('y_train.txt', col.names='Activity')
#X_train <- read.table('.\\data\\UCI HAR Dataset\\train\\X_train.txt',col.names=features$V2)
X_train <- read.table('X_train.txt',col.names=features$V2)
## Part 2 - have to use the activities to label as factors
# Map the numeric activity codes onto their descriptive names.
y_test$Activity <- factor(y_test$Activity,labels=as.character(activity_labels$Activity))
y_train$Activity <- factor(y_train$Activity,labels=as.character(activity_labels$Activity))
## Part 3 - merge columns and then rows
testData <- cbind(X_test, y_test, subject_test)
trainingData <- cbind(X_train, y_train, subject_train)
mergedData <- rbind(testData,trainingData)
## Part 4 - keep only mean and standard deviation columns
# NOTE(review): the "std." pattern is NOT fixed=TRUE (unlike "mean."), so
# the "." matches any character there -- confirm the intended column set.
# Columns 562 and 563 are the Activity and subject columns appended above.
trimmedColumns <- c(grep("mean.",colnames(mergedData), fixed=TRUE),grep("std.",colnames(mergedData)),562, 563)
trimmedData <- mergedData[,trimmedColumns]
## Part 5 - create tidy data set using plyr
library(plyr)
# Part 5 (continued): for every (subject, Activity) pair, average each
# retained feature and give it a descriptive column name.
tidyData <- ddply(trimmedData,.(subject,Activity),summarize,
TimeBodyAccelerometerMeanX = mean(tBodyAcc.mean...X),
TimeBodyAccelerometerMeanY = mean(tBodyAcc.mean...Y),
TimeBodyAccelerometerMeanZ = mean(tBodyAcc.mean...Z),
TimeBodyAccelerometerStdDevX = mean(tBodyAcc.std...X),
TimeBodyAccelerometerStdDevY = mean(tBodyAcc.std...Y),
TimeBodyAccelerometerStdDevZ = mean(tBodyAcc.std...Z),
TimeGravityAccelerometerMeanX = mean(tGravityAcc.mean...X),
TimeGravityAccelerometerMeanY = mean(tGravityAcc.mean...Y),
TimeGravityAccelerometerMeanZ = mean(tGravityAcc.mean...Z),
TimeGravityAccelerometerStdDevX = mean(tGravityAcc.std...X),
TimeGravityAccelerometerStdDevY = mean(tGravityAcc.std...Y),
TimeGravityAccelerometerStdDevZ = mean(tGravityAcc.std...Z),
TimeBodyAccelerometerJerkMeanX = mean(tBodyAccJerk.mean...X),
TimeBodyAccelerometerJerkMeanY = mean(tBodyAccJerk.mean...Y),
TimeBodyAccelerometerJerkMeanZ = mean(tBodyAccJerk.mean...Z),
TimeBodyAccelerometerJerkStdDevX = mean(tBodyAccJerk.std...X),
TimeBodyAccelerometerJerkStdDevY = mean(tBodyAccJerk.std...Y),
TimeBodyAccelerometerJerkStdDevZ = mean(tBodyAccJerk.std...Z),
TimeBodyGyroscopeMeanX = mean(tBodyGyro.mean...X),
TimeBodyGyroscopeMeanY = mean(tBodyGyro.mean...Y),
TimeBodyGyroscopeMeanZ = mean(tBodyGyro.mean...Z),
TimeBodyGyroscopeStdDevX = mean(tBodyGyro.std...X),
TimeBodyGyroscopeStdDevY = mean(tBodyGyro.std...Y),
TimeBodyGyroscopeStdDevZ = mean(tBodyGyro.std...Z),
TimeBodyGyroscopeJerkMeanX = mean(tBodyGyroJerk.mean...X),
TimeBodyGyroscopeJerkMeanY = mean(tBodyGyroJerk.mean...Y),
TimeBodyGyroscopeJerkMeanZ = mean(tBodyGyroJerk.mean...Z),
TimeBodyGyroscopeJerkStdDevX = mean(tBodyGyroJerk.std...X),
TimeBodyGyroscopeJerkStdDevY = mean(tBodyGyroJerk.std...Y),
TimeBodyGyroscopeJerkStdDevZ = mean(tBodyGyroJerk.std...Z),
TimeBodyAccelerometerMagnitudeMean = mean(tBodyAccMag.mean..),
TimeGravityAccelerometerMagnitudeMean = mean(tGravityAccMag.mean..),
TimeBodyAccelerometerJerkMagnitudeMean = mean(tBodyAccJerkMag.mean..),
TimeBodyGyroscopeMagnitudeMean = mean(tBodyGyroMag.mean..),
TimeBodyGyroscopeJerkMagnitudeMean = mean(tBodyGyroJerkMag.mean..),
TimeBodyAccelerometerMagnitudeStdDev = mean(tBodyAccMag.std..),
TimeGravityAccelerometerMagnitudeStdDev = mean(tGravityAccMag.std..),
TimeBodyAccelerometerJerkMagnitudeStdDev = mean(tBodyAccJerkMag.std..),
TimeBodyGyroscopeMagnitudeStdDev = mean(tBodyGyroMag.std..),
TimeBodyGyroscopeJerkMagnitudeStdDev = mean(tBodyGyroJerkMag.std..),
FourierBodyAccelerometerMeanX = mean(fBodyAcc.mean...X),
FourierBodyAccelerometerMeanY = mean(fBodyAcc.mean...Y),
FourierBodyAccelerometerMeanZ = mean(fBodyAcc.mean...Z),
FourierBodyAccelerometerStdDevX = mean(fBodyAcc.std...X),
FourierBodyAccelerometerStdDevY = mean(fBodyAcc.std...Y),
FourierBodyAccelerometerStdDevZ = mean(fBodyAcc.std...Z),
FourierBodyAccelerometerJerkMeanX = mean(fBodyAccJerk.mean...X),
FourierBodyAccelerometerJerkMeanY = mean(fBodyAccJerk.mean...Y),
FourierBodyAccelerometerJerkMeanZ = mean(fBodyAccJerk.mean...Z),
FourierBodyAccelerometerJerkStdDevX = mean(fBodyAccJerk.std...X),
FourierBodyAccelerometerJerkStdDevY = mean(fBodyAccJerk.std...Y),
FourierBodyAccelerometerJerkStdDevZ = mean(fBodyAccJerk.std...Z),
FourierBodyGyroscopeMeanX = mean(fBodyGyro.mean...X),
FourierBodyGyroscopeMeanY = mean(fBodyGyro.mean...Y),
FourierBodyGyroscopeMeanZ = mean(fBodyGyro.mean...Z),
FourierBodyGyroscopeStdDevX = mean(fBodyGyro.std...X),
FourierBodyGyroscopeStdDevY = mean(fBodyGyro.std...Y),
FourierBodyGyroscopeStdDevZ = mean(fBodyGyro.std...Z),
FourierBodyAccelerometerMagnitudeMean = mean(fBodyAccMag.mean..),
FourierBodyAccelerometerMagnitudeStdDev = mean(fBodyAccMag.std..),
FourierBodyAccelerometerJerkMagnitudeMean = mean(fBodyBodyAccJerkMag.mean..),
FourierBodyAccelerometerJerkMagnitudeStdDev = mean(fBodyBodyAccJerkMag.std..),
FourierBodyGyroscopeMagnitudeMean = mean(fBodyBodyGyroMag.mean..),
FourierBodyGyroscopeMagnitudeStdDev = mean(fBodyBodyGyroMag.std..),
FourierBodyGyroscopeJerkMagnitudeMean = mean(fBodyBodyGyroJerkMag.mean..),
FourierBodyGyroscopeJerkMagnitudeStdDev = mean(fBodyBodyGyroJerkMag.std..)
)
## Part 6 - final output of tidy data
write.table(tidyData,file="tidyData.txt",sep=",")
|
423a30cf006afa52115c1ac4aefcaefcf78fe1e3
|
ad9a4aee8d97fcfcfc3345afb03fe3b64d4cc48d
|
/Code/2-MultipleLinearRegression.R
|
e4790bf108caf5e70e77a31d595c14a49ba200a6
|
[
"MIT"
] |
permissive
|
ErisonBarros/Demanda-de-Transporte
|
23e86f7482c829c349892f4dbb9407c3bedf970f
|
a5bc469a69865c3f36845b8cf49dd7239cd8f186
|
refs/heads/master
| 2023-03-23T10:35:08.669996
| 2021-03-14T19:24:48
| 2021-03-14T19:24:48
| 348,381,022
| 0
| 0
|
MIT
| 2021-03-16T14:37:05
| 2021-03-16T14:35:47
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 7,790
|
r
|
2-MultipleLinearRegression.R
|
#'
#' #### Example exercise: Trip production of 57 Traffic Assignment Zones of Chicago in 1960's.
#'
#' **Your task**: Estimate a linear regression model that predicts trips per occupied dwelling unit.
#'
#' #### Variables:
#'
#' * `TODU`: Motorized Trips (private car or Public Transportation) per occupied dwelling unit;
#' * `ACO`: Average car ownership (cars per dwelling);
#' * `AHS`: Average household size;
#' * `SRI`: Social Rank Index:
#' 1. proportion of blue-collar workers (e.g., construction, mining);
#' 2. proportion of people with age higher than 25 years that have completed at least 8 year of education;
#' (_**Note:** The SRI has its maximum value when there are no blue-collar workers and all adults have education of at least 8 years_)
#' * `UI`: Urbanization Index:
#' 1. fertility rate, defined as the ratio of children under 5 years of age to the female population of childbearing age;
#' 2. female labor force participation rate, meaning the % of women who are in the labor force;
#' 3. % of single family units to total dwelling units.
#'
#' The degree of urbanization index would be increased by
#' a) lower fertility rate,
#' b) higher female labor force participation rate, and
#' c) higher proportion of single dwelling units.
#' (_**Note:** High values for this index imply less attachment to the home_)
#'
#' * `SI`:Segregation Index
#' It measures the proportion of an area to which minority groups (e.g: non-whites, foreign-born, Eastern Europeans) live in isolation.
#' (_**Note:** High values for this index imply that those communities are less prone to leaving their living areas and as such to having lower levels of mobility_)
#'
#' ## Let's begin!
#'
#' ##### Import Libraries
# Setup for the Chicago trip-production exercise: load packages, import the
# Excel dataset and show summary statistics (literate script; the #' lines
# are narrative rendered by knitr::spin).
library(readxl) #Library used to import excel files
library(tidyverse) # Pack of most used libraries
library(skimr) # Library used for providing a summary of the data
library(DataExplorer) # Library used in data science to perform exploratory data analysis
library(corrplot) # Library used for correlation plots
library(car) # Library used for testing autocorrelation (Durbin Watson)
library(olsrr) # Library used for testing multicollinearity (VIF, TOL, etc.)
#'
#' ##### Import dataset
dataset <- read_excel("Data/TDM_Class3_MLR_Chicago_Example.xls")
class(dataset)
#'
#' ##### Transform the dataset into a dataframe
df <- data.frame(dataset)
#'
#' ##### Show summary statistics
skim(df)
summary(df)
#'
#' ## Multiple Linear Regression
#' Equation with `TODU` as the dependent variable:
#'
#' $$Y_{\text{TODU}} = \beta_{0} + \beta_{1}{\text{ACO}} + \beta_{2}{\text{AHS}} + \beta_{3}{\text{SI}} + \beta_{4}{\text{SRI}} +\beta_{5}{\text{UI}} + \varepsilon$$
#'
#' #### Checking assumptions
#' Before running the model, you need to check if the assumptions are met.
#'
#' ##### Linear relation
#' For instance, let's take a look if the independent variables have linear relation with the dependent variable.
#'
# Visual linearity check: scatter each candidate predictor against TODU.
par(mfrow=c(2,3)) #set plot area as 2 rows and 3 columns
plot(x = df$TODU, y = df$ACO, xlab = "TODU", ylab = "ACO")
plot(x = df$TODU, y = df$AHS, xlab = "TODU", ylab = "AHS")
plot(x = df$TODU, y = df$SI, xlab = "TODU", ylab = "SI")
plot(x = df$TODU, y = df$SRI, xlab = "TODU", ylab = "SRI")
plot(x = df$TODU, y = df$UI, xlab = "TODU", ylab = "UI")
#'
#' Or you could execute a pairwise scatterplot matrix, that compares every variable with each other:
#'
pairs(df[,1:6], pch = 19, lower.panel = NULL)
#'
#' > **Note:** SRI and TODU do not have a linear relationship. This should interfere on the model.
#'
#' ##### Normal distribution of the dependent variable
#' Check if the dependent variable is normally distributed. If the sample is smaller than 2000 observations, use Shapiro-Wilk test:
#'
# Normality tests for the dependent variable, then the full OLS model.
shapiro.test(df$TODU)
#'
#' If not, use the Kolmogorov-Smirnov test
#'
ks.test(df$TODU, "pnorm", mean=mean(df$TODU), sd = sd(df$TODU))
#'
#' > **Note:** Regarding the warning that appears in the Kolmogorov-Smirnov test "ties should not be present for the Kolmogorov-Smirnov test", what most likely happened is that this test is only reliable with continuous variables.
#'
#' Although `TODU` is a continuous variable, the small sample size (n=57) makes it likely to have repeated values. Consequently, the test considers `TODU` as a categorical variable. Therefore, this is another evidence, that for small samples it is more appropriate to use the Shapiro-Wilk Test.
#' The null hypothesis of both tests is that the distribution is normal. Therefore, for the distribution to be normal, the pvalue > 0.05 and you should not reject the null hypothesis.
#'
#' ### Multiple linear regression model
#'
# Fit TODU on all five candidate predictors.
model <- lm(TODU ~ ACO + AHS + SI + SRI + UI, data = df)
summary(model)
#'
#' **Assessing the model**:
#'
#' 1. First check the **pvalue** and the **F statistics** of the model to see if there is any statistical relation between the dependent variable and the independent variables. If pvalue < 0.05 and the F statistics > Fcritical = 2,39, then the model is statistically acceptable.
#' 2. The **R-square** and **Adjusted R-square** evaluate the amount of variance that is explained by the model. The difference between one and another is that the R-square does not consider the number of variables. If you increase the number of variables in the model, the R-square will tend to increase which can lead to overfitting. On the other hand, the Adjusted R-square adjust to the number of independent variables.
#' 3. Take a look a the **t-value** and the Pr(>|t|). If the t-value > 1,96 or Pr(>|t|) < 0,05, then the IV is statistically significant to the model.
#' 4. To analyze the **estimates** of the variables, you should first check the **signal** and evaluate if the independent variable has a direct or inverse relationship with the dependent variable. It is only possible to evaluate the **magnitude** of the estimate if all variables are continuous and standarzized or by calculating the elasticities. The elasticities are explained and demonstrated in chapter 4.
#'
#'
#' ##### Residuals
#' Let's see how do the residuals behave by plotting them.
#'
#' * **Residuals vs Fitted:** This plot is used to detect non-linearity, heteroscedasticity, and outliers.
#' * **Normal Q-Q:** The quantile-quantile (Q-Q) plot is used to check if the dependent variable follows a normal distribution.
#' * **Scale-Location:** This plot is used to verify if the residuals are spread equally (homoscedasticity) or not (heteroscedasticity) through the sample.
#' * **Residuals vs Leverage:** This plot is used to detect the impact of the outliers in the model. If the outliers are outside the Cook-distance, this may lead to serious problems in the model.
#'
#' Try analyzing the plots and check if the model meets the assumptions.
# Model diagnostics: residual plots, autocorrelation (Durbin-Watson) and
# multicollinearity (VIF/TOL, condition index).
par(mfrow=c(2,2))
plot(model)
#'
#'
#' ##### Autocorrelation
#' Execute the Durbin-Watson test to evaluate autocorrelation of the residuals
durbinWatsonTest(model)
#'
#' > **Note:** In the Durbin-Watson test, values of the D-W Statistic vary from 0 to 4. If the values are from 1.8 to 2.2 this means that there is no autocorrelation in the model.
#'
#' ##### Multicollinearity
#' Calculate the VIF and TOL to test for multicollinearity.
#'
ols_vif_tol(model)
#'
#' > **Note:** Values of VIF > 5, indicate multicollinearity problems.
#'
#' Calculate the Condition Index to test for multicollinearity
ols_eigen_cindex(model)
#'
#' > **Note:** Condition index values > 15 indicate multicollinearity problems, and values > 30 indicate serious problems of multicollinearity.
#'
#' To test both simultaneously, you can run the code below:
ols_coll_diag(model)
|
f7396f84a0e7f5dae73f13251a92065dc072ab66
|
84a6e98edb10a596cd2a08b6e5ea6b8d707e1045
|
/R/read_Boolean_functions_c.R
|
ddcb0bc8366bcf3d99b3d05cda24af4b428c4bf1
|
[] |
no_license
|
SPIDDOR/SPIDDOR
|
66af1c6995f9c87e66468c83eba1f09bde324b34
|
770f5b35baf9385d36b209f02b2153bcba2573cb
|
refs/heads/master
| 2021-07-04T06:07:03.650996
| 2019-04-24T14:51:55
| 2019-04-24T14:51:55
| 72,274,593
| 6
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,314
|
r
|
read_Boolean_functions_c.R
|
# Parse a file of Boolean network functions ("node = expression", one per
# line) and generate the C++ source "Boolean_func_C.cpp" implementing each
# node's update rule, plus the dynamic-evolution driver files.
#
# file:  path to the Boolean-function definition file.
# Lines: optionally, the already-read lines of such a file (overrides 'file').
#
# Regulator prefixes recognised in the expressions (stripped before code
# generation; each makes the generated C++ call a different helper):
#   THR_x[d]  -> emits a lastn_f(...) call with a duration argument
#   MOD_x[d]  -> emits a lastn_f(...) call with a modulator duration
#   ANY_x[d]  -> emits a lastns_f(...) call
#   plain x   -> emits a last_f(...) call
#
# Returns a list with the node names, the input nodes (Initial_conditions),
# the modulator durations, the THR_ argument durations, and a default
# Polymorphism vector (all 1).
read.Boolean.functions.C<-function(file=NULL,Lines=NULL){
  if(length(Lines)==0) Lines <- readLines(file, -1)
  Lines <- gsub("#.*", "", Lines) #remove comments
  Lines <- Lines[nchar(Lines) > 0]
  #Create .cpp:
  write("","Boolean_func_C.cpp")
  add_header("Boolean_func_C.cpp")
  # Left-hand sides: one sanitised node name per Boolean function.
  nodes<-unlist(lapply(Lines,function(x){
    c<-strsplit(x, split = "=")[[1]]
    output<-c[1]
    output<-gsub(" ","",output,perl=TRUE)
    output<-gsub("[-\\.\\:]","_",output,perl=TRUE)
    output<-gsub("[+=*]","",output,perl=TRUE)
  }))
  if(any(duplicated(nodes))) stop(paste("Node",nodes[duplicated(nodes)],"has more than one Boolean function"))
  # Right-hand sides: every regulator mentioned in any function, with
  # prefixes (THR_/MOD_/ANY_) and "[duration]" suffixes stripped.
  regulators<-unlist(lapply(Lines,function(x){
    c<-strsplit(x, split = "=")[[1]]
    c[2]<-gsub(" ","",c[2],perl=TRUE)
    reg<-strsplit(c[2], split = "[&\\!\\|\\(\\)]",perl=TRUE)[[1]]
    reg<-reg[!(reg%in%"")]
    reg<-reg[!reg%in%c(0,1)]
    reg<-gsub("(THR|MOD|ANY)_","",reg,perl=TRUE)#ignore.case=T)
    reg<-gsub("[-\\.\\:]","_",reg,perl=TRUE)
    reg<-gsub("[+=*]","",reg,perl=TRUE)
    reg<-gsub("\\[.*?]","",reg)
  }))
  regulators<-unique(regulators)
  node.names<-nodes
  # Regulators without a function of their own become self-regulating nodes.
  if(length(setdiff(regulators,nodes))>0){
    for(node in setdiff(regulators,nodes)){
      Lines<-c(Lines,paste(node,"=",node))
      node.names<-c(node.names,node)
    }
  }
  arguments<-c()
  arguments2<-c()
  arg_modulator<-c()
  U_duration<-c()
  fun_header<-c()
  Initial_conditions<-c()
  modulator_duration<-c()
  modulator<-c()
  # Generate one C++ update function per node.
  for(j in 1:length(Lines)){
    c<-strsplit(Lines[j], split = "=")[[1]]
    if(length(c)>2){
      stop(paste("BF",j,"incorrectly written"))
    }
    output<-node.names[j]
    #Regulators:
    c[2]<-gsub(" ","",c[2],perl=TRUE) #remove all spaces
    #check that the number of brackets is even: otherwise raise an error
    if(((gregexpr("[(\\)]",c[2])[[1]][1])!=-1) & ((sapply(gregexpr("[(\\)]",c[2]),length)%%2)!=0) ){
      stop(paste("The brakets of the BF are wrong for",output))
    }
    chars <- strsplit(c[2], split = "")[[1]]
    #which operators do we have:
    z<-gregexpr("[&\\!\\|\\(\\)]",c[2],perl=TRUE)[[1]]
    operators<-chars[z]
    if(z[[1]]==-1) operators<-""
    if(z[1]==1 & (operators[1] %in% c("&","|"))){ #the Boolean function cannot start with & or |
      stop(paste("The Boolean function is incorrently written for",output))
    }
    reg<-strsplit(c[2], split = "[&\\!\\|\\(\\)]",perl=TRUE)[[1]]
    upreg_dur<-c()
    upreg_dur_f<-c()
    regulators<-c()
    regulators_header<-c()
    Us<-c()
    Us_f<-c()
    o=1
    # Walk through the regulators; 'o' tracks the next operator to splice in.
    for(i in 1:length(reg)){
      reg[i]<-gsub("[-\\.\\:]","_",reg[i],perl=TRUE)
      reg[i]<-gsub("[+=*]","",reg[i],perl=TRUE)
      if(reg[i]==""){
        regulators<-paste(regulators,as.character(operators[o]),sep=" ")
        o=o+1
        next
      }
      if(reg[i]==output & length(reg[!reg%in%""])==1) Initial_conditions<-c(Initial_conditions,output)
      #is there a THR_ (threshold/U) prefix?
      if(grepl("THR_",reg[i],perl=TRUE)){
        reg[i]<-gsub("THR_","",reg[i],perl=TRUE)
        if(gregexpr("\\[", reg[i])!=-1){
          # "[...]" present: either a named duration argument or a number.
          if(suppressWarnings(is.na(as.numeric(gsub("\\[|\\]", "", regmatches(reg[i], gregexpr("\\[.*?\\]", reg[i]))[[1]]))))){
            U_duration<-c(U_duration,3)
            Us_f<-c(Us_f,paste(",",gsub("\\[|\\]", "", regmatches(reg[i], gregexpr("\\[.*?\\]", reg[i]))[[1]])))
            Us<-c(Us,paste(",const int & ",gsub("\\[|\\]", "", regmatches(reg[i], gregexpr("\\[.*?\\]", reg[i]))[[1]]),sep=""))
            R<-paste("\n\tlastn_f(",gsub("\\[.*?]","",reg[i]),",pattern,update,time,ts,asynchronous,",
                     gsub("\\[|\\]", "", regmatches(reg[i], gregexpr("\\[.*?\\]", reg[i]))[[1]]),")",sep="")
            reg[i]<-gsub("\\[.*?]","",reg[i])
          }else{
            U_duration<-c(U_duration,as.numeric(gsub("\\[|\\]", "", regmatches(reg[i], gregexpr("\\[.*?\\]", reg[i]))[[1]])))
            reg[i]<-gsub("\\[.*?]","",reg[i])
            Us_f<-c(Us_f,paste(",",paste(reg[i],"max_",output,sep=""),sep=""))
            Us<-c(Us,paste(",const int & ",reg[i],"max_",output,sep=""))
            R<-paste("\n\tlastn_f(",reg[i],",pattern,update,time,ts,asynchronous,",
                     reg[i],"max_",output,")",sep="")
          }
          #reg[i]<-gsub("\\[.*?]","",reg[i])
        }else{
          # No "[...]": default duration of 3.
          U_duration<-c(U_duration,3)
          Us_f<-c(Us_f,paste(",",paste(reg[i],"max_",output,sep=""),sep=""))
          Us<-c(Us,paste(",const int & ",reg[i],"max_",output,sep=""))
          R<-paste("\n\tlastn_f(",reg[i],",pattern,update,time,ts,asynchronous,",
                   reg[i],"max_",output,")",sep="")
        }
        regulators_header<-c(regulators_header,paste("\n\tint ",reg[i],'= std::distance(nodes_names, std::find(nodes_names, nodes_names + (n_nodes - 1),"',gsub("THR_","",reg[i],perl=TRUE),'"));',sep=""))
      }else if(grepl("MOD_",reg[i],perl=TRUE)){
        reg[i]<-gsub("MOD_","",reg[i],perl=TRUE)
        if(gregexpr("\\[", reg[i])!=-1){
          if(suppressWarnings(is.na(as.numeric(gsub("\\[|\\]", "", regmatches(reg[i], gregexpr("\\[.*?\\]", reg[i]))[[1]]))))){
            modulator_duration<-c(modulator_duration,3)
            upreg_dur_f<-c(upreg_dur_f,paste(",",gsub("\\[|\\]", "", regmatches(reg[i], gregexpr("\\[.*?\\]", reg[i]))[[1]])))
            upreg_dur<-c(upreg_dur,paste(",const int& ",gsub("\\[|\\]", "", regmatches(reg[i], gregexpr("\\[.*?\\]", reg[i]))[[1]]),sep=""))
            R<-paste("\n\tlastn_f(",gsub("\\[.*?]","",reg[i]),",pattern,update,time,ts,asynchronous,",gsub("\\[|\\]", "", regmatches(reg[i], gregexpr("\\[.*?\\]", reg[i]))[[1]]),")",sep="")
          }else{
            modulator_duration<-c(modulator_duration,as.numeric(gsub("\\[|\\]", "", regmatches(reg[i], gregexpr("\\[.*?\\]", reg[i]))[[1]])))
            upreg_dur_f<-c(upreg_dur_f,paste(",MOD_",output,sep=""))
            upreg_dur<-c(upreg_dur,paste(",const int& MOD_",output,sep=""))
            R<-paste("\n\tlastn_f(",gsub("\\[.*?]","",reg[i]),",pattern,update,time,ts,asynchronous,",paste("MOD_",output,sep=""),")",sep="")
          }
          reg[i]<-gsub("\\[.*?]","",reg[i])
        }else{
          modulator_duration<-c(modulator_duration,3)
          upreg_dur_f<-c(upreg_dur_f,",modulator_dur")
          upreg_dur<-c(upreg_dur,",const int& modulator_dur")
          R<-paste("\n\tlastn_f(",reg[i],",pattern,update,time,ts,asynchronous,modulator_dur)",sep="")
        }
        regulators_header<-c(regulators_header,paste("\n\tint ",reg[i],'= std::distance(nodes_names, std::find(nodes_names, nodes_names + (n_nodes - 1),"',reg[i],'"));',sep=""))
      }else if(grepl("ANY_",reg[i],perl=TRUE)){
        reg[i]<-gsub("ANY_","",reg[i],perl=TRUE)
        if(gregexpr("\\[", reg[i])!=-1){
          if(suppressWarnings(is.na(as.numeric(gsub("\\[|\\]", "", regmatches(reg[i], gregexpr("\\[.*?\\]", reg[i]))[[1]]))))){
            modulator_duration<-c(modulator_duration,3)
            upreg_dur_f<-c(upreg_dur_f,paste(",",gsub("\\[|\\]", "", regmatches(reg[i], gregexpr("\\[.*?\\]", reg[i]))[[1]])))
            upreg_dur<-c(upreg_dur,paste(",const int& ",gsub("\\[|\\]", "", regmatches(reg[i], gregexpr("\\[.*?\\]", reg[i]))[[1]]),sep=""))
            R<-paste("\n\tlastns_f(",gsub("\\[.*?]","",reg[i]),",pattern,update,time,ts,asynchronous,",gsub("\\[|\\]", "", regmatches(reg[i], gregexpr("\\[.*?\\]", reg[i]))[[1]]),")",sep="")
            reg[i]<-gsub("\\[.*?]","",reg[i])
          }else{
            modulator_duration<-c(modulator_duration,as.numeric(gsub("\\[|\\]", "", regmatches(reg[i], gregexpr("\\[.*?\\]", reg[i]))[[1]])))
            reg[i]<-gsub("\\[.*?]","",reg[i])
            upreg_dur_f<-c(upreg_dur_f,paste(",any",reg[i],"_",output,sep=""))
            upreg_dur<-c(upreg_dur,paste(",const int& any",reg[i],"_",output,sep=""))
            R<-paste("\n\tlastns_f(",reg[i],",pattern,update,time,ts,asynchronous,",paste("any",reg[i],"_",output,sep=""),")",sep="")
          }
        }else{
          modulator_duration<-c(modulator_duration,3)
          upreg_dur_f<-c(upreg_dur_f,paste(",any",reg[i],"_",output,sep=""))
          upreg_dur<-c(upreg_dur,paste(",const int& any",reg[i],"_",output,sep=""))
          R<-paste("\n\tlastns_f(",reg[i],",pattern,update,time,ts,asynchronous,",paste("any",reg[i],"_",output,sep=""),")",sep="")
        }
        regulators_header<-c(regulators_header,paste("\n\tint ",reg[i],'= std::distance(nodes_names, std::find(nodes_names, nodes_names + (n_nodes - 1),"',reg[i],'"));',sep=""))
      }else if(reg[i]=="1"){ #a literal number means this is an input node, so record it in Initial_conditions
        R<-reg[i]
        Initial_conditions<-c(Initial_conditions,output)
      }
      else{
        regulators_header<-c(regulators_header,paste("\n\tint ",reg[i],'= std::distance(nodes_names, std::find(nodes_names, nodes_names + (n_nodes - 1),"',reg[i],'"));',sep=""))
        R<-paste("\n\tlast_f(",reg[i],",pattern,update,time,ts,asynchronous)",sep="")
      }
      if(o<=length(operators)){
        R<-paste(R,as.character(operators[o]),sep=" ")
        o=o+1
      }
      regulators<-paste(regulators,R)
    }
    # Assemble and append the complete C++ update function for this node.
    regulators<-paste("(",regulators,")& Polymorphism_f(P)",sep="")
    regulators<-paste(regulators,";\n}")
    arg_modulator<-c(arg_modulator,upreg_dur_f)
    arguments<-c(arguments,Us_f)
    upreg_dur_f<-unique(upreg_dur_f)
    upreg_dur<-unique(upreg_dur)
    Us_f<-unique(Us_f)
    Us<-unique(Us)
    row<-paste("\n\tint ",output,'= std::distance(nodes_names, std::find(nodes_names, nodes_names + (n_nodes - 1),"',output,'"));',sep="")
    regulators_header<-c(row,regulators_header)
    regulators_header<-unique(regulators_header)
    regulators_header<-paste(regulators_header,collapse="")
    Def_fun<-paste("void ",output,"_f(int pattern[], int update[], const int& time, std::string nodes_names[],
                   const int&n_nodes, const int& ts",paste(Us,collapse=""),paste(upreg_dur,collapse=""),",const double& P,bool asynchronous=true){",sep="")
    fun_header<-c(fun_header,paste(' if(node_i == "',output,'") ',
                                   output,"_f(pattern, update, j,nodes_names,n_nodes,ts",paste(Us_f,collapse=""),paste(upreg_dur_f,collapse=""),paste(",P_",output,sep=""),",asynchronous);",sep=""))
    fun_header<-paste(fun_header,collapse="\n\t\t\telse")
    pattern<-paste("\n \n\tpattern[",output,"*(ts + 1) + time]=",sep="")
    All<-paste(Def_fun,regulators_header,pattern,regulators)
    write(All,"Boolean_func_C.cpp",append=TRUE)
  }
  #write dynamic_evolution.f
  # NOTE(review): 'length(arguments>0)' is almost certainly meant to be
  # 'length(arguments) > 0'; as written it happens to behave the same because
  # length(arguments > 0) == length(arguments).
  if(length(arguments>0)){
    arguments<-gsub("[,\\s]","",arguments,perl=TRUE)
  }
  arg_modulator<-gsub("[,\\s]","",arg_modulator,perl=TRUE)
  arguments2<-c(arguments,arg_modulator)
  if(length(arguments2)>0) arguments2<-paste(",",paste("const int&",arguments2))
  arguments2<-unique(arguments2)
  arguments2<-paste(arguments2,collapse="")
  U_duration<-U_duration[!duplicated(arguments)]
  arguments<-unique(arguments)
  modulator_duration<-modulator_duration[!duplicated(arg_modulator)]
  arg_modulator<-unique(arg_modulator)
  modulator<-suppressWarnings(setNames(as.numeric(modulator_duration),arg_modulator))
  # Write the R/C++ driver files and source the generated R wrapper.
  write.dynamic_evolution_cpp(arguments2,fun_header,node.names)
  write.dynamic_evolution_c(arguments,arg_modulator)
  source("dynamic_evolution.R")
  return(list("nodes.names"=node.names,"Initial_conditions"=Initial_conditions,"Modulator"=modulator,
              "Arguments"=suppressWarnings(setNames(as.numeric(U_duration),arguments)),"Polymorphism"=setNames(rep(1,length(node.names)),node.names)))
}
# source("R/read_Boolean_functions.R")
# source("R/read_Boolean_functions_c.R")
# source("R/add_header.R")
# source("R/write_dynamic_evolution_cpp.R")
# source("R/write_dynamic_evolution_c.R")
#
# library(Rcpp)
# file<-"Example_networks/example_network.txt"
# BN<-read.Boolean.functions(file)
# pattern=dynamic_evolution.f(BN,time.steps = 30)
|
b3884fae99b8970e6a517018ae8fbffe4f5a1845
|
b1f207db195eb035198ef612602c0adde867bb06
|
/shizhan002.R
|
1511ab84d1d892130f7990583f193eb9a6cec7f9
|
[] |
no_license
|
kmustzjq/StudyR
|
4782c68883b9f86e2b15928914a279e3441054f8
|
2785ab410691889d030cff21896f4fe1b0ff12ab
|
refs/heads/master
| 2021-08-27T20:48:20.986278
| 2021-08-09T00:14:06
| 2021-08-09T00:14:06
| 164,197,516
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 670
|
r
|
shizhan002.R
|
# Practice script: descriptive statistics, a chi-square test of independence,
# random-forest variable importance, and correlation analysis on built-in
# data sets (mtcars, Arthritis, state.x77).
vars<-c("mpg","hp","wt")
head(mtcars[vars])
summary(mtcars)
# Variance-covariance matrix of all columns, then per-column variance and sd.
var(mtcars)
sapply(mtcars, var)
sapply(mtcars, sd)
library(Hmisc)
describe(mtcars)
mtcars$am
library(vcd)
# NOTE(review): attach() is generally discouraged; xtabs() below already uses
# data=Arthritis, so the attach appears redundant.
attach(Arthritis)
mytable<-xtabs(~Treatment+Improved,data=Arthritis)
chisq.test(mytable)
library(randomForest)
set.seed(4543)
data(mtcars)
# Fit a random forest of mpg on all other variables; keep.forest=FALSE because
# only the importance measures are needed, not predictions.
mtcars.rf <- randomForest(mpg ~ ., data=mtcars, ntree=1000,
                          keep.forest=FALSE, importance=TRUE)
summary(mtcars.rf)
importance(mtcars.rf)
# type=1 selects the first importance measure (see ?randomForest::importance).
importance(mtcars.rf, type=1)
states<-state.x77[,1:6]
cov(states)
cor(states,method = "spearman")
library(psych)
# Correlation matrix with significance tests (psych::corr.test).
corr.test(states,use = "complete")
|
c0aa34aa3bf75053af4701fbe599b94acae8a084
|
ddc8c8f96c348abeccba5fc31ed01844d093b33b
|
/Rummy/Rank_order_analysis.R
|
642bd175202a8ff1ee6f30ad8eba243492d9dced
|
[
"MIT"
] |
permissive
|
hypdoctor/Lawson2020
|
b47c4fa8c67ccfe5f2ccf7d55cc12ee464943fa2
|
82662ff8183307ec09439dc001834537ec00bda3
|
refs/heads/master
| 2023-03-16T04:18:55.757725
| 2020-09-23T16:45:18
| 2020-09-23T16:45:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,363
|
r
|
Rank_order_analysis.R
|
# Rank-order aggregation of coreCTL CRISPR-screen hits across experiments.
# NOTE(review): setwd() with an absolute path is machine-specific; prefer
# relative paths or an RStudio project.
setwd("C:/Users/rumi/Desktop/")
## reading in coreCTLs
#Cores_daisy <- read.table("Screen_recent/Screens/drugZ_V1/Output/core_drugZV1.txt", header = T, stringsAsFactors = F)
## selecting mid timepoint data for all screen, except OVA_QR
# NOTE(review): 'gene_data_table.combined' and 'Cores_daisy' (the read above is
# commented out) are expected to already exist in the session.
col_index=which(!grepl("OVA_QR",colnames(gene_data_table.combined)) &
                !grepl("late",colnames(gene_data_table.combined)) &
                !grepl("early",colnames(gene_data_table.combined)))
colnames(gene_data_table.combined)[col_index]
gene_data_table.combined_mid=gene_data_table.combined[,col_index]
rownames(gene_data_table.combined_mid)=gene_data_table.combined_mid$GENE
nrow(gene_data_table.combined_mid)
nrow(gene_data_table.combined)
## extracting coreCTL genes
library(dplyr)
cores_daisy_table.combined_mid <- inner_join(Cores_daisy, gene_data_table.combined_mid, by = "GENE")
nrow(cores_daisy_table.combined_mid)
##=================================
## calculate rank-product statistics for cores_daisy_table.combined_mid
##=================================
colnames(cores_daisy_table.combined_mid)
# Split the combined table into per-statistic column groups (drugZ output).
cores_daisy_table.combined_mid.normZ=cores_daisy_table.combined_mid[,grep("normZ",colnames(cores_daisy_table.combined_mid))]
cores_daisy_table.combined_mid.rank_synth=cores_daisy_table.combined_mid[,grep("rank_synth",colnames(cores_daisy_table.combined_mid))]
cores_daisy_table.combined_mid.rank_supp=cores_daisy_table.combined_mid[,grep("rank_supp",colnames(cores_daisy_table.combined_mid))]
cores_daisy_table.combined_mid.fdr_synth=cores_daisy_table.combined_mid[,grep("fdr_synth",colnames(cores_daisy_table.combined_mid))]
cores_daisy_table.combined_mid.fdr_supp=cores_daisy_table.combined_mid[,grep("fdr_supp",colnames(cores_daisy_table.combined_mid))]
# Rank-product statistic with a permutation-based significance estimate.
#
# For each row (gene) the rank product is the geometric mean of its
# per-column ranks.  Significance is estimated against 1000 null replicates,
# each built from independently permuted rank columns; the permutations are
# seeded deterministically (set.seed(1), set.seed(2), ...) so results are
# reproducible across runs.
#
# rank_matrix: numeric matrix, one row per gene, one column per experiment.
# Returns a list with the observed rank products, the fraction of null rank
# products below the observed one (stored under the historical name "EPR"),
# and the estimated percentage of false positives "PFP".
calculate_rank_product_stat <- function(rank_matrix) {
  n_exps <- ncol(rank_matrix)
  n_total <- n_exps * 1000
  # Re-rank every column, then take the geometric mean of ranks per row.
  per_col_ranks <- apply(rank_matrix, 2, rank)
  geom_mean_rank <- function(r) prod(r)^(1 / n_exps)
  observed_rp <- apply(per_col_ranks, 1, geom_mean_rank)
  # Build n_exps*1000 seeded random permutations of the gene indices.
  permuted_cols <- lapply(seq_len(n_total), function(seed) {
    set.seed(seed)
    sample(seq_len(nrow(rank_matrix)))
  })
  permuted_all <- do.call(cbind, permuted_cols)
  # Group the permuted columns into 1000 replicates of n_exps columns each
  # and compute the rank product of every replicate.
  replicate_id <- ceiling(seq_len(n_total) / n_exps)
  replicate_rp <- lapply(split(seq_len(n_total), replicate_id), function(cols) {
    apply(permuted_all[, cols], 1, geom_mean_rank)
  })
  null_rp <- do.call(cbind, replicate_rp)
  # Per gene: fraction of null rank products strictly below the observed one.
  observed_rep <- matrix(rep(observed_rp, 1000), ncol = 1000, byrow = FALSE)
  erp <- apply(null_rp - observed_rep < 0, 1, sum) / 1000
  pfp <- erp / rank(observed_rp)
  # "EPR" key kept for backward compatibility with existing callers.
  list(rank_product = observed_rp, EPR = erp, PFP = pfp)
}
## calculate rank statistics separately for synth and supp
rank_synth_stat=calculate_rank_product_stat(cores_daisy_table.combined_mid.rank_synth)
rank_supp_stat=calculate_rank_product_stat(cores_daisy_table.combined_mid.rank_supp)
# Summary table: normZ summary statistics, rank-product statistics for both
# directions (element [[1]] = rank product, [[2]] = empirical p-value), and
# the number of individual screens significant at FDR <= 0.05.
cores_daisy_table.combined_mid.summary_table=data.frame(GENE=cores_daisy_table.combined_mid$GENE,
                                                        normZ.mean=round(apply(cores_daisy_table.combined_mid.normZ,1,function(x) mean(x,na.rm = T)),2),
                                                        normZ.min=apply(cores_daisy_table.combined_mid.normZ,1,function(x) min(x,na.rm = T)),
                                                        normZ.max=apply(cores_daisy_table.combined_mid.normZ,1,function(x) max(x,na.rm = T)),
                                                        normZ.sd=round(apply(cores_daisy_table.combined_mid.normZ,1,function(x) sd(x,na.rm = T)),2),
                                                        rank_synth.rank_product=rank(rank_synth_stat[[1]]),
                                                        rank_synth.pvalue=rank_synth_stat[[2]],
                                                        rank_supp.rank_product=rank(rank_supp_stat[[1]]),
                                                        rank_supp.pvalue=rank_supp_stat[[2]] ,
                                                        synth_sign_exps=apply(cores_daisy_table.combined_mid.fdr_synth<=0.05,1,function(x) sum(x,na.rm=T)),
                                                        supp_sign_exps=apply(cores_daisy_table.combined_mid.fdr_supp<=0.05,1,function(x) sum(x,na.rm=T)),
                                                        cores_daisy_table.combined_mid.normZ,check.names = F,stringsAsFactors = F)
###pre-processing for Plot: log(Rank_pVal) vs Rank_Product
synth <- cores_daisy_table.combined_mid.summary_table[, c("GENE","normZ.mean","rank_synth.rank_product","rank_synth.pvalue")]
colnames(synth)[3:4] <- c("Rank_Product", "Rank_pValue")
# +0.001 pseudocount avoids -log10(0) for genes with empirical p-value 0.
synth$LogPVal <- -log10(synth$Rank_pValue +0.001)
supp <- cores_daisy_table.combined_mid.summary_table[, c("GENE","normZ.mean","rank_supp.rank_product","rank_supp.pvalue")]
colnames(supp)[3:4] <- c("Rank_Product", "Rank_pValue")
# Reverse the suppressor ranking so both directions share a single x-axis.
supp$Rank_Product <- rank(-supp$Rank_Product)
supp$LogPVal <- -log10(supp$Rank_pValue +0.001)
rank_combined <- rbind(synth, supp)
##shortening off y-axis
rank_comb_yaxisCut <- subset(rank_combined, rank_combined$LogPVal > 0.5)
##significant genes
pval_sig <- subset(rank_comb_yaxisCut, rank_comb_yaxisCut$Rank_pValue < 0.05)
library(ggplot2)
library(ggrepel)
# Scatter of -log10(p) vs rank product; significant genes highlighted and
# labelled (yellow = high rank product, blue = low).
(b <- ggplot(rank_comb_yaxisCut, aes(Rank_Product, LogPVal)) + geom_point(col = "grey")+
    geom_hline(yintercept=-log10(0.05 +0.001), linetype= 4, color = "red", size=0.5)+
    geom_point(data = pval_sig, aes(Rank_Product, LogPVal, size = LogPVal), pch = 21, col = "black",
               fill = ifelse(pval_sig$Rank_Product > 100, "yellow", "blue"))+
    geom_text_repel(data = pval_sig, aes(x = Rank_Product, y = LogPVal), label = pval_sig$GENE)+
    ylab("-log10(Rank_pVal)")+
    theme_bw() +
    theme(panel.grid = element_blank(),
          axis.line = element_line(colour = "Black"),
          panel.background = element_rect(fill = "transparent", colour = NA),
          plot.background = element_rect(fill = "transparent", colour = NA)) +
    annotate(geom="text", x=100, y=-log10(0.04 +0.001), label="p.Value = 0.05",
             color="black"))
ggsave("Rank_overlap_2_Core_rank_aggregate.pdf", plot = last_plot(),
       #width = 15, height = 7,
       width = 7, height = 7,
       bg = "transparent", dpi = 300, useDingbats=FALSE)
|
a2df71979ed2f7c8102328b0afd429d9f85c9719
|
d9d66c4db287172c6bf40526501d8ce4ab36645a
|
/R/MacroPCApredict.R
|
66b21d55d334c697ceb333e36d85d540a4b80ec2
|
[] |
no_license
|
cran/cellWise
|
fea14ee5b9e6e5f5b086e7528051818fb7c6c79f
|
d08e57e682651001645c94d429778839daea79b0
|
refs/heads/master
| 2023-04-30T13:02:06.532853
| 2023-04-20T21:22:30
| 2023-04-20T21:22:30
| 75,787,435
| 1
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,371
|
r
|
MacroPCApredict.R
|
# Predict scores and outlyingness for new data from an existing MacroPCA fit.
#
# Xnew:            new data matrix; must have the same columns as the data
#                  the initial fit was computed on (InitialMacroPCA$remX).
# InitialMacroPCA: a fitted MacroPCA object.
# MacroPCApars:    optional list whose entries override the parameters stored
#                  in the fit.
#
# Returns a list with the (re-used) loadings/center/eigenvalues, the DDC
# results on Xnew, the scores, orthogonal (OD) and score (SD) distances with
# their cutoffs, standardized residuals and flagged cells/rows, and -- when
# bigOutput is TRUE -- the NA-imputed ("nai"), cell-imputed ("ci") and fully
# imputed ("fi") versions of the analysis.
MacroPCApredict <- function(Xnew, InitialMacroPCA, MacroPCApars = NULL)
{ # Added a check whether the number of columns of Xnew
  # matches that of InitialMacroPCA, and cleaned up code.
  # Merge user-supplied parameters into the parameters stored with the fit.
  if (is.null(MacroPCApars)) {
    MacroPCApars = InitialMacroPCA$MacroPCApars
  }
  else {
    if (!is.list(MacroPCApars)) {
      stop("MacroPCApars must be a list")
    }
    InitialMacroPCA$MacroPCApars[names(MacroPCApars)] <- MacroPCApars
    MacroPCApars <- InitialMacroPCA$MacroPCApars
  }
  # Defaults for parameters that may be missing from older fit objects.
  if (!"distprob" %in% names(MacroPCApars)) {
    MacroPCApars$distprob <- 0.99
  }
  if (!"maxiter" %in% names(MacroPCApars)) {
    MacroPCApars$maxiter <- 20
  }
  if (!"tol" %in% names(MacroPCApars)) {
    MacroPCApars$tol <- 0.005
  }
  if (!"bigOutput" %in% names(MacroPCApars)) {
    MacroPCApars$bigOutput <- TRUE
  }
  Xnew <- as.matrix(Xnew)
  n <- nrow(Xnew)
  d <- ncol(Xnew)
  dold = ncol(InitialMacroPCA$remX)
  if(d != dold) stop(paste0(
    "Xnew should have ",dold," columns, that correspond\n",
    " to those of InitialMacroPCA$remX"))
  InitialDDC <- InitialMacroPCA$DDC
  DDCpars <- MacroPCApars$DDCpars
  scaleX <- InitialMacroPCA$scaleX
  distprob <- MacroPCApars$distprob
  maxiter <- MacroPCApars$maxiter
  tol <- MacroPCApars$tol
  bigOutput <- MacroPCApars$bigOutput
  # Cellwise outlier detection on the new data, using the stored DDC fit.
  resultDDC <- DDCpredict(Xnew, InitialDDC, DDCpars)
  DDCpars <- resultDDC$DDCpars
  # Bring new data and DDC imputations to the scale of the initial fit.
  DDCimp <- sweep(resultDDC$Ximp, 2, scaleX, "/")
  Xnew <- sweep(Xnew, 2, scaleX, "/")
  XO <- Xnew; rm(Xnew)
  Xfi <- XO
  indcells <- resultDDC$indcells
  indNA <- which(is.na(XO))
  indimp <- unique(c(indcells, indNA))
  Xfi[indimp] <- DDCimp[indimp] # impute flagged cells and NA's
  Xind <- Xfi
  Xind[indimp] <- NA
  Xind <- is.na(Xind) # matrix with T in those cells
  # Re-use the PCA model of the initial fit; nothing is re-estimated.
  k <- InitialMacroPCA$k
  loadings <- InitialMacroPCA$loadings
  center <- InitialMacroPCA$center/scaleX
  eigenvalues <- InitialMacroPCA$eigenvalues
  if (any(Xind) & maxiter > 0) { # iterate, but keep center.
    # Refine the imputed cells by repeatedly projecting onto the fixed PCA
    # subspace until the mean squared change drops below tol.
    diff <- 2 * tol
    It <- 0
    while (It < maxiter & diff > tol) {
      It <- It + 1
      Xfimis <- Xfi[Xind]
      XfiC <- sweep(Xfi, 2, center) # centering
      Tr <- XfiC %*% loadings # scores
      Xfihat <- Tr %*% t(loadings) # centered predictions
      Xfihat <- sweep(Xfihat, 2, center, "+") # uncentering
      Xfi[Xind] <- Xfihat[Xind]
      diff <- mean((Xfi[Xind] - Xfimis)^2)
    }
  }
  else {
    diff <- 0
    It <- 0
  }
  rownames(loadings) <- colnames(XO)
  colnames(loadings) <- paste0("PC", seq_len(ncol(loadings)))
  res <- list(MacroPCApars = MacroPCApars, DDC = resultDDC,
              scaleX = scaleX, k = k, loadings = loadings,
              eigenvalues = eigenvalues, center = center,
              It = It, diff = diff)
  # "nai" = NA-imputed data: only missing cells replaced, flagged cells kept.
  Xnai <- XO
  Xnai[indNA] <- Xfi[indNA]
  XnaiC <- sweep(Xnai, 2, center)
  scoresnai <- XnaiC %*% loadings
  res$scores <- scoresnai
  NAimp <- list(scoresnai = scoresnai)
  # Orthogonal/score distances against the cutoffs of the initial fit
  # (pca.distancesNew is a package-internal helper).
  cutoffOD <- InitialMacroPCA$cutoffOD
  out <- pca.distancesNew(res, Xnai, scoresnai, ncol(Xnai),
                          distprob, cutOD = cutoffOD)
  res$OD <- out$OD
  out$cutoffOD <- cutoffOD
  res$cutoffOD <- out$cutoffOD
  res$SD <- out$SD
  out$cutoffSD <- InitialMacroPCA$cutoffSD
  res$cutoffSD <- out$cutoffSD
  out$indrowsnai <- which(out$OD > out$cutoffOD)
  res$indrows <- out$indrowsnai
  NAimp <- c(NAimp, out)
  rm(out)
  # Standardized residuals of the original (scaled) data; cells exceeding
  # the chi-squared cutoff are flagged.
  XOC <- sweep(XO, 2, center)
  stdResid <- XOC - (scoresnai %*% t(loadings))
  res$residScale <- InitialMacroPCA$residScale
  res$stdResid <- sweep(stdResid, 2, res$residScale, "/")
  cellcutoff <- sqrt(qchisq(DDCpars$tolProb, 1))
  res$indcells <- which(abs(res$stdResid) > cellcutoff)
  # Report imputed data and center back on the original scale.
  res$X.NAimp <- sweep(Xnai, 2, scaleX, "*")
  res$center <- center * scaleX
  names(res$scaleX) <- colnames(Xnai)
  names(res$center) <- colnames(Xnai)
  names(res$residScale) <- colnames(Xnai)
  #
  if (bigOutput) {
    # Same analysis on the NA-imputed data ("nai" suffix)...
    stdResidnai <- XnaiC - (scoresnai %*% t(loadings))
    NAimp$residScalenai <- InitialMacroPCA$residScale
    NAimp$stdResidnai <- sweep(stdResidnai, 2, NAimp$residScalenai,
                               "/")
    NAimp$indcellsnai <- which(abs(NAimp$stdResidnai) > cellcutoff)
    names(NAimp$residScalenai) <- colnames(Xnai)
    res$NAimp <- NAimp
    #
    # ...on the cell-imputed data ("ci": flagged cells imputed, except in
    # rows that were themselves flagged as outlying)...
    Xci <- Xfi
    Xci[res$indrows] <- Xnai[res$indrows]
    XciC <- sweep(Xci, 2, center)
    scoresci <- XciC %*% loadings
    Cellimp <- list(scoresci = scoresci)
    out <- pca.distancesNew(res, Xci, scoresci, ncol(Xci),
                            distprob, cutOD = cutoffOD)
    out$cutoffOD <- cutoffOD
    out$cutoffSD <- InitialMacroPCA$cutoffSD
    out$indrowsci <- which(out$OD > out$cutoffOD)
    Cellimp <- c(Cellimp, out)
    rm(out)
    stdResidci <- XciC - (scoresci %*% t(loadings))
    Cellimp$residScaleci <- InitialMacroPCA$Cellimp$residScaleci
    Cellimp$stdResidci <- sweep(stdResidci, 2, Cellimp$residScaleci,
                                "/")
    Cellimp$indcellsci <- which(abs(Cellimp$stdResidci) > cellcutoff)
    Cellimp$Xci <- sweep(Xci, 2, scaleX, "*")
    names(Cellimp$residScaleci) <- colnames(Xnai)
    res$Cellimp <- Cellimp
    #
    # ...and on the fully imputed data ("fi": all flagged cells and NAs).
    XfiC <- sweep(Xfi, 2, center)
    scoresfi <- XfiC %*% loadings
    Fullimp <- list(scoresfi = scoresfi)
    out <- pca.distancesNew(res, Xfi, scoresfi, ncol(Xfi),
                            distprob, cutOD = cutoffOD)
    out$cutoffOD <- cutoffOD
    out$cutoffSD <- InitialMacroPCA$cutoffSD
    out$indrowsfi <- which(out$OD > out$cutoffOD)
    Fullimp <- c(Fullimp, out)
    rm(out)
    stdResidfi <- XfiC - (scoresfi %*% t(loadings))
    Fullimp$residScalefi <- InitialMacroPCA$Fullimp$residScalefi
    Fullimp$stdResidfi <- sweep(stdResidfi, 2, Fullimp$residScalefi,
                                "/")
    Fullimp$indcellsfi <- which(abs(Fullimp$stdResidfi) > cellcutoff)
    Fullimp$Xfi <- sweep(Xfi, 2, scaleX, "*")
    names(Fullimp$residScalefi) <- colnames(Xnai)
    res$Fullimp <- Fullimp
  }
  return(res)
}
|
50924afc48a2a313dcf659f44938e7104e9768cb
|
3f680c621d68cd817097e1a83915ceaead162e12
|
/man/combineKeepRF.Rd
|
0ce48939e28bf17dff31093b8ca0e12acf6dd656
|
[] |
no_license
|
rohan-shah/mpMap2
|
46273875750e7a564a17156f34439a4d93260d6c
|
c43bb51b348bdf6937e1b11298b9cdfe7a85e001
|
refs/heads/master
| 2021-05-23T20:34:59.327670
| 2020-07-19T10:24:09
| 2020-07-19T10:24:09
| 32,772,885
| 10
| 6
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,680
|
rd
|
combineKeepRF.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/combineKeepRF.R
\name{combineKeepRF}
\alias{combineKeepRF}
\title{Combine mpcross objects, keeping recombination fraction data}
\usage{
combineKeepRF(
object1,
object2,
verbose = TRUE,
gbLimit = -1,
callEstimateRF = TRUE,
skipValidity = FALSE
)
}
\arguments{
\item{object1}{An object of class \code{mpcrossRF}}
\item{object2}{Another object of class \code{mpcrossRF}}
\item{verbose}{Passed straight through to estimateRF}
\item{gbLimit}{Passed straight through to estimateRF}
\item{callEstimateRF}{Should \code{estimateRF} be called, to compute any missing estimates?}
\item{skipValidity}{Should we skip the validity check for object construction, in this function? Running the validity checks can be expensive, and in theory internal package code is trusted to generate valid objects.}
}
\value{
A new object of class \code{mpcrossRF} containing the combined information of the two input objects.
}
\description{
Combine mpcross objects, keeping recombination fraction data
}
\details{
This function takes two objects containing disjoint sets of markers, each containing estimated recombination fractions for their individual sets of markers. A new object is returned that contains the combined set of markers, and also contains recombination fraction data.
This function is more efficient than other ways of achieving this, as it keeps the recombination fraction data contained in the original objects. If \code{callEstimateRF} is \code{TRUE}, it also computes the missing recombination fraction estimates between markers in different objects, using a call to \code{estimateRF}.
}
|
4cffcf60bfa4b9454939efb4bf9a819560c40d5a
|
e3c3dbb97047f287cbeab9880539eaa5dfb988d7
|
/SE/Network Construction - WGCNA.R
|
7c3e45ae9adb240115261c61e34723da05cfba18
|
[] |
no_license
|
desilvakithmee/Research_GRN
|
3c07950a4e5edcda9f993df821ba56374f4de886
|
3e2c036219a2a6799e532bbd22c12b7d4acea46b
|
refs/heads/master
| 2022-04-06T11:49:30.706229
| 2020-02-28T02:02:38
| 2020-02-28T02:02:38
| 213,803,973
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 19,363
|
r
|
Network Construction - WGCNA.R
|
### ----------------------------- STEP I --------------------------------------------
##----------------------- Data Input and Preprocessing ------------------------------
# NOTE(review): absolute setwd() paths are machine-specific; prefer relative
# paths or an RStudio project.
setwd("D:/UNI/4TH YEAR/RESEARCH/Codes/Arabidopsis")
library(WGCNA)
options(stringsAsFactors = FALSE)
#Reading data
# NOTE(review): 'exp' and 'data' shadow base::exp()/utils::data(); harmless in
# this script but worth renaming.
exp = read.csv('data_processed.csv',header = T)
#exp = read.csv('data_processed_avg.csv', header = T) #using the averages is too noisy
#Preprocessing
# First column holds the gene IDs; transpose so rows = samples and
# columns = genes, the orientation WGCNA's functions expect.
header = as.character(exp[,1])
data = exp[,-1]
data = t(data)
colnames(data) = header
data = as.data.frame(data)
rm(header,exp)
#QC - checking for missing values
gsg = goodSamplesGenes(data, verbose = 2);
gsg$allOK
#Cluster samples
sampleTree = hclust(dist(data), method = "average")
# Plot the sample tree as a dendrogram
library(grDevices)
pdf(file = "Plots/1-sampleClustering.pdf", width = 12, height = 9)
par(cex = 0.6)
par(mar = c(0,4,2,0))
plot(sampleTree, main = "Sample clustering to detect outliers in Zygotic Embryogensis",
     sub="", xlab="", cex.lab = 1.5, cex.axis = 1.5, cex.main = 2)
dev.off()
#No outliers, therefore no need of removing samples
collectGarbage()
#saving data
save(data, file = "dataInput.RData")
### --------------------------------- STEP II ---------------------------------------
##-------------------- Network Construction & Module Detection -----------------------
# NOTE(review): rm(list = ls()) wipes the whole workspace -- fine for an
# interactive step-by-step pipeline, but avoid when sourcing from other code.
rm(list = ls(all.names = TRUE))
setwd("D:/UNI/4TH YEAR/RESEARCH/Codes/Arabidopsis")
library(WGCNA)
options(stringsAsFactors = FALSE)
enableWGCNAThreads()
library(grDevices)
# Load the data saved in the first part
load(file = "dataInput.RData")
# Choose a set of soft-thresholding powers
powers = c(c(1:10), seq(from = 12, to=30, by=5))
# Call the network topology analysis function
sft = pickSoftThreshold(data, powerVector = powers, verbose = 5)
# Plot the results:
pdf(file = "Plots/2-thresholding.pdf", width = 12, height = 9)
par(mfrow = c(1,2))
cex1 = 0.9
# Scale-free topology fit index as a function of the soft-thresholding power
plot(sft$fitIndices[,1], -sign(sft$fitIndices[,3])*sft$fitIndices[,2],
     xlab="Soft Threshold (power)",ylab="Scale Free Topology Model Fit,signed R^2",
     type="n", main = paste("Scale independence"))
text(sft$fitIndices[,1], -sign(sft$fitIndices[,3])*sft$fitIndices[,2],
     labels=powers,cex=cex1,col="red")
# this line corresponds to using an R^2 cut-off of h
abline(h=0.90,col="red")
# Mean connectivity as a function of the soft-thresholding power
plot(sft$fitIndices[,1], sft$fitIndices[,5],
     xlab="Soft Threshold (power)",ylab="Mean Connectivity", type="n",
     main = paste("Mean connectivity"))
text(sft$fitIndices[,1], sft$fitIndices[,5], labels=powers, cex=cex1,col="red")
dev.off()
##We choose the power 17, which is the lowest power for which the scale-free topology
#fit index curve flattens out upon reaching a high value (in this case, roughly 0.90)
## One-step network construction and module detection
net = blockwiseModules(data, power = 17, corType = "pearson", networkType = "signed",
                       TOMType = "signed", minModuleSize = 30,
                       reassignThreshold = 0, mergeCutHeight = 0.25,
                       numericLabels = TRUE, pamRespectsDendro = FALSE,
                       saveTOMs = TRUE,
                       saveTOMFileBase = "TOM",
                       verbose = 3)
#SIGNED NETWORK is selected as it is more accurate.
#https://peterlangfelder.com/2018/11/25/signed-or-unsigned-which-network-type-is-preferable/
#number of modules and module sizes
table(net$colors)
#plotting modules
# Convert labels to colors for plotting
mergedColors = labels2colors(net$colors)
# Plot the dendrogram and the module colors underneath
pdf(file = "Plots/3-Modules.pdf", width = 12, height = 9)
plotDendroAndColors(net$dendrograms[[1]], mergedColors[net$blockGenes[[1]]],
                    "Module colors",
                    dendroLabels = FALSE, hang = 0.03,
                    addGuide = TRUE, guideHang = 0.05)
dev.off()
#saving the environment
moduleLabels = net$colors
moduleColors = labels2colors(net$colors)
MEs0 = moduleEigengenes(data, moduleColors)$eigengenes
# Drop the grey "module" (unassigned genes) before saving.
MEs = removeGreyME(MEs0, greyMEName = paste(moduleColor.getMEprefix(), "grey", sep=""))
geneTree = net$dendrograms[[1]]
save(MEs, moduleLabels, moduleColors, geneTree,
     file = "networkConstruction.RData")
### --------------------------------- STEP III -----------------=======---------------
#------------------------------ Module-Trait Relationship ----------------------------
rm(list = ls(all.names = TRUE))
setwd("D:/UNI/4TH YEAR/RESEARCH/Codes/Arabidopsis")
library(WGCNA)
options(stringsAsFactors = FALSE)
library(grDevices)
#load data
load(file = "dataInput.RData")
load(file = 'dataInput_avg.RData')
# Load network data
load(file = "networkConstruction.RData")
# Define numbers of genes and samples
nGenes = ncol(data)
nSamples = nrow(data)
# Recalculate MEs with color
MEs0 = moduleEigengenes(data, moduleColors)$eigengenes
MEs0 = removeGreyME(MEs0,greyMEName = "MEgrey")
MEs = orderMEs(MEs0)
ME_names = colnames(MEs)
Zygotic = colMeans(MEs[1:2,])
Day7 = colMeans(MEs[3:4,])
Day14 = colMeans(MEs[5:6,])
Somatic = colMeans(MEs[7:8,])
MEs = data.frame(rbind(Zygotic,Day7,Day14,Somatic))
colnames(MEs) = ME_names
rm(Zygotic,Day14,Day7,Somatic)
head(MEs)
#trait data
trait = matrix(c(1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1),byrow = T,nrow = 4,ncol = 4)
colnames(trait) = row.names(MEs)
rownames(trait) = row.names(MEs)
trait = as.data.frame(trait)
trait
moduleTraitCor = cor(MEs,trait, use = "complete.obs")
moduleTraitPvalue = corPvalueStudent(moduleTraitCor, nSamples)
pdf(file = "Plots/6-Module-trait relationship.pdf", width = 12, height = 9,
pagecentre = T, paper = "a4r")
# Will display correlations and their p-values
textMatrix = paste(signif(moduleTraitCor, 2), "\n(",
signif(moduleTraitPvalue, 1), ")", sep = "");
dim(textMatrix) = dim(moduleTraitCor)
par(mar = c(4,8,2,2))
# Display the correlation values within a heatmap plot
labeledHeatmap(Matrix = moduleTraitCor,
xLabels = c("Stage I","Stage II","Stage III","Stage IV"),
yLabels = names(MEs),
ySymbols = names(MEs),
colorLabels = FALSE,
xLabelsAngle = 45,
colors = blueWhiteRed(50),
textMatrix = textMatrix,
setStdMargins = FALSE,
cex.text = 0.8,
zlim = c(-1,1),
plotLegend = TRUE,
main = paste("Module-trait relationships"))
dev.off()
# Define variable weight containing the weight column of datTrait
weight = as.data.frame(trait$Somatic)
names(weight) = "weight"
# names (colors) of the modules
modNames = substring(names(MEs), 3)
geneModuleMembership = as.data.frame(cor(data, MEs0, use = "p"))
MMPvalue = as.data.frame(corPvalueStudent(as.matrix(geneModuleMembership), nSamples))
names(geneModuleMembership) = paste("MM", modNames, sep="")
names(MMPvalue) = paste("p.MM", modNames, sep="")
geneTraitSignificance = as.data.frame(cor(dt, weight, use = "p"));
GSPvalue = as.data.frame(corPvalueStudent(as.matrix(geneTraitSignificance), nSamples))
names(geneTraitSignificance) = paste("GS.", names(weight), sep="")
names(GSPvalue) = paste("p.GS.", names(weight), sep="")
head(GSPvalue)
cor = abs(moduleTraitCor) >= 0.5
p = moduleTraitPvalue < 0.05
xx = cor & p
write.csv(xx, 'Module-Trait.csv', quote = F)
#annot = read.csv('annotation.csv')
# NOTE(review): the read above is commented out, so `annot` must already exist
# in the session for the next line to work; confirm.
annot = annot[,1:6]
# --- Top-10 hub genes per module (ranked by module membership, kME) ---------
clr = sort(unique(moduleColors))
clr = clr[clr != "grey"]  # grey = unassigned genes; skip it
hubs = character()
for (i in 1:length(clr)) {
  # NOTE(review): column i of geneModuleMembership is assumed to correspond
  # to module clr[i]; this holds only if the ME columns are in the same
  # alphabetical order as sort(unique(moduleColors)) -- verify.
  dt = geneModuleMembership[,i]
  kk = order(dt, decreasing = T)  # rank genes by kME within this module
  nm = rownames(geneModuleMembership)[kk]
  top10 = nm[1:10]
  hubs = cbind(hubs,top10)
  #mt = match(top10,annot$Probe.Set.ID)
  #kg = annot[mt,]
  #file = paste(clr[i],' hub genes annot.csv', sep = "")
  #write.csv(kg,file,row.names = F,quote = F)
}
hubs = as.data.frame(hubs)
colnames(hubs) = clr
write.csv(hubs,'Top 10 hub genes.csv', row.names = F, quote = F)
# --- Attach gene descriptions to the hub genes ------------------------------
setwd("D:/UNI/4TH YEAR/RESEARCH/Codes/Arabidopsis/Export")
options(stringsAsFactors = FALSE)
hubs = read.csv('Top 10 hub genes.csv')
gene = read.csv("gene_description.csv", header = T)
clr = colnames(hubs)
sum = data.frame()
for (i in 1:length(clr)) {
  gg = hubs[,i]
  # "ï..Column1" is the first column name mangled by a UTF-8 BOM in the CSV;
  # reading with fileEncoding = "UTF-8-BOM" would avoid this.
  key = match(gg, gene$ï..Column1)
  des = gene[key,]
  des[,5] = clr[i]  # tag each description row with its module colour
  sum = rbind.data.frame(sum,des)
}
write.csv(sum,'10 hub genes.csv', row.names = F, quote = F)
write.table(sum,'10 hub genes.txt', row.names = F, quote = F, sep = "\t")
#================== Plots =======================================================
#Module-trait siginificant
# Heatmap of the significant module-trait flags saved in 'Module-trait.csv'.
# NOTE(review): rm(list = ls()) + setwd() indicate this section is run as a
# stand-alone step; yet lines below ("abs(moduleTraitCor) > 0.6", the
# scatterplot loop) reuse objects deleted by this rm() -- the sections are
# evidently executed interactively and out of order.
rm(list = ls(all.names = TRUE))
setwd("D:/UNI/4TH YEAR/RESEARCH/Codes/Arabidopsis")
library(gplots)
mt = read.csv('Module-trait.csv',header = T)
rownames(mt) = mt$X
mt[,1] = NULL
mt = as.matrix(mt)
dt = as.integer(mt)  # logical TRUE/FALSE -> 1/0
dt = matrix(dt, nrow = 14,ncol = 4)
colnames(dt) = colnames(mt)
rownames(dt) = rownames(mt)
#dt = t(dt)
# Manual overrides: mark selected module/stage cells as negative (-1).
# NOTE(review): hard-coded row indices -- confirm they still match the data.
dt[c(10,14),1] = -1
dt[c(6,7),4] = -1
par(mar=c(7,4,4,2))
heatmap.2(dt, col = cm.colors(3), trace = "none", Colv = F, colsep=0:ncol(dt),
          rowsep=0:nrow(dt), sepcolor = c("lightgrey"),margins=c(8,8),
          dendrogram = "none", Rowv = F)
dev.off()
#================== Plots =======================================================
abs(moduleTraitCor) > 0.6
moduleTraitPvalue < 0.05
# Scatterplots of module membership vs. gene significance; clr1..clr4 list
# the modules of interest for stages I..IV (only clr4 / Stage IV is looped
# over here; the others are presumably used by editing the loop).
setwd("D:/UNI/4TH YEAR/RESEARCH/Codes/Arabidopsis/Plots/figures")
clr1 = c('green','magenta','yellow','brown','cyan','purple','red','salmon',
         'greenyellow','pink','turquoise')
clr2 = c('brown','cyan','tan','midnightblue')
clr3 = c('yellow','black','tan','greenyellow')
clr4 = c('blue','green','purple','red')
for (module in clr4){
  file = paste("Stage IV - ",module,".jpg", sep = "")
  column = match(module, modNames);
  moduleGenes = moduleColors==module;
  jpeg(file, width = 900, height = 600 )
  par(mfrow = c(1,1));
  verboseScatterplot(abs(geneModuleMembership[moduleGenes, column]),
                     abs(geneTraitSignificance[moduleGenes, 1]),
                     xlab = paste("Module Membership in", module, "module"),
                     ylab = "Gene significance for Stage 4",
                     main = paste("Module membership vs. gene significance\n"),
                     cex.main = 1.2, cex.lab = 1.2, cex.axis = 1.2, col = module)
  dev.off()
}
library(ggplot2)
library(gridExtra)
# Bar charts of the module-trait relationship for every module, one panel per
# developmental stage (columns of moduleTraitCor: Zygotic, Day7, Day14,
# Somatic -> Stages I-IV).
module = rownames(moduleTraitCor)
mod = data.frame(module, moduleTraitCor)
rownames(mod) = NULL
# One fill colour per module (modules are in alphabetical order).
clr = c('grey23','royalblue2','tan3','cyan2','limegreen','greenyellow','deeppink3',
        'hotpink1','mediumpurple2','red3','salmon','tan','turquoise3','gold')
# Helper that builds one stage panel; replaces four copy-pasted ggplot blocks
# with identical output.  `stage_col` is the column of `mod` to plot and
# `stage_label` the stage name shown in the title.
stage_plot = function(stage_col, stage_label) {
  ggplot(data = mod, aes(x = module, y = .data[[stage_col]], fill = module)) +
    geom_col() +
    scale_fill_manual(values = clr, guide = F) +
    scale_x_discrete(labels = NULL) +
    labs(title = paste("Module Expression in", stage_label)) +
    ylab("Module-trait relationship") +
    xlab("Modules")
}
stages = c(Zygotic = "Stage I", Day7 = "Stage II",
           Day14 = "Stage III", Somatic = "Stage IV")
plots = Map(stage_plot, names(stages), stages)
# Same 2x2 arrangement as the original grid.arrange(q1, q2, q3, q4, nrow = 2).
grid.arrange(grobs = plots, nrow = 2)
### ------------------------------- STEP IV -----------------------------------------
##----------------------- Network visualization -------------------------------------
rm(list = ls(all.names = TRUE))
setwd("D:/UNI/4TH YEAR/RESEARCH/Codes/Arabidopsis")
library(WGCNA)
options(stringsAsFactors = FALSE)
enableWGCNAThreads()
library(grDevices)
#load data
load(file = "dataInput.RData")
# Load network data saved in the second part.
load(file = "networkConstruction.RData")
#Number of genes and samples
nGenes = ncol(data)
nSamples = nrow(data)
#Visualization using Heatmaps
# Calculate topological overlap
# (the TOM computation is expensive; a cached RData file is loaded instead)
#TOM = TOMsimilarityFromExpr(data, power = 17)
#save(TOM,file = 'TOM_signed.RData')
load('TOM_signed.RData')
dissTOM = 1-TOM
# For reproducibility, we set the random seed
set.seed(10)
# Plot a random subset of genes only -- a full TOM heatmap is too large.
nSize = 1000
select = sample(nGenes, size = nSize)
selectTOM = dissTOM[select, select]
# Re-cluster the sampled genes (subsetting invalidates the original tree).
selectTree = hclust(as.dist(selectTOM), method = "average")
selectColors = moduleColors[select]
plotDiss = selectTOM^7 #makes the plot more informative
diag(plotDiss) = NA #improves the clarity of the plot
pdf(file = "Plots/4-Heatmap.pdf", width = 12, height = 9)
TOMplot(plotDiss, selectTree, selectColors,
        main = "Network heatmap plot, selected genes")
dev.off()
#Visualizing the network of eigengenes
MEs = moduleEigengenes(data, moduleColors)$eigengenes
MEs = removeGreyME(MEs,greyMEName = "MEgrey")
# Plot the relationships among the eigengenes and the trait
pdf(file = "Plots/5-Network_Eigengenes.pdf", width = 12, height = 9)
par(cex = 0.9)
plotEigengeneNetworks(MEs, "", marDendro = c(0,4,1,2), marHeatmap = c(3,4,1,2),
                      cex.lab = 0.8,
                      xLabelsAngle = 90)
dev.off()
### ------------------------------- STEP V -------------------------------------------
##------------------------ Exporting the Network -------------------------------------
rm(list = ls(all.names = TRUE))
setwd("D:/UNI/4TH YEAR/RESEARCH/Codes/Arabidopsis")
library(WGCNA)
options(stringsAsFactors = FALSE)
enableWGCNAThreads()
#load data
load(file = "dataInput.RData")
load(file = "networkConstruction.RData")
load('TOM_signed.RData')
#Hub genes
# Single most-connected (hub) gene per module, excluding the grey module.
hub = chooseTopHubInEachModule(data, moduleColors, omitColors = "grey",
                               power = 17, type = "signed")
write.table(hub,'Hub genes.txt',quote = F, col.names = F)
#EXPORTING TO CYTOSCAPE
setwd("D:/UNI/4TH YEAR/RESEARCH/Codes/Arabidopsis/Export")
clr = unique(moduleColors)
clr = sort(clr)
probes = names(data)
# Select modules
# One TOM csv plus one Cytoscape edge/node file pair per module; edges with
# TOM below the 0.01 threshold are dropped by exportNetworkToCytoscape.
for (modules in clr) {
  # Select module probes
  inModule = is.finite(match(moduleColors, modules));
  modProbes = probes[inModule];
  # Select the corresponding Topological Overlap
  modTOM = TOM[inModule, inModule]
  dimnames(modTOM) = list(modProbes, modProbes)
  modTOM = as.table(modTOM)
  write.csv(modTOM,file = paste("TOM",modules,".csv"))
  # Export the network into edge and node list files that Cytoscape can read
  cyt = exportNetworkToCytoscape(modTOM,
                                 edgeFile = paste("CytoscapeInput-edges-",
                                                  paste(modules, collapse="-"),
                                                  ".txt", sep=""),
                                 weighted = TRUE,
                                 threshold = 0.01,
                                 nodeNames = modProbes,
                                 nodeAttr = moduleColors[inModule]);
}
# Count the number of distinct genes per module that survive the edge-weight
# threshold used when exporting to Cytoscape, by reading each module's edge
# file back in and counting unique node names.
#
# Bug fix: the original called as.character(x$fromNode, x$toNode), which
# passes toNode as an ignored second argument, so genes appearing only as
# edge *targets* were never counted.  Both endpoint columns are now included.
count = numeric(length(clr))
mod = character(length(clr))
for (i in seq_along(clr)) {
  modules = clr[i]
  path = paste("CytoscapeInput-edges-", modules, ".txt", sep = "")
  x = read.delim(path)
  nodes = unique(c(as.character(x$fromNode), as.character(x$toNode)))
  count[i] = length(nodes)
  mod[i] = modules
}
summary = cbind(mod, count)
write.table(summary, file = 'Gene count.txt', row.names = F, quote = F)
head(summary)
### ------------------------------- STEP VI ------------------------------------------
##-------------------------- Validating the Network ----------------------------------
rm(list = ls(all.names = TRUE))
setwd("D:/UNI/4TH YEAR/RESEARCH/Codes/Arabidopsis")
library(WGCNA)
options(stringsAsFactors = FALSE)
enableWGCNAThreads()
#load data
load(file = "dataInput.RData")
load(file = "networkConstruction.RData")
softPower = 17
adjacency = adjacency(data, power = softPower)
# Module preservation setup: set 1 = the adjacency network, set 2 = the raw
# expression data, both using the same module colour assignment.
setLabels = c("Network", "Test")
multiExpr = list(Network = list(data = adjacency), Test = list(data = data))
multiColor = list(Network = moduleColors, Test = moduleColors);
nSets = 2
# Permutation test (200 permutations) -- slow; the result is cached below.
system.time( {
  mp = modulePreservation(multiExpr, multiColor,
                          referenceNetworks = c(1:2),
                          nPermutations = 200,
                          randomSeed = 1,
                          verbose = 3)
} );
# Save the results
save(mp, file = "ModulePreservation.RData")
ref = 1
test = 2
# Observed statistics and permutation Z scores (reference set 1 vs test set 2).
statsObs = cbind(mp$quality$observed[[ref]][[test]][, -1], mp$preservation$observed[[ref]][[test]][, -1])
statsZ = cbind(mp$quality$Z[[ref]][[test]][, -1], mp$preservation$Z[[ref]][[test]][, -1]);
print( cbind(statsObs[, c("medianRank.pres", "medianRank.qual")],
             signif(statsZ[, c("Zsummary.pres", "Zsummary.qual")], 2)) )
mp_sum = cbind(statsObs[, c("medianRank.pres", "medianRank.qual")],
               signif(statsZ[, c("Zsummary.pres", "Zsummary.qual")], 2))
write.csv(mp_sum,'Module Preservation.csv',quote = F)
# Module labels and module sizes are also contained in the results
modColors = rownames(mp$preservation$observed[[ref]][[test]])
moduleSizes = mp$preservation$Z[[ref]][[test]][, 1];
# leave grey and gold modules out
plotMods = !(modColors %in% c("grey", "gold"));
# Text labels for points
text = modColors[plotMods];
# Auxiliary convenience variable
plotData = cbind(mp$preservation$observed[[ref]][[test]][, 2], mp$preservation$Z[[ref]][[test]][, 2])
# Main titles for the plot
mains = c("Preservation Median rank", "Preservation Zsummary");
# Start the plot
sizeGrWindow(10, 5);
library(grDevices)
pdf(file = "Plots/7-modulePreservation.pdf", width = 12, height = 9)
par(mfrow = c(1,2))
par(mar = c(4.5,4.5,2.5,1))
# Left panel (p = 1): median preservation rank; right panel (p = 2): Zsummary
# with threshold lines at Z = 2 and Z = 10.
for (p in 1:2)
{
  min = min(plotData[, p], na.rm = TRUE);
  max = max(plotData[, p], na.rm = TRUE);
  # Adjust ploting ranges appropriately
  if (p==2)
  {
    if (min > -max/10) min = -max/10
    ylim = c(min - 0.1 * (max-min), max + 0.1 * (max-min))
  } else
    ylim = c(max + 0.1 * (max-min), min - 0.1 * (max-min))
  plot(moduleSizes[plotMods], plotData[plotMods, p], col = 1, bg = modColors[plotMods], pch = 21,
       main = mains[p],
       cex = 2.4,
       ylab = mains[p], xlab = "Module size", log = "x",
       ylim = ylim,
       xlim = c(10, 2000), cex.lab = 1.2, cex.axis = 1.2, cex.main =1.4)
  labelPoints(moduleSizes[plotMods], plotData[plotMods, p], text, cex = 1, offs = 0.08);
  # For Zsummary, add threshold lines
  if (p==2)
  {
    abline(h=0)
    abline(h=2, col = "blue", lty = 2)
    abline(h=10, col = "darkgreen", lty = 2)
  }
}
dev.off()
|
037979c8929b436d3a3c92766d32b3b93476e31f
|
c00b9a984cb8c8c9ae7a9e62d76eaeeac9863ab8
|
/firstRfunction.R
|
c6337a1961fb13aaec5bd2bde7c84490fc6e4853
|
[] |
no_license
|
stdare/R-EnrolViz
|
db7fecf422ecad17a34b81c4b2b097aad13a051e
|
2ef73fe99ef1ff63b5e4dd9ad82e8f1910d2eeec
|
refs/heads/master
| 2021-01-01T19:28:19.342143
| 2014-06-01T05:23:50
| 2014-06-01T05:23:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 382
|
r
|
firstRfunction.R
|
# NOTE(review): setwd() in a script is fragile; prefer reading the CSV via a
# full path instead.
setwd("~/GitHub/R-EnrolViz/data")
# Plot enrolment counts per leaving year as a bar chart.
# Reads trial.csv from the working directory, drops incomplete rows, and
# counts rows per YearLeft.  The `x` argument is unused; it is kept so the
# existing call signature seeplot(x) continues to work.
# Generalisation: the original hard-coded the years 2011-2014, so any other
# year present in the data was silently ignored; table() now counts whatever
# years occur, and the bars are labelled with their year.
seeplot <- function(x) {
  rawdataset <- read.csv("trial.csv")
  dataset <- na.omit(rawdataset)
  year_counts <- table(dataset$YearLeft)
  # Recycle the original four-colour palette over however many years exist.
  colors <- rep_len(c("red", "green", "yellow", "blue"), length(year_counts))
  barplot(as.vector(year_counts), names.arg = names(year_counts), col = colors,
          main = "Enrolments", xlab = "Years", ylab = "No.")
}
|
516f534a266c912a85cd589baf60bfe6b73fd4a4
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/AlphaSimR/examples/meanP.Rd.R
|
40016b85a41441cb19583c10533bd19d69f647d2
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 356
|
r
|
meanP.Rd.R
|
library(AlphaSimR)
### Name: meanP
### Title: Mean phenotypic values
### Aliases: meanP
### ** Examples
#Create founder haplotypes
# 10 individuals, 1 chromosome, 10 segregating sites per chromosome
founderPop = quickHaplo(nInd=10, nChr=1, segSites=10)
#Set simulation parameters
SP = SimParam$new(founderPop)
SP$addTraitA(10)  # one additive trait controlled by 10 QTL
SP$setVarE(h2=0.5)  # environmental variance chosen so that heritability = 0.5
#Create population
pop = newPop(founderPop, simParam=SP)
# Mean phenotypic value across the population
meanP(pop)
|
5db9b5c15887255ac267891a7d2b8b2f5e8cb299
|
221ec6024ee196f0f9305687ec5fab20e25df291
|
/new_objects.R
|
7bc710824a519b30d52b75f4509670061e506a32
|
[] |
no_license
|
dannemil/comt_expression
|
2f6dd8e4f664ea3ede73cbab1a96fb14ae58eafb
|
c8e3c2779e99e78ace471fa22ed2c086bf823a08
|
refs/heads/master
| 2020-03-07T05:32:47.971796
| 2018-07-16T17:06:08
| 2018-07-16T17:06:08
| 127,298,861
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,720
|
r
|
new_objects.R
|
# Program to determine which new objects were created in an R session so that the ones that are temporary can be removed.
work.path <- c('/Volumes/Macintosh_HD_3/genetics/genenetwork2/')
setwd(work.path)
# Include sourced programs here.
# NOTE(review): only source.prog$rcode ('func_cbind_na') is sourced below;
# the unnamed 'func_rbind_na' / 'func_new_objects' entries become oddly named
# columns and are never used -- yet new.objects() is called later, so it must
# come from somewhere else (e.g. func_cbind_na.R).  Confirm.
source.prog <- data.frame(rcode='func_cbind_na',
                          'func_rbind_na',
                          'func_new_objects'
)
source(paste(work.path,source.prog$rcode,'.R',sep=''))
# Generate a unique ID for this program by path and filename. This unique ID is attached to this file using a tag in the filesystem.
source('func_unique_id_generator.R')
fname <- csf() # function to get current path including file name (from sourced file)
prog.Name <- unique.ID(fname)
prog.Name
##############################
# persisting_objects.txt is the file that contains the objects left after the last round of deleting unused objects.
#objects.to.keep <- matrix(c('initialization'),ncol=1)
# fileConn <- file("active_objects.txt")
# objects.to.edit <- readLines(con = fileConn)
# close(fileConn)
# STEP 1: Uses function new.objects to get the objects that have been added since last rm operation.
# objects.to.edit <- new.objects(ls(all.names=TRUE,sorted=TRUE),objects.to.keep)
# OR after initial use of this program, read in file persisting_objects.txt
fileConn <- file("persisting_objects.txt")
persisting.objects <- readLines(con = fileConn)
close(fileConn)
# STEP 2: compare list of object currently in memory to persisting list
current.objects <- ls(all.names=TRUE,sorted=TRUE)
objects.new <- new.objects(current.objects,persisting.objects)
# STEP 3: write out new objects to edit_these_objects.txt
fileConn <- file("edit_these_objects.txt")
writeLines(objects.new, fileConn)
close(fileConn)
# STEP 4: Edit file edit_these_objects.txt by DELETING the objects that you don't want to keep.
file.edit("edit_these_objects.txt")
read.edit.done <- function()
{
  # Keep prompting until the user answers "y" (continue) or "n" (re-open the
  # file for editing and ask again).  Any other answer re-prompts.
  repeat {
    ans <- readline(prompt="Did you edit the file? (y/n) ")
    if (identical(ans,'n'))
    {
      file.edit("edit_these_objects.txt")
    } else if (identical(ans,'y')) {
      return('Continue to next step.')
    } else {
      cat('Please type y or n.')
    }
  }
}
# STEP 5: Read in edit_these_objects.txt as objects.to.keep; anything in
# objects.new that the user deleted from the file is slated for removal.
fileConn <- file("edit_these_objects.txt")
objects.to.keep <- readLines(con = fileConn)
close(fileConn)
# setdiff() replaces the original index gymnastics (which/seq/setdiff on
# positions) with the equivalent set operation on names.
objects.to.remove <- setdiff(objects.new, objects.to.keep)
# STEP 6: Remove these objects from memory
rm(list = objects.to.remove)
# STEP 7a: Append objects.to.keep to the existing persisting list and
# overwrite persisting_objects.txt.
# Bug fix: the original built new.persisting.objects but then wrote
# ls(all.names = TRUE) to the file, which would also persist this script's own
# temporary variables (fileConn, objects.new, ...), defeating the purpose of
# the keep list.  The assembled list is now actually written.
new.persisting.objects <- unique(c(persisting.objects, objects.to.keep))
fileConn <- file("persisting_objects.txt")
writeLines(new.persisting.objects, fileConn)
close(fileConn)
# Repeat from STEP 1
|
9dca9b03f4ba0b33acf6f7ae0c846d2a8a340934
|
90d74d03513e588f1f0161846dfd9657c78feae8
|
/R/add_light.R
|
d2e2a9d580c78a9357dafe3e4bc396b9978faa69
|
[
"MIT"
] |
permissive
|
ropensci/unifir
|
859fe03f09e7f2a96cc0785d02f1153c465f24c1
|
e5c1df562b43751775e04777e204777646390c42
|
refs/heads/main
| 2023-05-23T12:33:33.104020
| 2022-12-04T15:15:46
| 2022-12-04T15:15:46
| 373,628,173
| 22
| 0
|
NOASSERTION
| 2022-12-02T17:12:52
| 2021-06-03T20:06:53
|
R
|
UTF-8
|
R
| false
| false
| 2,973
|
r
|
add_light.R
|
#' Add a light to a Unity scene
#'
#' This function creates light objects within a Unity scene. This function can
#' only add one light at a time -- call the function multiple times to add
#' more than one light.
#'
#' @param light_type One of "Directional", "Point", "Spot", or "Area". See
#' <https://docs.unity3d.com/Manual/Lighting.html> for more information.
#' @param light_name The name to assign the Light object.
#' @inheritParams instantiate_prefab
#'
#' @family props
#'
#' @examples
#' # First, create a script object.
#' # CRAN doesn't have Unity installed, so pass
#' # a waiver object to skip the Unity-lookup stage:
#' script <- make_script("example_script", unity = waiver())
#'
#' # Now add props:
#' script <- add_light(script)
#'
#' # Lastly, execute the script via the `action` function
#'
#' @return The `unifir_script` object passed to `script`, with props for adding
#' lights appended.
#'
#' @export
add_light <- function(script,
                      light_type = c(
                        "Directional",
                        "Point",
                        "Spot",
                        "Area"
                      ),
                      method_name = NULL,
                      light_name = "Light",
                      x_position = 0,
                      y_position = 0,
                      z_position = 0,
                      x_scale = 1,
                      y_scale = 1,
                      z_scale = 1,
                      x_rotation = 50,
                      y_rotation = -30,
                      z_rotation = 0,
                      exec = TRUE) {
  # Validate light_type against the allowed set; defaults to "Directional".
  light_type <- match.arg(light_type)
  # Build a prop from the AddLight.cs C# template shipped with the package.
  # The `build` callback is invoked at script-build time and fills the
  # %placeholder% tokens in the template via glue.
  prop <- unifir_prop(
    prop_file = system.file("AddLight.cs", package = "unifir"),
    method_name = method_name,
    method_type = "InstantiatePrefab",
    parameters = list(
      light_name = light_name,
      light_type = light_type,
      x_position = x_position,
      y_position = y_position,
      z_position = z_position,
      x_scale = x_scale,
      y_scale = y_scale,
      z_scale = z_scale,
      x_rotation = x_rotation,
      y_rotation = y_rotation,
      z_rotation = z_rotation
    ),
    build = function(script, prop, debug) {
      # Read the whole template file and interpolate the stored parameters.
      # Delimiters are %...% because the C# source itself contains braces.
      glue::glue(
        readChar(prop$prop_file, file.info(prop$prop_file)$size),
        .open = "%",
        .close = "%",
        method_name = prop$method_name,
        light_name = prop$parameters$light_name,
        light_type = prop$parameters$light_type,
        x_position = prop$parameters$x_position,
        y_position = prop$parameters$y_position,
        z_position = prop$parameters$z_position,
        x_scale = prop$parameters$x_scale,
        y_scale = prop$parameters$y_scale,
        z_scale = prop$parameters$z_scale,
        x_rotation = prop$parameters$x_rotation,
        y_rotation = prop$parameters$y_rotation,
        z_rotation = prop$parameters$z_rotation
      )
    },
    using = c("UnityEngine")
  )
  # Register the prop on the script (queued for execution when exec = TRUE).
  add_prop(script, prop, exec)
}
|
f65b751c388c377019716f586cec67dc7c31ad96
|
b7cc80a80bd647649e5d828a0f95dfa0abcb4368
|
/cvvm201310_MDS.R
|
be51128b14565c7226af3a9be38fec53d1861901
|
[] |
no_license
|
krojtous/CzechParliamentaryElections
|
de9b4262d73b09f89e4f72ffd10badaf582560fc
|
ac87b44c32339042ee9238b3d1c2b51681ff6777
|
refs/heads/master
| 2021-01-11T22:41:53.306363
| 2017-01-26T17:05:25
| 2017-01-26T17:05:25
| 79,015,826
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,025
|
r
|
cvvm201310_MDS.R
|
#Date: 18.1.2017
#Author: Matouš Pilnáček - Public Opinion Research Centre, Czech Academy of Science
#E-mail: matous.pilnacek@soc.cas.cz
#Description: Multidimensional scaling of voters by party and clusters of undecided voters
#--------------Load data----------------
library(foreign)
# CVVM survey data (SPSS .sav); keep numeric codes, not value labels.
cvvm = read.spss(file = "./data/V1310/V1310_F1.sav",
                 to.data.frame = TRUE,
                 use.value.labels = FALSE,
                 use.missings = FALSE)
#---------------Extract party labels-------------------------------------------
# Map of party code -> party name, taken from the SPSS value labels of PV_4.
labels = as.data.frame(cbind(attr(cvvm$PV_4, "value.labels"), names(attr(cvvm$PV_4, "value.labels"))))
names(labels) = c("voting", "label")
#-------------Compute voting variable and turnout---------------------------
# Respondents answering "no" or "rather no" on turnout (PV_1) are excluded.
cvvm$voting = cvvm$PV_4
cvvm[cvvm$PV_4 %in% c(0,98), "voting"] = NA # don't know / probably won't go to elections
cvvm[cvvm$PV_1 %in% c(0,9,3,4,8), "voting"] = NA  # non-voters by turnout question
tableRel = function(data, variable){
  # Frequency table of one column of `data`: absolute counts ("abs") and
  # percentages of the total, rounded to two decimals ("rel").
  counts = as.data.frame(table(data[[variable]]))
  out = cbind(counts, round(100 * counts$Freq / sum(counts$Freq), 2))
  names(out) = c(variable, "abs", "rel")
  out
}
votingAll = tableRel(cvvm, "voting")
#------------Recode small parties to others---------------------------------
# Parties below `limit` percent of decided voters are pooled into code 97
# ("others").
limit = 3
others = as.vector(votingAll[votingAll$rel < limit, "voting"])
cvvm[cvvm$PV_4 %in% others, "voting"] = "97"
votingBig = tableRel(cvvm, "voting")
#------------Remove category others from Big table (but in 100% is included category others)----------------------
votingBig = votingBig[votingBig$voting != "97",]
#-----------Remove category others from var voting---------------------
cvvm = cvvm[cvvm$voting != "97" & cvvm$voting != "99" & !is.na(cvvm$voting),]
table(cvvm$voting)
#-----------Add labels-------------------------------------------------
merge(labels, votingBig)
#-----------variables sorted by party number----------------------------
# Attitude items ("would vote for party X"), one column per large party.
attit = c("PV_170a","PV_170d","PV_170b","PV_170g","PV_170j","PV_170i","PV_170h","PV_170c") #removed "PV_170e" - Green Party (strana zelenych)
# PV.170a Would vote for party – CSSD
# PV.170b Would vote for party – ODS
# PV.170c Would vote for party – KSCM
# PV.170d Would vote for party – TOP 09
# PV.170e Would vote for party – SZ (Greens)
# PV.170g Would vote for party – KDU-CSL
# PV.170h Would vote for party – ANO 2011
# PV.170i Would vote for party – Usvit
# PV.170j Would vote for party – SPOZ
#----------remove dont know (99)----------------------------
for(i in attit){
  cvvm[cvvm[,i] %in% "99",i] = NA
}
#----------Mean attitude of each party's voters towards the other parties----
attitTab = aggregate(cvvm[,attit], by=list(cvvm$voting), FUN = mean, na.rm = TRUE)
attitTab = merge(labels, attitTab, by.x = "voting", by.y = "Group.1")
# Sort rows by numeric party code so rows and attitude columns line up.
attitTab = attitTab[order(as.numeric(as.vector(attitTab$voting))),]
#---------proximity to distance, diag to zero, symetrize-----------------------------
# Columns 3:10 of attitTab hold the mean attitude scores (one column per
# party, in party-number order).  Symmetrise, invert so that higher mutual
# sympathy means smaller distance, and zero the diagonal.
s = attitTab[,c(3:10)]
s = 0.5 * (s + t(s))
s = 1/s *10
diag(s) = 0
#--------MDS-------------------------------------------------------------------
# Classical (metric) multidimensional scaling into 2 dimensions.
fit = cmdscale(s, eig = TRUE, k = 2)
#-------extract data for plot--------------------------------------------
MDS = data.frame(fit$points[, 1])
MDS = cbind (MDS, fit$points[, 2])
# Align party support (abs/rel) with the MDS rows by numeric party code.
votingBig = votingBig[order(as.numeric(as.vector(votingBig$voting))),]
tmp = votingBig[votingBig$voting != 99, c(2,3)]
MDS = cbind ( attitTab[,2], tmp, MDS)
names(MDS) = c("party", "abs", "rel", "x", "y")
#-------Plot in plotly----------------------------------------------------
# Bubble chart: position = MDS coordinates, bubble size = party support (%).
library(plotly)
p <- plot_ly(MDS, x = ~x, y = ~y, type = 'scatter', mode = 'markers',
             marker = list(size = ~rel*2.5, opacity = 1, color = 'blue'),
             hoverinfo = 'text',
             text = ~paste('<b>',party, '</b><br>')) %>%
  layout(title = 'Rozložení stran - říjen 2013 (MDS)',
         xaxis = list(showgrid = FALSE, title = "x"),
         yaxis = list(showgrid = FALSE, title = "y"))
p
|
68d00f14555c8ac165a3bb14f5f6f36b519176d1
|
6d3fb21b34d50c70c0525bba1bcf40b0d4008c21
|
/man/student_equity.Rd
|
45f6655e91bf9db500f39712dfe33b5d0898a5b8
|
[] |
no_license
|
vinhdizzo/DisImpact
|
2df6051da147dbf8d3bd7292ac7a5439e1d6d269
|
9d0cc5ccd79e06acd3854365b7cf8edfb6991c9c
|
refs/heads/master
| 2022-10-14T10:13:45.066043
| 2022-10-10T17:37:21
| 2022-10-10T17:37:21
| 134,330,630
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,550
|
rd
|
student_equity.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_student_equity.R
\docType{data}
\name{student_equity}
\alias{student_equity}
\title{Fake data on student equity}
\format{
A data frame with 20,000 rows:
\describe{
\item{Ethnicity}{ethnicity (one of: \code{Asian}, \code{Black}, \code{Hispanic}, \code{Multi-Ethnicity}, \code{Native American}, \code{White}).}
\item{Gender}{gender (one of: \code{Male}, \code{Female}, \code{Other}).}
\item{Cohort}{year student first enrolled in any credit course at the institution (one of: \code{2017}, \code{2018}).}
\item{Transfer}{1 or 0 indicating whether or not a student transferred within 2 years of first enrollment (\code{Cohort}).}
\item{Cohort_Math}{year student first enrolled in a math course at the institution; could be \code{NA} if the student have not attempted math.}
\item{Math}{1 or 0 indicating whether or not a student completed transfer-level math within 1 year of their first math attempt (\code{Cohort_Math}); could be \code{NA} if the student have not attempted math.}
\item{Cohort_English}{year student first enrolled in an English course at the institution; could be \code{NA} if the student has not attempted English.}
\item{English}{1 or 0 indicating whether or not a student completed transfer-level English within 1 year of their first math attempt (\code{Cohort_English}); could be \code{NA} if the student have not attempted English.}
\item{Ed_Goal}{student's educational goal (one of: \code{Deg/Transfer}, \code{Other}).}
\item{College_Status}{student's educational status (one of: \code{First-time College}, \code{Other}).}
\item{Student_ID}{student's unique identifier.}
\item{EthnicityFlag_Asian}{1 (yes) or 0 (no) indicating whether or not a student self-identifies as Asian.}
\item{EthnicityFlag_Black}{1 (yes) or 0 (no) indicating whether or not a student self-identifies as Black.}
\item{EthnicityFlag_Hispanic}{1 (yes) or 0 (no) indicating whether or not a student self-identifies as Hispanic.}
\item{EthnicityFlag_NativeAmerican}{1 (yes) or 0 (no) indicating whether or not a student self-identifies as Native American.}
\item{EthnicityFlag_PacificIslander}{1 (yes) or 0 (no) indicating whether or not a student self-identifies as Pacific Islander.}
\item{EthnicityFlag_White}{1 (yes) or 0 (no) indicating whether or not a student self-identifies as White.}
\item{EthnicityFlag_Carribean}{1 (yes) or 0 (no) indicating whether or not a student self-identifies as Carribean.}
\item{EthnicityFlag_EastAsian}{1 (yes) or 0 (no) indicating whether or not a student self-identifies as East Asian.}
\item{EthnicityFlag_SouthEastAsian}{1 (yes) or 0 (no) indicating whether or not a student self-identifies as Southeast Asian.}
\item{EthnicityFlag_SouthWestAsianNorthAfrican}{1 (yes) or 0 (no) indicating whether or not a student self-identifies as Southwest Asian / North African (SWANA).}
\item{EthnicityFlag_AANAPI}{1 (yes) or 0 (no) indicating whether or not a student self-identifies as Asian-American or Native American Pacific Islander (AANAPI).}
\item{EthnicityFlag_Unknown}{1 (yes) or 0 (no) indicating whether or not a student self-identifies as Unknown.}
\item{EthnicityFlag_TwoorMoreRaces}{1 (yes) or 0 (no) indicating whether or not a student self-identifies as two or more races.}
}
}
\usage{
data(student_equity)
}
\description{
Data randomly generated to illustrate the use of the package.
}
\examples{
data(student_equity)
}
\keyword{datasets}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.