blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e209f55a8af9fa5adfea58ff4ffd90056864da73
|
73763542bf5879333697d2b0e3831db9509ba2ee
|
/Self_Assigment_R/nomor2.R
|
ad5f766a7c80697cbd3d541c9850949be13a87d2
|
[] |
no_license
|
Aminurachma/my-first-repository
|
4ababe7fcea99062de099d8c041deb4c9e2fb7e1
|
3e0e037b72f923abc24766a6911168f54bc77ea8
|
refs/heads/master
| 2021-05-16T18:42:22.238113
| 2020-06-21T06:23:22
| 2020-06-21T06:23:22
| 250,424,058
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 195
|
r
|
nomor2.R
|
# nomor2.R: keep only the Office Supplies rows, projected to category,
# sub-category and profit.
library(dplyr)

# Raw superstore export (absolute Windows path; adjust if the file moves).
dataset <- read.csv('D:/TugasDM/AssessmentR_Dataset_superstore_simple.csv')

# Filter first, then project the three columns of interest; the result is
# identical to selecting first because `category` is kept by the select.
dataset_2 <- dataset %>%
  filter(category == 'Office Supplies') %>%
  select(category, sub_category, profit)
|
49414a0bda7aee20d1f867d981216d7bf84db558
|
42276c737c5874699b685bd882aa0ddc4073275f
|
/plot3.R
|
a290870123b8a8fff6c8234427a3d152d0105cc7
|
[] |
no_license
|
lszperling/ExData_Plotting1
|
da82093d110fef43f169ddd8b92200ae13240748
|
ebeb7194eb951d7ee30401faaa007e39b392b7af
|
refs/heads/master
| 2020-12-03T07:57:53.446660
| 2014-12-07T19:49:07
| 2014-12-07T19:49:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 715
|
r
|
plot3.R
|
# plot3.R: plot the three energy sub-metering series on a single chart and
# write it to plot3.png.

# Project-specific loader (defined in load_file.R) that returns the subset.
source("load_file.R")
sub <- load_data()

# Open the PNG device where the result will be written.
png(filename = "plot3.png", width = 480, height = 480)

# Base plot holds the first series; the other two are layered on the same
# scale with lines().
plot(sub$coso, sub$Sub_metering_1,
     type = "l", main = "", ylab = "Energy sub metering", xlab = "")
lines(sub$coso, sub$Sub_metering_2, type = "l", col = "red")
lines(sub$coso, sub$Sub_metering_3, type = "l", col = "blue")

# Legend identifying each series by colour.
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = c(1, 1, 1), col = c("black", "red", "blue"))

# Flush the plot to the file and close the device.
dev.off()
|
7f5f2d473cd457496595b9adfb5412417dab98a7
|
142dbad11a73fd0a8c62ce3cd0ea1901b46d7b95
|
/script/Fekete_Gergo/understanding-of-FDR-2.R
|
a270e12848db935d17bcffb8fc3cea382d9ff2e1
|
[] |
no_license
|
feketegergo/R-class-2020
|
fee771b589ba1757ee87e885e97eb08a1b456fb5
|
f0e72d1b852d34c1432ae21a74d06c5213930e02
|
refs/heads/master
| 2022-12-07T04:46:15.407622
| 2020-09-02T10:26:20
| 2020-09-02T10:26:20
| 284,748,204
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,450
|
r
|
understanding-of-FDR-2.R
|
# understanding-of-FDR-2.R
# Simulate 4400 one-sided t-tests -- 4000 where the null hypothesis is true
# and 400 with a genuine effect -- then visualise the p-value distribution
# before and after false-discovery-rate (FDR) correction.
library("tidyverse")

# NOTE(review): rm(list = ls()) in a script is generally discouraged (it
# clears the caller's workspace); kept here to preserve original behaviour.
rm(list=ls())

# NT = untreated reference group, AMP = treated group.
mean_NT<-1.2
sigma_NT<-0.15
mean_AMP<-0.25
sigma_AMP<-0.8
N_noEffect<-4000  # experiments where the null hypothesis is true
N_effect<-400     # experiments with a genuine effect

# One shared reference sample (n = 15) reused by every t-test below.
od_NT<-rnorm(15,mean = mean_NT, sd = sigma_NT)

# Run a single simulated experiment: draw 5 "treated" observations from the
# given distribution, t-test them against the shared reference (one-sided,
# "less"), and return a one-row tibble.
run_experiment <- function(i, amp_mean, amp_sigma, label) {
  od_AMP <- rnorm(5, mean = amp_mean, sd = amp_sigma)
  result_of_t_test <- t.test(od_AMP, od_NT, alternative = "less")
  tibble(experiment_id = sprintf("exp_%03i", i),
         p = result_of_t_test$p.value,
         type = label)
}

# Fix: the original grew tbl1 with add_row() inside two loops -- 4400
# iterations of quadratic copying. Build the rows as lists and bind once.
# The order of rnorm() draws is unchanged, so results are identical under a
# fixed seed.
tbl1 <- bind_rows(
  lapply(1:N_noEffect,
         function(i) run_experiment(i, mean_NT, sigma_NT, "no-effect")),
  lapply(N_noEffect + (1:N_effect),
         function(i) run_experiment(i, mean_AMP, sigma_AMP, "operative"))
)

# Raw p-value histogram, coarse (0.05) bins.
tbl1 %>% ggplot(aes(x=p))+
  geom_histogram(breaks=seq(from=0, to=1, by = 0.05)) +
  geom_vline(xintercept = 0.05, color="red" , linetype="dashed")+
  labs(title = "Histogram finom nagy bin-ekkel")

# Raw p-value histogram, fine (0.01) bins.
tbl1 %>% ggplot(aes(x=p))+
  geom_histogram(breaks=seq(from=0, to=1, by = 0.01)) +
  geom_vline(xintercept = 0.05, color="red" , linetype="dashed")+
  labs(title = "Histogram finom bin felosztassal")

# Empirical CDF of the p-values with the uniform reference diagonal.
tbl1 %>% ggplot(aes(x=p)) +
  geom_abline(color="blue", linetype="dotted")+stat_ecdf() +
  labs(title = "CDF of p-values (Cumulative distribution function)")

# FDR-adjust the p-values and flag significance at 0.05 before/after.
tbl1<-tbl1 %>%
  dplyr::mutate(p_fdr=p.adjust(p, method = "fdr")) %>%
  mutate(significant=ifelse(p<0.05,"significant","non-significant")) %>%
  mutate(significant_fdr=ifelse(p_fdr<0.05,"significant_after_fdr","non-significant_after_fdr"))

# Confusion-matrix labels for the raw p-values.
tbl1<-tbl1 %>%
  mutate(ok=case_when(
    (significant=="significant" & type=="operative") ~ "True-Positive",
    (significant=="significant" & type=="no-effect") ~ "False-Positive",
    (significant=="non-significant" & type=="operative") ~ "False-Negative",
    (significant=="non-significant" & type=="no-effect") ~ "True-Negative"))

# Confusion-matrix labels for the FDR-adjusted p-values.
tbl1<-tbl1 %>%
  mutate(ok_fdr=case_when(
    (significant_fdr=="significant_after_fdr" & type=="operative") ~ "True-Positive",
    (significant_fdr=="significant_after_fdr" & type=="no-effect") ~ "False-Positive",
    (significant_fdr=="non-significant_after_fdr" & type=="operative") ~ "False-Negative",
    (significant_fdr=="non-significant_after_fdr" & type=="no-effect") ~ "True-Negative"))

# Palette for the confusion labels; kept for future use (it was only
# referenced by an exploratory mosaic-plot block that the original author
# had already commented out, removed here as dead code).
my_colors_1<-c(
  "True-Negative"="blue",
  "False-Positive"="red",
  "True-Positive"="darkblue",
  "False-Negative"="purple"
)

# Two-colour histogram of raw p-values, split by true experiment type.
tbl1 %>% ggplot(aes(x=p, fill=factor(type,levels =c("operative","no-effect") ))) +
  geom_histogram(breaks=seq(from=0, to=1, by = 0.01)) +
  geom_vline(xintercept = 0.05, color="red" , linetype="dashed")+
  geom_text(x = 0.05, y=0,label= "p=0.05", color="red", angle=90, hjust=-4, vjust=-0.3)+
  scale_fill_manual(values = c("operative"="pink2", "no-effect"="blue"))+
  labs(title="raw p-values" , fill="valodi tipus")
ggsave(filename = "out/undersatndig-of-FDR/hitogram-twoColor.jpg")

# Stacked comparison: raw p-values (top) vs FDR-adjusted (bottom).
cowplot::plot_grid(
  tbl1 %>% ggplot(aes(x=p, fill=factor(type,levels =c("operative","no-effect") ))) +
    geom_histogram(breaks=seq(from=0, to=1, by = 0.01)) +
    geom_vline(xintercept = 0.05, color="red" , linetype="dashed")+
    geom_text(x = 0.05, y=0,label= "p=0.05", color="red", angle=90, hjust=-4, vjust=-0.3)+
    labs(title="raw p-values" , fill="valodi tipus") ,
  tbl1 %>% ggplot(aes(x=p_fdr, fill=factor(type,levels =c("operative","no-effect") ))) +
    geom_histogram(breaks=seq(from=0, to=1, by = 0.01)) +
    geom_vline(xintercept = 0.05, color="red" , linetype="dashed")+
    geom_text(x = 0.05, y=0,label= "p=0.05", color="red", angle=90, hjust=-4, vjust=-0.3)+
    geom_vline(xintercept = 0.10, color="red" , linetype="dashed")+
    geom_text(x = 0.10, y=0,label= "p=0.10", color="red", angle=90, hjust=-4, vjust=-0.3)+
    labs(title="FDR", fill="valodi tipus") ,
  ncol=1
)
ggsave(filename = "out/undersatndig-of-FDR/hitogram-dual.jpg")
|
4903e36ab45982c52d43c9cf6051ab835debd94a
|
89491fef8c724a2500434f220780f3300017ff38
|
/demo/demoIRIS.R
|
7aad1d9839ea811b4baab87ad18299b29962301b
|
[] |
no_license
|
cran/FKBL
|
ccafa5c7acbc14abad415b641d7d3e29004a658b
|
ec6c9300a8c01950db07ff57a93940b98936ed48
|
refs/heads/master
| 2016-09-05T20:16:08.923166
| 2007-03-31T00:00:00
| 2007-03-31T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 519
|
r
|
demoIRIS.R
|
# demoIRIS.R: train an FKBL knowledge base on the iris training split and
# report classification error on both the training and test splits.
data(Pl)
data(iris.train)
data(iris.test)

trainl <- iris.train
testl <- iris.test

# NOTE(review): the original pre-initialised kB, classTST, classTRN, e,
# errorTST and errorTRN with placeholder values; every one of those objects
# was overwritten below before first use, so the dead stores were removed.

# Build the knowledge base from the training data with parameter set Pl.
kB <- expGetKb(train = trainl, P = Pl)

# Classify both splits with the learned knowledge base.
classTST <- expGetCl(testl, kB)
classTRN <- expGetCl(trainl, kB)

# Per-configuration error tables for each split.
errorTRN <- expGetE(trainl, classTRN)
errorTST <- expGetE(testl, classTST)

print("Train")
print(errorTRN)
print("Test")
print(errorTST)
|
fcf1e69307935c8be0825060bbedc38bcc6bf522
|
7ef0e1b1f6d798075a83873b96dd441db3d7ec0e
|
/man/helloworld.Rd
|
6c3780dccaa1f174d192e4a9f5000fc13db56720
|
[
"MIT"
] |
permissive
|
nischalshrestha/hellopkgdown
|
2f820f56455304f327cf3b1d3e77724bcdba1239
|
b2b03c7fa47e1f04e6c9e5548396732a3307927b
|
refs/heads/main
| 2023-03-07T11:41:52.892115
| 2021-02-23T19:21:36
| 2021-02-23T19:21:36
| 341,633,782
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 303
|
rd
|
helloworld.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helloworld.R
\name{helloworld}
\alias{helloworld}
\title{Prints Hello, World! to the console.}
\usage{
helloworld()
}
\value{
No value returned
}
\description{
Prints Hello, World! to the console.
}
\examples{
helloworld()
}
|
0e3c1173260c2ab3120838347eb946d091ae30d9
|
d6cce6fbbb675f21d6010bcc710bd551497e21cc
|
/man/generator_PLN.Rd
|
2a32ae87724d39ee7c33709fb14dc81608a4d5a6
|
[] |
no_license
|
aulenbac/EMtree
|
b6915965aa71572f09bf3067978f6dc90c35e8d2
|
cfd17f0a808132e89a3729519241bc8a41a03fd3
|
refs/heads/master
| 2022-04-09T20:40:24.803364
| 2020-04-01T08:33:16
| 2020-04-01T08:33:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 723
|
rd
|
generator_PLN.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gener_data.R
\name{generator_PLN}
\alias{generator_PLN}
\title{Simulate count data under the Poisson log-Normal model}
\usage{
generator_PLN(Sigma, covariates = NULL, n = 50)
}
\arguments{
\item{Sigma}{Covariance matrix of the normal hidden layer of parameters}
\item{covariates}{a data.frame or matrix containing data covariates. If not NULL, defines the
number of simulated rows.}
\item{n}{number of rows to simulate}
}
\value{
Y: the simulated counts
}
\description{
Simulate count data under the Poisson log-Normal model
}
\examples{
G=generator_graph(p=10,graph="tree")
sigma=generator_param(G=G)$sigma
generator_PLN(as.matrix(sigma))
}
|
d7c3b47ede84aedaaedd84a30b73ef1ed31158b4
|
f00d232f784faf22f8cb321d9f84b2e4344aa855
|
/R/fitness.r
|
3fc5315c25b2cdf9de85dc909e4e022c26a03b74
|
[] |
no_license
|
vsbuffalo/tempautocov
|
44474dcf3917e9942d6786efab57f9269c82cb7e
|
090a8d89d8727b290440ba54f7715cf8dc707782
|
refs/heads/master
| 2022-03-04T08:58:53.905486
| 2019-09-26T01:07:38
| 2019-09-26T01:07:38
| 170,267,653
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,381
|
r
|
fitness.r
|
## fitness.r -- check math of exponential and other fitness models

# Draw one realised offspring-count vector for N individuals whose fitness
# is exponential in a normally distributed trait.
#   N      : population size (parents and total offspring)
#   sigma2 : variance of the underlying trait
# Returns an N x 1 matrix of offspring counts summing to N (rmultinom output).
trait2offspring <- function(N=1000, sigma2=1) {
  # Trait values are centred at zero with the requested variance; fitness is
  # the exponential of the trait.
  trait_vals <- rnorm(N, 0, sqrt(sigma2))
  fitness <- exp(trait_vals)
  mean_fitness <- mean(fitness)
  # Relative fitness gives the multinomial sampling probabilities.
  probs <- fitness / (N * mean_fitness)
  # Probabilities must sum to one up to floating-point error.
  stopifnot(abs(sum(probs) - 1) < 1e-5)
  rmultinom(1, N, probs)
}
## analytic
# Analytic variance of offspring number under exponential fitness:
# w = exp(z) with z ~ N(mu, sigma2), so w is log-normal.
#   N      : population size
#   mu     : trait mean
#   sigma2 : trait variance
# Returns the variance of a single multinomial offspring count when the
# sampling probabilities p_i = w_i / (N wbar) are themselves random.
offspring_var <- function(N, mu, sigma2) {
  # Bug fix: log-normal mean is E[w] = exp(mu + sigma2/2). The original used
  # exp(sigma2/2), which is only correct for mu = 0 and made vwz below
  # inconsistent (even nonzero at sigma2 = 0) for any other mu.
  wbar <- exp(mu + sigma2/2)
  # Var(w) expanded as E[w^2] - 2 E[w] wbar + wbar^2 (= E[w^2] - E[w]^2),
  # with E[w^2] = exp(2 mu + 2 sigma2) for the log-normal.
  vwz <- exp(2*(mu + sigma2)) - 2*exp(mu + sigma2/2)*wbar + wbar^2
  # Variance and mean of one sampling probability p_i.
  Vp <- vwz/(N*wbar)^2
  Ep <- 1/N
  # Binomial-count variance plus the contribution of random probabilities.
  N*Ep*(1-Ep) + N*(N-1)*Vp
}
## simulation
# Monte Carlo check of offspring_var(): run trait2offspring() nreps times and
# record the empirical variance of offspring counts in each replicate.
#   nreps  : number of replicate populations
#   N      : population size, forwarded to trait2offspring()
#   sigma2 : trait variance, forwarded to trait2offspring()
# Returns a numeric vector of length nreps.
sim_trait2offspring <- function(nreps, N=1000, sigma2=1) {
  replicate(nreps, {
    # Bug fix: the original never forwarded N, so trait2offspring() always
    # ran with its own default N = 1000 regardless of the caller's value.
    d <- trait2offspring(N=N, sigma2=sigma2)
    var(d)
  }, simplify=TRUE)
}
### fitness approximations
# Compare multiplicative fitness prod(1 + s*g) with a linear approximation
# across nreps simulated L-locus genotypes.
#   s        : per-locus selection coefficient(s)
#   nreps    : number of simulated genotypes
#   N        : population size (enters only through theta = 4*N*trait_mu)
#   L        : number of loci
#   trait_mu : per-trait mutation rate
# Returns a tibble with columns `mult` and `approx`, one row per replicate.
mult_fit <- function(s, nreps=50, N=1000, L=1000, trait_mu=3e-4) {
  theta <- 4*N*trait_mu
  # NOTE(review): this file never attaches dplyr/purrr/tibble; the calls are
  # now namespace-qualified so the function works whether or not the
  # packages are attached (they must still be installed).
  dplyr::bind_rows(replicate(nreps, {
    theta_per_L <- theta/L
    # Allele frequencies ~ Beta(theta/L, theta/L); each genotype is a
    # binomial draw of two alleles per locus (g in {0, 1, 2}).
    g <- purrr::map_dbl(rbeta(L, theta_per_L, theta_per_L), ~ rbinom(1, 2, .))
    tibble::tibble(mult=prod(1 + s*g), approx=1 + 2*sum(s*g))
  }, simplify=FALSE))
}
### Truncation Selection
# Additive genetic variance under repeated truncation selection; recurrence
# from Bulmer, p. 154.
#   prop : proportion selected (truncation point is qnorm(prop))
#   h2   : heritability
#   Va0  : initial additive variance
#   gens : number of generations to iterate (>= 1)
# Returns a numeric vector of length gens with Vas[1] == Va0.
trunc_Va <- function(prop, h2, Va0, gens) {
  z <- qnorm(prop)
  cc <- dnorm(z)/prop # truncated standard normal
  Vas <- numeric(gens)
  Vas[1] <- Va0
  # Bug fix: the original `for (t in 2:gens)` ran backwards as c(2, 1) when
  # gens == 1, indexing Vas[0] (length zero) and erroring on assignment;
  # seq_len() makes the loop a clean no-op in that case.
  for (t in seq_len(gens - 1) + 1) {
    Vas[t] <- 0.5*(1 - h2 * cc * (cc-z))*Vas[t-1] + 0.5*Va0
  }
  Vas
}
|
af53df27faa23c4f8229518a5340407b6972b77f
|
6fb233afe964a99ff4397b4be1fe1ed73949fac1
|
/man/TU_SVM.Rd
|
cb4d10fad3adc105e0d409e1c2b5422f4ec7087e
|
[
"MIT"
] |
permissive
|
OSU-BMBL/rSeqTU
|
b67f5327687ddc154982c7891c82885a2e19cf81
|
676d6eadca858ff4c705dfa21be0c59b7cc75e4a
|
refs/heads/master
| 2021-07-04T21:41:43.617523
| 2020-10-14T00:46:47
| 2020-10-14T00:46:47
| 188,394,107
| 0
| 0
|
MIT
| 2019-05-24T09:31:50
| 2019-05-24T09:31:50
| null |
UTF-8
|
R
| false
| true
| 1,007
|
rd
|
TU_SVM.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SVM.R
\name{TU_SVM}
\alias{TU_SVM}
\title{Using Support Vector Machine to train and generate TU prediction results}
\usage{
TU_SVM(positive_training, negative_training, positive_strand_testing,
negative_strand_testing, file_RNAseqSignals, file_gff, output_prefix,
genome_name)
}
\arguments{
\item{positive_training}{The file of positive training dataset.}
\item{negative_training}{The file of negative training dataset.}
\item{positive_strand_testing}{The file of positive strand testing dataset.}
\item{negative_strand_testing}{The file of negative strand testing dataset.}
\item{file_RNAseqSignals}{The .NA file generated by previous step}
\item{file_gff}{The .gff file of reference genome}
\item{output_prefix}{The prefix of output file name}
\item{genome_name}{The file of the reference genome}
}
\value{
TU prediction results
}
\description{
Using Support Vector Machine to train and generate TU prediction results
}
|
8c66c6b2f9a9edf3505594009a7bbdd66d9b31da
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/treeducken/man/is_extinct.Rd
|
38f7638ce1523fde1461adff22feafa71d3a0107
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,247
|
rd
|
is_extinct.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{is_extinct}
\alias{is_extinct}
\title{Identify extinct tips from tree}
\usage{
is_extinct(phy, tol = NULL)
}
\arguments{
\item{phy}{a 'phylo' class object}
\item{tol}{tolerance in decimal values for branch lengths}
}
\value{
A list of the tips that are extinct
}
\description{
This is a direct port of the geiger function, I import it here for convenience.
This code is copied under GPL 3 license.
}
\examples{
mu <- 0.5 # death rate
lambda <- 2.0 # birth rate
numb_replicates <- 10
numb_extant_tips <- 4
# simulate trees under the GSA so first simulates a tree with
# numb_extant_tips * 100 tips counting each time we have a tree with 10 tips
# then randomly picks one of those trees
tree_list <- sim_sptree_bdp(sbr = lambda,
sdr = mu,
numbsim = numb_replicates,
n_tips = numb_extant_tips)
is_extinct(tree_list[[1]])
}
\references{
Pennell M, Eastman J, Slater G, Brown J, Uyeda J, Fitzjohn R, Alfaro M, Harmon L (2014). “geiger v2.0: an expanded suite of methods for fitting macroevolutionary models to phylogenetic trees.” Bioinformatics, 30, 2216-2218
}
|
87333654fccfedb5982947e12f613baf9d3630ec
|
5c42184a79d320791411c0adcb22de7ba9ef2262
|
/plot1.R
|
45b122ce5642ccbff16c6add14ee4a855b8abebb
|
[] |
no_license
|
earthspatial/ExData_Plotting1
|
0dc83726eba4308f89a2d96c9d8ce4ed0e117736
|
171b462dbcb175bb6346225fd3d618c78375275c
|
refs/heads/master
| 2021-01-18T08:10:28.626406
| 2014-05-09T18:36:09
| 2014-05-09T18:36:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 683
|
r
|
plot1.R
|
# plot1.R: histogram of global active power for the two days of interest.

# Read only the rows for the target dates (fixed offset/count in the file).
power_data <- read.table("c:/temp/household_power_consumption.txt",
                         skip = 66637, nrow = 2880, sep = ";")

# The first line of the file holds the column names; read it separately.
header_line <- readLines("c:/temp/household_power_consumption.txt", 1)

# Split the semicolon-separated header and apply it to the data frame.
colnames(power_data) <- strsplit(header_line, ';')[[1]]

# Open the PNG device for the output image.
png(filename = "c:/temp/plot1.png", height = 480, width = 480)

# Draw the histogram of global active power.
hist(as.numeric(power_data$Global_active_power), col = "red", breaks = 16,
     main = "Global Active Power", ylab = "Frequency",
     xlab = "Global Active Power (kilowatts)")

# Close the device, flushing the image to disk.
dev.off()
|
e2cd765b9c68d2e4a79b3f80c7f87b61d58ec7e3
|
0fbc58702c39addfa7949391d92533922dcf9d49
|
/man/music.Rd
|
3f382de9ad3de1b6596a20c035e9c6baa4e22641
|
[] |
no_license
|
yihui/MSG
|
d3d353514464f962a0d987efd8cf32ed50ac901a
|
8693859ef41139a43e32aeec33ab2af700037f82
|
refs/heads/master
| 2021-11-29T08:12:02.820072
| 2021-08-15T17:14:36
| 2021-08-15T17:14:36
| 1,333,662
| 30
| 12
| null | 2021-08-15T17:14:37
| 2011-02-06T05:42:53
|
R
|
UTF-8
|
R
| false
| true
| 391
|
rd
|
music.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MSG-package.R
\docType{data}
\name{music}
\alias{music}
\title{Attributes of some music clips}
\description{
Attributes of some music clips
}
\examples{
data(music)
}
\references{
Cook D, Swayne DF (2007). Interactive and Dynamic Graphics for
Data Analysis With R and GGobi. Springer. ISBN 978-0-387-71761-6.
}
|
3656d88cb291a1cdac48ef5cb3ca5a3a09895c0d
|
100ef7748001814334951538fa23869ee00f58f7
|
/geog418-518-a4-master/Thiessen Polygons.R
|
23dac4bc0b95783177c07d21042190d79290f581
|
[] |
no_license
|
dahewett/geostatistics
|
c51ca284288cc0a086886879629a852f7e083cab
|
3dd1c66391503f88c60160f1f8dfa270fdc2addd
|
refs/heads/master
| 2022-10-19T14:32:47.146003
| 2020-06-03T03:37:34
| 2020-06-03T03:37:34
| 152,347,434
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,136
|
r
|
Thiessen Polygons.R
|
#################################################
## Spatial Interpolation with Thiessen (Voronoi/Dirichlet) Polygons
## NOTE(review): relies on objects and packages created elsewhere --
## ozone.mean.spdf (a spatial points layer) and SC.AirBasin.t (a boundary
## layer), plus spatstat (dirichlet/as.ppp), sp, raster and tmap being
## attached by the calling script -- confirm before running standalone.
# Create a tessellated surface from the monitoring points
th <- as(dirichlet(as.ppp(ozone.mean.spdf)), "SpatialPolygons")
# The dirichlet function does not carry over projection information
# requiring that this information be added manually
proj4string(th) <- proj4string(ozone.mean.spdf)
# The tessellated surface does not store attribute information
# from the point data layer. We'll use the over() function (from the sp
# package) to join the point attributes to the tesselated surface via
# a spatial join. The over() function creates a dataframe that will need to
# be added to the `th` object thus creating a SpatialPolygonsDataFrame object
th.z <- over(th, ozone.mean.spdf, fn=mean)
th.spdf <- SpatialPolygonsDataFrame(th, th.z)
# Clip the tessellated surface to the study-area boundary.
# NOTE(review): the original comment said "Texas boundaries", but the code
# clips to SC.AirBasin.t (presumably the South Coast Air Basin) -- verify.
th.clp <- raster::intersect(SC.AirBasin.t,th.spdf)
# Map the clipped polygons, coloured by the predicted ozone value
tm_shape(th.clp) +
tm_polygons(col="value", palette="RdBu", auto.palette.mapping=FALSE,
title="Predicted Ozone \n(in ppm)") +
tm_legend(legend.outside=TRUE)
|
c4075a5c8f6cfd3236f4726683cd21b04a072109
|
473ef88aafd5cc4dd8eee11e3e6881402824e8e3
|
/analyze.R
|
00f276d09c524cb1257ad7e27dec555dcc74e2ae
|
[] |
no_license
|
rsangole/CVG_Parking
|
1ddac0f409ce628a3a9fbf06deb41546024eb13d
|
614d49f79a6cbd36e6b4cbecad95dba6ef3836d6
|
refs/heads/master
| 2021-01-10T04:26:01.721484
| 2016-03-14T16:26:06
| 2016-03-14T16:26:06
| 53,560,645
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,333
|
r
|
analyze.R
|
# analyze.R -- visualise parking utilization at CVG from a scraped data file.
# NOTE(review): setwd() inside a script is an anti-pattern (breaks on any
# other machine); consider project-relative paths.
setwd("~/Documents/Data Science/CVG_Parking")
library(ggplot2)
library(dplyr)
library(tidyr)
library(scales)
# Raw scrape: a timestamp plus two percentage columns.
# NOTE(review): tbl_df() is deprecated in modern dplyr (use as_tibble()).
data.cvg <- tbl_df(read.csv('o.txt',sep = ',',header = F))
names(data.cvg) <- c('Date','TerminalGarageUtilization','ValuParkUtilization')
data.cvg$Date <- as.POSIXct(data.cvg$Date)
# Invert the two percentage columns (100 - x) -- presumably the feed reports
# "% spaces free" and this converts to "% utilized"; verify against the
# scraper -- and tag each row with its abbreviated weekday name.
data.cvg <- data.cvg %>%
mutate(TerminalGarageUtilization=100-TerminalGarageUtilization) %>%
mutate(ValuParkUtilization=100-ValuParkUtilization) %>%
mutate(Day=format(Date,'%a'))
# Order weekdays starting at Saturday for facet/legend ordering.
data.cvg$Day <- factor(data.cvg$Day,levels = c('Sat','Sun','Mon','Tue','Wed','Thu','Fri'),ordered = T)
# Time-of-day column: same clock time for every row, today's date, so days
# can be overlaid on a single 24-hour axis.
data.cvg <- data.cvg %>% mutate(Time=as.POSIXct(format(Date, format = "%H:%M:%S %z"), format = "%H:%M:%S %z"))
# Long format: one row per (timestamp, lot) pair.
# NOTE(review): gather() is superseded; pivot_longer() is the modern
# equivalent (row order of the output differs).
data.tidy <- data.cvg %>% gather('Parameter','Value',2:3)
# Plot 1: utilization by time of day, one facet per lot, coloured by weekday,
# with an "Updated:" annotation from the last observation.
# NOTE(review): the y-axis limits c(30, 71.5) are hard-coded; rows outside
# that range are dropped by scale_y_continuous(limits=).
ggplot(data.tidy)+
geom_line(aes(x=Time,y=Value,color=Day))+
facet_grid(.~Parameter)+
labs(title='Parking Utilization at CVG',x='Time',y='% Utilization')+
theme_light()+
scale_x_datetime(date_breaks = '2 hours',
labels = date_format("%H:%M",tz = "EST"))+
scale_y_continuous(limits=c(30,71.5),breaks = seq.int(from = 0,to = 100,by = 10))+
theme(axis.text.x = element_text(angle = 45, hjust = 1,size = 9),strip.text.x = element_text(size = 12))+
annotate('text',x=as.POSIXct(paste(Sys.Date(),' 18:25'),tz = 'EST'),y=71.5,label=paste('Updated:',format(data.tidy$Time[length(data.tidy$Time)],"%a, %H:%M %Z")),size=3,color='lightskyblue4')
# Plot 2: full time series, solid = terminal garage, dashed = ValuPark.
ggplot(data.cvg)+
geom_line(aes(x=Date,y=TerminalGarageUtilization,color=Day))+
geom_line(aes(x=Date,y=ValuParkUtilization,color=Day),lty=2)+
scale_color_manual(values=c('coral3','chartreuse3','dodgerblue3','deeppink3','darkorchid3','goldenrod3','limegreen'))+
labs(title='Solid=Terminal Garage, Dashed=ValuPark',y='% Utilization')+
theme_light()+
scale_x_datetime(date_breaks = '1 day', date_minor_breaks = '6 hours',
labels = date_format("%a, 03/%d",tz = "EST"))+
theme(axis.text.x = element_text(hjust = 0,size = 9))
# Plot 3: distribution of utilization per lot and weekday.
ggplot(data.tidy)+geom_violin(aes(x = Parameter,y = Value,fill=Day),scale = 'area')+
labs(title='Variation in parking utilization',x='',y='% Utilization')+
theme(axis.text.x = element_text(size = 10))
# Summary statistics per lot (mean, sd, coefficient of variation, count).
data.tidy %>% group_by(Parameter) %>% summarise(Avg=mean(Value),StdDev=sd(Value),COV=StdDev/Avg,N=n())
|
3a519bd6968688989469c935943dfa404397c6fc
|
34a1872c598758ad8b6c0a82132f52b2f124489e
|
/TestScripts/proportionCheck.R
|
7b5c992020514d0fb8a3ce5516283c9b09a0fc41
|
[] |
no_license
|
DataAnalyticsinStudentHands/SyntheticDataSet
|
2f73599723d53c5ca0e04535de55bf05c44aaaac
|
82877e75367dbd9ff68976c61b9e8f237224cf2d
|
refs/heads/master
| 2023-08-22T20:00:35.665190
| 2023-08-16T17:14:39
| 2023-08-16T17:14:39
| 77,935,514
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,280
|
r
|
proportionCheck.R
|
# This function compares the proportions in the model to the proportions in the Census Data. Ideally they should be within 10% of each other
# The inputs are the model (sam), the Census Data (census), and the tract number (x)
prop_Check <- function(sam, census, x){
# These are temporary data frames. part is for the model, and cen is for the Census Data
part=data.frame(tract=x,stringsAsFactors = F)
cen=data.frame(tract=x,stringsAsFactors = F)
# mini is a subset of the model and census is a subset of the Census Data
mini=sam[sam$tract==x,]
census = census[census$tract== as.integer(x) & census$county == "201",]
# ******************************************** SAM CITY DATA ****************************************
# count the number of elements in each category
# household type
part$group.quarters.population=nrow(mini[mini$household.type=="Group Quarters",])
part$married.couple.families = nrow(mini[mini$member=="Husband",])
part$male.householders.no.wife = nrow(mini[mini$member=="Male Householder",])
part$female.householders.no.husband = nrow(mini[mini$member=="Female Householder",])
part$nonfamily = nrow(mini[mini$household.type %in% c("Alone", "Non-family"),])
# household member
part$adult = nrow(mini[mini$member != "Child",])
part$child = nrow(mini[mini$member == "Child",])
# household size
part$size1 = nrow(mini[mini$size == 1,])
part$size2 = nrow(mini[mini$size == 2,])/2
part$size3 = nrow(mini[mini$size == 3,])/3
part$size4 = nrow(mini[mini$size == 4,])/4
part$size5 = nrow(mini[mini$size == 5,])/5
part$size6 = nrow(mini[mini$size == 6,])/6
part$size7 = nrow(mini[mini$size == 7,])/7
# num of cars
part$car0 = nrow(mini[mini$number.of.vehicles == 0 & !is.na(mini$number.of.vehicles),])
part$car1 = nrow(mini[mini$number.of.vehicles == 1 & !is.na(mini$number.of.vehicles),])
part$car2 = nrow(mini[mini$number.of.vehicles == 2 & !is.na(mini$number.of.vehicles),])
part$car3 = nrow(mini[mini$number.of.vehicles == 3 & !is.na(mini$number.of.vehicles),])
part$car4 = nrow(mini[mini$number.of.vehicles == 4 & !is.na(mini$number.of.vehicles),])
part$car5 = nrow(mini[mini$number.of.vehicles == 5 & !is.na(mini$number.of.vehicles),])
# gender
part$male = nrow(mini[mini$sex == "Male",])
part$female = nrow(mini[mini$sex == "Female",])
# age
part$age4 = nrow(mini[mini$bracket.age == "0.to.4",])
part$age9 = nrow(mini[mini$bracket.age == "5.to.9",])
part$age14 = nrow(mini[mini$bracket.age == "10.to.14",])
part$age17 = nrow(mini[mini$bracket.age == "15.to.17",])
part$age19 = nrow(mini[mini$bracket.age == "18.to.19",])
part$age24 = nrow(mini[mini$bracket.age == "20.to.24",])
part$age29 = nrow(mini[mini$bracket.age == "25.to.29",])
part$age34 = nrow(mini[mini$bracket.age == "30.to.34",])
part$age44 = nrow(mini[mini$bracket.age == "35.to.44",])
part$age54 = nrow(mini[mini$bracket.age == "45.to.54",])
part$age64 = nrow(mini[mini$bracket.age == "55.to.64",])
part$age74 = nrow(mini[mini$bracket.age == "65.to.74",])
part$age84 = nrow(mini[mini$bracket.age == "75.to.84",])
part$age100 = nrow(mini[mini$bracket.age == "85.to.100",])
# race
part$black = nrow(mini[mini$race == "black",])
part$american.indian.or.alaskan = nrow(mini[mini$race == "american.indian.or.alaskan",])
part$asian = nrow(mini[mini$race == "asian",])
part$islander = nrow(mini[mini$race == "islander",])
part$other.race = nrow(mini[mini$race == "other.race",])
part$multiracial = nrow(mini[mini$race == "multiracial",])
part$white = nrow(mini[mini$race == "white",])
part$hispanic = nrow(mini[mini$race == "hispanic",])
# school.enrollment
part$privateSchool = nrow(mini[mini$school.enrollment == "Private School" & !is.na(mini$school.enrollment),])
part$publicSchool = nrow(mini[mini$school.enrollment == "Public School" & !is.na(mini$school.enrollment),])
part$noSchool = nrow(mini[mini$school.enrollment == "Not Enrolled in School" & !is.na(mini$school.enrollment),])
# educational attainment
part$noHighSchool = nrow(mini[mini$educational.attainment == "Less than 9th grade" & !is.na(mini$educational.attainment),])
part$someHighSchool = nrow(mini[mini$educational.attainment == "9th to 12th grade, no diploma" &! is.na(mini$educational.attainment),])
part$highSchool = nrow(mini[mini$educational.attainment == "High School Graduate" & !is.na(mini$educational.attainment),])
part$someCollege = nrow(mini[mini$educational.attainment == "Some College, no degree" & !is.na(mini$educational.attainment),])
part$associates = nrow(mini[mini$educational.attainment == "Associate's degree" & !is.na(mini$educational.attainment),])
part$bachelors = nrow(mini[mini$educational.attainment == "Bachelor's Degree" & !is.na(mini$educational.attainment),])
part$phd = nrow(mini[mini$educational.attainment == "Graduate or Professional Degree" & !is.na(mini$educational.attainment),])
# employment
part$army = nrow(mini[mini$employment == "In Armed Forces" & !is.na(mini$employment),])
part$employed = nrow(mini[mini$employment == "Employed" & !is.na(mini$employment),])
part$unemployed = nrow(mini[mini$employment == "Unemployed" & !is.na(mini$employment),])
part$noLabor = nrow(mini[mini$employment == "Not in labor force" & !is.na(mini$employment),])
# disability
part$disability1 = nrow(mini[mini$disability == "With One Type of Disability",])
part$disability2 = nrow(mini[mini$disability == "With Two or More Types of Disabilities",])
part$noDisability = nrow(mini[mini$disability == "No Disabilities",])
# native
part$native = nrow(mini[mini$nativity == "native" & !is.na(mini$nativity),])
part$foreign = nrow(mini[mini$nativity == "foreign" & !is.na(mini$nativity),])
# english
part$onlyEnglish = nrow(mini[mini$English.speaking.skills == "only.english" & !is.na(mini$English.speaking.skills),])
part$englishWell = nrow(mini[mini$English.speaking.skills == "english.well" & !is.na(mini$English.speaking.skills),])
part$englishBad = nrow(mini[mini$English.speaking.skills == "english.bad" & !is.na(mini$English.speaking.skills),])
# citizen
part$citizen = nrow(mini[mini$citizenship == "Citizen" & !is.na(mini$citizenship),])
part$naturalized = nrow(mini[mini$citizenship == "Naturalized Citizen" & !is.na(mini$citizenship),])
part$notCitizen = nrow(mini[mini$citizenship == "Not a U.S. Citizen" & !is.na(mini$citizenship),])
# Home language
part$english = nrow(mini[mini$Language.at.home == "English" & !is.na(mini$Language.at.home),])
part$spanish = nrow(mini[mini$Language.at.home == "Speaks Spanish" & !is.na(mini$Language.at.home),])
part$other = nrow(mini[mini$Language.at.home == "Speaks Other Languages" & !is.na(mini$Language.at.home),])
# veteran
part$veteran = nrow(mini[mini$veteran.status == "Veteran",])
part$nonveteran = nrow(mini[mini$veteran.status == "Nonveteran",])
# transportation
part$drive = nrow(mini[mini$means.of.transportation.to.work == "drove alone" & !is.na(mini$means.of.transportation.to.work),])
part$carpooled = nrow(mini[mini$means.of.transportation.to.work == "carpooled" & !is.na(mini$means.of.transportation.to.work),])
part$publicTransport = nrow(mini[mini$means.of.transportation.to.work == "public transportation" & !is.na(mini$means.of.transportation.to.work),])
part$bike = nrow(mini[mini$means.of.transportation.to.work == "bicycle" & !is.na(mini$means.of.transportation.to.work),])
part$walk = nrow(mini[mini$means.of.transportation.to.work == "walked" & !is.na(mini$means.of.transportation.to.work),])
part$otherTransport = nrow(mini[mini$means.of.transportation.to.work == "other" & !is.na(mini$means.of.transportation.to.work),])
part$workedHome = nrow(mini[mini$means.of.transportation.to.work == "worked at home" & !is.na(mini$means.of.transportation.to.work),])
# travel time
part$time10 = nrow(mini[mini$bracket.travel.time.to.work == "1 to 10 minutes" & !is.na(mini$bracket.travel.time.to.work),])
part$time14 = nrow(mini[mini$bracket.travel.time.to.work == "10 to 14 minutes" & !is.na(mini$bracket.travel.time.to.work),])
part$time19 = nrow(mini[mini$bracket.travel.time.to.work == "15 to 19 minutes" & !is.na(mini$bracket.travel.time.to.work),])
part$time24 = nrow(mini[mini$bracket.travel.time.to.work == "20 to 24 minutes" & !is.na(mini$bracket.travel.time.to.work),])
part$time29 = nrow(mini[mini$bracket.travel.time.to.work == "25 to 29 minutes" & !is.na(mini$bracket.travel.time.to.work),])
part$time34 = nrow(mini[mini$bracket.travel.time.to.work == "30 to 34 minutes" & !is.na(mini$bracket.travel.time.to.work),])
part$time44 = nrow(mini[mini$bracket.travel.time.to.work == "35 to 44 minutes" & !is.na(mini$bracket.travel.time.to.work),])
part$time59 = nrow(mini[mini$bracket.travel.time.to.work == "45 to 59 minutes" & !is.na(mini$bracket.travel.time.to.work),])
part$time100 = nrow(mini[mini$bracket.travel.time.to.work == "60 to 100 minutes" & !is.na(mini$bracket.travel.time.to.work),])
# income
part$income.0.9999 = nrow(mini[mini$household.income < 10000,]) + nrow(mini[mini$household.type != "Group Quarters" & mini$age < 16,])
part$income.10000.14999 = nrow(mini[mini$household.income > 9999 & mini$household.income < 15000,])
part$income.15000.24999 = nrow(mini[mini$household.income > 14999 & mini$household.income < 25000,])
part$income.25000.34999 = nrow(mini[mini$household.income > 24999 & mini$household.income < 35000,])
part$income.35000.49999 = nrow(mini[mini$household.income > 34999 & mini$household.income < 50000,])
part$income.50000.74999 = nrow(mini[mini$household.income > 49999 & mini$household.income < 75000,])
part$income.75000.99999 = nrow(mini[mini$household.income > 74999 & mini$household.income < 100000,])
part$income.100000.124999 = nrow(mini[mini$bracket.household.income == "income.100000.124999",])
part$income.125000.149999 = nrow(mini[mini$bracket.household.income == "income.125000.149999",])
part$income.150000.199999 = nrow(mini[mini$bracket.household.income == "income.150000.199999",])
part$income.200000.500000 = nrow(mini[mini$bracket.household.income == "income.200000.500000",])
# insurance
part$privateInsurance = nrow(mini[mini$health.insurance == "private insurance",])
part$publicInsurance = nrow(mini[mini$health.insurance == "public insurance",])
part$noInsurance = nrow(mini[mini$health.insurance == "no insurance",])
# ********************************************* CENSUS DATA *********************************************
# count the number of elements in each category
# household type
cen$group.quarters.population = census$group.quarters.population
cen$married.couple.families = census$married.couple.families
cen$male.householders.no.wife = census$male.householders.no.wife
cen$female.householders.no.husband = census$female.householders.no.husband
cen$nonfamily = sum(census[startsWith(names(census), "nonfamily")])
# household member
cen$adult = sum(census[c(45:54, 59:68, 73:82, 87:96, 101:110, 115:124, 129:138, 143:152, 157:166, 171:180, 185:194, 199:208, 213:222, 227:236, 241:250, 255:264)])
cen$child = sum(census[c(41:44, 55:58, 69:72, 83:86, 97:100, 111:114, 125:128, 139:142, 153:156, 167:170, 181:184, 195:198, 209:212, 223:226, 237:240, 251:254)])
# household size
cen$size1 = sum(census[c(4,11)])
cen$size2 = sum(census[c(5,12)])
cen$size3 = sum(census[c(6,13)])
cen$size4 = sum(census[c(7,14)])
cen$size5 = sum(census[c(8,15)])
cen$size6 = sum(census[c(9,16)])
cen$size7 = sum(census[c(10,17)])
# num of cars
cen$car0 = sum(census[endsWith(names(census), "no.vehicle")]) + sum(census[endsWith(names(census), "0cars")])
cen$car1 = sum(census[endsWith(names(census), "1.vehicle")]) + sum(census[endsWith(names(census), "1car")])
cen$car2 = sum(census[endsWith(names(census), "2.vehicles")]) + sum(census[endsWith(names(census), "2cars")])
cen$car3 = sum(census[endsWith(names(census), "3.vehicles")]) + sum(census[endsWith(names(census), "3cars")])
cen$car4 = sum(census[endsWith(names(census), "4.vehicles")]) + sum(census[endsWith(names(census), "5cars")])
cen$car5 = sum(census[endsWith(names(census), "5cars")])
# gender
cen$male = sum(census[c(41:54, 69:82, 97:110, 125:138, 153:166, 181:194, 209:222, 237:250)])
cen$female = sum(census[c(55:68, 83:96, 111:124, 139:152, 167:180, 195:208, 223:236, 251:264)])
# age
cen$age4 = sum(census[c(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15)*14 + 41])
cen$age9 = sum(census[c(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15)*14 + 42])
cen$age14 = sum(census[c(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15)*14 + 43])
cen$age17 = sum(census[c(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15)*14 + 44])
cen$age19 = sum(census[c(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15)*14 + 45])
cen$age24 = sum(census[c(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15)*14 + 46])
cen$age29 = sum(census[c(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15)*14 + 47])
cen$age34 = sum(census[c(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15)*14 + 48])
cen$age44 = sum(census[c(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15)*14 + 49])
cen$age54 = sum(census[c(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15)*14 + 50])
cen$age64 = sum(census[c(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15)*14 + 51])
cen$age74 = sum(census[c(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15)*14 + 52])
cen$age84 = sum(census[c(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15)*14 + 53])
cen$age100 = sum(census[c(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15)*14 + 54])
# race
cen$black = sum(census[startsWith(names(census), "black")])
cen$american.indian.or.alaskan = sum(census[startsWith(names(census), "american")])
cen$asian = sum(census[startsWith(names(census), "asian")])
cen$islander = sum(census[startsWith(names(census), "islander")])
cen$other.race = sum(census[startsWith(names(census), "other.race")])
cen$multiracial = sum(census[startsWith(names(census), "multiracial")])
cen$white = sum(census[startsWith(names(census), "white")])
cen$hispanic = sum(census[startsWith(names(census), "hispanic")])
# school.enrollment
cen$privateSchool = sum(census[startsWith(names(census), "private.school")])
cen$publicSchool = sum(census[startsWith(names(census), "public.school")])
cen$noSchool = sum(census[startsWith(names(census), "no.school")])
# educational attainment
cen$noHighSchool = sum(census[startsWith(names(census), "less.than.9.grade")])
cen$someHighSchool = sum(census[startsWith(names(census), "btwn.9")])
cen$highSchool = sum(census[startsWith(names(census), "high.school")])
cen$someCollege = sum(census[startsWith(names(census), "some.college")])
cen$associates = sum(census[startsWith(names(census), "associates")])
cen$bachelors = sum(census[startsWith(names(census), "bachelors")])
cen$phd = sum(census[startsWith(names(census), "post.grad")])
# employment
cen$army = sum(census[startsWith(names(census), "in.armed.forces")])
cen$employed = sum(census[startsWith(names(census), "employed")])
cen$unemployed = sum(census[startsWith(names(census), "unemployed")])
cen$noLabor = sum(census[startsWith(names(census), "not.in.labor.forces")])
# disability
cen$disability1 = sum(census[endsWith(names(census), "1.disability")])
cen$disability2 = sum(census[endsWith(names(census), "2.disability")])
cen$noDisability = sum(census[endsWith(names(census), "no.disability")])
# native
cen$native = sum(census[c(484:486, 490:492, 496:498, 502:504, 508:510, 514:516, 520:522, 526:528)])
cen$foreign = sum(census[c(487:489, 493:495, 499:501, 505:507, 511:513, 517:519, 523:525, 529:531)])
# english
cen$onlyEnglish = sum(census[endsWith(names(census), "only.english")])
cen$englishWell = sum(census[c(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15)*3 + 485])
cen$englishBad = sum(census[c(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15)*3 + 486])
# citizen
cen$citizen = cen$native
cen$naturalized = sum(census[c(540, 542:543, 546:547, 550, 552:553, 556:557)])
cen$notCitizen = sum(census[c(541, 544:545, 548:549, 551, 554:555, 558:559)])
# Home language
cen$english = cen$onlyEnglish
cen$spanish = sum(census[startsWith(names(census), "spanish")])
cen$other = sum(census[startsWith(names(census), "other.lang")])
# veteran
cen$veteran = sum(census[startsWith(names(census), "veteran")])
cen$nonveteran = sum(census[startsWith(names(census), "nonveteran")])
# transportation
cen$drive = sum(census[endsWith(names(census), "drove.alone")])
cen$carpooled = sum(census[endsWith(names(census), "carpooled")])
cen$publicTransport = sum(census[endsWith(names(census), "public.transport")])
cen$bike = sum(census[endsWith(names(census), "bike")])
cen$walk = sum(census[endsWith(names(census), "walk")])
cen$otherTransport = sum(census[endsWith(names(census), "other.transport")])
cen$workedHome = sum(census[endsWith(names(census), "work.at.home")])
# travel time
cen$time10 = sum(census[endsWith(names(census), "10.minutes")])
cen$time14 = sum(census[endsWith(names(census), "14.minutes")])
cen$time19 = sum(census[endsWith(names(census), "19.minutes")])
cen$time24 = sum(census[endsWith(names(census), "24.minutes")])
cen$time29 = sum(census[endsWith(names(census), "29.minutes")])
cen$time34 = sum(census[endsWith(names(census), "34.minutes")])
cen$time44 = sum(census[endsWith(names(census), "44.minutes")])
cen$time59 = sum(census[endsWith(names(census), "59.minutes")])
cen$time100 = sum(census[endsWith(names(census), "over60.minutes")])
# income
cen$income.0.9999 = census$income.0.9999 + census$individual.1.9999 + census$individual.0.0
cen$income.10000.14999 = census$income.10000.14999 + census$individual.10000.14999
cen$income.15000.24999 = census$income.15000.19999 + census$income.20000.24999 + census$individual.15000.24999
cen$income.25000.34999 = census$income.25000.29999 + census$income.30000.34999 + census$individual.25000.34999
cen$income.35000.49999 = census$income.35000.39999 + census$income.40000.44999 + census$income.45000.49999 + census$individual.35000.49999
cen$income.50000.74999 = census$income.50000.59999 + census$income.60000.74999 + census$individual.50000.64999 + census$individual.65000.74999
cen$income.75000.99999 = census$income.75000.99999 + census$individual.75000.100000
cen$income.100000.124999 = census$income.100000.124999
cen$income.125000.149999 = census$income.125000.149999
cen$income.150000.199999 = census$income.150000.199999
cen$income.200000.500000 = census$income.200000.500000
# insurance
cen$privateInsurance = sum(census[endsWith(names(census), "private.insurance")])
cen$publicInsurance = sum(census[endsWith(names(census), "public.insurance")])
cen$noInsurance = sum(census[endsWith(names(census), "no.insurance")])
# ********************************* PROPORTIONS ******************************************************************
# Find the difference between the Sam City proportions and the Census Data proportions for each element
part[2:6] = part[2:6]/sum(part[2:6]) - cen[2:6]/sum(cen[2:6]) # household type
part[7:8] = part[7:8]/sum(part[7:8]) - cen[7:8]/sum(cen[7:8]) # adult or child
part[9:15] = part[9:15]/sum(part[9:15]) - cen[9:15]/sum(cen[9:15]) # family size
part[16:21] = part[16:21]/sum(part[16:21]) - cen[16:21]/sum(cen[16:21]) # number of vehicles
part[22:23] = part[22:23]/sum(part[22:23]) - cen[22:23]/sum(cen[22:23]) # gender
part[24:37] = part[24:37]/sum(part[24:37]) - cen[24:37]/sum(cen[24:37]) # age
part[38:45] = part[38:45]/sum(part[38:45]) - cen[38:45]/sum(cen[38:45]) # race
part[46:48] = part[46:48]/sum(part[46:48]) - cen[46:48]/sum(cen[46:48]) # school enrollment
part[49:55] = part[49:55]/sum(part[49:55]) - cen[49:55]/sum(cen[49:55]) # educational attainment
part[56:59] = part[56:59]/sum(part[56:59]) - cen[56:59]/sum(cen[56:59]) # employment
part[60:62] = part[60:62]/sum(part[60:62]) - cen[60:62]/sum(cen[60:62]) # disability
part[63:64] = part[63:64]/sum(part[63:64]) - cen[63:64]/sum(cen[63:64]) # nativity
part[65:67] = part[65:67]/sum(part[65:67]) - cen[65:67]/sum(cen[65:67]) # english speaking skills
part[68:70] = part[68:70]/sum(part[68:70]) - cen[68:70]/sum(cen[68:70]) # citizenship
part[71:73] = part[71:73]/sum(part[71:73]) - cen[71:73]/sum(cen[71:73]) # language at home
part[74:75] = part[74:75]/sum(part[74:75]) - cen[74:75]/sum(cen[74:75]) # veteran status
part[76:82] = part[76:82]/sum(part[76:82]) - cen[76:82]/sum(cen[76:82]) # means of transportation to work
part[83:91] = part[83:91]/sum(part[83:91]) - cen[83:91]/sum(cen[83:91]) # travel time
part[92:102] = part[92:102]/sum(part[92:102]) - cen[92:102]/sum(cen[92:102]) # income
part[103:105] = part[103:105]/sum(part[103:105]) - cen[103:105]/sum(cen[103:105]) # health insurance
# Any NA values should be 0
part[is.na(part)]=0
# Count the number of times the difference in proportions was more than .10 (10%)
part$flag_count = rowSums(part[2:105] < -0.1 | part[2:105] > 0.1)
# Return the data frame that has all the info stored
return(part)
}
|
86f360eb9de38c0d39f2c63a4dbd12fb42dfbe0e
|
8b65e595ae3c4331c81b2e0dc42546d006ffbace
|
/R/cv.R
|
946550b8622523715dcb65eb8d7121622e630d1f
|
[] |
no_license
|
rpruim/statisticalModeling
|
91b16556b92d5f12a6da7c7abbc9fac640f3cd2d
|
1d57112ca169d7960dd41c055ceb5c84c106b961
|
refs/heads/master
| 2021-01-20T14:25:21.586119
| 2017-02-24T22:45:45
| 2017-02-24T22:45:45
| 82,751,296
| 2
| 1
| null | 2017-02-22T02:29:08
| 2017-02-22T02:29:08
| null |
UTF-8
|
R
| false
| false
| 5,003
|
r
|
cv.R
|
#' Compare models with k-fold cross-validation
#'
#' @param ... one or more models on which to perform the cross-validation
#' @param k the k in k-fold. Cross-validation will use (k-1)/k of the data for training.
#' @param ntrials how many random partitions to make. Each partition will be one case in the
#' output of the function
#' @param output The kind of output to produce from each cross-validation. See details.
#'
#' @details The purpose of cross-validation is to provide "new" data on which to test a model's
#' performance. In k-fold cross-validation, the data set used to train the model is broken into
#' new training and testing data. This is accomplished simply by using most of the data for training while
#' reserving the remaining data for evaluating the model: testing. Rather than training a single model, k models
#' are trained, each with its own particular testing set. The testing sets in the k models are arranged to cover the
#' whole of the data set. On each of the k testing sets, a performance output is calculated. Which output is
#' most appropriate depends on the kind of model: regression model or classifier. The most basic measure is the mean square error: the
#' difference between the actual response variable in the testing data and the output of the model
#' when presented with inputs from the testing data. This is appropriate in many regression models.
#'
#' For classification models, two different outputs are appropriate. The first is the error rate: the frequency
#' with which the classifier produces an incorrect output when presented with inputs from the testing data. This
#' is a rather coarse measure. A more graded measure is the likelihood: the probability of the response values
#' from the test data given the model. (The "class" method is exactly the same as "error_rate", but provided
#' for compatibility purposes with other software under development.)
#'
#' @return A data frame with `ntrials` rows per model: one column holding the
#' per-trial performance measure (named after `output`) and a `model` column
#' labelling the model expression each row came from.
#'
#' @export
cv_pred_error <- function(..., k = 10, ntrials = 5,
                          output = c("mse", "likelihood", "error_rate", "class")) {
  output <- match.arg(output)
  # Record the unevaluated model expressions so each result row can be
  # labelled with the model it came from.
  full_names <- as.character(lapply(lazyeval::lazy_dots(...), FUN = function(x) x$expr))
  # Now for the models themselves.
  models <- list(...)
  # Map the requested output measure onto the prediction type that
  # kfold_trial() understands.
  type <- switch(output,
                 "mse" = "response",
                 "likelihood" = "likelihood",
                 "error_rate" = "class",
                 "class" = "class"  # same as error_rate
  )
  # Preallocate one result slot per model rather than growing a data frame
  # with rbind() inside the loop.
  results <- vector("list", length(models))
  for (counter in seq_along(models)) {
    this_mod <- models[[counter]]
    truth <- response_values(this_mod)
    pred_error_results <- numeric(ntrials)
    for (this_trial in seq_len(ntrials)) {
      # Model outputs for each test fold evaluated against the rest of the data.
      mod_output <- kfold_trial(this_mod, type = type)
      pred_error_results[this_trial] <-
        if (type == "class") {
          mean(truth != mod_output, na.rm = TRUE)
        } else if (type == "likelihood") {
          # Log-likelihood: NAs deliberately propagate (na.rm = FALSE) so a
          # fold with undefined likelihood is not silently dropped.
          sum(log(mod_output), na.rm = FALSE)
        } else {
          mean((truth - mod_output)^2, na.rm = TRUE)
        }
    }
    from_this_mod <- data.frame(pred_error_results, model = full_names[counter],
                                stringsAsFactors = FALSE)
    names(from_this_mod)[1] <- output  # e.g. "mse", "likelihood", etc.
    results[[counter]] <- from_this_mod
  }
  do.call(rbind, results)
}
class_helper <- function(mod, data) {
  # Predicted class for each case in `data`.
  # rpart classifiers return a factor (converted to character); binary glm
  # models are thresholded at a fitted probability of 0.5. Other model
  # classes are not supported and yield NA with a warning.
  if (inherits(mod, "rpart")) {
    which_class <- predict(mod, newdata = data, type = "class")
    # Convert the factor prediction to character so it compares cleanly
    # against raw response values upstream.
    which_class <- as.character(which_class)
  } else if (inherits(mod, "glm")) {
    probs <- predict(mod, newdata = data, type = "response")
    which_class <- probs >= 0.5
  } else {
    # Fixed: the original message said "likelihood_helper" (copy-paste) and
    # pasted class(mod) directly against the text with no separating space.
    warning("No class_helper function for models of class ",
            paste(class(mod), collapse = "/"))
    which_class <- NA
  }
  which_class
}
likelihood_helper <- function(mod, data) {
  # Likelihood of each observed response value under the fitted model.
  # NOTE(review): eval(parse()) assumes response_var(mod) returns the
  # response expression as a string evaluable against `data` -- confirm
  # against response_var(); data[[...]] would be safer for bare names.
  actual <- eval(parse(text = response_var(mod)), envir = data)
  if (inherits(mod, "rpart")) {
    # Probability matrix (cases x classes): pick out, row by row, the
    # column matching the observed class.
    probs <- predict(mod, newdata = data, type = "prob")
    # seq_len() instead of 1:nrow(data) so zero-row data yields an empty
    # index rather than the bogus c(1, 0).
    L <- probs[cbind(seq_len(nrow(data)), actual)]
  } else if (inherits(mod, "glm")) {
    # Binary glm: the likelihood is the fitted probability when the
    # response is TRUE/1, and its complement otherwise.
    probs <- predict(mod, newdata = data, type = "response")
    L <- ifelse(actual, probs, 1 - probs)
  } else {
    warning("No likelihood_helper function for models of class ",
            paste(class(mod), collapse = "/"))
    L <- NA
  }
  L
}
mse_helper <- function(mod, data) {
  # Model output (predicted response) for each case in `data`; the caller
  # compares these predictions against the actual response values to form
  # the mean square error. (Original comment said "likelihood" -- copy-paste.)
  # NOTE(review): eval(parse()) assumes response_var(mod) returns the
  # response expression as a string evaluable against `data` -- confirm.
  actual <- eval(parse(text = response_var(mod)), envir = data)
  # MSE only makes sense for a numeric response.
  if ( ! is.numeric(actual)) stop("Can't calculate MSE on a classifier.")
  if (inherits(mod, "rpart")) {
    # rpart regression trees return numeric predictions with type = "vector".
    L <- predict(mod, newdata = data, type = "vector")
  } else {
    # lm/glm-style models: predictions on the response scale.
    L <- predict(mod, newdata = data, type = "response")
  }
  L
}
|
6ee1dd2a44006a909cbfd0febc871b3622aaedb4
|
ea4a5b8e4d9be6141a055ed0f489062283767fc1
|
/Week7_Data Transformation/Code/DataTransformation.R
|
33d44c8bcf64be94f22698010e01acd259d5a060
|
[] |
no_license
|
anjalirawat82/EDAAssignment_CodePortfolio_AnjaliRawat
|
a88572f31f3eb7e7c79c016f5cc2edc08837b58d
|
7b01e99790ba028410cf556f8fef5eca055de0e9
|
refs/heads/master
| 2020-04-23T09:31:38.249844
| 2019-02-20T04:30:55
| 2019-02-20T04:30:55
| 171,071,324
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,760
|
r
|
DataTransformation.R
|
## Data Manipulation exercises (R for Data Science, chapter 5) using dplyr
## and the nycflights13 data set.

## filter() - extract rows that meet logical criteria.
## Logical and Boolean operators usable with filter():
##   <, >, <=, >=, !=, ==, is.na(), !is.na(), !, |, &, %in%
## Exercises 5.2.4
library(nycflights13)
library(tidyverse)
library(dplyr)

## Flights with an arrival delay of two or more hours
flightarrdelayequalgreater2 <- filter(flights, arr_delay >= 120)
## Flights that flew to Houston (IAH or HOU)
Houstonflights <- filter(flights, dest == "IAH" | dest == "HOU")
## Flights operated by United, American, or Delta
filter(flights, carrier %in% c("AA", "DL", "UA"))
## Flights that departed in summer (July, August, and September)
filter(flights, month >= 7, month <= 9)
filter(flights, month %in% 7:9)
## Flights that arrived more than two hours late, but didn't leave late
filter(flights, arr_delay > 120, dep_delay <= 0)
## Flights delayed by at least an hour that made up over 30 minutes in flight
filter(flights, dep_delay >= 60, dep_delay - arr_delay > 30)
## Flights that departed between midnight and 6am (inclusive)
filter(flights, dep_time <= 600 | dep_time == 2400)

## arrange() - order rows by column values (low to high);
## wrap a column in desc() to order from high to low.
## Exercises 5.3.1
## Use arrange() to sort all missing values to the start
arrange(flights, desc(is.na(dep_time)), dep_time)
## Most delayed flights / flights that left earliest
arrange(flights, desc(dep_delay))
arrange(flights, dep_delay)
## The most delayed flight was HA 51, JFK to HNL
## The flight that left earliest was B6 97 (JFK to DEN)
## Fastest flights
arrange(flights, air_time)
## Longest / shortest flights by distance
arrange(flights, desc(distance))
arrange(flights, distance)
## The longest flight is HA 51, JFK to HNL
## The shortest flight is US 1632, EWR to LGA

## select() - extract columns by name.
## Exercises 5.4.1
## Ways to select dep_time, dep_delay, arr_time, and arr_delay:
## column names as strings
select(flights, "dep_time", "dep_delay", "arr_time", "arr_delay")
## column names as bare variable names
select(flights, dep_time, dep_delay, arr_time, arr_delay)
## column positions
select(flights, 4, 6, 7, 9)
## Including a variable name multiple times in a select() call:
## select() ignores duplication -- duplicated variables are included once,
## in the first location they appear.
select(flights, year, month, day, year)
## one_of() selects variables named in a character vector, which is handy
## when the wanted columns are stored programmatically.
vars <- c("year", "month", "day", "dep_delay", "arr_delay")
select(flights, one_of(vars))
## The select helpers ignore case by default; pass ignore.case = FALSE
## to make matching case-sensitive.
select(flights, contains("TIME"))
select(flights, contains("TIME", ignore.case = FALSE))
## Helper functions usable within select(): starts_with(match),
## ends_with(match), contains(match), matches(match), num_range(prefix, range)

## mutate() - create new columns as functions of existing ones.
## Exercise 5.5.2
## dep_time and sched_dep_time are HHMM integers, so they are not truly
## continuous; convert them to minutes since midnight.
flightstimes <- mutate(flights,
                       dep_time_mins = (dep_time %/% 100 * 60 + dep_time %% 100) %% 1440,
                       sched_dep_time_mins = (sched_dep_time %/% 100 * 60 + sched_dep_time %% 100) %% 1440)
## View the columns relevant for times
select(flightstimes, dep_time, dep_time_mins, sched_dep_time, sched_dep_time_mins)
## Ten most delayed flights using a ranking function; min_rank() gives
## tied delays the same (minimum) rank.
flightsdelayed <- mutate(flights, dep_delay_rank = min_rank(-dep_delay))
flightsdelayed <- filter(flightsdelayed, dep_delay_rank <= 20)
arrange(flightsdelayed, dep_delay_rank)
## What does 1:3 + 1:10 return? The shorter vector is recycled, with a
## warning because 10 is not a multiple of 3:
1:3 + 1:10
## Warning message: In 1:3 + 1:10 : longer object length is not a
## multiple of shorter object length
## [1] 2 4 6 5 7 9 8 10 12 11
## Trigonometric functions provided by R include:
## cos(x), sin(x), tan(x), acos(x), asin(x), atan(x), atan2(y, x),
## cospi(x), sinpi(x), tanpi(x)
|
bf62da0c4ed5d66c15d43b77462556e781b3440f
|
4d975be8f019859b1673955d59c285b9208f51f2
|
/scripts/evaluacion.R
|
1bb657b8d34418ac2c9bb6d67c72d529457b8cc3
|
[] |
no_license
|
FranciscoJLopez/aguacate_af
|
efd53e0e5419dc3b421a4211737ede558fa722fa
|
36c78c25640eb6dc2642594e0846fb7c3f03b396
|
refs/heads/master
| 2020-06-27T04:00:21.956721
| 2019-07-31T12:07:53
| 2019-07-31T12:07:53
| 199,838,954
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 59,248
|
r
|
evaluacion.R
|
# Driver script for the avocado financial evaluation: refresh yield and
# price parameters from national agricultural data (agt_nal), run the
# financial model (mod_fin) and export its outputs to a formatted Excel
# workbook.
# NOTE(review): relies on `prmtrs`, `agt_nal` and `mod_fin()` being
# defined elsewhere in the project before this script is sourced.
library(tidyverse)
library(openxlsx)
library(extrafont)
# font_import() # use this function first if R was updated
loadfonts(device = "win")

# Review/adjust the parameter table interactively before running the model.
prmtrs <- edit(prmtrs)

#### general prices and yields updater ####
# Stabilized national yield: 2014-2018 production-weighted average yield,
# excluding modality 2.
yld_nal_est <- agt_nal %>%
  filter(anio %in% c(2014:2018), idmodalidad != 2) %>%
  summarise(yld_nal = sum(volumenproduccion * rendimiento) / sum(volumenproduccion))
# Initial yield assumed to be 37.5% of the stabilized yield.
yld_nal_ini <- 0.375 * yld_nal_est
prmtrs[prmtrs$clave == "pdn_yini", "Magnitud"] <- yld_nal_ini
prmtrs[prmtrs$clave == "pdn_yesta", "Magnitud"] <- yld_nal_est
# National mean rural price, weighted by production volume.
pmr_nal <- agt_nal %>%
  filter(anio %in% c(2014:2018), idmodalidad != 2) %>%
  summarise(pmr_nal = sum(volumenproduccion * preciomediorural) / sum(volumenproduccion))
# Divided by 1000 -- presumably converting MXN/tonne to MXN/kg; confirm
# against the parameter's unit-of-measure column.
prmtrs[prmtrs$clave == "com_p_nal", "Magnitud"] <- pmr_nal / 1000

#### execution ####
# Run the model: once to display, and twice more to keep copies for later
# inspection and for the Excel export below.
# NOTE(review): if mod_fin() is deterministic these repeated calls are
# redundant -- confirm.
mod_fin(prmtrs)
md_fn_10 <- mod_fin(prmtrs)
md_fn_xl <- mod_fin(prmtrs)

#### moving to excel ####
# One worksheet per output group: parameters, financing scheme, cash flow
# and financial statements.
wb <- createWorkbook()
modifyBaseFont(wb, fontSize = 9, fontName = "Lato")
addWorksheet(wb, sheetName = "prmtrs", gridLines = FALSE)
setColWidths(wb, "prmtrs", cols = 1:100, widths = 10)
addWorksheet(wb, sheetName = "fin_esq", gridLines = FALSE)
setColWidths(wb, "fin_esq", cols = 1:100, widths = 10)
addWorksheet(wb, sheetName = "flujo", gridLines = FALSE)
setColWidths(wb, "flujo", cols = 1:100, widths = 10)
addWorksheet(wb, sheetName = "edos_fin", gridLines = FALSE)
setColWidths(wb, "edos_fin", cols = 1:100, widths = 10)
# Shared header style for every table written below.
headSty <- createStyle(halign = "center", border = "bottom", borderColour = "yellowgreen",
                       borderStyle = "medium", textDecoration = "bold")
# Sheet "prmtrs": parameter table, then investments and costs.
# `ubicacion` ("location") tracks the next free start row on each sheet,
# leaving a 5-row gap between tables.
writeData(wb, sheet = "prmtrs", x = prmtrs %>% arrange(clave),
          startCol = "E", startRow = 10, headerStyle = headSty,
          borders = "rows", borderColour = "grey95", borderStyle = "thin")
ubicacion <- 10 + nrow(prmtrs) + 5
writeData(wb, sheet = "prmtrs", x = md_fn_xl$inv, startCol = "E",
          startRow = ubicacion, headerStyle = headSty,
          borders = "rows", borderColour = "grey95", borderStyle = "thin")
ubicacion <- ubicacion + nrow(md_fn_xl$inv) + 5
writeData(wb, sheet = "prmtrs", x = md_fn_xl$costos, startCol = "E",
          startRow = ubicacion, headerStyle = headSty,
          borders = "rows", borderColour = "grey95", borderStyle = "thin")
# Sheet "fin_esq": financing scheme (amounts, percentages, totals, loan).
writeData(wb, sheet = "fin_esq", x = md_fn_xl$sch_apy, startCol = "E",
          startRow = 10, headerStyle = headSty, borders = "rows",
          borderColour = "grey95", borderStyle = "thin")
ubicacion <- 10 + nrow(md_fn_xl$sch_apy) + 5
writeData(wb, sheet = "fin_esq", x = md_fn_xl$sch_apy_pct, startCol = "E",
          startRow = ubicacion, headerStyle = headSty, borders = "rows",
          borderColour = "grey95", borderStyle = "thin")
ubicacion <- ubicacion + nrow(md_fn_xl$sch_apy_pct) + 5
writeData(wb, sheet = "fin_esq", x = md_fn_xl$sch_gral, startCol = "E",
          startRow = ubicacion, headerStyle = headSty, borders = "rows",
          borderColour = "grey95", borderStyle = "thin")
ubicacion <- ubicacion + nrow(md_fn_xl$sch_gral) + 5
writeData(wb, sheet = "fin_esq", x = md_fn_xl$loan, startCol = "E",
          startRow = ubicacion, headerStyle = headSty, borders = "rows",
          borderColour = "grey95", borderStyle = "thin")
# Sheet "flujo": cash flow, financial indicators (reshaped to long form),
# sensitivity analysis and NPV probability intervals.
writeData(wb, sheet = "flujo", x = md_fn_xl$flujo, startCol = "E",
          startRow = 10, headerStyle = headSty, borders = "rows",
          borderColour = "grey95", borderStyle = "thin")
ubicacion <- 10 + nrow(md_fn_xl$flujo) + 5
writeData(wb, sheet = "flujo", x = md_fn_xl$ind_fin %>% gather(key = "indicador", value = "Magnitud"),
          startCol = "E", startRow = ubicacion, headerStyle = headSty,
          borders = "rows", borderColour = "grey95", borderStyle = "thin")
ubicacion <- ubicacion + nrow(md_fn_xl$ind_fin %>% gather(key = "indicador", value = "Magnitud")) + 5
writeData(wb, sheet = "flujo", x = md_fn_xl$sens,
          startCol = "E", startRow = ubicacion, headerStyle = headSty,
          borders = "rows", borderColour = "grey95", borderStyle = "thin")
ubicacion <- ubicacion + nrow(md_fn_xl$sens) + 5
writeData(wb, sheet = "flujo", x = md_fn_xl$prob_van_interval,
          startCol = "E", startRow = ubicacion, headerStyle = headSty,
          borders = "rows", borderColour = "grey95", borderStyle = "thin")
# Sheet "edos_fin": financial statements (income statement, cash-flow
# statement, balance sheet).
writeData(wb, sheet = "edos_fin", x = md_fn_xl$est_res, startCol = "E",
          startRow = 10, headerStyle = headSty, borders = "rows",
          borderColour = "grey95", borderStyle = "thin")
ubicacion <- 10 + nrow(md_fn_xl$est_res) + 5
writeData(wb, sheet = "edos_fin", x = md_fn_xl$est_fc,
          startCol = "E", startRow = ubicacion, headerStyle = headSty,
          borders = "rows", borderColour = "grey95", borderStyle = "thin")
ubicacion <- ubicacion + nrow(md_fn_xl$est_fc) + 5
writeData(wb, sheet = "edos_fin", x = md_fn_xl$est_sf,
          startCol = "E", startRow = ubicacion, headerStyle = headSty,
          borders = "rows", borderColour = "grey95", borderStyle = "thin")
saveWorkbook(wb, "~/OneDrive/R/evaluaciones/aguacate/test.xlsx", overwrite = TRUE)
#### generated functions ####
# inputs updater: necessary for changes in parameters
# Build every derived input table the financial model needs from the raw
# parameter dataframe `prmtrs` (one row per parameter; columns include
# Categoría, Concepto, Magnitud, UM and the key `clave`).
# Returns a named list consumed by mod_fin(), mod_fin_flujo(), sens_anlys()
# and risk_anlys().
# Relies on globals: cv_fun() (scale-adjusted variable cost curve) and the
# SIAP municipal series `agt_mun`.
inputs_updt <- function(prmtrs) {
  # creating a list from the dataframe to ease the manipulation of parameters:
  # prm$<clave>$Magnitud gives direct access to a single parameter value
  prm <- list()
  for(i in 1:nrow(prmtrs)) {
    prm[[i]] <- data.frame(prmtrs[i,], stringsAsFactors = F)
    names(prm)[i] <- prmtrs[i,"clave"]
  }
  # example of individual use
  #prm$pdn_ini$Magnitud
  # preparing costs to be added to the cash flow: variable costs are rescaled
  # with cv_fun()'s economies-of-scale curve, then all per-ha costs are turned
  # into total pesos (per-ha magnitude * surface).
  # NOTE(review): sum(Magnitud) is taken over BOTH cv and cf rows here —
  # confirm the cv total should be normalized by the combined sum.
  costos_mxp <- prmtrs %>%
    filter(Categoría %in% c("cv", "cf")) %>%
    mutate(Magnitud = if_else(Categoría == "cv", Magnitud * cv_fun(prm$pdn_sup$Magnitud, prmtrs) / sum(Magnitud), Magnitud)) %>%
    mutate(Magnitud = Magnitud * prm$pdn_sup$Magnitud,
           UM = "mxp")
  # preparing capital expenses (inversiones) to be added to the cash flow:
  # per-ha and per-plant unit costs converted to totals in pesos
  inv_mxp <- prmtrs %>%
    filter(Categoría == "inv") %>%
    mutate(Magnitud = if_else(UM == "mxp/ha", Magnitud * prm$pdn_sup$Magnitud, Magnitud)) %>%
    mutate(Magnitud = if_else(UM == "mxp/planta", Magnitud * prm$pdn_sup$Magnitud * prm$pdn_dens$Magnitud, Magnitud)) %>%
    mutate(UM = "mxp") %>%
    mutate_if(is.factor, as.character)
  # same clave-keyed list trick as `prm`, but for the investment rows
  inv <- list()
  for(i in 1:nrow(inv_mxp)) {
    inv[[i]] <- data.frame(inv_mxp[i,], stringsAsFactors = F)
    names(inv)[i] <- inv_mxp[i,"clave"]
  }
  # capex flow: one column per investment (named with the first 4 letters of
  # the concept), all spent in period 0
  fl_inv_mxp <- data.frame(periodo = 0:prm$eval_hrz$Magnitud)
  for(i in 1:nrow(inv_mxp)) {
    fl_inv_mxp[,stringr::str_extract(inv_mxp$Concepto[i], "([a-z|A-Z]){4}")] <- inv_mxp$Magnitud[i]
  }
  fl_inv_mxp <- fl_inv_mxp %>%
    mutate_at(vars(-periodo), ~if_else(periodo == 0, ., 0)) %>%
    mutate(total = rowSums(select(., -periodo)))
  # financial scheme: split subsidy-eligible investments into subsidy (apoyo,
  # capped by percentage and absolute-amount parameters per concept type),
  # credit and own contribution (aportación)
  sch_apy_mxp <- inv_mxp %>%
    filter(subcat1 == "suj_apoyo") %>%
    select(subcat2, Magnitud) %>%
    magrittr::set_colnames(c("concepto", "total")) %>%
    mutate(apoyo = if_else(concepto == "infra", if_else((total * prm$fin_tpctj_inf$Magnitud) > prm$fin_tmto_inf$Magnitud,
                                                        prm$fin_tmto_inf$Magnitud, total * prm$fin_tpctj_inf$Magnitud),
                           if_else((total * prm$fin_tpctj_equ$Magnitud) > prm$fin_tmto_equ$Magnitud,
                                   prm$fin_tmto_equ$Magnitud, total * prm$fin_tpctj_equ$Magnitud)),
           credito = if_else((total * prm$fin_monto$Magnitud + apoyo) > total, total - apoyo, total * prm$fin_monto$Magnitud),
           aportacion = total - apoyo - credito) %>%
    group_by(concepto) %>%
    summarise_all(~sum(.)) %>%
    gather(-concepto, key = "fuente", value = "mxp") %>%
    spread(concepto, mxp) %>%
    mutate(total = rowSums(select(., -fuente)))
  # same scheme expressed as a share of each column's maximum (the totals row)
  sch_apy_pctj <- sch_apy_mxp %>%
    mutate_at(vars(-fuente), ~./max(.))
  # overall funding scheme across ALL investments (credit-eligible +
  # subsidy-eligible), in pesos and as a share of total capex
  sch_gral_mxp <- data.frame(credito = inv_mxp %>%
                               filter(subcat1 == "suj_cred") %>%
                               summarise(sum(Magnitud)) %>%
                               pull() * prm$fin_monto$Magnitud + sch_apy_mxp %>% filter(fuente == "credito") %>% select(total) %>% pull(),
                             apoyo = sch_apy_mxp %>% filter(fuente == "apoyo") %>% select(total) %>% pull()) %>%
    mutate(aportacion = sum(inv_mxp$Magnitud) - credito - apoyo) %>%
    gather(key = "fuente", value = "mxp") %>%
    mutate(pct = mxp / sum(mxp))
  # funding sources as a flow: everything enters in period 0
  fl_sch_mxp <- data.frame(periodo = 0:prm$eval_hrz$Magnitud) %>%
    mutate(aportacion = if_else(periodo == 0, sch_gral_mxp %>% filter(fuente == "aportacion") %>% select(mxp) %>% pull(), 0),
           credito = if_else(periodo == 0, sch_gral_mxp %>% filter(fuente == "credito") %>% select(mxp) %>% pull(), 0),
           apoyo = if_else(periodo == 0, sch_gral_mxp %>% filter(fuente == "apoyo") %>% select(mxp) %>% pull(), 0))
  # loan taken: monthly amortization table over fin_plz months, with an
  # optional grace period (fin_pgr, interest-only) and level payments after
  loan <- data.frame(mes = 0:prm$fin_plz$Magnitud) %>%
    mutate(periodo = c(0, rep(1:(prm$fin_plz$Magnitud / 12), each = 12))) %>%
    mutate(saldo_ini = 0,
           interes = 0,
           pago = 0,
           amort = 0,
           saldo_fin = sch_gral_mxp[sch_gral_mxp$fuente == "credito", "mxp"])
  if(prm$fin_monto$Magnitud != 0) {
    if(prm$fin_pgr$Magnitud != 0) {
      # grace period: interest accrues (and is paid) but principal is untouched
      for(i in 2:(prm$fin_pgr$Magnitud + 1)) {
        loan$saldo_ini[i] <- loan$saldo_fin[i - 1]
        loan$interes[i] <- loan$saldo_fin[i - 1] * (prm$fin_tf$Magnitud/12)
        loan$saldo_fin[i] <- loan$saldo_ini[i]
      }
      # remaining months: fixed (annuity) payment over the shortened term
      for(i in (prm$fin_pgr$Magnitud + 2):(prm$fin_plz$Magnitud + 1)) {
        loan$saldo_ini[i] <- loan$saldo_fin[i - 1]
        loan$interes[i] <- loan$saldo_fin[i - 1] * (prm$fin_tf$Magnitud / 12)
        loan$pago[i] <- loan$saldo_fin[1] * (prm$fin_tf$Magnitud/12) / (1 - (1 + prm$fin_tf$Magnitud/12)^(-(prm$fin_plz$Magnitud - prm$fin_pgr$Magnitud)))
        loan$amort[i] <- loan$pago[i] - loan$interes[i]
        loan$saldo_fin[i] <- loan$saldo_ini[i] - loan$amort[i]
      }
    } else {
      # no grace period: standard annuity over the full term
      for(i in 2:(prm$fin_plz$Magnitud + 1)) {
        loan$saldo_ini[i] <- loan$saldo_fin[i - 1]
        loan$interes[i] <- loan$saldo_fin[i - 1] * (prm$fin_tf$Magnitud / 12)
        loan$pago[i] <- loan$saldo_fin[1] * (prm$fin_tf$Magnitud/12) / (1 - (1 + prm$fin_tf$Magnitud/12)^(-prm$fin_plz$Magnitud))
        loan$amort[i] <- loan$pago[i] - loan$interes[i]
        loan$saldo_fin[i] <- loan$saldo_ini[i] - loan$amort[i]
      }
    }
  }
  # loan: INPUT — annualized loan flow (interest, principal paid, year-end
  # outstanding balance), zero-filled beyond the loan term
  fl_loan_mxp <- data.frame(periodo = 0:prm$eval_hrz$Magnitud) %>%
    left_join(loan %>%
                group_by(periodo) %>%
                summarise(interes = sum(interes),
                          pago_capital = sum(amort),
                          deuda_lp = min(saldo_fin)),
              by = "periodo") %>%
    mutate_at(vars(-periodo), ~if_else(is.na(.), 0, .))
  # depreciation: INPUT — asset short-names extracted from "dpn_*" claves
  activos <- prmtrs %>%
    filter(grepl("dpn", clave)) %>%
    select(clave) %>%
    pull() %>%
    stringr::str_extract("(?<=dpn_)[a-z]+")
  # straight-line depreciation per asset: investment * annual rate
  fl_dpr_mxp <- data.frame(periodo = 0:prm$eval_hrz$Magnitud)
  for(i in 1:length(activos)) {
    fl_dpr_mxp[, paste0("dpn_", activos[i])] <- inv[[paste0("inv_", activos[i])]]$Magnitud * prm[[paste0("cont_dpn_", activos[i])]]$Magnitud
    fl_dpr_mxp[, paste0("tasa_", activos[i])] <- prm[[paste0("cont_dpn_", activos[i])]]$Magnitud
  }
  fl_dpr_mxp <- fl_dpr_mxp %>%
    mutate_at(vars(-periodo), ~if_else(periodo == 0, 0, .))
  # stop depreciating once the cumulative rate exceeds 100% (fully depreciated)
  for(j in 1:length(activos)) {
    for(i in 3:nrow(fl_dpr_mxp)) {
      if(sum(fl_dpr_mxp[2:i, paste0("tasa_", activos[j])]) > 1) {
        fl_dpr_mxp[i, paste0("dpn_", activos[j])] <- 0
      }
    }
  }
  # total depreciation across assets
  fl_dpr_mxp$dpn_total <- fl_dpr_mxp[, paste0("dpn_", activos[1])]
  for(i in 2:length(activos)) fl_dpr_mxp$dpn_total <- fl_dpr_mxp$dpn_total + fl_dpr_mxp[, paste0("dpn_", activos[i])]
  # production: INPUT — yield ramps linearly from first harvest (pdn_ini,
  # starting at pdn_yini t/ha) to maturity (pdn_esta, at pdn_yesta t/ha),
  # then stays flat; tonnage = yield * surface
  fl_pd_ton <- data.frame(periodo = 0:prm$eval_hrz$Magnitud,
                          yield = c(rep(0,prm$pdn_ini$Magnitud),
                                    seq(from = prm$pdn_yini$Magnitud,
                                        to = prm$pdn_yesta$Magnitud,
                                        length.out = prm$pdn_esta$Magnitud - prm$pdn_ini$Magnitud + 1),
                                    rep(prm$pdn_yesta$Magnitud, prm$eval_hrz$Magnitud - prm$pdn_esta$Magnitud))) %>%
    mutate(ton = yield * prm$pdn_sup$Magnitud)
  # revenues: INPUT — price either from the national price parameter
  # (com_pr_sel == 1) or as a production-weighted rural average price from
  # the SIAP series.
  # NOTE(review): idestado == 14 / idmunicipio == 86 are hard-coded — confirm
  # this is the intended state/municipality.
  fl_ing_mxp <- data.frame(periodo = 0:prm$eval_hrz$Magnitud) %>%
    mutate(pr_ton = if(prm$com_pr_sel$Magnitud == 1) {prm$com_p_nal$Magnitud * 1000} else {agt_mun %>%
        filter(idestado == 14, idmunicipio == 86, anio >= 2014) %>%
        select(anio, sembrada, volumenproduccion, rendimiento, preciomediorural) %>%
        arrange(anio) %>%
        summarise(pmr = sum(preciomediorural * volumenproduccion) / sum(volumenproduccion)) %>%
        pull()}) %>%
    mutate(ing_mxp = pr_ton * fl_pd_ton$ton)
  # expenses: INPUT — variable and fixed costs phased in along a linear
  # growth path (`gpath`) up to full costs at maturity; zero in period 0
  fl_cts_mxp <- data.frame(periodo = 0:prm$eval_hrz$Magnitud) %>%
    mutate(cv = costos_mxp %>%
             filter(Categoría == "cv") %>%
             summarise(sum(Magnitud)) %>%
             pull(),
           cf = costos_mxp %>%
             filter(Categoría == "cf") %>%
             summarise(sum(Magnitud)) %>%
             pull(),
           gpath = c(seq(from = 0, to = 1, length.out = prm$pdn_esta$Magnitud + 1),
                     rep(1, prm$eval_hrz$Magnitud - prm$pdn_esta$Magnitud)),
           cv_mxp = cv * gpath,
           cf_mxp = cf * gpath) %>%
    mutate_at(vars(-periodo), ~if_else(periodo == 0, 0, .))
  # risk analysis series: state-level production-weighted rural price ...
  serie_pmr <- agt_mun %>%
    filter(anio >= 2016) %>%
    group_by(anio, idestado) %>%
    summarise(pmr = sum(preciomediorural * volumenproduccion) / sum(volumenproduccion)) %>%
    ungroup() %>%
    select(pmr) %>%
    pull()
  # ... and production-weighted yield, used to fit probability distributions
  serie_yld <- agt_mun %>%
    filter(anio >= 2016) %>%
    group_by(anio, idestado) %>%
    summarise(pmr = sum(rendimiento * volumenproduccion) / sum(volumenproduccion)) %>%
    ungroup() %>%
    select(pmr) %>%
    pull()
  # bundle everything the downstream model functions expect
  inputs <- list(prm = prm,
                 costos_mxp = costos_mxp,
                 inv_mxp = inv_mxp,
                 inv = inv,
                 sch_apy_mxp = sch_apy_mxp,
                 sch_apy_pctj = sch_apy_pctj,
                 sch_gral_mxp = sch_gral_mxp,
                 fl_sch_mxp = fl_sch_mxp,
                 fl_inv_mxp = fl_inv_mxp,
                 loan = loan,
                 fl_loan_mxp = fl_loan_mxp,
                 fl_dpr_mxp = fl_dpr_mxp,
                 fl_pd_ton = fl_pd_ton,
                 fl_ing_mxp = fl_ing_mxp,
                 fl_cts_mxp = fl_cts_mxp,
                 serie_pmr = serie_pmr,
                 serie_yld = serie_yld)
  return(inputs)
}
# feasability indicators and other formulae
# Net present value of a cash-flow vector `cf` (first element is period 0)
# at a per-period discount rate `r`.
NPV <- function(cf, r) {
  periods <- seq_along(cf) - 1
  sum(cf * (1 + r)^(-periods))
}
# Internal rate of return: the rate at which NPV(cf, r) crosses zero,
# found numerically (interval auto-extended if needed).
IRR <- function(cf) {
  uniroot(NPV, interval = c(1e-10, 1e+10), extendInt = "yes", cf = cf)$root
}
# Level (annuity) payment that amortizes `monto` over `n` periods at rate `r`.
pago <- function(monto, r, n) {
  (monto * r) / (1 - (1 + r)^(-n))
}
# vnp generator using just the cash flow
# Fast NPV-only evaluation of the project cash flow for a prepared `inputs`
# list (from inputs_updt()). Used repeatedly by sens_anlys() and risk_anlys().
# prm$eval_sel_flj$Magnitud == 2 selects the financed flow (interest, credit,
# subsidy, repayments included); any other value gives the pure project flow.
# Returns list(van = NPV in thousands of pesos).
mod_fin_flujo <- function(inputs) {
  # unpack the inputs list into the function environment (prm, fl_ing_mxp, ...)
  for(i in 1:length(inputs)) assign(names(inputs)[i], inputs[[i]])
  flujo <- data.frame(periodo = 0:prm$eval_hrz$Magnitud) %>%
    mutate(ing = fl_ing_mxp$ing_mxp,
           cv = fl_cts_mxp$cv_mxp,
           cf = fl_cts_mxp$cf_mxp,
           ebitda = ing - cv - cf,
           intereses = if(prm$eval_sel_flj$Magnitud == 2) {fl_loan_mxp$interes} else {0},
           dpr = fl_dpr_mxp$dpn_total,
           ebt = ebitda - intereses - dpr,
           # income tax only on positive pre-tax earnings
           taxes = if_else(ebt <0, 0, ebt * prm$cont_tax$Magnitud),
           profit = ebt - taxes,
           capex = fl_inv_mxp$total,
           credito = if(prm$eval_sel_flj$Magnitud == 2) {sch_gral_mxp[sch_gral_mxp$fuente == "credito", "mxp"]} else {0},
           apoyo = if(prm$eval_sel_flj$Magnitud == 2) {sch_gral_mxp[sch_gral_mxp$fuente == "apoyo", "mxp"]} else {0},
           wk = 0,
           repayment = if(prm$eval_sel_flj$Magnitud == 2) {fl_loan_mxp$pago_capital} else {0},
           tv = 0)
  # working capital: when next period's revenue cannot cover its cash
  # outflows, provision the full outflow one period earlier.
  # NOTE(review): wk[i] is set from period i+1 figures — confirm the
  # one-period lead is intentional.
  for(i in 1:(prm$eval_hrz$Magnitud)) {
    if(flujo$ing[i + 1] - (flujo$cv[i + 1] + flujo$cf[i + 1] + flujo$intereses[i + 1] + flujo$taxes[i + 1]) < 0) {
      flujo$wk[i] <- (flujo$cv[i + 1] + flujo$cf[i + 1] + flujo$intereses[i + 1] + flujo$taxes[i + 1])
    }
  }
  # terminal value: final-year profit as a perpetuity at the discount rate
  flujo$tv[prm$eval_hrz$Magnitud + 1] <- flujo$profit[prm$eval_hrz$Magnitud + 1] / prm$eval_td$Magnitud
  flujo <- flujo %>%
    mutate(fcf = profit - capex + credito + apoyo - wk - repayment + dpr + tv,
           factor_dto = 1/(1 + prm$eval_td$Magnitud)^periodo,
           fcf_pv = fcf * factor_dto,
           fcf_pv_cum = cumsum(fcf_pv))
  fi_df <- data.frame(npv = NPV(flujo$fcf, prm$eval_td$Magnitud))
  # VAN reported in thousands of pesos
  return(list(van = fi_df$npv / 1000))
}
# Variant of mod_fin_flujo() used by the scale search (esca_min()): variable
# costs come from cv_fun()'s economies-of-scale curve instead of the
# pre-built fl_cts_mxp, and the terminal value is deliberately disabled
# (tv stays 0; the perpetuity line is commented out below).
# NOTE(review): `prmtrs_aux` is a FREE variable here — it is resolved in the
# global environment, NOT in esca_min()'s local scope. Confirm a global
# `prmtrs_aux` exists when this runs, or pass it explicitly.
mod_fin_flujo_esc <- function(inputs) {
  # unpack the inputs list into the function environment
  for(i in 1:length(inputs)) assign(names(inputs)[i], inputs[[i]])
  flujo <- data.frame(periodo = 0:prm$eval_hrz$Magnitud) %>%
    mutate(ing = fl_ing_mxp$ing_mxp,
           # NOTE(review): cv_fun(horizon, ...) — first arg is elsewhere a
           # surface in ha; here the evaluation horizon is passed. Confirm.
           cv = cv_fun(prm$eval_hrz$Magnitud, prmtrs_aux),
           cf = fl_cts_mxp$cf_mxp,
           ebitda = ing - cv - cf,
           intereses = if(prm$eval_sel_flj$Magnitud == 2) {fl_loan_mxp$interes} else {0},
           dpr = fl_dpr_mxp$dpn_total,
           ebt = ebitda - intereses - dpr,
           taxes = if_else(ebt <0, 0, ebt * prm$cont_tax$Magnitud),
           profit = ebt - taxes,
           capex = fl_inv_mxp$total,
           credito = if(prm$eval_sel_flj$Magnitud == 2) {sch_gral_mxp[sch_gral_mxp$fuente == "credito", "mxp"]} else {0},
           apoyo = if(prm$eval_sel_flj$Magnitud == 2) {sch_gral_mxp[sch_gral_mxp$fuente == "apoyo", "mxp"]} else {0},
           wk = 0,
           repayment = if(prm$eval_sel_flj$Magnitud == 2) {fl_loan_mxp$pago_capital} else {0},
           tv = 0)
  # working capital provision (see mod_fin_flujo for the same logic)
  for(i in 1:(prm$eval_hrz$Magnitud)) {
    if(flujo$ing[i + 1] - (flujo$cv[i + 1] + flujo$cf[i + 1] + flujo$intereses[i + 1] + flujo$taxes[i + 1]) < 0) {
      flujo$wk[i] <- (flujo$cv[i + 1] + flujo$cf[i + 1] + flujo$intereses[i + 1] + flujo$taxes[i + 1])
    }
  }
  # terminal value intentionally excluded from the scale search:
  #flujo$tv[prm$eval_hrz$Magnitud + 1] <- flujo$profit[prm$eval_hrz$Magnitud + 1] / prm$eval_td$Magnitud
  flujo <- flujo %>%
    mutate(fcf = profit - capex + credito + apoyo - wk - repayment + dpr + tv,
           factor_dto = 1/(1 + prm$eval_td$Magnitud)^periodo,
           fcf_pv = fcf * factor_dto,
           fcf_pv_cum = cumsum(fcf_pv))
  fi_df <- data.frame(npv = NPV(flujo$fcf, prm$eval_td$Magnitud))
  # VAN reported in thousands of pesos
  return(list(van = fi_df$npv / 1000))
}
# sensitivity analysis
# Break-even sensitivity analysis.
# For each driver (yield, farm-gate price, variable cost) find the
# multiplicative factor that takes the project NPV to zero — first on a
# coarse grid (0.1 to 10 by 0.1), then on a fine grid (+/- 0.1 by 0.01
# around the coarse solution) — and report the implied elasticity
# -1 / (factor - 1).
# The original implementation repeated the same ~30-line search three times;
# it is extracted here into helpers. Behavior is unchanged.
#
# Args:
#   inputs: list built by inputs_updt().
# Returns:
#   data.frame(variable = c("yield", "pmr", "cv"), elasticidad = ...).
sens_anlys <- function(inputs) {
  # run the coarse+fine zero-VAN search for one driver; `with_factor(f)`
  # returns a copy of `inputs` with the driver scaled by `f`
  elasticity_for <- function(with_factor) {
    van_at <- function(fact) mod_fin_flujo(with_factor(fact))$van
    closest_to_zero <- function(factors) {
      vans <- vapply(factors, van_at, numeric(1))
      factors[which.min(abs(vans))]
    }
    coarse <- closest_to_zero(seq(0.1, 10, by = 0.1))
    fine <- closest_to_zero(seq(coarse - 0.1, coarse + 0.1, by = 0.01))
    -1 / (fine - 1)
  }
  # driver 1: production yield scaled, revenue recomputed from the new tonnage
  scale_yield <- function(fact) {
    s <- inputs
    s$fl_pd_ton <- data.frame(periodo = inputs$prm$eval_hrz$Magnitud,
                              yield = inputs$fl_pd_ton$yield) %>%
      mutate(sens_fact = fact) %>%
      mutate(ton = yield * sens_fact * inputs$prm$pdn_sup$Magnitud)
    s$fl_ing_mxp <- data.frame(periodo = inputs$fl_ing_mxp$periodo) %>%
      mutate(pr_ton = inputs$fl_ing_mxp$pr_ton) %>%
      mutate(ing_mxp = pr_ton * s$fl_pd_ton$ton)
    s
  }
  # driver 2: farm-gate price (pmr) scaled, tonnage unchanged
  scale_price <- function(fact) {
    s <- inputs
    s$fl_ing_mxp <- data.frame(periodo = inputs$fl_ing_mxp$periodo) %>%
      mutate(pr_ton = inputs$fl_ing_mxp$pr_ton) %>%
      mutate(sens_fact = fact) %>%
      mutate(ing_mxp = pr_ton * sens_fact * inputs$fl_pd_ton$ton)
    s
  }
  # driver 3: variable costs scaled (fixed costs untouched); period 0 zeroed
  scale_cv <- function(fact) {
    s <- inputs
    s$fl_cts_mxp <- data.frame(periodo = inputs$fl_cts_mxp$periodo) %>%
      mutate(cv = inputs$fl_cts_mxp$cv,
             sens_fact = fact,
             cf = inputs$fl_cts_mxp$cf,
             gpath = inputs$fl_cts_mxp$gpath,
             cv_mxp = cv * gpath * sens_fact,
             cf_mxp = cf * gpath) %>%
      mutate_at(vars(-periodo), ~if_else(periodo == 0, 0, .))
    s
  }
  # output: elasticities dataframe (same shape and row order as before)
  sens_van0 <- data.frame(variable = c("yield", "pmr", "cv"),
                          elasticidad = c(elasticity_for(scale_yield),
                                          elasticity_for(scale_price),
                                          elasticity_for(scale_cv)))
  return(sens_van0)
}
# risk analysis
# Rescale a numeric series into a small positive range so distribution
# fitting (e.g. beta) is numerically feasible.
# Steps: (1) if negatives are present, shift by abs(min) + 1 so every value
# is >= 1; (2) divide by 10^(digit count of the truncated maximum).
# Returns list(serie, reescala_sust = shift, reescala_mult = divisor) so the
# transformation can be inverted later.
fit_escala <- function(serie) {
  # additive shift, only when the series dips below zero
  if(min(serie) < 0) {
    reescala_sust <- abs(min(serie)) + 1
  } else {
    reescala_sust <- 0
  }
  serie <- serie + reescala_sust
  # multiplicative rescale by the next power of ten.
  # NOTE(review): nchar(trunc(max(serie))) is always >= 1, so this branch is
  # always taken and the reescala_mult <- 1 alternative below is unreachable;
  # kept for parity with the original.
  if(nchar(trunc(max(serie))) > 0) {
    reescala_mult <- 10^nchar(trunc(max(serie)))
  } else {
    reescala_mult <- 1
  }
  serie <- serie / reescala_mult
  list(serie = serie,
       reescala_sust = reescala_sust,
       reescala_mult = reescala_mult)
}
# Fit every candidate distribution named in the global `dist_names` vector
# to `serie` (already rescaled by fit_escala()) and pick the best one by the
# Kolmogorov-Smirnov statistic.
#
# Improvements over the original:
# - drops attach()/detach() (modified the search path; fragile if the
#   function errored before detach()),
# - drops the hardcoded fit list/fitnames that duplicated `dist_names` and
#   could silently desync from it (the ks index is now guaranteed to match).
#
# Args:
#   serie: numeric vector, typically scaled into (0, 1].
# Returns:
#   list(dist_ajust = winning fitdist object,
#        dist_win   = winning distribution name,
#        dist_win_par = its estimated parameters).
fit_selec <- function(serie) {
  # one fitdist object per candidate, in dist_names order
  ajustes <- lapply(dist_names, function(d) {
    fitdistrplus::fitdist(serie, distr = d)
  })
  names(ajustes) <- paste0("fit_", dist_names)
  # goodness-of-fit statistics across all candidates
  fit_sel <- fitdistrplus::gofstat(ajustes, fitnames = dist_names)
  # smallest KS statistic wins
  best <- which.min(fit_sel$ks)
  list(dist_ajust = ajustes[[best]],
       dist_win = dist_names[best],
       dist_win_par = ajustes[[best]]$estimate)
}
# Monte Carlo risk analysis of the project NPV.
# Fits probability distributions to the historical price (serie_pmr) and
# yield (serie_yld) series, draws 1000 (price, yield) scenarios, evaluates
# the NPV for each, fits a distribution to the simulated NPVs and reports a
# 95% interval plus P(NPV < 0).
#
# BUG FIX: the "exp" branches previously referenced the undefined objects
# `risk_yld_win` / `risk_pmr_win`; they now use the fitted parameter lists
# `risk_yld_par` / `risk_pmr_par`.
#
# NOTE(review): the serie_pmr / serie_yld arguments are ignored — the series
# are read from inputs$serie_pmr / inputs$serie_yld. Signature kept for
# compatibility with existing callers.
#
# Returns a list: raw and rescaled simulated NPVs, winning pdf name and
# parameters, P(NPV < 0) as a formatted string, and the 95% NPV interval.
risk_anlys <- function(serie_pmr, serie_yld, inputs) {
  inputs_risk <- inputs
  # rescale both series so all candidate distributions can be fitted
  risk_pmr_scaled <- fit_escala(inputs$serie_pmr)
  risk_yld_scaled <- fit_escala(inputs$serie_yld)
  risk_pmr_par <- fit_selec(risk_pmr_scaled$serie)
  risk_yld_par <- fit_selec(risk_yld_scaled$serie)
  # one random draw from a fitted distribution, mapped back to the original
  # scale ("exp" takes one parameter, all other candidates take two)
  draw_scaled <- function(fit, scaled) {
    raw <- if(fit$dist_win != "exp") {
      get(paste0("r", fit$dist_win))(1, fit$dist_win_par[1], fit$dist_win_par[2])
    } else {
      rexp(1, fit$dist_win_par[1])
    }
    raw * scaled$reescala_mult - scaled$reescala_sust
  }
  risk_vans <- rep(NA, 1000)
  for(i in 1:1000) {
    # scenario: a single yield draw (recycled over all periods) ...
    inputs_risk$fl_pd_ton <- data.frame(periodo = inputs$fl_pd_ton$periodo,
                                        yield = draw_scaled(risk_yld_par, risk_yld_scaled)) %>%
      mutate(ton = yield * inputs$prm$pdn_sup$Magnitud)
    # ... and a single price draw, combined into revenue
    inputs_risk$fl_ing_mxp <- data.frame(periodo = inputs$fl_ing_mxp$periodo) %>%
      mutate(pr_ton = draw_scaled(risk_pmr_par, risk_pmr_scaled)) %>%
      mutate(ing_mxp = pr_ton * inputs_risk$fl_pd_ton$ton)
    risk_vans[i] <- mod_fin_flujo(inputs_risk)$van
  }
  # fit a distribution to the simulated NPVs
  risk_van_scaled <- fit_escala(risk_vans)
  risk_van_par <- fit_selec(risk_van_scaled$serie)
  # quantile of the fitted NPV distribution, back on the original scale
  q_van <- function(p) {
    raw <- if(risk_van_par$dist_win != "exp") {
      get(paste0("q", risk_van_par$dist_win))(p, risk_van_par$dist_win_par[1], risk_van_par$dist_win_par[2])
    } else {
      qexp(p, risk_van_par$dist_win_par[1])
    }
    raw * risk_van_scaled$reescala_mult - risk_van_scaled$reescala_sust
  }
  # 95% confidence interval for the NPV (thousands of pesos)
  prob_van <- data.frame(limites = c("inferior", "superior"),
                         van_000_mxp = c(q_van(0.025), q_van(0.975))) %>%
    mutate(van_000_mxp = round(van_000_mxp, 0))
  # probability of a negative NPV, evaluated just below zero on the
  # rescaled axis, formatted to two decimals
  x0 <- (-0.01 + risk_van_scaled$reescala_sust) / risk_van_scaled$reescala_mult
  risk_rate <- (if(risk_van_par$dist_win != "exp") {
    get(paste0("p", risk_van_par$dist_win))(x0, risk_van_par$dist_win_par[1], risk_van_par$dist_win_par[2])
  } else {
    pexp(x0, risk_van_par$dist_win_par[1])
  }) %>% round(., 2) %>% format(., nsmall = 2)
  return(list(risk_vans = risk_vans %>% tibble(),
              risk_vans_scaled = risk_van_scaled$serie %>% tibble(),
              pdf = risk_van_par$dist_win,
              pdf_par = risk_van_par$dist_win_par,
              risk_rate = risk_rate,
              prob_van_interval = prob_van))
}
# the whole outputs model generator
# free cash flow: OUTPUT Function
# Full financial model: from the raw parameter dataframe `prmtrs`, build the
# cash flow, feasibility indicators, pro-forma financial statements (income
# statement, cash-flow statement, balance sheet), sensitivity elasticities
# and Monte Carlo risk results. Returns everything as a named list (tables
# formatted in thousands of pesos where *_fmt).
mod_fin <- function(prmtrs) {
  inputs <- inputs_updt(prmtrs)
  # unpack the inputs list into the function environment (prm, fl_*, sch_*)
  for(i in 1:length(inputs)) assign(names(inputs)[i], inputs[[i]])
  # free cash flow; eval_sel_flj == 2 keeps the financing lines
  # (interest, credit, subsidy, repayments), otherwise they are zeroed
  flujo <- data.frame(periodo = 0:prm$eval_hrz$Magnitud) %>%
    mutate(ing = fl_ing_mxp$ing_mxp,
           cv = fl_cts_mxp$cv_mxp,
           cf = fl_cts_mxp$cf_mxp,
           ebitda = ing - cv - cf,
           intereses = fl_loan_mxp$interes,
           intereses = if(prm$eval_sel_flj$Magnitud == 2) {intereses} else {rep(0, prm$eval_hrz$Magnitud + 1)},
           dpr = fl_dpr_mxp$dpn_total,
           ebt = ebitda - intereses - dpr,
           # income tax only on positive pre-tax earnings
           taxes = if_else(ebt <0, 0, ebt * prm$cont_tax$Magnitud),
           profit = ebt - taxes,
           capex = fl_inv_mxp$total,
           credito = fl_sch_mxp$credito,
           credito = if(prm$eval_sel_flj$Magnitud == 2) {credito} else {rep(0, prm$eval_hrz$Magnitud + 1)},
           apoyo = fl_sch_mxp$apoyo,
           apoyo = if(prm$eval_sel_flj$Magnitud == 2) {apoyo} else {rep(0, prm$eval_hrz$Magnitud + 1)},
           wk = 0,
           repayment = fl_loan_mxp$pago_capital,
           repayment = if(prm$eval_sel_flj$Magnitud == 2) {repayment} else {rep(0, prm$eval_hrz$Magnitud + 1)},
           tv = 0)
  # working capital: provision next period's cash shortfall one period early
  # (same one-period lead as mod_fin_flujo — NOTE(review): confirm intended)
  for(i in 1:(prm$eval_hrz$Magnitud)) {
    if(flujo$ing[i + 1] - (flujo$cv[i + 1] + flujo$cf[i + 1] + flujo$intereses[i + 1] + flujo$taxes[i + 1]) < 0) {
      flujo$wk[i] <- (flujo$cv[i + 1] + flujo$cf[i + 1] + flujo$intereses[i + 1] + flujo$taxes[i + 1])
    }
  }
  # terminal value: final-year profit as a perpetuity at the discount rate
  flujo$tv[prm$eval_hrz$Magnitud + 1] <- flujo$profit[prm$eval_hrz$Magnitud + 1] / prm$eval_td$Magnitud
  flujo <- flujo %>%
    mutate(fcf = profit - capex + credito + apoyo - wk - repayment + dpr + tv,
           factor_dto = 1/(1 + prm$eval_td$Magnitud)^periodo,
           fcf_pv = fcf * factor_dto,
           fcf_pv_cum = cumsum(fcf_pv))
  # presentation version: thousands of pesos, concepts as rows and periods
  # as columns, in the original column order
  orden <- names(flujo)[-1]
  flujo_fmt <- flujo %>%
    mutate_at(vars(-factor_dto, -periodo), ~round(./1000, digits = 0)) %>%
    select(-factor_dto) %>% gather(concepto, "000_mxp", -periodo) %>% spread(periodo, "000_mxp") %>%
    mutate(concepto = factor(concepto, levels = orden)) %>% .[order(.$concepto),]
  # feasibility indicators: NPV, annual equivalent value, IRR,
  # benefit/cost ratio and discounted payback (NA if never recovered)
  fi_df <- data.frame(npv = round(NPV(flujo$fcf, prm$eval_td$Magnitud) / 1000),
                      aev = round(pago(sum(flujo$fcf_pv), prm$eval_td$Magnitud, prm$eval_hrz$Magnitud) / 1000),
                      irr = IRR(flujo$fcf) %>% round(., 3) %>% format(., nsmall = 2),
                      bcr = ((sum((flujo$profit - flujo$repayment + flujo$dpr + flujo$tv) * flujo$factor_dto)) /
                               (sum((flujo$capex + flujo$wk) * flujo$factor_dto))) %>% round(., 2) %>% format(., nsmall = 2),
                      pbk = if(sum(flujo$fcf_pv_cum < 0) - 1 == prm$eval_hrz$Magnitud) {NA} else {sum(flujo$fcf_pv_cum < 0)})
  # income statement (always includes interest, regardless of eval_sel_flj)
  est_res <- data.frame(periodo = flujo$periodo) %>%
    mutate(ing = fl_ing_mxp$ing_mxp,
           cv = fl_cts_mxp$cv_mxp,
           cf = fl_cts_mxp$cf_mxp,
           ebitda = ing - cv - cf,
           intereses = fl_loan_mxp$interes,
           dpr = fl_dpr_mxp$dpn_total,
           ebt = ebitda - intereses - dpr,
           taxes = if_else(ebt <0, 0, ebt * prm$cont_tax$Magnitud),
           profit = ebt - taxes)
  orden <- names(est_res)[-1]
  est_res_fmt <- est_res %>%
    mutate_at(vars(-periodo), ~round(./1000, digits = 0)) %>%
    gather(concepto, "000_mxp", -periodo) %>% spread(periodo, "000_mxp") %>%
    mutate(concepto = factor(concepto, levels = orden)) %>% .[order(.$concepto),]
  # working-capital accounts: inventory, receivables and payables as months
  # of revenue/costs (cont_invent / cont_cc / cont_cp parameters)
  cts_circ <- data.frame(periodo = flujo$periodo) %>%
    mutate(almacen = 0,
           cpc = 0,
           cpp = 0,
           almacen = if_else(periodo == 0, 0, fl_ing_mxp$ing_mxp * prm$cont_invent$Magnitud/12),
           cpc = if_else(periodo == 0, 0, fl_ing_mxp$ing_mxp * prm$cont_cc$Magnitud/12),
           cpp = if_else(periodo == 0,0, (fl_cts_mxp$cv + fl_cts_mxp$cf) * prm$cont_cp$Magnitud/12),
           taxes = if_else(periodo == 0, 0, flujo$taxes))
  # cash-flow statement: funds from operations plus changes in current
  # assets/liabilities
  est_fc <- data.frame(periodo = flujo$periodo) %>%
    mutate(profit = est_res$profit,
           dpr = est_res$dpr,
           fond_act_op = profit + dpr,
           var_act_circ = 0,
           var_pas_circ = 0)
  for(i in 2:length(est_fc$periodo)) {
    est_fc$var_act_circ[i] <- sum(cts_circ$almacen[i] + cts_circ$cpc[i]) - sum(cts_circ$almacen[i - 1] + cts_circ$cpc[i - 1])
    est_fc$var_pas_circ[i] <- sum(cts_circ$cpp[i] + cts_circ$taxes[i]) - sum(cts_circ$cpp[i - 1] + cts_circ$taxes[i - 1])
  }
  est_fc <- est_fc %>%
    mutate(caja_act_op = fond_act_op + var_act_circ - var_pas_circ,
           inv_sin_wk = fl_inv_mxp$total,
           creditos = flujo$credito,
           apoyos = flujo$apoyo,
           amort_cred = flujo$repayment,
           # owner contributions: working-capital injections plus the
           # period-0 equity contribution from the funding scheme
           aport = flujo$wk + if_else(periodo == 0, sch_gral_mxp[sch_gral_mxp$fuente == "aportacion", "mxp"], 0),
           var_caja = caja_act_op - inv_sin_wk + creditos + apoyos - amort_cred + aport,
           caja_ini = 0,
           caja_ter = var_caja + caja_ini)
  # roll the cash balance forward period by period
  for(i in 2:length(est_fc$periodo)) {
    est_fc$caja_ini[i] <- est_fc$caja_ter[i - 1]
    est_fc$caja_ter[i] <- est_fc$var_caja[i] + est_fc$caja_ini[i]
  }
  orden <- names(est_fc)[-1]
  est_fc_fmt <- est_fc %>%
    mutate_at(vars(-periodo), ~round(./1000, digits = 0)) %>%
    gather(concepto, "000_mxp", -periodo) %>% spread(periodo, "000_mxp") %>%
    mutate(concepto = factor(concepto, levels = orden)) %>% .[order(.$concepto),]
  # balance sheet: net fixed assets (cost minus accumulated depreciation,
  # subsidized assets shown net of the subsidy share) + current assets vs
  # liabilities and equity.
  # NOTE(review): relies on fl_inv_mxp columns Maqu/Tran/Plan/Prep (4-letter
  # concept prefixes) and fl_dpr_mxp columns dpn_mye/dpn_trans/dpn_prep —
  # confirm these claves exist in the parameter set.
  est_sf <- data.frame(periodo = flujo$periodo) %>%
    mutate(mye = (cumsum(fl_inv_mxp$Maqu) - cumsum(fl_dpr_mxp$dpn_mye)) * (1 - sch_gral_mxp[sch_gral_mxp$fuente == "apoyo", "pct"]),
           trans = (cumsum(fl_inv_mxp$Tran) - cumsum(fl_dpr_mxp$dpn_trans)) * (1 - sch_gral_mxp[sch_gral_mxp$fuente == "apoyo", "pct"]),
           plantula = cumsum(fl_inv_mxp$Plan),
           prep_terr = cumsum(fl_inv_mxp$Prep) - cumsum(fl_dpr_mxp$dpn_prep),
           caja = est_fc$caja_ter,
           almacen = cts_circ$almacen,
           cpc = cts_circ$cpc,
           tot_activos = mye + trans + plantula + prep_terr + caja + almacen + cpc,
           prestamos = fl_loan_mxp$deuda_lp,
           cpp = cts_circ$cpp,
           taxes = cts_circ$taxes,
           tot_pasivos = prestamos + cpp + taxes,
           cap_soc = sch_gral_mxp[sch_gral_mxp$fuente == "aportacion", "mxp"],
           aport_pend_cap = cumsum(flujo$wk),
           ut_ret = if_else(cumsum(est_res$profit) < 0, 0, cumsum(est_res$profit)),
           tot_capital = cap_soc + aport_pend_cap + ut_ret,
           pasivos_capital = tot_pasivos + tot_capital)
  # force the balance sheet to balance by plugging the difference into
  # pending capital contributions
  ajuste <- est_sf$tot_activos - est_sf$pasivos_capital
  est_sf <- est_sf %>%
    mutate(aport_pend_cap = aport_pend_cap + ajuste,
           tot_capital = cap_soc + aport_pend_cap + ut_ret,
           pasivos_capital = tot_pasivos + tot_capital)
  orden <- names(est_sf)[-1]
  est_sf_fmt <- est_sf %>%
    mutate_at(vars(-periodo), ~round(./1000, digits = 0)) %>%
    gather(concepto, "000_mxp", -periodo) %>% spread(periodo, "000_mxp") %>%
    mutate(concepto = factor(concepto, levels = orden)) %>% .[order(.$concepto),]
  # sensitivity elasticities and Monte Carlo NPV risk
  elast_df <- sens_anlys(inputs)
  prob_van <- risk_anlys(inputs$serie_pmr, inputs$serie_yld, inputs)
  # assemble the output list; monetary tables reported in thousands of pesos
  modelo <- list(costos = inputs$costos_mxp %>% mutate(Magnitud = round(Magnitud / 1000, 1),
                                                       UM = "000_mxp"),
                 # investments plus a synthetic "working capital" row valued
                 # at the present value of the wk provisions
                 inv = inputs$inv_mxp %>%
                   rbind(data.frame(Categoría = "inv", Concepto = "Capital de trabajo",
                                    Magnitud = NPV(flujo$wk, prm$eval_td$Magnitud), UM = "mxp",
                                    clave = "inv_ctrab", subcat1 = "suj_null", subcat2 = NA)) %>%
                   mutate(Magnitud = round(Magnitud / 1000, 1),
                          UM = "000_mxp"),
                 sch_apy = inputs$sch_apy_mxp %>% mutate_at(vars(-fuente), ~round(./1000, 1)),
                 sch_apy_pct = inputs$sch_apy_pctj,
                 sch_gral = inputs$sch_gral_mxp %>% mutate(mxp = round(mxp / 1000, 1)) %>%
                   rename("000_mxp" = mxp),
                 loan = inputs$fl_loan_mxp %>% mutate_at(vars(-periodo), ~round(./1000, 1)),
                 est_res = est_res_fmt,
                 est_fc = est_fc_fmt,
                 est_sf = est_sf_fmt,
                 flujo = flujo_fmt,
                 ind_fin = fi_df,
                 van = fi_df$npv,
                 sens = elast_df,
                 risk_vans = prob_van$risk_vans,
                 risk_vans_scaled = prob_van$risk_vans_scaled,
                 pdf = prob_van$pdf,
                 pdf_par = prob_van$pdf_par,
                 risk_rate = prob_van$risk_rate,
                 prob_van_interval = prob_van$prob_van_interval)
  return(modelo)
}
#### minimal scale ####
# Grid search over surface (0.5-100 ha) and evaluation horizon (8-20 years)
# for the minimal profitable scale (VAN closest to zero) and the optimal
# scale (maximum VAN), using the terminal-value-free flow variant.
# Returns data.frame(escala = c("minima", "optima"), sup_ha = ...).
# NOTE(review): `prmtrs_aux` below is LOCAL to this function, but
# mod_fin_flujo_esc() resolves its free variable `prmtrs_aux` in the global
# environment — the modified parameters never reach it unless a global copy
# exists. Confirm / fix the scoping.
esca_min <- function(prmtrs) {
  prmtrs_aux <- prmtrs
  # one VAN per (surface, horizon) combination
  vans_esca <- expand.grid(sup = seq(from = 0.5, to = 100, by = .5),
                           hrz = seq(from = 8, to = 20, by = 1)) %>%
    mutate(vans = NA)
  for(i in 1:nrow(vans_esca)) {
    prmtrs_aux[prmtrs_aux$clave == "pdn_sup", "Magnitud"] <- vans_esca$sup[i]
    prmtrs_aux[prmtrs_aux$clave == "eval_hrz", "Magnitud"] <- vans_esca$hrz[i]
    inputs_esca <- inputs_updt(prmtrs_aux)
    vans_esca$vans[i] <- mod_fin_flujo_esc(inputs_esca)$van
  }
  # minimal scale: VAN nearest zero; optimal scale: maximum VAN
  escala_min <- vans_esca[which.min(abs(vans_esca$vans)), "sup"]
  escala_opt <- vans_esca[which.max(vans_esca$vans), "sup"]
  escala <- data.frame(escala = c("minima", "optima"), sup_ha = c(escala_min, escala_opt))
  return(escala)
}
# Run the minimal/optimal scale search with the current parameters.
esca_min(prmtrs)
#### miscellaneous ####
#FinCal::irr(flujo$fcf)
#FinCal::npv(prm$eval_td$Magnitud, flujo$fcf)
# Interactive scratch checks.
# NOTE(review): `flujo`, `prm` and `risk_van_par` are locals of the functions
# above — these lines only work after manually stepping through those bodies
# so the objects exist in the global environment.
pago(sum(flujo$fcf_pv), prm$eval_td$Magnitud, length(flujo$fcf_pv))
# copy the free cash flow to the Windows clipboard (two equivalent ways)
writeClipboard(as.vector(flujo$fcf))
write.table(flujo$fcf, "clipboard", sep = "\t")
#dist_names <- c("unif", "weibull", "norm", "logis", "lnorm", "gamma", "exp", "cauchy", "beta")
# density-vs-histogram diagnostic for the winning NPV distribution
fitdistrplus::denscomp(risk_van_par$dist_ajust)
### first steps building the parameters dataframe
#prmtrs <- data.frame(Categoría = "pdn",
# Concepto = "Inicio de producción",
# Magnitud = 3,
# UM = "año",
# clave = "pdn_ini")
# Post-processing of the scale grid search: keep only profitable
# (surface, horizon) combinations.
# NOTE(review): `vans_esca` is local to esca_min() — this section assumes it
# was left in the global environment from an interactive run.
vans_esca_fil <- vans_esca %>%
  filter(vans > 0)
#plotly::plot_ly(x=vans_esca_fil$hrz, y=vans_esca_fil$sup, z=vans_esca_fil$vans, type="scatter3d", mode="markers", color = vans_esca_fil$vans)
# shortest profitable horizon for each surface
vans_esca_fil %>%
  group_by(sup) %>%
  summarise(hrz = min(hrz))
# plot: minimal profitable surface against evaluation horizon,
# point transparency mapped to the VAN (thousands of pesos)
vans_esca_fil %>%
  group_by(hrz) %>%
  summarise(sup = min(sup), van = min(vans)) %>%
  group_by(sup) %>%
  summarise(hrz = min(hrz), van = min(van)) %>%
  ggplot(aes(hrz, sup)) +
  geom_step(col = "grey90") +
  geom_point(aes(alpha = van), size = 5, col = "yellowgreen") +
  labs(x = "Años", y = "Has",
       title = "Relación entre la escala mínima rentable\ny el horizonte de evaluación",
       subtitle = "VAN en miles de pesos",
       caption = "Fuente: estimaciones propias") +
  tema_gg +
  scale_x_continuous(breaks = c(8,9,10,11,13,17), labels = c(8,9,10,11,13,17)) +
  theme(axis.line.x = element_line(color = "lightgrey"),
        axis.ticks.x = element_line(),
        legend.position = c(0.85, 0.8))
# spot checks: all horizons for a 14-ha surface ...
vans_esca_fil %>%
  filter(sup == 14)
# ... and VAN vs surface at a fixed 10-year horizon
vans_esca_fil %>%
  filter(hrz == 10) %>%
  arrange(desc(vans)) %>%
  ggplot(aes(sup, vans)) +
  geom_point()
# Variable cost per hectare adjusted for economies of scale.
# Fits a log-linear curve (cv ~ log(sup)) to a small set of empirical
# observations (normalized to 100 plants/ha), then level-shifts the curve so
# it passes through the budgeted variable cost (sum of the "cv" rows of
# `prmtrs`) at the reference surface, and evaluates it at `supe` hectares.
#
# Args:
#   supe:     surface (ha) at which to evaluate the cost curve.
#   prmtrs:   parameter dataframe; rows with Categoría == "cv" give the
#             budgeted per-ha variable cost at the reference surface.
#   sup_base: reference surface (ha) the budget corresponds to. Defaults to
#             40 to preserve the original hard-coded behavior.
# Returns:
#   Estimated variable cost (mxp/ha) at `supe`.
cv_fun <- function(supe, prmtrs, sup_base = 40) {
  # empirical observations: surface (ha), plant density, variable cost/ha
  esc_cts <- data.frame(sup = c(4,5,6,8,8,10.5,15),
                        dens = c(114,110,131,110,100,100,123),
                        cv = c(30686,50478,38584,29550,34134,56990,45627))
  # normalize costs to 100 plants/ha and keep a hand-picked subset of
  # observations (indices 1, 3, 5, 7), then fit cv ~ log(sup)
  cv_lm <- esc_cts %>% mutate(cv = cv * 100 / dens,
                              index = 1:nrow(.)) %>%
    filter(index %in% c(1,3,5,7)) %>%
    select(sup, cv) %>%
    lm(cv ~ log(sup), .)
  # intercept + level shift (budgeted cv minus curve value at sup_base)
  # + slope * log(surface)
  cv_lm$coef[1] +
    (prmtrs %>% filter(Categoría == "cv") %>% summarise(sum(Magnitud)) %>% pull() -
       predict(cv_lm, data.frame(sup = sup_base))) +
    cv_lm$coef[2] * log(supe)
}
# Scratch checks of the scale-cost curve at the reference surface.
cv_fun(40, prmtrs)
# NOTE(review): `cv_lm` is local to cv_fun(), so the two predict() calls only
# work after running cv_fun's body interactively; the first form also misuses
# predict() (newdata must be a data.frame, not a bare vector).
predict(cv_lm, c(40))
predict(cv_lm, newdata = data.frame(sup = 40))
#### PLOTS ####
## elasticities plot
# Slope chart of the break-even elasticities (md_fn_xl$sens): each variable
# gets a line from a ranked start position (y_ini) to an end position offset
# by its (possibly damped) elasticity; the variable with the largest
# absolute elasticity is highlighted in green.
md_fn_xl$sens %>% #cbind(data.frame(elas = c(-0.88, 10.33, 2.33))) %>% select(-elasticidad) %>% rename(elasticidad = elas) %>%
  # y_ini: rank by elasticity; fct damps slopes so the steepest line still
  # fits the fixed 0-4 y-range
  mutate(y_ini = rank(-elasticidad, ties.method = "first"),
         aux_fct = abs(2/elasticidad),
         aux2_fct = min(aux_fct),
         fct = if_else(aux2_fct < 1, aux2_fct, 1),
         y_fin = (elasticidad * fct + y_ini)) %>%
  # long format: one row per endpoint; x = 2 (start) or 5 (end)
  gather(y_ini, y_fin, key = "posicion", value = "eje_y") %>%
  mutate(eje_x = if_else(posicion == "y_ini", 2, 5)) %>%
  # labels only at the start points: variable name and formatted elasticity
  mutate(lab_ini = if_else(eje_x == 2, variable, as.factor(NA)),
         pos_lab_ini = eje_x - 0.3,
         lab_fin = if_else(eje_x == 2, scales::number(elasticidad, accuracy = 0.01), as.character(NA)),
         pos_lab_fin = eje_x + 0.3,
         col_sel = if_else(abs(elasticidad) == max(abs(elasticidad)), "T", "F")) %>%
  ggplot(aes(eje_x, eje_y)) +
  geom_point(aes(col = col_sel), size = 6) +
  geom_line(aes(group = variable, col = col_sel), size = 1) +
  geom_text(aes(x = eje_x - 2, label = lab_ini, col = col_sel), hjust = "left", size = 7, family = "Lato Light") +
  geom_text(aes(x = eje_x - 0.5, label = lab_fin, col = col_sel), hjust = "right", size = 7, family = "Lato Light") +
  labs(x = "", y = "",
       title = "Sensibilidad de variables seleccionadas",
       subtitle = "Coeficientes de elasticidad",
       caption = "Fuente: estimaciones propias.") +
  xlim(0,5) + ylim(0,4) +
  scale_color_manual(values = c("F" = "grey80", "T" = "yellowgreen")) +
  tema_gg +
  theme(axis.line.y = element_blank(),
        axis.text = element_blank(),
        axis.ticks.y = element_blank(),
        legend.position = "none")
## waterfall plot (cumulative discounted cash flow)
# Waterfall chart of the cumulative discounted cash flow: one bar per period
# running from the previous cumulative total (inicio) to the new one (fin);
# negative contributions shown in grey, positive in green; only the extreme
# cumulative values are labelled.
wf_df <- data.frame(periodo = 0:prmtrs[prmtrs$clave == "eval_hrz", "Magnitud"]) %>%
  mutate(flujo = md_fn_xl$flujo %>%
           filter(concepto == "fcf_pv") %>%
           select(-concepto) %>%
           gather() %>%
           select(value) %>%
           pull(),
         inicio = 0,
         fin = flujo)
# accumulate: each bar starts where the previous one ended
for(i in 2:nrow(wf_df)) {
  wf_df$inicio[i] <- wf_df$fin[i - 1]
  wf_df$fin[i] <- wf_df$flujo[i] + wf_df$inicio[i]
}
wf_df %>%
  mutate(fin = inicio + flujo,
         eje_x = 1:nrow(wf_df),
         col_sel = if_else(flujo <= 0, T, F),
         # label only the minimum and maximum cumulative values;
         # eje_y_txt nudges the label inside the bar (empirical offsets)
         txt = scales::comma(fin),
         txt = if_else(fin %in% c(min(fin), max(fin)), txt, ""),
         eje_y_txt = if_else(flujo <= 0, fin + (-0.04406491 * fin), fin - (0.02241248 * fin))) %>%
  ggplot(aes(xmin = eje_x - 0.45,
             xmax = eje_x + 0.45,
             ymin = inicio,
             ymax = fin)) +
  geom_rect(aes(fill = col_sel)) +
  scale_fill_manual(values = c("grey85", "yellowgreen")) +
  scale_x_continuous(breaks = c(1:11), labels = c(0:10)) +
  geom_hline(yintercept = 0, col = "darkgrey", lty = 2) +
  geom_text(aes(x = eje_x, y = eje_y_txt, label = txt), col = "white", size = 2, family = "Lato") +
  labs(x = "", y = "",
       title = "Flujo de caja del proyecto descontado y acumulado",
       subtitle = "Miles de pesos",
       caption = "Fuente: estimaciones propias.") +
  scale_y_continuous(label = scales::comma) +
  tema_gg +
  theme(legend.position = "none")
# leftover theme tweaks kept for reference:
#axis.line.x = element_line(color = "lightgrey"),
#axis.ticks.x = element_line())
#axis.line.y = element_blank(),
#axis.text.y = element_blank(),
#axis.ticks = element_blank())
## cash flow plot
# Bar chart of the project's (undiscounted) free cash flow per period.
# Assumes md_fn_xl$flujo holds one row per concept ("fcf" = free cash flow)
# with one column per period -- TODO confirm against the model builder.
md_fn_xl$flujo %>%
filter(concepto == "fcf") %>%
select(-concepto) %>%
# wide (one column per period) -> long (key/value pairs)
gather() %>%
rename(periodo = key, flujo = value) %>%
mutate(periodo = as.numeric(periodo),
# flag non-positive flows so they get the highlight color below
col_fill = if_else(flujo <= 0, T, F)) %>%
ggplot(aes(x = periodo, y = flujo)) +
geom_col(aes(fill = col_fill)) +
# dashed baseline at zero to separate inflows from outflows
geom_hline(yintercept = 0, col = "darkgrey", lty = 2) +
# FALSE -> grey, TRUE (non-positive flow) -> yellowgreen
scale_fill_manual(values = c("grey85","yellowgreen")) +
scale_x_continuous(breaks = c(0:10)) +
scale_y_continuous(labels = scales::comma) +
labs(x = "", y = "",
title = "Flujo de caja del proyecto",
subtitle = "Miles de pesos",
caption = "Fuente: estimaciones propias.") +
tema_gg +
theme(legend.position = "none")
# confidence interval plot
md_fn_xl$risk_vans_scaled %>%
ggplot(aes(x = .)) +
geom_histogram(bins = 15, fill = "white", col = "grey90", aes(x = ., y = stat(density))) +
stat_function(fun = get(paste0("d", md_fn_xl$pdf)), n = 101,
args = list(location = md_fn_xl$pdf_par[1], scale = md_fn_xl$pdf_par[2]),
col = "yellowgreen", size = 0.75) +
scale_y_continuous(breaks = NULL) +
geom_vline(xintercept = get(paste0("q", md_fn_xl$pdf))(0.025, location = md_fn_xl$pdf_par[1], scale = md_fn_xl$pdf_par[2]),
col = "grey85", lty = 2) +
geom_vline(xintercept = get(paste0("q", md_fn_xl$pdf))(0.975, location = md_fn_xl$pdf_par[1], scale = md_fn_xl$pdf_par[2]),
col = "grey85", lty = 2) +
geom_polygon(data = md_fn_xl$risk_vans_scaled %>%
magrittr::set_names("van_sca") %>%
rbind(data.frame(van_sca = c(get(paste0("q", md_fn_xl$pdf))(0.025, location = md_fn_xl$pdf_par[1], scale = md_fn_xl$pdf_par[2]),
get(paste0("q", md_fn_xl$pdf))(0.975, location = md_fn_xl$pdf_par[1], scale = md_fn_xl$pdf_par[2])))) %>%
arrange(van_sca) %>%
mutate(dens = get(paste0("d", md_fn_xl$pdf))(van_sca, location = md_fn_xl$pdf_par[1], scale = md_fn_xl$pdf_par[2]),
dens = if_else(van_sca <= get(paste0("q", md_fn_xl$pdf))(0.025, location = md_fn_xl$pdf_par[1], scale = md_fn_xl$pdf_par[2]), 0, dens),
dens = if_else(van_sca >= get(paste0("q", md_fn_xl$pdf))(0.975, location = md_fn_xl$pdf_par[1], scale = md_fn_xl$pdf_par[2]), 0, dens)),
aes(x = van_sca, y = dens), fill = "grey", alpha = 0.35) +
labs(x = "", y = "",
title = "Intervalo de confianza al 95% del VAN",
subtitle = "Miles de pesos",
caption = "Fuente: estimaciones propias.") +
tema_gg +
theme(axis.ticks.x = element_line()) +
scale_x_continuous(breaks = c(get(paste0("q", md_fn_xl$pdf))(0.025, location = md_fn_xl$pdf_par[1], scale = md_fn_xl$pdf_par[2]),
fit_escala(md_fn_xl$risk_vans)$reescala_sust / fit_escala(md_fn_xl$risk_vans)$reescala_mult,
get(paste0("q", md_fn_xl$pdf))(0.975, location = md_fn_xl$pdf_par[1], scale = md_fn_xl$pdf_par[2])),
labels = scales::comma(c(md_fn_xl$prob_van_interval$van_000_mxp[1], 0, md_fn_xl$prob_van_interval$van_000_mxp[2])))
# risk interval plot
md_fn_xl$risk_vans_scaled %>%
ggplot(aes(x = .)) +
geom_histogram(bins = 15, fill = "white", col = "grey90", aes(x = ., y = stat(density))) +
geom_text(data = md_fn_xl$risk_vans_scaled, aes(x = quantile(.,0.99), y = quantile(density(.)$y,0.925),
label = paste0("TR: ",
scales::percent(as.numeric(md_fn_xl$risk_rate),
accuracy = 0.1))),
family = "Lato", color = "grey35", size = 5) +
stat_function(fun = get(paste0("d", md_fn_xl$pdf)), n = 101,
args = list(location = md_fn_xl$pdf_par[1], scale = md_fn_xl$pdf_par[2]),
col = "yellowgreen", size = 0.75) +
scale_y_continuous(breaks = NULL) +
geom_polygon(data = md_fn_xl$risk_vans_scaled %>%
magrittr::set_names("van_sca") %>%
rbind(data.frame(van_sca = c(fit_escala(md_fn_xl$risk_vans)$reescala_sust / fit_escala(md_fn_xl$risk_vans)$reescala_mult))) %>%
arrange(van_sca) %>%
mutate(dens = get(paste0("d", md_fn_xl$pdf))(van_sca, location = md_fn_xl$pdf_par[1], scale = md_fn_xl$pdf_par[2]),
dens = if_else(van_sca == min(van_sca), 0, dens),
dens = if_else(van_sca > fit_escala(md_fn_xl$risk_vans)$reescala_sust / fit_escala(md_fn_xl$risk_vans)$reescala_mult, 0, dens)),
aes(x = van_sca, y = dens), fill = "grey", alpha = 0.35) +
geom_vline(xintercept = fit_escala(md_fn_xl$risk_vans)$reescala_sust / fit_escala(md_fn_xl$risk_vans)$reescala_mult,
col = "grey50", lty = 2) +
labs(x = "", y = "",
title = "Intervalo de riesgo, VAN < 0",
subtitle = "Miles de pesos",
caption = "Fuente: estimaciones propias.") +
tema_gg +
theme(axis.ticks.x = element_line()) +
scale_x_continuous(breaks = c(get(paste0("q", md_fn_xl$pdf))(0.025, location = md_fn_xl$pdf_par[1], scale = md_fn_xl$pdf_par[2]),
fit_escala(md_fn_xl$risk_vans)$reescala_sust / fit_escala(md_fn_xl$risk_vans)$reescala_mult,
get(paste0("q", md_fn_xl$pdf))(0.975, location = md_fn_xl$pdf_par[1], scale = md_fn_xl$pdf_par[2])),
labels = scales::comma(c(md_fn_xl$prob_van_interval$van_000_mxp[1], 0, md_fn_xl$prob_van_interval$van_000_mxp[2])))
# financial indicators plot
md_fn_xl$ind_fin %>%
magrittr::set_names(c("VAN", "VAE", "TIR", "bcr", "Pbk")) %>%
gather() %>% #cbind(data.frame(value2 = c(100000,-3,0.1,1,NA))) %>% select(-value) %>% rename(value = value2) %>%
filter(key != "bcr") %>%
mutate(value = as.numeric(value),
value_fmt = value,
value_fmt = if_else(key %in% c("VAN", "VAE"), scales::comma(value), as.character(NA)),
value_fmt = if_else(key == "TIR", paste0(value*100, "%"), value_fmt),
value_fmt = if_else(key == "Pbk", if_else((is.na(value)), "> hrz", paste(scales::number(value), "años", "")), value_fmt),
aux = "P",
aux = if_else(key == "VAN", if_else(value >= 0, "P", "N"), aux),
aux = if_else(key == "VAE", if_else(value >= 0, "P", "N"), aux),
aux = if_else(key == "TIR", if_else(value >= prmtrs[prmtrs$clave == "eval_td", "Magnitud"], "P", "N"), aux),
aux = if_else(key == "Pbk", if_else(!(is.na(value)), "P", "N"), aux),
eje_x = if_else(key %in% c("VAN", "VAE"), 1.5, 5.5),
eje_y = if_else(key %in% c("VAN", "TIR"), 5.5, 1.5)) %>%
ggplot(aes(x = eje_x, y = eje_y)) +
ylim(0,8) + xlim(1,8) +
geom_text(aes(label = value_fmt), hjust = "left", family = "Lato", size = 13, col = "darkgrey") +
geom_text(aes(label = key, x = eje_x - 0.1, y = eje_y + 0.4), hjust = "right",
family = "Lato Light", col = "darkgrey") +
scale_shape_manual(values = c("P" = 17, "N" = 25)) +
scale_fill_manual(values = c("N" = "red", "P" = "yellowgreen")) +
scale_color_manual(values = c("N" = "red", "P" = "yellowgreen")) +
geom_point(aes(x = eje_x - 0.3, y = eje_y - 0.4, shape = aux, col = aux, fill = aux), size = 3) +
labs(x = "", y = "",
title = "Indicadores de rentabilidad",
subtitle = "VAN y VAE en miles de pesos",
caption = "Fuente: estimaciones propias.") +
tema_gg +
theme(legend.position = "none",
axis.text = element_blank(),
axis.ticks.y = element_blank(),
axis.line.y = element_blank())
# investments distribution plot
md_fn_xl$inv %>%
select(Concepto, Magnitud) %>%
filter(Magnitud != 0) %>%
arrange(Magnitud) %>%
mutate(pct_fmt = scales::percent(Magnitud),
col_pal = if_else(Magnitud == max(Magnitud), "T", "F"),
aux = Magnitud / sum(Magnitud),
txt = if_else(aux > 0.1, scales::percent(aux), "")) %>%
ggplot(aes(x = reorder(Concepto, Magnitud), y = Magnitud)) +
geom_col(aes(fill = col_pal)) +
coord_flip() +
labs(x = "", y = "",
title = "Conceptos de inversión en valor actual",
subtitle = "Participación porcentual en el total",
caption = "Fuente: estimación propia.") +
scale_fill_manual(values = c("T" = "yellowgreen", "F" = "grey85")) +
geom_text(aes(label = txt), size = 9, fontface = "bold",
hjust = "right", col = "white",
nudge_y = -0.0222 * max(md_fn_xl$inv[,"Magnitud"])) +
tema_gg +
theme(legend.position = "none",
axis.text.x = element_blank())
# expenses distribution plot
md_fn_xl$costos %>%
select(Categoría, Concepto, Magnitud) %>%
arrange(Magnitud) %>%
mutate(aux = Magnitud / sum(Magnitud),
col_pal = if_else(Magnitud == max(Magnitud), "T", "F"),
txt = if_else(aux > 0.1, scales::percent(aux), "")) %>%
ggplot(aes(x = reorder(Concepto, Magnitud), y = Magnitud)) +
geom_col(aes(fill = col_pal)) +
scale_fill_manual(values = c("T" = "yellowgreen", "F" = "grey85")) +
coord_flip() +
geom_text(aes(label = txt), hjust = "right", col = "white", fontface = "bold",
size = 8, nudge_y = - 0.0222 * max(md_fn_xl$costos[,"Magnitud"])) +
labs(x = "", y = "",
title = "Costos de operación a partir de la estabilización",
subtitle = "Participación porcentual en el total",
caption = "Fuente: estimaciones propias.") +
tema_gg +
theme(legend.position = "none",
axis.text.x = element_blank())
# expenses evolution plot
md_fn_xl$flujo %>%
filter(concepto %in% c("cv", "cf")) %>%
gather(key = "periodo", value = "monto", -concepto) %>%
filter(periodo != 0) %>%
ggplot(aes(x = as.numeric(periodo), y = monto)) +
geom_col(aes(fill = concepto)) +
scale_fill_manual(values = c("cv" = "yellowgreen", "cf" = "grey85")) +
scale_x_continuous(breaks = 1:10, labels = 1:10) +
scale_y_continuous(labels = scales::comma) +
labs(x = "", y = "",
title = "Evolución de los costos",
subtitle = "Miles de pesos",
caption = "Fuente: estimación propia.") +
tema_gg +
theme(legend.position = c(.2,.8),
legend.title = element_blank())
# revenue vs costs evolution plot
md_fn_xl$flujo %>%
filter(concepto %in% c("ing", "cv", "cf")) %>%
.[,-2] %>%
gather(key = "periodo", value = "monto", -concepto) %>%
spread(concepto, monto) %>%
mutate(ctt = cv + cf) %>%
select(-cv, -cf) %>%
mutate(periodo = as.numeric(periodo),
txt_ing = if_else(periodo == 9, "Ingresos", ""),
txt_ctt = if_else(periodo == 9, "Costos", "")) %>%
arrange(periodo) %>%
ggplot(aes(x = periodo)) +
geom_line(aes(y = ing), col = "yellowgreen") +
geom_point(aes(y = ing), col = "yellowgreen") +
geom_text(aes(y = ing - 0.05*(max(ing)), label = txt_ing), family = "Lato") +
geom_line(aes(y = ctt), col = "grey85") +
geom_point(aes(y = ctt), col = "grey85") +
geom_text(aes(y = ctt - 0.05*(max(ing)), label = txt_ctt), family = "Lato") +
scale_x_continuous(breaks = 1:10, labels = 1:10) +
scale_y_continuous(labels = scales::comma) +
labs(x = "", y = "",
title = "Evolución de los ingresos y de los costos de operación",
subtitle = "Miles de pesos",
caption = "Fuente: estimaciones propias.") +
tema_gg
# summary statistics of variables included in risk analysis
gridExtra::grid.arrange(nrow = 2,
agt_nal %>%
filter(anio %in% c(2014:2018), idmodalidad != 2) %>% #filter(rendimiento == 27.95)
select(anio, rendimiento) %>% #group_by(idmodalidad) %>% count()
ggplot(aes(rendimiento)) +
geom_histogram(bins = 15, fill = "grey85", col = "grey85", alpha = 0.2) +
geom_vline(xintercept = agt_nal %>%
filter(anio %in% c(2014:2018, idmodalidad != 2)) %>%
summarise(rdm = sum(rendimiento * volumenproduccion) / sum(volumenproduccion)) %>%
pull(), lty = 2, col = "yellowgreen", size = 1) +
labs(x = "", y = "",
title = "Rendimiento por ha",
subtitle = "Distribución, valor medio ponderado y estadísticos resumen") +
tema_gg +
theme(axis.line.y = element_blank(),
axis.ticks.y = element_blank(),
axis.text.y = element_blank(),
axis.ticks.x = element_line()),
data.frame(nom = c("Media", "D.E.", "Mínimo", "1er Q",
"Mediana", "3er Q", "Máximo", "MAD", "IQR", "CV"),
est = agt_nal %>%
filter(anio %in% c(2014:2018), idmodalidad != 2) %>% #filter(rendimiento == 27.95)
select(rendimiento) %>% #group_by(idmodalidad) %>% count()
summarytools::descr() %>% .[1:10,1]) %>%
mutate(est = scales::number(est, accuracy = 0.01),
dp = ":",
eje_y = 11:2,
eje_x_n = 1,
eje_x_dp = 1.8,
eje_x_e = 2.5) %>%
ggplot(aes(y = eje_y)) +
geom_text(aes(x = eje_x_n, label = nom), family = "Lato", hjust = "left", size = 6, col = "grey") +
geom_text(aes(x = eje_x_dp, label = dp), family = "Lato", col = "grey") +
geom_text(aes(x = eje_x_e, label = est), family = "Lato", size = 6, hjust = "right", col = "darkgrey") +
xlim(0, 3.5) + ylim(1,12) +
labs(x = "", y = "") +
tema_gg +
theme(axis.line.y = element_blank(),
axis.text = element_blank(),
axis.ticks.y = element_blank()),
agt_nal %>%
filter(anio %in% c(2014:2018), idmodalidad != 2) %>% #filter(rendimiento == 27.95)
select(anio, preciomediorural) %>% #group_by(idmodalidad) %>% count()
mutate(preciomediorural = preciomediorural / 1000) %>%
ggplot(aes(preciomediorural)) +
geom_histogram(bins = 15, fill = "grey85", col = "grey85", alpha = 0.2) +
geom_vline(xintercept = agt_nal %>%
filter(anio %in% c(2014:2018, idmodalidad != 2)) %>%
summarise(rdm = sum(preciomediorural * volumenproduccion) / sum(volumenproduccion)) %>%
pull() / 1000, lty = 2, col = "yellowgreen", size = 1) +
labs(x = "", y = "",
title = "Precio medio rural (mxp/kg)",
subtitle = "Distribución, valor medio ponderado y estadísticos resumen") +
tema_gg +
theme(axis.line.y = element_blank(),
axis.ticks.y = element_blank(),
axis.text.y = element_blank(),
axis.ticks.x = element_line()),
data.frame(nom = c("Media", "D.E.", "Mínimo", "1er Q",
"Mediana", "3er Q", "Máximo", "MAD", "IQR", "CV"),
est = agt_nal %>%
filter(anio %in% c(2014:2018), idmodalidad != 2) %>% #filter(rendimiento == 27.95)
select(preciomediorural) %>% #group_by(idmodalidad) %>% count()
mutate(preciomediorural = preciomediorural / 1000) %>%
summarytools::descr() %>% .[1:10,1]) %>%
mutate(est = scales::number(est, accuracy = 0.01),
dp = ":",
eje_y = 11:2,
eje_x_n = 1,
eje_x_dp = 1.8,
eje_x_e = 2.5) %>%
ggplot(aes(y = eje_y)) +
geom_text(aes(x = eje_x_n, label = nom), family = "Lato", hjust = "left", size = 6, col = "grey") +
geom_text(aes(x = eje_x_dp, label = dp), family = "Lato", col = "grey") +
geom_text(aes(x = eje_x_e, label = est), family = "Lato", size = 6, hjust = "right", col = "darkgrey") +
xlim(0, 3.5) + ylim(1,12) +
labs(x = "", y = "") +
tema_gg +
theme(axis.line.y = element_blank(),
axis.text = element_blank(),
axis.ticks.y = element_blank()))
inputs_rm_list <- "(prm, costos_mxp, inv_mxp, inv, fl_inv_mxp, sch_apy_mxp,
sch_apy_pctj, sch_gral_mxp, loan, fl_loan_mxp, activos,
fl_dpr_mxp, fl_pd_ton, fl_ing_mxp, fl_cts_mxp, fl_sch_mxp)"
# Shared ggplot2 theme applied to every chart in this script:
# blank panel/grid, Lato typography, bold title/subtitle, no x ticks,
# and a light-grey y axis line.
tema_gg <- theme(panel.background = element_blank(),
panel.grid = element_blank(),
text = element_text(family = "Lato"),
plot.title = element_text(face = "bold"),
plot.subtitle = element_text(face = "bold"),
axis.ticks.x = element_blank(),
axis.text = element_text(family = "Lato Light"),
axis.line.y = element_line(color = "lightgrey"))
prmtrs %>%
filter(Categoría == "cont")
data.frame(datos = runif(3) * 100000) %>%
mutate(datos = scales::number(datos, accuracy = 0.01, big.mark = ","))
vans_esca_fil %>%
filter(sup == 10, hrz == 10) %>%
#group_by(hrz) %>%
#nest() %>%
#mutate(tc = map_dbl(data, ~exp(lm(log(vans) ~ sup, data = .x)$coef[2]) - 1))
ggplot(aes(sup, vans)) +
geom_point()
md_fn_10$risk_rate
|
18fc641c18df03f921db1ef7478efab52ef047e9
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/RLumModel/vignettes/RLumModel_-_Getting_started_with_RLumModel.R
|
dbb6b438d8a76bf0c14863aded6d9a61c7b3b93b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,326
|
r
|
RLumModel_-_Getting_started_with_RLumModel.R
|
## ----global_options, include=FALSE---------------------------------------
knitr::opts_chunk$set(fig.pos = 'H', fig.align = 'center')
## ---- echo=FALSE, message = FALSE----------------------------------------
library(RLumModel)
library(knitr)
library(kableExtra)
## ---- eval = FALSE-------------------------------------------------------
# data("ExampleData.ModelOutput", package = "RLumModel")
#
# ##show class
# class(model.output)
#
# ##show structure
# Luminescence::structure_RLum(model.output)
#
# ##seperate TL-curve from TL-concentrations
# TL_curve <- Luminescence::get_RLum(model.output, recordType = "TL$")
# TL_conc <- Luminescence::get_RLum(model.output, recordType = "(TL)", drop = FALSE)
#
# ##also possible: TL_curve <- get_RLum(model.output, record.id = 1)
#
# ##plot results
# Luminescence::plot_RLum(TL_curve)
# Luminescence::plot_RLum(TL_conc)
#
## ---- eval = FALSE, fig.align = "center"---------------------------------
# ##plot every energy-level by an extra plot
# Luminescence::plot_RLum(TL_conc, plot.single = TRUE)
## ---- eval = FALSE-------------------------------------------------------
# ##see structure of model.output
# Luminescence::structure_RLum(model.output)
## ------------------------------------------------------------------------
model <- "Bailey2001"
## ------------------------------------------------------------------------
sequence <- system.file(
"extdata",
"example_SAR_cycle.SEQ",
package = "RLumModel")
## ------------------------------------------------------------------------
lab.dose_rate <- 0.105
## ---- echo = FALSE-------------------------------------------------------
keywords <- data.frame(
ARGUMENTS = c("TL","OSL","ILL","LM_OSL", "RF", "RF_heating", "IRR", "CH", "PH", "PAUSE"),
DESCRIPTION = c("Thermally stimulated luminescence", "Optically stimulated luminescence", "Illumination", "Linear modulated OSL", "Radiofluorescence", "RF during heating/cooling","Irradiation", "Cutheat", "Preheat", "Pause"),
SUB_ARGUMENTS = c("’temp_begin’ [°C], ’temp_end’ [°C], ’heating_rate’ [°C/s]",
"’temp’ [°C], ’duration’ [s], ’optical_power’ [%]",
"’temp’ [°C], ’duration’ [s], ’optical_power’ [%]",
"’temp’ [°C], ’duration’ [s], optional: ’start_power’ [%], ’end_power’ [%]",
"’temp’ [°C], ’dose’ [Gy], ’dose_rate’ [Gy/s]",
"’temp_begin’ [°C], ’temp_end’ [°C], 'heating rate' [°C/s], 'dose_rate' [Gy/s]",
"’temp’ [°C], ’dose’ [Gy], ’dose_rate’ [Gy/s]",
"’temp’ [°C], optional: ’duration’ [s], ’heating_rate’ [°C/s]",
"’temp’ [°C], ’duration’ [s], optional: ’heating_rate’ [°C/s]",
"’temp’ [°C], ’duration [s]’"))
kable(keywords,
format = "html",
col.names = c("ARGUMENTS", "DESCRIPTION", "SUB-ARGUMENTS"),
caption = "Keywords in RLumModel for creating sequences") %>%
kable_styling(bootstrap_options = c("striped", "hover"), full_width = F)
## ------------------------------------------------------------------------
sequence <- list(
IRR = c(temp = 20, dose = 10, dose_rate = 1),
TL = c(temp_begin = 20, temp_end = 400 , heating_rate = 5))
## ------------------------------------------------------------------------
sequence <- list(
IRR = c(20, 10, 1),
TL = c(20, 400, 5))
## ---- echo = FALSE-------------------------------------------------------
SAR_sequence_table <- data.frame(
ABBREVIATION = c("RegDose", "TestDose", "PH", "CH", "OSL_temp", "OSL_duration", "Irr_temp", "PH_duration", "dose_rate", "optical_power", "Irr_2recover"),
DESCRIPTION = c("Dose points of the regenerative cycles [Gy]",
"Test dose for the SAR cycles [Gy]",
"Temperature of the preheat [°C]",
"Temperature of the cutheat [°C]",
"Temperature of OSL read out [°C]",
"Duration of OSL read out [s]",
"Temperature of irradiation [°C]",
"Duration of the preheat [s]",
"Dose rate of the laboratory irradiation source [Gy/s]",
"Percentage of the full illumination power [%]",
"Dose to be recovered in a dose-recovery-test [Gy]"),
EXAMPLE_ARGUMENTS = c("c(0, 80, 140, 260, 320, 0, 80)",
"50",
"240",
"200",
"125",
"default: 40",
"default: 20",
"default: 10",
"default: 1",
"default: 90",
"20"))
kable(SAR_sequence_table,
format = "html",
col.names = c("ABBREVIATION", "DESCRIPTION", "EXAMPLE ARGUMENTS"),
caption = "Keywords in RLumModel for creating SAR sequences") %>%
kable_styling(bootstrap_options = c("striped", "hover"), full_width = F)
## ------------------------------------------------------------------------
sequence <- list(
RegDose = c(0,10,20,50,90,0,10),
TestDose = 2,
PH = 220,
CH = 220,
OSL_temp = 125,
Irr_2recover = 20)
## ------------------------------------------------------------------------
sequence <- list (
IRR = c (20 , 10 , 1) ,
TL = c (20 , 400 , 5))
## ---- fig.cap = "TL curve with parameter set 'Bailey2001' after 10 Gy laboratory dose"----
model.output <- model_LuminescenceSignals(
model = "Bailey2001",
sequence = sequence,
verbose = FALSE)
## ---- fig.cap = "TL signal with different heating rates"-----------------
##set heating rate
heating.rate <- seq(from = 2, to = 10, by = 2)
##model signals
##"verbose = FALSE" for no terminal output
## "TL$" for exact matching TL and not (TL)
model.output <- lapply(heating.rate, function(x){
sequence <- list(
IRR = c(20, 10, 1),
TL = c(20, 400, x))
TL_data <- model_LuminescenceSignals(
sequence = sequence,
model = "Bailey2001",
plot = FALSE,
verbose = FALSE)
return(Luminescence::get_RLum(TL_data, recordType = "TL$", drop = FALSE))
})
##merge output
model.output.merged <- merge_RLum(model.output)
##plot results
plot_RLum(
object = model.output.merged,
xlab = "Temperature [\u00B0C]",
ylab = "TL signal [a.u.]",
main = "TL signal with different heating rates",
legend.text = paste(heating.rate, "°C/s"),
combine = TRUE)
## ----TACs, fig.align="center"--------------------------------------------
##set temperature
act.temp <- seq(from = 80, to = 600, by = 20)
##loop over temperature
model.output <- vapply(X = act.temp, FUN = function(x) {
##set sequence, note: sequence includes sample history
sequence <- list(
IRR = c(20, 1, 1e-11),
IRR = c(20, 10, 1),
PH = c(x, 1),
IRR = c(20, 0.1, 1),
TL = c(20, 150, 5)
)
##run simulation
temp <- model_LuminescenceSignals(
sequence = sequence,
model = "Pagonis2007",
simulate_sample_history = TRUE,
plot = FALSE,
verbose = FALSE
)
## "TL$" for exact matching TL and not (TL)
TL_curve <- Luminescence::get_RLum(temp, recordType = "TL$")
##return max value in TL curve
return(max(get_RLum(TL_curve)[,2]))
}, FUN.VALUE = 1)
## ---- echo=FALSE, fig.cap = "TAC with parameter set of 'Pagonis2007'"----
##plot resutls
plot(
act.temp[-(1:3)],
model.output[-(1:3)],
type = "b",
xlab = "Temperature [\u00B0C]",
ylab = "TL [a.u.]"
)
## ---- fig.cap = "OSL measurement with different optical power densities with the parameter set of 'Bailey2004'"----
##set optical power [%]
optical_power <- c(0,20,40,60,80,100)
##loop over power
model.output <- lapply(optical_power, function(x){
##set sequence
sequence <- list(
IRR = c(20, 50, 1),
PH = c(220, 10, 5),
OSL = c(125, 50, x))
data <- model_LuminescenceSignals(
sequence = sequence,
model = "Bailey2004",
plot = FALSE,
verbose = FALSE)
##"OSL$" for exact matching OSL and not (OSL)
return(Luminescence::get_RLum(data, recordType = "OSL$", drop = FALSE))
})
##merge output
model.output.merged <- Luminescence::merge_RLum(model.output)
##plot results
Luminescence::plot_RLum(
object = model.output.merged,
xlab = "Illumination time [s]",
ylab = "OSL signal [a.u.]",
legend.text = paste("Optical power ", 20 * optical_power / 100," mW/cm^2"),
combine = TRUE
)
## ----DRT, fig.cap = "Dose recovery test (DRT) with the parameter set of 'Pagonis2008'"----
##set PH temperatures
PH_temp <- seq(from = 160, to = 300, by = 20)
##set regeneration doses
RegDose = c(0, 80, 140, 260, 320, 0, 80)
##loop over PH temperatures
DRT.output <- lapply(PH_temp, function(x){
sequence <- list(
RegDose = RegDose,
TestDose = 20,
PH = x,
CH = x,
OSL_temp = 125,
Irr_2recover = 200)
model.output <- model_LuminescenceSignals(
sequence = sequence,
model = "Pagonis2008",
plot = FALSE,
verbose = FALSE)
results <- Luminescence::analyse_SAR.CWOSL(
object = model.output,
signal.integral.min = 1,
signal.integral.max = 7,
background.integral.min = 301,
background.integral.max = 401,
fit.method = "EXP",
dose.points = RegDose,
plot = FALSE)
temp <- get_RLum(results)
out <- data.frame(
De = temp$De,
De.error = temp$De.Error)
return(out)
})
##output as data.frame for plot_DRTResults
DRT.result <- as.data.frame(do.call(rbind, DRT.output))
##plot DRT.results
Luminescence::plot_DRTResults(
DRT.result,
preheat = PH_temp,
given.dose = 200)
## ----SAR, fig.cap = "SAR protocol with the parameter set of 'Pagonis2008'"----
##set RegDose
RegDose = c(0, 80, 140, 260, 320, 0, 80)
##set sequence
sequence <- list(
RegDose = RegDose,
TestDose = 20,
PH = 220,
CH = 220,
OSL_temp = 125
)
##model
model.output <- model_LuminescenceSignals(
sequence = sequence,
model = "Pagonis2008",
plot = FALSE,
verbose = FALSE
)
##analyse SAR sequence and plot only the resulting growth curve
results <-Luminescence::analyse_SAR.CWOSL(
model.output,
signal.integral.min = 1,
signal.integral.max = 7,
background.integral.min = 301,
background.integral.max = 401,
fit.method = "EXP",
dose.points = RegDose,
verbose = FALSE,
plot.single = c(6)
)
|
d80ec93d6f518f5a1afd47e7042337070ff12025
|
6c3ab47960ff279b77c1af10f36f5297bfdd10ef
|
/plot4.R
|
76f849d0fc2c3a282c43f461bd7b0e8d37201873
|
[] |
no_license
|
MaxShang/ExData_Plotting1
|
f865997f4c2c7358b932cce2d0298aa03195945d
|
dbe1eed78f340f5bcab951c838cc4bbab1082aa5
|
refs/heads/master
| 2020-04-07T06:23:40.482598
| 2016-06-07T00:46:04
| 2016-06-07T00:46:04
| 60,554,028
| 0
| 0
| null | 2016-06-06T19:13:19
| 2016-06-06T19:13:19
| null |
UTF-8
|
R
| false
| false
| 1,097
|
r
|
plot4.R
|
library(data.table)
# Read the full household power data set; "?" marks missing values.
dt2<-fread(input ="household_power_consumption.txt",sep = ";",header = TRUE,na.strings = c("?"))
# Keep only the two target days (1-2 Feb 2007).
dt<-dt2[dt2$Date=="1/2/2007" | dt2$Date=="2/2/2007",]
# Combine Date + Time into POSIXlt timestamps for the x axis.
t<-paste(dt$Date,dt$Time)
t<-strptime(t,"%d/%m/%Y %H:%M:%S")
# 2x2 grid of panels.
par(mfrow=c(2,2))
# Panel 1: global active power over time.
with(dt,
plot(t,Global_active_power,
type="l",
xlab="",
ylab="Global Active Power (Kilowatts)"))
# Panel 2: voltage over time.
with(dt,
plot(t,Voltage,
xlab="datetime",
ylab="Voltage",
type="l"))
# Panel 3: empty frame, then one line per sub-metering channel.
# NOTE(review): `oma` is a par() parameter; passing it through plot()'s `...`
# likely only triggers warnings -- confirm it is intentional.
with(dt,plot(t,dt$Sub_metering_1,type = "n",
ylab="Energy sub metering",
xlab="",oma = c(1, 1, 0, 1)
))
lines(t,dt$Sub_metering_1,type="l")
lines(t,dt$Sub_metering_2, col="red")
lines(t,dt$Sub_metering_3,col="blue")
legend("topright",
lty=1,
legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
col=c("black","red","blue"),bty="n",xjust=0,yjust=0)
# Panel 4: global reactive power over time.
with(dt,
plot(t,Global_reactive_power,
type="l",
xlab="datetime"))
dev.copy(png, file = "plot4.png",width=480,height=480) ## Copy my plot to a PNG file
dev.off()
|
6ff84e8bd122e6f8dca76777ce512dd44f7f131e
|
b8e668c36ff178a28244ff752913ba0af209dd19
|
/dropNonLinearMarkers.R
|
785de1f48a0659e008ddc9f54a720ded221ebf2d
|
[] |
no_license
|
jtlovell/eQTL_functions
|
ab0a4cc0de2dcc134316c0032676888cfcb4d6a4
|
0d583f66534215f8307b191cde42fa38117d5d7a
|
refs/heads/master
| 2016-09-05T18:26:29.857769
| 2015-08-05T21:46:41
| 2015-08-05T21:46:41
| 37,740,661
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 844
|
r
|
dropNonLinearMarkers.R
|
# Drop markers whose genetic position (cm) decreases while physical
# position (bp) increases, i.e. points where the bp~cm map is non-monotone.
#
# @param markers vector of marker names (same length as bp and cm).
# @param bp numeric physical positions (base pairs).
# @param cm numeric genetic positions (centimorgans).
# @param plotit logical; if TRUE, plot bp vs cm after each pruning step,
#   highlighting the offending points in red.
# @param ... further arguments passed on to plot().
# @return the marker names that survive pruning, ordered by bp.
dropNonLinearMarkers<-function(markers, bp, cm, plotit=TRUE,...){
  # One diagnostic plot: monotone points in black, non-monotone ones in red.
  # (Extracted helper: the original repeated this plotting block three times.)
  plot_monotonicity <- function(bp, cm, ...) {
    ok <- diff(bp)/diff(cm) >= 0
    plot(bp, cm, type="n", ...)
    points(bp[ok], cm[ok])
    points(bp[!ok], cm[!ok], col="red", pch=19)
  }
  dat <- data.frame(markers, bp, cm, stringsAsFactors=FALSE)
  dat <- dat[order(dat$bp),]
  if(plotit){
    plot_monotonicity(dat$bp, dat$cm, ...)
  }
  # Repeatedly drop the marker pair around the steepest cm decrease until
  # no negative bp/cm slope remains. The nrow guard prevents min() on an
  # empty diff when pruning removes (almost) everything.
  while(nrow(dat) > 1 && min(diff(dat$bp)/diff(dat$cm)) < 0){
    bad <- which(diff(dat$cm) == min(diff(dat$cm)))
    dat <- dat[-c(bad, bad + 1),]
    if(plotit){
      plot_monotonicity(dat$bp, dat$cm, ...)
    }
  }
  dropped <- markers[!markers %in% dat$markers]
  # Only report when something was actually removed.
  if(length(dropped) > 0){
    cat("dropping markers: ", dropped, "\n")
  }
  dat$markers
}
|
b462356ec26b310faa6aaf7839c48409f314f651
|
43a225d1d8f202bbaec4d9fab24f48228fdbf717
|
/web_scraping.R
|
1ddb71bcde277f3060c49be23b5b23fd393ac10e
|
[] |
no_license
|
karafede/Text_mining
|
d6a066891b0337bd94ef1298a57f55ed9ec62766
|
dbb91921284940cbc55268b9467c3bd76589fb13
|
refs/heads/master
| 2021-01-18T19:51:10.527506
| 2016-09-26T21:19:48
| 2016-09-26T21:19:48
| 69,293,713
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 601
|
r
|
web_scraping.R
|
library(rvest)
# install selectorGadget (used to find the CSS selectors below)
# http://selectorgadget.com/
# Fetch the IMDB page for "The Lego Movie" (network access required).
lego_movie <- read_html("http://www.imdb.com/title/tt1490017/")
# parsing the file with html():
# use html_node() to find the first node that matches that selector,
# extract its contents with html_text(), and convert it to numeric with as.numeric()
# Rating: first node matching ".ratingValue span", as a number.
lego_movie %>%
# html_node("strong span") %>%
html_node(".ratingValue span") %>%
html_text() %>%
as.numeric()
# Cast list: every itemprop <span> inside the #titleCast section.
lego_movie %>%
html_nodes("#titleCast .itemprop span") %>%
html_text()
# All <table> nodes on the page, parsed into data frames.
lego_movie %>%
html_nodes("table") %>%
html_table()
|
b503645bc38f34af1106a74714704dcab353ac99
|
388219bd0cd85580cba19b7e4d47105c65587685
|
/old/old20161024/modelLabel.R
|
e0ae95790a040a553541781b265b7502b3e6874f
|
[] |
no_license
|
yiyusheng/SMART
|
758a32c32688ac369214d7d8696e9dfa80c03a8b
|
af59b2bfa50333a6f7b4dfe6f447da023890ff29
|
refs/heads/master
| 2020-04-12T07:34:21.124629
| 2018-05-18T09:06:53
| 2018-05-18T09:06:53
| 57,004,114
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 385
|
r
|
modelLabel.R
|
# Label disk models for lead time prediction.
# Only 4 capacity/generation groups are recognised; anything else maps to '0'.
# Date: 2016-06-21
# Author: Pezy
#
# @param dm character (or factor) vector of disk model names.
# @return character vector, same length as dm, holding the group label
#   ('750G2', '1000G2', '1000G3', '2000G3') or '0' for unknown models.
modelLabel <- function(dm){
  r <- rep('0',length(dm))
  # %in% (unlike ==) never yields NA, so NA model names safely stay '0'
  # instead of raising "NAs are not allowed in subscripted assignments".
  r[dm %in% c('ST3750330NS', 'GB0750EAFJK')] <- '750G2'
  r[dm %in% c('ST31000524NS', 'MB1000EAMZE')] <- '1000G2'
  r[dm %in% c('ST1000NM0011', 'MB1000EBZQB')] <- '1000G3'
  r[dm %in% 'ST2000NM0011'] <- '2000G3'
  r
}
|
1356873af6291e1c909dfd1c9961ca2dc6b36201
|
def57d9f3803901ef424e7842fd169e2656f0a13
|
/trees for Titanic.R
|
7b30c48ae16419be1b3b7ea5b646b83e16dd8857
|
[] |
no_license
|
janiceli123098/DataAnalytics2020_Jiarun_Li
|
7c949bbc70ff9ef8f2b234ef13a32112d9d98b4b
|
69725614a10626ec70f19526197aadb9e7531a48
|
refs/heads/master
| 2023-01-15T17:46:31.324285
| 2020-12-02T02:12:40
| 2020-12-02T02:12:40
| 292,886,337
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 399
|
r
|
trees for Titanic.R
|
library(ggplot2)
library(dplyr)
library(GGally)
library(rpart)
library(rpart.plot)
library(randomForest)
library(party)
data("Titanic")
# Titanic is a 4-way contingency table, not a data frame, so passing it to
# rpart()/randomForest()/ctree() fails. Expand it to case-level data:
# one row per passenger, dropping the Freq count column so it is not used
# as a predictor.
titanic_df <- as.data.frame(Titanic)
titanic_df <- titanic_df[rep(seq_len(nrow(titanic_df)), titanic_df$Freq),
                         setdiff(names(titanic_df), "Freq")]
# CART classification tree: survival from Class, Sex and Age.
dt <- rpart(Survived~.,
data=titanic_df,
method="class")
plot(dt)
rpart.plot(dt, type=0, extra=2, cex=1.5)
# Random forest on the same formula; importance=TRUE records variable importance.
rf <- randomForest(Survived~., data=titanic_df, importance=TRUE,ntree=1000)
rf
# Conditional inference tree (party).
ct <- ctree(Survived~., data=titanic_df)
ct
plot(ct)
|
994cd09dabfe41fa69261f376f20fde630921338
|
ea8bbb6225bc5309f77839872356168dfeb1fb87
|
/examples/oper.R
|
61f865cb163c123884b9953f412e38098996628d
|
[] |
no_license
|
Lasica/mowpartboost
|
8f45674dbdbb05a28680ea5b16fdd3f3e793cea8
|
188cd6edb8ded42182febe96aa84011cd0cf7fc2
|
refs/heads/master
| 2020-05-07T13:07:32.013894
| 2019-06-11T01:13:10
| 2019-06-11T01:13:10
| 180,535,198
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 716
|
r
|
oper.R
|
# Demo: operator overloading in R. Baseline: `*` is the primitive product.
2*2
sapply(1:10, function(a) sapply(1:10, function(b) a*b))
# Redefine `*` globally: calls the primitive with a+1 and b %/% 2, then adds 2.
# From here on, EVERY use of `*` in this session (including inside sapply
# below) picks up the new definition -- deliberate, for demonstration only.
`*` <- function(a, b) .Primitive("*")(a+1, (b %/% 2))+2
2*2
sapply(1:10, function(a) sapply(1:10, function(b) a*b))
####
# Minimal S3 complex-number class demo: constructor, accessor generics,
# replacement functions, and an overloaded `+`.
# Constructor: a list with re/im slots, classed "cpx".
cpx <- function(re, im) `class<-`(list(re=re, im=im), "cpx")
c1 <- cpx(2, -1)
c2 <- cpx(3, 2)
# Accessor generics and their cpx methods.
re <- function(cp) UseMethod("re")
im <- function(cp) UseMethod("im")
re.cpx <- function(cp) cp$re
im.cpx <- function(cp) cp$im
# Replacement-function generics: enable `re(x) <- value` syntax.
# R rewrites that call using the `*tmp*` variable, hence the argument name.
`re<-` <- function(`*tmp*`, value) UseMethod("re<-")
`im<-` <- function(`*tmp*`, value) UseMethod("im<-")
# cpx methods return a fresh object with the slot replaced (no mutation).
`re<-.cpx` <- function(cp, value) cpx(value, cp$im)
`im<-.cpx` <- function(cp, value) cpx(cp$re, value)
re(c1) <- 5
im(c2) <- -5
# S3 operator method: component-wise addition dispatched via the cpx class.
`+.cpx` <- function(c1, c2) cpx(c1$re+c2$re, c1$im+c2$im)
c1+c2
|
0942fcbea59943563bcb43a581935461d99f76ab
|
f5bf1754fcafb536ddd97fe457551700b3961ecf
|
/R/swiRcharts.R
|
45eaea3df8d28741546a623f457e1dfe0d7ca562
|
[] |
no_license
|
d-qn/swiRcharts
|
4989411e6f132639bd9947fb5b549614a85480c3
|
940c1b7d2b379956dc1287f73affa0d1f47ef587
|
refs/heads/master
| 2020-05-22T06:44:48.340676
| 2018-02-28T07:24:37
| 2018-02-28T07:24:37
| 45,199,662
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,565
|
r
|
swiRcharts.R
|
##' Helper functions for rCharts
##'
##' Misc helper functions to create interactive charts with rCharts
##'
##' @rdname swi_rcharts
##' @param x,y,z a numeric of same length
##' @param color,name,series a character of same length
##' @import rCharts
##' @export
##' @examples
##'
##' \dontrun{
##' #Example for hSeries to create a labelled bubble scatterchart with rCharts/highcharts
##'
##' library(swiTheme)
##' a <- rCharts::Highcharts$new()
##' x <- 1:10
##' y <- seq(1, 100, 10)
##' z <- 10:1
##' color <- rep(c("grey", "red"), 5)
##' name <- c("a", "b", "c", "d", "e", "f", "g", "h", "i", "j")
##' series <- c(rep(c("blob", "poop", "doop"), 3), "asdf")
##' a$series(hSeries(x,y,z,name, color, series))
##'
##' # tweak the bubble plot
##' a$chart(zoomType = "xy", type = "bubble")
##' a$plotOptions(bubble = list(dataLabels = list(enabled = T, style = list(textShadow = 'none') ,
##' color = '#aa8959', formatter = "#! function() { return this.point.name; } !#")))
##'
##' a$colors(swi_rpal)
##' a$tooltip(formatter = "#! function() { return this.point.name + ':' +this.x + ', ' + this.y; } !#")
##' a$xAxis(title = list(text = "important indicator", align = "high"), lineColor = list ('#FF0000'))
##' a
##'
##' hChart.html <- tempfile("hchart_labelledBubble.html")
##' a$save(hChart.html)
##' }
hSeries <- function(x, y, z, name, color = NULL, series) {
  # Deprecated entry point kept for backwards compatibility.
  warning("\n", "hSeries is deprecated! Use hSeries2 instead\n")
  # All coordinate/label vectors must be parallel; color is optional.
  stopifnot(length(x) == length(y), length(z) == length(x), length(name) == length(x),
            length(series) == length(x))
  stopifnot(is.null(color) || length(color) == length(x))
  if(!is.numeric(x)) stop("x needs to be numeric")
  if(!is.numeric(y)) stop("y needs to be numeric")
  if(!is.numeric(z)) stop("z needs to be numeric")
  # Assemble one point per row; a NULL color argument is simply dropped
  # by data.frame(), so no color column is created in that case.
  pts <- data.frame(x = as.numeric(x), y = as.numeric(y), z = as.numeric(z),
                    color = color, name = name, series = series)
  # One Highcharts series per distinct value of `series`
  # (factor-level order, i.e. sorted, as before).
  grouped <- split(pts, as.factor(pts$series))
  series_list <- lapply(grouped, function(grp) {
    point_list <- lapply(seq_len(nrow(grp)), function(i) {
      pt <- list(x = grp[i, "x"], y = grp[i, "y"],
                 z = as.character(grp[i, "z"]),
                 name = as.character(grp[i, "name"]))
      if(!is.null(color)) pt$color <- grp[i, "color"]
      pt
    })
    list(data = point_list, name = as.character(grp$series[1]))
  })
  # Strip names/attributes: Highcharts expects a plain JSON array,
  # and any attribute would prevent it from rendering.
  attributes(series_list) <- NULL
  series_list
}
##' @rdname swi_rcharts
##' @param df a data.frame
##' @param series a character, the column name in \code{df} to split the data in different highcharts' series
##' @import rCharts
##' @export
##' @examples
##' hSeries2 <- hSeries2(data.frame(x = x, y = y, z = z, color = color, name = name, series = series), "series")
##' b <- rCharts::Highcharts$new()
##' b$series(hSeries2)
##'
hSeries2 <- function(df, series) {
  # Validate: df must be a data.frame and `series` the name of one of its columns.
  stopifnot(is.data.frame(df))
  stopifnot(is.character(series))
  stopifnot(series %in% colnames(df))
  # One Highcharts series per distinct value of the `series` column
  # (factor-level order, i.e. sorted, same as before).
  chunks <- split(df, as.factor(df[, series]))
  series_list <- lapply(chunks, function(chunk) {
    # Capture the series name before dropping its column from the data.
    series_name <- as.character(chunk[1, series])
    chunk <- chunk[, -which(colnames(chunk) == series)]
    list(data = rCharts::toJSONArray2(chunk, json = FALSE, names = TRUE),
         name = series_name)
  })
  # Strip names/attributes so Highcharts receives a plain JSON array.
  attributes(series_list) <- NULL
  series_list
}
##' Save highcharts from rCharts into a responsive html webpage
##'
##' Create a responsive html page along with javascript library files
##'
##' @rdname swi_rcharts
##' @param hChart.html,output.html character file path to the input highchart html and the output reponsive html
##' @param output a path to a folder where the reponsive html file and depending js libraries will be saved
##' @param source,author,h2,descr,h3 characters
##' @param overwrite a logical, should the \code{output} file be overwritten?
##' @export
##' @examples
##' \dontrun{
##' # Example of converting a highcharts-rCharts html chart into a responsive one
##'
##' hChart2responsiveHTML(hChart.html, source = "source: stupid data")
##' browseURL(hChart.html)
##' }
# Convert a saved rCharts/Highcharts html page into a responsive html page:
# writes `output.html` (packaged responsive header + chart markup + footer)
# into `output` and copies the bundled js libraries into output/js.
# NOTE(review): the roxygen block above documents an `overwrite` argument
# that this function does not accept -- reconcile docs and signature.
hChart2responsiveHTML <- function(hChart.html, output.html = "rHighchart.html", output = ".", source = "source:",
author = "Duc-Quang Nguyen | swissinfo.ch", h2 = "title", descr = "descriptive text",
h3 = "subtitle") {
output.html <- file.path(output, output.html)
#change the output file name if already exists (renamed to *_init.html)
if(file.exists(output.html)) {
file.rename(output.html, gsub("\\.html$", "_init\\.html", output.html))
warning("\n Existing output html renamed to:", gsub("\\.html$", "_init\\.html", output.html), "\n")
}
# copy the responsive header (shipped with the package) to the output file
fpath <- system.file("extdata", "responsiveHeader.html", package="swiRcharts")
status <- file.copy(fpath, output.html, FALSE)
# NOTE(review): this string is evaluated and discarded -- presumably it was
# meant to be stop("Could not copy header file!"); confirm and fix.
if(!status) {"Could not copy header file!"}
## Load highcharts'html and get everything between the tag <div rChart highcharts> until the last <script>
x <- readLines(hChart.html)
# NOTE(review): grep() may return several matches; the code assumes the
# chart div appears exactly once -- confirm for multi-chart pages.
istart <- grep("rChart highcharts", x)
scriptStartEnd <- data.frame(start = grep("<script", x), end = grep("\\/script>", x))
scriptStartEnd$length <- scriptStartEnd$end - scriptStartEnd$start
# append javacript code to output.html (everything below goes through sink)
sink(output.html, append = T)
if(h2 != "") {
cat("<h2>",h2,"</h2>\n")
}
if(descr != "") {
cat('<div class="descr">', descr, "</div>\n")
}
if(h3 != "") {
cat("<h3>",h3,"</h3>\n")
}
# get and sink the div with rChart highcharts, up to the end of the
# longest <script>...</script> block
cat(x[istart:scriptStartEnd[which.max(scriptStartEnd$length),'end']])
# add the footer: source & author, plus the iframe resizer script
cat('\n\n<!-- Source -->\n<div id="cite">', source, "|", author, "</div>")
cat('\n<script type="text/javascript" src="https://www.swissinfo.ch/static/Themes/basic/js/vendor/iframeResizer.contentWindow.3.5.3.min.js"></script>')
cat("\n</body>\n</html>")
sink()
## copy the javacript library in a folder ".js" (only files not already there)
lib.js <- list.files(system.file("extdata", package="swiRcharts"), ".js", full.names = T)
if(!file.exists(file.path(output, "js"))) {
dir.create(file.path(output, 'js'), showWarnings = FALSE)
}
lib.js.exists <- file.exists(file.path(output, "js", basename(lib.js)))
if(any(!lib.js.exists)) {
file.copy(lib.js[!lib.js.exists], file.path(output, "js"))
}
}
|
81548cc97f3e5b7074d355611aaf674d5d5008c7
|
1a79c363b1d4b5f92eee4048b24a879ecfa0aa05
|
/R/trio.R
|
0cf9597b22313f5a9ee84b80184ff624d89f55ca
|
[] |
no_license
|
cran/MetamapsDB
|
21f4f10768f94c3b052102e348f5de18e8840715
|
9b7f5d461b6bcbdddee7f7633125d4822192699d
|
refs/heads/master
| 2022-11-21T05:39:28.935110
| 2017-12-06T08:51:00
| 2017-12-06T08:51:00
| 113,299,187
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,940
|
r
|
trio.R
|
#' Find all trios surrounding the KO of interest
#'
#' Searches graphDB for all cases of trios surrounding the KO of interest
#'
#' @param KOI the ko id (for remote)
#' @param koi the ko id (for local)
#' @param toUnique if to return only unique (only applicable for remote)
#' @param withDetails with details (only applicable for remote)
#' @param local for doing the search locally, faster
#' @param contracted to contract anot (only applicable for local)
#' @param ... the other args for dbquery
#' @importFrom magrittr "%>%" "%<>%" "%$%"
#' @importFrom dplyr anti_join filter select arrange group_by
#' @importFrom igraph as_ids
#' @importFrom utils combn
#' @importFrom stats complete.cases
#' @importFrom igraph shortest_paths "%->%"
#'
#' @export
allTrios <- function(
    KOI = 'ko:K00001',
    koi = 'ko:K00001',
    toUnique = TRUE,
    withDetails = FALSE,
    local = TRUE,
    contracted = TRUE,
    ...
){
    # Dummy binding for magrittr's "." placeholder (silences R CMD check notes)
    . = 'shutup'
    if(local){
        ##################################################
        #If you have the full metabolic graph
        #surrNODES('ko:K00401', wholeMetabolism) %>% sapply(surrNODES, graph=wholeMetabolism) %>% do.call(c,.) %>% unique %>% grepgraph
        # Build the local neighbourhood graph around the KO of interest:
        # the KO -> its compounds -> every KO touching those compounds.
        oriG = grepgraph(koi)
        if(vcount(oriG) == 0){
            message(sprintf("%s not found", koi))
        }else{
            oriG2 = grepgraph_cpd(V(oriG)$name %>% grep("cpd", ., value=T))
            meta = grepgraph(V(oriG2)$name %>% grep("ko", ., value=T))
            if(contracted){
                message("contracting graph")
                meta = contractMetab(meta)
            }
            # Vertex ids (within `meta`) of the KO of interest, its surrounding
            # compounds, and the surrounding KOs (the KO itself is removed).
            koID = findV(koi, meta)
            surrCPDs = surrNODES(koi, meta)
            surrCPD_id = sapply(surrCPDs, findV, g=meta)
            surrKOs = surrNODES(koi, meta) %>% lapply(surrNODES, g = meta) %>% do.call(c,.) %>% unique
            surrKOs %<>% grep(koi, ., value=T, invert=T) #remove itself
            surrKOs_id <- sapply(surrKOs, findV, g=meta)
            #Shortest Paths which passes through the KO of Interest
            # For every unordered pair of surrounding KOs, keep the shortest
            # path(s) between them that contain the KO of interest.
            spaths = surrKOs %>% grep(koi, ., invert=T, value=T) %>%
                combn(2) %>%
                apply(2, function(pair){
                    alist = list(
                        one = findV(pair[[1]], meta),
                        two = findV(pair[[2]], meta)
                    )
                    lapply(shortest_paths(meta, from = alist$one, to = alist$two)$vpath,function(x){
                        x = as.integer(x)
                        #there may be multiple paths
                        isInside = koID %in% x
                        if(isInside){
                            matrix(V(meta)$name[x] %>% do.call(c,.), nrow=1)
                        }else{
                            NULL
                        }
                    })
                }) %>% do.call(c,.)
            if(is.null(do.call(c,spaths))){
                # NOTE(review): in this branch `spaths` stays a list of NULLs,
                # yet it is still passed to anti_join()/rbind() below -- confirm
                # this case cannot be reached with real data.
                sprintf("%s is not on any shortest path", koi) %>% message
            }else{
                sprintf("%s is on the shortest path", koi) %>% message
                #shortestPathsDF
                spaths = setNames(as.data.frame(do.call(rbind,spaths)), c("Kminus", "Cminus", "K", "Cplus", "Kplus"))
            }
            #Find ALL trios
            # Enumerate ko1 -> cpd1 -> koi -> cpd2 -> ko2 chains by walking the
            # directed edges of `meta` out of each surrounding KO.
            allTrios= lapply(surrKOs_id,
                function(ko1){
                    #ko1 -a-> cpd1
                    igraph::E(meta)[ko1 %->% surrCPD_id] %>% extractFromPath %>%
                    #ko1 -a-> cpd1 -b-> koi
                    lapply(function(cpd1){
                        isC2K = igraph::E(meta)[cpd1 %->% koID] %>% as_ids %>% length > 0 #valid
                        if(isC2K){
                            #ko1 -> cpd1 -> koi -> cpd2
                            igraph::E(meta)[koID %->% surrCPD_id] %>% extractFromPath %>%
                            #ko1 -> cpd1 -> koi -> cpd2 -> ko2
                            lapply(function(cpd2){
                                isC2K2 = igraph::E(meta)[cpd2 %->% surrKOs_id] %>% length > 0
                                if(isC2K2){
                                    ko2 = igraph::E(meta)[cpd2 %->% surrKOs_id] %>% extractFromPath(type="ko")
                                    setNames(data.frame(ko1, cpd1, koID, cpd2, ko2), c("Kminus","Cminus","K", "Cplus","Kplus"))
                                }else{
                                    message("Failed")
                                }
                            }) %>% do.call(rbind,.)
                        }else{
                            message("Failed")
                        }
                    }) %>% do.call(rbind,.)
                }) %>% do.call(rbind,.)
            # NULL bindings for the column names referenced in filter() below,
            # so R CMD check does not flag them as undefined globals.
            Kminus = NULL
            Kplus = NULL
            Cminus= NULL
            Cplus= NULL
            # Drop u-turn trios (same KO, or same compound, on both sides)
            allTrios = filter(allTrios, Kminus != Kplus & Cminus != Cplus)
            # Translate vertex ids back to vertex names, keeping the column layout
            allTrios = apply(allTrios,1, function(x) matrix(unlist(V(meta)[x]$name), nrow=1)) %>% t %>% as.data.frame %>% setNames(colnames(allTrios))
            # Label each trio by whether it lies on a shortest path or not
            notshortest = anti_join(allTrios,spaths, by=colnames(allTrios))
            notshortest$type = "notshortest"
            spaths$type = "shortest"
            list(paths = rbind(notshortest, spaths), graph=meta)
        }
        ##################################################
    }else{
        # Remote mode: query the graph database directly. Normalise the id so
        # it always carries exactly one "ko:" prefix.
        KOI = gsub("^(ko:)*","ko:",KOI)
        #Round 1: Find the paths
        trioDF <- dbquery(query = "
MATCH
(ko1:ko)-->(:cpd)-->(inputko:ko {ko:{koid}})-->(:cpd)-->(ko3:ko)
RETURN
ko1.ko AS before,
inputko.ko AS middle,
ko3.ko AS after
", list(koid = KOI) ,,...)
        # NOTE(review): is.na() on a data.frame returns a matrix; this `if`
        # relies on dbquery() returning a scalar NA on failure -- confirm.
        if(!is.na(trioDF)){
            trioDF$before %<>% as.character
            trioDF$middle %<>% as.character
            trioDF$after %<>% as.character
            # not uturn type reactions (redundant KOs)
            before=NULL
            after=NULL
            trioDF = unique(filter(trioDF,before != after))
            if(toUnique){
                #remove reverse rxns
                # A trio and its reverse share the same sorted (before, after)
                # key; keep only the first occurrence of each key.
                trioDF = trioDF[!duplicated(
                    trioDF %>%
                        apply(1, function(row){
                            c(row["before"], row["after"]) %>%
                                sort %>%
                                paste0(collapse="")
                        })
                ),]
                #remove redundant
            }
            #Round 2: Find the contigs information
            if(withDetails){
                kos = with(trioDF, c(before, after,middle) %>% unique)
                contigInfo <- dbquery(
                    query = "
UNWIND
{ kos } AS koss
MATCH
(inputko:ko {ko : koss.ko})
RETURN
inputko.ko AS koID,
inputko.contigCount AS ContigCount,
inputko.expression AS Expression
",
                    params = lapply(kos,function(x) list(ko=x)) %>% list(kos=.), ...) %>% make.data.frame
                # Keep only complete rows (drop KOs with missing count/expression)
                contigInfo <- contigInfo[complete.cases(contigInfo),]
                list(trioDF, contigInfo)
            }else{
                trioDF
            }
        }else{
            NA
        }
    }
}
|
b487d80a277e3d1548552ce2172c75da442a20d5
|
f6401021b5655bf14acba25664a389e0a2d024b5
|
/scripts/DataMunging/02_CalculatingProvAnnualClimaticVariables.R
|
7357f9ca63080c0ff1ca830505227031adc82365
|
[] |
no_license
|
JulietteArchambeau/HeightPinpinClonapin
|
56726076b40f274d1bfc4b4534382907cc41027b
|
8395f9b320665c8610a80ae0a4e396bbcb1e1616
|
refs/heads/master
| 2023-04-19T00:41:01.330853
| 2023-02-03T15:22:55
| 2023-02-03T15:22:55
| 254,827,204
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,564
|
r
|
02_CalculatingProvAnnualClimaticVariables.R
|
#################################################################################################
# February 2019
# Computing the mean of climatic variables for the period 1901-2009
# in each provenance.
#################################################################################################

# Select the column indices of a climate table (years 1901:2014 are stored in
# columns 3:116) that fall inside the given time period.
# NOTE(review): the `tab` argument is not used in the computation; it is kept
# only for backward compatibility with calls like select.yr(tab = clim.k, ...).
select.yr <- function(tab, period) {
  c(3:116)[c(1901:2014) >= min(period) & c(1901:2014) <= max(period)]
}

# Which climatic data? (eumedclim variable names)
bioclim <- paste0("bio", c(1, 2, 5, 6, 12, 13, 14))
T.seas <- paste0("tmean.", c("djf", "mam", "jja", "son"))
P.seas <- paste0("prec.", c("djf", "mam", "jja", "son"))
pet <- paste0("pet.", c("mean", "min", "max"))
ppet <- paste0("ppet.", c("mean", "min", "max"))
eumedclim.vars <- c(bioclim, T.seas, P.seas, pet, ppet)

coord <- readRDS("data/SiteProvCoord.RDS")
# keep only coordinates of the 35 provenances
coord <- coord[c(1:35), ]

# One row per provenance, one column per climatic variable.
# Fix: the original passed `dim = list(...)`, which only worked through R's
# partial argument matching against `dimnames`; spell the argument out.
df <- matrix(NA, nrow(coord), length(eumedclim.vars),
             dimnames = list(rownames(coord), eumedclim.vars))

for (k in eumedclim.vars) {
  nm.file <- paste0("data/climate/extraction_", k, "_40points_eumedclim.RData")
  load(nm.file)  # loads `clim.k`
  clim.k <- clim.k[c(1:35), ]
  # Mean of the 1901-2009 values for each provenance
  df[, k] <- rowMeans(clim.k[, select.yr(tab = clim.k, 1901:2009)])
}

df <- as.data.frame(df)
saveRDS(df, file = "data/ProvAnnualClimateData.RDS")
|
79971eea3d216d5bd3f772ff3fd321a5e95fa394
|
1f2b1298f5731525bbe48f614adf3b5e8edfa814
|
/R/get2by2table.R
|
d3ccfcd56002ff6ab2a42358c4f9c7cf20c0a4a4
|
[
"MIT"
] |
permissive
|
mortenarendt/MBtransfeR
|
cc30ab0ab3c492c07917755487e7271c64869ee4
|
52b4f2d84abc76fd1efe6bce7624e65925a9c294
|
refs/heads/master
| 2020-12-23T22:42:51.588719
| 2020-08-18T15:37:20
| 2020-08-18T15:37:20
| 237,222,543
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 544
|
r
|
get2by2table.R
|
#' Calculates all 2x2 presence/absence tables between two similar sized, and matched matrices
#'
#' For each column j, cross-tabulates presence (count > 0) versus absence in
#' \code{o1[, j]} against \code{o2[, j]}.
#'
#' @param o1 matrix of counts (n x p) - n is samples and p is variables
#' @param o2 matrix of counts (n x p)
#' @import dplyr
#' @return a data.frame (p x 4) with the 2x2 table Ns (stats): n00, n10, n01, n11
#' @export
get2by2table <- function(o1, o2){
  # Guard against silently recycling mismatched inputs
  stopifnot(all(dim(o1) == dim(o2)))
  p1 <- o1 > 0  # presence in o1
  p2 <- o2 > 0  # presence in o2
  # Column-wise counts in O(n * p): the original formed p x p cross-product
  # matrices and took their diagonals, which is O(n * p^2) time and O(p^2) memory.
  n11 <- colSums(p1 & p2)
  n00 <- colSums(!p1 & !p2)
  n10 <- colSums(p1 & !p2)
  n01 <- colSums(!p1 & p2)
  STAT <- data.frame(n00, n10, n01, n11)
  return(STAT)
}
|
51da1e27e99948310c82dd94c576e8b643ce27f9
|
73744a740941b13641c0175c8e583b20cfd023a1
|
/analysis/books/01b_tidy_LCL.R
|
31fd27e712db032d2922d57050286fbacbd3b879
|
[] |
no_license
|
mllewis/WCBC_GENDER
|
8afe092a60852283fd2aa7aea52b613f7b909203
|
ed2d96361f7ad09ba70b564281a733da187573ca
|
refs/heads/master
| 2021-12-25T22:41:21.914309
| 2021-12-22T19:08:36
| 2021-12-22T19:08:36
| 248,584,454
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,139
|
r
|
01b_tidy_LCL.R
|
# read in raw corpus and save tidied version
library(tidyverse)
library(tidytext)
library(textclean) # for contraction cleaning
library(here)
LCNL_INFILE <- here("data/raw/corpora/cdtc_lcnl_corpus_by_book/")
BOOKID_KEY <- here("data/raw/corpora/key_book_id.csv")
CONTRACTION_LIST <- here("data/processed/words/contractions_complete.csv")
LCL_OUTPATH <- here("data/processed/books/tidy_lcl.csv")
# Repair curly apostrophes that were mangled (mojibake) in transcribed titles.
clean_titles_from_transcription <- function(text) {
  fixed <- gsub("â\u0080\u0099", "'", text)
  gsub("\u0092", "'", fixed)
}
# Repair punctuation characters that were mangled (mojibake) in transcribed text.
clean_text_from_transcription <- function(text) {
  # Ordered replacement table: garbled byte sequence -> intended character.
  # Order matters: the longer "â\u0080 + suffix" sequences must be handled
  # before the bare "â\u0080" fallback.
  fixes <- c(
    "â\u0080\u009c" = "\"",
    "â\u0080\u009d" = "\"",
    "â\u0080\u0098" = "'",
    "â\u0080\u0099" = "'",
    "â\u0080\u0093" = "-",
    "â\u0080\u0094" = "-",
    "â\u0092" = "'",
    "â\u0080" = "...",
    "â\u0085" = "..."
  )
  for (pattern in names(fixes)) {
    text <- gsub(pattern, fixes[[pattern]], text)
  }
  text
}
# Lookup tables: contraction expansions and the title -> book_id key
contraction_list <- read_csv(CONTRACTION_LIST)
book_key <- read_csv(BOOKID_KEY)

# read in raw corpus (one line per element, all book files concatenated)
# NOTE(review): `full = T` relies on partial matching of `full.names` -- works,
# but spelling it out would be safer.
raw_lcnl_files <- list.files(LCNL_INFILE, full = T)
lcnl_raw <- map(raw_lcnl_files, readLines, encoding = "latin1") %>%
  unlist() %>%
  as.data.frame() %>%
  rename(text = ".")

# tidy titles: one row per "Title: ..." line, joined to book_id via book_key
lcnl_titles <- lcnl_raw %>%
  filter(str_detect(text, regex("^Title", ignore_case = TRUE))) %>%
  rename(title = text) %>%
  mutate(title = str_trim(str_replace(title, "Title: ", "")),
         title = map_chr(title, clean_titles_from_transcription),
         title = toupper(title)) %>%
  left_join(book_key) %>%
  select(book_id, title, author)

# tidy corpus with titles and line numbers: drop author/blank lines, carry each
# title's book_id down to its text lines with fill(), then number lines per book
lcnl_tidy <- lcnl_raw %>%
  filter(!str_detect(text, regex("^Author",
                                 ignore_case = TRUE)),
         text != "") %>%
  mutate(row_one = case_when(str_detect(text, regex("^Title",
                                                    ignore_case = TRUE)) ~ 1, TRUE ~ 0),
         title = case_when(row_one == 1 ~ str_trim(str_replace(text, "Title: ", "")),
                           TRUE ~ NA_character_),
         title = map_chr(title, clean_titles_from_transcription),
         title = toupper(title)) %>%
  left_join(book_key %>% select(-author)) %>%
  fill(book_id) %>%
  filter(row_one == 0) %>%
  select(-row_one, -title) %>%
  group_by(book_id) %>%
  mutate(line_number = row_number()) %>%
  ungroup() %>%
  left_join(lcnl_titles) %>%
  select(book_id, title, author, line_number, text)

# tidy corpus with cleaned text: fix mojibake, expand contractions, strip
# possessives and punctuation, normalise whitespace and case
lcnl_tidy_clean <- lcnl_tidy %>%
  mutate(text = map_chr(text, clean_text_from_transcription),
         text = replace_contraction(text, contraction.key = contraction_list), # replace contractions with uncontracted forms (do this before lowercasing)
         text = str_replace_all(text, "'s", ""), # drop possessive 's (e.g. "George's") -- important for the character-name matching downstream
         text = gsub('[[:punct:] ]+', ' ', text), # get rid of punctuation
         text = str_squish(text),
         text = tolower(text))

write_csv(lcnl_tidy_clean, LCL_OUTPATH)
|
a50345a6354392aa4f27ede26121593bf8bc1aa0
|
77794ca29c315b0cdb36d7e12fd9c297d93221a4
|
/Model_when_optimizing.R
|
7519ec68e906ff2fec7d1b6e878a428238c2aa9c
|
[] |
no_license
|
carolksun/NIH18
|
8b9debe9802ac97fdfb0faeb0d1aa61ea183d509
|
9e4360caa7c6f708b802688d4a8c300c90e0f2f8
|
refs/heads/master
| 2020-04-04T17:49:40.242545
| 2018-11-05T00:08:53
| 2018-11-05T00:08:53
| 156,137,628
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,709
|
r
|
Model_when_optimizing.R
|
library("ranger")
library("dplyr")
# NOTE(review): ggplot(), randomForest() and varImpPlot() are called below but
# library(ggplot2) and library(randomForest) are never loaded in this script --
# confirm they are attached elsewhere or add the library() calls.

Scores <- read.table("Scores.txt", header = TRUE)
Master <- read.table("Master.txt", header = TRUE)
ID <- read.csv("List of Patients.csv", header = TRUE)
ID <- as.character(ID$x)

# Join per-patient scores onto the master table
Master2 <- merge(Master, Scores, by="patient")
# Model data: outcome msdss_last plus every column whose name contains "SL"
rfdata <- Master2 %>% select(msdss_last, contains("SL"))

# Result containers for the grid search.
# NOTE(review): seeding with data.frame(matrix(vector(), 1, 3)) leaves an
# all-NA first row in Predicterror (rbind below only appends) -- the NA row
# survives into the plots/filters.
Predicterror = data.frame(matrix(vector(), 1, 3))
colnames(Predicterror) <- c("Trees", "mtry", "error")
Templist <- data.frame(matrix(vector(), 1, 3))
colnames(Templist) <- c("Trees", "mtry", "error")

# Grid search over num.trees (500..1000 step 50) and mtry (7..30), recording
# ranger's OOB prediction error for each combination
t=500
while(t <= 1000){
  for(i in 7:30){
    Templist <- data.frame(matrix(vector(), 1, 3))
    colnames(Templist) <- c("Trees", "mtry", "error")
    rfmodelresults <- ranger(msdss_last ~., data = rfdata, num.trees = t, mtry = i)
    Templist$Trees <- t
    Templist$mtry <- i
    Templist$error <- rfmodelresults$prediction.error
    Predicterror <- rbind(Predicterror, Templist)
  }
  t = t+50
}

plot(Predicterror$error~Predicterror$mtry)
Predicterror$Trees <- as.factor(Predicterror$Trees)
Predicterror2 <- Predicterror
Predicterror2$Trees <- as.numeric(as.character(Predicterror$Trees))

# One error-vs-mtry curve per forest size.
# NOTE(review): Trees is a factor here, so `Trees == j` relies on factor ==
# numeric coercion -- confirm it selects the intended rows.
for(j in seq(500,1000,50)){
  dferror <- Predicterror %>% filter(Trees == j) %>% select(mtry, error)
  print(ggplot(dferror, aes(y=error, x=mtry)) + geom_line() + ggtitle(paste("Trees =", j)))
}

# Final fits at the chosen hyper-parameters (mtry = 19, trees = 800)
rfmodelresults <- ranger(msdss_last ~., data = rfdata, num.trees = 800, mtry = 19, importance = "permutation")
train=sample(1:nrow(rfdata),120)
testresults <- randomForest(msdss_last ~., data = rfdata, subset = train, importance = TRUE, mtry = 19)
varImpPlot(testresults)
#mtry = 19, trees = 800
#Read Variable Importance Methods, Splitting Training and Validation, how to show results of random forests
|
104d0583bef56562823f7d7d49506c7fe421538c
|
658f9fc2e63cad23e6262fc7caa1abf01ecd70cb
|
/Mobily/Redemption-Weekly-Report/Analyses/detailed_Mobily_Redemptions.R
|
e69c125c95b0654b3ce1e133c5398d5cd7b6dc6e
|
[] |
no_license
|
aliarsalankazmi/Aimia-Projects
|
1353758f829a14db97a40097f94ed283fe21dff9
|
e54345c12c6057ecef222b87885ec6c932ca8923
|
refs/heads/master
| 2016-08-12T06:18:07.109940
| 2016-01-03T06:31:24
| 2016-01-03T06:31:24
| 46,541,306
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,550
|
r
|
detailed_Mobily_Redemptions.R
|
# Weekly Mobily redemption summary by pay type and product sub-category.
# Aggregates twice: first per customer (msisdn), then per
# week / pay type / sub-category, with each group's share of redemptions
# and points within its week + pay type.
df5 <- df1 %>%
  filter(rdm_type == 'Mobily') %>%
  select(redemption_we, msisdn, item_sub_category, pay_type_code, points_denomination) %>%
  group_by(redemption_we, pay_type_code, item_sub_category, msisdn) %>%
  summarise(total_redemptions_perCustomer = n(),
            points_redeemed_perCustomer = sum(points_denomination)) %>%
  group_by(redemption_we, pay_type_code, item_sub_category) %>%
  summarise(total_redemptions = sum(total_redemptions_perCustomer),
            unique_customers = n_distinct(msisdn),
            total_points_redeemed = sum(points_redeemed_perCustomer),
            avg_points_redeemed = mean(points_redeemed_perCustomer),
            max_points_redeemed = max(points_redeemed_perCustomer),
            min_points_redeemed = min(points_redeemed_perCustomer)) %>%
  mutate(percent_total_redemptions = (total_redemptions/sum(total_redemptions))*100,
         percent_points_redeemed = (total_points_redeemed/sum(total_points_redeemed))*100) %>%
  arrange(redemption_we, pay_type_code, item_sub_category)

# Heatmap: share of total redemptions per sub-category and week, faceted by pay type
g1 <- df5 %>%
  ggplot(aes(x = redemption_we, y = item_sub_category)) +
  geom_tile(aes(fill = percent_total_redemptions), colour = 'white') + scale_fill_gradient(low = 'white', high = 'steelblue') +
  geom_text(aes(x = redemption_we, y = item_sub_category, label = paste0(round(percent_total_redemptions,0),'%')), alpha = .7, fontface = 'bold', family = 'Garamond', size = 3.5) +
  facet_grid(pay_type_code ~ ., scales = 'free') + ggtitle('Percentage of Total Redemptions for Mobily Products') + xlab('Week Ending Date') + ylab('') + myTheme + theme(panel.grid = element_blank(), axis.ticks = element_blank(), panel.border = element_blank(), legend.position = 'none')

# Same heatmap for the share of total points redeemed
g2 <- df5 %>%
  ggplot(aes(x = redemption_we, y = item_sub_category)) +
  geom_tile(aes(fill = percent_points_redeemed), colour = 'white') + scale_fill_gradient(low = 'white', high = 'steelblue') +
  geom_text(aes(x = redemption_we, y = item_sub_category, label = paste0(round(percent_points_redeemed,0),'%')), alpha = .7, fontface = 'bold', family = 'Garamond', size = 3.5) +
  facet_grid(pay_type_code ~ ., scales = 'free') + ggtitle('Percentage of Total Points Redeemed for Mobily Products') + xlab('Week Ending Date') + ylab('') + myTheme + theme(panel.grid = element_blank(), axis.ticks = element_blank(), panel.border = element_blank(), legend.position = 'none')

print(g1)
print(g2)

# Latest week only: week-over-week change of each sub-category's shares,
# plus formatted ("tidy") number columns for reporting
df5.1 <- df5 %>%
  ungroup() %>%
  select(redemption_we, pay_type_code, item_sub_category, total_redemptions, unique_customers,
         total_points_redeemed,avg_points_redeemed, contains('percent'), contains('tidy')) %>%
  arrange(pay_type_code, item_sub_category, redemption_we) %>%
  group_by(pay_type_code, item_sub_category) %>%
  mutate(percent_totRedemp_Change = round(percent_total_redemptions - lag(percent_total_redemptions)),
         percent_totPts_Change = round(percent_points_redeemed - lag(percent_points_redeemed)),
         totRedemp_Status = ifelse(percent_totRedemp_Change >= 0, 'increased', 'decreased'),
         totPts_Status = ifelse(percent_totPts_Change >= 0, 'increased', 'decreased'),
         redempNumTidy = tidyNum(total_redemptions),
         ptsNumTidy = tidyNum(total_points_redeemed),
         avgNumTidy = tidyNum(avg_points_redeemed)) %>%
  ungroup() %>%
  filter(redemption_we == max(redemption_we)) %>%
  group_by(pay_type_code) %>%
  arrange(desc(percent_total_redemptions))

# Split the latest-week table by pay type for the report sections
df5.1.Prepaid <- df5.1 %>%
  filter(pay_type_code == 'Prepaid')
df5.1.Postpaid <- df5.1 %>%
  filter(pay_type_code == 'Postpaid')
|
8046e141c872e058226403e80447d828d7d3d90a
|
f0ef630f83b1e312d4b9ef82a09e813c1b8ed7bd
|
/cachematrix.R
|
e9907a01169edb316803c0f9065f1a4ee95dabe7
|
[] |
no_license
|
adsmaniotto/ProgrammingAssignment2
|
843d02963e790a596864549337ac6ff9ce699821
|
535b5ab6f20e5f323305c09b27e54b90367a56cd
|
refs/heads/master
| 2020-12-25T11:15:18.501044
| 2014-05-21T03:18:10
| 2014-05-21T03:18:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,481
|
r
|
cachematrix.R
|
## makeCacheMatrix() wraps a matrix in getter/setter closures so that
## cacheSolve() can compute its inverse once and reuse the cached value.
## Creates a special "vector" of functions using a matrix as an input
makeCacheMatrix <- function(x = matrix()) {
  # Returns a list of four accessors closing over the matrix `x` and its
  # cached inverse:
  #   set(y)    -- replace the stored matrix and invalidate the cache
  #   get()     -- return the stored matrix
  #   setInv(i) -- store a computed inverse
  #   getInv()  -- return the cached inverse (NULL when not yet computed)
  cachedInverse <- NULL

  set <- function(y) {
    x <<- y
    # A new matrix invalidates any previously cached inverse
    cachedInverse <<- NULL
  }
  get <- function() {
    x
  }
  setInv <- function(i) {
    cachedInverse <<- i
  }
  getInv <- function() {
    cachedInverse
  }

  list(set = set, get = get, setInv = setInv, getInv = getInv)
}
## This function returns the inverse of a matrix. If the inverse has already
## been calculated, the cached inverse variable is returned instead
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x', computing it at most once:
  ## subsequent calls return the cached value.
  cached <- x$getInv()
  if (!is.null(cached)) {
    # Cache hit: skip the (potentially expensive) solve()
    print("getting cached data")
    return(cached)
  }
  # Cache miss: invert the stored matrix and remember the result
  inverse <- solve(x$get(), ...)
  x$setInv(inverse)
  inverse
}
|
63980b0b663d5523af95c8f81a2406d9b7452ae6
|
f5435fd1b9f39bec9b199a573aaf7a5a2de2889f
|
/R/subsample.R
|
c2dc4d3b11941b1e5cca1082ee56c3cb00ad4874
|
[] |
no_license
|
brycefrank/spsys
|
7977680a1482e294e8316e8c6f3f30124bfa15ab
|
d88d56661dcf1d6b6b77786a816a27ed1638e099
|
refs/heads/master
| 2022-12-27T11:41:34.098804
| 2020-07-31T18:02:44
| 2020-07-31T18:02:44
| 257,992,951
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,946
|
r
|
subsample.R
|
# Various functions for creating subsets of sampling frames used for assessment
# and to create neighborhood centers and anchors.
#' Retrieve the set of all possible starting positions
#'
#' In two-dimensional systematic samples, all possible samples can be obtained
#' merely by moving the starting position within a "starting region" contained
#' by the top-left-most sample group. For a sampling interveal `a` the hexagonal
#' and rectangular configurations will contain `a^2` possible samples. This function
#' retrieves those starting positions and is used internally as part of `compare_estimators()`.
#'
#' @param sys_frame A `SysFrame` object
#' @param a The sampling interval
#' @return A dataframe of starting positions for all possible systematic samples
#' for the interval a.
# S4 generic; the concrete behaviour lives in the RectFrame / HexFrame methods
setGeneric('subsample_starts', function(sys_frame, a) {
    standardGeneric('subsample_starts')
})
# Rectangular frames: every (row, col) offset in a 1..a x 1..a window is a
# valid starting position, giving a^2 possible systematic samples.
setMethod('subsample_starts', signature = list(sys_frame='RectFrame', a='numeric'),
  function(sys_frame, a) {
    offsets <- expand.grid(seq(1, a), seq(1, a))
    data.frame(r = offsets[, 1], c = offsets[, 2])
  })
# Hexagonal frames: odd rows start at odd columns 1, 3, ..., 2a-1; even rows
# are shifted right by one column. Each of the a rows contributes a starts.
setMethod('subsample_starts', signature = list(sys_frame='HexFrame', a='numeric'),
  function(sys_frame, a) {
    per_row <- lapply(seq_len(a), function(row) {
      odd_cols <- seq(1, 2 * a, 2)
      cols <- if (row %% 2 == 0) odd_cols + 1 else odd_cols
      data.frame(r = rep(row, a), c = cols)
    })
    out <- do.call(rbind, per_row)
    rownames(out) <- NULL
    out
  })
#' Transforms a set of population unit coordinates to an integer index.
#'
#' Note that input coordinates must be spatially aligned vertically and horizontally. This
#' may not always be the case, e.g. for some spatial projections that curve geographic space.
#'
#' @param coords A dataframe of x,y coordinates of population unit locations.
#' @param d_x The distance between points in the x dimension
#' @param d_y The distance between points in the y dimension
#' @return A dataframe with two columns, r and c, that correspond to row and column indices
#' @keywords internal
transform_coords <- function(coords, d_x=NA, d_y=NA) {
  # Shift so the top-left unit sits at offset (0, 0): x grows to the right,
  # y is measured downward from the top row.
  xs <- coords[, 1] - min(coords[, 1])
  ys <- coords[, 2] - max(coords[, 2])

  # When a spacing is not supplied, infer it as the gap between the two
  # smallest distinct offsets (assumes an aligned, regular grid).
  infer_spacing <- function(offsets) {
    lev <- sort(unique(offsets))
    lev[[2]] - lev[[1]]
  }
  if (is.na(d_x)) {
    d_x <- infer_spacing(xs)
  }
  if (is.na(d_y)) {
    d_y <- infer_spacing(ys)
  }

  # 1-based column / row indices; round() absorbs floating point error
  col_ix <- round(xs / d_x + 1)
  row_ix <- round(abs(ys / d_y) + 1)
  data.frame(r = row_ix, c = col_ix)
}
#' Retrieves the subsample for a given set of hexagonal indices
#'
#' @param hex_ix A dataframe of hexagonal indices
#' @param start_pos A starting position
#' @param a A sampling interval
#' @return A dataframe of sample indices
#' @keywords internal
subsample_hex_ix <- function(hex_ix, start_pos, a) {
  # Row offsets: every a-th row starting from 0
  row_offsets <- seq(0, max(hex_ix$r) - 1, a)
  max_col <- max(hex_ix$c)

  # For each selected row, the column offsets alternate between two phases so
  # that consecutive sampled rows are staggered (hexagonal packing).
  pieces <- lapply(seq_along(row_offsets), function(k) {
    if (k %% 2 == 1) {
      cols <- seq(0, max_col - 1, a * 2)
    } else {
      cols <- seq(-a, max_col - 1, a * 2)
      cols <- cols[cols > 0]
    }
    list(r = rep(row_offsets[[k]], length(cols)), c = cols)
  })

  r_off <- unlist(lapply(pieces, `[[`, "r"))
  c_off <- unlist(lapply(pieces, `[[`, "c"))

  # Anchor the pattern at the requested starting position
  data.frame(r = r_off + start_pos[[1]], c = c_off + start_pos[[2]])
}
#' Retrieves a subset of indices that correspond to the center of a compact set
#' of hexagonal neighborhoods
#'
#' @param ix A dataframe of hexagonal indices
#' @return A dataframe of hexagonal neigborhood centers
#' @keywords internal
subsample_hex_ix_compact <- function(ix) {
  # The center pattern repeats every 14 rows/columns; pad small frames so at
  # least one full period is covered.
  last_r <- max(max(ix$r), 14)
  last_c <- max(max(ix$c), 14)

  # One grid of centers per row phase j = 0..14; each successive phase is
  # shifted left by 5 columns, negative columns being discarded.
  grids <- lapply(0:14, function(j) {
    rows <- seq(j, last_r, 14)
    cols <- seq(-j * 5, last_c, 14)
    expand.grid(rows, cols[cols >= 0])
  })

  centers <- bind_rows(grids)
  # Bump over from 0-based offsets to a (1,1) origin
  centers <- centers + 1
  colnames(centers) <- c('r', 'c')
  centers
}
#' Retrieves the subsample for a given set of rectangular indices
#'
#' @param ix A dataframe of hexagonal indices
#' @param start_pos A starting position
#' @param a A sampling interval
#' @return A dataframe of sample indices
#' @keywords internal
subsample_rect_ix <- function(ix, start_pos, a) {
  # Every a-th row/column from the starting position up to the frame extent
  rows <- seq(start_pos[[1]], max(ix$r), a)
  cols <- seq(start_pos[[2]], max(ix$c), a)
  grid <- expand.grid(rows, cols)
  colnames(grid) <- c('r', 'c')
  grid
}
|
3f850b8a0a4c70ba5c37f2ff0f0a0cbab29e538b
|
e3a1e3f9e092a20e849042dad1044e622965689c
|
/man/convertCorpus.Rd
|
6c71cc4221255cbed26ff8d91226183f35ef8a26
|
[] |
no_license
|
bryant1410/stm
|
b9be5fc117b0cbf4448589844ef3b5d9a7ad7fba
|
760aa2cade89ce8f9cae02fc6535622908d07502
|
refs/heads/master
| 2021-01-19T20:05:49.289923
| 2017-04-17T08:07:39
| 2017-04-17T08:07:39
| 88,486,274
| 0
| 0
| null | 2017-04-17T08:07:40
| 2017-04-17T08:07:40
| null |
UTF-8
|
R
| false
| true
| 2,269
|
rd
|
convertCorpus.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/convertCorpus.R
\name{convertCorpus}
\alias{convertCorpus}
\title{Convert \pkg{stm} formatted documents to another format}
\usage{
convertCorpus(documents, vocab, type = c("slam", "lda", "Matrix"))
}
\arguments{
\item{documents}{the documents object in \pkg{stm} format}
\item{vocab}{the vocab object in \pkg{stm} format}
\item{type}{the output type desired. See Details.}
}
\description{
Takes \pkg{stm} formatted documents and a vocab object and returns formats
usable in other packages.
}
\details{
We also recommend the \pkg{quanteda} and \pkg{tm} packages for text preparation
etc. The \code{convertCorpus} function is provided as a helpful utility for
moving formats around, but if you intend to do text processing with a variety
of output formats, you likely want to start with \pkg{quanteda} or \pkg{tm}.
The various type conversions are described below:
\describe{
\item{\code{type = "slam"}}{Converts to the simple triplet matrix
representation used by the \pkg{slam} package. This is the format used
internally by \pkg{tm}.}
\item{\code{type = "lda"}}{Converts to the format
used by the \pkg{lda} package. This is a very minor change as the format in
\pkg{stm} is based on \pkg{lda}'s data representation. The difference as
noted in \code{\link{stm}} involves how the numbers are indexed.
Accordingly this type returns a list containing the new documents object and
the unchanged vocab object.}
\item{\code{type = "Matrix"}}{Converts to the
sparse matrix representation used by \pkg{Matrix}. This is the format used
internally by numerous other text analysis packages.} }
If you want to write
out a file containing the sparse matrix representation popularized by David
Blei's \code{C} code \code{ldac} see the function \code{\link{writeLdac}}.
}
\examples{
#convert the poliblog5k data to slam package format
poliSlam <- convertCorpus(poliblog5k.docs, poliblog5k.voc, type="slam")
class(poliSlam)
poliMatrix <- convertCorpus(poliblog5k.docs, poliblog5k.voc, type="Matrix")
class(poliMatrix)
poliLDA <- convertCorpus(poliblog5k.docs, poliblog5k.voc, type="lda")
str(poliLDA)
}
\seealso{
\code{\link{writeLdac}} \code{\link{readCorpus}}
\code{\link{poliblog5k}}
}
|
8a4a94dc0d130c6ebe60da34c1a06677cdeea082
|
50ed17f78083221eb95cdc0638157aa58f819eb0
|
/man/InitPop.Rd
|
82394c59256a5373c5588a6c0833e25cc06d972c
|
[
"Apache-2.0"
] |
permissive
|
k-silva/amss
|
9db034d2dd3c2d5ee80298cf6d86eb2266c7c851
|
cbf5e7f6c668de493077ce1d08a2ab963891f0cc
|
refs/heads/master
| 2022-02-27T11:23:30.357174
| 2018-06-07T21:17:26
| 2018-06-07T21:17:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,609
|
rd
|
InitPop.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/module_natural_migration.R
\name{InitPop}
\alias{InitPop}
\title{Initialize population segmentation.}
\usage{
InitPop(data.dt, pop.total, market.rate = 1,
prop.activity = rep(1/length(kActivityStates), length(kActivityStates)),
prop.favorability = rep(1/length(kFavorabilityStates),
length(kFavorabilityStates)), prop.loyalty = rep(1/length(kLoyaltyStates),
length(kLoyaltyStates)),
prop.availability = rep(1/length(kAvailabilityStates),
length(kAvailabilityStates)))
}
\arguments{
\item{data.dt}{data.table containing all state-related data}
\item{pop.total}{total population}
\item{market.rate}{target proportion of consumers in 'in-market' market
state.}
\item{prop.activity}{vector of nonnegative values summing to 1, representing
the proportion of the population to be assigned to each activity
state, given they are "responsive," i.e., "in.market" and
"unsatiated."}
\item{prop.favorability}{vector of nonnegative values summing to 1,
representing the proportion of the population to be assigned to each
favorability state, given they are not "loyal."}
\item{prop.loyalty}{vector of nonnegative values summing to 1, representing
the proportion of the population to be assigned to each loyalty
state.}
\item{prop.availability}{vector of nonnegative values summing to 1,
representing the proportion of the population to be assigned to each
availability state.}
}
\value{
\code{invisible(NULL)}. \code{data.dt} is updated by reference.
}
\description{
Initialize population segmentation.
}
\keyword{internal}
|
8b67a12f63b8bdb1848af8ff32d3c873f766e91c
|
4339f893a3b34fb03f584205e787383b0a44d302
|
/2_Function_Check_RUT.R
|
84fc30b1b4e738a92e1258240c0390de30e0e601
|
[
"MIT"
] |
permissive
|
benyimr/Padron_Electoral_R
|
6f80abe21b54d1ff3af57a3047651e38a78bc5a6
|
eede31c2b03caec2f5bd93148f664ec939b9d5cb
|
refs/heads/master
| 2020-04-06T21:11:47.124434
| 2018-11-16T11:00:29
| 2018-11-16T11:00:29
| 157,795,777
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,580
|
r
|
2_Function_Check_RUT.R
|
#FUNCTION FOR CHECKING THE RUT (VERIFICATION DIGIT)
# Validates Chilean RUT strings of the form "12.345.678-9": it recomputes the
# modulo-11 verification digit from the numeric body (before the hyphen) and
# compares it with the digit the string supplies after the hyphen.
# Args:
#   string1: character vector of RUTs; the body may contain "." separators.
# Returns: character vector, "OK" where the check digit matches, "NO_RUT"
#   otherwise (including malformed or missing inputs).
# NOTE(review): depends on stringr, dplyr::if_else and tcR::reverse.string
# being attached; only 6-8 digit bodies are handled (others yield "NO_RUT"
# via the -999 sentinel) — confirm that covers all callers' inputs.
check_rut <- function(string1){
datos_temp <- data.frame(rut1 = rep(NA,length(string1)))
datos_temp$rut1 <- string1
# Body: everything before the hyphen, with thousand-separator dots removed.
datos_temp$rut2 <- stringr::str_remove_all(string = stringr::str_sub(string1, start = 1, end = (str_locate(string1, pattern = "[-]"))[,1]-1), pattern = "\\.")
# Missing/hyphenless inputs get a dummy body so later arithmetic stays NA-free.
datos_temp$rut2a <- if_else(is.na(datos_temp$rut2)==T, "000000000000", datos_temp$rut2)
# rut3: the supplied verification digit (last character of the string).
datos_temp$rut3 <- stringr::str_sub(string1, start = -1, end = -1)
# rut4: body reversed so digit weights 2,3,4,... apply right-to-left.
datos_temp$rut4 <- tcR::reverse.string(datos_temp$rut2a, .n = 1)
datos_temp$n <- nchar(datos_temp$rut4)
# Weighted digits: weights cycle 2..7 then restart at 2 (standard RUT scheme).
datos_temp$num_1a <- as.numeric(str_sub(string = datos_temp$rut4, start = 1, end = 1)) * 2
datos_temp$num_2a <- as.numeric(str_sub(string = datos_temp$rut4, start = 2, end = 2)) * 3
datos_temp$num_3a <- as.numeric(str_sub(string = datos_temp$rut4, start = 3, end = 3)) * 4
datos_temp$num_4a <- as.numeric(str_sub(string = datos_temp$rut4, start = 4, end = 4)) * 5
datos_temp$num_5a <- as.numeric(str_sub(string = datos_temp$rut4, start = 5, end = 5)) * 6
datos_temp$num_6a <- as.numeric(str_sub(string = datos_temp$rut4, start = 6, end = 6)) * 7
datos_temp$num_7a <- as.numeric(str_sub(string = datos_temp$rut4, start = 7, end = 7)) * 2
datos_temp$num_8a <- as.numeric(str_sub(string = datos_temp$rut4, start = 8, end = 8)) * 3
# Sum the weighted digits; columns 7:12/7:13/7:14 are num_1a..num_Na by
# position, so this block is fragile to column reordering — keep as-is.
datos_temp$agregar <- if_else(datos_temp$n == 6,
rowSums(datos_temp[7:12]),
if_else(datos_temp$n == 7,
rowSums(datos_temp[7:13]),
if_else(datos_temp$n == 8,
rowSums(datos_temp[7:14]),-999
)))
# Modulo-11 remainder; -999 propagates the "unsupported length" sentinel.
datos_temp$modular <- if_else(datos_temp$agregar == -999, -999,11- (datos_temp$agregar%%11))
# Map the remainder to the expected digit: 1-9 as-is, 10 -> "K", 11 -> "0".
datos_temp$verificar <- if_else(datos_temp$modular > 0 & datos_temp$modular < 10, as.character(datos_temp$modular),
if_else(datos_temp$modular == 10 , "K",
if_else(datos_temp$modular == 11, "0", "NO_RUT")))
# Compare expected vs supplied verification digit.
datos_temp$verificar2 <- if_else(is.na(datos_temp$rut3)==T,"NO_RUT",
if_else(datos_temp$rut3 == datos_temp$verificar, "OK", "NO_RUT"))
return(datos_temp$verificar2)
}
|
87374666c6b0c86abfedc4432446d3e0a1146d81
|
972fa9f59ad0d4e07b44bda60f11aba400ea423f
|
/1_basic_data_manipulation/450x_examples_1.R
|
142e0b9158881ab99461bca1bd86e4facd4c1946
|
[] |
no_license
|
tobiasnowacki/ps450x
|
fadf30e1c5ebbca3a55b208aa2a5210bc6cc8664
|
68e3a5604925f61201f66f13d863803452eee235
|
refs/heads/master
| 2020-08-05T03:43:55.606669
| 2020-02-07T19:13:58
| 2020-02-07T19:13:58
| 212,381,583
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,393
|
r
|
450x_examples_1.R
|
# Teaching script: basic R data structures and manipulation.
# Each executable line is followed by a comment transcript of the output it
# produced, so the code and its printed results can be read side by side.
# head package:utils R Documentation
# Return the First or Last Part of an Object
# Description:
# Returns the first or last parts of a vector, matrix, table, data
# frame or function. Since ‘head()’ and ‘tail()’ are generic
# functions, they may also have been extended to other classes.
# Usage:
# head(x, ...)
# ## Default S3 method:
# head(x, n = 6L, ...)
# Seatbelts ships with R as a multivariate time series; coerce to data.frame.
data(Seatbelts)
df <- as.data.frame(Seatbelts)
head(df, n = 6)
# DriversKilled drivers front rear kms PetrolPrice
# 1 107 1687 867 269 9059 0.1029718
# 2 97 1508 825 265 7685 0.1023630
# 3 102 1507 806 319 9963 0.1020625
# 4 87 1385 814 407 10955 0.1008733
# 5 119 1632 991 454 11823 0.1010197
# 6 106 1511 945 427 12391 0.1005812
# VanKilled law
# 1 12 0
# 2 6 0
# 3 12 0
# 4 8 0
# 5 10 0
# 6 13 0
str(df)
# 'data.frame': 192 obs. of 8 variables:
# $ DriversKilled: num 107 97 102 87 119 106 110 106 107 134 ...
# $ drivers : num 1687 1508 1507 1385 1632 ...
# $ front : num 867 825 806 814 991 ...
# $ rear : num 269 265 319 407 454 427 522 536 405 437 ...
# $ kms : num 9059 7685 9963 10955 11823 ...
# $ PetrolPrice : num 0.103 0.102 0.102 0.101 0.101 ...
# $ VanKilled : num 12 6 12 8 10 13 11 6 10 16 ...
# $ law : num 0 0 0 0 0 0 0 0 0 0 ...
class(df)
# [1] "data.frame"
class(df$drivers)
# [1] "numeric"
dim(df)
# [1] 192 8
# Vectors: type coercion with as.character().
ex_vec <- c(1L, 2L, 3L)
ex_vec
# [1] 1 2 3
class(ex_vec)
# [1] "integer"
ex_vec <- as.character(ex_vec)
ex_vec
# [1] "1" "2" "3"
class(ex_vec)
# [1] "character"
# Lists can hold elements of different types and lengths.
ex_list <- list("ABC", c(2, 4, 6), c("DEF", "GHI"))
ex_list
# [[1]]
# [1] "ABC"
# [[2]]
# [1] 2 4 6
# [[3]]
# [1] "DEF" "GHI"
ex_mat <- matrix(1:9, nrow = 3, ncol = 3)
ex_mat
# [,1] [,2] [,3]
# [1,] 1 4 7
# [2,] 2 5 8
# [3,] 3 6 9
ex_df <- data.frame(col1 = 1:3, col2 = c("A", "B", "C"), col3 = c(T, F, F))
ex_df
# col1 col2 col3
# 1 1 A TRUE
# 2 2 B FALSE
# 3 3 C FALSE
# Column access: $ returns a vector, [ with names returns a data frame.
ex_df$col1
#[1] 1 2 3
ex_df[, c("col1", "col2")]
# col1 col2
# 1 1 A
# 2 2 B
# 3 3 C
names(ex_df)
#[1] "col1" "col2" "col3"
# Merging examples: cbind pastes columns positionally; merge joins on V2.
dfA <- data.frame(V1 = c(1, 2, 3), V2 = c("A", "B", "C"))
dfB <- data.frame(V2 = c("B", "C", "A"), V3 = c("Banana", "Canteloupe", "Apple"))
dfbind <- cbind(dfA, dfB)
dfM <- merge(dfA, dfB, by = c("V2"))
|
d0eac3d87e988c4281f7290e4b1b780592aad19b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/pedquant/examples/pq_perf.Rd.R
|
d23f87e8c9c7a2b9474dcd397066758dd5dea404
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 304
|
r
|
pq_perf.Rd.R
|
# Example script for pedquant::pq_perf (extracted from the package's Rd file).
# NOTE(review): md_stock() downloads market data from a remote source
# ('163'), so this script requires network access and is not deterministic.
library(pedquant)
### Name: pq_perf
### Title: creating performance trends
### Aliases: pq_perf
### ** Examples
## No test:
# load data
dat = md_stock(c('000001', '^000001'), date_range = 'max', source = '163')
# create performance trends
perf = pq_perf(dat)
# pq_plot(perf)
## End(No test)
|
215e02af97a159811e0f3ee60acab218c59c8f3c
|
91f533c44a10ff2d45aa17f60cdc2520862fdbe8
|
/kernel/aggregate_result_as_table.R
|
b06dd04b8cc99f17ef38a6e00f4b5b5a45c2326d
|
[
"MIT"
] |
permissive
|
liuguoyou/CTOQ
|
6494756ca1444e7d51a23e82f504dd104ee13008
|
c3c50c0e6883b18d950eeae8326af5febf2d40a5
|
refs/heads/master
| 2021-06-06T21:23:03.069405
| 2016-09-02T01:32:44
| 2016-09-02T01:32:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,915
|
r
|
aggregate_result_as_table.R
|
# Aggregate benchmark results (error/time of an approximate vs exact method)
# and print them as a LaTeX table via xtable.
# Input : args[1] = whitespace-separated results file with columns
#         n k s method data ans time (no header).
#         args[2] = which statistic to tabulate: "error" or "time".
# Output: a LaTeX tabular written to stdout.
# NOTE(review): commandArgs() is commented out and the args are hard-coded —
# presumably a debugging leftover; restore the commandArgs line for CLI use.
#library(ggplot2)
library(plyr)
library(reshape2)
library(xtable)
#args <- commandArgs(trailingOnly = TRUE)
args <- c('result/sum.all.txt', 'error')
input <- args[1]
disp.type <- args[2]
df <- read.table(input, header=F)
names(df) <- c('n', 'k', 's', 'method', 'data', 'ans', 'time')
df$method <- factor(df$method)
df$data <- factor(df$data)
df$s <- factor(df$s)
df$k <- factor(df$k)
df$n <- factor(df$n)
# Runs where method=='approx' and k equals a full sample size n are treated
# as the exact reference answers; everything else is the approximation.
df.approx <- subset(df, !(method=='approx' & k %in% unique(df$n)))
df.exact <- subset(df, method=='approx' & k %in% unique(df$n))
df.exact$time <- NULL
df.exact$method <- NULL
df.exact$k <- NULL
names(df.exact)[4] <- 'exact'
# plyr::join matches on the shared columns, attaching the exact answer to
# each approximate run.
df.join <- join(df.exact, df.approx)
df.s <- ddply(df.join, .(n, k, method, data), summarize,
m.error=mean(abs(ans - exact), na.rm=T),
s.error=sd(abs(ans - exact), na.rm=T),
m.time=mean(time, na.rm=T),
s.time=sd(time, na.rm=T))
df.s$n <- factor(df.s$n)
df.s$k <- factor(df.s$k)
# Prefix the first n level with "$n=...$" so the LaTeX header reads nicely.
levels(df.s$n)[1] <- sprintf("$n=%s$",levels(df.s$n)[1])
len.n <- length(levels(df.s$n))
levels(df.s$method) <- c('Proposed', 'Nystr\\"{o}m')
# Format mean +/- sd as LaTeX math strings.
df.s$error <- sapply(1:nrow(df.s),
function(i)sprintf('$%.4f\\pm%.4f$', df.s$m.error[i], df.s$s.error[i]))
df.s$time <- sapply(1:nrow(df.s),
function(i) sprintf('$%.3f$', df.s$m.time[i]))
# Wide layout: one column per n, rows grouped by method then k.
df.latex <- dcast(df.s, method + k ~ n, value.var=disp.type)
df.latex$method <- ""
# Rotated multirow labels mark where each method's rows begin.
df.latex$method[1] <- sprintf("\\multirow{%d}{*}{\\rotatebox{90}{%s}} ",
len.n, levels(df.s$method)[1])
df.latex$method[1+len.n] <- sprintf("\\hline\n\\multirow{%d}{*}{\\rotatebox{90}{%s}}",
len.n, levels(df.s$method)[2])
names(df.latex)[1] <- ""
fmt <- paste(rep('r', len.n), collapse='')
print(xtable(df.latex, align=sprintf("rrr%s", fmt)),
include.rownames=FALSE,
sanitize.text.function = function(x){x})
|
e3c378c80b282828a9f4c7808516c1ad63d84de0
|
2f50d7a4d4aa8acc9060b8fed4c06364f3170a33
|
/plot1.R
|
3410350d3547440b1aba6e16b16a8ab9de1de288
|
[] |
no_license
|
chandrashivak/ExData_Plotting1
|
306750d6287290b997c5fe34488409c0eae6faef
|
90e12e1f740387c324eae13f94c93ed3ee1f82b1
|
refs/heads/master
| 2021-01-18T02:42:13.606356
| 2014-06-06T16:38:32
| 2014-06-06T16:38:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 578
|
r
|
plot1.R
|
# Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02,
# written to plot1.png (480x480). Reads the UCI household power consumption
# file from the working directory; "?" marks missing values.
col_types <- c(rep("character", 2), rep("numeric", 7))
epcdata <- read.table("household_power_consumption.txt", header = TRUE,
                      sep = ";", na.strings = "?", nrow = 2075259,
                      colClasses = col_types)
# Parse the day/month/year date column, then keep only the two target days.
epcdata$Date <- as.Date(epcdata$Date, "%d/%m/%Y")
subepcdata <- epcdata[epcdata$Date %in% as.Date(c("2007-02-01", "2007-02-02")), ]
png(file = "plot1.png", width = 480, height = 480)
hist(subepcdata$Global_active_power, xlab = "Global Active Power(kilowatts)",
     ylab = "Frequency", main = "Global Active Power", col = "red")
dev.off()
|
6121607687ce6c803009a2ed38dae1e4aad06a7f
|
a0843cd25f406060933e970d3d3d1d96eca286ad
|
/Community Paper/PLSR.R
|
c40f30c5c3ba69554c054f8b532a6e7430c89234
|
[] |
no_license
|
klapierre/C2E
|
6d5239a72707850978a4ffa1af01532ad0f6357e
|
c2b3acdbfc69a8aa6750cf8742bd4095166d22dc
|
refs/heads/master
| 2022-07-30T05:56:57.732219
| 2022-06-22T15:10:10
| 2022-06-22T15:10:10
| 75,030,900
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,050
|
r
|
PLSR.R
|
# PLSR.R, part 1: compute Glass's delta (treatment vs control effect sizes)
# for community-change metrics, then relate them to site/experiment
# predictors via (1) partial least squares regression and (2) multiple
# regression with partial R^2 and mixed models.
# NOTE(review): dplyr's funs() used below is deprecated (superseded by
# list()/across()) — works on the dplyr version this was written for.
#emily's working directory
setwd("/Users/egrman/Dropbox/C2E/Products/CommunityChange/March2018 WG")
#kevin's working directory
#setwd("C:\\Users\\wilco\\Dropbox\\C2E\\Products\\CommunityChange\\March2018 WG\\")
library(tidyverse)
library(ggplot2)
library(ggthemes)
library(grid)
library(vegan)
library(pls)
theme_set(theme_bw())
theme_update(axis.title.x=element_text(size=20, vjust=-0.35), axis.text.x=element_text(size=16),
axis.title.y=element_text(size=20, angle=90, vjust=0.5), axis.text.y=element_text(size=16),
plot.title = element_text(size=24, vjust=2),
panel.grid.major=element_blank(), panel.grid.minor=element_blank(),
legend.title=element_blank(), legend.text=element_text(size=20))
### stealing Kevin's code for creating Glass's delta to compare T vs C at each timestep
### Read in data
change_metrics_perm <- read.csv("CORRE_RACS_Subset_Perm.csv") %>%
mutate(abs_richness_change = abs(richness_change),
abs_evenness_change = abs(evenness_change))
### Control data: per site/treatment/year mean and sd of each metric in
### control plots (plot_mani == 0), used as the Glass's delta denominator.
change_control <- change_metrics_perm %>%
filter(plot_mani==0) %>%
dplyr::select(treatment_year, treatment_year2, abs_richness_change, abs_evenness_change,
rank_change, gains, losses, site_project_comm, treatment, plot_mani) %>%
rename(abs_richness_change_ctrl = abs_richness_change,
abs_evenness_change_ctrl = abs_evenness_change,
rank_change_ctrl = rank_change,
gains_ctrl = gains,
losses_ctrl = losses
) %>%
group_by(site_project_comm, treatment, treatment_year2) %>%
summarise_at(vars(abs_richness_change_ctrl:losses_ctrl), funs(mean, sd), na.rm=T)
# Glass's delta = (treatment mean - control mean) / control sd, per metric.
change_glass_d <- change_metrics_perm %>%
filter(plot_mani != 0) %>%
group_by(site_project_comm, treatment, treatment_year2, plot_mani) %>%
summarise(abs_richness_change = mean(abs_richness_change,na.rm=T),
abs_evenness_change = mean(abs_evenness_change, na.rm=T),
rank_change = mean(rank_change, na.rm=T),
gains = mean(gains, na.rm=T),
losses = mean(losses, na.rm=T)) %>%
left_join(change_control, by=c("site_project_comm","treatment_year2")) %>%
mutate(abs_richness_glass = (abs_richness_change-abs_richness_change_ctrl_mean)/abs_richness_change_ctrl_sd,
abs_evenness_glass = (abs_evenness_change-abs_evenness_change_ctrl_mean)/abs_evenness_change_ctrl_sd,
rank_glass = (rank_change-rank_change_ctrl_mean)/rank_change_ctrl_sd,
gains_glass = (gains-gains_ctrl_mean)/gains_ctrl_sd,
losses_glass = (losses-losses_ctrl_mean)/losses_ctrl_sd
) %>%
dplyr::select(site_project_comm:plot_mani, abs_richness_glass:losses_glass) %>%
ungroup()
#change_glass_d is the thing that we want
## replace Inf with NAs in change_glass_d (Inf arises when the control sd is 0)
change_glass_d <- change_glass_d %>%
mutate(gains_glass=replace(gains_glass, gains_glass=="Inf", NA)) %>%
mutate(losses_glass=replace(losses_glass, losses_glass=="Inf", NA))
# reading in predictor variables for PLSR
info.spc=read.csv("SiteExperimentDetails_Dec2016.csv") %>%
mutate(site_project_comm = paste(site_code, project_name, community_type, sep="_"))
info.trt=read.csv("ExperimentInformation_Nov2017.csv") %>%
mutate(site_project_comm = paste(site_code, project_name, community_type, sep="_")) %>%
group_by(site_project_comm, treatment) %>%
summarise_at(vars(n, p, k, CO2, precip, temp), funs(mean))
### calculate mean change through time and combine with predictor variables
change_glass_d_mean <- change_glass_d %>%
group_by(site_project_comm, treatment.x, plot_mani) %>%
summarise_at(vars(abs_richness_glass, abs_evenness_glass, rank_glass, gains_glass, losses_glass), funs(mean), na.rm=T) %>%
rename(treatment=treatment.x) %>%
left_join(info.spc, by=c("site_project_comm")) %>%
left_join(info.trt, by=c("site_project_comm","treatment"))
#subsetting out predictor variables
pred=as.matrix(change_glass_d_mean[, c("MAP", "MAT", "rrich", "anpp", "n", "p", "k", "CO2", "precip", "temp")])
#note that some response var (evenness, losses, gains) have NA (for every year there was no variation among the controls; sd=0 and glass's delta was undefined for every year)
#---------1) attempting PLSR
rich=plsr(abs_richness_glass~pred, data=change_glass_d_mean, validation="LOO")
plot(RMSEP(rich))
summary(rich) #first component explains <2% of variation in y
even=plsr(abs_evenness_glass~pred, data=change_glass_d_mean, validation="LOO")
plot(RMSEP(even))
summary(even) #first component explains 10% of variation in y
rank=plsr(rank_glass~pred, data=change_glass_d_mean, validation="LOO")
plot(RMSEP(rank))
summary(rank) #first component explains 8% of the variation in y
gains=plsr(gains_glass~pred, data=change_glass_d_mean, validation="LOO")
plot(RMSEP(gains))
summary(gains) #first component explains 5% of the variation in gains
plot(gains, "loadings", comps = 1:2, legendpos = "topright")
losses=plsr(losses_glass~pred, data=change_glass_d_mean, validation="LOO")
plot(RMSEP(losses))
summary(losses) #first component explains 9% of variation in losses
plot(losses, ncomp=2, asp=1, line=T) #terrible!
plot(losses, "loadings", comps = 1:2, legendpos = "topright")
#----------2) Multiple Regression with site and experiment predictors
#we have 125 datapoints and 10 predictors--why can't we just do multiple regression?
library(car)
library(rsq)
library(lme4)
cor(pred)
pairs(pred)
png(paste0("MR predictor variables pairs plot.png"), width=11, height=8, units="in", res=600)
print(pairs(pred))
dev.off()
# For each response: an lm (with VIF and partial R^2) and a mixed model with
# a random intercept for site.
#partial R2=(SSE(all other terms in model) - SSE(all terms in model))/SSE(all other terms in model)
rich=lm(abs_richness_glass ~ MAP + MAT + rrich + anpp + n + p + k + CO2 + precip + temp, data=change_glass_d_mean)
vif(rich)
rsq.partial(rich)
rich=lmer(abs_richness_glass ~ MAP + MAT + rrich + anpp + n + p + k + CO2 + precip + temp + (1|site_code), data=change_glass_d_mean)
Anova(rich)
summary(rich)
even=lm(abs_evenness_glass~MAP + MAT + rrich + anpp + n + p + k + CO2 + precip + temp, data=change_glass_d_mean)
Anova(even)
rsq.partial(even)
even=lmer(abs_evenness_glass~MAP + MAT + rrich + anpp + n + p + k + CO2 + precip + temp + (1|site_code), data=change_glass_d_mean)
Anova(even)
summary(even)
rank=lm(rank_glass~MAP + MAT + rrich + anpp + n + p + k + CO2 + precip + temp, data=change_glass_d_mean)
Anova(rank)
rsq.partial(rank)
rank=lmer(rank_glass~MAP + MAT + rrich + anpp + n + p + k + CO2 + precip + temp + (1|site_code), data=change_glass_d_mean)
Anova(rank)
summary(rank)
gains=lm(gains_glass~MAP + MAT + rrich + anpp + n + p + k + CO2 + precip + temp, data=change_glass_d_mean); Anova(gains)
rsq.partial(gains)
gains=lmer(gains_glass~MAP + MAT + rrich + anpp + n + p + k + CO2 + precip + temp + (1|site_code), data=change_glass_d_mean); Anova(gains); summary(gains)
losses=lm(losses_glass~MAP + MAT + rrich + anpp + n + p + k + CO2 + precip + temp, data=change_glass_d_mean); Anova(losses)
rsq.partial(losses)
losses=lmer(losses_glass~MAP + MAT + rrich + anpp + n + p + k + CO2 + precip + temp + (1|site_code), data=change_glass_d_mean); Anova(losses); summary(losses)
#-------------3) Multiple regression with only site predictors
#with only some of the treatments (so we don't have too many treatments/experiments at a single site)
# NOTE(review): this section re-runs the same Glass's-delta pipeline as
# part 1 but reads a different input file (CORRE_RAC_Metrics_July2018_trtyr)
# and reduces `pred` to the four site-level predictors; it overwrites the
# objects created above, so run the sections independently.
#emily's working directory
setwd("/Users/egrman/Dropbox/C2E/Products/CommunityChange/March2018 WG")
#kevin's working directory
#setwd("C:\\Users\\wilco\\Dropbox\\C2E\\Products\\CommunityChange\\March2018 WG\\")
library(tidyverse)
library(ggplot2)
library(ggthemes)
library(grid)
library(vegan)
library(car)
library(rsq)
library(lme4)
theme_set(theme_bw())
theme_update(axis.title.x=element_text(size=20, vjust=-0.35), axis.text.x=element_text(size=16),
axis.title.y=element_text(size=20, angle=90, vjust=0.5), axis.text.y=element_text(size=16),
plot.title = element_text(size=24, vjust=2),
panel.grid.major=element_blank(), panel.grid.minor=element_blank(),
legend.title=element_blank(), legend.text=element_text(size=20))
### stealing Kevin's code for creating Glass's delta to compare T vs C at each timestep
### Read in data
change_metrics_perm <- read.csv("CORRE_RAC_Metrics_July2018_trtyr.csv") %>%
mutate(abs_richness_change = abs(richness_change),
abs_evenness_change = abs(evenness_change))
### Control data (mean/sd per site/treatment/year; Glass's delta denominator)
change_control <- change_metrics_perm %>%
filter(plot_mani==0) %>%
dplyr::select(treatment_year, treatment_year2, abs_richness_change, abs_evenness_change,
rank_change, gains, losses, site_project_comm, treatment, plot_mani) %>%
rename(abs_richness_change_ctrl = abs_richness_change,
abs_evenness_change_ctrl = abs_evenness_change,
rank_change_ctrl = rank_change,
gains_ctrl = gains,
losses_ctrl = losses
) %>%
group_by(site_project_comm, treatment, treatment_year2) %>%
summarise_at(vars(abs_richness_change_ctrl:losses_ctrl), funs(mean, sd), na.rm=T)
change_glass_d <- change_metrics_perm %>%
filter(plot_mani != 0) %>%
group_by(site_project_comm, treatment, treatment_year2, plot_mani) %>%
summarise(abs_richness_change = mean(abs_richness_change,na.rm=T),
abs_evenness_change = mean(abs_evenness_change, na.rm=T),
rank_change = mean(rank_change, na.rm=T),
gains = mean(gains, na.rm=T),
losses = mean(losses, na.rm=T)) %>%
left_join(change_control, by=c("site_project_comm","treatment_year2")) %>%
mutate(abs_richness_glass = (abs_richness_change-abs_richness_change_ctrl_mean)/abs_richness_change_ctrl_sd,
abs_evenness_glass = (abs_evenness_change-abs_evenness_change_ctrl_mean)/abs_evenness_change_ctrl_sd,
rank_glass = (rank_change-rank_change_ctrl_mean)/rank_change_ctrl_sd,
gains_glass = (gains-gains_ctrl_mean)/gains_ctrl_sd,
losses_glass = (losses-losses_ctrl_mean)/losses_ctrl_sd
) %>%
dplyr::select(site_project_comm:plot_mani, abs_richness_glass:losses_glass) %>%
ungroup()
#change_glass_d is the thing that we want
## replace Inf with NAs in change_glass_d (control sd of 0 produces Inf)
change_glass_d <- change_glass_d %>%
mutate(gains_glass=replace(gains_glass, gains_glass=="Inf", NA)) %>%
mutate(losses_glass=replace(losses_glass, losses_glass=="Inf", NA))
# reading in predictor variables
info.spc=read.csv("SiteExperimentDetails_Dec2016.csv") %>%
mutate(site_project_comm = paste(site_code, project_name, community_type, sep="_"))
info.trt=read.csv("ExperimentInformation_Nov2017.csv") %>%
mutate(site_project_comm = paste(site_code, project_name, community_type, sep="_")) %>%
group_by(site_project_comm, treatment) %>%
summarise_at(vars(n, p, k, CO2, precip, temp), funs(mean))
### calculate mean change through time and combine with predictor variables
change_glass_d_mean <- change_glass_d %>%
group_by(site_project_comm, treatment.x, plot_mani) %>%
summarise_at(vars(abs_richness_glass, abs_evenness_glass, rank_glass, gains_glass, losses_glass), funs(mean), na.rm=T) %>%
rename(treatment=treatment.x) %>%
left_join(info.spc, by=c("site_project_comm")) %>%
left_join(info.trt, by=c("site_project_comm","treatment"))
#subsetting out predictor variables (site-level only: climate, richness, anpp)
pred=as.matrix(change_glass_d_mean[, c("MAP", "MAT", "rrich", "anpp")])
cor(pred)
pairs(pred)
png(paste0("MR predictor variables SITE LEVEL pairs plot.png"), width=11, height=8, units="in", res=600)
print(pairs(pred))
dev.off()
#partial R2=(SSE(all other terms in model) - SSE(all terms in model))/SSE(all other terms in model)
rich=lm(abs_richness_glass ~ MAP + MAT + rrich + anpp + n + p + k + CO2 + precip + temp, data=change_glass_d_mean)
vif(rich)
rsq.partial(rich)
rich=lmer(abs_richness_glass ~ MAP + MAT + rrich + anpp + n + p + k + CO2 + precip + temp + (1|site_code), data=change_glass_d_mean)
Anova(rich)
summary(rich)
even=lm(abs_evenness_glass~MAP + MAT + rrich + anpp + n + p + k + CO2 + precip + temp, data=change_glass_d_mean)
Anova(even)
rsq.partial(even)
even=lmer(abs_evenness_glass~MAP + MAT + rrich + anpp + n + p + k + CO2 + precip + temp + (1|site_code), data=change_glass_d_mean)
Anova(even)
summary(even)
rank=lm(rank_glass~MAP + MAT + rrich + anpp + n + p + k + CO2 + precip + temp, data=change_glass_d_mean)
Anova(rank)
rsq.partial(rank)
rank=lmer(rank_glass~MAP + MAT + rrich + anpp + n + p + k + CO2 + precip + temp + (1|site_code), data=change_glass_d_mean)
Anova(rank)
summary(rank)
gains=lm(gains_glass~MAP + MAT + rrich + anpp + n + p + k + CO2 + precip + temp, data=change_glass_d_mean); Anova(gains)
rsq.partial(gains)
gains=lmer(gains_glass~MAP + MAT + rrich + anpp + n + p + k + CO2 + precip + temp + (1|site_code), data=change_glass_d_mean); Anova(gains); summary(gains)
losses=lm(losses_glass~MAP + MAT + rrich + anpp + n + p + k + CO2 + precip + temp, data=change_glass_d_mean); Anova(losses)
rsq.partial(losses)
losses=lmer(losses_glass~MAP + MAT + rrich + anpp + n + p + k + CO2 + precip + temp + (1|site_code), data=change_glass_d_mean); Anova(losses); summary(losses)
|
ce4918685fd8a3dff4160ba1ea21a5fad2036c11
|
d4fb7b0bf2830af9f44f95f1ffdbc906c11379ab
|
/tests/testthat/test-graph_evol.R
|
2fd29339d4c5f64ddd35500ed76a068b3779cc14
|
[
"CC-BY-4.0",
"etalab-2.0",
"CC-BY-3.0",
"LicenseRef-scancode-etalab-2.0-en"
] |
permissive
|
jengelaere/enr.reseaux
|
b973be63f793652717c340d0fbf0785f960d1aff
|
72c4b44aa13c5fbb68ad18a2281d9a19478a8917
|
refs/heads/main
| 2023-04-10T19:43:47.856755
| 2021-04-06T18:58:27
| 2021-04-06T18:58:27
| 355,301,058
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 261
|
r
|
test-graph_evol.R
|
# Smoke test: graph_evol() should return a ggplot object for a small
# selection of territories.
# NOTE(review): relies on the package-internal dataset `liste_zone_complete`
# and on graph_evol() being exported/loaded by testthat.
test_that("graph_evol works", {
# Three territories: one EPCI code and two departments.
liste_ter <- dplyr::filter(liste_zone_complete, CodeZone %in% c("200060010", "49", "52"))
graph <- graph_evol(fil = "Eol", indic = "Puissance", liste_ter)
# A ggplot object carries class c("gg", "ggplot").
testthat::expect_equal(attr(graph, "class"), c("gg", "ggplot" ))
})
|
6b160115b15a5f5cdb206ebe0688ab23bf95a111
|
0e7763471f0c1b4883fff8853106368db7502849
|
/tests/testthat.R
|
04fc451dffb11ed1a2bc081f00b489d7ae999ffd
|
[
"MIT"
] |
permissive
|
virgesmith/humanleague
|
d7b000c492aeb76fce2dff0fda4b5fa4650b5cd6
|
dceb216f9f40eb07bec7ecb5eaa1fed6224a23fd
|
refs/heads/main
| 2023-06-24T04:06:21.268692
| 2023-05-19T14:49:47
| 2023-05-19T14:49:47
| 95,961,787
| 16
| 4
|
NOASSERTION
| 2023-08-27T11:25:44
| 2017-07-01T12:03:50
|
C++
|
UTF-8
|
R
| false
| false
| 65
|
r
|
testthat.R
|
# Standard testthat entry point: runs every test under tests/testthat/
# for the humanleague package (invoked by R CMD check).
library(testthat)
library(humanleague)
test_check("humanleague")
|
645a15d25548e0db569c5fbbe873a36c9d83a679
|
4c699cae4a32824d90d3363302838c5e4db101c9
|
/06_Regressao_com_R/tools/Tools.R
|
6ed2a709c4a226fb2c59d154c112b21293d98751
|
[
"MIT"
] |
permissive
|
janes/BigData_Analytics_com_R
|
470fa6d758351a5fc6006933eb5f4e3f05c0a187
|
431c76b326e155715c60ae6bd8ffe7f248cd558a
|
refs/heads/master
| 2020-04-27T19:39:10.436271
| 2019-02-06T11:29:36
| 2019-02-06T11:29:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,100
|
r
|
Tools.R
|
# This script contains utility functions used by several other R scripts.
# Load it with source() before calling the functions below.

# Combine the `dteday` and `hr` columns of `inFrame` into one POSIXct vector.
# NOTE(review): as.POSIXct(as.integer(x), origin = "1970-01-01") interprets
# the value as SECONDS since the epoch — this assumes dteday arrives as an
# epoch-seconds number (a Date's integer form counts days, which would give
# wrong results); confirm against the callers.
set.asPOSIXct <- function(inFrame) {
dteday <- as.POSIXct(
as.integer(inFrame$dteday),
origin = "1970-01-01")
# Rebuild "YYYY-mm-dd hr:00:00" text and parse it in the local time zone.
as.POSIXct(strptime(
paste(as.character(dteday),
" ",
as.character(inFrame$hr),
":00:00",
sep = ""),
"%Y-%m-%d %H:%M:%S"))
}
# Build POSIXct date-times from a frame whose `dteday` column holds
# "YYYY-mm-dd" strings and whose `hr` column holds the hour of day.
char.toPOSIXct <- function(inFrame) {
  stamp <- paste0(inFrame$dteday, " ", inFrame$hr, ":00:00")
  as.POSIXct(strptime(stamp, "%Y-%m-%d %H:%M:%S"))
}
# Convert the `dteday` column to POSIXct (epoch-seconds interpretation).
# NOTE(review): the body's final assignment is the last evaluated expression,
# so the POSIXct vector is returned invisibly; unlike set.asPOSIXct, the
# hour column is ignored despite the similar name — confirm that is intended.
set.asPOSIXct2 <- function(inFrame) {
dteday <- as.POSIXct(
as.integer(inFrame$dteday),
origin = "1970-01-01")
}
# Turn a day-of-week code vector into a factor labelled with Portuguese
# weekday names. The input must produce exactly seven distinct levels,
# otherwise the levels<- assignment errors.
fact.conv <- function(inVec){
  dias <- c("Segunda", "Terca", "Quarta",
            "Quinta", "Sexta", "Sabado",
            "Domingo")
  outVec <- factor(inVec)
  levels(outVec) <- dias
  outVec
}
# Extract the "YYYY-mm-dd" date portion of a date or date-time value.
get.date <- function(Date){
  stamp <- unlist(strftime(Date, format = "%Y-%m-%d %H:%M:%S"))
  substr(stamp, 1, 10)
}
# Combine a "YYYY-mm-dd" date string and an hour-of-day into a POSIXct
# timestamp at the top of that hour (local time zone).
POSIX.date <- function(Date,Hour){
  stamp <- paste0(Date, " ", Hour, ":00:00")
  as.POSIXct(strptime(stamp, "%Y-%m-%d %H:%M:%S"))
}
# Log-transform a data-frame column, first clamping values below 0.1 to 1
# (whose log is 0) so near-zero entries do not produce large negatives or
# -Inf. NA entries pass through as NA.
var.log <- function(inFrame, col){
  vals <- inFrame[, col]
  vals[!is.na(vals) & vals < 0.1] <- 1
  log(vals)
}
# Append a monthCount column: the month number counted from January 2011
# (2011-01 -> 1, 2012-03 -> 15), derived from the dteday date column and
# the mnth (month-of-year) column.
month.count <- function(inFrame){
  stamps <- strftime(inFrame$dteday, format = "%Y-%m-%dT%H:%M:%S")
  years <- as.numeric(vapply(strsplit(stamps, "-"), `[[`, character(1), 1))
  inFrame$monthCount <- 12 * (years - 2011) + inFrame$mnth
  inFrame
}
# Serialize a non-empty R list into a one-column data frame of integer
# bytes (column `payload`), prefixed with the element count so that
# unserList() can validate it on the way back.
# Args:
#   serlist: a list with at least one element.
# Returns: data.frame with integer column `payload`. Invalid input or a
#   serialization failure yields (with a warning) a sentinel frame encoding
#   list(numElements = 0, payload = NA).
serList <- function(serlist){
  messages <- c("O input nao eh uma lista ou tem comprimento maior que 0",
                "Elementos nulos",
                "A serializacao falhou")
  # Sentinel frame for the failure paths; unserList() decodes it to NA.
  # BUGFIX: the column is now named `payload` (the original early-return
  # frame left it unnamed, so unserList()'s inlist$payload lookup failed).
  sentinel <- function() {
    data.frame(payload = as.integer(serialize(
      list(numElements = 0, payload = NA), connection = NULL)))
  }
  # Scalar guards (short-circuiting ||, not vectorized |).
  if (!is.list(serlist) || is.null(serlist) || length(serlist) < 1) {
    warning(messages[2])
    return(sentinel())
  }
  nObj <- length(serlist)
  # BUGFIX: assign the tryCatch() result. The original assigned `outframe`
  # inside the error handler's own frame, so after a serialization failure
  # `outframe` was undefined and the function errored anyway.
  outframe <- tryCatch(
    data.frame(payload = as.integer(serialize(
      list(numElements = nObj, payload = serlist), connection = NULL))),
    error = function(e) {
      warning(messages[3])
      sentinel()
    }
  )
  outframe
}
# Decode a data frame produced by serList() back into the original list.
# Args:
#   inlist: data.frame with an integer column `payload` (serialized bytes).
# Returns: the stored list, or NA (with a warning) when the payload column
#   is missing/mistyped, deserialization fails, or the stored element count
#   is below 1.
unserList <- function(inlist){
  messages <- c("A coluna payload esta missing ou com tipo incorreto de dado",
                "Erro ao executar esta funcao",
                "A funcao gerou uma lista vazia")
  # Scalar guards (short-circuiting ||): is.integer(NULL) is FALSE, so a
  # missing payload column is caught by the first test.
  if (!is.integer(inlist$payload) || dim(inlist)[1] < 2 ||
      is.null(inlist$payload)) {
    warning(messages[1])
    return(NA)
  }
  # BUGFIX: capture the tryCatch() value. The original handler's return(NA)
  # only returned from the handler itself, leaving `outList` undefined after
  # a deserialization failure and erroring on the next line.
  outList <- tryCatch(unserialize(as.raw(inlist$payload)),
                      error = function(e) {
                        warning(messages[2])
                        NULL
                      })
  if (is.null(outList) || outList$numElements < 1) {
    warning(messages[3])
    return(NA)
  }
  outList$payload
}
|
48367a5aa20d7261d3d6b0b18ce97e7526a4b0d1
|
fe3ecb9b1ddd8de17b8cc93209134f86cd9c4a6f
|
/2_R/chap04_1_Function.R
|
40250d6c6b89b9fbc82ae35172f160ee22480e6f
|
[] |
no_license
|
nsh92/Bigdata-and-Machine-Learning-Education-at-ITWILL
|
d1a7292ee4865a3d0c664dd6ecf3afc0d6325847
|
3cb5661001597499178a2c85f4ccf70dcf0855d6
|
refs/heads/master
| 2022-11-21T23:10:51.421708
| 2020-07-23T12:49:11
| 2020-07-23T12:49:11
| 275,540,204
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,943
|
r
|
chap04_1_Function.R
|
#chap04_2_Function
# 1. User-defined functions
# Syntax:
# name <- function([arguments]){
#   statement
#   statement
#   [return value]
# }
# 1) Function without parameters
f1 <- function(){
cat('f1 함수')
}
# Defining a function does nothing by itself; it must be called.
f1() # this is how the function is invoked
# 2) Function with a parameter
f2 <- function(x){
x2 <- x^2
cat('x2=', x2)
}
f2(10) # pass an actual argument
# 3) Function with a return value
f3 <- function(x,y){
add <- x+y
return(add) # hand the computed sum back to the caller
}
f3(10,5)
# function call -> returned value can be captured in a variable
add_re<-f3(10,5)
num<-1:10
# Sum a vector and return the total.
tot_func <- function(x){
tot <- sum(x)
return(tot)
}
tot_re <- tot_func(num)
tot_re
avg <- tot_re/length(num)
avg
# Exercise: define a calc function covering the four arithmetic operations.
#100 + 20 = 120
#100 - 20 = 80
#100 * 20 = 2000
# 100 / 20 = 5
x<-100
y<-20
# Prints each operation and returns all four results as a one-row data.frame
# (a function can only return a single object, hence the data.frame).
calc <- function(x,y){
add <- x+y
sub <- x-y
mul <- x*y
div <- x/y
cat(x, '+', y, '=', add, '\n')
cat(x, '-', y, '=', sub, '\n')
cat(x, '*', y, '=', mul, '\n')
cat(x, '/', y, '=', div, '\n')
calc_df <- data.frame(add,sub,mul,div)
#return(add,sub,mul,div) would raise an error: return takes one value only
return(calc_df)
}
calc(100,20)
df <- calc(100,20)
df
# Print one multiplication table ("gugudan") for the given number.
gugu <- function(dan){
cat('***',dan,'단 ***\n')
for(i in 1:9){
cat(dan, '*', i, '=', dan*i, '\n')
}
}
gugu(8)
gugu(2)
# Dispatch a statistic by name via switch(): SUM/AVG/VAR/SD.
state <- function(fname, data){
switch(fname,
SUM = sum(data),
AVG = mean(data),
VAR = var(data),
SD = sd(data)
)
}
# Pick the statistic you want by name and it is computed for you.
data <- 1:10
state("SUM", data)
state("AVG", data)
state("VAR", data)
state("SD", data)
# Missing-value (NA) handling function: demonstrates three strategies and
# prints the resulting vector and mean for each.
na<- function(x){
#1. drop the NAs
x1 <- na.omit(x)
cat('x1 = ', x1, '\n')
cat('x1 = ', mean(x1), '\n')
#2. replace NAs with the mean of the non-missing values
x2 <- ifelse(is.na(x), mean(x, na.rm=T), x)
cat('x2 = ', x2, '\n')
cat('x2 = ', mean(x2),'\n')
#3. replace NAs with 0
x3 <- ifelse(is.na(x), 0, x)
cat('x3 = ', x3, '\n')
cat('x3 = ', mean(x3))
}
x <- c(10,5,4,NA,2,6,3,NA,7,5,8,10)
length(x)
mean(x, na.rm=T)
na(x)
###################################
### Monte Carlo simulation
###################################
# Technique for approximating answers to otherwise intractable problems by
# repeated random trials drawn from a probability distribution.
# Coin toss: uniform random draws mapped to heads (0) / tails (1).
coin <- function(n){
r <- runif(n, min=0, max=1)
#print(r) # n trials
result <- numeric()
for (i in 1:n){
if (r[i] <= 0.5)
result[i] <- 0 # heads
else
result[i] <- 1 # tails
}
return(result)
}
# Monte Carlo simulation: estimate the tails probability from n tosses.
montaCoin <- function(n){
cnt <- 0
for(i in 1:n){
cnt <- cnt + coin(1) # call the coin function once per trial
}
result <- cnt / n
return(result)
}
montaCoin(5)
montaCoin(1000)
montaCoin(10000)
# Central limit theorem: more trials converge toward the true probability.
# 2. Key built-in R functions
# 2-1) Descriptive statistics
vec <- 1:10
min(vec) # minimum
max(vec) # maximum
range(vec) # range (min, max)
mean(vec) # mean
median(vec) # median
sum(vec) # sum
prod(vec) # product of the elements
#1*2*3*4*5*6*7*8*9*10
summary(vec) # summary statistics
rnorm(10) # random draws converging to mean 0, sd 1
sd(rnorm(10)) # standard deviation
factorial(5) # factorial = 120
sqrt(49) # square root
# 3-D example data from the RSADBE package (5 x 5 x 2 array of bug counts).
install.packages('RSADBE')
library(RSADBE)
library(help='RSADBE')
data(Bug_Metrics_Software)
str(Bug_Metrics_Software)
# Structure: num [1:5, 1:5, 1:2] -> rows x columns x slices (3-D array)
Bug_Metrics_Software[,,1] # open slice 1
Bug_Metrics_Software[,,2] # open slice 2
# Row-wise and column-wise aggregation over a slice:
rowSums(Bug_Metrics_Software[,,1]) # row sums: total bugs per software
colSums(Bug_Metrics_Software[,,1]) # column sums: per bug type across software
rowMeans(Bug_Metrics_Software[,,1]) # row means
colMeans(Bug_Metrics_Software[,,1]) # column means
# Add a new slice to the 3-D array:
bug<-Bug_Metrics_Software
bug.new <- array(bug,dim=c(5,5,3)) # same layout, one extra slice
dim(bug.new)
bug.new[,,3] = bug[,,1]-bug[,,2]
bug.new
# 2-2) Rounding functions
x <- c(1.5, 2.5, -1.3, 2.5)
round(mean(x)) # 1.3 -> 1
ceiling(mean(x)) # smallest integer >= x
floor(mean(x)) # largest integer <= x
# 3. Random number generation and probability distributions
# 3-1) Normal distribution - continuous (real-valued)
# Syntax: rnorm(n, mean=0, sd=1); 0 and 1 are the defaults, override as needed
n<-1000
r<-rnorm(n, mean=0,sd=1)
mean(r)
sd(r)
hist(r) # symmetric bell shape
# 3-2) Uniform distribution - continuous
# Syntax: runif(n, min=, max=); defaults are 0 and 1
r2 <- runif(n, min = 0, max=1)
r2
hist(r2) # roughly flat across the range
# 3-3) Binomial distribution - discrete (integer-valued)
set.seed(123)
n<-10
r3<-rbinom(n,size=1,0.5)
r3
# setting the seed beforehand makes the draws reproducible
r3<-rbinom(n,size=1,0.25)
r3
# 3-4) sample
sample(10:20,5)
sample(c(10:20, 50:100), 10)
# train(70%)/test(30%) split of a dataset
dim(iris)
idx<-sample(nrow(iris), nrow(iris)*0.7)
idx # sampled row numbers
range(idx)
length(idx)
train<-iris[idx,] # training data
test<-iris[-idx,] # test data
dim(train)
dim(test)
# this is known as the holdout method
# 4. Matrix operation built-ins
x<-matrix(1:9, nrow=3, byrow=T)
x # 3 rows x 3 columns
y<-matrix(1:3, nrow=3)
y # 3 rows x 1 column
# x %*% y : matrix multiplication
x;y
z <- x %*% y
z
# Preconditions for matrix multiplication:
# 1. both x and y must be matrices
# 2. ncol(x) must equal nrow(y)
|
cbe9a9c330c04566a6b74ac0ef8f6eff8cdf850d
|
dc3d19a142a1be6b15977c73d3962dc26f871c9c
|
/plot3.R
|
7a2a2d568c47e84b3918b3c38d0a4ae7b6f3bc94
|
[] |
no_license
|
msinclairstevens/ExData_Plotting1
|
0bb069f11e583b9a6851f52f1e12b7c4f0bd059b
|
52fb5cc3cb8f7fbc2761227aabc0f61698b228f8
|
refs/heads/master
| 2021-01-21T08:33:26.148481
| 2015-07-08T21:48:07
| 2015-07-08T21:48:07
| 38,706,847
| 0
| 0
| null | 2015-07-07T18:28:28
| 2015-07-07T18:28:27
| null |
UTF-8
|
R
| false
| false
| 2,036
|
r
|
plot3.R
|
# Plot 3: overlay the three sub-metering series for 2007-02-01/02 and save
# to plot3.png (480x480).
#The data file is large, so read only a subset into R.
# NOTE(review): skip=62000/nrows=10000 is a hard-coded window assumed to
# contain both target dates; the filter() calls below do the precise
# selection — confirm the window if the source file changes.
data <- read.table("./data/household_power_consumption.txt",
header=FALSE,
sep=";",
na.strings="?",
nrows=10000,
skip=62000,
stringsAsFactors=FALSE,
)
#Clean: Add variable labels (header row was skipped, so assign manually).
names(data) <- c("date", "time", "global_active_power", "global_reactive_power", "voltage", "global_intensity",
"sub_metering_1", "sub_metering_2", "sub_metering_3"
)
#Subset for observations on dates of interest.
library(dplyr)
feb01 <-filter(data, date=="1/2/2007")
feb02 <-filter(data, date=="2/2/2007")
#Combine dates of interest and create a data table.
library(data.table)
plotdata <- data.table(rbind(feb01, feb02))
#Clean: Transform dates (combine date + time into one POSIXct column).
library(lubridate)
x <- paste(plotdata$date, plotdata$time)
plotdata <- mutate(plotdata, new_datetime=dmy_hms(x))
# #Initiate plot. After testing. Copy and comment out.
library(graphics)
# with(plotdata, plot(new_datetime, sub_metering_1, type="l", col="black",
# xlab="", ylab="Energy sub metering"))
#
# #Annotate plot.
# ##Add more points.
# with(plotdata, points(new_datetime, sub_metering_2, type="l", col="red"))
# with(plotdata, points(new_datetime, sub_metering_3, type="l", col="blue"))
# legend("topright", lty=1, lwd=1, col = c("black", "red","blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
#
#Output plot to graphics device (same drawing code as the draft above).
library(grDevices)
png(filename = "plot3.png", width=480, height=480, units="px")
with(plotdata, plot(new_datetime, sub_metering_1, type="l", col="black",
xlab="", ylab="Energy sub metering"))
with(plotdata, points(new_datetime, sub_metering_2, type="l", col="red"))
with(plotdata, points(new_datetime, sub_metering_3, type="l", col="blue"))
legend("topright", lty=1, lwd=1, col = c("black", "red","blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
356732b6f426f2391b813af07d7fed7a9cfa029a
|
ba1edf30bca6e023562e4aed21c0ca009d22f431
|
/db/man/append.covariate.Rd
|
f2511579bdd528e8a6689c9a43e8f1c8981f3a43
|
[
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
rgknox/pecan
|
79f080e77637dfb974ebb29313b5c63d9a53228e
|
5b608849dccb4f9c3a3fb8804e8f95d7bf1e4d4e
|
refs/heads/master
| 2020-12-27T20:38:35.429777
| 2014-05-06T13:42:52
| 2014-05-06T13:42:52
| 19,548,870
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,141
|
rd
|
append.covariate.Rd
|
\name{append.covariate}
\alias{append.covariate}
\title{Append covariate data as a column within a table}
\usage{
append.covariate(data, column.name, ...,
covariates.data = list(...))
}
\arguments{
\item{data}{trait dataframe that will be appended to.}
\item{column.name}{name of the covariate as it will
appear in the appended column}
\item{covariates.data}{one or more tables of covariate
data, ordered by the precedence they will assume in the
event a trait has covariates across multiple tables. All
tables must contain an 'id' and 'level' column, at
minimum.}
}
\description{
Append covariate data as a column within a table
\code{append.covariate} appends one or more tables of
covariate data as a single column in a given table of
trait data. In the event a trait has several covariates
across several given tables, the first table given will
take precedence
}
|
33fa19ea5ec4ca737d5c067650aea12967ae1b3d
|
9969b02c26fa5388ac971b8212c761c6abf98efb
|
/R/est_passage.r
|
02a0cdccc7d24faa892e207f13a5d32868ede800
|
[] |
no_license
|
tmcd82070/CAMP_RST
|
0cccd7d20c8c72d45fca31833c78cd2829afc169
|
eca3e894c19936edb26575aca125e795ab21d99f
|
refs/heads/master
| 2022-05-10T13:33:20.464702
| 2022-04-05T21:05:35
| 2022-04-05T21:05:35
| 10,950,738
| 0
| 0
| null | 2017-05-19T20:42:56
| 2013-06-25T21:24:52
|
R
|
UTF-8
|
R
| false
| false
| 31,471
|
r
|
est_passage.r
|
#' @export
#'
#' @title F.est.passage
#'
#' @description Compute passage estimates, given catch and efficiency trial
#' data.
#'
#' @param catch.df A data frame with one row per \code{trapvisitID} for a
#' particular \code{FinalRun} and \code{lifeStage}.
#'
#' @param release.df A data frame resulting from a call to function
#' \code{F.get.release.data}. Contains efficiency data.
#'
#' @param summarize.by A text string indicating the temporal unit over which
#' daily estimated catch is to be summarized. Can be one of \code{"day"},
#' \code{"week"}, \code{"month"}, or \code{"year"}.
#'
#' @param file.root A text string indicating a prefix to append to all output.
#'
#' @param ci A logical indicating if bootstrapped confidence intervals should be
#' estimated along with passage estimates. The default is 95\%, although
#' levels other than 95\% can be set in function \code{F.bootstrap.passage}.
#'
#' @return A data frame containing daily passage estimates, corrected for times
#' not fishing, along with associated standard errors.
#'
#' @details Two main steps comprise the estimation of passage. The first
#' fetches and formats all the necessary data. The second performs
#' statistical analysis on those processed data. Function
#' \code{F.est.passage} is the workhorse function for all statistical analysis
#' associated with the estimation of passage. As such, it calls functions
#' responsible for catch modeling (\code{F.est.catch}), efficiency modeling
#' (\code{F.est.efficiency}), and the bootstrapping of passage
#' (\code{F.bootstrap.passage}).
#'
#' Function \code{F.est.passage} brings together catch and efficiency data.
#' Called the "grand merge," resulting data frame \code{grand.df} forms the
#' basis of all passage estimation. Merging takes places on unique
#' combinations of \code{trapPositionID} and \code{batchDate}. Trap matches
#' respect decimal suffixes appended due to gaps in fishing. See the section
#' Fishing Gaps under the Structured Query Language (SQL) header in \code{F.sqlFile}.
#'
#' In processing prior to the creation of the \code{grand.df}, the dates
#' outside the first and last date of valid fishing are dropped from each
#' trap. In reality however, the season for each trap is identified as non-missing
#' catch. In other words, the grand merge inserts every date for all
#' traps because the underlying efficiency data frame has all dates. For those
#' dates for which a trap was not fishing, the resulting catch (and thus passage)
#' is essentially considered zero.
#'
#' Function \code{F.bootstrap.passage} summarizes the daily passage estimates
#' housed in \code{grand.df} to the temporal units specified via
#' \code{summarize.by}, and then compiles all statistics for eventual
#' reporting. Statistics include weighted mean forklength, standard deviation
#' of forklength, and fish counts \eqn{N}.
#'
#' Function calls resulting in non-zero catch, but zero efficiency, due to no
#' valid efficiency trials, result in warnings of zero efficiency. The
#' function will continue, but all passage estimates will be \code{NA}.
#'
#' @section Fish Accounting: Passage estimation results in the partitioning of
#' fish into different groups. For example, a fish could be assigned/not
#' assigned, measured/not measured, half-cone/full-cone, plus-count, imputed,
#' or inflated. Function \code{F.est.passage} organizes all of
#' these different types of fish following their initial partitioning in
#' function \code{F.get.catch.data}. Fish accounting on a daily basis ensures
#' that the counts of these different types of fish collapse back to their
#' original totals following analytic processing. Said another way, fish
#' accounting ensures that no fish are mysteriously gained or lost during the
#' passage estimation process.
#'
#' Three types of daily checks are performed for each individual trap, with the
#' function stopping in any case for which accounting fails.
#'
#' \enumerate{
#' \item{\eqn{totalCatch = assignedCatch + unassignedCatch +
#' imputedCatch}}
#' \item{\eqn{inflatedCatch = assignedCatch + unassignedCatch}}
#' \item{\eqn{totalCatch = inflatedCatch + imputedCatch}} }
#'
#' @seealso \code{F.get.release.data}, \code{F.bootstrap.passage},
#' \code{F.est.catch}, \code{F.est.efficiency}
#'
#' @author WEST Inc.
#'
#' @examples
#' \dontrun{
#' # ---- Estimate passage based on a given set of
#' # ---- catch and release dataframes, over weeks.
#' thePassage <- F.est.passage(catch.df, release.df, "week", "myFileRoot", ci=TRUE )
#' }
F.est.passage <- function( catch.df, release.df, summarize.by, file.root, ci ){
# catch.df <- catch.df.ls
# release.df <- release.df
# summarize.by <- by
# file.root <- out.fn.root
# ci <- ci
# ---- Data frame catch.df gets manipulated along the way. Preserve the min.date and
# ---- max.date entered as attribute variables.
min.date <- attr(catch.df,"min.date")
max.date <- attr(catch.df,"max.date")
enhmodel <- attr(catch.df,"enhmodel")
# ---- Maybe oldtrapPositionID doesn't exist if there are no gaps in fishing.
if("oldtrapPositionID" %in% colnames(catch.df)){
attr(catch.df,"catch.subsites") <- sort(unique(catch.df$oldtrapPositionID))
} else {
attr(catch.df,"catch.subsites") <- sort(unique(catch.df$trapPositionID))
}
# ---- Get global variable values.
# ---- NOTE(review): these are read from .GlobalEnv, so the package's global-variable
# ---- setup (GlobalVars, per the comment near the bootstrap call) must run first -- confirm.
max.ok.gap <- get("max.ok.gap",envir=.GlobalEnv)
passReport <- get("passReport",envir=.GlobalEnv)
R <- get("R",envir=.GlobalEnv)
# ---- Obtain text description of trap positions for use in output.
catch.df.sites <- unique(catch.df[,c('trapPositionID','TrapPosition')])
colnames(catch.df.sites) <- c('subSiteID','subSiteName')
# # ---- Obtain Julian weeks once and for all and place in Global environment for ease.
# if( summarize.by == "week" ){
# db <- get( "db.file", envir=.GlobalEnv )
# ch <- odbcConnectAccess(db)
# the.Jdates <<- sqlFetch( ch, "Dates" )
# close(ch)
# }
time.zone <- get("time.zone", envir=.GlobalEnv )
# ---- Small helper: print a banner line to the console to delimit stages.
f.banner <- function( x ){
cat("\n")
cat(paste(rep("=",50), collapse=""));
cat(x);
cat(paste(rep("=",50), collapse=""));
cat("\n")
}
f.banner(" F.est.passage - START ")
# ---- Keep track of produced files.
out.fn.list <- NULL
# ---- Retrieve the progress bar.
usepb <- exists( "progbar", where=.GlobalEnv )
# ---- Need to collapse over lifeStage, in light of GitHub Issue #73. Ideally, this would occur in the very
# ---- beginning, but Unassigned counts are subsumed in run + lifeStage combinations in summarize_fish_visit.
# ---- So collapse over lifeStage here. Need to do this before we merge immediately below; otherwise, they break.
# ---- Helper function to collapse fish counts: sums column `var` per trapVisitID,
# ---- dropping NA rows, and returns a one-column data frame named after `var`.
collapseEm <- function(var){
# var <- 'halfConeUnassignedCatch'
temp <- data.frame(temp=tapply(catch.df[!is.na(catch.df[,var]),][,var],list(catch.df[!is.na(catch.df[,var]),]$trapVisitID),FUN=sum))
colnames(temp)[colnames(temp) == 'temp'] <- var
temp
}
# ---- If we're running over runs, collapse counts over lifeStage.
passReport <- get("passReport",envir=.GlobalEnv)
if(passReport == 'ALLRuns'){
v1 <- collapseEm('n.tot')
v2 <- collapseEm('n.Orig')
v3 <- collapseEm('n.Unassd')
v4 <- collapseEm('halfConeAssignedCatch')
v5 <- collapseEm('halfConeUnassignedCatch')
v6 <- collapseEm('assignedCatch')
v7 <- collapseEm('unassignedCatch')
v8 <- collapseEm('modAssignedCatch')
v9 <- collapseEm('modUnassignedCatch')
vv <- cbind(v1,v2,v3,v4,v5,v6,v7,v8,v9)
df3b <- vv
df3b$trapVisitID <- rownames(df3b)
df3c <- unique(catch.df[,c("trapVisitID","trapPositionID","EndTime","ProjID","batchDate","StartTime","SampleMinutes","TrapStatus","siteID","siteName","oldtrapPositionID","TrapPosition","sampleGearID","sampleGear","halfConeID","HalfCone","includeCatchID","FinalRun","lifeStage")])
df3d <- merge(df3c,df3b,by=c('trapVisitID'),all.x=TRUE)
df3d <- df3d[order(df3d$EndTime),]
# ---- Keep the original, because it has mean and sd info over all the fish.
# ---- We're not amending those data to fix the collapsing over lifestage issue here.
catch.df.old <- catch.df
catch.df <- df3d
}
# ---- Data frame catch.df has the raw unmarked counts of catch. Note that rows with missing data for
# ---- certain days, i.e., for which imputation occurs, also appear as line items here. So, to get catch,
# ---- for different trapPositionIDs/subSiteIDs, summarize and add together (because some days have more
# ---- than one record). This brings back more dates than ultimately wanted; let merge below (after
# ---- grand.df) take care of which to keep.
# ---- In the case a trap runs more than 24 hours, followed by a short "Not fishing" period of duration
# ---- less than 2 hours (but greater than 30 minutes), that short duration is subsumed into the previous
# ---- fishing period. In this case, the endDate and batchDate are overwritten with the "Not fishing"
# ---- period values. This leads to a problem in fish accounting, which records any fish tied to this
# ---- overwriting process with the original day. So, in creating jason.catch4.df, redefine the batchDate
# ---- to be the latest day. All of this takes place in the creation of data frame jason.catch1.df,
# ---- and the two commented lines that feed into data frame jason.catch2.df.
jason.catch1.df <- catch.df
jason.catch1.df$R_ID <- seq(1,nrow(jason.catch1.df),1)
jason.catch1.df <- jason.catch1.df[order(jason.catch1.df$EndTime),]
jason.catch1.df$mday_lead <- as.POSIXlt(dplyr::lead(jason.catch1.df$EndTime,1))$mday
jason.catch1.df$mday <- as.POSIXlt(jason.catch1.df$EndTime)$mday
jason.catch1.df$candidate <- ifelse( (dplyr::lead(jason.catch1.df$TrapStatus,1) == "Not fishing") & (dplyr::lead(jason.catch1.df$SampleMinutes,1) <= max.ok.gap*60) & (jason.catch1.df$mday != jason.catch1.df$mday_lead),1,0 )
jason.catch1.df$batchDate <- as.POSIXct(ifelse(jason.catch1.df$candidate == 1 & !is.na(jason.catch1.df$candidate),dplyr::lead(jason.catch1.df$batchDate,1),jason.catch1.df$batchDate),origin="1970-01-01 00:00.00 UTC",format="%Y-%m-%d",tz=time.zone)
jason.catch1.df <- jason.catch1.df[order(jason.catch1.df$R_ID),]
jason.catch2.df <- jason.catch1.df[,c('trapVisitID','batchDate','trapPositionID','n.Orig')]
#jason.catch2.df <- catch.df[,c('trapVisitID','batchDate','trapPositionID','n.Orig')]
jason.catch3.df <- data.frame(with(jason.catch2.df,tapply(n.Orig, list(batchDate,trapPositionID), sum, na.rm=T )))
jason.catch4.df <- na.omit(reshape(jason.catch3.df,idvar='batchDate',ids=row.names(jason.catch3.df),times=names(jason.catch3.df),timevar='trapPositionID',varying=list(names(jason.catch3.df)),direction='long'))
colnames(jason.catch4.df)[2] <- 'rawCatch'
jason.catch4.df$trapPositionID <- as.character(substr(jason.catch4.df$trapPositionID,2,nchar(jason.catch4.df$trapPositionID)))
jason.catch4.df$batchDate <- as.POSIXct(jason.catch4.df$batchDate,time.zone)
# ---- Do the same as above, but with n.tot. Sloppy to do this twice like this, but it works.
jason.totCatch2.df <- jason.catch1.df[,c('trapVisitID','batchDate','trapPositionID','n.tot')]
#jason.totCatch2.df <- catch.df[,c('trapVisitID','batchDate','trapPositionID','n.tot')]
jason.totCatch3.df <- data.frame(with(jason.totCatch2.df,tapply(n.tot, list(batchDate,trapPositionID), sum, na.rm=T )))
jason.totCatch4.df <- na.omit(reshape(jason.totCatch3.df,idvar='batchDate',ids=row.names(jason.totCatch3.df),times=names(jason.totCatch3.df),timevar='trapPositionID',varying=list(names(jason.totCatch3.df)),direction='long'))
colnames(jason.totCatch4.df)[2] <- 'n.tot'
jason.totCatch4.df$trapPositionID <- as.character(substr(jason.totCatch4.df$trapPositionID,2,nchar(jason.totCatch4.df$trapPositionID)))
jason.totCatch4.df$batchDate <- as.POSIXct(jason.totCatch4.df$batchDate,time.zone)
# ---- Estimate capture for every day of season. Return value is
# ---- a data frame with columns $batchDate and $catch.
# ---- By default, this produces a catch graph in a png. Turn
# ---- this off with plot=FALSE in call. Resulting list
# ---- catch.and.fits has components $catch, $fits, $X.miss, $gaps,
# ---- $bDates.miss, $trapsOperating, true.imp, and allDates.
catch.and.fits <- F.est.catch( catch.df, plot=TRUE, plot.file=file.root )
if(usepb){
progbar <- get( "progbar", pos=.GlobalEnv )
tmp <- getWinProgressBar(progbar)
setWinProgressBar(progbar, (2*tmp + 1)/3 )
}
# ---- Note this doesn't have imputedCatch.
catch <- catch.and.fits$catch
# ---- The catch data frame in this list has imputed values
# ---- already overwriting the original numbers. This happens
# ---- in F.catch.model. This step incorporates the imputed
# ---- values into the analysis for fish accounting and
# ---- eventual accounting.
jason.catch.and.fits2.df <- catch.and.fits$true.imp
jason.catch.and.fits3.df <- data.frame(with(jason.catch.and.fits2.df,tapply(n.tot, list(batchDate,trapPositionID), sum, na.rm=T )))
jason.catch.and.fits4.df <- na.omit(reshape(jason.catch.and.fits3.df,idvar='batchDate',ids=row.names(jason.catch.and.fits3.df),times=names(jason.catch.and.fits3.df),timevar='trapPositionID',varying=list(names(jason.catch.and.fits3.df)),direction='long'))
colnames(jason.catch.and.fits4.df)[2] <- 'imputedCatch'
jason.catch.and.fits4.df$trapPositionID <- as.character(substr(jason.catch.and.fits4.df$trapPositionID,2,nchar(jason.catch.and.fits4.df$trapPositionID)))
jason.catch.and.fits4.df$batchDate <- as.POSIXct(jason.catch.and.fits4.df$batchDate,time.zone)
out.fn.list <- c(out.fn.list, attr(catch.and.fits, "out.fn.list"))
# ---- Data frame release.df has info on adjusted beginning
# ---- and end fishing days, for each trap. Note that
# ---- release.df doesn't have decimal expansion, due to
# ---- incorporation of gap in fishing. Note that this for each
# ---- gap-in-fishing trap, which is not what we want here.
allDates <- catch.and.fits$allDates
allDates$trap <- round(allDates$trap,0)
# ---- Gaps in fishing lead to traps with the same 5-digit prefix potentially having different
# ---- beg.date and end.date. This leads to a sloppy join, where the data expand unnecessarily.
# ---- For each unique 5-digit trap, summarize the beg.date and end.date over its duration.
# ---- We want to collapse efficiency to the 5-digit trap, and not keep the decimal suffix.
# ---- A helper function: returns, per 5-digit trap, the max of date column `var`,
# ---- as a two-element list (trap IDs, then the summarized dates named `var`).
fix.dates <- function(var){
ans <- aggregate(allDates[,c(var)],by=list(trapPositionID=allDates$trap),function(x) max(strftime(x,format="%Y-%m-%d")))
ans$x <- as.POSIXlt(ans$x,format="%Y-%m-%d",tz=time.zone)
ans <- list(data.frame(trapPositionID=ans[,1]),data.frame(ans[,2]))
names(ans[[2]])[names(ans[[2]]) == "ans...2."] <- var
return(ans)
}
# ---- Get the value for each variable, over all decimal traps, and assemble.
# ---- NOTE(review): local `c` below shadows base::c for the rest of this scope.
a <- fix.dates("beg.date")
b <- fix.dates("end.date")
c <- fix.dates("origBeg.date")
d <- fix.dates("origEnd.date")
newAllDates <- cbind(a[[1]],a[[2]],b[[2]],c[[2]],d[[2]])
# ---- Replace allDates with the data that actually matter here.
allDates <- newAllDates
# ---- Now, bring in the min and max dates to release.df for use in constructing bd.
release.df <- merge(release.df,allDates,by=c('trapPositionID'),all.x=TRUE)
f.banner("Efficiency estimation ")
# ---- Get all the batchDates for use in efficiency.
bd <- strptime(sort( seq(as.Date(min(na.omit(release.df$ReleaseDate),na.omit(release.df$origBeg.date),unique(catch$batchDate))),as.Date(max(na.omit(release.df$ReleaseDate),na.omit(release.df$origEnd.date),unique(catch$batchDate))),"days")),format="%F",tz=time.zone)
# ---- Estimate capture for every day of season. Add in min.date and max.date for enh eff.
attr(release.df,"min.date") <- min.date
attr(release.df,"max.date") <- max.date
attr(release.df,"enhmodel") <- enhmodel
# ---- Maybe oldtrapPositionID doesn't exist if there are no gaps in fishing.
if("oldtrapPositionID" %in% colnames(catch.df)){
attr(release.df,"catch.subsites") <- sort(unique(catch.df$oldtrapPositionID))
} else {
attr(release.df,"catch.subsites") <- sort(unique(catch.df$trapPositionID))
}
eff.and.fits <- F.est.efficiency( release.df, bd, df.spline=4, plot=TRUE, plot.file=file.root )
if(usepb){
tmp <- getWinProgressBar(progbar)
setWinProgressBar(progbar, (2*tmp + 1)/3 )
}
efficiency <- eff.and.fits$eff
out.fn.list <- c(out.fn.list, attr(eff.and.fits, "out.fn.list"))
# ---- Something is wrong with efficiency data. Make an empty efficiency data frame
if( all(is.na(efficiency[1,])) ){
efficiency <- data.frame( trapPositionID=catch$trapPositionID, batchDate=catch$batchDate, efficiency=rep(NA, nrow(catch)))
warning("Zero efficiency")
}
# ---- Could do...
# n <- data.base( catch, efficiency=efficiency$efficiency, gam.estimated.eff=efficiency$gam.estimated )
# ---- ...to produce a data frame of values that go into estimator, one line per batchDate.
# ---- Now, estimate passage. It shouldn't happen that efficiency <= 0,
# ---- but just in case. This also gives us a way to exclude days --
# ---- just set efficiency <= 0.
if( any(ind <- !is.na(efficiency$efficiency) & (efficiency$efficiency <= 0)) ){
efficiency$efficiency[ind] <- NA
}
# ---- First merge catch and efficiency data frames
catch$batchDay <- format(catch$batchDate, "%Y-%m-%d")
catch$trapPositionID <- as.character(catch$trapPositionID)
efficiency$batchDay <- format(efficiency$batchDate, "%Y-%m-%d")
efficiency$trapPositionID <- as.character(efficiency$trapPositionID)
# ---- Drop POSIX date from efficiency.
efficiency <- efficiency[,names(efficiency) != "batchDate"]
cat("First 20 rows of CATCH...\n")
print(catch[1:20,])
cat("First 20 rows of EFFICIENCY...\n")
print(efficiency[1:20,])
# ---- To ensure that trapPositionIDs with decimals find their efficiency trial trap match,
# ---- ensure we have the old IDs -- otherwise, these won't ever be found.
catch$oldTrapPositionID <- as.character(round(as.numeric(catch$trapPositionID),0))
names(efficiency)[names(efficiency) == 'trapPositionID'] <- 'oldTrapPositionID'
# ---- The Grand Merge. Merge catch info with efficiency info.
grand.df <- merge( catch, efficiency, by=c("oldTrapPositionID", "batchDay"), all=T)
# ---- Get rid of helper variable oldTrapPositionID; it has served its purpose.
grand.df$oldTrapPositionID <- NULL
# ---- For each trap, drop the dates that are outside its min. and max.date.
# ---- The season for each trap is identified as non missing catch. I.e., the
# ---- grand merge puts in every date because efficiency data frame has all dates.
grand.df <- grand.df[!is.na(grand.df$catch), ]
# ---- Bring in raw catch (measured).
grand.df.rawCatch <- merge(grand.df,jason.catch4.df,by=c('trapPositionID','batchDate'),all.x=TRUE)
# ---- Bring in inflated catch (measured + plus counts).
grand.df.rawCatch.Inflated <- merge(grand.df.rawCatch,jason.totCatch4.df,by=c('trapPositionID','batchDate'),all.x=TRUE)
# ---- Bring in imputed catch.
grand.df.rawCatch.Imputed <- merge(grand.df.rawCatch.Inflated ,jason.catch.and.fits4.df,by=c('trapPositionID','batchDate'),all.x=TRUE)
grand.df <- grand.df.rawCatch.Imputed
# ---- Somewhere, there are comments that state that catches of NA mean zero. So,
# ---- replace NA in each of rawCatch and ImputedCatch with zero.
grand.df$imputedCatch <- ifelse(is.na(grand.df$imputedCatch), 0, round(grand.df$imputedCatch,1))
grand.df$rawCatch <- ifelse(is.na(grand.df$rawCatch), 0, grand.df$rawCatch)
grand.df$n.tot <- ifelse(is.na(grand.df$n.tot), 0, grand.df$n.tot) # the preTotalCatch
grand.df$totalEstimatedCatch <- round(grand.df$n.tot + grand.df$imputedCatch,1)
grand.df$rawCatch <- grand.df$catch <- NULL
# ---- Fish accounting.
# ---- Check and make sure that assignedCatch + unassignedCatch + imputedCatch = totalCatch.
# ---- Check and make sure that assignedCatch + unassignedCatch = inflatedCatch.
# ---- Check and make sure that inflatedCatch + imputedCatch = totalCatch.
# ---- Note the rounding. In a rare instance on the Feather, 2012-01-25, trapPositionID 5002, what are displayed
# ---- as 3 modUnassignedCatch leaves to a non-zero number when modUnassigned - 3 is calculated. This causes
# ---- fish accounting to fail. Round all these values (seen to play a role) to the nearest tenth, which should
# ---- take care of this mysterious issue.
grand.df$sum1 <- round(grand.df$modAssignedCatch + grand.df$modUnassignedCatch + grand.df$imputedCatch,1)
grand.df$sum2 <- round(grand.df$modAssignedCatch + grand.df$modUnassignedCatch,1)
grand.df$sum3 <- round(grand.df$halfConeAssignedCatch + grand.df$halfConeUnassignedCatch + grand.df$assignedCatch + grand.df$unassignedCatch + grand.df$imputedCatch,1)
grand.df$check1 <- ifelse(grand.df$sum1 == grand.df$totalEstimatedCatch,TRUE,FALSE)
grand.df$check2 <- ifelse(grand.df$sum2 == round(grand.df$n.tot,1),TRUE,FALSE)
grand.df$check3 <- ifelse(grand.df$sum3 == grand.df$totalEstimatedCatch,TRUE,FALSE)
if(sum(grand.df$check1 + grand.df$check2 + grand.df$check3) != nrow(grand.df)*3){
stop('Issue with summation of assignedCatch, unassignedCatch, inflatedCatch, imputedCatch, and/or totalCatch. Investigate est_passage.R, around line 406.')
} else {
cat('No issue with summation of halfConeAssignedCatch, halfConeUnassignedCatch, assignedCatch, unassignedCatch, modAssignedCatch, modUnassignedCatch, imputedCatch, and/or totalEstimatedCatch. Continuing...\n')
}
# ---- The passage estimator: daily total estimated catch divided by daily efficiency.
grand.df$passage <- rep(NA, nrow(grand.df))
grand.df$passage <- grand.df$totalEstimatedCatch / grand.df$efficiency #ifelse(!is.na(grand.df$efficiency),grand.df$totalEstimatedCatch / grand.df$efficiency,0)
# ---- Need this information to construct week-based confidence intervals.
attr(grand.df,"min.date") <- min.date
attr(grand.df,"max.date") <- max.date
if( !is.na(file.root) ){
# ---- Do this so can change names (headers) in csv file; i.e., drop 2 columns.
tmp.df <- grand.df[, !(names(grand.df) %in% c("nReleased", "nCaught", "batchDay")) ]
names(tmp.df)[ names(tmp.df) == "imputed.catch" ] <- "propImputedCatch"
names(tmp.df)[ names(tmp.df) == "imputed.eff" ] <- "propImputedEff"
# ---- Convert to numbers, 0 or 1.
tmp.df$propImputedEff <- as.numeric(tmp.df$propImputedEff)
tmp.df$passage <- round(tmp.df$passage)
tmp.df$totalCatch <- round(tmp.df$totalEstimatedCatch,1)
tmp.df$efficiency <- round(tmp.df$efficiency, 4)
tmp.df$halfConeAdj <- tmp.df$halfConeAssignedCatch + tmp.df$halfConeUnassignedCatch
# ---- Merge in subsiteNames.
ssiteNames <- catch.df.sites
tmp.df <- merge( ssiteNames, tmp.df, by.x="subSiteID", by.y="trapPositionID", all.y=T )
out.fn <- paste(file.root, "_baseTable.csv", sep="")
tmp.df$TrapPosition <- tmp.df$TrapPositionID <- NULL
# ---- Rearrange columns.
tmp.df <- tmp.df[c('subSiteID','subSiteName','batchDate','assignedCatch','unassignedCatch','halfConeAdj','imputedCatch','totalEstimatedCatch','propImputedCatch','efficiency','propImputedEff','passage')]
tmp.df <- tmp.df[order(tmp.df$subSiteID,tmp.df$batchDate),]
# ---- Possibly obsolete. See below.
# if(enhmodel == TRUE){
# tmp.df$effModel <- "Enhanced"
# } else {
# tmp.df$effModel <- "Regular"
# }
# ---- Update vector of Enhanced status based on trap. Need to worry about Connie's
# ---- decimal trap names here, and the fact they are numeric.
tmpVec <- eff.and.fits$doOldEff[match(do.call("c",lapply(strsplit(as.character(tmp.df$subSiteID),".",fixed=TRUE),function(x) x[1])),names(eff.and.fits$doOldEff))]
tmp.df$effModel <- ifelse(tmpVec == TRUE,"Mark-Recapture","Enhanced")
rm(tmpVec)
write.table( tmp.df, file=out.fn, sep=",", row.names=FALSE, col.names=TRUE)
out.fn.list <- c(out.fn.list, out.fn)
}
# ====== Passage estimates are done by day. Compute variance and summarize ====================================================================================================
f.banner(paste(" Bootstrapping, if called for, and summarizing by", summarize.by))
# ---- Summarization (to weeks, years, etc.) needs to happen in the bootstrapping routine.
# ---- Even if bootstraps are not called for, F.bootstrap averages over traps (if multiple
# ---- present) and summarizes by 'summarize.by'. R set by GlobalVars.
n <- F.bootstrap.passage( grand.df, catch.and.fits$fits, catch.and.fits$X.miss, catch.and.fits$gaps,
catch.and.fits$bDates.miss, eff.and.fits$fits, eff.and.fits$X, eff.and.fits$ind.inside,
eff.and.fits$X.dates, eff.and.fits$obs.data, eff.and.fits$eff.type, summarize.by, R, ci )
# ---- NOTE(review): the bare `n` below is a no-op inside a function (no auto-printing
# ---- occurs except at top level); harmless, left as-is.
n
if(usepb){
tmp <- getWinProgressBar(progbar)
setWinProgressBar(progbar, tmp + (1-tmp)*.9 )
}
# ---- Grab the correct catch.df for use in summarizing.
if(passReport == 'ALLRuns'){
# ---- Obtain Julian dates so days can be mapped to specialized Julian weeks.
db <- get( "db.file", envir=.GlobalEnv )
ch <- odbcConnectAccess(db)
JDates <- sqlFetch( ch, "Dates" )
close(ch)
attr(catch.df.old$batchDate,"JDates") <- JDates
index.aux <- F.summarize.index( catch.df.old$batchDate, summarize.by )
} else { # by lifeStage
# ---- Obtain Julian dates so days can be mapped to specialized Julian weeks.
db <- get( "db.file", envir=.GlobalEnv )
ch <- odbcConnectAccess(db)
JDates <- sqlFetch( ch, "Dates" )
close(ch)
attr(catch.df$batchDate,"JDates") <- JDates
index.aux <- F.summarize.index( catch.df$batchDate, summarize.by )
}
# ---- Force summarize.index and bootstrap passage to have the same year.
if(summarize.by == 'year'){
n[1,1] <- index.aux[[1]][1]
}
# ---- Grab the correct catch.df for use in summarizing.
if(passReport == 'ALLRuns'){
# ---- Calculate numerator (weighted) mean forklength.
num <- catch.df.old$mean.fl.Orig * catch.df.old$n.Orig
num <- tapply( num, index.aux, sum, na.rm=T )
# ---- Calculate numerator standard deviation of forklength.
# ---- This is sum of squares without the summing just yet.
num.sd <- (catch.df.old$sd.fl.Orig * catch.df.old$sd.fl.Orig) * (catch.df.old$n.Orig - 1)
num.sd <- tapply( num.sd, index.aux, sum, na.rm=T )
# ---- Calculate n.
den <- tapply( catch.df.old$n.Orig, index.aux, sum, na.rm=T)
# ---- Estimate by lifeStage.
} else {
# ---- Calculate numerator (weighted) mean forklength.
num <- catch.df$mean.fl.Orig * catch.df$n.Orig
num <- tapply( num, index.aux, sum, na.rm=T )
# ---- Calculate numerator standard deviation of forklength.
# ---- This is sum of squares without the summing just yet.
num.sd <- (catch.df$sd.fl.Orig * catch.df$sd.fl.Orig) * (catch.df$n.Orig - 1)
num.sd <- tapply( num.sd, index.aux, sum, na.rm=T )
# ---- Calculate n.
den <- tapply( catch.df$n.Orig, index.aux, sum, na.rm=T)
}
# ---- Mean and standard deviation computations.
aux.fl <- ifelse( den > 0, num / den, NA )
aux.sd <- ifelse( den > 1, sqrt(num.sd / (den-1)), NA )
# ---- Grab the correct catch.df for use in summarizing.
if(passReport == 'ALLRuns'){
# ---- Reduce data frame to select first of each and change to batchdate.
catch.df.reduced <- aggregate(catch.df.old,by=list(ID=catch.df.old$batchDate),head,1)
# ---- By lifeStage.
} else {
# ---- Reduce data frame. Possibly due to multiple records over lifestage in run estimates.
catch.df.reduced <- aggregate(catch.df,by=list(ID=catch.df$batchDate),head,1)
}
catch.df.Fishing <- catch.df
catch.df.Fishing$SampleMinutes <- ifelse(catch.df.Fishing$TrapStatus == 'Not fishing',0,catch.df.Fishing$SampleMinutes)
catch.df.Fishing <- unique(catch.df.Fishing[,c('SampleMinutes','batchDate','trapPositionID')])
num <- aggregate(catch.df.Fishing$SampleMinutes,by=list(ID=catch.df.Fishing$batchDate),sum)[,2]
# ---- Variable batchDate defaults to Mountain Time. Fix that.
tzn <- get("time.zone", .GlobalEnv )
catch.df.reduced$batchDate <- as.POSIXct( strptime( format(catch.df.reduced$batchDate, "%Y-%m-%d"), "%Y-%m-%d", tz=tzn),tz=tzn)
# ---- Index in reduced data frame.
attr(catch.df.reduced$batchDate,"JDates") <- JDates
index.aux <- F.summarize.index(catch.df.reduced$batchDate,summarize.by)
# ---- Hours actually sampled during the 'index' period.
aux.hrs <- tapply( num, index.aux, sum, na.rm=T )/60
# ---- Make big data frame of statistics.
aux<-data.frame( s.by=dimnames(aux.fl)[[1]],
nForkLenMM=c(den),
meanForkLenMM=c(aux.fl),
sdForkLenMM=c(aux.sd),
sampleLengthHrs=c(aux.hrs),
stringsAsFactors=F, row.names=NULL )
# ---- Merge 'n' and 'aux' information together.
n <- merge(n,aux, by="s.by", all.x=T)
n$sampleLengthDays <- n$sampleLengthHrs / 24
# ---- For catch periods in which no fish were caught, the underlying boring intercept-
# ---- only model has a relatively large negative beta. During bootstrapping, this beta
# ---- is randomly sampled; apparently, this can lead to small decimal confidence
# ---- intervals that are non-zero. For zero-passage periods, force the confidence
# ---- intervals to also be zero.
#n$lower.95 <- ifelse(n$passage == 0,0,n$lower.95)
#n$upper.95 <- ifelse(n$passage == 0,0,n$upper.95)
# ---- Possibly only works west of GMT (North America). East of GMT,
# ---- it may be 12 hours off. Untested east of GMT.
tz.offset <- as.numeric(as.POSIXct(0, origin="1970-01-01", tz=time.zone))
n$date <- as.POSIXct( n$date-tz.offset, origin="1970-01-01", tz=time.zone )
# ---- Put the final data frame together: attach reporting metadata as attributes.
names(n)[names(n) == "s.by"] <- summarize.by
attr(n, "taxonID" ) <- attr(catch.df,"taxonID")
attr(n, "species.name") <- attr(catch.df, "species.name")
attr(n, "siteID" ) <- attr(catch.df,"siteID")
attr(n, "site.name") <- attr(catch.df, "site.name")
attr(n, "site.abbr") <- attr(catch.df, "site.abbr")
attr(n, "runID") <- attr(catch.df, "runID")
attr(n, "run.name") <- attr(catch.df, "run.name")
attr(n, "year") <- attr(catch.df, "year")
attr(n, "run.season") <- attr(catch.df, "run.season")
attr(n, "summarized.by") <- summarize.by
attr(n, "out.fn.list") <- out.fn.list
attr(n, "trapsOperating") <- catch.and.fits$trapsOperating
f.banner(" F.est.passage - COMPLETE ")
# ---- Return the summarized passage data frame (with attributes for reporting).
n
}
|
518f6d309c1d84ab9a492d676513df20f27e0e33
|
061994e8bc10cee792da0792cc0ed81809323097
|
/R/listChildSites.R
|
df7f126e1a62465be62ac5df76f7cc1a93ac7821
|
[] |
no_license
|
KevinSee/PITcleanr_old
|
69c1ca77078b79fa35198e805753dc417f066905
|
3da9b0c370720b87ec981ed56200bfccce43d126
|
refs/heads/master
| 2023-04-06T11:19:39.967020
| 2020-06-17T21:00:17
| 2020-06-17T21:00:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 901
|
r
|
listChildSites.R
|
#' @title Child Sites
#'
#' @description Recursively finds all sites downstream (child sites) of one
#'   or more parent sites in a parent-child lookup table.
#'
#' @author Kevin See
#'
#' @param parent_site character vector of site code(s) whose descendants are
#'   sought.
#' @param parent_child_df data.frame with (at least) columns
#'   \code{ParentSite} and \code{ChildSite}, one row per parent-child link.
#'
#' @export
#' @return character vector of all child site codes reachable from
#'   \code{parent_site} (empty when there are none).
#' @examples
#' pc <- data.frame(ParentSite = c("A", "B"), ChildSite = c("B", "C"))
#' listChildSites("A", pc)
listChildSites = function(parent_site, parent_child_df) {
  # Direct children of the given parent(s).  Links back into parent_site
  # itself are excluded so a self-referencing row cannot recurse forever.
  keep <- parent_child_df$ParentSite %in% parent_site &
    !(parent_child_df$ChildSite %in% parent_site)
  child_sites <- unique(as.character(parent_child_df$ChildSite[keep]))

  # Does any child have children of its own (ignoring self-links)?
  # na.rm = TRUE mirrors dplyr::filter(), which drops NA comparisons.
  has_grandchildren <- vapply(child_sites, function(s) {
    any(parent_child_df$ParentSite == s & parent_child_df$ChildSite != s,
        na.rm = TRUE)
  }, logical(1))

  if (!any(has_grandchildren)) return(child_sites)
  # At least one child is itself a parent: recurse one generation deeper
  # and accumulate the results.
  c(child_sites, listChildSites(child_sites, parent_child_df))
}
|
0738331475fd8d6cdb8232c86be16ba731c35332
|
b27d0a4f5dc800f928e5ef31210879899955becd
|
/model for mapping/temp.R
|
b8125a4af99589b0a2a4e31ac8cd4fd1ca91ad2a
|
[] |
no_license
|
gaohong5752/Geoderma_paper_code
|
a8fa8a76891d34428bad83d80cc7aa29ac2eea32
|
19c22eb605b533fa6dd0932bba01d6419c270c7d
|
refs/heads/main
| 2023-06-27T11:57:05.560818
| 2021-07-22T09:31:44
| 2021-07-22T09:31:44
| 388,402,440
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,715
|
r
|
temp.R
|
#RSP -ok
Setup: shared random-forest fitting helper.  The 18 designs below previously
repeated the same chunk verbatim; they now all call `fit_rf_map()`.
```{r}
# fit_rf_map: fit a soil-class random forest for one sampling design and
# write the predicted soil-class map to disk.
#
# Arguments:
#   mn - design name; also the basename of the input point shapefile
#        (<point_shp_folder><mn>.shp) and of the output raster
#        (<result_tif_folder><mn>.tif).
#
# Reads from the global environment (set earlier in this notebook):
#   point_shp_folder, covStack, soil_type, result_tif_folder.
#
# Side effects: prints the training-data dimensions, the variable-importance
# plot and the fitted model, and writes the predicted GTiff.
#
# Returns: the number of training samples used, so the caller can record it
# in sampleNum_dict.
fit_rf_map <- function(mn) {
  # ---- Load the sample points for this design
  shpPoint <- readOGR(paste(point_shp_folder, mn, ".shp", sep = ""))
  # ---- Extract covariate values at each sample point
  sampData <- extract(covStack, shpPoint, sp = TRUE, method = "simple")
  sampData <- sampData@data
  # ---- Keep only the soil classes of interest and drop incomplete rows
  sampData <- sampData[sampData$soilID %in% soil_type, ]
  sampData <- na.omit(sampData)
  print(dim(sampData))
  # ---- Drop unused factor levels so randomForest sees only present classes
  sampData$Bedrock <- droplevels(as.factor(sampData$Bedrock))
  sampData$soilID <- droplevels(as.factor(sampData$soilID))
  # ---- Fit the classification random forest
  RF_model <- randomForest(soilID ~ ., data = sampData,
                           importance = TRUE, proximity = FALSE,
                           ntree = 1000, type = "classification")
  varImpPlot(RF_model)
  print(RF_model)
  # ---- Predict over the full covariate stack and write the map
  predict(covStack, RF_model, paste(result_tif_folder, mn, ".tif", sep = ""),
          format = "GTiff", datatype = "FLT4S", overwrite = TRUE)
  nrow(sampData)
}
```
##ns3
SNA RSP ns3 15
```{r}
sampleNum_dict["All_pSNA_RSP_NS3_pen15"] <- fit_rf_map("All_pSNA_RSP_NS3_pen15")
```
SNA RSP ns3 30
```{r}
sampleNum_dict["All_pSNA_RSP_NS3_pen30"] <- fit_rf_map("All_pSNA_RSP_NS3_pen30")
```
SNA RSP ns3 45
```{r}
sampleNum_dict["All_pSNA_RSP_NS3_pen45"] <- fit_rf_map("All_pSNA_RSP_NS3_pen45")
```
RAN RSP ns3 15
```{r}
sampleNum_dict["All_pRAN_RSP_NS3_pen15"] <- fit_rf_map("All_pRAN_RSP_NS3_pen15")
```
RAN RSP ns3 30
```{r}
sampleNum_dict["All_pRAN_RSP_NS3_pen30"] <- fit_rf_map("All_pRAN_RSP_NS3_pen30")
```
RAN RSP ns3 45
```{r}
sampleNum_dict["All_pRAN_RSP_NS3_pen45"] <- fit_rf_map("All_pRAN_RSP_NS3_pen45")
```
##ns5
SNA RSP ns5 15
```{r}
sampleNum_dict["All_pSNA_RSP_NS5_pen15"] <- fit_rf_map("All_pSNA_RSP_NS5_pen15")
```
SNA RSP ns5 30
```{r}
sampleNum_dict["All_pSNA_RSP_NS5_pen30"] <- fit_rf_map("All_pSNA_RSP_NS5_pen30")
```
SNA RSP ns5 45
```{r}
sampleNum_dict["All_pSNA_RSP_NS5_pen45"] <- fit_rf_map("All_pSNA_RSP_NS5_pen45")
```
RAN RSP ns5 15
```{r}
sampleNum_dict["All_pRAN_RSP_NS5_pen15"] <- fit_rf_map("All_pRAN_RSP_NS5_pen15")
```
RAN RSP ns5 30
```{r}
sampleNum_dict["All_pRAN_RSP_NS5_pen30"] <- fit_rf_map("All_pRAN_RSP_NS5_pen30")
```
RAN RSP ns5 45
```{r}
sampleNum_dict["All_pRAN_RSP_NS5_pen45"] <- fit_rf_map("All_pRAN_RSP_NS5_pen45")
```
##ns7
SNA RSP ns7 15
```{r}
sampleNum_dict["All_pSNA_RSP_NS7_pen15"] <- fit_rf_map("All_pSNA_RSP_NS7_pen15")
```
SNA RSP ns7 30
```{r}
sampleNum_dict["All_pSNA_RSP_NS7_pen30"] <- fit_rf_map("All_pSNA_RSP_NS7_pen30")
```
SNA RSP ns7 45
```{r}
sampleNum_dict["All_pSNA_RSP_NS7_pen45"] <- fit_rf_map("All_pSNA_RSP_NS7_pen45")
```
RAN RSP ns7 15
```{r}
sampleNum_dict["All_pRAN_RSP_NS7_pen15"] <- fit_rf_map("All_pRAN_RSP_NS7_pen15")
```
RAN RSP ns7 30
```{r}
sampleNum_dict["All_pRAN_RSP_NS7_pen30"] <- fit_rf_map("All_pRAN_RSP_NS7_pen30")
```
RAN RSP ns7 45
```{r}
sampleNum_dict["All_pRAN_RSP_NS7_pen45"] <- fit_rf_map("All_pRAN_RSP_NS7_pen45")
```
|
910557abb25de6740ddb94c1c355c9cccf3a0c24
|
1ff0f0217347e7ec30167a5524ffb8260e49e823
|
/man/findLQR.Rd
|
ca0ae0596701540849caa688209fa546db54714c
|
[] |
no_license
|
vaofford/amplican
|
0ee096b58585ceb24c6e451872af2a2fd87b2de6
|
7774dda136bdd3dd78c6c8c1f596195b847f77f3
|
refs/heads/master
| 2020-09-15T08:21:02.149838
| 2019-06-06T18:33:47
| 2019-06-06T18:33:47
| 223,392,406
| 0
| 0
| null | 2019-11-22T11:48:36
| 2019-11-22T11:48:35
| null |
UTF-8
|
R
| false
| true
| 1,251
|
rd
|
findLQR.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers_filters.R
\name{findLQR}
\alias{findLQR}
\title{Find Off-targets and Fragmented alignments from reads.}
\usage{
findLQR(aln)
}
\arguments{
\item{aln}{(data.frame) Should contain events from alignments in GRanges
style with columns, e.g. seqnames, width, start, end, score.}
}
\value{
(logical vector) where TRUE indicates events that are
potential off-targets or low quality alignments.
}
\description{
Will try to detect off-targets and low quality alignments (outliers). It
tries k-means clustering on normalized number of events per read and read
alignment score. If there are 3 clusters (decided based on silhouette
criterion) cluster with high event count and low alignment score will be
marked for filtering. When there are fewer than 1000
scores in \code{aln}, it will filter nothing.
}
\examples{
file_path <- system.file("extdata", "results", "alignments",
"raw_events.csv", package = "amplican")
aln <- data.table::fread(file_path)
aln <- aln[seqnames == "ID_1"] # for first experiment
findLQR(aln)
}
\seealso{
\code{\link{findPD}} \code{\link{findEOP}}
Other filters: \code{\link{findEOP}}, \code{\link{findPD}}
}
\concept{filters}
|
7fa39fb3a4f5b8daaa34656d88a084d903677cf1
|
8d93ceb14c4c9504cecef60d7b9f878dc8ff355d
|
/CredibilityModels/R/bs.R
|
8b40b7a0ea2f84ec9a0b0bfed12d404659b26a41
|
[] |
no_license
|
Andreas329/MyPackages
|
4105c1830fae9240aa37fe4bea25d87055fe0f16
|
2b6f6c73bb8032418104540184bbd7bcada5faa4
|
refs/heads/master
| 2021-09-05T20:09:50.714375
| 2018-01-30T19:56:03
| 2018-01-30T19:56:03
| 108,578,621
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,070
|
r
|
bs.R
|
#' Fits the Buehlmann-Straub credibility model.
#'
#' Estimates the expected within-group variance and the between-group
#' variance from weighted observations, then blends each group's weighted
#' mean with the collective mean using credibility weights.
#'
#' @param formula a formula: response on the left, grouping variable(s) on
#'   the right.
#' @param data a data.frame in which to interpret the variables named in
#'   formula.
#' @param weights expression indicating the column of data containing the
#'   weights.
#' @param ... currently unused; accepted for call compatibility.
#' @return a list with components \code{s.sq} (estimated within-group
#'   variance), \code{tau} (estimated between-group variance, truncated at
#'   0), \code{kappa} (credibility constant), \code{mu} (collective mean)
#'   and \code{est} (data.frame of per-group credibility estimates).
#' @export
#' @examples
#' bs(KS ~ Region, testdat, count)
bs <- function(formula, data, weights, ... ) {
  #create model.frame
  # Standard modeling-function idiom: rebuild the call with only the
  # formula/data/weights arguments and evaluate model.frame() in the
  # caller's environment.
  # NOTE(review): cl is captured but never used in this function.
  cl <- match.call()
  mf <- match.call(expand.dots = FALSE)
  m <- match(c("formula", "data", "weights"), names(mf), 0L)
  mf <- mf[c(1L, m)]
  mf[[1L]] <- quote(stats::model.frame)
  mf <- eval(mf, parent.frame())
  #todo: formula check
  #get relevant data
  y <- model.response(mf, "numeric")  # response values
  w <- model.weights(mf)              # observation weights
  # Grouping columns = all terms on the right-hand side of the formula.
  group <- mf[, attr(attr(mf, "terms"), "term.labels"), drop = F]
  # Grouping values must not contain dots (presumably used internally as
  # key separators by split_k).
  # NOTE(review): this only triggers when MORE THAN ONE value in a column
  # contains a dot; a single dotted value slips through -- confirm intended.
  if(any(apply(group, 2, function(x) length(grep("\\.", x, value = F)) > 1))){
    stop("variables used for grouping cannot contain . (variables after ~ in formula)")
  }
  # split_k (package helper) splits observations by the grouping columns.
  data.split <- split_k(data.frame(y = y, w = w), group)
  #calc estimators for sigma
  # Per-group weighted variance (weighted.sigma is a package helper),
  # averaged across groups with group sizes as weights.
  s.k <- sapply(data.split$data, function(data) weighted.sigma(data$y, data$w))
  s.est <- weighted.mean(s.k, sapply(data.split$data, nrow))
  #calc estimator for tau
  # Per-group total weight and weighted mean of the response.
  w.y.group <- do.call(rbind,lapply(data.split$data, function(data)
    data.frame(w = sum(data$w), y = weighted.mean(data$y, data$w))))
  # Moment estimator of the between-group variance; it can come out
  # negative, hence the truncation at 0 below.  ("nominator" = numerator.)
  nominator <- sum(w.y.group$w*(w.y.group$y - weighted.mean(y, w))^2) -
    (length(data.split$key)-1) * s.est
  denominator <- sum(w) - sum(w.y.group$w^2/sum(w))
  tau.est <- max(nominator/denominator, 0)
  #create output
  # Credibility constant kappa = s^2 / tau^2, set to 0 when tau^2 is 0.
  # NOTE(review): kappa = 0 makes alpha = 1 below, i.e. full credibility to
  # each group mean when no between-group variance is found -- confirm.
  kappa <- ifelse(tau.est == 0 , 0, s.est/tau.est)
  alpha <- w.y.group$w / (w.y.group$w + kappa)  # per-group credibility weight
  mu <- weighted.mean(w.y.group$y, alpha)       # collective mean
  # Buehlmann-Straub estimator: blend of group mean and collective mean.
  cred.est <- alpha * w.y.group$y + (1-alpha) * mu
  # Re-attach grouping labels to the per-group alphas (unsplit_k is the
  # package helper inverse of split_k).
  alpha.g <- unsplit_k(list(data = lapply(alpha, function(x) data.frame(alpha = x, row.names = NULL)),
                            key = data.split$key))
  return(list(s.sq = s.est, tau = tau.est, kappa = kappa, mu = mu,
              est = cbind(alpha.g$grouping, alpha.g$data, w.y.group, est = cred.est)))
}
|
a9a8ee5669669bfcbaa5be6bf8a32b127f7c3627
|
d8978ecd115f95d9e4f6d987d54c2cb6541a6bf4
|
/code/4_analyzeData/wrds/tpPriceDecomp.R
|
9673276ec8451da2f2f4626959bc7166b0410890
|
[] |
no_license
|
emallickhossain/WarehouseClubs
|
f0eaab1b645e13654de655c2f13e47aa72b02a42
|
7867171cdb3ca3fe32ec778dd8043d538ab1f6ef
|
refs/heads/master
| 2021-06-28T05:37:21.813087
| 2020-09-16T21:49:48
| 2020-09-16T21:49:48
| 149,994,881
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,164
|
r
|
tpPriceDecomp.R
|
# Calculate price decomposition
library(data.table)
library(ggplot2)
library(ggthemes)
library(stringr)
library(lfe)
threads <- 8
yrs <- 2004:2017
# Getting CPI to deflate values to Jan-2010 base
cpi <- fread("/scratch/upenn/hossaine/cpi.csv")
cpi[, "base" := cpi[date == "2010-01-01"]$value]
cpi[, "newIndex" := value / base * 100]
cpi[, c("base", "value") := NULL]
cpi[, "yearMonth" := .(substr(date, 1, 7))]
cpi[, "date" := NULL]
# Loading Nielsen data (no years)
trips <- fread("/scratch/upenn/hossaine/fullTrips.csv", nThread = threads,
select = c("retailer_code", "household_code", "panel_year",
"trip_code_uc", "purchase_date"),
key = "trip_code_uc")
trips[, "yearMonth" := substr(purchase_date, 1, 7)][, "purchase_date" := NULL]
panel <- fread("/scratch/upenn/hossaine/fullPanel.csv", nThread = threads,
select = c("household_code", "panel_year", "projection_factor",
"household_income", "household_income_coarse",
"household_size", "age", "child", "married"))
# Loading Nielsen with years
fullPurch <- NULL
for (i in yrs) {
print(i)
purch <- fread(paste0("/scratch/upenn/hossaine/fullPurch", i, ".csv"),
nThread = threads, key = "trip_code_uc",
select = c("trip_code_uc", "quantity", "packagePrice",
"product_module_code", "upc_descr", "brand_code_uc",
"totalAmount", "multi", "brand_descr",
"size1_amount"))[product_module_code == 7260]
fullPurch <- rbindlist(list(fullPurch, purch), use.names = TRUE)
}
# Getting rolls and sheets for each product
fullPurch[, "rolls" := as.integer(multi * size1_amount)]
fullPurch[, "ply" := str_extract_all(upc_descr, "\\s\\dP\\s")]
fullPurch[, "ply" := as.integer(gsub("P", "", ply))]
fullPurch[, "sheet" := str_extract_all(upc_descr, "\\d{2,}S\\s")]
fullPurch[, "sheet" := as.integer(gsub("S", "", sheet)) * ply]
fullPurch[, "totalSheet" := sheet * rolls]
fullPurch[, c("multi", "size1_amount", "ply", "product_module_code",
"upc_descr", "totalAmount", "rolls", "sheet") := NULL]
# Merging with trips and panel to get store, purchase date, and household info
mergedData <- merge(trips, fullPurch, by = "trip_code_uc")[, "trip_code_uc" := NULL]
mergedData <- merge(mergedData, panel, by = c("household_code", "panel_year"))
mergedData <- merge(mergedData, cpi, by = "yearMonth")[, "yearMonth" := NULL]
rm(purch)
# Deflating
mergedData[, ':=' (packagePriceReal = packagePrice / newIndex * 100)]
mergedData[, c("packagePrice", "newIndex") := NULL]
mergedData[, ':=' (unitPriceReal = packagePriceReal / totalSheet,
totalSpendReal = packagePriceReal * quantity)]
# Generating factors
# Reference group is the 2nd quantile generic brand sold at the
# most popular discount retailer
mergedData[, "brand_code_uc" := as.character(brand_code_uc)]
mergedData[brand_code_uc == "536746",
"brand_code_uc" := paste0(retailer_code, brand_code_uc)]
mergedData[, "brand_code_uc" := relevel(as.factor(brand_code_uc), ref = "6920536746")]
mergedData[, "retailer_code" := relevel(as.factor(retailer_code), ref = "6920")]
mergedData[, c("lUnitPrice", "lQ") := .(log(unitPriceReal), log(totalSheet))]
# Decomposing unit prices
fullCoefs <- NULL
for (i in yrs) {
print(i)
reg <- lm(lUnitPrice ~ lQ + brand_code_uc + retailer_code,
data = mergedData[panel_year == i])
print(summary(reg)$adj.r)
coefs <- as.data.table(summary(reg)$coefficients, keep.rownames = TRUE)
coefs[, "panel_year" := i]
fullCoefs <- rbindlist(list(fullCoefs, coefs), use.names = TRUE)
}
# Organizing effects
setnames(fullCoefs, c("rn", "beta", "se", "t", "p", "panel_year"))
# Zero out coefficients that are insignificant at the 5% level
fullCoefs[p > 0.05, "beta" := 0]
# Getting intercept
int <- fullCoefs[rn == "(Intercept)", .(int = beta, panel_year)]
# Getting slope (coefficient on log size, lQ)
slope <- fullCoefs[rn == "lQ", .(slope = beta, panel_year)]
# Getting effects: split names like "brand_code_uc123" into the effect type
# ("brand"/"retailer") and the code itself
fullCoefs[, c("type", "id") := tstrsplit(rn, "_code", fixed = TRUE)]
fullCoefs[, "id" := gsub("_uc", "", id)]
brandEffect <- fullCoefs[type == "brand"][, c("rn", "type", "se", "t", "p") := NULL]
setnames(brandEffect, c("brandBeta", "panel_year", "brand_code_uc"))
retailEffect <- fullCoefs[type == "retailer"][, c("rn", "type", "se", "t", "p") := NULL]
setnames(retailEffect, c("retailBeta", "panel_year", "retailer_code"))
# Combining everything into the data table
mergedData <- merge(mergedData, int, by = "panel_year")
mergedData <- merge(mergedData, slope, by = "panel_year")
mergedData[, "sizeBeta" := lQ * slope]
mergedData <- merge(mergedData, brandEffect, by = c("brand_code_uc", "panel_year"),
                    all.x = TRUE)
mergedData <- merge(mergedData, retailEffect, by = c("retailer_code", "panel_year"),
                    all.x = TRUE)
# Reference-category / insignificant effects are zero by construction
mergedData[is.na(brandBeta), "brandBeta" := 0]
mergedData[is.na(retailBeta), "retailBeta" := 0]
fwrite(mergedData, "/scratch/upenn/hossaine/mergedData.csv", nThread = threads)
# Regressing to get difference in components attributable to income
mergedData <- na.omit(fread("/scratch/upenn/hossaine/mergedData.csv", nThread = threads))
# Collapse to one row per household-year: spending-weighted means of the
# price components
mergedData <- mergedData[, lapply(.SD, weighted.mean, w = totalSpendReal),
                         .SDcols = c("brandBeta", "sizeBeta", "retailBeta",
                                     "lUnitPrice", "int"),
                         by = .(household_code, panel_year, projection_factor,
                                household_income_coarse, household_size, age,
                                child, married)]
# Whatever the decomposition does not explain
mergedData[, "residual" := lUnitPrice - (sizeBeta + brandBeta + retailBeta + int)]
# Each component regressed on income (plus demographics), with year fixed
# effects and survey projection weights
reg0 <- felm(lUnitPrice ~ household_income_coarse +
               household_size + age + child + married | panel_year,
             data = mergedData,
             weights = mergedData$projection_factor)
reg1 <- felm(brandBeta ~ household_income_coarse +
               household_size + age + child + married | panel_year,
             data = mergedData,
             weights = mergedData$projection_factor)
reg2 <- felm(retailBeta ~ household_income_coarse +
               household_size + age + child + married | panel_year,
             data = mergedData,
             weights = mergedData$projection_factor)
reg3 <- felm(sizeBeta ~ household_income_coarse +
               household_size + age + child + married | panel_year,
             data = mergedData,
             weights = mergedData$projection_factor)
reg4 <- felm(residual ~ household_income_coarse +
               household_size + age + child + married | panel_year,
             data = mergedData,
             weights = mergedData$projection_factor)
# LaTeX (and console) regression table, income coefficients only
stargazer(reg0, reg1, reg2, reg3, reg4, type = "text",
          single.row = FALSE, no.space = TRUE, omit.stat = c("ser", "rsq"),
          out.header = FALSE,
          column.labels = c("Unit Price", "Brand", "Retailer", "Size", "Residual"),
          column.separate = c(1, 1, 1, 1, 1),
          dep.var.caption = "", dep.var.labels.include = FALSE,
          keep = c("household_income*"),
          order = c(2, 3, 1),
          covariate.labels = c("25-50k", "50-100k", ">100k"),
          notes.align = "l",
          digits = 3,
          notes.append = TRUE,
          label = "tab:tpPriceDecomp",
          out = "tables/tpPriceDecomp.tex")
# Getting coefficients and confidence intervals to plot.
# The identical tidy-up (coefficients + confint + merge + rename) was
# copy-pasted five times for reg0-reg4; factor it into one helper.
# Returns a data.table with columns rn, beta, se, t, p, LCL, UCL and,
# when `type` is supplied, a "type" column labelling the component.
getCoefTable <- function(reg, type = NULL) {
  coefs <- as.data.table(summary(reg)$coefficients, keep.rownames = TRUE)
  cis <- as.data.table(confint(reg), keep.rownames = TRUE)
  coefs <- merge(coefs, cis, by = "rn")
  setnames(coefs, c("rn", "beta", "se", "t", "p", "LCL", "UCL"))
  if (!is.null(type)) coefs[, "type" := type]
  coefs
}
# Overall unit-price gradient (no component label)
coef0 <- getCoefTable(reg0)
# Component-level gradients, stacked for plotting
graphData <- rbindlist(list(getCoefTable(reg1, "Brand"),
                            getCoefTable(reg2, "Retailer"),
                            getCoefTable(reg3, "Size"),
                            getCoefTable(reg4, "Residual")),
                       use.names = TRUE)
# Keep only income coefficients and order the income bins for the x-axis
graphData <- graphData[grepl("household_income", rn)]
graphData[, "rn" := gsub("household_income_coarse", "", rn)]
graphData[, "rn" := factor(rn, levels = c("25-50k", "50-100k", ">100k"),
                           ordered = TRUE)]
coef0 <- coef0[grepl("household_income", rn)]
coef0[, "rn" := gsub("household_income_coarse", "", rn)]
coef0[, "rn" := factor(rn, levels = c("25-50k", "50-100k", ">100k"), ordered = TRUE)]
# Graphing: stacked bars show each component's contribution to the unit-price
# difference by income group (in %); points mark the overall difference (coef0).
ggplot() +
  geom_bar(data = graphData, position = "stack", stat = "identity",
           aes(x = rn, y = beta * 100, fill = type)) +
  # geom_errorbar(data = graphData, aes(x = rn,
  #                                     ymin = LCL * 100,
  #                                     ymax = UCL * 100), width = 0.05) +
  geom_point(data = coef0, aes(x = rn, y = beta * 100)) +
  # geom_errorbar(data = coef0, aes(x = rn,
  #                                 ymin = LCL * 100,
  #                                 ymax = UCL * 100), width = 0.05) +
  geom_hline(yintercept = 0) +
  labs(x = "Household Income",
       y = "Unit Price Difference (%)",
       fill = "Component") +
  theme_tufte() +
  theme(axis.title = element_text(),
        plot.caption = element_text(hjust = 0),
        legend.position = "bottom") +
  scale_fill_grey()
ggsave(filename = "./figures/tpPriceDecomp.png", height = 4, width = 6)
|
1bd5bbacf3f22cb759ad8dd5f25f4a9dd52d4732
|
4344aa4529953e5261e834af33fdf17d229cc844
|
/input/gcamdata/man/module_aglu_L161.ag_R_C_Y_GLU_irr.Rd
|
2e2b5f1f3454464ec10025cba97dbd22a99d58d3
|
[
"ECL-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
JGCRI/gcam-core
|
a20c01106fd40847ed0a803969633861795c00b7
|
912f1b00086be6c18224e2777f1b4bf1c8a1dc5d
|
refs/heads/master
| 2023-08-07T18:28:19.251044
| 2023-06-05T20:22:04
| 2023-06-05T20:22:04
| 50,672,978
| 238
| 145
|
NOASSERTION
| 2023-07-31T16:39:21
| 2016-01-29T15:57:28
|
R
|
UTF-8
|
R
| false
| true
| 1,426
|
rd
|
module_aglu_L161.ag_R_C_Y_GLU_irr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zaglu_L161.ag_R_C_Y_GLU_irr.R
\name{module_aglu_L161.ag_R_C_Y_GLU_irr}
\alias{module_aglu_L161.ag_R_C_Y_GLU_irr}
\title{module_aglu_L161.ag_R_C_Y_GLU_irr}
\usage{
module_aglu_L161.ag_R_C_Y_GLU_irr(command, ...)
}
\arguments{
\item{command}{API command to execute}
\item{...}{other optional parameters, depending on command}
}
\value{
Depends on \code{command}: either a vector of required inputs,
a vector of output names, or (if \code{command} is "MAKE") all
the generated outputs: \code{L161.ag_irrProd_Mt_R_C_Y_GLU}, \code{L161.ag_rfdProd_Mt_R_C_Y_GLU}, \code{L161.ag_irrHA_bm2_R_C_Y_GLU}, \code{L161.ag_rfdHA_bm2_R_C_Y_GLU}, \code{L161.ag_irrYield_kgm2_R_C_Y_GLU}, \code{L161.ag_rfdYield_kgm2_R_C_Y_GLU}, \code{L161.ag_irrHA_frac_R_C_GLU}. The corresponding file in the
original data system was \code{LB161.ag_R_C_Y_GLU_irr.R} (aglu level1).
}
\description{
Calculates irrigated and rainfed agriculture production, harvested area and yields by GCAM region / commodity / GLU / year.
}
\details{
This chunk combines FAO annual data and GTAP disaggregated irrigated vs. rainfed data to compute irrigated and rainfed
agriculture production, harvested area and yields by GCAM region / commodity / GLU / year. The same irrigated and rainfed fraction
is applied to all historical years for each commodity, region and GLU.
}
\author{
RC August 2017
}
|
2c682fccabf1da026a748964d29010fe464c8720
|
e3e10eb45e55cb84ad28e2ab2f3a2dff46bf9328
|
/man/network.glmnet-MultiAssayExperiment-method.Rd
|
0fbb8582cbbf39fe422f9893d49e7807e8d4bb51
|
[] |
no_license
|
averissimo/network.cox
|
cbd8954c23139d59293f811f29b1313443d07610
|
88411403e99d4963a1e99fde9499755ebc5828bf
|
refs/heads/master
| 2020-03-22T11:23:35.165518
| 2018-07-06T10:16:17
| 2018-07-06T10:16:17
| 139,968,129
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 588
|
rd
|
network.glmnet-MultiAssayExperiment-method.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/network.glmnet.R
\docType{methods}
\name{network.glmnet,MultiAssayExperiment-method}
\alias{network.glmnet,MultiAssayExperiment-method}
\title{Calculate GLM model with network-based regularization}
\usage{
\S4method{network.glmnet}{MultiAssayExperiment}(xdata, ydata, network,
experiment.name = NULL, network.options = network.options.default(), ...)
}
\arguments{
\item{xdata}{MultiAssayExperiment.}
}
\value{
an object just as glmnet
}
\description{
Calculate GLM model with network-based regularization
}
|
f06dc377ec72ac01ca9d61fcfba6e4a9ac841a8b
|
4298020386c5271f265713e663a2fb6d78772be5
|
/imma.R
|
12ca12950e8f2790d770bb9433643b2b53f30899
|
[] |
no_license
|
armdhn23/Peta-Leaflet-R
|
5221a039d1b3dd1010dad4b4cefaf15ddd922df8
|
7e301d3418942ecaa29c439e33cf9168e47a559a
|
refs/heads/master
| 2022-12-08T12:50:09.673987
| 2020-08-26T11:55:04
| 2020-08-26T11:55:04
| 290,482,508
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 727
|
r
|
imma.R
|
library(leaflet)
# Four points (longitude/latitude) with display names ("Titik" = "Point")
data <- data.frame(long=c(119.449521, 119.450739, 119.449419,119.447934),
                   lat=c(-5.098641, -5.100104, -5.101071, -5.098983),
                   nama=c("Titik 1", "Titik 2", "Titik 3", "Titik 4"))
data
# Draw the map with always-visible labels: the first two markers get labels
# to the right, the last two to the left
# (previous comment referenced the `quakes` dataset, copied from an
# unrelated example — it did not describe this code)
leaflet(data = data) %>% addTiles() %>%
  addMarkers(~long[1:2], ~lat[1:2], popup = ~as.character(nama[1:2]), label = ~as.character(nama[1:2]),
             labelOptions = labelOptions(noHide = T, textsize = "15px", direction = "right")) %>%
  addMarkers(~long[3:4], ~lat[3:4], popup = ~as.character(nama[3:4]), label = ~as.character(nama[3:4]),
             labelOptions = labelOptions(noHide = T, textsize = "15px", direction = "left"))
|
e794a471b2bda124a2ac67f71376db06ec407630
|
a53bdbe22bbee6c25fdd8085201327a59f61f6a4
|
/04_Factor_Analysis.R
|
0314f5e6fc58bad252b7d762fb4bd78f5237f227
|
[] |
no_license
|
mmendezs/IM
|
c8ff699feed55b55e7588b21e8739b1a2263b523
|
6b23792fc3672c6c82da17c402d4fe83e2edb3e5
|
refs/heads/master
| 2020-12-31T00:29:42.679837
| 2017-05-23T10:29:59
| 2017-05-23T10:29:59
| 85,386,754
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,266
|
r
|
04_Factor_Analysis.R
|
library(dplyr)
library(ggplot2)
library(psych)
#library(gridExtra)
# Survey data; item responses are in the columns starting with "P"
survey <- read.delim("survey.csv",sep = ';')
# Split the data by supermarket chain
dia <- filter(survey, Establecimiento=='dia')
carrefour <- filter(survey, Establecimiento=='carrefour')
mercadona <- filter(survey, Establecimiento=='mercadona')
## psych
# Exploratory
#psych::scree(select(survey, starts_with('P')), pc = TRUE) # ugly plot
# Kaiser-Meyer-Olkin sampling-adequacy analysis
survey_KMO <-psych::KMO(select(survey, starts_with('P')))
# KMO value
survey_KMO$MSA
# PCA on all columns to choose the number of factors and explained variance
survey_principal <- psych::principal(select(survey, starts_with('P')),
                    nfactors = ncol(select(survey, starts_with('P'))), rotate = 'none')
survey_principal$loadings
# We choose 4 unrotated factors
# PCA with 4 components, no rotation
survey_principal <- psych::principal(select(survey, starts_with('P')),
                                     nfactors = 4, rotate = 'none')
# Inspect the communalities
survey_principal$communality
print(survey_principal$loadings, cutoff = 0.4, digits = 2)
# Extract the loadings ("cargas")
survey_principal_cargas <- as.data.frame(unclass(survey_principal$loadings))
# Add a column with the variable names
survey_principal_cargas$variables <- rownames(survey_principal_cargas)
# Reorder the columns
survey_principal_cargas <- select(survey_principal_cargas, variables, starts_with('P'))
# Take each individual's component scores
survey_principal_scores <- as.data.frame(unclass(survey_principal$scores))
# Attach the supermarket to each individual
survey_principal_scores$Establecimiento <- survey$Establecimiento
# Unrotated plot: individuals on PC1 x PC2, loadings drawn as text labels
ggplot(data = survey_principal_scores, aes(PC1, PC2)) +
  geom_hline(yintercept = 0, colour = "gray70") +
  geom_vline(xintercept = 0, colour = "gray70") +
  geom_point(aes(colour = factor(Establecimiento)), alpha = 0.7) +
  geom_density2d(colour = "gray80") +
  geom_text(data = survey_principal_cargas,
            aes(PC1, PC2, label = variables),check_overlap = TRUE) +
  theme(legend.position="bottom",legend.direction="horizontal") +
  theme(legend.title = element_blank()) +
  ggtitle("Análisis Componentes por centros, incluyendo individuos") +
  scale_colour_discrete(name = "Variable")
# Now rotate (varimax)
survey_principal_rot <- psych::principal(select(survey, starts_with('P')),
                                     nfactors = 4, rotate = 'varimax')
# Show the main loadings
print(survey_principal_rot$loadings, cutoff = 0.4, digits = 2)
survey_principal_cargas_rot <- as.data.frame(unclass(survey_principal_rot$loadings))
survey_principal_cargas_rot$variables <- rownames(survey_principal_cargas_rot)
survey_principal_cargas_rot <- select(survey_principal_cargas_rot, variables, starts_with('R'))
# Take each individual's rotated component scores
survey_principal_scores_rot <- as.data.frame(unclass(survey_principal_rot$scores))
# Attach the supermarket
survey_principal_scores_rot$Establecimiento <- survey$Establecimiento
# Rotated plot (RC1 x RC2)
ggplot(data = survey_principal_scores_rot, aes(RC1, RC2)) +
  geom_hline(yintercept = 0, colour = "gray70") +
  geom_vline(xintercept = 0, colour = "gray70") +
  geom_point(aes(colour = factor(Establecimiento)), alpha = 0.4) +
  geom_density2d(colour = "gray80") +
  geom_text(data = survey_principal_cargas_rot, size = 3,
            aes(RC1, RC2, label = variables), check_overlap = TRUE) +
  theme(legend.position="bottom",legend.direction="horizontal") +
  theme(legend.title = element_blank()) +
  ggtitle("Análisis Componentes por centros, incluyendo individuos") +
  scale_colour_discrete(name = "Variable")
survey_principal_scores_rot <- data.frame(survey_principal_scores_rot)
# Build temporary variables: RC1 paired with each other component
a <- select(survey_principal_scores_rot, x = RC1, y = RC2, Establecimiento)
a$factor <- 'Empleados y Establecimiento'
b <- select(survey_principal_scores_rot, x = RC1, y = RC3, Establecimiento)
b$factor <- 'Empleados y Tiempos'
c <- select(survey_principal_scores_rot, x = RC1, y = RC4, Establecimiento)
c$factor <- 'Empleados y Gama Productos'
d <- dplyr::bind_rows(a, b, c)
# Faceted plot of the three component pairs across the three chains
ggplot(data = d, aes(x, y)) +
  geom_hline(yintercept = 0, colour = "gray70") +
  geom_vline(xintercept = 0, colour = "gray70") +
  geom_point(aes(colour = factor(Establecimiento)), alpha = 0.4) +
  geom_density2d(colour = "gray80") +
  facet_grid(~ factor) +
  theme(legend.position="bottom",legend.direction="horizontal") +
  theme(legend.title = element_blank()) +
  ggtitle("Análisis Componentes por centros, incluyendo individuos") +
  scale_colour_discrete(name = "Variable")
## Per-store factor analysis. The KMO test, 4-factor varimax PCA, diagnostic
## prints and RC1 x RC2 loadings/scores plot were triplicated verbatim for
## dia, carrefour and mercadona; factored into one helper.
# Runs KMO + varimax-rotated PCA on the "P*" item columns of one store's
# data, prints the same diagnostics the original code printed (KMO MSA,
# communalities, sorted loadings), draws the scores/loadings map, and
# invisibly returns the fitted pieces for further inspection.
analyze_store <- function(store_data, n_factors = 4) {
  items <- select(store_data, starts_with('P'))
  # Kaiser-Meyer-Olkin sampling adequacy
  store_KMO <- psych::KMO(items)
  print(store_KMO$MSA)
  # Rotated principal components
  store_pca <- psych::principal(items, nfactors = n_factors, rotate = 'varimax')
  # Communalities and main loadings
  print(store_pca$communality)
  print(store_pca$loadings, sort = TRUE, cutoff = 0.4, digits = 2)
  # Loadings table with variable names, rotated-component columns only
  cargas <- as.data.frame(unclass(store_pca$loadings))
  cargas$variables <- rownames(cargas)
  cargas <- select(cargas, variables, starts_with('R'))
  # Individual scores, tagged with the store
  scores <- as.data.frame(unclass(store_pca$scores))
  scores$Establecimiento <- store_data$Establecimiento
  p <- ggplot(data = scores, aes(RC1, RC2)) +
    geom_hline(yintercept = 0, colour = "gray70") +
    geom_vline(xintercept = 0, colour = "gray70") +
    geom_point(colour = 'green', alpha = 0.2) +
    geom_density2d(colour = "gray80") +
    geom_text(data = cargas, size = 3,
              aes(RC1, RC2, label = variables), check_overlap = TRUE) +
    theme(legend.position="bottom",legend.direction="horizontal") +
    theme(legend.title = element_blank()) +
    ggtitle("Análisis Componentes por centros, incluyendo individuos") +
    scale_colour_discrete(name = "Variable")
  print(p)  # explicit print: ggplot objects do not auto-print inside a function
  invisible(list(kmo = store_KMO, pca = store_pca,
                 loadings = cargas, scores = scores))
}
## Dia with 4 factors
dia_results <- analyze_store(dia)
## Carrefour with 4 factors
carrefour_results <- analyze_store(carrefour)
## Mercadona with 4 factors
mercadona_results <- analyze_store(mercadona)
## Illustration of what rotation does (hand-placed toy points, same labels
## A-E in unrotated ("sin rotar") and rotated ("rotado") positions)
x_sin <- c(-4, -2, 1, 3, 2)+6
y_sin <- c(4,0,1,-0.5,-2.5)+6
sin_rotar <- data.frame(x = x_sin, y = y_sin, rot='sin rotar', modelo = c('A', 'B', 'C', 'D', 'E'))
x_rot <- c(4,3.8,1,-2,-6)+6
y_rot <- c(-1,1.4,0.25,-1.3,0.7)+5
rotado <- data.frame(x = x_rot, y = y_rot, rot='rotado', modelo = c('E', 'D', 'C', 'B', 'A'))
datos <- bind_rows(sin_rotar, rotado)
# Fix panel order: unrotated first, rotated second
datos$rot <- factor(datos$rot, levels = c('sin rotar', 'rotado'))
# Side-by-side panels with a fitted line and the point labels
ggplot(datos, aes(x,y)) +
  geom_hline(yintercept = 0, colour = "gray70") +
  geom_vline(xintercept = 0, colour = "gray70") +
  geom_point(fill = NA, alpha = 0) +
  geom_smooth(method=lm, se=FALSE) +
  facet_grid(~ rot) +
  geom_text(aes(label = modelo)) +
  labs(x='deportividad', y = 'comodidad')
|
f16e4393c6b4ae8f3ae68a942192d0a4098275c0
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/staTools/examples/RMSE.Rd.R
|
3c12bde569073dc87710a1dd6ec4d66cec142adb
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 185
|
r
|
RMSE.Rd.R
|
# Auto-extracted example for staTools::RMSE (from the package's Rd examples)
library(staTools)
### Name: RMSE
### Title: Root Mean Squared Error
### Aliases: RMSE
### Keywords: error mean root squared
### ** Examples
# Two random vectors; NOTE(review): no set.seed(), so results vary per run
x = runif(10)
y = runif(10)
RMSE(x,y)
|
ec2b2ed706ced4f22a8670c527a82cbd2ab3a73c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/lmSupport/examples/varScore.Rd.R
|
87b10f407cf7b06fd731de63dfa150b41e0c7be3
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 283
|
r
|
varScore.Rd.R
|
# Auto-extracted example for lmSupport::varScore (from the package's Rd examples)
library(lmSupport)
### Name: varScore
### Title: Creates a total score from a sum of items
### Aliases: varScore
### Keywords: manip
### ** Examples
##not run
##varScore(d, c('I1', 'I3', 'I4'), Reverse= c('I2', 'I5'),
##         Range = c(1,5), Prorate=TRUE, MaxMiss = .25)
|
9147d92b3a02b6bdb6329a8634171a8031c855f9
|
b8d522f0bca0b379e982c5b094243f8a75979472
|
/R/featureCoverage.R
|
6e3a7b8a49776ea1aadc82e58b99f78c23c39259
|
[] |
no_license
|
aryeelab/scmeth
|
45d2dc4ce77b2e42c67737a1eeffa8a4570ceab3
|
b7b86da69dcfb13a14b8ab5099649c2d41469d55
|
refs/heads/master
| 2021-03-24T09:43:58.358028
| 2019-06-10T21:12:11
| 2019-06-10T21:12:11
| 75,974,085
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,401
|
r
|
featureCoverage.R
|
#' Coverage based on the genomic feature
#'
#'Provides Coverage metrics for the sample by each genomic features provided
#'by the user
#'@param bs bsseq object
#'@param features list of genomic features, e.g. genes_exons, genes_introns,
#'cpg_islands, cpg_shelves
#'Names are based on the annotatr packages, so all the features provided by the
#'annotatr
#'package will be supported in this function
#'@param genomebuild reference alignment, i.e. mm10 or hg38
#'@return a matrix with one row per requested feature (in the order given)
#'and one column per sample, holding the fraction of covered CpGs that fall
#'in each feature
#'@examples
#'directory <- system.file("extdata/bismark_data", package='scmeth')
#'bs <- HDF5Array::loadHDF5SummarizedExperiment(directory)
#'featureCoverage(bs, c('cpg_islands', 'cpg_shores'), 'hg38')
#'@importFrom DelayedArray rowSums
#'@importFrom GenomeInfoDb seqlevelsStyle
#'@importFrom annotatr builtin_genomes
#'@importFrom annotatr build_annotations
#'@importFrom annotatr annotate_regions
#'@importFrom annotatr summarize_annotations
#'@import GenomicRanges
#'@export
featureCoverage <- function(bs, features, genomebuild){
    # Build annotatr annotation codes, preserving the caller's feature order.
    # BUG FIX: the previous prepend-in-a-loop construction reversed the order
    # relative to `features`, so the rownames assigned at the end mislabelled
    # the rows of the returned matrix.
    annotationFeatures <- paste0(genomebuild, '_', features)
    annots_gr <- annotatr::build_annotations(genome = genomebuild,
                                    annotations = annotationFeatures)
    GenomeInfoDb::seqlevelsStyle(bs) <- "UCSC"
    nSamples <- dim(bs)[2]
    # One row per feature, one column per sample
    sumAnnotMatrix <- matrix(nrow = length(features), ncol = nSamples)
    for (i in seq_len(nSamples)){
        bsCell <- bs[, i]
        # CpGs that are observed (covered at least once) in this sample
        coverageMatrix <- getCoverage(bsCell)
        ind <- DelayedArray::rowSums(coverageMatrix) > 0
        # Intersect the covered CpGs with the reference annotations
        dm_annotated <- annotatr::annotate_regions(
                        regions = GenomicRanges::granges(bsCell)[ind, ],
                        annotations = annots_gr,
                        ignore.strand = TRUE,
                        quiet = TRUE)
        sumAnnot <- annotatr::summarize_annotations(dm_annotated, quiet = TRUE)
        # Fraction of covered CpGs falling in each requested feature, aligned
        # to `annotationFeatures` order via match()
        sumAnnotMatrix[, i] <- sumAnnot$n[match(annotationFeatures,
                                                sumAnnot$annot.type)] / sum(ind)
    }
    colnames(sumAnnotMatrix) <- colnames(bs)
    rownames(sumAnnotMatrix) <- features
    return(sumAnnotMatrix)
}
|
067f734b1fef9c67abb98581b28962eb79df747e
|
b69ea4c85c60f4a3c59d302eea64c620270cfaae
|
/data-processing-code-salvage/05_preliminary_anal_MAP_misc_2017-05-11.R
|
3a033e55a4baf5d6b46ae2caae1f409c8b97cba7
|
[] |
no_license
|
rvanmazijk/Hons-thesis-code-salvage
|
52c1dbef9c136afabe385550d0258262c59d2773
|
b3492c5f5c212d75631557c8f280c898f91718a7
|
refs/heads/master
| 2020-03-20T01:30:39.817374
| 2018-08-30T14:30:26
| 2018-08-30T14:30:26
| 137,078,280
| 0
| 0
| null | 2018-08-30T14:30:27
| 2018-06-12T13:47:58
|
HTML
|
UTF-8
|
R
| false
| false
| 6,563
|
r
|
05_preliminary_anal_MAP_misc_2017-05-11.R
|
# Formal "preliminary" analyses on MAP --- misc. & "non-core" analyses
# Hons thesis
# Ruan van Mazijk
# created: 2017-05-11
# last edited: 2017-05-11
# Setup ---------------------------------------------------------------------------------
# Here, I load all the setup info and my own defined functions (in `11_my_funs.R`),
# and all the saved objects (mostly rasters) (in `12_my_objs.R`).
# NOTE(review): `rm(list = ls())` wipes the whole workspace — acceptable in an
# interactive analysis script, but hazardous if this file is ever source()d
# from another script.
rm(list = ls())
source("i_my_funs.R")
source("ii_my_objs.R")
#quartz()
# (Using `MAP_GCFR_0.05` for this)
#MAP_GCFR_0.05 %>%
#    plot_abs_vs_rough() %>%
#    summary()
# Applying `resample()` -----------------------------------------------------------------
# 2017-05-08 15:59 ---
# After my meeting with Tony today, he agrees that `aggregate()` is
# better, as the mean-style way of doing things is more intuitive.
# And this preserves the pixel/grid-cell structure-alignment, whereas `resample()`-ed
# pixels are not necessarily aligned with the input raster's pixels.
# Though, this function may come in hand if I have to force some other raster, say
# MODIS, to be a specific starting resolution (say 0.05 deg) if it comes from NASA in,
# say, 0.06 deg resolution, in order to then use `aggregate()` on it with its pixels
# aligned with those of all my other environmental vars.
# Target resolutions: 0.05 to 1.00 degree in 0.05-degree steps
ress <- seq(0.05, 1.00, by = 0.05) # as before
resampled_MAP_GCFR <- custom_resample_loop(MAP_GCFR_0.05, ress)
par(mfrow = c(2, 3))
plot_raster_list(resampled_MAP_GCFR)
par(op)
# Raster values vs resolution, as a long data frame
resampled_MAP_GCFR_vals <- as_df_raster_vals_list(resampled_MAP_GCFR, n = ress)
plot(val ~ res, data = resampled_MAP_GCFR_vals)
par(mfrow = c(2, 3))
resampled_MAP_GCFR %>%
    map(plot_abs_vs_rough)
par(op)
# Applying roughness calc and then `aggregate()` ----------------------------------------
# 2017-05-08 --- In pseudo-`R`-code:
# MAP %>% h() -> roughMAP %>% aggregate(%both%) -> agg_rough_MAP; agg_MAP
# (See Fig. 1b in my notes from my meeting with Tony (2017-05-08), which I had *after*
# I wrote this script)
# 2017-05-09 --- Refactored this § of code today
# Pixel-wise roughness at the native 0.05 deg resolution
rough_MAP_GCFR_0.05 <-
    MAP_GCFR_0.05 %>%
    terrain("roughness")
# `agg_MAP_GCFR` already exists, and is identical in this case b) to what I made in a).
# Apply `agg()` to `rough_MAP_GCFR_0.05` /first/:
agg_rough_MAP_GCFR <- custom_aggregate_loop(rough_MAP_GCFR_0.05, fact = facts)
agg_rough_MAP_GCFR %<>% c(rough_MAP_GCFR_0.05, .) # incl. the original raster...
# BUG FIX: this line previously read `names(agg_roughMAP_GCFR)` (missing
# underscore), which errors because no such object exists.
names(agg_rough_MAP_GCFR)[1] <- "1" # ... name it factor = 1
# `agg_MAP_GCFR_df` already exists too,
# and is identical in this case b) to what I made in a).
# Make the `_df` for `agg_rough_MAP_GCFR`:
agg_rough_MAP_GCFR_df <-
    agg_rough_MAP_GCFR %>%
    as_df_raster_vals_list(n = c(1, facts))
summary(agg_MAP_GCFR_df)
summary(agg_rough_MAP_GCFR_df)
names(agg_MAP_GCFR_df) <- c("MAP", "fact")
names(agg_rough_MAP_GCFR_df) <- c("agg_rough_MAP", "fact")
# Add `agg_rough`'s `agg_rough_MAP` column to my data.frame from a)
length(abs_vs_rough_MAP_GCFR_df[, 1]) # to check...
length(agg_rough_MAP_GCFR_df[, 1]) # ... same no. rows!!
abs_vs_rough_MAP_GCFR_df %<>%
    cbind(agg_rough_MAP_GCFR_df$agg_rough_MAP)
names(abs_vs_rough_MAP_GCFR_df)[4] <- "agg_rough_MAP"
# `agg_rough_MAP` models ----------------------------------------------------------------
. <- abs_vs_rough_MAP_GCFR_df
m1b <- lm(agg_rough_MAP ~ fact, data = .)
m1b_quad <- lm(agg_rough_MAP ~ I(fact ^ 2), data = .)
m1b_log <- lm(agg_rough_MAP ~ log(fact), data = .)
m2b <- lm(agg_rough_MAP ~ MAP, data = .)
m3b <- lm(MAP ~ fact, data = .)
m4b <- lm(agg_rough_MAP ~ fact + MAP, data = .)
m4b_i <- lm(agg_rough_MAP ~ fact * MAP, data = .)
rm(.)
# Inspect the "b-series" models (fitted just above) and visualise their
# partial effects with visreg(). NOTE(review): `overlay = F` uses the
# reassignable shorthand for FALSE -- kept as-is (doc-only edit).
summary(m1b )
summary(m1b_quad)
summary(m1b_log )
summary(m2b )
summary(m3b )
summary(m4b )
summary(m4b_i )
. <- abs_vs_rough_MAP_GCFR_df
visreg(m1b )
visreg(m1b_quad)
visreg(m1b_log )
visreg(m2b )
visreg(m3b )
visreg(m4b, "fact", by = "MAP", breaks = 4, overlay = F)
visreg(m4b_i, "fact", by = "MAP", breaks = 2, overlay = F)
rm(.)
AIC(m1b, m1b_quad, m1b_log, m4b, m4b_i)
# 2017-05-09 11:29 TODO --- `plot()` model assumption validations!
# Comparing the 1st `aggregate()` method with the 2nd, ----------------------------------
# and the effect it has on the "{E -> h(E)} -- scale relationship"
# I.e. 1st method = Fig. 1a* = h(agg(...)) = `xx`
#      2nd method = Fig. 1b* = agg(h(...)) = `xx_2`
# * In my notes from my meeting with Tony (2017-05-08), which I had *after*
#   I wrote this script
# cf_1_2 <- function(a, b) { ... } # The rest of this fn def is in `11_my_funs.R`
# cf_1_2() compares paired a-series vs b-series models; its definition lives
# in `11_my_funs.R` (not visible here) -- presumably prints/plots a contrast.
. <- abs_vs_rough_MAP_GCFR_df
cf_1_2(m1a, m1b)
cf_1_2(m1a_quad, m1b_quad)
cf_1_2(m1a_log, m1b_log)
cf_1_2(m2a, m2b) # not good models, per se, as I have multiplied the number of obs b.c.
                 # of the 20 factors in that data.frame
cf_1_2(m3a, m3b) # identical! as expected
#cf_1_2(m4a, m4b)
## can't see ints nicely... will do this model and `_i` by hand:
summary(m4a)
summary(m4b)
visreg(m4a, "fact", by = "MAP", breaks = 2, overlay = F)
visreg(m4b, "fact", by = "MAP", breaks = 2, overlay = F) # hectic!
summary(m4a_i)
summary(m4b_i)
visreg(m4a_i, "fact", by = "MAP", breaks = 2, overlay = F)
visreg(m4b_i, "fact", by = "MAP", breaks = 2, overlay = F) # hectic!
# AIC comparison within each series (a-series vs b-series fitted separately).
AIC(m1a, m1a_quad, m1a_log, m4a, m4a_i)
AIC(m1b, m1b_quad, m1b_log, m4b, m4b_i)
rm(.)
# Fiddling: agg_rough vs rough_agg ------------------------------------------------------
# now for factor 2:20 only. I.e. removes the 1:1 strip,
# as rough_agg(fact = 1) = agg_rough(fact = 1)
# Regress one aggregation-ordering against the other to see how similar the
# two roughness pipelines are once the trivially identical fact == 1 rows
# are excluded.
. <-
    abs_vs_rough_MAP_GCFR_df %>%
    filter(fact != 1)
m5 <- lm(rough_agg_MAP ~ agg_rough_MAP, data = .)
visreg(m5)
summary(m5)
rm(.)
# Junk ----------------------------------------------------------------------------------
# Scratch/exploratory plotting kept by the author. NOTE(review): the final
# `%T>%` tee below means the value of `as_df_raster_vals_list()` is computed
# but discarded (a tee pipe returns its LHS) -- presumably intentional for
# the plotting side effects only; confirm before reusing.
par(mfrow = c(2, 3))
aggregated_MAP_GCFR %T>%
    plot_raster_list() %T>%
    plot_raster_vals_list() %T>%
    as_df_raster_vals_list(n = facts)
par(op)
par(mfrow = c(2, 3))
aggregated_MAP_GCFR %>%
    map(plot_abs_vs_rough)
par(op)
# Per-factor regression summaries (estimate, p-value, R^2) in one data frame.
fact_df <- as_df_mini_abs_vs_rough(aggregated_MAP_GCFR, fact = facts)
summary(fact_df)
par(mfrow = c(1, 3))
plot(fact ~ ., data = fact_df) # Plotted x & y swapped, sorry. (but actually ok?)
par(op)
plot(p.value ~ estimate, data = fact_df)
ggplot(aes(x = fact, y = estimate), data = fact_df) +
    geom_point(aes(size = p.value, col = adj.r.squared))
# </> -----------------------------------------------------------------------------------
|
ad75e3dce00c150d04b3c90feb9bf1efa65aea26
|
613071c35cf546977440326fac50843a8be32cff
|
/src/ui.R
|
4ed67591484badfaf868dc6003bbc6ca003c1293
|
[
"MIT"
] |
permissive
|
peter0083/crime_data_shiny_app
|
f4bc381a061b7b9d58377e9f30dc6fd68894fb47
|
87f9a1b2f2942e8491a573b5b22610d459dba030
|
refs/heads/master
| 2021-01-19T13:15:24.596203
| 2017-04-24T03:28:32
| 2017-04-24T03:28:32
| 88,078,089
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,798
|
r
|
ui.R
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(colourpicker)
# UI definition: crime-statistics viewer for the 10 "best cities for jobs".
# Sidebar collects year range, crime type, two cities to compare, and styling
# options; the main panel shows the chart plus click/double-click readouts
# and a colour picker for the graph title. (Original header said "histogram",
# which was inaccurate -- the output is a line/scatter chart.)
shinyUI(fluidPage(
  # Application title
  titlePanel(
    h1("Crime Data for the 10 Best Cities for Jobs (USA)")
  ),
  # Sidebar layout: inputs on the left, plot output on the right.
  sidebarLayout(
    sidebarPanel(
      # Year window to display (two-ended slider).
      sliderInput(inputId = "year_range",
                  label = "Year Range:",
                  min = 1975,
                  max = 2015,
                  sep = "", # avoid having comma when displaying the year
                  value = c(1980,2005))
      ,
      # Crime metric to plot; values are column names in the crime data set.
      selectInput(inputId = "crime_type",
                  label = "Select Crime Type",
                  choices = list("Violent Crimes per 100k people" = "violent_per_100k",
                                 "Homicides per 100k people"= "homs_per_100k",
                                 "Rapes per 100k people"= "rape_per_100k",
                                 "Robberies per 100k people" = "rob_per_100k",
                                 "Aggravated Assault per 100k people" ="agg_ass_per_100k"),
                  selected = "violent_per_100k")
      ,
      # First city to compare (display names carry the Glassdoor rank).
      selectInput(inputId = "city1",
                  label = "Select City 1",
                  choices = list("1. San Jose"= "San Jose",
                                 "2. San Francisco"= "San Francisco",
                                 "3. Seattle" = "Seattle",
                                 "4. Boston" = "Boston",
                                 "5. Washington, D.C." = "Washington, D.C.",
                                 "6. Austin, Texas" ="Austin, Texas",
                                 "7. Salt Lake City"="Salt Lake City",
                                 "8. Raleigh, N.C." = "Raleigh, N.C.",
                                 "9. Minneapolis"= "Minneapolis",
                                 "10. Oklahoma City" = "Oklahoma City"),
                  selected = "Seattle")
      ,
      # Second city to compare (same choice list as city1).
      selectInput(inputId = "city2",
                  label = "Select City 2",
                  choices = list("1. San Jose"= "San Jose",
                                 "2. San Francisco"= "San Francisco",
                                 "3. Seattle" = "Seattle",
                                 "4. Boston" = "Boston",
                                 "5. Washington, D.C." = "Washington, D.C.",
                                 "6. Austin, Texas" ="Austin, Texas",
                                 "7. Salt Lake City"="Salt Lake City",
                                 "8. Raleigh, N.C." = "Raleigh, N.C.",
                                 "9. Minneapolis"= "Minneapolis",
                                 "10. Oklahoma City" = "Oklahoma City"),
                  selected = "Boston")
      ,
      # Point/line alpha for the plot (1 = opaque, 0.1 = most transparent).
      numericInput(inputId = "alpha_input",
                   label = "Select Data point transparency \n (1 = least transparent; 0.1 = most transparent)",
                   value = 1,
                   min = 0.1,
                   max = 1,
                   step = 0.1,
                   width = NULL)
      ,
      # When checked, the server presumably switches to a viridis palette --
      # behaviour lives in server.R, not visible here.
      checkboxInput(inputId = "viridis",
                    label = "Colour adjustment for colour vision deficiency users",
                    value = FALSE)
      ,
      # Chart type toggle; numeric codes are interpreted server-side.
      radioButtons(inputId = "scatter_plot",
                   label = "Scatter plot or line chart?",
                   choices = c("Scatter Plot"= 1, "Line Chart"= 2),
                   selected = 2,
                   inline = FALSE,
                   width = NULL)
      ,
      # Attribution links for the two data sources.
      a(href="https://www.themarshallproject.org/#.xTfnU9sDb", h5("Crime Data Source: The US Marshall Project Dataset")),
      a(href="https://www.glassdoor.com/List/Best-Cities-for-Jobs-LST_KQ0,20.htm", h5("City Ranking Source: Glassdoor.com 2016")
      )
      ## <html>
      ## <body>
      ## <a href="https://www.glassdoor.com/List/Best-Cities-for-Jobs-LST_KQ0,20.htm"><h5>"City Ranking Source: Glassdoor.com 2016"</h5></a>
      ## </body>
      ## </html>
    ),
    # Main panel: the chart plus interactive readouts.
    mainPanel(
      plotOutput("linePlot",
                 # add click, dbclick, hover and brush functions
                 click = "plot_click",
                 dblclick = dblclickOpts(id = "plot_dblclick")
      ),
      fluidRow(
        column(width = 6,
               verbatimTextOutput("click_info")
        ),
        column(width = 6,
               verbatimTextOutput("dblclick_info")
        ),
        # NOTE(review): nesting fluidPage() inside fluidRow() is unusual but
        # renders; kept as-is in this doc-only edit.
        fluidPage(
          colourInput("col", "Select a font colour for graph title", "black"),
          plotOutput("plot")
        )
      )
    )
  )))
|
b4ef92838ff1c17a3b42fdb8c3ba425179f4a4c2
|
378a17586220fc92add9c226045f2cf7de327955
|
/Rscripts/diffExpMds.R
|
b31e27a791609d08be18926f11b0bec32a0b3e5b
|
[] |
no_license
|
CrickWu/code
|
30535fbc14e473ba024c636ea1dec4d3b29aa855
|
9bc8d94aa9c891ae49782e8ab384ca3e89919d97
|
refs/heads/master
| 2020-04-06T04:32:43.518685
| 2013-11-13T20:49:51
| 2013-11-13T20:49:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,953
|
r
|
diffExpMds.R
|
## .-.-. .-.-. .-.-. .-.-. .-.-. .-.-. .-.-. .-.-.
## /|/ \|\ /|/ \|\ /|/ \|\ /|/ \|\ /|/ \|\ /|/ \|\ / / \ \ / / \ \
##`-' `-`-' `-`-' `-`-' `-`-' `-`-' `-`-' `-`-' ' '
## Aug 2011 nanomed project (Sonia Human Th17 CARs)
## Bonneau lab - "Aviv Madar" <am2654@nyu.edu>,
## NYU - Center for Genomics and Systems Biology
## .-.-. .-.-. .-.-. .-.-. .-.-. .-.-. .-.-. .-.-.
## /|/ \|\ /|/ \|\ /|/ \|\ /|/ \|\ /|/ \|\ /|/ \|\ / / \ \ / / \ \
##`-' `-`-' `-`-' `-`-' `-`-' `-`-' `-`-' `-`-' ' '
# Setup: load the expression matrix `d` (genes x samples) and the human TF
# name list, then allocate result matrices for three pairwise CAR
# comparisons (ICOS vs BBz, BBz vs 28z, ICOS vs 28z).
# NOTE(review): setwd() to an absolute user path makes this script
# non-portable; kept as-is in this doc-only edit.
setwd("/Users/xichen/")
# source required functions
source("code/microarray_r_scripts/util.R")
library(samr)
# set paths
path.input <- "data/Microarray_1/output/"
path.output <- "data/Microarray_1/results/"
# set file names
file.data <- paste(sep="",path.input,"sonia_28_icos_bb_time_series_data_matrix_Dec_11_2012.RData")
file.tfs <- paste(sep="",path.input,"humanTFs.RData")
# load data (variable name is d)
load(file.data)
# load human tf names humanTFNames
load(file.tfs)
# get names of experiments we want to compare to ICOS 4hr to 0hr
# Column-name convention: leading digit encodes the CAR (1 = 28z, 2 = BBz,
# 3 = ICOS), the rest encodes the time point (e.g. "30h" = ICOS at 0 hr).
icos.ctrl.ix <- grep("30h",colnames(d),value=T)## ICOS 0hr
bb.ctrl.ix <- grep("20h",colnames(d),value=T)## BBz 0hr
z28.ctrl.ix <- grep("10h",colnames(d),value=T)## 28z 0hr
#h24.expt.ix <- grep("324h",colnames(d),value=T)## ICOS 24hr
#d4.expt.ix <- grep("34days",colnames(d),value=T)## ICOS 4days
## cd4.expt.names <- paste(sep="","cd4_t",c(2,6,12,24,48),"hr")
## cd8.expt.names <- paste(sep="","cd8_t",c(2,6,12,24,48),"hr")
icos.expt.names <- paste(sep="","3",c("4h","8h","24h","4days"))
bb.expt.names <- paste(sep="","2",c("4h","8h","24h","4days"))
z28.expt.names <- paste(sep="","1",c("4h","8h","24h","4days"))
all.expt.names <- c(icos.expt.names[1],bb.expt.names[1],z28.expt.names[1])
# create a list to hold diff expression results (and fold change results) of each comparison
#res <- list()
#fc <- list()
## data matrix to store p-value, t-test score, fold-change of differential expression between CARs
# NOTE(review): `nr=`/`nc=` rely on partial argument matching of nrow/ncol;
# works, but spell them out if this code is ever touched.
w.p <- matrix(0,nr=dim(d)[1],nc=3)
rownames(w.p) <- rownames(d)
colnames(w.p) <- c(paste(sep="",icos.expt.names[1], "_vs_", bb.expt.names[1]),paste(sep="",bb.expt.names[1], "_vs_", z28.expt.names[1]),paste(sep="",icos.expt.names[1], "_vs_", z28.expt.names[1]))
# w.t holds t statistics, w.fc fold changes (same dims/dimnames as w.p).
w.t <- w.p
w.fc <- w.p
# Three pairwise SAM comparisons at the 4h time point. Each block: select the
# two sample groups, build the samr input list (paired two-class design,
# log2 data), run SAM via run.sam() (defined in util.R, not visible here),
# then store the two-sided p-value (t distribution, df = 2 -- presumably
# 3 donor pairs), t statistic, and fold change into w.p / w.t / w.fc.
# --- Comparison 1: ICOS 4h vs BBz (columns "A/B/C 24h") ---
# create SAM data object (to compare icos expt_i to bbz control)
#for(i in 1:length(all.expt.names )){
	#time.point <- strsplit(icos.expt.names[i],"3")[[1]][2]
	#h0.expt.ix <- grep("30h",colnames(d),value=T)##ICOS 0hr
expt.ix <- grep(icos.expt.names[1],colnames(d),value=T)
ctrl.ix <- paste(sep="",c("A","B","C"),"24h")#(bb.expt.names[1],colnames(d),value=T)
d.sub <- d[,c(ctrl.ix,expt.ix)]
##two-class paired
data <- list(x=d.sub,y=c(-(1:length(ctrl.ix)),(1:length(expt.ix))),
			 geneid=as.character(1:dim(d)[1]),genenames=rownames(d), logged2=TRUE)
##two-class unpaired
#data <- list(x=d.sub,y=c(rep(1,length(icos.ctrl.ix)),rep(2,length(expt.ix))),
#			 geneid=as.character(1:dim(d)[1]),genenames=rownames(d), logged2=TRUE)
sam.mat <- run.sam(data)
w.p[rownames(sam.mat),paste(sep="",icos.expt.names[1], "_vs_", bb.expt.names[1])] <- 2*pt(-abs(sam.mat[,"t_test"]),df = 2)
w.t[rownames(sam.mat),paste(sep="",icos.expt.names[1], "_vs_", bb.expt.names[1])] <- sam.mat[,"t_test"]
w.fc[rownames(sam.mat),paste(sep="",icos.expt.names[1], "_vs_", bb.expt.names[1])] <- sam.mat[,"fold_change"]
#fc[[ time.point ]][rownames(sam.mat),paste(sep="", icos.expt.names[i], "_vs_ctrl")] <- sam.mat[,"fold_change"]
#}
# --- Comparison 2: ICOS 4h vs 28z 4h ---
# create SAM data object (to compare icos expt_i to z28 control)
expt.ix <- grep(icos.expt.names[1],colnames(d),value=T)
ctrl.ix <- grep(z28.expt.names[1],colnames(d),value=T)
d.sub <- d[,c(ctrl.ix,expt.ix)]
##two-class paired
data <- list(x=d.sub,y=c(-(1:length(ctrl.ix)),(1:length(expt.ix))),
			 geneid=as.character(1:dim(d)[1]),genenames=rownames(d), logged2=TRUE)
##two-class unpaired
#data <- list(x=d.sub,y=c(rep(1,length(icos.ctrl.ix)),rep(2,length(expt.ix))),
#			 geneid=as.character(1:dim(d)[1]),genenames=rownames(d), logged2=TRUE)
sam.mat <- run.sam(data)
w.p[rownames(sam.mat),paste(sep="",icos.expt.names[1], "_vs_", z28.expt.names[1])] <- 2*pt(-abs(sam.mat[,"t_test"]),df = 2)
w.t[rownames(sam.mat),paste(sep="",icos.expt.names[1], "_vs_", z28.expt.names[1])] <- sam.mat[,"t_test"]
w.fc[rownames(sam.mat),paste(sep="",icos.expt.names[1], "_vs_", z28.expt.names[1])] <- sam.mat[,"fold_change"]
# --- Comparison 3: BBz (columns "A/B/C 24h") vs 28z 4h ---
# create SAM data object (to compare bb expt_i to z28 control)
expt.ix <- paste(sep="",c("A","B","C"),"24h")
ctrl.ix <- grep(z28.expt.names[1],colnames(d),value=T)
d.sub <- d[,c(ctrl.ix,expt.ix)]
##two-class paired
data <- list(x=d.sub,y=c(-(1:length(ctrl.ix)),(1:length(expt.ix))),
			 geneid=as.character(1:dim(d)[1]),genenames=rownames(d), logged2=TRUE)
##two-class unpaired
#data <- list(x=d.sub,y=c(rep(1,length(icos.ctrl.ix)),rep(2,length(expt.ix))),
#			 geneid=as.character(1:dim(d)[1]),genenames=rownames(d), logged2=TRUE)
sam.mat <- run.sam(data)
w.p[rownames(sam.mat),paste(sep="",bb.expt.names[1], "_vs_", z28.expt.names[1])] <- 2*pt(-abs(sam.mat[,"t_test"]),df = 2)
w.t[rownames(sam.mat),paste(sep="",bb.expt.names[1], "_vs_", z28.expt.names[1])] <- sam.mat[,"t_test"]
w.fc[rownames(sam.mat),paste(sep="",bb.expt.names[1], "_vs_", z28.expt.names[1])] <- sam.mat[,"fold_change"]
# Count genes significant (p < 0.05) in at least one of the three pairwise
# CAR comparisons stored column-wise in w.p. Vectorised replacement for the
# original index loops, which also relied on the unsafe 1:length() pattern.
j <- sum(w.p[, 1] < 0.05 | w.p[, 2] < 0.05 | w.p[, 3] < 0.05)

# Per-comparison counts of genes passing both the significance cut
# (p < 0.05) and the effect-size cut (|log2 fold-change| > 1).
# Columns: 1 = ICOS vs BBz, 2 = BBz vs 28z, 3 = ICOS vs 28z.
n.icos.bbz <- sum(w.p[, 1] < 0.05 & abs(log2(w.fc[, 1])) > 1)
n.bbz.z28  <- sum(w.p[, 2] < 0.05 & abs(log2(w.fc[, 2])) > 1)
n.icos.z28 <- sum(w.p[, 3] < 0.05 & abs(log2(w.fc[, 3])) > 1)
# Keep the p-value rows for genes significant (p < 0.05) in at least one of
# the three comparisons. Logical subsetting replaces the original loop that
# grew `n` with c() on every hit (O(n^2) and fails when no gene qualifies).
# `n` (the selected gene names) is kept as a variable because later code
# indexes the expression matrix with it.
keep <- w.p[, 1] < 0.05 | w.p[, 2] < 0.05 | w.p[, 3] < 0.05
res <- w.p[keep, , drop = FALSE] # drop = FALSE: stay a matrix even for 1 hit
n <- rownames(w.p)[keep]
rownames(res) <- n
########################################################################################################################
# Classical MDS
# N rows (objects) x p columns (variables)
# each row identified by a unique row name
# Project the 9 samples (3 CARs x 3 donors, 4h time point) into 2D with
# classical MDS on the significant-gene expression profiles, then draw
# per-CAR confidence ellipses.
# NOTE(review): this section OVERWRITES `x` (previously re-read expression
# table) and `d` (the loaded expression matrix becomes a dist object) --
# later code below still indexes `d` as the expression matrix and will break.
x <- read.delim("~/data/Microarray_1/output/sonia_28_icos_bb_time_series_data_matrix_Dec_11_2012.xls", header=T)
x <- as.matrix(x)
# grep for all cars all donors only 4hr
ix <- grep("^..4h",colnames(x),perl=T,value=T)
x.sub <- x[n,]
x.sub.sub <- x.sub[,ix]
x.sub.sub.t <- t(x.sub.sub)
d <- dist(x.sub.sub.t) # euclidean distances between the rows
fit <- cmdscale(d,eig=TRUE, k=2) # k is the number of dim
fit # view results
# plot solution
# Build display names by replacing the leading CAR digit with a label.
nms.vec <- gsub("4h","",rownames(fit$points))
nms.vec <- gsub("2","-BBz",nms.vec)
nms.vec <- gsub("1","-28z",nms.vec)
nms.vec <- gsub("3","-ICOSz",nms.vec)
par(mar=c(5.1, 5.1, 4.1, 4.1), xpd=TRUE,font=2,font.axis=2,font.lab=2)
library("vegan")
x <- fit$points[,1]
y <- fit$points[,2]
# Point colours/symbols cycle 28z (gray), BBz (black), ICOS (red) per donor.
plot(fit$points,col=c("gray","black","red","gray","black","red","gray","black","red"),pch=c(0,15,19,0,15,19,0,15,19),xlab = "Dimension 1",ylab = "Dimension 2",xlim = c(-20,20),ylim = c(-20,20),lwd = 2,cex=2,cex.lab = 1.5,cex.axis=1.5,las=1)
legend("topright",pch=c(0,15,19),lwd=2,legend=c("28z","BBz","ICOSz"),col=c("gray","black","red"),bty = "n",cex=1.2)
text(x, y, labels = c("A","A","A","B","B","B","C","C","C"), cex=1.2,pos=3)
#axis(2,cex.axis=1.2)
#axis(1,cex.axis=1.2)
# ordiellipse(fit$points[c(3,6,9),],c(1,1,1),conf=0.9, kind = "sd",lwd=2, draw = "polygon", border = "red")
#ordiellipse(fit$points[c(2,5,8),],c(1,1,1),conf=0.9, kind = "sd",lwd=4, draw = "polygon", border = "black")
ordiellipse(fit$points[c(1,4,7),],c(1,1,1),conf=0.9, kind = "sd",lwd=3, draw = "polygon", border = "gray")
library(cluster)
# ICOS ellipse via cluster::ellipsoidhull, with covariance manually inflated.
xy.icos <- fit$points[c(3,6,9),]
#exy.icos <- ellipsoidhull(as.matrix(xy.icos))
#lines(predict(exy.icos),col = "red",lwd = 4)
exy.icos <- ellipsoidhull(as.matrix(xy.icos))
exy.icos$cov[1,1] <- exy.icos$cov[1,1]*1.5
exy.icos$cov[2,2] <- exy.icos$cov[2,2]*3
lines(predict(exy.icos),col = "red",lwd = 3)
#xy.z28 <- fit$points[c(1,4,7),]
#exy.z28 <- ellipsoidhull(as.matrix(xy.z28))
#lines(predict(exy.z28),col = "gray",lwd = 4)
xy.bbz <- fit$points[c(2,5,8),]
exy.bbz <- ellipsoidhull(as.matrix(xy.bbz))
# NOTE(review): these assign from exy.icos$cov, not exy.bbz$cov -- looks like
# a copy-paste slip; confirm intent before fixing.
exy.bbz$cov[1,1] <- exy.icos$cov[1,1]*1.002
exy.bbz$cov[2,2] <- exy.icos$cov[2,2]*1.002
lines(predict(exy.bbz),col = "black",lwd = 3)
# NOTE(review): `exy.z28` is only created in the commented-out lines above,
# so the two distances involving it error with "object 'exy.z28' not found"
# unless that block is restored.
d.icos.bbz <- sqrt((exy.icos$loc[1]-exy.bbz$loc[1])^2 + (exy.icos$loc[2]-exy.bbz$loc[2])^2)
d.icos.z28 <- sqrt((exy.icos$loc[1]-exy.z28$loc[1])^2 + (exy.icos$loc[2]-exy.z28$loc[2])^2)
d.bbz.z28 <- sqrt((exy.bbz$loc[1]-exy.z28$loc[1])^2 + (exy.bbz$loc[2]-exy.z28$loc[2])^2)
#######################################^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# calc specificity of diff exression between different CARs
# NOTE(review): this whole section appears STALE. It treats `res` and `fc`
# as per-time-point lists (the commented-out design near the top of the
# script), but `res` was redefined as a matrix above and `fc` was never
# created, and `w` is read (line with humanTFNames) before it is defined
# below. As written, this section errors; kept verbatim with warnings.
for(i in 1:length(icos.expt.names )){
  time.point <- strsplit(icos.expt.names[i],"3")[[1]][2]
  ## calc difference btwn icos and bb/z28 lists of differentially expressed genes (for 48hr vs. ctrl)
  res[[ time.point ]][,"diff_icos_vs_bb"] <- res[[ time.point ]][,1]-res[[ time.point ]][,2]
  fc[[ time.point ]][,"diff_icos_vs_bb"] <- fc[[ time.point ]][,1]-fc[[ time.point ]][,2]
  res[[ time.point ]][,"diff_icos_vs_z28"] <- res[[ time.point ]][,1]-res[[ time.point ]][,3]
  fc[[ time.point ]][,"diff_icos_vs_z28"] <- fc[[ time.point ]][,1]-fc[[ time.point ]][,3]
  ## for each tf put a 1 for non tf put a 0 in column is.tf
  ix <- which(rownames(w) %in% humanTFNames)
  res[[ time.point ]][ix,"is_tf"] <- 1
  fc[[ time.point ]][ix,"is_tf"] <- 1
}
x <- res[["4days"]]
y <- fc[["4days"]]
write.table(x,sep="\t",,file=paste(sep="",path.input,"sam_diff_exp_4days_vs_ctrl_paired.xls"))
write.table(y,sep="\t",,file=paste(sep="",path.input,"fold_change_4days_vs_ctrl_paired.xls"))
# get a matrix with specificiy scores at each time point (positive means upregulated in cd4, negative means upregulated in cd8)
w <- matrix(0,nr=dim(res[[ 1 ]])[1],nc=length(res))
rownames(w) <- rownames(res[[ 1 ]])
colnames(w) <- names(res)
for(i in 1:length(res)){
  time.point <- names(res)[i]
  #w[,time.point] <- res[[time.point]][,"diff_icos_vs_bb"]
  w[,time.point] <- res[[time.point]][,"diff_icos_vs_z28"]
}
w <- w[order(abs(w[,"4h"]),decreasing=T),]
w.fc <- matrix(0,nr=dim(res[[ 1 ]])[1],nc=length(res))
rownames(w.fc) <- rownames(res[[ 1 ]])
colnames(w.fc) <- names(res)
for(i in 1:length(fc)){
  time.point <- names(res)[i]
  #w.fc[,time.point] <- fc[[time.point]][,"diff_icos_vs_bb"]
  w.fc[,time.point] <- fc[[time.point]][,"34h_vs_ctrl"]
  #w.fc[,time.point] <- fc[[time.point]][,"24h_vs_ctrl"]
  #w.fc[,time.point] <- fc[[time.point]][,"14h_vs_ctrl"]
  #w.fc[,time.point] <- fc[[time.point]][,"diff_icos_vs_z28"]
}
w.fc <- w.fc[order(abs(w.fc[,"4h"]),decreasing=T),]
#write.table(w.fc,sep="\t",,file=paste(sep="",path.input,"fold_change_icos_vs_bb_unpaired.xls"))
write.table(w.fc,sep="\t",,file=paste(sep="",path.input,"fold_change_icos_4h_vs_ctrl_paired.xls"))
#write.table(w.fc,sep="\t",,file=paste(sep="",path.input,"fold_change_bb_4h_vs_ctrl_paired.xls"))
#write.table(w.fc,sep="\t",,file=paste(sep="",path.input,"fold_change_z28_4h_vs_ctrl_paired.xls"))
# Volcano-style gene selection: SAM score cut AND fold-change cut.
cut.sam <- 4
cut.fc <- 2
gns.sam <- rownames(w)[which(w[,"4h"]>cut.sam)]
gns.fc <- rownames(w)[which(w.fc[,"4h"]>cut.fc)]
gns.volcano <- intersect(gns.sam,gns.fc)
# take genes with big difference in expression at time 48hr
# NOTE(review): `bb.expt.names[i]` reuses the loop index `i` left over from
# the loop above, and `d` is a dist object at this point (see MDS section) --
# both look unintended.
expt.ix.icos<- grep(icos.expt.names[1],colnames(d),value=T)
expt.ix.bb<- grep(paste(sep="","^.",bb.expt.names[i]),colnames(d),value=T)##grep(bb.expt.names[1],colnames(d),value=T)
x=d[,expt.ix.icos]-d[,expt.ix.bb]
x.mean=apply(x,1,mean)
x.sum=apply(x,1,sum)
ix=sort(x.mean,decreasing=T,index.return=T)$ix
gns.abs.diff.4h <- names(x.mean)[ix]
gns.intersting <- c("IL23R","IL21","PRG4","AIM2","IL1R1","FNBP1L","IL1A","IL17F","NEK3","ETV6","FRMD4B","CXCR5","NCKAP1","PPP4R4","SPATS2L",
                    "CD40LG","CD4","RTKN2","FAM40B","GPR87","HSD11B1","CTLA4","CD109","PTGR1",
                    "IL1R2","CPM","LY75","CCL20","DYNC2LI1","IL2","MYOF","DEPDC1","PLEKHH2","SNORA73A","CYFIP1")
## gns <- gns.abs.diff.48hr[1:100]
## gns <- c("ETV6","SKIL","ZNF670","EGR1","AHR","IL23R","IL21","CCR2","TSPYL2","CCR7")
gns <- gns.intersting
# One page per gene of interest; plot.gn() is defined in util.R (not visible).
pdf(file=paste(sep="",path.output,"interesting_genes.pdf"))
for(i in 1:length(gns)){
  plot.gn(d,w,gns[i],cex=1.5)
}
dev.off()
# NOTE(review): duplicate re-assignment of gns.intersting (identical values).
gns.intersting <- c("IL23R","IL21","PRG4","AIM2","IL1R1","FNBP1L","IL1A","IL17F","NEK3","ETV6","FRMD4B","CXCR5","NCKAP1","PPP4R4","SPATS2L",
                    "CD40LG","CD4","RTKN2","FAM40B","GPR87","HSD11B1","CTLA4","CD109","PTGR1",
                    "IL1R2","CPM","LY75","CCL20","DYNC2LI1","IL2","MYOF","DEPDC1","PLEKHH2","SNORA73A","CYFIP1")
|
388ca1a667c6ac15ffe0a74be30ec50764a58efd
|
7a8fc2ffd84f18804046abc9bc41439ad9857cdf
|
/ActigraphAuswertung/RScripts/Auswertung_Kreuzkorrelation.R
|
3413cc0b3ba98b59123d1477470b5503ad306fbd
|
[] |
no_license
|
JJasonWang40/motion-sensor-analysis-package
|
b769827d8254343f48268adc1c31ed5047c71018
|
d547316f1fac375bf03866ff89cd238ef4156ee4
|
refs/heads/master
| 2016-08-12T20:57:47.540913
| 2012-04-03T17:30:30
| 2012-04-03T17:30:30
| 46,698,895
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 603
|
r
|
Auswertung_Kreuzkorrelation.R
|
# Cross-correlation ("Kreuzkorrelation") plot between two data series,
# written as a JPEG. Inputs are supplied by the calling script:
# datensatz0Daten / datensatz1Daten (numeric series), datensatz0Name /
# datensatz1Name (labels), outputFolder, outputPrefix, outputWidth,
# outputHeight.

# Close any graphics device left open by a previous run. Explicit `> 0`
# comparison instead of numeric truthiness; dev.list() returns NULL when no
# device is open, so length() is 0 in that case.
if (length(dev.list()) > 0) {
	dev.off()
}

print(paste("length(datensatz0Daten) = ", length(datensatz0Daten), " length(datensatz1Daten) = ", length(datensatz1Daten)))

# NOTE(review): paste() uses its default sep = " ", so the output file name
# contains spaces between path components -- presumably unintended, but kept
# so existing output file names remain stable.
jpeg(filename = paste(outputFolder, outputPrefix, datensatz0Name, "-", datensatz1Name, "_Kreuzkorrelation.jpg"), width = outputWidth, height = outputHeight, units = "px", pointsize = 12, quality = 75, bg = "white", restoreConsole = TRUE)

# ccf() both draws the cross-correlation plot (plot = TRUE) and returns the
# acf object, kept in `correlation` for possible downstream use.
correlation <- ccf(datensatz0Daten, datensatz1Daten, type = "correlation", plot = TRUE, main = paste("Kreuzkorrelation zwischen \r\n", datensatz0Name, " und ", datensatz1Name))

dev.off()
|
ba06f0bb0588d1851fa34144a33616e7d0019777
|
5a6509d299864f09c63e78a383e7b4077b766c53
|
/R/api_check_batch.R
|
decbe164c3096aec76da1ac5d81018eda341a2c0
|
[] |
no_license
|
UCSF-MSLAB/msbwaiter
|
4395cc69e32c52007c630618a0efba807c25c315
|
c7527927b1bee0b2ae82fe9733847ea9708be167
|
refs/heads/master
| 2021-01-10T15:21:26.088000
| 2016-05-13T18:07:57
| 2016-05-13T18:07:57
| 44,769,673
| 0
| 1
| null | 2016-05-12T13:47:27
| 2015-10-22T19:54:12
|
R
|
UTF-8
|
R
| false
| false
| 6,703
|
r
|
api_check_batch.R
|
#' Create or update MS Bioscreen data entries?
#'
#' \code{api_check_batch} fetches all the bioscreen data for the specified endpoint. \code{api_check_batch} then compares each entry
#' in the inputed sufl data set with the corresponding entry in the bioscreen data set. For each entry in the inputed sufl data set,
#' \code{api_check_batch} returns "create", "update", or "no action". When \code{change} is set to TRUE, \code{api_check_batch} will
#' proceed to actually create and update entries. \code{api_check_batch} is similar to \code{api_check} except that it can take a
#' sufl data set with multiple entries. When there is only one entry, use \code{api_check} for faster comparison. See ?\code{api_check}
#' for more details.
#'
#' @inheritParams api_do_action
#' @inheritParams api_check
#' @param sufl_batch a data frame with any number of rows and column names that follow the current SUFL specification (1.0). At minimum, sufl_batch
#' must contain the identifier columns "source_id" and "external_identifier". For attacks, treatments and visits data, sufl_batch
#' must also contain the identifier columns "patient_source_id" and "patient_external_identifier".
#' @param destructive if TRUE and change = TRUE, \code{api_check_batch} will delete any data in the bioscreen that is not found in \code{sufl_batch} but that
#' has a \code{source_id} that is found \code{sufl_batch}.
#' @param max_update the maximum number of entries that you expect will need to be updated. If there are more than max_update entries to update,
#' \code{api_check_batch} will not proceed to create/update/delete entries in the bioscreen even if \code{change} is equal to TRUE. Set to NA for no restriction
#' on max_update.
#' @param max_delete the maximum number of entries that you expect will need to be deleted from the bioscreen when destructive = TRUE. If there are more
#' than max_delete entries to delete, \code{api_check_batch} will not proceed to create/update/delete entries in the bioscreen even if \code{destructive} and \code{change}
#' are equal to TRUE. Set to NA for no restriction on max_delete.
#'
#' @return
#' \code{api_check_batch} returns a list of length 2. The first entry in the list is a vector the same length as the number
#' of rows in \code{sufl_batch} where each entry of the vector is either create', 'update', or 'no action'. See ?\code{api_check}
#' for more details. The second entry in the list is the number of entries that were found in the bioscreen but not in \code{sufl_batch}.
#'
#' @seealso \code{\link{api_check}}, \code{\link{api_get_batch}}, \code{\link{api_create}},
#' \code{\link{api_update}}, \code{\link{to_json_non_array}}
#' @export
# Compare every row of `sufl_batch` against the bioscreen data for `endpoint`
# and report (and, when change = TRUE, perform) the required action per row:
# "create", "update", or "no action". When destructive = TRUE as well,
# bioscreen entries sharing a source_id with sufl_batch but absent from it
# are deleted. Full parameter documentation is in the roxygen block above.
# Returns list(action_list = <character vector>,
#              number_of_entries_only_in_bioscreen = <int>).
api_check_batch <- function(sufl_batch, endpoint,
                            ignore_colnames = c("first_name", "last_name"),
                            base_url = "https://msbioscreen-uat.herokuapp.com/api/v1",
                            token = get_token(), verbose_b = TRUE,
                            keep_na = FALSE, change = FALSE,
                            destructive = FALSE, max_update = 200, max_delete = 200){

  # get batch of data from bioscreen...
  data_from_app <- api_get_batch(endpoint = endpoint, base_url = base_url, token = token, verbose_b = FALSE)

  # Decide the action for each sufl_batch row. seq_len() (rather than 1:nrow)
  # is safe for a zero-row batch, and the list is preallocated.
  action_list <- vector("list", nrow(sufl_batch))
  for (i in seq_len(nrow(sufl_batch))) {
    action_list[[i]] <- compare_entries(sufl_data = sufl_batch[i, ], data_from_app = data_from_app, endpoint = endpoint,
                                        ignore_colnames = ignore_colnames,
                                        verbose_b = verbose_b, keep_na = keep_na)
  }

  # Entries present in the bioscreen (for the source_ids covered by
  # sufl_batch) but absent from sufl_batch; candidates for deletion when
  # destructive = TRUE and change = TRUE.
  destroy_data <- data_from_app[data_from_app$source_id %in% unique(sufl_batch$source_id), ]
  unique_sufl_batch_id <- paste(sufl_batch$source_id, sufl_batch$external_identifier, sep = "_")
  unique_destroy_data_id <- paste(destroy_data$source_id, destroy_data$external_identifier, sep = "_")
  destroy_data <- destroy_data[!unique_destroy_data_id %in% unique_sufl_batch_id, ]
  destroy_data_n <- nrow(destroy_data)

  if (verbose_b) {
    cat(sprintf("There are %s entries that are found in the bioscreen (source_id = %s) that are not found in sufl_batch.\n",
                destroy_data_n, paste(unique(sufl_batch$source_id), collapse = ",")))
  }

  # Safeguard: warn (and, when change = TRUE, abort) if more entries need
  # updating than expected. `&&` (scalar, short-circuiting) protects the
  # comparison when max_update is NA.
  n_update <- sum(unlist(action_list) == "update")
  if (!is.na(max_update) && n_update > max_update) {
    warning(sprintf("%d entries to update is above the maximum set (%d).", n_update, max_update))
    if (change) {
      stop("MAX_UPDATE error: too many entries to update.")
    }
  }

  # Safeguard: same logic for deletions in destructive mode.
  if (!is.na(max_delete) && destructive && destroy_data_n > max_delete) {
    warning(sprintf("%d entries to delete is above the maximum set (%d).", destroy_data_n, max_delete))
    if (change) {
      stop("MAX_DELETE error: too many entries to delete.")
    }
  }

  # if change = TRUE, proceed to create/update/delete
  if (change) {
    for (i in seq_len(nrow(sufl_batch))) {
      action <- action_list[[i]]
      switch(action,
             create = {
               api_create(sufl_data = sufl_batch[i, ], endpoint = endpoint,
                          base_url = base_url, verbose_b = verbose_b)
             },
             update = {
               api_update(sufl_data = sufl_batch[i, ], endpoint = endpoint,
                          ignore_colnames = ignore_colnames,
                          base_url = base_url, verbose_b = verbose_b,
                          keep_na = keep_na)
             }
      )
    }
    if (destructive) {
      if (verbose_b) {
        cat("Deleting", destroy_data_n, "entries from the bioscreen.\n")
      }
      # seq_len(0) iterates zero times, so no explicit destroy_data_n != 0
      # guard is needed.
      for (i in seq_len(destroy_data_n)) {
        api_delete(source_id = destroy_data$source_id[i],
                   external_identifier = destroy_data$external_identifier[i],
                   endpoint = endpoint, base_url = base_url,
                   token = token, verbose_b = verbose_b)
      }
    }
  }

  return(list(action_list = unlist(action_list), number_of_entries_only_in_bioscreen = destroy_data_n))
}
|
8730450aaa78195142ee0ea39fabe50ecc30253e
|
6cc8e2e57cc6f906bc64f8394c6683dc0d614918
|
/R/plotSlice.R
|
fe20970801dba9a2dc4cb623b5ce63444a1e1669
|
[] |
no_license
|
cran/mgcViz
|
cf3f445892ad2954e8ca06dec808cc551bd012d4
|
de825975b97a2f6a020b84f8f6d30b06bca65c60
|
refs/heads/master
| 2021-11-01T08:37:43.917586
| 2021-10-05T06:10:12
| 2021-10-05T06:10:12
| 145,894,342
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,593
|
r
|
plotSlice.R
|
#'
#' Plotting sequence of slices of 2D smooth effect
#'
#' @description This function allows to slice a multi-dimensional (D > 2) smooth effect,
#' and to plot the resulting sequence of 2D slices in an array of plots.
#'
#' @param x a smooth effect object, extracted using [mgcViz::sm].
#' @param fix a named list of vectors, where the i-th entry of each vector indicates the value we want to
#' use for the covariate for i-th slice. When plotting a smooth in (d+2) dimensions,
#' we need d vectors, because d variables must be fixed. All vectors must have either the same length
#' (the number of slices) or length 1. \code{fix} can contain at most 2 vectors, so if d>=5, we need to set
#' at least one covariate to a scalar.
#' @param a.facet arguments to be passed to [ggplot2::facet_wrap] or [ggplot2::facet_grid]. The former gets
#' called when \code{fix} contains one vector, the latter when \code{fix} contains two vectors.
#' @param ... further arguments to be passed to [plot.mgcv.smooth.MD].
#' @return An objects of class \code{plotSmooth}.
#' @name plotSlice
#' @examples
#' \dontrun{
#' ### Example 1: plotting slices of 3D smooth
#' # Simulate data and fit GAM
#' library(mgcViz)
#' n <- 1e3
#' x <- rnorm(n); y <- rnorm(n); z <- rnorm(n)
#' ob <- (x-z)^2 + (y-z)^2 + rnorm(n)
#' b <- gam(ob ~ s(x, y, z))
#' v <- getViz(b)
#'
#' # Get plot of slices and add layers
#' pl <- plotSlice(x = sm(v, 1),
#' fix = list("z" = seq(-2, 2, length.out = 9)))
#' pl + l_fitRaster() + l_fitContour() + l_points() + l_rug()
#'
#' # Over-ride default layout
#' pl <- plotSlice(x = sm(v, 1),
#' fix = list("z" = seq(-2, 2, length.out = 9)),
#' a.facet = list(nrow = 2))
#' pl + l_fitRaster() + l_fitContour() + theme(panel.spacing = unit(0.5, "lines"))
#'
#' ### Example 2: plotting slices of 4D smooth
#' # Simulate data and fit GAM
#' n <- 5e3
#' x <- rnorm(n); y <- rnorm(n); z <- rnorm(n); z2 <- rnorm(n)
#' ob <- (x-z)^2 + (y-z)^2 + z2^3 + rnorm(n)
#' b <- bam(ob ~ s(x, y, z, z2), discrete = TRUE)
#' v <- getViz(b)
#'
#' # Plot slices across "z" and "x"
#' pl <- plotSlice(x = sm(v, 1),
#' fix = list("z" = seq(-2, 2, length.out = 3), "x" = c(-1, 0, 1)))
#' pl + l_fitRaster() + l_fitContour() + l_points() + l_rug()
#'
#' # Plot slices across "x", keeping "z" fixed
#' pl <- plotSlice(x = sm(v, 1),
#' fix = list("z" = 0, "x" = seq(-3, 3, length.out = 9)))
#' pl + l_fitRaster() + l_fitContour() + l_points() + l_rug()
#' }
#'
#' @importFrom plyr alply
#' @importFrom stats as.formula
#' @rdname plotSlice
#' @export plotSlice
#'
# Slice a multi-dimensional (D > 2) smooth effect at the values given in
# `fix` and lay the resulting 2D slices out as a grid of ggplot facets.
# See the roxygen block above for full parameter documentation.
# Returns an object of class c("plotSmooth", "gg").
plotSlice <- function(x, fix, a.facet = list(), ...){

  # inherits() is the idiomatic (and dispatch-safe) class test.
  if( !inherits(x, "mgcv.smooth.MD") ){
    stop( "x must be of class \"mgcv.smooth.MD\"" )
  }

  # Number of values supplied per fixed covariate. vapply() (not sapply())
  # guarantees an integer vector regardless of input shape.
  len <- vapply(fix, length, integer(1))
  # grD = number of "slicing" covariates (those given more than one value);
  # it determines the facet layout (1 -> facet_wrap, 2 -> facet_grid).
  grD <- sum(len > 1)

  if( grD > 2 ) {
    stop("'fix' cannot contain more than 2 vectors. Some variables must be fixed to scalars.")
  }
  if( grD == 0){
    stop("'fix' does not contain any vector: cannot construct a grid of plots.")
  }

  gridVar <- names(fix)[ len > 1 ]
  nfx <- length(fix)

  # Grid with all combinations of the fixed values: one row per slice.
  indx <- as.matrix( do.call("expand.grid", fix) )
  nsl <- nrow(indx)

  # Compute plot data for each slice, tagging both the fit and residual data
  # with the fixed values (".fx."-prefixed columns) so they can drive facets.
  plts <- alply(indx, 1,
                function(.vr, ...){
                  .d <- plot(x, fix = .vr, ...)$data
                  .d$fit[paste0(".fx.", names(.vr))] <- drop(matrix(rep(.vr, each = nrow(.d$fit)),
                                                                    nrow(.d$fit), nfx))
                  .d$res[paste0(".fx.", names(.vr))] <- drop(matrix(rep(.vr, each = nrow(.d$res)),
                                                                    nrow(.d$res), nfx))
                  return(.d)
                }, ...)

  # Stack the per-slice data sets into one.
  .dat <- list()
  .dat$fit <- do.call("rbind", lapply(plts, function(.x) .x$fit))
  .dat$res <- do.call("rbind", lapply(plts, function(.x) .x$res))
  .dat$misc <- plts[[1]]$misc

  # One extra plot just to get the axis/title labels.
  lbs <- plot(x, fix = indx[1, ])$ggObj$labels

  .pl <- ggplot(data = .dat$fit, aes(x = x, y = y, z = tz)) +
    labs(title = lbs$title, x = lbs$x, y = lbs$y) +
    theme_bw() +
    theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())

  # Default facet labeller: "var = value" with the internal ".fx." prefix
  # stripped from the displayed variable names.
  if( is.null(a.facet$labeller) ){ a.facet$labeller <- function (labels, multi_line = TRUE, sep = "=") {
    .labels <- label_both(labels, multi_line = TRUE, sep)
    .labels <- lapply(.labels, function(x) substring(x ,5)) # Drop .fx. prefix from labels
    return(.labels)
  }
  }

  if( grD == 1 ){
    if( is.null(a.facet$nrow) && is.null(a.facet$ncol) ){ a.facet$ncol <- floor(sqrt(nsl)) }
    if( is.null(a.facet$facets) ){ a.facet$facets <- as.formula(paste0("~ .fx.", gridVar)) }
    .pl <- .pl + do.call("facet_wrap", a.facet)
  }
  if( grD == 2 ){
    if( is.null(a.facet$facets) ){ a.facet$facets <- paste0(".fx.", gridVar[1], " ~ .fx.", gridVar[2]) }
    .pl <- .pl + do.call("facet_grid", a.facet)
  }

  .pl <- .pl + theme(panel.spacing = unit(0, "lines"))

  out <- structure(list("ggObj" = .pl, "data" = .dat, "type" = c("MD", "slice")),
                   class = c("plotSmooth", "gg"))
  return( out )
}
|
a7e20aa90cc40505389959b729fa562118a3d8ca
|
05696931b4dee4845baeaf2220f0c960069b3cfd
|
/tests/testthat.R
|
f25165a9fd3a741a69e5bc880e8a0b4da1adc256
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
kvantas/hyetor
|
a3c056e2f7f01c7273b567ff89fabd8c77edbe7f
|
4e24367dcd173300b7c0cef964ac7ea82e44fe4c
|
refs/heads/master
| 2020-03-27T04:42:32.757172
| 2019-08-31T20:27:59
| 2019-08-31T20:27:59
| 145,962,234
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 56
|
r
|
testthat.R
|
# Standard testthat entry point: discovers and runs all tests under
# tests/testthat/ for the hyetor package (invoked by R CMD check).
library(testthat)
library(hyetor)
test_check("hyetor")
|
553508eadbd73d2c803a4a5a3fc575276edda4a9
|
6d0fe73f4a177861980154fb5551303dc1a5b5aa
|
/R/convert.old.datasheets.R
|
e44244244ff15ce2ef5792f148735caa834c6e33
|
[] |
no_license
|
npetraco/dustproj
|
ce8d9937f358035867499fbac5939eb39cc8d951
|
18fd1d1d928b9ffd2780b2439892b399f79fd093
|
refs/heads/master
| 2022-07-09T00:43:08.260175
| 2022-06-23T12:31:38
| 2022-06-23T12:31:38
| 159,550,850
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,486
|
r
|
convert.old.datasheets.R
|
#' Convert a row of a read in (flattened) study to the reference class, subclass and attribute
#'
#' Maps one (class, subclass, attribute) triple taken from a flattened study
#' datasheet to the corresponding reference names, using the lookup tables in
#' \code{conversion.info}. A repeated \code{"other()"} subclass is flagged for
#' skipping; any other unresolvable name raises an error so the conversion
#' table can be inspected and fixed manually.
#'
#' @param study.row Length-3 vector: study class, study subclass, study attribute.
#' @param conversion.info List with elements \code{subclass.conversions} and
#'   \code{attribute.conversions} (lookup tables; assumed column layout:
#'   reference names in the leading columns, study names in columns 3/4 —
#'   TODO confirm against the table builder).
#' @return A 1 x 3 matrix with columns "class", "subclass", "attribute"
#'   holding the reference names, or the string "skip" in every cell when the
#'   row should be dropped by the caller.
#'
#' @export
convert.study.row<-function(study.row, conversion.info){
  #print(study.row)
  study.class <- study.row[1]
  study.subclass <- study.row[2]
  study.attrib <- study.row[3]
  # Some classes repeat in the studies because their names haven't changed yet (As of 6/1/19 it's just glass/mineralgrains).
  # Because there can be more than 1 index returned, just pick the 1st since both class names should be the same
  # If this does happen we'll sort out the actual reference class name to change to when we change the subclass name below.
  # Deal with subclasses first
  # Grab the whole subclass block. It contains the class names info as well. They are needed to differentiate study classes
  # that have the same name.
  subclass.block.row.idxs <- which(conversion.info$subclass.conversions[,3] == study.class)
  subclass.block <- conversion.info$subclass.conversions[subclass.block.row.idxs,]
  #print(subclass.block)
  subclass.row.idx <- which(subclass.block[,4] == study.subclass) # This may return more than two indices it the subclass is other()
  # This shouldn't happen, but if the study.subclass is not found, throw an error and manually check to see why.
  if(length(subclass.row.idx) == 0) {
    print(paste("Study class:", study.class))
    print(paste("*Study subclass:", study.subclass))
    stop("Above study.subclass is not found in the conversion table! Check the study datasheet and conversion table to see why. It probably needs to be added to the conversion table.")
  }
  if(length(subclass.row.idx)>1){ # See if subclass name gets repeated in the subclass block...
    if(study.subclass == "other()") { # If other() is the repeating subclass, just indicate to just skip the row and return
      ref.class <- "skip"             # ****ADD MAKE A RECORD OF WHAT THE ROW WAS ??
      ref.subclass <- "skip"
      ref.attrib <- "skip"
      ref.row <- matrix(c(ref.class, ref.subclass, ref.attrib), c(1,3))
      colnames(ref.row) <- c("class","subclass","attribute")
      return(ref.row)
    } else { # If the repeat is not an other() throw an error and manually see what is going on in the study datasheet.
      print("============ PROBLEM SUBCLASS BLOCK!!!!!!!!! =====================")
      print(subclass.block)
      print("vvvvvvvvvvvv PROBLEM SUBCLASS BLOCK!!!!!!!!! vvvvvvvvvvvvvvvvvvvvv")
      print(subclass.block[subclass.row.idx,])
      stop("Repeated subclass names found in this subclass block. Check study datasheet and see why!")
    }
  } else { # Final case. If here, no issue with the subclass at least. Below we check for issues with the attribute
    ref.class <- subclass.block[subclass.row.idx, 1]
    ref.subclass <- subclass.block[subclass.row.idx, 2]
    ref.row <- matrix(c(ref.class, ref.subclass, NA), c(1,3))
    colnames(ref.row) <- c("class","subclass","attribute")
  }
  # Get reference attribute name corresponding to study attribute name GIVEN the study class name
  # Since we should by now know the reference class name corresponding to the study class name use ref.class
  # to pick out the rows of the study attributes conversion table.
  #
  # NOTE: we use ref.class instead of study.class here because some of the study class names are the same (e.g. Glass/Mineral Grains).
  # This is not true of the reference class names.
  attrib.class.row.idxs <- which(conversion.info$attribute.conversions[,1] == ref.class)
  # Use these indices to grab the block of class-attribute info
  attribs.of.class <- conversion.info$attribute.conversions[attrib.class.row.idxs, ]
  # Pluck out the row with the study attribute given the (reference) class
  attrib.row.idx <- which(attribs.of.class[,4] == study.attrib)
  # This shouldn't happen, but if the study.attribute is not found, throw an error and manually check to see why.
  if(length(attrib.row.idx) == 0) {
    print(paste("Study class:", study.class))
    print(paste("Reference class:", ref.class))
    print(paste("Study subclass:", study.subclass))
    print(paste("Reference subclass:", ref.subclass))
    print(paste("*Study attribute:", study.attrib))
    stop("Above study.attrib is not found in the conversion table! Check the study datasheet and conversion table to see why. It probably needs to be added to the conversion table.")
  }
  # Found the attribute if we made it here. Put it in the new row vector containing the converted names:
  ref.attrib <- attribs.of.class[attrib.row.idx, 2]
  ref.row[3] <- ref.attrib
  return(ref.row)
}
#' Convert study classes, subclasses and attributes to the reference classes, subclasses and attributes
#'
#' Parses a study datasheet, then converts every flattened
#' (class, subclass, attribute) row to the reference naming scheme via
#' \code{convert.study.row()}. Rows flagged as "skip" by the row converter are
#' recorded by index and dropped from the reduced data frame.
#'
#' @param study.datasheet.file.path Path to the study datasheet file.
#' @param study.name Name of the study (passed to \code{parse.study.datasheet}).
#' @param study2ref.conversion.info Conversion lookup tables, as consumed by
#'   \code{convert.study.row()}.
#' @param print.lvl Verbosity: 0 = silent, 1 = print each row as it is
#'   processed, 2 = also print a side-by-side comparison of old vs. converted names.
#' @return A list with elements "skip.idxs" (indices of dropped rows, or NULL
#'   if none), "full.converted.df" (all rows) and "reduced.converted.df"
#'   (skipped rows removed).
#'
#' @export
convert.study.datasheet<-function(study.datasheet.file.path, study.name, study2ref.conversion.info, print.lvl=0){
  parsed.dsht.info <- parse.study.datasheet(study.datasheet.file.path, study.name = study.name)
  flat.dsht <- parsed.dsht.info$study.flattened.datasheet
  flat.dsht.new <- array("", dim(flat.dsht))
  skip.idxs <- NULL
  for(i in seq_len(nrow(flat.dsht))) { # seq_len() is safe for a 0-row datasheet (1:0 is not)
    if(print.lvl>0){
      print(paste("Row:",i, "Study class:", flat.dsht[i,1], "Study subclass:", flat.dsht[i,2], "Study attrib:", flat.dsht[i,3] ))
    }
    row.new <- convert.study.row(flat.dsht[i, ], study2ref.conversion.info)
    # Record rows the converter flagged so they can be dropped below:
    if("skip" %in% row.new){
      skip.idxs <- c(skip.idxs, i)
    }
    flat.dsht.new[i,] <- row.new
    if(print.lvl>1){
      compre <- cbind(
        as.matrix(flat.dsht[i, ]),
        t(as.matrix(row.new))
      )
      print(compre)
      print("++++++++++++++++++++++++++++++++++++++++++++++")
    }
  }
  flat.dsht.new.full <- data.frame(flat.dsht.new, parsed.dsht.info$study.flattened.data)
  colnames(flat.dsht.new.full) <- c("class", "subclass", "attribute", "response")
  # BUG FIX: when no rows are skipped, skip.idxs is NULL, and indexing with
  # -NULL (an empty index vector) selects ZERO rows, silently emptying the
  # reduced data frame. Only drop rows when there is something to drop.
  if(length(skip.idxs) > 0){
    flat.dsht.new.red <- flat.dsht.new.full[-skip.idxs,]
  } else {
    flat.dsht.new.red <- flat.dsht.new.full
  }
  row.names(flat.dsht.new.full) <- NULL
  row.names(flat.dsht.new.red) <- NULL
  new.dsht.info <- list(
    skip.idxs,
    flat.dsht.new.full,
    flat.dsht.new.red
  )
  names(new.dsht.info) <- c(
    "skip.idxs",
    "full.converted.df",
    "reduced.converted.df"
  )
  return(new.dsht.info)
}
|
71df5d91885e21db958bd008cb904c7d564c5057
|
cc63b7d10e50f7a96b28c77f354270f848cf6546
|
/man/download_BRENDA_regulators.Rd
|
2e02c4f4f701b450dcbc55f6f5cc92ea6666a15d
|
[
"MIT"
] |
permissive
|
shackett/fluxr
|
83e2ff3916b97a96f8814833d1a5c2459553bde0
|
e37133060c822312a9ec056a70a566736c4d8931
|
refs/heads/master
| 2020-12-26T03:12:01.457667
| 2016-09-20T21:29:20
| 2016-09-20T21:29:20
| 68,309,569
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 324
|
rd
|
download_BRENDA_regulators.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/access_databases.R
\name{download_BRENDA_regulators}
\alias{download_BRENDA_regulators}
\title{Download BRENDA Regulators}
\usage{
download_BRENDA_regulators()
}
\description{
Calls python to summarize all regulators in BRENDA by E.C. number.
}
|
b4092bfe6c89ae9d3aa0a958071902cb8a866b5f
|
14afb56ffe97046a3c596ab8cd35f57b9a257edc
|
/run_analysis.R
|
88e4ecef82f6a70bad8f563ddd1e31337bc6412a
|
[] |
no_license
|
craquiest/GetCleanData
|
54f92ace9f307f73931ee77c830851d9ca82e028
|
74b5fca08daa0bcb98c27456cb0739f3163e3468
|
refs/heads/master
| 2020-06-04T22:06:08.751544
| 2019-06-17T00:38:54
| 2019-06-17T00:38:54
| 192,209,056
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,488
|
r
|
run_analysis.R
|
library(tidyverse) # load packages for dplyr, readr, tidyr and others
library(stringr) # load for regular expressions
# Set a variable for the name of the "root" folder
# that is the result of file unzip
data_folder <- "UCI HAR Dataset"
# For each piece of raw data:
# 1. set the file path in platform independent way
# 2. read in file with read.table
# 3. convert into tibble (tidyverse package), and store as variable
# Information about how recorded activities are coded is
# present in activity_labels.txt in the root folder
path <- file.path(".",data_folder,"activity_labels.txt")
activities <- as_tibble(read.table(path))
names(activities) <- c("code","label")
# The name of all calculated features in the main data is
# present in the root folder
path <- file.path(".",data_folder,"features.txt")
features <- as_tibble(read.table(path,stringsAsFactors = FALSE))
names(features) <- c("code","label")
# Test dataset
path <- file.path(".", data_folder, "test", "subject_test.txt")
test_subject <- as_tibble(read.table(path,stringsAsFactors = FALSE))
names(test_subject) <- "subject"
path <- file.path(".", data_folder, "test", "y_test.txt")
test_activity <- as_tibble(read.table(path,stringsAsFactors = FALSE))
names(test_activity) <- "activity"
path <- file.path(".", data_folder, "test", "X_test.txt")
test_dataset <- as_tibble(read.table(path,stringsAsFactors = FALSE))
names(test_dataset) <- features$label
# Attach subject column and activity column to the left
test_dataset <- bind_cols(test_subject,test_activity,test_dataset)
# Training dataset
path <- file.path(".", data_folder, "train", "subject_train.txt")
train_subject <- as_tibble(read.table(path,stringsAsFactors = FALSE))
names(train_subject) <- "subject"
path <- file.path(".", data_folder, "train", "y_train.txt")
train_activity <- as_tibble(read.table(path,stringsAsFactors = FALSE))
names(train_activity) <- "activity"
# We take a little more care reading training dataset as it is bigger:
# we load a sample to guess column classes, before reading
# the whole file using those guesses (speeds up read.table considerably)
path <- file.path(".", data_folder, "train", "X_train.txt")
train_dataset <- read.table(path, stringsAsFactors = FALSE,nrows = 100)
colClasses <- sapply(train_dataset, class)
train_dataset <- as_tibble(
read.table(path,stringsAsFactors = FALSE,colClasses = colClasses))
names(train_dataset) <- features$label
# Attach subject field and activity field
train_dataset <- bind_cols(train_subject,train_activity,train_dataset)
# Now merge both datasets
merged_dataset <- bind_rows(train_dataset, test_dataset)
# Free memory by removing initial datasets
rm(train_dataset, train_activity,train_subject, colClasses)
rm(test_dataset, test_activity,test_subject)
# we are keeping columns "subject", "activity"
# and columns whose names have "mean()" or "std()" in them
# We find them using regular expressions
# We use the escape \ to make sure to catch ()
nms <- names(merged_dataset)
cols_to_keep <- grepl("mean\\(\\)",nms) | grepl("std\\(\\)",nms)
cols_to_keep <- cols_to_keep | grepl("subject",nms)
cols_to_keep <- cols_to_keep | grepl("activity",nms)
merged_dataset <- merged_dataset[,cols_to_keep]
# Join the activity labels by matching "code" in activities table with
# "activity" column of merged dataset
# as a result, dataset will have extra column "label"
# containing activity label
merged_dataset <- left_join(merged_dataset,activities,by=c("activity"="code"))
# Replace activity code with activity label and discard extra column
merged_dataset <-
merged_dataset %>%
mutate(activity=label)%>%
select(-label)
# Make variable names more descriptive
# First get names of all kept variables in merged dataset
# Names will be changed but order will be preserved
nms <- names(merged_dataset)
# Make more descriptive names by changing to plain english
# Expand abbreviations, add spaces, and eliminate symbols
nms <- str_replace_all(nms, c("tB" = "Time B", "tG" = "Time G"))
nms <- str_replace_all(nms, c("fB" = "Frequency B", "fG" = "Frequency G"))
nms <- str_replace_all(nms, c("-X" = " X axis","-Y" = " Y axis","-Z" = " Z axis"))
nms <- str_replace_all(nms, c("-mean\\(\\)" = " Mean"))
nms <- str_replace_all(nms, c("-std\\(\\)" = " Standard Deviation"))
nms <- str_replace_all(nms, c("BodyBody" = "Body"))
nms <- str_replace_all(nms, c("Mag" = " Magnitude","Jerk" = " Jerk"))
nms <- str_replace_all(nms, c("BodyAcc" = "Body Acceleration"))
nms <- str_replace_all(nms, c("GravityAcc" = "Gravity Acceleration"))
nms <- str_replace_all(nms, c("BodyGyro" = "Body Angular Velocity"))
# Rename our dataset's column names using resulting vector of names
names(merged_dataset) <- nms
# To obtain 2nd independent tidy data set
# we group by subject and by activity, and we take the average
# for all other variables
summarized_dataset <-
merged_dataset %>% group_by(subject, activity) %>%
summarize_all(mean)
# Remove all variables from RAM, except for 2 tidy datasets
rm(features, nms, cols_to_keep, activities, path, data_folder)
# Create the output:
# we write the summarized dataset to a text file in the working directory
write.table(summarized_dataset, file = "SummaryData_Samsung.txt", row.names = FALSE)
# to read in this output file in R you will need to run:
# read.table("SummaryData_Samsung.txt", header = TRUE)
# We leave both merged_dataset and summarized_dataset in memory
# for the user to work with if they opt to.
|
76cdc5973025135d6a61733163c759943ee1739e
|
415302fcd498e1aaf4f35d84d3f13fa59577c264
|
/helper.R
|
68bb43ec5d95b3671b0202228a91ddd0e80066cf
|
[
"MIT"
] |
permissive
|
pseegaha/Shiny-SoSV
|
926536c02a530f69e8d7c920bbccd849eaa0514e
|
76305d0354e3480736e5e8f24c638904998269bb
|
refs/heads/master
| 2020-07-17T02:03:02.078922
| 2019-06-27T02:23:07
| 2019-06-27T02:23:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,973
|
r
|
helper.R
|
library(ggplot2)
library(ggsci)
# Loads pre-fitted GAM objects used by the Shiny app — TODO confirm contents.
load("./data/gam.RData")
# Fixed color palette for SV callers and their union/intersection combinations,
# assembled from the ggsci NPG and JCO palettes so each caller keeps a stable
# color across all plots.
#color_SVCaller <- c(pal_npg("nrc")(10)[1],pal_npg("nrc")(10)[3:10],pal_jco()(6))
color_SVCaller <- c(pal_npg("nrc")(10)[1],pal_npg("nrc")(10)[3:4],
pal_jco()(6)[1],pal_npg("nrc")(10)[6:7], pal_jco()(6)[2],
pal_npg("nrc")(10)[9:10],pal_npg("nrc")(10)[5],pal_npg("nrc")(10)[8],
pal_jco()(6)[c(3,6,4,5)])
# One entry per caller / combination; order must match color_SVCaller above.
SV_caller <- c("Manta", "Lumpy", "GRIDSS",
"MantaLumpyUnion", "MantaLumpyIntersect",
"MantaGRIDSSUnion", "MantaGRIDSSIntersect",
"LumpyMantaUnion","LumpyMantaIntersect",
"LumpyGRIDSSUnion","LumpyGRIDSSIntersect",
"GRIDSSMantaUnion", "GRIDSSMantaIntersect",
"GRIDSSLumpyUnion", "GRIDSSLumpyIntersect")
names(color_SVCaller) <- SV_caller
SV_caller_label <- c("Manta", "Lumpy", "GRIDSS",
expression(Manta*union(Lumpy)), expression(Manta*intersect(Lumpy)),
expression(Manta*union(GRIDSS)), expression(Manta*intersect(GRIDSS)),
expression(Lumpy*union(Manta)), expression(Lumpy*intersect(Manta)),
expression(Lumpy*union(GRIDSS)), expression(Lumpy*intersect(GRIDSS)),
expression(GRIDSS*union(Manta)), expression(GRIDSS*intersect(Manta)),
expression(GRIDSS*union(Lumpy)), expression(GRIDSS*intersect(Lumpy)))
tweaks <-
tags$head(tags$style(HTML("
.multicol {
height: auto;
-webkit-column-count: 3; /* Chrome, Safari, Opera */
-moz-column-count: 3; /* Firefox */
column-count: 3;
#-moz-column-fill: balanced;
#-column-fill: balanced;
}
div.checkbox{
margin-top: 0px;
margin-bottom: 10px;
-webkit-margin-after: 0px;
}
"))
)
controls <-
#list(h3("Multicolumn checkboxGroupInput"),
tags$div(align = 'left',
class = 'multicol', checkboxGroupInput(inputId = "SVCaller3.1",
label = (" "),
# choiceNames = list(HTML("Manta","∪","Lumpy"), HTML("Manta","∩","Lumpy"),
# HTML("Manta","∪","GRIDSS"),
# #"Manta∪GRIDSS",
# HTML("Manta","∩","GRIDSS"),
# #"Manta∩GRIDSS",
# HTML("Lumpy","∪","Manta"), HTML("Lumpy","∩","Manta"),
# HTML("Lumpy","∪","GRIDSS"), HTML("Lumpy","∩","GRIDSS"),
# HTML("GRIDSS","∪","Manta"), HTML("GRIDSS","∩","Manta"),
# HTML("GRIDSS","∪","Lumpy"), HTML("GRIDSS","∩","Lumpy")),
choiceNames = list(HTML("∪","Lumpy"), HTML("∩","Lumpy"),
HTML("∪","GRIDSS"),
#"Manta∪GRIDSS",
HTML("∩","GRIDSS"),
#"Manta∩GRIDSS",
HTML("∪","Manta"), HTML("∩","Manta"),
HTML("∪","GRIDSS"), HTML("∩","GRIDSS"),
HTML("∪","Manta"), HTML("∩","Manta"),
HTML("∪","Lumpy"), HTML("∩","Lumpy")),
choiceValues = c("MantaLumpyUnion", "MantaLumpyIntersect",
"MantaGRIDSSUnion", "MantaGRIDSSIntersect",
"LumpyMantaUnion","LumpyMantaIntersect",
"LumpyGRIDSSUnion","LumpyGRIDSSIntersect",
"GRIDSSMantaUnion", "GRIDSSMantaIntersect",
"GRIDSSLumpyUnion", "GRIDSSLumpyIntersect"),
selected = NULL))
controls <- checkboxGroupInput(inputId = "SVCaller3",
label = ("SV Caller(s)"),
choiceNames = list("Manta ", "Lumpy ", "GRIDSS "
# HTML("Manta","∪","Lumpy"), HTML("Manta","∩","Lumpy"),
# HTML("Manta","∪","GRIDSS"), HTML("Manta","∩","GRIDSS"),
# HTML("Lumpy","∪","Manta"), HTML("Lumpy","∩","Manta"),
# HTML("Lumpy","∪","GRIDSS"), HTML("Lumpy","∩","GRIDSS"),
# HTML("GRIDSS","∪","Manta"), HTML("GRIDSS","∩","Manta"),
# HTML("GRIDSS","∪","Lumpy"), HTML("GRIDSS","∩","Lumpy")
),
choiceValues = c("Manta", "Lumpy", "GRIDSS"
# "MantaLumpyUnion", "MantaLumpyIntersect",
# "MantaGRIDSSUnion", "MantaGRIDSSIntersect",
# "LumpyMantaUnion","LumpyMantaIntersect",
# "LumpyGRIDSSUnion","LumpyGRIDSSIntersect",
# "GRIDSSMantaUnion", "GRIDSSMantaIntersect",
# "GRIDSSLumpyUnion", "GRIDSSLumpyIntersect"
),
selected = "Manta",inline=TRUE)
|
c92c649b3d52f4c98ce8e2de8edc21366264880a
|
b643cde1b3abb2913390d05e18213215a8e1618d
|
/R/nSphereVolume.R
|
8b0ca336f309fde08248ee460f98107ccbee738a
|
[] |
no_license
|
cran/mvst
|
c8385afe55775c59f0264a11c0d868640446706e
|
c0f1828ed74ed5bb0dfc8a2d73141bdcaf3e8fbd
|
refs/heads/master
| 2021-01-17T20:55:36.515387
| 2018-07-24T13:30:03
| 2018-07-24T13:30:03
| 63,795,821
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 219
|
r
|
nSphereVolume.R
|
nSphereVolume <- function(n, r=1, LOG=TRUE){
  # Volume of an n-dimensional ball of radius r.
  #
  # Computed on the log scale for numerical stability (large n would overflow
  # the direct formula):  log V = (n/2) log(pi) + n log(r) - lgamma(n/2 + 1)
  #
  # Args:
  #   n:   dimension (non-negative numeric; vectorized)
  #   r:   radius, default 1 (vectorized)
  #   LOG: if TRUE (default) return the log-volume, otherwise the volume
  #
  # Returns: numeric vector of (log-)volumes.
  stopifnot(is.numeric(n), is.numeric(r))
  logV <- (n/2) * log(pi) + n * log(r) - lgamma(n/2 + 1)
  # isTRUE() instead of the fragile `LOG == TRUE` comparison (NA-safe)
  if(isTRUE(LOG)) logV else exp(logV)
}
|
86f360c334494dfa996d965a82f513ad9cf986fc
|
7554373d5d5cee9adb7d7074e6479359c5ce5c45
|
/man/getParams.Rd
|
987161f1ccc1b37e81b3d86719f62d974fa6e257
|
[] |
no_license
|
philipmgoddard/nnePtR
|
ad6d1ed0e44cad1dcbdc2ad2be0ee0e4e50f9c73
|
29d5ff1b0c5eb3c90d3d8d75a3bb571a0bf4e359
|
refs/heads/master
| 2021-01-10T05:35:56.775078
| 2016-02-17T13:18:20
| 2016-02-17T13:18:20
| 51,033,274
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 515
|
rd
|
getParams.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/accessors.R
\docType{methods}
\name{getParams}
\alias{getParams}
\alias{getParams,nnePtR-method}
\title{Set the generic for accessor (getter) for fitted coefficients}
\usage{
getParams(object)
\S4method{getParams}{nnePtR}(object)
}
\arguments{
\item{object}{object of class nnePtR}
}
\description{
Set the generic for accessor (getter) for fitted coefficients
}
\section{Methods (by class)}{
\itemize{
\item \code{nnePtR}:
}}
|
686e59d79af644d4bd464a0aa89e1fdbeb4bb435
|
e26420970229a0c55ec092168797ed6e42f5708f
|
/R/data.R
|
55abd04117a0799908deb63a30ee5dfc02a81503
|
[
"MIT"
] |
permissive
|
kcha/psiplot
|
cd26b3d39301d20d82e453e4882b60a214afb600
|
7c840f69f7b321b8690e9f3f03191a0cd699a371
|
refs/heads/master
| 2022-06-03T02:54:17.403767
| 2022-03-18T15:30:05
| 2022-03-18T15:30:05
| 27,505,866
| 2
| 1
|
MIT
| 2022-03-18T15:30:06
| 2014-12-03T20:20:07
|
R
|
UTF-8
|
R
| false
| false
| 1,076
|
r
|
data.R
|
#' Sample events with PSI data
#'
#' Contains sample PSI and quality score data as produced by \code{vast-tools
#' combine}.
#'
#' @docType data
#' @name psi
#' @usage psi
#' @format A 5 x 22 data frame
#' @keywords datasets
NULL
#' Sample psiplot configuration settings for datasets \code{psi} and \code{crpkm}
#'
#' Example of how a psiplot configuration file should be formatted. This can be
#' passed to \code{\link{plot_event}}, \code{\link{plot_multievent}} or
#' \code{\link{plot_expr}} using the \code{config} argument.
#'
#' @docType data
#' @name config
#' @usage config
#' @format A 8 x 5 data frame
#' @keywords datasets
NULL
#' Sample genes with cRPKM data
#'
#' Contains sample cRPKM data as produced by \code{vast-tools combine}.
#'
#' @docType data
#' @name crpkm
#' @usage crpkm
#' @format A 6 x 10 data frame
#' @keywords datasets
NULL
#' Sample genes with cRPKM and counts data
#'
#' Contains sample cRPKM data and raw read counts
#'
#' @docType data
#' @name crpkm_counts
#' @usage crpkm_counts
#' @format A 6 x 18 data frame
#' @keywords datasets
NULL
|
bd545815750a924e01d973435fb24d86466b66ca
|
a9282dbec9a9698b88230a089bbaa458933a6945
|
/qtl/go_homo_readcross.R
|
2f6d387920bf110603825eaee3772a7fcbd1c8e6
|
[] |
no_license
|
dvalenzano/R-sessions
|
23cdd993be2ac741770bbc4aa35db0bbba484fb5
|
af9d6d2b98afe53cdab7d0d87598eae240af940f
|
refs/heads/master
| 2021-07-04T22:02:50.900279
| 2021-05-15T09:10:31
| 2021-05-15T09:10:31
| 24,762,137
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 137
|
r
|
go_homo_readcross.R
|
# Read the Go homozygote F2 cross from CSV with R/qtl (genotype codes 1/2/3)
# and cache the cross object as an .Rdata file for later sessions.
library(qtl)
Go_homo<-read.cross("csv", "~/", "Go_homoF2_converted.csv", genotypes=c("1", "2", "3"))
save(Go_homo, file="Go_homo.Rdata")
|
af3570dab6186ab6e1e31a9eb431136afd92ca7a
|
a9b45ca4a280b106d73e168c126599cc809fd9ff
|
/man/example.Rd
|
18a3a3839887601e976d40b5fa32daba5e6a34f6
|
[] |
no_license
|
cran/coefficientalpha
|
e8fd566f85f06226298a21c9504cbb99d005e527
|
0341d1c7121d0aacbb0e951374c38010d72d02c7
|
refs/heads/master
| 2023-08-31T14:45:12.497515
| 2023-08-27T06:50:07
| 2023-08-27T07:30:54
| 17,695,163
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 153
|
rd
|
example.Rd
|
\name{example}
\alias{example}
\docType{data}
\title{An example data set}
\usage{
data(example)
}
\description{
An example data set with 10 variables.
}
|
7532d697dbc898be33606bd83028ad4e79379e32
|
a7c789203a78f7c060ea27a56dfbd9b978e6b556
|
/man/npn_get_data_by_year.Rd
|
90cf772bb48948fa7e6df02ec9439ca329a02d51
|
[
"MIT"
] |
permissive
|
usa-npn/rnpn
|
ab7f7a3e9f7e6e24b61f46c207c6de58161b6a0d
|
cedaf06fd1194911d0452584c22d44b38f7f245e
|
refs/heads/master
| 2023-08-18T10:36:43.221260
| 2023-08-07T21:06:45
| 2023-08-07T21:06:45
| 2,175,238
| 5
| 2
|
NOASSERTION
| 2023-06-30T15:59:30
| 2011-08-08T18:57:41
|
R
|
UTF-8
|
R
| false
| true
| 1,251
|
rd
|
npn_get_data_by_year.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/npn_data_download.R
\name{npn_get_data_by_year}
\alias{npn_get_data_by_year}
\title{Get Data By Year}
\usage{
npn_get_data_by_year(
endpoint,
query,
years,
download_path = NULL,
six_leaf_layer = FALSE,
six_bloom_layer = FALSE,
agdd_layer = NULL,
six_sub_model = NULL,
additional_layers = NULL
)
}
\arguments{
\item{endpoint}{String, the endpoint to query.}
\item{query}{Base query string to use. This includes all the user selected parameters but doesn't include start/end date which will be automatically generated and
added.}
\item{years}{List of strings; the years for which to retrieve data. There will be one request to the service for each year}
\item{download_path}{String, optional file path to the file for which to output the results.}
}
\value{
Data table - a data table combining each requests results from the service.
}
\description{
Utility function to chain multiple requests to npn_get_data for requests where data should only be retrieved on an annual basis, or otherwise automatically be
delineated in some way. Results in a data table that's a combined set of the results from each request to the data service.
}
\keyword{internal}
|
97f08bd4ec20861c5524156a0dbb47543f27bba4
|
1459da78bda762c3cb31e8cdebf900f41be7d694
|
/Analysis/Summary_Error/extremes_predictionIntervals.R
|
eb75a07869d205d9b3e2b889210e580668932162
|
[] |
no_license
|
benee55/SharmaEtAl22
|
27147df2b639a8e43340d34a93452b957e4acfa6
|
43eca0ae1afa1eb2a6b5d5640aafe989a79aa14b
|
refs/heads/main
| 2023-04-14T13:49:20.965673
| 2022-11-30T06:40:22
| 2022-11-30T06:40:22
| 572,384,157
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,259
|
r
|
extremes_predictionIntervals.R
|
# NOTE(review): rm(list=ls()) / setwd() are session-specific conveniences;
# consider removing them if this script is ever sourced from elsewhere.
rm(list=ls())
setwd("~/Dropbox/FamosHydroModel/")
load("Official_Fast/input/fullObservations.RData") # Load Full observation
load("manuscript/revisionCode/resultsStremflow_calibration_full.RData")
load("manuscript/revisionCode/calibrationParameters.RData")
# Observation-error variance: posterior mean of the first calibrated parameter
obsErrVar<-mean(famosParMat[,1])
obsErr<-rnorm(n=nrow(famosParMat), mean=0, sd=sqrt(obsErrVar))
dateVect<-dateVect[obsInd]
famosOutput_pred<-famosOutput[,obsInd]+matrix(rnorm(n=length(famosOutput[,obsInd]), mean=0, sd=sqrt(obsErrVar)),
nrow=nrow(famosOutput[,obsInd]), ncol=ncol(famosOutput[,obsInd]))
precalibrationOutput_Window_pred<-precalibrationOutput_Window[,obsInd]+matrix(rnorm(n=length(precalibrationOutput_Window[,obsInd]), mean=0, sd=sqrt(obsErrVar)),
nrow=nrow(precalibrationOutput_Window[,obsInd]), ncol=ncol(precalibrationOutput_Window[,obsInd]))
handTuneOutput<-handTuneOutput[obsInd]
## Using Ming-Hui Chen's paper in Journal of Computational and Graphical Stats.
hpd <- function(samp,p=0.05){
  ## Approximate (1-p)*100% highest posterior density (HPD) interval from a
  ## posterior sample vector `samp`, following Chen & Shao (JCGS 1999):
  ## among all empirical intervals containing a (1-p) fraction of the sorted
  ## draws, return the shortest one.
  ##
  ## Returns a named numeric vector c(low = ..., high = ...).
  r <- length(samp)
  m <- trunc(p * r)            # number of candidate intervals to scan
  ## BUG FIX: the original looped over 1:trunc(p*r), which runs i = 1, 0 and
  ## errors on a zero-row matrix whenever length(samp)*p < 1. Fail loudly.
  if (m < 1) {
    stop("sample too small for the requested coverage: need length(samp)*p >= 1")
  }
  samp <- sort(as.numeric(samp))
  ## Offset to the upper endpoint. The original indexed samp[i + (1-p)*r];
  ## R truncates fractional subscripts toward zero, which for integer i is
  ## equivalent to adding floor((1-p)*r).
  off <- floor((1 - p) * r)
  ## Vectorized replacement of the original row-by-row matrix construction:
  lows  <- samp[seq_len(m)]
  highs <- samp[seq_len(m) + off]
  best  <- which.min(highs - lows)  # first narrowest interval, as before
  c(low = lows[best], high = highs[best])
}
par(mfrow=c(5,5), mar=c(4,4,2,1))
plot.new()
legend("center", legend=c("95% Prediction \n Interval","Observation"),
lty=c(2,1), col=c("black","blue"), cex=1, bty="n", y.intersp = 0.2)
for(k in 1:21){
plot(density(famosOutput_pred[,k]), col="black", main=dateVect[k],
xlab = "Streamflow")
abline(v=subsetFinalObs[k], col="blue",lwd=2)
# abline(v=handTuneOutput[k], col="red",lwd=2, lty=1)
abline(v=hpd(famosOutput_pred[,k]), col="black",lwd=2, lty=2)
# lines(density(precalibrationOutput_Window_pred[,k]), col="red")
}
save(dateVect , obsErrVar , obsErr ,
subsetFinalObs , famosOutput_pred ,famosOutput ,
precalibrationOutput_Window,precalibrationOutput_Window_pred,
handTuneOutput,
file="manuscript/revisionCode/predictionInterval_calibration.RData")
rm(list=ls())
setwd("~/Dropbox/FamosHydroModel/")
load("Official_Fast/input/fullObservations.RData") # Load Full observation
load("manuscript/revisionCode/resultsStremflow_validation_full.RData")
# Format Date
dateVect<-paste(sprintf("%04d",as.numeric(obs[,1])),
sprintf("%02d",as.numeric(obs[,2])),
sprintf("%02d",as.numeric(obs[,3])),sep="-")
dateVect<-as.Date(dateVect, format = "%Y-%m-%d")
validationDate<-dateVect[validationInd] # Extreme Dates
dateVect<-dateVect[which(dateVect=="2009-01-01"):which(dateVect=="2011-10-01")]
# observation Index
keepValidation<-which(dateVect%in%validationDate)
dateVect<-dateVect[keepValidation]
load("manuscript/revisionCode/calibrationParameters.RData")
obsErrVar<-mean(famosParMat[,1])
famosOutput_pred<-famosOutput[,keepValidation]+matrix(rnorm(n=length(famosOutput[,keepValidation]), mean=0, sd=sqrt(obsErrVar)),
nrow=nrow(famosOutput[,keepValidation]), ncol=ncol(famosOutput[,keepValidation]))
precalibrationOutput_Window_pred<-precalibrationOutput_Window[,keepValidation]+matrix(rnorm(n=length(precalibrationOutput_Window[,keepValidation]), mean=0, sd=sqrt(obsErrVar)),
nrow=nrow(precalibrationOutput_Window[,keepValidation]), ncol=ncol(precalibrationOutput_Window[,keepValidation]))
## Using Ming-Hui Chen's paper in Journal of Computational and Graphical Stats.
hpd <- function(samp,p=0.05){
  ## Approximate (1-p)*100% highest posterior density (HPD) interval from a
  ## posterior sample vector `samp`, following Chen & Shao (JCGS 1999):
  ## among all empirical intervals containing a (1-p) fraction of the sorted
  ## draws, return the shortest one.
  ##
  ## Returns a named numeric vector c(low = ..., high = ...).
  r <- length(samp)
  m <- trunc(p * r)            # number of candidate intervals to scan
  ## BUG FIX: the original looped over 1:trunc(p*r), which runs i = 1, 0 and
  ## errors on a zero-row matrix whenever length(samp)*p < 1. Fail loudly.
  if (m < 1) {
    stop("sample too small for the requested coverage: need length(samp)*p >= 1")
  }
  samp <- sort(as.numeric(samp))
  ## Offset to the upper endpoint. The original indexed samp[i + (1-p)*r];
  ## R truncates fractional subscripts toward zero, which for integer i is
  ## equivalent to adding floor((1-p)*r).
  off <- floor((1 - p) * r)
  ## Vectorized replacement of the original row-by-row matrix construction:
  lows  <- samp[seq_len(m)]
  highs <- samp[seq_len(m) + off]
  best  <- which.min(highs - lows)  # first narrowest interval, as before
  c(low = lows[best], high = highs[best])
}
par(mfrow=c(4,5), mar=c(4,4,2,1))
plot.new()
legend("center", legend=c("95% Prediction \n Interval","Observation"),
lty=c(2,1), col=c("black","blue"), cex=1, bty="n", y.intersp = 0.2)
for(k in 1:18){
plot(density(famosOutput_pred[,k]), col="black", main=dateVect[k],
xlab = "Streamflow")
abline(v=subsetFinalValidation[k], col="blue",lwd=2)
# abline(v=handTuneOutput[k], col="red",lwd=2, lty=1)
abline(v=hpd(famosOutput_pred[,k]), col="black",lwd=2, lty=2)
# lines(density(precalibrationOutput_Window_pred[,k]), col="red")
}
save(dateVect , obsErrVar , obsErr ,
subsetFinalObs , famosOutput_pred ,famosOutput ,
precalibrationOutput_Window,precalibrationOutput_Window_pred,
handTuneOutput,
file="manuscript/revisionCode/predictionInterval_validation.RData")
rm(list=ls())
setwd("~/Dropbox/FamosHydroModel/")
load("Official_Fast/input/fullObservations.RData") # Load Full observation
load("manuscript/revisionCode/resultsStremflow_validation_full.RData")
# Format Date
dateVect<-paste(sprintf("%04d",as.numeric(obs[,1])),
sprintf("%02d",as.numeric(obs[,2])),
sprintf("%02d",as.numeric(obs[,3])),sep="-")
dateVect<-as.Date(dateVect, format = "%Y-%m-%d")
validationDate<-dateVect[validationInd] # Extreme Dates
dateVect<-dateVect[which(dateVect=="2009-01-01"):which(dateVect=="2011-10-01")]
# observation Index
keepValidation<-which(dateVect%in%validationDate)
dateVect<-dateVect[keepValidation]
famosOutput_rep_var<-famosOutput_rep_var[,keepValidation]
famosOutput<-famosOutput[,keepValidation]
precalibrationOutput_MSE<-precalibrationOutput_MSE[,keepValidation]
precalibrationOutput_Window<-precalibrationOutput_Window[,keepValidation]
handTuneOutput<-handTuneOutput[keepValidation]
save(dateVect , subsetFinalValidation , famosOutput_rep_var,
famosOutput , precalibrationOutput_MSE , precalibrationOutput_Window,
handTuneOutput,
file="manuscript/revisionCode/resultsStremflow_validation_extreme.RData")
rm(list=ls())
|
e3737f822d9711d2889e38fd240ff040473ab212
|
294728c360c797cd9b8a2a7d28a2737eacb6b0c7
|
/man/find.H.Rd
|
8f2fd2580d409fe53c774d6befa85d707cdbcfa3
|
[] |
no_license
|
cran/CPHshape
|
c6c6c68c35b14a737408a27a55cd83de460d0730
|
c7adde3057f665cf290d06f1ca5f115c962c039d
|
refs/heads/master
| 2016-09-10T10:46:19.276881
| 2012-02-27T00:00:00
| 2012-02-27T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,126
|
rd
|
find.H.Rd
|
\name{find.H}
\alias{find.H}
\title{Calculate the cumulative hazard function}
\description{
This function calculates the cumulative hazard function at location \code{t} based on input from either \link[=find.shapeCPH]{find.shapeCPH} or \link[=find.shapeMLE]{find.shapeMLE}.}
\usage{find.H(t, h.val, h.ranges)}
\arguments{
\item{t}{time at which to evaluate the cumulative hazard (must be univariate)}
\item{h.val}{vector specifying changes of values in the hazard baseline MLE (output from e.g. \code{find.shapeCPH})}
\item{h.ranges}{vector specifying locations of changes in values in the hazard baseline MLE (output from e.g. \code{find.shapeCPH})}
}
\value{A number giving the value of the estimated cumulative hazard at t.}
\author{Rihong Hui and Hanna Jankowski <hkj@mathstat.yorku.ca>}
\seealso{\link[=find.shapeCPH]{find.shapeCPH} \link[=find.shapeMLE]{find.shapeMLE}}
\examples{
# random sample from the uniform density
n <- 500
x <- runif(n)
# compute MLE of increasing hazard
mle <- find.shapeMLE(x, type="increasing")
# find fitted cumulative hazard function at t=0.2
find.H(0.2, mle$h.val, mle$h.range)
}
|
af917bd0f0de21e5fd5afaa7a2d283c98a90d85a
|
470fc335d1b2a52bdc8b740c0f8b6c6ea55ee538
|
/man/gguka_line.Rd
|
9d12f69dd68a637576ce37d6ebab7e99fc43638d
|
[
"MIT"
] |
permissive
|
fanner018/Kansler
|
e95625e751363c72abbd2e61b68e48079ca8cde0
|
cda344a0f43e1341141cae673d6290d04bd55a78
|
refs/heads/main
| 2023-05-14T10:43:40.866539
| 2021-03-12T15:52:46
| 2021-03-12T15:52:46
| 373,612,006
| 0
| 0
|
NOASSERTION
| 2021-06-03T18:57:35
| 2021-06-03T18:57:34
| null |
UTF-8
|
R
| false
| true
| 506
|
rd
|
gguka_line.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Kansler grafik.R
\name{gguka_line}
\alias{gguka_line}
\title{UKA linjediagram 1}
\usage{
gguka_line(df, x = x, y = y, g = g, pal = "uka_1", format = Svensk_antal)
}
\arguments{
\item{df}{dataframe}
\item{x}{x-variabel}
\item{y}{y-variabel (num)}
\item{g}{gruppvariabel}
\item{pal}{fargpalett (se lista i uka_farg_paletter)}
\item{format}{format; default Svensk_antal}
}
\value{
linjediagram
}
\description{
använder theme_uka1
}
|
491872e92cbdaa2cb530ce6e18d03581e16f2f3c
|
8ad35c4567d61024c65737019651e55edd791045
|
/code/TESTE.R
|
3c0698d468543cb2f219e9a32895dfd6e270e72b
|
[
"Apache-2.0"
] |
permissive
|
rbteix/master-s-degree
|
6c7761e7b20d868f2a0fa9626c0f596c53b41126
|
5439782e021786c15579615db0d199baf6e6506f
|
refs/heads/main
| 2022-12-29T12:22:52.637466
| 2020-10-14T02:22:22
| 2020-10-14T02:22:22
| 303,877,794
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 5,124
|
r
|
TESTE.R
|
# Code to create a changepoint-detection (CPD) example for the dissertation.
# Simulated signal: three 250-point Gaussian segments with different means,
# i.e. two known changepoints at t = 250 and t = 500.
y_ts_CP <- ts(c(rnorm(250,mean=1,sd=.5), rnorm(250,mean=3,sd=1), rnorm(250,mean=2 ,sd=1))) # rand signal w\ changepoint
options(warn=-1)  # suppress warnings globally for the rest of the session
library(changepoint)
# Number of segments found by PELT mean-change detection for a given
# manual penalty: detected changepoints plus one.
cptfn <- function(data, pen) {
  fit <- cpt.mean(
    data,
    test.stat = "Normal",
    method = "PELT",
    penalty = "Manual",
    pen.value = pen
  )
  1 + length(cpts(fit))
}
# evaluate and plot results:
windows()
plot.new()
frame()
# run cptfn for the signal with a known change point
# Segment count across a grid of penalties -- the elbow of this curve is
# used below to choose the PELT penalty.
pen.vals <- seq(0, 12,.2)
elbowplotData <- unlist(lapply(pen.vals, function(p)
  cptfn(data = y_ts_CP, pen = p)))
# Raw simulated series.
plot.ts(y_ts_CP,type='l',col='red',
        xlab = "time",
        ylab = " Y(t)",
        main = "Change in mean signal")
# Elbow plot: segments found vs. penalty value.
plot(pen.vals,elbowplotData,
     xlab = "PELT penalty parameter",
     ylab = " ",
     main = " ")
penalty.val <- 8 # this value is determined from elbow plots
cptm_CP <- cpt.mean(y_ts_CP, penalty='Manual',pen.value=penalty.val,method='PELT')
cpts_CP <- cpts(cptm_CP) # change point time points
cpts_CP
windows()
#plot.new()
frame()
# Series with the fitted segment means overlaid.
plot(cptm_CP, ylab="Y", xlab="Tempo", col="gray", panel.first = grid(col = "gray"))
####################################ROC CURVE#################################
# Multi-class ROC/AUC for the three fitted classifiers (CART, random forest,
# SVM) on their respective test sets. The model objects (poda_fit,
# model.rf.tree, svm.tune.result) and test frames come from earlier scripts.
library(pROC)
cart.pred.Teste=predict(poda_fit, teste, type = "vector", ordered=T)
windows()
cart<-(multiclass.roc(teste$Regime, cart.pred.Teste, plot=F, col=4, main="", xlab="Especificidade", ylab="Sensibilidade"))
# # Data: multivariate predictor cart.pred.Teste with 3 levels of teste$Regime: 1, 2, 3.
# # Multi-class area under the curve:0.917
# Draw a legend.
legend(0.0, 0.2, c('CART', 'RF','SVM'), 4:6)
# add=TRUE draws on the existing chart
rf.pred.test <- predict(model.rf.tree, teste.rf, type = 'response', ordered=T)
windows()
rf <-multiclass.roc(teste.rf$Regime~as.numeric(rf.pred.test), col=5, plot=F)
# # Data: multivariate predictor rf.pred.test with 3 levels of teste.rf$Regime: 1, 2, 3.
# # Multi-class area under the curve: 0.9421
svm.pred.test <-predict(svm.tune.result$best.model, teste.svm, ordered=T )
windows()
# NOTE(review): `svm.Teste` is not defined in this script -- the SVM
# predictions computed just above are stored in `svm.pred.test`; confirm
# which object was intended before running.
svm <-multiclass.roc(teste.svm$Regime~as.numeric(svm.Teste) , plot=F, col=6)
# # Data: as.numeric(svm.Teste) with 3 levels of teste.svm$Regime: 1, 2, 3.
# # Multi-class area under the curve: 0.9268
#########################################################################
# Plot the curves together
#cart.pred=predict(fit, teste, type = "prob")
#
cart.rs <- cart[["rocs"]]
x11(width=20)
par(mfrow=c(1,3))
plot.roc(cart.rs[[1]], main="", col=5, xlab = "Especificidade", ylab="Sensibilidade")
legend(0.2, 0.2, c('CART', 'RF'), 5:6)
# sapply(2:length(cart.rs),function(i) lines.roc(cart.rs[[i]], col=i))
# # Draw a legend.
# legend(0.2, 0.2, c('R1', 'R2','R3'), 1:3)
rf.rs <- rf[["rocs"]]
plot.roc(rf.rs[[3]],add=T, col=6)
# sapply(2:length(rf.rs),function(i) lines.roc(rf.rs[[i]],col=i))
# legend(0.2, 0.2, c('R1', 'R2','R3'), 1:3)
svm.rs <- svm[["rocs"]]
plot.roc(svm.rs[[1]], main="", col=6, add=T)
sapply(2:length(svm.rs),function(i) lines.roc(svm.rs[[i]],col=i )) # plot the three classes together
####################################################
# Replicating train/test splits
##############################################
# Estimate hold-out accuracy of a tuned SVM on one random 70/30 split.
#
# dummies.balanced: balanced data frame with a `Regime` outcome column.
# svm.tune.result:  result of e1071::tune() whose $best.model is the
#                   fitted SVM.
# Returns the overall classification accuracy (proportion correct) on the
# 30% hold-out partition.
train_svm <- function(dummies.balanced, svm.tune.result) {
  # 70% training index, sampled without replacement
  # (spelled FALSE, not the reassignable `F`).
  train_idx <- sample.int(
    n = nrow(dummies.balanced),
    size = floor(0.7 * nrow(dummies.balanced)),
    replace = FALSE
  )
  # Only the hold-out rows are needed; the original also built the (unused)
  # training subset, which has been dropped.
  test_set <- dummies.balanced[-train_idx, ]
  # Predicted classes on the held-out rows from the tuned model.
  pred <- predict(svm.tune.result$best.model, type = "class", test_set)
  confusion <- table(test_set$Regime, pred)
  # Accuracy = sum of the diagonal of the proportion table (hit rate).
  sum(diag(prop.table(confusion)))
}
# NOTE(review): `train` is not defined in this script -- the helper above is
# `train_svm`, which takes the balanced dummy data and a tune() result, not a
# pruned CART fit. Given the boxplot title, this probably relied on a CART
# analogue defined elsewhere; confirm before running.
results_func <- replicate(100, train(data.gru.balanced, poda_fit))
windows()
boxplot(results_func, main="CART")
#####################################################
# Combined figures for the article: one panel per class, each panel
# overlaying the CART, RF and SVM ROC curves for that class.
###################################################
x11(width=20)
par(mfrow=c(1,3))
plot.roc(cart.rs[[1]], main="", col=5, xlab = "Especificidade", ylab="Sensibilidade", cex.lab=1.7)
plot.roc(rf.rs[[1]],add=T, col=6 )
plot.roc(svm.rs[[1]], add=T, col=7)
legend(0.2, 0.2, c('CART', 'RF', 'SVM'), 5:7, cex=1.5)
plot.roc(cart.rs[[2]], main="", col=5, xlab = "Especificidade", ylab="Sensibilidade", cex.lab=1.7)
plot.roc(rf.rs[[2]],add=T, col=6)
plot.roc(svm.rs[[2]], add=T, col=7)
legend(0.2, 0.2, c('CART', 'RF', 'SVM'), 5:7, cex=1.5)
plot.roc(cart.rs[[3]], main="", col=5, xlab = "Especificidade", ylab="Sensibilidade", cex.lab=1.7)
plot.roc(rf.rs[[3]],add=T, col=6)
plot.roc(svm.rs[[3]], add=T, col=7)
legend(0.2, 0.2, c('CART', 'RF', 'SVM'), 5:7, cex=1.5)
# sapply(2:length(cart.rs),function(i) lines.roc(cart.rs[[i]], col=i))
# # Draw a legend.
# legend(0.2, 0.2, c('R1', 'R2','R3'), 1:3)
# Swap the first two library paths (development convenience).
myPaths <- .libPaths() # get the paths
myPaths <- c(myPaths[2], myPaths[1]) # switch them
.libPaths(myPaths) # reassign them
|
4d65953506495716adac027513f90aa514111328
|
f7d8c513e5e4b81e710828281b9d198367c6313a
|
/Assign2Notes.R
|
b18b0be172c9c43be2488c555a58ee80c08bb3a2
|
[] |
no_license
|
alanfuller1219/ProgrammingAssignment2
|
a1beca77a8f3bc64ba94c282b6cb92f1fa045e38
|
e0d6fa012a572fa0515e06600e41674ba9c61977
|
refs/heads/master
| 2021-01-21T09:43:22.231258
| 2015-06-20T22:54:29
| 2015-06-20T22:54:29
| 37,777,287
| 0
| 0
| null | 2015-06-20T16:44:34
| 2015-06-20T16:44:34
| null |
UTF-8
|
R
| false
| false
| 1,092
|
r
|
Assign2Notes.R
|
## NOTE(review): scratch notes for Coursera Programming Assignment 2.
## This file interleaves runnable snippets with pasted console transcripts
## (lines starting with ">" or "[1] ...") and therefore does NOT parse as a
## whole -- it is reference material, not a script to source.
#do a bit of testing with an invertible matrix
v2 <- c(3,3.2)
v3 <- c(3.5,3.6)
mat7 <- cbind(v2,v3)
dim(mat7)
solve(mat7)
#returns the following
> v2 <- c(3,3.2)
> v3 <- c(3.5,3.6)
> mat7 <- cbind(v2,v3)
> dim(mat7)
[1] 2 2
> solve(mat7)
[,1] [,2]
v2 -9 8.75
v3 8 -7.50
>
## Test using the functions created for Homework assignment 2
## Critical NOTE: cacheSolve takes the output of makeCacheMatrix!
## It does not run directly on the invertible matrix!
## this test is valid:
temp1 <- makeCacheMatrix(mat7)
cacheSolve(temp1)
> temp1 <- makeCacheMatrix(mat7)
> cacheSolve(temp1)
[,1] [,2]
v2 -9 8.75
v3 8 -7.50
## This is not a valid test, and returns an error:
makeCacheMatrix(mat7)
cacheSolve(mat7)
## Examples from TA Alan Due from the Discussion Forum:
myvec <- c(1,2,3,4,5,6,7,8,9,10)
foo <- makeVector(myvec)
foo$get()
[1] 1 2 3 4 5 6 7 8 9 10
class(foo)
[1] "list"
foo$getmean()
NULL
foo[1]
#returned the following
$set
function (y)
{
x <<- y
m <<- NULL
}
fooE <- environment(foo$set)
fooE
ls(fooE)
|
5d6f9fe1099523def3885fe67deab6a0870beb8c
|
e4e07501cd6da34beb76abc3e82b5784db1fe011
|
/R/jzs_partcorSD.R
|
499183a1523675bb5a11ca331138188ee45534e4
|
[] |
no_license
|
MicheleNuijten/BayesMed
|
ff239c8b598e95546fd5bed5d79963c8489192e1
|
2a156ed80e81bb0065b6cee017d86e15d87ff9d1
|
refs/heads/master
| 2021-06-02T20:09:07.919564
| 2020-01-29T13:13:28
| 2020-01-29T13:13:28
| 7,775,559
| 10
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,396
|
r
|
jzs_partcorSD.R
|
jzs_partcorSD <-
  function(V1,V2,control,
           SDmethod=c("dnorm","splinefun","logspline","fit.st"),
           alternative=c("two.sided","less","greater"),
           n.iter=10000,n.burnin=500,
           standardize=TRUE){
    ## Bayesian test for the partial correlation of V1 and V2 controlling for
    ## `control`, using a JZS-style (Cauchy/inverse-gamma g) prior and the
    ## Savage-Dickey density ratio at beta = 0.
    ##
    ## Args:
    ##   V1, V2, control: numeric vectors of equal length.
    ##   SDmethod: how the posterior density of beta at 0 is estimated:
    ##     "dnorm" (normal approximation), "splinefun" / "logspline"
    ##     (nonparametric), or "fit.st" (fitted Student-t).
    ##   alternative: sidedness of the test.
    ##   n.iter, n.burnin: MCMC settings passed to jags().
    ##   standardize: z-standardize the three inputs first?
    ##
    ## Returns a list (classed "jzs_med") with the mean regression weight,
    ## the Bayes factor, the posterior probability, the posterior samples of
    ## beta, and the raw JAGS fit.
    runif(1) # defines .Random.seed
    if(standardize==TRUE){
      M <- (V1-mean(V1))/sd(V1)
      Y <- (V2-mean(V2))/sd(V2)
      X <- (control-mean(control))/sd(control)
    } else {
      M <- V1
      Y <- V2
      X <- control
    }
    n <- length(V1)
    #==========================================================
    # load JAGS models
    #==========================================================
    jagsmodelpartialcorrelation <-
    "####### Cauchy-prior on beta and tau' #######
    model
    {
    for (i in 1:n)
    {
    mu[i] <- intercept + theta[1]*x[i,1] + theta[2]*x[i,2]
    y[i] ~ dnorm(mu[i],phi)
    }
    # uninformative prior on intercept alpha,
    # Jeffreys' prior on precision phi
    intercept ~ dnorm(0,.0001)
    phi ~ dgamma(.0001,.0001)
    #phi ~ dgamma(0.0000001,0.0000001) #JAGS accepts even this
    #phi ~ dgamma(0.01,0.01) #WinBUGS wants this
    # inverse-gamma prior on g:
    g <- 1/invg
    a.gamma <- 1/2
    b.gamma <- n/2
    invg ~ dgamma(a.gamma,b.gamma)
    # Ntzoufras, I. (2009). Bayesian Modeling Using WinBUGS.
    # New Jersey: John Wiley & Sons, Inc. p. 167
    # calculation of the inverse matrix of V
    inverse.V <- inverse(V)
    # calculation of the elements of prior precision matrix
    for(i in 1:2)
    {
    for (j in 1:2)
    {
    prior.T[i,j] <- inverse.V[i,j] * phi/g
    }
    }
    # multivariate prior for the beta vector
    theta[1:2] ~ dmnorm( mu.theta, prior.T )
    for(i in 1:2) { mu.theta[i] <- 0 }
    }
    # Explanation-----------------------------------------------------------------
    # Prior on g:
    # We know that g ~ inverse_gamma(1/2, n/2), with 1/2 the shape parameter and
    # n/2 the scale parameter.
    # It follows that 1/g ~ gamma(1/2, 2/n).
    # However, BUGS/JAGS uses the *rate parameterization* 1/theta instead of the
    # scale parametrization theta. Hence we obtain, in de BUGS/JAGS rate notation:
    # 1/g ~ dgamma(1/2, n/2)
    # Also note: JAGS does not want [,] structure
    #-----------------------------------------------------------------------------
    "
    # Write the model definition to a temp file for jags().
    jags.model.file2 <- tempfile(fileext=".txt")
    write(jagsmodelpartialcorrelation,jags.model.file2)
    #==========================================================
    # BF FOR PARTIAL CORRELATION (MY|X)
    #==========================================================
    x <- cbind(X,M)
    y <- Y
    V <- solve(t(x)%*%x) #NB I switched to the notation from Ntzoufras, p. 167
    jags.data <- list("n", "x", "y", "V")
    jags.params <- c("theta")
    # Over-dispersed starting values for the three chains.
    jags.inits <- list(
      list(theta = c(0.0,0.3)), #chain 1 starting value
      list(theta = c(0.3, 0.0)), #chain 2 starting value
      list(theta = c(-.15,.15))) #chain 3 starting value
    jagssamples <- jags(data=jags.data, inits=jags.inits, jags.params,
                        n.chains=3, n.iter=n.iter, DIC=TRUE,
                        n.burnin=n.burnin, n.thin=1, model.file=jags.model.file2)
    # Posterior samples of the partial regression weight of M on Y given X.
    beta <- jagssamples$BUGSoutput$sims.list$theta[,2]
    #------------------------------------------------------------------
    if(SDmethod[1]=="fit.st"){
      # Density of a location-scale Student-t at x.
      mydt <- function(x, m, s, df) dt((x-m)/s, df)/s
      bar <- try({
        fit.t <- QRM::fit.st(beta)
        nu <- as.numeric(fit.t$par.ests[1]) #degrees of freedom
        mu <- as.numeric(fit.t$par.ests[2])
        sigma <- abs(as.numeric(fit.t$par.ests[3])) # This is a hack -- with high n occasionally
        # sigma switches sign.
      })
      if(!("try-error"%in%class(bar))){
        # BAYES FACTOR BETA: Savage-Dickey ratio of prior to posterior at 0.
        BF <- 1/(mydt(0,mu,sigma,nu)/dcauchy(0))
      } else {
        warning("fit.st did not converge, alternative optimization method was used.","\n")
        # Deviance (-2 log-likelihood) of a location-scale t; optimized below.
        mydt2 <- function(pars){
          m <- pars[1]
          s <- abs(pars[2]) # no negative standard deviation
          df <- abs(pars[3]) # no negative degrees of freedom
          -2*sum(dt((beta-m)/s, df,log=TRUE)-log(s))
        }
        res <- optim(c(mean(beta),sd(beta),20),mydt2)$par
        m <- res[1]
        s <- abs(res[2]) # mydt2 optimized over abs(s); recover the positive scale
        df <- abs(res[3]) # likewise for the degrees of freedom
        # ALTERNATIVE BAYES FACTOR PARTIAL CORRELATION
        # Bug fix: the original called mydt2(0,m,s,df), but mydt2 takes a
        # single parameter vector and returns a deviance, not a density.
        # The posterior density of beta at 0 under the fitted t is mydt().
        BF <- 1/(mydt(0,m,s,df)/dcauchy(0))
      }
      #-------------------------
    } else if(SDmethod[1]=="dnorm"){
      # Normal approximation to the posterior of beta.
      BF <- 1/(dnorm(0,mean(beta),sd(beta))/dcauchy(0))
      #-------------------------
    } else if(SDmethod[1]=="splinefun"){
      # Spline interpolation of the kernel density estimate.
      f <- splinefun(density(beta))
      BF <- 1/(f(0)/dcauchy(0))
      #-------------------------
    } else if (SDmethod[1]=="logspline"){
      fit.posterior <- polspline::logspline(beta)
      posterior.pp <- polspline::dlogspline(0, fit.posterior) # this gives the pdf at point b2 = 0
      prior.pp <- dcauchy(0) # height of prior at b2 = 0
      BF <- prior.pp/posterior.pp
    }
    #-------------------------------------------------------
    # one-sided test?
    # save BF for one-tailed test
    # BF21 = 2*{proportion posterior samples of beta < 0}
    propposterior_less <- sum(beta<0)/length(beta)
    propposterior_greater <- sum(beta>0)/length(beta)
    # posterior proportion cannot be zero, because this renders a BF of zero
    # none of the samples of the parameter follow the restriction
    # ergo: the posterior proportion is smaller than 1/length(parameter)
    if(propposterior_less==0){
      propposterior_less <- 1/length(beta)
    }
    if(propposterior_greater==0){
      propposterior_greater <- 1/length(beta)
    }
    BF21_less <- 2*propposterior_less
    BF21_greater <- 2*propposterior_greater
    if(alternative[1]=="less"){
      # BF10 = p(D|b~cauchy(0,1))/p(D|b=0)
      BF10 <- BF
      # BF21 = p(D|b~cauchy-(0,1))/p(D|b~cauchy(0,1))
      # BF21 = 2*{proportion posterior samples of beta < 0}
      BF21 <- BF21_less
      BF <- BF10*BF21
    } else if(alternative[1]=="greater"){
      # BF10 = p(D|b~cauchy(0,1))/p(D|b=0)
      BF10 <- BF
      # BF21 = p(D|b~cauchy+(0,1))/p(D|b~cauchy(0,1))
      # BF21 = 2*{proportion posterior samples of beta > 0}
      BF21 <- BF21_greater
      BF <- BF10*BF21
    }
    #---------------------------------------------------
    # convert BFs to posterior probability
    # prob cannot be exactly 1 or 0
    prob_b <- BF/(BF+1)
    if(prob_b == 1){
      prob_b <- prob_b - .Machine$double.eps
    }
    if(prob_b == 0){
      prob_b <- prob_b + .Machine$double.eps
    }
    #====================================================
    res <- list(PartCoef=mean(beta),
                BayesFactor=BF,
                PosteriorProbability=prob_b,
                beta_samples=beta,
                jagssamples=jagssamples)
    class(res) <- c("jzs_med","list")
    class(res$jagssamples) <- "rjags"
    class(res$beta_samples) <- "CI"
    return(res)
  }
|
7374233ce404a1f790520ebbf99077c5954d6630
|
f6a1375e6453107cba75567ec0c3ba23a5ac7958
|
/TopmedPipeline/tests/testthat/test_filterVariants.R
|
1c259ee66d3d09bc7a52b8a65e3e2b865f5011c7
|
[] |
no_license
|
UW-GAC/analysis_pipeline
|
7c04b61c9cafa2bcf9ed1b25c47c089f4aec0646
|
df9f8ca64ddc9995f7aef118987553b3c31301a1
|
refs/heads/master
| 2023-04-07T03:13:52.185334
| 2022-03-23T21:15:46
| 2022-03-23T21:15:46
| 57,252,920
| 42
| 30
| null | 2023-03-23T20:13:40
| 2016-04-27T22:25:56
|
R
|
UTF-8
|
R
| false
| false
| 5,810
|
r
|
test_filterVariants.R
|
# Unit tests for the variant-filtering helpers in TopmedPipeline.
# Fixtures are built from the SeqArray example GDS file and the
# example `segments` dataset.
context("filterVariants tests")
library(dplyr)
library(gdsfmt)
library(GenomicRanges)
# Open the SeqArray example GDS file, first closing any handles left over
# from a previous test.
.testData <- function() {
    showfile.gds(closeall=TRUE, verbose=FALSE)
    gdsfile <- seqExampleFileName("gds")
    seqOpen(gdsfile)
}
# Wrap the example GDS in a SeqVarData object with a random binary outcome
# per sample.
.testBinaryData <- function() {
    gds <- .testData()
    samp <- seqGetData(gds, "sample.id")
    annot <- AnnotatedDataFrame(data.frame(sample.id=samp,
                                           outcome=rbinom(length(samp), 1, 0.3),
                                           stringsAsFactors=FALSE))
    SeqVarData(gds, annot)
}
# Write the example `segments` GRanges to a temp tab-delimited file
# (chromosome/start/end) and return its path. Caller unlinks it.
.testSegFile <- function() {
    data(segments)
    seg.df <- as.data.frame(segments) %>%
        dplyr::rename(chromosome=seqnames) %>%
        dplyr::select(chromosome, start, end)
    segfile <- tempfile()
    write.table(seg.df, file=segfile, quote=FALSE, sep="\t", row.names=FALSE)
    segfile
}
# Build three small variant-include lists (two from chr 1, one from chr 2),
# each a data frame of variant.id/chromosome/position/allele.index.
.testVarList <- function(gds) {
    var.id <- seqGetData(gds, "variant.id")
    chr <- seqGetData(gds, "chromosome")
    pos <- seqGetData(gds, "position")
    ind1 <- which(chr == 1)[1:10]
    ind2 <- which(chr == 1)[11:20]
    ind3 <- which(chr == 2)[1:10]
    lapply(list(ind1, ind2, ind3), function(x) {
        data.frame(variant.id=var.id[x], chromosome=chr[x], position=pos[x], allele.index=1, stringsAsFactors=FALSE)
    })
}
# getSegments round-trips the segments written by .testSegFile.
test_that("getSegments", {
    segfile <- .testSegFile()
    seg2 <- getSegments(segfile)
    expect_equivalent(seg2, segments)
    unlink(segfile)
})
# subsetBySegment keeps exactly the variant lists falling in segment 1.
test_that("subsetBySegment", {
    gds <- .testData()
    varList <- .testVarList(gds)
    segfile <- .testSegFile()
    segments <- getSegments(segfile)
    exp <- sapply(varList, function(x) {
        chr <- x$chromosome[1]
        pos <- x$position[1]
        seg.chr <- as.character(seqnames(segments[1]))
        seg.start <- as.integer(BiocGenerics::start(segments[1]))
        seg.end <- as.integer(BiocGenerics::end(segments[1]))
        chr == seg.chr & pos >= seg.start & pos <= seg.end
    })
    ss <- subsetBySegment(varList, 1, segfile)
    expect_equivalent(varList[exp], ss)
    seqClose(gds)
    unlink(segfile)
})
# filterBySegment selects the variants overlapping segment 1.
test_that("filterBySegment", {
    gds <- .testData()
    segfile <- .testSegFile()
    segments <- getSegments(segfile)
    gr <- granges(gds)
    ol <- findOverlaps(gr, segments[1])
    filterBySegment(gds, 1, segfile, verbose=FALSE)
    expect_equal(sum(seqGetFilter(gds)$variant.sel), length(queryHits(ol)))
    seqClose(gds)
    unlink(segfile)
})
# filterByFile restricts to the IDs saved in an RData file, intersecting
# with any filter already in place.
test_that("filterByFile", {
    gds <- .testData()
    id <- seqGetData(gds, "variant.id")[1:100]
    idfile <- tempfile()
    save(id, file=idfile)
    filterByFile(gds, idfile, verbose=FALSE)
    expect_equal(sum(seqGetFilter(gds)$variant.sel), 100)
    seqResetFilter(gds, verbose=FALSE)
    seqSetFilter(gds, variant.sel=1:10, verbose=FALSE)
    filterByFile(gds, idfile, verbose=FALSE)
    expect_equal(sum(seqGetFilter(gds)$variant.sel), 10)
    seqClose(gds)
    unlink(idfile)
})
# filterByChrom restricts to one chromosome, respecting prior filters.
test_that("filterByChrom", {
    gds <- .testData()
    chr <- seqGetData(gds, "chromosome")
    filterByChrom(gds, 1, verbose=FALSE)
    expect_equal(sum(seqGetFilter(gds)$variant.sel), sum(chr == 1))
    seqResetFilter(gds, verbose=FALSE)
    seqSetFilter(gds, variant.sel=1:10, verbose=FALSE)
    filterByChrom(gds, 1, verbose=FALSE)
    expect_equal(sum(seqGetFilter(gds)$variant.sel), 10)
    seqClose(gds)
})
# filterByPass keeps only variants whose FILTER annotation is "PASS".
test_that("filterByPass", {
    gds <- .testData()
    filt <- seqGetData(gds, "annotation/filter")
    filterByPass(gds, verbose=FALSE)
    expect_equal(sum(seqGetFilter(gds)$variant.sel), sum(filt == "PASS"))
    seqResetFilter(gds, verbose=FALSE)
    seqSetFilter(gds, variant.sel=1:10, verbose=FALSE)
    filterByPass(gds, verbose=FALSE)
    expect_equal(sum(seqGetFilter(gds)$variant.sel), 10)
    seqClose(gds)
})
# filterBySNV keeps only biallelic SNVs.
test_that("filterBySNV", {
    gds <- .testData()
    snv <- isSNV(gds, biallelic=TRUE)
    filterBySNV(gds, verbose=FALSE)
    expect_equal(sum(seqGetFilter(gds)$variant.sel), sum(snv))
    seqResetFilter(gds, verbose=FALSE)
    seqSetFilter(gds, variant.sel=1:10, verbose=FALSE)
    filterBySNV(gds, verbose=FALSE)
    expect_equal(sum(seqGetFilter(gds)$variant.sel), 10)
    seqClose(gds)
})
# filterByMAF keeps variants with minor allele frequency >= maf.min.
test_that("filterByMAF", {
    gds <- .testData()
    freq <- seqAlleleFreq(gds)
    maf <- pmin(freq, 1-freq)
    filterByMAF(gds, maf.min=0.1, verbose=FALSE)
    expect_equal(sum(seqGetFilter(gds)$variant.sel), sum(maf >= 0.1))
    seqResetFilter(gds, verbose=FALSE)
    seqSetFilter(gds, variant.sel=1:10, verbose=FALSE)
    filterByMAF(gds, maf.min=0.1, verbose=FALSE)
    expect_equal(sum(seqGetFilter(gds)$variant.sel), sum(maf[1:10] >= 0.1))
    seqClose(gds)
})
# filterByMAC keeps variants with minor allele count >= mac.min; also checks
# the internal .calcMAC helper against reference/alternate counts.
test_that("filterByMAC", {
    gds <- .testData()
    cnt1 <- alleleCount(gds)
    cnt2 <- alleleCount(gds, n=1)
    cnt <- round(pmin(cnt1, cnt2))
    x <- .calcMAC(gds, sample.id=NULL)
    expect_equal(x, cnt)
    filterByMAC(gds, mac.min=5, verbose=FALSE)
    expect_equal(sum(seqGetFilter(gds)$variant.sel), sum(cnt >= 5))
    seqClose(gds)
})
# .minAltFreq returns the smallest nonzero non-reference frequency
# (NA when all alternates are absent).
test_that("minAltFreq", {
    f <- list(c(1,0.5,0.2), c(1,0,0), c(1,0.1,0,1))
    expect_equal(.minAltFreq(f), c(0.2,NA,0.1))
})
# filterByRare keeps observed variants with alternate frequency <= af.max.
test_that("filterByRare", {
    gds <- .testData()
    freq <- seqAlleleFreq(gds, ref.allele=1)
    filterByRare(gds, af.max=0.1, verbose=FALSE)
    expect_equal(sum(seqGetFilter(gds)$variant.sel), sum(freq > 0 & freq <= 0.1))
    seqClose(gds)
})
# filterByPCAcorr removes variants inside the known PCA-correlated regions.
test_that("filterByPCAcorr", {
    gds <- .testData()
    filt <- get(data(pcaSnpFilters.hg19, package="GWASTools"))
    filt.gr <- GRanges(seqnames=filt$chrom, ranges=IRanges(start=filt$start.base, end=filt$end.base))
    exp <- GenomicRanges::setdiff(granges(gds), filt.gr)
    filterByPCAcorr(gds, build="hg19", verbose=FALSE)
    expect_equal(length(GenomicRanges::setdiff(granges(gds), exp)), 0)
    seqClose(gds)
})
|
09f2085370ba7790736591ed1328e6cb38f6a5de
|
67dbc2b767ef8f3e57423a89edb27b5b59b0a9ef
|
/CT050/exploring_CRAN.R
|
c1ee8b2f6bf9558532843fdcced707d3dc4bdabb
|
[] |
no_license
|
marcusfreire0504/R_SocialNetworkAnalysis
|
8fe44af0eebecb6064e818a09fd76bf957016340
|
75a64e0e4daac64cb95c7ce18a7619950ed53a06
|
refs/heads/master
| 2021-09-04T19:21:43.281470
| 2018-01-21T17:06:20
| 2018-01-21T17:06:20
| null | 0
| 0
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 4,639
|
r
|
exploring_CRAN.R
|
# Build a co-authorship graph of CRAN package authors from the CRAN
# package database, then visualize its largest connected component.
library(needs)
needs(stringr)
needs(tidyverse)
needs(igraph) # graph manipulation
needs(tidygraph) # network visualization
needs(ggraph) # network visualization
# Fetch the CRAN package database once and cache it locally.
pdb <- tools::CRAN_package_db()
saveRDS(pdb,"./CT050/pdb.rds")
pdb <- readRDS("./CT050/pdb.rds")
pbaut <- pdb$Author
# Clean the free-text Author field into a per-package vector of author
# names; keep only packages with more than one author.
aut <- pbaut %>%
  str_replace_all("\\(([^)]+)\\)", "") %>% # removal
  str_replace_all("\\[([^]]+)\\]", "") %>% # removal
  str_replace_all("<([^>]+)>", "") %>% # removal
  str_replace_all("\n", " ") %>% # removal
  str_replace_all("[Cc]ontribution.* from|[Cc]ontribution.* by|[Cc]ontributors", " ") %>%
  str_replace_all("\\(|\\)|\\[|\\]", " ") %>% # removal
  iconv(to = "ASCII//TRANSLIT") %>% # strip special characters
  str_replace_all("'$|^'", "") %>% # cleanup
  gsub("([A-Z])([A-Z]{1,})", "\\1\\L\\2", ., perl = TRUE) %>%
  gsub("\\b([A-Z]{1}) \\b", "\\1\\. ", .) %>%
  map(str_split, ",|;|&| \\. |--|(?<=[a-z])\\.| [Aa]nd | [Ww]ith | [Bb]y ", simplify = TRUE) %>%
  map(str_replace_all, "[[:space:]]+", " ") %>%
  map(str_replace_all, " $|^ | \\.", "") %>%
  map(function(x) x[str_length(x) != 0]) %>%
  set_names(pdb$Package) %>%
  magrittr::extract(map_lgl(., function(x) length(x) > 1))
# Count, per author, the number of packages they appear in.
aut_list <- aut %>%
  unlist() %>%
  dplyr::as_data_frame() %>%
  count(value) %>%
  rename(Name = value, Package = n)
edge_list <- aut %>%
  map(combn, m = 2) %>% # for each package, all author pairs (2-combinations)
  do.call("cbind", .) %>%
  t() %>%
  dplyr::as_data_frame() %>%
  arrange(V1, V2) %>%
  count(V1, V2)
g <- edge_list %>%
  select(V1, V2) %>%
  as.matrix() %>%
  graph.edgelist(directed = FALSE) %>%
  as_tbl_graph() %>% # tidygraph wrapper around the igraph object
  activate("edges") %>% # tbl_graph is two linked tables (edges/nodes); activate picks one
  mutate(Weight = edge_list$n) %>% # edge weight = number of shared packages
  activate("nodes") %>% # now manipulate the nodes
  rename(Name = name) %>% # node name = author name
  mutate(Component = group_components()) %>%
  filter(Component == names(table(Component))[which.max(table(Component))])
# ggraph(g, layout="lgl") +
#   geom_edge_fan(alpha=0.1)+
#   theme_graph()
g <- g %>%
  left_join(aut_list, by="Name") %>%
  filter(Package > 5) %>% # keep authors with >5 packages (NOTE(review): original comment said "at least 4" -- mismatch)
  mutate(Component = group_components()) %>%
  filter(Component == names(table(Component))[which.max(table(Component))])
ggraph(g, layout = 'lgl') +
  geom_edge_fan(alpha = 0.1) +
  theme_graph()
# Detect communities (edge betweenness) and compute node degree.
g <- mutate(g, Community = group_edge_betweenness(),
            Degree = centrality_degree())
# Top-10 most prolific authors in the largest community.
filter(g, Community == names(sort(table(Community), decr = TRUE))[1]) %>%
  select(Name, Package) %>%
  arrange(desc(Package)) %>%
  top_n(10, Package) %>%
  as_tibble() %>%
  knitr::kable(format = "html", caption = "Cluster 1")
# Top-10 authors in the second-largest community.
filter(g, Community == names(sort(table(Community), decr = TRUE))[2]) %>%
  select(Name, Package) %>%
  arrange(desc(Package)) %>%
  top_n(10, Package) %>%
  as_tibble() %>%
  knitr::kable(format = "html", caption = "Cluster 2")
# Top-10 authors in the third-largest community.
# Fix: the caption was copy-pasted as "Cluster 2"; it labels the third cluster.
filter(g, Community == names(sort(table(Community), decr = TRUE))[3]) %>%
  select(Name, Package) %>%
  arrange(desc(Package)) %>%
  top_n(10, Package) %>%
  as_tibble() %>%
  knitr::kable(format = "html", caption = "Cluster 3")
# Give the four largest communities human-readable names; everything else
# is "Unclassified".
g <- g %>%
  mutate(Community = case_when(Community == names(sort(table(Community),
                                                       decr = TRUE))[1] ~ "The Ancients",
                               Community == names(sort(table(Community),
                                                       decr = TRUE))[2] ~ "The Moderns",
                               Community == names(sort(table(Community),
                                                       decr = TRUE))[3] ~ "Suicide Squad",
                               Community == names(sort(table(Community),
                                                       decr = TRUE))[4] ~ "The Immortals",
                               Community %in% names(sort(table(Community),
                                                         decr = TRUE))[-1:-4] ~ "Unclassified")) %>%
  mutate(Community = factor(Community))
# Drop low-degree nodes, then recompute degree on the pruned graph.
g <- g %>%
  filter(Degree > 5) %>%
  mutate(Degree = centrality_degree())
# Final plot: nodes colored by community, sized by package count.
# NOTE(review): scale_color_manual supplies 5 values for 4 breaks -- the
# fifth value presumably covers "Unclassified"; confirm the mapping.
ggraph(g, layout = 'lgl') +
  geom_edge_fan(alpha = 0.1) +
  geom_node_point(aes(color = Community, size = Package)) +
  theme_graph() +
  scale_color_manual(breaks = c("The Ancients", "The Moderns", "Suicide Squad", "The Immortals"),
                     values=c("#F8766D", "#00BFC4", "#969696", "#FF0000", "#00FF00"))
|
b78ecb3d71217ef32220413f41609c276e4be195
|
b06013d50cf47363e68006960d9146d1e58621d2
|
/Practica1/Ejercicio1/Ejercicio1.R
|
bb9678608550fb98cc1e806ccfccb4ecbbe05158
|
[] |
no_license
|
jlorenzor/Proyecto-de-investigacion-CM274
|
a5117abd89c6ff907298c9fd42ba9e5fa07dc82f
|
2504c7aebe4548d2a8be4db23bbb84831d308c43
|
refs/heads/master
| 2021-09-16T21:05:45.963728
| 2018-06-25T05:54:48
| 2018-06-25T05:54:48
| 286,544,210
| 1
| 0
| null | 2020-08-10T17:56:49
| 2020-08-10T17:56:48
| null |
UTF-8
|
R
| false
| false
| 1,415
|
r
|
Ejercicio1.R
|
# Name: Miguel Angel Oviedo Rodriguez 20131463I
# Answer (a)
# seq() generates a sequence of numbers with a step of 0.3
seq(5, -11, by = -0.3)
# The last number is not -11, since -11 is not a term of that progression;
# however, it can be obtained with the following trick:
c(-15:33)*(-1/3)
# Answer (b)
# To reverse the sequence, swap the order of the limits
seq(-11, 5, by = 0.3)
c(33:-15)*(-1/3)
# Answer (c)
# Store the vector in a variable v, then use rep(), whose arguments give the
# vector to replicate, how many times to repeat the whole vector, and how
# many times to repeat each element, respectively. The result is stored in
# res; order() returns the indices that sort the elements ascending, and
# rev() reverses that to obtain a descending order.
v <- c(-1,3,-5,7,-9)
res <- rep(v, times = 2, each = 10)
res[order(res)]
rev(res[order(res)])
# Answer (d)
# Build one vector per condition and concatenate everything with c()
seq_ent <- seq(6, 12)
seq_rep <- rep(5.3, times = 3)
lim <- length(rev(res[order(res)]))
# -0.25 is obtained by working out an arithmetic progression
seq_9 <- seq(102, lim, by = -0.25)
vector <- c(seq_ent, seq_rep, -3, seq_9)
vector
length(vector) # [1] 20
|
2f0864d82a6a75d6b6f2e78abd4548af0197ca25
|
5189800985b464babdcc822978bc80954d3c70c4
|
/tests/timeseries.R
|
3fed8a08d5f0ad165c63fcd161c2f2d0678644b2
|
[] |
no_license
|
iNZightVIT/dev
|
7bcfdd091bd58f87b7417250669f73892f0575cb
|
36ce74ef40f9c8f220d3a8ae6c2d763bbe025b71
|
refs/heads/master
| 2021-08-01T07:48:32.069245
| 2021-07-21T01:21:06
| 2021-07-21T01:21:06
| 16,119,296
| 0
| 2
| null | 2019-09-23T21:57:30
| 2014-01-21T21:19:41
|
R
|
UTF-8
|
R
| false
| false
| 1,880
|
r
|
timeseries.R
|
# Interactive development/test script for the iNZightTS package: loads the
# local package source and exercises the plotting functions on the example
# visitor datasets.
library(devtools)
#library(tidyverse)
##install_github("iNZightVIT/iNZightTS@dev")
##library(iNZightTS)
load_all("~/iNZight/iNZightTS")
document("~/iNZight/iNZightTS")
data("visitorsQ")
data("visitorsA2")
load_all("~/iNZight/iNZightTS")
# Single-series plots (annual and quarterly).
ta <- iNZightTS(visitorsA2, var = "Australia")
P1 <- plot(ta, t = 20, ylab = "Visitors")
tq <- iNZightTS(visitorsQ, var = "Australia")
P2 <- plot(tq, t = 20, ylab = "Visitors", title = "%var")
d <- decompositionplot(tq, t = 10)
recompose(d)
# Multi-series plots.
load_all("~/iNZight/iNZightTS")
ta2 <- iNZightTS(visitorsA2, var = colnames(visitorsA2)[2:3])
s <- plot(ta2, title = "Visitors from %var", ylab = "Visitors",
          multiplicative = FALSE)
load_all("~/iNZight/iNZightTS")
tq2 <- iNZightTS(visitorsQ, var = colnames(visitorsQ)[2:5])
s <- plot(tq2, title = "Visitors from %var", ylab = "Visitors",
          multiplicative = FALSE)
load_all("~/iNZight/iNZightTS")
plot(ta2, compare = F)
load_all("~/iNZight/iNZightTS")
plot(tq2, compare = F, ylab = "Visitors")
plot(tq2, compare = F, ylab = "Visitors", multiplicative = T)
# Scratch assignments, apparently for stepping through function internals
# by hand (they shadow the arguments x / multiplicative / t).
x = tq
multiplicative = FALSE
t = 0
rawplot(tq2)
# Build a plain data frame from the time-series object for ggplot.
# Fix: the Visitors argument sat on an orphan line with an unmatched paren,
# a syntax error; folded into a single data.frame() call.
# NOTE(review): `ts` here must be an iNZightTS object (e.g. `tq`), not the
# stats::ts constructor -- confirm which object was intended.
data <- data.frame(Date = as.numeric(time(ts$tsObj)),
                   Visitors = as.matrix(ts$tsObj))
# Animated build-up of a time series: reveal one more observation per frame,
# then finish with the full line plus a smoother overlay.
# `data` must have numeric Date and Visitors columns.
tsPlot <- function(data) {
  base <- ggplot(data, aes(x = Date, y = Visitors)) +
    geom_point()
  base
  for (k in seq_len(nrow(data))) {
    dev.hold()
    # Draw the line up to (and including) observation k.
    print(base + geom_line(data = data[1:k, ]))
    dev.flush()
    # Slow frames for the first nine points, then speed up.
    Sys.sleep(if (k <= 9) 0.6 else 0.05)
  }
  # Final frame: full series with a smoother in red.
  final <- ggplot(data, aes(x = Date, y = Visitors)) +
    geom_line() +
    geom_smooth(se = FALSE, col = "red", span = 0.5)
  dev.hold()
  print(final)
  dev.flush()
}
## the smoother isn't the correct type, but that would be easy enough
## to grab from the current plot
dev.new(width = 10, height = 3)
tsPlot(data)
## versus
# NOTE(review): `ts` below refers to the iNZightTS object used earlier, not
# the stats::ts constructor -- confirm the intended object.
rawplot(ts, t = 20, animate = TRUE)
|
de2e83ec6f92df4ad259372e6d9c7182c322594c
|
5e832862b2e36be6ba27e874e98499bc399de699
|
/man/parameter.update.Rd
|
4a4fc7502a3a721bcdf314aade272a9e133cfe32
|
[] |
no_license
|
dmgatti/DOQTL
|
c5c22306053ddbd03295207702827cf2a715bb70
|
a1a4d170bf5923ca45689a83822febdb46ede215
|
refs/heads/master
| 2021-01-17T02:08:27.831277
| 2019-05-24T19:22:35
| 2019-05-24T19:22:35
| 13,506,518
| 15
| 12
| null | 2019-02-27T13:46:31
| 2013-10-11T18:33:24
|
R
|
UTF-8
|
R
| false
| false
| 1,840
|
rd
|
parameter.update.Rd
|
\name{parameter.update}
\alias{parameter.update.alleles}
\alias{parameter.update.intensity}
\title{
Parameter updating in HMM
}
\description{
Not intended for external use.
}
\usage{
parameter.update.alleles(geno, b, pseudocounts, prsmth)
parameter.update.intensity(data, params, prsmth, founder.means)
}
\arguments{
\item{geno}{
Data.frame containing allele calls.
}
\item{b}{
Three dimensional numeric array containing emission probabilities.
}
\item{pseudocounts}{
Three dimensional numeric array containing pseudocounts for updating.
}
\item{prsmth}{
Three dimensional numeric array containing posterior genotype probabilities.
}
\item{data}{
A list with named elements containing the information needed to reconstruct genomes.
When method = intensity:
x: Numeric matrix, num.samples x num.snps, with X intensities for all samples. Sample IDs and SNP IDs must be in rownames and colnames.
y: Numeric matrix, num.samples x num.snps, with Y intensities for all samples. Sample IDs and SNP IDs must be in rownames and colnames.
sex: Character vector, containing "M" or "F" indicating sex. Sample IDs must be in names.
gen: Character matrix containing the generation of DO outbreeding for each sample. For the DO, this should be "DO" followed by a number with no space between them. For CC mice, this should be CC. Sample IDs must be in names.
}
\item{params}{
List containing two elements:
r.t.means: three dimensional numeric array containing the genotype cluster means at each marker.
r.t.covars: three dimensional numeric array containing the genotype cluster variances at each marker.
}
\item{founder.means}{
Numeric matrix containing the founder intensity means for each marker.
}
}
\author{
Daniel Gatti
}
\keyword{ internal }
|
e64546be85709f2697d96b8b3e09c4c18c6db6aa
|
1dfac533dc072be28012b6919b479449986c811a
|
/Plot3.R
|
d464dbad00adc18b3097a73d67c1fda01821bc93
|
[] |
no_license
|
thimmaru/Exploratory-data-analysis_Course-project-2
|
db04ddc3cacd97766692710b914c49ecf26d5f0c
|
22c02b9e16cbadf640cdb5e216fae49c532b4cde
|
refs/heads/master
| 2020-05-21T18:55:54.467861
| 2019-05-11T14:45:56
| 2019-05-11T14:45:56
| 186,143,313
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,346
|
r
|
Plot3.R
|
# Course project 2, Plot 3: PM2.5 emissions in Baltimore City by source type.
# Set working directory (assignment setup; note setwd() makes this script
# machine-specific).
setwd("C:\\Users\\TRUDR\\OneDrive - Monsanto\\Migrated from My PC\\Desktop\\Data\\Ex._data\\Course project 2")
# Step 0. Create the data directory, download the dataset and unzip it.
if(!file.exists("./data")){dir.create("./data")}
# Load ggplot2 for plotting.
library(ggplot2)
# URL of the source dataset for the project:
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download.file(fileUrl,destfile="./data/Dataset.zip")
# Unzip the dataset into ./data.
unzip(zipfile="./data/Dataset.zip",exdir="./data")
## Read the emissions data and the source classification codes.
EPANEI <- readRDS("./data/summarySCC_PM25.rds")
SCC <- readRDS("./data/Source_Classification_Code.rds")
# Subset to Baltimore City, Maryland (fips == "24510").
BaltimoreNEI <- EPANEI[EPANEI$fips == "24510", ]
# Yearly emission totals for Baltimore.
# NOTE(review): this aggregate is computed but never used below -- the plot
# works from the raw subset; kept for reference.
agregdatBaltimore <- aggregate(Emissions~year,data = BaltimoreNEI,FUN = sum)
# Create the plot and store it as a PNG file.
# (Removed a stray trailing comma in the png() call, which passed an empty
# argument.)
png(filename = "Plot3.png", width = 750, height = 602, units = "px")
g <- ggplot(data = BaltimoreNEI, aes(factor(year), Emissions, fill = type)) +
  geom_bar(stat = "identity") +
  facet_grid(facets = .~type,scales = "free",space = "free") +
  labs(x="Year", y=expression("Total PM"[2.5]*" Emission (Tons)")) +
  labs(title=expression("PM"[2.5]*" Emissions, Baltimore City 1999-2008 by Source Type"))
print(g)
dev.off()
|
e5377359028de55c76f0a7779530b69573d4e1e7
|
c3ed0eea77de3338cc4820ca27dc04384676b29b
|
/man/bin_values.Rd
|
f7ebbc97c64e250c98bd82518e482896586aa4b5
|
[] |
no_license
|
jrboyd/seqtsne
|
f3bd94ee23140dc71373af4f1e7ce9ffe60d702f
|
5a67cbe5af281ec42906689d1a9961d8fe9de68d
|
refs/heads/master
| 2022-10-30T18:33:13.005047
| 2022-10-28T20:22:58
| 2022-10-28T20:22:58
| 177,857,441
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 504
|
rd
|
bin_values.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions_util.R
\name{bin_values}
\alias{bin_values}
\title{bin_values}
\usage{
bin_values(x, n_bins, xrng = range(x))
}
\arguments{
\item{x}{Values to assign to bins}
\item{n_bins}{Number of bins to assign values to}
\item{xrng}{Optional numeric of length 2. Defines the domain to define bins
for. Defaults to range(x).}
}
\value{
Bin assignments parallel to x.
}
\description{
bin_values
}
\examples{
bin_values(0:10, 3)
}
|
55d9a731ef9a071a111b358364771f89149350f7
|
be76339484a140ae82ab62c5176bdd5822563061
|
/performance_measures.R
|
288fbf9d758057691bdac9f38e534b7f22e873de
|
[] |
no_license
|
majormajor2/Couponing-Challenge-HU
|
4007d3075356712f4b46cb50a8e69b16200e41fb
|
51bfc73efcb1d7206d8dc700929f932b1cece388
|
refs/heads/master
| 2020-03-28T11:36:46.454852
| 2017-02-15T20:11:32
| 2017-02-15T20:11:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,211
|
r
|
performance_measures.R
|
#### Model Performance Measures ####
# Helper function to compute measures of predictive accuracy
# Compute a bundle of predictive-accuracy metrics for a binary classifier.
#
# Args:
#   y          Factor of observed classes (two levels; "yes" is treated as
#              the positive class by the cost computation below).
#   prediction Numeric vector of predicted probabilities, same length as y.
#   cutoff     Probability threshold used to turn scores into hard classes.
#   returnH    Keep the full HMeasure object in the result?
#   shallPlot  Draw the ROC curve via plotROC()?
#
# Returns a named list: Brier score, classification error, H-measure, AUC,
# Gini, precision, confusion-matrix counts, average return per customer,
# expected cost, and (when returnH) the HMeasure object.
predictive_performance = function(y=NULL, prediction=NULL, cutoff=.5, returnH = TRUE, shallPlot = TRUE)
{
  n <- length(y)
  if (n != length(prediction)) {
    stop("Data vector and prediction vector must have same length!")
  }
  # Brier score: mean squared difference between the 0/1 outcome and the
  # predicted probability. Factor codes start at 1, hence the "- 1".
  brier <- sum(((as.numeric(y) - 1) - prediction)^2) / n
  # Hard classification at the cutoff, labelled with the levels of y.
  hard_class <- factor(as.numeric(prediction >= cutoff), levels = c(0, 1), labels = levels(y))
  class_err <- 1 - sum(y == hard_class) / n
  # Area under the ROC curve (pROC).
  auc_value <- as.numeric(roc(response = y, predictor = prediction)$auc)
  # H-measure and related scalar metrics (hmeasure package).
  H <- HMeasure(y, prediction, threshold = cutoff, severity.ratio = 3/10)
  m <- H$metrics
  # Average return per customer: +3 for each true negative, -10 for each
  # false negative, over all customers.
  avg_ret <- (3 * m$TN - 10 * m$FN) / (m$TP + m$FP + m$TN + m$FN)
  # Expected cost per customer under the same asymmetric payoff.
  cost <- mean(ifelse(y == "yes", 10 * (1 - prediction), 3 * (prediction)))
  # Draw the ROC curve if requested.
  if (shallPlot) {
    plotROC(results = H)
  }
  out <- list(brier_score = brier,
              classification_error = class_err,
              h_measure = m$H,
              area_under_curve = auc_value,
              gini = m$Gini,
              precision = m$Precision,
              true_positives = m$TP,
              false_positives = m$FP,
              true_negatives = m$TN,
              false_negatives = m$FN,
              avg_return = avg_ret,
              exp_cost = cost,
              H = H)
  # Drop the (large) HMeasure object unless the caller wants it.
  if (!returnH) {
    out$H <- NULL
  }
  return(out)
}
## Function to construct a cost matrix
# Build a 2x2 misclassification-cost matrix from class-benefit values
# (CB*: benefits of true/false negatives/positives). The diagonal is
# normalised to zero, so each off-diagonal entry is the cost of that error
# relative to the corresponding correct decision.
#
# Returns a numeric matrix with dimnames c("noreturn", "return").
build_cost_matrix = function(CBTN = +3, CBFN = -10, CBFP = 0, CBTP = 0)
{
  labels <- c("noreturn", "return")
  # Row = true class, column = predicted class; costs relative to the
  # correct decision on the diagonal.
  matrix(
    c(0,           CBFN - CBTP,
      CBFP - CBTN, 0),
    nrow = 2, ncol = 2, byrow = TRUE,
    dimnames = list(labels, labels)
  )
}
# Function to return the optimal cutoff
# given a target vector of factors and a vector of predictions as probabilities.
# Returns a number.
# Find the cost-minimising probability cutoff for a binary classifier
# (OptimalCutpoints, method "MCT").
#
# Args:
#   target       Factor of observed classes.
#   prediction   Numeric vector of predicted probabilities.
#   cost_matrix  2x2 cost matrix (defaults to build_cost_matrix()).
#   tag_false    Level of `target` that marks the negative ("healthy") class.
#
# Returns the cutoff value(s) reported by optimal.cutpoints.
optimal_cutoff = function(target, prediction, cost_matrix = build_cost_matrix(), tag_false = "no")
{
  scored <- data.frame(target = target, prediction = prediction)
  # control.cutpoints expects positive misclassification costs, hence the
  # sign flips on the (negative) cost-matrix entries.
  ctrl <- control.cutpoints(
    CFP = -cost_matrix[2, 1],
    CFN = -cost_matrix[1, 2],
    costs.ratio = -cost_matrix[2, 1] / -cost_matrix[1, 2],
    weighted.Kappa = TRUE
  )
  fit <- optimal.cutpoints(
    X = "prediction",
    status = "target",
    tag.healthy = tag_false,
    methods = "MCT",
    data = scored,
    control = ctrl
  )
  # Extract the optimal cutoff from the fitted object.
  fit$MCT$Global$optimal.cutoff$cutoff
}
### Custom function for train.control
# use avg retrun as metric to choose the model
# input: dataframe with predictions
# output: average return
# caret summaryFunction: pick the probability cutoff that maximises the
# average return per customer and report that return.
#
# `data` is the data.frame caret passes to a summaryFunction: observed
# classes in data$obs ("yes"/"no") and the predicted probability of "yes"
# in data$yes. `lev` and `model` belong to the summaryFunction signature
# and are unused here.
#
# Returns a named numeric vector c(avg_return, optimal.cutoff).
# NOTE(review): relies on build_cost_matrix()/predictive_performance() from
# this file and on "helper.R" being present in the working directory.
revenue_maximization = function(data, lev = NULL, model = NULL)
{
  # load inside function to ensure that with parallel computing workers have it in their environment
  if(!require("OptimalCutpoints")) install.packages("OptimalCutpoints"); library("OptimalCutpoints")
  source("helper.R")
  # GET COST MATRIX
  cost.matrix <- build_cost_matrix()
  # MODEL.CONTROL
  # method: maxKappa
  # control.cutpoints expects positive misclassification costs, hence the
  # sign flips on the (negative) cost-matrix entries.
  model.control.optc = control.cutpoints(CFP = -cost.matrix[2,1], CFN = -cost.matrix[1,2], costs.ratio = -cost.matrix[2,1]/-cost.matrix[1,2], weighted.Kappa = TRUE)
  # RUN OPTIMAL CUTPOINTS
  oc = optimal.cutpoints(
    X = "yes",
    status = "obs",
    tag.healthy = "no",
    methods = "MCT",
    data = data,
    control = model.control.optc)
  # SELECT OPTIMAL CUTPOINT
  # define temporary dataframes to store cutoffs
  df <- data.frame(cutoff = oc$MCT$Global$optimal.cutoff$cutoff)
  # check if cutpoint unique: optimal.cutpoints may return several tied
  # cutoffs, so score the average return of each one
  for(index in 1:length(oc$MCT$Global$optimal.cutoff$cutoff)){
    # optimal cutpoint
    # NOTE(review): predictive_performance() is called with its default
    # shallPlot = TRUE here, so an ROC curve is drawn on every evaluation
    # — confirm this is intended inside resampling.
    df[index,"avg_return"] <- predictive_performance(data[,"obs"], prediction = data[,"yes"], cutoff = df[index,"cutoff"], returnH = FALSE)$avg_return
  }
  # Choose cutoff that maximises avg return
  # NOTE(review): ties yield a vector-valued opt.cutoff — confirm downstream
  # handling.
  opt.cutoff <- df[df$avg_return == max(df$avg_return), "cutoff"]
  # Calculate average return
  avg_return <- predictive_performance(y = data$obs, prediction = data$yes, cutoff = opt.cutoff, returnH = FALSE)$avg_return
  # name metrics
  names(avg_return) <- "avg_return"
  names(opt.cutoff) <- "optimal.cutoff"
  # OUTPUT
  return(c(avg_return, opt.cutoff))
}
### Custom function for trainControl
# use expected cost as metric to choose the tune hyperparameters of the model
# input: dataframe with true values and predictions
# output: expected cost per customer
# caret summaryFunction: mean expected cost per customer.
#
# `data` must carry the observed classes in data$obs ("yes"/"no") and the
# predicted probability of "yes" in data$yes; `lev` and `model` belong to
# the caret summaryFunction signature and are not used here.
#
# Returns a named numeric of length 1 ("exp_cost").
cost_minimization = function(data, lev = c("no","yes"), model = NULL)
{
  # Costs of a false positive and a false negative.
  cost_fp <- -3
  cost_fn <- -10
  # Per-customer expected cost, weighted by the predicted probability:
  # a true "yes" costs cost_fn times the probability of missing it,
  # a true "no" costs cost_fp times the probability of flagging it.
  per_customer <- ifelse(
    data$obs == "yes",
    cost_fn * (1 - data$yes),
    cost_fp * data$yes
  )
  avg <- mean(per_customer)
  names(avg) <- "exp_cost"
  avg
}
# This function chooses the best tune that generalizes best to other folds
# out of a selection of models from nested cross-validation
# Input: a list of model objects from nested x-val functions
# Output: data.frame with hyperparameters for final fitting
# Pick the hyperparameter set that generalises best across the outer folds
# of a nested cross-validation.
#
# Args:
#   models  List of per-fold results; each element holds $model (a caret
#           train object exposing $bestTune) and $prediction (probabilities
#           for the held-out fold).
#
# Relies on the globals `known` and `fold_membership` to recover each
# fold's true outcomes, and on optimal_cutoff()/predictive_performance()
# defined in this file.
#
# Returns a one-row data.frame of hyperparameters for the final fit.
choose_best_tune = function(models)
{
  # Start below any attainable return so the first fold always wins.
  # (The original initialisation to 0 left `hyperparameters` undefined —
  # and the function erroring — whenever every fold's average return
  # was <= 0.)
  highest_return = -Inf
  hyperparameters = NULL
  for(i in seq_along(models))
  {
    model = models[[i]]$model
    prediction = models[[i]]$prediction
    fold_truth = known[which(fold_membership == i),]$return_customer
    best_cutoff = optimal_cutoff(fold_truth, prediction)
    avg_return = as.numeric(predictive_performance(fold_truth, prediction, cutoff = best_cutoff, returnH = FALSE)$avg_return)
    # Keep the tune of the fold with the highest out-of-fold return,
    # i.e. the one that generalizes better than the others.
    if(avg_return > highest_return)
    {
      hyperparameters = data.frame(model$bestTune)
      highest_return = avg_return
    }
  }
  return(hyperparameters)
}
# This function lists the predictive performance of each fold
# from nested cross-validation
# Input: a list of model objects from nested x-val functions, dataframe to store the result
# Output: dataframe
# Collect per-fold performance (average return, expected cost, AUC) from a
# nested cross-validation run and append the mean/SD of each metric as a
# new column `name` of `store`.
#
# Args:
#   models  List of per-fold results, each with $model and $prediction.
#   name    Column name under which this model's summary is stored.
#   store   Optional data.frame from a previous call; created when NULL so
#           several models can be accumulated side by side.
#
# Relies on the globals `known` and `fold_membership`, and on
# optimal_cutoff()/predictive_performance() defined in this file.
list_fold_performance = function(models, name, store = NULL)
{
  # Initialise
  if(is.null(store)){store = data.frame(row.names = c("avg_return (mean)","avg_return (SD)","exp_cost (mean)", "exp_cost (SD)", "AUC (mean)","AUC (SD)"))}
  avg_return = vector()
  exp_cost = vector()
  auc = vector()
  for(i in 1:length(models))
  {
    # `model` is extracted but not used below.
    model = models[[i]]$model
    prediction = models[[i]]$prediction
    best_cutoff = optimal_cutoff(known[which(fold_membership == i),]$return_customer, prediction)
    # NOTE(review): the first call below uses the default shallPlot = TRUE,
    # so an ROC curve is plotted for every fold; the two later calls
    # suppress plotting — confirm this asymmetry is intended.
    avg_return = append(avg_return, predictive_performance(known[which(fold_membership == i),]$return_customer, prediction, cutoff = best_cutoff, returnH = FALSE)$avg_return)
    exp_cost = append(exp_cost, predictive_performance(known[which(fold_membership == i),]$return_customer, prediction, shallPlot = FALSE, returnH = FALSE)$exp_cost)
    auc = append(auc, predictive_performance(known[which(fold_membership == i),]$return_customer, prediction, shallPlot = FALSE, returnH = FALSE)$area_under_curve)
  }
  # Mean and spread of each metric across folds.
  store["avg_return (mean)",name] = mean(avg_return)
  store["avg_return (SD)",name] = sd(avg_return)
  store["exp_cost (mean)",name] = mean(exp_cost)
  store["exp_cost (SD)",name] = sd(exp_cost)
  store["AUC (mean)",name] = mean(auc)
  store["AUC (SD)",name] = sd(auc)
  return(store)
}
|
14b629a487fc3f7a7e000944ca6ac183b4bcc4f4
|
6293d033c25ea4402d43ec857132bb86a50af048
|
/3-prepare-2011.R
|
0fe0f57dda9173acfbf523460fa72b53f83a12cd
|
[
"MIT"
] |
permissive
|
hafez-ahmad/in_household
|
bacbc1e5faa51f9351915b4017b88cef7c7f231b
|
58e27466147a9eca1191e33cf094d4c0dec144df
|
refs/heads/master
| 2022-02-11T14:13:35.318415
| 2019-07-02T14:42:12
| 2019-07-02T14:42:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,192
|
r
|
3-prepare-2011.R
|
##### Script returns tidy dataset for given year
library(tidyverse)
# create directory for tidy data output
if (!dir.exists("6-tidy-data")) {
  dir.create("6-tidy-data")
}
# Joined 2011 census extract produced by an earlier step of the pipeline.
joined11 <- readRDS("5-joined-data/2011.rds")
# Columns named `7`..`15` are positional codes carried over from the raw
# extract; they are renamed to descriptive names at the end of the chain
# (`10` = total households, `11` = electricity+latrine, `12` = electricity
# only, `13` = latrine only, `14` = neither, `15` = year).
tidy11 <- joined11 %>%
  # change raw numbers to integer type
  mutate_at(10:14, as.integer) %>%
  mutate(
    # correct rows listing INDIA as a state to a country level variable
    country = "INDIA",
    state = ifelse(state == "INDIA", NA, state),
    # raw counts of households with electricity / with latrine
    num_ea = `11` + `12`,
    num_la = `11` + `13`,
    # calculate percentages
    ea = (`11` + `12`) / `10`, # % having electricity (regardless of latrine)
    la = (`11` + `13`) / `10`, # % having latrine (regardless of electricity)
    ea_la = `11` / `10`, # % having electricity and latrine
    ea_ln = `12` / `10`, # % having electricity, no latrine
    en_la = `13` / `10`, # % having no electricity, yes latrine
    en_ln = `14` / `10`, # % having no electricity, no latrine
    # add societal section variable (SC/ST flag from the trailing letter of
    # column `1`; everything else is "ALL")
    societal_section = case_when(
      str_detect(`1`, "C$") ~ "SC",
      str_detect(`1`, "T$") ~ "ST",
      TRUE ~ "ALL"
    ),
    # mutate geo_section: infer the administrative level from which of the
    # location columns are filled in
    geo_section = case_when(
      is.na(state) & is.na(district) & is.na(subdistrict) ~ "country",
      !is.na(state) & is.na(district) & is.na(subdistrict) ~ "state",
      !is.na(state) & !is.na(district) & is.na(subdistrict) ~ "district",
      !is.na(state) & !is.na(district) & !is.na(subdistrict) ~ "subdistrict",
      TRUE ~ "other"
    )
  ) %>%
  # reorganize columns; check on year
  select(-c(1:6)) %>%
  select(`15`, geo_section, country, state, district, subdistrict,
         societal_section, everything()) %>%
  rename(demo_section = `7`,
         water_source = `8`,
         water_avail = `9`,
         total_hh = `10`,
         num_ea_la = `11`,
         num_ea_ln = `12`,
         num_en_la = `13`,
         num_en_ln = `14`,
         year = `15`)
saveRDS(tidy11, file = "6-tidy-data/2011.rds")
# Clear the workspace (this script is run standalone in the pipeline).
rm(list = ls())
|
1f32d19cb9825830ff76d1ae739b8f7830914a2f
|
561c2118e306ce45b249e2cbbd4755230e28766a
|
/preprocessing/vital_except_5min.R
|
ee60dbf4b5aaff63ca1244e541f486414a7d8e35
|
[] |
no_license
|
Doyun-lab/MACE_classification
|
f89c2c5c0090647f76bb7328f057b056b0fe8a82
|
555e9543888a9db9dd563fdb1facd28d466f4e89
|
refs/heads/main
| 2023-08-21T15:31:48.093643
| 2021-10-21T06:34:14
| 2021-10-21T06:34:14
| 338,050,366
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,255
|
r
|
vital_except_5min.R
|
library(stringr)
library(dplyr)

# vital_except_5min.R — drop the final 90 monitoring rows from every case
# in the TIVA and volatile vital-sign tables and save the trimmed tables.
# NOTE(review): hard-coded drive root — confirm before running elsewhere.
setwd('E:\\')

vital_TIVA = readRDS('preprocessing\\vital_TIVA_NA_END_0225.rds')
vital_volatile = readRDS('preprocessing\\vital_volatile_NA_END_0225.rds')

# Case excluded because its surgery time was under 5 minutes.
vital_volatile = subset(vital_volatile, vital_volatile$Case_ID != "08_181012_1120")

# Quick sanity checks on the input tables.
nrow(vital_TIVA)
nrow(vital_volatile)
unique(vital_TIVA$Case_ID)
unique(vital_volatile$Case_ID)

# Remove the last `n_drop` rows of every case and bind the trimmed cases
# back together. Replaces the original grow-by-rbind loop (quadratic in the
# number of cases) and guards cases with n_drop rows or fewer, for which
# the original 1:(nrow(data)-90) indexing misbehaved or errored; such
# cases now contribute no rows.
trim_last_rows <- function(vital, n_drop = 90) {
  per_case <- lapply(unique(vital$Case_ID), function(case_id) {
    case_rows <- vital[vital$Case_ID == case_id, , drop = FALSE]
    keep <- nrow(case_rows) - n_drop
    if (keep > 0) {
      case_rows[seq_len(keep), , drop = FALSE]
    } else {
      case_rows[0, , drop = FALSE]
    }
  })
  do.call(rbind, per_case)
}

TIVA_vital_5min <- trim_last_rows(vital_TIVA)
volatile_vital_5min <- trim_last_rows(vital_volatile)

# Sanity checks on the trimmed tables.
nrow(TIVA_vital_5min)
nrow(volatile_vital_5min)
unique(TIVA_vital_5min$Case_ID)
unique(volatile_vital_5min$Case_ID)

saveRDS(TIVA_vital_5min, 'preprocessing\\vital_TIVA_ex5min_0225.rds')
saveRDS(volatile_vital_5min, 'preprocessing\\vital_volatile_ex5min_0225.rds')
|
4000ace1a85230ff2cd545de768eec5f428a1db2
|
8a3c62d2f1f81f66f00ba29905988a48841604c3
|
/TP_02.R
|
842156fe4a03703cee1e387c97bb5f95953873f9
|
[] |
no_license
|
Mxberd/Tps_individuales_entrega
|
c97073ba9e381f1ac64964e5b25dc8e201c6a3eb
|
9455eae7307ff323b2f25587cd1053f8f117d4ad
|
refs/heads/master
| 2023-07-29T05:20:42.353969
| 2021-09-06T13:35:49
| 2021-09-06T13:35:49
| 403,631,533
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,364
|
r
|
TP_02.R
|
# PRACTICAL ASSIGNMENT 02
# To begin, load the required libraries.
library(tidyverse)
library(datos)
library(janitor)
library(sf)
library(zip)
library(skimr)
library(sf)
# Also disable scientific notation for numeric output.
options(scipen = 999)
# We will work with the street trees of Buenos Aires City (CABA).
# The data come from BA Data. Import the dataset.
base_00<- read.csv("Datos/arbolado-publico-lineal-2017-2018.csv",
                   encoding = "UTF-8", dec = ".")
# If there is any reading problem, the zip included in "datos" can be
# loaded instead.
# We can continue!
# as_tibble() gives a nicer view of our data.
base_0A <- as_tibble(base_00)
# We also try the skimr package to inspect the structure of the data.
skim(base_00)
# A bit of cleaning: drop rows with missing values.
base_00 <- base_00 %>%
  na.omit()
# unique() removes duplicated records.
base_00 <- base_00 %>%
  unique()
# Tidy the presentation of the data: drop some columns, rename others.
base_00 <- base_00 %>%
  select( -nro_registro,-manzana,-calle_altura,-direccion_normalizada,-tipo_activ,-ubicacion) %>%
  rename("Calle"=calle_nombre, "Altura"=calle_chapa, "Tipo"=nombre_cientifico, "ancho_vereda"=ancho_acera)
# str_detect is a good tool for querying the data. For example:
# are there banana trees in the street-tree census? We search by the
# scientific name "Musa x paradisiaca"!
Banano <-
  str_detect( string = base_0A, pattern = "Musa x paradisiaca")
# "FALSE" in every category suggests there are no banana trees, at least
# under that scientific name.
# We will drop species with very low representation. For that we must
# count the occurrences of each species!
base_01 <- base_00 %>%
  select(Tipo)%>%
  group_by(Tipo) %>%
  summarise(cantidad_registrada=n())
# Use 100 specimens as the floor a species must exceed to be considered
# significant; we use case_when for this.
base_01 <-base_01 %>%
  mutate(significatividad = case_when(cantidad_registrada<100
                                      ~ "No Significativa", TRUE ~ "Significativa"))
base_01 %>% count(significatividad)
# We detect 333 non-significant species (very low representation).
# On the other hand, 95 species are well represented.
# Bring this flag back into base_00.
base_02 <- base_01 %>%
  select(-cantidad_registrada)
base_03 <- base_00 %>%
  right_join(base_02)
# Now each record carries the representativeness information.
# We propose some plots.
# Load the administrative subdivision of CABA for later use!
barrios <- st_read("Datos/barrios.geojson")
# Look at the 25 most significant species; for that we use
# arrange and slice.
base_0C <- base_01 %>%
  arrange(desc(cantidad_registrada)) %>%
  slice(1:25)
ggplot(base_0C )+
  geom_bar(aes(x=reorder(Tipo,-cantidad_registrada), weight=cantidad_registrada,fill="cantidad de ejemplares"),
           fill = "darkolivegreen4")+
  coord_flip()+
  theme(legend.position="top")+
  labs(title ="Arbolado - CABA", subtitle="Primera Aproximacion", fill="cantidades detectadas", x="especies", y="cantidad", caption= "Nota: fuente, BA data")+
  theme_light()+
  theme_classic()
# reorder() lets us present the data in order.
# We observe that some species, such as Fraxinus, are over-represented.
# We would like to identify the most representative species of each
# neighbourhood. For that we must cross datasets.
# Let us study the distribution of the top 3:
# plane trees (Platanus), Fraxinus and Ficus.
base_05 <- base_03 %>%
  filter (Tipo %in% c("Fraxinus pennsylvanica","Platanus x acerifolia",
                      "Ficus benjamina"))
# These 3 species account for roughly half of all specimens.
# We want to find out which neighbourhood has the most Ficus.
barrios_01 <- barrios %>%
  select(barrio,geometry)
base_06 <- base_05 %>%
  filter(Tipo=="Ficus benjamina") %>%
  st_as_sf(coords = c("long", "lat"), crs = 4326) %>%
  select(Tipo,geometry) %>%
  st_join(barrios_01)
# Having joined (st_join) the neighbourhood information with the
# location of the trees, we are ready to make a choropleth map.
base_0D <- base_06 %>%
  select(barrio)%>%
  group_by(barrio) %>%
  summarise(cantidad_registrada=n())
# With st_set_geometry(NULL) we drop the spatial data (the tree points).
base_0E<-base_0D %>%
  st_set_geometry(NULL)
# Make a new join and apply st_as_sf again.
base_0E <- base_0E %>%
  right_join(barrios_01) %>%
  st_as_sf()
ggplot(base_0E)+
  geom_sf(aes(fill=cantidad_registrada), color= NA)+
  scale_fill_viridis_c(breaks=c(0,200,400,600,800,1000,1200))+
  geom_sf_text(data=base_0E, aes(label = barrio), size=1.5)+
  labs(title = "Mapa coropletico arbolado",
       subtitle = "Visualizacion especie Ficus Benjamina",
       fill = "arboles/barrio",
       caption= "Fuente: BADATA, elaboracion propia")+
  theme_light()+
  theme_void()
# PALERMO, VILLA URQUIZA, CABALLITO, FLORES AND MATADEROS
# are the neighbourhoods where (no doubt) residents have been planting
# the most Ficus.
# WE HAVE FINISHED THE EXERCISE!
|
2de8ea5d0f67f2977212a68cafbd91e0e58ae817
|
c7fdfaa84c3b38160506ee9a4f544e39a1ddb300
|
/figure_data_generation/annotated_gene_fusions.R
|
2cdc1668763d6a858add9a08a6a0b1ac2e9489a7
|
[] |
no_license
|
iguana128/Gene-fusion_NB
|
459cada1af191b8678daa8d65df9668cc52bc0d3
|
d3e31cfe835f8d8daf24ee8a3e6dfaca88a3a8ff
|
refs/heads/master
| 2020-03-29T19:48:50.484503
| 2018-09-25T19:31:14
| 2018-09-25T19:31:14
| 150,282,120
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,374
|
r
|
annotated_gene_fusions.R
|
rm(list=ls())
## This script summarises the gene-fusion statistical and annotation
## results of three callers (Chimerascan, SOAPfuse, TopHat).
library(ggplot2)
## Load the fusion results.
setwd("C:/Users/ZLiu/Desktop/genefusion/Updated_fusion_results_2018.06.05")
load("Chimerascan_results.RData")
load("SOAPfuse_results.RData")
load("TopHat_results.RData")

# Share of fusions observed exactly once, given a frequency table over the
# fusion ids (column 2 of each results table).
singleton_ratio <- function(freq) {
  length(which(freq$Freq == 1)) / length(freq$Freq)
}

frequency_fusions_Chimerascan = as.data.frame(table(Chimerascan_results[,2]))
ratio_chimerascan = singleton_ratio(frequency_fusions_Chimerascan)
frequency_fusions_SOAPfuse = as.data.frame(table(SOAPfuse_results[,2]))
ratio_SOAPfuse = singleton_ratio(frequency_fusions_SOAPfuse)
frequency_fusions_TopHat = as.data.frame(table(TopHat_results[,2]))
ratio_TopHat = singleton_ratio(frequency_fusions_TopHat)

# For one caller: tabulate the reported fusions (column 9 == 1), rank the
# fusion ids by frequency, collect the union of the genes in columns 3 and
# 5, and write the gene list to `outfile`.
# Replaces the three copy-pasted per-caller blocks of the original script.
report_summary <- function(results, outfile) {
  reported <- results[which(results[, 9] == 1), , drop = FALSE]
  freq <- as.data.frame(table(reported[, 2]))
  ord <- sort(freq$Freq, decreasing = TRUE, index.return = TRUE)
  genes <- union(reported[, 3], reported[, 5])
  write.table(genes, file = outfile, col.names = FALSE, row.names = FALSE)
  list(frequency = freq, ranked = freq$Var1[ord$ix], genes = genes)
}

chimerascan_report <- report_summary(Chimerascan_results, "sky1.txt")
frequency_report_fusions_Chimerascan <- chimerascan_report$frequency
ranked_report_Chimerascan <- chimerascan_report$ranked
gene_list_report_Chimerascan <- chimerascan_report$genes

soapfuse_report <- report_summary(SOAPfuse_results, "sky2.txt")
frequency_report_fusions_SOAPfuse <- soapfuse_report$frequency
ranked_report_SOAPfuse <- soapfuse_report$ranked
gene_list_report_SOAPfuse <- soapfuse_report$genes

tophat_report <- report_summary(TopHat_results, "sky3.txt")
frequency_report_fusions_TopHat <- tophat_report$frequency
ranked_report_TopHat <- tophat_report$ranked
gene_list_report_TopHat <- tophat_report$genes
#
|
9a7e4fca173306684afc4cc713942f0e0fd41a77
|
3b7fd86eebc402c757e656d7f0c6183041f5583e
|
/App/server.R
|
411a9e4b2124e7c21eef34b396b1403758fb1b05
|
[] |
no_license
|
antoniogmzstat/EDA_Shiny
|
3769d758efafab77facb0068253dabba2295cc1c
|
ec1f536c9a2526be8854556f53b00acc49459e54
|
refs/heads/main
| 2023-02-26T13:03:11.097590
| 2021-02-01T20:10:03
| 2021-02-01T20:10:03
| 331,009,560
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 505
|
r
|
server.R
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#
library(shiny)
# NOTE(review): setwd() inside a Shiny app is fragile — confirm the app is
# always launched from the expected location.
setwd("~/Proyectos_Shiny/EDA_Shiny")
# All server helper scripts, one file per piece of server logic.
server_functions <- list.files("./App/server_functions/", full.names = TRUE, recursive = TRUE)
# Define server logic
shinyServer(function(input, output, session) {
  # source() accepts a single file, not a character vector: the original
  # source(server_functions, local = TRUE) broke as soon as more than one
  # helper script existed. Source each file in turn, evaluating in the
  # server function's environment so helpers can see input/output/session.
  for (f in server_functions) {
    source(f, local = TRUE)
  }
})
|
67b6a8c56252e62281b84cbd69645b3f93042c7d
|
c5d3d69241ab09957e856ddc5204b2216c8a8b86
|
/man/write_10x.Rd
|
9ef66c8ab6cdc9a00b5f6605744d27aece83f45b
|
[] |
no_license
|
scottleh/ccfindR
|
2fea30d35c68b14e1e7ec4a4c38d90f0f38ddf69
|
4f3bf6b745a860dab2c5de67d267b942cd4a2a9e
|
refs/heads/master
| 2022-02-23T15:52:44.939509
| 2019-10-02T15:28:36
| 2019-10-02T15:28:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 903
|
rd
|
write_10x.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{write_10x}
\alias{write_10x}
\title{Write 10x data files}
\usage{
write_10x(object, dir, count = "matrix.mtx", genes = "genes.tsv",
barcodes = "barcodes.tsv", quote = FALSE)
}
\arguments{
\item{object}{Object of class \code{scNMFSet} containing count data}
\item{dir}{Directory where files are to be written.}
\item{count}{File name for count matrix.}
\item{genes}{File name for gene annotation.}
\item{barcodes}{File name for cell annotation.}
\item{quote}{Suppress quotation marks in output files.}
}
\value{
\code{NULL}
}
\description{
Use an object and write count and annotation files in 10x format.
}
\examples{
set.seed(1)
x <- matrix(rpois(n=12,lambda=3),4,3)
rownames(x) <- seq_len(4)
colnames(x) <- seq_len(3)
s <- scNMFSet(count=x,rowData=seq_len(4),colData=seq_len(3))
write_10x(s,dir='.')
}
|
17282ce524c036f9ad8d6f1b3f51fb609c94ede6
|
0ef60d5dd2963770fe9226996fbd27ee47ca1480
|
/ShinyApps/sirModel/server.R
|
33c362042f2f5754d1e59f2a7b7198e738242a0b
|
[] |
no_license
|
fernandosm/VPS-dinamica
|
f0c131d059c5c738a5dbab3d9615e15e03d126a6
|
756f9feaf78ad3b0772155b9ac95828e3b04f956
|
refs/heads/master
| 2021-01-20T06:59:50.657137
| 2014-04-08T03:23:02
| 2014-04-08T03:23:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,555
|
r
|
server.R
|
# file name: server.R
#
# Purpose:
# Executa o modelo SIR.
#
# inputs:
# b: taxa de contatos potencialmente infectantes
# g: taxa de recuperação = inverso do período infeccioso
# m: taxa de natalidade = taxa de mortalidade
# vo: taxa de vacinação constante
# (essa é uma taxa relacionada à proporção de cobertura vacinal,
# cujo valor numérico é de difícil interpretação)
# t1 e t2: intervalo de vacinação entre t1 e t2
# S0: número (ou densidade) inicial de indivíduos suscetíveis (no tempo t=0)
# I0: número (ou densidade) inicial de indivíduos infectados (no tempo t=0)
# tempoSim: tempo de simulação
# grafico: lógico, cria ou não o gráfico
#
# outputs:
# S(t): número de indivíduos suscetíveis no instante t
# I(t): número de indivíduos infectados no instante t
# R(t): número de indivíduos recuperados no instante t
#
# Other argumnts & variables:
# SIRmodel: função que carrega modelo
# parmsODE: armazena os parâmetros
# times: vetor com os tempos que retornarão com o resultado da simulação
# yini: vetor que armazena os valores iniciais
#
library(shiny)
library(deSolve)
library(ggplot2)
library(reshape)
# Run the SIR (Susceptible-Infected-Recovered) model with a vaccination
# pulse active between t1 and t2, and return a ggplot of the three
# compartments over time.
#
# Args (see the file header for the original parameter description):
#   b           rate of potentially infectious contacts
#   g           recovery rate (inverse of the infectious period)
#   vo          constant vaccination rate
#   m           birth rate (= death rate)
#   t1, t2      vaccination is active between t1 and t2
#   So, Io, Ro  initial numbers of susceptible/infected/recovered
#   tempoSim    simulation horizon
sirModel <- function(b,g,vo,m,t1,t2,So,Io,Ro,tempoSim){
  #### Required libraries ####
  library(deSolve)
  library(ggplot2)
  library(reshape)
  #### Susceptible-Infected-Recovered model ####
  SIRmodel <- function(Time, State, Pars) {
    with(as.list(c(State, Pars)), {
      # v equals vo while t1 < Time < t2 and 0 outside that window
      # (each sign() factor acts as a step indicator; half weight exactly
      # at the boundaries).
      v <- vo*(sign(Time-t1) + 1)/2*(sign(t2-Time) + 1)/2
      dS <- m*(I+R) - b*S*I -v*S
      dI <- b*S*I - m*I - g*I
      dR <- v*S + g*I - m*R
      return(list(c(dS, dI, dR)))
    })
  }
  #### Model and numerical-integration parameters ####
  parmsODE <- c(b=b,g=g,m=m,vo=vo,t1=t1,t2=t2)
  times <- seq(0, tempoSim, by = tempoSim/100)
  #### Initial condition ####
  yini <- c(S=So, I=Io, R=Ro)
  #### Numerical integration ####
  outSIRmodel<- as.data.frame(ode(yini, times, SIRmodel, parmsODE,method="ode45"))
  names(outSIRmodel) <- c('Tempo','Suscetíveis','Infectados','Recuperados')
  #### Reshape the output to long format for plotting ####
  outSIR <- melt(as.data.frame(outSIRmodel),id="Tempo")
  #### Plot the results; with vaccination, mark the t1/t2 window ####
  if (vo==0){
    graf <- ggplot(outSIR,aes(x=Tempo,y=value,colour=variable,group=variable)) + geom_line(size=1.1) +
      ggtitle("Dinâmica SIR") +
      xlab("Tempo") + ylab("Número de Animais") + labs(colour = "População") +
      theme(text=element_text(size=20));
  }
  else{
    graf <- ggplot(outSIR,aes(x=Tempo,y=value,colour=variable,group=variable)) + geom_line(size=1.1) +
      ggtitle("Dinâmica SIR") +
      xlab("Tempo") + ylab("Número de Animais") + labs(colour = "População") +
      geom_vline(xintercept = c(t1,t2),linetype = "longdash") +
      theme(text=element_text(size=20));
  }
  return(graf)
}
# Define server logic required to generate and plot a random distribution
shinyServer(function(input, output, session) {
  # Expression that generates a plot of the distribution. The expression
  # is wrapped in a call to renderPlot to indicate that:
  #
  # 1) It is "reactive" and therefore should be automatically
  #    re-executed when inputs change
  # 2) Its output type is a plot
  #
  output$simPlot <- renderPlot({
    # Re-run the SIR simulation with the current UI parameter values and
    # draw the resulting ggplot.
    graf <- sirModel(input$b,input$g,input$vo,input$m,input$t1,input$t2,
                     input$So,input$Io,input$Ro,input$tempoSim)
    plot(graf)
  })
})
|
30412d8c2d65fcb883775be4b9cacd3caa5b5c83
|
e66aa41a10f0346fa36bfdcdf400a71ed403ed58
|
/plot1.R
|
2a34b55aa394b0cd2014606ff7b7215811a1306a
|
[] |
no_license
|
ashishjha89/ExData_Plotting1
|
89adb694ff2c35acdf383822b5c297b7d38d58ff
|
c0b84c440aed63c20093e8e25db7e30cede0e3fe
|
refs/heads/master
| 2021-01-23T03:43:37.033402
| 2016-08-26T21:31:48
| 2016-08-26T21:31:48
| 66,495,439
| 0
| 0
| null | 2016-08-24T19:59:13
| 2016-08-24T19:59:13
| null |
UTF-8
|
R
| false
| false
| 1,101
|
r
|
plot1.R
|
library(lubridate)

# plot1.R — histogram of Global Active Power for 2007-02-01/02 from the
# household power-consumption data set, saved to plot1.png (480x480 px).

# Read the full table; fields are semicolon-separated.
power.consumption <- read.table("household_power_consumption.txt", sep = ";", header = TRUE)
# Parse the day/month/year date strings into Date objects.
power.consumption[, "Date"] <- dmy(power.consumption[, "Date"])
# Keep only the two target days. The original filter used
# ydm('2007-01-02') / ydm('2007-02-02'), which parse (year-day-month) to
# these same two dates but read misleadingly; ymd() with the intended
# literals is equivalent and explicit.
power.consumption.data <- power.consumption[power.consumption[, "Date"] == ymd("2007-02-01") | power.consumption[, "Date"] == ymd("2007-02-02"), ]
# Clear the original data
rm(power.consumption)
# Global_active_power may be read as a factor (pre-R-4.0 default), so
# convert via character first — a direct as.numeric() on a factor would
# return the factor codes, not the measured values.
power.consumption.data[, "Global_active_power"] <- as.character(power.consumption.data[, "Global_active_power"])
power.consumption.data[, "Global_active_power"] <- as.numeric(power.consumption.data[, "Global_active_power"])
# Draw plot
dev.new()
hist(power.consumption.data[, "Global_active_power"], col = "red", xlab = "Global Active Power (kilowatts)", main = "Global Active Power")
# Copy it to png
dev.copy(png, file = "plot1.png", width = 480, height = 480, units = "px")
# Make device off
dev.off()
|
ca939fe8b0bb48c89319d0df451361e4d30e34ff
|
5779ad8fc88297c6c6f15a69e655a3e644a491fa
|
/threshold_search.R
|
dc04a7c55530d0d680341032b626265afd26cabb
|
[] |
no_license
|
johnwoodill/cbb-dynamic-model
|
3ab580dcc1bbf24e4c8eb1cee961072bfc32b13b
|
db4901849d7b93295e7769dd21639b8f62ce0b27
|
refs/heads/master
| 2021-03-19T14:29:09.949462
| 2018-06-18T17:51:58
| 2018-06-18T17:51:58
| 72,034,690
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,834
|
r
|
threshold_search.R
|
# threshold_search.R — grid-search spray thresholds for each month of the
# season (loop index i = 1..9 maps to months 3..11 via month = i + 2).
# NOTE(review): depends on objects created by the sourced scripts
# (`acres`, `cherry_per_acre`, `cost_s`, `nsp_mcListFit`, `finalresults`,
# `cherrygrowth()`, `cherrypricing()`) — confirm against 1-parameters.R
# and 2-calibrate_markov_chains.R.
rm(list=ls())
library(tidyverse)
library(ggthemes)
library(markovchain)
# Decision function
source("R/decision.R")
# Cherry growth
source("R/cherrygrowth.R")
# Dynamic cherry pricing function
source("R/cherrypricing.R")
# Initiate parameters
source("1-parameters.R")
# Get calibrated markov chains
calibration_type <- "field"
source("2-calibrate_markov_chains.R")
# Followed IPM
# Initial dissect levels
# Field-level AB live: 5.5%
# AB Dead: 2.5%
# CD: 1%
#
# cv <- c(0.055, 0.025, .01)
# Initial state vector; the fourth element is the remainder so that the
# vector sums to 1.
cv <- c(0.0001, 0.0001, 0.0001)
cv[4] <- 1 - sum(cv)
# Logistic function for cherry growth
cherryonfarm <- cherrygrowth(-10:10, acres*cherry_per_acre, beta = 1, r = .3)
# Keep the 10 in-season values of the growth curve.
cherryonfarm <- cherryonfarm[3:12]
# Range of values to search
srg <- seq(0, 1, by = 0.01)
tdat <- data.frame()
thold <- data.frame()
# Candidate threshold grid over the first three states; Var4 is the
# implied remainder, and rows with a negative remainder or zero Var3 are
# dropped.
gridd <- expand.grid(seq(0, .2, by = 0.01), seq(0, 0.1, by = 0.01), seq(0, 1, by = 0.001))
gridd$Var4 <- 1 - rowSums(gridd[, 1:3])
gridd <- filter(gridd, Var4 >= 0 & Var3 > 0)
head(gridd)
# One pass per month: score every candidate threshold and record the first
# one at which the spray decision flips to "spray".
for(i in 1:9){
  tdat <- data.frame()
  for (j in 1:nrow(gridd)){
    threshold <- c(gridd[j, 1], gridd[j, 2], gridd[j, 3], gridd[j, 4])
    #
    # threshold <- c(0, 0, 0)
    # threshold[1] <- gridd[j, 1]
    # threshold[3] <- gridd[j, 2]
    # threshold[4] <- 1 - sum(threshold[1:3])
    #
    # Advance the state one month under the month-i transition matrix
    # (nsp_mcListFit — presumably the no-spray chain; confirm against the
    # calibration script).
    nspray <- threshold %*% nsp_mcListFit$estimate[[i]][]
    nspray_growth <- nspray[3] - threshold[3]
    # Monetary damage if not spraying: growth in state 3, scaled by the
    # unaffected cherry on farm and the price at the new state-3 level.
    nsp_damage <- nspray_growth * (cherryonfarm[i+1]*threshold[4]) * cherrypricing(nspray[3])
    # Get decision : 1 (spray), 0 (no spray) — spray when the avoided
    # damage covers the per-acre spray cost.
    dec <- ifelse(nsp_damage >= cost_s*acres, 1, 0)
    #threshold_choice <- decision(acres, cost_s, cherryonfarm[i], nsp_mcListFit$estimate[[i]][], threshold)
    mdat <- data.frame(month = i+2,
                       choice = dec,
                       nspray_growth = nspray_growth,
                       cherry_on_farm = cherryonfarm[i]*cv[4],
                       nspray_damage = nsp_damage,
                       ABL_threshold = threshold[1],
                       ABD_threshold = threshold[2],
                       CD_threshold = threshold[3],
                       NI_threshold = threshold[4])
    tdat <- rbind(tdat, mdat)
  }
  # Sort so that, among rows where spraying pays (choice == 1), the one
  # with the smallest CD threshold comes first.
  tdat <- arrange(tdat, choice, CD_threshold)
  tdat <- filter(tdat, NI_threshold >= 0 & nspray_growth >= 0)
  tdat <- arrange(tdat, choice, CD_threshold)
  #tdat
  loc <- which(tdat$choice == 1)[1]
  loc
  thold <- rbind(thold, tdat[loc, ])
  thold
  print(i)
}
thold
#thold$CD_threshold <- ifelse(is.na(thold$CD_threshold), 0, thold$CD_threshold)
saveRDS(thold, "results/threshold_search.rds")
# Compare the searched CD thresholds against the calibrated field series.
ggplot(thold, aes(month, 100*CD_threshold)) +
  geom_line(linetype = "dashed", color = "grey") +
  geom_line(data = finalresults, aes(Month, 100*field_cd)) +
  scale_x_continuous(breaks = 3:12) +
  ylim(0, 20)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.