blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
69f454d7126e516be1657a8b5b2c7c7021e2836e
|
3aa7718eda2b2e76efde2e9d41beb4fbd6115c79
|
/man/remove_accents.Rd
|
5aa52868a6db29c2121417b26e57f37d7b222a72
|
[] |
no_license
|
alinemsm/rslp
|
9c0e929323830615dceb0a9de1e619209ec6f549
|
be0a04855d8cca473bc36ae69349510b593ac6a9
|
refs/heads/master
| 2021-05-01T06:22:23.372303
| 2016-10-14T12:31:54
| 2016-10-14T12:31:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 292
|
rd
|
remove_accents.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zzz.R
\name{remove_accents}
\alias{remove_accents}
\title{Remove Accents}
\usage{
remove_accents(s)
}
\arguments{
\item{s}{the string you want to remove accents}
}
\description{
A wrapper for the stringi package.
}
|
9f841c39115e2751a56d99b3492c4e1c73343ace
|
2d24c72abe89c38bc13682ca2a048ffa97dcf9c3
|
/Bohn_Kleemann_Bambauer_Blatt9.R
|
a81bdf74762a841a9ad9bff79c6c8cfb2a9a2d31
|
[] |
no_license
|
curala70/StatistikSS2017
|
e42c93fe62255774727e0e44cad6ca97328fc008
|
b535c398ee1ad6ec6a0434b759ace077712108ac
|
refs/heads/master
| 2021-01-20T02:05:54.611177
| 2017-06-27T08:45:11
| 2017-06-27T08:45:11
| 89,371,542
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,649
|
r
|
Bohn_Kleemann_Bambauer_Blatt9.R
|
# a)
# Kolmogorov-Smirnov test statistic of a sample against the standard
# normal distribution: the largest absolute distance between the
# empirical CDF of x and pnorm over the sorted sample.
#
# @param x numeric sample vector (must be non-empty)
# @return max_i |F_n(x_i) - Phi(x_i)|, the KS statistic
KSS <- function(x) {
  stopifnot(length(x) > 0)  # original produced -Inf plus warnings for empty input
  x <- sort(x)
  n <- length(x)
  d <- numeric(n)  # preallocate instead of growing from c()
  # BUG FIX: the original loop header was `1:n-1`, which parses as
  # (1:n)-1 and starts at 0; the i=0 iteration was silently discarded
  # (assignment to d[0] is a no-op) but raised a max(numeric(0))
  # warning. seq_len(n - 1) is the intended, warning-free form and is
  # empty when n == 1.
  for (i in seq_len(n - 1)) {
    Fx <- (1 / n) * sum(x <= x[i])
    # distance of the ECDF step to the normal CDF at both step edges
    d[i] <- max(abs(Fx - pnorm(x[i])), abs(Fx - pnorm(x[i + 1])))
  }
  d[n] <- abs(1 - pnorm(x[n]))  # final step: ECDF is 1 beyond x[n]
  max(d)
}
# b)
# Simulation study: draw 100 standard-normal samples for each sample
# size n in {10, 40, 160, 640}, compute the KS statistic (KSS) for each
# sample, and plot all statistics against the sample index.
#
# replicate(..., simplify = FALSE) calls rnorm() in exactly the same
# order as the original for-loops, so the RNG stream (and hence any
# seeded result) is unchanged. CONSISTENCY FIX: the original
# initialised x10 with list() but x40/x160/x640 with c(); the latter
# only worked because `[[<-` on NULL silently promotes to a list.
x10 <- replicate(100, rnorm(10), simplify = FALSE)
x40 <- replicate(100, rnorm(40), simplify = FALSE)
x160 <- replicate(100, rnorm(160), simplify = FALSE)
x640 <- replicate(100, rnorm(640), simplify = FALSE)
# KS statistic for every sample of each size (replaces four copy-paste
# loops that grew d10..d640 element by element)
d10 <- vapply(x10, KSS, numeric(1))
d40 <- vapply(x40, KSS, numeric(1))
d160 <- vapply(x160, KSS, numeric(1))
d640 <- vapply(x640, KSS, numeric(1))
# Headline: mean KS statistic scaled by sqrt(n) for each sample size
# (under H0, sqrt(n) * D_n converges to the Kolmogorov distribution,
# so these values should stabilise as n grows).
MW10 <- round(mean(d10) * sqrt(10), digits = 2)
MW40 <- round(mean(d40) * sqrt(40), digits = 2)
MW160 <- round(mean(d160) * sqrt(160), digits = 2)
MW640 <- round(mean(d640) * sqrt(640), digits = 2)
maintext <- paste("n10:", MW10, ";n40:", MW40, ";n160:", MW160, ";n640:", MW640)
# Plot the statistics, one colour per sample size
plot(1:100, d10, col = 'black', type = 'p', main = maintext,
     xlab = "Index", ylab = "KSS", ylim = c(0, 1))
points(1:100, d40, col = 'red')
points(1:100, d160, col = 'green')
points(1:100, d640, col = 'blue')
# create legend
legend("topright", c("n=10", "n=40", "n=160", "n=640"),
       lty = c(1, 1), lwd = c(2, 2), col = c("black", "red", "green", "blue"))
|
f5da4739cb9ac7315d12e86cd5e0a877fec75862
|
8399dd26135eb99332c0d0cb456f42ee4ec63cb5
|
/congress109.R
|
6bdc7124fb23bba5670bea87d441031afab79e85
|
[] |
no_license
|
tiffblahthegiraffe/STA380-class-of-2019
|
fafa4ba321c5f1945b1e2ce38a5f68934b6c0ff1
|
84040d91ecdca3d42285b087ce94e22496bf1aa0
|
refs/heads/master
| 2020-03-25T00:38:21.157438
| 2018-08-15T22:35:44
| 2018-08-15T22:35:44
| 143,196,278
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,784
|
r
|
congress109.R
|
# PCA of phrase counts for members of the 109th US Congress
# (data fetched from James Scott's STA380 course repository).
library(ggplot2)
countdata = read.csv(url("https://raw.githubusercontent.com/jgscott/STA380/master/data/congress109.csv"), header=TRUE, row.names=1)
memberdata = read.csv(url("https://raw.githubusercontent.com/jgscott/STA380/master/data/congress109members.csv"), header=TRUE, row.names=1)
# First normalize phrase counts to phrase frequencies.
# (often a sensible first step for count data, before z-scoring)
Z = countdata/rowSums(countdata)
# PCA: scale=TRUE z-scores each phrase frequency column; rank=2 keeps
# only the first two components (a full decomposition would return one
# PC per phrase column).
pc2 = prcomp(Z, scale=TRUE, rank=2)
loadings = pc2$rotation # phrase loadings on each retained component
scores = pc2$x # member scores in PC space
# Question 1: where do the observations land in PC space?
# a biplot shows the first two PCs
qplot(scores[,1], scores[,2], color=memberdata$party, xlab='Component 1', ylab='Component 2')
# The default colour mapping draws Democrats red and Republicans blue,
# which is confusing; remap so D=blue, I=grey, R=red.
qplot(scores[,1], scores[,2], color=memberdata$party, xlab='Component 1', ylab='Component 2') + scale_color_manual(values=c("blue", "grey", "red"))
# Interpretation: the first PC axis primarily has Republicans as positive numbers and Democrats as negative numbers
# PC1 left: Democrat; right: Republican
# PC2 is ambiguous and hard to interpret
# Question 2: how are the individual PCs loaded on the original variables?
# i.e. which phrases contribute most to each component?
# The top words associated with each component
o1 = order(loadings[,1], decreasing=TRUE) # sort phrases in PC1 from most positive to most negative loading
colnames(Z)[head(o1,25)] # most positive: topics most discussed by Republicans
colnames(Z)[tail(o1,25)] # most negative: the most Democratic phrases
o2 = order(loadings[,2], decreasing=TRUE) # sort phrases in PC2 from most positive to most negative loading
colnames(Z)[head(o2,25)]
colnames(Z)[tail(o2,25)]
|
8e9cddcbfdcc85159d855fba9322f54231d9d349
|
52c1f08ce14e5542ff36f7d5ae4b8f1da966508d
|
/MadingleyPlots/R/PlotMassDensity.R
|
1a747930f4dbe48a7d18c82126adce62da1af960
|
[] |
no_license
|
timnewbold/MadingleyPlots
|
5e61b75df743408e277456f9daba54ffe0ef1a5f
|
03cca3b67eea3b4de46d14774e9814237507e841
|
refs/heads/master
| 2020-07-23T11:57:21.421452
| 2017-06-29T08:17:16
| 2017-06-29T08:17:16
| 73,809,091
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,832
|
r
|
PlotMassDensity.R
|
# Plot body-mass density distributions from Madingley model outputs.
#
# Reads the mass-bins NetCDF outputs found under resultsDir, averages
# abundance densities over the last numTimeSteps time steps and over all
# ensemble simulations, and draws one log-log density curve per variable
# (panel columns) for each grid cell (panel rows).
#
# Args:
#   resultsDir: directory holding SimulationControlParameters.csv,
#     SpecificLocations.csv, CohortFunctionalGroupDefinitions.csv and the
#     mass-bins .nc output files
#   plotName: file-name stem of the PDF written when outDir is given
#   outDir: if non-NULL, a PDF is written there; otherwise the current
#     graphics device is used
#   label: output-file label; when NULL it is inferred from the file names
#   whichCells: 1-based cell numbers to plot (NULL = all cells found)
#   endTimeStep: last time step to average over (NULL = last available)
#   numTimeSteps: number of time steps, ending at endTimeStep, to average
#   vars: output variables to plot, one panel column each
#   cols: line colour per variable (parallel to vars)
#   xlims: x-axis (mass) limits; NULL = range of the mass-bin midpoints
#   returnResults: if TRUE, return a per-cell list of data frames with
#     columns mass and density (one data frame per variable)
#
# NOTE(review): depends on package-internal helpers (.Log,
# .ListMassBinsFiles, DegreeCellAreaKM) and on the sds interface
# (open.sds/get.sds); their contracts cannot be verified from this file.
PlotMassDensity <- function(resultsDir,plotName = "MassDensity",
                            outDir=NULL,
                            label=NULL,
                            whichCells=NULL,endTimeStep=NULL,
                            numTimeSteps=12,
                            vars=c("herbivore abundance",
                                   "omnivore abundance",
                                   "carnivore abundance"),
                            cols=c("#66a61e",
                                   "#7570b3",
                                   "#d95f02"),
                            xlims = NULL,
                            returnResults=FALSE){
  # Simulation metadata: grid-cell size, cell locations and the cohort
  # mass limits (used to place the mass-bin midpoints)
  initialization <- read.csv(paste(resultsDir,"/SimulationControlParameters.csv",sep=""))
  cellsize <- as.numeric(paste(initialization$Value[which(initialization$Parameter=="Grid Cell Size")]))
  locations <- read.csv(paste(resultsDir,"/SpecificLocations.csv",sep=""))
  cohortDefs <- read.csv(paste(resultsDir,"/CohortFunctionalGroupDefinitions.csv",sep=""))
  maxPossibleMass <- max(cohortDefs$PROPERTY_Maximum.mass)
  minPossibleMass <- min(cohortDefs$PROPERTY_Minimum.mass)
  .Log("Finding Madingley mass-bins output files\n")
  files <- .ListMassBinsFiles(resultsDir)
  if(!is.null(whichCells)){
    # File names use 0-based cell indices, hence whichCells-1
    files <- files[sapply(paste("Cell",whichCells-1,sep=""),FUN = function(x) return(grep(x,files)))]
  }
  # Find the unique cells in these simulations
  cells.re<-regexpr("Cell[0-9]+",files)
  cells<-as.list(unique(substr(files,cells.re,cells.re+
                                 attr(cells.re,"match.length")-1)))
  # Find the simulation numbers
  sims.re<-regexpr("_[0-9]+_",files)
  sims<-as.list(unique(substr(files,sims.re,sims.re+
                                attr(sims.re,"match.length")-1)))
  if(is.null(label)){
    # Infer the common file-name prefix; it must be unique across files
    label<-unique(substr(files,1,sims.re-1))
    stopifnot(length(label)==1)
    label<-label[1]
  } else {
    label <- paste("MassBinsOutputs_",label,sep="")
  }
  .Log(paste("Found results for ",length(cells)," cells\n",sep=""))
  .Log(paste("Found results for ",length(sims)," simulations\n",sep=""))
  .Log("Getting basic information about simulations\n")
  # Open one representative file to read the time-step and mass-bin axes
  sds.path<-paste("msds:nc?file=",resultsDir,"/",label,sims[1],cells[1],
                  ".nc",sep="")
  data<-open.sds(sds.path)
  allTimes<-get.sds(data,"Time step")
  if (is.null(endTimeStep)) endTimeStep <- tail(allTimes,1)
  times <- (endTimeStep-numTimeSteps+1):endTimeStep
  massBins <- get.sds(data,"Mass bin")
  # Geometric midpoints of the mass bins on a log10 scale, bounded by the
  # smallest/largest cohort masses defined for the simulation
  massBinsMidPoints <- 10^(log10(c(minPossibleMass,massBins[-1]))+diff(log10(c(
    minPossibleMass,massBins[-1],maxPossibleMass)))/2)
  latitudes <- locations$Latitude
  if(!is.null(whichCells)){
    latitudes <- latitudes[whichCells]
  }
  # Cell areas (km^2) are used to convert abundances to densities
  cell_areas <- DegreeCellAreaKM(lat = latitudes,height = cellsize,width = cellsize)
  names(cell_areas) <- cells
  if (is.null(xlims)){
    xlims <- range(massBinsMidPoints)
  }
  .Log("Initializing plot\n")
  if(!is.null(outDir)){
    pdf(paste(outDir,plotName,".pdf",sep=""),
        width = 17.5/2.54,height = (5/2.54)*length(cells))
  }
  # One row of panels per cell, one column per variable
  par(mfrow=c(length(cells),3))
  par(las=1)
  par(tck=-0.01)
  par(mar=c(2.8,3.3,0.2,0.2))
  .Log("Plotting\n")
  ret <- lapply(cells,FUN=function(cell){
    # Create a list of matrices to hold the results for each specified variable
    allResults<-list()
    for (i in 1:length(vars)){
      allResults[[i]]<-array(data = NA,dim = c(length(sims),length(massBins),length(allTimes)))
    }
    names(allResults)<-vars
    # Loop over simulations in the ensemble
    s<-1
    for (sim in sims){
      sds.path<-paste("msds:nc?file=",resultsDir,"/",label,sim,cell,
                      ".nc",sep="")
      data<-open.sds(sds.path)
      # Populate the results matrices: stored values are logged, so
      # exponentiate, then divide by cell area to get densities.
      # (allResults[var][[1]] is a single-bracket subset followed by
      # [[1]], equivalent to allResults[[var]].)
      for (var in vars){
        allResults[var][[1]][s,,]<-exp(get.sds(data,paste(
          "Log ",var," in mass bins",sep="")))/cell_areas[cell]
      }
      s<-s+1
    }
    # Average first over the selected time steps, then over simulations
    resultsTimesMean <- lapply(allResults,function(x){
      return(apply(x[,,times,drop=FALSE],MARGIN=c(1,2),FUN=mean,na.rm=TRUE))
    })
    resultsSimMean <- lapply(resultsTimesMean,function(x){
      return(apply(x,MARGIN=2,FUN=mean,na.rm=TRUE))
    })
    # One log-log density curve per variable
    v <- 1
    r <- list()
    for (var in vars){
      par(mgp=c(1.2,0.2,0))
      plot(massBinsMidPoints,resultsSimMean[[var]],log="xy",type="l",
           col=cols[v],xlim=xlims,xlab="Current body mass (g)",
           yaxt="n",ylab=NA)
      par(mgp=c(2.5,0.2,0))
      axis(2)
      title(ylab=Hmisc::capitalize(var))
      v <- v+1
      r[[var]] <- data.frame(mass=massBinsMidPoints,
                             density=resultsSimMean[[var]])
    }
    return(r)
  })
  if(!is.null(outDir)) invisible(dev.off())
  if (returnResults){
    return(ret)
  }
}
|
ab0a3ed2c99c98f201b846b9e3573df323122fc6
|
5289bb29b4f7d11b01f327761ece631de13d8ac9
|
/R/helpers.R
|
71edf67f54cdcd6ffd367db35b2b07755631a5a4
|
[] |
no_license
|
mironcat/conpac
|
ec018115aed26a09afb520f05e4052703c15637a
|
b1d3f45ca2e3702cc5f89389ac5ab338117e737a
|
refs/heads/master
| 2023-04-30T11:23:53.140232
| 2021-05-21T20:00:05
| 2021-05-21T20:00:05
| 352,006,472
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,830
|
r
|
helpers.R
|
# Extract dated marker events from a formatted cpcht table.
#
# Keeps rows whose CLADE is '[dated' or 'AGE', renames FAD to DATLEV,
# splits the "NAME=AGE" EVENT strings into separate EVENT and AGE
# columns, and returns the markers sorted by decreasing age.
# NOTE(review): relies on dplyr/tidyr being attached by the caller and
# assumes columns CLADE, FAD, EVENT and ID exist — confirm upstream.
getDatedMarkers <- function(formattedcpcht) {
  dmarkers <- formattedcpcht %>%
    filter(CLADE=='[dated' | CLADE=='AGE' )%>%
    rename ( DATLEV=FAD)%>%
    separate( col=EVENT,sep = '=',into=c('EVENT','AGE'))%>% # split EVENT into EVENT/AGE using '=' as separator
    mutate( AGE=as.numeric(AGE))%>% # convert the AGE column to numeric
    arrange( desc(AGE)) %>% # sort by decreasing age
    select(ID, EVENT, DATLEV, AGE)
  return (dmarkers)
}
# Build a table of equal-width intervals covering [start, finish].
#
# @param start  first coordinate of the axis
# @param finish last coordinate of the axis
# @param step   interval width
# @param age    FALSE: ascending coordinates; TRUE: the axis is an age
#               scale running downwards, so the sequences are generated
#               with a negative step and the start/end roles are swapped
# @return data.frame with columns num (1-based interval index), st and
#         en (interval start/end coordinates) and mid (the midpoint)
get_intervals_by_params <- function(start, finish, step, age = FALSE) {
  if (age == FALSE) {
    st <- seq(start, finish - step, by = step)   # interval start coordinates
    en <- seq(start + step, finish, by = step)   # interval end coordinates
  }
  if (age == TRUE) {
    # descending (age) axis: step downwards and swap start/end roles
    en <- seq(start, finish - step, by = step * -1)
    st <- seq(start + step, finish, by = step * -1)
  }
  # BUG FIX: the original indexed with the undefined variable `numrow`,
  # so the function errored unless a global of that name happened to
  # exist; seq_along(st) is the intended row index.
  data.frame(num = seq_along(st), st = st, en = en, mid = (st + en) / 2)
}
# Assign occurrence/range rows to time intervals.
#
# For every interval row (columns num, st, en — as produced by
# get_intervals_by_params) this selects the rows of `dat` whose
# [min_ma, max_ma] range overlaps the interval [en, st], tags them with
# the interval number in a new `int` column, and stacks all tagged rows
# into one table.  A row overlapping several intervals is duplicated,
# once per interval.
# NOTE(review): relies on dplyr being attached by the caller and
# assumes `dat` has numeric min_ma/max_ma columns with max_ma >= min_ma
# — confirm upstream.
splitRangesToBins_by_int <- function(intervals, dat) {
  divdindat<-dat[0,]%>%mutate(int=NULL) # create an empty tibble with dat's columns
  for (i in 1:nrow(intervals)) { # iterate over the intervals
    int<-intervals[i,] # current interval
    st<-int$st
    en<-int$en
    # keep rows whose range overlaps [en, st]: either endpoint inside
    # the interval, or the range spanning the whole interval
    dat.tt<-dat%>%filter( (max_ma<=st & max_ma>=en) | (min_ma<=st & min_ma>=en) | (max_ma>=st & min_ma<=en) )
    dat.tt<-mutate(dat.tt,int=int$num) # tag rows with the interval number
    divdindat<- bind_rows(divdindat, dat.tt) # append the filtered rows
  }
  return (divdindat)
}
|
de1f93579dbbb3a0c60ee537a6a8884c114d931c
|
f43377cd5c921dd609770789e2be686cdd012917
|
/scripts/Thermal_reaction_norms.R
|
1f30c3d58cf8eedaeae6bde1258236c1cbd82acb
|
[] |
no_license
|
siyuChen540/PFT_thermal_response
|
0e5692f2984c8697501a73f551f32a1099816e67
|
74598788f0647bac2f9ec3492f1aa7d482616bbf
|
refs/heads/master
| 2023-08-29T02:59:51.547675
| 2021-09-14T16:10:54
| 2021-09-14T16:10:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,999
|
r
|
Thermal_reaction_norms.R
|
# Code for Anderson et al. (2021)
# Marine Phytoplankton Functional Types Exhibit Diverse Responses to Thermal Change
# Stephanie I. Anderson updated 09/14/2020
# Contains:
## Phytoplankton functional type thermal reaction norms (Figure 1)
## Exponential curve fits
## Q10 calculations (Table 1)
## Comparison of thermal dependencies (Figure 2)
## Extended Figures 3 & 5
# Load packages
library(ggplot2)
library(quantreg)
library(lme4)
library(dplyr)
library(data.table)
library(cowplot)
# Load data
isolates <- read.csv("data/derived_traits.csv")
rates <- read.csv("data/growth_rates.csv")
source("scripts/nbcurve.R")
source("scripts/custom_theme.R")
#########################################################################
##### Thermal reaction norms #####
diatom<-subset(isolates, group=="diatoms")
cyano<-subset(isolates, group=="cyanobacteria")
dino<-subset(isolates, group=="dinoflagellates")
coccolithophores<-subset(isolates, group=="coccolithophores")
x<-seq(-2, 40, by=0.01) # temperature sequence
########## Diatoms ##########
# Fit 99th quantile regression
bissd <- rq(ln.r~temperature, data=subset(rates, group=="diatoms"),tau=0.99,ci=T)
cf.bd <- coef(bissd) #extract coefficients
# Calculate confidence intervals
## He & Hu (2002) method = "mcmb" uses the Markov chain marginal bootstrap
QR.b <- boot.rq(cbind(1,subset(rates, group=="diatoms")$temperature),
subset(rates, group=="diatoms")$ln.r,tau=0.99, R=10000, method="mcmb")
ci_d <- t(apply(QR.b$B, 2, quantile, c(0.025,0.975)))
# plotting thermal performance curves
dev.off()
for(j in 1){
pdf("figures/Diatom_TPC.pdf", width = 5.8, height = 4)
plot.new()
plot.window(c(-2,40),c(0,3))
axis(1, 10*(-2:40), mgp=c(1,0.5,0))
axis(2, 0.5*(0:6), mgp=c(1,0.5,0))
box()
for(i in 1:nrow(diatom)){
o=diatom[i, "mu.c.opt.list"]
w=diatom[i, "mu.wlist"]
a=diatom[i, "mu.alist"]
b=diatom[i, "mu.blist"]
curve(nbcurve(x=x,opt=o,w=w,a=a,b=b),-2,40, col=alpha("black",alpha=0.6),ylim=c(0,3),
lty=1, add=T,xlab="", ylab="", cex=1.5)
}
# Add regression
tempd <- seq(min(subset(rates, group=="diatoms")$temperature),
max(subset(rates, group=="diatoms")$temperature), by=0.1)
y1 <- c(exp(ci_d[1,2]+ci_d[2,2]*tempd))
y2 <- c(exp(ci_d[1,1]+ci_d[2,1]*tempd))
polygon(c(tempd, rev(tempd)),c(y1, rev(y2)),col=alpha("#026cb1",alpha=0.2), border=FALSE)
curve(exp(cf.bd[[1]]+cf.bd[[2]]*x),min(subset(rates, group=="diatoms")$temperature),max(subset(rates, group=="diatoms")$temperature),add=T,col='#026cb1',lwd=2.5)
# Eppley, 1972
curve(0.59*exp(0.0633*x),-2,40,add=T,col='grey30',lwd=2.5, lty=2) # Eppley, 1972
# add plot labels
title(xlab=(expression(bold("Temperature (ºC)"))),
ylab=(expression(bold("Specific Growth Rate (d"^"-1" *")"))), line=1.5, cex.lab=1)
title(main=expression(bold("Diatoms")), line=-1, adj=0.05, cex=0.9)
text(-1.6, 2.4, paste0("n=", length(diatom$isolate.code)), adj=c(0,0))
text(-1.6, 2.1, paste0("N=", length(subset(rates, group=="diatoms")$isolate.code)), adj=c(0,0))
dev.off()
}
########## Cyanobacteria ##########
# Fit 99th quantile regression
bissc_b<-rq(ln.r~temperature, data=subset(rates, group=="cyanobacteria"),tau=0.99,ci=T)
cf.bcb<-coef(bissc_b) #extract coefficients
# Calculate confidence intervals
QR.c <- boot.rq(cbind(1,subset(rates, group=="cyanobacteria")$temperature),
subset(rates, group=="cyanobacteria")$ln.r,tau=0.99, R=10000, method="mcmb")
ci_cy<-t(apply(QR.c$B, 2, quantile, c(0.025,0.975)))
# plotting thermal performance curves
dev.off()
for(j in 1){
pdf("figures/Cyanobacteria_TPC.pdf", width = 5.8, height = 4)
plot.new()
plot.window(c(-2,40),c(0,3))
axis(1, 10*(-2:40), mgp=c(1,0.5,0))
axis(2, 0.5*(0:6), mgp=c(1,0.5,0))
box()
for(i in 1:nrow(cyano)){
o=cyano[i, "mu.c.opt.list"]
w=cyano[i, "mu.wlist"]
a=cyano[i, "mu.alist"]
b=cyano[i, "mu.blist"]
curve(nbcurve(x=x,opt=o,w=w,a=a,b=b),-2,40,ylim=c(0,3), col=alpha("black",alpha=0.6),lty=1,
xlab="", ylab="", add=T, cex=1.5)
}
# Add regression
tempc <- seq(min(subset(rates, group=="cyanobacteria")$temperature),max(subset(rates, group=="cyanobacteria")$temperature), by=0.1)
y1_cy <- c(exp(ci_cy[1,2]+ci_cy[2,2]*tempc))
y2_cy <- c(exp(ci_cy[1,1]+ci_cy[2,1]*tempc))
polygon(c(tempc, rev(tempc)),c(y1_cy, rev(y2_cy)),col=alpha("#ec3a25",alpha=0.2), border=FALSE)
curve(exp(cf.bcb[[1]]+cf.bcb[[2]]*x),min(subset(rates, group=="cyanobacteria")$temperature),max(subset(rates, group=="cyanobacteria")$temperature),add=T,col='#ec3a25',lwd=2.5)
# Eppley, 1972
curve(0.59*exp(0.0633*x),-2,40,add=T,col='grey30',lwd=2.5, lty=2) # Eppley, 1972
# add plot labels
title(xlab=(expression(bold("Temperature (ºC)"))),
ylab=(expression(bold("Specific Growth Rate (d"^"-1" *")"))), line=1.5, cex.lab=1)
title(main=expression(bold("Cyanobacteria")), line=-1, adj=0.05, cex=0.9)
text(-1.6, 2.4, paste0("n=", length(cyano$isolate.code)), adj=c(0,0))
text(-1.6, 2.1, paste0("N=", length(subset(rates, group=="cyanobacteria")$isolate.code)),adj=c(0,0))
dev.off()
}
########## Dinoflagellates ##########
# Fit 99th quantile regression
bissdi<-rq(ln.r~temperature, data=subset(rates, group=="dinoflagellates"),tau=0.99,ci=T)
cf.di<-coef(bissdi) #extract coefficients
# Calculate confidence intervals
QR.d <- boot.rq(cbind(1,subset(rates, group=="dinoflagellates")$temperature),
subset(rates, group=="dinoflagellates")$ln.r,tau=0.99, R=10000, method="mcmb")
ci_df<-t(apply(QR.d$B, 2, quantile, c(0.025,0.975)))
# plotting thermal performance curves
dev.off()
for(j in 1){
pdf("figures/Dinoflagellate_TPC.pdf", width = 5.8, height = 4)
plot.new()
plot.window(c(-2,40),c(0,3))
axis(1, 10*(-2:40), mgp=c(1,0.5,0))
axis(2, 0.5*(0:6), mgp=c(1,0.5,0))
box()
for(i in 1:nrow(dino)){
o=dino[i, "mu.c.opt.list"]
w=dino[i, "mu.wlist"]
a=dino[i, "mu.alist"]
b=dino[i, "mu.blist"]
curve(nbcurve(x=x,opt=o,w=w,a=a,b=b),-2,40,ylim=c(0,3),col=alpha("black",alpha=0.6), lty=1,
xlab="", ylab="", add=T, cex=1.5)
}
# Add regression
tempdi <- seq(min(subset(rates, group=="dinoflagellates")$temperature),
max(subset(rates, group=="dinoflagellates")$temperature), by=0.1)
y1_df <- c(exp(ci_df[1,2]+ci_df[2,2]*tempdi))
y2_df <- c(exp(ci_df[1,1]+ci_df[2,1]*tempdi))
polygon(c(tempdi, rev(tempdi)),c(y1_df, rev(y2_df)),col=alpha("#3ea127",alpha=0.2), border=FALSE)
curve(exp(cf.di[[1]]+cf.di[[2]]*x),min(subset(rates, group=="dinoflagellates")$temperature),
max(subset(rates, group=="dinoflagellates")$temperature),add=T,col='#3ea127',lwd=2.5)
# Eppley, 1972
curve(0.59*exp(0.0633*x),-2,40,add=T,col='grey30',lwd=2.5, lty=2) # Eppley, 1972
# add plot labels
title(xlab=(expression(bold("Temperature (ºC)"))),
ylab=(expression(bold("Specific Growth Rate (d"^"-1" *")"))), line=1.5, cex.lab=1)
title(main=expression(bold("Dinoflagellates")), line=-1, adj=0.05, cex=0.9)
text(-1.6, 2.4, paste0("n=", length(dino$isolate.code)), adj=c(0,0))
text(-1.6, 2.1, paste0("N=", length(subset(rates, group=="dinoflagellates")$isolate.code)), adj=c(0,0))
dev.off()
}
########## Coccolithophores ##########
# Fit 99th quantile regression
bissco<-rq(ln.r~temperature, data=subset(rates, group=="coccolithophores"),tau=0.99,ci=T) #weights=wts
cf.co<-coef(bissco) #extract coefficients
# Calculate confidence intervals
QR.c <- boot.rq(cbind(1,subset(rates, group=="coccolithophores")$temperature),
subset(rates, group=="coccolithophores")$ln.r,tau=0.99, R=10000, method="mcmb")
ci_co<-t(apply(QR.c$B, 2, quantile, c(0.025,0.975)))
# plotting thermal performance curves
dev.off()
for(j in 1){
pdf("figures/Coccolithophore_TPC.pdf", width = 5.8, height = 4)
plot.new()
plot.window(c(-2,40),c(0,3))
axis(1, 10*(-2:40), mgp=c(1,0.5,0))
axis(2, 0.5*(0:6), mgp=c(1,0.5,0))
box()
for(i in 1:nrow(coccolithophores)){
o=coccolithophores[i, "mu.c.opt.list"]
w=coccolithophores[i, "mu.wlist"]
a=coccolithophores[i, "mu.alist"]
b=coccolithophores[i, "mu.blist"]
curve(nbcurve(x=x,opt=o,w=w,a=a,b=b),-2,40,ylim=c(0,3), col=alpha("black",alpha=0.6), lty=1, add=T,
xlab="", ylab="", cex=1.5)
}
# Add regression
tempco<-seq(min(subset(rates, group=="coccolithophores")$temperature),
max(subset(rates, group=="coccolithophores")$temperature), by=0.1)
y1_co <- c(exp(ci_co[1,2]+ci_co[2,2]*tempco))
y2_co <- c(exp(ci_co[1,1]+ci_co[2,1]*tempco))
polygon(c(tempco, rev(tempco)),c(y1_co, rev(y2_co)),col=alpha("orange",alpha=0.2), border=FALSE)
curve(exp(cf.co[[1]]+cf.co[[2]]*x),min(subset(rates, group=="coccolithophores")$temperature),
max(subset(rates, group=="coccolithophores")$temperature),add=T,col='orange',lwd=2.5)
# Eppley, 1972
curve(0.59*exp(0.0633*x),-2,40,add=T,col='grey30',lwd=2.5, lty=2) # Eppley, 1972
# add plot labels
title(xlab=(expression(bold("Temperature (ºC)"))),
ylab=(expression(bold("Specific Growth Rate (d"^"-1" *")"))), line=1.5, cex.lab=1)
title(main=expression(bold("Coccolithophores")), line=-1, adj=0.05, cex=0.9)
text(-1.6, 2.4, paste0("n=", length(coccolithophores$isolate.code)), adj=c(0,0))
text(-1.6, 2.1, paste0("N=", length(subset(rates, group=="coccolithophores")$isolate.code)), adj=c(0,0))
dev.off()
}
########## All PFTs ##########
## (Extended Figure 3)
# Fit 99th quantile regression
biss<-rq(ln.r~temperature, data=rates, tau=0.99,ci=T)
cf.b<-coef(biss) #extract coefficients
# Calculate confidence intervals
QR.all <- boot.rq(cbind(1,rates$temperature),
rates$ln.r,tau=0.99, R=10000, method="mcmb")
ci<-t(apply(QR.all$B, 2, quantile, c(0.025,0.975)))
# plotting thermal performance curves
dev.off()
for(j in 1){
pdf("figures/Extended_Figure3.pdf", width = 5.8, height = 4)
plot.new()
plot.window(c(-2,40),c(0,3))
axis(1, 10*(-2:40), mgp=c(1,0.5,0))
axis(2, 0.5*(0:6), mgp=c(1,0.5,0))
box()
for(i in 1:nrow(isolates)){
o=isolates[i, "mu.c.opt.list"]
w=isolates[i, "mu.wlist"]
a=isolates[i, "mu.alist"]
b=isolates[i, "mu.blist"]
curve(nbcurve(x=x,opt=o,w=w,a=a,b=b),-2,40,ylim=c(0,3), col=alpha("black",alpha=0.6), lty=1, add=T,
xlab="", ylab="", cex=1.5)
}
# Add regression
temp<-seq(min(rates$temperature),
max(rates$temperature), by=0.1)
y1 <- c(exp(ci[1,2]+ci[2,2]*temp))
y2 <- c(exp(ci[1,1]+ci[2,1]*temp))
polygon(c(temp, rev(temp)),c(y1, rev(y2)),col=alpha("orangered1",alpha=0.2), border=FALSE)
curve(exp(cf.b[[1]]+cf.b[[2]]*x),min(temp),max(temp),add=T,col='orangered1',lwd=2.5)
# Eppley, 1972
curve(0.59*exp(0.0633*x),-2,40,add=T,col='grey30',lwd=2.5, lty=2) # Eppley, 1972
# add plot labels
title(xlab=(expression(bold("Temperature (ºC)"))),
ylab=(expression(bold("Specific Growth Rate (d"^"-1" *")"))), line=1.5)
title(main=expression(bold("All PFTs")), line=-1, adj=0.05, cex=0.9)
text(-1.6, 2.4, paste0("n=", length(isolates$isolate.code)), adj=c(0,0))
text(-1.6, 2.1, paste0("N=", length(rates$isolate.code)), adj=c(0,0))
dev.off()
}
# ---- Q10 Temperature Coefficient & Activation Energy ----
# Boltzmann constant (eV/K)
k <- 8.617333262145*(10^(-5))
# Activation energy (eV) implied by an exponential growth-rate slope
# `b`, evaluated at a reference temperature of 273 K: Ea = b * k * T^2.
Ea <- function(b) {
  b * k * (273)^2
}
### Table 1 ###
group <- c('all.groups','coccolithophores','cyanobacteria', 'diatoms','dinoflagellates')
table1 <- as.data.frame(group)
# Number of unique isolates
table1$n <- rbind(length(isolates$isolate.code),length(coccolithophores$isolate.code),length(cyano$isolate.code),
length(diatom$isolate.code),length(dino$isolate.code))
# Number of growth rate measurements
table1$N <- rbind(length(rates$isolate.code),length(subset(rates, group=="coccolithophores")$isolate.code),
length(subset(rates, group=="cyanobacteria")$isolate.code),length(subset(rates, group=="diatoms")$isolate.code),
length(subset(rates, group=="dinoflagellates")$isolate.code))
#Coefficients for exponential curves (calculated above)
table1$a <- rbind(round(cf.b[[1]],3), round(cf.co[[1]],3), round(cf.bcb[[1]],3),
round(cf.bd[[1]],3), round(cf.di[[1]],3))
table1$a_ci <- rbind(paste0("[",round(ci[[1]],3),", ",round(ci[[3]],3),"]"),
paste0("[",round(ci_co[[1]],3),", ",signif(ci_co[[3]],3),"]"),
paste0("[",round(ci_cy[[1]],3),", ",round(ci_cy[[3]],3),"]"),
paste0("[",round(ci_d[[1]],3),", ",round(ci_d[[3]],3),"]"),
paste0("[",round(ci_df[[1]],3),", ",round(ci_df[[3]],3),"]"))
table1$b <- rbind(round(cf.b[[2]],3), round(cf.co[[2]],3), round(cf.bcb[[2]],3),
round(cf.bd[[2]],3), round(cf.di[[2]],3))
table1$b_ci <- rbind(paste0("[",round(ci[[2]],3),", ",round(ci[[4]],3),"]"),
paste0("[",round(ci_co[[2]],3),", ",round(ci_co[[4]],3),"]"),
paste0("[",round(ci_cy[[2]],3),", ",round(ci_cy[[4]],3),"]"),
paste0("[",round(ci_d[[2]],3),", ",round(ci_d[[4]],3),"]"),
paste0("[",round(ci_df[[2]],3),", ",round(ci_df[[4]],3),"]"))
# calculated variables
table1$intercept = exp(table1$a) # y-intercept
table1$int_ci <- rbind(paste0("[",round(exp(ci[[1]]),3),", ",round(exp(ci[[3]]),3),"]"),
paste0("[",round(exp(ci_co[[1]]),3),", ",signif(exp(ci_co[[3]]),3),"]"),
paste0("[",round(exp(ci_cy[[1]]),3),", ",round(exp(ci_cy[[3]]),3),"]"),
paste0("[",round(exp(ci_d[[1]]),3),", ",round(exp(ci_d[[3]]),3),"]"),
paste0("[",round(exp(ci_df[[1]]),3),", ",round(exp(ci_df[[3]]),3),"]"))
table1$Q10 = exp(table1$b*10) # Q10
table1$Ea = Ea(table1$b) # acivation energy
table1$umax20 = exp(table1$a+table1$b*20) # maximum growth at 20ºC
write.csv(table1, "output/table1.csv")
###### Extended Data Figure 5 ######
cocco<-subset(rates, group =='coccolithophores')
cyano<-subset(rates, group =='cyanobacteria')
diatoms<-subset(rates, group =='diatoms')
dinos<-subset(rates, group =='dinoflagellates')
pdf("figures/Extended_Figure5.pdf", width = 7.2, height = 4.5)
x=tempco
par(mfrow=c(2,2), mar=c(0.5,3.5, 3.5, 0.5))
plot(cocco$temperature, cocco$r, xlim=c(-2, 40), ylim=c(0,3), xaxt='n',xlab='', ylab='', pch=20, col=alpha("black", 0.4))
axis(side=1,labels=F)
curve(exp(cf.co[[1]]+cf.co[[2]]*x),min(x), max(x),add=T,col='orange',lwd=2.5)
polygon(c(tempco, rev(tempco)),c(y1_co, rev(y2_co)),col=alpha("orange",alpha=0.2), border=FALSE)
curve(0.59*exp(0.0633*x),-2,40,add=T,col='grey30',lwd=2.5, lty=2)
title(main=expression(bold("Coccolithophores")), line=-1, adj=0.05, cex=1)
text(-1.6, 2.3, paste0("n=", length(unique(coccolithophores$isolate.code))), adj=c(0,0))
text(-1.6, 1.9, paste0("N=", length(subset(rates, group=="coccolithophores")$isolate.code)), adj=c(0,0))
x=tempc
par(mar=c(0.5,0.5,3.5,3.5))
plot(cyano$temperature, cyano$r, xlim=c(-2, 40), ylim=c(0,3), xaxt='n', yaxt='n',xlab='', ylab='', pch=20, col=alpha("black", 0.4))
axis(side=1,labels=F)
curve(exp(cf.bcb[[1]]+cf.bcb[[2]]*x),min(x), max(x),add=T,col=colors[2], lwd=2.5)
polygon(c(tempc, rev(tempc)),c(y1_cy, rev(y2_cy)),col=alpha(colors[2], alpha=0.2), border=FALSE)
curve(0.59*exp(0.0633*x),-2,40,add=T,col='grey30',lwd=2.5, lty=2)
title(main=expression(bold("Cyanobacteria")), line=-1, adj=0.05, cex=1)
text(-1.6, 2.3, paste0("n=", length(unique(cyano$isolate.code))), adj=c(0,0))
text(-1.6, 1.9, paste0("N=", length(subset(rates,group=="cyanobacteria")$isolate.code)), adj=c(0,0))
x=tempd
par(mar=c(3.5, 3.5,0.5,0.5))
plot(diatoms$temperature, diatoms$r, xlim=c(-2, 40), ylim=c(0,3),
xlab='', ylab='', pch=20, col=alpha("black", 0.4))
curve(exp(cf.bd[[1]]+cf.bd[[2]]*x),min(x), max(x),add=T,col=colors[3],lwd=2.5)
polygon(c(tempd, rev(tempd)),c(y1, rev(y2)),col=alpha(colors[3],alpha=0.2), border=FALSE)
curve(0.59*exp(0.0633*x),-2,40,add=T,col='grey30',lwd=2.5, lty=2)
title(main=expression(bold("Diatoms")), line=-1, adj=0.05, cex=1)
text(-1.6, 2.3, paste0("n=", length(unique(diatoms$isolate.code))), adj=c(0,0))
text(-1.6, 1.9, paste0("N=", length(subset(rates, group=="diatoms")$isolate.code)), adj=c(0,0))
x=tempdi
par(mar=c(3.5, 0.5,0.5,3.5))
plot(dinos$temperature, dinos$r, xlim=c(-2, 40), ylim=c(0,3),
xlab='', ylab='', yaxt='n',pch=20, col=alpha("black", 0.4))
curve(exp(cf.di[[1]]+cf.di[[2]]*x),min(x), max(x),add=T,col=colors[4],lwd=2.5)
polygon(c(tempdi, rev(tempdi)),c(y1_df, rev(y2_df)),col=alpha(colors[4],alpha=0.2), border=FALSE)
curve(0.59*exp(0.0633*x),-2,40,add=T,col='grey30',lwd=2.5, lty=2)
title(main=expression(bold("Dinoflagellates")), line=-1, adj=0.05, cex=1)
text(-1.6, 2.3, paste0("n=", length(unique(dinos$isolate.code))), adj=c(0,0))
text(-1.6, 1.9, paste0("N=", length(subset(rates, group=="dinoflagellates")$isolate.code)), adj=c(0,0))
mtext(expression(bold("Temperature (ºC)")), side = 1, outer = TRUE, line = -1.5)
mtext(expression(bold("Specific Growth Rate (d"^"-1" *")")), side = 2, outer = TRUE, line = -1.5)
dev.off()
#########################################################################
###### Calculate the Rate of Change for Reaction Norms #########################
# calculate the change in growth estimating change from lower 20% to max growth
# Rate of change of each isolate's thermal reaction norm: find the temperature
# on either side of Topt where growth falls to 20% of mu_max, then compute the
# mean slope (growth per degree C) of the rising and falling limbs.
n.iso <- nrow(isolates)
# Preallocate instead of growing vectors inside the loop.
growth.change.inc <- numeric(n.iso)
growth.change.dec <- numeric(n.iso)
lower <- numeric(n.iso)
upper <- numeric(n.iso)
for(i in seq_len(n.iso)){
  # Fitted thermal-curve parameters for this isolate (passed to nbcurve).
  o <- isolates[i, "mu.c.opt.list"]
  w <- isolates[i, "mu.wlist"]
  a <- isolates[i, "mu.alist"]
  b <- isolates[i, "mu.blist"]
  # Renamed from min/max so base::min()/max() are not shadowed.
  tmin <- isolates[i, "tmin"]
  tmax <- isolates[i, "tmax"]
  mumax <- isolates[i, "mu.g.opt.val.list"]
  topt <- isolates[i, "mu.g.opt.list"]
  # Growth threshold: 20% of the maximum rate.
  target <- mumax * 0.20
  x1 <- seq(tmin, topt, by = 0.001)
  x2 <- seq(topt, tmax, by = 0.001)
  # which.min() yields a single index even on ties; the original
  # which(abs(...) == min(abs(...))) could return several values and
  # misalign rows when assigned back to isolates.
  lowerbound <- x1[which.min(abs(nbcurve(x1, o, w, a, b) - target))]
  upperbound <- x2[which.min(abs(nbcurve(x2, o, w, a, b) - target))]
  lower[i] <- lowerbound
  upper[i] <- upperbound
  # Average slope from the 20% bound up to the optimum (and back down).
  growth.change.inc[i] <- (mumax - target) / (topt - lowerbound)
  growth.change.dec[i] <- abs((target - mumax) / (upperbound - topt))
}
isolates$lowerbound = lower
isolates$upperbound = upper
isolates$growth.change.inc = growth.change.inc
isolates$growth.change.dec = growth.change.dec
write.csv(isolates, "output/Isolate_growth_bounds.csv")
##### Figure 2A: Exponential curve comparison ####
# Inset: an example thermal performance curve with the mu_max and 20%-of-max
# reference lines used to define the rate-of-change metric.
inset<- ggplot(data.frame(x = c(-2, 25)), aes(x = x)) +
  stat_function(fun = nbcurve, args=list(8.19,31.2,0.21,0.1), color="grey30")+
  ylim(0, 0.8)+theme_classic()+
  geom_hline(yintercept=0.1566, linetype=2, color="grey60")+
  geom_hline(yintercept=0.1566*5, linetype=2, color="grey60")+
  theme(axis.title = element_blank(),
        axis.text = element_blank(), axis.ticks = element_blank(),
        plot.margin = unit(c(0, 0.4, 0, 4), "lines"),
        plot.background = element_blank())+
  geom_text(x=-9, y=0.1566, label=expression(µ["20%max"]),
            color="black", size=3)+
  geom_text(x=-7, y=0.1566*5, label=expression(µ["max"]),
            color="black", size=3)+
  coord_cartesian(clip = "off")
inset
# Paired boxplots per group: rising-limb (white) vs falling-limb (grey) rate
# of change, with the inset explaining the metric.
rofc<-
  ggplot(data=isolates)+
  geom_boxplot(aes(x=group, y=growth.change.inc), position=position_nudge(x=-0.22),width=0.4, color="black")+
  geom_boxplot(aes(x=group, y=growth.change.dec), fill="grey60",color="black", width=0.4, position=position_nudge(x=0.22))+
  labs(x="", y=expression(bold(paste( "Change in Performance (|", "µ", "|/ºC)"))))+
  scale_color_manual(values=colors)+
  scale_fill_manual(values=colors)+
  guides(fill=FALSE, color=FALSE)+
  scale_x_discrete(labels=c("CO", "CY", "DT", "DF"))+
  annotation_custom(
    ggplotGrob(inset),
    xmin = 1.75, xmax =Inf, ymin = 0.4, ymax = 0.6)+
  y  # presumably a shared ggplot theme object defined earlier — TODO confirm
rofc
##### Figure 2B: Exponential curve comparison ####
# Color palette
colors <- c("orange", "#ec3a25","#026cb1","#3ea127","grey30", "#033175", "#84e04c", "#fd843d" ) # primary colors
x=seq(-2, 40, by=0.1)
# Group-level exponential envelopes (dQ/cQ/coQ/diQ are presumably the fitted
# group curves defined earlier — TODO confirm) with bootstrap ribbons,
# compared against the Eppley (1972) envelope (ep, dashed).
envel<- ggplot(data.frame(x = c(-2, 35)), aes(x = x)) +
  stat_function(fun = dQ, aes(color="Diatoms", linetype="Diatoms"), lwd=0.8, xlim = c(min(tempd), max(tempd)))+
  geom_ribbon(data=data.frame(cbind(tempd, y1, y2)), aes(ymax=y2, ymin=y1, x=tempd), alpha=0.1, fill=colors[[3]])+
  stat_function(fun = cQ, aes(color="Cyanobacteria", linetype="Cyanobacteria"), lwd=0.8, xlim = c(min(tempc), max(tempc)))+
  geom_ribbon(data=data.frame(cbind(tempc, y1_cy, y2_cy)), aes(ymax=y2_cy, ymin=y1_cy, x=tempc), alpha=0.1, fill=colors[[2]])+
  stat_function(fun = coQ, aes(color="Coccolithophores", linetype="Coccolithophores"), lwd=0.8, xlim = c(min(tempco), max(tempco)))+
  geom_ribbon(data=data.frame(cbind(tempco, y1_co, y2_co)), aes(ymax=y2_co, ymin=y1_co, x=tempco), alpha=0.1, fill=colors[[1]])+
  stat_function(fun = diQ, aes(color="Dinoflagellates", linetype="Dinoflagellates"), lwd=0.8, xlim = c(min(tempdi), max(tempdi)))+
  geom_ribbon(data=data.frame(cbind(tempdi, y1_df, y2_df)), aes(ymax=y2_df, ymin=y1_df, x=tempdi), alpha=0.1, fill=colors[[4]])+
  stat_function(fun = ep, aes(color="Eppley (1972)", linetype="Eppley (1972)"), lwd=0.8)+
  labs(x="Temperature (ºC)", y=expression(bold("Specific Growth Rate (d"^"-1" *")")), color="")+
  scale_colour_manual("Groups", values=colors)+
  scale_linetype_manual(values=c(1,1,1,1,2), guide=FALSE)+
  guides(color=guide_legend(override.aes = list(linetype = c(1,1,1,1,2))))+ #overrides color so legend lines are dashed
  coord_cartesian(ylim = c(0.1,2.9))+
  y+theme(legend.position =c(0.20, 0.825), legend.text = element_text(size=9), legend.title = element_blank())
envel
### Save Figure 2 ###
# Two-panel composite: (a) rate-of-change boxplots, (b) growth envelopes.
plot_grid(rofc, envel, labels =letters[1:2])
ggsave("figures/Figure2.pdf", width = 8.3, height = 4.2)
|
45de62a61cde9f02dc798b8db55def235f65b0d6
|
03cb2887a235ba8038a8244f6a144af06a653e60
|
/R/get_peaks_chromatograms.R
|
f4e3750bd8c7ce65f9fc35ace93c047107a26c9a
|
[
"MIT"
] |
permissive
|
Roestlab/DrawAlignR
|
8724825fbf266d682183370988bc1bebcdc0f028
|
14990d47a6212e47a68327200c73714a6db15c78
|
refs/heads/master
| 2020-11-25T12:03:40.924982
| 2020-04-09T03:45:33
| 2020-04-09T03:45:33
| 228,648,957
| 5
| 0
|
MIT
| 2020-04-09T03:45:34
| 2019-12-17T15:44:30
|
R
|
UTF-8
|
R
| false
| false
| 8,942
|
r
|
get_peaks_chromatograms.R
|
#' Extract XICs of all transitions requested in chromIndices.
#'
#' Extracts XICs using mz object. Generally Savitzky–Golay filter is used, however, filter can be turned-off as well.
#' @author Shubham Gupta, \email{shubh.gupta@mail.utoronto.ca}
#'
#' ORCID: 0000-0003-3500-8152
#'
#' License: (c) Author (2019) + MIT
#' Date: 2019-12-13
#' @param mz (mzRpwiz object)
#' @param chromIndices (vector of Integers) Indices of chromatograms to be extracted.
#' @param XICfilter (string) This must be one of the strings "sgolay", "none".
#' @param SgolayFiltOrd (integer) It defines the polynomial order of filer.
#' @param SgolayFiltLen (integer) Must be an odd number. It defines the length of filter.
#' @return A list of data-frames. Each data frame has elution time and intensity of fragment-ion XIC.
#' @importFrom parallel mclapply detectCores
#' @examples
#' dataPath <- system.file("extdata", package = "DIAlignR")
#' mzmlName<-paste0(dataPath,"/mzml/hroest_K120809_Strep10%PlasmaBiolRepl2_R04_SW_filt.chrom.mzML")
#' mz <- mzR::openMSfile(mzmlName, backend = "pwiz")
#' chromIndices <- c(37L, 38L, 39L, 40L, 41L, 42L)
#' \dontrun{
#' XIC_group <- extractXIC_group(mz, chromIndices, SgolayFiltOrd = 4, SgolayFiltLen = 13)
#' }
extractXIC_group <- function(mz, chromIndices, XICfilter = "sgolay", SgolayFiltOrd = 4, SgolayFiltLen = 9){
  # Dispatch on the chromatogram source: an mzRpwiz handle goes through mzR,
  # while a data frame is treated as an sqMass pointer table for mstools.
  if ("mzRpwiz" %in% class(mz)) {
    message("[DrawAlignR::extractXIC_group] Calling mzR to extract XICs\n")
    smooth <- (XICfilter == "sgolay")
    XIC_group <- lapply(chromIndices, function(idx) {
      chrom <- mzR::chromatograms(mz, idx)
      # Optionally smooth the intensity trace with a Savitzky-Golay filter.
      if (smooth) {
        chrom[, 2] <- signal::sgolayfilt(chrom[, 2], p = SgolayFiltOrd, n = SgolayFiltLen)
      }
      chrom
    })
  } else if (is.data.frame(mz)) { # TODO Need to add a better check.
    message("[DrawAlignR::extractXIC_group] Calling mstools to extract XICs\n")
    XIC_group <- mstools::getChromatogramDataPoints_( filename = ".sqMass", chromIndices, id_type = "chromatogramIndex", name_time = "time", name_intensity = "paste0('X', data_row$FRAGMENT_ID)", mzPntrs = mz, SgolayFiltOrd = SgolayFiltOrd, SgolayFiltLen = SgolayFiltLen )
    names(XIC_group) <- NULL
  }
  message(sprintf("[DrawAlignR::extractXIC_group] Lenth of XIC_group: %s\n", length(XIC_group)))
  return(XIC_group)
}
#' Extract XICs of all analytes from oswFiles
#'
#' For all the analytes requested, it fetches chromatogram indices from oswFiles and
#' extract chromatograms from mzML files.
#'
#' @author Shubham Gupta, \email{shubh.gupta@mail.utoronto.ca}
#'
#' ORCID: 0000-0003-3500-8152
#'
#' License: (c) Author (2019) + MIT
#' Date: 2019-12-13
#' @param dataPath (char) path to mzml and osw directory.
#' @param runs (vector of string) names of mzML files without extension. Names of the vector must be a combination of "run" and an iteger e.g. "run2".
#' @param oswFiles (list of data-frames) it is output from getOswFiles function.
#' @param analytes (string) analyte is as PRECURSOR.GROUP_LABEL or as PEPTIDE.MODIFIED_SEQUENCE and PRECURSOR.CHARGE from osw file.
#' @param XICfilter (string) this must be one of the strings "sgolay", "none".
#' @param SgolayFiltOrd (integer) it defines the polynomial order of filer.
#' @param SgolayFiltLen (integer) must be an odd number. It defines the length of filter.
#' @param mzPntrs A list of mzRpwiz.
#' @return A list of list of data-frames. Each data frame has elution time and intensity of fragment-ion XIC.
#'
#' @seealso \code{\link{getOswFiles}, \link{getRunNames}}
#' @examples
#' dataPath <- system.file("extdata", package = "DIAlignR")
#' filenames <- DIAlignR::getRunNames(dataPath = dataPath)
#' runs <- c("run1" = "hroest_K120809_Strep0%PlasmaBiolRepl2_R04_SW_filt",
#' "run0" = "hroest_K120808_Strep10%PlasmaBiolRepl1_R03_SW_filt")
#' oswFiles <- DIAlignR::getOswFiles(dataPath, filenames)
#' analytes <- "QFNNTDIVLLEDFQK_3"
#' XICs <- getXICs4AlignObj(dataPath, runs, oswFiles, analytes)
#' @export
getXICs4AlignObj <- function(dataPath, runs, oswFiles, analytes, XICfilter = "sgolay",
                             SgolayFiltOrd = 4, SgolayFiltLen = 9, mzPntrs = NULL){
  # Open chromatogram file handles unless the caller supplied them.
  if (is.null(mzPntrs)) {
    mzPntrs <- getMZMLpointers(dataPath, runs)
  }
  XICs <- vector("list", length(runs))
  names(XICs) <- names(runs)
  for (i in seq_along(runs)) {
    runname <- names(runs)[i]
    message("Fetching XICs from ", runname, " ", runs[[runname]])
    # One XIC group per analyte; NULL where chromatogram indices are missing.
    perAnalyte <- lapply(analytes, function(analyte) {
      chromIndices <- selectChromIndices(oswFiles, runname = runname, analyte = analyte)
      if (is.null(chromIndices)) {
        warning("Chromatogram indices for ", analyte, " are missing in ", runs[[runname]])
        message("Skipping ", analyte)
        return(NULL)
      }
      extractXIC_group(mzPntrs[[runname]]$mz, chromIndices, XICfilter, SgolayFiltOrd, SgolayFiltLen)
    })
    names(perAnalyte) <- analytes
    XICs[[i]] <- perAnalyte
  }
  rm(mzPntrs)
  XICs
}
#' Get XICs of all analytes
#'
#' For all the analytes requested in runs, it first creates oswFiles, then, fetches chromatogram indices from oswFiles and
#' extract chromatograms from mzML files.
#'
#' @importFrom dplyr %>%
#' @author Shubham Gupta, \email{shubh.gupta@mail.utoronto.ca}
#'
#' ORCID: 0000-0003-3500-8152
#'
#' License: (c) Author (2019) + MIT
#' Date: 2019-12-13
#'
#' @param analytes (string) An analyte is as PRECURSOR.GROUP_LABEL or as PEPTIDE.MODIFIED_SEQUENCE and PRECURSOR.CHARGE from osw file.
#' @param runs (A vector of string) Names of mzml file without extension. Vector must have names as shown in the example.
#' @param dataPath (char) Path to mzml and osw directory.
#' @param maxFdrQuery (numeric) A numeric value between 0 and 1. It is used to filter features from osw file which have SCORE_MS2.QVALUE less than itself.
#' @param XICfilter (string) This must be one of the strings "sgolay", "none".
#' @param SgolayFiltOrd (integer) It defines the polynomial order of filer.
#' @param SgolayFiltLen (integer) Must be an odd number. It defines the length of filter.
#' @param runType (char) This must be one of the strings "DIA_proteomics", "DIA_Metabolomics".
#' @param oswMerged (logical) TRUE for experiment-wide FDR and FALSE for run-specific FDR by pyprophet.
#' @param nameCutPattern (string) regex expression to fetch mzML file name from RUN.FILENAME columns of osw files.
#' @param analyteInGroupLabel (logical) TRUE for getting analytes as PRECURSOR.GROUP_LABEL from osw file.
#' @param mzPntrs A list of mzRpwiz.
#' @return A list of list. Each list contains XIC-group for that run. XIC-group is a list of dataframe that has elution time and intensity of fragment-ion XIC.
#'
#' @seealso \code{\link{getOswFiles}, \link{getRunNames}}
#' @examples
#' dataPath <- system.file("extdata", package = "DIAlignR")
#' runs <- c("hroest_K120808_Strep10%PlasmaBiolRepl1_R03_SW_filt",
#' "hroest_K120809_Strep10%PlasmaBiolRepl2_R04_SW_filt")
#' XICs <- getXICs(analytes = c("QFNNTDIVLLEDFQK_3"), runs = runs, dataPath = dataPath)
#' @export
getXICs <- function(analytes, runs, dataPath = ".", maxFdrQuery = 1.0, XICfilter = "sgolay",
                    SgolayFiltOrd = 4, SgolayFiltLen = 9, runType = "DIA_proteomics",
                    oswMerged = TRUE, nameCutPattern = "(.*)(/)(.*)", chrom_ext=".chrom.mzML", analyteInGroupLabel = FALSE, mzPntrs=NULL){
  # A Savitzky-Golay smoother requires an odd window length.
  if( (SgolayFiltLen %% 2) != 1){
    print("SgolayFiltLen can only be odd number")
    return(NULL)
  }
  # Get filenames from .merged.osw file and check if names are consistent between osw and mzML files.
  filenames <- getRunNames(dataPath = dataPath, oswMerged = oswMerged, nameCutPattern = nameCutPattern, chrom_ext = chrom_ext)
  filenames <- filenames[filenames$runs %in% runs,]
  # Get Chromatogram indices for each peptide in each run.
  oswFiles <- getOswFiles(dataPath, filenames, maxFdrQuery = maxFdrQuery, analyteFDR = 1.00,
                          oswMerged = oswMerged, analytes = analytes, runType = runType,
                          analyteInGroupLabel = analyteInGroupLabel)
  # Restrict to analytes that actually appear in the OSW results.
  refAnalytes <- getAnalytesName(oswFiles, commonAnalytes = FALSE)
  analytesFound <- intersect(analytes, refAnalytes)
  analytesNotFound <- setdiff(analytes, analytesFound)
  if(length(analytesNotFound)>0){
    # BUG FIX: paste(x, ", ") appended ", " to every element instead of
    # joining them; collapse produces one comma-separated string, and the
    # message now has a space before "not found".
    message("Analytes ", paste(analytesNotFound, collapse = ", "), " not found.")
  }
  ####################### Get XICs ##########################################
  runs <- filenames$runs
  names(runs) <- rownames(filenames)
  # Get Chromatogram for each peptide in each run.
  message("Fetching Extracted-ion chromatograms from runs")
  XICs <- getXICs4AlignObj(dataPath, runs, oswFiles, analytesFound, XICfilter,
                           SgolayFiltOrd, SgolayFiltLen, mzPntrs=mzPntrs)
  names(XICs) <- filenames$runs
  XICs
}
|
97ef015ad4866d02ced6a87d98624b5fa9f69bec
|
0dd9227755c5b154d2184e712d53f4bacc02305c
|
/man/wait_for_dir.Rd
|
33d96d4140194359249f93936428ef9650fb7056
|
[
"MIT"
] |
permissive
|
imbs-hl/MDRDist
|
811b68cad2877c83d6d4ac9c08e04a63141757b1
|
2aa6838aeeb6291b76971b34169230abcd28a909
|
refs/heads/master
| 2023-09-02T08:34:07.942115
| 2017-07-05T10:51:59
| 2017-07-05T10:51:59
| 85,298,779
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 604
|
rd
|
wait_for_dir.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/supporting_functions.R
\name{wait_for_dir}
\alias{wait_for_dir}
\title{Waiting until recently created directory appears}
\usage{
wait_for_dir(Dir, max_wait = 30, timeout = 1)
}
\arguments{
\item{Dir}{path to the directory which we are waiting for}
\item{max_wait}{maximum time to wait; an assertion is raised if the directory does not appear within this limit}
\item{timeout}{timestep between two attempts to look for the dir}
}
\value{
nothing of substance, but certainty that the directory is accessible
}
\description{
Waiting until recently created directory appears
}
|
4897d314e934878e15c20846bd738a1efdde53ec
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ahaz/examples/sorlie.Rd.R
|
86690da2a25ac1a09079096ce7207a55f685e231
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 147
|
r
|
sorlie.Rd.R
|
library(ahaz)
### Name: sorlie
### Title: Sorlie gene expressions
### Aliases: sorlie
### Keywords: datasets
### ** Examples
# Load the bundled Sorlie gene-expression dataset into the workspace.
data(sorlie)
e3718a21d9659ba4e70923f1dee4771c006c84d6
|
99fd08dac3a1bb59df57983ee5c737fc3fa3d721
|
/main__summarize_industry_assignment.R
|
060892eb917be681c2bc0705f1b5c1188d33f6d5
|
[] |
no_license
|
nareal/CRSP-Data-Summary-Statistics-by-Industry-
|
b01307836e1237b6460263e874e2c4687d2d816d
|
31bd5652d2d64a29b27eb3cf63ba1cdeb2773ccd
|
refs/heads/master
| 2021-01-15T22:41:40.908879
| 2011-11-22T02:06:11
| 2011-11-22T02:06:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 743
|
r
|
main__summarize_industry_assignment.R
|
# NOTE(review): rm(list = ls()) wipes whatever workspace this script is run
# in; prefer starting from a fresh R session instead.
rm(list=ls())
library(foreign)
library(reshape)
library(plyr)
library(matlab)
library(rjson)
library(RColorBrewer)
library(ggplot2)
library(tikzDevice)
library(classInt)
# The plot_* helpers called below are defined in this sourced file.
source("summarize_industry_assignment.R")
## plot_number_of_firms()
## plot_number_of_firms_per_industry(industry_classification = "mg1999")
## plot_number_of_firms_per_industry(industry_classification = "ff1988")
## plot_number_of_firms_per_sub_industry()
## plot_distribution_of_excess_returns_by_industry(industry_classification = "mg1999")
## plot_distribution_of_excess_returns_by_industry(industry_classification = "ff1988")
## plot_market_cap_by_industry(industry_classification = "mg1999")
# Only the Fama-French (1988) market-cap plot is currently enabled.
plot_market_cap_by_industry(industry_classification = "ff1988")
|
a3564286c60930ab1e531d2f2475aaae0649042f
|
d690af5c19bb0d6b723e1b8f1687794b4e0f8830
|
/tests/testthat/test-stats-nls.R
|
37c2e5331e7f4461adb681eeed90be2f9e69bb15
|
[
"MIT"
] |
permissive
|
roldanalex/safepredict
|
03113c5095518fef7c007c7e98342ecf15c0f9dc
|
05c3b9c8770583221a73b7b68f88805402630f5f
|
refs/heads/master
| 2021-10-09T11:32:24.866936
| 2018-12-27T06:11:45
| 2018-12-27T06:11:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 831
|
r
|
test-stats-nls.R
|
context("test-stats-nls")
# Shared fixture: asymptotic-regression fit on the built-in BOD dataset,
# reused by every test below.
fit <- nls(demand ~ SSasympOrig(Time, A, lrc), data = BOD)
test_that("function signature", {
  # check_safepredict_signature is a package-level helper; presumably it
  # validates the safe_predict method's argument list — TODO confirm.
  check_safepredict_signature(safe_predict.nls)
})
test_that("input validation", {
  # Missing new_data must raise R's standard missing-argument error.
  expect_error(
    safe_predict(fit),
    "argument \"new_data\" is missing, with no default"
  )
  # Unknown prediction types are rejected with the list of valid types.
  expect_error(
    safe_predict(fit, BOD, type = "infinite fun space"),
    "`type` should be one of: \"response\""
  )
  # Unused dots should warn rather than be silently ignored.
  expect_warning(
    safe_predict(fit, BOD, bad_arg = 0.2),
    "Some components of ... were not used: bad_arg"
  )
})
## checks on returned predictions
test_that("default type", {
  # With no type given, predictions default to type = "response".
  default_preds <- safe_predict(fit, BOD)
  check_predict_output(default_preds, BOD, type = "response")
})
test_that("type = \"response\"", {
  check_predict(safe_predict.nls, fit, BOD, "demand", type = "response")
})
8c449b7631a711f8eabd3c0a2035b4fb04f6a40c
|
f42a7b41b6acd4dac40234ff2d939c938f6f2d53
|
/man/earlyReduction.Rd
|
3f542af466342771deeae151c78f2bb1cfb2de65
|
[
"MIT"
] |
permissive
|
ttriche/bayesCC
|
2927f9228782b9c9814f3bdf5e31c36a50e5e794
|
627a88a5af1b07b4923ecba42174d4c148df29c2
|
refs/heads/master
| 2023-06-26T19:57:56.436883
| 2023-05-11T18:51:53
| 2023-05-11T18:51:53
| 44,848,488
| 24
| 4
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,137
|
rd
|
earlyReduction.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/earlyReduction.R
\name{earlyReduction}
\alias{earlyReduction}
\title{do dimension reduction (via NMF or SVD) before Bayesian consensus clustering}
\usage{
earlyReduction(
mat,
how = c("NMF", "SVD"),
mat2 = NULL,
joint = FALSE,
findK = FALSE,
howNA = c("both", "column", "row"),
viaCV = FALSE,
pctNA = 0.2
)
}
\arguments{
\item{mat}{a matrix to decompose (columns are samples, rows are features)}
\item{how}{one of "NMF" or "SVD"; SVD is likely to be much faster}
\item{mat2}{a 2nd matrix to reduce (optional; for joint factorization)}
\item{joint}{if using NMF, should joint factorization be attempted?}
\item{findK}{if using marginal NMF, should the optimal rank(s) be sought?}
\item{howNA}{for rank finding, add NAs column-wise, row-wise, or both?}
\item{viaCV}{for rank finding, should five-fold CV be used when imputing?}
\item{pctNA}{for rank finding, what fraction of the data should be NA'ed?}
}
\value{
a list with W, H, and K for each matrix if using NMF,
or a list with D, U, and V for each matrix if using SVD.
}
\description{
if NMF, the rank can be estimated by 5xCV on NAs, though this can be slow.
the underlying rationale is that whatever rank K best recovers artificially
missing data (knocked out column-wise, row-wise, or randomly across both)
is the best estimable rank we are likely to recover. In order to stabilize
the estimate of K, we can run 5x cross-validation and rotate the NAs (set at
a default of 20% of the entries to facilitate sampling without replacement).
}
\details{
joint NMF can also be requested (as in Wang et al., Bioinformatics 2015,
doi: 10.1093/bioinformatics/btu679) but in this case the ranks can only be
estimated marginally. Joint rank estimation (and, by extension, optimal
joint imputation for linked views) is an open research topic as best as we
can tell. if anyone wants to send a patch we will gladly apply it and a
great many people will probably start using it thereafter.
if SVD, the rank will be whatever the data supports (i.e. min(nrow, ncol)).
}
|
6b206a2af2f52692bc67107fc872dd6c6afe656b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/letsR/examples/lets.subsetPAM.Rd.R
|
b2532f0fd3fe3066d62a3b66377ae9cbd0c69ba7
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 528
|
r
|
lets.subsetPAM.Rd.R
|
library(letsR)
### Name: lets.subsetPAM
### Title: Subset a PresenceAbsence object based on species names
### Aliases: lets.subsetPAM
### ** Examples
## Not run:
##D data(PAM)
##D # PAM before subset
##D plot(PAM, xlab = "Longitude", ylab = "Latitude",
##D main = "Phyllomedusa species richness")
##D
##D # Subset PAM to the first 20 species
##D PAMsub <- lets.subsetPAM(PAM, PAM[[3]][1:20])
##D plot(PAMsub, xlab = "Longitude", ylab = "Latitude",
##D main = "Phyllomedusa species richness")
## End(Not run)
|
29d61a683e3c98a438e52dff949f826071235010
|
9cfbe86f685f8ef280899ca97bc425d1bfda5564
|
/0323_in_class.R
|
a087bb791dd652a7b3dc0ee002f52b5fc139178c
|
[] |
no_license
|
kisumzzz/DataAnalyticsSpring2020
|
20f3b6fa7fc112efcf5d6731d6cd024e17a61a2b
|
deb9496c9156991e81ab88091f5f13bfa55ed974
|
refs/heads/master
| 2020-12-21T11:52:53.424495
| 2020-05-05T03:06:49
| 2020-05-05T03:06:49
| 236,422,294
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 783
|
r
|
0323_in_class.R
|
# PCA walkthrough: USArrests, iris, and Boston housing datasets.
data("USArrests")
states=row.names(USArrests)
states
# Column means/variances differ by orders of magnitude, so scaling matters.
apply(USArrests , 2, mean)
apply(USArrests , 2, var)
pr.out=prcomp(USArrests, scale=TRUE)
names(pr.out)
pr.out$center
pr.out$scale
pr.out$rotation  # loadings: contribution of each variable to each PC
dim(pr.out$x)
biplot(pr.out, scale=0)
pr.out$sdev
# PCA with iris dataset
data("iris")
head(iris)
irisdata1 <- iris[,1:4]  # numeric measurements only; drop the Species factor
irisdata1
head(irisdata1)
# princomp with cor = TRUE works from the correlation matrix (standardized).
principal_components <- princomp(irisdata1, cor = TRUE, score = TRUE)
summary(principal_components)
plot(principal_components)
plot(principal_components, type = "l")  # scree plot drawn as a line
biplot(principal_components)
# NOTE(review): install.packages() inside a script re-installs on every run.
install.packages('MASS')
data(Boston, package="MASS")
pca_out <- prcomp(Boston,scale. = T)
pca_out
plot(pca_out)
help(biplot)
biplot(pca_out, scale = 0)
boston_pc <- pca_out$x  # principal-component scores
boston_pc
head(boston_pc)
summary(boston_pc)
|
87a9ec7985912e0486649f35fa8f96031f85d6d6
|
852d3fb58551d0c612c1c40ebf6ef5ad4d78f92b
|
/Visualization/BitEpiVis.R
|
56d14810054c1daa74d0271debf1e4e2c9e79438
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
aehrc/BitEpi
|
4a2e34a76453d66f18863d3ff0931c7f7c837953
|
8783f9664433c8de5f03b15a47cd5d7d81bb2e09
|
refs/heads/master
| 2021-08-06T00:54:44.046981
| 2021-07-27T07:35:41
| 2021-07-27T07:35:41
| 211,199,347
| 8
| 4
|
NOASSERTION
| 2021-07-22T00:16:18
| 2019-09-26T23:43:50
|
C++
|
UTF-8
|
R
| false
| false
| 6,319
|
r
|
BitEpiVis.R
|
library(dplyr)
library(RCy3)
library(igraph)
# NOTE(review): setwd() ties the script to one machine; prefer project-relative
# paths.
setwd('~/temp/cc/BitEpi')
# Node colors by interaction order; OTHER greys out SNPs that only appear
# because a selected interaction involves them.
Color=list(SNP='red',PAIR='blue',TRIPLET='orange',QUADLET='green', OTHER='gray')
# Nodes of the graph are SNPs and Interactions
# Each SNP node could be connected to multiple Interaction Node
# Each Interaction Node is conneced to the SNPs that are involved in that interaction.
# This function name the interaction nodes by concatinating SNPS with # seprator.
# the 2-SNP, 3-SNP, and 4-SNP names are added as 3 new column to the best dataframe
# For Example if rs123, rs456 and rs789 Interact with each other then
# the Interaction node is called rs123#rs456#rs789
# Derive canonical interaction-node names: the participating SNP ids of each
# pair/triplet/quadlet are sorted row-wise and joined with '#', so the same
# SNP set always maps to the same node name regardless of column order.
# Adds columns nP (2-SNP), nT (3-SNP) and nQ (4-SNP) to the data frame.
AddInteractionNode = function(data)
{
  # Sort each row of the given columns and join the values with '#'.
  joinSorted = function(cols) {
    sortedCols = as.data.frame(t(apply(cols, 1, sort)))
    do.call(paste, c(unname(sortedCols), sep = "#"))
  }
  data$nP = joinSorted(select(data, SNP, PAIR))
  data$nT = joinSorted(select(data, SNP, TRIPLET_1, TRIPLET_2))
  data$nQ = joinSorted(select(data, SNP, QUADLET_1, QUADLET_2, QUADLET_3))
  return(data)
}
# list all the nodes (1-SNP, 2-SNP, 3-SNP, 4SNP) assing beta to the size and rank them by order
# Build the node table of the interaction graph: one row per 1-SNP, 2-SNP,
# 3-SNP and 4-SNP node. Each node carries its interaction order, beta
# (effect size, drawn as node size), a color, and a rank by alpha (descending).
# Refactors four copy-pasted sections into one parameterized helper.
NodeGen = function(dataX)
{
  # Assemble the nodes of one interaction order. nodeCol/betaCol/alphaCol name
  # the columns holding the node id, effect size and ranking score. dedupe
  # keeps only the highest-beta row per interaction node; 1-SNP rows are not
  # deduplicated, matching the original behavior.
  orderNodes = function(data, nodeCol, betaCol, alphaCol, color, ord, dedupe = TRUE)
  {
    data$Node = data[[nodeCol]]
    data$order = ord
    data$beta = data[[betaCol]]
    data$color = color
    if (dedupe) {
      data = data[order(data[['Node']], -data[['beta']]), ]
      data = data[!duplicated(data$Node), ]
    }
    # Rank by the alpha score, best (largest) first.
    data = data[order(-data[[alphaCol]]), ]
    data$rank = seq.int(nrow(data))
    select(data, Node, rank, beta, color, order)
  }
  rbind(
    orderNodes(dataX, 'SNP', 'SNP_B', 'SNP_A', Color$SNP, 1, dedupe = FALSE),
    orderNodes(dataX, 'nP', 'PAIR_B', 'PAIR_A', Color$PAIR, 2),
    orderNodes(dataX, 'nT', 'TRIPLET_B', 'TRIPLET_A', Color$TRIPLET, 3),
    orderNodes(dataX, 'nQ', 'QUADLET_B', 'QUADLET_A', Color$QUADLET, 4)
  )
}
# list all edges between interactive nodes (2-SNP, 3-SNP and 4-SNP) and SNP nodes (1-SNP)
# List the edges connecting every interaction node (2/3/4-SNP) to the SNP
# nodes it contains. Builds one small data.frame per input row and binds them
# once at the end, instead of growing a data.frame inside the loop (which is
# O(n^2) in copies). Also handles the empty-input case explicitly.
EdgeGen = function(data)
{
  if (nrow(data) == 0) {
    return(data.frame(source = character(), target = character()))
  }
  perRow = lapply(seq_len(nrow(data)), function(i) {
    # Nine edges per row, in the original emission order:
    # pair -> {SNP, PAIR}, triplet -> {SNP, T1, T2}, quadlet -> {SNP, Q1, Q2, Q3}.
    data.frame(
      source = c(data[i, "nP"], data[i, "nP"],
                 data[i, "nT"], data[i, "nT"], data[i, "nT"],
                 data[i, "nQ"], data[i, "nQ"], data[i, "nQ"], data[i, "nQ"]),
      target = c(data[i, "SNP"], data[i, "PAIR"],
                 data[i, "SNP"], data[i, "TRIPLET_1"], data[i, "TRIPLET_2"],
                 data[i, "SNP"], data[i, "QUADLET_1"], data[i, "QUADLET_2"], data[i, "QUADLET_3"]))
  })
  do.call(rbind, perRow)
}
# convert BitEpi Best file to nodes and edges
# Load a BitEpi "best" CSV, derive the canonical interaction-node names, and
# build the node and edge tables of the interaction graph.
BestToNodesAndEdges = function(bestFn)
{
  best = AddInteractionNode(read.csv(bestFn))
  list(Nodes = NodeGen(best), Edges = EdgeGen(best))
}
# query nodes and related edges
# Subset the full graph to the top-ranked nodes of each interaction order,
# pull in every SNP that participates in a selected interaction, and map beta
# linearly onto node sizes in [minNodeSize, maxNodeSize].
QueryGraph = function(Graph, thr, minNodeSize, maxNodeSize)
{
  if(minNodeSize >= maxNodeSize)
  {
    print("minNodeSize is greater or equal maxNodeSize")
    # BUG FIX: return(NULL, NULL) errors at runtime in R ("multi-argument
    # returns are not permitted"); return a list shaped like the normal result.
    return(list(Nodes = NULL, Edges = NULL))
  }
  allNodes = Graph$Nodes
  allEdges = Graph$Edges
  # Keep the thr$... best-ranked nodes of each interaction order.
  s1 = allNodes %>% filter(order==1 & allNodes$rank<=thr$SNP)
  s2 = allNodes %>% filter(order==2 & allNodes$rank<=thr$PAIR)
  s3 = allNodes %>% filter(order==3 & allNodes$rank<=thr$TRIPLET)
  s4 = allNodes %>% filter(order==4 & allNodes$rank<=thr$QUADLET)
  selNodes = unique(rbind(s1,s2,s3,s4))
  # Interaction (multi-SNP) nodes among the selection ...
  intNodes = selNodes %>% filter(order>1)
  # ... and every edge leaving them.
  intEdges = select(merge(x=allEdges, y=intNodes, by.x='source', by.y='Node'), source, target)
  intEdges = unique(intEdges)
  # SNPs touched by a selected interaction must appear in the node table even
  # if they did not rank high enough on their own.
  tarNames = unique(select(intEdges, target))
  names(tarNames) = 'Node'
  tarNodes = merge(x=allNodes, y=tarNames, by='Node')
  selNodes = unique(rbind(selNodes, tarNodes))
  # Linear map of beta onto [minNodeSize, maxNodeSize].
  minBeta = min(selNodes$beta)
  maxBeta = max(selNodes$beta)
  ratio = (maxNodeSize - minNodeSize) / (maxBeta - minBeta)
  selNodes$size = ((selNodes$beta - minBeta) * ratio) + minNodeSize
  # SNPs only pulled in via an interaction are greyed out at minimum size.
  # Vector indexing (rather than df[cond, ]$col <- x, which errors when no
  # row matches) keeps this safe for empty selections.
  other = (selNodes$order == 1) & (selNodes$rank > thr$SNP)
  selNodes$color[other] = Color$OTHER
  selNodes$size[other] = minNodeSize
  return(list(Nodes=selNodes, Edges=intEdges))
}
# End-to-end driver: read a BitEpi "best" file, select the top interactions,
# draw the resulting graph with igraph, and push it to a running Cytoscape.
DoItAll = function(bestFn, thr, minNodeSize, maxNodeSize)
{
  # read best file into a graph
  GraphAll = BestToNodesAndEdges(bestFn)
  # query graph
  GraphSelected = QueryGraph(GraphAll, thr, minNodeSize, maxNodeSize)
  Edges = GraphSelected$Edges
  Nodes = GraphSelected$Nodes
  # plot graph
  Nodes$label = " "
  network = graph_from_data_frame(d=Edges, directed=FALSE, vertices = Nodes)
  # BUG FIX: the original passed vertex.label twice (once with $Node, once
  # with $label); keep a single vertex.label so the argument is unambiguous.
  plot(network, vertex.size=V(network)$size, vertex.label=V(network)$Node, vertex.color=V(network)$color)
  cytoscapePing()
  createNetworkFromIgraph(network,"BitEpi Network", title = "BitEpi Graph")
}
# Keep the top 3 nodes of each interaction order.
thr=list(SNP=3,PAIR=3,TRIPLET=3,QUADLET=3)
minNodeSize = 10
maxNodeSize = 35
# Nodes are ranked by alpha, but beta is represented as node size in the plot.
DoItAll('sampleData/out.best.csv', thr, minNodeSize, maxNodeSize)
|
67b4c288cc691e1b72ce5df79a91ee5298726943
|
f5e25afe6fb3abdc9ea1ebe09ad383c21c83f92f
|
/R/errorbar.R
|
2f944886fdadac7639f37258657b78789528a0f8
|
[] |
no_license
|
cran/phonTools
|
34a4527f0ab8cbe06947dab7aff6385bb56cd452
|
80e82b901140f715c0b4d2c55b214410a6498038
|
refs/heads/master
| 2016-09-16T04:11:26.219457
| 2015-07-30T00:00:00
| 2015-07-30T00:00:00
| 17,698,512
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 598
|
r
|
errorbar.R
|
# Copyright (c) 2015 Santiago Barreda
# All rights reserved.
# Draw vertical error bars at (x, y): whiskers extend `top` above and
# `bottom` below each point. With add = FALSE a new scatter plot is opened
# first, with limits widened to fit the bars; extra arguments go to arrows().
errorbars = function(x, y, top, bottom = top, length = .2, add = TRUE, ...){
  if (!add) {
    plot(x, y, pch = 16, ylim = range(y) + c(-top, bottom))
  }
  arrows(x, y + top, x, y - bottom, angle = 90, code = 3, length = length, ...)
}
# Singular-name alias of errorbars(). Forwards its arguments directly rather
# than rebuilding the call via match.call()/do.call(), which re-evaluated the
# captured argument expressions and was fragile under non-standard calls.
errorbar = function(x, y, top, bottom = top, length = .2, add = TRUE, ...){
  errorbars(x = x, y = y, top = top, bottom = bottom, length = length, add = add, ...)
}
|
3980ae7093f7e77dea6503b893a28c4e279bb3c0
|
f6f88407b149dfe2be1f46832ba4b3385ad7aada
|
/gdp_rates/r_scripts/assess_cumulative_impact.R
|
28e8d00548c28df02076d3ac975bdc7d5cda1829
|
[] |
no_license
|
dstauffer11/colonization_effects
|
3ee84872c067844a303e8dad35b72baa078d8cc3
|
c8adf4472c4296a086505763e93f4a97a140edab
|
refs/heads/master
| 2022-11-14T13:42:37.441079
| 2020-06-26T14:53:36
| 2020-06-26T14:53:36
| 275,174,879
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,360
|
r
|
assess_cumulative_impact.R
|
library(CausalImpact)
library(ggplot2)
library(rjson)
library(rstan)
library(bayesplot)
library(ggplot2)
library(CausalImpact)
library(gridExtra)
library(splines)
options(mc.cores = parallel::detectCores())
# Map of country code -> (first year, independence year, last year,
# neighbour codes, colonizer) — structure inferred from usage below.
country.year.neighbors.list <- fromJSON(file='data/country_year_neighbors_map.json')
gdp.data <- read.csv('data/annual_growth.csv')
# NOTE(review): T is also base R's shorthand for TRUE; reusing it as a
# variable works here but is fragile.
T <- 20   # window length in years (9 before independence + the year + 10 after)
T0 <- 10  # pre-treatment period length
J <- length(country.year.neighbors.list)
N <- 0
X <- matrix(, nrow = 20, ncol = 0)  # neighbour growth series, one column each
Y <- c()
indices <- c()
codes <- c()
neighbor_map = list()
colonizers = c()
# Build, per country, the outcome series Y and the neighbour predictor matrix
# X over a 20-year window centred on the year of independence.
for (country_code in names(country.year.neighbors.list)) {
  yin = country.year.neighbors.list[[country_code]]
  codes = c(codes, country_code)
  neighbor_map[[country_code]] = yin[[4]]
  colonizers = c(colonizers, yin[[5]])
  first.year <- strtoi(yin[[1]])
  independence.year <- strtoi(yin[[2]])
  last.year <- strtoi(yin[[3]])
  # Skip countries without full coverage 9 years before and 10 years after.
  if ((first.year > independence.year - 9) | (last.year < independence.year + 10)) next
  first.year <- independence.year - 9
  last.year <- independence.year + 10
  countries <- c(country_code, yin[[4]])
  local.gdp.data <- gdp.data[gdp.data$country_code %in% countries, ]
  local.gdp.data <- local.gdp.data[local.gdp.data$year >= first.year, ]
  local.gdp.data <- local.gdp.data[local.gdp.data$year <= last.year, ]
  y <- local.gdp.data[local.gdp.data$country_code == country_code, 'annual_growth']
  Y <- append(Y, list(y))
  # Column range of this country's neighbours within X (1-based, inclusive).
  indices <- c(indices, c(N+1, N+length(yin[[4]])))
  N <- N + length(yin[[4]])
  # Wide form: one column per neighbour, one row per year of the window.
  x.long <- local.gdp.data[local.gdp.data$country_code %in% yin[[4]], ]
  x.long <- subset(x.long, select=c('year', 'country_code', 'annual_growth'))
  x.wide <- reshape(x.long, direction = "wide", idvar = 'year', timevar = 'country_code')
  x <- x.wide[ , !(names(x.wide) %in% c('year'))]
  X <- cbind(X, data.matrix(x))
}
#X = apply(X,2 , norm <- function(x){return (x - mean(x))})
# for (i in 1:J) {
# Y[[i]] = Y[[i]] - mean(Y[[i]][1:T0])
# }
colonizers_numeric = as.numeric(factor(colonizers))
C = max(colonizers_numeric)
data = list(
T=T,
T0=T0,
N=N,
J=J,
X=X,
y=Y,
indices=indices
)
sm <- stan_model('stan_models/cumulative_impact.stan')
fit <- sampling(sm, data=data, iter=5000, control=list(adapt_delta=0.95, max_treedepth=12), seed=1, chains=1, warmup=1000)
sm_n <- stan_model('stan_models/cumulative_impact_normed.stan')
fitn <- sampling(sm_n, data=data, iter=2000, control=list(adapt_delta=0.95, max_treedepth=12), seed=1, chains=4, warmup=1000)
print(fit, pars=c('mean_effect', 's_effect', 'raw_effects', 'real_effects', 's_s_obs', 's_obs'))
data_colonizers = list(
T=T,
T0=T0,
N=N,
J=J,
X=X,
y=Y,
indices=indices,
C=C,
colonizer=colonizers_numeric
)
smc <- stan_model('stan_models/multiple_lines_colonizers2.stan')
fitc <- sampling(smc, data=data_colonizers, iter=2000, control=list(adapt_delta=0.9, max_treedepth=10), seed=5)
# check tree depths
check_treedepth(fit)
# check energy
check_energy(fit)
# check chain mixing
traceplot(fitn, pars=c('mean_effect', 's_effect', 'real_effects[1]', 's_obs[1]', 's_s_obs', 's_effect', 's_level[1]', 'neighbor_beta[10]'))
mcmc_trace_highlight(
fitn,
pars = c('mean_effect', 's_effect', 'real_effects[1]', 's_obs[1]', 's_s_obs', 's_effect', 's_level[1]', 'neighbor_beta[10]'),
alpha = 0.03,
highlight = 2
)
traceplot(fitn, pars=c('neighbor_beta[1]', 'neighbor_beta[10]', 'neighbor_beta[50]', 'neighbor_beta[100]', 'neighbor_beta[200]', 'neighbor_beta[300]'))
traceplot(fitc, pars=c('u[1,1]', 'u[1,15]', 'u[10,3]', 'u[10,5]', 'u[30,5]', 'u[40,20]'))
traceplot(fitc, pars=c('mean_effect', 's_colonizer_effect_unif', 's_colonizer_effect', 'raw_colonizer_effects[1]',
'raw_colonizer_effects[2]', 'raw_colonizer_effects[3]', 'raw_colonizer_effects[4]', 'raw_colonizer_effects[5]'))
pairs(fitn, pars=c('mean_effect', 's_effect', 'real_effects[1]', 'real_effects[10]', 'real_effects[30]'))
pairs(fitn, pars=c('mean_effect', 's_effect', 's_obs[1]', 's_obs[10]', 's_obs[30]', 's_s_obs'))
pairs(fitc, pars=c('mean_effect', 's_effect', 's_s_obs', 's_colonizer_effect', 'raw_colonizer_effects'))
# Histogram of tree depths
breaks = 0:13
sampler_params = get_sampler_params(fitn, inc_warmup=FALSE)
treedepths = do.call(rbind, sampler_params)[, 'treedepth__']
treedepths_hist = hist(treedepths, breaks=breaks, plot=FALSE)
par(mar=c(4, 4, 0.5, 0.5))
plot(treedepths_hist, main='', xlab='theta.1', yaxt='n', ann=FALSE)
# Estimate distribution of mean effect from posterior samples
mean_effect_draws = as.array(fitn, pars = c('mean_effect'))
mcmc_dens(mean_effect_draws, pars=c('mean_effect')) +
ggtitle('Estimated Effect of Indepence on Economic Growth') +
xlab('Change in Growth Rate of GDP per Capita (2011 US$)') +
ylab('Density')
# Estimate effect of each colonizer
colonizers_reducted = c('BEL', 'DEU', 'FRA', 'GBR', 'PRT', 'RUS')
mean_effect_draws = as.array(fitc, pars = c('real_colonizer_effects'))
mcmc_areas(-mean_effect_draws, regex_pars = "real_colonizer_effects\\[[1-6]\\]", prob=0.8) +
ggtitle('Estimated Effect of Colonizer its Colonies') +
xlab('Depression of Growth Rate') +
ylab('Density') +
xlim(-0.05, 0.05) +
scale_y_discrete(labels=colonizers_reducted)
# Estimate distribution of each indiviadual country's effects
individual_effect_draws = as.matrix(fitn, pars = c('real_effects', 'mean_effect'))
mcmc_intervals(individual_effect_draws) +
scale_y_discrete(labels=c(codes, 'Overall')) +
ggtitle('Estimated Independence Effect by Country') +
xlab('Growth Rate Change') +
theme(
panel.grid.major = element_line(size = 0.1, linetype = 'solid',
colour = "gray"),
panel.grid.minor = element_line(size = 0.1, linetype = 'solid',
colour = "gray")
)
# check step sizes of sample
sampler_params <- get_sampler_params(fit, inc_warmup=FALSE)
stepsizes <- sapply(sampler_params, function(x) x[1,'stepsize__'])
names(stepsizes) <- list("Chain 1", "Chain 2", "Chain 3" ,"Chain 4")
stepsizes
mean(stepsizes)
# 2000 iterations, centered: 0.009013763
# check gradient evaluations
n_gradients <- sapply(sampler_params, function(x) sum(x[,'n_leapfrog__']))
n_gradients
sum(n_gradients)
# 2000 ierations, centered: 2318880
stan_diag(fit, info = 'sample')
color_scheme_set('darkgray')
f <- extract(fit)
ppc_dens_overlay(y = y, yrep = f$effect[1:T0, ])
draws <- as.array(fit, pars = c('beta', 's_obs', 's_slope', 's_level', 'effect'))
np <- nuts_params(fit)
mcmc_parcoord(draws, np=np)
draws <- as.array(fit, pars = c('u'))
np <- nuts_params(fit)
mcmc_parcoord(draws, np=np)
draws <- as.array(fit, pars = c('mean_effect', 's_effect', 'raw_effects'))
np <- nuts_params(fit)
mcmc_parcoord(draws, np=np)
draws <- as.array(fit, pars = c('mean_effect', 's_effect', 's_s_obs'))
np <- nuts_params(fit)
mcmc_parcoord(draws, np=np)
mcmc_scatter(
as.matrix(fit),
pars = c('s_obs', 's_slope'),
np = nuts_params(fit),
np_style = scatter_style_np(div_color = "green", div_alpha = 0.8)
)
# Estimate the causal impact of independence on a country's annual GDP growth.
# Builds a zoo panel with the country as response ("y") and its neighbours as
# covariates, fits CausalImpact over pre/post independence periods, and writes
# the impact plot to results/causal_impact/<country_code>.png.
# `yin` is list(first_year, independence_year, last_year, neighbour_codes, ...).
measure.impact <- function(country_code, yin) {
  # Analysis window: first available year through the year before the last one.
  yr.start <- strtoi(yin[[1]])
  yr.indep <- strtoi(yin[[2]])
  yr.end <- strtoi(yin[[3]]) - 1
  group <- c(country_code, yin[[4]])
  # Restrict the global gdp.data panel to the group and the window.
  panel <- gdp.data[gdp.data$country_code %in% group, ]
  panel <- panel[panel$year >= yr.start, ]
  panel <- panel[panel$year <= yr.end, ]
  # One observation date per year (Jan 1st) for the zoo index.
  dates <- as.Date(ISOdate(panel[panel$country_code == country_code, 'year'], 1, 1))
  y <- panel[panel$country_code == country_code, 'annual_growth']
  # Reshape the neighbours' growth series to wide form: one column per country.
  neighbors.long <- panel[panel$country_code %in% yin[[4]], ]
  neighbors.long <- subset(neighbors.long, select = c('year', 'country_code', 'annual_growth'))
  neighbors.wide <- reshape(neighbors.long, direction = "wide", idvar = 'year', timevar = 'country_code')
  x <- neighbors.wide[ , !(names(neighbors.wide) %in% c('year'))]
  # Keep the names "y" and "x": CausalImpact treats the first column as response.
  simple.data <- zoo(cbind(y, x), dates)
  pre.period <- as.Date(ISOdate(c(yr.start, yr.indep), 1, 1))
  post.period <- as.Date(ISOdate(c(yr.indep + 1, yr.end), 1, 1))
  impact <- CausalImpact(simple.data, pre.period, post.period)
  plot(impact)
  ggsave(sprintf('results/causal_impact/%s.png', country_code))
  impact
}
# measure.impact('Canada', c(1901, 1931, c('United States', 'Mexico')))
for (name in names(country.year.neighbors.list)) {
print(name)
measure.impact(name, country.year.neighbors.list[[name]])
}
|
aca1ea8745d9f7de8e0684b542d4448ea1ba6a6d
|
a3e56dccec4c41f256583f45959ee64d6d269f57
|
/man/wine27.Rd
|
3a00fe10109004762099940d9b5bd12271636487
|
[] |
no_license
|
cran/MBCbook
|
76b189b1b24303fc49ae748f34807c5253507229
|
71cd7f2313a55239d5b1c6c707308c783481b200
|
refs/heads/master
| 2020-12-22T01:03:39.836908
| 2019-07-02T06:00:03
| 2019-07-02T06:00:03
| 236,623,831
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,155
|
rd
|
wine27.Rd
|
\name{wine27}
\alias{wine27}
\docType{data}
\title{
The (27-dimensional) Italian Wine data set
}
\description{
The (27-dimensional) Italian Wine data set is the result of a chemical analysis of 178 wines grown in the same region in Italy but derived from three different cultivars. The analysis determined the quantities of 27 constituents found in each of the three types of wines.
}
\usage{data("wine27")}
\format{
A data frame with 178 observations on the following 29 variables.
\describe{
\item{\code{Alcohol}}{a numeric vector}
\item{\code{Sugar.free_extract}}{a numeric vector}
\item{\code{Fixed_acidity}}{a numeric vector}
\item{\code{Tartaric_acid}}{a numeric vector}
\item{\code{Malic_acid}}{a numeric vector}
\item{\code{Uronic_acids}}{a numeric vector}
\item{\code{pH}}{a numeric vector}
\item{\code{Ash}}{a numeric vector}
\item{\code{Alcalinity_of_ash}}{a numeric vector}
\item{\code{Potassium}}{a numeric vector}
\item{\code{Calcium}}{a numeric vector}
\item{\code{Magnesium}}{a numeric vector}
\item{\code{Phosphate}}{a numeric vector}
\item{\code{Chloride}}{a numeric vector}
\item{\code{Total_phenols}}{a numeric vector}
\item{\code{Flavanoids}}{a numeric vector}
\item{\code{Nonflavanoid_phenols}}{a numeric vector}
\item{\code{Proanthocyanins}}{a numeric vector}
\item{\code{Color_Intensity}}{a numeric vector}
\item{\code{Hue}}{a numeric vector}
\item{\code{OD280.OD315_of_diluted_wines}}{a numeric vector}
\item{\code{OD280.OD315_of_flavanoids}}{a numeric vector}
\item{\code{Glycerol}}{a numeric vector}
\item{\code{X2.3.butanediol}}{a numeric vector}
\item{\code{Total_nitrogen}}{a numeric vector}
\item{\code{Proline}}{a numeric vector}
\item{\code{Methanol}}{a numeric vector}
\item{\code{Type}}{a factor with levels \code{Barbera}, \code{Barolo}, \code{Grignolino}}
\item{\code{Year}}{a numeric vector}
}
}
\details{
This data set is an expanded version of the popular one from the UCI machine learning repository (http://archive.ics.uci.edu/ml/datasets/Wine).
}
\examples{
data(wine27)
}
\keyword{datasets}
|
e0199fb03566d406dd131363af98798813d7b3dc
|
eac21a885ac41794e6ef37f6abdf075e85897ed1
|
/day24.R
|
f88ab3e1c2b200e960b4fce77542afe348a75950
|
[] |
no_license
|
d-sci/Advent-of-Code-2017
|
6872104df76b9b008ef0b15238d6934f410443ca
|
73c8f0d0ca27e4fad2b85215df136882f27c2afc
|
refs/heads/master
| 2021-09-02T14:13:38.027871
| 2018-01-03T04:48:33
| 2018-01-03T04:48:33
| 116,091,965
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,589
|
r
|
day24.R
|
# Day 24 -- Advent of Code 2017 ("Electromagnetic Moat").
# Components are two-port pieces "a/b"; a bridge chains components on matching ports.
setwd("C:/Users/David.simons/Documents/advent of code")
library(data.table)
# Parse each "a/b" input line into a two-column row (V1, V2) of port values.
chunks <- rbindlist(lapply(strsplit(readLines("day24.txt"), "/"), function(x) as.list(as.numeric(x))))
# Tag every component with a unique id so used pieces can be excluded during search.
chunks[, id := 1:nrow(chunks)]
# Part 1 ----
# Recursively find the strength of the strongest buildable bridge.
# Depth-first search from the current open port `startWith`, never reusing a
# component id in `used`; returns the strength (sum of all ports used) of the
# strongest bridge reachable from here. Reads the global data.table `chunks`.
strongestBridge <- function(startWith, used) {
  # Unused components with at least one port matching the open port.
  candidates <- chunks[(V1==startWith | V2==startWith) & !(id %in% used)]
  if (nrow(candidates)==0) {
    # Dead end: bridge strength is the sum of both ports of every used piece.
    return(chunks[id %in% used, sum(V1) + sum(V2)])
  }
  scores <- vapply(seq_len(nrow(candidates)), function(k) {
    # The other port of candidate k becomes the new open port.
    nextPort <- if (candidates[k, V1]==startWith) candidates[k, V2] else candidates[k, V1]
    strongestBridge(nextPort, c(used, candidates[k, id]))
  }, numeric(1))
  max(scores)
}
#recursion is slow in R but I'm satisfied
print(strongestBridge(0, NULL))
#part 2 ----
#same as part 1, but need to save both strength and length
# Same search as strongestBridge, but tracks both length and strength so the
# strongest among the LONGEST bridges can be selected. Returns
# list(len = component count, strength = port sum). Reads the global `chunks`.
longestBridge <- function(startWith, used) {
  candidates <- chunks[(V1==startWith | V2==startWith) & !(id %in% used)]
  if (nrow(candidates)==0) {
    # Dead end: report how many pieces we placed and their total strength.
    return(list(len=length(used), strength=chunks[id %in% used, sum(V1) + sum(V2)]))
  }
  outcomes <- rbindlist(lapply(seq_len(nrow(candidates)), function(k) {
    nextPort <- if (candidates[k, V1]==startWith) candidates[k, V2] else candidates[k, V1]
    longestBridge(nextPort, c(used, candidates[k, id]))
  }))
  # Best length overall, and best strength among bridges of that length.
  list(len=outcomes[, max(len)], strength=outcomes[len==max(len), max(strength)])
}
#recursion is slow in R but I'm satisfied
print(longestBridge(0, NULL)$strength)
|
279fa596811688e316c5f3eea1e04fd9dd821e41
|
282acf6c53cceeda154ea8a8f7bd87ef80d0672e
|
/analyze.r
|
fa0cf77e7371ad116e8645b43eb56c06b0e76f84
|
[] |
no_license
|
casras111/thesis
|
0f7729e92cdf30e6c0fd4dbce016f0126efa7142
|
995a062a18527205af926e6d490f926d396a08b1
|
refs/heads/master
| 2021-03-24T13:28:17.109233
| 2018-03-01T11:16:45
| 2018-03-01T11:16:45
| 63,358,019
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,208
|
r
|
analyze.r
|
#analyze previous runs
library(ggplot2)
library(quantmod)
library(reshape2)
library(gridExtra)
library(moments) #for skewness function
startDate <- "1996-1-1"
midDate <- "2005-12-31"
midDate_1 <- "2006-1-1"
endDate <- "2015-12-31"
if (dir.exists("C:/Users/Claudiu/Dropbox")) {
droppath <- "C:/Users/Claudiu/Dropbox" #Dell laptop
} else {
droppath <- "D:/Claudiu/Dropbox" #Home PC
}
#historical use for bootstrap
#load(file="C:/Users/Claudiu/Dropbox/Thesis/Docs/Data/StocksList26092016Daily_5stocks.Rdata")
#load(file="C:/Users/Claudiu/Dropbox/Thesis/Docs/Data/StocksList21092016Bootstrap10000_5stocks.Rdata")
#load(file="C:/Users/Claudiu/Dropbox/Thesis/Docs/Data/StocksList16122016Monthly48stocks.Rdata")
load(file=file.path(droppath,"Thesis/DataWork/StocksList_1_100_CPT_AVAR.Rdata"))
temp <- StocksList
load(file=file.path(droppath,"Thesis/DataWork/StocksList_101_200_CPT_AVAR.Rdata"))
StocksList <- c(temp,StocksList)
temp <- StocksList
load(file=file.path(droppath,"Thesis/DataWork/StocksList_201_400_CPT_AVAR.Rdata"))
StocksList <- c(temp,StocksList)
temp <- StocksList
load(file=file.path(droppath,"Thesis/DataWork/StocksList_401_600_CPT_AVAR.Rdata"))
StocksList <- c(temp,StocksList)
temp <- StocksList
load(file=file.path(droppath,"Thesis/DataWork/StocksList_601_778_CPT_AVAR.Rdata"))
StocksList <- c(temp,StocksList)
stocknames <- names(StocksList)
N <- dim(StocksList[[1]])[1] #number of periods, assumed consistent for all data structures
#constant defining how many months of history to use, 120 for 10y monthly
n_window <- round(N/2)
calc_start <- N-n_window+1
bootcols <- grep("Boot",colnames(StocksList[[1]]))
#vector with names of stocks that have completed runs
completeStocks <- stocknames[!sapply(lapply(StocksList,last),anyNA)]
completeIndx <- (1:length(StocksList))[!sapply(lapply(StocksList,last),anyNA)] #incomplete runs
StocksList <- StocksList[completeIndx]
StocksList <- StocksList[-417] #temp fix for TDS stock with stock split on 16/5/2005
for (i in 1:length(StocksList)) {
#sum of squares of the error for variance risk predict
RMSE1 <- with(StocksList[[i]][calc_start:N],sqrt(mean(((Price-CAPMPrice)/Price)^2)))
RMSE2 <- with(StocksList[[i]][calc_start:N],sqrt(mean(((Price-VarPrice)/Price)^2)))
RMSE3 <- with(StocksList[[i]][calc_start:N],sqrt(mean(((Price-SVarPrice)/Price)^2)))
RMSE4 <- with(StocksList[[i]][calc_start:N],sqrt(mean(((Price-VAR5pctPrice)/Price)^2)))
RMSE5 <- with(StocksList[[i]][calc_start:N],sqrt(mean(((Price-CPTPrice)/Price)^2)))
RMSE6 <- with(StocksList[[i]][calc_start:N],sqrt(mean(((Price-AVARPrice)/Price)^2)))
Err1 <- with(StocksList[[i]][calc_start:N],mean(((Price-VarPrice)/Price)))
Err2 <- with(StocksList[[i]][calc_start:N],mean(((Price-SVarPrice)/Price)))
cat(sprintf("%-4s RMSE: CAPM %.4f, Variance %.4f, Semivariance %.4f,
VAR5pct %.4f, CPT %.4f, AVAR %.4f \n",
stocknames[i],RMSE1,RMSE2,RMSE3,RMSE4,RMSE5,RMSE6))
StocksList[[i]]$RMSE_Var <- RMSE2
StocksList[[i]]$RMSE_SVar <- RMSE3
StocksList[[i]]$RMSE_VAR5Pct <- RMSE4
StocksList[[i]]$RMSE_CPT <- RMSE5
StocksList[[i]]$RMSE_AVAR <- RMSE6
StocksList[[i]]$Err_Var <- Err1
StocksList[[i]]$Err_SVar <- Err2
StocksList[[i]]$skewavg <- mean(StocksList[[i]]$Skew,na.rm=T)
StocksList[[i]]$LogReturn <- ROC(StocksList[[i]]$Price,type="continuous",na.pad=F)
#skewness (cumulative rolling) for n_window back history
StocksList[[i]]$LogSkew <- rollapply(StocksList[[i]]$LogReturn,FUN=skewness,
width=n_window,na.rm=T)
}
# Descriptive statistics over a date window: each helper subsets the xts
# object x to the range "a/b" (xts date-range subsetting) and summarises the
# Return column, dropping NA values.
mean_func <- function(x, a, b) {
  mean(coredata(x[paste0(a, "/", b)]$Return), na.rm = TRUE)
}
sd_func <- function(x, a, b) {
  sd(coredata(x[paste0(a, "/", b)]$Return), na.rm = TRUE)
}
skew_func <- function(x, a, b) {
  skewness(coredata(x[paste0(a, "/", b)]$Return), na.rm = TRUE)
}
StocksStat1 <- rbind(mean(sapply(StocksList, mean_func,startDate,midDate)),
mean(sapply(StocksList, sd_func, startDate,midDate)),
mean(sapply(StocksList, skew_func, startDate,midDate)))
StocksStat2 <- rbind(mean(sapply(StocksList, mean_func,midDate_1,endDate)),
mean(sapply(StocksList, sd_func, midDate_1,endDate)),
mean(sapply(StocksList, skew_func,midDate_1,endDate)))
StocksStat <- cbind(StocksStat1,StocksStat2)
row.names(StocksStat) <- c("Mean","Std Dev","Skewness")
colnames(StocksStat) <- c("1995-2005","2006-2015")
print(StocksStat)
#descriptive statistics - histogram for monthly cross-section of stocks returns
statret <- sapply(StocksList,function(x) {return(coredata(x$Return))})
stat_df1 <- data.frame(Dates=index(StocksList[[1]]),
Mean=apply(statret,1,mean))
stat_df1 <- stat_df1[-1,] #remove first NA row
ggplot(stat_df1,aes(Mean))+geom_histogram(binwidth=0.03)
#histograms for std dev and skewness
stat_df2 <- data.frame(StdDev=apply(statret,2,sd,na.rm=T),
Skewness=apply(statret,2,skewness,na.rm=T))
ggplot(stat_df2,aes(StdDev))+geom_histogram(binwidth=0.02)
ggplot(stat_df2,aes(Skewness))+geom_histogram(binwidth=0.3)
#extract vector of RMSE %
RMSE_Var <- sapply(lapply(StocksList,last),function(x) {return(x$RMSE_Var)})
RMSE_SVar <- sapply(lapply(StocksList,last),function(x) {return(x$RMSE_SVar)})
RMSE_VAR5Pct <- sapply(lapply(StocksList,last),function(x) {return(x$RMSE_VAR5Pct)})
RMSE_CPT <- sapply(lapply(StocksList,last),function(x) {return(x$RMSE_CPT)})
RMSE_AVAR <- sapply(lapply(StocksList,last),function(x) {return(x$RMSE_AVAR)})
Err_Var <- sapply(lapply(StocksList,last),function(x) {return(x$Err_Var)})
Err_SVar <- sapply(lapply(StocksList,last),function(x) {return(x$Err_SVar)})
#descriptive statistics in %
RMSE_summary <- 100*rbind(summary(RMSE_Var),summary(RMSE_SVar),summary(RMSE_VAR5Pct),
summary(RMSE_CPT),summary(RMSE_AVAR))
row.names(RMSE_summary)<-c("Variance","Semivariance","VAR","CPT","AVAR")
RMSE_summary
plot.df <- data.frame(stock=names(RMSE_Var),Variance=RMSE_Var,Semivariance=RMSE_SVar,
VaR=RMSE_VAR5Pct,CPT=RMSE_CPT,AVAR=RMSE_AVAR)
plot.df <- melt(plot.df,id="stock",value.name="RMSE",
variable.name="Risk_Measure")
ggplot(plot.df,aes(Risk_Measure,RMSE))+geom_boxplot()
#boxplot(RMSE_Var,RMSE_SVar,RMSE_VAR5Pct,names=c("Variance","Semivariance","VAR"))
skew_last <- sapply(lapply(StocksList,last),function(x) {return(x$Skew)})
skew_first <- sapply(lapply(StocksList,function(x) {return(first(last(x,120)))}),
function(x) {return(x$Skew)})
skew_avg <- 0.5*(skew_last+skew_first)
skew_rollavg <- sapply(lapply(StocksList,last),function(x) {return(x$skewavg)})
skew_log_last <- sapply(lapply(StocksList,last),function(x) {return(x$LogSkew)})
skew_log_first <- sapply(lapply(StocksList,function(x) {return(first(last(x,120)))}),
function(x) {return(x$LogSkew)})
skew_log_avg <- 0.5*(skew_log_last+skew_log_first)
skew_df <- as.data.frame(cbind(skew_last,skew_first,skew_avg,skew_rollavg,skew_log_avg,
RMSE_Var,RMSE_SVar,RMSE_CPT,RMSE_AVAR,Err_Var,Err_SVar))
skew_df$Var_best <- (skew_df$RMSE_Var < skew_df$RMSE_SVar)
#Labeling G1 group with lower variance RMSE and G2 for semivariance
skew_df$Predictor_Group <- ifelse((skew_df$RMSE_Var < skew_df$RMSE_SVar),
"G1","G2")
ggplot(skew_df,aes(Predictor_Group,skew_avg))+geom_boxplot()
ggplot(skew_df,aes(skew_avg))+geom_density(aes(colour=Predictor_Group))
# ggtitle("Stock returns skewness in optimal RMSE groups")
summary(skew_df$skew_avg)
summary(skew_df[skew_df$Predictor_Group=="G1",]$skew_avg)
summary(skew_df[skew_df$Predictor_Group=="G2",]$skew_avg)
table(skew_df$Predictor_Group)
skew_df$positive_skew <- ifelse(skew_df$skew_avg > 0.2,"Large","Small")
with(skew_df,table(Predictor_Group,positive_skew))
skew_df$beta <- sapply(lapply(StocksList,last),function(x) {return(x$Beta)})
table(skew_df$Var_best,skew_df$beta>1.5)
reg1 <- lm((skew_df$RMSE_Var>skew_df$RMSE_SVar) ~ skew_df$skew_first)
summary(reg1)
reg2 <- lm((skew_df$RMSE_Var>skew_df$RMSE_SVar) ~ skew_df$skew_last)
summary(reg2)
reg3 <- lm((skew_df$RMSE_Var>skew_df$RMSE_SVar) ~ skew_df$skew_rollavg)
summary(reg3)
#regression with average of first and last entry in 10y skew as a predictor
reg4 <- lm((skew_df$RMSE_Var>skew_df$RMSE_SVar) ~ skew_df$skew_avg)
summary(reg4)
reg5 <- lm((skew_df$RMSE_Var>skew_df$RMSE_SVar) ~ skew_df$skew_log_avg)
summary(reg5)
logreg <- glm(!skew_df$Var_best ~ skew_df$skew_avg,family=binomial(link="logit"))
summary(logreg)
ggplot(skew_df,aes(Var_best,skew_avg))+geom_boxplot()+
ggtitle("RMSE Variance < RMSE Semivariance as a function of skewness")
# boxplot(skew_df$skew_avg~skew_df$Var_best)
# title("RMSE Variance < RMSE Semivariance as a function of skewness")
#if data contains bootstrap columns show last period stats
if (!is.null(dim(bootcols))) {
for (i in seq_along(stocknames)) {
print(stocknames[i])
print(StocksList[[i]][N,bootcols])
}
}
save(StocksList,file="../DataWork/StocksList_after_Analyze.Rdata")
|
3597803ba44bb4c1d0a0be5d5b1885fd75226047
|
7be028e961329bd28e739e7004e1f42b68181d0d
|
/R/stacf.R
|
4a216b4d0a96003426c6eadcb53155ee32d3e498
|
[
"MIT"
] |
permissive
|
fcheysson/starma
|
a6293d9ac1ced9743d1e456c444ba16db2c36263
|
2e08ba5e122dda721d387e4aead8bf3304140e8a
|
refs/heads/main
| 2023-06-14T21:52:21.318306
| 2021-07-12T09:14:36
| 2021-07-12T09:14:36
| 385,189,347
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,116
|
r
|
stacf.R
|
# Space-time autocorrelation function, per (Pfeifer & Stuart, 1980):
#   acf(l, 0, s) = cov(l, 0, s) / sqrt( cov(l, l, 0) * cov(0, 0, 0) )
# Computes the stacf matrix (rows = temporal lags, cols = spatial lags),
# optionally plots it, and returns it (invisibly when plotted).
stacf <- function(data, wlist, tlag.max=NULL, plot=TRUE, use.ggplot=TRUE) {
  # A single weight matrix means "first spatial order only": prepend the
  # identity matrix as the spatial-lag-0 weights.
  if (is.matrix(wlist)) {
    wlist <- list(diag(dim(wlist)[1]), wlist)
  }
  # Default maximal temporal lag, same rule of thumb as stats::acf.
  if (is.null(tlag.max)) {
    tlag.max <- floor(10 * log10(nrow(data)))
  }
  # The optimized C++ routine expects a plain matrix.
  input <- if (is.data.frame(data)) as.matrix(data) else data
  out <- stacfCPP(input, wlist, tlag.max)
  colnames(out) <- paste("slag", seq_along(wlist) - 1)
  rownames(out) <- paste("tlag", seq_len(tlag.max))
  # Plot if requested, still returning the stacf matrix for further use.
  if (plot) {
    stplot(out, 2 / sqrt(nrow(data) * ncol(data)), match.call(), ggplot=use.ggplot)
    return(invisible(out))
  }
  out
}
# To do:
# - Try to separate the files stcov.cpp and stacf.cpp while keeping
#   stacfCPP coherent (it uses the stcovCPP function defined in
#   stcov.cpp).
|
5610d56094120d95401a7ded30f4d49e9b01da23
|
1f2ed7e0778776371702499954ab1b11d3ad3a4c
|
/man/oly12.Rd
|
8557a9a4c26c879b0eb34223181d43bb7882a7f4
|
[] |
no_license
|
cran/VGAMdata
|
1e3b653b5a9d4921535fb7d2e6d4191aa2d9201a
|
fbbb0beb0bf79fff712d1b994cf51de5cb3b176b
|
refs/heads/master
| 2023-04-07T05:39:02.437835
| 2023-01-11T19:20:02
| 2023-01-11T19:20:02
| 17,694,035
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,481
|
rd
|
oly12.Rd
|
\name{oly12}
\alias{oly12}
\docType{data}
\title{
2012 Summer Olympics: Individuals Data
}
\description{
Individual data for the Summer
2012 Olympic Games.
}
\usage{data(oly12)}
\format{
A data frame with 10384 observations on the following 14 variables.
\describe{
\item{\code{Name}}{The individual competitor's name. }
\item{\code{Country}}{Country. }
\item{\code{Age}}{A numeric vector, age in years. }
\item{\code{Height}}{A numeric vector, height in m. }
\item{\code{Weight}}{A numeric vector, weight in kg. }
\item{\code{Sex}}{A factor with levels \code{F} and \code{M}. }
\item{\code{DOB}}{A Date, date of birth. }
\item{\code{PlaceOB}}{Place of birth. }
\item{\code{Gold}}{Numeric vector,
number of such medals won. }
\item{\code{Silver}}{ Similar to \code{Gold}. }
\item{\code{Bronze}}{ Similar to \code{Gold}. }
\item{\code{Total}}{A numeric vector, total number of medals. }
\item{\code{Sport}}{A factor with levels
\code{Archery},
\code{Athletics},
\code{Triathlon},
\code{Badminton}, etc.
}
\item{\code{Event}}{The sporting event. }
}
}
\details{
This data set represents a very small modification of a
\code{.csv} spreadsheet from the source below.
Height has been converted to meters,
and date of birth is of a \code{"Date"} class
(see \code{\link[base]{as.Date}}).
A few non-ASCII characters have been replaced by some ASCII sequence
(yet to be fixed up properly).
% yettodo: above.
Some competitors share the same name.
Some errors in the data are likely to exist.
%% ~~ If necessary, more details than the __description__ above ~~
}
\source{
Downloaded from
\code{http://www.guardian.co.uk/sport/series/london-2012-olympics-data}
in 2013-03; more recently it has changed to
\url{https://www.theguardian.com/sport/series/london-2012-olympics-data}.
%% ~~ reference to a publication or URL from which the data were obtained ~~
}
%\references{
%% ~~ possibly secondary sources and usages ~~
%}
\examples{
data(oly12)
mtab <- with(oly12, table(Country, Gold))
(mtab <- head(sort(mtab[, "1"] + 2 * mtab[, "2"], decreasing = TRUE), 10))
\dontrun{
barplot(mtab, col = "gold", cex.names = 0.8, names = abbreviate(names(mtab)),
beside = TRUE, main = "2012 Summer Olympic Final Gold Medal Count",
ylab = "Gold medal count", las = 1, sub = "Top 10 countries")
}
}
\keyword{datasets}
|
067d9f0f5bba8f0aeff74a5a7b3c400e903c8b8b
|
88311cfdacc0ada10cfb6e05c35411d3965dc582
|
/solution/2-similarity/part2a/visualise.R
|
972975d94f25000a0a810d4926ff9fd2d81dc2c1
|
[] |
no_license
|
g-eorge/CCPDS-02
|
7aab360f3c77c7c4a77bc047a7d18ee8c6dc1b95
|
6e3095395723f7d679349595d6ed8f098504a1b8
|
refs/heads/master
| 2021-05-27T10:18:52.830892
| 2014-07-01T01:47:09
| 2014-07-01T01:47:09
| 19,005,113
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,579
|
r
|
visualise.R
|
#! /usr/bin/env Rscript
# Dependencies
# install.packages("ggplot2")
# install.packages("reshape")
# Load packages
library(ggplot2)
library(reshape)
# The providers that are least like the others
provider_ids <- c('50195', '390180', '50441')
# Plot colours for the providers
scale_colours <- c()
scale_colours[[provider_ids[1]]] <- '#1AA794'
scale_colours[[provider_ids[2]]] <- '#F5435D'
scale_colours[[provider_ids[3]]] <- '#A532FF'
# Plot shapes for the procedures
scale_shapes <- c()
scale_shapes[[provider_ids[1]]] <- 15
scale_shapes[[provider_ids[2]]] <- 16
scale_shapes[[provider_ids[3]]] <- 17
numcols = 392 # The number of columns the vectorizer produced - 1
cls <- c("character", rep("numeric",numcols))
# Read in the data file
df <- read.csv("vector_providers.txt", header=F, stringsAsFactors=F, colClasses=cls, row.names=1, sep="\t", na.strings="NA")
## Plot number of procedures types each provider carries out (DRG, APC, Total)
counts <- df[,1:2] # Subset the procedure type count columns
colnames(counts) <- c("drg_count", "apc_count")
counts$total_count <- counts$drg_count + counts$apc_count # Compute a total column
# Use a Z scale for easier comparison
scaled_counts <- data.frame(scale(counts, center=T, scale=T))
# Pick out the providers we are interested in
compare_counts <- scaled_counts[provider_ids,]
compare_counts$id <- provider_ids
# Build the plot with a box plot for comparison
p <- ggplot(data = melt(scaled_counts), aes(x=variable, y=value))
p + geom_boxplot(alpha=0.4, size=0.5, color="grey") +
geom_point(aes(colour=provider_ids[1], shape=provider_ids[1]), data=melt(compare_counts[1,], id.vars='id')) +
geom_point(aes(colour=provider_ids[2], shape=provider_ids[2]), data=melt(compare_counts[2,], id.vars='id')) +
geom_point(aes(colour=provider_ids[3], shape=provider_ids[3]), data=melt(compare_counts[3,], id.vars='id')) +
scale_colour_manual(name="Provider", values=scale_colours) + scale_shape_manual(name="Provider", values=scale_shapes) +
xlab("procedure type counts") + ylab("z-score")
# Output the plot to a file
ggsave(file = "exploring/plots/procedure_type_counts.png", width = 11, height = 8, dpi = 300)
## Plot the number of services each provider carries out for each procedure
counts <- df[,seq(3,ncol(df),3)] # Subset the service count column for each procedure
# Use a Z scale for easier comparison
scaled_counts <- data.frame(scale(counts, center=T, scale=T))
# Pick out the providers we are interested in
compare_counts <- scaled_counts[provider_ids,]
compare_counts$id <- provider_ids
# Build the plot
p <- ggplot(data = melt(scaled_counts), aes(x=variable, y=value))
p + geom_point(aes(colour=provider_ids[1], shape=provider_ids[1]), data=melt(compare_counts[1,], id.vars='id')) +
geom_point(aes(colour=provider_ids[2], shape=provider_ids[2]), data=melt(compare_counts[2,], id.vars='id')) +
geom_point(aes(colour=provider_ids[3], shape=provider_ids[3]), data=melt(compare_counts[3,], id.vars='id')) +
scale_colour_manual(name="Provider", values=scale_colours) + scale_shape_manual(name="Provider", values=scale_shapes) +
xlab("procedure service counts") + ylab("z-score") +
theme(axis.text.x = element_blank())
# Output the plot to a file
ggsave(file = "exploring/plots/service_counts.png", width = 11, height = 8, dpi = 300)
## Plot the charges for procedures each provider carries out
counts <- df[,c(seq(4,ncol(df),3))] # Subset the charges columns for each procedure
# Use a Z scale for easier comparison
scaled_counts <- data.frame(scale(counts, center=T, scale=T))
# Pick out the providers we are interested in
compare_counts <- scaled_counts[provider_ids,]
compare_counts$id <- provider_ids
# Build the plot
p <- ggplot(data = melt(scaled_counts), aes(x=variable, y=value))
p + geom_point(aes(colour=provider_ids[1], shape=provider_ids[1]), data=melt(compare_counts[1,], id.vars='id')) +
geom_point(aes(colour=provider_ids[2], shape=provider_ids[2]), data=melt(compare_counts[2,], id.vars='id')) +
geom_point(aes(colour=provider_ids[3], shape=provider_ids[3]), data=melt(compare_counts[3,], id.vars='id')) +
scale_colour_manual(name="Provider", values=scale_colours) + scale_shape_manual(name="Provider", values=scale_shapes) +
xlab("procedure charges") + ylab("z-score") +
theme(axis.text.x = element_blank())
# Output the plot to a file
ggsave(file = "exploring/plots/charges.png", width = 11, height = 8, dpi = 300)
## Plot the payments for procedures each provider carries out
counts <- df[,c(seq(5,ncol(df),3))] # Subset the payments columns for each procedure
# Use a Z scale for easier comparison
scaled_counts <- data.frame(scale(counts, center=T, scale=T))
# Pick out the providers we are interested in
compare_counts <- scaled_counts[provider_ids,]
compare_counts$id <- provider_ids
# Build the plot
p <- ggplot(data = melt(scaled_counts), aes(x=variable, y=value))
p + geom_point(aes(colour=provider_ids[1], shape=provider_ids[1]), data=melt(compare_counts[1,], id.vars='id')) +
geom_point(aes(colour=provider_ids[2], shape=provider_ids[2]), data=melt(compare_counts[2,], id.vars='id')) +
geom_point(aes(colour=provider_ids[3], shape=provider_ids[3]), data=melt(compare_counts[3,], id.vars='id')) +
scale_colour_manual(name="Provider", values=scale_colours) + scale_shape_manual(name="Provider", values=scale_shapes) +
xlab("procedure payments") + ylab("z-score") +
theme(axis.text.x = element_blank())
# Output the plot to a file
ggsave(file = "exploring/plots/payments.png", width = 11, height = 8, dpi = 300)
## Plot everything in one plot
scaled_all <- data.frame(scale(df, center=T, scale=T))
# Pick out the providers we are interested in
compare_counts <- scaled_all[provider_ids,]
compare_counts$id <- provider_ids
# Build the plot
p <- ggplot(data = melt(scaled_all), aes(x=variable, y=value))
p + geom_point(color='#202020', size=1, alpha=0.2) +
geom_point(aes(colour=provider_ids[1], shape=provider_ids[1]), data=melt(compare_counts[1,], id.vars='id')) +
geom_point(aes(colour=provider_ids[2], shape=provider_ids[2]), data=melt(compare_counts[2,], id.vars='id')) +
geom_point(aes(colour=provider_ids[3], shape=provider_ids[3]), data=melt(compare_counts[3,], id.vars='id')) +
scale_colour_manual(name="Provider", values=scale_colours) + scale_shape_manual(name="Provider", values=scale_shapes) +
xlab("procedure counts, service counts, charges and payments") + ylab("z-score") +
theme(axis.text.x = element_blank())
# Output the plot to a file
ggsave(file = "exploring/plots/all.png", width = 11, height = 8, dpi = 300)
|
a2efd9abac3869e0aa56c7d99237242abb717e80
|
93a3ca0d2105970d92aba8ae04a7638d6938101c
|
/man/gt_get_data.Rd
|
14cff02b92e659ccca5435f0ef7426c7c5d623b3
|
[
"MIT"
] |
permissive
|
geysertimes/geysertimes-r-package
|
6482a9b72299497d0ad154ec0713e404adae0b63
|
933b2465337555092f06f7cd8b12413d47bfaa89
|
refs/heads/master
| 2022-06-18T04:48:53.824869
| 2022-06-12T02:16:07
| 2022-06-12T02:16:07
| 169,121,861
| 2
| 4
| null | 2020-07-26T18:02:03
| 2019-02-04T17:53:23
|
R
|
UTF-8
|
R
| false
| false
| 1,827
|
rd
|
gt_get_data.Rd
|
\name{gt_get_data}
\alias{gt_get_data}
\title{
Download GeyserTimes Data
}
\description{
Downloads the data from geysertimes.org.
Reads the data and creates a tibble object in `dest_folder`.
}
\usage{
gt_get_data(dest_folder = file.path(tempdir(), "geysertimes"),
overwrite = FALSE, quiet = FALSE, version = lubridate::today())
}
\arguments{
\item{dest_folder}{
the location where the binary tibble object should be written.
The default is under the current R session's temp directory
which will disappear when the session ends.
}
\item{overwrite}{
a logical value,
if\code{FALSE}, the data will not be downloaded again if copy of the data,
with \code{version}, already exists in \code{dest_folder}.
}
\item{quiet}{
a logical value, if \code{TRUE}, no messages are displayed.
}
\item{version}{
a character string giving the version of the data to download.
This should a date in the form \code{yyyy-mm-dd}.
Typically, only the version with today's date is available.
}
}
\details{
The data is downloaded from the GeyserTimes archive web site
\url{https://geysertimes.org/archive/} to the \code{tempdir()} directory.
The data is then read with \code{readr::read_tsv} with appropriate
column types.
The resulting \code{tibble} object is then saved as an binary (\code{.rds})
in \code{dest_folder}.
}
\value{
a character string giving the full path to the directory where
the GeyserTimes data was stored.
}
\author{
Stephen Kaluzny <spkaluzny@gmail.com>.
}
\note{
Users are encouraged to set \code{dest_folder} to \code{gt_path()} to save
a persistent copy of the data.
}
\seealso{
gt_load_eruptions, gt_load_geysers.
}
\examples{
\donttest{
dpath0 <- gt_get_data() # data saved under tempdir()
dpath1 <- gt_get_data(dest=gt_path()) # data saved under gt_path()
gt_cleanup_data(gt_version())
}
}
\keyword{geysertimes}
|
932436766d396791fa5efa04775006eb3f7bc586
|
771706de90263db2375687df55677276af8dcb57
|
/Assignment 8.R
|
daca64437110a38ee96f3d49b2a45f02c8515cde
|
[] |
no_license
|
dduwill/Product-Review
|
f65fcdf62bc7ecdb5f078a8e509e0093e90e6915
|
91db629ae04414ea6ecab3ca62e82c6c77a7bb2f
|
refs/heads/master
| 2020-04-15T04:37:35.378919
| 2016-11-15T22:02:57
| 2016-11-15T22:02:57
| 73,781,543
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,255
|
r
|
Assignment 8.R
|
# Assignment 8 -- LDA topic modelling of Amazon "Automotive" product reviews.
# Pipeline: read line-delimited JSON reviews -> build and clean a text corpus
# -> fit a collapsed-Gibbs-sampler LDA model (K = 10 topics) -> interactive
# topic visualisation with LDAvis.
library(rjson)
library(dplyr)
require(magrittr)
library(quanteda)
library(stm)
library(tm)
library(NLP)
library(openNLP)
library(ggplot2)
library(ggdendro)
library(cluster)
library(fpc)
# Read the JSON file: the source has one JSON object per line, so the lines
# are joined with commas and wrapped in [] to form one parseable JSON array.
setwd("C:/Users/weiyi/Desktop/R/Assignment 8")
path <- "Automotive_5.json"
data <- fromJSON(sprintf("[%s]", paste(readLines(path),collapse=",")))
# The 5th field of each record holds the free-text review body.
# NOTE(review): assumes a fixed field order in the JSON records -- confirm.
review <-sapply(data, function(x) x[[5]])
# Generate a document-feature matrix (DFM) with quanteda.
help(corpus)
corpus <- corpus(review)
corpus <- toLower(corpus, keepAcronyms = FALSE)
cleancorpus <- tokenize(corpus,
removeNumbers=TRUE,
removePunct = TRUE,
removeSeparators=TRUE,
removeTwitter=FALSE,
verbose=TRUE)
dfm <- dfm(cleancorpus,
toLower = TRUE,
ignoredFeatures =stopwords("SMART"),
verbose=TRUE,
stem=TRUE)
# Reviewing top features
topfeatures(dfm, 50) # displays 50 features
# Cleaning corpus: SMART stop-word list plus corpus-specific filler words.
stop_words <- stopwords("SMART")
## additional junk words showing up in the data
stop_words <- c(stop_words, "just", "get", "will", "can", "also", "much","need")
stop_words <- tolower(stop_words)
cleancorpus <- gsub("'", "", cleancorpus) # remove apostrophes
cleancorpus <- gsub("[[:punct:]]", " ", cleancorpus) # replace punctuation with space
cleancorpus <- gsub("[[:cntrl:]]", " ", cleancorpus) # replace control characters with space
cleancorpus <- gsub("^[[:space:]]+", "", cleancorpus) # remove whitespace at beginning of documents
cleancorpus <- gsub("[[:space:]]+$", "", cleancorpus) # remove whitespace at end of documents
cleancorpus <- gsub("[^a-zA-Z -]", " ", cleancorpus) # allows only letters
cleancorpus <- tolower(cleancorpus) # force to lowercase
## get rid of blank docs
cleancorpus <- cleancorpus[cleancorpus != ""]
# tokenize on space and output as a list:
doc.list <- strsplit(cleancorpus, "[[:space:]]+")
# compute the table of terms:
term.table <- table(unlist(doc.list))
term.table <- sort(term.table, decreasing = TRUE)
# remove terms that are stop words or occur fewer than 5 times:
del <- names(term.table) %in% stop_words | term.table < 5
term.table <- term.table[!del]
term.table <- term.table[names(term.table) != ""]
vocab <- names(term.table)
# now put the documents into the format required by the lda package:
# each document becomes a 2 x N integer matrix -- row 1 holds 0-based
# vocabulary indices, row 2 holds a count of 1 per token.
get.terms <- function(x) {
index <- match(x, vocab)
index <- index[!is.na(index)]
rbind(as.integer(index - 1), as.integer(rep(1, length(index))))
}
documents <- lapply(doc.list, get.terms)
# Compute some statistics related to the data set:
D <- length(documents) # number of documents (1)
W <- length(vocab) # number of terms in the vocab (8941L)
doc.length <- sapply(documents, function(x) sum(x[2, ])) # number of tokens per document [46, 27, 106 ...]
N <- sum(doc.length) # total number of tokens in the data (863558L)
term.frequency <- as.integer(term.table)
# MCMC and model tuning parameters:
K <- 10 # number of topics
G <- 3000 # number of Gibbs sampling iterations
alpha <- 0.02 # document-topic Dirichlet prior
eta <- 0.02 # topic-term Dirichlet prior
# Fit the model:
library(lda)
set.seed(357)
t1 <- Sys.time()
fit <- lda.collapsed.gibbs.sampler(documents = documents, K = K, vocab = vocab,
num.iterations = G, alpha = alpha,
eta = eta, initial = NULL, burnin = 0,
compute.log.likelihood = TRUE)
t2 <- Sys.time()
## display runtime
t2 - t1
# Posterior summaries: theta = per-document topic proportions,
# phi = per-topic term distributions (both smoothed by the priors).
theta <- t(apply(fit$document_sums + alpha, 2, function(x) x/sum(x)))
phi <- t(apply(t(fit$topics) + eta, 2, function(x) x/sum(x)))
reviews.LDA <- list(phi = phi,
theta = theta,
doc.length = doc.length,
vocab = vocab,
term.frequency = term.frequency)
library(LDAvis)
library(servr)
# create the JSON object to feed the visualization:
json <- createJSON(phi = reviews.LDA$phi,
theta = reviews.LDA$theta,
doc.length = reviews.LDA$doc.length,
vocab = reviews.LDA$vocab,
term.frequency = reviews.LDA$term.frequency)
serVis(json, out.dir = 'vis', open.browser = TRUE)
|
2ea23c5ba92494e9669eae718b8d556be44b30aa
|
3bb85139690fe4f6c4575f1ca12aac3cccc758ea
|
/cachematrix.R
|
b8e67e68e3304032fb146fe193b46b90900dbb8f
|
[] |
no_license
|
IZLID-LSSO/ProgrammingAssignment2
|
ed3a9fa5c5fc5cf846d869ae832be172702e108c
|
428b985932e540fe4bc923a7e12f0f20ed91f53b
|
refs/heads/master
| 2020-05-30T18:52:26.790726
| 2019-06-03T01:53:33
| 2019-06-03T01:53:33
| 189,909,119
| 0
| 0
| null | 2019-06-03T00:25:06
| 2019-06-03T00:25:06
| null |
UTF-8
|
R
| false
| false
| 928
|
r
|
cachematrix.R
|
## Helpers that compute and cache the inverse of a matrix.

## Create a special "matrix" object: a list of accessor closures wrapping a
## matrix `x` together with a lazily-filled cache of its inverse. The cache
## is invalidated whenever the matrix is replaced via set().
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL                # memoised inverse; NULL until set
  set <- function(value) {
    x <<- value                         # replace the stored matrix ...
    cached_inverse <<- NULL             # ... and drop the stale cache
  }
  get <- function() {
    x
  }
  setInverse <- function(solveMatrix) {
    cached_inverse <<- solveMatrix
  }
  getInverse <- function() {
    cached_inverse
  }
  # Expose the four accessors under the names cacheSolve() expects.
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## Compute (and cache) the inverse of a "cacheMatrix" built by
## makeCacheMatrix(). On a cache hit a message is printed and the stored
## inverse is returned unchanged; on a miss the inverse is computed with
## solve(), stored via setInverse(), and returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getInverse()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  data <- x$get()
  # fix: forward '...' (e.g. tol =) to solve(); the original accepted
  # '...' in the signature but silently ignored it.
  inv <- solve(data, ...)
  x$setInverse(inv)
  inv
}
## End of Assignment
|
f29c2dd28757f3762f9451c10963aa33c68c9867
|
6a0a368b7509afbc729304fc0073b0b940b43e8f
|
/cachematrix.R
|
2e08ccea84129d89fccbc8f0d461fb5a2529468b
|
[] |
no_license
|
ong625/ProgrammingAssignment2
|
d90bc676b9425df7181f7292b92b219219349f1f
|
f53cc747c49ea766affa975dc7358cb0beb0fd86
|
refs/heads/master
| 2022-11-27T07:58:30.358493
| 2020-08-03T04:04:48
| 2020-08-03T04:04:48
| 284,584,988
| 0
| 0
| null | 2020-08-03T02:31:45
| 2020-08-03T02:31:44
| null |
UTF-8
|
R
| false
| false
| 665
|
r
|
cachematrix.R
|
## Wrap a matrix in a list of closures so that its inverse can be cached
## by cacheSolve(). Assigning a new matrix through set() clears the cache.
makeCacheMatrix <- function(x = matrix()) {
  inverse_cache <- NULL  # NULL means "not computed yet"
  list(
    set = function(y) {
      x <<- y
      inverse_cache <<- NULL
    },
    get = function() x,
    setInverse = function(inverse) inverse_cache <<- inverse,
    getInverse = function() inverse_cache
  )
}
## Return the inverse of the special "matrix" 'x', computing it with
## solve() only when no cached value is available; on a cache hit a
## message is printed and the stored inverse is returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  r <- x$getInverse()
  if (!is.null(r)) {
    message("getting cached data")
    return(r)
  }
  mat <- x$get()
  r <- solve(mat, ...)
  x$setInverse(r)
  # bug fix: the original returned the undefined name 'j', so every
  # cache miss ended in "object 'j' not found" instead of the inverse.
  r
}
|
b9eec90cd116558e71c13bf32b0a83f7b42b91d6
|
df5885ac73301c7050b373d1d3d9f89991e9dbcc
|
/Figure-11.R
|
0f800a98b77a89475d90c65db172107c977d3287
|
[] |
no_license
|
Fabbiologia/BluePaper-10_Supplementary_informations
|
2af3103ea9b6d61becbe7db03b4939d1263169ef
|
e52a5f985c5df74297c09e937683eab68d2c9d9f
|
refs/heads/master
| 2020-12-15T03:08:23.841603
| 2020-04-14T16:41:36
| 2020-04-14T16:41:36
| 234,975,349
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,897
|
r
|
Figure-11.R
|
# Figure 11 -- jittered dot plot of the percentage of each marine habitat's
# EEZ area that lies inside MPAs, drawn over a colour gradient background.
# Requires network access (data fetched from GitHub) and the local helper
# script "GeneratedGradientData.R"; writes figure files under figs/.
library(tidyverse)
library(patchwork)
library(RCurl)
### Data loading and wrangling ----
# Keep the total pixel count plus the three MPA categories (all / managed /
# no-take), spread them to columns, and convert counts to % of habitat area.
toplot <- read.csv(text = getURL('https://raw.githubusercontent.com/Fabbiologia/BluePaper-10_Supplementary_informations/master/data/HabitatProtectedDataset.csv')) %>%
    filter(Cat %in% c('Total','mpa_all', 'mpa_all_m', 'mpa_all_nt')) %>%
    pivot_wider(names_from = Cat, values_from = Pixel_count) %>%
    replace(is.na(.), 0) %>%
    mutate_at(vars(mpa_all:mpa_all_nt), list(~(./Total)*100)) %>%
    filter(Total > 0) %>% #This filter out absent habitats
    group_by(Habitat) %>%
    # per-habitat mean and median of each protection percentage
    mutate(mean_all = mean(mpa_all), median_all = median(mpa_all),
           mean_m = mean(mpa_all_m), median_m = median(mpa_all_m),
           mean_nt = mean(mpa_all_nt), median_nt = median(mpa_all_nt)) %>%
    ungroup() %>%
    # relabel the habitat codes with human-readable names
    # NOTE(review): factor(labels=) maps by the alphabetical level order of
    # the original codes -- confirm the 12 labels line up with the codes.
    mutate(Habitat = factor(.$Habitat,
                            labels=c("Cold Corals",
                                     "Coral Reefs",
                                     "Estuaries",
                                     "Kelp",
                                     "Mangroves",
                                     "Ridges",
                                     "Saltmarshes",
                                     "Seagrasses",
                                     "Seamounts and Guyots",
                                     "Shelf and Canyons",
                                     "Trenches",
                                     "Hydrothermal vents")))
# Order habitats roughly from coastal/shallow to open/deep for plotting.
toplot$Habitat <-
    factor(toplot$Habitat, levels = c(
        "Estuaries",
        "Mangroves",
        "Saltmarshes",
        "Seagrasses",
        "Coral Reefs",
        "Kelp",
        "Shelf and Canyons",
        "Cold Corals",
        "Seamounts and Guyots",
        "Trenches",
        "Hydrothermal vents",
        "Ridges"
    ))
toplot
### Data plotting -------
# day/night colours
night_colour <- c("aquamarine")
day_colour <- c("darkblue")
source("GeneratedGradientData.R")
# generate data for a one-hour sunrise gradient
sunrise_pd <- GenerateGradientData(start_hour = 0,
                                   stop_hour = 13,
                                   start_colour = night_colour,
                                   stop_colour = day_colour,
                                   x_resolution = 1000)
# Jittered points per habitat, with the gradient rectangle behind them,
# a white 30% reference line, and blue/red dots for the mean/median.
p1 <- ggplot(toplot, aes(x = Habitat, y = mpa_all, col=as.integer(Habitat), group=Habitat)) +
    geom_rect(xmin=0, xmax=13, ymin=-Inf, ymax=Inf, fill=day_colour)+
    # gradient backgrounds for sunrise and sunset
    geom_rect(data = sunrise_pd,
              mapping = aes(xmax = xmax,
                            xmin = xmin,
                            ymax = ymax,
                            ymin = ymin),
              fill = sunrise_pd$grad_colours,
              inherit.aes = FALSE) +
    geom_jitter(size = 2, alpha = 0.5, width = 0.2, col="black", fill="white")+
    geom_hline(yintercept = 30, col="white", size=0.9)+
    geom_segment(aes(x = Habitat, xend = Habitat,
                     y = mean_all, yend = median_all), size = 0.1, col="white")+
    geom_point(aes(y= mean_all), size = 3, pch=21, fill="blue") +
    geom_point(aes(y= median_all), size = 3, pch=21, fill="red") +
    labs(x = NULL, y = "% area within MPA in the EEZ") +
    ylim(0,100)+
    theme(legend.position = "none",
          panel.background = element_blank(),
          panel.grid = element_blank(),
          axis.text.x = element_text(angle=90))
p1
### uncomment to save ------
# ggsave() with no plot argument saves the last plot displayed (p1).
ggsave('figs/Figure_11.pdf')
ggsave('figs/Figure_11.tiff')
ggsave('figs/Figure_11.png', dpi = 300)
# END OF SCRIPT ------
|
5835f8915957577b0b75aaace7a25a178f36f0e5
|
2a1a58c97642e4b4e568a18ad76dc6fbf246a125
|
/R/impreciseImputation.R
|
25163c12c9f837cf61815b59972ca2bbc17f5204
|
[] |
no_license
|
cran/impimp
|
1657934ffa51d9e8afd65d3e5f9d1c961fc863f0
|
976176f80b808f5b7c0e88830fd49c5c4fa7295f
|
refs/heads/master
| 2020-03-30T08:04:13.559483
| 2019-02-03T17:43:16
| 2019-02-03T17:43:16
| 150,986,631
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,690
|
r
|
impreciseImputation.R
|
# Copyright (C) 2018 Paul Fink, Eva Endres
#
# This file is part of impimp.
#
# imptree is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# imptree is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with imptree. If not, see <https://www.gnu.org/licenses/>.
#' @title Imprecise Imputation for Statistical Matching
#'
#' @description Impute a data frame imprecisely
#'
#' @param recipient a data.frame acting as recipient; see details.
#' @param donor a data.frame acting as donor; see details.
#' @param method 1-character string of the desired imputation method.
#' The following values are possible, see details for an explanantion:
#' \code{"variable_wise"} (default), \code{"case_wise"} and
#' \code{"domain"}.
#' @param matchvars a character vector containing the variable names
#' to be used as matching variables. If \code{NULL} (default) all
#' variables, present in both \code{donor} and \code{recipient} are
#' used as matching variables.
#' @param vardomains a named list containing the possible values of
#' all variable in \code{donor} that are not present in
#' \code{recipient}.\cr
#' If set to \code{NULL} (default) the list is generated by first
#' coercing all those variables to type \code{\link[base]{factor}}
#' and then storing their levels.
#'
#' @details
#' As in the context of statistical matching the data.frames
#' \code{recipient} and \code{donor} are assumed to contain an
#' overlapping set of variables.
#'
#' The missing values in \code{recipient} are substituted with
#' observed values in \code{donor} for approaches based on donation
#' classes and otherwise with the set of all possible values for
#' the variable in question.
#'
#' For \code{method = "domain"} a missing value of a variable in
#' \code{recipient} is imputed by the set of all possible values
#' of that variable.
#'
#' The other methods are based on donation classes which are formed
#' based on the matching variables whose names are provided by
#' \code{matchvars}. They need to be present in both \code{recipient}
#' and \code{donor}:
#' For \code{method = "variable_wise"} a missing value of a variable
#' in \code{recipient} is imputed by the set of all observed values
#' of that variable in \code{donor}.
#' For \code{method = "case_wise"} the variables only present in
#' \code{donor} are represented as tuples. A missing tuple in
#' \code{recipient} is then imputed by the set of all observed
#' tuples in \code{donor}.
#'
#' @section Reserved characters:
#' The variable names and observations in \code{recipient} and
#' \code{donor} must not contain characters that are reserved for
#' internal purpose.
#' The actual characters that are internally used are stored in the
#' options \code{options("impimp.obssep")} and
#' \code{options("impimp.varssep")}. The former is used to separate
#' the values of a set-valued observation, while the other is used
#' for a concise tupel representation.
#'
#' @note
#' This method does not require that all variables in \code{recipient}
#' and \code{donor} are \code{\link[base]{factor}} variables, however,
#' the imputation methods apply coercion to factor, so purely
#' numerical variables will be treated as factors eventually.
#' It does assume (and test for it) that there are no missing
#' values present in the matching variables.
#'
#' @return
#' The data.frame resulting in an imprecise imputation
#' of \code{donor} into \code{recipient}.
#' It is also of class \code{"impimp"} and stores the imputation
#' method in its attribute \code{"impmethod"}, the names of the
#' variables of the resulting object containing imputed values
#' in the attribute \code{"imputedvarnames"}, as well as the
#' list of (guessed) levels of each underlying variable in
#' \code{"varlevels"}.
#'
#' @keywords robust datagen
#'
#' @seealso for the estimation of probabilities \code{\link{impest}}
#' and \code{\link{impestcond}}; \code{\link{rbindimpimp}} for
#' joining two \code{impimp} objects
#'
#' @references Endres, E., Fink, P. and Augustin, T. (2018),
#' Imprecise Imputation: A Nonparametric Micro Approach Reflecting
#' the Natural Uncertainty of Statistical Matching with Categorical
#' Data, \emph{Department of Statistics (LMU Munich): Technical Reports},
#' No. 214. URL \url{https://epub.ub.uni-muenchen.de/42423/}.
#'
#' @examples
#' A <- data.frame(x1 = c(1,0), x2 = c(0,0),
#' y1 = c(1,0), y2 = c(2,2))
#' B <- data.frame(x1 = c(1,1,0), x2 = c(0,0,0),
#' z1 = c(0,1,1), z2 = c(0,1,2))
#' impimp(A, B, method = "variable_wise")
#'
#' ## Specifically setting the possible levels of 'z1'
#' impimp(A, B, method = "domain", vardomains = list(z1 = c(0:5)))
#'
#' @importFrom stats setNames
#' @export
impimp <- function(recipient, donor, method = c("variable_wise",
                                                "case_wise",
                                                "domain"),
                   matchvars = NULL, vardomains = NULL) {
  # Check the environment: the two internally reserved separator
  # characters must differ, otherwise the tuple encoding would be ambiguous.
  varsep <- getOption("impimp.varsep", ",")
  obssep <- getOption("impimp.obssep", "|")
  if(varsep == obssep) {
    stop(gettextf("option values %s and %s need to be different characters",
                  sQuote("impimp.varsep"), sQuote("impimp.obssep"),
                  domain = "R-impimp"))
  }
  # temporarily set stringsAsFactors to FALSE
  # and reset it to old value after exiting
  oldsAF <- options(stringsAsFactors = FALSE)
  on.exit(options(oldsAF))
  # function argument matching
  method <- match.arg(method)
  # extract common variables
  cnames <- intersect(names(donor), names(recipient))
  # Test if there is an non-empty intersection in the names
  if(!length(cnames)) {
    stop(gettextf("%s and %s do not contain any variables present in both",
                  sQuote("recipient"), sQuote("donor"),
                  domain = "R-impimp"))
  }
  # Default: use every common variable as a matching variable.
  if(is.null(matchvars)) {
    matchvars <- cnames
  } else if(is.character(matchvars)){
    if(any(nm <- (match(matchvars, cnames, nomatch = 0L) == 0L))) {
      stop(gettextf("%s contains variable(s) which are not present in both %s and %s: %s",
                    sQuote("matchvars"), sQuote("donor"),
                    sQuote("recipient"),
                    paste(sapply(matchvars[nm], dQuote), collapse = ", "),
                    domain = "R-impimp"))
    }
  } else {
    stop(gettextf("%s must be NULL or a character vector",
                  sQuote("matchvars"),
                  domain = "R-impimp"))
  }
  # Test if the matching variables do not contain NA
  lapply(matchvars, function(x) {
    if(anyNA(recipient[ ,x])) {
      stop(gettextf("missing values in variable %s in %s",
                    sQuote(x), sQuote("recipient"),
                    domain = "R-impimp"))
    }
    if(anyNA(donor[ ,x])) {
      stop(gettextf("missing values in variable %s in %s",
                    sQuote(x), sQuote("donor"),
                    domain = "R-impimp"))
    }
  })
  # rnames: variables only in recipient; dnames: variables only in donor
  # (these are the ones to be imputed into the recipient).
  rnames <- setdiff(names(recipient), names(donor))
  dnames <- setdiff(names(donor), names(recipient))
  allnames <- c(rnames, cnames, dnames)
  # check for special package-reserved characters in variable names
  if(length(grep(varsep, allnames, fixed = TRUE))) {
    stop(gettextf(c("some variable names contain the character %s, reserved for internal purpose.",
                    "\nRename the variable(s) or change the internal character by setting the option %s"),
                  c(sQuote(varsep), sQuote("impimp.varsep")),
                  domain = "R-impimp"))
  }
  # Do nothing if there are no variables in donor that aren't in recipient
  if(!length(dnames)) {
    warning(gettextf(c("no variable present only in %s and not in %s; ",
                       "returning %s unmodified"),
                     c(sQuote("donor"), sQuote("recipient")),
                     sQuote("recipient"),
                     domain = "R-impimp"))
    return(recipient)
  }
  # Construct the possible values for the variables from the
  # (partially) supplied argument 'vardomains'
  if(!is.null(vardomains)){
    # partially match available levels
    lvls <- vardomains[allnames]
  } else {
    # else generate a list of empty ones
    lvls <- vector(length = length(allnames), mode = "list")
  }
  # generate the potentially missing levels
  # by using the factor based approach
  lvls <- lapply(stats::setNames(nm = allnames),
                 function(varname) {
                   varlevels <- lvls[[varname]]
                   # if variable is not present in one df,
                   # then NULL is returned for that df
                   gvarlevels <- gather_levels(c(as.character(recipient[[varname]]),
                                                 as.character(donor[[varname]])))
                   if(is.null(varlevels)) {
                     varlevels <- gvarlevels
                   } else if(length(lvldiff <- setdiff(gvarlevels,
                                                       varlevels))) {
                     # keep user-supplied levels but prepend any observed
                     # level not declared in 'vardomains'
                     varlevels <- c(lvldiff, varlevels)
                   }
                   varlevels
                 })
  # check for special package-reserved characters in variable values
  lapply(names(lvls), function(x) {
    if(length(grep(varsep, lvls[[x]], fixed = TRUE))) {
      stop(gettextf(c("variable %s contains the character %s, reserved for internal purpose.",
                      "\nChange the internal character by setting the option %s"),
                    c(sQuote(x), sQuote("impimp.varsep")),
                    sQuote(varsep), domain = "R-impimp"))
    }
    if(length(grep(obssep, lvls[[x]], fixed = TRUE))) {
      stop(gettextf(c("variable %s contains the character %s, reserved for internal purpose.",
                      "\nChange the internal character by setting the option %s"),
                    c(sQuote(x), sQuote("impimp.obssep")),
                    sQuote(obssep), domain = "R-impimp"))
    }
  })
  dlvls <- lvls[dnames]
  # impute the domain for every missing cell
  if(method == "domain") {
    # add columns with NA to the data
    impRecipient <- cbind(recipient, matrix(NA, ncol = length(dnames),
                                            nrow = nrow(recipient),
                                            dimnames = list(c(), dnames)))
    # impute all the levels
    impRecipient[, dnames] <- imputation_values(dlvls, dnames)
  } else {
    # impute cell-wise within donor classes
    # create new variable to index the x structure
    # This is for donation classes
    recipient$cfactor <- factor(apply(recipient[, matchvars], MARGIN = 1,
                                      FUN = paste, collapse =","))
    donor$cfactor <- factor(apply(donor[, matchvars], MARGIN = 1,
                                  FUN = paste, collapse =","))
    ## transform into tuple notation for method == case_wise
    if(method == "case_wise") {
      donor <- cbind(donor, collapse_variables(donor, dnames))
      dlvls <- collapse_variables(
        do.call("expand.grid", dlvls), dnames)
      dnames <- names(dlvls)
    }
    # extract level combinations of common variables
    # which are present in recipient
    clvls <- levels(recipient$cfactor)
    # initialize the resulting data.frame with NAs
    # in the variables to be imputed
    impRecipient <- cbind(recipient, matrix(NA, ncol = length(dnames),
                                            nrow = nrow(recipient),
                                            dimnames = list(c(), dnames)))
    for(clvl in clvls) {
      ## Generate the levels to impute
      donorclass_donor <- donor[donor$cfactor == clvl, ]
      if(NROW(donorclass_donor) == 0) {
        # empty donor class in donor, use collection
        # of levels of the variables
        ##### Shall we leave this a warning or a message instead?
        ##### We can also opt for a 'verbose' option
        warning(gettextf("No donor found for donation class: %s",
                         sQuote(clvl),
                         domain = "R-impimp"))
        donor_dlvls <- dlvls
      } else {
        # extract observed donor values
        donor_dlvls <- lapply(stats::setNames(nm = dnames),
                              function(x) {
                                gather_levels(donorclass_donor[,x])
                              })
      }
      # impute observed donor values
      impRecipient[impRecipient$cfactor == clvl, dnames] <-
        imputation_values(donor_dlvls, dnames)
    }
  }
  # drop the helper 'cfactor' column and restore the column order
  impRecipient <- impRecipient[, c(rnames, cnames, dnames)]
  # bug fix: the original tested 'grep(class(impRecipient), "impimp",
  # fixed = TRUE)', which swaps grep()'s pattern/x arguments and emits a
  # warning whenever class() has length > 1; inherits() is the correct
  # class test and behaves identically when the check succeeds.
  if(!inherits(impRecipient, "impimp")) {
    class(impRecipient) <- c("impimp", class(impRecipient))
  }
  attr(impRecipient, "impmethod") <-
    c(method, attr(impRecipient, "impmethod"))
  attr(impRecipient, "varlevels") <- lvls
  attr(impRecipient, "imputedvarnames") <- dnames
  impRecipient
}
#' @rdname impimp
#' @param x object of class 'impimp'
#' @param ... further arguments passed down to
#' \code{\link[base]{print.data.frame}}
#' @export
print.impimp <- function(x, ...) {
  # Announce which imputation method produced the object, then hand the
  # tabular content to the next method (typically print.data.frame).
  header <- gettextf("result of imprecise imputation with method %s\n",
                     sQuote(attr(x, "impmethod")),
                     domain = "R-impimp")
  cat(header)
  NextMethod(x, ...)
}
#' @rdname impimp
#' @param z object to test for class \code{"impimp"}
#' @export
is.impimp <- function(z) {
  # TRUE exactly when 'z' carries the S3 class "impimp".
  inherits(z, "impimp")
}
|
11144f365afc3f8a8107e52a7f64f02db70f8453
|
f6a5600cd0c8cad6699710049c4edff1aa1934e4
|
/code/prioritizr_frontier.r
|
ce0b63cf3e8fa6e7d20f4c720e48c5668f58db01
|
[
"MIT"
] |
permissive
|
pinskylab/ClimateAndMSP
|
04f251d6cf2bc16f34265d6f38cc3b580e886af8
|
e9a9d693e045c8318d13f37ace33036407430c8a
|
refs/heads/master
| 2023-03-02T15:55:47.637873
| 2023-02-21T19:25:56
| 2023-02-21T19:25:56
| 30,681,601
| 0
| 1
| null | 2020-08-19T14:25:31
| 2015-02-12T02:36:09
|
R
|
UTF-8
|
R
| false
| false
| 24,203
|
r
|
prioritizr_frontier.r
|
# Set up and run Prioritizr with zones to simulate CMSP
# Fixed budget across a range of weight present vs. future to get an efficiency frontier
# set up to source from within R 3.5.3: source('code/5.1_prioritizr.r')
# May need to set R_MAX_VSIZE=60000000 or larger in .Renviron to avoid hitting memory limits (Sys.getenv('R_MAX_VSIZE') to query)
#############
## Parameters
#############
# choose the rcps
# will use both for first planning period
# will use only the second for the second planning period
rcps <- c(26, 85)
# choose the climate models to use for future planning
#bcc-csm1-1-m, bcc-csm1-1, CanESM2, CCSM4, CESM1-CAM5, CNRM-CM5, GFDL-CM3, GFDL-ESM2M, GFDL-ESM2G, GISS-E2-R, GISS-E2-H, IPSL-CM5A-LR, IPSL-CM5A-MR, MIROC-ESM, MPI-ESM-LR, NorESM1-ME
gcminds <- c(1, 2, 3, 4, 8, 9, 10, 14) # from running sample(1:16, 8)
# CMSP goals
consgoal <- 0.1 # proportion of presences to capture in conservation
energygoal <- 0.2 # proportion of NPV
fishgoal <- 0.5 # proportion of biomass
cost <- 0.01 # basic cost of including each planning unit in a given zone
# oceans to read in
oceans <- c('Atl', 'Pac')
# choose region and name these runs
myregs <- c('ebs', 'goa', 'bc', 'wc', 'gmex', 'seus', 'neus', 'maritime', 'newf')
# which time periods to use in the multi-period planning
# contemporary time period must be in first slot, second time period must be the future
planningperiods <- c('2007-2020', '2081-2100')
# how many budget levels to examine
nbudget <- 2
minbudget <- 0.75
maxbudget <- 0.90
# how many weights to examine (linear scale)
nweight <- 91
minweight <- 0
maxweight <- 100
# set output name
outname <- paste0('temp/frontierall_', format(Sys.time(), "%Y-%m-%d_%H%M%S"), '.csv')
# optimality gap, number of threads, and time limit for gurobi solver
gap <- 0.01
nthread <- 2
timelimit <- 1200 # seconds
######################
# Functions
######################
require(data.table)
library(prioritizr) # only runs in R 3.5.3 for now (Gurobi 8.1.1)
#####################
## Load data
#####################
# loads presence/absence and biomass data
if(!(length(rcps) %in% c(1,2))){
stop('rcp must be length 1 or 2')
}
for (i in 1:length(rcps)){
print(paste0('Loading rcp', rcps[i]))
for(j in 1:length(oceans)){
for(k in 1:length(planningperiods)){
# do both RCPs for first planning period. Do only 2nd rcp for 2nd planning period.
if(k == 1 | (k == 2 & i == 2)){
print(paste(oceans[j], planningperiods[k]))
prestemp <- fread(cmd = paste0('gunzip -c temp/presmap_', oceans[j], '_rcp', rcps[i], '_', planningperiods[k], '.csv.gz'), drop = 1)
biotemp <- fread(cmd = paste0('gunzip -c temp/biomassmap_', oceans[j], '_rcp', rcps[i], '_', planningperiods[k], '.csv.gz'), drop = 1)
# calculate ensemble mean across training GCMs and remaining RCPs
prestemp <- prestemp[model %in% c(1:16)[gcminds], .(poccur = mean(poccur)), by = c('latgrid', 'longrid', 'year_range', 'rcp', 'spp')]
biotemp <- biotemp[model %in% c(1:16)[gcminds], .(biomass = mean(biomass)), by = c('latgrid', 'longrid', 'year_range', 'rcp', 'spp')]
if(i == 1 & j == 1 & k == 1){
presmap <- prestemp
biomassmap <- biotemp
} else {
presmap <- rbind(presmap, prestemp)
biomassmap <- rbind(biomassmap, biotemp)
}
}
}
}
}
rm(prestemp, biotemp)
# average across the rcps
presmap <- presmap[, .(poccur = mean(poccur)), by = c('latgrid', 'longrid', 'year_range', 'spp')]
biomassmap <- biomassmap[, .(biomass = mean(biomass)), by = c('latgrid', 'longrid', 'year_range', 'spp')]
# poccur threshold: how high does the probability of occurrence in the projections need to be to consider the species "present"?
# use the thresholds calculated during model fitting from Morley et al. 2018 PLOS ONE
poccurthresh <- fread('https://raw.githubusercontent.com/pinskylab/project_velocity/master/output/modeldiag_Nov2017_fitallreg_2017.csv', drop = 1)[, .(sppocean, thresh.kappa)]
# load NatCap calculations
windnpv <- fread(cmd = 'gunzip -c output/wind_npv.csv.gz', drop = 1)
wavenpv <- fread(cmd = 'gunzip -c output/wave_npv.csv.gz', drop = 1)
setnames(windnpv, c('lat', 'lon', 'npv'), c('latgrid', 'longrid', 'wind_npv'))
setnames(wavenpv, c('lat', 'lon', 'npv'), c('latgrid', 'longrid', 'wave_npv'))
# definition of fishery species by region
fisheryspps <- fread('output/fishery_spps.csv', drop = 1) # which spp to include in fishery goal in each region
# region definitions
regiongrid <- fread(cmd = 'gunzip -c output/region_grid.csv.gz', drop = 1)
################################
## Set up data for any region
################################
# Fix lon in regiongrid to match presmap (-360 to 0)
regiongrid[longrid > 0, longrid := longrid - 360]
# Add region information to presmap
setkey(presmap, latgrid, longrid)
setkey(regiongrid, latgrid, longrid)
presmap <- merge(presmap, regiongrid[, .(latgrid, longrid, region)], all.x = TRUE) # add region information
if(presmap[is.na(region) & !duplicated(presmap[,.(latgrid, longrid)]), .N] != 0){ # 0 missing region: good!
stop('presmap is missing >0 regions')
}
# presmap[is.na(region) & !duplicated(presmap[,.(latgrid, longrid)]), ]
# presmap[is.na(region) & !duplicated(presmap[,.(latgrid, longrid)]), plot(longrid, latgrid)]
# Add region information to biomassmap
setkey(biomassmap, latgrid, longrid)
setkey(regiongrid, latgrid, longrid)
biomassmap <- merge(biomassmap, regiongrid[, .(latgrid, longrid, region)], all.x = TRUE) # add region information
if(biomassmap[is.na(region) & !duplicated(biomassmap[,.(latgrid, longrid)]), .N] != 0){ # 0 missing region: good!
stop('biomassmap is missing >0 regions')
}
# Add poccur threshold to presmap
poccurthresh[, ocean := gsub('.*_', '', sppocean)]
poccurthresh[, spp := gsub('_Atl|_Pac', '', sppocean)]
presmapPac <- merge(presmap[region %in% c('ebs', 'goa', 'bc', 'wc'), ], poccurthresh[ocean == 'Pac', .(spp, thresh.kappa)], by = 'spp') # have to do Atl and Pac separately since some species are in both regions but use different models
presmapAtl <- merge(presmap[region %in% c('gmex', 'seus', 'neus', 'maritime', 'newf'), ], poccurthresh[ocean == 'Atl', .(spp, thresh.kappa)], by = 'spp')
if(nrow(presmap) == nrow(presmapPac) + nrow(presmapAtl)){
presmap <- rbind(presmapPac, presmapAtl)
rm(presmapPac, presmapAtl)
} else {
stop('merge of poccurthesh and presmap did not work')
}
# Fix a species name
# ALSO DORYTEUTHIS/LOLIGO PEALEII?
presmap[spp == 'theragra chalcogramma', spp := 'gadus chalcogrammus']
# zones
# id and names for each zone
zones <- data.frame(id = 1:3, name = c('conservation', 'fishery', 'energy'))
############################
# Run prioritizr
# Fixed budget
#############################
for (i in 1:length(myregs)) {
print(paste0('Starting region ', myregs[i]))
print(Sys.time())
###############################
# Set up data for this region
###############################
# pus
# planning features are each 1/4 deg square
pus <- presmap[region == myregs[i], c('latgrid', 'longrid')]
pus <- pus[!duplicated(pus),]
dim(pus) # 2195 (ebs), 795 (goa), (bc), (wc), 651 (gomex), (seus), (neus), (maritime), (newf)
if(nrow(pus) == 0) stop('pus has length zero')
pus <- pus[order(pus$latgrid, pus$longrid),]
pus$id <- 1:nrow(pus)
pus$dummycost <- rep(cost, nrow(pus)) # set the same cost in each planning unit. can add separate costs for each zone.
############################################
## Run prioritizr on 2007-2020 and 2081-2100
############################################
# spps
# id and name for each species
# fishery features entered separately from conservation features, even if same species
# plan on ensemble mean of all climate models for the current time-period
sppstokeep <- presmap[region == myregs[i] & year_range == planningperiods[1], .(poccur = mean(poccur)), by = c('latgrid', 'longrid', 'spp', 'thresh.kappa')] # average across models
dim(sppstokeep)
sppstokeep <- sppstokeep[poccur >= thresh.kappa, ]
sppstokeep <- merge(sppstokeep, pus[, .(latgrid, longrid, id)], by = c('latgrid', 'longrid')) # add pu id (and trim to focal pus)
setnames(sppstokeep, 'id', 'pu')
dim(sppstokeep)
ngrid <- sppstokeep[ , .(ngrid = length(unique(pu))), by = 'spp']
sppstokeep <- merge(sppstokeep, ngrid, by = 'spp')
sppstokeep[ , summary(ngrid)] #
nspps <- sppstokeep[ , .(nspp = length(unique(spp))), by = 'pu']
sppstokeep <- merge(sppstokeep, nspps, by = 'pu')
sppstokeep[, summary(nspp)] #
sppstokeep <- sppstokeep[ngrid >= (nrow(pus)*0.05),] # trim to species found at poccur > poccurthresh in at least 5% of grids
sppstokeep[ , length(unique(spp))]
spps <- data.table(id = 1:length(unique(sppstokeep$spp)), name = gsub(' |_', '', sort(unique(sppstokeep$spp))), spp = sort(unique(sppstokeep$spp))) # fill spaces in species names.
# add fishery features
spps <- rbind(spps, data.table(id = max(spps$id) + 1:fisheryspps[region == myregs[i], length(projname)],
name = paste0(gsub(' |_', '', fisheryspps[region == myregs[i], projname]), '_fishery'),
spp = fisheryspps[region == myregs[i], projname]))
# add wind and wave energy feature
spps <- rbind(spps, data.table(id = max(spps$id) + 1, name = c('energy'), spp = c(NA)))
# add future species (2081-2100)
sppinds <- !grepl('energy', spps$name) # don't include energy in each time period
temp1 <- spps[sppinds,]
spps$name[sppinds] <- paste0(spps$name[sppinds], gsub('-', '', planningperiods[1]))
temp1$name <- paste0(temp1$name, gsub('-', '', planningperiods[2]))
temp1$id = temp1$id + max(spps$id) # make sure the ids don't overlap
spps <- rbind(spps, temp1)
# puvsp
# which features are in each planning unit
# Three sources, each mapped to one management zone:
#   conservation (zone 1) = species presence/absence, fishery (zone 2) = biomass,
#   energy (zone 3) = wind+wave NPV. bare dim()/head() calls are diagnostics only.
# Format conservation data
puvsppa <- presmap[region == myregs[i] & year_range == planningperiods[1], .(poccur = mean(poccur)), by = c('latgrid', 'longrid', 'spp', 'thresh.kappa')] # pres/abs data.
dim(puvsppa)
puvsppa[, amount := as.numeric(poccur >= thresh.kappa)] # use pres/abs as conservation amount.
puvsppa[, summary(amount)]
puvsppa[, sort(unique(amount))]
puvsppa[, poccur := NULL]
puvsppa[ , name := paste0(gsub(' |_', '', spp), gsub('-', '', planningperiods[1]))] # trim out spaces from species names and append time period
# Format fishery data
puvspbio <- biomassmap[region == myregs[i] & year_range == planningperiods[1] & spp %in% fisheryspps[region == myregs[i], projname], .(biomass = mean(biomass)), by = c('latgrid', 'longrid', 'spp')] # biomass data.
dim(puvspbio)
puvspbio[, length(unique(spp))] # should be 10
puvspbio[, amount := biomass] # use biomass as amount for fishery targets
puvspbio[ , name := paste0(gsub(' |_', '', spp), '_fishery', gsub('-', '', planningperiods[1]))] # trim out spaces from species names, append fishery
# Format wind and wave data
puvenergy <- merge(windnpv, wavenpv, by = c('latgrid', 'longrid'), all = TRUE) # full outer join: keep cells with either source
head(puvenergy)
dim(windnpv)
dim(wavenpv)
dim(puvenergy)
puvenergy[wind_npv < 0 | is.na(wind_npv), wind_npv := 0] # set negative or NA NPV to 0
puvenergy[wave_npv < 0 | is.na(wave_npv), wave_npv := 0]
puvenergy[, amount := (wind_npv + wave_npv)/10000] # scale down to pass presolve checks
puvenergy[, name := 'energy']
# combine the three sources, tagging each with its zone number
puvsp <- rbind(puvsppa[, .(name, latgrid, longrid, amount, zone = 1)],
               puvspbio[, .(name, latgrid, longrid, amount, zone = 2)],
               puvenergy[, .(name, latgrid, longrid, amount, zone = 3)])
# Add species ids (inner merge also drops features not in spps)
nrow(puvsp)
puvsp <- merge(puvsp, spps[, .(id, name)], by = 'name') # merge in species IDs and trim to focal species
nrow(puvsp)
setnames(puvsp, 'id', 'species')
# Add planning units
puvsp <- merge(puvsp, pus[, .(latgrid, longrid, id)], by = c('latgrid', 'longrid')) # add pu id (and trim to focal pus)
nrow(puvsp)
setnames(puvsp, 'id', 'pu')
# Check fishery species for adequate biomass and scale up if needed
# Rescales each fishery feature so its amounts sum to 1, so none is
# eliminated by the amount < 5e-6 zeroing step below
fishtotals <- puvsp[grepl('fishery', name), .(total = sum(amount), name = unique(name)), by = 'species']
for(j in which(fishtotals[, total != 1])){
  scalar <- 1/fishtotals[j, total] # scale up so sum would be 1
  puvsp[species == fishtotals[j, species], amount := amount * scalar]
}
# Trim out values < 1e-6 (will throw error in prioritizr)
# Use 5e-6 to leave some buffer
puvsp[amount < 5e-6, amount := 0]
# Sort and trim columns and rows
setkey(puvsp, pu, species) # order by pu then species
puvsp <- puvsp[amount > 0, ] # trim only to presences
# checks on the historical data: fail fast before building the optimization problem
if(length(unique(puvsp$pu)) != nrow(pus)) stop(paste0('region: ', myregs[i], '. puvsp planning units do not match pus.')) # planning units for species + NatCap: 2195 (ebs), 661 (goa), 549 (neus), 1342 (newf)
if(!all(unique(puvsp$species) %in% spps$id)) stop(paste0('region: ', myregs[i], '. Some puvsp features are not in spps.')) # features that are species + fishery + NatCap
if(min(sort(unique(table(puvsp$species)))) < 1) stop(paste0('region: ', myregs[i], '. Some species are not in a planning unit (hist).')) # make sure all species show up in some planning units (shouldn't see any 0s)
if(min(sort(unique(table(puvsp$pu))) < 1)) stop(paste0('region: ', myregs[i], '. Some planning units do not have a species (hist).')) # make sure all planning units have some species (shouldn't see any 0s)
if(!all(sort(unique(table(puvsp$pu, puvsp$species))) %in% c(0,1))) stop(paste0('region: ', myregs[i], '. Some planning unit-species combinations appear more than once (hist).')) # should be all 0s and 1s
if(puvsp[, max(amount) > 1e6]) stop(paste0('region:', myregs[i], '. Amount > 1e6 (hist).'))
# add future data (second planning period), built the same way as the
# historical block above, then appended onto puvsp
puvsppa2 <- presmap[region == myregs[i] & year_range == planningperiods[2], .(poccur = mean(poccur)), by = c('latgrid', 'longrid', 'spp', 'thresh.kappa')] # pres/abs data. trim to focal models
dim(puvsppa2)
puvsppa2[, amount := as.numeric(poccur >= thresh.kappa)] # use pres/abs as conservation amount. should this instead be left as poccur?
puvsppa2[, summary(amount)]
puvsppa2[, sort(unique(amount))]
puvsppa2[ , name := gsub(' |_', '', spp)] # trim out spaces from species names and add future
puvsppa2[!grepl('energy', name), name := paste0(name, gsub('-', '', planningperiods[2]))] # append time period
# Format future fishery data
puvspbio2 <- biomassmap[region == myregs[i] & year_range == planningperiods[2] & spp %in% fisheryspps[region == myregs[i], projname], .(biomass = mean(biomass)), by = c('latgrid', 'longrid', 'spp')] # biomass data
dim(puvspbio2)
puvspbio2[, length(unique(spp))] # should be 10
puvspbio2[, amount := biomass] # use biomass as amount for fishery targets.
puvspbio2[ , name := paste0(gsub(' |_', '', spp), '_fishery')] # trim out spaces from species names
puvspbio2[!grepl('energy', name), name := paste0(name, gsub('-', '', planningperiods[2]))] # append time period
# combine future data (no energy rows: energy is period-less, already in puvsp)
puvsp2 <- rbind(puvsppa2[, .(name, latgrid, longrid, amount, zone = 1)],
                puvspbio2[, .(name, latgrid, longrid, amount, zone = 2)])
# Add species ids to future
nrow(puvsp2)
puvsp2 <- merge(puvsp2, spps[, .(id, name)], by = 'name') # merge in species IDs and trim to focal species
nrow(puvsp2)
setnames(puvsp2, 'id', 'species')
# Add planning units to future
puvsp2 <- merge(puvsp2, pus[, .(latgrid, longrid, id)], by = c('latgrid', 'longrid')) # add pu id (and trim to focal pus)
nrow(puvsp2)
setnames(puvsp2, 'id', 'pu')
# Check fishery species for adequate biomass and scale up if needed
# Makes sure that no fishery species are eliminated by the amount < 5e-6 zeroing below
fishtotals2 <- puvsp2[grepl('fishery', name), .(total = sum(amount), name = unique(name)), by = 'species']
for(j in which(fishtotals2[, total != 1])){
  scalar <- 1/fishtotals2[j, total] # scale up so sum would be 1
  puvsp2[species == fishtotals2[j, species], amount := amount * scalar]
}
# Add historical and future data
puvsp <- rbind(puvsp, puvsp2)
# Trim out values < 1e-6 (will throw error in prioritizr)
# Use 5e-6 to leave some buffer
puvsp[amount < 5e-6, amount := 0]
# Sort and trim columns and rows
setkey(puvsp, pu, species) # order by pu then species
puvsp <- puvsp[amount > 0, ] # trim only to presences
# checks (same invariants as the historical block, now on the combined table)
if(length(unique(puvsp$pu)) != nrow(pus)) stop(paste0('region: ', myregs[i], '. puvsp planning units do not match pus.')) # planning units for species + NatCap
if(!all(unique(puvsp$species) %in% spps$id)) stop(paste0('region: ', myregs[i], '. Some puvsp features are not in spps.')) # features that are species + fishery + NatCap
if(min(sort(unique(table(puvsp$species)))) < 1) stop(paste0('region: ', myregs[i], '. Some species are not in a planning unit.')) # make sure all species show up in some planning units (shouldn't see any 0s)
if(min(sort(unique(table(puvsp$pu))) < 1)) stop(paste0('region: ', myregs[i], '. Some planning units do not have a species.')) # make sure all planning units have some species (shouldn't see any 0s)
if(!all(sort(unique(table(puvsp$pu, puvsp$species))) %in% c(0,1))) stop(paste0('region: ', myregs[i], '. Some planning unit-species combinations appear more than once.')) # should be all 0s and 1s
if(puvsp[, max(amount) > 1e6]) stop(paste0('region:', myregs[i], '. Amount > 1e6.'))
#zone target
# set zone-specific targets: rows are features, columns are zones.
# Each feature type gets a nonzero target only in its own zone.
zonetarget <- matrix(0, nrow = nrow(spps), ncol = nrow(zones), dimnames = list(spps$name, zones$name))
zonetarget[!grepl('energy|fishery', rownames(zonetarget)), 'conservation'] <- consgoal # set conservation zone target
zonetarget[grepl('fishery', rownames(zonetarget)), 'fishery'] <- fishgoal # set fishing zone target
zonetarget[grepl('energy', rownames(zonetarget)), 'energy'] <- energygoal # set energy goal target
# trim out species that aren't present in any planning unit
nrow(spps)
spps <- spps[name %in% puvsp$name,]
nrow(spps)
nrow(zonetarget)
zonetarget <- zonetarget[rownames(zonetarget) %in% puvsp$name,]
nrow(zonetarget)
# basic checks (automated): spps, zonetarget, pus, and puvsp must agree
if(!all(colSums(zonetarget) > 0)) stop(paste0('region:', myregs[i], '. Some zone targets are 0.')) # reasonable targets?
if(nrow(zonetarget) != nrow(spps)) stop(paste0('region: ', myregs[i], '. Zonetargets do not match spps.'))
if(!all(rownames(zonetarget) == spps$name)) stop(paste0('region: ', myregs[i], '. Zonetargets order does not match spps order.'))
if(sum(!(puvsp$pu %in% pus$id)) > 0) stop(paste0('region: ', myregs[i], '. Some planning units not in pus.'))
if(sum(!(puvsp$species %in% spps$id)) > 0) stop(paste0('region: ', myregs[i], '. Some species units not in spps.'))
if(sum(!(pus$id %in% puvsp$pu)) > 0) stop(paste0('region: ', myregs[i], '. Some pus units not in puvsp.'))
if(sum(!(spps$id %in% puvsp$species)) > 0) stop(paste0('region: ', myregs[i], '. Some species units not in puvsp.'))
# First solve the min cost problem: the cheapest plan that meets every
# zone target. Its cost anchors the budgets explored in the frontier loop.
cat('\tSolving min cost\n')
p1 <- problem(pus, spps, cost_column = c('dummycost', 'dummycost', 'dummycost'), rij = puvsp, zones = zones) %>%
  add_min_set_objective() %>%
  add_relative_targets(zonetarget) %>%
  add_binary_decisions() %>%
  add_gurobi_solver(gap = gap)
if(presolve_check(p1)){
  s1 <- solve(p1)
} else {
  stop(paste0('region:', myregs[i], '. Failed presolve check (min set).'))
}
# Loop through a range of budgets and relative weights on future vs. present.
# For each (budget, presweight) combination, solve a max-features problem and
# record how many present-period and future-period goals were met.
frontier <- expand.grid(budget = seq(minbudget, maxbudget, length.out = nbudget), presweight = seq(minweight, maxweight, length.out = nweight))
frontier$region <- myregs[i]
frontier$status <- NA
frontier$presgoals <- NA
frontier$futgoals <- NA
for(j in 1:nrow(frontier)){
  print(paste(myregs[i], frontier$budget[j], frontier$presweight[j]))
  # Set up a budget as fraction of min cost
  # NOTE(review): assumes s1 has per-zone solution columns named solution_1_<zone> — confirm for the installed prioritizr version
  budget <- frontier$budget[j]*cost*s1[, sum(solution_1_conservation) + sum(solution_1_fishery) + sum(solution_1_energy)] # or with 0.9*attr(s1, 'objective')
  # Set up feature weights: present and future weights sum to maxweight + minweight
  # Anything less than 0.01 will favor not adding a planning unit over meeting a feature target
  prewght <- frontier$presweight[j]
  futwght <- maxweight + minweight - frontier$presweight[j]
  wghts <- zonetarget
  wghts[grepl(gsub('-', '', planningperiods[1]), rownames(wghts)), ] <- prewght # historical
  wghts[grepl(gsub('-', '', planningperiods[2]), rownames(wghts)), ] <- futwght # future
  wghts[grepl('energy', rownames(wghts)), ] <- 0 # no attempt to meet energy goal
  wghts[zonetarget == 0] <- 0 # set zeros back to zero
  # Now solve the max representation problem for a limited budget
  cat('\tSolving min budget\n')
  p2 <- problem(pus, spps, cost_column = c('dummycost', 'dummycost', 'dummycost'), rij = puvsp, zones = zones) %>%
    add_max_features_objective(budget) %>%
    add_relative_targets(zonetarget) %>%
    add_feature_weights(wghts) %>%
    add_binary_decisions() %>%
    add_gurobi_solver(gap = gap, threads = nthread, time_limit = timelimit) # 10 minute time limit
  if(presolve_check(p2)){
    s2 <- solve(p2)
  } else {
    stop(paste0('region:', myregs[i], '. Failed presolve check (min budget).'))
  }
  # save solver status (e.g. OPTIMAL vs. time-limited)
  frontier$status[j] <- attr(s2, 'status')
  # calculate goals met
  # NOTE(review): columns 5:7 assumed to be the three zone solution columns — positional indexing is fragile, confirm
  r2 <- feature_representation(p2, s2[, 5:7])
  r2 <- r2[(!grepl('fishery|energy', r2$feature) & r2$zone == 'conservation') | (grepl('fishery', r2$feature) & r2$zone == 'fishery') | (grepl('energy', r2$feature) & r2$zone == 'energy'), ] # trim to feature/zone combinations we care about
  if(nrow(r2) != nrow(zonetarget)) stop('r2 and zonetargets do not match')
  r2$goal <- NA
  r2$goal[r2$zone == 'conservation'] <- consgoal
  r2$goal[r2$zone == 'fishery'] <- fishgoal
  r2$goal[r2$zone == 'energy'] <- energygoal
  r2$met <- r2$relative_held >= r2$goal
  frontier$presgoals[j] <- sum(r2$met[grepl(gsub('-', '', planningperiods[1]), r2$feature)]) # contemporary period goals met
  frontier$futgoals[j] <- sum(r2$met[grepl(gsub('-', '', planningperiods[2]), r2$feature)]) # future
}
# Accumulate this region's frontier into the all-region table and checkpoint
# to disk after every region (so a crash mid-run loses at most one region).
if(i == 1){
  frontierall <- frontier
} else {
  frontierall <- rbind(frontierall, frontier)
}
write.csv(frontierall, file = outname)
}
print(Sys.time())
########################
# Make a simple plot of the goals-met frontiers, one panel per region
########################
# library() errors immediately if a package is missing; require() would only
# warn and return FALSE, letting the script fail later with a confusing error.
library(data.table)
library(ggplot2)
frontierall <- fread(outname, drop = 1) # drop the row-number column written by write.csv
setkey(frontierall, region, budget, presweight)
frontierall[, region := factor(region, levels = c('ebs', 'goa', 'bc', 'wc', 'gmex', 'seus', 'neus', 'maritime', 'newf'))] # set order
# how many solutions did not reach proven optimality?
print(frontierall[, .(notopt = sum(status != 'OPTIMAL'), total = .N)])
print(frontierall[, .(notopt = sum(status != 'OPTIMAL'), total = .N), by = region])
pdf('temp_figures/prioritizr_frontiers.pdf', height = 6, width = 6)
# Explicit print() so the plot renders even when this file is source()d,
# where top-level auto-printing is off and the PDF would otherwise be blank.
print(
    ggplot(frontierall, aes(x = presgoals, y = futgoals, group = budget, color = budget)) +
        geom_path(size = 0.4) +
        geom_point(size = 0.3) +
        facet_wrap(~ region, nrow = 3, scales = 'free')
)
dev.off()
|
d66901ecaea6f9a42dd71b4c806736b2f82bb08b
|
48aea1547fb612b127d5b5def716d48398236159
|
/man/CIMseq.testing-package.Rd
|
0dc41e324b8ff6d8ece9a460967805947ff58e59
|
[] |
no_license
|
jasonserviss/CIMseq.testing
|
6a1951a5d1cd53a22704df631138050bc4e057c6
|
7039f9b52fb9280bb811662aa19d4fe7f7bf8398
|
refs/heads/master
| 2021-03-30T17:46:24.443721
| 2020-01-27T09:55:25
| 2020-01-27T09:55:25
| 76,064,214
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 485
|
rd
|
CIMseq.testing-package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CIMseq.testing-package.R
\docType{package}
\name{CIMseq.testing-package}
\alias{CIMseq.testing-package}
\alias{CIMseq.testing}
\title{Testing and analysis of the CIMseq method.}
\description{
Description
}
\details{
\tabular{ll}{ Package: \tab CIMseq\cr Type: \tab Package\cr
Version: \tab 1.0\cr Date: \tab 2016-02-28\cr License: \tab GPL-3\cr }
}
\author{
Author: Jason T. Serviss
}
\keyword{package}
|
ac1ee8fbff6fb6fbf1a09f64920a125501480ea5
|
0ecd29c40cbecd945f5d8e3d2b2d27e4070ef897
|
/rstan_installation_helper.R
|
94b689be7d9c14d2ee8b1056bb5c5d5b568fe582
|
[] |
no_license
|
paul-buerkner/2019_DAGStat_Stan_Tutorial
|
195bc4e440feff7be10f93057614c252fe2cf7f7
|
41f778f9c1188bad80d0b58542aada541d275ca3
|
refs/heads/master
| 2020-04-29T16:39:14.856833
| 2019-03-19T11:11:15
| 2019-03-19T11:11:15
| 176,269,018
| 8
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,144
|
r
|
rstan_installation_helper.R
|
# install rstan
# Quite a few other packages will be installed as well
# requireNamespace() checks availability without attaching the package;
# require() is discouraged for dependency loading because it attaches the
# package and merely returns FALSE (with a warning) on failure.
if (!requireNamespace("rstan", quietly = TRUE)) {
  install.packages("rstan")
}
# The following explains how to install a C++ compiler
# which is required for Stan
# -------- FOR WINDOWS -------
# This requires using R from Rstudio 1.1 or higher!
library(rstan)
example("stan_model", run.dontrun = TRUE)
# RStudio will ask if you want to install Rtools,
# in which case you should say Yes and click through the installer
# If this doesn't work, go to download and install Rtools 3.5
# manually from https://cran.r-project.org/bin/windows/Rtools/
# make sure to check the box to change the System PATH
# -------- FOR MAC ----------
# Please install Xcode, which you can download from the App-Store for free.
# Installing Xcode may take some time and you may restart your machine afterwards
# Make sure that a C++ compiler is installed and can be called within R via
system("clang++ -v")
# If no warning occurs and a few lines of difficult to read system code
# are printed out, the compiler should work correctly
# -----------------------
# try to run a demo model
example("stan_model")
|
e763167ec62779096a002d1abf989af1d5a54e5e
|
7384fa7a27f0fddda69766c4d351efabb494d799
|
/cachematrix.R
|
08c89e9658042fa59aee19a4eab6bf0e004341fc
|
[] |
no_license
|
abumeezo/ProgrammingAssignment2
|
edfb04334f35c41afc956dfd5d5e1cb6c267b939
|
63e9aaeded7f04cc8bace8bf01cf13e09e643db7
|
refs/heads/master
| 2021-01-13T15:52:19.018228
| 2016-12-19T04:20:53
| 2016-12-19T04:20:53
| 76,826,210
| 0
| 0
| null | 2016-12-19T03:51:38
| 2016-12-19T03:51:38
| null |
UTF-8
|
R
| false
| false
| 1,120
|
r
|
cachematrix.R
|
##Functions to create a special "matrix" object with a cached inverse
##and to retrieve the cached inverse if already calculated from inside the object itself
##This function creates the "matrix" object with cached inverse
##Object has internal functions to establish and return itself and its inverse
## Create a special "matrix" wrapper that can cache its inverse.
##
## Returns a list of accessor functions closed over the matrix `x` and an
## (initially unset) cached inverse:
##   set(y)        -- replace the matrix and clear the cache
##   get()         -- return the current matrix
##   setInverse(v) -- store a computed inverse in the cache
##   getInverse()  -- return the cached inverse, or NULL if none is cached
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  replace_matrix <- function(y) {
    # New matrix data invalidates any previously cached inverse
    x <<- y
    cached_inverse <<- NULL
  }
  fetch_matrix <- function() {
    x
  }
  store_inverse <- function(value) {
    cached_inverse <<- value
  }
  fetch_inverse <- function() {
    cached_inverse
  }
  # These element names are the public interface consumed by cacheSolve()
  list(
    set = replace_matrix,
    get = fetch_matrix,
    setInverse = store_inverse,
    getInverse = fetch_inverse
  )
}
## Return a matrix that is the inverse of the special "matrix" object `x`
## (as produced by makeCacheMatrix).
##
## On a cache hit, emit a message and return the stored inverse; otherwise
## compute the inverse with solve(), memoize it in the object, and return it.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (is.null(cached)) {
    # Cache miss: compute, store for next time, and return the fresh inverse
    fresh <- solve(x$get(), ...)
    x$setInverse(fresh)
    return(fresh)
  }
  message("getting cached data")
  cached
}
|
bca932738cd7110522cc3a5917a64f1837ffd015
|
890c942249dd887b82ca07eee97f68149ffd1f49
|
/R/degs.R
|
8b4e4e7fc2a894ffe64827aae81c51b497c52081
|
[
"MIT"
] |
permissive
|
lefeverde/QSPpaper
|
3285c4829273120508610fef2ecdef3186dd26b7
|
eec8fbedd1fefd1ed88dadbc77dd385ad78f274f
|
refs/heads/master
| 2023-01-18T21:39:56.050930
| 2022-12-24T17:35:23
| 2022-12-24T17:35:23
| 240,388,280
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 465
|
r
|
degs.R
|
#' Wrapper to create a fit object (see \code{\link[limma]{eBayes}}) using the contrast method
#'
#' Fits a limma linear model, re-parameterizes it to the requested contrasts,
#' and moderates the statistics with empirical Bayes.
#'
#' @param v a voom object
#' @param group_cont vector of contrasts
#' @param mod model matrix
#'
#' @return fit object
#' @export
#'
#' @examples
make_cont_fit <- function(v, group_cont, mod){
  design <- data.frame(mod)
  # Contrast matrix expressing the requested group comparisons
  contrast_matrix <- makeContrasts(contrasts = group_cont, levels = design)
  linear_fit <- lmFit(v, design)
  contrast_fit <- contrasts.fit(linear_fit, contrast_matrix)
  eBayes(contrast_fit)
}
|
3842f59e4582f10f535df6b39f20ac6e86903009
|
93defdbd4e3c597ec4b7f95b5cdaf649e7cbb21c
|
/man/dot-extract_base_schedule.Rd
|
237c896518dc3dc8d05d42878943ba8b815c5615
|
[] |
no_license
|
meysubb/collegeballR
|
0e909cbda2ec96f386fd5385168a65790507aab3
|
1727a03dc3bf0377d65849586c0e44c9a089b591
|
refs/heads/master
| 2021-05-05T13:31:13.268673
| 2019-07-25T01:06:32
| 2019-07-25T01:06:32
| 105,055,203
| 17
| 5
| null | 2019-04-20T23:26:48
| 2017-09-27T18:36:46
|
HTML
|
UTF-8
|
R
| false
| true
| 507
|
rd
|
dot-extract_base_schedule.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extract_base_schedule.R
\name{.extract_base_schedule}
\alias{.extract_base_schedule}
\title{Extract Raw Base schedule}
\usage{
.extract_base_schedule(team_id, year, sport)
}
\arguments{
\item{team_id}{Team ID (from team_mapping)}
\item{sport}{Traditional sport name}
\item{year}{Selected year (example: the 2015-2016 season is 2016)}
}
\description{
Extracts the date, teams played, and the results
}
\examples{
}
\keyword{internal}
|
106b776f269767b3681b2d3ffc91a718ae45600c
|
3b2a2137476edc5fb5dad4c3f0f29fa83252db0f
|
/man/notin.Rd
|
1b11b4450d58a48ddd0a7cbb27d9e12455461958
|
[] |
no_license
|
woodwards/octopus
|
32d8c64947d634fd8cf32b5abdf34fea478baef6
|
5be3adffe27bd0d8300ff6394a59587f86c4bd51
|
refs/heads/master
| 2020-09-25T09:24:18.651141
| 2020-01-06T20:48:57
| 2020-01-06T20:48:57
| 225,973,852
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 346
|
rd
|
notin.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility.R
\name{\%notin\%}
\alias{\%notin\%}
\title{Returns TRUE if x is not in y.}
\usage{
x \%notin\% y
}
\arguments{
\item{x}{Anything.}
\item{y}{Anything.}
}
\value{
Logical.
}
\description{
Returns TRUE if x is not in y.
}
\examples{
"a" \%notin\% c("b", "c")
}
|
d038121a9e733c43eb036e15362a4ee823293615
|
76dbc1754d4fac81e75fc054858ba91f99b55b2d
|
/R/mortalityhazard-consthaz.R
|
78c3b9f0e7b177a3e4e2d9f308644f8668e20ab6
|
[] |
no_license
|
dfeehan/mortfit
|
e51ac12507385bd9024e8109aa1a3eaea2895fb5
|
8dfd82e93fde1bf408dbe59eb004cc8694603f88
|
refs/heads/master
| 2021-01-18T00:00:39.351697
| 2020-11-08T16:23:12
| 2020-11-08T16:23:12
| 18,040,328
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 753
|
r
|
mortalityhazard-consthaz.R
|
########################
# constant hazard object
# Constant hazard function: the hazard does not vary with age.
# theta[1] holds the log hazard, so the hazard itself is exp(theta[1]),
# repeated once for each entry of the age vector z.
consthaz.haz.fn <- function(theta, z) {
  rate <- exp(theta[1])
  rep(rate, times = length(z))
}
## these starting values have been updated based on preliminary analysis
# S4 mortalityHazard object bundling the constant-hazard model:
# one parameter (log hazard), its default and plausible range, BFGS optimizer
# settings, the hazard function, and a derived hazard-to-probability function
# (haz.to.prob partially applied to this hazard via functional::Curry).
consthaz.haz <- new("mortalityHazard",
                    name="Constant Hazard",
                    num.param=1L,
                    theta.default=c(-2.48),
                    theta.range=list(c(-2.73, -2.29)),
                    optim.default=list(method="BFGS",
                                       control=list(reltol=1e-10)),
                    haz.fn=consthaz.haz.fn,
                    haz.to.prob.fn=functional::Curry(haz.to.prob,
                                                     haz.fn=consthaz.haz.fn))
|
b7f48bb3fa02594c8937ebfd82abd33ccec55b9d
|
2171709c5b23d8e5f7c2194d4c77b8d1d3c232f3
|
/man/Content.Rd
|
2acf178c31593e5c7d08ef59848501167ba451c6
|
[] |
no_license
|
colearendt/connectapi
|
9472351abc6f24c5d3bb9acc41721754d13f52af
|
00a01aa74aee8df5fcc67cee14b80c5239168b52
|
refs/heads/master
| 2021-07-19T07:33:16.802365
| 2020-05-19T12:50:52
| 2020-05-19T12:50:52
| 168,455,783
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,214
|
rd
|
Content.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/content.R
\name{Content}
\alias{Content}
\title{Content}
\description{
An R6 class that represents content
}
\seealso{
Other R6 classes:
\code{\link{Bundle}},
\code{\link{RStudioConnect}},
\code{\link{Task}},
\code{\link{Vanity}}
}
\concept{R6 classes}
\section{Public fields}{
\if{html}{\out{<div class="r6-fields">}}
\describe{
\item{\code{connect}}{An R6 Connect object}
\item{\code{content}}{The content details from RStudio Connect}
}
\if{html}{\out{</div>}}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-new}{\code{Content$new()}}
\item \href{#method-get_connect}{\code{Content$get_connect()}}
\item \href{#method-get_content}{\code{Content$get_content()}}
\item \href{#method-get_content_remote}{\code{Content$get_content_remote()}}
\item \href{#method-update}{\code{Content$update()}}
\item \href{#method-get_dashboard_url}{\code{Content$get_dashboard_url()}}
\item \href{#method-get_jobs}{\code{Content$get_jobs()}}
\item \href{#method-get_job}{\code{Content$get_job()}}
\item \href{#method-print}{\code{Content$print()}}
\item \href{#method-clone}{\code{Content$clone()}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-new"></a>}}
\if{latex}{\out{\hypertarget{method-new}{}}}
\subsection{Method \code{new()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Content$new(connect, content)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-get_connect"></a>}}
\if{latex}{\out{\hypertarget{method-get_connect}{}}}
\subsection{Method \code{get_connect()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Content$get_connect()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-get_content"></a>}}
\if{latex}{\out{\hypertarget{method-get_content}{}}}
\subsection{Method \code{get_content()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Content$get_content()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-get_content_remote"></a>}}
\if{latex}{\out{\hypertarget{method-get_content_remote}{}}}
\subsection{Method \code{get_content_remote()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Content$get_content_remote()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-update"></a>}}
\if{latex}{\out{\hypertarget{method-update}{}}}
\subsection{Method \code{update()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Content$update(...)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-get_dashboard_url"></a>}}
\if{latex}{\out{\hypertarget{method-get_dashboard_url}{}}}
\subsection{Method \code{get_dashboard_url()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Content$get_dashboard_url(pane = "")}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-get_jobs"></a>}}
\if{latex}{\out{\hypertarget{method-get_jobs}{}}}
\subsection{Method \code{get_jobs()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Content$get_jobs()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-get_job"></a>}}
\if{latex}{\out{\hypertarget{method-get_job}{}}}
\subsection{Method \code{get_job()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Content$get_job(key)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-print"></a>}}
\if{latex}{\out{\hypertarget{method-print}{}}}
\subsection{Method \code{print()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Content$print(...)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\if{latex}{\out{\hypertarget{method-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Content$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
303967312cdd875937c88bf2ee59ce2095c505da
|
c7ecb5298854ca192e5613f81e74265bd53f9e96
|
/Project 2/Drop Out Loop-Spec.R
|
aaed1efb20844600dc54aa7d6bb26a6c12a52206
|
[] |
no_license
|
tommsmit/R_Projects
|
14e89784956ed333c6e8c03c33738bd9c01aad82
|
bafc0c5a60b295b5d75b6668734a422ff7440ba3
|
refs/heads/master
| 2023-07-16T01:39:01.576903
| 2021-08-24T21:07:53
| 2021-08-24T21:07:53
| 360,184,072
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 59,900
|
r
|
Drop Out Loop-Spec.R
|
### Drop out Loop 2: Special Programs ###
library(rvest)
library(tm)
library(pdftools)
library(stringr)
library(dplyr)
library(plyr)
library(data.table)
library(Hmisc)
library(tictoc)
library(tidyverse)
require(XML)
library(ggplot2)
library(shiny)
# School years covered by the TEA dropout reports, 1998-99 through 2018-19
school_year <- c(
  "1998-99", "1999-00", "2000-01", "2001-02", "2002-03", "2003-04",
  "2004-05", "2005-06", "2006-07", "2007-08", "2008-09", "2009-10",
  "2010-11", "2011-12", "2012-13", "2013-14", "2014-15", "2015-16",
  "2016-17", "2017-18", "2018-19"
)
# Empty accumulator tables (one row per special-program group per year);
# drop_spec holds a single year's table, total_drop_spec the running total.
drop_spec <- data.frame(
  Groups = character(),
  Students = numeric(),
  Students_Percentage = numeric(),
  Dropouts = numeric(),
  Dropouts_Percentage = numeric(),
  Annual_Dropout_Rate = numeric(),
  School_Year = character()
)
total_drop_spec <- data.frame(
  Groups = character(),
  Students = numeric(),
  Students_Percentage = numeric(),
  Dropouts = numeric(),
  Dropouts_Percentage = numeric(),
  Annual_Dropout_Rate = numeric(),
  School_Year = character()
)
for (i in 1:length(school_year)){
if (i==1){
a<-paste0("https://tea.texas.gov/sites/default/files/DropComp_",school_year[i],".pdf")
dropout<-pdf_text(a)
p1<-strsplit(dropout, "\n")
table<-data.frame(p1[[86]][c(29:32,41:44)])
rnums<-nrow(table)
table$Main<-as.character(table[1:rnums,1])
table$Main<-trimws(table$Main, which="left")
table$Main<-stripWhitespace(table$Main)
table$Main<-gsub("(?:Bilingual or ESLa)","ESL", table$Main)
table$Main<-gsub("(?:Bilingual or English as a second language)","ESL", table$Main)
table$Main<-gsub("(?:Second Language)","ESL", table$Main)
table$Main<-gsub("(?:CTEb)","Career-Technical", table$Main)
table$Main<-gsub("(?:Gifted and talented)","GT", table$Main)
table$Main<-gsub("(?:Gifted/Talented)","GT", table$Main)
table$Main<-gsub("(?:Section 504)","504", table$Main)
table$Main<-gsub("(?:Special education)","Spec-Ed", table$Main)
table$Main<-gsub("(?:Special Education)","Spec-Ed", table$Main)
table$Main<-gsub("(?:Title I)","Title-I", table$Main)
table$Main<-gsub("(?:At Risk)","At-Risk", table$Main)
table$Main<-gsub("(?:Limited English Proficient)","ELL", table$Main)
table$Main<-gsub("(?:English learner)","ELL", table$Main)
table$Main<-gsub("(?:Foster care)","Foster-Care", table$Main)
table$Main<-gsub("(?:Overage/Not on Grade)","Overage", table$Main)
table$Main<-gsub("(?:,)","", table$Main)
split_var<-as.data.frame(ldply(strsplit(table$Main, split = " ")))
drop_spec<-data.frame(Groups=character(),Students=numeric(), Students_Percentage=numeric(), Dropouts=numeric(), Dropouts_Percentage=numeric(), Annual_Dropout_Rate=numeric(), School_Year=character())
drop_spec<-drop_spec[1:rnums,]
drop_spec$Main<-table$Main
drop_spec$Groups<-split_var[,1]
drop_spec$Students<-split_var[,2]
drop_spec$Students_Percentage<-split_var[,3]
drop_spec$Dropouts<-split_var[,4]
drop_spec$Dropouts_Percentage<-split_var[,5]
drop_spec$Annual_Dropout_Rate<-split_var[,6]
drop_spec$School_Year<-school_year[i]
drop_spec$Students<-as.numeric(drop_spec$Students)
drop_spec$Students_Percentage<-as.numeric(drop_spec$Students_Percentage)
drop_spec$Dropouts<-as.numeric(drop_spec$Dropouts)
drop_spec$Dropouts_Percentage<-as.numeric(drop_spec$Dropouts_Percentage)
drop_spec$Annual_Dropout_Rate<-as.numeric(drop_spec$Annual_Dropout_Rate)
drop_spec<-select(drop_spec,-Main)
drop_spec<-rbind(drop_spec,NA,NA,NA,NA,NA,NA)
drop_spec<-drop_spec[c(1,9,2,10,3,4,5,11,6,12,13,14,7,15,8),]
} else if (i==2){
a<-paste0("https://tea.texas.gov/sites/default/files/DropComp_",school_year[i],".pdf")
dropout<-pdf_text(a)
p1<-strsplit(dropout, "\n")
table<-data.frame(p1[[97]][c(28:31,41:45)])
rnums<-nrow(table)
table$Main<-as.character(table[1:rnums,1])
table$Main<-trimws(table$Main, which="left")
table$Main<-stripWhitespace(table$Main)
table$Main<-gsub("(?:Bilingual/English as a Second Language)","ESL", table$Main)
table$Main<-gsub("(?:Second Language)","ESL", table$Main)
table$Main<-gsub("(?:CTEb)","Career-Technical", table$Main)
table$Main<-gsub("(?:Gifted and talented)","GT", table$Main)
table$Main<-gsub("(?:Gifted/Talented)","GT", table$Main)
table$Main<-gsub("(?:Section 504)","504", table$Main)
table$Main<-gsub("(?:Special education)","Spec-Ed", table$Main)
table$Main<-gsub("(?:Special Education)","Spec-Ed", table$Main)
table$Main<-gsub("(?:Title I)","Title-I", table$Main)
table$Main<-gsub("(?:At Risk)","At-Risk", table$Main)
table$Main<-gsub("(?:Limited English Proficient)","ELL", table$Main)
table$Main<-gsub("(?:English learner)","ELL", table$Main)
table$Main<-gsub("(?:Foster care)","Foster-Care", table$Main)
table$Main<-gsub("(?:Overage/Not on Grade)","Overage", table$Main)
table$Main<-gsub("(?:,)","", table$Main)
split_var<-as.data.frame(ldply(strsplit(table$Main, split = " ")))
drop_spec<-data.frame(Groups=character(),Students=numeric(), Students_Percentage=numeric(), Dropouts=numeric(), Dropouts_Percentage=numeric(), Annual_Dropout_Rate=numeric(), School_Year=character())
drop_spec<-drop_spec[1:rnums,]
drop_spec$Main<-table$Main
drop_spec$Groups<-split_var[,1]
drop_spec$Students<-split_var[,2]
drop_spec$Students_Percentage<-split_var[,3]
drop_spec$Dropouts<-split_var[,4]
drop_spec$Dropouts_Percentage<-split_var[,5]
drop_spec$Annual_Dropout_Rate<-split_var[,6]
drop_spec$School_Year<-school_year[i]
drop_spec$Students<-as.numeric(drop_spec$Students)
drop_spec$Students_Percentage<-as.numeric(drop_spec$Students_Percentage)
drop_spec$Dropouts<-as.numeric(drop_spec$Dropouts)
drop_spec$Dropouts_Percentage<-as.numeric(drop_spec$Dropouts_Percentage)
drop_spec$Annual_Dropout_Rate<-as.numeric(drop_spec$Annual_Dropout_Rate)
drop_spec<-select(drop_spec,-Main)
drop_spec<-rbind(drop_spec,NA,NA,NA,NA,NA,NA)
drop_spec<-drop_spec[c(1,10,2,11,3,4,5,12,7,13,14,6,8,15,9),]
} else if (i==3){
  # School year i==3: the student-group table spans two PDF pages, so rows
  # are scraped from p1[[111]] and p1[[112]] and stacked afterwards.
  a<-paste0("https://tea.texas.gov/sites/default/files/DropComp_",school_year[i],".pdf")
  dropout<-pdf_text(a)
  p1<-strsplit(dropout, "\n")
  table1<-data.frame(p1[[111]][c(5,7:9)])
  table2<-data.frame(p1[[112]][5:9])
  rnums1<-nrow(table1)
  rnums2<-nrow(table2)
  table1$Main<-as.character(table1[1:rnums1,1])
  table2$Main<-as.character(table2[1:rnums2,1])
  # Normalize whitespace so each row later splits cleanly on single spaces.
  table1$Main<-trimws(table1$Main, which="left")
  table2$Main<-trimws(table2$Main, which="left")
  table1$Main<-stripWhitespace(table1$Main)
  table2$Main<-stripWhitespace(table2$Main)
  # Collapse multi-word group labels to single tokens so strsplit on " "
  # yields exactly six fields per row.
  table1$Main<-gsub("(?:Bilingual or English as a second language)","ESL", table1$Main)
  table1$Main<-gsub("(?:Gifted and talented)","GT", table1$Main)
  table1$Main<-gsub("(?:Special education)","Spec-Ed", table1$Main)
  table1$Main<-gsub("(?:Title I)","Title-I", table1$Main)
  table2$Main<-gsub("(?:At risk)","At-Risk", table2$Main)
  table2$Main<-gsub("(?:Limited English proficient)","ELL", table2$Main)
  table1$Main<-gsub("(?:,)","", table1$Main)
  table2$Main<-gsub("(?:,)","", table2$Main)
  split_var1<-as.data.frame(ldply(strsplit(table1$Main, split = " ")))
  split_var2<-as.data.frame(ldply(strsplit(table2$Main, split = " ")))
  split_var<-rbind(split_var1,split_var2)
  # BUG FIX: this branch previously read the stale `rnums` and `table`
  # objects left over from an earlier branch; compute them from the two
  # tables scraped in THIS branch instead.
  rnums<-rnums1+rnums2
  drop_spec<-data.frame(Groups=character(),Students=numeric(), Students_Percentage=numeric(), Dropouts=numeric(), Dropouts_Percentage=numeric(), Annual_Dropout_Rate=numeric(), School_Year=character())
  drop_spec<-drop_spec[1:rnums,]
  drop_spec$Main<-c(table1$Main,table2$Main)
  drop_spec$Groups<-split_var[,1]
  drop_spec$Students<-split_var[,2]
  drop_spec$Students_Percentage<-split_var[,3]
  drop_spec$Dropouts<-split_var[,4]
  drop_spec$Dropouts_Percentage<-split_var[,5]
  drop_spec$Annual_Dropout_Rate<-split_var[,6]
  drop_spec$School_Year<-school_year[i]
  # Split fields are character; coerce the measure columns to numeric.
  drop_spec$Students<-as.numeric(drop_spec$Students)
  drop_spec$Students_Percentage<-as.numeric(drop_spec$Students_Percentage)
  drop_spec$Dropouts<-as.numeric(drop_spec$Dropouts)
  drop_spec$Dropouts_Percentage<-as.numeric(drop_spec$Dropouts_Percentage)
  drop_spec$Annual_Dropout_Rate<-as.numeric(drop_spec$Annual_Dropout_Rate)
  drop_spec<-select(drop_spec,-Main)
  # Pad to 15 rows and reorder so group categories line up across years.
  drop_spec<-rbind(drop_spec,NA,NA,NA,NA,NA,NA)
  drop_spec<-drop_spec[c(1,10,2,11,3,4,5,12,7,13,14,6,8,15,9),]
} else if (i==4){
  # School year i==4: two-page group table on p1[[121]] and p1[[122]].
  a<-paste0("https://tea.texas.gov/sites/default/files/DropComp_",school_year[i],".pdf")
  dropout<-pdf_text(a)
  p1<-strsplit(dropout, "\n")
  table1<-data.frame(p1[[121]][c(5,7:9)])
  table2<-data.frame(p1[[122]][5:9])
  rnums1<-nrow(table1)
  rnums2<-nrow(table2)
  table1$Main<-as.character(table1[1:rnums1,1])
  table2$Main<-as.character(table2[1:rnums2,1])
  # Normalize whitespace so each row later splits cleanly on single spaces.
  table1$Main<-trimws(table1$Main, which="left")
  table2$Main<-trimws(table2$Main, which="left")
  table1$Main<-stripWhitespace(table1$Main)
  table2$Main<-stripWhitespace(table2$Main)
  # Collapse multi-word group labels to single tokens so strsplit on " "
  # yields exactly six fields per row.
  table1$Main<-gsub("(?:Bilingual or English as a second language)","ESL", table1$Main)
  table1$Main<-gsub("(?:Gifted and talented)","GT", table1$Main)
  table1$Main<-gsub("(?:Special education)","Spec-Ed", table1$Main)
  table1$Main<-gsub("(?:Title I)","Title-I", table1$Main)
  table2$Main<-gsub("(?:At risk)","At-Risk", table2$Main)
  table2$Main<-gsub("(?:Limited English proficient)","ELL", table2$Main)
  table1$Main<-gsub("(?:,)","", table1$Main)
  table2$Main<-gsub("(?:,)","", table2$Main)
  split_var1<-as.data.frame(ldply(strsplit(table1$Main, split = " ")))
  split_var2<-as.data.frame(ldply(strsplit(table2$Main, split = " ")))
  split_var<-rbind(split_var1,split_var2)
  # BUG FIX: this branch previously read the stale `rnums` and `table`
  # objects left over from an earlier branch; compute them from the two
  # tables scraped in THIS branch instead.
  rnums<-rnums1+rnums2
  drop_spec<-data.frame(Groups=character(),Students=numeric(), Students_Percentage=numeric(), Dropouts=numeric(), Dropouts_Percentage=numeric(), Annual_Dropout_Rate=numeric(), School_Year=character())
  drop_spec<-drop_spec[1:rnums,]
  drop_spec$Main<-c(table1$Main,table2$Main)
  drop_spec$Groups<-split_var[,1]
  drop_spec$Students<-split_var[,2]
  drop_spec$Students_Percentage<-split_var[,3]
  drop_spec$Dropouts<-split_var[,4]
  drop_spec$Dropouts_Percentage<-split_var[,5]
  drop_spec$Annual_Dropout_Rate<-split_var[,6]
  drop_spec$School_Year<-school_year[i]
  # Split fields are character; coerce the measure columns to numeric.
  drop_spec$Students<-as.numeric(drop_spec$Students)
  drop_spec$Students_Percentage<-as.numeric(drop_spec$Students_Percentage)
  drop_spec$Dropouts<-as.numeric(drop_spec$Dropouts)
  drop_spec$Dropouts_Percentage<-as.numeric(drop_spec$Dropouts_Percentage)
  drop_spec$Annual_Dropout_Rate<-as.numeric(drop_spec$Annual_Dropout_Rate)
  drop_spec<-select(drop_spec,-Main)
  # Pad to 15 rows and reorder so group categories line up across years.
  drop_spec<-rbind(drop_spec,NA,NA,NA,NA,NA,NA)
  drop_spec<-drop_spec[c(1,10,2,11,3,4,5,12,7,13,14,6,8,15,9),]
} else if (i==5){
  # School year i==5: two-page group table on p1[[138]] and p1[[139]].
  a<-paste0("https://tea.texas.gov/sites/default/files/DropComp_",school_year[i],".pdf")
  dropout<-pdf_text(a)
  p1<-strsplit(dropout, "\n")
  table1<-data.frame(p1[[138]][c(5,7:9)])
  table2<-data.frame(p1[[139]][5:9])
  rnums1<-nrow(table1)
  rnums2<-nrow(table2)
  table1$Main<-as.character(table1[1:rnums1,1])
  table2$Main<-as.character(table2[1:rnums2,1])
  # Normalize whitespace so each row later splits cleanly on single spaces.
  table1$Main<-trimws(table1$Main, which="left")
  table2$Main<-trimws(table2$Main, which="left")
  table1$Main<-stripWhitespace(table1$Main)
  table2$Main<-stripWhitespace(table2$Main)
  # Collapse multi-word group labels to single tokens so strsplit on " "
  # yields exactly six fields per row.
  table1$Main<-gsub("(?:Bilingual or English as a second language)","ESL", table1$Main)
  table1$Main<-gsub("(?:Gifted and talented)","GT", table1$Main)
  table1$Main<-gsub("(?:Special education)","Spec-Ed", table1$Main)
  table1$Main<-gsub("(?:Title I)","Title-I", table1$Main)
  table2$Main<-gsub("(?:At risk)","At-Risk", table2$Main)
  table2$Main<-gsub("(?:Limited English proficient)","ELL", table2$Main)
  table1$Main<-gsub("(?:,)","", table1$Main)
  table2$Main<-gsub("(?:,)","", table2$Main)
  split_var1<-as.data.frame(ldply(strsplit(table1$Main, split = " ")))
  split_var2<-as.data.frame(ldply(strsplit(table2$Main, split = " ")))
  split_var<-rbind(split_var1,split_var2)
  # BUG FIX: this branch previously read the stale `rnums` and `table`
  # objects left over from an earlier branch; compute them from the two
  # tables scraped in THIS branch instead.
  rnums<-rnums1+rnums2
  drop_spec<-data.frame(Groups=character(),Students=numeric(), Students_Percentage=numeric(), Dropouts=numeric(), Dropouts_Percentage=numeric(), Annual_Dropout_Rate=numeric(), School_Year=character())
  drop_spec<-drop_spec[1:rnums,]
  drop_spec$Main<-c(table1$Main,table2$Main)
  drop_spec$Groups<-split_var[,1]
  drop_spec$Students<-split_var[,2]
  drop_spec$Students_Percentage<-split_var[,3]
  drop_spec$Dropouts<-split_var[,4]
  drop_spec$Dropouts_Percentage<-split_var[,5]
  drop_spec$Annual_Dropout_Rate<-split_var[,6]
  drop_spec$School_Year<-school_year[i]
  # Split fields are character; coerce the measure columns to numeric.
  drop_spec$Students<-as.numeric(drop_spec$Students)
  drop_spec$Students_Percentage<-as.numeric(drop_spec$Students_Percentage)
  drop_spec$Dropouts<-as.numeric(drop_spec$Dropouts)
  drop_spec$Dropouts_Percentage<-as.numeric(drop_spec$Dropouts_Percentage)
  drop_spec$Annual_Dropout_Rate<-as.numeric(drop_spec$Annual_Dropout_Rate)
  drop_spec<-select(drop_spec,-Main)
  # Pad to 15 rows and reorder so group categories line up across years.
  drop_spec<-rbind(drop_spec,NA,NA,NA,NA,NA,NA)
  drop_spec<-drop_spec[c(1,10,2,11,3,4,5,12,7,13,14,6,8,15,9),]
} else if (i==6){
  # School year i==6: two-page group table on p1[[146]] and p1[[147]].
  a<-paste0("https://tea.texas.gov/sites/default/files/DropComp_",school_year[i],".pdf")
  dropout<-pdf_text(a)
  p1<-strsplit(dropout, "\n")
  table1<-data.frame(p1[[146]][c(5,7:9)])
  table2<-data.frame(p1[[147]][5:9])
  rnums1<-nrow(table1)
  rnums2<-nrow(table2)
  table1$Main<-as.character(table1[1:rnums1,1])
  table2$Main<-as.character(table2[1:rnums2,1])
  # Normalize whitespace so each row later splits cleanly on single spaces.
  table1$Main<-trimws(table1$Main, which="left")
  table2$Main<-trimws(table2$Main, which="left")
  table1$Main<-stripWhitespace(table1$Main)
  table2$Main<-stripWhitespace(table2$Main)
  # Collapse multi-word group labels to single tokens so strsplit on " "
  # yields exactly six fields per row.
  table1$Main<-gsub("(?:Bilingual or English as a second language)","ESL", table1$Main)
  table1$Main<-gsub("(?:Gifted and talented)","GT", table1$Main)
  table1$Main<-gsub("(?:Special education)","Spec-Ed", table1$Main)
  table1$Main<-gsub("(?:Title I)","Title-I", table1$Main)
  table2$Main<-gsub("(?:At risk)","At-Risk", table2$Main)
  table2$Main<-gsub("(?:Limited English proficient)","ELL", table2$Main)
  table1$Main<-gsub("(?:,)","", table1$Main)
  table2$Main<-gsub("(?:,)","", table2$Main)
  split_var1<-as.data.frame(ldply(strsplit(table1$Main, split = " ")))
  split_var2<-as.data.frame(ldply(strsplit(table2$Main, split = " ")))
  split_var<-rbind(split_var1,split_var2)
  # BUG FIX: this branch previously read the stale `rnums` and `table`
  # objects left over from an earlier branch; compute them from the two
  # tables scraped in THIS branch instead.
  rnums<-rnums1+rnums2
  drop_spec<-data.frame(Groups=character(),Students=numeric(), Students_Percentage=numeric(), Dropouts=numeric(), Dropouts_Percentage=numeric(), Annual_Dropout_Rate=numeric(), School_Year=character())
  drop_spec<-drop_spec[1:rnums,]
  drop_spec$Main<-c(table1$Main,table2$Main)
  drop_spec$Groups<-split_var[,1]
  drop_spec$Students<-split_var[,2]
  drop_spec$Students_Percentage<-split_var[,3]
  drop_spec$Dropouts<-split_var[,4]
  drop_spec$Dropouts_Percentage<-split_var[,5]
  drop_spec$Annual_Dropout_Rate<-split_var[,6]
  drop_spec$School_Year<-school_year[i]
  # Split fields are character; coerce the measure columns to numeric.
  drop_spec$Students<-as.numeric(drop_spec$Students)
  drop_spec$Students_Percentage<-as.numeric(drop_spec$Students_Percentage)
  drop_spec$Dropouts<-as.numeric(drop_spec$Dropouts)
  drop_spec$Dropouts_Percentage<-as.numeric(drop_spec$Dropouts_Percentage)
  drop_spec$Annual_Dropout_Rate<-as.numeric(drop_spec$Annual_Dropout_Rate)
  drop_spec<-select(drop_spec,-Main)
  # Pad to 15 rows and reorder so group categories line up across years.
  drop_spec<-rbind(drop_spec,NA,NA,NA,NA,NA,NA)
  drop_spec<-drop_spec[c(1,10,2,11,3,4,5,12,7,13,14,6,8,15,9),]
} else if (i==7){
  # School year i==7: two-page group table on p1[[154]] and p1[[155]].
  a<-paste0("https://tea.texas.gov/sites/default/files/DropComp_",school_year[i],".pdf")
  dropout<-pdf_text(a)
  p1<-strsplit(dropout, "\n")
  table1<-data.frame(p1[[154]][c(5,7:9)])
  table2<-data.frame(p1[[155]][5:9])
  rnums1<-nrow(table1)
  rnums2<-nrow(table2)
  table1$Main<-as.character(table1[1:rnums1,1])
  table2$Main<-as.character(table2[1:rnums2,1])
  # Normalize whitespace so each row later splits cleanly on single spaces.
  table1$Main<-trimws(table1$Main, which="left")
  table2$Main<-trimws(table2$Main, which="left")
  table1$Main<-stripWhitespace(table1$Main)
  table2$Main<-stripWhitespace(table2$Main)
  # Collapse multi-word group labels to single tokens so strsplit on " "
  # yields exactly six fields per row.
  table1$Main<-gsub("(?:Bilingual or English as a second language)","ESL", table1$Main)
  table1$Main<-gsub("(?:Gifted and talented)","GT", table1$Main)
  table1$Main<-gsub("(?:Special education)","Spec-Ed", table1$Main)
  table1$Main<-gsub("(?:Title I)","Title-I", table1$Main)
  table2$Main<-gsub("(?:At risk)","At-Risk", table2$Main)
  table2$Main<-gsub("(?:Limited English proficient)","ELL", table2$Main)
  table1$Main<-gsub("(?:,)","", table1$Main)
  table2$Main<-gsub("(?:,)","", table2$Main)
  split_var1<-as.data.frame(ldply(strsplit(table1$Main, split = " ")))
  split_var2<-as.data.frame(ldply(strsplit(table2$Main, split = " ")))
  split_var<-rbind(split_var1,split_var2)
  # BUG FIX: this branch previously read the stale `rnums` and `table`
  # objects left over from an earlier branch; compute them from the two
  # tables scraped in THIS branch instead.
  rnums<-rnums1+rnums2
  drop_spec<-data.frame(Groups=character(),Students=numeric(), Students_Percentage=numeric(), Dropouts=numeric(), Dropouts_Percentage=numeric(), Annual_Dropout_Rate=numeric(), School_Year=character())
  drop_spec<-drop_spec[1:rnums,]
  drop_spec$Main<-c(table1$Main,table2$Main)
  drop_spec$Groups<-split_var[,1]
  drop_spec$Students<-split_var[,2]
  drop_spec$Students_Percentage<-split_var[,3]
  drop_spec$Dropouts<-split_var[,4]
  drop_spec$Dropouts_Percentage<-split_var[,5]
  drop_spec$Annual_Dropout_Rate<-split_var[,6]
  drop_spec$School_Year<-school_year[i]
  # Split fields are character; coerce the measure columns to numeric.
  drop_spec$Students<-as.numeric(drop_spec$Students)
  drop_spec$Students_Percentage<-as.numeric(drop_spec$Students_Percentage)
  drop_spec$Dropouts<-as.numeric(drop_spec$Dropouts)
  drop_spec$Dropouts_Percentage<-as.numeric(drop_spec$Dropouts_Percentage)
  drop_spec$Annual_Dropout_Rate<-as.numeric(drop_spec$Annual_Dropout_Rate)
  drop_spec<-select(drop_spec,-Main)
  # Pad to 15 rows and reorder so group categories line up across years.
  drop_spec<-rbind(drop_spec,NA,NA,NA,NA,NA,NA)
  drop_spec<-drop_spec[c(1,10,2,11,3,4,5,12,7,13,14,6,8,15,9),]
} else if (i==8){
  # School year i==8: the whole student-group table sits on one PDF page.
  a<-paste0("https://tea.texas.gov/sites/default/files/DropComp_",school_year[i],".pdf")
  dropout<-pdf_text(a)
  p1<-strsplit(dropout, "\n")
  table<-data.frame(p1[[60]][c(6,8:10,22:26)])
  rnums<-nrow(table)
  table$Main<-as.character(table[1:rnums,1])
  # Left-trim and collapse whitespace runs so rows split on single spaces.
  table$Main<-stripWhitespace(trimws(table$Main, which="left"))
  # Collapse multi-word group labels to single tokens (applied in the same
  # order as before) so strsplit on " " yields six fields per row.
  label_map<-c(
    "(?:Bilingual or ESLa)"="ESL",
    "(?:Bilingual or English as a second language)"="ESL",
    "(?:Second Language)"="ESL",
    "(?:CTEb)"="Career-Technical",
    "(?:Gifted and talented)"="GT",
    "(?:Gifted/Talented)"="GT",
    "(?:Section 504)"="504",
    "(?:Special education)"="Spec-Ed",
    "(?:Special Education)"="Spec-Ed",
    "(?:Title I)"="Title-I",
    "(?:At-risk)"="At-Risk",
    "(?:Limited English proficient)"="ELL",
    "(?:English learner)"="ELL",
    "(?:Foster care)"="Foster-Care",
    "(?:Overage/Not on Grade)"="Overage",
    "(?:,)"=""
  )
  for (pat in names(label_map)) {
    table$Main<-gsub(pat, label_map[[pat]], table$Main)
  }
  split_var<-as.data.frame(ldply(strsplit(table$Main, split = " ")))
  drop_spec<-data.frame(Groups=character(),Students=numeric(), Students_Percentage=numeric(), Dropouts=numeric(), Dropouts_Percentage=numeric(), Annual_Dropout_Rate=numeric(), School_Year=character())
  drop_spec<-drop_spec[1:rnums,]
  drop_spec$Main<-table$Main
  drop_spec$Groups<-split_var[,1]
  # Numeric measure columns come from fields 2-6 of each split row.
  num_cols<-c("Students","Students_Percentage","Dropouts","Dropouts_Percentage","Annual_Dropout_Rate")
  for (k in seq_along(num_cols)) {
    drop_spec[[num_cols[k]]]<-as.numeric(split_var[,k+1])
  }
  drop_spec$School_Year<-school_year[i]
  drop_spec<-select(drop_spec,-Main)
  # Pad to 15 rows and reorder so group categories line up across years.
  drop_spec<-rbind(drop_spec,NA,NA,NA,NA,NA,NA)
  drop_spec<-drop_spec[c(1,10,2,11,3,4,5,12,7,13,14,6,8,15,9),]
} else if (i==9){
  # School year i==9: single-page group table on p1[[61]].
  a<-paste0("https://tea.texas.gov/sites/default/files/DropComp_",school_year[i],".pdf")
  dropout<-pdf_text(a)
  p1<-strsplit(dropout, "\n")
  table<-data.frame(p1[[61]][c(21,23:25,37:41)])
  rnums<-nrow(table)
  table$Main<-as.character(table[1:rnums,1])
  # Left-trim and collapse whitespace runs so rows split on single spaces.
  table$Main<-stripWhitespace(trimws(table$Main, which="left"))
  # Collapse multi-word group labels to single tokens (applied in the same
  # order as before) so strsplit on " " yields six fields per row.
  label_map<-c(
    "(?:Bilingual or ESLa)"="ESL",
    "(?:Bilingual or English as a second language)"="ESL",
    "(?:Second Language)"="ESL",
    "(?:CTEb)"="Career-Technical",
    "(?:Gifted and talented)"="GT",
    "(?:Gifted/Talented)"="GT",
    "(?:Section 504)"="504",
    "(?:Special education)"="Spec-Ed",
    "(?:Special Education)"="Spec-Ed",
    "(?:Title I)"="Title-I",
    "(?:At-risk)"="At-Risk",
    "(?:Limited English proficient)"="ELL",
    "(?:English learner)"="ELL",
    "(?:Foster care)"="Foster-Care",
    "(?:Overage/Not on Grade)"="Overage",
    "(?:,)"=""
  )
  for (pat in names(label_map)) {
    table$Main<-gsub(pat, label_map[[pat]], table$Main)
  }
  split_var<-as.data.frame(ldply(strsplit(table$Main, split = " ")))
  drop_spec<-data.frame(Groups=character(),Students=numeric(), Students_Percentage=numeric(), Dropouts=numeric(), Dropouts_Percentage=numeric(), Annual_Dropout_Rate=numeric(), School_Year=character())
  drop_spec<-drop_spec[1:rnums,]
  drop_spec$Main<-table$Main
  drop_spec$Groups<-split_var[,1]
  # Numeric measure columns come from fields 2-6 of each split row.
  num_cols<-c("Students","Students_Percentage","Dropouts","Dropouts_Percentage","Annual_Dropout_Rate")
  for (k in seq_along(num_cols)) {
    drop_spec[[num_cols[k]]]<-as.numeric(split_var[,k+1])
  }
  drop_spec$School_Year<-school_year[i]
  drop_spec<-select(drop_spec,-Main)
  # Pad to 15 rows and reorder so group categories line up across years.
  drop_spec<-rbind(drop_spec,NA,NA,NA,NA,NA,NA)
  drop_spec<-drop_spec[c(1,10,2,11,3,4,5,12,7,13,14,6,8,15,9),]
} else if (i==10){
  # School year i==10: single-page group table on p1[[62]].
  a<-paste0("https://tea.texas.gov/sites/default/files/DropComp_",school_year[i],".pdf")
  dropout<-pdf_text(a)
  p1<-strsplit(dropout, "\n")
  table<-data.frame(p1[[62]][c(17,19:21,35:39)])
  rnums<-nrow(table)
  table$Main<-as.character(table[1:rnums,1])
  # Left-trim and collapse whitespace runs so rows split on single spaces.
  table$Main<-stripWhitespace(trimws(table$Main, which="left"))
  # Collapse multi-word group labels to single tokens (applied in the same
  # order as before) so strsplit on " " yields six fields per row.
  label_map<-c(
    "(?:Bilingual or ESLa)"="ESL",
    "(?:Bilingual or English as a second language)"="ESL",
    "(?:Second Language)"="ESL",
    "(?:CTEb)"="Career-Technical",
    "(?:Gifted and talented)"="GT",
    "(?:Gifted/Talented)"="GT",
    "(?:Section 504)"="504",
    "(?:Special education)"="Spec-Ed",
    "(?:Special Education)"="Spec-Ed",
    "(?:Title I)"="Title-I",
    "(?:At-risk)"="At-Risk",
    "(?:Limited English proficient)"="ELL",
    "(?:English learner)"="ELL",
    "(?:Foster care)"="Foster-Care",
    "(?:Overage/Not on Grade)"="Overage",
    "(?:,)"=""
  )
  for (pat in names(label_map)) {
    table$Main<-gsub(pat, label_map[[pat]], table$Main)
  }
  split_var<-as.data.frame(ldply(strsplit(table$Main, split = " ")))
  drop_spec<-data.frame(Groups=character(),Students=numeric(), Students_Percentage=numeric(), Dropouts=numeric(), Dropouts_Percentage=numeric(), Annual_Dropout_Rate=numeric(), School_Year=character())
  drop_spec<-drop_spec[1:rnums,]
  drop_spec$Main<-table$Main
  drop_spec$Groups<-split_var[,1]
  # Numeric measure columns come from fields 2-6 of each split row.
  num_cols<-c("Students","Students_Percentage","Dropouts","Dropouts_Percentage","Annual_Dropout_Rate")
  for (k in seq_along(num_cols)) {
    drop_spec[[num_cols[k]]]<-as.numeric(split_var[,k+1])
  }
  drop_spec$School_Year<-school_year[i]
  drop_spec<-select(drop_spec,-Main)
  # Pad to 15 rows and reorder so group categories line up across years.
  drop_spec<-rbind(drop_spec,NA,NA,NA,NA,NA,NA)
  drop_spec<-drop_spec[c(1,10,2,11,3,4,5,12,7,13,14,6,8,15,9),]
} else if (i==11){
  # School year i==11: two-page group table on p1[[67]] and p1[[68]].
  a<-paste0("https://tea.texas.gov/sites/default/files/DropComp_",school_year[i],".pdf")
  dropout<-pdf_text(a)
  p1<-strsplit(dropout, "\n")
  table1<-data.frame(p1[[67]][c(30,32:34)])
  table2<-data.frame(p1[[68]][6:10])
  rnums1<-nrow(table1)
  rnums2<-nrow(table2)
  table1$Main<-as.character(table1[1:rnums1,1])
  table2$Main<-as.character(table2[1:rnums2,1])
  # Normalize whitespace so each row later splits cleanly on single spaces.
  table1$Main<-trimws(table1$Main, which="left")
  table2$Main<-trimws(table2$Main, which="left")
  table1$Main<-stripWhitespace(table1$Main)
  table2$Main<-stripWhitespace(table2$Main)
  # Collapse multi-word group labels to single tokens so strsplit on " "
  # yields exactly six fields per row.
  table1$Main<-gsub("(?:Bilingual or English as a second language)","ESL", table1$Main)
  table1$Main<-gsub("(?:Gifted and talented)","GT", table1$Main)
  table1$Main<-gsub("(?:Special education)","Spec-Ed", table1$Main)
  table1$Main<-gsub("(?:Title I)","Title-I", table1$Main)
  table2$Main<-gsub("(?:At-risk)","At-Risk", table2$Main)
  table2$Main<-gsub("(?:Limited English proficient)","ELL", table2$Main)
  table1$Main<-gsub("(?:,)","", table1$Main)
  table2$Main<-gsub("(?:,)","", table2$Main)
  split_var1<-as.data.frame(ldply(strsplit(table1$Main, split = " ")))
  split_var2<-as.data.frame(ldply(strsplit(table2$Main, split = " ")))
  split_var<-rbind(split_var1,split_var2)
  # BUG FIX: this branch previously read the stale `rnums` and `table`
  # objects left over from an earlier branch; compute them from the two
  # tables scraped in THIS branch instead.
  rnums<-rnums1+rnums2
  drop_spec<-data.frame(Groups=character(),Students=numeric(), Students_Percentage=numeric(), Dropouts=numeric(), Dropouts_Percentage=numeric(), Annual_Dropout_Rate=numeric(), School_Year=character())
  drop_spec<-drop_spec[1:rnums,]
  drop_spec$Main<-c(table1$Main,table2$Main)
  drop_spec$Groups<-split_var[,1]
  drop_spec$Students<-split_var[,2]
  drop_spec$Students_Percentage<-split_var[,3]
  drop_spec$Dropouts<-split_var[,4]
  drop_spec$Dropouts_Percentage<-split_var[,5]
  drop_spec$Annual_Dropout_Rate<-split_var[,6]
  drop_spec$School_Year<-school_year[i]
  # Split fields are character; coerce the measure columns to numeric.
  drop_spec$Students<-as.numeric(drop_spec$Students)
  drop_spec$Students_Percentage<-as.numeric(drop_spec$Students_Percentage)
  drop_spec$Dropouts<-as.numeric(drop_spec$Dropouts)
  drop_spec$Dropouts_Percentage<-as.numeric(drop_spec$Dropouts_Percentage)
  drop_spec$Annual_Dropout_Rate<-as.numeric(drop_spec$Annual_Dropout_Rate)
  drop_spec<-select(drop_spec,-Main)
  # Pad to 15 rows and reorder so group categories line up across years.
  drop_spec<-rbind(drop_spec,NA,NA,NA,NA,NA,NA)
  drop_spec<-drop_spec[c(1,10,2,11,3,4,5,12,7,13,14,6,8,15,9),]
} else if (i==12){
  # School year i==12: single-page group table on p1[[75]].
  a<-paste0("https://tea.texas.gov/sites/default/files/DropComp_",school_year[i],".pdf")
  dropout<-pdf_text(a)
  p1<-strsplit(dropout, "\n")
  table<-data.frame(p1[[75]][c(15,17:19,33:37)])
  rnums<-nrow(table)
  table$Main<-as.character(table[1:rnums,1])
  # Left-trim and collapse whitespace runs so rows split on single spaces.
  table$Main<-stripWhitespace(trimws(table$Main, which="left"))
  # Collapse multi-word group labels to single tokens (applied in the same
  # order as before) so strsplit on " " yields six fields per row.
  label_map<-c(
    "(?:Bilingual or ESLa)"="ESL",
    "(?:Bilingual or English as a second language)"="ESL",
    "(?:Second Language)"="ESL",
    "(?:CTEb)"="Career-Technical",
    "(?:Gifted and talented)"="GT",
    "(?:Gifted/Talented)"="GT",
    "(?:Section 504)"="504",
    "(?:Special education)"="Spec-Ed",
    "(?:Special Education)"="Spec-Ed",
    "(?:Title I)"="Title-I",
    "(?:At-risk)"="At-Risk",
    "(?:Limited English proficient)"="ELL",
    "(?:English learner)"="ELL",
    "(?:Foster care)"="Foster-Care",
    "(?:Overage/Not on Grade)"="Overage",
    "(?:,)"=""
  )
  for (pat in names(label_map)) {
    table$Main<-gsub(pat, label_map[[pat]], table$Main)
  }
  split_var<-as.data.frame(ldply(strsplit(table$Main, split = " ")))
  drop_spec<-data.frame(Groups=character(),Students=numeric(), Students_Percentage=numeric(), Dropouts=numeric(), Dropouts_Percentage=numeric(), Annual_Dropout_Rate=numeric(), School_Year=character())
  drop_spec<-drop_spec[1:rnums,]
  drop_spec$Main<-table$Main
  drop_spec$Groups<-split_var[,1]
  # Numeric measure columns come from fields 2-6 of each split row.
  num_cols<-c("Students","Students_Percentage","Dropouts","Dropouts_Percentage","Annual_Dropout_Rate")
  for (k in seq_along(num_cols)) {
    drop_spec[[num_cols[k]]]<-as.numeric(split_var[,k+1])
  }
  drop_spec$School_Year<-school_year[i]
  drop_spec<-select(drop_spec,-Main)
  # Pad to 15 rows and reorder so group categories line up across years.
  drop_spec<-rbind(drop_spec,NA,NA,NA,NA,NA,NA)
  drop_spec<-drop_spec[c(1,10,2,11,3,4,5,12,7,13,14,6,8,15,9),]
} else if (i==13){
  # School year i==13: single-page group table on p1[[76]].
  a<-paste0("https://tea.texas.gov/sites/default/files/DropComp_",school_year[i],".pdf")
  dropout<-pdf_text(a)
  p1<-strsplit(dropout, "\n")
  table<-data.frame(p1[[76]][c(6,8:10,24:28)])
  rnums<-nrow(table)
  table$Main<-as.character(table[1:rnums,1])
  # Left-trim and collapse whitespace runs so rows split on single spaces.
  table$Main<-stripWhitespace(trimws(table$Main, which="left"))
  # Collapse multi-word group labels to single tokens (applied in the same
  # order as before) so strsplit on " " yields six fields per row.
  label_map<-c(
    "(?:Bilingual or ESLa)"="ESL",
    "(?:Bilingual or English as a second language)"="ESL",
    "(?:Second Language)"="ESL",
    "(?:CTEb)"="Career-Technical",
    "(?:Gifted and talented)"="GT",
    "(?:Gifted/Talented)"="GT",
    "(?:Section 504)"="504",
    "(?:Special education)"="Spec-Ed",
    "(?:Special Education)"="Spec-Ed",
    "(?:Title I)"="Title-I",
    "(?:At-risk)"="At-Risk",
    "(?:Limited English proficient)"="ELL",
    "(?:English learner)"="ELL",
    "(?:Foster care)"="Foster-Care",
    "(?:Overage/Not on Grade)"="Overage",
    "(?:,)"=""
  )
  for (pat in names(label_map)) {
    table$Main<-gsub(pat, label_map[[pat]], table$Main)
  }
  split_var<-as.data.frame(ldply(strsplit(table$Main, split = " ")))
  drop_spec<-data.frame(Groups=character(),Students=numeric(), Students_Percentage=numeric(), Dropouts=numeric(), Dropouts_Percentage=numeric(), Annual_Dropout_Rate=numeric(), School_Year=character())
  drop_spec<-drop_spec[1:rnums,]
  drop_spec$Main<-table$Main
  drop_spec$Groups<-split_var[,1]
  # Numeric measure columns come from fields 2-6 of each split row.
  num_cols<-c("Students","Students_Percentage","Dropouts","Dropouts_Percentage","Annual_Dropout_Rate")
  for (k in seq_along(num_cols)) {
    drop_spec[[num_cols[k]]]<-as.numeric(split_var[,k+1])
  }
  drop_spec$School_Year<-school_year[i]
  drop_spec<-select(drop_spec,-Main)
  # Pad to 15 rows and reorder so group categories line up across years.
  drop_spec<-rbind(drop_spec,NA,NA,NA,NA,NA,NA)
  drop_spec<-drop_spec[c(1,10,2,11,3,4,5,12,7,13,14,6,8,15,9),]
} else if (i==14){
  # School year i==14: single-page group table on p1[[76]] (ten rows, so only
  # five NA pad rows are appended and a different row order is used).
  a<-paste0("https://tea.texas.gov/sites/default/files/DropComp_",school_year[i],".pdf")
  dropout<-pdf_text(a)
  p1<-strsplit(dropout, "\n")
  table<-data.frame(p1[[76]][c(6:10,25:29)])
  rnums<-nrow(table)
  table$Main<-as.character(table[1:rnums,1])
  # Left-trim and collapse whitespace runs so rows split on single spaces.
  table$Main<-stripWhitespace(trimws(table$Main, which="left"))
  # Collapse multi-word group labels to single tokens (applied in the same
  # order as before) so strsplit on " " yields six fields per row.
  label_map<-c(
    "(?:Bilingual or ESLa)"="ESL",
    "(?:Bilingual or English as a second language)"="ESL",
    "(?:Second Language)"="ESL",
    "(?:CTEb)"="Career-Technical",
    "(?:Gifted and talented)"="GT",
    "(?:Gifted/Talented)"="GT",
    "(?:Section 504)"="504",
    "(?:Special education)"="Spec-Ed",
    "(?:Special Education)"="Spec-Ed",
    "(?:Title I)"="Title-I",
    "(?:At-risk)"="At-Risk",
    "(?:Limited English proficient)"="ELL",
    "(?:English learner)"="ELL",
    "(?:English language learner)"="ELL",
    "(?:Foster care)"="Foster-Care",
    "(?:Overage/Not on Grade)"="Overage",
    "(?:,)"=""
  )
  for (pat in names(label_map)) {
    table$Main<-gsub(pat, label_map[[pat]], table$Main)
  }
  split_var<-as.data.frame(ldply(strsplit(table$Main, split = " ")))
  drop_spec<-data.frame(Groups=character(),Students=numeric(), Students_Percentage=numeric(), Dropouts=numeric(), Dropouts_Percentage=numeric(), Annual_Dropout_Rate=numeric(), School_Year=character())
  drop_spec<-drop_spec[1:rnums,]
  drop_spec$Main<-table$Main
  drop_spec$Groups<-split_var[,1]
  # Numeric measure columns come from fields 2-6 of each split row.
  num_cols<-c("Students","Students_Percentage","Dropouts","Dropouts_Percentage","Annual_Dropout_Rate")
  for (k in seq_along(num_cols)) {
    drop_spec[[num_cols[k]]]<-as.numeric(split_var[,k+1])
  }
  drop_spec$School_Year<-school_year[i]
  drop_spec<-select(drop_spec,-Main)
  # Pad to 15 rows and reorder so group categories line up across years.
  drop_spec<-rbind(drop_spec,NA,NA,NA,NA,NA)
  drop_spec<-drop_spec[c(1,2,3,11,4,5,6,12,7,13,14,8,9,15,10),]
} else if (i==15){
  # School year i==15: single-page group table on p1[[76]] (ten rows, so only
  # five NA pad rows are appended and a different row order is used).
  a<-paste0("https://tea.texas.gov/sites/default/files/DropComp_",school_year[i],".pdf")
  dropout<-pdf_text(a)
  p1<-strsplit(dropout, "\n")
  table<-data.frame(p1[[76]][c(15:19,34:38)])
  rnums<-nrow(table)
  table$Main<-as.character(table[1:rnums,1])
  # Left-trim and collapse whitespace runs so rows split on single spaces.
  table$Main<-stripWhitespace(trimws(table$Main, which="left"))
  # Collapse multi-word group labels to single tokens (applied in the same
  # order as before) so strsplit on " " yields six fields per row.
  label_map<-c(
    "(?:Bilingual or ESLa)"="ESL",
    "(?:Bilingual or English as a second language)"="ESL",
    "(?:Second Language)"="ESL",
    "(?:CTEb)"="Career-Technical",
    "(?:Gifted and talented)"="GT",
    "(?:Gifted/Talented)"="GT",
    "(?:Section 504)"="504",
    "(?:Special education)"="Spec-Ed",
    "(?:Special Education)"="Spec-Ed",
    "(?:Title I)"="Title-I",
    "(?:At-risk)"="At-Risk",
    "(?:Limited English proficient)"="ELL",
    "(?:English learner)"="ELL",
    "(?:English language learner)"="ELL",
    "(?:Foster care)"="Foster-Care",
    "(?:Overage/Not on Grade)"="Overage",
    "(?:,)"=""
  )
  for (pat in names(label_map)) {
    table$Main<-gsub(pat, label_map[[pat]], table$Main)
  }
  split_var<-as.data.frame(ldply(strsplit(table$Main, split = " ")))
  drop_spec<-data.frame(Groups=character(),Students=numeric(), Students_Percentage=numeric(), Dropouts=numeric(), Dropouts_Percentage=numeric(), Annual_Dropout_Rate=numeric(), School_Year=character())
  drop_spec<-drop_spec[1:rnums,]
  drop_spec$Main<-table$Main
  drop_spec$Groups<-split_var[,1]
  # Numeric measure columns come from fields 2-6 of each split row.
  num_cols<-c("Students","Students_Percentage","Dropouts","Dropouts_Percentage","Annual_Dropout_Rate")
  for (k in seq_along(num_cols)) {
    drop_spec[[num_cols[k]]]<-as.numeric(split_var[,k+1])
  }
  drop_spec$School_Year<-school_year[i]
  drop_spec<-select(drop_spec,-Main)
  # Pad to 15 rows and reorder so group categories line up across years.
  drop_spec<-rbind(drop_spec,NA,NA,NA,NA,NA)
  drop_spec<-drop_spec[c(1,2,3,11,4,5,6,12,7,13,14,8,9,15,10),]
} else if (i==15){
# NOTE(review): this branch is UNREACHABLE dead code — `i==15` is already
# handled by the identical branch immediately above, so control can never
# reach here.  It looks like a copy/paste remnant (or the condition was
# meant to be a different index); confirm intent, then delete it or fix the
# condition.  Body left byte-identical pending that decision.
a<-paste0("https://tea.texas.gov/sites/default/files/DropComp_",school_year[i],".pdf")
dropout<-pdf_text(a)
p1<-strsplit(dropout, "\n")
table<-data.frame(p1[[76]][c(15:19,34:38)])
rnums<-nrow(table)
table$Main<-as.character(table[1:rnums,1])
table$Main<-trimws(table$Main, which="left")
table$Main<-stripWhitespace(table$Main)
# Label-normalization replacements (same set as the live i==15 branch).
table$Main<-gsub("(?:Bilingual or ESLa)","ESL", table$Main)
table$Main<-gsub("(?:Bilingual or English as a second language)","ESL", table$Main)
table$Main<-gsub("(?:Second Language)","ESL", table$Main)
table$Main<-gsub("(?:CTEb)","Career-Technical", table$Main)
table$Main<-gsub("(?:Gifted and talented)","GT", table$Main)
table$Main<-gsub("(?:Gifted/Talented)","GT", table$Main)
table$Main<-gsub("(?:Section 504)","504", table$Main)
table$Main<-gsub("(?:Special education)","Spec-Ed", table$Main)
table$Main<-gsub("(?:Special Education)","Spec-Ed", table$Main)
table$Main<-gsub("(?:Title I)","Title-I", table$Main)
table$Main<-gsub("(?:At-risk)","At-Risk", table$Main)
table$Main<-gsub("(?:Limited English proficient)","ELL", table$Main)
table$Main<-gsub("(?:English learner)","ELL", table$Main)
table$Main<-gsub("(?:English language learner)","ELL", table$Main)
table$Main<-gsub("(?:Foster care)","Foster-Care", table$Main)
table$Main<-gsub("(?:Overage/Not on Grade)","Overage", table$Main)
table$Main<-gsub("(?:,)","", table$Main)
split_var<-as.data.frame(ldply(strsplit(table$Main, split = " ")))
drop_spec<-data.frame(Groups=character(),Students=numeric(), Students_Percentage=numeric(), Dropouts=numeric(), Dropouts_Percentage=numeric(), Annual_Dropout_Rate=numeric(), School_Year=character())
drop_spec<-drop_spec[1:rnums,]
drop_spec$Main<-table$Main
drop_spec$Groups<-split_var[,1]
drop_spec$Students<-split_var[,2]
drop_spec$Students_Percentage<-split_var[,3]
drop_spec$Dropouts<-split_var[,4]
drop_spec$Dropouts_Percentage<-split_var[,5]
drop_spec$Annual_Dropout_Rate<-split_var[,6]
drop_spec$School_Year<-school_year[i]
drop_spec$Students<-as.numeric(drop_spec$Students)
drop_spec$Students_Percentage<-as.numeric(drop_spec$Students_Percentage)
drop_spec$Dropouts<-as.numeric(drop_spec$Dropouts)
drop_spec$Dropouts_Percentage<-as.numeric(drop_spec$Dropouts_Percentage)
drop_spec$Annual_Dropout_Rate<-as.numeric(drop_spec$Annual_Dropout_Rate)
drop_spec<-select(drop_spec,-Main)
drop_spec<-rbind(drop_spec,NA,NA,NA,NA,NA)
drop_spec<-drop_spec[c(1,2,3,11,4,5,6,12,7,13,14,8,9,15,10),]
} else if (i==16){
  # School year i==16: single-page group table on p1[[79]].
  # NOTE(review): c(20:24,40:43) selects only 9 lines while sibling years
  # select 10 and the final reorder indexes row 15 (9 + 5 NA = 14 rows, so
  # row 15 comes back all-NA).  This matches the original behavior, but the
  # second range may be missing a line (40:44?) — confirm against the PDF.
  a<-paste0("https://tea.texas.gov/sites/default/files/DropComp_",school_year[i],".pdf")
  dropout<-pdf_text(a)
  p1<-strsplit(dropout, "\n")
  table<-data.frame(p1[[79]][c(20:24,40:43)])
  rnums<-nrow(table)
  table$Main<-as.character(table[1:rnums,1])
  # Left-trim and collapse whitespace runs so rows split on single spaces.
  table$Main<-stripWhitespace(trimws(table$Main, which="left"))
  # Collapse multi-word group labels to single tokens (applied in the same
  # order as before) so strsplit on " " yields six fields per row.
  label_map<-c(
    "(?:Bilingual or ESLa)"="ESL",
    "(?:Bilingual or English as a second language)"="ESL",
    "(?:Second Language)"="ESL",
    "(?:CTEb)"="Career-Technical",
    "(?:Gifted and talented)"="GT",
    "(?:Gifted/Talented)"="GT",
    "(?:Section 504)"="504",
    "(?:Special education)"="Spec-Ed",
    "(?:Special Education)"="Spec-Ed",
    "(?:Title I)"="Title-I",
    "(?:At-risk)"="At-Risk",
    "(?:Limited English proficient)"="ELL",
    "(?:English learner)"="ELL",
    "(?:English language learner)"="ELL",
    "(?:Foster care)"="Foster-Care",
    "(?:Overage/Not on Grade)"="Overage",
    "(?:,)"=""
  )
  for (pat in names(label_map)) {
    table$Main<-gsub(pat, label_map[[pat]], table$Main)
  }
  split_var<-as.data.frame(ldply(strsplit(table$Main, split = " ")))
  drop_spec<-data.frame(Groups=character(),Students=numeric(), Students_Percentage=numeric(), Dropouts=numeric(), Dropouts_Percentage=numeric(), Annual_Dropout_Rate=numeric(), School_Year=character())
  drop_spec<-drop_spec[1:rnums,]
  drop_spec$Main<-table$Main
  drop_spec$Groups<-split_var[,1]
  # Numeric measure columns come from fields 2-6 of each split row.
  num_cols<-c("Students","Students_Percentage","Dropouts","Dropouts_Percentage","Annual_Dropout_Rate")
  for (k in seq_along(num_cols)) {
    drop_spec[[num_cols[k]]]<-as.numeric(split_var[,k+1])
  }
  drop_spec$School_Year<-school_year[i]
  drop_spec<-select(drop_spec,-Main)
  # Pad with NA rows and reorder so group categories line up across years.
  drop_spec<-rbind(drop_spec,NA,NA,NA,NA,NA)
  drop_spec<-drop_spec[c(1,2,3,11,4,5,6,12,7,13,14,8,9,15,10),]
} else if (i==17){
b<-paste0("https://tea.texas.gov/sites/default/files/dropcomp_",school_year[i],".pdf")
dropout<-pdf_text(b)
p2<-strsplit(dropout, "\n")
table<-data.frame(p2[[79]][c(19:23,38:42)])
rnums<-nrow(table)
table$Main<-as.character(table[1:rnums,1])
table$Main<-trimws(table$Main, which="left")
table$Main<-stripWhitespace(table$Main)
table$Main<-gsub("(?:Bilingual or ESLa)","ESL", table$Main)
table$Main<-gsub("(?:Bilingual or English as a second language)","ESL", table$Main)
table$Main<-gsub("(?:Second Language)","ESL", table$Main)
table$Main<-gsub("(?:CTEb)","Career-Technical", table$Main)
table$Main<-gsub("(?:Gifted and talented)","GT", table$Main)
table$Main<-gsub("(?:Gifted/Talented)","GT", table$Main)
table$Main<-gsub("(?:Section 504)","504", table$Main)
table$Main<-gsub("(?:Special education)","Spec-Ed", table$Main)
table$Main<-gsub("(?:Special Education)","Spec-Ed", table$Main)
table$Main<-gsub("(?:Title I)","Title-I", table$Main)
table$Main<-gsub("(?:At-risk)","At-Risk", table$Main)
table$Main<-gsub("(?:Limited English proficient)","ELL", table$Main)
table$Main<-gsub("(?:English learner)","ELL", table$Main)
table$Main<-gsub("(?:English language learner)","ELL", table$Main)
table$Main<-gsub("(?:Foster care)","Foster-Care", table$Main)
table$Main<-gsub("(?:Overage/Not on Grade)","Overage", table$Main)
table$Main<-gsub("(?:,)","", table$Main)
split_var<-as.data.frame(ldply(strsplit(table$Main, split = " ")))
drop_spec<-data.frame(Groups=character(),Students=numeric(), Students_Percentage=numeric(), Dropouts=numeric(), Dropouts_Percentage=numeric(), Annual_Dropout_Rate=numeric(), School_Year=character())
drop_spec<-drop_spec[1:rnums,]
drop_spec$Main<-table$Main
drop_spec$Groups<-split_var[,1]
drop_spec$Students<-split_var[,2]
drop_spec$Students_Percentage<-split_var[,3]
drop_spec$Dropouts<-split_var[,4]
drop_spec$Dropouts_Percentage<-split_var[,5]
drop_spec$Annual_Dropout_Rate<-split_var[,6]
drop_spec$School_Year<-school_year[i]
drop_spec$Students<-as.numeric(drop_spec$Students)
drop_spec$Students_Percentage<-as.numeric(drop_spec$Students_Percentage)
drop_spec$Dropouts<-as.numeric(drop_spec$Dropouts)
drop_spec$Dropouts_Percentage<-as.numeric(drop_spec$Dropouts_Percentage)
drop_spec$Annual_Dropout_Rate<-as.numeric(drop_spec$Annual_Dropout_Rate)
drop_spec<-select(drop_spec,-Main)
drop_spec<-rbind(drop_spec,NA,NA,NA,NA,NA)
drop_spec<-drop_spec[c(1,2,3,11,4,5,6,12,7,13,14,8,9,15,10),]
} else if (i==18){
b<-paste0("https://tea.texas.gov/sites/default/files/dropcomp_",school_year[i],".pdf")
dropout<-pdf_text(b)
p2<-strsplit(dropout, "\n")
table<-data.frame(p2[[79]][c(20:24,38:43)])
rnums<-nrow(table)
table$Main<-as.character(table[1:rnums,1])
table$Main<-trimws(table$Main, which="left")
table$Main<-stripWhitespace(table$Main)
table$Main<-gsub("(?:Bilingual or ESLa)","ESL", table$Main)
table$Main<-gsub("(?:Bilingual or English as a second language)","ESL", table$Main)
table$Main<-gsub("(?:Second Language)","ESL", table$Main)
table$Main<-gsub("(?:CTEb)","Career-Technical", table$Main)
table$Main<-gsub("(?:Gifted and talented)","GT", table$Main)
table$Main<-gsub("(?:Gifted/Talented)","GT", table$Main)
table$Main<-gsub("(?:Section 504)","504", table$Main)
table$Main<-gsub("(?:Special education)","Spec-Ed", table$Main)
table$Main<-gsub("(?:Special Education)","Spec-Ed", table$Main)
table$Main<-gsub("(?:Title I)","Title-I", table$Main)
table$Main<-gsub("(?:At-risk)","At-Risk", table$Main)
table$Main<-gsub("(?:Limited English proficient)","ELL", table$Main)
table$Main<-gsub("(?:English learner)","ELL", table$Main)
table$Main<-gsub("(?:English language learner)","ELL", table$Main)
table$Main<-gsub("(?:Foster care)","Foster-Care", table$Main)
table$Main<-gsub("(?:Overage/Not on Grade)","Overage", table$Main)
table$Main<-gsub("(?:,)","", table$Main)
split_var<-as.data.frame(ldply(strsplit(table$Main, split = " ")))
drop_spec<-data.frame(Groups=character(),Students=numeric(), Students_Percentage=numeric(), Dropouts=numeric(), Dropouts_Percentage=numeric(), Annual_Dropout_Rate=numeric(), School_Year=character())
drop_spec<-drop_spec[1:rnums,]
drop_spec$Main<-table$Main
drop_spec$Groups<-split_var[,1]
drop_spec$Students<-split_var[,2]
drop_spec$Students_Percentage<-split_var[,3]
drop_spec$Dropouts<-split_var[,4]
drop_spec$Dropouts_Percentage<-split_var[,5]
drop_spec$Annual_Dropout_Rate<-split_var[,6]
drop_spec$School_Year<-school_year[i]
drop_spec$Students<-as.numeric(drop_spec$Students)
drop_spec$Students_Percentage<-as.numeric(drop_spec$Students_Percentage)
drop_spec$Dropouts<-as.numeric(drop_spec$Dropouts)
drop_spec$Dropouts_Percentage<-as.numeric(drop_spec$Dropouts_Percentage)
drop_spec$Annual_Dropout_Rate<-as.numeric(drop_spec$Annual_Dropout_Rate)
drop_spec<-select(drop_spec,-Main)
drop_spec<-rbind(drop_spec,NA,NA,NA,NA)
drop_spec<-drop_spec[c(1,2,3,12,4,5,6,13,7,14,8,9,10,15,11),]
} else if (i==19){
b<-paste0("https://tea.texas.gov/sites/default/files/dropcomp_",school_year[i],".pdf")
dropout<-pdf_text(b)
p2<-strsplit(dropout, "\n")
table<-data.frame(p2[[84]][c(18:22,36:43)])
rnums<-nrow(table)
table$Main<-as.character(table[1:rnums,1])
table$Main<-trimws(table$Main, which="left")
table$Main<-stripWhitespace(table$Main)
table$Main<-gsub("(?:Bilingual or ESLa)","ESL", table$Main)
table$Main<-gsub("(?:Bilingual or English as a second language)","ESL", table$Main)
table$Main<-gsub("(?:Second Language)","ESL", table$Main)
table$Main<-gsub("(?:CTEb)","Career-Technical", table$Main)
table$Main<-gsub("(?:Gifted and talented)","GT", table$Main)
table$Main<-gsub("(?:Gifted/Talented)","GT", table$Main)
table$Main<-gsub("(?:Section 504)","504", table$Main)
table$Main<-gsub("(?:Special education)","Spec-Ed", table$Main)
table$Main<-gsub("(?:Special Education)","Spec-Ed", table$Main)
table$Main<-gsub("(?:Title I)","Title-I", table$Main)
table$Main<-gsub("(?:At-risk)","At-Risk", table$Main)
table$Main<-gsub("(?:Limited English proficient)","ELL", table$Main)
table$Main<-gsub("(?:English learner)","ELL", table$Main)
table$Main<-gsub("(?:English language learner)","ELL", table$Main)
table$Main<-gsub("(?:Foster care)","Foster-Care", table$Main)
table$Main<-gsub("(?:Overage/Not on Grade)","Overage", table$Main)
table$Main<-gsub("(?:,)","", table$Main)
split_var<-as.data.frame(ldply(strsplit(table$Main, split = " ")))
drop_spec<-data.frame(Groups=character(),Students=numeric(), Students_Percentage=numeric(), Dropouts=numeric(), Dropouts_Percentage=numeric(), Annual_Dropout_Rate=numeric(), School_Year=character())
drop_spec<-drop_spec[1:rnums,]
drop_spec$Main<-table$Main
drop_spec$Groups<-split_var[,1]
drop_spec$Students<-split_var[,2]
drop_spec$Students_Percentage<-split_var[,3]
drop_spec$Dropouts<-split_var[,4]
drop_spec$Dropouts_Percentage<-split_var[,5]
drop_spec$Annual_Dropout_Rate<-split_var[,6]
drop_spec$School_Year<-school_year[i]
drop_spec$Students<-as.numeric(drop_spec$Students)
drop_spec$Students_Percentage<-as.numeric(drop_spec$Students_Percentage)
drop_spec$Dropouts<-as.numeric(drop_spec$Dropouts)
drop_spec$Dropouts_Percentage<-as.numeric(drop_spec$Dropouts_Percentage)
drop_spec$Annual_Dropout_Rate<-as.numeric(drop_spec$Annual_Dropout_Rate)
drop_spec<-select(drop_spec,-Main)
drop_spec<-rbind(drop_spec,NA,NA)
drop_spec<-drop_spec[c(1,2,3,14,4,5,6,15,7,8,9,10,11,12,13),]
} else if (i==20){
b<-paste0("https://tea.texas.gov/sites/default/files/dropcomp_",school_year[i],".pdf")
dropout<-pdf_text(b)
p2<-strsplit(dropout, "\n")
table<-data.frame(p2[[84]][c(17:21,35:43)])
rnums<-nrow(table)
table$Main<-as.character(table[1:rnums,1])
table$Main<-trimws(table$Main, which="left")
table$Main<-stripWhitespace(table$Main)
table$Main<-gsub("(?:Bilingual or ESLa)","ESL", table$Main)
table$Main<-gsub("(?:Bilingual or English as a second language)","ESL", table$Main)
table$Main<-gsub("(?:Second Language)","ESL", table$Main)
table$Main<-gsub("(?:CTEb)","Career-Technical", table$Main)
table$Main<-gsub("(?:Gifted and talented)","GT", table$Main)
table$Main<-gsub("(?:Gifted/Talented)","GT", table$Main)
table$Main<-gsub("(?:Section 504)","504", table$Main)
table$Main<-gsub("(?:Special education)","Spec-Ed", table$Main)
table$Main<-gsub("(?:Special Education)","Spec-Ed", table$Main)
table$Main<-gsub("(?:Title I)","Title-I", table$Main)
table$Main<-gsub("(?:At-risk)","At-Risk", table$Main)
table$Main<-gsub("(?:Limited English proficient)","ELL", table$Main)
table$Main<-gsub("(?:English learner)","ELL", table$Main)
table$Main<-gsub("(?:English language learner)","ELL", table$Main)
table$Main<-gsub("(?:Foster care)","Foster-Care", table$Main)
table$Main<-gsub("(?:Overage/Not on Grade)","Overage", table$Main)
table$Main<-gsub("(?:,)","", table$Main)
split_var<-as.data.frame(ldply(strsplit(table$Main, split = " ")))
drop_spec<-data.frame(Groups=character(),Students=numeric(), Students_Percentage=numeric(), Dropouts=numeric(), Dropouts_Percentage=numeric(), Annual_Dropout_Rate=numeric(), School_Year=character())
drop_spec<-drop_spec[1:rnums,]
drop_spec$Main<-table$Main
drop_spec$Groups<-split_var[,1]
drop_spec$Students<-split_var[,2]
drop_spec$Students_Percentage<-split_var[,3]
drop_spec$Dropouts<-split_var[,4]
drop_spec$Dropouts_Percentage<-split_var[,5]
drop_spec$Annual_Dropout_Rate<-split_var[,6]
drop_spec$School_Year<-school_year[i]
drop_spec$Students<-as.numeric(drop_spec$Students)
drop_spec$Students_Percentage<-as.numeric(drop_spec$Students_Percentage)
drop_spec$Dropouts<-as.numeric(drop_spec$Dropouts)
drop_spec$Dropouts_Percentage<-as.numeric(drop_spec$Dropouts_Percentage)
drop_spec$Annual_Dropout_Rate<-as.numeric(drop_spec$Annual_Dropout_Rate)
drop_spec<-select(drop_spec,-Main)
drop_spec<-rbind(drop_spec,NA)
drop_spec<-drop_spec[c(1,2,3,15,4,5,6,7,8,9,10,11,12,13,14),]
} else if (i==21){
b<-paste0("https://tea.texas.gov/sites/default/files/dropcomp_",school_year[i],".pdf")
dropout<-pdf_text(b)
p2<-strsplit(dropout, "\n")
table<-data.frame(p2[[84]][c(17:22,36:44)])
rnums<-nrow(table)
table$Main<-as.character(table[1:rnums,1])
table$Main<-trimws(table$Main, which="left")
table$Main<-stripWhitespace(table$Main)
table$Main<-gsub("(?:Bilingual or ESLa)","ESL", table$Main)
table$Main<-gsub("(?:Bilingual or English as a second language)","ESL", table$Main)
table$Main<-gsub("(?:Second Language)","ESL", table$Main)
table$Main<-gsub("(?:CTEb)","Career-Technical", table$Main)
table$Main<-gsub("(?:Gifted and talented)","GT", table$Main)
table$Main<-gsub("(?:Gifted/Talented)","GT", table$Main)
table$Main<-gsub("(?:Section 504)","504", table$Main)
table$Main<-gsub("(?:Special education)","Spec-Ed", table$Main)
table$Main<-gsub("(?:Special Education)","Spec-Ed", table$Main)
table$Main<-gsub("(?:Title I)","Title-I", table$Main)
table$Main<-gsub("(?:At-risk)","At-Risk", table$Main)
table$Main<-gsub("(?:Limited English proficient)","ELL", table$Main)
table$Main<-gsub("(?:English learner)","ELL", table$Main)
table$Main<-gsub("(?:English language learner)","ELL", table$Main)
table$Main<-gsub("(?:Foster care)","Foster-Care", table$Main)
table$Main<-gsub("(?:Overage/Not on Grade)","Overage", table$Main)
table$Main<-gsub("(?:,)","", table$Main)
split_var<-as.data.frame(ldply(strsplit(table$Main, split = " ")))
drop_spec<-data.frame(Groups=character(),Students=numeric(), Students_Percentage=numeric(), Dropouts=numeric(), Dropouts_Percentage=numeric(), Annual_Dropout_Rate=numeric(), School_Year=character())
drop_spec<-drop_spec[1:rnums,]
drop_spec$Main<-table$Main
drop_spec$Groups<-split_var[,1]
drop_spec$Students<-split_var[,2]
drop_spec$Students_Percentage<-split_var[,3]
drop_spec$Dropouts<-split_var[,4]
drop_spec$Dropouts_Percentage<-split_var[,5]
drop_spec$Annual_Dropout_Rate<-split_var[,6]
drop_spec$School_Year<-school_year[i]
drop_spec$Students<-as.numeric(drop_spec$Students)
drop_spec$Students_Percentage<-as.numeric(drop_spec$Students_Percentage)
drop_spec$Dropouts<-as.numeric(drop_spec$Dropouts)
drop_spec$Dropouts_Percentage<-as.numeric(drop_spec$Dropouts_Percentage)
drop_spec$Annual_Dropout_Rate<-as.numeric(drop_spec$Annual_Dropout_Rate)
drop_spec<-select(drop_spec,-Main)
}
total_drop_spec<-rbind(total_drop_spec,drop_spec)
print(paste0("Finished Year: ", school_year[i]))
}
################################ Data Analysis ################################
# Re-slice the stacked scrape results into one 15-row chunk per school year:
# the scrape loop above appends 15 student-group rows per year, in year
# order, so rows 1-15 are year 1, 16-30 are year 2, etc.
x1<-total_drop_spec[1:15,]
x2<-total_drop_spec[16:30,]
x3<-total_drop_spec[31:45,]
x4<-total_drop_spec[46:60,]
x5<-total_drop_spec[61:75,]
x6<-total_drop_spec[76:90,]
x7<-total_drop_spec[91:105,]
x8<-total_drop_spec[106:120,]
x9<-total_drop_spec[121:135,]
x10<-total_drop_spec[136:150,]
x11<-total_drop_spec[151:165,]
x12<-total_drop_spec[166:180,]
x13<-total_drop_spec[181:195,]
x14<-total_drop_spec[196:210,]
x15<-total_drop_spec[211:225,]
x16<-total_drop_spec[226:240,]
x17<-total_drop_spec[241:255,]
x18<-total_drop_spec[256:270,]
x19<-total_drop_spec[271:285,]
x20<-total_drop_spec[286:300,]
x21<-total_drop_spec[301:315,]
# Bind the 21 yearly chunks side by side (one 7-column group per year).
total1<-cbind(x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16,x17,x18,x19,x20,x21)
# Drop the repeated School_Year columns (every 7th column).
# NOTE(review): the sequence runs -8,-15,-22,-29,-36 (step 7) but then jumps
# to -34 before resuming -41,-48,... — verify -34 is not a typo for -43.
total1<-total1[,c(-8,-15,-22,-29,-36,-34,-41,-48,-55,-62,-69,-76,-83,-90,-97,-104,-111,-118,-125,-132,-139)]
# Short labels for the 15 student groups, in row order.
label<-c("ESL","CTE","GT","504","Sped","Title I","At Risk","Dyslexia","ELL","Foster","Homele","Immig","Mig","Military","Overg")
#All Groups compared by Year
# NOTE(review): Dropouts_Percentage.20 is a cbind-deduplicated column name
# (the 21st occurrence) — confirm it refers to the intended year.
barplot(total1$Dropouts_Percentage.20~label)
total3<-arrange(total_drop_spec,Groups)
#1998-2019 by Group
# One data frame per student group across all years.
tot_ESL<-filter(total_drop_spec,Groups=="ESL")
tot_CTE<-filter(total_drop_spec,Groups=="Career-Technical")
tot_GT<-filter(total_drop_spec,Groups=="GT")
tot_504<-filter(total_drop_spec,Groups=="504")
tot_Spec<-filter(total_drop_spec,Groups=="Spec-Ed")
tot_TitleI<-filter(total_drop_spec,Groups=="Title-I")
tot_AtRisk<-filter(total_drop_spec,Groups=="At-Risk")
tot_Dys<-filter(total_drop_spec,Groups=="Dyslexia")
tot_ELL<-filter(total_drop_spec,Groups=="ELL")
tot_Fost<-filter(total_drop_spec,Groups=="Foster-Care")
tot_Homeless<-filter(total_drop_spec,Groups=="Homeless")
tot_Immig<-filter(total_drop_spec,Groups=="Immigrant")
tot_Mig<-filter(total_drop_spec,Groups=="Migrant")
tot_Military<-filter(total_drop_spec,Groups=="Military-connected")
tot_Overage<-filter(total_drop_spec,Groups=="Overage")
# Annual dropout-percentage trend for each group across the year range.
barplot(tot_ESL$Dropouts_Percentage~school_year)
barplot(tot_CTE$Dropouts_Percentage~school_year)
barplot(tot_GT$Dropouts_Percentage~school_year)
barplot(tot_504$Dropouts_Percentage~school_year)
barplot(tot_Spec$Dropouts_Percentage~school_year)
barplot(tot_TitleI$Dropouts_Percentage~school_year)
barplot(tot_AtRisk$Dropouts_Percentage~school_year)
barplot(tot_Dys$Dropouts_Percentage~school_year)
barplot(tot_ELL$Dropouts_Percentage~school_year)
barplot(tot_Fost$Dropouts_Percentage~school_year)
barplot(tot_Homeless$Dropouts_Percentage~school_year)
barplot(tot_Immig$Dropouts_Percentage~school_year)
barplot(tot_Mig$Dropouts_Percentage~school_year)
barplot(tot_Military$Dropouts_Percentage~school_year)
barplot(tot_Overage$Dropouts_Percentage~school_year)
# NOTE(review): this overwrites the `total3` created by arrange() above.
total3<-cbind(tot_ESL,tot_CTE,tot_GT,tot_504,tot_Spec,tot_TitleI,tot_AtRisk,tot_Dys,tot_ELL,tot_Fost,tot_Homeless,tot_Immig,tot_Mig,tot_Military,tot_Overage,check.names=T)
# Version of the data excluding the Overage group, used for the two plots below.
total2<-filter(total_drop_spec,Groups!="Overage")
ggplot(total_drop_spec, aes(x=School_Year, y =Dropouts_Percentage, label=Groups,col=Groups)) + geom_label()
ggplot(total2, aes(x=School_Year, y =Dropouts_Percentage,col=Groups)) + geom_jitter()
ggplot(total2, aes(x=School_Year, y =Dropouts_Percentage,col=Groups,label=Groups)) + geom_label()
|
9e62fa555b9ac5c6277e638e12d85dd8bad0a61a
|
40962c524801fb9738e3b450dbb8129bb54924e1
|
/DAY - 5/Class/LineChartColourful.R
|
da0eb2436fa8ea8450359f2aa1bd10f3c028ff39
|
[] |
no_license
|
klmsathish/R_Programming
|
628febe334d5d388c3dc51560d53f223585a0843
|
93450028134d4a9834740922ff55737276f62961
|
refs/heads/master
| 2023-01-14T12:08:59.068741
| 2020-11-15T13:23:31
| 2020-11-15T13:23:31
| 309,288,498
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 286
|
r
|
LineChartColourful.R
|
# Line chart demo: draw one series, then overlay a second for comparison.
# plot()/lines() require numeric input (no strings).
marks <- c(7, 12, 28, 3, 41)
age <- c(14, 7, 6, 19, 3)
# Base series in red, with connected points ("o"), labels and title.
plot(marks, type = "o", col = "red", xlab = "marks", ylab = "Age", main = "Marks Vs Age")
# Overlay the second series on the same axes in blue for comparison.
lines(age, type = "o", col = "blue")
|
598cfe56ca964a7de5cc71cd7ecf017c4b7f1dd1
|
abea0b5d000d7c01d390eeb615427bc0322aa30f
|
/src/modify_asos/R_asos_pred.R
|
fc954d48612c38621c769051b30885b6075037a0
|
[] |
no_license
|
janmandel/firewx-evaluation
|
5e176d8762f34b4e88a9446f1d898b3698abc5e5
|
51ca3c4a1c63d8c6ba00e910a87f4c87c2c0ac53
|
refs/heads/master
| 2020-05-05T01:10:49.662013
| 2017-08-24T17:40:06
| 2017-08-24T17:40:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,517
|
r
|
R_asos_pred.R
|
############# ASOS FORECAST DATA - EXTRACT LANDFIRE / FIX FORMATTING ############
### Set needed packages
library(geosphere)
library(raster)
library(rgdal)
library(sp)
library(data.table)
library(plyr)
### Set Working Directory
# NOTE(review): hard-coded absolute path; output CSVs written at the end of
# the loop below land in this directory.
setwd("/home/wpage/Documents/ASOS")
### Read-in Observed lat/long and Landfire data
asos.obs = read.csv("/home/wpage/Documents/Output/Misc/landfire_asos.csv")
LandFire = asos.obs
### Read-in and fix FORECAST weather data / convert rows to columns / fix lat long / fix times
## Get forecast data for each month
files = list.files("/home/wpage/Documents/Output/asos")
# Read each monthly CSV into its own object named after the file; the main
# loop below retrieves them again with get(files[j]).
for (i in 1:length(files)) {
temp = data.table(read.csv(paste("/home/wpage/Documents/Output/asos/",files[i],sep=""),header=TRUE))
assign(files[i],temp) }
## Start for loop to work with each file (month) one at a time
for (j in 1:length(files)) {
## Fix misc variables
temp = get(files[j])
# Forecast horizons to discard; only short-horizon records are kept.
drop = c("30-36 hour acc fcst","6-12 hour acc fcst","2-8 hour acc fcst","3-9 hour acc fcst",
"4-10 hour acc fcst","5-11 hour acc fcst","30 min fcst","90-96 hour acc fcst","150-156 hour acc fcst",
"210-216 hour acc fcst","270-276 hour acc fcst","330-336 hour acc fcst")
temp = subset(temp, !Forecast %in% drop)
# Columns were read as factors/strings; coerce to numeric via character.
temp$Value = as.numeric(as.character(temp$Value))
temp$Long = as.numeric(as.character(temp$Long))
temp$Lat = as.numeric(as.character(temp$Lat))
Fct_Old = temp
Fct_Old$X = NULL
## Match forecast lat/longs to station lat/long and add station id to Fct data
ObsLoc1 = data.frame(unique(asos.obs[,c("station_id","lon","lat")]))
ObsLoc = as.data.frame(cbind(ObsLoc1$lon,ObsLoc1$lat)) #get observed lat/longs
# Convert a 0-360 degree forecast longitude to the -180..180 convention
# used by the observed station data (e.g. 245 -> -115).
FixLong = function(long) {
  long - 360
}
Fct_Old$Long = unlist(lapply(Fct_Old$Long,FixLong)) #Fix long data
FctLoc = unique(data.frame(Fct_Old$Long,Fct_Old$Lat)) #Get fixed forecast lat/long data
FctLoc = FctLoc[complete.cases(FctLoc),] # remove NAs
# For each observed station location, find the nearest forecast grid point
# (geodesic distance on the WGS84 ellipsoid) and record the pairing.
Match = data.frame(Obs.lon=NA,Obs.lat=NA,Fct.lon=NA,Fct.lat=NA) #Find the match to each lat/long
for (i in 1:length(ObsLoc[,2])) { #Create data frame that matches lat/longs
distance = distGeo(ObsLoc[i,],FctLoc,a=6378137, f=1/298.257223563)
# NOTE(review): grep(min(distance), distance) matches the minimum by string
# pattern; which.min(distance) would be the exact numeric lookup — confirm.
MinDist = grep(min(distance),distance)
Match[i,1] = ObsLoc[i,1]
Match[i,2] = ObsLoc[i,2]
Match[i,3] = FctLoc[MinDist,1]
Match[i,4] = FctLoc[MinDist,2] }
# Relabel each matched forecast point with its station's coordinates and id.
# NOTE(review): Fct.n is grown with rbind inside the loop (quadratic copy
# cost); preallocating or rbind-ing a list at the end would be faster.
Fct.n = data.frame() #Change data to appropriate lat/long
for (i in 1:length(Match[,3])) {
try = subset(Fct_Old,Long == Match[i,3] & Lat == Match[i,4])
try$Long = Match[i,1]
try$Lat = Match[i,2]
try$station_id = ObsLoc1[i,1]
Fct.n = rbind(try,Fct.n)}
Fct_Old = Fct.n
## Work on precip
# Keep only positive 1-7 hour accumulated precipitation records.
extract = data.frame(subset(subset(Fct_Old, Variable == "APCP"),Forecast == "1-7 hour acc fcst" & Value > 0))
# Rewrite "YYYYMMDDHH" date strings into "YYYY-MM-DD HH" so strptime can parse them.
for (i in 1:length(extract$Date)) {
extract$Date[i] = paste(substr(extract$Date[i],1,4),"-",substr(extract$Date[i],5,6),
"-",substr(extract$Date[i],7,8)," ",substr(extract$Date[i],9,10),sep="") }
extract$Date = strptime(extract$Date, "%Y-%m-%d %H",tz="UTC")
# Seed data frames (one placeholder row each) that the loops below rbind onto.
precip = data.frame(datetime=as.POSIXlt("2014-01-02 05", format="%Y-%m-%d %H",tz="UTC"),lon=NA,lat=NA,precip=NA,stringsAsFactors=FALSE)
precip.1 = data.frame(datetime=as.POSIXlt("2014-01-02 05", format="%Y-%m-%d %H",tz="UTC"),lon=NA,lat=NA,precip=NA,stringsAsFactors=FALSE)
#Break into smaller pieces
# Split `extract` into two disjoint halves so each rbind loop below works
# on a smaller piece.
# BUGFIX: ':' binds tighter than '/', so the original
#   extract[length(extract$Date)/2:length(extract$Date),]
# selected rows n/(2:n) — a shrinking sequence of fractional indices that
# re-used first-half rows — instead of the intended second half (n/2):n.
# seq_len() also handles the empty case safely (1:(n/2) would yield c(1,0)).
half = seq_len(floor(length(extract$Date) / 2))
extract1 = extract[half,]
extract2 = extract[setdiff(seq_len(length(extract$Date)), half),]
# Expand each 1-7 hour accumulation into six hourly rows (offsets of 1-6
# hours, in seconds) with the total spread evenly (Value / 6).
# NOTE(review): both loops grow their result with rbind (quadratic copies).
for (i in 1:length(extract1$Date)) {
pre = data.frame(datetime=as.POSIXlt("2014-01-02 05", format="%Y-%m-%d %H",tz="UTC"),lon=NA,lat=NA,precip=NA,stringsAsFactors=FALSE)
pre[1,1] = extract1$Date[i] +3600
pre[2,1] = extract1$Date[i] +7200
pre[3,1] = extract1$Date[i] +10800
pre[4,1] = extract1$Date[i] +14400
pre[5,1] = extract1$Date[i] +18000
pre[6,1] = extract1$Date[i] +21600
pre$lon = extract1$Long[i]
pre$lat = extract1$Lat[i]
pre$precip = (extract1$Value[i]) / 6
precip = rbind(pre,precip) }
# Same expansion for the second half of the records.
for (i in 1:length(extract2$Date)) {
pre = data.frame(datetime=as.POSIXlt("2014-01-02 05", format="%Y-%m-%d %H",tz="UTC"),lon=NA,lat=NA,precip=NA,stringsAsFactors=FALSE)
pre[1,1] = extract2$Date[i] +3600
pre[2,1] = extract2$Date[i] +7200
pre[3,1] = extract2$Date[i] +10800
pre[4,1] = extract2$Date[i] +14400
pre[5,1] = extract2$Date[i] +18000
pre[6,1] = extract2$Date[i] +21600
pre$lon = extract2$Long[i]
pre$lat = extract2$Lat[i]
pre$precip = (extract2$Value[i]) / 6
precip.1 = rbind(pre,precip.1) }
# Recombine the two halves; the placeholder seed rows remain in the result
# (presumably removed later by the Fct[-c(1,2),] step — verify).
precip = rbind(precip.1,precip)
## Change rows to columns according to type
# One subset per weather variable at the 1-hour forecast horizon, then
# merged on Date + station into a wide table.
Temp = subset(Fct_Old, Variable == "TMP" & Forecast == "1 hour fcst")
RH = subset(Fct_Old, Variable == "RH" & Forecast == "1 hour fcst")
Wind = subset(Fct_Old, Variable == "WIND" & Forecast == "1 hour fcst")
WDir = subset(Fct_Old, Variable == "WDIR" & Forecast == "1 hour fcst")
TCDC = subset(Fct_Old, Variable == "TCDC" & Forecast == "1 hour fcst")
t1 = merge(Temp,RH,by = c('Date','station_id'),all=TRUE)
t2 = merge(t1,Wind,by = c('Date','station_id'),all=TRUE)
# Value.x = temperature, Value.y = RH, Value = wind (merge suffixes).
t21 = data.frame(t2$Date,t2$station_id,t2$Long,t2$Lat,t2$Value.x,t2$Value.y,t2$Value)
colnames(t21) = c("Date", "station_id","Long","Lat","tempC","rh","windKn")
t3 = merge(t21,WDir,by = c('Date','station_id'),all=TRUE)
t4 = merge(t3,TCDC,by = c('Date','station_id'),all=TRUE)
# Value.x = wind direction, Value.y = total cloud cover.
Fct = data.frame(t4$Date,t4$station_id,t4$Long,t4$Lat,t4$tempC,t4$rh,t4$windKn,
t4$Value.x,t4$Value.y)
colnames(Fct) = c("datetime","station_id","lon","lat","tempK","rh","windKn","wind_direction_deg",
"cloud_cover_percent")
## Convert forecast Date to date/time / increase time by 1 hour (match obs/forecast)
# Reformat a forecast timestamp "YYYYMMDDHH" into "YYYY-MM-DD HH" so it can
# be parsed by the strptime() call below.
fixDate = function(datetime) {
  paste0(substr(datetime, 1, 4), "-", substr(datetime, 5, 6), "-",
         substr(datetime, 7, 8), " ", substr(datetime, 9, 10))
}
# Parse timestamps and shift forward one hour so the 1-hour forecast lines
# up with the observation valid time.
Fct$datetime = unlist(lapply(Fct$datetime,fixDate)) #Fix datetime
Fct$datetime = strptime(Fct$datetime, "%Y-%m-%d %H",tz="UTC")
Fct$datetime = Fct$datetime +3600 # add 1 hour to forecast time to match observed time
## Convert Kelvin to celsius
# Convert a temperature from Kelvin to degrees Celsius.
KtoC = function(tempK) {
  tempK - 273.15
}
Fct$tempK = unlist(lapply(Fct$tempK,KtoC)) #Fix temp data
colnames(Fct)[5] = "air_temp_c"
## Convert wind speed from knots to mps
Fct$windKn = Fct$windKn * 0.514444
colnames(Fct)[7] = "wind_speed_mps"
## Merge precip data & convert precip from inches to mm
Fct = merge(Fct,precip,by = c('datetime','lon','lat'),all.x=TRUE,all.y=TRUE)
# Drop the first two rows (presumably the placeholder seed rows carried in
# from the precip frames — TODO confirm).
Fct = Fct[-c(1,2), ]
# Hours with no accumulation record get zero precipitation.
Fct$precip[is.na(Fct$precip)] = 0
Fct$precip = Fct$precip * 25.4
colnames(Fct)[10] = "precip_mm"
## Add columns data_type, station_type
Fct$data_type = "pred"
Fct$station_type = "ndfd"
### Get LANDFIRE data attach to Forecast data
# Drop non-burnable fuel model codes before joining on station.
noburn.fm = c(-9999,91,92,93,98,99)
LandFire = subset(LandFire, !(FM40 %in% noburn.fm))
Fct = merge(Fct,LandFire,by=c('station_id'),all=TRUE)
# Remove the pre-merge lon/lat columns and rename the LANDFIRE pair.
Fct = Fct[,-c(3,4)]
colnames(Fct)[c(11,12)] = c("lon","lat")
### Convert 10 m wind to 20 ft; 10m wind in m/s, Canopy height needs to be in meters
# Log-wind-profile conversion of a 10 m wind speed to 20 ft (6.1 m).
#   m10Wind: wind speed at 10 m above the canopy (m/s)
#   CanopyH: canopy height (m)
#   FuelMod: FM40 fuel model code, used to pick a roughness length
# Returns the 20 ft wind speed (m/s). NA fuel codes yield NA, matching the
# original elementwise comparisons.
Wind10to20_mps = function(m10Wind,CanopyH,FuelMod) {
# Roughness length z.m (m): grass / grass-shrub fuels (101-109, 121-124)
# 0.01, shrub fuels (141-149) 0.43, all other codes 1.
z.m = ifelse(is.na(FuelMod), NA_real_,
             ifelse(FuelMod %in% c(101:109, 121:124), 0.01,
                    ifelse(FuelMod %in% 141:149, 0.43, 1)))
d = 0.65*CanopyH  # zero-plane displacement height (m)
# Friction velocity from the 10 m measurement (von Karman constant 0.4).
u.star = (m10Wind*0.4) / log(((10+CanopyH) - d)/z.m)
# Evaluate the same log profile at 20 ft (6.1 m) above the canopy.
newWind = (u.star/0.4)*log(((6.1+CanopyH)-d)/z.m)
return(newWind) }
Fct$wind_speed20ft_mps = mapply(Wind10to20_mps,Fct$wind_speed_mps,Fct$CH_m,Fct$FM40)
### Convert 20ft wind to mid-flame wind (per Andrews 2012 and Finney 2004)
# Lookup table: fuel bed depth (FBD, feet) for each FM40 fuel model code (FM).
# The two vectors are parallel; FBD_ft pairs them for use by Wind20ft_Mid_mps.
FBD = c(0.4,1,2,2,1.5,1.5,3,4,5,0.9,1.5,1.8,2.1,1.0,1.0,2.4,3,6,2,6,3,4.4,0.6,1,1.3,.5,
1,.2,.2,.3,.4,.6,.3,.4,.3,.6,1,1,1.2,2.7)
FM = c(101,102,103,104,105,106,107,108,109,121,122,123,124,141,142,143,144,145,146,147,
148,149,161,162,163,164,165,181,182,183,184,185,186,187,188,189,201,202,203,204)
FBD_ft = data.frame(cbind(FM,FBD))
# Reduce a 20 ft wind speed to mid-flame wind speed via a wind adjustment
# factor (WAF; Andrews 2012, Finney 2004).
#   Wind20ft: wind speed at 20 ft (m/s); FuelMod: FM40 fuel model code;
#   CC: canopy cover (percent); CanopyH: canopy height (m).
# Relies on the FBD_ft lookup table (fuel bed depth in ft per FM40 code)
# defined just above.
Wind20ft_Mid_mps = function(Wind20ft,FuelMod,CC,CanopyH) {
# BUGFIX: use match() for an exact lookup; grep() treated FuelMod as a
# regular expression and did string pattern matching.
FH_ft = FBD_ft$FBD[match(FuelMod, FBD_ft$FM)]
# Unsheltered WAF (log-profile form, fuel bed depth in ft).
un.WAF = 1.83 / log((20+0.36*FH_ft)/(0.13*FH_ft))
# Crown volume fraction from canopy cover.
f = (CC/100)*(pi/12)
# Sheltered (under-canopy) WAF; formula heights are in feet, so convert
# canopy height from m (x 3.28).
# BUGFIX: the log argument is the ratio (20 + 1.18*H) / (0.43*H); the
# original computed log(20 + (1.18*H)/(0.43*H)) = log(20 + 2.744), a
# constant independent of canopy height.
H_ft = 3.28*CanopyH
sh.WAF = 0.555 / (sqrt(f*H_ft) * log((20 + 1.18*H_ft)/(0.43*H_ft)))
# Use the sheltered form when canopy cover exceeds 5 percent.
WAF = ifelse(CC>5,sh.WAF,un.WAF)
mid_wind = Wind20ft * WAF
return(mid_wind) }
Fct$wind_speedMid_mps = mapply(Wind20ft_Mid_mps,Fct$wind_speed20ft_mps,Fct$FM40,Fct$CC_percent,Fct$CH_m)
### Clean up output / remove any duplicates
Fct$wind_speed_mps = NULL
# Fix the output column order.
Fct = Fct[c("station_id","station_type","data_type","lon","lat","datetime",
"air_temp_c","rh","wind_speed20ft_mps","wind_speedMid_mps","wind_direction_deg",
"cloud_cover_percent","precip_mm","FM40","asp_deg","elev_m","slope_deg",
"CBD_kgm3","CBH_m","CC_percent","CH_m")]
## Remove any duplicated rows
# Keep the first row per (station_id, datetime) pair.
# NOTE(review): n.df is grown with rbind inside the loop (quadratic copies).
n.df = data.frame()
stn = unique(Fct$station_id)
for (i in 1:length(stn)) {
try = subset(Fct, station_id == stn[i])
t = subset(try,!duplicated(try$datetime))
n.df = rbind(t,n.df) }
Fct = n.df
# Written relative to the setwd() directory, named after the input file.
write.csv(Fct,file=files[j])
#Fct$datetime = as.character(Fct$datetime)
#out = rbind.fill(Fct,asos.obs)
### End for loop for all files
# Overwrite the raw per-file object with the cleaned result.
assign(files[j],Fct) }
|
f19c197e31d2b6d9a9f323c8d5dd85aa3142d8e8
|
8bebde68b834700de79052db26f459dd8636fec7
|
/R/hvalir.R
|
48b98d0aecf25d68280e49eccd5a939f39ec8ac0
|
[] |
no_license
|
vonStadarhraun/mar
|
da025e84d86bba2db0a46c1f6f1917d98878535f
|
8d56708739faf9cd6eed98309c8df9f5f769416d
|
refs/heads/master
| 2022-11-30T19:05:54.908761
| 2020-08-12T12:51:32
| 2020-08-12T12:51:32
| 286,978,909
| 0
| 0
| null | 2020-08-12T10:00:47
| 2020-08-12T10:00:46
| null |
UTF-8
|
R
| false
| false
| 585
|
r
|
hvalir.R
|
#' Hvalir
#'
#' Pull the whale view \code{hvalir.hvalir_v}, cleaning the caught-position
#' coordinate columns (comma decimal separators, placeholder values) and
#' adding a catch-year column and a fetus indicator.
#'
#' @param con Oracle database connection
#'
#' @name hvalir_hvalir
#'
#' @return A lazy SQL query (tbl)
#'
#' @export
#'
hvalir_hvalir <- function(con) {
  tbl_mar(con, 'hvalir.hvalir_v') %>%
    # Coordinates are stored as text with ',' decimals; '-' marks missing
    # longitude and NULL latitude defaults to 0 before conversion.
    dplyr::mutate(veiddur_breidd = to_number(replace(nvl(veiddur_breidd,0),',','.')),
                  veiddur_lengd = to_number(replace(decode(veiddur_lengd,'-',NULL,veiddur_lengd),',','.'))) %>%
    # Restrict to the view's original columns.
    # BUGFIX: use the `con` argument here; the original referenced a global
    # connection object `mar` instead of the parameter.
    # NOTE(review): dplyr::select_() is deprecated; consider
    # dplyr::select(dplyr::all_of(...)) once verified against dbplyr.
    dplyr::select_(.dots = colnames(tbl_mar(con, "hvalir.hvalir_v"))) %>%
    # ar = catch year; er_fostur = 1 when the record number ends in 'F'
    # (fetus), else 0.
    dplyr::mutate(ar = to_char(dags_veidi,'yyyy'),
                  er_fostur = ifelse(substr(radnumer,-1,0)=='F',1,0))
}
|
ef672cf98a55274e2a56efafc28a0ef2f6ab2a93
|
3b107075ed5cf4c005d62c6fd13d6c42bd3e96ef
|
/R/zTDGSpill.R
|
25511e00b1b3a84c2f756ce77ea689b80c68290c
|
[] |
no_license
|
ryankinzer/pitph2
|
14f9a5a6683e2598b16639e98335ae6ca8d8e50c
|
b1edbe76a3866e07ead26e3c1a2233f1cadbf8a0
|
refs/heads/master
| 2020-03-27T09:26:38.893562
| 2018-10-09T20:11:11
| 2018-10-09T20:11:11
| 146,341,990
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,473
|
r
|
zTDGSpill.R
|
#------------------------------------------------------------------------------
# The function estimates the amount of TDG generated from spill. The output
# doesn't represent the amount of TGD being reported at monitoring sites.
# Monitoring site TDG is calculated with the companion function zTDGMON().
# Both TDG functions were originally written by Nick Beer at Columbia Basin
# Research (GasGen.R). The current version is altered to handle vectorized
# inputs and to include the lookup data within the function, so we don't need
# to call them from a .csv file.
#
# To vectorize I removed the "if" statements which demand a rowwise/for loop
# procedure, and instead combine the inputs and parameters so the
# computation runs across entire input vectors at once.
# Author and Source: Nick Beer
# Modified by Ryan Kinzer
#------------------------------------------------------------------------------
# Estimate total dissolved gas (TDG) generated by spill at each project.
#   project_code: character vector of project codes (e.g. "BON", "MCN")
#   flow: total river flow (same length/recycled with project_code)
#   spill_prop: proportion of flow spilled
# Returns a numeric vector of TDG-in-spill values, in input order.
# NOTE(review): inner_join silently drops rows whose project_code has no
# entry in coef_df, so the result can be shorter than the input — confirm
# callers expect that.
zTDGSpill <- function(project_code, flow, spill_prop){
# id preserves the caller's row order through the join (restored by arrange).
df <- tibble(id = 1:length(project_code), project_code, flow, spill_prop)
# Per-project regression coefficients; EQN selects the model form (62 =
# linear in spill flow, 30 = exponential).
coef_df <- tibble(project_code = c("BON", "TDA", "JDA", "MCN", "PRD", "WAN", "RIS", "RRH", "WEL", "IHR", "LMN", "LGS", "LWG", "CHJ", "DWR"),
EQN = c(62, 62, 62, 62, 30, 62, 62, 30, 62, 62, 30, 62, 62, 30, 30),
D0 = c(16.16, 21.9, 11.04, 12.38, 34.9, 17.63, 21.6, 24.47, 20.56, 11.15, 22.12, 9.304, 7.007, 20.92, 36.65),
D1 = c(0.02983, 0.02109, 0.05969, 0.04007, -16.23, 0.08495, 0.007694, -47.83, 0.05935, 0.1009, -11.4, 0.1675, 0.2261, -14.74, -40.22),
D2 = c(0, 0, 0, 0, -0.002783, 0, 0, -0.2692, 0, 0, -0.03437, 0, 0, -0.01815, -0.3211))
Gspill <- inner_join(df, coef_df, by = 'project_code') %>%
mutate(Gspill = ifelse(EQN == 62,
D0 + D1*flow*spill_prop,
D0 + D1*exp(D2*flow*spill_prop))) %>%
arrange(id) %>%
pull(Gspill)
return(Gspill)
# CAUTION: It is possible to ask for a flow that exceeds the powerhouse's hydraulic capacity.
# These formulas will compute a gas level, but it will be impossible to attain in the field.
# Extra flow above the hydraulic capacity SHOULD be converted into spill.
# This is not trapped in these computations. For example, IHR powerhouse capacity is 106 KCFS.
# A flow of 200 with spill fraction of 0.15 will never happen there.
# TEST
# Compute the expected TDG in the spill water.
#print(zTDGSpill("MCN",spill=0.45, flow=100))
}
|
2d57f739aad3688e089189c6d8987c0d15c85dca
|
f7e93d31f57542cf25fa0894b4a69355f40469a0
|
/man/theme_timeline.Rd
|
60f994c67b9d6b160a2bd378edda2f8b6f8e2096
|
[
"MIT"
] |
permissive
|
kamenbliznashki/noaaeq
|
92a239b7c4e6aa06c3d78fab1ff2b4ed88e92c45
|
ecdca2eb4810196e1e6076c5a5df901e2d41ab1e
|
refs/heads/master
| 2020-12-06T10:21:08.917068
| 2020-01-08T02:28:32
| 2020-01-08T02:28:32
| 232,437,089
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 609
|
rd
|
theme_timeline.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geom_timeline.R
\name{theme_timeline}
\alias{theme_timeline}
\title{Custom theme for use with the earthquake timeline plots}
\usage{
theme_timeline()
}
\description{
The theme properly formats the axes, background and gridlines.
}
\examples{
\dontrun{
df \%>\%
ggplot() +
geom_timeline_label(aes(x=DATE, group=COUNTRY, size=EQ_PRIMARY,
color=DEATHS, label=LOCATION_NAME)) +
scale_y_continuous(limits = c(0, 4)) +
labs(y='', size='Richter scale value', color = '# deaths') +
theme_timeline()
}
}
|
b198d7658f48c81c7ffeac61b925bc5f4d294e76
|
78b6410be67a167fde91abb6a039847a45ce46cc
|
/man/n.Rd
|
936a4db3d8837da4756b750a42198aaec5ac5bd8
|
[] |
no_license
|
reyesem/IntroAnalysis
|
fea3283abc4bd995339acfc7e74f2193812317e2
|
54cf3930879303fb128faf81bd1710b385300d6c
|
refs/heads/master
| 2023-07-12T08:45:27.546965
| 2023-06-29T22:07:02
| 2023-06-29T22:07:02
| 123,822,392
| 0
| 0
| null | 2022-08-15T14:34:13
| 2018-03-04T19:42:24
|
HTML
|
UTF-8
|
R
| false
| true
| 317
|
rd
|
n.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/variable_summaries.R
\name{n}
\alias{n}
\title{Compute sample size.}
\usage{
n(x)
}
\arguments{
\item{x}{any vector.}
}
\description{
This is just an alias for \code{length(x)}.
}
\examples{
summarize_variable(am ~ 1, data = mtcars, n)
}
|
026481c9e465b343fb6f067a5c184a04f58f3b24
|
c46a6ff80331d7f47bc3c379b7b6f51644a3925b
|
/Chapter_07/customTests.R
|
5db1d26f0be81fbd374a2eb220993307b8955d55
|
[] |
no_license
|
elmstedt/stats20_swirl
|
6bb215dc600decaf03ecf441cf0e28bdbd525536
|
6de97f3613f941c5c39a85b9df4f26fa3b62e766
|
refs/heads/master
| 2021-05-22T02:29:59.080370
| 2020-10-06T07:42:50
| 2020-10-06T07:42:50
| 252,929,124
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,076
|
r
|
customTests.R
|
# Put custom tests in this file.
# Uncommenting the following line of code will disable
# auto-detection of new variables and thus prevent swirl from
# executing every command twice, which can slow things down.
# AUTO_DETECT_NEWVAR <- FALSE
# However, this means that you should detect user-created
# variables when appropriate. The answer test, creates_new_var()
# can be used for the purpose, but it also re-evaluates the
# expression which the user entered, so care must be taken.
# Record the lesson start time in swirl's state environment so that
# stop_timer() can later report elapsed time.  Always returns TRUE so it
# can serve as a swirl answer test.
start_timer <- function() {
  state <- get("e", parent.frame())
  state$`__lesson_start_time` <- now()
  TRUE
}
# When the user's last console command was stopwatch(), print the time
# elapsed since start_timer() ran.  Always returns TRUE (swirl answer test).
stop_timer <- function() {
  state <- get("e", parent.frame())
  if (deparse(state$expr) == "stopwatch()") {
    began <- state$`__lesson_start_time`
    ended <- now()
    print(as.period(interval(began, ended)))
  }
  TRUE
}
# Get the swirl state.
# Relies on swirl's task-callback architecture: while swirl is running, its
# callback function is at position 1 of the call stack, and the state
# environment named `e` lives in that callback's enclosing environment.
getState <- function(){
  # Whenever swirl is running, its callback is at the top of its call stack.
  # Swirl's state, named e, is stored in the environment of the callback.
  environment(sys.function(1))$e
}
# Return the value the user most recently entered directly or computed
# at the R console, as recorded in swirl's state.
getVal <- function(){
  getState()[["val"]]
}
# Return the last expression the user entered at the R console,
# as recorded in swirl's state.
getExpr <- function(){
  getState()[["expr"]]
}
# Return the current lesson's log from swirl's state.
getLog <- function(){
  getState()[["log"]]
}
# Encode the current lesson's log and (optionally) open a pre-filled Google
# form so the student can submit it.  Reads e$val, which is expected to hold
# the student's "Yes"/"No" answer to the submit prompt.
# Side effects: writes "logfile - <lesson>.txt" in the working directory,
# opens a file viewer, and may launch the default browser.
submit_log <- function(...){
  # System info is prepended to the log so submissions are attributable.
  si <- as.data.frame(t(Sys.info()))
  e <- get("e", parent.frame())
  form_link <- "https://docs.google.com/forms/d/e/1FAIpQLScJ2lYafz7lqnhnD9Z7Dw-PZLfhhC3IihZKWkURFGcMseYeGg/viewform?entry.1752962042"
  form_link2 <- "http://bit.ly/stats20_19f_swirl"
  # The form's entry parameter must end in "=" so the encoded log can be
  # appended directly onto the URL below.
  if(!grepl("=$", form_link)){
    form_link <- paste0(form_link, "=")
  }
  # p(): right-pad vector x to length `p` with fill value f, so the ragged
  # per-question log fields can be bound into one rectangular data frame.
  p <- function(x, p, f, l = length(x)){if(l < p){x <- c(x, rep(f, p - l))};x}
  temp <- tempfile()
  log_ <- getLog()
  # Longest field determines the number of rows in the log table.
  nrow_ <- max(unlist(lapply(log_, length)))
  log_tbl <- data.frame(user = rep(log_$user, nrow_),
                        course_name = rep(log_$course_name, nrow_),
                        lesson_name = rep(log_$lesson_name, nrow_),
                        question_number = p(log_$question_number, nrow_, NA),
                        correct = p(log_$correct, nrow_, NA),
                        attempt = p(log_$attempt, nrow_, NA),
                        skipped = p(log_$skipped, nrow_, NA),
                        datetime = p(as.POSIXct.numeric(log_$datetime, origin="1970-01-01"), nrow_, NA),
                        stringsAsFactors = FALSE)
  # write.csv(log_tbl, file = temp, row.names = FALSE)
  # Two write.table calls: system info block first, then the log rows.
  suppressWarnings(write.table(si, file = temp, row.names = FALSE, col.names = TRUE, sep = ",")) # drop if not working
  suppressWarnings(write.table(log_tbl, file = temp, row.names = FALSE, col.names = TRUE, append = TRUE, sep = ","))
  # Base64-encode the CSV so it can travel inside a URL / form field.
  # NOTE(review): base64encode() is not defined in this file and is not base R
  # (likely jsonlite or base64enc) -- confirm which package swirl attaches.
  encoded_log <- base64encode(temp)
  logname <- paste0("logfile - ", log_$lesson_name, ".txt")
  fileConn<-file(logname)
  writeLines(encoded_log, fileConn)
  close(fileConn)
  if(e$val == "Yes"){
    # Student agreed to submit: show the log and open the pre-filled form.
    file.show(logname, title = "Lesson Log")
    browseURL(paste0(form_link, encoded_log))
    cat(paste0("If the submission page does not appear or the lesson log is not completely filled, you SHOULD submit it yourself.\nYou may do so by copying the encoded log record located in:\n\n",
               logname,
               "\n\nand pasting its contents into the form at:\n\n",
               form_link2, "\n\n"))
  } else {
    # Student declined: still show the log and explain manual submission.
    file.show(logname, title = "Lesson Log")
    cat(paste0("You have chosen not to submit, unless this is for a good reason (e.g. you are just repeating lessons for practice) you SHOULD submit it yourself.\nYou may do so by copying the encoded log record located in:\n\n",
               logname,
               "\n\nand pasting its contents into the form at:\n\n",
               form_link2, "\n\n"))
  }
}
|
6ec41fb7d5a6a83ed3a7aae73ecfc3f1dae4526f
|
96a7892b0ba2eb4e26979911642d725ce0225fae
|
/HW2/HW2.R
|
8e30dc7bd1d47253b1e8439e3be44e00d2f77bcb
|
[] |
no_license
|
sachinshindegit/R-Programming
|
868e2052bfed62a51e1155d71e2ec25228723ec0
|
bd55286e79be0e675bd72fdcfb765b88f3989ba5
|
refs/heads/master
| 2021-01-10T04:35:27.053033
| 2016-01-10T21:10:14
| 2016-01-10T21:10:14
| 43,481,702
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 161
|
r
|
HW2.R
|
# ISLR-style LOOCV exercise: simulate quadratic data and estimate the test
# error of a linear fit via leave-one-out cross-validation.
library(boot)

set.seed(1)
Y <- rnorm(100)  # kept even though overwritten: it advances the RNG stream
                 # so the X and Y draws below match the original exercise
X <- rnorm(100)
Y <- X - 2 * X^2 + rnorm(100)
plot(X, Y)

set.seed(1)
Data <- data.frame(X, Y)
# Fit with `data = Data` so cv.glm() can refit the model on subsets of Data;
# the original fit against the global vectors, which makes the resampled
# refits silently reuse the full data.
z <- glm(Y ~ X, data = Data)
cv.glm(Data, z)$delta[1]  # raw LOOCV estimate of prediction error
|
96b76019a45035db539b7cfd23cde55311116efa
|
bebba2b371a41e0fae55e2b5853a2870f9e6814a
|
/archive/isotria_lifehistoryfigs.R
|
333a5c29873cf15f8592a5d148e86803bfb0f534
|
[] |
no_license
|
AileneKane/isotria
|
fa8015a69e1e80c095d598625d762ddcb2700d2a
|
230ee3e8f63cc450ced49a0f6932c4558f3c0c02
|
refs/heads/master
| 2021-08-28T01:38:51.555826
| 2021-08-16T23:24:41
| 2021-08-16T23:24:41
| 66,016,437
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 32,866
|
r
|
isotria_lifehistoryfigs.R
|
#Figures and individual life history traits estimated from posterior samples of multistate model for Isotria medioloides Alton NH population
#Data provided by Bill Brumback
#Coding by Ailene Ettinger with help from Andy Roly and Elizabeth Crone
#this file has code for all figures in the manuscript, and for estimating life history traits
#(lifespan, proportion dormant, length of dormancy)
#setwd("~/isotria") #at usgs
#setwd("/Users/aileneettinger/git/isotria/analyses")
# NOTE(review): rm(list=ls()) wipes the user's entire workspace when this
# script is sourced -- consider removing and relying on a fresh session.
rm(list=ls())
options(stringsAsFactors=FALSE)
###Figure 1
# Read the monitoring data: one row per individual per year.
isoinds<-read.csv("Isotria_Stage3_2016.csv", header=T)#
isoinds<-isoinds[-which(isoinds$UniqueID=="X-01-244"),]#individual has to be removed because of errors in its monitoring
#head(isoinds)
#Add column for emergent/nonemergent (1 = at least one stem observed)
isoinds$Emerg<-NA
isoinds[which(isoinds$TotNoStems>0),]$Emerg=1
isoinds[which(isoinds$TotNoStems==0),]$Emerg=0
##Select out just groups X (control) and Y (cleared) for this analysis
isoindsX=isoinds[isoinds$Group=="X",]
isoindsY=isoinds[isoinds$Group=="Y",]
isoindsXY=isoinds[isoinds$Group=="X"|isoinds$Group=="Y",]
isoindsXY$UniqueID=factor(isoindsXY$UniqueID)
#dim(isoindsXY)
isoindsXY$Group=factor(isoindsXY$Group)
##get isotria data into format such that 1=veg, 2=rep and 3=not seen
#to do this, Add column for reproductive (=arrested, flowering, or fruiting)/not rep
isoindsXY$Repro<-NA
isoindsXY[which(isoindsXY$NoFrStems>0|isoindsXY$NoFlStems>0|isoindsXY$NoArrStems>0),]$Repro=1
isoindsXY[which(isoindsXY$NoFrStems==0&isoindsXY$NoFlStems==0&isoindsXY$NoArrStems==0),]$Repro=0
# For the per-group data frames, dormant ("D") plants are excluded from the
# non-reproductive class.
isoindsX$Repro<-NA
isoindsX[which(isoindsX$NoFrStems>0|isoindsX$NoFlStems>0|isoindsX$NoArrStems>0),]$Repro=1
isoindsX[which(isoindsX$NoFrStems==0&isoindsX$NoFlStems==0&isoindsX$NoArrStems==0&isoindsX$Stage!="D"),]$Repro=0
isoindsY$Repro<-NA
isoindsY[which(isoindsY$NoFrStems>0|isoindsY$NoFlStems>0|isoindsY$NoArrStems>0),]$Repro=1
isoindsY[which(isoindsY$NoFrStems==0 & isoindsY$NoFlStems==0 & isoindsY$NoArrStems==0&isoindsY$Stage!="D"),]$Repro=0
# Figure 1: counts of observed vegetative / reproductive individuals per
# year, by treatment group.  NOTE(review): quartz() is macOS-only.
quartz(height=6,width=10)
par(mfrow=c(1,1),mar=c(1,5,1,.5), oma=c(5,.5,.5,.5))
pop_x<-tapply(isoindsX$UniqueID,list(isoindsX$Year,isoindsX$Repro),length)#how does pop change over time, by group?
pop_y<-tapply(isoindsY$UniqueID,list(isoindsY$Year,isoindsY$Repro),length)#how does pop change over time, by group?
pop_x[which(is.na(pop_x))]=0#replace NAs with 0s
pop_y[which(is.na(pop_y))]=0#replace NAs with 0s
plot(pop_x[,1]~rownames(pop_x),type="l",ylab="# Individuals Observed", xlab="Year", xaxt="n", xlim=c(1985,2025),ylim=c(0,60), bty="l", lty=3,col="black", lwd=2, cex.axis=1.3, cex.lab=1.5)
lines(pop_x[,2]~rownames(pop_x), lty=1, lwd=2)
lines(pop_y[,1]~rownames(pop_y), lty=3,col="darkgray", lwd=2)
lines(pop_y[,2]~rownames(pop_y), lty=1, col="darkgray", lwd=2)
# Series labels placed to the right of the final (2015) observations.
text(2015, pop_x[31,1]+1,labels="Control Group, X",adj=0,cex=1.1)
text(2015.2, pop_x[31,1]-1.5,labels="(Vegetative)",adj=0,cex=1.1)
text(2015, pop_y[31,1],labels="Cleared Group, Y",adj=0,cex=1.1)
text(2015.2, pop_y[31,1]-2.5,labels="(Vegetative)",adj=0,cex=1.1)
text(2015, pop_x[31,2],labels="Control Group, X",adj=0,cex=1.1)
text(2015.2, pop_x[31,2]-2.5,labels="(Reproductive)",adj=0,cex=1.1)
text(2015,pop_y[31,2],labels="Cleared Group, Y",adj=0,cex=1.1)
text(2015.2, pop_y[31,2]-2.5,labels="(Reproductive)",adj=0,cex=1.1)
# Vertical reference line at the 1997 clearing treatment.
abline(v=1997,lty=2,col="gray", lwd=3)
axis(side=1,at=rownames(pop_x), labels=TRUE, cex.axis=1.3)
mtext("Year",side=1, adj=.35, cex=1.5, line=2.5)
### select out vital rates to calculate dwell times, etc
# Posterior samples of the multistate-model vital rates, one column per
# parameter.  Naming: phi = survival, p = detection/emergence (so 1-p is
# dormancy probability), psi = stage transition; A = vegetative, B =
# reproductive; 0 = pre-clearing, 1 = post-clearing; [1] = group X
# (control), [2] = group Y (cleared).
# (Trailing comments below give the JAGS-style column name, corrected --
# the original comments had the phi/psi labels swapped.)
library(popbio)
mod.samples<-read.csv("msmod_samples_complex.csv", header=T)
# vital rates for group X prior to clearing
phiV_Xpre<-mod.samples[,which(colnames(mod.samples)=="phiA0.1.")]#if not read in, colname=phiA0[1]
phiR_Xpre<-mod.samples[,which(colnames(mod.samples)=="phiB0.1.")]#if not read in, colname=phiB0[1]
pdormV_Xpre<-1-mod.samples[,which(colnames(mod.samples)=="pA0.1.")]#if not read in, colname=pA0[1]
pdormR_Xpre<-1-mod.samples[,which(colnames(mod.samples)=="pB0.1.")]#if not read in, colname=pB0[1]
veg.rep_Xpre<- mod.samples[,which(colnames(mod.samples)=="psiA0.1.")]#if not read in, colname=psiA0[1]
rep.veg_Xpre<-mod.samples[,which(colnames(mod.samples)=="psiB0.1.")]#if not read in, colname=psiB0[1]
# vital rates for group Y prior to clearing
phiV_Ypre<-mod.samples[,which(colnames(mod.samples)=="phiA0.2.")]#if not read in, colname=phiA0[2]
phiR_Ypre<-mod.samples[,which(colnames(mod.samples)=="phiB0.2.")]#if not read in, colname=phiB0[2]
pdormV_Ypre<-1-mod.samples[,which(colnames(mod.samples)=="pA0.2.")]#if not read in, colname=pA0[2]
pdormR_Ypre<-1-mod.samples[,which(colnames(mod.samples)=="pB0.2.")]#if not read in, colname=pB0[2]
veg.rep_Ypre<- mod.samples[,which(colnames(mod.samples)=="psiA0.2.")]#if not read in, colname=psiA0[2]
rep.veg_Ypre<-mod.samples[,which(colnames(mod.samples)=="psiB0.2.")]#if not read in, colname=psiB0[2]
# vital rates for group X after clearing
phiV_Xpost<-mod.samples[,which(colnames(mod.samples)=="phiA1.1.")]#if not read in, colname=phiA1[1]
phiR_Xpost<-mod.samples[,which(colnames(mod.samples)=="phiB1.1.")]#if not read in, colname=phiB1[1]
pdormV_Xpost<-1-mod.samples[,which(colnames(mod.samples)=="pA1.1.")]#if not read in, colname=pA1[1]
pdormR_Xpost<-1-mod.samples[,which(colnames(mod.samples)=="pB1.1.")]#if not read in, colname=pB1[1]
veg.rep_Xpost<- mod.samples[,which(colnames(mod.samples)=="psiA1.1.")]#if not read in, colname=psiA1[1]
rep.veg_Xpost<-mod.samples[,which(colnames(mod.samples)=="psiB1.1.")]#if not read in, colname=psiB1[1]
# vital rates for group Y after clearing
phiV_Ypost<-mod.samples[,which(colnames(mod.samples)=="phiA1.2.")]#if not read in, colname=phiA1[2]
phiR_Ypost<-mod.samples[,which(colnames(mod.samples)=="phiB1.2.")]#if not read in, colname=phiB1[2]
pdormV_Ypost<-1-mod.samples[,which(colnames(mod.samples)=="pA1.2.")]#if not read in, colname=pA1[2]
pdormR_Ypost<-1-mod.samples[,which(colnames(mod.samples)=="pB1.2.")]#if not read in, colname=pB1[2]
veg.rep_Ypost<- mod.samples[,which(colnames(mod.samples)=="psiA1.2.")]#if not read in, colname=psiA1[2]
rep.veg_Ypost<-mod.samples[,which(colnames(mod.samples)=="psiB1.2.")]#if not read in, colname=psiB1[2]
###Proportion of plants dormant in each condition
# For each posterior draw of the vital rates, build the 4-stage annual
# transition matrix (stage order: dormant-veg, dormant-rep, emergent-veg,
# emergent-rep) and return the proportion of plants that are dormant
# (stages 1-2) at the stable stage distribution.
#   phiV, phiR       survival of vegetative / reproductive plants
#   veg.rep, rep.veg transition probabilities veg->rep and rep->veg
#   pdormV, pdormR   probability of being dormant next year, by next stage
# All six arguments are equal-length vectors of posterior samples; the
# return value holds one proportion per draw.
get.propdorm <- function(phiV,veg.rep,pdormV,phiR,rep.veg,pdormR) {
  prop.dorm <- numeric(length(phiV))  # preallocated (was a grown array())
  for (i in seq_along(phiV)){
    tmx <- c(phiV[i]*(1-veg.rep[i])*pdormV[i], phiR[i]*rep.veg[i]*pdormV[i], phiV[i]*(1-veg.rep[i])*pdormV[i], phiR[i]*rep.veg[i]*pdormV[i],
             phiV[i]*veg.rep[i]*pdormR[i], phiR[i]*(1-rep.veg[i])*pdormR[i], phiV[i]*veg.rep[i]*pdormR[i], phiR[i]*(1-rep.veg[i])*pdormR[i],
             phiV[i]*(1-veg.rep[i])*(1-pdormV[i]), phiR[i]*rep.veg[i]*(1-pdormV[i]), phiV[i]*(1-veg.rep[i])*(1-pdormV[i]), phiR[i]*rep.veg[i]*(1-pdormV[i]),
             phiV[i]*veg.rep[i]*(1-pdormR[i]), phiR[i]*(1-rep.veg[i])*(1-pdormR[i]), phiV[i]*veg.rep[i]*(1-pdormR[i]), phiR[i]*(1-rep.veg[i])*(1-pdormR[i]))
    tmx <- matrix(tmx, nrow = 4, byrow = TRUE)
    # stable.stage computed once per draw (the original called
    # eigen.analysis() twice and discarded the first result)
    ss <- eigen.analysis(tmx)$stable.stage
    prop.dorm[i] <- sum(ss[1:2])
  }
  return(prop.dorm)
}
#even though clearing does not change the probability of dormancy per se, it could change the expected proportion of dormant plants via changes in other vital rates.
# Proportion dormant at the stable stage distribution, per posterior draw,
# for each group x period combination.
propdorm_Xpre<-get.propdorm(phiV_Xpre,veg.rep_Xpre,pdormV_Xpre,phiR_Xpre,rep.veg_Xpre,pdormR_Xpre)
propdorm_Ypre<-get.propdorm(phiV_Ypre,veg.rep_Ypre,pdormV_Ypre,phiR_Ypre,rep.veg_Ypre,pdormR_Ypre)
propdorm_Xpost<-get.propdorm(phiV_Xpost,veg.rep_Xpost,pdormV_Xpost,phiR_Xpost,rep.veg_Xpost,pdormR_Xpost)
propdorm_Ypost<-get.propdorm(phiV_Ypost,veg.rep_Ypost,pdormV_Ypost,phiR_Ypost,rep.veg_Ypost,pdormR_Ypost)
#windows(height=6,width=10)
#quartz(height=6,width=10)
#par(mfrow=c(2,2))
#hist(propdorm_Xpre, xlim=c(0,1))
#hist(propdorm_Ypre,xlim=c(0,1))
# NOTE(review): the two hist() calls below are still active although the
# device/layout lines above are commented out -- confirm they are wanted.
hist(propdorm_Xpost,xlim=c(0,1))
hist(propdorm_Ypost,xlim=c(0,1))
# Posterior means and SDs (values in trailing comments are from a prior run).
mean(propdorm_Xpre);sd(propdorm_Xpre)#0.257 (0.052) plants dormant in uncleared prior to clearing
mean(propdorm_Ypre);sd(propdorm_Ypre)#0.219 (0.050) plants dormant in cleared prior to clearing
mean(propdorm_Xpost);sd(propdorm_Xpost)#0.10 (0.06) plants dormant in uncleared post clearing
mean(propdorm_Ypost);sd(propdorm_Ypost)#0.094 (0.069) plants dormant in cleared post clearing
####Now life expectancy:
##test:
#phiV=phiV_Ypost
#veg.rep=veg.rep_Ypost
#pdormV=pdormV_Ypost
#phiR=phiR_Ypost
#rep.veg=rep.veg_Ypost
#pdormR=pdormR_Ypost
#to figure out effect of survival on lifespan estimates, plug in mean values for everything then try changing phi:
#phiV=0.9999
#veg.rep=0.47
#pdormV=0.30
#phiR=0.99
#rep.veg=0.02
#pdormR=0.024
#with the above mean parameters, lifespan_med is 30.
#if i change phiV to 0.98, lifespan_med is 35
#to 0.99, lifespan=40; change of phiV from .99 to .999 moves lifespan from 69 to 74
#both phiV and phiR changed to .99; lifespan goes up to 69;
#with PhiV at .99 and when phiR changed from .99 to .999-.9933, med lifespan=Inf
#with PhiV at .99 and when phiR .991, med lifespan=77
#with PhiV at .99 and when phiR .992, med lifespan=85
#with PhiV at .99 and when phiR .993, med lifespan=97
#with PhiV at .99 and when phiR .9931, med lifespan=98
#with PhiV at .99 and when phiR .9932, med lifespan=99
#with PhiV at .99 and when phiR .99325-8, med lifespan=100
# "Lifespan" of a plant that starts as an emergent vegetative individual,
# one value per posterior draw of the vital rates.
# A cohort of 1000 emergent-vegetative plants is projected through the
# 4-stage matrix (dormant-veg, dormant-rep, emergent-veg, emergent-rep);
# the returned value is the first year in which fewer than 900 of the
# 1000 remain alive.
# NOTE(review): the original comment called this the median survival time,
# but a 900/1000 cutoff corresponds to ~10% cohort mortality -- confirm the
# intended quantile.  Draws where the cohort never drops below 900 within
# 1800 years return Inf (with a warning); callers strip those out.
get.lifespan<- function(phiV,veg.rep,pdormV,phiR,rep.veg,pdormR){
  lifespan_med <- numeric(length(phiV))   # preallocated (was a grown array())
  for (i in seq_along(phiV)){
    tmx <- c(phiV[i]*(1-veg.rep[i])*pdormV[i], phiR[i]*rep.veg[i]*pdormV[i], phiV[i]*(1-veg.rep[i])*pdormV[i], phiR[i]*rep.veg[i]*pdormV[i],
             phiV[i]*veg.rep[i]*pdormR[i], phiR[i]*(1-rep.veg[i])*pdormR[i], phiV[i]*veg.rep[i]*pdormR[i], phiR[i]*(1-rep.veg[i])*pdormR[i],
             phiV[i]*(1-veg.rep[i])*(1-pdormV[i]), phiR[i]*rep.veg[i]*(1-pdormV[i]), phiV[i]*(1-veg.rep[i])*(1-pdormV[i]), phiR[i]*rep.veg[i]*(1-pdormV[i]),
             phiV[i]*veg.rep[i]*(1-pdormR[i]), phiR[i]*(1-rep.veg[i])*(1-pdormR[i]), phiV[i]*veg.rep[i]*(1-pdormR[i]), phiR[i]*(1-rep.veg[i])*(1-pdormR[i]))
    tmx <- matrix(tmx, nrow = 4, byrow = TRUE)
    # Project the cohort forward and record total survivors each year.
    # (The unused flwrsum accumulator from the original has been removed.)
    n0 <- c(0,0,1000,0)   # start: 1000 emergent vegetative plants
    nsum <- numeric(1800) # preallocated yearly cohort sizes
    for(j in 1:1800){
      n1 <- tmx %*% n0
      nsum[j] <- sum(n1)
      n0 <- n1
    }
    lifespan_med[i] <- min(which(nsum < 900))  # Inf (+ warning) if never reached
  }
  return(lifespan_med)
}
# Lifespan per posterior draw, starting from emergent vegetative plants.
lifespan_Xpre<-get.lifespan(phiV_Xpre,veg.rep_Xpre,pdormV_Xpre,phiR_Xpre,rep.veg_Xpre,pdormR_Xpre)
lifespan_Ypre<-get.lifespan(phiV_Ypre,veg.rep_Ypre,pdormV_Ypre,phiR_Ypre,rep.veg_Ypre,pdormR_Ypre)
lifespan_Xpost<-get.lifespan(phiV_Xpost,veg.rep_Xpost,pdormV_Xpost,phiR_Xpost,rep.veg_Xpost,pdormR_Xpost)
lifespan_Ypost<-get.lifespan(phiV_Ypost,veg.rep_Ypost,pdormV_Ypost,phiR_Ypost,rep.veg_Ypost,pdormR_Ypost)
# Drop posterior draws where the cohort never fell below the cutoff (Inf).
# NOTE(review): x[-which(x == "Inf")] returns an EMPTY vector when no Inf is
# present (which() gives integer(0)); guard with is.infinite()/any() if the
# no-Inf case can occur.
lifespan_Xpost2<-lifespan_Xpost[-(which(lifespan_Xpost=="Inf"))]
lifespan_Ypost2<-lifespan_Ypost[-(which(lifespan_Ypost=="Inf"))]
#Alternatively, life expectancy can be calculated as -1/ln(s)
LEV_Xpre<--1/(log(phiV_Xpre))#median=5.5
LEV_Xpost<--1/(log(phiV_Xpost))#median=7.69
LEV_Ypre<--1/(log(phiV_Ypre))#median=6.31
LEV_Ypost<--1/(log(phiV_Ypost))#median=60
# Same as get.lifespan(), but the cohort starts as emergent REPRODUCTIVE
# plants (the original in-function comment incorrectly said vegetative).
# Returns, per posterior draw, the first year in which fewer than 900 of a
# 1000-plant cohort remain alive; Inf (with a warning) if never within
# 1800 years (callers strip those out).
get.lifespan_flow<- function(phiV,veg.rep,pdormV,phiR,rep.veg,pdormR){
  lifespan_med <- numeric(length(phiV))   # preallocated (was a grown array())
  for (i in seq_along(phiV)){
    tmx <- c(phiV[i]*(1-veg.rep[i])*pdormV[i], phiR[i]*rep.veg[i]*pdormV[i], phiV[i]*(1-veg.rep[i])*pdormV[i], phiR[i]*rep.veg[i]*pdormV[i],
             phiV[i]*veg.rep[i]*pdormR[i], phiR[i]*(1-rep.veg[i])*pdormR[i], phiV[i]*veg.rep[i]*pdormR[i], phiR[i]*(1-rep.veg[i])*pdormR[i],
             phiV[i]*(1-veg.rep[i])*(1-pdormV[i]), phiR[i]*rep.veg[i]*(1-pdormV[i]), phiV[i]*(1-veg.rep[i])*(1-pdormV[i]), phiR[i]*rep.veg[i]*(1-pdormV[i]),
             phiV[i]*veg.rep[i]*(1-pdormR[i]), phiR[i]*(1-rep.veg[i])*(1-pdormR[i]), phiV[i]*veg.rep[i]*(1-pdormR[i]), phiR[i]*(1-rep.veg[i])*(1-pdormR[i]))
    tmx <- matrix(tmx, nrow = 4, byrow = TRUE)
    # Project the cohort forward and record total survivors each year.
    # (The unused flwrsum accumulator from the original has been removed.)
    n0 <- c(0,0,0,1000)   # start: 1000 emergent reproductive plants
    nsum <- numeric(1800) # preallocated yearly cohort sizes
    for(j in 1:1800){
      n1 <- tmx %*% n0
      nsum[j] <- sum(n1)
      n0 <- n1
    }
    lifespan_med[i] <- min(which(nsum < 900))  # Inf (+ warning) if never reached
  }
  return(lifespan_med)
}
# Lifespan per posterior draw, starting from emergent reproductive plants.
lifespan_flow_Xpre<-get.lifespan_flow(phiV_Xpre,veg.rep_Xpre,pdormV_Xpre,phiR_Xpre,rep.veg_Xpre,pdormR_Xpre)
lifespan_flow_Ypre<-get.lifespan_flow(phiV_Ypre,veg.rep_Ypre,pdormV_Ypre,phiR_Ypre,rep.veg_Ypre,pdormR_Ypre)
lifespan_flow_Xpost<-get.lifespan_flow(phiV_Xpost,veg.rep_Xpost,pdormV_Xpost,phiR_Xpost,rep.veg_Xpost,pdormR_Xpost)
lifespan_flow_Ypost<-get.lifespan_flow(phiV_Ypost,veg.rep_Ypost,pdormV_Ypost,phiR_Ypost,rep.veg_Ypost,pdormR_Ypost)
# Drop Inf draws (see NOTE above about the empty-vector edge case).
lifespan_flow_Xpost2<-lifespan_flow_Xpost[-(which(lifespan_flow_Xpost=="Inf"))]
lifespan_flow_Ypost2<-lifespan_flow_Ypost[-(which(lifespan_flow_Ypost=="Inf"))]
###Length of each bout of dormancy
# even though clearing does not change the probability of dormancy per se, it could change the expected proportion of dormant plants via changes in other vital rates.
# Mean length (years) of a dormancy bout that begins from the
# dormant-VEGETATIVE stage, one value per posterior draw.
# A cohort of 1000 dormant-veg plants is projected with a transition matrix
# in which only dormant destinations are retained (emergent rows zeroed),
# so nsum[j] is the number still dormant after j further years.  mydorm is
# the resulting distribution of bout lengths (1..11 years); its mean is
# returned.
get.lengthdorm <- function(phiV,veg.rep,pdormV,phiR,rep.veg,pdormR){
  mnlengthdor <- numeric(length(phiV))  # preallocated (was a grown array())
  for (i in seq_along(phiV)){
    tmx.dorm <- c(phiV[i]*(1-veg.rep[i])*pdormV[i], phiR[i]*rep.veg[i]*pdormV[i], phiV[i]*(1-veg.rep[i])*pdormV[i], phiR[i]*rep.veg[i]*pdormV[i],
                  phiV[i]*veg.rep[i]*pdormR[i], phiR[i]*(1-rep.veg[i])*pdormR[i], phiV[i]*veg.rep[i]*pdormR[i], phiR[i]*(1-rep.veg[i])*pdormR[i],
                  0,0,0,0,0,0,0,0)
    tmx.dorm <- matrix(tmx.dorm, nrow = 4, byrow = TRUE)
    # length of dormancy starting from dormant vegetative plants
    n0 <- c(1000,0,0,0)
    nsum <- numeric(100)  # preallocated (was a grown array())
    for(j in 1:100){
      n1 <- tmx.dorm %*% n0
      nsum[j] <- sum(n1)
      n0 <- n1
    }
    # Distribution of bout lengths 1..11 years (longer bouts are ignored).
    mydorm <- c(1, nsum[1:10]/1000)/(1+sum(nsum)/1000)
    numinds <- mydorm*1000
    # Expand to one entry per individual and average.  rep() truncates
    # fractional counts toward zero, exactly as the original's per-length
    # rep() loop did (which also seeded the vector with an NA and needed
    # na.rm = TRUE to compensate).
    bout_lengths <- rep(seq_along(numinds), times = numinds)
    mnlengthdor[i] <- mean(bout_lengths)
  }
  return(mnlengthdor)
}
# Mean dormancy-bout length per posterior draw, starting from dormant
# vegetative plants.
lengthdorm_Xpre<-get.lengthdorm(phiV_Xpre,veg.rep_Xpre,pdormV_Xpre,phiR_Xpre,rep.veg_Xpre,pdormR_Xpre)
lengthdorm_Ypre<-get.lengthdorm(phiV_Ypre,veg.rep_Ypre,pdormV_Ypre,phiR_Ypre,rep.veg_Ypre,pdormR_Ypre)
lengthdorm_Xpost<-get.lengthdorm(phiV_Xpost,veg.rep_Xpost,pdormV_Xpost,phiR_Xpost,rep.veg_Xpost,pdormR_Xpost)
lengthdorm_Ypost<-get.lengthdorm(phiV_Ypost,veg.rep_Ypost,pdormV_Ypost,phiR_Ypost,rep.veg_Ypost,pdormR_Ypost)
###Now, calculate length of each bout of dormancy and proportion dormant plant, starting with reproductive plants
# Mean length (years) of a dormancy bout that begins from the
# dormant-REPRODUCTIVE stage, one value per posterior draw.
# Same approach as get.lengthdorm(), but the cohort starts in stage 2
# (dormant reproductive).  The unused mydorm_all matrix and the commented
# prop_dorm1yr_flow accumulator from the original have been removed.
get.lengthdorm_flow <- function(phiV,veg.rep,pdormV,phiR,rep.veg,pdormR){
  mnlengthdor <- numeric(length(phiV))  # preallocated (was a grown array())
  for (i in seq_along(phiV)){
    tmx.dorm <- c(phiV[i]*(1-veg.rep[i])*pdormV[i], phiR[i]*rep.veg[i]*pdormV[i], phiV[i]*(1-veg.rep[i])*pdormV[i], phiR[i]*rep.veg[i]*pdormV[i],
                  phiV[i]*veg.rep[i]*pdormR[i], phiR[i]*(1-rep.veg[i])*pdormR[i], phiV[i]*veg.rep[i]*pdormR[i], phiR[i]*(1-rep.veg[i])*pdormR[i],
                  0,0,0,0,0,0,0,0)
    tmx.dorm <- matrix(tmx.dorm, nrow = 4, byrow = TRUE)
    # length of dormancy starting from dormant flowering plants
    n0_flow <- c(0,1000,0,0)
    nsum_flow <- numeric(100)  # preallocated (was a grown array())
    for(j in 1:100){
      n1_flow <- tmx.dorm %*% n0_flow
      nsum_flow[j] <- sum(n1_flow)
      n0_flow <- n1_flow
    }
    # Distribution of bout lengths 1..11 years (longer bouts are ignored).
    mydorm_flow <- c(1, nsum_flow[1:10]/1000)/(1+sum(nsum_flow)/1000)
    numinds <- mydorm_flow*1000
    # rep() truncates fractional counts toward zero, matching the
    # original's per-length rep() loop (which seeded with NA + na.rm).
    bout_lengths <- rep(seq_along(numinds), times = numinds)
    mnlengthdor[i] <- mean(bout_lengths)
  }
  return(mnlengthdor)
}
# Mean dormancy-bout length per posterior draw, starting from dormant
# reproductive plants.
lengthdorm_flow_Xpre<-get.lengthdorm_flow(phiV_Xpre,veg.rep_Xpre,pdormV_Xpre,phiR_Xpre,rep.veg_Xpre,pdormR_Xpre)
lengthdorm_flow_Ypre<-get.lengthdorm_flow(phiV_Ypre,veg.rep_Ypre,pdormV_Ypre,phiR_Ypre,rep.veg_Ypre,pdormR_Ypre)
lengthdorm_flow_Xpost<-get.lengthdorm_flow(phiV_Xpost,veg.rep_Xpost,pdormV_Xpost,phiR_Xpost,rep.veg_Xpost,pdormR_Xpost)
lengthdorm_flow_Ypost<-get.lengthdorm_flow(phiV_Ypost,veg.rep_Ypost,pdormV_Ypost,phiR_Ypost,rep.veg_Ypost,pdormR_Ypost)
##Figures
#2x2table for each vital rate with first column control, second column logged
#if model not loaded, then use model sample files to get estimates
# ms3a: posterior summary table (mean, median, 2.5% and 97.5% quantiles) for
# each model parameter; row names are the parameter names.
ms3a<-read.csv("isotria2stagemodsum_complex.csv", header=T)
rownames(ms3a)<-ms3a[,1]
# Posterior-mean 2x2 tables (rows: pre/post clearing; cols: control/logged).
surv_veg<-as.data.frame(rbind(ms3a$mean[grep("phiA0",substr(rownames(ms3a),1,5))],ms3a$mean[grep("phiA1",substr(rownames(ms3a),1,5))]))
surv_rep<-as.data.frame(rbind(ms3a$mean[grep("phiB0",substr(rownames(ms3a),1,5))],ms3a$mean[grep("phiB1",substr(rownames(ms3a),1,5))]))
emer_veg<-as.data.frame(rbind(ms3a$mean[grep("pA0",substr(rownames(ms3a),1,3))],ms3a$mean[grep("pA1",substr(rownames(ms3a),1,3))]))
emer_rep<-as.data.frame(rbind(ms3a$mean[grep("pB0",substr(rownames(ms3a),1,3))],ms3a$mean[grep("pB1",substr(rownames(ms3a),1,3))]))
trans_vr<-as.data.frame(rbind(ms3a$mean[grep("psiA0",substr(rownames(ms3a),1,5))],ms3a$mean[grep("psiA1",substr(rownames(ms3a),1,5))]))
trans_rv<-as.data.frame(rbind(ms3a$mean[grep("psiB0",substr(rownames(ms3a),1,5))],ms3a$mean[grep("psiB1",substr(rownames(ms3a),1,5))]))
# Posterior-median versions of the same tables.
surv_veg_med<-as.data.frame(rbind(ms3a$X50.[grep("phiA0",substr(rownames(ms3a),1,5))],ms3a$X50.[grep("phiA1",substr(rownames(ms3a),1,5))]))
surv_rep_med<-as.data.frame(rbind(ms3a$X50.[grep("phiB0",substr(rownames(ms3a),1,5))],ms3a$X50.[grep("phiB1",substr(rownames(ms3a),1,5))]))
emer_veg_med<-as.data.frame(rbind(ms3a$X50.[grep("pA0",substr(rownames(ms3a),1,3))],ms3a$X50.[grep("pA1",substr(rownames(ms3a),1,3))]))
emer_rep_med<-as.data.frame(rbind(ms3a$X50.[grep("pB0",substr(rownames(ms3a),1,3))],ms3a$X50.[grep("pB1",substr(rownames(ms3a),1,3))]))
trans_vr_med<-as.data.frame(rbind(ms3a$X50.[grep("psiA0",substr(rownames(ms3a),1,5))],ms3a$X50.[grep("psiA1",substr(rownames(ms3a),1,5))]))
trans_rv_med<-as.data.frame(rbind(ms3a$X50.[grep("psiB0",substr(rownames(ms3a),1,5))],ms3a$X50.[grep("psiB1",substr(rownames(ms3a),1,5))]))
colnames(surv_veg)<-c("control","logged")
colnames(surv_rep)<-c("control","logged")
colnames(emer_veg)<-c("control","logged")
colnames(emer_rep)<-c("control","logged")
colnames(trans_vr)<-c("control","logged")
colnames(trans_rv)<-c("control","logged")
colnames(surv_veg_med)<-c("control","logged")
colnames(surv_rep_med)<-c("control","logged")
colnames(emer_veg_med)<-c("control","logged")
colnames(emer_rep_med)<-c("control","logged")
colnames(trans_vr_med)<-c("control","logged")
colnames(trans_rv_med)<-c("control","logged")
##use code below if model not loaded:
# 95% credible-interval bounds for the error bars in Figures 3-4; order is
# (control-pre, control-post, cleared-pre, cleared-post).
surv_veg_q2.5<-c(ms3a$X2.5.[grep("phiA0",substr(rownames(ms3a),1,5))],ms3a$X2.5.[grep("phiA1",substr(rownames(ms3a),1,5))])
surv_rep_q2.5<-c(ms3a$X2.5.[grep("phiB0",substr(rownames(ms3a),1,5))],ms3a$X2.5.[grep("phiB1",substr(rownames(ms3a),1,5))])
trans_vr_q2.5<-c(ms3a$X2.5.[grep("psiA0",substr(rownames(ms3a),1,5))],ms3a$X2.5.[grep("psiA1",substr(rownames(ms3a),1,5))])
trans_rv_q2.5<-c(ms3a$X2.5.[grep("psiB0",substr(rownames(ms3a),1,5))],ms3a$X2.5.[grep("psiB1",substr(rownames(ms3a),1,5))])
emer_veg_q2.5<-c(ms3a$X2.5.[grep("pA0",substr(rownames(ms3a),1,3))],ms3a$X2.5.[grep("pA1",substr(rownames(ms3a),1,3))])
emer_rep_q2.5<-c(ms3a$X2.5.[grep("pB0",substr(rownames(ms3a),1,3))],ms3a$X2.5.[grep("pB1",substr(rownames(ms3a),1,3))])
surv_veg_q97.5<-c(ms3a$X97.5.[grep("phiA0",substr(rownames(ms3a),1,5))],ms3a$X97.5.[grep("phiA1",substr(rownames(ms3a),1,5))])
surv_rep_q97.5<-c(ms3a$X97.5.[grep("phiB0",substr(rownames(ms3a),1,5))],ms3a$X97.5.[grep("phiB1",substr(rownames(ms3a),1,5))])
trans_vr_q97.5<-c(ms3a$X97.5.[grep("psiA0",substr(rownames(ms3a),1,5))],ms3a$X97.5.[grep("psiA1",substr(rownames(ms3a),1,5))])
trans_rv_q97.5<-c(ms3a$X97.5.[grep("psiB0",substr(rownames(ms3a),1,5))],ms3a$X97.5.[grep("psiB1",substr(rownames(ms3a),1,5))])
emer_veg_q97.5<-c(ms3a$X97.5.[grep("pA0",substr(rownames(ms3a),1,3))],ms3a$X97.5.[grep("pA1",substr(rownames(ms3a),1,3))])
emer_rep_q97.5<-c(ms3a$X97.5.[grep("pB0",substr(rownames(ms3a),1,3))],ms3a$X97.5.[grep("pB1",substr(rownames(ms3a),1,3))])
#Figure 3, of vital rates
# Panel x-coordinates: veg at 1 (pre) / 2 (post), rep at 3 (pre) / 4 (post).
x<-c(1,2,1,2)
#x<-c(1,2,1.05,2.05)#jittered
xerror<-c(1,1,2,2)
#xerror<-c(1,1.05,2,2.05)#jittered
x2<-c(3,4,3,4)
#x2<-c(3,4,3.05,4.05)#jittered
x2error<-c(3,3,4,4)
#x2error<-c(3,3.05,4,4.05)#jittered
# Open a 6 x 7 inch display device for Figure 3 (vital rates).
# The original called windows() and then quartz(); whichever does not match
# the running OS errors.  dev.new() opens the default device on any platform.
dev.new(height=6, width=7)
par(mfrow=c(3,1),mar=c(.5,4.1,1,.5), oma=c(3,.6,.5,.5))
#survival
# Figure 3, top panel: survival of vegetative (x = 1,2) and reproductive
# (x = 3,4) plants, pre/post clearing, control (black) vs cleared (white).
# Fix: removed the stray double comma ("las=1,,") in the plot() call, which
# passed an empty argument into ... and errors at evaluation.
plot(x,c(surv_veg$control,surv_veg$logged), pch=21, bg=c("black","black","white","white"), ylim=c(0,1), ylab="Survival", xaxt="n", cex=1.5, xlab="", xlim=c(0.75,4.25), cex.lab=1.5, las=1, cex.axis=1.3)
lines(x[1:2],c(surv_veg$control), lty=1)
lines(x[3:4],c(surv_veg$logged), lty=3)
# Panel dividers: solid line separates veg/rep, gray dashed separate pre/post.
abline(v=2.5,lty=1, lwd=2)
abline(v=1.5,lty=2,col="gray", lwd=2)
abline(v=3.5,lty=2,col="gray", lwd=2)
# 95% credible-interval error bars, then points re-drawn on top.
arrows(xerror,surv_veg_q2.5,xerror,surv_veg_q97.5, code=0,angle=90, length=0.1)
points(x,c(surv_veg$control,surv_veg$logged),pch=21, bg=c("black","black","white","white"), cex=1.5)
arrows(x2error,surv_rep_q2.5,x2error,surv_rep_q97.5, code=0,angle=90, length=0.1)
lines(x2[1:2],c(surv_rep$control), lty=1)
lines(x2[3:4],c(surv_rep$logged), lty=3)
points(x2,c(surv_rep$control,surv_rep$logged),pch=21, bg=c("black","black","white","white"), cex=1.5)
axis(side=1,at=c(1.5,3.5),labels=c("Vegetative","Reproductive" ),line=-15, tick=F, cex.axis=1.2)
legend("bottomright",legend=c("Control", "Cleared"),pch=21,pt.cex=1.5,pt.bg=c("black","white"), bty="n", cex=1.2)
#cbind(rownames(ms3a[25:32,]),ms3a$mean[25:32],ms3a$X2.5[25:32],ms3a$X97.5.[25:32])
#Dormancy(=1-)Emergence
# Figure 3, middle panel: dormancy probability (1 - emergence), same layout
# as the survival panel above.
plot(x,c(1-emer_veg$control,1-emer_veg$logged), pch=21, bg="black", ylim=c(0,1), ylab="Dormancy", xaxt="n", cex=1.5, xlab="", xlim=c(0.75,4.25), cex.lab=1.5, las=1, cex.axis=1.3)
lines(x[1:2],c(1-emer_veg$control), lty=1)
lines(x[3:4],c(1-emer_veg$logged), lty=3)
abline(v=2.5,lty=1, lwd=2)
abline(v=1.5,lty=2,col="gray", lwd=2)
abline(v=3.5,lty=2,col="gray", lwd=2)
# Error bars use 1 - quantile, so upper and lower bounds swap roles.
arrows(xerror,1-emer_veg_q2.5,xerror,1-emer_veg_q97.5, code=0,angle=90, length=0.1)
points(x,c(1-emer_veg$control,1-emer_veg$logged),pch=21, bg=c("black","black","white","white"), cex=1.5)
arrows(x2error,1-emer_rep_q2.5,x2error,1-emer_rep_q97.5, code=0,angle=90, length=0.1)
lines(x2[1:2],c(1-emer_rep$control), lty=1)
lines(x2[3:4],c(1-emer_rep$logged), lty=3)
points(x2,c(1-emer_rep$control,1-emer_rep$logged),pch=21, bg=c("black","black","white","white"), cex=1.5)
cbind(rownames(ms3a[41:48,]),1-ms3a$mean[41:48],1-ms3a$X2.5[41:48],1-ms3a$X97.5.[41:48])#check error bars
#transition
# Figure 3, bottom panel: stage-transition probabilities (veg->rep at
# x = 1,2; rep->veg at x = 3,4), same layout as the panels above.
plot(x,c(trans_vr$control,trans_vr$logged), pch=21, bg=c("black","black","white","white"), ylim=c(0,1), ylab="Transition", xaxt="n", cex=1.6, xlab="", xlim=c(0.75,4.25), cex.lab=1.5, las=1, cex.axis=1.3)
lines(x[1:2],c(trans_vr$control), lty=1)
lines(x[3:4],c(trans_vr$logged), lty=3)
abline(v=2.5,lty=1, lwd=2)
abline(v=1.5,lty=2,col="gray", lwd=2)
abline(v=3.5,lty=2,col="gray", lwd=2)
arrows(xerror,trans_vr_q2.5,xerror,trans_vr_q97.5, code=0,angle=90, length=0.1)
points(x,c(trans_vr$control,trans_vr$logged),pch=21, bg=c("black","black","white","white"), cex=1.5)
arrows(x2error,trans_rv_q2.5,x2error,trans_rv_q97.5, code=0,angle=90, length=0.1)
lines(x2[1:2],c(trans_rv$control), lty=1)
lines(x2[3:4],c(trans_rv$logged), lty=3)
points(x2,c(trans_rv$control,trans_rv$logged),pch=21, bg=c("black","black","white","white"), cex=1.5)
# Bottom axis: pre/post labels with the corresponding year ranges beneath.
axis(side=1,at=c(x[1:2],x2[1:2]),labels=c("pre","post","pre","post"), cex.axis=1.3)
axis(side=1,at=c(x[1:2],x2[1:2]),labels=c("(1982-1997)","(1998-2015)","(1982-1997)","(1998-2015)"), line=1.2,tick=F, cex.axis=1.3)
#####Figure 4, length of dormancy, lifespan, etc
#x<-c(1,2,1,2)
# As for Figure 3, the platform-specific windows()/quartz() pair (one of
# which always errors off its native OS) is replaced with portable dev.new().
dev.new(height=6, width=7)
par(mfrow=c(3,1),mar=c(.5,4.1,1,.5), oma=c(3,.6,.5,.5))
#lifespan
# Figure 4, top panel: mean +/- SD posterior lifespan starting from
# vegetative (x = 1,2) and reproductive (x = 3,4) plants, pre/post clearing.
plot(x,c(mean(lifespan_Xpre, na.rm=T),mean(lifespan_Xpost2, na.rm=T),mean(lifespan_Ypre, na.rm=T),mean(lifespan_Ypost2, na.rm=T)), pch=21, bg=c("black","black","white","white"), ylim=c(0,100), ylab="Lifespan (yrs)", xaxt="n", cex=1.5, xlab="", xlim=c(0.75,4.25), cex.lab=1.5, cex.axis=1.3, las=1)
abline(v=2.5,lty=1, lwd=2)
abline(v=1.5,lty=2,col="gray", lwd=2)
abline(v=3.5,lty=2,col="gray", lwd=2)
# Fix: the control line used lifespan_Xpost (which still contains Inf draws,
# so its mean is Inf) while the plotted points use the Inf-stripped
# lifespan_Xpost2 -- use lifespan_Xpost2 here as well.
lines(x[1:2],c(mean(lifespan_Xpre, na.rm=T),mean(lifespan_Xpost2, na.rm=T)), lty=1)
lines(x[3:4],c(mean(lifespan_Ypre, na.rm=T),mean(lifespan_Ypost2, na.rm=T)), lty=3)
arrows(xerror,c(mean(lifespan_Xpre, na.rm=T)-sd(lifespan_Xpre, na.rm=T),mean(lifespan_Ypre, na.rm=T)-sd(lifespan_Ypre, na.rm=T),mean(lifespan_Xpost2, na.rm=T)-sd(lifespan_Xpost2, na.rm=T),mean(lifespan_Ypost2, na.rm=T)-sd(lifespan_Ypost2, na.rm=T)),xerror,c(mean(lifespan_Xpre, na.rm=T)+sd(lifespan_Xpre, na.rm=T),mean(lifespan_Ypre, na.rm=T)+sd(lifespan_Ypre, na.rm=T),mean(lifespan_Xpost2, na.rm=T)+sd(lifespan_Xpost2, na.rm=T),mean(lifespan_Ypost2, na.rm=T)+sd(lifespan_Ypost2, na.rm=T)), code=0,angle=90, length=0.1)
points(x,c(mean(lifespan_Xpre, na.rm=T),mean(lifespan_Xpost2, na.rm=T),mean(lifespan_Ypre, na.rm=T),mean(lifespan_Ypost2, na.rm=T)), pch=21, bg=c("black","black","white","white"), cex=1.5)
lines(x2[1:2],c(mean(lifespan_flow_Xpre, na.rm=T),mean(lifespan_flow_Xpost2, na.rm=T)), lty=1)
lines(x2[3:4],c(mean(lifespan_flow_Ypre, na.rm=T),mean(lifespan_flow_Ypost2, na.rm=T)), lty=3)
arrows(x2error,c(mean(lifespan_flow_Xpre, na.rm=T)-sd(lifespan_flow_Xpre, na.rm=T),mean(lifespan_flow_Ypre, na.rm=T)-sd(lifespan_flow_Ypre, na.rm=T),mean(lifespan_flow_Xpost2, na.rm=T)-sd(lifespan_flow_Xpost2, na.rm=T),mean(lifespan_flow_Ypost2, na.rm=T)-sd(lifespan_flow_Ypost2, na.rm=T)),x2error,c(mean(lifespan_flow_Xpre, na.rm=T)+sd(lifespan_flow_Xpre, na.rm=T),mean(lifespan_flow_Ypre, na.rm=T)+sd(lifespan_flow_Ypre, na.rm=T),mean(lifespan_flow_Xpost2, na.rm=T)+sd(lifespan_flow_Xpost2, na.rm=T),mean(lifespan_flow_Ypost2, na.rm=T)+sd(lifespan_flow_Ypost2, na.rm=T)), code=0,angle=90, length=0.1)
points(x2,c(mean(lifespan_flow_Xpre, na.rm=T),mean(lifespan_flow_Xpost2, na.rm=T),mean(lifespan_flow_Ypre, na.rm=T),mean(lifespan_flow_Ypost2, na.rm=T)), pch=21, bg=c("black","black","white","white"), cex=1.5)
axis(side=1,at=c(1.5,3.5),labels=c("Vegetative","Reproductive" ),line=-15, tick=F, cex.axis=1.2)
#Length of dormancy, starting from veg (black) or rep (white)
# Dormancy-length panel: mean +/-1 SD per group and period.
# NOTE(review): `cex.lab` is passed twice below (1.2 then 1.5) -- the
# duplicate should be removed; confirm which value was intended.
plot(x,c(mean(lengthdorm_Xpre, na.rm=T),mean(lengthdorm_Xpost, na.rm=T),mean(lengthdorm_Ypre, na.rm=T),mean(lengthdorm_Ypost, na.rm=T)), pch=21, bg=c("black","black","white","white"), ylim=c(0,2), ylab="Dormancy length (yrs)", xaxt="n", cex=1.5, xlab="", xlim=c(0.75,4.25), cex.lab=1.2,cex.lab=1.5, cex.axis=1.3, las=1)
# Solid divider between panel halves, dashed gray separators within.
abline(v=2.5,lty=1, lwd=2)
abline(v=1.5,lty=2,col="gray", lwd=2)
abline(v=3.5,lty=2,col="gray", lwd=2)
# Pre -> post trajectories (solid = X, dotted = Y).
lines(x[1:2],c(mean(lengthdorm_Xpre, na.rm=T),mean(lengthdorm_Xpost, na.rm=T)), lty=1)
lines(x[3:4],c(mean(lengthdorm_Ypre, na.rm=T),mean(lengthdorm_Ypost, na.rm=T)), lty=3)
# +/-1 SD bars; order (Xpre, Ypre, Xpost, Ypost) must match xerror positions.
arrows(xerror,c(mean(lengthdorm_Xpre, na.rm=T)-sd(lengthdorm_Xpre, na.rm=T),mean(lengthdorm_Ypre, na.rm=T)-sd(lengthdorm_Ypre, na.rm=T),mean(lengthdorm_Xpost, na.rm=T)-sd(lengthdorm_Xpost, na.rm=T),mean(lengthdorm_Ypost, na.rm=T)-sd(lengthdorm_Ypost, na.rm=T)),xerror,c(mean(lengthdorm_Xpre, na.rm=T)+sd(lengthdorm_Xpre, na.rm=T),mean(lengthdorm_Ypre, na.rm=T)+sd(lengthdorm_Ypre, na.rm=T),mean(lengthdorm_Xpost, na.rm=T)+sd(lengthdorm_Xpost, na.rm=T),mean(lengthdorm_Ypost, na.rm=T)+sd(lengthdorm_Ypost, na.rm=T)), code=0,angle=90, length=0.1)
points(x,c(mean(lengthdorm_Xpre, na.rm=T),mean(lengthdorm_Xpost, na.rm=T),mean(lengthdorm_Ypre, na.rm=T),mean(lengthdorm_Ypost, na.rm=T)), pch=21, bg=c("black","black","white","white"), cex=1.5)
# Flowering (lengthdorm_flow_*) series at x2 positions, same structure.
arrows(x2error,c(mean(lengthdorm_flow_Xpre, na.rm=T)-sd(lengthdorm_flow_Xpre, na.rm=T),mean(lengthdorm_flow_Ypre, na.rm=T)-sd(lengthdorm_flow_Ypre, na.rm=T),mean(lengthdorm_flow_Xpost, na.rm=T)-sd(lengthdorm_flow_Xpost, na.rm=T),mean(lengthdorm_flow_Ypost, na.rm=T)-sd(lengthdorm_flow_Ypost, na.rm=T)),x2error,c(mean(lengthdorm_flow_Xpre, na.rm=T)+sd(lengthdorm_flow_Xpre, na.rm=T),mean(lengthdorm_flow_Ypre, na.rm=T)+sd(lengthdorm_flow_Ypre, na.rm=T),mean(lengthdorm_flow_Xpost, na.rm=T)+sd(lengthdorm_flow_Xpost, na.rm=T),mean(lengthdorm_flow_Ypost, na.rm=T)+sd(lengthdorm_flow_Ypost, na.rm=T)), code=0,angle=90, length=0.1)
lines(x2[1:2],c(mean(lengthdorm_flow_Xpre, na.rm=T),mean(lengthdorm_flow_Xpost, na.rm=T)), lty=1)
lines(x2[3:4],c(mean(lengthdorm_flow_Ypre, na.rm=T),mean(lengthdorm_flow_Ypost, na.rm=T)), lty=3)
points(x2,c(mean(lengthdorm_flow_Xpre, na.rm=T),mean(lengthdorm_flow_Xpost, na.rm=T),mean(lengthdorm_flow_Ypre, na.rm=T),mean(lengthdorm_flow_Ypost, na.rm=T)), pch=21, bg=c("black","black","white","white"), cex=1.5)
# Two-row x-axis labelling: period name, then year range beneath it.
axis(side=1,at=c(x[1:2],x2[1:2]),labels=c("pre","post","pre","post"), cex.axis=1.3)
axis(side=1,at=c(x[1:2],x2[1:2]),labels=c("(1982-1997)","(1998-2015)","(1982-1997)","(1998-2015)"), line=1.2,tick=F, cex.axis=1.3)
#proportion of plants dormant
# Proportion-dormant panel: one pre/post pair per group (black = X, white = Y),
# with +/-1 SD error bars and pre->post trajectories.
plot(x,c(mean(propdorm_Xpre, na.rm=T),mean(propdorm_Xpost, na.rm=T),mean(propdorm_Ypre, na.rm=T),mean(propdorm_Ypost, na.rm=T)), pch=21, bg=c("black","black","white","white"), ylim=c(0,1), ylab="Proportion Dormant", xaxt="n", cex=1.5, xlab="", xlim=c(0.75,4.25), cex.lab=1.5, cex.axis=1.3, las=1)
abline(v=2.5,lty=1, lwd=2)
abline(v=1.5,lty=2,col="gray", lwd=2)
# Pre -> post trajectories (solid = X, dotted = Y).
lines(x[1:2],c(mean(propdorm_Xpre, na.rm=T),mean(propdorm_Xpost, na.rm=T)), lty=1)
lines(x[3:4],c(mean(propdorm_Ypre, na.rm=T),mean(propdorm_Ypost, na.rm=T)), lty=3)
# +/-1 SD bars; order (Xpre, Ypre, Xpost, Ypost) must match xerror positions.
arrows(xerror,c(mean(propdorm_Xpre, na.rm=T)-sd(propdorm_Xpre, na.rm=T),mean(propdorm_Ypre, na.rm=T)-sd(propdorm_Ypre, na.rm=T),mean(propdorm_Xpost, na.rm=T)-sd(propdorm_Xpost, na.rm=T),mean(propdorm_Ypost, na.rm=T)-sd(propdorm_Ypost, na.rm=T)),xerror,c(mean(propdorm_Xpre, na.rm=T)+sd(propdorm_Xpre, na.rm=T),mean(propdorm_Ypre, na.rm=T)+sd(propdorm_Ypre, na.rm=T),mean(propdorm_Xpost, na.rm=T)+sd(propdorm_Xpost, na.rm=T),mean(propdorm_Ypost, na.rm=T)+sd(propdorm_Ypost, na.rm=T)), code=0,angle=90, length=0.1)
# Re-draw points on top of the error bars.
points(x,c(mean(propdorm_Xpre, na.rm=T),mean(propdorm_Xpost, na.rm=T),mean(propdorm_Ypre, na.rm=T),mean(propdorm_Ypost, na.rm=T)), pch=21, bg=c("black","black","white","white"), cex=1.5)
# Two-row x-axis labelling: period name, then year range beneath it.
axis(side=1,at=c(x[1:2]),labels=c("pre","post"), cex.axis=1.3)
axis(side=1,at=c(x[1:2]),labels=c("(1982-1997)","(1998-2015)"), line=1.2,tick=F, cex.axis=1.3)
##Figuring out why error bars are so wide for lifespan Ypost:
##compare the vital-rate draws behind extreme lifespan estimates with the rest.
# Collect the six Ypost vital-rate columns (phiV, veg->rep, veg dormancy prob.,
# phiR, rep->veg, rep dormancy prob.) for the posterior draws selected by `keep`.
ypost_vitals <- function(keep) {
  idx <- which(keep)
  cbind(phiV_Ypost[idx], veg.rep_Ypost[idx], pdormV_Ypost[idx],
        phiR_Ypost[idx], rep.veg_Ypost[idx], pdormR_Ypost[idx])
}
# Lifespans are nonnegative, so is.infinite() only ever flags +Inf here
# (the original compared the numeric vector against the string "Inf").
# NOTE(review): draws with NA lifespan (if any) now land in the non-infinite
# group -- confirm that is acceptable.
inf.params <- ypost_vitals(is.infinite(lifespan_Ypost))
noninf.params <- ypost_vitals(!is.infinite(lifespan_Ypost))
t.test(noninf.params[,6], inf.params[,6])   # column 6 = pdormR (rep dormancy prob.)
high.params <- ypost_vitals(lifespan_Ypost > 200)
low.params <- ypost_vitals(lifespan_Ypost < 200)
lowlow.params <- ypost_vitals(lifespan_Ypost < 10)
t.test(lowlow.params[,5], low.params[,5])   # column 5 = rep.veg transition
#vital rates from high lifespan estimates (>200 years) have the following differences from low lifespan estimates:
#1) higher phis for both reproductive and veg plants
#2) lower transition from reproductive to vegetative rep.veg_Ypost
#vital rates from low lifespan estimates (<10 years) have the following
#1) higher phis for both reproductive and veg plants
#2) higher transition from reproductive to vegetative rep.veg_Ypost
|
d661b4879f506a74cc74b88c3aa9a78080aa1a36
|
19706720652dd327c738e5b4ac30859fa87130e9
|
/cleaner final.R
|
aee8c455f09dd780e4d7939c9360a6c4b5841c26
|
[
"Apache-2.0"
] |
permissive
|
souravbose1991/toxic_element
|
bea9cbc03b714e5dffde20ed1331a305698a6ae1
|
abca8b6d1d88a925e411d0cc182cdd49966d08bf
|
refs/heads/master
| 2021-04-29T21:24:40.310464
| 2018-08-19T18:27:50
| 2018-08-19T18:27:50
| 121,615,503
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,761
|
r
|
cleaner final.R
|
# Load the Kaggle toxic-comment data and the word lists used for feature
# engineering. NOTE(review): "train.csv" is read from the working directory
# while the test set uses a hard-coded absolute Windows path -- make these
# consistent/portable before sharing this script.
train <- fread("train.csv", key=c("id"))
test <- fread("C:\\Users\\HP LAP\\Desktop\\Kaggle\\Data\\test\\test.csv", key=c("id"))
# English stopword list, one word per line.
stopwords.en <- fread("stopwords-en.txt")
# Profanity lexicon used to count offensive words per comment. The entries
# are joined with "|" into one regex later, so entries containing regex
# metacharacters (e.g. "l3i+ch", "sh!t") match slightly more than literal text.
profane <- c("damn", "dyke", "fuck", "shit", "ahole", "amcik", "andskota", "anus",
"arschloch", "arse", "ash0le", "ash0les", "asholes", "ass", "Ass Monkey", "Assface",
"assh0le", "assh0lez" , "asshole", "assholes", "assholz", "assrammer", "asswipe", "ayir",
"azzhole", "b00b", "b00bs", "b17ch", "b1tch", "bassterds", "bastard",
"bastards", "bastardz", "basterds", "basterdz", "bi7ch", "Biatch", "bitch", "bitch",
"bitches", "Blow Job", "blowjob", "boffing", "boiolas", "bollock", "boobs", "breasts",
"buceta", "butt-pirate", "butthole", "buttwipe", "c0ck", "c0cks",
"c0k", "cabron", "Carpet Muncher", "cawk", "cawks", "cazzo", "chink", "chraa", "chuj",
"cipa", "clit", "Clit", "clits", "cnts", "cntz", "cock", "cock-head", "cock-sucker",
"Cock", "cockhead", "cocks", "CockSucker", "crap", "cum", "cunt",
"cunt", "cunts", "cuntz", "d4mn", "daygo", "dego", "dick", "dick", "dike", "dild0",
"dild0s", "dildo", "dildos", "dilld0", "dilld0s", "dirsa", "dominatricks", "dominatrics",
"dominatrix", "dupa", "dyke", "dziwka", "ejackulate", "ejakulate", "Ekrem", "Ekto", "enculer",
"enema", "f u c k", "f u c k e r", "faen", "fag", "fag", "fag1t", "faget",
"fagg1t", "faggit", "faggot", "fagit", "fags", "fagz", "faig", "faigs", "fanculo", "fanny",
"fart", "fatass", "fcuk", "feces", "feg", "Felcher", "ficken", "fitt", "Flikker", "flipping the bird",
"foreskin", "Fotze", "fuck", "fucker", "fuckin", "fucking", "fucks", "Fudge Packer", "fuk", "fuk",
"Fukah", "Fuken", "fuker", "Fukin", "Fukk", "Fukkah", "Fukker", "Fukkin", "futkretzn", "fux0r",
"g00k", "gay", "gayboy", "gaygirl", "gays", "gayz", "God-damned", "gook",
"guiena", "h00r", "h0ar", "h0r", "h0re", "h4x0r", "hell", "hells", "helvete", "hoar", "hoer",
"hoer", "honkey", "hoore", "hore", "Huevon", "hui", "injun", "jackoff", "jap", "japs", "jerk-off",
"jisim", "jism", "jiss", "jizm", "jizz", "kanker", "kawk", "kike", "klootzak", "knob", "knobs",
"knobz", "knulle", "kraut", "kuk", "kuksuger", "kunt", "kunts", "kuntz", "Kurac", "kurwa", "kusi",
"kyrpa", "l3i+ch", "l3itch", "lesbian", "Lesbian", "lesbo", "Lezzian",
"Lipshitz", "mamhoon", "masochist", "masokist", "massterbait", "masstrbait", "masstrbate",
"masterbaiter", "masterbat", "masterbat3", "masterbate", "masterbates", "masturbat", "masturbate",
"merd", "mibun", "mofo", "monkleigh", "Motha Fucker", "Motha Fuker", "Motha Fukkah", "Motha Fukker",
"mother-fucker", "Mother Fucker", "Mother Fukah", "Mother Fuker", "Mother Fukker", "motherfucker",
"mouliewop", "muie", "mulkku", "muschi", "Mutha Fucker", "Mutha Fukah", "Mutha Fuker",
"Mutha Fukkah", "Mutha Fukker", "n1gr", "nastt", "nazi", "nazis", "nepesaurio", "nigga", "nigger",
"nigger", "nigger;", "nigur;", "niiger;", "niigr;", "nutsack", "orafis", "orgasim;", "orgasm", "orgasum",
"oriface", "orifice", "orifiss", "orospu", "p0rn", "packi", "packie", "packy", "paki", "pakie", "paky",
"paska", "pecker", "peeenus", "peeenusss", "peenus", "peinus", "pen1s", "penas", "penis", "penis-breath",
"penus", "penuus", "perse", "Phuc", "phuck", "Phuck", "Phuker", "Phukker", "picka", "pierdol", "pillu",
"pimmel", "pimpis", "piss", "pizda", "polac", "polack", "polak", "poontsee", "poop", "porn", "pr0n",
"pr1c", "pr1ck", "pr1k", "preteen", "pula", "pule", "pusse", "pussee", "pussy", "puto", "puuke", "puuker",
"qahbeh", "queef", "queer", "queers", "queerz", "qweers", "qweerz", "qweir", "rautenberg",
"rectum", "retard", "sadist", "scank", "schaffer", "scheiss", "schlampe", "schlong", "schmuck", "screw",
"screwing", "scrotum", "semen", "sex", "sexy", "sh!t", "Sh!t", "sh!t", "sh1t", "sh1ter", "sh1ts", "sh1tter",
"sh1tz", "sharmuta", "shemale", "shi+", "shipal", "shit", "shits", "shitter", "Shitty", "Shity", "shitz",
"shiz", "Shyt", "Shyte", "Shytty", "skanck", "skank", "skankee", "skankey", "skanks", "Skanky", "skribz",
"skurwysyn", "slut", "sluts", "Slutty", "slutz", "son-of-a-bitch", "sphencter", "spic", "spierdalaj",
"splooge", "suka", "teets", "teez", "testical", "testicle", "testicle", "tit", "tits", "titt", "titt",
"turd", "twat", "va1jina", "vag1na", "vagiina", "vagina", "vaj1na", "vajina", "vittu",
"vulva", "w00se", "w0p", "wank", "wank", "wetback", "wh00r", "wh0re", "whoar", "whore",
"wichser", "wop", "xrated", "xxx", "Lipshits", "Mother Fukkah", "zabourah", "Phuk", "Poonani",
"puta", "recktum", "sharmute", "Shyty", "smut", "vullva", "yed")
# Custom stopwords: Wikipedia/forum boilerplate plus standard English function
# words, removed before modelling. NOTE(review): the name is misspelled
# ("stowwords.custom") -- kept as-is in case it is referenced later in the file.
stowwords.custom <- c("put", "far", "bit", "well", "article", "articles", "edit", "edits", "page", "pages",
"talk", "page", "editor", "ax", "edu", "subject", "lines", "like", "likes", "line",
"uh", "oh", "also", "get", "just", "hi", "hello", "ok", "editing", "edited",
"dont", "use", "need", "take", "wikipedia", "give", "say",
"look", "one", "make", "come", "see", "said", "now",
"wiki", "know", "talk", "read", "hey", "time", "still",
"user", "day", "want", "tell", "edit", "even", "ain't", "wow", "image", "jpg", "copyright",
"sentence", "wikiproject", "background color", "align", "px", "pixel",
"org", "com", "en", "ip", "ip address", "http", "www", "html", "htm",
"wikimedia", "https", "httpimg", "url", "urls", "utc", "uhm","username","wikipedia",
"what", "which", "who", "whom", "this", "that", "these", "those",
"was", "be", "been", "being", "have", "has", "had", "having", "do", "does", "did",
"doing", "would", "should", "could", "ought", "isn't", "aren't", "wasn't", "weren't", "hasn't",
"haven't", "hadn't", "doesn't", "don't", "didn't", "won't", "wouldn't", "shan't", "shouldn't",
"can't", "cannot", "couldn't", "mustn't", "let's", "that's", "who's", "what's", "here's",
"there's", "when's", "where's", "why's", "how's", "a", "an", "the", "and", "but", "if",
"or", "because", "as", "until", "while", "of", "at", "by", "for", "with", "about", "against",
"between", "into", "through", "during", "before", "after", "above", "below", "to", "from",
"up", "down", "in", "out", "on", "off", "over", "under", "again", "further", "then", "once",
"here", "there", "when", "where", "why", "how", "all", "any", "both", "each", "few", "more",
"most", "other", "some", "such", "no", "nor", "not", "only", "own", "same", "so", "than",
"too", "very","articl","ani")
# Select which split to process. The commented alternatives show this script is
# run once per split (train vs test); currently it processes the test set only.
#train <- train %>% mutate(filter="train")
test <- test %>% mutate(filter="test")
#all_comments <- train %>% bind_rows(test)
#all_comments <- train
all_comments <- test
#all_comments <- all_comments[1:100,]
nrow(all_comments)
#******************************Train***************************************
# Create some new features relative to use of punctuation, emotj, ...
# Each comment is summarised by counts/proportions of capitals, symbols,
# links, emoticons, sentiment, and profanity. All *_len columns are
# normalised by total character length.
all_comments.features <- all_comments %>%
select(id, comment_text) %>%
mutate(
# Character-level counts and capitalisation ratios.
length = str_length(comment_text),
use_cap = str_count(comment_text, "[A-Z]"),
cap_len = use_cap / length,
use_cap3plus = str_count(comment_text, "\\b[A-Z]{3,}\\b"),
cap_len3plus = use_cap3plus / length,
use_lower = str_count(comment_text, "[a-z]"),
low_len = use_lower / length,
# Embedded objects: images, links, wiki links, non-printables, e-mails.
image_cnt = str_count(comment_text, "\\b[\\w|:]*\\.(jpg|png|svg|jpeg|tiff|gif|bmp)\\b"),
link_cnt = str_count(comment_text, "((f|ht)tp(s?)://\\S+)|(http\\S+)|(xml\\S+)"),
wikilink_cnt = str_count(comment_text, "Wikipedia:(\\w|[[:punct:]])+\\b"),
graph_cnt = str_count(comment_text, "[^[:graph:]]"),
email_cnt = str_count(comment_text, "\\b[A-Z0-9._%+-]+@[A-Z0-9.-]+\\.[A-Z]{2,}\\b"),
fact_cnt = image_cnt + link_cnt + wikilink_cnt + graph_cnt + email_cnt,
nicknames_cnt = str_count(comment_text, "@\\w+"),
# Punctuation / whitespace usage.
use_exl = str_count(comment_text, fixed("!")),
use_space = str_count(comment_text, fixed(" ")),
use_double_space = str_count(comment_text, fixed("  ")),
use_quest = str_count(comment_text, fixed("?")),
use_punt = str_count(comment_text, "[[:punct:]]"),
use_digit = str_count(comment_text, "[[:digit:]]"),
digit_len = use_digit / length,
use_break = str_count(comment_text, fixed("\n")),
use_invis = str_count(comment_text, fixed("\\p{C}")),
use_word = str_count(comment_text, "\\w+"),
word_len = use_word / length,
# Symbol runs (e.g. "$#*!" style masked profanity).
use_symbol = str_count(comment_text, "&|@|#|\\$|%|\\*|\\^"),
use_symbol2plus = str_count(comment_text, "[&|@|#|\\$|%|\\*|\\^]{2,}"),
use_symbol3plus = str_count(comment_text, "[&|@|#|\\$|%|\\*|\\^]{3,}"),
# NOTE(review): use_symbol is overwritten with a ratio here, and symbol_len
# below divides by length again (double normalisation) -- confirm intended.
use_symbol = use_symbol/ length,
use_char = str_count(comment_text, "\\W*\\b\\w\\b\\W*"),
use_i = str_count(comment_text, "(\\bI\\b)|(\\bi\\b)"),
i_len = use_i / length,
char_len = use_char / length,
symbol_len = use_symbol / length,
use_emotj = str_count(comment_text, "((?::|;|=)(?:-)?(?:\\)|D|P))"),
cap_emo = use_emotj / length,
# Length ratios after textclean replacements (emoticons, names, slang, ...).
prop_emot = str_count(replace_emoticon(comment_text))/ length,
prop_names = str_count(replace_names(comment_text))/ length,
prop_emoj = str_count(replace_emoji(comment_text))/ length,
prop_kern = str_count(replace_kern(comment_text))/ length,
prop_abbv = str_count(replace_abbreviation(comment_text))/ length,
prop_contra = str_count(replace_contraction(comment_text))/ length,
prop_slang = str_count(replace_internet_slang(comment_text))/ length,
word_cnt = str_count(comment_text, "\\w+"),
word_avglen = length / word_cnt,
shit_prop = str_count(replace_word_elongation(comment_text))/length,
use_nonascii = str_count(comment_text, "[^[:ascii:]]"),
# Mean sentiment per comment (sentimentr); element 4 is ave_sentiment.
avg_sent = ((sentiment_by(get_sentences(comment_text)))[[4]]),
uniqueword = lengths(regmatches(uniqueWords(comment_text), gregexpr("\\w+", uniqueWords(comment_text)))),
prop_unique = uniqueword/lengths(regmatches(comment_text, gregexpr("\\w+", comment_text))),
# Profanity hits: the lexicon is collapsed into a single alternation regex.
n_fword = str_count(comment_text, paste(profane,collapse = '|')),
prop_fword = n_fword/word_cnt
) %>%
select(-id) %T>%
glimpse()
#count stopwords
# Character count (and proportion) remaining after stripping English stopwords.
all_comments.features$propstopwords_cnt <- str_count(removeWords(all_comments.features$comment_text, stopwords("en")))
all_comments.features$propstopwords <- str_count(removeWords(all_comments.features$comment_text, stopwords("en")))/all_comments.features$length
#Package conversions
# Normalise the raw text in place using textclean helpers, AFTER the length
# features above were computed from the raw text. Order matters: transliterate
# to ASCII first, then expand emoticons/emoji/abbreviations/contractions,
# replace names, collapse elongations ("soooo" -> "so"), and expand slang.
all_comments.features$comment_text <- iconv(all_comments.features$comment_text, to='ASCII//TRANSLIT')
all_comments.features$comment_text <- replace_emoticon(all_comments.features$comment_text)
all_comments.features$comment_text <- replace_emoji(all_comments.features$comment_text)
all_comments.features$comment_text <- replace_kern(all_comments.features$comment_text)
all_comments.features$comment_text <- replace_abbreviation(all_comments.features$comment_text)
all_comments.features$comment_text <- replace_contraction(all_comments.features$comment_text)
all_comments.features$comment_text <- replace_names(all_comments.features$comment_text)
all_comments.features$comment_text <- replace_word_elongation(all_comments.features$comment_text)
all_comments.features$comment_text <- replace_internet_slang(all_comments.features$comment_text)
#POS Features
# Part-of-speech features: tag each comment once with qdap::pos(), then derive
# both the count columns and the proportion columns from the SAME tagging run.
# (Previously pos() was run a second time in the else-branch, and on a
# non-existent `clean_comment_text` column -- `comment_text` is the column
# this script maintains.)
pos_res <- pos(all_comments.features$comment_text,progress.bar = TRUE,
parallel = TRUE, cores = detectCores())
posdat_count <- counts(pos_res)
if (length(posdat_count) == 2){
# Degenerate case: tagging returned only a word count plus one POS column,
# so build the single proportion column by hand.
posdat_prop <- data.frame()
posdat_prop[1,1] <- posdat_count[1,1]
colnames(posdat_prop)[1] <- "pos_prp_wrd.cnt"
colnames(posdat_prop)[2] <- paste0("pos_prp_", colnames(posdat_count)[2])
} else {
posdat_prop <- proportions(pos_res)
names(posdat_prop) = paste0("pos_prp_", names(posdat_prop))
}
# Prefix the raw-count columns and attach both blocks to the feature table.
names(posdat_count) = paste0("pos_cnt_", names(posdat_count))
all_comments.features <- cbind(all_comments.features,posdat_count,posdat_prop)
gc()
head(all_comments.features)
nrow(all_comments.features)
#Run it for both train and test and then combine
# NOTE(review): `all_comments.features_test` is never created in this file --
# the workflow appears to be: run the feature block once per split, rename the
# result, then execute these lines. Overwriting all_comments.features with the
# "train"-tagged copy before binding is order-sensitive; confirm run order.
all_comments.features <- all_comments.features %>% mutate(filter="train")
all_comments.features_test <- all_comments.features_test %>% mutate(filter="test")
all_comments.features <- all_comments.features %>% bind_rows(all_comments.features_test)
# Remove all special chars, clean text and transform words.
# Single ordered cleaning pipeline: lowercase -> strip links/emails ->
# expand contractions -> collapse obfuscated profanity -> de-leet masked
# profanity -> strip nicknames/digits/punctuation -> tokenize + stem.
# The step order is load-bearing; do not reorder.
all_comments.clean <- all_comments.features %$%
str_to_lower(comment_text) %>%
# clear link
str_replace_all("(f|ht)tp(s?)://\\S+", " ") %>%
str_replace_all("http\\S+", "") %>%
str_replace_all("xml\\S+", "") %>%
str_replace_all("\\b\\w*:*\\w*\\.(jpg|png|svg|jpeg|tiff|gif|bmp)\\b", "") %>%
str_replace_all("((f|ht)tp(s?)://\\S+)|(http\\S+)|(xml\\S+)", "") %>%
#str_replace_all("Wikipedia:(\\w|[[:punct:]])+\\b", "") %>%
str_replace_all("\\b[A-Z0-9._%+-]+@[A-Z0-9.-]+\\.[A-Z]{2,}\\b", "") %>%
str_replace_all("\n", "") %>%
#str_replace_all("\\p{C}", "") %>%
# multiple whitespace to one
str_replace_all("\\s{2}", " ") %>%
# transform short forms (contractions) before punctuation is stripped
str_replace_all("what's", "what is ") %>%
str_replace_all("\\'s", " is ") %>%
str_replace_all("\\'ve", " have ") %>%
str_replace_all("can't", "cannot ") %>%
str_replace_all("n't", " not ") %>%
str_replace_all("i'm", "i am ") %>%
str_replace_all("\\'re", " are ") %>%
str_replace_all("\\'d", " would ") %>%
str_replace_all("\\'ll", " will ") %>%
str_replace_all("\\'scuse", " excuse ") %>%
str_replace_all("pleas", " please ") %>%
str_replace_all("sourc", " source ") %>%
str_replace_all("peopl", " people ") %>%
str_replace_all("remov", " remove ") %>%
# multiple whitespace to one
str_replace_all("\\s{2}", " ") %>%
# collapse junk text (drawn-out laughter, repeated letters)
str_replace_all("(a|e)w+\\b", "") %>%
str_replace_all("(y)a+\\b", "") %>%
str_replace_all("(w)w+\\b", "") %>%
str_replace_all("((a+)|(h+))(a+)((h+)?)\\b", "") %>%
str_replace_all("((lol)(o?))+\\b", "") %>%
# re-join space-obfuscated profanity so the profanity counts catch it
str_replace_all("n ig ger", " nigger ") %>%
str_replace_all("s hit", " shit ") %>%
str_replace_all("g ay", " gay ") %>%
str_replace_all("f ag got", " faggot ") %>%
str_replace_all("c ock", " cock ") %>%
str_replace_all("cu nt", " cunt ") %>%
str_replace_all("idi ot", " idiot ") %>%
str_replace_all("f u c k", " fuck ") %>%
str_replace_all("fu ck", " fuck ") %>%
str_replace_all("f u ck", " fuck ") %>%
str_replace_all("c u n t", " cunt ") %>%
str_replace_all("s u c k", " suck ") %>%
str_replace_all("c o c k", " cock ") %>%
str_replace_all("g a y", " gay ") %>%
str_replace_all("ga y", " gay ") %>%
str_replace_all("i d i o t", " idiot ") %>%
str_replace_all("cocksu cking", "cock sucking") %>%
str_replace_all("du mbfu ck", "dumbfuck") %>%
str_replace_all("cu nt", "cunt") %>%
# lookaround passes: remove a stray space inside split words like "fu ck"
str_replace_all("(?<=\\b(fu|su|di|co|li))\\s(?=(ck)\\b)", "") %>%
str_replace_all("(?<=\\w(ck))\\s(?=(ing)\\b)", "") %>%
str_replace_all("(?<=\\b\\w)\\s(?=\\w\\b)", "") %>%
str_replace_all("((lol)(o?))+", "") %>%
str_replace_all("(?<=\\b(fu|su|di|co|li))\\s(?=(ck)\\b)", "") %>%
str_replace_all("(?<=\\w(uc))\\s(?=(ing)\\b)", "") %>%
str_replace_all("(?<=\\b(fu|su|di|co|li))\\s(?=(ck)\\w)", "") %>%
str_replace_all("(?<=\\b(fu|su|di|co|li))\\s(?=(k)\\w)", "c") %>%
# de-leet symbol-masked profanity (fixed() = literal match, no regex)
str_replace_all(fixed("sh*t"), "shit") %>%
str_replace_all(fixed("$h*t"), "shit") %>%
str_replace_all(fixed("$#*!"), "shit") %>%
str_replace_all(fixed("$h*!"), "shit") %>%
str_replace_all(fixed("sh!t"), "shit") %>%
str_replace_all(fixed("@ss"), "ass") %>%
str_replace_all(fixed("@$$"), "ass") %>%
str_replace_all(fixed("a$$"), "ass") %>%
str_replace_all(fixed("f*ck"), "fuck") %>%
str_replace_all(fixed("f*uck"), "fuck") %>%
str_replace_all(fixed("f***"), "fuck") %>%
str_replace_all(fixed("f**k"), "fuck") %>%
str_replace_all(fixed("c0ck"), "cock") %>%
str_replace_all(fixed("a55"), "ass") %>%
str_replace_all(fixed("$h1t"), "shit") %>%
str_replace_all(fixed("b!tch"), "bitch") %>%
str_replace_all(fixed("bi+ch"), "bitch") %>%
str_replace_all(fixed("l3itch"), "bitch") %>%
str_replace_all(fixed("p*ssy"), "pussy") %>%
str_replace_all(fixed("d*ck"), "dick") %>%
str_replace_all(fixed("n*gga"), "nigga") %>%
str_replace_all(fixed("f*cking"), "fucking") %>%
str_replace_all(fixed("shhiiitttt"), "shit") %>%
str_replace_all(fixed("c**t"), "cunt") %>%
str_replace_all(fixed("a**hole"), "asshole") %>%
str_replace_all(fixed("@$$hole"), "asshole") %>%
# FIX: was fixed("fu"), which replaced the substring "fu" inside EVERY word
# (e.g. "fuck" -> "fuck youck", "fun" -> "fuck youn"). Only expand the
# standalone abbreviation "fu".
str_replace_all("\\bfu\\b", "fuck you") %>%
str_replace_all(fixed("wtf"), "what the fuck") %>%
str_replace_all(fixed("ymf"), "motherfuck") %>%
str_replace_all(fixed("f*@king"), "fucking") %>%
str_replace_all(fixed("$#!^"), "shit") %>%
str_replace_all(fixed("m0+#3rf~ck3r"), "motherfuck") %>%
str_replace_all(fixed("pi55"), "piss") %>%
str_replace_all(fixed("c~nt"), "cunt") %>%
str_replace_all(fixed("c0ck$~ck3r"), "cocksucker") %>%
# clean nicknames
str_replace_all("@\\w+", " ") %>%
# clean digit
str_replace_all("[[:digit:]]", " ") %>%
# remove linebreaks
str_replace_all("\n", " ") %>%
# remove graphics
#str_replace_all("[^[:graph:]]", " ") %>%
str_replace_all("'s\\b", " ") %>%
# remove punctuation (if remain...)
str_replace_all("[[:punct:]]", " ") %>%
str_replace_all("[^[:alnum:]]", " ") %>%
# remove single char
str_replace_all("\\W*\\b\\w\\b\\W*", " ") %>%
# remove words with len < 2
str_replace_all("\\b\\w{1,2}\\b", " ") %>%
# multiple whitespace to one
str_replace_all("\\s{2}", " ") %>%
str_replace_all("\\s+", " ") %>%
itoken(tokenizer = tokenize_word_stems)
# Build the embedding matrix for the first `max_words` vocabulary entries
# from pre-trained GloVe 840B/300d vectors; unknown words get zero vectors.
max_words = 200000
glove = fread("glove840b300dtxt/glove.840B.300d.txt", data.table = FALSE) %>%
rename(word=V1) %>%
mutate(word=gsub("[[:punct:]]"," ", rm_white(word) ))
# NOTE(review): all_comments.clean is an itoken iterator at this point;
# left_join() on it looks wrong -- presumably this should join the vocabulary
# (a data frame of words) against glove. Confirm against the modelling step.
word_embed = all_comments.clean %>%
left_join(glove)
J = ncol(word_embed)
# First two columns are word/metadata; remaining J-2 are the embedding dims.
ndim = J-2
word_embed = word_embed [1:(max_words-1),3:J] %>%
mutate_all(as.numeric) %>%
mutate_all(round,6) %>%
#fill na with 0
# NOTE(review): funs() is deprecated in dplyr >= 0.8; list(~ ...) is the
# modern equivalent.
mutate_all(funs(replace(., is.na(.), 0)))
colnames(word_embed) = paste0("V",1:ndim)
# Row 1 is the all-zero padding vector; result shaped (max_words, ndim).
word_embed = rbind(rep(0, ndim), word_embed) %>%
as.matrix()
word_embed = list(array(word_embed , c(max_words, ndim)))
|
a5820d8a9bf966a5f97255e07733d4ccfc6b0d6d
|
7cd1f7f9555954476d9538c070e5a43ef93ce3d2
|
/man/parse_keyvals.Rd
|
d24551531900347e221d2ff7830714ef4b71fe67
|
[] |
no_license
|
vsbuffalo/msr
|
f015447cc8815ea6ae9a24f5a451537edfa0b087
|
18fb0020ceb8c6e45b82dfd036dda7e03f64a163
|
refs/heads/master
| 2021-01-11T14:35:19.611932
| 2018-05-25T19:41:19
| 2018-05-25T19:41:19
| 80,166,891
| 20
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 430
|
rd
|
parse_keyvals.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parse_ms.r
\name{parse_keyvals}
\alias{parse_keyvals}
\title{Parse MS's key/value pairs, e.g. segsites and positions
returning a list of key/vals (where vals can be list too)}
\usage{
parse_keyvals(x)
}
\description{
Parse MS's key/value pairs, e.g. segsites and positions
returning a list of key/vals (where vals can be list too)
}
\keyword{internal}
|
59621a895e08d583a6c05de5f2b8c7b3b64f45b4
|
f44335c0bb9597c994c06611ef34b4c4fe9637c1
|
/R/listas.R
|
20397d379c01d21a25d0028fc57a6a93b87136af
|
[] |
no_license
|
cran/INQC
|
7467d5c33cf59d1602fc6c82549f49613743d297
|
ddce985594be74cdf93b135c0febd4ef7cfb3c1e
|
refs/heads/master
| 2023-03-17T14:39:12.484132
| 2021-05-24T13:00:02
| 2021-05-24T13:00:02
| 334,129,195
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,407
|
r
|
listas.R
|
listas<-function(country='all',name='allstations.txt'){ #NECESITO parametrizar listas. Usar esa parametrizacion par subset de downloads too.
  #' Creates listings for stations ('non-blended' case) linking STAID and SOUID
  #' @description This function takes all the elements and rbinds them into a single list to process
  #' @param country country for which the list is created. If 'all', no country filter.
  #' @param name output file name, do not touch, default is always good.
  #' @return data frame and the list file containing all stations for all elements, linking STAID and SOUID
  #' and metadata
  #' @examples
  #' #Set a temporal working directory:
  #' wd <- tempdir(); wd0 <- setwd(wd)
  #' #Extract the non-blended ECA&D station files from the example data folder
  #' #Only TX (maximum air temperature) and CC (cloud cover) variables are used in the example
  #' path2txlist<-system.file("extdata", "ECA_blend_source_tx.txt", package = "INQC")
  #' txlist<-readr::read_lines_raw(path2txlist)
  #' readr::write_lines(txlist,'ECA_blend_source_tx.txt')
  #' path2cclist<-system.file("extdata", "ECA_blend_source_cc.txt", package = "INQC")
  #' cclist<-readr::read_lines_raw(path2cclist)
  #' readr::write_lines(cclist,'ECA_blend_source_cc.txt')
  #' options("homefolder"='./')
  #' liston.nb<-listas(country='all',name='allstations.txt')
  #' #The created list file can be found in the directory:
  #' print(wd)
  #' #Return to user's working directory:
  #' setwd(wd0)
  #' @export
  #Get value of 'Global variable' 'homefolder'
  homefolder <- getOption("homefolder")
  #All ECA&D element codes whose "source" files may be present
  variables <- c('TX','TN','TG','RR','HU','PP','SS','FG','FX','DD','SD','CC')
  #Column layout shared by every ECA_blend_source_*.txt file
  col_names <- c('STAID','SOUID','SOUNAME','CN','LAT','LON','HGTH','ELEI',
                 'START','STOP','PARID','PARNAME')
  #Read every source file that exists; collect them and rbind once at the end
  #(avoids growing a data frame inside the loop)
  found <- list()
  for (i in seq_along(variables)) {
    src_file <- paste(homefolder, 'ECA_blend_source_', tolower(variables[i]), '.txt', sep='')
    if (file.exists(src_file)) {
      x <- utils::read.csv(src_file, header=FALSE, stringsAsFactors=FALSE,
                           flush=TRUE, strip.white=TRUE)
      names(x) <- col_names
      found[[length(found) + 1]] <- x
    }
  }
  #Fail with a clear message instead of the former "object 'todas' not found"
  if (length(found) == 0) {
    stop("No 'ECA_blend_source_*.txt' files found in ", homefolder, call. = FALSE)
  }
  todas <- do.call(rbind, found)
  #Optional country filter on the CN (country code) column
  if (country != 'all') {
    todas <- todas[which(todas$CN == country), ]
  }
  utils::write.csv(todas, paste(homefolder, name, sep='')) ## as consequence of the previous action
  return(todas)
}
|
dfb109a560663c8d93fbb9c33b8952b1378225af
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/sppmix/man/GetBDCompfit.Rd
|
152097f9e40cfb5b44abc45f41fba8d431dc3149
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,275
|
rd
|
GetBDCompfit.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/postgen_ops.R
\name{GetBDCompfit}
\alias{GetBDCompfit}
\title{Retrieve parts of a BDMCMC fit}
\usage{
GetBDCompfit(BDfit, num_comp, burnin = floor(BDfit$L/10))
}
\arguments{
\item{BDfit}{Object of class \code{damcmc_res}.}
\item{num_comp}{Number of components requested. Only
the posterior realizations that have this many components will be returned. The function
fails if the BDMCMC chain never visited this number of components.}
\item{burnin}{Number of initial realizations to discard. By default, it is 1/10 of the total number of iterations.}
}
\value{
A list containing the following:
\item{BDgens}{Realizations corresponding to this many mixture components. This is a \code{damcmc_res} object (same as the result of a \code{\link{est_mix_damcmc}} call). All realizations for the requested number of components are returned, that is, burnin is not applied to this object.}
\item{BDsurf}{For the requested \code{num_comp}, this is the Poisson intensity surface based on the corresponding posterior means (label switching might be present).}
\item{BDnormmix}{For the requested \code{num_comp}, this is a \code{\link{normmix}} object containing the corresponding ps, mus and sigmas (label switching might be present).}
}
\description{
The function can be used to obtain the realizations and
the corresponding surface of posterior means, for a specific
number of components. Use \code{\link{GetPMEst}} if you want just the surface.
For examples see
\url{http://faculty.missouri.edu/~micheasa/sppmix/sppmix_all_examples.html
#GetBDCompfit}
}
\examples{
\donttest{
fit <- est_mix_bdmcmc(pp = spatstat::redwood, m = 7)
GetBDTable(fit)
#retrieve all BDMCMC realizations corresponding to a mixture with 5 components
BDfit5comp=GetBDCompfit(fit,5)
plot(BDfit5comp$BDsurf,main="Mixture intensity surface with 5 components")
#plot with the correct window
plot(BDfit5comp$BDnormmix,xlim =BDfit5comp$BDsurf$window$xrange,ylim =
BDfit5comp$BDsurf$window$yrange )
plot(BDfit5comp$BDgens)}
}
\seealso{
\code{\link{est_mix_bdmcmc}},
\code{\link{GetBDTable}},
\code{\link{plot.damcmc_res}},
\code{\link{plot.normmix}}
}
\author{
Sakis Micheas
}
|
075a2ad8ae7a55d5a7ee9969a0d77f07609120b9
|
06d9afe4e9666407ff607b142649d4c6e944d674
|
/man/ezDesign.Rd
|
bb0a61f165e066500fbb5c5dc7723429155bbefd
|
[] |
no_license
|
cran/ez
|
fe4ae993c2ed1042d6f84c64e368970c502a5bff
|
1d7a35d30f31b1671e7f6548b15864ddfe61c5ef
|
refs/heads/master
| 2021-07-10T23:03:03.489960
| 2016-11-02T18:17:31
| 2016-11-02T18:17:31
| 17,695,925
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,248
|
rd
|
ezDesign.Rd
|
\name{ezDesign}
\alias{ezDesign}
\title{Plot the balance of data in an experimental design}
\description{
This function provides easy visualization of the balance of data in a data set given a specified experimental design. This function is useful for identifying missing data and other issues (see examples).
}
\usage{
ezDesign(
data
, x
, y
, row = NULL
, col = NULL
, cell_border_size = 10
)
}
\arguments{
\item{data}{
Data frame containing the data to be visualized.
}
\item{x}{
Name of the variable to plot on the x-axis.
}
\item{y}{
Name of the variable to plot on the y-axis.
}
\item{row}{
Name of a variable by which to split the data into facet rows.
}
\item{col}{
Name of a variable by which to split the data into facet columns.
}
\item{cell_border_size}{
Numeric value specifying the size of the border separating cells (0 specifies no border)
}
}
\details{
The function works by counting the number of rows in \code{data} in each cell of the design specified by the factorial combination of \code{x}, \code{y}, \code{row}, \code{col} variables.
}
\value{
A printable/modifiable ggplot2 object.
}
\author{
Michael A. Lawrence \email{mike.lwrnc@gmail.com}\cr
Visit the \code{ez} development site at \url{http://github.com/mike-lawrence/ez}\cr
for the bug/issue tracker and the link to the mailing list.
}
\seealso{
\code{\link{ezPrecis}}
}
\examples{
#Read in the ANT2 data (see ?ANT2).
data(ANT2)
head(ANT2)
ezPrecis(ANT2)
#toss NA trials
ANT2 = ANT2[!is.na(ANT2$rt),]
ezDesign(
data = ANT2
, x = trial
, y = subnum
, row = block
, col = group
)
#subnum #7 is missing data from the last half of the experiment
\dontrun{
ezDesign(
data = ANT2
, x = flank
, y = subnum
, row = cue
)
#again, subnum#7 has half the data as the rest
#now look at error rates, which affect the number of RTs we can use
ezDesign(
data = ANT2[ANT2$error==0,]
, x = flank
, y = subnum
, row = cue
)
#again, subnum#7 stands out because they have half the data as the rest
#also, subnum#14 has no data in any incongruent cells, suggesting that
##they made all errors in this condition
#finally, subnum#12 has virtually no data, suggesting that they mistakenly
##swapped responses
}
}
|
257cddc3647acb198e381e68d9529afca110fa07
|
e2f3fee3cb8f1abdee08724f0fe8a89b5756cfbe
|
/COSTdata/man/FRS_ob_1999.Rd
|
b8723bdcffadb91d85ed0e31506de4222be3d5d9
|
[] |
no_license
|
BackupTheBerlios/cost-project
|
1a88c928f4d99db583a95324b31d6a02d9bd20c9
|
4ab39d16c48f031ca46512545895cb17e5586139
|
refs/heads/master
| 2021-01-21T12:39:53.387734
| 2012-03-26T14:58:36
| 2012-03-26T14:58:36
| 40,071,425
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,125
|
rd
|
FRS_ob_1999.Rd
|
\name{FRS_ob_1999}
%\alias{FRS_ob_trips}
\alias{FRS_ob_1999}
\docType{data}
\title{FRS observer data}
\description{
FRS observer data in the COST data exchange format.
\cr Consists of 53 demersal sampling trips from 1999. Discards length distributions are sampled by haul, the landed length distribution is sampled by trip, and the age given length distribution of a sub-sample of the discarded fraction is pooled by trip.
}
\usage{
data(FRS_ob_1999)
}
\format{
Formal class 'csData' [package "COSTcore"] objects with 6 slots
\cr@desc: description
\cr@tr: data.frame of 16 variables
\cr@hh: data.frame of 29 variables
\cr@sl: data.frame of 17 variables
\cr@hl: data.frame of 16 variables
\cr@ca: data.frame of 31 variables
\cr see csData for details of the variables
}
\details{
The FRS observer sampling protocol is as follows: \cr
From each haul during the trip the discarded fraction of the catch (consisting of unsorted fish of various species) is
sub-sampled by the observer; two representative baskets of the discarded fish being obtained. The ratio of the discarded weight to sub-sampled weight is
estimated by the observer and expressed in terms of "baskets".
The length frequencies are taken from all cod, haddock, whiting and saithe in the sub-sample.
The otoliths are collected from cod, haddock, whiting and saithe of each length class in the sub-sampled fraction until (usually) 5 otoliths are obtained for each length class for each species.
Some length classes are not represented and some have fewer than 5 individuals.
At the end of the trip the landed fraction of the catch, which will have been sorted into commercial size classes, is sub-sampled and the length frequencies recorded for cod, haddock, whiting and saithe. No otoliths are taken of the landed fraction.
This observer data therefore consists of three components:
\cr1. The length distribution for a sub-sample of the discarded fraction of the catch by haul.
This is contained in the hl table where \kbd{\$catchCat} = "DIS", trips are identifiable
by trip code \kbd{\$trpCode}, and the individual hauls by station number \kbd{\$staNum}
\cr2. The length distribution of a sub-sample of the landed fraction pooled by trip.
This is contained in the hl table where \kbd{\$catchCat} = "LAN", and the station number is \kbd{\$staNum = 999}.
\cr3. The age given length distribution of a sub-sampled fraction of the discarded catch pooled by trip.
This is contained in the ca table where \kbd{\$catchCat} = "DIS", and the station number is \kbd{\$staNum} = 999.
Note that to obtain a catch weight \kbd{\$wt} and sub-sampled weight \kbd{\$subSampWt} by species in the sl table these values have been obtained retrospectively from the species' length frequency distribution using a standard weight length relationship, and the raising factor of number of discarded "baskets" to number sub-sampled baskets.
}
\section{Warning }{This is a test data set only and should not to be used or cited without prior permission.}
\source{
FRS Marine Laboratory, Aberdeen, Scotland.
}
\examples{
data(FRS_ob_1999)
}
\keyword{datasets}
|
b43b2f4dcf308e25c52ed4a407ee709ad3aed10d
|
0e6d3ed19aa2ef50bf4e4bd164cb3383c106a84f
|
/GWAS/JIA/individual_level/jia_analysis.R
|
f149c4fd13a82aa4724fa2e0239e4807753193cf
|
[
"MIT"
] |
permissive
|
ollyburren/basis_paper
|
4cdefd86a8811efb0bbeeae6975b804f6f7639a6
|
7393390c1b1f5b673049202d293994704cebeafb
|
refs/heads/master
| 2020-03-29T22:06:03.889635
| 2019-10-23T17:06:15
| 2019-10-23T17:06:15
| 150,402,845
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,249
|
r
|
jia_analysis.R
|
library(cowplot)
## analyse jia projections
## Load per-individual basis projections for each JIA subtype, reshape them to
## long format (one row per individual x principal component) and run some
## exploratory t tests on PC3.
## NOTE(review): this script uses data.table(), melt(), rbindlist() and %>%
## but only attaches cowplot here -- it assumes data.table and magrittr are
## already attached by the calling environment; confirm before running
## stand-alone.
DATA.DIR <- '/home/ob219/share/as_basis/GWAS/individual_data/individual_proj'
all.files <- list.files(path=DATA.DIR,pattern="*.RDS",full.names=TRUE)
BASIS_FILE <- '/home/ob219/share/as_basis/GWAS/support/ss_basis_gwas.RDS'
VARIANCE_FILE <- '/home/ob219/share/as_basis/GWAS/support/ss_av_june.RDS'
# Each RDS file appears to hold a matrix of individuals x components; the
# trait name is recovered by stripping "_projection.RDS" from the file name.
res <- lapply(all.files,function(f){
trait <- basename(f) %>% gsub("\\_projection\\.RDS","",.)
dat <- readRDS(f)
t.DT <- data.table(ind=rownames(dat),dat)
melt.DT <- melt(t.DT,id.vars='ind')
melt.DT[,trait:=trait]
}) %>% rbindlist
# Drop the undifferentiated/missing JIA categories and the raj_cd14/raj_cd4
# reference traits from all downstream comparisons.
res <- res[!trait %in% c('jiaUnA','jiamissing','raj_cd14','raj_cd4'),]
# PC3: jiaERA + jiasys together vs all remaining subtypes, then each alone
# vs the remaining subtypes.
t.test(res[variable=='PC3' & trait %in% c('jiaERA','jiasys')]$value,res[variable=='PC3' & !trait %in% c('jiaERA','jiasys')]$value)
t.test(res[variable=='PC3' & trait=='jiaERA']$value,res[variable=='PC3' & !trait %in% c('jiaERA','jiasys')]$value)
t.test(res[variable=='PC3' & trait=='jiasys']$value,res[variable=='PC3' & !trait %in% c('jiaERA','jiasys')]$value)
## look at all of them
## Pairwise t tests between every pair of subtypes on each of PC1-PC10.
traits <- res$trait %>% unique
all.compare <- lapply(paste('PC',1:10,sep=''),function(PC){
message(PC)
lapply(traits,function(tra){
lapply(traits,function(tra2){
tt <- t.test(res[variable==PC & trait==tra]$value,res[variable==PC & trait==tra2]$value)
data.table(pc=PC,trait1=tra,trait2=tra2,p=tt$p.value,t.stat=tt$statistic)
}) %>% rbindlist
}) %>% rbindlist
}) %>% rbindlist
# Drop the self comparisons (trait vs itself).
all.compare <- all.compare[trait1 != trait2,]
## get rid of reciprocal comparisons
## (A vs B and B vs A give t statistics of equal magnitude, opposite sign,
## so deduplicating on abs(t.stat) keeps one row per unordered pair).
all.compare <- all.compare[which(!duplicated(abs(t.stat))),]
## BUGFIX: removed the stray line `all.compare[abs(t)]` that was here -- `t`
## is base R's transpose function in this scope, so abs(t) is a runtime
## error; the line was leftover debugging and produced nothing.
all.compare[,fdr:=p.adjust(p,method="fdr")]
all.compare[fdr<0.05,]
# Compare each subtype against all remaining subtypes pooled, per component.
all.compare.rest <- lapply(paste('PC',1:10,sep=''),function(PC){
message(PC)
lapply(traits,function(tra){
tt <- t.test(res[variable==PC & trait==tra]$value,res[variable==PC & trait!=tra]$value)
data.table(pc=PC,trait1=tra,p=tt$p.value,t.stat=tt$statistic)
}) %>% rbindlist
}) %>% rbindlist
# NOTE(review): the column is named `fdr` but the adjustment applied here is
# Bonferroni (within each component), not FDR -- the name is misleading.
all.compare.rest[,fdr:=p.adjust(p,method="bonferroni"),by=pc]
all.compare.rest[fdr<0.05,]
## get mean and variance across trait and pc
## Summarise the individual-level loadings per subtype x component, compare
## the mean loading against the basis control loading, and draw the figures.
summ.DT <- res[,list(mean.load=mean(value),var.load=var(value)),by=c('trait','variable')]
pc.emp <- readRDS(BASIS_FILE)
basis.DT <- data.table(trait=rownames(pc.emp$x),pc.emp$x)
# Extract the control trait's loadings, transposed so each row is a component.
tmp <- basis.DT[trait=='control',] %>% t
ctrl.DT <- data.table(variable=rownames(tmp)[-1],control.loading=as.numeric(tmp[-1,1]))
## BUGFIX: the variance-file read and its merge were commented out, which
## left `mfactor` (used in the Z computation below) undefined, and a leftover
## `.../sqrt()` line (sqrt with no argument) threw an error unconditionally.
## Restored the read/merge and removed the broken line.
## NOTE(review): assumes the table in ss_av_june.RDS has columns `pc` and
## `mfactor` -- confirm against the file before trusting the Z scores.
var.DT <- readRDS(VARIANCE_FILE)
summ.DT[,variable:=factor(variable,levels=paste0('PC',1:11))]
summ.DT <- merge(summ.DT,ctrl.DT,by='variable')
summ.DT <- merge(summ.DT,var.DT,by.x='variable',by.y='pc')
# Z score for the departure of each subtype's mean loading from control,
# and the corresponding two-sided p value.
summ.DT[,Z:=(mean.load-control.loading)/sqrt(mfactor)]
summ.DT[,p.value:=pnorm(abs(Z),lower.tail=FALSE) * 2]
#bb.DT.m[,p.adj:=p.adjust(p.value),by='variable']
summ.DT[,p.adj:=p.adjust(p.value),by='variable']
# (Redundant: `variable` was already converted to this factor above.)
summ.DT[,variable:=factor(variable,levels=paste0('PC',1:11))]
summ.DT[,short.trait:=substr(trait,1,15),]
summ.DT[,Subtype:=gsub("^jia","",trait)]
pd <- position_dodge(0.1)
#pa <- ggplot(summ.DT[!trait %in% c('jiaUnA','jiamissing','raj_cd14','raj_cd4'),],aes(x=variable,y=mean.load-control.loading,group=Subtype,col=Subtype)) + geom_point(position=pd) +
#geom_line(position=pd) + guides(size=FALSE) + xlab("Principal Component") + ylab(expression(Delta~"Control Loading")) +
# Genotype-level figure: delta from the control loading per component.
pa <- ggplot(summ.DT[!trait %in% c('jiaUnA','jiamissing','raj_cd14','raj_cd4'),],aes(x=variable,y=mean.load-control.loading,group=Subtype,col=Subtype)) + geom_point(size=2,position=pd) +
geom_line(position=pd) + ylab(expression(Delta*"Control Loading")) + xlab("Principal Component") + geom_hline(yintercept=0,color="black") +
background_grid(major = "xy", minor = "none") + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust=1))
save_plot(pa,file="~/tmp/ind_jia_line.pdf",base_aspect=1.3)
# Summary-statistic projections of the same subtypes, for comparison.
jia.sum <- readRDS("~/share/as_basis/GWAS/tmp/jia_plot.RDS")
jia.sum[,Subtype:=gsub("jia\\_","",trait)]
pb <- ggplot(jia.sum[!trait %in% c('jiaUnA','jiamissing'),],aes(x=variable,y=value-control.loading,group=Subtype,col=Subtype)) + geom_point(position=pd) +
geom_line(position=pd) + guides(size=FALSE) + xlab("Principal Component") + ylab(expression(Delta~"Control Loading"))
## comparison between summary and individual data
plot_grid(pa + ggtitle("Genotypes") + geom_hline(yintercept=0,color='black'),pb + ggtitle("Summary statistics") + geom_hline(yintercept=0,color='black'), nrow=2)
dev.print(pdf,"~/tmp/gt_vs_summ.pdf")
## next do t.test of sys and era and the rest to see which PC's are important to discern between
# NOTE(review): `diff` and `diff2` shadow base::diff() for the session.
g1 <- c('jiaERA','jiasys')
diff <- lapply(paste('PC',1:11,sep=''),function(y){
t.tmp <- t.test(res[variable==y & trait %in% g1,]$value,res[variable==y & !trait %in% g1,]$value)
data.table(pc=y,p.value=t.tmp$p.value,tstat=t.tmp$statistic)
}) %>% rbindlist()
diff[,p.adj:=p.adjust(p.value)]
## other class is
g1 <- c('jiaPsA','jiaEO')
g2 <- c('jiaRFneg','jiaRFpos','jiaPO')
diff2 <- lapply(paste('PC',1:11,sep=''),function(y){
t.tmp <- t.test(res[variable==y & trait %in% g1,]$value,res[variable==y & trait %in% g2,]$value)
data.table(pc=y,p.value=t.tmp$p.value,tstat=t.tmp$statistic)
}) %>% rbindlist()
diff2[,p.adj:=p.adjust(p.value)]
|
9b6d331ec04ffa4c2f18ec4999de1c23bd1c5120
|
bdbb30b1fa9d20d16b37dfe43c8796e43d919934
|
/Application/ui.R
|
644b4c3e57605864ff63835f64f7ac45299fec31
|
[] |
no_license
|
pogh/Course-2015.09-Developing-Data-Products
|
8f4abc75c4b55ef1523bd0e14e484deb1171c86c
|
27cc269c0286896e1abe8a3955f3d30973ef8524
|
refs/heads/master
| 2021-05-30T04:25:58.459676
| 2015-09-14T09:33:54
| 2015-09-14T09:33:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 838
|
r
|
ui.R
|
# ui.R -- Shiny UI for exploratory linear modelling on the 'mtcars' dataset.
# IDIOM FIX: library() instead of require() for a mandatory dependency --
# require() returns FALSE on failure and lets the app error later, whereas
# library() fails loudly at startup.
library(markdown)

# Left-hand column: usage notes plus the controls that drive the model.
# The two dropdowns are uiOutput placeholders populated by the server.
input_column <- column(
  4,
  includeMarkdown("documentation1.md"),
  hr(),
  uiOutput("colNamesDropdown1"),
  uiOutput("colNamesDropdown2"),
  sliderInput("size", "Display Size:", min = 1, max = 10, value = 5),
  hr(),
  includeMarkdown("documentation2.md")
)

# Right-hand column: exploratory plot and, on demand, the linear-model
# summary text/print and diagnostic plot supplied by the server.
output_column <- column(
  7,
  plotOutput("plot"),
  hr(),
  actionButton("btnSubmit", "Show Linear Model Detail"),
  br(), br(),
  textOutput("modelSummaryText"),
  br(),
  verbatimTextOutput("modelSummaryPrint"),
  plotOutput("modelPlot")
)

# Register the page: a themed navbar title over the two-column layout.
shinyUI(fluidPage(
  navbarPage("Exploratory Linear Modelling on the ‘mtcars’ Dataset"),
  fluidRow(input_column, output_column)
))
|
1da759a17a1602f2b5465ce3b2c0a9df7d20e93d
|
3f63ed18371a3237d501badeef43e2fe6a41cd45
|
/vectorFieldsInR.R
|
66db7bd2bbcdb7966fdad915ec313c999c539a3a
|
[] |
no_license
|
johnwithrowjr/R_Libraries
|
9afff950b412dc14aace92d39cf9819bf96e52e4
|
69cdd55ef8f2d2cb1f5e5a1e806d4379fac98a8d
|
refs/heads/master
| 2021-01-18T23:50:35.750753
| 2016-06-10T21:32:31
| 2016-06-10T21:32:31
| 55,651,823
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 978
|
r
|
vectorFieldsInR.R
|
# Plot a complex-valued matrix z as a vector field: the argument of each
# entry gives the arrow direction, the modulus gives its magnitude, and the
# matrix indices give the grid coordinates.
# BUGFIX: the original built the y coordinates with dim(x), but no `x`
# exists in this scope -- `z` is clearly intended.
# NOTE(review): vectorField(), atanc() and mag() are not defined in this
# file; they are assumed to come from an attached plotting package.
plotVectorDivergence <- function(z)
{
  vectorField(atanc(z), mag(z),
              rep(seq_len(ncol(z)), nrow(z)),
              rep(seq_len(nrow(z)), each = ncol(z)))
}
# Build a complex "divergence" field from two rasters laid out on the same
# grid, plot it via plotVectorDivergence(), and return the complex matrix.
#
# rstX, rstY -- sp-style raster objects (slots @data and @grid@cells.dim).
# betamax    -- upper bound of the p/q harmonic double sum; when < 1 the
#               inner accumulation is skipped entirely.
#
# BUGFIX: the original body did not parse -- `as.matrix(rep(0),n[1]*n[2]))`
# had an extra closing parenthesis (twice); replaced with matrix(0, ...).
# Also `if (dim(matX) != dim(matY))` compared length-2 vectors inside a
# scalar `if` (an error in modern R) -- wrapped in any().  Finally,
# `for (p in 1:betamax)` with the default betamax = -1 iterated 1, 0, -1 and
# divided by zero; seq_len() with a guard avoids that.
createVectorDivergence <- function(rstX, rstY, betamax = -1)
{
  # Reshape the raster data slots into matrices on the grid dimensions.
  matX <- as.matrix(rstX@data)
  dim(matX) <- rstX@grid@cells.dim
  n <- dim(matX)
  matY <- as.matrix(rstY@data)
  dim(matY) <- rstY@grid@cells.dim
  if (any(dim(matX) != dim(matY))) {
    stop("The two rasters must have identical dimensions")
  }
  # Preallocate the central-difference components and the complex result.
  matXx <- matrix(0, nrow = n[1], ncol = n[2])
  matXy <- matrix(0, nrow = n[1], ncol = n[2])
  z <- matrix(complex(real = 0, imaginary = 0), nrow = n[1], ncol = n[2])
  for (i in 2:(n[1] - 1)) {
    for (j in 2:(n[2] - 1)) {
      # Central differences of matX in each grid direction.
      matXx[i, j] <- matX[i, j + 1] - matX[i, j - 1]
      matXy[i, j] <- matX[i + 1, j] - matX[i - 1, j]
      if (betamax >= 1) {
        for (p in seq_len(betamax)) {
          for (q in seq_len(betamax)) {
            z[i, j] <- z[i, j] + complex(
              real = matY[i, j] / (matXx[i, j] - p / q * matXy[i, j]),
              imaginary = matY[i, j] / (matXy[i, j] - q / p * matXx[i, j])
            )
          }
        }
      }
    }
  }
  plotVectorDivergence(z)
  z
}
|
656ed8b42aa10270230095d95b02dc45794fcc29
|
bdc863461d5b665914b5cc369c4d445283917f29
|
/Tests/Candidates/10^3/Data 10^3 Test/PLOT FPOP 10^3 MultiTest Candidates.r
|
71ee89d907cb41e93d689eff29f44535bdd04a74
|
[] |
no_license
|
lpishchagina/FPOPdim2
|
706b5942f7a07a4de834c0f7964444c18f600fa1
|
353d96dc1450e4bac90c1b270a3f0312d01ee626
|
refs/heads/main
| 2023-04-01T17:52:52.647384
| 2021-04-13T21:16:16
| 2021-04-13T21:16:16
| 346,318,274
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,465
|
r
|
PLOT FPOP 10^3 MultiTest Candidates.r
|
# Plot the number of change-point candidates kept by three FPOP variants
# (FPOP1/FPOP2/FPOP3) at each time step, for data of dimension 2..10 and
# series length 10^3.  This section reads the per-dimension candidate counts
# from the simulation output text files into three matrices.
library(ggplot2)
library(ggpubr)
################################################################################
# Output PNG names: combined figure plus one per FPOP variant.
fname <- "PLOT FPOP 10^3 MultiTest Candidates.png"
f1name <- "PLOT FPOP1 10^3 MultiTest Candidates.png"
f2name <- "PLOT FPOP2 10^3 MultiTest Candidates.png"
f3name <- "PLOT FPOP3 10^3 MultiTest Candidates.png"
################################################################################
# Data dimensions and series length.
# NOTE(review): `dim` shadows base::dim() for the rest of this script.
dim <- c(2:10)
s = "10^3"
size <-1000
Time <-c(1:size)
dimension <- c("dim 2","dim 3","dim 4","dim 5","dim 6","dim 7","dim 8","dim 9","dim 10")
#################################
# FPOP1 #
#################################
# F1C: column 1 = time index, columns 2..10 = candidate counts for dims 2..10.
F1C <- matrix(nrow = size, ncol = length(dim)+1)
F1C[,1] <- Time
for (i in 1:length(dim)){
# Each file holds one count per line; parse to doubles.
ffname = paste("dim",dim[i],"FPOP1",s,"MultiTest Candidates.txt")
f_data <- readLines(con = ffname, n = -1)
f_data <- strsplit(f_data,split = ' ')
f_data <- sapply(f_data, FUN = function(x) {as.double(unlist(x))})
F1C[, i+1] <-f_data
}
#################################
# FPOP2 #
#################################
# Same layout as F1C, for the FPOP2 variant.
F2C <- matrix(nrow = size, ncol = length(dim)+1)
F2C[,1] <- Time
for (i in 1:length(dim)){
ffname = paste("dim",dim[i],"FPOP2",s,"MultiTest Candidates.txt")
f_data <- readLines(con = ffname, n = -1)
f_data <- strsplit(f_data,split = ' ')
f_data <- sapply(f_data, FUN = function(x) {as.double(unlist(x))})
F2C[, i+1] <-f_data
}
#################################
# FPOP3 #
#################################
# Same layout as F1C, for the FPOP3 variant.
F3C <- matrix(nrow = size, ncol = length(dim)+1)
F3C[,1] <- Time
for (i in 1:length(dim)){
ffname = paste("dim",dim[i],"FPOP3",s,"MultiTest Candidates.txt")
f_data <- readLines(con = ffname, n = -1)
f_data <- strsplit(f_data,split = ' ')
f_data <- sapply(f_data, FUN = function(x) {as.double(unlist(x))})
F3C[, i+1] <-f_data
}
################################################################################
# One figure per FPOP variant: a line per dimension showing candidates kept
# over time (dim 2 drawn thicker as the reference case).
F1 <- as.data.frame(F1C)
F2 <- as.data.frame(F2C)
F3 <- as.data.frame(F3C)
PLOT_dim = list()
PLOT_dim[[1]] <- ggplot(F1, aes(Time))+geom_line(aes(y = F1C[,10], color = "dim 10"), size = 1)+geom_line(aes(y = F1C[,9], color = "dim 9"), size = 1)+geom_line(aes(y = F1C[,8], color = "dim 8"), size = 1)+geom_line(aes(y = F1C[,7], color = "dim 7"), size = 1)+geom_line(aes(y = F1C[,6], color = "dim 6"), size = 1)+geom_line(aes(y = F1C[,5], color = "dim 5"), size = 1)+geom_line(aes(y = F1C[,4], color = "dim 4"), size = 1)+geom_line(aes(y = F1C[,3], color = "dim 3"), size = 1)+geom_line(aes(y = F1C[,2], color = "dim 2"), size = 2)+labs( x = "Time", y = "Number of candidates being considered", title ="FPOP1:Candidates")+theme(legend.position = c(0, 1),legend.justification = c(0, 1))
PLOT_dim[[2]] <- ggplot(F2, aes(Time))+geom_line(aes(y = F2C[,10], color = "dim 10"), size = 1)+geom_line(aes(y = F2C[,9], color = "dim 9"), size = 1)+geom_line(aes(y = F2C[,8], color = "dim 8"), size = 1)+geom_line(aes(y = F2C[,7], color = "dim 7"), size = 1)+geom_line(aes(y = F2C[,6], color = "dim 6"), size = 1)+geom_line(aes(y = F2C[,5], color = "dim 5"), size = 1)+geom_line(aes(y = F2C[,4], color = "dim 4"), size = 1)+geom_line(aes(y = F2C[,3], color = "dim 3"), size = 1)+geom_line(aes(y = F2C[,2], color = "dim 2"), size = 2)+labs( x = "Time", y = "Number of candidates being considered", title ="FPOP2:Candidates")+theme(legend.position = c(0, 1),legend.justification = c(0, 1))
PLOT_dim[[3]] <- ggplot(F3, aes(Time))+geom_line(aes(y = F3C[,10], color = "dim 10"), size = 1)+geom_line(aes(y = F3C[,9], color = "dim 9"), size = 1)+geom_line(aes(y = F3C[,8], color = "dim 8"), size = 1)+geom_line(aes(y = F3C[,7], color = "dim 7"), size = 1)+geom_line(aes(y = F3C[,6], color = "dim 6"), size = 1)+geom_line(aes(y = F3C[,5], color = "dim 5"), size = 1)+geom_line(aes(y = F3C[,4], color = "dim 4"), size = 1)+geom_line(aes(y = F3C[,3], color = "dim 3"), size = 1)+geom_line(aes(y = F3C[,2], color = "dim 2"), size = 2)+labs( x = "Time", y = "Number of candidates being considered", title ="FPOP3:Candidates")+theme(legend.position = c(0, 1),legend.justification = c(0, 1))
# Combined figure (all three variants stacked), then one PNG per variant.
png(filename = fname, width = 1500, height = 1000)
ggarrange(PLOT_dim[[1]],PLOT_dim[[2]],PLOT_dim[[3]],ncol = 1)
dev.off()
png(filename = f1name, width = 1500, height = 1000)
ggarrange(PLOT_dim[[1]],ncol = 1)
dev.off()
png(filename = f2name, width = 1500, height = 1000)
ggarrange(PLOT_dim[[2]],ncol = 1)
dev.off()
png(filename = f3name, width = 1500, height = 1000)
ggarrange(PLOT_dim[[3]],ncol = 1)
dev.off()
################################################################################
# Second view of the same data: one figure per dimension, comparing the three
# FPOP variants against each other.
Fdim10 = data.frame(Time,F1C[,10],F2C[,10],F3C[,10])
Fdim9 = data.frame(Time,F1C[,9],F2C[,9],F3C[,9])
Fdim8 = data.frame(Time,F1C[,8],F2C[,8],F3C[,8])
Fdim7 = data.frame(Time,F1C[,7],F2C[,7],F3C[,7])
Fdim6 = data.frame(Time,F1C[,6],F2C[,6],F3C[,6])
Fdim5 = data.frame(Time,F1C[,5],F2C[,5],F3C[,5])
Fdim4 = data.frame(Time,F1C[,4],F2C[,4],F3C[,4])
Fdim3 = data.frame(Time,F1C[,3],F2C[,3],F3C[,3])
Fdim2 = data.frame(Time,F1C[,2],F2C[,2],F3C[,2])
# NOTE(review): PLOT_FPOP[[1]] holds the dimension-10 plot while [[2]]..[[9]]
# hold dimensions 2..9 -- the index does not match the dimension.
PLOT_FPOP = list()
PLOT_FPOP[[2]] <- ggplot(Fdim2, aes(Time))+geom_line(aes(y = F1C[,2], color = "FPOP1"), size = 1)+geom_line(aes(y = F2C[,2], color = "FPOP2"), size = 1)+geom_line(aes(y = F3C[,2], color = "FPOP3"), size = 1)+labs( x = "Time", y = "Number of candidates being considered", title ="Dimension 2: Candidates")+theme(legend.position = c(0, 1),legend.justification = c(0, 1))
PLOT_FPOP[[3]] <- ggplot(Fdim3, aes(Time))+geom_line(aes(y = F1C[,3], color = "FPOP1"), size = 1)+geom_line(aes(y = F2C[,3], color = "FPOP2"), size = 1)+geom_line(aes(y = F3C[,3], color = "FPOP3"), size = 1)+labs( x = "Time", y = "Number of candidates being considered", title ="Dimension 3: Candidates")+theme(legend.position = c(0, 1),legend.justification = c(0, 1))
PLOT_FPOP[[4]] <- ggplot(Fdim4, aes(Time))+geom_line(aes(y = F1C[,4], color = "FPOP1"), size = 1)+geom_line(aes(y = F2C[,4], color = "FPOP2"), size = 1)+geom_line(aes(y = F3C[,4], color = "FPOP3"), size = 1)+labs( x = "Time", y = "Number of candidates being considered", title ="Dimension 4: Candidates")+theme(legend.position = c(0, 1),legend.justification = c(0, 1))
PLOT_FPOP[[5]] <- ggplot(Fdim5, aes(Time))+geom_line(aes(y = F1C[,5], color = "FPOP1"), size = 1)+geom_line(aes(y = F2C[,5], color = "FPOP2"), size = 1)+geom_line(aes(y = F3C[,5], color = "FPOP3"), size = 1)+labs( x = "Time", y = "Number of candidates being considered", title ="Dimension 5: Candidates")+theme(legend.position = c(0, 1),legend.justification = c(0, 1))
PLOT_FPOP[[6]] <- ggplot(Fdim6, aes(Time))+geom_line(aes(y = F1C[,6], color = "FPOP1"), size = 1)+geom_line(aes(y = F2C[,6], color = "FPOP2"), size = 1)+geom_line(aes(y = F3C[,6], color = "FPOP3"), size = 1)+labs( x = "Time", y = "Number of candidates being considered", title ="Dimension 6: Candidates")+theme(legend.position = c(0, 1),legend.justification = c(0, 1))
PLOT_FPOP[[7]] <- ggplot(Fdim7, aes(Time))+geom_line(aes(y = F1C[,7], color = "FPOP1"), size = 1)+geom_line(aes(y = F2C[,7], color = "FPOP2"), size = 1)+geom_line(aes(y = F3C[,7], color = "FPOP3"), size = 1)+labs( x = "Time", y = "Number of candidates being considered", title ="Dimension 7: Candidates")+theme(legend.position = c(0, 1),legend.justification = c(0, 1))
PLOT_FPOP[[8]] <- ggplot(Fdim8, aes(Time))+geom_line(aes(y = F1C[,8], color = "FPOP1"), size = 1)+geom_line(aes(y = F2C[,8], color = "FPOP2"), size = 1)+geom_line(aes(y = F3C[,8], color = "FPOP3"), size = 1)+labs( x = "Time", y = "Number of candidates being considered", title ="Dimension 8: Candidates")+theme(legend.position = c(0, 1),legend.justification = c(0, 1))
PLOT_FPOP[[9]] <- ggplot(Fdim9, aes(Time))+geom_line(aes(y = F1C[,9], color = "FPOP1"), size = 1)+geom_line(aes(y = F2C[,9], color = "FPOP2"), size = 1)+geom_line(aes(y = F3C[,9], color = "FPOP3"), size = 1)+labs( x = "Time", y = "Number of candidates being considered", title ="Dimension 9: Candidates")+theme(legend.position = c(0, 1),legend.justification = c(0, 1))
PLOT_FPOP[[1]] <- ggplot(Fdim10, aes(Time))+geom_line(aes(y = F1C[,10], color = "FPOP1"), size = 1)+geom_line(aes(y = F2C[,10], color = "FPOP2"), size = 1)+geom_line(aes(y = F3C[,10], color = "FPOP3"), size = 1)+labs( x = "Time", y = "Number of candidates being considered", title ="Dimension 10: Candidates")+theme(legend.position = c(0, 1),legend.justification = c(0, 1))
# Write one PNG per dimension, then several multi-panel combinations.
png(filename = "dim 2 PLOT FPOP 10^3 MultiTest Candidates.png", width = 1500, height = 1000)
ggarrange(PLOT_FPOP[[2]],ncol = 1)
dev.off()
png(filename = "dim 3 PLOT FPOP 10^3 MultiTest Candidates.png", width = 1500, height = 1000)
ggarrange(PLOT_FPOP[[3]],ncol = 1)
dev.off()
png(filename = "dim 4 PLOT FPOP 10^3 MultiTest Candidates.png", width = 1500, height = 1000)
ggarrange(PLOT_FPOP[[4]],ncol = 1)
dev.off()
png(filename = "dim 5 PLOT FPOP 10^3 MultiTest Candidates.png", width = 1500, height = 1000)
ggarrange(PLOT_FPOP[[5]],ncol = 1)
dev.off()
png(filename = "dim 6 PLOT FPOP 10^3 MultiTest Candidates.png", width = 1500, height = 1000)
ggarrange(PLOT_FPOP[[6]],ncol = 1)
dev.off()
png(filename = "dim 7 PLOT FPOP 10^3 MultiTest Candidates.png", width = 1500, height = 1000)
ggarrange(PLOT_FPOP[[7]],ncol = 1)
dev.off()
png(filename = "dim 8 PLOT FPOP 10^3 MultiTest Candidates.png", width = 1500, height = 1000)
ggarrange(PLOT_FPOP[[8]],ncol = 1)
dev.off()
png(filename = "dim 9 PLOT FPOP 10^3 MultiTest Candidates.png", width = 1500, height = 1000)
ggarrange(PLOT_FPOP[[9]],ncol = 1)
dev.off()
png(filename = "dim 10 PLOT FPOP 10^3 MultiTest Candidates.png", width = 1500, height = 1000)
ggarrange(PLOT_FPOP[[1]],ncol = 1)
dev.off()
png(filename = "dim 2-10 PLOT FPOP 10^3 MultiTest Candidates.png", width = 1500, height = 1000)
ggarrange(PLOT_FPOP[[2]],PLOT_FPOP[[3]],PLOT_FPOP[[4]],PLOT_FPOP[[5]],PLOT_FPOP[[6]],PLOT_FPOP[[7]],PLOT_FPOP[[8]],PLOT_FPOP[[9]],PLOT_FPOP[[1]],ncol = 1)
dev.off()
png(filename = "dim 2,3,4 PLOT FPOP 10^3 MultiTest Candidates.png", width = 1500, height = 1000)
ggarrange(PLOT_FPOP[[2]],PLOT_FPOP[[3]],PLOT_FPOP[[4]],ncol = 1)
dev.off()
png(filename = "dim 5,6,7 PLOT FPOP 10^3 MultiTest Candidates.png", width = 1500, height = 1000)
ggarrange(PLOT_FPOP[[5]],PLOT_FPOP[[6]],PLOT_FPOP[[7]],ncol = 1)
dev.off()
png(filename = "dim 8,9,10 PLOT FPOP 10^3 MultiTest Candidates.png", width = 1500, height = 1000)
ggarrange(PLOT_FPOP[[8]],PLOT_FPOP[[9]],PLOT_FPOP[[1]],ncol = 1)
dev.off()
|
977c327793b62f70fec96111fe29503937b35a27
|
08747ab934d09afeb31876584d2b8ab96a524648
|
/Projet_final/ui.R
|
3cf91599b2d4cccb359647c00b47900f0cce70e7
|
[] |
no_license
|
vneyret/Rapportfin
|
3900a3c99bbb628abe546bd10a9743c28b2eae9b
|
9b35cfbaca42528d63e923e27df05d0ae4148d79
|
refs/heads/master
| 2022-07-24T09:53:22.124311
| 2020-05-15T08:21:29
| 2020-05-15T08:21:29
| 263,560,439
| 0
| 0
| null | 2020-05-13T14:59:35
| 2020-05-13T07:44:48
|
HTML
|
UTF-8
|
R
| false
| false
| 9,030
|
r
|
ui.R
|
# ui.R -- Shiny UI for a one-way ANOVA teaching app (UI text is in French).
# Four-part navbar: an information page, a CSV upload page, and tabbed panels
# walking through descriptive stats, assumption checks, the ANOVA itself and
# the Tukey HSD results.  All outputs referenced here are rendered in server.R.
library(shiny)
library(shinythemes)
ui <- tagList(
fluidPage(theme = shinytheme("flatly")),
navbarPage(
# title=div(img(src="logo.png"), "ISARA Projet Shiny G1"),
"ISARA Projet Shiny G1",
# "Informations" tab: static explanation of the one-way ANOVA and the
# example dataset (einkorn wheat trials).
tabPanel("Informations",
fluidRow(column(width=2),
column(
h3(p("ANOVA à 1 un facteur",style="color:black;text-align:center")),
width=8,style="background-color:#e0eee0;border-radius: 8px")
),
br(),
fluidRow(column(width=2, icon("arrow-alt-circle-right","fa-5x"),align="center"),
column(
p("Cette application a pour but de réaliser une ANOVA à 1 facteur sur tous types de données numérique. Un ANOVA est un test statistique qui permet de tester des données
paramétriques en comparant la moyenne entre plusieurs modalités d'un facteur. Deux hypothèses de travail sont alors testées :",style="color:black;text-align:justify"),
withMathJax(),
p(strong("H0 : Les moyennes sont toutes égales entre elles. Le facteur n’a pas un effet significatif sur la variable"),style="color:black; text-align:justify; padding:20px;border:1px solid black;background-color:white"),
p(strong("H1 : Au moins une des moyennes est différente des autres. Le facteur a un effet significatif sur la variable"),style="color:black; text-align:justify; padding:20px;border:1px solid black;background-color:white"),
width=8,style="background-color:#e0eee0 ;border-radius: 8px")
),
br(),
p("A titre d'exemple, nous utilisons un jeu de données provenant d'essais agronomiques
réalisés sur du petit épeautre. Les essais se sont déroulés sur ", strong("l'année 2019"), "et ont eu lieu sur
", strong("9 variétés différentes."), "L'expérimentation s'est déroulée dans le département de l'Aude, une partie dans la Piège, une
partie dans le Minervois et une troisième partie à l'école d'ingénieur de Purpan.", style="text-align:justify;color:black;background-color:#e0eee0;padding:15px;border-radius:8px"),
br(),
fluidPage(
img(src ="ptitepeautre.png", align = "center", height = 300 , width = 300 ),
img(src ="Parcelles.png", align = "right", height = 300, width = 500)
)
),
# Data tab: upload a CSV file and preview it.
tabPanel("Charger Tableau",
# Title
titlePanel("Importez votre tableau"),
# Layout: sidebar with the upload controls, main panel with the preview.
sidebarLayout(
# Panel holding the inputs
sidebarPanel(
# Input to select a file
fileInput("file1", "Choisissez un fichier .CSV",
multiple = FALSE,
accept = c("text/csv",
"text/comma-separated-values,text/plain",
".csv")),
# Horizontal rule
tags$hr(),
# Checkbox input: does the file have a header row?
checkboxInput("header", "En-tête", TRUE),
# Input to choose the field separator
radioButtons("sep", "Séparateurs",
choices = c(Comma = ",",
Semicolon = ";"),
# Tab = "\t"),
selected = ","),
# Horizontal rule
tags$hr(),
# Input to choose the number of rows to display
),
# Panel holding the outputs
mainPanel(
# Display the uploaded table
tableOutput("contents")
)
)
),
# Interpretation tab: descriptive stats, assumption checks, ANOVA, Tukey.
tabPanel("Interprétation",
mainPanel(
tabsetPanel(
tabPanel("Description",
br(),
h5(p("Nous affichons la",strong ("moyenne"), "pour les différentes modalités du facteur :", style="text-align:justify;color:black;background-color:#e0ffff;padding:15px;border-radius:8px")),
br(),
tableOutput("mean"),
h5(p("Nous affichons le",strong ("boxplot"), "pour les différentes modalités du facteur. Le boxplot permet de visualiser des mesures statistiques clés telles que la médiane, la moyenne et les quartiles.", style="text-align:justify;color:black;background-color:#e0ffff;padding:15px;border-radius:8px")),
plotOutput("boxplot")
),
# Assumption checks: residual normality (Shapiro) then equal variances
# (Bartlett).
tabPanel("Hypothèses",
br(),
h5(p("Pour le test de l'ANOVA, il est nécessaire de tester la normalité des résidus.", style="text-align:justify;color:black;background-color:#e0ffff;padding:15px;border-radius:8px")),
plotOutput("plotsindep"),
br(),
h5(p("Dans un premier temps, on effectue le test de Shapiro. Si p-value > 0.05, alors les résidus sont normaux et nous devons donc alors tester
comme deuxième hypothèse l'égalité des variances par le test de Bartlett.", style="text-align:justify;color:black;background-color:#e0ffff;padding:15px;border-radius:8px")),
verbatimTextOutput("shapiro"),
br(),
h5(p("Pour le test de Bartlett, si p-value > 0.05 alors les variances sont égales.", style="text-align:justify;color:black;background-color:#e0ffff;padding:15px;border-radius:8px")),
verbatimTextOutput("bartlett"),
br(),
fluidRow(column(width=2, icon("arrow-alt-circle-right","fa-5x"),align="center"),
column(10,
h4(p("Si toutes les hypothèses sont vérifiées, nous pouvons faire une ANOVA",style="color:white;background-color:#36648b;padding:15px;border-radius:8px;text-align:justify"))),
withMathJax()),
),
tabPanel("Anova",
br(),
h5(p("Suite à la validation des hypothèses, on test les hypothèses de départ H0 et H1.", style="text-align:justify;color:black;background-color:#e0ffff;padding:15px;border-radius:8px")),
tableOutput("anov"),
br(),
h5(p("Si la p-value > 0.05, cela signifie que ce n'est pas significatif, H0 est accepté.", strong("Le facteur n'a pas d'effet significatif sur la variable : les moyennes sont toutes égales entre elles."),
br(),
br(),
"Si la p-value < 0.05, cela signifie que cela est significatif, H0 est rejété et H1 est donc accepté.", strong("Le facteur a un effet significatif sur la varialbe : au moins une des moyennes est différente des autres."), style="text-align:justify;color:black;background-color:#e0ffff;padding:15px;border-radius:8px")),
br(),
fluidRow(column(width=2, icon("arrow-alt-circle-right","fa-5x"),align="center"),
column(10,
h4(p("Pour savoir où se situent les différences :"),
h4("Test TuckeyHSD",style="color:white;background-color:#36648b;padding:15px;border-radius:8px;text-align:justify"))),
withMathJax())),
tabPanel("Résultats",
br(),
h5(p(".", style="text-align:justify;color:black;background-color:#e0ffff;padding:15px;border-radius:8px")),
plotOutput("tuck"),
br(),
h5(p(".", style="text-align:justify;color:black;background-color:#e0ffff;padding:15px;border-radius:8px")),
tableOutput("classe"))
)
)
)
)
)
|
90f271db65fe132d760e43161b2a9afc6503719c
|
3a5f227074d2d903633cd893b57af9125b536aaf
|
/man/ei_ind.Rd
|
32f570b62116b363974db9109ee062e94937a883
|
[] |
no_license
|
cran/ITNr
|
eefd3a398e7bca302b05782913f300dae1fb7f26
|
35a833cc39458b6cf5f6b491a4327cca2effa63a
|
refs/heads/master
| 2023-06-24T15:43:02.117877
| 2023-03-31T13:10:11
| 2023-03-31T13:10:11
| 120,759,051
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 750
|
rd
|
ei_ind.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/individual_EI_function.R
\name{ei_ind}
\alias{ei_ind}
\title{Individual/Node level E-I Index}
\usage{
ei_ind(gs, attrname)
}
\arguments{
\item{gs}{igraph object}
\item{attrname}{Attribute name}
}
\value{
Individual/node level results data frame
}
\description{
This function calculates the E-I Index (External-internal) at the individual/node level
}
\examples{
require(igraph)
##Create random network (igraph object)
gs<-erdos.renyi.game(30,0.05,directed = TRUE)
##Add vertex names
V(gs)$name<-1:vcount(gs)
## Add an attribute
V(gs)$letters<- rep(LETTERS[1:5],6)
##Calculate the Individual E-I Results
EI_IND_DATAFRAME<-ei_ind(gs,"letters")
}
|
848d9a10277a31349aaf0a97ce408f5ccc03bd2c
|
aae143af482690863b42f76af555f2617fabfc39
|
/R/build_nhtsa_url.R
|
f4c86cb80fea4c99baf7c5a602ab2097fa666a82
|
[
"MIT"
] |
permissive
|
burch-cm/vindecodr
|
ded9ef48f6d36a2a98d021684211a64a0185117b
|
68b4debc479a7709dbefe6159c73a22b7b7a231f
|
refs/heads/main
| 2023-01-13T08:03:59.566698
| 2020-11-23T20:45:05
| 2020-11-23T20:45:05
| 314,322,534
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,246
|
r
|
build_nhtsa_url.R
|
#' Build a NHTSA URL
#'
#' @description
#'
#' A family of functions to build URLs for the National Highway Transportation
#' Safety Administration (NHTSA) vehicle identification number (VIN) decoder API.
#'
#' The `build_nhtsa_url()` function returns a closure containing the appropriate
#' endpoint and file format request to pass to the NHTSA VIN API.
#'
#' * `build_vin_url()` takes a single VIN in a character string and returns
#'     an appropriately-formatted url for a NHTSA API request via the
#'     /vehicles/DecodeVINValues/ endpoint.
#'
#' * `build_vin_batch_url()` takes up to 50 VINs in a character vector and
#'     returns appropriately-formatted url for a NHTSA API request via the
#'     /vehicles/DecodeVINBatchValues/ endpoint.
#'
#' @param endpoint a string containing the appropriate endpoint. Candidate
#'     endpoints can be found at https://vpic.nhtsa.dot.gov/api/
#' @param format the file format to return from the API, one of 'json', 'xml',
#'     or 'csv'. Defaults to 'json'.
#' @param vin a string containing the VIN to query.
#' @param ... additional arguments to passed on to derived builder functions
#' @return
#' * `build_nhtsa_url()` returns a function which will in turn build a url which
#'     points to the specified endpoint on the NHTSA API
#'
#' * `build_vin_url()` returns a url as a string, formatted to query the NHTSA
#'     `DecodeVinValues` endpoint and decode a single VIN.
#' * `build_vin_batch_url()` returns a url as a string, formatted to query the NHTSA
#'     `DecodeVinBatch Values` endpoint and decode multiple VINs in one call.
#'
#' @export
#'
#' @examples
#' vin_url_xml <- build_nhtsa_url("/vehicles/DecodeVINValues/", format = "xml")
#' build_vin_url("3VWLL7AJ9BM053541")
#' build_vin_batch_url(c("3VWLL7AJ9BM053541", "JH4KA3140KC015221"))
build_nhtsa_url <- function(endpoint, format = "json", ...) {
  # Root of the NHTSA vPIC web service; every endpoint hangs off this.
  api_root <- "https://vpic.nhtsa.dot.gov/api"
  # Return a closure: endpoint and format are baked in, only the VIN (plus
  # any extra fragments supplied via ...) varies per call.
  function(vin, ...) {
    format_query <- paste0("?format=", format)
    paste0(api_root, endpoint, vin, format_query, ...)
  }
}
# Single-VIN builder: DecodeVINValues endpoint, JSON by default.
#' @rdname build_nhtsa_url
#' @export
build_vin_url <- build_nhtsa_url("/vehicles/DecodeVINValues/")

# Batch builder: DecodeVINValuesBatch endpoint (up to 50 VINs per request).
#' @rdname build_nhtsa_url
#' @export
build_vin_batch_url <- build_nhtsa_url("/vehicles/DecodeVINValuesBatch/")
|
ccfbb670d39ba8a8666244c163a4ffa80e79f4af
|
547e448dd1b38c8b8fd4e4edc9cdff799670d498
|
/Archived/05-01-16/X2 - Make Gephi Files.R
|
f9f3e2ad0d607a6fbe9b3a42a06be4c18473c99e
|
[] |
no_license
|
BrianAronson/ADHD-Peer-Influence
|
c20bd69fd918a92091f79622f9180504148f1b16
|
d726d8fff6ef967972ed41c73e602e2ede394956
|
refs/heads/master
| 2020-12-05T00:03:56.800043
| 2020-01-05T23:43:19
| 2020-01-05T23:43:19
| 231,944,033
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,339
|
r
|
X2 - Make Gephi Files.R
|
# Export igraph networks and node attributes to CSV files for import into
# Gephi. Assumes the following file-globals already exist: igraph objects
# g0, g1, g2 and g10, g11, g12 (presumably two networks at three waves --
# TODO confirm), plus data frames ADHDFriends58M0/M1/M2 and
# ADHDFriends77M0/M1/M2 holding per-node attention-difficulty measures.
#Attempt igraph
# plot(g2,layout=layout.fruchterman.reingold(g2),vertex.size=2,
# vertex.label=NA, edge.arrow.size=.025,vertex.color=DifficultyAttention)
#Edgelists
#Extract the edgelist (two-column matrix of endpoints) from each graph
Geph1.0<-get.edgelist(g0)
Geph1.1<-get.edgelist(g1)
Geph1.2<-get.edgelist(g2)
Geph2.0<-get.edgelist(g10)
Geph2.1<-get.edgelist(g11)
Geph2.2<-get.edgelist(g12)
#Reformat edgelists for Gephi (Gephi expects Source/Target columns)
GephiEdges1.0<-data.frame(Source=Geph1.0[,1], Target=Geph1.0[,2])
GephiEdges1.1<-data.frame(Source=Geph1.1[,1], Target=Geph1.1[,2])
GephiEdges1.2<-data.frame(Source=Geph1.2[,1], Target=Geph1.2[,2])
GephiEdges2.0<-data.frame(Source=Geph2.0[,1], Target=Geph2.0[,2])
GephiEdges2.1<-data.frame(Source=Geph2.1[,1], Target=Geph2.1[,2])
GephiEdges2.2<-data.frame(Source=Geph2.2[,1], Target=Geph2.2[,2])
#Write Edgelists to CSV (files land in the current working directory)
write.csv(GephiEdges1.0, file="edges1.0.csv")
write.csv(GephiEdges1.1, file="edges1.1.csv")
write.csv(GephiEdges1.2, file="edges1.2.csv")
write.csv(GephiEdges2.0, file="edges2.0.csv")
write.csv(GephiEdges2.1, file="edges2.1.csv")
write.csv(GephiEdges2.2, file="edges2.2.csv")
#Nodes covariates
#Create one node table per network/wave. Wave-2 tables also carry derived
#columns: ChangeAttention (wave2 - wave0), plus binarized versions of the
#wave-2 level (>1), the change (>0), and the wave-0 level (>2).
GephiNodes1.0<-data.frame(ID=rownames(ADHDFriends58M0),DifficultyAttention=ADHDFriends58M0$DifficultyAttention)
GephiNodes1.1<-data.frame(ID=rownames(ADHDFriends58M1),DifficultyAttention=ADHDFriends58M1$DifficultyAttention)
GephiNodes1.2<-data.frame(ID=rownames(ADHDFriends58M2),DifficultyAttention=ADHDFriends58M2$DifficultyAttention, ChangeAttention=ADHDFriends58M2$DifficultyAttention-ADHDFriends58M0$DifficultyAttention, BinaryDifficultyAttention=ifelse(ADHDFriends58M2$DifficultyAttention>1,1,0), BinaryChangeAttention=ifelse(ADHDFriends58M2$DifficultyAttention-ADHDFriends58M0$DifficultyAttention>0,1,0), W1BinaryDifficultyAttention=ifelse(ADHDFriends58M0$DifficultyAttention>2,1,0))
GephiNodes2.0<-data.frame(ID=rownames(ADHDFriends77M0),DifficultyAttention=ADHDFriends77M0$DifficultyAttention)
GephiNodes2.1<-data.frame(ID=rownames(ADHDFriends77M1),DifficultyAttention=ADHDFriends77M1$DifficultyAttention)
GephiNodes2.2<-data.frame(ID=rownames(ADHDFriends77M2),DifficultyAttention=ADHDFriends77M2$DifficultyAttention, ChangeAttention=ADHDFriends77M2$DifficultyAttention-ADHDFriends77M0$DifficultyAttention, BinaryDifficultyAttention=ifelse(ADHDFriends77M2$DifficultyAttention>1,1,0), BinaryChangeAttention=ifelse(ADHDFriends77M2$DifficultyAttention-ADHDFriends77M0$DifficultyAttention>0,1,0), W1BinaryDifficultyAttention=ifelse(ADHDFriends77M0$DifficultyAttention>2,1,0))
#Write node tables to csv
write.csv(GephiNodes1.0, file="nodes1.0.csv")
write.csv(GephiNodes1.1, file="nodes1.1.csv")
write.csv(GephiNodes1.2, file="nodes1.2.csv")
write.csv(GephiNodes2.0, file="nodes2.0.csv")
write.csv(GephiNodes2.1, file="nodes2.1.csv")
write.csv(GephiNodes2.2, file="nodes2.2.csv")
#table(GephiNodes2.2$DifficultyAttention)
#table(GephiNodes1.2$DifficultyAttention)
#table(GephiNodes2.2$ChangeAttention)
#table(GephiNodes1.2$ChangeAttention)
#Magnifier effect: compare mean attention difficulty and mean change across
#the two networks
mean(GephiNodes1.1$DifficultyAttention, na.rm=TRUE)
mean(GephiNodes2.1$DifficultyAttention, na.rm=TRUE)
mean(GephiNodes1.2$ChangeAttention, na.rm=TRUE)
mean(GephiNodes2.2$ChangeAttention, na.rm=TRUE)
|
3763359fbf3daa406bdd3441e9c0ae34689cf8e1
|
0affcfeafed053dab542218ea09a6a11716b27da
|
/LaLigaEconomics.R
|
bba9859f8340b6fcabf088df1b1fa104f52a6184
|
[] |
no_license
|
trickytaco/laliga-economic-analysis
|
26d3bdcfd6db6f9db9bbb502653ea613bc7b786a
|
bf59198e6eea4c51b983eb721acc53cee16c465f
|
refs/heads/master
| 2021-01-10T01:59:43.791331
| 2016-04-04T01:54:01
| 2016-04-04T01:54:01
| 55,268,664
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 33,938
|
r
|
LaLigaEconomics.R
|
# ---- Setup: packages, working directory, and season standings ----
#Library to read Excel files
library(xlsx)
#library(ggplot2)
#Set the working directory
# NOTE(review): hard-coded absolute Windows path; this breaks on any other
# machine. Prefer running from the project root or using relative paths.
setwd("D:\\Statistics\\LaLiga\\laliga-economic-analysis")
#Read in the standings for each season from 2005-06 to 2014-15
#(one worksheet per season; standingsList[[i]] holds the i-th season's table)
standingsList <- list()
sheetNames <- c("2005-06", "2006-07", "2007-08", "2008-09", "2009-10",
"2010-11", "2011-12", "2012-13", "2013-14", "2014-15")
for (i in 1:10) {
standingsList[[i]] <- read.xlsx("La Liga Standings.xlsx",
sheetName = sheetNames[i])
}
# ---- Points: read the points-per-season table and plot every team ----
#Read in the Points sheet and set the column names
PointsDF <- read.xlsx("La Liga economics.xlsx", sheetName = "Points")
names(PointsDF) <- c("Team_ID", "Team", "2005-06", "2006-07", "2007-08",
"2008-09", "2009-10", "2010-11", "2011-12", "2012-13",
"2013-14", "2014-15")
#Remove some non-data rows (rows 1-3 here, unlike the later sheets which
#drop rows 37-39) and drop the Team_ID column
PointsDF <- PointsDF[-1:-3,-1]
#Calculate the maximum value of the entire data table, ignoring NAs, so all
#seasons share one y-axis scale
allMax <- 0
for (i in 2:11) {
if (max(PointsDF[,i][complete.cases(PointsDF[,i])]) > allMax) {
allMax <- max(PointsDF[,i][complete.cases(PointsDF[,i])])
}
}
#Plot row 2's data as the initial line to establish the axes
# NOTE(review): the loop below also starts at 2, so row 2 is drawn twice and
# row 1 is never drawn; the other sections use 1:36 for the same loop.
plot(factor(names(PointsDF)[2:11]), PointsDF[2,2:11], type="l",
ylim = c(0,allMax), xaxt = "n", xlab = "Season", ylab = "Points",
main = "Points by Season")
#Add season labels to the bottom axis
axis(1, 1:10, names(PointsDF)[2:11])
#Now add a line for every other team to the plot
for (i in 2:36) {
lines(1:10, PointsDF[i,2:11], col = i)
}
#Copy the on-screen device to a png device
dev.copy(png, file = ".\\plots\\basic\\points.png", width = 1280, height = 720, units = "px")
#Close the device to save the file
dev.off()
# ---- Transfer expense: read and plot per-season transfer spending ----
#Read in the transfer expense sheet and set the column names
TransExpDF <- read.xlsx("La Liga economics.xlsx", sheetName = "Transfer expense")
names(TransExpDF) <- c("Team_ID", "Team", "2005-06", "2006-07", "2007-08",
"2008-09", "2009-10", "2010-11", "2011-12", "2012-13",
"2013-14", "2014-15")
#Remove some non-data rows (37-39) and drop the Team_ID column
TransExpDF <- TransExpDF[-37:-39,-1]
#Calculate the maximum value of the entire data table, ignoring NAs, for a
#common y-axis scale
allMax <- 0
for (i in 2:11) {
if (max(TransExpDF[,i][complete.cases(TransExpDF[,i])]) > allMax) {
allMax <- max(TransExpDF[,i][complete.cases(TransExpDF[,i])])
}
}
#Plot row 2's data as the initial line to establish the axes
plot(factor(names(TransExpDF)[2:11]), TransExpDF[2,2:11], type="l",
ylim = c(0,allMax), xaxt = "n", xlab = "Season",
ylab = "Transfer Expenditure", main = "Transfer Expenditure by Season")
#Add season labels to the bottom axis
axis(1, 1:10, names(TransExpDF)[2:11])
#Now add a line for every team to the plot (row 2 gets drawn twice)
for (i in 1:36) {
lines(1:10, TransExpDF[i,2:11], col = i)
}
#Copy the on-screen device to a png device
dev.copy(png, file = ".\\plots\\basic\\transfer_expense.png", width = 1280, height = 720, units = "px")
#Close the device to save the file
dev.off()
# ---- Transfer expense, rolling average: read and plot ----
#Read in the transfer average expense sheet and set the column names
TransExpAvgDF <- read.xlsx("La Liga economics.xlsx", sheetName = "Transfer expense - Rolling avg")
names(TransExpAvgDF) <- c("Team_ID", "Team", "2005-06", "2006-07", "2007-08",
"2008-09", "2009-10", "2010-11", "2011-12", "2012-13",
"2013-14", "2014-15")
#Remove some non-data rows (37-39) and drop the Team_ID column
TransExpAvgDF <- TransExpAvgDF[-37:-39,-1]
#Calculate the maximum value of the entire data table, ignoring NAs, for a
#common y-axis scale
allMax <- 0
for (i in 2:11) {
if (max(TransExpAvgDF[,i][complete.cases(TransExpAvgDF[,i])]) > allMax) {
allMax <- max(TransExpAvgDF[,i][complete.cases(TransExpAvgDF[,i])])
}
}
#Plot row 2's data as the initial line to establish the axes
plot(factor(names(TransExpAvgDF)[2:11]), TransExpAvgDF[2,2:11], type="l",
ylim = c(0,allMax), xaxt = "n", xlab = "Season",
ylab = "Transfer Expenditure Average",
main = "Transfer Expenditure Average by Season")
#Add season labels to the bottom axis
axis(1, 1:10, names(TransExpAvgDF)[2:11])
#Now add a line for every team to the plot (row 2 gets drawn twice)
for (i in 1:36) {
lines(1:10, TransExpAvgDF[i,2:11], col = i)
}
#Copy the on-screen device to a png device
dev.copy(png, file = ".\\plots\\basic\\transfer_expense_average.png", width = 1280, height = 720, units = "px")
#Close the device to save the file
dev.off()
# ---- Net transfer spend: read and plot (values can be negative) ----
#Read in the net transfer expense sheet and set the column names
NetTransDF <- read.xlsx("La Liga economics.xlsx", sheetName = "Net transfer spend")
names(NetTransDF) <- c("Team_ID", "Team", "2005-06", "2006-07", "2007-08",
"2008-09", "2009-10", "2010-11", "2011-12", "2012-13",
"2013-14", "2014-15")
#Remove some non-data rows (37-39) and drop the Team_ID column
NetTransDF <- NetTransDF[-37:-39,-1]
#Calculate the maximum and minimum value of the entire data table for plotting
#purposes (net spend can be negative, so both bounds are needed)
allMax <- 0
allMin <- 0
for (i in 2:11) {
if (max(NetTransDF[,i][complete.cases(NetTransDF[,i])]) > allMax) {
allMax <- max(NetTransDF[,i][complete.cases(NetTransDF[,i])])
}
if (min(NetTransDF[,i][complete.cases(NetTransDF[,i])]) < allMin) {
allMin <- min(NetTransDF[,i][complete.cases(NetTransDF[,i])])
}
}
#Plot row 2's data as the initial line to establish the axes
plot(factor(names(NetTransDF)[2:11]), NetTransDF[2,2:11], type="l",
ylim = c(allMin,allMax), xaxt = "n", xlab = "Season",
ylab = "Net Transfer Expenditure",
main = "Net Transfer Expenditure by Season")
#Add season labels to the bottom axis
axis(1, 1:10, names(NetTransDF)[2:11])
#Now add a line for every team to the plot (row 2 gets drawn twice)
for (i in 1:36) {
lines(1:10, NetTransDF[i,2:11], col = i)
}
#Copy the on-screen device to a png device
dev.copy(png, file = ".\\plots\\basic\\net_transfer_spend.png", width = 1280, height = 720, units = "px")
#Close the device to save the file
dev.off()
# ---- Net transfer spend, rolling average: read and plot ----
#Read in the net transfer expense average sheet and set the column names
NetTransAvgDF <- read.xlsx("La Liga economics.xlsx", sheetName = "Net transfer spend - Rolling av")
names(NetTransAvgDF) <- c("Team_ID", "Team", "2005-06", "2006-07", "2007-08",
"2008-09", "2009-10", "2010-11", "2011-12", "2012-13",
"2013-14", "2014-15")
#Remove some non-data rows (37-39) and drop the Team_ID column
NetTransAvgDF <- NetTransAvgDF[-37:-39,-1]
#Calculate the maximum and minimum value of the entire data table for plotting
#purposes (net spend can be negative, so both bounds are needed)
allMax <- 0
allMin <- 0
for (i in 2:11) {
if (max(NetTransAvgDF[,i][complete.cases(NetTransAvgDF[,i])]) > allMax) {
allMax <- max(NetTransAvgDF[,i][complete.cases(NetTransAvgDF[,i])])
}
if (min(NetTransAvgDF[,i][complete.cases(NetTransAvgDF[,i])]) < allMin) {
allMin <- min(NetTransAvgDF[,i][complete.cases(NetTransAvgDF[,i])])
}
}
#Plot row 2's data as the initial line to establish the axes
plot(factor(names(NetTransAvgDF)[2:11]), NetTransAvgDF[2,2:11], type="l",
ylim = c(allMin,allMax), xaxt = "n", xlab = "Season",
ylab = "Net Transfer Expenditure Average",
main = "Net Transfer Expenditure Average by Season")
#Add season labels to the bottom axis
axis(1, 1:10, names(NetTransAvgDF)[2:11])
#Now add a line for every team to the plot (row 2 gets drawn twice)
for (i in 1:36) {
lines(1:10, NetTransAvgDF[i,2:11], col = i)
}
#Copy the on-screen device to a png device
dev.copy(png, file = ".\\plots\\basic\\net_transfer_spend_average.png", width = 1280, height = 720, units = "px")
#Close the device to save the file
dev.off()
# ---- Market value: read and plot per-season squad market values ----
#Read in the market value sheet and set the column names
MarketValueDF <- read.xlsx("La Liga economics.xlsx", sheetName = "Market Value")
names(MarketValueDF) <- c("Team_ID", "Team", "2005-06", "2006-07", "2007-08",
"2008-09", "2009-10", "2010-11", "2011-12", "2012-13",
"2013-14", "2014-15")
#Remove some non-data rows (37-39) and drop the Team_ID column
MarketValueDF <- MarketValueDF[-37:-39,-1]
#Calculate the maximum value of the entire data table, ignoring NAs, for a
#common y-axis scale
allMax <- 0
for (i in 2:11) {
if (max(MarketValueDF[,i][complete.cases(MarketValueDF[,i])]) > allMax) {
allMax <- max(MarketValueDF[,i][complete.cases(MarketValueDF[,i])])
}
}
#Plot row 2's data as the initial line to establish the axes
plot(factor(names(MarketValueDF)[2:11]), MarketValueDF[2,2:11], type="l",
ylim = c(0,allMax), xaxt = "n", xlab = "Season", ylab = "Market Value",
main = "Market Value by Season")
#Add season labels to the bottom axis
axis(1, 1:10, names(MarketValueDF)[2:11])
#Now add a line for every team to the plot (row 2 gets drawn twice)
for (i in 1:36) {
lines(1:10, MarketValueDF[i,2:11], col = i)
}
#Copy the on-screen device to a png device
dev.copy(png, file = ".\\plots\\basic\\market_value.png", width = 1280, height = 720, units = "px")
#Close the device to save the file
dev.off()
# ---- Year-by-year correlations between points and spending measures ----
# NOTE(review): in all four loops below, NAs are dropped from each column
# independently; if the two columns have different NA patterns the surviving
# values no longer pair up row-for-row. cor(x, y,
# use = "pairwise.complete.obs") on the raw columns would be safer.
#Correlate TransExpAvgDF to PointsDF year-by-year
for (i in 2:11) {
#Convert PointsDF and TransExpAvgDF into one-dimensional vectors so that
#the correlation can be calculated. The complete.cases part is so that
#the operation excludes the NA values.
PointsDFVector <- c(as.matrix(PointsDF[,i]))[complete.cases(c(as.matrix(PointsDF[,i])))]
TransExpAvgDFVector <- c(as.matrix(TransExpAvgDF[,i]))[complete.cases(c(as.matrix(TransExpAvgDF[,i])))]
#Now calculate the correlation between the two vectors
corVal <- cor(PointsDFVector, TransExpAvgDFVector)
#Print the results along with the year
print(paste(names(PointsDF)[i], as.character(corVal)))
}
#Correlate TransExpAvgDF to PointsDF year-by-year (no Madrid or Barcelona)
for (i in 2:11) {
#Convert PointsDF and TransExpAvgDF into one-dimensional vectors so that
#the correlation can be calculated. The complete.cases part is so that
#the operation excludes the NA values. The -1:-2 bits cause the first two
#rows to be excluded. These are the RMD/BAR rows.
PointsDFVector <- c(as.matrix(PointsDF[-1:-2,i]))[complete.cases(c(as.matrix(PointsDF[-1:-2,i])))]
TransExpAvgDFVector <- c(as.matrix(TransExpAvgDF[-1:-2,i]))[complete.cases(c(as.matrix(TransExpAvgDF[-1:-2,i])))]
#Now calculate the correlation between the two vectors
corVal <- cor(PointsDFVector, TransExpAvgDFVector)
#Print the results along with the year
print(paste(names(PointsDF)[i], as.character(corVal)))
}
#Correlate MarketValueDF to PointsDF year-by-year
for (i in 2:11) {
PointsDFVector <- c(as.matrix(PointsDF[,i]))[complete.cases(c(as.matrix(PointsDF[,i])))]
MarketValueDFVector <- c(as.matrix(MarketValueDF[,i]))[complete.cases(c(as.matrix(MarketValueDF[,i])))]
corVal <- cor(PointsDFVector, MarketValueDFVector)
print(paste(names(PointsDF)[i], as.character(corVal)))
}
#Correlate MarketValueDF to PointsDF year-by-year (no Madrid or Barcelona)
for (i in 2:11) {
#Convert PointsDF and MarketValueDF into one-dimensional vectors so that
#the correlation can be calculated. The complete.cases part is so that
#the operation excludes the NA values. The -1:-2 bits cause the first two
#rows to be excluded. These are the RMD/BAR rows.
PointsDFVector <- c(as.matrix(PointsDF[-1:-2,i]))[complete.cases(c(as.matrix(PointsDF[-1:-2,i])))]
MarketValueDFVector <- c(as.matrix(MarketValueDF[-1:-2,i]))[complete.cases(c(as.matrix(MarketValueDF[-1:-2,i])))]
#Now calculate the correlation between the two vectors
corVal <- cor(PointsDFVector, MarketValueDFVector)
#Print the results along with the year
print(paste(names(PointsDF)[i], as.character(corVal)))
}
# ---- Points vs. market value: per-season linear regressions (all clubs) ----
#Calculate the maximum Market value, used as a shared x-axis limit below
marketMax <- 0
for (i in 2:11) {
if (max(MarketValueDF[,i][complete.cases(MarketValueDF[,i])]) > marketMax) {
marketMax <- max(MarketValueDF[,i][complete.cases(MarketValueDF[,i])])
}
}
#Create a linear regression model of points earned vs. market value (all
#clubs.)
for (i in 2:11) {
#Convert PointsDF and MarketValueDF into one-dimensional vectors so that
#the regression can be calculated. The complete.cases part is so that
#the operation excludes the NA values.
PointsDFVector <- c(as.matrix(PointsDF[,i]))[complete.cases(c(as.matrix(PointsDF[,i])))]
MarketValueDFVector <- c(as.matrix(MarketValueDF[,i]))[complete.cases(c(as.matrix(MarketValueDF[,i])))]
#Create the linear model and print the details
pts_MktValFit <- lm(PointsDFVector ~ MarketValueDFVector)
print(names(PointsDF)[i])
print(summary(pts_MktValFit))
#Build a human-readable "a + bx" formula for the plot title
# NOTE(review): when the slope is negative this renders as e.g. "50 - -0.3x"
# because the coefficient is not negated; the goals-against sections further
# down negate xCoef to handle this.
if (summary(pts_MktValFit)$coefficients[2] >= 0) {
signStr <- "+"
} else {
signStr <- "-"
}
regFormula <- paste(round(summary(pts_MktValFit)$coefficients[1], 4), " ",
signStr, " ",
round(summary(pts_MktValFit)$coefficients[2], 4),
"x", sep="")
#Scatter plot plus fitted trend line; shared axis limits make the ten
#per-season plots directly comparable
plot(MarketValueDFVector, PointsDFVector, type="p",
xlim=c(0, marketMax), ylim=c(0, 114),
main=paste("Points vs. Market Value, ",
names(PointsDF)[i], "\n", regFormula,
", R-Squared = ",
round(summary(pts_MktValFit)$adj.r.squared, 5), sep=""),
xlab = "Market Value", ylab = "Points", pch=19)
abline(pts_MktValFit)
#Copy the on-screen device to a png device
dev.copy(png, file = paste(".\\plots\\pointsVS\\points_vs_market_value_",
names(PointsDF)[i], ".png", sep=""),
width = 1280, height = 720, units = "px")
#Close the device to save the file
dev.off()
}
# ---- Points vs. market value, excluding Real Madrid and Barcelona ----
#Calculate the maximum Market value for plotting purposes (no RMD or BAR;
#rows 1-2 of the data frames are the RMD/BAR rows)
marketMaxNORMDBAR <- 0
for (i in 2:11) {
if (max(MarketValueDF[-1:-2,i][complete.cases(MarketValueDF[-1:-2,i])]) > marketMaxNORMDBAR) {
marketMaxNORMDBAR <- max(MarketValueDF[-1:-2,i][complete.cases(MarketValueDF[-1:-2,i])])
}
}
#Create a linear regression model of points earned vs. market value (no
#RMD or BAR.)
for (i in 2:11) {
#Convert PointsDF and MarketValueDF into one-dimensional vectors so that
#the regression can be calculated. The complete.cases part is so that
#the operation excludes the NA values.
PointsDFVector <- c(as.matrix(PointsDF[-1:-2,i]))[complete.cases(c(as.matrix(PointsDF[-1:-2,i])))]
MarketValueDFVector <- c(as.matrix(MarketValueDF[-1:-2,i]))[complete.cases(c(as.matrix(MarketValueDF[-1:-2,i])))]
#Create the linear model and print the details
pts_MktValFit <- lm(PointsDFVector ~ MarketValueDFVector)
print(names(PointsDF)[i])
print(summary(pts_MktValFit))
#Build a human-readable "a + bx" formula for the plot title (see the sign
#note in the all-clubs section above)
if (summary(pts_MktValFit)$coefficients[2] >= 0) {
signStr <- "+"
} else {
signStr <- "-"
}
regFormula <- paste(round(summary(pts_MktValFit)$coefficients[1], 4), " ",
signStr, " ",
round(summary(pts_MktValFit)$coefficients[2], 4),
"x", sep="")
#Scatter plot plus fitted trend line
plot(MarketValueDFVector, PointsDFVector, type="p",
xlim=c(0, marketMaxNORMDBAR), ylim=c(0, 114),
main=paste("Points vs. Market Value (No RMD/BAR), ",
names(PointsDF)[i], "\n", regFormula,
", R-Squared = ",
round(summary(pts_MktValFit)$adj.r.squared, 5), sep=""),
xlab = "Market Value", ylab = "Points", pch=19)
abline(pts_MktValFit)
#Copy the on-screen device to a png device
dev.copy(png, file = paste(".\\plots\\pointsVS\\points_vs_market_value_",
names(PointsDF)[i], "_noRMDBAR.png", sep=""),
width = 1280, height = 720, units = "px")
#Close the device to save the file
dev.off()
}
# ---- Points vs. transfer expense average: per-season regressions ----
#Calculate the maximum transfer expense average, used as a shared x-axis
#limit below
transExpAvgMax <- 0
for (i in 2:11) {
if (max(TransExpAvgDF[,i][complete.cases(TransExpAvgDF[,i])]) > transExpAvgMax) {
transExpAvgMax <- max(TransExpAvgDF[,i][complete.cases(TransExpAvgDF[,i])])
}
}
#Create a linear regression model of points earned vs. transfer expense average
#(all clubs.)
for (i in 2:11) {
#Convert PointsDF and TransExpAvgDF into one-dimensional vectors so that
#the regression can be calculated. The complete.cases part is so that
#the operation excludes the NA values.
PointsDFVector <- c(as.matrix(PointsDF[,i]))[complete.cases(c(as.matrix(PointsDF[,i])))]
TransExpAvgDFVector <- c(as.matrix(TransExpAvgDF[,i]))[complete.cases(c(as.matrix(TransExpAvgDF[,i])))]
#Create the linear model and print the details
pts_TExpAvgFit <- lm(PointsDFVector ~ TransExpAvgDFVector)
print(names(PointsDF)[i])
print(summary(pts_TExpAvgFit))
#Build a human-readable "a + bx" formula for the plot title (slope is not
#negated when negative; see note in the points-vs-market-value section)
if (summary(pts_TExpAvgFit)$coefficients[2] >= 0) {
signStr <- "+"
} else {
signStr <- "-"
}
regFormula <- paste(round(summary(pts_TExpAvgFit)$coefficients[1], 4), " ",
signStr, " ",
round(summary(pts_TExpAvgFit)$coefficients[2], 4),
"x", sep="")
#Scatter plot plus fitted trend line
plot(TransExpAvgDFVector, PointsDFVector, type="p",
xlim=c(0, transExpAvgMax), ylim=c(0, 114),
main=paste("Points vs. Transfer Expense Average, ",
names(PointsDF)[i], "\n", regFormula,
", R-Squared = ",
round(summary(pts_TExpAvgFit)$adj.r.squared, 5), sep=""),
xlab = "Transfer Expense Average", ylab = "Points", pch=19)
abline(pts_TExpAvgFit)
#Copy the on-screen device to a png device
dev.copy(png, file = paste(".\\plots\\pointsVS\\points_vs_transfer_expense_average_",
names(PointsDF)[i], ".png", sep=""),
width = 1280, height = 720, units = "px")
#Close the device to save the file
dev.off()
}
# ---- Points vs. transfer expense average, excluding RMD and BAR ----
#Calculate the maximum transfer expense average for plotting purposes (no RMD
#or BAR; rows 1-2 are the RMD/BAR rows)
transExpAvgMaxNORMDBAR <- 0
for (i in 2:11) {
if (max(TransExpAvgDF[-1:-2,i][complete.cases(TransExpAvgDF[-1:-2,i])]) > transExpAvgMaxNORMDBAR) {
transExpAvgMaxNORMDBAR <- max(TransExpAvgDF[-1:-2,i][complete.cases(TransExpAvgDF[-1:-2,i])])
}
}
#Create a linear regression model of points earned vs. transfer expense average (no
#RMD or BAR.)
for (i in 2:11) {
#Convert PointsDF and TransExpAvgDF into one-dimensional vectors so that
#the regression can be calculated. The complete.cases part is so that
#the operation excludes the NA values.
PointsDFVector <- c(as.matrix(PointsDF[-1:-2,i]))[complete.cases(c(as.matrix(PointsDF[-1:-2,i])))]
TransExpAvgDFVector <- c(as.matrix(TransExpAvgDF[-1:-2,i]))[complete.cases(c(as.matrix(TransExpAvgDF[-1:-2,i])))]
#Create the linear model and print the details
pts_TExpAvgFit <- lm(PointsDFVector ~ TransExpAvgDFVector)
print(names(PointsDF)[i])
print(summary(pts_TExpAvgFit))
#Build a human-readable "a + bx" formula for the plot title (slope is not
#negated when negative; see note in the points-vs-market-value section)
if (summary(pts_TExpAvgFit)$coefficients[2] >= 0) {
signStr <- "+"
} else {
signStr <- "-"
}
regFormula <- paste(round(summary(pts_TExpAvgFit)$coefficients[1], 4), " ",
signStr, " ",
round(summary(pts_TExpAvgFit)$coefficients[2], 4),
"x", sep="")
#Scatter plot plus fitted trend line
plot(TransExpAvgDFVector, PointsDFVector, type="p",
xlim=c(0, transExpAvgMaxNORMDBAR), ylim=c(0, 114),
main=paste("Points vs. Transfer Expense Average (No RMD/BAR), ",
names(PointsDF)[i], "\n", regFormula,
", R-Squared = ",
round(summary(pts_TExpAvgFit)$adj.r.squared, 5), sep=""),
xlab = "Transfer Expense Average", ylab = "Points", pch=19)
abline(pts_TExpAvgFit)
#Copy the on-screen device to a png device
dev.copy(png, file = paste(".\\plots\\pointsVS\\points_vs_transfer_expense_average_",
names(PointsDF)[i], "_noRMDBAR.png", sep=""),
width = 1280, height = 720, units = "px")
#Close the device to save the file
dev.off()
}
#COMMENTED OUT BECAUSE THIS ISN'T REALLY A VALID CALCULATION
# #Calculate the maximum goal differential value for plotting purposes
# goalDiffMin <- 0
# goalDiffMax <- 0
# for (i in 1:10) {
# if (max(standingsList[[i]]$GD) > goalDiffMax) {
# goalDiffMax <- max(standingsList[[i]]$GD)
# }
# if (min(standingsList[[i]]$GD) < goalDiffMin) {
# goalDiffMin <- min(standingsList[[i]]$GD)
# }
# }
# #Create a linear regression model of goal differential vs. market value
# #(all clubs.)
# for (i in 1:10) {
# #Extract each season in standingsList and perform the calculations
# goalDiffVector <- standingsList[[i]]$GD
# TransExpAvgDFVector <- c(as.matrix(TransExpAvgDF[,i+1]))[match(standingsList[[i]]$Team, PointsDF$Team)]
# #Create the linear model and print the details
# GD_TExpAvgFit <- lm(goalDiffVector ~ TransExpAvgDFVector)
# print(names(PointsDF)[i+1])
# print(summary(GD_TExpAvgFit))
#
# #Plot the data points and trend line
# if (summary(GD_TExpAvgFit)$coefficients[2] >= 0) {
# signStr <- "+"
# } else {
# signStr <- "-"
# }
# regFormula <- paste(round(summary(GD_TExpAvgFit)$coefficients[1], 4), " ",
# signStr, " ",
# round(summary(GD_TExpAvgFit)$coefficients[2], 4),
# "x", sep="")
# plot(TransExpAvgDFVector, goalDiffVector, type="p",
# xlim=c(0, transExpAvgMax), ylim=c(goalDiffMin, goalDiffMax),
# main=paste("Transfer Expense Average vs. Goal Differential, ",
# names(PointsDF)[i+1], "\n", regFormula,
# ", R-Squared = ",
# round(summary(GD_TExpAvgFit)$adj.r.squared, 5), sep=""),
# xlab = "Transfer Expense Average", ylab = "Goal Differential", pch=19)
# abline(GD_TExpAvgFit)
#
# #Copy the device to a png device
# dev.copy(png, file = paste(".\\plots\\goal_differential_vs_transfer_expense_average_",
# names(PointsDF)[i+1], ".png", sep=""),
# width = 1280, height = 720, units = "px")
#
# #Close the device to save the file
# dev.off()
# }
# Find the maximum "goals for" value across all ten seasons, used as a common
# y-axis upper limit for the goals-for plots that follow.
#
# BUG FIX: this previously scanned standingsList[[i]]$GD (goal differential)
# instead of $OF (goals for) -- a copy/paste slip; the no-RMD/BAR variant
# further down correctly uses $OF. Scanning GD could clip goals-for points
# above the plotted y range.
goalsForMax <- 0
for (i in seq_along(standingsList)) {
  # Hoist the per-season max so it is computed once per iteration.
  seasonMax <- max(standingsList[[i]]$OF)
  if (seasonMax > goalsForMax) {
    goalsForMax <- seasonMax
  }
}
# ---- Goals for vs. transfer expense average: per-season regressions ----
#Create a linear regression model of goals for vs. transfer expense average
#(all clubs.)
for (i in 1:10) {
#Extract each season's "goals for" column (OF) from standingsList, and look
#up the matching transfer averages by team name; match() aligns the
#standings row order with PointsDF's team order
goalsForVector <- standingsList[[i]]$OF
TransExpAvgDFVector <- c(as.matrix(TransExpAvgDF[,i+1]))[match(standingsList[[i]]$Team, PointsDF$Team)]
#Create the linear model and print the details
GF_TExpAvgFit <- lm(goalsForVector ~ TransExpAvgDFVector)
print(names(PointsDF)[i+1])
print(summary(GF_TExpAvgFit))
#Build a human-readable "a + bx" formula for the plot title
# NOTE(review): a negative slope renders as "a - -bx" here; the
# goals-against sections negate the coefficient to avoid this.
if (summary(GF_TExpAvgFit)$coefficients[2] >= 0) {
signStr <- "+"
} else {
signStr <- "-"
}
regFormula <- paste(round(summary(GF_TExpAvgFit)$coefficients[1], 4), " ",
signStr, " ",
round(summary(GF_TExpAvgFit)$coefficients[2], 4),
"x", sep="")
#Scatter plot plus fitted trend line; axis limits are shared across seasons
plot(TransExpAvgDFVector, goalsForVector, type="p",
xlim=c(0, transExpAvgMax), ylim=c(0, goalsForMax),
main=paste("Goals For vs. Transfer Expense Average, ",
names(PointsDF)[i+1], "\n", regFormula,
", R-Squared = ",
round(summary(GF_TExpAvgFit)$adj.r.squared, 5), sep=""),
xlab = "Transfer Expense Average", ylab = "Goals For", pch=19)
abline(GF_TExpAvgFit)
#Copy the on-screen device to a png device
dev.copy(png, file = paste(".\\plots\\goalsForVS\\goals_for_vs_transfer_expense_average_",
names(PointsDF)[i+1], ".png", sep=""),
width = 1280, height = 720, units = "px")
#Close the device to save the file
dev.off()
}
# ---- Goals for vs. transfer expense average, excluding RMD and BAR ----
#Calculate the maximum goals for value for plotting purposes (no RMD or BAR;
#teams are filtered out by name here rather than by row position)
goalsForMaxNORMDBAR <- 0
for (i in 1:10) {
if (max(standingsList[[i]]$OF[!(standingsList[[i]]$Team %in% c("Real Madrid", "Barcelona"))]) > goalsForMaxNORMDBAR) {
goalsForMaxNORMDBAR <- max(standingsList[[i]]$OF[!(standingsList[[i]]$Team %in% c("Real Madrid", "Barcelona"))])
}
}
#Create a linear regression model of goals for vs. transfer expense average
#(no RMD or BAR.)
for (i in 1:10) {
#Extract each season's "goals for" (OF) with RMD/BAR filtered out, and look
#up the matching transfer averages by team name via match()
goalsForVector <- standingsList[[i]]$OF[!(standingsList[[i]]$Team %in% c("Real Madrid", "Barcelona"))]
TransExpAvgDFVector <- c(as.matrix(TransExpAvgDF[,i+1]))[match(standingsList[[i]]$Team[!(standingsList[[i]]$Team %in% c("Real Madrid", "Barcelona"))], PointsDF$Team)]
#Create the linear model and print the details
GF_TExpAvgFit <- lm(goalsForVector ~ TransExpAvgDFVector)
print(names(PointsDF)[i+1])
print(summary(GF_TExpAvgFit))
#Build a human-readable "a + bx" formula for the plot title
if (summary(GF_TExpAvgFit)$coefficients[2] >= 0) {
signStr <- "+"
} else {
signStr <- "-"
}
regFormula <- paste(round(summary(GF_TExpAvgFit)$coefficients[1], 4), " ",
signStr, " ",
round(summary(GF_TExpAvgFit)$coefficients[2], 4),
"x", sep="")
#Scatter plot plus fitted trend line
plot(TransExpAvgDFVector, goalsForVector, type="p",
xlim=c(0, transExpAvgMaxNORMDBAR), ylim=c(0, goalsForMaxNORMDBAR),
main=paste("Goals For vs. Transfer Expense Average (No RMD/BAR), ",
names(PointsDF)[i+1], "\n", regFormula,
", R-Squared = ",
round(summary(GF_TExpAvgFit)$adj.r.squared, 5), sep=""),
xlab = "Transfer Expense Average", ylab = "Goals For", pch=19)
abline(GF_TExpAvgFit)
#Copy the on-screen device to a png device
dev.copy(png, file = paste(".\\plots\\goalsForVS\\goals_for_vs_transfer_expense_average_",
names(PointsDF)[i+1], "_noRMDBAR.png", sep=""),
width = 1280, height = 720, units = "px")
#Close the device to save the file
dev.off()
}
# ---- Goals for vs. market value: per-season regressions (all clubs) ----
#Create a linear regression model of goals for vs. market value
#(all clubs.)
for (i in 1:10) {
#Extract each season's "goals for" (OF) from standingsList, and look up the
#matching market values by team name via match()
goalsForVector <- standingsList[[i]]$OF
MarketValueDFVector <- c(as.matrix(MarketValueDF[,i+1]))[match(standingsList[[i]]$Team, PointsDF$Team)]
#Create the linear model and print the details
GF_MktValFit <- lm(goalsForVector ~ MarketValueDFVector)
print(names(PointsDF)[i+1])
print(summary(GF_MktValFit))
#Build a human-readable "a + bx" formula for the plot title
if (summary(GF_MktValFit)$coefficients[2] >= 0) {
signStr <- "+"
} else {
signStr <- "-"
}
regFormula <- paste(round(summary(GF_MktValFit)$coefficients[1], 4), " ",
signStr, " ",
round(summary(GF_MktValFit)$coefficients[2], 4),
"x", sep="")
#Scatter plot plus fitted trend line; axis limits are shared across seasons
plot(MarketValueDFVector, goalsForVector, type="p",
xlim=c(0, marketMax), ylim=c(0, goalsForMax),
main=paste("Goals For vs. Market Value, ",
names(PointsDF)[i+1], "\n", regFormula,
", R-Squared = ",
round(summary(GF_MktValFit)$adj.r.squared, 5), sep=""),
xlab = "Market Value", ylab = "Goals For", pch=19)
abline(GF_MktValFit)
#Copy the on-screen device to a png device
dev.copy(png, file = paste(".\\plots\\goalsForVS\\goals_for_vs_market_value_",
names(PointsDF)[i+1], ".png", sep=""),
width = 1280, height = 720, units = "px")
#Close the device to save the file
dev.off()
}
# ---- Goals for vs. market value, excluding RMD and BAR ----
#Create a linear regression model of goals for vs. market value
#(no RMD/BAR.)
for (i in 1:10) {
#Extract each season's "goals for" (OF) with RMD/BAR filtered out by name,
#and look up the matching market values by team name via match()
goalsForVector <- standingsList[[i]]$OF[!(standingsList[[i]]$Team %in% c("Real Madrid", "Barcelona"))]
MarketValueDFVector <- c(as.matrix(MarketValueDF[,i+1]))[match(standingsList[[i]]$Team[!(standingsList[[i]]$Team %in% c("Real Madrid", "Barcelona"))], PointsDF$Team)]
#Create the linear model and print the details
GF_MktValFit <- lm(goalsForVector ~ MarketValueDFVector)
print(names(PointsDF)[i+1])
print(summary(GF_MktValFit))
#Build a human-readable "a + bx" formula for the plot title
if (summary(GF_MktValFit)$coefficients[2] >= 0) {
signStr <- "+"
} else {
signStr <- "-"
}
regFormula <- paste(round(summary(GF_MktValFit)$coefficients[1], 4), " ",
signStr, " ",
round(summary(GF_MktValFit)$coefficients[2], 4),
"x", sep="")
#Scatter plot plus fitted trend line
plot(MarketValueDFVector, goalsForVector, type="p",
xlim=c(0, marketMaxNORMDBAR), ylim=c(0, goalsForMaxNORMDBAR),
main=paste("Goals For vs. Market Value (No RMD/BAR), ",
names(PointsDF)[i+1], "\n", regFormula,
", R-Squared = ",
round(summary(GF_MktValFit)$adj.r.squared, 5), sep=""),
xlab = "Market Value", ylab = "Goals For", pch=19)
abline(GF_MktValFit)
#Copy the on-screen device to a png device
dev.copy(png, file = paste(".\\plots\\goalsForVS\\goals_for_vs_market_value_",
names(PointsDF)[i+1], "_noRMDBAR.png", sep=""),
width = 1280, height = 720, units = "px")
#Close the device to save the file
dev.off()
}
# Find the maximum "goals against" value across all ten seasons, used as a
# common y-axis upper limit for the goals-against plots that follow.
#
# BUG FIX: this previously scanned standingsList[[i]]$GD (goal differential)
# instead of $OA (goals against) -- a copy/paste slip; the no-RMD/BAR
# variant below correctly uses $OA. Scanning GD could clip goals-against
# points above the plotted y range.
goalsAgainstMax <- 0
for (i in seq_along(standingsList)) {
  # Hoist the per-season max so it is computed once per iteration.
  seasonMax <- max(standingsList[[i]]$OA)
  if (seasonMax > goalsAgainstMax) {
    goalsAgainstMax <- seasonMax
  }
}
# ---- Goals against vs. market value: per-season regressions (all clubs) ----
# FIX: the y-axis label previously read "Goals For" (copy/paste from the
# goals-for section); it now correctly reads "Goals Against". The repeated
# summary() calls are also hoisted into fitSummary.
for (i in 1:10) {
  # Goals against (OA) for season i, with market values looked up by team
  # name; match() aligns the standings row order with PointsDF's team order.
  goalsAgainstVector <- standingsList[[i]]$OA
  MarketValueDFVector <- c(as.matrix(MarketValueDF[,i+1]))[match(standingsList[[i]]$Team, PointsDF$Team)]
  # Fit the linear model and print the details.
  GA_MktValFit <- lm(goalsAgainstVector ~ MarketValueDFVector)
  fitSummary <- summary(GA_MktValFit)  # computed once, reused below
  print(names(PointsDF)[i+1])
  print(fitSummary)
  # Build a human-readable "a +/- bx" formula for the plot title; the slope
  # is negated when negative so the sign is not printed twice.
  if (fitSummary$coefficients[2] >= 0) {
    signStr <- "+"
    xCoef <- round(fitSummary$coefficients[2], 4)
  } else {
    signStr <- "-"
    xCoef <- -1 * round(fitSummary$coefficients[2], 4)
  }
  regFormula <- paste(round(fitSummary$coefficients[1], 4), " ",
                      signStr, " ", xCoef, "x", sep="")
  # Scatter plot plus fitted trend line; shared axis limits keep the ten
  # per-season plots directly comparable.
  plot(MarketValueDFVector, goalsAgainstVector, type="p",
       xlim=c(0, marketMax), ylim=c(0, goalsAgainstMax),
       main=paste("Goals Against vs. Market Value, ",
                  names(PointsDF)[i+1], "\n", regFormula,
                  ", R-Squared = ",
                  round(fitSummary$adj.r.squared, 5), sep=""),
       xlab = "Market Value", ylab = "Goals Against", pch=19)
  abline(GA_MktValFit)
  # Copy the on-screen device to a png file, then close it to save.
  dev.copy(png, file = paste(".\\plots\\goalsAgainstVS\\goals_against_vs_market_value_",
                             names(PointsDF)[i+1], ".png", sep=""),
           width = 1280, height = 720, units = "px")
  dev.off()
}
# ---- Goals against vs. market value, excluding Real Madrid and Barcelona ----
# Maximum goals-against across seasons with RMD/BAR filtered out by name,
# used as the shared y-axis limit below.
goalsAgainstMaxNORMDBAR <- 0
for (i in 1:10) {
  seasonOA <- standingsList[[i]]$OA[!(standingsList[[i]]$Team %in% c("Real Madrid", "Barcelona"))]
  if (max(seasonOA) > goalsAgainstMaxNORMDBAR) {
    goalsAgainstMaxNORMDBAR <- max(seasonOA)
  }
}
# Per-season regressions of goals against on market value (no RMD/BAR).
# FIX: the y-axis label previously read "Goals For" (copy/paste from the
# goals-for section); it now correctly reads "Goals Against". Repeated
# summary() calls are hoisted into fitSummary.
for (i in 1:10) {
  # Goals against (OA) with RMD/BAR filtered out by name; market values
  # looked up for the same filtered team list via match().
  goalsAgainstVector <- standingsList[[i]]$OA[!(standingsList[[i]]$Team %in% c("Real Madrid", "Barcelona"))]
  MarketValueDFVector <- c(as.matrix(MarketValueDF[,i+1]))[match(standingsList[[i]]$Team[!(standingsList[[i]]$Team %in% c("Real Madrid", "Barcelona"))], PointsDF$Team)]
  # Fit the linear model and print the details.
  GA_MktValFit <- lm(goalsAgainstVector ~ MarketValueDFVector)
  fitSummary <- summary(GA_MktValFit)  # computed once, reused below
  print(names(PointsDF)[i+1])
  print(fitSummary)
  # Build a human-readable "a +/- bx" formula for the plot title; the slope
  # is negated when negative so the sign is not printed twice.
  if (fitSummary$coefficients[2] >= 0) {
    signStr <- "+"
    xCoef <- round(fitSummary$coefficients[2], 4)
  } else {
    signStr <- "-"
    xCoef <- -1 * round(fitSummary$coefficients[2], 4)
  }
  regFormula <- paste(round(fitSummary$coefficients[1], 4), " ",
                      signStr, " ", xCoef, "x", sep="")
  # Scatter plot plus fitted trend line with shared axis limits.
  plot(MarketValueDFVector, goalsAgainstVector, type="p",
       xlim=c(0, marketMaxNORMDBAR), ylim=c(0, goalsAgainstMaxNORMDBAR),
       main=paste("Goals Against vs. Market Value (no RMD/BAR), ",
                  names(PointsDF)[i+1], "\n", regFormula,
                  ", R-Squared = ",
                  round(fitSummary$adj.r.squared, 5), sep=""),
       xlab = "Market Value", ylab = "Goals Against", pch=19)
  abline(GA_MktValFit)
  # Copy the on-screen device to a png file, then close it to save.
  dev.copy(png, file = paste(".\\plots\\goalsAgainstVS\\goals_against_vs_market_value_",
                             names(PointsDF)[i+1], "_noRMDBAR.png", sep=""),
           width = 1280, height = 720, units = "px")
  dev.off()
}
|
7e4c63d17f456748e5f7ca0fabd873c8f51571ec
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/715_0/rinput.R
|
73ce2a48d58103a6d781e11f94fae244b103ca16
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 133
|
r
|
rinput.R
|
# Unroot the Newick tree in 715_0.txt and write the result to
# 715_0_unrooted.txt (input for downstream codeml processing).
library(ape)
phylo_rooted <- read.tree("715_0.txt")
phylo_unrooted <- unroot(phylo_rooted)
write.tree(phylo_unrooted, file="715_0_unrooted.txt")
|
66c3e36d314b270a6e79a6e1a1026ac91eeb8b5a
|
2f475f7067440bc1f4561aec79ac0216bf27d36f
|
/iowa_case_ts/server.R
|
42ea4631d7a0b9be917f7df21c1ec7b7acb5c063
|
[] |
no_license
|
mkim0903/RshinyApp
|
c1979d0e1c611167db3d504c5de3e3f592b65ed3
|
f959718525631b0812564baf752e8b8d754965a7
|
refs/heads/main
| 2023-07-12T18:20:21.835733
| 2021-08-25T21:39:07
| 2021-08-25T21:39:07
| 381,122,241
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,193
|
r
|
server.R
|
# Shared palette: cols[1] (blue) = observations, cols[2] (orange) = predictions.
cols <- c("#045a8d", "#cc4c02")
# Build the Iowa daily-new-cases time-series ggplot.
#   plot.type: 'counts' plots DailyCases, 'logcounts' plots logDailyCases.
# Data always comes from slid::dfplot, which is assumed to have columns
# Date, DailyCases, logDailyCases and Group ("Observation"/"Prediction")
# -- TODO confirm against the slid package.
# NOTE(review): the `date.update` argument is accepted but never used in the
# body; confirm whether it is vestigial before removing (server.R passes it).
iowa.case.ts = function(date.update, plot.type) {
  dfplot = slid::dfplot
  if (plot.type == 'counts'){
  ts <-
    ggplot(dfplot, aes(Date, DailyCases, colour = Group) ) +
    ## Plot observed
    geom_line(colour = 'darkgray') +
    geom_point() +
    scale_color_manual(values = c("Observation" = cols[1], "Prediction" = cols[2])) +
    ## Change labs
    labs(title = 'Daily new infected cases and prediction for Iowa') +
    xlab('Date') +
    ylab('Daily new cases')
  }else if (plot.type == 'logcounts'){
  ts <-
    ggplot(dfplot, aes(Date, logDailyCases, colour = Group)) +
    ## Plot observed
    geom_line(colour = 'darkgray') +
    geom_point() +
    scale_color_manual(values = c("Observation" = cols[1], "Prediction" = cols[2])) +
    ## Change labs
    labs(title = 'Logarithm of daily new infected cases and prediction for Iowa') +
    xlab('Date') +
    ylab('Log (Daily new cases)')
  }
  return(ts)
}
# Shiny server: render the Iowa time series for the plot type chosen in the
# UI. `date.update` is expected to come from the app's global environment
# (it is not defined in this file) -- TODO confirm against global.R.
shinyServer(function(input, output) {
  output$iowa_case_ts <- renderPlotly({
    iowa.case.ts(date.update = date.update, plot.type = input$plot_type)
  })
})
|
d2351b965f6ad3d032926ee5b2de98aba59cafea
|
4d0716e85ee73c0ba88a83b45f141652bbf7625d
|
/other/2.1/Assignment_2.1_HillZach.R
|
c272afb03e02355763378ed3cd69a2bdb67e3814
|
[] |
no_license
|
midumass/DSC-520
|
dc83acec5077b80fd15f7f0e2b879c8d17cb27bd
|
fcd08ca23a583dc633a6556484b77c76b9b96b4e
|
refs/heads/master
| 2022-08-14T08:25:56.821159
| 2020-05-28T04:08:20
| 2020-05-28T04:08:20
| 255,189,729
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,683
|
r
|
Assignment_2.1_HillZach.R
|
# Assignment: Assignment 2.1 DSC 520
# Name: Hill, Zach
# Date: 24MAR2019
# 1. What are the observational units in this study?
#
# Units of Observation in this study are the grades of students in a proferssor's course.
#
# 2. Identify the variables mentioned in the narrative paragraph and determine
# which are categorical and quantitative?
#
# Categorical variables include the section topics (sports vs. other areas),
# while quantitative variables would include the students scores and the number of students
# with each score
#
# 3. Create one variable to hold a subset of your data set that contains only
# the Regular Section and one variable for the Sports Section.
library(readr)
# Read the class scores; expected columns include Section and Score
# (and, per the plots below, Count) -- TODO confirm against scores.csv.
my_csv <- read_csv('scores.csv')
# Split into the two section types; bare names print them to the console.
sports <- subset(my_csv, Section == 'Sports')
sports
regular <- subset(my_csv, Section == 'Regular')
regular
# 4. Use the Plot function to plot each Sections scores and the number of
# students achieving that score. Use additional Plot Arguments to label the
# graph and give each axis an appropriate label. Once you have produced your
# Plots answer the following questions:
# Scatter plots: score (x) vs. number of students with that score (y).
plot(sports$Score, sports$Count, main = 'Sports Sections', xlab = 'Scores', ylab = 'Number of Students')
plot(regular$Score, regular$Count, main = 'Regular Sections', xlab = 'Scores', ylab = 'Number of Students')
# a. Comparing and contrasting the point distributions between the two section,
# looking at both tendency and consistency: Can you say that one section tended
# to score more points than the other? Justify and explain your answer.
# More students in the regular sections scored higher. Both the mean and the
# median scores of the regular sections were higher than the sports sections.
# Central-tendency comparison: overall, then per section type.
mean(my_csv$Score)
mean(sports$Score)
mean(regular$Score)
median(my_csv$Score)
median(sports$Score)
median(regular$Score)
# b. Did every student in one section score more points than every student in
# the other section? If not, explain what a statistical tendency means in this
# context.
# No, students scores were fairly well distributed across all sections. The
# sports sections had a broader range of scores than the regular sections but
# tended towards doing above the mean of the other students in the sections.
# The regular students were more evenly spread out.
# c. What could be one additional variable that was not mentioned in the
# narrative that could be influencing the point distributions between the two
# sections?
# It might be important to allow the students to have chosen whether they were
# enrolled in the sports section or the regular section. If a student finds
# no interest in sports, they might be less likely to do well with sports.
|
8a9d0d6af6ec667d53b51d68fc093e8cec327a16
|
c49aa09f1f83ee8f8c9d1e716ae38381ed3fafca
|
/feature_selection/ex_9/roc9_1_4.R
|
1498421675e62e021a128c7f50f27de5d366dd45
|
[] |
no_license
|
whtbowers/multiomics
|
de879d61f15aa718a18dc866b1e5ef3848e27c42
|
81dcedf2c491107005d184f93cb6318865d00e65
|
refs/heads/master
| 2020-04-11T03:25:40.635266
| 2018-09-24T08:51:06
| 2018-09-24T08:51:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,084
|
r
|
roc9_1_4.R
|
# ROC analysis setup (exercise 9): load selected TB-vs-OD features and the
# training matrices, and split the selected features by regulation direction.
setwd("/home/whb17/Documents/project3/project_files/feature_selection/ex_9/")
#setwd("/project/home17/whb17/Documents/project3/project_files/preprocessing/ex_9/")
library(pROC)
library(ggplot2)
set.seed(12)
# To direct to the correct folder
date <- "2018-08-07/"
ex_dir <- "ex_9/"
# Features selected in Kaforou 2013
sel.gene.kaforou.tb_od <- read.csv("../../data/kaforou_2013/gene_tb_od_kaforou_2013.csv", header=TRUE, row.names = 1)
# Complete datasets
# BUG FIX: df.gene.all is used below (df.gp.all, probe-id reconstruction) but
# its read.csv() had been commented out, so the script failed with
# "object 'df.gene.all' not found".
df.gene.all <- read.csv("../../data/ex_9/gene_train_body.csv", header=TRUE, row.names = 1)
df.prot.all <- read.csv("../../data/ex_9/prot_train_body.csv", header=TRUE, row.names = 1)
df.gp.all <- cbind(df.prot.all, df.gene.all)
df.meta <- read.csv("../../data/ex_9/gp_train_meta.csv", header=TRUE, row.names = 1)
df.meta$group <- as.character(df.meta$group)
# Selected features for tb vs od
sel.gene.tb_od <- read.csv("../../data/ex_9/feat_sel/gene_tb_od_BH_LFC_lasso_sig_factors.csv", header=TRUE, row.names = 1)
sel.prot.tb_od <- read.csv("../../data/ex_9/feat_sel/prot_tb_od_BH_LFC_lasso_sig_factors.csv", header=TRUE, row.names = 1)
sel.gp.tb_od <- rbind(sel.prot.tb_od, sel.gene.tb_od)
# Reconstitute probe ids (first two "_"-separated tokens of each gene column
# name) so the Kaforou table can be searched by id. vapply replaces the
# original grow-by-c() loop.
all.probe.ids <- vapply(colnames(df.gene.all), function(cn) {
  id.parts <- strsplit(cn, split = "_")[[1]]
  paste(id.parts[1], "_", id.parts[2], sep = "")
}, character(1), USE.NAMES = FALSE)
# Split the selected features into up- and down-regulated sets (vectorized
# replacement for the original element-by-element append loop; any reg_dir
# other than "up" is treated as down, as before).
up.mask <- sel.gp.tb_od$reg_dir == "up"
my.upreg.factors <- as.character(sel.gp.tb_od$features[up.mask])
my.downreg.factors <- as.character(sel.gp.tb_od$features[!up.mask])
# Expression sub-matrices for the two feature sets.
df.upreg.my.tb_od <- df.gp.all[match(my.upreg.factors, colnames(df.gp.all))]
df.downreg.my.tb_od <- df.gp.all[match(my.downreg.factors, colnames(df.gp.all))]
|
a4f5e28b82a274b7869ac62a84df804d542baa20
|
050b136eb6bb7c7d57c18ea894104acf890e3bb7
|
/src/prep-inputs-static.R
|
da5f205f0e388e4e70a3769739da500676274ee2
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
LBNL-UCB-STI/gem
|
fc5bf991a4e2c95368f68bd7478f1cde40891a01
|
3ce8dcac69fe504bfec62b9cec7d9af0c1f1178e
|
refs/heads/master
| 2023-04-24T19:04:33.083931
| 2021-02-04T22:29:06
| 2021-02-04T22:29:06
| 157,785,909
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,765
|
r
|
prep-inputs-static.R
|
#############################################################################################
# Grid-Integrated Electric Mobility Model (GEM)
#
# This has functions needed to prepare static inputs (those that will never vary in an
# experiment).
#
# Argument: none
# Returns: list containing data tables used to run the model (as named data.tables)
#############################################################################################
# Build the static (experiment-invariant) model inputs.
# Relies on globals defined elsewhere in the project: pp(), generators, days,
# year, load (a keyed data.table), date.info() -- not visible in this file.
# Returns list(sets=..., parameters=...) of named data.tables/vectors.
prep.inputs.static <- function(){
  cat(pp('Creating static inputs\n'))
  ##### STATIC SETS #####
  # Census-style grid regions, each split into rural/urban mobility regions.
  regions <- c('ENC','ESC','MAT-NL','MAT-NY','MTN','NENG','PAC-CA','PAC-NL','SAT-FL','SAT-NL','WNC','WSC-TX','WSC-NL')
  rmob <- as.vector(sapply(regions,function(x){ pp(x,c('-RUR','-URB'))}))
  rmobtor <- data.table('r'=rep(regions,each=2),'rmob'=rmob)
  g <- generators$g
  gtor <- generators[,list(g,r)]
  # Renewable generator subsets by fuel type.
  hydro <- generators$g[generators$FuelType=='Hydro']
  solar <- generators$g[generators$FuelType=='Solar']
  wind <- generators$g[generators$FuelType=='Wind']
  # t is one label per simulated hour: 24 hours for each simulated day.
  inputs.sets <- list(t=pp('t',sprintf('%04d',seq(1,length(days)*24))),rmob=rmob,r=regions,rmobtor=rmobtor,g=g,gtor=gtor,hydro=hydro,solar=solar,wind=wind)
  ##### STATIC PARAMETERS #####
  dates <- date.info(days,year)
  # Hour labels of the simulated days within the full-year load table.
  hours.to.simulate <- unlist(lapply(days,function(day){ pp('t',sprintf('%04d',(day-1)*24+1:24))}))
  # NOTE: := modifies the global `load` data.table in place (side effect).
  load[,t:=pp('t',sprintf('%04d',as.numeric(substr(as.character(t),2,nchar(as.character(t))))))]
  setkey(load,r,t)
  # Subset load to the simulated hours, then relabel t consecutively per
  # region so it matches inputs.sets$t.
  demandLoad <- load[load$t%in%hours.to.simulate,list(r,t,value=demandLoad)]
  demandLoad[,t:=NULL]
  demandLoad[,t:=inputs.sets$t,by='r']
  demandLoad <- demandLoad[,list(r,t,value)]
  inputs.parameters <- list(demandLoad=demandLoad)
  inputs <- list(sets=inputs.sets,parameters=inputs.parameters)
  inputs
}
|
e3e022e147158a2de9e733700afb2289f0276fa6
|
27d0436a8c9725ca98962d239571478de7727a2a
|
/man/extract_ffd.Rd
|
f4d6786fb0083d7adca38772a5744188c7cc857a
|
[
"MIT"
] |
permissive
|
lee269/iapdashboardadmin
|
7d3d762c1956c88512c324d3aea15b3f6958e89c
|
43312e4012f871f62f3ada29f085ee65670293a9
|
refs/heads/master
| 2020-12-02T08:50:05.691888
| 2020-02-22T12:09:54
| 2020-02-22T12:09:54
| 230,950,866
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 563
|
rd
|
extract_ffd.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extract_ffd.R
\name{extract_ffd}
\alias{extract_ffd}
\title{Extract 4-digit FFD-level data from a Comtrade bulk download zipfile}
\usage{
extract_ffd(file)
}
\arguments{
\item{file}{zipfile to process}
}
\value{
A tibble containing trade data
}
\description{
\code{extract_ffd} is a low-level function that works on a single file. Use
\code{\link{merge_ffd}} to apply the same operation to a number of zipfiles in a
single folder
}
\examples{
\dontrun{extract_ffd("152-2016.zip")}
}
|
35baf40e4e470c3b5aed6424e0f20996ec5807bf
|
ee1af63213eaf268bf38a51e52883e43ca811937
|
/hands-on-with-r/project01.R
|
de0909a328de5f6767dcfbf941ca516ee0d0bee0
|
[] |
no_license
|
geocarvalho/r-bioinfo-ds
|
06ce4ae515981989274ade8f582988ea6fef6ffa
|
596daf835f2d8c64055e96906e6f3bda7fa3d42b
|
refs/heads/master
| 2023-05-11T14:45:41.841356
| 2023-04-28T08:02:47
| 2023-04-28T08:02:47
| 92,194,815
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 693
|
r
|
project01.R
|
# Chapter 1: Objects and functions
# Roll `size` fair dice whose faces are the values in `list` (default two
# six-sided dice) and return the total. Sampling is with replacement, so the
# same face can come up on several dice.
roll <- function(list=1:6, size=2) {
  draws <- sample(x = list, size = size, replace = TRUE)
  sum(draws)
}
# Chapter 2: Packages and help pages
library(ggplot2)
x <- c(-1, -0.8, -0.6, -0.4, -0.2, 0, 0.2, 0.4, 0.6, 0.8, 1)
y <- x^3
qplot(x, y)
#histogram
x <- c(1, 2, 2, 2, 3, 3)
qplot(x, binwidth=1)
x2 <- c(1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 4)
qplot(x2, binwidth=1)
x3 <- c(0, 1, 1, 2, 2, 2, 3, 3, 4)
qplot(x3, binwidth=1)
rolls <- replicate(1000, roll())
qplot(rolls, binwidth=1)
# Roll `size` loaded dice and return the total: the sixth face has
# probability 3/8 while faces one through five have 1/8 each.
# NOTE: the weight vector assumes `list` has exactly six elements.
roll <- function(list=1:6, size=2) {
  face_weights <- c(1/8, 1/8, 1/8, 1/8, 1/8, 3/8)
  draws <- sample(x = list, size = size, replace = TRUE, prob = face_weights)
  sum(draws)
}
rolls <- replicate(1000, roll())
qplot(rolls, binwidth = 1)
|
77d10504bcdf3ab695860ce83d8d4a9fbf7e8dfa
|
e37c3e8e0b32162ca7ed72fc78d3815a87ecbb2b
|
/pairwise_approach/Carrie/Code/RunRatings.R
|
cdecac2e9c77f34a47cbb91fdd516533f4160e7d
|
[
"MIT"
] |
permissive
|
cfowle/elo_sailor
|
7169e15f1eebaf5c9287f4a960ba8202a393f290
|
b3d436e749c20ffcadc73c030f1d28508c73fa90
|
refs/heads/master
| 2020-06-04T10:41:46.569880
| 2019-08-01T12:04:29
| 2019-08-01T12:04:29
| 191,988,433
| 1
| 2
|
MIT
| 2019-07-29T11:44:32
| 2019-06-14T18:23:28
|
Python
|
UTF-8
|
R
| false
| false
| 2,081
|
r
|
RunRatings.R
|
###############################################################################
### PROJECT: ELO SAILOR
### CREATED: 2019-06-24
### MODIFIED: 2019-06-24
### REVIEWED: NO
### SUMMARY: Runs ratings
###############################################################################
##Import college test dataset of race results; school_coded identifies the
##competitor. Requires readr/dplyr/magrittr (%<>%) loaded by the caller.
college = read_csv("../Input/neisa_only.csv")
##Collapse to one row per (regatta, day, race, competitor), keeping the best
##(minimum) score and place.
college %<>%
  mutate(X1 = NULL,
         raceID = 0,
         competitorName = school_coded) %>%
  rename(competitorID = school_coded) %>%
  group_by(regatta_id, day, raceID, competitorID, competitorName) %>%
  summarise(score = min(score), place = min(place)) %>%
  ungroup()
##TODO: Import clean results dataset
##TODO: If using existing ratings, import the existing ratings table
##If starting fresh, create empty existingRatings, pastRatings, competitors,
##and regattas tables with the expected column types.
results = college
existingRatings = data.frame(competitorID = character(),
                          regattaID = character(),
                          day = numeric(),
                          rating = numeric())
pastRatings = existingRatings
competitors = data.frame(competitorID = character(),
                         name = character())
##Get list of regattas (one row per regatta/day, NA ids dropped)
regattaTable = results %>%
  select(regatta_id, day) %>%
  distinct() %>%
  mutate(name = regatta_id) %>%
  rename(regattaID = regatta_id) %>%
  filter(!is.na(regattaID))
##Run ratings: sequentially update the rating tables one regatta at a time.
##updateExistingRatings() is defined elsewhere in the project; it returns a
##list with "current" and "past" rating tables.
##NOTE(review): 1:nrow(regattaTable) misbehaves if the table is empty;
##seq_len(nrow(regattaTable)) would be safer.
for(i in 1:nrow(regattaTable)){
  regatta = regattaTable[i,]
  id = regatta$regattaID[[1]]
  regattaResults = results %>%
    filter(regatta_id == id)
  output = updateExistingRatings(existingRatings,
                                 competitors,
                                 pastRatings,
                                 regatta,
                                 regattaResults)
  existingRatings = output[["current"]]
  pastRatings = output[["past"]]
}
##TODO: Export results
##Build the export table in one shot instead of growing a data.frame row by
##row with bind_rows (O(n^2) in copies); this also handles an empty
##existingRatings correctly, where the original `1:nrow(...)` loop broke.
##Column names match what the per-row data.frame(competitor$...) produced.
##(The per-row debug print() of each competitor has been dropped.)
ratings = data.frame(
  competitor.competitorID = existingRatings$competitorID,
  competitor.rating = existingRatings$rating
)
|
8892e8eb0099dc16393fb1f55709beefed00f7e6
|
fef6ba95f4a6a98e26f7f9f81bc457c562e62364
|
/tests/testthat/test-checkItemExists.R
|
e3dbee9e8645468c0f2793a132f20146891e3b97
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
USGS-R/hazardItems
|
48b6701b082cde4a51edd417a86aa963ed2f9383
|
dcf69e2df7d4b0db5054c8193bcc4aca4d41e859
|
refs/heads/main
| 2023-04-13T20:30:37.493049
| 2020-08-13T20:42:26
| 2020-08-13T20:42:26
| 10,981,467
| 5
| 10
|
NOASSERTION
| 2023-04-07T23:06:59
| 2013-06-26T22:59:23
|
R
|
UTF-8
|
R
| false
| false
| 211
|
r
|
test-checkItemExists.R
|
context("testing checkItemExists")
# Checks checkItemExists() against a known-bad and a known-good item id.
# NOTE(review): setBaseURL("prod") suggests these tests hit a live service
# and need network access -- consider skip_on_cran()/skip_if_offline().
test_that ("check if item exists", {
  setBaseURL("prod")
  expect_false(checkItemExists("CHEX123")) # bad itemID
  expect_true(checkItemExists("CCGftiy")) # good itemID
})
|
9812e60704c3e71b4cca65493ad23c41abaf1b0c
|
1e7d70ac2935728335327b6b8e7755f48c6cbbb3
|
/ui.R
|
798764f9a9b034a6ea14bd9aa46e6e1dcd4e93f7
|
[] |
no_license
|
cpulec/chitest
|
09b4a2782d9a54d07c19e6db74d6423fc5af3212
|
2da95246506cec543b721121f3edd0dbd6c54d54
|
refs/heads/master
| 2021-01-18T17:10:45.917941
| 2014-02-28T04:01:16
| 2014-02-28T04:01:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,716
|
r
|
ui.R
|
library(shiny)
# library(shinyIncubator)
# library(ggplot2)
# library(ggmap)
library(rCharts)
library(doSNOW)
library(foreach)
# Define UI for miles per gallon application
# Page layout: header + sidebar (all input controls) + main panel (tabs).
# Uses submitButton, so inputs only propagate when "Update" is clicked.
shinyUI(pageWithSidebar(
  ## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  ## Application title
  ## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  headerPanel("Chicago Crime Data Visualisation"),
  ## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  ## Sidebar Panel
  ## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  sidebarPanel(
    wellPanel(
      helpText(HTML("<b>READY?</b>")),
      HTML("Continue to scroll down and modify the settings. Come back and click this when you are ready to render new plots."),
      submitButton("Update Graphs and Tables")
    ),
    ## Crime type and date-range filters.
    ## NOTE(review): the date bounds are hard-coded (max 2014-09-30) and the
    ## enddate default (2015-01-02) is outside its own max -- see TODO below.
    wellPanel(
      helpText(HTML("<b>BASIC SETTINGS</b>")),
      selectInput("crimetype", "Choose Crime Type:", choice = c("THEFT", "BATTERY", "BURGLARY","ROBBERY")),
      helpText("Examples: BATTERY, THEFT etc."),
      dateInput("startdate", "Start Date of Data Collection:", value = "2000-01-01", format = "mm-dd-yyyy",
                min = "2000-01-01", max = "2014-09-29"),
      dateInput("enddate", "End Date of Data Collection:", value = "2015-01-02", format = "mm-dd-yyyy",
                min = "startdate", max = "2014-09-30"),
      ##Need some validation that enddate is after start date
      helpText("Note: Enter info here if we want")
    ),
    ## Google-map rendering options consumed by the server's map plot.
    wellPanel(
      helpText(HTML("<b>MAP SETTINGS</b>")),
      textInput("center", "Enter a Location to Center Map, such as city or zipcode:", "Chicago"),
      selectInput("facet", "Choose Facet Type:", choice = c("none","type", "month", "category")),
      selectInput("type", "Choose Google Map Type:", choice = c("roadmap", "satellite", "hybrid","terrain")),
      checkboxInput("res", "High Resolution?", FALSE),
      checkboxInput("bw", "Black & White?", FALSE),
      sliderInput("zoom", "Zoom Level (Recommended - 14):",
                  min = 9, max = 20, step = 1, value = 12)
    ),
    ## Density-overlay appearance.
    ## NOTE(review): input id "alpharanage" looks like a typo for
    ## "alpharange", but the server must use the same id -- do not rename
    ## one side only.
    wellPanel(
      helpText(HTML("<b>DENSITY PLOT SETTINGS</b>")),
      sliderInput("alpharanage", "Alpha Range:",
                  min = 0, max = 1, step = 0.1, value = c(0.1, 0.4)),
      sliderInput("bins", "Number of Bins:",
                  min = 5, max = 50, step = 5, value = 15),
      sliderInput("boundwidth", "Boundary Lines Width:",
                  min = 0, max = 1, step = 0.1, value = 0.1),
      selectInput("boundcolour", "Boundary Lines Colour:",
                  choice = c("grey95","black", "white", "red", "orange", "yellow", "green", "blue", "purple")),
      selectInput("low", "Fill Gradient (Low):",
                  choice = c("yellow", "red", "orange", "green", "blue", "purple", "white", "black", "grey")),
      selectInput("high", "Fill Gradient (High):",
                  choice = c("red", "orange", "yellow", "green", "blue", "purple", "white", "black", "grey"))
    ),
    wellPanel(
      helpText(HTML("<b>MISC. SETTINGS</b>")),
      checkboxInput("watermark", "Use 'Blenditbayes' Watermark?", TRUE),
      helpText("Note: automatically disabled when 'Facet' is used.")
    ),
    ## Static informational panels (no inputs).
    wellPanel(
      helpText(HTML("<b>ABOUT US</b>")),
      HTML('Rajiv Shah & Chris Pulec'),
      HTML('<br>'),
      HTML('Big Data Guys'),
      HTML('<br>'),
      HTML('<a href="http://www.rajivshah.com" target="_blank">About Rajiv</a>, ')
    ),
    wellPanel(
      helpText(HTML("<b>VERSION CONTROL</b>")),
      HTML('Version 0.1.2'),
      HTML('<br>'),
      HTML('Deployed on 04-Feb-2013')
    ),
    wellPanel(
      helpText(HTML("<b>CREDITS</b>")),
      HTML('<a href="https://blenditbayes.shinyapps.io/crimemap/" target=" blank">Crime Data Visualization</a>, ')
    )
  ),
  ## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  ## Main Panel
  ## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  ## Output ids (datatable, map, trends1) must match the server's outputs.
  mainPanel(
    tabsetPanel(
      tabPanel("Introduction", includeMarkdown("docs/introduction.md")),
      #tabPanel("LondonR Demo", includeMarkdown("docs/londonr.md")),
      #tabPanel("Sandbox (rCharts)", showOutput("myChart", "nvd3")),
      #tabPanel("Sandbox", includeMarkdown("docs/sandbox.md")),
      tabPanel("Data", dataTableOutput("datatable")),
      tabPanel("Crime Map", plotOutput("map")),
      tabPanel("Trends", plotOutput("trends1")),
      tabPanel("To Do", includeMarkdown("docs/To_do.md")),
      tabPanel("Changes", includeMarkdown("docs/changes.md"))
    )
  )
))
|
150be4e469061e64ffdb1fc96e05101c366297cd
|
a94b8d7428112111fb0ec7a2db31dcca2a929f7e
|
/Figures/FigureS3/FigS3F.R
|
ce2185c421eaa19d51df22c33246f8dc42f5813b
|
[] |
no_license
|
edcurry/esc-se-regions
|
7152064aa3d34bdcc27c1a18fcbdc2ea199ad153
|
63c41f671a0e6b54003b0fdc48146dad082017e5
|
refs/heads/master
| 2020-07-24T01:34:44.984848
| 2020-02-28T12:10:36
| 2020-02-28T12:10:36
| 207,763,046
| 0
| 0
| null | 2020-01-20T12:57:52
| 2019-09-11T08:33:25
|
R
|
UTF-8
|
R
| false
| false
| 1,991
|
r
|
FigS3F.R
|
####################################################################
#
# Wout Megchelenbrink
# Jan. 17, 2020
# SE engaged in closest or more distal promoter interactions
###################################################################
# NOTE(review): rm(list=ls()) wipes the caller's workspace -- intentional
# here as a figure script, but beware when sourcing interactively.
rm(list=ls())
source("include/style.R")
# CHiC promoter-SE interactions, deduplicated per (super-enhancer, gene).
chic <- unique(fread("DATA/CHIC_promoter_SE_interactions_Joshi_Sahlen_629_with_RPKM_ser_above_1_and_gene_status_KNOWN.tab")[gene_status == "KNOWN"], by=c("se_id","gene_name"))
se <- fread("DATA/Superenhancers.tsv")
DT <- merge(chic, se, by="se_id")
# Classify super-enhancers by which expressed (rpkm_ser >= 1) promoters they
# contact: the closest expressed gene, a more distal gene, or both.
se.with.closest <- DT[gene_name == closest_expressed_gene & rpkm_ser >= 1, unique(se_id)]
se.with.distal <- DT[gene_name != closest_expressed_gene & rpkm_ser >= 1, unique(se_id)]
se.with.both <- intersect(se.with.closest, se.with.distal)
# SEs with no interaction at all, and SEs whose only contacts are
# non-expressed genes.
se.not.engaged <- se[!se_id %in% chic$se_id,se_id]
se.not.expressed <- setdiff(chic[is.na(rpkm_ser) | rpkm_ser < 1, unique(se_id)], chic[rpkm_ser >= 1, unique(se_id)])
# Sanity checks: any unclassified SEs, and the category totals.
se[!se_id %in% se.with.closest & !se_id %in% se.with.distal & !se_id %in% se.not.engaged & !se_id %in% se.not.expressed]
length(se.with.closest) + length(se.with.distal) - length(se.with.both) + length(se.not.engaged) + length(se.not.expressed)
## Barplot of interaction categories
DT <- data.table(category=c("Closest and distal", "Only closest", "Only distal", "Only non-expressed", "No interaction"),
                 N=c(length(se.with.both), length(se.with.closest)-length(se.with.both), length(se.with.distal)-length(se.with.both), length(se.not.engaged), length(se.not.expressed)))
DT[, category:=factor(category, levels = rev(c("Closest and distal", "Only closest", "Only distal", "Only non-expressed", "No interaction")))]
# Horizontal bar chart; bs.col.dark and theme_SE come from include/style.R.
ggplot(DT, aes(x=category, y=N, fill=category, label=N)) +
geom_bar(stat = "identity") +
coord_flip() +
scale_fill_manual(values = bs.col.dark[5:1], guide="none") +
theme_SE() +
xlab("") +
ylab("Interacting SE (#)") +
geom_text(nudge_x = 0, nudge_y = 10)
ggsave("IMG/FigS3F.pdf", width = 4.5, height = 2.5)
|
8743fd44f3d7479bb4990233972a5977904c7105
|
71821a5612e50fc8120afc8c5dc18019dadb9e84
|
/1BM17CS024_DSR Lab/Lab2/cbind.R
|
f78ae5e812f0c136cc145a94797762e3d217e457
|
[] |
no_license
|
dikshajain228/Semester-7
|
825229cd63c4a047ac5dd5c3896a43b9835a791d
|
996def1ada173ac60d9fd0e4c9da4f954d2de4f0
|
refs/heads/master
| 2023-02-04T22:19:25.984283
| 2020-12-20T07:48:06
| 2020-12-20T07:48:06
| 297,544,965
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 254
|
r
|
cbind.R
|
# Lab exercise: add a numeric column to a data frame with cbind().
list.files()
getwd()
# NOTE(review): hard-coded, machine-specific working directory.
setwd("C:/Users/Dell/Documents/R")
getwd()
# read.delim with sep = ',' behaves like read.csv here.
delim <- read.delim("perfume.csv", sep = ',')
delim
head(delim)
colnames(delim)
new <- delim
# Helper column of 1..100; this assumes perfume.csv has exactly 100 rows,
# otherwise the cbind() below fails -- TODO confirm.
temp<-data.frame(num=c(1:100))
temp
new<-cbind(new,new_col=temp)
new
head(new)
|
d44a51ac738a2869acee95fb1d8940bfd1810f43
|
3aef5a679c390d1f2c7ecba35eca09864164c5a5
|
/data-raw/onc3.R
|
0f087bb2c1f2a326da1f93a057bae16f89da2c36
|
[] |
no_license
|
jeff-m-sullivan/hesim
|
576edfd8c943c62315890528039366fe20cf7844
|
fa14d0257f0d6d4fc7d344594b2c4bf73417aaf3
|
refs/heads/master
| 2022-11-14T07:35:15.780960
| 2022-09-02T03:13:49
| 2022-09-02T03:13:49
| 140,300,858
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,063
|
r
|
onc3.R
|
# Data for a 3-state (Stable, Progression, Death) oncology model
rm(list = ls())
library("flexsurv")
library("hesim")
library("data.table")
# Simulate multi-state dataset -------------------------------------------------
# Simulate a 3-state (Stable -> Progression -> Death) oncology multi-state
# dataset in long "mstate" format.
#   n:    number of simulated patients.
#   seed: optional RNG seed for reproducibility.
# Returns a data.table with one row per patient/possible transition,
# including censoring indicators (status), covariates, and transition times.
# Relies on hesim (params_surv, create_IndivCtstmTrans, create_trans_dt) and
# data.table loaded by the surrounding script.
sim_onc3_data <- function(n = 2500, seed = NULL){
  if (!is.null(seed)) set.seed(seed)
  # Data: baseline covariates, with patients randomized 1:1:1 across the
  # three strategies.
  age_mu <- 60
  data <- data.table(
    intercept = 1,
    strategy_id = 1,
    strategy_name = sample(c("SOC", "New 1", "New 2"), n, replace = TRUE,
                           prob = c(1/3, 1/3, 1/3)),
    patient_id = 1:n,
    female = rbinom(n, 1, .5),
    age = rnorm(n, mean = age_mu, sd = 5.5)
  )
  data[, `:=` (new1 = ifelse(strategy_name == "New 1", 1, 0),
               new2 = ifelse(strategy_name == "New 2", 1, 0))]
  attr(data, "id_vars") <- c("strategy_id", "patient_id")
  # Transition matrix: 1=Stable, 2=Progression, 3=Death; entries number the
  # allowed transitions (S->P, S->D, P->D).
  tmat <- rbind(
    c(NA, 1, 2),
    c(NA, NA, 3),
    c(NA, NA, NA)
  )
  trans_dt <- create_trans_dt(tmat)
  # Parameters for each transition
  # Convert a desired Weibull mean into the proportional-hazards scale.
  get_scale <- function(shape, mean) {
    scale <- mean/(gamma(1 + 1/shape))
    scale_ph <- scale^{-shape}
    return(scale_ph)
  }
  # Wrap a scalar as a 1-column "intercept" coefficient matrix.
  matrixv <- function(v) {
    x <- matrix(v); colnames(x) <- "intercept"
    return(x)
  }
  # Build a weibullPH params_surv object for one transition; betas are
  # log-hazard-ratios. The intercept is centered on the sample mean age.
  params_wei <- function(shape, mean,
                         beta_new1 = log(1),
                         beta_new2 = log(1),
                         beta_age, beta_female){
    log_shape <- matrixv(log(shape))
    scale = get_scale(shape, mean)
    beta_intercept <- log(scale) - mean(data$age) * beta_age
    scale_coefs <- matrix(c(beta_intercept, beta_new1, beta_new2,
                            beta_age, beta_female),
                          ncol = 5)
    colnames(scale_coefs) <- c("intercept", "new1", "new2", "age", "female")
    params_surv(coefs = list(shape = log_shape,
                             scale = scale_coefs),
                dist = "weibullPH")
  }
  mstate_params <- params_surv_list(
    # 1. S -> P
    params_wei(shape = 2, mean = 6.25,
               beta_new1 = log(.7), beta_new2 = log(.6),
               beta_female = log(1.4), beta_age = log(1.03)),
    # 2. S -> D
    params_wei(shape = 2.5, mean = 10,
               beta_new1 = log(.85), beta_new2 = log(.75),
               beta_female = log(1.2), beta_age = log(1.02)),
    # 3. P -> D
    params_wei(shape = 3.5, mean = 8, beta_new1 = log(1),
               beta_female = log(1.3), beta_age = log(1.02))
  )
  # Create multi-state model (clock resets at each state entry)
  mstatemod <- create_IndivCtstmTrans(mstate_params,
                                      input_data = data,
                                      trans_mat = tmat,
                                      clock = "reset",
                                      start_age = data$age)
  # Simulate data
  ## Observed "latent" transitions
  sim <- mstatemod$sim_disease(max_age = 100)
  sim[, c("sample", "grp_id", "strategy_id") := NULL]
  sim <- cbind(
    data[match(sim$patient_id, data$patient_id)][, patient_id := NULL],
    sim
  )
  sim[, ":=" (intercept = NULL, strategy_id = NULL, status = 1, added = 0)]
  ## Add all possible states for each transition, so every competing risk
  ## appears as a (censored) row alongside the observed one.
  ### Observed 1->2 add 1->3
  sim_13 <- sim[from == 1 & to == 2]
  sim_13[, ":=" (to = 3, status = 0, final = 0, added = 1)]
  sim <- rbind(sim, sim_13)
  ### Observed 1->3 add 1->2
  sim_12 <- sim[from == 1 & to == 3 & added == 0]
  sim_12[, ":=" (to = 2, status = 0, final = 0, added = 1)]
  sim <- rbind(sim, sim_12)
  ### Sort and clean
  sim <- merge(sim, trans_dt, by = c("from", "to")) # Add transition ID
  setorderv(sim, c("patient_id", "from", "to"))
  sim[, added := NULL]
  ## Add right censoring: administrative at t=15 plus random exponential
  ## censoring (mean 15).
  rc <- data.table(patient_id = 1:n,
                   time = stats::rexp(n, rate = 1/15))
  sim[, time_rc := rc[match(sim$patient_id, rc$patient_id)]$time]
  sim[, status := ifelse(time_stop < 15 & time_stop < time_rc, status, 0)]
  sim[, time_stop := pmin(time_stop, 15, time_rc)]
  sim <- sim[time_start <= pmin(time_rc, 15)]
  ## Final data cleaning: recode strategies and relabel states by name.
  sim[, strategy_id := fcase(
    strategy_name == "SOC", 1L,
    strategy_name == "New 1", 2L,
    strategy_name == "New 2", 3L
  )]
  sim[, strategy_name := factor(strategy_id,
                                levels = c(1, 2, 3),
                                labels = c("SOC", "New 1", "New 2"))]
  label_states <- function (x) {
    fcase(
      x == 1, "Stable",
      x == 2, "Progression",
      x == 3, "Death"
    )
  }
  sim[, from := label_states(from)]
  sim[, to := label_states(to)]
  sim[, c("new1", "new2", "final", "time_rc") := NULL]
  # Return (sojourn time per row; sim[, ] forces a printable copy)
  sim[, time := time_stop - time_start]
  return(sim[, ])
}
# Simulate the packaged dataset with a fixed seed for reproducibility.
onc3 <- sim_onc3_data(n = 3000, seed = 102)
# Check that coefficient estimates are consistent with "truth"
# Fit a Weibull PH model (flexsurv) for transition i of the simulated data;
# coefficients should recover the betas used in sim_onc3_data().
fit_weibull <- function(i) {
  flexsurvreg(Surv(time, status) ~ strategy_name + female + age,
              data = onc3, subset = (transition_id == i), dist = "weibullPH")
}
fit_weibull(1)
fit_weibull(2)
fit_weibull(3)
# Panel data version -----------------------------------------------------------
# Convert the transition-level onc3 data into panel format: one row per
# patient per observed time point, recording the occupied state.
onc3p <- copy(onc3)
# n indexes the competing-transition rows sharing a (patient, time_start).
onc3p[, n := 1:.N, by = c("patient_id", "time_start")]
onc3p[, c("transition_id", "time") := NULL]
# Time 0: one baseline row per patient, in the starting ("from") state.
onc3p_t0 <- onc3p[time_start == 0 & n == 1]
onc3p_t0[, c("time_stop", "n", "to", "status") := NULL]
setnames(onc3p_t0, c("time_start", "from"), c("time", "state"))
# Time > 0: keep the realized transition row (status == 1), or a single
# censored row (mstatus == 0) where no transition occurred.
onc3p[, mstatus := mean(status), by = c("patient_id", "time_start")]
onc3p <- onc3p[status == 1 | (mstatus == 0 & n == 1)]
onc3p[, state := ifelse(mstatus == 0, from, to)]
onc3p[, c("time_start", "n", "from",
          "to", "status", "mstatus") := NULL]
setnames(onc3p, "time_stop", "time")
# Full panel: baseline rows plus follow-up rows, ordered within patient.
onc3p <- rbind(onc3p_t0, onc3p)
setorderv(onc3p, c("patient_id", "time"))
onc3p[, state_id := factor(
  state,
  levels = c("Stable", "Progression", "Death"),
  labels = 1:3)]
# Save -------------------------------------------------------------------------
save(onc3, file = "../data/onc3.rda", compress = "bzip2")
save(onc3p, file = "../data/onc3p.rda", compress = "bzip2")
|
1cbc2384137ca30e8790dca42aca2d05566cdca0
|
9d28e9c8305feb5f585761e629ae7ac862a23265
|
/exercises/solution_07_09.R
|
3d0851e6993bc424c333d324b4de72c58d2213c7
|
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
awconway/NUR1027-FALL-2019
|
1e94759346933311c8b56da079f132dfc3b0abcb
|
5dd4fea0a17ebea7fd4c1e58b79626e94418d7a5
|
refs/heads/master
| 2023-01-21T09:53:53.446575
| 2022-08-15T18:27:45
| 2022-08-15T18:27:45
| 190,033,550
| 0
| 1
|
MIT
| 2023-01-11T20:46:07
| 2019-06-03T15:34:45
|
HTML
|
UTF-8
|
R
| false
| false
| 68
|
r
|
solution_07_09.R
|
# Appears to compute a 95% measurement-error bound from the standard error
# of measurement: SEM * 1.96 * sqrt(k) for k repeated measurements, rounded
# to 2 decimal places (1.96 = z for a 95% interval).
SEM <- 0.258          # standard error of measurement
measurements <- 3     # number of repeated measurements
bound95 <- SEM * 1.96 * sqrt(measurements)
round(bound95, 2)
|
eae92fbc23053f30d0a5b1d1fb3ab44e87d05629
|
fe36c4fdae6bdc7f426631675ebd4b4eedc6be87
|
/man/load_table_lineage.Rd
|
bc053c98f21dea9efe3465c9476b8bb62b82d37f
|
[
"MIT"
] |
permissive
|
nyuglobalties/blueprintr
|
13f5c40ff263fdf069b3a3785312fad3513e493c
|
56d1da3f03b86ba3533107fab1926315505f8f57
|
refs/heads/main
| 2023-08-11T10:15:08.603385
| 2023-07-28T20:40:29
| 2023-07-28T20:40:29
| 230,140,058
| 1
| 2
|
NOASSERTION
| 2023-07-28T20:40:31
| 2019-12-25T18:33:30
|
R
|
UTF-8
|
R
| false
| true
| 691
|
rd
|
load_table_lineage.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lineage-tables.R
\name{load_table_lineage}
\alias{load_table_lineage}
\title{Read blueprints from folder and get lineage}
\usage{
load_table_lineage(
directory = here::here("blueprints"),
recurse = FALSE,
script = here::here("_targets.R")
)
}
\arguments{
\item{directory}{A folder containing blueprint scripts}
\item{recurse}{Should this function recursively load blueprints?}
\item{script}{Where the targets/drake project script file is located. Defaults
to using targets.}
}
\value{
An igraph of the table lineage for the desired blueprints
}
\description{
Read blueprints from folder and get lineage
}
|
539b0a1819efe5889eb847664a11002f3ac3e650
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/clttools/examples/normal.simu.plot.Rd.R
|
0c75052b36d8ec4ec1f3d141d4c28bbd7345ab62
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 220
|
r
|
normal.simu.plot.Rd.R
|
# Auto-extracted example from the clttools package help page.
library(clttools)
### Name: normal.simu.plot
### Title: Histogram and Q-Q plot of simulated Normal distribution
### Aliases: normal.simu.plot
### ** Examples
# Presumably n = sample size and times = number of simulated samples from
# N(mean = 3, sd = 2) -- see ?normal.simu.plot to confirm.
normal.simu.plot(n = 5, mean = 3, sd =2, times = 100)
|
81e22b3228fa1e5e093300fbd64cb0b112dc03ba
|
6cbb51fe996e65a51a8d9f2f35e3159721933f25
|
/inst/shiny/ui_09_2_seuratWorkflow.R
|
a21987a2716ffcd99297f0ef6e9b4ee3793d4de3
|
[
"MIT"
] |
permissive
|
compbiomed/singleCellTK
|
927fb97e257ba89cddee9a90f9cb7cb375a5c6fb
|
990e89e7ccfbf663f23c793454f72fb8c6878a32
|
refs/heads/master
| 2023-08-11T09:17:41.232437
| 2023-07-26T20:43:47
| 2023-07-26T20:43:47
| 68,756,293
| 144
| 89
|
NOASSERTION
| 2023-09-06T18:22:08
| 2016-09-20T21:50:24
|
R
|
UTF-8
|
R
| false
| false
| 26,282
|
r
|
ui_09_2_seuratWorkflow.R
|
# User Interface for Seurat Workflow ---
# Single-page UI for the Seurat curated workflow tab. Each analysis step
# (normalization, HVG detection, dimensionality reduction, 2D embedding,
# clustering, marker finding) lives in its own collapsible panel inside
# bsCollapse(id = "SeuratUI"); input choices and all plots are populated
# server-side. Assumes `docs.artPath` is defined elsewhere in the app.
shinyPanelSeurat <- fluidPage(
  # JS message handler: the server sends 'close_dropDownSeuratHM' to close
  # the find-marker heatmap drop-down by simulating a page click.
  tags$script("Shiny.addCustomMessageHandler('close_dropDownSeuratHM', function(x){
                  $('html').click();
                });"),
  h1("Seurat"),
  h5(tags$a(href = paste0(docs.artPath, "cnsl_seurat_curated_workflow.html"),
            "(help)", target = "_blank")),
  # Grey/white color overrides for the collapsible panels below.
  inlineCSS(list(".panel-danger>.panel-heading" = "background-color:#dcdcdc; color:#000000", ".panel-primary>.panel-heading" = "background-color:#f5f5f5; color:#000000; border-color:#dddddd", ".panel-primary" = "border-color:#dddddd;", ".panel-primary>.panel-heading+.panel-collapse>.panel-body" = "border-color:#dddddd;")),
  # Hidden selector mirroring the currently active workflow panel;
  # condition = "false" keeps it invisible (it is driven by the server).
  conditionalPanel(
    condition = "false",
    selectInput(
      "activePanelSelectSeurat",
      label = "Active Panel:",
      choices = c("",
                  "Normalize Data",
                  "Scale Data",
                  "Highly Variable Genes",
                  "Dimensionality Reduction",
                  "tSNE/UMAP",
                  "Clustering",
                  "Find Markers",
                  "Heatmap Plot"),
      selected = ""
    )
  ),
  bsCollapse(id = "SeuratUI", open = "Normalize Data",
             # Step 1: choose an input matrix and normalization method.
             bsCollapsePanel("Normalize Data",
                             fluidRow(
                               column(4,
                                      panel(heading = "Options",
                                            selectizeInput(
                                              inputId = "seuratSelectNormalizationAssay",
                                              label = "Select input matrix:",
                                              choices = NULL,
                                              selected = NULL,
                                              multiple = FALSE,
                                              options = NULL),
                                            #uiOutput("seuratSelectNormalizationAssay"),
                                            selectInput(inputId = "normalization_method", label = "Select normalization method: ", choices = c("LogNormalize", "CLR", "RC")),
                                            textInput(inputId = "scale_factor", label = "Set scaling factor: ", value = "10000"),
                                            actionButton(inputId = "normalize_button", "Normalize")
                                      )
                               )
                             ),
                             style = "primary"
             ),
             # Step 2: compute, list, and plot highly variable genes.
             bsCollapsePanel("Highly Variable Genes",
                             fluidRow(
                               column(4,
                                      fluidRow(
                                        column(12,
                                               panel(heading = "Compute HVG",
                                                     selectInput(inputId = "hvg_method", label = "Select HVG method: ", choices = c("vst", "mean.var.plot", "dispersion")),
                                                     textInput(inputId = "hvg_no_features", label = "Select number of features to find: ", value = "2000"),
                                                     actionButton(inputId = "find_hvg_button", "Find HVG")
                                               )
                                        )
                                      ),
                                      br(),
                                      fluidRow(
                                        column(12,
                                               panel(heading = "Display HVG",
                                                     numericInput(inputId = "hvg_no_features_view", label = "Select number of features to display: ", value = 10, step = 1),
                                                     verbatimTextOutput(outputId = "hvg_output", placeholder = TRUE)
                                               )
                                        )
                                      )
                               ),
                               column(8,
                                      fluidRow(
                                        column(12,
                                               panel(heading = "Plot",
                                                     plotlyOutput(outputId = "plot_hvg")
                                               )
                                        )
                                      )
                               )
                             ),
                             style = "primary"),
             # Scale Data panel retained but disabled (commented out).
             # bsCollapsePanel("Scale Data",
             #                 fluidRow(
             #                   column(4,
             #                          panel(heading = "Options",
             #                                #selectInput(inputId = "model.use", label = "Select model for scaling: ", choices = c("linear", "poisson", "negbinom")),
             #                                materialSwitch(inputId = "do.scale", label = "Scale data?", value = TRUE),
             #                                materialSwitch(inputId = "do.center", label = "Center data?", value = TRUE),
             #                                textInput(inputId = "scale.max", label = "Max value for scaled data: ", value = "10"),
             #                                actionButton(inputId = "scale_button", "Scale")
             #                          )
             #                   )
             #                 ),
             #                 style = "primary"
             # ),
             # Step 3: PCA / ICA dimensionality reduction (one tab each).
             bsCollapsePanel("Dimensionality Reduction",
                             tabsetPanel(type = "tabs",
                                         tabPanel("PCA",
                                                  br(),
                                                  fluidRow(
                                                    column(4,
                                                           fluidRow(
                                                             column(12,
                                                                    panel(heading = "PCA",
                                                                          numericInput(inputId = "pca_no_components", label = "Select number of components to compute: ", value = 50),
                                                                          materialSwitch(inputId = "pca_compute_elbow", label = "Compute ElbowPlot?", value = TRUE),
                                                                          materialSwitch(inputId = "pca_compute_jackstraw", label = "Compute JackStrawPlot?", value = FALSE),
                                                                          materialSwitch(inputId = "pca_compute_heatmap", label = "Compute Heatmap?", value = TRUE),
                                                                          # Heatmap feature count only shown when heatmap is enabled.
                                                                          conditionalPanel(
                                                                            condition = 'input.pca_compute_heatmap == true',
                                                                            numericInput(inputId = "pca_compute_heatmap_nfeatures",
                                                                                         label = "Set number of features for heatmap:", value = 30, step = 1),
                                                                          ),
                                                                          numericInput(inputId = "seed_PCA",
                                                                                       label = "Seed value for reproducibility of result:",
                                                                                       value = 42,
                                                                                       step = 1),
                                                                          actionButton(inputId = "run_pca_button", "Run PCA")
                                                                    ),
                                                                    panel(heading = "Select No. of Components",
                                                                          htmlOutput(outputId = "pca_significant_pc_output", inline = FALSE),
                                                                          numericInput(inputId = "pca_significant_pc_counter", label = "Select number of components for downstream analysis: ", min = 1, max = 20, value = 10)
                                                                    )
                                                             )
                                                           )
                                                    ),
                                                    column(8,
                                                           fluidRow(
                                                             column(12,
                                                                    # Plot tabs start hidden; server reveals and fills them.
                                                                    hidden(
                                                                      tags$div(class = "seurat_pca_plots", tabsetPanel(id = "seuratPCAPlotTabset", type = "tabs"
                                                                      )
                                                                      ))
                                                             )
                                                           )
                                                    )
                                                  )
                                         ),
                                         tabPanel("ICA",
                                                  br(),
                                                  fluidRow(
                                                    column(4,
                                                           fluidRow(
                                                             column(12,
                                                                    panel(heading = "ICA",
                                                                          textInput(inputId = "ica_no_components", label = "Select number of components to compute: ", value = "20"),
                                                                          materialSwitch(inputId = "ica_compute_heatmap", label = "Compute Heatmap?", value = TRUE),
                                                                          conditionalPanel(
                                                                            condition = 'input.ica_compute_heatmap == true',
                                                                            numericInput(inputId = "ica_compute_heatmap_nfeatures",
                                                                                         label = "Set number of features for heatmap:", value = 30, step = 1),
                                                                          ),
                                                                          numericInput(inputId = "seed_ICA",
                                                                                       label = "Seed value for reproducibility of result:",
                                                                                       value = 42,
                                                                                       step = 1),
                                                                          actionButton(inputId = "run_ica_button", "Run ICA")
                                                                    ),
                                                                    panel(heading = "Select No. of Components",
                                                                          #h5("Number of components suggested by ElbowPlot: "),
                                                                          #verbatimTextOutput(outputId = "ica_significant_pc_output", placeholder = TRUE),
                                                                          numericInput(inputId = "ica_significant_ic_counter", label = "Select number of components for downstream analysis: ", min = 1, max = 20, value = 10)
                                                                    )
                                                             )
                                                           )
                                                    ),
                                                    column(8,
                                                           fluidRow(
                                                             column(12,
                                                                    hidden(
                                                                      tags$div(class = "seurat_ica_plots", tabsetPanel(id="seuratICAPlotTabset", type = "tabs"
                                                                      ))
                                                                    )
                                                             )
                                                           )
                                                    )
                                                  )
                                         )
                             ),
                             style = "primary"),
             # Step 4: 2D embedding (UMAP / tSNE) on the chosen reduction.
             bsCollapsePanel("2D-Embedding",
                             tabsetPanel(id = "tsneUmapTabsetSeurat", type = "tabs",
                                         tabPanel("UMAP",
                                                  br(),
                                                  fluidRow(
                                                    column(4,
                                                           fluidRow(
                                                             column(12,
                                                                    panel(heading = "UMAP",
                                                                          selectInput(inputId = "reduction_umap_method", label = "Select reduction method: ", choices = c("pca", "ica")),
                                                                          #textInput(inputId = "reduction_umap_count", label = "Select number of reduction components: ", value = "20"),
                                                                          numericInput(inputId = "min_dist_umap", label = "Set min.dist:", value = 0.3),
                                                                          numericInput(inputId = "n_neighbors_umap", label = "Set n.neighbors:", value = 30, step = 1),
                                                                          numericInput(inputId = "spread_umap", label = "Set spread:", value = 1),
                                                                          numericInput(inputId = "seed_UMAP",
                                                                                       label = "Seed value for reproducibility of result:",
                                                                                       value = 42,
                                                                                       step = 1),
                                                                          htmlOutput(outputId = "display_message_umap", inline = FALSE),
                                                                          actionButton(inputId = "run_umap_button", "Run UMAP")
                                                                    )
                                                             )
                                                           )
                                                    ),
                                                    column(8,
                                                           fluidRow(
                                                             panel(heading = "Plot",
                                                                   column(12,
                                                                          plotlyOutput(outputId = "plot_umap")
                                                                   )
                                                             )
                                                           )
                                                    )
                                                  )
                                         ),
                                         tabPanel("tSNE",
                                                  br(),
                                                  fluidRow(
                                                    column(4,
                                                           fluidRow(
                                                             column(12,
                                                                    panel(heading = "tSNE",
                                                                          selectInput(inputId = "reduction_tsne_method", label = "Select reduction method: ", choices = c("pca", "ica")),
                                                                          #textInput(inputId = "reduction_tsne_count", label = "Select number of reduction components: ", value = "20"),
                                                                          numericInput(inputId = "perplexity_tsne", label = "Set perplexity:", value = 30),
                                                                          numericInput(inputId = "seed_TSNE",
                                                                                       label = "Seed value for reproducibility of result:",
                                                                                       value = 1,
                                                                                       step = 1),
                                                                          htmlOutput(outputId = "display_message_tsne", inline = FALSE),
                                                                          actionButton(inputId = "run_tsne_button", "Run tSNE")
                                                                    )
                                                             )
                                                           )
                                                    ),
                                                    column(8,
                                                           fluidRow(
                                                             panel(heading = "Plot",
                                                                   column(12,
                                                                          plotlyOutput(outputId = "plot_tsne")
                                                                   )
                                                             )
                                                           )
                                                    )
                                                  )
                                         )
                             ),
                             style = "primary"),
             # Step 5: graph-based clustering on the chosen reduction.
             bsCollapsePanel("Clustering",
                             fluidRow(
                               column(4,
                                      fluidRow(
                                        column(12,
                                               panel(heading = "Options",
                                                     selectInput(inputId = "reduction_clustering_method", label = "Select reduction method: ", choices = c("pca", "ica")),
                                                     #textInput(inputId = "reduction_clustering_count", label = "Select number of reduction components: ", value = "20"),
                                                     selectInput(inputId = "algorithm.use", label = "Select clustering algorithm: ", choices = list("Original Louvain algorithm" = "louvain",
                                                                                                                                                    "Louvain algorithm with multilevel refinement" = "multilevel",
                                                                                                                                                    "SLM algorithm" = "SLM")),
                                                     numericInput(inputId = "resolution_clustering", label = "Set resolution:", value = 0.8),
                                                     materialSwitch(inputId = "group.singletons", label = "Group singletons?", value = TRUE),
                                                     htmlOutput(outputId = "display_message_clustering", inline = FALSE),
                                                     actionButton(inputId = "find_clusters_button", "Find Clusters")
                                               )
                                        )
                                      )
                               ),
                               column(8,
                                      fluidRow(
                                        column(12,
                                               hidden(
                                                 tags$div(class = "seurat_clustering_plots", tabsetPanel(id = "seuratClusteringPlotTabset", type = "tabs"
                                                 ))
                                               )
                                        )
                                      )
                               )
                             ),
                             style = "primary"),
             # Step 6: marker-gene identification (DE / conserved markers),
             # result table, joint heatmap, and per-gene plots.
             bsCollapsePanel("Find Markers",
                             fluidRow(
                               column(4,
                                      fluidRow(
                                        column(12,
                                               panel(heading = "Options",
                                                     h6("Compute marker genes that are either differentially expressed or conserved between selected groups and visualize them from the selected plots on right panel."),
                                                     radioButtons(
                                                       inputId = "seuratFindMarkerType",
                                                       label = "Select type of markers to identify:",
                                                       choices = c(
                                                         "markers between all groups" = "markerAll",
                                                         "markers differentially expressed between two selected groups" = "markerDiffExp",
                                                         "markers conserved between two selected groups" = "markerConserved"
                                                       )
                                                     ),
                                                     selectInput(
                                                       inputId = "seuratFindMarkerSelectPhenotype",
                                                       label = "Select biological phenotype:",
                                                       choices = NULL
                                                     ),
                                                     # Group pickers only apply to pairwise marker modes.
                                                     conditionalPanel(
                                                       condition = "input.seuratFindMarkerType == 'markerDiffExp'
                                                       || input.seuratFindMarkerType == 'markerConserved'",
                                                       selectInput(
                                                         inputId = "seuratFindMarkerGroup1",
                                                         label = "Select first group of interest:",
                                                         choices = NULL
                                                       ),
                                                       selectInput(
                                                         inputId = "seuratFindMarkerGroup2",
                                                         label = "Select second group of interest:",
                                                         choices = NULL
                                                       )
                                                     ),
                                                     selectInput(
                                                       inputId = "seuratFindMarkerTest",
                                                       label = "Select test:",
                                                       choices = c("wilcox", "bimod",
                                                                   "t", "negbinom",
                                                                   "poisson", "LR",
                                                                   "DESeq2")
                                                     ),
                                                     materialSwitch(
                                                       inputId = "seuratFindMarkerPosOnly",
                                                       label = "Only return positive markers?",
                                                       value = FALSE
                                                     ),
                                                     actionButton(inputId = "seuratFindMarkerRun", "Find Markers")
                                               )
                                        )
                                      )
                               ),
                               column(8,
                                      fluidRow(
                                        column(12,
                                               # Marker table (filterTable module), hidden until results exist.
                                               hidden(
                                                 tags$div(
                                                   class = "seurat_findmarker_table",
                                                   filterTableUI(id = "filterSeuratFindMarker")
                                                 )
                                               ),
                                               br(),
                                               hidden(
                                                 tags$div(class = "seurat_findmarker_jointHeatmap",
                                                          bsCollapse(
                                                            bsCollapsePanel(
                                                              title = "Heatmap Plot",
                                                              fluidRow(
                                                                column(12, align = "center",
                                                                       panel(
                                                                         numericInput("findMarkerHeatmapPlotFullNumeric", value = 10, max = 2000, min = 2, step = 1, label = "Select number of top genes from each cluster/group to visualize in the heatmap below based on highest average log fold change value:"),
                                                                         actionButton("findMarkerHeatmapPlotFullNumericRun", label = "Plot"),
                                                                         hr(),
                                                                         shinyjqui::jqui_resizable(
                                                                           plotOutput(outputId = "findMarkerHeatmapPlotFull", height = "500px")
                                                                         )
                                                                       )
                                                                )
                                                              )
                                                            )
                                                          )
                                                 )
                                               ),
                                               br(),
                                               hidden(
                                                 tags$div(class = "seurat_findmarker_plots",
                                                          panel(heading = "Marker Gene Plots",
                                                                HTML("<center><h5><span style='color:red; font-weight:bold; text-align:center;'>Click on the rows of the table above to plot the selected marker genes below!</span></h5></br></center>"),
                                                                tabsetPanel(id = "seuratFindMarkerPlotTabset", type = "tabs"))
                                                 )
                                               )
                                        )
                                      )
                             )
             ),
             style = "primary")
  ),
  # Shared non-linear workflow navigation module at the bottom of the page.
  nonLinearWorkflowUI(id = "nlw-seurat")
)
|
80c5dda1e99a993694f8b45ab60f4a84ed2e49d0
|
d2f0c07eeba563b88021010e450ac7a29779972b
|
/dataJoin.R
|
131c4d8f4a6894edb0c20b00b7c8b5b3a8b770e5
|
[] |
no_license
|
lefpoem/R_log
|
dc60495063fe26a8fe8291708a98594a9ff27df2
|
74c9f927daa4c185655d365186b01df71061dc1e
|
refs/heads/main
| 2023-06-07T05:56:55.643987
| 2021-07-04T09:17:39
| 2021-07-04T09:17:39
| 381,149,578
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 591
|
r
|
dataJoin.R
|
# Demonstration of the four SQL-style joins using base R merge().
# First demo data frame: site IDs 1..6 with site names.
df1 = data.frame(SiteId=c(1:6),Site=c("Goolge","Baidu","Numpy","Zhihu","CSDN","Pokect"))
# Second demo data frame: partially overlapping site IDs with countries.
df2 = data.frame(SiteId=c(2,4,6,7,8),Country=c('CN','USA','CN','USA','IN'))
print(df1)
print(df2)
# Inner join: keep only SiteIds present in both data frames (intersection).
df3= merge(x=df1,y=df2,by="SiteId")
print("----NATURAL JOIN----")
print(df3)
# Full (outer) join: keep SiteIds from either data frame (the union — the
# original comment said "intersection", which is incorrect for all=TRUE);
# unmatched columns are filled with NA.
df4 = merge(x=df1,y=df2,by="SiteId",all=TRUE)
print("----FULL JOIN----")
print(df4)
# Left join: keep every row of df1, adding df2 columns where they match.
df5 = merge(x=df1,y=df2,by="SiteId",all.x=TRUE)
print("----LEFT JOIN----")
print(df5)
# Right join: keep every row of df2, adding df1 columns where they match.
df6 = merge(x=df1,y=df2,by="SiteId",all.y=TRUE)
print("----RIGHT JOIN----")
print(df6)
|
e2560aeff0c3a26c563912f604f867531936378c
|
8c9ce99672ce84da4400238e6f8278c130210100
|
/Gene Info sorter_V4.R
|
05fb39369b2cfa65064481eccb7bea1f8bad63b4
|
[] |
no_license
|
debabratadutta6/Sesame-transcriptome
|
183cb8120b6611fe01889907c72676a7fa299d70
|
36b7199a52fb21e3b2ee735d484267a37cfc2a39
|
refs/heads/main
| 2022-12-31T02:03:31.042727
| 2020-10-24T03:58:45
| 2020-10-24T03:58:45
| 306,801,341
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,055
|
r
|
Gene Info sorter_V4.R
|
# Interactive script: merges StringTie assembled-transcript annotations
# with gene-level and transcript-level count tables, replaces the
# gene/transcript IDs with their locus names, and writes two adjusted
# count tables to disk.
# NOTE(review): winDialog()/choose.files() are Windows-only and require an
# interactive session; 'Filters' is presumably utils::Filters (the Windows
# file-type filter matrix) — confirm before running elsewhere.
winDialog("ok", "Please select Stringtie Assembled Transcripts file")
options(stringsAsFactors=FALSE)
data1 <- choose.files(default = "", caption = "Select input datafile",
multi = TRUE, filters = Filters,
index = nrow(Filters))
# Assembled-transcripts GTF-like file, space-separated; the first two rows
# are header/comment rows and are dropped.
data11 <- read.csv(data1, header=F, sep=" ")
data11 <- data11[-1,]
data11 <- data11[-1,]
winDialog("ok", "Please select Stringtie Gene Count file")
data2 <- choose.files(default = "", caption = "Select input datafile",
multi = TRUE, filters = Filters,
index = nrow(Filters))
data21 <- read.csv(data2, header=TRUE, sep="\t")
winDialog("ok", "Please select Stringtie Transcript Count file")
data3 <- choose.files(default = "", caption = "Select input datafile",
multi = TRUE, filters = Filters,
index = nrow(Filters))
data31 <- read.csv(data3, header=TRUE, sep="\t")
# Columns V2/V4/V6/V8 hold 'gene_id X; transcript_id Y; ...' attribute
# values; strip the trailing semicolons.
gene <- data11$V2
gene <- gsub(";", "", gene)
transcript <- data11$V4
transcript <- gsub(";", "", transcript)
locus1 <- data11$V6
locus1 <- gsub(";", "", locus1)
locus2 <- data11$V8
locus2 <- gsub(";", "", locus2)
# Whether V6/V8 is the locus name vs. a coordinate differs per row, so
# coerce both to numeric: the non-numeric one (NA after coercion) is the
# locus name for that row.
num1 <- as.numeric(locus1)
num2 <- as.numeric(locus2)
bind <- as.data.frame(cbind (gene, transcript, locus1, locus2, num1, num2))
x <- nrow(bind)
# Preserve original row order so the two subsets can be re-interleaved.
bind$seqs <- 1:x
bind$isna1 <- is.na(bind$num1)
bind$isna2 <- is.na(bind$num2)
bind2 <- as.data.frame(bind)
# Rows whose locus name sits in column V6 (num1 is NA) vs. column V8.
sub1 <- subset(bind2, bind2$isna1==TRUE)
sub2 <- subset(bind2, bind2$isna2==TRUE)
cols1 <- c(1,2,3,7)
sub12 <- sub1[,cols1]
colnames(sub12) <- c("gene", "transcript", "locus", "seqs")
cols2 <- c(1,2,4,7)
sub22 <- sub2[,cols2]
colnames(sub22) <- c("gene", "transcript", "locus", "seqs")
# Recombine in original file order and drop exact duplicate rows.
finalish <- rbind(sub12, sub22)
final <- finalish[order(finalish$seqs),]
final <- final[,-4]
dup <- duplicated(final)
final$dup <- dup
sub3 <- subset(final, final$dup==FALSE)
#############################################################################
# Gene counts: join on gene_id, then replace gene_id with the locus name,
# prefixed by a running row number to keep IDs unique.
sub4 <- sub3
colnames(sub4)[1] <- c("gene_id")
merge1 <- merge(data21, sub4, by.x="gene_id")
merge2 <- merge1
merge2$gene_id <- merge2$locus
new1 <- merge2[,1:2]
#dup1 <- duplicated(new1)
#new1$dup <- dup1
#sub1 <- subset(new1, new1$dup==FALSE)
sub1 <- new1
rows <- nrow(sub1)
sub1$row <- 1:rows
sub1$gene_id <- paste(sub1$row,"_",sub1$gene_id, sep="")
new11 <- sub1[,-c(3:4)]
write.table(new11, "Adjusted Gene Counts.tabular", quote=F, sep="\t", row.names=F)
#############################################################################
# Transcript counts: same replacement keyed on transcript_id.
sub5 <- sub3
colnames(sub5)[2] <- c("transcript_id")
merge3 <- merge(data31, sub5, by.x="transcript_id")
merge4 <- merge3
merge4$transcript_id <- merge4$locus
new2 <- merge4[,1:2]
#dup2 <- duplicated(new2)
#new2$dup <- dup2
#sub2 <- subset(new2, new2$dup==FALSE)
sub2 <- new2
rows <- nrow(sub2)
sub2$row <- 1:rows
sub2$transcript_id <- paste(sub2$row,"_",sub2$transcript_id, sep="")
new21 <- sub2[,-3]
write.table(new21, "Adjusted Transcript Counts.tabular", quote=F, sep="\t", row.names=F)
|
5e84bcd5ad7e2aa57c3b32656ce54a7553c8f625
|
556d3d35f85264e5c5c27b5de5c158dc7d2500dc
|
/rankhospital.R
|
627ff676413d1243ba4df197315e0f86d7252f38
|
[] |
no_license
|
Dcroix/Programming-Assign-Three
|
68ec8c66f34546c1dfc69fc8642f49ed9e93d930
|
c39034d0555fba57ee5a1f68646fe6bfccd08708
|
refs/heads/master
| 2020-05-15T22:03:22.886827
| 2019-04-21T09:59:39
| 2019-04-21T09:59:39
| 182,516,902
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,042
|
r
|
rankhospital.R
|
# Return the hospital name in a given state holding the requested rank for
# an outcome's 30-day mortality rate.
#
# Args:
#   state:   two-letter state abbreviation (must appear in the data).
#   outcome: one of "heart attack", "heart failure", "pneumonia".
#   num:     "best", "worst", or a numeric rank; ranks beyond the number
#            of hospitals with data yield NA (original behavior).
# Ties in mortality rate are broken alphabetically by hospital name.
rankhospital <- function(state, outcome, num = "best"){
  data <- read.csv("outcome-of-care-measures.csv")
  # Validate the state against the states present in column 7.
  # (as.character() works whether or not read.csv produced factors; the
  # original levels()-based lookup broke under R >= 4.0 where
  # stringsAsFactors defaults to FALSE.)
  states <- as.character(data[, 7])
  if (!(state %in% states)) {
    stop("invalid state")
  }
  # Map each supported outcome to its mortality-rate column.
  outcome.cols <- c("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23)
  if (!(outcome %in% names(outcome.cols))) {
    stop ("invalid outcome")
  }
  col <- outcome.cols[[outcome]]
  # Mortality columns contain "Not Available" strings; coerce to numeric
  # and let those become NA (warnings suppressed intentionally).
  data[, col] <- suppressWarnings(as.numeric(as.character(data[, col])))
  data[, 2] <- as.character(data[, 2])
  # Exact state match (the original grep() did substring matching).
  statedata <- data[states == state, ]
  # Sort by rate then hospital name; na.last = NA drops missing rates.
  orderdata <- statedata[order(statedata[, col], statedata[, 2], na.last = NA), ]
  if (num == "best") {
    orderdata[1, 2]
  } else if (num == "worst") {
    orderdata[nrow(orderdata), 2]
  } else {
    orderdata[num, 2]
  }
}
|
408cba7ba434ecd613de3acbf9317d3829dea857
|
4bf26f1905d2d51a85591a24b29740bef8472abe
|
/src/install.dependencies.R
|
2cc60d62ee70f3f37384aaaa54c862169506155d
|
[] |
no_license
|
NEONScience/swift.aqua
|
c8fc9d4f508602b29f14d3c0ff57c4ac08a8560d
|
b7c19f09dbab6a28797bcee4de04ca0e124c4060
|
refs/heads/master
| 2022-10-25T10:19:51.846812
| 2020-06-15T20:59:20
| 2020-06-15T20:59:20
| 272,516,953
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 375
|
r
|
install.dependencies.R
|
# Install packages required for initialization of the app.
# A single vectorized install.packages() call installs the same set as the
# original twelve separate calls, but resolves shared dependencies once.
pkgs <- c(
  "fst", "shiny", "dplyr", "plotly", "ggplot2", "DT", "tidyr",
  "data.table", "shinycssloaders", "shinydashboard", "viridis", "stringr"
)
install.packages(pkgs)
|
a4f69aefc17cb0244749c0520c58a2bcfbef1b1b
|
4017621a72dcf76a3a9b66905ea0374e7acd0517
|
/data_analysis_scripts/InteractiveViz.R
|
76fd2adfb85cb192eeddfec162b25df58841ee5d
|
[] |
no_license
|
JackLich10/data_plus_basketball
|
c9fed4ace98a74504ecf7c320c655ac5eaaea9a8
|
40ad762ec54b6f761ee81c54417cbd6e2f098518
|
refs/heads/master
| 2023-03-18T11:00:57.002177
| 2021-03-12T01:46:57
| 2021-03-12T01:46:57
| 196,605,447
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,745
|
r
|
InteractiveViz.R
|
# Interactive (ggiraph) visualizations of Duke's 2014-15 shooting:
#   1. game-by-game points-per-shot (PPS) vs expected PPS (ePPS),
#   2. shot-making index vs point differential,
#   3. assists vs shot-making index.
# NOTE(review): as written, the statement order looks broken — 'long' is
# built from the 'subset' data frame (and from ePPS/shot_making columns)
# before 'subset' and those columns are created further down; run
# top-to-bottom this would capture base::subset and error. Presumably the
# file was rearranged at some point; confirm the intended execution order.
# load packages
library(tidyverse)
library(broom)
library(ggiraph)
library(rvest)
library(modelr)
# load data
Duke201415teamstats <- read_csv("data/Duke201415teamstats.csv")
ShotChart <- read_csv("data/shot_chart_NN_SVM.csv")
# change from wide to long
long <- subset %>%
  dplyr::select(game_number, opponent, PPS, ePPS_NN, ePPS_SVM, ePPS, shot_making_NN, shot_making_SVM, shot_making) %>%
  gather(type, value, -game_number, -opponent, -shot_making_NN, -shot_making_SVM, -shot_making)
subset <- subset %>%
  mutate(pt_diff = pts - opp_pts)
# add ePPS and shot making
# Expected points per shot (EPS) for each attempt under the neural-net
# and SVM make-probability models.
ShotChart <- ShotChart %>%
  mutate(EPS_NN = value * NN_probability,
         EPS_SVM = value * SVM_probability)
# Per-game mean expected points per shot under each model.
ePPS_NN <- ShotChart %>%
  dplyr::group_by(game) %>%
  dplyr::summarise(ePPS = mean(EPS_NN)) %>%
  dplyr::select(ePPS) %>%
  pull()
ePPS_SVM <- ShotChart %>%
  dplyr::group_by(game) %>%
  dplyr::summarise(ePPS = mean(EPS_SVM)) %>%
  dplyr::select(ePPS) %>%
  pull()
# Consensus ePPS: simple average of the two models.
ePPS = (ePPS_NN + ePPS_SVM)/2
# Actual points per shot, excluding free throws.
Duke201415teamstats <- Duke201415teamstats %>%
  mutate(PPS = (pts - ft) /fga)
# Keep only the games with shot-chart data (hard-coded game numbers) and
# define shot-making as actual minus expected PPS.
subset <- Duke201415teamstats %>%
  filter(game_number %in% c(1:7, 9, 11:13, 16, 18, 22, 23, 26, 27, 29, 30, 32:35)) %>%
  dplyr::mutate(shot_making_NN = PPS - ePPS_NN,
                shot_making_SVM = PPS - ePPS_SVM,
                shot_making = PPS - ePPS)
subset <- subset %>%
  dplyr::mutate(ePPS_NN = ePPS_NN,
                ePPS_SVM = ePPS_SVM,
                ePPS = ePPS)
ShotChart <- ShotChart %>%
  dplyr::mutate(ePPS_NN = NN_probability * value,
                ePPS_SVM = SVM_probability * value,
                ePPS = (ePPS_NN +ePPS_SVM)/2)
# function to scrape gameIDs
# Scrape ESPN game IDs for Duke's schedule in the given season year.
# The ID is characters 57-65 of each schedule link's href.
get_game_ids <- function(Year) {
  url <- paste("https://www.espn.com/mens-college-basketball/team/schedule/_/id/150/season/", Year, sep = "")
  y <- read_html(url) %>%
    html_nodes(".ml4 a") %>%
    html_attr("href") %>%
    substr(57, 65)
  return(y)
}
# ESPN lists the NCAA tournament games first; move them to the end so the
# IDs line up with chronological game numbers, then keep the subset games.
gameIDs201415 <- get_game_ids("2015")
NCAATourn <- gameIDs201415[1:6]
gameIDs201415 <- gameIDs201415[-c(1:6)]
gameIDs201415[c(34:39)] <- NCAATourn
gameIDs201415 <- gameIDs201415[c(1:7, 9, 11:13, 16, 18, 22, 23, 26, 27, 29, 30, 32:35)]
long$gameID <- paste(gameIDs201415)
subset$gameID <- paste(gameIDs201415)
# Tooltips show the value; clicking a point opens the ESPN box score.
long$tooltip <- paste(long$type, ": ", round(long$value, 2), sep = "")
long$onclick <- sprintf("window.open(\"%s%s\")",
                        "https://www.espn.com/mens-college-basketball/game?gameId=", as.character(long$gameID))
# Plot 1: PPS vs ePPS per game, with per-game shot-making labels.
game_by_game <- long %>%
  filter(type %in% c("ePPS", "PPS")) %>%
  ggplot(aes(x = reorder(opponent, game_number), y = value, group = type,
             color = type, tooltip = tooltip, onclick = onclick)) +
  geom_line() +
  geom_point_interactive(aes(data_id = value), size = 2) +
  geom_label(aes(x = reorder(opponent, game_number), y = 0.75, label = round(shot_making, 1),
                 fill = shot_making), color = "black", size = 2.5, label.size = 0.1, label.r = unit(0.1, "lines"), label.padding = unit(0.1, "lines")) +
  geom_text(aes(x = 3, y = 0.8, label = "Shot-Making:"), size = 3, inherit.aes = F) +
  scale_fill_gradient2(low = "blue", mid = "white", high = "red", labels = c(low = "Worse: -0.3", mid = "Expected: 0", high = "Better: 0.3"), breaks = c(-0.2, 0, 0.2), limits = c(-0.3, 0.3)) +
  scale_color_manual(values = c("#001A57", "grey", "light blue", "grey")) +
  theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
  labs(title = "Duke's Game-by-Game Shooting", subtitle = "2014-15 Season",
       x = "Opponent", y = "Points Per Shot", color = "Type", fill = "Shot-Making")
ggiraph(code = {print(game_by_game)})
subset$tooltip <- paste("vs. ", subset$opponent, ": ", subset$pts, " - ", subset$opp_pts, sep = "")
subset$onclick <- sprintf("window.open(\"%s%s\")",
                          "https://www.espn.com/mens-college-basketball/game?gameId=", as.character(subset$gameID))
# Plot 2: shot-making vs point differential, with a linear fit; R^2 and
# p-value are annotated onto the plot.
model <- lm(pts ~ shot_making, data = subset)
r2 <- paste("R-squared: ", round(100 * glance(model)$r.squared, 4), "%", sep = "")
p_val <- paste("P-value: ", round(glance(model)$p.value, 10), sep = "")
shot_making_pt_diff <- subset %>%
  ggplot(aes(x = shot_making, y = pt_diff, color = result, tooltip = tooltip, onclick = onclick)) +
  geom_point_interactive(aes(data_id = shot_making), size = 3) +
  geom_hline(yintercept = 0, linetype = "dashed") +
  geom_vline(xintercept = 0, linetype = "dashed") +
  geom_text(aes(x = -0.11, y = 50, label = "Worse Than Expected Shot-Making:"), size = 3.5, inherit.aes = F) +
  geom_text(aes(x = 0.11, y = 50, label = "Better Than Expected Shot-Making:"), size = 3.5, inherit.aes = F) +
  geom_text(aes(x = 0.25, y = 38, label = r2), size = 2.5, inherit.aes = F) +
  geom_text(aes(x = 0.25, y = 35, label = p_val), size = 2.5, inherit.aes = F) +
  scale_color_manual(values = c("red", "#001A57")) +
  geom_smooth(aes(x = shot_making, y = pt_diff), method = "lm", se = F, inherit.aes = F) +
  labs(title = "Better shot-making leads to larger margins of victory",
       subtitle = "Duke 2014-15 Season",
       x = "Shot-Making Index", y = "Point Differential", color = "Result")
ggiraph(code = {print(shot_making_pt_diff)}, width_svg = 8, height_svg = 7)
subset$tooltip_ast <- paste("vs. ", subset$opponent, ": ", subset$pts, " - ", subset$opp_pts,
                            ", Asts: ", subset$ast, sep = "")
# Plot 3: assists vs shot-making, again with a linear fit and annotations.
# NOTE(review): model2 regresses shot_making on ast but the first plot's
# model regresses pts on shot_making — presumably intentional, confirm.
model2 <- lm(shot_making ~ ast, data = subset)
r22 <- paste("R-squared: ", round(100 * glance(model2)$r.squared, 4), "%", sep = "")
p_val2 <- paste("P-value: ", round(glance(model2)$p.value, 10), sep = "")
ast_shot_making <- subset %>%
  ggplot(aes(x = ast, y = shot_making, color = result, tooltip = tooltip_ast, onclick = onclick)) +
  geom_point_interactive(aes(data_id = shot_making), size = 3) +
  geom_hline(yintercept = 0, linetype = "dashed") +
  geom_vline(xintercept = mean(subset$ast), linetype = "dashed") +
  geom_text(aes(x = 25, y = -0.05, label = "Worse Than Expected Shot-Making:"), size = 3, inherit.aes = F) +
  geom_text(aes(x = 25, y = 0.05, label = "Better Than Expected Shot-Making:"), size = 3, inherit.aes = F) +
  geom_text(aes(x = mean(subset$ast) + 2, y = -0.2, label = paste("Mean: ", as.character(round(mean(subset$ast), 1)), " asts", sep = "")), size = 3, inherit.aes = F) +
  geom_text(aes(x = 26, y = 0.26, label = r22), size = 2.5, inherit.aes = F) +
  geom_text(aes(x = 26, y = 0.24, label = p_val2), size = 2.5, inherit.aes = F) +
  scale_color_manual(values = c("red", "#001A57")) +
  geom_smooth(aes(x = ast, y = shot_making), method = "lm", se = F, inherit.aes = F) +
  labs(title = "More assists lead to better shot-making", subtitle = "Duke 2014-15 Season",
       x = "Assists", y = "Shot-Making Index", color = "Result")
ggiraph(code = {print(ast_shot_making)})
|
68591ec4c7d0250bc1583b41fed00290291b665d
|
d812db15a12cfce3666d69812fbbb0da4b070c14
|
/code/package-list.R
|
56d0bec79761df9c608d21ef9501abf32f7da3b9
|
[
"MIT"
] |
permissive
|
jvpoulos/patt-c
|
97fba2cec409113747f246cec1cc36ee6cf21f5d
|
d471872f710210516c540f313437f8fa69a91e21
|
refs/heads/master
| 2021-07-07T15:02:19.941944
| 2020-07-31T03:27:53
| 2020-07-31T03:27:53
| 156,440,652
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 411
|
r
|
package-list.R
|
# Packages required by the analysis scripts.
# FIX: the CRAN package is named "Hmisc" — the original "HMisc" would fail
# to install (CRAN names are case-sensitive). Duplicated entries
# ("downloader", "gbm") are de-duplicated at install time.
packages <- c("ggplot2","ggpubr","gridExtra","reshape2","dplyr","MASS","gbm","rpart","foreach","doParallel","downloader","SAScii","RCurl",
              "foreign","plyr","downloader","digest","SuperLearner","class", "randomForest","glmnet","gam","e1071","gbm","xgboost","ROCR","reporttools")
weights <- c("cluster","Hmisc","weights") # install cluster -> Hmisc -> weights
install.packages(unique(c(packages,weights)))
|
b2c158d78aeadd38533f16f792cc15f78df908f0
|
0feedfcb9f76e63e15727486747d9693d4863e5a
|
/主代码/portfolio_characteristics_rkt (in one box).R
|
191eb09f1efec89177fd594e0d8f3fd34fd40375
|
[] |
no_license
|
jaynewton/paper_6
|
be06bd623707d87e0446f25eed0851d86738f561
|
331ce16dd031e9e3506fc91ac00bb7769cc2095f
|
refs/heads/master
| 2020-04-02T02:33:06.048234
| 2018-11-11T11:24:59
| 2018-11-11T11:24:59
| 153,915,405
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,731
|
r
|
portfolio_characteristics_rkt (in one box).R
|
#################################
# Portfolio characteristics by max_ret quantile:
# each month, sort stocks into k portfolios by max_ret and record the
# mean of y = 13 characteristics per portfolio; finally average each
# characteristic across months. Requires the monthly data.table objects
# loaded below (data.table syntax: da[i, j] with column expressions).
load("F:/我的论文/第五篇/RData/da_all_m.RData")
load("F:/我的论文/第五篇/主代码/individual investor preference/RData/da_individual_m.RData")
load("F:/我的论文/第五篇/RData/da_inst_m.RData")
load("F:/我的论文/第五篇/主代码/individual investor preference/RData/da_price_m.RData")
load("F:/我的论文/第五篇/主代码/individual investor preference/RData/da_ivol_6m.RData")
load("F:/我的论文/第五篇/主代码/individual investor preference/RData/da_size_m.RData")
load("F:/我的论文/第五篇/主代码/individual investor preference/RData/da_profit_m.RData")
load("F:/我的论文/第五篇/主代码/individual investor preference/RData/da_bm_m.RData")
load("F:/我的论文/第五篇/主代码/beta anomaly/monthly data in five years/RData/da_beta_5y.RData")
#load("F:/我的论文/第五篇/主代码/beta anomaly/daily data in one year/RData/da_beta_y.RData")
load("F:/我的论文/第五篇/主代码/individual investor preference/RData/da_turnover_m.RData")
load("F:/我的论文/第五篇/主代码/individual investor preference/RData/da_dividend_m.RData")
# Keep only the needed columns, then inner-join everything on
# (year-month, security code); rows missing any variable are dropped.
da_all_m <- da_all_m[,.(ym,SecCode,max_ret)]
da_individual_m <- da_individual_m[,.(ym,SecCode,individual)]
da_ivol_6m <- da_ivol_6m[,.(ym,SecCode,ivol)]
da_profit_m <- na.omit(da_profit_m)
da_m <- merge(da_all_m,da_individual_m,by=c("ym","SecCode"))
da_m <- merge(da_m,da_inst_m,by=c("ym","SecCode"))
da_m <- merge(da_m,da_price_m,by=c("ym","SecCode"))
da_m <- merge(da_m,da_ivol_6m,by=c("ym","SecCode"))
da_m <- merge(da_m,da_size_m,by=c("ym","SecCode"))
da_m <- merge(da_m,da_profit_m,by=c("ym","SecCode"))
da_m <- merge(da_m,da_bm_m,by=c("ym","SecCode"))
da_m <- merge(da_m,da_beta_5y,by=c("ym","SecCode"))
da_m <- merge(da_m,da_turnover_m,by=c("ym","SecCode"))
da_m <- merge(da_m,da_dividend_m,by=c("ym","SecCode"))
####
ym_index <- sort(unique(da_m$ym))
k <- 5
y <- 13 # number of porfolio characteristics variables
ret_p <- matrix(NA,nrow=length(ym_index),ncol=k) # p denotes portfolio
colnames(ret_p) <- paste0("p",1:k)
# vari_level[i, j, p]: month i, portfolio j, mean of characteristic p.
vari_level <- array(NA,c(length(ym_index),k,y)) # vari_level denotes variable level
for (i in 1:length(ym_index)) {
  da_sub <- da_m[ym==ym_index[i],]
  da_sub <- da_sub[order(max_ret),]
  # Split the sorted stocks into k groups: the middle k-2 groups get
  # n_mid stocks each; the remainder is split (as evenly as possible)
  # between the first and last groups.
  n_mid <- floor(nrow(da_sub)/k)
  if ((nrow(da_sub)-n_mid*(k-2))%%2==0){
    n_f <- (nrow(da_sub)-n_mid*(k-2))/2 # f denotes first, l denotes last
    n_l <- n_f
  } else {
    n_f <- (nrow(da_sub)-n_mid*(k-2)-1)/2
    n_l <- n_f+1
  }
  # Group boundary positions; cut() then assigns portfolio labels 1..k.
  x <- seq(from=n_f,to=nrow(da_sub),by=n_mid)[1:(k-1)]
  x <- c(x,nrow(da_sub))
  da_sub$group_n <- cut(1:nrow(da_sub), c(0,x),labels = 1:k)
  # Record the mean of each characteristic within each portfolio.
  for (j in 1:k) {
    vari_level[i,j,1] <- da_sub[group_n==j,mean(max_ret)]
    vari_level[i,j,2] <- da_sub[group_n==j,mean(individual)]
    vari_level[i,j,3] <- da_sub[group_n==j,mean(inst)]
    vari_level[i,j,4] <- da_sub[group_n==j,mean(price)]
    vari_level[i,j,5] <- da_sub[group_n==j,mean(ivol)]
    vari_level[i,j,6] <- da_sub[group_n==j,mean(size)]
    vari_level[i,j,7] <- da_sub[group_n==j,mean(eps)]
    vari_level[i,j,8] <- da_sub[group_n==j,mean(roe)]
    vari_level[i,j,9] <- da_sub[group_n==j,mean(opps)]
    vari_level[i,j,10] <- da_sub[group_n==j,mean(BM)]
    vari_level[i,j,11] <- da_sub[group_n==j,mean(be)]
    vari_level[i,j,12] <- da_sub[group_n==j,mean(turnover)]
    vari_level[i,j,13] <- da_sub[group_n==j,mean(dividend)]
  }
}
# Time-series average of each portfolio/characteristic cell.
vari_level_m <- matrix(NA,nrow=k,ncol=y) # m denotes mean
for (j in 1:k) {
  for (p in 1:y) {
    vari_level_m[j,p] <- mean(vari_level[,j,p],na.rm=T)
  }
}
colnames(vari_level_m) <- c("max_ret","individual","inst","price","ivol","size","eps",
                            "roe","opps","BM","be","turnover","dividend")
vari_level_m
|
cbf6621a8586452b820d509a3b1c81b69c4007c0
|
99fb6ea41554f6ebe7fbd21f368c68e0980770d1
|
/executable/microbiome_statistics_and_functions.R
|
75115a89dd59c9b4e3a08b7beb7f418f0f5a4cf0
|
[] |
no_license
|
GreathouseLab/Preg_Diet_microbiome
|
6b504335492f4edec4dd268271a5ca5ed6a66ffd
|
6e05d84a5952452db90443014dd19db798e58004
|
refs/heads/master
| 2020-06-26T15:21:45.116303
| 2020-01-07T02:15:55
| 2020-01-07T02:15:55
| 199,672,022
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 29,724
|
r
|
microbiome_statistics_and_functions.R
|
# =================================================== #
# =================================================== #
#
# Microbiome Analysis Functions
# Jun Chen, PhD
#
# =================================================== #
# =================================================== #
# Created: 06/10/2018 #
# Last Edited: 06/10/2018 #
# By: R. Noah Padgett #
# =================================================== #
# =================================================== #
# Copyright R. Noah Padgett, 2018
#
# This script is not guaranteed to be free of bugs and/or errors.
#
# This script can be freely used and shared as long as the author and
# copyright information in this header remain intact.
#
#
# You can edit script files (for example, this file)
# and either cut and paste lines from file into R command line
# (in Windows you can use ctrl-R to do this)
# or in the R command line type:
#
# source("main.R")
#
# You may need to use full path name in the filename, or alternatively in the R console
# window change to the directory containing the file by using the command:
#
# setwd("<path of your directory>")
#
# Or this file can be sourced through a different file that needs
# the functions that are listed in this file.
# =================================================== #
# =================================================== #
# This file contains the following functions
#
# Function
# subset_data()
# is.na.null()
# perform_differential_analysis_para()
# perform_differential_analysis_para_single_FE()
# - FE = Fixed Effects
#
# =================================================== #
# =================================================== #
# =================================================== #
# subset_data()
# =================================================== #
# Inputs
# data.obj = a dataframe
# samIDs = a vector of IDs to subset to
# Returns subsetted dataset
# Subset all components of a microbiome data object to the given samples.
#
# Args:
#   data.obj: list with $meta.dat and optionally $otu.tab, $otu.name,
#             $otu.name.full, $abund.list, $size.factor, $ko.list,
#             $cog.list (presence tested via the sibling is.na.null()).
#   samIDs:   sample identifiers (names, or a logical/numeric index).
# Returns the subsetted object; features (rows) with zero total counts
# across the retained samples are dropped from every count table.
subset_data <- function (data.obj, samIDs) {
  # Helper shared by abund.list / ko.list / cog.list (the original
  # repeated this anonymous function three times): subset the columns of
  # each count matrix and drop all-zero feature rows.
  subset.count.list <- function(count.list) {
    lapply(count.list, function(x) {
      xx <- x[, samIDs, drop=FALSE]
      xx[rowSums(xx) != 0, , drop=FALSE]
    })
  }
  data.obj$meta.dat <- data.obj$meta.dat[samIDs, , drop=FALSE]
  if (!is.na.null(data.obj$otu.tab)) {
    data.obj$otu.tab <- data.obj$otu.tab[, samIDs, drop=FALSE]
    data.obj$otu.tab <- data.obj$otu.tab[rowSums(data.obj$otu.tab) != 0, , drop=FALSE]
    # Keep the OTU annotation tables aligned with the retained OTUs.
    data.obj$otu.name <- data.obj$otu.name[rownames(data.obj$otu.tab), , drop=FALSE]
    if (!is.na.null(data.obj$otu.name.full)) {
      data.obj$otu.name.full <- data.obj$otu.name.full[rownames(data.obj$otu.tab), , drop=FALSE]
    }
  }
  if (!is.na.null(data.obj$abund.list)) {
    data.obj$abund.list <- subset.count.list(data.obj$abund.list)
  }
  if (!is.na.null(data.obj$size.factor)) {
    data.obj$size.factor <- data.obj$size.factor[samIDs]
  }
  if (!is.na.null(data.obj$ko.list)) {
    data.obj$ko.list <- subset.count.list(data.obj$ko.list)
  }
  if (!is.na.null(data.obj$cog.list)) {
    data.obj$cog.list <- subset.count.list(data.obj$cog.list)
  }
  data.obj
}
# =================================================== #
# is.na.null()
# =================================================== #
# This function is called by other functions as a logical check
# Do not mess with this function!
# TRUE if x is NULL, or if the first element of x is NA; FALSE otherwise.
# Used throughout this file to test whether an optional component of a
# data object is absent. (For zero-length non-NULL input, is.na(x)[1] is
# NA and the if() errors — same as the original.)
is.na.null <- function (x) {
  # Guard clauses replace the original nested if/else.
  if (is.null(x)) return(TRUE)
  if (is.na(x)[1]) return(TRUE)
  FALSE
}
# =================================================== #
# perform_differential_analysis_para()
# =================================================== #
# The following function performs the taxonomic differential analysis
# this is a monster of a function that does a LOT with lots of options.
#
# An example call:
# perform_differential_analysis_para(data.obj0, grp.name='Status.cat',
# adj.name=c('Gender', 'Age'), RE=FALSE, method='NB',
# taxa.levels=c('Genus', 'Species'), winsor=TRUE,
# winsor.qt=0.97, norm='TSS', norm.level='Species',
# intersect.no=4, prev=0.1, minp=0.002, medianp=NULL,
# mt.method='raw', cutoff=0.05, ann=paste0(df, '.BMI.TSSNB'))
#
# Arguments/Inputs
# data.obj Data for analysis
# grp.name
# adj.name
# subject
# RE Random Effects? - Logical
# method
# zerop.cutoff
# ZINB
# LRT
# taxa.levels
# winsor
# winsor.qt
# norm
# norm.level
# intersect.no
# prev
# minp
# medianp
# mt.method
# cutoff
# ann
# ...
perform_differential_analysis_para <- function (data.obj, grp.name, adj.name=NULL,
                                                subject=NULL, RE=FALSE, method='Adaptive0',
                                                zerop.cutoff=0.25, ZINB='ZINB1', LRT=FALSE,
                                                taxa.levels=c('Phylum', 'Order', 'Class',
                                                              'Family', 'Genus'),
                                                winsor=TRUE, winsor.qt=0.97, norm='GMPR',
                                                norm.level='Genus', intersect.no=4, prev=0.1,
                                                minp=0.002, medianp=NULL, mt.method='fdr',
                                                cutoff=0.15, ann='', ...) {
  # Parametric differential-abundance analysis of taxa versus `grp.name`,
  # optionally adjusted for `adj.name`, run at every level in `taxa.levels`.
  # Returns per-level lists of p-values (pv.list), FDR q-values (qv.list),
  # log fold changes with CIs (fc.list / fc.lc.list / fc.uc.list), and the
  # model actually fitted for each taxon (met.list).
  #
  # Select the fixed- or random-effects single-taxon worker.
  if (!RE) {
    if (!(method %in% c('ZINB', 'B', 'QB', 'NB', 'OP', 'Adaptive0', 'Adaptive1', 'Adaptive2'))) stop('The specified model is not supported!\n')
    perform_differential_analysis_para_single <- perform_differential_analysis_para_single_FE
    if (!is.null(subject)) warning('subject will not be used. Are you sure you want to run fixed effects model? ')
  } else {
    if (!(method %in% c('ZINB', 'B', 'B0', 'QB', 'NB', 'OP', 'Adaptive0', 'Adaptive1', 'Adaptive2'))) stop('The specified model does not have random effects implementation!\n')
    if (ZINB != 'ZINB1') stop('Currently only ZINB1 is supported!\n')
    if (is.null(subject)) warning('subject is not supplied. Fixed effects model will be used instead!\n')
    perform_differential_analysis_para_single <- perform_differential_analysis_para_single_RE
  }
  # Drop samples whose group value is missing.
  df <- data.obj$meta.dat
  grp <- df[, grp.name]
  ind <- !is.na(grp)
  #data.obj <- subset_data(data.obj, ind)
  grp <- grp[ind]
  df <- df[ind, ]
  # Expose OTU-level analysis under the name 'Species' when not precomputed.
  if ('Species' %in% taxa.levels & !('Species' %in% names(data.obj$abund.list))) {
    data.obj$abund.list[['Species']] <- data.obj$otu.tab
    rownames(data.obj$abund.list[['Species']]) <- paste0("OTU", rownames(data.obj$otu.tab), ":",
                                                         data.obj$otu.name[, 'Phylum'], ";", data.obj$otu.name[, 'Genus'])
  }
  # Warn when sequencing depth differs systematically between groups.
  dep <- colSums(data.obj$otu.tab)
  diff.seq.p <- summary(aov(dep ~ grp))[[1]][1, 'Pr(>F)']
  if (!is.na(diff.seq.p) & diff.seq.p <= 0.05) {
    cat("Signficant sequencing depth confounding!\n")
    cat("For parametric test with sequence depth adjustment, please be cautious about the results!\n")
    cat("There may be potential residual sequence depth confounding!\n")
  }
  pv.list <- qv.list <- fc.list <- fc.lc.list <- fc.uc.list <- met.list <- list()
  res.final <- NULL
  # Size factors used as the model offset.
  if (norm == 'Precalculated') {
    dep <- data.obj$size.factor
  }
  if (norm == 'GMPR') {
    dep <- GMPR(data.obj$abund.list[[norm.level]], intersect.no)
  }
  if (norm == 'TSS') {
    dep <- colSums(data.obj$abund.list[[norm.level]])
  }
  ldep <- log(dep)
  for (LOI in taxa.levels) {
    cat(LOI, "\n")
    taxon.ct <- data.obj$abund.list[[LOI]]
    if (winsor == TRUE) {
      # Winsorize the proportions at the winsor.qt quantile to damp outliers,
      # then convert back to (rounded) counts.
      taxon.ct.p <- t(t(taxon.ct) / dep)
      taxon.ct.p <- apply(taxon.ct.p, 1, function(x) {
        cutoff <- quantile(x, winsor.qt)
        x[x >= cutoff] <- cutoff
        x
      }
      )
      # apply() transposed the matrix; transpose back
      taxon.ct <- t(round(taxon.ct.p * dep))
    }
    # Abundance / prevalence filtering of taxa.
    prop <- t(t(taxon.ct) / colSums(taxon.ct))
    if (!is.null(minp)) {
      prop <- prop[rowMaxs(prop) > minp & rowSums(prop!=0) > prev*ncol(prop), , drop=FALSE]
      taxon.ct <- taxon.ct[rownames(prop), , drop=FALSE]
    }
    if (!is.null(medianp)) {
      nz.mean <- apply(prop, 1, function(x) median(x[x!=0]))
      prop <- prop[nz.mean > medianp & rowSums(prop!=0) > prev*ncol(prop), , drop=FALSE]
      taxon.ct <- taxon.ct[rownames(prop), , drop=FALSE]
    }
    pv.vec <- fc.vec <- fc.lc.vec <- fc.uc.vec <- met.vec <- conv.vec <- NULL
    obj <- NULL
    for (taxon in rownames(taxon.ct)) {
      cat('.')
      taxon.abund <- taxon.ct[taxon, ]
      ######## Logistic regression ###############
      if (method == 'B0') error <- try(obj <- perform_differential_analysis_para_single(taxon.abund, ldep, grp.name, adj.name, subject, df, method='B0', LRT, ...))
      if (method == 'B') error <- try(obj <- perform_differential_analysis_para_single(taxon.abund, ldep, grp.name, adj.name, subject, df, method='B', LRT, ...))
      if (method == 'QB') error <- try(obj <- perform_differential_analysis_para_single(taxon.abund, ldep, grp.name, adj.name, subject, df, method='QB', LRT, ...))
      ######## Overdispersed Poisson regression #########
      if (method == 'OP') error <- try(obj <- perform_differential_analysis_para_single(taxon.abund, ldep, grp.name, adj.name, subject, df, method='OP', LRT, ...))
      ######## Negative binomial regression #########
      if (method == 'NB') error <- try(obj <- perform_differential_analysis_para_single(taxon.abund, ldep, grp.name, adj.name, subject, df, method='NB', LRT, ...))
      ######## Zeroinflated negbinomial regression 1 ########
      if (method == 'ZINB') error <- try(obj <- perform_differential_analysis_para_single(taxon.abund, ldep, grp.name, adj.name, subject, df, method=ZINB, LRT, ...))
      # Adpative 0 selects OP and QB based on the zero proportion (Not optimal)
      # NOTE(review): `temp` is the proportion of NON-zero observations, so a
      # mostly-nonzero taxon goes to QB — confirm this is the intended rule.
      if (method == 'Adaptive0') {
        temp <- mean(as.numeric(taxon.abund != 0))
        if (temp > zerop.cutoff) {
          error <- try(obj <- perform_differential_analysis_para_single(taxon.abund, ldep, grp.name, adj.name, subject, df, method='QB', LRT, ...))
        } else {
          error <- try(obj <- perform_differential_analysis_para_single(taxon.abund, ldep, grp.name, adj.name, subject, df, method='OP', LRT, ...))
        }
      }
      # Adpative 1 selects NB and ZIB based on AIC
      if (method == 'Adaptive1') {
        error1 <- try(obj1 <- perform_differential_analysis_para_single(taxon.abund, ldep, grp.name, adj.name, subject, df, method='NB', LRT, ...))
        error2 <- try(obj2 <- perform_differential_analysis_para_single(taxon.abund, ldep, grp.name, adj.name, subject, df, method=ZINB, LRT, ...))
        if (class(error1) != 'try-error' & class(error2) != 'try-error') {
          if (obj1$aic < obj2$aic) {
            obj <- obj1
          } else {
            obj <- obj2
          }
          error <- error1
        } else {
          # pv == 0 indicates some problems in fitting
          if (class(error1) != 'try-error' & obj1$pv != 0) {
            obj <- obj1
            error <- error1
          } else {
            # BUG FIX: this branch falls back to the ZINB fit, so it must
            # inspect obj2$pv (original tested obj1$pv, which may not even
            # exist when the NB fit failed).
            if (class(error2) != 'try-error' & obj2$pv != 0) {
              obj <- obj2
              error <- error2
            } else {
              error <- error2
            }
          }
        }
      }
      # Adaptive 2 starts with NB model, if it fails, it switches ZINB
      if (method == 'Adaptive2') {
        error1 <- try(obj1 <- perform_differential_analysis_para_single(taxon.abund, ldep, grp.name, adj.name, subject, df, method='NB', LRT, ...))
        if (class(error1) == 'try-error' | obj1$pv == 0) {
          error2 <- try(obj2 <- perform_differential_analysis_para_single(taxon.abund, ldep, grp.name, adj.name, subject, df, method=ZINB, LRT, ...))
          if (class(error2) != 'try-error') {
            obj <- obj2
            error <- error2
          } else {
            error <- error2
          }
        } else {
          error <- error1
          obj <- obj1
        }
      }
      # Random Effects model
      # ZINB, B, NB, Adpative1 is implemented based on glmmADMB
      # Set P value NA for those not makes sense.
      # BUG FIX: use short-circuiting '||' plus isTRUE(any(...)) so that a
      # failed fit (try-error, `obj` possibly NULL) never evaluates a
      # length-zero / multi-element condition, which would error in if().
      if (class(error) == "try-error" || isTRUE(any(abs(obj$lfc) > 100))) {
        obj$pv <- obj$lfc <- obj$lfc.lci <- obj$lfc.uci <- obj$method <- NA
      }
      pv.vec <- rbind(pv.vec, obj$pv)
      fc.vec <- rbind(fc.vec, obj$lfc)
      fc.lc.vec <- rbind(fc.lc.vec, obj$lfc.lci)
      fc.uc.vec <- rbind(fc.uc.vec, obj$lfc.uci)
      met.vec <- rbind(met.vec, obj$method)
    }
    cat('\n')
    qv.vec <- matrix(p.adjust(pv.vec[, 1], 'fdr'), ncol=1)
    rownames(pv.vec) <- rownames(qv.vec) <- rownames(fc.vec) <- rownames(fc.uc.vec) <- rownames(fc.lc.vec) <- rownames(met.vec) <- rownames(prop)
    colnames(pv.vec) <- 'Pvalue'
    colnames(qv.vec) <- 'Qvalue'
    colnames(met.vec) <- 'Method'
    pv.list[[LOI]] <- pv.vec
    qv.list[[LOI]] <- qv.vec
    fc.list[[LOI]] <- fc.vec
    fc.lc.list[[LOI]] <- fc.lc.vec
    fc.uc.list[[LOI]] <- fc.uc.vec
    met.list[[LOI]] <- met.vec
    res <- cbind(pv.vec, qv.vec, fc.vec, fc.lc.vec, fc.uc.vec, met.vec)
    rownames(res) <- rownames(prop)
    #write.csv(res, paste0("Taxa_DifferentialAbundanceAnalysis_", LOI, "_", ann, ".csv"))
    # Collect taxa passing the multiple-testing cutoff at this level.
    if (mt.method == 'fdr') {
      res.final <- rbind(res.final, res[as.numeric(res[, 'Qvalue']) <= cutoff, , drop=F])
    }
    if (mt.method == 'raw') {
      res.final <- rbind(res.final, res[ as.numeric(res[, 'Pvalue']) <= cutoff, , drop=F])
    }
  }
  if (!is.null(res.final)) {
    colnames(res.final) <- colnames(res)
    res.final <- res.final[rowSums(is.na(res.final)) == 0, , drop=F]
    #write.csv(res.final, paste0("Taxa_DifferentialAbundanceAnalysis_AllLevels_", mt.method, '_', cutoff, "_", ann, ".csv"))
  }
  return(list(pv.list=pv.list, qv.list=qv.list, fc.list=fc.list, fc.uc.list=fc.uc.list, fc.lc.list=fc.lc.list, met.list=met.list))
}
# =================================================== #
# perform_differential_analysis_para_single_FE()
# =================================================== #
# The following function fits the fixed-effects model for a single taxon
# as part of the taxonomic differential analysis; it supports several
# model families (B/QB/NB/OP/ZINB variants) via the `method` argument.
#
# Internal helper: extract the confidence interval(s) of the coefficient(s)
# matching `grp.name` from a fitted model. Returns list(lci=, uci=); when
# several coefficients match, names are suffixed with '2.5%' / '97.5%'.
# (This logic was previously duplicated verbatim in every model branch.)
.extract_grp_ci <- function(fit, grp.name) {
  ci <- confint.default(fit)
  obj <- ci[grep(grp.name, rownames(ci)), ]
  if (is.vector(obj)) {
    # single matching coefficient: obj is c(lower, upper)
    list(lci = obj[1], uci = obj[2])
  } else {
    # several matching coefficients (e.g. multi-level factor)
    lci <- obj[, 1]
    uci <- obj[, 2]
    names(lci) <- paste(names(lci), '2.5%')
    names(uci) <- paste(names(uci), '97.5%')
    list(lci = lci, uci = uci)
  }
}
# Fixed-effects single-taxon worker. Fits the requested model of
# `taxon.abund` on grp.name (+ adj.name) with log depth `ldep` as offset or
# covariate, and returns list(method, pv, lfc, lfc.lci, lfc.uci, aic, code).
perform_differential_analysis_para_single_FE <- function (taxon.abund, ldep, grp.name, adj.name=NULL, subject=NULL, df, method='NB', LRT=FALSE) {
  # ldep: log depth (size factor)
  if (!is.null(adj.name)) {
    if (sum(grepl(grp.name, c(adj.name)))) {
      stop('grp.name could not be part of adj.name or subject, or there will be problem!\n')
    }
  }
  if (!is.null(subject)) {
    # BUG FIX: the original called warnings() (plural), which only prints
    # previously stored warnings and silently ignores its argument.
    warning('Fixed effects model will ignore the subject variable! Please use randome effects model!\n')
  }
  if (LRT & method == 'OP') warning('Overdispersed Poisson does not support LRT! Wald test used!\n')
  # Right-hand side of the model formula.
  if (is.null(adj.name)) {
    grp.name.adj.name <- grp.name
  } else {
    grp.name.adj.name <- paste(grp.name, '+', adj.name)
  }
  if (method == 'NB') {
    # Negative binomial regression with log-depth offset.
    m1.nb <- glm.nb(as.formula(paste('taxon.abund ~', grp.name.adj.name, '+ offset(ldep)')), data = df)
    if (LRT) {
      m0.nb <- update(m1.nb, as.formula(paste('. ~ . -', grp.name)))
      code <- list(m1.conv=m1.nb$converged, m1.bound=m1.nb$boundary, m0.conv=m0.nb$converged, m0.bound=m0.nb$boundary)
      pv.nb <- anova(m1.nb, m0.nb)['Pr(Chi)'][2, ]
      method <- paste(method, 'LRT')
    } else {
      code <- list(m1.conv=m1.nb$converged, m1.bound=m1.nb$boundary)
      pv.nb <- wald.test(b = coef(m1.nb), Sigma = vcov(m1.nb), Terms = grep(grp.name, names(coef(m1.nb))))$result$chi2['P']
      method <- paste(method, 'Wald')
    }
    aic.nb <- summary(m1.nb)$aic
    coef.nb <- coef(m1.nb)
    fc.nb <- coef.nb[grep(grp.name, names(coef.nb))]
    ci <- .extract_grp_ci(m1.nb, grp.name)
    return(list(method=method, pv=pv.nb, lfc=fc.nb, lfc.lci=ci$lci, lfc.uci=ci$uci, aic=aic.nb, code=code))
  }
  if (method == 'B') {
    # Presence/absence logistic regression; log depth enters as a covariate.
    taxon.abund2 <- as.numeric(taxon.abund != 0)
    m1.b <- glm(as.formula(paste('taxon.abund2 ~', grp.name.adj.name, '+ ldep')), data = df, family=binomial)
    if (LRT) {
      m0.b <- update(m1.b, as.formula(paste('. ~ . -', grp.name)))
      code <- list(m1.conv=m1.b$converged, m1.bound=m1.b$boundary, m0.conv=m0.b$converged, m0.bound=m0.b$boundary)
      pv.b <- pchisq(2 * (logLik(m1.b) - logLik(m0.b)), df = df.residual(m0.b) - df.residual(m1.b), lower.tail=FALSE)
      method <- paste(method, 'LRT')
    } else {
      code <- list(m1.conv=m1.b$converged, m1.bound=m1.b$boundary)
      pv.b <- wald.test(b = coef(m1.b), Sigma = vcov(m1.b), Terms = grep(grp.name, names(coef(m1.b))))$result$chi2['P']
      method <- paste(method, 'Wald')
    }
    aic.b <- summary(m1.b)$aic
    coef.b <- coef(m1.b)
    fc.b <- coef.b[grep(grp.name, names(coef.b))]
    ci <- .extract_grp_ci(m1.b, grp.name)
    return(list(method=method, pv=pv.b, lfc=fc.b, lfc.lci=ci$lci, lfc.uci=ci$uci, aic=aic.b, code=code))
  }
  # Rev: 2016_09_13 add 'QB', No likelihood ratio test
  if (method == 'QB') {
    # Quasi-binomial presence/absence model; Wald test only.
    taxon.abund2 <- as.numeric(taxon.abund != 0)
    m1.b <- glm(as.formula(paste('taxon.abund2 ~', grp.name.adj.name, '+ ldep')), data = df, family=quasibinomial)
    code <- list(m1.conv=m1.b$converged, m1.bound=m1.b$boundary)
    pv.b <- wald.test(b = coef(m1.b), Sigma = vcov(m1.b), Terms = grep(grp.name, names(coef(m1.b))))$result$chi2['P']
    method <- paste(method, 'Wald')
    aic.b <- summary(m1.b)$aic
    coef.b <- coef(m1.b)
    fc.b <- coef.b[grep(grp.name, names(coef.b))]
    ci <- .extract_grp_ci(m1.b, grp.name)
    return(list(method=method, pv=pv.b, lfc=fc.b, lfc.lci=ci$lci, lfc.uci=ci$uci, aic=aic.b, code=code))
  }
  if (method == 'OP') {
    # Overdispersed (quasi-)Poisson. No LRT and no AIC for quasi-likelihood.
    m1.op <- glm(as.formula(paste('taxon.abund ~', grp.name.adj.name)), offset=ldep, data = df, family=quasipoisson)
    code <- list(m1.conv=m1.op$converged, m1.bound=m1.op$boundary)
    # pv.op <- pchisq(2 * (logLik(m1.op) - logLik(m0.op)), df = df.residual(m0.op) - df.residual(m1.op), lower.tail=FALSE) # LRT not applicable
    coef.op <- coef(m1.op)
    pv.op <- wald.test(b = coef.op, Sigma = vcov(m1.op), Terms = grep(grp.name, names(coef.op)))$result$chi2['P']
    method <- paste(method, 'Wald')
    fc.op <- coef.op[grep(grp.name, names(coef.op))]
    ci <- .extract_grp_ci(m1.op, grp.name)
    return(list(method=method, pv=pv.op, lfc=fc.op, lfc.lci=ci$lci, lfc.uci=ci$uci, aic=NULL, code=code))
  }
  if (method == 'ZINB0') {
    # Zero-inflated NB; intercept-only zero model.
    m1.zinb <- zeroinfl(as.formula(paste('taxon.abund ~', grp.name.adj.name, '+ offset(ldep)')),
                        data = df, dist = "negbin", EM = TRUE)
    if (LRT) {
      if (is.null(adj.name)) {
        m0.zinb <- zeroinfl(as.formula(paste('taxon.abund ~ offset(ldep)')),
                            data = df, dist = "negbin", EM = TRUE)
      } else {
        m0.zinb <- zeroinfl(as.formula(paste('taxon.abund ~', adj.name, '+ offset(ldep)')),
                            data = df, dist = "negbin", EM = TRUE)
      }
      code <- list(m1.conv=m1.zinb$converged, m0.conv=m0.zinb$converged)
      # LRT
      pv.zinb <- pchisq(2 * (logLik(m1.zinb) - logLik(m0.zinb)), df = df.residual(m0.zinb) - df.residual(m1.zinb), lower.tail=FALSE)
      method <- paste(method, 'LRT')
    } else {
      code <- list(m1.conv=m1.zinb$converged)
      pv.zinb <- wald.test(b = coef(m1.zinb), Sigma = vcov(m1.zinb), Terms = grep(grp.name, names(coef(m1.zinb))))$result$chi2['P']
      method <- paste(method, 'Wald')
    }
    # AIC computed manually: -2*logLik + 2 * (number of parameters)
    aic.zinb <- -2 * logLik(m1.zinb) + 2 * (m1.zinb$n - m1.zinb$df.residual)
    coef.zinb <- coef(m1.zinb)
    fc.zinb <- coef.zinb[grep(grp.name, names(coef.zinb))]
    ci <- .extract_grp_ci(m1.zinb, grp.name)
    return(list(method=method, pv=pv.zinb, lfc=fc.zinb, lfc.lci=ci$lci, lfc.uci=ci$uci, aic=aic.zinb, code=code))
  }
  if (method == 'ZINB1') {
    # Zero-inflated NB; zero model depends on log depth.
    m1.zinb <- zeroinfl(as.formula(paste('taxon.abund ~', grp.name.adj.name, '+ offset(ldep) | ldep')),
                        data = df, dist = "negbin", EM = TRUE)
    if (LRT) {
      if (is.null(adj.name)) {
        m0.zinb <- zeroinfl(as.formula(paste('taxon.abund ~ offset(ldep) | ldep')),
                            data = df, dist = "negbin", EM = TRUE)
      } else {
        m0.zinb <- zeroinfl(as.formula(paste('taxon.abund ~', adj.name, '+ offset(ldep) | ldep')),
                            data = df, dist = "negbin", EM = TRUE)
      }
      code <- list(m1.conv=m1.zinb$converged, m0.conv=m0.zinb$converged)
      # LRT
      pv.zinb <- pchisq(2 * (logLik(m1.zinb) - logLik(m0.zinb)), df = df.residual(m0.zinb) - df.residual(m1.zinb), lower.tail=FALSE)
      method <- paste(method, 'LRT')
    } else {
      code <- list(m1.conv=m1.zinb$converged)
      pv.zinb <- wald.test(b = coef(m1.zinb), Sigma = vcov(m1.zinb), Terms = grep(grp.name, names(coef(m1.zinb))))$result$chi2['P']
      method <- paste(method, 'Wald')
    }
    aic.zinb <- -2 * logLik(m1.zinb) + 2 * (m1.zinb$n - m1.zinb$df.residual)
    coef.zinb <- coef(m1.zinb)
    fc.zinb <- coef.zinb[grep(grp.name, names(coef.zinb))]
    ci <- .extract_grp_ci(m1.zinb, grp.name)
    return(list(method=method, pv=pv.zinb, lfc=fc.zinb, lfc.lci=ci$lci, lfc.uci=ci$uci, aic=aic.zinb, code=code))
  }
  if (method == 'ZINB2') {
    # Zero-inflated NB; zero model includes group/covariates and log depth.
    m2.zinb <- zeroinfl(as.formula(paste('taxon.abund ~', grp.name.adj.name, '+ offset(ldep) |', grp.name.adj.name, '+ ldep')),
                        data = df, dist = "negbin", EM = TRUE)
    if (LRT) {
      if (is.null(adj.name)) {
        m0.zinb <- zeroinfl(as.formula(paste('taxon.abund ~ offset(ldep) | ldep')),
                            data = df, dist = "negbin", EM = TRUE)
      } else {
        m0.zinb <- zeroinfl(as.formula(paste('taxon.abund ~', adj.name, '+ offset(ldep) |', adj.name, ' + ldep')),
                            data = df, dist = "negbin", EM = TRUE)
      }
      code <- list(m1.conv=m2.zinb$converged, m0.conv=m0.zinb$converged)
      # LRT
      pv2.zinb <- pchisq(2 * (logLik(m2.zinb) - logLik(m0.zinb)), df = df.residual(m0.zinb) - df.residual(m2.zinb), lower.tail=FALSE)
      method <- paste(method, 'LRT')
    } else {
      code <- list(m2.conv=m2.zinb$converged)
      pv2.zinb <- wald.test(b = coef(m2.zinb), Sigma = vcov(m2.zinb), Terms = grep(grp.name, names(coef(m2.zinb))))$result$chi2['P']
      method <- paste(method, 'Wald')
    }
    aic2.zinb <- -2 * logLik(m2.zinb) + 2 * (m2.zinb$n - m2.zinb$df.residual)
    coef.zinb <- coef(m2.zinb)
    fc2.zinb <- coef.zinb[grep(grp.name, names(coef.zinb))]
    ci <- .extract_grp_ci(m2.zinb, grp.name)
    return(list(method=method, pv=pv2.zinb, lfc=fc2.zinb, lfc.lci=ci$lci, lfc.uci=ci$uci, aic=aic2.zinb, code=code))
  }
}
# =================================================== #
# abundance_list_create()
# =================================================== #
# this function is designed to help transform the raw data into the
# form usable in the Jun Chen functions above.
# mydata is an object of raw counts and a grouping variable
# group.Var is a grouping variable.
#
# return a matrix of the observed counts in the groups
# across all individuals.
# Aggregate raw per-sample counts into per-group totals.
#
# mydata:    data frame whose FIRST column is an identifier and whose
#            remaining columns are numeric counts (one column per taxon).
# Group.Var: grouping vector, one value per row of mydata.
#
# Returns a matrix with one row per group (rownames = group labels, in the
# sorted order produced by aggregate()) and one column per count column.
abundance_list_create <- function(mydata, Group.Var)
{
  N <- ncol(mydata) - 1
  # Aggregate all count columns at once instead of one aggregate() call per
  # column; this also behaves sanely when N == 0 (empty matrix, no error).
  agg <- aggregate(mydata[, seq_len(N) + 1, drop = FALSE],
                   by = list(Group.Var), FUN = sum)
  output <- as.matrix(agg[, -1, drop = FALSE])
  rownames(output) <- agg[, 1]
  colnames(output) <- colnames(mydata)[seq_len(N) + 1]
  return(output)
}
|
ac157c65e4ffd20028c82d2554858ea9b09b726b
|
233711a9c97ed63ac7fccbdbc896890b01784d03
|
/PrevisaoMacro/ipca-sarima.R
|
18eb55ea9bc09e3efd1b0d6aa2e7664b50130c4a
|
[] |
no_license
|
econoquant/EconoQuantCode
|
d092f3efa226c0a7b5bfd6d8620f2cfb303c4d37
|
35d0fba8fd2d8afae65815da4283be0a8c88502b
|
refs/heads/master
| 2020-12-02T18:01:13.243372
| 2017-07-12T13:26:32
| 2017-07-12T13:26:32
| 96,462,158
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,783
|
r
|
ipca-sarima.R
|
################### Load data #######################################
# Monthly IPCA (Brazilian CPI) variation; second CSV column is the series.
ipca <- read.csv('ipca.csv',header = T, sep = ';', dec = ',')
ipca <- ts(ipca[,2], start = c(1980,1), freq = 12)
##################### Select subsample ###############################
library(changepoint)
library(ggfortify)
# Visualize mean/variance change points to motivate the subsample choice.
autoplot(cpt.meanvar(ipca), main='Variação mensal do IPCA (%)')+
scale_x_date(date_breaks = '1 year',date_labels = "%b %y")
# Successive narrowing of the sample; each window() is a sub-window of the
# previous one, so only the last start date (2007-02) is binding.
ipca <- window(ipca, c(1995,1), freq =12)
ipca <- window(ipca, c(2004,1), freq =12)
ipca <- window(ipca, c(2007,2), freq =12)
# Hold out the last 12 months as the test set.
train <- window(ipca, end = end(ipca) - c(1,0))
test <- window(ipca, start = end(train) + c(0,1))
##################### SARIMA fit and residual analysis ####################
library(forecast)
fit_sarima <- auto.arima(train, seasonal = T)
summary(fit_sarima)
# Residual diagnostics: time plot, ACF/PACF, and the residual ACF alone.
ggtsdisplay(residuals(fit_sarima))
ggAcf(residuals(fit_sarima), main = "Autocorrelação resíduos")
# Ljung-Box test: H0 = residuals are iid (white noise).
Box.test(residuals(fit_sarima), lag=24, fitdf=length(coef(fit_sarima)),
type="Ljung")
############### Forecast evaluation #####################################
# 12 steps ahead (dynamic forecast against the held-out test set)
fcast.fit_sarima <- forecast(fit_sarima, h=length(test))$mean
accuracy(fcast.fit_sarima, test)
# 1 step ahead (static forecast): re-apply the fitted model to the test data
fit <- Arima(test, model = fit_sarima)
accuracy(fit)
############### Forecast plot ###########################################
onestep <- fitted(fit) # one-step-ahead fitted (forecast) values
plot(forecast(fit_sarima, h=12),xlab='', ylab='(% a.m.)', bty='l',
main='IPCA Mensal')
lines(test, col='black', lwd=2)
lines(onestep, col='red', lwd=2)
legend('topleft', col=c('blue','red'), lty=c(1,1), lwd=c(2,2),
legend=c('12 meses', '1 mês'))
715324680f72139831abc888b1aa7f1a66577d1a
|
d6c9f897714cea47c9b74547dd268462efd971b6
|
/Classifier/src/Classifier.R
|
8cf4592e0a246aa71e5bf929d106feb69fa8dd9d
|
[] |
no_license
|
SilambarasanM/Data-Preparation-and-Analysis
|
dc883ba9d556f81150dd1dda6758b9fef3063991
|
dba14b6e07cd31319d95bc70226283d1f6dd3b7a
|
refs/heads/master
| 2021-01-18T21:25:16.695758
| 2016-05-16T08:42:11
| 2016-05-16T08:42:11
| 52,257,666
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,531
|
r
|
Classifier.R
|
#Make sure system is connected to internet while running this code to install libraries
#Install and load the required R libraries (install on first use, then load).
if (!require("plyr")) {
  install.packages("plyr", dependencies = TRUE)
  library(plyr)
}
if (!require("ggmap")) {
  install.packages("ggmap", dependencies = TRUE)
  library(ggmap)
}
if (!require("gplots")) {
  install.packages("gplots", dependencies = TRUE)
  library(gplots)
}
if (!require("rgdal")) {
  install.packages("rgdal", dependencies = TRUE)
  library(rgdal)
}
if (!require("clusterSim")) {
  install.packages("clusterSim", dependencies = TRUE)
  # BUG FIX: the original loaded library(rgdal) here (copy-paste error), so
  # clusterSim was never attached after a fresh install.
  library(clusterSim)
}
#Loading Source Files
# NOTE(review): paths are hard-coded to one Windows user profile; update
# `path` (and the Community_Areas path below) before running elsewhere.
path = "C:\\Users\\admin\\Documents\\R Scripts\\Classifier"
setwd(path)
# Helper scripts, one per data source; each returns a per-community summary.
source("getGroceryCount.R")
source("getExpectancy.R")
source("getCrimeCount.R")
source("getHousingData.R")
source("getHardshipIndex.R")
grocery_count<-getGroceryCount("GroceryStores_2013.csv", path)
life_expectancy<-getExpectancy("Life_Expectancy_2000.csv", path)
crime_count<-getCrimeCount("Crimes_2015.csv", path)
housing_data<-getHousingData("Affordable_Rental_Housing_Developments.csv", path)
hardship_index<-getHardshipIndex("SocioEconomicData_2008_2012.csv", path)
#Loading Community Areas Information
setwd("C:\\Users\\admin\\Documents\\R Scripts\\Classifier\\Community_Areas")
area_data<-read.csv("community_areas.csv", header=TRUE, stringsAsFactor=FALSE)
# Keep only the area number and community name, with standardized names.
area_data<-area_data[,c("AREA_NUMBE","COMMUNITY")]
colnames(area_data)<-c("Community.Area","Community.Name")
community<-unique(area_data$Community.Area)
for(i in 1: length(community)) {
if(!community[i] %in% grocery_count$Community.Area) {
gcount <- data.frame(Community.Area = community[i], Grocery.Count = 0)
grocery_count <- rbind(grocery_count, gcount)
}
if(!community[i] %in% housing_data$Community.Area) {
hd <- data.frame(Community.Area = community[i], Housing.Units = 0)
housing_data <- rbind(housing_data, hd)
}
}
grocery_count<-grocery_count[ order(grocery_count$Community.Area), ]
housing_data<-housing_data[ order(housing_data$Community.Area), ]
#Loading Community Areas Map Info
sfn <- readOGR(".","community_areas", stringsAsFactors=FALSE)
sfn<-spTransform(sfn, CRS("+proj=longlat +datum=WGS84"))
ids<-sapply(slot(sfn, "polygons"), function(x) slot(x, "ID"))
can<-sfn$area_numbe
lookup<-do.call(rbind, Map(data.frame, Community.Area=can, id=ids))
lookup$Community.Area<-as.numeric(levels(lookup$Community.Area))[lookup$Community.Area]
lookup$id<-as.numeric(levels(lookup$id))[lookup$id]
lookup<-lookup[order(lookup$Community.Area),]
sfn<-fortify(sfn)
#Merging all data into one data frame - Profiling each Community Area
community_profile<- data.frame(grocery_count$Community.Area, grocery_count$Grocery.Count, life_expectancy$Life.Expectancy, crime_count$Crime.Count, housing_data$Housing.Units, hardship_index$Hardship.Index)
colnames(community_profile)<-c("Community.Area", "Grocery.Count", "Life.Expectancy", "Crime.Count", "Housing.Units", "Hardship.Index")
normalized_data<-community_profile[,-1]
normalized_data<-data.Normalization(normalized_data, type="n10", normalization="column")
data_matrix<-data.matrix(normalized_data)
data_matrix <- t(data_matrix)
data_matrix <- data_matrix * 100
barplot(data_matrix, main="Features of Community Areas", xlab="Community Areas", ylab="Percentage", col=rainbow(5), legend = rownames(data_matrix))
#Assigning correct Polygon IDs to the Community Areas
# Each per-source data frame gets an `id` column matching its map polygon.
# NOTE(review): assignment is positional (row i <- lookup row i) and assumes
# every data frame is sorted by Community.Area in the same order as `lookup`.
grocery_count$id<-"0"
life_expectancy$id<-"0"
crime_count$id<-"0"
housing_data$id<-"0"
hardship_index$id<-"0"
community_profile$id<-"0"
for (i in 1: length(community)){
  grocery_count$id[i] <- lookup$id[i]
  life_expectancy$id[i] <- lookup$id[i]
  crime_count$id[i] <- lookup$id[i]
  housing_data$id[i] <- lookup$id[i]
  hardship_index$id[i] <- lookup$id[i]
  community_profile$id[i] <- lookup$id[i]
}
#Merging Community areas to corresponding Community Map info
# Join each data source onto the fortified polygon coordinates by polygon id.
grocery_map_data<-merge(sfn, grocery_count, by=c("id"))
expectancy_map_data<-merge(sfn, life_expectancy, by=c("id"))
crime_map_data<-merge(sfn, crime_count, by=c("id"))
housing_map_data<-merge(sfn, housing_data, by=c("id"))
hardship_map_data<-merge(sfn, hardship_index, by=c("id"))
#Loading Chicago Map
# One choropleth per feature, drawn over a Chicago base map; dev.new()
# between plots opens a fresh graphics device for each.
chicago <- get_map(location = 'chicago', zoom = 'auto', maptype="roadmap")
ggmap(chicago) + geom_polygon(aes(x = long, y = lat, group=id, fill=Grocery.Count),data = grocery_map_data, color ="black",alpha = .7, size = .2) + labs(title="Grocery Store Density by Community Areas (2013)") + scale_fill_gradient(high = "#56B1F7", low = "white")
dev.new()
ggmap(chicago) + geom_polygon(aes(x = long, y = lat, group=id, fill=Life.Expectancy),data = expectancy_map_data, color ="black",alpha = .7, size = .2) + labs(title="Life Expectancy by Community Areas (2010)") + scale_fill_gradient(high = "#56B1F7", low = "#132B43")
dev.new()
ggmap(chicago) + geom_polygon(aes(x = long, y = lat, group=id, fill=Crime.Count),data = crime_map_data, color ="black",alpha = .7, size = .2) + labs(title="Crime Incidents by Community Areas (2015)") + scale_fill_gradient(high = "#132B43", low = "#56B1F7")
dev.new()
ggmap(chicago) + geom_polygon(aes(x = long, y = lat, group=id, fill=Housing.Units),data = housing_map_data, color ="black",alpha = .7, size = .2) + labs(title="Affordable Housing Developments by Community Areas (2013)") + scale_fill_gradient(high = "#132B43", low = "#56B1F7")
dev.new()
ggmap(chicago) + geom_polygon(aes(x = long, y = lat, group=id, fill=Hardship.Index),data = hardship_map_data, color ="black",alpha = .7, size = .2) + labs(title="Hardship Index by Community Areas (2008-2012)") + scale_fill_gradient(high = "#132B43", low = "#56B1F7")
#Classification Rules
# Assign each community area a class 1 (best) .. 5 (worst) from hand-tuned
# thresholds on hardship index, life expectancy, grocery density, crime and
# housing units. Rules are evaluated in order; `class == 0` means "not yet
# classified", and the checks below only fire while it is still 0.
# NOTE(review): the threshold constants (25/50/75 hardship, 77.6 years,
# 1315/3383 crimes, 350 units, 10 stores) are unexplained magic numbers —
# confirm their provenance before reuse.
community_profile$class<-0
for (i in 1:nrow(community_profile)){
  # Class 1: low hardship plus at least one strong positive indicator.
  if (community_profile$Hardship.Index[i] < 25){
    if((community_profile$Life.Expectancy[i] > 77.6) | ((community_profile$Grocery.Count[i] > 10) | (community_profile$Crime.Count[i] <= 1315))){
      community_profile$class[i] = 1
    }
  }
  # Class 2: moderate hardship with a redeeming indicator.
  if (community_profile$class[i] == 0){
    if ((community_profile$Hardship.Index[i] <=50) & ((community_profile$Housing.Units[i] >350) | (community_profile$Crime.Count[i] > 3383) | (community_profile$Life.Expectancy[i] > 77.6))){
      community_profile$class[i] = 2
    }
  }
  # Class 3: moderate hardship with low crime, or higher hardship with a
  # redeeming indicator.
  if (community_profile$class[i] == 0){
    if ((community_profile$Hardship.Index[i] <=50) & (community_profile$Crime.Count[i] <=3383)){
      community_profile$class[i] = 3
    }
    else{
      if ((community_profile$Hardship.Index[i] <=75) & ((community_profile$Housing.Units[i] >350) | (community_profile$Crime.Count[i] > 3383) | (community_profile$Life.Expectancy[i] > 77.6))){
        community_profile$class[i] = 3
      }
    }
  }
  # Class 4: same pattern one hardship tier down.
  if (community_profile$class[i] == 0){
    if ((community_profile$Hardship.Index[i] <=75) & (community_profile$Crime.Count[i] <=3383)){
      community_profile$class[i] = 4
    }
    else{
      if((community_profile$Hardship.Index[i] > 75) & ((community_profile$Housing.Units[i] >350) | (community_profile$Crime.Count[i] > 3383) | (community_profile$Life.Expectancy[i] > 77.6))){
        community_profile$class[i] = 4
      }
    }
  }
  # Class 5: everything that matched no earlier rule.
  if (community_profile$class[i] == 0)
    community_profile$class[i] = 5
}
#Plotting the Classifications on the Chicago Map
# Attach polygon coordinates to the classified profiles and draw the final
# choropleth of the class assignments.
community_profile<-merge(sfn, community_profile, by=c("id"))
dev.new()
ggmap(chicago) +
  geom_polygon(aes(x = long, y = lat, group=id, fill=class),data = community_profile, color ="black",alpha = .7, size = .2) +
  labs(title="Community Areas Classification") +
  scale_fill_gradient(high = "#132B43", low = "#56B1F7")
|
83d5723d47e4820dbb5d33bd01b7242c30a37b72
|
ca3fbf9bcf0349b35471e75344e95b5c92cdc2be
|
/plot1.R
|
56ae2d49388688c0349c65883868d023be8a9d1e
|
[] |
no_license
|
AntoninPrunet/ExData_Plotting1
|
198642bdf789a278dd10129bb9f8cccf63c9fbe3
|
3f41d5a229f6a4a766f344017809fe8df22869b7
|
refs/heads/master
| 2022-05-22T00:28:47.652991
| 2020-04-28T16:17:01
| 2020-04-28T16:17:01
| 259,387,287
| 0
| 0
| null | 2020-04-27T16:21:23
| 2020-04-27T16:21:22
| null |
UTF-8
|
R
| false
| false
| 657
|
r
|
plot1.R
|
# Download and extract the power-consumption dataset only if the extracted
# text file is not already present in the working directory.
if (!file.exists("household_power_consumption.txt")) {
    download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", destfile = "Household Power Consumption")
    unzip("Household Power Consumption")
}
library(data.table)
library(lubridate)
library(dplyr)
# Read the full dataset, parse the Date column, and keep only the two days
# of interest (2007-02-01 and 2007-02-02).
x<-fread("household_power_consumption.txt")
x$Date<-dmy(x$Date)
x<-filter(x,Date=="2007-02-01"|Date=="2007-02-02")
# Coerce to numeric; any non-numeric placeholder values become NA.
x$Global_active_power<-as.numeric(x$Global_active_power)
# Render the histogram to plot1.png (480x480 px) and close the device.
png("plot1.png",width=480, height=480)
hist(x$Global_active_power,main="Global Active Power",
     col="red",xlab="Global Active Power (kilowatts)",
     ylim=c(0,1200))
dev.off()
|
0367e75f3c900e317a3aab1cc017b7f84281c9c2
|
23a0e63a84671fd7304cfae5cb8002e34d310a14
|
/references_cleane.R
|
0d10bc9112b8a1bc15c2efed835cff18b843cc25
|
[] |
no_license
|
rxdavim/bibtex-cleaneR
|
35d1a91c7028ad4fc514e227bd89dd38d52dece2
|
1f3ecd91e433f2b70168f06574500c77cd3b387f
|
refs/heads/main
| 2023-08-10T00:54:35.740059
| 2021-09-06T09:40:24
| 2021-09-06T09:40:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,749
|
r
|
references_cleane.R
|
library(fulltext)
library(bib2df)
library(bibtex)
library(RefManageR)
# library(readtext)
library(doi2bib)
library(dplyr)
library(curl)
library(stringr)
library(RecordLinkage)
library(rcrossref)
library(aRxiv)
library(foreach)
library(doParallel)
library(doSNOW)
getAbstractFromDOI <- function(doi){
  # Fetch the abstract for `doi` from Crossref; returns NULL on any
  # error or warning (after logging the condition).
  cat("\nGetting abstract for DOI: ", doi)
  tryCatch(
    cr_abstract(doi = doi),
    error = function(e){
      cat("\nError getting abstract for doi: ", paste0(e))
      NULL
    },
    warning = function(e){
      cat("\nWarning getting abstract for doi: ", paste0(e))
      NULL
    }
  )
}
getCitationFromDOI <- function(doi, style="acm", locale="en-US"){
  # Fetch a BibTeX citation for `doi` from Crossref, round-trip it through a
  # temporary .bib file so RefManageR can parse it, and return the BibEntry.
  # Returns NULL on any error or warning (after logging the condition).
  cat("\nGetting citation for DOI: ", doi, "\tStyle: ", style, "\tLocale: ", locale)
  tryCatch({
    tmp_file <- tempfile(fileext = ".bib")
    # BUG FIX: pass the `locale` argument through (the original hard-coded
    # "en-US", silently ignoring the parameter).
    cr_cn(dois = doi, format = "bibtex", style = style, locale = locale) %>%
      write(file = tmp_file, append = FALSE)
    new_citation <- ReadBib(tmp_file, check = FALSE)
    # Remove the temp file once parsed instead of leaving it for session end.
    unlink(tmp_file)
    return(new_citation)
  },
  error = function(e){
    cat("\nError getting citation for doi: ", paste0(e))
    return(NULL)
  },
  warning = function(e){
    cat("\nWarning getting citation for doi: ", paste0(e))
    return(NULL)
  })
  return(NULL)
}
getTitleSimilarity <- function(old_title, new_title, sim_threshold=0.8){
  # TRUE when the two titles match: either the Levenshtein similarity of the
  # lower-cased, alphanumeric-only strings reaches `sim_threshold`, or the
  # cleaned new title is fully contained in the cleaned old title.
  sim <- -1
  tryCatch({
    clean_old <- tolower(gsub("[^0-9A-Za-z ]", "", old_title, ignore.case = TRUE))
    clean_new <- tolower(gsub("[^0-9A-Za-z ]", "", new_title, ignore.case = TRUE))
    sim <- levenshteinSim(clean_new, clean_old)
    # return() inside the tryCatch expression exits this function directly.
    if (sim >= sim_threshold){
      cat("\n- Same title: YES! (Similarity: ", sim, " >= ", sim_threshold, ")\n")
      return(TRUE)
    }
    else if (grepl(clean_new, clean_old, fixed = FALSE)){
      cat("\n- Same title: YES! (New title included fully in old title)\n")
      return(TRUE)
    }
  }, error = function(e){
    # BUG FIX: the original did return(FALSE) here, but return() inside a
    # handler only exits the handler, not this function, so control always
    # fell through to the final return(FALSE) anyway. The dead return() is
    # removed; fall-through to FALSE below is the intended behavior.
    cat("\n- Same title: ERROR, ", paste0(e), "\n")
  })
  cat("\n- Same title: NO! (Similarity: ", sim, " >= ", sim_threshold, ")\n")
  return(FALSE)
}
getReferenceListData <- function(bibentry){
  # Flatten a single bibliography entry into a plain named list of strings:
  # multi-valued fields are joined with " and ", URL-encoded slashes are
  # restored, and bibtype/bibkey are copied from the entry's attributes.
  if (class(bibentry)[1] != "list") {
    # unwrap a BibEntry-style object down to its underlying field list
    bibentry <- unclass(bibentry)[[1]]
  }
  out <- list()
  for (fld in names(bibentry)) {
    joined <- paste0(bibentry[[fld]], collapse = " and ")
    out[[fld]] <- str_replace(joined, "%2F", "/")
  }
  out[["bibtype"]] <- attr(bibentry, "bibtype")
  out[["bibkey"]] <- attr(bibentry, "key")
  out
}
getReferenceString <- function(bibentry, bibtype, bibkey){
  # Render one bibliography entry as a BibTeX record string:
  # "@bibtype{bibkey,\n  field\t= {value},\n ... }".
  #
  # BUG FIX: the original tested if(tolower(class(bibentry)[1] == "bibentry")),
  # i.e. tolower() applied to a logical, so a BibEntry object was never
  # unclassed; the parenthesis is now placed correctly.
  if (tolower(class(bibentry)[1]) == "bibentry"){
    bibentry <- unclass(bibentry)[[1]]
  }
  fields <- names(bibentry)
  attr_data <- ""
  # Keep the bookkeeping field 'r_updated' last — but only when it exists
  # (unconditionally appending it made the is.na() check below zero-length).
  if ("r_updated" %in% fields) {
    fields <- c(fields[fields != "r_updated"], "r_updated")
  }
  for (field in fields){
    field_txt <- tolower(paste0(field))
    data <- bibentry[[field]]
    # NOTE(review): is.na(data) assumes each field holds a single string;
    # multi-valued fields would make this condition length > 1 — confirm
    # upstream always stores one value per field.
    if (!is.na(data) && str_length(data) > 0){
      data <- paste0(bibentry[[field]], collapse = " and ")
      data <- str_replace(data, "%2F", "/")
      # Tab padding keeps the '=' signs roughly aligned for short/long names.
      tabs_n <- 2
      if (str_length(field_txt) <= 3){
        tabs_n <- tabs_n + 1
      }
      if (str_length(field_txt) >= 9){
        tabs_n <- tabs_n - 1
      }
      attr_data <- paste0(attr_data, "\t", field_txt, paste0(rep("\t", tabs_n), collapse=""), "= {", data, "},\n")
    }
  }
  ref_str <- paste0("@", bibtype, "{", bibkey, ",\n", attr_data, "}")
  return(ref_str)
}
mergeReferencesClass <- function(old_bib, new_bib, upd_bibkey=FALSE, upd_title=FALSE, upd_authors=TRUE, verbose=FALSE){
  # Merge the fields of `new_bib` into `old_bib` (bibliography entry objects)
  # and return the merged, unclassed entry list.
  #
  # Only fields present in new_bib are considered. title and bibkey are NOT
  # updated by default; authors ARE (see the upd_* switches).
  fields <- unique(RefManageR::fields(new_bib)[[1]])
  # fields <- unique(c(RefManageR::fields(old_bib)[[1]], RefManageR::fields(new_bib)[[1]]))
  # Work on the raw underlying lists; the "key" attribute is still read off
  # the unclassed entries below.
  old_bib_unclassed <- unclass(old_bib)[[1]]
  new_bib <- unclass(new_bib)[[1]]
  for(field in fields){
    # '&&' binds tighter than '||': each pair guards its own field.
    if(field == "title" && !upd_title || field == "author" && !upd_authors){
      cat("\n--Not Updating ", field)
      next
    }
    if(verbose){
      cat("Updating field", field, "\tOld: ", paste0(old_bib_unclassed[[field]]), "\tNew: ", paste0(new_bib[[field]]), "\n")
    }
    # NOTE(review): is.na() assumes a length-1 field value; a multi-valued
    # field (e.g. a person list) would hand '&&' a vector — confirm inputs.
    if(!is.null(new_bib[[field]]) && !is.na(new_bib[[field]])){
      # Strip '{', '|' and '}' (BibTeX case-protection braces) from the value.
      old_bib_unclassed[[field]] <- str_replace_all(new_bib[[field]], "[{|}]", "")
    }
  }
  if(upd_bibkey){
    cat("\n--Updating bibkey: ", attr(old_bib_unclassed, "key"), "=>", attr(new_bib, "key") )
    attr(old_bib_unclassed, "key") <- attr(new_bib, "key")
  }
  return(old_bib_unclassed)
}
mergeReferencesDF <- function(old_bib, new_bib, upd_bibkey=FALSE, upd_title=FALSE, upd_authors=TRUE, verbose=TRUE){
  # Copy field values from new_bib onto old_bib (both one-row data frames).
  #
  # Args:
  #   old_bib, new_bib: one-row data frames of reference fields.
  #   upd_bibkey / upd_title / upd_authors: per-field opt-in switches.
  #   verbose: print each field replacement as it happens.
  # Returns:
  #   old_bib with the updated field values.
  for (field in colnames(new_bib)) {
    tryCatch({
      if ((field == "bibkey" && !upd_bibkey) || (field == "title" && !upd_title) ||
          (field == "author" && !upd_authors)) {
        cat("\n--Not Updating ", field)
        next
      }
      if (!is.null(new_bib[[field]]) && !is.na(new_bib[[field]])) {
        # BUG FIX: the original computed the brace-stripped value and then
        # immediately recomputed `replacement` from the raw field, discarding
        # the first cleanup. Chain the two substitutions instead.
        replacement <- str_replace_all(new_bib[[field]], "[{|}]", "")
        # BUG FIX: the original pattern "\textsinglequote" contained a
        # literal TAB ("\t" + "extsinglequote") and could never match the
        # LaTeX command; escape the backslash properly.
        replacement <- str_replace_all(replacement, "\\\\textsinglequote", "'")
        if (verbose) {
          cat("\nUpdating field", field, "\t\'", paste0(old_bib[[field]]),
              "\' => \'", paste0(replacement), "\'")
        }
        old_bib[[field]] <- replacement
      }
      else {
        # BUG FIX: the original message referenced `replacement`, which is
        # undefined on this branch and raised an error (caught below).
        cat("\nNOT updating field (new is empty)", field, "\t\'",
            paste0(old_bib[[field]]), "\'")
      }
    }, error = function(e){
      cat("\n--Error updating DF ", field, ": ", paste0(e))
    })
  }
  return(old_bib)
}
cleanDoiUrl <- function(doi=NULL, url=NULL){
  # Normalise a DOI (or DOI-style URL) down to the bare "10.xxxx/..." form.
  # Exactly one of `doi`/`url` is expected; `doi` wins when both are given.
  # Stops with an error when a `url` was supplied but is not a DOI link.
  target <- if (!is.null(doi)) doi else url
  # Strip stray braces left over from BibTeX markup.
  target <- str_replace_all(target, "[{|}]", "")
  if (!is.null(target) && length(target) > 0 && grepl("doi", target)) {
    # Keep everything after the third "/" (e.g. "https://doi.org/<id>").
    third_slash <- str_locate_all(target, "/")[[1]][3]
    return(substr(target, third_slash + 1, str_length(target)))
  }
  if (!is.null(url)) {
    stop("The URL is not a doi")
  }
  return(target)
}
updateBibEntry <- function(bib_data, index, out_file, style="acm", upd_bibkey=FALSE, upd_title=FALSE, upd_author=TRUE, upd_abstract=FALSE, is_cluster=FALSE, wd=NA){
  # Refresh the bibliography row bib_data[index] with metadata fetched from
  # CrossRef / arXiv, and return it as a one-element list.
  #
  # Args:
  #   bib_data:    data frame of references, one row per entry.
  #   index:       row to process.
  #   out_file:    target file (currently unused; the write is commented out).
  #   style:       citation style passed through to getCitationFromDOI().
  #   upd_bibkey / upd_title / upd_author / upd_abstract: per-field opt-ins.
  #   is_cluster:  TRUE when run on a parallel worker (re-sources helpers).
  #   wd:          working directory to switch to on a worker.
  # Returns:
  #   list(bib_key = <updated row>); the row gains an "r_updated" flag of
  #   "YES" (merged) or "NO" (left as-is).
  if(is_cluster){
    # Parallel workers need the helper functions sourced in their own session.
    if(!is.na(wd)){
      setwd(wd)
    }
    source("references_cleane.R")
  }
  bib_entry <- bib_data[index]
  bib_key <- bib_entry$bibkey
  new_entry <- NULL
  cat("\n----------- Exporting", index, "/", nrow(bib_data), ". ", bib_key, ": ", bib_entry$title, "-----------")
  field <- "doi"
  doi <- cleanDoiUrl(doi=bib_entry$doi)
  title <- str_replace_all(bib_entry$title, "[{|}]", "")
  if(is.na(doi) || is.null(doi) || length(doi) <= 0 ){
    # No usable DOI field: try to extract one from the URL field instead.
    tryCatch({
      field <- "url"
      doi <- cleanDoiUrl(url=bib_entry$url)
    }, error = function(e){
      # NOTE(review): these assign inside the handler's own scope, so the
      # outer `doi` keeps its previous (empty) value; the emptiness check
      # below is what actually detects the failure.
      doi <- NULL
      url <- NULL
    })
  }
  if(is.na(doi) || is.null(doi) || length(doi) <= 0 ){
    if(is.null(title) || length(title) <= 0 ){
      cat(paste0("\nNo DOI and no TITLE found, skipping\n"))
    }
    else{
      # Fall back to a title search on CrossRef and arXiv.
      cat(paste0("\nNo DOI FOUND, looking for it on CrossRef and ARXIV, query: ", title, "\n"))
      resCR <- cr_works(query = title, format = "text", limit=10) # https://docs.ropensci.org/rcrossref/reference/cr_works.html
      # resPlos <- ft_search(query = title, from="plos")
      resArxiv <- arxiv_search(query = noquote(paste0('ti:\"', title, '\"')), limit=10)
      dois <- list(c(resArxiv$doi, resCR$data$doi))
      # Drop NA/empty candidate DOIs before trying them.
      dois <- lapply(dois, function(z){ z[!is.na(z) & z != ""]})[[1]]
      cat("ARXIV DOIS: ", length(resArxiv$doi), "CR DOIS: ", length(resCR$data$doi), "Total: ", length(dois))
      similarity_threshold <- 0.8
      # Try each candidate DOI until one's fetched title matches ours closely.
      for(j in 1:length(dois)){
        doi <- dois[[j]]
        cat("\nGetting data for DOI ", j, " of ", length(dois), ":\t", doi, "\n")
        ref <- getCitationFromDOI(doi, style)
        if(!is.null(ref)){
          cat("\n- Current Title: ", bib_entry$title,"\n- New Title: ", ref$title)
          same_title <- getTitleSimilarity(bib_entry$title, ref$title, similarity_threshold)
          if(same_title){
            cat("\nGetting new Reference")
            new_entry <- as.data.frame(getReferenceListData(ref))
            break
          }
        }
      }
    }
  }
  else{
    cat("\nFOUND DOI in field", field, ", looking for data")
    new_entry <- getCitationFromDOI(doi, style)
  }
  updated <- FALSE
  if(!is.null(new_entry) && length(new_entry) > 0){
    tryCatch({
      # bib_entry <- mergeReferencesClass(bib_entry, new_entry, upd_bibkey, upd_title, upd_author, verbose=FALSE)
      bib_entry <- mergeReferencesDF(bib_entry, new_entry, upd_bibkey, upd_title, upd_author, verbose=TRUE)
      if(upd_abstract){
        tryCatch({
          new_abstr <- getAbstractFromDOI(bib_entry$doi)
          if(!is.null(new_abstr)){
            bib_entry[["abstract"]] <- new_abstr
            cat("\nAbstract updated")
          }
        }, error = function(e) {
          cat("\nError updating abstract: ", paste0(e))
        })
      }
      bib_entry[["r_updated"]] <- "YES"
      cat("\nUpdating OLD Reference")
      updated <- TRUE
    }, error = function(e) {
      cat("\nError merging references: ", paste0(e))
      # NOTE(review): handler-scope assignment; the outer `updated` is only
      # TRUE if the merge completed, so the final outcome is still correct.
      updated <- FALSE
    })
  }
  if(!updated){
    cat("\nNOT UPDATING")
    tryCatch({
      bib_entry$r_updated <- "NO"
    }, error = function(e){
      cat("\nError updating entry: ", paste0(e))
    })
  }
  # entry_str <- getReferenceString(bib_entry, tolower(attr(bibentry, "bibtype")), tolower(attr(bibentry, "key")))
  # write(entry_str, file = out_file, append = TRUE)
  cat("\n-----------------------------------------\n")
  # entry_data <- getReferenceListData(bib_entry)
  # NOTE(review): the list element is literally named "bib_key" (the rename
  # to the actual key value is commented out below) — confirm callers expect
  # that fixed name.
  ret_list <- list(bib_key=bib_entry)
  # names(ret_list) <- bib_key
  return(ret_list)
}
|
567d4b5ef97a5c606892441a1e5de3ac20375808
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/tm/examples/writeCorpus.Rd.R
|
c54b336c27648d6585feae6e0bb24591d5a38b0b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 265
|
r
|
writeCorpus.Rd.R
|
# Example script extracted from the tm package documentation for writeCorpus().
library(tm)
### Name: writeCorpus
### Title: Write a Corpus to Disk
### Aliases: writeCorpus
### ** Examples
# Load the bundled "crude" example corpus shipped with tm.
data("crude")
# The actual write is wrapped in a "Not run" block so checks don't touch disk.
## Not run:
##D writeCorpus(crude, path = ".",
##D      filenames = paste(seq_along(crude), ".txt", sep = ""))
## End(Not run)
|
07cdcb9c9abf05a7ad8f8010d18724bdfd077ad2
|
c46a6ff80331d7f47bc3c379b7b6f51644a3925b
|
/Chapter_04/scripts/f4.R
|
d9da9fd4c29f524c47df0532a8073ea26f51981d
|
[] |
no_license
|
elmstedt/stats20_swirl
|
6bb215dc600decaf03ecf441cf0e28bdbd525536
|
6de97f3613f941c5c39a85b9df4f26fa3b62e766
|
refs/heads/master
| 2021-05-22T02:29:59.080370
| 2020-10-06T07:42:50
| 2020-10-06T07:42:50
| 252,929,124
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 143
|
r
|
f4.R
|
f4 <- function(x) {
  # Exercise stub: the step below is meant to be wrapped in a while loop.
  # Performs one Collatz-style step: halve an even input, otherwise 3x + 1.
  if (x %% 2 != 0) {
    return(3 * x + 1)
  }
  x / 2
}
|
c74b103e292211a342f40f191b32404db43df2a6
|
663cb73a0c47ab0fc81bc0efab7d1bbce8b369b1
|
/ParseHTML.R
|
bb3c6c280661b8e7ad440e78229522d340391658
|
[] |
no_license
|
wangguansong/nlxj-profiles
|
f48920eeee6b3fb5bf637207857cf67162ad5cdd
|
15661e472b259668c19ffbd4097ef3b3996e3669
|
refs/heads/master
| 2021-01-10T01:35:22.337574
| 2015-11-24T23:06:47
| 2015-11-24T23:06:47
| 46,497,747
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,665
|
r
|
ParseHTML.R
|
########################################################################
########################################################################
ParseHTML <- function(filename) {
  # Parse one saved article HTML file and return a list of per-person
  # character vectors.
  #
  # The page's <title> is captured while scanning; the single line holding
  # <div class="rich_media_content" id="js_content"> marks the content, whose
  # NEXT line is the article body. That body line is split on runs of five
  # or more dashes (one segment per person) and each segment is stripped of
  # its HTML tags; <img> tags are reduced to their src URL. The page title
  # is prepended to every person's vector.
  #
  # NOTE(review): if no <title> occurs before the content div, `title` is
  # never assigned and the loop below errors — confirm inputs always have one.
  con <- file(filename, open="r", encoding="utf-8")
  line <- readLines(con,n=1)
  while (length(line)!= 0) {
    idx <- grep('<title>', line)
    if (length(idx) > 0) {
      title <- gsub(".*<title>(.*)</title>", "\\1", line)
    }
    idx <-
      grep('<div class=\"rich_media_content[ ]*\" id=\"js_content\">',
           line)
    # Read the next line BEFORE testing: once the content div is seen,
    # `line` holds the article body and scanning stops.
    line <- readLines(con, n=1)
    if (length(idx) == 0) next
    else break
  }
  close(con)
  # Split the content line into person-sized chunks on "-----" separators.
  rawinfo <- as.list(strsplit(line, "-{5,}")[[1]])
  # Drop the last chunk (an advertisement appended by the platform).
  rawinfo[[length(rawinfo)]] <- NULL
  for (i in 1:length(rawinfo)) {
    # Split the chunk on closing </p> tags, one entry per paragraph.
    rawinfo[[i]] <- strsplit(rawinfo[[i]], "</p>")[[1]]
    # Delete opening "<p ...>" tags.
    rawinfo[[i]] <- gsub("<p[^<]*?>", "", rawinfo[[i]])
    # Replace "<br ...>" tags with spaces.
    rawinfo[[i]] <- gsub("<br[^<]* />", " ", rawinfo[[i]])
    # Reduce "<img ... src=...>" tags to the bare image URL.
    rawinfo[[i]] <- gsub("<img.*src=\"(http://[^ ]*)\"[^<]*>", "\\1",
                         rawinfo[[i]])
    # Delete "<em ...>" / "</em>" tags.
    rawinfo[[i]] <- gsub("<em[^<]*>", "", rawinfo[[i]])
    rawinfo[[i]] <- gsub("</em>", "", rawinfo[[i]])
    # Delete "<span ...>" / "</span>" tags.
    rawinfo[[i]] <- gsub("<span[^<]*>", "", rawinfo[[i]])
    rawinfo[[i]] <- gsub("</span>", "", rawinfo[[i]])
    # Drop empty strings left over after tag removal.
    rawinfo[[i]] <- rawinfo[[i]][!rawinfo[[i]]==""]
    # Prepend the page title to this person's records.
    rawinfo[[i]] <- c(title, rawinfo[[i]])
    # NOTE(review): as written this deletes ALL spaces; the pattern was
    # presumably an HTML entity (e.g. a non-breaking space) that got
    # unescaped in this copy of the file — confirm against the original.
    rawinfo[[i]] <- gsub(" ", "", rawinfo[[i]])
    # NOTE(review): a no-op as written; presumably originally unescaped an
    # ampersand entity — confirm against the original.
    rawinfo[[i]] <- gsub("&", "&", rawinfo[[i]])
    # Trim leading and trailing spaces.
    rawinfo[[i]] <- gsub("^ +", "", rawinfo[[i]])
    rawinfo[[i]] <- gsub(" +$", "", rawinfo[[i]])
  }
  return(rawinfo)
}
########################################################################
# Scan the saved HTML files and parse every individual into one flat list.
htmllist <- list.files("html/", pattern="nlxj[0-9]+.html$",
                       full.names=TRUE)
# BUG FIX: the original looped over 1:length(htmllist), which iterates over
# c(1, 0) when no file matches and then errors inside ParseHTML(); it also
# grew `infolist` with c() on every iteration (quadratic copying).
# lapply() + do.call(c, ...) handles the empty case and collects in one pass.
infolist <- do.call(c, lapply(seq_along(htmllist), function(i) {
  print(htmllist[i])
  ParseHTML(htmllist[i])
}))
if (is.null(infolist)) {
  infolist <- list()  # keep the empty-directory result a (zero-length) list
}
|
24c88bcc998524b842a51396d7f075e9e4b745d5
|
071cd8492b051065de257750d5b16cd47409c996
|
/locus_discovery_config_files/BFP_AD_config.R
|
d771af6c5d8defa501a6a0a4127132f27c9dbd9b
|
[] |
no_license
|
wpbone06/AD_and_Cardiometabolic_Trait_Bivariate_Scans
|
28734236a73b0c069241ccfebfe46648fbf6d93f
|
3dd4e32d8ddd23e8d55b64299ed31e59de915c90
|
refs/heads/master
| 2021-04-08T19:03:33.886867
| 2020-03-24T19:40:01
| 2020-03-24T19:40:01
| 248,802,625
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,007
|
r
|
BFP_AD_config.R
|
# Configuration for one bivariate GWAS scan: AD as exposure, BFP as outcome.
trait1 = "AD"   # exposure trait label
trait2 = "BFP"  # outcome trait label
# Substrings used to match the trait names inside GWAS catalog entries.
trait1GWASStr = c("Alzheimer")
trait2GWASStr = c("body fat percentage","body fat %", "Body Fat Percentage","Body Fat")
# Summary-statistics input for the exposure, and the chr/pos output path.
# NOTE(review): outPath points into a "BFP_CHD" directory although this is
# the BFP/AD config — confirm the path is intentional (possibly copied from
# the CHD config).
expPath="/project/voight_datasets/GWAS/01_alzD/AD_sumstats_Jansenetal.txt"
outPath="/project/voight_GWAS/wbone/bivariate_scan_project/BodyFatPer_CHD_bivarscan/BFP_CHD_input_data/BFP_chr_pos_from_CHD.txt"
# Column names in each trait's summary-statistics file.
trait1_BPcol = "BP"
trait2_BPcol = "BP"
trait1_CHRcol = "CHR"
trait2_CHRcol = "CHR"
trait1_Pcol = "P"
trait2_Pcol = "P.value"
# TwoSampleMR-style readers for the exposure and outcome datasets.
exp_dat = read_exposure_data("/project/voight_datasets/GWAS/01_alzD/AD_sumstats_Jansenetal.txt",sep="\t",snp_col="SNP",effect_allele_col="A1",other_allele_col="A2",eaf_col="MAF",se_col="SE",pval_col=trait1_Pcol,beta_col="BETA")
out_dat = read_outcome_data("/project/voight_GWAS/wbone/bivariate_scan_project/BodyFatPer_CHD_bivarscan/BFP_CHD_input_data/BFP_chr_pos_from_CHD.txt",sep="\t",snp_col="SNPID",effect_allele_col="Allele1",other_allele_col="Allele2",eaf_col="Freq1",se_col="StdErr",pval_col=trait2_Pcol,beta_col="Effect")
|
73c75396ffdcd5cda4920e453749f47e44cbe7fc
|
7967712d2e16907605f7d0acde096950eae5c3d4
|
/components/functions.R
|
b98f8550617006082ffbec3c08319e69da3094e2
|
[
"MIT"
] |
permissive
|
pablo-vivas/ProbabilityDistributionsViewer
|
8efdff6f7347ee940f8efa202beec8b5ba298153
|
ba24761a890e7d4f06f180316ca088d8210c4940
|
refs/heads/master
| 2020-08-07T02:17:51.092184
| 2018-07-24T12:31:36
| 2018-07-24T12:31:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,797
|
r
|
functions.R
|
library(shiny)
library(shinydashboard)
library(htmltools)
# Variables ----
# Shared styling constants for the value boxes built in this file.
boxcolor <- "blue"
mean.icon <- icon("star", lib = "glyphicon")                   # mean box icon
variance.icon <- icon("resize-horizontal", lib = "glyphicon")  # variance box icon
# Functions ----
## ui ----
### Custom selectInput
selectLanguageInput <- function(inputId, choices, selected = NULL, selectize = TRUE, width = NULL) {
  # A label-less select input (used for the language picker) built by hand so
  # no <label> element is emitted.
  #
  # Args:
  #   inputId:   input slot name.
  #   choices:   vector/list of choices (optionally named).
  #   selected:  initially selected value; defaults to the first choice.
  #   selectize: accepted for API parity — NOTE(review): not consulted below;
  #              the input is always selectize-ified.
  #   width:     CSS width of the container.
  #
  # NOTE(review): this leans on unexported shiny internals
  # (shiny:::choicesWithNames, :::firstChoice, :::selectOptions,
  # :::selectizeIt) and may break across shiny releases.
  selected <- shiny::restoreInput(id = inputId, default = selected)
  choices <- shiny:::choicesWithNames(choices)
  if (is.null(selected)) {
    selected <- shiny:::firstChoice(choices)
  } else {
    selected <- as.character(selected)
  }
  selectTag <- htmltools::tags$select(
    id = inputId,
    shiny:::selectOptions(choices, selected)
  )
  res <- div(
    class = "form-group shiny-input-container",
    style = paste0("width: ", htmltools::validateCssUnit(width), ";"),
    NULL, # For selectizeIt function.
    div(selectTag)
  )
  shiny:::selectizeIt(inputId, res, NULL, nonempty = TRUE)
}
### Panel for Distributions
distPanel <- function(name, en) {
  # Reference box linking a distribution to its Wikipedia article.
  # When no English article title `en` is given, the Japanese article under
  # `name` is linked instead.
  wiki <- if (missing(en)) {
    paste0("http://ja.wikipedia.org/wiki/", name)
  } else {
    paste0("http://en.wikipedia.org/wiki/", en)
  }
  box(
    width = 5,
    status = "primary",
    title = name,
    "参考 : ",
    a(
      target = "_blank",
      href = wiki,
      "Wikipedia",
      img(src = "img/external.png")
    )
  )
}
distBox <- function(name, wiki, i18n) {
  # Translated reference box for a distribution: title and Wikipedia link are
  # both looked up through the i18n translator.
  link <- a(
    target = "_blank",
    href = i18n()$t(wiki),
    "Wikipedia",
    img(src = "img/external.png")
  )
  box(
    width = 5,
    status = "primary",
    title = i18n()$t(name),
    paste0(i18n()$t("Reference"), " : "),
    link
  )
}
### Formula Box
formulaBox <- function(f_str, c_or_d, i18n) {
  # Box displaying the distribution's formula as MathJax.
  # c_or_d selects the heading: "c" = continuous (PDF), otherwise PMF.
  f_title <- if (c_or_d == "c") {
    "Probability density function (PDF)"
  } else {
    "Probability mass function"
  }
  box(
    width = 7,
    status = "primary",
    title = i18n()$t(f_title),
    helpText(paste0("$$", f_str, "$$"))
  )
}
### Sliders
createSlider <- function(name, label, min, max, value, step = 1L) {
  # Thin wrapper over shiny::sliderInput with this app's argument order.
  sliderInput(inputId = name, label = label,
              min = min, max = max, value = value, step = step)
}
### Parameters Box
createParamBox <- function(ns, c_or_d, rangeArgs, paramArgs = NULL, p_or_c = NULL, i18n = NULL) {
  # Build the "Parameters" box for one distribution module: a PDF/CDF radio
  # button, the x-range slider, and one slider per distribution parameter.
  #
  # Args:
  #   ns:        module namespace function.
  #   c_or_d:    "c" (continuous) or "d" (discrete) — picks the PDF/PMF label.
  #   rangeArgs: list of args for the range slider (min/max/value/step).
  #   paramArgs: optional list of per-parameter slider specs; each entry may
  #              carry label_name (translated text) and label_symbol (LaTeX).
  #   p_or_c:    initially selected radio value ("p" or "c").
  #   i18n:      reactive returning the translator object.
  # Returns: a shinydashboard box containing the assembled inputs.
  # Selector
  choices <- c("p", "c")
  if (c_or_d == "c") {
    pdf <- i18n()$t("Probability density function (PDF)")
  } else {
    pdf <- i18n()$t("Probability mass function (PMF)")
  }
  cdf <- i18n()$t("Cumulative distribution function (CDF)")
  names(choices) <- c(pdf, cdf)  # displayed labels; values stay "p"/"c"
  pcButton <- radioButtons(ns("p_or_c"), "", choices, p_or_c)
  # Range Slider
  rangeArgs$name <- ns("range")
  rangeArgs$label <- i18n()$t("Range")
  rangeSlider <- do.call(createSlider, rangeArgs)
  # Parameter Sliders
  if (is.null(paramArgs)) {
    paramSliders <- NULL
  } else {
    paramSliders <- lapply(paramArgs, function(x) {
      x$name <- ns(x$name)
      label_name <- x$label_name
      label_symbol <- paste0("\\(", x$label_symbol, "\\)")  # inline MathJax
      if (is.na(label_name) || label_name == "") {
        # Symbol-only label when no readable name was supplied.
        x$label <- label_symbol
      } else {
        label_name <- i18n()$t(label_name)
        x$label <- paste(label_name, label_symbol)
      }
      # Remove "label_name" and "label_symbol" so only createSlider()'s
      # actual arguments remain for do.call().
      x <- x[!(names(x) %in% c("label_name", "label_symbol"))]
      do.call(createSlider, x)
    })
  }
  # Box
  paramBox <-
    do.call(
      box,
      list(
        width = 5,
        title = i18n()$t("Parameters"),
        status = "primary",
        solidHeader = TRUE,
        withMathJax(),
        pcButton,
        rangeSlider,
        paramSliders
      )
    )
  return(paramBox)
}
### Dynamic Value Box
valueBoxRow <- function(ns, width = 6L) {
  # One fluid row holding the mean and variance value boxes side by side.
  # `ns` is the module namespace function, `width` the Bootstrap column width.
  mean_out <- valueBoxOutput(ns("meanBox"), width = width)
  var_out <- valueBoxOutput(ns("varianceBox"), width = width)
  fluidRow(mean_out, var_out)
}
valueBoxRowWide <- function(ns) {
  # Full-width (12-column) variant of valueBoxRow(): boxes stack vertically.
  valueBoxRow(ns, width = 12L)
}
## server ----
createFormula <- function(f_str, value) {
  # Inline-MathJax string of the form \( <f_str> \!=\! <value> \).
  pieces <- c("\\(", f_str, "\\!=\\!", value, "\\)")
  paste(pieces, collapse = "")
}
createBox <- function(f_str, value, param = "Mean", i18n = NULL) {
  # Build a shinydashboard valueBox rendering the formula for one summary
  # statistic of the current distribution.
  #
  # Args:
  #   f_str: LaTeX expression for the statistic, or NULL when undefined.
  #   value: its numeric value, or NULL when undefined.
  #   param: "Mean" or "Variance" — selects the icon and translated caption.
  #   i18n:  reactive returning the translator object.
  # Returns: a valueBox tag.
  if (param == "Variance") {
    icon <- variance.icon
  } else {
    icon <- mean.icon
  }
  param_str <- i18n()$t(param)
  # Idiom fix: short-circuit || for this scalar test (was bitwise |).
  if (is.null(f_str) || is.null(value)) {
    # Distributions with no closed form (e.g. undefined variance).
    formula <- i18n()$t("Undefined")
  } else {
    value <- round(value, digits = 3)
    formula <- createFormula(f_str, value)
    formula <- withMathJax(formula)
  }
  box <-
    valueBox(
      formula,
      param_str,
      icon = icon,
      color = boxcolor
    )
  return(box)
}
meanBox <- function(f_str, value, i18n) {
  # Convenience wrapper: value box for the distribution mean.
  createBox(f_str, value, param = "Mean", i18n)
}
varianceBox <- function(f_str, value, i18n) {
  # Convenience wrapper: value box for the distribution variance.
  createBox(f_str, value, param = "Variance", i18n)
}
|
6a367ae7c8e2e213fbbec7b85acd42ce9d25e96a
|
5d9470e54c69e914800f770ff3ca95b72e0d02b0
|
/R/ziaq.R
|
b981d1d0b3aefd66e171db06132ee3f08391474d
|
[] |
no_license
|
gefeizhang/ZIAQ
|
fffbd9da58490e455c58d65c2bf0bf49fadf6703
|
017da9ab92fac73faf4ae4e50934270893d296c1
|
refs/heads/master
| 2020-07-04T07:35:38.288467
| 2020-02-20T18:38:50
| 2020-02-20T18:38:50
| 202,207,402
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,573
|
r
|
ziaq.R
|
#' Zero-inflation adjusted quantile regression for single cell RNA sequencing data
#'
#' This function fits the zero-inflation adjusted quantile regression model for the
#' differential expression analysis in single cell RNA sequencing data
#' @param Y_matrix a matrix for expression values with row representing indiviudal genes
#' and column representing cells
#' @param colDat a dataframe including the individual cell information
#' @param formula a formula with the predictors included in \code{colDat}. The default
#' is ~condition.
#' @param group the variable name in \code{colDat} for the factor used in group comparsion. The default
#' is condition.
#' @param probs the quantile levels for the quantile regressions.
#' The default is \code{c(0.25, 0.5, 0.75)}
#' @param log_i TRUE or FALSE indicate whether to apply log transformation.
#' The default is TRUE.
#' @param parallel TRUE or FALSE indicate whether to apply parallel computing.
#' The default is TRUE.
#' @param no.core The number of cores used in parallel computing. The default
#' is all available cores \code{detectCores()}
#' @return \item{pvalue}{The p-values of all genes for testing the signficance
#' of the specified \code{group} variable.}
#' \item{res}{The full results from function \code{ziaq_fit} for all genes}
#' @keywords ziaq_fit
#' @export
#' @import quantreg
#' @import metap
#' @import parallel
#' @import stats
#'
#' @examples
#' #Use simuluated data
#'ymatrix = matrix(round(100* runif(100*150)), ncol = 100)
#'rownames(ymatrix) = paste0('gene', 1:150)
#'
#'colDat = data.frame(condition = rep(c(1, 0), e = 50))
#'
#'res = ziaq(ymatrix, colDat, formula = ~ condition,
#' group = 'condition', probs = c(0.25, 0.5, 0.75),
#' log_i = TRUE, parallel = FALSE, no.core = 1)
#'
#'print(res$pvalue)
ziaq <- function (Y_matrix, colDat, formula = ~ condition,
                  group = 'condition', probs = c(0.25, 0.5, 0.75),
                  log_i = TRUE, parallel = FALSE, no.core = detectCores() ) {
  # Fit the zero-inflation adjusted quantile regression (via ziaq_fit) to
  # every gene, i.e. every row of Y_matrix. See the roxygen block for the
  # full argument documentation.
  if (parallel) {
    cl <- makeCluster(getOption("cl.cores", no.core))
    # BUG FIX: the cluster was never released; ensure it is stopped even
    # when parApply() errors.
    on.exit(stopCluster(cl), add = TRUE)
    res <- parApply(cl = cl, Y_matrix, 1, ziaq_fit, colDat = colDat, formula = formula,
                    group = group, probs = probs, log_i = log_i)
  } else {
    res <- apply(Y_matrix, 1, ziaq_fit, colDat = colDat, formula = formula,
                 group = group, probs = probs, log_i = log_i)
  }
  # One p-value per gene (tests the `group` coefficient), plus the full fits.
  pval <- sapply(res, function(x) x$pvalue)
  return(list(pvalue = pval, full_results = res))
}
|
b0a38d25523a09e2481c70e89072d7e2ef84b452
|
8de7c88fd3ce03591c538694b3361f6b6c7fbf61
|
/R/transformPhylo.sim.R
|
21322e57e625cbe770b000a1a52c691cdae0c592
|
[] |
no_license
|
ghthomas/motmot
|
b093742a4ed264076ca41bbc4fddf29d3cc00a93
|
c24372f5d5efbfbee6196c5459d0def31d547e54
|
refs/heads/master
| 2021-01-01T17:57:38.949773
| 2018-07-30T10:12:35
| 2018-07-30T10:12:35
| 10,839,257
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,521
|
r
|
transformPhylo.sim.R
|
transformPhylo.sim <- function(phy, n=1, x=NULL, model=NULL, kappa=NULL, lambda=NULL, delta=NULL, alpha=NULL, psi=NULL, nodeIDs=NULL, rateType=NULL, cladeRates=NULL, branchRates=NULL, rate=NULL, group.means=NULL) {
  # Simulate n trait datasets on `phy` under the chosen evolutionary model.
  # Returns a taxa x n matrix of simulated values (rownames = tip labels).
  #
  # CONSISTENCY FIX: every branch-transformation model previously repeated
  # the same five lines (VCV matrix -> multivariate-normal draw); that step
  # is factored into one local helper so the switch arms stay in sync.
  sim_mvn_on_tree <- function(transformPhy) {
    phyMat <- VCV.array(transformPhy)
    attr(phyMat, "class") <- "matrix"
    ydum <- as.matrix(t(rmvnorm(n, sigma = phyMat)))
    rownames(ydum) <- rownames(phyMat)
    ydum
  }
  switch(model,
    "bm" = {
      # Plain Brownian motion: simulate on the untransformed tree.
      ydum <- sim_mvn_on_tree(phy)
    },
    "kappa" = {
      ydum <- sim_mvn_on_tree(transformPhylo(phy = phy, model = "kappa", kappa = kappa))
    },
    "lambda" = {
      ydum <- sim_mvn_on_tree(transformPhylo(phy = phy, model = "lambda", lambda = lambda))
    },
    "delta" = {
      ydum <- sim_mvn_on_tree(transformPhylo(phy = phy, model = "delta", delta = delta))
    },
    "free" = {
      ydum <- sim_mvn_on_tree(transformPhylo(phy = phy, model = "free", branchRates = branchRates))
    },
    "clade" = {
      ydum <- sim_mvn_on_tree(transformPhylo(phy = phy, model = "clade", nodeIDs = nodeIDs, cladeRates = cladeRates, rateType = rateType))
    },
    "OU" = {
      ydum <- sim_mvn_on_tree(transformPhylo(phy = phy, model = "OU", alpha = alpha))
    },
    "psi" = {
      ydum <- sim_mvn_on_tree(transformPhylo(phy = phy, model = "psi", psi = psi))
    },
    "mixedRate" = {
      # Rate-heterogeneous model driven by the grouping variable x.
      x <- as.matrix(x)
      dat <- data.frame(x = x, y = rep(0, length(x[, 1])))
      ntip <- Ntip(phy)
      rateData <- as.rateData(y = "y", x = "x", rateMatrix = NULL, phy = phy, data = dat)
      V <- transformRateMatrix(rateData, rate = rate)
      # expect.sd <- sqrt(mean(V[upper.tri(V)])) #
      # Expected SD of the trait under V; used to scale the group means.
      expect.sd <- sqrt((1/ntip * sum(diag(V))) - ((1/ntip^2) * t(matrix(rep(1, ntip))) %*% V %*% matrix(rep(1, ntip))))
      if (is.null(group.means)) {
        ydum <- as.matrix(t(rmvnorm(n, sigma = (V))))
        rownames(ydum) <- rownames(V)
      } else {
        # Shift each group's mean by group.means[i] expected SDs.
        # (Removed a dead `ydum <- vector(...)` that was overwritten below.)
        x.means <- unique(rateData$x)
        n.means <- length(x.means)
        samp.means <- rep(NA, length(rateData$x))
        for (i in 1:n.means) {
          samp.means[which(rateData$x == (i - 1))] <- rep(0 + (expect.sd * group.means[i]), length(which(rateData$x == (i - 1))))
        }
        ydum <- as.matrix(t(rmvnorm(n, mean = samp.means, sigma = (V))))
        rownames(ydum) <- rownames(V)
      }
    }
  )
  return(ydum)
}
|
8a0036224b05274ea9e07024c73ec1308b2abb25
|
ef10085faba12cbca8e6ef55e3575031dd82da71
|
/app.R
|
c9930c6aa221793647d6816a6b9d2d138417cd76
|
[] |
no_license
|
RforOperations2018/project2-clarissp
|
eb3e75bb4c0c88ac90da665b46888f8f590ecb5b
|
7ee468dd088ae6f802ff3671432df0e1a09d7a14
|
refs/heads/master
| 2020-04-01T05:39:59.319277
| 2018-10-21T18:36:15
| 2018-10-21T18:36:15
| 152,913,980
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,174
|
r
|
app.R
|
#Project 2 — data loading and preparation for the PA permits dashboard.
require(shiny)
require(rgdal)
require(leaflet)
require(leaflet.extras)
require(dplyr)
require(readxl)
require(stringr)
require(shinydashboard)
require(reshape2)
require(dplyr)
require(ggplot2)
require(plotly)
require(shinythemes)
require(RSocrata)
require(httr)
require(jsonlite)
#Shapefile for County Boundaries
pacounty <- readOGR("PA_Counties_clip.shp")
#Didn't end up using the subset of the counties because I couldn't get the over function to only display markers in my subsetted counties
#Subsetting counties to Southwest counties
#swcounty <- c("Armstrong", "Allegheny", "Beaver", "Cambria", "Fayette", "Greene", "Indiana", "Somerset", "Washington", "Westmoreland")
#pa_swcounty <- pacounty[pacounty$NAME %in% swcounty,]
#Transofrming projection of counties to match the following two layers
# NOTE(review): `pa_swcounty` is referenced below (and again at the over()
# call) although its creation above is commented out — as written these
# lines error unless pa_swcounty exists from a previous session. Confirm
# whether these should operate on `pacounty` instead.
proj4string(pa_swcounty) <- CRS("+proj=longlat +datum=NAD83 +no_defs +ellps=GRS80 +towgs84=0,0,0")
pa_swcounty <- spTransform(pa_swcounty, CRS=CRS("+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"))
#API for the Permit data (active underground mining permit polygons)
permits <- readOGR("http://data-padep-1.opendata.arcgis.com/datasets/cea4b401782a4178b139c6b5c6a929f2_48.geojson")
permitcounty <- c("Armstrong", "Beaver", "Cambria", "Greene", "Indiana", "Somerset", "Washington", "Westmoreland")
sw_permits <- permits[permits$COUNTY %in% permitcounty,]
#CSV for Permit data (attribute table used by the graphs and data table)
permitdata <- read.csv("Active_Underground_Permit_Boundaries.csv")
sw_permitdata <- filter(permitdata, COUNTY == "Armstrong" | COUNTY == "Beaver" | COUNTY == "Cambria" | COUNTY == "Greene" | COUNTY == "Indiana" | COUNTY == "Somerset" | COUNTY == "Washington" | COUNTY == "Westmoreland")
#API for Environmental Good Samaritan Act points
surfacemine <- readOGR("http://data-padep-1.opendata.arcgis.com/datasets/67ed627a525548d5900c1b6964b8e619_25.geojson")
#Another attempt to get the map API reactive function to work
#getEsri <- function(url) {
# Make Call
#g <- GET(URLencode(url))
#c <- content(g)
#readOGR(c)
#}
#Creating county column for Environmental Good Samaritan Act points (goodact)
surfacemine$county <- over(surfacemine, pa_swcounty, fn = NULL)
#Header of the shiny dashboard
header <- dashboardHeader(title = "Permits in PA")
#Sidebar of the shiny dashboard: page navigation plus the shared filters
#that drive permitInput() in the server.
sidebar <- dashboardSidebar(
  sidebarMenu(
    id = "tabs",
    #Pages in the sidebar
    menuItem("Graphs", icon = icon("pie-chart"), tabName = "activepermit"),
    menuItem("Dataset", icon = icon("database"),tabName = "permittable"),
    menuItem("Map", icon= icon("map-o"), tabName = "permit"),
    #Select input for Type of Permits
    selectInput("type",
                "Permit Type(s):",
                choices = sort(unique(sw_permitdata$TYPE)),
                multiple = TRUE,
                selected = c("Room and Pillar")
    ),
    #Select input for Counties of Permits
    selectInput("counties",
                "Select a County:",
                choices = sort(unique(sw_permitdata$COUNTY)),
                multiple = TRUE,
                selected = c("Cambria","Somerset")),
    #Select input for Status of Permits (filters by operating company)
    selectInput("operator",
                "Operator(s) of Permit:",
                choices = sort(unique(sw_permitdata$OPERATOR)),
                multiple = TRUE,
                selected = "Rosebud Mining"),
    #Reset button for filters (handled by observeEvent in the server)
    actionButton("reset", "Reset Filters", icon = icon("refresh"))
  )
)
#Body of the shiny dashboard: one tabItem per sidebar page.
body <- dashboardBody(
  tabItems(
    #Content for graphs page
    tabItem("activepermit",
            fluidPage(
              box(tabPanel("Bar Plot", plotlyOutput("permitbar")), width = 12),
              box(tabPanel("Pie Chart", plotlyOutput("permitpie")), width = 12)
            )
    ),
    #Contents for dataset page (download button + filtered table)
    tabItem("permittable",
            fluidPage(
              inputPanel(
                downloadButton("downloadData", "Download Active Permit Data")
              ),
              box(title = "Abandoned Mine Land Dataset", DT::dataTableOutput("permittable"), width = 12)
            )
    ),
    #Content for map page (facility-type filter + leaflet map)
    tabItem("permit",
            fluidRow(
              box(
                selectInput("facility",
                            "Type of Facility for Markers:",
                            choices = sort(unique(surfacemine$PRIMARY_FACILITY_KIND)),
                            multiple = TRUE,
                            selected = "GROWING GREENER")
              ),
              box(title = "Active Permits in Southwest PA", leafletOutput("permitmap"), width = 12)
            ))
  )
)
ui <- dashboardPage(header, sidebar, body, skin = "black")
#Defines server logic
server <- function(input, output, session = session){
  # Reactive subset of sw_permitdata driven by the three sidebar filters;
  # each filter is applied only when at least one value is selected.
  permitInput <- reactive({
    if(length(input$type) > 0 ){
      sw_permitdata <- subset(sw_permitdata, TYPE %in% input$type)
    }
    if(length(input$counties) > 0 ){
      sw_permitdata <- subset(sw_permitdata, COUNTY %in% input$counties)
    }
    if(length(input$operator) > 0 ){
      sw_permitdata <- subset(sw_permitdata, OPERATOR %in% input$operator)
    }
    return(sw_permitdata)
  })
  #Icons for the markers
  icons <- awesomeIconList(
    makeAwesomeIcon(icon = "leaf", library = "fa", markerColor = "green")
  )
  #Map for permits and reclamation sites
  # NOTE(review): the markers use the static `surfacemine` layer directly;
  # the facilityInput() reactive below was intended to filter them but is
  # not wired in (see the comment inside).
  output$permitmap <- renderLeaflet({
    #facilitymarker <- facilityInput()
    leaflet() %>%
      addPolygons(data = pacounty,
                  weight = 2,
                  color = "black") %>%
      addPolygons(data = permits,
                  weight = 1.5,
                  color = "red") %>%
      #Data for the markers should be facilitymarker however I wasn't able to get the reactive function to work so I changed the data source so that at least you can see my map in the dashboard
      addAwesomeMarkers(data = surfacemine, icon = ~icons, popup = ~SITE_NAME) %>%
      addProviderTiles("Esri.WorldGrayCanvas", group = "Gray Canvas", options = providerTileOptions(noWrap = TRUE)) %>%
      addProviderTiles("CartoDB.DarkMatterNoLabels", group = "Dark Matter", options = providerTileOptions(noWrap = TRUE)) %>% # This basemap doesn't really make sense since your county lines are black!
      addProviderTiles("Esri.WorldTopoMap", group = "Topography", options = providerTileOptions(noWrap = TRUE)) %>%
      addLayersControl(
        baseGroups = c("Gray Canvas", "Dark Matter", "Topography"),
        options = layersControlOptions(collapsed = TRUE)
      )
  })
  #Pie chart for active permit data (share of filtered permits per county)
  output$permitpie <- renderPlotly({
    permit <- permitInput()
    plot_ly(data = permit, labels = permit$COUNTY, type = 'pie',
            textposition = 'inside',
            textinfo = 'label+percent', insidetextfont = list(color = '#FFFFFF'),
            hoverinfo = 'label+percent', showlegend = TRUE)
  })
  #Bar plot for Active Permits (count per county, stacked by status)
  output$permitbar <- renderPlotly({
    permit <- permitInput()
    ggplot(data = permit, mapping = aes(x = COUNTY, fill = STATUS)) +
      geom_bar(stat = "count") +
      labs(title = "Active Underground Permits in Pennsylvania",
           x= "County",
           y= "Count of Permits", fill = "Status"
      ) +
      scale_fill_brewer(palette = "Pastel1") +
      theme_bw() +
      theme(plot.title =
              element_text(face = "bold",
                           family = "American Typewriter"),
            axis.title.x =
              element_text(
                family = "American Typewriter"
              ),
            axis.text.x =
              element_text(
                family = "American Typewriter",
                angle = 45,
                vjust = 0.5
              ),
            axis.title.y =
              element_text(
                family = "American Typewriter"
              ),
            axis.text.y =
              element_text(
                family = "American Typewriter"
              ),
            legend.position = "bottom",
            legend.box = "horizontal"
      )
  })
  #Data table for permit table (selected columns of the filtered data)
  output$permittable <- DT::renderDataTable({
    subset(permitInput(), select = c("MINE", "OPERATOR", "TYPE", "STATUS", "COAL_SEAM", "COUNTY"))
  })
  #Download button: serves the currently filtered data as CSV
  output$downloadData <- downloadHandler(
    filename = function() {
      paste("sw_permitdata", Sys.Date(), ".csv", sep="")
    },
    content = function(file) {
      write.csv(permitInput(), file)
    }
  )
  #Allows for the reset button to work
  # NOTE(review): this updates a "coal" input that does not exist in the UI
  # and never resets the "operator" filter — confirm which inputs the reset
  # is meant to restore.
  observeEvent(input$reset, {
    updateSelectInput(session, "type", selected = c("Room and Pillar"))
    updateSelectInput(session, "counties", selected = c("Cambria","Somerset"))
    updateSelectInput(session, "coal", selected = c("Pittsburgh"))
    showNotification("You have successfully reset the filters", type = "message")
  })
  #Attempt at reactive function for the map. All the commented out urls are my attempts to fix it. I just left them there so that you can see that I tried a bunch of different things
  # NOTE(review): builds an ArcGIS REST query from the "facility" input and
  # reads the result with readOGR(); currently unused by the map above.
  facilityInput <- reactive({
    filter <- ifelse(length(input$facility) > 0,
                     paste0("%20IN%20(%27", paste(input$facility, collapse = "%27AND%27"),"%27"),"")
    #url <- URLencode(paste0('http://www.depgis.state.pa.us/arcgis/rest/services/emappa/eMapPA_External_Extraction/MapServer/25/query?where=PRIMARY_FACILITY_KIND', gsub(" ", "+", input$facility), "&outFields=*&returnGeometry=true&returnTrueCurves=false&maxAllowableOffset=&geometryPrecision=&outSR=&returnIdsOnly=false&returnCountOnly=false&orderByFields=&groupByFieldsForStatistics=&outStatistics=&returnZ=false&returnM=false&gdbVersion=&returnDistinctValues=false&resultOffset=&resultRecordCount=&queryByDistance=&returnExtentsOnly=false&datumTransformation=&parameterValues=&rangeValues=&f=json"))
    #marker <- getEsri(url) %>%
    #spTransform("+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")
    #return(marker)
    url <- paste0('http://www.depgis.state.pa.us/arcgis/rest/services/emappa/eMapPA_External_Extraction/MapServer/25/query?where=PRIMARY_FACILITY_KIND',filter, '&outFields=*&returnGeometry=true&returnTrueCurves=false&maxAllowableOffset=&geometryPrecision=&outSR=&returnIdsOnly=false&returnCountOnly=false&orderByFields=&groupByFieldsForStatistics=&outStatistics=&returnZ=false&returnM=false&gdbVersion=&returnDistinctValues=false&resultOffset=&resultRecordCount=&queryByDistance=&returnExtentsOnly=false&datumTransformation=&parameterValues=&rangeValues=&f=geojson')
    #url <- paste0('http://www.depgis.state.pa.us/arcgis/rest/services/emappa/eMapPA_External_Extraction/MapServer/25/query?where=1%', filter,'&outFields=*&returnGeometry=true&returnTrueCurves=false&maxAllowableOffset=&geometryPrecision=&outSR=&returnIdsOnly=false&returnCountOnly=false&orderByFields=&groupByFieldsForStatistics=&outStatistics=&returnZ=false&returnM=false&gdbVersion=&returnDistinctValues=false&resultOffset=&resultRecordCount=&queryByDistance=&returnExtentsOnly=false&datumTransformation=&parameterValues=&rangeValues=&f=geojson')
    print(url)
    facilityfilter <- readOGR(url)
    return(facilityfilter)
  })
}
# Example of a full working ArcGIS REST query kept for reference:
#http://www.depgis.state.pa.us/arcgis/rest/services/emappa/eMapPA_External_Extraction/MapServer/25/query?where=1%3D1&text=&objectIds=&time=&geometry=%7B%22xmin%22%3A-10315563.459876563%2C%22ymin%22%3A4644636.53163017%2C%22xmax%22%3A-6971902.094570702%2C%22ymax%22%3A5378432.0031676665%2C%22spatialReference%22%3A%7B%22wkid%22%3A102100%7D%7D&geometryType=esriGeometryEnvelope&inSR=&spatialRel=esriSpatialRelIntersects&relationParam=&outFields=*&returnGeometry=true&returnTrueCurves=false&maxAllowableOffset=&geometryPrecision=&outSR=&returnIdsOnly=false&returnCountOnly=false&orderByFields=&groupByFieldsForStatistics=&outStatistics=&returnZ=false&returnM=false&gdbVersion=&returnDistinctValues=false&resultOffset=&resultRecordCount=&queryByDistance=&returnExtentsOnly=false&datumTransformation=&parameterValues=&rangeValues=&f=geojson
#Runs the application
shinyApp(ui = ui, server = server)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.