blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
91d27c736ebfc3975b22a1725237365b94db2540
|
a0e84488a6bf4cecf049784104095b05fcba19f8
|
/man/run_mHMM.Rd
|
da6a9ef50d0a14aa02a1707e023fec72822cec12
|
[
"MIT"
] |
permissive
|
JasperHG90/sleepsimR
|
2a149bb8af60619c84eab6b9e79caa21c70faef6
|
75fca083bd2304b98290d9ba945d91b6073cb17a
|
refs/heads/master
| 2021-01-09T13:49:05.095474
| 2020-05-14T06:48:51
| 2020-05-14T06:48:51
| 242,324,165
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,731
|
rd
|
run_mHMM.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run_model.R
\name{run_mHMM}
\alias{run_mHMM}
\title{Run an mHMM on a simulated sleep dataset}
\usage{
run_mHMM(
data,
start_values,
mprop,
hyperprior_means,
model_seed,
mcmc_iterations = 2000,
mcmc_burn_in = 1000,
show_progress = TRUE,
order_data = FALSE
)
}
\arguments{
\item{data}{Matrix. data set used to run the mHMM. See s_data parameter in \link[mHMMbayes]{mHMM_cont}.}
\item{start_values}{List (must be unnamed). start values for relevant parameters. See start_val parameter in \link[mHMMbayes]{mHMM_cont}.}
\item{mprop}{List containing two elements (numeric scalars), (1) 'm' or the number of hypothesized latent states and (2) 'n_dep' or the number of dependent (emission) variables.}
\item{hyperprior_means}{Numeric vector. Contains the hyperprior value for the between-subject distribution means. See \link[mHMMbayes]{mHMM_cont}.}
\item{model_seed}{Int. Random seed that is set before running the model.}
\item{mcmc_iterations}{Int. number of iterations for the MCMC algorithm. Defaults to 2000. See mcmc parameter in \link[mHMMbayes]{mHMM_cont}.}
\item{mcmc_burn_in}{Int. number of burn-in samples for the MCMC algorithm. Defaults to 1000. See mcmc parameter in \link[mHMMbayes]{mHMM_cont}.}
\item{show_progress}{Boolean. Should progress of MCMC algorithm be displayed? Defaults to TRUE.}
\item{order_data}{Boolean. Should hyperpriors and start values be sorted from lowest to highest? This is required to record label switching. See \link[mHMMbayes]{mHMM_cont}.}
}
\value{
An mHMM_cont object containing posterior distributions for each of the parameters.
}
\description{
Run an mHMM on a simulated sleep dataset
}
|
6205116b7083ec2a1cb3979f5fbd109d3bdb74a6
|
c4788fa46ef6504065e8b31d6e3f82432ef3954e
|
/pkg/tests/dataframe.R
|
87d1cbdc7a8be796139b71f4ab6b11dd4ed09247
|
[] |
no_license
|
RevolutionAnalytics/plyrmr
|
bb8c2d6946e141cb51971f17ee5480071d3a168c
|
ce54c98c5d23045e70e83b4b0e833424ce97a4cf
|
refs/heads/master
| 2023-08-31T17:19:14.978193
| 2015-03-19T04:51:05
| 2015-03-19T04:51:05
| 11,489,876
| 30
| 21
| null | 2015-03-25T00:40:06
| 2013-07-17T23:19:10
|
R
|
UTF-8
|
R
| false
| false
| 1,957
|
r
|
dataframe.R
|
# Copyright 2014 Revolution Analytics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Property-based regression tests for plyrmr's data-frame verbs, driven by the
# quickcheck package: each test() call draws random inputs from the generator
# defaults in the function signature (rdata.frame(), rnumeric(), ...) and
# checks that a plyrmr verb agrees with a base-R / plyr equivalent.
library(plyrmr)
library(quickcheck)
library(functional)
# cmp.df is an unexported plyrmr helper that compares two data frames;
# accessed via ::: because the package does not export it.
# NOTE(review): presumably it compares up to row/column order — confirm in plyrmr.
cmp.df = plyrmr:::cmp.df
#where: where() must behave like base subset() on the same predicate
test(
function(df = rdata.frame(), x = rnumeric()) {
# Append a guaranteed-numeric column (recycled from x and the first column,
# truncated to nrow(df)) so a numeric filter column always exists.
df = cbind(df, col.n = suppressWarnings(cbind(x, df[,1]))[1:nrow(df),1])
numeric.cols = which(sapply(df, is.numeric ))
filter.col = names(numeric.cols[1])  # filter on the first numeric column
cmp.df(
where(df, eval(as.name(filter.col)) > 0),
subset(df, eval(as.name(filter.col)) > 0))})
#transmute: transmute() of one column must behave like plyrmr::select()
test(
function(df = rdata.frame()) {
col = as.name(sample(names(df), 1))  # pick one column at random
cmp.df(
transmute(df, eval(col)),
plyrmr::select(df, eval(col)))})
#bind.cols: bind.cols() must behave like base transform() when adding a column
test(
function(df = rdata.frame()) {
col = as.name(sample(names(df), 1))
cmp.df(
bind.cols(df, z = eval(col)),
transform(df, z = eval(col)))})
#sample: a sample of df unioned with df must give back df (i.e. the sample
# is a subset of the original rows), for each supported sampling method.
assert.sample.is.subset =
function(df, method, method.args)
cmp.df(
df,
union(do.call(sample, c(list(df, method = method), method.args)), df))
test(
function(df = rdata.frame(ncol = 10), n = rinteger(element = 5, size = ~1))
assert.sample.is.subset(df, "any", list(n = min(n, nrow(df)))))
test(
function(df = rdata.frame(ncol = 10), p = rdouble(element = runif, size = ~1))
assert.sample.is.subset(df, "Bernoulli", list(p = p)))
test(
function(df = rdata.frame(ncol = 10), n = rinteger(element = 5, size = ~1))
assert.sample.is.subset(df, "hypergeometric", list(n = min(n, nrow(df)))))
|
d2504ca055bf76ac79367cd8c0e672ae1bd84815
|
3e50dd3332d32c68e8de65e8172832edb5f88356
|
/run_analysis.R
|
e0c19da3e8e36b252de77cf817e6c03638b8cbe4
|
[] |
no_license
|
LourdesC/Getting-and-Cleaning-Data-Course-Project
|
f06ad3d071b2e65637fb39724151cec43fbb9403
|
920155f410b0a59707d6acf6fb5b72193138d76d
|
refs/heads/master
| 2021-01-22T11:51:28.893337
| 2015-02-17T20:48:27
| 2015-02-17T20:48:27
| 30,932,110
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,771
|
r
|
run_analysis.R
|
####Getting and Cleaning Data Course Project####################################
# You should create one R script called run_analysis.R that does the following.#
# #
# 1.- Merges the training and the test sets to create one data set. #
# 2.- Extracts only the measurements on the mean and standard deviation for #
# each measurement. #
# 3.- Uses descriptive activity names to name the activities in the data set #
# 4.- Appropriately labels the data set with descriptive variable names. #
# #
# 5.- From the data set in step 4, creates a second, independent tidy data set #
# with the average of each variable for each activity and each subject. #
################################################################################
library(plyr)

# Print the working directory so the user can confirm where the UCI HAR
# dataset files are expected to live.
getwd()

# Feature names and activity labels are shared by the train and test sets.
features <- read.table("./features.txt", header = FALSE)
activity <- read.table("./activity_labels.txt", header = FALSE)
colnames(activity) <- c("activityId", "activityType")

# Read the test set: measurements (X), activity ids (y), and subject ids.
x_test <- read.table("./test/X_test.txt", header = FALSE)
y_test <- read.table("./test/y_test.txt", header = FALSE)
subject_test <- read.table("./test/subject_test.txt", header = FALSE)
colnames(subject_test) <- "subjectId"
colnames(x_test) <- features[, 2]
colnames(y_test) <- "activityId"
testData <- cbind(y_test, subject_test, x_test)

# Read the train set the same way.
x_train <- read.table("./train/X_train.txt", header = FALSE)
y_train <- read.table("./train/y_train.txt", header = FALSE)
subject_train <- read.table("./train/subject_train.txt", header = FALSE)
colnames(subject_train) <- "subjectId"
colnames(x_train) <- features[, 2]
colnames(y_train) <- "activityId"
trainData <- cbind(y_train, subject_train, x_train)

# 1. Merge the training and the test sets to create one data set.
dataset <- rbind(trainData, testData)
colNames <- colnames(dataset)

# 2. Extract only the id columns and the mean/std measurements.
# NOTE(review): "(", ")" and "." are regex metacharacters here, so these
# patterns match more loosely than the literal names suggest (e.g.
# "-mean().." matches "-mean" followed by any two characters). They are
# kept verbatim so the selected columns are identical to before.
selection <- (grepl("activity..", colNames) | grepl("subject..", colNames) |
                grepl("-mean()..", colNames) | grepl("-std()..", colNames)) &
  !grepl("-meanFreq()..", colNames)
mean_std_Data <- dataset[selection]
colnames(mean_std_Data)

# 3. Use descriptive activity names: left join on activityId.
data_activity <- merge(mean_std_Data, activity, by = "activityId", all.x = TRUE)
names(data_activity)

# 4. Appropriately label the data set with descriptive variable names.
names(data_activity) <- gsub("^t", "time", names(data_activity))
names(data_activity) <- gsub("^f", "frequency", names(data_activity))
names(data_activity) <- gsub("Acc", "Accelerometer", names(data_activity))
names(data_activity) <- gsub("Gyro", "Gyroscope", names(data_activity))
names(data_activity) <- gsub("Mag", "Magnitude", names(data_activity))
names(data_activity) <- gsub("BodyBody", "Body", names(data_activity))
names(data_activity)

# 5. Average every variable per subject and activity, order the result,
# and write the tidy data set.
tidyData <- aggregate(. ~ subjectId + activityId, data_activity, mean)
tidyData <- tidyData[order(tidyData$subjectId, tidyData$activityId), ]
# Fixed: the original used "row.name =", which only worked via partial
# argument matching; the argument is row.names.
write.table(tidyData, file = "tidydata.txt", row.names = FALSE)
|
aeb2096622cad68498dbea47c78960f8b3392b0f
|
c05ffccc08a88027d6d303f70eb6bec29e8e3eee
|
/P5_Trends_Graphs.R
|
a3e7477922fa2baf726495184868ef29ab42d2fa
|
[] |
no_license
|
melitanick/NCMP
|
00c0f89e0cbfb1ce4f1f2ff8ce8b1b94422e5c79
|
9f133d239166157cbee5bc46d06596ef68d779e4
|
refs/heads/master
| 2021-09-02T02:16:27.038672
| 2017-12-29T17:46:37
| 2017-12-29T17:46:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 26,960
|
r
|
P5_Trends_Graphs.R
|
###################################################################################
# #
# The R-NCMPs package has been developed by the ET-NCMP. #
# P5_Trends_Graphs.R #
# #
# This program reads the indices calculated previously and calculates the #
# trend for the period specified by the user. It produces a map of each of #
# these trends by station.For further details please refer to the User Manual #
# #
# Programmers: #
# Megan Hartwell, McMaster University, Canada #
# Lucie Vincent, Environment and Climate Change Canada #
# December 2016 #
# Modified by Simon Grainger, Bureau of Meteorology, Australia #
# December 2017 - Fixed bugs introduced in February 2017 version #
# #
###################################################################################
#
# The mapdata package is used for the high resolution world map, but the
# basic "world" database in maps seems equivalent to the wrld_simpl object in maptools
library(maps)
#library(mapdata)
library(zyp)
###################################################################################
# Gathers input info from the user #
#
inquiry <- function() {
  # Ask for the number of stations: 0 means "all", otherwise 1..200.
  # Keep prompting until the answer parses as an integer in range.
  repeat {
    ans <- readline("\nEnter the number of stations (between 1 and 200, or 0 for all): ")
    nstn <- suppressWarnings(as.integer(ans))
    if (!is.na(nstn) && nstn >= 0L && nstn <= 200L) break
  }

  # Start year of the trend period: fixed window 1950..2010.
  repeat {
    cat("Enter beginning year to calculate trends")
    ans <- readline("\n(between 1950 and 2010, ex. 1950): ")
    y1 <- suppressWarnings(as.integer(ans))
    if (!is.na(y1) && y1 >= 1950L && y1 <= 2010L) break
  }

  # End year: need at least 11 years to estimate a trend, and no later
  # than the last completed calendar year.
  lo <- y1 + 10L
  hi <- as.POSIXlt(Sys.time())$year + 1899L
  example <- max(lo, min(2015L, hi))
  prompt2 <- paste("\n(between ", lo, " and ", hi, ",ex. ", example, "): ", sep = "")
  repeat {
    cat("Enter ending year to calculate trends")
    ans <- readline(prompt2)
    y2 <- suppressWarnings(as.integer(ans))
    if (!is.na(y2) && y2 >= lo && y2 <= hi) break
  }

  # Return c(number of stations, start year, end year).
  c(nstn, y1, y2)
}
#
# User input collected. Done! #
###################################################################################
# Ask the user when running interactively; otherwise (e.g. under Rscript)
# fall back to batch defaults. Previously `a` was only assigned in the
# interactive case, so non-interactive runs failed below with
# "object 'a' not found".
if (interactive()) {
  a <- inquiry()
} else {
  a <- c(0L, 1950L, 2015L)  # all stations, 1950-2015
}
nstn <- a[1]            # requested number of stations (0 = all)
nyb <- a[2]             # first year of the trend period
nye <- a[3]             # last year of the trend period
nyrs <- nye - nyb + 1L  # number of years in the period
###################################################################################
# Creates directories for output files #
#
folder <- "A5_Trends_Graphs" #
folder2 <- c("Graphs_Annual_Station","Maps_Annual_Station","Graphs_Annual_Region",#
"Ranks_Annual_Region") #
# One output directory per product, suffixed with the trend period years.
dirs <- file.path(folder,paste(folder2,nyb,nye,sep="_")) # adds separator "/" #
for (dname in dirs) dir.create(dname,showWarnings=FALSE,recursive=TRUE) #
#
# Directories created. Done! #
###################################################################################
###################################################################################
# Reads the station list #
files <- read.table("A2_Indices/P2_Station_List.txt",header=TRUE,stringsAsFactors=FALSE)
Station <- files[,"Station"] #
# NOTE(review): this overwrites the station count the user entered in inquiry();
# every station in the list is always processed.
nstn <- nrow(files) #
#
# Read station list. Done! #
###################################################################################
# Construct elements for the 16 indices to graph
# ncmpn gives the NCMP group of each element; group 6 marks the extremes
# indices, which need an extra row dropped when their CSVs are read (see loop).
ncmpn <- c(1L,2L,3L,6L,5L,4L,5L,4L,6L,6L,6L,6L,1L,2L,2L,2L)
folderi <- "A2_Indices"
folder2 <- paste("NCMP",ncmpn,sep="")
# Long names: used to build input file paths and graph titles.
ele <- c("Monthly_Mean_Temp_Anom","Monthly_Total_Prec_Anom_Norm",
"Standard_Prec_Index","Extreme_Prec",
"Cold_Days","Warm_Days",
"Cold_Nights","Warm_Nights",
"Extreme_Cold_Day","Extreme_Warm_Day",
"Extreme_Cold_Night","Extreme_Warm_Night",
"Monthly_Mean_Temp","Monthly_Total_Prec",
"Monthly_Total_Prec_Anom","Monthly_Total_Prec_Ratio")
# Short codes: used in file names and as column names of the trends table.
ele2 <- c("TMA","PrAn","SPI","RXday1","TX10p","TX90p","TN10p","TN90p",
"TXn","TXx","TNn","TNx","TM","Pr","PrA","PrR")
iext <- (ncmpn == 6L) # which are extremes indices
# Output table of trend values
# Columns alternate: trend value (numeric) then significance flag
# ("y"/"n"/"?") for each of the 16 elements.
X <- data.frame(Station[1:nstn],
NA_real_,"?",NA_real_,"?",NA_real_,"?",NA_real_,"?",
NA_real_,"?",NA_real_,"?",NA_real_,"?",NA_real_,"?",
NA_real_,"?",NA_real_,"?",NA_real_,"?",NA_real_,"?",
NA_real_,"?",NA_real_,"?",NA_real_,"?",NA_real_,"?",stringsAsFactors=FALSE)
names(X) <- c("Station",as.vector(t(matrix(c(ele2,paste(ele2,"S",sep="_")),ncol=2))))
# Information for plots - have 16 elements but only plotting the first 12
# Also determining the range of the monthly means from the annual range
# NA in ymin/ymax means "compute the axis range from the data" in the loop.
ymin <- c(-4,-50,-4,NA,rep( 0,4),rep(NA,8))
ymax <- c( 4, 50, 4,NA,rep(25,4),rep(NA,8))
ylabel <- c("deg C","%","no units","mm",rep("%",4),rep("deg C",5),"mm","mm","%")
# Titles of graphs
title <- paste("(",letters[1:16],") ",gsub("_"," ",ele),sep="")
# Output PDF file names by station
namep <- file.path(dirs[1],paste(Station,"_Annual.pdf",sep=""))
# Even columns of X hold trend values; sq+1 are the significance flags.
sq <- seq(from=2,to=32,by=2) # sequence of even numbers
# Begins loop for reading data files and doing calculations
cat("Analysis by station",fill=TRUE)
# For each station: read the 16 index CSVs, plot the first 12 as a
# multi-panel PDF, fit a Zhang/zyp trend to the annual series of each
# element, and store trend + significance flag in row i of X.
# NOTE(review): 1:nstn breaks if the station list were empty - seq_len(nstn)
# would be safer, but the code is left unchanged here.
for (i in 1:nstn) { # 1:nstn
cat("\t",i,"\t",Station[i],fill=TRUE)
###################################################################################
# Calculate the trend for each element #
#
trend <- rep(NA,16) # empty vector to fill with trends #
pval <- rep(NA,16) # empty vector to fill with p-values #
# Input CSV path for each of the 16 elements of this station.
namex <- file.path(folderi,folder2,ele,paste(Station[i],"_",ele2,".csv",sep=""))#
#
pdf(namep[i],width=7,height=5) #
par(mfrow=c(2,2),mar=c(3,3,2,0.5)+0.1,mgp=c(2,0.5,0),tcl=-0.25,las=1) #
for (j in 1:16) { #
#
# Read in data - extremes indices require deleting the last row #
# Will only be plotting annual values #
#
In <- read.csv(namex[j],header=TRUE,stringsAsFactors=FALSE,na.strings="-99.9")#
if (iext[j]) { #
In <- In[-nrow(In),] #
In[,'Year'] <- as.integer(In[,'Year']) # Year needs to be converted #
} #
ref <- (In[,'Year'] >= nyb & In[,'Year'] <= nye) # Select ref years #
#
# Allow for dynamic range for any variable by setting the default value to NA #
# RX1day is a special case - lower limit is zero, upper is set to 50 above #
#
if (j <= 12L) { #
if (!is.na(ymin[j])) { #
ylim <- c(ymin[j],ymax[j]) #
} else if (ele2[j] == 'RXday1') { #
ylim <- c(0,ceiling(max(In[ref,'Annual'],na.rm=TRUE)/50)*50) #
} else { #
yrng <- range(In[ref,'Annual'],na.rm=TRUE) #
ylim <- c(floor(yrng[1]),ceiling(yrng[2])) # expanded to nearest integer #
} #
plot(In[ref,'Year'],In[ref,'Annual'],xlab="Year",ylab=ylabel[j],main=title[j],
col="Blue",type="l",ylim=ylim,yaxs="i") #
} #
#
# Need more than 10 non-missing annual values to estimate a trend.
if (sum(!is.na(In[ref,'Annual'])) > 10L) { # check if have enough data #
q <- zyp.trend.vector(In[ref,'Annual'],In[ref,'Year'],method='zhang') #
trend[j] <- q['trendp'] # extract trend over period to vector #
pval[j] <- q['sig'] # extract p-value to vector #
# lq <- lm(Annual ~ Year,In,ref) # Linear best fit #
# q <- coef(summary(lq)) # Extract coefficients #
# trend[j] <- q[2,1]*nyrs # trend per year times number of years #
# pval[j] <- q[2,4] #
if (j <= 12L) { #
abline(q[c('intercept','trend')],col="Red",lty=2) # plot trend line on graph #
# abline(q[,1],col="Red",lty=2) # plot trend line on graph #
} #
} #
} # Ends loop for each element #
dev.off() # close PDF file #
#
X[i,sq] <- round(trend,2) # put trend into data frame #
# NA p-value (too little data) is flagged "?", otherwise "y"/"n" at 5% level.
X[i,sq+1] <- ifelse(is.na(pval),"?",ifelse(pval < 0.05,"y","n")) # signif flag #
#
# Calculated the trend and significance of each element #
###################################################################################
} # Ends loop for stations
###################################################################################
# Write trends and significance - including absolute monthly values #
# Consider separating as per Region Averages (and now Count Records) #
#
filet <- file.path(folder,paste("Trends_Annual_stn_",nyb,"_",nye,".csv",sep="")) #
write.csv(X,file=filet,row.names=FALSE,na="-99.90") # write data frame #
#
# Finished writing trends. Done! #
###################################################################################
cat("Mapping by station",fill=TRUE)
# Add station lat/lon back into trends
# Note that have enforced -180 <= longitudes <= 180 in modified station file
# to work generally with the maps package and the Greenwich Meridian
# But this means tackling the Dateline - can use the "world2" map here
# Dt column layout: station metadata (cols 1-3, col 2 = lat, col 3 = lon)
# followed by the alternating trend/flag columns of X.
Dt <- cbind(files[1:nstn,],X[,-1])
lonrng <- range(Dt[,3])
if (lonrng[2] - lonrng[1] > 180) {
ind <- which(Dt[,3] < 0) # should be at least 1
Dt[ind,3] <- Dt[ind,3] + 360 # needed since world2 map has range 0-360
xlim <- c(ceiling(lonrng[2]),floor(lonrng[1])+360)
wmap <- "world2"
} else {
xlim <- c(floor(lonrng[1]),ceiling(lonrng[2]))
wmap <- "world"
}
latrng <- range(Dt[,2])
ylim <- c(floor(latrng[1]),ceiling(latrng[2]))
# Names of maps produced - why JPEG and not PDF as per other graphs?
mapf <- file.path(dirs[2],paste("Map_",c("Stns",ele2),"_Annual.jpg",sep=""))
# Prec normalised anomaly (2) is %
uts <- c("Deg C","%","no units","mm",rep(c("%","Deg C",NA),each=4))
###################################################################################
# Functions for size colour and direction of triangles for mapping #
# These are all vectorised, so should be able to call once for each graph #
# Set up size divisions and colours for triangles by index #
# Missing values => NA => no triangle #
#
multiple <- c(1,10,1,10,3,3,3,3,2,2,2,2,1,50,50,10) #
colup <- c("red",rep("darkgreen",3),rep("red",9),rep("darkgreen",3)) #
coldown <- ifelse(colup == "red","blue","tan4") # consistently match colup #
#
# NOTE(review): unlike Colour()/Back(), Size() reads `i` (and `multiple`)
# from the calling environment, so it only works inside the station-map
# loop below where `i` is the current element index - confirm before reuse.
Size <- function(x) # input: trend, output: size with limit (TEST) #
{pmin(trunc(abs(x)/multiple[i])+1,3)} #
#
Type <- function(x) # input: trend, output: symbol triangle up/down #
{ifelse(x >= 0,24,25)} #
#
Colour <- function(x,i) # input: trend, index, output colour #
{ifelse(x >= 0,colup[i],coldown[i])} #
#
Back <- function(x,y,i) # input: trend, signif, index, output: fill if signif #
{ifelse(y == 'y',Colour(x,i),"white")} #
#
# Functions for mapping! #
###################################################################################
###################################################################################
# Map Stations #
# Now using the standard resolution world map instead of high resolution #
#
cat("\t Stns",fill=TRUE) # write update in terminal #
jpeg(mapf[1]) #
map(wmap,xlim=xlim,ylim=ylim,col="gray80",fill=TRUE) # grey fill #
points(Dt[,3],Dt[,2],pch=16,col="Red",cex=1.0) # add dots of lat/long #
# NOTE: `title` was rebound to a character vector above; this call still
# resolves to graphics::title() because R skips non-function bindings
# when looking up a name used as a function.
title(main="Stn locations") # map title #
dev.off() # close map #
#
# One JPEG map per plotted element: triangle direction = trend sign,
# size = magnitude bucket, filled when significant at the 5% level.
for (i in 1:12) { #
cat("\t",ele2[i],fill=TRUE) # write update in terminal #
S <- Size(Dt[,2*i+2]) # size of all triangles #
Ty <- Type(Dt[,2*i+2]) # type (up/down) of all triangles #
Cr <- Colour(Dt[,2*i+2],i) # colour of all triangles #
Bg <- Back(Dt[,2*i+2],Dt[,2*i+3],i) # fill of all triangles (if signif) #
#
jpeg(mapf[i+1]) #
map(wmap,xlim=xlim,ylim=ylim,col="gray80",fill=TRUE) #
title(main=title[i]) #
points(Dt[,3],Dt[,2],pch=Ty,col=Cr,bg=Bg,cex=0.4*S+0.9,lwd=2) #
#
# Legend only shows the filled triangles #
S <- 0.4*c(2:0,0:2)+0.9 #
Cr <- rep(c(colup[i],coldown[i]),each=3) #
Ty <- rep(24:25,each=3) #
n1 <- c(Inf,2:-2) #
n2 <- c(2:-2,-Inf) #
lT <- paste(n2*multiple[i],"< x <",n1*multiple[i]) #
# Smaller/larger cex = 0.8/1.0: #
legend("topleft",legend=lT,title=uts[i],pch=Ty,col=Cr,pt.cex=S,cex=0.8,pt.bg=Cr)#
dev.off() # close map #
} #
#
# Finished mapping stations! #
###################################################################################
cat("Analysis by region",fill=TRUE)
# Element relating to NCMP Index
ele <- c("TMA","PrAn","PrA","SPI","TX90p","TN90p","TX10p","TN10p")
# Names of input regional average files
folderz <- "A4_Region_Average"
filez <- file.path(folderz,paste("NCMP",ele,"Region_Avg.csv",sep="_"))
# Names of output regional average graphs and ranks
namep <- file.path(dirs[3],paste("NCMP_",ele,"_Annual.pdf",sep=""))
namex <- file.path(dirs[4],paste("NCMP_",ele,"_Annual_Rank.csv",sep=""))
# Titles of graphs
title <- c("Mean Temp Anom","Prec Anom Norm","Prec Anom","Standardized Prec Index",
"Warm Days","Warm Nights","Cold Days","Cold Nights")
# Range for graphs - need to allow the Prec Anom (at least) to vary
# NA again means "compute the axis range from the data".
ymin <- c(-4,-50,NA,-2,rep( 0,4))
ymax <- c( 4, 50,NA, 2,rep(25,4))
ylabel <- c("deg C","%","mm","no units",rep("%",4)) # Labels for y axis
# Output data.frame for trend and significance
# Changed column names for consistency with other output tables
# Columns alternate trend value / significance flag for each of the 12
# months plus the annual series.
X <- data.frame(title,
NA_real_,"?",NA_real_,"?",NA_real_,"?",NA_real_,"?",
NA_real_,"?",NA_real_,"?",NA_real_,"?",NA_real_,"?",
NA_real_,"?",NA_real_,"?",NA_real_,"?",NA_real_,"?",
NA_real_,"?",stringsAsFactors=FALSE)
cnames <- c(month.name,"Annual")
names(X) <- c("NCMP",as.vector(t(matrix(c(cnames,paste(cnames,"S",sep="_")),ncol=2))))
sq <- seq(from=2,to=26,by=2) # sequence of even numbers
for (i in 1:8) { # Loop for elements
cat("\t",ele[i],fill=TRUE)
###################################################################################
# Calculations for trends by month #
# Not checking that have long enough period to estimate the trend #
# Is there much difference between ZYP trends and linear least squares regression?#
#
trend <- rep(NA,13) # empty vector to fill with trends #
pval <- rep(NA,13) # empty vector to fill with p-values #
#
In <- read.csv(file=filez[i],header=TRUE,na.strings="-99.9",check.names=FALSE) #
# Month == 13 encodes the annual series in the regional-average files.
for (j in 1:13) { # Loop through months and annual #
ref <- which(In[,'Year'] >= nyb & In[,'Year'] <= nye & In[,'Month'] == j) #
q <- zyp.trend.vector(In[ref,'Index'],In[ref,'Year'],method="zhang") #
trend[j] <- q['trendp'] # extract trend over period to vector #
pval[j] <- q['sig'] # extract p-value to vector #
# lq <- lm(Index ~ Year,In,ref) # Linear best fit #
# q <- coef(summary(lq)) # Extract coefficients #
# trend[j] <- q[2,1]*nyrs # trend per year times number of years #
# pval[j] <- q[2,4] #
} #
#
X[i,sq] <- round(trend,2) # put trend into data frame #
# NOTE(review): unlike the station table, an NA p-value here yields an NA
# flag rather than "?" - confirm whether that inconsistency is intended.
X[i,sq+1] <- ifelse(pval < 0.05,"y","n") # put signif flag into data frame #
#
# Monthly trends calculated and written. Done! #
###################################################################################
###################################################################################
# Examine annual values to graph and rank. #
# These are preserved as the last index in the loop (j == 13) #
# (i.e. `ref` and `q` below are leftovers from the final, annual iteration) #
#
pdf(namep[i]) # Open plot #
if (!is.na(ymin[i])) { #
ylim <- c(ymin[i],ymax[i]) #
} else { #
yrng <- range(In[ref,'Index'],na.rm=TRUE) #
ylim <- c(floor(yrng[1]),ceiling(yrng[2])) #
} #
plot(In[ref,'Year'],In[ref,'Index'],type="l",col="Blue",ylim=ylim,yaxs="i", #
xlab="Year",ylab=ylabel[i],main=title[i],las=1) #
#
abline(q[c("intercept","trend")],col="Red",lty=2) # Add trend line #
# abline(q[,1],col="Red",lty=2) #
dev.off() # Close plot #
#
Rank <- order(In[ref,"Index"],decreasing=TRUE) # missing values are put last #
In[ref,"Index"] <- round(In[ref,"Index"],3) # round to 3dp for writing to file #
write.csv(In[ref[Rank],-2],file=namex[i],row.names=FALSE,na="-99.900") #
#
# Finished graphing/ranking annual values. Done! #
###################################################################################
} # End element loop
###################################################################################
# Write regional average trends and significance with months in columns #
#
filet <- file.path(folder,paste("Trends_Region_",nyb,"_",nye,".csv",sep="")) #
write.csv(X,file=filet,row.names=FALSE,na="-99.90") # write trends #
#
# Finished writing trends. Done! #
###################################################################################
cat("Calculations and maps done!",fill=TRUE)
|
c84ad19879f82107acd9a2f71dc82a8af0adf802
|
ea967eeccfccc59acc2a79793826049f5ed7bc91
|
/man/chac.Rd
|
a56477640afe9390bcf0b00e7d3f7fe827be1e96
|
[] |
no_license
|
pneuvial/adjclust
|
0d9cffb36b3313defec73b783e01ef68f059fe58
|
e3532a2ba23f54572fbbb8b81e31b29a7b7708e5
|
refs/heads/develop
| 2023-05-07T15:16:27.161557
| 2023-04-26T07:24:06
| 2023-04-26T07:24:06
| 61,875,551
| 15
| 17
| null | 2023-04-28T06:44:33
| 2016-06-24T10:02:56
|
R
|
UTF-8
|
R
| false
| true
| 4,905
|
rd
|
chac.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chac.R
\name{chac}
\alias{chac}
\alias{as.hclust.chac}
\alias{print.chac}
\alias{head.chac}
\alias{summary.chac}
\alias{plot.chac}
\alias{diagnose}
\alias{diagnose.chac}
\alias{correct}
\alias{correct.chac}
\alias{cutree_chac}
\alias{cuttree_chac}
\title{Class chac}
\usage{
\method{as.hclust}{chac}(x, ...)
\method{print}{chac}(x, ...)
\method{head}{chac}(x, ...)
\method{summary}{chac}(object, ...)
\method{plot}{chac}(
x,
y,
...,
mode = c("standard", "corrected", "total-disp", "within-disp", "average-disp"),
nodeLabel = FALSE
)
diagnose(x, graph = TRUE, verbose = TRUE)
correct(x)
cutree_chac(tree, k = NULL, h = NULL)
}
\arguments{
\item{x, object, tree}{an object of class 'chac'}
\item{...}{for \code{\link{plot}}, arguments passed to the function
\code{\link{plot.dendrogram}}. Default values for \code{type} and
\code{leaflab} are respectively set to \code{"triangle"} and \code{"none"}}
\item{y}{not used}
\item{mode}{type of dendrogram to plot (see Details). Default to
\code{"standard"}}
\item{nodeLabel}{(logical) whether the order of merging has to be displayed
or not. \code{nodeLabel=TRUE} prints orders of fusion at corresponding
nodes. Default to \code{FALSE}}
\item{graph}{(logical) whether the diagnostic plot has to be displayed or
not. Default to \code{TRUE}}
\item{verbose}{(logical) whether to print a summary of the result or not.
Default to \code{TRUE}}
\item{k}{an integer scalar or vector with the desired number of groups}
\item{h}{numeric scalar or vector with heights where the tree should be cut.
Only available when the heights are increasing}
}
\value{
The function \code{plot.chac} displays the dendrogram and
additionally invisibly returns an object of class
\code{\link[stats]{dendrogram}} with heights as specified by the user through
the option \code{mode}.
\code{\link{diagnose}} invisibly exports a data frame with the
numbers of decreasing merges described by the labels of the clusters being
merged at this step and at the previous one, as well as the corresponding
merge heights.
The function \code{\link{correct}} returns a \code{chac} objects with
modified heights so as they are increasing. The new heights are calculated in
an way identical to the option \code{mode = "corrected"} of the function
\code{plot.chac} (see Details). In addition, the \code{chac} object has its
field \code{method} modified from \code{adjClust} to
\code{adjClust-modified}.
The function \code{\link{cutree_chac}} returns the clustering with
\code{k} groups or with the groups obtained by cutting the tree at height
\code{h}. If the heights are not increasing, the cutting of the tree is based
on the corrected heights as provided by the function \code{correct}.
}
\description{
S3 class for Constrained Hierarchical Agglomerative Clustering results
}
\details{
Methods for class 'chac'
When \code{\link{plot.chac}} is called with
\code{mode = "standard"}, the standard dendrogram is plotted, even though,
due to contiguity constraints, some branches are reversed (decreasing
merges). When \code{\link{plot.chac}} is called with
\code{mode = "corrected"}, a correction is applied to original heights so as
to have only non-decreasing merges. It does not change the result of the
clustering, only the look of the dendrogram for easier interpretation.\cr\cr
Other modes are provided that correspond to different alternatives
described in Grimm (1987): \itemize{
\item in \code{mode = "within-disp"}, heights correspond to within-cluster
dispersion, \emph{i.e.}, for a corresponding cluster, its height is
\deqn{I(C) = \sum_{i \in C} d(i,g_C)} where \eqn{d} is the dissimilarity
used to cluster objects and \eqn{g_C} is the center of gravity of cluster
\eqn{C}. In this case, heights are always non decreasing;
\item in \code{mode = "total-disp"}, heights correspond to the total
within-cluster dispersion. It is obtained from \code{mode = "standard"} by
the cumulative sum of its heights. In this case, heights are always
non decreasing;
\item in \code{mode = "average-disp"}, heights correspond to the
within-cluster dispersion divided by the cluster size. In this case, there
is no guarantee that the heights are non-decreasing. When reversals are
detected, a warning is printed to advice the user to change the mode of the
representation.}
Grimm (1987) indicates that heights as provided by
\code{mode = "within-disp"} are highly dependent on cluster sizes and that
the most advisable representation is the one provided by
\code{mode = "total-disp"}. Further details are provided in the vignette
"Notes on CHAC implementation in adjclust".
}
\references{
{ Grimm, E.C. (1987) CONISS: a fortran 77 program for
stratigraphically constrained analysis by the method of incremental sum of
squares. \emph{Computers & Geosciences}, \strong{13}(1), 13-35. }
}
|
a80fb30e56433ab172279fc9f4f60d740cdaec02
|
d119af4b62debd9019a54971ff19ff97560001da
|
/base_codes/allele_distribution_effects.R
|
91a20a21c5b7b4e318808bb4ff9d7f53d8ea4645
|
[] |
no_license
|
kenoll/U19-Ab
|
3f438a45bc551f0f3a02475023c31ee3a98985b7
|
a48446d8916f99025e743d121d373fe295869734
|
refs/heads/master
| 2021-01-22T04:09:33.558744
| 2017-10-12T17:59:09
| 2017-10-12T17:59:09
| 81,491,368
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,469
|
r
|
allele_distribution_effects.R
|
#2016_11_23 updated to used average scores that encompass recombinations within locus
## Exploratory script: relate founder-allele scores at a Chr5 QTL to day-10
## antibody phenotypes, first on strain (RIX) averages and then on individual
## mice. Produces allele-score histograms, jitter plots, and a DOQTL scan that
## uses the Chr5 allele score as an additive covariate.
## NOTE(review): depends on objects created elsewhere (dat.10, abs, day,
## day.list, k, model.probs, K, MM_snps) -- run only after the upstream setup.
setwd("~/Dropbox/Heise/ELISA Antibody/")
chrom=read.csv("qtls/d10/snps/chr5_68-75_scores.csv")
##### strain averages #####
dat=dat.10
## Mean of each antibody isotype per RIX line and day (doBy::summaryBy).
Map.dat=summaryBy(IgG1+IgG2ac+IgG2b+IgG3+IgM+TotalG ~
RIX + day, data=dat, FUN=mean, na.rm=T)
colnames(Map.dat)[3:8] = gsub(".mean","",colnames(Map.dat[3:8]))
Map.dat$RIX=as.character(Map.dat$RIX)
## RIX names are "damxsire"; split into the two parental strain columns.
RIX_sep <- data.frame(do.call("rbind", strsplit(Map.dat$RIX,"x")))
colnames(RIX_sep)[1:2]=c("dam","sire")
Map.dat=cbind(RIX_sep,Map.dat)
## Keep only strain name and the last score column, then merge the score in
## once for the dam and once for the sire.
chrom=chrom[c(1,length(chrom))]
colnames(chrom)[1]="dam"
Map.dat=merge(Map.dat,chrom)
colnames(Map.dat)[11]="dam.chrom"
colnames(chrom)[1]="sire"
Map.dat=merge(Map.dat,chrom)
colnames(Map.dat)[12]="sire.chrom"
Map.dat=Map.dat[c(1,12,2,11,3:10)]
## Additive allele score = dam score + sire score.
Map.dat$additive.status=rowSums(Map.dat[c("dam.chrom","sire.chrom")])
Map.dat$additive.status=as.numeric(Map.dat$additive.status)
hist(Map.dat$additive.status,main=paste("Distribution of Founder Alleles at Chr5 QTL"),
xlab=paste("Allele Score at Chr5 QTL"),col="moccasin")
# png("qtls/d10/chr5_distribution.png",width=500,height=400)
#
# hist(Map.dat$additive.status,main=paste("Distribution of Founder Alleles at Chr5 QTL"),
# xlab=paste("Allele Score at Chr5 QTL"),col="moccasin")
#
# dev.off()
## Treat the score as a factor for the jitter plot coloring.
Map.dat$additive.status=as.character(Map.dat$additive.status)
Map.dat$additive.status=as.factor(Map.dat$additive.status)
ggplot(Map.dat, aes(additive.status, IgM))+labs(x="Allele Score at Chr5 QTL",y="Log10 IgM AUC")+
geom_jitter(width=0.5,aes(colour = additive.status))+theme_minimal()+theme(legend.position='none')
ggsave("qtls/d10/chrom_allele_igm_strainavg.png")
#####mapping with an allele effect as a covariate
# library(doBy)
# library(DOQTL)
# load("~/Dropbox/Heise/ELISA Antibody/R codes/CCRIXb38F.Rdata")
# load(url("ftp://ftp.jax.org/MUGA/MM_snps.Rdata"))
#
# model.probs<-model.probs+(1e-20)
# K=kinship.probs(model.probs)
## Back to numeric for use as a covariate in the QTL scan.
Map.dat$Sex="F"
Map.dat$RIX=as.factor(Map.dat$RIX)
Map.dat$additive.status=as.character(Map.dat$additive.status)
Map.dat$additive.status=as.numeric(Map.dat$additive.status)
row.names(Map.dat)<-Map.dat$RIX
covar = data.frame(sex=as.numeric(Map.dat$Sex == "F"),chrom=Map.dat$additive.status)
rownames(covar)=rownames(Map.dat)
## One DOQTL scan per antibody phenotype, all on one PNG.
png(file.path(paste("qtls/d",day,sep=""),paste("qtl_IgM_chromcovariate_2_d",day.list[[k]],".png",sep="")),width=1000,height=500)
par(mfrow = c(2,3))
for(i in 1:6)
{
pheno=abs[i]
qtl=scanone(pheno=Map.dat, pheno.col=pheno, addcovar=covar, probs=model.probs, K=K, snps=MM_snps)
plot(qtl,main=paste("Day",day.list[[k]],pheno,"w/ chrom as covariate",sep=" "))
# saveRDS(qtl,file.path(paste("qtls/d",day,sep=""),paste("qtl_scan_d",day,"_",abs[i],".rds",sep="")))
# save(qtl,file=file.path(paste("qtls/d",day,sep=""),paste("qtl_scan_d",day,"_",abs[i],".RData",sep="")))
}
dev.off()
## NOTE(review): this closing brace has no visible opener in this file --
## likely a leftover from an outer loop; confirm against the original script.
}
## Zoom in on chromosome 5 around the QTL peak.
qtl.chr=subset(qtl$lod$A,qtl$lod$A$chr==5)
qtl.chr.sub=subset(qtl.chr,qtl.chr$pos>63 & qtl.chr$pos<75)
ggplot(qtl.chr.sub, aes(pos, lod))+geom_point()+theme_minimal()
#### individual mice ####
## Same merge/plot pipeline as above, but on per-mouse data rather than
## strain averages, using a curated Chr5 status file.
setwd("~/Dropbox/Heise/ELISA Antibody/")
chrom=read.csv("qtls/d10/chr5_status_curated.csv")
dat=dat.10
dat$RIX=as.character(dat$RIX)
RIX_sep <- data.frame(do.call("rbind", strsplit(dat$RIX,"x")))
colnames(RIX_sep)[1:2]=c("dam","sire")
dat=cbind(RIX_sep,dat)
chrom=chrom[c(1,8)]
colnames(chrom)[1]="dam"
dat=merge(dat,chrom)
colnames(dat)[(length(dat))]="dam.chrom"
colnames(chrom)[1]="sire"
dat=merge(dat,chrom)
colnames(dat)[(length(dat))]="sire.chrom"
dat=dat[c(1,14,2,13,3:11)]
dat$additive.status=rowSums(dat[c("dam.chrom","sire.chrom")])
dat$additive.status=as.numeric(dat$additive.status)
hist(dat$additive.status,main=paste("Distribution of Founder Alleles at Chr5 QTL"),
xlab=paste("Allele Score at Chr5 QTL"),col="moccasin")
# png("qtls/d10/chr5_distribution.png",width=500,height=400)
#
# hist(dat$additive.status,main=paste("Distribution of Founder Alleles at Chr5 QTL"),
# xlab=paste("Allele Score at Chr5 QTL"),col="moccasin")
#
# dev.off()
dat$additive.status=as.character(dat$additive.status)
dat$additive.status=as.factor(dat$additive.status)
## NOTE(review): the y aesthetic is IgG3 but the axis label says IgM -- confirm
## which phenotype was intended.
ggplot(dat, aes(additive.status, IgG3))+labs(x="Allele Score at Chr5 QTL",y="Log10 IgM AUC")+
geom_jitter(width=0.5,aes(colour = additive.status))+theme_minimal()+theme(legend.position='none')
ggsave("qtls/d10/chrom_allele_igm.png")
|
8390d43655a8d95a945a4df417c426900a9ba033
|
427db53b82601e0d8d344410c624a3c3e2797bbe
|
/R/Monty Hall.R
|
40907e870fbcc194ba7d5ce0b5b84c45f29ed539
|
[] |
no_license
|
idjs2/montyhall
|
f053da5204a9e3d93050775f665b91df62ae52ac
|
ee4ec14f0c1c22b116f6af5b76717eb9785b04c1
|
refs/heads/master
| 2022-07-09T05:45:48.275628
| 2020-05-10T10:15:01
| 2020-05-10T10:15:01
| 262,764,566
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,612
|
r
|
Monty Hall.R
|
#' 몬티홀 딜레마함수 montyhall(k)
#'
#' 함수 설명
#' 몬티홀문제를 3가지 경우로 나누어 어떠한 선택에도 결론(선택을 바꾸는 것이 유리하다.)을 도출할 수 있다는 것을
#' 증명한 함수입니다.
#' 각 케이스의 첫째 행은 쇼의 주인공이 모르는 문 뒤의 상황입니다. (k는 주인공이 선택한 문의 번호입니다.k=1,2,3)
#' 두번째 행은 주인공의 선택(Choice)과 사회자가 열어서 보여준 염소(Open)의 위치입니다.
#'
#' @example
#' montyhall(1) 1번문을 선택한 경우
#' [1] "CASE1"
#' [1] "염소" "염소" "스포츠카" (첫번째 선택 전)
#' [1] "Choice" "Open" "스포츠카" (첫번째 선택 후)
#' [1] "CASE2"
#' [1] "염소" "스포츠카" "염소" (첫번째 선택 전)
#' [1] "Choice" "스포츠카" "Open" (첫번째 선택 후)
#' [1] "CASE3"
#' [1] "스포츠카" "염소" "염소" (첫번째 선택 전)
#' [1] "Choice" "Open" "염소" (첫번째 선택 후)
#' [1] "2/3의 확률. 당신은 선택을 바꾸는 것이 유리하다."
#'
#'
#'
#' 함수 코드
# Monty Hall demonstration: for a fixed first pick (door k = 1, 2 or 3),
# enumerate the three possible prize arrangements, show the host opening a
# goat door in each case, and report that switching wins in 2 of 3 cases.
montyhall <- function(k) {
  doors <- c("염소", "염소", "스포츠카")
  for (case_no in 1:3) {
    board <- doors
    board[k] <- "Choice"
    # The host opens the first remaining goat ("염소") door.
    goat <- match("염소", board)
    if (!is.na(goat)) {
      board[goat] <- "Open"
      print(paste0("CASE", case_no))
      print(paste0(doors))
      print(paste0(board))
    }
    # Rotate the prize arrangement for the next case.
    doors <- doors[c(2, 3, 1)]
  }
  print("2/3의 확률. 당신은 선택을 바꾸는 것이 유리하다.")
}
|
775117988f0d64053d337f76b87b80a2bf68193e
|
a1e3f742d80a225e9a2a35e8e88b3054f5408037
|
/R/boot.gomp.R
|
5907670e1b3af38ef30e326c3f56556396abbd31
|
[] |
no_license
|
cran/MXM
|
7590471ea7ed05944f39bf542c41a07dc831d34f
|
46a61706172ba81272b80abf25b862c38d580d76
|
refs/heads/master
| 2022-09-12T12:14:29.564720
| 2022-08-25T07:52:40
| 2022-08-25T07:52:40
| 19,706,881
| 0
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,059
|
r
|
boot.gomp.R
|
## Bootstrap stability selection for the generalized orthogonal matching
## pursuit (gomp) variable-selection algorithm: refit gomp on B bootstrap
## resamples of the rows and report, for every variable ever selected, the
## proportion of resamples in which it was chosen.
##
## target  : response vector/matrix (reshaped to n x 1 if not a matrix)
## dataset : predictor matrix
## tol, test, method : passed straight through to gomp()
## B       : number of bootstrap resamples
## ncores  : if > 1, resamples run in parallel via doParallel/foreach
##
## Returns list(runtime = elapsed proc.time() delta,
##              res = matrix with "Variable" and "Selection proportion").
boot.gomp <- function(target, dataset, tol = qchisq(0.95, 1), test = "testIndLogistic", method = "ar2", B = 500, ncores = 1) {
runtime <- proc.time()
sel <- NULL
n <- dim(dataset)[1]
if ( !is.matrix(target) ) dim(target) <- c(n, 1)
if ( ncores <= 1 ) {
for (i in 1:B) {
## Resample rows with replacement; [-1, 1] drops the intercept row and
## keeps the selected variable indices.
ina <- sample(n, n, replace = TRUE)
sel <- c(sel, gomp(target[ina, ], dataset[ina, ], tol = tol, test = test, method = method)$res[-1, 1])
}
} else {
## Parallel branch: one gomp fit per foreach iteration on a PSOCK cluster.
cl <- makePSOCKcluster(ncores)
registerDoParallel(cl)
mod <- foreach(i = 1:B, .packages = "MXM", .export = "gomp") %dopar% {
ina <- sample(n, n, replace = TRUE)
sel <- MXM::gomp(target[ina, ], dataset[ina, ], tol = tol, test = test, method = method)$res[-1, 1]
return( sel )
}
stopCluster(cl)
sel <- unlist(mod)
}
## Selection proportion = how many of the B resamples picked each variable.
res <- cbind(unique(sel), Rfast::Table(sel)/B )
colnames(res) <- c("Variable", "Selection proportion")
runtime <- proc.time() - runtime
list(runtime = runtime, res = res)
}
|
a80376cd9b0fe46f068ff452e7ecbd596fff2388
|
9a5b39ad69a9e79ef101711aa20f892b4d50316f
|
/man/modelHomotypic.Rd
|
e474d97abf5f324ef417e7bf2212cbd2601e7f2f
|
[] |
no_license
|
chris-mcginnis-ucsf/DoubletFinder
|
207dc938574b2647d5dc73a47c520fc9eb782548
|
1b1d4e2d7f893a3552d9f8f791ab868ee4c782e6
|
refs/heads/master
| 2023-09-01T19:51:25.819412
| 2023-08-18T22:26:49
| 2023-08-18T22:26:49
| 138,660,553
| 288
| 76
| null | 2023-08-18T22:26:50
| 2018-06-25T23:32:45
|
R
|
UTF-8
|
R
| false
| false
| 1,344
|
rd
|
modelHomotypic.Rd
|
\name{modelHomotypic}
\alias{modelHomotypic}
\title{modelHomotypic}
\description{
Leverages user-provided cell annotations to model the proportion of homotypic doublets. Building on the assumption that literature-supported annotations reflect real transcriptional divergence, homotypic doublet proportions are modeled as the sum of squared annotation frequencies.
}
\usage{modelHomotypic(annotations)}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{annotations}{ An nCell-length character vector of annotations.
}
}
\details{
}
\value{ Numeric proportion of homotypic doublets. }
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
## Initial run, nExp set to Poisson loading estimate (e.g., 913 total doublet predictions)
nExp_poi <- round(0.15*length(seu@cell.names))
seu <- doubletFinder(seu, pN = 0.25, pK = 0.01, nExp = nExp_poi, reuse.pANN = FALSE)
## With homotypic adjustment
homotypic.prop <- modelHomotypic(annotations)
nExp_poi.adj <- round(nExp_poi*(1-homotypic.prop))
seu <- doubletFinder(seu, pN = 0.25, pK = 0.01, nExp = nExp_poi.adj, reuse.pANN = "pANN_0.25_0.01_913")
}
|
06155c366e9b60df5c4f6560342dc00ce3614142
|
13ab7466ef1fe4d688eaea3668ea79e549ca5467
|
/src/FeatureSelection.R
|
ec307169bc6e1e29447a5a1aad1797f79ca3e426
|
[] |
no_license
|
mina2796/Ensayo
|
f038346d68ef65235d6e674a097da32d55bce35e
|
6a4eb6156f9853a9647e8f36cf9a3b50bbecdc39
|
refs/heads/master
| 2021-07-24T14:20:42.836051
| 2017-11-06T04:53:26
| 2017-11-06T04:53:26
| 109,647,183
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,619
|
r
|
FeatureSelection.R
|
library(mlbench)
library(caret)
library(reshape2)
set.seed(7)
# function to fing High correlated variables
# Report the `numtoreport` most highly correlated variable pairs in `df`.
# Builds the full correlation matrix, keeps only the strict upper triangle
# (each pair once, no self-correlations), melts it to long form, and returns
# the pairs ordered by decreasing absolute correlation.
mosthighlycorrelated <- function(df, numtoreport){
  cormatrix <- cor(df)
  upperTriangle <- upper.tri(cormatrix, diag = FALSE)
  cor.upperTriangle <- cormatrix
  cor.upperTriangle[!upperTriangle] <- NA
  cor.melted <- na.omit(melt(cor.upperTriangle, value.name = "CorrelationCoef"))
  # BUG FIX: the first column label was misspelled "Firts Variable".
  colnames(cor.melted) <- c("First Variable", "Second Variable", "Correlation")
  head(cor.melted[order(abs(cor.melted$Correlation), decreasing = TRUE),], n = numtoreport)
}
#--------------------- Functions for RankImportance ------------------
# Function to apply the Rank Feature by Importance
# Rank features by importance: fit a scaled k-NN model of `indepVariable` on
# all other columns of `df` (caret::train) and return caret's variable
# importance object (varImp, unscaled).
#
# df            : data frame with the response and candidate predictors
# control       : optional caret::trainControl object; a bootstrap default is
#                 built when the caller does not supply one
# indepVariable : name (string) of the response column
rfifunction <- function (df, control, indepVariable = character){
  # BUG FIX: the caller-supplied `control` used to be unconditionally
  # overwritten; the default is now built only when no control is passed.
  if (missing(control)) {
    control <- trainControl(method = "boot", number = 10, repeats = 3)
  }
  f <- as.formula(paste(indepVariable, "~", "."))
  model <- train(f, data = df, method = "knn", preProcess = "scale", trControl = control)
  varImp(model, scale = FALSE)
}
#---------------------- Functions for RFE ----------------------------
# Function to apply the rfe function on a dataframe
# Recursive feature elimination (caret::rfe) over `df`, assuming the response
# is the LAST column and every other column is a candidate predictor.
rfefunction <- function (df){
  n_cols <- ncol(df)
  newControl <- rfeControl(functions=caretFuncs, method = "boot", number = 3) # define the control
  # BUG FIX: the predictor matrix previously spanned columns 1:n_cols, which
  # included the outcome column itself and leaked the response into the
  # feature set. Predictors (and subset sizes) now exclude the last column.
  rfe ( df[,1:(n_cols - 1)], df[,n_cols], sizes = c(1:(n_cols - 1)), rfeControl = newControl)
}
# Function to get the best variables of a rfe Object
# Return the top `numOfVariables` variables from a fitted rfe object, using
# the selection function stored in the object's own control settings.
rfebestvariables <- function (rfeObject, numOfVariables){
  rfeObject$control$functions$selectVar(rfeObject$variables, numOfVariables)
}
#------------------------------------------------
## Dispatch wrapper around four feature-selection strategies; returns a copy
## of `data` reduced to the selected columns (plus the response).
##   "HighCorr" : drop columns with pairwise correlation >= 0.6 (caret)
##   "RankImp"  : keep columns whose linear correlation filter score > 0.3 (mlr)
##   "RFE"      : caret recursive feature elimination via rfefunction()
##   "Wrapper"  : mlr random-search wrapper around a linear model
## numvarstoreport : how many variables/pairs to report (HighCorr, RFE)
## indepVariable   : name (string) of the response column
## NOTE(review): the final `}` closing the function does not appear in this
## chunk -- verify the file parses before relying on it.
FeatureSelection <- function(data, type = c("HighCorr", "RankImp", "RFE", "Wrapper"), numvarstoreport, indepVariable = character)
{
if (!is.data.frame(data) && !is.matrix(data) && !is.numeric(data)) stop(warning('Input must be one of classes \"vector\", \"data frame\" or \"matrix\"'))
type = match.arg(type)
## TestName is assembled for reporting but not used in the returned value.
if (type == "HighCorr") TestName = "HighCorrelated"
if (type == "RankImp") TestName = "RankImportance"
if (type == "RFE") TestName = "Recursive Feature Elimination"
if (type == "Wrapper") TestName = "Wrapper method"
if (is.data.frame(data) || is.matrix(data)){
varNames = colnames(data)
dims = dim(data)
if (type == "HighCorr"){
require(caret)
n <- numvarstoreport
highlyCorrelated <- mosthighlycorrelated(data, n)
SelectedFeatures <- highlyCorrelated
c <- cor(data)
f <- findCorrelation(c, cutoff = 0.6) # It finds columns with correlation >= 0.6
new_data <- data[, -c(f)]
}else
if (type == "RankImp"){
#rankImp <- rfifunction(data, indepVariable = indepVariable )
#SelectedFeatures <- rankImp
require(mlr)
modeled.task <- makeRegrTask(data=data, target=indepVariable)
fv <- generateFilterValuesData(modeled.task, method="linear.correlation")
ff <- fv$data$name[fv$data$linear.correlation > 0.3] # This is used for dfN21
## Always keep the response column itself.
ff <- append(indepVariable, ff)
new_data <- data[ff]
}else
if (type == "RFE"){
m <- numvarstoreport
recursiveFE <- rfefunction(data)
predictors(recursiveFE)
bestVariables <- as.data.frame(rfebestvariables(recursiveFE, m ))
names(bestVariables) <- c("Variable")
SelectedFeatures <- bestVariables
new_data <- data[, c(rfebestvariables(recursiveFE, m))]
}else
if (type == "Wrapper"){
require (mlr)
ctrl = makeFeatSelControlRandom(maxit = 20L)
#ctrl = makeFeatSelControlSequential(method = "sfs", alpha = 0.02) # Specify the search strategy
modeled.task <- makeRegrTask(data=data, target=indepVariable) # fir a saimple regression model
rdesc = makeResampleDesc("CV", iters = 10)
sfeats = selectFeatures(learner = "regr.lm", task = modeled.task, resampling = rdesc, control = ctrl,
show.info = FALSE) # Select features
cc <- append(indepVariable, sfeats$x)
SelectedFeatures <- sfeats
new_data <- data[, cc]
}
}
{
return(new_data)
#return (list(Features=SelectedFeatures, ReducedDF = new_data))
}
}
#--------------------------
#--------------------------
|
e54b217a195ba89ed6f1a8705cd2212eadd17906
|
2e9faf2f73476db35ee7a72359a8979b79a86605
|
/R/tiplength.R
|
366b27e73efa5b8e3c38ec00dee4594086b106a3
|
[] |
no_license
|
Hackout3/saphy
|
0450efe1014b5e1d850850709ce77adb6347e923
|
d588c1788a29cd9f18441f2c9ffa095b2b099d4d
|
refs/heads/master
| 2021-01-19T07:09:56.358549
| 2016-06-23T22:42:08
| 2016-06-23T22:42:08
| 61,653,942
| 4
| 6
| null | 2016-06-24T18:52:00
| 2016-06-21T17:36:19
|
R
|
UTF-8
|
R
| false
| false
| 674
|
r
|
tiplength.R
|
#' Extracts the length of a tip from a tree
#'
#' \code{tiplength} looks up the branch length attached to a single tip of a
#' phylogenetic tree. The tip may be identified either by its label or by its
#' numeric index in \code{tree$tip.label}.
#'
#' @param tree a phylogenetic tree (as a \code{phylo} object)
#' @param tipname the tip name, as a character string, or a numeric index
#' @return The tip length (as a \code{double}).
#' @author Simon Frost (\email{sdwfrost@@gmail.com})
#' @export
tiplength <- function(tree,tipname){
  # Resolve a character label to its tip index; numeric input is used as-is.
  tip_index <- if (is.character(tipname)) match(tipname, tree$tip.label) else tipname
  # Find the edge whose child node is this tip and return its length.
  edge_row <- match(tip_index, tree$edge[, 2])
  tree$edge.length[edge_row]
}
|
11d4b5bfcf158fb9166550778428c529fac8889f
|
28d40f7881898e499127d49c573028b93ba512f6
|
/load_data.R
|
b2cc85447c0285a7fae32a20fe70de5d82f1eee4
|
[] |
no_license
|
dwolffram/covid19-ensembles-retrospective
|
1b55e016677fdbaa5664f18b0f2f7799cdf2553b
|
b9ed03cb541b6d27f4b548e8dac8e04871b4727a
|
refs/heads/main
| 2023-09-04T06:00:04.633212
| 2021-11-04T15:54:28
| 2021-11-04T15:54:28
| 402,688,755
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,599
|
r
|
load_data.R
|
library(tidyverse)
Sys.setlocale("LC_ALL", "C")
# For each date, the date of the next Monday (the date itself if it already is
# one). NOTE: Vectorize()/sapply simplifies the Date result to numeric, so
# callers convert back with as.Date(..., origin = "1970-01-01"). Matching on
# weekdays() assumes an English LC_TIME locale (set at the top of this script).
next_monday <- Vectorize(function(date) {
  offsets <- 0:6
  candidates <- date + offsets
  date + offsets[weekdays(candidates) == "Monday"]
})
# Download the JHU "truth" time series (as versioned on the given date) from
# the covid19-versioned-data GitHub repository.
#
# target : one of "incident_cases", "incident_deaths", "cumulative_deaths"
# as_of  : date string naming the data version to fetch
#
# Returns the parsed data frame with `location` kept as character and `date`
# parsed as Date. BUG FIX: the result used to be bound to a throwaway local,
# which made the function's return value invisible; it is now returned
# directly (and visibly).
load_truth <- function(target="incident_cases", as_of){
  url <- paste0("https://raw.githubusercontent.com/dwolffram/covid19-versioned-data/main/data/",
                target, "/jhu_", target, "_as_of_", as_of, ".csv")
  read.csv(url, colClasses = c(location="character", date ="Date"))
}
## Attach observed "truth" values to a forecast data frame `df`.
## For every distinct target type present (e.g. "inc case"), the matching JHU
## series versioned `as_of` is downloaded via load_truth() and joined on
## target type, target_end_date and location, adding a `truth` column.
add_truth <- function(df, as_of){
target_dict = list("inc case" = "incident_cases",
"inc death" = "incident_deaths",
"cum death" = "cumulative_deaths")
## Targets look like "1 wk ahead inc case"; characters 12+ give the type.
df$merge_target <- str_sub(df$target, start=12)
targets <- unique(df$merge_target)
truth_df <- data.frame()
for (target in targets){
truth <- load_truth(target_dict[[target]], as_of) %>%
rename(truth = value) %>%
mutate(merge_target = target)
truth_df <- bind_rows(truth_df, truth)
}
## Join the stacked truth table back on type/date/location and drop the
## temporary merge key.
df <- df %>%
left_join(truth_df, by=c("merge_target", "target_end_date"="date", "location")) %>%
select(- merge_target)
return(df)
}
# load data for models that require complete submissions (window size 4)
load_train_test <- function(forecast_date, national_level = FALSE){
df_train <- read_csv(paste0("data/", forecast_date, "_train.csv"),
col_types = cols(forecast_date = col_date(format = ""),
target = col_character(),
target_end_date = col_date(format = ""),
location = col_character(),
type = col_character(),
quantile = col_double(),
value = col_double())) %>%
filter(if (!national_level) location != "US" else TRUE) %>%
as.data.frame()
df_test <- read_csv(paste0("data/", forecast_date, "_test.csv"),
col_types = cols(forecast_date = col_date(format = ""),
target = col_character(),
target_end_date = col_date(format = ""),
location = col_character(),
type = col_character(),
quantile = col_double(),
value = col_double())) %>%
filter(if (!national_level) {location != "US"} else {location == "US"}) %>%
as.data.frame()
return(list(df_train=df_train, df_test=df_test))
}
## Load the full forecast archive (data/df.csv.gz), add a `timezero` column
## (the Monday on or after forecast_date, since some submissions arrive
## early), and keep only the 1-4 week ahead rows for the requested target.
load_data <- function(target = "cum death"){
df <- read_csv("data/df.csv.gz", col_types = cols(forecast_date = col_date(format = ""),
target = col_character(),
target_end_date = col_date(format = ""),
location = col_character(),
type = col_character(),
quantile = col_double(),
value = col_double()))
# add timezero: the next Monday after forecast_date (in case the submission was made before Monday)
df <- df %>%
nest_by(forecast_date) %>%
mutate(timezero = as.Date(next_monday(forecast_date), origin = "1970-01-01")) %>%
unnest(cols = c(data))
## !!target splices the function argument into the target-label strings.
df <- df %>%
filter(target %in% paste(1:4, "wk ahead", !!target))
return(df)
}
# select train and test data for models that use the full history (with missing submissions)
train_test_split <- function(df, test_date, intersect = TRUE){
df_test <- df %>%
filter(timezero == test_date) %>%
as.data.frame()
df_train <- df %>%
filter(target_end_date < test_date,
model %in% unique(df_test$model)) %>%
as.data.frame()
if(intersect){
df_train <- df_train %>%
filter(model %in% unique(df_test$model))
df_test <- df_test %>%
filter(model %in% unique(df_train$model))
}
df_train <- add_truth(df_train, as_of=test_date)
df_test$truth <- NA
return(list(df_train=df_train, df_test=df_test))
}
## Example usage: load the archive and split at 2021-07-19. The last three
## lines are interactive sanity checks that the train and test sets contain
## the same models.
df <- load_data()
dfs <- train_test_split(df, "2021-07-19")
train <- dfs$df_train
test <- dfs$df_test
sort(unique(train$model)) == sort(unique(test$model))
sort(unique(train$model))
sort(unique(test$model))
|
0370699f080ae63584d4b15ee18bde2e67accb1c
|
8cf4416f7e4c9016d85a616aaae3fbf0d48cf9a4
|
/r/Old/Sparrow20090715.r
|
616760d00030492c9c0ba8582e9e06db8ec26eed
|
[] |
no_license
|
willbmisled/MRB1
|
35f9bb4ef9279f55b1348b8b3fbda6543ddbc70d
|
af39fb697255df15ae41131d76c6fcf552a55a70
|
refs/heads/master
| 2020-07-20T08:43:00.460675
| 2017-06-14T14:09:38
| 2017-06-14T14:09:38
| 94,337,564
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,805
|
r
|
Sparrow20090715.r
|
## SPARROW vs. NLA lake-nutrient comparison script: pulls per-lake loads and
## measured concentrations from an Access database, then compares measured
## log TN / log TP against several literature retention models (Canfield &
## Bachmann; Welch & Jacoby), fitting retention parameters by nls.
## NOTE(review): rm(list=ls()) at the top wipes the workspace -- run only as a
## standalone script.
rm(list=ls(all=T)) #clear workspace
# Read data-****Make Sure the Path Is Correct****
require(RODBC) #Package RODBC must be installed
con <- odbcConnectAccess("//AA.AD.EPA.GOV/ORD/NAR/USERS/EC2/wmilstea/Net MyDocuments/EPA/Data/Sparrow/MRB1Sparrow/MRB1Sparrow.mdb")
get <- sqlQuery(con, "
SELECT MRB1_WBIDLakes.WB_ID, tblJoinNLAID_WBID.NLA_ID, Round([AlbersAreaM],0) AS area, Round([AlbersAreaM]*[DEPTHMAX]/3,0) AS volume, tblSparrowLoads.OutflowM3_yr AS Outflow, tblNLA_AnalysisTeamData20090421.DEPTHMAX, [Area]/[Volume] AS z, [OutflowM3_yr]/[Volume] AS rho, 1/[rho] AS tau, tblNLA_WaterQualityData.NTL AS TN, [N_Load_kg_yr]*1000000/[AlbersAreaM] AS LN, 1000*[N_Conc_Load_mg_l] AS CN, tblNLA_WaterQualityData.PTL AS TP, [P_Load_kg_yr]*1000000/[AlbersAreaM] AS LP, 1000*[P_Conc_Load_mg_l] AS CP
FROM (((MRB1_WBIDLakes INNER JOIN tblJoinNLAID_WBID ON MRB1_WBIDLakes.WB_ID = tblJoinNLAID_WBID.WB_ID) INNER JOIN tblNLA_AnalysisTeamData20090421 ON tblJoinNLAID_WBID.NLA_ID = tblNLA_AnalysisTeamData20090421.SITEID) INNER JOIN tblSparrowLoads ON MRB1_WBIDLakes.WB_ID = tblSparrowLoads.WB_ID) INNER JOIN tblNLA_WaterQualityData ON (tblNLA_AnalysisTeamData20090421.VISITNO = tblNLA_WaterQualityData.VISIT_NO) AND (tblNLA_AnalysisTeamData20090421.SITEID = tblNLA_WaterQualityData.SITE_ID)
WHERE (((tblNLA_AnalysisTeamData20090421.VISITNO)=1))
")
MRB1<-data.frame(get)
close(con)
attach(MRB1)
## Measured (TN/TP) vs. SPARROW-predicted outflow concentrations (CN/CP).
logTP=log10(TP)
logCP=log10(CP)
plot(logCP, logTP)
summary(lm(logTP~logCP))
logTN=log10(TN)
logCN=log10(CN)
plot(logCN, logTN)
summary(lm(logTN~logCN))
par(mfrow=c(2,2))
#Canfield # Bachmann
## Phosphorus retention model with a fixed literature sedimentation term.
sigma=0.162*((LP/z)**.458)
logPP=log10(LP/z*(sigma+rho))
plot(logPP, logTP)
title(main = "sigma=0.162*((LP/z)**.458) logPP=log10(LP/z*(sigma+rho))",
sub="Multiple R-squared: 0.1925, Adjusted R-squared: 0.1864")
summary(lm(logTP~logPP))
#Canfield # Bachmann P.416
## Same model but with sigma back-calculated from the observed TP.
sigma=((LP/z)/TP)-rho
logPP=log10(LP/z*(sigma+rho))
plot(logPP, logTP)
summary(lm(logTP~logPP))
#Welch & Jacoby Fig 7.1 P.180-Nitrogen logPN=log10(LN)/(1+(1.17*tau**.45))
#Estimate parameters
NitrogenParam <- nls(logTN ~ log10(LN)/(1+(beta1*tau**beta2)),
start=list(beta1 = 1.17, beta2 = .45), trace=T)
summary(NitrogenParam)
#Add parameter estimates to model
## NOTE(review): the fitted coefficients are copied in by hand; rerunning on
## new data requires updating One/Two from summary(NitrogenParam).
One=.51260
Two=-.07584
logPN=log10(LN)/(1+(One*tau**Two))
test=summary(lm(logTN~logPN))
plot(logPN, logTN, xlab="Sparrow Predicted Log Total Nitrogen", ylab="NLA Measured Log Total Nitrogen")
lines(fitted.values(NitrogenParam), logPN, lwd=2)
title(main = paste("log10(LN)/(1+(",One,"*HRT**", Two,"))"),
sub=paste('r-squared=',round(test$r.squared,4)))
test=summary(lm(logTN~logCN))
plot(logCN, logTN, xlab="Sparrow Predicted Log Nitrogen Concentration", ylab="NLA Measured Log Total Nitrogen")
title(main = "NLA TN vs. Sparrow Est. Outflow N Conc.",
sub=paste('r-squared=',round(test$r.squared,4)))
abline(test, lwd=2)
#Welch & Jacoby Fig 7.1 P.180-Phosphorus logPN=log10(LN)/(1+(1.17*tau**.45))
#Estimate parameters
PhosphorusParam <- nls(logTP ~ log10(LP)/(1+(beta1*tau**beta2)),
start=list(beta1 = 1.17, beta2 = .45), trace=T)
summary(PhosphorusParam)
#Add parameter estimates to model
One=2.02018
Two=.07598
logPP=log10(LP)/(1+(One*tau**Two))
test=summary(lm(logTP~logPP))
plot(logPP, logTP, xlab="Sparrow Predicted Log Total Phosphorus", ylab="NLA Measured Log Total Phosphorus")
lines(fitted.values(PhosphorusParam), logPP, lwd=2)
title(main = paste("log10(LP)/(1+(",One,"*HRT**",Two,"))"),
sub=paste('r-squared=',round(test$r.squared,4)))
test=summary(lm(logTP~logCP))
plot(logCP, logTP, xlab="Sparrow Predicted Log Phosphorus Concentration", ylab="NLA Measured Log Total Phosphorus")
title(main = "NLA TP vs. Sparrow Est. Outflow P Conc.",
sub=paste('r-squared=',round(test$r.squared,4)))
abline(test, lwd=2)
|
0e3f9a17a6e7258d5f414dd61d8ad0c40f6dc9e6
|
ec15073d91ea3d63bfd93d4fcb0ff1d963a0d251
|
/bayesian_methods/challenger.R
|
ad0fa8c246bab67b3e06adbe2b627d3fdd263256
|
[] |
no_license
|
nishkavijay/data-analysis
|
e56079649ab4798a6d49bcec16a559fccaf4a377
|
95d962b4fad31501b1aa5e6def13179417b7f270
|
refs/heads/master
| 2021-01-15T11:56:36.993456
| 2014-07-23T16:51:48
| 2014-07-23T16:51:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,277
|
r
|
challenger.R
|
# a.Prepare analysis of the type we did for Bioassay problem,
# with the covariate being the temperature and the response variable being
# #of primary O-rings in field joints with Erosion or blowby.
# There are 6 primary O-rings per shuttle.
# Challenger was launched at temperature of 31 degrees.
## Build the Challenger O-ring dataset and the (alpha, beta) grid for the
## logistic-regression posterior. LL is the grid resolution per axis.
## Returns list(oring = data frame of per-launch temp / #rings / damage,
##              estimates = crude lm fit on the empirical logits,
##              alpha, beta = grid vectors for the posterior evaluation).
## NOTE(review): `<-` inside the data.frame() calls produces mangled column
## names; downstream code reaches temp/eorb/orings only through `$` partial
## matching -- confirm before refactoring.
input<-function(LL=200){
oring<-data.frame(temp <- c(66,70,69,68,67,72,73,70,57,63,70,78,
67,53,67,75,70,81,76,79,75,76,58),
orings<-rep(6,length(temp)),
eorb<-c(0.5,1,0.5,0.5,0.5,0.5,0.5,0.5,1,1,1,
0.5,0.5,2,0.5,0.5,0.5,0.5,0.5,0.5,2,0.5,1),
freq<-eorb/6)
## Empirical logit regression of damage frequency on temperature.
DD<-data.frame(y<-log((oring$freq)/(1- oring$freq)),x<-oring$temp)
estimates <- lm(y~x,data=DD)
alpha.hat <- summary(estimates)$coef[1,1]
std.alpha <- summary(estimates)$coef[1,2]
beta.hat <- summary(estimates)$coef[2,1]
std.beta <- summary(estimates)$coef[2,2]
## Evaluation grid for the joint posterior of (alpha, beta).
alpha<-seq(-2,3,length=LL)
beta<-seq(-1,0,length=LL)
return(list(oring=oring,estimates=estimates,alpha=alpha,beta=beta))
}
# Log-posterior (binomial log-likelihood, flat prior) for the logistic model
# theta_i = 1 / (1 + exp(-alpha - beta * temp_i)). alpha and beta may be
# equal-length vectors (as supplied by outer()); the per-launch terms are
# accumulated in a loop so the vector shape is preserved.
log.post <- function(alpha, beta, data = DD$oring) {
  ldens <- 0
  for (j in seq_along(data$temp)) {
    p <- 1 / (1 + exp(-alpha - beta * data$temp[j]))
    ldens <- ldens + data$eorb[j] * log(p) + (data$orings[j] - data$eorb[j]) * log(1 - p)
  }
  ldens
}
## Draw `size` samples from a discrete joint distribution over the grid
## (x[i], y[j]) with (possibly unnormalized) probability matrix `prob`
## (rows index x, columns y). Samples x marginally, then y | x.
## Returns a size x 2 matrix of (x, y) pairs.
randomdraw2d <- function(size, x, y, prob){
  # BUG FIX: the old guard `if (sum(prob) != 1)` compared floats with ==;
  # renormalizing unconditionally is equivalent and always safe.
  prob <- prob / sum(prob)
  probx <- rowSums(prob)
  # seq_along/seq_len (not 1:length, 1:size) so size = 0 yields zero draws
  # instead of iterating over c(1, 0).
  xsampleindex <- sample(seq_along(x), size = size, replace = TRUE, prob = probx)
  ysampleindex <- rep(NA_integer_, size)
  for (i in seq_len(size)){
    # Conditional distribution of y given the sampled x row.
    proby.x <- prob[xsampleindex[i], ]
    proby.x <- proby.x / sum(proby.x)
    ysampleindex[i] <- sample(seq_along(y), 1, replace = TRUE, prob = proby.x)
  }
  cbind(x[xsampleindex], y[ysampleindex])
}
## Contour plot of the joint posterior density of (alpha, beta) on the grid in
## DD, overlaid with `drawsize` points sampled from the discretized posterior.
## `log.post` is the log-density function evaluated via outer().
plot.joint.post=function(DD,log.post,drawsize){
contours=seq(.05,.95,.1)
logdens=outer(DD$alpha,DD$beta,log.post,data=DD$oring)
## Subtract the max before exponentiating for numerical stability.
dens=exp(logdens-max(logdens))
contour(DD$alpha,DD$beta,dens,levels=contours,
xlab= "alpha", ylab="beta", ylim=c(-0.1,0))
points(randomdraw2d(drawsize,DD$alpha,DD$beta,prob= dens/sum(dens)),pch=".")
mtext("Posterior density",3,line=1,cex=1.2)
}
## Main analysis: evaluate the posterior on a 200x200 grid, draw 10k samples,
## then answer parts (b)-(f) of the exercise with predictive histograms.
DD <- input(200)
prob <- outer(DD$alpha, DD$beta, log.post, DD$oring)
prob <- exp(prob)
prob <- prob/sum(prob)
pdraws <- randomdraw2d(10000,DD$alpha, DD$beta, prob)
# b.Compute the predictive distribution of an erosion or blowby at 31 degrees
theta31 <- 1/(1+exp(-(pdraws[,1]+pdraws[,2]*31)))
pdf("dist31.pdf")
hist(theta31,freq=FALSE,breaks=30, yaxt="n",
xlab="Predictive dist. at 31 degrees", ylab="", main="")
lines(density(theta31))
dev.off()
# c.Find distribution of x such that p(erosion or blowby)=.05 based on this data
## Temperature at which the logit crosses zero (LD50-style summary).
ld50 <- -pdraws[,1]/pdraws[,2]
pdf("ld50.pdf")
hist(ld50,freq=FALSE,breaks=30, yaxt="n", xlab="LD50", ylab="", main="")
lines(density(ld50))
dev.off()
# d.What is the credible interval for the probability of erosion/blowby
# at 31 degrees Fahrenheit?
c2 <- quantile(theta31,0.975)
c1 <- quantile(theta31,0.025)
# 95% credible interval: [0.1267895, 0.6689950]
ci95 <- c(c1,c2)
# e.Find the predictive distribution of probability of a field joint O-ring being
# damaged (erosion/blowby) to a new space shuttle launch at 50 degrees
theta50 <- 1/(1+exp(-(pdraws[,1]+pdraws[,2]*50)))
pdf("dist50.pdf")
hist(theta50,freq=FALSE,breaks=30, yaxt="n",
xlab="Predictive dist. at 50 degrees", ylab="", main="")
lines(density(theta50))
dev.off()
# f.Suppose you were told that an expert has predicted that the space shuttle at
# temperature of 60 degrees will have has median .02 and 90 percentile of .08.
# How would you quantify this in your prior? How would your posterior change?
## Same likelihood as log.post, plus a conditional-means beta prior on the
## damage probability at 60 degrees. NOTE(review): beta.select comes from the
## LearnBayes package, which must be attached before this runs.
log.post.betaprior<-function(alpha,beta,data=DD$oring){
params <- beta.select(list(p=.5,x=0.02),list(p=.9,x=0.08))
ldens <- (alpha+60*beta)*(params[1]-1)+
(2-params[1]-params[2])*log(1+exp(alpha+60*beta))
for (i in 1:length(data$temp)){
theta <- 1/(1+exp(-alpha-beta*data$temp[i]))
ldens <- ldens + data$eorb[i]*log(theta) +
(data$orings[i]-data$eorb[i])*log(1-theta)}
return(ldens)
}
## Side-by-side posteriors: flat prior vs. expert beta prior.
pdf("post_dist_1.pdf")
plot.joint.post(DD,log.post,drawsize=1000)
dev.off()
pdf("post_dist_beta.pdf")
plot.joint.post(DD,log.post.betaprior,drawsize=1000)
dev.off()
|
cf70a9ac2da5781aa7431d45da4a06ca20d22acb
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/catR/examples/EPV.Rd.R
|
a75efc6045de372723a87e523899e062e23b8102
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,595
|
r
|
EPV.Rd.R
|
## Auto-extracted example script from the catR help page for EPV()
## (Expected Posterior Variance). Demonstrates EPV under a dichotomous model
## (tcals bank) and two polytomous models (GRM, GPCM).
library(catR)
### Name: EPV
### Title: Expected Posterior Variance (EPV)
### Aliases: EPV
### ** Examples
## Dichotomous models ##
# Loading the 'tcals' parameters
data(tcals)
# Selecting item parameters only
bank <- as.matrix(tcals[,1:4])
# Selection of two arbitrary items (15 and 20) of the
# 'tcals' data set
it.given <- bank[c(15,20),]
# Creation of a response pattern
x <- c(0, 1)
# EPV for item 1, provisional ability level 0
EPV(bank, 1, x, 0, it.given)
# With prior standard deviation 2
EPV(bank, 1, x, 0, it.given, priorPar = c(0,2))
## Polytomous models ##
# Generation of an item bank under GRM with 100 items and at most 4 categories
m.GRM <- genPolyMatrix(100, 4, "GRM")
m.GRM <- as.matrix(m.GRM)
# Selection of two arbitrary items (15 and 20)
it.given <- m.GRM[c(15,20),]
# Generation of a response pattern (true ability level 0)
x <- genPattern(0, it.given, model = "GRM")
# EPV for item 1, provisional ability level 0
EPV(m.GRM, 1, x, 0, it.given, model = "GRM")
# With prior standard deviation 2
EPV(m.GRM, 1, x, 0, it.given, model = "GRM", priorPar = c(0, 2))
# Loading the cat_pav data
data(cat_pav)
cat_pav <- as.matrix(cat_pav)
# Selection of two arbitrary items (15 and 20)
it.given <- cat_pav[c(15, 20),]
# Generation of a response pattern (true ability level 0)
x <- genPattern(0, it.given, model = "GPCM")
# EPV for item 1, provisional ability level 0
EPV(cat_pav, 1, x, 0, it.given, model = "GPCM")
# With prior standard deviation 2
EPV(cat_pav, 1, x, 0, it.given, model = "GPCM", priorPar = c(0, 2))
|
f2758eead6d3f6c5bc72fbae3b9a8a0e7966b0ed
|
0e3935889a98eed7f993c88df5b86f4809769f28
|
/brier.R
|
2c687fd95986e487232de9d07ed5a4d25714e685
|
[] |
no_license
|
tbruning/R-functions
|
aeb02507c07a617792a61978feb4ea269beaa545
|
efcfa581fdc8a14204d47b4b4a859db7cc46805e
|
refs/heads/master
| 2021-01-21T13:11:32.513505
| 2016-05-12T14:07:06
| 2016-05-12T14:07:06
| 49,440,558
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 114
|
r
|
brier.R
|
## Brier-style score for a probabilistic forecast: 2 * (forecast - outcome)^2,
## where `happened` is 1 if the event occurred and anything else counts as
## "did not occur" (default 0). Vectorized over both arguments.
brier <- function(prob, happened = 0) {
  outcome <- ifelse(happened == 1, 1, 0)
  2 * (prob - outcome)^2
}
|
2d6fa47fa711cf66f5371660106fbe959bd97f06
|
b3d411d09b7e92545f4a52273ca7777b83b9bee8
|
/functions.R
|
d836f7a9c92d0047a1b3eae5c717109ea14817a8
|
[
"MIT",
"CC-BY-3.0"
] |
permissive
|
rafalab/maria
|
14f8fbb377bbc9c583a270943cc1a9c26cd77d11
|
30973de794b6c0b6cba82211cde9eccb0319b218
|
refs/heads/master
| 2020-03-27T23:50:47.460493
| 2018-09-09T02:18:53
| 2018-09-09T02:18:53
| 147,352,312
| 4
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,874
|
r
|
functions.R
|
## day of the year skipping 2/29
## In leap years, days after February are shifted down by one so that a given
## calendar date maps to the same index every year.
## BUG FIX: the old test `year(x) %% 4 == 0` mislabels century years
## (e.g. 1900, 2100) as leap years; lubridate::leap_year applies the full
## Gregorian rule (divisible by 4, except centuries not divisible by 400).
my_yday <- function(x) ifelse(leap_year(x) & month(x) > 2, yday(x) - 1, yday(x))
## Build a Fourier (harmonic) design matrix for day-of-year x: for each
## harmonic j = 1..k, two columns sin(2*pi*j*x/365) and cos(2*pi*j*x/365),
## yielding a length(x) x (2*k) matrix for modeling annual seasonality.
fourier_trend <- function(x, k = 3) {
  harmonics <- lapply(seq_len(k), function(j) {
    angle <- 2 * pi * j / 365 * x
    cbind(sin(angle), cos(angle))
  })
  do.call(cbind, harmonics)
}
## for vectors like this 2000 NA NA NA NA NA ... NA 2001 NA NA... fill in the NA
## works for non years too
## Forward-fill: each NA takes the most recent non-NA value before it.
## BUG FIX: the old recursive version never terminated when x started with NA
## (a leading NA can never be filled, so `any(is.na(x))` stayed TRUE and
## Recall() looped forever). This version iterates and stops once a pass makes
## no progress, leaving unfillable leading NAs in place.
fill_years <- function(x){
  repeat {
    shifted <- c(NA, head(x, -1))
    filled <- ifelse(is.na(x), shifted, x)
    if (!any(is.na(filled)) || identical(filled, x)) {
      return(filled)
    }
    x <- filled
  }
}
fit_daily_model <- function(dat,
hurricane_dates,
knots_per_year = 4,
harmonics = 3,
outlier_years = c(),
no_effect_season = c(61, 245), ## this is 3/1 to 9/1
last_day = max(dat$Date),
extrapolate_last_year = 2,
verbose=TRUE){ #0 is nothing, 1 is constant, 2 is line
if(verbose) message("Computing offsets.")
dat <- filter(dat, Date <= last_day)
hurricane_years <- year(hurricane_dates)
dat <- dat %>% mutate(offset_sample_size = log(Population))
### find the overall mean ans seasonel
s <- cbind(1, fourier_trend(dat$yd, k = harmonics))
##excluding the hurrican years and outlier years given that they are not normal
#index <- which(!year(dat$Date) %in% c(hurricane_years, outlier_years))
index <- which(!year(dat$Date) %in% hurricane_years)
## offset for year effect
fit <- glm(dat$Deaths[index] ~ s[index,] - 1,
offset = dat$offset_sample_size[index],
family = "poisson")
dat <- dat %>%
mutate(offset_seasonal = as.vector(s%*%fit$coefficients),
offset_seasonal_se = sqrt(colSums(t(s) * vcov(fit) %*% t(s))))
## find the yearly offset (deviation from the seasonal effect by year)
## but using only parts of the year without hurricane effects
offset_year <- dat %>%
filter(yd >= no_effect_season[1] & yd <= no_effect_season[2]) %>%
mutate(Year = as.factor(Year)) %>%
glm(Deaths ~ Year - 1, data = ., family = "poisson",
offset = offset_seasonal + offset_sample_size) %>%
tidy() %>%
mutate(term = as.numeric(gsub("Year", "", term))) %>%
select(term, estimate, std.error) %>%
setNames(c("Year", "offset_year", "offset_year_se"))
## extrapolate last year
last_year <- max(year(dat$Date))
if(extrapolate_last_year == 2){
offset_year$offset_year[offset_year$Year == last_year] <-
predict(lm(offset_year ~ ns(Year, knots = Year[-c(1,length(Year))]),
data = filter(offset_year, Year < last_year)),
newdata = filter(offset_year, Year == last_year))
offset_year$offset_year_se[offset_year$Year == last_year] <- offset_year$offset_year_se[offset_year$Year == last_year-1]
} else{
if(extrapolate_last_year == 1){
offset_year$offset_year[offset_year$Year == last_year] <- offset_year$offset_year[offset_year$Year == last_year - 1]
offset_year$offset_year_se[offset_year$Year == last_year] <- offset_year$offset_year_se[offset_year$Year == last_year - 1]
}
}
## join it with dat so we can use it in the glm
if(verbose) message("Preparing design matrices.")
dat <- left_join(dat, offset_year, by="Year")
### smooth function with jumps at hurricanes
hurricane_knots <- filter(dat, Date %in% hurricane_dates) %>% .$t
nknots <- round(as.numeric(diff(range(dat$Date)))/365 * knots_per_year)
if(verbose) message(paste("Using", nknots,"knots."))
knots <- quantile(dat$t, seq(0, 1, length.out = nknots))
knots <- knots[-c(1, length(knots))]
## find the index of closest knot to hurricane
index_hurricane <- sapply(hurricane_knots, function(tau)
which.min(abs(tau-knots)))
## use natural cubic splines at knots not the hurricane
f <- ns(dat$t, knots = knots[-index_hurricane])
## permit complete change at the hurricane
tau <- c(hurricane_knots, max(dat$t)+1)
h <- lapply(seq_along(hurricane_knots), function(i, x=dat$t){
ind <- I(x>=tau[i] & x<tau[i+1])
cbind(ind, poly((x-tau[i])*ind, 3))
})
## combine the hurricane and non-hurricane knots
h <- do.call(cbind, h)
## index of the columns we will not scale because they represent the jump
## and we want to interprete this parameter
no_scale_index <- ncol(f) + seq(1,ncol(h),4)
f <- cbind(f,h)
## make the average day the reference:
f[ ,-no_scale_index] <- sweep(f[, -no_scale_index], 2, colMeans(f[ ,-no_scale_index]))
##check for full rank
if(!identical(qr(f)$rank, ncol(f))) stop("Not full rank")
if(verbose) message("Fitting model.")
## need to figure out how to deal with year effect
fit_glm <- with(dat,
glm(Deaths ~ f-1,
offset = offset_sample_size +
offset_seasonal +
offset_year,
family = "poisson"))
beta_f <- fit_glm$coef
se_f <- sqrt(colSums(t(f) * vcov(fit_glm) %*% t(f)))
se_f <- sqrt(se_f^2 + dat$offset_seasonal_se^2 + dat$offset_year_se^2) ##we need to check this
dat <- dat %>%
mutate(f_hat = (f %*% beta_f)[,1], se = se_f,
fitted_values = fitted.values(fit_glm))
return(dat)
}
fit_monthly_model <- function(dat,
hurricane_dates,
no_effect_season = c(2, 7)){## this one in months
hurricane_years <- year(hurricane_dates)
dat <- dat %>% mutate(rate = Deaths / Population * 365 / days)
### compute the seasonal effect
s_hat <- dat %>% filter(!Year %in% hurricane_years) %>%
group_by(Month) %>%
summarize(s_hat = mean(rate))
dat <- left_join(dat, s_hat, by = "Month")
## now compute the yearly offset
year_offset <- dat %>%
filter(Month >= no_effect_season[1] & Month <= no_effect_season[2]) %>%
group_by(Year) %>%
summarize(year_offset = mean(rate - s_hat))
dat <- left_join(dat, year_offset, by = "Year") %>%
mutate(expected = s_hat + year_offset,
diff = rate - expected,
increase = diff / expected)
sds <- dat %>%
## now compute the sd and se for expected counts
filter(!Year %in% hurricane_years) %>%
group_by(Month) %>%
summarize(sd = sd(rate - year_offset), se = sd/sqrt(n()))
dat <- left_join(dat, sds, by = "Month") %>%
select(Date, Deaths, Population, rate, expected, diff, sd, se, increase, Year, Month, days, s_hat, year_offset)
return(dat)
}
### Function to visualize population estimates
population_viz <- function(dat, month_breaks="1", theme="theme_minimal")
{
dslabs::ds_theme_set(new=theme)
p <- dat %>% ggplot(aes(Date, Population)) +
geom_line() + xlab("") +
scale_x_date(date_labels = "%b %Y", date_breaks = paste(month_breaks,"months")) +
theme(axis.text.x = element_text(angle = 45, hjust = 1))
return(p)
}
### Function to visualize seasonal fit
seasonal_fit_viz <- function(dat, hurricane_dates, month_breaks="1", theme="theme_minimal")
{
dslabs::ds_theme_set(new=theme)
name <- dat$state[1]
hurricane_years <- year(hurricane_dates)
tmp <- dat %>%
filter(!(Year %in% hurricane_years)) %>%
group_by(yd) %>%
summarize(avg = exp(mean(log(Deaths) - log(Population)))*1000*365)
p <- dat %>% filter(Year == min(Year)) %>%
ggplot(aes(yd, exp(offset_seasonal)*1000*365))+xlab("Days")+ylab("Death rate")+
geom_point(aes(yd, avg), data = tmp, alpha=0.70, col="#525252") +
geom_ribbon(aes(x=yd,ymin=exp(offset_seasonal-1.96*offset_seasonal_se)*1000*365,
ymax=exp(offset_seasonal+1.96*offset_seasonal_se)*1000*365),fill="#ca0020",alpha=0.75)+
geom_line(col="#ca0020")+ ggtitle(paste("Seasonal fit for", name))
return(p)
}
### Function to visualize f_hat with points
f_viz <- function(dat, years, month_breaks="1", theme="theme_minimal", l_lim=-0.50, u_lim=0.70)
{
dslabs::ds_theme_set(new=theme)
name <- dat$state[1]
dat <- dat %>%
mutate(points = log(Deaths)-offset_sample_size-offset_seasonal-offset_year)
p <- dat %>% filter(Year %in% years) %>% ggplot() +
geom_point(aes(Date, points), alpha=0.50,col="#525252") +
geom_ribbon(aes(x=Date, ymin=f_hat-1.96*se, ymax=f_hat+1.96*se),fill="#ca0020",alpha=0.5) +
xlab("") + ylab("log Death rate ratio") + ggtitle(paste("f hat for", name)) +
geom_line(aes(Date, f_hat),col="#ca0020") + geom_hline(yintercept=0,lty=2)+
scale_x_date(date_labels = "%b %Y", date_breaks = paste(month_breaks,"months")) +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
scale_y_continuous(limits=c(l_lim,u_lim),
breaks=c(seq(l_lim,u_lim,by=0.10)))
return(p)
}
|
e9a7149659857485a7fa2faa130ed546e554e4e3
|
dc7c1016493af2179bd6834614be0902a0133754
|
/forcats.R
|
a08e0a3b56760fb9a3d2d75ddf87cb88babfa25c
|
[] |
no_license
|
ashishjsharda/R
|
5f9dc17fe33e22be9a6031f2688229e436ffc35c
|
fc6f76740a78d85c50eaf6519cec5c0206b2910c
|
refs/heads/master
| 2023-08-08T13:57:05.868593
| 2023-07-30T13:51:56
| 2023-07-30T13:51:56
| 208,248,049
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 85
|
r
|
forcats.R
|
library(forcats)
gss_cat %>%
count(race)
ggplot(gss_cat, aes(race)) +
geom_bar()
|
f923d899a967b0d93b106b4a94212e5b50d94611
|
4fd6dedc1b65f6575990f5c4e7c85a966561c4c2
|
/Functions-sim-true-spp/Sim-spp-funcitons.R
|
504fa87cf9fcb4d4207078b9d5361770caf5e1d7
|
[] |
no_license
|
gerardommc/Niche-centroids
|
ed6233e48d1a6c0e1183ed6d3cf63908dc9536d7
|
3667a2885fbbe6d0a0a240400ba4f56699435357
|
refs/heads/master
| 2023-04-09T15:44:40.994073
| 2022-02-02T19:34:11
| 2022-02-02T19:34:11
| 296,682,843
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 291
|
r
|
Sim-spp-funcitons.R
|
temp.optim <- function(x, k = list(k1, k2, k3, k4, k5, k6, k7)){
y <- with(k, (k1*(x - k2)^k3)/(k4^k3 + (x - k2)^k3) - exp(k7 - (k5 - (x - k2))/(k5 - k6)))
return(y)
}
rain.resp <- function(x, k = list(k1, k2)){
y <- with(k, k1 * (1 - exp( - k2 * x)))
return(y)
}
|
9203db8e8cadcde9ddb23d226b1ff7dd40b4a63f
|
53a794aff945938f5a183e154b4b54216aea4020
|
/R/h2o-package/R/h2oWrapper.R
|
57a75d7747b737205c7f0783d8ffd3b4ab2789e1
|
[
"Apache-2.0"
] |
permissive
|
jmcclell/h2o
|
510989cecdf63b50f49106779c942b1c7b18aa7e
|
753b2e5baed9c5305e7b5e7335b27243e93abb7c
|
refs/heads/master
| 2021-01-21T15:43:45.395213
| 2013-12-12T01:38:08
| 2013-12-12T01:38:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,090
|
r
|
h2oWrapper.R
|
setClass("H2OClient", representation(ip="character", port="numeric"), prototype(ip="127.0.0.1", port=54321))
setGeneric("h2o.init", function(ip = "127.0.0.1", port = 54321, startH2O = TRUE, silentUpgrade = FALSE, promptUpgrade = TRUE) { standardGeneric("h2o.init") })
# setGeneric("h2o.shutdown", function(ip = "127.0.0.1", port = 54321, prompt = TRUE) { standardGeneric("h2o.shutdown") })
setGeneric("h2o.shutdown", function(object, prompt = TRUE) { standardGeneric("h2o.shutdown") })
h2o.__PAGE_RPACKAGE = "RPackage.json"
h2o.__PAGE_SHUTDOWN = "Shutdown.json"
setMethod("show", "H2OClient", function(object) {
cat("IP Address:", object@ip, "\n")
cat("Port :", object@port, "\n")
})
# Checks H2O connection and installs H2O R package matching version on server if indicated by user
# 1) If can't connect and user doesn't want to start H2O, stop immediately
# 2) If user does want to start H2O and running locally, attempt to bring up H2O launcher
# 3) If user does want to start H2O, but running non-locally, print an error
setMethod("h2o.init", signature(ip="character", port="numeric", startH2O="logical", silentUpgrade="logical", promptUpgrade="logical"),
function(ip, port, startH2O, silentUpgrade, promptUpgrade) {
myURL = paste("http://", ip, ":", port, sep="")
if(!url.exists(myURL)) {
if(!startH2O)
stop(paste("Cannot connect to H2O server. Please check that H2O is running at", myURL))
else if(ip=="localhost" || ip=="127.0.0.1") {
print("H2O is not running yet, starting it now.")
h2oWrapper.startLauncher()
invisible(readline("Start H2O, then hit <Return> to continue: "))
# h2o.startJar()
# count = 0; while(!url.exists(myURL) && count < 10) { Sys.sleep(1); count = count + 1 }
if(!url.exists(myURL)) stop("H2O failed to start, stopping execution.")
} else stop("Can only start H2O launcher if IP address is localhost")
}
cat("Successfully connected to", myURL, "\n")
h2o.checkPackage(myURL, silentUpgrade, promptUpgrade)
if("package:h2oRClient" %in% search())
detach("package:h2oRClient", unload=TRUE)
library(h2oRClient)
return(new("H2OClient", ip = ip, port = port))
})
setMethod("h2o.init", signature(ip="ANY", port="ANY", startH2O="ANY", silentUpgrade="ANY", promptUpgrade="ANY"),
function(ip, port, startH2O, silentUpgrade, promptUpgrade) {
if(!(missing(ip) || class(ip) == "character"))
stop(paste("ip cannot be of class", class(ip)))
if(!(missing(port) || class(port) == "numeric"))
stop(paste("port cannot be of class", class(port)))
if(!(missing(startH2O) || class(startH2O) == "logical"))
stop(paste("startH2O cannot be of class", class(startH2O)))
if(!(missing(silentUpgrade) || class(silentUpgrade) == "logical"))
stop(paste("silentUpgrade cannot be of class", class(silentUpgrade)))
if(!(missing(promptUpgrade) || class(promptUpgrade) == "logical"))
stop(paste("promptUpgrade cannot be of class", class(promptUpgrade)))
h2o.init(ip, port, startH2O, silentUpgrade, promptUpgrade)
})
# Shuts down H2O instance running at given IP and port
setMethod("h2o.shutdown", signature(object="H2OClient", prompt="logical"),
function(object, prompt) {
myURL = paste("http://", object@ip, ":", object@port, sep="")
if(!url.exists(myURL)) stop(paste("There is no H2O instance running at", myURL))
if(prompt) {
ans = readline(paste("Are you sure you want to shutdown the H2O instance running at", myURL, "(Y/N)? "))
temp = substr(ans, 1, 1)
} else temp = "y"
if(temp == "Y" || temp == "y") {
res = getURLContent(paste(myURL, h2o.__PAGE_SHUTDOWN, sep="/"))
res = fromJSON(res)
if(!is.null(res$error))
stop(paste("Unable to shutdown H2O. Server returned the following error:\n", res$error))
}
# if(url.exists(myURL)) stop("H2O failed to shutdown.")
})
setMethod("h2o.shutdown", signature(object="ANY", prompt="ANY"),
function(object, prompt) {
if(!(missing(object) || class(object) == "H2OClient"))
stop(paste("object cannot be of class", class(object)))
if(!(missing(prompt) || is.logical(prompt)))
stop(paste("prompt cannot be of class", class(prompt)))
h2o.shutdown(object, prompt)
})
#-------------------------------- Helper Methods --------------------------------#
h2o.checkPackage <- function(myURL, silentUpgrade, promptUpgrade) {
temp = postForm(paste(myURL, h2o.__PAGE_RPACKAGE, sep="/"), style = "POST")
res = fromJSON(temp)
if (!is.null(res$error))
stop(paste(myURL," returned the following error:\n", h2oWrapper.__formatError(res$error)))
H2OVersion = res$version
myFile = res$filename
serverMD5 = res$md5_hash
myPackages = rownames(installed.packages())
if("h2oRClient" %in% myPackages && packageVersion("h2oRClient") == H2OVersion)
cat("H2O R package and server version", H2OVersion, "match\n")
else if(h2o.shouldUpgrade(silentUpgrade, promptUpgrade, H2OVersion)) {
if("h2oRClient" %in% myPackages) {
cat("Removing old H2O R package version", toString(packageVersion("h2oRClient")), "\n")
remove.packages("h2oRClient")
}
cat("Downloading and installing H2O R package version", H2OVersion, "\n")
# download.file(paste(myURL, "R", myFile, sep="/"), destfile = paste(getwd(), myFile, sep="/"), mode = "wb")
temp = getBinaryURL(paste(myURL, "R", myFile, sep="/"))
writeBin(temp, paste(getwd(), myFile, sep="/"))
if(as.character(serverMD5) != as.character(md5sum(paste(getwd(), myFile, sep="/"))))
warning("Mismatched MD5 hash! Check you have downloaded complete R package.")
install.packages(paste(getwd(), myFile, sep="/"), repos = NULL, type = "source")
file.remove(paste(getwd(), myFile, sep="/"))
# cat("\nSuccess\nYou may now type 'library(h2oRClient)' to load the R package\n\n")
# require(h2oRClient)
}
}
# Check if user wants to install H2O R package matching version on server
# Note: silentUpgrade supercedes promptUpgrade
h2o.shouldUpgrade <- function(silentUpgrade, promptUpgrade, H2OVersion) {
if(silentUpgrade) return(TRUE)
if(promptUpgrade) {
ans = readline(paste("Do you want to install H2O R package", H2OVersion, "from the server (Y/N)? "))
temp = substr(ans, 1, 1)
if(temp == "Y" || temp == "y") return(TRUE)
else if(temp == "N" || temp == "n") return(FALSE)
else stop("Invalid answer! Please enter Y for yes or N for no")
} else return(FALSE)
}
h2oWrapper.__formatError <- function(error, prefix=" ") {
result = ""
items = strsplit(error,"\n")[[1]];
for (i in 1:length(items))
result = paste(result, prefix, items[i], "\n", sep="")
result
}
#---------------------------- H2O Jar Initialization -------------------------------#
.h2o.pkg.path <- NULL
.onLoad <- function(lib, pkg) {
.h2o.pkg.path <<- paste(lib, pkg, sep = .Platform$file.sep)
# Install and load H2O R package dependencies
require(tools)
myPackages = rownames(installed.packages())
myReqPkgs = c("bitops", "RCurl", "rjson", "statmod")
temp = lapply(myReqPkgs, function(x) { if(!x %in% myPackages) { cat("Installing package dependency", x, "\n"); install.packages(x, repos = "http://cran.rstudio.com/") }
if(!require(x, character.only = TRUE)) stop("The required package ", x, " is not installed. Please type install.packages(\"", x, "\") to install the dependency from CRAN.") })
}
.onAttach <- function(libname, pkgname) {
packageStartupMessage("\nPlease type h2o.init() to launch H2O, and use h2o.shutdown() to quit H2O. More information can be found at http://docs.0xdata.com/.\n")
}
h2o.startJar <- function() {
if(.Platform$OS.type == "windows") {
runs <- paste(.h2o.pkg.path, "scripts", "h2o.bat", sep = .Platform$file.sep)
if (!file.exists(runs)) {
rs = h2o.__genScript()
wl <- try(writeLines(rs, runs), silent = TRUE)
if (inherits(wl, "try-error"))
stop("Cannot create H2O start script! Please check if h2o.bat exists at ", runs)
system(paste("chmod a+x '", runs, "'", sep = ""))
}
system(paste("open", runs))
}
else {
runs <- paste(.h2o.pkg.path, "scripts", "h2o", sep = .Platform$file.sep)
if (!file.exists(runs)) {
rs = h2o.__genScript()
wl <- try(writeLines(rs, runs), silent = TRUE)
if (inherits(wl, "try-error"))
stop("Cannot create H2O start script! Please check if h2o exists at ", runs)
system(paste("chmod a+x '", runs, "'", sep = ""))
}
system(paste("sh", runs, "&"))
}
}
h2o.__genScript <- function(target = NULL) {
if(.Platform$OS.type == "windows")
run.template <- paste(.h2o.pkg.path, "scripts", "h2o.bat.TEMPLATE", sep = .Platform$file.sep)
else
run.template <- paste(.h2o.pkg.path, "scripts", "h2o.TEMPLATE", sep = .Platform$file.sep)
rt <- readLines(run.template)
settings <- c("JAVA_HOME", "JAVA_PROG", "H2O_JAR", "FLAT")
sl <- list()
for (i in settings) sl[[i]] <- Sys.getenv(i)
if (nchar(sl[["JAVA_PROG"]]) == 0) {
if (nchar(sl[["JAVA_HOME"]]) > 0) {
jc <- paste(sl[["JAVA_HOME"]], "bin", "java", sep = .Platform$file.sep)
if (file.exists(jc))
sl[["JAVA_PROG"]] <- jc
}
else sl[["JAVA_PROG"]] <- "java"
}
sl[["H2O_JAR"]] <- system.file("java", "h2o.jar", package = "h2o")
sl[["FLAT"]] <- system.file("java", "flatfile.txt", package = "h2o")
for (i in names(sl)) rt <- gsub(paste("@", i, "@", sep = ""), sl[[i]], rt)
if (is.null(target)) return(rt)
writeLines(rt, target)
}
#---------------------------------- Deprecated ----------------------------------#
# Start H2O launcher GUI if installed locally from InstallBuilder executable
h2oWrapper.startLauncher <- function() {
myOS = Sys.info()["sysname"]
if(myOS == "Windows") verPath = paste(Sys.getenv("APPDATA"), "h2o", sep="/")
else verPath = paste(Sys.getenv("HOME"), "Library/Application Support/h2o", sep="/")
myFiles = list.files(verPath)
if(length(myFiles) == 0) stop("Cannot find location of H2O launcher. Please check that your H2O installation is complete.")
# Must trim myFiles so all have format 1.2.3.45678.txt (use regexpr)!
# Get H2O with latest version number
# If latest isn't working, maybe go down list to earliest until one executes?
fileName = paste(verPath, tail(myFiles, n=1), sep="/")
myVersion = strsplit(tail(myFiles, n=1), ".txt")[[1]]
launchPath = readChar(fileName, file.info(fileName)$size)
if(is.null(launchPath) || launchPath == "")
stop(paste("No H2O launcher matching H2O version", myVersion, "found"))
cat("Launching H2O version", myVersion)
if(myOS == "Windows") {
tempPath = paste(launchPath, "windows/h2o.bat", sep="/")
if(!file.exists(tempPath)) stop(paste("Cannot open H2OLauncher.jar! Please check if it exists at", tempPath))
shell.exec(tempPath)
}
else {
tempPath = paste(launchPath, "Contents/MacOS/h2o", sep="/")
if(!file.exists(tempPath)) stop(paste("Cannot open H2OLauncher.jar! Please check if it exists at", tempPath))
system(paste("bash ", tempPath))
}
}
|
0a5d24746f65be149dae3a284ae8c64d2054eb68
|
3281ca220cb0c99a6c763ecf78c99a67812537b6
|
/multi-regression.R
|
fec3a0559dc875594e2d0b510a749990827bc39b
|
[] |
no_license
|
deokju12/-multi_variable_analysis
|
d7d406042ae6221d606c52d190649f357158ec54
|
285249ff0a2ab97a978cb47c212e208a25476099
|
refs/heads/master
| 2020-05-28T02:59:10.889960
| 2019-05-27T14:57:56
| 2019-05-27T14:57:56
| 188,861,704
| 0
| 0
| null | null | null | null |
UHC
|
R
| false
| false
| 2,880
|
r
|
multi-regression.R
|
# 1번
#Houseprice.csv는 주택판매가격(price)과 이에 영향을 줄 것으로 판단되는 4가지 설명변수인 세금(tax; 만원), 대지평수(ground; 평), 건물평수(floor; 평), 주택연령(year; 년)을 27개 주택에 대해 조사한 것이다.
# 1. 5개 변수들에 대한 산점도 행렬을 작성하고 변수들 간의 관계를 설명하여라. 특히 판매가격과 세금의 산점도를 보면 특이값 3개가 나타난다. 이들 값의 특성은 무엇인가?
houseprice <- read.csv("houseprice.csv")
head(Houseprice)
pairs(Houseprice[,c("price","tax","ground", "floor","year")])
library(corrplot)
a <- cor(Houseprice)
corrplot(a,method="ellipse")
#시각화
library(ggplot2)
ggplot(data=Houseprice,
aes(x=Houseprice$tax,
y=Houseprice$price)) + geom_point() + geom_smooth(method="lm", se=FALSE)
# 9번 10번 자료는 세금에 비해 집값이 더 비싸다고 볼수 있고 27번 자료는 낸 세금에
# 비해 집값이 더 싸다고 볼수 있다.
par(mfrow = c(2,2))
#2번
#주택 판매가격을 종속변수로, 나머지 4개 변수를 설명변수로 하는 선형회귀모형을 적합하여라.
fit2 =lm(price~tax+ground+floor+year,data=Houseprice)
fit2
# 3번 결정계수는 얼마인가? 주택 판매가격에 유의한 영향을 주는 변수들은 무엇인가? 세금과 floor(건물평수)
summary(fit2)
'결정계수(multiple R-squared) 값은 0.9313 즉 이 다중선형회귀모형의 적합값은 데이터의 실제값
를 아주 잘 설명한다고 볼 수 있다. 표를 보면 p값 이 아주 낮은 ground와 floor(각각 0.00109,8.41e-05)를 볼 수 있는 데 이 두 변수가 다중선형회귀모형에 주택가격에 비교적 많은 영향을 주는 변수라고 할 수 있다.
# 4번
세금이 150만원, 대지평수가 50평, 건물평수가 30평, 주택연령이 3년인 주택의 평균판매
가격에 대한 추정값과 90% 신뢰구간을 구하여라'
fit2 =lm(price~tax+ground+floor+year,data=Houseprice)
predict(fit2, data.frame(tax=100, ground=50, floor=30, year=3),
interval="confidence", level=0.90)
# 5번
# 회귀진단을 위한 그래프들과 잔차 대 설명변수들의 산점도를 그리고 설명하여라
plot(fit2)
par(mfrow= c(2,2))
rs
res <-resid(fit2)
plot(res ~ tax, Houseprice, ylab= "Residuals")
abline(h=0, lty=3)
plot(res ~ ground, Houseprice, ylab= "Residuals")
abline(h=0, lty=3)
plot(res ~ floor, Houseprice, ylab= "Residuals")
abline(h=0, lty=3)
plot(res ~ year, Houseprice, ylab= "Residuals")
abline(h=0, lty=3)
# 판매가격과 세금의 산점도에서 관측된 우측 상단의 특이값 2개를 제외하면 회귀분석 결
# 과에 영향을 줄 것으로 예상되는가?
fit_orin <- lm(price~tax,data=Houseprice)
plot(fit_orin)
|
a37af7cfabcf58c01681e5469753074f8ac9c9d0
|
4201e9b754760dc35fc0aeef9df5a8b9d801c47f
|
/bin/R-3.5.1/src/library/base/man/Bessel.Rd
|
85a91ad64a696096c39e11f27944118f52693f00
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only",
"GPL-1.0-or-later",
"GPL-2.0-or-later",
"LGPL-2.1-only",
"LGPL-3.0-only",
"GPL-3.0-only"
] |
permissive
|
lifebit-ai/exomedepth
|
cbe59cb7fcf2f9183d187f8d466c6620fb1a0c2e
|
5a775ae5e2a247aeadc5208a34e8717c7855d080
|
refs/heads/master
| 2020-03-27T12:55:56.400581
| 2018-10-11T10:00:07
| 2018-10-11T10:00:07
| 146,578,924
| 0
| 0
|
MIT
| 2018-08-29T09:43:52
| 2018-08-29T09:43:51
| null |
UTF-8
|
R
| false
| false
| 6,574
|
rd
|
Bessel.Rd
|
% File src/library/base/man/Bessel.Rd
% Part of the R package, https://www.R-project.org
% Copyright 1995-2018 R Core Team
% Distributed under GPL 2 or later
\name{Bessel}
\title{Bessel Functions}
\alias{bessel}
\alias{Bessel}
\alias{besselI}
\alias{besselJ}
\alias{besselK}
\alias{besselY}
\usage{
besselI(x, nu, expon.scaled = FALSE)
besselK(x, nu, expon.scaled = FALSE)
besselJ(x, nu)
besselY(x, nu)
}
\description{
Bessel Functions of integer and fractional order, of first
and second kind, \eqn{J_{\nu}}{J(nu)} and \eqn{Y_{\nu}}{Y(nu)}, and
Modified Bessel functions (of first and third kind),
\eqn{I_{\nu}}{I(nu)} and \eqn{K_{\nu}}{K(nu)}.
}
\arguments{
\item{x}{numeric, \eqn{\ge 0}.}
\item{nu}{numeric; The \emph{order} (maybe fractional!) of the
corresponding Bessel function.}
\item{expon.scaled}{logical; if \code{TRUE}, the results are
exponentially scaled in order to avoid overflow
(\eqn{I_{\nu}}{I(nu)}) or underflow (\eqn{K_{\nu}}{K(nu)}),
respectively.}
}
\value{
Numeric vector with the (scaled, if \code{expon.scaled = TRUE})
values of the corresponding Bessel function.
The length of the result is the maximum of the lengths of the
parameters. All parameters are recycled to that length.
}
\details{
If \code{expon.scaled = TRUE}, \eqn{e^{-x} I_{\nu}(x)}{exp(-x) I(x;nu)},
or \eqn{e^{x} K_{\nu}(x)}{exp(x) K(x;nu)} are returned.
For \eqn{\nu < 0}{nu < 0}, formulae 9.1.2 and 9.6.2 from Abramowitz &
Stegun are applied (which is probably suboptimal), except for
\code{besselK} which is symmetric in \code{nu}.
The current algorithms will give warnings about accuracy loss for
large arguments. In some cases, these warnings are exaggerated, and
the precision is perfect. For large \code{nu}, say in the order of
millions, the current algorithms are rarely useful.
}
\source{
The C code is a translation of Fortran routines from
\url{http://www.netlib.org/specfun/ribesl}, \samp{../rjbesl}, etc.
The four source code files for bessel[IJKY] each contain a paragraph
\dQuote{Acknowledgement} and \dQuote{References}, a short summary of
which is
\describe{
\item{besselI}{based on (code) by David J. Sookne, see Sookne (1973)\dots
Modifications\dots An earlier version was published in Cody (1983).}
\item{besselJ}{as \code{besselI}}
\item{besselK}{based on (code) by J. B. Campbell (1980)\dots Modifications\dots}
\item{besselY}{draws heavily on Temme's Algol program for
\eqn{Y}\dots and on Campbell's programs for \eqn{Y_\nu(x)}
\dots. \dots heavily modified.}
}
}
\references{
Abramowitz, M. and Stegun, I. A. (1972).
\emph{Handbook of Mathematical Functions}.
Dover, New York;
Chapter 9: Bessel Functions of Integer Order.
In order of \dQuote{Source} citation above:
Sockne, David J. (1973).
Bessel Functions of Real Argument and Integer Order.
\emph{Journal of Research of the National Bureau of Standards},
\bold{77B}, 125--132.
Cody, William J. (1983).
Algorithm 597: Sequence of modified Bessel functions of the first kind.
\emph{ACM Transactions on Mathematical Software}, \bold{9}(2), 242--245.
\doi{10.1145/357456.357462}.
Campbell, J.B. (1980).
On Temme's algorithm for the modified Bessel function of the third kind.
\emph{ACM Transactions on Mathematical Software}, \bold{6}(4), 581--586.
\doi{10.1145/355921.355928}.
Campbell, J.B. (1979).
Bessel functions J_nu(x) and Y_nu(x) of float order and float argument.
\emph{Computer Physics Communications}, \bold{18}, 133--142.
\doi{10.1016/0010-4655(79)90030-4}.
Temme, Nico M. (1976).
On the numerical evaluation of the ordinary Bessel function of the
second kind.
\emph{Journal of Computational Physics}, \bold{21}, 343--350.
\doi{10.1016/0021-9991(76)90032-2}.
}
\seealso{
Other special mathematical functions, such as
\code{\link{gamma}}, \eqn{\Gamma(x)}, and \code{\link{beta}},
\eqn{B(x)}.
}
\author{
Original Fortran code:
W. J. Cody, Argonne National Laboratory \cr
Translation to C and adaptation to \R:
Martin Maechler \email{maechler@stat.math.ethz.ch}.
}
\examples{
require(graphics)
nus <- c(0:5, 10, 20)
x <- seq(0, 4, length.out = 501)
plot(x, x, ylim = c(0, 6), ylab = "", type = "n",
main = "Bessel Functions I_nu(x)")
for(nu in nus) lines(x, besselI(x, nu = nu), col = nu + 2)
legend(0, 6, legend = paste("nu=", nus), col = nus + 2, lwd = 1)
x <- seq(0, 40, length.out = 801); yl <- c(-.8, .8)
plot(x, x, ylim = yl, ylab = "", type = "n",
main = "Bessel Functions J_nu(x)")
for(nu in nus) lines(x, besselJ(x, nu = nu), col = nu + 2)
legend(32, -.18, legend = paste("nu=", nus), col = nus + 2, lwd = 1)
## Negative nu's :
xx <- 2:7
nu <- seq(-10, 9, length.out = 2001)
op <- par(lab = c(16, 5, 7))
matplot(nu, t(outer(xx, nu, besselI)), type = "l", ylim = c(-50, 200),
main = expression(paste("Bessel ", I[nu](x), " for fixed ", x,
", as ", f(nu))),
xlab = expression(nu))
abline(v = 0, col = "light gray", lty = 3)
legend(5, 200, legend = paste("x=", xx), col=seq(xx), lty=seq(xx))
par(op)
x0 <- 2^(-20:10)
plot(x0, x0^-8, log = "xy", ylab = "", type = "n",
main = "Bessel Functions J_nu(x) near 0\n log - log scale")
for(nu in sort(c(nus, nus+0.5)))
lines(x0, besselJ(x0, nu = nu), col = nu + 2)
legend(3, 1e50, legend = paste("nu=", paste(nus, nus+0.5, sep=",")),
col = nus + 2, lwd = 1)
plot(x0, x0^-8, log = "xy", ylab = "", type = "n",
main = "Bessel Functions K_nu(x) near 0\n log - log scale")
for(nu in sort(c(nus, nus+0.5)))
lines(x0, besselK(x0, nu = nu), col = nu + 2)
legend(3, 1e50, legend = paste("nu=", paste(nus, nus + 0.5, sep = ",")),
col = nus + 2, lwd = 1)
x <- x[x > 0]
plot(x, x, ylim = c(1e-18, 1e11), log = "y", ylab = "", type = "n",
main = "Bessel Functions K_nu(x)")
for(nu in nus) lines(x, besselK(x, nu = nu), col = nu + 2)
legend(0, 1e-5, legend=paste("nu=", nus), col = nus + 2, lwd = 1)
yl <- c(-1.6, .6)
plot(x, x, ylim = yl, ylab = "", type = "n",
main = "Bessel Functions Y_nu(x)")
for(nu in nus){
xx <- x[x > .6*nu]
lines(xx, besselY(xx, nu=nu), col = nu+2)
}
legend(25, -.5, legend = paste("nu=", nus), col = nus+2, lwd = 1)
## negative nu in bessel_Y -- was bogus for a long time
curve(besselY(x, -0.1), 0, 10, ylim = c(-3,1), ylab = "")
for(nu in c(seq(-0.2, -2, by = -0.1)))
curve(besselY(x, nu), add = TRUE)
title(expression(besselY(x, nu) * " " *
{nu == list(-0.1, -0.2, ..., -2)}))
}
\keyword{math}
|
79a889afb5b0e5b3c0c4b5ced50601dc3cd7ff9c
|
2d34708b03cdf802018f17d0ba150df6772b6897
|
/googlecivicinfov2.auto/man/RepresentativeInfoResponse.divisions.Rd
|
8772cef9d1c4b0fca5cfc25fc224307c00242181
|
[
"MIT"
] |
permissive
|
GVersteeg/autoGoogleAPI
|
8b3dda19fae2f012e11b3a18a330a4d0da474921
|
f4850822230ef2f5552c9a5f42e397d9ae027a18
|
refs/heads/master
| 2020-09-28T20:20:58.023495
| 2017-03-05T19:50:39
| 2017-03-05T19:50:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 646
|
rd
|
RepresentativeInfoResponse.divisions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/civicinfo_objects.R
\name{RepresentativeInfoResponse.divisions}
\alias{RepresentativeInfoResponse.divisions}
\title{RepresentativeInfoResponse.divisions Object}
\usage{
RepresentativeInfoResponse.divisions()
}
\value{
RepresentativeInfoResponse.divisions object
}
\description{
RepresentativeInfoResponse.divisions Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Political geographic divisions that contain the requested address.
}
\seealso{
Other RepresentativeInfoResponse functions: \code{\link{RepresentativeInfoResponse}}
}
|
702c27a1ba89b0caf97acd6ad239da370658bae7
|
ca0fce0ee341c347193036d0dc36142d5a841d48
|
/ECL_PROJECT_SCRAP.R
|
3cec4d69ce52c0bda9ed145a3f6f348faab1cd51
|
[] |
no_license
|
seanh21/UCL_Awat_Goal_Rule_Analysis
|
ad1514192221832d7f36edc33506918a8ae38fc9
|
762299525deb0bc87bd494ac6e18f3301da16e15
|
refs/heads/master
| 2023-06-10T12:02:37.360429
| 2021-06-29T02:05:18
| 2021-06-29T02:05:18
| 380,922,953
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,055
|
r
|
ECL_PROJECT_SCRAP.R
|
library("tidyverse")
league_data <- read_csv("ginf.csv")
colnames(league_data)
head(league_data)
hg_sum <- sum(league_data[, "fthg"])
ag_sum <- sum(league_data[, "ftag"])
Venue <- c("Home", "Away")
Goals <- c((hg_sum+hg_ecl_sum),(ag_sum+ag_ecl_sum))
df_sum_of_lg <- data.frame(Venue,Goals)
ggplot(df_sum_of_lg, aes(x="", y=Percentage, fill=Venue)) +
geom_bar(stat="identity", width=1) +
coord_polar("y", start=0) +
theme_void()
hg_ecl_sum <- sum(master_ecl_csv_rev2[, "FT Home"])
ag_ecl_sum <- sum(master_ecl_csv_rev2[, "FT Away"])
total_goals <- hg_sum+hg_ecl_sum+ag_sum+ag_ecl_sum
hg_per <- ((hg_sum+hg_ecl_sum)/total_goals)
ag_per <- ((ag_sum+ag_ecl_sum)/total_goals)
df_sum_of_lg$Percentage <- c(hg_per,ag_per)
leg1_hg_ecl_sum <- sum(master_ecl_csv_leg1[, "FT Home"])
leg1_ag_ecl_sum <- sum(master_ecl_csv_leg1[, "FT Away"])
leg2_hg_ecl_sum <- sum(master_ecl_csv_leg2[, "FT Home"])
leg2_ag_ecl_sum <- sum(master_ecl_csv_leg2[, "FT Away"])
leg1_ecl_sum <- leg1_ag_ecl_sum+leg1_hg_ecl_sum
leg2_ecl_sum <- leg2_ag_ecl_sum+leg2_hg_ecl_sum
|
b9056b8a6b0c8babc6923d99f5db6b1fe1f02605
|
f466eb68f09190a8e5972c679f086221021422bd
|
/man/get_tbl.Rd
|
a3907341b3d511d7ba046d1e59a4778706fbc02c
|
[] |
no_license
|
mdelhey/mdutils
|
1ffbd4bbcf737133c67c96d69d4539e4e26f98cd
|
a05ab2cc7fdab24d711b2994db4dcc53f0e2520c
|
refs/heads/master
| 2020-04-10T04:00:27.037351
| 2016-06-02T20:25:29
| 2016-06-02T20:25:29
| 21,291,060
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 464
|
rd
|
get_tbl.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/sql.r
\name{get_tbl}
\alias{get_tbl}
\title{get all rows from a table}
\usage{
get_tbl(con = NULL, x, hive = FALSE)
}
\arguments{
\item{con}{dbi connection}
\item{x}{table name}
}
\description{
get all rows from a table
}
\seealso{
Other sql: \code{\link{collapse_char_vec}};
\code{\link{collap}}; \code{\link{get_htbl}};
\code{\link{get_qry}}; \code{\link{insert.sql}}
}
|
7b1aaf0e19da8e54dd8156684802841082294f99
|
9cb659691e96fdcf3b0485e880cdcae47c7271c4
|
/man/mac_to_binary_string.Rd
|
f000a8c3cb9785e5d7040dc53148fa8bb4ffdebb
|
[] |
no_license
|
petr0vsk/MACtools
|
eecc5e99ff7762667a98e09c99a0355052b81ac6
|
4750d83608b977c64e47fd6753c641e9d604c575
|
refs/heads/master
| 2020-05-02T19:03:37.420472
| 2019-01-28T17:14:34
| 2019-01-28T17:14:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 809
|
rd
|
mac_to_binary_string.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mac-to-binary-string.R
\name{mac_to_binary_string}
\alias{mac_to_binary_string}
\title{Convert MAC address character vector to a binary string representation}
\usage{
mac_to_binary_string(x)
}
\arguments{
\item{x}{character vector of MAC address. Each MAC address can be in any
case, the octets do not need to be 0-prefixed, and it doesn't matter
if the octets are separated by a \code{:} or if they are just contiguous.
If they _are_contiguous, that — by definition — means
each octet is 0-prefixed.}
}
\value{
a character vector of binary strings
}
\description{
Convert MAC address character vector to a binary string representation
}
\examples{
mac_to_binary_string(c("f023b9eb4204", "f0:23:b9:eb:42:4", "F023B9eB4204"))
}
|
8efe42b191395ddc6b7c4718ca0abd181b2e5220
|
04d93fbc0fb3a160cdfbc8aa3d5c258df7b0d0af
|
/man/plotSpliceGraph.Rd
|
d332c4bfd7b1b909060ae4a70d396a16d6f04344
|
[] |
no_license
|
ldg21/SGSeq
|
b279000a73e58514a681d3aa802cdf7ec91a3716
|
6c67388c39853ba5df50c94b5c3fd2457288e825
|
refs/heads/master
| 2021-01-24T08:49:50.563432
| 2020-10-14T19:36:26
| 2020-10-14T19:36:26
| 122,996,617
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,621
|
rd
|
plotSpliceGraph.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots.R
\name{plotSpliceGraph}
\alias{plotSpliceGraph}
\title{Plot splice graph}
\usage{
plotSpliceGraph(x, geneID = NULL, geneName = NULL, eventID = NULL,
which = NULL, toscale = c("exon", "none", "gene"), label = c("id",
"name", "label", "none"), color = "gray", color_novel = color,
color_alpha = 0.8, color_labels = FALSE, border = "fill",
curvature = NULL, ypos = c(0.5, 0.1), score = NULL,
score_color = "darkblue", score_ylim = NULL, score_ypos = c(0.3, 0.1),
score_nbin = 200, score_summary = mean, score_label = NULL,
ranges = NULL, ranges_color = "darkblue", ranges_ypos = c(0.1, 0.1),
main = NULL, tx_view = FALSE, tx_dist = 0.2, short_output = TRUE)
}
\arguments{
\item{x}{\code{SGFeatures} or \code{SGVariants} object}
\item{geneID}{Single gene identifier used to subset \code{x}}
\item{geneName}{Single gene name used to subset \code{x}}
\item{eventID}{Single event identifier used to subset \code{x}}
\item{which}{\code{GRanges} used to subset \code{x}}
\item{toscale}{Controls which parts of the splice graph are drawn to
scale. Possible values are \dQuote{none} (exonic and intronic regions
have constant length), \dQuote{exon} (exonic regions are drawn to scale)
and \dQuote{gene} (both exonic and intronic regions are drawn to scale).}
\item{label}{Format of exon/splice junction labels,
possible values are \dQuote{id} (format E1,... J1,...), \dQuote{name}
(format type:chromosome:start-end:strand), \dQuote{label} for labels
specified in metadata column \dQuote{label}, or \dQuote{none}
for no labels.}
\item{color}{Color used for plotting the splice graph. Ignored if features
metadata column \dQuote{color} is not \code{NULL}.}
\item{color_novel}{Features with missing annotation are
highlighted in \code{color_novel}. Ignored if features
metadata column \dQuote{color} is not \code{NULL}.}
\item{color_alpha}{Controls color transparency}
\item{color_labels}{Logical indicating whether label colors should
be the same as feature colors}
\item{border}{Determines the color of exon borders, can be \dQuote{fill}
(same as exon color), \dQuote{none} (no border), or a valid color name}
\item{curvature}{Numeric determining curvature of plotted splice junctions.}
\item{ypos}{Numeric vector of length two, indicating the vertical
position and height of the exon bins in the splice graph,
specified as fraction of the height of the plotting region
(not supported for \code{tx_view = TRUE})}
\item{score}{\code{RLeList} containing nucleotide-level scores
to be plotted with the splice graph}
\item{score_color}{Color used for plotting scores}
\item{score_ylim}{Numeric vector of length two, determining y-axis range
for plotting scores}
\item{score_ypos}{Numeric vector of length two, indicating the vertical
position and height of the score panel, specified as fraction of the
height of the plotting region}
\item{score_nbin}{Number of bins for plotting scores}
\item{score_summary}{Function used to calculate per-bin score summaries}
\item{score_label}{Label used to annotate score panel}
\item{ranges}{\code{GRangesList} to be plotted with the splice graph}
\item{ranges_color}{Color used for plotting ranges}
\item{ranges_ypos}{Numeric vector of length two, indicating the vertical
position and height of the ranges panel, specified as fraction of the
height of the plotting region}
\item{main}{Plot title}
\item{tx_view}{Plot transcripts instead of splice graph (experimental)}
\item{tx_dist}{Vertical distance between transcripts as fraction of height
of plotting region}
\item{short_output}{Logical indicating whether the returned data frame
should only include information that is likely useful to the user}
}
\value{
\code{data.frame} with information on exon bins and
splice junctions included in the splice graph
}
\description{
Plot the splice graph implied by splice junctions and exon bins.
Invisibly returns a \code{data.frame} with details of plotted
features, including genomic coordinates.
}
\details{
By default, the color of features in the splice graph is
determined by annotation status (see arguments \code{color},
\code{color_novel}) and feature labels are generated automatically
(see argument \code{label}). Alternatively, colors and labels can
be specified via metadata columns \dQuote{color} and
\dQuote{label}, respectively.
}
\examples{
\dontrun{
sgf_annotated <- annotate(sgf_pred, txf_ann)
plotSpliceGraph(sgf_annotated)
}
\dontrun{
sgv_annotated <- annotate(sgv_pred, txf_ann)
plotSpliceGraph(sgv_annotated)
}
NULL
}
\author{
Leonard Goldstein
}
|
10c9ce24f2b70cedf8b9e6cc9b56223c56a39c7f
|
1e9d315bd9880ded26e11acf8ba2a3ebf0eb0fde
|
/sigma.R
|
5391fbab2a79f13c15c96b550252e061ba5e17eb
|
[] |
no_license
|
DmitryKokorin/diffmc
|
cce7de67949037e111ee12003aaa5217dfd0aa6f
|
42490984581eb6967540bb443042890212ecd806
|
refs/heads/master
| 2021-01-19T00:41:46.154106
| 2014-04-05T10:54:48
| 2014-04-05T10:54:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,137
|
r
|
sigma.R
|
#!/usr/bin/Rscript
# Usage: sigma.R <input-file> <output-file> <window-width>
# Computes a sliding-window residual standard error ("sigma") of a linear
# fit of the second moment versus step number, and writes (n, sigma) pairs.
options <- commandArgs(trailingOnly = TRUE)
params.inputFilename <- options[1]
params.outputFilename <- options[2]
params.width <- as.integer(options[3])  # sliding-window width, in rows
# Tab-separated input: time t, six (x, y, z) triples, step count n, and a
# photon count.  Column semantics beyond the names are not documented here
# -- presumably successive distribution moments; confirm with the producer.
data <- read.table(col.names=c("t",
                               "x", "y", "z",
                               "x2", "y2", "z2",
                               "x3", "y3", "z3",
                               "x4", "y4", "z4",
                               "x5", "y5", "z5",
                               "x6", "y6", "z6",
                               "n", "photons"),
                   sep="\t", file=params.inputFilename)
# Residual standard error of a fitted model.
#
# @param lmr An object accepted by stats::resid(), e.g. an "lm" fit.
# @return sqrt(RSS / (n - 2)), the residual standard error assuming a
#   two-parameter (intercept + slope) fit with n residuals.
rmse <- function(lmr)
{
  res <- resid(lmr)
  # Return the last expression directly; the original assigned to a local
  # variable literally named `return`, which only worked by accident because
  # R functions return their last evaluated expression.
  sqrt(sum(res^2) / (length(res) - 2))
}
# Residual standard error of a local linear fit of 0.5*momentum on time over
# the window rows idx..(idx + params.width).
# NOTE(review): reads the global `params.width` parsed from the command line
# above; the assignment to a variable named `return` only works because the
# function returns its last evaluated expression.
sigma <- function(momentum, time, idx)
{
d <- data.frame(momentum=momentum, time=time)
lmr <- lm(0.5*momentum ~ time, data=d, subset=idx:(idx + params.width))
return <- rmse(lmr)
}
# Slide the window across the whole series.  NOTE(review): `range` here
# shadows base::range for the rest of the script.
range <- 1:(length(data$n) - params.width)
sigmas <- sapply(range, function(x) sigma(data$x2, data$n, x))
# Output: two unlabelled columns -- step number n and the local sigma.
write.table(cbind(data$n[range], sigmas), file=params.outputFilename, row.names=FALSE, col.names=FALSE)
|
233e1612fefbf76cd4bdbcc61fbabf3e73c63b96
|
bad132f51935944a52a00e20e90395990afd378a
|
/R/ISOCarrierOfCharacteristics.R
|
9244ec655ec2a712cfea9f385cc91d4b1c8604de
|
[] |
no_license
|
cran/geometa
|
9612ad75b72956cfd4225b764ed8f048804deff1
|
b87c8291df8ddd6d526aa27d78211e1b8bd0bb9f
|
refs/heads/master
| 2022-11-10T21:10:25.899335
| 2022-10-27T22:45:13
| 2022-10-27T22:45:13
| 92,486,874
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 952
|
r
|
ISOCarrierOfCharacteristics.R
|
#' ISOCarrierOfCharacteristics
#'
#' @docType class
#' @importFrom R6 R6Class
#' @export
#' @keywords ISO carrierOfCharacteristics
#' @return Object of \code{\link{R6Class}} for modelling an ISOCarrierOfCharacteristics
#' @format \code{\link{R6Class}} object.
#'
#' @references
#' ISO 19110:2005 Methodology for Feature cataloguing
#'
#' @author Emmanuel Blondel <emmanuel.blondel1@@gmail.com>
#'
# Concrete R6 binding for the ISO 19110 "FC_CarrierOfCharacteristics"
# element; all behaviour is inherited from ISOAbstractCarrierOfCharacteristics.
ISOCarrierOfCharacteristics <- R6Class("ISOCarrierOfCharacteristics",
   inherit = ISOAbstractCarrierOfCharacteristics,
   private = list(
     # XML element name and namespace prefix -- presumably consumed by the
     # parent class when encoding/decoding XML; confirm in the base class.
     xmlElement = "FC_CarrierOfCharacteristics",
     xmlNamespacePrefix = "GFC"
   ),
   public = list(
     #'@description Initializes object
     #'@param xml object of class \link{XMLInternalNode-class}
     #'@param defaults defaults
     initialize = function(xml = NULL, defaults = NULL){
       # Delegate construction entirely to the abstract parent class.
       super$initialize(xml = xml, defaults = defaults)
     }
   )
)
|
d0c63880b0ea9508ac2e9bc5239e28c30245a36e
|
babdbc9f4b726eba0f6cb3ee496b0c7d8b722a60
|
/pipeline_cytof/R/CYTOFclust.R
|
beec66d614613bd335040b7e3d18bdb12e7d8e51
|
[] |
no_license
|
Tariq-K/CYTOF
|
a8f08ebd970fb5a26a3f38820b57fcd230570d5d
|
685e43587bece01e30d811ebba3ab0de2e8924cd
|
refs/heads/master
| 2020-03-28T06:16:55.286232
| 2020-02-27T13:13:52
| 2020-02-27T13:13:52
| 147,824,756
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,157
|
r
|
CYTOFclust.R
|
# CYTOFclust.R -- cluster normalised CyTOF expression data with PhenoGraph
# and write a per-cell cluster-assignment table (TSV).
# packages
stopifnot(
        require(cytofkit),
        require(optparse),
        require(Rphenograph)
)
# options
option_list <- list(
    make_option(c("--infile", "-i"), help="normalised CYTOF data"),
    make_option(c("--outfile", "-o"), help="RData object containing clustering info"),
    make_option(c("--k"), default=30, help="k parameter for Rphenograph"),
    make_option(c("--markers", "-m"), default=NULL, help="Comma seperated list of markers (in caps)"),
    make_option(c("--noClusters"), default=NULL, help="No clusters parameter for FlowSOM")
    )
opts <- parse_args(OptionParser(option_list=option_list))
# load data
data <- read.table(opts$infile, sep="\t")
clust_markers <- strsplit(opts$markers, ",")[[1]] # get markers for clustering as vector
data <- data[unique(clust_markers)] # subset data on markers for tSNE
# clustering
# NOTE(review): --noClusters is declared above but never used below; the
# FlowSOM path appears unimplemented.  Also the --outfile help text says
# "RData object" but the output is written with write.table -- confirm.
clust <- Rphenograph(data, k=as.integer(opts$k))
clusters <- as.data.frame(factor(membership(clust[[2]])))
# annotate df
colnames(clusters) <- c(paste0("phenograph_k", opts$k))
rownames(clusters) <- rownames(data) # add index
# save df
write.table(clusters, file=opts$outfile, sep="\t")
|
facf3299e898a1017614b959ab88feed3e07bcf5
|
27f53c5a9aa2d0962b5cd74efd373d5e9d9e0a99
|
/todo-files/TuneControlMies.R
|
34bfbe7bcbe24588139b56f09edce8a8390bb11b
|
[] |
no_license
|
dickoa/mlr
|
aaa2c27e20ae9fd95a0b63fc5215ee373fa88420
|
4e3db7eb3f60c15ce2dfa43098abc0ed84767b2d
|
refs/heads/master
| 2020-12-24T13:44:59.269011
| 2015-04-18T19:57:42
| 2015-04-18T19:57:42
| 31,710,800
| 2
| 0
| null | 2015-04-18T19:57:43
| 2015-03-05T11:29:18
|
R
|
UTF-8
|
R
| false
| false
| 1,077
|
r
|
TuneControlMies.R
|
##' @include TuneControl.R
#roxygen()
#
##' Control structure for MI-ES tuning.
##' @exportClass TuneControlMies
##' @seealso \code{\link{makeTuneControlMies}}
#
#setClass(
# "TuneControlMies",
# contains = c("TuneControl")
#)
#
#
##' Create control structure for MI-ES tuning.
##'
##' @title Control for MI-ES tuning.
##' @param path [\code{logical(1)}]\cr
##' Should optimization path be saved? Default is TRUE.
##' @param same.resampling.instance [\code{logical(1)}]\cr
##' Should the same resampling instance be used for all evaluations to reduce variance? Default is \code{TRUE}.
##' @param ... Further control parameters passed to the \code{control} argument of \code{\link[mies]{mies}}.
##' @return [\code{\linkS4class{TuneControlMies}}].
##' @export
#makeTuneControlMies = function(path=TRUE, same.resampling.instance=TRUE, ...) {
# checkArg(path, "logical", len=1, na.ok=FALSE)
# checkArg(same.resampling.instance, "logical", len=1, na.ok=FALSE)
# new("TuneControlMies", path=path, same.resampling.instance=same.resampling.instance, start=list(), ...)
#}
|
515ad11488ed3349ef204ead3c6c7770de239171
|
c64b12fb6dcf0122e5dd8417dfef59987508a764
|
/tests/testthat/test_inspecting.R
|
9dc8f54ca28d7555d63313dcc1097e26755ef9a7
|
[] |
no_license
|
SciDoPhenIA/phenomis
|
611a6c92e59c40ab049c2d6f14c078e04afbc773
|
1e83ce6997a8d16b89ce5f0f899a1570004ebc0e
|
refs/heads/master
| 2022-06-12T16:43:07.328260
| 2022-06-09T12:40:17
| 2022-06-09T12:40:17
| 253,553,101
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,862
|
r
|
test_inspecting.R
|
testthat::context("Testing 'inspecting'")
# Regression tests: each block loads a bundled example data set, runs
# inspecting() with figures and reports disabled, and pins one computed
# quality metric against a previously observed reference value.
testthat::test_that("inspecting-se", {
  sacurine.se <- reading(system.file("extdata/W4M00001_Sacurine-statistics", package = "phenomis"))
  sacurine.se <- inspecting(sacurine.se,
                            figure.c = "none",
                            report.c = "none")
  # Pool coefficient of variation for one feature (SummarizedExperiment).
  testthat::expect_equivalent(rowData(sacurine.se)["(2-methoxyethoxy)propanoic acid isomer", "pool_CV"],
                              0.3160307,
                              tolerance = 1e-6)
  proteo.eset <- reading(system.file("extdata/prometis/proteomics", package = "phenomis"))
  proteo.eset <- inspecting(proteo.eset,
                            figure.c = "none",
                            report.c = "none")
  # Sample-level decile p-value for one sample.
  testthat::expect_equivalent(colData(proteo.eset)["s1", "deci_pval"],
                              0.106289011,
                              tolerance = 1e-6)
})
testthat::test_that("inspecting-mae", {
  prometis.mae <- reading(system.file("extdata/prometis", package = "phenomis"))
  prometis.mae <- inspecting(prometis.mae,
                             figure.c = "none",
                             report.c = "none")
  # Hotelling p-value for one sample of the metabolomics assay.
  testthat::expect_equivalent(colData(prometis.mae[["metabolomics"]])["s1", "hotel_pval"],
                              0.3350008,
                              tolerance = 1e-6)
})
testthat::test_that("inspecting-eset", {
  # Same metrics as "inspecting-se", but via the legacy ExpressionSet output
  # (output.c = "set") and the Biobase accessors.
  sacurine.eset <- reading(system.file("extdata/W4M00001_Sacurine-statistics", package = "phenomis"),
                           output.c = "set")
  sacurine.eset <- inspecting(sacurine.eset,
                              figure.c = "none",
                              report.c = "none")
  testthat::expect_equivalent(Biobase::fData(sacurine.eset)["(2-methoxyethoxy)propanoic acid isomer", "pool_CV"],
                              0.3160307,
                              tolerance = 1e-6)
  proteo.eset <- reading(system.file("extdata/prometis/proteomics", package = "phenomis"),
                         output.c = "set")
  proteo.eset <- inspecting(proteo.eset,
                            figure.c = "none",
                            report.c = "none")
  testthat::expect_equivalent(Biobase::pData(proteo.eset)["s1", "deci_pval"],
                              0.106289011,
                              tolerance = 1e-6)
})
testthat::test_that("inspecting-mset", {
  # Legacy multi-set path for the multi-omics example.
  prometis.mset <- reading(system.file("extdata/prometis", package = "phenomis"), output.c = "set")
  prometis.mset <- inspecting(prometis.mset,
                              figure.c = "none",
                              report.c = "none")
  testthat::expect_equivalent(Biobase::pData(prometis.mset)[["metabolomics"]]["s1", "hotel_pval"],
                              0.3350008,
                              tolerance = 1e-6)
})
|
c15f3bb5ac2a58db8678178da08ea01bc468911a
|
a05cd3990215b3b6213d0feee3c928ec9fd19aaa
|
/tests/testthat.R
|
29b3ce8929e321a5e1866f66eabdfef8345370a8
|
[] |
no_license
|
cran/rfinterval
|
e215d6af113f8350bc74878d31404b4fa9907303
|
ad2d8758c179a2bd068e8e1c764ca5881bf7126a
|
refs/heads/master
| 2020-12-22T17:58:21.738122
| 2019-07-18T15:40:04
| 2019-07-18T15:40:04
| 236,881,683
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 962
|
r
|
testthat.R
|
#context("Coverage")
library(testthat)
library(rfinterval)
#test_check("rfinterval")
# Shuffle the rows so the contiguous train/test split below is random with
# respect to the original row order.
BeijingPM25 <- BeijingPM25[sample.int(n=nrow(BeijingPM25), nrow(BeijingPM25)), ]
#devtools::use_data(BeijingPM25, overwrite = TRUE)
# Build 90% prediction intervals (alpha = 0.1) for pm2.5 with three methods
# on a 1000/1000 train/test split.
output <- rfinterval(pm2.5~.,
                     train_data = BeijingPM25[1:1000, ],
                     test_data = BeijingPM25[1001:2000, ],
                     method = c("oob", "split-conformal", "quantreg"),
                     symmetry = TRUE,
                     seed = 2019,
                     alpha = 0.1)
y <- BeijingPM25[1001:2000, "pm2.5"]
# Empirical coverage of each interval type on the held-out responses.
oob_coverage <- mean(output$oob_interval$lo < y & output$oob_interval$up > y)
sc_coverage <- mean(output$sc_interval$lo < y & output$sc_interval$up > y)
quantreg_coverage <- mean(output$quantreg_interval$lo < y & output$quantreg_interval$up > y)
# NOTE(review): nominal coverage is 0.9; asserting > 0.1 is a very loose
# smoke test -- confirm a tighter bound is not intended.
test_that("Check coverage",{
  expect_true(oob_coverage>0.1)
  expect_true(sc_coverage>0.1)
  expect_true(quantreg_coverage>0.1)
})
|
d6055571bcd5f25551280e78ecd9b99a7122e46a
|
1cacac56c2f368f02c814966086f22d0c9ef734d
|
/man/tokens_dutchclauses.Rd
|
7a98487f379e29bc4fb3d0747dfd06b6e5073c52
|
[] |
no_license
|
vanatteveldt/rsyntax
|
5dff4da5c05acd4925aab083c103683a922fda99
|
531f864da09aedaed88cc385b16eae05167b069a
|
refs/heads/master
| 2022-06-22T09:46:21.871170
| 2022-06-06T13:00:41
| 2022-06-06T13:00:41
| 44,437,830
| 32
| 7
| null | null | null | null |
UTF-8
|
R
| false
| true
| 322
|
rd
|
tokens_dutchclauses.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.r
\docType{data}
\name{tokens_dutchclauses}
\alias{tokens_dutchclauses}
\title{Example tokens for Dutch clauses}
\format{
data.frame
}
\usage{
data(tokens_dutchclauses)
}
\description{
Example tokens for Dutch clauses
}
\keyword{datasets}
|
c78bb549ebcc735f18ba888fd8fa39de72aa7a81
|
a6f39c13bb49c330337cf24a9a502326fa6d8199
|
/granule-functions/findNearestMovers3.R
|
51fc087729992b8284a4f4f3bf2f6a00a720d031
|
[] |
no_license
|
apadr007/GranMod
|
5779448e91f4fa0466bb6e1b1433246e656c3690
|
1cb0dd96cf20a66d0789d3ee341d4397bc08886a
|
refs/heads/master
| 2020-04-03T07:10:16.879088
| 2017-06-01T21:41:01
| 2017-06-01T21:41:01
| 46,621,577
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 726
|
r
|
findNearestMovers3.R
|
# For each row i of `t`, find the indices j != i whose first two coordinates
# are both within 0.4 of row i's coordinates (a Chebyshev-style neighbourhood
# on columns 1 and 2).
#
# @param t matrix or data.frame with at least two numeric columns.
# @return list of length nrow(t); element i is an integer vector of
#   neighbouring row indices (ascending), integer(0) when there are none.
#
# Rewritten from two nested scalar loops plus three filtering passes into a
# single vectorized comparison per row: O(n^2) work remains (all pairs must
# be compared) but without per-element interpreter overhead.  Rows containing
# NA coordinates are simply never matched (the original errored on NA input),
# which is a backward-compatible generalization.
findNearestMovers3 = function(t){
  n <- nrow(t)
  z <- vector("list", n)
  for (i in seq_len(n)) {
    close_x <- abs(t[, 1] - t[i, 1]) <= 0.4
    close_y <- abs(t[, 2] - t[i, 2]) <= 0.4
    # which() drops NAs and returns ascending integer indices, matching the
    # original's intersect() of ascending per-axis index sets.
    z[[i]] <- which(close_x & close_y & seq_len(n) != i)
  }
  z
}
|
753a0670d2ba5dbae10b202d640b541ff5937fdd
|
2d34708b03cdf802018f17d0ba150df6772b6897
|
/googleplusDomainsv1.auto/man/Activity.object.statusForViewer.Rd
|
bbbb360fd812a6d29e02c15fdd451917f7129882
|
[
"MIT"
] |
permissive
|
GVersteeg/autoGoogleAPI
|
8b3dda19fae2f012e11b3a18a330a4d0da474921
|
f4850822230ef2f5552c9a5f42e397d9ae027a18
|
refs/heads/master
| 2020-09-28T20:20:58.023495
| 2017-03-05T19:50:39
| 2017-03-05T19:50:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,177
|
rd
|
Activity.object.statusForViewer.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plusDomains_objects.R
\name{Activity.object.statusForViewer}
\alias{Activity.object.statusForViewer}
\title{Activity.object.statusForViewer Object}
\usage{
Activity.object.statusForViewer(canComment = NULL, canPlusone = NULL,
canUpdate = NULL, isPlusOned = NULL, resharingDisabled = NULL)
}
\arguments{
\item{canComment}{Whether the viewer can comment on the activity}
\item{canPlusone}{Whether the viewer can +1 the activity}
\item{canUpdate}{Whether the viewer can edit or delete the activity}
\item{isPlusOned}{Whether the viewer has +1'd the activity}
\item{resharingDisabled}{Whether reshares are disabled for the activity}
}
\value{
Activity.object.statusForViewer object
}
\description{
Activity.object.statusForViewer Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Status of the activity as seen by the viewer.
}
\seealso{
Other Activity functions: \code{\link{Activity.actor.clientSpecificActorInfo.youtubeActorInfo}},
\code{\link{Activity.actor.clientSpecificActorInfo}},
\code{\link{Activity.actor.image}},
\code{\link{Activity.actor.name}},
\code{\link{Activity.actor.verification}},
\code{\link{Activity.actor}},
\code{\link{Activity.object.actor.clientSpecificActorInfo.youtubeActorInfo}},
\code{\link{Activity.object.actor.clientSpecificActorInfo}},
\code{\link{Activity.object.actor.image}},
\code{\link{Activity.object.actor.verification}},
\code{\link{Activity.object.actor}},
\code{\link{Activity.object.attachments.embed}},
\code{\link{Activity.object.attachments.fullImage}},
\code{\link{Activity.object.attachments.image}},
\code{\link{Activity.object.attachments.previewThumbnails}},
\code{\link{Activity.object.attachments.thumbnails.image}},
\code{\link{Activity.object.attachments.thumbnails}},
\code{\link{Activity.object.attachments}},
\code{\link{Activity.object.plusoners}},
\code{\link{Activity.object.replies}},
\code{\link{Activity.object.resharers}},
\code{\link{Activity.object}},
\code{\link{Activity.provider}}, \code{\link{Activity}},
\code{\link{activities.insert}}
}
|
19c0ad92e18f495310e733693c3234d699fea9d5
|
7f83d592c5e502a6675aedb199708856fda304c9
|
/R/trunckGUI.R
|
01c9e0ce48b791a32a06b584a510021de2db3f0b
|
[] |
no_license
|
cran/StatFingerprints
|
f3dd923caacc8b73a27ee9eed6bcfee991a48f33
|
7c31db057c4cc796f3951eaace3854413b2bd2aa
|
refs/heads/master
| 2020-06-04T10:33:56.131798
| 2010-05-26T00:00:00
| 2010-05-26T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,827
|
r
|
trunckGUI.R
|
# Tcl/Tk dialog that lets the user define the retained range of all
# fingerprint profiles.  Operates on package-level globals: reads mat.align
# and mat.baseline, writes mat.range and mat.analyse via <<-.
trunckGUI <-
function()
{
checkprofile()
# NOTE(review): sum(x) == length(x) (i.e. "all entries are 1") is used as a
# sentinel meaning the corresponding step has not been run -- confirm this
# convention in checkprofile()/the package state.
if(sum(mat.align)==length(mat.align))
{
tkmessageBox(message="Attention: profiles are not aligned")
stop()
}
if(sum(mat.baseline)==length(mat.baseline))
{
tkmessageBox(message="Attention: baseline must be proceed before range to be efficient")
stop()
}
# Work on the aligned matrix (guard above already ensured it exists).
if(sum(mat.align)!=length(mat.align)) m<-mat.align
tt <- tktoplevel()
tkwm.title(tt,"Define the range of all fingerprint profiles")
tkgrid(tklabel(tt,text=" "))
# Callback: interactively truncate the profiles (trunck) and publish the
# result to the global environment for the downstream analysis steps.
define.range<-function()
{
m<-trunck(mat=m)
rownames(m)<-rownames(mat.baseline)
mat.range<-m
mat.range<<-mat.range
mat.analyse<-m
mat.analyse<<-mat.analyse
tkmessageBox(message="Range of the fingerprint profiles successfully defined")
dev.off()
tkdestroy(tt)
}
# Callback: show the help image shipped with the package.
# NOTE(review): .libPaths() can return several paths; paste() would then
# produce several candidate file paths -- confirm behaviour on such setups.
help.define.range<-function()
{
ter <- tktoplevel()
tkgrid(tklabel(ter,text=""))
tkwm.title(ter,"Define the range of the profiles")
zzz<-file.path(paste(.libPaths(), "/StatFingerprints/range.GIF",sep=""))
icnn<-tkimage.create("photo", file = zzz)
tcltklab <- tklabel(ter, image = icnn)
tkgrid(tcltklab)
tkgrid(tklabel(ter,text=""))
tkgrid(tkbutton(ter,text="Cancel",command=function() tkdestroy(ter)))
tkgrid(tklabel(ter,text=""))
}
# Button row: apply / cancel / help.  NOTE(review): "fingerpint" typo in the
# button label below is a runtime string and is left unchanged here.
t1<-tkframe(tt)
b1<-tkbutton(t1,text="Define range of the fingerpint profiles",command=define.range)
b2<-tkbutton(t1,text="Cancel",command=function() tkdestroy(tt))
b3<-tkbutton(t1,text="Help picture",command=help.define.range)
tkpack(b1,b2,b3,side="left")
tkgrid(t1)
tkgrid(tklabel(tt,text=" "))
tkfocus(tt)
}
|
202715de63da05ef85ed48760f3ea09cd1951ae3
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/shiny/examples/exprToFunction.Rd.R
|
3b9e6d1a09fb87af1185d17a5e30c69baae02813
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 859
|
r
|
exprToFunction.Rd.R
|
# Auto-generated example script for shiny::exprToFunction.
library(shiny)
### Name: exprToFunction
### Title: Convert an expression to a function
### Aliases: exprToFunction
### ** Examples
# Example of a new renderer, similar to renderText
# This is something that toolkit authors will do
renderTriple <- function(expr, env=parent.frame(), quoted=FALSE) {
  # Convert expr to a function
  func <- shiny::exprToFunction(expr, env, quoted)
  # The returned render function re-evaluates the expression and repeats its
  # value three times, comma-separated.
  function() {
    value <- func()
    paste(rep(value, 3), collapse=", ")
  }
}
# Example of using the renderer.
# This is something that app authors will do.
values <- reactiveValues(A="text")
## Not run:
##D # Create an output object
##D output$tripleA <- renderTriple({
##D   values$A
##D })
## End(Not run)
# At the R console, you can experiment with the renderer using isolate()
tripleA <- renderTriple({
  values$A
})
isolate(tripleA())
# "text, text, text"
|
686304b754d14b4eca70ce6bf505a436cccb138f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/SoyNAM/examples/BLUP.Rd.R
|
140d6b5c82cca493c9095da984869479b67e4b73
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 160
|
r
|
BLUP.Rd.R
|
# Auto-generated example script for SoyNAM::BLUP.
library(SoyNAM)
### Name: BLUP
### Title: Best Linear Unbias Predictor
### Aliases: BLUP ENV
### ** Examples
# BLUPs for the "yield" trait in families 2-3 across environments 1-2.
Test=BLUP(trait="yield",family=2:3,env=1:2)
|
7815d2bbd00c527d583f2e4a09779f52dd751cef
|
1d0a2f1495a9a9c7e4d31ee1a99de93aee360ac7
|
/tests/testthat/test_dynamics.R
|
0694b088765844f88b81be17c36cadb8d0f7ec55
|
[] |
no_license
|
cran/dfvad
|
3e47cb142308135cec2d64796df8e8d4b7438469
|
2cff6b6e48e1e10521a295670819cea2fc40c876
|
refs/heads/master
| 2021-10-27T23:53:00.254621
| 2021-10-15T08:30:02
| 2021-10-15T08:30:02
| 245,387,739
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 459
|
r
|
test_dynamics.R
|
context("Testing firm dynamics")
test_that("Firm dynamics with different methods", {
  # Fixture: element 1 is the input data frame, element 2 holds the expected
  # dynamics() output keyed by the upper-cased method code.
  fixture <- readRDS(system.file("extdata", "test_dynamics.rds", package = "dfvad"))
  input_df <- fixture[[1]]
  expected <- fixture[[2]]
  # Every supported decomposition method must reproduce its stored result.
  for (typ in c("bhc", "gr", "fhk", "bg", "df", "mp")) {
    result <- dynamics(input_df, "tfp", "share", "firm", "period", typ = typ)
    expect_equal(result, expected[[toupper(typ)]])
  }
})
|
a6c8c2cdb082e8323069deccb136b24894c323df
|
02aa5b132bc995fcb21a00f403c432b2b6b82e76
|
/man/associations.Rd
|
3e3ebcb152fdfd7d3856a1eb4a5d63863986911e
|
[] |
no_license
|
ropensci/rusda
|
ea9257c1705e57392c4595f6f737418360abe6dc
|
fb492630f040a9e355b28606179a0863f753cc74
|
refs/heads/master
| 2021-06-19T06:57:26.450830
| 2021-01-28T13:31:15
| 2021-01-28T13:31:15
| 40,041,385
| 15
| 6
| null | 2021-01-28T13:31:16
| 2015-08-01T09:07:41
|
R
|
UTF-8
|
R
| false
| true
| 3,182
|
rd
|
associations.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/associations.R
\name{associations}
\alias{associations}
\title{Downloads associations for input species from SMML Fungus-Host DB}
\usage{
associations(x, database = c("FH", "SP", "both"),
spec_type = c("plant", "fungus"), clean = TRUE, syn_include = TRUE,
process = TRUE, db = "ncbi")
}
\arguments{
\item{x}{a vector of class \code{character} containing fungal or plant species names or a genus name (see Details)}
\item{database}{a character string specifying the databases that should be queried. Valid are
\code{"FH"} (Fungus-Host Distributions), \code{"SP"} (Specimens) or \code{"both"} databases}
\item{spec_type}{a character string specifying the type of \code{x}.
Can be either \code{"plant"} or \code{"fungus"}}
\item{clean}{logical, if \code{TRUE} a cleaning step is run of the resulting associations list}
\item{syn_include}{logical, if \code{TRUE} associations for synonyms are searched and added. For a
complete synonyms list check \code{rusda::synonyms}}
\item{process}{logical, if \code{TRUE} downloading and extraction process is displayed}
\item{db}{If x is higher than species level, all species for the higher taxon are retrieved using the function taxize::downstream. Here one of ITIS (itis), Catalogue of Life (col), GBIF (gbif), or NCBI (ncbi) has to be selected. NCBI is default.}
}
\value{
an object of class \code{list}.
First is synonyms, second is associations. Synonyms is a
vector of mode \code{list} with synonyms for \code{x}. Notice: This is not a
complete list of synonym data in the database. This is the list of synonyms that contain data for
the input \code{x}. For a complete synonyms list check \code{rusda::synonyms} or (if needed) for fungi R package rmycobank.
Associations is a vector of mode \code{list} of associations for \code{x}
}
\description{
Searches and downloads associations from SMML Fungus-Hosts Distributions and Specimens database
for fungus or plant species input vector
}
\details{
The Fungus-Hosts distributions database 'FH' comprises data compiled from Literature. In
the uncleaned output all kinds of unspecified substrates are documented like "submerged wood".
Cleaned data displays Linnean names only and species names with either "subsp.", "f. sp.", "f.",
"var.". The Specimens database comprises entries from field collections.
If genera names are supplied, then species are derived from the NCBI taxonomy.
}
\examples{
\dontrun{
## Example for species name(s) as input
x <- "Fagus sylvatica"
pathogens <- associations(x, database = "both", clean = TRUE, syn_include = TRUE,
spec_type = "plant", process = TRUE)
x <- "Rosellinia ligniaria"
hosts <- associations(x, database = "both", clean = TRUE, syn_include = TRUE,
spec_type = "fungus", process = TRUE)
is.element("Rosellinia ligniaria", pathogens$association[[1]])
is.element("Fagus sylvatica", hosts$association[[1]])
## Example for genus/genera name(s) as input
x <- "Zehneria"
# or
x <- c("Zehneria", "Momordica")
hosts <- associations(x, database = "both", clean = TRUE, syn_include = TRUE,
spec_type = "plant", process = TRUE)
}
}
\author{
Franz-Sebastian Krah
}
|
f0517e8fe258a1d39b4cae7c03d03cfb7be1e1db
|
0f104ea64886750d6c5f7051810b4ee39fa91ba9
|
/inst/test-data/project-survey/expected/default.R
|
460731323113bac284a2400cb2a83e8058c34cbb
|
[
"MIT"
] |
permissive
|
OuhscBbmc/REDCapR
|
3ca0c106e93b14d55e2c3e678f7178f0e925a83a
|
34f2154852fb52fb99bccd8e8295df8171eb1c18
|
refs/heads/main
| 2023-07-24T02:44:12.211484
| 2023-07-15T23:03:31
| 2023-07-15T23:03:31
| 14,738,204
| 108
| 43
|
NOASSERTION
| 2023-09-04T23:07:30
| 2013-11-27T05:27:58
|
R
|
UTF-8
|
R
| false
| false
| 1,239
|
r
|
default.R
|
# Expected-data fixture: deparsed tibble ("spec_tbl_df") of two survey
# records.  NOTE(review): presumably compared verbatim against the output of
# a REDCap read in the project-survey tests -- confirm against the test that
# loads this file before editing any values.
structure(list(participant_id = c(1, 2), redcap_survey_identifier = c(NA,
NA), prescreening_survey_timestamp = structure(c(1520351563,
1520351595), class = c("POSIXct", "POSIXt"), tzone = "UTC"),
    dob = structure(c(17596, 17595), class = "Date"), email = c("aaa@bbb.com",
    "ccc@ddd.com"), has_diabetes = c(1, 0), consent___1 = c(1,
    0), prescreening_survey_complete = c(2, 2), participant_info_survey_timestamp = c(NA,
    NA), first_name = c(NA, NA), last_name = c(NA, NA), address = c(NA,
    NA), telephone_1 = c(NA, NA), ethnicity = c(NA, NA), race = c(NA,
    NA), sex = c(NA, NA), height = c(NA, NA), weight = c(NA,
    NA), participant_info_survey_complete = c(0, 0), participant_morale_questionnaire_timestamp = c("[not completed]",
    NA), pmq1 = c(NA, NA), pmq2 = c(NA, NA), pmq3 = c(NA, NA),
    pmq4 = c(NA, NA), participant_morale_questionnaire_complete = c(0,
    0), complete_study = c(NA, NA), withdraw_date = c(NA, NA),
    withdraw_reason = c(NA, NA), date_visit_4 = c(NA, NA), discharge_date_4 = c(NA,
    NA), discharge_summary_4 = c(NA, NA), study_comments = c(NA,
    NA), completion_data_complete = c(0, 0)), row.names = c(NA,
-2L), class = c("spec_tbl_df", "tbl_df", "tbl", "data.frame"))
|
6a1cbca04a6df4faf6a542fb291b2dddd02886ef
|
a588dd1a34555dd71c898c82fbc7016dcc9cbdb3
|
/DepressionModels/R/main.R
|
364ee20d4694473870676f310aaf9c8ca8149283
|
[] |
no_license
|
NEONKID/StudyProtocolSandbox
|
5e9b0d66d88a610a3c5cacb6809c900a36bc35c3
|
c26bd337da32c6eca3e5179c78ac5c8f91675c0f
|
refs/heads/master
| 2020-03-23T14:02:11.887983
| 2018-10-19T05:33:13
| 2018-10-19T05:33:13
| 141,651,747
| 0
| 1
| null | 2018-07-20T02:10:06
| 2018-07-20T02:10:06
| null |
UTF-8
|
R
| false
| false
| 1,018
|
r
|
main.R
|
# Entry point: extract patient-level data from a CDM database and apply the
# outcome-id-2 depression models for external validation.
# NOTE(review): hard-coded Windows paths ('s:/fftemp', 'S:/externVal') and
# scratch schema; connection details come from environment variables with
# user/password NULL -- presumably integrated authentication for PDW,
# confirm.  `outputDir` is assigned but never used below; applyModel writes
# under getwd() instead.
main <- function(){
  library(DatabaseConnector)
  library(PatientLevelPrediction)
  library(DepressionModels)
  options('fftempdir' = 's:/fftemp')
  connectionDetails <- createConnectionDetails(dbms = "pdw",
                                               server = Sys.getenv('server'),
                                               port = Sys.getenv('port'),
                                               user = NULL,
                                               password = NULL)
  cdmDatabaseSchema <- Sys.getenv('mdcr')
  targetDatabaseSchema <- 'scratch.dbo'
  outputDir <- 'S:/externVal'
  # 1) first create the data in the data cdm_database
  plpData <- extractData(connectionDetails,
                         cdmDatabaseSchema,
                         targetDatabaseSchema,
                         targetCohortTable = 'extValCohort',
                         targetCohortId=1, outcomeCohortId = 2:22)
  # 2) apply each of the outcome id 2 models
  applyModel(plpData, outcomeCohortId=2, outputDir=file.path(getwd(), 'externalValidation'))
}
|
6c5d087c43ab6bc4fcd7e2e697d2afd2d3168e24
|
dfc54f1755f9eea3037f93b40a9aec8eeed57783
|
/workout2/saving-investment-simulation/app.R
|
1635c6a198100bd465a7d1371dd782c40c67c0a5
|
[] |
no_license
|
stat133-sp19/hw-stat133-mingyueyang
|
eea107ce497f1847a98663b14e9610643072d042
|
09dc2477cc3ec20e2f43f4e05464dd0ed374be9d
|
refs/heads/master
| 2020-04-28T08:04:20.557614
| 2019-05-03T23:42:20
| 2019-05-03T23:42:20
| 175,114,010
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,837
|
r
|
app.R
|
#
# Shiny app: savings-investment simulation comparing three contribution
# modes (no contribution, fixed annual contribution, growing annual
# contribution) over a configurable horizon.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#

library(shiny)
library(ggplot2)
library(dplyr)

# --- Financial helpers -------------------------------------------------------
# These were previously duplicated verbatim inside both renderPlot() and
# renderPrint(); they are defined once here and shared via a reactive.

# Future value of a single deposit `amount` compounded at `rate` for `years`.
future_value <- function(amount = 1000, rate = 0.05, years = 1) {
  amount * (1 + rate)^years
}

# Future value of a fixed annual contribution (ordinary annuity).
annuity <- function(contrib = 200, rate = 0.05, years = 1) {
  contrib * (((1 + rate)^years - 1) / rate)
}

# Future value of an annual contribution growing at `growth` per year.
# NOTE(review): divides by (rate - growth); rate == growth yields NaN/Inf,
# exactly as in the original implementation -- confirm desired behavior.
growing_annuity <- function(contrib = 200, rate = 0.05, growth = 0.03, years = 1) {
  contrib * (((1 + rate)^years - (1 + growth)^years) / (rate - growth))
}

# Year-by-year balances for the three investment modes.
#
# ini:    initial amount; contri: annual contribution
# r, g:   return and growth rates as fractions (not percent)
# yr:     number of years simulated
# Returns a data.frame with columns year, no_contrib, fixed_contrib,
# growing_contrib; year 0 holds the initial amount.
simulate_balances <- function(ini, contri, r, g, yr) {
  no_contrib <- rep(ini, yr + 1)
  fixed_contrib <- rep(ini, yr + 1)
  growing_contrib <- rep(ini, yr + 1)
  # seq_len() handles yr == 0 correctly; the original `1:yr` iterated
  # over c(1, 0), grew the vectors to length 2 and then failed when
  # assembling the data.frame.
  for (i in seq_len(yr)) {
    fv <- future_value(amount = ini, rate = r, years = i)
    no_contrib[i + 1] <- fv
    fixed_contrib[i + 1] <- fv + annuity(contrib = contri, rate = r, years = i)
    growing_contrib[i + 1] <- fv +
      growing_annuity(contrib = contri, rate = r, growth = g, years = i)
  }
  data.frame(year = 0:yr,
             no_contrib = no_contrib,
             fixed_contrib = fixed_contrib,
             growing_contrib = growing_contrib)
}

# --- UI ----------------------------------------------------------------------
ui <- fluidPage(

  # Application title
  titlePanel("Savings-Investment Simulation"),

  # Widgets
  fluidRow(
    column(4,
           sliderInput("initial",
                       label = "Initial Amount",
                       min = 0,
                       max = 100000, step = 500, pre = "$",
                       value = 1000)),
    column(4,
           sliderInput("return",
                       label = "Return Rate(in %)",
                       min = 0,
                       max = 20,
                       step = 0.1,
                       value = 5)),
    column(4,
           sliderInput("years",
                       label = "Years",
                       min = 0,
                       max = 50, step = 1,
                       value = 20))),
  fluidRow(
    column(4,
           sliderInput("contribution",
                       label = "Annual Contribution", pre = "$",
                       min = 0,
                       max = 50000, step = 500,
                       value = 2000)),
    column(4,
           sliderInput("growth",
                       label = "Growth Rate(in %)",
                       min = 0,
                       max = 20, step = 0.1,
                       value = 2)),
    column(4,
           selectInput("facet",
                       label = "Facet?",
                       choices = c("YES", "NO"), selected = "NO"))
  ),

  hr(),
  fluidRow(h4("Timelines"),
           plotOutput("linePlot")),
  br(),
  fluidRow(h4("Balances"),
           verbatimTextOutput("balances"))
)

# --- Server ------------------------------------------------------------------
server <- function(input, output) {

  # Single reactive simulation shared by the timeline plot and the printed
  # balance table (the computation was previously run twice).
  balances <- reactive({
    simulate_balances(ini = input$initial,
                      contri = input$contribution,
                      r = input$return / 100,
                      g = input$growth / 100,
                      yr = input$years)
  })

  output$linePlot <- renderPlot({
    modalities <- balances()
    if (input$facet == "NO") {
      # One panel, three colored lines.
      ggplot(data = modalities, aes(x = year)) +
        geom_line(aes(y = no_contrib, color = 'no_contrib'), size = 0.8) +
        geom_line(aes(y = fixed_contrib, color = 'fixed_contrib'), size = 0.8) +
        geom_line(aes(y = growing_contrib, color = 'growing_contrib'), size = 0.8) +
        labs(title = "Annual Balances under Three Investment Modes",
             x = "Year", y = "Annual Balance(in dollars)",
             color = "Investment Modes") +
        scale_colour_manual("",
                            breaks = c("no_contrib", "fixed_contrib", "growing_contrib"),
                            values = c("#7A3225", "#7ADFCC", "#2174F0"))
    } else {
      # Long format for faceting: one row per (mode, year), modes stacked
      # in the same order as the original rbind (no, fixed, growing).
      n <- nrow(modalities)
      facettotal <- data.frame(
        investment_mode = as.factor(rep(c("no_contrib", "fixed_contrib", "growing_contrib"),
                                        each = n)),
        year = rep(modalities$year, 3),
        bal = c(modalities$no_contrib, modalities$fixed_contrib, modalities$growing_contrib)
      )
      ggplot(data = facettotal, aes(x = year)) +
        geom_line(aes(y = bal, col = investment_mode), size = 0.8) +
        geom_area(aes(y = bal, fill = investment_mode), alpha = 0.4) +
        labs(title = "Annual Balances under Three Investment Modes",
             x = "Year", y = "Annual Balance(in dollars)") +
        facet_grid(~ investment_mode)
    }
  })

  # Wide table of annual balances, printed verbatim.
  output$balances <- renderPrint({
    balances()
  })
}

# Run the application
shinyApp(ui = ui, server = server)
|
0bc9d714eedc0aea2ecb3a9991bc0e9fe1cade1a
|
e1f9dbb834bc550b243e325f7b8eb639d3d33c56
|
/man/fetchFX.Rd
|
57a3174f9b6045f68b63bd0ec82e94de2b8a78be
|
[] |
no_license
|
enricoschumann/pacificFX
|
55a543908e7732e965880ecb2271112ffe5b0423
|
c26b1b607c0c15016867648b62e18f0c7ac85f2b
|
refs/heads/master
| 2021-05-04T11:47:09.881858
| 2019-02-24T07:18:08
| 2019-02-24T07:18:08
| 52,907,420
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,019
|
rd
|
fetchFX.Rd
|
\name{fetchFX}
\alias{fetchFX}
\title{
Download historical FX data
}
\description{
Download historical FX data from the PACIFIC Exchange Rate
Service, run by Werner Antweiler at Sauder School of Business,
University of British Columbia. Visit the homepage
\url{http://fx.sauder.ubc.ca/} to learn more.
}
\usage{
fetchFX(baseCurrency = "USD", targetCurrencies = c("EUR", "JPY"),
startDate = "2010-01-01",endDate,
dataFrequency = "daily", notation = "volume")
}
\arguments{
\item{baseCurrency}{
base currency (eg, \code{"EUR"})
}
\item{targetCurrencies}{
target currency; can be a vector like \code{c("CHF", "USD")}
}
\item{startDate}{
a length-one vector of class \code{Date}, or a length-one vector of
class \code{character} in \acronym{ISO} 8601 format (ie,
\acronym{YYYY-MM-DD})
}
\item{endDate}{
a length-one vector of class \code{Date}, or a length-one vector of
class \code{character} in \acronym{ISO} 8601 format (ie,
\acronym{YYYY-MM-DD}). If missing, the current date is used.
}
\item{dataFrequency}{a length-one character vector: either
\code{"daily"}, \code{"weekly"} or \code{"monthly"}
}
\item{notation}{
a length-one character vector: either \code{"volume"} or \code{"price"}}
}
\details{
\emph{The database is for academic purposes only.} Please see the
\acronym{FAQ} at \url{http://fx.sauder.ubc.ca/FAQ.html} for the terms of use.
There are two types of quoting convention, set through the argument
\code{notation}. Volume notation: how many units of the
\code{targetCurrency} do I have to give for one unit of the
\code{baseCurrency}? Price notation: how many units of the
\code{baseCurrency} do I have to give for one unit of the
\code{targetCurrency}?
The web interface to the database restricts daily and weekly data to
no more than four calendar years per download. \code{fetchFX} will
automatically loop in such cases (but will add a delay of one second
in each iteration).
}
\note{
In the FX markets, currencies are typically quoted as \code{FX1FX2},
which reads \sQuote{How many units of \code{FX2} do I have to give to
get one unit of \code{FX1}?} For instance, \code{EURUSD} means
\sQuote{how many dollars do I have to give for one euro?} This is
\strong{not} how currencies are labelled in the PACIFIC Exchange Rate
Service.
}
\value{
The function returns a data.frame. Column \code{"Jul.Dates"} contains
the Julian dates (see \url{http://fx.sauder.ubc.ca/julian.html}) in
\code{numeric} form; column \code{"Dates"} the calendar dates (class
\code{Date}); the following columns contain the exchange rates.
}
\author{
Enrico Schumann
Maintainer: Enrico Schumann <es@enricoschumann.net>
}
\examples{
\dontrun{
x <- fetchFX("USD", targetCurrencies = c("EUR", "AUD"),
startDate = as.Date("2006-01-01"),
endDate = as.Date("2011-10-01"),
dataFrequency = "weekly",
notation = "price")
head(x, 3L)
tail(x, 3L)}
}
|
d8e63a3bc5d52c93f4ad8764940d5f1b728b8928
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/8641_1/rinput.R
|
169d7a3bb9878b007c8e1c819510bf865be02e2d
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Read a phylogenetic tree from Newick format, remove its root, and
# write the unrooted tree back out.
library(ape)

rooted_tree <- read.tree("8641_1.txt")
unrooted_tree <- unroot(rooted_tree)
write.tree(unrooted_tree, file = "8641_1_unrooted.txt")
|
b3ef58380f211a423095bce5ed6695fe216c6c16
|
30d2ed023fed988d04dbb83e66edba3df96dad74
|
/dashboard/dash_header.R
|
89ac8a985f95898a663d45cda3a37618750fd537
|
[
"MIT"
] |
permissive
|
arestrom/Chehalis
|
7949a449a0e4ec603b98db72b9fbdefd0c39529a
|
c4d8bfd5c56c2b0b4b58eee3af7eb1a6b47b2695
|
refs/heads/master
| 2023-05-31T05:58:51.247660
| 2021-06-29T17:26:19
| 2021-06-29T17:26:19
| 295,434,189
| 0
| 0
|
MIT
| 2021-05-26T22:11:54
| 2020-09-14T14:03:02
|
HTML
|
UTF-8
|
R
| false
| false
| 325
|
r
|
dash_header.R
|
#=============================================================
# ShinyDashboardPlus header function
#=============================================================
# Builds the dashboard's top navigation bar:
# - `fixed = TRUE` keeps the header pinned while the page scrolls
# - the title combines a text label (span with class "logo-lg", the
#   style used when the sidebar is expanded) with an SVG logo image
#   (served from the app's static assets; presumably www/ -- confirm)
dash_header = dashboardHeader(
  fixed = TRUE,
  title = tagList(
    span(class = "logo-lg", "Chehalis Basin data"),
    img(src = "ShinyDashboardPlus.svg"))
)
|
f98a03cad70e86f15e1c4a7ee23756a92118a014
|
359cbcaf78f3d70062610f13e01ff49b7bf202de
|
/[R] lab13.R
|
cdf717dff164bc5722aa5852ad41ae8561686a3d
|
[] |
no_license
|
haoingg/analysis
|
8bb4b776b8fe95cdc5dd0acacc2a08d752b2584a
|
a1be341a77ba9ee859816d2b5d9cdb8c966f5c92
|
refs/heads/master
| 2023-06-02T06:09:02.956848
| 2021-06-25T14:07:53
| 2021-06-25T14:07:53
| 355,138,994
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 885
|
r
|
[R] lab13.R
|
# Build a word cloud from Korean text scraped from Yes24 (output/yes24.txt)
# and save it as an HTML page.
#
# This consolidates the original scratch section and its "answer" section
# into one working pipeline. The scratch version (a) ran gsub on the
# un-flattened noun list, (b) built a data.frame from vectors of unequal
# length (runtime error), and (c) rendered the package's built-in
# `demoFreq` sample instead of the computed frequencies.
library(wordcloud2)
library(KoNLP)

# Read the raw UTF-8 text.
raw_text <- readLines("output/yes24.txt", encoding = 'UTF-8')

# Extract nouns and flatten the per-line lists into one character vector.
nouns <- unlist(extractNoun(raw_text))

# NOTE(review): "^[가-힣]" deletes the FIRST Hangul character of each
# word; the intent was probably "[^가-힣]" (strip non-Korean characters).
# Kept as written in the original "answer" section -- confirm.
cleaned <- gsub("^[가-힣]", "", nouns)

# Keep words of 2 to 4 characters.
words <- Filter(function(x) { nchar(x) >= 2 & nchar(x) <= 4 }, cleaned)

# Frequency table, most frequent first.
freq <- sort(table(words), decreasing = TRUE)
yes24 <- data.frame(freq)

# Render the word cloud from the computed frequencies (not demoFreq)
# and save it to HTML.
result <- wordcloud2(data = yes24, fontFamily = '휴먼옛체')
htmltools::save_html(result, "output/yes24.html")
|
572f2ffcc71b1ea11189593df7114aad38d903ba
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/phrasemachine/examples/coarsen_POS_tags.Rd.R
|
27925b78532e3fb84b73e1f56d0d3be81fc9c9df
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 196
|
r
|
coarsen_POS_tags.Rd.R
|
library(phrasemachine)

### Name: coarsen_POS_tags
### Title: Coarsen POS tags
### Aliases: coarsen_POS_tags

### ** Examples

# Collapse Penn Treebank part-of-speech tags into coarse categories.
tags <- c("VB", "JJ", "NN", "NN")
coarsen_POS_tags(tags)
|
14f2af86530272e58004807e02494e98acc84150
|
eb5d6c88a8ca82b8d78fc55da93734cbd09f231b
|
/man/splitIntoStatements.Rd
|
ab11673ba0a2753d739a1ae6cda4ec94046cc7a9
|
[] |
no_license
|
uhjish/sasMap
|
79b9675e648c9dbe93423552a7d8d2d2fba0f3e3
|
769f28467fad3d0c4b3d51c6dde7389366d2b494
|
refs/heads/master
| 2021-01-19T20:36:50.691431
| 2017-08-20T21:18:00
| 2017-08-20T21:18:00
| 101,232,842
| 1
| 0
| null | 2017-08-23T23:19:06
| 2017-08-23T23:19:06
| null |
UTF-8
|
R
| false
| true
| 492
|
rd
|
splitIntoStatements.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility_functions.R
\name{splitIntoStatements}
\alias{splitIntoStatements}
\title{Split SAS code into statements}
\usage{
splitIntoStatements(sasCode)
}
\arguments{
\item{sasCode}{Character string containing SAS code}
}
\description{
Split SAS code into statements
}
\examples{
sasPath <- system.file('examples/SAScode/Macros/fun2.SAS', package='sasMap')
sasCode <- loadSAS(sasPath)
splitIntoStatements(sasCode)
}
|
782722fc52b58e1f09b168c038722892fd59c7d8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Matrix/examples/all-methods.Rd.R
|
0d28db0dfdbaa84cdd2d1abdcb0528586000d296
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 984
|
r
|
all-methods.Rd.R
|
library(Matrix)

### Name: all-methods
### Title: "Matrix" Methods for Functions all() and any()
### Aliases: all-methods all,Matrix-method any,Matrix-method
###   all,ldenseMatrix-method all,lsparseMatrix-method all,lsyMatrix-method
###   any,lMatrix-method
### Keywords: methods

### ** Examples

# 3x4 Matrix filled column-wise with 1..12 (the `+0` forces double
# storage rather than integer).
M <- Matrix(1:12 +0, 3,4)
all(M >= 1) # TRUE
any(M < 0 ) # FALSE

# Introduce a single NA: all()/any() then return NA unless the
# comparison can be decided without it, or na.rm = TRUE is given.
MN <- M; MN[2,3] <- NA; MN
all(MN >= 0) # NA
any(MN < 0) # NA
any(MN < 0, na.rm = TRUE) # -> FALSE
## Don't show:
# Repeat the checks on the sparse representation of MN and assert that
# dense and sparse methods agree.
sM <- as(MN, "sparseMatrix")
stopifnot(all(M >= 1), !any(M < 0),
          all.equal((sM >= 1), as(MN >= 1, "sparseMatrix")),
          ## MN:
          any(MN < 2), !all(MN < 5),
          is.na(all(MN >= 0)), is.na(any(MN < 0)),
          all(MN >= 0, na.rm=TRUE), !any(MN < 0, na.rm=TRUE),
          ## same for sM :
          any(sM < 2), !all(sM < 5),
          is.na(all(sM >= 0)), is.na(any(sM < 0)),
          all(sM >= 0, na.rm=TRUE), !any(sM < 0, na.rm=TRUE)
          )
## End(Don't show)
|
70cff931ebf6fa812cdab8ea87c36d057082d985
|
6a28ba69be875841ddc9e71ca6af5956110efcb2
|
/Introduction_To_Mathematical_Statistics_by_Robert_V_Hogg_Joseph_W_Mckean_Allen_T_Craig/CH4/EX4.10.1/Ex4_10_1.R
|
73811731069338fcb7d0114c9dadca18212753b5
|
[] |
permissive
|
FOSSEE/R_TBC_Uploads
|
1ea929010b46babb1842b3efe0ed34be0deea3c0
|
8ab94daf80307aee399c246682cb79ccf6e9c282
|
refs/heads/master
| 2023-04-15T04:36:13.331525
| 2023-03-15T18:39:42
| 2023-03-15T18:39:42
| 212,745,783
| 0
| 3
|
MIT
| 2019-10-04T06:57:33
| 2019-10-04T05:57:19
| null |
UTF-8
|
R
| false
| false
| 121
|
r
|
Ex4_10_1.R
|
# Page no 287
# Density f(v) = 30 v^4 (1 - v) on (0, 1); compute P(V > 0.8) as
# 1 minus the numerically integrated CDF at 0.8.
f <- function(v) 30 * v^4 * (1 - v)

i <- integrate(f, lower = 0, upper = 0.8)
ans <- 1 - i$value
round(ans, 2)
|
690ffb7b1abd3ebc3042f5f2459202e6ee85f4af
|
c08e6b516a3d341d1fdb893448922082dc3626cf
|
/R/dimsum__filter_reads.R
|
d60c5a6279db04143d0f96507017fbcb247cef50
|
[
"MIT"
] |
permissive
|
lehner-lab/DiMSum
|
eda57459bbb450ae52f15adc95d088747d010251
|
ca1e50449f1d39712e350f38836dc3598ce8e712
|
refs/heads/master
| 2023-08-10T17:20:39.324026
| 2023-07-20T15:29:47
| 2023-07-20T15:29:47
| 58,115,412
| 18
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,672
|
r
|
dimsum__filter_reads.R
|
#' dimsum__filter_reads
#'
#' Concatenate reads (with or without reverse complementing second read in pair).
#'
#' @param input_FASTQ Path to input FASTQ file (required)
#' @param input_REPORT Path to input report file (required)
#' @param output_FASTQ Path to output FASTQ file (required)
#' @param output_REPORT Path to output report file (required)
#' @param min_qual Minimum observed base quality to retain read pair (required)
#'
#' @return Nothing
#' @export
dimsum__filter_reads <- function(
  input_FASTQ,
  input_REPORT,
  output_FASTQ,
  output_REPORT,
  min_qual
  ){

  #Alignment statistics
  # Accumulators for the filter report. Most counts are parsed out of
  # the vsearch merge report; 'Merged', 'Min_Q_too_low' and the length
  # distribution are recomputed here while streaming the FASTQ.
  a_stats <- list()
  a_stats[['Pairs']] <- 0 #from vsearch report
  a_stats[['Merged']] <- 0
  a_stats[['Too_short']] <- 0 #from vsearch report
  a_stats[['No_alignment_found']] <- 0 #from vsearch report
  a_stats[['Too_many_diffs']] <- 0 #from vsearch report
  a_stats[['Overlap_too_short']] <- 0 #from vsearch report
  a_stats[['Exp.errs._too_high']] <- 0 #from vsearch report
  a_stats[['Min_Q_too_low']] <- 0
  a_stats[['merged_lengths']] <- c()

  #Get vsearch results
  # Each count is scraped from the report text by grep'ing for a marker
  # phrase, splitting the matching line on spaces and taking a fixed
  # field from the end (rev(...)[k]). sum(..., na.rm = T) turns a
  # missing line (no match) into 0.
  # NOTE(review): field positions are tied to the vsearch report layout
  # of the version in use -- a format change would silently yield 0s.
  temp_out <- readLines(input_REPORT)
  a_stats[['Pairs']] <- sum(as.integer(rev(unlist(strsplit(temp_out[grep('Pairs$', temp_out)], ' ')))[3]), na.rm = T)
  a_stats[['Too_short']] <- sum(as.integer(rev(unlist(strsplit(temp_out[grep('reads too short', temp_out)], ' ')))[7]), na.rm = T)
  # 'No alignment found' aggregates three distinct vsearch failure modes.
  too_few_kmers <- sum(as.integer(rev(unlist(strsplit(temp_out[grep('too few kmers found on same diagonal', temp_out)], ' ')))[9]), na.rm = T)
  multiple_potential <- sum(as.integer(rev(unlist(strsplit(temp_out[grep('multiple potential alignments', temp_out)], ' ')))[5]), na.rm = T)
  score_too_low <- sum(as.integer(rev(unlist(strsplit(temp_out[grep('alignment score too low', temp_out)], ' ')))[11]), na.rm = T)
  a_stats[['No_alignment_found']] <- sum(too_few_kmers, multiple_potential, score_too_low, na.rm = T)
  a_stats[['Too_many_diffs']] <- sum(as.integer(rev(unlist(strsplit(temp_out[grep('too many differences', temp_out)], ' ')))[5]), na.rm = T)
  a_stats[['Overlap_too_short']] <- sum(as.integer(rev(unlist(strsplit(temp_out[grep('overlap too short', temp_out)], ' ')))[5]), na.rm = T)
  a_stats[['Exp.errs._too_high']] <- sum(as.integer(rev(unlist(strsplit(temp_out[grep('expected error too high', temp_out)], ' ')))[6]), na.rm = T)

  #Process FASTQ files
  # Stream the FASTQ in chunks of 1e6 reads, drop any read containing a
  # base below min_qual, and append the survivors to output_FASTQ.
  initial_write <- TRUE #records written to output file already?
  yield_size <- 1e6
  f1 <- ShortRead::FastqStreamer(input_FASTQ, n=yield_size)
  #Read input FASTQ files in chunks
  while(length(fq1 <- ShortRead::yield(f1))){
    #Read quality matrices
    qmat1 <- as(Biostrings::quality(fq1), "matrix")
    #Number of bases with qualities less than minimum specified?
    non_merge_num_bases_too_low_qual <- apply(qmat1<min_qual, 1, sum, na.rm = T)
    #Update statistics
    a_stats[['Min_Q_too_low']] <- a_stats[['Min_Q_too_low']] + sum(non_merge_num_bases_too_low_qual!=0)
    #Subset to sequences with all base qualities not less than specified
    fq1 <- fq1[non_merge_num_bases_too_low_qual==0]
    #Write to file (append on all chunks after the first)
    dimsum__writeFastq(shortreads = fq1, outputFile = output_FASTQ, initial_write = initial_write)
    initial_write <- FALSE
    #Update statistics
    a_stats[['Merged']] <- a_stats[['Merged']] + length(fq1)
    a_stats[['merged_lengths']] <- c(a_stats[['merged_lengths']], IRanges::width(ShortRead::sread(fq1)))
  }

  #Update length statistics
  # Five-number summary of retained read lengths; all NA when nothing
  # survived the filter (avoids min/max on an empty vector).
  if(a_stats[['Merged']]!=0){
    a_stats[['Merged_length_min']] <- min(a_stats[['merged_lengths']])
    a_stats[['Merged_length_low']] <- as.numeric(quantile(a_stats[['merged_lengths']], 0.25))
    a_stats[['Merged_length_median']] <- as.numeric(median(a_stats[['merged_lengths']]))
    a_stats[['Merged_length_high']] <- as.numeric(quantile(a_stats[['merged_lengths']], 0.75))
    a_stats[['Merged_length_max']] <- max(a_stats[['merged_lengths']])
  }else{
    a_stats[['Merged_length_min']] <- NA
    a_stats[['Merged_length_low']] <- NA
    a_stats[['Merged_length_median']] <- NA
    a_stats[['Merged_length_high']] <- NA
    a_stats[['Merged_length_max']] <- NA
  }

  #Report
  # Assemble the human-readable report and write it in one shot.
  report_list <- list()
  report_list <- append(report_list, 'Merged length distribution:\n')
  report_list <- append(report_list, paste0('\t ', a_stats[['Merged_length_min']], ' Min\n'))
  report_list <- append(report_list, paste0('\t ', a_stats[['Merged_length_low']], ' Low quartile\n'))
  report_list <- append(report_list, paste0('\t ', a_stats[['Merged_length_median']], ' Median\n'))
  report_list <- append(report_list, paste0('\t ', a_stats[['Merged_length_high']], ' High quartile\n'))
  report_list <- append(report_list, paste0('\t ', a_stats[['Merged_length_max']], ' Max\n\nTotals:\n'))
  report_list <- append(report_list, paste0('\t ', a_stats[['Pairs']], ' Pairs\n'))
  report_list <- append(report_list, paste0('\t ', a_stats[['Merged']], ' Merged\n'))
  report_list <- append(report_list, paste0('\t ', a_stats[['Too_short']], ' Too short\n'))
  report_list <- append(report_list, paste0('\t ', a_stats[['No_alignment_found']], ' No alignment found\n'))
  report_list <- append(report_list, paste0('\t ', a_stats[['Too_many_diffs']], ' Too many diffs\n'))
  report_list <- append(report_list, paste0('\t ', a_stats[['Overlap_too_short']], ' Overlap too short\n'))
  report_list <- append(report_list, paste0('\t ', a_stats[['Exp.errs._too_high']], ' Exp.errs. too high\n'))
  report_list <- append(report_list, paste0('\t ', a_stats[['Min_Q_too_low']], ' Min Q too low\n'))
  write(paste0(unlist(report_list), collapse = ""), file = output_REPORT, sep = "")

  #Delete input FASTQ file (best effort; failure is ignored)
  suppressWarnings(temp_out <- file.remove(input_FASTQ))
}
|
3721a13341ebe9786a03ae1be84f3b8ad512bd51
|
1903367ccf3ffcf2dc3680fabfa58a99de58cb5e
|
/R/foci.R
|
d3e9aa2440b7dabd270ad095c4ffd9ad175b0d5b
|
[] |
no_license
|
cran/FOCI
|
f97477ce4e6bd02e7468d6d5aa3c5af31c95f505
|
8ebce7c9ce307796724fbdf020e43abf7fd75b81
|
refs/heads/master
| 2021-07-12T12:02:38.917189
| 2021-03-18T22:00:07
| 2021-03-18T22:00:07
| 236,599,263
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,556
|
r
|
foci.R
|
####################################################################
# MAIN FUNCTIONS:
# foci_main: the core function for implementing the foci algorithm
# foci: Performs feature ordering by conditional independence
####################################################################
# foci_main -------------------------------------------------------------------
# Core forward-stepwise variable selection for FOCI.
#
# At each step the candidate covariate maximizing the conditional
# dependence estimate Q (.estimateQ) given the already-selected set is
# added. If `stop` is TRUE the search halts as soon as the criterion no
# longer improves.
#
# Y: response vector (length n); X: matrix of predictors (n by p)
# num_features: maximum number of covariates to select (default p; not
#   relevant when stop == TRUE)
# stop: stop at the first instance of a non-improving codec, if TRUE
# numCores: number of cores used by parallel::mclapply
# Returns an object of class "foci" with the selected indices/names and
# the normalized criterion values (stepT), or NULL if stop == TRUE and
# no covariate has positive codec.
foci_main <- function(Y, X, num_features = NULL, stop = TRUE, numCores = parallel::detectCores()){
  namesX <- colnames(X)
  if (is.null(num_features)) num_features <- dim(X)[2]
  p <- ncol(X)
  Q <- rep(0, num_features)
  index_select <- rep(0, num_features)

  # Select the first variable: marginal Q for every column (in parallel
  # when X has more than one column).
  if (is.null(dim(X))) {
    seq_Q <- .estimateQ(Y, X)
  } else {
    estimateQFixedY <- function(id){
      .estimateQ(Y, X[, id])
    }
    seq_Q <- unlist(parallel::mclapply(seq_len(p), estimateQFixedY, mc.cores = numCores))
  }
  Q[1] <- max(seq_Q)
  # Nothing predictive at all -> bail out under the stopping rule.
  # (Scalar condition: `&&` instead of elementwise `&`, and the bare
  # logical instead of `stop == TRUE`.)
  if (Q[1] <= 0 && stop) return(NULL)
  # First index attaining the maximum.
  index_select[1] <- min(which(seq_Q == Q[1]))
  count <- 1

  # Select the remaining variables greedily.
  while (count < num_features) {
    # Indices that have not been selected yet.
    index_left <- setdiff(seq_len(p), index_select[1:count])
    # Q of each remaining candidate, conditioning on the current set.
    estimateQFixedYandSubX <- function(id){
      .estimateQ(Y, cbind(X[, c(index_select[1:count], id)]))
    }
    if (length(index_left) == 1) {
      seq_Q <- estimateQFixedYandSubX(index_left[1])
    } else {
      seq_Q <- unlist(parallel::mclapply(index_left, estimateQFixedYandSubX, mc.cores = numCores))
    }
    Q[count + 1] <- max(seq_Q)
    index_max <- min(which(seq_Q == Q[count + 1]))
    # Stop at the first non-improvement of the criterion.
    if (Q[count + 1] <= Q[count] && stop) break
    index_select[count + 1] <- index_left[index_max]
    count <- count + 1
  }

  selectedVar <- data.table::data.table(index = index_select[1:count],
                                        names = namesX[index_select[1:count]])
  # Normalize by .estimateS(Y) to obtain the codec-scale step values.
  stepT <- Q / .estimateS(Y)
  result <- list(selectedVar = selectedVar, stepT = stepT[1:count])
  class(result) <- "foci"
  return(result)
}
# foci -------------------------------------------------------------------------
#' Variable selection by the FOCI algorithm
#'
#' FOCI is a variable selection algorithm based on the measure of conditional dependence \code{\link{codec}}.
#'
#' @param X Matrix of predictors (n by p)
#' @param Y Vector of responses (length n)
#' @param num_features Number of variables to be selected, cannot be larger than p. The default value is NULL and in that
#' case it will be set equal to p. If stop == TRUE (see below), then num_features is irrelevant.
#' @param stop Stops at the first instance of negative codec, if TRUE.
#' @param na.rm Removes NAs if TRUE.
#' @param standardize Standardize covariates if set equal to "scale" or "bounded". Otherwise will use the raw inputs.
#' The default value is "scale" and normalizes each column of X to have mean zero and variance 1. If set equal to "bounded"
#' map the values of each column of X to [0, 1].
#' @param numCores Number of cores that are going to be used for
#' parallelizing the variable selection process.
#' @param parPlat Specifies the parallel platform to chunk data by rows.
#' It can take three values:
#' 1- The default value is set to 'none', in which case no row chunking
#' is done;
#' 2- the \code{parallel} cluster to be used for row chunking;
#' 3- "locThreads", specifying that row chunking will be done via
#' threads on the host machine.
#' @param printIntermed The default value is TRUE, in which case print intermediate results from the cluster nodes before final processing.
#' @details FOCI is a forward stepwise algorithm that uses the conditional dependence coefficient (\code{\link{codec}})
#' at each step, instead of the multiple correlation coefficient
#' as in ordinary forward stepwise. If \code{stop} == TRUE, the process is stopped at the first instance of
#' nonpositive codec, thereby selecting a subset of variables. Otherwise, a set of covariates of size
#' \code{num_features}, ordered according to predictive power (as measured by codec) is produced.
#'
#' \emph{Parallel computation:}
#'
#' The computation can be lengthy, so the package offers two kinds of
#' parallel computation.
#'
#' The first, controlled by the argument \code{numCores},
#' specifies the number of cores to be used on the host
#' machine. If at a given step there are k candidate variables
#' under consideration for inclusion, these k tasks are assigned
#' to the various cores.
#'
#' The second approach, controlled by the argument \code{parPlat}
#' ("parallel platform"), involves the user first setting up a cluster via
#' the \pkg{parallel} package. The data are divided into chunks by rows,
#' with each cluster node applying FOCI to its data chunk. The
#' union of the results is then formed, and fed through FOCI one more
#' time to adjust the discrepancies. The idea is that that last step
#' will not be too lengthy, as the number of candidate variables has
#' already been reduced. A cluster size of r may actually
#' produce a speedup factor of more than r (Matloff 2016).
#'
#' Potentially the best speedup is achieved by using the two approaches
#' together.
#'
#' The first approach cannot be used on Windows platforms, as
#' \code{parallel::mcapply} has no effect. Windows users should thus
#' use the second approach only.
#'
#' In addition to speed, the second approach is useful for diagnostics, as
#' the results from the different chunks gives the user an
#' idea of the degree of sampling variability in the
#' FOCI results.
#'
#' In the second approach, a random permutation is applied to the
#' rows of the dataset, as many datasets are sorted by one or more
#' columns.
#'
#' Note that if a certain value of a feature is rare in the
#' full dataset, it may be absent entirely in some chunk.
#' @return An object of class "foci", with attributes
#' \code{selectedVar}, showing the selected variables in decreasing
#' order of (conditional) predictive power, and \code{stepT}, listing
#' the 'codec' values. Typically the latter will begin to level off at
#' some point, with additional marginal improvements being small.
#' @import data.table
#' @export
#' @author Mona Azadkia, Sourav Chatterjee, and Norman Matloff
#' @references Azadkia, M. and Chatterjee, S. (2019). A simple measure
#' of conditional dependence.
#' \url{https://arxiv.org/pdf/1910.12327.pdf}.
#' @references Matloff, N. (2016). Software Alchemy: Turning Complex
#' Statistical Computations into Embarrassingly-Parallel Ones.
#' \emph{J. of Stat. Software.}
#' @seealso \code{\link{codec}}, \code{\link[XICOR]{xicor}}
#' @examples
#' # Example 1
#' n = 1000
#' p = 100
#' x <- matrix(rnorm(n * p), nrow = n)
#' colnames(x) = paste0(rep("x", p), seq(1, p))
#' y <- x[, 1] * x[, 10] + x[, 20]^2
#' # with num_features equal to 3 and stop equal to FALSE, foci will give a list of
#' # three selected features
#' result1 = foci(y, x, num_features = 3, stop = FALSE, numCores = 1)
#' result1
#' # Example 2
#' # same example, but stop according to the stopping rule
#' result2 = foci(y, x, numCores = 1)
#' result2
#' \dontrun{
#' # Windows use of multicore
#' library(parallel)
#' cls <- makeCluster(parallel::detectCores())
#' foci(y, x, parPlat = cls)
#' # run on physical cluster
#' cls <- makePSOCKcluster('machineA','machineB')
#' foci(y, x, parPlat = cls)
#' }
foci <- function(Y, X, num_features = NULL, stop = TRUE, na.rm = TRUE,
                 standardize = "scale", numCores = parallel::detectCores(),
                 parPlat = 'none', printIntermed = TRUE) {

  # Ensure columns are named so selected variables can be reported by name.
  if (is.null(colnames(X))) {
    colnames(X) <- paste0('V',1:ncol(X))
    warning('X lacked column names, has been assigned V1, V2,...')
  }
  namesX <- colnames(X)

  # if inputs are not in proper format change if possible
  # otherwise send error
  if(!is.vector(Y)) {
    Y <- as.vector(unlist(Y))
  }
  if(!is.matrix(X)) {
    X <- as.matrix(X)
  }

  if (is.null(num_features)) num_features <- dim(X)[2]
  if((length(Y) != nrow(X))) stop("Number of rows of Y and X should be equal.")

  if (na.rm == TRUE) {
    # NAs are removed here: keep only rows complete in both Y and X.
    ok <- complete.cases(Y,X)
    X <- as.matrix(X[ok,])
    Y <- Y[ok]
  }

  n <- length(Y)
  if(n < 2) stop("Number of rows with no NAs should be bigger than 1.")

  p = ncol(X)

  # Validate num_features: positive integer, at most p.
  if (num_features > p) stop("Number of features should not be larger than maximum number of original features.")
  if ((floor(num_features) != num_features) || (num_features <= 0)) stop("Number of features should be a positive integer.")
  if (!is.numeric(Y)) stop("currently FOCI does not handle factor Y")

  # standardize == "scale": center each column and divide by its sd.
  # Constant columns are rejected (sd would be 0).
  if (standardize == "scale") {
    for (i in 1:p) {
      if(length(unique(X[, i])) > 1) {
        X[,i] <- (X[,i] - mean(X[,i])) / sd(X[,i])
      }else{
        # NM, May 12; changed to paste0() to remove superfluous blank
        stop(paste0("Column ", i, " of X is constant."))
      }
    }
  }
  # standardize == "bounded": min-max map each column to [0, 1].
  if (standardize == "bounded") {
    for (i in 1:p) {
      if(length(unique(X[, i])) > 1) {
        X[,i] <- (X[,i] - min(X[,i])) / (max(X[, i]) - min(X[, i]))
      }else{
        stop(paste0("Column ", i, " of X is constant."))
      }
    }
  }

  # No row chunking requested: run the core algorithm directly.
  if (parPlat[1] == 'none') {
    return(foci_main(Y, X, num_features = num_features,
                     stop = stop, numCores = numCores))
  }

  # NM, May 12: many datasets are ordered by one or more columns; to
  # preserve iid-ness, randomize the row order; if we get here, we will
  # be chunking
  nr <- nrow(X)
  permRowNums <- sample(1:nr,nr,replace=FALSE)
  X <- X[permRowNums,]
  Y <- Y[permRowNums]

  # Partition row indices into one chunk per core; each cluster node
  # runs FOCI on its chunk and returns the selected column indices.
  rowNums <- parallel::splitIndices(length(Y), numCores)
  selectFromChunk <- function(nodeNum) {
    myRows <- rowNums[[nodeNum]]
    sel <- foci_main(Y[myRows], X[myRows,], stop = stop,
                     numCores = numCores)$selectedVar$index
  }

  # parPlat is either an existing 'parallel' cluster or the string
  # "locThreads" (build a local cluster of numCores workers).
  if(inherits(parPlat,'cluster')) {
    cls <- parPlat
  }else if(parPlat == 'locThreads') {
    # set up the cluster (in multicore case, it's virtual)
    cls <- parallel::makeCluster(numCores)
  } else stop('invalid parPlat')

  # worker nodes load library
  parallel::clusterEvalQ(cls, library(FOCI))
  # ship data to workers
  parallel::clusterExport(cls, c('Y', 'X', 'rowNums', 'selectFromChunk'),
                          envir = environment())
  # drop-in replacement for mclapply
  slc <- parallel::parLapply(cls, seq(1, length(cls)), selectFromChunk)

  # Optionally show each chunk's selection (useful as a diagnostic for
  # sampling variability across chunks).
  if (printIntermed) print(slc)

  # Union of the per-chunk selections, then one final FOCI pass on the
  # reduced candidate set using the full data.
  slc <- Reduce(union, slc)

  ## 17.02.2021: Check whether windows of mac
  # mclapply forking is unavailable on Windows, so fall back to 1 core.
  numCores <- if (.Platform$OS.type == 'windows') 1 else parallel::detectCores()

  res <- foci_main(Y, X[, slc], num_features, stop, numCores=numCores)

  # must translate indices in reduced system to those of original
  newIdxs <- res$selectedVar$index
  origIdxs <- slc[newIdxs]
  res$selectedVar$index <- origIdxs

  # NOTE(review): if fewer than num_features variables were selected,
  # this subscript pads stepT with NAs -- confirm intended.
  res$stepT = res$stepT[1:num_features]

  parallel::stopCluster(cls)
  return(res)
}
|
c9aad05e2602d0a5038f58a9011f798ec75701f2
|
512251113212381e4704309f9b5f386118125bc6
|
/man/toolCodeLabels.Rd
|
63efd3d02dad5d0535ba6901e2f10cdba090f99e
|
[] |
no_license
|
johanneskoch94/madrat
|
74f07ee693d53baa5e53772fc41829438bdf1862
|
2a321caad22c2b9b472d38073bc664ae2991571f
|
refs/heads/master
| 2023-08-09T11:07:47.429880
| 2021-09-03T13:04:32
| 2021-09-03T13:04:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,001
|
rd
|
toolCodeLabels.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/toolCodeLabels.R
\name{toolCodeLabels}
\alias{toolCodeLabels}
\title{Tool: CodeLabels}
\usage{
toolCodeLabels(get = NULL, add = NULL)
}
\arguments{
\item{get}{A vector of hash codes which should be replaced}
\item{add}{Additional entries that should be added to the dictionary. Need to be
provided in the form of a named vector with the structure c(<label>=<hash>),
e.g. c(h12="62eff8f7")}
}
\value{
A vector with either labels (if available) or hash codes (if no label was available).
}
\description{
This function replaces a hash code (e.g. regioncode) or another cryptic
code with a human readable code via a given dictionary.
This can be useful to make outputs better readable in cases where hash
codes are already known to the user.
If no entry exists in the dictionary, the hash code itself is returned.
}
\examples{
toolCodeLabels("62eff8f7")
}
\seealso{
\code{\link{regionscode}}
}
\author{
Jan Philipp Dietrich
}
|
279481e695baeadb59d6ad855f90de27107990dd
|
636ef7dfc05c678b24fda5aa9fea3323c457354c
|
/R/zzz.R
|
766862ddd85463c58f65d737065377ad46660d31
|
[] |
no_license
|
bschulth/jsTreeRExample
|
ea1921ae9f5764b70837558eef1db1b70511eef0
|
cf676f09717f25d990cdbc05fc9e75883a682659
|
refs/heads/master
| 2023-08-03T01:04:40.872464
| 2021-09-18T19:49:58
| 2021-09-18T19:49:58
| 407,584,851
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 411
|
r
|
zzz.R
|
#++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Before package sealing
#++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Namespace-load hook: runs once when the package namespace is loaded.
# Enables srcref retention so parsed R code keeps its original source
# text (presumably so example code displays with its formatting -- confirm).
# NOTE(review): this flips a session-wide option and never restores it
# on unload; looks intentional, but worth confirming.
.onLoad <- function(libname, pkgname) {
  options(keep.source = TRUE)
  invisible(NULL)
}
#++++++++++++++++++++++++++++++++++++++++++++++++++++++
# After package sealing
#++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Attach hook: runs when the package is attached to the search path.
# Intentionally a no-op -- no startup message or attach-time setup.
.onAttach <- function(libname, pkgname) {
}
|
62859a8cdd536305ca854d5d27254f9359f0d45f
|
cbc7f6ba280cba7cd297520c0eb7916a815045e2
|
/cachematrix.R
|
9509cf4d746263ed3dbdca12f1e65451ff7fc22e
|
[] |
no_license
|
atorrents/ProgrammingAssignment2
|
dd14d61e3b349c24029f7a2ccb934691ee01f1b0
|
22b1c048c9a5c766533a1e02c1d030519bca2f35
|
refs/heads/master
| 2021-01-18T04:14:36.166369
| 2014-07-16T14:40:54
| 2014-07-16T14:40:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,652
|
r
|
cachematrix.R
|
## Time-saving system, caching the inverse of a matrix
## when we need calculate it several times
## function: makeCacheMatrix
## (uses special operator <<- which stores values in "parent" environment)
## Usage:
## a<-makeCacheMatrix(matrix <default NULL>) >> creates cache matrix a
## This matrix will cache its inverse by means of four methods
## managed by the function cacheSolve:
## a$get() >> gets actual value of a
## a$set(matrix) >> sets matrix as the value of a (clear cache)
## a$setSolve(matrix) >> caches matrix as the inverse of a
## a$getSolve() >> delivers (from cache) the inverse of a
## Constructs a "cache matrix": a matrix wrapped together with a cached
## inverse, exposed through four accessor closures.
##
## x       matrix to wrap (default: 1x1 NA matrix)
## returns list of functions: set, get, setSolve, getSolve
makeCacheMatrix <- function(x = matrix()) {
  ## BUG FIX: the original used `m <<- NULL` here. Since no local `m`
  ## existed, the superassignment created/overwrote `m` in the *global*
  ## environment, so every cache matrix shared -- and clobbered -- a
  ## single global cache (constructing a second object wiped the first
  ## object's cached inverse). A plain local assignment gives each
  ## object its own private cache in this closure's environment.
  m <- NULL
  set <- function(y) {
    x <<- y       # replace the wrapped matrix ...
    m <<- NULL    # ... and invalidate the cached inverse
  }
  get <- function() x
  setSolve <- function(inverse) m <<- inverse  # store computed inverse
  getSolve <- function() m                     # NULL until cached
  list(set = set, get = get, setSolve = setSolve, getSolve = getSolve)
}
## function: cacheSolve
## Usage: cacheSolve(a) >> retrieves (from cache, if available;
## otherwise computes and caches) the inverse of a
## Returns the inverse of cache-matrix `x`, computing it with solve()
## and caching it on the first call; later calls reuse the stored
## result. Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getSolve()
  if (is.null(cached)) {
    ## Cache miss: invert the wrapped matrix and remember the result
    ## so the (expensive) solve() is not repeated next time.
    cached <- solve(x$get(), ...)
    x$setSolve(cached)
  }
  cached
}
|
77af801303ed5b7d55597280c436fd10a4ab2225
|
57c9c8efb9dd8e88d11e0a6b755f862aeb445eef
|
/human/sveval/repeatmasked/eval.R
|
227d208013202ed176bc12e8eb7e3b962488299a
|
[
"MIT"
] |
permissive
|
quanrd/sv-genotyping-paper
|
ce8471699f2e3f0ad874ebdaf259bc8e37428c0e
|
627d6049fef1fbb26a0c63c96301dff293e0c58d
|
refs/heads/master
| 2022-03-11T22:02:20.899651
| 2019-11-25T23:02:08
| 2019-11-25T23:02:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,227
|
r
|
eval.R
|
## Stratified evaluation of SV calls by RepeatMasker repeat class:
## counts TP/FP/FN for the vg HG00514 callset vs. the HGSVC truth set,
## then computes precision/recall/F1 per class and writes a TSV.
library(sveval)
library(VariantAnnotation)
library(ggplot2)
## Truth and call VCFs annotated with RepeatMasker overlap
## (RMSKCOV / RMSKCLASS INFO fields).
truth.vcf = readSVvcf('hgsvc-truth-baseline.norm.rmsk.vcf.gz', vcf.object=TRUE)
calls.vcf = readSVvcf('hgsvc-vg-HG00514.norm.rmsk.vcf.gz', vcf.object=TRUE)
## Assign a variant to a repeat class only when RMSKCOV > 0.8
## (presumably the fraction of the variant covered by that repeat --
## confirm against the annotation pipeline); otherwise NA (unassigned).
min.cov = .8
truth.reps = ifelse(info(truth.vcf)$RMSKCOV>min.cov, info(truth.vcf)$RMSKCLASS, NA)
## "chrom start svtype size" strings used as variant identifiers to
## match records between the VCFs and the cached sveval objects.
truth.ids = paste(as.character(seqnames(rowRanges(truth.vcf))),
start(rowRanges(truth.vcf)),
info(truth.vcf)$SVTYPE, info(truth.vcf)$SIZE)
calls.reps = ifelse(info(calls.vcf)$RMSKCOV>min.cov, info(calls.vcf)$RMSKCLASS, NA)
calls.ids = paste(as.character(seqnames(rowRanges(calls.vcf))),
start(rowRanges(calls.vcf)),
info(calls.vcf)$SVTYPE, info(calls.vcf)$SIZE)
## Cached evaluation results; each .RData file provides `eval.o`, whose
## $svs element holds the classified variant sets used below.
load('../rdata/sveval-hgsvc-vg-HG00514-all-geno.RData')
svs.geno = eval.o$svs
load('../rdata/sveval-hgsvc-vg-HG00514-all-call.RData')
svs.call = eval.o$svs
svs = list(geno=svs.geno, call=svs.call)
## Keep only repeat classes with more than 100 called variants.
reps = table(calls.reps)
reps = names(reps)[reps>100]
## Count variants per (eval mode, repeat class, SV type, metric).
## NOTE: `svs` and `eval.df` are deliberately shadowed at each nesting
## level -- every inner binding narrows to the current subset.
eval.df = lapply(names(svs), function(eval){
svs = svs[[eval]]
eval.df = lapply(reps, function(repc){
## For each SV type
eval.df = lapply(names(svs), function(svtype){
svs = svs[[svtype]]
## For each class of variant
df = lapply(c('TP', 'TP.baseline', 'FP', 'FN'), function(metric){
svs = svs[[metric]]
sv.ids = paste(as.character(seqnames(svs)), start(svs), svs$type, svs$size)
## TP.baseline/FN are truth-set variants, so filter by the truth
## annotation; TP/FP are call-set variants.
if(metric %in% c('TP.baseline', 'FN')){
svs = svs[which(sv.ids %in% truth.ids[which(truth.reps==repc)])]
} else {
svs = svs[which(sv.ids %in% calls.ids[which(calls.reps==repc)])]
}
data.frame(rep=repc, type=svtype, metric=metric, n=length(svs), eval=eval,
stringsAsFactors=FALSE)
})
do.call(rbind, df)
})
eval.df = do.call(rbind, eval.df)
})
eval.df = do.call(rbind, eval.df)
})
eval.df = do.call(rbind, eval.df)
## Reformat into one row per size class/type with columns TP, FP, etc
eval.df = tidyr::spread(eval.df, 'metric', 'n', fill=0)
## Precision, recall and F1 (prf() comes from the sveval package)
eval.df = prf(eval.df)
write.table(eval.df, file='eval-rmsk-hgsvc-vg-HG00514-call-geno.tsv', sep='\t', quote=FALSE, row.names=FALSE)
|
b4e57a4b014f401a8ce2adab5da25dd033e746ab
|
8ea89ec7e70995d44aee36a0698a20e50fa91d7d
|
/examples/sqlite/update_samples.R
|
77843cdc2f13e1afd0265dfe151e56c8c6c8976d
|
[
"MIT"
] |
permissive
|
DrRoad/shiny-chart-builder
|
6e353b0ee12772e93af85a7b295a7784700bc5e3
|
e5072ab8bfcff1a897e6aac448ce00de0b4d58aa
|
refs/heads/master
| 2020-06-05T11:16:11.780338
| 2017-11-07T03:43:10
| 2017-11-07T03:43:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,557
|
r
|
update_samples.R
|
# Make sure your working directory is that of the project, not "example".
# config.R supplies DB connection settings (presumably defines queryDb()
# -- confirm) and attaches the dplyr helpers used below.
source('example/config.R')
# List every user table in the SQLite database; the SELECT aliases the
# `name` column to `tablename`, so allTables has a single tablename column.
allTables = queryDb("SELECT name tablename FROM sqlite_master WHERE type='table'")
# Describe one table's schema: returns a data frame with one row per
# column, holding the table name, column name, and an R-friendly type
# label. Relies on queryDb() (from config.R) and dplyr being attached.
getDef <- function(table)
{
  info <- queryDb(paste0("pragma table_info(", table, ")"))
  # Translate SQLite storage classes into the type labels the app uses;
  # anything unrecognized falls through to "character".
  info$type <- case_when(
    info$type == "INTEGER" ~ "integer",
    info$type == "REAL" ~ "numeric",
    info$type == "BOOLEAN" ~ "boolean",
    info$type == "TEXT" ~ "character",
    TRUE ~ "character"
  )
  info$tablename <- table
  select(info, tablename, column = name, type)
}
# Build the full schema -- one row per (table, column, type) -- by
# applying getDef() to each table name; rowwise() + do() iterates per row.
allColumns = rowwise(allTables) %>% do(getDef(.))
# Save the table list and column definitions for the app to load later.
save(allTables, allColumns, file="schema.rda")
# Fetch a random 5000-row sample of table `tab` and persist it as an
# RDS file under tables/. Returns TRUE so the caller's retry loop can
# detect success. Relies on queryDb() from config.R.
downloadSample <- function(tab){
  print(paste0("Downloading sample for table ", tab))
  rows <- queryDb(paste0('select * from ', tab, ' order by random() limit 5000'))
  saveRDS(rows, file = paste0('tables/', tab, '.rda'))
  TRUE
}
# Download a 5000-row sample for every table, retrying each table up to
# three times before giving up on it.
res = by(allTables, seq_len(nrow(allTables)), function(row){
  tab <- row$tablename
  r <- NULL
  attempt <- 1
  while (is.null(r) && attempt <= 3) {
    attempt <- attempt + 1
    r <- tryCatch({
      downloadSample(tab)  # TRUE on success
    }, error = function(e){
      # BUG FIX 1: the handler referenced an undefined `err`, so any
      # download failure raised a second error that aborted the whole
      # by() loop instead of retrying.
      print(paste0("Error: ", conditionMessage(e), ". Retrying..."))
      # BUG FIX 2: the handler used to return print()'s value (a
      # character string), which made `r` non-NULL and stopped the
      # while-loop after the first failure. Return NULL so it retries.
      NULL
    })
  }
  r  # TRUE on success, NULL if all three attempts failed
})
# Remove cached samples for tables that no longer exist: compare the
# .rda files currently on disk against the live table list and delete
# any file with no matching table.
delete <- anti_join(
  data.frame(tablename = gsub('.rda', '', list.files('tables/', pattern = "*.rda"))),
  allTables,
  by = 'tablename'
)
if (nrow(delete) > 0) {
  res = by(delete, 1:nrow(delete), function(row) {
    file.remove(paste0('tables/', row$tablename, '.rda'))
  })
}
|
e064a2435e105b6936b60a2c3cc6d2e7bae3750f
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/CovSelHigh/R/cov.sel.high.R
|
a7df087fa20621a9c1ad6d1b6f3790e8704d074f
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 61,311
|
r
|
cov.sel.high.R
|
cov.sel.high<-function(T=NULL, Y=NULL, X=NULL,type=c("mmpc","mmhc","rf","lasso"), betahat=TRUE, parallel=FALSE, Simulate=TRUE, N=NULL, Setting=1, rep=1, Models=c("Linear", "Nonlinear", "Binary"),...){
Simulate<<-substitute(Simulate)
N<<-substitute(N)
Setting<<-substitute(Setting)
Rep<<-substitute(rep)
Models<<-match.arg(Models)
type<<-match.arg(type)
betahat<<-substitute(betahat)
if(Simulate==FALSE){
if(is.null(T)==FALSE && is.null(Y)==FALSE && is.null(X)==FALSE){
Setting<-Models<-NULL
Rep<-1
N<-dim(X)[1]
}else{stop("Data or simulation settings must be provided correctly")}
if (sum(is.na(X)) > 0 | sum(is.na(T)) > 0 | sum(is.na(Y)) > 0) {
stop("missing data is currently not supported. Check T, Y, and X for missing values")
}
if (class(Y)!="numeric") {
stop("the outcome Y must be a numeric vector")
}
if (length(unique(T))!=2 || is.na(match(unique(T),c(0,1))) ){
stop("the treatment variable T must be binary")
}
uniqueclass<-unique(unlist(lapply(X,class)))
nrclass<-length(uniqueclass)
if(sum(uniqueclass %in% c("factor", "ordered", "numeric"))<nrclass){
stop("the covariates in X must be either numeric, factors or ordered factors.")
}
dat<-data.frame(X,Y,T)
}
if(Simulate==TRUE){
if(is.null(N) || is.null(Setting) || is.null(Rep) || is.null(Models)){stop("Data or simulation settings must be provided correctly")}
if(parallel==TRUE){registerDoRNG(1231255125)}else{set.seed(1231255125)}
}
reslist<-vector("list", Rep)
if(parallel==TRUE){
reslist<-foreach(i=1:Rep, .packages=c('MASS', 'bindata','Matching','bnlearn','CovSelHigh','glmnet','randomForest'),.export=c("Simulate","N","Setting","Rep","Models","type","betahat"))%dopar%{
if(Simulate==TRUE){
dat<-cov.sel.high.sim(N, Setting, Rep, Models)$dat
X<-dat[,-c((dim(dat)[2])-1,dim(dat)[2])]
Y<-dat[,((dim(dat)[2])-1)]
T<-dat[,dim(dat)[2]]
}
varnames<-colnames(dat)
covarcol<-1:(dim(X)[2])
ycol<-(dim(X)[2])+1
Tcol<-ycol+1
if(class(dat[,Tcol])!="factor"){
dat[,Tcol]<-factor(dat[,Tcol])
}
datbeta<-dat
if(length(unique(dat[,ycol]))==2){
datbeta[,ycol]<-factor(datbeta[,ycol])
}
numcol<-which(lapply(dat[covarcol],class)=="numeric")
covars<-colnames(dat[,covarcol])
if(type=="mmpc" || type=="mmhc"){
if(length(unique(dat[,ycol]))>2){
tried<-try(discretize(data.frame(dat[,ycol]),method="quantile"),silent=TRUE)
if(class(tried)=="data.frame"){
dat[,ycol]<-discretize(data.frame(dat[,ycol]),method="quantile")
}else{
print(tried)
stop("the numeric outcome could not be discretized, recode it into factor")
}
}else{dat[,ycol]<-factor(dat[,ycol])
}
if(length(numcol)>0){
if(length(numcol)==1){
tried<-try(discretize(data.frame(dat[,numcol]),method="quantile"),silent=TRUE)
if(class(tried)=="data.frame"){
dat[,numcol]<-discretize(data.frame(dat[,numcol]),method="quantile")
}else{ print(tried)
stop("the numeric covariate could not be discretized, recode it into factor")
}
}else{
tried<-try(discretize(dat[,numcol],method="quantile"),silent=TRUE)
if(class(tried)=="data.frame"){
dat[,numcol]<-discretize(dat[,numcol],method="quantile")
}else{ print(tried)
stop("at least one numeric covariate could not be discretized, recode it into factor")
}
}}
}
#####Markov networks
if(type=="mmpc"){
#Common cause criterion (Waernbaum, de Luna and Richardson)
##Algorithm 1
bmT<-matrix(c(rep("T",length(covarcol)),covars),ncol=2)
blacklistT<-data.frame(bmT)
names(blacklistT)<-c("from","to")
lt1<-dat[,c(covarcol,Tcol)]
res1<-mmpc(lt1,optimized=FALSE)
##Subset X.T
XT<-res1$nodes$T$mb
covarsT<-which(match(colnames(dat),XT)!="NA")
bmQ<-matrix(c(rep("Y",length(covarsT)),XT),ncol=2)
blacklistQ<-data.frame(bmQ)
names(blacklistQ)<-c("from","to")
lt2<-dat[which(dat[,Tcol]==1),c(covarsT,ycol)]
##Subset Q.1
if(is.data.frame(lt2)==FALSE){
Q1<-NULL
}else{
res2<-mmpc(lt2,optimized=FALSE)
Q1<-res2$nodes$Y$mb
}
lt3<-dat[which(dat[,Tcol]==0),c(covarsT,ycol)]
##Subset Q.0
if(is.data.frame(lt3)==FALSE){
Q0<-NULL
}else{
res3<-mmpc(lt3,optimized=FALSE)
Q0<-res3$nodes$Y$mb
}
##Subset Q=Q.1UQ.0
if((length(Q1)+length(Q0)==0)){Q<-NULL}else{Q<-unique(c(Q1,Q0))[order(unique(c(Q1,Q0)))]}
##Algortithm 2
bmY<-matrix(c(rep("Y",length(covarcol)),covars),ncol=2)
blacklistY<-data.frame(bmY)
names(blacklistY)<-c("from","to")
lt4<-dat[which(dat[,Tcol]==1),c(covarcol,ycol)]
lt5<-dat[which(dat[,Tcol]==0),c(covarcol,ycol)]
res4<-mmpc(lt4,optimized=FALSE)
res5<-mmpc(lt5,optimized=FALSE)
##Subset X.1
X1<-res4$nodes$Y$mb
##Subset X.0
X0<-res5$nodes$Y$mb
##Subset X.Y
if((length(X1)+length(X0)==0)){XY<-NULL}else{
XY<-unique(c(X1,X0))[order(unique(c(X1,X0)))]}
covars1<-which(match(colnames(dat),X1)!="NA")
covars0<-which(match(colnames(dat),X0)!="NA")
bm1<-matrix(c(rep("T",length(covars1)),X1),ncol=2)
blacklist1<-data.frame(bm1)
names(blacklist1)<-c("from","to")
bm0<-matrix(c(rep("T",length(covars0)),X0),ncol=2)
blacklist0<-data.frame(bm0)
names(blacklist0)<-c("from","to")
lt6<-dat[,c(covars1,Tcol)]
lt7<-dat[,c(covars0,Tcol)]
##Subset Z.1
if(is.data.frame(lt6)==FALSE){
Z1<-NULL
}else{
res6<-mmpc(lt6,optimized=FALSE)
Z1<-res6$nodes$T$mb
}
##Subset Z.0
if(is.data.frame(lt7)==FALSE){
Z0<-NULL
}else{
res7<-mmpc(lt7,optimized=FALSE)
Z0<-res7$nodes$T$mb
}
##Subset Z=Z.1UZ.0
if((length(Z1)+length(Z0)==0)){Z<-NULL}else{Z<-unique(c(Z1,Z0))[order(unique(c(Z1,Z0)))]}
#Disjunctive cause criterion (VanderWeele and Shpitser)
##Subset X.D
if((length(XY)+length(XT)==0)){XD<-NULL}else{
XD<-unique(c(XY,XT))[order(unique(c(XY,XT)))]
}
covarsXD<-which(match(colnames(dat),XD)!="NA")
}
#########Bayesian networks
if(type=="mmhc"){
#Common cause criterion (Waernbaum, de Luna and Richardson)
##Algorithm 1
bmT<-matrix(c(rep("T",length(covarcol)),covars),ncol=2)
blacklistT<-data.frame(bmT)
names(blacklistT)<-c("from","to")
lt1<-dat[,c(covarcol,Tcol)]
res1<-mmhc(lt1,blacklist=blacklistT,optimized=FALSE, score="aic")
##Subset X.T
XT<-res1$nodes$T$mb
#print(XT)
covarsT<-which(match(colnames(dat),XT)!="NA")
bmQ<-matrix(c(rep("Y",length(covarsT)),XT),ncol=2)
blacklistQ<-data.frame(bmQ)
names(blacklistQ)<-c("from","to")
lt2<-dat[which(dat[,Tcol]==1),c(covarsT,ycol)]
##Subset Q.1
if(is.data.frame(lt2)==FALSE){
Q1<-NULL
}else{
res2<-mmhc(lt2,blacklist=blacklistQ,optimized=FALSE, score="aic")
Q1<-res2$nodes$Y$mb
}
lt3<-dat[which(dat[,Tcol]==0),c(covarsT,ycol)]
##Subset Q.0
if(is.data.frame(lt3)==FALSE){
Q0<-NULL
}else{
res3<-mmhc(lt3,blacklist=blacklistQ,optimized=FALSE, score="aic")
Q0<-res3$nodes$Y$mb
}
##Subset Q=Q.1UQ.0
if((length(Q1)+length(Q0)==0)){Q<-NULL}else{Q<-unique(c(Q1,Q0))[order(unique(c(Q1,Q0)))]}
##Algortithm 2
bmY<-matrix(c(rep("Y",length(covarcol)),covars),ncol=2)
blacklistY<-data.frame(bmY)
names(blacklistY)<-c("from","to")
lt4<-dat[which(dat[,Tcol]==1),c(covarcol,ycol)]
lt5<-dat[which(dat[,Tcol]==0),c(covarcol,ycol)]
res4<-mmhc(lt4,blacklist=blacklistY,optimized=FALSE, score="aic")
res5<-mmhc(lt5,blacklist=blacklistY,optimized=FALSE, score="aic")
##Subset X.1
X1<-res4$nodes$Y$mb
##Subset X.0
X0<-res5$nodes$Y$mb
##Subset X.Y
if((length(X1)+length(X0)==0)){XY<-NULL}else{
XY<-unique(c(X1,X0))[order(unique(c(X1,X0)))]}
covars1<-which(match(colnames(dat),X1)!="NA")
covars0<-which(match(colnames(dat),X0)!="NA")
bm1<-matrix(c(rep("T",length(covars1)),X1),ncol=2)
blacklist1<-data.frame(bm1)
names(blacklist1)<-c("from","to")
bm0<-matrix(c(rep("T",length(covars0)),X0),ncol=2)
blacklist0<-data.frame(bm0)
names(blacklist0)<-c("from","to")
lt6<-dat[,c(covars1,Tcol)]
lt7<-dat[,c(covars0,Tcol)]
##Subset Z.1
if(is.data.frame(lt6)==FALSE){
Z1<-NULL
}else{
res6<-mmhc(lt6,blacklist=blacklist1,optimized=FALSE, score="aic")
Z1<-res6$nodes$T$mb
}
##Subset Z.0
if(is.data.frame(lt7)==FALSE){
Z0<-NULL
}else{
res7<-mmhc(lt7,blacklist=blacklist0,optimized=FALSE, score="aic")
Z0<-res7$nodes$T$mb
}
##Subset Z=Z.1UZ.0
if((length(Z1)+length(Z0)==0)){Z<-NULL}else{Z<-unique(c(Z1,Z0))[order(unique(c(Z1,Z0)))]}
#Disjunctive cause criterion (VanderWeele and Shpitser)
##Subset X.D
if((length(XY)+length(XT)==0)){XD<-NULL}else{
XD<-unique(c(XY,XT))[order(unique(c(XY,XT)))]
}
covarsXD<-which(match(colnames(dat),XD)!="NA")
}
######LASSO
if(type=="lasso"){
#Common cause criterion (Waernbaum, de Luna and Richardson)
##Algorithm 1
Y<-datbeta$T
X<-datbeta[,covarcol]
D<-dim(X)[2]
Xo<-X
if(length(numcol)>0){
xnum<-X[,numcol]
Xvars<-t(rbind(matrix(c(names(X),names(xnum),rep(0,length(c(names(X),names(xnum))))),ncol=2), t(combn(names(X),2))))
f1<-eval(paste("Y ~", paste(paste('I(', names(X)[numcol], '^2)', sep=''), collapse=" + ")))
f2<-as.formula(f1)
x1<- model.matrix(f2, cbind(X,Y))[, -1]
f3 <- as.formula(Y ~ .*.)
x2<- model.matrix(f3, cbind(X,Y))[, -1]
X<-cbind(x2[,c(1:D)],x1,x2[,-c(1:D)])
}else{
Xvars<-t(rbind(matrix(c(names(X),rep(0,length(names(X)))),ncol=2), t(combn(names(X),2))))
f3 <- as.formula(Y ~ .*.)
x2<- model.matrix(f3, data.frame(cbind(X,Y)))[, -1]
X<-cbind(x2[,c(1:D)])
}
SL <-cov.sel.high.lasso(Y = Y, X = X)
##Subset X.T
u<-unique(c(unique(Xvars[1,which(SL==TRUE)]),unique(Xvars[2,which(SL==TRUE)])))
XT<-covars[is.na(match(covars,u))==FALSE]
covarsT<-which(match(colnames(datbeta),XT)!="NA")
lt2<-datbeta[which(datbeta[,Tcol]==1),c(covarsT,ycol)]
##Subset Q.1
if(is.data.frame(lt2)==FALSE){
Q1<-NULL
}else{
Y<-lt2$Y
X<-lt2[,-dim(lt2)[2]]
xnames<-names(lt2)[-dim(lt2)[2]]
D<-length(xnames)
if(D==1){Q1<-XT}else{
Xo<-X
numcol2<-which(lapply(X,class)=="numeric")
if(length(numcol2)>0){
xnum<-X[,numcol2]
Xvars<-t(rbind(matrix(c(names(X),names(xnum),rep(0,length(c(names(X),names(xnum))))),ncol=2), t(combn(names(X),2))))
f1<-eval(paste("Y ~", paste(paste('I(', xnames[numcol2], '^2)', sep=''), collapse=" + ")))
f2<-as.formula(f1)
x1<- model.matrix(f2, cbind(X,Y))[, -1]
f3 <- as.formula(Y ~ .*.)
x2<- model.matrix(f3, cbind(X,Y))[, -1]
X<-cbind(x2[,c(1:D)],x1,x2[,-c(1:D)])
}else{
Xvars<-t(rbind(matrix(c(names(X),rep(0,length(names(X)))),ncol=2), t(combn(names(X),2))))
f3 <- as.formula(Y ~ .*.)
x2<- model.matrix(f3, cbind(X,Y))[, -1]
X<-cbind(x2[,c(1:D)])
}
SL <-cov.sel.high.lasso(Y = Y, X = X)
u<-unique(c(unique(Xvars[1,which(SL==TRUE)]),unique(Xvars[2,which(SL==TRUE)])))
Q1<-covars[is.na(match(covars,u))==FALSE]
}
}
lt2<-datbeta[which(datbeta[,Tcol]==0),c(covarsT,ycol)]
##Subset Q.0
if(is.data.frame(lt2)==FALSE){
Q0<-NULL
}else{
Y<-lt2$Y
X<-lt2[,-dim(lt2)[2]]
xnames<-names(lt2)[-dim(lt2)[2]]
D<-length(xnames)
if(D==1){Q0<-XT}else{
Xo<-X
numcol2<-which(lapply(X,class)=="numeric")
if(length(numcol2)>0){
xnum<-X[,numcol2]
Xvars<-t(rbind(matrix(c(names(X),names(xnum),rep(0,length(c(names(X),names(xnum))))),ncol=2), t(combn(names(X),2))))
f1<-eval(paste("Y ~", paste(paste('I(', names(X)[numcol2], '^2)', sep=''), collapse=" + ")))
f2<-as.formula(f1)
x1<- model.matrix(f2, cbind(X,Y))[, -1]
f3 <- as.formula(Y ~ .*.)
x2<- model.matrix(f3, cbind(X,Y))[, -1]
X<-cbind(x2[,c(1:D)],x1,x2[,-c(1:D)])
}else{
Xvars<-t(rbind(matrix(c(names(X),rep(0,length(names(X)))),ncol=2), t(combn(names(X),2))))
f3 <- as.formula(Y ~ .*.)
x2<- model.matrix(f3, cbind(X,Y))[, -1]
X<-cbind(x2[,c(1:D)])
}
SL <-cov.sel.high.lasso(Y = Y, X = X)
u<-unique(c(unique(Xvars[1,which(SL==TRUE)]),unique(Xvars[2,which(SL==TRUE)])))
Q0<-covars[is.na(match(covars,u))==FALSE]
}
}
##Subset Q=Q.1UQ.0
if((length(Q1)+length(Q0)==0)){Q<-NULL}else{Q<-unique(c(Q1,Q0))[order(unique(c(Q1,Q0)))]}
##Algortithm 2
lt4<-datbeta[which(datbeta[,Tcol]==1),c(covarcol,ycol)]
Y<-lt4$Y
X<-lt4[,covarcol]
D<-dim(X)[2]
Xo<-X
if(length(numcol)>0){
xnum<-X[,numcol]
Xvars<-t(rbind(matrix(c(names(X),names(xnum),rep(0,length(c(names(X),names(xnum))))),ncol=2), t(combn(names(X),2))))
f1<-eval(paste("Y ~", paste(paste('I(', names(X)[numcol], '^2)', sep=''), collapse=" + ")))
f2<-as.formula(f1)
x1<- model.matrix(f2, cbind(X,Y))[, -1]
f3 <- as.formula(Y ~ .*.)
x2<- model.matrix(f3, cbind(X,Y))[, -1]
X<-cbind(x2[,c(1:D)],x1,x2[,-c(1:D)])
}else{
Xvars<-t(rbind(matrix(c(names(X),rep(0,length(names(X)))),ncol=2), t(combn(names(X),2))))
f3 <- as.formula(Y ~ .*.)
x2<- model.matrix(f3, cbind(X,Y))[, -1]
X<-cbind(x2[,c(1:D)])
}
SL <-cov.sel.high.lasso(Y = Y, X = X)
##Subset X.1
u<-unique(c(unique(Xvars[1,which(SL==TRUE)]),unique(Xvars[2,which(SL==TRUE)])))
X1<-covars[is.na(match(covars,u))==FALSE]
covarsX1<-which(match(colnames(datbeta),X1)!="NA")
lt2<-datbeta[,c(covarsX1,Tcol)]
##Subset Z.1
if(is.data.frame(lt2)==FALSE){
Z1<-NULL
}else{
Y<-lt2$T
X<-lt2[,-dim(lt2)[2]]
xnames<-names(lt2)[-dim(lt2)[2]]
D<-length(xnames)
if(D==1){Z1<-X1}else{
Xo<-X
numcol2<-which(lapply(X,class)=="numeric")
if(length(numcol2)>0){
xnum<-X[,numcol2]
Xvars<-t(rbind(matrix(c(names(X),names(xnum),rep(0,length(c(names(X),names(xnum))))),ncol=2), t(combn(names(X),2))))
f1<-eval(paste("Y ~", paste(paste('I(', names(X)[numcol2], '^2)', sep=''), collapse=" + ")))
f2<-as.formula(f1)
x1<- model.matrix(f2, cbind(X,Y))[, -1]
f3 <- as.formula(Y ~ .*.)
x2<- model.matrix(f3, cbind(X,Y))[, -1]
X<-cbind(x2[,c(1:D)],x1,x2[,-c(1:D)])
}else{
Xvars<-t(rbind(matrix(c(names(X),rep(0,length(names(X)))),ncol=2), t(combn(names(X),2))))
f3 <- as.formula(Y ~ .*.)
x2<- model.matrix(f3, cbind(X,Y))[, -1]
X<-cbind(x2[,c(1:D)])
}
SL <-cov.sel.high.lasso(Y = Y, X = X)
u<-unique(c(unique(Xvars[1,which(SL==TRUE)]),unique(Xvars[2,which(SL==TRUE)])))
Z1<-covars[is.na(match(covars,u))==FALSE]
}
}
lt4<-datbeta[which(datbeta[,Tcol]==0),c(covarcol,ycol)]
Y<-lt4$Y
X<-lt4[,covarcol]
D<-dim(X)[2]
Xo<-X
if(length(numcol)>0){
xnum<-X[,numcol]
Xvars<-t(rbind(matrix(c(names(X),names(xnum),rep(0,length(c(names(X),names(xnum))))),ncol=2), t(combn(names(X),2))))
f1<-eval(paste("Y ~", paste(paste('I(', names(X)[numcol], '^2)', sep=''), collapse=" + ")))
f2<-as.formula(f1)
x1<- model.matrix(f2, cbind(X,Y))[, -1]
f3 <- as.formula(Y ~ .*.)
x2<- model.matrix(f3, cbind(X,Y))[, -1]
X<-cbind(x2[,c(1:D)],x1,x2[,-c(1:D)])
}else{
Xvars<-t(rbind(matrix(c(names(X),rep(0,length(names(X)))),ncol=2), t(combn(names(X),2))))
f3 <- as.formula(Y ~ .*.)
x2<- model.matrix(f3, cbind(X,Y))[, -1]
X<-cbind(x2[,c(1:D)])
}
SL <-cov.sel.high.lasso(Y = Y, X = X)
##Subset X.0
u<-unique(c(unique(Xvars[1,which(SL==TRUE)]),unique(Xvars[2,which(SL==TRUE)])))
X0<-covars[is.na(match(covars,u))==FALSE]
covarsX0<-which(match(colnames(datbeta),X0)!="NA")
lt2<-datbeta[,c(covarsX0,Tcol)]
##Subset Z.0
if(is.data.frame(lt2)==FALSE){
Z0<-NULL
}else{
Y<-lt2$T
X<-lt2[,-dim(lt2)[2]]
xnames<-names(lt2)[-dim(lt2)[2]]
D<-length(xnames)
if(D==1){Z0<-X0}else{
Xo<-X
numcol2<-which(lapply(X,class)=="numeric")
if(length(numcol2)>0){
xnum<-X[,numcol2]
Xvars<-t(rbind(matrix(c(names(X),names(xnum),rep(0,length(c(names(X),names(xnum))))),ncol=2), t(combn(names(X),2))))
f1<-eval(paste("Y ~", paste(paste('I(', names(X)[numcol2], '^2)', sep=''), collapse=" + ")))
f2<-as.formula(f1)
x1<- model.matrix(f2, cbind(X,Y))[, -1]
f3 <- as.formula(Y ~ .*.)
x2<- model.matrix(f3, cbind(X,Y))[, -1]
X<-cbind(x2[,c(1:D)],x1,x2[,-c(1:D)])
}else{
Xvars<-t(rbind(matrix(c(names(X),rep(0,length(names(X)))),ncol=2), t(combn(names(X),2))))
f3 <- as.formula(Y ~ .*.)
x2<- model.matrix(f3, cbind(X,Y))[, -1]
X<-cbind(x2[,c(1:D)])
}
SL <-cov.sel.high.lasso(Y = Y, X = X)
u<-unique(c(unique(Xvars[1,which(SL==TRUE)]),unique(Xvars[2,which(SL==TRUE)])))
Z0<-covars[is.na(match(covars,u))==FALSE]
}
}
##Subset X.Y
if((length(X1)+length(X0)==0)){XY<-NULL}else{
XY<-unique(c(X1,X0))[order(unique(c(X1,X0)))]}
##Subset Z=Z.1UZ.0
if((length(Z1)+length(Z0)==0)){Z<-NULL}else{Z<-unique(c(Z1,Z0))[order(unique(c(Z1,Z0)))]}
#Disjunctive cause criterion (VanderWeele and Shpitser)
##Subset X.D
if((length(XY)+length(XT)==0)){XD<-NULL}else{
XD<-unique(c(XY,XT))[order(unique(c(XY,XT)))]
}
covarsXD<-which(match(colnames(datbeta),XD)!="NA")
}
if(type=="rf"){
#Common cause criterion (Waernbaum, de Luna and Richardson)
##Algorithm 1
Y<-datbeta$T
X<-datbeta[,covarcol]
SL <-cov.sel.high.rf(Y = Y, X = X)
##Subset X.T
XT<-colnames(X)[which(SL==TRUE)]
covarsT<-which(match(colnames(datbeta),XT)!="NA")
lt2<-datbeta[which(datbeta[,Tcol]==1),c(covarsT,ycol)]
##Subset Q.1
if(is.data.frame(lt2)==FALSE){
Q1<-NULL
}else{
Y<-lt2$Y
X<-lt2[,-dim(lt2)[2]]
if(is.data.frame(X)==FALSE){
X<-data.frame(X)
names(X)<-XT
}
SL <-cov.sel.high.rf(Y = Y, X = X)
Q1<-colnames(X)[which(SL==TRUE)]
}
lt2<-datbeta[which(datbeta[,Tcol]==0),c(covarsT,ycol)]
##Subset Q.0
if(is.data.frame(lt2)==FALSE){
Q0<-NULL
}else{
Y<-lt2$Y
X<-lt2[,-dim(lt2)[2]]
if(is.data.frame(X)==FALSE){
X<-data.frame(X)
names(X)<-XT
}
SL <-cov.sel.high.rf(Y = Y, X = X)
Q0<-colnames(X)[which(SL==TRUE)]
}
##Subset Q=Q.1UQ.0
if((length(Q1)+length(Q0)==0)){Q<-NULL}else{Q<-unique(c(Q1,Q0))[order(unique(c(Q1,Q0)))]}
##Algortithm 2
lt4<-datbeta[which(datbeta[,Tcol]==1),c(covarcol,ycol)]
Y<-lt4$Y
X<-lt4[,covarcol]
SL <-cov.sel.high.rf(Y = Y, X = X)
##Subset X.1
X1<-colnames(X)[which(SL==TRUE)]
covarsX1<-which(match(colnames(datbeta),X1)!="NA")
lt2<-datbeta[,c(covarsX1,Tcol)]
##Subset Z.1
if(is.data.frame(lt2)==FALSE){
Z1<-NULL
}else{
Y<-lt2$T
X<-lt2[,-dim(lt2)[2]]
if(is.data.frame(X)==FALSE){
X<-data.frame(X)
names(X)<-X1
}
SL <-cov.sel.high.rf(Y = Y, X = X)
Z1<-colnames(X)[which(SL==TRUE)]
}
lt4<-datbeta[which(datbeta[,Tcol]==0),c(covarcol,ycol)]
Y<-lt4$Y
X<-lt4[,covarcol]
SL <-cov.sel.high.rf(Y = Y, X = X)
##Subset X.0
X0<-colnames(X)[which(SL==TRUE)]
covarsX0<-which(match(colnames(datbeta),X0)!="NA")
lt2<-datbeta[,c(covarsX0,Tcol)]
##Subset Z.0
if(is.data.frame(lt2)==FALSE){
Z0<-NULL
}else{
Y<-lt2$T
X<-lt2[,-dim(lt2)[2]]
if(is.data.frame(X)==FALSE){
X<-data.frame(X)
names(X)<-X0
}
SL <-cov.sel.high.rf(Y = Y, X = X)
Z0<-colnames(X)[which(SL==TRUE)]
}
##Subset X.Y
if((length(X1)+length(X0)==0)){XY<-NULL}else{
XY<-unique(c(X1,X0))[order(unique(c(X1,X0)))]}
##Subset Z=Z.1UZ.0
if((length(Z1)+length(Z0)==0)){Z<-NULL}else{Z<-unique(c(Z1,Z0))[order(unique(c(Z1,Z0)))]}
#Disjunctive cause criterion (VanderWeele and Shpitser)
##Subset X.D
if((length(XY)+length(XT)==0)){XD<-NULL}else{
XD<-unique(c(XY,XT))[order(unique(c(XY,XT)))]
}
covarsXD<-which(match(colnames(datbeta),XD)!="NA")
}
#Subset cardinalities
cardXT<-length(XT)
cardQ<-length(Q)
cardXY<-length(XY)
cardZ<-length(Z)
cardXD<-length(XD)
cards<-data.frame(X.T=cardXT,Q=cardQ,X.Y=cardXY,Z=cardZ,X.D=cardXD)
#ATE estimate via propensity score matching
if(betahat==TRUE){
datbeta[,Tcol]<-as.numeric(datbeta[,Tcol])-1
#Pre-treatment criterion
f1<-as.formula(paste("T~", paste(paste(colnames(datbeta)[covarcol]), collapse= "+")))
ps<-glm(f1,family=binomial,data=datbeta)$fitted
betahatX<-Match(Y=datbeta[,ycol], Tr=datbeta[,Tcol], X=as.matrix(ps,ncol=1), estimand = "ATE", M = 1, replace=TRUE)
betahatXest<-betahatX$est
betahatXse<-betahatX$se
#Common cause criterion
##Algorithm 1
##Subset X.T
if(length(XT)==0){
betahatXTest<-mean(datbeta[which(datbeta[,Tcol]==1),ycol])-mean(datbeta[which(datbeta[,Tcol]==0),ycol])
betahatXTse<-sqrt((var(datbeta[which(datbeta[,Tcol]==1),ycol])/length(which(datbeta[,Tcol]==1))+var(datbeta[which(datbeta[,Tcol]==0),ycol]))/length(which(datbeta[,Tcol]==0)))
}else{
f1<-as.formula(paste("T~", paste(paste(XT), collapse= "+")))
ps<-glm(f1,family=binomial,data=datbeta)$fitted
betahatXT<- Match(Y=datbeta[,ycol], Tr=datbeta[,Tcol], X=as.matrix(ps,ncol=1), estimand = "ATE", M = 1,replace=TRUE)
betahatXTest<-betahatXT$est
betahatXTse<-betahatXT$se
}
##Subset Q
if(length(Q)==0){ betahatQest<-mean(datbeta[which(datbeta[,Tcol]==1),ycol])-mean(datbeta[which(datbeta[,Tcol]==0),ycol])
betahatQse<-sqrt((var(datbeta[which(datbeta[,Tcol]==1),ycol])/length(which(datbeta[,Tcol]==1))+var(datbeta[which(datbeta[,Tcol]==0),ycol]))/length(which(datbeta[,Tcol]==0)))
}else{
f1<-as.formula(paste("T~", paste(paste(Q), collapse= "+")))
ps<-glm(f1,family=binomial,data=datbeta)$fitted
betahatQ<-Match(Y=datbeta[,ycol], Tr=datbeta[,Tcol], X=as.matrix(ps,ncol=1), estimand = "ATE", M = 1,replace=TRUE)
betahatQest<-betahatQ$est
betahatQse<-betahatQ$se
}
#Algorithm 2
##Subset X.Y
if(length(XY)==0){ betahatXYest<-mean(datbeta[which(datbeta[,Tcol]==1),ycol])-mean(datbeta[which(datbeta[,Tcol]==0),ycol])
betahatXYse<-sqrt((var(datbeta[which(datbeta[,Tcol]==1),ycol])/length(which(datbeta[,Tcol]==1))+var(datbeta[which(datbeta[,Tcol]==0),ycol]))/length(which(datbeta[,Tcol]==0)))
}else{
f1<-as.formula(paste("T~", paste(paste(XY), collapse= "+")))
ps<-glm(f1,family=binomial,data=datbeta)$fitted
betahatXY<-Match(Y=datbeta[,ycol], Tr=datbeta[,Tcol], X=as.matrix(ps,ncol=1), estimand = "ATE", M = 1,replace=TRUE)
betahatXYest<-betahatXY$est
betahatXYse<-betahatXY$se
}
##Subset Z
if(length(Z)==0){ betahatZest<-mean(datbeta[which(datbeta[,Tcol]==1),ycol])-mean(datbeta[which(datbeta[,Tcol]==0),ycol])
betahatZse<-sqrt((var(datbeta[which(datbeta[,Tcol]==1),ycol])/length(which(datbeta[,Tcol]==1))+var(datbeta[which(datbeta[,Tcol]==0),ycol]))/length(which(datbeta[,Tcol]==0)))
}else{
f1<-as.formula(paste("T~", paste(paste(Z), collapse= "+")))
ps<-glm(f1,family=binomial,data=datbeta)$fitted
betahatZ<-Match(Y=datbeta[,ycol], Tr=datbeta[,Tcol], X=as.matrix(ps,ncol=1), estimand = "ATE", M = 1, replace=TRUE)
betahatZest<-betahatZ$est
betahatZse<-betahatZ$se
}
#Disjunctive cause criterion
##Subset X.D
if(length(XD)==0){ betahatXDest<-mean(datbeta[which(datbeta[,Tcol]==1),ycol])-mean(datbeta[which(datbeta[,Tcol]==0),ycol])
betahatXDse<-sqrt((var(datbeta[which(datbeta[,Tcol]==1),ycol])/length(which(datbeta[,Tcol]==1))+var(datbeta[which(datbeta[,Tcol]==0),ycol]))/length(which(datbeta[,Tcol]==0)))
}else{
f1<-as.formula(paste("T~", paste(paste(XD), collapse= "+")))
ps<-glm(f1,family=binomial,data=datbeta)$fitted
betahatXD<-Match(Y=datbeta[,ycol], Tr=datbeta[,Tcol], X=as.matrix(ps,ncol=1), estimand = "ATE", M = 1,replace=TRUE)
betahatXDest<-betahatXD$est
betahatXDse<-betahatXD$se
}
betahats<-data.frame(X=betahatXest,X.T=betahatXTest,Q=betahatQest,X.Y=betahatXYest,Z=betahatZest,X.D=betahatXDest)
betahatsse<-data.frame(X=betahatXse,X.T=betahatXTse,Q=betahatQse,X.Y=betahatXYse,Z=betahatZse,X.D=betahatXDse)
}else{betahats<-betahatsse<-NULL}
list(X.T=XT,Q.0=Q0,Q.1=Q1,Q=Q,X.0=X0,X.1=X1,X.Y=XY,Z.0=Z0,Z.1=Z1,Z=Z,X.TY=XD,cardinalities=cards, est=betahats, se=betahatsse, N=N, Setting=Setting, rep=Rep, Models=Models,type=type, varnames=varnames)
}
}else{
for(i in 1:Rep){
if(Simulate==TRUE){
dat<-cov.sel.high.sim(N, Setting, Rep, Models)$dat
X<-dat[,-c((dim(dat)[2])-1,dim(dat)[2])]
Y<-dat[,((dim(dat)[2])-1)]
T<-dat[,dim(dat)[2]]
}
varnames<-colnames(dat)
covarcol<-1:(dim(X)[2])
ycol<-(dim(X)[2])+1
Tcol<-ycol+1
if(class(dat[,Tcol])!="factor"){
dat[,Tcol]<-factor(dat[,Tcol])
}
datbeta<-dat
if(length(unique(dat[,ycol]))==2){
datbeta[,ycol]<-factor(datbeta[,ycol])
}
numcol<-which(lapply(dat[covarcol],class)=="numeric")
covars<-colnames(dat[,covarcol])
if(type=="mmpc" || type=="mmhc"){
if(length(unique(dat[,ycol]))>2){
tried<-try(discretize(data.frame(dat[,ycol]),method="quantile"),silent=TRUE)
if(class(tried)=="data.frame"){
dat[,ycol]<-discretize(data.frame(dat[,ycol]),method="quantile")
}else{
print(tried)
stop("the numeric outcome could not be discretized, recode it into factor")
}
}else{dat[,ycol]<-factor(dat[,ycol])
}
if(length(numcol)>0){
if(length(numcol)==1){
tried<-try(discretize(data.frame(dat[,numcol]),method="quantile"),silent=TRUE)
if(class(tried)=="data.frame"){
dat[,numcol]<-discretize(data.frame(dat[,numcol]),method="quantile")
}else{ print(tried)
stop("the numeric covariate could not be discretized, recode it into factor")
}
}else{
tried<-try(discretize(dat[,numcol],method="quantile"),silent=TRUE)
if(class(tried)=="data.frame"){
dat[,numcol]<-discretize(dat[,numcol],method="quantile")
}else{ print(tried)
stop("at least one numeric covariate could not be discretized, recode it into factor")
}
}}
}
#####Markov networks
if(type=="mmpc"){
#Common cause criterion (Waernbaum, de Luna and Richardson)
##Algorithm 1
bmT<-matrix(c(rep("T",length(covarcol)),covars),ncol=2)
blacklistT<-data.frame(bmT)
names(blacklistT)<-c("from","to")
lt1<-dat[,c(covarcol,Tcol)]
res1<-mmpc(lt1,optimized=FALSE)
##Subset X.T
XT<-res1$nodes$T$mb
covarsT<-which(match(colnames(dat),XT)!="NA")
bmQ<-matrix(c(rep("Y",length(covarsT)),XT),ncol=2)
blacklistQ<-data.frame(bmQ)
names(blacklistQ)<-c("from","to")
lt2<-dat[which(dat[,Tcol]==1),c(covarsT,ycol)]
##Subset Q.1
if(is.data.frame(lt2)==FALSE){
Q1<-NULL
}else{
res2<-mmpc(lt2,optimized=FALSE)
Q1<-res2$nodes$Y$mb
}
lt3<-dat[which(dat[,Tcol]==0),c(covarsT,ycol)]
##Subset Q.0
if(is.data.frame(lt3)==FALSE){
Q0<-NULL
}else{
res3<-mmpc(lt3,optimized=FALSE)
Q0<-res3$nodes$Y$mb
}
##Subset Q=Q.1UQ.0
if((length(Q1)+length(Q0)==0)){Q<-NULL}else{Q<-unique(c(Q1,Q0))[order(unique(c(Q1,Q0)))]}
##Algortithm 2
bmY<-matrix(c(rep("Y",length(covarcol)),covars),ncol=2)
blacklistY<-data.frame(bmY)
names(blacklistY)<-c("from","to")
lt4<-dat[which(dat[,Tcol]==1),c(covarcol,ycol)]
lt5<-dat[which(dat[,Tcol]==0),c(covarcol,ycol)]
res4<-mmpc(lt4,optimized=FALSE)
res5<-mmpc(lt5,optimized=FALSE)
##Subset X.1
X1<-res4$nodes$Y$mb
##Subset X.0
X0<-res5$nodes$Y$mb
##Subset X.Y
if((length(X1)+length(X0)==0)){XY<-NULL}else{
XY<-unique(c(X1,X0))[order(unique(c(X1,X0)))]}
covars1<-which(match(colnames(dat),X1)!="NA")
covars0<-which(match(colnames(dat),X0)!="NA")
bm1<-matrix(c(rep("T",length(covars1)),X1),ncol=2)
blacklist1<-data.frame(bm1)
names(blacklist1)<-c("from","to")
bm0<-matrix(c(rep("T",length(covars0)),X0),ncol=2)
blacklist0<-data.frame(bm0)
names(blacklist0)<-c("from","to")
lt6<-dat[,c(covars1,Tcol)]
lt7<-dat[,c(covars0,Tcol)]
##Subset Z.1
if(is.data.frame(lt6)==FALSE){
Z1<-NULL
}else{
res6<-mmpc(lt6,optimized=FALSE)
Z1<-res6$nodes$T$mb
}
##Subset Z.0
if(is.data.frame(lt7)==FALSE){
Z0<-NULL
}else{
res7<-mmpc(lt7,optimized=FALSE)
Z0<-res7$nodes$T$mb
}
##Subset Z=Z.1UZ.0
if((length(Z1)+length(Z0)==0)){Z<-NULL}else{Z<-unique(c(Z1,Z0))[order(unique(c(Z1,Z0)))]}
#Disjunctive cause criterion (VanderWeele and Shpitser)
##Subset X.D
if((length(XY)+length(XT)==0)){XD<-NULL}else{
XD<-unique(c(XY,XT))[order(unique(c(XY,XT)))]
}
covarsXD<-which(match(colnames(dat),XD)!="NA")
}
#########Bayesian networks
if(type=="mmhc"){
#Common cause criterion (Waernbaum, de Luna and Richardson)
##Algorithm 1
bmT<-matrix(c(rep("T",length(covarcol)),covars),ncol=2)
blacklistT<-data.frame(bmT)
names(blacklistT)<-c("from","to")
lt1<-dat[,c(covarcol,Tcol)]
res1<-mmhc(lt1,blacklist=blacklistT,optimized=FALSE, score="aic")
##Subset X.T
XT<-res1$nodes$T$mb
#print(XT)
covarsT<-which(match(colnames(dat),XT)!="NA")
bmQ<-matrix(c(rep("Y",length(covarsT)),XT),ncol=2)
blacklistQ<-data.frame(bmQ)
names(blacklistQ)<-c("from","to")
lt2<-dat[which(dat[,Tcol]==1),c(covarsT,ycol)]
##Subset Q.1
if(is.data.frame(lt2)==FALSE){
Q1<-NULL
}else{
res2<-mmhc(lt2,blacklist=blacklistQ,optimized=FALSE, score="aic")
Q1<-res2$nodes$Y$mb
}
lt3<-dat[which(dat[,Tcol]==0),c(covarsT,ycol)]
##Subset Q.0
if(is.data.frame(lt3)==FALSE){
Q0<-NULL
}else{
res3<-mmhc(lt3,blacklist=blacklistQ,optimized=FALSE, score="aic")
Q0<-res3$nodes$Y$mb
}
##Subset Q=Q.1UQ.0
if((length(Q1)+length(Q0)==0)){Q<-NULL}else{Q<-unique(c(Q1,Q0))[order(unique(c(Q1,Q0)))]}
##Algortithm 2
bmY<-matrix(c(rep("Y",length(covarcol)),covars),ncol=2)
blacklistY<-data.frame(bmY)
names(blacklistY)<-c("from","to")
lt4<-dat[which(dat[,Tcol]==1),c(covarcol,ycol)]
lt5<-dat[which(dat[,Tcol]==0),c(covarcol,ycol)]
res4<-mmhc(lt4,blacklist=blacklistY,optimized=FALSE, score="aic")
res5<-mmhc(lt5,blacklist=blacklistY,optimized=FALSE, score="aic")
##Subset X.1
X1<-res4$nodes$Y$mb
##Subset X.0
X0<-res5$nodes$Y$mb
##Subset X.Y
if((length(X1)+length(X0)==0)){XY<-NULL}else{
XY<-unique(c(X1,X0))[order(unique(c(X1,X0)))]}
covars1<-which(match(colnames(dat),X1)!="NA")
covars0<-which(match(colnames(dat),X0)!="NA")
bm1<-matrix(c(rep("T",length(covars1)),X1),ncol=2)
blacklist1<-data.frame(bm1)
names(blacklist1)<-c("from","to")
bm0<-matrix(c(rep("T",length(covars0)),X0),ncol=2)
blacklist0<-data.frame(bm0)
names(blacklist0)<-c("from","to")
lt6<-dat[,c(covars1,Tcol)]
lt7<-dat[,c(covars0,Tcol)]
##Subset Z.1
if(is.data.frame(lt6)==FALSE){
Z1<-NULL
}else{
res6<-mmhc(lt6,blacklist=blacklist1,optimized=FALSE, score="aic")
Z1<-res6$nodes$T$mb
}
##Subset Z.0
if(is.data.frame(lt7)==FALSE){
Z0<-NULL
}else{
res7<-mmhc(lt7,blacklist=blacklist0,optimized=FALSE, score="aic")
Z0<-res7$nodes$T$mb
}
##Subset Z=Z.1UZ.0
if((length(Z1)+length(Z0)==0)){Z<-NULL}else{Z<-unique(c(Z1,Z0))[order(unique(c(Z1,Z0)))]}
#Disjunctive cause criterion (VanderWeele and Shpitser)
##Subset X.D
if((length(XY)+length(XT)==0)){XD<-NULL}else{
XD<-unique(c(XY,XT))[order(unique(c(XY,XT)))]
}
covarsXD<-which(match(colnames(dat),XD)!="NA")
}
######LASSO
if(type=="lasso"){
#Common cause criterion (Waernbaum, de Luna and Richardson)
##Algorithm 1
Y<-datbeta$T
X<-datbeta[,covarcol]
D<-dim(X)[2]
Xo<-X
if(length(numcol)>0){
xnum<-X[,numcol]
Xvars<-t(rbind(matrix(c(names(X),names(xnum),rep(0,length(c(names(X),names(xnum))))),ncol=2), t(combn(names(X),2))))
f1<-eval(paste("Y ~", paste(paste('I(', names(X)[numcol], '^2)', sep=''), collapse=" + ")))
f2<-as.formula(f1)
x1<- model.matrix(f2, cbind(X,Y))[, -1]
f3 <- as.formula(Y ~ .*.)
x2<- model.matrix(f3, cbind(X,Y))[, -1]
X<-cbind(x2[,c(1:D)],x1,x2[,-c(1:D)])
}else{
Xvars<-t(rbind(matrix(c(names(X),rep(0,length(names(X)))),ncol=2), t(combn(names(X),2))))
f3 <- as.formula(Y ~ .*.)
x2<- model.matrix(f3, data.frame(cbind(X,Y)))[, -1]
X<-cbind(x2[,c(1:D)])
}
SL <-cov.sel.high.lasso(Y = Y, X = X)
##Subset X.T
u<-unique(c(unique(Xvars[1,which(SL==TRUE)]),unique(Xvars[2,which(SL==TRUE)])))
XT<-covars[is.na(match(covars,u))==FALSE]
covarsT<-which(match(colnames(datbeta),XT)!="NA")
lt2<-datbeta[which(datbeta[,Tcol]==1),c(covarsT,ycol)]
##Subset Q.1
if(is.data.frame(lt2)==FALSE){
Q1<-NULL
}else{
Y<-lt2$Y
X<-lt2[,-dim(lt2)[2]]
xnames<-names(lt2)[-dim(lt2)[2]]
D<-length(xnames)
if(D==1){Q1<-XT}else{
Xo<-X
numcol2<-which(lapply(X,class)=="numeric")
if(length(numcol2)>0){
xnum<-X[,numcol2]
Xvars<-t(rbind(matrix(c(names(X),names(xnum),rep(0,length(c(names(X),names(xnum))))),ncol=2), t(combn(names(X),2))))
f1<-eval(paste("Y ~", paste(paste('I(', xnames[numcol2], '^2)', sep=''), collapse=" + ")))
f2<-as.formula(f1)
x1<- model.matrix(f2, cbind(X,Y))[, -1]
f3 <- as.formula(Y ~ .*.)
x2<- model.matrix(f3, cbind(X,Y))[, -1]
X<-cbind(x2[,c(1:D)],x1,x2[,-c(1:D)])
}else{
Xvars<-t(rbind(matrix(c(names(X),rep(0,length(names(X)))),ncol=2), t(combn(names(X),2))))
f3 <- as.formula(Y ~ .*.)
x2<- model.matrix(f3, cbind(X,Y))[, -1]
X<-cbind(x2[,c(1:D)])
}
SL <-cov.sel.high.lasso(Y = Y, X = X)
u<-unique(c(unique(Xvars[1,which(SL==TRUE)]),unique(Xvars[2,which(SL==TRUE)])))
Q1<-covars[is.na(match(covars,u))==FALSE]
}
}
lt2<-datbeta[which(datbeta[,Tcol]==0),c(covarsT,ycol)]
##Subset Q.0
if(is.data.frame(lt2)==FALSE){
Q0<-NULL
}else{
Y<-lt2$Y
X<-lt2[,-dim(lt2)[2]]
xnames<-names(lt2)[-dim(lt2)[2]]
D<-length(xnames)
if(D==1){Q0<-XT}else{
Xo<-X
numcol2<-which(lapply(X,class)=="numeric")
if(length(numcol2)>0){
xnum<-X[,numcol2]
Xvars<-t(rbind(matrix(c(names(X),names(xnum),rep(0,length(c(names(X),names(xnum))))),ncol=2), t(combn(names(X),2))))
f1<-eval(paste("Y ~", paste(paste('I(', names(X)[numcol2], '^2)', sep=''), collapse=" + ")))
f2<-as.formula(f1)
x1<- model.matrix(f2, cbind(X,Y))[, -1]
f3 <- as.formula(Y ~ .*.)
x2<- model.matrix(f3, cbind(X,Y))[, -1]
X<-cbind(x2[,c(1:D)],x1,x2[,-c(1:D)])
}else{
Xvars<-t(rbind(matrix(c(names(X),rep(0,length(names(X)))),ncol=2), t(combn(names(X),2))))
f3 <- as.formula(Y ~ .*.)
x2<- model.matrix(f3, cbind(X,Y))[, -1]
X<-cbind(x2[,c(1:D)])
}
SL <-cov.sel.high.lasso(Y = Y, X = X)
u<-unique(c(unique(Xvars[1,which(SL==TRUE)]),unique(Xvars[2,which(SL==TRUE)])))
Q0<-covars[is.na(match(covars,u))==FALSE]
}
}
##Subset Q=Q.1UQ.0
if((length(Q1)+length(Q0)==0)){Q<-NULL}else{Q<-unique(c(Q1,Q0))[order(unique(c(Q1,Q0)))]}
##Algortithm 2
lt4<-datbeta[which(datbeta[,Tcol]==1),c(covarcol,ycol)]
Y<-lt4$Y
X<-lt4[,covarcol]
D<-dim(X)[2]
Xo<-X
if(length(numcol)>0){
xnum<-X[,numcol]
Xvars<-t(rbind(matrix(c(names(X),names(xnum),rep(0,length(c(names(X),names(xnum))))),ncol=2), t(combn(names(X),2))))
f1<-eval(paste("Y ~", paste(paste('I(', names(X)[numcol], '^2)', sep=''), collapse=" + ")))
f2<-as.formula(f1)
x1<- model.matrix(f2, cbind(X,Y))[, -1]
f3 <- as.formula(Y ~ .*.)
x2<- model.matrix(f3, cbind(X,Y))[, -1]
X<-cbind(x2[,c(1:D)],x1,x2[,-c(1:D)])
}else{
Xvars<-t(rbind(matrix(c(names(X),rep(0,length(names(X)))),ncol=2), t(combn(names(X),2))))
f3 <- as.formula(Y ~ .*.)
x2<- model.matrix(f3, cbind(X,Y))[, -1]
X<-cbind(x2[,c(1:D)])
}
SL <-cov.sel.high.lasso(Y = Y, X = X)
##Subset X.1
u<-unique(c(unique(Xvars[1,which(SL==TRUE)]),unique(Xvars[2,which(SL==TRUE)])))
X1<-covars[is.na(match(covars,u))==FALSE]
covarsX1<-which(match(colnames(datbeta),X1)!="NA")
lt2<-datbeta[,c(covarsX1,Tcol)]
##Subset Z.1
if(is.data.frame(lt2)==FALSE){
Z1<-NULL
}else{
Y<-lt2$T
X<-lt2[,-dim(lt2)[2]]
xnames<-names(lt2)[-dim(lt2)[2]]
D<-length(xnames)
if(D==1){Z1<-X1}else{
Xo<-X
numcol2<-which(lapply(X,class)=="numeric")
if(length(numcol2)>0){
xnum<-X[,numcol2]
Xvars<-t(rbind(matrix(c(names(X),names(xnum),rep(0,length(c(names(X),names(xnum))))),ncol=2), t(combn(names(X),2))))
f1<-eval(paste("Y ~", paste(paste('I(', names(X)[numcol2], '^2)', sep=''), collapse=" + ")))
f2<-as.formula(f1)
x1<- model.matrix(f2, cbind(X,Y))[, -1]
f3 <- as.formula(Y ~ .*.)
x2<- model.matrix(f3, cbind(X,Y))[, -1]
X<-cbind(x2[,c(1:D)],x1,x2[,-c(1:D)])
}else{
Xvars<-t(rbind(matrix(c(names(X),rep(0,length(names(X)))),ncol=2), t(combn(names(X),2))))
f3 <- as.formula(Y ~ .*.)
x2<- model.matrix(f3, cbind(X,Y))[, -1]
X<-cbind(x2[,c(1:D)])
}
SL <-cov.sel.high.lasso(Y = Y, X = X)
u<-unique(c(unique(Xvars[1,which(SL==TRUE)]),unique(Xvars[2,which(SL==TRUE)])))
Z1<-covars[is.na(match(covars,u))==FALSE]
}
}
lt4<-datbeta[which(datbeta[,Tcol]==0),c(covarcol,ycol)]
Y<-lt4$Y
X<-lt4[,covarcol]
D<-dim(X)[2]
Xo<-X
if(length(numcol)>0){
xnum<-X[,numcol]
Xvars<-t(rbind(matrix(c(names(X),names(xnum),rep(0,length(c(names(X),names(xnum))))),ncol=2), t(combn(names(X),2))))
f1<-eval(paste("Y ~", paste(paste('I(', names(X)[numcol], '^2)', sep=''), collapse=" + ")))
f2<-as.formula(f1)
x1<- model.matrix(f2, cbind(X,Y))[, -1]
f3 <- as.formula(Y ~ .*.)
x2<- model.matrix(f3, cbind(X,Y))[, -1]
X<-cbind(x2[,c(1:D)],x1,x2[,-c(1:D)])
}else{
Xvars<-t(rbind(matrix(c(names(X),rep(0,length(names(X)))),ncol=2), t(combn(names(X),2))))
f3 <- as.formula(Y ~ .*.)
x2<- model.matrix(f3, cbind(X,Y))[, -1]
X<-cbind(x2[,c(1:D)])
}
SL <-cov.sel.high.lasso(Y = Y, X = X)
##Subset X.0
u<-unique(c(unique(Xvars[1,which(SL==TRUE)]),unique(Xvars[2,which(SL==TRUE)])))
X0<-covars[is.na(match(covars,u))==FALSE]
covarsX0<-which(match(colnames(datbeta),X0)!="NA")
lt2<-datbeta[,c(covarsX0,Tcol)]
##Subset Z.0
if(is.data.frame(lt2)==FALSE){
Z0<-NULL
}else{
Y<-lt2$T
X<-lt2[,-dim(lt2)[2]]
xnames<-names(lt2)[-dim(lt2)[2]]
D<-length(xnames)
if(D==1){Z0<-X0}else{
Xo<-X
numcol2<-which(lapply(X,class)=="numeric")
if(length(numcol2)>0){
xnum<-X[,numcol2]
Xvars<-t(rbind(matrix(c(names(X),names(xnum),rep(0,length(c(names(X),names(xnum))))),ncol=2), t(combn(names(X),2))))
f1<-eval(paste("Y ~", paste(paste('I(', names(X)[numcol2], '^2)', sep=''), collapse=" + ")))
f2<-as.formula(f1)
x1<- model.matrix(f2, cbind(X,Y))[, -1]
f3 <- as.formula(Y ~ .*.)
x2<- model.matrix(f3, cbind(X,Y))[, -1]
X<-cbind(x2[,c(1:D)],x1,x2[,-c(1:D)])
}else{
Xvars<-t(rbind(matrix(c(names(X),rep(0,length(names(X)))),ncol=2), t(combn(names(X),2))))
f3 <- as.formula(Y ~ .*.)
x2<- model.matrix(f3, cbind(X,Y))[, -1]
X<-cbind(x2[,c(1:D)])
}
SL <-cov.sel.high.lasso(Y = Y, X = X)
u<-unique(c(unique(Xvars[1,which(SL==TRUE)]),unique(Xvars[2,which(SL==TRUE)])))
Z0<-covars[is.na(match(covars,u))==FALSE]
}
}
##Subset X.Y
if((length(X1)+length(X0)==0)){XY<-NULL}else{
XY<-unique(c(X1,X0))[order(unique(c(X1,X0)))]}
##Subset Z=Z.1UZ.0
if((length(Z1)+length(Z0)==0)){Z<-NULL}else{Z<-unique(c(Z1,Z0))[order(unique(c(Z1,Z0)))]}
#Disjunctive cause criterion (VanderWeele and Shpitser)
##Subset X.D
if((length(XY)+length(XT)==0)){XD<-NULL}else{
XD<-unique(c(XY,XT))[order(unique(c(XY,XT)))]
}
covarsXD<-which(match(colnames(datbeta),XD)!="NA")
}
if(type=="rf"){
#Common cause criterion (Waernbaum, de Luna and Richardson)
##Algorithm 1
Y<-datbeta$T
X<-datbeta[,covarcol]
SL <-cov.sel.high.rf(Y = Y, X = X)
##Subset X.T
XT<-colnames(X)[which(SL==TRUE)]
covarsT<-which(match(colnames(datbeta),XT)!="NA")
lt2<-datbeta[which(datbeta[,Tcol]==1),c(covarsT,ycol)]
##Subset Q.1
if(is.data.frame(lt2)==FALSE){
Q1<-NULL
}else{
Y<-lt2$Y
X<-lt2[,-dim(lt2)[2]]
if(is.data.frame(X)==FALSE){
X<-data.frame(X)
names(X)<-XT
}
SL <-cov.sel.high.rf(Y = Y, X = X)
Q1<-colnames(X)[which(SL==TRUE)]
}
lt2<-datbeta[which(datbeta[,Tcol]==0),c(covarsT,ycol)]
##Subset Q.0
if(is.data.frame(lt2)==FALSE){
Q0<-NULL
}else{
Y<-lt2$Y
X<-lt2[,-dim(lt2)[2]]
if(is.data.frame(X)==FALSE){
X<-data.frame(X)
names(X)<-XT
}
SL <-cov.sel.high.rf(Y = Y, X = X)
Q0<-colnames(X)[which(SL==TRUE)]
}
##Subset Q=Q.1UQ.0
if((length(Q1)+length(Q0)==0)){Q<-NULL}else{Q<-unique(c(Q1,Q0))[order(unique(c(Q1,Q0)))]}
##Algortithm 2
lt4<-datbeta[which(datbeta[,Tcol]==1),c(covarcol,ycol)]
Y<-lt4$Y
X<-lt4[,covarcol]
SL <-cov.sel.high.rf(Y = Y, X = X)
##Subset X.1
X1<-colnames(X)[which(SL==TRUE)]
covarsX1<-which(match(colnames(datbeta),X1)!="NA")
lt2<-datbeta[,c(covarsX1,Tcol)]
##Subset Z.1
if(is.data.frame(lt2)==FALSE){
Z1<-NULL
}else{
Y<-lt2$T
X<-lt2[,-dim(lt2)[2]]
if(is.data.frame(X)==FALSE){
X<-data.frame(X)
names(X)<-X1
}
SL <-cov.sel.high.rf(Y = Y, X = X)
Z1<-colnames(X)[which(SL==TRUE)]
}
lt4<-datbeta[which(datbeta[,Tcol]==0),c(covarcol,ycol)]
Y<-lt4$Y
X<-lt4[,covarcol]
SL <-cov.sel.high.rf(Y = Y, X = X)
##Subset X.0
X0<-colnames(X)[which(SL==TRUE)]
covarsX0<-which(match(colnames(datbeta),X0)!="NA")
lt2<-datbeta[,c(covarsX0,Tcol)]
##Subset Z.0
if(is.data.frame(lt2)==FALSE){
Z0<-NULL
}else{
Y<-lt2$T
X<-lt2[,-dim(lt2)[2]]
if(is.data.frame(X)==FALSE){
X<-data.frame(X)
names(X)<-X0
}
SL <-cov.sel.high.rf(Y = Y, X = X)
Z0<-colnames(X)[which(SL==TRUE)]
}
##Subset X.Y
if((length(X1)+length(X0)==0)){XY<-NULL}else{
XY<-unique(c(X1,X0))[order(unique(c(X1,X0)))]}
##Subset Z=Z.1UZ.0
if((length(Z1)+length(Z0)==0)){Z<-NULL}else{Z<-unique(c(Z1,Z0))[order(unique(c(Z1,Z0)))]}
#Disjunctive cause criterion (VanderWeele and Shpitser)
##Subset X.D
if((length(XY)+length(XT)==0)){XD<-NULL}else{
XD<-unique(c(XY,XT))[order(unique(c(XY,XT)))]
}
covarsXD<-which(match(colnames(datbeta),XD)!="NA")
}
#Subset cardinalities
cardXT<-length(XT)
cardQ<-length(Q)
cardXY<-length(XY)
cardZ<-length(Z)
cardXD<-length(XD)
cards<-data.frame(X.T=cardXT,Q=cardQ,X.Y=cardXY,Z=cardZ,X.D=cardXD)
#ATE estimate via propensity score matching
if(betahat==TRUE){
datbeta[,Tcol]<-as.numeric(datbeta[,Tcol])-1
#Pre-treatment criterion
f1<-as.formula(paste("T~", paste(paste(colnames(datbeta)[covarcol]), collapse= "+")))
ps<-glm(f1,family=binomial,data=datbeta)$fitted
betahatX<-Match(Y=datbeta[,ycol], Tr=datbeta[,Tcol], X=as.matrix(ps,ncol=1), estimand = "ATE", M = 1, replace=TRUE)
betahatXest<-betahatX$est
betahatXse<-betahatX$se
#Common cause criterion
##Algorithm 1
##Subset X.T
if(length(XT)==0){
betahatXTest<-mean(datbeta[which(datbeta[,Tcol]==1),ycol])-mean(datbeta[which(datbeta[,Tcol]==0),ycol])
betahatXTse<-sqrt((var(datbeta[which(datbeta[,Tcol]==1),ycol])/length(which(datbeta[,Tcol]==1))+var(datbeta[which(datbeta[,Tcol]==0),ycol]))/length(which(datbeta[,Tcol]==0)))
}else{
f1<-as.formula(paste("T~", paste(paste(XT), collapse= "+")))
ps<-glm(f1,family=binomial,data=datbeta)$fitted
betahatXT<- Match(Y=datbeta[,ycol], Tr=datbeta[,Tcol], X=as.matrix(ps,ncol=1), estimand = "ATE", M = 1,replace=TRUE)
betahatXTest<-betahatXT$est
betahatXTse<-betahatXT$se
}
##Subset Q
if(length(Q)==0){ betahatQest<-mean(datbeta[which(datbeta[,Tcol]==1),ycol])-mean(datbeta[which(datbeta[,Tcol]==0),ycol])
betahatQse<-sqrt((var(datbeta[which(datbeta[,Tcol]==1),ycol])/length(which(datbeta[,Tcol]==1))+var(datbeta[which(datbeta[,Tcol]==0),ycol]))/length(which(datbeta[,Tcol]==0)))
}else{
f1<-as.formula(paste("T~", paste(paste(Q), collapse= "+")))
ps<-glm(f1,family=binomial,data=datbeta)$fitted
betahatQ<-Match(Y=datbeta[,ycol], Tr=datbeta[,Tcol], X=as.matrix(ps,ncol=1), estimand = "ATE", M = 1,replace=TRUE)
betahatQest<-betahatQ$est
betahatQse<-betahatQ$se
}
#Algorithm 2
##Subset X.Y
if(length(XY)==0){ betahatXYest<-mean(datbeta[which(datbeta[,Tcol]==1),ycol])-mean(datbeta[which(datbeta[,Tcol]==0),ycol])
betahatXYse<-sqrt((var(datbeta[which(datbeta[,Tcol]==1),ycol])/length(which(datbeta[,Tcol]==1))+var(datbeta[which(datbeta[,Tcol]==0),ycol]))/length(which(datbeta[,Tcol]==0)))
}else{
f1<-as.formula(paste("T~", paste(paste(XY), collapse= "+")))
ps<-glm(f1,family=binomial,data=datbeta)$fitted
betahatXY<-Match(Y=datbeta[,ycol], Tr=datbeta[,Tcol], X=as.matrix(ps,ncol=1), estimand = "ATE", M = 1,replace=TRUE)
betahatXYest<-betahatXY$est
betahatXYse<-betahatXY$se
}
##Subset Z
if(length(Z)==0){ betahatZest<-mean(datbeta[which(datbeta[,Tcol]==1),ycol])-mean(datbeta[which(datbeta[,Tcol]==0),ycol])
betahatZse<-sqrt((var(datbeta[which(datbeta[,Tcol]==1),ycol])/length(which(datbeta[,Tcol]==1))+var(datbeta[which(datbeta[,Tcol]==0),ycol]))/length(which(datbeta[,Tcol]==0)))
}else{
f1<-as.formula(paste("T~", paste(paste(Z), collapse= "+")))
ps<-glm(f1,family=binomial,data=datbeta)$fitted
betahatZ<-Match(Y=datbeta[,ycol], Tr=datbeta[,Tcol], X=as.matrix(ps,ncol=1), estimand = "ATE", M = 1, replace=TRUE)
betahatZest<-betahatZ$est
betahatZse<-betahatZ$se
}
#Disjunctive cause criterion
##Subset X.D
if(length(XD)==0){ betahatXDest<-mean(datbeta[which(datbeta[,Tcol]==1),ycol])-mean(datbeta[which(datbeta[,Tcol]==0),ycol])
betahatXDse<-sqrt((var(datbeta[which(datbeta[,Tcol]==1),ycol])/length(which(datbeta[,Tcol]==1))+var(datbeta[which(datbeta[,Tcol]==0),ycol]))/length(which(datbeta[,Tcol]==0)))
}else{
f1<-as.formula(paste("T~", paste(paste(XD), collapse= "+")))
ps<-glm(f1,family=binomial,data=datbeta)$fitted
betahatXD<-Match(Y=datbeta[,ycol], Tr=datbeta[,Tcol], X=as.matrix(ps,ncol=1), estimand = "ATE", M = 1,replace=TRUE)
betahatXDest<-betahatXD$est
betahatXDse<-betahatXD$se
}
betahats<-data.frame(X=betahatXest,X.T=betahatXTest,Q=betahatQest,X.Y=betahatXYest,Z=betahatZest,X.D=betahatXDest)
betahatsse<-data.frame(X=betahatXse,X.T=betahatXTse,Q=betahatQse,X.Y=betahatXYse,Z=betahatZse,X.D=betahatXDse)
}else{betahats<-betahatsse<-NULL}
reslist[[i]]<-list(X.T=XT,Q.0=Q0,Q.1=Q1,Q=Q,X.0=X0,X.1=X1,X.Y=XY,Z.0=Z0,Z.1=Z1,Z=Z,X.TY=XD,cardinalities=cards, est=betahats, se=betahatsse, N=N, Setting=Setting, rep=Rep, Models=Models,type=type, varnames=varnames)
}
}
l<-list(reslist=reslist)
if(Simulate==TRUE){invisible(return(l[[1]]))}else{invisible(return(l[[1]][[1]]))}
}
|
3f9a5f865cc3e5002d901e47e406e9abe48d9e35
|
1dcd515a6742140b05c00b098115c7909f6ced42
|
/summary_data.R
|
2b2a9a10cc80020c393a670e780ce9de38e81a3b
|
[] |
no_license
|
hearyt/FC-manuscript
|
549912690723c04a8fb6d6142a8b70b31a2884d1
|
7df739df232121cca805f58d08ec4560a7fc53c1
|
refs/heads/master
| 2021-05-15T16:07:50.722710
| 2017-11-03T19:06:28
| 2017-11-03T19:06:28
| 107,435,293
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,575
|
r
|
summary_data.R
|
summary_data <- function(lv, d1, k, k.parent, k.den, lab.y, binwidth, caption.fig, caption.tab)
{
  # Summarise cell data three ways:
  #   tab1 -- QA table: child-cell sums as a percentage of the parent node
  #   p    -- boxplot + dotplot of per-cell proportions, split by `group`
  #   tab2 -- group-stratified summary table (tableone)
  #
  # Args:
  #   lv          character vector of child-cell column names in d1
  #   d1          data frame with the cell counts plus a `group` column
  #   k           column index of the first child-cell column in d1
  #   k.parent    column index of the parent-node count (QA denominator)
  #   k.den       column index of the denominator for the per-cell proportions
  #   lab.y       y-axis label for the plot
  #   binwidth    binwidth passed to geom_dotplot
  #   caption.fig plot title
  #   caption.tab table caption (currently unused; the xtable call is commented out)
  #
  # Returns: list(tab1, p, tab2)
  #
  # NOTE(review): depends on `summaryfunction`, `lab.x.tick.marks` and `leg.pos`
  # being defined in the calling environment -- confirm before reuse.
  #############
  ## data QA ##
  #############
  # sum of all children nodes' numbers of events (numerator)
  d1$sum = apply(d1[k:(k+length(lv)-1)], MARGIN = 1, FUN=sum)
  ## percentage out of parent node
  d1$perc = d1$sum / d1[ ,k.parent] * 100
  ## summary table
  tab1 = summaryfunction(d1$perc, digits=0)
  tab1 = xtable(tab1)
  ###########################
  ## data summary: boxplot ##
  ###########################
  ## convert counts to proportions of the denominator column
  for (i in k:(k+length(lv)-1))
  {
    d1[ ,i] = d1[ ,i] / d1[ ,k.den]
  }
  ## reshape: wide to long
  l <- reshape(d1[,k:(k+length(lv))],
               varying = lv,
               v.names = "perc",
               timevar = "cell",
               times = lv,
               direction = "long")
  ## order
  l = l[order(l$cell, l$perc), ]
  ## convert cell type to factor
  l$cell = factor(l$cell, levels=lv)
  ## plot
  ## BUG FIX: was `binwidth=bin.width`, an undefined object, so the `binwidth`
  ## argument was silently ignored and drawing the plot raised an error.
  p = ggplot(l, aes(cell, perc, fill=group)) +
    geom_boxplot(outlier.colour=NA) +
    geom_dotplot(binaxis='y', stackdir='center',
                 position=position_dodge(1), binwidth=binwidth) +
    ylab(lab.y) +
    theme(text = element_text(size=16)) +
    theme(axis.title.x=element_blank()) +
    theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
    scale_x_discrete(labels=lab.x.tick.marks) +
    scale_y_continuous(breaks = seq(0,max(d1[,k:(k+length(lv)-1)], na.rm=TRUE),by=0.1),
                       labels=scales::percent) +
    theme(plot.title = element_text(size=14, face="bold", lineheight=.8, hjust=0.5)) +
    theme(legend.title=element_blank()) +
    theme(legend.position=leg.pos) +
    ggtitle(caption.fig)
  #########################
  ## data summary: table ##
  #########################
  ## select dataset
  temp = subset(d1, select=c(k:(k+length(lv)-1)))
  ## multiply percentages by 100 for table presentation
  temp = temp * 100
  ## combine it with group info
  temp = data.frame( cbind(temp, d1$group))
  names(temp)[(length(lv)+1)] = "group"
  ## table output
  vars = names(temp)[1:length(lv)]
  tab2 = CreateTableOne(vars=vars, strata=c("group"), data=temp)
  tab2 = print(tab2, nonnormal=vars, smd=FALSE, quote=FALSE, noSpaces=TRUE)
  tab2 = data.frame(tab2)[ ,1:3]
  row.names(tab2)[-1] = lv
  #tab2 = xtable( tab2, caption = caption.tab )
  #align(tab2) <- "lccc"
  ####################
  ## return results ##
  ####################
  return(list(tab1, p, tab2))
}
|
8c7fabc440b1f5e53dbfaa612065e7e1d73caa74
|
f0b675fe8fdab8d263ccd0faf9d02962697c2f51
|
/R/poolData.R
|
255379a2aa880b14d197e4d7cb94530a3348cca0
|
[] |
no_license
|
kemacdonald/iChartAnalyzeR
|
bef94793c8f175d335c14fd2d6155ccb4c925a36
|
a2a1c352ee692a52d8c7a50e4a79964934c44bb5
|
refs/heads/master
| 2020-03-21T04:07:36.997658
| 2018-09-18T17:47:07
| 2018-09-18T17:47:07
| 138,092,029
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,503
|
r
|
poolData.R
|
#' Pool Data
#'
#' This function aggregates iChart RT and Accuracy for each trial, participant, and condition,
#' returning this information in a data frame.
#'
#' @description \code{poolData()} computes the mean accuracy or RT for each participant and condition.
#' @param iChart A data frame in iChart format with iChart column names.
#' @param dependent A string indicating which dependent measure to use (Accuracy or RT).
#' @param include_T_initial A boolean indicating whether Target-initial trials should be included in RT computation.
#' @param RejectFirstGap A boolean indicating whether bad first gaps should be filtered out of the computation.
#' @param RejectLongestGap A boolean indicating whether bad longest gaps should be filtered out of the computation.
#' @param RejectRT A boolean indicating whether bad RTs should be filtered out of the computation.
#' @param save_results A boolean indicating whether the results should be saved to disk.
#' @export
#' @examples
#' \dontrun{acc <- poolData(d, RejectFirstGap=FALSE, RejectLongestGap=FALSE, RejectRT=FALSE,
#' dependent="Accuracy", save_results = TRUE)}
#'
poolData <- function(iChart,
                     dependent = "Accuracy",
                     include_T_initial = TRUE,
                     RejectFirstGap = TRUE,
                     RejectLongestGap = TRUE,
                     RejectRT = FALSE,
                     save_results = TRUE) {
  # NOTE(review): despite the "Reject*" argument names, these flags act as
  # "keep only trials whose Good* column is TRUE" filters below.
  GoodFirstGap <- RejectFirstGap
  GoodLongestGap <- RejectLongestGap
  GoodRT <- RejectRT
  # Keep Distractor-initial trials; optionally Target-initial ones as well.
  if (include_T_initial) {
    filterediChart <- iChart[iChart$Response == "D" | iChart$Response == "T", ]
  } else {
    filterediChart <- iChart[iChart$Response == "D", ]
  }
  ## filtering (rows with NA gap flags are retained, not dropped)
  if (GoodFirstGap) filterediChart <- filterediChart[filterediChart$GoodFirstGap | is.na(filterediChart$GoodFirstGap), ]
  if (GoodLongestGap) filterediChart <- filterediChart[filterediChart$GoodLongestGap | is.na(filterediChart$GoodLongestGap), ]
  if (GoodRT) filterediChart <- filterediChart[filterediChart$GoodRT, ]
  ## aggregate depending on dependent variable
  if (dependent == "Accuracy") {
    results_table <- filterediChart %>%
      dplyr::group_by(Sub.Num, Condition) %>%
      dplyr::summarise(accuracy = mean(Accuracy, na.rm = TRUE),
                       stdev = stats::sd(Accuracy, na.rm = TRUE),
                       n_trials = dplyr::n())
  } else if (dependent == "RT") {
    results_table <- filterediChart %>%
      dplyr::group_by(Sub.Num, Condition, Response) %>%
      dplyr::summarise(rt = mean(RT, na.rm = TRUE),
                       stdev = stats::sd(RT, na.rm = TRUE),
                       n_trials = dplyr::n()) %>%
      dplyr::filter(!is.na(Sub.Num))
  }
  ## save results
  ## BUG FIX: the previous `if (save_results & dependent == "Accuracy") ... else ...`
  ## wrote a file even when save_results was FALSE (for any non-Accuracy call).
  if (save_results) {
    npar <- length(unique(iChart$Sub.Num))
    dir.create("processed_data", showWarnings = FALSE)
    # The analysis-window columns used in the file name differ by measure.
    if (dependent == "Accuracy") {
      start_col <- "StartWindowAcc"
      end_col <- "EndWindowAcc"
    } else {
      start_col <- "StartWindowRT"
      end_col <- "EndWindowRT"
    }
    save_as_ta <- paste("processed_data/", iChart[1, "StudyName"], "_mean_", dependent,
                        "_by_subs_", iChart[1, start_col], "_", iChart[1, end_col],
                        "_n_", npar, ".txt", sep = "")
    write.table(results_table, save_as_ta, sep = "\t", row.names = FALSE)
  }
  results_table
}
|
510845b019af382b6ac903a168b7fb2465323847
|
18cd6280d38d276d723f766c57f7e7fb1a3da3f5
|
/cachematrix.R
|
dde85648edb644ce3059bf8d1c40c628f4ad06e2
|
[] |
no_license
|
morenz75/ProgrammingAssignment2
|
29802f405bac70c0a56a1db8f906dcf45449db0a
|
c4a4185a63b3771c1e244a75264ae2ad6539d302
|
refs/heads/master
| 2020-12-30T22:10:03.437043
| 2015-04-16T14:44:25
| 2015-04-16T14:44:25
| 33,993,836
| 0
| 0
| null | 2015-04-15T12:57:10
| 2015-04-15T12:57:10
| null |
UTF-8
|
R
| false
| false
| 1,883
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
# These functions take a matrix (assumed to be invertible) and return its inverse.
# The inverse is calculated only once: when cacheSolve is called it returns
# the cached inverse if one exists; otherwise it computes the inverse and caches it.
## Write a short comment describing this function
# Takes a matrix as input and creates a list of functions to get and set the
# original matrix and to get / set its inverse.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; invalidated whenever the stored matrix is replaced.
  inv_cache <- NULL
  set <- function(y) {
    x <<- y
    inv_cache <<- NULL
  }
  get <- function() {
    x
  }
  setReverse <- function(solve) {
    inv_cache <<- solve
  }
  getReverse <- function() {
    inv_cache
  }
  # Expose the four accessors as a named list.
  list(
    set = set,
    get = get,
    setReverse = setReverse,
    getReverse = getReverse
  )
}
## Write a short comment describing this function
# Takes a makeCacheMatrix object as input and tries to get the inverse from it.
# If a cached version exists, it is returned without re-running solve();
# otherwise solve() is run and the result is cached and returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x', reusing the cache when possible.
  cached <- x$getReverse()
  if (!is.null(cached)) {
    # Cache hit: report and return the stored inverse.
    message("getting cached data")
    return(cached)
  }
  # Cache miss: compute the inverse and remember it for next time.
  inverse <- solve(x$get(), ...)
  x$setReverse(inverse)
  inverse
}
|
6858ce4f3d5733dc0498d08f76048346108e4017
|
46b4592d82aac98165d6d65225cefa42714be853
|
/inspkg.r
|
7cc317056eed4bec7aec57eec4db954ec20cee16
|
[] |
no_license
|
nhchauvnu/rpkg
|
be852c3fd0eb841efdc5dffa19b1a218150c4a57
|
8c3519ebc95a6e87c113aa360ff07c8c8ee529fa
|
refs/heads/master
| 2022-02-17T12:07:34.609758
| 2022-02-04T10:48:43
| 2022-02-04T10:48:43
| 32,579,295
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 962
|
r
|
inspkg.r
|
# Build a shell command that streams the package list (a CSV with a `pkg`
# column) from GitHub, then read it and keep the package names as characters.
# NOTE(review): requires network access and the external `wget` binary.
file = paste('wget -O -', 'https://raw.githubusercontent.com/nhchauvnu/rpkg/master/rpkg.csv')
pkg = as.character(read.csv(pipe(file))$pkg)
# Check whether a package is installed.
#
# Args:
#   pkg: character scalar, the package name.
# Returns: TRUE if `pkg` appears among the installed packages, FALSE otherwise.
chkpkg = function(pkg) {
	# installed.packages()[, 1] is the "Package" column; a set-membership
	# test replaces the old sum(pkglist == pkg) counting idiom.
	pkg %in% installed.packages()[, 1]
}
printstat = function(pkg) {
	# Print one line reporting whether `pkg` is installed.
	label = if (chkpkg(pkg)) "Installed:" else "Not installed:"
	print(paste(label, pkg))
}
printpkg = function() {
	# Report install status for every package in the global `pkg` vector.
	for (candidate in pkg) {
		printstat(candidate)
	}
}
list0 = function() {
	# List the packages from the global `pkg` vector that are not yet installed.
	print("Not yet installed packages:")
	for (candidate in pkg) {
		if (!chkpkg(candidate)) {
			print(candidate)
		}
	}
}
list1 = function() {
	# List the packages from the global `pkg` vector that are already installed.
	print("Installed packages:")
	for (candidate in pkg) {
		if (chkpkg(candidate)) {
			print(candidate)
		}
	}
}
inspkg = function(pkg) {
	# Install `pkg` unless it is already present.
	if (chkpkg(pkg)) {
		print(paste(" >>> Package", pkg, "is already installed"))
	} else {
		print(paste(" >>> Installing ", pkg))
		install.packages(pkg)
	}
}
insmypkg = function() {
	# Install every package from the global `pkg` vector that is missing.
	for (candidate in pkg) {
		if (!chkpkg(candidate)) {
			print(paste(" >>> Installing ", candidate))
			install.packages(candidate)
		}
	}
}
|
651f51296862825add8ddc95ac77dfc23a72d3bc
|
4295a487ac3bca6a1493001f5090f5c2c4a5297e
|
/man/print.JMR.Rd
|
f5e75a33546009d09503d4344cdb125fefd99507
|
[] |
no_license
|
sa4khan/JMR
|
f8c732155830558ddbf7d3d00a1a0f3ea6d5d56c
|
fd801ddf778f30054b6cab0de5bc09964dfc14a3
|
refs/heads/main
| 2023-06-26T17:25:39.893637
| 2021-07-13T01:44:56
| 2021-07-13T01:44:56
| 385,138,057
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 433
|
rd
|
print.JMR.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/JMRprint.R
\name{print.JMR}
\alias{print.JMR}
\title{Prints JMR objects}
\usage{
\method{print}{JMR}(x, digits = max(options()$digits - 4, 3))
}
\arguments{
\item{x}{A \code{JMR} object}
\item{digits}{minimal number of significant digits}
}
\value{
No value is returned.
}
\description{
Prints JMR objects
}
\author{
Shahedul Khan <khan@math.usask.ca>
}
|
c2f2ee0dd982ff14d9153c13cfa0df2f5a067a5a
|
223893fbf2d8b796dfcc9593ccaca4195d86b45d
|
/cachematrix.R
|
ef25ca81fef7ef33e1b364b64d9aa7b1a20df35d
|
[] |
no_license
|
dscourse1/ProgrammingAssignment2
|
8c922d3375df06496439e59718c65c0f6d09fbf5
|
bf9b3a4709201edb7af30272492ab921192d411a
|
refs/heads/master
| 2020-12-27T05:30:58.643336
| 2015-01-25T07:36:41
| 2015-01-25T07:36:41
| 29,805,590
| 0
| 0
| null | 2015-01-25T06:42:52
| 2015-01-25T06:42:50
| null |
UTF-8
|
R
| false
| false
| 1,561
|
r
|
cachematrix.R
|
## Functions to support caching of the inverse of a matrix
## usage:
## > x <- makeCacheMatrix(matrix(rnorm(16), 4, 4))
## > cacheSolve(x)
## [,1] [,2] [,3] [,4]
## [1,] -0.5217698 0.59491642 -0.3728793 -0.09381126
## [2,] 1.5125709 -0.58157656 0.4206847 -0.32677740
## [3,] 0.1860804 -0.08813034 -0.5291406 0.38721203
## [4,] -0.4011924 0.36633059 -0.0295378 0.46514693
##
## (call cacheSolve successively returns inverse from cache)
## > cacheSolve(x)
## getting matrix from cache
## [,1] [,2] [,3] [,4]
## [1,] -0.5217698 0.59491642 -0.3728793 -0.09381126
## [2,] 1.5125709 -0.58157656 0.4206847 -0.32677740
## [3,] 0.1860804 -0.08813034 -0.5291406 0.38721203
## [4,] -0.4011924 0.36633059 -0.0295378 0.46514693
## Function to store a matrix and cache its inverse
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setInverse <- function(solve) m <<- solve
getInverse <- function() m
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## Function to calculate the inverse of a matrix.
## Uses a cache to store and retrieve previous calculations.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getInverse()
if(!is.null(m)) {
message("getting matrix from cache")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setInverse(m)
m
}
|
2f887ebd4826b55734d851191e82fd1983012dc9
|
77b52dc4be6980a7066a435d53a18ecc6574be77
|
/One Predictor Models/R_estimates.R
|
110a7b4f179eba93bed1fd6d6b7627cd1aad78e5
|
[] |
no_license
|
AgneseG/RatioImputationImprovements
|
32c0e3a505d9334b02567b586a6b02c4c565ec7e
|
9cd348d79547b67a2296eaf175caaf3f10a34d87
|
refs/heads/master
| 2020-07-22T22:37:26.095907
| 2019-09-16T18:08:21
| 2019-09-16T18:08:21
| 207,353,383
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,081
|
r
|
R_estimates.R
|
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
### In this file, the ratio R has been estimated for the wls, Huber and Tukey estimators.
#### Final estimates can be found in the R_estimates dataframe.
## Subsequentely, the goodness of fit on the final imputed mean, compared with the true observed mean, has been compared for each estimator.
### Final estimates can be found in the GoodnessOfFit dataframe.
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
setwd("C:/Users/AGMO/Desktop")
set.seed(42)
library(MASS)
dat<-read.csv2("Data_wholesalers.csv")
str(dat)
dat <- dat[-c(1,4,5,7)]
str(dat)
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
### R estimates
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
ratios.estimates <- function(target.variable, predictor.variable) {
r.means <- mean(target.variable, na.rm=TRUE)/mean(predictor.variable, na.rm=TRUE)
r.huber <- rlm(target.variable ~ predictor.variable -1,weight=1/(predictor.variable+1), psi=psi.huber)[[1]]
r.turkey <- rlm(target.variable ~ predictor.variable -1,weight=1/(predictor.variable+1), psi=psi.bisquare)[[1]]
r <- data.frame(r.means, r.median, r.huber, r.turkey)
colnames(r) <- c("Means", "Huber", "Turkey")
return(r)
}
PersonalCosts_Employees <- ratios.estimates(dat[,5], dat[,3])
CostPurchases_Turnover <- ratios.estimates(dat[,1], dat[,2])
CostDepreciations_Turnover <- ratios.estimates(dat[,6], dat[,2])
OtherCosts_Employees <- ratios.estimates(dat[,4], dat[,3])
ratios.estimates(dat_centering[,4], dat_centering[,3])
# Combines all R estimates in a dataframe
R_estimates = data.frame(matrix(data=c(PersonalCosts_Employees, CostPurchases_Turnover, CostDepreciations_Turnover, OtherCosts_Employees), ncol=4, nrow=4 , byrow=TRUE))
colnames(R_estimates) <- c("Means", "Median", "Huber", "Turkey")
rownames(R_estimates) <- c("PersonalCosts/Employees", "CostPurchases/Turnover", "CostDepreciations/Turnover", "OtherCosts/Employees ")
R_estimates
# -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#### GOF: Goodness of fit at the aggregate level (mean)
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
GOF <- function(x, y, R){
p <- imputed.mean <- rep(0,ncol(R))
for (i in 1:ncol(R)){
imputed.mean[i] <- mean(as.numeric(R[i])*x) # Mean predicted values
mean <- mean(y, na.rm=TRUE) # Observed mean
p[i] <- round(100*(abs(imputed.mean[i] - mean)/mean), 3) #Relative percentage difference
}
return(p)
}
# Combines all estimates in a dataframe
GOF1 <- GOF(dat[,3], dat[,5], PersonalCosts_Employees)
GOF2 <- GOF(dat[,2], dat[,1], CostPurchases_Turnover)
GOF3 <- GOF(dat[,2], dat[,6], CostDepreciations_Turnover )
GOF4 <- GOF(dat[,3], dat[,4], OtherCosts_Employees)
GoodnessOfFit <- data.frame(matrix(data=c(GOF1,
GOF2,
GOF3,
GOF4), ncol=4, byrow=TRUE))
colnames(GoodnessOfFit) <- c("Means", "Huber", "Turkey")
rownames(GoodnessOfFit) <- c("PersonalCosts/Employees", "CostPurchases/Turnover", "CostDepreciations/Turnover", "OtherCosts/Employees ")
GoodnessOfFit
|
6b021c76b6281a7ab665521b9a0ee69bd5ff5b2f
|
d4bbec7817b1704c40de6aca499625cf9fa2cb04
|
/src/lib/distributions/chi-2/__test__/fixture-generation/dnchisq2.R
|
a2af860b9b73c25eeb663cdd760e387b110b72c5
|
[
"MIT"
] |
permissive
|
R-js/libRmath.js
|
ac9f21c0a255271814bdc161b378aa07d14b2736
|
9462e581da4968938bf4bcea2c716eb372016450
|
refs/heads/main
| 2023-07-24T15:00:08.372576
| 2023-07-16T16:59:32
| 2023-07-16T16:59:32
| 79,675,609
| 108
| 15
|
MIT
| 2023-02-08T15:23:17
| 2017-01-21T22:01:44
|
TypeScript
|
UTF-8
|
R
| false
| false
| 2,472
|
r
|
dnchisq2.R
|
#> x=seq(0,40,0.5);
#> y=dchisq(x,13,ncp=8, log=T)
#> plot(x,y, type='l')
#> data.frame(x,y)
# x y
1 0.0 -Inf
2 0.5 -18.0780230497580163
3 1.0 -14.3663511842686908
4 1.5 -12.2396822876410578
5 2.0 -10.7634009342061265
6 2.5 -9.6445091114802803
7 3.0 -8.7524278409333771
8 3.5 -8.0174509734716644
9 4.0 -7.3979311562244003
10 4.5 -6.8669746405579941
11 5.0 -6.4061944698800630
12 5.5 -6.0024565385168103
13 6.0 -5.6460472109437170
14 6.5 -5.3295766890677383
15 7.0 -5.0472897659011844
16 7.5 -4.7946148096429519
17 8.0 -4.5678584947640752
18 8.5 -4.3639931545534978
19 9.0 -4.1805049367098173
20 9.5 -4.0152830106581803
21 10.0 -3.8665371794938781
22 10.5 -3.7327355752673141
23 11.0 -3.6125568294328030
24 11.5 -3.5048528571172768
25 12.0 -3.4086195451592554
26 12.5 -3.3229734087389868
27 13.0 -3.2471328129356687
28 13.5 -3.1804027264473405
29 14.0 -3.1221622376358846
30 14.5 -3.0718542521274528
31 15.0 -3.0289769289770301
32 15.5 -2.9930765140282007
33 16.0 -2.9637413049137389
34 16.5 -2.9405965392888143
35 17.0 -2.9233000413932997
36 17.5 -2.9115384954576826
37 18.0 -2.9050242403554272
38 18.5 -2.9034925001259233
39 19.0 -2.9066989808977679
40 19.5 -2.9144177773488522
41 20.0 -2.9264395418939704
42 20.5 -2.9425698778622467
43 21.0 -2.9626279244418456
44 21.5 -2.9864451064642945
45 22.0 -3.0138640264183252
46 22.5 -3.0447374796295996
47 23.0 -3.0789275764650039
48 23.5 -3.1163049578411028
49 24.0 -3.1567480923312496
50 24.5 -3.2001426448462866
51 25.0 -3.2463809082786321
52 25.5 -3.2953612906846690
53 26.0 -3.3469878515864964
54 26.5 -3.4011698818252389
55 27.0 -3.4578215221229431
56 27.5 -3.5168614161286689
57 28.0 -3.5782123942551758
58 28.5 -3.6418011850648710
59 29.0 -3.7075581513602263
60 29.5 -3.7754170484680820
61 30.0 -3.8453148025031201
62 30.5 -3.9171913066495194
63 31.0 -3.9909892337216570
64 31.5 -4.0666538634581419
65 32.0 -4.1441329231725890
66 32.5 -4.2233764405328680
67 33.0 -4.3043366073704910
68 33.5 -4.3869676535388118
69 34.0 -4.4712257299330238
70 34.5 -4.5570687998853971
71 35.0 -4.6444565382176970
72 35.5 -4.7333502373091818
73 36.0 -4.8237127195989489
74 36.5 -4.9155082559973762
75 37.0 -5.0087024897307959
76 37.5 -5.1032623651880344
77 38.0 -5.1991560613769012
78 38.5 -5.2963529296342831
79 39.0 -5.3948234352654039
80 39.5 -5.4945391028163542
81 40.0 -5.5954724647086893
|
d15429d85c5029265625d3e5fabe303aea685718
|
f23496883cd9ee0267027e74946647fc809735d5
|
/simulated/velezmalaga_plots.R
|
053adec4ce8386a8e6c4bfc1fa0dfca47ac4f6fe
|
[] |
no_license
|
davgutavi/IoT_Smart_Agent
|
0e847b0f1397c7d38342a97c2b443642d20402d5
|
788a3fcad141e1c4dc4d793ae9ea0a88caf2c5c4
|
refs/heads/master
| 2021-04-03T08:37:52.786031
| 2020-01-20T13:06:27
| 2020-01-20T13:06:27
| 124,416,104
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,832
|
r
|
velezmalaga_plots.R
|
source("simulated/paths.R")
require(ggplot2)
# Presure ----
velezmalaga.pressure <- read.csv(path.velezmalaga.pressure)
velezmalaga.pressure.01 <- velezmalaga.pressure[velezmalaga.pressure$entity_id=="weatherobserved-velezmalaga-01",]
velezmalaga.pressure.02 <- velezmalaga.pressure[velezmalaga.pressure$entity_id=="weatherobserved-velezmalaga-02",]
velezmalaga.pressure.03 <- velezmalaga.pressure[velezmalaga.pressure$entity_id=="weatherobserved-velezmalaga-03",]
velezmalaga.pressure.04 <- velezmalaga.pressure[velezmalaga.pressure$entity_id=="weatherobserved-velezmalaga-04",]
velezmalaga.pressure.05 <- velezmalaga.pressure[velezmalaga.pressure$entity_id=="weatherobserved-velezmalaga-05",]
velezmalaga.pressure.06 <- velezmalaga.pressure[velezmalaga.pressure$entity_id=="weatherobserved-velezmalaga-06",]
velezmalaga.01.rows <- nrow(velezmalaga.pressure.01)
velezmalaga.02.rows <- nrow(velezmalaga.pressure.02)
velezmalaga.03.rows <- nrow(velezmalaga.pressure.03)
velezmalaga.04.rows <- nrow(velezmalaga.pressure.04)
velezmalaga.05.rows <- nrow(velezmalaga.pressure.05)
velezmalaga.06.rows <- nrow(velezmalaga.pressure.06)
velezmalaga.01.timePoints <- c(1:velezmalaga.01.rows)
velezmalaga.02.timePoints <- c(1:velezmalaga.02.rows)
velezmalaga.03.timePoints <- c(1:velezmalaga.03.rows)
velezmalaga.04.timePoints <- c(1:velezmalaga.04.rows)
velezmalaga.05.timePoints <- c(1:velezmalaga.05.rows)
velezmalaga.06.timePoints <- c(1:velezmalaga.06.rows)
ggplot(velezmalaga.pressure.01, aes(x=c(1:nrow(velezmalaga.pressure.01)),y = atmosphericPressure)) + geom_line()
ggplot(velezmalaga.pressure.02, aes(x=c(1:nrow(velezmalaga.pressure.02)),y = atmosphericPressure)) + geom_line()
ggplot(velezmalaga.pressure.03, aes(x=c(1:nrow(velezmalaga.pressure.03)),y = atmosphericPressure)) + geom_line()
ggplot(velezmalaga.pressure.04, aes(x=c(1:nrow(velezmalaga.pressure.04)),y = atmosphericPressure)) + geom_line()
ggplot(velezmalaga.pressure.05, aes(x=c(1:nrow(velezmalaga.pressure.05)),y = atmosphericPressure)) + geom_line()
ggplot(velezmalaga.pressure.06, aes(x=c(1:nrow(velezmalaga.pressure.06)),y = atmosphericPressure)) + geom_line()
# Precipitation ----
velezmalaga.precipitation <- read.csv(path.velezmalaga.precipitation)
velezmalaga.precipitation.01 <- velezmalaga.precipitation[velezmalaga.precipitation$entity_id=="weatherobserved-velezmalaga-01",]
velezmalaga.precipitation.02 <- velezmalaga.precipitation[velezmalaga.precipitation$entity_id=="weatherobserved-velezmalaga-02",]
velezmalaga.precipitation.03 <- velezmalaga.precipitation[velezmalaga.precipitation$entity_id=="weatherobserved-velezmalaga-03",]
velezmalaga.precipitation.04 <- velezmalaga.precipitation[velezmalaga.precipitation$entity_id=="weatherobserved-velezmalaga-04",]
velezmalaga.precipitation.05 <- velezmalaga.precipitation[velezmalaga.precipitation$entity_id=="weatherobserved-velezmalaga-05",]
velezmalaga.precipitation.06 <- velezmalaga.precipitation[velezmalaga.precipitation$entity_id=="weatherobserved-velezmalaga-06",]
ggplot(velezmalaga.precipitation.01, aes(x=c(1:nrow(velezmalaga.precipitation.01)),y = precipitation)) + geom_line()
ggplot(velezmalaga.precipitation.02, aes(x=c(1:nrow(velezmalaga.precipitation.02)),y = precipitation)) + geom_line()
ggplot(velezmalaga.precipitation.03, aes(x=c(1:nrow(velezmalaga.precipitation.03)),y = precipitation)) + geom_line()
ggplot(velezmalaga.precipitation.04, aes(x=c(1:nrow(velezmalaga.precipitation.04)),y = precipitation)) + geom_line()
ggplot(velezmalaga.precipitation.05, aes(x=c(1:nrow(velezmalaga.precipitation.05)),y = precipitation)) + geom_line()
ggplot(velezmalaga.precipitation.06, aes(x=c(1:nrow(velezmalaga.precipitation.06)),y = precipitation)) + geom_line()
# Humidity ----
velezmalaga.humidity <- read.csv(path.velezmalaga.humidity)
velezmalaga.humidity.01 <- velezmalaga.humidity[velezmalaga.humidity$entity_id=="weatherobserved-velezmalaga-01",]
velezmalaga.humidity.02 <- velezmalaga.humidity[velezmalaga.humidity$entity_id=="weatherobserved-velezmalaga-02",]
velezmalaga.humidity.03 <- velezmalaga.humidity[velezmalaga.humidity$entity_id=="weatherobserved-velezmalaga-03",]
velezmalaga.humidity.04 <- velezmalaga.humidity[velezmalaga.humidity$entity_id=="weatherobserved-velezmalaga-04",]
velezmalaga.humidity.05 <- velezmalaga.humidity[velezmalaga.humidity$entity_id=="weatherobserved-velezmalaga-05",]
velezmalaga.humidity.06 <- velezmalaga.humidity[velezmalaga.humidity$entity_id=="weatherobserved-velezmalaga-06",]
ggplot(velezmalaga.humidity.01, aes(x=c(1:nrow(velezmalaga.humidity.01)),y = relativeHumidity)) + geom_line()
ggplot(velezmalaga.humidity.02, aes(x=c(1:nrow(velezmalaga.humidity.02)),y = relativeHumidity)) + geom_line()
ggplot(velezmalaga.humidity.03, aes(x=c(1:nrow(velezmalaga.humidity.03)),y = relativeHumidity)) + geom_line()
ggplot(velezmalaga.humidity.04, aes(x=c(1:nrow(velezmalaga.humidity.04)),y = relativeHumidity)) + geom_line()
ggplot(velezmalaga.humidity.05, aes(x=c(1:nrow(velezmalaga.humidity.05)),y = relativeHumidity)) + geom_line()
ggplot(velezmalaga.humidity.06, aes(x=c(1:nrow(velezmalaga.humidity.06)),y = relativeHumidity)) + geom_line()
# Radiation ----
velezmalaga.radiation <- read.csv(path.velezmalaga.radiation)
velezmalaga.radiation.01 <- velezmalaga.radiation[velezmalaga.radiation$entity_id=="weatherobserved-velezmalaga-01",]
velezmalaga.radiation.02 <- velezmalaga.radiation[velezmalaga.radiation$entity_id=="weatherobserved-velezmalaga-02",]
velezmalaga.radiation.03 <- velezmalaga.radiation[velezmalaga.radiation$entity_id=="weatherobserved-velezmalaga-03",]
velezmalaga.radiation.04 <- velezmalaga.radiation[velezmalaga.radiation$entity_id=="weatherobserved-velezmalaga-04",]
velezmalaga.radiation.05 <- velezmalaga.radiation[velezmalaga.radiation$entity_id=="weatherobserved-velezmalaga-05",]
velezmalaga.radiation.06 <- velezmalaga.radiation[velezmalaga.radiation$entity_id=="weatherobserved-velezmalaga-06",]
ggplot(velezmalaga.radiation.01, aes(x=c(1:nrow(velezmalaga.radiation.01)),y = solarRadiation)) + geom_line()
ggplot(velezmalaga.radiation.02, aes(x=c(1:nrow(velezmalaga.radiation.02)),y = solarRadiation)) + geom_line()
ggplot(velezmalaga.radiation.03, aes(x=c(1:nrow(velezmalaga.radiation.03)),y = solarRadiation)) + geom_line()
ggplot(velezmalaga.radiation.04, aes(x=c(1:nrow(velezmalaga.radiation.04)),y = solarRadiation)) + geom_line()
ggplot(velezmalaga.radiation.05, aes(x=c(1:nrow(velezmalaga.radiation.05)),y = solarRadiation)) + geom_line()
ggplot(velezmalaga.radiation.06, aes(x=c(1:nrow(velezmalaga.radiation.06)),y = solarRadiation)) + geom_line()
# Temperature ----
velezmalaga.temperature <- read.csv(path.velezmalaga.temperature)
velezmalaga.temperature.01 <- velezmalaga.temperature[velezmalaga.temperature$entity_id=="weatherobserved-velezmalaga-01",]
velezmalaga.temperature.02 <- velezmalaga.temperature[velezmalaga.temperature$entity_id=="weatherobserved-velezmalaga-02",]
velezmalaga.temperature.03 <- velezmalaga.temperature[velezmalaga.temperature$entity_id=="weatherobserved-velezmalaga-03",]
velezmalaga.temperature.04 <- velezmalaga.temperature[velezmalaga.temperature$entity_id=="weatherobserved-velezmalaga-04",]
velezmalaga.temperature.05 <- velezmalaga.temperature[velezmalaga.temperature$entity_id=="weatherobserved-velezmalaga-05",]
velezmalaga.temperature.06 <- velezmalaga.temperature[velezmalaga.temperature$entity_id=="weatherobserved-velezmalaga-06",]
ggplot(velezmalaga.temperature.01, aes(x=c(1:nrow(velezmalaga.temperature.01)),y = temperature)) + geom_line()
ggplot(velezmalaga.temperature.02, aes(x=c(1:nrow(velezmalaga.temperature.02)),y = temperature)) + geom_line()
ggplot(velezmalaga.temperature.03, aes(x=c(1:nrow(velezmalaga.temperature.03)),y = temperature)) + geom_line()
ggplot(velezmalaga.temperature.04, aes(x=c(1:nrow(velezmalaga.temperature.04)),y = temperature)) + geom_line()
ggplot(velezmalaga.temperature.05, aes(x=c(1:nrow(velezmalaga.temperature.05)),y = temperature)) + geom_line()
ggplot(velezmalaga.temperature.06, aes(x=c(1:nrow(velezmalaga.temperature.06)),y = temperature)) + geom_line()
# Direction ----
velezmalaga.direction <- read.csv(path.velezmalaga.direction)
velezmalaga.direction.01 <- velezmalaga.direction[velezmalaga.direction$entity_id=="weatherobserved-velezmalaga-01",]
velezmalaga.direction.02 <- velezmalaga.direction[velezmalaga.direction$entity_id=="weatherobserved-velezmalaga-02",]
velezmalaga.direction.03 <- velezmalaga.direction[velezmalaga.direction$entity_id=="weatherobserved-velezmalaga-03",]
velezmalaga.direction.04 <- velezmalaga.direction[velezmalaga.direction$entity_id=="weatherobserved-velezmalaga-04",]
velezmalaga.direction.05 <- velezmalaga.direction[velezmalaga.direction$entity_id=="weatherobserved-velezmalaga-05",]
velezmalaga.direction.06 <- velezmalaga.direction[velezmalaga.direction$entity_id=="weatherobserved-velezmalaga-06",]
ggplot(velezmalaga.direction.01, aes(x=c(1:nrow(velezmalaga.direction.01)),y = windDirection)) + geom_line()
ggplot(velezmalaga.direction.02, aes(x=c(1:nrow(velezmalaga.direction.02)),y = windDirection)) + geom_line()
ggplot(velezmalaga.direction.03, aes(x=c(1:nrow(velezmalaga.direction.03)),y = windDirection)) + geom_line()
ggplot(velezmalaga.direction.04, aes(x=c(1:nrow(velezmalaga.direction.04)),y = windDirection)) + geom_line()
ggplot(velezmalaga.direction.05, aes(x=c(1:nrow(velezmalaga.direction.05)),y = windDirection)) + geom_line()
ggplot(velezmalaga.direction.06, aes(x=c(1:nrow(velezmalaga.direction.06)),y = windDirection)) + geom_line()
# Speed ----
velezmalaga.speed <- read.csv(path.velezmalaga.speed)
velezmalaga.speed.01 <- velezmalaga.speed[velezmalaga.speed$entity_id=="weatherobserved-velezmalaga-01",]
velezmalaga.speed.02 <- velezmalaga.speed[velezmalaga.speed$entity_id=="weatherobserved-velezmalaga-02",]
velezmalaga.speed.03 <- velezmalaga.speed[velezmalaga.speed$entity_id=="weatherobserved-velezmalaga-03",]
velezmalaga.speed.04 <- velezmalaga.speed[velezmalaga.speed$entity_id=="weatherobserved-velezmalaga-04",]
velezmalaga.speed.05 <- velezmalaga.speed[velezmalaga.speed$entity_id=="weatherobserved-velezmalaga-05",]
velezmalaga.speed.06 <- velezmalaga.speed[velezmalaga.speed$entity_id=="weatherobserved-velezmalaga-06",]
ggplot(velezmalaga.speed.01, aes(x=c(1:nrow(velezmalaga.speed.01)),y = windSpeed)) + geom_line()
ggplot(velezmalaga.speed.02, aes(x=c(1:nrow(velezmalaga.speed.02)),y = windSpeed)) + geom_line()
ggplot(velezmalaga.speed.03, aes(x=c(1:nrow(velezmalaga.speed.03)),y = windSpeed)) + geom_line()
ggplot(velezmalaga.speed.04, aes(x=c(1:nrow(velezmalaga.speed.04)),y = windSpeed)) + geom_line()
ggplot(velezmalaga.speed.05, aes(x=c(1:nrow(velezmalaga.speed.05)),y = windSpeed)) + geom_line()
ggplot(velezmalaga.speed.06, aes(x=c(1:nrow(velezmalaga.speed.06)),y = windSpeed)) + geom_line()
|
4047410ed4b74d60d17b841d106b96ee07f294c8
|
9463229d7d9cc5902971aed9c9c4efe2f4083c50
|
/R/vis_functions.R
|
93c3badc6f9d00744b1c1c379c854774d3f66bac
|
[] |
no_license
|
sapfluxnet/sapfluxnetQC1
|
df66fb4c8282c14aa8921e668f671928c73e300a
|
c66115798d0c0814f399b1df6505f146594b6bdf
|
refs/heads/master
| 2021-05-01T12:53:11.380754
| 2019-03-01T09:15:47
| 2019-03-01T09:15:47
| 52,454,181
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 23,575
|
r
|
vis_functions.R
|
################################################################################
# VISUALIZATION FUNCTIONS #
# #
# Functions to visualize the data #
################################################################################
################################################################################
#' Gaps density visualization
#'
#' Heatmap calendar to visualize gaps density
#'
#' @family Visualization functions
#'
#' @param data Data frame containing the data (env or sf) with the TIMESTAMP
#' column and the environmental variables or the sapflow measures for the
#' trees
#'
#' @return Plot is printed and a ggplot2 object is created if there is
#' assignation
#'
#' @export
# START
# Function declaration
vis_gaps_calendar <- function(data, parent_logger = 'test') {
# Using callin handlers to manage errors
withCallingHandlers({
# STEP 0
# Argument checks
if (!is.data.frame(data)) {
stop('data provided is not a data frame')
}
# STEP 1
# modify the data shape to be able to represent the visualization
data %>%
tidyr::gather(Id, Value, -TIMESTAMP) %>%
# STEP 2
# Create new variables containing time information
dplyr::mutate(
Year = lubridate::year(TIMESTAMP),
Month = lubridate::month(TIMESTAMP, label = TRUE),
Week = factor(lubridate::isoweek(TIMESTAMP)),
Day = factor(lubridate::wday(TIMESTAMP, label = TRUE),
levels = rev(c('Mon', 'Tues', 'Wed', 'Thurs',
'Fri', 'Sat', 'Sun')),
ordered = TRUE)
) %>%
# STEP 3
# Group by interest variables and summarise by n
dplyr::group_by(Year, Month, Week, Day) %>%
dplyr::summarise(n = sum(!is.na(Value))) %>%
# STEP 4
# Plot
ggplot(aes(x = Week, y = Day, fill = n)) +
geom_tile() +
facet_grid(Year ~ Month, scales = 'free_x') +
scale_fill_gradient(low = "#C8F7C5", high = "#26A65B") +
# viridis::scale_fill_viridis() +
theme_sfn()
# END FUNCTION
},
# handlers
warning = function(w){logging::logwarn(w$message,
logger = paste(parent_logger, 'vis_gaps_calendar', sep = '.'))},
error = function(e){logging::logerror(e$message,
logger = paste(parent_logger, 'vis_gaps_calendar', sep = '.'))},
message = function(m){logging::loginfo(m$message,
logger = paste(parent_logger, 'vis_gaps_calendar', sep = '.'))})
}
################################################################################
#' Plotting an histogram for gaps intervals/gaps coverage
#'
#' Wrapper for ggplot to plot an histogram of gaps info
#'
#' This function is a simple wrapper for ggplot + geom_histogram. It produces
#' a ggplot object that can be modified adding layers, like any other ggplot
#' object.
#'
#' @family Visualization Functions
#'
#' @param gaps_info Data frame as obtained from \code{\link{qc_mind_the_gap}}
#'
#' @param type Character indicating what to represent, \code{gap_interval} or
#' \code{gap_coverage}
#'
#' @param binwidth Bin width as stated in geom_histogram, default to NULL to
#' use the geom_histrogram default. Change it if more or less resolution is
#' needed. Only works for \code{type = 'gap_interval'}.
#'
#' @return a ggplot object with the basic histogram, no themes added.
#'
#' @export
# START
# Function declaration
vis_plot_the_gap <- function(gaps_info, type = 'gap_interval', binwidth = NULL,
parent_logger = 'test') {
# Using calling handlers to manage errors
withCallingHandlers({
# STEP 0
# Argument check
# Is gaps_info a data frame?
if (!is.data.frame(gaps_info)) {
stop('gaps_info is not a data frame')
}
# Has it the necessary variables, as produced by mind_the_gap?
if (any(is.null(gaps_info$gap_interval), is.null(gaps_info$gap_coverage))) {
stop('gaps_info has not the necessary variables,',
' see function help (?vis_plot_the_gap)')
}
# Is data empty (no gaps found)
if (gaps_info[1,1] == 'No gaps found') {
# create an empty plot
res_plot <- ggplot(data.frame(x = c(1,5,10), y =c(1,5,10)),
aes(x = x, y = y)) +
geom_blank() +
annotate('text', x = 5, y = 5, label = 'No gaps found') +
theme_void()
# return empty plot
return(res_plot)
}
# STEP 1
# Create the ggplot object
# 1.1 gap_coverage special effects
if (type == 'gap_coverage') {
res_plot <- gaps_info %>%
dplyr::mutate(gap_coverage = gap_coverage * 100) %>%
ggplot(aes_string(x = type)) +
geom_histogram(binwidth = 5,
fill = viridis::viridis(1)) +
scale_x_continuous(limits = c(NA, 105)) +
labs(x = 'Gap coverage (%)', y = 'Count') +
theme_sfn()
} else {
# 1.2 gap_interval special effects
res_plot <- ggplot(gaps_info, aes_string(x = type)) +
geom_histogram(binwidth = binwidth,
fill = viridis::viridis(1)) +
labs(x = 'Gap interval (minutes)', y = 'Count') +
theme_sfn()
}
# STEP 2
# Return the plot
return(res_plot)
# END FUNCTION
},
# handlers
warning = function(w){logging::logwarn(w$message,
logger = paste(parent_logger,
'vis_plot_the_gap', sep = '.'))},
error = function(e){logging::logerror(e$message,
logger = paste(parent_logger,
'vis_plot_the_gap', sep = '.'))},
message = function(m){logging::loginfo(m$message,
logger = paste(parent_logger,
'vis_plot_the_gap', sep = '.'))})
}
################################################################################
#' TIMESTAMP with gaps visualization
#'
#' Concordance lines plot with gaps
#'
#' @family Visualization Functions
#'
#' @param sapf_gaps Data frame with the sapflow gap info as obtained from
#' \code{\link{qc_mind_the_gap}}
#'
#' @param env_gaps Data frame with the env gap info as obtained from
#' \code{\link{qc_mind_the_gap}}
#'
#' @param sapf_intervals Data frame with the sapflow intervals info as obtained from
#' \code{\link{qc_time_interval}}
#'
#' @param env_intervals Data frame with the env intervals info as obtained from
#' \code{\link{qc_time_interval}}
#'
#' @return A ggplot object with the basic lines plot, no themes added.
#'
#' @export
# START
# Function declaration
vis_gap_lines <- function(sapf_gaps = NULL, env_gaps = NULL,
sapf_intervals = NULL, env_intervals = NULL,
parent_logger = 'test') {
# Using calling handlers to manage errors
withCallingHandlers({
# STEP 1
# Get the data ready to plot
# 1.1 sapf
# sapf_intervals <- qc_time_interval(sapf_data)
sapf_intervals$Object[1] <- 'Total_sapf'
# sapf_gaps <- qc_mind_the_gap(sapf_data)
# if no gaps, no join
if (sapf_gaps$Object[1] == 'No gaps found') {
sapf_tmp_data <- sapf_intervals
} else {
sapf_tmp_data <- dplyr::full_join(sapf_intervals, sapf_gaps, by = 'Object')
}
# 1.2 env
# env_intervals <- qc_time_interval(env_data)
env_intervals$Object[1] <- 'Total_env'
# env_gaps <- qc_mind_the_gap(env_data)
# if no gaps, no join
if (env_gaps$Object[1] == 'No gaps found') {
env_tmp_data <- env_intervals
} else {
env_tmp_data <- dplyr::full_join(env_intervals, env_gaps, by = 'Object')
}
# 1.3 all
gaps_info <- dplyr::bind_rows(env_tmp_data, sapf_tmp_data)
# STEP 2
# For loop
# 2.1 Initiate res vectors
x_start <- vector()
x_end <- vector()
y_start <- vector()
y_end <- vector()
# 2.3 Get the object names
object_names <- unique(gaps_info$Object)
# 2.4 For loop
for (obj in object_names) {
# data by object
tmp_data <- gaps_info %>%
dplyr::filter(Object == obj)
# update the vectors
# 2.4.1 no gaps
if (all(is.na(tmp_data$gap_start))) {
x_start <- c(x_start, tmp_data$t0)
x_end <- c(x_end, tmp_data$tf)
y_start <- c(y_start, as.character(tmp_data$Object))
y_end <- c(y_end, as.character(tmp_data$Object))
} else {
# 2.4.2 gaps
x_start <- c(x_start, tmp_data$timestamp_start[[1]], tmp_data$gap_end)
x_end <- c(x_end, tmp_data$gap_start, tmp_data$timestamp_end[[1]])
y_start <- c(y_start, as.character(tmp_data$Object),
as.character(tmp_data$Object[[1]]))
y_end <- c(y_end, as.character(tmp_data$Object), as.character(tmp_data$Object[[1]]))
}
}
# STEP 3
# Build the plot data
plot_data <- data.frame(
x_start = as.POSIXct(x_start, origin = lubridate::origin),
x_end = as.POSIXct(x_end, origin = lubridate::origin),
y_start = y_start,
y_end = y_end,
stringsAsFactors = FALSE
) %>%
dplyr::mutate(y_start = factor(y_start, levels = rev(unique(y_start)))) %>%
dplyr::mutate(y_end = factor(y_end, levels = rev(unique(y_end))))
# STEP 4
# Build the plot
res_plot <- ggplot(plot_data, aes(x = x_start, y = y_start, color = y_start)) +
geom_segment(aes(xend = x_end, yend = y_end), size = 2) +
geom_point(aes(x = x_start, y = y_start)) +
geom_point(aes(x = x_end, y = y_end)) +
scale_x_datetime(date_breaks = '1 month') +
scale_colour_manual(values = c(rep(viridis::viridis(1),
length(unique(sapf_intervals$Object))),
rep(viridis::viridis(3)[2],
length(unique(env_intervals$Object))))) +
labs(x = 'TIMESTAMP', y = 'Object') +
theme_sfn() +
theme(legend.position = 'none',
axis.text.x = element_text(angle = 30, margin = margin(t = 15)))
# 3.1 And return it, by the power of return!!
return(res_plot)
# END FUNCTION
},
# handlers
warning = function(w){logging::logwarn(w$message,
logger = paste(parent_logger,
'vis_gap_lines', sep = '.'))},
error = function(e){logging::logerror(e$message,
logger = paste(parent_logger,
'vis_gap_lines', sep = '.'))},
message = function(m){logging::loginfo(m$message,
logger = paste(parent_logger,
'vis_gap_lines', sep = '.'))})
}
################################################################################
#' ggplot2 theme for SAPFLUXNET plots
#'
#' Custom ggplot2 theme for uniformization of plot visuals
#'
#' @param base_size Base font size in points (default 10). Half of this value
#'   (\code{half_line}) is the basic spacing unit used for margins, tick
#'   length and panel spacing.
#' @param base_family Base font family (default \code{"Lato"}). The family
#'   must be available on the system, otherwise ggplot2 falls back to its
#'   default family with a warning.
#'
#' @return A complete \code{ggplot2} theme object that can be added to any
#'   plot with \code{+}.
#'
#' @export
theme_sfn <- function(base_size = 10, base_family = "Lato") {
  # basic spacing unit (in points) used throughout the theme
  half_line <- base_size/2
  theme(line = element_line(colour = "black", size = 1,
                            linetype = 1, lineend = "butt"),
        rect = element_rect(fill = NA, colour = "black",
                            size = 1, linetype = 1),
        text = element_text(family = base_family, face = "plain",
                            colour = "black", size = base_size,
                            lineheight = 0.9, hjust = 0.5,
                            vjust = 0.5, angle = 0,
                            margin = margin(), debug = FALSE),
        axis.line = element_blank(),
        # axis.line.x = element_line(),
        # axis.line.y = element_line(),
        axis.text = element_text(size = rel(0.8)),
        # ticks point inwards (negative tick length below), so push the axis
        # labels a little further away from the panel than the default
        axis.text.x = element_text(margin = margin(t = 0.8 * half_line*2.5),
                                   vjust = 1),
        axis.text.y = element_text(margin = margin(r = 0.8 * half_line*2),
                                   hjust = 1),
        axis.ticks = element_line(colour = "black", size = 0.5),
        # negative length draws the ticks inside the panel
        axis.ticks.length = unit(-half_line, "pt"),
        axis.title.x = element_text(margin = margin(t = 0.8 * half_line,
                                                    b = 0.8 * half_line/2)),
        axis.title.y = element_text(angle = 90,
                                    margin = margin(r = 0.8 * half_line,
                                                    l = 0.8 * half_line/2)),
        # FIX: the original call had a dangling empty argument (`fill = `);
        # the empty argument was treated as missing so the default fill was
        # used anyway — the argument is simply dropped here (same behavior).
        legend.background = element_rect(colour = NA),
        legend.spacing = unit(1, "pt"),
        legend.key = element_rect(colour = NA),
        legend.key.size = unit(1, "lines"),
        legend.key.height = NULL,
        legend.key.width = NULL,
        legend.text = element_text(size = rel(0.8)),
        legend.text.align = NULL,
        legend.title = element_text(hjust = 0.5),
        legend.title.align = 0,
        legend.position = "right",
        legend.direction = NULL,
        legend.justification = "top",
        legend.box = NULL,
        panel.background = element_blank(),
        panel.border = element_rect(),
        panel.grid = element_blank(),
        # panel.grid.major = element_line(colour = "black", size = rel(0.3),
        #                                 linetype = 2),
        # panel.grid.minor = element_blank(),
        # panel.grid.major.x = element_blank(),
        panel.spacing = unit(half_line, "pt"),
        panel.spacing.x = NULL,
        panel.spacing.y = NULL,
        # draw the panel (border) on top of the data layers
        panel.ontop = TRUE,
        strip.background = element_rect(size = rel(0.3)),
        strip.text = element_text(colour = "grey10", size = rel(0.8)),
        strip.text.x = element_text(margin = margin(t = half_line,
                                                    b = half_line)),
        strip.text.y = element_text(angle = -90,
                                    margin = margin(l = half_line, r = half_line)),
        strip.switch.pad.grid = unit(0.1, "cm"),
        strip.switch.pad.wrap = unit(0.1, "cm"),
        plot.background = element_blank(),
        plot.title = element_text(size = rel(1.2),
                                  margin = margin(b = half_line * 1.2)),
        plot.margin = margin(half_line, half_line, half_line, half_line),
        # mark the theme as complete so it fully replaces the active theme
        complete = TRUE)
}
################################################################################
#' Plotting a diagram of biomes
#'
#' Builds a ggplot object that draws the biome regions as colored polygon
#' areas in the mean annual precipitation (MAP) vs. mean annual temperature
#' (MAT) plane, using the SpatialPolygonsDataFrame returned by
#' \code{\link{qc_get_biomes_spdf}}
#'
#' @family Visualization Functions
#'
#' @param merge_deserts Logical indicating if desert biomes should be merged
#'   in a single biome. By default, deserts are not merged.
#'
#' @return a ggplot object showing the biomes.
#'
#' @export
# START
# Function declaration
vis_biome <- function(merge_deserts = FALSE, parent_logger = 'test') {
  # route warnings/errors/messages to the logging framework
  withCallingHandlers({
    # STEP 0
    # Argument validation: merge_deserts must be a single, non-missing logical
    if (!(is.logical(merge_deserts))) {
      stop('merge_deserts must be logical')
    }
    if (is.na(merge_deserts)) {
      stop('merge_deserts must be either TRUE or FALSE')
    }
    # STEP 1
    # Flatten the biome polygons into a plain data frame ggplot can draw;
    # fortify's chatter is silenced here rather than logged
    suppressMessages(
      biome_polygons <- fortify(qc_get_biomes_spdf(merge_deserts = merge_deserts))
    )
    # STEP 2
    # One viridis color per biome; the index vector only reshuffles the
    # palette so neighboring biomes stay visually distinct
    if (merge_deserts) {
      biome_palette <- viridis::viridis(9)[c(2,9,3,4,6,7,8,1)]
    } else {
      biome_palette <- viridis::viridis(9)[c(2,3,5,4,9,6,7,8,1)]
    }
    # Interactive polygons: the biome name serves as tooltip, hover id,
    # polygon group and fill key all at once
    biome_plot <- ggplot() +
      ggiraph::geom_polygon_interactive(data = biome_polygons,
                                        aes(tooltip = id, data_id = id,
                                            x = long, y = lat, group = id,
                                            fill = id)) +
      scale_fill_manual('Biomes', values = biome_palette) +
      xlab('Mean annual precipitation (mm)') +
      ylab('Mean annual temperature (ºC)')
    return(biome_plot)
    # END FUNCTION
  },
  # handlers: forward conditions to the hierarchical logger
  warning = function(w){logging::logwarn(w$message,
                                         logger = paste(parent_logger,
                                                        'vis_biome',
                                                        sep = '.'))},
  error = function(e){logging::logerror(e$message,
                                        logger = paste(parent_logger,
                                                       'vis_biome',
                                                       sep = '.'))},
  message = function(m){logging::loginfo(m$message,
                                         logger = paste(parent_logger,
                                                        'vis_biome',
                                                        sep = '.'))})
}
################################################################################
#' Plotting a diagram of biomes with sites as dots
#'
#' This function produces a ggplot object showing the biomes as colored areas
#' according to mean annual temperature (MAT) and mean annual precipitation (MAP),
#' using the function \code{\link{vis_biome}}, and adds the sites on it according
#' to their values of MAT and MAP.
#'
#' @family Visualization Functions
#'
#' @param data Data frame of site metadata, including mean annual temperature
#'   (si_mat) and mean annual precipitation (si_map) columns, or at least
#'   latitude (si_lat) and longitude (si_long) columns that will be used to
#'   obtain climatic data with \code{\link{qc_get_biome}}.
#'
#' @param merge_deserts Logical indicating if desert biomes should be merged
#'   in a single biome. By default, deserts are not merged.
#'
#' @return a ggplot object showing the sites (one interactive point per
#'   si_code) on top of the biome diagram.
#'
#' @export
# START
# Function declaration
vis_location_biome <- function(data, merge_deserts = FALSE,
                               parent_logger = 'test') {
  # Using calling handlers to route warnings/errors/messages to the logger
  withCallingHandlers({
    # STEP 0
    # Argument checks
    # Is data a data.frame?
    if (!is.data.frame(data)) {
      stop('Provided data object is not a data.frame.',
           ' Please verify if it is the correct object')
    }
    # Does data contains a longitude variable?
    if (is.null(data$si_long)) {
      stop('There is no longitude variable in this dataset. ',
           'Please verify if it is the correct data')
    }
    # Does data contains a latitude variable?
    if (is.null(data$si_lat)) {
      stop('There is no latitude variable in this dataset. ',
           'Please verify if it is the correct data')
    }
    # Is merge_deserts logical?
    if (!(is.logical(merge_deserts))) {
      stop('merge_deserts must be logical')
    }
    # Is merge_deserts NA?
    if (is.na(merge_deserts)) {
      stop('merge_deserts must be either TRUE or FALSE')
    }
    # STEP 1
    # Get MAT and MAP if not provided; qc_get_biome derives them from the
    # (already validated) si_lat/si_long coordinates
    if (!all(c('si_mat', 'si_map') %in% names(data))){
      data <- qc_get_biome(data, merge_deserts = merge_deserts)
    }
    # STEP 2
    # Make the plot
    # 2.1 Get biome background (colored polygon areas)
    plot <- vis_biome(merge_deserts = merge_deserts)
    # 2.2 Add one interactive point per site; si_code doubles as tooltip
    #     and hover id
    # NOTE(review): coord_cartesian below hard-codes the view window
    # (0-4500 mm, -16-30 ºC) with expand = FALSE, so sites outside this
    # range are clipped out of the picture.
    plot <- plot +
      ggiraph::geom_point_interactive(data = data, aes(
        x = si_map, y = si_mat,
        tooltip = si_code, data_id = si_code
      ),
      color = 'black', shape = 21, fill = 'white', size = 2, stroke = 0.5) +
      theme_bw() +
      coord_cartesian(xlim = c (0, 4500), ylim = c(-16, 30), expand = FALSE)
    # 2.3 Return the plot object
    return(plot)
    # END FUNCTION
  },
  # handlers
  warning = function(w){logging::logwarn(w$message,
                                         logger = paste(parent_logger,
                                                        'vis_location_biome',
                                                        sep = '.'))},
  error = function(e){logging::logerror(e$message,
                                        logger = paste(parent_logger,
                                                       'vis_location_biome',
                                                       sep = '.'))},
  message = function(m){logging::loginfo(m$message,
                                         logger = paste(parent_logger,
                                                        'vis_location_biome',
                                                        sep = '.'))})
}
################################################################################
#' Environmental responses plot
#'
#' Plot the desired environmental variable \emph{vs.} sapflow values, one
#' facet (and color) per tree.
#'
#' @family Visualization Functions
#'
#' @param SfnData SfnData object
#'
#' @param env_var Character indicating the name of the environmental variable
#'   to plot (must be a column of the SfnData environmental data)
#'
#' @param solar Use solarTIMESTAMP?
#'
#' @return a \code{ggplot} object with the desired plot
#'
#' @export
vis_environmental_responses <- function(
  SfnData,
  env_var = 'vpd',
  solar = FALSE,
  parent_logger = 'test'
) {
  # Using calling handlers to route warnings/errors/messages to the logger
  withCallingHandlers({
    # STEP 0
    # Checking arguments
    if (!is(SfnData, 'SfnData')) {
      stop('vis_environmental_responses needs an SfnData object as argument')
    }
    # STEP 1
    # Get the environmental data (solar selects the solarTIMESTAMP variant)
    env_data <- get_env(SfnData, solar)
    # 1.1 check for timestamp (if solar = TRUE and no solarTimestamp can be a
    # memory problem)
    if (all(is.na(env_data[['TIMESTAMP']]))) {
      stop('TIMESTAMP is all NA, can not produce the plot')
    }
    # 1.2 plot data: join env var with the sapflow data on TIMESTAMP and
    # reshape to long format (one row per tree/timestamp pair).
    # NOTE(review): full_join is called without an explicit `by`; the
    # resulting "Joining, by = ..." message is captured by the message
    # handler below and logged rather than printed.
    plot_data <- env_data %>%
      dplyr::select(TIMESTAMP, !!env_var) %>%
      dplyr::full_join(get_sapf(SfnData, solar), .) %>%
      tidyr::gather(Tree, Value, -TIMESTAMP, -(!!env_var))
    # sapflow units for the y axis label; assumed identical for all plants
    # (only the first plant's units are used)
    units_char <- get_plant_md(SfnData)[['pl_sap_units']][1]
    # STEP 2
    # Build the plot; aes_ is used for programmatic mapping of env_var
    # NOTE(review): aes_ is deprecated in recent ggplot2 - consider
    # migrating to aes(.data[[env_var]], ...) when the package floor allows.
    env_res_plot <- plot_data %>%
      ggplot(aes_(x = as.name(env_var), y = ~Value, colour = ~Tree)) +
      geom_point(alpha = 0.2) +
      labs(y = paste0('Sapflow [', units_char, ']')) +
      facet_wrap('Tree', ncol = 3)
    # STEP 3
    # Return the plot
    return(env_res_plot)
  },
  # handlers
  warning = function(w){logging::logwarn(w$message,
                                         logger = paste(parent_logger,
                                                        'vis_environmental_responses',
                                                        sep = '.'))},
  error = function(e){logging::logerror(e$message,
                                        logger = paste(parent_logger,
                                                       'vis_environmental_responses',
                                                       sep = '.'))},
  message = function(m){logging::loginfo(m$message,
                                         logger = paste(parent_logger,
                                                        'vis_environmental_responses',
                                                        sep = '.'))})
}
|
c95adf490093ceb91aa061b7f67c2a7c9e003d38
|
a09eaebce8c5e5aca5b25e18072ab9092b120680
|
/main/4_model_loglinear.R
|
05e7b1ccdb8ccf11d788824ba185af07973ff9ef
|
[] |
no_license
|
byyangyby/ct_rt_hk
|
ad72afda8c534cb85d2b3694ebbcb37578139704
|
52f6101f754e66d951e3a0d22d8f50223f6a3ab7
|
refs/heads/main
| 2023-07-12T20:33:35.650050
| 2021-08-23T03:29:50
| 2021-08-23T03:29:50
| 398,963,437
| 0
| 0
| null | 2021-08-23T03:36:03
| 2021-08-23T03:36:02
| null |
UTF-8
|
R
| false
| false
| 5,346
|
r
|
4_model_loglinear.R
|
#------------
# log-linear model relating incidence-based Rt to daily Ct statistics
# (also includes reverse validation on an alternative training period)
#------------
######################################################
## data_daily_all: daily case counts/sample counts, incidence-based Rt;
## daily Ct mean, median and skewness (imputed) from "1_merge_data"
## correspond to "Supplementary" data in source data file
######################################################
# read in "data_daily_all.csv"
# (as.is = T keeps character columns as character, no factor conversion)
daily.linelist <- read.csv("data_daily_all.csv",as.is=T)
#
######
# correlation
#####
# assign training/testing periods --
# period 1: 2020-07-01 .. 2020-08-31 (training window)
# period 2: 2020-11-01 .. 2021-03-31 (validation window)
# period 0: any date outside both windows
daily.linelist$period <-
  ifelse(as.Date(daily.linelist$date)>=as.Date("2020-07-01")&
           as.Date(daily.linelist$date)<=as.Date("2020-08-31"),1,
         ifelse(as.Date(daily.linelist$date)>=as.Date("2020-11-01")&
                  as.Date(daily.linelist$date)<=as.Date("2021-03-31"),2,0))
# sanity check of the period assignment (also surfaces NA dates)
table(daily.linelist$period,useNA = 'always') # checked
#
# Spearman rank correlation between two columns of a data frame.
# Returns c(rho, p-value), each rounded to 2 decimal places.
# NOTE(review): `use` is not an argument of cor.test(); it is swallowed by
# `...` and has no effect. It is kept here for exact behavior parity.
correlation.rho <- function(df, var1, var2) {
  test_result <- cor.test(df[, var1], df[, var2],
                          use = "na.or.complete", method = "spearman")
  round(c(test_result$estimate, test_result$p.value), 2)
}
# calculate rho between Ct mean/skewness and Rt, per period
# rows: (1) Ct mean vs log Rt, (2) Ct skewness vs log Rt
# cols: (rho, p) for period 1 then (rho, p) for period 2
cor.mat <- matrix(NA,2,4)
for (i in 1:2){
  df.tmp <- daily.linelist[daily.linelist$period==i,]
  df.tmp$log.rt <- log(df.tmp$local.rt.mean)
  cor.mat[1,(2*i-1):(2*i)] <-
    correlation.rho(df.tmp, var1 = "mean", var2 = "log.rt")
  cor.mat[2,(2*i-1):(2*i)] <-
    correlation.rho(df.tmp, var1 = "skewness", var2 = "log.rt")
}
# p-values rounded to 0 (i.e. < 0.005) are reported as "<0.001";
# this coerces the whole matrix to character
cor.mat[cor.mat==0] <- "<0.001"
## export results
# cor.mat - Supplementary Table 1
#write.csv(cor.mat,"table_s1.csv",row.names=F)
#--------------
#####
# regression: model selection by AIC on the training period
#####
train.period <- seq(as.Date("2020-07-06"),as.Date("2020-08-31"),1)
train.data <- daily.linelist[daily.linelist$date%in%as.character(train.period),]
#
# model select on AIC
# original form, Rt (untransformed outcome)
aic.list <- list()
m1 <- lm(local.rt.mean~mean,data=train.data)
m2 <- lm(local.rt.mean~median,data=train.data)
m3 <- lm(local.rt.mean~skewness.imputed,data=train.data)
m4 <- lm(local.rt.mean~mean+skewness.imputed,data=train.data)
m5 <- lm(local.rt.mean~median+skewness.imputed,data=train.data)
aic.list[[1]] <- AIC(m1,m2,m3,m4,m5)
#
# log-scaled Rt (same five predictor sets)
n1 <- lm(log(local.rt.mean)~mean,data=train.data)
n2 <- lm(log(local.rt.mean)~median,data=train.data)
n3 <- lm(log(local.rt.mean)~skewness.imputed,data=train.data)
n4 <- lm(log(local.rt.mean)~mean+skewness.imputed,data=train.data)
n5 <- lm(log(local.rt.mean)~median+skewness.imputed,data=train.data)
aic.list[[2]] <- AIC(n1,n2,n3,n4,n5)
#
# collect AICs into a 5 (models) x 2 (outcome scale) matrix
aic.mat <- matrix(NA,5,2)
for (i in 1:5){
  for (k in 1:2){
    aic.mat[i,k] <- aic.list[[k]]$AIC[i]
  }
}
aic.mat <- round(aic.mat,2)
## export results
# aic.mat - Supplementary Table 4
#write.csv(aic.mat,"table_s4.csv",row.names=F)
#--------------
#####
# get estimate: selected model is log(Rt) ~ Ct mean + Ct skewness
#####
model.used <- lm(log(local.rt.mean)~mean+skewness.imputed,data=train.data)
# exponentiated coefficients with 95% CIs (multiplicative effects on Rt)
round(exp(cbind(coef(model.used),confint(model.used))),2)
summary(model.used) # get model coefficients and adjusted R^2
#
# get daily Ct-based Rt (as main estimates)
# back-transformed prediction intervals for the full date range
est <- exp(predict(model.used,daily.linelist,interval = "prediction"))
daily.est <- cbind(daily.linelist,est)
#
# Spearman rank correlation coefficients (rho)
# between incidence- and Ct-based Rt
#
cor.rt <- matrix(NA,2,2)
for (i in 1:2){
  cortest.tmp <-
    with(daily.est[daily.est$period==i,],
         cor.test(log(local.rt.mean),log(fit),
                  use="na.or.complete",method="spearman"))
  # NOTE(review): `$est` works only through R's partial matching of
  # `$estimate` on the htest object
  cor.rt[i,1] <- round(cortest.tmp$est,2)
  cor.rt[i,2] <- round(cortest.tmp$p.value,2)
}
cor.rt
#
## export estimated daily Ct-based Rt
# daily.est - daily estimated Ct-based Rt
#write.csv(daily.est,"daily_ct_rt.csv",row.names = F)
# correspond to "Figure 2" data in source data file
##
#--------------
#####
# get coefficients
# for regression (main model vs. reverse-validation model)
#####
#
# get models built over the alternative training period (reverse validation)
train.period2 <- seq(as.Date("2020-11-20"),as.Date("2020-12-31"),1)
train.data2 <- daily.linelist[daily.linelist$date%in%as.character(train.period2),]
model.validate <- lm(log(local.rt.mean)~mean+skewness.imputed,data=train.data2)
#
model.list <- list(model.used,model.validate)
# coef.mat rows: (mean, skewness) for each model; cols: est, lower, upper, p
coef.mat <- matrix(NA,4,4)
for (i in 1:2){
  # coefficients (exponentiated, with 95% CI)
  coef.mat[(2*i-1):(2*i),1:3] <-
    round(cbind(exp(coef(model.list[[i]])[2:3]),
                exp(confint(model.list[[i]])[2:3,1:2])),2)
  # p-values
  coef.mat[(2*i-1):(2*i),4] <-
    round(summary(model.list[[i]])$coefficients[2:3,4],2)
}
# format as "est(lower,upper)" strings plus p-value column
coef.out <- matrix(NA,4,3)
for (i in 1:4){
  coef.out[i,1] <- paste0(coef.mat[i,1],"(",coef.mat[i,2],
                          ",",coef.mat[i,3],")")
  coef.out[i,2] <- coef.mat[i,4]
}
# add r-square (one per model, placed on the model's first row)
# NOTE(review): `$adj.r.square` relies on partial matching of
# `$adj.r.squared` on the summary.lm object
coef.out[1,3] <- round(summary(model.list[[1]])$adj.r.square,2)
coef.out[3,3] <- round(summary(model.list[[2]])$adj.r.square,2)
coef.out[is.na(coef.out)] <- ""
coef.out[coef.out==0] <- "<0.001"
## export results
# coef.out - Supplementary Table 2
#write.csv(coef.out,"table_s2.csv",row.names = F)
##
#####
## end of script
#####
|
3399fab5b28237eb279bcc6541bdb80796d8d2e9
|
c6ebbbfb3250deb51005c9d7a91a5b1e72407d20
|
/ui.R
|
6dede83ed77f71799303b7fb4f080387d54c1129
|
[] |
no_license
|
M-A-AliKhan/Main
|
57d09d8c67b05c59c4954b1b0094ce0aa37c8f3a
|
07f47c55c8918fbb4c0f45011f9953ef4b66be4e
|
refs/heads/master
| 2021-01-10T17:00:24.531120
| 2015-05-24T14:23:31
| 2015-05-24T14:23:31
| 36,175,010
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,183
|
r
|
ui.R
|
# ui.R - Shiny UI for a dynamic scatter-plot app.
# NOTE(review): this UI expects a matching server.R (not shown) that renders
# output$varSelector and output$scatterPlot and serves output$downloadPlot.
# Load shiny library
library(shiny)
# All UI elements must be inside shinyUI()
shinyUI(
  # Set up a page with a left-hand sidebar menu.
  # NOTE(review): pageWithSidebar is a legacy layout in modern Shiny;
  # fluidPage(sidebarLayout(...)) is the current equivalent - confirm the
  # target Shiny version before migrating.
  pageWithSidebar(
    # The header/title of the page.
    headerPanel("Dynamic Plotting"),
    # User inputs go in a sidebar on the left.
    sidebarPanel(
      # User types name of data.frame into a text box.
      textInput(
        inputId = "dataName",
        label = "Enter data.frame name"
      ),
      # This is returned once a valid data.frame name is entered
      # above. It allows the user to pick an x-variable and y-variable
      # from the column names of the selected data.frame.
      uiOutput(outputId = "varSelector")
    ),
    # The output goes in mainPanel().
    mainPanel(
      # Show the plot itself.
      plotOutput(
        outputId = "scatterPlot"),
      # Button to allow the user to save the image.
      downloadButton(
        outputId = "downloadPlot",
        label = "Download Plot")
    )
  )
)
|
3a3bd05e584b0547de031823396d1b008e040c97
|
79c3864f9edb99e373243fe2fbf14a8b6933a7f4
|
/R/get_multiple_LVs.R
|
1a16dd93c1eb9267c5bbe8e314f7f62c29301556
|
[
"MIT"
] |
permissive
|
acsala/sRDA
|
b7f33047b25cf5a81f702ae7a4936115bff4caa1
|
e6b3635d2bc79bd736825ecc40c3a1110bae9cd5
|
refs/heads/master
| 2022-08-12T03:27:13.835323
| 2022-07-27T08:53:04
| 2022-07-27T08:53:04
| 124,358,659
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,145
|
r
|
get_multiple_LVs.R
|
# Extract multiple latent variable (LV) pairs by repeatedly fitting sRDA.
#
# After each fit the predictor matrix is deflated (the component associated
# with the current latent variable XI is subtracted) and sRDA is fitted again
# on the residual matrix. Extraction stops after `nr_latent` LVs, or earlier
# when the change in the sum of squared betas between consecutive LVs falls
# below `stop_criterium`.
#
# Args:
#   X, Y           predictor / predicted data sets
#   penalization   penalization type, forwarded to sRDA()
#   lambda         ridge penalty (candidate values when cross-validating)
#   nonzero        number of non-zero alpha weights (candidates when CV-ing)
#   nr_latent      maximum number of latent variable pairs to extract
#   stop_criterium tolerance for the early-stopping rule (also passed to
#                  sRDA() as its convergence tolerance)
#   max_iterations maximum number of sRDA iterations per latent variable
#   cross_validate logical; cross-validate lambda/nonzero for every LV
#
# Returns: a list with per-LV results (XI, ETA, ALPHA, BETA, iteration
#   counts, penalty settings and, when cross-validating, the CV results).
get_multiple_LVs <- function(X,
                             Y,
                             penalization,
                             lambda,
                             nonzero,
                             nr_latent=1,
                             stop_criterium = 1 * 10^-5,
                             max_iterations,
                             cross_validate) {
  # residual predictor matrix; deflated after every extracted LV
  Res_X <- X
  # per-LV accumulators (these become lists once indexed with [[i]])
  alphas <- c()
  betas <- c()
  xis <- c()
  etas <- c()
  iterations <- c()
  s_cond_v <- c()
  # NOTE: red_indexs and sum_of_sq_alphas are collected but not returned;
  # kept to preserve the original control flow. (The original code also
  # declared an unused `corr_v` and computed an unused `reg_coeff` matrix
  # product per LV; both dead computations have been removed.)
  red_indexs <- c()
  ridge_penaltys<- c()
  nr_nonzeros <- c()
  CV_results <- c()
  iterations_crts <- c()
  sum_of_sq_betas <- c()
  sum_of_sq_alphas <- c()
  i <- 1
  WeCarryOn <- TRUE
  cat("Multiple latent variables scenario,
      number of latent variables calculated:",nr_latent, "\n")
  while (i <= nr_latent && WeCarryOn) {
    # fit one latent variable pair on the current residual matrix
    results <- sRDA(predictor = Res_X,
                    predicted = Y,
                    penalization = penalization,
                    ridge_penalty = lambda,
                    nonzero = nonzero,
                    tolerance = stop_criterium,
                    # cross validate for every latent variables
                    cross_validate = cross_validate,
                    multiple_LV = FALSE,
                    max_iterations = max_iterations)
    alphas[[i]] <- results$ALPHA
    betas[[i]] <- results$BETA
    xis[[i]] <- results$XI
    etas[[i]] <- results$ETA
    iterations[[i]] <- results$nr_iterations
    red_indexs[[i]] <- results$redundancy_index
    iterations_crts[[i]] <- results$iterations_crts
    ridge_penaltys[[i]] <- results$ridge_penalty
    nr_nonzeros[[i]] <- results$nr_nonzeros
    if(cross_validate){
      CV_results[[i]] <- results$CV_results
    }
    # calculate the residuals: deflate Res_X column-wise with respect to
    # the current latent variable XI
    calcres = function(Xcol)
      Xcol - results$inverse_of_XIXI %*% Xcol %*% t(xis[[i]])
    Res_X = apply(Res_X, 2, calcres)
    # early stopping on the change in the sum of SQUARED betas.
    # NOTE: the output element is named `sum_absolute_betas` even though
    # these are sums of squares; the name is kept for interface
    # compatibility with existing callers.
    sum_of_sq_betas[[i]] <- sum(betas[[i]]^2)
    sum_of_sq_alphas[[i]] <- sum(alphas[[i]]^2)
    if (i > 1){
      stop_condition <- abs(sum_of_sq_betas[[i]] - sum_of_sq_betas[[i-1]])
      s_cond_v[[i]] <- stop_condition
      if (stop_condition < stop_criterium){
        WeCarryOn <- FALSE
      }
    }
    i <- i + 1
  }
  result <- list(
    XI = xis,
    ETA = etas,
    ALPHA = alphas,
    BETA = betas,
    nr_iterations = iterations,
    # inverse_of_XIXI = SOLVE_XIXI,
    iterations_crts = iterations_crts,
    sum_absolute_betas = sum_of_sq_betas,
    ridge_penalty = ridge_penaltys,
    nr_nonzeros = nr_nonzeros,
    nr_latent_variables = nr_latent,
    CV_results = CV_results
  )
  result
}
|
19851133fb4f9ad4230aa1a7b7b4e1c8626c4e16
|
0539c05542edb96da66aab6943e785e9d81fe980
|
/plot4.R
|
517519f71d92ea01eff9013a1a09736be594a927
|
[] |
no_license
|
metustat/ExData_Plotting1
|
f8fea02a102906be7845f160b323830ca1065d9a
|
ea36227fd333d3292fdf857ad5a1b72304851b83
|
refs/heads/master
| 2020-04-05T23:27:44.535844
| 2015-08-07T20:06:03
| 2015-08-07T20:06:03
| 39,706,154
| 0
| 0
| null | 2015-07-25T23:19:31
| 2015-07-25T23:19:31
| null |
UTF-8
|
R
| false
| false
| 1,526
|
r
|
plot4.R
|
# Exploratory Data Analysis - plot4.R
# Downloads the UCI "household power consumption" dataset, subsets the
# observations for 2007-02-01 and 2007-02-02, and writes a 2x2 panel of
# time-series plots to plot4.png.
fileUrl<-"https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl,destfile="data.zip")
unzip("data.zip",list=TRUE)
# semicolon-separated file; read everything as character first, then coerce
# the measurement columns (3:9) to numeric below
data<-read.csv2(unz("data.zip","household_power_consumption.txt"),
                header=TRUE,colClasses=c("character"))
# coercing "?" placeholders to numeric raises NA warnings; silence them
# locally and restore the PREVIOUS warning setting afterwards
# (FIX: the original hard-coded options(warn = 0), clobbering any
# user-configured warning level)
old_warn <- options(warn=-1)
for(i in 3:9){
  class(data[,i])<-"numeric"
}
options(old_warn)
library("dplyr")
names(data)
# parse Date (day/month/year) and Time columns
data[,1]<-as.Date(data[,1],"%d/%m/%Y")
library(chron)
data[,2]<-chron(times=data[,2])
# keep only the two target days
fil_data<-filter(data,Date=="2007-02-01" | Date=="2007-02-02")
png(file="plot4.png", width = 480, height = 480)
par(mfrow=c(2,2))
# top-left: global active power; x ticks at day boundaries (1440 min/day)
with(fil_data,plot(Global_active_power,xaxt="n",type="l",xlab="",ylab=""))
axis(1,c(0,1440,2880),labels=c("Thu","Fri","Sat"))
title(ylab="Global Active Power (kilowatts)")
# top-right: voltage
with(fil_data,plot(Voltage,xaxt="n",type="l",xlab="",ylab=""))
axis(1,c(0,1440,2880),labels=c("Thu","Fri","Sat"))
title(ylab="Voltage",xlab="datetime")
# bottom-left: the three energy sub-metering series on one panel
with(fil_data,
     {
       plot(Sub_metering_1,col="black",type="l",xlab="",ylab="",xaxt="n")
       lines(Sub_metering_2,col="red")
       lines(Sub_metering_3,col="blue")
     }
)
legend(800,40,col=c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=1,bty="n")
title(ylab="Energy sub metering")
axis(1,c(0,1440,2880),labels=c("Thu","Fri","Sat"))
# bottom-right: global reactive power
with(fil_data,plot(Global_reactive_power,xaxt="n",type="l",xlab="",ylab=""))
axis(1,c(0,1440,2880),labels=c("Thu","Fri","Sat"))
# FIX: y label previously read "Global_rective_power" (typo)
title(ylab="Global_reactive_power",xlab="datetime")
dev.off()
|
55c526dcf4e6bd67c99a2f4ac21b2835110349ba
|
c287ac86f02fe8b3fc5ecc97fe761a636ee2f72c
|
/man/rbinorm.Rd
|
43d501bb73fd76c372f0e37dc5882a092c061745
|
[] |
no_license
|
cran/FamilyRank
|
88b9d194faacf337f0a0f2acfd8c6c2fa5387e82
|
0c31311610f1cfa4e7bdf1e41dda814bf0ccd772
|
refs/heads/master
| 2023-03-03T03:25:58.937043
| 2021-02-05T07:50:08
| 2021-02-05T07:50:08
| 336,239,780
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 962
|
rd
|
rbinorm.Rd
|
\name{rbinorm}
\alias{rbinorm}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Bimodal Normal Distribution}
\description{Simulates random data from a bimodal Gaussian distribution.}
\usage{
rbinorm(n, mean1, mean2, sd1, sd2, prop)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{n}{Number of observations to simulate}
\item{mean1}{Mean of mode 1}
\item{mean2}{Mean of mode 2}
\item{sd1}{Standard deviation of mode 1}
\item{sd2}{Standard deviation of mode 2}
\item{prop}{Probability of being in mode 1. \code{1 - prop} is the probability of being in mode 2.}
}
\details{This function is modeled off of the \code{\link{rnorm}} function.}
\value{Generates random deviates}
\author{Michelle Saul}
\examples{
## Generate 100 samples from a two-component Gaussian curve
samples <- rbinorm(n=100, mean1=10, mean2=20, sd1=1, sd2=2, prop=.5)
## Plot distribution of simulated data
plot(density(samples))
}
|
673090c7649058e71d72ae6164e0c609c798aa9e
|
efe3bdc6afd1f111ece86b830fc92cc4bec8910e
|
/man/contact_df_countries.Rd
|
bb1525fd9fac7f9c032bfa818d93a1210d679107
|
[
"MIT"
] |
permissive
|
Bisaloo/contactdata
|
cd2b35db31ae2d32b52721bc13e01cc80a5e4484
|
444ba7569703863092ed488f8f4b572a6453c5e6
|
refs/heads/main
| 2023-04-11T15:34:34.543076
| 2023-03-22T11:19:38
| 2023-03-22T11:19:48
| 293,047,665
| 6
| 2
|
NOASSERTION
| 2023-09-05T11:32:09
| 2020-09-05T09:52:13
|
R
|
UTF-8
|
R
| false
| true
| 1,477
|
rd
|
contact_df_countries.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/contact_countries.R
\name{contact_df_countries}
\alias{contact_df_countries}
\title{Get a data.frame (in long format) of contact data for multiple countries}
\usage{
contact_df_countries(countries, ...)
}
\arguments{
\item{countries}{A character string or a vector of character containing the
names of the countries for which to return contact data}
\item{...}{Arguments passed to \code{\link[=contact_matrix]{contact_matrix()}}}
}
\value{
A data.frame (in long format) with 4 columns:
\itemize{
\item \code{country}: the country name
\item \code{age_from}: the age group of the individual
\item \code{age_to}: the age group of the contact
\item \code{contact}: the intensity of contact
}
}
\description{
Get a data.frame (in long format) of contact data for multiple countries
}
\examples{
contact_df_countries(c("Austria", "Belgium"), location = "all")
}
\references{
Kiesha Prem, Alex R. Cook, Mark Jit, \emph{Projecting social contact
matrices in 152 countries using contact surveys and demographic data}, PLoS
Comp. Biol. (2017), \doi{10.1371/journal.pcbi.1005697}
Kiesha Prem, Kevin van Zandvoort, Petra Klepac, Rosalind M. Eggo,
Nicholas G. Davies, CMMID COVID-19 Working Group, Alex R. Cook, Mark Jit,
\emph{Projecting contact matrices in 177 geographical regions: An update and
comparison with empirical data for the COVID-19 era}, PLoS Comp. Biol.
(2021), \doi{10.1371/journal.pcbi.1009098}.
}
|
dd5ecc8dce421807b4a4c7528618b117cc1d238a
|
43b17584478c0360d0fdced151db43c35728837a
|
/man/set_gitlab_connection.Rd
|
fc79babb58778a4558fc473061d6d53239a1de67
|
[] |
no_license
|
cran/gitlabr
|
51357cc4c136b4d5125a1d39aec63ea62ef509d1
|
b8d64273024933804044ca8eeab18930a4611c55
|
refs/heads/master
| 2022-10-03T07:18:40.338952
| 2022-09-13T10:00:02
| 2022-09-13T10:00:02
| 48,080,948
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 990
|
rd
|
set_gitlab_connection.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/global_env.R
\name{set_gitlab_connection}
\alias{set_gitlab_connection}
\alias{get_gitlab_connection}
\alias{unset_gitlab_connection}
\title{Get/set a GitLab connection for all calls}
\usage{
set_gitlab_connection(gitlab_con = NULL, ...)
get_gitlab_connection()
unset_gitlab_connection()
}
\arguments{
\item{gitlab_con}{A function used for GitLab API calls, such
as \code{\link[=gitlab]{gitlab()}} or as returned by \code{\link[=gl_connection]{gl_connection()}}.}
\item{...}{if gitlab_con is NULL, a new connection is created used the parameters
is ... using \code{\link[=gl_connection]{gl_connection()}}}
}
\value{
Used for side effects. Set or unset global connection settings.
}
\description{
This sets the default value of \code{gitlab_con}
in a call to \code{\link[=gitlab]{gitlab()}}
}
\examples{
\dontrun{
set_gitlab_connection("https://gitlab.com", private_token = Sys.getenv("GITLAB_COM_TOKEN"))
}
}
|
19b2f5e4cb2ec6ec474b6ba70a24a2464a5411e4
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/geosapi/examples/GSOracleNGDataStore.Rd.R
|
8b5b776aefe6d792e8c71d1ad329189e824bfa63
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 283
|
r
|
GSOracleNGDataStore.Rd.R
|
# Extracted example from the geosapi package documentation.
library(geosapi)
### Name: GSOracleNGDataStore
### Title: Geoserver REST API OracleNGDataStore
### Aliases: GSOracleNGDataStore
### Keywords: DataStore OracleNG api geoserver rest
### ** Examples
# Construct an OracleNG data store descriptor. This call only builds the
# local object; presumably no GeoServer connection is made here (confirm in
# the geosapi documentation).
GSOracleNGDataStore$new(dataStore="ds", description = "des", enabled = TRUE)
|
207b16f0721a41f3d03f768bc77d18603934b086
|
2bd293d9eff164a31e5ca98900a3b623aced5815
|
/man/drawbipl.catPCA.Rd
|
e3cbfdaea80a3bb115093d1b90abd36a5e5a24c0
|
[] |
no_license
|
carelvdmerwe/UBbipl3
|
213a17f60e7cd1796f51ce2a2f5fc097b57e722c
|
ea5bbe4767d42d92d949e42b1db199fc8d5e12f0
|
refs/heads/master
| 2020-06-07T04:44:22.514152
| 2019-06-20T13:39:36
| 2019-06-20T13:39:36
| 192,926,846
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 128
|
rd
|
drawbipl.catPCA.Rd
|
\name{drawbipl.catPCA}
\alias{drawbipl.catPCA}
\title{
DRAW CATEGORICAL PCA BIPLOTS
}
\description{
See Understanding Biplots.
}
|
0e986de1becbcb737312eaf8755f9ebb2a8d8c70
|
202fb2f3a908b0c002ef6859275a617ffa6a51e8
|
/man/multiplicativeSeasonalityDecomposition.Rd
|
9f02405b30b968bcd1771c80d0966e39aa0ddee4
|
[] |
no_license
|
jdestefani/MM4Benchmark
|
ad68d68a000dc879be2bcb91acb0bf4aaa48cc9f
|
bb8f185ce984121b084d109912e7df1566d7fb26
|
refs/heads/master
| 2023-09-04T17:58:34.312342
| 2021-11-20T21:30:09
| 2021-11-20T21:30:09
| 364,299,203
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 745
|
rd
|
multiplicativeSeasonalityDecomposition.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/benchmarks.R
\name{multiplicativeSeasonalityDecomposition}
\alias{multiplicativeSeasonalityDecomposition}
\title{multiplicativeSeasonalityDecomposition}
\usage{
multiplicativeSeasonalityDecomposition(input, fh)
}
\arguments{
\item{input}{\itemize{
\item Input time series
}}
\item{fh}{\itemize{
\item Forecasting horizon
}}
}
\value{
\itemize{
\item{\code{des_input}: }{Deseasonalized input time series}
\item{\code{SIOut}: }{Additional deseasonalization parameters}
}
}
\description{
Auxiliary function for computing the multiplicative seasonality decomposition
From \url{https://github.com/M4Competition/M4-methods/blob/master/Benchmarks\%20and\%20Evaluation.R}
}
|
642947d1b807d4902d3b3bc6db02da9b2eb66a2a
|
915dd8fb8c341c90288f84fd19a9d72ad4f11b9c
|
/chemenhance.R
|
f7b2f228b0a30943ab2915542366c805942ef0d9
|
[] |
no_license
|
martwine/Transfer-velocity-core-scheme
|
b8db0c3889d0d19738546b18148a3d7385768046
|
e0786e1437ef90845294123552ca188eb621d19a
|
refs/heads/master
| 2016-09-06T07:49:27.708907
| 2012-07-03T08:07:40
| 2012-07-03T08:07:40
| 1,215,413
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,233
|
r
|
chemenhance.R
|
source("K_calcs_Johnson_OS.R")
# Chemical enhancement calculations based on Hoover and Berkshire (1969) equation
#############################################
#Calculating chemical enhancement to kw
###############################################
# Apparent mass boundary layer (film) depth for a compound, derived from
# wind speed and water-side transfer velocity (see Liss and Slater 1974).
z_from_wind <- function(compound, ws, T, S) {
  # mean of the two diffusivity estimates, divided by the water-side
  # transfer velocity kw; the factor 100 presumably converts kw to cm/s so
  # the result is in cm (per the original "z in cm" note) - TODO confirm
  # against K_calcs_Johnson_OS.R
  mean_diffusivity <- 0.5 * (diff_HM(compound, T, S) + diff_WC(compound, T, S))
  mean_diffusivity / (kw(compound, T, ws, S) * 100)
}
# Hoover & Berkshire (1969) chemical enhancement factor for the water-side
# transfer velocity kw.
#
# Args:
#   compound, ws, T, S  compound name, wind speed, temperature, salinity
#   khyd  pseudo first-order rate constant of reaction in water
#   tau   Hoover-Berkshire tau parameter (default just above 1, its
#         mathematical lower bound)
alpha_kw <- function (compound,ws,T,S,khyd=1e-100,tau=1.0000000001){
  # z is apparent mass boundary layer depth (see Liss and Slater 1974).
  # BUG FIX: z_from_wind's signature is (compound, ws, T, S); the original
  # call passed (compound, T, ws, S), swapping temperature and wind speed.
  # Named arguments are used here so the mapping is explicit.
  z <- z_from_wind(compound, ws = ws, T = T, S = S)
  # dimensionless film depth based on the mean of the two diffusivity
  # estimates (same combination z_from_wind uses)
  x <- z*sqrt(khyd*tau/(0.5*(diff_HM(compound,T,S)+diff_WC(compound,T,S))))
  # Hoover-Berkshire enhancement factor
  alpha <- (tau)/((tau-1) + (tanh(x)/x))
  alpha
}
#define a range of tau (minus 1) for generic calculation
# NOTE: the khyd_for_given_*_table() functions below assume this vector is
# sorted in increasing order (they truncate it to a leading subset whose
# maximum achievable enhancement (t+1)/t still exceeds the target).
tau_minus_one_list <- c(1e-10,1e-9,1e-8,1e-7,1e-6,1e-5,1e-4,1e-3,1e-2,1e-1,1,10,100)
#a_test <- function(khyd,tauminusone,Diff,ws,T=15, S=35){
# tau <- tauminusone + 1
# z <- z_from_wind("O2",ws,T,S)
# x <- z*sqrt(khyd*tau/Diff)
# (tau*x)/((tauminusone*x) + tanh(x))
#}
# Residual between a target enhancement factor and the Hoover-Berkshire
# enhancement computed from the given inputs. Used as the objective for the
# uniroot() solver below, where everything except khyd is held fixed.
a_general <- function(khyd, tauminusone, Diff, z, target_value) {
  tau <- tauminusone + 1
  # dimensionless film depth
  q <- z * sqrt(khyd * tau / Diff)
  enhancement <- tau / (tauminusone + tanh(q) / q)
  target_value - enhancement
}
# Invert a_general(): find the rate constant khyd giving the requested
# enhancement factor, for fixed tau - 1, diffusivity (compound specific -
# must be supplied appropriately) and film depth. Bracketing root search
# over an extremely wide khyd range.
solve_a_general_for_rate <- function(tauminusone, Diff, z, target) {
  root_info <- uniroot(a_general,
                       interval = c(1e-300, 1e300),
                       tol = 1e-100,
                       tauminusone = tauminusone,
                       Diff = Diff,
                       z = z,
                       target_value = target,
                       maxiter = 10000)
  root_info$root
}
#generic table of k values for a given threshold enhancement over a range of tau and z. This is not compound-specific by default, as it isn't particularly compound-sensitive (at the order of magnitude level, which appropriate for this analysis). Use O2 as default typical average gas.
zeds <- function(ws = WS, compound = "O2", T = 15, S = 35) {
  # Water-side film depths (cm) for a vector of wind speeds, using O2 as
  # the model gas by default (after Liss and Slater 1974).
  # NOTE(review): the default `ws = WS` relies on a global WS vector being
  # defined elsewhere - confirm before calling with defaults.
  z_from_wind(compound, ws, T, S)
}
# Table of water-phase rate constants (khyd) that produce a given threshold
# enhancement `target`, over the tau-minus-one list (rows) and wind speeds
# (columns, via their film depths). Not strongly compound-specific at the
# order-of-magnitude level, so O2-based film depths are the default.
khyd_for_given_enhancement_table<-function(target, Diff=1e-5, ws=WS, T=15, S=35, tauminusonelist=tau_minus_one_list){
  #use t and WS (or user-specified vectors of temp and windspeed) to calculate the total transfer velocity (Kw) for a given compound over these ranges of conditions
  zedds<-zeds(ws,T=T,S=S)
  tabler<-NULL
  #need to modify tau-minus-one list so max enhancement = tau/tau-1 can always be satisfied
  # keep only tau values whose maximum achievable enhancement (t+1)/t
  # exceeds the target; since (t+1)/t decreases with t and the list is
  # sorted increasingly, the qualifying values form a leading prefix
  taucutter<-NULL
  for (t in tauminusonelist) {
    if (((t+1)/t)>target){
      taucutter<-c(taucutter,t)
    }
  }
  # truncate to that prefix.
  # NOTE(review): if NO tau qualifies, taucutter is NULL and
  # 1:length(taucutter) evaluates to c(1, 0), silently keeping wrong
  # entries - consider guarding for that edge case.
  tauminusonelist<-tauminusonelist[1:length(taucutter)]
  # one solver call per (tau, film depth) pair; rows grow by rbind
  for(i in tauminusonelist){
    rowtemp<-NULL
    for(j in zedds){
      rowtemp<-c(rowtemp,solve_a_general_for_rate(i,Diff,j,target))
    }
    tabler<-rbind(tabler,rowtemp)
  }
  # rows are labelled by tau - 1 values, columns by wind speed
  output_table<-data.frame(tabler,row.names=(tauminusonelist))
  names(output_table)<-ws
  output_table
}
#####################################################################
# Calculate chemical enhancement threshold values in gas phase #
#####################################################################
# Gas-phase analogue of alpha_kw: Hoover-Berkshire enhancement factor for
# the air-side transfer velocity ka. `katm` is the rate constant of reaction
# in the gas phase; `S` is accepted for signature symmetry but unused.
alpha_ka <- function (compound, ws, T, S, katm = 1e-100, tau = 1.00000000001) {
  gas_diffusivity <- D_air(compound, T)
  # air-side film depth (factor 100: unit conversion of ka)
  film_depth <- gas_diffusivity / (100 * ka(compound, ws, T))
  # dimensionless film depth
  q <- film_depth * sqrt(katm * tau / gas_diffusivity)
  tau / ((tau - 1) + (tanh(q) / q))
}
# Gas-phase film depth (cm) for a compound under the given conditions:
# diffusivity in air over the air-side transfer velocity ka (factor 100
# presumably converts ka to cm/s - matches the "z in cm" convention above).
zg_from_wind <- function(compound, ws, T) {
  D_air(compound, T) / (ka(compound, ws, T) * 100)
}
zgeds <- function(ws = WS, compound = "O2", T = 15) {
  # Gas-phase film depths for a vector of wind speeds, with O2 as the model
  # gas (after Liss and Slater 1974).
  # NOTE(review): the default `ws = WS` relies on a global WS vector.
  zg_from_wind(compound, ws, T)
}
#generic table of k (rate constant) values for a given threshold enhancement over a range of tau and z. This is not compound-specific by default, as it isn't particularly compound-sensitive (at the order of magnitude level, which appropriate for this analysis). Use O2 as default typical average gas.
# Gas-phase twin of khyd_for_given_enhancement_table: same algorithm, but
# film depths come from zgeds() and the default diffusivity is for air.
khyd_for_given_gas_phase_enhancement_table<-function(target, Diff=0.1, ws=WS, T=15, tauminusonelist=tau_minus_one_list){
  #use t and WS (or user-specified vectors of temp and windspeed) to calculate the total transfer velocity (Kw) for a given compound over these ranges of conditions
  zedds<-zgeds(ws,T=T)
  tabler<-NULL
  #need to modify tau-minus-one list so max enhancement = tau/tau-1 can always be satisfied
  # keep only tau values whose max achievable enhancement (t+1)/t exceeds
  # the target (qualifying values are a leading prefix of the sorted list)
  taucutter<-NULL
  for (t in tauminusonelist) {
    if (((t+1)/t)>target){
      taucutter<-c(taucutter,t)
    }
  }
  # NOTE(review): as in the water-phase version, an empty taucutter makes
  # 1:length(taucutter) evaluate to c(1, 0) - consider guarding.
  tauminusonelist<-tauminusonelist[1:length(taucutter)]
  for(i in tauminusonelist){
    rowtemp<-NULL
    for(j in zedds){
      rowtemp<-c(rowtemp,solve_a_general_for_rate(i,Diff,j,target))
    }
    tabler<-rbind(tabler,rowtemp)
  }
  # rows labelled by tau - 1, columns by wind speed
  output_table<-data.frame(tabler,row.names=(tauminusonelist))
  names(output_table)<-ws
  output_table
}
|
06e0e62c2f239581a160a5923d7283818a8fbd62
|
ca25692e5f1e3c1f63c59e37fd92ffcdc6c78412
|
/code/12_extract_samples_calc_residuals.R
|
b4995e042ee5a627e1d40e64877a7522aa327dac
|
[] |
no_license
|
mkiang/decomposing_inequality
|
3a06f8d86c1eddc161e0c83bbfaa092cde7285f8
|
b3431e499572aeb2b11bfd2f7a2ea82d84ecfbb1
|
refs/heads/master
| 2020-09-06T12:19:47.116124
| 2019-11-09T06:40:55
| 2019-11-09T06:40:55
| 220,422,411
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,405
|
r
|
12_extract_samples_calc_residuals.R
|
## Imports ----
library(rstan)
library(tidyverse)
library(matrixStats)
## Project helpers: extract_samples(), calc_residual_disparity() and
## summarize_by_columns() used below are presumably defined here -- confirm.
source('./code/helpers/99_helper_functions.R')
## Define the level of significance ----
## A county is flagged "significant" when >= 80% of posterior draws fall on
## one side of zero (see the case_when near the bottom of this script).
sig_limit <- .8
## Mapping data ----
fips_idx_map <- readRDS('./data_working/fips_to_dummy_mappings.RDS')
## Get model fits ----
## Note: I named model 1 model 0 because I was using index as number of
## covariates. Rename here for the paper (where Model 1 will be null model)
m1_path <- './stanfit_objects/model0_no_covars.RDS'
m1_fit <- readRDS(m1_path)
m1_samps <- extract_samples(m1_fit)
## Drop the (large) stanfit object as soon as samples are extracted; gc()
## releases the memory immediately before loading the next fit.
rm(m1_fit, m1_path)
gc()
m2_path <- './stanfit_objects/model1_with_income.RDS'
m2_fit <- readRDS(m2_path)
m2_samps <- extract_samples(m2_fit)
rm(m2_fit, m2_path)
gc()
## Residual disparity ----
m1_res_disp <- calc_residual_disparity(m1_samps)
m2_res_disp <- calc_residual_disparity(m2_samps)
## Total disparity ----
## Total = difference in intercepts (alpha2 - alpha1) plus the residual part.
m1_tot_disp <- (m1_samps$alpha2 - m1_samps$alpha1) + m1_res_disp
m2_tot_disp <- (m2_samps$alpha2 - m2_samps$alpha1) + m2_res_disp
## Figure out the components of total disparity ----
## Posterior-median difference in county-specific terms (psi2 - psi1),
## one row per county (c_idx), stacked for both models.
diff_in_spec <- rbind(
  summarize_by_columns(m1_samps$psi2 - m1_samps$psi1) %>%
    dplyr::select(c_idx, diff_in_spec = p500) %>%
    dplyr::mutate(model_name = "Model 1"),
  summarize_by_columns(m2_samps$psi2 - m2_samps$psi1) %>%
    dplyr::select(c_idx, diff_in_spec = p500) %>%
    dplyr::mutate(model_name = "Model 2")
)
phis <- rbind(
  summarize_by_columns(m1_samps$phi) %>%
    dplyr::select(c_idx, phi = p500) %>%
    dplyr::mutate(model_name = "Model 1"),
  summarize_by_columns(m2_samps$phi) %>%
    dplyr::select(c_idx, phi = p500) %>%
    dplyr::mutate(model_name = "Model 2")
)
diff_in_alphas <- rbind(
  dplyr::as_tibble(list(
    diff_in_alpha =
      stats::median(m1_samps$alpha2 - m1_samps$alpha1)
  )) %>%
    dplyr::mutate(model_name = "Model 1"),
  dplyr::as_tibble(list(
    diff_in_alpha =
      stats::median(m2_samps$alpha2 - m2_samps$alpha1)
  )) %>%
    dplyr::mutate(model_name = "Model 2")
)
recip_delta_minus_delta <- rbind(
  dplyr::as_tibble(list(
    recip_delta_minus_delta =
      stats::median(1 / m1_samps$delta - m1_samps$delta)
  )) %>%
    dplyr::mutate(model_name = "Model 1"),
  dplyr::as_tibble(list(
    recip_delta_minus_delta =
      stats::median(1 / m2_samps$delta - m2_samps$delta)
  )) %>%
    dplyr::mutate(model_name = "Model 2")
)
## Decompose total disparity into: intercept difference, shared component
## (phi * (1/delta - delta)) and county-specific difference; then express
## each as a fraction of the total (sum_fracs is a sanity check, should be 1).
parts <- diff_in_spec %>%
  dplyr::left_join(phis) %>%
  dplyr::left_join(diff_in_alphas) %>%
  dplyr::left_join(recip_delta_minus_delta) %>%
  dplyr::mutate(
    total_disp = diff_in_alpha + phi * recip_delta_minus_delta +
      diff_in_spec,
    frac_alphas = diff_in_alpha / total_disp,
    frac_shared = (phi * recip_delta_minus_delta) / total_disp,
    frac_spec = diff_in_spec / total_disp,
    sum_fracs = frac_alphas + frac_shared + frac_spec
  )
## Save samples ----
save(m1_samps, file = './data_working/model1_extracted_samples.RData')
save(m2_samps, file = './data_working/model2_extracted_samples.RData')
saveRDS(parts, file = "./data_working/proportion_of_inequality.RDS")
rm(m1_samps, m2_samps, parts)
## Summarize and combine them ----
m1m2_tdisp <- rbind(
  summarize_by_columns(m1_tot_disp) %>%
    tibble::add_column(model_name = "Model 1"),
  summarize_by_columns(m2_tot_disp) %>%
    tibble::add_column(model_name = "Model 2")
) %>%
  tibble::add_column(disp_type = "total")
m1m2_rdisp <- rbind(
  summarize_by_columns(m1_res_disp) %>%
    tibble::add_column(model_name = "Model 1"),
  summarize_by_columns(m2_res_disp) %>%
    tibble::add_column(model_name = "Model 2")
) %>%
  tibble::add_column(disp_type = "residual")
resid_df <- rbind(m1m2_tdisp, m1m2_rdisp)
rm(m1m2_rdisp,
   m1m2_tdisp,
   m1_res_disp,
   m2_res_disp,
   m1_tot_disp,
   m2_tot_disp)
gc()
## Define "significant" counties as > X% above or below 0. ----
## n_neg / n_pos are counts of draws below/above zero and `count` the total
## number of draws -- columns presumably produced by summarize_by_columns();
## confirm against the helper file.
resid_df <- resid_df %>%
  dplyr::mutate(sig = dplyr::case_when(n_neg / count >= sig_limit ~ 1,
                                       n_pos / count >= sig_limit ~ 1,
                                       TRUE ~ 0))
## Add in correct fipschar ----
resid_df <- resid_df %>%
  dplyr::left_join(fips_idx_map, by = "c_idx")
## Save it ----
save(resid_df, file = './data_working/residual_disparities_data.RData')
|
a0102f39023ec233699522fa7ec639a9a60d7f61
|
6704f3fb2a8fe3b4ccd951c902cf0e0d84bb59ca
|
/nosocomialtransmission_runfile.R
|
11536b86b662f0b8405baf695d0ad7edb3e468f5
|
[] |
no_license
|
tm-pham/covid-19_nosocomialtransmission
|
f5d04f987530d556c33a37eed7b98b017271eb7d
|
67878b9feaf487404760dffac817025e7f4d05a8
|
refs/heads/master
| 2022-12-05T07:42:01.994047
| 2020-08-24T06:56:48
| 2020-08-24T06:56:48
| 265,420,009
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,307
|
r
|
nosocomialtransmission_runfile.R
|
# =============================================================================#
# NOSOCOMIAL TRANSMISSION
# Run file for STAN code
# =============================================================================#
library(rstan)
# NOTE(review): machine-specific absolute path; consider a relative path or
# here::here() so the script runs on other machines.
setwd("/Users/tm-pham/PhD/covid-19/nosocomialtransmission/stochasticepidemicmodel/stan")
sim_data <- readRDS("sim_data_v3.RDS")
# Data
# eps: small constant, presumably a numerical guard inside the Stan model --
# confirm against nosocomialtransmissionv3.stan.
eps <- 10^(-7)
pDis <- 10
pDis_obs <- 10
pDshape <- 10 # Shape for gamma distribution for dispersion parameter for transmission process
pDrate <- 10 # Rate for gamma distribution for dispersion parameter for transmission process
pDshape_obs <- 10 # Shape for gamma distribution for dispersion parameter for observation process
pDrate_obs <- 10 # Rate for gamma distribution for dispersion parameter for observation process
# f_mu <- c(0.05, 0.04, 0.0005, 0.0005, 0.001, 0.001) # Mean for normal distribution for transmission parameters
f_mu <- c(0.01, 0.005, 0.0005, 0.0001, 0.01, 0.001) # Mean for normal distribution for transmission parameters
f_sigma <- rep(min(f_mu), length(f_mu)) # Sigma for normal distribution for transmission parameters
# Append the priors/constants above to the simulated data list expected by
# the Stan model.
sim_data <- append(sim_data, list(eps=eps,
                                  pDis=pDis, pDis_obs=pDis_obs,
                                  pDshape=pDshape, pDrate=pDrate,
                                  pDshape_obs=pDshape_obs, pDrate_obs=pDrate_obs,
                                  f_mu=f_mu, f_sigma=f_sigma))
fit <- stan(
  file = "nosocomialtransmissionv3.stan", # Input model version here
  data = sim_data, # named list of data defined in metareg_define_data.R
  chains = 1, # number of Markov chains
  warmup = 100, # number of warmup iterations per chain
  iter = 1000, # total number of iterations per chain
  cores = 1, # number of cores (use one per chain)
  refresh = 1, # no of runs at which progress is shown
  control = list(max_treedepth = 15, adapt_delta=0.99)
)
# Alternative run and custom inits, kept for reference:
# fit <- stan(
#   file = "nosocomialtransmissionv3.stan", # Input model version here
#   data = sim_data, # named list of data defined in metareg_define_data.R
#   chains = 1, # number of Markov chains
#   warmup = 100, # number of warmup iterations per chain
#   iter = 1000, # total number of iterations per chain
#   cores = 1, # number of cores (use one per chain)
#   refresh = 1, # no of runs at which progress is shown
#   control = list(max_treedepth = 15, adapt_delta=0.99)
# )
#
# init_fn <- function(){
#   return(list(f_pU_hcw=f_mu[1],
#               f_pU_p=f_mu[2],
#               f_p_hcw=f_mu[3],
#               f_p_p=f_mu[4],
#               f_hcw_hcw=f_mu[5],
#               f_hcw_p=f_mu[6],
#               I_hcwU=sim_data$I_hcwU0_array,
#               I_pU=sim_data$I_pU0_array,
#               new_symptomatic_hcw=sim_data$new_symptomatic_hcw0,
#               new_symptomatic_pat=sim_data$new_symptomatic_pat0,
#               discharge_dead_pat=sim_data$discharged_dead_pat0)
#   )
# }
|
a6dcea3a1d50e2499085176d1fb8191a461c2d6a
|
104c538bdf45af6b466b2dd3a7f64f4541d1d711
|
/man/readDB.Rd
|
be2c4f22cc3e0238200ab22bb011004f2d149e9a
|
[] |
no_license
|
betonr/rPostgis
|
6b819d444a8e78ac1d58d7cda3523bd2c6d44ff4
|
5f4ce28ba35da810d9eaa24a0030360b409bed4e
|
refs/heads/master
| 2021-01-20T02:12:51.342364
| 2017-08-25T11:24:08
| 2017-08-25T11:24:08
| 89,390,184
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 546
|
rd
|
readDB.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ReadDB.R
\name{readDB}
\alias{readDB}
\title{readDB}
\usage{
readDB(objectInfo, date, type, uf)
}
\arguments{
\item{objectInfo}{(InfoDB) - Object with informations for connections}
\item{date}{(character) - number of previous days to search (Y-m-d)}
\item{type}{(character) - vehicle}
\item{uf}{(int) - UF desired}
}
\value{
Number of occurrences (int)
}
\description{
Function responsible for searching the database and
returning the number of occurrences on the selected date.
}
|
3b6847b579f2dc6c6ae36b626c9a9d32e9dec100
|
4e35775e2a3b6903b68ca5ae2ce0ecbd25b1b5a2
|
/tests/testthat/test_draw_posterior.R
|
4fa149669575a50aff94ac4b3dbcf05a7bc5bc9d
|
[
"MIT"
] |
permissive
|
FrankLef/eflRethinking
|
93ab747e7ebe93ec4ca3fe5e5a80e6952a45d04e
|
d9b2a868923134b3e549c96982f1b00092b0c3b2
|
refs/heads/main
| 2023-08-07T12:19:05.414807
| 2021-10-08T18:56:57
| 2021-10-08T18:56:57
| 410,892,718
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 848
|
r
|
test_draw_posterior.R
|
# declarations ------------------------------------------------------------
library(posterior, quietly = TRUE)
# Fixture: a stored model fit, expected to be an S4 object of class "map"
# (the original comments said "inla", but the assertions check "map").
m04m07ctr <- readRDS(test_path("testdata", "fits", "m04m07ctr.rds"))
# tests -------------------------------------------------------------------
# NOTE(review): the original file contained this test twice with an
# identical description and identical assertion; the duplicate was removed.
test_that("the stored model object is an S4 'map' fit", {
  expect_s4_class(m04m07ctr, "map")
})
test_that("draw_posterior_quap returns draws_rvars matching the model", {
  nsamples <- 3L
  samples <- draw_posterior_quap(m04m07ctr, n = nsamples)
  # Output must be a posterior::draws_rvars whose variables mirror the
  # model coefficients, with one iteration per requested sample.
  expect_s3_class(samples, "draws_rvars")
  expect_identical(variables(samples), names(m04m07ctr@coef))
  expect_equal(nvariables(samples), length(m04m07ctr@coef))
  expect_equal(niterations(samples), nsamples)
})
|
94f8875c42558712853e1832bcb2c930df185b7a
|
d859174ad3cb31ab87088437cd1f0411a9d7449b
|
/scripts/setup_switchr_environment.R
|
bcb4f47f2bfe140a978821d0617d7b8c8988f8d9
|
[] |
no_license
|
bhagwataditya/autonomics0
|
97c73d0a809aea5b4c9ef2bf3f886614eceb7a3c
|
c7ca7b69161e5181409c6b1ebcbeede4afde9974
|
refs/heads/master
| 2023-02-24T21:33:02.717621
| 2021-01-29T16:30:54
| 2021-01-29T16:30:54
| 133,491,102
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,816
|
r
|
setup_switchr_environment.R
|
# Setup 'switchr' infrastructure ------------------------------------------
# Switch to (creating if needed) the isolated "autonomics" package library.
switchr::switchTo("autonomics")
# Install/update gitted infrastructure ------------------------------------
# Deal with prerequisites
packages_prerequisite <- c(
  'magrittr',
  'stringi')
packages_to_install <- setdiff(
  packages_prerequisite,
  installed.packages())
if(length(packages_to_install) > 0){
  switchr::install_packages(
    packages_to_install)
}
## Don't use magrittr functionality prior to this!
library(magrittr)
# Discover dirname
# Split the current working directory into its path components.
split_wd <- getwd() %>%
  stringi::stri_split_fixed(.Platform$file.sep) %>%
  unlist()
# Index of the last path component named exactly "autonomics" -- max()
# handles nested directories that repeat the name.
base_i <- split_wd %>%
  stringi::stri_detect_regex('^autonomics$') %>%
  which() %>%
  max(na.rm = TRUE)
# Reassemble the path up to (and including) that component.
autonomics_base_dir <- split_wd %>%
  magrittr::extract(seq(base_i)) %>%
  paste(collapse = .Platform$file.sep)
# Install autonomics modules
## Order matters for interdependence!
autonomics_module_packages <- c(
  "autonomics.data",
  "autonomics.ora",
  "autonomics.integrate",
  "autonomics.find",
  "autonomics.preprocess",
  "autonomics.plot",
  "autonomics.import",
  "autonomics.explore",
  "autonomics.support",
  "autonomics.annotate",
  "autonomics")
## This takes LONG when no dependencies are present yet ...
# Build a switchr manifest pointing at the local module subdirectories, then
# install every package listed in it (the manifest object itself is piped
# through as `.` into install_packages).
switchr::makeManifest(
  name = autonomics_module_packages,
  url = autonomics_base_dir,
  subdir = autonomics_module_packages,
  type = 'local') %>% #,
  # branch = dplyr::case_when(
  #   . == 'autonomics.preprocess' ~ 'feature_sd_mean',
  #   . != 'autonomics.preprocess' ~ 'master')) %>%
  switchr::install_packages(
    slot(., 'manifest') %>%
      magrittr::extract2('name'),
    .)
# Update everything (also already present) --------------------------------
# NOTE(review): BiocInstaller::biocLite() is deprecated in current
# Bioconductor releases; BiocManager::install() is the modern equivalent.
BiocInstaller::biocLite(checkBuilt = TRUE, ask = FALSE)
|
a9885b0697fa9ec3d7e2d28990ffc4a91f6c3e00
|
1708c822fe65b55f5b5680536ab9cd2fda511a27
|
/11-IST719-InformationVisualization/Assignments/ThulasiRamRuppaKrishnan_LabWeek8.R
|
fbe7b2d40fd3f50bcc73fbcfe4b52722479e859b
|
[] |
no_license
|
truppakr/AppliedDataScience
|
bec73a851d2c46f00468c88f0e99db46e55aadf5
|
f70faf5c585e064cb54e03f236b731238cf2dfca
|
refs/heads/master
| 2022-09-15T18:15:28.788511
| 2020-05-31T22:38:46
| 2020-05-31T22:38:46
| 267,444,552
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,046
|
r
|
ThulasiRamRuppaKrishnan_LabWeek8.R
|
###########################################################################
#
# Author : Ram Krishnan
# Purpose: Week 8
# "Class Social Network Data (structure and Cleaning)"
# uses LINKS-421-719Networks.csv
# Nodes-421-719Network.csv
###########################################################################
library(igraph)
my.dir <- "C:\\Users\\rkrishnan\\Documents\\01 Personal\\MS\\IST719\\Data\\"
# NOTE(review): files carry a .xls extension but are read with read.csv();
# presumably they are CSV exports -- confirm against the data directory.
link.data <- read.csv(paste0(my.dir, "links-421-719network.xls"),
                      header = TRUE, stringsAsFactors = FALSE)
node.data <- read.csv(paste0(my.dir, "nodes-421-719network.xls"),
                      header = TRUE, stringsAsFactors = FALSE)
# Strip dots/spaces/hyphens so row labels and column labels line up exactly.
colnames(link.data) <- gsub("\\.", "", colnames(link.data))
link.data$X <- gsub(" |-", "", link.data$X)
cbind(link.data$X, colnames(link.data)[-1]) # visual alignment check
node.data$Name <- gsub(" |-", "", node.data$Name)
cbind(node.data$Name, link.data$X)          # visual alignment check
# Adjacency matrix: drop the name column, label rows like the columns.
M <- as.matrix(link.data[, -1])
rownames(M) <- colnames(M)
dim(M)
any(is.na(M))
M[is.na(M)] <- 0
# Bug fix: the original inspected `m[M>1]`, but `m` was never defined
# (R is case-sensitive); the intent was to list entries of M greater than 1.
M[M > 1]
g <- graph_from_adjacency_matrix(M)
##############################################################################################################
#
# The graph object and the first plot
#
##############################################################################################################
vcount(g)
ecount(g)
plot.igraph(g)
# simplify() removes self-loops and duplicate edges.
g <- simplify(g)
plot.igraph(g)
par(mar=c(0,0,0,0))
plot.igraph(g,edge.arrow.size=0, edge.arrow.width=0)
# Store arrow suppression on the graph itself so later plots inherit it.
E(g)$arrow.size <- 0
E(g)$arrow.width <- 0
plot.igraph(g)
g
V(g)$color <- "gold"
V(g)$frame.color <- "white"
V(g)$label.color <- "black"
E(g)$color <- "cadetblue"
V(g)$size <- 5
plot.igraph(g)
# Opens the help page for igraph plotting parameters.
?igraph.plotting()
E(g)$curved <- .4
###################################################################################
#
# Visualizing Centrality and Centrality measurement
#
###################################################################################
plot(degree(g))
par(mar= c(3,10,1,1))
barplot(sort(degree(g)), horiz= T, las = 2, main= "Number of Social Connections by Individual")
# Attach the three degree measures (total / out / in) as vertex attributes.
V(g)$degree <- degree(g)
V(g)$deg.out <- degree(g, mode="out")
V(g)$deg.in <- degree(g, mode="in")
barplot(V(g)$deg.out, horiz= T, las = 2
        , names.arg = V(g)$name
        , main= "Most friendliness degree by Indiviudal")
barplot(V(g)$deg.in, horiz= T, las = 2
        , names.arg = V(g)$name
        , main= "Most Important degree by Indiviudal")
#g.bak <- g
#g <- as.undirected(g)
#g <- g.bak
V(g)$close <- closeness(g, normalized = T, mode = "all")
V(g)$bet <- betweenness(g,directed = F)
# plotrix::rescale maps centrality values onto the color-ramp/size ranges.
library(plotrix)
my.pallet <- colorRampPalette(c("steelblue1","violet","tomato","red","red"))
V(g)$color <- rev(my.pallet(200))[round(1+rescale(V(g)$close,c(1,199)),0)]
plot.igraph(g)
V(g)$size <- 2 + rescale(V(g)$degree, c(0,13))
V(g)$label.cex <- .7+ rescale(V(g)$bet,c(0,1.25))
##########################################################################
#
# Visualizing Social Network Structures
#
##########################################################################
# Copy node attributes onto the graph; the cbind is a visual order check.
cbind(V(g)$name, node.data$Name)
V(g)$class <- node.data$Class
V(g)$country <- node.data$Country
V(g)$year <- node.data$year
# Presumably an isolate/outlier vertex being dropped -- confirm.
g <- delete_vertices(g,"JoHunter")
plot.igraph(g)
V(g)$shape <- "circle"
V(g)$shape[V(g)$class =="Wednesday"] <- "square"
V(g)$shape[V(g)$class =="Both"] <- "rectangle"
plot.igraph(g)
V(g)$color <- "gold"
V(g)$color[V(g)$country=="India"] <- "springgreen4"
V(g)$color[V(g)$country=="China"] <- "red"
V(g)$color[V(g)$country=="Both"] <- "purple"
plot.igraph(g)
V(g)$label.color <- "blue"
V(g)$label.color[V(g)$year==1] <- "black"
plot.igraph(g)
# Community detection on the undirected projection of the graph.
fc <- cluster_fast_greedy(as.undirected(g))
print(modularity(fc))
membership(fc)
V(g)$cluster <- membership(fc)
length(fc)
sizes(fc)
par(mar=c(1,1,1,1))
plot_dendrogram(fc, palette = rainbow(7), main="Social Network Cluster Dendrogram")
##################################################################################
#
# Visualizing Social Network Structures
# use ist719NetworkObjects.rda
#
##################################################################################
my.dir <- "C:\\Users\\rkrishnan\\Documents\\01 Personal\\MS\\IST719\\Data\\"
load(paste0(my.dir,"ist719networkobject.rda"))
par(mar = c(0,0,0,0))
plot.igraph(g)
# Try several layouts by writing coordinates into the vertex attributes.
l <- layout_in_circle(g)
V(g)$x <- l[,1]
V(g)$y <- l[,2]
plot.igraph(g)
l <- layout_with_fr(g)
V(g)$x <- l[,1]
V(g)$y <- l[,2]
plot.igraph(g)
l <- layout_as_star(g, center = "LeelaDeshmukh")
V(g)$x <- l[,1]
V(g)$y <- l[,2]
plot.igraph(g)
E(g)$color <- "gray"
E(g)[from("LeelaDeshmukh")]$color <- "red"
# NOTE(review): this layout is recomputed but V(g)$x/y are not updated, so
# the following plot still uses the previous star coordinates.
l <- layout_as_star(g, center = "LeelaDeshmukh")
plot.igraph(g)
l <- layout_with_kk(g)
V(g)$x <- l[,1]
V(g)$y <- l[,2]
plot.igraph(g)
|
13f4cf4312f5c57a41d8501dee06ba47b660fc30
|
f3ca0a4a2391f3e226b14b54f367a9797fe2d275
|
/man/a_in_mem_asset.Rd
|
3e41c6a2320a92feab94cf42d57f6e2d8a3ccc1d
|
[
"MIT"
] |
permissive
|
Rukshani/r2vr
|
a8b9903f5876f9d679824b27e3376a514695a26c
|
8d5e9630eb7538121f01951045e174ec235b043c
|
refs/heads/master
| 2023-07-10T12:43:30.438378
| 2021-03-28T07:22:43
| 2021-03-28T07:22:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,544
|
rd
|
a_in_mem_asset.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/in_mem_asset.R
\name{a_in_mem_asset}
\alias{a_in_mem_asset}
\title{a_in_mem_asset()}
\usage{
a_in_mem_asset(.data, src, .parts = NULL, ...)
}
\arguments{
\item{.data}{a string containing file content or a list of such strings.}
\item{src}{a realistic path to a notional file. The path is used to set the
route the used by the scene server for the in memory asset. The file
extension of the is used to set the HTTP content header mime type.}
\item{.parts}{additional notional files referenced in the content of `data`.
Unlike 'src' the names used here matter, e.g. if the 'src' file is a model
that references textures, those textures need to be mapped by relative
path inside `src` the paths provided for `parts` must be compatible with
these. When `data` is a list, elements after the first are assumed to be
the in-memory content of `parts`, matched by list position.}
\item{...}{additional parameters passed to `a_asset()`}
}
\value{
an asset object.
}
\description{
Create an A-Frame asset hosted in R's memory.
}
\details{
To understand this object you will almost certainly need to familiarise yourself with its base class 'A_Asset' first. See \code{\link{a_asset}}.
This function is used to create a special kind of A-Frame asset that is only
notionally backed by a file. The primary use of this is for passing CRAN's
vignette checks, testing, and demonstration. From the end-user's perspective
an in-memory-asset appears exactly like a regular asset file. However the
scene creates a route to `src` and `parts` that reference the contents of
`data` rather than files specified by the paths in `src` and `parts`.
An example use case: serving a JSON transform of a built-in R data frame as an
asset without writing an intermediate JSON file to disk.
It is still necessary to supply a 'real looking' path to the asset. The path must be
relative to the current R working directory, but other than that it doesn't
matter what it is. The most important aspect of this notional path is the
file extension, since this is used to determine the HTTP mime-type header
when the asset is served.
`data` is either a length one character vector or a list of length one
character vectors. It must have the same length as the number of paths
supplied in `src` + `parts`. The character vectors are strings that contain
the entire contents of a notional file. For non-text files they would need
to contain the equivalent encoded text of calling readr::read_file_raw().
}
|
b55943068522360e5e8e410ae14e5c8c55150830
|
f4296a6ce342a35f15b51b0555ea000e10414391
|
/05 Analysis Function Calculate Predicted Building.R
|
01dee41d859b1b9bb73e2d875469652a31459738
|
[] |
no_license
|
ReneVanDijkBDH/Ubiqum_wifi
|
23ab924ccd9c8b88dc7dec57144c76de52b6369c
|
6c6419bc56d809c5aadece96f67841a952d8b12f
|
refs/heads/master
| 2022-02-22T03:46:24.335269
| 2019-10-23T14:59:08
| 2019-10-23T14:59:08
| 212,550,364
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,702
|
r
|
05 Analysis Function Calculate Predicted Building.R
|
#########################################################################
## Author: René van Dijk
## Creation: 10-10-2019
## Purpose: calculates the expected building based on predicted Longitude & latitude:
## 1) Add predicted Longitude and Latitude to testing set
## 2) Calculate errors compared to actual values
## 3) Calculate expected building: based on predicted Long & Lat.
## uses a (linear) formula to determine location of buildings
## 4) Calculate error of predicted building vs actual building
#########################################################################
# Append predicted coordinates to the testing set, compute coordinate
# errors, derive the expected building from the predicted location, and
# flag building misclassifications (0 = correct, 1 = wrong).
CalculatePredictedBuilding <- function(testing, PredictLong, PredictLat){
  # Start from the testing set and attach the model predictions.
  results <- testing
  results$PredictLong <- PredictLong
  results$PredictLat <- PredictLat
  # Signed coordinate errors (actual minus predicted).
  results$LongError <- results$LONGITUDE - results$PredictLong
  results$LatError <- results$LATITUDE - results$PredictLat
  # Two parallel lines (slope 1.506) separate the three buildings. Counting
  # how many boundaries the predicted point lies above gives building
  # 0 (above both), 1 (above the lower one only) or 2 (below both).
  upper_boundary <- 4876350 + results$PredictLong * 1.506
  lower_boundary <- 4876000 + results$PredictLong * 1.506
  results$PredictBuilding <- 2 -
    (results$PredictLat > lower_boundary) -
    (results$PredictLat > upper_boundary)
  # 0 when the predicted building matches the actual one, 1 otherwise.
  results$BuildingError <- as.numeric(results$BUILDINGID != results$PredictBuilding)
  return(results)
}
|
0b4a5ed801a2bff3c1ca3bfc87ab60fa1925283b
|
604f064ac46806c8aaba71afc42fc6f8f6d2a1e0
|
/tests/testthat/test-getLocations.R
|
873d4b073d1897b2f61641314578e6ed792ea185
|
[
"MIT"
] |
permissive
|
johndharrison/webpagetestr
|
d2278dcf271dbe7f800e95569829fff045689d43
|
48ba992bfa4ebf3edf6b8767160c1a2e92f65595
|
refs/heads/master
| 2021-01-13T10:26:49.749120
| 2016-11-08T16:34:32
| 2016-11-08T16:34:32
| 72,283,586
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 938
|
r
|
test-getLocations.R
|
context("getLocations")
# Happy path: a locations object comes back from the configured server.
test_that("canGetLocations", {
  WPT <- server()
  locs <- getLocations(WPT)
  expect_s3_class(locs, "locations")
})
# Input validation: requestId must be a string or NULL.
test_that("errorOnInvalidRequestIdGetLocations", {
  WPT <- server()
  expect_error(getLocations(WPT, 1), "requestId is not a string or null")
})
# Simulate an HTTP-level failure by mocking httr, so no network is needed;
# the mocked POST returns a 400 payload and stop_for_status is neutered.
test_that("canGetErrorFromGetLocations", {
  WPT <- server()
  with_mock(
    `httr::POST` = function(...){
      list(statusCode = 400L,
           statusText = "Some location error")},
    `httr::stop_for_status` = function(...){},
    `httr::content` = function(x){x},
    expect_error(
      locs <- getLocations(WPT), "Server returned a statusCode = 400"
    )
  )
})
# dryRun = TRUE should return the request URL instead of performing it.
test_that("canDryRunGetLocations", {
  WPT <- server()
  dr <- getLocations(WPT, "wwww", dryRun = TRUE)
  expect_identical(dr, paste0(WPT[["url"]],
                              "/getLocations.php?r=wwww&k=",
                              Sys.getenv("WPTKEY"), "&f=json"))
})
|
f0e66f008ae6345948398b0a8af2e721aa41fce5
|
705255987191f8df33b8c2a007374f8492634d03
|
/examples/Rules-class-NextBestMinDist.R
|
cf61db54aa62fd25bc103dbfb6b8681a393e80a0
|
[] |
no_license
|
Roche/crmPack
|
be9fcd9d223194f8f0e211616c8b986c79245062
|
3d897fcbfa5c3bb8381da4e94eb5e4fbd7f573a4
|
refs/heads/main
| 2023-09-05T09:59:03.781661
| 2023-08-30T09:47:20
| 2023-08-30T09:47:20
| 140,841,087
| 24
| 9
| null | 2023-09-14T16:04:51
| 2018-07-13T11:51:52
|
HTML
|
UTF-8
|
R
| false
| false
| 176
|
r
|
Rules-class-NextBestMinDist.R
|
# In the example below, the MTD is defined as the dose whose toxicity rate
# has the minimal distance to the target of 30%.
# NextBestMinDist is an S4 rule class; `target` is the aimed-for toxicity
# probability used when selecting the next best dose.
next_best_min_dist <- NextBestMinDist(target = 0.3)
|
2305596394bd887bfce2b70499d64d51bc1ebf31
|
48f83c8d49a4abe93213daf2068f27180633a9d3
|
/tests/testthat/test-predict_asymmetry.R
|
c631dc91eaea8949dfc5ab863566b4632e467d7b
|
[] |
no_license
|
LCBC-UiO/BayesianLaterality
|
71a051964d1f93cdd4f821fc3188b76e400c3cf7
|
4ed929c363995a7b68240db199d823e88fdf852b
|
refs/heads/master
| 2023-07-03T09:34:41.426732
| 2021-08-08T20:06:05
| 2021-08-08T20:06:05
| 171,873,953
| 0
| 0
| null | 2021-08-08T20:06:05
| 2019-02-21T13:10:52
|
R
|
UTF-8
|
R
| false
| false
| 210
|
r
|
test-predict_asymmetry.R
|
# Regression test for predict_dominance(): a single left-handed subject
# with a dichotic-listening score of -20.
# Fix: `stringsAsFactors` is a data.frame() argument; passing it to
# dplyr::tibble() silently created a spurious logical column named
# "stringsAsFactors", so it has been removed.
data <- dplyr::tibble(
  listening = c(-20),
  handedness = "left"
)
df <- predict_dominance(data)
# The exact probabilities over the three dominance classes pin the model's
# current behavior.
expect_equal(
  df$probability,
  c(0.476309298919027, 0.523690701080972, 0)
)
|
184300e0084455882208d221a975281e4c2a4ff9
|
b1d7cd1c5f99d510c03ef8f792f5e59a86e82292
|
/instalaPaquetes.R
|
790b1fae15236663a07e9ad9904ae4bfe1209363
|
[] |
no_license
|
diegoosorio/iData
|
9322f7b88bec3e85b96a2161d9ce8f5cdd9810ee
|
ee3f56a5f1fc82d4142cd0cc33f7f5fc0d0ad748
|
refs/heads/master
| 2020-04-05T14:30:25.641130
| 2018-11-15T01:36:12
| 2018-11-15T01:36:12
| 156,932,534
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
R
| false
| false
| 1,663
|
r
|
instalaPaquetes.R
|
# -----------------------------------------------------------------
# Installation of the packages required for the course
# -----------------------------------------------------------------
# Returns TRUE when `paquete` is already installed in any library path.
is.installed <- function(paquete) is.element(
  paquete, installed.packages())
# Quick interactive checks (results are printed, not used programmatically).
is.installed('data.table')
is.installed('DataExplorer')
is.installed('dplyr')
is.installed('anytime')
# Packages to install when missing. A single vector plus a loop replaces the
# twenty near-identical if-blocks of the original; adding a package is now a
# one-line change and the install order is preserved.
paquetes <- c(
  'DataExplorer',
  'data.table',
  'sos',
  'XLConnect',
  'xlsx',
  'foreign',
  'RWeka',
  'RCurl',
  'Hmisc',
  'ellipse',
  'animation',
  'ggplot2',
  'circlize',
  'fmsb',
  'scatterplot3d',
  'lattice',
  'TurtleGraphics',
  'xtable',
  'texreg'
)
for (paquete in paquetes) {
  if (!is.installed(paquete)) {
    install.packages(paquete)
  }
}
|
63ecb4378db05f0652e6674c5c01c83033f5755a
|
7aa1bc3dedd865bf833294c63e578d72392f7cc8
|
/deaPDF.R
|
ca61087aed404a018d010b5226eec879bb42c89b
|
[] |
no_license
|
SophiaLC/KSSP_Data_Quality
|
8724a1544b285c4aceeb510f829aadc1d5957499
|
cd9ec42d5167a49449b457d2f6858e02faeb144c
|
refs/heads/master
| 2020-03-22T09:51:36.959338
| 2018-09-13T19:30:54
| 2018-09-13T19:30:54
| 139,865,192
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 26,128
|
r
|
deaPDF.R
|
#### prep ####
# NOTE(review): setwd() with a user-specific path limits portability.
setwd("~/mhwii/misc")
library(tabulizer)
library(dplyr)
library(tidyr)
library(magrittr)
library(stringr)
#### notes ####
# not including state totals, because that is at a different level of analysis than zip
# the different matrices on different pages get read in differently, so I had to change the code slightly for every page
# thus, this code is not as readily applicable to another .pdf or state, but the skeleton here can be adapted
# NOTE(review): the per-page read -> subset -> rename -> gather pattern below
# is a candidate for a helper function; tidyr::gather() is superseded by
# pivot_longer() in modern tidyr.
#### pg. 7, amphetamine ####
pg7 <- extract_tables("report_yr_2016.pdf", pages=7) # read pdf into matrix
pg7 <- pg7[[1]][c(1:19),-c(1,3,6)] # getting just kansas
pg7 <- as.data.frame(pg7) # convert to data frame
colnames(pg7) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg7$drug <- c("amphetamine") # enter drug type name
pg7[,c(2:6)] <- lapply(pg7[,c(2:6)], function(x) str_replace_all(x, ",", "")) # remove commas
pg7 <- gather(pg7, quarter, grams, q1:total) # convert to long format
#### pg. 23, dl-methamphetamine ####
pg23 <- extract_tables("report_yr_2016.pdf", pages=23) # read pdf into matrix
pg23 <- pg23[[2]][c(6,7),-c(1,3,5,7,9,11)] # getting just kansas
pg23 <- as.data.frame(pg23) # convert to data frame
colnames(pg23) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg23$drug <- c("dl-methamphetamine") # enter drug type name
pg23 <- gather(pg23, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(pg7, pg23) # combine data thus far
#### pg. 32, d-methamphetamine ####
pg32 <- extract_tables("report_yr_2016.pdf", pages=32) # read pdf into matrix
pg32 <- pg32[[2]][c(7:14),c(2,4,6,8,9,12)] # getting just kansas
pg32 <- as.data.frame(pg32) # convert to data frame
colnames(pg32) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg32$drug <- c("d-methamphetamine") # enter drug type name
pg32 <- gather(pg32, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg32) # combine data thus far
#### pg. 46, lisdexamfetamine ####
pg46 <- extract_tables("report_yr_2016.pdf", pages=46) # read pdf into matrix
pg46 <- pg46[[1]][c(46:57),-c(1,3,6)] # getting just kansas
pg46 <- as.data.frame(pg46) # convert to data frame
colnames(pg46) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg46$drug <- c("lisdexamfetamine") # enter drug type name
pg46[,c(2:6)] <- lapply(pg46[,c(2:6)], function(x) str_replace_all(x, ",", "")) # remove commas
pg46 <- gather(pg46, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg46) # combine data thus far
#### pg. 47, lisdexamfetamine ####
# Continuation of the same drug's table on the following page.
pg47 <- extract_tables("report_yr_2016.pdf", pages=47) # read pdf into matrix
pg47 <- pg47[[1]][c(1:7),-c(1)] # getting just kansas
pg47 <- as.data.frame(pg47) # convert to data frame
colnames(pg47) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg47$drug <- c("lisdexamfetamine") # enter drug type name
pg47[,c(2:6)] <- lapply(pg47[,c(2:6)], function(x) str_replace_all(x, ",", "")) # remove commas
pg47 <- gather(pg47, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg47) # combine data thus far
#### pg. 66, methylphenidate ####
pg66 <- extract_tables("report_yr_2016.pdf", pages=66) # read pdf into matrix
pg66 <- pg66[[1]][c(29:45),-c(1,3,6)] # getting just kansas
pg66 <- as.data.frame(pg66) # convert to data frame
colnames(pg66) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg66$drug <- c("methylphenidate") # enter drug type name
pg66[,c(2:6)] <- lapply(pg66[,c(2:6)], function(x) str_replace_all(x, ",", "")) # remove commas
pg66 <- gather(pg66, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg66) # combine data thus far
#### pg. 66b, methylphenidate ####
# Second table on the same page ([[2]]); pg66 is intentionally reused after
# the first chunk has already been bound into the summary.
pg66 <- extract_tables("report_yr_2016.pdf", pages=66) # read pdf into matrix
pg66 <- pg66[[2]][c(1,2),-c(1,3,6)] # getting just kansas
pg66 <- as.data.frame(pg66) # convert to data frame
colnames(pg66) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg66$drug <- c("methylphenidate") # enter drug type name
pg66[,c(2:6)] <- lapply(pg66[,c(2:6)], function(x) str_replace_all(x, ",", "")) # remove commas
pg66 <- gather(pg66, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg66) # combine data thus far
#### pg. 84, barbaturicacid ####
pg84 <- extract_tables("report_yr_2016.pdf", pages=84) # read pdf into matrix
pg84 <- pg84[[2]][c(32:38),c(2,4,6,8,10,12)] # getting just kansas
pg84 <- as.data.frame(pg84) # convert to data frame
colnames(pg84) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg84$drug <- c("barbaturicacid") # enter drug type name
pg84 <- gather(pg84, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg84) # combine data thus far
#### pg. 92, amobarbital ####
pg92 <- extract_tables("report_yr_2016.pdf", pages=92) # read pdf into matrix
pg92 <- pg92[[2]][c(10,11),c(2,4,6,8,10,12)] # getting just kansas
pg92 <- as.data.frame(pg92) # convert to data frame
colnames(pg92) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg92$drug <- c("amobarbital") # enter drug type name
pg92 <- gather(pg92, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg92) # combine data thus far
#### pg. 102, butalbital ####
pg102 <- extract_tables("report_yr_2016.pdf", pages=102) # read pdf into matrix
pg102 <- pg102[[1]][c(7:22),c(2,4,6,8,9,11)] # getting just kansas
pg102 <- as.data.frame(pg102) # convert to data frame
colnames(pg102) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg102$drug <- c("butalbital") # enter drug type name
pg102[,c(2:6)] <- lapply(pg102[,c(2:6)], function(x) str_replace_all(x, ",", "")) # remove commas
pg102 <- gather(pg102, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg102) # combine data thus far
#### pg. 102b, butalbital ####
pg102 <- extract_tables("report_yr_2016.pdf", pages=102) # read pdf into matrix
pg102 <- pg102[[2]][c(1:3),c(2,4,6,8,9,11)] # getting just kansas
pg102 <- as.data.frame(pg102) # convert to data frame
colnames(pg102) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg102$drug <- c("butalbital") # enter drug type name
pg102[,c(2:6)] <- lapply(pg102[,c(2:6)], function(x) str_replace_all(x, ",", "")) # remove commas
pg102 <- gather(pg102, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg102) # combine data thus far
#### pg. 121, pentobarbital ####
pg121 <- extract_tables("report_yr_2016.pdf", pages=121) # read pdf into matrix
pg121 <- pg121[[1]][c(23:41),-c(1,3,6)] # getting just kansas
pg121 <- as.data.frame(pg121) # convert to data frame
colnames(pg121) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg121$drug <- c("pentobarbital") # enter drug type name
pg121[,c(2:6)] <- lapply(pg121[,c(2:6)], function(x) str_replace_all(x, ",", "")) # remove commas
pg121 <- gather(pg121, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg121) # combine data thus far
#### pg. 135, secobarbital ####
pg135 <- extract_tables("report_yr_2016.pdf", pages=135) # read pdf into matrix
pg135 <- pg135[[2]][c(13:15),c(2,4,6,8,10,12)] # getting just kansas
pg135 <- as.data.frame(pg135) # convert to data frame
colnames(pg135) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg135$drug <- c("secobarbital") # enter drug type name
pg135 <- gather(pg135, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg135) # combine data thus far
#### pg. 147, cocaine ####
pg147 <- extract_tables("report_yr_2016.pdf", pages=147) # read pdf into matrix
pg147 <- pg147[[2]][c(25:41),-c(1,3,6,9)] # getting just kansas
pg147 <- as.data.frame(pg147) # convert to data frame
colnames(pg147) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg147$drug <- c("cocaine") # enter drug type name
pg147[,c(2:6)] <- lapply(pg147[,c(2:6)], function(x) str_replace_all(x, ",", "")) # remove commas
pg147 <- gather(pg147, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg147) # combine data thus far
#### pg. 148, cocaine ####
pg148 <- extract_tables("report_yr_2016.pdf", pages=148) # read pdf into matrix
pg148 <- pg148[[1]][c(1:2),-c(1,3,6,9)] # getting just kansas
pg148 <- as.data.frame(pg148) # convert to data frame
colnames(pg148) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg148 <- pg148[-2,]
pg148$drug <- c("cocaine") # enter drug type name
pg148 <- gather(pg148, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg148) # combine data thus far
#### pg. 166, codeine ####
pg166 <- extract_tables("report_yr_2016.pdf", pages=166) # read pdf into matrix
pg166 <- pg166[[1]][c(1:19),-c(1,3,6)] # getting just kansas
pg166 <- as.data.frame(pg166) # convert to data frame
colnames(pg166) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg166$drug <- c("codeine") # enter drug type name
pg166[,c(2:6)] <- lapply(pg166[,c(2:6)], function(x) str_replace_all(x, ",", "")) # remove commas
pg166 <- gather(pg166, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg166) # combine data thus far
#### pg. 185, buprenorphine ####
pg185 <- extract_tables("report_yr_2016.pdf", pages=185) # read pdf into matrix
pg185 <- pg185[[2]][c(17:32),-c(1,3,6)] # getting just kansas
pg185 <- as.data.frame(pg185) # convert to data frame
colnames(pg185) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg185$drug <- c("buprenorphine") # enter drug type name
pg185[,c(2:6)] <- lapply(pg185[,c(2:6)], function(x) str_replace_all(x, ",", "")) # remove commas
pg185 <- gather(pg185, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg185) # combine data thus far
#### pg. 202, dihydrocodeine ####
pg202 <- extract_tables("report_yr_2016.pdf", pages=202) # read pdf into matrix
pg202 <- pg202[[2]][c(9,10),c(2,4,6,8,10,12)] # getting just kansas
pg202 <- as.data.frame(pg202) # convert to data frame
colnames(pg202) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg202$drug <- c("dihydrocodeine") # enter drug type name
pg202 <- gather(pg202, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg202) # combine data thus far
#### pg. 215 oxycodone ####
pg215 <- extract_tables("report_yr_2016.pdf", pages=215) # read pdf into matrix
pg215 <- pg215[[1]][c(6:12),-c(1,3,6)] # getting just kansas
pg215 <- as.data.frame(pg215) # convert to data frame
colnames(pg215) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg215$drug <- c("oxycodone") # enter drug type name
pg215[,c(2:6)] <- lapply(pg215[,c(2:6)], function(x) str_replace_all(x, ",", "")) # remove commas
pg215 <- gather(pg215, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg215) # combine data thus far
#### pg. 215b oxycodone ####
pg215 <- extract_tables("report_yr_2016.pdf", pages=215) # read pdf into matrix
pg215 <- pg215[[2]][c(1:12),-c(1,3,6)] # getting just kansas
pg215 <- as.data.frame(pg215) # convert to data frame
colnames(pg215) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg215$drug <- c("oxycodone") # enter drug type name
pg215[,c(2:6)] <- lapply(pg215[,c(2:6)], function(x) str_replace_all(x, ",", "")) # remove commas
pg215 <- gather(pg215, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg215) # combine data thus far
#### pg. 234 hydromorphone ####
pg234 <- extract_tables("report_yr_2016.pdf", pages=234) # read pdf into matrix
pg234 <- pg234[[1]][c(44:50),-c(1,3,6)] # getting just kansas
pg234 <- as.data.frame(pg234) # convert to data frame
colnames(pg234) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg234$drug <- c("hydromorphone") # enter drug type name
pg234[,c(2:6)] <- lapply(pg234[,c(2:6)], function(x) str_replace_all(x, ",", "")) # remove commas
pg234 <- gather(pg234, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg234) # combine data thus far
#### pg. 234b hydromorphone ####
pg234 <- extract_tables("report_yr_2016.pdf", pages=234) # read pdf into matrix
pg234 <- as.data.frame(pg234[[2]]) # convert to data frame
colnames(pg234) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg234$drug <- c("hydromorphone") # enter drug type name
pg234[,c(2:6)] <- lapply(pg234[,c(2:6)], function(x) str_replace_all(x, ",", "")) # remove commas
pg234 <- gather(pg234, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg234) # combine data thus far
#### pg. 235 hydromorphone ####
pg235 <- extract_tables("report_yr_2016.pdf", pages=235) # read pdf into matrix
pg235 <- pg235[[1]][c(1:8),-c(1,3,6)] # getting just kansas
pg235 <- as.data.frame(pg235) # convert to data frame
colnames(pg235) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg235$drug <- c("hydromorphone") # enter drug type name
pg235[,c(2:6)] <- lapply(pg235[,c(2:6)], function(x) str_replace_all(x, ",", "")) # remove commas
pg235 <- gather(pg235, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg235) # combine data thus far
#### pg. 254 hydrocodone ####
pg254 <- extract_tables("report_yr_2016.pdf", pages=254) # read pdf into matrix
pg254 <- pg254[[1]][c(24:30),-c(1,3,6)] # getting just kansas
pg254 <- as.data.frame(pg254) # convert to data frame
colnames(pg254) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg254$drug <- c("hydrocodone") # enter drug type name
pg254[,c(2:6)] <- lapply(pg254[,c(2:6)], function(x) str_replace_all(x, ",", "")) # remove commas
pg254 <- gather(pg254, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg254) # combine data thus far
#### pg. 254b hydrocodone ####
pg254 <- extract_tables("report_yr_2016.pdf", pages=254) # read pdf into matrix
pg254 <- pg254[[2]][c(1:12),-c(1,3,6)] # getting just kansas
pg254 <- as.data.frame(pg254) # convert to data frame
colnames(pg254) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg254$drug <- c("hydrocodone") # enter drug type name
pg254[,c(2:6)] <- lapply(pg254[,c(2:6)], function(x) str_replace_all(x, ",", "")) # remove commas
pg254 <- gather(pg254, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg254) # combine data thus far
#### pg. 270 levorphanol ####
pg270 <- extract_tables("report_yr_2016.pdf", pages=270) # read pdf into matrix
pg270 <- pg270[[2]][c(19:23),c(2,4,6,8,9,12)] # getting just kansas
pg270 <- as.data.frame(pg270) # convert to data frame
colnames(pg270) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg270$drug <- c("levorphanol") # enter drug type name
pg270 <- gather(pg270, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg270) # combine data thus far
#### pg. 282 meperidine ####
pg282 <- extract_tables("report_yr_2016.pdf", pages=282) # read pdf into matrix
pg282 <- pg282[[1]][c(22:38),-c(1,3,6)] # getting just kansas
pg282 <- as.data.frame(pg282) # convert to data frame
colnames(pg282) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg282$drug <- c("meperidine") # enter drug type name
pg282[,c(2:6)] <- lapply(pg282[,c(2:6)], function(x) str_replace_all(x, ",", "")) # remove commas
pg282 <- gather(pg282, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg282) # combine data thus far
#### pg. 301 methadone ####
pg301 <- extract_tables("report_yr_2016.pdf", pages=301) # read pdf into matrix
pg301 <- pg301[[2]][c(22:34),-c(1,3,6)] # getting just kansas
pg301 <- as.data.frame(pg301) # convert to data frame
colnames(pg301) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg301$drug <- c("methadone") # enter drug type name
pg301[,c(2:6)] <- lapply(pg301[,c(2:6)], function(x) str_replace_all(x, ",", "")) # remove commas
pg301 <- gather(pg301, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg301) # combine data thus far
#### pg. 302 methadone ####
pg302 <- extract_tables("report_yr_2016.pdf", pages=302) # read pdf into matrix
pg302 <- pg302[[1]][c(1:6),-c(1,3,6)] # getting just kansas
pg302 <- as.data.frame(pg302) # convert to data frame
colnames(pg302) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg302$drug <- c("methadone") # enter drug type name
pg302[,c(2:6)] <- lapply(pg302[,c(2:6)], function(x) str_replace_all(x, ",", "")) # remove commas
pg302 <- gather(pg302, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg302) # combine data thus far
#### pg. 321 morphine ####
pg321 <- extract_tables("report_yr_2016.pdf", pages=321) # read pdf into matrix
pg321 <- pg321[[1]][c(26:44),-c(1,3,6)] # getting just kansas
pg321 <- as.data.frame(pg321) # convert to data frame
colnames(pg321) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg321$drug <- c("morphine") # enter drug type name
pg321[,c(2:6)] <- lapply(pg321[,c(2:6)], function(x) str_replace_all(x, ",", "")) # remove commas
pg321 <- gather(pg321, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg321) # combine data thus far
#### pg. 336 opium tincture ####
pg336 <- extract_tables("report_yr_2016.pdf", pages=336) # read pdf into matrix
pg336 <- pg336[[1]][c(8,9),c(2,4,6,8,9,11)] # getting just kansas
pg336 <- as.data.frame(pg336) # convert to data frame
colnames(pg336) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg336$drug <- c("opiumtincture") # enter drug type name
pg336 <- gather(pg336, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg336) # combine data thus far
#### pg. 344 opium powdered ####
pg344 <- extract_tables("report_yr_2016.pdf", pages=344) # read pdf into matrix
pg344 <- pg344[[1]][c(33:39),-c(1,3,5,7)] # getting just kansas
pg344 <- as.data.frame(pg344) # convert to data frame
colnames(pg344) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg344$drug <- c("opiumpowdered") # enter drug type name
pg344[,c(2:6)] <- lapply(pg344[,c(2:6)], function(x) str_replace_all(x, ",", "")) # remove commas
pg344 <- gather(pg344, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg344) # combine data thus far
#### pg. 344b opium powdered ####
pg344 <- extract_tables("report_yr_2016.pdf", pages=344) # read pdf into matrix
pg344 <- pg344[[2]][-13,-1] # getting just kansas
pg344 <- as.data.frame(pg344) # convert to data frame
colnames(pg344) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg344$drug <- c("opiumpowdered") # enter drug type name
pg344 <- gather(pg344, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg344) # combine data thus far
#### pg. 363 oxymorphone ####
pg363 <- extract_tables("report_yr_2016.pdf", pages=363) # read pdf into matrix
pg363 <- pg363[[2]][c(5:23),-c(1,3,6)] # getting just kansas
pg363 <- as.data.frame(pg363) # convert to data frame
colnames(pg363) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg363$drug <- c("oxymorphone") # enter drug type name
pg363[,c(2:6)] <- lapply(pg363[,c(2:6)], function(x) str_replace_all(x, ",", "")) # remove commas
pg363 <- gather(pg363, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg363) # combine data thus far
#### pg. 378 noroxymorphone ####
pg378 <- extract_tables("report_yr_2016.pdf", pages=378) # read pdf into matrix
pg378 <- pg378[[1]][c(10,11),c(2,4,6,8,9,11)] # getting just kansas
pg378 <- as.data.frame(pg378) # convert to data frame
colnames(pg378) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg378 <- pg378[-2,]
pg378$drug <- c("noroxymorphone") # enter drug type name
pg378 <- gather(pg378, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg378) # combine data thus far
#### pg. 384 alfentanil ####
pg384 <- extract_tables("report_yr_2016.pdf", pages=384) # read pdf into matrix
pg384 <- pg384[[1]][c(32,33),c(2,4,6,8,10,12)] # getting just kansas
pg384 <- as.data.frame(pg384) # convert to data frame
pg384 <- pg384[-1,]
colnames(pg384) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg384$drug <- c("alfentanil") # enter drug type name
pg384 <- gather(pg384, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg384) # combine data thus far
#### pg. 384b alfentanil ####
pg384 <- extract_tables("report_yr_2016.pdf", pages=384) # read pdf into matrix
pg384 <- pg384[[2]][c(1:6),c(2,4,6,8,10,12)] # getting just kansas
pg384 <- as.data.frame(pg384) # convert to data frame
pg384 <- pg384[-1,]
colnames(pg384) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg384$drug <- c("alfentanil") # enter drug type name
pg384 <- gather(pg384, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg384) # combine data thus far
#### pg. 396 remifentanil ####
pg396 <- extract_tables("report_yr_2016.pdf", pages=396) # read pdf into matrix
pg396 <- pg396[[2]][c(16:26),c(2,4,6,8,10,12)] # getting just kansas
pg396 <- as.data.frame(pg396) # convert to data frame
colnames(pg396) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg396$drug <- c("remifentanil") # enter drug type name
pg396 <- gather(pg396, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg396) # combine data thus far
#### pg. 411 sufentanil base ####
pg411 <- extract_tables("report_yr_2016.pdf", pages=411) # read pdf into matrix
pg411 <- pg411[[1]][c(47:59),c(2,4,6,8,10,12)] # getting just kansas
pg411 <- as.data.frame(pg411) # convert to data frame
colnames(pg411) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg411$drug <- c("sufentanilbase") # enter drug type name
pg411 <- gather(pg411, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg411) # combine data thus far
#### pg. 412 sufentanil base ####
pg412 <- extract_tables("report_yr_2016.pdf", pages=412) # read pdf into matrix
pg412 <- pg412[[1]][c(1:4),c(2,3,6,8,10,12)] # getting just kansas
pg412 <- as.data.frame(pg412) # convert to data frame
colnames(pg412) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg412$drug <- c("sufentanilbase") # enter drug type name
pg412 <- gather(pg412, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg412) # combine data thus far
#### pg. 427 tapentadol ####
pg427 <- extract_tables("report_yr_2016.pdf", pages=427) # read pdf into matrix
pg427 <- pg427[[2]][c(38:51),-c(1,3,6)] # getting just kansas
pg427 <- as.data.frame(pg427) # convert to data frame
colnames(pg427) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg427$drug <- c("tapentadol") # enter drug type name
pg427[,c(2:6)] <- lapply(pg427[,c(2:6)], function(x) str_replace_all(x, ",", "")) # remove commas
pg427 <- gather(pg427, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg427) # combine data thus far
#### pg. 428 tapentadol ####
pg428 <- extract_tables("report_yr_2016.pdf", pages=428) # read pdf into matrix
pg428 <- pg428[[1]][c(1:5),-c(1,3,6)] # getting just kansas
pg428 <- as.data.frame(pg428) # convert to data frame
colnames(pg428) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg428$drug <- c("tapentadol") # enter drug type name
pg428 <- gather(pg428, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg428) # combine data thus far
#### pg. 447 fentanylbase ####
pg447 <- extract_tables("report_yr_2016.pdf", pages=447) # read pdf into matrix
pg447 <- pg447[[1]][c(25:43),-c(1,3,6)] # getting just kansas
pg447 <- as.data.frame(pg447) # convert to data frame
colnames(pg447) <- c("zip", "q1", "q2", "q3", "q4", "total") # change column names
pg447$drug <- c("fentanylbase") # enter drug type name
pg447 <- gather(pg447, quarter, grams, q1:total) # convert to long format
arcos_2016_summary_ks <- bind_rows(arcos_2016_summary_ks, pg447) # combine data thus far
#### Write out the combined data in four layouts ####
# v1: fully long (zip / drug / quarter / grams)
write.csv(arcos_2016_summary_ks, "arcos_2016_summary_ks_v1.csv", row.names = FALSE)
# v2: one column per quarter
d2 <- spread(arcos_2016_summary_ks, quarter, grams)
write.csv(d2, "arcos_2016_summary_ks_v2.csv", row.names = FALSE)
# v3: one column per drug; zero-fill drugs absent for a zip/quarter
d3 <- spread(arcos_2016_summary_ks, drug, grams)
d3[is.na(d3)] <- 0
write.csv(d3, "arcos_2016_summary_ks_v3.csv", row.names = FALSE)
# v4: fully wide -- one column per drug/quarter combination, zero-filled
d4 <- spread(unite(arcos_2016_summary_ks, drug_quarter, c(drug, quarter)),
             drug_quarter, grams)
d4[is.na(d4)] <- 0
write.csv(d4, "arcos_2016_summary_ks_v4.csv", row.names = FALSE)
|
6ab8c0aa1ed965d776f44f290eafe9453fb3d30c
|
c5fe243e1c7f01c6217cc15f7f64a955f6561624
|
/R/ph2.r
|
719380abee69bb0780f89883fd99fdfb7b4913a9
|
[] |
no_license
|
cran/probhat
|
79c1ff28e9867565d80f2744e3a84f49aeba5dc6
|
ae21b43b0de331247713e4294acad3aa99c4fdb5
|
refs/heads/master
| 2021-06-09T16:27:06.236867
| 2021-05-12T08:40:02
| 2021-05-12T08:40:02
| 174,553,921
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 932
|
r
|
ph2.r
|
#probhat: Multivariate Generalized Kernel Smoothing and Related Statistical Methods
#Copyright (C), Abby Spurdle, 2019 to 2021
#This program is distributed without any warranty.
#This program is free software.
#You can modify it and/or redistribute it, under the terms of:
#The GNU General Public License, version 2, or (at your option) any later version.
#You should have received a copy of this license, with R.
#Also, this license should be available at:
#https://cran.r-project.org/web/licenses/GPL-2
# S3 generics for probhat objects: each ph.*f generic dispatches on the
# class of its first argument.
ph.namesf <- function(...) UseMethod("ph.namesf")
ph.printf <- function(...) UseMethod("ph.printf")
ph.plotf <- function(...) UseMethod("ph.plotf")
ph.linesf <- function(...) UseMethod("ph.linesf")

# Hook the base generics for class "phob" up to the ph.* generics above, so
# names(x), print(x), plot(x) and lines(x) all route through them.
names.phob <- function(x, ...) ph.namesf(x, ...)
print.phob <- function(x, ...) ph.printf(x, ...)
plot.phob <- function(x, ...) ph.plotf(x, ...)
lines.phob <- function(x, ...) ph.linesf(x, ...)
|
d930a99c4ff3a1ff54328b8a7e1106c2168f94ac
|
9f438e0f499907ee53f4f77d13639af1729a0687
|
/plot3.R
|
e4cceb3f9e5484bbaa4cd1926980f4bf7064c69c
|
[] |
no_license
|
ankit76ja/ExData_Plotting1
|
c2b948fa8959ca3e994192a94f0b5dc4b9e0f647
|
b644b4f0be7c68086931d10e2a9eccbfaa75199a
|
refs/heads/master
| 2021-08-10T17:44:30.153899
| 2017-11-12T21:01:48
| 2017-11-12T21:01:48
| 110,467,205
| 0
| 0
| null | 2017-11-12T20:37:19
| 2017-11-12T20:37:19
| null |
UTF-8
|
R
| false
| false
| 477
|
r
|
plot3.R
|
# plot3.png: household energy sub-metering over time, one line per sub-meter.
# NOTE(review): read_data() comes from read_data.r; presumably it returns a
# data frame with a Time column plus Sub_metering_1..3 -- confirm there.
source("read_data.r")
power_data<-read_data()
# 480x480 px output as required
png("plot3.png",height = 480,width = 480,units = "px")
# base line plot for sub-meter 1, then overlay meters 2 and 3 in colour
plot(power_data$Time, power_data$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
lines(power_data$Time,power_data$Sub_metering_2,col="red")
lines(power_data$Time,power_data$Sub_metering_3,col="blue")
# box.lty = 0 draws the legend without a border
legend("topright",legend = c("sub_metering_1","sub_metering_2","sub_metering_3"),lty = 1,col = c("black","red","blue"),cex = 0.75,box.lty = 0)
dev.off()
|
9d6d6cb80a9889b54dd0ff16a91c93e0bc2a9dbe
|
9271179ea2cac166e527234f9013aa3ef195913b
|
/data-raw/get_fbi_datsa.R
|
e447db7b9183f2df907985ee7ded887cda7fa4ac
|
[] |
no_license
|
mikkelkrogsholm/murderdata
|
0eb25bd680dedaac1002e04ba23ef4d5fb5ba29c
|
86b3842fd647b1e395bb6784b2f87f63e95ebf36
|
refs/heads/master
| 2021-05-06T07:53:54.688980
| 2017-12-18T16:05:31
| 2017-12-18T16:05:31
| 113,975,913
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 382
|
r
|
get_fbi_datsa.R
|
# Download the FBI homicide archives (SHR supplemental + Return A) from
# Dropbox and unpack them under data-raw/fbi/.
# mode = "wb" is required: the URLs end in "?dl=1", so download.file() cannot
# infer a binary transfer from the file extension and would otherwise write
# the zips in text mode on Windows, corrupting them.
shr_url <- "https://www.dropbox.com/s/xzulxmnzm3mf6bm/SHR.zip?dl=1"
shr_zip <- "data-raw/fbi/fbi_sup.zip"
utils::download.file(shr_url, shr_zip, mode = "wb")
unzip(shr_zip, exdir = "data-raw/fbi/fbi_sup")

ret_url <- "https://www.dropbox.com/s/mdonovdamlppf80/ReturnA.zip?dl=1"
ret_zip <- "data-raw/fbi/fbi_all.zip"
utils::download.file(ret_url, ret_zip, mode = "wb")
unzip(ret_zip, exdir = "data-raw/fbi/fbi_all")
|
27e4c09f32aaa7413fa98a34ff34b771a05c3e51
|
09f489b818406f56e28f544d566121e5a2c1be2c
|
/init.R
|
19f494731ec9ce32807f14fcd8e675669fc85fce
|
[] |
no_license
|
Martien1973/rAHNextract
|
e477e998509912c217061902548a3c0e9aadf332
|
696d66ec58d5e15eb076623ccd2bac7bd5d46285
|
refs/heads/master
| 2021-01-04T10:21:28.198351
| 2020-02-14T08:22:17
| 2020-02-14T08:22:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 121
|
r
|
init.R
|
# Attach the geospatial / data-handling packages this project relies on.
# NOTE(review): library() attaches globally, so calling my_init() has the same
# effect as running the library() calls at top level; the wrapper only groups
# them.
my_init <- function(){
  library(raster)
  library(rgdal)
  library(sp)
  library(sf)
  library(data.table)
}
my_init()
|
672f524843b837654ff277f6779793d3650c0607
|
c2497a475ceb8fead3eea2432876d0e6ca6c1e9f
|
/AgeMixing/plotGroupMixingPairs.R
|
af7dc1b68f96299d9db5fb546705635f0f4c7750
|
[] |
no_license
|
nicfel/FluBaselPhylo
|
bb64f70f70ef6ada41f5b55aec5fbbbb50302b9d
|
10bf47725bab230324037f1f814f1d98ea1cf699
|
refs/heads/master
| 2022-02-12T00:27:06.621190
| 2019-08-14T08:36:14
| 2019-08-14T08:36:14
| 192,312,417
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,007
|
r
|
plotGroupMixingPairs.R
|
######################################################
######################################################
# Here the inferred mean coalescent and migration
# rate ratios are plotted
######################################################
######################################################
library(ggplot2)
# clear workspace
rm(list = ls())
# Set the directory to the directory of the file
# NOTE(review): parent.frame(2)$ofile is only set when this file is source()d;
# running line-by-line or via Rscript will fail here -- confirm intended use.
this.dir <- dirname(parent.frame(2)$ofile)
setwd(this.dir)
# use the matlab standard colors to plot
# NOTE(review): col0..col4 are never referenced later in this script.
col0 <- rgb(red=0.0, green=0.4470,blue=0.7410)
col1 <- rgb(red=0.8500, green=0.3250,blue=0.0980)
col2 <- rgb(red=0.9290, green=0.6940,blue=0.1250)
col4 <- rgb(red=0.4660, green=0.6740,blue=0.1880)
col3 <- rgb(red=0.3010, green=0.7450,blue=0.9330)
# read in the cutoff values (one row per association file: cutoff, interval
# size, and the CSV filename holding the pairwise percentiles)
filevals <- read.table("./out/file_values_group_mixing_pairs.csv", header = T, sep=",",check.names = F)
# human-readable labels for the six age groups, in code order 001..006
tic.labels = c("pre-school", "school","adults unknown","adults with kids","adults without kids","elderly")
# set and upper or lower limit for the association score (just for plotting reasons)
lim=2
# do the same plotting in a grid style tough
int_size <- unique(filevals$interval_size)
cut_offs <- unique(filevals$upper)
# Build one heat-map panel per clustering cut-off value (first interval size).
p <- list()
for (b in seq(1,length(cut_offs))){
  # rows of filevals matching this cut-off and the first interval size
  ind <- intersect(which(filevals$upper==cut_offs[b]),which(filevals$interval_size==int_size[1]))
  t = read.table(file=as.character(filevals[ind,"filename"]), header = T, sep=",",check.names = F)
  # t$percentile[which(t$percentile < 1.3010 & t$percentile > -0.5)] <- 0
  # set everything above the limiit to be the limit (due to color gradients)
  t$percentile[which(t$percentile< -lim)] <- -lim
  t$percentile[which(t$percentile> lim)] <- lim
  breaks = unique(t$from)
  ylabelstext=breaks[seq(1,length(breaks),1)]
  # age-group pair codes ordered for the flipped y axis
  reverse_order = c("006_007", "005_006","004_005","003_004","002_003","001_002")
  # remove a part of the matrix (blank the upper triangle so each group pair
  # is shown only once)
  for (i in seq(1,length(ylabelstext))){
    for (j in seq(1,length(ylabelstext))){
      if (j>i){
        t[which(t$from==ylabelstext[[i]] & t$to==ylabelstext[[j]]),]$percentile = NA
      }
    }
  }
  # diverging palette centred on zero; NA (upper triangle) drawn white;
  # per-panel legends suppressed, one shared legend is added after the loop
  p[[b]] <- ggplot() +
    geom_tile(data=t, aes(ordered(to, levels=ylabelstext), ordered(from, levels=reverse_order), fill = percentile)) +
    scale_fill_gradientn(na.value="white", colors = c("#88419d","#b3cde3", rgb(1,1,1), "#fdcc8a","#d7301f"),
                         limits = c(-lim,lim),
                         breaks = c(-2, -1, 0, 1, 2),
                         name="Association",
                         label=c(expression(paste("p" <= 0.01, sep="")),
                                 "p=0.1",
                                 "",
                                 "p=0.1",
                                 expression(paste("p" <= 0.01, sep=""))
                         )) +
    scale_x_discrete(breaks=ylabelstext, labels=tic.labels)+
    scale_y_discrete(breaks=ylabelstext, labels=tic.labels)+
    ylab("") +
    xlab("") +
    theme_minimal()+
    theme(
      # axis.ticks.x=element_blank(),
      # axis.text.x=element_blank(),
      axis.text.x = element_text(angle = 45, hjust = 1),
      axis.title=element_text(size=14,face="bold"),
      legend.position = "none") +ggtitle(paste("cutoff value =", cut_offs[b], "years" ))
}
# Pull the legend grob out of a built ggplot so it can fill the spare grid cell.
g_legend <- function(a.gplot){
  tmp <- ggplot_gtable(ggplot_build(a.gplot))
  leg <- which(sapply(tmp$grobs, function(x) x$name) == "guide-box")
  legend <- tmp$grobs[[leg]]
  return(legend)}
legend.plot <-p[[5]] + theme(legend.position="right"
)
legend <- g_legend(legend.plot)
# NOTE(review): assumes exactly five cut-off panels, so slot 6 takes the legend
p[[6]] = legend
require(grid)
require(gridExtra)
# 2x3 grid: five heat maps plus the shared legend
plot <- do.call("grid.arrange",c(p, ncol=3))
ggsave(plot=plot,paste("/Users/nicmuell/Documents/github/BaselFlu-Text/Figures/Supplement/Group_Mixing_pairs_all.pdf", sep=""),width=10, height=8)
ggsave(plot=plot,paste("/Users/nicmuell/Documents/github/BaselFlu-Text/Figures/Supplement/Group_Mixing_pairs_all.png", sep=""),width=10, height=8)
# make the plot for interval size 10 years and cutoff 0.1 yeasr seperately
# NOTE(review): the filter below uses interval_size == 1, which disagrees with
# the "interval size 10 years" comment above -- confirm which is intended.
ind <- intersect(which(filevals$upper==0.1),which(filevals$interval_size==1))
t = read.table(file=as.character(filevals[ind,"filename"]), header = T, sep=",",check.names = F)
# t$percentile[which(t$percentile < 1.3010 & t$percentile > -1.3010)] <- 0
# set everything above the limiit to be the limit (due to color gradients)
t$percentile[which(t$percentile< -lim)] <- -lim
t$percentile[which(t$percentile> lim)] <- lim
breaks = unique(t$from)
ylabelstext=breaks[seq(1,length(breaks),1)]
# remove a part of the matrix (upper triangle, as in the grid plots above)
for (i in seq(1,length(ylabelstext))){
  for (j in seq(1,length(ylabelstext))){
    if (j>i){
      t[which(t$from==ylabelstext[[i]] & t$to==ylabelstext[[j]]),]$percentile = NA
    }
  }
}
# NOTE(review): relies on reverse_order still being in scope from the grid
# loop earlier in this script.
p <- ggplot() +
  geom_tile(data=t, aes(ordered(to, levels=ylabelstext), ordered(from, levels=reverse_order), fill = percentile)) +
  scale_fill_gradientn(na.value="white", colors = c("#88419d","#b3cde3", rgb(1,1,1), "#fdcc8a","#d7301f"),
                       limits = c(-lim,lim),
                       breaks = c(-2, -1, 0, 1, 2),
                       name="Association",
                       label=c(expression(paste("p" <= 0.01, sep="")),
                               "p=0.1",
                               "",
                               "p=0.1",
                               expression(paste("p" <= 0.01, sep=""))
                       )) +
  scale_x_discrete(breaks=ylabelstext, labels=tic.labels)+
  scale_y_discrete(breaks=ylabelstext, labels=tic.labels)+
  ylab("") +
  xlab("") +
  theme_minimal()+
  theme(
    axis.text.x = element_text(angle = 45, hjust = 1),
    axis.title=element_text(size=14,face="bold"))
# + theme(legend.position="none")
plot(p)
ggsave(plot=p,paste("/Users/nicmuell/Documents/github/BaselFlu-Text/Figures/Age/Group_Mixing_pairs.pdf", sep=""),width=6, height=5)
ggsave(plot=p,paste("/Users/nicmuell/Documents/github/BaselFlu-Text/Figures/Age/Group_Mixing_pairs.png", sep=""),width=6, height=5)
|
bf2f363f8b10aa2b708e5dec9041c1b275ae9ad6
|
0768cca65ac0cbda2096577e7156090ebf73dd08
|
/KNN.R
|
ae3ca3749bc7f10355113aa140e652855f18d9f9
|
[] |
no_license
|
bilsko/ML
|
0281616da5d8dc1a5a76ff454eb4b8123a6d545f
|
5cab16e2a25a14dfea8c077d526acd426912a60c
|
refs/heads/master
| 2020-04-14T22:46:09.088344
| 2019-01-05T03:13:25
| 2019-01-05T03:13:25
| 164,177,112
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 798
|
r
|
KNN.R
|
# k-NN on the ISLR Caravan data: predict insurance purchase from the 85
# standardized predictors, then scan k = 1..20 for the test error rate.
library(ISLR)
purchase <- Caravan[, 86]                      # response: "Purchase" column
standardized.Caravan <- scale(Caravan[, -86])  # predictors, centred/scaled
test.index <- 1:1000                           # first 1000 rows held out
test.data <- standardized.Caravan[test.index, ]
test.purchase <- purchase[test.index]
train.data <- standardized.Caravan[-test.index, ]
train.purchase <- purchase[-test.index]

# KNN model with k = 5
library(class)
set.seed(101)
predicted.purchase <- knn(train.data, test.data, train.purchase, k = 5)
print(head(predicted.purchase))
mean(test.purchase != predicted.purchase)  # misclassification rate at k = 5

# Error rate for k = 1..20; preallocate instead of growing inside the loop.
error.rate <- numeric(20)
for (i in 1:20) {
  set.seed(101)  # same seed for every k so random tie-breaking is reproducible
  predicted.purchase <- knn(train.data, test.data, train.purchase, k = i)
  error.rate[i] <- mean(test.purchase != predicted.purchase)
}
print(error.rate)

library(ggplot2)
k.values <- 1:20
error.df <- data.frame(error.rate, k.values)
error.df
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.