blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
327
content_id
stringlengths
40
40
detected_licenses
listlengths
0
91
license_type
stringclasses
2 values
repo_name
stringlengths
5
134
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
46 values
visit_date
timestamp[us]date
2016-08-02 22:44:29
2023-09-06 08:39:28
revision_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
committer_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
github_id
int64
19.4k
671M
star_events_count
int64
0
40k
fork_events_count
int64
0
32.4k
gha_license_id
stringclasses
14 values
gha_event_created_at
timestamp[us]date
2012-06-21 16:39:19
2023-09-14 21:52:42
gha_created_at
timestamp[us]date
2008-05-25 01:21:32
2023-06-28 13:19:12
gha_language
stringclasses
60 values
src_encoding
stringclasses
24 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
7
9.18M
extension
stringclasses
20 values
filename
stringlengths
1
141
content
stringlengths
7
9.18M
751733160e37e333ed1204d70f24113ac562c751
83b0b99eab88998bdd24ad7671feaeb9f586d0a4
/man/ggpreview.Rd
02f9bc0a54735716f46ef32ac07384a88a6c2356
[]
no_license
GuangchuangYu/ggimage
d3b8a2ada540d500436879f6e6b6ab279d25ad50
6aea2a64ba12f3268040f87817154ac9675e2df0
refs/heads/master
2023-06-30T15:02:43.149063
2023-06-19T03:36:47
2023-06-19T03:36:47
82,661,218
165
38
null
2023-06-19T03:33:09
2017-02-21T09:29:32
R
UTF-8
R
false
true
734
rd
ggpreview.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ggpreview.R \name{ggpreview} \alias{ggpreview} \title{ggpreview} \usage{ ggpreview( filename = NULL, plot = last_plot(), width = NA, height = NA, units = "in", ... ) } \arguments{ \item{filename}{If it is not NULL, the previewed figure will be saved to the file} \item{plot}{any plot that is supported by the 'ggplotify' package} \item{width}{width of the figure} \item{height}{height of the figure} \item{units}{units of the 'width' and 'height'} \item{...}{additional parameters passed to ggsave() if filename is not NULL} } \value{ a preview of the figure } \description{ preview a plot before saving it to a file. } \author{ Guangchuang Yu }
a1e0cced52d83596e35f4a1a64367be86582aefd
708f744bc98651fd3f78f2d59307509118c16879
/RKEEL/man/writeDatFromDataframe.Rd
14af8fed46db09f4ce275c0fe507efb66b75fbec
[]
no_license
i02momuj/RKEEL
726efa0409193a1ebc6ff82ef195e2708f3fa397
445cd8cceade2316bc12d40406c7c1248e2daeaa
refs/heads/master
2021-01-10T10:13:36.242589
2019-07-19T07:43:32
2019-07-19T07:43:32
49,633,299
6
1
null
null
null
null
UTF-8
R
false
false
450
rd
writeDatFromDataframe.Rd
\docType{methods} \name{writeDatFromDataframe} \alias{writeDatFromDataframe} \title{Write .dat from data.frame} \description{ Method for writing a .dat dataset file in KEEL format given a data.frame dataset } \usage{ writeDatFromDataframe(data, fileName) } \arguments{ \item{data}{data.frame dataset} \item{fileName}{String with the file name to store the dataset} } \examples{ #data(iris) #writeDatFromDataframe(iris, "iris.dat") } \keyword{utils}
316457a6b7f9825706e1c490e94017da76980b20
ff70b276b8ac674c4f0aa3771ade4ae1d063d1a0
/R/createSMDplot.R
a107d4d7d95bf57dc6f92c177b28e11ccc54c164
[]
no_license
lhenneman/sourceOrientedApproach
f7e4bf1368db44f40aeffe614bd920a0ccde40e4
1d4bdf01e0dd2464c3bbeb04e09eb73d985ef00a
refs/heads/master
2020-12-30T08:53:21.704462
2020-03-22T16:35:14
2020-03-22T16:35:14
238,937,456
0
1
null
2020-02-07T14:04:22
2020-02-07T14:04:21
null
UTF-8
R
false
false
3,229
r
createSMDplot.R
#' Create a plot of the standardized mean differences in covariates
#'
#' This function takes a matchit object and returns a plot of the standardized
#' mean differences (SMD) between the high exposed and control locations for
#' each covariate in the propensity score model. The SMD is a common way to
#' evaluate whether covariates were balanced between the two groups during matching.
#' Code for this plot was adopted from a vignette to the R tableone package (Yoshida and Bohn).
#'
#' @param matched.model A matchit object returned from the matchit function. \code{matched.model}
#'
#' @return A ggplot object showing before/after-matching SMDs per covariate.
#'
#' @keywords keywords
#'
#' @export
#'
#' @references
#' \insertRef{austin2011introduction}{sourceOrientedApproach}
#'
#' \insertRef{yoshidapackage}{sourceOrientedApproach}
#'
#' \insertRef{ho2011matchit}{sourceOrientedApproach}
#'
#' @examples
#' # Include these regions
#' regions <- c("IndustrialMidwest", "Northeast", "Southeast")
#'
#' # Covariates to adjust for using propensity score matching
#' covariate.vars <- c("logPop", "PctUrban","MedianHHInc", "PctPoor", "smokerate2000")
#'
#' dataset <- getMatchedDataset(exposure = inmap2005, covariates, covariate.vars, regions)
#'
#' createSMDplot(dataset$matched.model)
createSMDplot <- function(matched.model){
  # library() errors immediately if a package is missing; the original used
  # require(), which only warns and then fails later with a confusing error.
  library(ggplot2)
  library(MatchIt)

  # Summarise once and reuse (the original called summary() twice).
  sum.std <- summary(matched.model, standardize = TRUE)
  sum.all <- sum.std[[3]]
  SMD.vars <- rownames(sum.all)[-1]
  SMD.all <- sum.all[-1, 4]
  SMD.matched <- sum.std$sum.matched[-1, 4]

  # Drop covariates whose pre-match SMD is infinite (compute the mask once).
  keep <- !SMD.all %in% c(-Inf, Inf)
  SMD.vars <- SMD.vars[keep]
  SMD.matched <- SMD.matched[keep]
  SMD.all <- SMD.all[keep]

  dataPlot <- data.frame(variable = SMD.vars,
                         Before = SMD.all,
                         After = SMD.matched)

  # Reshape wide -> long in base R. The original called melt(), which comes
  # from reshape2 -- a package this function never loaded, so the call failed
  # unless the caller happened to have reshape2 attached.
  dataPlotMelt <- data.frame(
    variable = rep(dataPlot$variable, 2),
    Dataset = factor(rep(c("Before", "After"), each = nrow(dataPlot)),
                     levels = c("Before", "After")),
    SMD = c(dataPlot$Before, dataPlot$After)
  )

  # Order covariates by their pre-match SMD so the flipped plot reads top-down.
  varNames <- as.character(dataPlot$variable)[order(dataPlot$Before)]
  dataPlotMelt$variable <- factor(dataPlotMelt$variable, levels = varNames)

  # Colour-blind-friendly palette.
  cbbPalette <- c("#E69F00", "#0072B2", "#D55E00", "#CC79A7",
                  "#56B4E9", "#009E73", "#F0E442")
  x.lab <- "Standardized Mean Difference \n (High/Low)"

  ggplot(data = dataPlotMelt,
         mapping = aes(x = variable, y = SMD, group = Dataset, color = Dataset)) +
    scale_colour_manual(values = cbbPalette) +
    geom_line() +
    geom_point() +
    geom_hline(yintercept = c(0), color = "black", size = 0.1) +
    coord_flip(ylim = c(-2, 2)) +
    theme_bw() +
    theme(text = element_text(family = "Times New Roman"),
          legend.key = element_blank(),
          axis.title.y = element_blank(),
          axis.text.y = element_text(size = 10),
          axis.text.x = element_text(size = 12),
          axis.title.x = element_text(size = 12),
          legend.text = element_text(size = 12),
          legend.title = element_blank(),
          legend.position = "right",
          plot.title = element_text(size = 12, hjust = 0.5)) +
    labs(y = x.lab, title = "")
}
db511589ec26a86b0506d54b680381db01dedd46
ca3791da3a2e7b84b8366cf6b1247814fe6d6daa
/code/5. CART.R
8abf6a5011501504f6ce1ea255def9d29f6c1753
[]
no_license
jodyndaniel/pprbirds-agriculture
3630267044e6c194c14a66ff0df397c405bcfdef
db67087ebbb4ea2fa15aa2f3137af7f360f4da88
refs/heads/main
2023-01-03T10:59:00.183421
2020-10-30T21:00:31
2020-10-30T21:00:31
308,741,594
0
0
null
null
null
null
UTF-8
R
false
false
5,197
r
5. CART.R
##################################################################
##################################################################
# CART classification of wetland bird assemblages.
# NOTE(review): this script depends on objects created by earlier scripts in
# the pipeline (Wet.Covariets.CART, wetncluster.bd.isa, PruneTreeDeviance,
# LandCover.Polan, WetEmptyISA) -- it is not standalone.
library(tree)
library(caret)
library(maptree)
require(rattle)
library(RColorBrewer)
library(diversity)
library(vegan)
library(vegetarian)
library(plotrix)
library(RVAideMemoire)
library(DescTools)
###################################################################
Wet.Covariets.CART.GP <- Wet.Covariets.CART
# give a new clustering number based on wetland affinity
# (the trailing comments record the original cluster labels)
Wet.Covariets.CART.GP$Group[wetncluster.bd.isa[[5]] == 1] <- 5 # 8
Wet.Covariets.CART.GP$Group[wetncluster.bd.isa[[5]] == 2] <- 1 # 7
Wet.Covariets.CART.GP$Group[wetncluster.bd.isa[[5]]== 3] <- 3 # 10
Wet.Covariets.CART.GP$Group[wetncluster.bd.isa[[5]] == 4] <- 4 # 11
Wet.Covariets.CART.GP$Group[wetncluster.bd.isa[[5]] == 5] <- 2 # 12
# after running the CART, we will need to prune the tree (using a function we made)
# selecting where we see a drop in error as the number of nodes we should prune at
WETBirds.CART.Raw <- tree(as.factor(Group) ~ ., data = Wet.Covariets.CART.GP, split = "deviance")
# examine the error based on the number of nods
Prune.WEB <- PruneTreeDeviance(x=100,y=WETBirds.CART.Raw,z = 64)
plot(rownames(Prune.WEB), Prune.WEB[,1], type="b") # 8 seems optimal
# now we can prune
WETBirds.CART.PR <- prune.tree(WETBirds.CART.Raw, best = 8)
# pull out tree staistics
# (residual sums of squares, printed for the pruned and the raw tree)
sum(sapply(resid(WETBirds.CART.PR),function(x)(x-mean(resid(WETBirds.CART.PR)))^2))
sum(sapply(resid(WETBirds.CART.Raw),function(x)(x-mean(resid(WETBirds.CART.Raw)))^2))
# visualize the final tree and save
draw.tree(WETBirds.CART.Raw,size = 3, digits = 3,nodeinfo = TRUE,print.levels=FALSE)
draw.tree(WETBirds.CART.PR,size = 3, digits = 3,nodeinfo = TRUE,print.levels=FALSE)
png("Output/WETBCART.png",width = 7, height = 7,units = 'in', res = 600)
draw.tree(WETBirds.CART.PR,size = 3, digits = 3,nodeinfo = TRUE,print.levels=FALSE)
dev.off()
# Now we need to examine the classification error rates for the tree
# is their a large difference in what the tree predicts and what the data suggests?
WetBirds.CART.CM <- data.frame(Group = as.factor(predict(WETBirds.CART.PR,Wet.Covariets.CART.GP,type="class")))
# a confusion matrix could be helpful
confusionMatrix(WetBirds.CART.CM$Group,as.factor(Wet.Covariets.CART.GP$Group))
WCMatrix <- cbind(Prediction = predict(WETBirds.CART.PR,Wet.Covariets.CART.GP,type="class"), Group = Wet.Covariets.CART.GP$Group)
# kappa is fair, but not great
# a g-test could help in figuring out if the group membership frequency
# differences between the predictions and observations
GTest(WCMatrix, correct = "williams") # no differences
WETNODE <- data.frame(Node = WETBirds.CART.PR$where,# node membership of each site
                      Group_Predicted = Wet.Covariets.CART.GP$Group, # assemblage they were predicted to belong to
                      Group_Observed = WetBirds.CART.CM$Group)
# based on an examination of WETBirds.CART.PR$frame, and WETBirds.CART.PR$where
# we are able to tell which node each site was predicted to
# this, we could use to work back the error rates in each node
# the actual assemblage they belong to
# and site covariates (e.g., Region, Disturbance Class)
# if an assemblage has more than one terminal node, the node on the left is listed as
# A and the one to the right as B - this is based on the visualized tree
WETNODE$Class[WETNODE$Node == 5] = "3A"
WETNODE$Class[WETNODE$Node == 6] = 2
WETNODE$Class[WETNODE$Node == 7] = "4A"
WETNODE$Class[WETNODE$Node == 8] = "4B"
WETNODE$Class[WETNODE$Node == 12] = 1
WETNODE$Class[WETNODE$Node == 13] = "5A"
WETNODE$Class[WETNODE$Node == 14] = "3B"
WETNODE$Class[WETNODE$Node == 15] = "5B"
WETNODEF <- data.frame(WETNODE, Disturb = LandCover.Polan[WetEmptyISA,"Disturb"],
                       Region = Wet.Covariets.CART.GP$Region,
                       Permanence = Wet.Covariets.CART.GP$Permanence )
######################################################################################################################
######################################################################################################################
######################################################################################################################
# Now, we can create a table of site covariates - mean and standard error for each predicted group
CART.Table.WetMean <- matrix(data=NA,nrow=15,ncol=5)
CART.Table.WetError <- matrix(data=NA,nrow=15,ncol=5)
# covariates that are numeric
for (j in 4:13){
  red <- aggregate(Wet.Covariets.CART.GP[,j]~ Group, Wet.Covariets.CART.GP, mean)
  redb <- aggregate(Wet.Covariets.CART.GP[,j]~ Group, Wet.Covariets.CART.GP, std.error)
  CART.Table.WetMean[j,] <- red[,2]
  CART.Table.WetError[j,] <- redb[,2]
}
# add rownames
# NOTE(review): these assignments assume colnames(Wet.Covariets.CART.GP) has
# length 15 (= nrow of the tables) -- confirm against the data object.
rownames(CART.Table.WetMean) <- colnames(Wet.Covariets.CART.GP)
rownames(CART.Table.WetError) <- colnames(Wet.Covariets.CART.GP)
# categorical covariates
table(WETNODEF$Class,WETNODEF$Region)
table(WETNODEF$Class,WETNODEF$Permanence)
table(WETNODEF$Class,WETNODEF$Disturb)
table(WETNODEF$Class, WETNODEF$Group_Observed)
7cd440f0f4f3b92c11ba801ec7f495bb6d7de7ac
0ef1a314914e88740dbe33b248a552a57c0b261d
/MBQhsi/R/Litfc.R
145eb0abfe5cf9cb74e8fac04fa66eb887b5e196
[]
no_license
rocrat/MBQ_Package
845d25faed797835d916ed646496f26f78254521
b8c4f978fce36cfd3deb5cb2604372b00bf68e15
refs/heads/master
2021-01-21T13:25:37.952739
2016-05-17T19:13:40
2016-05-17T19:13:40
53,088,771
0
0
null
null
null
null
UTF-8
R
false
false
742
r
Litfc.R
#' @title Literature Forb Cover
#' @description Calculates the partial HSI given the forb cover.
#'
#' @param x Percent aerial forb cover measured as in a Daubenmire plot (Daubenmire 1959).
#' @param author The paper from which to calculate the partial HSI, c("king", "simms")
#'
#' @return Returns the relative HSI value
#'
#' @usage FC.Lit(x, author)
#'
#' @export
#' @name FC.Lit
#' @author Dominic LaRoche
#'
FC.Lit <- function(x, author){
  if (length(author) > 1) stop("Please choose only one author")
  # Guard length-0 input first: `if (!character(0) %in% ...)` would raise
  # "argument is of length zero" instead of the intended message.
  if (length(author) == 0 || !author %in% c("simms", "king")) {
    stop("Please select either 'simms' or 'king'")
  }
  # Each curve is a scaled normal density; the mean/sd/scale constants come
  # from the cited source papers (king, simms).
  s <- switch(author,
    king  = dnorm(x, 32.39, 2.55) * 6.391949,
    simms = dnorm(x, 15, 4) * 10.02651
  )
  return(s)
}
e45744bf52cd23615c28126c4879235e9ba12bdb
758f48e8724cdbace0de71ddf54104ffb0a21aa3
/run_analysis.R
11f0243010fbe5039217553fd1bce9103bd6a00d
[]
no_license
zhangxuecq/wearable-computing
5450847ce1ea0be4a89d8c6b4826c021fe0a2b5c
81448187ae0fe9f5a1061bfd9304f575befc5ecb
refs/heads/master
2021-01-10T01:45:44.917279
2015-10-25T16:25:59
2015-10-25T16:25:59
44,895,529
0
0
null
null
null
null
UTF-8
R
false
false
1,492
r
run_analysis.R
# Tidy the UCI HAR ("wearable computing") dataset: merge train/test splits,
# keep mean/std measurements, label activities, and write per-subject,
# per-activity averages.
# The pipeline below uses dplyr (%>%, group_by, summarise); the script
# previously never attached it and failed at the final step.
library(dplyr)

# Load the data.
X_test <- read.table("UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt")
X_train <- read.table("UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt")
features <- read.table("UCI HAR Dataset/features.txt")
activity_labels <- read.table("UCI HAR Dataset/activity_labels.txt")

# Merges the training and the test sets to create one data set.
test <- cbind(subject_test, y_test, X_test)
train <- cbind(subject_train, y_train, X_train)
data_total <- rbind(train, test)

# Extracts only the measurements on the mean and standard deviation
# for each measurement (word-boundary match keeps mean()/std() but not
# meanFreq()).
index <- grep("\\bmean()\\b|\\bstd()\\b", features[, 2])
data_part <- subset(data_total, select = c(1, 2, index + 2))

# Uses descriptive activity names to name the activities in the data set.
data_part[, 2] <- as.factor(activity_labels$V2[data_part[, 2]])

# Appropriately labels the data set with descriptive variable names.
name <- c("subject", "activity", as.character(features[index, 2]))
names(data_part) <- name

# From the data set in step 4, creates a second, independent tidy data
# set with the average of each variable for each activity and each subject.
# summarize_each() is long-deprecated; across() is the modern equivalent.
new_data <- data_part %>%
  group_by(subject, activity) %>%
  summarise(across(everything(), mean))
write.table(new_data, file = "tidy new data set.txt", row.names = FALSE)
b70263d490ad35e54ee2e22e6b5f88331e8e02a4
fe8f22495982e2d8769b806ea55fd8235c5a29aa
/man/read_logs.Rd
a0e17063b3edd0b07abfd4d60b3cf9d4a74fcb8e
[]
no_license
drsimonj/adapter
f67e1711cf57d2193b49668a7b38994e4d1e1d15
69a0e3330b6d39a1e337bb662e8c602c303370b9
refs/heads/master
2021-05-02T16:01:44.816127
2017-05-18T03:35:49
2017-05-18T03:35:49
72,581,326
0
0
null
null
null
null
UTF-8
R
false
true
3,134
rd
read_logs.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reading.R \name{read_logs} \alias{read_logs} \alias{read_session} \alias{read_events} \alias{read_stream} \alias{read_all_streams} \title{Read user's log files} \usage{ read_logs(user_dir) read_session(user_dir, file_name = "session.tsv", col_names = c("var", "info"), tz = "Australia/Sydney") read_events(user_dir, file_name = "events.tsv", col_names = c("time", "event", "detail")) read_stream(user_dir, file_name, stream_dir = "streams/", is_numeric = TRUE, is_vec3 = FALSE) read_all_streams(user_dir, pattern = "tsv$", stream_dir = "streams/", is_numeric = c("input_brake", "input_horizontal", "input_vertical"), is_vec3 = c("position", "rotation", "velocity")) } \arguments{ \item{user_dir}{Character string defining the user's log-file directory} \item{file_name}{Character string of the file name. Must include extension such as .tsv} \item{col_names}{Vector of column names to be used.} \item{tz}{a character string that specifies which time zone to parse the date with. The string must be a time zone that is recognized by the user's OS.} \item{stream_dir}{Character string defining the directory in which stream log files exist.} \item{is_numeric}{Indicate whether a stream variable is numeric and, therefore, should be converted to numeric. Can be a boolean value (TRUE/FALSE) or a character vector of variable names to convert if present.} \item{is_vec3}{Indicate whether a stream variable is a Vector3 value in Unity and, therefore, should be converted to a vector of 3 values. Can be a boolean value (TRUE/FALSE) or a character vector of variable names to convert if present.} \item{pattern}{an optional \link{regular expression}. Only file names which match the regular expression will be returned.} } \value{ read_logs will return a list with a "user" S3 class, with three tibbles (session, events and streams). All others will return a single \code{\link[tibble]{tibble}}. 
} \description{ These functions help to read a user's session, events, or stream log files. read_logs() is a wrapper function that uses the default values of all other functions to read all log files into a single list. If this fails, the log files can be read separately using the other functions and by adjusting the variables appropriately. } \details{ All files that can be read are assumed to be tab-separated values without variable headers. Uniquely, they are assumed to: \describe{ \item{session}{Appear in top-level of user's log file directory; contain variable names and values} \item{events}{Appear in top-level of user's log file directory; contain a timestamp and an event name (tab-separated details about the event can follow in some cases).} \item{stream}{Appear in streams/ directory of user's log file directory; contain a timestamp and a value.} } Most functions are helper functions to read in a single file. read_all_streams, however, reads all the stream files in a directory and merges them into a single tibble. Also, read_logs will make use of all the functions to read all log files into a list. }
734377be4451c001af29cada6d0eaa3c54f02b55
8a8e37a05bd1810e0c6c46bdf3e63a8ff0a79e86
/r/SCEUtils/man/metadata_histogram.Rd
33fa816449d2c25a706c2a8480a6ede3b6ff20a6
[]
no_license
nathancfox/tools
3209208c45226988273af4a9a69267a49f9bfcf1
3f7db30b380a8fbf569d2016795e42cb853fa15e
refs/heads/master
2021-12-10T04:45:38.955354
2021-09-29T14:19:56
2021-09-29T14:19:56
240,432,103
0
0
null
null
null
null
UTF-8
R
false
true
1,158
rd
metadata_histogram.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plots.R \name{metadata_histogram} \alias{metadata_histogram} \title{Plot a histogram of a variable in rowData and colData.} \usage{ metadata_histogram( sce_obj, x, row_metadata = FALSE, title = NULL, x_label = NULL, bins = NULL, binwidth = NULL, mean = TRUE, median = TRUE ) } \arguments{ \item{sce_obj}{The SingleCellExperiment object.} \item{x}{The name of the variable to plot.} \item{row_metadata}{TRUE if x is in rowData, FALSE if in colData.} \item{title}{The title of the plot.} \item{x_label}{The x-axis label of the plot.} \item{bins}{The number of bins to use. Cannot be set if binwidth is set.} \item{binwidth}{The size of each bin. Cannot be set if bins is set.} \item{mean}{If TRUE, a line will be plotted to indicate the mean of the distribution.} \item{median}{If TRUE, a line will be plotted to indicate the median of the distribution.} } \description{ Plots a ggplot histogram of a metadata variable. Can also annotate with the mean and/or median. Note that the mean and median legend values will only be accurate to 0 decimal places. }
69d98ef53e649d1db4f554a64057aa7db7b4dfe9
e914f4ee7b6fab2a992abc04088ffd9cd1df85ed
/Scripts/PNM_Lichens_NoPar.R
87048e6542d9d3ca2e8a0183d895b94b6e013ccc
[]
no_license
kunstler/MollierChap1PhD
9abe2c9ce907fb442c12ff8b228f3eafd4ff2cec
2a361ec49c0b448cfc9ad5469bdb6d4bfdbec671
refs/heads/master
2021-04-23T20:40:48.091471
2020-09-11T17:45:20
2020-09-11T17:45:20
249,998,574
0
0
null
null
null
null
UTF-8
R
false
false
109
r
PNM_Lichens_NoPar.R
# Fit PNM Lichens
# Entry-point script: loads the project's helper functions and runs the
# "no parameter" fitting routine for the Lichens group in park "PNM".
source("R/Functions.R")
# Fun_Fit_Parc_Group_NoPar is defined in R/Functions.R; `Parc` selects the
# park and `Groupe_Select` the taxonomic group to fit.
Fun_Fit_Parc_Group_NoPar(Parc = "PNM", Groupe_Select = "Lichens")
18a120a68448332b1735bf4c936d9341cf856f95
2a48401dc9fa8307da20f92a005fc2b2f6efe531
/man/get_status.Rd
845c6a51c9e32b766aa17aeee1f8475f8f6e94ac
[]
no_license
vitorcapdeville/longRunningAux
022312b6a50bc8e0182aa85bd71740300b567b6b
0b5dcc58afe0b98e31eb242c5a3073441aa26df5
refs/heads/master
2021-01-01T07:42:18.775110
2020-02-08T17:54:17
2020-02-08T17:54:17
239,177,238
0
0
null
null
null
null
UTF-8
R
false
true
498
rd
get_status.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/long_running_aux.R \name{get_status} \alias{get_status} \title{Função que pega o status atual da tarefa.} \usage{ get_status(status_file) } \arguments{ \item{status_file}{Um arquivo temporário onde está registrado o status da tarefa. Usualmente criado com o comando `status_file = tempfile()`.} } \value{ O status atual assim como registrado no arquivo. } \description{ Função que pega o status atual da tarefa. }
b0adc7a212fa0ceb2ed5a6ee600f5761de7597fa
439933a3fb21a29240ab4b04aebaced0569248be
/_R code for processing raw data/Make Rec effort files Spr-Sum.R
d0082c2c8f87ed883e0fb3dc3f00e94bed2eed09
[]
no_license
nwfsc-cb/spring-chinook-distribution
e47b5e39f5ce2ab8f20413085bc13249ef3bec37
5bff26b6fe5102a16a9c3f2c13d659b7e831e03e
refs/heads/master
2023-08-08T03:35:36.302066
2023-08-01T16:35:04
2023-08-01T16:35:04
128,123,447
0
0
null
null
null
null
UTF-8
R
false
false
24,225
r
Make Rec effort files Spr-Sum.R
### SCRIPT FOR CREATING FILES THAT CAN BE READ IN BY STAN # Rec Effort data first #print(base.dir) ak.eff <- read.csv("./Processed Data/Effort Data/effort.data.ak.2022-09.csv") # THIS IS ONLY COMMERCIAL TROLL (OMIT) ca.or.wa.eff <- read.csv("./Processed Data/Effort Data/effort.data.REC.ca.or.wa.csv") puso.eff <- read.csv("./Processed Data/Effort Data/effort.data.REC.puso-to-2020-FIN.csv") puso.retention <- read.csv("./Processed Data/Effort Data/WA PUSO Chinook retention.csv") sgeo.eff <- read.csv("./Processed Data/Effort Data/effort.data.REC.sgeo.csv") johnstone.eff <- read.csv("./Processed Data/Effort Data/effort.data.REC.johnstone.csv") wcvi.eff <- read.csv("./Processed Data/Effort Data/effort.data.REC.wcvi.csv") bc.eff <- read.csv("./Processed Data/Effort Data/effort.data.REC.bc.2019.csv") can.irec.eff <- read.csv("./Processed Data/Effort Data/iREC chinook effort 05-2019.csv") can.irec.mapping <- read.csv("./Processed Data/Effort Data/can.irec.areas.mapping.csv") colnames(ca.or.wa.eff)[3:14] <- colnames(ak.eff)[3:14] locations <- read.csv("./Processed Data/Locations-map to coastal areas 1-2019.csv") ##### These are the month groupings to start: # Need to make separate matrices for power and hand trolling ALL.MONTH <- c("month.01","month.02","month.03","month.04","month.05","month.06","month.07","month.08","month.09","month.10","month.11","month.12") ### At present we lack rec effort for 1978-1995 for both BC (except SGEO) and Alaska. ### Therefore, we will use three files for rec effort: ### One for the south in which we will use the recreational effort data. ### One for SGEO where we have effort data (post 1982) but it units of boat trips, not angler trips. ### One for the north where we will not. 
############################################################################################################## ####################################################################################################################################### ####################################################################################################################################### ####################################################################################################################################### #if(MONTH.STRUCTURE == "FOUR"){ # Seasonal Blocks ############################################################## # AK Effort # ak.eff$area.code <- ak.eff$SEAK.region # ak.eff.by.area <- aggregate(ak.eff[,ALL.MONTH],by=list(year=ak.eff$year,area.code=ak.eff$area.code),sum) # # ak.eff.by.area$month.winter <- rowSums(ak.eff.by.area[,WINTER.MONTH]) # ak.eff.by.area$month.spring <- rowSums(ak.eff.by.area[,SPRING.MONTH]) # ak.eff.by.area$month.summer <- rowSums(ak.eff.by.area[,SUMMER.MONTH]) # ak.eff.by.area$month.fall <- rowSums(ak.eff.by.area[,FALL.MONTH]) # # ak.eff.rec <- ak.eff.by.area[,c("year","area.code",MONTH)] # US Coast if(loc_18=="TWO_OR" | loc_18=="_two_OR_PUSO_AK"){ ca.or.wa.eff$area.code <- locations$area.code.two.OR[match(ca.or.wa.eff$port,locations$stat.area.port)] }else if(loc_18=="NCA_SOR_PUSO"){ ca.or.wa.eff$area.code <- locations$area.code.NCA_SOR_PUSO[match(ca.or.wa.eff$port,locations$stat.area.port)] }else{ ca.or.wa.eff$area.code <- locations$area.code[match(ca.or.wa.eff$port,locations$stat.area.port)] } ### DIVIDE WESTPORT EFFORT EQUALLY BETWEEN WAC and COL ca.or.wa.eff[ca.or.wa.eff$port=="Westport",][,grep("month",colnames(ca.or.wa.eff))] <- ca.or.wa.eff[ca.or.wa.eff$port=="Westport",][,grep("month",colnames(ca.or.wa.eff))] /2 temp <- ca.or.wa.eff[ca.or.wa.eff$port=="Westport",] temp$area.code <- "COL" ca.or.wa.eff <- rbind(ca.or.wa.eff,temp) ### ca.or.wa.eff.by.area <- 
aggregate(ca.or.wa.eff[,ALL.MONTH],by=list(year=ca.or.wa.eff$year,area.code=ca.or.wa.eff$area.code),sum) if(MONTH.STRUCTURE=="FOUR"){ ca.or.wa.eff.by.area$month.winter.2 <- rowSums(ca.or.wa.eff.by.area[,WINTER.MONTH[1:3]]) ca.or.wa.eff.by.area$month.winter.1 <- rowSums(ca.or.wa.eff.by.area[,WINTER.MONTH[4:5]]) ca.or.wa.eff.by.area$month.spring <- rowSums(ca.or.wa.eff.by.area[,SPRING.MONTH]) ca.or.wa.eff.by.area$month.summer <- rowSums(ca.or.wa.eff.by.area[,SUMMER.MONTH]) ca.or.wa.eff.by.area$month.fall <- rowSums(ca.or.wa.eff.by.area[,FALL.MONTH]) } if(MONTH.STRUCTURE=="SPRING"){ ca.or.wa.eff.by.area$month.winter.2 <- rowSums(ca.or.wa.eff.by.area[,WINTER.MONTH[1:2]]) ca.or.wa.eff.by.area$month.winter.1 <- rowSums(ca.or.wa.eff.by.area[,WINTER.MONTH[3:4]]) ca.or.wa.eff.by.area$month.spring <- rowSums(ca.or.wa.eff.by.area[,SPRING.MONTH]) ca.or.wa.eff.by.area$month.summer <- rowSums(ca.or.wa.eff.by.area[,SUMMER.MONTH]) ca.or.wa.eff.by.area$month.fall <- rowSums(ca.or.wa.eff.by.area[,FALL.MONTH]) } if(MONTH.STRUCTURE=="FRAM"){ ca.or.wa.eff.by.area$month.winter.2 <- (ca.or.wa.eff.by.area[,WINTER.MONTH[1]]) ca.or.wa.eff.by.area$month.winter.1 <- rowSums(ca.or.wa.eff.by.area[,WINTER.MONTH[2:4]]) ca.or.wa.eff.by.area$month.spring <- rowSums(ca.or.wa.eff.by.area[,SPRING.MONTH]) ca.or.wa.eff.by.area$month.summer <- rowSums(ca.or.wa.eff.by.area[,SUMMER.MONTH]) ca.or.wa.eff.by.area$month.fall <- rowSums(ca.or.wa.eff.by.area[,FALL.MONTH]) } ca.or.wa.eff.by.area$year.wint.2 <- ca.or.wa.eff.by.area$year-1 temp <- ca.or.wa.eff.by.area[,c("year.wint.2", "area.code","month.winter.2") ] ca.or.wa.eff.by.area <- ca.or.wa.eff.by.area %>% dplyr::select(-year.wint.2,-month.winter.2) ca.or.wa.eff.by.area <- merge(ca.or.wa.eff.by.area,temp,by.x=c("year","area.code"),by.y=c("year.wint.2" ,"area.code")) # added all=T to avoid cutting off a year of data and maintaining consistency with other scripts ca.or.wa.eff.by.area$month.winter <- ca.or.wa.eff.by.area$month.winter.2 + 
ca.or.wa.eff.by.area$month.winter.1 ca.or.wa.eff.rec <- ca.or.wa.eff.by.area[,c("year","area.code",MONTH)] ################################################################################# # Puget Sound ################################################################################# #Adjust later years for chinook non-retention. #### make mapping between month number and number of days. M <- 1:12 D <- c(31,28,31,30,31,30,31,31,30,31,30,31) REF <- data.frame(M,D) #Come up with a mapping between month and fraction of month that Chinook retention was possible for PUSO puso.ret.wide <- expand.grid(month=1:12,year=unique(puso.retention$Year)) A <-matrix(0,nrow(puso.ret.wide),length(grep("Area",colnames(puso.eff)))) colnames(A) <- colnames(puso.eff)[grep("Area",colnames(puso.eff))] puso.ret.wide <- data.frame(cbind(puso.ret.wide,A)) ALL <- NULL for(i in 1:nrow(puso.retention)){ area <- puso.retention$Area[i] COL <- grep(area,colnames(puso.ret.wide)) M <- puso.retention$month.open[i] : puso.retention$month.close[i] M.frac <- NULL if(length(M) == 1 ){ M.frac[1] <- (puso.retention$day.close[i] - puso.retention$day.open[i] + 1) / REF$D[REF$M==M[1]] } if(length(M) > 1 ){ M.frac[1] <- (REF$D[REF$M==M[1]] - puso.retention$day.open[i] + 1) / REF$D[REF$M==M[1]] if(length(M)>2){M.frac[2:(length(M)-1)] <- 1} M.frac[length(M)] <- (puso.retention$day.close[i]) / REF$D[REF$M==M[length(M)]] } M.frac[M.frac > 1] <- 1 B <- puso.ret.wide %>% filter(year == puso.retention$Year[i],month >= M[1], month <= M[length(M)]) B[,COL] <- M.frac ALL <- rbind(ALL,B) } ALL <- as.data.frame(ALL) puso.ret.wide <- ALL %>% group_by(month,year) %>% summarise(Area.5=sum(Area.5),Area.6=sum(Area.6),Area.7=sum(Area.7),Area.8=sum(Area.8), Area.8.1=sum(Area.8.1),Area.8.2=sum(Area.8.2),Area.9=sum(Area.9), Area.10=sum(Area.10),Area.11=sum(Area.11),Area.12=sum(Area.12),Area.13=sum(Area.13)) %>% as.data.frame() puso.ret.wide[ ,3:ncol(puso.ret.wide)][puso.ret.wide[ ,3:ncol(puso.ret.wide)] >1 ] <- 1 ################# 
## Deal with the effort data. ################# puso.eff[,4:14] <- puso.eff[,4:14] * puso.eff$adjust #3 Combine times of chinook retention puso.eff <- puso.eff %>% group_by(Year,Month) %>% summarise(Area.5=sum(Area.5),Area.6=sum(Area.6),Area.7=sum(Area.7),Area.8=sum(Area.8), Area.8.1=sum(Area.8.1),Area.8.2=sum(Area.8.2),Area.9=sum(Area.9), Area.10=sum(Area.10),Area.11=sum(Area.11),Area.12=sum(Area.12),Area.13=sum(Area.13)) %>% as.data.frame() puso.ret.wide <- merge(puso.eff[,c("Year","Month")],puso.ret.wide,by.x=c("Year","Month"),by.y=c("year","month"),all=T) puso.ret.wide[is.na(puso.ret.wide)==T] <- 1 puso.eff <- merge(puso.ret.wide[,c("Year","Month")],puso.eff,by=c("Year","Month"),all=T) puso.eff[,3:ncol(puso.eff)] <- puso.eff[,3:ncol(puso.eff)] * puso.ret.wide[,3:ncol(puso.eff)] #### DROP AREA 5 from the EFFORT STATISTICS IF APPROPRIATE # puso.eff.area5 <- puso.eff %>% dplyr::select(Year,Month,Area.5) # puso.eff <- puso.eff %>% dplyr::select(-Area.5) # # puso.eff.area5$total.effort <- puso.eff.area5$Area.5 puso.eff<- puso.eff %>% mutate(total.effort.out = Area.5+Area.6+Area.7, total.effort.in=Area.8+Area.8.1+Area.8.2+Area.9+Area.10+Area.11+Area.12+Area.13, total.effort = total.effort.in + total.effort.out) # temp <- aggregate(puso.eff$total.effort,by=list(year=puso.eff$Year,month=puso.eff$Month),sum) # temp <- temp[order(temp$year,temp$month),] puso.eff.wide <- data.frame(port="All Puget Sound",year=YEARS.RECOVER) puso.eff.wide <- cbind(puso.eff.wide,matrix(0,length(YEARS.RECOVER),12)) # This section is for the original 17 area model puso.eff.wide <- pivot_wider(puso.eff %>% dplyr::select(Year,Month,total.effort), id_cols = "Year",names_from = "Month",values_from = "total.effort") puso.eff.wide <- data.frame(port="All Puget Sound",puso.eff.wide) colnames(puso.eff.wide)[3:14] <- ALL.MONTH puso.eff.wide$effort.type <- "angler.trip" puso.eff.wide$Notes <- NA puso.eff.wide$area.code <- "PUSO" if(loc_18 == "TRUE" | loc_18 == "TWO_OR" | loc_18 =="NCA_SOR_PUSO" | 
loc_18 =="_two_OR_PUSO_AK"){ # This section is for the 18 area model puso.eff.in.wide <- pivot_wider(puso.eff %>% dplyr::select(Year,Month,total.effort.in), id_cols = "Year",names_from = "Month",values_from = "total.effort.in") puso.eff.in.wide <- data.frame(port="All Puget Sound",puso.eff.in.wide) colnames(puso.eff.in.wide)[3:14] <- ALL.MONTH puso.eff.in.wide$area.code <- "PUSO" puso.eff.out.wide <- pivot_wider(puso.eff %>% dplyr::select(Year,Month,total.effort.out), id_cols = "Year",names_from = "Month",values_from = "total.effort.out") puso.eff.out.wide <- data.frame(port="All Puget Sound",puso.eff.out.wide) colnames(puso.eff.out.wide)[3:14] <- ALL.MONTH puso.eff.out.wide$area.code <- "PUSO_out" puso.eff.wide <- rbind(puso.eff.in.wide, puso.eff.out.wide) } # This section is shared for all models if(MONTH.STRUCTURE=="FOUR"){ puso.eff.wide$month.winter.2 <- rowSums(puso.eff.wide[,WINTER.MONTH[1:3]]) puso.eff.wide$month.winter.1 <- rowSums(puso.eff.wide[,WINTER.MONTH[4:5]]) puso.eff.wide$month.spring <- rowSums(puso.eff.wide[,SPRING.MONTH]) puso.eff.wide$month.summer <- rowSums(puso.eff.wide[,SUMMER.MONTH]) puso.eff.wide$month.fall <- rowSums(puso.eff.wide[,FALL.MONTH]) } if(MONTH.STRUCTURE=="SPRING"){ puso.eff.wide$month.winter.2 <- rowSums(puso.eff.wide[,WINTER.MONTH[1:2]]) puso.eff.wide$month.winter.1 <- rowSums(puso.eff.wide[,WINTER.MONTH[3:4]]) puso.eff.wide$month.spring <- rowSums(puso.eff.wide[,SPRING.MONTH]) puso.eff.wide$month.summer <- rowSums(puso.eff.wide[,SUMMER.MONTH]) puso.eff.wide$month.fall <- rowSums(puso.eff.wide[,FALL.MONTH]) } if(MONTH.STRUCTURE=="FRAM"){ puso.eff.wide$month.winter.2 <- (puso.eff.wide[,WINTER.MONTH[1]]) puso.eff.wide$month.winter.1 <- rowSums(puso.eff.wide[,WINTER.MONTH[2:4]]) puso.eff.wide$month.spring <- rowSums(puso.eff.wide[,SPRING.MONTH]) puso.eff.wide$month.summer <- rowSums(puso.eff.wide[,SUMMER.MONTH]) puso.eff.wide$month.fall <- rowSums(puso.eff.wide[,FALL.MONTH]) } puso.eff.wide$year.wint.2 <- puso.eff.wide$Year-1 
temp <- puso.eff.wide[,c("year.wint.2", "area.code","month.winter.2") ] puso.eff.wide <- puso.eff.wide %>% dplyr::select(-year.wint.2,-month.winter.2) puso.eff.wide <- merge(puso.eff.wide,temp,by.x=c("Year","area.code"),by.y=c("year.wint.2" ,"area.code"),all=T) puso.eff.wide$month.winter <- puso.eff.wide$month.winter.2 + puso.eff.wide$month.winter.1 puso.eff.rec <- puso.eff.wide[,c("Year","area.code",MONTH)] puso.eff.rec <- puso.eff.rec %>% dplyr::rename(year=Year) ##3 puso.eff.area5.wide # # puso.eff.area5.wide <- data.frame(port="WAC",year=YEARS.RECOVER) # puso.eff.area5.wide <- cbind(puso.eff.area5.wide,matrix(0,length(YEARS.RECOVER),12)) # # puso.eff.area5.wide <- dcast(puso.eff.area5[,c("Year","Month","total.effort")],Year~Month) # puso.eff.area5.wide <- data.frame(port="All Puget Sound",puso.eff.area5.wide) # colnames(puso.eff.area5.wide)[3:14] <- ALL.MONTH # # puso.eff.area5.wide$effort.type <- "angler.trip" # puso.eff.area5.wide$Notes <- NA # puso.eff.area5.wide$area.code <- "WAC" # # puso.eff.area5.wide$month.winter.2 <- rowSums(puso.eff.area5.wide[,WINTER.MONTH[4:5]]) # puso.eff.area5.wide$month.winter.1 <- rowSums(puso.eff.area5.wide[,WINTER.MONTH[1:3]]) # puso.eff.area5.wide$month.spring <- rowSums(puso.eff.area5.wide[,SPRING.MONTH]) # puso.eff.area5.wide$month.summer <- rowSums(puso.eff.area5.wide[,SUMMER.MONTH]) # puso.eff.area5.wide$month.fall <- rowSums(puso.eff.area5.wide[,FALL.MONTH]) # # puso.eff.area5.wide$year.wint.2 <- puso.eff.area5.wide$Year+1 # # temp <- puso.eff.area5.wide[,c("year.wint.2", "area.code","month.winter.2") ] # puso.eff.area5.wide <- puso.eff.area5.wide %>% dplyr::select(-year.wint.2,-month.winter.2) # puso.eff.area5.wide <- merge(puso.eff.area5.wide,temp,by.x=c("Year","area.code"),by.y=c("year.wint.2" ,"area.code"),all=T) # puso.eff.area5.wide$month.winter <- puso.eff.area5.wide$month.winter.2 + puso.eff.area5.wide$month.winter.1 # # puso.eff.area5.rec <- puso.eff.area5.wide[,c("Year","area.code",MONTH)] # puso.eff.area5.rec 
<- puso.eff.area5.rec %>% dplyr::rename(year=Year) # # puso.eff.by.area <- aggregate(puso.eff.wide[,ALL.MONTH], # by=list(year=puso.eff.wide$year,area.code=puso.eff.wide$area.code),sum) #update WAC effort with puso.eff.area5 effort # temp.WAC <- ca.or.wa.eff.rec %>% filter(area.code == "WAC")%>% filter(year %in% YEARS.RECOVER) %>% arrange(year) # temp.puso.eff.area5 <- puso.eff.area5.rec %>% filter(year %in% YEARS.RECOVER) %>% arrange(year) # # if(nrow(temp.WAC)==nrow(temp.puso.eff.area5)){ # temp.WAC.rec <- cbind(temp.WAC[,1:2],temp.WAC[,3:ncol(temp.WAC)] + temp.puso.eff.area5[,3:ncol(temp.puso.eff.area5)]) %>% as.data.frame() # }else{ # print(rep("STOP",3)) # } # # ca.or.wa.eff.rec <- ca.or.wa.eff.rec %>% filter(area.code != "WAC") # ca.or.wa.eff.rec <- rbind(ca.or.wa.eff.rec,temp.WAC.rec) %>% arrange(year) %>% as.data.frame() ################################################################################# ### SGEO - NOTE THE JOHNSTONE DATA IS EXCLUDED FROM THIS BECAUSE WE DON"T HAVE EARLY DATA FROM JOHNSTONE ################################################################################# bc.trim <- bc.eff %>% filter(DISPOSITION == "Effort") john.areas <- c("PFMA 11","PFMA 111","PFMA 12") sgeo.areas <- c("PFMA 13","PFMA 14","PFMA 15","PFMA 16","PFMA 17", "PFMA 18","PFMA 19","PFMA 20","PFMA 28","PFMA 29") swvi.areas <- c("PFMA 21","PFMA 22","PFMA 23","PFMA 24","PFMA 121","PFMA 123","PFMA 124") nwvi.areas <- c("PFMA 25","PFMA 26","PFMA 27","PFMA 125","PFMA 126","PFMA 127") bc.trim <- bc.trim %>% mutate(area.code = "", area.code=ifelse(PFMA %in% john.areas,"CBC",area.code)) %>% mutate(area.code=ifelse(PFMA %in% sgeo.areas,"SGEO",area.code)) %>% mutate(area.code=ifelse(PFMA %in% swvi.areas,"SWVI",area.code)) %>% mutate(area.code=ifelse(PFMA %in% nwvi.areas,"NWVI",area.code)) bc.trim <- bc.trim %>% group_by(year=YEAR,month.numb,area.code) %>% dplyr::summarise(total.effort=sum(Estimate)) if(MONTH.STRUCTURE=="FOUR"){ bc.trim <- bc.trim %>% mutate(season="", season = 
ifelse(month.numb<=3, "month.winter.2",""), season = ifelse(month.numb>=11 & month.numb<=12, "month.winter.1",season), season = ifelse(month.numb>=4 & month.numb<=5, "month.spring",season), season = ifelse(month.numb>=6 & month.numb<=7, "month.summer",season), season = ifelse(month.numb>=8 & month.numb<=10, "month.fall",season)) } if(MONTH.STRUCTURE=="SPRING"){ bc.trim <- bc.trim %>% mutate(season="", season = ifelse(month.numb<=2, "month.winter.2",""), season = ifelse(month.numb>=11 & month.numb<=12, "month.winter.1",season), season = ifelse(month.numb>=3 & month.numb<=5, "month.spring",season), season = ifelse(month.numb>=6 & month.numb<=7, "month.summer",season), season = ifelse(month.numb>=8 & month.numb<=10, "month.fall",season)) } if(MONTH.STRUCTURE=="FRAM"){ bc.trim <- bc.trim %>% mutate(season="",season = ifelse(month.numb<=1, "month.winter.2",""), season = ifelse(month.numb>=10 & month.numb<=12, "month.winter.1",season), season = ifelse(month.numb>=4 & month.numb<=5, "month.spring",season), season = ifelse(month.numb>=6 & month.numb<=7, "month.summer",season), season = ifelse(month.numb>=8 & month.numb<=10, "month.fall",season)) } bc.trim <- bc.trim %>% mutate(year.mod =year,year.mod=ifelse(season=="month.winter.2",year.mod-1,year.mod), season=ifelse(season=="month.winter.1","month.winter",season), season=ifelse(season=="month.winter.2","month.winter",season)) bc.mod <- bc.trim %>% group_by(year,season,area.code) %>% dplyr::summarise(total.eff = sum(total.effort)) bc.eff.rec <- full_join(expand.grid(area.code=c("CBC","SGEO","NWVI","SWVI"),year=YEARS.RECOVER), pivot_wider(bc.mod,id_cols = c("year","area.code"), names_from = "season", values_from = "total.eff")) bc.eff.rec <- bc.eff.rec %>% dplyr::select(year,area.code,month.winter,month.spring,month.summer,month.fall) bc.eff.rec[is.na(bc.eff.rec)==T] <- 0 ### iREC DATA ### Process the iREC data from Canada. This is a different data type and form than the other recreational data. 
### it needs additional processing. # extract only effort information. can.irec.mod <- can.irec.eff %>% filter(ITEM_GROUP=="EFFORT") %>% dplyr::select(YEAR,MONTH,AREA,ITEM,ESTIMATE) # Combine adult and juvenile effort can.irec.mod <- can.irec.mod %>% group_by(YEAR,MONTH,AREA) %>% summarize(effort = sum(ESTIMATE)) can.irec.mod <- left_join(can.irec.mod,can.irec.mapping) %>% filter(!REGION=="RIVER") %>% group_by(YEAR,MONTH,REGION) %>% summarize(tot.effort = sum(effort)) if(MONTH.STRUCTURE=="FOUR"){ can.irec.mod<- can.irec.mod %>% mutate(season = ifelse(MONTH<=3, "month.winter.2",""), season = ifelse(MONTH>=11 & MONTH<=12, "month.winter.1",season), season = ifelse(MONTH>=4 & MONTH<=5, "month.spring",season), season = ifelse(MONTH>=6 & MONTH<=7, "month.summer",season), season = ifelse(MONTH>=8 & MONTH<=10, "month.fall",season)) } if(MONTH.STRUCTURE=="SPRING"){ can.irec.mod<- can.irec.mod %>% mutate(season = ifelse(MONTH<=2, "month.winter.2",""), season = ifelse(MONTH>=11 & MONTH<=12, "month.winter.1",season), season = ifelse(MONTH>=3 & MONTH<=5, "month.spring",season), season = ifelse(MONTH>=6 & MONTH<=7, "month.summer",season), season = ifelse(MONTH>=8 & MONTH<=10, "month.fall",season)) } if(MONTH.STRUCTURE=="FRAM"){ can.irec.mod<- can.irec.mod %>% mutate(season = ifelse(MONTH<=1, "month.winter.2",""), season = ifelse(MONTH>=10 & MONTH<=12, "month.winter.1",season), season = ifelse(MONTH>=4 & MONTH<=5, "month.spring",season), season = ifelse(MONTH>=6 & MONTH<=7, "month.summer",season), season = ifelse(MONTH>=8 & MONTH<=10, "month.fall",season)) } can.irec.mod <- can.irec.mod %>% mutate(year.mod =YEAR,year.mod=ifelse(season=="month.winter.2",year.mod-1,year.mod), season=ifelse(season=="month.winter.1","month.winter",season), season=ifelse(season=="month.winter.2","month.winter",season)) can.irec.mod <- can.irec.mod %>% rename(area.code=REGION) can.irec.mod <- can.irec.mod %>% group_by(year.mod,area.code,season) %>% summarize(effort=sum(tot.effort)) can.irec.mod <- 
pivot_wider(can.irec.mod,id_cols = c("year.mod","area.code"), names_from = "season", values_from = "effort") # dcast(can.irec.mod,year.mod+area.code~season,value.var="effort",sum) can.irec.eff.fin <- left_join(data.frame(expand.grid(year.mod=YEARS.RECOVER,area.code=LOCATIONS$location.name)),can.irec.mod) can.irec.eff.fin <- can.irec.eff.fin %>% rename(year=year.mod) can.irec.eff.fin <- can.irec.eff.fin[,c("year","area.code",MONTH)] can.irec.eff.fin[is.na(can.irec.eff.fin)==T] <- 0 effort.can.irec <- can.irec.eff.fin ################################################################################### ################################################################################### # Combine the files and trim to match the Years span specified by the Master File #effort <- rbind(ca.or.wa.eff.rec,puso.eff.rec) effort <- rbind(ca.or.wa.eff.rec,puso.eff.rec,bc.eff.rec) #effort <- rbind(ca.or.wa.eff.rec,puso.eff.rec,sgeo.eff.rec,ak.eff.rec) temp<- expand.grid(year=YEARS.RECOVER,area.code=LOCATIONS$location.name) effort <- merge(effort,data.frame(year=YEARS.RECOVER)) effort <- merge(effort,temp,all=T) effort$area.numb <- LOCATIONS$location.number[match(effort$area.code,LOCATIONS$location.name)] effort <- effort[order(effort$area.numb,effort$year),] effort[is.na(effort == T)]<- 0 effort.can.irec$area.numb <- LOCATIONS$location.number[match(effort.can.irec$area.code,LOCATIONS$location.name)] effort.rec <- effort
a729fc86b928c4260e19e494422c058b0046c958
f17524c4609ca21b3bf05b17e2670031ebe2f136
/Vegetation Change/VegChangeAnalyses_Final.R
f0001430c6d3dcc61cded098e008ab79f92c15b0
[]
no_license
cliffbueno/Manuscripts
98edb62d9ccd70b98c8d31f4c9d6c0d4f8c8b348
27a11135599bab6c630132a6af87b134d01f1a7c
refs/heads/master
2023-04-11T05:14:39.090989
2023-03-22T00:46:59
2023-03-22T00:46:59
153,540,391
0
0
null
null
null
null
UTF-8
R
false
false
12,917
r
VegChangeAnalyses_Final.R
# Analysis of remote sensing data, ground truth data, and summer climate data, Niwot Ridge Colorado # By Cliff Bueno de Mesquita Fall 2015 - Spring 2018 # Original remote sensing by Luke Tillmann 2014 # Ground truthing by Cliff Bueno de Mesquita, Connor Bernard, Katherine Rosemond # Now published in Arctic, Antarctic, and Alpine Research ################################### Setup ################################################## library(leaps) library(bestglm) library(AICcmodavg) library(corrplot) library(randomForest) library(miscTools) library(ggplot2) library(MASS) library(spData) library(sp) library(spgwr) library(boot) library(modEvA) library(VSURF) library(maptools) library(robustbase) library(Rcpp) library(spdep) library(Matrix) library(GWmodel) library(TTR) library(quantmod) library(tseries) library(fracdiff) library(timeDate) library(forecast) library(ggplot2) library(nlme) source("~/Desktop/Functions/logisticPseudoR2s.R") setwd("~/Desktop/CU/2Research/VegChange") vc <- read.csv("VegChangeAll.csv") # What variables are too correlated with elevation? None! 
plot(vc$Elevation, vc$Aspect) plot(vc$Elevation, vc$Slope) plot(vc$Elevation, vc$Mean) plot(vc$Elevation, vc$CV) plot(vc$Elevation, vc$Trend) plot(vc$Elevation, vc$Solar) ggplot(vc, aes(Elevation, Mean)) + geom_smooth(method = loess) # Make continuous cover variable cover <- read.csv("Elev_Cover.csv") tcover <- cover[1:5,] treemodel <- lm(tcover$Tree~poly(tcover$Elevation,3,raw=TRUE)) summary(treemodel) scover <- cover[1:7,] shrubmodel <- lm(scover$Shrub~poly(scover$Elevation,4,raw=TRUE)) summary(shrubmodel) tundramodel <- lm(cover$Tundra~poly(cover$Elevation,4,raw=TRUE)) summary(tundramodel) third_order <- function(newdist, model) { coefs <- coef(model) res <- coefs[1] + (coefs[2] * newdist) + (coefs[3] * newdist^2) + (coefs[4] * newdist^3) return(res) } fourth_order <- function(newdist, model) { coefs <- coef(model) res <- coefs[1] + (coefs[2] * newdist) + (coefs[3] * newdist^2) + (coefs[4] * newdist^3) + (coefs[5] * newdist^4) return(res) } vc$Tree_Cover <- third_order(vc$Elevation, treemodel) for (i in 1:1532) { if (vc$Tree_Cover[i] < 0) { vc$Tree_Cover[i] <- 0 } if (vc$Elevation[i] > 3472) { vc$Tree_Cover[i] <- 0 } } vc$Shrub_Cover <- fourth_order(vc$Elevation, shrubmodel) for (i in 1:1532) { if (vc$Shrub_Cover[i] < 0) { vc$Shrub_Cover[i] <- 0 } if (vc$Elevation[i] > 3685) { vc$Shrub_Cover[i] <- 0 } } vc$Tundra_Cover <- fourth_order(vc$Elevation, tundramodel) for (i in 1:1532) { if (vc$Tundra_Cover[i] < 0) { vc$Tundra_Cover[i] <- 0 } } plot(vc$Elevation, vc$Tree_Cover) plot(vc$Elevation, vc$Shrub_Cover) plot(vc$Elevation, vc$Tundra_Cover) # Supplementary Figure A1 ggplot(vc, aes(x = Elevation, y = Tree_Cover)) + geom_line(aes(x=Elevation,y=Tree_Cover,colour="darkgreen")) + geom_line(aes(x=Elevation,y=Shrub_Cover,colour="lightgreen")) + geom_line(aes(x=Elevation,y=Tundra_Cover,colour="orange")) + ylim(0,100) + xlab("Elevation (m)") + ylab("Percent Cover") + scale_colour_manual(name = "Veg. 
Type", values = c("darkgreen","lightgreen","orange"), labels = c("Tree","Shrub","Tundra")) + theme_bw() + theme(axis.text.x = element_text(size = 14), axis.title.x = element_text(size = 16, face = "bold"), axis.text.y = element_text(size = 14), axis.title.y = element_text(size = 16, face = "bold")) # covers <- as.data.frame(cbind(vc$X, vc$Y, vc$Tree_Cover, vc$Shrub_Cover, vc$Tundra_Cover)) # write.csv(covers, file = "Covers.csv") # Subsets for tundra, shrub, open forest, absent in 1972 ta <- subset(vc, Cover_1972 != "T") sa <- subset(vc, Cover_1972 != "S") oa <- subset(vc, Cover_1972 != "O") loa <- subset(oa, Elevation < 3600) lsa <- subset(sa, Elevation < 3760) hta <- subset(ta, Elevation > 3550) ############################ Elevational Range ############################################# # Test for change in max and min, and in 95th percentile for each # All Forest f72 <- subset(vc, Cover_1972 == "O" | Cover_1972 == "C") f08 <- subset(vc, Cover_2008 == "O" | Cover_2008 == "C") min(f72$Elevation) max(f72$Elevation) quantile(f72$Elevation, c(0.05, 0.95)) min(f08$Elevation) max(f08$Elevation) quantile(f08$Elevation, c(0.05, 0.95)) # Closed Canopy Forest cf72 <- subset(vc, Cover_1972 == "C") cf08 <- subset(vc, Cover_2008 == "C") min(cf72$Elevation) max(cf72$Elevation) quantile(cf72$Elevation, c(0.05, 0.95)) min(cf08$Elevation) max(cf08$Elevation) quantile(cf08$Elevation, c(0.05, 0.95)) # Open Forest of72 <- subset(vc, Cover_1972 == "O") of08 <- subset(vc, Cover_2008 == "O") min(of72$Elevation) max(of72$Elevation) quantile(of72$Elevation, c(0.05, 0.95)) min(of08$Elevation) max(of08$Elevation) quantile(of08$Elevation, c(0.05, 0.95)) # Shrub s72 <- subset(vc, Cover_1972 == "S") s08 <- subset(vc, Cover_2008 == "S") min(s72$Elevation) max(s72$Elevation) quantile(s72$Elevation, c(0.05, 0.95)) min(s08$Elevation) max(s08$Elevation) quantile(s08$Elevation, c(0.05, 0.95)) # Tundra t72 <- subset(vc, Cover_1972 == "T") t08 <- subset(vc, Cover_2008 == "T") min(t72$Elevation) 
max(t72$Elevation) quantile(t72$Elevation, c(0.05, 0.95)) min(t08$Elevation) max(t08$Elevation) quantile(t08$Elevation, c(0.05, 0.95)) ################################### Best GLMs ############################################# # To test for best combo of fine-scale predictor variables of change # First look at correlations env <- vc[,c(3:8,27:29)] M <- cor(env) corrplot(M, method = "number", type = "lower") # Tree below 3600m X <- as.data.frame(scale(loa[,c(3:8,27)])) y <- loa$Oexpand Xy <- as.data.frame(cbind(X,y)) bestLOE <- bestglm(Xy, IC = "AIC", family = binomial) bestLOE # Cover, Elevation, Solar bestLOE$BestModels bestLOE <- glm(Oexpand ~ Tree_Cover + Elevation + Solar, family = binomial, data = loa) summary(bestLOE) logisticPseudoR2s(bestLOE) Dsquared(bestLOE, adjust = TRUE) bestLOECV<-cv.glm(data=loa,glmfit=bestLOE,K=10) bestLOECV$delta bestLOEAUC<-AUC(model=bestLOE) bestLOEAUC$AUC # Null AIC 318.88, AIC 278.27 min(loa$Tree_Cover) max(loa$Tree_Cover) min(loa$Elevation) max(loa$Elevation) min(loa$Solar) max(loa$Solar) # Shrub below 3760m lsa <- subset(sa, Elevation < 3760) X <- as.data.frame(scale(lsa[,c(3:8,28)])) y <- lsa$Sexpand Xy <- as.data.frame(cbind(X,y)) bestLSE <- bestglm(Xy, IC = "AIC", family = binomial) bestLSE # Cover, Elevation, Solar, Trend bestLSE$BestModels bestLSE <- glm(Sexpand ~ Shrub_Cover + Elevation + Solar + Trend, family = binomial, data = lsa) summary(bestLSE) logisticPseudoR2s(bestLSE) Dsquared(bestLSE, adjust = TRUE) bestLSECV<-cv.glm(data=lsa,glmfit=bestLSE,K=10) bestLSECV$delta bestLSEAUC<-AUC(model=bestLSE) bestLSEAUC$AUC # Null AIC 352.63, AIC 321.65 min(lsa$Shrub_Cover) max(lsa$Shrub_Cover) min(lsa$Elevation) max(lsa$Elevation) min(lsa$Solar) max(lsa$Solar) min(lsa$Trend) max(lsa$Trend) # Tundra above 3500m X <- as.data.frame(scale(hta[,c(3:8,29)])) y <- hta$Texpand Xy <- as.data.frame(cbind(X,y)) bestHTE <- bestglm(Xy, IC = "AIC", family = binomial) bestHTE # CV, Solar, Slope bestHTE$BestModels bestHTE <- glm(Texpand ~ CV 
+ Solar + Slope, family = binomial, data = hta) summary(bestHTE) logisticPseudoR2s(bestHTE) Dsquared(bestHTE, adjust = TRUE) bestHTECV<-cv.glm(data=hta,glmfit=bestHTE,K=10) bestHTECV$delta bestHTEAUC<-AUC(model=bestHTE) bestHTEAUC$AUC # Null AIC 148.26, AIC 138.42 min(hta$CV) max(hta$CV) min(hta$Solar) max(hta$Solar) min(hta$Slope) max(hta$Slope) plot(vc$Mean, vc$CV) # Less snow is more variable cor <- cor.test(vc$Mean, vc$CV, method = "pearson") cor ############################### Random Forests ############################################ of.rf <- randomForest(as.factor(Oexpand) ~ Tree_Cover + Elevation + Solar + Slope + Mean + CV + Trend, data = loa, family = binomial(logit), ntree = 5000, importance = TRUE) of.rf importance(of.rf) varImpPlot(of.rf) sh.rf <- randomForest(as.factor(Sexpand) ~ Shrub_Cover + Elevation + Solar + Trend + Slope + Mean + CV, data = lsa, family = binomial(logit), ntree = 5000, importance = TRUE) sh.rf importance(sh.rf) varImpPlot(sh.rf) tu.rf <- randomForest(as.factor(Texpand) ~ CV + Solar + Slope + Tundra_Cover + Elevation + Mean + Trend, data = hta, family = binomial(logit), ntree = 5000, importance = TRUE) tu.rf importance(tu.rf) varImpPlot(tu.rf) ########################## Geographically Weighted Regression ############################## # With package GWmodel # Run model selection manually following the gwr.model.selection protocol # Then compare to the original bestglm model loa <- SpatialPointsDataFrame(cbind(loa$X,loa$Y), loa) DM <- gw.dist(dp.locat=cbind(loa$X,loa$Y)) bw.f2 <- bw.ggwr(Oexpand~Tree_Cover+Elevation+Solar,data=loa,dMat=DM, family ="binomial") res.binomial <- ggwr.basic(Oexpand~Tree_Cover+Elevation+Solar,bw=bw.f2, data=loa,dMat=DM,family ="binomial") res.binomial$GW.diagnostic lsa <- SpatialPointsDataFrame(cbind(lsa$X,lsa$Y), lsa) DM <- gw.dist(dp.locat=cbind(lsa$X,lsa$Y)) bw.f2 <- bw.ggwr(Sexpand~Shrub_Cover+Elevation+Solar+Trend,data=lsa,dMat=DM, family ="binomial") res.binomial <- 
ggwr.basic(Sexpand~Shrub_Cover+Elevation+Solar+Trend,bw=bw.f2, data=lsa,dMat=DM,family ="binomial") res.binomial$GW.diagnostic hta <- SpatialPointsDataFrame(cbind(hta$X,hta$Y), hta) DM <- gw.dist(dp.locat=cbind(hta$X,hta$Y)) bw.f2 <- bw.ggwr(Texpand~CV+Solar+Slope,data=hta,dMat=DM, family ="binomial") res.binomial <- ggwr.basic(Texpand~CV+Solar+Slope,bw=bw.f2, data=hta,dMat=DM,family ="binomial") res.binomial$GW.diagnostic ################################### Climate Data ########################################### # Analysis and Figure 2 d <- read.csv("Summer.csv") m <- lm(d$Summer_mean ~ d$Year) summary(m) # 1972 to 2008 da <- d[20:56,] m1 <- lm(da$Summer_mean ~ da$Year) summary(m1) time_series <- ts(da[,2:length(da)], start = 1972, end = 2008) m2 <- tslm(time_series ~ trend) summary(m2) # Figure 2 ggplot(da, aes(x=Year,y=Summer_mean)) + geom_point(size = 3) + scale_x_continuous(name = "Year", breaks = seq(1972,2008,4)) + scale_y_continuous(name = "Mean Summer Temp. (˚C)", breaks = seq(3,12,1), limits = c(3,11)) + geom_smooth(method=loess, color = "blue", fill = "blue", alpha = 0.1) + geom_smooth(method=lm, color = "red", "fill" = "red", alpha = 0.1) + theme_bw() + theme(legend.position="NULL", axis.title.x = element_text(face="bold", size = 18), axis.text.x = element_text(size = 16), axis.text.y = element_text(size = 14), axis.title.y = element_text(face="bold",size=18)) ############################ Bare to Tundra, Soil ########################################## bt <- read.csv("BareTundra.csv", header = TRUE) log <- bt[1:24,] log$Change <- c(0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1) BTlog<-glm(Change ~ Depth, family = binomial, data = log) summary(BTlog) # NSD BTlog<-glm(Change ~ pH, family = binomial, data = log) summary(BTlog) # NSD BTlog<-glm(Change ~ Bulk, family = binomial, data = log) summary(BTlog) # NSD ########################### Tundra to Shrub, Soil ########################################## ts <- read.csv("TundraShrub.csv") log <- 
as.data.frame(ts[1:33,]) log$Change <- c(1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0) TSlog<-glm(Change ~ Depth, family = binomial, data = log) summary(TSlog) # NSD TSlog<-glm(Change ~ pH, family = binomial, data = log) summary(TSlog) # NSD TSlog<-glm(Change ~ Bulk, family = binomial, data = log) summary(TSlog) # NSD ############################# Tundra to Forest, Soil ####################################### log <- read.csv("TundraForest.csv") # All Species TOlog<-glm(Change ~ Depth, family = binomial, data = log) summary(TOlog) # NSD TOlog<-glm(Change ~ pH, family = binomial, data = log) summary(TOlog) # NSD TOlog<-glm(Change ~ Bulk, family = binomial, data = log) summary(TOlog) # NSD # Limber Pine TOlog<-glm(Pine ~ Depth, family = binomial, data = log) summary(TOlog) # NSD TOlog<-glm(Pine ~ pH, family = binomial, data = log) summary(TOlog) # NSD TOlog<-glm(Pine ~ Bulk, family = binomial, data = log) summary(TOlog) # NSD # Englemann Spruce TOlog<-glm(Spruce ~ Depth, family = binomial, data = log) summary(TOlog) # NSD TOlog<-glm(Spruce ~ pH, family = binomial, data = log) summary(TOlog) # NSD TOlog<-glm(Spruce ~ Bulk, family = binomial, data = log) summary(TOlog) # Significant # Subalpine Fir TOlog<-glm(Fir ~ Depth, family = binomial, data = log) summary(TOlog) # NSD TOlog<-glm(Fir ~ pH, family = binomial, data = log) summary(TOlog) # NSD TOlog<-glm(Fir ~ Bulk, family = binomial, data = log) summary(TOlog) # NSD
c5592e69f0bf8bfc914abae660a0f889c89c6f92
639186029bc52a756bf395cda3cf70bc6b5ce309
/06-IndstrInc/01-import.R
da1f809937599e3014261415ccdce8b498dc6845
[]
no_license
Ravin515/R-Play
36c4e502625b6bf72175b092fe84c2b8fc5f69c3
02b012a5bff4eb961f108ee96d2638f4933321ad
refs/heads/master
2022-11-29T04:08:41.128438
2020-08-11T07:41:40
2020-08-11T07:43:33
109,561,205
0
2
null
2017-11-05T07:53:36
2017-11-05T07:53:35
null
UTF-8
R
false
false
843
r
01-import.R
library(data.table) library(dplyr) library(stringr) # a <- list.files(pattern = "*.csv") data <- data.table(year = list.files(pattern = "*.csv")) data[, csv := lapply(year, function(x) { fread(x, sep = ',', fill = T, encoding = "UTF-8", na.strings = "", integer64 = "integer64") })] flat.data <- data[, rbindlist(.SD[['csv']], fill = T, idcol = "year") ][, year := year + 2010] fwrite(flat.data, "2011-2013.csv") library(RODBC) mydb <- odbcDriverConnect("Driver={Microsoft Access Driver (*.mdb, *.accdb)}; DBQ=D:/code/r-play/07-indstrinc/2003.mdb") sqlTables(mydb, tableName = "qy03") res <- sqlFetch(mydb, "qy03") %>% as.data.table() library(DBI) library(odbc) cn <- dbConnect(odbc::odbc(), dsn = "2003") cn <- dbConnect(odbc::odbc(), DBQ = "D:/code/r-play/07-indstrinc/2003.mdb") cn <- dbConnect(drv = odbc::odbc(), dsn = "2003")
8e97a4594ab25ed5fb76f6d55af7f079e7935fef
57ed22671d2c348fe35c7832fd008c3a51de039c
/man/lea_prep.Rd
39ed67834b0b167c096019af59127f01a9b2040a
[ "MIT" ]
permissive
datalorax/leaidr
694c4d1d6d7773454673876d3c982d0db49f80b7
26f4672c98cae96a6ecc96e9c705890ed7a8ecb7
refs/heads/master
2022-11-20T13:38:44.794448
2020-07-27T21:42:36
2020-07-27T21:42:36
281,791,912
1
0
null
2020-07-22T22:02:47
2020-07-22T22:02:46
null
UTF-8
R
false
true
584
rd
lea_prep.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lea_prep.R \name{lea_prep} \alias{lea_prep} \title{Prep State- or Nation-Wide District Shapefile} \usage{ lea_prep(path = NULL, fips = NULL) } \arguments{ \item{path}{A character vector specifying a file path, such as: path = "./test".} \item{fips}{A character vector specifying a FIPS code for a state. A reference table is available \href{https://www.mcc.co.mercer.pa.us/dps/state_fips_code_listing.htm}{here}.} } \value{ A shapefile. } \description{ \code{lea_prep()} creates your desired shapefile. }
087e5ca23ac6ae8cdb98e356adeec776530a3a54
33b3195f06fcde4e95841c6b6db2ceeb2deb2737
/man/transmute.Rd
b0e4f86226822f6a3f466df0f98ce1f9448c7416
[]
no_license
serenity-r/tidylog
26dda5c3ef05e1e87cf2fe23702a679158517a7a
f9d569c75119bb01d52cbb517a1cb180480e906c
refs/heads/master
2020-04-21T17:12:54.823380
2019-02-07T06:47:05
2019-02-07T06:47:05
null
0
0
null
null
null
null
UTF-8
R
false
true
680
rd
transmute.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mutate.R \name{transmute} \alias{transmute} \alias{transmute_all} \alias{transmute_if} \alias{transmute_at} \title{Wrapper around dplyr::transmute and related functions that prints information about the operation} \usage{ transmute(.data, ...) transmute_all(.data, ...) transmute_if(.data, ...) transmute_at(.data, ...) } \arguments{ \item{.data}{a tbl; see \link[dplyr:mutate]{transmute}} \item{...}{see \link[dplyr:mutate]{transmute}} } \value{ see \link[dplyr:mutate]{transmute} } \description{ Wrapper around dplyr::transmute and related functions that prints information about the operation }
914449f641473436929fe068b880f7589da25095
fdc6d2044b02501ea04743b01bc143c2275ac010
/R/cleanInvalidWhen.R
9ae9a6401a4f2758a6322006c157003c39dfd3c8
[]
no_license
fjbaron/accelerator
ad964ae4c3d5afce2a87a6d9dc1bd6c654702bed
b0da822c6150c9255f7f7551fa799b67042f0516
refs/heads/master
2023-06-25T02:51:56.447351
2023-06-13T08:14:58
2023-06-13T08:14:58
220,418,840
0
0
null
null
null
null
UTF-8
R
false
false
318
r
cleanInvalidWhen.R
#' Title #' #' @param when #' @param invalidWhen #' #' @return #' @export #' #' @examples cleanInvalidWhen=function(when,invalidWhen){ invalidWhen=invalidWhen %>% rename(label=when) when %>% map(~ .x %>% left_join(invalidWhen,by=c("day","label")) %>% filter(is.na(reason)) %>% select(-reason) ) }
20fe236cdcd461760dced09d184fe6873852d341
17b6dbd8acf2ce8556684754dc5f48a9373f7c96
/man/optimize_numRegions.Rd
88042ac147aa445e18cc549ab93edb1401609bbd
[]
no_license
leekgroup/phenopredict
f53a517c670a9670041825c79456874367d92327
ab34f6ca3c0aeb90d2c672837175d7a13c308ca5
refs/heads/master
2021-09-06T23:59:31.970592
2018-02-13T19:24:14
2018-02-13T19:24:14
66,372,434
16
3
null
null
null
null
UTF-8
R
false
true
2,442
rd
optimize_numRegions.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/optimize_numRegions.R \name{optimize_numRegions} \alias{optimize_numRegions} \title{Optimize number of regions used for prediction} \usage{ optimize_numRegions(inputdata = NULL, phenodata = NULL, phenotype = NULL, covariates = NULL, type = NULL, numRegions_set = c(10, 20, 30, 60, 80, 100, 150, 200)) } \arguments{ \item{inputdata}{output from filter_regions() \code{inputdata}} \item{phenodata}{data set with phenotype information; samples in rows, variables in columns \code{phenodata}} \item{phenotype}{phenotype of interest \code{phenotype}} \item{covariates}{Which covariates to include in model \code{covariates}} \item{type}{The class of the phenotype of interest (numeric, binary, factor) \code{type}} \item{numRegions_set}{set of numRegions to test \code{numRegions_set}} } \value{ Prediction accuracies across each numRegions argument tested } \description{ This function takes a list of possible options for the numRegions argument of build_predictor. Using this set of possible numRegions and expression data (the training data / output of filter_regions()), this function builds a predictor for each possible numRegions. Prediction accuracy is then calculated across varying numbers of regions. The numRegions argument that optimizes accuracy in the training data can then be used in build_predictor. 
} \examples{ library('GenomicRanges') library('dplyr') ## Make up some some region data regions <- GRanges(seqnames = 'chr2', IRanges( start = c(28971710:28971712, 29555081:29555083, 29754982:29754984), end = c(29462417:29462419, 29923338:29923340, 29917714:29917716))) ## make up some expression data for 9 rows and 30 people data(sysdata, package='phenopredict') ## includes R object 'cm' exp= cm[1:length(regions),1:30] ## generate some phenotype information sex = as.data.frame(rep(c("male","female"),each=15)) age = as.data.frame(sample(1:100,30)) pheno = dplyr::bind_cols(sex,age) colnames(pheno) <- c("sex","age") ## filter regions to be used to build the predictor inputdata <- filter_regions(expression=exp, regiondata=regions, phenodata=pheno, phenotype="sex", covariates=NULL, type="factor", numRegions=5) regnum <- optimize_numRegions(inputdata=inputdata ,phenodata=pheno, phenotype="sex", covariates=NULL,type="factor",numRegions_set=c(3,5)) } \keyword{optimization} \keyword{phenotype,} \keyword{prediction,}
ceda7a6f790b40c8cc1c845cffd261f2bb6beaf2
f860a2ddbebe96ad25f2347823d1ad31a5ae949e
/R/inclass/class_15.R
d8e83cd3406051d24c2cac7ebefc5e29a2d4fee6
[ "MIT" ]
permissive
mespe/STS198
edd0e966a329b8701048e2c8371a57b0a261f2fa
4dd8184e67689ff9d0af3dab1813973e46f59df3
refs/heads/master
2021-01-22T18:51:06.426391
2017-09-15T23:10:34
2017-09-15T23:10:34
85,125,705
0
0
null
null
null
null
UTF-8
R
false
false
2,280
r
class_15.R
# Class 15 # STS 198 # 22 May 2017 library(ggplot2) ################################################################################ # Cleaning data load(url("https://github.com/mespe/STS198/blob/master/data/auto_posts.rda?raw=true")) # load("../../data/auto_posts.rda") # Starting with price column: head(posts$price) # The price inculdes "$", which is going to cause issues # if we just use as.numeric() # We can use gsub() to get rid of the "$" # gsub stands for global subsitution # This does not work - "$" is a special character price_new = gsub("$", "", posts$price) # To search/replace for special chars $^.*+?[()]\ price_new = gsub("\\$", "", posts$price) # Have to escape these using a double "\" gsub(pattern = "\\\ \\$", replacement = "BOB", x = "\ $") # This introduces NAs - We need to check those out price_new = as.numeric(price_new) # Turns out there are some long strings in the price column # These are not going to convert well long_price = nchar(posts$price) > 8 table(long_price) posts$price[long_price] # Looks like we are OK replacing the long ones with NAs # Here is a test: x = c("$353", "353", "2011 MAZDA") as.numeric(x) # Lets look at the results table(price_new, useNA = "ifany") # Now save the price as the price column posts$price = price_new # Plotting is helpful to check out extreme values ggplot(posts, aes(y = price, x = condition)) + geom_boxplot() + ylim(c(0,100000)) + facet_wrap(~maker) # We can also subset to look at the values subset(posts, posts$maker == "maserati") subset(posts, posts$price > 1e6) summary(posts$price) ################################################################################ # Using lapply/sapply to go over multiple columns at once # This would be helpful if there were more than one columns with "$" tmp = sapply(posts[,c("price", "maker")], function(x) gsub("\\$","",x)) ################################################################################ # Subsetting with the [] in 2 dimensions # data[rows,cols] # First row 
posts[1,] # First column posts[,1] # Value in first row, first column posts[1,1] # Can subset by name posts["posted3625",] posts[,"price"] # cat() converts the \n to a newline # Making the body easier to read cat(posts$body[1])
446692c38bfc012379694967e8f232cb48fac568
fbe57536cc2d84e69a5bf799c88fcb784e853558
/man/unitconversion.pint.to.fluid.ounce.Rd
9fbbdf5160d0493d028baa89a27aef340b19c692
[ "MIT" ]
permissive
burrm/lolcat
78edf19886fffc02e922b061ce346fdf0ee2c80f
abd3915791d7e63f3827ccb10b1b0895aafd1e38
refs/heads/master
2023-04-02T11:27:58.636616
2023-03-24T02:33:34
2023-03-24T02:33:34
49,685,593
5
2
null
2016-10-21T05:14:49
2016-01-15T00:56:55
R
UTF-8
R
false
true
562
rd
unitconversion.pint.to.fluid.ounce.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/unitconversion.pint.to.fluid.ounce.R \name{unitconversion.pint.to.fluid.ounce} \alias{unitconversion.pint.to.fluid.ounce} \alias{unitconversion.pt.to.fl.oz} \title{Unit Conversion - Liquid Volume - Pint to Fluid Ounce} \usage{ unitconversion.pint.to.fluid.ounce(x = 1) unitconversion.pt.to.fl.oz(x = 1) } \arguments{ \item{x}{Vector - Values in units of pints} } \value{ x, but converted to fluid ounces } \description{ Performs a conversion of volumes from pints to fluid ounces. }
3ff0efc62797b46f17b33dd1b6df559ecf9df672
7fa82a4eef53ed2c3260fbdff44d26e545f92e80
/netplot_whole_trn.R
12cb78e360f7d077336a754ad7b07d4bbaf1d203
[]
no_license
SocialBiologyGroupWesternU/Gene-Context
54a6c165b22ffb385de09e8c3fc31cf417af251a
0da3ed9f3f264b9534871a3800ddcc0f29a951b2
refs/heads/master
2022-12-15T02:07:26.963913
2020-09-18T18:59:27
2020-09-18T18:59:27
275,677,745
0
0
null
null
null
null
UTF-8
R
false
false
2,354
r
netplot_whole_trn.R
library("igraph") library("tidyverse") library("RColorBrewer") # Constants ============================================================ read_from <- "read/updatedTRN.txt" # Edgelist file write_to <- "write/figure/netplot/whole_trn_kk_layout.pdf" # Figure file # Helper functions ============================================================ alpha <- function(edge){ incident_vertices <- ends(trn, edge) vertex1_deg <- trn %>% degree(v=incident_vertices[1,1]) vertex2_deg <- trn %>% degree(v=incident_vertices[1,2]) ifelse(vertex1_deg >= vertex2_deg, vertex1_deg, vertex2_deg) %>% magrittr::multiply_by(255/max_degree) %>% trunc() %>% as.hexmode() %>% format(width=2, upper.case = TRUE) %>% as.character() } colour <- function(edge){ for (i in 1:length(clusters)){ if (edge %in% as_ids(E(clusters[[i]]))) return(colours[i]) } return(NULL) } #Load and organize the data ============================================================ edgelist <- scan(read_from, skip = 1, what = "character") %>% matrix(ncol=2,byrow=TRUE) trn <- edgelist %>% graph_from_edgelist(directed = FALSE) trn_clustered <- trn %>% cluster_fast_greedy(weights=NULL) clusters <- trn_clustered %>% communities() %>% map(function(cluster) induced_subgraph(trn, cluster)) colours <- brewer.pal(length(clusters),"Set3") max_degree <- trn %>% degree() %>% max() #Add visual attributes to trn then plot ============================================================ trn_with_attr <- trn %>% set_vertex_attr("label", value="") %>% set_vertex_attr("size", value= trn %>% degree(normalized=TRUE) %>% magrittr::multiply_by(30) ) %>% set_vertex_attr("color", value= trn_clustered %>% membership() %>% as.integer() %>% map_chr(function(cluster_num) colours[cluster_num]) ) %>% set_vertex_attr("frame.color", value=NA) %>% set_edge_attr("color", value= trn %>% E() %>% as_ids() %>% map_chr(function(edge){ edge_colour <- colour(edge) if(!is.null(edge_colour)) paste(edge_colour,alpha(edge), sep="") else NA } ) ) %>% set_edge_attr("width", 
value=.5) %>% set_graph_attr("layout", layout_with_kk) pdf(file=write_to) trn_with_attr %>% plot() %>% print() dev.off()
6dc1eb5615079f5871e1a96549380489eaca45a4
4a9ce5ff6971a0f3b340af1ddbbf970ad921aa88
/Modeling.R
55576702f42aa6e835aef292050d91270c832a56
[]
no_license
MarcSchneble/OnStreetParking
0d7e8af3f82787677b62bf1f2023e56c41b03614
07ed62a5a8b3de06504389259d56b8812641f6bb
refs/heads/master
2023-07-04T23:41:15.113434
2021-08-02T18:37:52
2021-08-02T18:37:52
392,055,939
0
0
null
null
null
null
UTF-8
R
false
false
19,015
r
Modeling.R
# set current path to this file setwd(dirname(rstudioapi::getActiveDocumentContext()$path)) # clear the workspace and restart session rm(list = ls()) # sets local system language to English Sys.setlocale("LC_ALL","English") source("Functions.R") library(dplyr) library(lubridate) library(ggplot2) library(survival) library(grDevices) library(pROC) library(pracma) library(readr) library(spatstat) library(readxl) library(scales) library(frailtypack) library(mgcv) library(PRROC) # read parking data ---- G <- readRDS("Data/network.rds") intens.G <- density.lpp(unmark(G), sigma = 50) data <- read_rds("Data/data_2019_clean.rds") %>% filter(StreetMarker %in% levels(G$data$marks), same.day == 1, irregular == 0, DurationMinutes2 > 0, h.start >= 8, h.start < 20, d.start >= 91, d.start <= 191) %>% mutate(SideOfStreetCode = factor(SideOfStreetCode, levels = unique(SideOfStreetCode)), StreetMarker = factor(StreetMarker, levels = levels(G$data$marks)), State = State - 1) G$data <- G$data[G$data$marks %in% data$StreetMarker] G$data$marks <- factor(G$data$marks, levels = unique(G$data$marks)) data$StreetMarker <- factor(data$StreetMarker, levels = levels(G$data$marks)) # distance matrix N <- length(levels(data$StreetMarker)) distance <- matrix(NA, N, N) for (i in 1:N) { G.i <- G G.i$data <- G$data[i, ] fundist <- distfun(G.i) distance[i, ] <- fundist(G) } #source("occupancy.R") data <- read_rds("Data/data_frac.rds") data$frac0[which(is.na(data$frac0))] <- 0 data$frac1[which(is.na(data$frac1))] <- 0 # model Lonsdale east ---- data.lonsdale.east <- filter(data, StreetName == "LONSDALE STREET", BetweenStreet1 == "RUSSELL STREET" | BetweenStreet1 == "EXHIBITION STREET", BetweenStreet2 == "EXHIBITION STREET" | BetweenStreet2 == "SPRING STREET") %>% mutate(SideOfStreetCode = factor(SideOfStreetCode, levels = unique(SideOfStreetCode))) sm <- smooth.construct.ps.smooth.spec(s(m.start), data = data.lonsdale.east, knots = NULL) X <- sm$X X <- sweep(X, 2, colMeans(X))[, -1] Z <- 
as.data.frame(X) colnames(Z) <- paste0("m", 1:ncol(X)) data.lonsdale.east <- bind_cols(data.lonsdale.east, Z) ind.m <- match(unique(data.lonsdale.east$m.start), data.lonsdale.east$m.start) x <- data.lonsdale.east$m.start[ind.m] model.weibull.0 <- survreg(Surv(pmin(DurationMinutes2, 60), 1*(DurationMinutes2 <= 60)) ~ weekday + frailty(StreetMarker) + SideOfStreetCode + frac1 + m1 + m2 + m3 + m4 + m5 + m6 + m7 + m8 + m9, dist = "weibull", data = data.lonsdale.east %>% filter(State == 0), score = TRUE) par.m <- as.numeric(tail(model.weibull.0$coefficients, ncol(X))) x <- data.lonsdale.east$m.start[ind.m] y <- as.vector(X[ind.m, ]%*%par.m) limits <- smoothConfidence(par.m, model.weibull.0$var[11:19, 11:19], X[ind.m, ]) df <- tibble(x = x, y = -y/model.weibull.0$scale, y.lower = -limits$lower/model.weibull.0$scale, y.upper = -limits$upper/model.weibull.0$scale) g0 <- ggplot(df) + geom_line(aes(x = x, y = y), col = "red") + geom_ribbon(aes(x = x, ymin = y.lower, ymax = y.upper), alpha = 0.5) + theme_bw() + scale_x_continuous(breaks = seq(360, 1200, 120), labels = c("6am", "8am", "10am", "12pm", "2pm", "4pm", "6pm", "8pm")) + labs(x = expression(paste(hour[t])), y = expression(paste(g[4*","*0](hour[t]))), parse = TRUE) + scale_y_continuous(limits = c(-0.8, 1)) pdf(file = "Plots/hour0.pdf", width = 4.5, height = 3) print(g0) dev.off() model.weibull.1 <- survreg(Surv(pmin(DurationMinutes2, 60), 1*(DurationMinutes2 <= 60)) ~ weekday + frailty(StreetMarker) + SideOfStreetCode + frac0 + m1 + m2 + m3 + m4 + m5 + m6 + m7 + m8 + m9, dist = "weibull", data = data.lonsdale.east %>% filter(State == 1), score = TRUE) par.m <- as.numeric(tail(model.weibull.1$coefficients, ncol(X))) y <- as.vector(X[ind.m, ]%*%par.m) limits <- smoothConfidence(par.m, model.weibull.1$var[11:19, 11:19], X[ind.m, ]) df <- tibble(x = x, y = -y/model.weibull.1$scale, y.lower = -limits$lower/model.weibull.1$scale, y.upper = -limits$upper/model.weibull.1$scale) g1 <- ggplot(df) + geom_line(aes(x = x, y = y), 
col = "red") + geom_ribbon(aes(x = x, ymin = y.lower, ymax = y.upper), alpha = 0.5) + theme_bw() + scale_x_continuous(breaks = seq(360, 1200, 120), labels = c("6am", "8am", "10am", "12pm", "2pm", "4pm", "6pm", "8pm")) + labs(x = expression(paste(hour[t])), y = expression(paste(g[4*","*1](hour[t]))), parse = TRUE) + scale_y_continuous(limits = c(-0.8, 1)) pdf(file = "Plots/hour1.pdf", width = 4.5, height = 3) print(g1) dev.off() # prediction ---- R <- 100 dur <- 10 time <- as.POSIXct("2019-06-01 10:00:00", tz = "Australia/Melbourne") observed <- prediction.det <- prediction.exp <- prediction.weibull <- prediction.weibull.wostar <- NULL for (r in 1:R) { time.r <- time + days(sample(0:29, 1)) + seconds(runif(1, 0, 60*120)) # simulate data point and compute distance function P <- rlpp(1, intens.G) fundist <- distfun(P) dist <- fundist(G) # data for distance ind.distance <- which(dist <= 300) markers.distance <- levels(data$StreetMarker)[ind.distance] ind.fit <- which(dist <= 250) if (length(ind.fit) > 10) { print(r) markers.fit <- levels(data$StreetMarker)[ind.fit] pre <- get_occupancy2(time.r, data, ind.distance, duration = TRUE) post <- get_occupancy2(time.r + minutes(dur), data, ind.fit, duration = FALSE) # fit models data.r <- data %>% filter(StreetMarker %in% markers.fit, difftime(time.r, DepartureTime, units = "days") > 0, difftime(time.r, DepartureTime, units = "days") < 30) %>% mutate(SideOfStreetCode = factor(SideOfStreetCode, levels = unique(SideOfStreetCode)), StreetMarker = factor(StreetMarker, levels = unique(StreetMarker))) data0 <- data.r %>% filter(State == 0) data1 <- data.r %>% filter(State == 1) markers <- intersect(unique(data0$StreetMarker), unique(data1$StreetMarker)) data0 <- data0 %>% filter(StreetMarker %in% markers) %>% mutate(SideOfStreetCode = factor(SideOfStreetCode, levels = unique(SideOfStreetCode)), StreetMarker = factor(StreetMarker, levels = unique(StreetMarker))) data1 <- data1 %>% filter(StreetMarker %in% markers) %>% 
mutate(SideOfStreetCode = factor(SideOfStreetCode, levels = unique(SideOfStreetCode)), StreetMarker = factor(StreetMarker, levels = unique(StreetMarker))) fmla0 <- Surv(pmin(DurationMinutes2, 60), 1*(DurationMinutes2 <= 60)) ~ weekday + frailty(StreetMarker) + factor(h.start) + frac1 if (length(unique(data0$SideOfStreetCode)) > 1) { fmla0 <- update(fmla0, ~ .+ SideOfStreetCode) } fmla1 <- Surv(pmin(DurationMinutes2, 60), 1*(DurationMinutes2 <= 60)) ~ weekday + frailty(StreetMarker) + factor(h.start) + frac1 if (length(unique(data1$SideOfStreetCode)) > 1) { fmla1 <- update(fmla1, ~ .+ SideOfStreetCode) } model.exp.0 <- survreg(fmla0, dist = "exponential", data = data0, score = TRUE) model.exp.1 <- survreg(fmla1, dist = "exponential", data = data1, score = TRUE) model.weibull.0 <- survreg(fmla0, dist = "weibull", data = data0, score = TRUE) model.weibull.1 <- survreg(fmla1, dist = "weibull", data = data1, score = TRUE) # get parameters from the model par.exp.0 <- get_par2(time.r, model.exp.0, data0, pre, 0, distance) par.exp.1 <- get_par2(time.r, model.exp.1, data1, pre, 1, distance) par.weibull.0 <- get_par2(time.r, model.weibull.0, data0, pre, 0, distance) par.weibull.1 <- get_par2(time.r, model.weibull.1, data1, pre, 1, distance) # predict occupancy occupancy.pre <- filter(pre, marker %in% markers)$occupancy occupancy.post <- filter(post, marker %in% markers)$occupancy d0 <- filter(pre, marker %in% markers)$duration ind.prediction <- which(is.element(occupancy.pre, c(0, 1)) & is.element(occupancy.post, c(0, 1))) prediction.exp <- c(prediction.exp, get_prediction_exp(occupancy.pre, par.exp.0, par.exp.1, dur)[ind.prediction]) prediction.weibull <- c(prediction.weibull, get_prediction_weibull(occupancy.pre, d0, par.weibull.0, par.weibull.1, dur)[ind.prediction]) d0 <- rep(0, length(d0)) prediction.weibull.wostar <- c(prediction.weibull.wostar, get_prediction_weibull(occupancy.pre, d0, par.weibull.0, par.weibull.1, dur)[ind.prediction]) prediction.det <- 
c(prediction.det, 1 - occupancy.pre[ind.prediction]) observed <- c(observed, occupancy.post[ind.prediction]) } } roc.exp <- pROC::roc(observed, prediction.exp, levels = c(1, 0)) roc.weibull <- pROC::roc(observed, prediction.weibull, levels = c(1, 0)) roc.weibull.wostar <- pROC::roc(observed, prediction.weibull.wostar, levels = c(1, 0)) roc.check <- pROC::roc(observed, prediction.det, levels = c(1, 0)) df.exp <- tibble(x = roc.exp$specificities, y = roc.exp$sensitivities, kind = "exp") df.weibull <- tibble(x = roc.weibull$specificities, y = roc.weibull$sensitivities, kind = "weibull") df.weibull.wostar <- tibble(x = roc.weibull.wostar$specificities, y = roc.weibull.wostar$sensitivities, kind = "weibullwostar") df.check <- tibble(x = roc.check$specificities, y = roc.check$sensitivities, kind = "check") df.random <- tibble(x = c(0, 1), y = c(1, 0), kind = "random") df <- bind_rows(df.weibull, df.weibull.wostar, df.exp) %>% mutate(kind = factor(kind, levels = c("weibull", "weibullwostar", "exp"))) g <- ggplot() + geom_ribbon(data = df %>% filter(kind == "weibull"), aes(x = 1-x, ymin = 0, ymax = y), alpha = 0.2) + geom_line(data = df, aes(x = 1-x, y = y, color = kind, linetype = kind)) + scale_color_hue(name = "Predictor", labels = c(paste0("Semi-Markov\n(AUC = ", round(roc.weibull$auc, 3), ")"), paste0("Markov\n(AUC = ", round(roc.exp$auc, 3), ")"), paste0("Random\n(AUC = 0.5)"))) + labs(x = "Specifciity", y = "Senisitivity", linetype = "Predictor") + theme_bw() + theme(legend.position = "bottom", legend.key.width = unit(0.8, "cm")) + scale_x_continuous(breaks = seq(0, 1, 0.2), labels = seq(1, 0, -0.2)) + scale_y_continuous(breaks = seq(0, 1, 0.2)) + scale_linetype_manual(labels = c(paste0("Semi-Markov\n(AUC = ", round(roc.weibull$auc, 3), ")"), paste0("Markov\n(AUC = ", round(roc.exp$auc, 3), ")"), paste0("Random\n(AUC = 0.5)")), values = c(1, 3, 5)) pdf(file = "Plots/roc_30min_afternoon.pdf", width = 5.5, height = 5.5) print(g) dev.off() # modeling with different 
prediction horizons ---- dur <- seq(5, 60, 5) AUC <- matrix(0, length(dur), 3) R <- 100 daytime <- 10 for (d in 1:length(dur)) { time <- as.POSIXct("2019-06-01", tz = "Australia/Melbourne") + hours(daytime) observed <- prediction.det <- prediction.exp <- prediction.weibull <- prediction.weibull.wostar <- NULL for (r in 1:R) { time.r <- time + days(sample(0:29, 1)) + seconds(runif(1, 0, 60*120)) # simulate data point and compute distance function P <- rlpp(1, intens.G) fundist <- distfun(P) dist <- fundist(G) # data for distance ind.distance <- which(dist <= 300) markers.distance <- levels(data$StreetMarker)[ind.distance] ind.fit <- which(dist <= 250) if (length(ind.fit) > 10) { print(r) markers.fit <- levels(data$StreetMarker)[ind.fit] pre <- get_occupancy2(time.r, data, ind.distance, duration = TRUE) post <- get_occupancy2(time.r + minutes(dur[d]), data, ind.fit, duration = FALSE) # fit models data.r <- data %>% filter(StreetMarker %in% markers.fit, difftime(time.r, DepartureTime, units = "days") > 0, difftime(time.r, DepartureTime, units = "days") < 30) %>% mutate(SideOfStreetCode = factor(SideOfStreetCode, levels = unique(SideOfStreetCode)), StreetMarker = factor(StreetMarker, levels = unique(StreetMarker))) data0 <- data.r %>% filter(State == 0) data1 <- data.r %>% filter(State == 1) markers <- intersect(unique(data0$StreetMarker), unique(data1$StreetMarker)) data0 <- data0 %>% filter(StreetMarker %in% markers) %>% mutate(SideOfStreetCode = factor(SideOfStreetCode, levels = unique(SideOfStreetCode)), StreetMarker = factor(StreetMarker, levels = unique(StreetMarker))) data1 <- data1 %>% filter(StreetMarker %in% markers) %>% mutate(SideOfStreetCode = factor(SideOfStreetCode, levels = unique(SideOfStreetCode)), StreetMarker = factor(StreetMarker, levels = unique(StreetMarker))) fmla0 <- Surv(pmin(DurationMinutes2, 60), 1*(DurationMinutes2 <= 60)) ~ weekday + frailty(StreetMarker) + factor(h.start) + frac1 if (length(unique(data0$SideOfStreetCode)) > 1) { fmla0 <- 
update(fmla0, ~ .+ SideOfStreetCode) } fmla1 <- Surv(pmin(DurationMinutes2, 60), 1*(DurationMinutes2 <= 60)) ~ weekday + frailty(StreetMarker) + factor(h.start) + frac1 if (length(unique(data1$SideOfStreetCode)) > 1) { fmla1 <- update(fmla1, ~ .+ SideOfStreetCode) } model.exp.0 <- survreg(fmla0, dist = "exponential", data = data0, score = TRUE) model.exp.1 <- survreg(fmla1, dist = "exponential", data = data1, score = TRUE) model.weibull.0 <- survreg(fmla0, dist = "weibull", data = data0, score = TRUE) model.weibull.1 <- survreg(fmla1, dist = "weibull", data = data1, score = TRUE) # get parameters from the model par.exp.0 <- get_par2(time.r, model.exp.0, data0, pre, 0, distance) par.exp.1 <- get_par2(time.r, model.exp.1, data1, pre, 1, distance) par.weibull.0 <- get_par2(time.r, model.weibull.0, data0, pre, 0, distance) par.weibull.1 <- get_par2(time.r, model.weibull.1, data1, pre, 1, distance) # predict occupancy occupancy.pre <- filter(pre, marker %in% markers)$occupancy occupancy.post <- filter(post, marker %in% markers)$occupancy d0 <- filter(pre, marker %in% markers)$duration ind.prediction <- which(is.element(occupancy.pre, c(0, 1)) & is.element(occupancy.post, c(0, 1))) prediction.exp <- c(prediction.exp, get_prediction_exp(occupancy.pre, par.exp.0, par.exp.1, dur[d])[ind.prediction]) prediction.weibull <- c(prediction.weibull, get_prediction_weibull(occupancy.pre, d0, par.weibull.0, par.weibull.1, dur[d])[ind.prediction]) prediction.weibull.wostar <- c(prediction.weibull.wostar, get_prediction_weibull(occupancy.pre, d0, par.weibull.0, par.weibull.1, dur[d], star = FALSE)[ind.prediction]) prediction.det <- c(prediction.det, 1 - occupancy.pre[ind.prediction]) observed <- c(observed, occupancy.post[ind.prediction]) } } roc.exp <- pROC::roc(observed, prediction.exp, levels = c(1, 0)) roc.weibull <- pROC::roc(observed, prediction.weibull, levels = c(1, 0)) roc.weibull.wostar <- pROC::roc(observed, prediction.weibull.wostar, levels = c(1, 0)) roc.check <- 
pROC::roc(observed, prediction.det, levels = c(1, 0)) #AUC[d, ] <- c(roc.weibull$auc, roc.weibull.wostar$auc, roc.exp$auc) #saveRDS(AUC, file = "AUC.rds") df.exp <- tibble(x = roc.exp$specificities, y = roc.exp$sensitivities, kind = "exp") df.weibull <- tibble(x = roc.weibull$specificities, y = roc.weibull$sensitivities, kind = "weibull") df.weibull.wostar <- tibble(x = roc.weibull.wostar$specificities, y = roc.weibull.wostar$sensitivities, kind = "weibullwostar") df.check <- tibble(x = roc.check$specificities, y = roc.check$sensitivities, kind = "check") df.random <- tibble(x = c(0, 1), y = c(1, 0), kind = "random") df <- bind_rows(df.weibull, df.weibull.wostar, df.exp) %>% mutate(kind = factor(kind, levels = c("weibull", "weibullwostar", "exp"))) g <- ggplot() + geom_ribbon(data = df %>% filter(kind == "weibull"), aes(x = 1-x, ymin = 0, ymax = y), alpha = 0.2) + geom_line(data = df, aes(x = 1-x, y = y, color = kind, linetype = kind)) + scale_color_hue(name = "Predictor", labels = c(paste0("Semi-Markov\n(state space S*,\nAUC = ", round(roc.weibull$auc, 3), ")"), paste0("Semi-Markov\n(state space S,\nAUC = ", round(roc.weibull.wostar$auc, 3), ")"), paste0("Markov\n(state space S,\nAUC = ", round(roc.exp$auc, 3), ")"))) + labs(x = "Specifciity", y = "Senisitivity", linetype = "Predictor") + theme_bw() + theme(legend.position = "bottom", legend.key.width = unit(0.8, "cm")) + scale_x_continuous(breaks = seq(0, 1, 0.2), labels = seq(1, 0, -0.2)) + scale_y_continuous(breaks = seq(0, 1, 0.2)) + scale_linetype_manual(labels = c(paste0("Semi-Markov\n(state space S*,\nAUC = ", round(roc.weibull$auc, 3), ")"), paste0("Semi-Markov\n(state space S,\nAUC = ", round(roc.weibull.wostar$auc, 3), ")"), paste0("Markov\n(state space S,\nAUC = ", round(roc.exp$auc, 3), ")")), values = c(1, 3, 5)) + geom_line(data = tibble(x = c(0, 1), y = c(0, 1)), aes(x = x, y = y)) pdf(file = paste0("Plots/roc_", dur[d], "min_daytime", daytime, ".pdf"), width = 5.5, height = 5.5) print(g) dev.off() 
} AUC <- readRDS("AUC.rds") df <- tibble(duration = rep(dur, 3), AUC = as.vector(AUC), predictor = rep(c("weibull", "weibullwostar", "exp"), each = length(dur))) %>% mutate(predictor = factor(predictor, levels = c("weibull", "weibullwostar", "exp"))) g <- ggplot(df, aes(x = duration, y = AUC, color = predictor)) + geom_point() + geom_line(aes(linetype = predictor)) + theme_bw() + scale_y_continuous(limits = c(0, 1), breaks = seq(0, 1, 0.2)) + scale_x_continuous(breaks = seq(0, 60, 10)) + labs(x = "Prediction horizon (in minutes)", color = "Predictor", linetype = "Predictor") + scale_linetype_manual(labels = c(paste0("Semi-Markov (state space S*)"), paste0("Semi-Markov (state space S)"), paste0("Markov (State space S)")), values = c(1, 3, 5)) + scale_color_hue(labels = c(paste0("Semi-Markov (state space S*)"), paste0("Semi-Markov (state space S)"), paste0("Markov (State space S)"))) + geom_hline(yintercept = 0.5) pdf(file = paste0("Plots/AUC.pdf"), width = 8, height = 4) print(g) dev.off()
7d8163f4e1e555f607fb905f3053dbf81b32ead1
5c542ee6a12a4637236ee876da7eb6c42667b211
/analysis-functions.R
e80b02771688d695483ab67368946687dd79f481
[]
no_license
jeffeaton/epp-spectrum
cecacd42c0f926d681bf3f3d9d9e6e21dd28e9a3
b525b52e05ddbd914ec6d9c7095a52a512419929
refs/heads/master
2021-01-10T21:13:09.377733
2015-05-27T09:20:12
2015-05-27T09:20:12
19,249,007
2
0
null
null
null
null
UTF-8
R
false
false
2,372
r
analysis-functions.R
############################ #### Spline functions #### ############################ proj.steps <- seq(1, 43.5, 0.1) # 1970:2012 t0 <- 6 dt <- 0.1 numSplineSteps <- sum(proj.steps >= t0) numSplines <- 7 library(splines) fnBSpline <- function(u, q = numSplines, m = 2){ x <- seq(1,numSplineSteps) k <- seq(min(x),max(x),length=q-m) # knots dk <- k[2]-k[1] k <- c(k[1]-dk*((m+1):1),k,k[q-m]+dk*(1:(m+1))) X <- splineDesign(k,x,ord=m+2) # model matrix b <- u # Now the coefficients b[1] <- u[1] ## b[2] <- u[2] b[2] <- b[1] + u[2] for(i in 3:length(b)){ b[i] <- 2*b[i-1] - b[i-2] + u[i] } ## Predicted r r <- c(rep(0, sum(proj.steps < t0)), X%*%b) return(r) } ############################## #### Analysis functions #### ############################## fert.idx <- 4:10 a15to49.idx <- 4:10 a15to24.idx <- 4:5 a15plus.idx <- 4:AG m.idx <- 1 f.idx <- 2 fnRVec <- function(theta){return(fnBSpline(theta[-(1:2)]))} fnIota <- function(theta){return(exp(theta[1]))} fnMod <- function(theta){return(fnSpectrum(fnIota(theta), fnRVec(theta)))} fnASFRadjPrev <- function(mod){ prev.by.age <- rowSums(mod[,2, fert.idx, -1,],,2)/rowSums(mod[,2, fert.idx,,],,2) births.by.age <- t(asfr[,1:dim(mod)[1]]) * rowSums(mod[,2, fert.idx,,],,2) return(rowSums(prev.by.age * births.by.age) / rowSums(births.by.age)) } fnANCprev <- function(mod){ hivn.by.age <- rowSums(mod[,2, fert.idx, 1,],,2) hivp.by.age <- rowSums(mod[,2, fert.idx, -1,],,2) births.by.age <- t(asfr[,1:dim(mod)[1]]) * (hivn.by.age + hivp.by.age) frac.hivp.moth <- 1.0 - hivn.by.age/(sweep(hivp.by.age, 2, fert.rat, "*")+hivn.by.age) return(rowSums(births.by.age * frac.hivp.moth) / rowSums(births.by.age)) } prev <- function(mod, age.idx=a15to49.idx, sex.idx=c(m.idx, f.idx)) return(rowSums(mod[,sex.idx, age.idx,-1,])/rowSums(mod[,sex.idx, age.idx,,])) ageprev <- function(mod, age.idx = 1:dim(mod)[3], agegr.idx = 1:length(age.idx)){ hivn <- apply(mod[,,age.idx,1,1], 1:2, tapply, agegr.idx, sum) tot <- apply(rowSums(mod[,,age.idx,,], d=3), 1:2, tapply, 
agegr.idx, sum) return(aperm(1 - hivn/tot, c(2, 3, 1))) } ## fn15to49inc <- function(mod) ## inc.rate.15to49 <- rVec[ts == proj.steps] * ((sum(X[,age15to49.idx,-1,1]) + relinfect.ART*sum(X[,age15to49.idx,-1,-1]))/sum(X[,age15to49.idx,,]) + (ts == t0)*iota)
e694b04bdd0a30a480ef2a172009aa2ea69d8749
cd1d77f9de36cddcd912324123559692425b3625
/Lecture-11/figure/fig-11-02.R
64d75e634c4209dd4df0fe83c5c7f7453a7ee8a3
[]
no_license
jirou7800/Statistics_YNU2020_exercise
5b798b5bb81d0abf14dddc55fb7f51306ddf3f7b
fdc7e365a884d9986d4bf8f39c03630add3ced14
refs/heads/master
2022-11-25T17:17:39.210419
2020-08-05T03:01:03
2020-08-05T03:01:03
null
0
0
null
null
null
null
UTF-8
R
false
false
939
r
fig-11-02.R
library(dplyr) library(ggplot2) circle <- data.frame(t = seq(from= 0, to = 2*pi, length.out = 100)) %>% mutate(x = cos(t), y = sin(t)) %>% dplyr::select(-t) %>% mutate(type = factor("circle")) quadra <- data.frame(x = seq(from = -1, to = 1, length.out = 100)) %>% mutate(y = x^2 -0.5) %>% mutate(type = factor("quadratic")) sin <- data.frame(x = seq(from = -1.5*pi, to = 2.5*pi, length.out = 100)) %>% mutate(y = sin(x)) %>% mutate(type = factor("sin")) bind_rows(circle, quadra, sin) %>% ggplot() + aes(x = x, y = y) + geom_point() + facet_wrap(~ type, ncol = 3, scale = "free") + xlab("") + ylab("") + theme(axis.text.y = element_blank()) + theme(axis.text.x = element_blank()) #library(purrr) #library(broom) #bind_rows(circle, quadra, sin) %>% # nest(data = -type) %>% # mutate(cor = map(data, ~ cor.test(.x$x, .x$y)), # tidied = map(cor, tidy) # ) %>% # unnest(tidied)
56794b3195582f7e404eab476c3b31e9ecca7360
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/heemod/examples/combine_probs.Rd.R
1d610ba3a5fb353afe58cc195ceb2639461fa052
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
181
r
combine_probs.Rd.R
library(heemod) ### Name: combine_probs ### Title: Combine Probabilities ### Aliases: combine_probs ### ** Examples (p1 <- runif(5)) (p2 <- runif(5)) combine_probs(p1, p2)
e0bcf9ac26e5ddad2b473eb7649cc16ae9217e0f
b2d32cb57604a26e31f0c4947ee866f59a7aa8ba
/R/searcher_efficiency_figure_functions.R
6d89117ff414f572829c853b62b9e82a418220e9
[ "LicenseRef-scancode-warranty-disclaimer", "CC0-1.0", "LicenseRef-scancode-public-domain" ]
permissive
atredennick/GenEst
fe1f95ca844b2bf9cf0f3358e810439b0a964410
38b73a290c074872c0651926bd6de283072aa8e6
refs/heads/master
2020-06-10T20:24:52.460911
2019-06-25T16:52:44
2019-06-25T16:52:44
193,736,013
0
0
null
2019-06-25T15:35:03
2019-06-25T15:35:03
null
UTF-8
R
false
false
26,174
r
searcher_efficiency_figure_functions.R
#' @title Plot results of a single pk model #' #' @description Plot a single \code{\link{pkm}} model #' #' @param x model of class pkm #' #' @param col color to use #' #' @param ... arguments to be passed to sub functions #' #' @return a plot #' #' @examples #' data(wind_RP) #' mod <- pkm(formula_p = p ~ Season, formula_k = k ~ 1, data = wind_RP$SE) #' plot(mod) #' #' @export #' plot.pkm <- function(x, col = "black", ...){ model <- x if (anyNA(model$varbeta) || sum(diag(model$varbeta) < 0) > 0){ stop("Variance in pkm not well-defined. Cannot plot.") } name_p <- format(model$formula_p) name_k <- model$formula_k if (!is.null(model$pOnly) && model$pOnly){ stop("k missing from pk model. Cannot plot.") } if (class(name_k) == "numeric"){ name_k <- paste("k fixed at ", name_k, sep = "") } else if (class(name_k) == "character"){ name_k <- "k not estimated" }else { name_k <- format(model$formula_k) } modelName <- paste(name_p, "; ", name_k, sep = "") par(mar = c(0, 0, 0, 0)) par(fig = c(0, 1, 0.95, 1)) plot(1, 1, type = 'n', bty = 'n', xaxt = 'n', yaxt = 'n', xlab = "", ylab = "", ylim = c(0, 1), xlim = c(0, 1) ) points(c(0.01, 0.06), c(0.25, 0.25), type = 'l', lwd = 2, col = col) text(x = 0.07, y = 0.3, "= Median", adj = 0, cex = 0.9) points(c(0.2, 0.25), c(0.25, 0.25), type = 'l', lwd = 2, lty = 3, col = col) text(x = 0.26, y = 0.3, "= Confidence Bounds", adj = 0, cex = 0.9) labelsText <- paste(model$predictors, collapse = ".") text_label <- paste("Labels: ", labelsText, sep = "") text_model <- paste("Model: ", modelName, sep = "") text(x = 0.58, y = 0.3, text_label, adj = 0, cex = 0.75) text(x = 0.58, y = 0.7, text_model, adj = 0, cex = 0.75) par(mar = c(2,4,2,1)) par(fig = c(0, 0.5, 0.725, 0.975), new = TRUE) pkmParamPlot(model = model, pk = "p", col = col) par(fig = c(0.5, 1, 0.725, 0.975), new = TRUE) pkmParamPlot(model = model, pk = "k", col = col) par(fig = c(0, 1, 0, 0.75), new = TRUE) par(mar = c(1, 1, 1, 1)) plot(1,1, type = 'n', bty = 'n', xaxt = 'n', yaxt = 
'n', xlab = "", ylab = "" ) mtext(side = 1, "Search", line = -0.25, cex = 1.5) mtext(side = 2, "Searcher Efficiency", line = -0.25, cex = 1.5) ncell <- model$ncell cellNames <- model$cells[ , "CellNames"] nmatrix_col <- min(3, ncell) nmatrix_row <- ceiling(ncell / nmatrix_col) figxspace <- 0.95 / nmatrix_col figyspace <- 0.65 / nmatrix_row x1 <- rep(figxspace * ((1:nmatrix_col) - 1), nmatrix_row) + 0.05 x2 <- rep(figxspace * ((1:nmatrix_col)), nmatrix_row) + 0.05 y1 <- rep(figyspace * ((nmatrix_row:1) - 1), each = nmatrix_col) + 0.04 y2 <- rep(figyspace * ((nmatrix_row:1)), each = nmatrix_col) + 0.04 bottomCells <- seq(ncell - (nmatrix_col - 1), ncell, 1) leftCells <- which(1:ncell %% nmatrix_col == 1) if (length(leftCells) == 0){ leftCells <- 1 } for (celli in 1:ncell){ par(mar = c(2.5, 2, 0, 0)) par(fig = c(x1[celli], x2[celli], y1[celli], y2[celli]), new = T) specificCell <- cellNames[celli] axis_x <- FALSE axis_y <- FALSE if (celli %in% bottomCells){ axis_x <- TRUE } if (celli %in% leftCells){ axis_y <- TRUE } pkmSECellPlot(model = model, specificCell = specificCell, col = col, axis_y = axis_y, axis_x = axis_x) } } #' Plot parameter box plots for each cell for either p or k #' #' @param model model of class pkm #' #' @param pk character of "p" or "k" to delineate between parameter graphed #' #' @param col color to use #' #' @return a parameter plot panel #' #' @export #' pkmParamPlot <- function(model, pk = "p", col){ ncell <- model$ncell cellNames <- model$cells[ , "CellNames"] predictors <- model$predictors CL <- model$CL probs <- c(0, (1 - CL) / 2, 0.25, 0.5, 0.75, 1 - (1 - CL) / 2, 1) pks <- rpk(n = 1000, model = model) pks_full <- rpk(n = 1000, model = model) if (pk == "p"){ maxy <- 1 } else if (pk == "k"){ maxy <- 1 for (celli in 1:ncell){ maxcell <- max(pks[[celli]][ , "k"]) * 1.01 maxy <- max(maxy, maxcell) } } maxy[is.na(maxy)] <- 1 plot(1, type = "n", xlab = "", ylab = "", bty = "L", xaxt = 'n', yaxt = 'n', ylim = c(0, maxy), xlim = c(0.5, ncell + 
0.5) ) for (celli in 1:ncell){ x <- celli y <- quantile(pks[[celli]][ , pk], probs, na.rm = TRUE) med <- c(-0.1, 0.1) tb <- c(-0.07, 0.07) rect(x - 0.1, y[3], x + 0.1, y[5], lwd = 2, border = col) points(x + med, rep(y[4], 2), type = 'l', lwd = 2, col = col) points(x + tb, rep(y[2], 2), type = 'l', lwd = 2, col = col) points(x + tb, rep(y[6], 2), type = 'l', lwd = 2, col = col) points(rep(x, 3), y[1:3], type = 'l', lwd = 2, col = col) points(rep(x, 3), y[5:7], type = 'l', lwd = 2, col = col) } axis(1, at = 1:ncell, cellNames, las = 1, cex.axis = 0.75) axis(2, at = seq(0, 1, 0.5), las = 1, cex.axis = 0.75) axis(2, at = seq(0, 1, 0.1), labels = FALSE, tck = -0.05) mtext(side = 2, pk, line = 2.75, cex = 1.1) } #' Plot cell-specific decay curve for searcher efficiency #' #' @param model model of class pkm #' #' @param specificCell name of the specific cell to plot #' #' @param col color to use #' #' @param axis_x logical of whether or not to plot the x axis #' #' @param axis_y logical of whether or not to plot the y axis #' #' @return a cell plot panel #' #' @export #' pkmSECellPlot <- function(model, specificCell, col, axis_y = TRUE, axis_x = TRUE){ CL <- model$CL cellwise <- model$cell_pk cellNames <- model$cells[ , "CellNames"] whichCarcs <- which(model$carcCell == specificCell) observations <- as.matrix(model$observations[whichCarcs, ], nrow = length(whichCarcs), ncol = ncol(model$observations) ) nobs <- ncol(observations) ncarc <- nrow(observations) carcFound <- apply(observations, 2, sum, na.rm = TRUE) carcUnavail <- apply(apply(observations, 2, is.na), 2, sum) carcAvail <- ncarc - carcUnavail whichSpecificCell <- which(cellNames == specificCell) p <- cellwise[whichSpecificCell, "p_median"] k <- cellwise[whichSpecificCell, "k_median"] pks <- rpk(n = 1000, model = model) ps <- pks[[whichSpecificCell]][ , "p"] ks <- pks[[whichSpecificCell]][ , "k"] searchTab <- matrix(1:nobs, nrow = length(ps), ncol = nobs, byrow = TRUE) ktab <- ks^(searchTab - 1) SE <- ps * ktab y 
<- apply(SE, 2, median) y_l <- apply(SE, 2, quantile, probs = (1 - CL) / 2) y_u <- apply(SE, 2, quantile, probs = 1 - (1 - CL) / 2) x_pts <- 1:nobs y_pts <- carcFound / carcAvail plot(x_pts, y_pts, ylim = c(0, 1), xlim = c(0.5, nobs + 0.5), xlab = "", ylab = "", xaxt = "n", yaxt = "n", bty = "L", col = rgb(0.02, 0.02, 0.02), lwd = 2, pch = 1, cex = 1.5 ) points(x_pts, y, type = 'l', lwd = 3, col = col) points(x_pts, y_l, type = 'l', lwd = 2, lty = 3, col = col) points(x_pts, y_u, type = 'l', lwd = 2, lty = 3, col = col) for (obi in 1:nobs){ x1 <- x_pts[obi] - 0.25 y1 <- y_pts[obi] + 0.06 x2 <- x_pts[obi] + 0.35 y2 <- y_pts[obi] + 0.15 rect(x1, y1, x2, y2, border = NA, col = "white") } obsLabels <- paste(carcFound, carcAvail, sep = "/") text(x_pts + 0.05, y_pts + 0.1, obsLabels, cex = 0.65) axis(1, at = x_pts, las = 1, cex.axis = 0.75, labels = axis_x) axis(2, at = seq(0, 1, 0.2), las = 1, cex.axis = 0.75, labels = axis_y) text(0.5, 0.95, specificCell, adj = 0, cex = 0.75, font = 2) } #' @title Plot results of a set of SE models #' #' @description Produce a set of figures for a set of SE models, as fit by #' \code{\link{pkmSet}} #' #' @param x pk model set of class pkmSet #' #' @param specificModel the name(s) or index number(s) of specific model(s) #' to restrict the plot #' #' @param app logical indicating if the plot is for the app #' #' @param cols named vector of colors to use for the specific and reference #' models #' #' @param ... 
to be sent to subfunctions #' #' @return a set of plots #' #' @examples #' data(wind_RP) #' mod <- pkmSet(formula_p = p ~ Season, formula_k = k ~ Season, #' data = wind_RP$SE #' ) #' plot(mod) #' #' @export #' plot.pkmSet <- function(x, specificModel = NULL, app = FALSE, cols = SEcols(), ...){ modelSet <- x specMods <- checkSpecificModelSE(modelSet, specificModel) modelSet <- tidyModelSetSE(modelSet) nmod <- length(specMods) for (modi in 1:nmod){ if (modi == 2){ devAskNewPage(TRUE) } if (!is.null(modelSet[[modi]]$pOnly) && modelSet[[modi]]$pOnly){ plot(0, 0, type = 'n', axes = F, xlab = '', ylab = '') text(0, .5, "k missing from pk model. Cannot plot.", cex = 2, col = 2) } else { plotSEFigure(modelSet, specMods[modi], app, cols) } } devAskNewPage(FALSE) } #' @title Plot results of a single SE model in a set #' #' @description Produce a figures for a specific SE model, as fit by #' \code{\link{pkmSet}} #' #' @param modelSet pk model set of class \code{pkmSet} #' #' @param specificModel the name of the specific model for the plot #' #' @param app logical indicating if the plot is for the app #' #' @param cols named vector of colors to use for the specific and reference #' models #' #' @return a plot #' #' @export #' plotSEFigure <- function(modelSet, specificModel, app, cols){ plotSEHeader(modelSet, specificModel, app, cols) plotSEBoxPlots(modelSet, specificModel, cols) plotSEBoxTemplate(modelSet, specificModel, cols) plotSECells(modelSet, specificModel, cols) } #' @title The SE plot header #' #' @description Produce the header for an SE plot #' #' @param modelSet pk model set of class pkmSet #' #' @param specificModel the name of the specific model for the plot #' #' @param app logical indicating if the plot is for the app #' #' @param cols named vector of colors to use for the specific and reference #' models #' #' @return a plot #' #' @export #' plotSEHeader <- function(modelSet, specificModel, app = FALSE, cols = SEcols()){ par(mar = c(0, 0, 0, 0)) par(fig = c(0, 
1, 0.935, 1)) plot(1, 1, type = 'n', bty = 'n', xaxt = 'n', yaxt = 'n', xlab = "", ylab = "", ylim = c(0, 1), xlim = c(0, 1) ) LL <- sapply(modelSet, "[[", "loglik") referenceMod <- names(modelSet)[which(LL == max(LL))] if (app){ specificModel <- gsub("~ 1", "~ constant", specificModel) referenceMod <- gsub("~ 1", "~ constant", referenceMod) } rect(0.01, 0.725, 0.06, 0.925, lwd = 2, col = cols["spec"], border = NA) text_model <- paste("= Selected Model: ", specificModel, sep = "") text(x = 0.07, y = 0.85, text_model, adj = 0, cex = 0.9) rect(0.01, 0.325, 0.06, 0.525, lwd = 2, col = cols["ref"], border = NA) text_model <- paste("= Reference Model: ", referenceMod, sep = "") text(x = 0.07, y = 0.45, text_model, adj = 0, cex = 0.9) labelsText <- paste(modelSetPredictors(modelSet), collapse = ".") labelsText[labelsText == ""] <- "all" text_label <- paste("Labels: ", labelsText, sep = "") text(x = 0.9, y = 0.8, text_label, adj = 1, cex = 0.75) } #' @title p and k box plots for an SE model set #' #' @description Plot parameter box plots for each cell within a model for #' both p and k with comparison to the cellwise model #' #' @param modelSet modelSet of class pkmSet #' #' @param specificModel name of the specific submodel to plot #' #' @param cols named vector of colors to use for the specific and reference #' models #' #' @return a set of parameter plot panels #' #' @export #' plotSEBoxPlots <- function(modelSet, specificModel, cols){ par(mar = c(0,0,0,0)) par(fig = c(0, 0.45, 0.7, 0.965), new = TRUE) pkmSetSpecParamPlot(modelSet, specificModel, "p", cols) par(fig = c(0.45, 0.9, 0.7, 0.965), new = TRUE) if (!grepl("k not estimated", specificModel)){ pkmSetSpecParamPlot(modelSet, specificModel, "k", cols) } } #' @title p or k box plots for an SE model set #' #' @description Plot parameter box plots for each cell within a model for #' either p or k with comparison to the cellwise model #' #' @param modelSet modelSet of class pkmSet #' #' @param specificModel name of the 
specific submodel to plot #' #' @param pk character of "p" or "k" to delineate between parameter graphed #' #' @param cols named vector of colors to use for the specific and reference #' models #' #' @return a specific parameter plot panel #' #' @export #' pkmSetSpecParamPlot <- function(modelSet, specificModel, pk = "p", cols){ model_spec <- modelSet[[specificModel]] model_ref <- refMod(modelSet) CL <- model_ref$CL probs <- c(0, (1 - CL) / 2, 0.25, 0.5, 0.75, 1 - (1 - CL) / 2, 1) observations_spec <- model_spec$observations observations_ref <- model_ref$observations ncell_spec <- model_spec$ncell ncell_ref <- model_ref$ncell cellNames_ref <- model_ref$cells[ , "CellNames"] predictors_spec <- model_spec$predictors predictors_ref <- model_ref$predictors if (any(grepl("k not estimated", specificModel))){ return(1) } pks_spec <- rpk(n = 1000, model = model_spec) pks_ref <- rpk(n = 1000, model = model_ref) # kIncluded <- !any(grepl("k not estimated", specificModel)) # if (kIncluded){ # pks_spec <- rpk(n = 1000, model = model_spec) # pks_ref <- rpk(n = 1000, model = model_ref) # } else{ # pks_spec <- rpk(n = 1000, model = model_spec) # pks_ref <- rpk(n = 1000, model = model_ref) # } cellMatch_spec <- matchCells(model_spec, modelSet) cellMatch_ref <- matchCells(model_ref, modelSet) cells_set <- modelSetCells(modelSet) cellNames_set <- cells_set$CellNames ncell_set <- nrow(cells_set) if (pk == "p"){ maxy <- 1 } else if (pk == "k"){ maxy <- 1 for (celli in 1:ncell_set){ maxcell_spec <- max(pks_spec[[cellMatch_spec[celli]]][ , "k"]) * 1.01 maxcell_ref <- max(pks_ref[[cellMatch_ref[celli]]][ , "k"]) * 1.01 maxy <- max(c(maxy, maxcell_spec, maxcell_ref)) } } maxy[is.na(maxy)] <- 1 par(mar = c(4,3,2,1)) plot(1, type = "n", xlab = "", ylab = "", bty = "L", xaxt = 'n', yaxt = 'n', ylim = c(0, maxy), xlim = c(0.5, ncell_set + 0.5) ) for (celli in 1:ncell_set){ cMi_s <- cellMatch_spec[celli] cMi_f <- cellMatch_ref[celli] x_s <- celli - 0.2 y_s <- quantile(pks_spec[[cMi_s]][ , pk], 
probs, na.rm = TRUE) x_f <- celli + 0.2 y_f <- quantile(pks_ref[[cMi_f]][ , pk], probs, na.rm = TRUE) med <- c(-0.1, 0.1) tb <- c(-0.07, 0.07) rect(x_s - 0.1, y_s[3], x_s + 0.1, y_s[5], lwd = 1, border = cols["spec"]) points(x_s + med, rep(y_s[4], 2), type = 'l', lwd = 1, col = cols["spec"]) points(x_s + tb, rep(y_s[2], 2), type = 'l', lwd = 1, col = cols["spec"]) points(x_s + tb, rep(y_s[6], 2), type = 'l', lwd = 1, col = cols["spec"]) points(rep(x_s, 3), y_s[1:3], type = 'l', lwd = 1, col = cols["spec"]) points(rep(x_s, 3), y_s[5:7], type = 'l', lwd = 1, col = cols["spec"]) rect(x_f - 0.1, y_f[3], x_f + 0.1, y_f[5], lwd = 1, border = cols["ref"]) points(x_f + med, rep(y_f[4], 2), type = 'l', lwd = 1, col = cols["ref"]) points(x_f + tb, rep(y_f[2], 2), type = 'l', lwd = 1, col = cols["ref"]) points(x_f + tb, rep(y_f[6], 2), type = 'l', lwd = 1, col = cols["ref"]) points(rep(x_f, 3), y_f[1:3], type = 'l', lwd = 1, col = cols["ref"]) points(rep(x_f, 3), y_f[5:7], type = 'l', lwd = 1, col = cols["ref"]) } axis(1, at = 1:ncell_set, labels = FALSE, tck = -0.05) ang <- 0 offy <- -0.25 offx <- NULL if (ncell_set > 3){ ang <- 35 offx <- 1 } xcex <- 0.75 if (ncell_set > 6){ xcex <- 0.5 offy <- -0.125 } text(1:ncell_set, offy, srt = ang, adj = offx, labels = cellNames_set, xpd = TRUE, cex = xcex ) axis(2, at = seq(0, 1, 0.5), las = 1, cex.axis = 0.7) axis(2, at = seq(0, 1, 0.1), labels = FALSE, tck = -0.015) mtext(side = 2, pk, line = 2.2, cex = 1.125) } #' @title template box plot #' #' @description Plot template box plot #' #' @param modelSet modelSet of class pkmSet #' #' @param specificModel name of the specific submodel to plot #' #' @param cols named vector of colors to use for the specific and reference #' models #' #' @return a template box plot #' #' @export #' plotSEBoxTemplate <- function(modelSet, specificModel, cols){ model_spec <- modelSet[[specificModel]] col_spec <- cols["spec"] par(mar = c(0,0,0,0)) par(fig = c(0.92, 1, 0.8, 0.95), new = TRUE) plot(1,1, 
type = "n", bty = "n", xaxt = "n", yaxt = "n", xlab = "", ylab = "", ylim = c(0, 1), xlim = c(0, 1) ) x_s <- 0.1 CL_split <- (1 - model_spec$CL) / 2 probs_y <- c(0, CL_split, 0.25, 0.5, 0.75, 1 - CL_split, 1) set.seed(12) y_s <- quantile(rnorm(1000, 0.5, 0.15), probs = probs_y) med <- c(-0.1, 0.1) tb <- c(-0.07, 0.07) rect(x_s - 0.1, y_s[3], x_s + 0.1, y_s[5], lwd = 1, border = col_spec) points(x_s + med, rep(y_s[4], 2), type = 'l', lwd = 1, col = col_spec) points(x_s + tb, rep(y_s[2], 2), type = 'l', lwd = 1, col = col_spec) points(x_s + tb, rep(y_s[6], 2), type = 'l', lwd = 1, col = col_spec) points(rep(x_s, 3), y_s[1:3], type = 'l', lwd = 1, col = col_spec) points(rep(x_s, 3), y_s[5:7], type = 'l', lwd = 1, col = col_spec) num_CL <- c(CL_split, 1 - CL_split) * 100 text_CL <- paste(num_CL, "%", sep = "") text_ex <- c("min", text_CL[1], "25%", "50%", "75%", text_CL[2], "max") text(x_s + 0.2, y_s, text_ex, cex = 0.6, adj = 0) } #' @title Plot the cellwise results of a single model in a set of SE models #' #' @description Produce a set of cellwise figures for a specific SE model, as #' fit by \code{\link{pkmSet}} #' #' @param modelSet pk model set of class pkmSet #' #' @param specificModel the name of the specific model for the plot #' #' @param cols named vector of colors to use for the specific and reference #' models #' #' @return a plot #' #' @export #' plotSECells <- function(modelSet, specificModel, cols){ model_ref <- refMod(modelSet) par(fig = c(0, 1, 0, 0.65), new = TRUE, mar = c(1, 1, 1, 1)) plot(1,1, type = "n", bty = "n", xaxt = "n", yaxt = "n", xlab = "", ylab = "" ) mtext(side = 1, "Search", line = -0.25, cex = 1.5) mtext(side = 2, "Searcher Efficiency", line = -0.25, cex = 1.5) cells_set <- modelSetCells(modelSet) ncell <- nrow(cells_set) cellNames <- cells_set[ , "CellNames"] nmatrix_col <- min(3, ncell) nmatrix_row <- ceiling(ncell / nmatrix_col) figxspace <- 0.95 / nmatrix_col figyspace <- 0.65 / nmatrix_row x1 <- rep(figxspace * ((1:nmatrix_col) - 
1), nmatrix_row) + 0.035 x2 <- rep(figxspace * ((1:nmatrix_col)), nmatrix_row) + 0.035 y1 <- rep(figyspace * ((nmatrix_row:1) - 1), each = nmatrix_col) + 0.03 y2 <- rep(figyspace * ((nmatrix_row:1)), each = nmatrix_col) + 0.03 bottomCells <- seq(ncell - (nmatrix_col - 1), ncell, 1) leftCells <- which(1:ncell %% nmatrix_col == 1) if (length(leftCells) == 0){ leftCells <- 1 } for (celli in 1:ncell){ par(mar = c(2.5, 2, 0, 0)) par(fig = c(x1[celli], x2[celli], y1[celli], y2[celli]), new = TRUE) specificCell <- cellNames[celli] axis_x <- FALSE axis_y <- FALSE if (celli %in% bottomCells){ axis_x <- TRUE } if (celli %in% leftCells){ axis_y <- TRUE } axes <- c("x" = axis_x, "y" = axis_y) pkmSetSpecSECellPlot(modelSet, specificModel, specificCell, cols, axes) } } #' Plot cell-specific decay curve for searcher efficiency for a specific model #' with comparison to the cellwise model #' #' @param modelSet modelSet of class pkmSet #' #' @param specificModel name of the specific submodel to plot #' #' @param specificCell name of the specific cell to plot #' #' @param cols named vector of colors to use for the specific and reference #' models #' #' @param axes named vector of logical values indicating whether or not to #' plot the x axis and the y axis #' #' @return a specific cell plot panel #' #' @export #' pkmSetSpecSECellPlot <- function(modelSet, specificModel, specificCell, cols, axes){ model_spec <- modelSet[[specificModel]] model_ref <- refMod(modelSet) cellwise_spec <- model_spec$cell_pk cellwise_ref <- model_ref$cell_pk cellNames_spec <- model_spec$cells[ , "CellNames"] cellNames_ref <- model_ref$cells[ , "CellNames"] cells_set <- modelSetCells(modelSet) preds_set <- modelSetPredictors(modelSet) carcCells <- apply(data.frame(model_spec$data0[ , preds_set]), 1, paste, collapse = "." 
) whichCarcs <- which(carcCells == specificCell) if (specificCell == "all"){ whichCarcs <- 1:length(carcCells) } observations <- as.matrix(model_spec$observations[whichCarcs, ], nrow = length(whichCarcs), ncol = ncol(model_spec$observations) ) nobs <- ncol(observations) ncarc <- nrow(observations) carcFound <- apply(observations, 2, sum, na.rm = TRUE) carcUnavail <- apply(apply(observations, 2, is.na), 2, sum) carcAvail <- ncarc - carcUnavail if (any(grepl("k not estimated", specificModel))){ return(1) } pks_spec <- rpk(n = 1000, model = model_spec) pks_ref <- rpk(n = 1000, model = model_ref) # kIncluded <- !any(grepl("k not estimated", specificModel)) # if (kIncluded){ # pks_spec <- rpk(n = 1000, model = model_spec) # pks_ref <- rpk(n = 1000, model = model_ref) # } else{ # pks_spec <- rpk(n = 1000, model = model_spec, kFill = 1) # pks_ref <- rpk(n = 1000, model = model_ref, kFill = 1) # } cellMatch_spec <- matchCells(model_spec, modelSet) cellMatch_ref <- matchCells(model_ref, modelSet) cells_set <- modelSetCells(modelSet) cellNames_set <- cells_set$CellNames whichSpecificCell_spec <- cellMatch_spec[cellNames_set == specificCell] whichSpecificCell_ref <- cellMatch_ref[cellNames_set == specificCell] ps_spec <- pks_spec[[whichSpecificCell_spec]][ , "p"] ks_spec <- pks_spec[[whichSpecificCell_spec]][ , "k"] ps_ref <- pks_ref[[whichSpecificCell_ref]][ , "p"] ks_ref <- pks_ref[[whichSpecificCell_ref]][ , "k"] searchTab <- matrix(1:nobs, nrow = 1000, ncol = nobs, byrow = TRUE) ktab_spec <- ks_spec^(searchTab - 1) ktab_ref <- ks_ref^(searchTab - 1) SE_spec <- ps_spec * ktab_spec SE_ref <- ps_ref * ktab_ref y_spec <- apply(SE_spec, 2, median) y_ref <- apply(SE_ref, 2, median) x_pts <- 1:nobs y_pts <- carcFound / carcAvail plot(x_pts, y_pts, ylim = c(0, 1.1), xlim = c(0.5, nobs + 0.5), xlab = "", ylab = "", xaxt = "n", yaxt = "n", bty = "L", col = rgb(0.02, 0.02, 0.02), lwd = 2, pch = 1, cex = 1.5 ) points(x_pts, y_ref, type = 'l', lwd = 3, col = cols["ref"]) points(x_pts, 
y_spec, type = 'l', lwd = 3, col = cols["spec"]) for (obi in 1:nobs){ x1 <- x_pts[obi] - 0.25 y1 <- y_pts[obi] + 0.035 x2 <- x_pts[obi] + 0.35 y2 <- y_pts[obi] + 0.11 rect(x1, y1, x2, y2, border = NA, col = "white") } obsLabels <- paste(carcFound, carcAvail, sep = "/") text(x_pts + 0.05, y_pts + 0.075, obsLabels, cex = 0.65) axis(1, at = x_pts, las = 1, cex.axis = 0.75, labels = axes["x"]) axis(2, at = seq(0, 1, 0.2), las = 1, cex.axis = 0.75, labels = axes["y"]) text(0.5, 1.1, specificCell, adj = 0, cex = 0.75, font = 2, xpd = TRUE) } #' @title Produce a named vectory with standard SE plot colors #' #' @description Produce a named vectory with standard SE plot colors #' #' @export #' SEcols <- function(){ c(spec = "black", ref = "grey") } #' @title Error check a specific model selection for an SE plot #' #' @description Make sure it's available and good, update the name for usage #' #' @param modelSet pk model set of class pkmSet #' #' @param specificModel the name of the specific model for the plot #' #' @return updated name of the model to use #' #' @export #' checkSpecificModelSE <- function(modelSet, specificModel){ if (!is.null(specificModel) && anyNA(specificModel)){ stop( "specificModel must be NULL or a vector of model names or positions.", "\nNAs not allowed." ) } if (length(specificModel) > 0){ if (is.numeric(specificModel)){ if (anyNA(specificModel)){ warning("specificModel cannot be NA. NA models removed.") specificModel <- specificModel[!is.na(specificModel)] if (length(specificModel) == 0){ stop("No valid specificModel") } } if (any(specificModel > length(modelSet))){ stop(paste0("there are only ", length(modelSet), " model choices.")) } specificModel <- names(modelSet)[specificModel] } if (any(specificModel %in% names(modelSet)) == FALSE){ stop("Selected model not in set. 
To see options use names(modelSet).") } modNames <- specificModel for (modi in modNames){ if (pkmFail(modelSet[[modi]])){ stop("specificModel ", modi, " is not a well-fit pk model") } } } else{ specificModel <- names(pkmSetFailRemove(modelSet)) } return(specificModel) } #' @title Tidy an SE model set #' #' @description Remove bad fit models #' #' @param modelSet pk model set of class pkmSet #' #' @return a trimmed model set #' #' @export #' tidyModelSetSE <- function(modelSet){ modelSet <- pkmSetFailRemove(modelSet) modelSet <- modelSet[order(sapply(modelSet, "[[", "AICc"))] class(modelSet) <- c("pkmSet", "list") return(modelSet) }
a2f79ef755aae6a40a235d5fd9cd811bd7712d21
b3547e9c11de7bea45121be5d475282de7ca0915
/BayesSUR/R/plotGraph.R
799bbe7df3815c1b52f3c873c11b499253680ecb
[]
permissive
ocbe-uio/BayesSUR
e0fbfb48777059d8ad094707406a7c56d322b268
a0806225eba05a835cb1d8042e6749d8a2c9ba00
refs/heads/master
2021-12-10T07:03:55.945054
2021-11-30T08:40:14
2021-11-30T08:40:14
220,016,529
0
0
MIT
2020-10-21T12:09:17
2019-11-06T14:32:02
C++
UTF-8
R
false
false
3,803
r
plotGraph.R
#' @title plot the estimated graph for multiple response variables
#' @description
#' Plot the estimated graph for multiple response variables from a \code{BayesSUR} class object.
#' @importFrom igraph E plot.igraph graph_from_adjacency_matrix
#' @importFrom graphics par
#' @name plotGraph
#' @param x either an object of class \code{BayesSUR} (default) or a symmetric numeric matrix representing an adjacency matrix for a given graph structure.
#' If x is an adjacency matrix, argument \code{main="Given graph of responses"} by default.
#' @param Pmax a value for thresholding the learning structure matrix of multiple response variables. Default is 0.5
#' @param main an overall title for the plot
#' @param edge.width edge width. Default is 2
#' @param edge.weight draw weighted edges after thresholding at 0.5. The default value \code{FALSE} is not to draw weighted edges
#' @param vertex.label character vector used to label the nodes
#' @param vertex.label.color label color. Default is \code{"black"}
#' @param vertex.size node size. Default is 30
#' @param vertex.color node color. Default is \code{"dodgerblue"}
#' @param vertex.frame.color node color. Default is \code{"NA"}
#' @param ... other arguments
#'
#' @examples
#' data("exampleEQTL", package = "BayesSUR")
#' hyperpar <- list( a_w = 2 , b_w = 5 )
#'
#' set.seed(9173)
#' fit <- BayesSUR(Y = exampleEQTL[["blockList"]][[1]],
#'                 X = exampleEQTL[["blockList"]][[2]],
#'                 data = exampleEQTL[["data"]], outFilePath = tempdir(),
#'                 nIter = 100, burnin = 50, nChains = 2, gammaPrior = "hotspot",
#'                 hyperpar = hyperpar, tmpFolder = "tmp/" )
#'
#' ## check output
#' # show the graph relationship between responses
#' plotGraph(fit, estimator = "Gy")
#'
#' @export
plotGraph <- function(x, Pmax=0.5, main = "Estimated graph of responses",
                      edge.width=2, edge.weight=FALSE, vertex.label=NULL,
                      vertex.label.color="black", vertex.size=30,
                      vertex.color="dodgerblue", vertex.frame.color=NA, ...){

  if (!inherits(x, "BayesSUR")){
    # x may instead be a plain symmetric numeric adjacency matrix.
    # && (scalar, short-circuiting) replaces the vectorized & in an if().
    if( is.matrix(x) && is.numeric(x) ){
      if( !((dim(x)[1]==dim(x)[2]) & (sum(dim(x))>2)) )
        stop("Use only with a \"BayesSUR\" object or numeric square matrix")

      Gy_hat <- x
      if(!is.null(vertex.label))
        rownames(Gy_hat) <- colnames(Gy_hat) <- vertex.label
      # only override the title when the caller kept the default
      if( main=="Estimated graph of responses" )
        main <- "Given graph of responses"
    }else{
      stop("Use only with a \"BayesSUR\" object or numeric square matrix")
    }
  }else{
    # resolve the fit's output files relative to its output directory
    x$output[-1] <- paste(x$output$outFilePath,x$output[-1],sep="")
    covariancePrior <- x$input$covariancePrior
    if(covariancePrior == "HIW"){
      Gy_hat <- as.matrix( read.table(x$output$Gy) )
    }else{
      stop("Gy is only estimated with hyper-inverse Wishart prior for the covariance matrix of responses!")
    }

    if(!is.null(vertex.label)){
      rownames(Gy_hat) <- colnames(Gy_hat) <- vertex.label
    }else{
      # TRUE spelled out (T is reassignable and unsafe as an abbreviation)
      rownames(Gy_hat) <- colnames(Gy_hat) <- names(read.table(x$output$Y, header = TRUE))
    }
  }

  if( Pmax<0 | Pmax>1 )
    stop("Please specify correct argument 'Pmax' in [0,1]!")

  if(edge.weight){
    # keep posterior edge probabilities as weights, zeroing edges below Pmax
    Gy_thresh <- Gy_hat
    Gy_thresh[Gy_hat<=Pmax] <- 0
  }else{
    # unweighted 0/1 adjacency after thresholding
    Gy_thresh <- as.matrix( Gy_hat > Pmax )
  }

  net <- graph_from_adjacency_matrix(Gy_thresh, weighted = TRUE,
                                     mode = "undirected", diag = FALSE)

  if( edge.weight ){
    # vertex.label.color and vertex.size are passed here too, so the
    # documented arguments take effect in the weighted branch as well
    # (previously they were silently ignored when edge.weight = TRUE)
    plot.igraph(net, main = main, edge.width=E(net)$weight*2,
                vertex.label=vertex.label, vertex.color=vertex.color,
                vertex.frame.color=vertex.frame.color,
                vertex.label.color=vertex.label.color,
                vertex.size=vertex.size, ...)
  }else{
    plot.igraph(net, main = main, edge.width=edge.width,
                vertex.label=vertex.label, vertex.color=vertex.color,
                vertex.frame.color=vertex.frame.color,
                vertex.label.color=vertex.label.color,
                vertex.size=vertex.size, ...)
  }
}
0d14d05fb94a70281a1925b122cdd04e698904dd
d47833e60e3b9760619cf9c348d97b188f342db3
/MobileNetworkDataSimulationTemplate/code/src/inference/man/computeInitialPopulation.Rd
ebda7dc090cddd1bf08e2c3cc7353918109c01c0
[]
no_license
Lorencrack3/TFG-Lorenzo
1a8ef9dedee45edda19ec93146e9f7701d261fbc
e781b139e59a338d78bdaf4d5b73605de222cd1c
refs/heads/main
2023-06-04T00:23:16.141485
2021-06-29T21:31:25
2021-06-29T21:31:25
364,060,022
0
0
null
null
null
null
UTF-8
R
false
true
2,409
rd
computeInitialPopulation.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/computeInitialPopulation.R
\name{computeInitialPopulation}
\alias{computeInitialPopulation}
\title{Computes the distribution of the population count at initial time instant.}
\usage{
computeInitialPopulation(
  nnet,
  params,
  popDistr,
  rndVal = FALSE,
  ciprob = NULL,
  method = "ETI"
)
}
\arguments{
\item{nnet}{The random values generated with \code{aggregation} package
for the number of individuals detected by the network.}

\item{params}{The parameters of the distribution. It should be a data.table
object with the following columns: \code{region, omega1, omega2, pnrRate,
regionArea_km2, N0, dedupPntRate, alpha, beta, theta, zeta, Q}.}

\item{popDistr}{The distribution to be used for population count. This
parameter could have one of the following values: \code{NegBin} (negative
binomial distribution), \code{BetaNegBin} (beta negative binomial
distribution) or \code{STNegBin} (state process negative binomial
distribution).}

\item{rndVal}{If FALSE the result returned by this function will be a list
with a single element, a data.table object with the following columns:
\code{region, Mean, Mode, Median, SD, Min, Max, Q1, Q3, IQR, CV, CI_LOW,
CI_HIGH}. If TRUE the list will have a second element which is a data.table
object containing the random values generated for each region.}

\item{ciprob}{Value of probability of the CI (between 0 and 1) to be
estimated. If NULL the default value is 0.89.}

\item{method}{The method used to compute credible intervals. It can take
one of two values, 'ETI' or 'HDI'. The default value is 'ETI'.}
}
\value{
A list object with one or two elements. If rndVal is FALSE the list will
have a single element with descriptive statistics for the population count,
which is a data.table object with the following columns: \code{region, Mean,
Mode, Median, Min, Max, Q1, Q3, IQR, SD, CV, CI_LOW, CI_HIGH}.
If rndVal is TRUE the list will have a second element which is a data.table
object containing the random values generated for each region. The names of
the two list elements giving the descriptive statistics and random values
for time t are 'stats' and 'rnd_values'.
}
\description{
Computes the distribution of the population count at initial time instant
using one of the three distributions: Negative Binomial, Beta Negative
Binomial or State Process Negative Binomial.
}
457639b37ccfab993a16b42be00cf159c6426700
f9ea199b6dd4611ede80ef2e7311e401291db602
/plot_example.R
ebc7715693fb64b824d0ec581bceca2ec39169e7
[]
no_license
owenqiz/miscellaneous
1e47bae86268e718710a4df6200f3982e41eb93b
3dc3470bb1acc6401da3a89941aa14c9a545ab74
refs/heads/master
2023-04-13T00:09:13.275915
2023-04-05T06:08:10
2023-04-05T06:08:10
150,648,172
0
0
null
null
null
null
UTF-8
R
false
false
160
r
plot_example.R
# Draw three base-graphics views of a numeric matrix in sequence:
# a histogram, a kernel density curve, and a heatmap image.
plotm <- function(m){
  # pause between plots; remember the previous setting and restore it on
  # exit so the function does not permanently alter the caller's par() state
  oldpar <- par(ask = TRUE)
  on.exit(par(oldpar), add = TRUE)
  hist(m)
  plot(density(m))
  image(m)
}

plotm(matrix(rnorm(100), ncol = 10))
724f74ad488602bb7f7c8bd361764a7b84884eaa
a5978fdc5e4e2400e8042db708db07b01e383346
/tests/testthat/test_format_acct.R
dc89f7f4abeee48c59a96516e6a31f384bc4f248
[]
no_license
willbradley/acctR
eea045fe50446eed133e26d4b1d1bf20e240a13c
82c5f97161cd4b9e1a1bf3079e56bb00303d0d2b
refs/heads/master
2021-09-05T11:25:07.123276
2018-01-26T22:15:44
2018-01-26T22:15:44
119,079,203
0
0
null
null
null
null
UTF-8
R
false
false
377
r
test_format_acct.R
# Test that format_acct works!
# Fixture: a positive value, a negative value, and a missing value.
x <- c(1000, -1000, NA)

test_that("format_acct returns expected output", {
  # non-numeric input is rejected
  expect_error(format_acct('test'))
  # NA inputs stay NA in the formatted output
  expect_true(any(is.na(format_acct(x))))
  # expect_identical gives clearer failure messages than
  # expect_true(identical(...)) while checking the same condition
  expect_identical(format_acct(x[1]), "$1,000.00")
  # negatives are rendered accounting-style, in parentheses
  expect_identical(format_acct(x[2]), "$(1,000.00)")
  # shown.in = 'k' scales to thousands with a K suffix
  expect_identical(format_acct(x[1], shown.in = 'k'), "$1.00K")
})
68886f05344f66a7e9e99e11456ce475444d9afb
2dcd0a1e101abdd50e72d7f37b803c10a41cc659
/src/final/psth.r
9a39aa9a50719ef9ab941ec3ce6ba12d3d2c241a
[ "MIT" ]
permissive
Abraham-newbie/Life-Events
147a9c3eb873a3ae5b640a5f2fe6b9878ff99822
80a6ef45833edb0e5b99530bcbdeac58a8698d39
refs/heads/master
2023-06-24T12:43:26.953285
2021-07-07T12:44:40
2021-07-07T12:44:40
383,793,634
0
0
null
null
null
null
UTF-8
R
false
false
1,724
r
psth.r
psth <- function(tb, eventnames, title_str) { xlabels = c( "pre24" = "-24", "pre12" = "-12", "post03" = "+3", "post06" = "+6", "post09" = "+9", "post12" = "+12", "post24" = "+24", "post36" = "+36", "post48" = "+48" ) timelevels= c('pre36', 'pre24', 'pre12', 'post03', 'post06', 'post09', 'post12', 'post24', 'post36', 'post48') tb %>% filter(code %in% eventnames) %>% group_by(code) %>% summarise(N_min = min(n)) %>% mutate( note = paste('smallest n =', N_min), outcome = 'min' ) -> annotates # tb %>% # mutate(n = ifelse(n > 1000, 1000, n)) -> tb tb %>% filter(code %in% eventnames) %>% mutate( outcome = recode(outcome, mcs = 'Affective', losat = 'Cognitive'), term = factor(term, levels = timelevels, ordered = TRUE), estimate=scale(estimate), code = fct_relevel(code, eventnames)) %>% ggplot(aes(x = term, y = estimate, group = outcome, colour = outcome)) + geom_point() + geom_line() + geom_errorbar(aes(ymin = lower, ymax = upper), width = 0.2) + geom_hline(yintercept = 0, alpha = 0.25) + geom_vline(xintercept = 2.85, alpha = 0.25) + scale_x_discrete(labels = xlabels) + ylim(-10,10)+ facet_wrap(~ code) + labs(title = title_str, subtitle = 'Coefficients (sd units) at 90 percent confidence intervals', y = '', x = 'months pre and post life event') + geom_text(data = annotates, aes(x = Inf, y = -Inf, label = note), hjust = 1.1, vjust = -.75, colour = 1, size = 3) + theme_light()+ theme(plot.title = element_text(face="bold"))+ expand_limits(y = 900000) # or some other arbitrarily large number }
3d687e0994b22eddaafb01655163492d671a8670
95494552a25d2250351e2d53bd8306947610a91d
/pwr-crossvalidation.R
bbe39d1b68c9a6e4d762bede42dc297bd68ffee9
[]
no_license
lizzieinvancouver/pwr
aedd7b773465b26710cb3501fe836e5b270e4e5a
3993a7535fb64f5db1698165a7dad4652c057001
refs/heads/master
2022-05-02T13:03:04.286970
2022-04-28T17:35:34
2022-04-28T17:35:34
143,559,117
5
1
null
null
null
null
UTF-8
R
false
false
11,152
r
pwr-crossvalidation.R
# Davies et al. R code for PWR cross-validation (CV) # Code includes f(x)s and CV of Arnell data # # By Jim Regetz (NCEAS) # simple function to randomly partition indices 1:n into k equal sized # groups (folds) fold <- function(n, k) { samp <- sample(n) len <- unname(table(cut(seq.int(n), k))) end <- cumsum(len) mapply( function(start, end, samp) { samp[start:end] }, start = c(1, head(end, -1)+1), end = cumsum(len), MoreArgs = list(samp=sample(n)), SIMPLIFY = FALSE, USE.NAMES = FALSE ) } # cross-validation of pwr pwr.cv <- function(formula, phy4d, wfun, bwidth, method, holdout) { # extract training set from phy4d phy.train <- phy4d[setdiff(seq(nTips(phy4d)), holdout)] if (missing(bwidth)) { bwidth <- get.opt.bw(formula, phy.train, wfun=wfun, method=method) } # get weights vectors for *all* species, but only keep rows # corresponding to training set wts.train <- getWts(phy4d, bwidth, wfun)[-holdout,] # extract training data from phy4d dat.train <- tipData(phy.train) # loop over each point and do a weighted least squares regression # with weights based on distance yhat <- sapply(holdout, function(p) { w <- wts.train[,p] b <- lm(formula, data=data.frame(dat.train, w), weights=w) predict(b, tipData(phy4d)[p,]) }) return(data.frame( y=tipData(phy4d)[holdout, as.character(formula)[[2]]], yhat.pwr=yhat) ) } pwr.cv.slope <- function(formula, phy4d, wfun, bwidth, method, holdout) { # extract training set from phy4d phy.train <- phy4d[setdiff(seq(nTips(phy4d)), holdout)] if (missing(bwidth)) { bwidth <- get.opt.bw(formula, phy.train, wfun=wfun, method=method) } # get weights vectors for *all* species, but only keep rows # corresponding to training set wts.train <- getWts(phy4d, bwidth, wfun)[-holdout,] # extract training data from phy4d dat.train <- tipData(phy.train) # loop over each point and do a weighted least squares regression # with weights based on distance yhat <- sapply(holdout, function(p) { w <- wts.train[,p] b <- lm(formula, data=data.frame(dat.train, w), 
weights=w) b$coefficients["x"] }) return(data.frame( slope=tipData(phy4d)[holdout, "true_slope"], slope.pwr=yhat) ) } # cross-validation of pgls pgls.cv <- function(formula, phy4d, holdout) { # extract training set from phy4d p4.train <- phy4d[setdiff(seq(nTips(phy4d)), holdout)] phy.train <- suppressWarnings(as(p4.train, "phylo")) # extract training data from phy4d dat.train <- tipData(p4.train) # loop over each point and do a weighted least squares regression # with weights based on distance pgls <- do.call(gls, list(model=formula, data=dat.train, correlation=corBrownian(phy=phy.train))) return(data.frame( y=tipData(phy4d)[holdout, as.character(formula)[[2]]], yhat.pgls=predict(pgls, tipData(phy4d)[holdout,])) ) } pgls.cv.slope <- function(formula, phy4d, holdout) { # extract training set from phy4d p4.train <- phy4d[setdiff(seq(nTips(phy4d)), holdout)] phy.train <- suppressWarnings(as(p4.train, "phylo")) # extract training data from phy4d dat.train <- tipData(p4.train) # loop over each point and do a weighted least squares regression # with weights based on distance pgls <- do.call(gls, list(model=formula, data=dat.train, correlation=corBrownian(phy=phy.train))) return(data.frame( slope=tipData(phy4d)[holdout, "true_slope"], slope.pgls=pgls$coefficients[[2]]) ) } simcv <- function(pglsfit, sim=c("slope", "var1", "var2"), vcv, mc.cores) { sim <- match.arg(sim, c("slope", "var1", "var2")) # make sure target columns don't already exist tipData(arn.p4d)$simSlope <- NULL tipData(arn.p4d)$seed.sim <- NULL tipData(arn.p4d)$ffd.sim <- NULL if (sim=="slope") { # extract global intercept (assumed constant in our simulation) simInt <- coef(pglsfit)[[1]] # use global slope estimate as root value for simulated forward # evolution of this "trait", and add to tree data simSlope <- rTraitCont(as(arn.p4d, "phylo"), sigma=2, root.value=coef(pglsfit)[[2]]) arn.p4d <- addData(arn.p4d, data.frame(simSlope)) # generate simulated FFD arn.p4d <- addData(arn.p4d, 
data.frame(ffd.sim=simInt + tipData(arn.p4d)$simSlope * tipData(arn.p4d)$seed, seed.sim=tipData(arn.p4d)$seed)) } else if (sim=="var1") { require(geiger) if (missing(vcv)) { vcv <- ic.sigma(arnp, tipData(arn.p4d)[c("FFD", "seed")]) } vars.sim <- sim.char(arnp, vcv)[,,1] arn.p4d <- addData(arn.p4d, setNames(data.frame(vars.sim), c("ffd.sim", "seed.sim"))) } else if (sim=="var2") { require(phytools) tipData(arn.p4d)$seed.sim <- fastBM(arnp, a=coef(gls(seed ~ 1, data=tipData(arn.p4d), correlation=corBrownian(phy=arnp)))) tipData(arn.p4d)$ffd.sim <- c(cbind(1, tipData(arn.p4d)$seed.sim) %*% coef(pglsfit)) + fastBM(arnp) } # 5-fold cross validation k <- fold(nTips(arn.p4d), 5) if (missing(mc.cores)) { mc.cores <- min(length(k), 16) } # ...pgls arn.pgls.cve <- mclapply(seq_along(k), function(fold) { yhat <- pgls.cv(ffd.sim ~ seed.sim, arn.p4d, holdout=k[[fold]]) sqrt(mean(do.call("-", yhat)^2)) }, mc.cores=mc.cores) # ...pwr arn.pwr.cve <- mclapply(seq_along(k), function(fold) { yhat <- pwr.cv(ffd.sim ~ seed.sim, arn.p4d, wfun="martins", method="L-BFGS-B", holdout=k[[fold]]) sqrt(mean(do.call("-", yhat)^2)) }, mc.cores=mc.cores) return(c( sapply(c(mean.pgls=mean, sd.pgls=sd), function(f) f(unlist(arn.pgls.cve))), sapply(c(mean.pwr=mean, sd.pwr=sd), function(f) f(unlist(arn.pwr.cve))) )) } # # procedural code # stop("end of function definitions") set.seed(99) k <- matrix(sample(nTips(ap4d2)), ncol=5) library(parallel) # estimate expected generalization error using 5-fold CV # ...pwr pgls.cve <- mclapply(seq(ncol(k)), function(fold) { yhat <- pgls.cv(ffd.sc ~ seed.sc, ap4d2, holdout=k[,fold]) sqrt(mean(do.call("-", yhat)^2)) }) # ...pgls pwr.cve <- mclapply(seq(ncol(k)), function(fold) { yhat <- pwr.cv(ffd.sc ~ seed.sc, ap4d2, wfun="martins", method="L-BFGS-B", holdout=k[, fold]) sqrt(mean(do.call("-", yhat)^2)) }) sapply(c(mean=mean, sd=sd), function(f) f(unlist(pgls.cve))) ## mean sd ## 39.82692 2.74358 sapply(c(mean=mean, sd=sd), function(f) f(unlist(pwr.cve))) ## mean 
sd ## 29.19123 5.25956 # estimate expected generalization error using 5-fold CV # ...pwr arn.pgls.cve <- mclapply(seq(ncol(k)), function(fold) { yhat <- pgls.cv(FFD ~ seed, arn.p4d, holdout=k[fold]) sqrt(mean(do.call("-", yhat)^2)) }) # ...pgls arn.pwr.cve <- mclapply(seq(ncol(k)), function(fold) { yhat <- pwr.cv(FFD ~ seed, arn.p4d, wfun="martins", method="L-BFGS-B", holdout=k[fold]) sqrt(mean(do.call("-", yhat)^2)) }) sapply(c(mean=mean, sd=sd), function(f) f(unlist(arn.pgls.cve))) ## mean sd ## 39.82692 2.74358 sapply(c(mean=mean, sd=sd), function(f) f(unlist(arn.pwr.cve))) ## mean sd ## 29.19123 5.25956 # estimate expected generalization error using 10-fold CV set.seed(99) k <- fold(nTips(arn.p4d), 10) # ...pgls arn.pgls.cvp <- mclapply(seq_along(k), function(fold) { yhat <- pgls.cv(FFD ~ seed, arn.p4d, holdout=k[[fold]]) yhat }, mc.cores=min(length(k), 16)) arn.pgls.cve <- lapply(arn.pgls.cvp, function(y) { sqrt(mean(do.call("-", y)^2)) }) sapply(c(mean=mean, sd=sd), function(f) f(unlist(arn.pgls.cve))) ## mean sd ## 20.698476 5.332476 mean(with(do.call(rbind, arn.pgls.cvp), (y-yhat.pgls)^2)) ## [1] 454.0954 # ...pwr arn.pwr.cvp <- mclapply(seq_along(k), function(fold) { yhat <- pwr.cv(FFD ~ seed, arn.p4d, wfun="martins", method="L-BFGS-B", holdout=k[[fold]]) yhat }, mc.cores=min(length(k), 16)) arn.pwr.cve <- lapply(arn.pwr.cvp, function(y) { sqrt(mean(do.call("-", y)^2)) }) sapply(c(mean=mean, sd=sd), function(f) f(unlist(arn.pwr.cve))) ## mean sd ## 18.491499 3.691288 mean(with(do.call(rbind, arn.pwr.cvp), (y-yhat.pwr)^2)) ## [1] 353.5059 # estimate expected generalization error using 5-fold CV set.seed(99) k <- fold(nTips(arn.p4d), 5) # ...pgls arn.pgls.cve <- mclapply(seq_along(k), function(fold) { yhat <- pgls.cv(FFD ~ seed, arn.p4d, holdout=k[[fold]]) sqrt(mean(do.call("-", yhat)^2)) }, mc.cores=min(length(k), 16)) # ...pwr arn.pwr.cve <- mclapply(seq_along(k), function(fold) { yhat <- pwr.cv(FFD ~ seed, arn.p4d, wfun="martins", method="L-BFGS-B", 
holdout=k[[fold]]) sqrt(mean(do.call("-", yhat)^2)) }, mc.cores=min(length(k), 16)) sapply(c(mean=mean, sd=sd), function(f) f(unlist(arn.pgls.cve))) ## mean sd ## 21.097889 2.928884 # ... in-sample prediction error sqrt(mean(resid(pgls.arn.brownian)^2)) ## [1] 21.32004 sapply(c(mean=mean, sd=sd), function(f) f(unlist(arn.pwr.cve))) ## mean sd ## 18.453470 2.916869 # ... in-sample prediction error sqrt(mean((mapply(function(coef, seed) coef["(Intercept)", "Estimate"] + coef["seed", "Estimate"] * seed, pwr.arn.martins, tipData(arn.p4d)$seed) - tipData(arn.p4d)$FFD)^2)) ## [1] 8.879647 cve.sim <- mclapply(1:100, function(i) simcv(pgls.arn.brownian, mc.cores=1), mc.cores=25) system.time(cve.sim <- mclapply(1:100, function(i) simcv(pgls.arn.brownian, mc.cores=1), mc.cores=25) ) ## user system elapsed ## 14191.143 10.849 643.536 system.time(cve.sim <- mclapply(1:100, function(i) simcv(pgls.arn.brownian, mc.cores=1), mc.cores=25) ) # # simulate data using fastBM # library(phytools) set.seed(99) tipData(arn.p4d)$x <- fastBM(arnp, a=coef(gls(seed ~ 1, data=tipData(arn.p4d), correlation=corBrownian(phy=arnp)))) tipData(arn.p4d)$y <- c(cbind(1, tipData(arn.p4d)$x) %*% coef(gls(FFD ~ seed, data=tipData(arn.p4d), correlation=corBrownian(phy=arnp)))) + fastBM(arnp) # ...pgls arnBM.pgls.cve <- mclapply(seq_along(k), function(fold) { yhat <- pgls.cv(y ~ x, arn.p4d, holdout=k[[fold]]) sqrt(mean(do.call("-", yhat)^2)) }, mc.cores=min(length(k), 16)) # ...pwr arnBM.pwr.cve <- mclapply(seq_along(k), function(fold) { yhat <- pwr.cv(y ~ x, arn.p4d, wfun="martins", method="L-BFGS-B", holdout=k[[fold]]) sqrt(mean(do.call("-", yhat)^2)) }, mc.cores=min(length(k), 16)) arnBM.pgls.yhat <- do.call(rbind, mclapply(1:length(k), function(fold) pgls.cv(y ~ x, arn.p4d, holdout=k[[fold]]), mc.cores=5)) arnBM.pwr.yhat <- do.call(rbind, mclapply(1:length(k), function(fold) pwr.cv(y ~ x, arn.p4d, wfun="martins", method="L-BFGS-B", holdout=k[[fold]]), mc.cores=5))
5570927927e7356701b139fb9653175790d13ccd
3845f0cfcf3d2cb7abf8b358daf95f2ea5f8529c
/plot1.R
323eb65333e8ebadf13b69a6c8d4b3301db945e4
[]
no_license
wiseguy3257/ExData_Plotting1
869a1316f88763516972af5cae32b558637654ac
521d888830f1b5912ee6199866cf3780c7d40777
refs/heads/master
2021-01-18T17:40:31.486906
2014-12-07T23:48:08
2014-12-07T23:48:08
null
0
0
null
null
null
null
UTF-8
R
false
false
429
r
plot1.R
setwd("C:/Users/Chris/Desktop/R/exdata-data-household_power_consumption") d = read.table("household_power_consumption.txt", sep=";", header=T, na.strings="?", stringsAsFactors=FALSE) str(d) dat <- d[which(d$Date=='1/2/2007' | d$Date=='2/2/2007'),] str(dat) hist(dat$Global_active_power, col="red", xlab="Global Active Power (kilowatts)", main="Global Active Power") dev.copy(png, file="plot1.png") dev.off()
d4a62e248001ab7577f6401b34abc01428731da2
e5fc120f866933943a29c796c7c607dc2690cab3
/analysis/ipm/ltre/sensitivity_mean_ipm.R
0f9cc452ccd2728f9e32c548c211c5e780d0e3e6
[]
no_license
AldoCompagnoni/lupine
e07054e7e382590d5fa022a23e024dfec80c80b2
afc41a2b66c785957db25583f25431bb519dc7ec
refs/heads/master
2021-06-23T04:50:30.617943
2021-06-11T13:00:59
2021-06-11T13:00:59
185,047,698
0
0
null
null
null
null
UTF-8
R
false
false
17,892
r
sensitivity_mean_ipm.R
# Calculate sensitivities of the mean model rm(list=ls()) source("analysis/format_data/format_functions.R") options(stringsAsFactors = F) library(dplyr) library(tidyr) library(ggplot2) library(readxl) library(testthat) # data lupine_df <- read.csv( "data/lupine_all.csv") fruit_rac <- read_xlsx('data/fruits_per_raceme.xlsx') seed_x_fr <- read_xlsx('data/seedsperfruit.xlsx') pred_g <- read_xlsx('data/post predation_lupinus tidestromii.xlsx') sl_size <- read.csv('results/ml_mod_sel/size_sl/seedl_size.csv') clim <- read.csv("data/prism_point_reyes_87_18.csv") enso <- read.csv("data/enso_data.csv") germ <- read_xlsx('data/seedbaskets.xlsx') %>% select(g0:g2) %>% colMeans germ_adj <- read.csv('results/ml_mod_sel/germ/germ_adj.csv') # format climate data ---------------------------------------- years <- c(2005:2018) m_obs <- 5 m_back <- 36 # calculate yearly anomalies year_anom <- function(x, var ){ # set names of climate variables clim_names <- paste0( var,c('_t0','_tm1','_t0_tm1','_t0_tm2') ) mutate(x, avgt0 = x %>% select(V1:V12) %>% rowSums, avgtm1 = x %>% select(V13:V24) %>% rowSums, avgt0_tm1 = x %>% select(V1:V24) %>% rowSums, avgt0_tm2 = x %>% select(V1:V36) %>% rowSums ) %>% select(year, avgt0, avgtm1, avgt0_tm1, avgt0_tm2) %>% setNames( c('year',clim_names) ) } # format climate - need to select climate predictor first ppt_mat <- subset(clim, clim_var == "ppt") %>% prism_clim_form("precip", years, m_back, m_obs) %>% year_anom('ppt') tmp_mat <- subset(clim, clim_var == 'tmean') %>% prism_clim_form('tmean', years, m_back, m_obs) %>% year_anom('tmp') enso_mat <- subset(enso, clim_var == 'oni' ) %>% month_clim_form('oni', years, m_back, m_obs) %>% year_anom('oni') # put together all climate clim_mat <- Reduce( function(...) 
full_join(...), list(ppt_mat,tmp_mat,enso_mat) ) # format climate data ---------------------------------------- years <- c(2005:2018) m_obs <- 5 m_back <- 36 # calculate yearly anomalies year_anom <- function(x, var){ # set names of climate variables clim_names <- paste0( var,c('_t0','_tm1','_t0_tm1','_t0_tm2') ) mutate(x, avgt0 = x %>% select(V1:V12) %>% rowSums, avgtm1 = x %>% select(V13:V24) %>% rowSums, avgt0_tm1 = x %>% select(V1:V24) %>% rowSums, avgt0_tm2 = x %>% select(V1:V36) %>% rowSums ) %>% select(year, avgt0, avgtm1, avgt0_tm1, avgt0_tm2) %>% setNames( c('year',clim_names) ) } # format climate - need to select climate predictor first ppt_mat <- subset(clim, clim_var == "ppt") %>% prism_clim_form("precip", years, m_back, m_obs) %>% year_anom('ppt') tmp_mat <- subset(clim, clim_var == 'tmean') %>% prism_clim_form('tmean', years, m_back, m_obs) %>% year_anom('tmp') enso_mat <- subset(enso, clim_var == 'oni' ) %>% month_clim_form('oni', years, m_back, m_obs) %>% year_anom('oni') # put together all climate clim_mat <- Reduce( function(...) 
full_join(...), list(ppt_mat,tmp_mat,enso_mat) ) # vital rates format -------------------------------------------------------------- # first, format site/year combinations site_df <- select(lupine_df, year, location) %>% unique %>% # create 'Site' column to merge with consumption dat mutate( Site = gsub(' \\([0-9]\\)','',location) ) %>% subset( year > 2004 ) %>% arrange( location, year ) %>% complete(location,year) surv <- subset(lupine_df, !is.na(surv_t1) ) %>% subset( area_t0 != 0) %>% mutate( log_area_t0 = log(area_t0), year = year ) %>% mutate( log_area_t02 = log_area_t0^2, log_area_t03 = log_area_t0^3) %>% left_join( clim_mat ) grow <- lupine_df %>% # remove sleedings at stage_t0 subset(!(stage_t0 %in% c("DORM", "NF")) & !(stage_t1 %in% c("D", "NF", "DORM")) ) %>% # remove zeroes from area_t0 and area_t1 subset( area_t0 != 0) %>% subset( area_t1 != 0) %>% mutate( log_area_t1 = log(area_t1), log_area_t0 = log(area_t0), log_area_t02 = log(area_t0)^2, year = year ) %>% left_join( clim_mat ) flow <- subset(lupine_df, !is.na(flow_t0) ) %>% subset( area_t0 != 0) %>% mutate( log_area_t0 = log(area_t0), log_area_t02 = log(area_t0)^2, year = year ) %>% left_join( clim_mat ) fert <- subset(lupine_df, flow_t0 == 1 ) %>% subset( area_t0 != 0) %>% subset( !is.na(numrac_t0) ) %>% # remove non-flowering individuals subset( !(flow_t0 %in% 0) ) %>% mutate( log_area_t0 = log(area_t0), log_area_t02 = log(area_t0)^2, year = year ) %>% # remove zero fertility (becase fertility should not be 0) # NOTE: in many cases, notab_t1 == 0, because numab_t1 == 0 also subset( !(numrac_t0 %in% 0) ) %>% left_join( clim_mat ) abor_df <- subset(lupine_df, !is.na(flow_t0) & flow_t0 == 1 ) %>% subset( !is.na(numrac_t0) ) %>% # remove non-flowering individuals subset( !(flow_t0 %in% 0) ) %>% # remove zero fertility (becase fertility should not be 0) subset( !(numrac_t0 %in% 0) ) %>% # only years indicated by Tiffany subset( year %in% c(2010, 2011, 2013:2017) ) %>% # calculate abortion rates mutate( 
ab_r = numab_t0 / numrac_t0 ) %>% group_by( location, year ) %>% summarise( ab_r_m = mean(ab_r, na.rm=T) ) %>% ungroup %>% right_join( select(site_df,-Site) ) %>% mutate( ab_r_m = replace(ab_r_m, is.na(ab_r_m), mean(ab_r_m, na.rm=T)) ) cons_df <- read_xlsx('data/consumption.xlsx') %>% mutate( Mean_consumption = Mean_consumption %>% as.numeric) %>% select( Year, Site, Mean_consumption) %>% # update contents/names of variables for merging mutate( Site = toupper(Site) ) %>% rename( year = Year ) %>% # expand to all site/year combinations right_join( site_df ) %>% mutate( Mean_consumption = replace(Mean_consumption, is.na(Mean_consumption), mean(Mean_consumption,na.rm=T) ) ) %>% # remove NA locations subset( !is.na(location) ) %>% # remove annoying code select( -Site ) %>% rename( cons = Mean_consumption ) %>% arrange(location,year) germ_df <- site_df %>% select( location ) %>% unique %>% arrange( location ) %>% left_join(germ_adj) %>% # create germ_obs for AL (1) mutate( germ_obs = replace( germ_obs, location == 'AL (1)', mean(germ_obs[location %in% c('ATT (8)', 'POP9 (9)')])) ) %>% # create germ_obs for BR (6) mutate( germ_obs = replace( germ_obs, location == 'BR (6)', mean(germ_obs[location %in% c('BS (7)', 'DR (3)')])) ) %>% # post-dispersal predation mutate( post_d_p = (germ['g0'] - germ_obs) / germ['g0'] ) %>% # add germination rats mutate( g0 = germ['g0'], g1 = germ['g1'], g2 = germ['g2'] ) # models --------------------------------------------------------- mod_s <- glm(surv_t1 ~ log_area_t0 + log_area_t02 + log_area_t03 + tmp_tm1, data=surv, family='binomial') mod_g <- lm( log_area_t1 ~ log_area_t0, data=grow) g_lim <- range(c(grow$log_area_t0, grow$log_area_t1)) mod_fl <- glm(flow_t0 ~ log_area_t0 + tmp_tm1, data=flow, family='binomial') mod_fr <- glm(numrac_t0 ~ log_area_t0 + tmp_tm1, data=fert, family='poisson') fr_rac <- glm(NumFruits ~ 1, data=fruit_rac, family='poisson') seed_fr <- glm(SEEDSPERFRUIT ~ 1, data=mutate(seed_x_fr, # substitute 0 value with 
really low value (0.01) SEEDSPERFRUIT = replace(SEEDSPERFRUIT, SEEDSPERFRUIT == 0, 0.01) ), family=Gamma(link = "log")) # vital rate models surv_p <- coef(mod_s) grow_p <- coef(mod_g) grow_p <- c(grow_p, summary(mod_g)$sigma) flow_p <- coef(mod_fl) fert_p <- coef(mod_fr) size_sl_p <- sl_size fr_rac_p <- coef(fr_rac) %>% exp seed_fr_p <- coef(seed_fr) %>% exp germ_p <- germ * (1 - 0.9) cons_p <- subset(cons_df, location != 'AL (1)')$cons %>% mean(na.rm=T) abor_p <- abor_df$ab_r_m %>% mean # IPM parameters ------------------------------------------------------------- # function to extract values extr_value <- function(x, field){ subset(x, type_coef == 'fixef' & ranef == field )$V1 } # list of mean IPM parameters. pars_mean <- list( # adults vital rates surv_b0 = surv_p['(Intercept)'], surv_b1 = surv_p['log_area_t0'], surv_b2 = surv_p['log_area_t02'], surv_b3 = surv_p['log_area_t03'], surv_clim = surv_p['tmp_tm1'], grow_b0 = grow_p['(Intercept)'], grow_b1 = grow_p['log_area_t0'], grow_sig = grow_p[3], flow_b0 = flow_p['(Intercept)'], flow_b1 = flow_p['log_area_t0'], flow_clim = flow_p['tmp_tm1'], fert_b0 = fert_p['(Intercept)'], fert_b1 = fert_p['log_area_t0'], fert_clim = fert_p['tmp_tm1'], abort = abor_p, clip = cons_p, fruit_rac = fr_rac_p, seed_fruit = seed_fr_p, g0 = germ_p['g0'], g1 = germ_p['g1'], g2 = germ_p['g2'], recr_sz = size_sl_p$mean_sl_size, recr_sd = size_sl_p$sd_sl_size, L = g_lim[1], U = g_lim[2], mat_siz_sl = 100, mat_siz = 100 ) # test that no NAs present expect_equal(pars_mean %>% unlist %>% is.na() %>% sum, 0) # IPM functions ------------------------------------------------------------------------------ inv_logit <- function(x){ exp(x)/(1+exp(x)) } # Survival at size x sx<-function(x,pars,tmp_anom){ # survival prob. 
of each x size class inv_logit(pars$surv_b0 + pars$surv_b1 * x + pars$surv_b2 * x^2 + pars$surv_b3 * x^3 + pars$surv_clim * tmp_anom) } # update kernel functions grow_sd <- function(x,pars){ pars$a*(exp(pars$b*x)) %>% sqrt } # growth (transition) from size x to size y gxy <- function(y,x,pars){ # returns a *probability density distribution* for each x value dnorm(y, mean = pars$grow_b0 + pars$grow_b1*x, sd = pars$grow_sig) } # transition: Survival * growth pxy<-function(y,x,pars,tmp_anom){ sx(x,pars,tmp_anom) * gxy(y,x,pars) } # production of seeds from x-sized mothers fx <-function(x,pars,tmp_anom){ # total racemes prod tot_rac <- inv_logit( pars$flow_b0 + pars$flow_b1*x + pars$flow_clim*tmp_anom ) * exp( pars$fert_b0 + pars$fert_b1*x + pars$fert_clim*tmp_anom ) # viable racs viab_rac <- tot_rac * (1- pars$abort) * (1-pars$clip) # viable seeds viab_sd <- viab_rac * pars$fruit_rac * pars$seed_fruit return(viab_sd) } # Size distribution of recruits recs <-function(y,pars){ dnorm(y, mean = pars$recr_sz, sd = pars$recr_sd ) } fxy <- function(y,x,pars,tmp_anom){ fx(x,pars,tmp_anom) * recs(y,pars) } # IPM kernel/matrix ------------------------------------------------------------ kernel <- function(tmp_anom, pars){ # set up IPM domains -------------------------------------------------------- # plants n <- pars$mat_siz L <- pars$L U <- pars$U #these are the upper and lower integration limits h <- (U-L)/n #Bin size b <- L+c(0:n)*h #Lower boundaries of bins y <- 0.5*(b[1:n]+b[2:(n+1)]) #Bins' midpoints #these are the boundary points (b) and mesh points (y) # populate kernel ------------------------------------------------------------ # seeds mini matrix s_mat <- matrix(0,2,2) # seeds that enter 2 yr-old seed bank plant_s2 <- fx(y,pars,tmp_anom) * pars$g2 # seeds that enter 1 yr-old seed bank plant_s1 <- fx(y,pars,tmp_anom) * pars$g1 # seeds that go directly to seedlings germinate right away Fmat <- (outer(y,y, fxy, pars, tmp_anom) * pars$g0 * h) # seeds that enter 2 yr-old 
seed bank s_mat[2,1] <- 1 # recruits from the 1 yr-old seedbank s1_rec <- h * recs(y, pars) # recruits from the 2 yr-old seedbank s2_rec <- h * recs(y, pars) # survival and growth of adult plants Tmat <- outer(y,y,pxy,pars,tmp_anom) * h # rotate <- function(x) t(apply(x, 2, rev)) # outer(y,y, fxy, pars, h) %>% t %>% rotate %>% image small_K <- Tmat + Fmat # Assemble the kernel ------------------------------------------------------------- # top 2 vectors from_plant <- rbind( rbind( plant_s2, plant_s1), small_K ) # leftmost vectors from_seed <- rbind( s_mat, cbind(s1_rec, s2_rec) ) k_yx <- cbind( from_seed, from_plant ) return(k_yx) # tests "integrating' functions --------------------------------------------------- # s_sl # expect_true( ((outer( rep(1,100), y_s, s_sl, pars, h_s) %>% t %>% colSums) > 0.99) %>% all ) # gxy # expect_true( ((outer(y,y,gxy,pars)*h) %>% t %>% colSums > 0.97) %>% all) # gxy_s: huge unintentional eviction. Why? # expect_true( ((outer(y_s,y,gxy_s,pars)*h) %>% t %>% colSums > 0.97) %>% all) } ker <- kernel(0,pars_mean) lambda <- Re(eigen(ker)$value[1]) nPar <- length(pars_mean) - 4 sPar <- numeric(nPar) # vector to hold parameter sensitivities dp <- 0.01 # perturbation for calculating sensitivities for(j in 1:nPar){ m.par <- pars_mean m.par[[j]] <- m.par[[j]] - dp IPM.down <- kernel(0.5,m.par) lambda.down <- Re(eigen(IPM.down)$values[1]) m.par[[j]] <- m.par[[j]] + 2*dp IPM.up <- kernel(0.5,m.par) lambda.up <- Re(eigen(IPM.up)$values[1]) sj <- (lambda.up-lambda.down)/(2*dp) sPar[j] <- sj cat(j,names(pars_mean)[j],sj,"\n"); } graphics.off(); dev.new(width=11,height=6); par(mfrow=c(2,1),mar=c(4,2,2,1),mgp=c(2,1,0)); # graph the sensitivity sens_df <- data.frame( parameter = names(pars_mean)[1:nPar], sensitivity = sPar, elasticity = sPar*abs(as.numeric(pars_mean[1:nPar]))/lambda ) %>% gather( measure, value, sensitivity:elasticity) ggplot(sens_df) + geom_bar( aes( x = parameter, y = value ), stat = 'identity') + theme( axis.text.x = element_text( 
angle = 80, vjust = 0.5) ) + facet_grid( measure ~ 1 ) + ylab( 'Sensitivity/Elasticity') + xlab( 'Parameter' ) + ggsave( 'results/ipm/ltre/sens_elast.tiff', width=6.3,height=6.3,compression='lzw') # only plot elasticity subset(sens_df, measure == 'elasticity') %>% ggplot() + geom_bar( aes( x = parameter, y = value ), stat = 'identity') + theme( axis.text.x = element_text( angle = 80, vjust = 0.5) ) + ylab( 'Elasticity') + xlab( 'Parameter' ) + ggsave( 'results/ipm/ltre/elast.tiff', width=6.3,height=6.3,compression='lzw')
c942bf00bedd981b6c57aca30631202524e9c934
70bd36f4dbbbdb101e853d135b34385ae7366161
/squirrels.R
24e29e94afdfaca98b7305f0824a6051ef0bbf76
[ "MIT" ]
permissive
bbelcher97/mapping
35560ad8b4067c2ff0ae3963d7ebc8ef87579920
3b33a3d323256c711d6d1db38c28f495d9e48828
refs/heads/master
2020-04-07T17:49:09.526748
2018-12-17T01:25:51
2018-12-17T01:25:51
158,584,918
0
0
null
null
null
null
UTF-8
R
false
false
163
r
squirrels.R
library(spocc) library(mapr) squirrels <- occ(query='Sciuridae', from='gbif', limit=2500) df = as.data.frame(squirrels$gbif$data$Sciuridae) map_leaflet(squirrels)
fb9c4b46d0cef314bb5a1418f7b21e7a6e2f66f5
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/Compositional/examples/diri.reg.Rd.R
1db3b9832b54a7d0202ce73e472914abeabec4ed
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
343
r
diri.reg.Rd.R
library(Compositional) ### Name: Dirichlet regression ### Title: Dirichlet regression ### Aliases: diri.reg diri.reg2 ### Keywords: Dirichlet regression multivariate regression ### ** Examples x <- as.vector(iris[, 4]) y <- as.matrix(iris[, 1:3]) y <- y / rowSums(y) mod1 <- diri.reg(y, x) mod2 <-diri.reg2(y, x) mod3 <- comp.reg(y, x)
34d8b04a6586c0f7976bdf3f8e63e9ef90737cce
1fe9c4fc4f4b3a193ee042c414bcd87c22fec4af
/Dealing_with_date-time_data/time-zones.R
d7c43ddefa9bdce635ae2d8c422cdc197bf23be6
[ "MIT" ]
permissive
oucru-biostats/Data_management_and_basic_summaries_in_R
949e6efd6e2dbe0c5d381aab2a6cfe087e4786a3
8153ee732eff1a3bc227cd5211ff30c357871e1f
refs/heads/main
2023-03-19T04:17:51.909131
2021-03-03T02:47:39
2021-03-03T02:47:39
343,683,158
0
0
null
null
null
null
UTF-8
R
false
false
83
r
time-zones.R
Time_Zones <- read.csv('time-zones.csv', stringsAsFactors = FALSE) View(Time_Zones)
056001e23a334d20e331a90e1e41c83407cc90cb
cbd96ff896c7c62ed6b4960e5011e14039e5e60c
/inst/tests/test-dmvn.R
837d99c80e9ae79846babb3ecf6211c8a5101f55
[]
no_license
mfasiolo/mvnfast
af016cca7eeed89beae71639dc895cca5e71d267
1f5aacbba23a6e1fd369cd963917298e5eb8b4a4
refs/heads/master
2023-07-06T00:59:49.298436
2023-06-26T15:52:42
2023-06-26T15:52:42
19,045,554
28
10
null
2023-06-26T15:52:44
2014-04-22T20:44:38
R
UTF-8
R
false
false
1,377
r
test-dmvn.R
context("dmvn() and maha()") test_that("Checking dmvn() and maha() against dmvnorm() and mahalanobis", { library("mvtnorm") ########## ###### d = 1, n = 1 case ########## set.seed(4616) N <- c(1, 100, 1, 100) d <- c(1, 1, 10, 10) message("Testing dmvn() and maha()") for(ii in 1:length(N)) { mu <- 1:d[ii] tmp <- matrix(rnorm(d[ii]^2), d[ii], d[ii]) mcov <- tcrossprod(tmp, tmp) myChol <- chol(mcov) X <- rmvnorm(N[ii], mu, mcov) ##### dmvn() bench <- dmvnorm(X, mu, mcov, log = T) # Sequential expect_lt(sum(abs(dmvn(X, mu, mcov, log = T) - bench)), 1e-6) expect_lt(sum(abs(dmvn(X, mu, myChol, isChol = TRUE, log = T) - bench)), 1e-6) # Parallel expect_lt(sum(abs(dmvn(X, mu, mcov, ncores = 2, log = T) - bench)), 1e-6) expect_lt(sum(abs(dmvn(X, mu, myChol, ncores = 2, isChol = TRUE, log = T) - bench)), 1e-6) ##### maha() bench <- mahalanobis(X, mu, mcov) # Sequential expect_lt(sum(abs(maha(X, mu, mcov) - bench)), 1e-6) expect_lt(sum(abs(maha(X, mu, myChol, isChol = TRUE) - bench)), 1e-6) # Parallel expect_lt(sum(abs(maha(X, mu, mcov, ncores = 2) - bench)), 1e-6) expect_lt(sum(abs(maha(X, mu, myChol, ncores = 2, isChol = TRUE) - bench)), 1e-6) message(paste("Test", ii, "passed.")) } detach("package:mvtnorm", unload=TRUE) })
4885395ce1534d0a44c29d973db1a762f6202304
aae44f8e1422ae9d611bd27a4a7b9754f25a60ef
/ssm revision complete analyses for archiving.R
cf8fa632aecff79fcdd8f1580c67cff90a7bc5f3
[]
no_license
adampepi/nonstationary_ecoletts
d36a9c45266c6faa2f3aa16a3cab822ab9d028f7
282be62639536eeede685c5163be509b190a068e
refs/heads/master
2023-04-11T19:55:32.774473
2021-03-24T19:04:57
2021-03-24T19:04:57
351,192,914
0
0
null
null
null
null
UTF-8
R
false
false
32,307
r
ssm revision complete analyses for archiving.R
##Code for state space models and simulations for Pepi, Holyoak & Karban 2021, Ecology Letters rm(list = ls()) library(rjags) library(R2jags) library(AICcmodavg) library(lattice) library(MCMCvis) library(tidyverse) library(tidybayes) library(ggplot2) library(ggstance) library(bayestestR) setwd("~/Documents/Research/dissertation/time series analyses/nonstationary_modelling") cats1<-read.csv('bodega.cats2.csv') str(cats1) cats<-cats1 cats$Precip<-as.numeric(scale(cats$precip)) cats$cat.count<-as.integer(cats$lupine.count) #First part cats2<-cats1[1:19,] cats2$Precip<-as.numeric(scale(cats2$precip)) cats2$cat.count<-as.integer(cats2$lupine.count) str(cats2) #Second part cats3<-cats1[19:34,] cats3$Precip<-as.numeric(scale(cats3$precip)) cats3$cat.count<-as.integer(cats3$lupine.count) str(cats) ### Delayed DD Gompertz precip #Full series sink("tigermodelprecipgompdelayed.jags") # name of the txt file cat(" model{ #Priors mean.r[1] ~ dnorm(0, 0.1) # Prior for mean growth rate mean.r[2] ~ dnorm(0, 0.1) sigma.proc ~ dunif(0, 10) # Prior for sd of state process sigma2.proc <- pow(sigma.proc, 2) tau.proc <- pow(sigma.proc, -2) beta_precip ~ dnorm(0,0.01) beta_dd1 ~ dnorm(0,0.01) beta_dd2 ~ dnorm(0,0.01) beta_0 ~ dnorm(0, 0.01) #State process for (t in 3:T){ mean.r[t] <- beta_0 + beta_dd1 * logN.est[t-1] + beta_dd2 * logN.est[t-2] + beta_precip * precip[t-1] } #Error model for(t in 1:T){ logN.est[t] ~ dnorm(mean.r[t], tau.proc) N.est[t] <- exp(logN.est[t]) # Observation process y[t] ~ dpois(N.est[t]*logarea[t]) } } ", fill=T) sink() jags.data15<- list(y = as.integer(cats$cat.count), T = length(cats$Year),precip=cats$Precip,logarea=cats$lupine.area) year<-cats$Year inits15 <- function(){list(sigma.proc =0.9, beta_0= 3,beta_dd1= -0.1, beta_dd2= -0.35, beta_precip=0.03)} parameters15<- c("mean.r", "sigma2.proc", "N.est","logN.est","beta_0","beta_dd1", 'beta_dd2',"beta_precip") ni <- 20000 nt <- 1 nb <- 1000 nc <- 3 pgompdelayed <- jags(jags.data15, inits15, parameters15, 
"tigermodelprecipgompdelayed.jags", n.chains = nc, n.thin = nt, n.iter = ni, n.burnin = nb, working.directory = getwd()) pgd<-jags.model("tigermodelprecipgompdelayed.jags",data=jags.data15, inits=inits15,n.chains = nc) summary(pgompdelayed) print(pgompdelayed, digits = 3) theta.samples1 <- coda.samples(pgd, n.iter=10000, thin=10, c("beta_precip","beta_dd1","beta_dd2","beta_0",'sigma2.proc')) par(mar=rep(2,4)); plot(theta.samples1) #First half sink("tigermodelprecipgompdelayed2.jags") # name of the txt file cat(" model{ #Priors mean.r[1] ~ dnorm(0, 0.1) # Prior for mean growth rate mean.r[2] ~ dnorm(0, 0.1) sigma.proc ~ dunif(0, 10) # Prior for sd of state process sigma2.proc <- pow(sigma.proc, 2) tau.proc <- pow(sigma.proc, -2) beta_precip ~ dnorm(0,0.01) beta_dd1 ~ dnorm(0,0.01) beta_dd2 ~ dnorm(0,0.01) beta_0 ~ dnorm(0, 0.01) #State process for (t in 3:T){ mean.r[t] <- beta_0 + beta_dd1 * logN.est[t-1] + beta_dd2 * logN.est[t-2] + beta_precip * precip[t-1] } #Error model for(t in 1:T){ logN.est[t] ~ dnorm(mean.r[t], tau.proc) N.est[t] <- exp(logN.est[t]) # Observation process y[t] ~ dpois(N.est[t]*logarea[t]) } } ", fill=T) sink() jags.data15<- list(y = as.integer(cats2$cat.count), T = length(cats2$Year),precip=cats2$Precip,logarea=cats2$lupine.area) year<-cats$Year inits15 <- function(){list(sigma.proc =0.9, beta_0= 3,beta_dd1= -0.1, beta_dd2= -0.35, beta_precip=0.03)} parameters15<- c("mean.r", "sigma2.proc", "N.est","logN.est","beta_0","beta_dd1", 'beta_dd2',"beta_precip") ni <- 20000 nt <- 1 nb <- 1000 nc <- 3 pgompdelayed2 <- jags(jags.data15, inits15, parameters15, "tigermodelprecipgompdelayed2.jags", n.chains = nc, n.thin = nt, n.iter = ni, n.burnin = nb, working.directory = getwd()) pgd2<-jags.model("tigermodelprecipgompdelayed2.jags",data=jags.data15, inits=inits15,n.chains = nc) summary(pgompdelayed) print(pgompdelayed, digits = 3) theta.samples2 <- coda.samples(pgd2, n.iter=10000, thin=10, c("beta_precip","beta_dd1","beta_dd2","beta_0","sigma2.proc")) 
par(mar=rep(2,4)); plot(theta.samples2) #Second half sink("tigermodelprecipgompdelayed3.jags") # name of the txt file cat(" model{ #Priors mean.r[1] ~ dnorm(0, 0.1) # Prior for mean growth rate mean.r[2] ~ dnorm(0, 0.1) sigma.proc ~ dunif(0, 10) # Prior for sd of state process sigma2.proc <- pow(sigma.proc, 2) tau.proc <- pow(sigma.proc, -2) beta_precip ~ dnorm(0,0.01) beta_dd1 ~ dnorm(0,0.01) beta_dd2 ~ dnorm(0,0.01) beta_0 ~ dnorm(0, 0.01) #State process for (t in 3:T){ mean.r[t] <- beta_0 + beta_dd1 * logN.est[t-1] + beta_dd2 * logN.est[t-2] + beta_precip * precip[t-1] } #Error model for(t in 1:T){ logN.est[t] ~ dnorm(mean.r[t], tau.proc) N.est[t] <- exp(logN.est[t]) # Observation process y[t] ~ dpois(N.est[t]*logarea[t]) } } ", fill=T) sink() jags.data15<- list(y = as.integer(cats3$cat.count), T = length(cats3$Year),precip=cats3$Precip,logarea=cats3$lupine.area) year<-cats$Year inits15 <- function(){list(sigma.proc =0.9, beta_0= 3,beta_dd1= -0.1, beta_dd2= -0.35, beta_precip=0.03)} parameters15<- c("mean.r", "sigma2.proc", "N.est","logN.est","beta_0","beta_dd1", 'beta_dd2',"beta_precip") ni <- 20000 nt <- 1 nb <- 1000 nc <- 3 pgompdelayed3 <- jags(jags.data15, inits15, parameters15, "tigermodelprecipgompdelayed3.jags", n.chains = nc, n.thin = nt, n.iter = ni, n.burnin = nb, working.directory = getwd()) pgd3<-jags.model("tigermodelprecipgompdelayed3.jags",data=jags.data15, inits=inits15,n.chains = nc) summary(pgompdelayed) print(pgompdelayed, digits = 3) plotModelOutput(pgompdelayed,log((cats$lupine.count+1)/cats$lupine.area)) theta.samples3 <- coda.samples(pgd3, n.iter=10000, thin=10, c("beta_precip","beta_dd1","beta_dd2","beta_0",'sigma2.proc')) par(mar=rep(2,4)); plot(theta.samples3) coda::traceplot(theta.samples3) png("pgomp12.png", width=600, height=400) MCMCplot(object = pgompdelayed, object2 = pgompdelayed2, col='black', col2='blue', params = c("beta_0",'beta_dd1','beta_dd2',"beta_precip"), labels = c('Intercept', 'Direct density-dependence', 'Delayed 
density-dependence', 'Precipitation'), xlim=c(-3,3)) legend('topright',inset=.05,col = c('black',"blue","red"),lty=1,lwd=3,legend=c('Whole series',"Before threshold","After threshold")) dev.off() png("pgomp3.png", width=600, height=400, bg = "transparent") MCMCplot(object = pgompdelayed3, col='red', params = c("beta_0",'beta_dd1','beta_dd2',"beta_precip"), labels = c('Intercept', 'Direct density-dependence', 'Delayed density-dependence', 'Precipitation'), xlim=c(-3,3)) dev.off() m1<-tidy_draws(theta.samples1) m1 spread_draws() point_interval(m1) III<-theta.samples1 %>% gather_draws(beta_0,beta_dd1,beta_dd2,beta_precip)%>% mutate(Model = "Whole series") I<-theta.samples2 %>% gather_draws(beta_0,beta_dd1,beta_dd2,beta_precip)%>% mutate(Model = "Part I") II<-theta.samples3 %>% gather_draws(beta_0,beta_dd1,beta_dd2,beta_precip)%>% mutate(Model = "Part II") overall<-bind_rows(III,II,I) overall$Model<-as.factor(overall$Model) levels(overall$Model) overall$Model = factor(overall$Model,levels(overall$Model)[c(2,1,3)]) levels(overall$Model) overall%>% ggplot(aes(y = .variable , x = .value,color=Model)) + stat_halfeyeh(position = position_dodgev(height = .9),.width = c(.90, .95))+geom_vline(xintercept=0,lty=2)+xlab(label="Value")+ ylab(label="Parameter")+ scale_y_discrete(labels = c(expression(alpha["0"]),expression(alpha["1"]),expression(alpha["2"]),expression(beta["Precip"])))+ theme_classic()+theme(text=element_text(size=20))+guides(color = guide_legend(reverse = TRUE))+ scale_color_manual(values=c("red", "blue", "black")) ci(overall.1$beta_dd1[overall.1$Model=="Whole series"],method="HDI",ci=0.9) ci(overall.1$beta_dd1[overall.1$Model=="Part I"],method="HDI",ci=0.9) ci(overall.1$beta_dd1[overall.1$Model=="Part II"],method="HDI",ci=0.9) ci(overall.1$beta_dd2[overall.1$Model=="Whole series"],method="HDI",ci=0.9) ci(overall.1$beta_dd2[overall.1$Model=="Part I"],method="HDI",ci=0.9) ci(overall.1$beta_dd2[overall.1$Model=="Part II"],method="HDI",ci=0.9) 
ci(overall.1$beta_precip[overall.1$Model=="Whole series"],method="HDI",ci=0.9) ci(overall.1$beta_precip[overall.1$Model=="Part I"],method="HDI",ci=0.9) ci(overall.1$beta_precip[overall.1$Model=="Part II"],method="HDI",ci=0.9) III.1<-theta.samples1 %>% spread_draws(beta_0,beta_dd1,beta_dd2,beta_precip)%>% mutate(Model = "Whole series") I.1<-theta.samples2 %>% spread_draws(beta_0,beta_dd1,beta_dd2,beta_precip)%>% mutate(Model = "Part I") II.1<-theta.samples3 %>% spread_draws(beta_0,beta_dd1,beta_dd2,beta_precip)%>% mutate(Model = "Part II") overall.1<-bind_rows(III.1,II.1,I.1) overall.1$Model<-as.factor(overall.1$Model) overall.1 overall.1$beta1<-overall.1$beta_dd1 overall.1$beta2<-overall.1$beta_dd2 sample(overall.1$beta_0[overall.1$Model=="Whole series"],size=1) xes<-seq(-2,2,by=.1) croyama<-function(x)-0.25*x^2 curve1<-croyama(xes) curve<-data.frame(beta1=xes,beta2=curve1) xx<-c(-2,0,2) yy<-c(-1,1,-1) beta1<-c(0.085,-0.619,0.45) beta2<-c(-0.325,-0.258,-0.239) model<-c("Whole series","Part I","Part II") royama<-data.frame(beta1=beta1,beta2=beta2,Model=model) triangle<-data.frame(beta1=xx,beta2=yy) vline<-data.frame(x1=0,x2=0,y1=-1,y2=1) labels<-data.frame(quadrant=c("I","II","III","IV","I'","II'","III'","IV'"),y=c(0.35,0.35,-.6,-.6,0.35,0.35,-1.15,-1.15),x=c(0.3,-0.3,-.7,.7,-1.5,1.5,-.7,.7)) str(overall.1) detach("package:biwavelet", unload=TRUE) ggplot()+geom_point(data=overall.1,mapping=aes(x=beta1,y=beta2, color=Model),size=1,alpha = 0.25)+xlim(-2,2)+ylim(-1.2,1)+theme_classic()+geom_point(data=royama,mapping=aes(x=beta1,y=beta2, fill=Model),size=4,shape=21)+xlab(expression(alpha["1"]))+ylab(expression(alpha["2"]))+geom_line(data=curve,mapping=aes(x=beta1,y=beta2))+ geom_polygon(data=triangle,mapping=aes(x=beta1,y=beta2),colour="black",fill=NA)+geom_segment(data=vline,mapping=aes(x=x1,xend=x2,y=y1,yend=y2))+ geom_text(data=labels,mapping=aes(x=x,y=y,label=quadrant),size=7)+ geom_segment(aes(x =royama$beta1[2],y = royama$beta2[2],xend = royama$beta1[3],yend = 
royama$beta2[3]),arrow=arrow(),data=royama)+theme(text=element_text(size=20)) ##### Simlations -- using posterior library(biwavelet) cats1$logmean<-log(cats1$cat.mean) cats1$logntm1<-log(cats1$ntm1) cats1$logntm2<-log(cats1$ntm1) cats1$precipscaled<-scale(cats1$precip) cats2<-cats1[2:34,] cats3<-cats1[3:34,] oseries<-cbind(cats1$Year,cats1$logmean) nrands<-1000 wtc.0 = wt(oseries) ##Generalised simulation function ##values drawn from posterior simulation<-function(a0samp=overall.1$beta_0[overall.1$Model=="Whole series"], a1samp=overall.1$beta_dd1[overall.1$Model=="Whole series"], a2samp=overall.1$beta_dd2[overall.1$Model=="Whole series"], bprecipsamp=overall.1$beta_precip[overall.1$Model=="Whole series"], reps=1000 ) { ##fixed values log1<-function (N,Ntm1,a0,a1,a2,b1,p) a0 + a1*N+a2*Ntm1+b1*p tf<-34 #run time n0<-0.293 #pop init size n1<--2.957 n2<--1.822 samples<-vector(length=reps) for(i in 1:reps){ a0<-sample(a0samp,size=1) a1<-sample(a1samp,size=1) a2<-sample(a2samp,size=1) b1<-sample(bprecipsamp,size=1) n<-rep(NA,tf) #make vector n[1] = n0 #put init pop n[2] = n1 n[3]=n2 precip<-cats1$precipscaled for(t in 3:(tf-1)){ #t-1 to match lengths n[t+1]<-log1(N=n[t],Ntm1=n[t-1],a0=a0,a1=a1,a2=a2,b1=b1,p=precip[t]) } sim1<-cbind(cats1$Year,n) wtc.1 = wt(sim1) samples[i]<-wdist(wtc.0$wave,wtc.1$wave) } print(samples) } s1<-simulation(a0samp=overall.1$beta_0[overall.1$Model=="Whole series"], a1samp=overall.1$beta_dd1[overall.1$Model=="Whole series"], a2samp=overall.1$beta_dd2[overall.1$Model=="Whole series"], bprecipsamp=overall.1$beta_precip[overall.1$Model=="Whole series"], reps=10000) hist(s1) s1.1<-as.data.frame(s1) s1.1$sim<-"Whole Series" str(s1.1) ggplot(s1.1,aes(s1))+geom_density() ###First Part s2<-simulation(a0samp=overall.1$beta_0[overall.1$Model=="Part I"], a1samp=overall.1$beta_dd1[overall.1$Model=="Part I"], a2samp=overall.1$beta_dd2[overall.1$Model=="Part I"], bprecipsamp=overall.1$beta_precip[overall.1$Model=="Part I"], reps=10000) hist(s2) 
s2.1<-as.data.frame(s2) s2.1$sim<-"Part I" s2.1$s1<-s2.1$s2 ggplot(s2.1,aes(s2))+geom_density() ###Second Part s3<-simulation(a0samp=overall.1$beta_0[overall.1$Model=="Part II"], a1samp=overall.1$beta_dd1[overall.1$Model=="Part II"], a2samp=overall.1$beta_dd2[overall.1$Model=="Part II"], bprecipsamp=overall.1$beta_precip[overall.1$Model=="Part II"], reps=10000) hist(s3) s3.1<-as.data.frame(s3) s3.1$sim<-"Part II" s3.1$s1<-s3.1$s3 ggplot(s3.1,aes(s3))+geom_density() ###Whole series --- precip from part II s4<-simulation(a0samp=overall.1$beta_0[overall.1$Model=="Whole series"], a1samp=overall.1$beta_dd1[overall.1$Model=="Whole series"], a2samp=overall.1$beta_dd2[overall.1$Model=="Whole series"], bprecipsamp=overall.1$beta_precip[overall.1$Model=="Part II"], reps=10000) hist(s4) s4.1<-as.data.frame(s4) s4.1$sim<-"Whole Series Precip Same" s4.1$s1<-s4.1$s4 ggplot(s1.1,aes(s4))+geom_density() ###First Part s5<-simulation(a0samp=overall.1$beta_0[overall.1$Model=="Part I"], a1samp=overall.1$beta_dd1[overall.1$Model=="Part I"], a2samp=overall.1$beta_dd2[overall.1$Model=="Part I"], bprecipsamp=overall.1$beta_precip[overall.1$Model=="Part II"], reps=10000) hist(s5) s5.1<-as.data.frame(s5) s5.1$sim<-"Part I Precip Same" s5.1$s1<-s5.1$s5 ggplot(s5.1,aes(s5))+geom_density() ##Part II no DD s6<-simulation(a0samp=overall.1$beta_0[overall.1$Model=="Part II"], a1samp=0, a2samp=0, bprecipsamp=overall.1$beta_precip[overall.1$Model=="Part II"], reps=10000) hist(s6) s6.1<-as.data.frame(s6) s6.1$sim<-"Part II No DD" s6.1$s1<-s6.1$s6 ggplot(s6.1,aes(s6))+geom_density() s1.1 str(s1.1) str(s2.1) str(s3.1) str(s4.1) str(s5.1) str(s6.1) values<-c(s1.1$s1,s2.1$s1,s3.1$s1,s4.1$s1,s5.1$s1,s6.1$s1) length(values) simulations<-c(s1.1$sim,s2.1$sim,s3.1$sim,s4.1$sim,s5.1$sim,s6.1$sim) length(simulations) allsims<-data.frame(values=values,simulation=simulations) str(allsims) allsims$simulation<-as.factor(allsims$simulation) str(allsims) ggplot(allsims,aes(values,lty=simulation))+geom_density() 
hdi1<-ci(allsims$values[allsims$simulation=='Whole Series'], method="HDI",ci=0.95) hdi2<-ci(allsims$values[allsims$simulation=='Whole Series Precip Same'], method="HDI",ci=0.95) hdi3<-ci(allsims$values[allsims$simulation=='Part II No DD'], method="HDI",ci=0.95) hdi4<-ci(allsims$values[allsims$simulation=='Part II'], method="HDI",ci=0.95) hdi5<-ci(allsims$values[allsims$simulation=='Part I Precip Same'], method="HDI",ci=0.95) hdi6<-ci(allsims$values[allsims$simulation=='Part I'], method="HDI",ci=0.95) hdi1 hdi2 hdi3 hdi4 hdi5 hdi6 hdi1<-ci(allsims$values[allsims$simulation=='Whole Series'], method="HDI",ci=0.9) hdi2<-ci(allsims$values[allsims$simulation=='Whole Series Precip Same'], method="HDI",ci=0.9) hdi3<-ci(allsims$values[allsims$simulation=='Part II No DD'], method="HDI",ci=0.9) hdi4<-ci(allsims$values[allsims$simulation=='Part II'], method="HDI",ci=0.9) hdi5<-ci(allsims$values[allsims$simulation=='Part I Precip Same'], method="HDI",ci=0.9) hdi6<-ci(allsims$values[allsims$simulation=='Part I'], method="HDI",ci=0.9) hdi1 hdi2 hdi3 hdi4 hdi5 hdi6 str(allsims) ggplot(data=allsims,aes(y = simulation , x = values)) + stat_halfeyeh(position = position_dodgev(height = .9),.width = c(.90, .95))+xlab(label="Value")+ ylab(label="Scenario")+ theme_classic()+theme(text=element_text(size=20)) #####Posterior predictive checks cats1<-read.csv('bodega.cats2.csv') str(cats1) cats<-cats1 cats$Precip<-as.numeric(scale(cats$precip)) cats$cat.count<-as.integer(cats$lupine.count) cats2<-cats1[1:19,] cats2$Precip<-as.numeric(scale(cats2$precip)) cats2$cat.count<-as.integer(cats2$lupine.count) str(cats2) cats3<-cats1[19:34,] cats3$Precip<-as.numeric(scale(cats3$precip)) cats3$cat.count<-as.integer(cats3$lupine.count) str(cats) sink("tigermodelprecipgompdelayedppcheck.jags") # name of the txt file cat(" model{ #Priors mean.r[1] ~ dnorm(0, 0.1) # Prior for mean growth rate mean.r[2] ~ dnorm(0, 0.1) sigma.proc ~ dunif(0, 10) # Prior for sd of state process sigma2.proc <- pow(sigma.proc, 
2) tau.proc <- pow(sigma.proc, -2) beta_precip ~ dnorm(0,0.01) beta_dd1 ~ dnorm(0,0.01) beta_dd2 ~ dnorm(0,0.01) beta_0 ~ dnorm(0, 0.01) #State process for (t in 3:T){ mean.r[t] <- beta_0 + beta_dd1 * logN.est[t-1] + beta_dd2 * logN.est[t-2] + beta_precip * precip[t-1] } #Error model for(t in 1:T){ logN.est[t] ~ dnorm(mean.r[t], tau.proc) N.est[t] <- exp(logN.est[t]) # Observation process y[t] ~ dpois(N.est[t]*logarea[t]) y.new[t] ~ dpois(N.est[t]*logarea[t]) res[t]<-y[t]-N.est[t] res.new[t]<-y.new[t]-N.est[t] } #Derived parameters fit <- sum(res[]) fit.new <- sum(res.new[]) } ", fill=T) sink() jags.data15<- list(y = as.integer(cats$cat.count), T = length(cats$Year),precip=cats$Precip,logarea=cats$lupine.area) year<-cats$Year inits15 <- function(){list(sigma.proc =0.9, beta_0= 3,beta_dd1= -0.1, beta_dd2= -0.35, beta_precip=0.03)} parameters15<- c("mean.r", "sigma2.proc", "N.est","logN.est","beta_0","beta_dd1", 'beta_dd2',"beta_precip","fit",'fit.new','y.new') ni <- 20000 nt <- 1 nb <- 1000 nc <- 3 pgompdelayed <- jags(jags.data15, inits15, parameters15, "tigermodelprecipgompdelayedppcheck.jags", n.chains = nc, n.thin = nt, n.iter = ni, n.burnin = nb, working.directory = getwd()) pgd<-jags.model("tigermodelprecipgompdelayed.jags",data=jags.data15, inits=inits15,n.chains = nc) summary(pgompdelayed) print(pgompdelayed, digits = 3) theta.samples1 <- coda.samples(pgd, n.iter=10000, thin=10, c("beta_precip","beta_dd1","beta_dd2","beta_0")) par(mar=rep(1,4)); plot(theta.samples1,oma=c(2,2,2,2)) library(jagsUI) pgompdelayed <- jags(jags.data15, inits15, parameters15, "tigermodelprecipgompdelayedppcheck.jags", n.chains = nc, n.thin = nt, n.iter = ni, n.burnin = nb) pp.check(x=pgompdelayed, observed = 'fit', simulated = 'fit.new') crosscorr.plot(theta.samples1) crosscorr(theta.samples1) #pp.check(x=pgompdelayed, observed = 'y', simulated = 'y.new') str(pgompdelayed) ynew<-pgompdelayed$sims.list$y.new year<-cats$Year y = as.integer(cats$cat.count) y 
plot(x=year,y=y+1,type='l',log='y',xlab="Year",ylab="Count") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=y+1, lwd=3) #First half sink("tigermodelprecipgompdelayed2ppcheck.jags") # name of the txt file cat(" model{ #Priors mean.r[1] ~ dnorm(0, 0.1) # Prior for mean growth rate mean.r[2] ~ dnorm(0, 0.1) sigma.proc ~ dunif(0, 10) # Prior for sd of state process sigma2.proc <- pow(sigma.proc, 2) tau.proc <- pow(sigma.proc, -2) beta_precip ~ dnorm(0,0.01) beta_dd1 ~ dnorm(0,0.01) beta_dd2 ~ dnorm(0,0.01) beta_0 ~ dnorm(0, 0.01) #State process for (t in 3:T){ mean.r[t] <- beta_0 + beta_dd1 * logN.est[t-1] + beta_dd2 * logN.est[t-2] + beta_precip * precip[t-1] } #Error model for(t in 1:T){ logN.est[t] ~ dnorm(mean.r[t], tau.proc) N.est[t] <- exp(logN.est[t]) # Observation process y[t] ~ dpois(N.est[t]*logarea[t]) y.new[t] ~ dpois(N.est[t]*logarea[t]) 
res[t]<-y[t]-N.est[t] res.new[t]<-y.new[t]-N.est[t] } #Derived parameters fit <- sum(res[]) fit.new <- sum(res.new[]) } ", fill=T) sink() jags.data15<- list(y = as.integer(cats2$cat.count), T = length(cats2$Year),precip=cats2$Precip,logarea=cats2$lupine.area) year<-cats$Year inits15 <- function(){list(sigma.proc =0.9, beta_0= 3,beta_dd1= -0.1, beta_dd2= -0.35, beta_precip=0.03)} parameters15<- c("mean.r", "sigma2.proc", "N.est","logN.est","beta_0","beta_dd1", 'beta_dd2',"beta_precip","fit",'fit.new','y.new') ni <- 20000 nt <- 1 nb <- 1000 nc <- 3 pgompdelayed2 <- jags(jags.data15, inits15, parameters15, "tigermodelprecipgompdelayed2ppcheck.jags", n.chains = nc, n.thin = nt, n.iter = ni, n.burnin = nb) pgd2<-jags.model("tigermodelprecipgompdelayed2.jags",data=jags.data15, inits=inits15,n.chains = nc) summary(pgompdelayed) print(pgompdelayed, digits = 3) theta.samples2 <- coda.samples(pgd2, n.iter=10000, thin=10, c("beta_precip","beta_dd1","beta_dd2","beta_0")) par(mar=rep(1,4)); plot(theta.samples2) pp.check(x=pgompdelayed2, observed = 'fit', simulated = 'fit.new') crosscorr(theta.samples2) ynew<-pgompdelayed2$sims.list$y.new ynew y = as.integer(cats2$cat.count) y year<-cats2$Year plot(x=cats2$Year,y=y+1,type='l',log='y',xlab="Year",ylab="Count") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") 
lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=y+1, lwd=3) #Second half sink("tigermodelprecipgompdelayed3ppcheck.jags") # name of the txt file cat(" model{ #Priors mean.r[1] ~ dnorm(0, 0.1) # Prior for mean growth rate mean.r[2] ~ dnorm(0, 0.1) sigma.proc ~ dunif(0, 10) # Prior for sd of state process sigma2.proc <- pow(sigma.proc, 2) tau.proc <- pow(sigma.proc, -2) beta_precip ~ dnorm(0,0.01) beta_dd1 ~ dnorm(0,0.01) beta_dd2 ~ dnorm(0,0.01) beta_0 ~ dnorm(0, 0.01) #State process for (t in 3:T){ mean.r[t] <- beta_0 + beta_dd1 * logN.est[t-1] + beta_dd2 * logN.est[t-2] + beta_precip * precip[t-1] } #Error model for(t in 1:T){ logN.est[t] ~ dnorm(mean.r[t], tau.proc) N.est[t] <- exp(logN.est[t]) # Observation process y[t] ~ dpois(N.est[t]*logarea[t]) y.new[t] ~ dpois(N.est[t]*logarea[t]) res[t]<-y[t]-N.est[t] res.new[t]<-y.new[t]-N.est[t] } #Derived parameters fit <- sum(res[]) fit.new <- sum(res.new[]) } ", fill=T) sink() jags.data15<- list(y = as.integer(cats3$cat.count), T = length(cats3$Year),precip=cats3$Precip,logarea=cats3$lupine.area) year<-cats$Year inits15 <- function(){list(sigma.proc =0.9, beta_0= 3,beta_dd1= -0.1, beta_dd2= -0.35, beta_precip=0.03)} parameters15<- c("mean.r", "sigma2.proc", "N.est","logN.est","beta_0","beta_dd1", 'beta_dd2',"beta_precip","fit",'fit.new','y.new') ni <- 20000 nt <- 1 nb <- 1000 nc <- 3 pgompdelayed3 <- jags(jags.data15, inits15, parameters15, "tigermodelprecipgompdelayed3ppcheck.jags", n.chains = nc, n.thin = nt, n.iter = ni, n.burnin = nb ) 
pgd3<-jags.model("tigermodelprecipgompdelayed3.jags",data=jags.data15, inits=inits15,n.chains = nc) summary(pgompdelayed3) print(pgompdelayed3, digits = 3) theta.samples3<- coda.samples(pgd3, n.iter=10000, thin=10, c("beta_precip","beta_dd1","beta_dd2","beta_0")) par(mar=rep(1,4)); plot(theta.samples3) crosscorr(theta.samples3) pp.check(x=pgompdelayed3, observed = 'fit', simulated = 'fit.new') ynew<-pgompdelayed3$sims.list$y.new ynew y = as.integer(cats3$cat.count) y year<-cats3$Year dev.off() plot(x=year,y=y+1,type='l',log='y',xlab="Year",ylab="Count") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=ynew[sample(nrow(ynew),size=1,replace=FALSE),]+1, col="blue") lines(x=year,y=y+1, lwd=3) ##Simulation plots samples<-matrix(0,ncol=34-3,nrow=100) samples simulation2<-function(a0samp=overall.1$beta_0[overall.1$Model=="Whole series"], a1samp=overall.1$beta_dd1[overall.1$Model=="Whole series"], a2samp=overall.1$beta_dd2[overall.1$Model=="Whole 
series"], bprecipsamp=overall.1$beta_precip[overall.1$Model=="Whole series"], reps=100 ) { ##fixed values log1<-function (N,Ntm1,a0,a1,a2,b1,p) a0 + a1*N+a2*Ntm1+b1*p tf<-34 #run time n0<-0.293 #pop init size n1<--2.957 n2<--1.822 samples<-matrix(0,ncol=tf,nrow=reps) for(i in 1:reps){ a0<-sample(a0samp,size=1) a1<-sample(a1samp,size=1) a2<-sample(a2samp,size=1) b1<-sample(bprecipsamp,size=1) n<-rep(NA,tf) #make vector n[1] = n0 #put init pop n[2] = n1 n[3]=n2 precip<-cats1$precipscaled for(t in 3:(tf-1)){ #t-1 to match lengths n[t+1]<-log1(N=n[t],Ntm1=n[t-1],a0=a0,a1=a1,a2=a2,b1=b1,p=precip[t]) } sim1<-cbind(cats1$Year,n) samples[i,]<-n } print(samples) } ##Have to have JagsUI unloaded? par(mfrow=c(2,3)) s1<-simulation2(a0samp=overall.1$beta_0[overall.1$Model=="Whole series"], a1samp=overall.1$beta_dd1[overall.1$Model=="Whole series"], a2samp=overall.1$beta_dd2[overall.1$Model=="Whole series"], bprecipsamp=overall.1$beta_precip[overall.1$Model=="Whole series"], reps=100) s1.1<-exp(s1) s1.1[1,] y = cats$lupine.count/cats$lupine.area year<-cats$Year plot(x=year,y=y+1,type='l',log='y',xlab="Year",ylab="Density",ylim=c(1,20),main="Whole series") for(i in 1:50){ lines(x=year,y=s1.1[sample(nrow(s1.1),size=1,replace=FALSE),]+1, col=rgb(0,0,1,0.1)) } lines(x=year,y=y+1, lwd=3) ###First Part s2<-simulation2(a0samp=overall.1$beta_0[overall.1$Model=="Part I"], a1samp=overall.1$beta_dd1[overall.1$Model=="Part I"], a2samp=overall.1$beta_dd2[overall.1$Model=="Part I"], bprecipsamp=overall.1$beta_precip[overall.1$Model=="Part I"], reps=100) s1.1<-exp(s2) y = cats$lupine.count/cats$lupine.area year<-cats$Year plot(x=year,y=y+1,type='l',log='y',xlab="Year",ylab="Density",ylim=c(1,20),main='Part I') for(i in 1:50){ lines(x=year,y=s1.1[sample(nrow(s1.1),size=1,replace=FALSE),]+1, col=rgb(0,0,1,0.1)) } lines(x=year,y=y+1, lwd=3) ###Second Part s3<-simulation2(a0samp=overall.1$beta_0[overall.1$Model=="Part II"], a1samp=overall.1$beta_dd1[overall.1$Model=="Part II"], 
a2samp=overall.1$beta_dd2[overall.1$Model=="Part II"], bprecipsamp=overall.1$beta_precip[overall.1$Model=="Part II"], reps=100) s1.1<-exp(s3) y = cats$lupine.count/cats$lupine.area year<-cats$Year plot(x=year,y=y+1,type='l',log='y',xlab="Year",ylab="Density",ylim=c(1,20),main='Part II') for(i in 1:50){ lines(x=year,y=s1.1[sample(nrow(s1.1),size=1,replace=FALSE),]+1, col=rgb(0,0,1,0.1)) } lines(x=year,y=y+1, lwd=3) ###Whole series --- precip from part II s4<-simulation2(a0samp=overall.1$beta_0[overall.1$Model=="Whole series"], a1samp=overall.1$beta_dd1[overall.1$Model=="Whole series"], a2samp=overall.1$beta_dd2[overall.1$Model=="Whole series"], bprecipsamp=overall.1$beta_precip[overall.1$Model=="Part II"], reps=100) s1.1<-exp(s4) y = cats$lupine.count/cats$lupine.area year<-cats$Year plot(x=year,y=y+1,type='l',log='y',xlab="Year",ylab="Density",ylim=c(1,20),main='Whole Series Precip Same') for(i in 1:50){ lines(x=year,y=s1.1[sample(nrow(s1.1),size=1,replace=FALSE),]+1, col=rgb(0,0,1,0.1)) } lines(x=year,y=y+1, lwd=3) ###First Part s5<-simulation2(a0samp=overall.1$beta_0[overall.1$Model=="Part I"], a1samp=overall.1$beta_dd1[overall.1$Model=="Part I"], a2samp=overall.1$beta_dd2[overall.1$Model=="Part I"], bprecipsamp=overall.1$beta_precip[overall.1$Model=="Part II"], reps=100) s1.1<-exp(s5) y = cats$lupine.count/cats$lupine.area year<-cats$Year plot(x=year,y=y+1,type='l',log='y',xlab="Year",ylab="Density",ylim=c(1,20),main="Part I Precip Same") for(i in 1:30){ lines(x=year,y=s1.1[sample(nrow(s1.1),size=1,replace=FALSE),]+1, col=rgb(0,0,1,0.1)) } lines(x=year,y=y+1, lwd=3) ##Part II no DD s6<-simulation2(a0samp=overall.1$beta_0[overall.1$Model=="Part II"], a1samp=0, a2samp=0, bprecipsamp=overall.1$beta_precip[overall.1$Model=="Part II"], reps=100) s1.1<-exp(s6) y = cats$lupine.count/cats$lupine.area year<-cats$Year plot(x=year,y=y+1,type='l',log='y',xlab="Year",ylab="Density",ylim=c(1,20),main='Part II No DD') for(i in 1:30){ 
lines(x=year,y=s1.1[sample(nrow(s1.1),size=1,replace=FALSE),]+1, col=rgb(0,0,1,0.1)) } lines(x=year,y=y+1, lwd=3)
1bf880e3230ed008084baa29d8b0acc00620cd3b
84d9d69a930ab7a15fff5d3d05a9c11c4431ce30
/pml.r
331889b994dc0443fa24499544b35473dca4a20b
[]
no_license
etaoinbe/predmachlearn-project
2de25ee018dc803bfcd4bb203cc215e9922eee46
f9df76e133802484f7e48575182bf99aff1804be
refs/heads/master
2020-06-01T18:16:07.020056
2014-06-22T16:59:55
2014-06-22T16:59:55
null
0
0
null
null
null
null
UTF-8
R
false
false
5,525
r
pml.r
# What you should submit # # The goal of your project is to predict the manner in which they did the exercise. This is the "classe" variable # in the training set. You may use any of the other variables to predict with. You should create a report describing # how you built your model, how you used cross validation, what you think the expected out of sample error is, and why # you made the choices you did. You will also use your prediction model to predict 20 different test cases. # # 1. Your submission should consist of a link to a Github repo with your R markdown and compiled HTML file describing # your analysis. Please constrain the text of the writeup to < 2000 words and the number of figures to be less than 5. # It will make it easier for the graders if you submit a repo with a gh-pages branch so the HTML page can be viewed # online (and you always want to make it easy on graders :-). # 2. You should also apply your machine learning algorithm to the 20 test cases available in the test data above. # Please submit your predictions in appropriate format to the programming assignment for automated grading. # See the programming assignment for additional details. # # Reproducibility # # Due to security concerns with the exchange of R code, your code will not be run during the evaluation by your # classmates. Please be sure that if they download the repo, they will be able to view the compiled HTML version of your # analysis. 
# C:\data\git\predmachlearn-project

# Practical Machine Learning course project: predict the "classe" variable
# (how a weight-lifting exercise was performed) from accelerometer data.
# Workflow: load/clean training data -> fit a model ("rpart" or "rf") ->
# estimate out-of-sample error on a held-out validation set -> predict the
# 20 graded test cases and write one answer file per case.
library(caret)
library(rattle)
#http://groupware.les.inf.puc-rio.br/public/papers/2013.Velloso.QAR-WLE.pdf
#
# Model selector used by the if() blocks below: "rf" or "rpart".
testmethod="rf"
#testmethod="rpart"

#####################
# TRAINING
#####################
# setwd("C:\\data\\lectures\\predmachlearn\\project")
#setwd("e:\\data-e\\project")
#training <- read.csv("pml-training.csv")
#xx <- read.csv("pml-training.csv",as.is=T,stringsAsFactors=F)
pmlData <- read.csv("pml-training.csv",as.is=T,stringsAsFactors=F)
# Coerce every non-outcome character column to numeric; presumably the raw
# CSV contains non-numeric placeholder values, which become NA here -- TODO confirm.
ns=names(pmlData)
for(i in 1:length(ns)) {
  name=ns[i]
  if( name!="classe") {
    if( typeof(pmlData[,name])=="character" ) { cat("!!!",name); pmlData[, c(name)]=as.numeric( pmlData[, c(name)] ) ; }
    #print(sprintf("types tr %s name %s ",typeof(pmlData[, c(name)]), name ) )
  }}
pmlData$classe=as.factor(pmlData$classe)
# 60/40 split into training and validation (hold-out) sets.
inTrain = createDataPartition(pmlData$classe, p = .6)[[1]]
training = pmlData[ inTrain,]
validationset = pmlData[-inTrain,]
dim(validationset)
#training<- training[ sample(dim(training)[1], 100), ] #!!!
# Quick visual checks of the outcome against row order / row id.
qplot(seq_along(training$classe),training$classe)
qplot(training$X,training$classe)
# Drop bookkeeping columns (timestamps, row id, user, window flag) and any
# column containing at least one NA.
excludes="timestamp|X|user_name|new_window"
#training<-training[,colSums(is.na(training)) < nrow(training) ]
#testing<-testingsrc[,colSums(is.na(testingsrc)) < nrow(testingsrc) ]
#training1 <- subset( trainingsrc, select = -X )
NAs <- apply(training,2,function(x) {sum(is.na(x))})
training <- training[,which(NAs == 0)]
removeIndex <- grep(excludes, names(training))
training <- training[,-removeIndex]
set.seed(975)
# Option 1: CART decision tree, with tree plots and in-sample accuracy.
if(testmethod=="rpart") {
  modfit=train(training$classe ~ ., method="rpart", data=training )
  print(modfit$finalModel)
  jpeg("modfittree.jpg")
  plot(modfit$finalModel,uniform=TRUE,main="tree")
  text(modfit$finalModel,use.n=TRUE,all=TRUE,cex=.8)
  dev.off()
  jpeg("fancytree.jpeg")
  fancyRpartPlot(modfit$finalModel)
  dev.off()
  confusionMatrix(training$classe, predict(modfit, training))
}
###
# Option 2: random forest with 5-fold cross-validation.
if(testmethod=="rf") {
  # training<- training[ sample(dim(training)[1], 6000), ] #!!!
  #training<- training[ sample(dim(training)[1], 3000), ] #!!!
  #https://class.coursera.org/predmachlearn-002/forum/thread?thread_id=249#post-1024
  trctrl=trainControl(method = "cv", number = 5)
  modfitrf=train(training$classe ~ ., method="rf", data=training, trControl = trctrl, prox=TRUE )
  print(modfitrf$results)
  confusionMatrix(training$classe, predict(modfitrf, training))
}

##########################################
# validation out of sample error
##########################################
# Confusion matrix on the held-out 40% estimates the out-of-sample error.
if(testmethod=="rpart") {
  valp= predict(modfit, validationset, verbose = TRUE)
  confusionMatrix(validationset$classe, valp)
}
if(testmethod=="rf") {
  valp = predict(modfitrf, validationset, verbose = TRUE)
  confusionMatrix(validationset$classe, valp)
}

#####################
# TESTING
#####################
# Apply the same cleaning to the 20 graded test cases. Note: the test set is
# filtered with the NA counts computed from the *training* data (NAs above),
# which keeps the two sets on the same columns.
testing <- read.csv("pml-testing.csv",as.is=T,stringsAsFactors=F)
ns=names(testing)
for(i in 1:length(ns)) {
  name=ns[i]
  if( name!="classe") {
    if( typeof(testing[,name])=="character" ) { cat("!!!",name); testing[, c(name)]=as.numeric( testing[, c(name)] ) ; }
    print(sprintf("types tr %s name %s ",typeof(testing[, c(name)]), name ) )
  }}
#NAs2 <- apply(testing,2,function(x) {sum(is.na(x))})
testing <- testing[,which(NAs == 0)]
#testing<-testing[,colSums(is.na(testing)) < nrow(testing) ]
removeIndex <- grep(excludes,names(testing))
#testing <- subset( testing, select = removeIndex )
testing <- testing[,-removeIndex]
table(training$classe)
plot(table(training$classe))
if(testmethod=="rpart") {
  predict(modfit, testing, verbose = TRUE)
}
if(testmethod=="rf") {
  prediction= predict(modfitrf, testing, verbose = TRUE)
}

# Write one submission file per test case ("problem_id_<i>.txt"), the format
# required by the course's automated grader.
# x: vector of predicted classes, one element per test case.
pml_write_files = function(x){
  n = length(x)
  for(i in 1:n){
    filename = paste0("problem_id_",i,".txt")
    write.table(x[i],file=filename,quote=FALSE,row.names=FALSE,col.names=FALSE)
  }
}
pml_write_files(prediction)
84363a636901ec229eeb9d8fa8486c5dc8f1a677
343d569ab4a4a89a762c58f4fda375ab95823f0a
/R/lr.R
1cef03a2fc349ec9ca03fd931c7f3e183313f6a1
[]
no_license
asancpt/sasLM
d62aa33ac3e63aff1c1a2db92a4c8615840ba06b
8c8d4dcf5f556a44bedfa5b19d3094bbd41bc486
refs/heads/master
2023-05-26T06:33:44.773640
2021-06-15T03:50:02
2021-06-15T03:50:02
null
0
0
null
null
null
null
UTF-8
R
false
false
2,027
r
lr.R
# Linear regression via the augmented sweep (G2SWEEP) operator, mimicking
# SAS-style output. Returns an object of class "summary.lm" so that base R's
# print method for lm summaries can be reused.
#
# Args:
#   Formula: model formula with a response on the left-hand side (required).
#   Data:    data.frame containing the variables in the formula.
#   eps:     numeric tolerance for the sweep operator; also used to blank out
#            numerically-zero variance entries (aliased terms).
#
# Depends on package-internal helpers ModelMatrix() and G2SWEEP(), defined
# elsewhere in this package (not visible here); ModelMatrix() is used as a
# list with components $X (design matrix) and $terms.
lr = function(Formula, Data, eps=1e-8) {
  # terms() reports response == 0 when the formula has no left-hand side.
  if (!attr(terms(Formula, data=Data), "response")) stop("Dependent variable should be provided!")
  x = ModelMatrix(Formula, Data)
  y = model.frame(Formula, Data)[,1]  # response = first column of the model frame
  if (!is.numeric(y)) stop("Dependent variable should be numeric!")
  nc = ncol(x$X)
  # Build the augmented cross-product matrix [X'X X'y; y'X y'y] and sweep it.
  XpX = crossprod(x$X)
  XpY = crossprod(x$X, y)
  aXpX = rbind(cbind(XpX, XpY), cbind(t(XpY), crossprod(y)))
  ag2 = G2SWEEP(aXpX, Augmented = TRUE, eps = eps)  # attr "rank" = model rank
  b = ag2[1:nc, (nc + 1)]       # coefficient estimates
  iXpX = ag2[1:nc, 1:nc]        # generalized (g2) inverse of X'X
  nr = nrow(x$X)
  np = attr(ag2, "rank")
  DFr = nr - np                 # residual degrees of freedom
  SSE = ag2[(nc + 1), (nc + 1)] # residual SS: bottom-right cell after the sweep
  fIntercept = attr(x$terms, "intercept")  # 1 if the model has an intercept, else 0
  # Total SS, corrected for the mean only when an intercept is present.
  SST = as.numeric(crossprod(y - fIntercept*mean(y)))
  if (DFr > 0) {
    MSE = SSE/DFr
    # Coefficient covariance for a g2 inverse: (X'X)^- X'X (X'X)^- * MSE.
    bVar = iXpX %*% XpX %*% t(iXpX) * MSE
    bVar[abs(bVar) < eps] = NA_real_  # treat numerically-zero variances as NA
    bSE = sqrt(diag(bVar))
    Tval = b/bSE
    Pval = 2*(1 - pt(abs(Tval), DFr))  # two-sided t tests
  } else {
    # Saturated model: no residual df, hence no SEs or tests.
    MSE = NA
    bSE = NA
    Tval = NA
    Pval = NA
  }
  Parameter = cbind(b, bSE, Tval, Pval)
  colnames(Parameter) = c("Estimate", "Std. Error", "t value", "Pr(>|t|)")
  rownames(Parameter) = colnames(x$X)
  # Assemble a summary.lm-compatible result list.
  Res = list()
  Res$call = match.call()
  Res$terms = x$terms
  Res$residuals = as.vector(y - x$X %*% b)
  coef1 = Parameter
  # Aliased coefficients (zero estimate with no SE) are reported as NA.
  coef1[is.na(bSE) & b == 0, "Estimate"] = NA_real_
  # Temporarily switch to SAS-style contrasts so the coefficient rows can be
  # reordered to match model.matrix(); the previous options are restored.
  DefOpt = options(contrasts=c("contr.SAS", "contr.SAS"))
  coef1 = coef1[colnames(model.matrix(Formula, Data)), , drop=FALSE]
  options(DefOpt)
  Res$coefficients = coef1
  Res$aliased = !is.numeric(coef1[,"Estimate"])
  Res$df = c(np, DFr, nc)
  Res$r.squared = 1 - SSE/SST
  if (DFr > 0) {
    Res$sigma = sqrt(MSE)
    Res$adj.r.squared = 1 - (1 - Res$r.squared) * (nr - fIntercept)/DFr
    # Overall model F statistic (intercept df excluded from the numerator).
    Res$fstatistic = c(value=(SST - SSE)/(np - fIntercept)/MSE, numdf=(np - fIntercept), dendf=DFr)
  } else {
    Res$sigma = NaN
    Res$adj.r.squared = NaN
    Res$fstatistic = c(NaN, numdf=(np - fIntercept), dendf=DFr)
  }
  class(Res) = "summary.lm"
  return(Res)
}
81e38bf0edcf8a3d78d32cea44ce6949ade68e4f
7bb3f64824627ef179d5f341266a664fd0b69011
/Business_Statistics_:_A_First_Course_by_David_M._Levine,_Kathryn_A._Szabat,_David_F._Stephan,_P._K._Vishwanathan/CH7/EX7.3/7_3.R
2f6ee19a071780ad3593b1edc0c3679385c50da9
[ "MIT" ]
permissive
prashantsinalkar/R_TBC_Uploads
8bd0f71834814b1d03df07ce90b2eae3b7d357f8
b3f3a8ecd454359a2e992161844f2fb599f8238a
refs/heads/master
2020-08-05T23:06:09.749051
2019-10-04T06:54:07
2019-10-04T06:54:07
212,746,586
0
0
MIT
2019-10-04T06:03:49
2019-10-04T06:03:48
null
UTF-8
R
false
false
346
r
7_3.R
# Effect of sample size n on the clustering of means in the sampling distribution.
# z statistic for a sample mean: z = (x_bar - mu) / (sigma / sqrt(n))

# Helper: standardize a sample mean against the population parameters.
z_score_of_mean <- function(x_bar, mu, sigma, n) {
  (x_bar - mu) / (sigma / sqrt(n))
}

sample_mean  <- 365  # observed sample mean (x_bar)
avg          <- 368  # population mean (mu)
standard_dev <- 15   # population standard deviation (sigma)
sample_size  <- 100  # sample size (n)

standard_error_mean <- standard_dev / sqrt(sample_size)  # sigma / sqrt(n)
z <- z_score_of_mean(sample_mean, avg, standard_dev, sample_size)
z  # auto-prints the z value
f9056a264915fa042030c754904daad5a663d6a2
483b19fca44376eee27583bcb9691e731d3345d2
/tests/testthat/test-initAnimal.R
01e5360a3814e963c304a76aa3ccd581fe727436
[ "MIT" ]
permissive
Nature40/tRackIT
f550f6a629b369519b9e5b7d60829ca16ed885ea
10176a15c1123b29c1c74da6ce6db34cfab15063
refs/heads/main
2023-04-09T13:46:45.948535
2022-11-10T12:32:41
2022-11-10T12:32:41
321,942,074
1
0
null
null
null
null
UTF-8
R
false
false
514
r
test-initAnimal.R
# Smoke test for initAnimal(): initializing an animal from the shared
# `test_project` fixture should return a list with exactly the "meta" and
# "path" elements and populate the ID and frequency metadata.
# NOTE(review): `projroot` is a machine-specific absolute path ("H:/...");
# this test will fail on any other machine/CI. Consider a tempdir()- or
# fixture-based project root.
test_that("Expected output", {
  anml <- initAnimal(
    projList = test_project,
    projroot = "H:/projects/repositories/test_project/",
    saveAnml = TRUE,
    animalID = test_project$tags$ID[1],
    species = "woodpecker",
    sex = "m",
    age = "adult",
    weight = 36,
    rep.state = "breeding",
    freq = 150050,
    start = test_project$tags$start[1],
    end = test_project$tags$end[1]
  )

  expect_equal(names(anml), c("meta", "path"))

  # Clearer, equivalent form of the original
  # `expect_equal(!is.na(x), !is.na(1))` checks: key fields must not be NA.
  expect_false(is.na(anml$meta$animalID))
  expect_false(is.na(anml$meta$freq))
})
091c8dc5c7c42ae1d93d24a8dc5c0891d25a2880
b1ef9bf42b79ef0b5ee7979f537d05d50fc24f75
/inst/shiny-examples/BioMonTools/www/linked_files/TaxaMaps/BMT_MapTaxaObs_Example1.R
ed0e3bdfaf8caabffd182b451fd0a54b6d56f057
[ "MIT" ]
permissive
leppott/BioMonTools
19b2c06f8faee35d9f17060e82150d8c3e07521c
3c69c89e0b79acc2150e8516949f6a977362213c
refs/heads/main
2023-08-18T10:37:49.117554
2023-08-11T11:26:36
2023-08-11T11:26:36
157,881,777
12
6
MIT
2022-06-09T13:43:46
2018-11-16T14:55:10
R
UTF-8
R
false
false
1,695
r
BMT_MapTaxaObs_Example1.R
library(readxl)
library(BioMonTools)

# Folder holding the example input spreadsheet; maps are written here too.
wd <- 'C:/Users/Jen.Stamp/Documents/R_code/BioMonTools_4.3.C/TaxaDistribMaps'
setwd(wd)

# Maps are generated in the order taxa appear in the input file, so sort the
# spreadsheet by taxonomy (e.g. phylum, order, family, genus) before running.
# df_obs <- read_excel("~/BioMonTools/Maps_Plecop_genus.xlsx")
data_example <- read_excel(file.path(wd, 'MapInput_Example1.xlsx'))
df_obs <- data_example

# Column names of the observation table.
SampID <- "SampleID"
TaxaID <- "TaxaID"
TaxaCount <- "N_Taxa"
Lat <- "Latitude"
Long <- "Longitude"

# Output: one PDF of maps, written to the working directory.
output_dir <- getwd()
output_prefix <- "maps.taxa."
output_type <- "pdf"

# Base map: state outlines for the six states covered by the data.
myDB <- "state"
myRegion <- c("iowa", "nebraska", "kansas", "missouri", "oklahoma", "minnesota")

# Approximate longitude (x) and latitude (y) extents per state, in degrees.
x_IA <- c(-(96 + 38/60), -(90 + 8/60))
y_IA <- c((40 + 23/60), (43 + 30/60))
x_NE <- c(-(104 + 3/60), -(95 + 19/60))
y_NE <- c((40), (43))
x_KS <- c(-(102 + 3/60), -(94 + 35/60))
y_KS <- c((37), (40))
x_MO <- c(-(95 + 46/60), -(89 + 6/60))
y_MO <- c((36), (40 + 37/60))
x_OK <- c(-(103), -(94 + 26/60))
y_OK <- c((33 + 37/60), (37))
x_MN <- c(-(89 + 29/60), -(97 + 14/60))
y_MN <- c((43 + 30/60), (46))

# Plotting window: bounding box of all six states (range() = c(min, max)).
myXlim <- range(x_IA, x_NE, x_KS, x_MO, x_OK, x_MN)
myYlim <- range(y_IA, y_NE, y_KS, y_MO, y_OK, y_MN)

df_obs <- as.data.frame(df_obs)

# Draw the taxon observation maps, grouped by the "Source" column.
MapTaxaObs(df_obs, SampID, TaxaID, TaxaCount, Lat, Long,
           database = myDB, regions = myRegion,
           xlim = myXlim, ylim = myYlim,
           map_grp = "Source")
008ae68588abff0192e6b7dd54a36431ed13f4df
e294404235f97acb3b0da5f85148b5433577500c
/Scripts/(3)Injection_donnees.R
9afa241b2c982a903d7e51d5fea8e9e155638d1b
[]
no_license
montravailBIO500/Travail_Spikee
77c4be040a8f05f2553dee83a07c416060aa83fc
46ed3beecdd8a6dff09e3b9da4776436755c9187
refs/heads/main
2023-04-10T19:13:26.558711
2021-04-26T03:12:55
2021-04-26T03:12:55
357,302,943
0
0
null
null
null
null
UTF-8
R
false
false
1,657
r
(3)Injection_donnees.R
### DATA INJECTION ###

# Install and attach the SQLite driver.
install.packages('RSQLite')
library(RSQLite)

# Open the connection (creates the database file if it does not yet exist).
con <- dbConnect(SQLite(), dbname = "projetspikee.db")

# --- Create the SQL tables ---

# Table "noeuds": one row per student (a node of the collaboration network).
noeuds_sql <- '
CREATE TABLE noeuds (
  nom_prenom VARCHAR(50),
  annee_debut DATE,
  session_debut CHAR(1),
  programme VARCHAR(50),
  coop BOLEAN,
  bio500 BOLEAN,
  bio5002 BOLEAN,
  PRIMARY KEY (nom_prenom)
);'
dbSendQuery(con, noeuds_sql)

# Table "collaborations": one row per pair of students sharing a course.
collaborations_sql <- '
CREATE TABLE collaborations (
  etudiant1 VARCHAR(50),
  etudiant2 VARCHAR(50),
  sigle CHAR(6),
  session CHAR(3),
  PRIMARY KEY (etudiant1, etudiant2, sigle, session),
  FOREIGN KEY (etudiant1) REFERENCES noeuds(nom_prenom),
  FOREIGN KEY (etudiant2) REFERENCES noeuds(nom_prenom),
  FOREIGN KEY (sigle) REFERENCES cours(sigle)
);'
dbSendQuery(con, collaborations_sql)

# Table "cours": one row per course and delivery mode.
cours_sql <- '
CREATE TABLE cours (
  sigle CHAR(6) NOT NULL,
  credits INTEGER NOT NULL,
  obligatoire BOLEAN,
  laboratoire BOLEAN,
  distance BOLEAN,
  groupes BOLEAN,
  libre BOLEAN,
  PRIMARY KEY (sigle, distance)
);'
dbSendQuery(con, cours_sql)

# Check that the three tables now exist on the SQL server.
dbListTables(con)

# Inject the prepared data frames into the SQL tables.
dbWriteTable(con, append = TRUE, name = "noeuds",
             value = db_noeuds2.2, row.names = FALSE)
dbWriteTable(con, append = TRUE, name = "collaborations",
             value = db_collaborations1.0, row.names = FALSE)
dbWriteTable(con, append = TRUE, name = "cours",
             value = db_cours2.0, row.names = FALSE)
55e8dce5d2b4cd215545ce398308af125f032598
abdae0f889e4dc5aa848c850a213a609a698ec4b
/file1.R
d0b1a15a20ba4b6a3092d4d0e98c689b19ccd804
[]
no_license
Tusharbiswas/analytics1
265f9bdcef312af9af4031591536d804fcf912f8
56e1cb09732772ef91c1a5a37f085f587032fe05
refs/heads/master
2020-03-26T20:07:04.823175
2018-08-20T17:39:41
2018-08-20T17:39:41
145,305,792
0
0
null
null
null
null
UTF-8
R
false
false
3,895
r
file1.R
# Data structures ----

# Vectors ----
v1 <- 1:100                      # create vector from 1 to 100
v2 <- c(1, 4, 5, 10)
class(v1)
class(v2)
v3 <- c('a', 'santa', 'banta')   # character vector
v3                               # print the vector
# FIX: the logical vector overwrote v3 while class() was called on an
# undefined v4; store the logical vector in v4 as intended (TRUE/FALSE, not T/F).
v4 <- c(TRUE, FALSE, TRUE, FALSE, TRUE)
class(v4)

# Summary statistics on vectors
mean(v1)
median(v1)
sd(v1)
var(v1)
hist(v1)
hist(women$height)               # 'women' is a built-in dataset
v2[v2 >= 5]                      # logical subsetting

x <- rnorm(60, mean = 60, sd = 10)
x
plot(x)
hist(x)
plot(density(x))
abline(v = 60)

# Rectangles and density together
hist(x, freq = FALSE)
lines(density(x))
hist(x, breaks = 10, col = 1:10)
lines(density(x))
length(x)
sd(x)

?sample
x1 <- LETTERS[5:20]
x1
set.seed(1234)
y1 <- sample(x1)                 # random permutation
y1
set.seed(6)
(y2 <- sample(x1, size = 5))     # outer parentheses print the assignment
(gender <- sample(c('M', 'F'), size = 1000000, replace = TRUE, prob = c(.3, .7)))
(t1 <- table(gender))
prop.table(t1)
pie(t1)
barplot(t1, col = 1:2, horiz = TRUE)

# Matrix ----
(m1 <- matrix(1:24, nrow = 4))
(m2 <- matrix(1:24, nrow = 4, byrow = TRUE))
(m3 <- matrix(1:24, ncol = 4, byrow = TRUE))
(x <- trunc(runif(60, 60, 100)))
plot(density(x))
(m4 <- matrix(x, ncol = 6))
colSums(m4)
rowSums(m4)
rowMeans(m4)
colMeans(m4)
m4[m4 > 67]
m4[m4 > 67 & m4 < 86]
m4[8:10, c(1, 3, 5)]
rowSums(m4[8:10, c(1, 3, 5)])
# See also: round, trunc, ceiling, floor
?runif

# Array ----

# data.frame ----
# Columns: rollno, name, gender, course, marks1, marks2
(rollno <- 1:60)
(name <- paste('student1', 1:60, sep = '-'))
name[1:20]
name[c(15, 20, 37)]
name[-c(1:10)]
rev(name)
name[60:1]
(gender <- sample(c('MALE', 'FEMALE'), size = 60, replace = TRUE, prob = c(.3, .7)))
(course <- sample(c('BBA', 'MBA', 'FPM'), size = 60, replace = TRUE, prob = c(.2, .2, .6)))
(marks1 <- ceiling(rnorm(60, mean = 65, sd = 7)))
(marks2 <- ceiling(rnorm(60, mean = 65, sd = 11)))
(grades <- sample(c('A', 'B', 'C'), size = 60, replace = TRUE))
students <- data.frame(rollno, name, gender, course, marks1, marks2, grades,
                       stringsAsFactors = FALSE)
class(students)
summary(students)
students[, c('name')]
students[students$gender == 'MALE', c('rollno', 'gender', 'marks1')]
# FIX: grades are stored in upper case; the lowercase 'c' never matched a row.
students[students$gender == 'MALE' & students$grades == 'C',
         c('rollno', 'gender', 'marks1')]
students$gender
t1 <- table(students$gender)
barplot(table(students$course))
# FIX: removed stray tokens "student1." / "student2" (syntax errors).
# FIX: there is no 'marks' column (only marks1/marks2), and
# 'marks > 55 | marks < 75' is always TRUE; select marks1 between 55 and 75.
students[students$marks1 > 55 & students$marks1 < 75, c('name', 'marks1')]
text(1:3, table(students$course) + 5, table(students$course))
str(students)
nrow(students)
names(students)
dim(students)
head(students)
tail(students)
head(students, n = 7)
students[10:15, -c(3)]

# Average marks1 scored by each gender
aggregate(students$marks1, by = list(students$gender), FUN = mean)
aggregate(students$marks2, by = list(students$gender), FUN = max)
# Mean marks2 by course and gender
aggregate(students$marks2, by = list(students$course, students$gender), FUN = mean)

# dplyr ----
library(dplyr)
students %>% group_by(gender) %>% summarise(mean(marks1))
students %>% group_by(course, gender) %>% summarise(mean(marks1))
students %>% group_by(gender) %>% summarise(mean(marks1), min(marks2), max(marks2))
students %>% group_by(course) %>% summarise(mean(marks1))
students %>% group_by(gender) %>% summarise(mean(marks2))
students %>% group_by(course, gender) %>%
  summarise(meanmarks1 = mean(marks1), min(marks2), max(marks2)) %>%
  arrange(desc(meanmarks1))
students %>% arrange(desc(marks1)) %>% filter(gender == 'MALE') %>% top_n(5)
?sample_frac
sample_frac(students, replace = TRUE, 0.1)
?sample_n
sample_n(students, 5, replace = TRUE)
students %>% sample_frac(.1)
students %>% sample_n(10)
students %>% sample_frac(.1) %>% arrange(course) %>% select(name, gender)
# FIX: filter on 'grades' must run before select() drops that column,
# otherwise the chain errors on a missing column.
students %>% arrange(course, marks1, grades) %>%
  filter(course == 'BBA', grades == 'B') %>%
  select(course, gender, marks1)

# Factors ----
names(students)
students$gender <- factor(students$gender)
summary(students$gender)
summary(students$course)
students$course <- factor(students$course, ordered = TRUE)
summary(students$course)
students$course <- factor(students$course, ordered = TRUE,
                          levels = c('FPM', 'MBA', 'BBA'))
summary(students$course)
students$grades
students$grades <- factor(students$grades, ordered = TRUE,
                          levels = c('C', 'B', 'A'))
summary(students$grades)
students$grades
students

# Export / import ----
write.csv(students, './data/iimtrichy.csv')
students2 <- read.csv('./data/iimtrichy.csv')
students3 <- read.csv(file.choose())   # interactive file picker
install.packages('gsheet')
c0cc00670060e963a7ab09fb32cb5eebe48e2f38
705255987191f8df33b8c2a007374f8492634d03
/man/update-DataDA-method.Rd
cdd85e96195eb731c45df82965892becc6bed78e
[]
no_license
Roche/crmPack
be9fcd9d223194f8f0e211616c8b986c79245062
3d897fcbfa5c3bb8381da4e94eb5e4fbd7f573a4
refs/heads/main
2023-09-05T09:59:03.781661
2023-08-30T09:47:20
2023-08-30T09:47:20
140,841,087
24
9
null
2023-09-14T16:04:51
2018-07-13T11:51:52
HTML
UTF-8
R
false
true
2,325
rd
update-DataDA-method.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Data-methods.R \name{update,DataDA-method} \alias{update,DataDA-method} \alias{update-DataDA} \title{Updating \code{DataDA} Objects} \usage{ \S4method{update}{DataDA}(object, u, t0, trialtime, y, ..., check = TRUE) } \arguments{ \item{object}{(\code{DataDA})\cr object you want to update.} \item{u}{(\code{numeric})\cr the new DLT free survival times for all patients, i.e. for existing patients in the \code{object} as well as for new patients.} \item{t0}{(\code{numeric})\cr the time that each patient starts DLT observation window. This parameter covers all patients, i.e. existing patients in the \code{object} as well as for new patients.} \item{trialtime}{(\code{number})\cr current time in the trial, i.e. a followup time.} \item{y}{(\code{numeric})\cr the new DLTs for all patients, i.e. for existing patients in the \code{object} as well as for new patients.} \item{...}{further arguments passed to \code{Data} update method \code{\link{update-Data}}. These are used when there are new patients to be added to the cohort.} \item{check}{(\code{flag})\cr whether the validation of the updated object should be conducted. See help for \code{\link{update-Data}} for more details on the use case of this parameter.} } \value{ The new, updated \code{\link{DataDA}} object. } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#stable}{\figure{lifecycle-stable.svg}{options: alt='[Stable]'}}}{\strong{[Stable]}} A method that updates existing \code{\link{DataDA}} object with new data. } \note{ This function is capable of not only adding new patients but also updates existing ones with respect to \code{y}, \code{t0}, \code{u} slots. } \examples{ # Create an object of class 'DataDA'. 
my_data <- DataDA( x = c(0.1, 0.5, 1.5, 3, 6, 10, 10, 10), y = c(0, 0, 1, 1, 0, 0, 1, 0), doseGrid = c(0.1, 0.5, 1.5, 3, 6, seq(from = 10, to = 80, by = 2)), u = c(42, 30, 15, 5, 20, 25, 30, 60), t0 = c(0, 15, 30, 40, 55, 70, 75, 85), Tmax = 60 ) # Update the data. my_data1 <- update( object = my_data, y = c(my_data@y, 0), # The 'y' will be updated according to 'u'. u = c(my_data@u, 20), t0 = c(my_data@t0, 95), x = 20, trialtime = 120 # This is the global timeline for a trial. ) my_data1 }
69cbe5b608efa08997d0051809103b0df8c438fb
db18b7261e6aeee4fbd66ac7c34a94a97eae4375
/Modelling.R
0739b108acc466dc18132b603ab6a10dfca73d61
[]
no_license
NatasjaFortuin/M2P3
a748ff470712695ff42a7c98287b1c464f402776
5e1b3c85a72ab0c16a0e291718f247e4ce83d693
refs/heads/master
2020-08-10T22:32:09.593890
2019-10-18T11:49:08
2019-10-18T11:49:08
214,434,255
0
0
null
null
null
null
UTF-8
R
false
false
20,476
r
Modelling.R
# Load required packages.
library(readr)
library(caret)
library(ggplot2)
library(mlbench)
library(e1071)
library(dplyr)
library(corrplot)  # FIX: corrplot() was called without attaching the package

# Load data ----
existingproductattributes2017 <- read_csv("existingproductattributes2017.csv")
Exist <- existingproductattributes2017

# Preprocessing ----
# Dummify the data: expand factor columns into 0/1 indicator columns.
DummyVarsExist <- dummyVars(" ~ .", data = Exist)
readyData <- data.frame(predict(DummyVarsExist, newdata = Exist))

# Final selection of relevant features ----
Final_relevant_vars <- c(
  "ProductTypeLaptop", "ProductTypeNetbook", "ProductTypePC",
  "ProductTypeSmartphone", "ProductNum", "x4StarReviews",
  "x3StarReviews", "PositiveServiceReview", "Volume"
)

# Correlation matrix ----
corrData <- cor(readyData[Final_relevant_vars])
corrData
# FIX: corrplot() expects a correlation matrix, not the raw data frame,
# and corrData must be computed before it is plotted.
corrplot(corrData)

final_df <- readyData[Final_relevant_vars]
head(final_df)

set.seed(15)

# Draw a random sample of 70 rows of the data (without replacement) ----
BWsample <- final_df[sample(nrow(final_df), 70, replace = FALSE), ]

# Define a 75%/25% train/test split of the dataset ----
inTraining_lm <- createDataPartition(BWsample$Volume, p = .75, list = FALSE)
# FIX: the split indices were created as 'inTraining_lm' but the subsetting
# referenced an undefined 'inTraining' object.
training_lm <- BWsample[inTraining_lm, ]
testing_lm <- BWsample[-inTraining_lm, ]

# 10-fold cross-validation control.
# NOTE(review): unused by base-R lm() below; presumably intended for the
# caret models trained later in this script — confirm.
fitControl_lm <- trainControl(method = "repeatedcv", number = 10, repeats = 1)

#### MODELLING ####

# LINEAR MODEL ----
# Ordinary least squares: Volume ~ all predictors (base R lm).
lmFit <- lm(Volume ~ ., data = training_lm)

# Training results
lmFit
summary(lmFit)
# Summary performance lmFit:
#   Multiple R-squared 0.8699, Adjusted R-squared 0.8467
# FIX: the model was previously saved twice to the same file; once is enough.
saveRDS(object = lmFit, file = "lmFit.rds")

# Predict output on the hold-out set ----
predicted <- predict(lmFit, testing_lm)
print(predicted)
str(predicted)

# Save LM predictions in a separate column ----
# FIX: there is one prediction per *test* row; assigning them to final_df
# (all rows) raised a length-mismatch error, so store them on testing_lm.
testing_lm$predLM <- predicted

# LM postResample ----
postResample(pred = predict(object = lmFit, newdata = testing_lm),
             obs = testing_lm$Volume)
# Output:  RMSE        Rsquared  MAE
# lmFit:   +/-4354.847 0.5672    271.163
#KNN MODEL---- #K-nn model: KNNfFit AUTOM GRID #type: neighbour based model #package: caret #dataframe = final_df #Y Value = Volume set.seed(15) #SET SPLIT 75%/25% for train/test in the dataset inTrainingKNN <- createDataPartition(BWsample$Volume, p = .75, list = FALSE) trainingKNN <- BWsample[inTraining,] testingKNN <- BWsample[-inTraining,] #10 fold cross validation fitControlKNN <- trainControl(method = "repeatedcv", number = 10, repeats = 1) #train knn model with a tuneLenght = `1`(trains with 1 mtry values for knn) # preProcess=c("center", "scale") removed because not appl on prod types KNNFit <- train(Volume~., data = trainingKNN, method = "kknn", trControl=fitControlKNN, tuneLength = 1 ) #training results KNNFit #KNN traning results---- # RMSE Rsquared MAE # 864.3071 0.8787 MAE 463.236 #KNN summary KNNFit K3---- summary(KNNFit) #summaryperformance_KNNFit= Min Mean Abs Error: 420.7407, Min Mean S-error 1945 saveRDS(object = KNNFit, file = "KNNFit.rds") #KNN postresample---- postResample(pred = predict(object = KNNFit, newdata = testingKNN), obs = testingKNN$Volume) # RMSE Rsquared MAE # 380.911 0.6624 210.75 #Predict Output---- predicted= predict(KNNFit, testingKNN) print(predicted) str(predicted) #Save predictions KNN Model in separate column---- final_df$predKNN <- predict(KNNFit, testingKNN) #RF MODEL---- #Random Forest model: rfFit AUTOM GRID #type: decision tree for mean prediction of individual trees #package: caret #dataframe = final_df #Y Value = Volume set.seed(15) #SET SPLIT 75%/25% for train/test in the dataset inTrainingrf <- createDataPartition(BWsample$Volume, p = .75, list = FALSE) trainingrf <- BWsample[inTraining,] testingrf <- BWsample[-inTraining,] #10 fold cross validation fitControlrf <- trainControl(method = "repeatedcv", number = 10, repeats = 1) #train knn model with a tuneLenght = `1`(trains with 1 mtry values for knn) # preProcess=c("center", "scale") removed because not appl on prod types rfFit <- train(Volume~., data = trainingrf, 
method = "rf", trControl=fitControlrf, tuneLength = 1 ) #training results rfFit #RF traning results---- # RMSE Rsquared MAE # 870.75 0.978 464.13 saveRDS(object = rfFit, file = "rfFit.rds") #RF postresample---- postResample(pred = predict(object = rfFit, newdata = testingrf), obs = testingrf$Volume) # RMSE Rsquared MAE # 143.58 0.955 101.373 #Predict Output---- predicted= predict(rfFit, testingrf) print(predicted) str(predicted) #Save predictions RF Model in separate column---- final_df$predRF <- predict(rfFit, testingrf) #SVM MODEL---- #svmLinear2 model: svmFit AUTOM GRID #type: neighourhood based implicitly maps inputs to high-dimens feature spaces. #package: e1071 #dataframe = final_df #Y Value = Volume set.seed(15) #SET SPLIT 75%/25% for train/test in the dataset inTrainingsvm <- createDataPartition(BWsample$Volume, p = .75, list = FALSE) trainingsvm <- BWsample[inTraining,] testingsvm <- BWsample[-inTraining,] #10 fold cross validation fitControlsvm <- trainControl(method = "repeatedcv", number = 10, repeats = 1) #train svm model with a tuneLenght = `1` # preProcess=c("center", "scale") removed because not appl on prod types svmFit <- train(Volume~., data = trainingsvm, method = "svmLinear2", trControl=fitControlsvm, tuneLength = 1 ) #training results svmFit #SVM traning results---- # RMSE Rsquared MAE Tuning par cost was held constant at value 0.25 # 787.177 0.9629 433.4216 saveRDS(object = svmFit, file = "svmFit.rds") #SVM postresample---- postResample(pred = predict(object = svmFit, newdata = testingsvm), obs = testingsvm$Volume) # RMSE Rsquared MAE # 392.461 0.5861 243.71 #Predict Output---- predicted= predict(svmFit, testingsvm) print(predicted) str(predicted) #Save predictions SVM Model in separate column---- final_df$predSVM <- predict(svmFit, testingsvm) str(final_df) View(final_df) #create excel---- write.csv(final_df, file = "ExistVolumeInclPred", row.names = TRUE) #### REVIEW by PLOTS #### #NETBOOK---- #Model plot LM---- ggplot(data = final_df, 
aes(x = ProductTypeNetbook, y = predLM)) + geom_point() + geom_smooth(method = "lm", se = FALSE) #Model plot KNN---- ggplot(data = final_df, aes(x = ProductTypeNetbook, y = predKNN)) + geom_point() + geom_smooth(method = "lm", se = FALSE) #Model plot RF---- ggplot(data = final_df, aes(x = ProductTypeNetbook, y = predRF)) + geom_point() + geom_smooth(method = "lm", se = FALSE) #Model plot SVM---- ggplot(data = final_df, aes(x = ProductTypeNetbook, y = predSVM)) + geom_point() + geom_smooth(method = "lm", se = FALSE) #LAPTOP---- #Model plot LM---- ggplot(data = final_df, aes(x = ProductTypeLaptop, y = predLM)) + geom_point() + geom_smooth(method = "lm", se = FALSE) #Model plot KNN---- ggplot(data = final_df, aes(x = ProductTypeLaptop, y = predKNN)) + geom_point() + geom_smooth(method = "lm", se = FALSE) #Model plot RF---- ggplot(data = final_df, aes(x = ProductTypeLaptop, y = predRF)) + geom_point() + geom_smooth(method = "lm", se = FALSE) #Model plot SVM---- ggplot(data = final_df, aes(x = ProductTypeLaptop, y = predSVM)) + geom_point() + geom_smooth(method = "lm", se = FALSE) #PC---- #Model plot LM---- ggplot(data = final_df, aes(x = ProductTypePC, y = predLM)) + geom_point() + geom_smooth(method = "lm", se = FALSE) #Model plot KNN---- ggplot(data = final_df, aes(x = ProductTypePC, y = predKNN)) + geom_point() + geom_smooth(method = "lm", se = FALSE) #Model plot RF---- ggplot(data = final_df, aes(x = ProductTypePC, y = predRF)) + geom_point() + geom_smooth(method = "lm", se = FALSE) #Model plot SVM---- ggplot(data = final_df, aes(x = ProductTypePC, y = predSVM)) + geom_point() + geom_smooth(method = "lm", se = FALSE) #SMARTPHONE---- #Model plot LM---- ggplot(data = final_df, aes(x = ProductTypeSmartphone, y = predLM)) + geom_point() + geom_smooth(method = "lm", se = FALSE) #Model plot KNN---- ggplot(data = final_df, aes(x = ProductTypeSmartphone, y = predKNN)) + geom_point() + geom_smooth(method = "lm", se = FALSE) #Model plot RF---- ggplot(data = final_df, aes(x = 
ProductTypeSmartphone, y = predRF)) + geom_point() + geom_smooth(method = "lm", se = FALSE) #Model plot SVM---- ggplot(data = final_df, aes(x = ProductTypeSmartphone, y = predSVM)) + geom_point() + geom_smooth(method = "lm", se = FALSE) #Rename columnnames final_df---- head(final_df) names(final_df)<-c("Laptop","Netbook","PC", "Phone", "ID", "x4Star", "x3Star", "PosSerRev", "Volume", "predSVM", "predLM", "predKNN", "predRF") head(final_df) #Find outliers outlier_values <- boxplot.stats(final_df$Volume)$out boxplot(final_df$Volume) boxplot(final_df$predSVM) boxplot(final_df$predLM) boxplot(final_df$predKNN) boxplot(final_df$predRF) boxplot(final_df$Volume)$out #outliers determined as values 7036 and 11204 #find in which row the outliers are final_df[which(final_df$Volume %in% outlier_values),] #outliers are in rows 50 (11204) and 73 (7036) #ERROR Check ---- #Error check is done with Volume & Pred Volume!! ggplot(data = final_df) + geom_point(aes(x = Volume, y = predRF)) + geom_abline(intercept = 1) View(outliers_values) #Remove Outliers---- final_df_ExOut <- final_df[-which(final_df$Volume %in% outlier_values),] #check removal with boxplot boxplot(final_df_ExOut) boxplot(final_df_ExOut$Volume) #Remove Duplicates---- duplicated(final_df_ExOut$Volume) duplicates <- duplicated(final_df_ExOut$Volume) final_df_ExOut[which (final_df_ExOut$Volume %in% duplicates),] #didn't select the duplicates from prod id 135 tm 141 that I want to remove #so nothing removed yet duplicated(final_df_ExOut$PosSerRev) duplicates2 <- duplicated(final_df_ExOut$PosSerRev) final_df_ExOut[which (final_df_ExOut$PosSerRev %in% duplicates),] #works with normalized PosServRev 0-1 so is of no use. Nothing removed. #doesn't work because duplicate values are not recognized as such #I know it is product ID's 134 tm 141. 
I want to keep 134 and remove rest Finaldf_cleaned <- final_df_ExOut[!(final_df_ExOut$ID==135 final_df_ExOut$ID==136 final_df_ExOut$ID==137 final_df_ExOut$ID==138 final_df_ExOut$ID==139 final_df_ExOut$ID=140 final_df_ExOut$ID=141),] #not working. Tried +, , AND etc... subset(Finaldf_cleaned, ID!=135) subset(Finaldf_cleaned, ID!=136) subset(Finaldf_cleaned, ID!=137) subset(Finaldf_cleaned, ID!=138) subset(Finaldf_cleaned, ID!=139) subset(Finaldf_cleaned, ID!=140) subset(Finaldf_cleaned, ID!=141) View(Finaldf_cleaned) #did not work, only removes one value at a time. Replace dfcleaned with #original dataframe ex Outliers Finaldf_cleaned <- final_df_ExOut str(Finaldf_cleaned) Finaldf_cleaned <- distinct(.data = final_df_ExOut, PosSerRev, x4Star, x3Star, Volume, .keep_all = TRUE) rm(Finalsdf_cleaned) str(Finaldf_cleaned) View(Finaldf_cleaned) #remove prediction colums with dplyr in order to re run the modelling Finaldf_cleaned <- select (Finaldf_cleaned, -c(predLM, predRF, predKNN, predSVM)) View((Finaldf_cleaned)) set.seed(15) #create a 20% sample of the data---- BWsample2 <- Finaldf_cleaned[sample(1:nrow(Finaldf_cleaned), 70,replace=FALSE),] # define an 75%/25% train/test split of the dataset---- inTraining_lm2 <- createDataPartition(BWsample2$Volume, p = .75, list = FALSE) training_lm2 <- BWsample2[inTraining,] testing_lm2 <- BWsample2[-inTraining,] #### MODELLING CLEANED #### #LINEAR MODEL---- #lm model: lmfFit2 AUTOM GRID #type: line y based on x model #package: baseR #dataframe = Finaldf_cleaned #Y Value = Volume lmFit2 <- lm(Volume~., data = training_lm2) #training results lmFit2 saveRDS(lmFit2, file = "lmFit2.rds") #LM summary lmFit---- summary(lmFit2) #summaryperformance_lmFit2 #multiple R-squared Adjusted R-squared # 0.6784 0.6294 saveRDS(object = lmFit2, file = "lmFit2.rds") #Predict Output---- predicted= predict(lmFit2, testing_lm2) print(predicted) str(predicted) #Save predictions LM Model in separate column---- Finaldf_cleaned$predLM <- predict(lmFit2, 
Finaldf_cleaned) #LM postresample---- postResample(pred = predict(object = lmFit2, newdata = testing_lm2), obs = testing_lm2$Volume) ##output = RMSE Rsquared MAE ##lmFit = +/-320.680 0.6456 223.876 #KNN MODEL---- #K-nn model: KNNfFit2 AUTOM GRID #type: neighbour based model #package: caret #dataframe = Finaldf_Cleaned #Y Value = Volume set.seed(15) #SET SPLIT 75%/25% for train/test in the dataset inTrainingKNN2 <- createDataPartition(BWsample2$Volume, p = .75, list = FALSE) trainingKNN2 <- BWsample2[inTraining,] testingKNN2 <- BWsample2[-inTraining,] #10 fold cross validation fitControlKNN2 <- trainControl(method = "repeatedcv", number = 10, repeats = 1) #train knn model with a tuneLenght = `1`(trains with 1 mtry values for knn) # preProcess=c("center", "scale") removed because not appl on prod types KNNFit2 <- train(Volume~., data = trainingKNN2, method = "kknn", trControl=fitControlKNN2, tuneLength = 1 ) #training results KNNFit2 #KNN traning results---- # RMSE Rsquared MAE # 266.3007 0.84267 MAE 159.5161 #KNN summary KNNFit2 K3---- summary(KNNFit2) #summaryperformance_KNNFit= Min Mean Abs Error: 150.2986, Min Mean S-error 8705 saveRDS(object = KNNFit2, file = "KNNFit2.rds") #KNN postresample---- postResample(pred = predict(object = KNNFit2, newdata = testingKNN2), obs = testingKNN2$Volume) # RMSE Rsquared MAE # 191.826 0.8312 102.92 #Predict Output---- predicted= predict(KNNFit2, testingKNN2) print(predicted) str(predicted) #Save predictions KNN Model in separate column---- Finaldf_cleaned$predKNN <- predict(KNNFit2, Finaldf_cleaned) #RF MODEL---- #Random Forest model: rfFit2 AUTOM GRID #type: decision tree for mean prediction of individual trees #package: caret #dataframe = Finaldf_cleaned #Y Value = Volume set.seed(15) #SET SPLIT 75%/25% for train/test in the dataset inTrainingrf2 <- createDataPartition(BWsample2$Volume, p = .75, list = FALSE) trainingrf2 <- BWsample2[inTraining,] testingrf2 <- BWsample2[-inTraining,] #10 fold cross validation fitControlrf2 <- 
trainControl(method = "repeatedcv", number = 10, repeats = 1) #train knn model with a tuneLenght = `1`(trains with 1 mtry values for knn) # preProcess=c("center", "scale") removed because not appl on prod types rfFit2 <- train(Volume~., data = trainingrf2, method = "rf", trControl=fitControlrf2, tuneLength = 1 ) #training results rfFit2 #RF traning results---- # RMSE Rsquared MAE # 277.3768 0.8645 193.8815 saveRDS(object = rfFit2, file = "rfFit2.rds") #RF postresample---- postResample(pred = predict(object = rfFit2, newdata = testingrf2), obs = testingrf2$Volume) # RMSE Rsquared MAE # 137.120 0.9509 109.22 #Predict Output---- predicted= predict(rfFit2, testingrf2) print(predicted) str(predicted) #Save predictions RF Model in separate column---- Finaldf_cleaned$predRF <- predict(rfFit2, Finaldf_cleaned) #SVM MODEL---- #svmLinear2 model: svmFit2 AUTOM GRID #type: neighourhood based implicitly maps inputs to high-dimens feature spaces. #package: e1071 #dataframe = Finaldf_cleaned #Y Value = Volume set.seed(15) #SET SPLIT 75%/25% for train/test in the dataset inTrainingsvm2 <- createDataPartition(BWsample2$Volume, p = .75, list = FALSE) trainingsvm2 <- BWsample2[inTraining,] testingsvm2 <- BWsample2[-inTraining,] #10 fold cross validation fitControlsvm2 <- trainControl(method = "repeatedcv", number = 10, repeats = 1) #train svm model with a tuneLenght = `1` # preProcess=c("center", "scale") removed because not appl on prod types svmFit2 <- train(Volume~., data = trainingsvm2, method = "svmLinear2", trControl=fitControlsvm2, tuneLength = 1) #training results svmFit2 #SVM traning results---- # RMSE Rsquared MAE Tuning par cost was held constant at value 0.25 # 511.667 0.7061 281.878 saveRDS(object = svmFit2, file = "svmFit2.rds") #SVM postresample---- postResample(pred = predict(object = svmFit2, newdata = testingsvm2), obs = testingsvm2$Volume) # RMSE Rsquared MAE # 286.367 0.63371 121.99 #Predict Output---- predicted= predict(svmFit2, testingsvm2) print(predicted) 
str(predicted) #Save predictions SVM Model in separate column---- Finaldf_cleaned$predSVM <- predict(svmFit2, Finaldf_cleaned) str(Finaldf_cleaned) View(Finaldf_cleaned) #### IMPROVE DATAFRAME FOR PLOTS #### PredData <- Finaldf_cleaned as.integer(PredData$predLM) as.integer(PredData$predKNN) as.integer(PredData$predRF) as.integer(PredData$predSVM) View(PredData) #ERROR Check ---- #Error check is done with Volume & Pred Volume!! ggplot(data = PredData) + geom_point(aes(x = Volume, y = predLM)) + geom_abline(intercept = 1) ggsave("Errorplot_LM.png", width = 5, height = 5) PlotErrorCheck <- ggplot(data = PredData) + geom_point(aes(x = Volume, y = predLM)) + geom_abline(intercept = 1) ggplot(data = PredData) + geom_point(aes(x = Volume, y = predKNN)) + geom_abline(intercept = 1) ggsave("Errorplot_KNN.png", width = 5, height = 5) ggplot(data = PredData) + geom_point(aes(x = Volume, y = predRF)) + geom_abline(intercept = 1) ggsave("Errorplot_RF.png", width = 5, height = 5) ggplot(data = PredData) + geom_point(aes(x = Volume, y = predSVM)) + geom_abline(intercept = 1) ggsave("Errorplot_SVM.png", width = 5, height = 5) #### REVIEW by PLOTS2 #### #NETBOOK---- #Model plot LM---- ggplot(data = PredData, aes(x = Netbook, y = predLM)) + geom_point() + geom_smooth(method = "lm", se = TRUE) #Model plot KNN---- ggplot(data = PredData, aes(x = Netbook, y = predKNN)) + geom_point() + geom_smooth(method = "lm", se = TRUE) #Model plot RF---- ggplot(data = PredData, aes(x = Netbook, y = predRF)) + geom_point() + geom_smooth(method = "lm", se = TRUE) #Model plot SVM---- ggplot(data = PredData, aes(x = Netbook, y = predSVM)) + geom_point() + geom_smooth(method = "lm", se = FALSE) #LAPTOP---- #Model plot LM---- ggplot(data = PredData, aes(x = Laptop, y = predLM)) + geom_point() + geom_smooth(method = "lm", se = FALSE) #Model plot KNN---- ggplot(data = PredData, aes(x = Laptop, y = predKNN)) + geom_point() + geom_smooth(method = "lm", se = FALSE) #Model plot RF---- ggplot(data = PredData, 
aes(x = Laptop, y = predRF)) + geom_point() + geom_smooth(method = "lm", se = FALSE) #Model plot SVM---- ggplot(data = PredData, aes(x = Laptop, y = predSVM)) + geom_point() + geom_smooth(method = "lm", se = FALSE) #PC---- #Model plot LM---- ggplot(data = PredData, aes(x = PC, y = predLM)) + geom_point() + geom_smooth(method = "lm", se = FALSE) #Model plot KNN---- ggplot(data = PredData, aes(x = PC, y = predKNN)) + geom_point() + geom_smooth(method = "lm", se = FALSE) #Model plot RF---- ggplot(data = PredData, aes(x = PC, y = predRF)) + geom_point() + geom_smooth(method = "lm", se = FALSE) #Model plot SVM---- ggplot(data = PredData, aes(x = PC, y = predSVM)) + geom_point() + geom_smooth(method = "lm", se = FALSE) #SMARTPHONE---- #Model plot LM---- ggplot(data = PredData, aes(x = Phone, y = predLM)) + geom_point() + geom_smooth(method = "lm", se = FALSE) ggsave("Smartphoneplot_LM.png", width = 5, height = 5) #Model plot KNN---- ggplot(data = PredData, aes(x = Phone, y = predKNN)) + geom_point() + geom_smooth(method = "lm", se = FALSE) ggsave("Smartphoneplot_KNN.png", width = 5, height = 5) #Model plot RF---- ggplot(data = PredData, aes(x = Phone, y = predRF)) + geom_point() + geom_smooth(method = "lm", se = FALSE) ggsave("Smartphoneplot_RF.png", width = 5, height = 5) #Model plot SVM---- ggplot(data = PredData, aes(x = Phone, y = predSVM)) + geom_point() + geom_smooth(method = "lm", se = FALSE) ggsave("SmartphoneplotRF.png", width = 5, height = 5)
66a9a7faa56a0dad040f612ac3f26ea15ba2fd90
e43ccc719a5df63664598db7614d7b10e3b4d4fb
/man/p1x2.Rd
51342f0806c7a70459d796bdc3eb41e29484c773
[]
no_license
opisthokonta/goalmodel
58fa2236e894df745f4f5985e16c863e55fd6272
55a33c620a1c36b51ad634f0e47abf402766cf56
refs/heads/master
2023-09-03T09:30:18.823581
2023-08-29T08:39:50
2023-08-29T08:39:50
153,664,398
98
21
null
2019-12-28T23:10:27
2018-10-18T17:49:21
R
UTF-8
R
false
true
987
rd
p1x2.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/goalmodel_predict.R \name{p1x2} \alias{p1x2} \title{Compute 1x2 probabilities from expected goals.} \usage{ p1x2(expg1, expg2, model = "poisson", dispersion = NULL, rho = NULL, uprx = 25) } \arguments{ \item{expg1}{Non-negative numeric. The expected number of goals.} \item{expg2}{Non-negative numeric. The expected number of goals.} \item{model}{String indicating whether the goals follow a 'poisson' model (default), a Negative Binomial ('negbin'), or Conway-Maxwell-Poisson ('cmp') model.} \item{dispersion}{Non-negative numeric. The dispersion parameter in the Negative Binomial model or the Conway-Maxwell-Poisson model.} \item{rho}{Numeric. The Dixon-Coles adjustment.} \item{uprx}{Numeric. The upper limit for evaluating the underlying distributions.} } \value{ A matrix with 3 columns with one row for each pair of expected goals. } \description{ Compute 1x2 probabilities from expected goals. }
ab3be2c6b73ddd487f5bd92b1ec5379a2957a0c5
2c46f227e81ceec714b4a0170ae39bb0b74bfd5d
/R/summarize-alg.R
4415c1b41f1ebca4447dbd791b70384dab6d74a4
[]
no_license
tengfei/chromatoplots
f25d95a521ca09eceb79498588df9b1f93047ae6
858cec990aafbf58b8e95bbdc37414a7ac6b833c
refs/heads/master
2021-01-02T09:33:06.358088
2013-07-12T07:03:15
2013-07-12T07:03:15
926,471
0
0
null
null
null
null
UTF-8
R
false
false
1,542
r
summarize-alg.R
summarize.common <- function(object) { common <- sum_common_peaks(object@peaks, object@comps) comps <- object@comps[,colnames(object@comps) != "quantity"] object@comps <- cbind(comps, quantity = common) object } find_spectra <- function(peaks) { comp_peaks <- split(seq_len(nrow(peaks)), interaction(peaks[,"comp"], peaks[,"sample"])) existing_ints <- sapply(comp_peaks, length) > 0 comp_peaks <- comp_peaks[existing_ints] mass <- peaks[,"mz"] height <- peaks[,"maxf"] sigma <- peaks[,"sigma"] max_mass <- max(mass) spectrum <- function(p) { y <- numeric(max_mass) y[mass[p]] <- 2*height[p]*sqrt(pi/2)*sigma[p] y } sapply(comp_peaks, spectrum) } sum_common_peaks <- function(peaks, comps) { # ensure component order is compatible with find_spectra() comps <- cbind(comps, id = seq_len(nrow(comps))) comps <- comps[order(comps[,"comp"]),] comps <- comps[order(comps[,"sample"]),] spectra <- t(find_spectra(peaks)) sums <- unsplit(by(spectra, comps[,"group"], function(group) { masses <- apply(group, 2, function(column) all(column != 0)) apply(group[,masses,drop=FALSE], 1, sum) }), comps[,"group"]) result <- numeric(length(sums)) result[comps[,"id"]] <- sums result } scale_log_quantities <- function(comps) { quantity <- suppressWarnings(log(comps[,"quantity"])) quantity[is.infinite(quantity)] <- NA unsplit(tapply(quantity, comps[,"sample"], function(sample) { sample - mean(sample, na.rm = TRUE) }), comps[,"sample"]) + mean(quantity, na.rm = TRUE) }
66990599857f0e06ace479698eef7b26dba5b0a5
cedc3d2c404da16d8dac0d2e481de7b329266da8
/CPM_TPM.R
e888a412d310742e89b34a4775325c28bff2f138
[]
no_license
holiday10/my-perl
dd98bd8d52604f75e871e6fcfa33cf318a475a89
6a25e9fb3d2e7542f33afd9eebc3413da0385083
refs/heads/main
2023-02-19T23:49:44.166666
2021-01-19T14:07:56
2021-01-19T14:07:56
329,888,464
0
0
null
null
null
null
GB18030
R
false
false
947
r
CPM_TPM.R
setwd("E:/omics_data/ml/FF")#设置工作目录 countdata<-read.table("37emb.mod.counts",sep="\t",header = T,row.names = 1) metadata <- countdata[,1:2]#提取基因信息count数据前的几列,第一列认为是ID(row.names=1),第二列开始索引为1 countdata <- countdata[,3:ncol(countdata)]#提取counts数,counts数据主题部分,从metadata基因信息往后的第一列开始 prefix<-"mla-37emb-mod"#设置输出文件前缀名 cpm <- t(t(countdata)/colSums(countdata) * 1000000)#参考cpm定义 avg_cpm <- data.frame(avg_cpm=rowMeans(cpm)) #-----TPM Calculation------ kb <- metadata$Length / 1000 rpk <- countdata/kb tpm <- t(t(rpk)/colSums(rpk) * 1000000) avg_tpm <- data.frame(avg_tpm=rowMeans(tpm)) write.csv(avg_tpm,paste0(prefix,"_avg_tpm.csv")) write.csv(avg_cpm,paste0(prefix,"_avg_cpm.csv")) write.csv(tpm,paste0(prefix,"jj_tpm.csv")) write.csv(cpm,paste0(prefix,"_cpm.csv"))
43877b5e597d54e90d509e1bd25ee35ad56a308d
f4e7e4cafdba256f221192abe667f36fddc17228
/man/predictpower.Rd
e662fdbc5354a0814de9f1e0c50c61ceef76f93a
[]
no_license
Rommelio-coli/SSPA
61d3943592d716061342dddcc6b8bb6d9571abfc
b28a35d65a13ef593e648378db92bf71486c66fe
refs/heads/master
2023-06-18T21:11:05.477651
2021-07-21T19:54:36
2021-07-21T19:54:36
388,225,161
0
0
null
null
null
null
UTF-8
R
false
true
607
rd
predictpower.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/powerandsamplesize.R \name{predictpower} \alias{predictpower} \title{Predict power for given vector of sample sizes} \usage{ predictpower(object, samplesizes, alpha = 0.1, verbose = FALSE, plot = FALSE) } \arguments{ \item{object}{of class 'SampleSize'} \item{samplesizes}{vector of total sample sizes.} \item{alpha}{FDR.} \item{verbose}{TRUE/FALSE} \item{plot}{TRUE/FALSE} } \value{ predicted power. } \description{ Predict power for given vector of sample sizes } \details{ details follow. } \author{ Maarten van Iterson }
d127d0ab9c5bf9dfc22b9f5610765cec41e1d677
80ed2246df21e2ff4f29793602966b699a78d899
/code/global_editing_index.R
60862f1fb70f322c1222b5c5b2ae23e2284857de
[]
no_license
bahlolab/brain_scRNAed
cff59bddca84f82a26f3d9f6b45be02ce6722bdf
db8b50c423ae71a6a97a9df4d7a9942ab3a1f95c
refs/heads/master
2020-09-08T10:34:08.966450
2019-11-13T01:38:10
2019-11-13T01:38:10
221,109,416
2
0
null
null
null
null
UTF-8
R
false
false
14,183
r
global_editing_index.R
#cell-wise editing proportion testing #devtools::install_github("tidymodels/broom") library(tidyverse) here::here() BL_joinKey <- readRDS('data/phs000834/BL_metadata_highQualCells.Rds') mapping_stats_BL <- readRDS("data/mapping_output/mapping_stats_BL.Rds") dt_filt <- readRDS("data/phs000834/dt_filt.Rds") dt_siteStats_TDjoin <- readRDS("data/phs000834/dt_siteStats_TDjoin.Rds") #samtools bqsr depth; contains candidate editing sites in each neuron covered by at least 5 reads. gte5_DPsites <- read_tsv("data/samtools_depth_output/samDepth_BQSR_sitesGTE5.out", col_names = FALSE, col_types = cols('X2'=col_character())) %>% magrittr::set_colnames(c('sample','siteID','depth')) %>% filter(sample %in% BL_joinKey$SRA) %>% filter(siteID %in% dt_siteStats_TDjoin$siteID) #plot covered sites per cell gte5_DPsites %>% count(sample) %>% ggplot(aes(x=n)) + geom_histogram(fill='dodger blue') gte5_DPsites %>% count(sample) %>% summary() #plot n. cells per covered site gte5_DPsites %>% count(siteID) %>% ggplot(aes(x=n)) + geom_histogram(fill='dodger blue') gte5_DPsites %>% count(siteID) %>% summary() #### ### ### ### ### ### ### ### ### ### ### ### ### ### GLOBAL EDITING INDEX ### ### ### #### ### ### ### ### ### ### ### ### ### ### ### Calculate cell-wise editing as a proportion of covered sites ### cell_edProp <- left_join(gte5_DPsites %>% count(sample), #n sites covered ≥ 5 high-quality (de-duplicated BQSR) reads per cell. 
dt_filt %>% filter(siteID %in% dt_siteStats_TDjoin$siteID) %>% count(sample), by='sample', suffix=c('_totalCov',"_ed")) %>% mutate(edProp = n_ed/n_totalCov) saveRDS(cell_edProp, "data/stat_tests/GEI.Rds") cell_edProp %>% left_join(BL_joinKey,by=c('sample'='SRA')) %>% filter(neuType!="NoN") %>% ggplot(aes(x=area,y=edProp, fill=neuType,col=neuType)) + geom_point(position=position_jitterdodge(jitter.width = 0.2,dodge.width = 0.8), cex=0.25) + geom_boxplot(alpha=0) + ylab('Proportion of covered sites edited') + xlab('Brodmann area') #### Neuronal GroupID #### #uniquely mapped reads mapping_stats_BL %>% left_join(BL_joinKey,by=c('sample'='SRA')) %>% filter(neuType!="NoN") %>% ggplot(aes(x=Group_ID,y=Uniquely_mapped_reads_number, fill=neuType,col=neuType)) + geom_point(position=position_jitterdodge(jitter.width = 0.2,dodge.width = 0.8), cex=0.25) + geom_boxplot(alpha=0) #GEI # add combined Ex and In groups ## FIG 4c cell_edProp %>% left_join(BL_joinKey,by=c('sample'='SRA')) %>% filter(neuType!="NoN") %>% mutate("Collapsed" = neuType) %>% gather(key,value,Group_ID,`Collapsed`) %>% mutate(key=factor(key,levels=c('Group_ID','Collapsed'))) %>% ggplot(aes(x=value,y=edProp, fill=neuType,col=neuType)) + geom_point(position=position_jitterdodge(jitter.width = 0.2,dodge.width = 0.8), cex=0.25) + geom_boxplot(alpha=0) + #facet_wrap(~key) ggforce::facet_row(vars(key), scales='free_x',space='free') + xlab('Neuronal phenotype') + ylab('Proportion of sites edited') + theme_grey() + theme(legend.position = "NONE") ggsave('charts/Figure_4c.pdf', width=7,height=3.5) ## FIG 4b cell_edProp %>% left_join(BL_joinKey,by=c('sample'='SRA')) %>% filter(neuType!="NoN") %>% mutate(`Neuronal type`=neuType) %>% mutate(area=factor(area,levels=paste0('BA', c(8,10,21,22,41,17)))) %>% ggplot(aes(x=area,y=edProp, fill=`Neuronal type`,col=`Neuronal type`)) + geom_point(position=position_jitterdodge(jitter.height=0, jitter.width = 0.15,dodge.width = 0.8),alpha=0.5, cex=0.25) + geom_boxplot(alpha=0) + 
xlab('Brodmann area') + ylab('Proportion of sites edited') + theme(legend.position='Null') ggsave('charts/4b.pdf',width=7,height=3.5) #Calculate mean cell type proportions across cortical regions BL_joinKey %>% group_by(area) %>% filter(neuType !="NoN") %>% count(neuType) %>% mutate(prop=n/sum(n)) %>% group_by(neuType) %>% summarize(meanProp = mean(prop)) #Count neuronal type per cortical region p1 <- cell_edProp %>% left_join(BL_joinKey,by=c('sample'='SRA')) %>% filter(neuType!="NoN") %>% mutate(`Neuronal type`=neuType) %>% mutate(area=factor(area,levels=paste0('BA', rev(c(8,10,21,22,41,17))))) %>% ggplot(aes(x=area,y=edProp, fill=`Neuronal type`,col=`Neuronal type`)) + geom_point(position=position_jitterdodge(jitter.height=0, jitter.width = 0.15,dodge.width = 0.8),alpha=0.5, cex=0.25) + geom_boxplot(alpha=0) + xlab('Brodmann area') + ylab('Proportion of sites edited') + theme(legend.position='bottom') + coord_flip() source('code/colour_palettes.R') col_mat_full <- matrix(rep(c(drsimonj_pal('hot')(8),drsimonj_pal('cool')(8)),16),ncol=16,nrow=16) p2 <- cell_edProp %>% left_join(BL_joinKey,by=c('sample'='SRA')) %>% mutate(area=factor(area,levels = paste0('BA', c(8,10,21,22,41,17)))) %>% filter(neuType!="NoN") %>% mutate(`Neuronal type`=neuType) %>% group_by(area,neuType,Group_ID) %>% summarize("N. nuclei"=n()) %>% ggplot(aes(x=neuType,y=`N. 
nuclei`)) + geom_col(aes(fill=Group_ID),position='stack',col='white') + xlab('') + coord_flip() + theme(legend.position = 'bottom') + scale_fill_manual(values=col_mat_full) + ggforce::facet_col(vars(area), scales='free',space='free') cowplot::plot_grid(p1 + theme_grey(), p2 + theme_grey() + theme(legend.position='bottom'), labels='AUTO',align='h',axis='b') Fig4a <- p1 + theme_grey() + theme(legend.position='bottom') ggsave(plot=Fig4a, 'charts/Fig4a.pdf',width=4,height=7) Fig4b <- p2 + theme_grey() + theme(legend.position='bottom') ggsave(plot=Fig4b, 'charts/Fig4b.pdf',width=4,height=7) ###### ###### ###### ###### ###### ###### ###### ###### ###### ###### ###### ###### ###### ###### broom::tidy(summary(lm(map_sum ~ factor(neuType), data = mapping_stats_BL %>% left_join(BL_joinKey, by = c('sample'='SRA')) %>% filter(neuType!="NoN") ))) #No significant difference in read mapping numbers for excitatory vs inhibitory neurons broom::tidy(summary( lm(map_sum ~ factor(area), data = mapping_stats_BL %>% left_join(BL_joinKey, by = c('sample'='SRA')) %>% filter(neuType!="NoN") %>% mutate(map_sum = Uniquely_mapped_reads_number + Number_of_reads_mapped_to_multiple_loci)))) %>% arrange(p.value) #Significant differences in read total mapping per cortical region. broom::tidy(summary( lm(map_sum ~ factor(Group_ID), data = mapping_stats_BL %>% left_join(BL_joinKey, by = c('sample'='SRA')) %>% filter(neuType!="NoN") %>% mutate(map_sum = Uniquely_mapped_reads_number + Number_of_reads_mapped_to_multiple_loci)))) %>% arrange(p.value) #Significant differences in total read mapping per neuronal sub-group: Ex2, In6, In7 and In8 are different to Ex1. 
#### TEST GEI by neuronal sub-type (Group_ID) #### broom::tidy(summary( lm(edProp ~ factor(Group_ID), data = cell_edProp %>% left_join(BL_joinKey, by = c('sample'='SRA')) %>% filter(neuType!="NoN")))) %>% arrange(p.value) %>% mutate(FDR=p.adjust(p.value,method="BH")) %>% filter(p.value > 0.05) #all sub-groups EXCEPT for Ex8 and In3 have GEI significantly different to Ex1. #some relationship between mapping stats / total coverage and editing proportion mapping_stats_BL %>% left_join(cell_edProp,by='sample') %>% ggplot(aes(x=map_sum,y=edProp)) + geom_hex(bins=100) + geom_smooth(method="lm") mapping_stats_BL %>% left_join(cell_edProp,by='sample') %>% ggplot(aes(x=n_totalCov, y=edProp)) + geom_hex(bins=100) + geom_smooth(method="lm") #test collapsed neuType controlling for read coverage broom::tidy(summary( lm(edProp ~ factor(neuType) + n_totalCov, # + map_sum + Number_of_reads_mapped_to_multiple_loci data = cell_edProp %>% left_join(BL_joinKey, by = c('sample'='SRA')) %>% filter(neuType!="NoN") %>% left_join(mapping_stats_BL,by='sample')))) %>% arrange(p.value) %>% mutate(FDR=p.adjust(p.value,method="BH")) #control for n sites covered [and/or sum of mapped reads] #test Group_ID lm_Group_ID_GEI <- broom::tidy(summary( lm(edProp ~ factor(Group_ID) + n_totalCov , # + map_sum data = cell_edProp %>% left_join(BL_joinKey, by = c('sample'='SRA')) %>% filter(neuType!="NoN") %>% left_join(mapping_stats_BL,by='sample')))) %>% arrange(estimate) %>% mutate(FDR=p.adjust(p.value,method="BH")) lm_Group_ID_GEI %>% filter(FDR>0.05) #GEI differences remain after controlling for site coverage. 
#test cortical area ± total coverage lm_area_GEI <- broom::tidy(summary( lm(edProp ~ factor(area), data = cell_edProp %>% left_join(BL_joinKey, by = c('sample'='SRA')) %>% filter(neuType!="NoN") %>% left_join(mapping_stats_BL,by='sample')))) %>% arrange(estimate) lm_area_GEI_ctrlCov <- broom::tidy(summary( lm(edProp ~ factor(area) + n_totalCov, data = cell_edProp %>% left_join(BL_joinKey, by = c('sample'='SRA')) %>% filter(neuType!="NoN") %>% left_join(mapping_stats_BL,by='sample')))) %>% arrange(estimate) rbind(lm_area_GEI %>% mutate(covar='none'), lm_area_GEI_ctrlCov %>% mutate(covar = 'n_totalCov')) %>% select(term,estimate,covar) %>% spread(covar,estimate) %>% filter(term!='(Intercept)', term!='n_totalCov') %>% ggplot(aes(x=none,y=n_totalCov,col=term)) + geom_point() + geom_smooth(method='lm') + geom_hline(yintercept = 0)+ geom_vline(xintercept = 0) #only BA22 sign changes. #for STables saveRDS(lm_area_GEI, 'data/stat_tests/lm_area_GEI.Rds') saveRDS(lm_Group_ID_GEI, 'data/stat_tests/lm_Group_ID_GEI.Rds') ###### ###### ###### ###### ###### ###### ###### ###### ###### ###### ###### ###### ###### ###### ###### ###### ###### ###### ###### ###### ###### ###### ###### ###### #Do inhibitory neurons have greater editing in shared genes; or editing in e.g. inhib.-specific genes? 
dt_depth <- left_join(gte5_DPsites, dt_filt %>% filter(siteID %in% gte5_DPsites$siteID), by=c('siteID','sample')) %>% left_join(BL_joinKey %>% select(SRA,neuType,area), by=c('sample'='SRA')) # saveRDS(dt_depth, 'data/phs000834/dt_depth.Rds') # dt_depth <- readRDS('data/phs000834/dt_depth.Rds') dt_depth_summ <- dt_depth %>% filter(neuType!="NoN") %>% group_by(sample, neuType) %>% summarize(meanDP = mean(depth),medDP = median(depth)) dt_depth_summ %>% ggplot(aes(x=medDP)) + geom_histogram(aes(fill=neuType)) + facet_wrap(~ neuType, ncol = 1, scales='free_y') + xlab('Median editing site coverage') dt_depth_summ %>% group_by(neuType) %>% summarize(meanMED = mean(medDP)) broom::tidy(lm(medDP ~ factor(neuType), data = dt_depth_summ)) # --> Inhibitory neurons have an averaeg median edSite coverage 0.5 units greater than excitatory neurons. #sample 500 cellsof each neuronal type set.seed(1234); sample_Ex_In <- dt_depth %>% select(neuType, sample) %>% distinct() %>% filter(neuType!="NoN") %>% group_by(neuType) %>% sample_n(500) dt_depth %>% filter(str_detect(siteID,"1_")) %>% #sites on chromosome 1 filter(!is.na(alt_al)) %>% filter(sample %in% sample_Ex_In$sample) %>% group_by(siteID) %>% mutate(nCells = n()) %>% ungroup() %>% select(nCells, everything()) %>% arrange(desc(nCells)) %>% #--> exclude marker genes filter(nCells > 200) %>% count(neuType) #For inhibitory neurons there are 82 sites in chr1 in > 200 cells; excitatory have 160 sites in chr1 in >200 cells. #Do In. neurons have more sites than Ex., or greater editing per site? dt_depth %>% count(sample, neuType) %>% filter(neuType!="NoN") %>% ggplot(aes(x=n)) + geom_histogram(aes(fill=neuType),bins=50) dt_depth %>% filter(!is.na(alt_al)) %>% count(sample, neuType) %>% filter(neuType!="NoN") %>% ggplot(aes(x=n)) + geom_histogram(aes(fill=neuType),bins=50) #Ex have more editing sites covered, and more edited sites, than Inhibitory... 
dt_depth %>% select(sample,siteID,depth,alt_al,neuType) %>% filter(neuType!="NoN") %>% mutate(status = ifelse(is.na(alt_al),0,1)) %>% group_by(neuType) %>% summarize(meanEd = mean(status)) #But In neurons have more editing as a _proportion_ of sites covered. #N. and % of sites are restricted to In / Ex neurons dt_depth %>% filter(neuType!="NoN") %>% count(neuType) dt_depth %>% filter(neuType!="NoN") %>% group_by(neuType) %>% count(siteID) %>% arrange(siteID) %>% ungroup() %>% count(siteID,sort=TRUE) %>% count(n) #Only 253 sites were unique one neuronal type. 99.4% sites detected in both cell types. 1-(253/sum(253,40608)) ### ### ### ### ### ### ### ### ### # If ensemble averaging is suppressing novel sites, we expect those sites to be expressed in small numbers of cells. dt_siteStats_filt <- readRDS('data/phs000834/dt_siteStats_filt.Rds') dt_siteStats_filt %>% select(site_type,n_Cells) %>% mutate(site_status = ifelse(str_detect(site_type,'Novel'),'Novel','Reported')) %>% ggplot(aes(x=site_type,y=log10(n_Cells))) + geom_boxplot(aes(col=site_type),outlier.alpha = 0) + geom_jitter(width=0.05,cex=0.1,alpha=0.25, aes(col=site_type)) + coord_flip() + scale_color_brewer(palette='Paired') + xlab('Site type') + theme(legend.position = 'None') ggsave('charts/Ncells_per_site_bySiteType.pdf',width=6,height=5) #### #### #### #### #### #### #### #### dt_siteStats_filt %>% select(site_type,n_Cells) %>% mutate(site_status = ifelse(str_detect(site_type,'Novel'),'Novel','Reported')) %>% mutate(log_nCells = log10(n_Cells), site_status = factor(site_status)) %>% do(broom::tidy(lm(log_nCells ~ site_status, data = .))) dt_siteStats_filt %>% select(site_type,n_Cells) %>% mutate(site_status = ifelse(str_detect(site_type,'Novel'),'Novel','Reported')) %>% mutate(log_nCells = log10(n_Cells), site_status = factor(site_status)) %>% do(broom::tidy(lm(log_nCells ~ site_type, data = .))) #Indeed, previously reported nonRep and rep-nonAlu sites are edited in significantly more cells than Alu 
sites; \ # whereas novel sites in each class are detected in significantly fewer cells on average than reported sites.
3d5e1f7528989d5ac9da1b690a295975ce6a7e1e
26f61017226007122b72c7076023b7eee4ff4d73
/cachematrix.R
dd86a94aaeec9414667c58a5a3588b529ecb7613
[]
no_license
hessier00/ProgrammingAssignment2
5612617d8d147e485d795612f9aa018459cb4c44
71f04dd52a185360e6b9ffcefce5736764c0565c
refs/heads/master
2021-01-24T19:52:04.006785
2015-07-24T09:55:31
2015-07-24T09:55:31
39,617,724
0
0
null
2015-07-24T07:23:24
2015-07-24T07:23:23
null
UTF-8
R
false
false
3,138
r
cachematrix.R
## This pair of functions efficiently computes matrix inverses.
## Once the inverse of a specific matrix has been calculated it is
## cached, so later requests return the stored answer without
## re-performing the underlying calculation.

## makeCacheMatrix wraps a plain matrix with caching support.
##
## Returns a list of four accessor functions:
##   setMatrix(newMatrix) : replace the wrapped matrix (drops the cache)
##   getMatrix()          : return the wrapped matrix
##   setInverse(inverse)  : store a computed inverse in the cache
##   getInverse()         : return the cached inverse, or NULL
makeCacheMatrix <- function(originalMatrix = matrix()) {
    ## cachedInverse stores the inverse of originalMatrix.
    ## NULL indicates that no inverse has been cached yet.
    cachedInverse <- NULL

    ## Reset the wrapped matrix without rebuilding the whole object.
    setMatrix <- function(newMatrix) {
        originalMatrix <<- newMatrix
        ## The matrix (probably) changed, so invalidate the cache.
        cachedInverse <<- NULL
    }

    ## Cache an externally computed inverse.  Note: this trusts the
    ## caller to have computed the inverse correctly.
    setInverse <- function(inverse = matrix()) cachedInverse <<- inverse

    ## Return the matrix wrapped by this object.
    getMatrix <- function() originalMatrix

    ## Return the cached inverse from the enclosing environment,
    ## even when it is still NULL.
    getInverse <- function() cachedInverse

    ## Expose the accessors by name so they can be reached externally
    ## by subsetting the returned list.
    list(setMatrix = setMatrix,
         getMatrix = getMatrix,
         setInverse = setInverse,
         getInverse = getInverse)
}

## cacheSolve() takes an object created with makeCacheMatrix() and
## returns its inverse, using the cached value when one exists and
## otherwise computing and caching it.
##
## theMatrix : an object created by makeCacheMatrix()
## ...       : additional arguments forwarded to solve()
cacheSolve <- function(theMatrix, ...) {
    ## Fetch any previously cached inverse from theMatrix.
    cachedInverse <- theMatrix$getInverse()

    ## A non-NULL value means a valid cached inverse exists.
    if (!is.null(cachedInverse)) {
        message("Retrieving cached matrix inverse")
        ## Return the cached inverse, skipping the computation.
        return(cachedInverse)
    }

    ## No cached value: compute the inverse and store it.
    original <- theMatrix$getMatrix()
    ## BUG FIX: forward ... so callers can pass options through to
    ## solve() (previously ... was accepted but silently ignored).
    inverse <- solve(original, ...)
    theMatrix$setInverse(inverse)
    inverse
}
ad5ec1c9cf0c82087fcb6f3c2adb56f8445f2969
b1c1e9d146157d14c142d24a9e02b95b3a31f584
/Doutorado/Chapter-2/Ecological Exploration/Ecological exploration of blowdown.R
9174e5412bfee3e0017254340d1a820700e2c1b1
[]
no_license
Eduardoqm/Science-Repository
1ef37904f290cbbea3c060c0a4cf37265f60b699
d655a12fb833a9dd128672576c93cc6f9303f6ea
refs/heads/master
2023-07-17T08:24:52.460738
2023-07-05T17:22:07
2023-07-05T17:22:07
200,397,253
1
1
null
null
null
null
UTF-8
R
false
false
1,582
r
Ecological exploration of blowdown.R
######################################
# Ecological exploration of blowdown #
#                                    #
# Eduardo Q Marques 10-03-2022       #
######################################

library(tidyverse)
library(reshape2)

#First Part =================================================================================
#Merged field data
setwd("C:/Users/Eduardo Q Marques/Documents/Research/Doutorado/Banco de Dados Tanguro/Area1-plot/Campo vento")
df = read.csv("blowdown_full_update_2021.csv", sep = ",")

#Abundance of time series -------------------------------------------------------------------
# Each census year has a diameter column ("dap.YY"); a tree counts as
# present in that census when its dap value is not missing.
# REFACTOR: the eight copy-pasted filter/length blocks are collapsed
# into a single vectorised count over the census years.
# (The 2006 and 2007 censuses were commented out in the original code
# and are therefore omitted here as well.)
census_years <- c(4, 8, 10, 11, 12, 14, 16, 18)
dap_cols <- sprintf("dap.%02d", census_years)

# Count the trees recorded in one census column, using the same test
# as the original code: keep rows whose dap value is neither NA nor
# the literal string "NA", then count them.
count_trees <- function(col) {
  nrow(df %>% filter(.data[[col]] != "NA"))
}

# Abundance per census year (e.g. "dap.04" -> 2004).
abu <- data.frame(
  Abundance = unname(vapply(dap_cols, count_trees, integer(1))),
  Year = 2000 + census_years
)

# Bar chart of tree abundance over the census years.
ggplot(abu, aes(x = Year, y = Abundance)) +
  geom_col()
abfe18028f08f8c65c5b983d13a78d9623e39c87
38c16978738ffac95bfcf1e78fcb243fc4195305
/man/create_q_vector_multi_kern.Rd
905fe6d208df81ec92869fb0ce31cc459824fbd1
[]
no_license
ebenmichael/balancer
ca3e2f733c52450d8e7b5b1a4ebd0d182713d4eb
55173367e2c91f1a3ce47070f8430c6686a049bd
refs/heads/master
2023-07-10T20:52:54.547666
2023-06-20T14:40:01
2023-06-20T14:40:01
129,783,286
7
3
null
2023-05-16T19:21:44
2018-04-16T17:47:11
R
UTF-8
R
false
true
517
rd
create_q_vector_multi_kern.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multilevel_kernel_QP.R
\name{create_q_vector_multi_kern}
\alias{create_q_vector_multi_kern}
\title{Create the q vector for a QP that solves min_x 0.5 * x'Px + q'x}
\usage{
create_q_vector_multi_kern(kern_mats, trtz)
}
\arguments{
\item{kern_mats}{List of kernel matrices, one per level}

\item{trtz}{Treatment indicator vector}
}
\value{
q vector
}
\description{
Create the q vector for a QP that solves min_x 0.5 * x'Px + q'x
}
059ac8442c4b614c500a6ee82621821e6044c6ad
8a313266928dc5e985050ebaeb1feb6fe43a2929
/novo_data_preload.R
bc9adcebdd2c6f6da132e1ad515a1ca91317396f
[]
no_license
mt-christo/ej
b4a2c283285256f9fd3961b0f67dd89be20f890a
265a9c171eb64e5e4e5ed7d2e2053908b1d946d4
refs/heads/master
2022-12-12T12:03:41.654039
2019-10-26T03:22:06
2019-10-26T03:22:06
102,653,282
0
1
null
2022-12-08T00:54:12
2017-09-06T20:07:26
R
UTF-8
R
false
false
107
r
novo_data_preload.R
# NOVO_UNI <<- load_uni('data-20190506', c('equity', 'equity_metrics', 'h_usd', 'libors'), list())
e361f1e29f7ff5916be8838da4be0f6a1e54e188
d00f0bb26883c1f3073b530bb352764f4b3843f1
/logistic_regression_practice.R
5ff05683df09203cab8822f7280ef646b61e7e35
[]
no_license
sandyqlin/R_Projects
9e9ee1168b2c6e51aadd354e4db26a9a235bdc7d
cf452941ac1a5d31448d327255240bc6fb8dbc39
refs/heads/master
2020-05-20T06:04:30.382698
2015-08-31T23:55:04
2015-08-31T23:55:04
null
0
0
null
null
null
null
UTF-8
R
false
false
245
r
logistic_regression_practice.R
## Practice: logistic regression on three groups of 10 observations.

# Group label (0, 1, 2), ten observations per group.
x <- rep(c(0, 1, 2), each = 10)

# Dummy indicators: x1 flags the third group, x2 flags the second.
x1 <- rep(c(0, 1), times = c(20, 10))
x2 <- rep(c(0, 1, 0), times = c(10, 10, 10))

# Binary outcome: 2/10, 4/10 and 6/10 successes in the three groups.
y <- rep(c(1, 0, 1, 0, 1, 0), times = c(2, 8, 4, 6, 6, 4))

# Fit the logistic model and print the coefficient table.
fit <- glm(y ~ x1 + x2, family = binomial())
summary(fit)
1861d0526b606ca2a7ddae6342bc810ccb3622ff
9fc4d25ba7ddfc50c4a82dc75abc5af749b239cc
/man/diatobc.Rd
2a096e8db8369b87104e04035149f05bfa44199b
[]
no_license
fkeck/diatobc
123a804a07edf92f5ebcf8ebfa3f68ebc144750c
5346927a5da772a183ff50f94897fdc21eef8569
refs/heads/master
2020-03-10T11:58:14.466485
2018-04-15T09:46:37
2018-04-15T09:46:37
129,366,839
0
0
null
null
null
null
UTF-8
R
false
true
209
rd
diatobc.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/diatobc-package.R \docType{package} \name{diatobc} \alias{diatobc} \alias{diatobc-package} \title{diatobc} \description{ diatobc }
0a9d54c54f6537ce0fdb36311af67f1fc7ac4c76
7c4c32c31fa4caab7532a3c11cd9440d144cb8e4
/Visual_rep.R
68c06b96155d2efa688b17dd4f0ad0e2306a0731
[]
no_license
joseconde1997/Business-analytics-with-R
12edcf5d86d2bb903955d2b1bd5d65142e3c7ad1
814f3d712d51110f7e67283c0286a532ebb278e7
refs/heads/master
2021-03-03T15:53:14.009529
2020-03-09T07:47:49
2020-03-09T07:47:49
245,971,270
0
0
null
null
null
null
UTF-8
R
false
false
758
r
Visual_rep.R
## Visual exploration of the `smart` smartphone dataset.
## NOTE(review): `smart` and `d` are expected to be defined before this
## script runs (loaded elsewhere) -- confirm against the calling code.

library(tidyverse)
library(knitr)

## Table of distinct brands, models, display types and operating systems.
## BUG FIX: `booktabs = TRUE` is a kable() argument; previously it was
## passed inside summarize(), which added a spurious constant column
## named `booktabs` instead of enabling booktabs table formatting.
kable(
  smart %>%
    summarize(
      n_brands   = n_distinct(brand),
      n_models   = n_distinct(model),
      n_displays = n_distinct(display),
      n_os       = n_distinct(os)
    ),
  booktabs = TRUE
)

## Price vs pixel density, labelling each point with the model name
## (overlapping labels are suppressed).
ggplot(smart, aes(ppi, price, label = model)) +
  geom_text(aes(label = model), size = 2.5, check_overlap = TRUE)

## Same scatter restricted to Samsung handsets.
samsung <- smart %>% filter(brand == "Samsung")
ggplot(samsung, aes(ppi, price, label = model)) +
  geom_text(aes(label = model), size = 2.5, check_overlap = TRUE)

## Number of handsets per brand, with rotated x-axis labels.
ggplot(smart) +
  geom_bar(aes(x = brand, fill = brand)) +
  theme(legend.position = "none",
        axis.text.x = element_text(angle = 90, hjust = 1, size = 8))

## Same bar chart restricted to rows of `d` with count > 20.
## NOTE(review): `d` presumably holds per-brand counts -- verify how it
## is built upstream.
ggplot(filter(d, count > 20)) +
  geom_bar(aes(x = brand, fill = brand)) +
  theme(legend.position = "none",
        axis.text.x = element_text(angle = 90, hjust = 1, size = 11))
9dab3fbbb06ac69ddbe17b27553b85a7c528b713
7fdf101f8063e63697e4a174b104b68e57419315
/Plot_1.R
054d7d4b77a29be8d4deb4b9fb7af24be194c632
[]
no_license
JoseLuis1966/ExData_Plotting1
f6edd8b7bd93420c0350908dfae43e5fffb4d3ac
be743389e3d68e3b502f6a4c553a7f61f6e8cca3
refs/heads/master
2021-01-01T05:56:57.032605
2017-07-15T14:04:40
2017-07-15T14:04:40
97,315,301
0
0
null
2017-07-15T12:43:46
2017-07-15T12:43:46
null
UTF-8
R
false
false
1,341
r
Plot_1.R
## Course project: histogram of Global Active Power for 1-2 Feb 2007
## from the UCI "household power consumption" dataset, saved as a PNG.

## Load packages.
## NOTE(review): dplyr, data.table and lubridate are loaded but no
## function from them is used below -- presumably kept from a shared
## template; confirm before removing.
library(dplyr)
library(data.table)
library (lubridate)

## Set the working directory (machine-specific path).
setwd("C:/Users/NACHO/Documents/cursoR/graficos/")

## Read the raw data: ';'-separated, '?' marks missing values, and the
## seven measurement columns are read as numeric.
consumo <- read.table("household_power_consumption.txt", header=TRUE, sep=";", na.strings = "?", colClasses = c('character','character','numeric','numeric','numeric','numeric','numeric','numeric','numeric'))

## Parse the Date column (stored as dd/mm/yyyy).
consumo$Date <- as.Date(consumo$Date, "%d/%m/%Y")

## Keep only the two target days, 2007-02-01 and 2007-02-02.
consumo <- subset(consumo,Date >= as.Date("2007-2-1") & Date <= as.Date("2007-2-2"))

## Drop rows with any missing measurement.
consumo<- consumo[complete.cases(consumo),]

## Build a combined "date time" string for each row.
dateTime <- paste(consumo$Date, consumo$Time)

## NOTE(review): setNames() with a single name attaches "DateTime" to
## the first element of the vector only; it does not rename a column
## and has no visible effect downstream -- confirm before removing.
dateTime <- setNames(dateTime, "DateTime")

## Remove the now-redundant Date and Time columns ...
consumo <- consumo[ ,!(names(consumo) %in% c("Date","Time"))]

## ... and prepend the combined dateTime column.
consumo <- cbind(dateTime, consumo)

## Convert the combined column to POSIXct.
consumo$dateTime <- as.POSIXct(dateTime)

## Plot 1: red histogram of Global Active Power (kilowatts) on the
## screen device.
hist(consumo$Global_active_power, main="Global Active Power", xlab = "Global Active Power (kilowatts)", col="red")

## Copy the screen plot to a 480x480 PNG and close the PNG device.
dev.copy(png,"plot_1.png", width=480, height=480)
dev.off()
fde8cf6b456d794158123e60506797475afa24cc
2d277476733ba48dee4bec8bacc6c8dfbb86717b
/tests/testthat/test-BuyseTest-checkValues.R
e0f658196e56caedec03e86671fea6b85d5f6b27
[]
no_license
cran/BuyseTest
ef75b3c7f93a476b35786e485ae4ab2e56c8d90f
a3dfe49778c8d5e2f0b987dd2e9cfbd6f01cb479
refs/heads/master
2023-04-14T00:41:38.778354
2023-03-20T21:30:02
2023-03-20T21:30:02
135,258,510
0
0
null
null
null
null
UTF-8
R
false
false
48,633
r
test-BuyseTest-checkValues.R
if(FALSE){ library(mvtnorm) library(testthat) library(BuyseTest) library(data.table) } context("Check BuyseTest without strata") ## * Settings n.patients <- c(90,100) BuyseTest.options(check = TRUE, keep.pairScore = TRUE, method.inference = "none", trace = 0) ## * Simulated data set.seed(10) dt.sim <- simBuyseTest(n.T = n.patients[1], n.C = n.patients[2], argsBin = list(p.T = list(c(0.5,0.5),c(0.25,0.75))), argsCont = list(mu.T = 1:3, sigma.T = rep(1,3)), argsTTE = list(scale.T = 1:3, scale.censoring.T = rep(1,3))) ## butils::object2script(dt.sim) dt.sim$status1.noC <- 1 dtS.sim <- rbind(cbind(dt.sim, strata = 1), cbind(dt.sim, strata = 2), cbind(dt.sim, strata = 3)) ## * Binary endpoint ## ** No strata test_that("BuyseTest - binary (no strata)", { BT.bin <- BuyseTest(treatment ~ bin(toxicity1), data = dt.sim) BT2 <- BuyseTest(data = dt.sim, endpoint = "toxicity1", treatment = "treatment", type = "bin") ## *** test against fixed value test <- list(favorable = as.double(coef(BT.bin, statistic = "count.favorable", cumulative = FALSE)), unfavorable = as.double(coef(BT.bin, statistic = "count.unfavorable", cumulative = FALSE)), neutral = as.double(coef(BT.bin, statistic = "count.neutral", cumulative = FALSE)), uninf = as.double(coef(BT.bin, statistic = "count.uninf", cumulative = FALSE)), favorable = as.double(coef(BT.bin, statistic = "favorable", cumulative = TRUE)), unfavorable = as.double(coef(BT.bin, statistic = "unfavorable", cumulative = TRUE)), netChange = as.double(coef(BT.bin, statistic = "netBenefit", cumulative = TRUE)), winRatio = as.double(coef(BT.bin, statistic = "winRatio", cumulative = TRUE)) ) GS <- list(favorable = c(1968) , unfavorable = c(2478) , neutral = c(4554) , uninf = c(0) , favorable = c(0.21866667) , unfavorable = c(0.27533333) , netChange = c(-0.05666667) , winRatio = c(0.79418886) ) ## butils::object2script(test, digit = 6) expect_equal(test, GS, tol = 1e-6, scale = 1) BT.bin@call <- list() BT2@call <- list() expect_equal(BT.bin,BT2) ## 
*** count pairs tableS <- summary(BT.bin, print = FALSE, percentage = FALSE)$table dt.tableS <- as.data.table(tableS)[strata == "global"] expect_equal(dt.tableS[,total], unname(dt.tableS[,favorable + unfavorable + neutral + uninf]) ) }) ## ** Strata test_that("BuyseTest - binary (strata)", { BT.bin <- BuyseTest(treatment ~ bin(toxicity1) + strata, data = dtS.sim) tableS <- summary(BT.bin, print = FALSE, percentage = FALSE)$table dt.tableS <- as.data.table(tableS) ## *** count pairs expect_equal(dt.tableS[,total], unname(dt.tableS[,favorable + unfavorable + neutral + uninf] )) expect_equal(dt.tableS[,total], c(27000,9000,9000,9000)) expect_equal(dt.tableS[,favorable], c(5904, 1968, 1968, 1968)) expect_equal(dt.tableS[,unfavorable], c(7434, 2478, 2478, 2478)) expect_equal(dt.tableS[,neutral], c(13662, 4554, 4554, 4554)) expect_equal(dt.tableS[,uninf], c(0, 0, 0, 0)) ## *** test summary statistic expect_equal(dt.tableS[,delta], c(-0.05666667, -0.05666667, -0.05666667, -0.05666667), tol = 1e-6) expect_equal(dt.tableS[,Delta], c(-0.05666667, NA, NA, NA), tol = 1e-6) }) ## * Continuous endpoint ## ** No strata test_that("BuyseTest - continuous (no strata)", { BT.cont <- BuyseTest(treatment ~ cont(score1, 1) + cont(score2, 0), data = dt.sim) BT2 <- BuyseTest(data = dt.sim, endpoint = c("score1","score2"), treatment = "treatment", type = c("cont","cont"), threshold = c(1,0) ) ## *** test against fixed value test <- list(favorable = as.double(coef(BT.cont, statistic = "count.favorable", cumulative = FALSE)), unfavorable = as.double(coef(BT.cont, statistic = "count.unfavorable", cumulative = FALSE)), neutral = as.double(coef(BT.cont, statistic = "count.neutral", cumulative = FALSE)), uninf = as.double(coef(BT.cont, statistic = "count.uninf", cumulative = FALSE)), favorable = as.double(coef(BT.cont, statistic = "favorable", cumulative = TRUE)), unfavorable = as.double(coef(BT.cont, statistic = "unfavorable", cumulative = TRUE)), netChange = as.double(coef(BT.cont, statistic = 
"netBenefit", cumulative = TRUE)), winRatio = as.double(coef(BT.cont, statistic = "winRatio", cumulative = TRUE)) ) GS <- list(favorable = c(2196, 2142) , unfavorable = c(2501, 2161) , neutral = c(4303, 0) , uninf = c(0, 0) , favorable = c(0.244, 0.482) , unfavorable = c(0.27788889, 0.518) , netChange = c(-0.03388889, -0.036) , winRatio = c(0.87804878, 0.93050193) ) ## butils::object2script(test, digit = 6) BT.cont@call <- list() BT2@call <- list() expect_equal(test, GS, tol = 1e-6, scale = 1) expect_equal(BT.cont,BT2) ## *** count pairs tableS <- summary(BT.cont, print = FALSE, percentage = FALSE)$table dt.tableS <- as.data.table(tableS)[strata == "global"] expect_equal(dt.tableS[,total], unname(dt.tableS[, favorable + unfavorable + neutral + uninf] )) }) ## ** Strata test_that("BuyseTest - continuous (strata)", { BT.cont <- BuyseTest(treatment ~ cont(score1, 1) + cont(score2, 0) + strata, data = dtS.sim) tableS <- summary(BT.cont, print = FALSE, percentage = FALSE)$table dt.tableS <- as.data.table(tableS) ## *** count pairs expect_equal(dt.tableS[,total], unname(dt.tableS[,favorable + unfavorable + neutral + uninf] )) expect_equal(dt.tableS[,total], c(27000, 9000, 9000, 9000, 12909, 4303, 4303, 4303)) expect_equal(dt.tableS[,favorable], c(6588, 2196, 2196, 2196, 6426, 2142, 2142, 2142)) expect_equal(dt.tableS[,unfavorable], c(7503, 2501, 2501, 2501, 6483, 2161, 2161, 2161)) expect_equal(dt.tableS[,neutral], c(12909, 4303, 4303, 4303, 0, 0, 0, 0)) expect_equal(dt.tableS[,uninf], c(0, 0, 0, 0, 0, 0, 0, 0)) ## *** test summary statistic expect_equal(dt.tableS[,delta], c(-0.03388889, -0.03388889, -0.03388889, -0.03388889, -0.00211111, -0.00211111, -0.00211111, -0.00211111), tol = 1e-6) expect_equal(dt.tableS[,Delta], c(-0.03388889, NA, NA, NA, -0.036, NA, NA, NA), tol = 1e-6) }) ## * Time to event endpoint ## ** No strata - same endpoint for(method in c("Gehan","Peron")){ ## method <- "Gehan" ## method <- "Peron" test_that(paste0("BuyseTest - tte (same, ",method,", 
no strata)"),{ BT.tte <- BuyseTest(treatment ~ tte(eventtime1, status1, threshold = 1) + tte(eventtime1, status1, threshold = 0.5) + tte(eventtime1, status1, threshold = 0.25), data = dt.sim, scoring.rule = method, correction.uninf = FALSE ) BT.1tte <- BuyseTest(treatment ~ tte(eventtime1, status1, threshold = 0.25), data = dt.sim, scoring.rule = method, correction.uninf = FALSE ) BT2 <- BuyseTest(data = dt.sim, endpoint = c("eventtime1","eventtime1","eventtime1"), status = c("status1","status1","status1"), treatment = "treatment", type = c("tte","tte","tte"), threshold = c(1,0.5,0.25), scoring.rule = method, correction.uninf = FALSE ) ## *** compatibility between BuyseTests BT.tte@call <- list() BT2@call <- list() expect_equal(BT.tte, BT2) expect_equal(sum(coef(BT.tte, statistic = "count.favorable", cumulative = FALSE)), as.double(coef(BT.1tte, statistic = "count.favorable", cumulative = FALSE))) expect_equal(sum(coef(BT.tte, statistic = "count.unfavorable", cumulative = FALSE)), as.double(coef(BT.1tte, statistic = "count.unfavorable", cumulative = FALSE))) expect_equal(coef(BT.tte, statistic = "count.neutral", cumulative = FALSE)[3], coef(BT.1tte, statistic = "count.neutral", cumulative = FALSE)) expect_equal(coef(BT.tte, statistic = "count.uninf", cumulative = FALSE)[3], coef(BT.1tte, statistic = "count.uninf", cumulative = FALSE)) expect_equal(coef(BT.tte, statistic = "netBenefit", cumulative = TRUE)[3], coef(BT.1tte, statistic = "netBenefit", cumulative = TRUE)) expect_equal(coef(BT.tte, statistic = "winRatio", cumulative = TRUE)[3], coef(BT.1tte, statistic = "winRatio", cumulative = TRUE)) ## *** test against fixed value test <- list(favorable = as.double(coef(BT.tte, statistic = "count.favorable", cumulative = FALSE)), unfavorable = as.double(coef(BT.tte, statistic = "count.unfavorable", cumulative = FALSE)), neutral = as.double(coef(BT.tte, statistic = "count.neutral", cumulative = FALSE)), uninf = as.double(coef(BT.tte, statistic = "count.uninf", 
cumulative = FALSE)), favorable = as.double(coef(BT.tte, statistic = "favorable", cumulative = TRUE)), unfavorable = as.double(coef(BT.tte, statistic = "unfavorable", cumulative = TRUE)), netChange = as.double(coef(BT.tte, statistic = "netBenefit", cumulative = TRUE)), winRatio = as.double(coef(BT.tte, statistic = "winRatio", cumulative = TRUE)) ) if(method == "Gehan"){ GS <- list(favorable = c(438, 719, 543) , unfavorable = c(325, 582, 500) , neutral = c(2284, 1569, 1084) , uninf = c(5953, 5367, 4809) , favorable = c(0.04866667, 0.12855556, 0.18888889) , unfavorable = c(0.03611111, 0.10077778, 0.15633333) , netChange = c(0.01255556, 0.02777778, 0.03255556) , winRatio = c(1.34769231, 1.27563396, 1.20824449) ) ## butils::object2script(test, digit = 8) }else if(method == "Peron"){ GS <- list(favorable = c(1289.0425448, 1452.9970531, 682.33602169) , unfavorable = c(2044.84933459, 908.62963327, 578.82862552) , neutral = c(5666.10812061, 3304.48143424, 2043.31678703) , uninf = c(0, 0, 0) , favorable = c(0.14322695, 0.30467107, 0.38048618) , unfavorable = c(0.22720548, 0.32816433, 0.39247862) , netChange = c(-0.08397853, -0.02349326, -0.01199244) , winRatio = c(0.6303851, 0.92841006, 0.96944434) ) } expect_equal(test, GS, tolerance = 1e-6, scale = 1) ## *** count pairs tableS <- summary(BT.tte, print = FALSE, percentage = FALSE)$table dt.tableS <- as.data.table(tableS)[strata == "global"] expect_equal(dt.tableS[,total], unname(dt.tableS[,favorable + unfavorable + neutral + uninf]), tolerance = 1e-1, scale = 1) ## inexact for Peron }) } ## ** No strata - different endpoints for(method in c("Gehan","Peron")){ ## method <- "Gehan" ## method <- "Peron" test_that(paste0("BuyseTest - tte (different, ",method,", no strata)"),{ BT.tte <- BuyseTest(treatment ~ tte(eventtime1, status1, threshold = 1) + tte(eventtime2, status2, threshold = 0.5) + tte(eventtime3, status3, threshold = 0.25), data = dt.sim, scoring.rule = method, correction.uninf = FALSE) BT2 <- BuyseTest(data = 
dt.sim, endpoint = c("eventtime1","eventtime2","eventtime3"), status = c("status1","status2","status3"), treatment = "treatment", type = c("tte","tte","tte"), threshold = c(1,0.5,0.25), scoring.rule = method, correction.uninf = FALSE ) test <- list(favorable = as.double(coef(BT.tte, statistic = "count.favorable", cumulative = FALSE)), unfavorable = as.double(coef(BT.tte, statistic = "count.unfavorable", cumulative = FALSE)), neutral = as.double(coef(BT.tte, statistic = "count.neutral", cumulative = FALSE)), uninf = as.double(coef(BT.tte, statistic = "count.uninf", cumulative = FALSE)), favorable = as.double(coef(BT.tte, statistic = "favorable", cumulative = TRUE)), unfavorable = as.double(coef(BT.tte, statistic = "unfavorable", cumulative = TRUE)), netChange = as.double(coef(BT.tte, statistic = "netBenefit", cumulative = TRUE)), winRatio = as.double(coef(BT.tte, statistic = "winRatio", cumulative = TRUE)) ) ## *** compatibility between BuyseTests BT.tte@call <- list() BT2@call <- list() expect_equal(BT.tte, BT2) ## *** test against fixed value if(method == "Gehan"){ GS <- list(favorable = c(438, 620, 794) , unfavorable = c(325, 561, 361) , neutral = c(2284, 339, 73) , uninf = c(5953, 6717, 5828) , favorable = c(0.04866667, 0.11755556, 0.20577778) , unfavorable = c(0.03611111, 0.09844444, 0.13855556) , netChange = c(0.01255556, 0.01911111, 0.06722222) , winRatio = c(1.34769231, 1.19413093, 1.48516439) ) ## butils::object2script(test, digit = 8) }else if(method == "Peron"){ GS <- list(favorable = c(1289.0425448, 2318.38791489, 1231.91554493) , unfavorable = c(2044.84933459, 1529.8258322, 491.18260522) , neutral = c(5666.10812061, 867.93018367, 94.79622337) , uninf = c(0, 949.96418985, 0) , favorable = c(0.14322695, 0.40082561, 0.53770511) , unfavorable = c(0.22720548, 0.39718613, 0.45176197) , netChange = c(-0.08397853, 0.00363948, 0.08594314) , winRatio = c(0.6303851, 1.00916315, 1.19023986) ) } ## *** count pairs tableS <- summary(BT.tte, print = FALSE, percentage 
= FALSE)$table dt.tableS <- as.data.table(tableS)[strata == "global"] expect_equal(dt.tableS[,total], unname(dt.tableS[,favorable + unfavorable + neutral + uninf]), tolerance = 1e-1, scale = 1) ## inexact for Peron }) } ## ** Strata - same endpoint for(method in c("Gehan","Peron")){ ## method <- "Peron" ## method <- "Gehan" test_that(paste0("BuyseTest - tte (same, ",method,", strata)"),{ BT.tte <- BuyseTest(treatment ~ tte(eventtime1, status1, threshold = 1) + tte(eventtime1, status1, threshold = 0.5) + tte(eventtime1, status1, threshold = 0.25) + strata, data = dtS.sim, scoring.rule = method) ## *** test against fixed value test <- list(favorable = as.double(coef(BT.tte, statistic = "count.favorable", stratified = TRUE, cumulative = FALSE)), unfavorable = as.double(coef(BT.tte, statistic = "count.unfavorable", stratified = TRUE, cumulative = FALSE)), neutral = as.double(coef(BT.tte, statistic = "count.neutral", stratified = TRUE, cumulative = FALSE)), uninf = as.double(coef(BT.tte, statistic = "count.uninf", stratified = TRUE, cumulative = FALSE)), favorable = as.double(coef(BT.tte, statistic = "favorable", stratified = FALSE, cumulative = TRUE)), unfavorable = as.double(coef(BT.tte, statistic = "unfavorable", stratified = FALSE, cumulative = TRUE)), netChange = as.double(coef(BT.tte, statistic = "netBenefit", stratified = FALSE, cumulative = TRUE)), winRatio = as.double(coef(BT.tte, statistic = "winRatio", stratified = FALSE, cumulative = TRUE)) ) if(method == "Gehan"){ GS <- list(favorable = c(438, 438, 438, 719, 719, 719, 543, 543, 543) , unfavorable = c(325, 325, 325, 582, 582, 582, 500, 500, 500) , neutral = c(2284, 2284, 2284, 1569, 1569, 1569, 1084, 1084, 1084) , uninf = c(5953, 5953, 5953, 5367, 5367, 5367, 4809, 4809, 4809) , favorable = c(0.04867, 0.12856, 0.18889) , unfavorable = c(0.03611, 0.10078, 0.15633) , netChange = c(0.01256, 0.02778, 0.03256) , winRatio = c(1.34769, 1.27563, 1.20824) ) } else if(method == "Peron"){ GS <- list(favorable = 
c(1289.04254, 1289.04254, 1289.04254, 1452.99705, 1452.99705, 1452.99705, 682.33602, 682.33602, 682.33602) , unfavorable = c(2044.84933, 2044.84933, 2044.84933, 908.62963, 908.62963, 908.62963, 578.82863, 578.82863, 578.82863) , neutral = c(5666.10812, 5666.10812, 5666.10812, 3304.48143, 3304.48143, 3304.48143, 2043.31679, 2043.31679, 2043.31679) , uninf = c(0, 0, 0, 0, 0, 0, 0, 0, 0) , favorable = c(0.14323, 0.30467, 0.38049) , unfavorable = c(0.22721, 0.32816, 0.39248) , netChange = c(-0.08398, -0.02349, -0.01199) , winRatio = c(0.63039, 0.92841, 0.96944) ) ## butils::object2script(test, digit = 5) } expect_equal(GS, test, tol = 1e-4, scale = 1) ## *** same result for each pair tableS <- summary(BT.tte, print = FALSE, percentage = FALSE)$table expect_equal(tableS[tableS$strata=="1","Delta"],tableS[tableS$strata=="2","Delta"]) expect_equal(tableS[tableS$strata=="1","Delta"],tableS[tableS$strata=="3","Delta"]) expect_equal(tableS[tableS$strata=="1","Delta"],tableS[tableS$strata=="3","Delta"]) ## *** count pairs dt.tableS <- as.data.table(tableS)[strata == "global"] expect_equal(dt.tableS[,total], unname(dt.tableS[,favorable + unfavorable + neutral + uninf]), tolerance = 1e-1, scale = 1) ## inexact for Peron }) } ## * Mixed endpoints for(method in c("Gehan","Peron")){ ## method <- "Peron" ## method <- "Gehan" test_that(paste0("BuyseTest - mixed (",method,", no strata)"),{ BT.mixed <- BuyseTest(treatment ~ tte(eventtime1, status1, threshold = 0.5) + cont(score1, 1) + bin(toxicity1) + tte(eventtime1, status1, threshold = 0.25) + cont(score1, 0.5), data = dt.sim, scoring.rule = method) BT2 <- BuyseTest(data=dt.sim, endpoint=c("eventtime1","score1","toxicity1","eventtime1","score1"), status=c("status1","..NA..","..NA..","status1","..NA.."), treatment="treatment", type=c("timeToEvent","continuous","binary","timeToEvent","continuous"), threshold=c(0.5,1,NA,0.25,0.5), scoring.rule=method) ## *** compatibility between BuyseTests BT.mixed@call <- list() BT2@call <- list() 
expect_equal(BT.mixed, BT2) ## *** test against fixed value test <- list(favorable = as.double(coef(BT.mixed, statistic = "count.favorable", cumulative = FALSE)), unfavorable = as.double(coef(BT.mixed, statistic = "count.unfavorable", cumulative = FALSE)), neutral = as.double(coef(BT.mixed, statistic = "count.neutral", cumulative = FALSE)), uninf = as.double(coef(BT.mixed, statistic = "count.uninf", cumulative = FALSE)), favorable = as.double(coef(BT.mixed, statistic = "favorable", cumulative = TRUE)), unfavorable = as.double(coef(BT.mixed, statistic = "unfavorable", cumulative = TRUE)), netChange = as.double(coef(BT.mixed, statistic = "netBenefit", cumulative = TRUE)), winRatio = as.double(coef(BT.mixed, statistic = "winRatio", cumulative = TRUE)) ) if(method == "Gehan"){ GS <- list(favorable = c(1157, 1753, 751, 134, 373) , unfavorable = c(907, 1806, 949, 129, 323) , neutral = c(1569, 3377, 1677, 277, 718) , uninf = c(5367, 0, 0, 1137, 0) , favorable = c(0.12855556, 0.32333333, 0.40677778, 0.42166667, 0.46311111) , unfavorable = c(0.10077778, 0.30144444, 0.40688889, 0.42122222, 0.45711111) , netChange = c(0.02777778, 0.02188889, -0.00011111, 0.00044444, 0.006) , winRatio = c(1.27563396, 1.07261334, 0.99972693, 1.00105513, 1.01312591) ) ## butils::object2script(test, digit = 8) }else if(method == "Peron"){ GS <- list(favorable = c(2742.0395979, 792.80301972, 403.03891763, 160.70305305, 134.38721963) , unfavorable = c(2953.47896786, 896.93725328, 407.50415506, 142.85049401, 122.54879121) , neutral = c(3304.48143424, 1614.74116124, 804.19808854, 500.64454148, 243.70853064) , uninf = c(0, 0, 0, 0, 0) , favorable = c(0.30467107, 0.39276029, 0.43754239, 0.45539829, 0.4703302) , unfavorable = c(0.32816433, 0.42782402, 0.47310226, 0.48897454, 0.50259107) , netChange = c(-0.02349326, -0.03506373, -0.03555987, -0.03357625, -0.03226087) , winRatio = c(0.92841006, 0.91804169, 0.92483682, 0.93133333, 0.93581089) ) } expect_equal(test, GS, tolerance = 1e-6, scale = 1) ## *** 
count pairs tableS <- summary(BT.mixed, print = FALSE, percentage = FALSE)$table dt.tableS <- as.data.table(tableS)[strata == "global"] expect_equal(dt.tableS[,total], unname(dt.tableS[,favorable + unfavorable + neutral + uninf]) ) }) } test_that("ordering does not matter", { BT.mixed1 <- BuyseTest(treatment ~ tte(eventtime1, status1, threshold = 0.25) + cont(score1, 1), data = dt.sim, scoring.rule = method) BT.mixed2 <- BuyseTest(treatment ~ tte(eventtime1, status1, threshold = 0.5) + tte(eventtime1, status1, threshold = 0.25) + cont(score1, 1), data = dt.sim, scoring.rule = method) expect_equal(coef(BT.mixed2, statistic = "netBenefit")[2:3], coef(BT.mixed1, statistic = "netBenefit"), tol = 1e-6) expect_equal(coef(BT.mixed2, statistic = "winRatio")[2:3], coef(BT.mixed1, statistic = "winRatio"), tol = 1e-6) }) test_that(paste0("BuyseTest - Peron scoring rule with 2 TTE, one without censoring"),{ ## 1 continuous ## 2 Gehan left-censoring ## 3 Gehan right-censoring ## 4 Peron right-censoring survival ## 5 Peron right-censoring competing risks BT.mixed <- BuyseTest(treatment ~ tte(eventtime2, status2, threshold = 0.5) + tte(eventtime1, status1.noC, threshold = 0), data = dt.sim, scoring.rule = "Peron") expect_equal(unname(attr(BT.mixed@scoring.rule,"method.score")), c("SurvPeron","continuous")) ## summary(BT.mixed) BT.mixed <- BuyseTest(treatment ~ tte(eventtime1, status1.noC, threshold = 0) + tte(eventtime2, status2, threshold = 0.5), data = dt.sim, scoring.rule = "Peron") ## summary(BT.mixed) expect_equal(unname(attr(BT.mixed@scoring.rule,"method.score")), c("continuous","SurvPeron")) }) ## * Left censoring test_that("BuyseTest - left vs. 
right censoring", { BT.left <- BuyseTest(treatment ~ tte(eventtime1, status = status1, censoring = "left"), data = dt.sim, scoring.rule = "Gehan") expect_equal(as.double(coef(BT.left)), 0.09488889, tol = 1e-6) BT.left <- BuyseTest(treatment ~ tte(eventtime1, status = status1, censoring = "left"), data = dt.sim, scoring.rule = "Gehan", correction.uninf = TRUE) expect_equal(as.double(coef(BT.left)), 0.1768116, tol = 1e-6) }) ## * Gaussian endpoint ## ** uncorrelated df.1 <- data.frame(mean = 0:1, sd = 1, treatment = c("C","T")) df.2 <- data.frame(mean = 0:1, sd = c(2,0.5), treatment = c("C","T")) df.3 <- rbind(df.1,df.2) test_that("BuyseTest - uncorrelated gaussians", { GS.1 <- 1 - pnorm(0, mean = 1, sd = sqrt(2)) ## GS.1 - mean( rnorm(n.GS, mean = 1)> rnorm(n.GS, mean = 0)) BTG.1 <- BuyseTest(treatment ~ gaus(mean, sd), data = df.1, method.inference = "none") expect_equal(GS.1,as.double(coef(BTG.1, statistic = "favorable")),tol=1e-6) GS.2 <- 1 - pnorm(0, mean = 1, sd = sqrt(4.25)) ## GS.2 - mean( rnorm(n.GS, mean = 1, sd = 0.5)> rnorm(n.GS, mean = 0, sd = 2)) BTG.2 <- BuyseTest(treatment ~ gaus(mean = mean, std = sd), data = df.2, method.inference = "none") expect_equal(GS.2,as.double(coef(BTG.2, statistic = "favorable")),tol=1e-6) GS.3 <- mean(c(GS.1, (1 - pnorm(0, mean = 1, sd = sqrt(5))), 1 - pnorm(0, mean = 1, sd = sqrt(1.25)), GS.2)) ## GS.3 - mean(c(GS.1,mean( rnorm(n.GS, mean = 1)> rnorm(n.GS, mean = 0, sd = 2)), mean( rnorm(n.GS, mean = 1, sd = 0.5)> rnorm(n.GS, mean = 0)),GS.2)) BTG.3 <- BuyseTest(treatment ~ gaus(mean = mean, std = sd), data = df.3, method.inference = "none") expect_equal(GS.3,as.double(coef(BTG.3, statistic = "favorable")),tol=1e-6) }) ## ** correlated complement <- function(rho, n) {## generate a dataset with given correlation ## adapted from ## https://stats.stackexchange.com/questions/15011/generate-a-random-variable-with-a-defined-correlation-to-an-existing-variables x <- rnorm(n) y <- rnorm(n) y.perp <- residuals(lm(x ~ y)) z <- rho 
* sd(y.perp) * y + sqrt(1 - rho^2) * sd(y) * y.perp return(list(Y=as.double(y),X=as.double(z))) } ## cor(complement(rho = 0.5, n = 10)) df.1$iid <- complement(rho = 0.5, n = 10) df.2$iid <- complement(rho = 0.5, n = 10) df.3 <- rbind(df.1,df.2) test_that("BuyseTest - correlated gaussians", { GS.1 <- 1 - pnorm(0, mean = 1, sd = sqrt(1)) ## GS.1 - mean(apply(mvtnorm::rmvnorm(n.GS, mean = 0:1, sigma = matrix(c(1,0.5,0.5,1),2,2)),1, FUN = function(x){x[2]>x[1]})) BTG.1 <- BuyseTest(treatment ~ gaus(mean, sd, iid), data = df.1, method.inference = "none") expect_equal(GS.1,as.double(coef(BTG.1, statistic = "favorable")),tol=1e-6) GS.2 <- 1 - pnorm(0, mean = 1, sd = sqrt(3.25)) ## 2^2+0.5^2-2*0.5*0.5*2 ## GS.2 - mean(apply(mvtnorm::rmvnorm(10*n.GS, mean = 0:1, sigma = matrix(c(0.5^2,0.5,0.5,2^2),2,2)),1, FUN = function(x){x[2]>x[1]})) BTG.2 <- BuyseTest(treatment ~ gaus(mean = mean, std = sd, iid), data = df.2, method.inference = "none") expect_equal(GS.2,as.double(coef(BTG.2, statistic = "favorable")),tol=1e-6) GS.3 <- mean(c(GS.1, 1 - pnorm(0, mean = 1, sd = sqrt(1.25-cor(df.1$iid[[1]],df.2$iid[[2]]))), 1 - pnorm(0, mean = 1, sd = sqrt(5-4*cor(df.1$iid[[2]],df.2$iid[[1]]))), GS.2)) ## GS.3 - c(GS.1,mean( rnorm(n.GS, mean = 1)> rnorm(n.GS, mean = 0, sd = 2)), mean( rnorm(n.GS, mean = 1, sd = 0.5)> rnorm(n.GS, mean = 0)),GS.2) BTG.3 <- BuyseTest(treatment ~ gaus(mean = mean, std = sd, iid = iid), data = df.3, method.inference = "none") expect_equal(GS.3,as.double(coef(BTG.3, statistic = "favorable")),tol=1e-6) }) ## * dataset [save] ## dt.sim <- data.table("treatment" = c("C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", 
"C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "C", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T", "T"), ## "eventtime1" = c(0.29972933, 0.17098301, 0.03202016, 0.44202235, 0.10311930, 1.15511106, 0.56124439, 1.21925282, 0.17187895, 0.29113518, 0.09773182, 0.67653288, 0.03861652, 0.07063795, 0.01458732, 0.71823701, 1.08732315, 0.15199073, 0.60176965, 0.41266664, 0.70669922, 0.43573838, 0.07363507, 0.96531658, 0.05755952, 0.94544071, 3.92245778, 0.26717898, 0.16922697, 0.73171108, 1.56385321, 0.00937406, 0.16954569, 0.09197029, 0.09036225, 0.18974451, 0.04073623, 0.02378406, 0.56634940, 1.61507125, 0.49139404, 0.19115956, 0.13740882, 0.36936734, 0.36919469, 1.41367871, 0.94269045, 0.01191205, 0.37297697, 0.19502322, 0.01296422, 0.31343847, 0.00213360, 0.28171661, 0.17320335, 0.06992269, 0.03277328, 0.21255628, 1.43421433, 0.50712777, 0.24571909, 1.00813698, 0.51160661, 0.09173387, 0.28063656, 1.14177394, 0.12593593, 1.58859472, 0.07077964, 0.11041468, 0.09741926, 0.56342077, 0.23108781, 0.76598116, 0.02193362, 0.14312356, 1.36059222, 0.85553186, 0.38761972, 0.05592164, 0.24080708, 2.23146741, 0.65659820, 0.12662146, 0.33644115, 0.93738422, 0.93216642, 0.80139621, 0.65390255, 0.60241389, 0.34299720, 0.66186296, 1.10529116, 0.14865979, 0.12501623, 0.04451988, 0.48423927, 0.92904199, 0.32060823, 0.20941169, 0.29373301, 0.99816128, 1.33980963, 0.16543365, 1.22099704, 0.03737215, 0.16298912, 0.32335369, 0.39027702, 0.10348081, 1.03796020, 0.47513692, 0.24106903, 0.45926525, 0.49608224, 1.44827529, 0.52186516, 0.68353467, 0.01981440, 
0.18416592, 0.97426659, 1.77382739, 0.33398520, 0.19615994, 0.03780470, 0.17649501, 0.22901002, 0.02323039, 0.20845366, 0.52986278, 0.74053528, 0.27117162, 0.19489030, 0.66019467, 0.88323068, 0.32137872, 0.17473734, 0.10029520, 0.08490402, 0.34625873, 1.92253508, 1.24734174, 0.20656446, 1.47517308, 0.00019922, 0.33644227, 0.26031585, 0.24064544, 0.87270237, 0.50428180, 0.55966656, 1.09623527, 0.00653237, 0.51073806, 0.36994855, 0.74641533, 0.44120419, 0.98441248, 0.27874909, 0.29785716, 0.19272977, 0.03585685, 0.07027667, 0.00518237, 0.13138687, 0.03262746, 0.26673138, 0.22325116, 0.71796943, 0.29428682, 0.74450076, 0.29965883, 0.17469397, 1.73676014, 1.38930578, 1.61992553, 0.73321636, 0.79600567, 0.04142438, 0.94565307, 0.00825042, 0.65877918, 0.76745178, 1.11121647, 1.58163545, 0.10784914, 0.94274529, 0.05602611, 0.59380396, 1.25969953), ## "status1" = c(1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1), ## "eventtime2" = c(0.13556029, 0.53306031, 0.75447141, 0.25613517, 2.07382035, 0.03757998, 0.14045294, 1.43718090, 0.25834648, 0.00990976, 0.09679214, 0.10656134, 0.12146487, 1.65973920, 0.36003623, 0.27730941, 0.38657147, 0.24456645, 1.45490109, 1.36873057, 0.59120666, 0.13334589, 0.01156500, 0.53612156, 0.50263578, 0.83841401, 1.13769949, 0.32597204, 2.28409336, 0.24155223, 0.14974690, 0.44926621, 0.04444326, 1.74047606, 0.14653971, 0.05550446, 0.91919142, 0.19709401, 0.13809616, 0.19533969, 2.16819532, 1.63080095, 0.63950588, 0.39308933, 
0.93342527, 1.44372187, 0.07228017, 1.65850544, 1.65081603, 1.59301263, 0.56652696, 1.18547005, 0.17771856, 0.88895104, 0.55326678, 0.53893584, 0.06138524, 0.41325058, 0.50743982, 0.56196957, 0.05072848, 0.78399042, 0.14126094, 0.37339708, 1.71804695, 0.61959578, 0.37048513, 0.19876601, 1.13166471, 0.16526419, 1.00895604, 0.27660263, 0.15692162, 0.56680821, 1.02953170, 0.15395316, 0.18412961, 0.35121113, 1.71637364, 0.37027203, 0.05331582, 0.41455140, 0.40164440, 0.40714141, 1.60638089, 0.42633103, 0.21886920, 0.12911882, 0.21075684, 0.41380614, 0.13020199, 0.83162531, 0.33213999, 0.25378188, 0.03565690, 1.79972143, 0.49513339, 0.85519650, 0.95797393, 1.18930068, 1.52944416, 0.21211345, 0.36342043, 1.12946317, 0.11842668, 1.50611081, 0.47826400, 0.58815796, 0.20995225, 0.25050953, 0.38504902, 2.57865824, 2.37486593, 0.37757152, 0.11404643, 0.05407206, 0.42755586, 0.06360704, 0.04317937, 0.45965630, 0.40623887, 0.21847145, 0.39437507, 0.88480211, 1.40718306, 0.64707974, 0.08332118, 0.36962127, 0.60152779, 0.39706135, 0.55125693, 0.36913746, 1.42278678, 0.69311190, 1.01065256, 1.08925374, 1.34066288, 0.59957988, 0.04203430, 2.77233260, 3.28708257, 1.73709539, 0.45768357, 0.32263242, 0.29657430, 0.02366551, 0.20247683, 1.35654772, 0.00694441, 1.38201424, 0.89090216, 0.88823543, 1.41377148, 0.37135459, 0.36557318, 1.90512208, 0.31316393, 1.10058790, 0.36843826, 1.04621615, 0.99875000, 0.12788404, 0.36530394, 0.05811976, 2.05009814, 0.51824171, 0.87219406, 0.13617999, 1.00594646, 0.74437044, 0.00258926, 0.57609633, 0.39368111, 0.39772202, 0.31094959, 0.37548816, 2.17934168, 0.99261368, 0.25028018, 0.04431970, 0.77118728, 1.56589807, 2.07293061, 0.90534207, 1.07834985, 0.16480664, 0.14750491, 0.30542754, 0.19788267, 0.07055950), ## "status2" = c(0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1), ## "eventtime3" = c(0.25956955, 0.50870505, 0.38243703, 0.48507356, 0.58090695, 0.09143226, 0.11595455, 2.45142917, 0.31529917, 0.01063624, 0.23269836, 0.00314356, 0.35192745, 0.05702643, 0.77426085, 0.04011495, 0.47305223, 1.32361908, 0.49734166, 0.08057535, 0.31222287, 0.49705636, 0.78467962, 0.85062628, 0.01585449, 0.84971402, 0.14097533, 0.64007436, 0.47504948, 0.47065190, 0.91619564, 3.14863908, 0.72449497, 0.34146462, 0.06298503, 1.26862569, 0.07311503, 0.26950937, 0.24296576, 1.46229570, 0.44175144, 0.08437995, 0.11765742, 1.26484624, 0.13972311, 0.88368353, 0.10077329, 0.30071004, 0.18178031, 0.40319616, 0.19262871, 0.84156278, 1.01319195, 1.09334295, 0.06925393, 0.08496785, 0.38289653, 0.77851078, 2.90459458, 0.70906019, 1.44433835, 0.31710947, 0.83804625, 0.52672195, 1.39708324, 0.05738464, 0.00424703, 0.02745054, 0.15640178, 0.60252617, 0.45624187, 0.03877660, 0.26583575, 1.93489936, 0.16157491, 0.16150214, 3.12000133, 1.15754730, 0.20733374, 1.36244884, 0.85908195, 1.17088649, 1.04190785, 0.58512798, 0.17684563, 0.39304759, 0.50360868, 0.25826671, 1.36782193, 0.79286184, 0.54019913, 0.54899883, 0.01927732, 0.83191354, 2.95844611, 0.66324356, 0.37850024, 1.01325887, 0.68367717, 0.16975714, 1.07644784, 2.05425366, 0.76593812, 0.93194348, 0.46623093, 2.96814573, 0.12555074, 1.85279179, 0.91838000, 2.96795061, 0.20853482, 0.55747755, 1.16290689, 0.25204838, 1.45458273, 0.47887218, 2.14245439, 0.73046914, 1.21973505, 0.24528169, 0.60239017, 1.79625436, 0.09840920, 0.42368372, 0.07741995, 1.28366723, 0.51361326, 1.44102172, 0.23235485, 0.00745763, 1.46408645, 0.28432717, 0.50773758, 0.12780739, 1.62522052, 0.60240232, 
0.53551248, 0.37865307, 1.43088588, 0.49141086, 0.27257514, 1.28147177, 1.11686803, 0.37442988, 1.00084367, 1.78079525, 0.36024791, 1.50952573, 0.36300718, 0.73043847, 0.25946183, 0.86342025, 0.86724760, 0.65525025, 0.34944216, 0.78352676, 0.76614068, 0.03508025, 1.10827027, 0.13490347, 2.82395488, 0.42936653, 0.15014156, 0.82605928, 0.38453132, 1.19652345, 0.54175957, 0.40951641, 0.25130183, 2.10913985, 3.90959749, 0.83906640, 0.35827788, 0.82174584, 0.43750343, 0.72346693, 2.07799650, 0.03194980, 0.02397542, 0.84753338, 0.39459503, 1.40010494, 1.05098332, 2.16823693, 1.17526902, 1.21647314, 0.20328870, 0.08513324, 0.20774038, 0.14752052), ## "status3" = c(0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0), ## "toxicity1" = c("2", "1", "1", "2", "1", "2", "2", "1", "1", "1", "2", "2", "2", "1", "2", "1", "2", "2", "2", "2", "2", "2", "2", "2", "1", "2", "2", "2", "2", "1", "1", "2", "1", "1", "2", "2", "2", "1", "2", "2", "2", "2", "1", "2", "2", "2", "2", "1", "1", "1", "2", "1", "2", "1", "2", "1", "1", "2", "2", "2", "2", "2", "1", "2", "1", "1", "1", "1", "1", "1", "2", "1", "2", "2", "1", "1", "2", "1", "2", "2", "2", "2", "1", "1", "1", "2", "2", "2", "1", "1", "2", "2", "2", "2", "1", "2", "1", "2", "1", "2", "1", "1", "2", "2", "2", "1", "1", "2", "2", "2", "1", "2", "1", "2", "1", "2", "2", "1", "2", "2", "1", "2", "1", "1", "2", "2", "2", "2", "2", "2", "1", "2", "1", "1", "2", "1", "2", "1", "1", "1", "2", "2", "1", "1", "1", "1", "2", "2", 
"2", "2", "1", "1", "1", "2", "2", "2", "2", "2", "2", "2", "1", "1", "2", "1", "1", "1", "2", "2", "1", "1", "2", "1", "2", "1", "1", "1", "2", "1", "2", "1", "1", "1", "2", "2", "1", "2", "2", "1", "2", "2"), ## "toxicity2" = c("2", "2", "2", "2", "2", "1", "2", "2", "1", "2", "1", "2", "2", "2", "2", "2", "1", "2", "1", "1", "2", "2", "2", "1", "1", "1", "2", "2", "2", "2", "2", "2", "2", "1", "2", "2", "2", "2", "2", "2", "2", "2", "1", "1", "1", "2", "2", "2", "2", "1", "2", "2", "2", "2", "1", "2", "2", "2", "2", "2", "1", "2", "1", "2", "2", "2", "1", "2", "2", "1", "2", "2", "2", "2", "1", "2", "1", "1", "1", "2", "2", "2", "2", "2", "1", "1", "1", "2", "2", "2", "1", "2", "2", "1", "2", "2", "2", "2", "2", "2", "2", "2", "1", "2", "2", "2", "2", "2", "2", "2", "2", "2", "1", "1", "1", "2", "1", "1", "1", "1", "2", "2", "2", "2", "1", "2", "2", "1", "2", "1", "2", "2", "2", "1", "2", "1", "2", "2", "2", "1", "2", "2", "2", "2", "2", "2", "2", "2", "2", "2", "2", "1", "2", "2", "2", "2", "1", "1", "1", "1", "2", "2", "2", "2", "2", "2", "2", "2", "2", "2", "2", "2", "1", "1", "1", "2", "1", "1", "1", "1", "2", "2", "1", "2", "2", "2", "2", "1", "2", "2"), ## "score1" = c( 1.37248589, 2.19299163, 1.73483525, -0.00752315, 1.88739264, 1.41874690, 1.29806497, 1.10816840, 1.04006544, 2.47152734, -0.75581445, 0.90738612, 2.23780927, 1.42039507, 1.16518174, 0.17216119, 1.20579932, 0.00104613, 1.02850830, 1.72188935, 1.20350882, 0.75061290, 3.07140367, 0.01723101, 1.24266243, 1.00337130, -0.55113598, 2.22269088, 0.62567229, 1.03200894, 0.76908950, 0.72202409, -0.33953529, 0.16539723, 1.33126684, 0.13868705, 1.44159579, 1.73077039, 2.37832330, 1.48123808, 0.43160998, 0.60855246, 1.64288621, 1.27620613, 0.74186695, 2.46582983, -0.97327590, 1.92010988, 0.85092865, 2.23738748, 1.52738054, 2.49049279, 0.60756862, 1.18796458, 0.98872659, -0.24592590, 1.94396826, 1.45941533, -0.65322897, 0.22658973, 0.31024850, 2.67183286, 0.90395013, 0.20399559, 0.77525623, -0.26017269, 
-0.42640232, -1.65363714, -0.03865936, 1.57982435, 3.24926361, 0.65269903, 1.35892133, 0.43765887, 0.03994969, 1.34253659, -0.04648803, 1.55333508, 0.71948130, 0.74599877, 1.99124934, 0.33964643, 1.69329156, 2.52728405, 1.23695659, 2.17387684, -1.48614865, 2.49742470, 1.97382087, 0.51588451, 1.27685067, 0.78477715, 2.70827066, 2.53843123, 0.76378630, -1.07643183, 0.39430703, 1.95235963, -0.18769287, 4.81258034, 2.16396750, -0.52338460, -1.51833505, 0.29247077, 0.71256712, 0.56469169, 0.65692123, 0.96068912, 1.88696599, 0.64005160, 0.27104573, 2.75174562, 0.91396141, 2.10636302, 0.98082216, -0.49346018, 3.70063662, 0.25630558, 2.06519498, 0.96791827, 0.46004031, -0.92564357, 2.00783138, 0.72076520, -0.24956586, 2.24849113, 0.80778662, 1.91197623, -1.15826968, 2.28961053, 0.57189003, 0.74499711, 2.32715824, 1.83869012, -0.46500068, 0.38374730, 1.10610584, -0.84401676, -0.61642285, 1.22767670, 1.92121670, 0.66879656, 2.28723403, 1.05726080, -1.20393313, 0.47708965, 2.08078531, 0.75685866, 0.09055344, 0.13170049, 1.86947504, 0.31999040, 1.17321454, 0.84056196, 1.79349943, 2.69435049, 2.23996869, 1.02943674, 2.65721450, 2.13122314, -0.40241063, 1.15677046, 1.86750860, 0.96676484, 1.95406456, 2.13009673, 1.39369731, 1.37680833, 0.74825709, 1.50409489, -0.32980347, 0.88170055, 0.36675153, 1.67769134, 0.70211824, 1.62792225, 2.67843073, -1.40000254, -0.34984289, 2.20200790, 2.29384272, 0.94040565, 0.31493765, -0.45343930, 2.29412656, 2.17616909, 1.86404508, -1.29673252, -0.39181001, -0.38587675), ## "score2" = c( 2.65317519, 2.19804984, 3.39958837, 2.36418421, 3.64077145, 3.13527144, 0.78355959, 2.90098701, 1.38803990, 3.13141377, 0.84382164, 0.89827149, 2.57784739, 2.18087366, 0.01207711, 3.36059519, 0.82579966, 1.06209054, 3.26831405, 3.36400522, 0.15778253, 1.43518251, 0.34003090, 3.25256983, 0.36760006, 3.05366327, -0.76870569, 0.21894658, 2.25633914, 2.00545663, 2.74176995, -0.68560240, 3.10162133, 3.21802045, 3.50102058, 0.81409682, 0.57813789, 1.51272481, 
2.58998056, 1.54575954, 1.78942788, 2.93706409, 1.41297510, 2.49658599, 1.73979822, 3.38222640, 2.56692197, 2.07484513, 1.08979248, 1.04274682, 0.72159509, 2.00701403, 1.81918381, 0.86052608, 2.74381724, 2.62444167, 0.58176694, 2.93314119, 3.15064738, 2.47002921, 2.87963124, 1.07675302, 2.25137934, 0.74625923, 1.84460737, 2.51732094, 2.42306429, 3.15885851, 3.90794404, 2.12231655, 2.95211161, 3.38124150, 1.31147508, 1.99527444, 2.70690665, 2.63814472, 0.21252668, 1.79456144, 2.15728030, 2.77809395, 2.44579281, 1.93244981, 0.51950279, 1.79247109, 1.68026738, 3.28908680, 2.07278306, 1.99467467, 2.81879144, 2.95246510, 0.06090360, 2.87884707, 1.59755053, 1.11761304, 2.61240767, 3.13087171, 0.36334251, 3.08359626, 1.91447252, 2.56312532, 3.09462984, 2.72640935, 0.60034286, 0.71220484, 1.03158452, 0.26331424, 2.83169736, 1.24104689, 4.15507700, 1.57674112, 1.99435006, 2.40036173, 1.63432220, 1.57085616, 2.24469713, 4.21977625, 2.31426981, 1.43621493, 0.51430384, 2.41529859, 1.47193632, 1.64952938, 2.73062194, 1.44337583, 0.55023946, 0.94800357, 1.31716138, 1.38705882, 3.89078004, 1.85458947, 1.36057628, 1.58882395, 2.34279188, 0.86675325, 2.06607030, 2.03799977, 2.92106475, 4.13496564, 1.40017307, 3.24444646, 1.92054298, 3.18175155, 4.18614406, 2.40617493, 1.26164090, 0.04351330, 0.04995430, 1.05900218, 3.19778677, 1.37576061, 1.86713034, 1.98277908, 1.54037072, 3.47293679, 4.16931961, -1.00143131, 0.22801442, 1.63516078, 2.37453378, 0.76592207, 2.47489342, 3.23305746, 1.40687134, 2.75226244, 2.61204551, 1.77134263, 1.29516065, 3.22951605, 3.51851513, 3.34509418, 1.87630847, 2.93390245, 2.01949685, 2.83085211, 1.69550533, 1.94071872, 3.00739670, 0.62220491, 0.99579727, 1.97671970, 3.54674727, 1.24063675, 3.00679266, 2.27421492, 1.91961764, 1.09644750, 2.47314510, 3.38848898, 2.03474609, 0.70741497), ## "score3" = c(1.1422912, 3.7273816, 3.3474971, 3.8424655, 2.9674466, 1.8834731, 1.9977386, 1.9200766, 2.9129226, 1.6749344, 2.7190415, 1.9429009, 4.8992433, 4.5210321, 
3.2763423, 1.7971510, 2.1940079, 2.4692713, 3.7387694, 5.6624053, 2.4008662, 2.5411284, 1.4538581, 3.9670348, 0.6024646, 4.3625804, 3.2195859, 2.3472134, 3.2848838, 3.1957958, 2.0305307, 1.8458276, 2.9608437, 2.6592635, 4.0677380, 2.8616322, 3.8583077, 4.1206752, 2.1229097, 2.2782372, 3.5029077, 1.6258354, 3.0368000, 3.5862009, 3.6839435, 2.4312439, 3.2703556, 2.4580630, 3.4026097, 2.8134101, 3.6427508, 3.6714750, 2.5989603, 5.1650258, 1.6047065, 2.8292999, 2.8305656, 2.9670166, 5.3885198, 2.0099881, 2.8989458, 1.0858509, 2.4511256, 3.7138949, 4.8744887, 3.7946575, 2.9052863, 1.6529439, 3.2264245, 3.0241093, 1.1952988, 3.7405965, 2.6342226, 3.9380437, 2.7353587, 2.8336375, 2.9961175, 3.4351701, 2.3799796, 2.7542375, 2.8883925, 3.5208775, 2.7418243, 2.9288784, 4.1494674, 0.5699271, 1.2895520, 3.2585224, 3.4980213, 2.9506039, 4.4946761, 0.4182285, 3.0610335, 3.3277493, 3.1286587, 2.5031202, 3.4321744, 4.4126444, 0.9265798, 3.6894032, 2.7362238, 2.6441475, 2.5188452, 3.2227355, 5.4299103, 4.4962981, 2.2827918, 2.5329461, 3.6629235, 5.3001769, 3.3275101, 3.0638635, 1.8604391, 4.1804102, 3.0413885, 1.7864064, 3.0731958, 2.7426754, 3.2668064, 4.3877243, 3.1930796, 3.5923166, 2.1700255, 3.3925733, 3.3848676, 4.0510447, 4.1557975, 1.9655620, 2.7455319, 4.2736843, 4.5025446, 3.5904095, 2.3693145, 3.7923495, 3.1253846, 3.3227550, 2.5544168, 3.7668439, 1.5964970, 1.8239532, 3.5115965, 4.3167653, 5.3929130, 2.9322306, 2.7962149, 4.9964344, 4.5432178, 1.7716624, 5.6444880, 3.7570137, 4.0553452, 3.9579449, 3.8338791, 2.8307244, 2.5955664, 3.2956016, 2.7681255, 2.5519218, 3.0223108, 3.0444673, 3.4807212, 3.6356199, 0.9992576, 2.3093445, 2.8693548, 2.6557482, 2.9475872, 3.0961239, 3.2664071, 3.5547935, 4.2354459, 3.2851334, 2.5412071, 3.6234780, 2.2760049, 4.6194187, 2.3833266, 3.3845411, 5.1145213, 1.9714326, 2.1132120, 4.2711460, 1.3949146, 4.1222734, 5.1584386, 3.4282466, 4.2011787, 4.0316901, 3.6538742, 5.0120818), ## "status1.noC" = c(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1))
ac60c1321379ab32732f5b914af799cb7b4bd85b
0feca096f50f64f6cad998204c4c848b10cd37a9
/R/status_messages.R
f05fb738f8ef7d5e25e759d3e0e6ffa313838d1c
[]
no_license
dakep/examinr
f75487896192294539ac3a3bfb4e15b0de78012c
e02570c5c6f79b10ec126fa2343f6dd0b6ab4fc7
refs/heads/main
2023-04-05T23:38:04.656653
2021-04-30T21:59:21
2021-04-30T21:59:21
301,797,514
1
0
null
null
null
null
UTF-8
R
false
false
3,904
r
status_messages.R
#' @include state_frame.R
#' @importFrom yaml read_yaml
# Package-level store holding the currently active status messages.
# Initialized from the shipped messages.yaml template so there is always a
# complete set of messages available.
.status_messages <- state_frame(yaml::read_yaml(system.file('messages.yaml', package = 'examinr',
                                                            mustWork = TRUE),
                                                eval.expr = FALSE))

#' Customize Status Messages
#'
#' Customize the status messages displayed at various times and states during an exam.
#' A template with the default status messages can be saved as a YAML text file with
#' `status_message_template()`.
#'
#' Note that `status_message_template()` should not be called from within an exam document. Rather,
#' generate the template once, modify as necessary, and use in the exam document by specifying the
#' file name in the [exam_document()] or by calling `status_messages()`
#'
#' Calling `status_messages()` without arguments invisibly returns the currently set status messages.
#'
#' @param file path to the messages file. For `status_message_template()`, this is where the template
#'   is saved (if `NULL`, only returns the default messages). For `status_messages()`, the file to
#'   read the status messages from.
#' @param messages Optionally specify the messages via an R list, in the same format as returned by
#'   `status_message_template()`. If `messages` is set, `file` is ignored with a warning.
#'
#' @return `status_message_template()` invisibly returns the default messages as R list.
#'   `status_messages()` invisibly returns the new status messages as R list.
#'
#' @importFrom yaml read_yaml
#' @importFrom rlang warn
#' @family localization
#' @export
status_message_template <- function (file = NULL) {
  # The template is meant to be generated interactively, not while rendering
  # an exam document.
  if (isTRUE(getOption('knitr.in.progress'))) {
    warn("`status_message_template()` should not be called from a knitr document")
  }
  template <- system.file('messages.yaml', package = 'examinr', mustWork = TRUE)
  messages <- read_yaml(template, eval.expr = FALSE)
  if (is.null(file)) {
    # No destination requested: print the raw template to the console.
    # (Default `file = NULL` matches the documented behavior; previously the
    # argument had no default and a no-argument call raised an error.)
    cat(readLines(template, encoding = 'UTF-8'), sep = '\n')
  } else {
    # Never overwrite an existing (possibly customized) file.
    file.copy(template, file, overwrite = FALSE)
  }
  return(invisible(messages))
}

#' @rdname status_message_template
#' @importFrom yaml read_yaml
#' @importFrom rlang warn abort is_missing
#' @importFrom knitr opts_current
#' @export
status_messages <- function (file, messages) {
  # No arguments: act as a getter for the currently active messages.
  if (missing(file) && missing(messages)) {
    return(invisible(get_status_message()))
  }
  # Messages may only be replaced from a setup chunk of the exam document.
  if (!is_knitr_context('setup')) {
    abort("`status_messages()` must be called in a context='setup' chunk.")
  }
  if (is_missing(messages) || is.null(messages)) {
    # Load the messages from the given YAML file.
    messages <- read_yaml(file, eval.expr = FALSE)
  } else {
    # `messages` takes precedence over `file`.
    if (!is_missing(file)) {
      warn("Ignoring argument `file` as argument `messages` is also given.")
    }
  }
  validate_status_messages(messages)
  set_status_messages(messages)
  return(invisible(messages))
}

# Replace the package-level message store with `messages`.
set_status_messages <- function (messages) {
  .status_messages$set(messages)
}

# Retrieve the currently active status messages.
# If `what` is missing, the full message list is returned; otherwise the single
# message identified by `what` is returned, erroring on unknown identifiers.
#' @importFrom rlang abort is_missing %||%
get_status_message <- function (what) {
  if (is_missing(what)) {
    .status_messages$get()
  } else {
    .status_messages$get(what) %||%
      abort(paste("Requested unknown status message", what))
  }
}

# Recursively verify that `messages` contains every entry present in the
# shipped template (same names, same nesting). `path` tracks the position in
# the hierarchy for readable error messages; `template` defaults to the
# packaged messages.yaml.
#' @importFrom yaml read_yaml
#' @importFrom rlang abort
validate_status_messages <- function (messages, template = NULL, path = NULL) {
  if (is.null(template)) {
    template <- read_yaml(system.file('messages.yaml', package = 'examinr', mustWork = TRUE),
                          eval.expr = FALSE)
  }
  for (name in names(template)) {
    subpath <- paste(c(path, name), collapse = ' > ')
    if (is.null(messages[[name]])) {
      abort(sprintf("Message string %s is missing.", subpath))
    } else if (is.list(template[[name]])) {
      if (is.list(messages[[name]])) {
        # Descend into nested message groups.
        validate_status_messages(messages[[name]], template[[name]], subpath)
      } else {
        abort(sprintf("Messages malformed: %s must contain sub-items %s.", subpath,
                      paste(names(template[[name]]), collapse = ', ')))
      }
    }
  }
}
741d25e1938a262800d4eec22109b6d535850ef0
4f3d98129625eaa618dc25f54988bce179b841fc
/R/gxeRC.R
ca8639015fd8d431a44bd58c87c48548b59d1808
[]
no_license
SharonLutz/gxeRC
eb62cad249083be7cfc902e63a9139dae58e45b1
4e32a4de03232f3c46ff7e03df2b14bc463711c5
refs/heads/master
2022-05-30T00:17:04.402843
2020-03-02T19:36:52
2020-03-02T19:36:52
131,533,759
3
0
null
null
null
null
UTF-8
R
false
false
5,057
r
gxeRC.R
#' Simulation study of gene-by-environment (GxE) interaction tests under a
#' range of minor allele frequencies.
#'
#' For each interaction effect size in `betaI`, simulates `nSim` data sets of
#' `n` subjects with `nSNP` SNPs, a normal environmental exposure, and a normal
#' outcome, then records how often (a) each per-SNP interaction term is
#' significant at a Bonferroni-corrected level and (b) the joint F-test of all
#' interaction terms is significant. The rejection rates are plotted against
#' `betaI` and saved to `plot.name`.
#'
#' @param n number of subjects per simulated data set (positive integer).
#' @param nSNP number of SNPs; must equal `length(MAF)` and `length(betaX)`.
#' @param MAF minor allele frequency of each SNP.
#' @param betaX main genetic effect of each SNP.
#' @param betaI interaction effect sizes to cycle through (length >= 2).
#' @param zMu,zVar mean and variance of the normal environmental exposure.
#' @param yVar residual variance of the outcome.
#' @param nSim number of simulation replicates (positive integer).
#' @param alpha significance level, in (0, 1).
#' @param plot.name file name of the PDF plot that is written.
#' @return A list with one element: the matrix of rejection rates,
#'   `length(betaI)` rows by `nSNP + 1` columns (per-SNP tests plus the joint
#'   F-test).
gxeRC <- function(n=5000,nSNP=3,MAF=c(0.05,0.01,0.005),betaX=c(0.25,0.25,0.25),betaI=c(0,0.05,0.1),
                  zMu=0,zVar=1,yVar=1,nSim=1000,alpha=0.05,plot.name="gxeRC.pdf"){

  ####################################
  # Error checks
  ####################################
  # Dimensions must be consistent: one MAF and one main effect per SNP,
  # and at least two interaction effect sizes to plot a curve.
  if (nSNP != length(MAF)) { stop("Error: nSNP must equal length(MAF).") }
  if (nSNP != length(betaX)) { stop("Error: nSNP must equal length(betaX).") }
  if (length(betaI) < 2) { stop("Error: length(betaI) must be 2 or greater.") }

  # n, nSNP and nSim must be positive integers.
  if (floor(n) != ceiling(n)) { stop("Error: n must be an integer.") }
  if (floor(nSNP) != ceiling(nSNP)) { stop("Error: nSNP must be an integer.") }
  if (floor(nSim) != ceiling(nSim)) { stop("Error: nSim must be an integer.") }
  if (!(n > 0)) { stop("Error: n must be greater than 0.") }
  if (!(nSNP > 0)) { stop("Error: nSNP must be greater than 0.") }
  if (!(nSim > 0)) { stop("Error: nSim must be greater than 0.") }

  # Variances must be positive scalars.
  if (!(zVar > 0)) { stop("Error: zVar must be greater than 0.") }
  if (!(yVar > 0)) { stop("Error: yVar must be greater than 0.") }
  if (length(zVar) != 1) { stop("Error: zVar must be of length 1") }
  if (length(zMu) != 1) { stop("Error: zMu must be of length 1") }
  if (length(yVar) != 1) { stop("Error: yVar must be of length 1") }

  # Scalar condition: use short-circuit || (the original used element-wise |).
  if (alpha < 0 || alpha > 1) { stop("Error: alpha must be between 0 and 1.") }

  ####################################
  # Result store: one row per interaction effect size, one column per
  # per-SNP test plus one for the joint F-test over all SNPs.
  ####################################
  rejectH0 <- matrix(0, nrow = length(betaI), ncol = (nSNP + 1))
  colnames(rejectH0) <- c(paste0("lmX", seq_len(nSNP)), "lmAll")

  ####################################
  # Simulations
  ####################################
  for (GLOBALVAR in seq_len(nSim)) {
    # Seed per replicate so the whole study is reproducible run-to-run.
    set.seed(GLOBALVAR)
    # Progress message every 100 replicates.
    if (GLOBALVAR %% 100 == 0) { print(GLOBALVAR) }

    # Cycle through the interaction effect sizes.
    for (bb in seq_along(betaI)) {
      betaIv <- betaI[bb]

      ####################################
      # Simulate data
      ####################################
      # Genotype matrix: each SNP is an additive 0/1/2 count.
      X <- matrix(0, nrow = n, ncol = nSNP)
      for (xx in seq_len(nSNP)) {
        X[, xx] <- rbinom(n, 2, MAF[xx])
        # A monomorphic SNP makes the interaction model unidentifiable;
        # report which SNP lacked variability so the user can fix n or MAF.
        if (mean(X[, xx]) == 0 || mean(X[, xx]) == 2) {
          stop(paste("Error: Increase n or MAF because there is no variability in SNP ",
                     xx, sep = ""))
        }
      }

      # Environmental exposure Z.
      z <- rnorm(n, zMu, sqrt(zVar))
      zz <- matrix(z, nrow = n, ncol = 1)

      # Outcome Y = genetic main effects + GxE interaction + noise.
      mainEffects <- X %*% betaX
      intEffects <- (X %*% rep(betaIv, nSNP)) * zz
      yMu <- mainEffects + intEffects
      y <- rnorm(n, yMu, sqrt(yVar))

      ####################################
      # Interaction tests
      ####################################
      modelA <- lm(y ~ z + X + X * z)
      modelAA <- summary(modelA)$coef
      # Full model must estimate intercept + z + nSNP main effects + nSNP
      # interactions; fewer rows means some terms were dropped (aliasing).
      if (nrow(modelAA) < (nSNP + 2 + nSNP)) {
        stop("Error: Increase n or MAF because there is not enough variability")
      }
      # Per-SNP interaction p-values, Bonferroni-corrected across SNPs.
      nRow <- nSNP + 2
      for (rr in seq_len(nSNP)) {
        if (modelAA[(nRow + rr), 4] < (alpha / nSNP)) {
          rejectH0[bb, rr] <- rejectH0[bb, rr] + 1
        }
      }
      # Joint F-test of all interaction terms: full vs. reduced model.
      # Use the exact "Pr(>F)" column name (the original relied on `$P`
      # partial matching, which is fragile).
      modelR <- lm(y ~ z + X)
      if (anova(modelA, modelR)[["Pr(>F)"]][2] < alpha) {
        rejectH0[bb, "lmAll"] <- rejectH0[bb, "lmAll"] + 1
      }
    } # end betaI loop
  } # end replicate loop

  # Convert rejection counts to rates.
  rejectMat <- rejectH0 / nSim

  ####################################
  # Plot rejection rates vs. interaction effect size
  ####################################
  pdf(plot.name)
  # Guarantee the device is closed even if plotting errors out.
  on.exit(dev.off(), add = TRUE)
  plot(-1, -1, xlim = c(min(betaI), max(betaI)), ylim = c(0, 1),
       xlab = "betaI", ylab = "", main = "")
  for (pp in seq_len(ncol(rejectMat))) {
    lines(betaI, rejectMat[, pp], pch = pp, col = pp, type = "b")
  }
  legend("topleft", c(paste("SNP", 1:nSNP, ": MAF=", c(MAF), sep = ""), "All SNPs"),
         col = c(1:ncol(rejectMat)), pch = (1:ncol(rejectMat)), lwd = 1)

  ####################################
  # Return rejection-rate matrix (list-wrapped, as in the original interface)
  ####################################
  list(rejectMat)
}
0818dd506966db6e2824d2af141e428aee040369
6b8d5069dbfd473a14c14458efd33d3e85fc2a4d
/run_analysis.R
c54615d6139f44b741fa0c0b2c6be4e00e0f3149
[]
no_license
vipvipvip/CleanData_CourseProject
faded72631bf36420f0e34f42fa9aa583bdf8192
1199f684c016edaf90d6d13cd4dc96b89507e26a
refs/heads/master
2021-01-16T18:03:35.592381
2014-08-19T20:32:27
2014-08-19T20:32:27
null
0
0
null
null
null
null
UTF-8
R
false
false
3,625
r
run_analysis.R
library(data.table) library(plyr) run <- function () { # read 'train/X_train.txt': Training set. # read 'test/X_test.txt' # read features.txt -- apply these as columns name for the data sets. # remove _, -, (, ) from each column # merge two data set now since col names are identical # select columns with word "mean" and "sd" if (!file.exists("UCI HAR Dataset")) { stop ("./UCI HAR Dataset directory not found.") } dfFeatures <- read.csv("./UCI HAR Dataset/features.txt", colClasses = "character", header=F, sep=" ") dfFeatures[,2] <- gsub("-","", dfFeatures[,2]) dfFeatures[,2] <- gsub("-","", dfFeatures[,2]) dfFeatures[,2] <- gsub('\\(',"", dfFeatures[,2]) dfFeatures[,2] <- gsub('\\)',"", dfFeatures[,2]) dfFeatures[,2] <- gsub('\\,',"", dfFeatures[,2]) dfXTest <- read.table("./UCI HAR Dataset/test/X_test.txt") colnames(dfXTest) <-dfFeatures[,2] dfyTest <- read.table("./UCI HAR Dataset/test/y_test.txt") colnames(dfyTest) <- c("ActivityCode") dfTestSubjects <- read.table("./UCI HAR Dataset/test/subject_test.txt") colnames(dfTestSubjects) <- c("Subjects") dfTest <- cbind(dfXTest, dfyTest, dfTestSubjects) #2 extract mean & std cols & additional columns dfTest <- dfTest[,sort(grep("mean|std|ActivityCode|Subjects", names(dfTest)))] #str(dfTest) dfXTrain <- read.table("./UCI HAR Dataset/train/X_train.txt") colnames(dfXTrain) <-dfFeatures[,2] dfyTrain <- read.table("./UCI HAR Dataset/train/y_train.txt") colnames(dfyTrain) <- c("ActivityCode") dfTrainSubjects <- read.table("./UCI HAR Dataset/train/subject_train.txt") colnames(dfTrainSubjects) <- c("Subjects") dfTrain <- cbind(dfXTrain, dfyTrain, dfTrainSubjects) #2 extract mean & std cols & additional columns dfTrain <- dfTrain[,sort(grep("mean|std|ActivityCode|Subjects", names(dfTrain)))] #str(dfTrain) #1 merge both dataset dfALL <- rbind(dfTrain, dfTest) #2 extract mean & std cols & additional columns #dfALL <- dfALL[,sort(grep("mean|std|ActivityCode|Subjects", names(dfALL)))] #3 descriptive activity names numCols <- 82 
#ncol(dfALL) dfALL$ActivityLabel <- seq(1:nrow(dfALL)) dfALL[dfALL$ActivityCode %in% c(1),numCols] = "WALKING" dfALL[dfALL$ActivityCode %in% c(2),numCols] = "WALKING_UPSTAIRS" dfALL[dfALL$ActivityCode %in% c(3),numCols] = "WALKING_DOWNSTAIRS" dfALL[dfALL$ActivityCode %in% c(4),numCols] = "SITTING" dfALL[dfALL$ActivityCode %in% c(5),numCols] = "STANDING" dfALL[dfALL$ActivityCode %in% c(6),numCols] = "LAYING" #table(dfALL$ActivityLabel) dfALL <- dfALL[,c(80:82,1:79)] #4 columns already labeled correctly. it has 180 rows #5 prepare tidy data set final <- data.frame() for (i in 1:30) { for (j in 1:6) { final <- rbind(final, colMeans(dfALL[dfALL$Subjects==i & dfALL$ActivityCode==j, 4:82], c(4,82))) } } colnames(final) <- c(names(dfALL[,4:82])) final$Subject <- 1 final$ActivityCode <- 1 nr=1 for (i in 1:30) { for (j in 1:6) { final$Subject[nr] <- i final$ActivityCode[nr] <- j nr = nr + 1 } } final$ActivityLabel <- seq(1:nrow(final)) final[final$ActivityCode %in% c(1),ncol(final)] = "WALKING" final[final$ActivityCode %in% c(2),ncol(final)] = "WALKING_UPSTAIRS" final[final$ActivityCode %in% c(3),ncol(final)] = "WALKING_DOWNSTAIRS" final[final$ActivityCode %in% c(4),ncol(final)] = "SITTING" final[final$ActivityCode %in% c(5),ncol(final)] = "STANDING" final[final$ActivityCode %in% c(6),ncol(final)] = "LAYING" final <- final[,c(80,82,1:79)] #final write.table(final,"tidySet.csv",row.names=F) }
f22765e0b55a015888c2b1230fdd5c348d5eaa7d
84a81beb43008d608479b4e5c993ca86cfe86873
/man/gap.barplot.cust.Rd
507e59e4fb981d4ca011e7fc3b876988974bef24
[]
no_license
andrew-edwards/sizeSpectra
bb3204c5190ec6ccf09ef3252da30f0c2b4ac428
517c18d84f4326b59807de5235ab4cddac74876b
refs/heads/master
2023-06-22T17:57:26.718351
2023-06-12T16:51:23
2023-06-12T16:51:23
212,250,882
7
8
null
null
null
null
UTF-8
R
false
true
1,954
rd
gap.barplot.cust.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plotting.R \name{gap.barplot.cust} \alias{gap.barplot.cust} \title{Customising \code{plotrix::gap.barplot} for a histogram with a gap in y-axis} \usage{ gap.barplot.cust( y, gap = c(9, 980), xaxlab, xtics, yaxlab, ytics = c(seq(0, 8, by = 4), seq(980, 988, by = 4)), midpoints, breakpoints, xlim, ylim = c(0, 17), xlab = expression(paste("Values, ", italic(x))), ylab = "Count in each bin", horiz = FALSE, col = NULL, N = 1000, ... ) } \arguments{ \item{y}{vector of data values} \item{gap}{range of values to be left out} \item{xaxlab}{labels for the x axis ticks} \item{xtics}{position of the x axis ticks} \item{yaxlab}{labels for the y axis ticks} \item{ytics}{position of the y axis ticks} \item{midpoints}{midpoints of the bins} \item{breakpoints}{breaks of the bins} \item{xlim}{optional x limits for the plot} \item{ylim}{optional y limits for the plot} \item{xlab}{label for the x axis} \item{ylab}{label for the y axis} \item{horiz}{whether to have vertical or horizontal bars} \item{col}{color(s) in which to plot the values} \item{N}{value of highest top short tickmark} \item{...}{arguments passed to 'barplot'.} } \value{ Barplot with a gap in the y-axis } \description{ For Figure 1 of MEE paper, to make a histogram (barplot) with a gap in the y-axis. Customising \code{gap.barplot()} from the package plotrix by Jim Lemon and others Several default options here are customised for the particular plot (and to change a few of the defaults in gap.barplot) so the code would require some modifiying to use more generally. } \details{ This function modifies \code{plotrix::gap.barplot()}, between 2nd Sept 2014 and finalised here in October 2019. \code{plotrix} was written by Jim Lemon and others and is available from CRAN at https://cran.r-project.org/web/packages/plotrix/index.html. } \author{ Andrew Edwards }
9343eededf0aa867e58a66eb8f4be454c8dc620c
956615ffd5cb4f5d6695f55b17737164b66b4428
/man/edit_collection_folder.Rd
1e813e75d14f6d3f063bda07d05d6a25871cc805
[]
no_license
Pascallio/discogsAPI
dcef3691ce050955343914695bacd8bf93c03af3
be00217a931009cd4f6821677946b48fd83ded16
refs/heads/master
2023-01-22T15:18:51.054985
2020-11-19T21:47:09
2020-11-19T21:47:09
311,048,605
1
0
null
null
null
null
UTF-8
R
false
true
792
rd
edit_collection_folder.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Collection.R \name{edit_collection_folder} \alias{edit_collection_folder} \title{Edit a folder’s metadata.} \usage{ edit_collection_folder( username, folder_id token, name = "" ) } \arguments{ \item{username}{String containing a valid username} \item{folder_id}{Valid identifier for a folder} \item{token}{Token object obtained from authorize() or a string containing your personal access token} \item{name}{(optional) Name for the folder} } \description{ Edit a folder’s metadata. } \details{ Folders 0 and 1 cannot be renamed. Authentication as the collection owner is required. } \examples{ token <- authorize("key", "secret") edit_collection_folder("username", 3, token, name = "new_folder_name") }
4fff7228ea80e76c6e081d5bc5912c38599df46e
d7d100225fe95a58431b89ae2e48126f2589dbdb
/hello.R
7d49c4f26165a6d858aea88ddd3bfd645422054d
[]
no_license
shubhamkalra27/datasciencecoursera
8253ce1e1b29d5bce71193e62cb148767e4b92fc
a37e00a905a3f2c8157e143dc29307673b100903
refs/heads/master
2021-01-10T03:20:41.688090
2015-11-04T15:01:00
2015-11-04T15:01:00
45,500,079
0
0
null
null
null
null
UTF-8
R
false
false
8,662
r
hello.R
# Topic: R Sessions - MSU # Purpose: Introduce basic concepts of R # Session 1 # Set working directory setwd("D:/MSU/R/Working") # Get the location of the working directory getwd() # Read a data file into R # Read a csv file trans <- read.csv("TransactionMaster.csv") cust <- read.csv("CustomerMaster.csv") # Read files using the read.table command trans_1 <- read.table("TransactionMaster.csv", header=FALSE, sep=",", stringsAsFactors=FALSE, na.strings = TRUE) # Create different types of data # vectors/array a <- c(1,3,4,6) a b <- c("LedZep","Floyd","Who?","Doors") b 1:16 # Matrix mat1 <- matrix(c(1:16), ncol=4, byrow=TRUE, dimnames = list(c("row1", "row2","row3","row4"), b)) mat1 # list list1 <- list(a,mat1) list1 # dataframe data1 <- data.frame(a,b) data2 <- data.frame(BandName = b, Rank=a) data1 data2 # Get basic statistics about the data summary(trans) head(trans) str(trans) colnames(cust) # Accessing elements from R objects a a[3] list1[1] # Access Columns data2[2] data2[,2] # What's the difference in these 2 methods? 
cust["Branch_Num"] trans$System_Period # Access rows data2[2,] trans[1223,] # Subset based on rows and columns cust[c(1,2,3,4),c(3,4,5)] # Session 2 # Basic functions in R # Dimensions of dataframes nrow(trans) ncol(cust) # Identifying unique entries unique(trans$Branch_Number) # Counting the number of entries length(trans$Invoice_Date) length(unique(trans$Branch_Number)) # 'Which' function which(cust$City == 'ATLANTA') which(cust$City == 'NEW YORK') # Subset the dataset based on the 'which' function Atlanta_cust <- cust[which(cust$City == 'ATLANTA'),] # Subset function Atlanta_cust_1 <- subset(cust, cust$City == 'ATLANTA') # Text manipulation functions # Find function grep("CARTUM", cust$Customer_Name) which(cust$Customer_Name == 'CARTUM') # Why we use grep when 'which' function is available cartum_cust <- cust[grep("CARTUM", cust$Customer_Name),] # Find and Replace gsub("-","",cust$Phone_Number) cust$Phone_Number <- gsub("-","",cust$Phone_Number) # Check out regexpr function # Concatenation function cust$Full_Name <- paste(cust$Contact_Name_First, cust$Contact_Name_Last,sep=" ") # Sorting data order(a) order(-trans$Sales_Amount) trans_sorted <- trans[order(-trans$Sales_Amount),] # Merging data # Inner Join inner <- merge(trans,cust,by.x="Customer_Number", by.y="Customer_Number", all=FALSE) # Outer Join outer <- merge(trans,cust,by.x="Customer_Number", by.y="Customer_Number", all=TRUE) # Left Join left <- merge(trans,cust,by.x="Customer_Number", by.y="Customer_Number", all.x=TRUE) # Right Join right <- merge(trans,cust,by.x="Customer_Number", by.y="Customer_Number", all.y=TRUE) # Session 3 # Reading Date formats: # Reference: http://www.statmethods.net/input/dates.html inv_date <- as.Date(trans$Invoice_Date, format = c("%d-%b-%y")) inv_date # Test on real data trans$Invoice_Date <- as.Date(trans$Invoice_Date, format = c("%d-%b-%y")) trans$Service_Date <- as.Date(trans$Service_Date, format = c("%d-%b-%y")) # Get the system time Sys.time() which(trans$Invoice_Date < 
trans$Service_Date) # Read up on as.POSIXct and as.POSIXlt. When and why are they useful?? # Extract time related values format(trans$Invoice_Date, "%d") max(format(trans$Invoice_Date, "%y")) max(format(trans$Service_Date, "%Y")) trans$Service_Date max(trans$Service_Date) # Subset based on year trans_sub <- subset(trans, as.numeric(format(trans$Invoice_Date,"%m")) %in% c(1,2,3)) # Aggregate function trans_agg <- aggregate(trans$Sales_Amount, by=list(trans$Branch_Number), FUN=sum) trans_agg <- aggregate(trans["Sales_Amount"], by=list(trans[,"Branch_Number"]), FUN=sum) trans_agg_1 <- aggregate(trans$Sales_Amount ~ trans$Branch_Number, FUN=max) trans_agg_2 <- aggregate(trans$Sales_Amount ~ trans$Branch_Number + trans$Product_Number, FUN = sum) # Let's say i want to do 'Max - Min' when aggregating, how do i do that? # Flow Control x <- runif(100,min=100, max=10000) measure <- "max" if(measure == "median" ) { print(median(x)) } else if (measure == "mean") { print(mean(x)) } else { print("Wrong Input") } ## Include apply family # SQLDF in R install.packages("sqldf") library(sqldf) df <- sqldf("select distinct Product_Number from trans") sqldf("select distinct Customer_Number as Customers from cust where State = 'FL'") leftjoin <- sqldf("select a.*, b.* from cust as a left join trans as b on a.Customer_Number = b.Customer_Number") innerjoin <- sqldf("select a.Customer_Number, a.City, b.Sales_Amount from cust as a inner join trans as b on a.Customer_Number = b.Customer_Number") SalesByCustomer <- sqldf("select Customer_Number as Customer, sum(Sales_Amount) as Total_Sales from innerjoin group by Customer_Number") # Session 4 # Custom Functions in R oddcount <- function(x) { k <- 0 ## Assign the value 0 to k for (n in x) { ## Start a FOR loop for every element in x if (n %% 2 == 1) k <- k + 1 ## %% is a modulo operator } return(k) } oddcount(c(1,2,3,5,7,9,14)) # For-Loops for ( i in 1:5) { print(i) } # While-Loop i <- 0 while (i < 10) { print(i) i <- i + 1 } # Basic plots 
and charts in R # Good reference site: http://www.harding.edu/fmccown/r/ # Read in world bank dataset world <- read.csv("worldbank.csv") summary(world) # Subset the dataset attach(world) # Plotting # Histogram hist(life_expectancy) hist(infant_mortality_rate , breaks=10, main = "Infant Mortality rate", xlab = "Infant MR") plot(density(infant_mortality_rate)) # Scatterplots # Test the hypothesis - 'Higher the life expectancy, lower the infant mortality' plot(life_expectancy, infant_mortality_rate) plot(life_expectancy, infant_mortality_rate, main = "Hypothesis Test", xlab = "Life Expectancy", ylab = "Infant mortality rate", col = "blue", pch = 20) pairs(~life_expectancy + infant_mortality_rate + birth_rate ) pairs(world) # Look at 'Pairs' function # Sample Bar plot # Subset the data world_subset <- subset(world, country_name %in% c("Australia", "India", "Mexico", "Bulgaria", "Finland", "Uruguay")) detach(world) # Bar and Pie charts attach(world_subset) barplot(energy_use_percapita, main = "Energy per capita", xlab = "Country", ylab = "Consumption",names.arg= country_name) barplot(energy_use_percapita, main = "Energy per capita", xlab = "Country", ylab = "Consumption", col=rainbow(length(country_name)), legend = country_name) # Sample Pie plot pie(x=fertility_rate, col = rainbow(length(country_name)), label = paste(country_name, fertility_rate, sep = "-"), main = "Fertility rate") detach(world_subset) # Session 5 # Misc operations and Mathematical functions # Missing values is.na(world) which(is.na(world)) # What's the difference between NA and Null?? 
world_2 <- na.omit(world) # Quick test 1: Create a dataset removing all NA's world_subset_2 <- world[which(!is.na(world$energy_use_percapita)),] world <- world[-1,] # Quick test 2: Calculate the % of NA values in each column in the dataset 'world' per <- function(x) { k <- (length(which(is.na(x)))*100/length(x)) k <- round(k, digits = 2) k <- paste(k,"%", sep = "") } out <- sapply(world[,2:9], per) out # Tables table(as.factor(cust$City), as.factor(cust$Customer_Number)) # Correlations attach(world) cor(energy_use_percapita, gni_per_capita) cor(world$energy_use_percapita, world$gni_per_capita,use="pairwise.complete", method = "spearman") # Quantile subsets quantile(energy_use_percapita, probs = c(0.05,0.95), na.rm=T) # Small mathematics # mean mean(birth_rate) # standard deviation sd(birth_rate) # Sampling # From normal distribution rnorm(10, mean = 10, sd = 22) # From t-distribution rt(100, df=2,ncp=23) set.seed(2123) rnorm(10, mean = 10, sd = 22) # Create your own dataset with your employee number set.seed(3547) c1 <- c("India","Pakistan","Sri Lanka", "Bangladesh") c2 <- rnorm(4, mean = 200, sd = 50) c3 <- rnorm(4, mean = 5, sd = 2) c4 <- rbeta(4, 1,2) asia <- data.frame(Country = c1, Avg_team_score = c2, Avg_team_Wickets = c3, Stat = c4) asia a <- na.omit(world[2:6]) b <- cor(a[1],a[2:5]) max(b) colnames(b)[order(-b)[which(b[1,] %in% b[1,order(-b)[1:4]])]] order(-b)[1:2] which(b = max(b))
869eb541eaebc734b13141f070d9705727f4696e
4840eb138586354c1d12adc6a77367cfaffb4d8e
/WebCrawler/csx/Debug/ServiceDefinition.rd
313fa4076ce0209d8d684f7c6f93449c55ca4e26
[]
no_license
rhenvar/WebCrawler
c1a42d75815f18df017604995c5ffff6786c8a8f
108f1012721849f37dadeb7359672b10c1b76b90
refs/heads/master
2016-09-13T13:17:19.918077
2016-05-16T22:22:56
2016-05-16T22:22:56
58,058,898
0
0
null
null
null
null
UTF-8
R
false
false
8,987
rd
ServiceDefinition.rd
<?xml version="1.0" encoding="utf-8"?> <serviceModel xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" name="WebCrawler" generation="1" functional="0" release="0" Id="7ec79cca-40a9-4d07-bb1c-a46cb37add4f" dslVersion="1.2.0.0" xmlns="http://schemas.microsoft.com/dsltools/RDSM"> <groups> <group name="WebCrawlerGroup" generation="1" functional="0" release="0"> <componentports> <inPort name="CrawlerWebRole:Endpoint1" protocol="http"> <inToChannel> <lBChannelMoniker name="/WebCrawler/WebCrawlerGroup/LB:CrawlerWebRole:Endpoint1" /> </inToChannel> </inPort> </componentports> <settings> <aCS name="CrawlerWebRole:APPINSIGHTS_INSTRUMENTATIONKEY" defaultValue=""> <maps> <mapMoniker name="/WebCrawler/WebCrawlerGroup/MapCrawlerWebRole:APPINSIGHTS_INSTRUMENTATIONKEY" /> </maps> </aCS> <aCS name="CrawlerWebRole:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" defaultValue=""> <maps> <mapMoniker name="/WebCrawler/WebCrawlerGroup/MapCrawlerWebRole:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" /> </maps> </aCS> <aCS name="CrawlerWebRoleInstances" defaultValue="[1,1,1]"> <maps> <mapMoniker name="/WebCrawler/WebCrawlerGroup/MapCrawlerWebRoleInstances" /> </maps> </aCS> <aCS name="CrawlerWorkerRole:APPINSIGHTS_INSTRUMENTATIONKEY" defaultValue=""> <maps> <mapMoniker name="/WebCrawler/WebCrawlerGroup/MapCrawlerWorkerRole:APPINSIGHTS_INSTRUMENTATIONKEY" /> </maps> </aCS> <aCS name="CrawlerWorkerRole:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" defaultValue=""> <maps> <mapMoniker name="/WebCrawler/WebCrawlerGroup/MapCrawlerWorkerRole:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" /> </maps> </aCS> <aCS name="CrawlerWorkerRoleInstances" defaultValue="[1,1,1]"> <maps> <mapMoniker name="/WebCrawler/WebCrawlerGroup/MapCrawlerWorkerRoleInstances" /> </maps> </aCS> </settings> <channels> <sFSwitchChannel name="IE:CrawlerWorkerRole:WorkerEndpoint"> <toPorts> <inPortMoniker 
name="/WebCrawler/WebCrawlerGroup/CrawlerWorkerRole/WorkerEndpoint" /> </toPorts> </sFSwitchChannel> <lBChannel name="LB:CrawlerWebRole:Endpoint1"> <toPorts> <inPortMoniker name="/WebCrawler/WebCrawlerGroup/CrawlerWebRole/Endpoint1" /> </toPorts> </lBChannel> </channels> <maps> <map name="MapCrawlerWebRole:APPINSIGHTS_INSTRUMENTATIONKEY" kind="Identity"> <setting> <aCSMoniker name="/WebCrawler/WebCrawlerGroup/CrawlerWebRole/APPINSIGHTS_INSTRUMENTATIONKEY" /> </setting> </map> <map name="MapCrawlerWebRole:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" kind="Identity"> <setting> <aCSMoniker name="/WebCrawler/WebCrawlerGroup/CrawlerWebRole/Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" /> </setting> </map> <map name="MapCrawlerWebRoleInstances" kind="Identity"> <setting> <sCSPolicyIDMoniker name="/WebCrawler/WebCrawlerGroup/CrawlerWebRoleInstances" /> </setting> </map> <map name="MapCrawlerWorkerRole:APPINSIGHTS_INSTRUMENTATIONKEY" kind="Identity"> <setting> <aCSMoniker name="/WebCrawler/WebCrawlerGroup/CrawlerWorkerRole/APPINSIGHTS_INSTRUMENTATIONKEY" /> </setting> </map> <map name="MapCrawlerWorkerRole:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" kind="Identity"> <setting> <aCSMoniker name="/WebCrawler/WebCrawlerGroup/CrawlerWorkerRole/Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" /> </setting> </map> <map name="MapCrawlerWorkerRoleInstances" kind="Identity"> <setting> <sCSPolicyIDMoniker name="/WebCrawler/WebCrawlerGroup/CrawlerWorkerRoleInstances" /> </setting> </map> </maps> <components> <groupHascomponents> <role name="CrawlerWebRole" generation="1" functional="0" release="0" software="C:\Users\iGuest\Source\Repos\WebCrawler\WebCrawler\csx\Debug\roles\CrawlerWebRole" entryPoint="base\x64\WaHostBootstrapper.exe" parameters="base\x64\WaIISHost.exe " memIndex="-1" hostingEnvironment="frontendadmin" hostingEnvironmentVersion="2"> <componentports> <inPort name="Endpoint1" protocol="http" portRanges="80" /> 
</componentports> <settings> <aCS name="APPINSIGHTS_INSTRUMENTATIONKEY" defaultValue="" /> <aCS name="Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" defaultValue="" /> <aCS name="__ModelData" defaultValue="&lt;m role=&quot;CrawlerWebRole&quot; xmlns=&quot;urn:azure:m:v1&quot;&gt;&lt;r name=&quot;CrawlerWebRole&quot;&gt;&lt;e name=&quot;Endpoint1&quot; /&gt;&lt;/r&gt;&lt;r name=&quot;CrawlerWorkerRole&quot;&gt;&lt;e name=&quot;WorkerEndpoint&quot; /&gt;&lt;/r&gt;&lt;/m&gt;" /> </settings> <resourcereferences> <resourceReference name="DiagnosticStore" defaultAmount="[4096,4096,4096]" defaultSticky="true" kind="Directory" /> <resourceReference name="EventStore" defaultAmount="[1000,1000,1000]" defaultSticky="false" kind="LogStore" /> </resourcereferences> </role> <sCSPolicy> <sCSPolicyIDMoniker name="/WebCrawler/WebCrawlerGroup/CrawlerWebRoleInstances" /> <sCSPolicyUpdateDomainMoniker name="/WebCrawler/WebCrawlerGroup/CrawlerWebRoleUpgradeDomains" /> <sCSPolicyFaultDomainMoniker name="/WebCrawler/WebCrawlerGroup/CrawlerWebRoleFaultDomains" /> </sCSPolicy> </groupHascomponents> <groupHascomponents> <role name="CrawlerWorkerRole" generation="1" functional="0" release="0" software="C:\Users\iGuest\Source\Repos\WebCrawler\WebCrawler\csx\Debug\roles\CrawlerWorkerRole" entryPoint="base\x64\WaHostBootstrapper.exe" parameters="base\x64\WaWorkerHost.exe " memIndex="-1" hostingEnvironment="consoleroleadmin" hostingEnvironmentVersion="2"> <settings> <aCS name="APPINSIGHTS_INSTRUMENTATIONKEY" defaultValue="" /> <aCS name="Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" defaultValue="" /> <aCS name="__ModelData" defaultValue="&lt;m role=&quot;CrawlerWorkerRole&quot; xmlns=&quot;urn:azure:m:v1&quot;&gt;&lt;r name=&quot;CrawlerWebRole&quot;&gt;&lt;e name=&quot;Endpoint1&quot; /&gt;&lt;/r&gt;&lt;r name=&quot;CrawlerWorkerRole&quot;&gt;&lt;e name=&quot;WorkerEndpoint&quot; /&gt;&lt;/r&gt;&lt;/m&gt;" /> </settings> <resourcereferences> <resourceReference 
name="DiagnosticStore" defaultAmount="[4096,4096,4096]" defaultSticky="true" kind="Directory" /> <resourceReference name="EventStore" defaultAmount="[1000,1000,1000]" defaultSticky="false" kind="LogStore" /> </resourcereferences> </role> <sCSPolicy> <sCSPolicyIDMoniker name="/WebCrawler/WebCrawlerGroup/CrawlerWorkerRoleInstances" /> <sCSPolicyUpdateDomainMoniker name="/WebCrawler/WebCrawlerGroup/CrawlerWorkerRoleUpgradeDomains" /> <sCSPolicyFaultDomainMoniker name="/WebCrawler/WebCrawlerGroup/CrawlerWorkerRoleFaultDomains" /> </sCSPolicy> </groupHascomponents> </components> <sCSPolicy> <sCSPolicyUpdateDomain name="CrawlerWebRoleUpgradeDomains" defaultPolicy="[5,5,5]" /> <sCSPolicyUpdateDomain name="CrawlerWorkerRoleUpgradeDomains" defaultPolicy="[5,5,5]" /> <sCSPolicyFaultDomain name="CrawlerWebRoleFaultDomains" defaultPolicy="[2,2,2]" /> <sCSPolicyFaultDomain name="CrawlerWorkerRoleFaultDomains" defaultPolicy="[2,2,2]" /> <sCSPolicyID name="CrawlerWebRoleInstances" defaultPolicy="[1,1,1]" /> <sCSPolicyID name="CrawlerWorkerRoleInstances" defaultPolicy="[1,1,1]" /> </sCSPolicy> </group> </groups> <implements> <implementation Id="40abb8c0-0966-43ea-bde0-8a60f8102ecc" ref="Microsoft.RedDog.Contract\ServiceContract\WebCrawlerContract@ServiceDefinition"> <interfacereferences> <interfaceReference Id="bd17416d-0cae-4ebe-828b-60c322f69850" ref="Microsoft.RedDog.Contract\Interface\CrawlerWebRole:Endpoint1@ServiceDefinition"> <inPort> <inPortMoniker name="/WebCrawler/WebCrawlerGroup/CrawlerWebRole:Endpoint1" /> </inPort> </interfaceReference> </interfacereferences> </implementation> </implements> </serviceModel>
12ef18f55b43fb00388363716556fae1ca98fc8f
119b181488acae0e7d49a5d35ee7decf527ebe44
/man/getDisaggCommodityPercentages.Rd
a1b7b7eae29c1bd16f31c83d138df6b9012a448f
[ "MIT" ]
permissive
USEPA/useeior
0d46f1ca9ca1756e1760b153be620a234fddda03
169ae5a16c4e367a3c39ceabff3c85f0b4e187a1
refs/heads/master
2023-08-06T19:03:28.121338
2023-07-14T18:39:13
2023-07-14T18:39:13
221,473,707
30
24
MIT
2023-09-06T15:47:55
2019-11-13T14:07:05
R
UTF-8
R
false
true
627
rd
getDisaggCommodityPercentages.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/DisaggregateFunctions.R \name{getDisaggCommodityPercentages} \alias{getDisaggCommodityPercentages} \title{Obtain default disaggregation percentages for commodities from the disaggregation input files.} \usage{ getDisaggCommodityPercentages(disagg) } \arguments{ \item{disagg}{Specifications for disaggregating the current Model} } \value{ A dataframe with the default disaggregation percentages for the Commodities of the current model } \description{ Obtain default disaggregation percentages for commodities from the disaggregation input files. }
b002965ba1dabb7ccfc4fc65e96d03308d0fbbc6
48c55aad7dcb6196d98be4d13878d2617509e404
/shiny/ui.R
1eceb2260969536e1273d6faa978e31291c1388d
[ "MIT" ]
permissive
majbc1999/APPR-2019-20
234cd5c06bdc24a62b0e7e95dbd8940061a6b1e3
3a75dc1f3ccdff4565250ad8769da57bedc68434
refs/heads/master
2020-09-10T03:44:06.745219
2020-08-07T16:07:23
2020-08-07T16:07:23
221,640,084
1
0
MIT
2019-11-28T13:50:09
2019-11-14T07:45:22
R
UTF-8
R
false
false
126
r
ui.R
library(shiny) shinyUI(fluidPage( titlePanel("Obnovljivi viri po svetu"), DT::dataTableOutput("TD_world_obnovljivi")) )
3f962ec2767e918b78dc6125220cd551ccec7bd5
bbf8cd2c300eb3c7f5c18dda9855969dc537ebfc
/R/estimate.R
f615655f7c2b353e278e3cc18a02e55088bdf607
[]
no_license
Gootjes/semlj
11eb07aa4f6b945f4acd91fe395da66260a56477
e9bbcc305b52d4e043937d27aca1656b4426ed8e
refs/heads/main
2023-05-08T09:46:35.041344
2021-05-24T16:05:26
2021-05-24T16:05:26
null
0
0
null
null
null
null
UTF-8
R
false
false
8,410
r
estimate.R
## Estimate: fits the lavaan model defined by the Syntax superclass and fills
## the same result tables that Syntax declares, but with the estimated values.
## NOTE(review): depends on project helpers defined elsewhere in the package
## (Syntax, is.something, try_hard, ginfo, WARNS) and on private$.lav_structure
## prepared by the superclass -- their behavior is assumed, not shown here.
Estimate <- R6::R6Class("Estimate",
                        inherit = Syntax,
                        cloneable=FALSE,
                        class=FALSE,
                        list(
                          model=NULL,           # fitted lavaan model object
                          tab_fit=NULL,         # overall model fit tests (chi-square rows)
                          tab_fitindices=NULL,  # named list of lavaan fit measures
                          ciwidth=NULL,         # confidence level as a proportion (e.g. .95)
                          tab_constfit=NULL,    # score tests for equality constraints, if any
                          # Delegate option handling to Syntax, then convert the
                          # user-facing ciWidth percentage into a proportion.
                          initialize=function(options,datamatic) {
                            super$initialize(options=options,datamatic=datamatic)
                            self$ciwidth<-options$ciWidth/100
                          },
                          # Fit the model on `data` and populate the result tables.
                          # Returns early with the error object if estimation fails.
                          estimate=function(data) {
                            ## assemble lavaan::lavaan() arguments from the user options
                            ## (model syntax comes from the structure built by Syntax)
                            lavoptions<-list(model = private$.lav_structure,
                                             data = data,
                                             se=self$options$se,
                                             bootstrap=self$options$bootN,
                                             estimator=self$options$estimator
                            )
                            ## multigroup analysis: pass the grouping variable and its levels
                            if (is.something(self$multigroup)) {
                              lavoptions[["group"]]<-self$multigroup$var
                              lavoptions[["group.label"]]<-self$multigroup$levels
                            }
                            ## the likelihood option is only meaningful for ML estimation
                            if (self$options$estimator=="ML") {
                              lavoptions[["likelihood"]]<-self$options$likelihood
                            }
                            ## estimate the model; try_hard captures warnings/errors
                            ## instead of stopping (project helper)
                            results<-try_hard({do.call(lavaan::lavaan,lavoptions) })
                            self$warnings<-list(topic="info",message=results$warning)
                            self$errors<-results$error
                            if (is.something(self$errors)) return(self$errors)
                            self$model<-results$obj
                            ## parameter estimates with CIs (bootstrap CI type only
                            ## used when se is bootstrap)
                            .lav_params<-lavaan::parameterestimates(
                              self$model,
                              ci=self$options$ci,
                              standardized = T,
                              level = self$ciwidth,
                              boot.ci.type = self$options$bootci
                            )
                            ## drop constraint/inequality rows from the structure so it
                            ## lines up row-by-row with the estimates table
                            .lav_structure<-private$.lav_structure
                            sel<-grep("==|<|>",.lav_structure$op,invert = T)
                            .lav_structure<-.lav_structure[sel,]
                            ## flag which parameters were freely estimated
                            .lav_params$free<-(.lav_structure$free>0)
                            ## split the estimates by operator into the result tables
                            self$tab_coefficients<-.lav_params[.lav_params$op=="~",]   # regressions
                            self$tab_loadings<-.lav_params[.lav_params$op=="=~",]      # factor loadings
                            self$tab_covariances<-.lav_params[.lav_params$op=="~~",]   # (co)variances
                            self$tab_defined<-.lav_params[.lav_params$op==":=",]       # defined parameters
                            if (nrow(self$tab_defined)==0) self$tab_defined<-NULL
                            # NOTE(review): `tab` appears unused until it is reassigned in the
                            # constraints branch below -- possibly a leftover; confirm.
                            tab<-self$tab_covariances
                            self$tab_intercepts<-.lav_params[.lav_params$op=="~1",]    # intercepts
                            if (nrow(self$tab_intercepts)==0) self$tab_intercepts<-NULL
                            #### overall fit tests ###
                            alist<-list()
                            ff<-lavaan::fitmeasures(self$model)
                            # NOTE(review): alist is re-initialized here; the first
                            # assignment above is redundant.
                            alist<-list()
                            ## the user-model chi-square row is only meaningful with df > 0
                            if (ff[["df"]]>0)
                              alist[[1]]<-list(label="User Model",chisq=ff[["chisq"]],df=ff[["df"]],pvalue=ff[["pvalue"]])
                            ## baseline measures may be absent for some estimators, hence try()
                            try(alist[[length(alist)+1]]<-list(label="Baseline Model",chisq=ff[["baseline.chisq"]],df=ff[["baseline.df"]],pvalue=ff[["baseline.pvalue"]]))
                            self$tab_fitindices<-as.list(ff)
                            self$tab_fit<-alist
                            # general information table (label/value pairs)
                            alist<-list()
                            alist[[length(alist)+1]]<-c(info="Estimation Method",value=self$model@Options$estimator)
                            alist[[length(alist)+1]]<-c(info="Number of observations",value=lavaan::lavInspect(self$model,"ntotal"))
                            alist[[length(alist)+1]]<-c(info="Free parameters",value=self$model@Fit@npar)
                            alist[[length(alist)+1]]<-c(info="Converged",value=self$model@Fit@converged)
                            alist[[length(alist)+1]]<-c(info="",value="")
                            ## loglikelihoods are unavailable for some estimators, hence try()
                            try(alist[[length(alist)+1]]<-c(info="Loglikelihood user model",value=round(ff[["logl"]],digits=3) ))
                            try(alist[[length(alist)+1]]<-c(info="Loglikelihood unrestricted model",value=round(ff[["unrestricted.logl"]],digits=3)))
                            alist[[length(alist)+1]]<-c(info="",value="")
                            self$tab_info<-alist
                            ## score tests for user-defined equality constraints;
                            ## inequality constraints are not supported by lavTestScore
                            if (is.something(self$tab_constfit)) {
                              check<-sapply(self$tab_constfit$op,function(con) length(grep("<|>",con))>0,simplify = T)
                              if (any(check)) {
                                self$warnings<-list(topic="constraints",message=WARNS[["scoreineq"]])
                              } else {
                                tab<-lavaan::lavTestScore(self$model,
                                                          univariate = self$options$scoretest,
                                                          cumulative = self$options$cumscoretest)
                                if (self$options$scoretest) {
                                  ## rename lavTestScore columns to the table schema
                                  names(tab$uni)<-c("lhs","op","rhs","chisq","df","pvalue")
                                  self$tab_constfit<-tab$uni
                                  self$tab_constfit$type="Univariate"
                                }
                                if (self$options$cumscoretest) {
                                  names(tab$cumulative)<-c("lhs","op","rhs","chisq","df","pvalue")
                                  tab$cumulative$type<-"Cumulative"
                                  self$tab_constfit<-rbind(self$tab_constfit,tab$cumulative)
                                }
                                ## append the omnibus score test to the fit table
                                self$tab_fit[[length(self$tab_fit)+1]]<-list(label="Constraints Score Test",
                                                                             chisq=tab$test$X2,
                                                                             df=tab$test$df,
                                                                             pvalue=tab$test$p.value)
                              }
                            } # end of checking constraints
                            ginfo("Estimation is done...")
                          } # end of private function estimate
                        ) # end of private
) # end of class
266d159d99c5846e73735ceef49cf1efbfa08ed4
14b088d7a841ea2391a3c85626eacc00410603e8
/src/mitX_theAnalyticsEdge/mitX-15.071x-analyticEdge_unit5_recitation.R
c794b0eb773aac18bf444b24ceef470c33f0219b
[ "MIT" ]
permissive
pparacch/PlayingWithDataScience
f634080bee7b93fbc9071469b043996db72da061
5a753edde6a479cc3ee797bd30cfc88317557bda
refs/heads/master
2020-04-15T23:46:49.848710
2017-04-07T13:48:30
2017-04-07T13:48:30
28,199,608
0
0
null
null
null
null
UTF-8
R
false
false
16,951
r
mitX-15.071x-analyticEdge_unit5_recitation.R
# Unit 5 recitation: predictive coding on the Enron "energy bids" email set.
# Each of the 855 rows holds the raw email text plus a binary label
# `responsive` (1 = relevant to the energy-schedules/bids query).

# Load the dataset; keep email text as character, not factors
emails = read.csv("energy_bids.csv", stringsAsFactors=FALSE)

# 855 observations, two variables: email (text) and responsive (0/1)
str(emails)

# Inspect a couple of example emails. strwrap() breaks the long string into
# readable lines. Email 1 (a working-paper announcement) is not responsive;
# email 2 (forwarded California electricity report) is responsive.
emails$email[1]
strwrap(emails$email[1])
emails$responsive[1]

emails$email[2]
strwrap(emails$email[2])
emails$responsive[2]

# Class balance: responsive emails are a small minority, which is typical
# for predictive-coding problems
table(emails$responsive)

# Text-mining packages.
# NOTE(review): install.packages() re-installs on every run; consider guarding
# with requireNamespace() checks instead.
install.packages("tm")
library(tm)
install.packages("SnowballC")   # stemming support used by tm
library(SnowballC)

# Build the corpus: one document per email
corpus = Corpus(VectorSource(emails$email))
# We'll start out by calling the strwrap function to get it # on multiple lines, and then we can select the first element # in the corpus using the double square bracket notation # and selecting element 1. strwrap(corpus[[1]]) # And we can see that this is exactly # the same email that we saw originally, # the email about the working paper. # So now we're ready to preprocess the corpus using the tm_map # function. # So first, we'll convert the corpus # to lowercase using tm_map and the tolower function. # So we'll have corpus = tm_map(corpus, tolower). # Pre-process data corpus = tm_map(corpus, tolower) strwrap(corpus[[1]]) # IMPORTANT NOTE: If you are using the latest version of the tm package, # you will need to run the following line before continuing (it converts corpus to a Plain Text Document). # This is a recent change having to do with the tolower function that occurred after this video # was recorded. corpus = tm_map(corpus, PlainTextDocument) strwrap(corpus[[1]]) # And then we'll do the exact same thing except removing # punctuation, so we'll have corpus = tm_map(corpus, # removePunctuation). corpus = tm_map(corpus, removePunctuation) strwrap(corpus[[1]]) # We'll remove the stop words with removeWords function # and we'll pass along the stop words of the English language # as the words we want to remove. corpus = tm_map(corpus, removeWords, stopwords("english")) strwrap(corpus[[1]]) # And lastly, we're going to stem the document. # So corpus = tm_map(corpus, stemDocument). corpus = tm_map(corpus, stemDocument) # And now that we've gone through those four preprocessing steps, # we can take a second look at the first email in the corpus. # So again, call strwrap(corpus[[1]]). strwrap(corpus[[1]]) # And now it looks quite a bit different. # We can come up to the top here. # It's a lot harder to read now that we removed # all the stop words and punctuation and word stems, # but now the emails in this corpus # are ready for our machine learning algorithms. 
# BAG OF WORDS: build the document-term matrix from the processed corpus
dtm = DocumentTermMatrix(corpus)
dtm   # ~22,000 distinct terms for only 855 documents -- far too many predictors

# Keep only terms that appear in at least 3% of documents
dtm = removeSparseTerms(dtm, 0.97)
dtm   # down to 788 terms, a workable number

# Convert to a data frame of per-document term frequencies
labeledTerms = as.data.frame(as.matrix(dtm))

# Add the outcome variable back in
labeledTerms$responsive = emails$responsive

# 789 columns: 788 term frequencies plus `responsive`
str(labeledTerms)

# Train/test split (70/30), stratified on the outcome; the fixed seed makes
# the split reproducible
library(caTools)
set.seed(144)
spl = sample.split(labeledTerms$responsive, 0.7)
train = subset(labeledTerms, spl == TRUE)
test = subset(labeledTerms, spl == FALSE)

# Packages for CART modelling and tree plotting
library(rpart)
library(rpart.plot)
# Fit a CART classifier predicting `responsive` from all 788 term-frequency
# columns (hence the `~.` formula); method="class" because this is a
# classification problem
emailCART = rpart(responsive~., data=train, method="class")

# Plot the tree. Splits include terms plausibly tied to energy bids:
# california (root), system, demand, bid, gas, jeff (likely Jeff Skilling)
prp(emailCART)

# Test-set class probabilities: column 1 = P(non-responsive),
# column 2 = P(responsive); the rows sum to 1
pred = predict(emailCART, newdata=test)
pred[1:10,]

# Keep only the predicted probability of being responsive
pred.prob = pred[,2]

# Confusion matrix at a 0.5 cutoff:
# 195 true negatives, 25 true positives, 20 false positives, 17 false negatives
table(test$responsive, pred.prob >= 0.5)

# Accuracy ~85.6%.
# NOTE(review): these counts are hard-coded from one run of the seeded split;
# they break silently if the seed, split, or data change.
(195+25)/(195+25+17+20)

# Baseline: always predict non-responsive (the majority class)
table(test$responsive)
215/(215+42)   # ~83.7% -- CART gives only a small accuracy gain, as expected
               # on an unbalanced data set
# In document retrieval the error costs are asymmetric: a false positive only
# adds manual review work, while a false negative loses the document entirely.
# So false negatives are costlier, and we examine the ROC curve for cutoffs
# below 0.5.
library(ROCR)

# ROCR prediction object from the test-set probabilities and true labels
predROCR = prediction(pred.prob, test$responsive)

# True-positive rate vs false-positive rate
perfROCR = performance(predROCR, "tpr", "fpr")

# Colorized ROC curve: a cutoff near 0.15 yields roughly 70% TPR at 20% FPR,
# a sensible operating point given the asymmetric costs
plot(perfROCR, colorize=TRUE)

# Area under the ROC curve (~0.794): the model ranks a random responsive
# document above a random non-responsive one about 80% of the time
performance(predROCR, "auc")@y.values
7bc86fdf36fda77a414ad5d8cf5f2a8e2d4b0476
9e758a1fd686a06c99eccf25e02bf736640531c7
/man/fetch_google_analytics_4.Rd
ca2421f651953552d0090e01271986beb6cde86f
[]
no_license
addixvietnam/googleAnalyticsR_v0.4.2
2873c59bd23c76a06deaa036d676e6675c275869
d60ff8f8c6f6748b1fc985b9b079ac6b4738f8b3
refs/heads/master
2020-08-26T12:54:05.693373
2019-10-23T09:28:49
2019-10-23T09:28:49
217,016,500
0
0
null
null
null
null
UTF-8
R
false
true
1,605
rd
fetch_google_analytics_4.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ga_v4_get.R \name{fetch_google_analytics_4} \alias{fetch_google_analytics_4} \title{Fetch multiple GAv4 requests} \usage{ fetch_google_analytics_4(request_list, merge = FALSE) } \arguments{ \item{request_list}{A list of requests created by \link{make_ga_4_req}} \item{merge}{If TRUE then will rbind that list of data.frames} } \value{ A dataframe if one request, or a list of data.frames if multiple. } \description{ Fetch the GAv4 requests as created by \link{make_ga_4_req} } \details{ For same viewId, daterange, segments, samplingLevel and cohortGroup, v4 batches can be made } \examples{ \dontrun{ library(googleAnalyticsR) ## authenticate, ## or use the RStudio Addin "Google API Auth" with analytics scopes set ga_auth() ## get your accounts account_list <- google_analytics_account_list() ## pick a profile with data to query ga_id <- account_list[23,'viewId'] ga_req1 <- make_ga_4_req(ga_id, date_range = c("2015-07-30","2015-10-01"), dimensions=c('source','medium'), metrics = c('sessions')) ga_req2 <- make_ga_4_req(ga_id, date_range = c("2015-07-30","2015-10-01"), dimensions=c('source','medium'), metrics = c('users')) fetch_google_analytics_4(list(ga_req1, ga_req2)) } } \seealso{ Other GAv4 fetch functions: \code{\link{fetch_google_analytics_4_slow}}, \code{\link{google_analytics_4}}, \code{\link{make_ga_4_req}} }
7c859ac50dc52f09af721099580c59203053e677
1d4c729a11381851e0b5c8578bf5cd7289fc082f
/man/xSimplifyNet.Rd
9b35bc23066e15d2b939c70e203bf9fa2a2f83bf
[]
no_license
hfang-bristol/XGR
95b484a0350e14ad59fa170ead902689a34be89a
7b947080b310363e2b82c24c82d3394335906f54
refs/heads/master
2023-02-05T12:35:24.074365
2023-01-28T05:49:33
2023-01-28T05:49:33
52,982,296
9
3
null
null
null
null
UTF-8
R
false
true
857
rd
xSimplifyNet.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/xSimplifyNet.r \name{xSimplifyNet} \alias{xSimplifyNet} \title{Function to simplify networks from an igraph object} \usage{ xSimplifyNet(g, verbose = TRUE) } \arguments{ \item{g}{an "igraph" object} \item{verbose}{logical to indicate whether the messages will be displayed in the screen. By default, it sets to true for display} } \value{ an object of class "igraph" } \description{ \code{xSimplifyNet} is supposed to simplify networks from an igraph object by keeping root-tip shortest paths only. } \note{ none } \examples{ \dontrun{ # Load the library library(XGR) } RData.location <- "http://galahad.well.ox.ac.uk/bigdata" \dontrun{ g <- xRDataLoader(RData.customised='ig.DO', RData.location=RData.location) ig <- xSimplifyNet(g) } } \seealso{ \code{\link{xSimplifyNet}} }
d9d9881e64be7764256e2e821c2bcb798685b871
60bb1cc368bfa719822a6d5c6d2d6c96770766cf
/data_cleaning_draft.R
eb8c657e1d5d32054b2fcbeceff2322912cbc68c
[]
no_license
markerenberg/TMDB-Box-Office-Prediction-Kaggle
db849ee8ed2736507058f8fb7920bcba6cf48230
dc511ffd33e9415cfb478a3e4d95e1735f762355
refs/heads/master
2020-04-29T22:09:21.665861
2019-04-15T15:14:10
2019-04-15T15:14:10
176,436,916
1
0
null
null
null
null
UTF-8
R
false
false
16,535
r
data_cleaning_draft.R
#-------------------------------
# TMDB box-office draft: feature engineering on the raw training data.
# - flattens JSON-ish columns into dummy variables (genres, top production
#   companies)
# - derives release-date features, cast/crew sizes and crew department ratios
# Modified 31 MAR 2019: department-size ratios, crew/cast gender ratios.
#-------------------------------

library(data.table)
library(plotly)
library(ggplot2)
library(dplyr)
library(tidyr)
library(tidyverse)
library(stringi)
library(lubridate)
library(scales)
library(DT)
library(stringr)
library(jsonlite)
library(randomForest)

# NOTE(review): setwd() in a script is fragile; kept for behavior, but prefer
# running from the project root or using an absolute/portable path.
setwd("Desktop/ST4248/project")

train_raw <- read.csv("train.csv", header = TRUE, stringsAsFactors = FALSE)
train_raw %>% glimpse()

## Clean the training data -----------------------------------------------------
train <- train_raw %>%
  # `idPart` keeps the text before "name" in belongs_to_collection; the digits
  # inside it form the collection id
  separate(belongs_to_collection, 'idPart', sep = 'name', remove = TRUE) %>%
  separate(release_date, c('releaseMonth', 'releaseDay', 'releaseYear'),
           sep = '/', remove = TRUE) %>%
  mutate(# collection id: digits from idPart; 0 when the movie is in no collection
         collectionID = ifelse(!is.na(idPart), gsub("\\D", "", idPart), idPart),
         collectionID = ifelse(is.na(collectionID), 0, collectionID),
         # ISO code of the first spoken language: characters 17-18 of the JSON
         # string. NOTE(review): position-based parsing assumes a fixed JSON
         # layout -- verify against the raw data.
         mainSpokenLanguage = substr(spoken_languages, 17, 18),
         mainSpokenLanguage = ifelse(is.na(mainSpokenLanguage), 'NA', mainSpokenLanguage),
         spokenEn = mainSpokenLanguage == 'en',
         # indicator features (the original ifelse(..., TRUE, FALSE) wrappers
         # were redundant around logical expressions)
         partOfCollection = !is.na(idPart),
         hasHomePage = !is.na(homepage),
         hasTagline = !is.na(tagline),
         hasOverview = !is.na(overview),
         # Genre dummies.
         # BUG FIX: genThriller, genAnimation and genHistory previously
         # searched for 'Comedy' (copy-paste error); they now match their own
         # genre names.
         genres = ifelse(is.na(genres), 'NoGen', genres),
         genComedy = stri_detect_fixed(genres, 'Comedy'),
         genDrama = stri_detect_fixed(genres, 'Drama'),
         genThriller = stri_detect_fixed(genres, 'Thriller'),
         genAction = stri_detect_fixed(genres, 'Action'),
         genAnimation = stri_detect_fixed(genres, 'Animation'),
         genHorror = stri_detect_fixed(genres, 'Horror'),
         genDocumentary = stri_detect_fixed(genres, 'Documentary'),
         genAdventure = stri_detect_fixed(genres, 'Adventure'),
         genCrime = stri_detect_fixed(genres, 'Crime'),
         genMystery = stri_detect_fixed(genres, 'Mystery'),
         genFantasy = stri_detect_fixed(genres, 'Fantasy'),
         genWar = stri_detect_fixed(genres, 'War'),
         genScienceFiction = stri_detect_fixed(genres, 'Science Fiction'),
         genRomance = stri_detect_fixed(genres, 'Romance'),
         genMusic = stri_detect_fixed(genres, 'Music'),
         genWestern = stri_detect_fixed(genres, 'Western'),
         genFamily = stri_detect_fixed(genres, 'Family'),
         genHistory = stri_detect_fixed(genres, 'History'),
         genForeign = stri_detect_fixed(genres, 'Foreign'),
         genTVMovie = stri_detect_fixed(genres, 'TV Movie'),
         genNoGen = genres == 'NoGen',
         # Dummies for the most frequent production companies
         production_companies = ifelse(is.na(production_companies), 'NoProd', production_companies),
         prodUniversal = stri_detect_fixed(production_companies, 'Universal Pictures'),
         prodParamount = stri_detect_fixed(production_companies, 'Paramount Pictures'),
         prodTCF = stri_detect_fixed(production_companies, 'Twentieth Century Fox Film Corporation'),
         prodColumbia = stri_detect_fixed(production_companies, 'Columbia Pictures'),
         prodWarner = stri_detect_fixed(production_companies, 'Warner Bros.'),
         prodNLC = stri_detect_fixed(production_companies, 'New Line Cinema'),
         prodDisney = stri_detect_fixed(production_companies, 'Walt Disney Pictures'),
         prodColumbiaPictures = stri_detect_fixed(production_companies, 'Columbia Pictures Corporation'),
         prodTriStar = stri_detect_fixed(production_companies, 'TriStar Pictures'),
         prodMGM = stri_detect_fixed(production_companies, 'Metro-Goldwyn-Mayer (MGM)'),
         prodUnitedArtists = stri_detect_fixed(production_companies, 'United Artists'),
         prodMiramax = stri_detect_fixed(production_companies, 'Miramax Films'),
         # NOTE(review): trailing space in 'Touchstone Pictures ' kept from the
         # original -- confirm it matches the raw JSON, else it never matches
         prodTouchstone = stri_detect_fixed(production_companies, 'Touchstone Pictures '),
         prodFoxSearchlight = stri_detect_fixed(production_companies, 'Fox Searchlight Pictures'),
         # Expand two-digit years: <= 18 means 20xx, otherwise 19xx
         releaseYear = ifelse(as.integer(releaseYear) <= 18,
                              paste0('20', releaseYear), paste0('19', releaseYear)),
         release_date = as.Date(paste(releaseYear, releaseMonth, releaseDay, sep = '-')),
         # Age of the movie in years.
         # NOTE(review): today() makes this non-reproducible across run dates.
         age = as.integer(today() - release_date) / 365,
         quarterRelease = quarter(release_date),   # release quarter
         weekRelease = week(release_date),         # release week of year
         dayRelease = wday(release_date),          # release day of week
         runtime = ifelse(is.na(runtime), 0, runtime),  # missing runtime -> 0 (refined later)
         # Cast/crew sizes via counting JSON field markers
         sizeOfCast = str_count(cast, 'cast_id'),
         sizeOfCrew = str_count(crew, 'name'),
         # Share of each crew department relative to total crew size
         sizeOfDirecting = ifelse(is.na(sizeOfCrew), 0, str_count(crew, 'Directing') / sizeOfCrew),
         sizeOfWriting = ifelse(is.na(sizeOfCrew), 0, str_count(crew, 'Writing') / sizeOfCrew),
         sizeOfProduction = ifelse(is.na(sizeOfCrew), 0, str_count(crew, 'Production') / sizeOfCrew),
         sizeOfSound = ifelse(is.na(sizeOfCrew), 0, str_count(crew, 'Sound') / sizeOfCrew),
         sizeOfCamera = ifelse(is.na(sizeOfCrew), 0, str_count(crew, 'Camera') / sizeOfCrew),
         sizeOfEditing = ifelse(is.na(sizeOfCrew), 0, str_count(crew, 'Editing') / sizeOfCrew),
         sizeOfArt = ifelse(is.na(sizeOfCrew), 0, str_count(crew, 'Art') / sizeOfCrew),
         # NOTE(review): 'Costume MakeUp' may not match the raw department
         # label (often written "Costume & Make-Up") -- verify against the data
         sizeOfCostumeMakeUp = ifelse(is.na(sizeOfCrew), 0, str_count(crew, 'Costume MakeUp') / sizeOfCrew),
         sizeOfLighting = ifelse(is.na(sizeOfCrew), 0, str_count(crew, 'Lighting') / sizeOfCrew),
         sizeOfVisualEffects = ifelse(is.na(sizeOfCrew), 0, str_count(crew, 'Visual Effects') / sizeOfCrew),
         sizeOfActors = ifelse(is.na(sizeOfCrew), 0, str_count(crew, 'Actors') / sizeOfCrew),
         sizeOfCrew = ifelse(is.na(sizeOfCrew), 0, sizeOfCrew),
         # Counts of keywords / companies / countries / genres via "name" fields
         numberOfKeywords = str_count(Keywords, 'name'),
         numberOfKeywords = ifelse(is.na(numberOfKeywords), 0, numberOfKeywords),
         numberOfProductionCompanies = str_count(production_companies, 'name'),
         numberOfProductionCompanies = ifelse(is.na(numberOfProductionCompanies), 0, numberOfProductionCompanies),
         numberOfProductionCountries = str_count(production_countries, 'name'),
         numberOfProductionCountries = ifelse(is.na(numberOfProductionCountries), 0, numberOfProductionCountries),
         numberOfGenres = str_count(genres, 'name'),
         collectionID = as.factor(collectionID)) %>%
  # Collection size; movies in no collection all share one huge pseudo-
  # collection (id 0), whose size is reset to zero below
  group_by(collectionID) %>%
  mutate(sizeOfCollection = n()) %>%
  ungroup() %>%
  mutate(sizeOfCollection = ifelse(sizeOfCollection > 1000, 0, sizeOfCollection)) %>%
  # Drop raw and intermediate columns
  select(-idPart, -homepage, -imdb_id, -poster_path, -original_title, -genres, -overview,
         -tagline, -production_companies, -status, -spoken_languages, -production_countries,
         -releaseYear, -releaseMonth, -releaseDay, -title, -collectionID, -mainSpokenLanguage)

train %>% glimpse()

## Cast features ---------------------------------------------------------------
df = train
n = nrow(df)
cast = unlist(as.list(df$cast))

# For each movie, parse the cast JSON-ish string into a data frame:
# V1 = name, V2 = gender code, V3 = billing order
cast_lists = lapply(cast, function(x){ unlist(as.list(strsplit(x, '},')[[1]])) })
cast_dfs = lapply(cast_lists, function(x){
  as.data.frame(cbind(
    # extract name (text between the "name" field and the "order" field)
    unlist(lapply(x, function(y){
      substr(y, str_locate(y, 'name')[,2] + 5, str_locate(y, 'order') - 5)})),
    # extract gender (single digit after "gender")
    unlist(lapply(x, function(y){
      substr(y, str_locate(y, 'gender')[,2] + 4, str_locate(y, 'gender')[,2] + 4)})),
    # extract billing order
    unlist(lapply(x, function(y){
      substr(y, str_locate(y, 'order')[,2] + 4, str_locate(y, 'profile_path') - 4)}))),
    stringsAsFactors = FALSE)
})

# Count how many movies each cast member appears in
cast_members = unique(unlist(lapply(cast_dfs, function(x){ x[,1] })))
cast_tally = unlist(lapply(cast_members, function(actor){
  sum(unlist(lapply(cast_dfs, function(df){
    sum(rowSums(df == actor))
  })), na.rm = TRUE)
}))

# Put member and tally in a data frame, sorted by tally descending
cast = as.data.frame(cbind(cast_members, as.numeric(cast_tally)), stringsAsFactors = FALSE)
names(cast) = c('cast_members', 'cast_tally')
cast$cast_tally = as.numeric(cast$cast_tally)
cast = cast[order(-cast_tally),]

# Bar graph of actor appearance counts
ggplot(data = cast, aes(x = cast_members, y = cast_tally)) +
  geom_bar(data = cast, aes(fill = cast_members), stat = 'identity', show.legend = FALSE) +
  ggtitle('Actor Count') +
  theme(plot.title = element_text(hjust = 0.5)) +
  ylab('Count') +
  xlab('Actor')

# Drop empty-string names, then keep the most frequent actors
if(length(which(cast$cast_members == '')) != 0){
  cast = cast[-which(cast$cast_members == ''),]
}
topactors =
cast[1:301,1] # Create a dataframe that contains dummy variables for every actor's # apperance in a movie actors_df = as.data.frame(seq(1:n)) for(actor in topactors){ old_names = names(actors_df) actor_count = unlist(lapply(cast_dfs,function(df){ sum(rowSums(df == actor),na.rm=T) })) actors_df = cbind(actors_df,actor_count) names(actors_df) = c(old_names,actor) } # Get Male to Female ratio actors_df$actor_males = unlist(lapply(cast_dfs,function(df){sum(df$V2 == 2,na.rm=T)})) actors_df$actor_females = unlist(lapply(cast_dfs,function(df){sum(df$V2 == 1,na.rm=T)})) actors_df$actor_gender_ratio = ifelse(is.finite(actors_df$males/actors_df$females), actors_df$males/actors_df$females, 0) # M:F ratio in top 10 actors actors_df$actor_top10males = unlist(lapply(cast_dfs,function(df){sum(df$V2[1:11] == 2,na.rm=T)})) actors_df$actor_top10females = unlist(lapply(cast_dfs,function(df){sum(df$V2[1:11] == 1,na.rm=T)})) actors_df$actor_top10gender_ratio = ifelse( is.finite(actors_df$top10males/actors_df$top10females), actors_df$top10males/actors_df$top10females, 0) # Create a list for each movie, containing all keywords for that movie keyw = df$Keywords key_lists = lapply(keyw,function(x){unlist(as.list(strsplit(x,'},')[[1]]))}) key_df = lapply(key_lists,function(lst){ # extract names unlist(lapply(lst,function(strng){ # if string in list is last string, remove the '}]"' characters ifelse(match(strng,lst)==length(lst), substr(strng,str_locate(strng,'name')[,2]+5,nchar(strng)-3), substr(strng,str_locate(strng,'name')[,2]+5,nchar(strng)-1)) } )) }) # To count how many times a keyword appears in a movie: unique_keynames = unique(unlist(key_df)) keyword_tally = unlist(lapply(unique_keynames,function(key){ sum(unlist(lapply(key_df,function(lst){ sum(lst == key, na.rm=T) }) ),na.rm=T)} )) # Create a dataframe containing each keyword and the keyword_tally keywords = as.data.frame(cbind(unique_keynames,keyword_tally), stringsAsFactors = F) names(keywords) = c('keyword','keyword_tally') 
# --- Keywords: keep the most frequent ones and build per-movie dummy counts ---

# Sort keywords by how often they appear across all movies (descending).
keywords = keywords[order(-keyword_tally), ]

# Drop the empty-string "keyword" if present before taking the top of the list.
if (length(which(keywords$keyword == '')) != 0) {
  keywords = keywords[-which(keywords$keyword == ''), ]
}
# NOTE(review): 1:301 keeps 301 keywords, not the 300 the comments elsewhere
# claim; kept as-is to preserve the original behaviour.
topkeys = keywords[1:301, 1]

# One column per top keyword: how many times it occurs in each movie.
keyword_df = as.data.frame(seq_len(n))
for (key in topkeys) {
  old_names = names(keyword_df)
  key_count = unlist(lapply(key_df, function(lst) {
    sum(lst == key, na.rm = TRUE)
  }))
  keyword_df = cbind(keyword_df, key_count)
  names(keyword_df) = c(old_names, key)
}

# --- Crew: parse the raw JSON-like 'crew' strings, one data frame per movie ---
# Each data frame has 3 columns: V1 = name, V2 = gender, V3 = department.
crew = unlist(as.list(df$crew))
# FIX: the original called lapply(toString(crew), ...), which collapsed every
# movie's crew into a single string and produced a one-element list. Parsing
# per movie mirrors how the cast field is handled above.
crew_lists = lapply(crew, function(x) { unlist(as.list(strsplit(x, '},')[[1]])) })
crew_dfs = lapply(crew_lists, function(x) {
  as.data.frame(cbind(
    # extract name
    unlist(lapply(x, function(y) {
      substr(y, str_locate(y, 'name')[, 2] + 5, str_locate(y, 'profile_path') - 5)
    })),
    # extract gender (TMDB encoding: 2 = male, 1 = female)
    unlist(lapply(x, function(y) {
      substr(y, str_locate(y, 'gender')[, 2] + 4, str_locate(y, 'gender')[, 2] + 4)
    })),
    # extract department
    unlist(lapply(x, function(y) {
      substr(y, str_locate(y, 'department')[, 2] + 5, str_locate(y, 'gender') - 5)
    }))),
    stringsAsFactors = FALSE)
})

# Counting how many times a crew member appears in a movie:
crew_members = unique(unlist(lapply(crew_dfs, function(x) { x[, 1] })))
crew_tally = unlist(lapply(crew_members, function(crew_member) {
  sum(unlist(lapply(crew_dfs, function(df) {
    sum(rowSums(df == crew_member))
  })), na.rm = TRUE)
}))

# Putting crew_member and crew_tally in a dataframe, sort by crew_tally DESC.
# FIX: the original bound cast_tally (left over from the cast section) instead
# of crew_tally, so every crew count was wrong.
crew = as.data.frame(cbind(crew_members, as.numeric(crew_tally)),
                     stringsAsFactors = FALSE)
colnames(crew) = c('crew_members', 'crew_tally')
crew$crew_tally = as.numeric(crew$crew_tally)
crew = crew[order(-crew$crew_tally), ]

# Bar Graph of Top Crew Members
ggplot(data = crew, aes(x = crew_members, y = crew_tally)) +
  geom_bar(data = crew, aes(fill = crew_members), stat = 'identity',
           show.legend = FALSE) +
  ggtitle('Crew Member\nCount') +
  theme(plot.title = element_text(hjust = 0.5)) +
  ylab('Count') +
  xlab('Crew Member')

# Top crew members (after removing empty strings); see NOTE above about 301.
if (length(which(crew$crew_members == '')) != 0) {
  crew = crew[-which(crew$crew_members == ''), ]
}
topcrewmembers = crew[1:301, 1]

# One dummy column per top crew member: appearances per movie.
crews_df = as.data.frame(seq_len(n))
for (crews in topcrewmembers) {
  old_cnames = names(crews_df)
  crew_count = unlist(lapply(crew_dfs, function(df) {
    sum(rowSums(df == crews), na.rm = TRUE)
  }))
  crews_df = cbind(crews_df, crew_count)
  names(crews_df) = c(old_cnames, crews)
}

# Male-to-female ratio over the whole crew (gender code: 2 = male, 1 = female).
crews_df$crew_males = unlist(lapply(crew_dfs, function(df) {
  sum(df$V2 == 2, na.rm = TRUE)
}))
crews_df$crew_females = unlist(lapply(crew_dfs, function(df) {
  sum(df$V2 == 1, na.rm = TRUE)
}))
# FIX: the original divided crews_df$males by crews_df$females — columns that
# do not exist (they are named crew_males / crew_females) — so the ratio was
# never computed.
crews_df$crew_gender_ratio = ifelse(
  is.finite(crews_df$crew_males / crews_df$crew_females),
  crews_df$crew_males / crews_df$crew_females,
  0)

# M:F ratio among the top-billed crew entries.
# NOTE(review): V2[1:11] looks at 11 entries, not 10 as the variable names
# suggest; preserved as-is (matches the cast section).
crews_df$top10males = unlist(lapply(crew_dfs, function(df) {
  sum(df$V2[1:11] == 2, na.rm = TRUE)
}))
crews_df$top10females = unlist(lapply(crew_dfs, function(df) {
  sum(df$V2[1:11] == 1, na.rm = TRUE)
}))
crews_df$top10gender_ratio = ifelse(
  is.finite(crews_df$top10males / crews_df$top10females),
  crews_df$top10males / crews_df$top10females,
  0)

# Append Cast, Keyword, and Crew variables to the train/test dataframe
# (drop each block's first column, which is just the row index seq_len(n)).
cast_key_crew_vars = cbind(actors_df[, -1], keyword_df[, -1], crews_df[, -1])
train_df = cbind(df, cast_key_crew_vars)
db3d54649e06f358a6da844dbb1005392f958c8b
e67259f518e61f2b15dda1eb767f012a5f3a6958
/tools/rpkg/dependencies.R
5b9dd80112b27349daeb72f3b9aa636466e95cb5
[ "MIT" ]
permissive
AdrianRiedl/duckdb
e0151d883d9ef2fa1b84296c57e9d5d11210e9e3
60c06c55973947c37fcf8feb357da802e39da3f1
refs/heads/master
2020-11-26T13:14:07.776404
2020-01-31T11:44:23
2020-01-31T11:44:23
229,081,391
2
0
MIT
2019-12-19T15:17:41
2019-12-19T15:17:40
null
UTF-8
R
false
false
117
r
dependencies.R
install.packages(c("DBI", "DBItest", "testthat", "dbplyr", "RSQLite", "callr"), repos=c("http://cran.rstudio.com/"))
a516db5cc2ebcd1899f919545e544f2c42bbdc2f
3ff323d4cbd2c81e044024be225166c022ff6728
/R/bindings.R
c542134909d17ab9f061f8652824495d2736231f
[]
no_license
dhh15/techhist
88a50ff543f7a0805bcb00a7f4e80ce4d4c2150d
7b105ea2b638cc49ba63e16c617e1519927ff729
refs/heads/master
2020-04-06T06:38:04.195335
2015-05-19T17:17:17
2015-05-19T17:17:17
35,090,022
0
0
null
null
null
null
UTF-8
R
false
false
700
r
bindings.R
library(dplyr) library(tau) bin <- df %>% group_by(BindingId) %>% tally(sort=TRUE) binn <- rapply(bin[1],c) n_vol <- length(binn) vol <- tbl_df(data.frame(list(row.index = 1:n_vol))) vol$Year <- rep(NA,n_vol) vol$Month <- rep(NA,n_vol) vol$Day <- rep(NA,n_vol) vol$Lang <- rep(NA,n_vol) vol$Text <- rep(NA,n_vol) for (i in 1:n_vol) { id <- binn[i] vol$BindingId[i] <- id vol$Year[i] <- (df %>% filter(BindingId == id))$Year[1] vol$Month[i] <- (df %>% filter(BindingId == id))$Month[1] vol$Day[i] <- (df %>% filter(BindingId == id))$Day[1] vol$Lang[i] <- (df %>% filter(BindingId == id))$Lang[1] vol$Text[i] <- toString(rapply((df %>% filter(BindingId == id))$Text,c)) } saveRDS(vol,"vol.Rds")
5ca9a8fe6385af2868a8f3c7ac53d2509ea96d59
1552c44b53a9a071532792cc611ce76d43795453
/Inteligencia_Artificial/pso_I.r
b0d1434c8e38ab1ae880b59f6e3bee5c3f4b012f
[]
no_license
robl-25/Faculdade
8ce16cee93f5948d33d45714de66578d189163f4
0801f5748d8d7d79314699b2e35258e402a55bd1
refs/heads/master
2021-01-10T05:16:45.842442
2017-11-09T02:26:26
2017-11-09T02:26:26
45,509,015
0
0
null
null
null
null
UTF-8
R
false
false
1,649
r
pso_I.r
# Programa que implementa o PSO rm() #particulas xmin = -10 xmax = 10 vmin = -1 vmax = 1 max_iter = 100 # Numero de particulas n = 100 # Numero de variaveis de cada particula qtd_var = 2 # Parametros da componente cognitiva e sociavel c1 = c2 = 2.05 # Funcao objetivo fobj = function(x){ sum(x^2)/sqrt(abs(max(x))) } # Particulas x = matrix(runif(n*qtd_var, xmin, xmax), ncol = qtd_var) #memoria y = x #velocidades v = matrix(runif(n*qtd_var, vmin, vmax), ncol = qtd_var) # Calcula o fx fx = apply(x, 1, fobj) # Melhor solucao encontrada (linha do melhor cara) gbest = y[which.max(fx), ] # Valor da melhor solucao gbestValor = max(fx) x11() for(i in 1:max_iter){ cat("\nGBest = ", gbest, " e GBestVal = ", gbestValor) plot(x, xlim = c(xmin, xmax), ylim = c(xmin, xmax), pch="*") points(y, col = 2) points(gbest[1], gbest[2], col=3, pch="x") Sys.sleep(1) # Numero gerado de uma distribuicao r1 = runif(1) r2 = runif(1) # Formula da velocidade v = v + r1*c1*(y-x) + c2*r2*t(gbest-t(x)) # Deixa a particula dentro dos limites v[ v > vmax] = vmax v[ v > vmin] = vmin # Anda com a particula x = x + v # Volta aprticula para dentro do intervalo permitido x[x<xmin] = xmin x[x>xmax] = xmax fx_novo = apply(x, 1, fobj) # Procura as pos em que fx melhora (eh um vetor de true ou false) pos = fx_novo > fx # Ve se alguem melhorou if(length(pos) > 0){ # Atualiza a memoria e o fx dos que melhoraram y[pos, ] = x[pos, ] fx[pos] = fx_novo[pos] # Atualizando o gbest e o maior valor gbest = y[which.max(fx), ] gbestValor = max(fx) } }
f7214f4587c8ead6ca6a20f4e389e8879fa23e71
14c2f47364f72cec737aed9a6294d2e6954ecb3e
/man/assertEdgeToptable.Rd
610d9e5ef2f97b8449f013e61306beed6cd3eac6
[]
no_license
bedapub/ribiosNGS
ae7bac0e30eb0662c511cfe791e6d10b167969b0
a6e1b12a91068f4774a125c539ea2d5ae04b6d7d
refs/heads/master
2023-08-31T08:22:17.503110
2023-08-29T15:26:02
2023-08-29T15:26:02
253,536,346
2
3
null
2022-04-11T09:36:23
2020-04-06T15:18:41
R
UTF-8
R
false
true
370
rd
assertEdgeToptable.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/edgeR-funcs.R \name{assertEdgeToptable} \alias{assertEdgeToptable} \title{Assert that the input data.frame is a valid EdgeTopTable} \usage{ assertEdgeToptable(x) } \arguments{ \item{x}{A data.frame} } \value{ Logical } \description{ Assert that the input data.frame is a valid EdgeTopTable }
710d161472e97a1bda8a1ee13c69ce2e83911548
44d17a26766da7ad02445b214fb26fa06c73e5a3
/R_programming/practice_assignment.R
5f52598ae36dfbb3d65da2531e1ea344d7ad486f
[]
no_license
elbacilon/datascience
9adc5dfa17e444ba52c14905eda8960b559e5f0b
74e50c7f2e62277e156a684685b2d82f52c77002
refs/heads/master
2020-05-04T16:08:51.364962
2015-04-22T19:40:13
2015-04-22T19:40:13
30,263,585
0
0
null
null
null
null
UTF-8
R
false
false
4,174
r
practice_assignment.R
# Download Data
# NOTE(review): hard-coded user path — adjust or remove when running elsewhere.
setwd("C:/Users/Cedric/Documents/GitHub/datascience/R_programming/")
dataset_url <- "http://s3.amazonaws.com/practice_assignment/diet_data.zip"
download.file(dataset_url, "diet_data.zip")

# Explore data
unzip("diet_data.zip", exdir = "diet_data")  # unzip in a new directory
list.files("diet_data")  # list files of the directory diet_data => 5 files

## explore 1 file
andy <- read.csv("diet_data/Andy.csv")
head(andy)  # 4 columns
# how many rows
length(andy$Day)  # 30 observations
# OR
# dim(andy)
# str(andy)
# summary(andy)
# names(andy)

# all of the other files match this format and length:
# 30 days worth of weight data for 5 subjects of an imaginary diet study.

# Andy's starting weight?
# subset the data => first row of the 'Weight' column:
andy[1, "Weight"]
# [1] 140

# subset of the 'Weight' column where the value of the 'Day' column equals 30
andy[which(andy$Day == 30), "Weight"]
# OR: andy[which(andy[, "Day"] == 30), "Weight"]
# OR: subset(andy$Weight, andy$Day == 30)

# assign Andy's starting and ending weight to vectors:
andy_start <- andy[1, "Weight"]
andy_end <- andy[30, "Weight"]

# find out how much weight he lost by subtracting the vectors:
andy_loss <- andy_start - andy_end
andy_loss  # Andy lost 5 pounds over the 30 days

# look at everybody at once
# list.files() returns the contents of a directory in alphabetical order.
# full.names = TRUE is needed so read.csv() gets "diet_data/..." paths
# instead of looking in the working directory.
files_full <- list.files("diet_data", full.names = TRUE)

dat <- data.frame()
# FIX: loop over the actual number of files instead of a hard-coded 1:5.
for (i in seq_along(files_full)) {
  dat <- rbind(dat, read.csv(files_full[i]))
}
str(dat)

# median(dat$Weight)  # returns NA
# sum(is.na(dat))  # there are 13 NA
median(dat$Weight, na.rm = TRUE)

# median weight of day 30: take the median of a subset where Day == 30
median(dat[dat$Day == 30, "Weight"], na.rm = TRUE)

# Build a function that returns the median weight of a given day.
# Arguments: the data directory and the day for which to compute the median.
weightmedian <- function(directory, day) {
  files_full <- list.files(directory, full.names = TRUE)  # create list of files
  dat <- data.frame()  # create empty data.frame
  # FIX: iterate over all files found, not a hard-coded 1:5
  for (i in seq_along(files_full)) {  # loop through files, rbinding together
    dat <- rbind(dat, read.csv(files_full[i]))
  }
  # subset the rows that match the 'day' argument
  dat_subset <- dat[which(dat[, "Day"] == day), ]
  # median weight, stripping out the NAs
  median(dat_subset[, "Weight"], na.rm = TRUE)
}
weightmedian(directory = "diet_data", day = 20)
weightmedian("diet_data", 4)
weightmedian("diet_data", 17)

##################################### ALTERNATIVE ##################################
# Growing an object by copying and recopying it in a loop works, but it is
# slow on large data. The better approach is to create an output object of
# an appropriate size and then fill it up.
# Our input object is files_full and our pre-allocated list is tmp.
# FIX: the original used list.files(directory, ...) here at top level, where
# 'directory' (a function parameter) is undefined — this would error.
files_full <- list.files("diet_data", full.names = TRUE)
tmp <- vector(mode = "list", length = length(files_full))
summary(tmp)

# read in those csv files and drop them into tmp
for (i in seq_along(files_full)) {
  tmp[[i]] <- read.csv(files_full[[i]])
}
str(tmp)  # tmp is a list of 5 elements;
# each element of the list is a data frame containing one of the csv files.
# Functionally identical to:
# str(lapply(files_full, read.csv))

# need to go from a list to a single data frame
# str(tmp[[1]])
# head(tmp[[1]][, "Day"])

# do.call() lets you specify a function and then passes a list as if each
# element of the list were an argument to that function.
output <- do.call(rbind, tmp)
str(output)
e8e98b4a3953814e93ee989917ebd36c49c0b359
a1fb1bf5ffb6ca117bbdff2b47f8a6a84fa8129f
/scripts/dada2/06_dbOTU_into.R
f0dc3969d3b0b153156f335d5165086eca9c2a12
[]
no_license
lvelosuarez/Snakemake_amplicon
5e439121595cf4d6b1508fa4ba7ce1a8f07b6372
9a8d36332d50e41d7b2fc6d2fd82b2cec805d06f
refs/heads/master
2023-08-25T07:39:46.037942
2021-10-28T14:07:30
2021-10-28T14:07:30
337,670,914
0
1
null
2021-02-16T10:07:49
2021-02-10T09:11:26
R
UTF-8
R
false
false
840
r
06_dbOTU_into.R
#!/usr/bin/Rscript suppressPackageStartupMessages(library(tidyverse)) sink(snakemake@log[[1]]) seqtab= readRDS(snakemake@input[['seqtab']]) dbOTU <- seqtab %>% t() %>% as.data.frame(stringsAsFactors = FALSE) %>% rownames_to_column(var="seqs") %>% mutate(asv_id=paste0("asv", 1:nrow(.))) %>% dplyr::select(asv_id,seqs,everything()) table <- read_tsv(snakemake@input[['dbOTU']], col_names=TRUE) dada2 <- data.frame(OTU_ID=dbOTU$asv_id,seq=dbOTU$seqs, stringsAsFactors= FALSE) %>% right_join(table, by="OTU_ID") %>% as.data.frame() %>% dplyr::select(-OTU_ID) %>% column_to_rownames(var = "seq") %>% t() %>% as.matrix() saveRDS(dada2, snakemake@output[['rds']])
d162025b2d91f4348f064046188a71315c527e46
7e2811cb7005bba30a4ff64fa8c93e8f41bcbb72
/filterVCF.Rcheck/00_pkg_src/filterVCF/R/filterVCF_functions.R
c11f8560aca65ed58608d5d8f051e9c6fff6b981
[]
no_license
benjaminlaenen/filterVCF
38a2c242240519f72b0b7c55dfc2248099a17512
79ed3f783c84de85c14c5afd8ab390d97ba3f51c
refs/heads/master
2022-12-14T17:43:08.854767
2020-09-06T19:45:52
2020-09-06T19:45:52
293,107,005
0
0
null
null
null
null
UTF-8
R
false
false
22,114
r
filterVCF_functions.R
# ========================================================================== # Main functions # ========================================================================== #' Extract the META info from a vcf #' #' Extract the META info from a vcfR object and can report plot. #' #' #' @param vcf a vcfR object #' @param vcf_file names fro plotting if opt$plot = TRUE #' @param ... options from initialise_option() #' @return list #' \item{META }{META data extracted} #' \item{Description_META }{description taken from the vcf header} #' \item{INFO_per_snp }{META data per SNPs} #' \item{CI_Stats }{Statistic for each META data} #' #' @author ~~Benjamin Laenen~~ #' @references #' @keywords ~utilities #' @examples #' #' opt <- parse_args(OptionParser(option_list=option_list)) #' opt$plot <- TRUE #' stats_META("my.vcf", vcf_file="Plot_metadata", opt) #' #' @export stats_META <- function(vcf, vcf_file="vcf", ...){ myDots <- list(...) if (!is.null(myDots$opt)){ opt <- myDots$opt }else{ opt <- parse_args(OptionParser(option_list=initialise_option())) } if(nrow(vcf) == 0){ message("\nNo site left after filtering!!") return(list()) } #Export some stats adapted for GATK, #it can also output from other caller #but not tested. 
META <- grep("INFO", queryMETA(vcf), value = TRUE) META <- gsub(".+=(.+)$", "\\1", META) META <- grep("AC|AF|AN", META, value = TRUE, invert =TRUE) Description_META <- lapply(META, function(x) paste(x, grep("Description", queryMETA(vcf, element = x)[[1]], value = TRUE), sep = ":")) INFO_per_snp <- lapply(META, function(x) extract.info(vcf, element = x, as.numeric = TRUE)) names(INFO_per_snp) <- names(Description_META) <- META mat_INFO_per_snp <- as.data.frame(matrix(unlist(INFO_per_snp), ncol=length(INFO_per_snp))) index_col <- colSums(mat_INFO_per_snp, na.rm=TRUE) != 0 mat_INFO_per_snp <- mat_INFO_per_snp[,index_col] mat_INFO_per_snp <- mat_INFO_per_snp[complete.cases(mat_INFO_per_snp),] colnames(mat_INFO_per_snp) <- META[index_col] if(opt$verbose) message(sprintf("Computing stats for INFO :\n%s", paste(META[index_col], collapse = " ")), appendLF=TRUE) CI_Stats <- apply(mat_INFO_per_snp,2, quantile, probs = c(0.025, 0.5, 0.975)) if(opt$plot){ pdf(paste0(gsub(".vcf|.vcf.gz", "",vcf_file), "_", "filter_stat_overview.pdf")) #pdf() par(mfrow=c(2, 2), cex.main = 0.5) Hist_out <- suppressWarnings(invisible(lapply(META, function(x) try(hist(INFO_per_snp[[x]], 20, xlab = x, main=Description_META[[x]], col = "aquamarine2"),silent=TRUE)))) pc <- try(prcomp(scale(mat_INFO_per_snp))) par(mfrow=c(1, 1), cex.main = 0.6) try(biplot(pc, choices =1:2, cex=.6, pc.biplot = TRUE)) try(biplot(pc, choices =3:4, cex=.6, pc.biplot = TRUE)) graphics.off() } message("Done...", appendLF=TRUE) return(list(META=META, Description_META=Description_META, INFO_per_snp=INFO_per_snp, CI_Stats=CI_Stats)) } #' Save large output from a filterVCF #' #' This function is used in the pipeline filterVCF.R to save output from a run #' of filtering. The filterVCF object contains information about the sites that #' were filtered, the invairant, the fixed, the bedfile imputed,etc. 
This #' function creates a directory with the resulting filtered vcf file, invariant #' and fixed bedfile and all the filterts that have been applied as bedfiles. #' #' #' @param RES a filterVCF object #' @param output_dir output directory #' @param bed_outputdir output directory to save filters #' @return NULL #' #' @author ~~Benjamin Laenen~~ #' @seealso objects to See Also as \code{\link{help}}, #' @references #' @keywords ~utilities #' @examples #' #' opt <- GetOpt(filterVCF_object) #' save_output(filterVCF_object, output_dir = "./", bed_outputdir = "filters", opt) #' #' @export save_output <- function(RES, output_dir = "./", bed_outputdir, ...){ myDots <- list(...) if (!is.null(myDots$opt)){ opt <- myDots$opt }else{ opt <- parse_args(OptionParser(option_list=initialise_option())) } if(!is.na(opt$output_file)){ vcf_file_short <- paste0(basename(gsub(".vcf$|.vcf.gz$", "", opt$output_file))) }else{ vcf_file_short <- paste0(basename(gsub(".vcf$|.vcf.gz$", "", opt$vcf_file))) } bed_window_filter_dir <- paste0(bed_outputdir, "/window_filters") if(!dir.exists(bed_window_filter_dir)){ dir.create(bed_window_filter_dir) } if(!is.na(opt$bed_file[1])){ Grange2bed(BEDFilter(RES), "Filter by bed file", bed_window_filter_dir, paste0(vcf_file_short, "_filtered_by_", "bed_GRange_merged", ".bed")) } if(isTRUE(opt$filter_repeats_by_windows)){ Grange2bed(Filter_WindowsProp(RES), "windows with half repeats (or other bedfile specified in --repeats)", bed_window_filter_dir, paste0(vcf_file_short, "_filtered_by_", "windows_with_half_repeats_to_remove", ".bed")) } if(isTRUE(opt$filter_fix_het)){ Grange2bed(FixHet_RefPop(RES), "Fixed contiguous heterozygous sites in 50bp windows in the population specified in --filter_fix_het_contiguous_in_pop", bed_window_filter_dir, paste0(vcf_file_short, "_filtered_by_", "fix_het_pop", ".bed")) } if(isTRUE(!is.na(opt$filter_high_DP_standardized))){ names(Normalized_DP_Filter(RES)[[1]]) <- names(Normalized_DP_Filter(RES)[[2]]) <- 
paste0("median_normDP_", parse_filter_high_DP_standardized(opt$filter_high_DP_standardized)$threshold) for(x in names(Normalized_DP_Filter(RES))){ for(y in names(Normalized_DP_Filter(RES)[[x]])){ Grange2bed(Normalized_DP_Filter(RES)[[x]][[y]], sprintf("%s with normalized depth higher than the threshold %s", x, y), bed_window_filter_dir, paste0(vcf_file_short, "_filtered_by_", "standardized_DP_", y, ifelse(x=="total_high_DP_windows", "_per_sample", "_across_sample"), ".bed") ) } } } #Save bed file for sites removed by diffrent filters to intersect with vcf_bed_Grange bed_single_filter_dir <- paste0(bed_outputdir, "/individual_filters") filter2save <- c( "QD", "SOR", "MQRankSum", "FS", "MQ", "ReadPosRankSum", "InbreedingCoeff", "fix_het", "all_het", "bi_allelic", "missing", "indel") vcf_bed_GRange <- vcf2Grange(vcfRaw(RES)) if(!dir.exists(bed_single_filter_dir)){ dir.create(bed_single_filter_dir) } void <- lapply(filter2save, function(x) filter2bed(Filters(RES)[[x]], x, vcf_bed_GRange, bed_single_filter_dir, paste0(vcf_file_short, "_filtered_by_", x, ".bed"))) #save removed sites master_filter_Grange <- removed_sites(RES) if(sum(width(invariantRaw(RES))) != 0){ master_filter_Grange <- c(master_filter_Grange, removed_sites_inv(RES)) } Grange2bed(master_filter_Grange, "Removed sites", bed_outputdir, paste0(vcf_file_short, "_removed_sites", ".bed")) #save fixed invariant if(isTRUE(length(fixed(RES)) != 0) ){ Grange2bed(fixed(RES), "Fixed homozygous ALT sites , keep that filter if a repolarization is done later", bed_outputdir, paste0(vcf_file_short, "_ALT_fixed", ".bed")) } #save invariant if(isTRUE(length(invariant(RES)) != 0)){ Grange2bed(invariant(RES), "Invariable sites including REF/REF and missing", bed_outputdir, paste0(vcf_file_short, "_invariable", ".bed")) } list_bed_files <- list.files(path=outputdir, pattern=".bed$", recursive = TRUE, full.names = TRUE) void <- lapply(list_bed_files, gzip, overwrite=TRUE) #gzip all bed file #write vcf file if(opt$verbose) 
message(sprintf("Save filtered vcf to %s: ", paste0(vcf_file_short, "_filtered", ".vcf.gz"))) write.vcf(vcf(RES), file =paste0(vcf_file_short, "_filtered", ".vcf.gz")) } #' Main wrapper to filter a VCF file #' #' This function is the main core of the pipeline filterVCF.R. It will perform #' a series of filtering steps depending of the options provided in a opt list #' that can be initiated with initialise_option(). This function is not expected #' to be used directly by the user which should first run the pipeline #' filterVCF.R on a complete non filtered VCF (including invariant). #' #' #' see the help of filterVCF.R for more detail #' Rscript filterVCF.R --help #' #' #' @param vcf_file path to vcf file #' @param ... options from initialise_option(). This parameter regulates all the filtering steps that will be applied to the data. #' @return a filterVCF object. #' #' @author ~~Benjamin Laenen~~ #' @references #' @keywords ~main #' @examples #' #' opt <- parse_args(OptionParser(option_list=option_list)) #' main_filter_VCF("my.vcf", opt) #' #' @export main_filter_VCF <- function(vcf_file, ...){ .libPaths("/proj/uppstore2018024/private/Rpackages/") suppressPackageStartupMessages(suppressMessages(try(library(filterVCF)))) myDots <- list(...) 
if (!is.null(myDots$opt)){ opt <- myDots$opt }else{ opt <- parse_args(OptionParser(option_list=initialise_option())) } vcf <- read.vcfR(vcf_file, limit = 1e+08, verbose = opt$verbose, convertNA = FALSE) vcf@fix[,"ID"] <- NA if(!is.na(opt$keep_sites)){ sites2keep <- bed2Grange(opt$keep_sites) vcf_bed_GRange <- vcf2Grange(vcf) index_sites2keep <- findOverlaps(vcf_bed_GRange, sites2keep, minoverlap=1, ignore.strand=TRUE) vcf <- vcf[from(index_sites2keep)] if(opt$verbose) message(sprintf("Keeping only sites interesecting with %s\nResults in %s sites before filtering", opt$keep_sites, nrow(vcf)), appendLF=TRUE) } #select only some chromosome #return a empty RES if the chr i not present in the vcf if(!is.na(opt$chr[1])){ vcf <- vcf[getCHROM(vcf) %in% opt$chr] if(nrow(vcf) == 0 ){ return(as.filterVCF(list( vcf=vcf, vcf_filtered=vcf, vcf_inv=GRanges(), vcf_inv_filtered=GRanges(), fixed_inv_Grange=GRanges(), reference=new("DNAStringSet"), master_filter=logical(0), master_filter_inv=logical(0), filters=list(logical(0)), filters_inv=list(logical(0)), removed_sites=GRanges(), removed_sites_inv=GRanges(), bed_GRange_merged=GRanges(), windows_with_half_repeats_to_remove=GRanges(), fix_het_pop=GRanges(), filter_DP_repeats=list(high_DP_windows_means_across_sample=GRanges(), total_high_DP_windows=GRanges()), stats_vcf_filtered=NA_character_, opt = opt))) } } if(!is.na(opt$sample)){ opt$sample_list <- parse_opt_sample(opt$sample) vcf <- vcf[,c("FORMAT", opt$sample_list)] warnings("\nInformation in the INFO field will not be recalculated, use GATK -T SelectVariants on the output vcf to recreate them.\n") } # extract_genotype gt <- extract_gt_ff(vcf) #remove sample with too much missing data if(!is.na(opt$missing_per_individual)){ passing_missing_ind <- missing_per_individual(gt, opt$missing_per_individual) vcf <- vcf[,c("FORMAT", passing_missing_ind)] gt <- gt[passing_missing_ind] } #Start by filtering the invariant out and keep the vcf for further saving filter_invariant <- 
create_invariant_filter(vcf, gt=gt, opt) gc(verbose=FALSE) vcf_inv <- vcf[filter_invariant] vcf <- vcf[!filter_invariant] if(opt$verbose) message("Done...", appendLF=TRUE) #explore the filtering option in the metadata and plot some reports. If the vcf is too big start by subsampling 100000 if(!is.na(opt$split) & isTRUE(opt$split != 1)){ #dont calculate stats on splitted file #it will be done later # note that is a subsample is taken the info will not match for the sample # it is better to use SelectVariants first if(nrow(vcf) > 1e5){ if(opt$verbose) message(sprintf("The VCF has %s variants, taking a subsample of 100000 to draw stats plot", nrow(vcf)), appendLF=TRUE) vcf_for_stats <- vcf[sample(1:nrow(vcf), 1e5),] }else{ vcf_for_stats <- vcf } #get some stats stats_vcf <- suppressWarnings(stats_META(vcf_for_stats, vcf_file=vcf_file, opt)) } # extract_genotype gt <- gt[as.ff(!filter_invariant),] #filter DP per individual, if used filter missing should be recalculated #as we will replace the genotype by NA is DP is < threshold > . 
# vcf_bak <- vcf # vcf <- vcf_bak if(!is.na(opt$filter_depth)){ opt$filter_depth_parsed <- parse_filter_depth(opt$filter_depth) vcf <- change_gt_by_DP_filter(vcf, min_DP=opt$filter_depth_parsed$min_DP, max_DP=opt$filter_depth_parsed$max_DP, opt) #We need to re extract the genotype rm(gt) gt <- extract_gt_ff(vcf) #After changing the genotyoe to NA some sites can become invariant #create a new filter for invariant and #adding the new inv to the old while filtering the snp #it works even if no sites is filtered filter_invariant_dp <- create_invariant_filter(vcf, gt=gt, opt) vcf_inv <- rbind2(vcf_inv, vcf[filter_invariant_dp]) vcf <- vcf[!filter_invariant_dp] gt <- gt[as.ff(!filter_invariant_dp),] #re add the invariant } if(!is.na(opt$filter_genotype_quality)){ opt$filter_genotype_quality <- parse_filter_GQ(opt$filter_genotype_quality) vcf <- change_gt_by_GQ_filter(vcf, invariable = FALSE, GQ_threshold=opt$filter_genotype_quality$GQ, opt) #We need to re extract the genotype rm(gt) gt <- extract_gt_ff(vcf) #After changing the genotyoe to NA some sites can become invariant #create a new filter for invariant and #adding the new inv to the old while filtering the snp #it works even if no sites is filtered filter_invariant_dp <- create_invariant_filter(vcf, gt=gt, opt) vcf_inv <- rbind2(vcf_inv, vcf[filter_invariant_dp]) vcf <- vcf[!filter_invariant_dp] gt <- gt[as.ff(!filter_invariant_dp),] #re add the invariant } #save the inv to temp file to retrieve after to sve memory usage tmp_vcf_inv <- tempfile() saveRDS(vcf_inv, file = tmp_vcf_inv) rm(vcf_inv) filters <- list() # Filter the variant # GATK recommendation modified after exploring the data. 
It is a bit more stringent # and try to remove excess of hets due to wrong call # "QD < 2.0 || FS > 60.0 || MQ < 40.0 || MQRankSum < -12.5 || ReadPosRankSum < -8.0" \ # "QD < 5.0 || FS > 60.0 || MQ < 50.0 || MQRankSum < -5 || ReadPosRankSum < -4.0 || SOR > 3 || InbreedingCoeff < -0.2" \ if(!is.null(opt$filter_GATK_info)){ filter_GATK_info <- parse_GATK_filter_option(opt$filter_GATK_info) for(i in names(filter_GATK_info)){ filters[[i]] <- create_filter_META(vcf, i, filter_GATK_info[[i]]) } } # filter for fix het if(opt$filter_fix_het){ filters[["fix_het"]] <- create_filter_fix_het(gt, opt) } # filter for fix het if(opt$filter_all_het){ filters[["all_het"]] <- create_filter_all_het(gt, opt) } #filter for bi allelic if(opt$biallelic){ if(opt$verbose) message("Apply filter to keep bi-allelic sites", appendLF=TRUE) filters[["bi_allelic"]] <- !is.biallelic(vcf) } #filter for missing if(!is.na(opt$missing)){ filters[["missing"]] <- create_filter_missing(gt, opt$missing) } # filter snps only remove indels if(opt$filter_indel){ filters[["indel"]] <- create_indel_filter(vcf, opt) } # Reading all bedfiles into Grange object if(!is.na(opt$bed_file[1])){ if(opt$verbose) message("Apply filter from bedfiles", appendLF=TRUE) bed_GRange <- lapply(opt$bed_file, bed2Grange) # Merging Grange object into one to intersect with the vcf bed_GRange_merged <- suppressWarnings(do.call("c", bed_GRange)) # Converting vcf to Grange vcf_bed_GRange <- vcf2Grange(vcf) #finding the overlap between the vcf and the bed file (bedtools subtract) filters[["filter_bed"]] <- !is.na(GenomicRanges::findOverlaps(vcf_bed_GRange, bed_GRange_merged , select ="first", ignore.strand=TRUE)) }else{ bed_GRange_merged <- NULL } if(isTRUE(opt$filter_repeats_by_windows)){ #repeats windows with more than 50% windows_with_half_repeats_to_remove <- suppressWarnings(create_filter_repeats_in_windows(opt$reference, opt$repeats, vcf, 20000, 0.5)) filters[["filter_repeats_by_windows"]] <- 
!is.na(GenomicRanges::findOverlaps(vcf_bed_GRange, windows_with_half_repeats_to_remove , select ="first", ignore.strand=TRUE)) }else{ windows_with_half_repeats_to_remove <- NULL } if(isTRUE(!is.na(opt$filter_high_DP_standardized))){ #custom filter on DP per windows opt$filter_high_DP_standardized <- parse_filter_high_DP_standardized(opt$filter_high_DP_standardized) filter_DP_repeats <- create_filter_high_DP_standardized(opt$reference, vcf, threshold = opt$filter_high_DP_standardized$threshold, threshold_CI = opt$filter_high_DP_standardized$threshold_CI, windows_size= opt$filter_high_DP_standardized$windows_size, overlapping= opt$filter_high_DP_standardized$slidding, percent_passing_filter = opt$filter_high_DP_standardized$percent_passing_filter, opt) gc() filters[["filter_DP_repeats"]] <- !is.na(GenomicRanges::findOverlaps(vcf_bed_GRange, filter_DP_repeats[[2]][[1]] , select ="first", ignore.strand=TRUE)) }else{ filter_DP_repeats <- NULL } if(!is.na(opt$filter_fix_het_contiguous_in_pop)){ #filter cluster of fix het in the swedish pop fix_het_pop <- create_filter_fix_het_contiguous_in_pop(gt, vcf, opt$filter_fix_het_contiguous_in_pop, windows_size = 50, max_nb_contigous_hets = 1) vcf_bed_GRange <- vcf2Grange(vcf) filters[["filter_pop_het_contiguous"]] <- !is.na(GenomicRanges::findOverlaps(vcf_bed_GRange, fix_het_pop , select ="first", ignore.strand=TRUE)) }else{ fix_het_pop <- NULL } # Sum up all filters to create a master filter master_filter <- Reduce("+", filters) > 0 if(length(master_filter) == 0) master_filter <- rep(FALSE, nrow(vcf)) # #information on the number of snps remove by each filters # snp_remove_by_filter <- lapply(filters, sum, na.rm = TRUE) #write a report of the snps that were filtered #Remove the fixed sites in the vcf and it to the invariant vcf. 
#Save a bedfile with the fixed sites filters[["inv_fix_sites"]] <- create_filter_fixed_sites(gt) filters[["inv_fix_sites"]] <- !master_filter & filters[["inv_fix_sites"]] fixed_inv_Grange <- vcf2Grange(vcf[filters[["inv_fix_sites"]],]) rm(gt) gc() fix_vcf <- vcf[filters[["inv_fix_sites"]],] #join the invariant with the fixed in a vcf vcf_inv <- rbind2(readRDS(tmp_vcf_inv), fix_vcf) #filter the vcf file with all the filter at once. #single filter will be outputed to a folder #in order to create removed sites bedfile or #refiltered the original vcf using only some #filters with bedtools subtract. vcf_filtered <- vcf[!master_filter & !filters[["inv_fix_sites"]],] stats_vcf_filtered <- stats_META(vcf_filtered, vcf_file=paste0(vcf_file, "_filtered"), opt) # ========================================================================== # apply some filters to vcf_inv # ========================================================================== if(!is.na(opt$filter_depth)){ vcf_inv <- change_gt_by_DP_filter(vcf_inv, min_DP=opt$filter_depth_parsed$min_DP, max_DP=opt$filter_depth_parsed$max_DP, opt) } #filter on Ref Genotype Quality if(!is.na(opt$filter_genotype_quality)){ vcf_inv <- change_gt_by_GQ_filter(vcf_inv, invariable = TRUE, GQ_threshold=opt$filter_genotype_quality$RGQ, opt) } filters_inv <- list() #filter for bi allelic if(opt$biallelic){ filters_inv[["bi_allelic"]] <- !is.biallelic(vcf_inv) } #filter for missing if(!is.na(opt$missing)){ filters_inv[["missing"]] <- create_filter_missing(vcf_inv, opt$missing) } # filter snps only remove indels if(opt$filter_indel){ filters_inv[["indel"]] <- create_indel_filter(vcf_inv, invariable=TRUE) } # Converting vcf to Grange vcf_inv_bed_GRange <- vcf2Grange(vcf_inv) #finding the overlap between the vcf and the bed file (bedtools subtract) if(!is.na(opt$bed_file[1])){ #finding the overlap between the vcf and the bed file (bedtools subtract) filters_inv[["filter_bed"]] <- !is.na(GenomicRanges::findOverlaps(vcf_inv_bed_GRange, 
bed_GRange_merged , select ="first", ignore.strand=TRUE)) } #repeats windows with more than 50% if(opt$filter_repeats_by_windows){ filters_inv[["filter_repeats_by_windows"]] <- !is.na(GenomicRanges::findOverlaps(vcf_inv_bed_GRange, windows_with_half_repeats_to_remove , select ="first", ignore.strand=TRUE)) } if(isTRUE(!is.na(opt$filter_high_DP_standardized))){ #custom filter on DP per windows vcf_inv_bed_GRange <- vcf2Grange(vcf_inv) filter_DP_repeats_inv <- create_filter_high_DP_standardized(opt$reference, vcf_inv, threshold = opt$filter_high_DP_standardized$threshold, threshold_CI = opt$filter_high_DP_standardized$threshold_CI, windows_size= opt$filter_high_DP_standardized$windows_size, overlapping= opt$filter_high_DP_standardized$slidding, opt) gc() filters_inv[["filter_DP_repeats"]] <- !is.na(GenomicRanges::findOverlaps(vcf_inv_bed_GRange, filter_DP_repeats_inv[[1]][[1]] , select ="first", ignore.strand=TRUE)) }else{ filter_DP_repeats_inv <- NULL } # Sum up all filters to create a master filter master_filter_inv <- Reduce("+", filters_inv) > 0 #information on the number of snps remove by each filters #inv_remove_by_filter <- lapply(filters_inv, sum, na.rm = TRUE) vcf_inv_filtered <- vcf2Grange(vcf_inv[!master_filter_inv], metadata = "present") ############### vcf_bed_GRange <- vcf2Grange(vcf) removed_sites <- reduce(vcf_bed_GRange[master_filter]) removed_sites_inv <- reduce(vcf_inv_bed_GRange[master_filter_inv]) RES <- as.filterVCF(list( vcf=vcf, vcf_filtered=vcf_filtered, vcf_inv=vcf2Grange(vcf_inv), vcf_inv_filtered=vcf_inv_filtered, fixed_inv_Grange=fixed_inv_Grange, reference=readDNAStringSet(opt$reference), master_filter=master_filter, master_filter_inv=master_filter_inv, filters=filters, filters_inv=filters_inv, removed_sites=removed_sites, removed_sites_inv=removed_sites_inv, bed_GRange_merged=bed_GRange_merged, windows_with_half_repeats_to_remove=windows_with_half_repeats_to_remove, fix_het_pop=fix_het_pop, filter_DP_repeats=filter_DP_repeats, 
stats_vcf_filtered=NA, opt=opt)) #stats_vcf_filtered=stats_vcf_filtered$CI_Stats)) rm(vcf_inv) rm(vcf) gc() if(isTRUE(opt$debug)){ if(!is.na(opt$output_file)){ saveRDS(RES, paste0(getwd(), "/", basename(paste0(gsub(".vcf$|.vcf.gz$", "", opt$output_file), ".rds")))) }else{ saveRDS(RES, paste0(getwd(), "/", basename(gsub(".vcf$|.vcf.gz$", ".rds", vcf_file)))) } } if(opt$verbose) Sys.procmem() #remove vcf_inv and inv_filtered to save some disk space? return(RES) }
35eff1361771115dcf16a02ca3d225b023997aeb
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/SyNet/examples/mayflynz.Rd.R
d2cd966dcd6b74c4c2337bca14d6b6b4ba8b4473
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
275
r
mayflynz.Rd.R
# Example script for the 'mayflynz' dataset shipped with the SyNet package.
# Draws the sampling localities of the New Zealand mayfly fauna.
library(SyNet)

### Name: mayflynz
### Title: Mayfly Fauna of New Zealand
### Aliases: mayflynz
### Keywords: datasets

### ** Examples

data(mayflynz)

# The second list element holds the locality table; columns 2:3 carry the
# coordinates to be plotted.
coords <- mayflynz[[2]][, 2:3]
plot(coords,
     main = "Mayfly Fauna of New Zealand",
     xlab = "Latitude",
     ylab = "Longitude",
     asp = 1.3)
1022ed71bea01ff7628fa066dbf06cc51e152fdc
5069665a0ff64671c1ffe92fc46fa4cc68c29f61
/CTEC_tfidf.R
e5360e0c8daee22779acc58822abafb087a244a8
[]
no_license
nicholson2208/AlgosAndSociety
06121530160fcdfc0b6b89fca5ccbcb37a555044
d8cf9c57179f01a994d931990dac2c82beb3bda2
refs/heads/master
2020-04-09T01:17:08.135641
2018-12-12T20:15:04
2018-12-12T20:15:04
159,898,408
0
0
null
null
null
null
UTF-8
R
false
false
2,841
r
CTEC_tfidf.R
# CTEC_tfidf.R -- tf-idf analysis of course-evaluation (CTEC) comments,
# contrasted by instructor gender, at word and n-gram level.
library(tidyverse)
library(tidytext)

# Load the NRC sentiment dictionary (word -> sentiment category)
nrc_lex <- get_sentiments("nrc") # many sentiments
all_stop_words <- stop_words %>% select(-lexicon) # long list of stop words

# read in json from CaesaParser.py, and flatten
dat <- jsonlite::fromJSON('data.json', flatten = TRUE)[[1]]
dat.df <- dat %>%
  bind_rows() %>%
  mutate(department = rep(names(dat), map_dbl(dat, nrow)))

# filter only CTECs with a gender and select a few columns
gender.comments.dept.df <- dat.df %>%
  filter(instructor_gender == "M" | instructor_gender == "F") %>%
  select(department, instructor_gender, comments) %>%
  mutate(dept_gender = paste(department, instructor_gender, sep="-"))

# One word per row (unigrams).
# BUG FIX: this step used to tokenize bigrams into a column named `ngram`,
# but the sentiment semi_join below joins on `word` (no common variable ->
# error) and the later count() also expects `word`. Tokenize single words.
comment.words <- gender.comments.dept.df %>%
  unnest %>%
  unnest_tokens(word, comments)

# Longer n-grams. NOTE(review): the name says "bigrams" but the original
# used n = 5; n = 5 is kept to preserve the analysis -- confirm intended n.
comment.bigrams <- gender.comments.dept.df %>%
  unnest %>%
  unnest_tokens(ngram, comments, token="ngrams", n=5)

# filter only words that have a sentiment score
# skip when n > 1
comment.words.interesting <- semi_join(comment.words, nrc_lex)

# create total word counts per gender
gender.word.count <- comment.words.interesting %>%
  count(instructor_gender, word, sort=TRUE) %>%
  # count(dept_gender, department, instructor_gender, word, sort=TRUE) %>%
  ungroup()

# n-gram counts per gender
gender.bigram.count <- comment.bigrams %>%
  count(instructor_gender, ngram, sort=TRUE) %>%
  ungroup()

gender.total.bigrams <- gender.bigram.count %>%
  group_by(instructor_gender) %>%
  summarize(total = sum(n))

gender.bigrams <- left_join(gender.bigram.count, gender.total.bigrams)

gender.bigrams <- gender.bigrams %>%
  bind_tf_idf(ngram, instructor_gender, n)

# Plot the top-10 tf-idf n-grams for each gender
gender.bigrams %>%
  # filter(department == 'BME' | department == 'EECS') %>%
  arrange(desc(tf_idf)) %>%
  mutate(ngram = factor(ngram, levels=rev(unique(ngram)))) %>%
  group_by(instructor_gender) %>%
  top_n(10) %>%
  ungroup %>%
  ggplot(aes(ngram, tf_idf, fill=instructor_gender)) +
  geom_col(show.legend = FALSE) +
  labs(x = NULL, y = "tf-idf") +
  facet_wrap(~instructor_gender, ncol=2,
             scales = "free") +
  coord_flip()

# group word counts
gender.total.words <- gender.word.count %>%
  group_by(instructor_gender) %>%
  summarize(total = sum(n))

gender.words <- left_join(gender.word.count, gender.total.words)

# perform tf-idf
gender.words <- gender.words %>%
  bind_tf_idf(word, instructor_gender, n)

########
# Plot the top-10 tf-idf sentiment words for each gender
gender.words %>%
  # filter(department == 'BME' | department == 'EECS') %>%
  arrange(desc(tf_idf)) %>%
  mutate(word = factor(word, levels=rev(unique(word)))) %>%
  group_by(instructor_gender) %>%
  top_n(10) %>%
  ungroup %>%
  ggplot(aes(word, tf_idf, fill=instructor_gender)) +
  geom_col(show.legend = FALSE) +
  labs(x = NULL, y = "tf-idf") +
  facet_wrap(~instructor_gender, ncol=2,
             scales = "free") +
  coord_flip()
d5c71021f206725793e26224cf30e5b01a95b3bb
74ef16d6169e660d445bfa1e359d7f08d6bf48aa
/데이터기반 통계분석 시스템구축/GraphicsTool.R
2e041c009bf8b391ebfb69b9b7d16ae98171837d
[]
no_license
BenHeo/SNU
c739828a19c25c8f11e7dec3a9100951eba97cde
e842377e68a42185f7ece118dc883f0c8e33bb13
refs/heads/master
2020-03-21T05:14:37.774269
2018-12-17T02:30:57
2018-12-17T02:30:57
138,151,468
0
0
null
null
null
null
UTF-8
R
false
false
3,403
r
GraphicsTool.R
# GraphicsTool.R -- base-graphics tutorial: formula plots, plot types,
# point/line annotation, a KNN regression visualization, and 3-D surfaces.

data(mtcars)
# View(mtcars)
str(mtcars)
names(mtcars)

plot(mpg~disp, mtcars) # '~' builds a formula: as in y = ax, the response (y) goes on the left and the predictor (x) on the right
a = "mpg~disp"
a_f = as.formula(a); class(a_f)
plot(a_f, mtcars)
?plot
plot(hp~disp, mtcars)
# hp = B0 + (B1 * disp) + error (mean 0)
# Regression relies on the equal-variance (homoscedasticity) assumption; under heteroscedasticity, prediction becomes unreliable
# If a line through the mean (error term 0) and a line through the top 10% or 5% of points have different slopes, the equal-variance assumption is violated

set.seed(1)
x = rnorm(100)
y = 2 + 2*x + rnorm(100)
plot(y~x, main = "y=2x+2") # or plot(x,y)

# plot types : p(point), l(line), b(both point and line), s(step), n(no plot)
x = seq(-2, 2, length.out = 10)
y = x^2
plot(x, y, type = 'p')
plot(x, y, type = 'l')
plot(x, y, type = 'b')
plot(x, y, type = 's')
plot(x, y, type = 'n')

plot(x, y, type = 'b', lty = 3) # different line type
plot(x, y, type = 'b', pch = 2) # different shape
plot(x=1:25, y=rep(0,25), pch=1:25) # show all 25 plotting symbols (pch 1-25)

head(colors()) # colors in the R palette
plot(x,y, type="b", xlab="xx", ylab="yy", main="y=x^2", col="lightblue")
plot(x,y, type="b", xlim= c(-1,1))

# draw multiple plots at once
plot(~mpg+disp+drat, mtcars, main="Simple Scatterplot Matrix", col = "orange", pch = 19)

plot(x,y, pch =20, main="scatter plot")
abline(a=1, b=2, col="red") # a + bx
abline(v=1, col="blue") # vertical line
abline(h=1, col="green") # horizontal line

plot(x=1,y=1, type='n', xlim=c(0,10), ylim=c(0,5), xlab = 'time', ylab = '# of visiting')
x = 0:10
set.seed(1)
y=rpois(length(x), lambda=1)
points(x,y,col="blue", type="s")
points(x,y,col="red", type="l", lty = 3)

plot(0,0, type='n', xlim=c(-2,2), ylim=c(-2,2))
x = c(-2,1,0,1,0)
y = c(0,-1,2,-2,1)
lines(x,y) # please draw by order :(

# NA is used for disconnect line
plot(0,0, type='n', xlim=c(-2,2), ylim=c(-2,2))
x = c(-2,1,NA,1,0)
y = c(0,-1,NA,-2,1)
lines(x,y) # still not good
# use group or order

plot(0,0, type='n', xlim=c(1,5), ylim=c(0,2))
x = seq(1,5,1)
abline(v=x, lty=1:length(x))

z = sort(rnorm(100))
y1 = 2+ z + rnorm(100)
plot(z, y1, col="blue", pch=3)
points(z, y1/2, col="red", pch=19)
legend("topright", c("pch_3", "pch_19"), col=c("blue", "red"), pch = c(3,19))

### Visualization of KNN
set.seed(1)
x <- sort(rnorm(100))
y <- 3 + x^2 + rnorm(100)
plot(x, y, pch = 20)
fit = lm(y~x)
str(fit)
coef <- fit$coefficients
coef[1]
coef[2]
abline(coef[1], coef[2], col='red')
# model bias ==> evaluated by least square ===> enlarger model space
# y_hat(x) = 1/k * sum(index set of xi k-nearest to x * yi)
# KNN is non-parametric regression which means KNN doesn't assume model space
library(FNN)
k10zero <- knnx.index(x, 0, k=10) # indices of the 10 x-values nearest to 0
x[47]
x[46]
idx <- k10zero[1,]
points(x[idx], y[idx], pch = 19, col = 'green' )
abline(v=0, lty = 3)
k10mean0 <- mean(y[idx]) # KNN estimate at x = 0: mean of the 10 nearest responses
abline(h=k10mean0, col = 'blue')

# Evaluate the k = 10 KNN regression curve on a grid of 100 points
eval.n = 100
eval.point = seq(-3,3,length.out = 100)
plot(x,y,pch=20)
idx.mat <- knnx.index(x, eval.point, k=10)
yhat <- rep(0, eval.n)
for (i in 1:eval.n){
  yhat[i] <- mean(y[idx.mat[i,]])
}
lines(eval.point, yhat, type = 'l', col = 'red')

a = matrix(1:25, 5, 5)
image(a)
a

# 3-D surface and contour plots of the built-in volcano elevation data
z <- 2*volcano
dim(z)
x <- 10*(1:nrow(z))
y <- 10*(1:ncol(z))
z[30,4]
x[30]
y[4]
persp(x,y,z, theta = 135, # mountain shape
      ltheta = 20, col = "green3")
contour(x,y,z) # contour lines
fa9ffa66974b6e7ec2ae8c61c04594e9349d50c3
a3eb888d081e824d3081412ea1da47a67d40cc92
/Getting and Cleaning Data/12_reading_from_the_web.R
96dc352187f76cf19a0a769d0721e9291b19a7eb
[]
no_license
Ads99/Coursera
b1bc38ba6c61b84abd3694c2eaa1a158a5fc1a94
f775ac73361098746d38db3bf4a004100109e4c6
refs/heads/master
2020-04-25T06:47:01.003825
2016-09-05T15:08:47
2016-09-05T15:08:47
25,043,486
0
0
null
null
null
null
UTF-8
R
false
false
1,684
r
12_reading_from_the_web.R
# 12_reading_from_the_web.R -- course notes on scraping web pages:
# readLines() on a URL connection, XML parsing, httr GET, HTTP basic auth,
# and reusable connection handles. All examples hit live URLs.
# NOTE(review): the hard-coded setwd() only works on the original author's
# machine -- adjust or remove before running elsewhere.
setwd('C://Users//ABaker//Documents//GitHub//Coursera//Getting and Cleaning Data')

if (!file.exists("data")) {
  dir.create("data")
}

# Getting data off webpages - readLines()
con = url("http://scholar.google.com/citations?user=HI-I6C0AAAAJ&hl=en")
htmlCode = readLines(con)
close(con)
htmlCode

# Parsing with XML
# The above generated a lot of unstructured html stuff so we can use the
# XML package to parse this data
library(XML)
url <- "http://scholar.google.com/citations?user=HI-I6C0AAAAJ&hl=en"
html <- htmlTreeParse(url, useInternalNodes=T)
xpathSApply(html, "//title", xmlValue)
# [1] "Jeff Leek - Google Scholar Citations"
xpathSApply(html, "//td[@id='col-citedby']", xmlValue)

# Another approach - GET from the httr package
library(httr); html2 = GET(url)
content2 = content(html2,as="text")
parsedHtml = htmlParse(content2,asText=TRUE)
xpathSApply(parsedHtml, "//title", xmlValue)

# More complicated - Accessing websites with passwords
# Without credentials the request is rejected (HTTP 401):
pg1 = GET("http://httpbin.org/basic-auth/user/passwd")
#Response [http://httpbin.org/basic-auth/user/passwd]
#Date: 2014-10-16 17:50
#Status: 401
#Content-type: <unknown>
# <EMPTY BODY>

# So the httr package allows us to authenticate with user name and password
pg2 = GET("http://httpbin.org/basic-auth/user/passwd",
          authenticate("user","passwd"))
pg2
#Response [http://httpbin.org/basic-auth/user/passwd]
#Date: 2014-10-16 17:52
#Status: 200
#Content-type: application/json
#Size: 46 B
#{
#  "authenticated": true,
#  "user": "user"
#}
names(pg2)

# Using handles (won't need to repeatedly authenticate)
google = handle("http://google.com")
pg1 = GET(handle=google,path="/")
pg2 = GET(handle=google,path="search")
d4347af2d55d9caff08f000318c12fcd55e21d7a
60a091a68b32cdceb2a02f5e85783a05b2230e8a
/useTau.R
bcc29faa43809b28babe86ebffc1d140f535eb92
[]
no_license
pingqingsheng/mbeta
b57e8c69c0244033b008787ecbc2311e4e76f735
f83b2bfdfde438920d3c7a993fba955585c882cf
refs/heads/master
2021-01-11T22:35:57.652426
2017-03-09T03:52:05
2017-03-09T03:52:05
78,995,547
0
0
null
null
null
null
UTF-8
R
false
false
7,427
r
useTau.R
# useTau.R -- simulation study for a mixed-effects beta regression fitted via
# a Laplace approximation: ADAM-style SGD updates for the fixed effects
# (beta), precision (phi) and random-effect precision matrix (tau = D^-1),
# alternating with Newton-Raphson updates of the group effects (gamma).
# NOTE(review): relies on helpers defined elsewhere in the package
# (self_solve, g_h, g_d_h_2, d_h_2, working_vec) -- not visible in this file.

# Simulate some data
library(MASS)
set.seed(234)
# Fixed effects (length 5) and random-effect covariance D (5x5)
fixed <- mvrnorm(1,m=rep(5,5), Sigma=diag(rep(1,5)))
D <- outer(rep(sqrt(3),5), rep(sqrt(3),5))
diag(D) <- 9
# One random-effect vector per group, expanded to one row per observation
random <- as.matrix(mvrnorm(100, m=rep(0,5), Sigma=D))
random <- matrix(rep(random, each=100),ncol=5, byrow=FALSE)
phi <- 5
# 100 groups x 100 observations, 4 covariates plus intercept
X <- matrix(0,10000,4)
for(i in 1:100){
  X[((i-1)*100+1):(i*100),] <- mvrnorm(100, m=rep(i/100,4), Sigma=diag(rep(1,4)))
}
X <- cbind(1,X)
Z <- X
group <- rep(seq_len(100), each=100)
Dat <- as.data.frame(cbind(X,Z))
Dat <- cbind(group, Dat)
# Let X=Z at present time, but of course we could sim different Z
# Logit-scale linear predictor -> mean mu, then beta-distributed response
eta <- X %*% fixed + apply(Z*random, 1, sum)
mu <- as.vector(pmax(1/(1+exp(eta)^(-1)), .Machine$double.eps))
y <- rbeta(length(group), shape1 = mu*phi, shape2 = (1-mu)*phi)
names(Dat) <- c("group",paste0("x",seq_len(5)),paste0("z",seq_len(5)))
Dat <- cbind(y=y, Dat)

# Estimation with Laplace Approximation
# initialization
# Use mini batch (possiblly find a extra package do SGD to accelerate or rewrite with C)
terminate <- FALSE
tolerance_1 <- 1
tolerance_2 <- 1
tolerance_3 <- 5
# gamma_new <- gamma_0
# NOTE(review): initialized at the true simulated values -- a warm start for
# testing the optimizer, not a realistic starting point.
gamma_new <- unique(random)
beta_new_sgd <- fixed
# D_new <- matrix(15,5,5)
# diag(D_new) <- 20
D_new <- D
phi_new_sgd <- 5
rate <- 0.01
cond_1 <- FALSE
cond_2 <- FALSE
cond_3 <- FALSE
marker <- 1
verbose <- TRUE

# SGD then Newton-Raphson iteratively (outer loop alternates the two phases)
while(!terminate){
  beta_old <- beta_new_sgd
  phi_old <- phi_new_sgd
  D_old <- D_new
  tau_new_sgd <- self_solve(D_old)
  gamma_old <- matrix(rep(gamma_new, each=100), ncol=5, byrow = FALSE)
  gamma_temp <- aggregate(gamma_old, by=list(Dat$group), unique, simplify=TRUE)
  gamma_temp <- as.matrix(gamma_temp[,-1])
  # Use ADAM updating scheme: reset 1st/2nd moment accumulators each sweep
  b_1 <- 0.9
  b_2 <- 0.9
  m_beta <- 0
  nu_beta <- 0
  m_D <- 0
  nu_D <- 0
  m_phi <- 0
  nu_phi <- 0
  # m_beta_old <- 0
  # nu_beta_old <- 0
  # m_D_old <- 0
  # nu_D_old <- 0
  # Give gamma_i (gamma_old), use SGD to optimize beta and D
  # (each group's block of data is one mini-batch)
  for (i in 1:max(as.integer(Dat$group))){
    # reset parameter and moment
    beta_old_sgd <- beta_new_sgd
    tau_old_sgd <- tau_new_sgd
    phi_old_sgd <- phi_new_sgd
    X <- as.matrix(Dat[group==i, 3:12])
    Y <- as.matrix(Dat[group==i, 1])
    # ------------------------ ADAM SGD ---------------------------------
    # Updating 1st and 2nd moment
    # Score = gradient of h minus half the gradient of log|d2h| (Laplace term)
    score_beta <- g_h(X, Y, gamma_temp[i,], beta_old_sgd)$g_beta - 1/2*g_d_h_2(X, Y, gamma_temp[i,], beta_old_sgd, tau_old_sgd)$g_beta
    m_beta <- b_1*m_beta + (1-b_1)*score_beta
    nu_beta <- b_2*nu_beta + (1-b_2)*score_beta^2
    steps_1 <- rate/sqrt(nu_beta/(1-b_1))*m_beta/(1-b_1)
    beta_new_sgd <- beta_old_sgd + steps_1
    score_phi <- g_h(X, Y, gamma_temp[i,], beta_old_sgd)$g_phi - 1/2*g_d_h_2(X, Y, gamma_temp[i,], beta_old_sgd, tau_old_sgd)$g_phi
    m_phi <- b_1*m_phi + (1-b_1)*score_phi
    nu_phi <- b_2*nu_phi + (1-b_2)*score_phi^2
    steps_2 <- rate/sqrt(nu_phi/(1-b_1))*m_phi/(1-b_1)
    phi_new_sgd <- phi_old_sgd + steps_2
    score_tau <- g_h(X, Y, gamma_temp[i,], beta_old_sgd)$g_tau - 1/2*t(self_solve(d_h_2(X,Y,gamma_temp[i,],beta_old_sgd,tau_old_sgd))) + 1/2*D_old
    m_D <- b_1*m_D + (1-b_1)*score_tau
    nu_D <- b_2*nu_D + (1-b_2)*score_tau^2
    steps_3 <- rate*(1/sqrt(nu_D/(1-b_2)))*m_D/(1-b_2)
    tau_new_sgd <- tau_old_sgd + steps_3
    # Project tau back onto the positive semi-definite cone by clipping
    # negative eigenvalues at zero
    tau_new_sgd <- eigen(tau_new_sgd)$vectors %*% diag(pmax(eigen(tau_new_sgd)$values,0)) %*% t(eigen(tau_new_sgd)$vectors)
    # # ----------------------------- Momentum SGD -----------------------------------
    # nu_beta_new <- 0.9*nu_beta_old + rate*score_beta
    # nu_D_new <- 0.9*nu_D_old + rate*score_psi
    # beta_new_sgd <- beta_old_sgd + nu_beta_new
    # D_new_sgd <- D_old_sgd + nu_D_new
    # if(any(is.nan(beta_new_sgd)==TRUE)) stop("NaN appears again !!!!!!!!!!!")
    cond1 <- all(abs(beta_old_sgd - beta_new_sgd) < 0.00001 )
    cond2 <- all(abs(phi_old_sgd - phi_new_sgd) < 0.00001 )
    cond3 <- all(abs(tau_old_sgd - tau_new_sgd) < 0.1 )
    if(cond1 & cond2 & cond3) {cat("SGD converge","\n"); break}
    if(verbose){
      cat(i,"th group descent (beta|phi)", paste0(round(beta_new_sgd,digit=4), collapse=" "), " | ", phi_new_sgd, "\n")
      # cat("biggest tau_ij", max(tau_new_sgd),"\n")
    }
  }
  # Give up iteratively reweighted algorithm.....
  # I need some time to really understand IRWLS
  # Use Newtown Rapson instead
  marker_inner <- 1
  converge <- FALSE
  gamma_old <- matrix(rep(gamma_temp,each=100), ncol=5, byrow=FALSE)
  gamma_new <- aggregate(gamma_old, by=list(Dat$group), unique, simplify=TRUE)
  gamma_new <- as.matrix(gamma_new[,-1])
  # Newton-Raphson sweep over the per-group random effects, holding
  # beta/phi/tau fixed at their current SGD values
  while(!converge){
    gamma_old <- gamma_new
    working_value <- working_vec(Dat[,3:12], Dat[,1], gamma_old, beta_new_sgd, tau_new_sgd)
    # if(marker_inner > 100) {
    #   factor <- marker_inner
    # }else{
    #   factor <- 101
    # }
    # working_value <- working_vec(Dat[,3:12], Dat[,1], gamma_old, beta_new_sgd, tau_new_sgd)
    # All these loops seems stupid
    for (i in 1:length(unique(Dat$group))){
      gamma_new[i,] <- gamma_old[i,] + self_solve(working_value$H[[i]]) %*% working_value$d_h[[i]]
    }
    if(all(abs(gamma_new - gamma_old) < 2)) {converge <- TRUE}
    if(any(is.nan(gamma_new)==TRUE)) {stop("NaN appears again !!!!!!!!!!")}
    marker_inner <- marker_inner + 1
    cat(paste0(marker_inner, "th iteration: Newtown's Way ", max(abs(gamma_new - gamma_old))), "\n")
    cat("Difference between D and D_hat(frobenious): ", sum((D_old-var(gamma_new))^2), "\n")
    # if(marker_inner > 19) {cat("Fail to converge","\n"); break}
  }
  # check convergence
  beta_new <- beta_new_sgd
  phi_new <- phi_new_sgd
  D_new <- self_solve(tau_new_sgd)
  cond_1 <- (all(abs(beta_new - beta_old) < tolerance_1))
  cond_2 <- (all(abs(phi_new - phi_old) < tolerance_2))
  cond_3 <- (sum((D_new-D_old)^2) < tolerance_3)
  if ( cond_1 & cond_2 & cond_3) {
    terminate <- TRUE
    cat("Estimation Finished","\n")
    (beta_hat <- beta_new)
    (D_hat <- D_new)
    (phi_hat <- phi_new)
  }
  marker <- marker + 1
  # Hard cap on outer iterations so the run always terminates
  if(marker > 30) {
    warning("Fail to converge, estimation is not optimal")
    terminate <- TRUE
    (beta_hat <- as.numeric(beta_new))
    (D_hat <- D_new)
    (phi_hat <- phi_new)
  }
  cat("--------",paste0(marker, "th iteration"),"--------","\n")
}

#
# NOTE(review): earlier expansions use rep(..., each=100); here rep(..., 100)
# tiles instead of repeating each row -- verify this is the intended layout.
gamma_hat <- matrix(rep(gamma_new,100), ncol=5, byrow=FALSE)
# Fitted linear predictor and mean on the original observation grid
eta_hat <- as.matrix(Dat[,3:7]) %*% beta_hat +
  apply(as.matrix(Dat[,8:12])*gamma_hat, 1, sum)
mu_hat <- 1/(1+exp(eta_hat)^(-1))
# Clamp the observed response away from exactly 0/1 before comparison
y[y > 1-.Machine$double.eps] <- 1-.Machine$double.eps
y[y < .Machine$double.eps] <- .Machine$double.eps
mu_real <- y
RMSE <- sqrt(sum((mu_hat - mu_real)^2))
# Density overlay of real vs fitted means, written to a PNG
dev.cur()
png("trial .png", width=600, height=400, res=100, units="px")
plot(density(y), type="l", lwd=2)
lines(density(mu_hat, bw=density(y)$bw), lwd=2, lty=2, main="Line: Real vs Fitted", col="red")
legend("topright", legend=c("Fitted","Real"), lty=c(2,1), lwd=2, col=c(2,1))
dev.off()
ba51873d4c5e5aad6fec0ed7905d90276679509f
c9cae6b31d52f7ce5720308a3a8b1193cb5aabcd
/01-barcelona-incidents-complaints-suggestions/functions/data_reader.R
c966a2625059aaa9f8a809efd978c2622806f397
[]
no_license
marcfresquet/opendata-rshiny
f306baa1c185344a25ad8135d503a8c24292bc85
95f4f02424f1359b41a41eb46bfda400fbbb72a1
refs/heads/master
2020-03-22T04:29:26.191908
2018-10-26T17:26:25
2018-10-26T17:26:25
139,502,019
0
0
null
2018-10-21T20:54:41
2018-07-02T22:43:06
R
UTF-8
R
false
false
640
r
data_reader.R
# Read, filter and type-convert an incidents CSV into a tibble.
#
# Backwards-compatible generalisation: the column list, minimum year and
# numeric-column list used to be taken from globals only; they are now
# parameters whose defaults are those same globals, so existing callers
# are unaffected.
#
# Args:
#   csv_path: path to the CSV file to read.
#   columns:  character vector of columns to keep (default: global `target_columns`).
#   year_min: keep rows with ANY_DATA_ALTA >= this year (default: global `min_year`).
#   num_cols: columns coerced to numeric (default: global `numeric_columns`).
#
# Returns: a tibble with parsed DATA_ALTA / DATA_TANCAMENT Date columns.
data_reader <- function(csv_path,
                        columns = target_columns,
                        year_min = min_year,
                        num_cols = numeric_columns) {
  # Read data from a csv file
  df <- read_csv(csv_path)

  # Keep only the requested columns; all_of() errors clearly on missing names
  df <- df %>% select(all_of(columns))

  # Keep only rows from `year_min` onwards
  df <- df %>% filter(ANY_DATA_ALTA >= year_min)

  # Coerce the requested columns to numeric
  for (num_col in num_cols) {
    df[[num_col]] <- as.numeric(df[[num_col]])
  }

  # Build proper Date columns from the year/month/day components
  df$DATA_ALTA <- ymd(paste(df$ANY_DATA_ALTA, df$MES_DATA_ALTA, df$DIA_DATA_ALTA, sep="-"))
  df$DATA_TANCAMENT <- ymd(paste(df$ANY_DATA_TANCAMENT, df$MES_DATA_TANCAMENT, df$DIA_DATA_TANCAMENT, sep="-"))

  # Return DataFrame
  return(df)
}
c3904994c8e6a6a15a901293b3791fb076731fce
12f3c27dfa6fda0a241c3974a7721acc4327fb09
/scripts/03_bed_movement_correction.R
bca48718435411c1932503ae79ef95d3d758592c
[ "MIT" ]
permissive
sbhattacharyay/nims
2793d7ff8cb3f1644347a029689cfbf573322b87
cf93aaa2fc1814c9dba1dad3768377456d1c637c
refs/heads/master
2023-06-24T14:54:12.391823
2023-06-20T09:27:45
2023-06-20T09:27:45
236,843,680
3
0
MIT
2021-04-14T14:46:29
2020-01-28T21:17:29
R
UTF-8
R
false
false
4,160
r
03_bed_movement_correction.R
#### Master Script 3: Bed Motion Correction and Collection of Multiple Imputations ####
#
# Shubhayu Bhattacharyay, Matthew Wang, Eshan Joshi
# University of Cambridge
# Johns Hopkins University
# email address: sb2406@cam.ac.uk
#
### Contents:
# I. Initialization
# II. Load imputed motion feature data and collect into one object
# III. Determine feature space thresholds to correct bed motion
# IV. Correct bed motion by identifying time points and patient indices during which SMA exceeds a literature-reviewed threshold

### I. Initialization
# Denote number of imputations
m <- 9

# Call requisite libraries
library(tidyverse)
library(readxl)

# Define feature label names
feature.labels <- c("BPW","FDE","HLF_h","HLF_l","MFR","SMA","WVL")

### II. Load imputed motion feature data and collect into one object
compiledImputations <- vector(mode = "list")
for (i in seq_len(m)){
  print(paste('Imputation no.',i,'started'))
  currPattern <- paste0('*',i,'.csv')
  currFileList <- list.files('../features/01_imputed_features/',pattern = currPattern)
  # BUG FIX: previously seeded with a 0-row, 12-column frame whose
  # autogenerated names (X1..X12) fail rbind's name check against the real
  # feature columns; an empty data.frame() binds cleanly.
  imputation.df <- data.frame()
  for (j in seq_along(feature.labels)){
    currFilePath <- file.path('../features/01_imputed_features',currFileList[j])
    curr.df <- read.csv(currFilePath) %>%
      mutate(Feature = feature.labels[j]) %>%
      relocate(Feature, .after = TimeOfDay)
    imputation.df <- rbind(imputation.df,curr.df)
    print(paste('Feature no.',j,'complete'))
  }
  compiledImputations[[i]] <- imputation.df %>% arrange(UPI,RecordingIdx,Feature)
  print(paste('Imputation no.',i,'complete'))
}

### III. Determine feature space thresholds to correct bed motion
# Based on an SMA threshold for dynamic vs. static activity (https://doi.org/10.1016/j.medengphy.2013.06.005),
# find corresponding thresholds for the other feature spaces.
SMA.thresh <- .135
# Note we only use the first imputation to do so since, when testing, we found that the thresholds are the same for all imputations
source('./functions/find_thresholds.R')
feature.thresholds <- find_thresholds(compiledImputations[[1]],SMA.thresh)
# Per-feature "no-motion" value ranges used to resample corrected values
featRanges <- list(
  BPW.nm.range = c(0, feature.thresholds[1]),
  FDE.nm.range = c(feature.thresholds[2], 1.707),
  HLF_l.nm.range = c(0, feature.thresholds[3]),
  HLF_h.nm.range = c(0, feature.thresholds[4]),
  MFR.nm.range = c(feature.thresholds[5], 3.2),
  SMA.nm.range = c(0, feature.thresholds[6]),
  WVL.nm.range = c(0, feature.thresholds[7])
)

### IV. Correct bed motion by identifying time points and patient indices during which SMA exceeds a literature-reviewed threshold:
for (i in seq_along(compiledImputations)){
  print(paste("Imputation No.",i,"Started"))
  curr.df <- compiledImputations[[i]]
  curr.SMA.df <-curr.df %>% filter(Feature == "SMA")
  # Rows (time points) at which the bed sensor itself registered motion
  bed.SMA.rows <- which(curr.SMA.df$Bed > SMA.thresh)
  for (j in seq_along(feature.labels)){
    a <- featRanges[[j]][1]
    b <- featRanges[[j]][2]
    print(paste("Feature No.",j,"Started"))
    curr.feat.rows <- which(curr.df$Feature == feature.labels[j])
    currFeat.df <- curr.df %>% filter(Feature == feature.labels[j])
    currFeatChange.df <- currFeat.df[bed.SMA.rows,]
    # FDE and MFR decrease with motion, so the bed signal is added back;
    # all other features are reduced by the bed signal. Out-of-range values
    # are resampled uniformly within the static-activity range.
    if (feature.labels[j] %in% c("FDE","MFR")) {
      temp.mat <- currFeatChange.df[,c('LA','LE','LW','RA','RE','RW')] + currFeatChange.df[,'Bed']
      temp.mat[temp.mat > b] <- runif(sum(temp.mat > b),a+(b-a)/2,b)
    } else {
      temp.mat <- currFeatChange.df[,c('LA','LE','LW','RA','RE','RW')] - currFeatChange.df[,'Bed']
      temp.mat[temp.mat < 0] <- runif(sum(temp.mat < 0),0,b/2)
    }
    currFeatChange.df[,c('LA','LE','LW','RA','RE','RW')] <- temp.mat
    currFeat.df[bed.SMA.rows,] <- currFeatChange.df
    curr.df[curr.feat.rows,] <- currFeat.df
    print(paste("Feature No.",j,"Complete"))
  }
  curr.df <- curr.df %>%
    mutate(ImputationNo = i) %>%
    relocate(ImputationNo, UPI, RecordingIdx, HoursFromICUAdmission, TimeOfDay, Feature)
  # BUG FIX: previously wrote `currImp`, an object that does not exist; the
  # corrected frame is `curr.df`.
  write.csv(curr.df,paste0('../features/02_bed_corrected_imputed_features/bed_corrected_imputation_',i,'.csv'),row.names = FALSE)
  print(paste("Imputation No.",i,"Complete"))
}
8954532b5865f5038f1ba3d609604f5bd06e173b
bb191ee8e08341188e48c62f1fc30d1ce322b470
/cachematrix.R
c08c23a9342e7a698f233d1d8a05fdd5932a25aa
[]
no_license
jvassy/ProgrammingAssignment2
75005b8f2cc512ca681b08a485a5bc886f1d02fa
8839b84ac0b4a33a809e11260c163beeb46e1bea
refs/heads/master
2021-01-18T10:32:48.056734
2014-07-20T21:36:36
2014-07-20T21:36:36
null
0
0
null
null
null
null
UTF-8
R
false
false
1,231
r
cachematrix.R
## The combination of makeCacheMatrix and cacheSolve takes a matrix 'x' and
## reports its inverse 's', first determining whether 's' is already cached.
## If it is, it reports 's' without calculating it again.

## makeCacheMatrix creates a list of 4 functions that can be read by
## cacheSolve to determine whether the inverse of a matrix 'x' has already
## been calculated and cached ('s').
makeCacheMatrix <- function(x = matrix()) {
  s <- NULL
  # Replace the stored matrix and invalidate any cached inverse
  set <- function(y) {
    x <<- y
    s <<- NULL
  }
  get <- function() x
  setSolve <- function(solve) s <<- solve
  getSolve <- function() s
  list(set = set, get = get,
       setSolve = setSolve,
       getSolve = getSolve)
}

## cacheSolve computes the inverse of the special "matrix" returned by
## makeCacheMatrix above. If the inverse has already been calculated ('s'),
## then cacheSolve retrieves it from the cache. If not, it calculates,
## caches and reports the inverse.
## (BUG FIX: this descriptive paragraph was missing its comment prefix,
## which made the file a syntax error.)
cacheSolve <- function(y, ...) {
  s <- y$getSolve()
  if(!is.null(s)) {
    message("getting cached data")
    return(s)
  }
  data <- y$get()
  # BUG FIX: was solve(x), which referenced an undefined/global `x` instead
  # of the matrix actually stored in the cache object.
  s <- solve(data, ...)
  y$setSolve(s)
  s
}
8563335a3c52843abd52657bdda0b56aa1ef84cc
af074201b063ceacbf57f1dd8da0fa95f8513e6f
/mayer1pre.R
439c1d3f94074fe6e0a4281b28ff2f8f3879438b
[]
no_license
cswasey16/thesis
d3244172460f5f10f3191196b09c92b3f6c01f55
4193e5fae5e1ab9816a202a981d4a0726945d711
refs/heads/master
2021-01-10T13:16:05.677161
2016-04-20T02:02:34
2016-04-20T02:02:34
53,007,128
0
0
null
null
null
null
UTF-8
R
false
false
867
r
mayer1pre.R
# Mayer Table 2 replication (pre-election wave): weighted percentage of
# respondents in each thermometer-difference bucket.
# NOTE(review): uses dummy() from the 'dummies' package and the anes_2008 /
# prevoter data objects, which must be loaded before sourcing this script.
library(stargazer)

# Percents across just D: restrict to pre-election voters
anes_prevoter <- subset(anes_2008, anes_2008$prevoter==TRUE)

# One 0/1 indicator column per bucket of subbucketsfac
dummiespre <- dummy(anes_prevoter$subbucketsfac, sep="", fun= as.numeric, drop=FALSE)
weightspre <- anes_prevoter$V080102

# Unweighted counts per bucket
countspre <- colSums(dummiespre)

# Apply survey weights to each indicator column, then total per bucket
dumweightpre <- sapply(1:ncol(dummiespre),function(x) dummiespre[,x] * weightspre )
sumspre <- colSums(dumweightpre)
totalpre <- sum(sumspre)

# Bucket labels for the table rows.
# NOTE(review): "-40:-3" and "15:-11" look like typos for "-40:-31" and
# "-15:-11" -- kept verbatim to match the published table; confirm.
range <- c("-100:-91", "-90:-81", "-80:-71", " -70:-61", "-60:-51", "-50:-41",
           "-40:-3","-30:-21", "-20:-16", "15:-11", "-10:-6", "-5:-1", "0",
           "1:5", "6:10", "11:15" ,"16:20", "21:30", "31:40", "41:50",
           "51:60", "61:70", "71:80","81:90", "91:100", "NA")

# Weighted percentage per bucket
wpctpre <- (sumspre/totalpre)*100
percentpre <- round(wpctpre, digits=2)
# BUG FIX: previously assigned names to an undefined object `percent`
names(percentpre) <- range

mayer1pre <- cbind(range, countspre, percentpre)
stargazer(mayer1pre, rownames=FALSE)
fe785c57420c930a1371ace9c9f16174dbb7bd7a
e3bb23c8c3be4c7edcb8dd5a3571c8f0dd567490
/Anomoly_Detection_R.R
02a3cf6a35e663ff09f738d1abc6c273036affd8
[]
no_license
snersuai/ML_Stack
4b3e8bfa896dca4042c2574d7202618f3ca3ce15
820d1e4eba1c53ad1ee4719a2262a73904fb2aa1
refs/heads/master
2021-10-16T15:49:35.949645
2018-12-28T15:45:19
2018-12-28T15:45:19
163,424,937
0
0
null
null
null
null
UTF-8
R
false
false
1,927
r
Anomoly_Detection_R.R
# Exploratory anomaly detection on per-minute Fitbit heart-zone calorie data
# using Twitter's AnomalyDetection package.
#
# Fixes applied:
#  - AnomalyDetectionVec() was called on raw_data BEFORE raw_data was read;
#    the call is moved after data preparation.
#  - a stray `res$plot` referenced an object `res` that was never assigned.
#  - the "Out of Range" AnomalyDetectionTs call passed its axis label
#    positionally into the wrong formal; it is now named xlabel=.
#  - one-time package installation is commented out so sourcing the script
#    does not reinstall on every run.

# install.packages("devtools")
# devtools::install_github("twitter/AnomalyDetection")
library(AnomalyDetection)

# help(AnomalyDetectionTs)
# ??AnomalyDetectionTs
# help(AnomalyDetectionVec)

# Load minute-level Fitbit heart-zone calories data
raw_data <- read.csv(file="C:/Users/Administrator/Documents/ML_Analysis/heart_activity_fitbit1.csv", header=TRUE, sep=",",stringsAsFactors = FALSE)
head(raw_data,100)

# Coerce calorie columns to numeric, parse timestamps, zero-fill missings
raw_data$caloriesOut_OR = as.numeric(raw_data$caloriesOut_OR)
raw_data$caloriesOut_FB = as.numeric(raw_data$caloriesOut_FB)
raw_data$caloriesOut_Cardio = as.numeric(raw_data$caloriesOut_Cardio)
raw_data$caloriesOut_Peak = as.numeric(raw_data$caloriesOut_Peak)
raw_data$dateTime = as.POSIXct(raw_data$dateTime)
raw_data[is.na(raw_data)] <- 0

# Vector-based detection on the second column, one-day (1440 min) period
AnomalyDetectionVec(raw_data[,2], max_anoms=0.02, period=1440, direction='both', only_last=FALSE, plot=TRUE)

# Time-series anomaly detection per heart-rate zone
AnomalyDetectionTs(raw_data[c("dateTime","caloriesOut_Peak")], xlabel = "Heart in Peak Mode" , ylabel = "Calories" , na.rm=T, max_anoms=0.1, direction='both', plot=TRUE)$plot
AnomalyDetectionTs(raw_data[c("dateTime","caloriesOut_FB")], xlabel = "Heart in Fat burn Mode" , ylabel = "Calories", max_anoms=0.1, direction='both', plot=TRUE)$plot
AnomalyDetectionTs(raw_data[c("dateTime","caloriesOut_Cardio")], xlabel = "Heart in Cardio Mode", ylabel = "Calories", max_anoms=0.1, direction='both', plot=TRUE)$plot
AnomalyDetectionTs(raw_data[c("dateTime","caloriesOut_OR")], xlabel = "Heart in Out of Range Mode", ylabel = "Calories", max_anoms=0.1, direction='both', plot=TRUE)$plot

# Sanity check against the package's public example data
df <- read.csv(url("https://raw.githubusercontent.com/ieatbaozi/R-Practicing/master/example.csv"),header = TRUE,stringsAsFactors = FALSE)
df$DateTime <- as.POSIXct(df$DateTime)
ADtest <- AnomalyDetectionTs(df, max_anoms=0.1, direction='both', plot=TRUE)
ADtest$plot
6345c34cf9ac1d51d1f06a3f36b3f53be826a9ef
1f94d932cd3526dadff7e47eeb32c14f92aeba41
/R/altadata.R
5288b1d286b9cf1504f8e74bbe65e18e64714272
[]
no_license
altabering/altadata-r
8218afae2367c3c681c864a6aded698b61b66df5
d057bddbb0b52ce15a672e5722bd663f1d013bf7
refs/heads/master
2023-02-09T07:33:13.638349
2020-12-29T14:50:17
2020-12-29T14:50:17
313,133,338
3
0
null
null
null
null
UTF-8
R
false
false
12,161
r
altadata.R
#' Initialize retrieve data process
#'
#' @param product_code data product code
#' @param limit number of rows you want to retrieve
#'
#' @return Nothing just set the initial parameters
#' @export
#'
#' @examples
#' \dontrun{
#' altadata.get_data("co_10_jhucs_03", limit = 50)
#' }
altadata.get_data <- function(product_code, limit) {
  altadata.check_parameters(product_code, "product_code", parameter_type = "character")

  # Base request URL for this product; page number and filter conditions
  # are appended later by the query builder.
  data_api_url <- paste(
    getOption("aldatata.data_api_base_url"),
    product_code,
    "/?format=json",
    "&api_key=", getOption("aldatata.api_key"),
    sep =""
  )

  options(aldatata.data_api_url = data_api_url)
  options(aldatata.condition_text = "")

  if(missing(limit)){
    options(aldatata.data_limit = NULL)
  } else {
    altadata.check_parameters(limit, "limit", parameter_type = "numeric")
    options(aldatata.data_limit = limit)
  }
}

#' Fetch data with configurations given before
#'
#' @return dataframe object
#' @export
#'
#' @examples
#' \dontrun{
#' aldatata.api_key('YOUR_API_KEY')
#' altadata.get_data("co_10_jhucs_03", limit = 50)
#' altadata.load()
#' }
altadata.load <- function() {
  data <- c()
  page <- 1
  total_size <- 0
  response_length <- 1
  data_limit <- getOption("aldatata.data_limit")

  # Page through the API until an empty page comes back or the row limit
  # is reached.
  while (response_length > 0) {
    request_url <- altadata.query_builder(page)

    tryCatch({
      df <- altadata.request(request_url)
    }, error = function(e){
      # NOTE(review): on error `df` keeps its previous value, so the stale
      # page is re-bound and paging continues -- confirm intended behaviour.
      print(e)
    })

    data <- rbind(data, df)
    # NOTE(review): length() on a data frame counts columns; this only works
    # as a "page was non-empty" check because an empty page parses to a
    # zero-length object -- confirm against the API client.
    response_length <- length(df)

    # BUG FIX: was length(data), which counts *columns* of the accumulated
    # data frame, so the early exit below never fired and every page was
    # fetched regardless of the limit. Count rows instead.
    total_size <- if (is.null(nrow(data))) 0L else nrow(data)

    if(!is.null(data_limit)){
      if(total_size > data_limit){
        break
      }
    }

    page <- page + 1
  }

  # Trim to exactly data_limit rows (the loop may overshoot by one page)
  if(!is.null(data_limit)){
    data <- utils::head(data, data_limit)
  }

  return(data)
}

#' Get customer subscription info
#'
#' @return dataframe object
#' @export
#'
#' @examples
#' \dontrun{
#' altadata.list_subscription()
#' }
altadata.list_subscription <- function() {
  subscription_api_url <- getOption("aldatata.subscription_api_url")
  subscription_info <- altadata.request(subscription_api_url)

  return(subscription_info)
}

#' Get data header as a vector
#'
#' @param product_code data product code
#'
#' @return vector object
#' @export
#'
#' @examples
#' \dontrun{
#' aldatata.api_key('YOUR_API_KEY')
#' altadata.get_header("co_10_jhucs_03")
#' }
altadata.get_header <- function(product_code) {
  altadata.check_parameters(product_code, "product_code", parameter_type = "character")

  # Fetch only the first page; its column names are the product header.
  request_url <- paste(
    getOption("aldatata.data_api_base_url"),
    product_code,
    "/?format=json",
    "&api_key=", getOption("aldatata.api_key"),
    "&page=1",
    sep =""
  )

  json_response <- altadata.request(request_url)
  header_info <- names(json_response)

  return(header_info)
}

#' Select specific columns in the retrieve data process
#'
#' @param selected_columns list of columns to select
#'
#' @return Nothing just set the select parameters
#' @export
#'
#' @examples
#' \dontrun{
#' altadata.select(c("reported_date", "province_state", "mortality_rate"))
#' }
altadata.select <- function(selected_columns) {
  altadata.check_parameters(selected_columns, "selected_columns", parameter_type = "vector")

  # API expects a comma-separated column list appended to the query string.
  selected_columns_text <- paste(selected_columns, collapse=",")

  condition_text <- paste(
    getOption("aldatata.condition_text"),
    "&columns=", selected_columns_text,
    sep = ""
  )

  options(aldatata.condition_text = condition_text)
}

#' Sort data by given column and method in the retrieve data process
#'
#' @param order_column column to which the order is applied
#' @param order_method sorting method.
Possible values: asc or desc #' #' @return Nothing just set the sort parameters #' @export #' #' @examples #' \dontrun{ #' altadata.sort("province_state", order_method = "desc") #' } altadata.sort <- function(order_column, order_method = "asc") { altadata.check_parameters(order_column, "order_column", parameter_type = "character") altadata.check_parameters(order_method, "order_method", parameter_type = "character") if(!(order_method %in% c("asc", "desc"))){ stop("order_method parameter must be 'asc' or 'desc'") } condition_text <- paste( getOption("aldatata.condition_text"), "&order_by=", order_column, "_", toString(order_method), sep = "" ) options(aldatata.condition_text = condition_text) } #' Equal condition by given column and value in the retrieve data process #' #' @param condition_column column to which the condition will be applied #' @param condition_value value to use with condition #' #' @return Nothing just set the equal condition parameters #' @export #' #' @examples #' \dontrun{ #' altadata.equal("province_state", "Alabama") #' } altadata.equal <- function(condition_column, condition_value) { altadata.check_parameters(condition_column, "condition_column", parameter_type = "character") condition_text <- paste( getOption("aldatata.condition_text"), "&", condition_column, "_eq=", toString(condition_value), sep = "" ) options(aldatata.condition_text = condition_text) } #' Not equal condition by given column and value #' #' @param condition_column column to which the condition will be applied #' @param condition_value value to use with condition #' #' @return Nothing just set the not equal condition parameters #' @export #' #' @examples #' \dontrun{ #' altadata.not_equal("province_state", "Utah") #' } altadata.not_equal <- function(condition_column, condition_value) { altadata.check_parameters(condition_column, "condition_column", parameter_type = "character") condition_text <- paste( getOption("aldatata.condition_text"), "&", condition_column, "_neq=", 
toString(condition_value), sep = "" ) options(aldatata.condition_text = condition_text) } #' Greater than condition by given column and value #' #' @param condition_column column to which the condition will be applied #' @param condition_value value to use with condition #' #' @return Nothing just set the greater than condition parameters #' @export #' #' @examples #' \dontrun{ #' altadata.greater_than("mortality_rate", 2) #' } altadata.greater_than <- function(condition_column, condition_value) { altadata.check_parameters(condition_column, "condition_column", parameter_type = "character") condition_text <- paste( getOption("aldatata.condition_text"), "&", condition_column, "_gt=", toString(condition_value), sep = "" ) options(aldatata.condition_text = condition_text) } #' Greater than equal condition by given column and value #' #' @param condition_column column to which the condition will be applied #' @param condition_value value to use with condition #' #' @return Nothing just set the greater than equal condition parameters #' @export #' #' @examples #' \dontrun{ #' altadata.greater_than_equal("mortality_rate", 3) #' } altadata.greater_than_equal <- function(condition_column, condition_value) { altadata.check_parameters(condition_column, "condition_column", parameter_type = "character") condition_text <- paste( getOption("aldatata.condition_text"), "&", condition_column, "_gte=", toString(condition_value), sep = "" ) options(aldatata.condition_text = condition_text) } #' Less than condition by given column and value #' #' @param condition_column column to which the condition will be applied #' @param condition_value value to use with condition #' #' @return Nothing just set the less than condition parameters #' @export #' #' @examples #' \dontrun{ #' altadata.less_than("mortality_rate", 2) #' } altadata.less_than <- function(condition_column, condition_value) { altadata.check_parameters(condition_column, "condition_column", parameter_type = "character") 
condition_text <- paste( getOption("aldatata.condition_text"), "&", condition_column, "_lt=", toString(condition_value), sep = "" ) options(aldatata.condition_text = condition_text) } #' Less than equal condition by given column and value #' #' @param condition_column column to which the condition will be applied #' @param condition_value value to use with condition #' #' @return Nothing just set the less than equal condition parameters #' @export #' #' @examples #' \dontrun{ #' altadata.less_than_equal("mortality_rate", 3) #' } altadata.less_than_equal <- function(condition_column, condition_value) { altadata.check_parameters(condition_column, "condition_column", parameter_type = "character") condition_text <- paste( getOption("aldatata.condition_text"), "&", condition_column, "_lte=", toString(condition_value), sep = "" ) options(aldatata.condition_text = condition_text) } #' In condition by given column and value list #' #' @param condition_column column to which the condition will be applied #' @param condition_value value to use with condition #' #' @return Nothing just set the in condition parameters #' @export #' #' @examples #' \dontrun{ #' altadata.condition_in("province_state", c("Utah", "Alabama")) #' } altadata.condition_in <- function(condition_column, condition_value) { altadata.check_parameters(condition_column, "condition_column", parameter_type = "character") altadata.check_parameters(condition_value, "condition_value", parameter_type = "vector") condition_value_text <- paste(condition_value, collapse=",") condition_text <- paste( getOption("aldatata.condition_text"), "&", condition_column, "_in=", condition_value_text, sep = "" ) options(aldatata.condition_text = condition_text) } #' Not in condition by given column and value list #' #' @param condition_column column to which the condition will be applied #' @param condition_value value to use with condition #' #' @return Nothing just set the not in condition parameters #' @export #' #' @examples 
#' \dontrun{ #' altadata.condition_not_in("province_state", c("Utah", "Alabama")) #' } altadata.condition_not_in <- function(condition_column, condition_value) { altadata.check_parameters(condition_column, "condition_column", parameter_type = "character") altadata.check_parameters(condition_value, "condition_value", parameter_type = "vector") condition_value_text <- paste(condition_value, collapse=",") condition_text <- paste( getOption("aldatata.condition_text"), "&", condition_column, "_notin=", condition_value_text, sep = "" ) options(aldatata.condition_text = condition_text) } ## Helper functions # Check parameter types by given inputs altadata.check_parameters <- function(paramater, paramater_name, parameter_type) { if (parameter_type == "vector") { if (class(paramater) != "character") { stop(paste(paramater_name, " parameter must be ", parameter_type, sep="")) } else if (length(paramater) < 1) { stop(paste(paramater_name, " parameter must contain at least one value", sep="")) } } else if (parameter_type == "character") { if (class(paramater) != "character") { stop(paste(paramater_name, " parameter must be ", parameter_type, sep="")) } } else if (parameter_type == "numeric") { if (class(paramater) != "numeric") { stop(paste(paramater_name, " parameter must be ", parameter_type, sep="")) } } } # Request API and parse the result altadata.request <- function(request_url) { response <- httr::GET(request_url) if(httr::status_code(response) != 200){ stop(httr::content(response, as = "text"), call. 
= FALSE) } json_response <- jsonlite::fromJSON(rawToChar(response$content)) return(json_response) } # Create API request url altadata.query_builder <- function(page) { condition_text <- getOption("aldatata.condition_text") if(condition_text == ""){ request_url <- paste( getOption("aldatata.data_api_url"), "&page=", toString(page), sep ="" ) } else { request_url <- paste( getOption("aldatata.data_api_url"), condition_text, "&page=", toString(page), sep ="" ) } return(request_url) }
953c6a8fd2ff4b85a3e8ad59d8775418381d9f03
d922758e6c9ac51cdbcfe25ff26a114d1635250c
/man/update.Rd
038cfb049024280cd37be8632f717035e3b551e2
[]
no_license
jeffreyhanson/raptr
c7fa50617b080a72f8fe97c9534a664cc02d91b9
059a1abc9c2e2f071ce2a7d3596bfd1189441d92
refs/heads/master
2023-08-31T22:13:12.446845
2023-03-14T03:26:50
2023-03-14T03:26:50
44,244,406
5
0
null
2023-08-21T23:56:38
2015-10-14T11:51:44
R
UTF-8
R
false
true
6,325
rd
update.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/generics.R, R/GurobiOpts.R, R/ManualOpts.R, % R/RapData.R, R/RapReliableOpts.R, R/RapUnreliableOpts.R, R/RapSolved.R \name{update} \alias{update} \alias{update.GurobiOpts} \alias{update.ManualOpts} \alias{update.RapData} \alias{update.RapReliableOpts} \alias{update.RapUnreliableOpts} \alias{update.RapUnsolOrSol} \title{Update object} \usage{ \method{update}{GurobiOpts}( object, Threads = NULL, MIPGap = NULL, Method = NULL, Presolve = NULL, TimeLimit = NULL, NumberSolutions = NULL, MultipleSolutionsMethod = NULL, NumericFocus = NULL, ... ) \method{update}{ManualOpts}(object, NumberSolutions = NULL, ...) \method{update}{RapData}( object, species = NULL, space = NULL, name = NULL, amount.target = NULL, space.target = NULL, pu = NULL, cost = NULL, status = NULL, ... ) \method{update}{RapReliableOpts}(object, BLM = NULL, failure.multiplier = NULL, max.r.level = NULL, ...) \method{update}{RapUnreliableOpts}(object, BLM = NULL, ...) \method{update}{RapUnsolOrSol}(object, ..., formulation = NULL, solve = TRUE) } \arguments{ \item{object}{\code{\link[=GurobiOpts]{GurobiOpts()}}, \code{\link[=RapUnreliableOpts]{RapUnreliableOpts()}}, \code{\link[=RapReliableOpts]{RapReliableOpts()}}, \code{\link[=RapData]{RapData()}}, \code{\link[=RapUnsolved]{RapUnsolved()}}, or \code{\link[=RapSolved]{RapSolved()}} object.} \item{Threads}{\code{integer} number of cores to use for processing.} \item{MIPGap}{\code{numeric} MIP gap specifying minimum solution quality.} \item{Method}{\code{integer} Algorithm to use for solving model.} \item{Presolve}{\code{integer} code for level of computation in presolve.} \item{TimeLimit}{\code{integer} number of seconds to allow for solving.} \item{NumberSolutions}{\code{integer} number of solutions to generate.} \item{MultipleSolutionsMethod}{\code{integer} name of method to obtain multiple solutions (used when \code{NumberSolutions} is greater than one). 
Available options are \code{"benders.cuts"}, \code{"solution.pool.0"}, \code{"solution.pool.1"}, and \code{"solution.pool.2"}. The \code{"benders.cuts"} method produces a set of distinct solutions that are all within the optimality gap. The \code{"solution.pool.0"} method returns all solutions identified whilst trying to find a solution that is within the specified optimality gap. The \code{"solution.pool.1"} method finds one solution within the optimality gap and a number of additional solutions that are of any level of quality (such that the total number of solutions is equal to \code{number_solutions}). The \code{"solution.pool.2"} finds a specified number of solutions that are nearest to optimality. The search pool methods correspond to the parameters used by the Gurobi software suite (see \url{https://www.gurobi.com/documentation/8.0/refman/poolsearchmode.html#parameter:PoolSearchMode}). Defaults to \code{"benders.cuts"}.} \item{NumericFocus}{\code{integer} how much effort should Gurobi focus on addressing numerical issues? 
Defaults to \code{0L} such that minimal effort is spent to reduce run time.} \item{...}{parameters passed to \code{\link[=update.RapReliableOpts]{update.RapReliableOpts()}}, \code{\link[=update.RapUnreliableOpts]{update.RapUnreliableOpts()}}, or \code{\link[=update.RapData]{update.RapData()}}.} \item{species}{\code{integer} or \code{character} denoting species for which targets or name should be updated.} \item{space}{\code{integer} denoting space for which targets should be updated.} \item{name}{\code{character} to rename species.} \item{amount.target}{\code{numeric} vector for new area targets (\%) for the specified species.} \item{space.target}{\code{numeric} vector for new attribute space targets (\%) for the specified species and attribute spaces.} \item{pu}{\code{integer} planning unit indices that need to be updated.} \item{cost}{\code{numeric} new costs for specified planning units.} \item{status}{\code{integer} new statuses for specified planning units.} \item{BLM}{\code{numeric} boundary length modifier.} \item{failure.multiplier}{\code{numeric} multiplier for failure planning unit.} \item{max.r.level}{\code{numeric} maximum R failure level for approximation.} \item{formulation}{\code{character} indicating new problem formulation to use. This can be either "unreliable" or "reliable". The default is \code{NULL} so that formulation in \code{object} is used.} \item{solve}{\code{logical} should the problem be solved? This argument is only valid for \code{\link[=RapUnsolved]{RapUnsolved()}} and \code{\link[=RapSolved]{RapSolved()}} objects. Defaults to \code{TRUE}.} } \value{ \linkS4class{GurobiOpts}, \linkS4class{RapUnreliableOpts}, \linkS4class{RapReliableOpts}, \linkS4class{RapData}, \linkS4class{RapUnsolved}, or \linkS4class{RapSolved} object depending on argument to \code{x}. 
} \description{ This function updates parameters or data stored in an existing \code{\link[=GurobiOpts]{GurobiOpts()}}, \code{\link[=RapUnreliableOpts]{RapUnreliableOpts()}}, \code{\link[=RapReliableOpts]{RapReliableOpts()}}, \code{\link[=RapData]{RapData()}}, \code{\link[=RapUnsolved]{RapUnsolved()}}, or \code{\link[=RapSolved]{RapSolved()}} object. } \examples{ \dontrun{ # load data data(sim_ru, sim_rs) # GurobiOpts x <- GurobiOpts(MIPGap = 0.7) y <- update(x, MIPGap = 0.1) print(x) print(y) # RapUnreliableOpts x <- RapUnreliableOpts(BLM = 10) y <- update(x, BLM = 2) print(x) print(y) # RapReliableOpts x <- RapReliableOpts(failure.multiplier = 2) y <- update(x, failure.multiplier = 4) print(x) print(y) # RapData x <- sim_ru@data y <- update(x, space.target = c(0.4, 0.7, 0.1)) print(space.target(x)) print(space.target(y)) ## RapUnsolved x <- sim_ru y <- update(x, amount.target = c(0.1, 0.2, 0.3), BLM = 3, solve = FALSE) print(x@opts@BLM); print(amount.target(x)) print(y@opts@BLM); print(space.target(y)) ## RapSolved x <- sim_rs y <- update(x, space.target = c(0.4, 0.6, 0.9), BLM = 100, Presolve = 1L, solve = FALSE) print(x@opts@BLM); print(amount.target(x)) print(y@opts@BLM); print(space.target(y)) } } \seealso{ \linkS4class{GurobiOpts}, \linkS4class{RapUnreliableOpts}, \linkS4class{RapReliableOpts}, \linkS4class{RapData}, \linkS4class{RapUnsolved}, \linkS4class{RapSolved}. }
14db5f8d1387f872ff224de2948d5449c2b42ee4
b77b91dd5ee0f13a73c6225fabc7e588b953842b
/ev_prep_scripts/ds_dn_3_paml_parse.R
c175f91ffccad01cf3c2266197dcac794a0c2b13
[ "MIT" ]
permissive
ksamuk/gene_flow_linkage
a1264979e28b61f09808f864d5fa6c75568147b0
6182c3d591a362407e624b3ba87403a307315f2d
refs/heads/master
2021-01-18T09:18:02.904770
2017-04-02T16:51:40
2017-04-02T16:51:40
47,041,898
1
0
null
null
null
null
UTF-8
R
false
false
5,353
r
ds_dn_3_paml_parse.R
######## Parse PAML output files into a per-gene dN/dS data frame ########
######## (works from the raw PAML files to avoid shell-script shenanigans)
# For each PAML .cml output file: read the dS/dN tree lines, pull the branch
# length for the focal gene, map the Ensembl peptide ids to genomic
# coordinates via biomaRt, filter the estimates and write "ds.txt"/"dn.txt".

library(ape)
library(biomaRt)
library(dplyr)
library(magrittr)

# Hard-coded project paths (adjust to the local checkout).
home.dir <- "~/Documents/Science/Projects/Ph.D./Genome Meta Analysis/ev_prep_scripts/paml_analysis"
#home.dir<-"~/review/analysis/gma/ev_prep_scripts/paml_analysis"

# The output directory (window evs)
out.dir <- "~/Documents/Science/Projects/Ph.D./Genome Meta Analysis/evs/window"

# Contains pre-processed paml output (the dn and ds tree lines + first line
# of the paml file)
paml.output.dir <- file.path(home.dir, "alignments_all")
setwd(paml.output.dir)

file.list <- list.files()

# Branch length leading to tip `tip` in `tree`, or NA when the edge is
# missing (same is.null() semantics the original inline code used).
edge_length_for <- function(tree, tip) {
  len <- tree$edge.length[which.edge(tree, tip)]
  if (is.null(len)) NA else len
}

# Preallocate the result frame (one row per PAML file).
gacu.dnds <- data.frame(
  gene.id   = character(length(file.list)),
  ds        = numeric(length(file.list)),
  dn        = numeric(length(file.list)),
  num.sites = numeric(length(file.list)),
  stringsAsFactors = FALSE
)

for (i in seq_along(file.list)) {
  # readLines() on a path opens and closes its own connection; no need for
  # file()/closeAllConnections().
  file.lines <- readLines(file.list[i])

  # Find the "dS tree" line
  ds.tree.line <- grep("dS tree", file.lines)

  # Number of sites after gap removal (for filtering bad alignments)
  gaps.line <- grep("After deleting gaps. ", file.lines)
  sites.i <- as.numeric(unlist(strsplit(file.lines[gaps.line], split = " "))[4])

  # Edge case: the alignment had no gaps, so read the count from the
  # first (header) line instead.
  if (length(sites.i) == 0) {
    header.fields <- as.numeric(unlist(strsplit(file.lines[1], split = " ")))
    sites.i <- header.fields[length(header.fields)]
  }
  gacu.dnds$num.sites[i] <- sites.i

  # Gene name comes from the file name ("<gene>.cml...")
  gene.name <- sapply(strsplit(file.list[i], split = ".cml"), function(x) x[1])
  if (gene.name == "") {
    # BUG FIX: print() with a second character argument errors out;
    # warning() reports the odd file without stopping the run.
    warning(paste0(file.list[i], " has no gene name?!"))
  }
  gacu.dnds$gene.id[i] <- gene.name

  # If there is no dS tree or the target gene is absent, record NA.
  if (length(ds.tree.line) == 0) {
    #print(paste(file.list[i],"is missing dS tree."))
    gacu.dnds$ds[i] <- NA
    gacu.dnds$dn[i] <- NA
  } else if (length(grep(paste(gene.name, ":", sep = ""), file.lines)) == 0) {
    #print(paste(file.list[i],"has a dS tree, but is missing the target gene."))
    gacu.dnds$ds[i] <- NA
    gacu.dnds$dn[i] <- NA
  } else {
    # The dS tree is on the line after the marker, the dN tree two lines
    # further down (fixed layout of the pre-processed PAML output).
    ds.tree <- read.tree(text = file.lines[ds.tree.line + 1])
    dn.tree <- read.tree(text = file.lines[ds.tree.line + 3])

    gacu.dnds$ds[i] <- edge_length_for(ds.tree, gene.name)
    gacu.dnds$dn[i] <- edge_length_for(dn.tree, gene.name)
  }

  # Progress indicator (now reported for every file, not just parsed ones)
  if (i %% 1000 == 0) {
    cat(i, "...", sep = "")
  }
}

# Remove rows with NA estimates
gacu.dnds <- gacu.dnds[complete.cases(gacu.dnds), ]

## Match gene.ids to genomic coordinates
ensembl <- useMart("ensembl", dataset = "gaculeatus_gene_ensembl")

attributes.feat <- c("ensembl_gene_id", "ensembl_peptide_id",
                     "start_position", "end_position", "chromosome_name")

coords <- getBM(attributes = attributes.feat,
                values = gacu.dnds$gene.id,
                filters = c("ensembl_peptide_id"),
                mart = ensembl)

# For easier viewing
coords <- arrange(coords, ensembl_peptide_id)

# Match ds/dn/site counts back onto the coordinate table.
idx <- match(coords$ensembl_peptide_id, gacu.dnds$gene.id)

gacu.out <- data.frame(
  gene.id    = coords$ensembl_gene_id,
  peptide.id = coords$ensembl_peptide_id,
  lg   = coords$chromosome_name,
  pos1 = coords$start_position,
  pos2 = coords$end_position,
  ds   = gacu.dnds$ds[idx],
  dn   = gacu.dnds$dn[idx],
  # BUG FIX: "sites" previously recycled the scalar left over from the last
  # loop iteration; it must be matched per peptide like ds and dn.
  sites = gacu.dnds$num.sites[idx]
)

# If there are multiple peptides from a single gene (~30% of data), take the
# mean of the ds/dn values per gene.
out.means <- gacu.out %>%
  group_by(gene.id) %>%
  summarise(ds.mean = mean(ds), dn.mean = mean(dn)) %>%
  ungroup() %>%
  data.frame()

# BUG FIX: the next step previously subset the un-joined gacu.out, silently
# discarding the per-gene means computed above; keep the joined frame.
gacu.out.2 <- left_join(gacu.out, out.means)

# Remove scaffolds (keep assembled "groupXXI"-style linkage groups only)
gacu.out.2 <- gacu.out.2[grep("group", gacu.out.2$lg), ]

# Convert the roman-numeral group names to numeric linkage-group ids
gacu.out.2$lg <- as.numeric(as.roman(sapply(strsplit(as.character(gacu.out.2$lg), "group"), function(x) x[2])))

gacu.out.2 <- arrange(gacu.out.2, lg)

# FILTERING: no dn/ds values above 2 (from literature), no genes with <100 sites
gacu.out.2 <- gacu.out.2[gacu.out.2$sites >= 100, ]
gacu.out.2 <- gacu.out.2[gacu.out.2$dn <= 2, ]
gacu.out.2 <- gacu.out.2[gacu.out.2$ds <= 2, ]

# Prep output files (strip duplicates); select columns by name so changes to
# the frame's layout cannot silently shift the selection.
gacu.out.ds <- unique(gacu.out.2[, c("lg", "pos1", "pos2", "ds")])
gacu.out.dn <- unique(gacu.out.2[, c("lg", "pos1", "pos2", "dn")])

# Write to file
setwd(out.dir)
write.table(gacu.out.ds, file = "ds.txt", row.names = FALSE)
write.table(gacu.out.dn, file = "dn.txt", row.names = FALSE)
8fe7ae1f9f946dbb648b5dd968a5d3de1ee1fc60
6f10f8643a261334c1e1b2b534c6e433b1100a53
/plot4.R
90122a6694a948da002203ae16f87b4f34f9fe5d
[]
no_license
JustinAbbottcs/ExData_Plotting1
772d4dcf449eb516f10ffcd174f6ce8107d599b0
fe1672482773d7b62e78d1839b65998b4e8620ec
refs/heads/master
2020-09-15T16:25:34.019867
2019-11-24T00:39:26
2019-11-24T00:39:26
223,502,775
0
0
null
2019-11-22T23:27:30
2019-11-22T23:27:29
null
UTF-8
R
false
false
1,677
r
plot4.R
library(tidyverse) ##Read in data from local file, limiting rows read to preserve memory hpc_data <- read.table("../household_power_consumption.txt", header = TRUE, sep = ";", nrows = 500000) ##Data cleaning hpc_data <- hpc_data %>% mutate(Date=as.Date(hpc_data$Date, "%d/%m/%Y")) %>% mutate(Time=as.POSIXct(strptime(paste(Date, Time), format = "%Y-%m-%d %H:%M:%S"))) %>% filter(Date == "2007-02-01" | Date == "2007-02-02") hpc_data[3:9] <- sapply(hpc_data[3:9], function(x) as.numeric(as.character(x))) #Initializes 4 graphs and subsequently adds to each to write to pdf png("plot4.png") par(mfcol = c(2, 2)) with(hpc_data, { #1 plot(y = hpc_data$Global_active_power, x = hpc_data$Time, xlab = "", ylab = "Global Active Power", type = "l") #2 with(hpc_data, plot(Time, Sub_metering_1, ylim = c(0,40), type = "l", xlab = "", ylab = "Energy sub metering")) with(hpc_data, lines(Time, Sub_metering_2, col = "red")) with(hpc_data, lines(Time, Sub_metering_3, col = "blue")) with(hpc_data, legend("topright", col = c("black", "red", "blue"), lty = c(1, 1, 1), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))) #3 plot(y = hpc_data$Voltage, x = hpc_data$Time, xlab = "datetime", ylab = "Voltage", type = "l") #4 plot(y = hpc_data$Global_reactive_power, x = hpc_data$Time, xlab = "datetime", ylab = "Global_reactive_power", type = "l") }) #Write to and close png file dev.off()
c4e1458347eaf44cbf3d260a64cabeeef2a4fec8
410813638cfe015b49c2396634207545445ff35b
/str/BB Calibrado/SimPiPS_BB_v1 .r
aab77504f9186756d0389d016a28f27695517887
[]
no_license
stalynGuerrero/InferenciaBootstrapBayesiana
dda9335299d910b0f94f4139cc93f6fde1086c4f
ed208199546820edc7df7b0c0481aab89dbd09a4
refs/heads/master
2021-01-19T04:58:01.793901
2017-08-04T13:56:26
2017-08-04T13:56:26
60,229,312
0
0
null
null
null
null
UTF-8
R
false
false
4,368
r
SimPiPS_BB_v1 .r
#############################################################################
# Bootstrap-Bayesian simulation for piPS sampling: for each (n, sigma)
# scenario, repeatedly draws piPS samples, builds a posterior for the total
# ty under different priors and records coverage / length / bias / CV.
# NOTE(review): rm(list = ls()) and setwd() at the top of a script are
# fragile (they wipe the session and hard-code a machine path) -- confirm
# before reusing this outside the original environment.
rm(list = ls())
options(digits = 10)
dirPath<-"/home/sguerrero/Documentos/Dropbox/articulos/Artículo Proporción bayesiana/InferenciaBootstrapBayesiana/Algoritmo prueba/InferenciaBootstrapBayesiana"
setwd(dirPath)
#############################################################################
require(TeachingSampling)
require(hdrcde)
require(cubature)
# Project helpers: population simulator, bootstrap likelihood, quality metrics
source("str/Funciones Comunes/SimMASGamma.r")
source("str/Funciones Comunes/VeroBootPiPS.r")
source("str/Funciones Comunes/Medida.Calidad.r")
#############################################################################
# Define the simulation function: one piPS sample of size n from Pob,
# posterior for the total ty under the chosen prior ("unif", "gamma" or
# "normal" with hyper-parameter k), returns coverage indicator, interval
# length, point estimate, bias and CV.
SimHT <- function(Pob, n, apriori = "unif", k) {
  ty<-sum(Pob[,"Y"])                 # true population total (target)
  sel<-S.piPS(n,Pob[,"X"])           # piPS sample proportional to X
  pik<-sel[,"Pik.s"]                 # inclusion probabilities of the sample
  sel<-sel[,"samp"]                  # sampled unit indices
  ys<-Pob[sel,"Y"]
  # Bootstrap likelihood of the total evaluated on a grid (list with x, y)
  Vero <- VeroBootPiPS(ys,pik,ty)
  if (apriori == "gamma") {
    # Gamma prior parameterised to have mean ty and variance k
    Aprio = dgamma(Vero$x, ty ^ 2 / (k), ty / (k))
    nombre<-paste0("Vero,",k,apriori)
  }
  if (apriori == "unif") {
    Aprio = dunif(Vero$x, min = 0, max = 10 ^ 10)
    nombre<-paste0("Vero,",apriori)
  }
  if (apriori == "normal") {
    Aprio = dnorm(Vero$x, mean = ty, sd = k)
    nombre<-paste0("Vero,",k,apriori)
  }
  # Debug plots (left commented out on purpose):
  # par(mfrow=c(1,2))
  # plot(Vero,type = "l",main = nombre)
  # abline(v=ty)
  # Unnormalised posterior = likelihood * prior; fall back to a flat
  # posterior over the grid when the product vanishes everywhere.
  post = Vero$y * Aprio
  if(sum(post)==0){post=dunif(Vero$x, min = min(Vero$x), max = max(Vero$x))}
  # Keep only grid points with positive posterior mass
  Vero$x <- Vero$x[post>0]
  Vero$y <- Vero$y[post>0]
  post <- post[post>0]
  # Interpolated posterior density, evaluated on an integer grid between
  # the 5% and 95% empirical quantiles of the support
  Fxpost <- approxfun(Vero$x,post)
  q0.001<-as.numeric(quantile(Vero$x,probs = 0.05))
  q0.999<-as.numeric(quantile(Vero$x,probs = 0.95))
  rejilla<-seq(q0.001,q0.999,by=1)
  # Posterior draws by weighted resampling of the grid
  muesb = sample(rejilla, 1000, prob =Fxpost(rejilla), replace = T)
  # Debug plots (left commented out on purpose):
  # plot(x = rejilla,y=Fxpost(rejilla),type = "l",main = "Pos")
  # abline(v=ty)
  # points(x =muesb,Fxpost(muesb),pch=20)
  # hist(muesb)
  CV = 1000 * sd(muesb) / mean(muesb)
  IC = hdr(muesb, 95)$hdr  # 95% highest-density credibility interval
  c(cont = ifelse(ty > IC[1] & ty < IC[2], 1, 0),  # coverage indicator
    Longitud = IC[2] - IC[1],  # interval length
    hat.ty = mean(muesb),      # posterior point estimate of the total
    Sesgo = mean(muesb) - ty,  # estimation bias
    CV = CV )                  # estimated coefficient of variation
}
#############################################################################
# Initialize the population parameters
N=20000
shape=4
rate=1
#############################################################################
# Build the scenario grid: all combinations of sample size and sigma
n=c(50,400,1000)
sigma=c(74,13.7,3.6)
Escenarios<-expand.grid(n=n,sigma=sigma)
# One result frame per prior configuration (filled row-by-row below)
RsultMAS1<- data.frame(Coverage.100=NA,
                       Longitud.Relative.1000=NA,
                       Sesgo.Relative.1000=NA,
                       CV.1000=NA)
RsultMAS2<-RsultMAS1
RsultMAS3<-RsultMAS1
RsultMAS4<-RsultMAS1
RsultMAS5<-RsultMAS1
#############################################################################
# NOTE(review): these two scalars are immediately shadowed by the loop,
# which reads Escenarios[i, ...] instead -- they look like leftover debug
# assignments; confirm before removing.
n=50
sigma=74
# NOTE(review): 9 is hard-coded as nrow(Escenarios) (3 n values x 3 sigmas);
# it must be kept in sync with the grid above.
for (i in 1:9) {
  set.seed(1)  # same seed per scenario so prior configurations are comparable
  Pob<-SimMASGamma(N,shape,rate,Escenarios[i,"sigma"])
  # 1000 replicates per prior: uniform, diffuse normal/gamma, tight normal/gamma
  ResulSimU <-t(replicate(1000,SimHT(Pob,Escenarios[i,"n"])))
  ResulSimN_N <-t(replicate(1000,SimHT(Pob,Escenarios[i,"n"],apriori = "normal",k=1000)))
  ResulSimG_N <-t(replicate(1000,SimHT(Pob,Escenarios[i,"n"],apriori = "gamma",k=10000)))
  ResulSimN <-t(replicate(1000,SimHT(Pob,Escenarios[i,"n"],apriori = "normal",k=10)))
  ResulSimG <-t(replicate(1000,SimHT(Pob,Escenarios[i,"n"],apriori = "gamma",k=100)))
  ty<-sum(Pob[,"Y"])
  # Summarise each replicate set into quality measures (printed as it runs)
  print(RsultMAS1[i,]<-Medida.Calidad(ResulSimU,ty))
  print(RsultMAS2[i,]<-Medida.Calidad(ResulSimN_N,ty))
  print(RsultMAS3[i,]<-Medida.Calidad(ResulSimG_N,ty))
  print(RsultMAS4[i,]<-Medida.Calidad(ResulSimN,ty))
  print(RsultMAS5[i,]<-Medida.Calidad(ResulSimG,ty))
}
# Attach the scenario columns and write one table per prior configuration
RsultUNF<-cbind(Escenarios,RsultMAS1)
RsultNOR_N<-cbind(Escenarios,RsultMAS2)
RsultGAM_N<-cbind(Escenarios,RsultMAS3)
RsultNOR<-cbind(Escenarios,RsultMAS4)
RsultGAM<-cbind(Escenarios,RsultMAS5)
write.table(RsultUNF,"output/RsultUNFPiPS.txt",sep = "\t",dec = ".",row.names = FALSE)
write.table(RsultNOR_N,"output/RsultNOR_NPiPS.txt",sep = "\t",dec = ".",row.names = FALSE)
write.table(RsultGAM_N,"output/RsultGAM_NPiPS.txt",sep = "\t",dec = ".",row.names = FALSE)
write.table(RsultNOR,"output/RsultNORPiPS.txt",sep = "\t",dec = ".",row.names = FALSE)
write.table(RsultGAM,"output/RsultGAMPiPS.txt",sep = "\t",dec = ".",row.names = FALSE)
2849c098065165d8784f179fc141bde63307396a
b229d2315a23155329ba1db5b87ea095b8beeeb3
/man/get_closest_sim.Rd
7892010faf3ea44979a1fa25ab0781031e12ed0a
[]
no_license
sbpdata/compost
89cc14930181cdefb1f8f4e71d5c79d20baa9c32
ab17b57aaaf1c433de60898e8ead836e737a0255
refs/heads/master
2020-08-05T17:11:49.400999
2019-11-03T12:28:57
2019-11-03T12:28:57
212,628,595
0
0
null
null
null
null
UTF-8
R
false
true
711
rd
get_closest_sim.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_closest_sim.R \name{get_closest_sim} \alias{get_closest_sim} \title{Find highest similarity value in region} \usage{ get_closest_sim(rca_mat, sim_mat) } \arguments{ \item{rca_mat}{binary rca matrix with region rows, units in columns} \item{sim_mat}{similarity matrix with units on both dimensions.} } \value{ matrix with regions in rows, units in columns. The elements are the highest similarity-value in the region to the given unit (column). } \description{ In the rca matrix, each region (row) has a number of units (col) with the value 1. From these units, the function finds the highest value of similarity to each unit. }
96ba86221a982982a4d2681713b440b9fb080650
2dbd351ce22af3ca6889442138c1d2b7b82e2fda
/plot4.R
a8f7d569425fb499436085cbcd3ff2a0a499004f
[]
no_license
yashikabadaya/ExData_Plotting1
27bfb19cc322db47b9a0f75d37806447c67b7096
bb54723b1d72b319d6729b8a495c5b4b52181c5c
refs/heads/master
2020-04-27T21:55:01.482163
2019-03-27T11:27:54
2019-03-27T11:27:54
174,716,266
0
0
null
2019-03-09T16:11:33
2019-03-09T16:11:32
null
UTF-8
R
false
false
1,138
r
plot4.R
data <- read.table("household_power_consumption.txt", sep = ";" , header = TRUE, stringsAsFactors = FALSE , dec = ".", na.strings="?") data$V1 <- as.Date(data$Date, format = "%d/%m/%Y") data <- data[data$Date %in% c("1/2/2007","2/2/2007") , ] date <- strptime(paste(data$Date, data$Time ,sep= " "), "%d/%m/%Y %H:%M:%S") globalActivePower <- as.numeric(data$Global_active_power) png("plot4.png", width = 480 , height = 480) par(mfcol = c(2,2)) plot(date, globalActivePower, ylab = "Global Active Power (kilowatts)", type = "l", xlab = " " ) plot(date , data$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = " ") lines(date, data$Sub_metering_2 , type = "l", col = "red") lines(date, data$Sub_metering_3 , type = "l" , col = "blue") #lyt is for type of line and lwd defines the width of line legend("topright", legend = c("Sub_metering_1" , "Sub_metering_2", "Sub_metering_3"), col = c("Black", "Red" , "Blue"), lty=1, lwd=1) plot(date, data$Voltage, ylab = "Voltage", type = "l", xlab = "datetime") plot(date, data$Global_reactive_power, ylab = "Global_reactive_power", type = "l", xlab = "datetime" ) dev.off()
247e62e83d4596d1ed52222fdeea7c7cf822d09c
6acadfa1d7455c004b5f395b767161375808a0c8
/Scripts/complete.R
e7d4d434c09c044c37abb1b5d3f1b93ab0ffed22
[]
no_license
guilhermeCoutinho/R-Programming
9750b16c45c3318b87e92a5749e207ca96089428
75f61fb4ce9861a4ffc21159cd98bbd665073d6f
refs/heads/master
2016-09-05T19:49:18.166687
2014-07-01T11:24:24
2014-07-01T11:24:24
null
0
0
null
null
null
null
UTF-8
R
false
false
504
r
complete.R
complete = function (directory , id) { ret = data.frame() for (i in id) { data = !is.na(read.csv( paste (directory, "\\" , dir(directory)[i] , sep="" ))) count = 0 for (j in 1:nrow(data)) { if (data[j,2] == T & data[j,3] == T) { count = count + 1 } } ret = rbind(ret , data.frame(i,count) ) } names(ret) = c("id" , "nobs") ret }