content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/FLUIDPROPS.R \name{oil.dead_visc.BeggsRobinson} \alias{oil.dead_visc.BeggsRobinson} \title{Dead Oil viscosity by Beggs-Robinson} \usage{ oil.dead_visc.BeggsRobinson(temp, API) } \arguments{ \item{temp}{temperature} \item{API}{oil specific gravity} } \description{ Dead Oil viscosity by Beggs-Robinson }
/man/oil.dead_visc.BeggsRobinson.Rd
no_license
maulingogri/rNodal
R
false
true
382
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/FLUIDPROPS.R \name{oil.dead_visc.BeggsRobinson} \alias{oil.dead_visc.BeggsRobinson} \title{Dead Oil viscosity by Beggs-Robinson} \usage{ oil.dead_visc.BeggsRobinson(temp, API) } \arguments{ \item{temp}{temperature} \item{API}{oil specific gravity} } \description{ Dead Oil viscosity by Beggs-Robinson }
setwd('~/dev/marchMayhem/March_Madness_2017/') rm(list = ls()) # scripts needed for data transformations source('../prepData.R') season.years <- levels(as.factor(tourney_results.c$Season))[19:32] # season.years <- c('2016') train_df <- prepTrainingData(season.years) test_df <- prepTestingData('2017') str(train_df) require(lattice) with(train_df, xyplot(win,w_twpct)) ######## building model ######### train <- subset(team_metrics, season <= 2014) train_df <- train[,c(-c(1:2,4:5))] train_df <- train_df[complete.cases(train_df),] test <- subset(team_metrics, season > 2014) test_df <- test[,c(-c(1:3,4:5))] # library(glmnet) # library(ROCR) # library(caret) # # options(scipen=999) # # # model <- glm(win ~ ., family = 'binomial', data = train_df) # glm_model <- caret::train(win~., data = train_df, method = 'glm', family = 'binomial') # summary(model) library(randomForest) library(e1071) library(caret) train_df <- train_df[complete.cases(train_df),] # vars2exclude <- c("w_TEAM_NAME","wteam", "l_TEAM_NAME", "lteam", "season", "matchup" ) # cols2exclude <- names(test_df) %in% vars2exclude # test <- test_df[!cols2exclude] # rf = randomForest(win ~., data = train_df, ntree = 100) rf = randomForest(win ~.-w_TEAM_NAME -wteam -l_TEAM_NAME -lteam -season -matchup, data = train_df, ntree = 100) pred <- predict(rf, test_df, type = 'prob') hmm <- cbind(pred, test_df); hmm$win <- ifelse(hmm$`0` < hmm$`1`, 1, 0) varImpPlot(rf) print(confusionMatrix(data = pred, reference = test$win, positive = "1")) fitControl <- trainControl(## 10-fold Crossvalidation method = "repeatedcv", number = 10, ## repeated ten times repeats = 5, verboseIter=FALSE , # PCA Preprocessing preProcOptions="pca", # With parallel backend allowParallel=TRUE) rf <- caret::train(win~., data = train_df, method = 'rf', trControl = fitControl) glm <- caret::train(win~., data = train_df, method = 'glm', family = 'binomial', trControl = fitControl) Model <- c('Random Forest', 'GLM') Accuracy <- 
c(round(max(rf$results$Accuracy),4)*100, round(max(glm$results$Accuracy),4)*100) performance <- cbind(Model,Accuracy); performance pred_rf <- predict(rf, test_df) cm_rf <- table(pred_rf, test$win[complete.cases(test$win)]) length(pred_rf) test_2016 <- subset(team_metrics, season == 2016) test_df_2016 <- test_2016[,c(-c(1:3,4:5))] pred_2016.rf <- predict(rf, test_2016, type = 'prob') pred_2016.glm <- predict(glm, test_2016, type = 'prob') glm.output <- data.frame('matchup' = test_2016$matchup, 'prediction' = pred_2016.glm) glm.output$prediction <- ifelse(glm.output$prediction.0 > glm.output$prediction.1, 0, 1) write.csv(glm.output, file = 'marchMadnessBracketResults.csv', row.names = F) output <- as.data.frame(cbind(test_2016$matchup, pred_2016)) # require('rvest') # conf.url <- 'http://www.ncaa.com/standings/basketball-men/d1' # webpage <- read_html(conf.url) # conf_table <- html_nodes(webpage, 'table') # conf <- html_table(conf_table, fill = T) # names(conf) # conferences = read_html('http://www.ncaa.com/standings/basketball-men/d1') # # this returns full list of conferences with teams seperated by conf names # teamsNConferences = html_nodes(conferences, '.ncaa-standings-conference-name, .ncaa-standing-conference-team') # length(teamsNConferences) # teamsNConferences[1:3] # html_text(teamsNConferences) # # this returns just teams # hmm <- html_nodes(conferences, 'span.ncaa-standings-conference-name, .ncaa-standing-conference-team') # teamsByConf <- html_text(hmm) # class(teamsByConf) # team.list <- # # conferences <- html_nodes(conferences, '.ncass-standings-conference-name') # conf <- html_text(conferences) # # # intersect(names(reg.season), names(tourney_slots)) # reg_season <- merge(reg.season, seasons, by = 'season') # reg_season_w_teams <- merge(reg_season, teams, by.x = '') # intersect(names()) # # str(reg.season) # str(seasons) # str(tourney_results)
/datacleanup.R
no_license
SeyiA91/MarchMadness
R
false
false
3,978
r
setwd('~/dev/marchMayhem/March_Madness_2017/') rm(list = ls()) # scripts needed for data transformations source('../prepData.R') season.years <- levels(as.factor(tourney_results.c$Season))[19:32] # season.years <- c('2016') train_df <- prepTrainingData(season.years) test_df <- prepTestingData('2017') str(train_df) require(lattice) with(train_df, xyplot(win,w_twpct)) ######## building model ######### train <- subset(team_metrics, season <= 2014) train_df <- train[,c(-c(1:2,4:5))] train_df <- train_df[complete.cases(train_df),] test <- subset(team_metrics, season > 2014) test_df <- test[,c(-c(1:3,4:5))] # library(glmnet) # library(ROCR) # library(caret) # # options(scipen=999) # # # model <- glm(win ~ ., family = 'binomial', data = train_df) # glm_model <- caret::train(win~., data = train_df, method = 'glm', family = 'binomial') # summary(model) library(randomForest) library(e1071) library(caret) train_df <- train_df[complete.cases(train_df),] # vars2exclude <- c("w_TEAM_NAME","wteam", "l_TEAM_NAME", "lteam", "season", "matchup" ) # cols2exclude <- names(test_df) %in% vars2exclude # test <- test_df[!cols2exclude] # rf = randomForest(win ~., data = train_df, ntree = 100) rf = randomForest(win ~.-w_TEAM_NAME -wteam -l_TEAM_NAME -lteam -season -matchup, data = train_df, ntree = 100) pred <- predict(rf, test_df, type = 'prob') hmm <- cbind(pred, test_df); hmm$win <- ifelse(hmm$`0` < hmm$`1`, 1, 0) varImpPlot(rf) print(confusionMatrix(data = pred, reference = test$win, positive = "1")) fitControl <- trainControl(## 10-fold Crossvalidation method = "repeatedcv", number = 10, ## repeated ten times repeats = 5, verboseIter=FALSE , # PCA Preprocessing preProcOptions="pca", # With parallel backend allowParallel=TRUE) rf <- caret::train(win~., data = train_df, method = 'rf', trControl = fitControl) glm <- caret::train(win~., data = train_df, method = 'glm', family = 'binomial', trControl = fitControl) Model <- c('Random Forest', 'GLM') Accuracy <- 
c(round(max(rf$results$Accuracy),4)*100, round(max(glm$results$Accuracy),4)*100) performance <- cbind(Model,Accuracy); performance pred_rf <- predict(rf, test_df) cm_rf <- table(pred_rf, test$win[complete.cases(test$win)]) length(pred_rf) test_2016 <- subset(team_metrics, season == 2016) test_df_2016 <- test_2016[,c(-c(1:3,4:5))] pred_2016.rf <- predict(rf, test_2016, type = 'prob') pred_2016.glm <- predict(glm, test_2016, type = 'prob') glm.output <- data.frame('matchup' = test_2016$matchup, 'prediction' = pred_2016.glm) glm.output$prediction <- ifelse(glm.output$prediction.0 > glm.output$prediction.1, 0, 1) write.csv(glm.output, file = 'marchMadnessBracketResults.csv', row.names = F) output <- as.data.frame(cbind(test_2016$matchup, pred_2016)) # require('rvest') # conf.url <- 'http://www.ncaa.com/standings/basketball-men/d1' # webpage <- read_html(conf.url) # conf_table <- html_nodes(webpage, 'table') # conf <- html_table(conf_table, fill = T) # names(conf) # conferences = read_html('http://www.ncaa.com/standings/basketball-men/d1') # # this returns full list of conferences with teams seperated by conf names # teamsNConferences = html_nodes(conferences, '.ncaa-standings-conference-name, .ncaa-standing-conference-team') # length(teamsNConferences) # teamsNConferences[1:3] # html_text(teamsNConferences) # # this returns just teams # hmm <- html_nodes(conferences, 'span.ncaa-standings-conference-name, .ncaa-standing-conference-team') # teamsByConf <- html_text(hmm) # class(teamsByConf) # team.list <- # # conferences <- html_nodes(conferences, '.ncass-standings-conference-name') # conf <- html_text(conferences) # # # intersect(names(reg.season), names(tourney_slots)) # reg_season <- merge(reg.season, seasons, by = 'season') # reg_season_w_teams <- merge(reg_season, teams, by.x = '') # intersect(names()) # # str(reg.season) # str(seasons) # str(tourney_results)
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/purity.R \name{savgol} \alias{savgol} \title{Savytzky-Golay filter} \usage{ savgol(data, params) } \arguments{ \item{data}{a matrix with data values} \item{params}{vector with parameters: derivative order, width of filter and polynomial order} } \description{ Applies Savytzky-Golay filter to the rows of data matrix }
/man/savgol.Rd
no_license
svkucheryavski/supure
R
false
false
407
rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/purity.R \name{savgol} \alias{savgol} \title{Savytzky-Golay filter} \usage{ savgol(data, params) } \arguments{ \item{data}{a matrix with data values} \item{params}{vector with parameters: derivative order, width of filter and polynomial order} } \description{ Applies Savytzky-Golay filter to the rows of data matrix }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dd.plot.R \name{dd.plot} \alias{dd.plot} \title{Depth-Depth Plots} \usage{ dd.plot(data1, data2 = rmvnorm(nrow(data1), array(0, ncol(data1)), diag(1, ncol(data1), ncol(data1))), main = "Normal DD-plot", xlab = "Sample Depths", ylab = "Normal Depths", col = "black", pch = 20) } \arguments{ \item{data1}{A matrix or a data.frame with each row as a p-variate observation.} \item{data2}{A matrix or a data.frame (defaults to a standard independent p-variate normal).} \item{main}{Plot labels. The title of the plot.} \item{xlab}{Plot labels. The \code{x-axis} label of the plot.} \item{ylab}{Plot labels. The \code{y-axis} label of the plot.} \item{col}{The color of the points} \item{pch}{character string or vector of 1-characters or integers for plotting characters.} } \value{ A \code{DD-plot} of the input data } \description{ \code{dd.plot} is a multivariate genralization of a normal \code{QQ-plot}. It produces a DD-plot of two datasets. } \examples{ u<-matrix(rnorm(300,1,4),ncol=3) dd.plot(u) } \author{ Somedip Karmakar <somedip@yahoo.co.in> Omker Mahalanobish <omker.scorpio@gmail.com> } \seealso{ \code{\link{spatial.depth}} }
/man/dd.plot.Rd
no_license
cran/depth.plot
R
false
true
1,233
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dd.plot.R \name{dd.plot} \alias{dd.plot} \title{Depth-Depth Plots} \usage{ dd.plot(data1, data2 = rmvnorm(nrow(data1), array(0, ncol(data1)), diag(1, ncol(data1), ncol(data1))), main = "Normal DD-plot", xlab = "Sample Depths", ylab = "Normal Depths", col = "black", pch = 20) } \arguments{ \item{data1}{A matrix or a data.frame with each row as a p-variate observation.} \item{data2}{A matrix or a data.frame (defaults to a standard independent p-variate normal).} \item{main}{Plot labels. The title of the plot.} \item{xlab}{Plot labels. The \code{x-axis} label of the plot.} \item{ylab}{Plot labels. The \code{y-axis} label of the plot.} \item{col}{The color of the points} \item{pch}{character string or vector of 1-characters or integers for plotting characters.} } \value{ A \code{DD-plot} of the input data } \description{ \code{dd.plot} is a multivariate genralization of a normal \code{QQ-plot}. It produces a DD-plot of two datasets. } \examples{ u<-matrix(rnorm(300,1,4),ncol=3) dd.plot(u) } \author{ Somedip Karmakar <somedip@yahoo.co.in> Omker Mahalanobish <omker.scorpio@gmail.com> } \seealso{ \code{\link{spatial.depth}} }
library(readxl) library(optimx) library(pracma) library(tseries) library(Metrics) # parameters h <- 0.9701556 # obtained from data # function looking for optimal sigma and beta parameters TP_findpar <- function(betasigma) { beta <- betasigma[1] sigma <- betasigma[2] # pre-allocate b <- matrix(NA,4,1) calculator <- 0 # first part of equation for (n in c(12,60,120,240)) { calculator <- calculator+1 temp <- 0 for (k in 2:n) { temp1 <- 0 for (j in 2:(k-1)){ temp1 <- temp1 + beta*h^(k-j-1) } temp <- temp + beta * h^(k-2) + temp1 } first_eq <- temp # second part of equation temp2 <- 0 for(k in 2:n){ for(j in 2:(k-1)){ for(i in 1:(j-1)){ temp2 <- temp2 + h^(j-i-1)*h^(k-i-1) } } } second_eq <- temp2 b[calculator] <- -1/n * sigma^2 * (first_eq + second_eq) } # transform output b <- b*100*12 # *100 because of percentage, *12 because of annual transformation # fit these values as well as possible, see TermPremiumCalibration.xlsx for details b_real <- matrix(NA,4,1) b_real[1] <- -0.0685978 b_real[2] <- 0.33780927 b_real[3] <- 0.9332286 b_real[4] <- 1.4565876 b_dif <- rmse(b_real,b) # uncomment following rows to see how the optim function seeks best values, warning: it slows the algorithm significantly #cat("\nbeta", beta) #cat("\nsigma", sigma) #cat("\nbdiff", sum(b_dif)) return(b_dif) } # starting values based on Kozicki and Tinsley optimized <- optim(c(-383,0.69/1200), TP_findpar,method = c("Nelder-Mead"), control = list(maxit = 10000)) optimized$par sprintf("%.10f",optimized$par)
/Code/RMSE_TermPremium.R
no_license
MartinVojtek/masters_thesis
R
false
false
1,760
r
library(readxl) library(optimx) library(pracma) library(tseries) library(Metrics) # parameters h <- 0.9701556 # obtained from data # function looking for optimal sigma and beta parameters TP_findpar <- function(betasigma) { beta <- betasigma[1] sigma <- betasigma[2] # pre-allocate b <- matrix(NA,4,1) calculator <- 0 # first part of equation for (n in c(12,60,120,240)) { calculator <- calculator+1 temp <- 0 for (k in 2:n) { temp1 <- 0 for (j in 2:(k-1)){ temp1 <- temp1 + beta*h^(k-j-1) } temp <- temp + beta * h^(k-2) + temp1 } first_eq <- temp # second part of equation temp2 <- 0 for(k in 2:n){ for(j in 2:(k-1)){ for(i in 1:(j-1)){ temp2 <- temp2 + h^(j-i-1)*h^(k-i-1) } } } second_eq <- temp2 b[calculator] <- -1/n * sigma^2 * (first_eq + second_eq) } # transform output b <- b*100*12 # *100 because of percentage, *12 because of annual transformation # fit these values as well as possible, see TermPremiumCalibration.xlsx for details b_real <- matrix(NA,4,1) b_real[1] <- -0.0685978 b_real[2] <- 0.33780927 b_real[3] <- 0.9332286 b_real[4] <- 1.4565876 b_dif <- rmse(b_real,b) # uncomment following rows to see how the optim function seeks best values, warning: it slows the algorithm significantly #cat("\nbeta", beta) #cat("\nsigma", sigma) #cat("\nbdiff", sum(b_dif)) return(b_dif) } # starting values based on Kozicki and Tinsley optimized <- optim(c(-383,0.69/1200), TP_findpar,method = c("Nelder-Mead"), control = list(maxit = 10000)) optimized$par sprintf("%.10f",optimized$par)
apply(mtcars,2,mean) apply(mtcars,1,mean) #lapply() #sapply() #> apply(mtcars,2,mean) # mpg cyl disp hp drat wt qsec vs am gear carb # 20.090625 6.187500 230.721875 146.687500 3.596563 3.217250 17.848750 0.437500 0.406250 3.687500 2.812500 #> apply(mtcars,1,mean) # Mazda RX4 Mazda RX4 Wag Datsun 710 Hornet 4 Drive Hornet Sportabout Valiant # 29.90727 29.98136 23.59818 38.73955 53.66455 35.04909 # Duster 360 Merc 240D Merc 230 Merc 280 Merc 280C Merc 450SE # 59.72000 24.63455 27.23364 31.86000 31.78727 46.43091 # Merc 450SL Merc 450SLC Cadillac Fleetwood Lincoln Continental Chrysler Imperial Fiat 128 # 46.50000 46.35000 66.23273 66.05855 65.97227 19.44091 # Honda Civic Toyota Corolla Toyota Corona Dodge Challenger AMC Javelin Camaro Z28 # 17.74227 18.81409 24.88864 47.24091 46.00773 58.75273 # Pontiac Firebird Fiat X1-9 Porsche 914-2 Lotus Europa Ford Pantera L Ferrari Dino # 57.37955 18.92864 24.77909 24.88027 60.97182 34.50818 # Maserati Bora Volvo 142E # 63.15545 26.26273
/Funtion_remember/Part2(apply).R
no_license
LucasZhengrui/R_Lauguage_Study
R
false
false
1,665
r
apply(mtcars,2,mean) apply(mtcars,1,mean) #lapply() #sapply() #> apply(mtcars,2,mean) # mpg cyl disp hp drat wt qsec vs am gear carb # 20.090625 6.187500 230.721875 146.687500 3.596563 3.217250 17.848750 0.437500 0.406250 3.687500 2.812500 #> apply(mtcars,1,mean) # Mazda RX4 Mazda RX4 Wag Datsun 710 Hornet 4 Drive Hornet Sportabout Valiant # 29.90727 29.98136 23.59818 38.73955 53.66455 35.04909 # Duster 360 Merc 240D Merc 230 Merc 280 Merc 280C Merc 450SE # 59.72000 24.63455 27.23364 31.86000 31.78727 46.43091 # Merc 450SL Merc 450SLC Cadillac Fleetwood Lincoln Continental Chrysler Imperial Fiat 128 # 46.50000 46.35000 66.23273 66.05855 65.97227 19.44091 # Honda Civic Toyota Corolla Toyota Corona Dodge Challenger AMC Javelin Camaro Z28 # 17.74227 18.81409 24.88864 47.24091 46.00773 58.75273 # Pontiac Firebird Fiat X1-9 Porsche 914-2 Lotus Europa Ford Pantera L Ferrari Dino # 57.37955 18.92864 24.77909 24.88027 60.97182 34.50818 # Maserati Bora Volvo 142E # 63.15545 26.26273
# Time Series - xts
/77-TS/TS-xts.R
no_license
DUanalytics/rAnalytics
R
false
false
20
r
# Time Series - xts
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/koboAPI.R \name{kobo_noGroupsHeader} \alias{kobo_noGroupsHeader} \title{Remove groupes from dataframe header} \usage{ kobo_noGroupsHeader(data, formid, pwd, user, api = "https://kobo.humanitarianresponse.info", separator = "\\\\/") } \arguments{ \item{data}{The dataframe to be treated.} \item{formid}{The ID of the form to be accessed (as a character string)} \item{pwd}{Password of the Kobo account to use} \item{user}{Optional. A single string indicating the username} \item{api}{The URL at which the API can be accessed. Default to "kobo.humanitarianresponse.info"} \item{separator}{Separator used between select_multiple questions and their choices. Must be a regex expression. Default to forward slash} } \value{ A dataframe without groups in headers. } \description{ Remove groupes from dataframe header } \author{ Elliott Messeiller }
/man/kobo_noGroupsHeader.Rd
no_license
agualtieri/koboAPI
R
false
true
928
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/koboAPI.R \name{kobo_noGroupsHeader} \alias{kobo_noGroupsHeader} \title{Remove groupes from dataframe header} \usage{ kobo_noGroupsHeader(data, formid, pwd, user, api = "https://kobo.humanitarianresponse.info", separator = "\\\\/") } \arguments{ \item{data}{The dataframe to be treated.} \item{formid}{The ID of the form to be accessed (as a character string)} \item{pwd}{Password of the Kobo account to use} \item{user}{Optional. A single string indicating the username} \item{api}{The URL at which the API can be accessed. Default to "kobo.humanitarianresponse.info"} \item{separator}{Separator used between select_multiple questions and their choices. Must be a regex expression. Default to forward slash} } \value{ A dataframe without groups in headers. } \description{ Remove groupes from dataframe header } \author{ Elliott Messeiller }
################################################################ ## Workshop: Collecting and Analyzing Social Media Data with R ## Script 3: Collecting Facebook data ## Author: Shivam Panchal ################################################################ # Loading the Rfacebook package library(Rfacebook) ## To get access to the Facebook API, you need an OAuth code. ## You can get yours going to the following URL: ## https://developers.facebook.com/tools/explorer ## Once you're there: ## 1) Click on "Get Access Token" ## 2) Copy the long code ("Access Token") and paste it here below, substituting ## the fake one I wrote: fb_oauth = 'XXXXXXXYYYYYYZZZZZZ11111' ## Now try running the following line: getUsers("me", token=fb_oauth, private_info=TRUE) ## Does it return your Facebook public information? Yes? Then we're ready to go ## See also ?fbOAuth for information on how to get a long-lived OAuth token ################################################ ### SCRAPING INFORMATION FROM FACEBOOK PAGES ### ################################################ # How can I get a list of posts from a Facebook page? # The following line downloads the ~100 most recent posts on the facebook # page of Barack Obama page <- getPage("barackobama", token=fb_oauth, n=100) # What information is available for each of these posts? page[1,] # Which post got more likes? page[which.max(page$likes_count),] # Which post got more comments? page[which.max(page$comments_count),] # Which post was shared the most? page[which.max(page$shares_count),] # We can also subset by date # For example, imagine we want to get all the posts from October 2012 page <- getPage("barackobama", token=fb_oauth, n=1000, since='2012/10/01', until='2012/10/30') #################################### ### COLLECTING PAGES' LIKES DATA ### #################################### # How can I get a list of users who liked a specific post? 
# The following line downloads more information about the first post # (note that it uses the ID of the post as main option), as well # as a list of 1,000 people who "liked" it post <- getPost(page$id[1], token=fb_oauth, n.likes=1000, comments=FALSE) # This is how you can view that list of people: likes <- post$likes head(likes) # What information is available for these users? # The first step is to use again "getUsers" to gather their public Facebook # information, with their IDs as main option. users <- getUsers(likes$from_id, token=fb_oauth) # What are the most common first names? head(sort(table(users$first_name), decreasing=TRUE), n=10) # This gives us an idea about the gender distribution of the people # interacting with this page. ################################## ### COLLECTING PAGES' COMMENTS ### ################################## # How can I get the text of the comments on a specific post? post <- getPost(page$id[1], token=fb_oauth, n.comments=1000, likes=FALSE) # This is how you can view those comments: comments <- post$comments head(comments) # Also, note that users can like comments! # What is the comment that got the most likes? comments[which.max(comments$likes_count),]
/facebook-data-collection.r
no_license
ShivamPanchal/Social-Media-Analysis-using-R
R
false
false
3,238
r
################################################################ ## Workshop: Collecting and Analyzing Social Media Data with R ## Script 3: Collecting Facebook data ## Author: Shivam Panchal ################################################################ # Loading the Rfacebook package library(Rfacebook) ## To get access to the Facebook API, you need an OAuth code. ## You can get yours going to the following URL: ## https://developers.facebook.com/tools/explorer ## Once you're there: ## 1) Click on "Get Access Token" ## 2) Copy the long code ("Access Token") and paste it here below, substituting ## the fake one I wrote: fb_oauth = 'XXXXXXXYYYYYYZZZZZZ11111' ## Now try running the following line: getUsers("me", token=fb_oauth, private_info=TRUE) ## Does it return your Facebook public information? Yes? Then we're ready to go ## See also ?fbOAuth for information on how to get a long-lived OAuth token ################################################ ### SCRAPING INFORMATION FROM FACEBOOK PAGES ### ################################################ # How can I get a list of posts from a Facebook page? # The following line downloads the ~100 most recent posts on the facebook # page of Barack Obama page <- getPage("barackobama", token=fb_oauth, n=100) # What information is available for each of these posts? page[1,] # Which post got more likes? page[which.max(page$likes_count),] # Which post got more comments? page[which.max(page$comments_count),] # Which post was shared the most? page[which.max(page$shares_count),] # We can also subset by date # For example, imagine we want to get all the posts from October 2012 page <- getPage("barackobama", token=fb_oauth, n=1000, since='2012/10/01', until='2012/10/30') #################################### ### COLLECTING PAGES' LIKES DATA ### #################################### # How can I get a list of users who liked a specific post? 
# The following line downloads more information about the first post # (note that it uses the ID of the post as main option), as well # as a list of 1,000 people who "liked" it post <- getPost(page$id[1], token=fb_oauth, n.likes=1000, comments=FALSE) # This is how you can view that list of people: likes <- post$likes head(likes) # What information is available for these users? # The first step is to use again "getUsers" to gather their public Facebook # information, with their IDs as main option. users <- getUsers(likes$from_id, token=fb_oauth) # What are the most common first names? head(sort(table(users$first_name), decreasing=TRUE), n=10) # This gives us an idea about the gender distribution of the people # interacting with this page. ################################## ### COLLECTING PAGES' COMMENTS ### ################################## # How can I get the text of the comments on a specific post? post <- getPost(page$id[1], token=fb_oauth, n.comments=1000, likes=FALSE) # This is how you can view those comments: comments <- post$comments head(comments) # Also, note that users can like comments! # What is the comment that got the most likes? comments[which.max(comments$likes_count),]
## PURPOSE: APPLY A BAYESIAN NETWORK TO ESTIMATE LEARNER KNOWLEGE STATE GIVEN EVIDENCE OF LEARNING ## MEASUREMENT. The scope of the estimation is bounded by the learning map associated with a course unit and the ## course section. An external file USE_CASE_QUERY_ATTRIBUTES specifies the scope of the query. ## ## MAJOR STEPS IN THE ALGORITHM LOGIC. ## 1︎⃣ Set workspace parameters and read in working files. We specifically require the following: ## ⪧ USE_CASE_QUERY_ATTRIBUTES guides the case study on which we focus. ## ⪧ COURSE_ENROLL contains the enrollment and responsible educator. ## ⪧ EoL_MEAS contains the learners' evidence of learning (EoL) measurements. ## ⪧ KNOW_STATE_SPEC contains relationships between learners' measured learning evidence and their implied knowledge states. ## ⪧ GRAPH_CLUST_N_UNIT_MAP_JDF contains the joint distrubition functions (JDF) for Nᵗʰ cluster of connected vertices ## within UNIT_MAP_EDGE_LIST. We employ this to get the in-scope vertices. ## 2︎⃣ Window the EoL_MEAS learning-measurement table. Retain only records corresponding to subjects (students) for whom ## STUDENT_ID exists in EoL_MEAS. Also, limit the LEARNING_STANDARD_ID to the variables specfied within the columns of ## GRAPH_CLUST_N_UNIT_MAP_JDF. Also, sort the EoL_MEAS by DATE_OF_MEAS and retain only the most-recent in cases ## of multiple measurements of LEARNING_STANDARD_IDs for distinct subjects. ## 3︎⃣ Apply KNOW_STATE_SPEC to impute hard-decision knowledge-state estimates for each EoL_MEAS. ## 4︎⃣ Identify the evidence states in EoL_MEAS. We introduce here three aspects of our framework. ## ⓐ KNOWLEDGE STATE represents the estimated extent of mastery for an individual learner with respect to all LEARNING_STANDARD_ID ## attributes from the proficiency model. ## ⓑ EVIDENTIARY PROFILE contains all of the observed variables from which that estimate is derived. ## ⓒ EVIDENTIARY STATE specifies the actual state for each evidentiary-profile variable for a specific learner. 
## We extract during this stage the evidentiary profile and evidentiary state for each subject (learner, student) from EoL_MEAS. ## Categorize learners according to evidentiary profile and evidentiary state. Also identify by cluster for each unit-submap cluster ## of connected vertices: ## ⓐ Observed variables from the evidentiary profile on which we condition the submap-cluster's JDF; and ## ⓑ The target variables for which we obtain marginal CDFs conditioned on evidentiary states in the evidentiary profile. ## 6︎⃣ Translate each EVIDENTIARY STATE into an estimated KNOWLEDGE STATE. Condition GRAPH_CLUST_N_UNIT_MAP_JDF ## on each observed evidentiary state. Marginalize the resulting conditional distribution with respect to each target variable to obtain ## a distribution of knowledge-state probabilities for each observed evidentiary state. ## 7︎⃣ Associate the LEARNING_STANDARD_ID-marginalized CDFs for each learner with the measured knowledge state to get a complete ## probability distribution for each variable. Append to LEARNER_KNOW_STATE. Reshape to wide-table format so that LEARNER_KNOW_STATE ## contains for each STUDENT_ID × LEARNING_STANDARD_ID pair a row of conditional probability distributions regarding the LEARNER's state. # # Initialize environment. options(stringsAsFactors = FALSE) options(java.parameters = "-Xmx16g") library(stringr) library(reshape2) library(abind) # # 1︎⃣ DATA INGESTION. Read in USE_CASE_ATTRIBUTES to get the distinguishing case-study variable states. 
proto.dir <- "/Users/nahamlet/Box Sync/IBM-Watson ED K12/Pathway-Centric CONOPS/Learning-Map Prototype" USE_CASE_ATTRIBUTES <- read.csv(file = paste(proto.dir, "USE_CASE_QUERY_ATTRIBUTES.csv", sep = "/"), colClasses = "character") rownames(USE_CASE_ATTRIBUTES) <- USE_CASE_ATTRIBUTES[,"QUERY_ATTRIBUTE"] Case.dir <- USE_CASE_ATTRIBUTES["Case.dir","VALUE"] PROF_TASK.dir <- paste(Case.dir, "PROF_TASK_MODEL", sep = "/") setwd(PROF_TASK.dir) # # ‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️ # We need one JDF for each course-unit vertex cluster. Also, remove a leading substring "X" from the column names of the # UNIT_SUBMAP_JDF data frames. These were introduced becuase the colClasses were not constrained to be "character." jdf_files <- list.files(PROF_TASK.dir)[grep(x = list.files(PROF_TASK.dir), pattern = "UNIT_MAP_JDF.csv")] UNIT_SUBMAP_JDF <- list() for (jdf_idx in jdf_files) { ## jdf_idx <- jdf_files[1] print(paste("Reading in SUBMAP_JDF", jdf_idx, "Starting at", format(Sys.time(),"%H:%M:%S"))) UNIT_SUBMAP_JDF[[jdf_idx]] <- read.csv(file = paste(PROF_TASK.dir, jdf_idx, sep = "/")) print(paste("Reading in SUBMAP_JDF", jdf_idx, "Ending at", format(Sys.time(),"%H:%M:%S"))) } # # ‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️‼️ # Read in other files listed above. KNOW_STATE_SPEC <- read.csv(file = paste(PROF_TASK.dir, "KNOW_STATE_SPEC.csv", sep = "/"), colClasses = "character") COURSE_ENROLL <- read.csv(file = paste(PROF_TASK.dir, "COURSE_ENROLL.csv", sep = "/"), colClasses = "character") EoL_MEAS <- read.csv(file = paste(PROF_TASK.dir, "EoL_MEAS.csv", sep = "/"), colClasses = "character")[c("STUDENT_ID","LEARNING_STANDARD_ID", "MEAS_EVIDENCE","DATE_OF_MEAS")] LEARNING_STANDARD <- read.csv(file = paste(PROF_TASK.dir, "SIHLEARNING_STANDARD.csv", sep = "/"), colClasses = "character")[c("LEARNING_STANDARD_ID", "LEARNING_STANDARD_CD")] # # 2︎⃣ Window the EoL_MEAS learning-measurement table. 
First window by subjects with STUDENT_IDs in COURSE_ENROLL for the # COURSE_ID, SECTION_ID specified by the corresponding values of USE_CASE_ATTRIBUTES. Create a "windowed" version of # COURSE_ENROLL. Then merge the result with EoL_MEAS. SECT_ENROLL <- data.frame(rbind(USE_CASE_ATTRIBUTES[c("COURSE_ID","SECTION_ID"),"VALUE"])) colnames(SECT_ENROLL) <- c("COURSE_ID","SECTION_ID") SECT_ENROLL <- merge(x = SECT_ENROLL, y = COURSE_ENROLL) EoL_MEAS <- merge(x = COURSE_ENROLL["STUDENT_ID"], y = EoL_MEAS, all.x = TRUE) # EoL_MEAS <- EoL_MEAS[EoL_MEAS[,"STUDENT_ID"] %in% COURSE_ENROLL[,"STUDENT_ID"],] EoL_MEAS[!is.na(EoL_MEAS[,"LEARNING_STANDARD_ID"]),"LEARNING_STANDARD_ID"] <- paste("X", EoL_MEAS[!is.na(EoL_MEAS[,"LEARNING_STANDARD_ID"]),"LEARNING_STANDARD_ID"], sep = "") # # Now filter EoL_MEAS according to the LEARNING_STANDARD_IDs included in the graph. These are obtained by calculating the intersection # between the column names of UNIT_SUBMAP_JDF and the instances of LEARNING_STANDARD_ID in EoL_MEAS. The procedure here # is slightly more-difficult, given that we want the LEARNING_STANDARD_ID instances in SUBMAP_VERTEX, derived from UNIT_SUBMAP_JDF, # as well as STUDENT_IDs for which no measurements are available. SUBMAP_VERTEX <- data.frame(LEARNING_STANDARD_ID = intersect(unique(EoL_MEAS[,"LEARNING_STANDARD_ID"]), unlist(lapply(X = UNIT_SUBMAP_JDF, FUN = colnames)) ) ) EoL_MEAS <- rbind(EoL_MEAS[EoL_MEAS[,"LEARNING_STANDARD_ID"] %in% SUBMAP_VERTEX[,"LEARNING_STANDARD_ID"],], EoL_MEAS[apply (X = is.na(EoL_MEAS), MARGIN = 1, FUN = any), ]) # # Time-window EoL measurements. Apply the following procedure. # ⓐ First, sort the measurements in decreasing order of DATE_OF_MEAS. # ⓑ Then truncate EoL_MEAS to include only measurements up to DATE_LATEST_MEAS from the USE_CASE_ATTRIBUTES table. # Special handling is required due to the presence of subjects (learners, students) for which no evidence of learning is present. # The logic of time-windowing looses those records. 
So they must be reintroduced. # ⓒ Retain only the most-recent measurement in instances for which a given LEARNING_STANDARD_ID variable has been # measured multiple times for a subject (student or learner). Accomplish this with the duplicated logic. Since we sorted # in order of decreasing DATE_OF_MEAS, all STUDENT_ID × LEARNING_STANDARD_ID pairs after the first occurrence # of each are identified as duplicated. # EoL_MEAS <- EoL_MEAS[order(x = as.Date(EoL_MEAS[,"DATE_OF_MEAS"],"%Y-%m-%d"), decreasing = TRUE),] EoL_MEAS <- rbind(EoL_MEAS[which(as.Date(EoL_MEAS[,"DATE_OF_MEAS"],"%Y-%m-%d") <= as.Date(USE_CASE_ATTRIBUTES["DATE_LATEST_MEAS","VALUE"],"%Y-%m-%d") ), ], EoL_MEAS[apply (X = is.na(EoL_MEAS), MARGIN = 1, FUN = any), ]) EoL_MEAS <- EoL_MEAS[!duplicated(EoL_MEAS[c("STUDENT_ID", "LEARNING_STANDARD_ID")]),] EoL_MEAS <- EoL_MEAS[order(EoL_MEAS[,"STUDENT_ID"], EoL_MEAS[,"LEARNING_STANDARD_ID"]),] # # 3︎⃣ Impute IMPLIED_KNOW_STATE to MEAS_EVIDENCE in EoL_MEAS. Assign IMPLIED_KNOW_STATE based on threshold intervals # in KNOW_STATE_SPEC. We first need to coerce MEAS_EVIDENCE to numeric. EoL_MEAS[,"MEAS_EVIDENCE"] <- as.numeric(EoL_MEAS[,"MEAS_EVIDENCE"]) EoL_MEAS[,"IMPLIED_KNOW_STATE"] <- cut(x = EoL_MEAS[,"MEAS_EVIDENCE"], breaks = unique(unlist(KNOW_STATE_SPEC[c("LOW_BOUND","UP_BOUND")])), labels = KNOW_STATE_SPEC[,"IMPLIED_KNOW_STATE"], include.lowest = TRUE, ordered.result = TRUE) EoL_MEAS[,"IMPLIED_KNOW_STATE"] <- as.character(EoL_MEAS[,"IMPLIED_KNOW_STATE"]) # # 4︎⃣ Identify the evidence states in EoL_MEAS. We need to relate subjects (learners, students) to evidentiary profiles and # evidentiary states. We use these to marginalize, reduce, condition the Joint Distribution Functions (JDFs) for each disconnected subgraph cluster # of connected vertices. We also need the knowledge-state estimation profiles, the variables for which the Bayesian network produces estimates. 
# These estimates result from marginalization of the conditioned JDF with each variable in the estimation profile one at a time. # # We get at these by the following procedure. # ⓐ Reshape EoL_MEAS into wide-table format. We records for each subject indicating the IMPLED_KNOW_STATE of each # measured variable. Evidentiary profiles and states vary between subjects (learners, students). Keeping track of these # distinctions and applying each to the Bayesian Network represents the greatest source of complexity in this approach. # ⓑ Associate each learner with an evidentiary profile and an evidentiary state. Calculate signatures based on concatenation # of the variable names — column names of the wide table — and variable values indicating evidentiary state. # ⓒ Create EVID_PROF_STATE, a data frame containing unique rows in the wide-table EoL_MEAS table. # ⓓ Allocate in EVID_PROF_STATE the variables in the evidentiary profile to each disconnected cluster of connected subgraph # vertices. Collect this information for each cluster a distinct data frame of unique evidentiary-state configurations. The # column names represent the variables in the cluster-allocated evidentiary profile. The rownames contain the cluster-allocated # evidentiary states. # ⓔ For each subgraph cluster, ascertain the knowledge-state estimated profile, the variables in the cluster not included in the # cluster-allocated evidentiary profile. # To summarize, we must manage two dimensions of combinatorial variability: Subject evidentiary profiles and states, and their # coverage of disconnected clusters of connected subgraph vertices. This requires two levels of categorization of evidentiary # profiles, states. # # ⓐ Reshape EoL_MEAS into wide-table format. Assign the STUDENT_ID subject-unique attributes as the rownames for the # resulting data frame. Get rid of all remaining columns not pertaining to the possibly measured variables in SUBMAP_VERTEX. 
EoL_WIDE <- dcast(data = EoL_MEAS, formula = STUDENT_ID ~ LEARNING_STANDARD_ID, value.var = "IMPLIED_KNOW_STATE") rownames(EoL_WIDE) <- EoL_WIDE[,"STUDENT_ID"] for (col_idx in setdiff(colnames(EoL_WIDE),unlist(SUBMAP_VERTEX))) EoL_WIDE[col_idx] <- NULL # # # Write out a csv file containing the LEARNER_EVID_STATE. It is derived from EoL_WIDE but has LEARNING_STANDARD_CD for its # column names. LEARNER_EVID_STATE <- EoL_WIDE LEARNER_EVID_STATE.cols <- data.frame(LEARNING_STANDARD_ID = colnames(LEARNER_EVID_STATE)) LEARNER_EVID_STATE.cols[,"LEARNING_STANDARD_ID"] <- gsub(x = LEARNER_EVID_STATE.cols[,"LEARNING_STANDARD_ID"], pattern = "X", replacement = "") LEARNER_EVID_STATE.cols <- merge(x = LEARNER_EVID_STATE.cols, y = LEARNING_STANDARD) colnames(LEARNER_EVID_STATE) <- LEARNER_EVID_STATE.cols[,"LEARNING_STANDARD_CD"] LEARNER_EVID_STATE["STUDENT_ID"] <- rownames(EoL_WIDE) LEARNER_EVID_STATE <- merge(x = LEARNER_EVID_STATE, y = COURSE_ENROLL[c("STUDENT_ID","STUDENT_NAME","CLASS_ID")]) write.csv(x = LEARNER_EVID_STATE[c("STUDENT_ID","STUDENT_NAME","CLASS_ID",LEARNER_EVID_STATE.cols[,"LEARNING_STANDARD_CD"])], file = paste(PROF_TASK.dir, "LEARNER_EVID_STATE.csv", sep = "/"), row.names = FALSE, eol = "\r\n", fileEncoding = "UTF-8", quote = TRUE) # # ⓑ Associate each learner with an evidentiary profile and an evidentiary state. We specifically seek the above-described # evidentiary-profile and -state signatures. Getting the evidentiary-profile signature requires three steps. # ⅰ. LIst the column names for the non-NA evidentiary state for each learner; # ⅱ. Concatenate the non-NA evidentiary-state variables into a signature; and # ⅲ. Write the results as an colum to EoL_WIDE. # We encounter subjects (learners, students) and clusters for which no measurements are avaiable. Our syntax logic # returns a blank for these instances. Replace the blank with "UNMEASURED". 
EoL_WIDE["EVID_PROF_SIG"] <- unlist(lapply(X = lapply(X = apply(X = !is.na(EoL_WIDE[,unlist(SUBMAP_VERTEX)]), MARGIN = 1, FUN = which), FUN = names), FUN = paste, collapse = "_")) EoL_WIDE[nchar(EoL_WIDE[,"EVID_PROF_SIG"]) == 0,"EVID_PROF_SIG"] <- "UNMEASURED" # # The evidentiary-state signatures are simpler to obtain. Simply row-concatenate all of the evidentiary-state variables. As with # the EVID_PROF_SIG, we want evidentiary-state signatures for which no evidence is instantiated to be "UNMEASURED". # Replace all such instances — assigned "NA_ ...." by our syntax logic with "UNMEASURED". EoL_WIDE["EVID_STATE_SIG"] <- apply(X = EoL_WIDE[unlist(SUBMAP_VERTEX)], MARGIN = 1, FUN = paste, collapse = "_") EoL_WIDE[grep(x = EoL_WIDE[,"EVID_STATE_SIG"], pattern = "NA_"),"EVID_STATE_SIG"] <- "UNMEASURED" # # ⓒ Create EVID_PROF_STATE, a data frame containing unique evidentiary-profile signatures. Then, add a column containing the unique # evidentiary states for each evidentiary profile. EVID_PROF_STATE <- unique(EoL_WIDE["EVID_PROF_SIG"]) rownames(EVID_PROF_STATE) <- EVID_PROF_STATE[, "EVID_PROF_SIG"] EVID_STATE <- list() for (prof_idx in rownames(EVID_PROF_STATE)){ ## prof_idx <- rownames(EVID_PROF_STATE)[3] EVID_STATE[[prof_idx]] <- EoL_WIDE[EoL_WIDE[,"EVID_PROF_SIG"] == prof_idx,unlist(SUBMAP_VERTEX)] EVID_STATE[[prof_idx]] <- unique(EVID_STATE[[prof_idx]][apply(X = !is.na(EVID_STATE[[prof_idx]]), MARGIN = 2, FUN = all)]) rownames(EVID_STATE[[prof_idx]]) <- apply(X = EVID_STATE[[prof_idx]], MARGIN = 1, FUN = paste, collapse = "_") } EVID_PROF_STATE[["EVID_STATE"]] <- EVID_STATE # # ⓓ Allocate in EVID_PROF_STATE the variables in the evidentiary profile to each cluster. Vertex membership in # clusters is contained in the column names of UNIT_SUBMAP_JDF. We want to build up external to EVID_PROF_STATE # a list of data frames and then write it back into EVID_PROF_STATE as CLUST_EVID_STATE. 
CLUST_EVID_STATE <- list() CLUST_EVID_PROF <- lapply(X = lapply(X = UNIT_SUBMAP_JDF, FUN = colnames), FUN = intersect, colnames(EoL_WIDE)) # # CLUST_EVID_PROF now contains the all of the the evidentiary profiles in EoL_WIDE. We now need to subset # according to scope of the subtraph clusters in EVID_PROF_STATE. for (prof_idx in unique(EVID_PROF_STATE[,"EVID_PROF_SIG"])){ ## prof_idx <- unique(EVID_PROF_STATE[,"EVID_PROF_SIG"])[3] EVID_PROF_STATE.prof_idx <- EVID_PROF_STATE[["EVID_STATE"]][[prof_idx]] CLUST_EVID_STATE.prof_idx <- list() for (clust_idx in names(CLUST_EVID_PROF)){ ## clust_idx <- names(CLUST_EVID_PROF)[1] CLUST_EVID_STATE.prof_idx[[clust_idx]] <- EVID_PROF_STATE.prof_idx[intersect(colnames(EVID_PROF_STATE.prof_idx), CLUST_EVID_PROF[[clust_idx]])] CLUST_EVID_STATE.prof_idx[[clust_idx]] <- unique(CLUST_EVID_STATE.prof_idx[[clust_idx]][apply(X = !is.na(CLUST_EVID_STATE.prof_idx[[clust_idx]]), MARGIN = 2, FUN = all)]) rownames(CLUST_EVID_STATE.prof_idx[[clust_idx]]) <- apply(X = CLUST_EVID_STATE.prof_idx[[clust_idx]], MARG = 1, FUN = paste, collapse = "_") } CLUST_EVID_STATE[[prof_idx]] <- CLUST_EVID_STATE.prof_idx } EVID_PROF_STATE[["CLUST_EVID_STATE"]] <- CLUST_EVID_STATE # # ⓔ Provide the knowledge-state estimation profile by cluster for each evidentiary profile. TARG_EST_PROF <- list() for (prof_idx in rownames(EVID_PROF_STATE)) TARG_EST_PROF[[prof_idx]] <- lapply(X = lapply(X = UNIT_SUBMAP_JDF, FUN = colnames), FUN = setdiff, c(unlist(str_split(string = prof_idx, pattern = "_")),"MEAS")) EVID_PROF_STATE[["TARG_EST_PROF"]] <- TARG_EST_PROF # # The EVID_PROF_SIG attribute is, finally, redundant and can be elimintated from EVID_PROF_STATE. EVID_PROF_STATE[["EVID_PROF_SIG"]] <- NULL # # 🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉 # 6︎⃣ Translate each EVIDENTIARY STATE into an estimated KNOWLEDGE STATE. Our complex data frame EVID_PROF_STATE now contains # all of the structure we need to estimate knowledge states. 
We accomplish this via a Bayesian-network instantiation. This includes the # following key steps. # ⓐ Marginalize UNIT_MAP_JDF according to the evidentiary profile. The evidentiary profile again conatins all of the attributes for which # evidence is asserted. The colum names of the data frame CLUST_EVID_STATE in EVID_PROF_STATE for each profile, each cluster # contain the evidentiary profile used for marginalization. # ⓑ Reduce the marginalized JDF according to the observed evidentiary state specified by the CLUST_EVID_STATE data frame in # EVID_PROF_STATE. # ⓒ Condition the marginalized JDF on each of the observed evidentiary states. This conditioned, marginalized distribution # becomes a three-dimensional array in which the first two dimensions represent CDFs. The third dimension represents the # evidentiary states with respect to each CDF is conditioned. # ⓓ Marginalize the resulting conditional, marginal distribution function with respect to each of the variables not included # evidentiary profile. The resulting marginal distribution represents our estimate of the knowledge state conditioned on the # evidentiary state. # The procedure must be applied cluster-by-cluster. We accumulate all of the results into a three-dimensional array resembling # constructed for ⓑ above. We work through according to the structure of EVID_PROF_STATE. Loop through first according # to the evidentiary profile, EVID_PROF_SIG in EVID_PROF_STATE, then by unit-map connected-vertex cluster. # # We seek to collect for each row in EVID_PROF_SIG an array of LEARNER_KNOW_STATE templates corresponding to each # observed-variable evidentiary state. Each template should contain knowledge-state CPDs for each subnet vertex. We # subsequently use this to # # Begin by declaring an a list LEARNER_KNOW_STATE_ARR into which the learning-map states conditioned on each evidentiary # state are stored. 
LEARNER_KNOW_STATE <- list() # for (prof_idx in rownames(EVID_PROF_STATE)){ ## prof_idx <- rownames(EVID_PROF_STATE)[1] LEARNER_KNOW_STATE.prof_idx <- list() for (clust_idx in names(UNIT_SUBMAP_JDF)){ ## clust_idx <- names(UNIT_SUBMAP_JDF)[1] # Extract the evidentiary profile for the cluster from EVID_PROF_CLUST.prof_idx and its JDF from UNIT_SUBMAP_JDF. CLUST_EVID_STATE.clust_idx <- EVID_PROF_STATE[["CLUST_EVID_STATE"]][[prof_idx]][[clust_idx]] EVID_PROF_CLUST.clust_idx <- colnames(CLUST_EVID_STATE.clust_idx) EVID_STATE_CLUST_SIG.clust_idx <- rownames(CLUST_EVID_STATE.clust_idx) if (length(EVID_STATE_CLUST_SIG.clust_idx) <1) EVID_STATE_CLUST_SIG.clust_idx <- "UNMEASURED" TARG_PROF_CLUS.clust_idx <- EVID_PROF_STATE[["TARG_EST_PROF"]][[prof_idx]][[clust_idx]] JDF.clust_idx <- UNIT_SUBMAP_JDF[[clust_idx]] # # Proceed conditionally. If length EVID_PROF.clust_idx is greater than zero, then we condition JDF.clust_idx on its elements. Otherwise, # convert JDF.clust_idx to a three-dimensional array with only one increment in the third dimension. if(length(EVID_PROF_CLUST.clust_idx) > 0){ # ⓐ Marginalize UNIT_MAP_JDF according to the evidentiary profile. Marginalization # results from sum-aggregation. First construct the formula. marg_formula.clust_idx <- as.formula(paste("MEAS", paste(EVID_PROF_CLUST.clust_idx, collapse = " + "), sep = " ~ ")) print(paste("Marginalizing wrt observed profile:", prof_idx, ", ", clust_idx, "Starting at", format(Sys.time(),"%H:%M:%S") )) MARG_JDF.cust_idx <- aggregate(formula = marg_formula.clust_idx, data = JDF.clust_idx, FUN = sum) print(paste("Marginalizing wrt observed profile:", prof_idx, ", ", clust_idx, "Finishing at", format(Sys.time(),"%H:%M:%S") )) # # ⓑ Reduce the marginalized JDF according to the observed evidentiary state. Merge MARG_JDF.cust_idx with the evidentiary-state # data frame CLUST_EVID_STATE.clust_idx. 
print(paste("Reducing marginal with respect to observed evidentiary state:", prof_idx, ", ", clust_idx, "Starting at", format(Sys.time(),"%H:%M:%S") )) MARG_JDF.cust_idx <- merge(x = MARG_JDF.cust_idx, y = CLUST_EVID_STATE.clust_idx) # # ⓒ Condition the marginalized JDF on each of the observed evidentiary states. Conditioning is accomplished by Bayes rule. # Invert the measure attribute MEAS in MARG_JDF.cust_idx. Merge the result back onto JDF.clust_idx. Then multiply the inverted # MEAS of the former by the MEAS of the latter. print(paste("Conditioning unobserved wrt observed:", prof_idx, ", ", clust_idx, "Starting at", format(Sys.time(),"%H:%M:%S") )) MARG_JDF.cust_idx["invMEAS"] <- 1/MARG_JDF.cust_idx["MEAS"] MARG_JDF.cust_idx["MEAS"] <- NULL COND_JDF.clust_idx <- merge(x = JDF.clust_idx, y = MARG_JDF.cust_idx) COND_JDF.clust_idx["MEAS"] <- apply(X = COND_JDF.clust_idx[c("MEAS","invMEAS")], MARGIN = 1, FUN = prod) COND_JDF.clust_idx["invMEAS"] <- NULL # # ⓓ Marginalize the resulting conditional. # Reshape COND_JDF.clust_idx into an array. We need a variable EVID_STATE_SIG.clust_idx to fill this out. The EVID_STATE_SIG # variable is the evidentiary-state signature of all evidentiary states in EVID_PROF_CLUST.clust_idx. 
COND_JDF_ARR.clust_idx <- array(dim = list(length(KNOW_STATE_SPEC[,"IMPLIED_KNOW_STATE"])^length(TARG_PROF_CLUS.clust_idx), length(TARG_PROF_CLUS.clust_idx)+1, length(EVID_STATE_CLUST_SIG.clust_idx)), dimnames = list(1:(length(KNOW_STATE_SPEC[,"IMPLIED_KNOW_STATE"])^length(TARG_PROF_CLUS.clust_idx)), c(TARG_PROF_CLUS.clust_idx,"MEAS"), EVID_STATE_CLUST_SIG.clust_idx)) for (state_idx in rownames(CLUST_EVID_STATE.clust_idx)){ ## rownames <- rownames(CLUST_EVID_STATE.clust_idx)[1] print(paste("Reshaping to array:", state_idx, ", ", which(state_idx == EVID_STATE_CLUST_SIG.clust_idx), "of", length(EVID_STATE_CLUST_SIG.clust_idx), prof_idx,clust_idx, format(Sys.time(),"%H:%M:%S") )) CLUST_EVID_STATE.state_idx <- data.frame(rbind(CLUST_EVID_STATE.clust_idx[state_idx,])) COND_JDF_ARR.clust_idx[,,state_idx] <- as.matrix(merge(x = CLUST_EVID_STATE.state_idx, y = COND_JDF.clust_idx)[c(TARG_PROF_CLUS.clust_idx,"MEAS")]) } # } else { COND_JDF_ARR.clust_idx <- array(data = as.matrix(JDF.clust_idx), dim = c(dim(JDF.clust_idx),1), dimnames = list(1:nrow(JDF.clust_idx), colnames(JDF.clust_idx), "UNMEASURED") ) } # We now must cycle through EVID_STATE_CLUST_SIG.clust_idx and marginalize each associated slice of COND_JDF_ARR.clust_idx # with respect its variables in TARG_PROF_CLUS.clust_idx. Store the result in another array. We want the dimensions of the array to be # TARG_PROF_CLUS.clust_idx × IMPLIED_KNOW_STATE × EVID_STATE_CLUST_SIG.clust_idx. The cells contain the MEAS values. from # COND_JDF_ARR.clust_idx. 
LEARNER_KNOW_STATE.clust_idx <- array(dim = list(length(TARG_PROF_CLUS.clust_idx), length(KNOW_STATE_SPEC[,"IMPLIED_KNOW_STATE"]), length(EVID_STATE_CLUST_SIG.clust_idx)), dimnames = list(TARG_PROF_CLUS.clust_idx, KNOW_STATE_SPEC[,"IMPLIED_KNOW_STATE"], EVID_STATE_CLUST_SIG.clust_idx)) # for (evid_state_idx in EVID_STATE_CLUST_SIG.clust_idx){ ## evid_state_idx <- EVID_STATE_CLUST_SIG.clust_idx[1] COND_JDF_ARR.evid_state_idx <- as.data.frame(COND_JDF_ARR.clust_idx[,, evid_state_idx]) COND_JDF_ARR.evid_state_idx[,"MEAS"] <- as.numeric(COND_JDF_ARR.evid_state_idx[,"MEAS"]) for (targ_prof_idx in TARG_PROF_CLUS.clust_idx){ ## targ_prof_idx <- TARG_PROF_CLUS.clust_idx[1] print(paste("Marginalizing with to respect to target variable:", targ_prof_idx, ", ", which(targ_prof_idx == TARG_PROF_CLUS.clust_idx), "of", length(TARG_PROF_CLUS.clust_idx), evid_state_idx, "Evidentiary state", ", ", which(evid_state_idx == EVID_STATE_CLUST_SIG.clust_idx), "of", length(EVID_STATE_CLUST_SIG.clust_idx), prof_idx,clust_idx, format(Sys.time(),"%H:%M:%S") )) LEARNER_KNOW_STATE.targ_prof_idx <- aggregate(formula = as.formula(paste("MEAS", targ_prof_idx, sep = " ~ ")), data = COND_JDF_ARR.evid_state_idx, FUN = sum) rownames(LEARNER_KNOW_STATE.targ_prof_idx) <- LEARNER_KNOW_STATE.targ_prof_idx[, targ_prof_idx] LEARNER_KNOW_STATE.clust_idx[targ_prof_idx,KNOW_STATE_SPEC[,"IMPLIED_KNOW_STATE"], evid_state_idx] <- LEARNER_KNOW_STATE.targ_prof_idx[KNOW_STATE_SPEC[,"IMPLIED_KNOW_STATE"],"MEAS"] # } } # # LEARNER_KNOW_STATE.clust_idx now contains the knowledge-state estimates for the clust_idxᵗʰ subgraph disconnected cluster of # connected vertices for which evidence is not offered. We want concatenate to each evid_state_idx slice the corresponding evidentiary # state for the corresponding vertices. The evidentiary state assigns a value of 1.0 for the IMPLIED_KNOW_STATE in which the variable was observed. # The previously-built EVID_STATE_CLUST.clust_idx provides our starting point. 
Proceed conditionally. Only proceed if the length of the # evidentiary profile EVID_PROF_CLUST.clust_idx is greater than zero. if (length(EVID_PROF_CLUST.clust_idx) > 0){ OBS_KNOW_STATE_ARR.clust_idx <- array(dimnames = list(EVID_PROF_CLUST.clust_idx, KNOW_STATE_SPEC[,"IMPLIED_KNOW_STATE"], EVID_STATE_CLUST_SIG.clust_idx), dim = list(length(EVID_PROF_CLUST.clust_idx), length(KNOW_STATE_SPEC[,"IMPLIED_KNOW_STATE"]), length(EVID_STATE_CLUST_SIG.clust_idx))) # # Now cycle through EVID_STATE_CLUST_SIG.clust_idx, EVID_PROF_CLUST.clust_idx assigning unity values for the corresponding # rows of EVID_STATE_CLUST.clust_idx. Assign zero to the remaining values. for (evid_state_idx in EVID_STATE_CLUST_SIG.clust_idx){ ## evid_state_idx <- EVID_STATE_CLUST_SIG.clust_idx[1] for (evid_prof_idx in EVID_PROF_CLUST.clust_idx){ ## evid_prof_idx <- EVID_PROF_CLUST.clust_idx[1] OBS_KNOW_STATE_ARR.clust_idx[evid_prof_idx, CLUST_EVID_STATE.clust_idx[evid_state_idx, evid_prof_idx], evid_state_idx] <- 1 } } OBS_KNOW_STATE_ARR.clust_idx[which(is.na(OBS_KNOW_STATE_ARR.clust_idx))] <- 0 # # Concatenate OBS_KNOW_STATE_ARR.clust_idx onto LEARNER_KNOW_STATE.clust_idx. This provides a complete knowledge-state array # for the cluster. LEARNER_KNOW_STATE.clust_idx <- abind(LEARNER_KNOW_STATE.clust_idx, OBS_KNOW_STATE_ARR.clust_idx, along = 1) } # # Assign LEARNER_KNOW_STATE.clust_idx as the clust_idxᵗʰ element of list LEARNER_KNOW_STATE.prof_idx. LEARNER_KNOW_STATE.prof_idx[[clust_idx]] <- LEARNER_KNOW_STATE.clust_idx # } ## Close for (clust_idx in names(EVID_PROF_CLUST.prof_idx)) — Subnet clusters # # Assign the list LEARNER_KNOW_STATE.prof_idx of conditional knowledge-state arrays as the prof_idxᵗʰ element of # LEARNER_KNOW_STATE. 
LEARNER_KNOW_STATE[[prof_idx]] <- LEARNER_KNOW_STATE.prof_idx # } ## Close prof_idx in rownames(EVID_PROF_STATE)) — Observed evidentiary profiles # EVID_PROF_STATE[["LEARNER_KNOW_STATE"]] <- LEARNER_KNOW_STATE # # 7︎⃣ Associate the LEARNING_STANDARD_ID-marginalized CDFs. EVID_PROF_STATE and EoL_WIDE contain the evidentiary framework by which # we assemble LEARNER_KNOW_STATE, our intended output. We again manage two dimensions of variabilty: Evidentiary profiles and states, # and coverage thereof of the submap clusters. Our approach follows. # ⓐ Expand the LEARNER_KNOW_STATE attributes into data frames. LEARNER_KNOW_STATE is provided for each evidentiary-profile × # subraph configuration. Each such LEARNER_KNOW_STATE pair contains an array whose dimensions are target-variable estimation profile × # IMPLIED_KNOW_STATE × evidentiary state. Reshape this into a two-dmensional table such that the evidentiary-state signature is # a distinguishing attribute for the corresponding table of conditional probabilities of target-variable states. We concatenate these # tables "vertically". Then join them onto the corresponding CLUST_EVID_STATE tables. The evidentiary-profile is also added as an attribute # to this data frame. This gives us a data frame that can be joined onto EoL_WIDE, in order to associate the estimated knowledge-states # with individual subjects (learners, students). # ⓑ Merge the resulting LEARNER_KNOW_STATE tables with the EoL_WIDE. We then have a wide-table with distinct columns for the # probability that the subject's knowledge is in a given state for each variable. # ⓒ Reshape the wide-table LEARNER_KNOW_STATE tables into long tables. We want columns for the IMPLIED_KNOW_STATE as well # as a MEAS column with the probability that the subject is in the corresponding state for a given variable. # ⓓ Prepare the LEARNER_KNOW_STATE long table and write it out as a csv file. # # ⓐ Expand the LEARNER_KNOW_STATE attributes into data frames. 
KNOW_STATE <- list() for (prof_idx in rownames(EVID_PROF_STATE)){ ## prof_idx <- rownames(EVID_PROF_STATE)[1] # First extract the EVID_STATE and KNOW_STATE attributes from EVID_PROF_STATE. Also get EoL_WIDE records corresponding to the # prof_idxᵗʰ evidentiary profile. KNOW_STATE.prof_idx <- EVID_PROF_STATE[["LEARNER_KNOW_STATE"]][[prof_idx]] CLUST_EVID_STATE.prof_idx <- EVID_PROF_STATE[["CLUST_EVID_STATE"]][[prof_idx]] EoL_WIDE.prof_idx <- EoL_WIDE[EoL_WIDE[,"EVID_PROF_SIG"] == prof_idx,] # # Prepare EoL_WIDE.prof_idx to subsequently be merged with the wide-format KNOW_STATE table. We only need the # evidentiary-state attributes. We also want a STUDENT_ID attribute, derived from the rownames. EoL_WIDE.prof_idx <- EoL_WIDE.prof_idx[unlist(SUBMAP_VERTEX)] EoL_WIDE.prof_idx["STUDENT_ID"] <- rownames(EoL_WIDE.prof_idx) # KNOW_STATE_LONG.prof_idx <- list() for (clust_idx in names(KNOW_STATE.prof_idx)){ ## clust_idx <- names(KNOW_STATE.prof_idx)[1] # Now extract CLUST_EVID_STATE and KNOW_STATE for the clust_idxᵗʰ cluster. KNOW_STATE.clust_idx <- KNOW_STATE.prof_idx[[clust_idx]] CLUST_EVID_STATE.clust_idx <- CLUST_EVID_STATE.prof_idx[[clust_idx]] CLUST_EVID_PROF.clust_idx <- colnames(CLUST_EVID_STATE.clust_idx) # # Reshape the array for the clust_idxᵗʰ subgraph cluster × the prof_idxᵗʰ evidentiary profile into data frame. KNOW_STATE_WIDE.clust_idx <- list() for (evid_state_idx in dimnames(KNOW_STATE.clust_idx)[[3]]){ ## evid_state_idx <- dimnames(KNOW_STATE.clust_idx)[[3]][1] KNOW_STATE_WIDE.clust_idx[[evid_state_idx]] <- as.data.frame(KNOW_STATE.clust_idx[,, evid_state_idx]) KNOW_STATE_WIDE.clust_idx[[evid_state_idx]]["LEARNING_STANDARD_ID"] <- rownames(KNOW_STATE_WIDE.clust_idx[[evid_state_idx]]) KNOW_STATE_WIDE.clust_idx[[evid_state_idx]]["CLUST_EVID_STATE"] <- evid_state_idx # } ## CLOSE evid_state_idx in dimnames(KNOW_STATE.clust_idx)[[3]] # Concatenate the elements of KNOW_STATE_WIDE.clust_idx into a single data frame. 
KNOW_STATE_WIDE.clust_idx <- do.call(what = rbind, args = KNOW_STATE_WIDE.clust_idx) rownames(KNOW_STATE_WIDE.clust_idx) <- NULL # # Now prepare CLUST_EVID_STATE.clust_idx to merge with KNOW_STATE_WIDE.clust_idx. Proceed conditionally. Unmeasured clusters must be # treated differently. if(length(CLUST_EVID_STATE.clust_idx) > 0){ # Specifically, assign as attribute CLUST_EVID_STATE as the rownames. Then merge with KNOW_STATE_WIDE.clust_idx. CLUST_EVID_STATE.clust_idx["CLUST_EVID_STATE"] <- rownames(CLUST_EVID_STATE.clust_idx) # KNOW_STATE_WIDE.clust_idx <- merge(x = KNOW_STATE_WIDE.clust_idx, y = CLUST_EVID_STATE.clust_idx) # # Now merge with EoL_WIDE.prof_idx. We expect number of rows in the post-merged KNOW_STATE_WIDE.clust_idx to be # length(unique(LEARNING_STANDARD_ID)) × length(unique(STUDENT_ID)), one for each coinciding pair. Specify merging by # the cluster evidentiary profile, CLUST_EVID_PROF.clust_idx. KNOW_STATE_WIDE.clust_idx <- merge(x = KNOW_STATE_WIDE.clust_idx[c("LEARNING_STANDARD_ID", CLUST_EVID_PROF.clust_idx, KNOW_STATE_SPEC[,"IMPLIED_KNOW_STATE"])], y = EoL_WIDE.prof_idx[c("STUDENT_ID", CLUST_EVID_PROF.clust_idx)], by = CLUST_EVID_PROF.clust_idx) # # Coerce IMPLIED_KNOW_STATE variables to numeric. for (state_idx in KNOW_STATE_SPEC[,"IMPLIED_KNOW_STATE"]) KNOW_STATE_WIDE.clust_idx[,state_idx] <- as.numeric(KNOW_STATE_WIDE.clust_idx[,state_idx]) # # Melt KNOW_STATE_WIDE.clust_idx into a long table. KNOW_STATE.clust_idx <- melt(data = KNOW_STATE_WIDE.clust_idx[c("STUDENT_ID","LEARNING_STANDARD_ID", KNOW_STATE_SPEC[,"IMPLIED_KNOW_STATE"])], id.vars = c("STUDENT_ID","LEARNING_STANDARD_ID"), meas.vars = KNOW_STATE_SPEC[,"IMPLIED_KNOW_STATE"], variable.name = "IMPLIED_KNOW_STATE", value.name = "MEAS") # } else { # For unmeasured clusters, simply merge KNOW_STATE_WIDE.clust_idx with the STUDENT_IDs in EoL_WIDE.prof_idx. Then # melt. 
KNOW_STATE_WIDE.clust_idx <- merge(x = KNOW_STATE_WIDE.clust_idx, y = EoL_WIDE.prof_idx["STUDENT_ID"]) for (state_idx in KNOW_STATE_SPEC[,"IMPLIED_KNOW_STATE"]) KNOW_STATE_WIDE.clust_idx[,state_idx] <- as.numeric(KNOW_STATE_WIDE.clust_idx[,state_idx]) # # Melt KNOW_STATE_WIDE.clust_idx into a long table. KNOW_STATE.clust_idx <- melt(data = KNOW_STATE_WIDE.clust_idx[c("STUDENT_ID","LEARNING_STANDARD_ID", KNOW_STATE_SPEC[,"IMPLIED_KNOW_STATE"])], id.vars = c("STUDENT_ID","LEARNING_STANDARD_ID"), meas.vars = KNOW_STATE_SPEC[,"IMPLIED_KNOW_STATE"], variable.name = "IMPLIED_KNOW_STATE", value.name = "MEAS") # } ### CLOSE else # # Write KNOW_STATE_WIDE.clust_idx as the clust_idᵗʰ mebmer of list KNOW_STATE_WIDE.prof_idx. KNOW_STATE.prof_idx[[clust_idx]] <- KNOW_STATE.clust_idx # } ## CLOSE clust_idx in names(KNOW_STATE.prof_idx)) # Concatenate the elements of KNOW_STATE.prof_idx into a single data frame. Write KNOW_STATE.prof_idx as the prof_idxᵗʰ element # of data frame KNOW_STATE.prof_idx <- do.call(what = rbind, args = KNOW_STATE.prof_idx) rownames(KNOW_STATE.prof_idx) <- NULL KNOW_STATE[[prof_idx]] <- KNOW_STATE.prof_idx # } ## CLOSE prof_idx in rownames(EVID_PROF_STATE)) # # Concatenate the elements of KNOW_STATE into a single data frame. KNOW_STATE <- do.call(args = KNOW_STATE, what = rbind) rownames(KNOW_STATE) <- NULL # # Clean up the LEARNING_STANDARD_ID attribute. Truncate off the leading character "X". KNOW_STATE[,"LEARNING_STANDARD_ID"] <- gsub(x = KNOW_STATE[,"LEARNING_STANDARD_ID"], pattern = "X", replacement = "") # # Now apply two date-stamp attributes to KNOW_STATE. EVID_STATE_AS_OF is the date of the most-recent evidentiary state # measurement. KNOW_STATE_AS_OF is the date of the estimate, conditioned on the evidentiary state in the former. We get # EVID_STATE_AS_OF by max-aggregation of EoL_MEAS and then merging the result by STUDENT_ID with KNOW_STATE. # KNOW_STATE_AS_OF is simply the system date of the calculation. 
🐞🐜🕷🐝🕷🐞🐜🕷🐝🕷🐞🐜🕷🐝🕷🐞🐜🕷🐝🕷🐞🐜🕷🐝🕷 EoL_MEAS[,"DATE_OF_MEAS"] <- as.Date(x = EoL_MEAS[,"DATE_OF_MEAS"], "%Y-%m-%d") EVID_STATE_AS_OF <- aggregate(formula = DATE_OF_MEAS ~ STUDENT_ID + LEARNING_STANDARD_ID, data = EoL_MEAS, FUN = max) colnames(EVID_STATE_AS_OF) <- c("STUDENT_ID","LEARNING_STANDARD_ID","EVID_STATE_AS_OF") EVID_STATE_AS_OF[,"LEARNING_STANDARD_ID"] <- gsub(x = EVID_STATE_AS_OF[,"LEARNING_STANDARD_ID"], pattern = "X", replacement = "") KNOW_STATE <- merge(x = KNOW_STATE, y = EVID_STATE_AS_OF, all.x = TRUE) KNOW_STATE["KNOW_STATE_AS_OF"] <- Sys.Date() KNOW_STATE <- merge(x = KNOW_STATE, y = LEARNING_STANDARD) # # Finally, reorder the records according to STUDENT_ID, LEARNING_STANDARD_ID, IMPLIED_KNOW_STATE. Then # coerce attributs to UTF-8 character and write out as a csv table. KNOW_STATE[,"IMPLIED_KNOW_STATE"] <- factor(x = KNOW_STATE[,"IMPLIED_KNOW_STATE"], levels = KNOW_STATE_SPEC[,"IMPLIED_KNOW_STATE"]) KNOW_STATE <- KNOW_STATE[order(KNOW_STATE[,"STUDENT_ID"], KNOW_STATE[,"LEARNING_STANDARD_ID"]), c("STUDENT_ID","LEARNING_STANDARD_ID","LEARNING_STANDARD_CD", "IMPLIED_KNOW_STATE","MEAS","EVID_STATE_AS_OF", "KNOW_STATE_AS_OF")] for (col_idx in c("EVID_STATE_AS_OF","KNOW_STATE_AS_OF","IMPLIED_KNOW_STATE")) KNOW_STATE[,col_idx] <- as.character(KNOW_STATE[,col_idx]) for (col_idx in colnames(KNOW_STATE)) { KNOW_STATE[,col_idx] <- enc2utf8(as.character(KNOW_STATE[,col_idx])) KNOW_STATE[is.na(KNOW_STATE[,col_idx]),col_idx] <- "" } # write.csv(x = KNOW_STATE, file = paste(PROF_TASK.dir, "LEARNER_KNOW_STATE_BRUTE_FORCE.csv", sep = "/"), row.names = FALSE, eol = "\r\n", fileEncoding = "UTF-8", quote = TRUE) #
/PrototypeSubroutinesInR/LEARNER_KNOW_STATE—BRUTE_FORCE.R
no_license
hamlett-neil-ur/diagnostic_cognitive_model
R
false
false
42,478
r
## PURPOSE: APPLY A BAYESIAN NETWORK TO ESTIMATE LEARNER KNOWLEGE STATE GIVEN EVIDENCE OF LEARNING ## MEASUREMENT. The scope of the estimation is bounded by the learning map associated with a course unit and the ## course section. An external file USE_CASE_QUERY_ATTRIBUTES specifies the scope of the query. ## ## MAJOR STEPS IN THE ALGORITHM LOGIC. ## 1︎⃣ Set workspace parameters and read in working files. We specifically require the following: ## ⪧ USE_CASE_QUERY_ATTRIBUTES guides the case study on which we focus. ## ⪧ COURSE_ENROLL contains the enrollment and responsible educator. ## ⪧ EoL_MEAS contains the learners' evidence of learning (EoL) measurements. ## ⪧ KNOW_STATE_SPEC contains relationships between learners' measured learning evidence and their implied knowledge states. ## ⪧ GRAPH_CLUST_N_UNIT_MAP_JDF contains the joint distrubition functions (JDF) for Nᵗʰ cluster of connected vertices ## within UNIT_MAP_EDGE_LIST. We employ this to get the in-scope vertices. ## 2︎⃣ Window the EoL_MEAS learning-measurement table. Retain only records corresponding to subjects (students) for whom ## STUDENT_ID exists in EoL_MEAS. Also, limit the LEARNING_STANDARD_ID to the variables specfied within the columns of ## GRAPH_CLUST_N_UNIT_MAP_JDF. Also, sort the EoL_MEAS by DATE_OF_MEAS and retain only the most-recent in cases ## of multiple measurements of LEARNING_STANDARD_IDs for distinct subjects. ## 3︎⃣ Apply KNOW_STATE_SPEC to impute hard-decision knowledge-state estimates for each EoL_MEAS. ## 4︎⃣ Identify the evidence states in EoL_MEAS. We introduce here three aspects of our framework. ## ⓐ KNOWLEDGE STATE represents the estimated extent of mastery for an individual learner with respect to all LEARNING_STANDARD_ID ## attributes from the proficiency model. ## ⓑ EVIDENTIARY PROFILE contains all of the observed variables from which that estimate is derived. ## ⓒ EVIDENTIARY STATE specifies the actual state for each evidentiary-profile variable for a specific learner. 
## We extract during this stage the evidentiary profile and evidentiary state for each subject (learner, student) from EoL_MEAS. ## Categorize learners according to evidentiary profile and evidentiary state. Also identify by cluster for each unit-submap cluster ## of connected vertices: ## ⓐ Observed variables from the evidentiary profile on which we condition the submap-cluster's JDF; and ## ⓑ The target variables for which we obtain marginal CDFs conditioned on evidentiary states in the evidentiary profile. ## 6︎⃣ Translate each EVIDENTIARY STATE into an estimated KNOWLEDGE STATE. Condition GRAPH_CLUST_N_UNIT_MAP_JDF ## on each observed evidentiary state. Marginalize the resulting conditional distribution with respect to each target variable to obtain ## a distribution of knowledge-state probabilities for each observed evidentiary state. ## 7︎⃣ Associate the LEARNING_STANDARD_ID-marginalized CDFs for each learner with the measured knowledge state to get a complete ## probability distribution for each variable. Append to LEARNER_KNOW_STATE. Reshape to wide-table format so that LEARNER_KNOW_STATE ## contains for each STUDENT_ID × LEARNING_STANDARD_ID pair a row of conditional probability distributions regarding the LEARNER's state. # # Initialize environment. options(stringsAsFactors = FALSE) options(java.parameters = "-Xmx16g") library(stringr) library(reshape2) library(abind) # # 1︎⃣ DATA INGESTION. Read in USE_CASE_ATTRIBUTES to get the distinguishing case-study variable states. 
# Workspace setup: resolve the case-study directory tree from USE_CASE_QUERY_ATTRIBUTES and make the
# proficiency/task-model directory the working directory. All subsequent reads and writes are relative to it.
proto.dir <- "/Users/nahamlet/Box Sync/IBM-Watson ED K12/Pathway-Centric CONOPS/Learning-Map Prototype"
USE_CASE_ATTRIBUTES <- read.csv(file = paste(proto.dir, "USE_CASE_QUERY_ATTRIBUTES.csv", sep = "/"),
                                colClasses = "character")
rownames(USE_CASE_ATTRIBUTES) <- USE_CASE_ATTRIBUTES[,"QUERY_ATTRIBUTE"]
Case.dir <- USE_CASE_ATTRIBUTES["Case.dir","VALUE"]
PROF_TASK.dir <- paste(Case.dir, "PROF_TASK_MODEL", sep = "/")
setwd(PROF_TASK.dir)
#
# ‼️ Read one JDF for each course-unit vertex cluster. The JDF column names carry a leading "X" substring,
# introduced because read.csv's colClasses were not constrained to "character" when the JDFs were produced;
# EoL_MEAS learning-standard IDs are given the same "X" prefix below so the two keys agree.
jdf_files <- list.files(PROF_TASK.dir)[grep(x = list.files(PROF_TASK.dir), pattern = "UNIT_MAP_JDF.csv")]
UNIT_SUBMAP_JDF <- list()
for (jdf_idx in jdf_files) {   ## jdf_idx <- jdf_files[1]
  print(paste("Reading in SUBMAP_JDF", jdf_idx, "Starting at", format(Sys.time(),"%H:%M:%S")))
  UNIT_SUBMAP_JDF[[jdf_idx]] <- read.csv(file = paste(PROF_TASK.dir, jdf_idx, sep = "/"))
  print(paste("Reading in SUBMAP_JDF", jdf_idx, "Ending at", format(Sys.time(),"%H:%M:%S")))
}
#
# Read in the remaining working files: knowledge-state thresholds, enrollment, evidence-of-learning
# measurements, and the learning-standard ID → code lookup.
KNOW_STATE_SPEC <- read.csv(file = paste(PROF_TASK.dir, "KNOW_STATE_SPEC.csv", sep = "/"),
                            colClasses = "character")
COURSE_ENROLL <- read.csv(file = paste(PROF_TASK.dir, "COURSE_ENROLL.csv", sep = "/"),
                          colClasses = "character")
EoL_MEAS <- read.csv(file = paste(PROF_TASK.dir, "EoL_MEAS.csv", sep = "/"),
                     colClasses = "character")[c("STUDENT_ID","LEARNING_STANDARD_ID",
                                                 "MEAS_EVIDENCE","DATE_OF_MEAS")]
LEARNING_STANDARD <- read.csv(file = paste(PROF_TASK.dir, "SIHLEARNING_STANDARD.csv", sep = "/"),
                              colClasses = "character")[c("LEARNING_STANDARD_ID",
                                                          "LEARNING_STANDARD_CD")]
#
# 2︎⃣ Window the EoL_MEAS learning-measurement table.  First window by subjects with STUDENT_IDs in COURSE_ENROLL for the
# COURSE_ID, SECTION_ID specified by the corresponding values of USE_CASE_ATTRIBUTES.  Create a "windowed" version of
# COURSE_ENROLL.  Then merge the result with EoL_MEAS.
# NOTE(review): SECT_ENROLL is built here but not referenced again in this script — confirm it is still needed.
SECT_ENROLL <- data.frame(rbind(USE_CASE_ATTRIBUTES[c("COURSE_ID","SECTION_ID"),"VALUE"]))
colnames(SECT_ENROLL) <- c("COURSE_ID","SECTION_ID")
SECT_ENROLL <- merge(x = SECT_ENROLL, y = COURSE_ENROLL)
# Left join keeps enrolled subjects with no measurements at all; their rows carry NA in every EoL column.
EoL_MEAS <- merge(x = COURSE_ENROLL["STUDENT_ID"], y = EoL_MEAS, all.x = TRUE)
# EoL_MEAS <- EoL_MEAS[EoL_MEAS[,"STUDENT_ID"] %in% COURSE_ENROLL[,"STUDENT_ID"],]
# Prefix measured standards with "X" so they match the UNIT_SUBMAP_JDF column names.
EoL_MEAS[!is.na(EoL_MEAS[,"LEARNING_STANDARD_ID"]),"LEARNING_STANDARD_ID"] <-
  paste("X", EoL_MEAS[!is.na(EoL_MEAS[,"LEARNING_STANDARD_ID"]),"LEARNING_STANDARD_ID"], sep = "")
#
# Now filter EoL_MEAS according to the LEARNING_STANDARD_IDs included in the graph. These are obtained by
# intersecting the column names of UNIT_SUBMAP_JDF with the instances of LEARNING_STANDARD_ID in EoL_MEAS.
# We retain the in-scope SUBMAP_VERTEX measurements AND the all-NA rows for subjects with no measurements.
SUBMAP_VERTEX <- data.frame(LEARNING_STANDARD_ID = intersect(unique(EoL_MEAS[,"LEARNING_STANDARD_ID"]),
                                                             unlist(lapply(X = UNIT_SUBMAP_JDF, FUN = colnames)) ) )
EoL_MEAS <- rbind(EoL_MEAS[EoL_MEAS[,"LEARNING_STANDARD_ID"] %in% SUBMAP_VERTEX[,"LEARNING_STANDARD_ID"],],
                  EoL_MEAS[apply (X = is.na(EoL_MEAS), MARGIN = 1, FUN = any), ])
#
# Time-window EoL measurements.
# ⓐ Sort the measurements in decreasing order of DATE_OF_MEAS.
# ⓑ Truncate EoL_MEAS to measurements up to DATE_LATEST_MEAS from the USE_CASE_ATTRIBUTES table.
#    The date comparison loses the all-NA records for unmeasured subjects, so they are reintroduced.
# ⓒ Retain only the most-recent measurement per STUDENT_ID × LEARNING_STANDARD_ID pair; because the table
#    is sorted by decreasing DATE_OF_MEAS, duplicated() keeps exactly the most recent occurrence.
EoL_MEAS <- EoL_MEAS[order(x = as.Date(EoL_MEAS[,"DATE_OF_MEAS"],"%Y-%m-%d"), decreasing = TRUE),]
EoL_MEAS <- rbind(EoL_MEAS[which(as.Date(EoL_MEAS[,"DATE_OF_MEAS"],"%Y-%m-%d") <=
                                   as.Date(USE_CASE_ATTRIBUTES["DATE_LATEST_MEAS","VALUE"],"%Y-%m-%d") ), ],
                  EoL_MEAS[apply (X = is.na(EoL_MEAS), MARGIN = 1, FUN = any), ])
EoL_MEAS <- EoL_MEAS[!duplicated(EoL_MEAS[c("STUDENT_ID", "LEARNING_STANDARD_ID")]),]
EoL_MEAS <- EoL_MEAS[order(EoL_MEAS[,"STUDENT_ID"], EoL_MEAS[,"LEARNING_STANDARD_ID"]),]
#
# 3︎⃣ Impute IMPLIED_KNOW_STATE to MEAS_EVIDENCE in EoL_MEAS.  Assign IMPLIED_KNOW_STATE based on threshold
# intervals in KNOW_STATE_SPEC.  MEAS_EVIDENCE must first be coerced to numeric; the LOW_BOUND/UP_BOUND
# thresholds were read as character and are coerced by cut() via as.double — assumes numeric-formatted bounds.
EoL_MEAS[,"MEAS_EVIDENCE"] <- as.numeric(EoL_MEAS[,"MEAS_EVIDENCE"])
# BUGFIX: the cut() argument is `ordered_result`, not `ordered.result`; the misspelled name was silently
# absorbed by cut.default's `...`, so the requested ordered factor was never produced.
EoL_MEAS[,"IMPLIED_KNOW_STATE"] <- cut(x = EoL_MEAS[,"MEAS_EVIDENCE"],
                                       breaks = unique(unlist(KNOW_STATE_SPEC[c("LOW_BOUND","UP_BOUND")])),
                                       labels = KNOW_STATE_SPEC[,"IMPLIED_KNOW_STATE"],
                                       include.lowest = TRUE,
                                       ordered_result = TRUE)
EoL_MEAS[,"IMPLIED_KNOW_STATE"] <- as.character(EoL_MEAS[,"IMPLIED_KNOW_STATE"])
#
# 4︎⃣ Identify the evidence states in EoL_MEAS.  We need to relate subjects (learners, students) to evidentiary profiles and
# evidentiary states.  We use these to marginalize, reduce, and condition the Joint Distribution Functions (JDFs) for each
# disconnected subgraph cluster of connected vertices.  We also need the knowledge-state estimation profiles, the variables
# for which the Bayesian network produces estimates.
# These estimates result from marginalization of the conditioned JDF with each variable in the estimation profile, one at a time.
#
# We get at these by the following procedure.
# ⓐ Reshape EoL_MEAS into wide-table format.  We record for each subject the IMPLIED_KNOW_STATE of each
#     measured variable.  Evidentiary profiles and states vary between subjects (learners, students).  Keeping track of these
#     distinctions and applying each to the Bayesian Network represents the greatest source of complexity in this approach.
# ⓑ Associate each learner with an evidentiary profile and an evidentiary state.  Calculate signatures based on concatenation
#     of the variable names — column names of the wide table — and variable values indicating evidentiary state.
# ⓒ Create EVID_PROF_STATE, a data frame containing unique rows in the wide-table EoL_MEAS table.
# ⓓ Allocate in EVID_PROF_STATE the variables in the evidentiary profile to each disconnected cluster of connected subgraph
#     vertices.  Collect this information for each cluster in a distinct data frame of unique evidentiary-state configurations.  The
#     column names represent the variables in the cluster-allocated evidentiary profile.  The rownames contain the cluster-allocated
#     evidentiary states.
# ⓔ For each subgraph cluster, ascertain the knowledge-state estimation profile, the variables in the cluster not included in the
#     cluster-allocated evidentiary profile.
# To summarize, we must manage two dimensions of combinatorial variability:  Subject evidentiary profiles and states, and their
# coverage of disconnected clusters of connected subgraph vertices.  This requires two levels of categorization of evidentiary
# profiles and states.
#
# ⓐ Reshape EoL_MEAS into wide-table format.  Assign the STUDENT_ID subject-unique attributes as the rownames for the
#     resulting data frame.  Drop all remaining columns not pertaining to the possibly measured variables in SUBMAP_VERTEX.
# Pivot to one row per subject, one column per in-scope learning standard; cells carry the hard-decision
# IMPLIED_KNOW_STATE (NA where no evidence was measured).
EoL_WIDE <- dcast(data = EoL_MEAS, formula = STUDENT_ID ~ LEARNING_STANDARD_ID, value.var = "IMPLIED_KNOW_STATE")
rownames(EoL_WIDE) <- EoL_WIDE[,"STUDENT_ID"]
for (col_idx in setdiff(colnames(EoL_WIDE),unlist(SUBMAP_VERTEX))) EoL_WIDE[col_idx] <- NULL
#
# Write out a csv file containing the LEARNER_EVID_STATE.  It is derived from EoL_WIDE but has
# LEARNING_STANDARD_CD for its column names.
LEARNER_EVID_STATE <- EoL_WIDE
LEARNER_EVID_STATE.cols <- data.frame(LEARNING_STANDARD_ID = colnames(LEARNER_EVID_STATE))
# BUGFIX: anchor the pattern at the start of the string.  The "X" was prepended as a sentinel prefix
# earlier; the unanchored pattern "X" would also strip any "X" occurring inside a genuine ID.
LEARNER_EVID_STATE.cols[,"LEARNING_STANDARD_ID"] <- gsub(x = LEARNER_EVID_STATE.cols[,"LEARNING_STANDARD_ID"],
                                                         pattern = "^X", replacement = "")
LEARNER_EVID_STATE.cols <- merge(x = LEARNER_EVID_STATE.cols, y = LEARNING_STANDARD)
# BUGFIX: merge() re-sorts rows by the join key, but the rows of LEARNER_EVID_STATE.cols are used
# positionally to rename columns.  Re-align explicitly with the current column order so the
# LEARNING_STANDARD_CD labels cannot be attached to the wrong columns.
LEARNER_EVID_STATE.cols <- LEARNER_EVID_STATE.cols[match(gsub(x = colnames(LEARNER_EVID_STATE),
                                                              pattern = "^X", replacement = ""),
                                                         LEARNER_EVID_STATE.cols[,"LEARNING_STANDARD_ID"]), ]
colnames(LEARNER_EVID_STATE) <- LEARNER_EVID_STATE.cols[,"LEARNING_STANDARD_CD"]
LEARNER_EVID_STATE["STUDENT_ID"] <- rownames(EoL_WIDE)
LEARNER_EVID_STATE <- merge(x = LEARNER_EVID_STATE, y = COURSE_ENROLL[c("STUDENT_ID","STUDENT_NAME","CLASS_ID")])
write.csv(x = LEARNER_EVID_STATE[c("STUDENT_ID","STUDENT_NAME","CLASS_ID",
                                   LEARNER_EVID_STATE.cols[,"LEARNING_STANDARD_CD"])],
          file = paste(PROF_TASK.dir, "LEARNER_EVID_STATE.csv", sep = "/"),
          row.names = FALSE, eol = "\r\n", fileEncoding = "UTF-8", quote = TRUE)
#
# ⓑ Associate each learner with an evidentiary profile and an evidentiary state.  We specifically seek the above-described
#     evidentiary-profile and -state signatures.  Getting the evidentiary-profile signature requires three steps.
#     ⅰ.   List the column names for the non-NA evidentiary state for each learner;
#     ⅱ.  Concatenate the non-NA evidentiary-state variables into a signature; and
#     ⅲ. Write the results as a column to EoL_WIDE.
#     We encounter subjects (learners, students) and clusters for which no measurements are available.  Our syntax logic
#     returns a blank for these instances.  Replace the blank with "UNMEASURED".
# Evidentiary-profile signature: the "_"-joined names of the non-NA (measured) columns per subject.
EoL_WIDE["EVID_PROF_SIG"] <- unlist(lapply(X = lapply(X = apply(X = !is.na(EoL_WIDE[,unlist(SUBMAP_VERTEX)]),
                                                                MARGIN = 1, FUN = which),
                                                      FUN = names),
                                           FUN = paste, collapse = "_"))
# Subjects with no measured variables get an empty signature; replace it with the sentinel "UNMEASURED".
EoL_WIDE[nchar(EoL_WIDE[,"EVID_PROF_SIG"]) == 0,"EVID_PROF_SIG"] <- "UNMEASURED"
#
# The evidentiary-state signatures are simpler to obtain.  Simply row-concatenate all of the evidentiary-state variables.  As with
# the EVID_PROF_SIG, we want evidentiary-state signatures for which no evidence is instantiated to be "UNMEASURED".
# Replace all such instances — assigned "NA_ ...." by our syntax logic — with "UNMEASURED".
EoL_WIDE["EVID_STATE_SIG"] <- apply(X = EoL_WIDE[unlist(SUBMAP_VERTEX)], MARGIN = 1, FUN = paste, collapse = "_")
# NOTE(review): the pattern "NA_" has edge cases — a signature whose ONLY NA is in the final position
# ("..._NA") is not matched and keeps its raw signature, while a partially measured signature containing
# "NA_" anywhere IS replaced with "UNMEASURED"; it would also match a state label that itself contains
# "NA_".  Confirm the intent is "any NA present ⇒ UNMEASURED" and the state vocabulary excludes "NA".
EoL_WIDE[grep(x = EoL_WIDE[,"EVID_STATE_SIG"], pattern = "NA_"),"EVID_STATE_SIG"] <- "UNMEASURED"
#
# ⓒ Create EVID_PROF_STATE, a data frame containing unique evidentiary-profile signatures.  Then, add a column containing the
#     unique evidentiary states for each evidentiary profile.
EVID_PROF_STATE <- unique(EoL_WIDE["EVID_PROF_SIG"])
rownames(EVID_PROF_STATE) <- EVID_PROF_STATE[, "EVID_PROF_SIG"]
EVID_STATE <- list()
for (prof_idx in rownames(EVID_PROF_STATE)){   ## prof_idx <- rownames(EVID_PROF_STATE)[3]
  # Unique fully-observed state configurations among subjects sharing this profile; the "_"-joined
  # state values become the rownames (the evidentiary-state signatures).
  EVID_STATE[[prof_idx]] <- EoL_WIDE[EoL_WIDE[,"EVID_PROF_SIG"] == prof_idx,unlist(SUBMAP_VERTEX)]
  EVID_STATE[[prof_idx]] <- unique(EVID_STATE[[prof_idx]][apply(X = !is.na(EVID_STATE[[prof_idx]]),
                                                                MARGIN = 2, FUN = all)])
  rownames(EVID_STATE[[prof_idx]]) <- apply(X = EVID_STATE[[prof_idx]], MARGIN = 1, FUN = paste, collapse = "_")
}
# Stored as a list column of EVID_PROF_STATE, keyed by profile signature.
EVID_PROF_STATE[["EVID_STATE"]] <- EVID_STATE
#
# ⓓ Allocate in EVID_PROF_STATE the variables in the evidentiary profile to each cluster.  Vertex membership in
#     clusters is contained in the column names of UNIT_SUBMAP_JDF.  We build up, external to EVID_PROF_STATE,
#     a list of data frames and then write it back into EVID_PROF_STATE as CLUST_EVID_STATE.
# Allocate each subject evidentiary profile to the submap vertex clusters it touches.
CLUST_EVID_STATE <- list()
# CLUST_EVID_PROF: for each cluster, the JDF variables that are also observable in EoL_WIDE.
CLUST_EVID_PROF <- lapply(X = lapply(X = UNIT_SUBMAP_JDF, FUN = colnames),
                          FUN = intersect, colnames(EoL_WIDE))
#
# CLUST_EVID_PROF now contains all of the evidentiary-profile variables in EoL_WIDE, by cluster.  We now
# subset according to the scope of the subgraph clusters for each profile in EVID_PROF_STATE.
for (prof_idx in unique(EVID_PROF_STATE[,"EVID_PROF_SIG"])){   ## prof_idx <- unique(EVID_PROF_STATE[,"EVID_PROF_SIG"])[3]
  EVID_PROF_STATE.prof_idx <- EVID_PROF_STATE[["EVID_STATE"]][[prof_idx]]
  CLUST_EVID_STATE.prof_idx <- list()
  for (clust_idx in names(CLUST_EVID_PROF)){   ## clust_idx <- names(CLUST_EVID_PROF)[1]
    # Restrict this profile's state table to the variables belonging to the clust_idxᵗʰ cluster and keep
    # only the fully observed, unique configurations.
    CLUST_EVID_STATE.prof_idx[[clust_idx]] <- EVID_PROF_STATE.prof_idx[intersect(colnames(EVID_PROF_STATE.prof_idx),
                                                                                 CLUST_EVID_PROF[[clust_idx]])]
    CLUST_EVID_STATE.prof_idx[[clust_idx]] <- unique(CLUST_EVID_STATE.prof_idx[[clust_idx]][apply(X = !is.na(CLUST_EVID_STATE.prof_idx[[clust_idx]]),
                                                                                                  MARGIN = 2, FUN = all)])
    # FIX: spell out MARGIN — the original `MARG = 1` only worked through partial argument matching.
    rownames(CLUST_EVID_STATE.prof_idx[[clust_idx]]) <- apply(X = CLUST_EVID_STATE.prof_idx[[clust_idx]],
                                                              MARGIN = 1, FUN = paste, collapse = "_")
  }
  CLUST_EVID_STATE[[prof_idx]] <- CLUST_EVID_STATE.prof_idx
}
EVID_PROF_STATE[["CLUST_EVID_STATE"]] <- CLUST_EVID_STATE
#
# ⓔ Provide the knowledge-state estimation profile by cluster for each evidentiary profile: the cluster
#     variables NOT named in the profile signature (and not the probability-measure column "MEAS").
TARG_EST_PROF <- list()
for (prof_idx in rownames(EVID_PROF_STATE)) TARG_EST_PROF[[prof_idx]] <-
  lapply(X = lapply(X = UNIT_SUBMAP_JDF, FUN = colnames),
         FUN = setdiff, c(unlist(str_split(string = prof_idx, pattern = "_")),"MEAS"))
EVID_PROF_STATE[["TARG_EST_PROF"]] <- TARG_EST_PROF
#
# The EVID_PROF_SIG attribute is, finally, redundant (it duplicates the rownames) and can be eliminated
# from EVID_PROF_STATE.
EVID_PROF_STATE[["EVID_PROF_SIG"]] <- NULL
#
# 🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉🦉
# 6︎⃣ Translate each EVIDENTIARY STATE into an estimated KNOWLEDGE STATE.  Our complex data frame EVID_PROF_STATE now contains
# all of the structure we need to estimate knowledge states.
We accomplish this via a Bayesian-network instantiation.  This includes the
# following key steps.
# ⓐ Marginalize UNIT_MAP_JDF according to the evidentiary profile.  The evidentiary profile again contains all of the attributes for which
#     evidence is asserted.  The column names of the data frame CLUST_EVID_STATE in EVID_PROF_STATE — for each profile, each cluster —
#     contain the evidentiary profile used for marginalization.
# ⓑ Reduce the marginalized JDF according to the observed evidentiary state specified by the CLUST_EVID_STATE data frame in
#     EVID_PROF_STATE.
# ⓒ Condition the marginalized JDF on each of the observed evidentiary states.  This conditioned, marginalized distribution
#     becomes a three-dimensional array in which the first two dimensions represent CDFs.  The third dimension represents the
#     evidentiary states with respect to which each CDF is conditioned.
# ⓓ Marginalize the resulting conditional, marginal distribution function with respect to each of the variables not included in the
#     evidentiary profile.  The resulting marginal distribution represents our estimate of the knowledge state conditioned on the
#     evidentiary state.
# The procedure must be applied cluster-by-cluster.  We accumulate all of the results into a three-dimensional array resembling
# that constructed for ⓑ above.  We work through according to the structure of EVID_PROF_STATE:  loop first according
# to the evidentiary profile, EVID_PROF_SIG in EVID_PROF_STATE, then by unit-map connected-vertex cluster.
#
# We seek to collect, for each row in EVID_PROF_SIG, an array of LEARNER_KNOW_STATE templates corresponding to each
# observed-variable evidentiary state.  Each template should contain knowledge-state CPDs for each subnet vertex.  We
# subsequently use this to assemble per-learner knowledge-state estimates in step 7︎⃣.
#
# Begin by declaring a list LEARNER_KNOW_STATE into which the learning-map states conditioned on each evidentiary
# state are stored.
# Step 6︎⃣ driver: for every observed evidentiary profile × submap vertex cluster, condition the cluster
# JDF on each observed evidentiary state and marginalize out per-vertex knowledge-state distributions.
# NOTE(review): several locals are spelled "MARG_JDF.cust_idx" (sic — "cust", not "clust"); the
# misspelling is internally consistent, so behavior is unaffected.
LEARNER_KNOW_STATE <- list()
#
for (prof_idx in rownames(EVID_PROF_STATE)){   ## prof_idx <- rownames(EVID_PROF_STATE)[1]
  LEARNER_KNOW_STATE.prof_idx <- list()
  for (clust_idx in names(UNIT_SUBMAP_JDF)){   ## clust_idx <- names(UNIT_SUBMAP_JDF)[1]
    # Extract the evidentiary profile for the cluster from EVID_PROF_CLUST.prof_idx and its JDF from UNIT_SUBMAP_JDF.
    CLUST_EVID_STATE.clust_idx <- EVID_PROF_STATE[["CLUST_EVID_STATE"]][[prof_idx]][[clust_idx]]
    EVID_PROF_CLUST.clust_idx <- colnames(CLUST_EVID_STATE.clust_idx)
    EVID_STATE_CLUST_SIG.clust_idx <- rownames(CLUST_EVID_STATE.clust_idx)
    # A cluster touched by no observed variable for this profile gets the sentinel state "UNMEASURED".
    if (length(EVID_STATE_CLUST_SIG.clust_idx) <1) EVID_STATE_CLUST_SIG.clust_idx <- "UNMEASURED"
    TARG_PROF_CLUS.clust_idx <- EVID_PROF_STATE[["TARG_EST_PROF"]][[prof_idx]][[clust_idx]]
    JDF.clust_idx <- UNIT_SUBMAP_JDF[[clust_idx]]
    #
    # Proceed conditionally.  If length EVID_PROF_CLUST.clust_idx is greater than zero, then we condition JDF.clust_idx on its
    # elements.  Otherwise, convert JDF.clust_idx to a three-dimensional array with only one increment in the third dimension.
    if(length(EVID_PROF_CLUST.clust_idx) > 0){
      # ⓐ Marginalize UNIT_MAP_JDF according to the evidentiary profile.  Marginalization results from
      #     sum-aggregation.  First construct the formula.
      marg_formula.clust_idx <- as.formula(paste("MEAS", paste(EVID_PROF_CLUST.clust_idx, collapse = " + "), sep = " ~ "))
      print(paste("Marginalizing wrt observed profile:", prof_idx, ", ", clust_idx, "Starting at", format(Sys.time(),"%H:%M:%S") ))
      MARG_JDF.cust_idx <- aggregate(formula = marg_formula.clust_idx, data = JDF.clust_idx, FUN = sum)
      print(paste("Marginalizing wrt observed profile:", prof_idx, ", ", clust_idx, "Finishing at", format(Sys.time(),"%H:%M:%S") ))
      #
      # ⓑ Reduce the marginalized JDF according to the observed evidentiary state.  Merge MARG_JDF.cust_idx with the
      #     evidentiary-state data frame CLUST_EVID_STATE.clust_idx.
      print(paste("Reducing marginal with respect to observed evidentiary state:", prof_idx, ", ", clust_idx, "Starting at", format(Sys.time(),"%H:%M:%S") ))
      MARG_JDF.cust_idx <- merge(x = MARG_JDF.cust_idx, y = CLUST_EVID_STATE.clust_idx)
      #
      # ⓒ Condition the marginalized JDF on each of the observed evidentiary states.  Conditioning is accomplished by Bayes rule.
      #     Invert the measure attribute MEAS in MARG_JDF.cust_idx.  Merge the result back onto JDF.clust_idx.  Then multiply the
      #     inverted MEAS of the former by the MEAS of the latter.
      print(paste("Conditioning unobserved wrt observed:", prof_idx, ", ", clust_idx, "Starting at", format(Sys.time(),"%H:%M:%S") ))
      MARG_JDF.cust_idx["invMEAS"] <- 1/MARG_JDF.cust_idx["MEAS"]
      MARG_JDF.cust_idx["MEAS"] <- NULL
      COND_JDF.clust_idx <- merge(x = JDF.clust_idx, y = MARG_JDF.cust_idx)
      COND_JDF.clust_idx["MEAS"] <- apply(X = COND_JDF.clust_idx[c("MEAS","invMEAS")], MARGIN = 1, FUN = prod)
      COND_JDF.clust_idx["invMEAS"] <- NULL
      #
      # ⓓ Marginalize the resulting conditional.
      #     Reshape COND_JDF.clust_idx into an array: rows = joint target-variable states, columns = target
      #     variables plus MEAS, slices = observed evidentiary states.
      COND_JDF_ARR.clust_idx <- array(dim = list(length(KNOW_STATE_SPEC[,"IMPLIED_KNOW_STATE"])^length(TARG_PROF_CLUS.clust_idx),
                                                 length(TARG_PROF_CLUS.clust_idx)+1,
                                                 length(EVID_STATE_CLUST_SIG.clust_idx)),
                                      dimnames = list(1:(length(KNOW_STATE_SPEC[,"IMPLIED_KNOW_STATE"])^length(TARG_PROF_CLUS.clust_idx)),
                                                      c(TARG_PROF_CLUS.clust_idx,"MEAS"),
                                                      EVID_STATE_CLUST_SIG.clust_idx))
      for (state_idx in rownames(CLUST_EVID_STATE.clust_idx)){   ## rownames <- rownames(CLUST_EVID_STATE.clust_idx)[1]
        print(paste("Reshaping to array:", state_idx, ", ", which(state_idx == EVID_STATE_CLUST_SIG.clust_idx), "of", length(EVID_STATE_CLUST_SIG.clust_idx), prof_idx,clust_idx, format(Sys.time(),"%H:%M:%S") ))
        CLUST_EVID_STATE.state_idx <- data.frame(rbind(CLUST_EVID_STATE.clust_idx[state_idx,]))
        # NOTE(review): as.matrix() of a mixed character/numeric data frame yields a character matrix;
        # the MEAS slice is restored to numeric below before aggregation.
        COND_JDF_ARR.clust_idx[,,state_idx] <- as.matrix(merge(x = CLUST_EVID_STATE.state_idx, y = COND_JDF.clust_idx)[c(TARG_PROF_CLUS.clust_idx,"MEAS")])
      }
      #
    } else {
      # Unobserved cluster: the unconditioned JDF becomes the single "UNMEASURED" slice.
      COND_JDF_ARR.clust_idx <- array(data = as.matrix(JDF.clust_idx), dim = c(dim(JDF.clust_idx),1),
                                      dimnames = list(1:nrow(JDF.clust_idx), colnames(JDF.clust_idx), "UNMEASURED") )
    }
    # We now must cycle through EVID_STATE_CLUST_SIG.clust_idx and marginalize each associated slice of COND_JDF_ARR.clust_idx
    # with respect to its variables in TARG_PROF_CLUS.clust_idx.  Store the result in another array with dimensions
    # TARG_PROF_CLUS.clust_idx × IMPLIED_KNOW_STATE × EVID_STATE_CLUST_SIG.clust_idx; the cells contain the MEAS values
    # from COND_JDF_ARR.clust_idx.
    LEARNER_KNOW_STATE.clust_idx <- array(dim = list(length(TARG_PROF_CLUS.clust_idx), length(KNOW_STATE_SPEC[,"IMPLIED_KNOW_STATE"]), length(EVID_STATE_CLUST_SIG.clust_idx)),
                                          dimnames = list(TARG_PROF_CLUS.clust_idx, KNOW_STATE_SPEC[,"IMPLIED_KNOW_STATE"], EVID_STATE_CLUST_SIG.clust_idx))
    #
    for (evid_state_idx in EVID_STATE_CLUST_SIG.clust_idx){   ## evid_state_idx <- EVID_STATE_CLUST_SIG.clust_idx[1]
      COND_JDF_ARR.evid_state_idx <- as.data.frame(COND_JDF_ARR.clust_idx[,, evid_state_idx])
      # Restore numeric type lost to the character array storage (see NOTE above).
      COND_JDF_ARR.evid_state_idx[,"MEAS"] <- as.numeric(COND_JDF_ARR.evid_state_idx[,"MEAS"])
      for (targ_prof_idx in TARG_PROF_CLUS.clust_idx){   ## targ_prof_idx <- TARG_PROF_CLUS.clust_idx[1]
        print(paste("Marginalizing with to respect to target variable:", targ_prof_idx, ", ", which(targ_prof_idx == TARG_PROF_CLUS.clust_idx), "of", length(TARG_PROF_CLUS.clust_idx), evid_state_idx, "Evidentiary state", ", ", which(evid_state_idx == EVID_STATE_CLUST_SIG.clust_idx), "of", length(EVID_STATE_CLUST_SIG.clust_idx), prof_idx,clust_idx, format(Sys.time(),"%H:%M:%S") ))
        LEARNER_KNOW_STATE.targ_prof_idx <- aggregate(formula = as.formula(paste("MEAS", targ_prof_idx, sep = " ~ ")), data = COND_JDF_ARR.evid_state_idx, FUN = sum)
        rownames(LEARNER_KNOW_STATE.targ_prof_idx) <- LEARNER_KNOW_STATE.targ_prof_idx[, targ_prof_idx]
        # NOTE(review): indexing by rowname assumes every IMPLIED_KNOW_STATE level occurs in the
        # aggregate output; a level absent from the JDF support would produce an NA row — confirm the
        # upstream JDFs cover all knowledge-state levels.
        LEARNER_KNOW_STATE.clust_idx[targ_prof_idx,KNOW_STATE_SPEC[,"IMPLIED_KNOW_STATE"], evid_state_idx] <- LEARNER_KNOW_STATE.targ_prof_idx[KNOW_STATE_SPEC[,"IMPLIED_KNOW_STATE"],"MEAS"]
        #
      }
    }
    #
    # LEARNER_KNOW_STATE.clust_idx now contains the knowledge-state estimates for the clust_idxᵗʰ subgraph disconnected cluster
    # of connected vertices for which evidence is not offered.  We want to concatenate onto each evid_state_idx slice the
    # corresponding evidentiary state for the observed vertices.  The evidentiary state assigns a value of 1.0 for the
    # IMPLIED_KNOW_STATE in which the variable was observed.  The previously-built CLUST_EVID_STATE.clust_idx provides our
    # starting point.  Proceed conditionally:  only proceed if the length of the evidentiary profile EVID_PROF_CLUST.clust_idx
    # is greater than zero.
    if (length(EVID_PROF_CLUST.clust_idx) > 0){
      OBS_KNOW_STATE_ARR.clust_idx <- array(dimnames = list(EVID_PROF_CLUST.clust_idx, KNOW_STATE_SPEC[,"IMPLIED_KNOW_STATE"], EVID_STATE_CLUST_SIG.clust_idx),
                                            dim = list(length(EVID_PROF_CLUST.clust_idx), length(KNOW_STATE_SPEC[,"IMPLIED_KNOW_STATE"]), length(EVID_STATE_CLUST_SIG.clust_idx)))
      #
      # Now cycle through EVID_STATE_CLUST_SIG.clust_idx, EVID_PROF_CLUST.clust_idx assigning unity values for the corresponding
      # rows of CLUST_EVID_STATE.clust_idx.  Assign zero to the remaining values.
      for (evid_state_idx in EVID_STATE_CLUST_SIG.clust_idx){   ## evid_state_idx <- EVID_STATE_CLUST_SIG.clust_idx[1]
        for (evid_prof_idx in EVID_PROF_CLUST.clust_idx){   ## evid_prof_idx <- EVID_PROF_CLUST.clust_idx[1]
          OBS_KNOW_STATE_ARR.clust_idx[evid_prof_idx, CLUST_EVID_STATE.clust_idx[evid_state_idx, evid_prof_idx], evid_state_idx] <- 1
        }
      }
      OBS_KNOW_STATE_ARR.clust_idx[which(is.na(OBS_KNOW_STATE_ARR.clust_idx))] <- 0
      #
      # Concatenate OBS_KNOW_STATE_ARR.clust_idx onto LEARNER_KNOW_STATE.clust_idx.  This provides a complete knowledge-state
      # array for the cluster.
      LEARNER_KNOW_STATE.clust_idx <- abind(LEARNER_KNOW_STATE.clust_idx, OBS_KNOW_STATE_ARR.clust_idx, along = 1)
    }
    #
    # Assign LEARNER_KNOW_STATE.clust_idx as the clust_idxᵗʰ element of list LEARNER_KNOW_STATE.prof_idx.
    LEARNER_KNOW_STATE.prof_idx[[clust_idx]] <- LEARNER_KNOW_STATE.clust_idx
    #
  } ## Close for (clust_idx in names(EVID_PROF_CLUST.prof_idx)) — Subnet clusters
  #
  # Assign the list LEARNER_KNOW_STATE.prof_idx of conditional knowledge-state arrays as the prof_idxᵗʰ element of
  # LEARNER_KNOW_STATE.
  LEARNER_KNOW_STATE[[prof_idx]] <- LEARNER_KNOW_STATE.prof_idx
  #
} ## Close prof_idx in rownames(EVID_PROF_STATE)) — Observed evidentiary profiles
#
EVID_PROF_STATE[["LEARNER_KNOW_STATE"]] <- LEARNER_KNOW_STATE
#
# 7︎⃣ Associate the LEARNING_STANDARD_ID-marginalized CDFs.  EVID_PROF_STATE and EoL_WIDE contain the evidentiary framework by
# which we assemble LEARNER_KNOW_STATE, our intended output.  We again manage two dimensions of variability:  evidentiary
# profiles and states, and their coverage of the submap clusters.  Our approach follows.
# ⓐ Expand the LEARNER_KNOW_STATE attributes into data frames.  LEARNER_KNOW_STATE is provided for each evidentiary-profile ×
#     subgraph configuration.  Each such pair contains an array whose dimensions are target-variable estimation profile ×
#     IMPLIED_KNOW_STATE × evidentiary state.  Reshape this into a two-dimensional table such that the evidentiary-state
#     signature is a distinguishing attribute for the corresponding table of conditional probabilities of target-variable states.
#     We concatenate these tables "vertically", then join them onto the corresponding CLUST_EVID_STATE tables.  This gives a data
#     frame that can be joined onto EoL_WIDE in order to associate the estimated knowledge states with individual subjects.
# ⓑ Merge the resulting LEARNER_KNOW_STATE tables with EoL_WIDE.  We then have a wide table with distinct columns for the
#     probability that the subject's knowledge is in a given state for each variable.
# ⓒ Reshape the wide-table LEARNER_KNOW_STATE tables into long tables with columns for the IMPLIED_KNOW_STATE as well
#     as a MEAS column with the probability that the subject is in the corresponding state for a given variable.
# ⓓ Prepare the LEARNER_KNOW_STATE long table and write it out as a csv file.
#
# ⓐ Expand the LEARNER_KNOW_STATE attributes into data frames.
# Step 7︎⃣: reshape the conditional knowledge-state arrays into a per-learner long table and export it.
KNOW_STATE <- list()
for (prof_idx in rownames(EVID_PROF_STATE)){   ## prof_idx <- rownames(EVID_PROF_STATE)[1]
  # First extract the EVID_STATE and KNOW_STATE attributes from EVID_PROF_STATE.  Also get EoL_WIDE records corresponding to
  # the prof_idxᵗʰ evidentiary profile.
  KNOW_STATE.prof_idx <- EVID_PROF_STATE[["LEARNER_KNOW_STATE"]][[prof_idx]]
  CLUST_EVID_STATE.prof_idx <- EVID_PROF_STATE[["CLUST_EVID_STATE"]][[prof_idx]]
  EoL_WIDE.prof_idx <- EoL_WIDE[EoL_WIDE[,"EVID_PROF_SIG"] == prof_idx,]
  #
  # Prepare EoL_WIDE.prof_idx to subsequently be merged with the wide-format KNOW_STATE table.  We only need the
  # evidentiary-state attributes, plus a STUDENT_ID attribute derived from the rownames.
  EoL_WIDE.prof_idx <- EoL_WIDE.prof_idx[unlist(SUBMAP_VERTEX)]
  EoL_WIDE.prof_idx["STUDENT_ID"] <- rownames(EoL_WIDE.prof_idx)
  #
  # (Removed unused local KNOW_STATE_LONG.prof_idx — it was declared but never referenced;
  #  KNOW_STATE.prof_idx itself is reused as the per-cluster accumulator below.)
  for (clust_idx in names(KNOW_STATE.prof_idx)){   ## clust_idx <- names(KNOW_STATE.prof_idx)[1]
    # Now extract CLUST_EVID_STATE and KNOW_STATE for the clust_idxᵗʰ cluster.
    KNOW_STATE.clust_idx <- KNOW_STATE.prof_idx[[clust_idx]]
    CLUST_EVID_STATE.clust_idx <- CLUST_EVID_STATE.prof_idx[[clust_idx]]
    CLUST_EVID_PROF.clust_idx <- colnames(CLUST_EVID_STATE.clust_idx)
    #
    # Reshape the array for the clust_idxᵗʰ subgraph cluster × the prof_idxᵗʰ evidentiary profile into a data frame:
    # one wide table per evidentiary-state slice, tagged with its state signature.
    KNOW_STATE_WIDE.clust_idx <- list()
    for (evid_state_idx in dimnames(KNOW_STATE.clust_idx)[[3]]){   ## evid_state_idx <- dimnames(KNOW_STATE.clust_idx)[[3]][1]
      KNOW_STATE_WIDE.clust_idx[[evid_state_idx]] <- as.data.frame(KNOW_STATE.clust_idx[,, evid_state_idx])
      KNOW_STATE_WIDE.clust_idx[[evid_state_idx]]["LEARNING_STANDARD_ID"] <- rownames(KNOW_STATE_WIDE.clust_idx[[evid_state_idx]])
      KNOW_STATE_WIDE.clust_idx[[evid_state_idx]]["CLUST_EVID_STATE"] <- evid_state_idx
      #
    } ## CLOSE evid_state_idx in dimnames(KNOW_STATE.clust_idx)[[3]]
    # Concatenate the elements of KNOW_STATE_WIDE.clust_idx into a single data frame.
    KNOW_STATE_WIDE.clust_idx <- do.call(what = rbind, args = KNOW_STATE_WIDE.clust_idx)
    rownames(KNOW_STATE_WIDE.clust_idx) <- NULL
    #
    # Now prepare CLUST_EVID_STATE.clust_idx to merge with KNOW_STATE_WIDE.clust_idx.  Proceed conditionally:  unmeasured
    # clusters must be treated differently.
    if(length(CLUST_EVID_STATE.clust_idx) > 0){
      # Assign the rownames (evidentiary-state signatures) as an explicit join attribute, then merge.
      CLUST_EVID_STATE.clust_idx["CLUST_EVID_STATE"] <- rownames(CLUST_EVID_STATE.clust_idx)
      #
      KNOW_STATE_WIDE.clust_idx <- merge(x = KNOW_STATE_WIDE.clust_idx, y = CLUST_EVID_STATE.clust_idx)
      #
      # Now merge with EoL_WIDE.prof_idx.  We expect the row count of the post-merge KNOW_STATE_WIDE.clust_idx to be
      # length(unique(LEARNING_STANDARD_ID)) × length(unique(STUDENT_ID)), one for each coinciding pair.  Merge by the
      # cluster evidentiary profile, CLUST_EVID_PROF.clust_idx.
      KNOW_STATE_WIDE.clust_idx <- merge(x = KNOW_STATE_WIDE.clust_idx[c("LEARNING_STANDARD_ID", CLUST_EVID_PROF.clust_idx,
                                                                         KNOW_STATE_SPEC[,"IMPLIED_KNOW_STATE"])],
                                         y = EoL_WIDE.prof_idx[c("STUDENT_ID", CLUST_EVID_PROF.clust_idx)],
                                         by = CLUST_EVID_PROF.clust_idx)
      #
      # Coerce IMPLIED_KNOW_STATE variables to numeric (array storage rendered them character).
      for (state_idx in KNOW_STATE_SPEC[,"IMPLIED_KNOW_STATE"]) KNOW_STATE_WIDE.clust_idx[,state_idx] <-
        as.numeric(KNOW_STATE_WIDE.clust_idx[,state_idx])
      #
      # Melt KNOW_STATE_WIDE.clust_idx into a long table.
      # BUGFIX: the melt() argument is `measure.vars`; the misspelled `meas.vars` was silently absorbed
      # by `...` and the default (all non-id columns — here coincidentally the same set) applied instead.
      KNOW_STATE.clust_idx <- melt(data = KNOW_STATE_WIDE.clust_idx[c("STUDENT_ID","LEARNING_STANDARD_ID",
                                                                      KNOW_STATE_SPEC[,"IMPLIED_KNOW_STATE"])],
                                   id.vars = c("STUDENT_ID","LEARNING_STANDARD_ID"),
                                   measure.vars = KNOW_STATE_SPEC[,"IMPLIED_KNOW_STATE"],
                                   variable.name = "IMPLIED_KNOW_STATE", value.name = "MEAS")
      #
    } else {
      # For unmeasured clusters, simply cross KNOW_STATE_WIDE.clust_idx with the STUDENT_IDs in EoL_WIDE.prof_idx
      # (a merge with no common columns yields the Cartesian product), then melt.
      KNOW_STATE_WIDE.clust_idx <- merge(x = KNOW_STATE_WIDE.clust_idx, y = EoL_WIDE.prof_idx["STUDENT_ID"])
      for (state_idx in KNOW_STATE_SPEC[,"IMPLIED_KNOW_STATE"]) KNOW_STATE_WIDE.clust_idx[,state_idx] <-
        as.numeric(KNOW_STATE_WIDE.clust_idx[,state_idx])
      #
      # Melt KNOW_STATE_WIDE.clust_idx into a long table (same `measure.vars` fix as above).
      KNOW_STATE.clust_idx <- melt(data = KNOW_STATE_WIDE.clust_idx[c("STUDENT_ID","LEARNING_STANDARD_ID",
                                                                      KNOW_STATE_SPEC[,"IMPLIED_KNOW_STATE"])],
                                   id.vars = c("STUDENT_ID","LEARNING_STANDARD_ID"),
                                   measure.vars = KNOW_STATE_SPEC[,"IMPLIED_KNOW_STATE"],
                                   variable.name = "IMPLIED_KNOW_STATE", value.name = "MEAS")
      #
    } ### CLOSE else
    #
    # Write KNOW_STATE.clust_idx as the clust_idxᵗʰ member of list KNOW_STATE.prof_idx.
    KNOW_STATE.prof_idx[[clust_idx]] <- KNOW_STATE.clust_idx
    #
  } ## CLOSE clust_idx in names(KNOW_STATE.prof_idx))
  # Concatenate the elements of KNOW_STATE.prof_idx into a single data frame.  Write the result as the prof_idxᵗʰ element
  # of KNOW_STATE.
  KNOW_STATE.prof_idx <- do.call(what = rbind, args = KNOW_STATE.prof_idx)
  rownames(KNOW_STATE.prof_idx) <- NULL
  KNOW_STATE[[prof_idx]] <- KNOW_STATE.prof_idx
  #
} ## CLOSE prof_idx in rownames(EVID_PROF_STATE))
#
# Concatenate the elements of KNOW_STATE into a single data frame.
KNOW_STATE <- do.call(args = KNOW_STATE, what = rbind)
rownames(KNOW_STATE) <- NULL
#
# Clean up the LEARNING_STANDARD_ID attribute:  truncate off the leading "X" sentinel only.
# BUGFIX: anchored with "^" — the unanchored pattern would also strip an "X" embedded inside an ID.
KNOW_STATE[,"LEARNING_STANDARD_ID"] <- gsub(x = KNOW_STATE[,"LEARNING_STANDARD_ID"], pattern = "^X", replacement = "")
#
# Now apply two date-stamp attributes to KNOW_STATE.  EVID_STATE_AS_OF is the date of the most-recent evidentiary-state
# measurement.  KNOW_STATE_AS_OF is the date of the estimate, conditioned on the evidentiary state in the former.  We get
# EVID_STATE_AS_OF by max-aggregation of EoL_MEAS and then merging the result by STUDENT_ID with KNOW_STATE.
# KNOW_STATE_AS_OF is simply the system date of the calculation.
EoL_MEAS[,"DATE_OF_MEAS"] <- as.Date(x = EoL_MEAS[,"DATE_OF_MEAS"], "%Y-%m-%d")
EVID_STATE_AS_OF <- aggregate(formula = DATE_OF_MEAS ~ STUDENT_ID + LEARNING_STANDARD_ID, data = EoL_MEAS, FUN = max)
colnames(EVID_STATE_AS_OF) <- c("STUDENT_ID","LEARNING_STANDARD_ID","EVID_STATE_AS_OF")
EVID_STATE_AS_OF[,"LEARNING_STANDARD_ID"] <- gsub(x = EVID_STATE_AS_OF[,"LEARNING_STANDARD_ID"],
                                                  pattern = "^X", replacement = "")
KNOW_STATE <- merge(x = KNOW_STATE, y = EVID_STATE_AS_OF, all.x = TRUE)
KNOW_STATE["KNOW_STATE_AS_OF"] <- Sys.Date()
KNOW_STATE <- merge(x = KNOW_STATE, y = LEARNING_STANDARD)
#
# Finally, reorder the records according to STUDENT_ID, LEARNING_STANDARD_ID, IMPLIED_KNOW_STATE.  Then
# coerce attributes to UTF-8 character and write out as a csv table.
KNOW_STATE[,"IMPLIED_KNOW_STATE"] <- factor(x = KNOW_STATE[,"IMPLIED_KNOW_STATE"],
                                            levels = KNOW_STATE_SPEC[,"IMPLIED_KNOW_STATE"])
# BUGFIX: IMPLIED_KNOW_STATE is now included as the third sort key; the original order() call omitted it
# even though the comment promised it and the factor above exists solely to give it the KNOW_STATE_SPEC ordering.
KNOW_STATE <- KNOW_STATE[order(KNOW_STATE[,"STUDENT_ID"], KNOW_STATE[,"LEARNING_STANDARD_ID"],
                               KNOW_STATE[,"IMPLIED_KNOW_STATE"]),
                         c("STUDENT_ID","LEARNING_STANDARD_ID","LEARNING_STANDARD_CD",
                           "IMPLIED_KNOW_STATE","MEAS","EVID_STATE_AS_OF",
                           "KNOW_STATE_AS_OF")]
for (col_idx in c("EVID_STATE_AS_OF","KNOW_STATE_AS_OF","IMPLIED_KNOW_STATE")) KNOW_STATE[,col_idx] <-
  as.character(KNOW_STATE[,col_idx])
for (col_idx in colnames(KNOW_STATE)) {
  KNOW_STATE[,col_idx] <- enc2utf8(as.character(KNOW_STATE[,col_idx]))
  KNOW_STATE[is.na(KNOW_STATE[,col_idx]),col_idx] <- ""
}
#
write.csv(x = KNOW_STATE,
          file = paste(PROF_TASK.dir, "LEARNER_KNOW_STATE_BRUTE_FORCE.csv", sep = "/"),
          row.names = FALSE, eol = "\r\n", fileEncoding = "UTF-8", quote = TRUE)
#
library(agridat) ### Name: hayman.tobacco ### Title: Diallel cross of Aztec tobacco ### Aliases: hayman.tobacco ### Keywords: datasets ### ** Examples # 1951 data. Fit the first REML model of Mohring 2011 Supplement. data(hayman.tobacco) dat1 <- subset(hayman.tobacco, year==1951) # Make a factor 'comb' in which G1xG2 is the same cross as G2xG1 dat1 <- transform(dat1, comb = ifelse(as.character(male) < as.character(female), paste0(male,female), paste0(female,male))) # 'dr' is the direction of the cross, 0 for self dat1$dr <- 1 dat1 <- transform(dat1, dr = ifelse(as.character(male) < as.character(female), -1, dr)) dat1 <- transform(dat1, dr = ifelse(as.character(male) == as.character(female), 0, dr)) # ASREML code for Mixed Griffing. # Mohring Table 2, column 2 (after dividing by 10^2) gives variances: # GCA 12.77, SCA 11.09, RSCA .65, Error 4.23. # Mohring Supplement ASREML code part1 model is: # y ~ mu r !r mother and(father) combination combination.dr # Note that the levels of 'male' and 'female' are the same, so the # and(female) term tells asreml to use the same levels (or, equivalently, # fix the correlation of the male/female levels to be 1. # The block effect is minimial and therefore ignored. ## m1 <- asreml(day~1, data=dat1, ## random = ~ male + and(female) + comb + comb:dr) ## require(lucid) ## vc(m1) ## effect component std.error z.ratio con ## male!male.var 12.77 7.502 1.7 Positive ## comb!comb.var 11.11 3.353 3.3 Positive ## comb:dr!comb.var 0.6603 0.4926 1.3 Positive ## R!variance 4.185 0.7449 5.6 Positive # -------------------- # 1952 data. Reproduce table 3 and figure 2 of Hayman 1954b. dat2 <- subset(hayman.tobacco, year==1952) # Does flowering date follow a gamma distn? Maybe. 
require(lattice) densityplot(~day, data=dat2, main="hayman.tobacco", xlab="flowering date") d1 <- subset(dat2, block=='B1') d2 <- subset(dat2, block=='B2') if(require(reshape2)){ m1 <- acast(d1, male~female, value.var='day') m2 <- acast(d2, male~female, value.var='day') mn1 <- (m1+t(m1))/2 mn2 <- (m2+t(m2))/2 # Variance and covariance of 'rth' offspring vr1 <- apply(mn1, 1, var) vr2 <- apply(mn2, 1, var) wr1 <- apply(mn1, 1, cov, diag(mn1)) wr2 <- apply(mn2, 1, cov, diag(mn2)) # Remove row names to prevent a mild warning rownames(mn1) <- rownames(mn2) <- NULL summ <- data.frame(rbind(mn1,mn2)) summ$block <- rep(c('B1','B2'), each=8) summ$vr <- c(vr1,vr2) summ$wr <- c(wr1,wr2) summ$male <- rep(1:8,2) # Vr and Wr match Hayman table 3 with(summ, plot(wr~vr, type='n', main="hayman.tobacco")) with(summ, text(vr, wr, male)) # Match Hayman figure 2 abline(0,1,col="gray") # Hayman notes that 1 and 3 do not lie along the line, so modifies them # and re-analyzes. }
/data/genthat_extracted_code/agridat/examples/hayman.tobacco.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
2,894
r
library(agridat) ### Name: hayman.tobacco ### Title: Diallel cross of Aztec tobacco ### Aliases: hayman.tobacco ### Keywords: datasets ### ** Examples # 1951 data. Fit the first REML model of Mohring 2011 Supplement. data(hayman.tobacco) dat1 <- subset(hayman.tobacco, year==1951) # Make a factor 'comb' in which G1xG2 is the same cross as G2xG1 dat1 <- transform(dat1, comb = ifelse(as.character(male) < as.character(female), paste0(male,female), paste0(female,male))) # 'dr' is the direction of the cross, 0 for self dat1$dr <- 1 dat1 <- transform(dat1, dr = ifelse(as.character(male) < as.character(female), -1, dr)) dat1 <- transform(dat1, dr = ifelse(as.character(male) == as.character(female), 0, dr)) # ASREML code for Mixed Griffing. # Mohring Table 2, column 2 (after dividing by 10^2) gives variances: # GCA 12.77, SCA 11.09, RSCA .65, Error 4.23. # Mohring Supplement ASREML code part1 model is: # y ~ mu r !r mother and(father) combination combination.dr # Note that the levels of 'male' and 'female' are the same, so the # and(female) term tells asreml to use the same levels (or, equivalently, # fix the correlation of the male/female levels to be 1. # The block effect is minimial and therefore ignored. ## m1 <- asreml(day~1, data=dat1, ## random = ~ male + and(female) + comb + comb:dr) ## require(lucid) ## vc(m1) ## effect component std.error z.ratio con ## male!male.var 12.77 7.502 1.7 Positive ## comb!comb.var 11.11 3.353 3.3 Positive ## comb:dr!comb.var 0.6603 0.4926 1.3 Positive ## R!variance 4.185 0.7449 5.6 Positive # -------------------- # 1952 data. Reproduce table 3 and figure 2 of Hayman 1954b. dat2 <- subset(hayman.tobacco, year==1952) # Does flowering date follow a gamma distn? Maybe. 
require(lattice) densityplot(~day, data=dat2, main="hayman.tobacco", xlab="flowering date") d1 <- subset(dat2, block=='B1') d2 <- subset(dat2, block=='B2') if(require(reshape2)){ m1 <- acast(d1, male~female, value.var='day') m2 <- acast(d2, male~female, value.var='day') mn1 <- (m1+t(m1))/2 mn2 <- (m2+t(m2))/2 # Variance and covariance of 'rth' offspring vr1 <- apply(mn1, 1, var) vr2 <- apply(mn2, 1, var) wr1 <- apply(mn1, 1, cov, diag(mn1)) wr2 <- apply(mn2, 1, cov, diag(mn2)) # Remove row names to prevent a mild warning rownames(mn1) <- rownames(mn2) <- NULL summ <- data.frame(rbind(mn1,mn2)) summ$block <- rep(c('B1','B2'), each=8) summ$vr <- c(vr1,vr2) summ$wr <- c(wr1,wr2) summ$male <- rep(1:8,2) # Vr and Wr match Hayman table 3 with(summ, plot(wr~vr, type='n', main="hayman.tobacco")) with(summ, text(vr, wr, male)) # Match Hayman figure 2 abline(0,1,col="gray") # Hayman notes that 1 and 3 do not lie along the line, so modifies them # and re-analyzes. }
#' vcf annotation with MORFEE #' #' @param myvcf_annot an ANNOVAR annotated VCF object to annotate with MORFEE #' @param morfee_data data object obtained from get.morfee.data() #' #' @importFrom stats na.omit #' @import foreach #' @import VariantAnnotation #' @import Biostrings #' @import GenomicRanges #' #' @export #' morfee.annotation <- function(myvcf_annot, morfee_data){ myvcf_annot_info <- info(myvcf_annot) myvcf_annot_info$MORFEE_uATG <- NA myvcf_annot_info$MORFEE_uSTOP <- NA myvcf_annot_header <- rbind(info(header(myvcf_annot)), data.frame(Number = ".", Type = "String", Description = "New ATG annotation provided by MORFEE", stringsAsFactors = FALSE), data.frame(Number = ".", Type = "String", Description = "Deletion STOP annotation provided by MORFEE", stringsAsFactors = FALSE) ) rownames(myvcf_annot_header) <- c(rownames(info(header(myvcf_annot))),"MORFEE_uATG", "MORFEE_uSTOP") info(header(myvcf_annot)) <- myvcf_annot_header info(myvcf_annot) <- myvcf_annot_info i <- NULL for(i in 1:nrow(myvcf_annot_info)){ my_func <- as.character(myvcf_annot_info[i,"Func.refGene"]) if(my_func!="UTR5"){ message(paste0("Skip variant ",i,": not in UTR5 region")) next } my_refgene <- as.character(myvcf_annot_info[i,"GeneDetail.refGene"]) if(my_refgene=="."){ message(paste0("Skip variant ",i,": has no GeneDetail.refGene annotation")) next } my_gene <- as.character(myvcf_annot_info[i,"Gene.refGene"]) my_nm_list <- parse_GeneDetail.refGene(my_refgene) my_snp_pos_geno <- start(ranges(rowRanges(myvcf_annot)))[i] my_chr <- as.character(seqnames(rowRanges(myvcf_annot))[i]) if(length(grep("chr",my_chr))<1){ my_chr <- paste0("chr",my_chr) } my_gencode_seque <- morfee_data[["GENCODE_SEQ"]][which(morfee_data[["GENCODE_SEQ_ORDER"]]==my_chr)] # Loop for each transcript (row) for(nm in 1:nrow(my_nm_list)){ if(!(as.character(my_nm_list[nm,5]) %in% c("A","T","C","G"))){ message(paste0("Skip variant ",i,": reference allele not A, T, C or G! 
",my_nm_list[nm,5])) # message(" it could be an indel, but not yet supported!") next } if(!(as.character(my_nm_list[nm,6]) %in% c("A","T","C","G"))){ message(paste0("Skip variant ",i,": alternative allele not A, T, C or G! ",my_nm_list[nm,6])) # message(" it could be an indel, but not yet supported!") next } my_nm <- my_nm_list[nm,1] my_snp <- my_nm_list[nm,2] my_upordown <- my_nm_list[nm,3] my_snp_pos_rel <- as.numeric(my_nm_list[nm,4]) # Position from ATG, ex: 94 my_nm_id <- grep(paste0(my_nm,"\\."), morfee_data[["GENCODE_METAD"]]$V2) if(length(my_nm_id)==0){ message(paste0("Skip variant ",i,": annotation for ",my_nm_id," was not found in GENCODE")) next } my_enst <- morfee_data[["GENCODE_METAD"]][my_nm_id,1][1] if(morfee_data[["GRCh"]]==37){ my_transcript_id <- grep(paste0(my_enst,"_"), morfee_data[["GENCODE_ANNOT"]]$transcript_id) }else if(morfee_data[["GRCh"]]==38){ my_transcript_id <- grep(my_enst, morfee_data[["GENCODE_ANNOT"]]$transcript_id) }else{ stop("Reference Genome unknown") } gencode_annot_sub <- morfee_data[["GENCODE_ANNOT"]][my_transcript_id,] gencode_annot_cds <- gencode_annot_sub[gencode_annot_sub$type=="CDS",] gencode_annot_exon <- gencode_annot_sub[gencode_annot_sub$type=="exon",] gencode_annot_transcript_type <- unique(gencode_annot_exon$transcript_type) if(length(gencode_annot_transcript_type)<1){ message(paste0("Skip variant ",i,": transcript type is not protein coding")) next }else if(!(gencode_annot_transcript_type %in% "protein_coding")){ message(paste0("Skip variant ",i,": transcript type is not protein coding")) next } my_init_codon_r <- gencode_annot_sub[gencode_annot_sub$type=="start_codon",] if(nrow(my_init_codon_r)==0){ message(paste0("Skip variant ",i,": associated transcript without start_codon")) next } my_init_codon_start <- as.numeric(my_init_codon_r[,"start"])[1] # Several start codon? my_init_codon_end <- as.numeric(my_init_codon_r[,"end"])[1] # Several start codon? 
my_stop_codon_r <- gencode_annot_sub[gencode_annot_sub$type=="stop_codon",] if(nrow(my_stop_codon_r)==0){ message(paste0("Skip variant ",i,": associated transcript without stop_codon")) next } my_stop_codon_start <- as.numeric(my_stop_codon_r[,"start"])[1] # Several stop codon? my_stop_codon_end <- as.numeric(my_stop_codon_r[,"end"])[1] # Several stop codon? if(my_init_codon_end < my_stop_codon_end){ my_init_codon_5 <- my_init_codon_start my_init_codon_3 <- my_init_codon_end my_stop_codon_5 <- my_stop_codon_start my_stop_codon_3 <- my_stop_codon_end }else{ my_init_codon_5 <- my_init_codon_end my_init_codon_3 <- my_init_codon_start my_stop_codon_5 <- my_stop_codon_end my_stop_codon_3 <- my_stop_codon_start } ######################################## # Test gene orientation if(my_init_codon_end < my_stop_codon_end){ gencode_annot_exon <- gencode_annot_exon[order(as.numeric(gencode_annot_exon$exon_number), decreasing = FALSE),] exons_length <- (gencode_annot_exon[,"end"]+1)-gencode_annot_exon[,"start"] my_snp_exon <- which(gencode_annot_exon[,"start"] <= my_snp_pos_geno & gencode_annot_exon[,"end"] >= my_snp_pos_geno) my_start_exon <- which(gencode_annot_exon[,"start"] <= my_init_codon_5 & gencode_annot_exon[,"end"] >= my_init_codon_5) my_cdna_length_A <- my_cdna_length_B <- 0 if(length(my_snp_exon)==0){ message(paste0("Skip variant ",i,": not in an exonic part")) next } # Reference sequence stats for(exon_i in 1:nrow(gencode_annot_exon)){ if(exon_i==1){ my_cdna <- subseq(my_gencode_seque, start=gencode_annot_exon[exon_i,"start"], end=gencode_annot_exon[exon_i,"end"])[[1]] }else{ my_cdna_i <- subseq(my_gencode_seque, start=gencode_annot_exon[exon_i,"start"], end=gencode_annot_exon[exon_i,"end"])[[1]] my_cdna <- c(my_cdna, my_cdna_i) } # Calcul position of variant in cDNA if(exon_i < my_snp_exon){ my_cdna_length_A <- my_cdna_length_A + exons_length[exon_i] }else if(exon_i == my_snp_exon){ my_snp_pos_cdna <- my_cdna_length_A + ((my_snp_pos_geno+1) - 
gencode_annot_exon[exon_i,"start"]) } # Calcul position of reference A(TG) in cDNA if(exon_i < my_start_exon){ my_cdna_length_B <- my_cdna_length_B + exons_length[exon_i] }else if(exon_i == my_start_exon){ my_init_codon_5_cdna <- my_cdna_length_B + ((my_init_codon_5 + 1) - gencode_annot_exon[exon_i,"start"]) } } # Find codon start in reference sequence stats_orig <- matchPattern(morfee_data[["SEQ_INIT"]], my_cdna) # Found all STOP in reference sequence for(j in 1:length(morfee_data[["SEQ_STOP"]])){ stats_stop_orig_j <- matchPattern(morfee_data[["SEQ_STOP"]][[j]], my_cdna) if(j==1){ stats_stop_orig <- stats_stop_orig_j }else{ stats_stop_orig <- c(stats_stop_orig, stats_stop_orig_j) } } my_ref_allele <- as.character(subseq(my_cdna, start=my_snp_pos_cdna, end=my_snp_pos_cdna)) if(my_ref_allele!=my_nm_list[nm,5]){ message(paste0("Skip variant ",i,": mismatch between alleles")) next } my_ref_ATG <- as.character(subseq(my_cdna, start=my_init_codon_5_cdna, end=(my_init_codon_5_cdna+2))) if(my_ref_ATG!="ATG"){ message(paste0("Skip variant ",i,": reference ATG no detected")) next } # Replace my sequence with SNP my_cdna_updated <- replaceLetterAt(my_cdna, my_snp_pos_cdna, my_nm_list[nm,6]) ######################################## }else{ # message("Opposite orientation!") gencode_annot_exon <- gencode_annot_exon[order(as.numeric(gencode_annot_exon$exon_number), decreasing = TRUE),] exons_length <- (gencode_annot_exon[,"end"]+1)-gencode_annot_exon[,"start"] my_snp_exon <- which(gencode_annot_exon[,"start"] <= my_snp_pos_geno & gencode_annot_exon[,"end"] >= my_snp_pos_geno) my_start_exon <- which(gencode_annot_exon[,"start"] <= my_init_codon_5 & gencode_annot_exon[,"end"] >= my_init_codon_5) my_cdna_length_A <- my_cdna_length_B <- 0 if(length(my_snp_exon)==0){ message(paste0("Skip variant ",i,": not in an exonic part")) next } # Reference sequence stats for(exon_i in 1:nrow(gencode_annot_exon)){ if(exon_i==1){ my_cdna <- subseq(my_gencode_seque, 
start=gencode_annot_exon[exon_i,"start"], end=gencode_annot_exon[exon_i,"end"])[[1]] my_cdna <- reverse(complement(my_cdna)) }else{ my_cdna_i <- subseq(my_gencode_seque, start=gencode_annot_exon[exon_i,"start"], end=gencode_annot_exon[exon_i,"end"])[[1]] my_cdna_i <- reverse(complement(my_cdna_i)) my_cdna <- c(my_cdna, my_cdna_i) } # Calcul position of variant in cDNA if(exon_i < my_snp_exon){ my_cdna_length_A <- my_cdna_length_A + exons_length[exon_i] }else if(exon_i == my_snp_exon){ my_snp_pos_cdna <- my_cdna_length_A + (gencode_annot_exon[exon_i,"end"] - (my_snp_pos_geno-1)) } # Calcul position of reference A(TG) in cDNA if(exon_i < my_start_exon){ my_cdna_length_B <- my_cdna_length_B + exons_length[exon_i] }else if(exon_i == my_start_exon){ my_init_codon_5_cdna <- my_cdna_length_B + (gencode_annot_exon[exon_i,"end"] - (my_init_codon_3+1)) } } # Find codon start in reference sequence stats_orig <- matchPattern(morfee_data[["SEQ_INIT"]], my_cdna) # Found all STOP in reference sequence for(j in 1:length(morfee_data[["SEQ_STOP"]])){ stats_stop_orig_j <- matchPattern(morfee_data[["SEQ_STOP"]][[j]], my_cdna) if(j==1){ stats_stop_orig <- stats_stop_orig_j }else{ stats_stop_orig <- c(stats_stop_orig, stats_stop_orig_j) } } my_ref_allele <- as.character(subseq(my_cdna, start=my_snp_pos_cdna, end=my_snp_pos_cdna)) if(my_ref_allele!=my_nm_list[nm,5]){ message(paste0("Skip variant ",i,": mismatch between alleles")) next } my_ref_ATG <- as.character(subseq(my_cdna, start=my_init_codon_5_cdna, end=(my_init_codon_5_cdna+2))) if(my_ref_ATG!="ATG"){ message(paste0("Skip variant ",i,": reference ATG no detected")) next } # Replace my sequence with SNP my_cdna_updated <- replaceLetterAt(my_cdna, my_snp_pos_cdna, my_nm_list[nm,6]) } ######################################## # Find codon start in mutated sequence stats_mut <- matchPattern(morfee_data[["SEQ_INIT"]], my_cdna_updated) # Found all STOP in mutated sequence for(j in 1:length(morfee_data[["SEQ_STOP"]])){ stats_stop_mut_j 
<- matchPattern(morfee_data[["SEQ_STOP"]][[j]], my_cdna_updated) if(j==1){ stats_stop_mut <- stats_stop_mut_j }else{ stats_stop_mut <- c(stats_stop_mut, stats_stop_mut_j) } } # Comparer codon start in reference and mutated sequences new.atg <- ranges(stats_mut)[!c(ranges(stats_mut) %in% ranges(stats_orig)) ,] # Compare stop codons in reference and mutated sequences del.stop <- ranges(stats_stop_orig)[!c(ranges(stats_stop_orig) %in% ranges(stats_stop_mut)) ,] if(length(new.atg)>0){ message("New ATG detected!") if(my_init_codon_end < my_stop_codon_end){ my.strand <- "forward" }else{ my.strand <- "reverse" } new.atg.distance <- my_init_codon_5_cdna - (start(new.atg)[1]) test.frame.uatg <- (new.atg.distance%%3) if(test.frame.uatg==0){ in.frame <- "in_frame" }else{ in.frame <- paste0("out_of_frame_(",test.frame.uatg,")") } # Found all STOP for(j in 1:length(morfee_data[["SEQ_STOP"]])){ my_stop_j <- matchPattern(morfee_data[["SEQ_STOP"]][[j]], my_cdna_updated[ start(range(new.atg)) : length(my_cdna_updated) ]) if(j==1){ my_stops <- my_stop_j }else{ my_stops <- c(my_stops, my_stop_j) } } # First stop in phase to the new codon start my_stops_sort <- sort(start(ranges(my_stops))) my_first_stop <- my_stops_sort[my_stops_sort%%3==1][1] # Lenght of proteins generated.prot.length <- (my_first_stop-1)/3 ref.prot.length <- (sum(gencode_annot_cds[,"end"]+1 - gencode_annot_cds[,"start"] ) -3)/3 if(is.na(my_first_stop)){ message(paste0("Skip variant ",i,": new ATG detected but no STOP in phase")) next } # Determine whether the ORF is overlapping, not overlapping or elongated CD if(my_first_stop < my_init_codon_5_cdna){ overlapping.prot <- "not_overlapping" }else if( in.frame=="in_frame" & (my_first_stop > my_init_codon_5_cdna) ){ overlapping.prot <- "elongated_CDS" }else{ overlapping.perc <- ((my_first_stop-my_init_codon_5_cdna)/(ref.prot.length*3))*100 overlapping.perc.round <- round(overlapping.perc, digits = 2) overlapping.prot <- paste0("overlapping_",overlapping.perc.round,"%") 
} message( paste("For",my_gene,"-",my_nm,"and",my_snp)) message(paste0(" - New uATG detected at: ",new.atg.distance," from the main ATG!")) message( paste(" - new uATG is",in.frame,"to the main ATG!")) message( paste(" - new generated protein has a length of",generated.prot.length,"(aa) vs",ref.prot.length,"(aa)")) # message(paste0(" - DEBUG: i=",i," ; nm=",nm)) message("\n\n") # Update myvcf_annot_info new_field <- paste( na.omit(c( myvcf_annot_info[i,"MORFEE_uATG"], paste0(my_nm,":",my.strand,",",new.atg.distance,",",in.frame,",",overlapping.prot,",",generated.prot.length,"[/",ref.prot.length,"]","(aa)")) ) , collapse="|") myvcf_annot_info[i,"MORFEE_uATG"] <- new_field }# END new ATG if(length(del.stop)>0){ # Use stats_orig, but could use stats_mut uatg <- start(stats_orig)[ c(start(del.stop) - start(stats_orig)) > 0] uatg_in_frame <- uatg[((start(del.stop) - uatg) %% 3)==0] del.stop.distance <- my_init_codon_5_cdna - (start(del.stop)[1]) if(length(uatg_in_frame)>0){ if(my_init_codon_end < my_stop_codon_end){ my.strand <- "forward" }else{ my.strand <- "reverse" } print( "STOP deletion detected!") print( " - uSTOP deletion in ORF detected!") print( paste("For",my_gene,"-",my_nm,"and",my_snp)) print(paste0(" - Deletion of a uSTOP codon detected at: ",-del.stop.distance," from the main ATG!")) print( paste(" --- " , as.character( my_cdna[start(del.stop)[1]:end(del.stop)[1]] ), " becomes ",as.character(my_cdna_updated[start(del.stop)[1]:end(del.stop)[1]] ) )) print( paste(" --- Gene direction:",my.strand)) # print(paste0(" - DEBUG: i=",i," ; nm=",nm)) # several uATG could be present, so the protein length will be different for(uatg_i in uatg_in_frame){ # uatg_i = uatg_in_frame[1] # Find next stop in frame with uatg_i uatg_i_in_frame <- start(stats_stop_mut)[ ((uatg_i - start(stats_stop_mut)) %%3)==0 ] id_ustop <- uatg_i_in_frame > uatg_i if(sum(id_ustop)>0){ first_new_stop <- min( uatg_i_in_frame[id_ustop] ) }else{ message(paste0("Skip variant ",i,": uATG detected 
but no STOP in phase")) next } # Compute distance and length stop.generated.prot.length <- (first_new_stop-uatg_i)/3 ref.prot.length <- (sum(gencode_annot_cds[,"end"]+1 - gencode_annot_cds[,"start"] ) -3)/3 uatg_used <- -(my_init_codon_5_cdna - uatg_i) stop_used <- (my_init_codon_5_cdna - 1 - first_new_stop) if(uatg_used>=0){ message(paste0("Skip variant ",i,": position of uATG is positive! Probably an error in the used reference database")) next } if(stop_used<0){ # 1. uATG in frame to ref ATG uatg_inframe_refatg <- ((uatg_i-my_init_codon_5_cdna)%%3)==0 # 2. STOP position used > ATG stop_used_downstream <- (first_new_stop > my_init_codon_5_cdna) if(uatg_inframe_refatg & stop_used_downstream){ overlapping.prot <- "elongated_CDS" }else{ overlapping.perc <- (-stop_used/(ref.prot.length*3))*100 overlapping.perc.round <- round(overlapping.perc, digits = 2) overlapping.prot <- paste0("overlapping_",overlapping.perc.round,"%") } }else{ overlapping.prot <- "not_overlapping" } stop.codon <- as.character(stats_stop_mut[start(stats_stop_mut)==first_new_stop]) test.frame.ustop <- ((my_init_codon_5_cdna-stop_used)%%3) if(test.frame.ustop==0){ in.frame <- "in_frame" }else{ in.frame <- paste0("out_of_frame_(",test.frame.ustop,")") } print( " --") print( paste(" --- using uATG at",uatg_used,"to the main ATG!")) print(paste0(" --- using STOP (",stop.codon,") at ",-stop_used," to the main ATG!")) print( paste(" --- new predicted ORF has a length of",stop.generated.prot.length,"(aa) vs",ref.prot.length,"(aa) for the main protein")) print( paste(" --- new predicted ORF is",overlapping.prot,"with the main protein")) # Update myvcf_annot_info new_field <- paste( na.omit(c( myvcf_annot_info[i,"MORFEE_uSTOP"], paste0(my_nm,":",my.strand,",",-del.stop.distance,",",in.frame,",",overlapping.prot,",",stop.generated.prot.length,"[/",ref.prot.length,"]","(aa)")) ) , collapse="|") myvcf_annot_info[i,"MORFEE_uSTOP"] <- new_field } cat("\n\n") }else{ message( "STOP deletion detected!") message( " 
- uSTOP deletion detected BUT without an upstream ATG (not in an ORF region)!") message( paste("For",my_gene,"-",my_nm,"and",my_snp)) message(paste0(" - Deletion of a uSTOP codon detected at: ",-del.stop.distance," from the main ATG!")) message( paste(" --- ", as.character( my_cdna[start(del.stop)[1]:end(del.stop)[1]] ), " becomes ",as.character(my_cdna_updated[start(del.stop)[1]:end(del.stop)[1]] ) )) message("\n\n") } }# END del STOP } } info(myvcf_annot) <- myvcf_annot_info return(myvcf_annot) }
/R/morfee.R
no_license
daissi/MORFEE
R
false
false
20,313
r
#' vcf annotation with MORFEE #' #' @param myvcf_annot an ANNOVAR annotated VCF object to annotate with MORFEE #' @param morfee_data data object obtained from get.morfee.data() #' #' @importFrom stats na.omit #' @import foreach #' @import VariantAnnotation #' @import Biostrings #' @import GenomicRanges #' #' @export #' morfee.annotation <- function(myvcf_annot, morfee_data){ myvcf_annot_info <- info(myvcf_annot) myvcf_annot_info$MORFEE_uATG <- NA myvcf_annot_info$MORFEE_uSTOP <- NA myvcf_annot_header <- rbind(info(header(myvcf_annot)), data.frame(Number = ".", Type = "String", Description = "New ATG annotation provided by MORFEE", stringsAsFactors = FALSE), data.frame(Number = ".", Type = "String", Description = "Deletion STOP annotation provided by MORFEE", stringsAsFactors = FALSE) ) rownames(myvcf_annot_header) <- c(rownames(info(header(myvcf_annot))),"MORFEE_uATG", "MORFEE_uSTOP") info(header(myvcf_annot)) <- myvcf_annot_header info(myvcf_annot) <- myvcf_annot_info i <- NULL for(i in 1:nrow(myvcf_annot_info)){ my_func <- as.character(myvcf_annot_info[i,"Func.refGene"]) if(my_func!="UTR5"){ message(paste0("Skip variant ",i,": not in UTR5 region")) next } my_refgene <- as.character(myvcf_annot_info[i,"GeneDetail.refGene"]) if(my_refgene=="."){ message(paste0("Skip variant ",i,": has no GeneDetail.refGene annotation")) next } my_gene <- as.character(myvcf_annot_info[i,"Gene.refGene"]) my_nm_list <- parse_GeneDetail.refGene(my_refgene) my_snp_pos_geno <- start(ranges(rowRanges(myvcf_annot)))[i] my_chr <- as.character(seqnames(rowRanges(myvcf_annot))[i]) if(length(grep("chr",my_chr))<1){ my_chr <- paste0("chr",my_chr) } my_gencode_seque <- morfee_data[["GENCODE_SEQ"]][which(morfee_data[["GENCODE_SEQ_ORDER"]]==my_chr)] # Loop for each transcript (row) for(nm in 1:nrow(my_nm_list)){ if(!(as.character(my_nm_list[nm,5]) %in% c("A","T","C","G"))){ message(paste0("Skip variant ",i,": reference allele not A, T, C or G! 
",my_nm_list[nm,5])) # message(" it could be an indel, but not yet supported!") next } if(!(as.character(my_nm_list[nm,6]) %in% c("A","T","C","G"))){ message(paste0("Skip variant ",i,": alternative allele not A, T, C or G! ",my_nm_list[nm,6])) # message(" it could be an indel, but not yet supported!") next } my_nm <- my_nm_list[nm,1] my_snp <- my_nm_list[nm,2] my_upordown <- my_nm_list[nm,3] my_snp_pos_rel <- as.numeric(my_nm_list[nm,4]) # Position from ATG, ex: 94 my_nm_id <- grep(paste0(my_nm,"\\."), morfee_data[["GENCODE_METAD"]]$V2) if(length(my_nm_id)==0){ message(paste0("Skip variant ",i,": annotation for ",my_nm_id," was not found in GENCODE")) next } my_enst <- morfee_data[["GENCODE_METAD"]][my_nm_id,1][1] if(morfee_data[["GRCh"]]==37){ my_transcript_id <- grep(paste0(my_enst,"_"), morfee_data[["GENCODE_ANNOT"]]$transcript_id) }else if(morfee_data[["GRCh"]]==38){ my_transcript_id <- grep(my_enst, morfee_data[["GENCODE_ANNOT"]]$transcript_id) }else{ stop("Reference Genome unknown") } gencode_annot_sub <- morfee_data[["GENCODE_ANNOT"]][my_transcript_id,] gencode_annot_cds <- gencode_annot_sub[gencode_annot_sub$type=="CDS",] gencode_annot_exon <- gencode_annot_sub[gencode_annot_sub$type=="exon",] gencode_annot_transcript_type <- unique(gencode_annot_exon$transcript_type) if(length(gencode_annot_transcript_type)<1){ message(paste0("Skip variant ",i,": transcript type is not protein coding")) next }else if(!(gencode_annot_transcript_type %in% "protein_coding")){ message(paste0("Skip variant ",i,": transcript type is not protein coding")) next } my_init_codon_r <- gencode_annot_sub[gencode_annot_sub$type=="start_codon",] if(nrow(my_init_codon_r)==0){ message(paste0("Skip variant ",i,": associated transcript without start_codon")) next } my_init_codon_start <- as.numeric(my_init_codon_r[,"start"])[1] # Several start codon? my_init_codon_end <- as.numeric(my_init_codon_r[,"end"])[1] # Several start codon? 
my_stop_codon_r <- gencode_annot_sub[gencode_annot_sub$type=="stop_codon",] if(nrow(my_stop_codon_r)==0){ message(paste0("Skip variant ",i,": associated transcript without stop_codon")) next } my_stop_codon_start <- as.numeric(my_stop_codon_r[,"start"])[1] # Several stop codon? my_stop_codon_end <- as.numeric(my_stop_codon_r[,"end"])[1] # Several stop codon? if(my_init_codon_end < my_stop_codon_end){ my_init_codon_5 <- my_init_codon_start my_init_codon_3 <- my_init_codon_end my_stop_codon_5 <- my_stop_codon_start my_stop_codon_3 <- my_stop_codon_end }else{ my_init_codon_5 <- my_init_codon_end my_init_codon_3 <- my_init_codon_start my_stop_codon_5 <- my_stop_codon_end my_stop_codon_3 <- my_stop_codon_start } ######################################## # Test gene orientation if(my_init_codon_end < my_stop_codon_end){ gencode_annot_exon <- gencode_annot_exon[order(as.numeric(gencode_annot_exon$exon_number), decreasing = FALSE),] exons_length <- (gencode_annot_exon[,"end"]+1)-gencode_annot_exon[,"start"] my_snp_exon <- which(gencode_annot_exon[,"start"] <= my_snp_pos_geno & gencode_annot_exon[,"end"] >= my_snp_pos_geno) my_start_exon <- which(gencode_annot_exon[,"start"] <= my_init_codon_5 & gencode_annot_exon[,"end"] >= my_init_codon_5) my_cdna_length_A <- my_cdna_length_B <- 0 if(length(my_snp_exon)==0){ message(paste0("Skip variant ",i,": not in an exonic part")) next } # Reference sequence stats for(exon_i in 1:nrow(gencode_annot_exon)){ if(exon_i==1){ my_cdna <- subseq(my_gencode_seque, start=gencode_annot_exon[exon_i,"start"], end=gencode_annot_exon[exon_i,"end"])[[1]] }else{ my_cdna_i <- subseq(my_gencode_seque, start=gencode_annot_exon[exon_i,"start"], end=gencode_annot_exon[exon_i,"end"])[[1]] my_cdna <- c(my_cdna, my_cdna_i) } # Calcul position of variant in cDNA if(exon_i < my_snp_exon){ my_cdna_length_A <- my_cdna_length_A + exons_length[exon_i] }else if(exon_i == my_snp_exon){ my_snp_pos_cdna <- my_cdna_length_A + ((my_snp_pos_geno+1) - 
gencode_annot_exon[exon_i,"start"]) } # Calcul position of reference A(TG) in cDNA if(exon_i < my_start_exon){ my_cdna_length_B <- my_cdna_length_B + exons_length[exon_i] }else if(exon_i == my_start_exon){ my_init_codon_5_cdna <- my_cdna_length_B + ((my_init_codon_5 + 1) - gencode_annot_exon[exon_i,"start"]) } } # Find codon start in reference sequence stats_orig <- matchPattern(morfee_data[["SEQ_INIT"]], my_cdna) # Found all STOP in reference sequence for(j in 1:length(morfee_data[["SEQ_STOP"]])){ stats_stop_orig_j <- matchPattern(morfee_data[["SEQ_STOP"]][[j]], my_cdna) if(j==1){ stats_stop_orig <- stats_stop_orig_j }else{ stats_stop_orig <- c(stats_stop_orig, stats_stop_orig_j) } } my_ref_allele <- as.character(subseq(my_cdna, start=my_snp_pos_cdna, end=my_snp_pos_cdna)) if(my_ref_allele!=my_nm_list[nm,5]){ message(paste0("Skip variant ",i,": mismatch between alleles")) next } my_ref_ATG <- as.character(subseq(my_cdna, start=my_init_codon_5_cdna, end=(my_init_codon_5_cdna+2))) if(my_ref_ATG!="ATG"){ message(paste0("Skip variant ",i,": reference ATG no detected")) next } # Replace my sequence with SNP my_cdna_updated <- replaceLetterAt(my_cdna, my_snp_pos_cdna, my_nm_list[nm,6]) ######################################## }else{ # message("Opposite orientation!") gencode_annot_exon <- gencode_annot_exon[order(as.numeric(gencode_annot_exon$exon_number), decreasing = TRUE),] exons_length <- (gencode_annot_exon[,"end"]+1)-gencode_annot_exon[,"start"] my_snp_exon <- which(gencode_annot_exon[,"start"] <= my_snp_pos_geno & gencode_annot_exon[,"end"] >= my_snp_pos_geno) my_start_exon <- which(gencode_annot_exon[,"start"] <= my_init_codon_5 & gencode_annot_exon[,"end"] >= my_init_codon_5) my_cdna_length_A <- my_cdna_length_B <- 0 if(length(my_snp_exon)==0){ message(paste0("Skip variant ",i,": not in an exonic part")) next } # Reference sequence stats for(exon_i in 1:nrow(gencode_annot_exon)){ if(exon_i==1){ my_cdna <- subseq(my_gencode_seque, 
start=gencode_annot_exon[exon_i,"start"], end=gencode_annot_exon[exon_i,"end"])[[1]] my_cdna <- reverse(complement(my_cdna)) }else{ my_cdna_i <- subseq(my_gencode_seque, start=gencode_annot_exon[exon_i,"start"], end=gencode_annot_exon[exon_i,"end"])[[1]] my_cdna_i <- reverse(complement(my_cdna_i)) my_cdna <- c(my_cdna, my_cdna_i) } # Calcul position of variant in cDNA if(exon_i < my_snp_exon){ my_cdna_length_A <- my_cdna_length_A + exons_length[exon_i] }else if(exon_i == my_snp_exon){ my_snp_pos_cdna <- my_cdna_length_A + (gencode_annot_exon[exon_i,"end"] - (my_snp_pos_geno-1)) } # Calcul position of reference A(TG) in cDNA if(exon_i < my_start_exon){ my_cdna_length_B <- my_cdna_length_B + exons_length[exon_i] }else if(exon_i == my_start_exon){ my_init_codon_5_cdna <- my_cdna_length_B + (gencode_annot_exon[exon_i,"end"] - (my_init_codon_3+1)) } } # Find codon start in reference sequence stats_orig <- matchPattern(morfee_data[["SEQ_INIT"]], my_cdna) # Found all STOP in reference sequence for(j in 1:length(morfee_data[["SEQ_STOP"]])){ stats_stop_orig_j <- matchPattern(morfee_data[["SEQ_STOP"]][[j]], my_cdna) if(j==1){ stats_stop_orig <- stats_stop_orig_j }else{ stats_stop_orig <- c(stats_stop_orig, stats_stop_orig_j) } } my_ref_allele <- as.character(subseq(my_cdna, start=my_snp_pos_cdna, end=my_snp_pos_cdna)) if(my_ref_allele!=my_nm_list[nm,5]){ message(paste0("Skip variant ",i,": mismatch between alleles")) next } my_ref_ATG <- as.character(subseq(my_cdna, start=my_init_codon_5_cdna, end=(my_init_codon_5_cdna+2))) if(my_ref_ATG!="ATG"){ message(paste0("Skip variant ",i,": reference ATG no detected")) next } # Replace my sequence with SNP my_cdna_updated <- replaceLetterAt(my_cdna, my_snp_pos_cdna, my_nm_list[nm,6]) } ######################################## # Find codon start in mutated sequence stats_mut <- matchPattern(morfee_data[["SEQ_INIT"]], my_cdna_updated) # Found all STOP in mutated sequence for(j in 1:length(morfee_data[["SEQ_STOP"]])){ stats_stop_mut_j 
<- matchPattern(morfee_data[["SEQ_STOP"]][[j]], my_cdna_updated) if(j==1){ stats_stop_mut <- stats_stop_mut_j }else{ stats_stop_mut <- c(stats_stop_mut, stats_stop_mut_j) } } # Comparer codon start in reference and mutated sequences new.atg <- ranges(stats_mut)[!c(ranges(stats_mut) %in% ranges(stats_orig)) ,] # Compare stop codons in reference and mutated sequences del.stop <- ranges(stats_stop_orig)[!c(ranges(stats_stop_orig) %in% ranges(stats_stop_mut)) ,] if(length(new.atg)>0){ message("New ATG detected!") if(my_init_codon_end < my_stop_codon_end){ my.strand <- "forward" }else{ my.strand <- "reverse" } new.atg.distance <- my_init_codon_5_cdna - (start(new.atg)[1]) test.frame.uatg <- (new.atg.distance%%3) if(test.frame.uatg==0){ in.frame <- "in_frame" }else{ in.frame <- paste0("out_of_frame_(",test.frame.uatg,")") } # Found all STOP for(j in 1:length(morfee_data[["SEQ_STOP"]])){ my_stop_j <- matchPattern(morfee_data[["SEQ_STOP"]][[j]], my_cdna_updated[ start(range(new.atg)) : length(my_cdna_updated) ]) if(j==1){ my_stops <- my_stop_j }else{ my_stops <- c(my_stops, my_stop_j) } } # First stop in phase to the new codon start my_stops_sort <- sort(start(ranges(my_stops))) my_first_stop <- my_stops_sort[my_stops_sort%%3==1][1] # Lenght of proteins generated.prot.length <- (my_first_stop-1)/3 ref.prot.length <- (sum(gencode_annot_cds[,"end"]+1 - gencode_annot_cds[,"start"] ) -3)/3 if(is.na(my_first_stop)){ message(paste0("Skip variant ",i,": new ATG detected but no STOP in phase")) next } # Determine whether the ORF is overlapping, not overlapping or elongated CD if(my_first_stop < my_init_codon_5_cdna){ overlapping.prot <- "not_overlapping" }else if( in.frame=="in_frame" & (my_first_stop > my_init_codon_5_cdna) ){ overlapping.prot <- "elongated_CDS" }else{ overlapping.perc <- ((my_first_stop-my_init_codon_5_cdna)/(ref.prot.length*3))*100 overlapping.perc.round <- round(overlapping.perc, digits = 2) overlapping.prot <- paste0("overlapping_",overlapping.perc.round,"%") 
} message( paste("For",my_gene,"-",my_nm,"and",my_snp)) message(paste0(" - New uATG detected at: ",new.atg.distance," from the main ATG!")) message( paste(" - new uATG is",in.frame,"to the main ATG!")) message( paste(" - new generated protein has a length of",generated.prot.length,"(aa) vs",ref.prot.length,"(aa)")) # message(paste0(" - DEBUG: i=",i," ; nm=",nm)) message("\n\n") # Update myvcf_annot_info new_field <- paste( na.omit(c( myvcf_annot_info[i,"MORFEE_uATG"], paste0(my_nm,":",my.strand,",",new.atg.distance,",",in.frame,",",overlapping.prot,",",generated.prot.length,"[/",ref.prot.length,"]","(aa)")) ) , collapse="|") myvcf_annot_info[i,"MORFEE_uATG"] <- new_field }# END new ATG if(length(del.stop)>0){ # Use stats_orig, but could use stats_mut uatg <- start(stats_orig)[ c(start(del.stop) - start(stats_orig)) > 0] uatg_in_frame <- uatg[((start(del.stop) - uatg) %% 3)==0] del.stop.distance <- my_init_codon_5_cdna - (start(del.stop)[1]) if(length(uatg_in_frame)>0){ if(my_init_codon_end < my_stop_codon_end){ my.strand <- "forward" }else{ my.strand <- "reverse" } print( "STOP deletion detected!") print( " - uSTOP deletion in ORF detected!") print( paste("For",my_gene,"-",my_nm,"and",my_snp)) print(paste0(" - Deletion of a uSTOP codon detected at: ",-del.stop.distance," from the main ATG!")) print( paste(" --- " , as.character( my_cdna[start(del.stop)[1]:end(del.stop)[1]] ), " becomes ",as.character(my_cdna_updated[start(del.stop)[1]:end(del.stop)[1]] ) )) print( paste(" --- Gene direction:",my.strand)) # print(paste0(" - DEBUG: i=",i," ; nm=",nm)) # several uATG could be present, so the protein length will be different for(uatg_i in uatg_in_frame){ # uatg_i = uatg_in_frame[1] # Find next stop in frame with uatg_i uatg_i_in_frame <- start(stats_stop_mut)[ ((uatg_i - start(stats_stop_mut)) %%3)==0 ] id_ustop <- uatg_i_in_frame > uatg_i if(sum(id_ustop)>0){ first_new_stop <- min( uatg_i_in_frame[id_ustop] ) }else{ message(paste0("Skip variant ",i,": uATG detected 
but no STOP in phase")) next } # Compute distance and length stop.generated.prot.length <- (first_new_stop-uatg_i)/3 ref.prot.length <- (sum(gencode_annot_cds[,"end"]+1 - gencode_annot_cds[,"start"] ) -3)/3 uatg_used <- -(my_init_codon_5_cdna - uatg_i) stop_used <- (my_init_codon_5_cdna - 1 - first_new_stop) if(uatg_used>=0){ message(paste0("Skip variant ",i,": position of uATG is positive! Probably an error in the used reference database")) next } if(stop_used<0){ # 1. uATG in frame to ref ATG uatg_inframe_refatg <- ((uatg_i-my_init_codon_5_cdna)%%3)==0 # 2. STOP position used > ATG stop_used_downstream <- (first_new_stop > my_init_codon_5_cdna) if(uatg_inframe_refatg & stop_used_downstream){ overlapping.prot <- "elongated_CDS" }else{ overlapping.perc <- (-stop_used/(ref.prot.length*3))*100 overlapping.perc.round <- round(overlapping.perc, digits = 2) overlapping.prot <- paste0("overlapping_",overlapping.perc.round,"%") } }else{ overlapping.prot <- "not_overlapping" } stop.codon <- as.character(stats_stop_mut[start(stats_stop_mut)==first_new_stop]) test.frame.ustop <- ((my_init_codon_5_cdna-stop_used)%%3) if(test.frame.ustop==0){ in.frame <- "in_frame" }else{ in.frame <- paste0("out_of_frame_(",test.frame.ustop,")") } print( " --") print( paste(" --- using uATG at",uatg_used,"to the main ATG!")) print(paste0(" --- using STOP (",stop.codon,") at ",-stop_used," to the main ATG!")) print( paste(" --- new predicted ORF has a length of",stop.generated.prot.length,"(aa) vs",ref.prot.length,"(aa) for the main protein")) print( paste(" --- new predicted ORF is",overlapping.prot,"with the main protein")) # Update myvcf_annot_info new_field <- paste( na.omit(c( myvcf_annot_info[i,"MORFEE_uSTOP"], paste0(my_nm,":",my.strand,",",-del.stop.distance,",",in.frame,",",overlapping.prot,",",stop.generated.prot.length,"[/",ref.prot.length,"]","(aa)")) ) , collapse="|") myvcf_annot_info[i,"MORFEE_uSTOP"] <- new_field } cat("\n\n") }else{ message( "STOP deletion detected!") message( " 
- uSTOP deletion detected BUT without an upstream ATG (not in an ORF region)!") message( paste("For",my_gene,"-",my_nm,"and",my_snp)) message(paste0(" - Deletion of a uSTOP codon detected at: ",-del.stop.distance," from the main ATG!")) message( paste(" --- ", as.character( my_cdna[start(del.stop)[1]:end(del.stop)[1]] ), " becomes ",as.character(my_cdna_updated[start(del.stop)[1]:end(del.stop)[1]] ) )) message("\n\n") } }# END del STOP } } info(myvcf_annot) <- myvcf_annot_info return(myvcf_annot) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_multiedge_count.R \name{get_multiedge_count} \alias{get_multiedge_count} \title{Get the count of multiple edges} \usage{ get_multiedge_count(graph) } \arguments{ \item{graph}{a graph object of class \code{dgr_graph}.} } \value{ a vector with a single, numerical value. } \description{ Get a count of the number of multiple edges in the graph. Included in the count is the number of separate edges that share the same edge definition (i.e., same pair of nodes) across the entire graph. So, for example, if there are 2 edge definitions in the graph that involve 6 separate edge IDs, the count will be \code{4}. } \examples{ # Create a node data frame (ndf) ndf <- create_node_df( n = 5, label = TRUE) # Create an edge data frame (edf) edf <- create_edge_df( from = c(1, 4, 4, 3, 5, 1, 3, 4), to = c(4, 1, 1, 2, 2, 2, 2, 1)) # Create a graph with the ndf and edf graph <- create_graph( nodes_df = ndf, edges_df = edf) # Get the total number of multiple # edges (those edges that share an # edge definition) in the graph graph \%>\% get_multiedge_count() }
/man/get_multiedge_count.Rd
permissive
akkalbist55/DiagrammeR
R
false
true
1,172
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_multiedge_count.R \name{get_multiedge_count} \alias{get_multiedge_count} \title{Get the count of multiple edges} \usage{ get_multiedge_count(graph) } \arguments{ \item{graph}{a graph object of class \code{dgr_graph}.} } \value{ a vector with a single, numerical value. } \description{ Get a count of the number of multiple edges in the graph. Included in the count is the number of separate edges that share the same edge definition (i.e., same pair of nodes) across the entire graph. So, for example, if there are 2 edge definitions in the graph that involve 6 separate edge IDs, the count will be \code{4}. } \examples{ # Create a node data frame (ndf) ndf <- create_node_df( n = 5, label = TRUE) # Create an edge data frame (edf) edf <- create_edge_df( from = c(1, 4, 4, 3, 5, 1, 3, 4), to = c(4, 1, 1, 2, 2, 2, 2, 1)) # Create a graph with the ndf and edf graph <- create_graph( nodes_df = ndf, edges_df = edf) # Get the total number of multiple # edges (those edges that share an # edge definition) in the graph graph \%>\% get_multiedge_count() }
\name{navigation.hac} \alias{navigation.hac} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Compute bearing, navigated distance and speed } \description{ This function computes navigation course (bearing), navigated distance, time diference and navigation speed between GPS fixes in position data imported from an HAC file. } \usage{ navigation.hac(pos) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{pos}{ geographic position data from an HAC file, as imported with \code{position.hac}. } } \details{ The bearing and navigated distance are computed with functions \code{bearingRhumb} and \code{distVincentyEllipsoid} from package \code{geosphere}. This function is intended to be called inside \code{read.echogram}, rather than being used directly. } \value{ A data frame with seven variables: \item{time.cpu}{date and time from the computer CPU during data acquisition.} \item{lon}{longitudes.} \item{lat}{latitudes.} \item{bearing}{navigation course between two consecutive GPS fixes.} \item{navdist}{navigated distance between two consecutive GPS fixes.} \item{time.dif}{time difference between two consecutive GPS fixes.} \item{navspeed}{navigation speed between two consecutive GPS fixes.} } \author{ Héctor Villalobos } \seealso{ \code{\link{position.hac}}, \code{\link{bearingRhumb}}, \code{\link{distVincentyEllipsoid}}. } \examples{ hacfile <- system.file("extdata", "D20150510-T202221.hac", package="echogram") pos <- position.hac( hacfile ) pos pos2 <- navigation.hac(pos) pos2 } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{ manip }
/man/navigation.hac.Rd
no_license
hvillalo/echogram
R
false
false
1,682
rd
\name{navigation.hac} \alias{navigation.hac} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Compute bearing, navigated distance and speed } \description{ This function computes navigation course (bearing), navigated distance, time diference and navigation speed between GPS fixes in position data imported from an HAC file. } \usage{ navigation.hac(pos) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{pos}{ geographic position data from an HAC file, as imported with \code{position.hac}. } } \details{ The bearing and navigated distance are computed with functions \code{bearingRhumb} and \code{distVincentyEllipsoid} from package \code{geosphere}. This function is intended to be called inside \code{read.echogram}, rather than being used directly. } \value{ A data frame with seven variables: \item{time.cpu}{date and time from the computer CPU during data acquisition.} \item{lon}{longitudes.} \item{lat}{latitudes.} \item{bearing}{navigation course between two consecutive GPS fixes.} \item{navdist}{navigated distance between two consecutive GPS fixes.} \item{time.dif}{time difference between two consecutive GPS fixes.} \item{navspeed}{navigation speed between two consecutive GPS fixes.} } \author{ Héctor Villalobos } \seealso{ \code{\link{position.hac}}, \code{\link{bearingRhumb}}, \code{\link{distVincentyEllipsoid}}. } \examples{ hacfile <- system.file("extdata", "D20150510-T202221.hac", package="echogram") pos <- position.hac( hacfile ) pos pos2 <- navigation.hac(pos) pos2 } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{ manip }
#' Filter Vegetation Index with Modified Whittaker Approach #' #' @description #' Use a modified Whittaker filter function (see References) from package #' **ptw** to filter a vegetation index (VI) time series of satellite data. #' #' @param vi `Raster*` or `character` file names, sorted VI. Use [preStack()] #' functionality to ensure the right input. #' @param w `Raster*` or `character` file names. In case of MODIS composite, the #' sorted 'VI_Quality' layers. #' @param t `Raster*` or `character` file names. In case of MODIS composite, the #' sorted 'composite_day_of_the_year' layers. If missing, the date is #' determined using 'timeInfo'. #' @param timeInfo Output from [orgTime()]. #' @param lambda `character` or `integer`. Yearly lambda value passed to #' [ptw::whit2()]. If set as `character` (i.e., `lambda = "600"`), it is not #' adapted to the time series length, but used as a fixed value (see Details). #' High values = stiff/rigid spline. #' @param nIter `integer`. Number of iterations for the upper envelope fitting. #' @param outputAs `character`, organization of output files. `"single"` #' (default) means each date one `RasterLayer`; `"yearly"` a `RasterBrick` for #' each year, and `"one"` one `RasterBrick` for the entire time series. #' @param collapse `logical`. Collapse input data of multiple years into one #' single year before filtering. #' @param prefixSuffix `character`, file naming. Names are dot-separated: #' `paste0(prefixSuffix[1], "YYYDDD", lambda, prefixSuffix[2], ".defaultFileExtension")`. #' @param outDirPath `character`, output path. Defaults to the current working #' directory. #' @param outlierThreshold `numeric` in the same unit as 'vi', used for outlier #' removal (see Details). #' @param mergeDoyFun Especially when using `collapse = TRUE`, multiple #' measurements for one day can be present. 
Here you can choose how those #' values are merged to one single value: `"max"` uses the highest value, #' `"mean"` or `"weighted.mean"` use [mean()] or [stats::weighted.mean()]. #' @param ... Arguments passed to [raster::writeRaster()] (except for #' 'filename'). #' #' @return #' A Whittaker-smoothed `RasterStack`. #' #' @details #' The argument 'lambda' is passed to `MODIS:::miwhitatzb1`. You can set it as #' yearly 'lambda', which means that it doesn't matter how long the input time #' series is because 'lambda' is adapted to it with: #' `lambda * ('length of input time series in days' / 365)`. The input length #' can differ from the output because of the 'pillow' argument in [orgTime()]. #' But it can also be set as `character` (i.e., `lambda = "1000"`). In this #' case, the adaption to the time series length is not performed. #' #' @references #' Modified Whittaker smoother, according to Atzberger & Eilers 2011 #' International Journal of Digital Earth 4(5):365-386, #' \doi{10.1080/17538947.2010.505664}. #' Implementation in R: Agustin Lobo 2012 #' #' @note #' Currently tested on MODIS and Landsat data. Using M*D13, it is also possible #' to use the 'composite_day_of_the_year' and the 'VI_Quality' layers. #' #' @seealso #' [smooth.spline.raster()], [raster::raster()]. #' #' @author #' Matteo Mattiuzzi and Agustin Lobo #' #' @examples #' \dontrun{ #' # The following function will download bit more than 1 year of MOD13A1 (~180mB) and therefore #' # take while to execute! 
Data will be downloaded to options("MODIS_localArcPath") and processed #' # to 'paste0(options("MODIS_outDirPath"),"fullCapa")' #' # You need to extract: 'vegetation index', 'VI_Quality layer', and 'composite day of the year', #' # this is expressed by the argument 'SDSstring' #' runGdal(product="MOD13A2",begin="2004340",extent="ireland",end="2006020", job="fullCapa", #' SDSstring="101000000010") #' path <- paste0(options("MODIS_outDirPath"),"fullCapa") #' #' # the only obligatory dataset is the vegetatino index #' # get the 'vi' data in the source directory: #' vi <- preStack(path=path, pattern="*_NDVI.tif$") #' #' # "orgTime" detects timing information of the input data and generates based on the arguments #' # the output date information. #' # For spline functions (in general) the beginning and the end of the time series #' # is always problematic. So there is the argument "pillow" (default 75 days) that adds #' # (if available) some more layers on the two endings. #' timeInfo <- orgTime(vi,nDays=16,begin="2005001",end="2005365",pillow=40) #' #' # now re-run "preStack" with two differences, 'files' (output of the first 'preStack' call) #' # and the 'timeInfo' #' # Here only the data needed for the filtering is extracted: #' vi <- preStack(files=vi,timeInfo=timeInfo) #' #' whittaker.raster(vi,timeInfo=timeInfo,lambda=5000) #' #' # if the files are M*D13 you can use also Quality layers and the composite day of the year: #' wt <- preStack(path=path, pattern="*_VI_Quality.tif$", timeInfo=timeInfo) #' # can also be already stacked: #' inT <- preStack(path=path, pattern="*_composite_day_of_the_year.tif$", timeInfo=timeInfo) #' #' whittaker.raster(vi=vi, wt=wt, inT=inT, timeInfo=timeInfo, lambda=5000, overwrite=TRUE) #' } #' #' @name whittaker.raster #' @export whittaker.raster whittaker.raster <- function(vi, w=NULL, t=NULL, timeInfo = orgTime(vi), lambda = 5000, nIter= 3, outputAs="single", collapse=FALSE, prefixSuffix=c("MCD","ndvi"), outDirPath=".", 
outlierThreshold=NULL, mergeDoyFun="max", ...) { opts <- combineOptions(...) outDirPath <- setPath(outDirPath) bitShift <- opts$bitShift bitMask <- opts$bitMask threshold <- opts$threshold dataFormat <- opts$dataFormat rasterOut <- toupper(raster::writeFormats()) if(!toupper(dataFormat) %in% rasterOut[,"name"]) { stop("Unknown or unsupported data format: '", dataFormat, "'. Please run raster::writeFormats() (column 'name') for supported file types.\n") } minDat <- ifelse(is.null(opts$minDat), 3, opts$minDat) # 3 is very small! if (collapse) { if(timeInfo$call$nDays == "asIn") stop("Argument nDays = 'asIn' (passed to orgTime()) is not allowed when using collapse = TRUE.\n") fitt <- seq(as.numeric(format(min(timeInfo$outputLayerDates),"%j")),as.numeric(format(max(timeInfo$outputLayerDates),"%j")),by=timeInfo$call$nDays) + timeInfo$call$pillow } else { fitt <- timeInfo$outSeq } inlam <- lambda ## fixed lambda if (is.character(lambda)) { cat("Using fixed 'lambda': ", lambda, ".\n", sep = "") nameL <- "fL" ## yearly lambda } else { if (collapse) { lambda <- lambda * ((365 + 2 * timeInfo$call$pillow) / 365) cat("Yearly 'lambda' is:", inlam, "\nNow changed with lambda*((365+2*pillow)/365) to:",lambda,"\n") } else { lambda <- lambda * ((max(timeInfo$inSeq) - min(timeInfo$inSeq) - 1) / 365) cat("Yearly 'lambda' is: ", inlam, ".\n", "Now changed to lambda * ('length of input period in days' / 365): ", lambda, ".\n", sep = "") } nameL <- "yL" } if (is.character(inlam)) inlam <- as.numeric(inlam) inlam <- round(inlam) #from here on used only in outputfilename lambda <- as.numeric(lambda) if (!inherits(vi, "Raster")) vi <- raster::stack(vi, quick = TRUE) if(!inherits(w, "Raster") & !is.null(w)) w <- raster::stack(w, quick = TRUE) if(!inherits(t, "Raster") & !is.null(t)) t <- raster::stack(t, quick = TRUE) if (is.null(opts$datatype)) opts$datatype <- raster::dataType(vi[[1]]) if (grepl("FLT", opts$datatype)) { doround <- FALSE } else { doround <- TRUE } if 
(is.null(opts$overwrite)) { opts$overwrite <- FALSE } outputAs <- tolower(outputAs) if (collapse) # use only DOY obmitt year (all years into one) { d <- sprintf("%03d",seq(as.numeric(format(min(timeInfo$outputLayerDates),"%j")),as.numeric(format(max(timeInfo$outputLayerDates),"%j")),by=timeInfo$call$nDays)) if(outputAs=="single") { oname <- paste0(outDirPath,prefixSuffix[1],".",d,".",prefixSuffix[2]) b <- vector(mode="list",length=length(oname)) for(a in seq_along(oname)) { b[[a]] <- raster(vi) b[[a]] <- writeStart(b[[a]], filename=oname[a], datatype=opts$datatype , overwrite=opts$overwrite, format = dataFormat) } } else { b <- list() b[[1]] <- writeStart(brick(raster(vi),nl=length(d)), filename=oname, datatype=opts$datatype , overwrite=opts$overwrite, format = dataFormat) names(b[[1]]) <- paste0("doy",d) } } else if (outputAs=="yearly") { b <- vector(mode="list",length=length(unique(format(timeInfo$outputLayerDates,"%Y")))) for (a in seq_along(unique(format(timeInfo$outputLayerDates,"%Y")))) { y <- unique(format(timeInfo$outputLayerDates,"%Y"))[a] oname <- paste0(outDirPath,prefixSuffix[1],".year",y,".",nameL,inlam,".",prefixSuffix[2]) names <- timeInfo$outputLayerDates[format(timeInfo$outputLayerDates,"%Y")==y] b[[a]] <- brick(raster(vi),nl=as.integer(sum(format(timeInfo$outputLayerDates,"%Y")==y)), values=FALSE) b[[a]] <- writeStart(b[[a]], filename=oname, datatype=opts$datatype , overwrite=opts$overwrite, format = dataFormat) names(b[[a]]) <- names } } else if(outputAs=="one") { y <- unique(format(timeInfo$outputLayerDates,"%Y")) oname <- paste0(outDirPath,prefixSuffix[1],"_from",paste0(y,collapse="to"),".",nameL,inlam,".",prefixSuffix[2]) b <- list() b[[1]] <- brick(raster(vi),nl=length(fitt), values=FALSE) b[[1]] <- writeStart(b[[1]], filename=oname, datatype=opts$datatype , overwrite=opts$overwrite, format = dataFormat) names(b[[1]]) <- timeInfo$outputLayerDates } else if (outputAs=="single") { d <- sort(format(timeInfo$outputLayerDates,"%Y%j")) oname <- 
paste0(outDirPath,prefixSuffix[1],".",d,".",nameL,inlam,".",prefixSuffix[2]) b <- vector(mode="list",length=length(oname)) for(a in seq_along(oname)) { b[[a]] <- raster::raster(vi) b[[a]] <- writeStart(b[[a]], filename=oname[a], datatype=opts$datatype , overwrite=opts$overwrite, format = dataFormat) } } tr <- raster::blockSize(vi) cat("Data is in, start processing!\n") if (mergeDoyFun == "max") { mergeFun <- unifyDoubleMX } else if (mergeDoyFun == "weighted.mean" | mergeDoyFun == "mean") { mergeFun <- unifyDoubleWM } clFun <- function(l) { val <- raster::getValues(vi, row = tr$row[l], nrows = tr$nrows[l]) val <- t(val) mtrdim <- dim(val) set0 <- matrix(FALSE,nrow = mtrdim[1], ncol = mtrdim[2]) set0[is.na(val)] <- TRUE ## if 'VI_Quality' is supplied: if (!is.null(w)) { wtu <- raster::getValues(w, row = tr$row[l], nrows = tr$nrows[l]) # is it not a weight info [0-1]? if (max(wtu, na.rm = TRUE) > 1) { if(is.null(bitShift) | is.null(bitMask)) { # try to detect VI usefulness layer bits <- detectBitInfo(vi, "VI usefulness", warn = FALSE) bitShift <- bits$bitShift bitMask <- bits$bitMask } if(is.null(bitShift) | is.null(bitMask)) stop("Could not extract 'bits' for weighting from this product. 
", "Use '?makeWeights' function to generate weights manually!") wtu <- makeWeights(wtu, bitShift = bitShift, bitMask = bitMask, threshold = threshold, decodeOnly = FALSE) } wtu <- t(wtu) set0[wtu==0] <- TRUE set0[is.na(wtu)] <- TRUE ## else if 'VI_Quality' is not supplied, then weight = 1: } else { wtu <- matrix(1, nrow = mtrdim[1], ncol = mtrdim[2]) } if (inherits(t, "Raster")) { inTu <- raster::getValues(t, row = tr$row[l], nrows = tr$nrows[l]) inTu <- t(inTu) set0[is.na(inTu)] <- TRUE set0[inTu <= 0] <- TRUE t0 <- min(timeInfo$inDoys[1]) - 1 if (!collapse) { inTu <- t(repDoy(t(inTu), layerDate = timeInfo, bias = -t0)) } inTu[set0] <- 0 } else { if (collapse) { inTu <- matrix(timeInfo$inDoys,nrow=length(timeInfo$inDoys),ncol=mtrdim[2]) } else { inTu <- matrix(timeInfo$inSeq,nrow=length(timeInfo$inSeq),ncol=mtrdim[2]) } } # the entire info to use or not a pix is in "wtu" wtu[set0] <- 0 val[set0] <- 0 out <- matrix(NA, nrow = length(fitt), ncol = mtrdim[2]) if (!is.null(outlierThreshold)) { kickOutlier <- function(vals, weights, lambda, threshold) { fTS <- ptw::whit2(vals, w = weights, lambda = lambda) weights[weights==1][abs(vals[weights==1]-fTS[weights==1]) > threshold] <- 0 return(weights) } } else { # if is.null(outlierThreshold) generate a fake function to avoid a per pixel "if" kickOutlier <- function(vals, weights, lambda, threshold) { return(weights) } } if (collapse) { vec0 <- rep(0,365 + (2*timeInfo$call$pillow) + 30) # add a save length of data (because layer doy + effectice composite doy) } else { vec0 <- rep(0,max(timeInfo$inSeq,timeInfo$outSeq) - min(timeInfo$inSeq,timeInfo$outSeq) - 1 + 30) } # minimum "minDat" input values for filtering Cvec <- (colSums(wtu > 0) >= minDat) Cvec <- (1:mtrdim[2])[Cvec] ind <- inTu > 0 win <- options("warn") options(warn=-1) for (u in Cvec) { index <- ind[,u] use <- mergeFun(vx=val[index,u],wx=wtu[index,u],tx=inTu[index,u]) valVec <- wtVec <- vec0 if(!collapse) { valVec[use$tx] <- use$vx wtVec[use$tx] <- use$wx } else 
{ newOrder <- doCollapse(tx=use$tx,pillow=timeInfo$call$pillow) valVec[newOrder$sequence] <- use$vx[newOrder$order] wtVec[newOrder$sequence] <- use$wx[newOrder$order] } wtVec <- kickOutlier(vals=valVec,weights=wtVec,lambda=lambda,threshold=outlierThreshold) #plot(valVec,ylim=c(-1000,9000)) for(i in 1:nIter) { fTS <- ptw::whit2(valVec,w=wtVec,lambda=lambda) valVec[valVec < fTS] <- fTS[valVec < fTS] } out[,u] <- fTS[fitt] #lines(fTS,col=2) } options(warn=win$warn) out[,colSums(abs(out))==0] <- NA return(t(out)) } for (i in seq_along(tr$row)) { res <- clFun(i) if (doround) res <- round(res) b <- writeValuesMODIS(b, res, tr$row[i], timeInfo, collapse, outputAs) } writeStopMODIS(b,timeInfo,outputAs,collapse) return(raster::stack(b)) } unifyDoubleWM <- function(vx,wx,tx) { tx <- as.numeric(tx) vx <- as.numeric(vx) wx <- as.numeric(wx) double <- tx[duplicated(tx)] if(length(double)>0) { double <- unique(double) for(i in seq_along(double)) { inx <- which(tx==double[i]) vx[inx[1]] <- weighted.mean(vx[inx],w=wx[inx]) wx[inx[1]] <- max(wx[inx]) vx <- vx[-inx[-1]] wx <- wx[-inx[-1]] tx <- tx[-inx[-1]] } } list(vx=vx,wx=wx,tx=tx) } unifyDoubleMX <- function(vx,wx,tx) { tx <- as.numeric(tx) vx <- as.numeric(vx) wx <- as.numeric(wx) double <- tx[duplicated(tx)] if(length(double)>0) { double <- unique(double) for(i in seq_along(double)) { inx <- which(tx==double[i]) mx <- which.max(wx[inx]) vx <- vx[-inx[-mx]] wx <- wx[-inx[-mx]] tx <- tx[-inx[-mx]] } } list(vx=vx,wx=wx,tx=tx) } doCollapse <- function(tx,pillow) { ord <- order(tx) txS <- tx[ord] t0 <- 365 - pillow tS <- ord[txS >= t0] tE <- ord[txS <= pillow] s0 <- txS[txS >= t0] - t0 s1 <- txS + pillow s2 <- txS[txS <= pillow] + 365 + pillow list(order=c(tS,ord,tE),sequence=c(s0,s1,s2)+1) } ##################################################### writeValuesMODIS <- function(b,val,row,timeInfo,collapse,outputAs) { if(collapse) { d <- 
seq_along(sprintf("%03d",seq(as.numeric(format(min(timeInfo$outputLayerDates),"%j")),as.numeric(format(max(timeInfo$outputLayerDates),"%j")),by=timeInfo$call$nDays))) } else { d <- seq_along(b) } if(outputAs=="single") { for (a in seq_along(d)) { b[[a]] <- writeValues(b[[a]], val[,a], row) } } else if(outputAs=="one") { b[[1]] <- writeValues(b[[1]], val, row) } else { for (a in seq_along(d)) { y <- unique(format(timeInfo$outputLayerDates,"%Y"))[a] b[[a]] <- writeValues(b[[a]], val[,format(timeInfo$outputLayerDates,"%Y")==y], row) } } return(b) } writeStopMODIS <- function(b,timeInfo,outputAs,collapse) { for (a in seq_along(b)) { b[[a]] <- writeStop(b[[a]]) nam <- filename(b[[a]]) extension(nam) <- "" if (collapse & outputAs!="single") { write.table(x=unique(format(timeInfo$outputLayerDates,"%j")), file=nam, col.names=FALSE, row.names=FALSE) } else if(outputAs=="one") { write.table(x=timeInfo$outputLayerDates, file=nam, col.names=FALSE, row.names=FALSE) } else if (outputAs=="yearly") { y <- unique(format(timeInfo$outputLayerDates,"%Y")) for (ax in seq_along(y)) { ind <- format(timeInfo$outputLayerDates,"%Y")==y[ax] write.table(x=timeInfo$outputLayerDates[ind], file=nam, col.names=FALSE, row.names=FALSE) } } } }
/R/whittaker.R
permissive
itati01/MODIS
R
false
false
17,749
r
#' Filter Vegetation Index with Modified Whittaker Approach #' #' @description #' Use a modified Whittaker filter function (see References) from package #' **ptw** to filter a vegetation index (VI) time series of satellite data. #' #' @param vi `Raster*` or `character` file names, sorted VI. Use [preStack()] #' functionality to ensure the right input. #' @param w `Raster*` or `character` file names. In case of MODIS composite, the #' sorted 'VI_Quality' layers. #' @param t `Raster*` or `character` file names. In case of MODIS composite, the #' sorted 'composite_day_of_the_year' layers. If missing, the date is #' determined using 'timeInfo'. #' @param timeInfo Output from [orgTime()]. #' @param lambda `character` or `integer`. Yearly lambda value passed to #' [ptw::whit2()]. If set as `character` (i.e., `lambda = "600"`), it is not #' adapted to the time series length, but used as a fixed value (see Details). #' High values = stiff/rigid spline. #' @param nIter `integer`. Number of iterations for the upper envelope fitting. #' @param outputAs `character`, organization of output files. `"single"` #' (default) means each date one `RasterLayer`; `"yearly"` a `RasterBrick` for #' each year, and `"one"` one `RasterBrick` for the entire time series. #' @param collapse `logical`. Collapse input data of multiple years into one #' single year before filtering. #' @param prefixSuffix `character`, file naming. Names are dot-separated: #' `paste0(prefixSuffix[1], "YYYDDD", lambda, prefixSuffix[2], ".defaultFileExtension")`. #' @param outDirPath `character`, output path. Defaults to the current working #' directory. #' @param outlierThreshold `numeric` in the same unit as 'vi', used for outlier #' removal (see Details). #' @param mergeDoyFun Especially when using `collapse = TRUE`, multiple #' measurements for one day can be present. 
Here you can choose how those #' values are merged to one single value: `"max"` uses the highest value, #' `"mean"` or `"weighted.mean"` use [mean()] or [stats::weighted.mean()]. #' @param ... Arguments passed to [raster::writeRaster()] (except for #' 'filename'). #' #' @return #' A Whittaker-smoothed `RasterStack`. #' #' @details #' The argument 'lambda' is passed to `MODIS:::miwhitatzb1`. You can set it as #' yearly 'lambda', which means that it doesn't matter how long the input time #' series is because 'lambda' is adapted to it with: #' `lambda * ('length of input time series in days' / 365)`. The input length #' can differ from the output because of the 'pillow' argument in [orgTime()]. #' But it can also be set as `character` (i.e., `lambda = "1000"`). In this #' case, the adaption to the time series length is not performed. #' #' @references #' Modified Whittaker smoother, according to Atzberger & Eilers 2011 #' International Journal of Digital Earth 4(5):365-386, #' \doi{10.1080/17538947.2010.505664}. #' Implementation in R: Agustin Lobo 2012 #' #' @note #' Currently tested on MODIS and Landsat data. Using M*D13, it is also possible #' to use the 'composite_day_of_the_year' and the 'VI_Quality' layers. #' #' @seealso #' [smooth.spline.raster()], [raster::raster()]. #' #' @author #' Matteo Mattiuzzi and Agustin Lobo #' #' @examples #' \dontrun{ #' # The following function will download bit more than 1 year of MOD13A1 (~180mB) and therefore #' # take while to execute! 
Data will be downloaded to options("MODIS_localArcPath") and processed
#' # to 'paste0(options("MODIS_outDirPath"),"fullCapa")'
#' # You need to extract: 'vegetation index', 'VI_Quality layer', and 'composite day of the year',
#' # this is expressed by the argument 'SDSstring'
#' runGdal(product="MOD13A2",begin="2004340",extent="ireland",end="2006020", job="fullCapa",
#' SDSstring="101000000010")
#' path <- paste0(options("MODIS_outDirPath"),"fullCapa")
#'
#' # the only obligatory dataset is the vegetation index
#' # get the 'vi' data in the source directory:
#' vi <- preStack(path=path, pattern="*_NDVI.tif$")
#'
#' # "orgTime" detects timing information of the input data and generates based on the arguments
#' # the output date information.
#' # For spline functions (in general) the beginning and the end of the time series
#' # is always problematic. So there is the argument "pillow" (default 75 days) that adds
#' # (if available) some more layers on the two endings.
#' timeInfo <- orgTime(vi,nDays=16,begin="2005001",end="2005365",pillow=40)
#'
#' # now re-run "preStack" with two differences, 'files' (output of the first 'preStack' call)
#' # and the 'timeInfo'
#' # Here only the data needed for the filtering is extracted:
#' vi <- preStack(files=vi,timeInfo=timeInfo)
#'
#' whittaker.raster(vi,timeInfo=timeInfo,lambda=5000)
#'
#' # if the files are M*D13 you can use also Quality layers and the composite day of the year:
#' wt <- preStack(path=path, pattern="*_VI_Quality.tif$", timeInfo=timeInfo)
#' # can also be already stacked:
#' inT <- preStack(path=path, pattern="*_composite_day_of_the_year.tif$", timeInfo=timeInfo)
#'
#' whittaker.raster(vi=vi, wt=wt, inT=inT, timeInfo=timeInfo, lambda=5000, overwrite=TRUE)
#' }
#'
#' @name whittaker.raster
#' @export whittaker.raster
# Whittaker smoothing of a raster time series (e.g. MODIS vegetation index),
# with optional per-pixel quality weights ('w') and per-pixel composite dates
# ('t'). Results are streamed block-by-block to disk via the 'raster' package
# (writeStart/writeValues/writeStop) and returned as a RasterStack.
whittaker.raster <- function(vi, w=NULL, t=NULL, timeInfo = orgTime(vi), lambda = 5000, nIter= 3,
                             outputAs="single", collapse=FALSE, prefixSuffix=c("MCD","ndvi"),
                             outDirPath=".", outlierThreshold=NULL, mergeDoyFun="max", ...)
{
  # collect additional settings (bit handling, datatype, overwrite, ...) from '...'
  opts <- combineOptions(...)
  outDirPath <- setPath(outDirPath)
  bitShift <- opts$bitShift
  bitMask <- opts$bitMask
  threshold <- opts$threshold
  dataFormat <- opts$dataFormat

  # validate the requested output driver against what 'raster' can write
  rasterOut <- toupper(raster::writeFormats())
  if(!toupper(dataFormat) %in% rasterOut[,"name"]) {
    stop("Unknown or unsupported data format: '", dataFormat, "'. Please run raster::writeFormats() (column 'name') for supported file types.\n")
  }

  # minimum number of usable observations per pixel before it gets filtered
  minDat <- ifelse(is.null(opts$minDat), 3, opts$minDat) # 3 is very small!

  # 'fitt' holds the positions (within the internal day vector) that are
  # finally extracted from the smoothed series.
  if (collapse) {
    if(timeInfo$call$nDays == "asIn") stop("Argument nDays = 'asIn' (passed to orgTime()) is not allowed when using collapse = TRUE.\n")
    fitt <- seq(as.numeric(format(min(timeInfo$outputLayerDates),"%j")),as.numeric(format(max(timeInfo$outputLayerDates),"%j")),by=timeInfo$call$nDays) + timeInfo$call$pillow
  } else {
    fitt <- timeInfo$outSeq
  }

  inlam <- lambda
  ## fixed lambda (character input means: use exactly this value)
  if (is.character(lambda)) {
    cat("Using fixed 'lambda': ", lambda, ".\n", sep = "")
    nameL <- "fL"
    ## yearly lambda: numeric input is rescaled to the actual period length
  } else {
    if (collapse) {
      lambda <- lambda * ((365 + 2 * timeInfo$call$pillow) / 365)
      cat("Yearly 'lambda' is:", inlam, "\nNow changed with lambda*((365+2*pillow)/365) to:",lambda,"\n")
    } else {
      lambda <- lambda * ((max(timeInfo$inSeq) - min(timeInfo$inSeq) - 1) / 365)
      cat("Yearly 'lambda' is: ", inlam, ".\n", "Now changed to lambda * ('length of input period in days' / 365): ", lambda, ".\n", sep = "")
    }
    nameL <- "yL"
  }
  if (is.character(inlam)) inlam <- as.numeric(inlam)
  inlam <- round(inlam) # from here on used only in the output filename
  lambda <- as.numeric(lambda)

  # coerce inputs (file names / lists) to Raster* objects
  if (!inherits(vi, "Raster")) vi <- raster::stack(vi, quick = TRUE)
  if(!inherits(w, "Raster") & !is.null(w)) w <- raster::stack(w, quick = TRUE)
  if(!inherits(t, "Raster") & !is.null(t)) t <- raster::stack(t, quick = TRUE)

  if (is.null(opts$datatype)) opts$datatype <- raster::dataType(vi[[1]])
  # integer output datatypes get rounded results, float ones are kept as-is
  if (grepl("FLT", opts$datatype)) { doround <- FALSE } else { doround <- TRUE }
  if (is.null(opts$overwrite)) { opts$overwrite <- FALSE }
  outputAs <- tolower(outputAs)

  # ---- open the output file(s) depending on 'collapse' / 'outputAs' ----
  if (collapse) # use only the DOY, omitting the year (all years fitted into one)
  {
    d <- sprintf("%03d",seq(as.numeric(format(min(timeInfo$outputLayerDates),"%j")),as.numeric(format(max(timeInfo$outputLayerDates),"%j")),by=timeInfo$call$nDays))
    if(outputAs=="single")
    {
      oname <- paste0(outDirPath,prefixSuffix[1],".",d,".",prefixSuffix[2])
      b <- vector(mode="list",length=length(oname))
      for(a in seq_along(oname))
      {
        b[[a]] <- raster(vi)
        b[[a]] <- writeStart(b[[a]], filename=oname[a], datatype=opts$datatype , overwrite=opts$overwrite, format = dataFormat)
      }
    } else
    {
      # NOTE(review): 'oname' is not assigned in this branch (only in the
      # outputAs=="single" branch above), so writeStart() fails with
      # "object 'oname' not found" — confirm and construct a filename here.
      b <- list()
      b[[1]] <- writeStart(brick(raster(vi),nl=length(d)), filename=oname, datatype=opts$datatype , overwrite=opts$overwrite, format = dataFormat)
      names(b[[1]]) <- paste0("doy",d)
    }
  } else if (outputAs=="yearly")
  {
    # one multi-layer brick per calendar year
    b <- vector(mode="list",length=length(unique(format(timeInfo$outputLayerDates,"%Y"))))
    for (a in seq_along(unique(format(timeInfo$outputLayerDates,"%Y"))))
    {
      y <- unique(format(timeInfo$outputLayerDates,"%Y"))[a]
      oname <- paste0(outDirPath,prefixSuffix[1],".year",y,".",nameL,inlam,".",prefixSuffix[2])
      names <- timeInfo$outputLayerDates[format(timeInfo$outputLayerDates,"%Y")==y]
      b[[a]] <- brick(raster(vi),nl=as.integer(sum(format(timeInfo$outputLayerDates,"%Y")==y)), values=FALSE)
      b[[a]] <- writeStart(b[[a]], filename=oname, datatype=opts$datatype , overwrite=opts$overwrite, format = dataFormat)
      names(b[[a]]) <- names
    }
  } else if(outputAs=="one")
  {
    # a single brick spanning the whole output period
    y <- unique(format(timeInfo$outputLayerDates,"%Y"))
    oname <- paste0(outDirPath,prefixSuffix[1],"_from",paste0(y,collapse="to"),".",nameL,inlam,".",prefixSuffix[2])
    b <- list()
    b[[1]] <- brick(raster(vi),nl=length(fitt), values=FALSE)
    b[[1]] <- writeStart(b[[1]], filename=oname, datatype=opts$datatype , overwrite=opts$overwrite, format = dataFormat)
    names(b[[1]]) <- timeInfo$outputLayerDates
  } else if (outputAs=="single")
  {
    # one single-layer file per output date
    d <- sort(format(timeInfo$outputLayerDates,"%Y%j"))
    oname <- paste0(outDirPath,prefixSuffix[1],".",d,".",nameL,inlam,".",prefixSuffix[2])
    b <- vector(mode="list",length=length(oname))
    for(a in seq_along(oname))
    {
      b[[a]] <- raster::raster(vi)
      b[[a]] <- writeStart(b[[a]], filename=oname[a], datatype=opts$datatype , overwrite=opts$overwrite, format = dataFormat)
    }
  }

  tr <- raster::blockSize(vi)
  cat("Data is in, start processing!\n")

  # choose the strategy for merging observations that share the same day
  # NOTE(review): for any other 'mergeDoyFun' value 'mergeFun' stays
  # undefined and clFun() fails later — consider validating with match.arg().
  if (mergeDoyFun == "max") {
    mergeFun <- unifyDoubleMX
  } else if (mergeDoyFun == "weighted.mean" | mergeDoyFun == "mean") {
    mergeFun <- unifyDoubleWM
  }

  # clFun processes one block of rows: it builds transposed value/weight/day
  # matrices (one column per pixel), merges duplicated composite days,
  # optionally removes outliers, and runs the iterative upper-envelope
  # Whittaker fit per pixel.
  clFun <- function(l) {
    val <- raster::getValues(vi, row = tr$row[l], nrows = tr$nrows[l])
    val <- t(val)
    mtrdim <- dim(val)
    # set0 marks observations that must be excluded (NA data, zero weight, ...)
    set0 <- matrix(FALSE,nrow = mtrdim[1], ncol = mtrdim[2])
    set0[is.na(val)] <- TRUE

    ## if 'VI_Quality' is supplied:
    if (!is.null(w)) {
      wtu <- raster::getValues(w, row = tr$row[l], nrows = tr$nrows[l])
      # is it not a weight info [0-1]? then decode the quality bits first
      if (max(wtu, na.rm = TRUE) > 1) {
        if(is.null(bitShift) | is.null(bitMask)) {
          # try to detect the 'VI usefulness' layer of the product
          bits <- detectBitInfo(vi, "VI usefulness", warn = FALSE)
          bitShift <- bits$bitShift
          bitMask <- bits$bitMask
        }
        if(is.null(bitShift) | is.null(bitMask)) stop("Could not extract 'bits' for weighting from this product. ", "Use '?makeWeights' function to generate weights manually!")
        wtu <- makeWeights(wtu, bitShift = bitShift, bitMask = bitMask, threshold = threshold, decodeOnly = FALSE)
      }
      wtu <- t(wtu)
      set0[wtu==0] <- TRUE
      set0[is.na(wtu)] <- TRUE
      ## else if 'VI_Quality' is not supplied, then weight = 1:
    } else {
      wtu <- matrix(1, nrow = mtrdim[1], ncol = mtrdim[2])
    }

    # per-pixel composite day of the year, if supplied
    if (inherits(t, "Raster")) {
      inTu <- raster::getValues(t, row = tr$row[l], nrows = tr$nrows[l])
      inTu <- t(inTu)
      set0[is.na(inTu)] <- TRUE
      set0[inTu <= 0] <- TRUE
      t0 <- min(timeInfo$inDoys[1]) - 1
      if (!collapse) {
        # convert per-pixel DOYs into a continuous day sequence
        inTu <- t(repDoy(t(inTu), layerDate = timeInfo, bias = -t0))
      }
      inTu[set0] <- 0
    } else {
      # no composite-day layer: fall back to the nominal layer dates
      if (collapse) {
        inTu <- matrix(timeInfo$inDoys,nrow=length(timeInfo$inDoys),ncol=mtrdim[2])
      } else {
        inTu <- matrix(timeInfo$inSeq,nrow=length(timeInfo$inSeq),ncol=mtrdim[2])
      }
    }

    # the entire info to use or not a pix is in "wtu"
    wtu[set0] <- 0
    val[set0] <- 0

    out <- matrix(NA, nrow = length(fitt), ncol = mtrdim[2])

    if (!is.null(outlierThreshold)) {
      # down-weight observations deviating more than 'threshold' from a first fit
      kickOutlier <- function(vals, weights, lambda, threshold) {
        fTS <- ptw::whit2(vals, w = weights, lambda = lambda)
        weights[weights==1][abs(vals[weights==1]-fTS[weights==1]) > threshold] <- 0
        return(weights)
      }
    } else {
      # if is.null(outlierThreshold) generate a no-op function to avoid a per-pixel "if"
      kickOutlier <- function(vals, weights, lambda, threshold) {
        return(weights)
      }
    }

    # template vector covering the fitting period (padded for safety,
    # because layer DOY + effective composite DOY can overshoot)
    if (collapse) {
      vec0 <- rep(0,365 + (2*timeInfo$call$pillow) + 30) # add a safe length of data
    } else {
      vec0 <- rep(0,max(timeInfo$inSeq,timeInfo$outSeq) - min(timeInfo$inSeq,timeInfo$outSeq) - 1 + 30)
    }

    # keep only pixels with at least "minDat" usable input values
    Cvec <- (colSums(wtu > 0) >= minDat)
    Cvec <- (1:mtrdim[2])[Cvec]
    ind <- inTu > 0

    # suppress warnings during the per-pixel loop; restored afterwards
    win <- options("warn")
    options(warn=-1)
    for (u in Cvec) {
      index <- ind[,u]
      # merge observations that fall on the same day
      use <- mergeFun(vx=val[index,u],wx=wtu[index,u],tx=inTu[index,u])
      valVec <- wtVec <- vec0
      if(!collapse) {
        valVec[use$tx] <- use$vx
        wtVec[use$tx] <- use$wx
      } else
      {
        newOrder <- doCollapse(tx=use$tx,pillow=timeInfo$call$pillow)
        valVec[newOrder$sequence] <- use$vx[newOrder$order]
        wtVec[newOrder$sequence] <- use$wx[newOrder$order]
      }
      wtVec <- kickOutlier(vals=valVec,weights=wtVec,lambda=lambda,threshold=outlierThreshold)
      #plot(valVec,ylim=c(-1000,9000))

      # iterative upper-envelope fit: raise observations to the smoothed
      # curve and re-smooth, nIter times
      for(i in 1:nIter) {
        fTS <- ptw::whit2(valVec,w=wtVec,lambda=lambda)
        valVec[valVec < fTS] <- fTS[valVec < fTS]
      }
      out[,u] <- fTS[fitt]
      #lines(fTS,col=2)
    }
    options(warn=win$warn)

    # NOTE(review): columns of skipped pixels are all NA, so colSums() yields
    # NA there and this logical index can contain NA (subscripted assignment
    # with NA errors in R) — consider colSums(abs(out), na.rm=TRUE)==0.
    out[,colSums(abs(out))==0] <- NA
    return(t(out))
  }

  # stream every block through clFun and write the results
  for (i in seq_along(tr$row)) {
    res <- clFun(i)
    if (doround) res <- round(res)
    b <- writeValuesMODIS(b, res, tr$row[i], timeInfo, collapse, outputAs)
  }
  writeStopMODIS(b,timeInfo,outputAs,collapse)
  return(raster::stack(b))
}

# Merge observations sharing the same day: value = weighted mean,
# weight = max of the duplicates. Returns list(vx, wx, tx) with duplicates removed.
unifyDoubleWM <- function(vx,wx,tx)
{
  tx <- as.numeric(tx)
  vx <- as.numeric(vx)
  wx <- as.numeric(wx)
  double <- tx[duplicated(tx)]
  if(length(double)>0)
  {
    double <- unique(double)
    for(i in seq_along(double))
    {
      inx <- which(tx==double[i])
      # collapse the duplicates into the first slot, then drop the rest
      vx[inx[1]] <- weighted.mean(vx[inx],w=wx[inx])
      wx[inx[1]] <- max(wx[inx])
      vx <- vx[-inx[-1]]
      wx <- wx[-inx[-1]]
      tx <- tx[-inx[-1]]
    }
  }
  list(vx=vx,wx=wx,tx=tx)
}

# Merge observations sharing the same day by keeping the one with the
# highest weight. Returns list(vx, wx, tx) with duplicates removed.
unifyDoubleMX <- function(vx,wx,tx)
{
  tx <- as.numeric(tx)
  vx <- as.numeric(vx)
  wx <- as.numeric(wx)
  double <- tx[duplicated(tx)]
  if(length(double)>0)
  {
    double <- unique(double)
    for(i in seq_along(double))
    {
      inx <- which(tx==double[i])
      mx <- which.max(wx[inx])
      # keep only the best-weighted duplicate
      vx <- vx[-inx[-mx]]
      wx <- wx[-inx[-mx]]
      tx <- tx[-inx[-mx]]
    }
  }
  list(vx=vx,wx=wx,tx=tx)
}

# Map DOYs of a multi-year series onto a single pillow-padded year:
# late-year observations are mirrored to the front, early-year ones to the
# back, so the smoother sees a cyclic series.
doCollapse <- function(tx,pillow)
{
  ord <- order(tx)
  txS <- tx[ord]
  t0 <- 365 - pillow
  tS <- ord[txS >= t0]
  tE <- ord[txS <= pillow]
  s0 <- txS[txS >= t0] - t0
  s1 <- txS + pillow
  s2 <- txS[txS <= pillow] + 365 + pillow
  list(order=c(tS,ord,tE),sequence=c(s0,s1,s2)+1)
}

#####################################################

# Write one block of fitted values ('val', pixels in rows) into the open
# output object(s) 'b', dispatching on the chosen output layout.
writeValuesMODIS <- function(b,val,row,timeInfo,collapse,outputAs)
{
  if(collapse)
  {
    d <- seq_along(sprintf("%03d",seq(as.numeric(format(min(timeInfo$outputLayerDates),"%j")),as.numeric(format(max(timeInfo$outputLayerDates),"%j")),by=timeInfo$call$nDays)))
  } else
  {
    d <- seq_along(b)
  }
  if(outputAs=="single")
  {
    # one file per date: one column of 'val' per file
    for (a in seq_along(d))
    {
      b[[a]] <- writeValues(b[[a]], val[,a], row)
    }
  } else if(outputAs=="one")
  {
    b[[1]] <- writeValues(b[[1]], val, row)
  } else
  {
    # yearly bricks: write the columns belonging to each year
    for (a in seq_along(d))
    {
      y <- unique(format(timeInfo$outputLayerDates,"%Y"))[a]
      b[[a]] <- writeValues(b[[a]], val[,format(timeInfo$outputLayerDates,"%Y")==y], row)
    }
  }
  return(b)
}

# Finalise the open output object(s) and write a sidecar text file with the
# layer dates next to each raster file.
writeStopMODIS <- function(b,timeInfo,outputAs,collapse)
{
  for (a in seq_along(b))
  {
    b[[a]] <- writeStop(b[[a]])
    nam <- filename(b[[a]])
    extension(nam) <- ""
    if (collapse & outputAs!="single")
    {
      write.table(x=unique(format(timeInfo$outputLayerDates,"%j")), file=nam, col.names=FALSE, row.names=FALSE)
    } else if(outputAs=="one")
    {
      write.table(x=timeInfo$outputLayerDates, file=nam, col.names=FALSE, row.names=FALSE)
    } else if (outputAs=="yearly")
    {
      # NOTE(review): every year writes to the same file 'nam', so only the
      # last year's dates survive — presumably 'nam' should vary with 'ax';
      # confirm against the package's intended sidecar layout.
      y <- unique(format(timeInfo$outputLayerDates,"%Y"))
      for (ax in seq_along(y))
      {
        ind <- format(timeInfo$outputLayerDates,"%Y")==y[ax]
        write.table(x=timeInfo$outputLayerDates[ind], file=nam, col.names=FALSE, row.names=FALSE)
      }
    }
  }
}
# Descriptive Analysis
# Central VA Youth Volunteer Hours
# Redcross Internship
# Yixin Tang (July 29th, 2021)
# Data Source: Red Cross Hour by Race (Calculated by Sophia Walton)
# OUTLIERS INFO ANALYSIS
#
# Purpose: restrict the volunteer-hours table to the statistical outliers
# (as flagged by boxplot()) and re-draw the descriptive charts for that
# subset, broken down by race, gender and club membership.

# Load packages
library(tidyverse)
library(ggplot2)
library(readxl)
library(viridis)

# Read in the data
race <- read_excel("/Users/yixintang/Redcross/Demographics/redcross_hoursbyrace.xlsx")

# boxplot(..., plot = FALSE)$out returns the observations outside the whiskers
outliers <- boxplot(race$`hours worked since 8/1/20`, plot=FALSE)$out
min(outliers)  # smallest hour count still flagged as an outlier

# Outliers info: keep only rows whose hour count is an outlier.
# NOTE(review): if the same outlier value occurs several times, each
# occurrence re-appends all matching rows; this preserves the original
# script's duplication behaviour on purpose.
outliers_info <- data.frame()
for (i in seq_along(outliers)){
  position <- which(race$`hours worked since 8/1/20`==outliers[i])
  x <- race[position,]
  outliers_info <- rbind(outliers_info,x)
}
race <- outliers_info
nrow(race)

###################
###### RACE #######
###################

# Number of Volunteers by Race
ggplot(race, aes(x=forcats::fct_infreq(race),fill=factor(race)))+
  geom_bar(stat="count", width=0.7)+
  theme(axis.text.x = element_text(angle = 30, size=9, vjust=1, hjust=1))+
  scale_fill_viridis(discrete=TRUE,name="Race")+
  labs(x="Race", title="Number of Volunteers by Race", size=12)+
  geom_text(aes(label=..count..),stat='count', position=position_dodge(0.9),vjust=-0.2)
# Reorder geom_bar from high to low when using stat="count"
# https://stackoverflow.com/questions/56599684/reorder-geom-bar-from-high-to-low-when-using-stat-count

# Total Hours by Race
TH<-race %>%
  group_by(race) %>%
  summarize(total_hour = sum(`hours worked since 8/1/20`, na.rm = TRUE))

ggplot(data=TH,aes(x=reorder(race,-total_hour), y=total_hour,fill=factor(race))) +
  geom_bar(stat="identity",position="dodge")+
  theme(axis.text.x = element_text(angle = 30, size=9, vjust=1, hjust=1))+
  scale_fill_viridis(discrete=TRUE,name="Race")+
  labs(x="Race", title="Total Hours by Race", size=12)+
  geom_text(aes(label=total_hour), vjust=-0.3, size=3.5)

# Averaged Hours by Race
# NOTE(review): this relies on count() and group_by()/summarize() returning
# the groups in the same (alphabetical) order so the division lines up.
race_count<-race %>% count(race)
TH$Average<-TH$total_hour/race_count$n
ggplot(data=TH,aes(x=reorder(race,-Average), y=Average,fill=factor(race))) +
  geom_bar(stat="identity",position="dodge")+
  theme(axis.text.x = element_text(angle = 30, size=9, vjust=1, hjust=1))+
  scale_fill_viridis(discrete=TRUE,name="Race")+
  labs(x="Race", title="Averaged Hours by Race", size=12)+
  geom_text(aes(label=round(Average,2)), vjust=-0.3, size=3.5)

###################
##### GENDER ######
###################

# Number of Volunteers by Gender
# (fixed: x-axis label used to read "Race" although the axis shows gender)
ggplot(race, aes(x=forcats::fct_infreq(gender),fill=factor(gender)))+
  geom_bar(stat="count", width=0.5,position=position_dodge())+
  theme(axis.text.x = element_text(angle = 30, size=9, vjust=1, hjust=1))+
  scale_fill_manual(values=c('red','blue','grey','yellow'),name="Gender")+
  labs(x="Gender", title="Number of Volunteers by Gender", size=12)+
  geom_text(aes(label=..count..),stat='count', position=position_dodge(0.9),vjust=-0.2)

# Total Hours by Gender
TH_gender<-race %>%
  group_by(gender) %>%
  summarize(total_hour = sum(`hours worked since 8/1/20`, na.rm = TRUE))

ggplot(data=TH_gender,aes(x=reorder(gender,-total_hour), y=total_hour,fill=factor(gender))) +
  geom_bar(stat="identity",width=0.5)+
  theme(axis.text.x = element_text(angle = 30, size=9, vjust=1, hjust=1))+
  scale_fill_manual(values=c('red','blue','grey','yellow'),name="Gender")+
  labs(x="Gender", title="Total Hours by Gender", size=12)+
  geom_text(aes(label=total_hour), vjust=-0.3, size=3.5)

# Averaged Hours by Gender
gender_count<-race %>% count(gender)
TH_gender$Average<-TH_gender$total_hour/gender_count$n
ggplot(data=TH_gender,aes(x=reorder(gender,-Average), y=Average,fill=factor(gender))) +
  geom_bar(stat="identity",position="dodge",width=0.5)+
  theme(axis.text.x = element_text(angle = 30, size=9, vjust=1, hjust=1))+
  scale_fill_manual(values=c('red','blue','grey','yellow'),name="Gender")+
  labs(x="Gender", title="Averaged Hours by Gender", size=12)+
  geom_text(aes(label=round(Average,2)), vjust=-0.3, size=3.5)

# Hispanic: highest, all female (male: potential)
# Black: male is higher than female
# Asian: female is high but male is lower
# Middle Eastern: only female
# White: male significantly higher, closed to Hispanic female

########################
##### RACE/GENDER ######
########################

png("/Users/yixintang/Redcross/Demographics/Number_of_Volunteers(outliers).png")
# Number of Volunteers by Race/Gender
ggplot(race, aes(x=forcats::fct_infreq(race),fill=factor(gender)))+
  geom_bar(stat="count", width=0.7,position=position_dodge())+
  theme(axis.text.x = element_text(angle = 30, size=9, vjust=1, hjust=1))+
  scale_fill_manual(values=c('pink','light blue','grey','yellow'),name="Gender")+
  labs(x="Race", title="Charlottesville Volunteers \nNumber of Volunteers by Race&Gender (Outliers)", size=12)+
  geom_text(aes(label=..count..),stat='count', position=position_dodge(0.9),vjust=-0.3)
dev.off()

# Total Hours by Race/Gender
TH_race_gender<-race %>%
  group_by(race,gender) %>%
  summarize(total_hour = sum(`hours worked since 8/1/20`, na.rm = TRUE))

png("/Users/yixintang/Redcross/Demographics/Total_Hour(outliers).png")
# (fixed: x-axis label used to read "Gender" although the axis shows race)
ggplot(data=TH_race_gender,aes(x=reorder(race,-total_hour), y=total_hour,fill=factor(gender))) +
  geom_bar(stat="identity",width=0.7,position=position_dodge())+
  theme(axis.text.x = element_text(angle = 30, size=9, vjust=1, hjust=1))+
  scale_fill_manual(values=c('pink','light blue','grey','yellow'),name="Gender")+
  labs(x="Race", title="Charlottesville Volunteers \nTotal Hours by Race&Gender(Outliers)", size=12)+
  geom_text(aes(label=total_hour), vjust=-0.3, size=3.5, position=position_dodge(0.9))
# labels: overlap together
# have to fix it: https://stackoverflow.com/questions/40542685/how-to-jitter-remove-overlap-for-geom-text-labels
dev.off()

#############################
######## HIGHLIGHT ##########
#############################

# Averaged Hours by Race/Gender
race_gender_count<-race %>% count(race,gender)
TH_race_gender$Average<-TH_race_gender$total_hour/race_gender_count$n
png("/Users/yixintang/Redcross/Demographics/Average_Hour(outliers).png")
# (fixed: x-axis label used to read "Gender" although the axis shows race)
ggplot(data=TH_race_gender,aes(x=reorder(race,-Average), y=Average,fill=factor(gender))) +
  geom_bar(stat="identity",width=0.7,position=position_dodge())+
  theme(axis.text.x = element_text(angle = 30, size=9, vjust=1, hjust=1))+
  scale_fill_manual(values=c('pink','light blue','grey','yellow'),name="Gender")+
  labs(x="Race", title="Charlottesville Volunteers \nAveraged Hours by Race&Gender(Outliers)", size=12)+
  geom_text(aes(label=round(Average,2)), vjust=-0.3, size=3.5, position=position_dodge(0.9))
dev.off()

# Boxplot Hours by Race/Gender
ggplot(data=race,aes(x=race,y=`hours worked since 8/1/20`,color=factor(gender)))+
  geom_boxplot()+
  scale_color_manual(values=c('red','blue','grey','yellow'),name="Gender")+
  labs(x="Race", title="Boxplot: Hours by Race/Gender", size=12)

########################
###### RACE/CLUB #######
########################

# Number of Volunteers by Race/Club
ggplot(race, aes(x=forcats::fct_infreq(race),fill=factor(`club?`)))+
  geom_bar(stat="count", width=0.7,position=position_dodge())+
  theme(axis.text.x = element_text(angle = 30, size=9, vjust=1, hjust=1))+
  scale_fill_viridis(discrete=TRUE,name="Club")+
  labs(x="Race", title="Number of Volunteers by Race/Club", size=12)+
  geom_text(aes(label=..count..),stat='count', position=position_dodge(0.9),vjust=-0.2)

# Total Hours by Race/Club
TH_race_club<-race %>%
  group_by(race,`club?`) %>%
  summarize(total_hour = sum(`hours worked since 8/1/20`, na.rm = TRUE))

# (fixed: title used to read "Total Hours by Race/Gender" — copy-paste bug —
# and the x-axis label used to read "Club" although the axis shows race)
ggplot(data=TH_race_club,aes(x=reorder(race,-total_hour), y=total_hour,fill=factor(`club?`))) +
  geom_bar(stat="identity",width=0.7,position=position_dodge())+
  theme(axis.text.x = element_text(angle = 30, size=9, vjust=1, hjust=1))+
  scale_fill_viridis(discrete=TRUE,name="Club")+
  labs(x="Race", title="Total Hours by Race/Club", size=12)+
  geom_text(aes(label=total_hour), vjust=-0.3, size=3.5)

# Averaged Hours by Race/Club
race_club_count<-race %>% count(race,`club?`)
TH_race_club$Average<-TH_race_club$total_hour/race_club_count$n
ggplot(data=TH_race_club,aes(x=reorder(race,-Average), y=Average,fill=factor(`club?`))) +
  geom_bar(stat="identity",width=0.7,position=position_dodge())+
  theme(axis.text.x = element_text(angle = 30, size=9, vjust=1, hjust=1))+
  scale_fill_viridis(discrete=TRUE,name="Club")+
  labs(x="Race", title="Averaged Hours by Race/Club", size=12)+
  geom_text(aes(label=round(Average,2)), vjust=-0.3, size=3.5, position=position_jitter(width=0.2,height=0.2))
/hoursbyrace(outliers).R
no_license
srw9rx/redcross_diversityanalysis_codebook
R
false
false
9,063
r
# Descriptive Analysis
# Central VA Youth Volunteer Hours
# Redcross Internship
# Yixin Tang (July 29th, 2021)
# Data Source: Red Cross Hour by Race (Calculated by Sophia Walton)
# OUTLIERS INFO ANALYSIS
#
# Purpose: restrict the volunteer-hours table to the statistical outliers
# (as flagged by boxplot()) and re-draw the descriptive charts for that
# subset, broken down by race, gender and club membership.

# Load packages
library(tidyverse)
library(ggplot2)
library(readxl)
library(viridis)

# Read in the data
race <- read_excel("/Users/yixintang/Redcross/Demographics/redcross_hoursbyrace.xlsx")

# boxplot(..., plot = FALSE)$out returns the observations outside the whiskers
outliers <-boxplot(race$`hours worked since 8/1/20`, plot=FALSE)$out
min(outliers)  # smallest hour count still flagged as an outlier

# Outliers info: keep only the rows whose hour count is an outlier value.
# NOTE(review): repeated outlier values re-append all matching rows, so
# duplicated hour counts produce duplicated rows here — confirm intended.
outliers_info<-data.frame()
for (i in 1:length(outliers)){
  position<-which(race$`hours worked since 8/1/20`==outliers[i])
  x<-race[position,]
  outliers_info<-rbind(outliers_info,x)
}
race<-outliers_info
nrow(race)

###################
###### RACE #######
###################

# Number of Volunteers by Race
ggplot(race, aes(x=forcats::fct_infreq(race),fill=factor(race)))+
  geom_bar(stat="count", width=0.7)+
  theme(axis.text.x = element_text(angle = 30, size=9, vjust=1, hjust=1))+
  scale_fill_viridis(discrete=TRUE,name="Race")+
  labs(x="Race", title="Number of Volunteers by Race", size=12)+
  geom_text(aes(label=..count..),stat='count', position=position_dodge(0.9),vjust=-0.2)
# Reorder geom_bar from high to low when using stat="count"
# https://stackoverflow.com/questions/56599684/reorder-geom-bar-from-high-to-low-when-using-stat-count

# Total Hours by Race
TH<-race %>%
  group_by(race) %>%
  summarize(total_hour = sum(`hours worked since 8/1/20`, na.rm = TRUE))

ggplot(data=TH,aes(x=reorder(race,-total_hour), y=total_hour,fill=factor(race))) +
  geom_bar(stat="identity",position="dodge")+
  theme(axis.text.x = element_text(angle = 30, size=9, vjust=1, hjust=1))+
  scale_fill_viridis(discrete=TRUE,name="Race")+
  labs(x="Race", title="Total Hours by Race", size=12)+
  geom_text(aes(label=total_hour), vjust=-0.3, size=3.5)

# Averaged Hours by Race
# NOTE(review): the division assumes count() and group_by()/summarize()
# return the groups in the same order — holds for default sorting.
race_count<-race %>% count(race)
TH$Average<-TH$total_hour/race_count$n
ggplot(data=TH,aes(x=reorder(race,-Average), y=Average,fill=factor(race))) +
  geom_bar(stat="identity",position="dodge")+
  theme(axis.text.x = element_text(angle = 30, size=9, vjust=1, hjust=1))+
  scale_fill_viridis(discrete=TRUE,name="Race")+
  labs(x="Race", title="Averaged Hours by Race", size=12)+
  geom_text(aes(label=round(Average,2)), vjust=-0.3, size=3.5)

###################
##### GENDER ######
###################

# Number of Volunteers by Gender
# NOTE(review): x-axis label reads "Race" but the axis shows gender.
ggplot(race, aes(x=forcats::fct_infreq(gender),fill=factor(gender)))+
  geom_bar(stat="count", width=0.5,position=position_dodge())+
  theme(axis.text.x = element_text(angle = 30, size=9, vjust=1, hjust=1))+
  scale_fill_manual(values=c('red','blue','grey','yellow'),name="Gender")+
  labs(x="Race", title="Number of Volunteers by Gender", size=12)+
  geom_text(aes(label=..count..),stat='count', position=position_dodge(0.9),vjust=-0.2)

# Total Hours by Gender
TH_gender<-race %>%
  group_by(gender) %>%
  summarize(total_hour = sum(`hours worked since 8/1/20`, na.rm = TRUE))

ggplot(data=TH_gender,aes(x=reorder(gender,-total_hour), y=total_hour,fill=factor(gender))) +
  geom_bar(stat="identity",width=0.5)+
  theme(axis.text.x = element_text(angle = 30, size=9, vjust=1, hjust=1))+
  scale_fill_manual(values=c('red','blue','grey','yellow'),name="Gender")+
  labs(x="Gender", title="Total Hours by Gender", size=12)+
  geom_text(aes(label=total_hour), vjust=-0.3, size=3.5)

# Averaged Hours by Gender
gender_count<-race %>% count(gender)
TH_gender$Average<-TH_gender$total_hour/gender_count$n
ggplot(data=TH_gender,aes(x=reorder(gender,-Average), y=Average,fill=factor(gender))) +
  geom_bar(stat="identity",position="dodge",width=0.5)+
  theme(axis.text.x = element_text(angle = 30, size=9, vjust=1, hjust=1))+
  scale_fill_manual(values=c('red','blue','grey','yellow'),name="Gender")+
  labs(x="Gender", title="Averaged Hours by Gender", size=12)+
  geom_text(aes(label=round(Average,2)), vjust=-0.3, size=3.5)

# Hispanic: highest, all female (male: potential)
# Black: male is higher than female
# Asian: female is high but male is lower
# Middle Eastern: only female
# White: male significantly higher, closed to Hispanic female

########################
##### RACE/GENDER ######
########################

png("/Users/yixintang/Redcross/Demographics/Number_of_Volunteers(outliers).png")
# Number of Volunteers by Race/Gender
ggplot(race, aes(x=forcats::fct_infreq(race),fill=factor(gender)))+
  geom_bar(stat="count", width=0.7,position=position_dodge())+
  theme(axis.text.x = element_text(angle = 30, size=9, vjust=1, hjust=1))+
  scale_fill_manual(values=c('pink','light blue','grey','yellow'),name="Gender")+
  labs(x="Race", title="Charlottesville Volunteers \nNumber of Volunteers by Race&Gender (Outliers)", size=12)+
  geom_text(aes(label=..count..),stat='count', position=position_dodge(0.9),vjust=-0.3)
dev.off()

# Total Hours by Race/Gender
TH_race_gender<-race %>%
  group_by(race,gender) %>%
  summarize(total_hour = sum(`hours worked since 8/1/20`, na.rm = TRUE))

png("/Users/yixintang/Redcross/Demographics/Total_Hour(outliers).png")
# NOTE(review): x-axis label reads "Gender" but the axis shows race.
ggplot(data=TH_race_gender,aes(x=reorder(race,-total_hour), y=total_hour,fill=factor(gender))) +
  geom_bar(stat="identity",width=0.7,position=position_dodge())+
  theme(axis.text.x = element_text(angle = 30, size=9, vjust=1, hjust=1))+
  scale_fill_manual(values=c('pink','light blue','grey','yellow'),name="Gender")+
  labs(x="Gender", title="Charlottesville Volunteers \nTotal Hours by Race&Gender(Outliers)", size=12)+
  geom_text(aes(label=total_hour), vjust=-0.3, size=3.5, position=position_dodge(0.9))
# labels: overlap together
# have to fix it: https://stackoverflow.com/questions/40542685/how-to-jitter-remove-overlap-for-geom-text-labels
dev.off()

#############################
######## HIGHLIGHT ##########
#############################

# Averaged Hours by Race/Gender
race_gender_count<-race %>% count(race,gender)
TH_race_gender$Average<-TH_race_gender$total_hour/race_gender_count$n
png("/Users/yixintang/Redcross/Demographics/Average_Hour(outliers).png")
ggplot(data=TH_race_gender,aes(x=reorder(race,-Average), y=Average,fill=factor(gender))) +
  geom_bar(stat="identity",width=0.7,position=position_dodge())+
  theme(axis.text.x = element_text(angle = 30, size=9, vjust=1, hjust=1))+
  scale_fill_manual(values=c('pink','light blue','grey','yellow'),name="Gender")+
  labs(x="Gender", title="Charlottesville Volunteers \nAveraged Hours by Race&Gender(Outliers)", size=12)+
  geom_text(aes(label=round(Average,2)), vjust=-0.3, size=3.5, position=position_dodge(0.9))
dev.off()

# Boxplot Hours by Race/Gender
ggplot(data=race,aes(x=race,y=`hours worked since 8/1/20`,color=factor(gender)))+
  geom_boxplot()+
  scale_color_manual(values=c('red','blue','grey','yellow'),name="Gender")+
  labs(x="Race", title="Boxplot: Hours by Race/Gender", size=12)

########################
###### RACE/CLUB #######
########################

# Number of Volunteers by Race/Club
ggplot(race, aes(x=forcats::fct_infreq(race),fill=factor(`club?`)))+
  geom_bar(stat="count", width=0.7,position=position_dodge())+
  theme(axis.text.x = element_text(angle = 30, size=9, vjust=1, hjust=1))+
  scale_fill_viridis(discrete=TRUE,name="Club")+
  labs(x="Race", title="Number of Volunteers by Race/Club", size=12)+
  geom_text(aes(label=..count..),stat='count', position=position_dodge(0.9),vjust=-0.2)

# Total Hours by Race/Club
TH_race_club<-race %>%
  group_by(race,`club?`) %>%
  summarize(total_hour = sum(`hours worked since 8/1/20`, na.rm = TRUE))

# NOTE(review): title reads "Total Hours by Race/Gender" but this chart is
# by race and club membership — looks like a copy-paste slip.
ggplot(data=TH_race_club,aes(x=reorder(race,-total_hour), y=total_hour,fill=factor(`club?`))) +
  geom_bar(stat="identity",width=0.7,position=position_dodge())+
  theme(axis.text.x = element_text(angle = 30, size=9, vjust=1, hjust=1))+
  scale_fill_viridis(discrete=TRUE,name="Club")+
  labs(x="Club", title="Total Hours by Race/Gender", size=12)+
  geom_text(aes(label=total_hour), vjust=-0.3, size=3.5)

# Averaged Hours by Race/Club
race_club_count<-race %>% count(race,`club?`)
TH_race_club$Average<-TH_race_club$total_hour/race_club_count$n
ggplot(data=TH_race_club,aes(x=reorder(race,-Average), y=Average,fill=factor(`club?`))) +
  geom_bar(stat="identity",width=0.7,position=position_dodge())+
  theme(axis.text.x = element_text(angle = 30, size=9, vjust=1, hjust=1))+
  scale_fill_viridis(discrete=TRUE,name="Club")+
  labs(x="Club", title="Averaged Hours by Race/Club", size=12)+
  geom_text(aes(label=round(Average,2)), vjust=-0.3, size=3.5, position=position_jitter(width=0.2,height=0.2))
# Evaluation report for the "Berufsbegleitende Studien" English / primary
# school cohort 2018/19-1 (bbSt Englisch Grundschule).
# Cleans the raw survey export, builds all descriptive plots and writes one
# PDF per course plus a CSV of the cleaned data for the whole group.
# Relies on project helpers defined elsewhere: bbst_1.recode(), plot.pie(),
# plot.bar(), plot.bar.div(), plot.bar.klassen(), plot.bar.einsatz(),
# plot.bar.fehlt(), batterie.recode(), plot.likert(), and the objects
# items_13, colors.n14, beberlin and steps.

kursname <- "bbSt Eng GS 18/19-1"
kursfile <- "bbst_gs_englisch_1819"

lab.kurs <- c( "A" = "Berufsbegleitende Studien Englisch Grundschule")
lev.kurs <- c( "Berufsbegleitende Studien Englisch Grundschule")

# Wording of the course-specific battery questions.
# (fixed: third item read "werplot.spden/wurden" — a pasted code fragment
# inside the word "werden".)
l.fragen.gs_englisch <- c(
  "Das Gesamtkonzept für die bbSt Englisch mit der Aufteilung in die Bereiche Fachdidaktik, Literaturwissenschaft, Linguistik und Sprache ist zielführend.",
  "Die Inhalte in den Bereichen Fachdidaktik, Literaturwissenschaft und Linguistik sind aufeinander abgestimmt.",
  "Meine persönlichen Erwartungen an die Ausbildung werden/wurden erfüllt.")

# Clean the raw export: keep the exam column plus all answer columns,
# stripping the "TICKED." prefix from their names.
# NOTE(review): dplyr::funs() is deprecated; with dplyr >= 0.8 prefer
# rename_at(vars(starts_with("TICKED.")), ~ str_replace(., "TICKED.", "")).
bbst_gs_englisch_c <- bbst_gs_englisch %>%
  select(Prüfung, starts_with("TICKED.")) %>%
  rename_at(vars(starts_with("TICKED.")), funs(str_replace(., "TICKED.", "")))

# project helper: recodes the answer columns into labelled factors
bbst_gs_englisch_1 <- bbst_1.recode(bbst_gs_englisch_c)

################
#### PLOTS #####
################

dir.create("4_Ergebnisse/Grafiken/bbst_gs_englisch", showWarnings = FALSE)

# One report for the whole group ("Englisch") plus, if several course
# levels exist, one report per level.
if (length(lev.kurs) > 1) {
  kurse <- c("Englisch", lev.kurs)
} else {
  kurse <- "Englisch"
}

for (i in kurse) {
  if (i == "Englisch") {
    # whole-group report: also dump the cleaned data as CSV
    file <- paste0("4_Ergebnisse/Grafiken/bbst_gs_englisch/", kursfile, ".pdf")
    anzahl <- paste0("Teilnehmende: ", nrow(bbst_gs_englisch_c))
    kursnr <- kursname
    write.csv(bbst_gs_englisch_c, paste0("4_Ergebnisse/Tabellen/", kursfile, ".csv"), na = "")
    if (length(lev.kurs) > 1) {
      # overview bar chart of participants per course level
      plot.kurs <- bbst_gs_englisch_1 %>%
        filter(!is.na((kurs)), str_detect(kurs, pattern = i)) %>%
        select(kurs) %>%
        ggplot(aes(x = reorder(kurs, desc(kurs)), fill = kurs)) +
        geom_bar(width = 0.6) +
        coord_flip() +
        scale_fill_manual(values = colors.n14) +
        theme_minimal() +
        labs(x = NULL, y = "Anzahl", title = "Kurse") +
        theme(legend.position = "none",
              plot.title = element_text(hjust = 0, size = 14, face = "bold"),
              axis.ticks.x = element_blank(),
              panel.grid.minor.x = element_blank(),
              panel.grid.major.y = element_blank())
    }
  } else {
    # per-course report: suffix file and heading with the course letter
    file <- paste0("4_Ergebnisse/Grafiken/bbst_gs_englisch/", kursfile, " ", str_sub(i, -1, -1), ".pdf")
    anzahl <- paste0("Teilnehmende: ", nrow(bbst_gs_englisch_1[grepl(i, bbst_gs_englisch_1$kurs), ]))
    kursnr <- paste0(kursname, " ", str_sub(i, -1, -1))
  }

  # ---- demographic and workload plots (project plotting helpers) ----
  plot.geschl <- plot.pie(bbst_gs_englisch_1, i, geschl, TRUE, "Geschlecht")
  plot.alter <- plot.pie(bbst_gs_englisch_1, i, alter, TRUE, "Alter")
  plot.fach <- plot.pie(bbst_gs_englisch_1, i, fach, TRUE, "Welche Fachrichtung haben Sie studiert?")
  plot.erfahr <- plot.bar(bbst_gs_englisch_1, i, erfahr, TRUE, "Wie viele Jahre Berufserfahrung haben Sie in der studierten Fachrichtung?")
  plot.querein <- plot.bar(bbst_gs_englisch_1, i, querein, TRUE, "Sind Sie bereits vor Ihrem Quereinstieg in Ausbildungsbereichen tätig gewesen \n(z.B. als Trainer/in, im Bereich Nachhilfe, als Chorleitung etc.)?")
  plot.aufmerk_ausb <- plot.bar(bbst_gs_englisch_1, i, aufmerk_ausb, TRUE, "Wie wurden Sie auf diese Ausbildungsmöglichkeit aufmerksam?")
  plot.schultyp <- plot.bar(bbst_gs_englisch_1, i, schultyp, TRUE, "An welchem Schultyp sind Sie eingesetzt?")
  plot.klassen <- plot.bar.klassen(bbst_gs_englisch_c, bbst_gs_englisch_1)
  plot.stunden <- plot.bar(bbst_gs_englisch_1, i, stunden, TRUE, "Wie viele Stunden eigenständigen Unterricht erteilen Sie pro Woche?")
  plot.stunden_fach <- plot.bar(bbst_gs_englisch_1, i, stunden_fach, TRUE, "Wie viele Stunden davon unterrichten Sie Ihr anerkanntes Fach?")
  plot.stunden_stud <- plot.bar(bbst_gs_englisch_1, i, stunden_stud, TRUE, "Wie viele Stunden unterrichten Sie bereits in dem Fach,\nwelches Sie gerade in den berufsbegleitenden Studien belegen?")
  plot.einsatz <- plot.bar.einsatz(bbst_gs_englisch_c, bbst_gs_englisch_1)
  plot.leitung <- plot.pie(bbst_gs_englisch_1, i, leitung, TRUE, "Fühlen Sie sich seitens der Schulleitung ausreichend unterstützt?")
  plot.belast <- plot.bar.div(bbst_gs_englisch_1, i, belast, "Wie hoch ist Ihre gefühlte Belastung durch die berufsbegleit. Studien und Schule?", "völlig in Ordnung", "maximale Belastung")
  plot.verein <- plot.bar.div(bbst_gs_englisch_1, i, verein, "Wie gelingt Ihnen die Vereinbarkeit von berufsbegleitenden Studien und Familie?", "sehr gut", "ungenügend")
  plot.quali <- plot.bar.div(bbst_gs_englisch_1, i, quali, "Wie empfanden Sie die Qualität der Beratung im Vorfeld der Ausbildung?", "sehr gut", "ungenügend")
  plot.fehlt <- plot.bar.fehlt(bbst_gs_englisch_c, bbst_gs_englisch_1)

  ### plots for the item batteries (Likert scales)
  # general questions about the berufsbegleitende Studien
  bbst_gs_englisch_fragen <- batterie.recode(bbst_gs_englisch_c, i, "fragen")
  names(bbst_gs_englisch_fragen) <- l.fragen.gs_englisch
  # Fachdidaktik
  bbst_gs_englisch_fd <- batterie.recode(bbst_gs_englisch_c, i, "fd")
  names(bbst_gs_englisch_fd) <- items_13
  ### Literaturwissenschaft
  bbst_gs_englisch_lw <- batterie.recode(bbst_gs_englisch_c, i, "lw")
  names(bbst_gs_englisch_lw) <- items_13
  ### Linguistik
  bbst_gs_englisch_lk <- batterie.recode(bbst_gs_englisch_c, i, "lk")
  names(bbst_gs_englisch_lk) <- items_13
  ### Sprache
  bbst_gs_englisch_sp <- batterie.recode(bbst_gs_englisch_c, i, "sp")
  names(bbst_gs_englisch_sp) <- items_13

  # general questions about the berufsbegleitende Studien
  plot.fragen <- plot.likert(bbst_gs_englisch_fragen, "Fragen zu den berufsbegleitenden Studien Englisch \nfür die Grundschule")
  # Fachdidaktik
  plot.fd <- plot.likert(bbst_gs_englisch_fd, "Fragen zur Struktur und Ablauf in der Fachdidaktik")
  # Literaturwissenschaft
  plot.lw <- plot.likert(bbst_gs_englisch_lw, "Fragen zur Struktur und Ablauf in der Literaturwissenschaft")
  # Linguistik
  plot.lk <- plot.likert(bbst_gs_englisch_lk, "Fragen zur Struktur und Ablauf in der Linguistik")
  # Sprache (fixed: comment used to repeat "Linguistik")
  plot.sp <- plot.likert(bbst_gs_englisch_sp, "Fragen zur Struktur und Ablauf in der Sprache")

  ## plots to pdf: title page first, then one page per plot
  pdf(file, width = 9, height = 8)
  plot(0:10, type = "n", xaxt= "n", yaxt= "n", bty= "n", xlab = "", ylab = "")
  text(5.5, 8, "Evaluation", cex = 2)
  text(5.5, 7, "Berufsbegleitende Studien")
  text(5.5, 6, kursnr)
  text(5.5, 4, anzahl)
  rasterImage(beberlin, 1, 0, 4.5, 1)
  rasterImage(steps, 6.5, 0 , 9, 1)
  # course-overview page only for the whole-group report with several levels
  # (fixed: scalar condition, so use && instead of elementwise &)
  if (i == "Englisch" && length(kurse) > 1) {
    grid.arrange(plot.kurs, bottom = anzahl, vp=viewport(width=0.9, height=0.9))
  }
  grid.arrange(plot.geschl, bottom = anzahl, vp=viewport(width=0.9, height=0.9))
  grid.arrange(plot.alter, bottom = anzahl, vp=viewport(width=0.9, height=0.9))
  grid.arrange(plot.fach, bottom = anzahl, vp=viewport(width=0.9, height=0.9))
  grid.arrange(plot.erfahr, bottom = anzahl, vp=viewport(width=0.9, height=0.9))
  grid.arrange(plot.querein, bottom = anzahl, vp=viewport(width=0.9, height=0.9))
  grid.arrange(plot.aufmerk_ausb, bottom = anzahl, vp=viewport(width=0.9, height=0.9))
  grid.arrange(plot.schultyp, bottom = anzahl, vp=viewport(width=0.9, height=0.9))
  grid.arrange(plot.klassen, bottom = anzahl, vp=viewport(width=0.9, height=0.9))
  grid.arrange(plot.stunden, bottom = anzahl, vp=viewport(width=0.9, height=0.9))
  grid.arrange(plot.stunden_fach, bottom = anzahl, vp=viewport(width=0.9, height=0.9))
  grid.arrange(plot.stunden_stud, bottom = anzahl, vp=viewport(width=0.9, height=0.9))
  grid.arrange(plot.einsatz, bottom = anzahl, vp=viewport(width=0.9, height=0.9))
  grid.arrange(plot.leitung, bottom = anzahl, vp=viewport(width=0.9, height=0.9))
  grid.arrange(plot.belast, bottom = anzahl, vp=viewport(width=0.9, height=0.9))
  grid.arrange(plot.verein, bottom = anzahl, vp=viewport(width=0.9, height=0.9))
  grid.arrange(plot.quali, bottom = anzahl, vp=viewport(width=0.9, height=0.9))
  grid.arrange(plot.fehlt, bottom = anzahl, vp=viewport(width=0.9, height=0.9))
  grid.arrange(plot.fragen, bottom = anzahl, vp=viewport(width=0.9, height=0.9))
  grid.arrange(plot.fd, bottom = anzahl, vp=viewport(width=0.9, height=0.9))
  grid.arrange(plot.lw, bottom = anzahl, vp=viewport(width=0.9, height=0.9))
  grid.arrange(plot.lk, bottom = anzahl, vp=viewport(width=0.9, height=0.9))
  grid.arrange(plot.sp, bottom = anzahl, vp=viewport(width=0.9, height=0.9))
  dev.off()
}
/3_Analyse/Scripte/bbst_gs_englisch.R
no_license
StefanMunnes/Evaluation_Steps
R
false
false
8,193
r
kursname <- "bbSt Eng GS 18/19-1" kursfile <- "bbst_gs_englisch_1819" lab.kurs <- c( "A" = "Berufsbegleitende Studien Englisch Grundschule") lev.kurs <- c( "Berufsbegleitende Studien Englisch Grundschule") l.fragen.gs_englisch <- c( "Das Gesamtkonzept für die bbSt Englisch mit der Aufteilung in die Bereiche Fachdidaktik, Literaturwissenschaft, Linguistik und Sprache ist zielführend.", "Die Inhalte in den Bereichen Fachdidaktik, Literaturwissenschaft und Linguistik sind aufeinander abgestimmt.", "Meine persönlichen Erwartungen an die Ausbildung werplot.spden/wurden erfüllt.") # Variablen bereinigen bbst_gs_englisch_c <- bbst_gs_englisch %>% select(Prüfung, starts_with("TICKED.")) %>% rename_at(vars(starts_with("TICKED.")), funs(str_replace(., "TICKED.", ""))) bbst_gs_englisch_1 <- bbst_1.recode(bbst_gs_englisch_c) ################ #### PLOTS ##### ################ dir.create("4_Ergebnisse/Grafiken/bbst_gs_englisch", showWarnings = F) if (length(lev.kurs) > 1) { kurse <- c("Englisch", lev.kurs) } else { kurse <- "Englisch" } for (i in kurse) { if (i == "Englisch") { file <- paste0("4_Ergebnisse/Grafiken/bbst_gs_englisch/", kursfile , ".pdf") anzahl <- paste0("Teilnehmende: ", nrow(bbst_gs_englisch_c)) kursnr <- kursname write.csv(bbst_gs_englisch_c, paste0("4_Ergebnisse/Tabellen/", kursfile, ".csv"), na = "") if (length(lev.kurs) > 1) { plot.kurs <- bbst_gs_englisch_1 %>% filter(!is.na((kurs)), str_detect(kurs, pattern = i)) %>% select(kurs) %>% ggplot(aes(x = reorder(kurs, desc(kurs)), fill = kurs)) + geom_bar(width = 0.6) + coord_flip() + scale_fill_manual(values = colors.n14) + theme_minimal() + labs(x = NULL, y = "Anzahl", title = "Kurse") + theme(legend.position = "none", plot.title = element_text(hjust = 0, size = 14, face = "bold"), axis.ticks.x = element_blank(), panel.grid.minor.x = element_blank(), panel.grid.major.y = element_blank()) } } else { file <- paste0("4_Ergebnisse/Grafiken/bbst_gs_englisch/", kursfile, " ", str_sub(i, -1, -1), ".pdf") anzahl <- 
paste0("Teilnehmende: ", nrow(bbst_gs_englisch_1[grepl(i, bbst_gs_englisch_1$kurs), ])) kursnr <- paste0(kursname, " ", str_sub(i, -1, -1)) } plot.geschl <- plot.pie(bbst_gs_englisch_1, i, geschl, T, "Geschlecht") plot.alter <- plot.pie(bbst_gs_englisch_1, i, alter, T, "Alter") plot.fach <- plot.pie(bbst_gs_englisch_1, i, fach, T, "Welche Fachrichtung haben Sie studiert?") plot.erfahr <- plot.bar(bbst_gs_englisch_1, i, erfahr, T, "Wie viele Jahre Berufserfahrung haben Sie in der studierten Fachrichtung?") plot.querein <- plot.bar(bbst_gs_englisch_1, i, querein, T, "Sind Sie bereits vor Ihrem Quereinstieg in Ausbildungsbereichen tätig gewesen \n(z.B. als Trainer/in, im Bereich Nachhilfe, als Chorleitung etc.)?") plot.aufmerk_ausb <- plot.bar(bbst_gs_englisch_1, i, aufmerk_ausb, T, "Wie wurden Sie auf diese Ausbildungsmöglichkeit aufmerksam?") plot.schultyp <- plot.bar(bbst_gs_englisch_1, i, schultyp, T, "An welchem Schultyp sind Sie eingesetzt?") plot.klassen <- plot.bar.klassen(bbst_gs_englisch_c, bbst_gs_englisch_1) plot.stunden <- plot.bar(bbst_gs_englisch_1, i, stunden, T, "Wie viele Stunden eigenständigen Unterricht erteilen Sie pro Woche?") plot.stunden_fach <- plot.bar(bbst_gs_englisch_1, i, stunden_fach, T, "Wie viele Stunden davon unterrichten Sie Ihr anerkanntes Fach?") plot.stunden_stud <- plot.bar(bbst_gs_englisch_1, i, stunden_stud, T, "Wie viele Stunden unterrichten Sie bereits in dem Fach,\nwelches Sie gerade in den berufsbegleitenden Studien belegen?") plot.einsatz <- plot.bar.einsatz(bbst_gs_englisch_c, bbst_gs_englisch_1) plot.leitung <- plot.pie(bbst_gs_englisch_1, i, leitung, T, "Fühlen Sie sich seitens der Schulleitung ausreichend unterstützt?") plot.belast <- plot.bar.div(bbst_gs_englisch_1, i, belast, "Wie hoch ist Ihre gefühlte Belastung durch die berufsbegleit. 
Studien und Schule?", "völlig in Ordnung", "maximale Belastung") plot.verein <- plot.bar.div(bbst_gs_englisch_1, i, verein, "Wie gelingt Ihnen die Vereinbarkeit von berufsbegleitenden Studien und Familie?", "sehr gut", "ungenügend") plot.quali <- plot.bar.div(bbst_gs_englisch_1, i, quali, "Wie empfanden Sie die Qualität der Beratung im Vorfeld der Ausbildung?", "sehr gut", "ungenügend") plot.fehlt <- plot.bar.fehlt(bbst_gs_englisch_c, bbst_gs_englisch_1) ### plots for batteries # Fragen zu den berufsbegleitenden Studien bbst_gs_englisch_fragen <- batterie.recode(bbst_gs_englisch_c, i, "fragen") names(bbst_gs_englisch_fragen) <- l.fragen.gs_englisch # Fachdidaktik bbst_gs_englisch_fd <- batterie.recode(bbst_gs_englisch_c, i, "fd") names(bbst_gs_englisch_fd) <- items_13 ### Literaturwissenschaft bbst_gs_englisch_lw <- batterie.recode(bbst_gs_englisch_c, i, "lw") names(bbst_gs_englisch_lw) <- items_13 ### Linguistik bbst_gs_englisch_lk <- batterie.recode(bbst_gs_englisch_c, i, "lk") names(bbst_gs_englisch_lk) <- items_13 ### Sprache bbst_gs_englisch_sp <- batterie.recode(bbst_gs_englisch_c, i, "sp") names(bbst_gs_englisch_sp) <- items_13 # Fragen zu den berufsbegleitenden Studien plot.fragen <- plot.likert(bbst_gs_englisch_fragen, "Fragen zu den berufsbegleitenden Studien Englisch \nfür die Grundschule") # Fachdidaktik plot.fd <- plot.likert(bbst_gs_englisch_fd, "Fragen zur Struktur und Ablauf in der Fachdidaktik") # Literaturwissenschaft plot.lw <- plot.likert(bbst_gs_englisch_lw, "Fragen zur Struktur und Ablauf in der Literaturwissenschaft") # Linguistik plot.lk <- plot.likert(bbst_gs_englisch_lk, "Fragen zur Struktur und Ablauf in der Linguistik") # Linguistik plot.sp <- plot.likert(bbst_gs_englisch_sp, "Fragen zur Struktur und Ablauf in der Sprache") ## plots to pdf pdf(file, width = 9, height = 8) plot(0:10, type = "n", xaxt= "n", yaxt= "n", bty= "n", xlab = "", ylab = "") text(5.5, 8, "Evaluation", cex = 2) text(5.5, 7, "Berufsbegleitende Studien") text(5.5, 6, 
kursnr) text(5.5, 4, anzahl) rasterImage(beberlin, 1, 0, 4.5, 1) rasterImage(steps, 6.5, 0 , 9, 1) if (i == "Englisch" & length(kurse) > 1) { grid.arrange(plot.kurs, bottom = anzahl, vp=viewport(width=0.9, height=0.9)) } grid.arrange(plot.geschl, bottom = anzahl, vp=viewport(width=0.9, height=0.9)) grid.arrange(plot.alter, bottom = anzahl, vp=viewport(width=0.9, height=0.9)) grid.arrange(plot.fach, bottom = anzahl, vp=viewport(width=0.9, height=0.9)) grid.arrange(plot.erfahr, bottom = anzahl, vp=viewport(width=0.9, height=0.9)) grid.arrange(plot.querein, bottom = anzahl, vp=viewport(width=0.9, height=0.9)) grid.arrange(plot.aufmerk_ausb, bottom = anzahl, vp=viewport(width=0.9, height=0.9)) grid.arrange(plot.schultyp, bottom = anzahl, vp=viewport(width=0.9, height=0.9)) grid.arrange(plot.klassen, bottom = anzahl, vp=viewport(width=0.9, height=0.9)) grid.arrange(plot.stunden, bottom = anzahl, vp=viewport(width=0.9, height=0.9)) grid.arrange(plot.stunden_fach, bottom = anzahl, vp=viewport(width=0.9, height=0.9)) grid.arrange(plot.stunden_stud, bottom = anzahl, vp=viewport(width=0.9, height=0.9)) grid.arrange(plot.einsatz, bottom = anzahl, vp=viewport(width=0.9, height=0.9)) grid.arrange(plot.leitung, bottom = anzahl, vp=viewport(width=0.9, height=0.9)) grid.arrange(plot.belast, bottom = anzahl, vp=viewport(width=0.9, height=0.9)) grid.arrange(plot.verein, bottom = anzahl, vp=viewport(width=0.9, height=0.9)) grid.arrange(plot.quali, bottom = anzahl, vp=viewport(width=0.9, height=0.9)) grid.arrange(plot.fehlt, bottom = anzahl, vp=viewport(width=0.9, height=0.9)) grid.arrange(plot.fragen, bottom = anzahl, vp=viewport(width=0.9, height=0.9)) grid.arrange(plot.fd, bottom = anzahl, vp=viewport(width=0.9, height=0.9)) grid.arrange(plot.lw, bottom = anzahl, vp=viewport(width=0.9, height=0.9)) grid.arrange(plot.lk, bottom = anzahl, vp=viewport(width=0.9, height=0.9)) grid.arrange(plot.sp, bottom = anzahl, vp=viewport(width=0.9, height=0.9)) dev.off() }
# China dataset cleaning and spade operations # Created by Simone Passarelli 11_18_20 library(tidyverse) library(haven) library(here) library(janitor) china_1 <- read_sas(here("data", "raw" , "China", "nutr1_00.sas7bdat")) china_2 <- read_sas(here("data", "raw" , "China", "nutr2_00.sas7bdat")) china_3 <- read_sas(here("data", "raw" , "China", "nutr3_00.sas7bdat")) # Read in sas files downloaded from https://www.cpc.unc.edu/projects/china/data/datasets china <- read_sas(here("data", "raw" , "China", "nutr3_00.sas7bdat")) %>% clean_names() %>% rename(code = foodcode) %>% filter(any(!is.na(code))) %>% mutate(code = as.numeric(code)) %>% filter(wave == 2011) #the 2011 data used the 2002-2004 FCT # Read in macronutrient file china_macro <- read_sas(here("data", "raw" , "China", "c12diet.sas7bdat")) %>% clean_names() # Read in China food codes translated from Ling food_codes <- readxl::read_xlsx(here("data", "raw" , "China", "Food code_China.xlsx")) %>% clean_names() %>% rename(code = code_2002) %>% filter(any(!is.na(code))) %>% mutate(code = gsub("-", "", code)) %>% mutate(code = as.numeric(code)) # Join in the translated food codes china_merge <- left_join(china, food_codes, by="code")
/scripts/china_prep.R
no_license
cg0lden/subnational_distributions_BFA
R
false
false
1,242
r
# China dataset cleaning and spade operations # Created by Simone Passarelli 11_18_20 library(tidyverse) library(haven) library(here) library(janitor) china_1 <- read_sas(here("data", "raw" , "China", "nutr1_00.sas7bdat")) china_2 <- read_sas(here("data", "raw" , "China", "nutr2_00.sas7bdat")) china_3 <- read_sas(here("data", "raw" , "China", "nutr3_00.sas7bdat")) # Read in sas files downloaded from https://www.cpc.unc.edu/projects/china/data/datasets china <- read_sas(here("data", "raw" , "China", "nutr3_00.sas7bdat")) %>% clean_names() %>% rename(code = foodcode) %>% filter(any(!is.na(code))) %>% mutate(code = as.numeric(code)) %>% filter(wave == 2011) #the 2011 data used the 2002-2004 FCT # Read in macronutrient file china_macro <- read_sas(here("data", "raw" , "China", "c12diet.sas7bdat")) %>% clean_names() # Read in China food codes translated from Ling food_codes <- readxl::read_xlsx(here("data", "raw" , "China", "Food code_China.xlsx")) %>% clean_names() %>% rename(code = code_2002) %>% filter(any(!is.na(code))) %>% mutate(code = gsub("-", "", code)) %>% mutate(code = as.numeric(code)) # Join in the translated food codes china_merge <- left_join(china, food_codes, by="code")
#' @title Frequency map profile #' @description Function to create a frequency profile for a process map. #' @param value The type of frequency value to be used: absolute or relative. #' @export enriched_frequency enriched_frequency <- function(value = c("absolute", "relative"),columnName = NULL) { value <- match.arg(value) attr(value, "perspective") <- "frequency" if (is.null(columnName)) { attr(value, "columnName") <- paste0("frequency_",value) } else { attr(value, "columnName") <- columnName } return(value) }
/R/enriched_frequency.R
no_license
wfeijen/Uitbreiden_ProcessMap
R
false
false
538
r
#' @title Frequency map profile #' @description Function to create a frequency profile for a process map. #' @param value The type of frequency value to be used: absolute or relative. #' @export enriched_frequency enriched_frequency <- function(value = c("absolute", "relative"),columnName = NULL) { value <- match.arg(value) attr(value, "perspective") <- "frequency" if (is.null(columnName)) { attr(value, "columnName") <- paste0("frequency_",value) } else { attr(value, "columnName") <- columnName } return(value) }
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -9.38885367242723e-297, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 )) result <- do.call(DLMtool::LBSPRgen,testlist) str(result)
/DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615835757-test.R
no_license
akhikolla/updatedatatype-list2
R
false
false
2,048
r
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -9.38885367242723e-297, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 )) result <- do.call(DLMtool::LBSPRgen,testlist) str(result)
#### # Raw data - Care Seeking Behavior and Treatment # # We model the treatment cascade first as the probability of symptoms appearing followed by the probability of following through with seeking treatment. # # Both of these quantities are modeled using the symptoms and treatment reported in the MIS data. # # October 2, 2019 # #### library(here) # Load in the raw data, where people report their treatment seeking behavior and symptoms raw.2015.2017.data <- fread(here("data/raw/2015-2017_survey_data/summaries.csv")) care.fever <- raw.2015.2017.data[,.(areaId, n, pf, pffv, pftto)] # Load in cleaned data, which has more accurate ad2 sorting clean.data <- fread(here("data/clean/aggregated_2015_2018_travel_data.csv")) # travel distance data set, we'll use this for the distance to malabo: reg.travel.dist <- fread(here("data/clean/travel_dist_by_region.csv")) # merge data before fitting care.fever <- merge(clean.data[year == 2015, .(areaId, ad2, pop)], care.fever, by = "areaId") care.fever <- merge(care.fever, reg.travel.dist[,.(areaId, dist.mal)]) care.fever[is.na(n)]$n <- 0 care.fever[is.na(pf)]$pf <- 0 care.fever[is.na(pffv)]$pffv <- 0 care.fever[is.na(pftto)]$pftto <- 0 # Overall, aggregated across the island, the rate of fevers reported | Pf positive # 0.1116336 sum(care.fever$pffv)/sum(care.fever$pf) # Overall, aggregated across the island, the rate of treatment reported | fever # 0.6025641 sum(care.fever$pftto)/sum(care.fever$pffv) # probability of reporting fever, given that one had reported an infection # perform binomial model fit, similar to how we fit the travel frequency model (Travel_frequency_model.R) h <- glm(cbind(pffv, pf - pffv) ~ pop + dist.mal, data = care.fever, family = binomial(link = logit)) # aic = 483.4 # the model that used ad2 as an indicator covariate made it so that Ureka was a very weird outlier, with probability of fever = 0 # taking distance from malabo into account, it appears that the farther away one is the more likely one will have 
symptoms care.fever$prob.fever <- predict(h, data = care.fever, type = "response") ## some summary statistics # hist(care.fever$prob.fever) # summary(care.fever$prob.fever) # mean = 0.1170 ## compare to the results from # sum(care.fever$pffv)/sum(care.fever$pf) # 0.1116336 # probability of reporting seeking treatment, given that one had a reported symptoms h <- glm(cbind(pftto, pffv - pftto) ~ pop + dist.mal, data = care.fever, family = binomial(link = logit)) care.fever$prob.treatment <- predict(h, data = care.fever, type = "response") ## summary statistics hist(care.fever$prob.treatment) summary(care.fever$prob.treatment) ## mean: 0.5744 ## compare to: #sum(care.fever$pftto)/sum(care.fever$pffv) ## 0.602 fwrite(care.fever, here("data/clean/Care_Seeking_Model_estimates.csv")) # Of course, the problem is actually that for this particular simulation application # we will have to use a single number for fever + care seeking behavior. # Yes, these numbers very likely vary across the island, but in this case # the macro.pfsi model only accommodates a single value.
/scripts/Care_Seeking_Model.R
no_license
dd-harp/2019_ASTMH_Bioko_Simulations
R
false
false
3,126
r
#### # Raw data - Care Seeking Behavior and Treatment # # We model the treatment cascade first as the probability of symptoms appearing followed by the probability of following through with seeking treatment. # # Both of these quantities are modeled using the symptoms and treatment reported in the MIS data. # # October 2, 2019 # #### library(here) # Load in the raw data, where people report their treatment seeking behavior and symptoms raw.2015.2017.data <- fread(here("data/raw/2015-2017_survey_data/summaries.csv")) care.fever <- raw.2015.2017.data[,.(areaId, n, pf, pffv, pftto)] # Load in cleaned data, which has more accurate ad2 sorting clean.data <- fread(here("data/clean/aggregated_2015_2018_travel_data.csv")) # travel distance data set, we'll use this for the distance to malabo: reg.travel.dist <- fread(here("data/clean/travel_dist_by_region.csv")) # merge data before fitting care.fever <- merge(clean.data[year == 2015, .(areaId, ad2, pop)], care.fever, by = "areaId") care.fever <- merge(care.fever, reg.travel.dist[,.(areaId, dist.mal)]) care.fever[is.na(n)]$n <- 0 care.fever[is.na(pf)]$pf <- 0 care.fever[is.na(pffv)]$pffv <- 0 care.fever[is.na(pftto)]$pftto <- 0 # Overall, aggregated across the island, the rate of fevers reported | Pf positive # 0.1116336 sum(care.fever$pffv)/sum(care.fever$pf) # Overall, aggregated across the island, the rate of treatment reported | fever # 0.6025641 sum(care.fever$pftto)/sum(care.fever$pffv) # probability of reporting fever, given that one had reported an infection # perform binomial model fit, similar to how we fit the travel frequency model (Travel_frequency_model.R) h <- glm(cbind(pffv, pf - pffv) ~ pop + dist.mal, data = care.fever, family = binomial(link = logit)) # aic = 483.4 # the model that used ad2 as an indicator covariate made it so that Ureka was a very weird outlier, with probability of fever = 0 # taking distance from malabo into account, it appears that the farther away one is the more likely one will have 
symptoms care.fever$prob.fever <- predict(h, data = care.fever, type = "response") ## some summary statistics # hist(care.fever$prob.fever) # summary(care.fever$prob.fever) # mean = 0.1170 ## compare to the results from # sum(care.fever$pffv)/sum(care.fever$pf) # 0.1116336 # probability of reporting seeking treatment, given that one had a reported symptoms h <- glm(cbind(pftto, pffv - pftto) ~ pop + dist.mal, data = care.fever, family = binomial(link = logit)) care.fever$prob.treatment <- predict(h, data = care.fever, type = "response") ## summary statistics hist(care.fever$prob.treatment) summary(care.fever$prob.treatment) ## mean: 0.5744 ## compare to: #sum(care.fever$pftto)/sum(care.fever$pffv) ## 0.602 fwrite(care.fever, here("data/clean/Care_Seeking_Model_estimates.csv")) # Of course, the problem is actually that for this particular simulation application # we will have to use a single number for fever + care seeking behavior. # Yes, these numbers very likely vary across the island, but in this case # the macro.pfsi model only accommodates a single value.
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/wrapper.R \name{downloadMultipleObjects} \alias{downloadMultipleObjects} \title{downloadMultipleObjects} \usage{ downloadMultipleObjects(bucketname, src, dest = ".", pattern = NULL, resume = TRUE, split = 5, method = "aria2", quiet = TRUE, ..., .progressbar = TRUE, .parallel = TRUE) } \arguments{ \item{src}{The objects to be downloaded. Ended with "/" means the whold "folder" will be download.} \item{dest}{Local destination to save the files.} \item{pattern}{Filter which files to be uploaded.} \item{resume}{Auto resume from last failed download or not.} \item{split}{How many download progress at the same time.} \item{method}{Same argument in download.file. Supports aria2 if installed.} \item{quiet}{Suppress status messages or not.} \item{...}{Arguments pass to downloadObject.} \item{.progressbar}{Show progress bar or not.} \item{.parallel}{Parallel multiple upload or not. When False, split will be disable too.} } \value{ named status_codes indicates failed or success. } \description{ downloadMultipleObjects } \examples{ downloadMultipleObjects('ross-test', 'test/tmp') r<-downloadMultipleObjects('ross-test', 'test/tmp/cache2/') r<-downloadMultipleObjects('ross-test', 'test', '~/asdf') r<-downloadMultipleObjects('ross-test', 'test', '~/asdf', resume=F, .parallel = F) r<-downloadMultipleObjects('ross-test', 'test', '/Volumes/RamDisk/asdf', pattern="tmp", quiet = F, split=10, .progressbar = F) }
/man/downloadMultipleObjects.Rd
permissive
gahoo/ross
R
false
true
1,506
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/wrapper.R \name{downloadMultipleObjects} \alias{downloadMultipleObjects} \title{downloadMultipleObjects} \usage{ downloadMultipleObjects(bucketname, src, dest = ".", pattern = NULL, resume = TRUE, split = 5, method = "aria2", quiet = TRUE, ..., .progressbar = TRUE, .parallel = TRUE) } \arguments{ \item{src}{The objects to be downloaded. Ended with "/" means the whold "folder" will be download.} \item{dest}{Local destination to save the files.} \item{pattern}{Filter which files to be uploaded.} \item{resume}{Auto resume from last failed download or not.} \item{split}{How many download progress at the same time.} \item{method}{Same argument in download.file. Supports aria2 if installed.} \item{quiet}{Suppress status messages or not.} \item{...}{Arguments pass to downloadObject.} \item{.progressbar}{Show progress bar or not.} \item{.parallel}{Parallel multiple upload or not. When False, split will be disable too.} } \value{ named status_codes indicates failed or success. } \description{ downloadMultipleObjects } \examples{ downloadMultipleObjects('ross-test', 'test/tmp') r<-downloadMultipleObjects('ross-test', 'test/tmp/cache2/') r<-downloadMultipleObjects('ross-test', 'test', '~/asdf') r<-downloadMultipleObjects('ross-test', 'test', '~/asdf', resume=F, .parallel = F) r<-downloadMultipleObjects('ross-test', 'test', '/Volumes/RamDisk/asdf', pattern="tmp", quiet = F, split=10, .progressbar = F) }
#' Edge Code #' #' This function allows you to classify individual pitches based on the various categories from the Edge% metric. The dataframe passed to the function must include the batter's handedness, the px and pz coordinates from the PITCHf/x system, and the batter's height. #' @param df A dataframe that, at a minimum, includes the following columns: batter height (b_height), the batter's handedness (stand), vertical location of the pitch (pz), and then horizontal location of the pitch (pz) #' @param height_var_name The name of the variable in the dataset that includes the batter's height. Defaults to b_height which assumes an height + inch format. If the variable name is "Height" it assumes the variable is already converted to inches (as is the case in some databases) #' @keywords MLB, sabermetrics, PITCHf/x #' @export #' @examples \dontrun{edge_code(df)} edge_code <- function(df, height_var_name = "b_height") { if (height_var_name == "b_height") { if (class(df$px) == "factor") {df$px <- as.numeric(levels(df$px))[df$px]} if (class(df$pz) == "factor") {df$pz <- as.numeric(levels(df$pz))[df$pz]} if (class(df$b_height) == "factor") {df$b_height <- as.numeric(levels(df$b_height))[df$b_height]} f <- as.numeric(lapply(strsplit(df$b_height, "-"), function(x) x[1])) * 12 i <- as.numeric(lapply(strsplit(df$b_height, "-"), function(x) x[2])) df$b_height_inch <- f+i df$called_pitch <- ifelse(grepl("Called|Ball", df$des2), 1, 0) df$called_strike <- ifelse(grepl("Called", df$des2), 1, 0) df$swing <- ifelse(grepl("Swinging|Foul|In play", df$des2), 1, 0) df$whiff <- ifelse(grepl("Swinging", df$des2), 1, 0) LHH <- filter(df, stand == "L") RHH <- filter(df, stand == "R") LHH$location <- with(LHH, ifelse(!is.na(px) & !is.na(pz) & px > .21 & px < .81 & pz > (.35 + b_height_inch/12 *.229) & pz < (2.0 + b_height_inch/12 *.229), "Inside Edge", ifelse(!is.na(px) & !is.na(pz) & px > -1.20 & px < -0.9 & pz > (.35 + b_height_inch/12 *.229) & pz < (2.0 + b_height_inch/12 *.229), 
"Outside Edge", ifelse(!is.na(px) & !is.na(pz) & px >= -0.9 & px <= .21 & pz > (1.7 + b_height_inch/12 *.229) & pz < (2.0 + b_height_inch/12 *.229), "Upper Edge", ifelse(!is.na(px) & !is.na(pz) & px >= -0.9 & px <= .21 & pz > (.35 + b_height_inch/12 *.229) & pz < (.65 + b_height_inch/12 *.229), "Lower Edge", ifelse(!is.na(px) & !is.na(pz) & px >= -0.9 & px <= .21 & pz >= (.65 + b_height_inch/12 *.229) & pz <= (1.7 + b_height_inch/12 *.229), "Heart", ifelse(is.na(px) | is.na(pz), NA, "Out of Zone"))))))) RHH$location <- with(RHH, ifelse(!is.na(px) & !is.na(pz) & px > -1.03 & px < -.43 & pz > (.92 + b_height_inch/12 *.136) & pz < (2.6 + b_height_inch/12 *.136), "Inside Edge", ifelse(!is.na(px) & !is.na(pz) & px > .7 & px < 1.00 & pz > (.92 + b_height_inch/12 *.136) & pz < (2.6 + b_height_inch/12 *.136), "Outside Edge", ifelse(!is.na(px) & !is.na(pz) & px >= -.43 & px <= .70 & pz > (2.3 + b_height_inch/12 *.136) & pz < (2.6 + b_height_inch/12 *.136), "Upper Edge", ifelse(!is.na(px) & !is.na(pz) & px >= -.43 & px <= .70 & pz > (.92 + b_height_inch/12 *.136) & pz < (1.22 + b_height_inch/12 *.136), "Lower Edge", ifelse(!is.na(px) & !is.na(pz) & px >= -.43 & px <= .70 & pz >= (1.22 + b_height_inch/12 *.136) & pz <= (2.30 + b_height_inch/12 *.136), "Heart", ifelse(is.na(px) | is.na(pz), NA, "Out of Zone"))))))) df_combined <- rbind(LHH, RHH) df_combined$Upper_Edge <- with(df_combined, ifelse(location == "Upper Edge", 1, 0)) df_combined$Lower_Edge <- with(df_combined, ifelse(location == "Lower Edge", 1, 0)) df_combined$Inside_Edge <- with(df_combined, ifelse(location == "Inside Edge", 1, 0)) df_combined$Outside_Edge <- with(df_combined, ifelse(location == "Outside Edge", 1, 0)) df_combined$Heart <- with(df_combined, ifelse(location == "Heart", 1, 0)) df_combined$OutOfZone <- with(df_combined, ifelse(location == "Out of Zone", 1, 0)) df_combined } else { if (class(df$px) == "factor") {df$px <- as.numeric(levels(df$px))[df$px]} if (class(df$pz) == "factor") {df$pz <- 
as.numeric(levels(df$pz))[df$pz]} df$b_height_inch <- df$Height df$called_pitch <- ifelse(grepl("Called|Ball", df$description), 1, 0) df$called_strike <- ifelse(grepl("Called", df$description), 1, 0) df$swing <- ifelse(grepl("Swinging|Foul|In play", df$description), 1, 0) df$whiff <- ifelse(grepl("Swinging", df$description), 1, 0) LHH <- filter(df, stand == "L") RHH <- filter(df, stand == "R") LHH$location <- with(LHH, ifelse(!is.na(px) & !is.na(pz) & px > .21 & px < .81 & pz > (.35 + b_height_inch/12 *.229) & pz < (2.0 + b_height_inch/12 *.229), "Inside Edge", ifelse(!is.na(px) & !is.na(pz) & px > -1.20 & px < -0.9 & pz > (.35 + b_height_inch/12 *.229) & pz < (2.0 + b_height_inch/12 *.229), "Outside Edge", ifelse(!is.na(px) & !is.na(pz) & px >= -0.9 & px <= .21 & pz > (1.7 + b_height_inch/12 *.229) & pz < (2.0 + b_height_inch/12 *.229), "Upper Edge", ifelse(!is.na(px) & !is.na(pz) & px >= -0.9 & px <= .21 & pz > (.35 + b_height_inch/12 *.229) & pz < (.65 + b_height_inch/12 *.229), "Lower Edge", ifelse(!is.na(px) & !is.na(pz) & px >= -0.9 & px <= .21 & pz >= (.65 + b_height_inch/12 *.229) & pz <= (1.7 + b_height_inch/12 *.229), "Heart", ifelse(is.na(px) | is.na(pz), NA, "Out of Zone"))))))) RHH$location <- with(RHH, ifelse(!is.na(px) & !is.na(pz) & px > -1.03 & px < -.43 & pz > (.92 + b_height_inch/12 *.136) & pz < (2.6 + b_height_inch/12 *.136), "Inside Edge", ifelse(!is.na(px) & !is.na(pz) & px > .7 & px < 1.00 & pz > (.92 + b_height_inch/12 *.136) & pz < (2.6 + b_height_inch/12 *.136), "Outside Edge", ifelse(!is.na(px) & !is.na(pz) & px >= -.43 & px <= .70 & pz > (2.3 + b_height_inch/12 *.136) & pz < (2.6 + b_height_inch/12 *.136), "Upper Edge", ifelse(!is.na(px) & !is.na(pz) & px >= -.43 & px <= .70 & pz > (.92 + b_height_inch/12 *.136) & pz < (1.22 + b_height_inch/12 *.136), "Lower Edge", ifelse(!is.na(px) & !is.na(pz) & px >= -.43 & px <= .70 & pz >= (1.22 + b_height_inch/12 *.136) & pz <= (2.30 + b_height_inch/12 *.136), "Heart", ifelse(is.na(px) | 
is.na(pz), NA, "Out of Zone"))))))) df_combined <- rbind(LHH, RHH) df_combined$Upper_Edge <- with(df_combined, ifelse(location == "Upper Edge", 1, 0)) df_combined$Lower_Edge <- with(df_combined, ifelse(location == "Lower Edge", 1, 0)) df_combined$Inside_Edge <- with(df_combined, ifelse(location == "Inside Edge", 1, 0)) df_combined$Outside_Edge <- with(df_combined, ifelse(location == "Outside Edge", 1, 0)) df_combined$Heart <- with(df_combined, ifelse(location == "Heart", 1, 0)) df_combined$OutOfZone <- with(df_combined, ifelse(location == "Out of Zone", 1, 0)) df_combined } }
/R/edge_code.R
no_license
keberwein/baseballr
R
false
false
6,735
r
#' Edge Code #' #' This function allows you to classify individual pitches based on the various categories from the Edge% metric. The dataframe passed to the function must include the batter's handedness, the px and pz coordinates from the PITCHf/x system, and the batter's height. #' @param df A dataframe that, at a minimum, includes the following columns: batter height (b_height), the batter's handedness (stand), vertical location of the pitch (pz), and then horizontal location of the pitch (pz) #' @param height_var_name The name of the variable in the dataset that includes the batter's height. Defaults to b_height which assumes an height + inch format. If the variable name is "Height" it assumes the variable is already converted to inches (as is the case in some databases) #' @keywords MLB, sabermetrics, PITCHf/x #' @export #' @examples \dontrun{edge_code(df)} edge_code <- function(df, height_var_name = "b_height") { if (height_var_name == "b_height") { if (class(df$px) == "factor") {df$px <- as.numeric(levels(df$px))[df$px]} if (class(df$pz) == "factor") {df$pz <- as.numeric(levels(df$pz))[df$pz]} if (class(df$b_height) == "factor") {df$b_height <- as.numeric(levels(df$b_height))[df$b_height]} f <- as.numeric(lapply(strsplit(df$b_height, "-"), function(x) x[1])) * 12 i <- as.numeric(lapply(strsplit(df$b_height, "-"), function(x) x[2])) df$b_height_inch <- f+i df$called_pitch <- ifelse(grepl("Called|Ball", df$des2), 1, 0) df$called_strike <- ifelse(grepl("Called", df$des2), 1, 0) df$swing <- ifelse(grepl("Swinging|Foul|In play", df$des2), 1, 0) df$whiff <- ifelse(grepl("Swinging", df$des2), 1, 0) LHH <- filter(df, stand == "L") RHH <- filter(df, stand == "R") LHH$location <- with(LHH, ifelse(!is.na(px) & !is.na(pz) & px > .21 & px < .81 & pz > (.35 + b_height_inch/12 *.229) & pz < (2.0 + b_height_inch/12 *.229), "Inside Edge", ifelse(!is.na(px) & !is.na(pz) & px > -1.20 & px < -0.9 & pz > (.35 + b_height_inch/12 *.229) & pz < (2.0 + b_height_inch/12 *.229), 
"Outside Edge", ifelse(!is.na(px) & !is.na(pz) & px >= -0.9 & px <= .21 & pz > (1.7 + b_height_inch/12 *.229) & pz < (2.0 + b_height_inch/12 *.229), "Upper Edge", ifelse(!is.na(px) & !is.na(pz) & px >= -0.9 & px <= .21 & pz > (.35 + b_height_inch/12 *.229) & pz < (.65 + b_height_inch/12 *.229), "Lower Edge", ifelse(!is.na(px) & !is.na(pz) & px >= -0.9 & px <= .21 & pz >= (.65 + b_height_inch/12 *.229) & pz <= (1.7 + b_height_inch/12 *.229), "Heart", ifelse(is.na(px) | is.na(pz), NA, "Out of Zone"))))))) RHH$location <- with(RHH, ifelse(!is.na(px) & !is.na(pz) & px > -1.03 & px < -.43 & pz > (.92 + b_height_inch/12 *.136) & pz < (2.6 + b_height_inch/12 *.136), "Inside Edge", ifelse(!is.na(px) & !is.na(pz) & px > .7 & px < 1.00 & pz > (.92 + b_height_inch/12 *.136) & pz < (2.6 + b_height_inch/12 *.136), "Outside Edge", ifelse(!is.na(px) & !is.na(pz) & px >= -.43 & px <= .70 & pz > (2.3 + b_height_inch/12 *.136) & pz < (2.6 + b_height_inch/12 *.136), "Upper Edge", ifelse(!is.na(px) & !is.na(pz) & px >= -.43 & px <= .70 & pz > (.92 + b_height_inch/12 *.136) & pz < (1.22 + b_height_inch/12 *.136), "Lower Edge", ifelse(!is.na(px) & !is.na(pz) & px >= -.43 & px <= .70 & pz >= (1.22 + b_height_inch/12 *.136) & pz <= (2.30 + b_height_inch/12 *.136), "Heart", ifelse(is.na(px) | is.na(pz), NA, "Out of Zone"))))))) df_combined <- rbind(LHH, RHH) df_combined$Upper_Edge <- with(df_combined, ifelse(location == "Upper Edge", 1, 0)) df_combined$Lower_Edge <- with(df_combined, ifelse(location == "Lower Edge", 1, 0)) df_combined$Inside_Edge <- with(df_combined, ifelse(location == "Inside Edge", 1, 0)) df_combined$Outside_Edge <- with(df_combined, ifelse(location == "Outside Edge", 1, 0)) df_combined$Heart <- with(df_combined, ifelse(location == "Heart", 1, 0)) df_combined$OutOfZone <- with(df_combined, ifelse(location == "Out of Zone", 1, 0)) df_combined } else { if (class(df$px) == "factor") {df$px <- as.numeric(levels(df$px))[df$px]} if (class(df$pz) == "factor") {df$pz <- 
as.numeric(levels(df$pz))[df$pz]} df$b_height_inch <- df$Height df$called_pitch <- ifelse(grepl("Called|Ball", df$description), 1, 0) df$called_strike <- ifelse(grepl("Called", df$description), 1, 0) df$swing <- ifelse(grepl("Swinging|Foul|In play", df$description), 1, 0) df$whiff <- ifelse(grepl("Swinging", df$description), 1, 0) LHH <- filter(df, stand == "L") RHH <- filter(df, stand == "R") LHH$location <- with(LHH, ifelse(!is.na(px) & !is.na(pz) & px > .21 & px < .81 & pz > (.35 + b_height_inch/12 *.229) & pz < (2.0 + b_height_inch/12 *.229), "Inside Edge", ifelse(!is.na(px) & !is.na(pz) & px > -1.20 & px < -0.9 & pz > (.35 + b_height_inch/12 *.229) & pz < (2.0 + b_height_inch/12 *.229), "Outside Edge", ifelse(!is.na(px) & !is.na(pz) & px >= -0.9 & px <= .21 & pz > (1.7 + b_height_inch/12 *.229) & pz < (2.0 + b_height_inch/12 *.229), "Upper Edge", ifelse(!is.na(px) & !is.na(pz) & px >= -0.9 & px <= .21 & pz > (.35 + b_height_inch/12 *.229) & pz < (.65 + b_height_inch/12 *.229), "Lower Edge", ifelse(!is.na(px) & !is.na(pz) & px >= -0.9 & px <= .21 & pz >= (.65 + b_height_inch/12 *.229) & pz <= (1.7 + b_height_inch/12 *.229), "Heart", ifelse(is.na(px) | is.na(pz), NA, "Out of Zone"))))))) RHH$location <- with(RHH, ifelse(!is.na(px) & !is.na(pz) & px > -1.03 & px < -.43 & pz > (.92 + b_height_inch/12 *.136) & pz < (2.6 + b_height_inch/12 *.136), "Inside Edge", ifelse(!is.na(px) & !is.na(pz) & px > .7 & px < 1.00 & pz > (.92 + b_height_inch/12 *.136) & pz < (2.6 + b_height_inch/12 *.136), "Outside Edge", ifelse(!is.na(px) & !is.na(pz) & px >= -.43 & px <= .70 & pz > (2.3 + b_height_inch/12 *.136) & pz < (2.6 + b_height_inch/12 *.136), "Upper Edge", ifelse(!is.na(px) & !is.na(pz) & px >= -.43 & px <= .70 & pz > (.92 + b_height_inch/12 *.136) & pz < (1.22 + b_height_inch/12 *.136), "Lower Edge", ifelse(!is.na(px) & !is.na(pz) & px >= -.43 & px <= .70 & pz >= (1.22 + b_height_inch/12 *.136) & pz <= (2.30 + b_height_inch/12 *.136), "Heart", ifelse(is.na(px) | 
is.na(pz), NA, "Out of Zone"))))))) df_combined <- rbind(LHH, RHH) df_combined$Upper_Edge <- with(df_combined, ifelse(location == "Upper Edge", 1, 0)) df_combined$Lower_Edge <- with(df_combined, ifelse(location == "Lower Edge", 1, 0)) df_combined$Inside_Edge <- with(df_combined, ifelse(location == "Inside Edge", 1, 0)) df_combined$Outside_Edge <- with(df_combined, ifelse(location == "Outside Edge", 1, 0)) df_combined$Heart <- with(df_combined, ifelse(location == "Heart", 1, 0)) df_combined$OutOfZone <- with(df_combined, ifelse(location == "Out of Zone", 1, 0)) df_combined } }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/CCAMLRGIS_DataDescription.R \docType{data} \name{Coast} \alias{Coast} \title{Coast} \format{ SpatialPolygonsDataFrame } \usage{ data(Coast) } \description{ Coastline polygons generated from \link{load_Coastline} and sub-sampled to only contain data that falls within the CCAMLR boundaries. This spatial object may be subsetted to plot the coastline for selected ASDs or EEZs. Source: \url{http://gis.ccamlr.org/} } \examples{ #Complete coastline: plot(Coast[Coast$ID=='All',],col='grey') #ASD 48.1 coastline: plot(Coast[Coast$ID=='48.1',],col='grey') } \seealso{ \code{\link{Clip2Coast}}. }
/man/Coast.Rd
no_license
rsbivand/CCAMLRGIS
R
false
true
670
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/CCAMLRGIS_DataDescription.R \docType{data} \name{Coast} \alias{Coast} \title{Coast} \format{ SpatialPolygonsDataFrame } \usage{ data(Coast) } \description{ Coastline polygons generated from \link{load_Coastline} and sub-sampled to only contain data that falls within the CCAMLR boundaries. This spatial object may be subsetted to plot the coastline for selected ASDs or EEZs. Source: \url{http://gis.ccamlr.org/} } \examples{ #Complete coastline: plot(Coast[Coast$ID=='All',],col='grey') #ASD 48.1 coastline: plot(Coast[Coast$ID=='48.1',],col='grey') } \seealso{ \code{\link{Clip2Coast}}. }
##-------- Load the .txt as Data Frame ------------------- ## data <- read.table("./household_power_consumption.txt", sep=";", header=TRUE, na.strings = "?") ##-------- Format the Date ------------------------------- ## data$Date <- format(strptime(as.character(data$Date), "%d/%m/%Y"), "%Y-%m-%d") ##-------- Subset the required data ---------------------- ## dataset <- subset(data, data$Date=="2007-02-01" | data$Date == "2007-02-02", ) ##-------- Plot and Save PNG Grap N2 ---------------------- ## jpeg(filename ="./plot2.png", width=480, height=480, unit="px") fulltime <- paste(dataset$Date, dataset$Time, sep=" ") time <- strptime(as.character(fulltime), "%Y-%m-%d %H:%M:%S") dataset <- cbind(dataset, time) plot(y = dataset$Global_active_power, x= dataset$time , type="l", xlab="", ylab="Global Active Power (kilowatts)") dev.off() ##-------- END -------------------------------------------- ##
/plot2.R
no_license
chris-villarroel/Assignment-N3
R
false
false
916
r
##-------- Load the .txt as Data Frame ------------------- ## data <- read.table("./household_power_consumption.txt", sep=";", header=TRUE, na.strings = "?") ##-------- Format the Date ------------------------------- ## data$Date <- format(strptime(as.character(data$Date), "%d/%m/%Y"), "%Y-%m-%d") ##-------- Subset the required data ---------------------- ## dataset <- subset(data, data$Date=="2007-02-01" | data$Date == "2007-02-02", ) ##-------- Plot and Save PNG Grap N2 ---------------------- ## jpeg(filename ="./plot2.png", width=480, height=480, unit="px") fulltime <- paste(dataset$Date, dataset$Time, sep=" ") time <- strptime(as.character(fulltime), "%Y-%m-%d %H:%M:%S") dataset <- cbind(dataset, time) plot(y = dataset$Global_active_power, x= dataset$time , type="l", xlab="", ylab="Global Active Power (kilowatts)") dev.off() ##-------- END -------------------------------------------- ##
library(tidyverse) library(ncdf4) library(lubridate) #edit this path #TODO: get this from pecan.xml outdir_name <- "/data/tests/ed2_transect_WB/" #TODO: refactor this to parallelize as it is quite slow # Also, shouldn't use .h5 files since we already summarized into .nc files, right? # If .h5 are necessary, can I use `stars` to open them as a raster stack or something? #Function to read in .nc files for a single ensemble and a single year as a data frame ens_nc_to_df <- function(ens_file) { year_file <- ncdf4::nc_open(ens_file, readunlim = FALSE) npp <- tibble(pft = ncdf4::ncvar_get(year_file, "PFT"), npp = ncdf4::ncvar_get(year_file, "NPP"), time = ncdf4::ncvar_get(year_file, "time")) #convert time variable to ymd hms time_units <- ncatt_get(year_file, "time")$units ncdf4::nc_close(year_file) npp <- npp %>% mutate(time = ymd_hms(time_units) + minutes(time*24*60)) } dirs <- list.dirs(paste0(outdir_name, "out"), recursive = FALSE) ensembles_npps <- # for all ensembles... map_df(dirs, ~{ # get a list of filepaths, one .nc file per year ens_filepaths <- Sys.glob(file.path(.x, "*.nc")) # extract the ensemble number ens_num <- str_remove(word(.x, -2, sep = "-"), "^0+") npp <- ens_filepaths %>% # apply the wrangling function to all years of .nc files map_df(ens_nc_to_df) %>% # wrangle dates into useful format mutate(year = year(time), month = month(time)) %>% # summarize monthly group_by(pft, year, month) %>% summarize(npp = sum(npp), .groups = "drop") %>% #re-build date as last day of the month since NPP is summarized as cumulative for the month mutate(date = ceiling_date(make_date(year, month, 01), "month") - 1) %>% #add the ensemble number add_column(ensemble = ens_num, .before = "pft") } ) ggplot(ensembles_npps, aes(x = date, y = npp, group = ensemble, alpha = 0.2)) + geom_line() + facet_grid(~pft) npp_summary <- ensembles_npps %>% mutate(date = as.POSIXct(as.Date(paste0(date, "-01"))), pft = case_when(pft == 1 ~ "Setaria", pft == 5 ~ "C3 grass", pft == 9 ~ "Hardwood 
trees"), pft = as.factor(pft)) %>% group_by(patch, pft, date) %>% summarize(mean = mean(pft_npp, na.rm = TRUE), median = median(pft_npp, na.rm = TRUE), sd = sd(pft_npp, na.rm = TRUE), lcl_50 = quantile(pft_npp, probs = c(0.25), na.rm = TRUE, names = FALSE), ucl_50 = quantile(pft_npp, probs = c(0.75), na.rm = TRUE, names = FALSE), lcl_95 = quantile(pft_npp, probs = c(0.025), na.rm = TRUE, names = FALSE), ucl_95 = quantile(pft_npp, probs = c(0.975), na.rm = TRUE, names = FALSE)) %>% rename(Species = pft) ggplot(data = npp_summary) + geom_line(aes(x = date, y = median, color = Species)) + geom_ribbon(aes(x = date, ymin = lcl_50, ymax = ucl_50, fill = Species), alpha = 0.4) + #facet_grid(rows = vars(patch)) + scale_x_datetime(labels = scales::date_format("%Y")) + xlab("Year") + ylab("NPP (kgC/m2/yr)") + theme_classic()
/ED2/transect_runs/test/run/plot.R
no_license
KristinaRiemer/model-vignettes
R
false
false
3,281
r
library(tidyverse) library(ncdf4) library(lubridate) #edit this path #TODO: get this from pecan.xml outdir_name <- "/data/tests/ed2_transect_WB/" #TODO: refactor this to parallelize as it is quite slow # Also, shouldn't use .h5 files since we already summarized into .nc files, right? # If .h5 are necessary, can I use `stars` to open them as a raster stack or something? #Function to read in .nc files for a single ensemble and a single year as a data frame ens_nc_to_df <- function(ens_file) { year_file <- ncdf4::nc_open(ens_file, readunlim = FALSE) npp <- tibble(pft = ncdf4::ncvar_get(year_file, "PFT"), npp = ncdf4::ncvar_get(year_file, "NPP"), time = ncdf4::ncvar_get(year_file, "time")) #convert time variable to ymd hms time_units <- ncatt_get(year_file, "time")$units ncdf4::nc_close(year_file) npp <- npp %>% mutate(time = ymd_hms(time_units) + minutes(time*24*60)) } dirs <- list.dirs(paste0(outdir_name, "out"), recursive = FALSE) ensembles_npps <- # for all ensembles... map_df(dirs, ~{ # get a list of filepaths, one .nc file per year ens_filepaths <- Sys.glob(file.path(.x, "*.nc")) # extract the ensemble number ens_num <- str_remove(word(.x, -2, sep = "-"), "^0+") npp <- ens_filepaths %>% # apply the wrangling function to all years of .nc files map_df(ens_nc_to_df) %>% # wrangle dates into useful format mutate(year = year(time), month = month(time)) %>% # summarize monthly group_by(pft, year, month) %>% summarize(npp = sum(npp), .groups = "drop") %>% #re-build date as last day of the month since NPP is summarized as cumulative for the month mutate(date = ceiling_date(make_date(year, month, 01), "month") - 1) %>% #add the ensemble number add_column(ensemble = ens_num, .before = "pft") } ) ggplot(ensembles_npps, aes(x = date, y = npp, group = ensemble, alpha = 0.2)) + geom_line() + facet_grid(~pft) npp_summary <- ensembles_npps %>% mutate(date = as.POSIXct(as.Date(paste0(date, "-01"))), pft = case_when(pft == 1 ~ "Setaria", pft == 5 ~ "C3 grass", pft == 9 ~ "Hardwood 
trees"), pft = as.factor(pft)) %>% group_by(patch, pft, date) %>% summarize(mean = mean(pft_npp, na.rm = TRUE), median = median(pft_npp, na.rm = TRUE), sd = sd(pft_npp, na.rm = TRUE), lcl_50 = quantile(pft_npp, probs = c(0.25), na.rm = TRUE, names = FALSE), ucl_50 = quantile(pft_npp, probs = c(0.75), na.rm = TRUE, names = FALSE), lcl_95 = quantile(pft_npp, probs = c(0.025), na.rm = TRUE, names = FALSE), ucl_95 = quantile(pft_npp, probs = c(0.975), na.rm = TRUE, names = FALSE)) %>% rename(Species = pft) ggplot(data = npp_summary) + geom_line(aes(x = date, y = median, color = Species)) + geom_ribbon(aes(x = date, ymin = lcl_50, ymax = ucl_50, fill = Species), alpha = 0.4) + #facet_grid(rows = vars(patch)) + scale_x_datetime(labels = scales::date_format("%Y")) + xlab("Year") + ylab("NPP (kgC/m2/yr)") + theme_classic()
require(XLConnect) wb <- loadWorkbook("RNASeq_Tools.xlsx") lst = readWorksheet(wb, sheet = getSheets(wb)[-1], startRow=1, header=FALSE) for(i in getSheets(wb)[-1]) { fname = gsub(" ","",i) write.csv(lst[[i]], row.names=FALSE, col.names=NA, file=paste0("csv/",fname,".csv")) }
/SoftwareList/xls2csv.R
no_license
ncchung/bioinformatics
R
false
false
280
r
require(XLConnect) wb <- loadWorkbook("RNASeq_Tools.xlsx") lst = readWorksheet(wb, sheet = getSheets(wb)[-1], startRow=1, header=FALSE) for(i in getSheets(wb)[-1]) { fname = gsub(" ","",i) write.csv(lst[[i]], row.names=FALSE, col.names=NA, file=paste0("csv/",fname,".csv")) }
testlist <- list(A = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 5.05213242132815e+208, 4.12396251261199e-221, 0, 0, 0), .Dim = c(1L, 7L)), B = structure(0, .Dim = c(1L, 1L))) result <- do.call(multivariance:::match_rows,testlist) str(result)
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613109957-test.R
no_license
akhikolla/updatedatatype-list3
R
false
false
257
r
testlist <- list(A = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 5.05213242132815e+208, 4.12396251261199e-221, 0, 0, 0), .Dim = c(1L, 7L)), B = structure(0, .Dim = c(1L, 1L))) result <- do.call(multivariance:::match_rows,testlist) str(result)
library(car) ;library(dae) ;library(nlme) ;library(effects) library(ggplot2) ;library(psych) ;library(interplot) library(plyr) ;library(devtools);library(ez) rm(list=ls()) names_group=c("old","young","patient","control") fname = "/Users/heshamelshafei/Dropbox/Fieldtripping/R/txt/PrepAtt22_behav_table4R.csv" behav_summary <- read.table(fname,header=T, sep=";") list_grp <- unique(behav_summary$idx_group) pat_concat <- data.frame(SUB=character(), GROUP=character(), p_incorrect=numeric(), p_miss =numeric(), p_falseAlarm = numeric(),stringsAsFactors=TRUE) for (gr in 1:length(list_grp)){ pat <- behav_summary[behav_summary$idx_group == list_grp[gr],] list_suj <- unique(pat$sub_idx) for (sb in 1:length(list_suj)){ for (cue in 1:4){ for (dis in 1:3){ if (cue>2){new_cue=sub_pat <- pat[pat$sub_idx == list_suj[sb] & pat$CUE==cue-2 & pat$DIS==dis-1,]} if (cue==1){new_cue=sub_pat <- pat[pat$sub_idx == list_suj[sb] & pat$CUE==0 & pat$TAR%%2 != 0 & pat$DIS==dis-1,]} if (cue==2){new_cue=sub_pat <- pat[pat$sub_idx == list_suj[sb] & pat$CUE==0 & pat$TAR%%2 == 0 &pat$DIS==dis-1,]} suj <- paste0(names_group[gr],sb) n_incorrect <- lengths(sub_pat[sub_pat$CORR==-1,])[1] n_miss <- lengths(sub_pat[sub_pat$ERROR==1,])[1] n_fa <- lengths(sub_pat[sub_pat$ERROR==3,])[1] n_tot <- lengths(sub_pat)[1] p_incorrect <- (n_incorrect/n_tot)*100 p_miss <- (n_miss/n_tot)*100 p_fa <- (n_fa/n_tot)*100 cue_names = c('NLCue','NRCue','LCue','RCue') dis_names = c('D0','D1','D2') medianRT = median(sub_pat[sub_pat$CORR==1,11]) bloc <- cbind(suj,names_group[gr],cue_names[cue],dis_names[dis],p_incorrect,p_miss,p_fa,medianRT) pat_concat<-rbind(pat_concat,bloc) } } } } rm(list=setdiff(ls(), "pat_concat")) names(pat_concat) <- c("SUB","GROUP","CUE","DIS","PerIncorrect", "PerMiss","PerFlaseAlarm","MedianRT") pat_concat<- pat_concat[pat_concat$GROUP=="young",] pd <- position_dodge(.2) ggplot(pat_concat, aes(x=DIS, y=MedianRT, colour=CUE, group=CUE, shape=CUE)) + stat_summary(fun.data = "mean_se", 
geom="errorbar",position=pd) + stat_summary(fun.y="mean", geom="line",position=pd) + stat_summary(fun.y="mean", geom="point", size=8,position=pd) + scale_x_discrete(breaks=c("0", "1", "2"), labels=c("T0", "T1", "T2"))+ coord_cartesian(ylim=c(2, 7)) + scale_y_continuous(breaks=seq(2, 6, 2)) + scale_color_manual(values=c("gray30", "gray50","gray70"),name ="Gruppe", breaks=c("baseline", "negative", "neutral"), labels=c("Baseline", "Attend-negative", "Attend-neutral")) + scale_shape_discrete(name ="Gruppe", breaks=c("baseline", "negative", "neutral"), labels=c("Baseline", "Attend-negative", "Attend-neutral")) + theme( panel.grid.major.y = element_line(colour = "gray80", size = NULL, linetype = NULL, # horizontale Linien lineend = NULL) ,panel.grid.minor.y = element_line(colour = "gray90", size = NULL, linetype = NULL, lineend = NULL) ,panel.grid.major.x = element_blank() # vertikale Linien ,panel.grid.minor.x = element_blank() ,legend.background = element_rect(fill = "white", colour = "white") # Legende ,legend.key = element_rect(fill = "white", colour = "white") ,panel.background = element_rect(fill = "white", colour = "white", size = NULL, # Panel Hintergrund linetype = NULL) ,axis.line = element_line(colour = "black", size=.5) ,axis.ticks.x = element_line(colour = "black", size=.5) ,axis.ticks.y = element_line(colour = "black", size=.5) ,axis.ticks.length = unit(0.5, "cm") ,axis.ticks.margin = unit(.3, "cm") ,axis.title.x = element_text(family = NULL, face = "bold", size = 11,vjust=0.1) ,axis.title.y = element_text(family = NULL, face = "bold", size = 11,vjust=0.1) ,axis.text=element_text(colour="black") ,legend.title = element_text(family = NULL, face = "plain", size = 11) ,legend.text = element_text(family = NULL, face = "plain", size = 9) ) + xlab("Messzeitpunkt")+ ylab("State-KA (M)")
/rstudio/PrepAtt22_BehavioralAnalysis_Percentages_InteractionPlot.R
no_license
elshafeh/own
R
false
false
4,442
r
library(car) ;library(dae) ;library(nlme) ;library(effects) library(ggplot2) ;library(psych) ;library(interplot) library(plyr) ;library(devtools);library(ez) rm(list=ls()) names_group=c("old","young","patient","control") fname = "/Users/heshamelshafei/Dropbox/Fieldtripping/R/txt/PrepAtt22_behav_table4R.csv" behav_summary <- read.table(fname,header=T, sep=";") list_grp <- unique(behav_summary$idx_group) pat_concat <- data.frame(SUB=character(), GROUP=character(), p_incorrect=numeric(), p_miss =numeric(), p_falseAlarm = numeric(),stringsAsFactors=TRUE) for (gr in 1:length(list_grp)){ pat <- behav_summary[behav_summary$idx_group == list_grp[gr],] list_suj <- unique(pat$sub_idx) for (sb in 1:length(list_suj)){ for (cue in 1:4){ for (dis in 1:3){ if (cue>2){new_cue=sub_pat <- pat[pat$sub_idx == list_suj[sb] & pat$CUE==cue-2 & pat$DIS==dis-1,]} if (cue==1){new_cue=sub_pat <- pat[pat$sub_idx == list_suj[sb] & pat$CUE==0 & pat$TAR%%2 != 0 & pat$DIS==dis-1,]} if (cue==2){new_cue=sub_pat <- pat[pat$sub_idx == list_suj[sb] & pat$CUE==0 & pat$TAR%%2 == 0 &pat$DIS==dis-1,]} suj <- paste0(names_group[gr],sb) n_incorrect <- lengths(sub_pat[sub_pat$CORR==-1,])[1] n_miss <- lengths(sub_pat[sub_pat$ERROR==1,])[1] n_fa <- lengths(sub_pat[sub_pat$ERROR==3,])[1] n_tot <- lengths(sub_pat)[1] p_incorrect <- (n_incorrect/n_tot)*100 p_miss <- (n_miss/n_tot)*100 p_fa <- (n_fa/n_tot)*100 cue_names = c('NLCue','NRCue','LCue','RCue') dis_names = c('D0','D1','D2') medianRT = median(sub_pat[sub_pat$CORR==1,11]) bloc <- cbind(suj,names_group[gr],cue_names[cue],dis_names[dis],p_incorrect,p_miss,p_fa,medianRT) pat_concat<-rbind(pat_concat,bloc) } } } } rm(list=setdiff(ls(), "pat_concat")) names(pat_concat) <- c("SUB","GROUP","CUE","DIS","PerIncorrect", "PerMiss","PerFlaseAlarm","MedianRT") pat_concat<- pat_concat[pat_concat$GROUP=="young",] pd <- position_dodge(.2) ggplot(pat_concat, aes(x=DIS, y=MedianRT, colour=CUE, group=CUE, shape=CUE)) + stat_summary(fun.data = "mean_se", 
geom="errorbar",position=pd) + stat_summary(fun.y="mean", geom="line",position=pd) + stat_summary(fun.y="mean", geom="point", size=8,position=pd) + scale_x_discrete(breaks=c("0", "1", "2"), labels=c("T0", "T1", "T2"))+ coord_cartesian(ylim=c(2, 7)) + scale_y_continuous(breaks=seq(2, 6, 2)) + scale_color_manual(values=c("gray30", "gray50","gray70"),name ="Gruppe", breaks=c("baseline", "negative", "neutral"), labels=c("Baseline", "Attend-negative", "Attend-neutral")) + scale_shape_discrete(name ="Gruppe", breaks=c("baseline", "negative", "neutral"), labels=c("Baseline", "Attend-negative", "Attend-neutral")) + theme( panel.grid.major.y = element_line(colour = "gray80", size = NULL, linetype = NULL, # horizontale Linien lineend = NULL) ,panel.grid.minor.y = element_line(colour = "gray90", size = NULL, linetype = NULL, lineend = NULL) ,panel.grid.major.x = element_blank() # vertikale Linien ,panel.grid.minor.x = element_blank() ,legend.background = element_rect(fill = "white", colour = "white") # Legende ,legend.key = element_rect(fill = "white", colour = "white") ,panel.background = element_rect(fill = "white", colour = "white", size = NULL, # Panel Hintergrund linetype = NULL) ,axis.line = element_line(colour = "black", size=.5) ,axis.ticks.x = element_line(colour = "black", size=.5) ,axis.ticks.y = element_line(colour = "black", size=.5) ,axis.ticks.length = unit(0.5, "cm") ,axis.ticks.margin = unit(.3, "cm") ,axis.title.x = element_text(family = NULL, face = "bold", size = 11,vjust=0.1) ,axis.title.y = element_text(family = NULL, face = "bold", size = 11,vjust=0.1) ,axis.text=element_text(colour="black") ,legend.title = element_text(family = NULL, face = "plain", size = 11) ,legend.text = element_text(family = NULL, face = "plain", size = 9) ) + xlab("Messzeitpunkt")+ ylab("State-KA (M)")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rangemap_fig.R \name{rangemap_fig} \alias{rangemap_fig} \title{Figures of species range maps} \usage{ rangemap_fig(range, polygons, add_extent = FALSE, add_occurrences = FALSE, basemap_color = "grey93", range_color = "darkgreen", extent_color = "blue", occurrences_color = "yellow", grid = FALSE, grid_sides = "bottomleft", ylabels_position = 2.3, legend = FALSE, legend_position = "bottomright", northarrow = FALSE, northarrow_position = "topright", scalebar = FALSE, scalebar_position = "bottomleft", scalebar_length = 100, zoom = 1, save_fig = FALSE, name = "range_fig", format = "png", resolution = 300, width = 166, height = 166) } \arguments{ \item{range}{an object produced with any of the following functions: \code{\link{rangemap_buff}}, \code{\link{rangemap_bound}}, \code{\link{rangemap_hull}}, \code{\link{rangemap_enm}}, and \code{\link{rangemap_tsa}}.} \item{polygons}{a SpatialPolygon object to be used as base map for plotting the species range. If not provided, a simplified world map will be used.} \item{add_extent}{(logical) if TRUE, the extent of occurrence of the species will be added to the figure. Ignored if the \code{range} is product of the \code{\link{rangemap_bound}} function and administrative areas were selected only based on names. Default = FALSE.} \item{add_occurrences}{(logical) if TRUE, the species occurrence records will be added to the figure. Ignored if the \code{range} is product of the \code{\link{rangemap_bound}} function and administrative areas were selected only based on names. Default = FALSE.} \item{basemap_color}{color for the basemap (\code{polygons}) to be ploted in the figure. Default = "grey93".} \item{range_color}{color for the species \code{range} to be ploted in the figure. Default = "darkgreen".} \item{extent_color}{color for the species extent of occurrence to be ploted in the figure. 
Default = "blue".} \item{occurrences_color}{color for the species \code{occurrences} to be ploted in the figure. Default = "yellow".} \item{grid}{(logical) if TRUE, labels and grid division ticks will be inserted in \code{grid_sides}. Default = FALSE.} \item{grid_sides}{(character) sides in which the labels will be placed in the figure. Options are the same than for other position character indicators (see details). Default = "bottomleft".} \item{ylabels_position}{(numeric) if \code{grid} = TRUE, separation (in lines) of y axis labels from the axis. Bigger numbers will increase separation. Default = 2.3.} \item{legend}{(logical) if TRUE, a legend of the plotted features will be added to the figure at \code{legend_position}. Default = FALSE.} \item{legend_position}{(numeric or character) site in the figure where the legend will be placed. If numeric, vector of leght two indicating x and y coordinates to be used to position the legend. See details for options of character indicators of position. Default = "bottomright".} \item{northarrow}{(logical) if TRUE, a simple north arrow will be placed in \code{northarrow_position}. Default = FALSE.} \item{northarrow_position}{(numeric or character) site in the figure where the north legend will be placed. If numeric, vector of leght two indicating x and y coordinates to be used to position the north arrow. See details for options of character indicators of position. Default = "topright".} \item{scalebar}{(logical) if TRUE, a simple scale bar will be inserted in the figure at \code{scalebar_position} with a length of \code{scalebar_length}. Default = FALSE.} \item{scalebar_position}{(numeric or character) site in the figure where the scale bar will be placed. If numeric, vector of leght two indicating x and y coordinates to be used to position the scale bar. See details for options of character indicators of position. Default = "bottomleft".} \item{scalebar_length}{(numeric) length of the scale bar in km. 
Using entire numbers divisble for two is recommended. Default = 100.} \item{zoom}{(numeric) zoom factor when ploting the species range in a map. Default = 1. Values lower than 1 will zoom in into the species range and values bigger than 1 will zoom out. A value of 2 will duplicate the area that the figure is covering.} \item{save_fig}{(logical) if TRUE, the figure will be written in the working directory. Default = FALSE.} \item{name}{(character) if \code{save_fig} = TRUE, name of the figure to be exported. Default = "range_fig".} \item{format}{(character) if \code{save_fig} = TRUE, format in which the figure will be written. Options include "bmp", "png", "jpeg", "tiff", and "pdf". Default = "png".} \item{resolution}{(numeric) if \code{save_fig} = TRUE, resolution (ppi) in wich the figure will be exported. Default = 300.} \item{width}{(numeric) if \code{save_fig} = TRUE, width of the figure in mm. Default = 166.} \item{height}{(numeric) if \code{save_fig} = TRUE, height of the figure in mm. Default = 166.} } \value{ A figure of the species distributional range in a geographical context, with map components defined by the user. } \description{ rangemap_fig generates customizable figures of species range maps using objects produced by other functions of this package. } \details{ Ranges should be generated with any of the functions: \code{\link{rangemap_buff}}, \code{\link{rangemap_bound}}, \code{\link{rangemap_hull}}, \code{\link{rangemap_enm}}, and \code{\link{rangemap_tsa}}. Position of distinct elements depend on the spatial configuration of the species range. Therefore, their position may need to be changed if the elements are needed. Position options are: "bottomright", "bottomleft", "topleft", and "topright". Numerical descriptions of positions are also allowed. Scale bar is ploted using a modification of the "scalebar" function developed by Tanimura et al. (2007) \url{http://hdl.handle.net/10.18637/jss.v019.c01}. 
} \examples{ if(!require(rgbif)){ install.packages("rgbif") library(rgbif) } # getting the data from GBIF species <- name_lookup(query = "Dasypus kappleri", rank="species", return = "data") # information about the species occ_count(taxonKey = species$key[14], georeferenced = TRUE) # testing if keys return records key <- species$key[14] # using species key that return information occ <- occ_search(taxonKey = key, return = "data") # using the taxon key # keeping only georeferenced records occ_g <- occ[!is.na(occ$decimalLatitude) & !is.na(occ$decimalLongitude), c("name", "decimalLongitude", "decimalLatitude")] level <- 0 adm <- "Ecuador" dissolve <- FALSE save <- FALSE countries <- c("PER", "BRA", "COL", "VEN", "ECU", "GUF", "GUY", "SUR", "BOL") # creating the species range map range <- rangemap_bound(occurrences = occ_g, country_code = countries, adm_areas = adm, boundary_level = level, dissolve = dissolve, save_shp = save) # arguments for the species range figure extent <- TRUE occ <- TRUE legend <- TRUE north <- TRUE # creating the species range figure rangemap_fig(range, add_extent = extent, add_occurrences = occ, legend = legend, northarrow = north) #dev.off() # for returning to default par settings }
/man/rangemap_fig.Rd
no_license
ynzhangrong/rangemap
R
false
true
7,216
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rangemap_fig.R \name{rangemap_fig} \alias{rangemap_fig} \title{Figures of species range maps} \usage{ rangemap_fig(range, polygons, add_extent = FALSE, add_occurrences = FALSE, basemap_color = "grey93", range_color = "darkgreen", extent_color = "blue", occurrences_color = "yellow", grid = FALSE, grid_sides = "bottomleft", ylabels_position = 2.3, legend = FALSE, legend_position = "bottomright", northarrow = FALSE, northarrow_position = "topright", scalebar = FALSE, scalebar_position = "bottomleft", scalebar_length = 100, zoom = 1, save_fig = FALSE, name = "range_fig", format = "png", resolution = 300, width = 166, height = 166) } \arguments{ \item{range}{an object produced with any of the following functions: \code{\link{rangemap_buff}}, \code{\link{rangemap_bound}}, \code{\link{rangemap_hull}}, \code{\link{rangemap_enm}}, and \code{\link{rangemap_tsa}}.} \item{polygons}{a SpatialPolygon object to be used as base map for plotting the species range. If not provided, a simplified world map will be used.} \item{add_extent}{(logical) if TRUE, the extent of occurrence of the species will be added to the figure. Ignored if the \code{range} is product of the \code{\link{rangemap_bound}} function and administrative areas were selected only based on names. Default = FALSE.} \item{add_occurrences}{(logical) if TRUE, the species occurrence records will be added to the figure. Ignored if the \code{range} is product of the \code{\link{rangemap_bound}} function and administrative areas were selected only based on names. Default = FALSE.} \item{basemap_color}{color for the basemap (\code{polygons}) to be ploted in the figure. Default = "grey93".} \item{range_color}{color for the species \code{range} to be ploted in the figure. Default = "darkgreen".} \item{extent_color}{color for the species extent of occurrence to be ploted in the figure. 
Default = "blue".} \item{occurrences_color}{color for the species \code{occurrences} to be ploted in the figure. Default = "yellow".} \item{grid}{(logical) if TRUE, labels and grid division ticks will be inserted in \code{grid_sides}. Default = FALSE.} \item{grid_sides}{(character) sides in which the labels will be placed in the figure. Options are the same than for other position character indicators (see details). Default = "bottomleft".} \item{ylabels_position}{(numeric) if \code{grid} = TRUE, separation (in lines) of y axis labels from the axis. Bigger numbers will increase separation. Default = 2.3.} \item{legend}{(logical) if TRUE, a legend of the plotted features will be added to the figure at \code{legend_position}. Default = FALSE.} \item{legend_position}{(numeric or character) site in the figure where the legend will be placed. If numeric, vector of leght two indicating x and y coordinates to be used to position the legend. See details for options of character indicators of position. Default = "bottomright".} \item{northarrow}{(logical) if TRUE, a simple north arrow will be placed in \code{northarrow_position}. Default = FALSE.} \item{northarrow_position}{(numeric or character) site in the figure where the north legend will be placed. If numeric, vector of leght two indicating x and y coordinates to be used to position the north arrow. See details for options of character indicators of position. Default = "topright".} \item{scalebar}{(logical) if TRUE, a simple scale bar will be inserted in the figure at \code{scalebar_position} with a length of \code{scalebar_length}. Default = FALSE.} \item{scalebar_position}{(numeric or character) site in the figure where the scale bar will be placed. If numeric, vector of leght two indicating x and y coordinates to be used to position the scale bar. See details for options of character indicators of position. Default = "bottomleft".} \item{scalebar_length}{(numeric) length of the scale bar in km. 
Using entire numbers divisble for two is recommended. Default = 100.} \item{zoom}{(numeric) zoom factor when ploting the species range in a map. Default = 1. Values lower than 1 will zoom in into the species range and values bigger than 1 will zoom out. A value of 2 will duplicate the area that the figure is covering.} \item{save_fig}{(logical) if TRUE, the figure will be written in the working directory. Default = FALSE.} \item{name}{(character) if \code{save_fig} = TRUE, name of the figure to be exported. Default = "range_fig".} \item{format}{(character) if \code{save_fig} = TRUE, format in which the figure will be written. Options include "bmp", "png", "jpeg", "tiff", and "pdf". Default = "png".} \item{resolution}{(numeric) if \code{save_fig} = TRUE, resolution (ppi) in wich the figure will be exported. Default = 300.} \item{width}{(numeric) if \code{save_fig} = TRUE, width of the figure in mm. Default = 166.} \item{height}{(numeric) if \code{save_fig} = TRUE, height of the figure in mm. Default = 166.} } \value{ A figure of the species distributional range in a geographical context, with map components defined by the user. } \description{ rangemap_fig generates customizable figures of species range maps using objects produced by other functions of this package. } \details{ Ranges should be generated with any of the functions: \code{\link{rangemap_buff}}, \code{\link{rangemap_bound}}, \code{\link{rangemap_hull}}, \code{\link{rangemap_enm}}, and \code{\link{rangemap_tsa}}. Position of distinct elements depend on the spatial configuration of the species range. Therefore, their position may need to be changed if the elements are needed. Position options are: "bottomright", "bottomleft", "topleft", and "topright". Numerical descriptions of positions are also allowed. Scale bar is ploted using a modification of the "scalebar" function developed by Tanimura et al. (2007) \url{http://hdl.handle.net/10.18637/jss.v019.c01}. 
} \examples{ if(!require(rgbif)){ install.packages("rgbif") library(rgbif) } # getting the data from GBIF species <- name_lookup(query = "Dasypus kappleri", rank="species", return = "data") # information about the species occ_count(taxonKey = species$key[14], georeferenced = TRUE) # testing if keys return records key <- species$key[14] # using species key that return information occ <- occ_search(taxonKey = key, return = "data") # using the taxon key # keeping only georeferenced records occ_g <- occ[!is.na(occ$decimalLatitude) & !is.na(occ$decimalLongitude), c("name", "decimalLongitude", "decimalLatitude")] level <- 0 adm <- "Ecuador" dissolve <- FALSE save <- FALSE countries <- c("PER", "BRA", "COL", "VEN", "ECU", "GUF", "GUY", "SUR", "BOL") # creating the species range map range <- rangemap_bound(occurrences = occ_g, country_code = countries, adm_areas = adm, boundary_level = level, dissolve = dissolve, save_shp = save) # arguments for the species range figure extent <- TRUE occ <- TRUE legend <- TRUE north <- TRUE # creating the species range figure rangemap_fig(range, add_extent = extent, add_occurrences = occ, legend = legend, northarrow = north) #dev.off() # for returning to default par settings }
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shiny_app.R
\name{start_shiny_app}
\alias{start_shiny_app}
\title{Shiny app for absolute copy number scaling}
\usage{
start_shiny_app(
  load_sample_data = TRUE,
  launch_browser = TRUE,
  host = getOption("shiny.host", "127.0.0.1"),
  port = getOption("shiny.port")
)
}
\arguments{
\item{load_sample_data}{load a sample data set on starting the Shiny app.}

\item{launch_browser}{launch the application in a web browser on starting the Shiny app.}

\item{host}{The IPv4 address that the application should listen on. Defaults to the shiny.host option, if set, or "127.0.0.1" if not. The default value of "127.0.0.1" will only allow the current machine to access the Shiny app; set to "0.0.0.0" to allow other clients to connect.}

\item{port}{the TCP port that the application should listen on.}
}
\value{
opens the Shiny app in a web browser.
}
\description{
Starts the shiny app in a new tab in the default web browser. If a browser tab isn't opened, it may be necessary to manually open the URL shown in the console.
}
/man/start_shiny_app.Rd
permissive
crukci-bioinformatics/rascal
R
false
true
1,104
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/shiny_app.R \name{start_shiny_app} \alias{start_shiny_app} \title{Shiny app for absolute copy number scaling} \usage{ start_shiny_app( load_sample_data = TRUE, launch_browser = TRUE, host = getOption("shiny.host", "127.0.0.1"), port = getOption("shiny.port") ) } \arguments{ \item{load_sample_data}{load a sample data set on starting the Shiny app.} \item{launch_browser}{launch the application in a web browser on starting the Shiny app.} \item{host}{The IPv4 address that the application should listen on. Defaults to the shiny.host option, if set, or "127.0.0.1" if not. The default value of "127.0.0.1" will only allow the current machine to access the Shiny app; set to "0.0.0.0" to allow other clients to connect.} \item{port}{the TCP port that the application should listen on.} } \value{ opens the Shiny app in a web browser. } \description{ Starts the shiny app in a new tab in the the default web browser. If a browser tab isn't opened it may be necessary to manually open the URL shown in the console. }
#' Barnes-Hut implementation of t-Distributed Stochastic Neighbor Embedding #' #' Wrapper for the C++ implementation of Multicore t-SNE. t-SNE is a method for constructing a low dimensional embedding of high-dimensional data, distances or similarities. Exact t-SNE can be computed by setting theta=0.0. #' #' Given a distance matrix \eqn{D} between input objects (which by default, is the euclidean distances between two objects), we calculate a similarity score in the original space p_ij. \deqn{ p_{j | i} = \frac{\exp(-\|D_{ij}\|^2 / 2 \sigma_i^2)}{\sum_{k \neq i} \exp(-\|D_{ij}\|^2 / 2 \sigma_i^2)} } which is then symmetrized using: \deqn{ p_{i j}=\frac{p_{j|i} + p_{i|j}}{2n}}. The \eqn{\sigma} for each object is chosen in such a way that the perplexity of p_{j|i} has a value that is close to the user defined perplexity. This value effectively controls how many nearest neighbours are taken into account when constructing the embedding in the low-dimensional space. #' For the low-dimensional space we use the Cauchy distribution (t-distribution with one degree of freedom) as the distribution of the distances to neighbouring objects: #' \deqn{ q_{i j} = \frac{(1+ \| y_i-y_j\|^2)^{-1}}{\sum_{k \neq l} 1+ \| y_k-y_l\|^2)^{-1}}}. #' By changing the location of the objects y in the embedding to minimize the Kullback-Leibler divergence between these two distributions \eqn{ q_{i j}} and \eqn{ p_{i j}}, we create a map that focusses on small-scale structure, due to the assymetry of the KL-divergence. The t-distribution is chosen to avoid the crowding problem: in the original high dimensional space, there are potentially many equidistant objects with moderate distance from a particular object, more than can be accounted for in the low dimensional representation. The t-distribution makes sure that these objects are more spread out in the new representation. 
#' #' For larger datasets, a problem with the a simple gradient descent to minimize the Kullback-Leibler divergence is the computational complexity of each gradient step (which is \eqn{O(n^2)}). The Barnes-Hut implementation of the algorithm attempts to mitigate this problem using two tricks: (1) approximating small similarities by 0 in the \eqn{p_{ij}} distribution, where the non-zero entries are computed by finding 3*perplexity nearest neighbours using an efficient tree search. (2) Using the Barnes-Hut algorithm in the computation of the gradient which approximates large distance similarities using a quadtree. This approximation is controlled by the \code{theta} parameter, with smaller values leading to more exact approximations. When \code{theta=0.0}, the implementation uses a standard t-SNE implementation. The Barnes-Hut approximation leads to a \eqn{O(n log(n))} computational complexity for each iteration. #' #' During the minimization of the KL-divergence, the implementation uses a trick known as early exaggeration, which multiplies the \eqn{p_{ij}}'s by 12 during the first 250 iterations. This leads to tighter clustering and more distance between clusters of objects. This early exaggeration is not used when the user gives an initialization of the objects in the embedding by setting \code{Y_init}. During the early exaggeration phase, a momentum term of 0.5 is used while this is changed to 0.8 after the first 250 iterations. All these default parameters can be changed by the user. #' #' After checking the correctness of the input, the \code{Rtsne.multicore} function (optionally) does an initial reduction of the feature space using \code{\link{prcomp}}, before calling the C++ TSNE implementation. Since R's random number generator is used, use \code{\link{set.seed}} before the function call to get reproducible results. #' #' If \code{X} is a data.frame, it is transformed into a matrix using \code{\link{model.matrix}}. 
If \code{X} is a \code{\link{dist}} object, it is currently first expanded into a full distance matrix. #' #' @param X matrix; Data matrix #' @param dims integer; Output dimensionality (Currently only support 2) #' @param initial_dims integer; the number of dimensions that should be retained in the initial PCA step (default: 50) #' @param perplexity numeric; Perplexity parameter #' @param theta numeric; Speed/accuracy trade-off (increase for less accuracy), set to 0.0 for exact TSNE (default: 0.5) #' @param check_duplicates logical; Checks whether duplicates are present. It is best to make sure there are no duplicates present and set this option to FALSE, especially for large datasets (default: TRUE) #' @param pca logical; Whether an initial PCA step should be performed (default: TRUE) #' @param num_threads integer: number of cores to be used for parallel runs. Default is 2. #' @param max_iter integer; Number of iterations (default: 1000) #' @param verbose logical; Whether progress updates should be printed (default: FALSE) #' @param ... Other arguments that can be passed to Rtsne.multicore #' @param pca_center logical; Should data be centered before pca is applied? (default: TRUE) #' @param pca_scale logical; Should data be scaled before pca is applied? (default: FALSE) #' #' @return List with the following elements: #' \item{Y}{Matrix containing the new representations for the objects} #' \item{N}{Number of objects} #' \item{origD}{Original Dimensionality before TSNE} #' \item{perplexity}{See above} #' \item{theta}{See above} #' #' @references Maaten, L. Van Der, 2014. Accelerating t-SNE using Tree-Based Algorithms. Journal of Machine Learning Research, 15, p.3221-3245. #' @references van der Maaten, L.J.P. & Hinton, G.E., 2008. Visualizing High-Dimensional Data Using t-SNE. Journal of Machine Learning Research, 9, pp.2579-2605. 
#'
#' @examples
#' iris_unique <- unique(iris) # Remove duplicates
#' iris_matrix <- as.matrix(iris_unique[,1:4])
#' set.seed(42) # Set a seed if you want reproducible results
#' tsne_out <- Rtsne.multicore(iris_matrix, num_threads = 2) # Run TSNE
#' # Show the objects in the 2D tsne representation
#' plot(tsne_out$Y,col=iris_unique$Species)
#'
#' tsne_out <- Rtsne.multicore(iris_unique[,1:4], theta=0.0, pca=FALSE,max_iter=350)
#' @useDynLib Rtsne.multicore
#' @import Rcpp
#' @importFrom stats model.matrix prcomp
#' @importFrom utils capture.output
#'
#' @export
Rtsne.multicore <- function(X, ...) {
  UseMethod("Rtsne.multicore", X)
}

#' @describeIn Rtsne.multicore Default Interface
#' @export
Rtsne.multicore.default <- function(X, dims = 2, initial_dims = 50,
                                    perplexity = 30, theta = 0.5,
                                    check_duplicates = TRUE, pca = TRUE,
                                    max_iter = 1000, num_threads = 2,
                                    verbose = FALSE,
                                    is_distance = FALSE, Y_init = NULL,
                                    pca_center = TRUE, pca_scale = FALSE,
                                    # Accepted but not yet forwarded to the
                                    # C++ backend (see commented call below):
                                    # stop_lying_iter, mom_switch_iter,
                                    # momentum, final_momentum, eta,
                                    # exaggeration_factor
                                    ...) {
  # --- Input validation ------------------------------------------------------
  if (dims != 2) stop("Only 2d output is supported due to its c++ implemenation!")
  if (!is.numeric(theta) || (theta < 0.0) || (theta > 1.0)) { stop("Incorrect theta.") }
  if (nrow(X) - 1 < 3 * perplexity) { stop("Perplexity is too large.") }
  if (!is.matrix(X)) { stop("Input X is not a matrix") }
  if (!(max_iter > 0)) { stop("Incorrect number of iterations.") }
  if (!(is.logical(pca_center) && is.logical(pca_scale))) {
    stop("pca_center and pca_scale should be TRUE or FALSE")
  }

  # initial_dims must be a positive whole number (it may arrive as a double).
  is.wholenumber <- function(x, tol = .Machine$double.eps^0.5) abs(x - round(x)) < tol
  if (!is.wholenumber(initial_dims) || initial_dims <= 0) {
    stop("Incorrect initial dimensionality.")
  }

  # --- Optional PCA pre-reduction of the feature space -----------------------
  if (pca) {
    pca_result <- prcomp(X, retx = TRUE, center = pca_center, scale. = pca_scale)
    X <- pca_result$x[, 1:min(initial_dims, ncol(pca_result$x))]
  }

  if (check_duplicates) {
    if (any(duplicated(X))) { stop("Remove duplicates before running TSNE.") }
  }

  # Compute Squared distance if we are using exact TSNE.
  # NOTE(review): this squares the (possibly PCA-reduced) data matrix
  # elementwise rather than a distance matrix -- confirm against the C++ side.
  if (theta == 0.0) {
    X <- X^2
  }

  # NOTE(review): is_distance and Y_init are accepted but not forwarded to
  # Rtsne_cpp yet (see the commented-out arguments below).
  #
  # Capture R-level output of the backend call and only show it when
  # verbose = TRUE.  Previously `msg` was never assigned (the capture.output()
  # line was commented out while `print(msg)` stayed active), so calling with
  # verbose = TRUE crashed with "object 'msg' not found".
  msg <- capture.output(
    res <- Rtsne_cpp(X, dims, perplexity, theta, num_threads, max_iter
                     # , is_distance, Y_init, init, stop_lying_iter,
                     # mom_switch_iter, momentum, final_momentum, eta,
                     # exaggeration_factor
    )
  )
  if (verbose) print(msg)
  res
}

#' @describeIn Rtsne.multicore tsne on data.frame
#' @export
Rtsne.multicore.data.frame <- function(X, ...) {
  # Expand factors etc. into a numeric model matrix (no intercept column).
  X <- model.matrix(~ . - 1, X)
  Rtsne.multicore(X, ...)
}
/R/Rtsne.R
no_license
pkharchenko/Rtsne.multicore
R
false
false
9,465
r
#' Barnes-Hut implementation of t-Distributed Stochastic Neighbor Embedding #' #' Wrapper for the C++ implementation of Multicore t-SNE. t-SNE is a method for constructing a low dimensional embedding of high-dimensional data, distances or similarities. Exact t-SNE can be computed by setting theta=0.0. #' #' Given a distance matrix \eqn{D} between input objects (which by default, is the euclidean distances between two objects), we calculate a similarity score in the original space p_ij. \deqn{ p_{j | i} = \frac{\exp(-\|D_{ij}\|^2 / 2 \sigma_i^2)}{\sum_{k \neq i} \exp(-\|D_{ij}\|^2 / 2 \sigma_i^2)} } which is then symmetrized using: \deqn{ p_{i j}=\frac{p_{j|i} + p_{i|j}}{2n}}. The \eqn{\sigma} for each object is chosen in such a way that the perplexity of p_{j|i} has a value that is close to the user defined perplexity. This value effectively controls how many nearest neighbours are taken into account when constructing the embedding in the low-dimensional space. #' For the low-dimensional space we use the Cauchy distribution (t-distribution with one degree of freedom) as the distribution of the distances to neighbouring objects: #' \deqn{ q_{i j} = \frac{(1+ \| y_i-y_j\|^2)^{-1}}{\sum_{k \neq l} 1+ \| y_k-y_l\|^2)^{-1}}}. #' By changing the location of the objects y in the embedding to minimize the Kullback-Leibler divergence between these two distributions \eqn{ q_{i j}} and \eqn{ p_{i j}}, we create a map that focusses on small-scale structure, due to the assymetry of the KL-divergence. The t-distribution is chosen to avoid the crowding problem: in the original high dimensional space, there are potentially many equidistant objects with moderate distance from a particular object, more than can be accounted for in the low dimensional representation. The t-distribution makes sure that these objects are more spread out in the new representation. 
#' #' For larger datasets, a problem with the a simple gradient descent to minimize the Kullback-Leibler divergence is the computational complexity of each gradient step (which is \eqn{O(n^2)}). The Barnes-Hut implementation of the algorithm attempts to mitigate this problem using two tricks: (1) approximating small similarities by 0 in the \eqn{p_{ij}} distribution, where the non-zero entries are computed by finding 3*perplexity nearest neighbours using an efficient tree search. (2) Using the Barnes-Hut algorithm in the computation of the gradient which approximates large distance similarities using a quadtree. This approximation is controlled by the \code{theta} parameter, with smaller values leading to more exact approximations. When \code{theta=0.0}, the implementation uses a standard t-SNE implementation. The Barnes-Hut approximation leads to a \eqn{O(n log(n))} computational complexity for each iteration. #' #' During the minimization of the KL-divergence, the implementation uses a trick known as early exaggeration, which multiplies the \eqn{p_{ij}}'s by 12 during the first 250 iterations. This leads to tighter clustering and more distance between clusters of objects. This early exaggeration is not used when the user gives an initialization of the objects in the embedding by setting \code{Y_init}. During the early exaggeration phase, a momentum term of 0.5 is used while this is changed to 0.8 after the first 250 iterations. All these default parameters can be changed by the user. #' #' After checking the correctness of the input, the \code{Rtsne.multicore} function (optionally) does an initial reduction of the feature space using \code{\link{prcomp}}, before calling the C++ TSNE implementation. Since R's random number generator is used, use \code{\link{set.seed}} before the function call to get reproducible results. #' #' If \code{X} is a data.frame, it is transformed into a matrix using \code{\link{model.matrix}}. 
If \code{X} is a \code{\link{dist}} object, it is currently first expanded into a full distance matrix. #' #' @param X matrix; Data matrix #' @param dims integer; Output dimensionality (Currently only support 2) #' @param initial_dims integer; the number of dimensions that should be retained in the initial PCA step (default: 50) #' @param perplexity numeric; Perplexity parameter #' @param theta numeric; Speed/accuracy trade-off (increase for less accuracy), set to 0.0 for exact TSNE (default: 0.5) #' @param check_duplicates logical; Checks whether duplicates are present. It is best to make sure there are no duplicates present and set this option to FALSE, especially for large datasets (default: TRUE) #' @param pca logical; Whether an initial PCA step should be performed (default: TRUE) #' @param num_threads integer: number of cores to be used for parallel runs. Default is 2. #' @param max_iter integer; Number of iterations (default: 1000) #' @param verbose logical; Whether progress updates should be printed (default: FALSE) #' @param ... Other arguments that can be passed to Rtsne.multicore #' @param pca_center logical; Should data be centered before pca is applied? (default: TRUE) #' @param pca_scale logical; Should data be scaled before pca is applied? (default: FALSE) #' #' @return List with the following elements: #' \item{Y}{Matrix containing the new representations for the objects} #' \item{N}{Number of objects} #' \item{origD}{Original Dimensionality before TSNE} #' \item{perplexity}{See above} #' \item{theta}{See above} #' #' @references Maaten, L. Van Der, 2014. Accelerating t-SNE using Tree-Based Algorithms. Journal of Machine Learning Research, 15, p.3221-3245. #' @references van der Maaten, L.J.P. & Hinton, G.E., 2008. Visualizing High-Dimensional Data Using t-SNE. Journal of Machine Learning Research, 9, pp.2579-2605. 
#' #' @examples #' iris_unique <- unique(iris) # Remove duplicates #' iris_matrix <- as.matrix(iris_unique[,1:4]) #' set.seed(42) # Set a seed if you want reproducible results #' tsne_out <- Rtsne.multicore(iris_matrix, num_threads = 2) # Run TSNE #' # Show the objects in the 2D tsne representation #' plot(tsne_out$Y,col=iris_unique$Species) #' #' #' # Use a given initialization of the locations of the points #' tsne_out <- Rtsne.multicore(iris_unique[,1:4], theta=0.0, pca=FALSE,max_iter=350) #' @useDynLib Rtsne.multicore #' @import Rcpp #' @importFrom stats model.matrix prcomp #' #' @export Rtsne.multicore <- function (X, ...) { UseMethod("Rtsne.multicore", X) } #' @describeIn Rtsne.multicore Default Interface #' @export Rtsne.multicore.default <- function(X, dims=2, initial_dims=50, perplexity=30, theta=0.5, check_duplicates=TRUE, pca=TRUE, max_iter=1000, num_threads = 2 ,verbose=FALSE , is_distance=FALSE, Y_init=NULL ,pca_center=TRUE, pca_scale=FALSE # stop_lying_iter=ifelse(is.null(Y_init),250L,0L), # mom_switch_iter=ifelse(is.null(Y_init),250L,0L), # momentum=0.5, final_momentum=0.8, # eta=200.0, exaggeration_factor=12.0 , ...) 
{ if(dims!=2) stop("Only 2d output is supported due to its c++ implemenation!") # if (!is.logical(is_distance)) { stop("is_distance should be a logical variable")} if (!is.numeric(theta) || (theta<0.0) || (theta>1.0) ) { stop("Incorrect theta.")} if (nrow(X) - 1 < 3 * perplexity) { stop("Perplexity is too large.")} if (!is.matrix(X)) { stop("Input X is not a matrix")} if (!(max_iter>0)) { stop("Incorrect number of iterations.")} # if (is_distance & !(is.matrix(X) & (nrow(X)==ncol(X)))) { stop("Input is not an accepted distance matrix") } # if (!is.null(Y_init) & (nrow(X)!=nrow(Y_init) || ncol(Y_init)!=dims)) { stop("Incorrect format for Y_init.") } if (!(is.logical(pca_center) && is.logical(pca_scale)) ) { stop("pca_center and pca_scale should be TRUE or FALSE")} # if (!is.integer(stop_lying_iter) || stop_lying_iter<0) { stop("stop_lying_iter should be a positive integer")} # if (!is.integer(mom_switch_iter) || mom_switch_iter<0) { stop("mom_switch_iter should be a positive integer")} # if (!is.numeric(exaggeration_factor)) { stop("exaggeration_factor should be numeric")} is.wholenumber <- function(x, tol = .Machine$double.eps^0.5) abs(x - round(x)) < tol if (!is.wholenumber(initial_dims) || initial_dims<=0) { stop("Incorrect initial dimensionality.")} # Apply PCA if (pca) { pca_result <- prcomp(X,retx=TRUE,center = pca_center, scale. 
= pca_scale) X <- pca_result$x[,1:min(initial_dims,ncol(pca_result$x))] } if (check_duplicates){ if (any(duplicated(X))) { stop("Remove duplicates before running TSNE.") } } # Compute Squared distance if we are using exact TSNE if (theta==0.0) { X <- X^2 } # if (is.null(Y_init)) { # init <- FALSE # Y_init <- matrix() # } else { # init <- TRUE # } # msg <- capture.output( res <- Rtsne_cpp(X, dims, perplexity, theta,num_threads, max_iter # , is_distance, Y_init, init,stop_lying_iter, mom_switch_iter, momentum, final_momentum, eta, exaggeration_factor ) ) if(verbose) print(msg) res } #' @describeIn Rtsne.multicore tsne on data.frame #' @export Rtsne.multicore.data.frame <- function(X,...) { X <- model.matrix(~.-1,X) Rtsne.multicore(X, ...) }
# Auto-extracted example for emmeans::xtable.emmGrid
library(emmeans)

### Name: xtable.emmGrid
### Title: Using 'xtable' for EMMs
### Aliases: xtable.emmGrid xtable.summary_emm print.xtable_emm

### ** Examples

# Log-response linear model with a source x percent interaction
# (the `pigs` data set ships with emmeans).
pigsint.lm <- lm(log(conc) ~ source * factor(percent), data = pigs)
# Estimated marginal means of percent, computed separately for each source.
pigsint.emm <- emmeans(pigsint.lm, ~ percent | source)
# Render the EMMs as an xtable, back-transformed to the response scale.
xtable::xtable(pigsint.emm, type = "response")
/data/genthat_extracted_code/emmeans/examples/xtable.emmGrid.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
334
r
library(emmeans) ### Name: xtable.emmGrid ### Title: Using 'xtable' for EMMs ### Aliases: xtable.emmGrid xtable.summary_emm print.xtable_emm ### ** Examples pigsint.lm <- lm(log(conc) ~ source * factor(percent), data = pigs) pigsint.emm <- emmeans(pigsint.lm, ~ percent | source) xtable::xtable(pigsint.emm, type = "response")
## Cache the inverse of a matrix.
##
## Matrix inversion can be expensive, so these two functions wrap a matrix in
## a closure that memoises its inverse: repeated requests for the inverse of
## the same matrix return the cached value instead of recomputing it.

## makeCacheMatrix: build a special "matrix" object able to cache its inverse.
##
## Args:
##   x: an invertible matrix (default: an empty 1x1 matrix).
##
## Returns a list of four accessor functions:
##   set(y)          - replace the stored matrix and drop any cached inverse
##   get()           - return the stored matrix
##   setinverse(inv) - store a computed inverse in the cache
##   getinverse()    - return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    x <<- y
    # Invalidate the cache: the old inverse no longer matches the new matrix.
    inv <<- NULL
  }
  get <- function() x
  # Parameter renamed from `solve` to `inverse`: the old name shadowed
  # base::solve(), which was confusing.  Positional callers are unaffected.
  setinverse <- function(inverse) inv <<- inverse
  getinverse <- function() inv
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}

## cachesolve: return the inverse of the special "matrix" produced by
## makeCacheMatrix, computing it at most once.
##
## If the inverse has already been calculated (and the matrix has not been
## changed via set()), the cached value is returned with a message; otherwise
## it is computed with solve(), cached, and returned.  Extra arguments are
## forwarded to solve().
cachesolve <- function(x, ...) {
  inv <- x$getinverse()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  data <- x$get()
  inv <- solve(data, ...)
  x$setinverse(inv)
  inv
}
/cachematrix.R
no_license
Tuckerman2/ProgrammingAssignment2
R
false
false
931
r
## This programme caches the inverse of a matrix.

## Construct a special "matrix": a list of accessor closures around a matrix
## `x` whose inverse is memoised in the enclosing environment.  set() stores a
## new matrix and clears the cache; setinverse()/getinverse() write and read
## the cached inverse (NULL until first computed).
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  list(
    set = function(y) {
      x <<- y
      cached <<- NULL
    },
    get = function() {
      x
    },
    setinverse = function(solve) {
      cached <<- solve
    },
    getinverse = function() {
      cached
    }
  )
}

## Fetch the inverse of a makeCacheMatrix object, computing and caching it on
## first use.  When the cached value is still valid (i.e. set() has not been
## called since), it is returned directly after a diagnostic message.
## Extra arguments are forwarded to solve().
cachesolve <- function(x, ...) {
  result <- x$getinverse()
  if (is.null(result)) {
    result <- solve(x$get(), ...)
    x$setinverse(result)
  } else {
    message("getting cached data")
  }
  result
}
#' Build a data frame of covariate-criterion associations for a diamond plot.
#'
#' Computes the association of each covariate with the criterion using
#' associationMatrix() and returns one row per covariate holding the effect
#' size point estimate and its confidence interval, converted to a common
#' effect size metric.
#'
#' @param dat Data frame containing the covariates and the criterion.
#' @param covariates Character vector with the names of the covariates.
#' @param criterion Name of the criterion variable.
#' @param labels Labels for the covariates; defaults to \code{covariates}.
#' @param decreasing If not NULL, sort the rows by effect size.  The sort
#'   direction is inverted internally because ggplot draws the first row
#'   nearest the origin (i.e. lowest on the y axis).
#' @param conf.level Confidence level.  NOTE(review): currently unused here;
#'   the interval width is whatever associationMatrix() produces by default --
#'   confirm whether it should be forwarded.
#' @param esMetric Effect size metric to convert to: 'r' (default) or
#'   'd'/'g'.
#' @return Data frame with columns lo, es, hi, label, rownr and constant,
#'   plus a 'sortedByMean' attribute recording the row order applied.
associationsToDiamondPlotDf <- function(dat, covariates, criterion,
                                        labels = NULL,
                                        decreasing = NULL,
                                        conf.level = .95,
                                        esMetric = 'r') {

  if (is.null(labels)) labels <- covariates

  assocMatrix <- associationMatrix(dat, x = covariates, y = criterion)

  resDf <- data.frame(lo = as.numeric(assocMatrix$output$raw$ci.lo),
                      es = as.numeric(assocMatrix$output$raw$es),
                      hi = as.numeric(assocMatrix$output$raw$ci.hi))

  if (esMetric == 'r') {
    ## Convert each row (lo, es, hi) to a correlation, depending on the
    ## effect size type returned by associationMatrix().
    resDf <- data.frame(matrix(sapply(seq_along(covariates), function(i) {
      esType <- assocMatrix$output$raw$esType[i]
      if (esType == 'd') {
        return(convert.d.to.r(resDf[i, ]))
      } else if ((esType == 'etasq') || (esType == 'omegasq')) {
        return(sqrt(resDf[i, ]))
      } else {
        return(resDf[i, ])
      }
    }), ncol = 3, byrow = TRUE))
  } else if (esMetric == 'd' || esMetric == 'g') {
    ## Convert each row to Cohen's d (or g).
    resDf <- data.frame(matrix(sapply(seq_along(covariates), function(i) {
      esType <- assocMatrix$output$raw$esType[i]
      if (esType == 'r' || esType == 'v') {
        return(convert.r.to.d(resDf[i, ]))
      } else if ((esType == 'etasq') || (esType == 'omegasq')) {
        return(convert.r.to.d(sqrt(resDf[i, ])))
      } else {
        return(resDf[i, ])
      }
    }), ncol = 3, byrow = TRUE))
  } else {
    stop("No other effect size metrics implemented yet!")
  }
  names(resDf) <- c('lo', 'es', 'hi')

  resDf$label <- labels
  resDf$rownr <- seq_len(nrow(resDf))
  resDf$constant <- 1

  if (!is.null(decreasing)) {
    ### Invert 'decreasing' because ggplot plots the lowest/first value first
    ### (near the origin).  So a decreasing sort would normally result in
    ### higher means being displayed LOWER in the plot, which is
    ### counter-intuitive, hence the inversion.
    sortedByMean <- order(unlist(resDf$es), decreasing = !decreasing)
    resDf <- resDf[sortedByMean, ]
    labels <- labels[sortedByMean]
  } else {
    ### sortedByMean is used later on to organise the raw data; therefore,
    ### this should reflect the order of the variables on the Y axis
    ### regardless of whether they're reorganised.
    sortedByMean <- seq_along(labels)
  }

  ### Return this vector as attribute to use in meansDiamondPlot
  attr(resDf, 'sortedByMean') <- sortedByMean
  return(resDf)
}
/R/associationsToDiamondPlotDf.R
no_license
DBoegner/userfriendlyscience
R
false
false
2,657
r
associationsToDiamondPlotDf <- function(dat, covariates, criterion, labels = NULL, decreasing=NULL, conf.level=.95, esMetric = 'r') { if (is.null(labels)) labels <- covariates; assocMatrix <- associationMatrix(dat, x=covariates, y=criterion); resDf <- data.frame(lo = as.numeric(assocMatrix$output$raw$ci.lo), es = as.numeric(assocMatrix$output$raw$es), hi = as.numeric(assocMatrix$output$raw$ci.hi)); if (esMetric == 'r') { resDf <- data.frame(matrix(sapply(1:length(covariates), function(i) { if (assocMatrix$output$raw$esType[i] == 'd') { return(convert.d.to.r(resDf[i, ])); } else if ((assocMatrix$output$raw$esType[i] == 'etasq') || (assocMatrix$output$raw$esType[i] == 'omegasq')) { return(sqrt(resDf[i, ])); } else { return(resDf[i, ]); } }), ncol=3, byrow=TRUE)); } else if (esMetric == 'd' | esMetric == 'g') { resDf <- data.frame(matrix(sapply(1:length(covariates), function(i) { if (assocMatrix$output$raw$esType[i] == 'r' | assocMatrix$output$raw$esType[i] == 'v') { return(convert.r.to.d(resDf[i, ])); } else if ((assocMatrix$output$raw$esType[i] == 'etasq') || (assocMatrix$output$raw$esType[i] == 'omegasq')) { return(convert.r.to.d(sqrt(resDf[i, ]))); } else { return(resDf[i, ]); } }), ncol=3, byrow=TRUE)); } else { stop("No other effect size metrics implemented yet!"); } names(resDf) <- c('lo', 'es', 'hi'); resDf$label <- labels; resDf$rownr <- 1:nrow(resDf); resDf$constant <- 1; if (!is.null(decreasing)) { ### Invert 'decreasing' because ggplot plots the lowest/first value first (near the origin). ### So a decreasing sort would normally result in higher means being displayed LOWER in ### the plot, which is counter-intuitive, hence the inversion. 
sortedByMean <- order(unlist(resDf$es), decreasing=!decreasing); resDf <- resDf[sortedByMean, ]; labels <- labels[sortedByMean]; } else { ### sortedByMean is used later on to organise the raw data; therefore, this should ### reflect the order of the variables on the Y axis regardless of whether they're ### reorganised sortedByMean <- 1:length(labels); } ### Return this vector as attribute to use in meansDiamondPlot attr(resDf, 'sortedByMean') <- sortedByMean; return(resDf); }
/lib/Carbon.framework/Versions/A/Frameworks/SecurityHI.framework/Versions/A/Headers/SecurityHI.r
no_license
oshanis/webnnel
R
false
false
547
r
# Plot per-trial fixation scanpaths over the stimulus boxes and save each
# trial as a PNG ("scnpth<trial>.png").  Reads fixations.csv and
# eventTimes.csv from the working directory.
library(ggplot2)
library(dplyr)

# Interleave two equal-length vectors: c(v1[1], v2[1], v1[2], v2[2], ...).
interleave <- function(v1, v2) {
  ord1 <- 2 * seq_along(v1) - 1
  ord2 <- 2 * seq_along(v2)
  c(v1, v2)[order(c(ord1, ord2))]
}

# Eight boxes, four on either side of the screen centre (1920x1080 display),
# spaced 38 px apart; all on the vertical midline.
boxCentres <- c(1920 / 2 - 38 * c(1, 2, 3, 4),
                1920 / 2 + 38 * c(1, 2, 3, 4))
boxes <- data.frame(x = boxCentres, y = 1080 / 2)
sqSize <- 28  # box side length in px

dat <- read.csv("fixations.csv")
timedat <- read.csv("eventTimes.csv")

for (tr in unique(dat$trial)) {
  tdat <- filter(dat, trial == tr)
  ttime <- filter(timedat, trial == tr)

  # Assign each fixation to the nearest box (within 20 px horizontally);
  # fixations outside every box keep aoi = NaN.
  tdat$aoi <- NaN
  for (b in seq_along(boxCentres)) {
    tdat$aoi[which(abs(tdat$x - boxCentres[b]) < 20)] <- b
  }
  print(tdat)

  # Fixation on/off times interleaved per fixation (used by the disabled
  # timeline plot below; kept for parity with the original script).
  pts <- data.frame(x = rep(tdat$x, 1, each = 2),
                    y = interleave(tdat$on, tdat$off))

  # Scanpath: fixation points coloured by time, connected in order, with the
  # AOI number printed at each fixation.
  plt <- ggplot(tdat, aes(x = x, y = y)) +
    geom_point(aes(colour = timeInt)) +
    geom_path() +
    scale_x_continuous(limits = c(1 + 600, 1920 - 600)) +
    scale_y_continuous(limits = c(1080 / 2 - 100, 1080 / 2 + 100)) +
    coord_fixed() +
    geom_text(aes(label = aoi))

  # Draw the outline of every box.
  for (b in seq_len(nrow(boxes))) {
    box <- data.frame(
      x = c(boxes$x[b] - sqSize / 2, boxes$x[b] - sqSize / 2,
            boxes$x[b] + sqSize / 2, boxes$x[b] + sqSize / 2,
            boxes$x[b] - sqSize / 2),
      y = c(boxes$y[b] - sqSize / 2, boxes$y[b] + sqSize / 2,
            boxes$y[b] + sqSize / 2, boxes$y[b] - sqSize / 2,
            boxes$y[b] - sqSize / 2))
    plt <- plt + geom_path(data = box, aes(x = x, y = y))
  }

  # Disabled fixation-timeline plot, kept for reference:
  # plt2 = ggplot(pts, aes(x=x, y=y)) + geom_path()
  # plt2 = plt2 + geom_hline(data=ttime, aes(yintercept = times, colour=events))
  # plt2 = plt2 + geom_vline(xintercept=boxCentres, linetype=2)

  ggsave(paste0('scnpth', tr, '.png'))
}

# BUGFIX: a bare trailing `plt2` was removed here.  plt2 is only built in the
# commented-out code above, so evaluating it errored with
# "object 'plt2' not found" after the loop finished.
/analysis/check_fix.R
no_license
Riadsala/RemappedCrowding
R
false
false
1,635
r
library(ggplot2)
library(dplyr)

# Interleave two vectors element-wise: v1[1], v2[1], v1[2], v2[2], ...
# (seq_along is used instead of 1:length() so empty input is handled safely)
interleave <- function(v1, v2) {
  ord1 <- 2 * seq_along(v1) - 1  # odd output positions for v1
  ord2 <- 2 * seq_along(v2)      # even output positions for v2
  c(v1, v2)[order(c(ord1, ord2))]
}

# Eight AOI boxes, four either side of the centre of a 1920x1080 display,
# spaced 38 px apart; each box is sqSize px square.
boxes <- data.frame(x = c(1920/2 - 38 * c(1, 2, 3, 4),
                          1920/2 + 38 * c(1, 2, 3, 4)),
                    y = 1080/2)
boxCentres <- c(1920/2 - 38 * c(1, 2, 3, 4),
                1920/2 + 38 * c(1, 2, 3, 4))
sqSize <- 28

dat <- read.csv("fixations.csv")
timedat <- read.csv("eventTimes.csv")

for (tr in unique(dat$trial)) {
  tdat <- filter(dat, trial == tr)
  ttime <- filter(timedat, trial == tr)  # currently unused (event-time plot is disabled)
  # Assign each fixation to the AOI box whose centre is within 20 px in x
  tdat$aoi <- NaN
  for (b in seq_along(boxCentres)) {
    tdat$aoi[which(abs(tdat$x - boxCentres[b]) < 20)] <- b
  }
  print(tdat)
  if (TRUE) {  # plotting toggle (was `if (1)`)
    pts <- data.frame(x = rep(tdat$x, 1, each = 2),
                      y = interleave(tdat$on, tdat$off))
    # Scanpath: fixation positions coloured by time, connected in order
    plt <- ggplot(tdat, aes(x = x, y = y)) +
      geom_point(aes(colour = timeInt)) +
      geom_path()
    plt <- plt +
      scale_x_continuous(limits = c(1 + 600, 1920 - 600)) +
      scale_y_continuous(limits = c(1080/2 - 100, 1080/2 + 100))
    plt <- plt + coord_fixed() + geom_text(aes(label = aoi))
    # Overlay the outline of every AOI box
    for (b in seq_len(nrow(boxes))) {
      box <- data.frame(
        x = c(boxes$x[b] - sqSize/2, boxes$x[b] - sqSize/2,
              boxes$x[b] + sqSize/2, boxes$x[b] + sqSize/2,
              boxes$x[b] - sqSize/2),
        y = c(boxes$y[b] - sqSize/2, boxes$y[b] + sqSize/2,
              boxes$y[b] + sqSize/2, boxes$y[b] - sqSize/2,
              boxes$y[b] - sqSize/2))
      plt <- plt + geom_path(data = box, aes(x = x, y = y))
    }
    ggsave(paste0('scnpth', tr, '.png'))
  }
}
# NOTE(review): the script previously ended with a bare `plt2`, but plt2 was
# only defined inside commented-out code, so the script always stopped with
# "object 'plt2' not found". The stray reference (and the dead commented
# event-time plot it belonged to) has been removed.
# Standard testthat entry point: run the Rssa package test suite.
library(testthat)
library(Rssa)

test_check("Rssa")
/tests/testthat.R
no_license
asl/rssa
R
false
false
52
r
# Standard testthat entry point: run the Rssa package test suite.
library(testthat)
library(Rssa)

test_check("Rssa")
# DESCRIPTION
# This file contains essential functions for the "draw_ifrs.R" file

#' @title Compute PDF or CDF of delay
#' @description Compute PDF or CDF of lognormal delays
#'
#' @param lmu mean on the logscale
#' @param lsigma sd on the logscale
#' @param nmax max number of timepoints
#' @param dt timestep in days along which to compute the densities
#' @param start first timepoint to use
#' @param pdf boolean, whether to return the PDF (TRUE) or the CDF (FALSE)
#'
#' @return vector of either probability densities or probabilities
#'
computeDelay <- function(lmu, lsigma, nmax, dt = 1, start = dt/2, pdf = TRUE) {
  if (pdf) {
    res <- dlnorm(seq(start, nmax, by = dt), meanlog = lmu, sdlog = lsigma)
  } else {
    res <- plnorm(seq(start, nmax, by = dt), meanlog = lmu, sdlog = lsigma)
  }
  return(res)
}

#' @title Get time delay PDF or CDF
#' @description Get the PDF or CDF of the delay distributions assuming a log-normal
#'
#' @param delay_params Parameters of the log-normal distribution (one row per delay)
#' @param delay Name of the delay to look up in delay_params$delay
#' @param nmax Max number of timepoints, here using timestep dt
#' @param dt timestep in days along which to compute the densities
#' @param start first timepoint to use
#' @param pdf boolean, whether to return the PDF or the CDF
#' @param rnd Whether to draw distribution parameter values from their
#'   uncertainty (truncated normal at 0) rather than use point estimates
#'
#' @return vector of either probability or probability density values
#'
getDelay <- function(delay_params = NULL, delay = NULL, nmax, dt = 1,
                     start = dt/2, pdf = TRUE, rnd = FALSE) {
  if (!(delay %in% delay_params$delay))
    stop("Delay name not known")

  ind <- which(delay_params$delay == delay)

  if (!rnd) {
    # Point estimates of the lognormal parameters mu and sigma (log scale)
    lmu <- delay_params$logmu[ind]
    lsigma <- delay_params$logsigma[ind]
  } else {
    # Draw the lognormal parameters from truncated normals (positive support)
    lmu <- truncnorm::rtruncnorm(1, a = 0,
                                 mean = delay_params$logmu[ind],
                                 sd = delay_params$logmu.sd[ind])
    lsigma <- truncnorm::rtruncnorm(1, a = 0,
                                    mean = delay_params$logsigma[ind],
                                    sd = delay_params$logsigma.sd[ind])
  }

  res <- computeDelay(lmu, lsigma, nmax, dt = dt, start = start, pdf = pdf)
  return(res)
}

#' @title Set Delay PMF
#' @description Set the PMF of the delay distributions assuming a log-normal
#'
#' @param delay_params Parameters of the log-normal distribution
#' @param delay Name of the delay
#' @param nmax Max number of timepoints
#' @param dt timestep in days
#' @param rnd Whether to draw distribution parameter values
#'
#' @details The daily PMF values are computed by taking the difference between the
#' delay distribution's CDF at times t and t-1: PMF(t) = CDF(t+1) - CDF(t).
#'
#' @return vector of probability values
#'
setDelayPMF <- function(delay_params, delay, nmax, dt = 1, rnd = FALSE) {
  if (!(delay %in% delay_params$delay))
    stop("Delay name not known")

  ntot <- nmax/dt + 1  # total number of timesteps
  # BUGFIX(review): dt was previously not forwarded to getDelay, so any
  # dt != 1 produced a CDF of the wrong length (NA-padded differences).
  cdf <- getDelay(delay_params, delay, nmax, dt = dt, start = 0,
                  pdf = FALSE, rnd = rnd)
  prob_values <- cdf[2:ntot] - cdf[1:(ntot - 1)]
  return(prob_values)
}

#' @title Compute Istar
#' @description Function to compute the deconvolution of reported cases and the
#' incubation period accounting for reporting delay. Returns an estimate of the
#' cumulative number of infections up to a proportionality constant (Istar).
#'
#' @param cases time series of daily cumulative reported cases
#' @param pdf_inc PMF of the incubation period
#' @param pdf_report PMF of the reporting delay
#' @param gamma the threshold value for bounding the inverse fourier transform values
#'
#' @return A vector of the values of Istar (monotone non-decreasing, length(cases))
#'
computeIstar <- function(cases, pdf_inc, pdf_report, gamma = .05) {
  nc <- length(cases)
  ni <- length(pdf_inc)
  nr <- length(pdf_report)

  # FFT convolution (as in base R's `convolve`): zero-pad both PMFs to the
  # full convolution length, multiply in frequency space, transform back.
  ntot <- ni + nr - 1
  pdf_inc2 <- c(pdf_inc, rep.int(0, ntot - ni))
  pdf_report2 <- c(pdf_report, rep.int(0, ntot - nr))
  ntot <- length(pdf_report2)
  F_pdf_comb <- fft(pdf_inc2) * fft(pdf_report2)
  pdf_comb <- Re(fft(F_pdf_comb, inverse = TRUE))/ntot
  # Preserve sum(prob) = 1
  pdf_comb <- pdf_comb/sum(pdf_comb)

  # Pad the case series to the deconvolution length
  ntot2 <- nc + ntot - 1
  cases2 <- c(cases, rep.int(0, ntot2 - nc))
  pdf_comb2 <- c(pdf_comb, rep(0, ntot2 - ntot))

  # Fourier transforms of the data and of the combined delay PMF
  F_cases <- fft(cases2)
  F_pdf_comb2 <- fft(pdf_comb2)

  # Water level regularization to prevent numerical instability when
  # inverting the filter. Adapted from:
  # https://cnx.org/resources/22c9f37591a06c51a0e632cc790ec83bcb853aa5/inverseFilter.m
  # (unused locals `R` and `eps` from the original have been removed)
  R1 <- F_pdf_comb2*(abs(F_pdf_comb2) > 0) + 1/gamma*(abs(F_pdf_comb2) == 0)
  iR <- 1/F_pdf_comb2
  # invert the filter using threshold gamma
  G <- iR*((abs(R1)*gamma) > 1) + gamma*abs(R1)*iR*((abs(R1)*gamma) <= 1)

  Istar <- Re(fft(F_cases*G, inverse = TRUE))[1:nc]
  # Sanity check: enforce a non-decreasing cumulative function
  Istar2 <- cummax(Istar)
  return(Istar2)
}

#' @title Compute Bayesian p-values
#' @description Compute the Bayesian p-value that two vectors of posterior draws
#' have different means.
#'
#' @param x vector of posterior draws of parameter to compare
#' @param y vector of posterior draws of reference parameter
#' @details the hypothesis that is tested is x = y using y - x = 0.
#'
#' @return p-value (two-sided tail probability of the paired differences)
#'
computePval <- function(x, y) {
  if (length(x) != length(y))
    stop("Vectors need to be the same length")
  return(min(sum((y - x) > 0), sum((y - x) < 0)) * 2 / length(x))
}

#' @title Random initial values
#' @description Produces random initial values for Stan's HMC
#'
#' @return list with initial parameter values (phi, lambda, IFR)
#'
rndInit <- function() {
  # Draw hyperprior parameters from priors
  phi <- rbeta(1, 1, 6.5)  # median of prior ~ 0.1
  lambda <- actuar::rpareto1(1, shape = 1.5, min = 0.1)
  # Draw IFR uniformly over a plausible range
  IFR <- runif(1, 1e-6, 1e-1)
  list(phi = phi, lambda = lambda, IFR = IFR)
}

#' @title Get SD
#' @description Computes the sd assuming a normal based on the 95% CI
#'
#' @param mu mean
#' @param q025 0.025 quantile
#' @param q975 0.975 quantile
#'
#' @return the standard deviation (mean of the two one-sided estimates;
#'   note the original used CI half-width / 2, not / 1.96 — kept as-is)
#'
getSD <- function(mu, q025, q975) {
  sd1 <- (mu - q025)/2
  sd2 <- (q975 - mu)/2
  return(mean(c(sd1, sd2)))
}

#' @title Log-norm mean
#' @description Computes the mean of a lognormal distribution
#'
#' @param logmu mean on the logscale
#' @param logsigma sd on the logscale
#'
#' @return the mean
#'
lnormMean <- function(logmu, logsigma) {
  exp(logmu + .5 * logsigma^2)
}

#' @title Log-norm sd
#' @description Computes the sd of a lognormal distribution
#' @param logmu mean on the logscale
#' @param logsigma sd on the logscale
#' @return the sd
lnormSD <- function(logmu, logsigma) {
  sqrt((exp(logsigma^2) - 1) * exp(2 * logmu + logsigma^2))
}

#' @title Get quantiles
#' @description Computes the quantiles of a matrix of values
#' (rows = posterior draws, columns = timepoints named V1..Vn)
#'
#' @param mat matrix over which to compute quantiles
#'
#' @return dataframe of per-timepoint quantiles of the values and their cumsums
#'
getQuantiles <- function(mat) {
  mat %>%
    as.data.frame() %>%
    mutate(sim = row_number()) %>%
    gather(var, value, -sim) %>%
    mutate(time = as.numeric(str_replace_all(var, "V", ""))) %>%
    group_by(sim) %>%
    arrange(time) %>%
    mutate(cumvalue = cumsum(value)) %>%
    group_by(time) %>%
    summarise(q025 = quantile(value, .025),
              q975 = quantile(value, .975),
              median = quantile(value, 0.5),
              mean = mean(value),
              cdf.q025 = quantile(cumvalue, .025),
              cdf.q975 = quantile(cumvalue, .975),
              cdf.median = quantile(cumvalue, 0.5),
              cdf.mean = mean(cumvalue))
}

#' @title organize epidata
#' @description subsets age-stratified cases and deaths from a dataset. Subsets
#' based on death type - Confirmed deaths or Confirmed+Probable Deaths. Renames
#' the retained death column to "death_cumul".
#'
#' @param death_type either "Confirmed" or "Combined"
#' @param epidata_source path to the csv file with the epi data
#'
#' @return data frame of cumulative cases and deaths
#'
getEpidata <- function(death_type, epidata_source) {
  if (death_type == "Confirmed") {
    epidata <- read.csv(epidata_source) %>%
      filter(var != "death_cumul_comb") %>%
      select(-X)
    epidata["var"][epidata["var"] == "death_cumul_conf"] <- "death_cumul"
  } else if (death_type == "Combined") {
    epidata <- read.csv(epidata_source) %>%
      filter(var != "death_cumul_conf") %>%
      select(-X)
    epidata["var"][epidata["var"] == "death_cumul_comb"] <- "death_cumul"
  } else {
    # Fail fast: the original silently returned an undefined object here
    stop("Unknown death_type: ", death_type, call. = FALSE)
  }
  return(epidata)
}

#' @title get serosurvey draws for the 0-17 age class
#' @description Extrapolates 0-17 serosurvey draws from a source without
#' serodata for the 0-17 age class. Uses trend data from an external source
#' to generate a ratio between the 0-17 age class and another age class - here,
#' it's the 18-44 age class. Applies the ratio to the existing 18-44 data
#'
#' @param trend_source path to csv of external age-seroprevalence trend data
#' @param n_post number of posterior draws
#' @param nyc_draws vector (length n_post) of 18-44 seroprevalence draws
#' @param pop population size of the 0-17 class
#'
#' @return data frame of serovalues for age 0-17
#'
get0to17 <- function(trend_source, n_post, nyc_draws, pop) {
  # SPECIAL CASE: Age class 0-17
  # There is no 0-17 serodata available for New York City. However, there is
  # 0-17 case and death data available for New York City. We derived a scale of
  # 0-17:18-45 seroprevalence from Spain data (Pollan et al. 2020) and then
  # applied that scale to the 18-44 seroprevalence in NYC to obtain the 0-17
  # seroprevalence for NYC. We used Pollan et al. 2020 because it is the
  # only sufficiently large study to include age-specific seroprevalence values
  # in our target range

  # set up the ratio vector - each serosurvey draw gets its own scaling value
  ratio_draws_0to17 <- rep(0, n_post)

  trend_data <- read.csv(trend_source)
  age_sero_fit <- lm(mean ~ age_midpoint + I(age_midpoint^2), data = trend_data)
  trend_data$fitted <- fitted(age_sero_fit)
  trend_data$scale <- trend_data$fitted/mean(trend_data$mean)

  # The trend data is organized by the midpoints of each age class
  for (i in seq_len(n_post)) {
    # draws 1 value for each class between ages 0-17 of seroprevalence trend data
    nclasses_0to17 <- length(trend_data$age_class[trend_data$age_midpoint < 18])
    trend_draws_0to17 <- rep(0, nclasses_0to17)
    for (j in seq_len(nclasses_0to17)) {
      trend_draws_0to17[j] <- rnorm(1,
                                    mean = (trend_data[j, ]$mean)/100,
                                    sd = (trend_data[j, ]$UpperCI - trend_data[j, ]$mean)/100)
      # test NA first so the `< 0` comparison never sees an NA
      if (is.na(trend_draws_0to17[j]) || trend_draws_0to17[j] < 0) {
        trend_draws_0to17[j] <- 1e-6
      }
    }

    # draws 1 value for each class between ages 18-44 of seroprevalence trend data
    nclasses_18to44 <- length(trend_data$age_class[trend_data$age_midpoint > 18 &
                                                     trend_data$age_midpoint < 45])
    trend_draws_18to44 <- rep(0, nclasses_18to44)
    for (j in (nclasses_0to17 + 1):(nclasses_0to17 + nclasses_18to44)) {
      k <- j - nclasses_0to17  # index within the 18-44 draw vector
      trend_draws_18to44[k] <- rnorm(1,
                                     mean = (trend_data[j, ]$mean)/100,
                                     sd = (trend_data[j, ]$UpperCI - trend_data[j, ]$mean)/100)
      if (is.na(trend_draws_18to44[k]) || trend_draws_18to44[k] < 0) {
        trend_draws_18to44[k] <- 1e-6
      }
    }

    # a single 0-17:18-44 ratio for this posterior iteration
    ratio_draws_0to17[i] <- mean(trend_draws_0to17)/mean(trend_draws_18to44)
  }
  print(mean(ratio_draws_0to17))

  # creates a 0-17 class from the ratio and the 18-44 seroprevalence draws from NYC
  draws_0to17 <- data.frame(age_class = "0-17",
                            seropos = ratio_draws_0to17*nyc_draws,
                            sim = seq_len(n_post),
                            pop = pop)
  return(draws_0to17)
}
/utils.R
no_license
ChloeRickards/sars-cov-2-ifr-nyc
R
false
false
12,045
r
# DESCRIPTION
# This file contains essential functions for the "draw_ifrs.R" file

#' @title Compute PDF or CDF of delay
#' @description Compute PDF or CDF of lognormal delays
#'
#' @param lmu mean on the logscale
#' @param lsigma sd on the logscale
#' @param nmax max number of timepoints
#' @param dt timestep in days along which to compute the densities
#' @param start first timepoint to use
#' @param pdf boolean, whether to return the PDF (TRUE) or the CDF (FALSE)
#'
#' @return vector of either probability densities or probabilities
#'
computeDelay <- function(lmu, lsigma, nmax, dt = 1, start = dt/2, pdf = TRUE) {
  if (pdf) {
    res <- dlnorm(seq(start, nmax, by = dt), meanlog = lmu, sdlog = lsigma)
  } else {
    res <- plnorm(seq(start, nmax, by = dt), meanlog = lmu, sdlog = lsigma)
  }
  return(res)
}

#' @title Get time delay PDF or CDF
#' @description Get the PDF or CDF of the delay distributions assuming a log-normal
#'
#' @param delay_params Parameters of the log-normal distribution (one row per delay)
#' @param delay Name of the delay to look up in delay_params$delay
#' @param nmax Max number of timepoints, here using timestep dt
#' @param dt timestep in days along which to compute the densities
#' @param start first timepoint to use
#' @param pdf boolean, whether to return the PDF or the CDF
#' @param rnd Whether to draw distribution parameter values from their
#'   uncertainty (truncated normal at 0) rather than use point estimates
#'
#' @return vector of either probability or probability density values
#'
getDelay <- function(delay_params = NULL, delay = NULL, nmax, dt = 1,
                     start = dt/2, pdf = TRUE, rnd = FALSE) {
  if (!(delay %in% delay_params$delay))
    stop("Delay name not known")

  ind <- which(delay_params$delay == delay)

  if (!rnd) {
    # Point estimates of the lognormal parameters mu and sigma (log scale)
    lmu <- delay_params$logmu[ind]
    lsigma <- delay_params$logsigma[ind]
  } else {
    # Draw the lognormal parameters from truncated normals (positive support)
    lmu <- truncnorm::rtruncnorm(1, a = 0,
                                 mean = delay_params$logmu[ind],
                                 sd = delay_params$logmu.sd[ind])
    lsigma <- truncnorm::rtruncnorm(1, a = 0,
                                    mean = delay_params$logsigma[ind],
                                    sd = delay_params$logsigma.sd[ind])
  }

  res <- computeDelay(lmu, lsigma, nmax, dt = dt, start = start, pdf = pdf)
  return(res)
}

#' @title Set Delay PMF
#' @description Set the PMF of the delay distributions assuming a log-normal
#'
#' @param delay_params Parameters of the log-normal distribution
#' @param delay Name of the delay
#' @param nmax Max number of timepoints
#' @param dt timestep in days
#' @param rnd Whether to draw distribution parameter values
#'
#' @details The daily PMF values are computed by taking the difference between the
#' delay distribution's CDF at times t and t-1: PMF(t) = CDF(t+1) - CDF(t).
#'
#' @return vector of probability values
#'
setDelayPMF <- function(delay_params, delay, nmax, dt = 1, rnd = FALSE) {
  if (!(delay %in% delay_params$delay))
    stop("Delay name not known")

  ntot <- nmax/dt + 1  # total number of timesteps
  # BUGFIX(review): dt was previously not forwarded to getDelay, so any
  # dt != 1 produced a CDF of the wrong length (NA-padded differences).
  cdf <- getDelay(delay_params, delay, nmax, dt = dt, start = 0,
                  pdf = FALSE, rnd = rnd)
  prob_values <- cdf[2:ntot] - cdf[1:(ntot - 1)]
  return(prob_values)
}

#' @title Compute Istar
#' @description Function to compute the deconvolution of reported cases and the
#' incubation period accounting for reporting delay. Returns an estimate of the
#' cumulative number of infections up to a proportionality constant (Istar).
#'
#' @param cases time series of daily cumulative reported cases
#' @param pdf_inc PMF of the incubation period
#' @param pdf_report PMF of the reporting delay
#' @param gamma the threshold value for bounding the inverse fourier transform values
#'
#' @return A vector of the values of Istar (monotone non-decreasing, length(cases))
#'
computeIstar <- function(cases, pdf_inc, pdf_report, gamma = .05) {
  nc <- length(cases)
  ni <- length(pdf_inc)
  nr <- length(pdf_report)

  # FFT convolution (as in base R's `convolve`): zero-pad both PMFs to the
  # full convolution length, multiply in frequency space, transform back.
  ntot <- ni + nr - 1
  pdf_inc2 <- c(pdf_inc, rep.int(0, ntot - ni))
  pdf_report2 <- c(pdf_report, rep.int(0, ntot - nr))
  ntot <- length(pdf_report2)
  F_pdf_comb <- fft(pdf_inc2) * fft(pdf_report2)
  pdf_comb <- Re(fft(F_pdf_comb, inverse = TRUE))/ntot
  # Preserve sum(prob) = 1
  pdf_comb <- pdf_comb/sum(pdf_comb)

  # Pad the case series to the deconvolution length
  ntot2 <- nc + ntot - 1
  cases2 <- c(cases, rep.int(0, ntot2 - nc))
  pdf_comb2 <- c(pdf_comb, rep(0, ntot2 - ntot))

  # Fourier transforms of the data and of the combined delay PMF
  F_cases <- fft(cases2)
  F_pdf_comb2 <- fft(pdf_comb2)

  # Water level regularization to prevent numerical instability when
  # inverting the filter. Adapted from:
  # https://cnx.org/resources/22c9f37591a06c51a0e632cc790ec83bcb853aa5/inverseFilter.m
  # (unused locals `R` and `eps` from the original have been removed)
  R1 <- F_pdf_comb2*(abs(F_pdf_comb2) > 0) + 1/gamma*(abs(F_pdf_comb2) == 0)
  iR <- 1/F_pdf_comb2
  # invert the filter using threshold gamma
  G <- iR*((abs(R1)*gamma) > 1) + gamma*abs(R1)*iR*((abs(R1)*gamma) <= 1)

  Istar <- Re(fft(F_cases*G, inverse = TRUE))[1:nc]
  # Sanity check: enforce a non-decreasing cumulative function
  Istar2 <- cummax(Istar)
  return(Istar2)
}

#' @title Compute Bayesian p-values
#' @description Compute the Bayesian p-value that two vectors of posterior draws
#' have different means.
#'
#' @param x vector of posterior draws of parameter to compare
#' @param y vector of posterior draws of reference parameter
#' @details the hypothesis that is tested is x = y using y - x = 0.
#'
#' @return p-value (two-sided tail probability of the paired differences)
#'
computePval <- function(x, y) {
  if (length(x) != length(y))
    stop("Vectors need to be the same length")
  return(min(sum((y - x) > 0), sum((y - x) < 0)) * 2 / length(x))
}

#' @title Random initial values
#' @description Produces random initial values for Stan's HMC
#'
#' @return list with initial parameter values (phi, lambda, IFR)
#'
rndInit <- function() {
  # Draw hyperprior parameters from priors
  phi <- rbeta(1, 1, 6.5)  # median of prior ~ 0.1
  lambda <- actuar::rpareto1(1, shape = 1.5, min = 0.1)
  # Draw IFR uniformly over a plausible range
  IFR <- runif(1, 1e-6, 1e-1)
  list(phi = phi, lambda = lambda, IFR = IFR)
}

#' @title Get SD
#' @description Computes the sd assuming a normal based on the 95% CI
#'
#' @param mu mean
#' @param q025 0.025 quantile
#' @param q975 0.975 quantile
#'
#' @return the standard deviation (mean of the two one-sided estimates;
#'   note the original used CI half-width / 2, not / 1.96 — kept as-is)
#'
getSD <- function(mu, q025, q975) {
  sd1 <- (mu - q025)/2
  sd2 <- (q975 - mu)/2
  return(mean(c(sd1, sd2)))
}

#' @title Log-norm mean
#' @description Computes the mean of a lognormal distribution
#'
#' @param logmu mean on the logscale
#' @param logsigma sd on the logscale
#'
#' @return the mean
#'
lnormMean <- function(logmu, logsigma) {
  exp(logmu + .5 * logsigma^2)
}

#' @title Log-norm sd
#' @description Computes the sd of a lognormal distribution
#' @param logmu mean on the logscale
#' @param logsigma sd on the logscale
#' @return the sd
lnormSD <- function(logmu, logsigma) {
  sqrt((exp(logsigma^2) - 1) * exp(2 * logmu + logsigma^2))
}

#' @title Get quantiles
#' @description Computes the quantiles of a matrix of values
#' (rows = posterior draws, columns = timepoints named V1..Vn)
#'
#' @param mat matrix over which to compute quantiles
#'
#' @return dataframe of per-timepoint quantiles of the values and their cumsums
#'
getQuantiles <- function(mat) {
  mat %>%
    as.data.frame() %>%
    mutate(sim = row_number()) %>%
    gather(var, value, -sim) %>%
    mutate(time = as.numeric(str_replace_all(var, "V", ""))) %>%
    group_by(sim) %>%
    arrange(time) %>%
    mutate(cumvalue = cumsum(value)) %>%
    group_by(time) %>%
    summarise(q025 = quantile(value, .025),
              q975 = quantile(value, .975),
              median = quantile(value, 0.5),
              mean = mean(value),
              cdf.q025 = quantile(cumvalue, .025),
              cdf.q975 = quantile(cumvalue, .975),
              cdf.median = quantile(cumvalue, 0.5),
              cdf.mean = mean(cumvalue))
}

#' @title organize epidata
#' @description subsets age-stratified cases and deaths from a dataset. Subsets
#' based on death type - Confirmed deaths or Confirmed+Probable Deaths. Renames
#' the retained death column to "death_cumul".
#'
#' @param death_type either "Confirmed" or "Combined"
#' @param epidata_source path to the csv file with the epi data
#'
#' @return data frame of cumulative cases and deaths
#'
getEpidata <- function(death_type, epidata_source) {
  if (death_type == "Confirmed") {
    epidata <- read.csv(epidata_source) %>%
      filter(var != "death_cumul_comb") %>%
      select(-X)
    epidata["var"][epidata["var"] == "death_cumul_conf"] <- "death_cumul"
  } else if (death_type == "Combined") {
    epidata <- read.csv(epidata_source) %>%
      filter(var != "death_cumul_conf") %>%
      select(-X)
    epidata["var"][epidata["var"] == "death_cumul_comb"] <- "death_cumul"
  } else {
    # Fail fast: the original silently returned an undefined object here
    stop("Unknown death_type: ", death_type, call. = FALSE)
  }
  return(epidata)
}

#' @title get serosurvey draws for the 0-17 age class
#' @description Extrapolates 0-17 serosurvey draws from a source without
#' serodata for the 0-17 age class. Uses trend data from an external source
#' to generate a ratio between the 0-17 age class and another age class - here,
#' it's the 18-44 age class. Applies the ratio to the existing 18-44 data
#'
#' @param trend_source path to csv of external age-seroprevalence trend data
#' @param n_post number of posterior draws
#' @param nyc_draws vector (length n_post) of 18-44 seroprevalence draws
#' @param pop population size of the 0-17 class
#'
#' @return data frame of serovalues for age 0-17
#'
get0to17 <- function(trend_source, n_post, nyc_draws, pop) {
  # SPECIAL CASE: Age class 0-17
  # There is no 0-17 serodata available for New York City. However, there is
  # 0-17 case and death data available for New York City. We derived a scale of
  # 0-17:18-45 seroprevalence from Spain data (Pollan et al. 2020) and then
  # applied that scale to the 18-44 seroprevalence in NYC to obtain the 0-17
  # seroprevalence for NYC. We used Pollan et al. 2020 because it is the
  # only sufficiently large study to include age-specific seroprevalence values
  # in our target range

  # set up the ratio vector - each serosurvey draw gets its own scaling value
  ratio_draws_0to17 <- rep(0, n_post)

  trend_data <- read.csv(trend_source)
  age_sero_fit <- lm(mean ~ age_midpoint + I(age_midpoint^2), data = trend_data)
  trend_data$fitted <- fitted(age_sero_fit)
  trend_data$scale <- trend_data$fitted/mean(trend_data$mean)

  # The trend data is organized by the midpoints of each age class
  for (i in seq_len(n_post)) {
    # draws 1 value for each class between ages 0-17 of seroprevalence trend data
    nclasses_0to17 <- length(trend_data$age_class[trend_data$age_midpoint < 18])
    trend_draws_0to17 <- rep(0, nclasses_0to17)
    for (j in seq_len(nclasses_0to17)) {
      trend_draws_0to17[j] <- rnorm(1,
                                    mean = (trend_data[j, ]$mean)/100,
                                    sd = (trend_data[j, ]$UpperCI - trend_data[j, ]$mean)/100)
      # test NA first so the `< 0` comparison never sees an NA
      if (is.na(trend_draws_0to17[j]) || trend_draws_0to17[j] < 0) {
        trend_draws_0to17[j] <- 1e-6
      }
    }

    # draws 1 value for each class between ages 18-44 of seroprevalence trend data
    nclasses_18to44 <- length(trend_data$age_class[trend_data$age_midpoint > 18 &
                                                     trend_data$age_midpoint < 45])
    trend_draws_18to44 <- rep(0, nclasses_18to44)
    for (j in (nclasses_0to17 + 1):(nclasses_0to17 + nclasses_18to44)) {
      k <- j - nclasses_0to17  # index within the 18-44 draw vector
      trend_draws_18to44[k] <- rnorm(1,
                                     mean = (trend_data[j, ]$mean)/100,
                                     sd = (trend_data[j, ]$UpperCI - trend_data[j, ]$mean)/100)
      if (is.na(trend_draws_18to44[k]) || trend_draws_18to44[k] < 0) {
        trend_draws_18to44[k] <- 1e-6
      }
    }

    # a single 0-17:18-44 ratio for this posterior iteration
    ratio_draws_0to17[i] <- mean(trend_draws_0to17)/mean(trend_draws_18to44)
  }
  print(mean(ratio_draws_0to17))

  # creates a 0-17 class from the ratio and the 18-44 seroprevalence draws from NYC
  draws_0to17 <- data.frame(age_class = "0-17",
                            seropos = ratio_draws_0to17*nyc_draws,
                            sim = seq_len(n_post),
                            pop = pop)
  return(draws_0to17)
}
#
# DEPENDENCIES
#

suppressMessages(library(RJDBC, quietly=FALSE))  # requires "./sqljdbc4.jar"
suppressMessages(library(getPass, quietly=FALSE))

#
# FUNCTIONS
#

# Pull port samples joined with landings for ALL market categories.
#
# minYear: start of modeled time period
# maxYear: end of modeled time period
# save   : if TRUE, write the merged result to a local csv
#
# value  : a raw data.frame called from the data source
getRawDataAll <- function(minYear, maxYear, save = FALSE) {
  #
  # SQL
  #

  writeLines('\n****** Query *******')
  # driver
  drv <- JDBC('com.microsoft.sqlserver.jdbc.SQLServerDriver', './sqljdbc4.jar',
              identifier.quote = "'")
  # connection
  # SECURITY NOTE(review): plaintext credentials are committed here; replace
  # with getPass() prompts or environment variables before distributing.
  ch <- dbConnect(drv, 'jdbc:sqlserver://161.55.237.17;databaseName=COMX',
                  'nick.grunloh', 'Nmfsswfsc!2018')
  # port sample data query (one row per cluster/species within a sample)
  raw <- dbGetQuery(ch, sprintf("
    select
      master_clusts.sample_no as sampleNumber,
      clust_no as clusterNumber,
      rtrim(ltrim(species)) as species,
      weight as weight,
      DATEPART(yyyy, sample_date) as year,
      DATEPART(QUARTER, sample_date) as qtr,
      port_complex as portComplex,
      gear_grp as gearGroup,
      mark_cat as marketCategory,
      live_fish as live
    from master_samples
      inner join master_clusts ON master_samples.sample_no = master_clusts.sample_no
    where
      DATEPART(yyyy, sample_date) >= %d and
      DATEPART(yyyy, sample_date) <= %d and
      check_me = '0' and
      live_fish = 'N' and
      gear_grp in ('HKL', 'TWL', 'FPT', 'NET', 'MDT')
  ", minYear, maxYear))
  # landings data (pounds summed within each stratum)
  lands <- dbGetQuery(ch, sprintf("
    select
      mark_cat as marketCategory,
      year as year,
      quarter as qtr,
      gear_grp as gearGroup,
      port_complex as portComplex,
      species as species,
      live as live,
      sum(pounds) as comLands
    FROM [COMX_DB].[dbo].[COM_LANDS]
    where
      year >= %d and
      year <= %d and
      live = 'N' and
      gear_grp in ('HKL', 'TWL', 'FPT', 'NET', 'MDT')
    group by mark_cat, year, quarter, gear_grp, port_complex, live, species
  ", minYear, maxYear))
  # merge landings onto samples; strata without landings get 0 pounds
  raw <- merge(raw, lands,
               by = c('species', 'year', 'qtr', 'portComplex', 'gearGroup',
                      'marketCategory', 'live'),
               all.x = TRUE)
  raw$comLands[is.na(raw$comLands)] <- 0

  #
  # SAVE
  #

  if (save) {
    # save a local version of data for future reference
    write.csv(raw,
              sprintf('data%sTo%s_%s.csv',
                      substring(minYear, 3, 4), substring(maxYear, 3, 4),
                      Sys.Date()),
              row.names = FALSE, quote = FALSE)
  }
  #
  writeLines('**** Complete ******\n')
  return(raw)
}

# Pull port samples joined with landings for a SINGLE market category.
#
# mcat   : market category number
# minYear: start of modeled time period
# maxYear: end of modeled time period
# save   : if TRUE, write the merged result to a local csv
#
# value  : a raw data.frame called from the data source
getRawData <- function(mcat, minYear, maxYear, save = FALSE) {
  #
  # SQL
  #

  writeLines('\n****** Query *******')
  # driver
  drv <- JDBC('com.microsoft.sqlserver.jdbc.SQLServerDriver', './sqljdbc4.jar',
              identifier.quote = "'")
  # connection (note: different server/database than getRawDataAll)
  # SECURITY NOTE(review): plaintext credentials are committed here; replace
  # with getPass() prompts or environment variables before distributing.
  ch <- dbConnect(drv, 'jdbc:sqlserver://128.114.3.187;databaseName=COMX_DB',
                  'nick.grunloh', 'Nmfsswfsc!2018')
  # port sample data query
  raw <- dbGetQuery(ch, sprintf("
    select
      master_clusts.sample_no as sampleNumber,
      clust_no as clusterNumber,
      rtrim(ltrim(species)) as species,
      weight as weight,
      DATEPART(yyyy, sample_date) as year,
      DATEPART(QUARTER, sample_date) as qtr,
      port_complex as portComplex,
      gear_grp as gearGroup,
      mark_cat as marketCategory,
      live_fish as live
    from master_samples
      inner join master_clusts ON master_samples.sample_no = master_clusts.sample_no
    where
      DATEPART(yyyy, sample_date) >= %d and
      DATEPART(yyyy, sample_date) <= %d and
      mark_cat = %d and
      check_me = '0' and
      live_fish = 'N' and
      gear_grp in ('HKL', 'TWL', 'FPT', 'NET', 'MDT')
  ", minYear, maxYear, mcat))
  # landings data
  lands <- dbGetQuery(ch, sprintf("
    select
      mark_cat as marketCategory,
      year as year,
      quarter as qtr,
      gear_grp as gearGroup,
      port_complex as portComplex,
      species as species,
      live as live,
      sum(pounds) as comLands
    FROM [COMX_DB].[dbo].[COM_LANDS]
    where
      year >= %d and
      year <= %d and
      mark_cat = %d and
      live = 'N' and
      gear_grp in ('HKL', 'TWL', 'FPT', 'NET', 'MDT')
    group by mark_cat, year, quarter, gear_grp, port_complex, live, species
  ", minYear, maxYear, mcat))
  # merge landings onto samples; strata without landings get 0 pounds
  raw <- merge(raw, lands,
               by = c('species', 'year', 'qtr', 'portComplex', 'gearGroup',
                      'marketCategory', 'live'),
               all.x = TRUE)
  raw$comLands[is.na(raw$comLands)] <- 0

  #
  # SAVE
  #

  if (save) {
    # save a local version of data for future reference
    write.csv(raw,
              sprintf('%sdata%sTo%s_%s.csv',
                      mcat, substring(minYear, 3, 4), substring(maxYear, 3, 4),
                      Sys.Date()),
              row.names = FALSE, quote = FALSE)
  }
  #
  writeLines('**** Complete ******\n')
  return(raw)
}

# Aggregate raw cluster samples to one row per sample/species and add
# implied zero-weight rows for sppGold species a sample did not observe.
#
# sppGold: vector of gold-standard species names
# raw    : raw samples (output of getRawData*)
#
# value  : data.frame with multinomial implied structure
makeD <- function(sppGold, raw) {
  writeLines('makeD...\n')

  #
  # PREP D
  #

  # sum weight across clusters within each sample/species/stratum
  D <- aggregate(raw$weight,
                 raw[-which(colnames(raw) %in% c('clusterNumber', 'weight'))],
                 sum)
  # NOTE(review): this positional rename assumes the column order produced by
  # the getRawData* merge (by-columns first, then sampleNumber, comLands);
  # keep the two functions in sync.
  colnames(D) <- c('species', 'year', 'qtr', 'port', 'gear', 'mcat',
                   'live', 'id', 'landing', 'weight')
  # sum of weights of clusters in each sample
  clustWeight <- aggregate(raw$weight, by = list(id = raw$sampleNumber), FUN = sum)
  # match up total sampled weight with species weights by ids
  D <- merge(D, clustWeight, by = 'id')
  colnames(D)[colnames(D) == 'x'] <- 'nBB'
  #
  D$live <- as.character(D$live)
  D$port <- as.character(D$port)
  D$gear <- as.character(D$gear)
  # TWL = TWL + MDT; 'HKL'; 'FPT'; 'NET'
  D$gear[D$gear == 'MDT'] <- 'TWL'

  #
  # ADD ZEROS
  #

  # it's easier to fill D holes as a list
  D <- as.list(D)
  # an index to grow D to fill holes
  end <- length(D$id)
  # fill implied zeroes from unobserved species in the samples
  for (id in unique(D$id)) {
    #
    wid <- which(D$id == id)
    # stratum values are constant within a sample; take them from the first row
    nBB <- D$nBB[wid[1]]
    port <- D$port[wid[1]]
    gear <- D$gear[wid[1]]
    year <- D$year[wid[1]]
    qtr <- D$qtr[wid[1]]
    mcat <- D$mcat[wid[1]]
    live <- D$live[wid[1]]
    # each sample should have at least a zero for each species
    for (sn in sppGold[!sppGold %in% D$species[wid]]) {
      #
      end <- end + 1
      #
      D$id[end] <- id
      D$nBB[end] <- nBB
      D$port[end] <- port
      D$gear[end] <- gear
      D$year[end] <- year
      D$qtr[end] <- qtr
      D$species[end] <- sn
      D$mcat[end] <- mcat
      D$live[end] <- live
      # BUGFIX(review): use full column names; the original relied on
      # data.frame partial matching (raw$port -> raw$portComplex,
      # raw$gear -> raw$gearGroup), which silently breaks if a column
      # named 'port' or 'gear' is ever added to raw.
      D$landing[end] <- max(0, raw[raw$portComplex == port &
                                     raw$gearGroup == gear &
                                     raw$year == year &
                                     raw$qtr == qtr &
                                     raw$species == sn, 'comLands'])
      #
      D$weight[end] <- 0
    }
  }
  #
  D <- as.data.frame(D)
  D <- D[, c('id', 'mcat', 'live', 'year', 'qtr', 'port', 'gear', 'species',
             'nBB', 'landing', 'weight')]
  #
  return(D)
}

# Add unsampled strata (port x gear x qtr x year x species) so the model can
# make predictions for them.
#
# sppGold/portGold/gearGold/yearGold/qtrGold: gold-standard levels defining
#   the full prediction grid
# D: data.frame/list of samples to augment
#
# value: augmented data.frame; added rows carry NA weight/landing/id and a
#   fixed prediction cluster size of 100
addPredStrat <- function(sppGold, portGold, gearGold, yearGold, qtrGold, D) {
  writeLines('addPredStrat...\n')
  # D is easier to modify as a list
  D <- as.list(D)
  D$port <- as.character(D$port)
  D$gear <- as.character(D$gear)
  D$species <- as.character(D$species)
  # an index to grow D to fill holes
  end <- length(D$id)
  # prediction sum cluster size
  fill <- 100
  # fill unsampled strata that are internal to the range of the data
  for (p in portGold) {
  for (g in gearGold) {
  for (q in qtrGold) {
  for (y in yearGold) {
  for (s in sppGold) {
    #
    wJoint <- which(D$port == p & D$gear == g & D$year == y &
                      D$qtr == q & D$species == s)
    #
    if (length(wJoint) == 0) {
      # data grows by a single row with unobserved (NA) weight
      end <- end + 1
      #
      D$id[end] <- NA
      D$weight[end] <- NA
      D$nBB[end] <- fill
      D$port[end] <- p
      D$gear[end] <- g
      D$year[end] <- y
      D$qtr[end] <- q
      D$species[end] <- s
      D$live[end] <- 'N'
      D$mcat[end] <- D$mcat[1]
      D$landing[end] <- NA
    }
  }}}}}
  #
  D <- as.data.frame(D)
  D$port <- as.character(D$port)
  D$gear <- as.character(D$gear)
  D$species <- as.character(D$species)
  return(D)
}
/try2/quickScripts/dataMatters/dataFunkAll.r
no_license
gasduster99/sppComp
R
false
false
11,272
r
#
# DEPENDENCIES
#
#
suppressMessages(library(RJDBC, quietly=FALSE)) #requires the MS driver jar at "./sqljdbc4.jar"
suppressMessages(library(getPass, quietly=FALSE))

#
# FUNCTIONS
#
#

# Query port samples and landings for ALL market categories in [minYear, maxYear]
# and merge landings (pounds) onto the sample rows.
getRawDataAll = function(minYear, maxYear, save=F){
    #minYear: start of modeled time period
    #maxYear: end of modeled time period
    #save   : if TRUE, also write the merged result to a dated local csv
    #
    #value  : a raw data.frame called from the data source

    #
    #SQL
    #

    #
    writeLines('\n****** Query *******')
    #driver
    drv = JDBC('com.microsoft.sqlserver.jdbc.SQLServerDriver', './sqljdbc4.jar', identifier.quote="'");
    #connection
    #NOTE(review): credentials are hard coded; the commented getPass() prompts are safer -- confirm before sharing
    ch = dbConnect(drv, 'jdbc:sqlserver://161.55.237.17;databaseName=COMX', 'nick.grunloh', 'Nmfsswfsc!2018') #getPass('User:'), getPass('Password:'))#
    #port sample data query: one row per species weight within each cluster of each sample
    raw = dbGetQuery(ch, sprintf(" select master_clusts.sample_no as sampleNumber, clust_no as clusterNumber, rtrim(ltrim(species)) as species, weight as weight, DATEPART(yyyy, sample_date) as year, DATEPART(QUARTER, sample_date) as qtr, port_complex as portComplex, gear_grp as gearGroup, mark_cat as marketCategory, live_fish as live from master_samples inner join master_clusts ON master_samples.sample_no=master_clusts.sample_no where DATEPART(yyyy, sample_date) >= %d and DATEPART(yyyy, sample_date) <= %d and check_me='0' and live_fish='N' and gear_grp in ('HKL', 'TWL', 'FPT', 'NET', 'MDT') ", minYear, maxYear)
    )
    #landings data: total pounds landed per stratum
    lands = dbGetQuery(ch, sprintf(" select mark_cat as marketCategory, year as year, quarter as qtr, gear_grp as gearGroup, port_complex as portComplex, species as species, live as live, sum(pounds) as comLands FROM [COMX_DB].[dbo].[COM_LANDS] where year >= %d and year <= %d and live='N' and gear_grp in ('HKL', 'TWL', 'FPT', 'NET', 'MDT') group by mark_cat, year, quarter, gear_grp, port_complex, live, species ", minYear, maxYear)
    )
    #merge landings onto samples; strata with no landings record get 0 pounds
    raw = merge(raw, lands, by=c('species', 'year', 'qtr', 'portComplex', 'gearGroup', 'marketCategory', 'live'), all.x=T)
    raw$comLands[is.na(raw$comLands)] = 0

    #
    #SAVE
    #

    #
    if( save ){
        #save a local version of data for future reference
        write.csv(raw,
            sprintf('data%sTo%s_%s.csv', substring(minYear, 3, 4), substring(maxYear, 3, 4), Sys.Date()),
            row.names=F,
            quote=F
        )
        #raw = read.csv('./data83to90.csv', header=T)
    }
    #
    writeLines('**** Complete ******\n')
    return( raw )
}

# Query port samples and landings for a SINGLE market category in [minYear, maxYear].
getRawData = function(mcat, minYear, maxYear, save=F){
    #mcat   : mcat number
    #minYear: start of modeled time period
    #maxYear: end of modeled time period
    #save   : if TRUE, also write the merged result to a dated local csv
    #
    #value  : a raw data.frame called from the data source

    #
    #SQL
    #

    #
    writeLines('\n****** Query *******')
    #driver
    drv = JDBC('com.microsoft.sqlserver.jdbc.SQLServerDriver', './sqljdbc4.jar', identifier.quote="'");
    #connection
    #NOTE(review): different server/database than getRawDataAll (128.114.3.187/COMX_DB vs 161.55.237.17/COMX) -- confirm intended
    ch = dbConnect(drv, 'jdbc:sqlserver://128.114.3.187;databaseName=COMX_DB', 'nick.grunloh', 'Nmfsswfsc!2018') #getPass('User:'), getPass('Password:'))#
    #port sample data query (same as getRawDataAll plus the mark_cat filter)
    raw = dbGetQuery(ch, sprintf(" select master_clusts.sample_no as sampleNumber, clust_no as clusterNumber, rtrim(ltrim(species)) as species, weight as weight, DATEPART(yyyy, sample_date) as year, DATEPART(QUARTER, sample_date) as qtr, port_complex as portComplex, gear_grp as gearGroup, mark_cat as marketCategory, live_fish as live from master_samples inner join master_clusts ON master_samples.sample_no=master_clusts.sample_no where DATEPART(yyyy, sample_date) >= %d and DATEPART(yyyy, sample_date) <= %d and mark_cat=%d and check_me='0' and live_fish='N' and gear_grp in ('HKL', 'TWL', 'FPT', 'NET', 'MDT') ", minYear, maxYear, mcat)
    )
    #landings data for this market category
    lands = dbGetQuery(ch, sprintf(" select mark_cat as marketCategory, year as year, quarter as qtr, gear_grp as gearGroup, port_complex as portComplex, species as species, live as live, sum(pounds) as comLands FROM [COMX_DB].[dbo].[COM_LANDS] where year >= %d and year <= %d and mark_cat=%d and live='N' and gear_grp in ('HKL', 'TWL', 'FPT', 'NET', 'MDT') group by mark_cat, year, quarter, gear_grp, port_complex, live, species ", minYear, maxYear, mcat)
    )
    #merge landings onto samples; strata with no landings record get 0 pounds
    raw = merge(raw, lands, by=c('species', 'year', 'qtr', 'portComplex', 'gearGroup', 'marketCategory', 'live'), all.x=T)
    raw$comLands[is.na(raw$comLands)] = 0

    #
    #SAVE
    #

    #
    if( save ){
        #save a local version of data for future reference
        write.csv(raw,
            sprintf('%sdata%sTo%s_%s.csv', mcat, substring(minYear, 3, 4), substring(maxYear, 3, 4), Sys.Date()),
            row.names=F,
            quote=F
        )
        #raw = read.csv('./data83to90.csv', header=T)
    }
    #
    writeLines('**** Complete ******\n')
    return( raw )
}

# Aggregate raw cluster samples to one weight per species per sample, and fill
# implied zero weights for gold-standard species not observed in a sample.
makeD = function(sppGold, raw){
    #sppGold : a list of gold standard species
    #raw     : raw samples without an implied structure
    #
    #value : a list of augmented samples aggregated across clusters with multinomial implied structure filling in implied zeros for unobserved species
    writeLines('makeD...\n')

    #
    #PREP D
    #
    #(earlier exploratory aggregate()-based variants were left here commented
    # out; condensed to this note for readability)

    #sum weight across cluster
    D = aggregate(raw$weight, raw[-which(colnames(raw)%in%c('clusterNumber', 'weight'))], sum)
    colnames(D) = c( 'species', 'year', 'qtr', 'port', 'gear', 'mcat', 'live', 'id', 'landing', 'weight' )
    #sum of weights of cluster in each sample
    clustWeight = aggregate( raw$weight, by=list(id=raw$sampleNumber), FUN=sum )
    #match up total sampled weight with species weights by ids
    D = merge(D, clustWeight, by='id')
    colnames(D)[colnames(D)=='x'] = 'nBB'
    #
    D$live = as.character(D$live)
    D$port = as.character(D$port)
    D$gear = as.character(D$gear)
    #TWL=TWL+MDT; 'HKL'; 'FPT'; 'NET'
    D$gear[D$gear=='MDT'] = 'TWL'

    #
    #ADD ZEROS
    #

    #its easier to fill D holes as a list
    D = as.list(D)
    #an index to grow D to fill holes
    end = length(D$id)
    #fill implied zeroes from unobserved species in the samples
    for(id in unique(D$id)){
        #rows of this sample
        wid = which(D$id==id)
        #stratum descriptors shared by every row of the sample
        nBB = D$nBB[wid[1]]
        port = D$port[wid[1]]
        gear = D$gear[wid[1]]
        year = D$year[wid[1]]
        qtr = D$qtr[wid[1]]
        mcat = D$mcat[wid[1]]
        live = D$live[wid[1]]
        #each sample should have at least a zero for each species
        for(sn in sppGold[!sppGold%in%D$species[wid]]){
            #
            end = end + 1
            #
            D$id[end] = id
            D$nBB[end] = nBB
            D$port[end] = port
            D$gear[end] = gear
            D$year[end] = year
            D$qtr[end] = qtr
            D$species[end] = sn
            D$mcat[end] = mcat
            D$live[end] = live
            #NOTE(review): raw has no $port or $gear columns here (they are named
            #portComplex/gearGroup in raw), so this subset appears to always be
            #empty and landing fills with 0 via max(0, numeric(0)) -- confirm intent
            D$landing[end] = max(0, raw[raw$port==port & raw$gear==gear & raw$year==year & raw$qtr==qtr & raw$species==sn,'comLands'])
            #
            D$weight[end] = 0
        }
    }
    #back to a data.frame with a fixed column order
    D = as.data.frame(D)
    D = D[, c('id', 'mcat', 'live', 'year', 'qtr', 'port', 'gear', 'species', 'nBB', 'landing', 'weight')]
    #
    return( D )
}

# Append one placeholder row for every unsampled stratum (port x gear x qtr x
# year x species combination absent from D) so the model can predict there.
addPredStrat = function(sppGold, portGold, gearGold, yearGold, qtrGold, D){
    #sppGold : a list of gold standard species
    #portGold : a list of gold standard ports
    #gearGold : a list of gold standard gears
    #yearGold : a list of gold standard years
    #qtrGold : a list of gold standard qtrs
    #D : a list to augment
    #
    #value : an augmented list with unsampled strata added for use by the model to make predictions
    writeLines('addPredStrat...\n')
    #D is easier to modify as a list
    D = as.list(D)
    D$port = as.character(D$port)
    D$gear = as.character(D$gear)
    D$species = as.character(D$species)
    #an index to grow D to fill holes
    end = length(D$id)
    #prediction sum cluster size
    fill = 100
    #fill unsampled strata that are internal to the range of the data for prediction
    for(p in portGold){
    for(g in gearGold){
    for(q in qtrGold ){
    for(y in yearGold){
    for(s in sppGold ){
        #
        wJoint = which( D$port==p & D$gear==g & D$year==y & D$qtr==q & D$species==s )
        #
        if( length(wJoint)==0 ){
            #data grows by a single placeholder row
            #NOTE(review): comment in the original said "0 weight" but weight is
            #set to NA below -- confirm which is intended downstream
            end = end + 1
            #
            D$id[end] = NA
            D$weight[end] = NA
            D$nBB[end] = fill
            D$port[end] = p
            D$gear[end] = g
            D$year[end] = y
            D$qtr[end] = q
            D$species[end]= s
            D$live[end] = 'N'
            D$mcat[end] = D$mcat[1]
            D$landing[end]= NA
        }
    }}}}
    }
    #
    D = as.data.frame(D)
    D$port = as.character(D$port)
    D$gear = as.character(D$gear)
    D$species = as.character(D$species)
    return( D )
}
##################################################
#Author : Pichai Raman
#Date : 10/15/15
#Package : This package is for the creation
#and display of ROC plots that are customizable
#with GGplot. It is meant both for novice and expert
#GGPlot & R users and has a command line component.
##################################################

#Call libraries
require("AUC");
require("ggplot2");

############################################
#Main function to generate ROC Plots
#
#data : data frame, 1st column is metric, second column is label (0=False, 1=True)
#diagCol : Color of the diagonal line
#roccol : Color of the ROC line
#isDecreasing : by default the lower the metric the better, i.e. if you have a score of 1 its better than a score of 2 (i.e. p-values), if using fold change set to T
#myTitle : title of the plot
#
#value : list(roc data.frame [FPR, TPR], ggplot object, AUC)
############################################
rocon <- function(data, diagCol = "black", roccol = "red", isDecreasing = TRUE, myTitle = "ROC Curve")
{
    #Remove all NA rows and coerce labels to numeric 0/1
    data <- na.omit(data);
    data[, 2] <- as.numeric(as.character(data[, 2]));
    pn <- sum(data[, 2] == 1)   #number of positives
    fn <- nrow(data) - pn       #number of negatives
    #reference diagonal (chance line)
    diag = data.frame(x = seq(0, 1, by = .01), y = seq(0, 1, by = .01))
    #order by the metric so cutoffs sweep from best to worst
    data <- data[order(data[, 1], decreasing = isDecreasing), ];
    #vectorized TPR/FPR at each successive cutoff
    #(replaces the previous O(n^2) loop that grew x and y with c();
    # note (length(c(1:i)) - sum(data[1:i,2])) is simply i - cumulative positives)
    n <- nrow(data)
    x <- numeric(n + 1)  #FPR, leading 0 for the empty cutoff
    y <- numeric(n + 1)  #TPR, leading 0 for the empty cutoff
    cumPos <- cumsum(data[, 2])
    y[-1] <- cumPos / pn
    x[-1] <- (seq_len(n) - cumPos) / fn
    #AUC::roc() assumes larger scores indicate positives, so invert when
    #smaller-is-better does not hold
    if (!isDecreasing)
    {
        data[, 1] <- 1 / data[, 1];
    }
    myAuc <- auc(roc(data[, 1], factor(data[, 2])))
    #Create and return object
    rocdata <- data.frame(FPR = x, TPR = y, method = "")
    legLabs <- paste("AUC = ", round(myAuc, 3), sep = "");
    #BUGFIX: myTitle was previously ignored (title hardcoded to "ROC curve")
    p <- ggplot(data = rocdata, aes(x = FPR, y = TPR, color = method)) + geom_line(size = .5) +
        geom_line(data = diag, aes(x = x, y = y), color = "black") + theme_bw() +
        labs(x = "False Positive Rate", y = "True Positive Rate", title = myTitle) +
        scale_color_discrete(labels = legLabs) +
        theme(legend.title = element_blank())
    return(list(rocdata[1:2], p, myAuc));
}

############################################
#Function to generate multiple ROC Plots
#
#data : data frame, 1st column is metric, second column is label (0=False, 1=True), 3rd is the method
#diagCol : Color of the diagonal line
#colors : optional palette (currently unused, kept for interface compatibility)
#isDecreasing : by default the lower the metric the better; if using fold change set to T
#myTitle : title of the plot
#
#value : list(combined roc data.frame, ggplot object, data.frame of AUCs per method)
############################################
roconMult <- function(data, diagCol = "black", colors = NULL, isDecreasing = TRUE, myTitle = "ROC Curve")
{
    #one ROC per method (3rd column)
    dataList <- split(data, f = data[, 3]);
    diag = data.frame(x = seq(0, 1, by = .01), y = seq(0, 1, by = .01))
    output <- lapply(dataList, FUN = rocon, isDecreasing = isDecreasing)
    methods <- names(output)
    #combine per-method ROC curves and AUCs
    #(replaces the previous rbind-in-a-loop accumulation)
    dfAll <- do.call(rbind, lapply(seq_along(output), function(i) {
        data.frame(output[[i]][1], method = methods[i])
    }))
    aucVals <- unname(vapply(output, function(o) as.numeric(as.character(o[3])), numeric(1)))
    allAUC <- data.frame(AUCValue = aucVals, method = methods)
    legLabs <- paste(methods, " : AUC = ", round(aucVals, 3), sep = "")
    #Create and return object
    p <- ggplot(data = dfAll, aes(x = FPR, y = TPR, color = method)) + geom_line(size = .5) +
        geom_line(data = diag, aes(x = x, y = y), color = "black") + theme_bw() +
        labs(x = "False Positive Rate", y = "True Positive Rate", title = myTitle) +
        scale_color_discrete(labels = legLabs) +
        theme(legend.title = element_blank())
    return(list(dfAll, p, allAUC));
}
/code/helper/rocon.R
no_license
PichaiRaman/PDACSurvivalAnalysis
R
false
false
3,728
r
##################################################
#Author : Pichai Raman
#Date : 10/15/15
#Package : This package is for the creation
#and display of ROC plots that are customizable
#with GGplot. It is meant both for novice and expert
#GGPlot & R users and has a command line component.
##################################################

#Call libraries
require("AUC");
require("ggplot2");

############################################
#Main function to generate ROC Plots
#
#data : data frame, 1st column is metric, second column is label (0=False, 1=True)
#diagCol : Color of the diagonal line (NOTE(review): currently unused)
#roccol : Color of the ROC line (NOTE(review): currently unused)
#isDecreasing : by default the lower the metric the better, i.e. if you have a score of 1 its better than a score of 2 (i.e. p-values), if using fold change set to T
#myTitle : title of the plot (NOTE(review): currently unused; title is hardcoded to "ROC curve" below)
#
#value : list(roc data.frame [FPR, TPR], ggplot object, AUC)
############################################
rocon <- function(data, diagCol="black", roccol="red", isDecreasing=T, myTitle="ROC Curve")
{
    #Removes all NA; coerce labels to numeric 0/1
    data <- na.omit(data);
    data[,2] <- as.numeric(as.character(data[,2]));
    #counts of positives and negatives
    pn <- nrow(subset(data, data[, 2] == 1))
    fn <- nrow(data) - pn
    #reference diagonal (chance line)
    diag = data.frame(x = seq(0, 1, by = .01), y = seq(0, 1, by = .01))
    #order by the metric so cutoffs sweep from best to worst
    data <- data[order(data[,1], decreasing=isDecreasing),];
    x = 0
    y = 0
    #accumulate TPR/FPR at each successive cutoff
    #NOTE(review): grows x and y with c() inside the loop -- O(n^2) for large inputs
    for (i in 1:nrow(data))
    {
        tpr <- sum(data[1:i,2])/pn;
        fpr <- (length(c(1:i))-sum(data[1:i,2]))/fn
        x <- c(x, fpr)
        y <- c(y, tpr)
    }
    #AUC::roc() assumes larger scores indicate positives, so invert when needed
    if(isDecreasing==F)
    {
        data[,1] <- 1/data[,1];
    }
    myAuc <- auc(roc(data[,1], factor(data[,2])))
    #Create and return object
    rocdata <- data.frame(FPR = x, TPR = y, method="")
    legLabs <- paste("AUC = ", round(myAuc, 3), sep="");
    p <- ggplot(data = rocdata, aes(x = FPR, y = TPR, color=method)) +geom_line(size=.5) +
        geom_line(data = diag, aes(x = x,y = y), color ="black")+theme_bw()+
        labs(x = "False Positive Rate",y = "True Positive Rate", title = "ROC curve")+scale_color_discrete(labels=legLabs)+
        theme(legend.title=element_blank())
    return(list(rocdata[1:2], p, myAuc));
}

############################################
#Function to generate multiple ROC Plots
#
#data : data frame, 1st column is metric, second column is label (0=False, 1=True), 3rd is the method
#diagCol : Color of the diagonal line (NOTE(review): currently unused)
#colors : optional palette (NOTE(review): currently unused)
#isDecreasing : by default the lower the metric the better; if using fold change set to T
#myTitle : title of the plot
#
#value : list(combined roc data.frame, ggplot object, data.frame of AUCs per method)
############################################
roconMult <- function(data, diagCol="black", colors=NULL, isDecreasing=T, myTitle="ROC Curve")
{
    #one ROC per method (3rd column)
    dataList <- split(data, f=data[,3]);
    diag = data.frame(x = seq(0, 1, by = .01), y = seq(0, 1, by = .01))
    output <- lapply(dataList, FUN=rocon, isDecreasing=isDecreasing)
    dfAll <- data.frame();
    allAUC <- data.frame();
    legLabs <- c();
    #combine per-method ROC curves, AUCs, and legend labels
    #NOTE(review): rbind inside the loop copies dfAll each iteration
    for(i in 1:length(output))
    {
        dfAll <- rbind(dfAll, data.frame(output[[i]][1], method=as.character(names(output)[i])));
        allAUC <- rbind(allAUC, data.frame(AUCValue=as.numeric(as.character(output[[i]][3])), method=names(output)[i]));
        legLabs <- c(legLabs, paste(method=names(output)[i], " : AUC = ", round(as.numeric(as.character(output[[i]][3])), 3), sep=""));
    }
    #Create and return object
    p <- ggplot(data = dfAll, aes(x = FPR, y = TPR, color=method)) +geom_line(size=.5) +
        geom_line(data = diag, aes(x = x,y = y), color ="black")+theme_bw()+
        labs(x = "False Positive Rate",y = "True Positive Rate", title = myTitle)+scale_color_discrete(labels=legLabs)+
        theme(legend.title=element_blank())
    return(list(dfAll, p, allAUC));
}
# Leaflet choropleth exercise script.
# Assumes the leaflet and scales packages are attached by the session
# (leaflet()/addPolygons() from leaflet; dollar() from scales -- TODO confirm),
# and that m4 is a leaflet base map built earlier in the session -- TODO confirm.
library(sp)
load("nc_zips.rda")          # loads shp (spatial object with an @data slot)
library(tidyverse)
glimpse(shp@data)

# Print the class of the data slot of shp
class(shp@data)

# Print GEOID10
shp@data$GEOID10

load("wealthiest_zips.rda")  # loads nc_income (and presumably wealthy_zips) -- confirm

# Glimpse the nc_income data
glimpse(nc_income)

# Summarize the nc_income data
summary(nc_income)

# Left join nc_income onto shp@data and store in shp_nc_income
shp_nc_income <- shp@data %>%
  left_join(nc_income, by = c("GEOID10" = "zipcode"))

# Print the number of missing values of each variable in shp_nc_income
# NOTE(review): funs() is deprecated in modern dplyr; list(~ ...) replaces it
shp_nc_income %>%
  summarize_all(funs(sum(is.na(.))))

shp %>%
  leaflet() %>%
  addTiles() %>%
  addPolygons()

# map the polygons in shp
shp %>%
  leaflet() %>%
  addTiles() %>%
  addPolygons()

# which zips were not in the income data?
shp_na <- shp[is.na(shp$mean_income),]

# map the polygons in shp_na
shp_na %>%
  leaflet() %>%
  addTiles() %>%
  addPolygons()

# summarize the mean income variable
summary(shp$mean_income)

# subset shp to include only zip codes in the top quartile of mean income
# (55917 is the hard-coded third-quartile value taken from summary() above)
high_inc <- shp[!is.na(shp$mean_income) & shp$mean_income > 55917,]

# map the boundaries of the zip codes in the top quartile of mean income
high_inc %>%
  leaflet() %>%
  addTiles() %>%
  addPolygons()

# create color palette with colorNumeric()
nc_pal <- colorNumeric("YlGn", domain = high_inc@data$mean_income)

high_inc %>%
  leaflet() %>%
  addTiles() %>%
  # set boundary thickness to 1 and color polygons
  addPolygons(weight = 1, col = ~nc_pal(mean_income),
              # add labels that display mean income
              label = ~paste0("Mean Income: ", dollar(mean_income)),
              # highlight polygons on hover
              highlight = highlightOptions(weight = 5, color = "white",
                                           bringToFront = TRUE))

# Use the log function to create a new version of nc_pal
nc_pal <- colorNumeric("YlGn", domain = log(high_inc@data$mean_income))

# comment out the map tile
high_inc %>%
  leaflet() %>%
  #addProviderTiles("CartoDB") %>%
  # apply the new nc_pal to the map
  addPolygons(weight = 1, color = ~nc_pal(log(mean_income)), fillOpacity = 1,
              label = ~paste0("Mean Income: ", dollar(mean_income)),
              highlightOptions = highlightOptions(weight = 5, color = "white",
                                                  bringToFront = TRUE))

# plot zip codes with mean incomes >= $200k
wealthy_zips %>%
  leaflet() %>%
  addProviderTiles("CartoDB") %>%
  # set color to green and create Wealth Zipcodes group
  addPolygons(weight = 1, fillOpacity = .7, color = "green",
              group = "Wealthy Zipcodes",
              label = ~paste0("Mean Income: ", dollar(mean_income)),
              highlightOptions = highlightOptions(weight = 5, color = "white",
                                                  bringToFront = TRUE))

# Add polygons using wealthy_zips
final_map <- m4 %>%
  addPolygons(data = wealthy_zips, weight = 1, fillOpacity = .5, color = "Grey",
              group = "Wealthy Zip Codes",
              label = ~paste0("Mean Income: ", dollar(mean_income)),
              highlightOptions = highlightOptions(weight = 5, color = "white",
                                                  bringToFront = TRUE)) %>%
  # Update layer controls including "Wealthy Zip Codes"
  addLayersControl(baseGroups = c("OSM", "Carto", "Esri"),
                   overlayGroups = c("Public", "Private", "For-Profit",
                                     "Wealthy Zip Codes"))

# Print and explore your very last map of the course!
final_map
/R/interactive_maps_leaflet/ex4.R
no_license
m-mburu/data_camp
R
false
false
3,513
r
# Leaflet choropleth exercise script.
# Assumes the leaflet and scales packages are attached by the session
# (leaflet()/addPolygons() from leaflet; dollar() from scales -- TODO confirm),
# and that m4 is a leaflet base map built earlier in the session -- TODO confirm.
library(sp)
load("nc_zips.rda")          # loads shp (spatial object with an @data slot)
library(tidyverse)
glimpse(shp@data)

# Print the class of the data slot of shp
class(shp@data)

# Print GEOID10
shp@data$GEOID10

load("wealthiest_zips.rda")  # loads nc_income (and presumably wealthy_zips) -- confirm

# Glimpse the nc_income data
glimpse(nc_income)

# Summarize the nc_income data
summary(nc_income)

# Left join nc_income onto shp@data and store in shp_nc_income
shp_nc_income <- shp@data %>%
  left_join(nc_income, by = c("GEOID10" = "zipcode"))

# Print the number of missing values of each variable in shp_nc_income
# NOTE(review): funs() is deprecated in modern dplyr; list(~ ...) replaces it
shp_nc_income %>%
  summarize_all(funs(sum(is.na(.))))

shp %>%
  leaflet() %>%
  addTiles() %>%
  addPolygons()

# map the polygons in shp
shp %>%
  leaflet() %>%
  addTiles() %>%
  addPolygons()

# which zips were not in the income data?
shp_na <- shp[is.na(shp$mean_income),]

# map the polygons in shp_na
shp_na %>%
  leaflet() %>%
  addTiles() %>%
  addPolygons()

# summarize the mean income variable
summary(shp$mean_income)

# subset shp to include only zip codes in the top quartile of mean income
# (55917 is the hard-coded third-quartile value taken from summary() above)
high_inc <- shp[!is.na(shp$mean_income) & shp$mean_income > 55917,]

# map the boundaries of the zip codes in the top quartile of mean income
high_inc %>%
  leaflet() %>%
  addTiles() %>%
  addPolygons()

# create color palette with colorNumeric()
nc_pal <- colorNumeric("YlGn", domain = high_inc@data$mean_income)

high_inc %>%
  leaflet() %>%
  addTiles() %>%
  # set boundary thickness to 1 and color polygons
  addPolygons(weight = 1, col = ~nc_pal(mean_income),
              # add labels that display mean income
              label = ~paste0("Mean Income: ", dollar(mean_income)),
              # highlight polygons on hover
              highlight = highlightOptions(weight = 5, color = "white",
                                           bringToFront = TRUE))

# Use the log function to create a new version of nc_pal
nc_pal <- colorNumeric("YlGn", domain = log(high_inc@data$mean_income))

# comment out the map tile
high_inc %>%
  leaflet() %>%
  #addProviderTiles("CartoDB") %>%
  # apply the new nc_pal to the map
  addPolygons(weight = 1, color = ~nc_pal(log(mean_income)), fillOpacity = 1,
              label = ~paste0("Mean Income: ", dollar(mean_income)),
              highlightOptions = highlightOptions(weight = 5, color = "white",
                                                  bringToFront = TRUE))

# plot zip codes with mean incomes >= $200k
wealthy_zips %>%
  leaflet() %>%
  addProviderTiles("CartoDB") %>%
  # set color to green and create Wealth Zipcodes group
  addPolygons(weight = 1, fillOpacity = .7, color = "green",
              group = "Wealthy Zipcodes",
              label = ~paste0("Mean Income: ", dollar(mean_income)),
              highlightOptions = highlightOptions(weight = 5, color = "white",
                                                  bringToFront = TRUE))

# Add polygons using wealthy_zips
final_map <- m4 %>%
  addPolygons(data = wealthy_zips, weight = 1, fillOpacity = .5, color = "Grey",
              group = "Wealthy Zip Codes",
              label = ~paste0("Mean Income: ", dollar(mean_income)),
              highlightOptions = highlightOptions(weight = 5, color = "white",
                                                  bringToFront = TRUE)) %>%
  # Update layer controls including "Wealthy Zip Codes"
  addLayersControl(baseGroups = c("OSM", "Carto", "Esri"),
                   overlayGroups = c("Public", "Private", "For-Profit",
                                     "Wealthy Zip Codes"))

# Print and explore your very last map of the course!
final_map
# This script makes it easy to remove log entries in incremental mode,
# e.g. when we want to rerun/overwrite previously generated diagnostics.
library(magrittr)

# Root folder holding the incremental diagnostics logs.
logFolder <- "D:\\temp\\outputFolder\\packageMode"
diagnosticsFileName <- "CreatedDiagnostics.csv"

# Every matching log file below logFolder.
listFiles <- list.files(
  path = logFolder,
  pattern = diagnosticsFileName,
  full.names = TRUE,
  recursive = TRUE
)

# Candidate task names:
# "getCohortCounts", "runInclusionStatistics", "runIncludedSourceConcepts",
# "runBreakdownIndexEvents", "runOrphanConcepts",
# "runVisitContext", "runIncidenceRate", "runCohortOverlap", "runCohortAsFeatures",
# "runTemporalCohortCharacterization"
tasksToRemove <- c("runTimeSeries")

# Rewrite each log file in place with the selected tasks stripped out.
# Iterating over the vector directly is a no-op when listFiles is empty,
# unlike the previous 1:length(listFiles), which would yield c(1, 0).
for (logFile in listFiles) {
  readr::read_csv(
    file = logFile,
    col_types = readr::cols(),
    guess_max = 1e7  # min(1e7) was a single-argument min, i.e. a no-op
  ) %>%
    dplyr::filter(!.data$task %in% tasksToRemove) %>%
    readr::write_excel_csv(file = logFile)
}
/extras/EditIncrementalLogDiagnostics.R
permissive
gowthamrao/CohortDiagnostics
R
false
false
916
r
# this script enables to easily remove log entries in incremental mode.
# if we want to rerun/overwrite previous data
library(magrittr)

# root folder holding the incremental diagnostics logs
logFolder <- "D:\\temp\\outputFolder\\packageMode"
diagnosticsFileName <- "CreatedDiagnostics.csv"

# every matching log file below logFolder
listFiles <- list.files(
  path = logFolder,
  pattern = diagnosticsFileName,
  full.names = TRUE,
  recursive = TRUE
)

# candidate task names:
# "getCohortCounts", "runInclusionStatistics", "runIncludedSourceConcepts",
# "runBreakdownIndexEvents", "runOrphanConcepts",
# "runVisitContext", "runIncidenceRate", "runCohortOverlap","runCohortAsFeatures",
# "runTemporalCohortCharacterization"
#
tasksToRemove <- c("runTimeSeries")

# rewrite each file in place with the selected tasks filtered out
# NOTE(review): 1:length(listFiles) yields c(1, 0) when listFiles is empty --
# prefer seq_along(listFiles) or looping over the vector directly
# NOTE(review): min(1e7) is a single-argument min and is simply 1e7
for (i in (1:length(listFiles))) {
  readr::read_csv(
    file = listFiles[[i]],
    col_types = readr::cols(),
    guess_max = min(1e7)
  ) %>%
    dplyr::filter(!.data$task %in% tasksToRemove) %>%
    readr::write_excel_csv(file = listFiles[[i]])
}
#Wilcox Test
# Two independent groups: check variance homogeneity, then pick the test.
data = read.table("ttestdata.txt")
library(car)
leveneTest(data$V3~factor(data$V2)) #checking for Homogeneity of Variance
#Test is significant, thus indicating a violation of the assumption
t.test(data$V3~factor(data$V2))
#Test is significant, but is inappropriate due to the failure of assumption
wilcox.test(data$V3~factor(data$V2))

#Wilcoxon Signed Rank Test (paired/dependent samples)
data = read.table("ttestdata2.txt")
data1 = data$V3[data$V2 == 1]
data2 = data$V3[data$V2 == 2]
differences = data2 - data1
hist(differences)
#The resulting histogram looks normal, so can use a dependent samples ttest
#NOTE(review): the formula interface combined with paired = TRUE is defunct in
#recent R releases; t.test(data2, data1, paired = TRUE) may be required -- confirm R version
t.test(data$V3~factor(data$V2), paired = TRUE)
#But, if you were concerned about a violation of the assumption you could use the Wilcoxon Rank test
wilcox.test(data$V3~factor(data$V2), paired = TRUE)

#Multiple Groups
data = read.table("anovadata.txt")
group = factor(data$V2)
dv = data$V3
boxplot(dv~group)
library(car)
leveneTest(dv~group)
#Definitely non-parametric

#Non-parametric ANOVA (Kruskal-Wallis)
kruskal.test(dv~group)

#Non-parametric Post-Hoc
library(pgirmess)
kruskalmc(dv~group) #Shows that all the groups are different (significance = TRUE)

#Non Parametric Equivalent of RM ANOVA
friedman.test(as.matrix(data))
#Post Hoc
friedmanmc(as.matrix(data))
/Assignment 9.R
no_license
JulietRowe/Stats
R
false
false
1,254
r
#Wilcox Test
# Two independent groups: check variance homogeneity, then pick the test.
data = read.table("ttestdata.txt")
library(car)
leveneTest(data$V3~factor(data$V2)) #checking for Homogeneity of Variance
#Test is significant, thus indicating a violation of the assumption
t.test(data$V3~factor(data$V2))
#Test is significant, but is inappropriate due to the failure of assumption
wilcox.test(data$V3~factor(data$V2))

#Wilcoxon Signed Rank Test (paired/dependent samples)
data = read.table("ttestdata2.txt")
data1 = data$V3[data$V2 == 1]
data2 = data$V3[data$V2 == 2]
differences = data2 - data1
hist(differences)
#The resulting histogram looks normal, so can use a dependent samples ttest
#NOTE(review): the formula interface combined with paired = TRUE is defunct in
#recent R releases; t.test(data2, data1, paired = TRUE) may be required -- confirm R version
t.test(data$V3~factor(data$V2), paired = TRUE)
#But, if you were concerned about a violation of the assumption you could use the Wilcoxon Rank test
wilcox.test(data$V3~factor(data$V2), paired = TRUE)

#Multiple Groups
data = read.table("anovadata.txt")
group = factor(data$V2)
dv = data$V3
boxplot(dv~group)
library(car)
leveneTest(dv~group)
#Definitely non-parametric

#Non-parametric ANOVA (Kruskal-Wallis)
kruskal.test(dv~group)

#Non-parametric Post-Hoc
library(pgirmess)
kruskalmc(dv~group) #Shows that all the groups are different (significance = TRUE)

#Non Parametric Equivalent of RM ANOVA
friedman.test(as.matrix(data))
#Post Hoc
friedmanmc(as.matrix(data))
\name{cgarchfilter-methods}
\docType{methods}
\alias{cgarchfilter}
\alias{cgarchfilter,ANY-method}
\alias{cgarchfilter,cGARCHspec-method}
\title{function: Copula-GARCH Filter}
\description{
Method for creating a Copula-GARCH filter object.
}
\usage{
cgarchfilter(spec, data, out.sample = 0, filter.control = list(n.old = NULL),
spd.control = list(lower = 0.1, upper = 0.9, type = "pwm",
kernel = "epanech"), cluster = NULL, varcoef = NULL, realizedVol = NULL, ...)
}
\arguments{
\item{spec}{ A \code{\linkS4class{cGARCHspec}} object created by calling \code{\link{cgarchspec}} with fixed parameters for the coefficients.}
\item{data}{ A multivariate xts data object or one which can be coerced to such.}
\item{out.sample}{ A positive integer indicating the number of periods before the last to keep for out of sample forecasting.}
\item{filter.control}{Control arguments passed to the filtering routine (see note below).}
\item{cluster}{ A cluster object created by calling \code{makeCluster} from the parallel package. If it is not NULL, then this will be used for parallel estimation (remember to stop the cluster on completion).}
\item{spd.control}{ If the spd transformation was chosen in the specification, the spd.control passes its arguments to the \code{spdfit} routine of the \code{spd} package.}
\item{varcoef}{ If a VAR model was chosen, then this is the VAR coefficient matrix which must be supplied. No checks are done on its dimension or correctness so it is up to the user to perform the appropriate checks.}
\item{realizedVol}{ Required xts matrix for the realGARCH model.}
\item{...}{ . }
}
\value{
A \code{\linkS4class{cGARCHfilter}} object containing details of the Copula-GARCH filter and sharing most of the methods of the \code{\linkS4class{cGARCHfit}} class.
}
\note{
The \sQuote{n.old} option in the \code{filter.control} argument is key in replicating conditions of the original fit.
That is, if you want to filter a dataset consisting of an expanded dataset (versus the original used in fitting), but want to use the same assumptions as the original dataset then the \sQuote{n.old} argument denoting the original number of data points passed to the \code{\link{cgarchfit}} function must be provided. This is then used to ensure that some calculations which make use of the full dataset (unconditional starting values for the garch filtering, the dcc model and the copula transformation methods) only use the first \sQuote{n.old} points thus replicating the original conditions making filtering appropriate for rolling 1-ahead forecasting.\cr For extensive examples look in the \sQuote{rmgarch.tests} folder. } \author{Alexios Galanos} \keyword{methods}
/man/cgarchfilter-methods.Rd
no_license
cran/rmgarch
R
false
false
2,799
rd
\name{cgarchfilter-methods}
\docType{methods}
\alias{cgarchfilter}
\alias{cgarchfilter,ANY-method}
\alias{cgarchfilter,cGARCHspec-method}
\title{function: Copula-GARCH Filter}
\description{
Method for creating a Copula-GARCH filter object.
}
\usage{
cgarchfilter(spec, data, out.sample = 0, filter.control = list(n.old = NULL),
spd.control = list(lower = 0.1, upper = 0.9, type = "pwm",
kernel = "epanech"), cluster = NULL, varcoef = NULL, realizedVol = NULL, ...)
}
\arguments{
\item{spec}{ A \code{\linkS4class{cGARCHspec}} object created by calling \code{\link{cgarchspec}} with fixed parameters for the coefficients.}
\item{data}{ A multivariate xts data object or one which can be coerced to such.}
\item{out.sample}{ A positive integer indicating the number of periods before the last to keep for out of sample forecasting.}
\item{filter.control}{Control arguments passed to the filtering routine (see note below).}
\item{cluster}{ A cluster object created by calling \code{makeCluster} from the parallel package. If it is not NULL, then this will be used for parallel estimation (remember to stop the cluster on completion).}
\item{spd.control}{ If the spd transformation was chosen in the specification, the spd.control passes its arguments to the \code{spdfit} routine of the \code{spd} package.}
\item{varcoef}{ If a VAR model was chosen, then this is the VAR coefficient matrix which must be supplied. No checks are done on its dimension or correctness so it is up to the user to perform the appropriate checks.}
\item{realizedVol}{ Required xts matrix for the realGARCH model.}
\item{...}{ . }
}
\value{
A \code{\linkS4class{cGARCHfilter}} object containing details of the Copula-GARCH filter and sharing most of the methods of the \code{\linkS4class{cGARCHfit}} class.
}
\note{
The \sQuote{n.old} option in the \code{filter.control} argument is key in replicating conditions of the original fit.
That is, if you want to filter a dataset consisting of an expanded dataset (versus the original used in fitting), but want to use the same assumptions as the original dataset then the \sQuote{n.old} argument denoting the original number of data points passed to the \code{\link{cgarchfit}} function must be provided. This is then used to ensure that some calculations which make use of the full dataset (unconditional starting values for the garch filtering, the dcc model and the copula transformation methods) only use the first \sQuote{n.old} points thus replicating the original conditions making filtering appropriate for rolling 1-ahead forecasting.\cr For extensive examples look in the \sQuote{rmgarch.tests} folder. } \author{Alexios Galanos} \keyword{methods}
# Define UI for data upload app ----
# Shiny UI for the "Fake News Detector": the user supplies text either by
# typing it or uploading a plain-text file, picks one or more classifiers,
# and submits; results render in the main panel's tabset.
ui <- fluidPage(

  # App title ----
  titlePanel(" Fake News Detector"),

  # Sidebar layout with input and output definitions ----
  sidebarLayout(

    # Sidebar panel for inputs ----
    sidebarPanel(

      # text input: paste the article text directly
      textInput(inputId = "text", label = "Text input"),

      # Horizontal line ----
      tags$hr(),

      # Input: Select a file ----
      fileInput("file1", "File input",
                multiple = FALSE,
                accept = c("text", "text/plain")),

      # showing Note
      p("Note: Use Text field or Upload file:"),

      # checkbox input: which classifiers to run
      checkboxInput("xgboost", "XGBoost"),
      checkboxInput("nbase", "NaiveBase"),
      checkboxInput("rforset", "Random Forest"),

      # submit button with name submit
      actionButton("submit", "Submit"),

      # Horizontal line ----
      tags$hr()
    ), # sidebar panel closed

    # Main panel for displaying outputs ----
    mainPanel(

      # Output: Data file ----
      tableOutput("contents"),

      # Output: Tabset w/ plot, summary, and table ----
      tabsetPanel(type = "tabs",
                  tabPanel("Plot", plotOutput("plot")),
                  tabPanel("Summary", verbatimTextOutput("summary")),
                  tabPanel("Table", tableOutput("table"))
      ), # tabset ends here

      # warning for wrong file upload
      h1(textOutput("warning1"))
    ) # main panel closed
  ) # sidebar layout closed
) # fluid page closed
/ver3/ui.r
no_license
farooqsaqib2017/ShinyApp-Ver1
R
false
false
1,822
r
# Define UI for data upload app ---- ui <- fluidPage( # App title ---- titlePanel(" Fake News Detector"), # Sidebar layout with input and output definitions ---- sidebarLayout( # Sidebar panel for inputs ---- sidebarPanel( #text input textInput(inputId = "text", label = "Text input" ), # Horizontal line ---- tags$hr(), # Input: Select a file ---- fileInput("file1", "File input", multiple = FALSE, accept = c("text", "text/plain" )), #showing Note p("Note: Use Text field or Upload file:"), #checkbox input checkboxInput("xgboost","XGBoost"), checkboxInput("nbase","NaiveBase"), checkboxInput("rforset","Random Forest"), #submit button with name submit actionButton("submit","Submit"), # Horizontal line ---- tags$hr() ), # slidebar panel closed # Main panel for displaying outputs ---- mainPanel( # Output: Data file ---- tableOutput("contents"), #Bar Chart # Output: Tabset w/ plot, summary, and table ---- tabsetPanel(type = "tabs", tabPanel("Plot", plotOutput("plot")), tabPanel("Summary", verbatimTextOutput("summary")), tabPanel("Table", tableOutput("table")) ), # Bar Ends here #warning for wrong file upload h1(textOutput("warning1")) ) # main panel closed ) # slidebar layout closed ) # fluid page closed
########## FINAL SYMPTOM 2 ########## setwd("/Users/gildamatteucci/OneDrive - Politecnico di Milano/PROGETTO BAYESIANA/Modified_Data/Filtered_NEW") symp <- read.csv("Filtered_ALSHistory_Subject_symptoms.csv", header = T) sy <- symp library(plyr) # specifico il sintomo che c'è in other levels(sy$FINAL_SYMPTOM) <- c(levels(sy$FINAL_SYMPTOM), levels(sy$FINAL_SYMPTOM_OTHER)) sy$FINAL_SYMPTOM[sy$FINAL_SYMPTOM=="OTHER" & sy$FINAL_SYMPTOM_OTHER!=""]<-sy$FINAL_SYMPTOM_OTHER[sy$FINAL_SYMPTOM=="OTHER" & sy$FINAL_SYMPTOM_OTHER!=""] sy[,"FINAL_SYMPTOM_OTHER"]<- NULL # levels(sy$LOCATION2) <- c(levels(sy$LOCATION2),"") levels(sy$LOCATION3) <- c(levels(sy$LOCATION3),"") levels(sy$LOCATION4) <- c(levels(sy$LOCATION4),"") levels(sy$LOCATION1) <- c(levels(sy$LOCATION1), levels(sy$LOCATION2), levels(sy$LOCATION3), levels(sy$LOCATION4)) N <- dim(sy)[1] for (i in 1:N) { if (sy$LOCATION2[i]!="") { temp <- data.frame(sy$Subject_id[i], sy$FINAL_SYMPTOM[i], sy$LOCATION2[i],"","","") colnames(temp) = names(sy) sy <- rbind(sy,temp) if (sy$LOCATION3[i]!="") { temp <- data.frame(sy$Subject_id[i], sy$FINAL_SYMPTOM[i], sy$LOCATION3[i],"","","") colnames(temp) = names(sy) sy <- rbind(sy,temp) if (sy$LOCATION4[i]!="") { temp <- data.frame(sy$Subject_id[i], sy$FINAL_SYMPTOM[i], sy$LOCATION4[i],"","","") colnames(temp) = names(sy) sy <- rbind(sy,temp) } } } } sy <-sy[order(sy$Subject_id),] sy[,"LOCATION2"]<- NULL sy[,"LOCATION3"]<- NULL sy[,"LOCATION4"]<- NULL # Metto UNKNOWN dove c'è NA su location levels(sy$LOCATION1) <- c(levels(sy$LOCATION1), "UNKNOWN") sy$LOCATION1[is.na(sy$LOCATION1)]<-"UNKNOWN" N <- dim(sy)[1] newCol <- rep(NA,N) for (i in 1:N) { newCol[i] <- paste(sy$FINAL_SYMPTOM[i],"-",sy$LOCATION1[i]) } newCol <- data.frame(sy$Subject_id,newCol) colnames(newCol)[2] = "Symp" library(fastDummies) ciccio <- dummy_cols(newCol,select_columns = "Symp") ciccio <- aggregate(.~ sy.Subject_id, ciccio, sum) colnames(ciccio)[1] = "subject_id" ciccio[,"Symp"]<- NULL symp <- ciccio write.csv(symp, 
"Filtered_R_ALSHistory_Subject_symptoms.csv", row.names = FALSE)
/Dataset&Filter/Filtering script/Filter_R_ALS_Subject_symptoms.R
no_license
DanielSakir/Bayesian-SLA-project
R
false
false
2,169
r
########## FINAL SYMPTOM 2 ########## setwd("/Users/gildamatteucci/OneDrive - Politecnico di Milano/PROGETTO BAYESIANA/Modified_Data/Filtered_NEW") symp <- read.csv("Filtered_ALSHistory_Subject_symptoms.csv", header = T) sy <- symp library(plyr) # specifico il sintomo che c'è in other levels(sy$FINAL_SYMPTOM) <- c(levels(sy$FINAL_SYMPTOM), levels(sy$FINAL_SYMPTOM_OTHER)) sy$FINAL_SYMPTOM[sy$FINAL_SYMPTOM=="OTHER" & sy$FINAL_SYMPTOM_OTHER!=""]<-sy$FINAL_SYMPTOM_OTHER[sy$FINAL_SYMPTOM=="OTHER" & sy$FINAL_SYMPTOM_OTHER!=""] sy[,"FINAL_SYMPTOM_OTHER"]<- NULL # levels(sy$LOCATION2) <- c(levels(sy$LOCATION2),"") levels(sy$LOCATION3) <- c(levels(sy$LOCATION3),"") levels(sy$LOCATION4) <- c(levels(sy$LOCATION4),"") levels(sy$LOCATION1) <- c(levels(sy$LOCATION1), levels(sy$LOCATION2), levels(sy$LOCATION3), levels(sy$LOCATION4)) N <- dim(sy)[1] for (i in 1:N) { if (sy$LOCATION2[i]!="") { temp <- data.frame(sy$Subject_id[i], sy$FINAL_SYMPTOM[i], sy$LOCATION2[i],"","","") colnames(temp) = names(sy) sy <- rbind(sy,temp) if (sy$LOCATION3[i]!="") { temp <- data.frame(sy$Subject_id[i], sy$FINAL_SYMPTOM[i], sy$LOCATION3[i],"","","") colnames(temp) = names(sy) sy <- rbind(sy,temp) if (sy$LOCATION4[i]!="") { temp <- data.frame(sy$Subject_id[i], sy$FINAL_SYMPTOM[i], sy$LOCATION4[i],"","","") colnames(temp) = names(sy) sy <- rbind(sy,temp) } } } } sy <-sy[order(sy$Subject_id),] sy[,"LOCATION2"]<- NULL sy[,"LOCATION3"]<- NULL sy[,"LOCATION4"]<- NULL # Metto UNKNOWN dove c'è NA su location levels(sy$LOCATION1) <- c(levels(sy$LOCATION1), "UNKNOWN") sy$LOCATION1[is.na(sy$LOCATION1)]<-"UNKNOWN" N <- dim(sy)[1] newCol <- rep(NA,N) for (i in 1:N) { newCol[i] <- paste(sy$FINAL_SYMPTOM[i],"-",sy$LOCATION1[i]) } newCol <- data.frame(sy$Subject_id,newCol) colnames(newCol)[2] = "Symp" library(fastDummies) ciccio <- dummy_cols(newCol,select_columns = "Symp") ciccio <- aggregate(.~ sy.Subject_id, ciccio, sum) colnames(ciccio)[1] = "subject_id" ciccio[,"Symp"]<- NULL symp <- ciccio write.csv(symp, 
"Filtered_R_ALSHistory_Subject_symptoms.csv", row.names = FALSE)
library("RMySQL") ucscDb <- dbConnect(MySQL(), user="genome", db="hg19", host="genome-mysql.cse.ucsc.edu") allTables <- dbListTables(ucscDb) length(allTables) dbDisconnect(ucscDb) allTables[1:5]
/GettingAndCleaningData/QuizTestingWeek2.R
no_license
ashtearty/datasciencecoursera-1
R
false
false
217
r
library("RMySQL") ucscDb <- dbConnect(MySQL(), user="genome", db="hg19", host="genome-mysql.cse.ucsc.edu") allTables <- dbListTables(ucscDb) length(allTables) dbDisconnect(ucscDb) allTables[1:5]
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_advarsel.R \name{get_advarsel} \alias{get_advarsel} \title{Get and parse the alarmeringsapp rss feed} \usage{ get_advarsel(rss_feed = "https://alarmeringsapp.like.st/rss") } \arguments{ \item{rss_feed}{is the url to the advarsels rss feed} } \value{ a data frame of the ten latest } \description{ Get and parse the alarmeringsapp rss feed } \examples{ library(alarmeringsapp) alarms <- get_advarsel() }
/man/get_advarsel.Rd
no_license
mikkelkrogsholm/alarmeringsapp
R
false
true
485
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_advarsel.R \name{get_advarsel} \alias{get_advarsel} \title{Get and parse the alarmeringsapp rss feed} \usage{ get_advarsel(rss_feed = "https://alarmeringsapp.like.st/rss") } \arguments{ \item{rss_feed}{is the url to the advarsels rss feed} } \value{ a data frame of the ten latest } \description{ Get and parse the alarmeringsapp rss feed } \examples{ library(alarmeringsapp) alarms <- get_advarsel() }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Functions_BSS_SGL.R \name{prox.nuclear.func.fLS} \alias{prox.nuclear.func.fLS} \title{Proximal function for nuclear norm penalty} \usage{ prox.nuclear.func.fLS(y, A, b, L, lambda, AtA, Atb) } \arguments{ \item{y}{model parameter} \item{A}{design matrix} \item{b}{correspond vector, or matrix} \item{L}{learning rate} \item{lambda}{tuning parameter} \item{AtA}{Gram matrix obtained by design matrix} \item{Atb}{inner product for design matrix A and correspond vector b} } \value{ value of proximal function } \description{ Proximal function for nuclear norm penalty } \keyword{internal}
/man/prox.nuclear.func.fLS.Rd
no_license
peiliangbai92/VARDetect
R
false
true
670
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Functions_BSS_SGL.R \name{prox.nuclear.func.fLS} \alias{prox.nuclear.func.fLS} \title{Proximal function for nuclear norm penalty} \usage{ prox.nuclear.func.fLS(y, A, b, L, lambda, AtA, Atb) } \arguments{ \item{y}{model parameter} \item{A}{design matrix} \item{b}{correspond vector, or matrix} \item{L}{learning rate} \item{lambda}{tuning parameter} \item{AtA}{Gram matrix obtained by design matrix} \item{Atb}{inner product for design matrix A and correspond vector b} } \value{ value of proximal function } \description{ Proximal function for nuclear norm penalty } \keyword{internal}
library("dplyr") library("readxl") library("partykit") library("rpart") library("caTools") library("caret") Data <- read_xlsx("C:/Users/User/Desktop/SEM-1/Data Analytics/ProjectData.xlsx") str(Data) df <- data.frame(Data[,-1]) #remove ID from dataframe Target=ifelse(df$Response==1,'Y','N') df <- data.frame(df, Target) #add Target to the dataframe df <- df[,-1] #remove Response df <- mutate(df,Y1=factor(Y1),Y2=factor(Y2),Y3=factor(Y3),Y4=factor(Y4),Y5=factor(Y5),Y6=factor(Y6),Y7=factor(Y7)) str(df) summary(df) #split into train and test sets set.seed(123) sample= sample.split(df$Target,SplitRatio= 0.75) train=subset(df, sample==TRUE) test= subset(df, sample==FALSE) # All X's, Y's and groups df_with_all_X_and_Y_all_groups <- train DT_Model_XY_all_groups <- rpart(Target~., data=df_with_all_X_and_Y_all_groups, control=rpart.control(minsplit=30, minbucket=15, maxdepth=4 )) plot(as.party(DT_Model_XY_all_groups)) # checking accuracy using confusion matrix tree.predicted<- predict(DT_Model_XY_all_groups,test, type='class') confusionMatrix(tree.predicted , test$Target) # all Y's - for all groups df_excluding_X <- within(train, rm(X1, X2, X3, X4, X5, X6, X7)) DT_Model_excluding_X <- rpart(Target~., data=df_excluding_X, control=rpart.control(minsplit=30, minbucket=15, maxdepth=4 )) plot(as.party(DT_Model_excluding_X)) # checking accuracy using confusion matrix tree.predicted<- predict(DT_Model_excluding_X,test, type='class') confusionMatrix(tree.predicted , test$Target) # all X's - for all groups df_excluding_Y <- within(train, rm(Y1, Y2, Y3, Y4, Y5, Y6, Y7)) DT_Model_excluding_Y <- rpart(Target~., data=df_excluding_Y, control=rpart.control(minsplit=30, minbucket=15, maxdepth=4 )) plot(as.party(DT_Model_excluding_Y)) # checking accuracy using confusion matrix tree.predicted<- predict(DT_Model_excluding_Y,test, type='class') confusionMatrix(tree.predicted , test$Target) # All X,Y, group 0 df_all_XY_group0 <- train[train$Group == 0,] DT_Model_all_XY_group0 <- rpart(Target~., 
data=df_all_XY_group0, control=rpart.control(minsplit=30, minbucket=15, maxdepth=4 )) plot(as.party(DT_Model_all_XY_group0)) # checking accuracy using confusion matrix tree.predicted<- predict(DT_Model_all_XY_group0,test, type='class') confusionMatrix(tree.predicted , test$Target) # all X, group 0 df_all_X_group0 <- train[train$Group == 0,] df_all_X_group0 <- within(df_all_X_group0, rm(Y1, Y2, Y3, Y4, Y5, Y6, Y7)) DT_Model_all_X_group0 <- rpart(Target~., data=df_all_X_group0, control=rpart.control(minsplit=30, minbucket=15, maxdepth=4 )) plot(as.party(DT_Model_all_X_group0)) # checking accuracy using confusion matrix tree.predicted<- predict(DT_Model_all_X_group0,test, type='class') confusionMatrix(tree.predicted , test$Target) # all Y, group 0 df_all_Y_group0 <- train[train$Group == 0,] df_all_Y_group0 <- within(df_all_Y_group0, rm(X1, X2, X3, X4, X5, X6, X7)) DT_Model_all_Y_group0 <- rpart(Target~., data=df_all_Y_group0, control=rpart.control(minsplit=30, minbucket=15, maxdepth=4 )) plot(as.party(DT_Model_all_Y_group0)) # checking accuracy using confusion matrix tree.predicted<- predict(DT_Model_all_Y_group0,test, type='class') confusionMatrix(tree.predicted , test$Target) # All X,Y, group 1 df_all_XY_group1 <- train[train$Group == 1,] DT_Model_all_XY_group1 <- rpart(Target~., data=df_all_XY_group1, control=rpart.control(minsplit=30, minbucket=15, maxdepth=4 )) plot(as.party(DT_Model_all_XY_group1)) # checking accuracy using confusion matrix tree.predicted<- predict(DT_Model_excluding_X,test, type='class') confusionMatrix(tree.predicted , test$Target) # all X, group 1 df_all_X_group1 <- train[train$Group == 1,] df_all_X_group1 <- within(df_all_X_group1, rm(Y1, Y2, Y3, Y4, Y5, Y6, Y7)) DT_Model_all_X_group1 <- rpart(Target~., data=df_all_X_group1, control=rpart.control(minsplit=30, minbucket=15, maxdepth=4 )) plot(as.party(DT_Model_all_X_group1)) # checking accuracy using confusion matrix tree.predicted<- predict(DT_Model_all_X_group1,test, type='class') 
confusionMatrix(tree.predicted , test$Target) # all Y, group 1 df_all_Y_group1 <- train[train$Group == 1,] df_all_Y_group1 <- within(df_all_Y_group1, rm(X1, X2, X3, X4, X5, X6, X7)) DT_Model_all_Y_group1 <- rpart(Target~., data=df_all_Y_group1, control=rpart.control(minsplit=30, minbucket=15, maxdepth=4 )) plot(as.party(DT_Model_all_Y_group1)) # checking accuracy using confusion matrix tree.predicted<- predict(DT_Model_all_Y_group1,test, type='class') confusionMatrix(tree.predicted , test$Target)
/Project.R
no_license
basithamid/DA-Assignments
R
false
false
5,935
r
library("dplyr") library("readxl") library("partykit") library("rpart") library("caTools") library("caret") Data <- read_xlsx("C:/Users/User/Desktop/SEM-1/Data Analytics/ProjectData.xlsx") str(Data) df <- data.frame(Data[,-1]) #remove ID from dataframe Target=ifelse(df$Response==1,'Y','N') df <- data.frame(df, Target) #add Target to the dataframe df <- df[,-1] #remove Response df <- mutate(df,Y1=factor(Y1),Y2=factor(Y2),Y3=factor(Y3),Y4=factor(Y4),Y5=factor(Y5),Y6=factor(Y6),Y7=factor(Y7)) str(df) summary(df) #split into train and test sets set.seed(123) sample= sample.split(df$Target,SplitRatio= 0.75) train=subset(df, sample==TRUE) test= subset(df, sample==FALSE) # All X's, Y's and groups df_with_all_X_and_Y_all_groups <- train DT_Model_XY_all_groups <- rpart(Target~., data=df_with_all_X_and_Y_all_groups, control=rpart.control(minsplit=30, minbucket=15, maxdepth=4 )) plot(as.party(DT_Model_XY_all_groups)) # checking accuracy using confusion matrix tree.predicted<- predict(DT_Model_XY_all_groups,test, type='class') confusionMatrix(tree.predicted , test$Target) # all Y's - for all groups df_excluding_X <- within(train, rm(X1, X2, X3, X4, X5, X6, X7)) DT_Model_excluding_X <- rpart(Target~., data=df_excluding_X, control=rpart.control(minsplit=30, minbucket=15, maxdepth=4 )) plot(as.party(DT_Model_excluding_X)) # checking accuracy using confusion matrix tree.predicted<- predict(DT_Model_excluding_X,test, type='class') confusionMatrix(tree.predicted , test$Target) # all X's - for all groups df_excluding_Y <- within(train, rm(Y1, Y2, Y3, Y4, Y5, Y6, Y7)) DT_Model_excluding_Y <- rpart(Target~., data=df_excluding_Y, control=rpart.control(minsplit=30, minbucket=15, maxdepth=4 )) plot(as.party(DT_Model_excluding_Y)) # checking accuracy using confusion matrix tree.predicted<- predict(DT_Model_excluding_Y,test, type='class') confusionMatrix(tree.predicted , test$Target) # All X,Y, group 0 df_all_XY_group0 <- train[train$Group == 0,] DT_Model_all_XY_group0 <- rpart(Target~., 
data=df_all_XY_group0, control=rpart.control(minsplit=30, minbucket=15, maxdepth=4 )) plot(as.party(DT_Model_all_XY_group0)) # checking accuracy using confusion matrix tree.predicted<- predict(DT_Model_all_XY_group0,test, type='class') confusionMatrix(tree.predicted , test$Target) # all X, group 0 df_all_X_group0 <- train[train$Group == 0,] df_all_X_group0 <- within(df_all_X_group0, rm(Y1, Y2, Y3, Y4, Y5, Y6, Y7)) DT_Model_all_X_group0 <- rpart(Target~., data=df_all_X_group0, control=rpart.control(minsplit=30, minbucket=15, maxdepth=4 )) plot(as.party(DT_Model_all_X_group0)) # checking accuracy using confusion matrix tree.predicted<- predict(DT_Model_all_X_group0,test, type='class') confusionMatrix(tree.predicted , test$Target) # all Y, group 0 df_all_Y_group0 <- train[train$Group == 0,] df_all_Y_group0 <- within(df_all_Y_group0, rm(X1, X2, X3, X4, X5, X6, X7)) DT_Model_all_Y_group0 <- rpart(Target~., data=df_all_Y_group0, control=rpart.control(minsplit=30, minbucket=15, maxdepth=4 )) plot(as.party(DT_Model_all_Y_group0)) # checking accuracy using confusion matrix tree.predicted<- predict(DT_Model_all_Y_group0,test, type='class') confusionMatrix(tree.predicted , test$Target) # All X,Y, group 1 df_all_XY_group1 <- train[train$Group == 1,] DT_Model_all_XY_group1 <- rpart(Target~., data=df_all_XY_group1, control=rpart.control(minsplit=30, minbucket=15, maxdepth=4 )) plot(as.party(DT_Model_all_XY_group1)) # checking accuracy using confusion matrix tree.predicted<- predict(DT_Model_excluding_X,test, type='class') confusionMatrix(tree.predicted , test$Target) # all X, group 1 df_all_X_group1 <- train[train$Group == 1,] df_all_X_group1 <- within(df_all_X_group1, rm(Y1, Y2, Y3, Y4, Y5, Y6, Y7)) DT_Model_all_X_group1 <- rpart(Target~., data=df_all_X_group1, control=rpart.control(minsplit=30, minbucket=15, maxdepth=4 )) plot(as.party(DT_Model_all_X_group1)) # checking accuracy using confusion matrix tree.predicted<- predict(DT_Model_all_X_group1,test, type='class') 
confusionMatrix(tree.predicted , test$Target) # all Y, group 1 df_all_Y_group1 <- train[train$Group == 1,] df_all_Y_group1 <- within(df_all_Y_group1, rm(X1, X2, X3, X4, X5, X6, X7)) DT_Model_all_Y_group1 <- rpart(Target~., data=df_all_Y_group1, control=rpart.control(minsplit=30, minbucket=15, maxdepth=4 )) plot(as.party(DT_Model_all_Y_group1)) # checking accuracy using confusion matrix tree.predicted<- predict(DT_Model_all_Y_group1,test, type='class') confusionMatrix(tree.predicted , test$Target)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/util_gfft.R \name{drop_nodes} \alias{drop_nodes} \title{Drop a node from an FFT definition} \usage{ drop_nodes(fft, nodes = NA, quiet = FALSE) } \arguments{ \item{fft}{One FFT definition (as a data frame in tidy format, with one row per node).} \item{nodes}{The FFT nodes to drop (as an integer vector). Default: \code{nodes = NA}.} \item{quiet}{Hide feedback messages (as logical)? Default: \code{quiet = FALSE}.} } \value{ One FFT definition (as a data frame in tidy format, with one row per node). } \description{ \code{drop_nodes} deletes one or more \code{nodes} from an existing FFT definition (by removing the corresponding rows from the FFT definition in the tidy data frame format). When dropping the final node, the last remaining node becomes the new final node (i.e., gains a second exit). Duplicates in \code{nodes} are dropped only once (rather than incrementally) and \code{nodes} not in the range \code{1:nrow(fft)} are ignored. Dropping all nodes yields an error. \code{drop_nodes} is the inverse function of \code{\link{select_nodes}}. Inserting new nodes is possible by \code{\link{add_nodes}}. } \seealso{ \code{\link{add_nodes}} for adding nodes to an FFT definition; \code{\link{edit_nodes}} for editing nodes in an FFT definition; \code{\link{select_nodes}} for selecting nodes in an FFT definition; \code{\link{get_fft_df}} for getting the FFT definitions of an \code{FFTrees} object; \code{\link{read_fft_df}} for reading one FFT definition from tree definitions; \code{\link{add_fft_df}} for adding FFTs to tree definitions; \code{\link{FFTrees}} for creating FFTs from and applying them to data. 
Other tree definition and manipulation functions: \code{\link{add_fft_df}()}, \code{\link{add_nodes}()}, \code{\link{edit_nodes}()}, \code{\link{flip_exits}()}, \code{\link{get_fft_df}()}, \code{\link{read_fft_df}()}, \code{\link{reorder_nodes}()}, \code{\link{select_nodes}()}, \code{\link{write_fft_df}()} } \concept{tree definition and manipulation functions}
/man/drop_nodes.Rd
no_license
ndphillips/FFTrees
R
false
true
2,071
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/util_gfft.R \name{drop_nodes} \alias{drop_nodes} \title{Drop a node from an FFT definition} \usage{ drop_nodes(fft, nodes = NA, quiet = FALSE) } \arguments{ \item{fft}{One FFT definition (as a data frame in tidy format, with one row per node).} \item{nodes}{The FFT nodes to drop (as an integer vector). Default: \code{nodes = NA}.} \item{quiet}{Hide feedback messages (as logical)? Default: \code{quiet = FALSE}.} } \value{ One FFT definition (as a data frame in tidy format, with one row per node). } \description{ \code{drop_nodes} deletes one or more \code{nodes} from an existing FFT definition (by removing the corresponding rows from the FFT definition in the tidy data frame format). When dropping the final node, the last remaining node becomes the new final node (i.e., gains a second exit). Duplicates in \code{nodes} are dropped only once (rather than incrementally) and \code{nodes} not in the range \code{1:nrow(fft)} are ignored. Dropping all nodes yields an error. \code{drop_nodes} is the inverse function of \code{\link{select_nodes}}. Inserting new nodes is possible by \code{\link{add_nodes}}. } \seealso{ \code{\link{add_nodes}} for adding nodes to an FFT definition; \code{\link{edit_nodes}} for editing nodes in an FFT definition; \code{\link{select_nodes}} for selecting nodes in an FFT definition; \code{\link{get_fft_df}} for getting the FFT definitions of an \code{FFTrees} object; \code{\link{read_fft_df}} for reading one FFT definition from tree definitions; \code{\link{add_fft_df}} for adding FFTs to tree definitions; \code{\link{FFTrees}} for creating FFTs from and applying them to data. 
Other tree definition and manipulation functions: \code{\link{add_fft_df}()}, \code{\link{add_nodes}()}, \code{\link{edit_nodes}()}, \code{\link{flip_exits}()}, \code{\link{get_fft_df}()}, \code{\link{read_fft_df}()}, \code{\link{reorder_nodes}()}, \code{\link{select_nodes}()}, \code{\link{write_fft_df}()} } \concept{tree definition and manipulation functions}
# To run this make sure these packages have been installed (to install them run # file with the next line of code uncommented, you only need to do this once per # machine) # install.packages(c("httr", "jsonlite", "dplyr", "tidyr")) library(httr) library(jsonlite) library(dplyr) library(tidyr) # Sets up information for TBA API source("api_key.R") tba_auth <- api_key.R base <- "www.thebluealliance.com/api/v3/" # Change these values year <- 2018 team <- 2557 event <- "tur" get_opr <- function(year_opr, event_opr) { # Queries TBA for match data for 2557 at an event query <- paste0("event/", year_opr, event_opr, "/oprs") url <- paste0(base, query) API_KEY <- " EdSrcK5eTwGAwsnfIuBbS8HUTe4nK4GlMIYC8AwPNDaPSgNmKPaGTrZwDLhqVzfR " httpResponse <- GET(url, add_headers("X-TBA-Auth-Key" = API_KEY), accept_json()) fromJSON(content(httpResponse, "text")) } get_matches <- function(year_matches, team_matches, event_matches) { query <- paste0( "team/frc", team_matches, "/event/", year_matches, event_matches, "/matches" ) url <- paste0(base, query) API_KEY <- " EdSrcK5eTwGAwsnfIuBbS8HUTe4nK4GlMIYC8AwPNDaPSgNmKPaGTrZwDLhqVzfR " httpResponse <- GET(url, add_headers("X-TBA-Auth-Key" = API_KEY), accept_json()) fromJSON(content(httpResponse, "text")) } get_team_opr <- function(team) { oprs[[team]] } get_team_dpr <- function(team) { dprs[[team]] } opr_results <- get_opr(year, event) match_results <- get_matches(year, team, event) # ccwm is "Calculated Contribution to Winning Margin" ccwms <- opr_results[["ccwms"]] oprs <- opr_results[["oprs"]] dprs <- opr_results[["dprs"]] both_alliances <- match_results %>% select(alliances) comp_levels <- match_results[["comp_level"]] levels_num <- gsub("qm", 1, comp_levels) levels_num <- gsub("qf", 2, levels_num) levels_num <- gsub("sf", 3, levels_num) levels_num <- gsub("f", 4, levels_num) match_results$levels_num <- levels_num red_alliance <- both_alliances[, 1]$red red_score <- red_alliance %>% select(score) red_score <- as.numeric(red_score[, 1]) 
red_alliance_teams <- do.call(rbind, red_alliance[["team_keys"]]) red_alliance_teams <- as.data.frame(red_alliance_teams, stringsAsFactors = FALSE) colnames(red_alliance_teams) <- c("robot1", "robot2", "robot3") red_alliance_teams <- red_alliance_teams %>% mutate( "match_number" = match_results$match_number, "levels_num" = match_results$levels_num) red_alliance_teams <- red_alliance_teams %>% arrange(match_number, levels_num) red_robot1_opr <- signif(sapply(red_alliance_teams[["robot1"]], get_team_opr, USE.NAMES = FALSE ), 4) red_robot2_opr <- signif(sapply(red_alliance_teams[["robot2"]], get_team_opr, USE.NAMES = FALSE ), 4) red_robot3_opr <- signif(sapply(red_alliance_teams[["robot3"]], get_team_opr, USE.NAMES = FALSE ), 4) red_robot1_dpr <- signif(sapply(red_alliance_teams[["robot1"]], get_team_dpr, USE.NAMES = FALSE ), 4) red_robot2_dpr <- signif(sapply(red_alliance_teams[["robot2"]], get_team_dpr, USE.NAMES = FALSE ), 4) red_robot3_dpr <- signif(sapply(red_alliance_teams[["robot3"]], get_team_dpr, USE.NAMES = FALSE ), 4) blue_alliance <- both_alliances[, 1]$blue blue_score <- blue_alliance %>% select(score) blue_score <- as.numeric(blue_score[, 1]) blue_alliance_teams <- do.call(rbind, blue_alliance[["team_keys"]]) blue_alliance_teams <- as.data.frame(blue_alliance_teams, stringsAsFactors = FALSE) colnames(blue_alliance_teams) <- c("robot1", "robot2", "robot3") blue_alliance_teams <- blue_alliance_teams %>% mutate( "match_number" = match_results$match_number, "levels_num" = match_results$levels_num) blue_alliance_teams <- blue_alliance_teams %>% arrange(match_number, levels_num) blue_robot1_opr <- signif(sapply(blue_alliance_teams[["robot1"]], get_team_opr, USE.NAMES = FALSE ), 4) blue_robot2_opr <- signif(sapply(blue_alliance_teams[["robot2"]], get_team_opr, USE.NAMES = FALSE ), 4) blue_robot3_opr <- signif(sapply(blue_alliance_teams[["robot3"]], get_team_opr, USE.NAMES = FALSE ), 4) blue_robot1_dpr <- signif(sapply(blue_alliance_teams[["robot1"]], 
get_team_dpr, USE.NAMES = FALSE ), 4) blue_robot2_dpr <- signif(sapply(blue_alliance_teams[["robot2"]], get_team_dpr, USE.NAMES = FALSE ), 4) blue_robot3_dpr <- signif(sapply(blue_alliance_teams[["robot3"]], get_team_dpr, USE.NAMES = FALSE ), 4) result_df <- match_results %>% select(match_number, winning_alliance) result_df <- result_df %>% mutate( "comp_level" = comp_levels, "level_num" = levels_num ) result_df <- result_df %>% arrange(levels_num, match_number) result_df <- result_df %>% mutate( "red_average_opr" = signif((red_robot1_opr + red_robot2_opr + red_robot3_opr) / 3, 4), "blue_average_opr" = signif((blue_robot1_opr + blue_robot2_opr + blue_robot3_opr) / 3, 4), "red_average_dpr" = signif((red_robot1_dpr + red_robot2_dpr + red_robot3_dpr) / 3, 4), "blue_average_dpr" = signif((blue_robot1_dpr + blue_robot2_dpr + blue_robot3_dpr) / 3, 4), "red_robot1" = paste(red_alliance_teams[["robot1"]], paste0( "opr =", red_robot1_opr, " dpr =", red_robot1_dpr )), "red_robot2" = paste(red_alliance_teams[["robot2"]], paste0( "opr =", red_robot2_opr, " dpr =", red_robot2_dpr )), "red_robot3" = paste(red_alliance_teams[["robot3"]], paste0( "opr =", red_robot3_opr, " dpr =", red_robot3_dpr )), "blue_robot1" = paste(blue_alliance_teams[["robot1"]], paste0( "opr =", blue_robot1_opr, " dpr =", blue_robot1_dpr )), "blue_robot2" = paste(blue_alliance_teams[["robot2"]], paste0( "opr =", blue_robot2_opr, " dpr =", blue_robot2_dpr )), "blue_robot3" = paste(blue_alliance_teams[["robot3"]], paste0( "opr =", blue_robot3_opr, " dpr =", blue_robot3_dpr )) ) result_df <- result_df %>% mutate( "red_adjusted_score" = 2 / 3 * red_average_opr + 1 / 2 * red_average_dpr, "blue_adjusted_score" = 2 / 3 * blue_average_opr + 1 / 2 * blue_average_dpr ) prediction <- result_df$red_adjusted_score > result_df$blue_adjusted_score prediction <- gsub(TRUE, "red", prediction) prediction <- gsub(FALSE, "blue", prediction) result_df <- result_df %>% mutate("predicted_winner" = prediction) rownames(result_df) 
<- paste0(result_df$comp_level, result_df$match_number) # result_df <- result_df[, c(1, 2, 13, 5, 6, 7, 8, 9, 10, 11, 12, 3, 4)] # result_df <- result_df[, c(-1, -12, -13)] print_df <- result_df %>% subset(select = c( winning_alliance, predicted_winner, red_average_opr, red_average_dpr, red_robot1, red_robot2, red_robot3, blue_robot1, blue_robot2, blue_robot3 )) result_file <- paste0(team, event, year, "_oprs.csv") write.csv(print_df, file = result_file, row.names = TRUE)
/OPR.R
no_license
OscarLewis/FRCR
R
false
false
6,668
r
# To run this make sure these packages have been installed (to install them run # file with the next line of code uncommented, you only need to do this once per # machine) # install.packages(c("httr", "jsonlite", "dplyr", "tidyr")) library(httr) library(jsonlite) library(dplyr) library(tidyr) # Sets up information for TBA API source("api_key.R") tba_auth <- api_key.R base <- "www.thebluealliance.com/api/v3/" # Change these values year <- 2018 team <- 2557 event <- "tur" get_opr <- function(year_opr, event_opr) { # Queries TBA for match data for 2557 at an event query <- paste0("event/", year_opr, event_opr, "/oprs") url <- paste0(base, query) API_KEY <- " EdSrcK5eTwGAwsnfIuBbS8HUTe4nK4GlMIYC8AwPNDaPSgNmKPaGTrZwDLhqVzfR " httpResponse <- GET(url, add_headers("X-TBA-Auth-Key" = API_KEY), accept_json()) fromJSON(content(httpResponse, "text")) } get_matches <- function(year_matches, team_matches, event_matches) { query <- paste0( "team/frc", team_matches, "/event/", year_matches, event_matches, "/matches" ) url <- paste0(base, query) API_KEY <- " EdSrcK5eTwGAwsnfIuBbS8HUTe4nK4GlMIYC8AwPNDaPSgNmKPaGTrZwDLhqVzfR " httpResponse <- GET(url, add_headers("X-TBA-Auth-Key" = API_KEY), accept_json()) fromJSON(content(httpResponse, "text")) } get_team_opr <- function(team) { oprs[[team]] } get_team_dpr <- function(team) { dprs[[team]] } opr_results <- get_opr(year, event) match_results <- get_matches(year, team, event) # ccwm is "Calculated Contribution to Winning Margin" ccwms <- opr_results[["ccwms"]] oprs <- opr_results[["oprs"]] dprs <- opr_results[["dprs"]] both_alliances <- match_results %>% select(alliances) comp_levels <- match_results[["comp_level"]] levels_num <- gsub("qm", 1, comp_levels) levels_num <- gsub("qf", 2, levels_num) levels_num <- gsub("sf", 3, levels_num) levels_num <- gsub("f", 4, levels_num) match_results$levels_num <- levels_num red_alliance <- both_alliances[, 1]$red red_score <- red_alliance %>% select(score) red_score <- as.numeric(red_score[, 1]) 
red_alliance_teams <- do.call(rbind, red_alliance[["team_keys"]]) red_alliance_teams <- as.data.frame(red_alliance_teams, stringsAsFactors = FALSE) colnames(red_alliance_teams) <- c("robot1", "robot2", "robot3") red_alliance_teams <- red_alliance_teams %>% mutate( "match_number" = match_results$match_number, "levels_num" = match_results$levels_num) red_alliance_teams <- red_alliance_teams %>% arrange(match_number, levels_num) red_robot1_opr <- signif(sapply(red_alliance_teams[["robot1"]], get_team_opr, USE.NAMES = FALSE ), 4) red_robot2_opr <- signif(sapply(red_alliance_teams[["robot2"]], get_team_opr, USE.NAMES = FALSE ), 4) red_robot3_opr <- signif(sapply(red_alliance_teams[["robot3"]], get_team_opr, USE.NAMES = FALSE ), 4) red_robot1_dpr <- signif(sapply(red_alliance_teams[["robot1"]], get_team_dpr, USE.NAMES = FALSE ), 4) red_robot2_dpr <- signif(sapply(red_alliance_teams[["robot2"]], get_team_dpr, USE.NAMES = FALSE ), 4) red_robot3_dpr <- signif(sapply(red_alliance_teams[["robot3"]], get_team_dpr, USE.NAMES = FALSE ), 4) blue_alliance <- both_alliances[, 1]$blue blue_score <- blue_alliance %>% select(score) blue_score <- as.numeric(blue_score[, 1]) blue_alliance_teams <- do.call(rbind, blue_alliance[["team_keys"]]) blue_alliance_teams <- as.data.frame(blue_alliance_teams, stringsAsFactors = FALSE) colnames(blue_alliance_teams) <- c("robot1", "robot2", "robot3") blue_alliance_teams <- blue_alliance_teams %>% mutate( "match_number" = match_results$match_number, "levels_num" = match_results$levels_num) blue_alliance_teams <- blue_alliance_teams %>% arrange(match_number, levels_num) blue_robot1_opr <- signif(sapply(blue_alliance_teams[["robot1"]], get_team_opr, USE.NAMES = FALSE ), 4) blue_robot2_opr <- signif(sapply(blue_alliance_teams[["robot2"]], get_team_opr, USE.NAMES = FALSE ), 4) blue_robot3_opr <- signif(sapply(blue_alliance_teams[["robot3"]], get_team_opr, USE.NAMES = FALSE ), 4) blue_robot1_dpr <- signif(sapply(blue_alliance_teams[["robot1"]], 
get_team_dpr, USE.NAMES = FALSE ), 4) blue_robot2_dpr <- signif(sapply(blue_alliance_teams[["robot2"]], get_team_dpr, USE.NAMES = FALSE ), 4) blue_robot3_dpr <- signif(sapply(blue_alliance_teams[["robot3"]], get_team_dpr, USE.NAMES = FALSE ), 4) result_df <- match_results %>% select(match_number, winning_alliance) result_df <- result_df %>% mutate( "comp_level" = comp_levels, "level_num" = levels_num ) result_df <- result_df %>% arrange(levels_num, match_number) result_df <- result_df %>% mutate( "red_average_opr" = signif((red_robot1_opr + red_robot2_opr + red_robot3_opr) / 3, 4), "blue_average_opr" = signif((blue_robot1_opr + blue_robot2_opr + blue_robot3_opr) / 3, 4), "red_average_dpr" = signif((red_robot1_dpr + red_robot2_dpr + red_robot3_dpr) / 3, 4), "blue_average_dpr" = signif((blue_robot1_dpr + blue_robot2_dpr + blue_robot3_dpr) / 3, 4), "red_robot1" = paste(red_alliance_teams[["robot1"]], paste0( "opr =", red_robot1_opr, " dpr =", red_robot1_dpr )), "red_robot2" = paste(red_alliance_teams[["robot2"]], paste0( "opr =", red_robot2_opr, " dpr =", red_robot2_dpr )), "red_robot3" = paste(red_alliance_teams[["robot3"]], paste0( "opr =", red_robot3_opr, " dpr =", red_robot3_dpr )), "blue_robot1" = paste(blue_alliance_teams[["robot1"]], paste0( "opr =", blue_robot1_opr, " dpr =", blue_robot1_dpr )), "blue_robot2" = paste(blue_alliance_teams[["robot2"]], paste0( "opr =", blue_robot2_opr, " dpr =", blue_robot2_dpr )), "blue_robot3" = paste(blue_alliance_teams[["robot3"]], paste0( "opr =", blue_robot3_opr, " dpr =", blue_robot3_dpr )) ) result_df <- result_df %>% mutate( "red_adjusted_score" = 2 / 3 * red_average_opr + 1 / 2 * red_average_dpr, "blue_adjusted_score" = 2 / 3 * blue_average_opr + 1 / 2 * blue_average_dpr ) prediction <- result_df$red_adjusted_score > result_df$blue_adjusted_score prediction <- gsub(TRUE, "red", prediction) prediction <- gsub(FALSE, "blue", prediction) result_df <- result_df %>% mutate("predicted_winner" = prediction) rownames(result_df) 
<- paste0(result_df$comp_level, result_df$match_number) # result_df <- result_df[, c(1, 2, 13, 5, 6, 7, 8, 9, 10, 11, 12, 3, 4)] # result_df <- result_df[, c(-1, -12, -13)] print_df <- result_df %>% subset(select = c( winning_alliance, predicted_winner, red_average_opr, red_average_dpr, red_robot1, red_robot2, red_robot3, blue_robot1, blue_robot2, blue_robot3 )) result_file <- paste0(team, event, year, "_oprs.csv") write.csv(print_df, file = result_file, row.names = TRUE)
\name{AlgebraicRule-class} \docType{class} \alias{AlgebraicRule-class} \title{SBML type "AlgebraicRule"} \description{Expresses equations that are not assignments nor rates of change.} \section{Instantiation}{ Objects can be created by calls of the form \code{new("AlgebraicRule", ...)}. } \section{Slots}{ \describe{ \item{\code{math}:}{Object of class \code{"expression"} specifying the equation. } \item{\code{metaId}:}{Object of class \code{"character"} that is an XML ID "described" by an RDF resource. This links an SBML element to an RDF resource. RDF may appear anywhere in an SBML element, but is usually placed inside the \code{annotation} element.} \item{\code{notes}:}{Object of class \code{"character"} containing user-readable XHTML notes about an element. } \item{\code{annotation}:}{Object of class \code{"character"} containing additional machine-readable information about an element, usually as RDF, such as BioPAX. This is where application-specific data belongs.} } } \section{Extends}{ Class \code{"\linkS4class{Rule}"}, directly. Class \code{"\linkS4class{SBase}"}, by class "Rule", distance 2. } \section{Methods}{ No methods defined with class "AlgebraicRule" in the signature. } \references{\url{http://sbml.org/documents/}} \author{Michael Lawrence} \keyword{classes}
/man/AlgebraicRule-class.Rd
no_license
cran/rsbml
R
false
false
1,353
rd
\name{AlgebraicRule-class} \docType{class} \alias{AlgebraicRule-class} \title{SBML type "AlgebraicRule"} \description{Expresses equations that are not assignments nor rates of change.} \section{Instantiation}{ Objects can be created by calls of the form \code{new("AlgebraicRule", ...)}. } \section{Slots}{ \describe{ \item{\code{math}:}{Object of class \code{"expression"} specifying the equation. } \item{\code{metaId}:}{Object of class \code{"character"} that is an XML ID "described" by an RDF resource. This links an SBML element to an RDF resource. RDF may appear anywhere in an SBML element, but is usually placed inside the \code{annotation} element.} \item{\code{notes}:}{Object of class \code{"character"} containing user-readable XHTML notes about an element. } \item{\code{annotation}:}{Object of class \code{"character"} containing additional machine-readable information about an element, usually as RDF, such as BioPAX. This is where application-specific data belongs.} } } \section{Extends}{ Class \code{"\linkS4class{Rule}"}, directly. Class \code{"\linkS4class{SBase}"}, by class "Rule", distance 2. } \section{Methods}{ No methods defined with class "AlgebraicRule" in the signature. } \references{\url{http://sbml.org/documents/}} \author{Michael Lawrence} \keyword{classes}
library(ape) testtree <- read.tree("12590_0.txt") unrooted_tr <- unroot(testtree) write.tree(unrooted_tr, file="12590_0_unrooted.txt")
/codeml_files/newick_trees_processed/12590_0/rinput.R
no_license
DaniBoo/cyanobacteria_project
R
false
false
137
r
library(ape) testtree <- read.tree("12590_0.txt") unrooted_tr <- unroot(testtree) write.tree(unrooted_tr, file="12590_0_unrooted.txt")
testlist <- list(a = 1735549300L, b = 1952542255L, x = c(795373421L, NA, 2053468767L, 1885430635L, 1634166131L)) result <- do.call(grattan:::anyOutside,testlist) str(result)
/grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610386564-test.R
no_license
akhikolla/updated-only-Issues
R
false
false
174
r
testlist <- list(a = 1735549300L, b = 1952542255L, x = c(795373421L, NA, 2053468767L, 1885430635L, 1634166131L)) result <- do.call(grattan:::anyOutside,testlist) str(result)
# Quiz 07-2 set.seed(1034) library(tidyverse) library(GGally) library(cowplot) n <- 20 x1 <- scale(runif(n, 5, 9)) x2 <- scale(runif(n, 1, 4)) y <- rnorm(n, 5, 2) + (2.3 * x1 + rnorm(n)) + (1.8 * x2 + rnorm(n)) M <- data.frame(y, x1, x2) ggscatmat(M) fm <- lm(y ~ x1 + x2) summary(fm)
/Quiz_Code/Quiz_08-2.R
permissive
midoripanda/quant_methods
R
false
false
291
r
# Quiz 07-2 set.seed(1034) library(tidyverse) library(GGally) library(cowplot) n <- 20 x1 <- scale(runif(n, 5, 9)) x2 <- scale(runif(n, 1, 4)) y <- rnorm(n, 5, 2) + (2.3 * x1 + rnorm(n)) + (1.8 * x2 + rnorm(n)) M <- data.frame(y, x1, x2) ggscatmat(M) fm <- lm(y ~ x1 + x2) summary(fm)
#' Adds column to a NONMEM data file containing values of a time-varying covariate. #' #' @author Katarzyna Nurzynska #' @param data_cov data frame containing information about time-varying covariate. #' @param data_dose data frame containing information about when the dose was administered. #' @param data_final data frame to which column with time-varying covariate should be added. #' @param cov_time column name in data_cov containing information about the time when the covariate was measured. #' @param dose_time column name in data_dose containing information about the time when the dose was administered. #' @param ID_col name of the ID column in data_final. #' @param cov_name name of the time-varying covariate. #' @param cov_value name that describes the value (e.g. "Concentration") of the time-varying covariate. #' @param cov_column column name in data_cov containing information about the name of the covariate to be selected. #' e.g. weight, albumin, bilirubin etc. #' @param date_format_dose logical. change date format from '\%Y-\%m-\%dT\%H:\%M' to '\%Y-\%m-\%d \%H:\%M'(if TRUE). #' @param date_format_cov logical. change date format from '\%Y-\%m-\%dT\%H:\%M' to '\%Y-\%m-\%d \%H:\%M'(if TRUE). #' @param time_zone name of required timezone. Daylight saving change is applied when time difference is calculated. #' @return The input data frame with new column added containing values of time-varying covariate. #' @note The data file containing information about the dose records should be filtered to include only records of interest e.g. related #' to a specific drug (e.g. BAY and not co-medication). #' @note Time difference between times when the dose was administered and the covariate was measured is calculated in hours. #' @note The ID columns in the original data files containing information about covariate and dose should be called SUBJIDN. #' @note The time column in data_final (containing dosing and concentration times) should be called TIME. 
#' @note The format of the dates in the cov_time and dose_time columns should be '\%Y-\%m-\%dT\%H:\%M' or #' '\%Y-\%m-\%d \%H:\%M' (it is assumed that clock time is present). #' @export ######################################################################################################## #Date: 29/02/16 #Author: Katarzyna Nurzynska #Project: Copanlicib, Pi3K-front BAY80-6946, popPK #Purpose: Add a column to a data file containing values of time-varying covariate #Lab journal: KN-04 p.67 #Arguments: data_cov - is the data file containing information about time-varying covariate # data_dose - is the data file containing information when the dose was administarted # data_final - is the final data file to which column with time-varying covariate should be added # cov_time - column in the data_cov containing information about the time when the covariate was measured # dose_time - column in the data_dose containing information about the time when the dose was administarted # ID_col - the name of the ID column in the data_final # cov_name - the name of the time-varying covariate # cov_value - value (e.g.concentration) of time-varying covariate # cov_column - column in the data_cov containing information about the name of the covariate to be selected e.g. weight, albumin, bilirubin etc. # date_format_dose / data_format_cov - change date format from '%Y-%m-%dT%H:%M' to '%Y-%m-%d %H:%M'(if TRUE) # time_zone - required timezone; daylight saving change is applied when time difference is calculated #Notes: The data file (ex) containing information about the dose records should be filtered to include only records of interest e.g. related to a specific drug (e.g. 
BAY and not co-medication) # Time difference between times when the dose was administrated and the covariate was measured is calculated in hours # The ID columns in the original data files containing information about covariate and dose should be called SUBJIDN # The time column in the data_final (containing dosing and concentration times) should be called TIME # The format of the dates in cov_time and dose_time columns should be '%Y-%m-%dT%H:%M' or '%Y-%m-%d %H:%M' (it is assumed that clock time is present) ##Defalt arguments: add column containing information about albumin ######################################################################################################## covariate_time_update = function(data_cov, data_dose, data_final, cov_time="LBDTC", dose_time='EXSTDTC', ID_col='SID', cov_column='LBTESTCD', cov_name='ALB', cov_value="LBSTRESN", date_format_cov=FALSE, date_format_dose=TRUE, time_zone='US/Eastern'){ #Add an empty column containing information about the covariate to the final data file (data_final) data_final [,cov_name]=9999 data_cov<-data_cov[which(data_cov[,cov_column]==cov_name),] #From data_cov select only rows which contain info about the covariate of interest e.g. 
LBTESTCD -> ALBU #Change the format of dates in the data_cov if(date_format_cov!=FALSE){ data_cov[,cov_time]=as.character(as.POSIXct(data_cov[,cov_time], format = '%Y-%m-%dT%H:%M', tz=time_zone), format = '%Y-%m-%d %H:%M', tz=time_zone) } #Create a data file containing dates when the first dose was administarted for each subject (to take into account multiple dose studies) data_dose=data_dose[!duplicated(data_dose$SUBJIDN),] #Change the format of dates in the data_dose if(date_format_dose!=FALSE){ data_dose[,dose_time]=as.character(as.POSIXct(data_dose[,dose_time], format = '%Y-%m-%dT%H:%M', tz=time_zone), format = '%Y-%m-%d %H:%M', tz=time_zone) } #ACalculate time after forst dose for the covariate records data_cov[,"TIME"]=9999 subject_cov=unique(data_cov$SUBJIDN) for (i in 1:length(subject_cov)){ subjectunique_cov=data_cov[data_cov$SUBJIDN==subject_cov[i],] for (k in 1:nrow(subjectunique_cov)){ TAFD<- difftime(subjectunique_cov[k,cov_time], data_dose[i,dose_time], units="hours") subjectunique_cov [k,"TIME"] <- TAFD } data_cov[data_cov$SUBJIDN==subject_cov[i],]=subjectunique_cov } #Update the covariate records with time subject=unique(data_final[,ID_col]) for(i in 1:length(subject)){ subjectunique=data_final[data_final[,ID_col]==subject[i],] subjectunique_cov=data_cov[data_cov[,"SUBJIDN"]==subject[i],] baseline=subjectunique_cov[subjectunique_cov[,"TIME"]<=0,] #select all the records when TIME is negative (baseline) subjectunique[,cov_name]=mean(as.numeric(as.character(baseline[,cov_value]))) #calculate avearge of all the values of the covariate before the administartion of the first dose (baseline value) for (k in 1:nrow(subjectunique_cov)) { for (n in 1:nrow (subjectunique)){ if (subjectunique [n,"TIME"]>=subjectunique_cov [k, "TIME"]){ subjectunique [n:nrow (subjectunique),cov_name]= as.numeric(as.character(subjectunique_cov [k, cov_value])) } } } data_final[data_final[,ID_col]==subject[i],]=subjectunique } return(data_final) }
/R/covariate_time_update.R
no_license
jgrevel/BAST1-R-Library
R
false
false
7,245
r
#' Adds column to a NONMEM data file containing values of a time-varying covariate. #' #' @author Katarzyna Nurzynska #' @param data_cov data frame containing information about time-varying covariate. #' @param data_dose data frame containing information about when the dose was administered. #' @param data_final data frame to which column with time-varying covariate should be added. #' @param cov_time column name in data_cov containing information about the time when the covariate was measured. #' @param dose_time column name in data_dose containing information about the time when the dose was administered. #' @param ID_col name of the ID column in data_final. #' @param cov_name name of the time-varying covariate. #' @param cov_value name that describes the value (e.g. "Concentration") of the time-varying covariate. #' @param cov_column column name in data_cov containing information about the name of the covariate to be selected. #' e.g. weight, albumin, bilirubin etc. #' @param date_format_dose logical. change date format from '\%Y-\%m-\%dT\%H:\%M' to '\%Y-\%m-\%d \%H:\%M'(if TRUE). #' @param date_format_cov logical. change date format from '\%Y-\%m-\%dT\%H:\%M' to '\%Y-\%m-\%d \%H:\%M'(if TRUE). #' @param time_zone name of required timezone. Daylight saving change is applied when time difference is calculated. #' @return The input data frame with new column added containing values of time-varying covariate. #' @note The data file containing information about the dose records should be filtered to include only records of interest e.g. related #' to a specific drug (e.g. BAY and not co-medication). #' @note Time difference between times when the dose was administered and the covariate was measured is calculated in hours. #' @note The ID columns in the original data files containing information about covariate and dose should be called SUBJIDN. #' @note The time column in data_final (containing dosing and concentration times) should be called TIME. 
#' @note The format of the dates in the cov_time and dose_time columns should be '\%Y-\%m-\%dT\%H:\%M' or #' '\%Y-\%m-\%d \%H:\%M' (it is assumed that clock time is present). #' @export ######################################################################################################## #Date: 29/02/16 #Author: Katarzyna Nurzynska #Project: Copanlicib, Pi3K-front BAY80-6946, popPK #Purpose: Add a column to a data file containing values of time-varying covariate #Lab journal: KN-04 p.67 #Arguments: data_cov - is the data file containing information about time-varying covariate # data_dose - is the data file containing information when the dose was administarted # data_final - is the final data file to which column with time-varying covariate should be added # cov_time - column in the data_cov containing information about the time when the covariate was measured # dose_time - column in the data_dose containing information about the time when the dose was administarted # ID_col - the name of the ID column in the data_final # cov_name - the name of the time-varying covariate # cov_value - value (e.g.concentration) of time-varying covariate # cov_column - column in the data_cov containing information about the name of the covariate to be selected e.g. weight, albumin, bilirubin etc. # date_format_dose / data_format_cov - change date format from '%Y-%m-%dT%H:%M' to '%Y-%m-%d %H:%M'(if TRUE) # time_zone - required timezone; daylight saving change is applied when time difference is calculated #Notes: The data file (ex) containing information about the dose records should be filtered to include only records of interest e.g. related to a specific drug (e.g. 
BAY and not co-medication) # Time difference between times when the dose was administrated and the covariate was measured is calculated in hours # The ID columns in the original data files containing information about covariate and dose should be called SUBJIDN # The time column in the data_final (containing dosing and concentration times) should be called TIME # The format of the dates in cov_time and dose_time columns should be '%Y-%m-%dT%H:%M' or '%Y-%m-%d %H:%M' (it is assumed that clock time is present) ##Defalt arguments: add column containing information about albumin ######################################################################################################## covariate_time_update = function(data_cov, data_dose, data_final, cov_time="LBDTC", dose_time='EXSTDTC', ID_col='SID', cov_column='LBTESTCD', cov_name='ALB', cov_value="LBSTRESN", date_format_cov=FALSE, date_format_dose=TRUE, time_zone='US/Eastern'){ #Add an empty column containing information about the covariate to the final data file (data_final) data_final [,cov_name]=9999 data_cov<-data_cov[which(data_cov[,cov_column]==cov_name),] #From data_cov select only rows which contain info about the covariate of interest e.g. 
LBTESTCD -> ALBU #Change the format of dates in the data_cov if(date_format_cov!=FALSE){ data_cov[,cov_time]=as.character(as.POSIXct(data_cov[,cov_time], format = '%Y-%m-%dT%H:%M', tz=time_zone), format = '%Y-%m-%d %H:%M', tz=time_zone) } #Create a data file containing dates when the first dose was administarted for each subject (to take into account multiple dose studies) data_dose=data_dose[!duplicated(data_dose$SUBJIDN),] #Change the format of dates in the data_dose if(date_format_dose!=FALSE){ data_dose[,dose_time]=as.character(as.POSIXct(data_dose[,dose_time], format = '%Y-%m-%dT%H:%M', tz=time_zone), format = '%Y-%m-%d %H:%M', tz=time_zone) } #ACalculate time after forst dose for the covariate records data_cov[,"TIME"]=9999 subject_cov=unique(data_cov$SUBJIDN) for (i in 1:length(subject_cov)){ subjectunique_cov=data_cov[data_cov$SUBJIDN==subject_cov[i],] for (k in 1:nrow(subjectunique_cov)){ TAFD<- difftime(subjectunique_cov[k,cov_time], data_dose[i,dose_time], units="hours") subjectunique_cov [k,"TIME"] <- TAFD } data_cov[data_cov$SUBJIDN==subject_cov[i],]=subjectunique_cov } #Update the covariate records with time subject=unique(data_final[,ID_col]) for(i in 1:length(subject)){ subjectunique=data_final[data_final[,ID_col]==subject[i],] subjectunique_cov=data_cov[data_cov[,"SUBJIDN"]==subject[i],] baseline=subjectunique_cov[subjectunique_cov[,"TIME"]<=0,] #select all the records when TIME is negative (baseline) subjectunique[,cov_name]=mean(as.numeric(as.character(baseline[,cov_value]))) #calculate avearge of all the values of the covariate before the administartion of the first dose (baseline value) for (k in 1:nrow(subjectunique_cov)) { for (n in 1:nrow (subjectunique)){ if (subjectunique [n,"TIME"]>=subjectunique_cov [k, "TIME"]){ subjectunique [n:nrow (subjectunique),cov_name]= as.numeric(as.character(subjectunique_cov [k, cov_value])) } } } data_final[data_final[,ID_col]==subject[i],]=subjectunique } return(data_final) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/eval.R \name{path.plot} \alias{path.plot} \title{Plot the regularization path} \usage{ path.plot(v0s, obj, G, thres = 0.5, normalize = FALSE, xlab = "", ylab = "", main = "") } \arguments{ \item{v0s}{the vector of parameter v0} \item{obj}{fitted EMGS object, or a three-dimensional array of precision matrices} \item{thres}{threshold of slab} \item{normalize}{logical indicator to plot precision matrices or partial correlation matrices} \item{xlab}{x axis label} \item{ylab}{y axis label} \item{main}{plot title} } \value{ the regularization path } \description{ This function plots the regularization path } \examples{ }
/EMGS/man/path.plot.Rd
no_license
richardli/EMGS
R
false
true
710
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/eval.R \name{path.plot} \alias{path.plot} \title{Plot the regularization path} \usage{ path.plot(v0s, obj, G, thres = 0.5, normalize = FALSE, xlab = "", ylab = "", main = "") } \arguments{ \item{v0s}{the vector of parameter v0} \item{obj}{fitted EMGS object, or a three-dimensional array of precision matrices} \item{thres}{threshold of slab} \item{normalize}{logical indicator to plot precision matrices or partial correlation matrices} \item{xlab}{x axis label} \item{ylab}{y axis label} \item{main}{plot title} } \value{ the regularization path } \description{ This function plots the regularization path } \examples{ }
#' @importFrom glue glue #' @importFrom attempt warn_if_not create_dockerfile <- function(FROM = "rocker/r-base", AS = NULL){ if (is.null(AS)) { glue("FROM {FROM}") } else { glue("FROM {FROM} AS {AS}") } } add_run <- function(cmd){ glue("RUN {cmd}") } add_add <- function(from, to, force = TRUE){ if (!force) { warn_if_not(normalizePath(from), file.exists, "The file `from` doesn't seem to exists") warn_if_not(normalizePath(to), file.exists, "The file `to` doesn't seem to exists.") } glue("ADD {from} {to}") } add_copy <- function(from, to, force = TRUE){ if (!force) { warn_if_not(normalizePath(from), file.exists, "The file `from` doesn't seem to exists") warn_if_not(normalizePath(to), file.exists, "The file `to` doesn't seem to exists.") } glue("COPY {from} {to}") } add_workdir <- function(where){ glue("WORKDIR {where}") } add_expose <- function(port){ warn_if_not(port, is.numeric, "You've entered a character vector") glue("EXPOSE {port}") } add_volume <- function(volume){ glue("VOLUME {volume}") } add_cmd <- function(cmd){ glue("CMD {cmd}") } add_label <- function(key, value){ glue('LABEL "{key}"="{value}"') } add_env <- function(key, value){ glue('ENV "{key}"="{value}"') } add_entrypoint <- function(cmd){ glue("ENTRYPOINT {cmd}") } add_user <- function(user){ glue("USER {user}") } add_arg <- function(arg){ glue("ARG {arg}") } add_onbuild <- function(cmd){ glue("ONBUILD {cmd}") } add_stopsignal <- function(signal){ glue("STOPSIGNAL {signal}") } add_healthcheck <- function(check){ glue("HEALTHCHECK {check}") } add_shell <- function(shell){ glue("SHELL {shell}") } add_maintainer <- function(name, email){ glue("MAINTAINER {name} <{email}>") } switch_them <- function(vec, a, b){ what <- vec[a] whbt <- vec[b] vec[b] <- what vec[a] <- whbt vec } remove_from <- function(vec, what){ vec[-what] }
/R/add.R
permissive
uribo/dockerfiler
R
false
false
1,923
r
#' @importFrom glue glue #' @importFrom attempt warn_if_not create_dockerfile <- function(FROM = "rocker/r-base", AS = NULL){ if (is.null(AS)) { glue("FROM {FROM}") } else { glue("FROM {FROM} AS {AS}") } } add_run <- function(cmd){ glue("RUN {cmd}") } add_add <- function(from, to, force = TRUE){ if (!force) { warn_if_not(normalizePath(from), file.exists, "The file `from` doesn't seem to exists") warn_if_not(normalizePath(to), file.exists, "The file `to` doesn't seem to exists.") } glue("ADD {from} {to}") } add_copy <- function(from, to, force = TRUE){ if (!force) { warn_if_not(normalizePath(from), file.exists, "The file `from` doesn't seem to exists") warn_if_not(normalizePath(to), file.exists, "The file `to` doesn't seem to exists.") } glue("COPY {from} {to}") } add_workdir <- function(where){ glue("WORKDIR {where}") } add_expose <- function(port){ warn_if_not(port, is.numeric, "You've entered a character vector") glue("EXPOSE {port}") } add_volume <- function(volume){ glue("VOLUME {volume}") } add_cmd <- function(cmd){ glue("CMD {cmd}") } add_label <- function(key, value){ glue('LABEL "{key}"="{value}"') } add_env <- function(key, value){ glue('ENV "{key}"="{value}"') } add_entrypoint <- function(cmd){ glue("ENTRYPOINT {cmd}") } add_user <- function(user){ glue("USER {user}") } add_arg <- function(arg){ glue("ARG {arg}") } add_onbuild <- function(cmd){ glue("ONBUILD {cmd}") } add_stopsignal <- function(signal){ glue("STOPSIGNAL {signal}") } add_healthcheck <- function(check){ glue("HEALTHCHECK {check}") } add_shell <- function(shell){ glue("SHELL {shell}") } add_maintainer <- function(name, email){ glue("MAINTAINER {name} <{email}>") } switch_them <- function(vec, a, b){ what <- vec[a] whbt <- vec[b] vec[b] <- what vec[a] <- whbt vec } remove_from <- function(vec, what){ vec[-what] }
library(rehh) library(vcfR) library(cowplot) locus <- read_tsv("data/hpc/selection/vcf_files/locus.txt", col_names = "id") make_ehh_plot <- function(loci) { scaff <- str_split(loci,":")[[1]][1] pos <- str_split(loci,":")[[1]][2] %>% as.numeric() print(scaff) hh <- data2haplohh(hap_file = paste0("data/hpc/selection/vcf_files/", scaff, "_withid.vcf"), polarize_vcf = FALSE, vcf_reader = "vcfR") hh_offshore <- data2haplohh(hap_file = paste0("data/hpc/selection/vcf_files/", scaff, "_offshore_withid.vcf"), polarize_vcf = FALSE, vcf_reader = "vcfR") res <- calc_ehh(hh, mrk = loci, include_nhaplo = FALSE) res_offshore <- calc_ehh(hh_offshore, mrk = loci, include_nhaplo = FALSE) pehh <- res$ehh %>% as_tibble() %>% pivot_longer(-POSITION) %>% ggplot() + geom_line(aes(x=(POSITION-pos)/1e+6,y=value, color=name),size=1.5) + theme_classic() + ggsci::scale_color_aaas() + theme(legend.position = "none") + labs(x="Position(Mb)",y="Extended Haplotype Homozygosity",title = paste("EHH around", loci,"in inshore")) pehh2 <- res_offshore$ehh %>% as_tibble() %>% pivot_longer(-POSITION) %>% ggplot() + geom_line(aes(x=(POSITION-pos)/1e+6,y=value, color=name),size=1.5) + theme_classic() + ggsci::scale_color_aaas(name="",label=c("Ancestral","Derived")) + labs(x="Position(Mb)",y="Extended Haplotype Homozygosity",title = paste("EHH around", loci,"in offshore")) plot_grid(pehh, pehh2) } make_ehh_plot(locus$id[1]) hh <- data2haplohh(hap_file = paste0("data/hpc/selection/vcf_files/BLFC01000156_offshore_withid.vcf"), polarize_vcf = FALSE, vcf_reader = "vcfR") res <- calc_ehh(hh, mrk = "BLFC01000156.1:905489" , include_nhaplo = TRUE) plot(res)
/scripts/plot_ehh.R
no_license
bakeronit/acropora_digitifera_wgs
R
false
false
1,847
r
library(rehh) library(vcfR) library(cowplot) locus <- read_tsv("data/hpc/selection/vcf_files/locus.txt", col_names = "id") make_ehh_plot <- function(loci) { scaff <- str_split(loci,":")[[1]][1] pos <- str_split(loci,":")[[1]][2] %>% as.numeric() print(scaff) hh <- data2haplohh(hap_file = paste0("data/hpc/selection/vcf_files/", scaff, "_withid.vcf"), polarize_vcf = FALSE, vcf_reader = "vcfR") hh_offshore <- data2haplohh(hap_file = paste0("data/hpc/selection/vcf_files/", scaff, "_offshore_withid.vcf"), polarize_vcf = FALSE, vcf_reader = "vcfR") res <- calc_ehh(hh, mrk = loci, include_nhaplo = FALSE) res_offshore <- calc_ehh(hh_offshore, mrk = loci, include_nhaplo = FALSE) pehh <- res$ehh %>% as_tibble() %>% pivot_longer(-POSITION) %>% ggplot() + geom_line(aes(x=(POSITION-pos)/1e+6,y=value, color=name),size=1.5) + theme_classic() + ggsci::scale_color_aaas() + theme(legend.position = "none") + labs(x="Position(Mb)",y="Extended Haplotype Homozygosity",title = paste("EHH around", loci,"in inshore")) pehh2 <- res_offshore$ehh %>% as_tibble() %>% pivot_longer(-POSITION) %>% ggplot() + geom_line(aes(x=(POSITION-pos)/1e+6,y=value, color=name),size=1.5) + theme_classic() + ggsci::scale_color_aaas(name="",label=c("Ancestral","Derived")) + labs(x="Position(Mb)",y="Extended Haplotype Homozygosity",title = paste("EHH around", loci,"in offshore")) plot_grid(pehh, pehh2) } make_ehh_plot(locus$id[1]) hh <- data2haplohh(hap_file = paste0("data/hpc/selection/vcf_files/BLFC01000156_offshore_withid.vcf"), polarize_vcf = FALSE, vcf_reader = "vcfR") res <- calc_ehh(hh, mrk = "BLFC01000156.1:905489" , include_nhaplo = TRUE) plot(res)
# titanic is avaliable in your workspace # 1 - Check the structure of titanic str(titanic) # 2 - Use ggplot() for the first instruction ggplot(titanic, aes(x = Pclass, fill = Sex)) + geom_bar(position = "dodge") # 3 - Plot 2, add facet_grid() layer ggplot(titanic, aes(x = Pclass, fill = Sex)) + geom_bar(position = "dodge") + facet_grid(.~Survived) # 4 - Define an object for position jitterdodge, to use below posn.jd <- position_jitterdodge(0.5, 0, 0.6) # 5 - Plot 3, but use the position object from instruction 4 ggplot(titanic, aes(x = Pclass, y = Age, color = Sex)) + geom_point(size = 3, alpha = .5, position=posn.jd) + facet_grid(.~Survived)
/4-2/titanic.R
no_license
jt1800/springboard-projects
R
false
false
664
r
# titanic is avaliable in your workspace # 1 - Check the structure of titanic str(titanic) # 2 - Use ggplot() for the first instruction ggplot(titanic, aes(x = Pclass, fill = Sex)) + geom_bar(position = "dodge") # 3 - Plot 2, add facet_grid() layer ggplot(titanic, aes(x = Pclass, fill = Sex)) + geom_bar(position = "dodge") + facet_grid(.~Survived) # 4 - Define an object for position jitterdodge, to use below posn.jd <- position_jitterdodge(0.5, 0, 0.6) # 5 - Plot 3, but use the position object from instruction 4 ggplot(titanic, aes(x = Pclass, y = Age, color = Sex)) + geom_point(size = 3, alpha = .5, position=posn.jd) + facet_grid(.~Survived)
\name{move.HMM.dwell.plot} \alias{move.HMM.dwell.plot} \title{Dwell Plot} \usage{ move.HMM.dwell.plot(move.HMM) } \arguments{ \item{move.HMM}{A move.HMM object containing a fitted HMM model.} } \value{ A plot of dwell times } \description{ This function plots the (geometric) pdf of the fitted model against the dwell times from the state sequence predicted by the Viterbi algorithm. }
/man/move.HMM.dwell.plot.Rd
no_license
benaug/move.HMM
R
false
false
401
rd
\name{move.HMM.dwell.plot} \alias{move.HMM.dwell.plot} \title{Dwell Plot} \usage{ move.HMM.dwell.plot(move.HMM) } \arguments{ \item{move.HMM}{A move.HMM object containing a fitted HMM model.} } \value{ A plot of dwell times } \description{ This function plots the (geometric) pdf of the fitted model against the dwell times from the state sequence predicted by the Viterbi algorithm. }
print(connect via ssh)
/sshconnect.R
no_license
cereberaodollam/RStudio-Test
R
false
false
22
r
print(connect via ssh)
#' Localised multiple kernel k-means
#'
#' Perform the training step of the localised multiple kernel k-means.
#' @param Km Array of size N X N X M containing M different N x N kernel
#' matrices.
#' @param parameters A list of parameters containing the desired number of
#' clusters, \code{cluster_count}, and the number of iterations of the
#' algorithm to be run, \code{iteration_count}.
#' @param missing Matrix of size N X M containing missingness indicators, i.e.
#' missing[i,j] = 1 (or = TRUE) if observation \code{i} is missing in dataset
#' \code{j}, missing[i,j] = 0 (or = FALSE).
#' @param verbose Boolean flag. If TRUE, at each iteration the iteration number
#' is printed. Defaults to FALSE.
#' @return This function returns a list containing:
#' \item{clustering}{the cluster labels for each element (i.e. row/column) of
#' the kernel matrix.}
#' \item{objective}{the value of the objective function for the given
#' clustering.}
#' \item{parameters}{same parameters as in the input.}
#' \item{Theta}{N x M matrix of weights, each row corresponds to an observation
#' and each column to one of the kernels.}
#' @author Mehmet Gonen, Alessandra Cabassi
#' @references Gonen, M. and Margolin, A.A., 2014. Localized data fusion for
#' kernel k-means clustering with application to cancer biology. In Advances in
#' Neural Information Processing Systems (pp. 1305-1313).
#' @examples
#' if(requireNamespace("Rmosek", quietly = TRUE) &&
#' (!is.null(utils::packageDescription("Rmosek")$Configured.MSK_VERSION))){
#'
#' # Initialise 100 x 100 x 3 array containing M kernel matrices
#' # representing three different types of similarities between 100 data points
#' km <- array(NA, c(100, 100, 3))
#' # Load kernel matrices
#' km[,,1] <- as.matrix(read.csv(system.file('extdata',
#' 'kernel_matrix1.csv', package = 'klic'), row.names = 1))
#' km[,,2] <- as.matrix(read.csv(system.file('extdata',
#' 'kernel_matrix2.csv', package = 'klic'), row.names = 1))
#' km[,,3] <- as.matrix(read.csv(system.file('extdata',
#' 'kernel_matrix3.csv', package = 'klic'), row.names = 1))
#' # Introduce some missing data
#' km[76:80, , 1] <- NA
#' km[, 76:80, 1] <- NA
#'
#' # Define missingness indicators
#' missing <- matrix(FALSE, 100, 3)
#' missing[76:80,1] <- TRUE
#'
#' # Initialize the parameters of the algorithm
#' parameters <- list()
#' # Set the number of clusters
#' parameters$cluster_count <- 4
#' # Set the number of iterations
#' parameters$iteration_count <- 10
#'
#' # Perform training
#' state <- lmkkmeans_missingData(km, parameters, missing)
#'
#' # Display the clustering
#' print(state$clustering)
#' # Display the kernel weights
#' print(state$Theta)
#' }
#' @export
lmkkmeans_missingData <- function(Km, parameters, missing = NULL,
                                  verbose = FALSE) {
    state <- list()
    # N = number of observations; P = number of kernels.
    N <- dim(Km)[2]
    P <- dim(Km)[3]
    # avail[i, m] = 1 when observation i is available (not missing) in
    # kernel m; when no missingness is supplied, everything is available.
    if (!is.null(missing)) {
        avail <- abs(1 - missing)
    } else {
        avail <- matrix(1, N, P)
    }
    # Initialise weight matrix assigning equal weights to each object in each
    # kernel (each row sums to 1 over the kernels in which it is available)
    Theta <- matrix(NA, N, P)
    for (i in 1:N) {
        Theta[i, ] <- 1/sum(avail[i, ])
    }
    Theta <- Theta * avail  # Set to zero the weights of missing observations
    # Initialise weighted kernel matrix: sum over kernels of
    # (theta_m theta_m^T) .* K_m, restricted to available observations.
    K_Theta <- matrix(0, nrow(Km), ncol(Km))
    for (m in 1:P) {
        avail_m <- avail[, m] > 0
        K_Theta[avail_m, avail_m] <- K_Theta[avail_m, avail_m] +
            (Theta[avail_m, m, drop = FALSE] %*%
                 t(Theta[avail_m, m, drop = FALSE])) * Km[avail_m, avail_m, m]
    }
    # Initialise vector of objective functions (one value per iteration)
    objective <- rep(0, parameters$iteration_count)
    for (iter in 1:parameters$iteration_count) {
        if (verbose)
            print(sprintf("running iteration %d...", iter))
        # Spectral relaxation of k-means: H holds the top cluster_count
        # eigenvectors of the current weighted kernel.
        H <- eigen(K_Theta, symmetric = TRUE)$vectors[, 1:parameters$cluster_count]
        HHT <- H %*% t(H)
        # Q is the (block-diagonal per kernel) quadratic form for the weight
        # update QP.
        Q <- matrix(0, N * P, N * P)
        for (m in 1:P) {
            avail_m <- avail[, m] > 0
            start_index <- (m - 1) * N + 1
            end_index <- m * N
            Q[(start_index:end_index)[avail_m], (start_index:end_index)[avail_m]] <-
                diag(1, sum(avail_m), sum(avail_m)) * Km[avail_m, avail_m, m] -
                HHT[avail_m, avail_m] * Km[avail_m, avail_m, m]
            # See Gönen & Margolin 2014 NIPS, page 5
        }
        # Drop QP variables that correspond to missing observations.
        avail_vec <- as.logical(as.vector(avail))
        sum_avail_vec <- sum(avail_vec)
        Q <- Q[avail_vec, avail_vec]
        ### Solve QP problem ###
        problem <- list()
        # problem$sense: Objective sense: e.g. 'min' or 'max'
        problem$sense <- "min"
        # problem$c: Objective coefficient array (no linear term)
        problem$c <- rep(0, sum_avail_vec)
        # problem$A: Constraint sparse matrix (each observation's weights
        # across kernels must sum to one)
        A <- Matrix::Matrix(rep(diag(1, N, N), P), nrow = N, ncol = N * P,
            sparse = TRUE)
        problem$A <- A[, avail_vec, drop = F]
        # problem$bc: Lower and upper constraint bounds
        problem$bc <- rbind(blc = rep(1, N), buc = rep(1, N))
        # problem$bx: Lower and upper variable bounds
        problem$bx <- rbind(blx = rep(0, sum_avail_vec),
            bux = rep(1, sum_avail_vec))
        # problem$qobj: Quadratic convex optimization, given to MOSEK as the
        # lower triangle of Q in sparse triplet form
        I <- matrix(1:sum_avail_vec, sum_avail_vec, sum_avail_vec, byrow = FALSE)
        J <- matrix(1:sum_avail_vec, sum_avail_vec, sum_avail_vec, byrow = TRUE)
        problem$qobj <- list(i = I[lower.tri(I, diag = TRUE)],
            j = J[lower.tri(J, diag = TRUE)],
            v = Q[lower.tri(Q, diag = TRUE)])
        opts <- list()
        # opts$verbose: Output logging verbosity
        opts$verbose <- 0
        # Solve QP problem
        result <- Rmosek::mosek(problem, opts)
        # Extract Theta and put it in matrix form: the solver returns the
        # stacked weights of available observations, kernel by kernel.
        Theta <- matrix(0, N, P)
        count <- 0
        for (i in 1:P) {
            avail_i <- which(avail[, i] == 1)
            startt <- (count + 1)
            endt <- count + sum(avail[, i])
            Theta[avail_i, i] <- result$sol$itr$xx[startt:endt]
            count <- count + sum(avail[, i])
        }
        # Update weighted kernel with the new weights
        K_Theta <- matrix(0, nrow(Km), ncol(Km))
        for (m in 1:P) {
            avail_m <- avail[, m] > 0
            K_Theta[avail_m, avail_m] <- K_Theta[avail_m, avail_m] +
                (Theta[avail_m, m, drop = FALSE] %*%
                     t(Theta[avail_m, m, drop = FALSE])) * Km[avail_m, avail_m, m]
        }
        # Update objective function
        objective[iter] <- sum(diag(t(H) %*% K_Theta %*% H)) - sum(diag(K_Theta))
    }
    # Row-normalise H (skipping numerically all-zero rows) and cluster the
    # embedded points with k-means, as in standard spectral clustering.
    normalize <- which(rowSums(H^2, 2) > .Machine$double.eps)
    H_normalized <- matrix(0, N, parameters$cluster_count)
    H_normalized[normalize, ] <- H[normalize, ]/matrix(sqrt(rowSums(H[normalize, ]^2, 2)),
        nrow(H[normalize, ]), parameters$cluster_count, byrow = FALSE)
    state$clustering <- stats::kmeans(
        H_normalized,
        centers = parameters$cluster_count,
        iter.max = 1000,
        nstart = 10
    )$cluster
    state$objective <- objective
    state$parameters <- parameters
    state$Theta <- Theta
    state
}
/R/lmkkmeans_missingData.R
permissive
rnaimehaom/klic
R
false
false
7,460
r
#' Localised multiple kernel k-means
#'
#' Perform the training step of the localised multiple kernel k-means.
#' @param Km Array of size N X N X M containing M different N x N kernel
#' matrices.
#' @param parameters A list of parameters containing the desired number of
#' clusters, \code{cluster_count}, and the number of iterations of the
#' algorithm to be run, \code{iteration_count}.
#' @param missing Matrix of size N X M containing missingness indicators, i.e.
#' missing[i,j] = 1 (or = TRUE) if observation \code{i} is missing in dataset
#' \code{j}, missing[i,j] = 0 (or = FALSE).
#' @param verbose Boolean flag. If TRUE, at each iteration the iteration number
#' is printed. Defaults to FALSE.
#' @return This function returns a list containing:
#' \item{clustering}{the cluster labels for each element (i.e. row/column) of
#' the kernel matrix.}
#' \item{objective}{the value of the objective function for the given
#' clustering.}
#' \item{parameters}{same parameters as in the input.}
#' \item{Theta}{N x M matrix of weights, each row corresponds to an observation
#' and each column to one of the kernels.}
#' @author Mehmet Gonen, Alessandra Cabassi
#' @references Gonen, M. and Margolin, A.A., 2014. Localized data fusion for
#' kernel k-means clustering with application to cancer biology. In Advances in
#' Neural Information Processing Systems (pp. 1305-1313).
#' @examples
#' if(requireNamespace("Rmosek", quietly = TRUE) &&
#' (!is.null(utils::packageDescription("Rmosek")$Configured.MSK_VERSION))){
#'
#' # Initialise 100 x 100 x 3 array containing M kernel matrices
#' # representing three different types of similarities between 100 data points
#' km <- array(NA, c(100, 100, 3))
#' # Load kernel matrices
#' km[,,1] <- as.matrix(read.csv(system.file('extdata',
#' 'kernel_matrix1.csv', package = 'klic'), row.names = 1))
#' km[,,2] <- as.matrix(read.csv(system.file('extdata',
#' 'kernel_matrix2.csv', package = 'klic'), row.names = 1))
#' km[,,3] <- as.matrix(read.csv(system.file('extdata',
#' 'kernel_matrix3.csv', package = 'klic'), row.names = 1))
#' # Introduce some missing data
#' km[76:80, , 1] <- NA
#' km[, 76:80, 1] <- NA
#'
#' # Define missingness indicators
#' missing <- matrix(FALSE, 100, 3)
#' missing[76:80,1] <- TRUE
#'
#' # Initialize the parameters of the algorithm
#' parameters <- list()
#' # Set the number of clusters
#' parameters$cluster_count <- 4
#' # Set the number of iterations
#' parameters$iteration_count <- 10
#'
#' # Perform training
#' state <- lmkkmeans_missingData(km, parameters, missing)
#'
#' # Display the clustering
#' print(state$clustering)
#' # Display the kernel weights
#' print(state$Theta)
#' }
#' @export
lmkkmeans_missingData <- function(Km, parameters, missing = NULL,
                                  verbose = FALSE) {
    state <- list()
    # N = number of observations; P = number of kernels.
    N <- dim(Km)[2]
    P <- dim(Km)[3]
    # avail[i, m] = 1 when observation i is available (not missing) in
    # kernel m; when no missingness is supplied, everything is available.
    if (!is.null(missing)) {
        avail <- abs(1 - missing)
    } else {
        avail <- matrix(1, N, P)
    }
    # Initialise weight matrix assigning equal weights to each object in each
    # kernel (each row sums to 1 over the kernels in which it is available)
    Theta <- matrix(NA, N, P)
    for (i in 1:N) {
        Theta[i, ] <- 1/sum(avail[i, ])
    }
    Theta <- Theta * avail  # Set to zero the weights of missing observations
    # Initialise weighted kernel matrix: sum over kernels of
    # (theta_m theta_m^T) .* K_m, restricted to available observations.
    K_Theta <- matrix(0, nrow(Km), ncol(Km))
    for (m in 1:P) {
        avail_m <- avail[, m] > 0
        K_Theta[avail_m, avail_m] <- K_Theta[avail_m, avail_m] +
            (Theta[avail_m, m, drop = FALSE] %*%
                 t(Theta[avail_m, m, drop = FALSE])) * Km[avail_m, avail_m, m]
    }
    # Initialise vector of objective functions (one value per iteration)
    objective <- rep(0, parameters$iteration_count)
    for (iter in 1:parameters$iteration_count) {
        if (verbose)
            print(sprintf("running iteration %d...", iter))
        # Spectral relaxation of k-means: H holds the top cluster_count
        # eigenvectors of the current weighted kernel.
        H <- eigen(K_Theta, symmetric = TRUE)$vectors[, 1:parameters$cluster_count]
        HHT <- H %*% t(H)
        # Q is the (block-diagonal per kernel) quadratic form for the weight
        # update QP.
        Q <- matrix(0, N * P, N * P)
        for (m in 1:P) {
            avail_m <- avail[, m] > 0
            start_index <- (m - 1) * N + 1
            end_index <- m * N
            Q[(start_index:end_index)[avail_m], (start_index:end_index)[avail_m]] <-
                diag(1, sum(avail_m), sum(avail_m)) * Km[avail_m, avail_m, m] -
                HHT[avail_m, avail_m] * Km[avail_m, avail_m, m]
            # See Gönen & Margolin 2014 NIPS, page 5
        }
        # Drop QP variables that correspond to missing observations.
        avail_vec <- as.logical(as.vector(avail))
        sum_avail_vec <- sum(avail_vec)
        Q <- Q[avail_vec, avail_vec]
        ### Solve QP problem ###
        problem <- list()
        # problem$sense: Objective sense: e.g. 'min' or 'max'
        problem$sense <- "min"
        # problem$c: Objective coefficient array (no linear term)
        problem$c <- rep(0, sum_avail_vec)
        # problem$A: Constraint sparse matrix (each observation's weights
        # across kernels must sum to one)
        A <- Matrix::Matrix(rep(diag(1, N, N), P), nrow = N, ncol = N * P,
            sparse = TRUE)
        problem$A <- A[, avail_vec, drop = F]
        # problem$bc: Lower and upper constraint bounds
        problem$bc <- rbind(blc = rep(1, N), buc = rep(1, N))
        # problem$bx: Lower and upper variable bounds
        problem$bx <- rbind(blx = rep(0, sum_avail_vec),
            bux = rep(1, sum_avail_vec))
        # problem$qobj: Quadratic convex optimization, given to MOSEK as the
        # lower triangle of Q in sparse triplet form
        I <- matrix(1:sum_avail_vec, sum_avail_vec, sum_avail_vec, byrow = FALSE)
        J <- matrix(1:sum_avail_vec, sum_avail_vec, sum_avail_vec, byrow = TRUE)
        problem$qobj <- list(i = I[lower.tri(I, diag = TRUE)],
            j = J[lower.tri(J, diag = TRUE)],
            v = Q[lower.tri(Q, diag = TRUE)])
        opts <- list()
        # opts$verbose: Output logging verbosity
        opts$verbose <- 0
        # Solve QP problem
        result <- Rmosek::mosek(problem, opts)
        # Extract Theta and put it in matrix form: the solver returns the
        # stacked weights of available observations, kernel by kernel.
        Theta <- matrix(0, N, P)
        count <- 0
        for (i in 1:P) {
            avail_i <- which(avail[, i] == 1)
            startt <- (count + 1)
            endt <- count + sum(avail[, i])
            Theta[avail_i, i] <- result$sol$itr$xx[startt:endt]
            count <- count + sum(avail[, i])
        }
        # Update weighted kernel with the new weights
        K_Theta <- matrix(0, nrow(Km), ncol(Km))
        for (m in 1:P) {
            avail_m <- avail[, m] > 0
            K_Theta[avail_m, avail_m] <- K_Theta[avail_m, avail_m] +
                (Theta[avail_m, m, drop = FALSE] %*%
                     t(Theta[avail_m, m, drop = FALSE])) * Km[avail_m, avail_m, m]
        }
        # Update objective function
        objective[iter] <- sum(diag(t(H) %*% K_Theta %*% H)) - sum(diag(K_Theta))
    }
    # Row-normalise H (skipping numerically all-zero rows) and cluster the
    # embedded points with k-means, as in standard spectral clustering.
    normalize <- which(rowSums(H^2, 2) > .Machine$double.eps)
    H_normalized <- matrix(0, N, parameters$cluster_count)
    H_normalized[normalize, ] <- H[normalize, ]/matrix(sqrt(rowSums(H[normalize, ]^2, 2)),
        nrow(H[normalize, ]), parameters$cluster_count, byrow = FALSE)
    state$clustering <- stats::kmeans(
        H_normalized,
        centers = parameters$cluster_count,
        iter.max = 1000,
        nstart = 10
    )$cluster
    state$objective <- objective
    state$parameters <- parameters
    state$Theta <- Theta
    state
}
# Generated example script for nlme's Variogram.corSpatial help page.
library(nlme)

### Name: Variogram.corSpatial
### Title: Calculate Semi-variogram for a corSpatial Object
### Aliases: Variogram.corSpatial
### Keywords: models

### ** Examples

# Exponential spatial correlation structure with range parameter 3,
# indexed by Time within Rat.
cs1 <- corExp(3, form = ~ Time | Rat)
# Initialize the structure against the BodyWeight data (shipped with nlme).
cs1 <- Initialize(cs1, BodyWeight)
# Semi-variogram of the structure; FUN is the exponential semi-variogram
# model 1 - exp(-distance/range). Show only the first 10 rows.
Variogram(cs1, FUN = function(x, y) (1 - exp(-x/y)))[1:10,]
/data/genthat_extracted_code/nlme/examples/Variogram.corSpatial.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
316
r
# Generated example script for nlme's Variogram.corSpatial help page.
library(nlme)

### Name: Variogram.corSpatial
### Title: Calculate Semi-variogram for a corSpatial Object
### Aliases: Variogram.corSpatial
### Keywords: models

### ** Examples

# Exponential spatial correlation structure with range parameter 3,
# indexed by Time within Rat.
cs1 <- corExp(3, form = ~ Time | Rat)
# Initialize the structure against the BodyWeight data (shipped with nlme).
cs1 <- Initialize(cs1, BodyWeight)
# Semi-variogram of the structure; FUN is the exponential semi-variogram
# model 1 - exp(-distance/range). Show only the first 10 rows.
Variogram(cs1, FUN = function(x, y) (1 - exp(-x/y)))[1:10,]
# Data definitions for a logistic-regression example (the file path suggests
# the ARM Ch.5 "separation" example — TODO confirm against the book's code).
# N: number of observations.
N <- 60
# y: binary response, length N.
y <- c(0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0,
       1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0,
       1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0)
# x: continuous predictor, length N.
x <- c(-0.339249227079859, 2.16776116177459, 0.583946420212137,
       -1.25010955939473, -2.51903611689114, 1.81544864387954,
       -0.679428295955671, -0.332381711251256, 3.85474147773995,
       1.75146143243406, 3.90671349584242, 3.9682374861409, 3.3953615751143,
       2.46322159107928, 2.78883671382684, 2.69848902325916,
       0.707973116235291, 0.726405148002986, 1.04441497239998,
       0.34200937885663, -0.79068375014397, 3.32954116488817,
       1.38079159128074, 3.98181472599443, 2.24877214672357,
       3.64920054528528, 2.10202334114661, 2.62895859353315,
       -1.58423510538688, 2.04590428928891, 1.90034431094031,
       -3.39115009145794, 2.3864044612747, -1.43181905943197,
       3.15308468479338, -1.67968373161864, -0.317596527305397,
       -0.961773428635424, -0.00569787589869719, 2.24223051880604,
       4.00897366227002, -0.247837875244195, -2.01586022574059,
       2.37127511539768, 0.804244899377085, 0.204942438628814,
       4.71913059489207, -3.18550290262071, 0.0371760285378893,
       1.63050107482969, 1.58176660124101, 0.308978975631489,
       1.14906114364148, -2.19539484323522, 1.38753269145889,
       0.447612648140833, 1.24317280127491, 2.55202204616542,
       -1.73857703161963, -1.8513770611542)
/test/integration/example-models/ARM/Ch.5/separation.data.R
permissive
nhuurre/stanc3
R
false
false
1,317
r
# Data definitions for a logistic-regression example (the file path suggests
# the ARM Ch.5 "separation" example — TODO confirm against the book's code).
# N: number of observations.
N <- 60
# y: binary response, length N.
y <- c(0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0,
       1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0,
       1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0)
# x: continuous predictor, length N.
x <- c(-0.339249227079859, 2.16776116177459, 0.583946420212137,
       -1.25010955939473, -2.51903611689114, 1.81544864387954,
       -0.679428295955671, -0.332381711251256, 3.85474147773995,
       1.75146143243406, 3.90671349584242, 3.9682374861409, 3.3953615751143,
       2.46322159107928, 2.78883671382684, 2.69848902325916,
       0.707973116235291, 0.726405148002986, 1.04441497239998,
       0.34200937885663, -0.79068375014397, 3.32954116488817,
       1.38079159128074, 3.98181472599443, 2.24877214672357,
       3.64920054528528, 2.10202334114661, 2.62895859353315,
       -1.58423510538688, 2.04590428928891, 1.90034431094031,
       -3.39115009145794, 2.3864044612747, -1.43181905943197,
       3.15308468479338, -1.67968373161864, -0.317596527305397,
       -0.961773428635424, -0.00569787589869719, 2.24223051880604,
       4.00897366227002, -0.247837875244195, -2.01586022574059,
       2.37127511539768, 0.804244899377085, 0.204942438628814,
       4.71913059489207, -3.18550290262071, 0.0371760285378893,
       1.63050107482969, 1.58176660124101, 0.308978975631489,
       1.14906114364148, -2.19539484323522, 1.38753269145889,
       0.447612648140833, 1.24317280127491, 2.55202204616542,
       -1.73857703161963, -1.8513770611542)
# Pair of functions that memoise a matrix inverse so it is computed at most
# once per matrix. Create a holder with makeCacheMatrix(), then ask
# cacheSolve() for the inverse; repeated calls reuse the stored result.
# Usage:
##  obj <- makeCacheMatrix(mymatrix)
##  cacheSolve(obj)   # returns inverse

# Build a "matrix" wrapper: a list of four closures sharing the matrix `x`
# and its (lazily computed) inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL
  # Replace the stored matrix and invalidate any stale inverse.
  set <- function(y) {
    x <<- y
    cached_inv <<- NULL
  }
  # Return the stored matrix.
  get <- function() {
    x
  }
  # Store a freshly computed inverse.
  setinverse <- function(inverse) {
    cached_inv <<- inverse
  }
  # Return the stored inverse, or NULL if none has been computed yet.
  getinverse <- function() {
    cached_inv
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}

# Return the inverse of the special "matrix" created by makeCacheMatrix,
# computing it with solve() only on the first call and serving the cached
# copy afterwards (until set() invalidates it).
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: compute, remember, and return the inverse.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached inverse")
  }
  cached
}
/cachematrix.R
no_license
entrity/ProgrammingAssignment2
R
false
false
1,124
r
# Pair of functions that memoise a matrix inverse so it is computed at most
# once per matrix. Create a holder with makeCacheMatrix(), then ask
# cacheSolve() for the inverse; repeated calls reuse the stored result.
# Usage:
##  obj <- makeCacheMatrix(mymatrix)
##  cacheSolve(obj)   # returns inverse

# Build a "matrix" wrapper: a list of four closures sharing the matrix `x`
# and its (lazily computed) inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL
  # Replace the stored matrix and invalidate any stale inverse.
  set <- function(y) {
    x <<- y
    cached_inv <<- NULL
  }
  # Return the stored matrix.
  get <- function() {
    x
  }
  # Store a freshly computed inverse.
  setinverse <- function(inverse) {
    cached_inv <<- inverse
  }
  # Return the stored inverse, or NULL if none has been computed yet.
  getinverse <- function() {
    cached_inv
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}

# Return the inverse of the special "matrix" created by makeCacheMatrix,
# computing it with solve() only on the first call and serving the cached
# copy afterwards (until set() invalidates it).
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: compute, remember, and return the inverse.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached inverse")
  }
  cached
}
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{compute.logLR}
\alias{compute.logLR}
\title{Compute logLR.}
\usage{
compute.logLR(x, g, TItable = NULL, read.depth = NULL, minobs = 1,
  pseudocounts = 0.5, all = FALSE, center = FALSE, repara = TRUE,
  forcebin = FALSE, lm.approx = TRUE, disp = "add", nullcheck = TRUE,
  pointmass = TRUE, prior = "uniform", gridmult = 2, mixsd = NULL,
  trace = FALSE, mixcompdist = "normal", lambda1 = 1, lambda2 = 0,
  df = NULL, randomstart = FALSE, minimaloutput = FALSE, maxiter = 5000,
  VB = FALSE, cxx = TRUE, maxlogLR = NULL)
}
\arguments{
\item{x:}{a matrix of \code{nsig} by \code{n} counts where \code{n} should
be a power of 2}

\item{g:}{an \code{nsig}-vector containing group indicators/covariate value
for each sample}

\item{TItable:}{pre-calculated \code{TItable}; if \code{TItable} is
provided, this function skips computation of \code{TItable} from \code{x}
and uses the \code{TItable} provided as a parameter. This helps with fast
permutation tests.}

\item{read.depth:}{an \code{nsig}-vector containing the total number of
reads for each sample (used to test for association with the total
intensity). Defaults to NULL.}

\item{minobs:}{minimum number of obs required to be in each logistic model}

\item{pseudocounts:}{a number to be added to counts}

\item{all:}{bool, if TRUE pseudocounts are added to all entries, if FALSE
pseudocounts are added only to cases when either the number of successes or
the number of failures (but not both) is 0}

\item{center:}{bool, indicating whether to center \code{g}}

\item{repara:}{bool, indicating whether to reparameterize alpha and beta so
that their likelihoods can be factorized.}

\item{forcebin:}{bool, if TRUE don't allow for overdispersion.
Defaults to TRUE if \code{nsig=1}}

\item{lm.approx:}{bool, indicating whether a WLS alternative should be used}

\item{disp:}{"add" or "mult", indicates which type of overdispersion is
assumed when \code{lm.approx}=TRUE}

\item{cxx:}{bool, indicating whether to use R code or C++ code (faster)}

\item{maxlogLR:}{a positive number, default=NULL; if \code{maxlogLR} is
provided as a positive number, the function returns this number as
\code{logLR} when \code{logLR} is infinite.}
}
\value{
a list of \code{logLR}, \code{logLR.each.scale}, \code{finite.logLR};
\code{logLR.each.scale} contains logLR for each scale. \code{finite.logLR}
takes 0 or 1 indicating whether \code{logLR} is finite or not.
}
\description{
This function takes a series of Poisson count signals \code{x}, with data on
different samples in each row and covariate \code{g} for each sample, and
computes logLR to test for association between \code{x} and \code{g}. If
\code{TItable} is provided, this function skips computation of
\code{TItable} from \code{x} and uses the \code{TItable} provided as a
parameter. This helps with fast permutation tests. Parameters
\code{minobs}, \code{pseudocounts}, \code{all}, \code{center},
\code{repara}, \code{forcebin}, \code{lm.approx}, and \code{disp} are
passed to \code{\link{glm.approx}}. The list \code{ashparam} specifies a
list of parameters to be passed to \code{ash}.
}
/package/multiseq.Rcheck/00_pkg_src/multiseq/man/compute.logLR.Rd
no_license
stephenslab/multiseq
R
false
false
3,135
rd
% Generated by roxygen2 (4.0.2): do not edit by hand \name{compute.logLR} \alias{compute.logLR} \title{Compute logLR.} \usage{ compute.logLR(x, g, TItable = NULL, read.depth = NULL, minobs = 1, pseudocounts = 0.5, all = FALSE, center = FALSE, repara = TRUE, forcebin = FALSE, lm.approx = TRUE, disp = "add", nullcheck = TRUE, pointmass = TRUE, prior = "uniform", gridmult = 2, mixsd = NULL, trace = FALSE, mixcompdist = "normal", lambda1 = 1, lambda2 = 0, df = NULL, randomstart = FALSE, minimaloutput = FALSE, maxiter = 5000, VB = FALSE, cxx = TRUE, maxlogLR = NULL) } \arguments{ \item{x:}{a matrix of \code{nsig} by \code{n} counts where \code{n} should be a power of 2} \item{g:}{an \code{nsig}-vector containing group indicators/covariate value for each sample} \item{TItable:}{pre-calculated \code{TItable}; If \code{TItable} is provided, this function skips computation of \code{TItable} from \code{x} and use the \code{TItable} provided as a parameter. This helps with fast permutation test.} \item{read.depth:}{an \code{nsig}-vector containing the total number of reads for each sample (used to test for association with the total intensity). Defaults to NULL.} \item{minobs:}{minimum number of obs required to be in each logistic model} \item{pseudocounts:}{a number to be added to counts} \item{all:}{bool, if TRUE pseudocounts are added to all entries, if FALSE pseudocounts are added only to cases when either number of successes or number of failures (but not both) is 0} \item{center:}{bool, indicating whether to center \code{g}} \item{repara:}{bool, indicating whether to reparameterize alpha and beta so that their likelihoods can be factorized.} \item{forcebin:}{bool, if TRUE don't allow for overdipersion. 
Defaults to TRUE if \code{nsig=1}} \item{lm.approx:}{bool, indicating whether a WLS alternative should be used} \item{disp:}{"add" or "mult", indicates which type of overdispersion is assumed when \code{lm.approx}=TRUE} \item{cxx:}{bool, indicating whether to use Rcode or c++ code (faster)} \item{maxlogLR:}{a positive number, default=NULL, if \code{maxlogLR} is provided as a positive number, the function returns this number as \code{logLR} when \code{logLR} is infinite.} } \value{ a list of \code{logLR}, \code{logLR.each.scale}, \code{finite.logLR}; \code{logLR.each.scale} contains logLR for each scale. \code{finite.logLR} takes 0 or 1 indicating whether \code{logLR} is finite or not. } \description{ This function takes a series of Poisson count signals \code{x}, with data on different samples in each row and covariate \code{g} for each sample, and compute logLR to test for association between \code{x} and \code{g}. If \code{TItable} is provided, this function skips computation of \code{TItable} from \code{x} and use the \code{TItable} provided as a parameter. This helps with fast permutation test. Parameters \code{minobs}, \code{pseudocounts}, \code{all}, \code{center}, \code{repara}, \code{forcebin}, \code{lm.approx}, and \code{disp} are passed to \code{\link{glm.approx}}. The list \code{ashparam} specifies a list of parameters to be passed to \code{ash}. }
# plot2.R: time trace of global active power for 1-2 Feb 2007.
# Reads the household power consumption file ("?" marks missing values),
# keeps the two target days, and writes the line plot to plot2.png.
power_raw <- read.csv("household_power_consumption.txt", sep = ";",
                      na.strings = "?")
# Restrict to the two days of interest (dates are stored as d/m/Y strings).
two_days <- power_raw[power_raw$Date %in% c("1/2/2007", "2/2/2007"), ]
# Combine the date and time columns into POSIX timestamps for the x axis.
timestamps <- strptime(paste(two_days$Date, two_days$Time),
                       "%d/%m/%Y %H:%M:%S")
plot(timestamps, two_days$Global_active_power, type = "l", xlab = " ",
     ylab = "Global active power (kilowatts)")
# Copy the screen device to a 480x480 PNG, then close the screen device.
dev.print(png, file = "plot2.png", width = 480, height = 480,
          bg = "transparent")
dev.off()
/plot2.R
no_license
mdwyman/ExData_Plotting1
R
false
false
480
r
# plot2.R: time trace of global active power for 1-2 Feb 2007.
# Reads the household power consumption file ("?" marks missing values),
# keeps the two target days, and writes the line plot to plot2.png.
power_raw <- read.csv("household_power_consumption.txt", sep = ";",
                      na.strings = "?")
# Restrict to the two days of interest (dates are stored as d/m/Y strings).
two_days <- power_raw[power_raw$Date %in% c("1/2/2007", "2/2/2007"), ]
# Combine the date and time columns into POSIX timestamps for the x axis.
timestamps <- strptime(paste(two_days$Date, two_days$Time),
                       "%d/%m/%Y %H:%M:%S")
plot(timestamps, two_days$Global_active_power, type = "l", xlab = " ",
     ylab = "Global active power (kilowatts)")
# Copy the screen device to a 480x480 PNG, then close the screen device.
dev.print(png, file = "plot2.png", width = 480, height = 480,
          bg = "transparent")
dev.off()
# Analysis code for Garner & Raymond - Distinct selection mechanisms for when # predictions and rewards guide visual selective attention # https://docs.google.com/document/d/1j_9irYF9LbegaxuxnWxSFdmkLe8nSrm6AsWl-ShVCYI/edit?usp=sharing # (c) K. Garner, University of Birmingham, Sept 2017 # if you have any questions please email getkellygarner@gmail.com # for each experiment I # load the pre-cleaned data # summarise data for plotting # conduct for RT and accuracy separately # - an NHST mixed models analysis (did this out of curiosity to see if it # corroborates the Bayesian approach - it does) # - a Bayesian analysis to find evidence for the winning model # use the winning model to predict data for plots # save to the workspace # EXP 1 - ##################################################################################### rm(list=ls()) #setwd("~/ADDBIAS_REPOS") # set this to own directory ### tried to use packrat but error with library creation. will come back to this and update # in the future - sorry world! # packrat::init(getwd()) # preserve package versions (start R from the project directory and # get directed to the package library for this project) library(wesanderson) #library(lm.beta) library(lme4) library(BayesFactor) library(plyr) #________________________________________________________________________________________________ load("EXP1/ANALYSIS/EXP1_ANALYSIS_MIXDMDLS_BIAS.R") # NOTE: this loads the workspace that has the outputs that this code was written to # produce using the dataset commented out below. # load("EXP1/ANALYSIS/exp3_clean_BS_fv_18_03_17.R") i.e. 
loading the R file will load the # entire workspace # DATA PREP # ____________________________________________________________________________________________ #### first, trim the data to remove correct RTs < or > 2.5 std devs from the mu, then calculate regressors data = dat data = data[data$cor_resp == data$resp, ] data = na.omit(data) tmp <- by(data.frame(rt=data$RT, sub=data$sub, fixprob=data$fixprob, value=data$value, valid=data$valid, cresp=data$cor_resp), list(data$loc_prob, data$value, data$session), trim) # remove RTs > or < 2.5 stdevs from the mu data = as.data.frame(do.call(rbind, tmp)) rm(tmp) data$prob[data$fixprob == "98:02" & data$valid == "valid"] = .98 data$prob[data$fixprob == "98:02" & data$valid == "invalid"] = .02 data$prob[data$fixprob == "94:06" & data$valid == "valid"] = .94 data$prob[data$fixprob == "94:06" & data$valid == "invalid"] = .06 data$prob[data$fixprob == "90:10" & data$valid == "valid"] = .9 data$prob[data$fixprob == "90:10" & data$valid == "invalid"] = .1 data$prob[data$fixprob == "80:20" & data$valid == "valid"] = .8 data$prob[data$fixprob == "80:20" & data$valid == "invalid"] = .2 data$prob[data$fixprob == "60:40" & data$valid == "valid"] = .6 data$prob[data$fixprob == "60:40" & data$valid == "invalid"] = .4 data$val_num[data$value == "high"] = 50 data$val_num[data$value == "low"] = 1 data$rel_num_comp[data$value == "high"] = 1 data$rel_num_comp[data$value == "low"] = 50 data$cue[data$valid == "valid" & data$value == "high"] = "h2l" data$cue[data$valid == "invalid" & data$value == "low"] = "h2l" data$cue[data$valid == "valid" & data$value == "low"] = "l2h" data$cue[data$valid == "invalid" & data$value == "high"] = "l2h" # SUMMARISE # __________________________________________________________________________________________________ ###### RTs sum.dat.all = ddply(data, .(sub, fixprob, value, valid), summarise, mu = mean(rt), r1 = val_num[1], r2 = rel_num_comp[1]) # turn data to wideform and save for Jane so she can use excel etc 
sum.dat.all.wide = reshape2::dcast(sum.dat.all, sub ~ fixprob + value + valid, value.var = "mu" ) write.csv(sum.dat.all.wide, file="exp1-RT-wide.csv") # PLOT AVERAGE DATA sum.dat.all.plot = ddply(sum.dat.all, .(fixprob, value, valid), summarise, mean = mean(mu), N = length(mu) ) # calc confidence intervals sum.dat.sub = ddply(sum.dat.all, .(sub), summarise, mean = mean(mu)) sum.dat.all.gmu = mean(sum.dat.sub$mean) sum.dat.err = sum.dat.all for (i in levels(sum.dat.err$sub)) sum.dat.err$mu[sum.dat.err$sub == i] = sum.dat.err$mu[sum.dat.err$sub == i] - sum.dat.sub$mean[sum.dat.sub$sub == i] + sum.dat.all.gmu # crit.t = 2.086 sum.dat.cis = ddply( sum.dat.err, .(value, fixprob, valid), summarise, ci = (sd(mu)/sqrt(length(mu))) * ( 9/8 ) * 2.086 ) ###### ACC acc.dat.all = ddply(dat, .( sub, fixprob, value, valid ), summarise, N=length(resp), acc = sum(cor_resp == resp, na.rm=T)/N) # make and save in wideform for Jane/SPSS/excel users acc.dat.all.wide = reshape2::dcast(acc.dat.all, sub ~ fixprob + value + valid, value.var = "acc" ) write.csv(acc.dat.all.wide, file="exp1-acc-wide.csv") acc.dat.sub = ddply(dat, .(sub), summarise, N = length(resp), acc = sum(cor_resp == resp, na.rm=T)/N) acc.dat.gmu = mean(acc.dat.sub$acc) acc.dat.plot = ddply(dat, .( fixprob, value, valid ), summarise, N=length(resp), acc = sum(cor_resp == resp, na.rm=T)/N) acc.dat.err = acc.dat.all for (i in levels(acc.dat.err)) acc.dat.err$acc[acc.dat.err$sub == i] = acc.dat.err$acc[acc.dat.err$sub == i] - acc.dat.sub$acc[acc.dat.sub$sub == i] + acc.dat.all.gmu acc.dat.cis = ddply( acc.dat.err, .(value, fixprob, valid), summarise, ci = (sd(acc)/sqrt(length(acc))) * ( 9/8 ) * 2.086 ) # ANALYSE RT AND ACCURACY DATA # ______________________________________________________________________________________________________________________________________ ##### NOTE - I PLAYED AROUD WITH NHST w LINEAR MIXED MODELS. NOT REPORTED IN PAPER, ONLY SUPPLEMENTARY. 
JUST WANTED TO # SEE IF IT CORROBORATED THE BAYES PICTURE (IT DOES) sum.dat.all$info_gain = rep(xs, each = 4, times = 21) ### 1 = check each variable contributes rt.full = lmer( mu ~ valid + info_gain + r1 + info_gain:valid + info_gain:r1 + (1|sub), REML = FALSE, data = sum.dat.all) # knock out valid rt.valid = lmer( mu ~ info_gain + r1 + info_gain:valid + info_gain:r1 + (1|sub), REML = FALSE, data = sum.dat.all) # knock out info rt.valinf = lmer( mu ~ valid + info_gain + r1 + info_gain:valid + (1|sub), REML = FALSE, data = sum.dat.all) rt.infgain = lmer( mu ~ valid + info_gain + r1 + (1|sub), REML = FALSE, data = sum.dat.all) rt.meinfgain = lmer( mu ~ valid + r1 + (1|sub), REML = FALSE, data = sum.dat.all) # knock out reward rt.rew.int = lmer( mu ~ valid + info_gain + info_gain:valid + (1|sub), REML = FALSE, data = sum.dat.all) e1.rt.a1 = anova(rt.full, rt.valid) # **** me valid e1.rt.a2 = anova(rt.full, rt.valinf ) e1.rt.a3 = anova( rt.valinf, rt.infgain) ### info * valid e1.rt.a4 = anova(rt.infgain, rt.meinfgain) e1.rt.a5 = anova(rt.valinf, rt.rew.int) # **** me value e1.rt.ps = c(2.2e-16, 0.3409, 0.002196, 0.1387, 2.977e-12 ) e1.rt.p.adj = p.adjust(e1.rt.ps, method = "fdr") e1.rt.p.win = which(e1.rt.p.adj < .05) summary(rt.full) # BAYESIAN ANALYSIS (reported in paper) rt.all.mods = generalTestBF( mu ~ valid * info_gain * r1 + sub, data = sum.dat.all, whichRandom = "sub", neverExclude="^sub$") rt.all.mods = recompute(rt.all.mods, iterations = 500000) # careful running this - takes a long time, reduce iterations if you # want to run it yourself in a speedier fashion rt.top = head(rt.all.mods) # comparing winning model to all models at top that included an interaction between value and either validity or info gain rt.bf2 = rt.top[1]/rt.top[2] # valid + info_gain + valid:info_gain + r1 + info_gain:r1 + sub rt.bf3 = rt.top[1]/rt.top[3] # valid + r1 + sub rt.bf4 = rt.top[1]/rt.top[4] # valid + info_gain + valid:info_gain + r1 + valid:r1 + sub rt.bf5 = 
rt.top[1]/rt.top[5] # valid + info_gain + r1 + sub rt.bf6 = rt.top[1]/rt.top[6] # valid + info_gain + valid:info_gain + r1 + valid:r1 + info_gain:r1 + sub rt.bf.for.plot = data.frame( BF = c(3.814597, 4.84, 5.189638, 13.53112, 17.23548), upper = c(3.814597 + (3.814597*.0015), 4.83534 + ( 4.83534*.004 ), 5.189638 + (5.189638*.0053), 13.53112 + (13.53112 * .0057), 17.23548 + (17.23548*.0072)), lower = c(3.814597 - (3.814597*.0015),4.83534 - ( 4.83534*.004 ), 5.189638 - (5.189638*.0053), 13.53112 - (13.53112 * .0057), 17.23548 - (17.23548*.0072)), names = c( "v*c + c*va", "v + va", "v*c + v*va", "v + c + va", "v*c + v*va + c*va" )) # NOW DO ACCURACY DATA acc.dat.all$info_gain = rep(xs, each = 4, times = 21) acc.dat.all$r1[acc.dat.all$value == "high"] = 50 acc.dat.all$r1[acc.dat.all$value == "low"] = 1 # AGAIN, NHST APPROACH FOR FUNSIES acc.full = lmer( acc ~ valid + info_gain + r1 + info_gain:valid + info_gain:r1 + (1|sub), REML = FALSE, data = acc.dat.all) # knock out valid acc.valid = lmer( acc ~ info_gain + r1 + info_gain:valid + info_gain:r1 + (1|sub), REML = FALSE, data = acc.dat.all) # knock out info acc.valinf = lmer( acc ~ valid + info_gain + r1 + info_gain:valid + (1|sub), REML = FALSE, data = acc.dat.all) acc.infgain = lmer( acc ~ valid + info_gain + r1 + (1|sub), REML = FALSE, data = acc.dat.all) acc.meinfgain = lmer( acc ~ valid + r1 + (1|sub), REML = FALSE, data = acc.dat.all) # knock out reward acc.rew.int = lmer( acc ~ valid + info_gain + info_gain:valid + (1|sub), REML = FALSE, data = acc.dat.all) e1.acc.a1 = anova( acc.full, acc.valid) # ******** e1.acc.a2 = anova(acc.full, acc.valinf ) e1.acc.a3 = anova( acc.valinf, acc.infgain) e1.acc.a4 = anova( acc.infgain, acc.meinfgain) e1.acc.a5 = anova( acc.valinf, acc.rew.int) # ****** e1.acc.ps = c( 8.29e-14, 0.3209, 0.07941, 0.1635, 0.0004885) e1.acc.p.adj = p.adjust(e1.acc.ps, method = "fdr") e1.acc.p.win = which(e1.acc.p.adj < .05) summary( acc.full ) # BAYESIAN ANALYSIS - in paper acc.all.mods = 
generalTestBF( acc ~ valid * info_gain * r1 + sub, data = acc.dat.all, whichRandom = "sub", neverExclude="^sub$") acc.all.mods = recompute(acc.all.mods, iterations = 500000) acc.top = head(acc.all.mods) # win = valid + r1 + sub acc.bf2 = acc.top[1]/acc.top[2] # valid + info_gain + r1 + sub acc.bf3 = acc.top[1]/acc.top[3] # valid + info_gain + valid:info_gain + r1 + sub acc.bf4 = acc.top[1]/acc.top[4] # valid + r1 + valid:r1 + sub acc.bf5 = acc.top[1]/acc.top[5] # valid + info_gain + r1 + info_gain:r1 + sub acc.bf6 = acc.top[1]/acc.top[6] # valid + info_gain + valid:info_gain + r1 + info_gain:r1 + sub acc.bf.for.plot = data.frame( BF = c(2.57, 3.187716, 5.694131, 8.452145 , 9.173843), upper = c(2.57 + (2.57*.0068), 3.187716 + ( 3.187716*.0078 ), 5.694131 + (5.694131*.0066), 8.452145 + (8.452145 * .0086), 9.173843 + (9.173843*.0095)), lower = c(2.57 - (2.57*.0068), 3.187716 - ( 3.187716*.0078 ), 5.694131 - (5.694131*.0066), 8.452145 - (8.452145 * .0086), 9.173843 - (9.173843*.0095)), names = c( "v + c + va", "v*c + va", "v*va", "v + c*va", "v*c + c*va")) ################################################################################################ ########### PLOT DATA PAPER #################################################################### ################################################################################################ # 1 - get data together # 2 - predict based on winning model # JUST PREDICTING FIXED EFFECTS FOR PLOT win.mod.rt = lm( mu ~ valid + info_gain + valid:info_gain + r1, data = sum.dat.all) # top model sum.dat.all.plot$info_gain = rep(xs, each = 4) sum.dat.all.plot$r1 = rep(c(50,1), each = 2, times = 5) sum.dat.all.plot$predict = predict( win.mod.rt, sum.dat.all.plot ) win.mod.acc = lm( acc ~ valid + r1, data = acc.dat.all) # top model acc.dat.plot$info_gain = rep(xs, each = 4) acc.dat.plot$r1 = rep(c(50,1), each = 2, times = 5) acc.dat.plot$predict = predict( win.mod.acc, acc.dat.plot) # 
save.image("EXP1/ANALYSIS/EXP1_ANALYSIS_MIXDMDLS_BIAS.R") # run this line if you want to save new # stuff ################# EXPERIMENT ONE ANALYSIS COMPLETE - HUZZAH! # EXP 2 #________________________________________________________________________________________________ rm(list=ls()) library(wesanderson) library(lm.beta) library(lme4) library(BayesFactor) library(plyr) # EXP 2 # IS AN ADDITIVE OR AN INTERACTIVE MODEL BETTER TO ACCOUNT FOR DATA? # set to current directory ##### trim functions - remove RTs 2.5 standard deviations above and below the mean source("EXP2/ANALYSIS/trim_functions.R") # NOTE: this loads the workspace that has the outputs that this code was written to # produce using the dataset commented out below. # load("EXP2/ANALYSIS/exp2_clean_BS_v1_28_02_17") i.e. same principle as exp 1 load("EXP2/ANALYSIS/EXP2_ANALYSIS_MIXDMDLS_BIAS.R") data = dat ### trim rts tmp <- by(data.frame(rt=data$RT, sub=data$sub, loc_prob=data$loc_prob, value=data$value, rew_cond=data$rew_cond, valid = data$valid, fixprob = data$fixprob), list(data$sub, data$loc_prob, data$value, data$rew_cond), trim) # remove RTs > or < 2.5 stdevs from the mu data = as.data.frame(do.call(rbind, tmp)) data = data[!is.na(data$rt),] # remove na's data$loc_prob_fact = data$loc_prob data$loc_prob = varhandle::unfactor(data$loc_prob) data$val_num[data$value == "high"] = 50 data$val_num[data$value == "low"] = 1 data$rel_num_comp[data$value == "high"] = 1 data$rel_num_comp[data$value == "low"] = 50 data$prob[data$fixprob == "80:20" & data$valid == "valid"] = .8 data$prob[data$fixprob == "80:20" & data$valid == "invalid"] = .2 data$prob[data$fixprob == "60:40" & data$valid == "valid"] = .6 data$prob[data$fixprob == "60:40" & data$valid == "invalid"] = .4 data$cue[data$valid == "valid" & data$value == "high"] = "h2l" data$cue[data$valid == "invalid" & data$value == "low"] = "h2l" data$cue[data$valid == "valid" & data$value == "low"] = "l2h" data$cue[data$valid == "invalid" & data$value == "high"] 
= "l2h" # SUMMARISE AND GET DATA TO PLOT # __________________________________________________________________________________________________ ###### RTs sum.dat.all = ddply(data, .(sub, fixprob, value, valid, rew_cond), summarise, mu = mean(rt), r1 = val_num[1], r2 = rel_num_comp[1]) sum.dat.all.wide = reshape2::dcast(sum.dat.all, sub ~ fixprob + value + valid + rew_cond, value.var = "mu" ) write.csv(sum.dat.all.wide, file="exp2-RT-wide.csv") # DATA FOR PLOTTING sum.dat.all.plot = ddply(sum.dat.all, .(fixprob, value, valid, rew_cond), summarise, mean = mean(mu), N = length(mu) ) sum.dat.sub = ddply(sum.dat.all, .(sub), summarise, mean = mean(mu)) sum.dat.all.gmu = mean(sum.dat.sub$mean) sum.dat.err = sum.dat.all for (i in levels(sum.dat.err$sub)) sum.dat.err$mu[sum.dat.err$sub == i] = sum.dat.err$mu[sum.dat.err$sub == i] - sum.dat.sub$mean[sum.dat.sub$sub == i] + sum.dat.all.gmu # crit.t = 2.086 sum.dat.cis = ddply( sum.dat.err, .(value, fixprob, valid, rew_cond), summarise, ci = (sd(mu)/sqrt(length(mu))) * ( 3/2 ) * 2.086 ) ###### ACC acc.dat.all = ddply(dat, .( sub, fixprob, value, valid, rew_cond ), summarise, N=length(resp), acc = sum(cor_resp == resp, na.rm=T)/N) acc.dat.all.wide = reshape2::dcast(acc.dat.all, sub ~ fixprob + value + valid + rew_cond, value.var = "acc" ) write.csv(acc.dat.all.wide, file="exp2-acc-wide.csv") acc.dat.sub = ddply(dat, .(sub), summarise, N = length(resp), acc = sum(cor_resp == resp, na.rm=T)/N) acc.dat.gmu = mean(acc.dat.sub$acc) acc.dat.plot = ddply(dat, .( fixprob, value, valid, rew_cond ), summarise, N=length(resp), acc = sum(cor_resp == resp, na.rm=T)/N) acc.dat.err = acc.dat.all for (i in levels(acc.dat.err)) acc.dat.err$acc[acc.dat.err$sub == i] = acc.dat.err$acc[acc.dat.err$sub == i] - acc.dat.sub$acc[acc.dat.sub$sub == i] + acc.dat.all.gmu acc.dat.cis = ddply( acc.dat.err, .(value, fixprob, valid, rew_cond), summarise, ci = (sd(acc)/sqrt(length(acc))) * ( 3/2 ) * 2.086 ) # ANALYSE RT AND ACCURACY DATA; # 
______________________________________________________________________________________________________________________________________ sum.dat.all$info_gain = rep(xs, each = 8, times = 26) ### 1 = check each variable contributes rt.full = lmer( mu ~ valid + r1 + info_gain + rew_cond + valid:info_gain + info_gain:r1 + info_gain:rew_cond + r1:rew_cond + info_gain:r1:rew_cond + (1|sub), REML = FALSE, data = sum.dat.all) ### drop interactions using info gain rt.d3way = lmer( mu ~ valid + r1 + info_gain + rew_cond + valid:info_gain + info_gain:r1 + info_gain:rew_cond + r1:rew_cond + (1|sub), REML = FALSE, data = sum.dat.all) rt.dinf.gain = lmer( mu ~ valid + r1 + info_gain + rew_cond + valid:info_gain + info_gain:r1 + r1:rew_cond + (1|sub), REML = FALSE, data = sum.dat.all) rt.dinf.r1 = lmer( mu ~ valid + r1 + info_gain + rew_cond + valid:info_gain + r1:rew_cond + (1|sub), REML = FALSE, data = sum.dat.all) rt.dinf.val = lmer( mu ~ valid + r1 + info_gain + rew_cond + r1:rew_cond + (1|sub), REML = FALSE, data = sum.dat.all) rt.dme.inf = lmer( mu ~ valid + r1 + rew_cond + r1:rew_cond + (1|sub), REML = FALSE, data = sum.dat.all) #### now drop validity rt.dval = lmer( mu ~ r1 + rew_cond + r1:rew_cond + (1|sub), REML = FALSE, data = sum.dat.all) #### drop rew by rew_cond int rt.dr1.rewcond = lmer( mu ~ valid + r1 + rew_cond + (1|sub), REML = FALSE, data = sum.dat.all) #### drop main effect of rew cond rt.drewcond = lmer( mu ~ valid + r1 + (1|sub), REML = FALSE, data = sum.dat.all) #### drop main effect of value rt.drewr1 = lmer( mu ~ valid + rew_cond + (1|sub), REML = FALSE, data = sum.dat.all) e2.rt.a1 = anova(rt.full, rt.d3way) # info gain does not interact with rew_cond and reward e2.rt.a2 = anova(rt.d3way, rt.dinf.gain) # info gain does not interact with rew_cond - decay v fixed e2.rt.a3 = anova(rt.dinf.gain, rt.dinf.r1) # info gain does not interact with value e2.rt.a4 = anova(rt.dinf.r1, rt.dinf.val ) # info gain does not interact with validity e2.rt.a5 = 
anova(rt.dinf.val, rt.dme.inf ) # no main effect of info gain e2.rt.a6 = anova(rt.dme.inf, rt.dval) # main effect validity ******** e2.rt.a7 = anova(rt.dme.inf, rt.dr1.rewcond) # no interaction with reward cond e2.rt.a8 = anova(rt.dr1.rewcond, rt.drewcond) # main effect of reward condition ****** e2.rt.a9 = anova(rt.dr1.rewcond, rt.drewr1) # main effect of loc-value*** e2.rt.p = c( 0.9221, 0.09444, 0.9073, 0.09309, 0.9344, 3.635e-10, 0.4477, 2.406e-09, 0.0004157) e2.rt.p = p.adjust(e2.rt.p, method = "fdr") e2.rt.win = which(e2.rt.p < .05) ##### BAYES ANALYSIS rt.all.mods = generalTestBF( mu ~ valid * info_gain * r1 * rew_cond + sub, data = sum.dat.all, whichRandom = "sub", neverExclude="^sub$" ) rt.all.mods = recompute(rt.all.mods, iterations = 500000) rt.top = head(rt.all.mods) # valid + r1 + rew_cond + sub rt.bf2 = rt.top[1]/rt.top[2] # valid + r1 + valid:r1 + rew_cond + sub rt.bf3 = rt.top[1]/rt.top[3] # valid + r1 + rew_cond + r1:rew_cond + sub rt.bf4 = rt.top[1]/rt.top[4] # valid + info_gain + r1 + rew_cond + sub rt.bf5 = rt.top[1]/rt.top[5] # valid + r1 + rew_cond + valid:rew_cond + sub rt.bf6 = rt.top[1]/rt.top[6] # valid + info_gain + valid:info_gain + r1 + rew_cond + sub rt.bf.for.plot = data.frame( BF = c(4.645585, 5.031692, 6.63436, 6.642919, 8.969172), lower = c(4.645585 - (4.645585*.024), 5.031692 - (5.031692*.02), 6.63436 - (6.63436*.018), 6.642919 - (6.642919*.019), 8.969172 - (8.969172*.018)), upper = c(4.645585 + (4.645585*.024), 5.031692 + (5.031692*.02), 6.63436 + (6.63436*.018), 6.642919 + (6.642919*.019), 8.969172 + (8.969172*.018)), names = c("v*va + rc", "v + va*rc", "v + c + va + rc", "v*rc + val", "v*c + va + rc")) # accuracy model to report values in paper acc.report = lmer( acc ~ valid*info_gain + rew_cond + r1 + (1|sub), REML = FALSE, data = acc.dat.all) # now accuracy acc.dat.all$info_gain = rep(xs, each = 8, times = 26) acc.dat.all$r1[acc.dat.all$value == "high"] = 50 acc.dat.all$r1[acc.dat.all$value == "low"] = 1 acc.full = lmer( acc 
~ valid + r1 + info_gain + rew_cond + valid:info_gain + info_gain:r1 + info_gain:rew_cond + r1:rew_cond + info_gain:r1:rew_cond + (1|sub), REML = FALSE, data = acc.dat.all) ### drop interactions using info gain acc.d3way = lmer( acc ~ valid + r1 + info_gain + rew_cond + valid:info_gain + info_gain:r1 + info_gain:rew_cond + r1:rew_cond + (1|sub), REML = FALSE, data = acc.dat.all) acc.dinf.gain = lmer( acc ~ valid + r1 + info_gain + rew_cond + valid:info_gain + info_gain:r1 + r1:rew_cond + (1|sub), REML = FALSE, data = acc.dat.all) acc.dinf.r1 = lmer( acc ~ valid + r1 + info_gain + rew_cond + valid:info_gain + r1:rew_cond + (1|sub), REML = FALSE, data = acc.dat.all) acc.dinf.val = lmer( acc ~ valid + r1 + info_gain + rew_cond + r1:rew_cond + (1|sub), REML = FALSE, data = acc.dat.all) acc.dme.inf = lmer( acc ~ valid + r1 + rew_cond + r1:rew_cond + (1|sub), REML = FALSE, data = acc.dat.all) #### now drop validity acc.dval = lmer( acc ~ r1 + rew_cond + r1:rew_cond + (1|sub), REML = FALSE, data = acc.dat.all) #### drop rew by rew_cond int acc.dr1.rewcond = lmer( acc ~ valid + r1 + rew_cond + (1|sub), REML = FALSE, data = acc.dat.all) #### drop main effect of rew cond acc.drewcond = lmer( acc ~ valid + r1 + (1|sub), REML = FALSE, data = acc.dat.all) #### drop main effect of value acc.drewr1 = lmer( acc ~ valid + rew_cond + (1|sub), REML = FALSE, data = acc.dat.all) e2.acc.a1 = anova(acc.full, acc.d3way) # info gain does not interact with rew_cond and reward e2.acc.a2 = anova(acc.d3way, acc.dinf.gain) # info gain does not interact with rew_cond - decay v fixed e2.acc.a3 = anova(acc.dinf.gain, acc.dinf.r1) # info gain does not interact with value e2.acc.a4 = anova(acc.dinf.r1, acc.dinf.val ) # info gain DOES with validity e2.acc.a5 = anova(acc.dinf.val, acc.dme.inf ) # MAIN EFFECT OF INFO GAIN**** e2.acc.a6 = anova(acc.dme.inf, acc.dval) # main effect validity**** e2.acc.a7 = anova(acc.dme.inf, acc.dr1.rewcond) # no interaction with reward cond e2.acc.a8 = 
anova(acc.dr1.rewcond, acc.drewcond) # main effect of reward condition**** e2.acc.a9 = anova(acc.dr1.rewcond, acc.drewr1) # main effect of loc-value e2.acc.p = c( 0.9911, 0.4061, 0.999, 0.03348, 0.007604, 8.222e-09, 0.9816, 1.031e-08, 0.03825 ) e2.acc.p.adj = p.adjust(e2.acc.p, method = "fdr") e2.acc.p.win = which( e2.acc.p.adj < .05 ) acc.all.mods = generalTestBF( acc ~ valid * info_gain * r1 * rew_cond + sub, data = acc.dat.all, whichRandom = "sub", neverExclude="^sub$" ) acc.all.mods = recompute(acc.all.mods, iterations = 500000) acc.top = head(acc.all.mods) # valid + info_gain + valid:info_gain + r1 + rew_cond + sub acc.bf2 = acc.top[1]/acc.top[2] # valid + info_gain + valid:info_gain + rew_cond + sub acc.bf3 = acc.top[1]/acc.top[3] # valid + info_gain + r1 + rew_cond + sub acc.bf4 = acc.top[1]/acc.top[4] # valid + info_gain + rew_cond + sub acc.bf5 = acc.top[1]/acc.top[5] # valid + info_gain + valid:info_gain + r1 + valid:r1 + rew_cond + sub acc.bf6 = acc.top[1]/acc.top[6] # valid + info_gain + valid:info_gain + r1 + rew_cond + info_gain:rew_cond + sub ### doing specific models because evidence not strong bf_int1 = lmBF( acc ~ valid + info_gain + r1 + rew_cond + valid:info_gain + info_gain:r1:rew_cond + sub, data = acc.dat.all, whichRandom = "sub" ) bf_int1 = recompute(bf_int1, iterations = 500000) acc.bf7 = acc.top[1]/bf_int1 bf_int2 = lmBF( acc ~ valid + info_gain + r1 + rew_cond + valid:info_gain + info_gain:r1:rew_cond:valid + sub, data = acc.dat.all, whichRandom = "sub" ) bf_int2 = recompute(bf_int2, iterations = 500000) acc.bf8 = acc.top[1]/bf_int2 acc.bf.for.plot = data.frame( BF = c(1.561972, 1.624413, 2.070495, 2.646015, 3.292799, 4.395263, 3.935153), lower = c(1.561972 - (1.561972*.0075), 1.624413 - (1.624413*.0075), 2.070495 - (2.070495*.016), 2.646015 - (2.646015*.092), 3.292799 - (3.292799*.092), 4.395263 - (4.395263*.053), 3.935153 - (3.935153*.05)), upper = c(1.561972 + (1.561972*.0075), 1.624413 + (1.624413*.0075), 2.070495 + (2.070495*.016), 
2.646015 + (2.646015*.092), 3.292799 + (3.292799*.092), 4.395263 + (4.395263*.053), 3.935153 + (3.935153*.05)), names = c("v*c + rc", "v + c + va + rc", "v + c + rc", "v*c + v*va + rc", "v*c + c*rc + va", "v*c + c*va*rc", "v*c + vc*va*rc*v" )) ################################################################################################ ########### PREDICTED DATA FOR PLOTS #################################################################### ################################################################################################ # 2 PLOTS - 1 FOR RT AND 1 FOR ACCURACY # ROW 1 = RT: HIGH VALID & LOW INVALID, LOW VALID & HIGH INVALID, BAYES FACTORS - STATIC CONDITION # ROW 2 = SAME BUT DECAY CONDITION # 1 - get data together # 2 - predict based on winning model # JUST PREDICTING FIXED EFFECTS win.mod.rt = lm( mu ~ valid + r1 + rew_cond, data = sum.dat.all) sum.dat.all.plot$info_gain = rep(xs, each = 8) sum.dat.all.plot$r1 = rep(c(50,1), each = 4, times = 2) sum.dat.all.plot$predict = predict( win.mod.rt, sum.dat.all.plot ) win.mod.acc = lm( acc ~ rew_cond + valid + info_gain + valid:info_gain + r1, data = acc.dat.all) acc.dat.plot$info_gain = rep(xs, each = 8) acc.dat.plot$r1 = rep(c(50,1), each = 4, times = 2) acc.dat.plot$predict = predict( win.mod.acc, acc.dat.plot) # save.image("EXP2_ANALYSIS_MIXDMDLS_BIAS.R") uncomment and use to save new stuff
/code-analysis-and-task/Exp_1_2_ANALYSIS.R
no_license
Nian-Jingqing/attention-value-certainty
R
false
false
26,134
r
# Analysis code for Garner & Raymond - Distinct selection mechanisms for when # predictions and rewards guide visual selective attention # https://docs.google.com/document/d/1j_9irYF9LbegaxuxnWxSFdmkLe8nSrm6AsWl-ShVCYI/edit?usp=sharing # (c) K. Garner, University of Birmingham, Sept 2017 # if you have any questions please email getkellygarner@gmail.com # for each experiment I # load the pre-cleaned data # summarise data for plotting # conduct for RT and accuracy separately # - an NHST mixed models analysis (did this out of curiosity to see if it # corroborates the Bayesian approach - it does) # - a Bayesian analysis to find evidence for the winning model # use the winning model to predict data for plots # save to the workspace # EXP 1 - ##################################################################################### rm(list=ls()) #setwd("~/ADDBIAS_REPOS") # set this to own directory ### tried to use packrat but error with library creation. will come back to this and update # in the future - sorry world! # packrat::init(getwd()) # preserve package versions (start R from the project directory and # get directed to the package library for this project) library(wesanderson) #library(lm.beta) library(lme4) library(BayesFactor) library(plyr) #________________________________________________________________________________________________ load("EXP1/ANALYSIS/EXP1_ANALYSIS_MIXDMDLS_BIAS.R") # NOTE: this loads the workspace that has the outputs that this code was written to # produce using the dataset commented out below. # load("EXP1/ANALYSIS/exp3_clean_BS_fv_18_03_17.R") i.e. 
loading the R file will load the # entire workspace # DATA PREP # ____________________________________________________________________________________________ #### first, trim the data to remove correct RTs < or > 2.5 std devs from the mu, then calculate regressors data = dat data = data[data$cor_resp == data$resp, ] data = na.omit(data) tmp <- by(data.frame(rt=data$RT, sub=data$sub, fixprob=data$fixprob, value=data$value, valid=data$valid, cresp=data$cor_resp), list(data$loc_prob, data$value, data$session), trim) # remove RTs > or < 2.5 stdevs from the mu data = as.data.frame(do.call(rbind, tmp)) rm(tmp) data$prob[data$fixprob == "98:02" & data$valid == "valid"] = .98 data$prob[data$fixprob == "98:02" & data$valid == "invalid"] = .02 data$prob[data$fixprob == "94:06" & data$valid == "valid"] = .94 data$prob[data$fixprob == "94:06" & data$valid == "invalid"] = .06 data$prob[data$fixprob == "90:10" & data$valid == "valid"] = .9 data$prob[data$fixprob == "90:10" & data$valid == "invalid"] = .1 data$prob[data$fixprob == "80:20" & data$valid == "valid"] = .8 data$prob[data$fixprob == "80:20" & data$valid == "invalid"] = .2 data$prob[data$fixprob == "60:40" & data$valid == "valid"] = .6 data$prob[data$fixprob == "60:40" & data$valid == "invalid"] = .4 data$val_num[data$value == "high"] = 50 data$val_num[data$value == "low"] = 1 data$rel_num_comp[data$value == "high"] = 1 data$rel_num_comp[data$value == "low"] = 50 data$cue[data$valid == "valid" & data$value == "high"] = "h2l" data$cue[data$valid == "invalid" & data$value == "low"] = "h2l" data$cue[data$valid == "valid" & data$value == "low"] = "l2h" data$cue[data$valid == "invalid" & data$value == "high"] = "l2h" # SUMMARISE # __________________________________________________________________________________________________ ###### RTs sum.dat.all = ddply(data, .(sub, fixprob, value, valid), summarise, mu = mean(rt), r1 = val_num[1], r2 = rel_num_comp[1]) # turn data to wideform and save for Jane so she can use excel etc 
sum.dat.all.wide = reshape2::dcast(sum.dat.all, sub ~ fixprob + value + valid, value.var = "mu" ) write.csv(sum.dat.all.wide, file="exp1-RT-wide.csv") # PLOT AVERAGE DATA sum.dat.all.plot = ddply(sum.dat.all, .(fixprob, value, valid), summarise, mean = mean(mu), N = length(mu) ) # calc confidence intervals sum.dat.sub = ddply(sum.dat.all, .(sub), summarise, mean = mean(mu)) sum.dat.all.gmu = mean(sum.dat.sub$mean) sum.dat.err = sum.dat.all for (i in levels(sum.dat.err$sub)) sum.dat.err$mu[sum.dat.err$sub == i] = sum.dat.err$mu[sum.dat.err$sub == i] - sum.dat.sub$mean[sum.dat.sub$sub == i] + sum.dat.all.gmu # crit.t = 2.086 sum.dat.cis = ddply( sum.dat.err, .(value, fixprob, valid), summarise, ci = (sd(mu)/sqrt(length(mu))) * ( 9/8 ) * 2.086 ) ###### ACC acc.dat.all = ddply(dat, .( sub, fixprob, value, valid ), summarise, N=length(resp), acc = sum(cor_resp == resp, na.rm=T)/N) # make and save in wideform for Jane/SPSS/excel users acc.dat.all.wide = reshape2::dcast(acc.dat.all, sub ~ fixprob + value + valid, value.var = "acc" ) write.csv(acc.dat.all.wide, file="exp1-acc-wide.csv") acc.dat.sub = ddply(dat, .(sub), summarise, N = length(resp), acc = sum(cor_resp == resp, na.rm=T)/N) acc.dat.gmu = mean(acc.dat.sub$acc) acc.dat.plot = ddply(dat, .( fixprob, value, valid ), summarise, N=length(resp), acc = sum(cor_resp == resp, na.rm=T)/N) acc.dat.err = acc.dat.all for (i in levels(acc.dat.err)) acc.dat.err$acc[acc.dat.err$sub == i] = acc.dat.err$acc[acc.dat.err$sub == i] - acc.dat.sub$acc[acc.dat.sub$sub == i] + acc.dat.all.gmu acc.dat.cis = ddply( acc.dat.err, .(value, fixprob, valid), summarise, ci = (sd(acc)/sqrt(length(acc))) * ( 9/8 ) * 2.086 ) # ANALYSE RT AND ACCURACY DATA # ______________________________________________________________________________________________________________________________________ ##### NOTE - I PLAYED AROUD WITH NHST w LINEAR MIXED MODELS. NOT REPORTED IN PAPER, ONLY SUPPLEMENTARY. 
JUST WANTED TO # SEE IF IT CORROBORATED THE BAYES PICTURE (IT DOES) sum.dat.all$info_gain = rep(xs, each = 4, times = 21) ### 1 = check each variable contributes rt.full = lmer( mu ~ valid + info_gain + r1 + info_gain:valid + info_gain:r1 + (1|sub), REML = FALSE, data = sum.dat.all) # knock out valid rt.valid = lmer( mu ~ info_gain + r1 + info_gain:valid + info_gain:r1 + (1|sub), REML = FALSE, data = sum.dat.all) # knock out info rt.valinf = lmer( mu ~ valid + info_gain + r1 + info_gain:valid + (1|sub), REML = FALSE, data = sum.dat.all) rt.infgain = lmer( mu ~ valid + info_gain + r1 + (1|sub), REML = FALSE, data = sum.dat.all) rt.meinfgain = lmer( mu ~ valid + r1 + (1|sub), REML = FALSE, data = sum.dat.all) # knock out reward rt.rew.int = lmer( mu ~ valid + info_gain + info_gain:valid + (1|sub), REML = FALSE, data = sum.dat.all) e1.rt.a1 = anova(rt.full, rt.valid) # **** me valid e1.rt.a2 = anova(rt.full, rt.valinf ) e1.rt.a3 = anova( rt.valinf, rt.infgain) ### info * valid e1.rt.a4 = anova(rt.infgain, rt.meinfgain) e1.rt.a5 = anova(rt.valinf, rt.rew.int) # **** me value e1.rt.ps = c(2.2e-16, 0.3409, 0.002196, 0.1387, 2.977e-12 ) e1.rt.p.adj = p.adjust(e1.rt.ps, method = "fdr") e1.rt.p.win = which(e1.rt.p.adj < .05) summary(rt.full) # BAYESIAN ANALYSIS (reported in paper) rt.all.mods = generalTestBF( mu ~ valid * info_gain * r1 + sub, data = sum.dat.all, whichRandom = "sub", neverExclude="^sub$") rt.all.mods = recompute(rt.all.mods, iterations = 500000) # careful running this - takes a long time, reduce iterations if you # want to run it yourself in a speedier fashion rt.top = head(rt.all.mods) # comparing winning model to all models at top that included an interaction between value and either validity or info gain rt.bf2 = rt.top[1]/rt.top[2] # valid + info_gain + valid:info_gain + r1 + info_gain:r1 + sub rt.bf3 = rt.top[1]/rt.top[3] # valid + r1 + sub rt.bf4 = rt.top[1]/rt.top[4] # valid + info_gain + valid:info_gain + r1 + valid:r1 + sub rt.bf5 = 
rt.top[1]/rt.top[5] # valid + info_gain + r1 + sub rt.bf6 = rt.top[1]/rt.top[6] # valid + info_gain + valid:info_gain + r1 + valid:r1 + info_gain:r1 + sub rt.bf.for.plot = data.frame( BF = c(3.814597, 4.84, 5.189638, 13.53112, 17.23548), upper = c(3.814597 + (3.814597*.0015), 4.83534 + ( 4.83534*.004 ), 5.189638 + (5.189638*.0053), 13.53112 + (13.53112 * .0057), 17.23548 + (17.23548*.0072)), lower = c(3.814597 - (3.814597*.0015),4.83534 - ( 4.83534*.004 ), 5.189638 - (5.189638*.0053), 13.53112 - (13.53112 * .0057), 17.23548 - (17.23548*.0072)), names = c( "v*c + c*va", "v + va", "v*c + v*va", "v + c + va", "v*c + v*va + c*va" )) # NOW DO ACCURACY DATA acc.dat.all$info_gain = rep(xs, each = 4, times = 21) acc.dat.all$r1[acc.dat.all$value == "high"] = 50 acc.dat.all$r1[acc.dat.all$value == "low"] = 1 # AGAIN, NHST APPROACH FOR FUNSIES acc.full = lmer( acc ~ valid + info_gain + r1 + info_gain:valid + info_gain:r1 + (1|sub), REML = FALSE, data = acc.dat.all) # knock out valid acc.valid = lmer( acc ~ info_gain + r1 + info_gain:valid + info_gain:r1 + (1|sub), REML = FALSE, data = acc.dat.all) # knock out info acc.valinf = lmer( acc ~ valid + info_gain + r1 + info_gain:valid + (1|sub), REML = FALSE, data = acc.dat.all) acc.infgain = lmer( acc ~ valid + info_gain + r1 + (1|sub), REML = FALSE, data = acc.dat.all) acc.meinfgain = lmer( acc ~ valid + r1 + (1|sub), REML = FALSE, data = acc.dat.all) # knock out reward acc.rew.int = lmer( acc ~ valid + info_gain + info_gain:valid + (1|sub), REML = FALSE, data = acc.dat.all) e1.acc.a1 = anova( acc.full, acc.valid) # ******** e1.acc.a2 = anova(acc.full, acc.valinf ) e1.acc.a3 = anova( acc.valinf, acc.infgain) e1.acc.a4 = anova( acc.infgain, acc.meinfgain) e1.acc.a5 = anova( acc.valinf, acc.rew.int) # ****** e1.acc.ps = c( 8.29e-14, 0.3209, 0.07941, 0.1635, 0.0004885) e1.acc.p.adj = p.adjust(e1.acc.ps, method = "fdr") e1.acc.p.win = which(e1.acc.p.adj < .05) summary( acc.full ) # BAYESIAN ANALYSIS - in paper acc.all.mods = 
generalTestBF( acc ~ valid * info_gain * r1 + sub, data = acc.dat.all, whichRandom = "sub", neverExclude="^sub$") acc.all.mods = recompute(acc.all.mods, iterations = 500000) acc.top = head(acc.all.mods) # win = valid + r1 + sub acc.bf2 = acc.top[1]/acc.top[2] # valid + info_gain + r1 + sub acc.bf3 = acc.top[1]/acc.top[3] # valid + info_gain + valid:info_gain + r1 + sub acc.bf4 = acc.top[1]/acc.top[4] # valid + r1 + valid:r1 + sub acc.bf5 = acc.top[1]/acc.top[5] # valid + info_gain + r1 + info_gain:r1 + sub acc.bf6 = acc.top[1]/acc.top[6] # valid + info_gain + valid:info_gain + r1 + info_gain:r1 + sub acc.bf.for.plot = data.frame( BF = c(2.57, 3.187716, 5.694131, 8.452145 , 9.173843), upper = c(2.57 + (2.57*.0068), 3.187716 + ( 3.187716*.0078 ), 5.694131 + (5.694131*.0066), 8.452145 + (8.452145 * .0086), 9.173843 + (9.173843*.0095)), lower = c(2.57 - (2.57*.0068), 3.187716 - ( 3.187716*.0078 ), 5.694131 - (5.694131*.0066), 8.452145 - (8.452145 * .0086), 9.173843 - (9.173843*.0095)), names = c( "v + c + va", "v*c + va", "v*va", "v + c*va", "v*c + c*va")) ################################################################################################ ########### PLOT DATA PAPER #################################################################### ################################################################################################ # 1 - get data together # 2 - predict based on winning model # JUST PREDICTING FIXED EFFECTS FOR PLOT win.mod.rt = lm( mu ~ valid + info_gain + valid:info_gain + r1, data = sum.dat.all) # top model sum.dat.all.plot$info_gain = rep(xs, each = 4) sum.dat.all.plot$r1 = rep(c(50,1), each = 2, times = 5) sum.dat.all.plot$predict = predict( win.mod.rt, sum.dat.all.plot ) win.mod.acc = lm( acc ~ valid + r1, data = acc.dat.all) # top model acc.dat.plot$info_gain = rep(xs, each = 4) acc.dat.plot$r1 = rep(c(50,1), each = 2, times = 5) acc.dat.plot$predict = predict( win.mod.acc, acc.dat.plot) # 
save.image("EXP1/ANALYSIS/EXP1_ANALYSIS_MIXDMDLS_BIAS.R") # run this line if you want to save new # stuff ################# EXPERIMENT ONE ANALYSIS COMPLETE - HUZZAH! # EXP 2 #________________________________________________________________________________________________ rm(list=ls()) library(wesanderson) library(lm.beta) library(lme4) library(BayesFactor) library(plyr) # EXP 2 # IS AN ADDITIVE OR AN INTERACTIVE MODEL BETTER TO ACCOUNT FOR DATA? # set to current directory ##### trim functions - remove RTs 2.5 standard deviations above and below the mean source("EXP2/ANALYSIS/trim_functions.R") # NOTE: this loads the workspace that has the outputs that this code was written to # produce using the dataset commented out below. # load("EXP2/ANALYSIS/exp2_clean_BS_v1_28_02_17") i.e. same principle as exp 1 load("EXP2/ANALYSIS/EXP2_ANALYSIS_MIXDMDLS_BIAS.R") data = dat ### trim rts tmp <- by(data.frame(rt=data$RT, sub=data$sub, loc_prob=data$loc_prob, value=data$value, rew_cond=data$rew_cond, valid = data$valid, fixprob = data$fixprob), list(data$sub, data$loc_prob, data$value, data$rew_cond), trim) # remove RTs > or < 2.5 stdevs from the mu data = as.data.frame(do.call(rbind, tmp)) data = data[!is.na(data$rt),] # remove na's data$loc_prob_fact = data$loc_prob data$loc_prob = varhandle::unfactor(data$loc_prob) data$val_num[data$value == "high"] = 50 data$val_num[data$value == "low"] = 1 data$rel_num_comp[data$value == "high"] = 1 data$rel_num_comp[data$value == "low"] = 50 data$prob[data$fixprob == "80:20" & data$valid == "valid"] = .8 data$prob[data$fixprob == "80:20" & data$valid == "invalid"] = .2 data$prob[data$fixprob == "60:40" & data$valid == "valid"] = .6 data$prob[data$fixprob == "60:40" & data$valid == "invalid"] = .4 data$cue[data$valid == "valid" & data$value == "high"] = "h2l" data$cue[data$valid == "invalid" & data$value == "low"] = "h2l" data$cue[data$valid == "valid" & data$value == "low"] = "l2h" data$cue[data$valid == "invalid" & data$value == "high"] 
= "l2h" # SUMMARISE AND GET DATA TO PLOT # __________________________________________________________________________________________________ ###### RTs sum.dat.all = ddply(data, .(sub, fixprob, value, valid, rew_cond), summarise, mu = mean(rt), r1 = val_num[1], r2 = rel_num_comp[1]) sum.dat.all.wide = reshape2::dcast(sum.dat.all, sub ~ fixprob + value + valid + rew_cond, value.var = "mu" ) write.csv(sum.dat.all.wide, file="exp2-RT-wide.csv") # DATA FOR PLOTTING sum.dat.all.plot = ddply(sum.dat.all, .(fixprob, value, valid, rew_cond), summarise, mean = mean(mu), N = length(mu) ) sum.dat.sub = ddply(sum.dat.all, .(sub), summarise, mean = mean(mu)) sum.dat.all.gmu = mean(sum.dat.sub$mean) sum.dat.err = sum.dat.all for (i in levels(sum.dat.err$sub)) sum.dat.err$mu[sum.dat.err$sub == i] = sum.dat.err$mu[sum.dat.err$sub == i] - sum.dat.sub$mean[sum.dat.sub$sub == i] + sum.dat.all.gmu # crit.t = 2.086 sum.dat.cis = ddply( sum.dat.err, .(value, fixprob, valid, rew_cond), summarise, ci = (sd(mu)/sqrt(length(mu))) * ( 3/2 ) * 2.086 ) ###### ACC acc.dat.all = ddply(dat, .( sub, fixprob, value, valid, rew_cond ), summarise, N=length(resp), acc = sum(cor_resp == resp, na.rm=T)/N) acc.dat.all.wide = reshape2::dcast(acc.dat.all, sub ~ fixprob + value + valid + rew_cond, value.var = "acc" ) write.csv(acc.dat.all.wide, file="exp2-acc-wide.csv") acc.dat.sub = ddply(dat, .(sub), summarise, N = length(resp), acc = sum(cor_resp == resp, na.rm=T)/N) acc.dat.gmu = mean(acc.dat.sub$acc) acc.dat.plot = ddply(dat, .( fixprob, value, valid, rew_cond ), summarise, N=length(resp), acc = sum(cor_resp == resp, na.rm=T)/N) acc.dat.err = acc.dat.all for (i in levels(acc.dat.err)) acc.dat.err$acc[acc.dat.err$sub == i] = acc.dat.err$acc[acc.dat.err$sub == i] - acc.dat.sub$acc[acc.dat.sub$sub == i] + acc.dat.all.gmu acc.dat.cis = ddply( acc.dat.err, .(value, fixprob, valid, rew_cond), summarise, ci = (sd(acc)/sqrt(length(acc))) * ( 3/2 ) * 2.086 ) # ANALYSE RT AND ACCURACY DATA; # 
______________________________________________________________________________________________________________________________________ sum.dat.all$info_gain = rep(xs, each = 8, times = 26) ### 1 = check each variable contributes rt.full = lmer( mu ~ valid + r1 + info_gain + rew_cond + valid:info_gain + info_gain:r1 + info_gain:rew_cond + r1:rew_cond + info_gain:r1:rew_cond + (1|sub), REML = FALSE, data = sum.dat.all) ### drop interactions using info gain rt.d3way = lmer( mu ~ valid + r1 + info_gain + rew_cond + valid:info_gain + info_gain:r1 + info_gain:rew_cond + r1:rew_cond + (1|sub), REML = FALSE, data = sum.dat.all) rt.dinf.gain = lmer( mu ~ valid + r1 + info_gain + rew_cond + valid:info_gain + info_gain:r1 + r1:rew_cond + (1|sub), REML = FALSE, data = sum.dat.all) rt.dinf.r1 = lmer( mu ~ valid + r1 + info_gain + rew_cond + valid:info_gain + r1:rew_cond + (1|sub), REML = FALSE, data = sum.dat.all) rt.dinf.val = lmer( mu ~ valid + r1 + info_gain + rew_cond + r1:rew_cond + (1|sub), REML = FALSE, data = sum.dat.all) rt.dme.inf = lmer( mu ~ valid + r1 + rew_cond + r1:rew_cond + (1|sub), REML = FALSE, data = sum.dat.all) #### now drop validity rt.dval = lmer( mu ~ r1 + rew_cond + r1:rew_cond + (1|sub), REML = FALSE, data = sum.dat.all) #### drop rew by rew_cond int rt.dr1.rewcond = lmer( mu ~ valid + r1 + rew_cond + (1|sub), REML = FALSE, data = sum.dat.all) #### drop main effect of rew cond rt.drewcond = lmer( mu ~ valid + r1 + (1|sub), REML = FALSE, data = sum.dat.all) #### drop main effect of value rt.drewr1 = lmer( mu ~ valid + rew_cond + (1|sub), REML = FALSE, data = sum.dat.all) e2.rt.a1 = anova(rt.full, rt.d3way) # info gain does not interact with rew_cond and reward e2.rt.a2 = anova(rt.d3way, rt.dinf.gain) # info gain does not interact with rew_cond - decay v fixed e2.rt.a3 = anova(rt.dinf.gain, rt.dinf.r1) # info gain does not interact with value e2.rt.a4 = anova(rt.dinf.r1, rt.dinf.val ) # info gain does not interact with validity e2.rt.a5 = 
anova(rt.dinf.val, rt.dme.inf ) # no main effect of info gain e2.rt.a6 = anova(rt.dme.inf, rt.dval) # main effect validity ******** e2.rt.a7 = anova(rt.dme.inf, rt.dr1.rewcond) # no interaction with reward cond e2.rt.a8 = anova(rt.dr1.rewcond, rt.drewcond) # main effect of reward condition ****** e2.rt.a9 = anova(rt.dr1.rewcond, rt.drewr1) # main effect of loc-value*** e2.rt.p = c( 0.9221, 0.09444, 0.9073, 0.09309, 0.9344, 3.635e-10, 0.4477, 2.406e-09, 0.0004157) e2.rt.p = p.adjust(e2.rt.p, method = "fdr") e2.rt.win = which(e2.rt.p < .05) ##### BAYES ANALYSIS rt.all.mods = generalTestBF( mu ~ valid * info_gain * r1 * rew_cond + sub, data = sum.dat.all, whichRandom = "sub", neverExclude="^sub$" ) rt.all.mods = recompute(rt.all.mods, iterations = 500000) rt.top = head(rt.all.mods) # valid + r1 + rew_cond + sub rt.bf2 = rt.top[1]/rt.top[2] # valid + r1 + valid:r1 + rew_cond + sub rt.bf3 = rt.top[1]/rt.top[3] # valid + r1 + rew_cond + r1:rew_cond + sub rt.bf4 = rt.top[1]/rt.top[4] # valid + info_gain + r1 + rew_cond + sub rt.bf5 = rt.top[1]/rt.top[5] # valid + r1 + rew_cond + valid:rew_cond + sub rt.bf6 = rt.top[1]/rt.top[6] # valid + info_gain + valid:info_gain + r1 + rew_cond + sub rt.bf.for.plot = data.frame( BF = c(4.645585, 5.031692, 6.63436, 6.642919, 8.969172), lower = c(4.645585 - (4.645585*.024), 5.031692 - (5.031692*.02), 6.63436 - (6.63436*.018), 6.642919 - (6.642919*.019), 8.969172 - (8.969172*.018)), upper = c(4.645585 + (4.645585*.024), 5.031692 + (5.031692*.02), 6.63436 + (6.63436*.018), 6.642919 + (6.642919*.019), 8.969172 + (8.969172*.018)), names = c("v*va + rc", "v + va*rc", "v + c + va + rc", "v*rc + val", "v*c + va + rc")) # accuracy model to report values in paper acc.report = lmer( acc ~ valid*info_gain + rew_cond + r1 + (1|sub), REML = FALSE, data = acc.dat.all) # now accuracy acc.dat.all$info_gain = rep(xs, each = 8, times = 26) acc.dat.all$r1[acc.dat.all$value == "high"] = 50 acc.dat.all$r1[acc.dat.all$value == "low"] = 1 acc.full = lmer( acc 
~ valid + r1 + info_gain + rew_cond + valid:info_gain + info_gain:r1 + info_gain:rew_cond + r1:rew_cond + info_gain:r1:rew_cond + (1|sub), REML = FALSE, data = acc.dat.all) ### drop interactions using info gain acc.d3way = lmer( acc ~ valid + r1 + info_gain + rew_cond + valid:info_gain + info_gain:r1 + info_gain:rew_cond + r1:rew_cond + (1|sub), REML = FALSE, data = acc.dat.all) acc.dinf.gain = lmer( acc ~ valid + r1 + info_gain + rew_cond + valid:info_gain + info_gain:r1 + r1:rew_cond + (1|sub), REML = FALSE, data = acc.dat.all) acc.dinf.r1 = lmer( acc ~ valid + r1 + info_gain + rew_cond + valid:info_gain + r1:rew_cond + (1|sub), REML = FALSE, data = acc.dat.all) acc.dinf.val = lmer( acc ~ valid + r1 + info_gain + rew_cond + r1:rew_cond + (1|sub), REML = FALSE, data = acc.dat.all) acc.dme.inf = lmer( acc ~ valid + r1 + rew_cond + r1:rew_cond + (1|sub), REML = FALSE, data = acc.dat.all) #### now drop validity acc.dval = lmer( acc ~ r1 + rew_cond + r1:rew_cond + (1|sub), REML = FALSE, data = acc.dat.all) #### drop rew by rew_cond int acc.dr1.rewcond = lmer( acc ~ valid + r1 + rew_cond + (1|sub), REML = FALSE, data = acc.dat.all) #### drop main effect of rew cond acc.drewcond = lmer( acc ~ valid + r1 + (1|sub), REML = FALSE, data = acc.dat.all) #### drop main effect of value acc.drewr1 = lmer( acc ~ valid + rew_cond + (1|sub), REML = FALSE, data = acc.dat.all) e2.acc.a1 = anova(acc.full, acc.d3way) # info gain does not interact with rew_cond and reward e2.acc.a2 = anova(acc.d3way, acc.dinf.gain) # info gain does not interact with rew_cond - decay v fixed e2.acc.a3 = anova(acc.dinf.gain, acc.dinf.r1) # info gain does not interact with value e2.acc.a4 = anova(acc.dinf.r1, acc.dinf.val ) # info gain DOES with validity e2.acc.a5 = anova(acc.dinf.val, acc.dme.inf ) # MAIN EFFECT OF INFO GAIN**** e2.acc.a6 = anova(acc.dme.inf, acc.dval) # main effect validity**** e2.acc.a7 = anova(acc.dme.inf, acc.dr1.rewcond) # no interaction with reward cond e2.acc.a8 = 
anova(acc.dr1.rewcond, acc.drewcond) # main effect of reward condition**** e2.acc.a9 = anova(acc.dr1.rewcond, acc.drewr1) # main effect of loc-value e2.acc.p = c( 0.9911, 0.4061, 0.999, 0.03348, 0.007604, 8.222e-09, 0.9816, 1.031e-08, 0.03825 ) e2.acc.p.adj = p.adjust(e2.acc.p, method = "fdr") e2.acc.p.win = which( e2.acc.p.adj < .05 ) acc.all.mods = generalTestBF( acc ~ valid * info_gain * r1 * rew_cond + sub, data = acc.dat.all, whichRandom = "sub", neverExclude="^sub$" ) acc.all.mods = recompute(acc.all.mods, iterations = 500000) acc.top = head(acc.all.mods) # valid + info_gain + valid:info_gain + r1 + rew_cond + sub acc.bf2 = acc.top[1]/acc.top[2] # valid + info_gain + valid:info_gain + rew_cond + sub acc.bf3 = acc.top[1]/acc.top[3] # valid + info_gain + r1 + rew_cond + sub acc.bf4 = acc.top[1]/acc.top[4] # valid + info_gain + rew_cond + sub acc.bf5 = acc.top[1]/acc.top[5] # valid + info_gain + valid:info_gain + r1 + valid:r1 + rew_cond + sub acc.bf6 = acc.top[1]/acc.top[6] # valid + info_gain + valid:info_gain + r1 + rew_cond + info_gain:rew_cond + sub ### doing specific models because evidence not strong bf_int1 = lmBF( acc ~ valid + info_gain + r1 + rew_cond + valid:info_gain + info_gain:r1:rew_cond + sub, data = acc.dat.all, whichRandom = "sub" ) bf_int1 = recompute(bf_int1, iterations = 500000) acc.bf7 = acc.top[1]/bf_int1 bf_int2 = lmBF( acc ~ valid + info_gain + r1 + rew_cond + valid:info_gain + info_gain:r1:rew_cond:valid + sub, data = acc.dat.all, whichRandom = "sub" ) bf_int2 = recompute(bf_int2, iterations = 500000) acc.bf8 = acc.top[1]/bf_int2 acc.bf.for.plot = data.frame( BF = c(1.561972, 1.624413, 2.070495, 2.646015, 3.292799, 4.395263, 3.935153), lower = c(1.561972 - (1.561972*.0075), 1.624413 - (1.624413*.0075), 2.070495 - (2.070495*.016), 2.646015 - (2.646015*.092), 3.292799 - (3.292799*.092), 4.395263 - (4.395263*.053), 3.935153 - (3.935153*.05)), upper = c(1.561972 + (1.561972*.0075), 1.624413 + (1.624413*.0075), 2.070495 + (2.070495*.016), 
2.646015 + (2.646015*.092), 3.292799 + (3.292799*.092), 4.395263 + (4.395263*.053), 3.935153 + (3.935153*.05)), names = c("v*c + rc", "v + c + va + rc", "v + c + rc", "v*c + v*va + rc", "v*c + c*rc + va", "v*c + c*va*rc", "v*c + vc*va*rc*v" )) ################################################################################################ ########### PREDICTED DATA FOR PLOTS #################################################################### ################################################################################################ # 2 PLOTS - 1 FOR RT AND 1 FOR ACCURACY # ROW 1 = RT: HIGH VALID & LOW INVALID, LOW VALID & HIGH INVALID, BAYES FACTORS - STATIC CONDITION # ROW 2 = SAME BUT DECAY CONDITION # 1 - get data together # 2 - predict based on winning model # JUST PREDICTING FIXED EFFECTS win.mod.rt = lm( mu ~ valid + r1 + rew_cond, data = sum.dat.all) sum.dat.all.plot$info_gain = rep(xs, each = 8) sum.dat.all.plot$r1 = rep(c(50,1), each = 4, times = 2) sum.dat.all.plot$predict = predict( win.mod.rt, sum.dat.all.plot ) win.mod.acc = lm( acc ~ rew_cond + valid + info_gain + valid:info_gain + r1, data = acc.dat.all) acc.dat.plot$info_gain = rep(xs, each = 8) acc.dat.plot$r1 = rep(c(50,1), each = 4, times = 2) acc.dat.plot$predict = predict( win.mod.acc, acc.dat.plot) # save.image("EXP2_ANALYSIS_MIXDMDLS_BIAS.R") uncomment and use to save new stuff
library(ggplot2) ## load data humChr <- c("chr1","chr2","chr3","chr4","chr5","chr6","chr7","chr8","chr9","chr10","chr11","chr12","chr13","chr14","chr15","chr16","chr17","chr18","chr19","chr20","chr21","chr22") for(i in 1:(length(humChr))){ if(i==1){ afYRI <- read.table(paste("chr1YRI.af", sep=""), head=T, sep = "\t") }else{ temp <- read.table(paste(humChr[i],"YRI.af", sep=""), head=T, sep = "\t") afYRI <- rbind(afYRI, temp) } } ## Figure S10 gcFreqTVcpg <- subset(afYRI, fType%in%c("A_G", "C_G", "G_G", "T_G")&mType=="TV")$freqGC fS10 <- ggplot() + geom_histogram(aes(x=gcFreqTVcpg,y=0.05*..density..),alpha=0.75,position='identity',binwidth=0.05) + geom_segment(aes(x = mean(gcFreqTVcpg) , y = 0, xend = mean(gcFreqTVcpg), yend = Inf), lty=1, col="black") + geom_segment(aes(x = median(gcFreqTVcpg) , y = 0, xend = median(gcFreqTVcpg), yend = Inf), lty=2, col="black") + xlab(expression(italic(f[GC]))) + ylab("Proportion") + theme_bw() ggsave("figS10.pdf", fS10, width=10, height=7, units = "cm")
/figS10.R
no_license
jbergman/gcDynamics
R
false
false
1,051
r
library(ggplot2) ## load data humChr <- c("chr1","chr2","chr3","chr4","chr5","chr6","chr7","chr8","chr9","chr10","chr11","chr12","chr13","chr14","chr15","chr16","chr17","chr18","chr19","chr20","chr21","chr22") for(i in 1:(length(humChr))){ if(i==1){ afYRI <- read.table(paste("chr1YRI.af", sep=""), head=T, sep = "\t") }else{ temp <- read.table(paste(humChr[i],"YRI.af", sep=""), head=T, sep = "\t") afYRI <- rbind(afYRI, temp) } } ## Figure S10 gcFreqTVcpg <- subset(afYRI, fType%in%c("A_G", "C_G", "G_G", "T_G")&mType=="TV")$freqGC fS10 <- ggplot() + geom_histogram(aes(x=gcFreqTVcpg,y=0.05*..density..),alpha=0.75,position='identity',binwidth=0.05) + geom_segment(aes(x = mean(gcFreqTVcpg) , y = 0, xend = mean(gcFreqTVcpg), yend = Inf), lty=1, col="black") + geom_segment(aes(x = median(gcFreqTVcpg) , y = 0, xend = median(gcFreqTVcpg), yend = Inf), lty=2, col="black") + xlab(expression(italic(f[GC]))) + ylab("Proportion") + theme_bw() ggsave("figS10.pdf", fS10, width=10, height=7, units = "cm")
#!/usr/bin/env Rscript echo "Program about to run" Rscript --vanilla simulation.R 500 0 0 "homozygous" "heterozygous" Rscript --vanilla simulation.R 500 0 0 "heterozygous" "heterozygous" Rscript --vanilla simulation.R 500 0 0 "homozygous" "homozygous" Rscript --vanilla simulation.R 500 0 0 "heterozygous" "homozygous" Rscript --vanilla simulation.R 500 0.01 0 "homozygous" "heterozygous" Rscript --vanilla simulation.R 500 0.03 0 "homozygous" "heterozygous" Rscript --vanilla simulation.R 500 0.05 0 "homozygous" "heterozygous" Rscript --vanilla simulation.R 500 0.01 0 "heterozygous" "heterozygous" Rscript --vanilla simulation.R 500 0.03 0 "heterozygous" "heterozygous" Rscript --vanilla simulation.R 500 0.05 0 "heterozygous" "heterozygous" echo "Finished a_sim_1-1"
/Project/Code/a_sim_1-1.sh
no_license
matthewcampos/CMEECourseWork
R
false
false
777
sh
#!/usr/bin/env Rscript echo "Program about to run" Rscript --vanilla simulation.R 500 0 0 "homozygous" "heterozygous" Rscript --vanilla simulation.R 500 0 0 "heterozygous" "heterozygous" Rscript --vanilla simulation.R 500 0 0 "homozygous" "homozygous" Rscript --vanilla simulation.R 500 0 0 "heterozygous" "homozygous" Rscript --vanilla simulation.R 500 0.01 0 "homozygous" "heterozygous" Rscript --vanilla simulation.R 500 0.03 0 "homozygous" "heterozygous" Rscript --vanilla simulation.R 500 0.05 0 "homozygous" "heterozygous" Rscript --vanilla simulation.R 500 0.01 0 "heterozygous" "heterozygous" Rscript --vanilla simulation.R 500 0.03 0 "heterozygous" "heterozygous" Rscript --vanilla simulation.R 500 0.05 0 "heterozygous" "heterozygous" echo "Finished a_sim_1-1"
setwd("/Users/admin/Manuscripts/75 Mat4DiseaseEnhancerPrediction (DEP)/Rcode/dthybrid/") pkgs <- c( "matrixcalc", "data.table", "Rcpp", "ROCR", "Bolstad2", "MESS", "nloptr", "cluster", "kernlab", "plyr", "parallel", "snow", "foreach", "gtools", "DTHybrid", "data.table", "mltools", "Metrics" ) rPkgs <- lapply(pkgs, require, character.only = TRUE) ## source required R files rSourceNames <- c( "doCrossValidation.R" ) rSN <- lapply(rSourceNames, source, verbose = FALSE) # ## sourceCPP required C++ files # cppSourceNames <- c("fastKF.cpp", # "fastKgipMat.cpp", # "log1pexp.cpp", # "sigmoid.cpp", # "fastSolve.cpp") # cppSN <- lapply(cppSourceNames, sourceCpp, verbose = FALSE) sd <- read.table("/Users/admin/Manuscripts/75 Mat4DiseaseEnhancerPrediction (DEP)/Data/UnMatched/DOIDSimMat.txt",check.names = FALSE) sd <- as.matrix(sd) #st <- read.table("/Users/admin/Manuscripts/75 Mat4DiseaseEnhancerPrediction (DEP)/Data/UnMatched/EnhSimMat_SharedGene.txt",check.names = FALSE) st <- read.table("/Users/admin/Manuscripts/75 Mat4DiseaseEnhancerPrediction (DEP)/Data/UnMatched/EnhSimMat_Seq.txt",check.names = FALSE) st <- as.matrix(st) Y <- read.table("/Users/admin/Manuscripts/75 Mat4DiseaseEnhancerPrediction (DEP)/Data/UnMatched/DOIDEnhMat.txt",check.names = FALSE) Y <- as.matrix(Y) db <- "MatchedHeterNet" #db <- "MatchedHomoNet" switch (db, MatchedHeterNet = { flush.console() sharedRows <- intersect(rownames(sd), rownames(Y)) sharedCols<-intersect(colnames(st), colnames(Y)) sdm<-sd[sharedRows, sharedRows] stm<-st[sharedCols, sharedCols] Ym<-Y[sharedRows,sharedCols] }, MatchedHomoNet = { flush.console() sharedRows <- intersect(rownames(sd), rownames(Y)) sharedCols<-intersect(colnames(st), colnames(Y)) sdm<-sd stm<-st Ym<-matrix(0, nrow = dim(sd)[1], ncol = dim(st)[1]) rownames(Ym) <-rownames(sd) colnames(Ym) <-colnames(st) Ym[sharedRows,sharedCols]<-Y[sharedRows,sharedCols] }, stop("db should be one of the follows: {,GDSC_IC50,GDSC_AUC,CCLE_IC50}\n") ) NuRows=dim(Ym)[1] AUROCAll <- 
vector() AUPRAll <- vector() AUROCAll.SD <- vector() AUPRAll.SD <- vector() FPRAll <-vector() TPRAll <-vector() RecAll <-vector() PreAll <-vector() for (ii in 1:NuRows){ Oneidxs <-which(Ym[ii,]>0) kfold <-length(Oneidxs) if(kfold<2) next() AUROCDrug <- vector(length = kfold) AUPRDrug <- vector(length = kfold) FPRDrug <-vector() TPRDrug <-vector() RecDrug <-vector() PreDrug <-vector() for (j in 1:kfold) { print(paste("Drug",ii,":", rownames(Ym)[ii], "kfold:", j, "/", kfold)) flush.console() testLabel <-Ym[ii,] Yfold <- Ym Yfold[ii,Oneidxs[j]] <-0 #Ypred <- computeRecommendation(Yfold, lambda=0.5, alpha=0.5, S=NA, S1=NA, cl=NA) Ypred <- computeRecommendation(Yfold, lambda=0.5, alpha=0.5, S=sdm, S1=stm, cl=NA) score <- Ypred[ii,] ########## Start Evaluation pred <- prediction(score,testLabel) #AUCROC perfROC <- performance(pred,"tpr","fpr") fpr<-perfROC@x.values[[1]] tpr<-perfROC@y.values[[1]] # print(length(fpr)) # print(length(tpr)) FPRDrug<-c(FPRDrug,fpr) TPRDrug<-c(TPRDrug,tpr) # plot(perfROC,colorize=FALSE) perfAUC <- performance(pred,"auc") auc<-perfAUC@y.values[[1]] #AUCPR perfPR <- performance(pred,"prec","rec") # plot(perfPR,colorize=FALSE) rec<-perfPR@x.values[[1]]#Recall pre<-perfPR@y.values[[1]]#Precision # aupr <- try(MESS::auc(rec, pre, type = 'spline'), silent = TRUE) aupr <- Bolstad2::sintegral(rec, pre)$int RecDrug <-c(RecDrug, rec) PreDrug <-c(PreDrug, pre) ########## End Evaluation print(auc) AUROCDrug[j] <- auc print(aupr) AUPRDrug[j] <- aupr } print(paste("Drug", ii, ":", mean(AUROCDrug))) plot(FPRDrug, TPRDrug) AUROCAll <- c(AUROCAll, mean(AUROCDrug)) AUROCAll.SD <- c(AUROCAll.SD, sd(AUROCDrug)) AUPRAll <- c(AUPRAll, mean(AUPRDrug)) AUPRAll.SD <- c(AUPRAll.SD, sd(AUPRDrug)) FPRAll<-c(FPRAll,FPRDrug) TPRAll<-c(TPRAll,TPRDrug) RecAll<-c(RecAll,RecDrug) PreAll<-c(PreAll,PreDrug) } AUROCavg <- mean(AUROCAll) AUROCavg.sd <- sd(AUROCAll) AUPRavg <- mean(AUPRAll) AUPRavg.sd <- sd(AUPRAll) print("Final results") 
print(paste("AUROCavg:",AUROCavg,"AUROCavg.sd:",AUROCavg.sd)) print(paste("AUPRavg:", AUPRavg, "AUPRavg.sd:",AUPRavg.sd)) plot(FPRAll, TPRAll) plot(RecAll, PreAll) # save to file curDate <- format(Sys.time(), format = "%Y-%m-%d") curTime <- format(Sys.time(), format = "%H.%M.%S") savedFileName <- paste0(db, "_", curDate, "_", curTime, "_AUROC_", AUROCavg, "+-", "_SD_", AUROCavg.sd,".RData") cat("\n\n") print(savedFileName)
/Rcode/dthybrid/DTHbrid_LOOCV_eachrow.R
no_license
hauldhut/DEP
R
false
false
5,245
r
setwd("/Users/admin/Manuscripts/75 Mat4DiseaseEnhancerPrediction (DEP)/Rcode/dthybrid/") pkgs <- c( "matrixcalc", "data.table", "Rcpp", "ROCR", "Bolstad2", "MESS", "nloptr", "cluster", "kernlab", "plyr", "parallel", "snow", "foreach", "gtools", "DTHybrid", "data.table", "mltools", "Metrics" ) rPkgs <- lapply(pkgs, require, character.only = TRUE) ## source required R files rSourceNames <- c( "doCrossValidation.R" ) rSN <- lapply(rSourceNames, source, verbose = FALSE) # ## sourceCPP required C++ files # cppSourceNames <- c("fastKF.cpp", # "fastKgipMat.cpp", # "log1pexp.cpp", # "sigmoid.cpp", # "fastSolve.cpp") # cppSN <- lapply(cppSourceNames, sourceCpp, verbose = FALSE) sd <- read.table("/Users/admin/Manuscripts/75 Mat4DiseaseEnhancerPrediction (DEP)/Data/UnMatched/DOIDSimMat.txt",check.names = FALSE) sd <- as.matrix(sd) #st <- read.table("/Users/admin/Manuscripts/75 Mat4DiseaseEnhancerPrediction (DEP)/Data/UnMatched/EnhSimMat_SharedGene.txt",check.names = FALSE) st <- read.table("/Users/admin/Manuscripts/75 Mat4DiseaseEnhancerPrediction (DEP)/Data/UnMatched/EnhSimMat_Seq.txt",check.names = FALSE) st <- as.matrix(st) Y <- read.table("/Users/admin/Manuscripts/75 Mat4DiseaseEnhancerPrediction (DEP)/Data/UnMatched/DOIDEnhMat.txt",check.names = FALSE) Y <- as.matrix(Y) db <- "MatchedHeterNet" #db <- "MatchedHomoNet" switch (db, MatchedHeterNet = { flush.console() sharedRows <- intersect(rownames(sd), rownames(Y)) sharedCols<-intersect(colnames(st), colnames(Y)) sdm<-sd[sharedRows, sharedRows] stm<-st[sharedCols, sharedCols] Ym<-Y[sharedRows,sharedCols] }, MatchedHomoNet = { flush.console() sharedRows <- intersect(rownames(sd), rownames(Y)) sharedCols<-intersect(colnames(st), colnames(Y)) sdm<-sd stm<-st Ym<-matrix(0, nrow = dim(sd)[1], ncol = dim(st)[1]) rownames(Ym) <-rownames(sd) colnames(Ym) <-colnames(st) Ym[sharedRows,sharedCols]<-Y[sharedRows,sharedCols] }, stop("db should be one of the follows: {,GDSC_IC50,GDSC_AUC,CCLE_IC50}\n") ) NuRows=dim(Ym)[1] AUROCAll <- 
vector() AUPRAll <- vector() AUROCAll.SD <- vector() AUPRAll.SD <- vector() FPRAll <-vector() TPRAll <-vector() RecAll <-vector() PreAll <-vector() for (ii in 1:NuRows){ Oneidxs <-which(Ym[ii,]>0) kfold <-length(Oneidxs) if(kfold<2) next() AUROCDrug <- vector(length = kfold) AUPRDrug <- vector(length = kfold) FPRDrug <-vector() TPRDrug <-vector() RecDrug <-vector() PreDrug <-vector() for (j in 1:kfold) { print(paste("Drug",ii,":", rownames(Ym)[ii], "kfold:", j, "/", kfold)) flush.console() testLabel <-Ym[ii,] Yfold <- Ym Yfold[ii,Oneidxs[j]] <-0 #Ypred <- computeRecommendation(Yfold, lambda=0.5, alpha=0.5, S=NA, S1=NA, cl=NA) Ypred <- computeRecommendation(Yfold, lambda=0.5, alpha=0.5, S=sdm, S1=stm, cl=NA) score <- Ypred[ii,] ########## Start Evaluation pred <- prediction(score,testLabel) #AUCROC perfROC <- performance(pred,"tpr","fpr") fpr<-perfROC@x.values[[1]] tpr<-perfROC@y.values[[1]] # print(length(fpr)) # print(length(tpr)) FPRDrug<-c(FPRDrug,fpr) TPRDrug<-c(TPRDrug,tpr) # plot(perfROC,colorize=FALSE) perfAUC <- performance(pred,"auc") auc<-perfAUC@y.values[[1]] #AUCPR perfPR <- performance(pred,"prec","rec") # plot(perfPR,colorize=FALSE) rec<-perfPR@x.values[[1]]#Recall pre<-perfPR@y.values[[1]]#Precision # aupr <- try(MESS::auc(rec, pre, type = 'spline'), silent = TRUE) aupr <- Bolstad2::sintegral(rec, pre)$int RecDrug <-c(RecDrug, rec) PreDrug <-c(PreDrug, pre) ########## End Evaluation print(auc) AUROCDrug[j] <- auc print(aupr) AUPRDrug[j] <- aupr } print(paste("Drug", ii, ":", mean(AUROCDrug))) plot(FPRDrug, TPRDrug) AUROCAll <- c(AUROCAll, mean(AUROCDrug)) AUROCAll.SD <- c(AUROCAll.SD, sd(AUROCDrug)) AUPRAll <- c(AUPRAll, mean(AUPRDrug)) AUPRAll.SD <- c(AUPRAll.SD, sd(AUPRDrug)) FPRAll<-c(FPRAll,FPRDrug) TPRAll<-c(TPRAll,TPRDrug) RecAll<-c(RecAll,RecDrug) PreAll<-c(PreAll,PreDrug) } AUROCavg <- mean(AUROCAll) AUROCavg.sd <- sd(AUROCAll) AUPRavg <- mean(AUPRAll) AUPRavg.sd <- sd(AUPRAll) print("Final results") 
print(paste("AUROCavg:",AUROCavg,"AUROCavg.sd:",AUROCavg.sd)) print(paste("AUPRavg:", AUPRavg, "AUPRavg.sd:",AUPRavg.sd)) plot(FPRAll, TPRAll) plot(RecAll, PreAll) # save to file curDate <- format(Sys.time(), format = "%Y-%m-%d") curTime <- format(Sys.time(), format = "%H.%M.%S") savedFileName <- paste0(db, "_", curDate, "_", curTime, "_AUROC_", AUROCavg, "+-", "_SD_", AUROCavg.sd,".RData") cat("\n\n") print(savedFileName)
library(openair) library(plyr) library(dplyr) library(rmweather) library(ranger) library(magrittr) library(globals) library(future) library(foreach) library(iterators) library(parallel) library(doFuture) library(readxl) library(doParallel) plan(multicore) registerDoParallel(cores = detectCores()- 1) registerDoFuture() plan(multiprocess) filenamelist<-list("Milan_Rural") polllist<-list("no2","co","o3","nox") #polllist<-list("no2","co","o3") ncal=100 for (filename in filenamelist){ Dataraw1 <- read_excel(paste(filename,".xlsx",sep=''), sheet = "Sheet1", col_types = c("date", "numeric", "numeric", "text", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "text", "numeric", "numeric", "numeric", "numeric","numeric", "numeric", "numeric", "numeric", "numeric", "numeric")) Dataraw1$cluster<-as.factor(Dataraw1$cluster) Dataraw1$weekday<-as.factor(Dataraw1$weekday) Dataraw1 <- Dataraw1 %>% filter(!is.na(cluster)) Dataraw <- Dataraw1 %>% filter(date>="2018-12-01"& date <= "2019-05-31") for (poll in polllist){ r.min <- 0.1 perform<-matrix(data=NA,ncol=11,nrow=1) colnames(perform)<-c("default","n", "FAC2","MB", "MGE", "NMB", "NMGE", "RMSE", "r","COE", "IOA") for (i in as.numeric(1:ncal)){ set.seed(i) data_prepared <- Dataraw %>% filter(!is.na(ws)) %>% dplyr::rename(value = poll) %>% rmw_prepare_data(na.rm = TRUE,fraction = 0.7) set.seed(i) RF_model <- rmw_do_all( data_prepared, variables = c( "date_unix","day_julian", "weekday","hour", "temp", "RH", "wd", "ws","sp","cluster","tp","blh","tcc","ssr"),variables_sample=c("temp", "RH", "wd", "ws","sp","cluster","tp","blh","tcc","ssr"), n_trees = 300, n_samples = 300, verbose = TRUE ) testing_model <- rmw_predict_the_test_set(model = RF_model$model,df = RF_model$observations) model_performance<-modStats(testing_model, mod = "value", obs = "value_predict", statistic = c("n", "FAC2","MB", "MGE", "NMB", "NMGE", "RMSE", "r","COE", "IOA"), type = "default", rank.name = NULL) 
perform<-rbind(perform,model_performance) if (model_performance$r > r.min){ r.min <- model_performance$r RF_modelo <- RF_model} } save.image(file = paste(filename,"_",poll,"_RW_Short",".RData",sep="")) write.table(perform, file=paste(filename,"_",poll,"_RWPerformance_Short",".csv",sep=""), sep=",", row.names=FALSE) } }
/Data and code/2019/Milan_Rural/RWshort.R
no_license
songnku/COVID-19-AQ
R
false
false
2,920
r
library(openair) library(plyr) library(dplyr) library(rmweather) library(ranger) library(magrittr) library(globals) library(future) library(foreach) library(iterators) library(parallel) library(doFuture) library(readxl) library(doParallel) plan(multicore) registerDoParallel(cores = detectCores()- 1) registerDoFuture() plan(multiprocess) filenamelist<-list("Milan_Rural") polllist<-list("no2","co","o3","nox") #polllist<-list("no2","co","o3") ncal=100 for (filename in filenamelist){ Dataraw1 <- read_excel(paste(filename,".xlsx",sep=''), sheet = "Sheet1", col_types = c("date", "numeric", "numeric", "text", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "text", "numeric", "numeric", "numeric", "numeric","numeric", "numeric", "numeric", "numeric", "numeric", "numeric")) Dataraw1$cluster<-as.factor(Dataraw1$cluster) Dataraw1$weekday<-as.factor(Dataraw1$weekday) Dataraw1 <- Dataraw1 %>% filter(!is.na(cluster)) Dataraw <- Dataraw1 %>% filter(date>="2018-12-01"& date <= "2019-05-31") for (poll in polllist){ r.min <- 0.1 perform<-matrix(data=NA,ncol=11,nrow=1) colnames(perform)<-c("default","n", "FAC2","MB", "MGE", "NMB", "NMGE", "RMSE", "r","COE", "IOA") for (i in as.numeric(1:ncal)){ set.seed(i) data_prepared <- Dataraw %>% filter(!is.na(ws)) %>% dplyr::rename(value = poll) %>% rmw_prepare_data(na.rm = TRUE,fraction = 0.7) set.seed(i) RF_model <- rmw_do_all( data_prepared, variables = c( "date_unix","day_julian", "weekday","hour", "temp", "RH", "wd", "ws","sp","cluster","tp","blh","tcc","ssr"),variables_sample=c("temp", "RH", "wd", "ws","sp","cluster","tp","blh","tcc","ssr"), n_trees = 300, n_samples = 300, verbose = TRUE ) testing_model <- rmw_predict_the_test_set(model = RF_model$model,df = RF_model$observations) model_performance<-modStats(testing_model, mod = "value", obs = "value_predict", statistic = c("n", "FAC2","MB", "MGE", "NMB", "NMGE", "RMSE", "r","COE", "IOA"), type = "default", rank.name = NULL) 
perform<-rbind(perform,model_performance) if (model_performance$r > r.min){ r.min <- model_performance$r RF_modelo <- RF_model} } save.image(file = paste(filename,"_",poll,"_RW_Short",".RData",sep="")) write.table(perform, file=paste(filename,"_",poll,"_RWPerformance_Short",".csv",sep=""), sep=",", row.names=FALSE) } }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/simpleTests.R \name{cofeeConsumption} \alias{cofeeConsumption} \title{Function to screen for higher coffee consumption} \usage{ cofeeConsumption(myDNA) } \arguments{ \item{myDNA}{(character) path to the genotype file} } \description{ Function to screen for higher coffee consumption } \details{ SOURCE: 23andMe report INFO: Report is based on genetic variants near two genes that play a role in how your body handles caffeine. The first gene, CYP1A2, contains instructions for an enzyme that breaks down 95% of the caffeine you consume. The second gene, AHR (rs4410790), contains instructions for a protein that ramps up production of the CYP1A2 enzyme. Variants in these genes may affect how quickly the body breaks down and clears away caffeine. NOTE! MyHeritage genotypes do not report this SNP, however missing genotypes can be imputed and accessed by DNALand imputation tool The CYP1A2 gene (rs2472297) contains instructions for an enzyme that breaks down many substances, including caffeine. This enzyme is a member of a large family of enzymes called cytochrome P450. Presence of variants: likely higher coffee consumption. !!!! myAllele column reports whether risk allele was identified in my genome } \examples{ \dontrun{ # example myHeritage library(myDNA) library(plyr) library(stringr) Genome="/data/akalin/Projects/AAkalin_myDNA/Data/MyHeritage/MyHeritage_raw_dna_dataInga/MyHeritage_raw_dna_data.csv" myDNA <- importDNA(myGenotypes = Genome,type = "myHeritage" ) cofeeConsumption(myDNA) } } \author{ Inga Patarcic }
/man/cofeeConsumption.Rd
no_license
IngaPa/myDNAS
R
false
true
1,622
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/simpleTests.R \name{cofeeConsumption} \alias{cofeeConsumption} \title{Function to screen for higher coffee consumption} \usage{ cofeeConsumption(myDNA) } \arguments{ \item{myDNA}{(character) path to the genotype file} } \description{ Function to screen for higher coffee consumption } \details{ SOURCE: 23andMe report INFO: Report is based on genetic variants near two genes that play a role in how your body handles caffeine. The first gene, CYP1A2, contains instructions for an enzyme that breaks down 95% of the caffeine you consume. The second gene, AHR (rs4410790), contains instructions for a protein that ramps up production of the CYP1A2 enzyme. Variants in these genes may affect how quickly the body breaks down and clears away caffeine. NOTE! MyHeritage genotypes do not report this SNP, however missing genotypes can be imputed and accessed by DNALand imputation tool The CYP1A2 gene (rs2472297) contains instructions for an enzyme that breaks down many substances, including caffeine. This enzyme is a member of a large family of enzymes called cytochrome P450. Presence of variants: likely higher coffee consumption. !!!! myAllele column reports whether risk allele was identified in my genome } \examples{ \dontrun{ # example myHeritage library(myDNA) library(plyr) library(stringr) Genome="/data/akalin/Projects/AAkalin_myDNA/Data/MyHeritage/MyHeritage_raw_dna_dataInga/MyHeritage_raw_dna_data.csv" myDNA <- importDNA(myGenotypes = Genome,type = "myHeritage" ) cofeeConsumption(myDNA) } } \author{ Inga Patarcic }
#' Returns a line from a file. #' #' @param linenumber is the line number of the line to grab. #' @param file is the file to grab the line from. #' @return A line of text. #' @export get.line <- function(linenumber, file) readLines(file)[linenumber]
/R/get.line.R
no_license
johnjosephhorton/JJHmisc
R
false
false
255
r
#' Returns a line from a file. #' #' @param linenumber is the line number of the line to grab. #' @param file is the file to grab the line from. #' @return A line of text. #' @export get.line <- function(linenumber, file) readLines(file)[linenumber]
a.image <- function(Q, color = rev(heat.colors(100)),  # alt: paste0("gray", 100:0)
                    numbers = F, num.cex = 1, label.axis = F,
                    numcolor = "black", axis.num = T, ...) {
  # Display matrix Q as an image (row 1 at the top), optionally labelling
  # the axes with column/row sums and printing each cell's value.
  image(t(apply(Q, 2, rev)), yaxt = "n", xaxt = "n", col = color, ...)
  ec <- apply(Q, 2, sum)  # column sums, shown on the bottom axis
  er <- apply(Q, 1, sum)  # row sums, shown on the left axis
  seq1 <- seq(0, 1, len = length(ec))
  seq2 <- seq(1, 0, len = length(er))
  seq4 <- seq(0, 1, len = nrow(Q))
  # BUG FIX: the original labelled with `1:n` where `n` was an undefined
  # global; label the rows 1..nrow(Q) instead.
  if (label.axis) axis(4, at = seq4, lab = 1:nrow(Q), las = 2, ...)
  if (axis.num) {
    axis(1, at = seq1, lab = ec)
    axis(2, at = seq2, lab = er, las = 2, ...)
  }
  if (numbers) {
    # Print every cell value at its cell centre (column-major order).
    xx <- rep(1:ncol(Q), each = nrow(Q))
    yy <- rep(1:nrow(Q), ncol(Q))
    text(seq1[xx], seq2[yy], c(Q), col = numcolor, font = 2, cex = num.cex)
  }
}

count.down <- function(old.time, i, B) {
  # Print an in-place progress line with an estimated time remaining.
  # `old.time` is the Sys.time() taken just before iteration `i` of `B`;
  # the last iteration's duration is extrapolated over the remaining ones.
  prog <- round(100 * i / B, 4)
  new.time <- Sys.time()
  time.diff <- as.numeric(new.time - old.time)
  time.remain <- time.diff * (B - i)
  if (time.remain < 60) {
    secs <- round(time.remain)
    time.remain <- paste0(secs, "s ")
  } else if (time.remain < 3600) {
    mins <- round(time.remain %/% 60)
    secs <- round(time.remain %% 60)
    time.remain <- paste0(mins, "m ", secs, "s ")
  } else {
    hrs <- round(time.remain %/% 3600)
    mins <- round((time.remain %% 3600) %/% 60)
    time.remain <- paste0(hrs, "h ", mins, "m ")
  }
  cat(paste0("\rProgress: ", prog, "%. Time Remaining: ", time.remain, " "))
  if (i == B) cat("100%\n")
}
# Example:
#   B <- 5000
#   for (i in 1:B) {
#     old.time <- Sys.time()
#     Sys.sleep(1)  # your function here
#     count.down(old.time, i, B)
#   }

sum.matrices <- function(Ms, return.matrices = F) {
  # Ms is a list of matrices of possibly different sizes.  Each matrix is
  # zero-padded (bottom/right) up to the largest row/column counts, then
  # all are summed elementwise.  If return.matrices is TRUE, a list with
  # the sum and the padded matrices is returned instead of just the sum.
  l <- length(Ms)
  max.c <- max(unlist(lapply(Ms, ncol)))
  max.r <- max(unlist(lapply(Ms, nrow)))
  for (i in 1:l) {
    M <- Ms[[i]]
    ncol0 <- max.c - ncol(M)
    nrow0 <- max.r - nrow(M)
    if (ncol0 > 0) {
      col0 <- matrix(0, nrow(M), ncol0)
      M <- Ms[[i]] <- cbind(Ms[[i]], col0)
    }
    if (nrow0 > 0) {
      row0 <- matrix(0, nrow0, ncol(M))
      M <- Ms[[i]] <- rbind(Ms[[i]], row0)
    }
  }
  out <- Reduce("+", Ms)
  if (return.matrices) out <- list("sum" = out, "matrices" = Ms)
  out
}
# Example:
#   A <- matrix(1:10, nrow = 2); B <- matrix(1:6, nrow = 3)
#   C <- matrix(1:6, nrow = 1);  D <- matrix(1:4)
#   sum.matrices(list(A, B, C, D))
#   sum.matrices(list(A, B, C, D), T)

color.den <- function(den, from, to, col.den = "black", col.area = "red",
                      add = F, ...) {
  # Colour the area under a density within the interval [from, to].
  # `den` has to be a density object.  With add = TRUE the polygon is
  # added to the current plot; otherwise the density is plotted first.
  if (add) {
    # nothing to draw first; the polygon below goes onto the open plot
  } else {
    plot(den, col = col.den, ...)
  }
  polygon(c(from, den$x[den$x >= from & den$x <= to], to),
          c(0, den$y[den$x >= from & den$x <= to], 0),
          col = col.area, border = col.den)
}

bound <- function(x, dens, return.x = TRUE) {
  # From: Mickey Warner,
  # https://github.com/mickwar/r-sandbox/blob/master/mcmc/bayes_functions.R
  # Return the x-value in `dens` closest to the given `x`; with
  # return.x = FALSE, return the y-value of `dens` at that closest x.
  if (return.x) return(dens$x[which.min(abs(dens$x - x))])
  dens$y[which.min(abs(dens$x - x))]
}

col.mult <- function(col1 = 0x000000, col2 = "gray50") {
  # From: Mickey Warner (see link in bound()).
  # Multiply two colours channel-wise in [0, 1] RGB space.  Colours may
  # be given as names/hex strings or as integers (decoded by int2rgb()).
  if (is.character(col1)) val1 <- t(col2rgb(col1) / 255)
  if (is.numeric(col1))   val1 <- t(int2rgb(col1) / 255)
  if (is.character(col2)) val2 <- t(col2rgb(col2) / 255)
  if (is.numeric(col2))   val2 <- t(int2rgb(col2) / 255)
  rgb(val1 * val2)
}

int2rgb <- function(x) {
  # Convert an integer between 0 and 16777215 = 256^3 - 1 (i.e. 0xFFFFFF)
  # to an RGB triple.  Depended upon by col.mult().
  # From: Mickey Warner (see link in bound()).
  hex <- as.character(as.hexmode(x))
  hex <- paste0("#", paste0(rep("0", 6 - nchar(hex)), collapse = ""), hex)
  col2rgb(hex)
}

plot.post <- function(x, main = NULL, hpd = T, color = "cornflowerblue",
                      cex.l = 1, trace = T, stay = F, tck.dig = 4,
                      its = length(x), ...) {
  # Plot the posterior density of MCMC draws `x`: shade the 95% HPD
  # region (when hpd = TRUE), mark the mean, add a summary legend, and
  # optionally inlay a small trace plot (trace = TRUE).  `stay` controls
  # whether the multi-figure (mfg) cursor advances to the next panel.
  mn.x <- round(mean(x), 5)
  v.x <- round(sd(x), 3)
  den <- density(x)
  rng <- c(min(den$y), max(den$y))
  diff <- rng[2] - rng[1]
  main <- ifelse(is.null(main), "Posterior Distribution",
                 paste("Posterior Distribution \n for", main))
  rng.x <- range(den$x)
  x.diff <- rng.x[2] - rng.x[1]
  if (hpd) {
    hpd <- get.hpd(x)  # reuse the flag variable to hold the interval
    plot(density(x), col = color, ylim = c(rng[1], rng[2] + diff * .3),
         lwd = 3, main = main, xaxt = "n")
    color.den(den, rng.x[1], rng.x[2], col.den = color, col.area = color,
              add = T)
    color.den(den, hpd[1], hpd[2], col.den = col.mult(color),
              col.area = col.mult(color), add = T)
    # FIX: spell out `return.x` (the original partially matched `ret=`).
    lines(c(mn.x, mn.x), c(0, bound(mn.x, den, return.x = F)), lwd = 2,
          col = "red")
    axis(1, at = c(hpd, mn.x), labels = round(c(hpd, mn.x), tck.dig),
         las = 0, ...)
    legend("topleft", legend = c(paste("Mean =", mn.x),
                                 paste("Std. Dev. =", v.x),
                                 paste("Low HPD =", round(hpd[1], 4)),
                                 paste("Upp HPD =", round(hpd[2], 4)),
                                 paste("Iterations =", its)),
           bty = "n", cex = cex.l)
  } else {
    plot(density(x), col = color, ylim = c(rng[1], rng[2] + diff * .3),
         lwd = 3, main = main)
    color.den(den, rng.x[1], rng.x[2], col.den = color, col.area = color,
              add = T)
    lines(c(mn.x, mn.x), c(0, bound(mn.x, den, return.x = F)), lwd = 2,
          col = "red")
    legend("topleft", legend = c(paste("Mean =", mn.x),
                                 paste("Std. Dev. =", v.x),
                                 paste("Iterations =", length(x))),
           bty = "n", cex = cex.l)
  }
  mfg <- par()$mfg
  if (trace) {
    # Inlay a small trace plot in the top-right of the current panel.
    opts <- par(no.readonly = T)
    left <- rng.x[1] + x.diff * 2 / 3
    right <- rng.x[2]
    par(fig = c(grconvertX(c(left, right), from = "user", to = "ndc"),
                grconvertY(c(rng[2], rng[2] + diff * .3),
                           from = "user", to = "ndc")),
        mar = c(.1, .1, 1, .1), new = TRUE)
    plot(x, type = "l", col = "gray30", cex.main = .5, axes = F,
         main = "Trace Plot")
    axis(1, cex.axis = .5)
    axis(2, cex.axis = .5)
    par(opts)
  }
  if (!(stay)) {
    # Advance the mfg cursor to the next panel, wrapping at the ends.
    row.num <- mfg[1]
    col.num <- mfg[2]
    last.row <- mfg[3]
    last.col <- mfg[4]
    if (col.num < last.col) {
      mfg[2] <- mfg[2] + 1
    } else {
      if (row.num < last.row) {
        mfg[1] <- mfg[1] + 1
      } else {
        mfg[1] <- 1
      }
      mfg[2] <- 1
    }
  }
  par(mfg = mfg)
}

get.hpd <- function(x, a = .05, len = 1e3) {
  # Highest posterior density interval of draws `x` at level 1 - a:
  # scan `len` candidate lower-tail probabilities in [0, a] and return
  # the (1 - a)-coverage quantile interval of minimum width.
  V <- matrix(seq(0, a, length = len))
  quants <- t(apply(V, 1, function(v) quantile(x, c(v, v + 1 - a))))
  diff <- quants[, 2] - quants[, 1]
  min.d <- V[which.min(diff)]
  hpd <- quantile(x, c(min.d, min.d + 1 - a))
  hpd
}

plot.in.plot <- function(minor.plot, coords = "topright", scale = 1 / 3) {
  # Draw `minor.plot` (a function taking no parameters that plots the
  # smaller plot) inside the current plot.  `coords` is either a numeric
  # c(x1, x2, y1, y2) in user coordinates or one of "topright",
  # "bottomright", "topleft", "bottomleft"; `scale` is the fraction of
  # the plot region the inset covers.
  mar <- x1 <- x2 <- y1 <- y2 <- NULL
  s <- par("usr")  # current user-coordinate extents
  if (is.numeric(coords)) {
    x1 <- coords[1]; x2 <- coords[2]
    y1 <- coords[3]; y2 <- coords[4]
  } else if (coords == "topright") {
    x1 <- s[1] + (s[2] - s[1]) * (1 - scale)
    x2 <- s[2] - (s[2] - s[1]) * .01
    y1 <- s[3] + (s[4] - s[3]) * (1 - scale)
    y2 <- s[4]
    mar <- c(.1, .1, 1, .1)
  } else if (coords == "bottomright") {
    x1 <- s[1] + (s[2] - s[1]) * (1 - scale)
    x2 <- s[2] - (s[2] - s[1]) * .01
    y1 <- s[3] + (s[4] - s[3]) * .05
    y2 <- y1 + (s[4] - s[3]) * (scale)
    mar <- c(1, .1, 1, .1)
  } else if (coords == "topleft") {
    x1 <- s[1] + (s[2] - s[1]) * .05
    x2 <- x1 + (s[2] - s[1]) * (scale)
    y1 <- s[3] + (s[4] - s[3]) * (1 - scale)
    y2 <- s[4]
    mar <- c(.1, 1, 1, .1)
  } else if (coords == "bottomleft") {
    x1 <- s[1] + (s[2] - s[1]) * .05
    x2 <- x1 + (s[2] - s[1]) * (scale)
    y1 <- s[3] + (s[4] - s[3]) * .05
    y2 <- y1 + (s[4] - s[3]) * (scale)
    mar <- c(1, 1, 1, .1)
  }
  opts <- par(no.readonly = T)
  # NOTE(review): when `coords` is numeric, `mar` is still NULL here —
  # TODO confirm par() tolerates that; behaviour kept as in the original.
  par(fig = c(grconvertX(c(x1, x2), from = "user", to = "ndc"),
              grconvertY(c(y1, y2), from = "user", to = "ndc")),
      mar = mar, new = TRUE)
  minor.plot()
  par(opts)
}
# Example:
#   x <- rnorm(10000)
#   plot(density(x), ylim = c(0, .5))
#   minor <- function() {
#     plot(x, type = "l", axes = F, main = "Trace", cex.main = .8)
#     axis(1, cex.axis = .5); axis(2, cex.axis = .5)
#   }
#   plot.in.plot(minor, "topright")
#   plot.in.plot(minor, "bottomright")

est.Z <- function(Zs, p = .5) {
  # Posterior point estimate of a binary matrix from MCMC draws `Zs`
  # (a list of 0/1 matrices): threshold the elementwise posterior mean
  # at `p`, then drop columns that end up all zero.
  B <- length(Zs)
  EZ <- sum.matrices(Zs) / B
  EZ <- ifelse(EZ > p, 1, 0)
  col0.ind <- which(apply(EZ, 2, function(x) sum(x) == 0))
  # BUG FIX: the original did `EZ[, -col0.ind]` unconditionally; when
  # col0.ind is empty, negative indexing with integer(0) drops EVERY
  # column.  Only subset when there is something to remove.
  if (length(col0.ind) > 0) EZ <- EZ[, -col0.ind]
  as.matrix(EZ)
}

Rapply <- function(L, f) {
  # L is a list, f is a function to apply to each L[[i]].  Applies f and
  # rbinds the results; assumes f's output is a vector.
  n <- length(L)
  out <- apply(matrix(1:n), 1, function(i) f(L[[i]]))
  t(out)
}

clust.Z <- function(z) {
  # Convert a matrix `z` whose rows are cluster "patterns" into an
  # n x k indicator matrix with one column per unique row pattern.
  z <- as.matrix(z)
  v.z <- apply(z, 1, function(x) toString(x))
  uniq.vz <- unique(v.z)
  clust.num <- apply(matrix(v.z), 1, function(x) which(uniq.vz %in% x))
  k <- length(uniq.vz)
  n <- nrow(z)
  z.out <- matrix(0, n, k)
  for (i in 1:n) {
    z.out[i, clust.num[i]] <- 1
  }
  z.out
}

det <- function(x, log = F) {
  # Determinant of `x`; with log = TRUE, the log of |det(x)| obtained
  # from determinant().
  # BUG FIX: the original called det(x) here, which resolves to THIS
  # function (it shadows base::det) and recurses forever; delegate to
  # base::det explicitly.
  if (!log) {
    base::det(x)
  } else {
    unlist(determinant(x, log = T))[1]
  }
}

plot.contour <- function(M, ...) {
  # Contour plot of the bivariate kernel density estimate of the first
  # two columns of M (uses MASS::kde2d).
  library(MASS)  # filled.contour, kde2d
  J <- kde2d(M[, 1], M[, 2])
  contour(J, ...)
}

plot.posts <- function(M, names = rep(NULL, ncol(as.matrix(M))), digits = 4,
                       cex.legend = .7, keep.par = F, tck.dig = 4,
                       cex.a = 1 / ncol(M), its = nrow(M), ...) {
  # Pairs-style posterior summary of the columns of M (MCMC draws):
  # marginal posteriors (plot.post) on the diagonal, correlations in the
  # upper triangle, and trace + contour plots in the lower triangle.
  M <- as.matrix(M)
  k <- ncol(M)
  corrs <- cor(M)
  set <- par(no.readonly = T)
  par(mfrow = c(k, k))
  for (i in 1:k) {
    if (i > 1) {
      for (j in 1:(i - 1)) {
        # Upper triangle: print the correlation, scaled by its magnitude.
        plot(1, type = "n", axes = F, xlab = "", ylab = "",
             main = paste0("Corr (", names[i], ",", names[j], ")"))
        r <- round(corrs[i, j], digits)
        cex.cor <- max(.8 / strwidth(format(r)) * abs(r), 1)
        text(1, labels = r, cex = cex.cor)
      }
    }
    plot.post(M[, i], cex.l = cex.legend, main = names[i],
              tck.dig = tck.dig, cex.axis = cex.a, its = its, ...)
    if (i < k) {
      for (j in (i + 1):k) {
        plot(M[, c(j, i)], type = "l", col = "gray85", xlab = names[j],
             ylab = names[i],
             main = paste("Trace & Contour \n", names[i], "vs", names[j]))
        plot.contour(M[, c(j, i)], add = T)
      }
    }
  }
  if (!(keep.par)) par(set)
}
/Fall2014/Stat635/project/mm/R/rfunctions.R
no_license
luiarthur/byuHW
R
false
false
10,961
r
a.image <- function(Q,color=rev(heat.colors(100)),#paste0("gray",100:0), numbers=F,num.cex=1,label.axis=F, numcolor="black",axis.num=T,...) { image(t(apply(Q,2,rev)),yaxt="n",xaxt="n",col=color,...) ec <- apply(Q,2,sum) er <- apply(Q,1,sum) seq1 <- seq(0,1,len=length(ec)) seq2 <- seq(1,0,len=length(er)) seq4 <- seq(0,1,len=nrow(Q)) if (label.axis) axis(4,at=seq4,lab=1:n,las=2,...) if (axis.num) { axis(1,at=seq1,lab=ec) axis(2,at=seq2,lab=er,las=2,...) } if (numbers) { xx <- rep(1:ncol(Q),each=nrow(Q)) yy <- rep(1:nrow(Q),ncol(Q)) text(seq1[xx],seq2[yy],c(Q),col=numcolor,font=2,cex=num.cex) #print(t(Q)[xx,yy]) #for (x in 1:ncol(Q)) { # for (y in 1:nrow(Q)) { # text(seq1[x],seq2[y],t(Q)[x,y],col=numcolor,font=2,cex=num.cex) # } #} } } count.down <- function(old.time,i,B) { prog <- round(100*i/B,4) new.time <- Sys.time() time.diff <- as.numeric(new.time-old.time) time.remain <- time.diff * (B-i) if (time.remain < 60) { secs <- round(time.remain) time.remain <- paste0(secs,"s ") } else if (time.remain<3600) { mins <- round(time.remain%/%60) secs <- round(time.remain%%60) time.remain <- paste0(mins,"m ",secs,"s ") } else { hrs <- round(time.remain%/%3600) mins <- round((time.remain%%3600) %/% 60) time.remain <- paste0(hrs,"h ",mins,"m ") } cat(paste0("\rProgress: ",prog,"%. Time Remaining: ",time.remain," ")) if (i==B) cat("100%\n") } # Example #B <- 5000 #for(i in 1:B) { # old.time <- Sys.time() # Sys.sleep(1) # your function here # count.down(old.time) #} sum.matrices <- function(Ms,return.matrices=F) { # Ms is a list of matrices of different lengths # return.matrices is a boolean. If FALSE, function returns the sum of the matrices. # If TRUE, function returns a list of the matrices also. 
l <- length(Ms) max.c <- max(unlist(lapply(Ms,ncol))) max.r <- max(unlist(lapply(Ms,nrow))) for (i in 1:l) { M <- Ms[[i]] ncol0 <- max.c - ncol(M) nrow0 <- max.r - nrow(M) if (ncol0>0) { col0 <- matrix(0,nrow(M),ncol0) M <- Ms[[i]] <- cbind(Ms[[i]],col0) } if (nrow0>0) { row0 <- matrix(0,nrow0,ncol(M)) M <- Ms[[i]] <- rbind(Ms[[i]],row0) } } out <- Reduce("+",Ms) if (return.matrices) out <- list("sum"=out,"matrices"=Ms) out } # EXAMPLE: #########################################################3 #A <- matrix(1:10,nrow=2) #B <- matrix(1:6,nrow=3) #C <- matrix(1:6,nrow=1) #D <- matrix(1:4) # #Ms <- list(A,B,C,D) # # #sum.matrices(Ms) #sum.matrices(Ms,T) color.den <- function(den,from,to,col.den="black",col.area="red",add=F,...) { # Colors area under a density within an interval # den has to be a density object if (add) { #lines(den,col=col.den,...) } else { plot(den,col=col.den,...) } polygon(c(from, den$x[den$x>=from & den$x<=to], to), c(0, den$y[den$x>=from & den$x<=to], 0), col=col.area,border=col.den) } bound <- function(x, dens, return.x=TRUE){ # Mickey Warner: # https://github.com/mickwar/r-sandbox/blob/master/mcmc/bayes_functions.R # returns the x-value in dens that is closest # to the given x if (return.x) return(dens$x[which.min(abs(dens$x-x))]) # returns the y-value in dens at the closest x return(dens$y[which.min(abs(dens$x-x))]) } col.mult = function(col1 = 0x000000, col2 = "gray50"){ # Mickey Warner: # https://github.com/mickwar/r-sandbox/blob/master/mcmc/bayes_functions.R # returns the x-value in dens that is closest # to the given x if (is.character(col1)) val1 = t(col2rgb(col1) / 255) if (is.numeric(col1)) val1 = t(int2rgb(col1) / 255) if (is.character(col2)) val2 = t(col2rgb(col2) / 255) if (is.numeric(col2)) val2 = t(int2rgb(col2) / 255) rgb(val1 * val2) } int2rgb = function(x){ # int2rgb() # convert an integer between 0 and 16777215 = 256^3 - 1, # or between 0 and 0xFFFFFF # this function is depended upon by col.mult # Mickey Warner: # 
https://github.com/mickwar/r-sandbox/blob/master/mcmc/bayes_functions.R # returns the x-value in dens that is closest # to the given x hex = as.character(as.hexmode(x)) hex = paste0("#", paste0(rep("0", 6-nchar(hex)), collapse=""), hex) col2rgb(hex) } plot.post <- function(x,main=NULL,hpd=T,color="cornflowerblue",cex.l=1,trace=T,stay=F,tck.dig=4,its=length(x),...) { mn.x <- round(mean(x),5) v.x <- round(sd(x),3) den <- density(x) rng <- c(min(den$y),max(den$y)) diff <- rng[2]-rng[1] main <- ifelse(is.null(main),"Posterior Distribution", paste("Posterior Distribution \n for",main)) if (hpd) { } else { } rng.x <- range(den$x) x.diff <- rng.x[2] - rng.x[1] if (hpd) { hpd <- get.hpd(x) plot(density(x),col=color,ylim=c(rng[1],rng[2]+diff*.3),lwd=3, main=main,xaxt="n") color.den(den,rng.x[1],rng.x[2],col.den=color,col.area=color,add=T) color.den(den,hpd[1],hpd[2],col.den=col.mult(color), col.area=col.mult(color),add=T) lines(c(mn.x,mn.x),c(0,bound(mn.x,den,ret=F)),lwd=2,col="red") axis(1,at=c(hpd,mn.x),labels=round(c(hpd,mn.x),tck.dig),las=0,...) legend("topleft",legend=c(paste("Mean =",mn.x), paste("Std. Dev. =",v.x), paste("Low HPD =",round(hpd[1],4)), paste("Upp HPD =",round(hpd[2],4)), paste("Iterations =",its)), bty="n",cex=cex.l) } else { plot(density(x),col=color,ylim=c(rng[1],rng[2]+diff*.3),lwd=3,main=main) color.den(den,rng.x[1],rng.x[2],col.den=color,col.area=color,add=T) lines(c(mn.x,mn.x),c(0,bound(mn.x,den,ret=F)),lwd=2,col="red") legend("topleft",legend=c(paste("Mean =",mn.x), paste("Std. Dev. 
=",v.x), paste("Iterations =",length(x))), bty="n",cex=cex.l) } mfg <- par()$mfg if (trace) { opts <- par(no.readonly=T) left <- rng.x[1] + x.diff*2/3 right <- rng.x[2] par(fig = c(grconvertX(c(left,right),from="user",to="ndc"), grconvertY(c(rng[2],rng[2]+diff*.3),from="user",to="ndc")), mar = c(.1,.1,1,.1), new = TRUE) plot(x,type="l",col="gray30",cex.main=.5,axes=F,main="Trace Plot") axis(1,cex.axis=.5) axis(2,cex.axis=.5) par(opts) } if (!(stay)) { row.num <- mfg[1] col.num <- mfg[2] last.row <- mfg[3] last.col <- mfg[4] if (col.num < last.col) { mfg[2] <- mfg[2] + 1 } else { if (row.num < last.row) { mfg[1] <- mfg[1] + 1 } else { mfg[1] <- 1 } mfg[2] <- 1 } } par(mfg=mfg) } get.hpd <- function(x,a=.05,len=1e3) { V <- matrix(seq(0,a,length=len)) quants <- t(apply(V,1,function(v) quantile(x,c(v,v+1-a)))) diff <- quants[,2]-quants[,1] min.d <- V[which.min(diff)] hpd <- quantile(x,c(min.d,min.d+1-a)) hpd } plot.in.plot <- function(minor.plot,coords="topright",scale=1/3) { # coords = x1,y1,x2,y2 # minor.plot is a function with no parameters that plots the smaller plot mar <- x1 <- x2 <- y1 <- y2 <- NULL s <- par("usr") if (is.numeric(coords)) { x1 <- coords[1]; x2 <- coords[2] y1 <- coords[3]; y2 <- coords[4] } else if (coords=="topright"){ x1 <- s[1] + (s[2]-s[1]) * (1-scale) x2 <- s[2] - (s[2]-s[1]) * .01 y1 <- s[3] + (s[4]-s[3]) * (1-scale) y2 <- s[4] mar <- c(.1,.1,1,.1) } else if (coords=="bottomright") { x1 <- s[1] + (s[2]-s[1]) * (1-scale) x2 <- s[2] - (s[2]-s[1]) * .01 y1 <- s[3] + (s[4]-s[3]) *.05 y2 <- y1 + (s[4]-s[3]) * (scale) mar <- c(1,.1,1,.1) } else if (coords=="topleft") { x1 <- s[1] + (s[2]-s[1]) * .05 x2 <- x1 + (s[2]-s[1]) * (scale) y1 <- s[3] + (s[4]-s[3]) * (1-scale) y2 <- s[4] mar <- c(.1,1,1,.1) }else if (coords=="bottomleft") { x1 <- s[1] + (s[2]-s[1]) * .05 x2 <- x1 + (s[2]-s[1]) * (scale) y1 <- s[3] + (s[4]-s[3]) *.05 y2 <- y1 + (s[4]-s[3]) * (scale) mar <- c(1,1,1,.1) } opts <- par(no.readonly=T) par(fig = 
c(grconvertX(c(x1,x2),from="user",to="ndc"), grconvertY(c(y1,y2),from="user",to="ndc")), mar = mar, new = TRUE) minor.plot() #axis(1,cex.axis=.5) #axis(2,cex.axis=.5) par(opts) } #x <- rnorm(10000) #plot(density(x),ylim=c(0,.5)) # #minor <- function() { # plot(x,type="l",axes=F,main="Trace",cex.main=.8) # axis(1,cex.axis=.5) # axis(2,cex.axis=.5) #} # ##plotinplot(minor,c(1,4,.4,.5)) #plot.in.plot(minor,"topright") #plot.in.plot(minor,"bottomright") est.Z <- function(Zs,p=.5) { B <- length(Zs) EZ <- sum.matrices(Zs) / B EZ <- ifelse(EZ>p,1,0) col0.ind <- which(apply(EZ,2,function(x) sum(x)==0)) EZ <- as.matrix(EZ[,-col0.ind]) EZ } Rapply <- function(L,f) { # L is a list, f is a function to apply to L[[x]] # L apply takes a list, applies a function, # and rbinds it. Assumes output is vector. n <- length(L) out <- apply(matrix(1:n),1,function(i) f(L[[i]])) t(out) } clust.Z <- function(z) { z <- as.matrix(z) v.z <- apply(z,1,function(x) toString(x)) uniq.vz <- unique(v.z) clust.num <- apply(matrix(v.z),1,function(x) which(uniq.vz %in% x)) k <- length(uniq.vz) n <- nrow(z) z.out <- matrix(0,n,k) for (i in 1:n) { z.out[i,clust.num[i]] <- 1 } z.out } det <- function(x,log=F) { out <- 0 if (!log) { out <- det(x) } else { out <- unlist(determinant(x,log=T))[1] } out } plot.contour <- function(M,...) { library(MASS) # filled.contour, kde2d J <- kde2d(M[,1],M[,2]) contour(J,...) } plot.posts <- function(M,names=rep(NULL,ncol(as.matrix(M))),digits=4,cex.legend=.7, keep.par=F,tck.dig=4,cex.a=1/ncol(M),its=nrow(M),...) 
{ M <- as.matrix(M) k <- ncol(M) corrs <- cor(M) set <- par(no.readonly=T) par(mfrow=c(k,k)) for (i in 1:k) { if (i>1) { for (j in 1:(i-1)) { plot(1, type="n", axes=F, xlab="", ylab="", main=paste0("Corr (",names[i],",",names[j],")")) # empty plot r <- round(corrs[i,j],digits) cex.cor <- max(.8/strwidth(format(r)) * abs(r),1) text(1,labels=r,cex=cex.cor) #legend("center",legend=corrs[i,j], # title=paste0("Corr (",names[i],",",names[j],")")) } } plot.post(M[,i],cex.l=cex.legend,main=names[i],tck.dig=tck.dig,cex.axis=cex.a,its=its,...) if (i<k) { for (j in (i+1):k) { plot(M[,c(j,i)],type="l",col="gray85",xlab=names[j],ylab=names[i], main=paste("Trace & Contour \n",names[i],"vs",names[j])) plot.contour(M[,c(j,i)],add=T) } } } if (!(keep.par)) par(set) }
setwd("/Users/Michelle/Documents") hp <- read.table("household_power_consumption.txt", header = TRUE, sep = ";") hp$Time <- strptime(paste(hp$Date, hp$Time), "%d/%m/%Y %H:%M:%S") hp$Date <- as.Date(hp$Date, "%d/%m/%Y") ##convert Date and Time variables subhp <- subset(hp, Date %in% as.Date(c("2007-02-01", "2007-02-02"))) ##set target data
/DataSource.R
no_license
Michelle-Zhao/Course-Project-1
R
false
false
344
r
setwd("/Users/Michelle/Documents") hp <- read.table("household_power_consumption.txt", header = TRUE, sep = ";") hp$Time <- strptime(paste(hp$Date, hp$Time), "%d/%m/%Y %H:%M:%S") hp$Date <- as.Date(hp$Date, "%d/%m/%Y") ##convert Date and Time variables subhp <- subset(hp, Date %in% as.Date(c("2007-02-01", "2007-02-02"))) ##set target data
plot.graphrestrictions <- function(nodes, positions, distance ) { plot.new() if (NROW(positions) == 1){ positions <- rbind(positions, 0) minx <- min(positions) maxx <- max(positions) miny <- -distance maxy <- distance } else { minx <- min(positions[1 , ]) maxx <- max(positions[1 , ]) miny <- min(positions[2 , ]) maxy <- max(positions[2 , ]) } NodeList <- data.frame(nodes, positions[1, ] , positions[2, ]) EdgeList <- data.frame(from = numeric(0), to= integer(0)) a <- graph_from_data_frame(vertices = NodeList, d = EdgeList) color=c("red", "blue", "green", "yellow", "brown", "black", "pink", "cyan") plot.igraph(a, layout=t(positions), vertex.size=4, vertex.color=color, rescale=F, xlim=c(minx, maxx), ylim=c(miny, maxy), asp=FALSE , axes = TRUE) mapply(plotellipse, mid = split(positions, rep(1:ncol(positions), each = nrow(positions))), lcol = color , MoreArgs = list( rx = distance, ry = distance, asp = FALSE)) }
/functions/plot.graph.functions/plot.graphrestrictions.R
permissive
shepherdmeng/BNdownscale
R
false
false
992
r
plot.graphrestrictions <- function(nodes, positions, distance ) { plot.new() if (NROW(positions) == 1){ positions <- rbind(positions, 0) minx <- min(positions) maxx <- max(positions) miny <- -distance maxy <- distance } else { minx <- min(positions[1 , ]) maxx <- max(positions[1 , ]) miny <- min(positions[2 , ]) maxy <- max(positions[2 , ]) } NodeList <- data.frame(nodes, positions[1, ] , positions[2, ]) EdgeList <- data.frame(from = numeric(0), to= integer(0)) a <- graph_from_data_frame(vertices = NodeList, d = EdgeList) color=c("red", "blue", "green", "yellow", "brown", "black", "pink", "cyan") plot.igraph(a, layout=t(positions), vertex.size=4, vertex.color=color, rescale=F, xlim=c(minx, maxx), ylim=c(miny, maxy), asp=FALSE , axes = TRUE) mapply(plotellipse, mid = split(positions, rep(1:ncol(positions), each = nrow(positions))), lcol = color , MoreArgs = list( rx = distance, ry = distance, asp = FALSE)) }
## Dictionary-based sentiment scoring of airline tweets; writes the
## non-negative reviews to non_Negative.csv.

rm(list = ls())  # NOTE(review): clearing the workspace in a script is discouraged

# Load packages --------------------------------------------------------------
library(readr)
library(DBI)
library(RMySQL)
library(tm)
require("NLP")
# install.packages("NMF")
require("openNLP")
library("SnowballC")
library("RColorBrewer")
library("wordcloud")
library(NMF)
library(plyr)
# install.packages("rJava"); install.packages("Rwordseg")
library(rJava)
library(Rwordseg)

# Access to text data via MySQL (disabled; kept for reference).
# NOTE(review): credentials were committed in the original — rotate them.
# driver <- dbDriver("MySQL")
# conn <- dbConnect(driver, host = "localhost", dbname = "studb",
#                   "cis434", "LLhtFPbdwiJans8F@S207")
# temp <- dbGetQuery(conn, "SELECT * FROM proj4final WHERE tag=\"rO(x6c15'Can\"")
# dbDisconnect(conn)

# Load raw data --------------------------------------------------------------
temp <- read.csv("~/Desktop/rstudio-export/temp.csv")

# Build the pos/neg dictionary "posneg" --------------------------------------
# Positive words, labelled +1.
pos <- read.csv("~/Desktop/rstudio-export/positive-words-dic.txt",
                header = T, sep = ",", stringsAsFactors = F)
weight <- rep(1, length(pos[, 1]))
pos <- cbind(pos, weight)
names(pos) <- c("term", "weight")

# Negative words, labelled -1.
neg <- read.csv("~/Desktop/rstudio-export/negative-words-dic.txt",
                header = T, sep = ",", stringsAsFactors = F)
weight <- rep(-1, length(neg[, 1]))
neg <- cbind(neg, weight)
names(neg) <- c("term", "weight")

# Assign a heavier weight (-100) to negative words surfaced by the LDA
# analysis.  (Replaces 22 copy-pasted `neg$weight[which(...)] <- -100`
# statements with one vectorised assignment; same terms, same effect.)
strong.neg <- c("bad", "delay", "delayed", "lost", "last", "trying",
                "sucks", "try", "late", "rudest", "stuck", "wait",
                "Waiting", "waiting", "cancel", "cancelled", "Cancel",
                "cancellation", "Thieves", "ashamed", "shitty", "stupid")
neg$weight[neg$term %in% strong.neg] <- -100

# Combine pos and neg words, keeping the first occurrence of each term.
posneg <- rbind(pos, neg)
posneg <- posneg[!duplicated(posneg$term), ]

# Tokenise tweets -------------------------------------------------------------
sentence <- as.vector(temp$tweet)
# Clean text: strip punctuation and digits, lower-case everything.
sentence <- gsub("[[:punct:]]", "", sentence)
sentence <- gsub("[[:digit:]]", "", sentence)
sentence <- tolower(sentence)

# install.packages("tokenizers")
library("tokenizers")
x <- tokenize_words(sentence, strip_punct = FALSE)
term <- unlist(x)
tokens.per.tweet <- unlist(lapply(x, length))
id <- rep(temp$id, tokens.per.tweet)  # one tweet id per token

# Data frame of (tweet id, token).
testterm <- as.data.frame(cbind(id, term), stringsAsFactors = F)

# Remove English stop words, keeping those that carry sentiment.
stopwords <- data.frame(stopwords(kind = "en"))
names(stopwords) <- c("term")
stopwords <- data.frame(setdiff(stopwords$term, posneg$term))
names(stopwords) <- c("term")
# BUG FIX: the original tested `testterm$term %in% stopwords` against the
# whole data frame, which matches nothing, so stop words were never
# removed.  Compare against the term column.
testterm <- testterm[!testterm$term %in% stopwords$term, ]

# Weight each token with the pos/neg dictionary ------------------------------
testterm <- merge(testterm, posneg)
testterm <- testterm[!is.na(testterm$weight), ]

# Compute the sentiment index per tweet.
dictresult <- aggregate(weight ~ id, data = testterm, sum)
dictlabel <- rep(-1, length(dictresult[, 1]))
dictlabel[dictresult$weight > 0] <- 1  # +1 marks non-negative sentiment
dictresult <- as.data.frame(cbind(dictresult, dictlabel),
                            stringsAsFactors = F)

# Keep only tweets with non-negative sentiment -------------------------------
text <- join(dictresult, temp, by = "id")
nonNeg <- subset(text, weight > 0)
non_Negative <- nonNeg

# Finish the non-negative data frame: drop scoring/metadata columns and
# keep (id, Evaluation, tweet).
non_Negative$weight <- NULL
non_Negative$dictlabel <- NULL
non_Negative$tag <- NULL
non_Negative$airline <- NULL
non_Negative$tid_not_to_be_used <- NULL
non_Negative$Evaluation <- 1
non_Negative <- non_Negative[, c("id", "Evaluation", "tweet")]

# Export non_Negative.
write.csv(non_Negative, "non_Negative.csv")
dim(non_Negative)  # 160 non-negative reviews
/Airline Compliant Reviews.R
no_license
SiqiJiang47/Airline-Compliant-Reviews
R
false
false
4,608
r
rm(list=ls()) #load packages library(readr) library(DBI) library(RMySQL) library(tm) require("NLP") #install.packages("NMF") require("openNLP") library("tm") library("SnowballC") library("RColorBrewer") library("wordcloud") library(NMF) library(plyr) #weight sentiment #install.packages("rJava") #install.packages("Rwordseg") library(rJava) library(Rwordseg) #access to text data # driver <- dbDriver("MySQL") # myhost <- "localhost" # mydb <- "studb" # myacct <- "cis434" # mypwd <- "LLhtFPbdwiJans8F@S207" # # conn <- dbConnect(driver, host=myhost, dbname=mydb, myacct, mypwd) # # #LOAD TEXT DATA # # rO(x6c15'Can # temp <- dbGetQuery(conn, "SELECT * FROM proj4final WHERE tag=\"rO(x6c15'Can\"") # dbDisconnect(conn) ####load raw data temp <- read.csv("~/Desktop/rstudio-export/temp.csv") ####load pos/neg dictionary "posneg" # positive words ----label +1 pos <- read.csv("~/Desktop/rstudio-export/positive-words-dic.txt", header = T, sep = ",", stringsAsFactors = F) weight <- rep(1, length(pos[,1])) pos <- cbind(pos, weight) names(pos) <- c("term", "weight") #pos <- tolower(pos) # negative words ----label -1 neg <- read.csv("~/Desktop/rstudio-export/negative-words-dic.txt", header = T, sep = ",", stringsAsFactors = F) weight <- rep(-1, length(neg[,1])) neg <- cbind(neg, weight) names(neg) <- c("term", "weight") # assign special weights to the negative words that I found from LDA #They are: bad;delay;delayed;lost; last;trying; sucks; try; sucks; late; ###########rudest;stuck;wait;waiting;cancel;cancelled;shitty; stupid; ashamed neg$weight[which(neg$term =="bad")]=-100 neg$weight[which(neg$term =="delay")]=-100 neg$weight[which(neg$term =="delayed")]=-100 neg$weight[which(neg$term =="lost")]=-100 neg$weight[which(neg$term =="last")]=-100 neg$weight[which(neg$term =="trying")]=-100 neg$weight[which(neg$term =="sucks")]=-100 neg$weight[which(neg$term =="try")]=-100 neg$weight[which(neg$term =="late")]=-100 neg$weight[which(neg$term =="rudest")]=-100 neg$weight[which(neg$term 
=="stuck")]=-100 neg$weight[which(neg$term =="wait")]=-100 neg$weight[which(neg$term =="Waiting")]=-100 neg$weight[which(neg$term =="waiting")]=-100 neg$weight[which(neg$term =="cancel")]=-100 neg$weight[which(neg$term =="cancelled")]=-100 neg$weight[which(neg$term =="Cancel")]=-100 neg$weight[which(neg$term =="cancellation")]=-100 neg$weight[which(neg$term =="Thieves")]=-100 neg$weight[which(neg$term =="ashamed")]=-100 neg$weight[which(neg$term =="shitty")]=-100 neg$weight[which(neg$term =="stupid")]=-100 # combine pos and neg words posneg <- rbind(pos, neg) posneg <- posneg[!duplicated(posneg$term), ] #`duplicated` is similar to `unique`,but it can return to its own number ########divide words into vectors in each tweet #make them to each string sentence <- as.vector(temp$tweet) #clean text sentence= gsub("[[:punct:]]", "", sentence ) sentence = gsub("[[:digit:]]", "", sentence ) sentence <- tolower(sentence) #install.packages("tokenizers") library("tokenizers") x<-tokenize_words(sentence, strip_punct = FALSE) term <- unlist(x) temp0 <- lapply(x, length) temp0 <- unlist(temp0) id <- rep(temp$id, temp0) #create a data frame that contains terms and their ids testterm <- as.data.frame(cbind(id, term), stringsAsFactors = F) # create a set of vocabulary and clean data with elimiating stop words stopwords<-data.frame(stopwords(kind='en')) names(stopwords) <- c('term') stopwords <- data.frame(setdiff(stopwords$term,posneg$term)) names(stopwords) <- c('term') testterm <- testterm[!testterm$term %in% stopwords,] #weight each review and weight with pos/neg dictionary library(plyr) testterm <- merge(testterm, posneg) testterm <- testterm[!is.na(testterm$weight), ] #head(testterm) #computing sentiment index dictresult <- aggregate(weight ~ id, data = testterm, sum) dictlabel <- rep(-1, length(dictresult[, 1])) #convert dictlabel which contains sign of sentiment to the data frame dictlabel[dictresult$weight > 0] <- 1 dictresult <- as.data.frame(cbind(dictresult, dictlabel), 
stringsAsFactors = F) ###getting data frame only with non negative review text <- join(dictresult, temp, by="id") nonNeg <- subset(text, weight>0) non_Negative <- nonNeg #Finish non-negative data frame non_Negative$weight <- NULL non_Negative$dictlabel<-NULL non_Negative$tag<-NULL non_Negative$airline<-NULL non_Negative$tid_not_to_be_used<-NULL non_Negative$Evaluation <-1 non_Negative <- non_Negative[, c("id", "Evaluation", "tweet")] #export non_Negative write.csv(non_Negative, "non_Negative.csv") dim(non_Negative) #160 non negative reviews
# Simulate genetic drift: demonstrates loss of heterozygosity while allele
# frequencies are, on average, maintained across replicate populations.
# by Anders Gonçalves da Silva (C) 2014
# email: andersgs@gmail.com
# 20 March 2014

sim_drift <- function(initial_geno_count = c(4, 8, 4), generations = 200,
                      n_pop = 50) {
  # Simulate Wright-Fisher drift at one bi-allelic locus ('A'/'a').
  #
  # initial_geno_count: counts of 'AA', 'Aa' and 'aa' genotypes seeding
  #   every population; their sum is the per-generation sample size.
  # generations: number of generations to simulate.
  # n_pop: number of replicate populations.
  #
  # Returns a list with the per-population frequency of 'A' at each
  # generation (sim_p), the per-population frequency of 'Aa' (sim_hs),
  # and their across-population means (p_hat, hs_hat).  Progress is
  # re-plotted every 10 generations.

  # Helper: allele frequency of 'A' from a vector of genotype strings.
  new_p <- function(genos, n_ind) {
    fAA <- sum(genos == "AA")
    fAa <- sum(genos == "Aa")
    return((2 * fAA + fAa) / (2 * n_ind))
  }

  # Number of individuals sampled each generation.
  n_ind <- sum(initial_geno_count)
  # Initial frequency of 'A', replicated across populations.
  cur_p <- (2 * initial_geno_count[1] + initial_geno_count[2]) / (2 * n_ind)
  cur_p <- rep(cur_p, times = n_pop)

  # Storage: allele frequency per population per generation.
  ps <- matrix(0, ncol = n_pop, nrow = generations)
  ps[1, ] <- cur_p
  # Mean allele frequency across populations per generation.
  mean_p <- numeric(generations)
  mean_p[1] <- mean(cur_p)
  # Heterozygosity per population per generation.
  hs <- matrix(0, ncol = n_pop, nrow = generations)
  hs[1, ] <- rep(initial_geno_count[2] / n_ind, times = n_pop)
  # Mean heterozygosity across populations per generation.
  mean_h <- numeric(generations)
  mean_h[1] <- mean(hs[1, ])

  # Genotypes of every individual in every population for the current
  # generation; this matrix gets re-written each generation.
  pop_genotypes <- matrix(0, ncol = n_ind, nrow = n_pop)

  # Seed every population with the initial genotype composition.
  init_samp <- c(rep("AA", initial_geno_count[1]),
                 rep("Aa", initial_geno_count[2]),
                 rep("aa", initial_geno_count[3]))
  for (i in 1:n_pop) {
    pop_genotypes[i, ] <- init_samp
  }

  # Iterate over generations.
  for (gen in 2:generations) {
    cur_q <- 1 - cur_p  # frequency of 'a'
    # Hardy-Weinberg expected genotype frequencies per population
    # (inbreeding could be introduced here).
    gf <- data.frame(AA = cur_p^2, Aa = (2 * cur_p * cur_q), aa = cur_q^2)
    # Sample the next generation of n_ind individuals per population.
    for (p in 1:n_pop) {
      pop_genotypes[p, ] <- sample(c("AA", "Aa", "aa"), size = n_ind,
                                   replace = T, prob = gf[p, ])
    }
    # New allele frequency of 'A', plus summary statistics.
    cur_p <- ps[gen, ] <- apply(pop_genotypes, 1, new_p, n_ind)
    mean_p[gen] <- mean(cur_p)
    hs[gen, ] <- apply(pop_genotypes, 1,
                       function(inds) sum(inds == "Aa") / n_ind)
    mean_h[gen] <- mean(hs[gen, ])
    # Redraw the progress plot every 10 generations.
    if (gen %% 10 == 0) {
      plot(ps[1:gen, 1], type = "l", lwd = 2, ylim = c(0, 1),
           xlab = "Generations", xlim = c(1, generations),
           ylab = "Allele frequency")
      matlines(ps[1:gen, 2:n_pop], lwd = 2)
      title(paste("p =", mean_p[gen], " h=", mean_h[gen]))
      Sys.sleep(0.02)
    }
  }
  # Return the list of results.
  return(list(sim_p = ps, sim_hs = hs, p_hat = mean_p, hs_hat = mean_h))
}

# Run a simulation sampling 16 individuals per population, for 200
# populations and 200 generations, starting from 16 heterozygotes.
drift1 <- sim_drift(initial_geno_count = c(0, 16, 0), n_pop = 200)
/morning/first_session/sim_drift.R
permissive
andersgs/canberra_workshop_day2
R
false
false
4,026
r
# Simulate genetic drift: demonstrates loss of heterozygosity within
# populations while allele frequencies are, on average, maintained across
# replicate populations.
# by Anders Gonçalves da Silva (C) 2014
# email: andersgs@gmail.com
# 20 March 2014

# Simulate drift across replicate populations.
#
# initial_geno_count: counts of 'AA', 'Aa', 'aa' founders; their sum also
#   fixes the number of breeders sampled in every later generation.
# generations:        number of generations to iterate.
# n_pop:              number of replicate populations.
#
# Returns a list with:
#   sim_p  - matrix (generations x n_pop) of 'A' allele frequencies
#   sim_hs - matrix (generations x n_pop) of heterozygote ('Aa') frequencies
#   p_hat  - mean 'A' frequency across populations, per generation
#   hs_hat - mean heterozygosity across populations, per generation
#
# A progress plot of all populations' allele frequencies is drawn every
# 10 generations.
sim_drift <- function(initial_geno_count = c(4, 8, 4), generations = 200,
                      n_pop = 50) {

  # Allele frequency of 'A' from a vector of genotype labels.
  new_p <- function(genos, n_ind) {
    (2 * sum(genos == "AA") + sum(genos == "Aa")) / (2 * n_ind)
  }

  # Number of individuals sampled each generation.
  n_ind <- sum(initial_geno_count)

  # Initial frequency of 'A', replicated across all populations.
  cur_p <- rep((2 * initial_geno_count[1] + initial_geno_count[2]) /
                 (2 * n_ind), times = n_pop)

  # Storage: per-generation allele frequencies and heterozygosities,
  # plus their across-population means.
  ps <- matrix(0, ncol = n_pop, nrow = generations)
  hs <- matrix(0, ncol = n_pop, nrow = generations)
  mean_p <- numeric(generations)
  mean_h <- numeric(generations)
  ps[1, ] <- cur_p
  hs[1, ] <- rep(initial_geno_count[2] / n_ind, times = n_pop)
  mean_p[1] <- mean(cur_p)
  mean_h[1] <- mean(hs[1, ])

  # Genotypes of the current generation, one row per population;
  # overwritten every generation.  Built directly as a character matrix
  # (the original initialized a numeric matrix and relied on coercion).
  init_samp <- rep(c("AA", "Aa", "aa"), times = initial_geno_count)
  pop_genotypes <- matrix(init_samp, nrow = n_pop, ncol = n_ind, byrow = TRUE)

  for (gen in 2:generations) {
    cur_q <- 1 - cur_p
    # Hardy-Weinberg expected genotype frequencies for each population.
    # (Inbreeding could be simulated by modifying these expectations.)
    gf <- data.frame(AA = cur_p^2, Aa = 2 * cur_p * cur_q, aa = cur_q^2)
    # Sample the next generation's breeders from the HW expectations.
    for (pop in seq_len(n_pop)) {
      pop_genotypes[pop, ] <- sample(c("AA", "Aa", "aa"), size = n_ind,
                                     replace = TRUE, prob = gf[pop, ])
    }
    # New allele frequencies and observed heterozygosities.
    cur_p <- ps[gen, ] <- apply(pop_genotypes, 1, new_p, n_ind)
    mean_p[gen] <- mean(cur_p)
    hs[gen, ] <- apply(pop_genotypes, 1,
                       function(inds) sum(inds == "Aa") / n_ind)
    mean_h[gen] <- mean(hs[gen, ])
    # Progress plot every 10 generations.
    if (gen %% 10 == 0) {
      plot(ps[1:gen, 1], type = "l", lwd = 2, ylim = c(0, 1),
           xlab = "Generations", xlim = c(1, generations),
           ylab = "Allele frequency")
      matlines(ps[1:gen, 2:n_pop], lwd = 2)
      title(paste("p =", mean_p[gen], " h=", mean_h[gen]))
      Sys.sleep(0.02)
    }
  }

  # Return list of results.
  list(sim_p = ps, sim_hs = hs, p_hat = mean_p, hs_hat = mean_h)
}

# Example run: 200 populations, each founded with 16 heterozygotes,
# followed for 200 generations (16 breeders per generation).
drift1 <- sim_drift(initial_geno_count = c(0, 16, 0), n_pop = 200)
# Confidence interval for the ratio x / y, built on the log scale and
# back-transformed, so the limits are symmetric about x / y in log space.
#
# x, y:       the two estimates whose ratio is of interest (vectorised).
# n:          list with components `d` and `h`, the sample sizes associated
#             with `x` and `y` respectively.
# conf.level: confidence level, e.g. 0.95.
#
# Returns a list with component `ci`, a two-column matrix holding the
# lower and upper confidence limits (one row per element of x / y).
ci.transformed <- function(x, y, n, conf.level) {
  # Two-sided normal quantile.
  z <- qnorm(1 - ((1 - conf.level) / 2))
  # Standard error of log(x / y), assuming x and y are independent;
  # computed once (the original repeated this expression in each limit).
  se <- sqrt((1 - x) / (n$d * x) + (1 - y) / (n$h * y))
  ll <- exp(log(x / y) - z * se)
  ul <- exp(log(x / y) + z * se)
  # Return the result explicitly (the original ended with an assignment,
  # which returned the same list but invisibly).
  list(ci = matrix(c(ll, ul), ncol = 2))
}
/R/ci.transformed.R
no_license
cran/OptimalCutpoints
R
false
false
286
r
# Confidence interval for the ratio x / y, built on the log scale and
# back-transformed, so the limits are symmetric about x / y in log space.
#
# x, y:       the two estimates whose ratio is of interest (vectorised).
# n:          list with components `d` and `h`, the sample sizes associated
#             with `x` and `y` respectively.
# conf.level: confidence level, e.g. 0.95.
#
# Returns a list with component `ci`, a two-column matrix holding the
# lower and upper confidence limits (one row per element of x / y).
ci.transformed <- function(x, y, n, conf.level) {
  # Two-sided normal quantile.
  z <- qnorm(1 - ((1 - conf.level) / 2))
  # Standard error of log(x / y), assuming x and y are independent;
  # computed once (the original repeated this expression in each limit).
  se <- sqrt((1 - x) / (n$d * x) + (1 - y) / (n$h * y))
  ll <- exp(log(x / y) - z * se)
  ul <- exp(log(x / y) + z * se)
  # Return the result explicitly (the original ended with an assignment,
  # which returned the same list but invisibly).
  list(ci = matrix(c(ll, ul), ncol = 2))
}
\name{vcov.CountsEPPM} \alias{vcov.CountsEPPM} \title{ Variance/Covariance Matrix for Coefficients } \description{ Variance/covariance matrix for the coefficients of a fitted model. } \usage{ \method{vcov}{CountsEPPM}(object, model = c("full", "mean", "scale.factor"), \dots) } \arguments{ \item{object}{ fitted model object of class "CountsEPPM". } \item{model}{ character indicating which variance/covariance matrix is to be output: the matrix for all coefficients ("full"), for the coefficients of the probability of success ("mean"), or for the coefficients of the scale-factor ("scale.factor"). } \item{\dots}{ other arguments (currently unused). } } \value{ The variance/covariance matrix of the parameters of the fitted model object. } \references{ Cribari-Neto F, Zeileis A. (2010). Beta Regression in R. \emph{Journal of Statistical Software}, \bold{34}(2), 1-24. \doi{10.18637/jss.v034.i02}. } \author{ David M. Smith <smithdm1@us.ibm.com> } \seealso{ \code{\link[betareg]{betareg}} } \examples{ data("herons.group") output.fn <- CountsEPPM(number.attempts ~ 0 + group, herons.group, model.type = 'mean only', model.name = 'Poisson') vcov(output.fn) } \keyword{ models }
/man/vcov.CountsEPPM.Rd
no_license
cran/CountsEPPM
R
false
false
1,222
rd
\name{vcov.CountsEPPM} \alias{vcov.CountsEPPM} \title{ Variance/Covariance Matrix for Coefficients } \description{ Variance/covariance matrix for the coefficients of a fitted model. } \usage{ \method{vcov}{CountsEPPM}(object, model = c("full", "mean", "scale.factor"), \dots) } \arguments{ \item{object}{ fitted model object of class "CountsEPPM". } \item{model}{ character indicating which variance/covariance matrix is to be output: the matrix for all coefficients ("full"), for the coefficients of the probability of success ("mean"), or for the coefficients of the scale-factor ("scale.factor"). } \item{\dots}{ other arguments (currently unused). } } \value{ The variance/covariance matrix of the parameters of the fitted model object. } \references{ Cribari-Neto F, Zeileis A. (2010). Beta Regression in R. \emph{Journal of Statistical Software}, \bold{34}(2), 1-24. \doi{10.18637/jss.v034.i02}. } \author{ David M. Smith <smithdm1@us.ibm.com> } \seealso{ \code{\link[betareg]{betareg}} } \examples{ data("herons.group") output.fn <- CountsEPPM(number.attempts ~ 0 + group, herons.group, model.type = 'mean only', model.name = 'Poisson') vcov(output.fn) } \keyword{ models }
#!/usr/bin/env Rscript
# Spearman correlation between k-mer GC content and k-mer counts.
# cjfiscus

library(pacman)
p_load(data.table, stringr)

args <- commandArgs(trailingOnly = TRUE)
# args[1] is the input k-mer count table, args[2] the output file, and
# args[3] the array index (SLURM_TASK_ARRAY_ID), i.e. which count column
# to read.  Renamed from `array`, which shadowed base::array.
col_idx <- as.integer(args[3])

# Read only the k-mer column plus the selected sample's count column.
kmers <- fread(args[1], select = c(1, col_idx))

# GC content of each k-mer (assumes the first column is named `mer` --
# TODO confirm against the input table's header).
kmers$gc <- str_count(kmers$mer, pattern = "G|C")

# Spearman correlation between counts and GC, labelled with the sample
# (column) name.
out <- as.data.frame(cbind(names(kmers)[2],
                           cor(kmers[[2]], kmers[[3]], method = "spearman")))

# Write the single result row, tab-separated, without headers or quoting.
write.table(out, args[2], sep = "\t", col.names = FALSE, row.names = FALSE,
            quote = FALSE)
/chapter_1/analysis_scripts/kmer_pl/006a_corr_count_gc.R
permissive
cjfiscus/2022_Fiscus_Dissertation
R
false
false
600
r
#!/usr/bin/env Rscript
# Spearman correlation between k-mer GC content and k-mer counts.
# cjfiscus

library(pacman)
p_load(data.table, stringr)

args <- commandArgs(trailingOnly = TRUE)
# args[1] is the input k-mer count table, args[2] the output file, and
# args[3] the array index (SLURM_TASK_ARRAY_ID), i.e. which count column
# to read.  Renamed from `array`, which shadowed base::array.
col_idx <- as.integer(args[3])

# Read only the k-mer column plus the selected sample's count column.
kmers <- fread(args[1], select = c(1, col_idx))

# GC content of each k-mer (assumes the first column is named `mer` --
# TODO confirm against the input table's header).
kmers$gc <- str_count(kmers$mer, pattern = "G|C")

# Spearman correlation between counts and GC, labelled with the sample
# (column) name.
out <- as.data.frame(cbind(names(kmers)[2],
                           cor(kmers[[2]], kmers[[3]], method = "spearman")))

# Write the single result row, tab-separated, without headers or quoting.
write.table(out, args[2], sep = "\t", col.names = FALSE, row.names = FALSE,
            quote = FALSE)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/coreNLPsetup_rexports.R, R/gofastr_reexports.R \docType{import} \name{reexports} \alias{reexports} \alias{check_setup} \alias{reexports} \alias{as_dtm} \alias{reexports} \alias{as_tdm} \title{Objects exported from other packages} \keyword{internal} \description{ These objects are imported from other packages. Follow the links below to see their documentation. \describe{ \item{coreNLPsetup}{\code{\link[coreNLPsetup]{check_setup}}} \item{gofastr}{\code{\link[gofastr]{as_dtm}}, \code{\link[gofastr]{as_tdm}}} }}
/man/reexports.Rd
no_license
Smfun12/tagger
R
false
true
599
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/coreNLPsetup_rexports.R, R/gofastr_reexports.R \docType{import} \name{reexports} \alias{reexports} \alias{check_setup} \alias{reexports} \alias{as_dtm} \alias{reexports} \alias{as_tdm} \title{Objects exported from other packages} \keyword{internal} \description{ These objects are imported from other packages. Follow the links below to see their documentation. \describe{ \item{coreNLPsetup}{\code{\link[coreNLPsetup]{check_setup}}} \item{gofastr}{\code{\link[gofastr]{as_dtm}}, \code{\link[gofastr]{as_tdm}}} }}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lightsail_operations.R \name{lightsail_stop_relational_database} \alias{lightsail_stop_relational_database} \title{Stops a specific database that is currently running in Amazon Lightsail} \usage{ lightsail_stop_relational_database( relationalDatabaseName, relationalDatabaseSnapshotName = NULL ) } \arguments{ \item{relationalDatabaseName}{[required] The name of your database to stop.} \item{relationalDatabaseSnapshotName}{The name of your new database snapshot to be created before stopping your database.} } \description{ Stops a specific database that is currently running in Amazon Lightsail. See \url{https://www.paws-r-sdk.com/docs/lightsail_stop_relational_database/} for full documentation. } \keyword{internal}
/cran/paws.compute/man/lightsail_stop_relational_database.Rd
permissive
paws-r/paws
R
false
true
806
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lightsail_operations.R \name{lightsail_stop_relational_database} \alias{lightsail_stop_relational_database} \title{Stops a specific database that is currently running in Amazon Lightsail} \usage{ lightsail_stop_relational_database( relationalDatabaseName, relationalDatabaseSnapshotName = NULL ) } \arguments{ \item{relationalDatabaseName}{[required] The name of your database to stop.} \item{relationalDatabaseSnapshotName}{The name of your new database snapshot to be created before stopping your database.} } \description{ Stops a specific database that is currently running in Amazon Lightsail. See \url{https://www.paws-r-sdk.com/docs/lightsail_stop_relational_database/} for full documentation. } \keyword{internal}
# STAT 636, Homework 2 -- multivariate analysis exercises.
# Refactored: six copy-pasted t-statistic blocks and six copy-pasted
# partial-F blocks are now loops/helpers; attach() is replaced by explicit
# data-frame references; three arithmetic defects are fixed (noted inline).

## Problem 1 ----
# MLE of the mean vector is the sample mean; MLE of Sigma is ((n-1)/n) * S.
x <- matrix(c(3, 6, 4, 4, 5, 7, 4, 7), nrow = 4, byrow = TRUE)
X_bar <- colMeans(x)           # mean vector
n <- nrow(x)
S <- var(x) * ((n - 1) / n)    # MLE of the covariance matrix

## Problem 2 ----
# a) chi-square with 6 d.f. (sum of squares of standard normal variables)
# b) sqrt(n)(xbar - mu) follows N(0, Sigma); xbar follows N(mu, Sigma / n)
# c) chi-square distribution
# d) scaled F distribution with 6 and 54 d.f.

## Problem 3 ----
library(MASS)
dta1 <- read.csv("C:/Users/jayabharath/Desktop/STAT 636/Hmw2/used_cars.csv",
                 sep = ",", header = TRUE)

qqnorm(dta1$Age); qqline(dta1$Age)

# Univariate Box-Cox transformations of Age and Price.
bc_a <- boxcox(dta1$Age ~ 1);   title(main = "Age")
bc_p <- boxcox(dta1$Price ~ 1); title(main = "Price")
lam_a <- bc_a$x[which.max(bc_a$y)]   # lambda maximising the profile likelihood
lam_p <- bc_p$x[which.max(bc_p$y)]
dta_tr_a <- (dta1$Age^lam_a - 1) / lam_a
dta_tr_p <- (dta1$Price^lam_p - 1) / lam_p

# Histograms and Q-Q plots after transformation.
par(mfrow = c(2, 2))
hist(dta_tr_a, xlab = "", main = "Age")
hist(dta_tr_p, xlab = "", main = "Price")
qqnorm(dta_tr_a, main = "Age");   qqline(dta_tr_a)
qqnorm(dta_tr_p, main = "Price"); qqline(dta_tr_p)
hist(dta1$Age)
hist(dta1$Price)

# Multivariate power transformation: profile the bivariate Box-Cox
# log-likelihood over a grid of (lambda1, lambda2).
library(car)
summary(powerTransform(cbind(dta1$Age, dta1$Price) ~ 1))

n <- nrow(dta1)
lambda_seq <- seq(from = -2, to = 2, length = 100)
obj <- matrix(NA, nrow = 100, ncol = 100)
csld <- colSums(log(dta1))
# Box-Cox transform of a single column for a given lambda.
bc_transform <- function(v, lambda) {
  if (lambda != 0) (v^lambda - 1) / lambda else log(v)
}
for (i in 1:100) {
  for (j in 1:100) {
    lambda <- lambda_seq[c(i, j)]
    X_l <- dta1
    for (k in 1:2) X_l[, k] <- bc_transform(X_l[, k], lambda[k])
    obj[i, j] <- -(n / 2) * log(det(var(X_l))) + (lambda - 1) %*% csld
  }
}
par(mfrow = c(1, 1))
contour(lambda_seq, lambda_seq, obj, xlab = expression(lambda[1]),
        ylab = expression(lambda[2]), xlim = c(-2, 3), ylim = c(-2, 3))
points(1.27, 0.03, pch = 20, cex = 2, col = "red")
text(0.3, 0.8,
     expression(paste(hat(lambda), "' = [1.27, 0.031]", sep = "")), lwd = 2)
lambda_seq[which(obj == max(obj), arr.ind = TRUE)]
dev.off()

## Problem 4 ----
dta2 <- read.csv("C:/STAT 636/Hmw2/sweat.csv", sep = ",", header = TRUE)

# (a) marginal normality checks and pairwise scatter plots.
for (v in names(dta2)) {
  qqnorm(dta2[[v]], main = v); qqline(dta2[[v]])
  hist(dta2[[v]])
}
pairs(dta2)

# (b) 95% confidence ellipsoid for the mean: axes and half-lengths.
n2 <- nrow(dta2)
p2 <- ncol(dta2)
center <- colMeans(dta2)
sigma_s <- var(dta2)
eig <- eigen(sigma_s)
# Scaled F critical value shared by all axes.
crit <- p2 * (n2 - 1) / (n2 * (n2 - p2)) * qf(0.95, p2, n2 - p2)
for (k in seq_len(p2)) {
  print(eig$vectors[, k])              # k-th axis direction
  print(sqrt(eig$values[k]) * sqrt(crit))  # k-th half-length
}

# (c) simultaneous T^2 confidence intervals for each mean component
# (was three copy-pasted blocks, one per variable).
c2 <- ((n2 - 1) * p2) / (n2 - p2) * qf(0.95, p2, n2 - p2)
for (k in seq_len(p2)) {
  a <- rep(0, p2); a[k] <- 1
  est <- as.numeric(a %*% center)
  se <- sqrt((t(a) %*% sigma_s %*% a) / n2)
  cat(est - se * sqrt(c2), est + se * sqrt(c2), "\n")
}

# (d) Bonferroni intervals (alpha = 0.05 split across 3 variables, 2-sided).
for (k in seq_len(p2)) {
  half <- qt(0.05 / 6, n2 - 1) * sqrt(sigma_s[k, k] / n2)  # half is negative
  cat(center[k] + half, center[k] - half, "\n")
}

# (e) Hotelling T^2 test of H0: mu = (4, 45, 10).
library(ICSNP)
HotellingsT2(dta2, mu = c(4, 45, 10))

# (f) same test by hand: mu_0 is inside the 95% ellipsoid iff tstat < cricval.
mu <- c(4, 45, 10)
tstat <- sqrt(n2 * (t(center - mu) %*% solve(sigma_s) %*% (center - mu)))
cricval <- sqrt(c2)
tstat < cricval  # TRUE -> mu_0 lies inside the 95% confidence ellipsoid
# FIX: the F statistic uses T^2 = tstat^2; the original mistakenly used
# tstat (= sqrt(T^2)) here.
pval <- pf(tstat^2 * (n2 - p2) / ((n2 - 1) * p2), p2, n2 - p2)
1 - pval
1 - pval > 0.05  # TRUE -> fail to reject H0: mu = (4, 45, 10)

# (g) bootstrap version of the test.
# NOTE(review): T2_f actually returns the likelihood-ratio statistic
# Lambda^(n/2), not T^2, and `mu` is unused in sim_f; kept as in the
# original analysis.  `p` is now defined explicitly (the original used it
# without defining it).
n <- 20
p <- ncol(dta2)
B <- 500
T2_f <- function(X, mu_0) {
  # Covariance matrices under the null and unrestricted scenarios.
  S <- var(X)
  S_0 <- (t(X) - mu_0) %*% t(t(X) - mu_0) / (n - 1)
  # Compute the statistic only if both matrices are non-singular.
  T2 <- NA
  if (det(S) > 0 && det(S_0) > 0) {
    T2 <- (det(S) / det(S_0))^(n / 2)
  }
  T2
}
sim_f <- function(mu, mu_0 = c(4, 45, 10)) {
  X <- dta2
  # Observed value of the statistic.
  T2_0 <- T2_f(X, mu_0)
  T2_0_scaled <- (n - p) / ((n - 1) * p) * T2_0
  set.seed(101)
  T2_b <- rep(NA, B)
  # Re-centre the data so the null hypothesis holds, then resample.
  X_0 <- t(t(X) - colMeans(X) + mu_0)
  for (b in seq_len(B)) {
    ii <- sample(seq_len(n), replace = TRUE)
    T2_b[b] <- T2_f(X_0[ii, ], mu_0)
  }
  T2_b_scaled <- (n - p) / ((n - 1) * p) * T2_b
  p_value_boot <- mean(T2_b_scaled >= T2_0_scaled, na.rm = TRUE)
  1 - p_value_boot
}

## Problem 5 ----
dta3 <- read.csv("C:/Users/jayabharath/Desktop/STAT 636/Hmw2/peanut.csv",
                 sep = ",", header = TRUE)
Location <- as.character(dta3$Location)
Variety <- as.character(dta3$Variety)

# Per-location summaries.
x_1 <- dta3[Location == 1, -2]
x_2 <- dta3[Location == 2, -2]
x1_bar <- colMeans(x_1[, 2:4])
x2_bar <- colMeans(x_2[, 2:4])
x1var <- var(x_1[, 2:4])
x2var <- var(x_2[, 2:4])

# One-way MANOVAs (Wilks) and the two-way MANOVA with interaction.
y1 <- as.matrix(dta3[, 3:5])
treat <- as.factor(dta3[, 1])    # location
treat1 <- as.factor(dta3[, 2])   # variety
summary(manova(y1 ~ treat), test = "Wilks")    # location effect
summary(manova(y1 ~ treat1), test = "Wilks")   # variety effect
# No interaction, but both main effects are present.
summary(manova(y1 ~ treat + treat1 + treat * treat1), test = "Wilks")

# 5(b) two-way MANOVA by hand.
n <- 2   # replicates per cell
p <- 3   # response variables
g <- 2   # locations
b <- 3   # varieties

# Summary statistics.  Cell means are stored variety-major: row
# (k - 1) * 2 + l holds (location l, variety k) for varieties (5, 6, 8).
x_bar <- colMeans(dta3[, 3:5])
locs <- c(1, 2)
vars5 <- c(5, 6, 8)
x_bar_lk <- matrix(NA, nrow = g * b, ncol = p)
for (k in seq_len(b)) {
  for (l in seq_len(g)) {
    x_bar_lk[(k - 1) * 2 + l, ] <-
      colMeans(dta3[dta3$Location == locs[l] & dta3$Variety == vars5[k], 3:5])
  }
}
x_bar_l_dot <- rbind(colMeans(dta3[dta3$Location == 1, 3:5]),
                     colMeans(dta3[dta3$Location == 2, 3:5]))
x_bar_dot_k <- rbind(colMeans(dta3[dta3$Variety == 5, 3:5]),
                     colMeans(dta3[dta3$Variety == 6, 3:5]),
                     colMeans(dta3[dta3$Variety == 8, 3:5]))

# Components for MANOVA.
SSP_cor <- SSP_fac_1 <- SSP_fac_2 <- SSP_int <- SSP_res <-
  matrix(0, nrow = p, ncol = p)
for (l in seq_len(g)) {
  d <- x_bar_l_dot[l, , drop = FALSE] - x_bar
  SSP_fac_1 <- SSP_fac_1 + b * n * t(d) %*% d
}
for (k in seq_len(b)) {
  d <- x_bar_dot_k[k, , drop = FALSE] - x_bar
  SSP_fac_2 <- SSP_fac_2 + g * n * t(d) %*% d
}
for (k in seq_len(b)) {
  for (l in seq_len(g)) {
    d <- x_bar_lk[(k - 1) * 2 + l, , drop = FALSE] -
      x_bar_l_dot[l, , drop = FALSE] - x_bar_dot_k[k, , drop = FALSE] + x_bar
    SSP_int <- SSP_int + n * t(d) %*% d
  }
}
# FIX: residual/total sums now look each observation's cell mean up by its
# own (Location, Variety) values; the original indexed x_bar_lk with
# (l - 1) * 3 + k, which is inconsistent with the variety-major row order
# used everywhere else, and assumed a fixed row ordering of the data.
for (r in seq_len(nrow(dta3))) {
  l <- match(dta3$Location[r], locs)
  k <- match(dta3$Variety[r], vars5)
  d_res <- as.matrix(dta3[r, 3:5]) - x_bar_lk[(k - 1) * 2 + l, , drop = FALSE]
  SSP_res <- SSP_res + t(d_res) %*% d_res
  d_cor <- as.matrix(dta3[r, 3:5]) - x_bar
  SSP_cor <- SSP_cor + t(d_cor) %*% d_cor
}

## Inference (Wilks lambda, Bartlett-style F approximations). ----
# Interaction.
Lambda1 <- det(SSP_res) / det(SSP_int + SSP_res)
1 - pf((((g * b * (n - 1) - p + 1) / 2) /
          ((abs((g - 1) * (b - 1) - p) + 1) / 2)) * (1 - Lambda1) / Lambda1,
       abs((g - 1) * (b - 1) - p) + 1, g * b * (n - 1) - p + 1)
# Location (factor 1) effect.
Lambda2 <- det(SSP_res) / det(SSP_fac_1 + SSP_res)
1 - pf((((g * b * (n - 1) - p + 1) / 2) /
          ((abs((g - 1) - p) + 1) / 2)) * (1 - Lambda2) / Lambda2,
       abs((g - 1) - p) + 1, g * b * (n - 1) - p + 1)
# Variety (factor 2) effect.
Lambda3 <- det(SSP_res) / det(SSP_fac_2 + SSP_res)
1 - pf((((g * b * (n - 1) - p + 1) / 2) /
          ((abs((b - 1) - p) + 1) / 2)) * (1 - Lambda3) / Lambda3,
       abs((b - 1) - p) + 1, g * b * (n - 1) - p + 1)
# Cross-check against R's built-in two-way MANOVA.
summary(manova(y1 ~ treat + treat1 + treat * treat1), test = "Wilks")

## Problem 6 ----
hof <- read.csv("C:/Users/jayabharath/Desktop/STAT 636/Hmw2/hof_data.csv",
                sep = ",", header = TRUE)
num_vars <- c("H", "HR", "RBI", "AVG", "SLG", "OBP")
X <- as.matrix(hof[, num_vars])
X_st <- scale(X, center = TRUE, scale = TRUE)  # standardise
DTA_st <- setNames(data.frame(hof$HOF, X_st), c("HOF", num_vars))

# (a) linear discriminant analysis; HOF players score high on H, HR and
# low on RBI, SLG along the discriminant.
lda_out <- lda(HOF ~ H + HR + RBI + AVG + SLG + OBP, data = DTA_st)
lda_out$scaling
ld <- X_st %*% lda_out$scaling
range(ld[hof$HOF == "Y"])
range(ld[hof$HOF == "N"])

# (b) two-sample pooled t statistics, one per variable
# (was six copy-pasted blocks).
# FIX: the pooled variance uses the group variances directly; the original
# squared var(), i.e. pooled the variances' squares.
n_k <- table(hof$HOF)   # group sizes: "N" then "Y"
t_stats <- vapply(num_vars, function(v) {
  xy <- X_st[hof$HOF == "Y", v]
  xn <- X_st[hof$HOF == "N", v]
  sp2 <- ((n_k[2] - 1) * var(xy) + (n_k[1] - 1) * var(xn)) /
    (n_k[1] + n_k[2] - 2)
  (mean(xy) - mean(xn)) / (sqrt(sp2) * sqrt(1 / n_k[1] + 1 / n_k[2]))
}, numeric(1))
t_stats

# (c) partial F statistic for each variable: compare the two-sample T^2
# with all 6 variables against T^2 with that variable removed
# (was six copy-pasted blocks).
pooled_T2 <- function(cols) {
  XY <- X_st[hof$HOF == "Y", cols, drop = FALSE]
  XN <- X_st[hof$HOF == "N", cols, drop = FALSE]
  d <- colMeans(XY) - colMeans(XN)
  Sp <- ((n_k[2] - 1) * var(XY) + (n_k[1] - 1) * var(XN)) /
    (n_k[2] + n_k[1] - 2)
  as.numeric(t(d) %*% solve(Sp * (1 / n_k[2] + 1 / n_k[1])) %*% d)
}
p6 <- length(num_vars)
T2_full <- pooled_T2(num_vars)
partial_F <- vapply(seq_along(num_vars), function(k) {
  T2_red <- pooled_T2(num_vars[-k])
  (n_k[1] + n_k[2] - 2 - p6 + 1) * (T2_full - T2_red) /
    (n_k[1] + n_k[2] - 2 + T2_red)
}, numeric(1))
names(partial_F) <- num_vars
partial_F

# (d) overlaid histograms of the discriminant scores by HOF status.
par(mfrow = c(1, 1))
hist(ld[hof$HOF == "N"], main = "HOF", xlab = "Discriminant value",
     col = "red", xlim = c(-3, 5), probability = TRUE)
hist(ld[hof$HOF == "Y"], xlab = "Discriminant value", col = "green",
     add = TRUE, probability = TRUE)
legend("topright", c("HOF=N", "HOF=Y"), col = c("red", "green"), bty = "n",
       pch = c(15, 15), pt.bg = c("red", "green"), cex = 1.2)
/Hmw2.R
no_license
bharathallu/Multivariate-Analysis
R
false
false
16,087
r
# problem 1 x=matrix(c(3,6,4,4,5,7,4,7),nrow=4,byrow = T) #mle of mean vector is xbar. and variance is S. X_bar<-colMeans(x) # mean vector n<-nrow(x) S<- var(x)*((n-1)/n) # Sigma^2/n #problem 2 # a) follows a chi square distribution. with 6 dof Sum of squares of standard normal variables # b) distribution of sqrt(n)(x-mu) follows normal distribution with (0,sigma) # x bar follows normal distribution with (mu, sigma/n) #c) chi square distribution # d) it follows scaled F distribution with 6,54 dof. # problem 3 library(MASS) dta1<-read.csv("C:/Users/jayabharath/Desktop/STAT 636/Hmw2/used_cars.csv",sep=",",header=T) qqnorm(x_lt <- dta1$Age); qqline(x_lt) bc_a<- boxcox(dta1$Age~1) title(main="Age") bc_p<- boxcox(dta1$Price~1) title(main="Price") lam_a <- bc_a$x[which.max(bc_a$y)] # finding lambda for age lam_p <- bc_p$x[which.max(bc_p$y)] # finding lambda for price dta_tr_a <- ((dta1$Age^lam_a)-1)/lam_a # transformation for age dta_tr_p <- ((dta1$Price^lam_p)-1)/lam_p # transformation for price par(mfrow=c(2,2)) hist(dta_tr_a,xlab = "",main="Age") # histograms after transformation. 
hist(dta_tr_p,xlab = "",main="Price") qqnorm(dta_tr_a,main="Age") # qq plot for age qqline(dta_tr_a) qqnorm(dta_tr_p,main="Price") # qqplot for price qqline(dta_tr_p) hist(dta1$Age) hist(dta1$Price) # multivariate power transformation library(car) summary(powerTransform(cbind(dta1$Age,dta1$Price)~1,)) n<- nrow(dta1) lambda_seq <- seq(from = -2, to = 2, length = 100) obj <- matrix(NA, nrow = 100, ncol = 100) csld <- colSums(log(dta1)) for(i in 1:100) { for(j in 1:100) { X_l <- dta1 lambda <- lambda_seq[c(i, j)] for(k in 1:2) { if(lambda[k] != 0) { X_l[, k] <- (X_l[, k] ^ lambda[k] - 1) / lambda[k] } else { X_l[, k] <- log(X_l[, k]) } } S <- var(X_l) obj[i, j] <- -(n / 2) * log(det(S)) + (lambda - 1) %*% csld } } par(mfrow = c(1, 1)) contour(lambda_seq, lambda_seq, obj, xlab = expression(lambda[1]), ylab = expression(lambda[2]),xlim=c(-2,3),ylim=c(-2,3)) points(1.27, 0.03, pch = 20, cex = 2, col = "red") text(0.3, 0.8, expression(paste(hat(lambda), "' = [1.27, 0.031]", sep = "")), lwd = 2) lambda_seq[which(obj==max(obj),arr.ind=TRUE)] dev.off() #problem 4 dta2<-read.csv("C:/STAT 636/Hmw2/sweat.csv",sep=",",header=T) dta2 #qqplot for sweat qqnorm(dta2$Sweat,main="sweat") qqline(dta2$Sweat) hist(dta2$Sweat) #qqplot for sodium qqnorm(dta2$Sodium,main="sodium") qqline(dta2$Sodium) hist(dta2$Sodium) #qqplot for potassium qqnorm(dta2$Potassium,main="potassium") qqline(dta2$Potassium) hist(dta2$Potassium) #pairwise scatter plots pairs(dta2) #b center<- colMeans(dta2) sigma_s<- var(dta2) eig<-eigen(sigma_s) eis<- eigen(solve(sigma_s)) # axes and half lengths pri_axis<-eig$vectors[,1];pri_hl<- sqrt(eig$values[1])*sqrt(ncol(dta2)*(nrow(dta2)-1)/(nrow(dta2)*(nrow(dta2)-ncol(dta2)))*qf(.95,ncol(dta2),nrow(dta2)-ncol(dta2))) sec_axis<-eig$vectors[,2];sec_hl<- sqrt(eig$values[2])*sqrt(ncol(dta2)*(nrow(dta2)-1)/(nrow(dta2)*(nrow(dta2)-ncol(dta2)))*qf(.95,ncol(dta2),nrow(dta2)-ncol(dta2))) ter_axis<-eig$vectors[,3];ter_hl<- 
sqrt(eig$values[3])*sqrt(ncol(dta2)*(nrow(dta2)-1)/(nrow(dta2)*(nrow(dta2)-ncol(dta2)))*qf(.95,ncol(dta2),nrow(dta2)-ncol(dta2))) pri_axis;pri_hl sec_axis;sec_hl ter_axis;ter_hl # partc a1=c(1,0,0) a<- a1%*%center b<- sqrt((t(a1)%*%sigma_s%*%a1)/nrow(dta2)) c<- (((nrow(dta2)-1)*ncol(dta2))/(nrow(dta2)-ncol(dta2)))*qf(.95,ncol(dta2),nrow(dta2)-ncol(dta2)) conf.intrvl_sw<- cat(a-(b*sqrt(c)),a+(b*sqrt(c))) # ci for sweat a2=c(0,1,0) a_s<- a2%*%center b_s<- sqrt((t(a2)%*%sigma_s%*%a2)/nrow(dta2)) c<- (nrow(dta2)-1)*ncol(dta2)/(nrow(dta2)-ncol(dta2))*qf(.95,ncol(dta2),nrow(dta2)-ncol(dta2)) conf.intrvl_sw<- cat(a_s-(b_s*sqrt(c)),a_s+(b_s*sqrt(c))) # ci for sodium a3=c(0,0,1) a_k<- a3%*%center b_k<- sqrt((t(a3)%*%sigma_s%*%a3)/nrow(dta2)) c<- (nrow(dta2)-1)*ncol(dta2)/(nrow(dta2)-ncol(dta2))*qf(.95,ncol(dta2),nrow(dta2)-ncol(dta2)) conf.intrvl_sw<- cat(a_k-(b_k*sqrt(c)),a_k+(b_k*sqrt(c))) # ci for potassium #partd bonferroni intervals # bonferroni interval for sweat bonf.CI_sw<- cat((center[1]+(qt(.05/6,19)*sqrt(sigma_s[1,1]/nrow(dta2)))),center[1]-(qt(.05/6,19)*sqrt(sigma_s[1,1]/nrow(dta2)))) #bonferroni interval for sodium bonf.CI_na<- cat((center[2]+(qt(.05/6,19)*sqrt(sigma_s[2,2]/nrow(dta2)))),center[2]-(qt(.05/6,19)*sqrt(sigma_s[2,2]/nrow(dta2)))) #bonferroni interval for potassium bonf.CI_k<- cat((center[3]+(qt(.05/6,19)*sqrt(sigma_s[3,3]/nrow(dta2)))),center[3]-(qt(.05/6,19)*sqrt(sigma_s[3,3]/nrow(dta2)))) # part E library(Hotelling) library(ICSNP) HotellingsT2(dta2,mu=c(4,45,10)) # part f mu<- c(4,45,10) tstat<- sqrt(nrow(dta2)*(t(center-mu)%*%solve(sigma_s)%*%(center-mu))) tstat cricval<- sqrt((nrow(dta2)-1)*ncol(dta2)/(nrow(dta2)-ncol(dta2))*qf(.95,ncol(dta2),nrow(dta2)-ncol(dta2))) cricval tstat<cricval # returns True, therefore point lies inside ellipse and is within 95% confidence interval # This is in line with the result of hypothesis test. 
pval<-pf(tstat*(nrow(dta2)-ncol(dta2))/((nrow(dta2)-1)*ncol(dta2)),ncol(dta2),(nrow(dta2)-ncol(dta2))) 1-pval 1-pval>.05 # returns true. Therefore we fail to reject null that mean vector is (4,45,10) # part G ## A function to compute T2. n <- 20 B <- 500 T2_f <- function(X, mu_0) { ## The covariance matrices under the null and unrestricted scenarios. S <- var(X) S_0 <- (t(X) - mu_0) %*% t(t(X) - mu_0) / (n - 1) # Compute T2 if the sample covariance matrices are non-singular. T2 <- NA if(det(S) > 0 & det(S_0) > 0) { Lambda <- (det(S) / det(S_0)) ^ (n / 2) T2 <- Lambda } return(T2) } sim_f <- function(mu, mu_0 = c(4,45,10)) { ## Simulate a sample from the multivariate t. X <- dta2 ## Observed value of T2. T2_0 <- T2_f(X, mu_0) T2_0_scaled <- (n - p) / ((n - 1) * p) * T2_0 set.seed(101) T2_b <- rep(NA, B) X_0 <- t(t(X) - colMeans(X) + mu_0) for(b in 1:B) { ii <- sample(1:n, replace = TRUE) T2_b[b] <- T2_f(X_0[ii, ], mu_0) } T2_b_scaled <- (n - p) / ((n - 1) * p) * T2_b p_value_boot <- mean(T2_b_scaled >= T2_0_scaled, na.rm = TRUE) return(1-p_value_boot) } # problem 5 dta3<-read.csv("C:/Users/jayabharath/Desktop/STAT 636/Hmw2/peanut.csv",sep=",",header=T) names(dta3) attach(dta3) Location <- as.character(Location) Variety <- as.character(Variety) x_1<- dta3[Location==1,-2] x_2<- dta3[Location==2,-2] x1_bar<- colMeans(x_1[,2:4]) x2_bar<- colMeans(x_2[,2:4]) x1var<- var(x_1[,2:4]) x2var<- var(x_2[,2:4]) y1<- as.matrix(dta3[,3:5]) factor<- dta3[,1] treat<- as.factor(factor) m1<- manova(y1~treat) summary(m1,test="Wilks") # location effect y2<- as.matrix(dta3[,3:5]) factor1<- dta3[,2] treat1<- as.factor(factor1) m2<- manova(y2~treat1) summary(m2,test="Wilks") # there is effect of variety. #two way MANOVA y1 m3<- manova(y1~treat+treat1+treat*treat1) summary(m3,test="Wilks") # no interaction but there is factor 1 and factor 2 effect. #5 b n <- 2 p <- 3 g <- 2 b <- 3 attach(dta3) ## Summary statistics. 
x_bar <- colMeans(dta3[, 3:5]) x_bar_lk <- rbind(colMeans(dta3[Location == 1 & Variety == 5, 3:5]), colMeans(dta3[Location == 2 & Variety == 5, 3:5]), colMeans(dta3[Location == 1 & Variety == 6, 3:5]), colMeans(dta3[Location == 2 & Variety == 6, 3:5]), colMeans(dta3[Location == 1 & Variety == 8, 3:5]), colMeans(dta3[Location == 2 & Variety == 8, 3:5])) x_bar_l_dot <- rbind(colMeans(dta3[Location == 1, 3:5]), colMeans(dta3[Location == 2, 3:5])) x_bar_dot_k <- rbind(colMeans(dta3[Variety == 5, 3:5]), colMeans(dta3[Variety == 6, 3:5]),colMeans(dta3[Variety == 8, 3:5])) ## Components for MANOVA. SSP_cor <- SSP_fac_1 <- SSP_fac_2 <- SSP_int <- SSP_res <- matrix(0, nrow = p, ncol = p) for(l in 1:g) { SSP_fac_1 <- SSP_fac_1 + b * n * t(x_bar_l_dot[l, , drop = FALSE] - x_bar) %*% (x_bar_l_dot[l, , drop = FALSE] - x_bar) } for(k in 1:b){ SSP_fac_2 <- SSP_fac_2 + g * n * t(x_bar_dot_k[k, , drop = FALSE] - x_bar) %*% (x_bar_dot_k[k, , drop = FALSE] - x_bar) } for(k in 1:b) { for(l in 1:g){ SSP_int <- SSP_int + n * t(x_bar_lk[(k - 1) * 2 + l, , drop = FALSE] - x_bar_l_dot[l, , drop = FALSE] - x_bar_dot_k[k, , drop = FALSE] + x_bar) %*% (x_bar_lk[(k - 1) * 2 + l, , drop = FALSE] - x_bar_l_dot[l, , drop = FALSE] - x_bar_dot_k[k, , drop = FALSE] + x_bar) } } for(l in 1:g) { for(k in 1:b) { for(r in 1:n){ SSP_res <- SSP_res + t(as.matrix(dta3[(l - 1) * 3 * n + (k - 1) * n + r, 3:5]) - x_bar_lk[(l - 1) * 3 + k, , drop = FALSE]) %*% (as.matrix(dta3[(l - 1) * 3 * n + (k - 1) * n + r, 3:5]) - x_bar_lk[(l - 1) * 3 + k, , drop = FALSE]) SSP_cor <- SSP_cor + t(as.matrix(dta3[(l - 1) * 3 * n + (k - 1) * n + r, 3:5]) - x_bar) %*% (as.matrix(dta3[(l - 1) * 3 * n + (k - 1) * n + r, 3:5]) - x_bar) } } } ## ## Inference. ## ## No interaction. 
Lambda1 <- det(SSP_res) / det(SSP_int + SSP_res) 1 - pf((((g * b * (n - 1) - p + 1) / 2) / ((abs((g - 1) * (b - 1) - p) + 1) / 2)) * (1 - Lambda1) / Lambda1, abs((g - 1) * (b - 1) - p) + 1, g * b * (n - 1) - p + 1) ## There is an effect of location Lambda2 <- det(SSP_res) / det(SSP_fac_1 + SSP_res) 1 - pf((((g * b * (n - 1) - p + 1) / 2) / ((abs((g - 1) - p) + 1) / 2)) * (1 - Lambda2) / Lambda2, abs((g - 1) - p) + 1, g * b * (n - 1) - p + 1) ## There is an effect of variety. Lambda3 <- det(SSP_res) / det(SSP_fac_2 + SSP_res) 1 - pf((((g * b * (n - 1) - p + 1) / 2) / ((abs((b - 1) - p) + 1) / 2)) * (1 - Lambda3) / Lambda3, abs((b - 1) - p) + 1, g * b * (n - 1) - p + 1) summary(manova(y1~treat+treat1+treat*treat1), test = "Wilks") #6 library(MASS) hof<-read.csv("C:/Users/jayabharath/Desktop/STAT 636/Hmw2/hof_data.csv",sep=",",header=T) num_vars <- c("H", "HR", "RBI", "AVG", "SLG", "OBP") X <- as.matrix(hof[, num_vars]) # extracting those variables X_st <- scale(X, center = TRUE, scale = TRUE) #standardising DTA_st <- data.frame(hof$HOF, X_st) colnames(DTA_st) <- c("HOF", num_vars) # part-a lda_out <- lda(HOF ~ H + HR + RBI + AVG + SLG + OBP, data = DTA_st) (lda_out$scaling) # coefficients of dicriminants. players in HOF have high H,HR and low RBI,SLG. 
## Part (a) continued: linear discriminant scores for every player, and the
## score ranges within each Hall-of-Fame group.
ld <- X_st %*% lda_out$scaling
ld[hof$HOF == "Y"]
max(ld[hof$HOF == "N"])
max(ld[hof$HOF == "Y"])
min(ld[hof$HOF == "N"])
min(ld[hof$HOF == "Y"])

## Part (b): univariate two-sample pooled t statistics (HOF = "Y" vs "N"),
## one standardized covariate at a time.
t.test(X_st[, 1])
nh <- table(hof$HOF)  # group sizes: nh[1] = "N", nh[2] = "Y"

# Pooled two-sample t statistic for column j of the standardized data.
# BUG FIX: the original squared var() inside the pooled estimate (treating a
# variance as a standard deviation). The pooled variance is
# ((n_Y - 1) s2_Y + (n_N - 1) s2_N) / (n_Y + n_N - 2), used directly here.
pooled_t_stat <- function(j) {
  x_y <- X_st[hof$HOF == "Y", j]
  x_n <- X_st[hof$HOF == "N", j]
  s_pool <- sqrt(((nh[2] - 1) * var(x_y) + (nh[1] - 1) * var(x_n)) /
                   (nh[1] + nh[2] - 2))
  (mean(x_y) - mean(x_n)) / (s_pool * sqrt(1 / nh[1] + 1 / nh[2]))
}

tsta_H   <- pooled_t_stat(1)
tsta_HR  <- pooled_t_stat(2)
tsta_RBI <- pooled_t_stat(3)
tsta_AVG <- pooled_t_stat(4)
tsta_SLG <- pooled_t_stat(5)
tsta_OBP <- pooled_t_stat(6)

## Part (c): partial F statistics — does covariate j add to the separation of
## the two groups beyond the remaining five covariates?
n_k <- table(hof$HOF)  # group sizes: n_k[1] = "N", n_k[2] = "Y"

# Pooled within-group covariance matrix of the columns of X.
pooled_cov <- function(X) {
  S_y <- var(X[hof$HOF == "Y", ])
  S_n <- var(X[hof$HOF == "N", ])
  ((n_k[2] - 1) * S_y + (n_k[1] - 1) * S_n) / (n_k[2] + n_k[1] - 2)
}

# Two-sample Hotelling T^2 comparing HOF = "Y" vs "N" on the columns of X.
T2_stat <- function(X) {
  d <- colMeans(X[hof$HOF == "Y", ]) - colMeans(X[hof$HOF == "N", ])
  drop(t(d) %*% solve(pooled_cov(X) * ((1 / n_k[2]) + (1 / n_k[1]))) %*% d)
}

T2_F <- T2_stat(X_st)  # full model, all 6 covariates

# Partial F statistic for dropping covariate j from the full model
# (p = 6 covariates in the full model).
partial_F <- function(j) {
  T2_H <- T2_stat(X_st[, -j])
  ((n_k[1] + n_k[2] - 2 - 6 + 1) * (T2_F - T2_H)) / (n_k[1] + n_k[2] - 2 + T2_H)
}

F_stat_H   <- partial_F(1)
F_stat_HR  <- partial_F(2)
F_stat_RBI <- partial_F(3)
F_stat_AVG <- partial_F(4)
F_stat_SLG <- partial_F(5)
F_stat_OBP <- partial_F(6)

## Part (d): overlaid histograms of the discriminant scores by HOF status.
X_st[hof$HOF == "Y", ] %*% lda_out$scaling
X_st[hof$HOF == "N", ] %*% lda_out$scaling
par(mfrow = c(1, 1))
hist(X_st[hof$HOF == "N", ] %*% lda_out$scaling, main = "HOF",
     xlab = "Discriminant value", col = "red", xlim = c(-3, 5),
     probability = TRUE)
hist(X_st[hof$HOF == "Y", ] %*% lda_out$scaling,
     xlab = "Discriminant value", col = "green", add = TRUE,
     probability = TRUE)
legend("topright", c("HOF=N", "HOF=Y"), col = c("red", "green"), bty = "n",
       pch = c(15, 15), pt.bg = c("red", "green"), cex = 1.2)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getVencimentosTitulosPublicos.R
\name{getVencimentosTitulosPublicos}
\alias{getVencimentosTitulosPublicos}
\title{Prepara Dados}
\usage{
getVencimentosTitulosPublicos()
}
\description{
os dados vem da seguinte url: http://www.tesourotransparente.gov.br/ckan/dataset/taxas-dos-titulos-ofertados-pelo-tesouro-direto
Retorna data.frame com as datas de vencimento dos papeis LFT, LTN, NTN-F, NTN-B Principal, NTN-B, NTN-C
}
\author{
Rodrigo Almeida
}
\keyword{abertos}
\keyword{dados}
\keyword{direto,}
\keyword{tesouro}
/man/getVencimentosTitulosPublicos.Rd
no_license
lojadedados/rtesourodescomplicado
R
false
true
600
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getVencimentosTitulosPublicos.R
\name{getVencimentosTitulosPublicos}
\alias{getVencimentosTitulosPublicos}
\title{Prepara Dados}
\usage{
getVencimentosTitulosPublicos()
}
\description{
os dados vem da seguinte url: http://www.tesourotransparente.gov.br/ckan/dataset/taxas-dos-titulos-ofertados-pelo-tesouro-direto
Retorna data.frame com as datas de vencimento dos papeis LFT, LTN, NTN-F, NTN-B Principal, NTN-B, NTN-C
}
\author{
Rodrigo Almeida
}
\keyword{abertos}
\keyword{dados}
\keyword{direto,}
\keyword{tesouro}
## Aula 2 GLM library(ggplot2) library(data.table) library(car) x<- rnorm(100, 0, 1) y<- seq(100) sampling <- seq(50, 100) sd_s <- seq(1, 10) cor(x, y) a <- 0 while (abs(cor(x, y)) <= 0.8){ x <- rnorm(100, sample(sampling, 1), 5) y <- rnorm(100, sample(sampling, 1), sample(sd_s, 1)) c <- cor(x, y) if(cor(x, y) > c){ print(c) } } summary(a) plot(x, y) x <- 0 y <- 0 x_1 <- x y_1 <- y x_2 <- x y_2 <- y plot(x_1, y_1) plot(x_2, y_2) ## Aula # a) # file.choose() data data <- fread( "C:\\Users\\fc55066\\Downloads\\Risk_Data_WWDaniel.csv") colnames(data) <- c('subject', 'oxygen', 'sbp', 'choles_tot', 'choles_HDL', 'trig') oxygen <- data[, oxygen] sbp <- data[, sbp] choles_tot <- data[, choles_tot] choles_HDL <- data[, choles_HDL] trig <- data[, trig] # b) cor_1 <- cor(oxygen, sbp) cor_2 <- cor(oxygen, choles_tot) cor_3 <- cor(oxygen, choles_HDL) cor_4 <- cor(oxygen, trig) correlations <- c(cor_1, cor_2, cor_3, cor_4) ; correlations par(mfrow = c(2, 2)) plot(sbp, oxygen, col = 'blue') plot(choles_tot, oxygen, col = 'red') plot(choles_HDL, oxygen, col = 'orange') plot(trig, oxygen) # c) model <- lm(data = data, formula = oxygen ~ choles_HDL) summary(model) # d) aov_mod <- anova(model) cat('F value is',aov_mod$`F value`, 'and so with p value = ', round(aov_mod$`Pr(>F)`, 1), 'we reject H0 and conclude that beta 1 is differente from 0') ## fitted model is oxygen = 16.307 + 0.3715*choles_HDL confint(model) ggplot(data, aes(choles_HDL, oxygen)) + geom_point(position = 'jitter', pch = 1) + geom_abline(slope = model$coefficients[2], intercept = model$coefficients[1], colour = 'blue') summary(model$res) par(mfrow = c(3, 2)) qqnorm(model$res) qqline(model$res) hist(model$res) qqPlot(model$res) boxplot(model$res, horizontal = T) plot(choles_HDL, model$res) abline(h = 0) plot(model$fitted.values, scale(model$residuals)) abline(h = 0) plot(model)
/GLM- aula- 2.R
no_license
martim98/aulas_2_semestre
R
false
false
2,098
r
## Aula 2 GLM library(ggplot2) library(data.table) library(car) x<- rnorm(100, 0, 1) y<- seq(100) sampling <- seq(50, 100) sd_s <- seq(1, 10) cor(x, y) a <- 0 while (abs(cor(x, y)) <= 0.8){ x <- rnorm(100, sample(sampling, 1), 5) y <- rnorm(100, sample(sampling, 1), sample(sd_s, 1)) c <- cor(x, y) if(cor(x, y) > c){ print(c) } } summary(a) plot(x, y) x <- 0 y <- 0 x_1 <- x y_1 <- y x_2 <- x y_2 <- y plot(x_1, y_1) plot(x_2, y_2) ## Aula # a) # file.choose() data data <- fread( "C:\\Users\\fc55066\\Downloads\\Risk_Data_WWDaniel.csv") colnames(data) <- c('subject', 'oxygen', 'sbp', 'choles_tot', 'choles_HDL', 'trig') oxygen <- data[, oxygen] sbp <- data[, sbp] choles_tot <- data[, choles_tot] choles_HDL <- data[, choles_HDL] trig <- data[, trig] # b) cor_1 <- cor(oxygen, sbp) cor_2 <- cor(oxygen, choles_tot) cor_3 <- cor(oxygen, choles_HDL) cor_4 <- cor(oxygen, trig) correlations <- c(cor_1, cor_2, cor_3, cor_4) ; correlations par(mfrow = c(2, 2)) plot(sbp, oxygen, col = 'blue') plot(choles_tot, oxygen, col = 'red') plot(choles_HDL, oxygen, col = 'orange') plot(trig, oxygen) # c) model <- lm(data = data, formula = oxygen ~ choles_HDL) summary(model) # d) aov_mod <- anova(model) cat('F value is',aov_mod$`F value`, 'and so with p value = ', round(aov_mod$`Pr(>F)`, 1), 'we reject H0 and conclude that beta 1 is differente from 0') ## fitted model is oxygen = 16.307 + 0.3715*choles_HDL confint(model) ggplot(data, aes(choles_HDL, oxygen)) + geom_point(position = 'jitter', pch = 1) + geom_abline(slope = model$coefficients[2], intercept = model$coefficients[1], colour = 'blue') summary(model$res) par(mfrow = c(3, 2)) qqnorm(model$res) qqline(model$res) hist(model$res) qqPlot(model$res) boxplot(model$res, horizontal = T) plot(choles_HDL, model$res) abline(h = 0) plot(model$fitted.values, scale(model$residuals)) abline(h = 0) plot(model)
\docType{data} \name{pisa.psa.cols} \alias{pisa.psa.cols} \title{Character vector representing the list of covariates used for estimating propensity scores.} \format{a character vector with covariate names for estimating propensity scores.} \description{ Character vector representing the list of covariates used for estimating propensity scores. } \keyword{datasets}
/man/pisa.psa.cols.Rd
no_license
Libardo1/PSAgraphics2
R
false
false
373
rd
\docType{data} \name{pisa.psa.cols} \alias{pisa.psa.cols} \title{Character vector representing the list of covariates used for estimating propensity scores.} \format{a character vector with covariate names for estimating propensity scores.} \description{ Character vector representing the list of covariates used for estimating propensity scores. } \keyword{datasets}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/explanationObject.R
\docType{methods}
\name{explainRuleModel,explanationObject,data.frame-method}
\alias{explainRuleModel,explanationObject,data.frame-method}
\title{Method for explaining a rule model}
\usage{
\S4method{explainRuleModel}{explanationObject,data.frame}(theObject, data)
}
\arguments{
\item{theObject}{an explanation object}

\item{data}{data to be explained}
}
\description{
Method for explaining a rule model
}
/man/explainRuleModel-explanationObject-data.frame-method.Rd
no_license
jirifilip/arulesExplain
R
false
true
506
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/explanationObject.R
\docType{methods}
\name{explainRuleModel,explanationObject,data.frame-method}
\alias{explainRuleModel,explanationObject,data.frame-method}
\title{Method for explaining a rule model}
\usage{
\S4method{explainRuleModel}{explanationObject,data.frame}(theObject, data)
}
\arguments{
\item{theObject}{an explanation object}

\item{data}{data to be explained}
}
\description{
Method for explaining a rule model
}
source(system.file(file.path('tests', 'test_utils.R'), package = 'nimble')) context("Testing of different Filtering Algorithms") ### particle filter testing follows similar steps to MCMC testing. ### for each example, we comapare filter output between R and C. We also (where applicable) compare: ### 1) estimated values for latent states to true values for both weighted and equally weighted samples ### 2) estimated top level parameter values to known values (Liu-West filter, PMCMC) ### 3) estimated log-likelihood values to known values (for normal transition - observation ### models where LL can be calculated analytically via KF) ### basic scalar latent node example, no top-level params ## code <- nimbleCode({ ## x0 ~ dnorm(0,1) ## x[1] ~ dnorm(x0, 1) ## y[1] ~ dnorm(x[1], var = 2) ## for(i in 2:3) { ## x[i] ~ dnorm(x[i-1], 1) ## y[i] ~ dnorm(x[i], var = 2) ## } ## }) ## testdata = list(y = c(0,1,2)) ## inits = list(x0 = 0) ## ### ll, means, vars calculated from FKF R packgae ## ActualLL <- -5.08 ## test_filter(model = code, name = 'basic bootstrap, always resamp', data = testdata, filterType = "bootstrap", latentNodes = "x", ## filterControl = list(thresh = 1, saveAll = TRUE), ## inits = inits, ## results = list(mean = list(x = c(0,0.454,1.209)), ## var = list(x = c(.667, .909, .977)), ## ll = list(ActualLL)), ## resultsTolerance = list(mean = list(x = rep(.2,3)), ## var = list(x = rep(.2,3)), ## ll = list(2))) ## test_filter(model = code, name = 'basic auxiliary', data = testdata, filterType = "auxiliary", latentNodes = "x", ## inits = inits, ## results = list(mean = list(x = c(0,0.454,1.209)), ## var = list(x = c(.667, .909, .977)), ## ll = list(ActualLL)), ## resultsTolerance = list(mean = list(x = rep(.2,3)), ## var = list(x = rep(.2,3)), ## ll = list(2))) ## test_filter(model = code, name = 'basic auxiliary w/ mean lookahead', data = testdata, filterType = "auxiliary", ## latentNodes = "x", filterControl = list(lookahead = "mean", saveAll = TRUE), ## inits = 
inits, ## results = list(mean = list(x = c(0,0.454,1.209)), ## var = list(x = c(.667, .909, .977)), ## ll = list(ActualLL)), ## resultsTolerance = list(mean = list(x = rep(.2,3)), ## var = list(x = rep(.2,3)), ## ll = list(2))) ## test_filter(model = code, name = 'basic ensembleKF', data = testdata, filterType = "ensembleKF", latentNodes = "x", ## inits = inits, ## results = list(mean = list(x = c(0,0.454,1.209)), ## var = list(x = c(.667, .909, .977))), ## resultsTolerance = list(mean = list(x = rep(.2,3)), ## var = list(x = rep(.2,3)))) ### multivariate latent node and data node example, no top-level params code <- nimbleCode({ x[1,1:3] ~ dmnorm(x0[1:3], cov = xCov[1:3, 1:3]) yMean[1,1:2] <- obsMat[1:2,1:3]%*%x[1,1:3] y[1,1:2] ~ dmnorm(yMean[1,1:2], cov = yCov[1:2,1:2]) for(i in 2:3) { prevX[i,1:3] <- x[i-1,1:3] x[i,1:3] ~ dmnorm(prevX[i,1:3] , cov = xCov[1:3, 1:3]) yMean[i,1:2] <- obsMat[1:2,1:3]%*%x[i,1:3] y[i,1:2] ~ dmnorm(yMean[i,1:2], cov = yCov[1:2,1:2]) } }) testdata = list(y = matrix(c(0, 1, 1, 2, 2, 3), nrow = 3, byrow = TRUE), obsMat = matrix(c(1,0,0, 0,1,1), nrow = 2, byrow = TRUE), x0 = c(1,1,1), xCov = matrix(c(1,0,0, 0,2,1, 0,1,4), nrow = 3, byrow = TRUE), yCov = matrix(c(.5, .1, .1, 1), nrow = 2, byrow = TRUE)) xFilter <- matrix(c(.323,1.02,1.03, .819, .991, .985, .946, 1.333, 1.556), nrow = 3, byrow = T) xFilterTol <- matrix(1.2, nrow = 3, ncol = 3) ActualLL <- -10.235 ## test_filter(model = code, name = 'multivariate bootstrap, always resamp', data = testdata, filterType = "bootstrap", latentNodes = "x", ## filterControl = list(thresh = 1, saveAll = TRUE, timeIndex = 1), ## results = list(mean = list(x = xFilter), ## ll = list(ActualLL)), ## resultsTolerance = list(mean = list(x = xFilterTol), ## ll = list(2))) ## test_filter(model = code, name = 'multivariate auxiliary', data = testdata, filterType = "auxiliary", latentNodes = "x", ## filterControl = list(saveAll = TRUE, timeIndex = 1), ## results = list(mean = list(x = xFilter), ## ll = 
list(ActualLL)), ## resultsTolerance = list(mean = list(x = xFilterTol), ## ll = list(2))) ## ## On Windows the next test can create a DLL name conflict and look ## ## up the wrong C++ class, from a previous DLL. Hence this will be the break ## ## into two windows test units ## if(.Platform$OS.type == 'windows') { ## message("Stopping filtering test here on Windows to avoid multiple DLL problems. Run test-filtering2 to continue") ## stop() ## } test_filter(model = code, name = 'multivariate auxiliary mean lookahead', data = testdata, filterType = "auxiliary", latentNodes = "x", filterControl = list(saveAll = TRUE, timeIndex = 1, lookahead = "mean"), results = list(mean = list(x = xFilter), ll = list(ActualLL)), resultsTolerance = list(mean = list(x = xFilterTol), ll = list(2))) test_filter(model = code, name = 'multivariate ensembleKF', data = testdata, filterType = "ensembleKF", latentNodes = "x", filterControl = list(timeIndex = 1, saveAll = F), results = list(mean = list(x = xFilter[3,])), resultsTolerance = list(mean = list(x = xFilterTol[3,]))) ### scalar latent node and data node example, two top-level params code <- nimbleCode({ x[1] ~ dnorm(mean = mu0, sd = sigma_x); y[1] ~ dnorm(x[1], sd=sigma_y); for(i in 2:N){ x[i] ~ dnorm(mean = x[i-1], sd = sigma_x); y[i] ~ dnorm(mean = x[i], sd=sigma_y); } sigma_x ~ T(dnorm(1, sd = .5), 0,); sigma_y ~ T(dnorm(.1, sd = .5), 0,); mu0 <- 0 }) set.seed(0) N <- 5 sigma_x <- 1 sigma_y <- .1 x <- rep(NA, N) y <- x x[1] <- rnorm(1,0,sigma_x) y[1] <- rnorm(1,x[1], sigma_y) for(i in 2:N){ x[i] <- rnorm(1,x[i-1], sigma_x) y[i] <- rnorm(1,x[i], sigma_y) } consts <- list(N=N) testdata <- list(y=y) inits <- list(sigma_x=1, sigma_y=.1, x = x) test_filter(model = code, name = 'scalar lwf', inits = inits, data = c(testdata, consts), filterType = "LiuWest", latentNodes = "x", results = list( mean = list(x = x, sigma_x = sigma_x, sigma_y = sigma_y)), resultsTolerance = list(mean = list(x = rep(1,N), sigma_x = .5, sigma_y = .5))) 
test_mcmc(model = code, name = 'scalar pmcmc', inits = inits, data = c(testdata, consts), samplers = list( list(type = 'RW_PF', target = 'sigma_x', control = list(latents = 'x', m = 1000, resample = F)), list(type = 'RW_PF', target = 'sigma_y', control = list(latents = 'x', m = 1000, resample = F))), removeAllDefaultSamplers = TRUE, numItsC = 1000, results = list( mean = list( sigma_x = sigma_x, sigma_y = sigma_y)), resultsTolerance = list(mean = list(sigma_x = .5, sigma_y = .5))) test_mcmc(model = code, name = 'block pmcmc', inits = inits, data = c(testdata, consts), samplers = list( list(type = 'RW_PF_block', target = c('sigma_x', 'sigma_y'), control = list(latents = 'x', m = 1000, resample = F))), removeAllDefaultSamplers = TRUE, numItsC = 1000, results = list( mean = list(sigma_x = sigma_x, sigma_y = sigma_y)), resultsTolerance = list(mean = list(sigma_x = .5, sigma_y = .5))) ## Let's stop here to save testing time ## # test MCMC with longer runs and lower tolerance ## set.seed(0) ## N <- 50 ## sigma_x <- 1 ## sigma_y <- .1 ## x <- rep(NA, N) ## y <- x ## x[1] <- rnorm(1,0,sigma_x) ## y[1] <- rnorm(1,x[1], sigma_y) ## for(i in 2:N){ ## x[i] <- rnorm(1,x[i-1], sigma_x) ## y[i] <- rnorm(1,x[i], sigma_y) ## } ## consts <- list(N=N) ## testdata <- list(y=y) ## inits <- list(sigma_x=1, sigma_y=.1, x = x) ## test_mcmc(model = code, name = 'scalar pmcmc, more data', inits = inits, data = c(testdata, consts), basic = FALSE, samplers = list( ## list(type = 'RW_PF', target = 'sigma_x', control = list(latents = 'x', m = 1000, resample = FALSE)), ## list(type = 'RW_PF', target = 'sigma_y', control = list(latents = 'x', m = 1000, resample = FALSE))), ## removeAllDefaultSamplers = TRUE, numItsC = 1000, numItsC_results = 5000, results = list( ## mean = list( sigma_x = sigma_x, ## sigma_y = sigma_y)), ## resultsTolerance = list(mean = list(sigma_x = .1, ## sigma_y = .1))) ## test_mcmc(model = code, name = 'block pmcmc, more data', inits = inits, data = c(testdata, consts), 
basic = FALSE, samplers = list( ## list(type = 'RW_PF_block', target = c('sigma_x', 'sigma_y'), control = list(latents = 'x', m = 1000, resample = FALSE))), ## removeAllDefaultSamplers = TRUE, numItsC = 1000, numItsC_results = 5000, results = list( ## mean = list(sigma_x = sigma_x, ## sigma_y = sigma_y)), ## resultsTolerance = list(mean = list(sigma_x = .1, ## sigma_y = .1)))
/packages/nimble/inst/tests/test-Filtering2.R
no_license
clarkfitzg/nimble
R
false
false
10,111
r
source(system.file(file.path('tests', 'test_utils.R'), package = 'nimble')) context("Testing of different Filtering Algorithms") ### particle filter testing follows similar steps to MCMC testing. ### for each example, we comapare filter output between R and C. We also (where applicable) compare: ### 1) estimated values for latent states to true values for both weighted and equally weighted samples ### 2) estimated top level parameter values to known values (Liu-West filter, PMCMC) ### 3) estimated log-likelihood values to known values (for normal transition - observation ### models where LL can be calculated analytically via KF) ### basic scalar latent node example, no top-level params ## code <- nimbleCode({ ## x0 ~ dnorm(0,1) ## x[1] ~ dnorm(x0, 1) ## y[1] ~ dnorm(x[1], var = 2) ## for(i in 2:3) { ## x[i] ~ dnorm(x[i-1], 1) ## y[i] ~ dnorm(x[i], var = 2) ## } ## }) ## testdata = list(y = c(0,1,2)) ## inits = list(x0 = 0) ## ### ll, means, vars calculated from FKF R packgae ## ActualLL <- -5.08 ## test_filter(model = code, name = 'basic bootstrap, always resamp', data = testdata, filterType = "bootstrap", latentNodes = "x", ## filterControl = list(thresh = 1, saveAll = TRUE), ## inits = inits, ## results = list(mean = list(x = c(0,0.454,1.209)), ## var = list(x = c(.667, .909, .977)), ## ll = list(ActualLL)), ## resultsTolerance = list(mean = list(x = rep(.2,3)), ## var = list(x = rep(.2,3)), ## ll = list(2))) ## test_filter(model = code, name = 'basic auxiliary', data = testdata, filterType = "auxiliary", latentNodes = "x", ## inits = inits, ## results = list(mean = list(x = c(0,0.454,1.209)), ## var = list(x = c(.667, .909, .977)), ## ll = list(ActualLL)), ## resultsTolerance = list(mean = list(x = rep(.2,3)), ## var = list(x = rep(.2,3)), ## ll = list(2))) ## test_filter(model = code, name = 'basic auxiliary w/ mean lookahead', data = testdata, filterType = "auxiliary", ## latentNodes = "x", filterControl = list(lookahead = "mean", saveAll = TRUE), ## inits = 
inits, ## results = list(mean = list(x = c(0,0.454,1.209)), ## var = list(x = c(.667, .909, .977)), ## ll = list(ActualLL)), ## resultsTolerance = list(mean = list(x = rep(.2,3)), ## var = list(x = rep(.2,3)), ## ll = list(2))) ## test_filter(model = code, name = 'basic ensembleKF', data = testdata, filterType = "ensembleKF", latentNodes = "x", ## inits = inits, ## results = list(mean = list(x = c(0,0.454,1.209)), ## var = list(x = c(.667, .909, .977))), ## resultsTolerance = list(mean = list(x = rep(.2,3)), ## var = list(x = rep(.2,3)))) ### multivariate latent node and data node example, no top-level params code <- nimbleCode({ x[1,1:3] ~ dmnorm(x0[1:3], cov = xCov[1:3, 1:3]) yMean[1,1:2] <- obsMat[1:2,1:3]%*%x[1,1:3] y[1,1:2] ~ dmnorm(yMean[1,1:2], cov = yCov[1:2,1:2]) for(i in 2:3) { prevX[i,1:3] <- x[i-1,1:3] x[i,1:3] ~ dmnorm(prevX[i,1:3] , cov = xCov[1:3, 1:3]) yMean[i,1:2] <- obsMat[1:2,1:3]%*%x[i,1:3] y[i,1:2] ~ dmnorm(yMean[i,1:2], cov = yCov[1:2,1:2]) } }) testdata = list(y = matrix(c(0, 1, 1, 2, 2, 3), nrow = 3, byrow = TRUE), obsMat = matrix(c(1,0,0, 0,1,1), nrow = 2, byrow = TRUE), x0 = c(1,1,1), xCov = matrix(c(1,0,0, 0,2,1, 0,1,4), nrow = 3, byrow = TRUE), yCov = matrix(c(.5, .1, .1, 1), nrow = 2, byrow = TRUE)) xFilter <- matrix(c(.323,1.02,1.03, .819, .991, .985, .946, 1.333, 1.556), nrow = 3, byrow = T) xFilterTol <- matrix(1.2, nrow = 3, ncol = 3) ActualLL <- -10.235 ## test_filter(model = code, name = 'multivariate bootstrap, always resamp', data = testdata, filterType = "bootstrap", latentNodes = "x", ## filterControl = list(thresh = 1, saveAll = TRUE, timeIndex = 1), ## results = list(mean = list(x = xFilter), ## ll = list(ActualLL)), ## resultsTolerance = list(mean = list(x = xFilterTol), ## ll = list(2))) ## test_filter(model = code, name = 'multivariate auxiliary', data = testdata, filterType = "auxiliary", latentNodes = "x", ## filterControl = list(saveAll = TRUE, timeIndex = 1), ## results = list(mean = list(x = xFilter), ## ll = 
list(ActualLL)), ## resultsTolerance = list(mean = list(x = xFilterTol), ## ll = list(2))) ## ## On Windows the next test can create a DLL name conflict and look ## ## up the wrong C++ class, from a previous DLL. Hence this will be the break ## ## into two windows test units ## if(.Platform$OS.type == 'windows') { ## message("Stopping filtering test here on Windows to avoid multiple DLL problems. Run test-filtering2 to continue") ## stop() ## } test_filter(model = code, name = 'multivariate auxiliary mean lookahead', data = testdata, filterType = "auxiliary", latentNodes = "x", filterControl = list(saveAll = TRUE, timeIndex = 1, lookahead = "mean"), results = list(mean = list(x = xFilter), ll = list(ActualLL)), resultsTolerance = list(mean = list(x = xFilterTol), ll = list(2))) test_filter(model = code, name = 'multivariate ensembleKF', data = testdata, filterType = "ensembleKF", latentNodes = "x", filterControl = list(timeIndex = 1, saveAll = F), results = list(mean = list(x = xFilter[3,])), resultsTolerance = list(mean = list(x = xFilterTol[3,]))) ### scalar latent node and data node example, two top-level params code <- nimbleCode({ x[1] ~ dnorm(mean = mu0, sd = sigma_x); y[1] ~ dnorm(x[1], sd=sigma_y); for(i in 2:N){ x[i] ~ dnorm(mean = x[i-1], sd = sigma_x); y[i] ~ dnorm(mean = x[i], sd=sigma_y); } sigma_x ~ T(dnorm(1, sd = .5), 0,); sigma_y ~ T(dnorm(.1, sd = .5), 0,); mu0 <- 0 }) set.seed(0) N <- 5 sigma_x <- 1 sigma_y <- .1 x <- rep(NA, N) y <- x x[1] <- rnorm(1,0,sigma_x) y[1] <- rnorm(1,x[1], sigma_y) for(i in 2:N){ x[i] <- rnorm(1,x[i-1], sigma_x) y[i] <- rnorm(1,x[i], sigma_y) } consts <- list(N=N) testdata <- list(y=y) inits <- list(sigma_x=1, sigma_y=.1, x = x) test_filter(model = code, name = 'scalar lwf', inits = inits, data = c(testdata, consts), filterType = "LiuWest", latentNodes = "x", results = list( mean = list(x = x, sigma_x = sigma_x, sigma_y = sigma_y)), resultsTolerance = list(mean = list(x = rep(1,N), sigma_x = .5, sigma_y = .5))) 
test_mcmc(model = code, name = 'scalar pmcmc', inits = inits, data = c(testdata, consts), samplers = list( list(type = 'RW_PF', target = 'sigma_x', control = list(latents = 'x', m = 1000, resample = F)), list(type = 'RW_PF', target = 'sigma_y', control = list(latents = 'x', m = 1000, resample = F))), removeAllDefaultSamplers = TRUE, numItsC = 1000, results = list( mean = list( sigma_x = sigma_x, sigma_y = sigma_y)), resultsTolerance = list(mean = list(sigma_x = .5, sigma_y = .5))) test_mcmc(model = code, name = 'block pmcmc', inits = inits, data = c(testdata, consts), samplers = list( list(type = 'RW_PF_block', target = c('sigma_x', 'sigma_y'), control = list(latents = 'x', m = 1000, resample = F))), removeAllDefaultSamplers = TRUE, numItsC = 1000, results = list( mean = list(sigma_x = sigma_x, sigma_y = sigma_y)), resultsTolerance = list(mean = list(sigma_x = .5, sigma_y = .5))) ## Let's stop here to save testing time ## # test MCMC with longer runs and lower tolerance ## set.seed(0) ## N <- 50 ## sigma_x <- 1 ## sigma_y <- .1 ## x <- rep(NA, N) ## y <- x ## x[1] <- rnorm(1,0,sigma_x) ## y[1] <- rnorm(1,x[1], sigma_y) ## for(i in 2:N){ ## x[i] <- rnorm(1,x[i-1], sigma_x) ## y[i] <- rnorm(1,x[i], sigma_y) ## } ## consts <- list(N=N) ## testdata <- list(y=y) ## inits <- list(sigma_x=1, sigma_y=.1, x = x) ## test_mcmc(model = code, name = 'scalar pmcmc, more data', inits = inits, data = c(testdata, consts), basic = FALSE, samplers = list( ## list(type = 'RW_PF', target = 'sigma_x', control = list(latents = 'x', m = 1000, resample = FALSE)), ## list(type = 'RW_PF', target = 'sigma_y', control = list(latents = 'x', m = 1000, resample = FALSE))), ## removeAllDefaultSamplers = TRUE, numItsC = 1000, numItsC_results = 5000, results = list( ## mean = list( sigma_x = sigma_x, ## sigma_y = sigma_y)), ## resultsTolerance = list(mean = list(sigma_x = .1, ## sigma_y = .1))) ## test_mcmc(model = code, name = 'block pmcmc, more data', inits = inits, data = c(testdata, consts), 
basic = FALSE, samplers = list( ## list(type = 'RW_PF_block', target = c('sigma_x', 'sigma_y'), control = list(latents = 'x', m = 1000, resample = FALSE))), ## removeAllDefaultSamplers = TRUE, numItsC = 1000, numItsC_results = 5000, results = list( ## mean = list(sigma_x = sigma_x, ## sigma_y = sigma_y)), ## resultsTolerance = list(mean = list(sigma_x = .1, ## sigma_y = .1)))
###-------ECONOMIC MANAGEMENT APPROVAL--### ###-----------GENERAL SETUP-------------### ###-------------Mats Ahrenshop----------### setwd("C:\\Users\\User\\Downloads") pe <- read.csv2("PHE.csv") pe$date <- ISOdate(pe$Year, pe$Month, pe$Day) pe$date <- as.Date(pe$date) ## The Problem: Aggregating six time series plot(x = pe$Appr[pe$House == "Gallup"], axes = F, ylab = "%", ylim = c(15, 85), xlab = "Day", type = "l", lwd = 2, col = "black", main = "Approval of Presidential economic management") axis(1, at = c(1:894), labels = pe$date, cex.axis = .7) axis(2) lines(pe$Appr[pe$House == "ABC"], col = "red", lwd = 2) lines(pe$Appr[pe$House == "ABCWP"], col = "blue", lwd = 2) lines(pe$Appr[pe$House == "CBS"], col = "green", lwd = 2) lines(pe$Appr[pe$House == "CBSNYT"], col = "orange", lwd = 2) lines(pe$Appr[pe$House == "LATIMES"], col = "grey", lwd = 2) legend(x = "topright", legend = c("Gallup", "ABC", "ABCWP", "CBS", "CBSNYT", "LATIMES"), col = c("black", "red", "blue", "green", "orange", "grey"), lty = 1, cex = .6) box() ## The Solution: Dyad ratios algorithm using \texttt{extract} function ## obtain via http://stimson.web.unc.edu/software/ ##--------------------------## ### FUNCTIONS FROM STIMSON ### display<-function(out,filename=NULL) { if (is.null(filename)) filename="" d<-out$dimensions p<-out$period m<-out$latent1 if (d==2) m2<-out$latent2 T<-out$T mo=100*(p-as.integer(p)) for (t in 1:T) { yr<-format(as.integer(p[t]),nsmall=0) month<-format(mo[t],digits=2) lat1<-format(m[t],nsmall=3) if (d==1) { cat(c(yr,month,lat1),fill=TRUE,file=filename,append=TRUE) } else { lat2<-format(m2[t],nsmall=3) cat(c(yr,month,lat1,lat2),fill=TRUE,file=filename,append=TRUE) } } } ########################################################################################## plot.Zextract<-function(outobject) { dim<- outobject$dimensions T<- outobject$T vect1<-outobject$latent1 t<-seq(1:T) if (dim>1) { vect2<-outobject$latent2 miny<-min(vect1) if (miny>min(vect2)) miny<-min(vect2) 
maxy<-max(vect1) if (maxy<max(vect2)) maxy<-max(vect2) dummy<-rep(miny,T-1) #dummy is a fake variable used to reset axes to handle min/max of both series dummy[T]<-maxy leg.text<-c("","Dimension 1","Dimension 2") plot(t,dummy,type="l",lty=0,main="Final Estimation Results: Two Dimensions",xlab="Time Point",ylab="Latent Variables") lines(t,vect1,col=1) lines(t,vect2,col=2) legend(1,maxy,leg.text,col=c(0,1,2),lty=c(0,1,1)) } else { plot(t,vect1,type="l",main="Final Estimation Results",xlab="Time Point",ylab="Latent Variable") if (dim == 2) lines(t,vect2,col=2) } } ########################################################################################## summary.Zextract<- function(outobject) { T=outobject$T nvar=outobject$nvar dim<- outobject$dimensions vn<- c(outobject$varname,"Variable Name") vn<- format(vn,justify="right") nc<- format(outobject$N,justify="right") ld<- format(outobject$loadings1,digits=3,justify="right") mean<- format(outobject$means,digits=6,justify="right") sd<- format(outobject$std.deviations,digits=6,justify="right") cat("Variable Loadings and Descriptive Information: Dimension 1\n") cat(paste(vn[nvar+1],"Cases","Loading"," Mean ","Std Dev","\n")) for (v in 1:nvar) { cat(paste(vn[v]," ",nc[v]," ",ld[v],mean[v],sd[v],"\n")) } if (dim == 2) { ld<- format(outobject$loadings2,digits=3,justify="right") cat("\nVariable Loadings and Descriptive Information: Dimension 2\n") cat(paste(vn[nvar+1],"Cases","Loading"," Mean ","Std Dev","\n")) for (v in 1:nvar) { cat(paste(vn[v]," ",nc[v]," ",ld[v],mean[v],sd[v],"\n")) } } } ########################################################################################## findper<-function(unit,curdate,mind,miny,minper,aggratio) { #returns intFindPer datcurdate<-curdate class(datcurdate)<-"Date" mo <- findmonth(datcurdate) qu <- 1 + as.integer((mo - 1)/3) dy <- findday(datcurdate) yr <- findyear(datcurdate) arinv<- 1/aggratio if (unit == "D") intFindPer <- curdate - mind +1 #curdate - mindate + 1 if (unit == "A" || 
unit == "O") intFindPer <- as.integer((yr - miny) / aggratio) + 1 if (unit == "Q") part <- qu if (unit == "M") part <- mo if (unit == "Q" || unit == "M") intFindPer <- (yr - miny - 1) * arinv + part + (arinv - (minper - 1)) return(intFindPer) } #findper ########################################################################################## findday<-function(DateVar) { z<-as.POSIXlt(DateVar) v<-unlist(z) findday<-as.integer(v[4]) } #end findday ########################################################################################## findmonth<-function(DateVar) { z<-as.POSIXlt(DateVar) v<-unlist(z) findmonth<-as.integer(v[5])+1 } #end findmonth ########################################################################################## findyear<-function(DateVar) { z<-as.POSIXlt(DateVar) v<-unlist(z) findyear<-as.integer(v[6])+1900 } #end findyear ########################################################################################## aggregate<- function(varname,date,index,ncases,mindate,maxdate,nperiods,nvar,aggratio,unit,miny,minper) { # #READ A NEW RECORD, CALCULATE PERIOD, AND SET UP AGGREGATION INTO MAT.ISSUE[NPERIODS,NVAR] vl<- character(nvar) mind<- as.integer(mindate)/86400 maxd<- as.integer(maxdate)/86400 vfac<- factor(varname) #make a factor vector vlev<- levels(vfac) #find unique categories Mat.Issue<- array(dim=c(nperiods,nvar)) nrec<-length(varname) #added for R compatibility lp<- 0 per<- 0 x<- 0 c<- 0 nkeep<- 0 lv<- "0" for (record in 1:nrec) { # MASTER LOOP THROUGH INPUT DATA, 1 TO NREC if (ncases[record] == 0 || is.na(ncases[record])) ncases[record] <- 1000 mo <- findmonth(date[record]) qu <- 1 + as.integer((mo - 1)/3) dy <- findday(date[record]) yr <- findyear(date[record]) curdate<- as.integer(date[record]) if (curdate >= mind && curdate <= maxd) { #is date within range? 
nkeep <- nkeep + 1 if (nkeep==1) { #startup routine for first good case firstcase<- TRUE lp <- findper(unit,curdate,mind,miny,minper,aggratio) lv <- varname[record] x <- index[record] * ncases[record] #start new sums for case 1 c <- ncases[record] for (i in 1:nvar) { if (lv==vlev[i]) v=i #determine v by matching to position of labels vector } #end for } else { firstcase<- FALSE } #end if if (firstcase == FALSE) { #skip over the rest for first good case per<- findper(unit,curdate,mind,miny,minper,aggratio) #here we translate date into agg category if ((varname[record] != lv) || (per !=lp)) { #found a new period or variable name if (lp > 0 && lp <= nperiods) { Mat.Issue[lp, v] <- x / c #recompute for either period or var change x<- 0 c<- 0 } if (varname[record] != lv) { #new var only for (i in 1:nvar) { if (varname[record]==vlev[i]) v=i #determine v by matching to position of labels vector } #end for vl[v]<- varname[record] #this will only catch names that have good cases lv<-vl[v] #reassign new varname to lastvar } # new var lp <- findper(unit,curdate,mind,miny,minper,aggratio) x <- index[record] * ncases[record] #start new sums for current case c <- ncases[record] } else { x<- x + index[record] * ncases[record] #a continuing case, increment sums c<- c + ncases[record] } } # end of first case special loop } #end of date test loop } #newrec: next record vl<- vlev #overwrite previous assignment which had good names only agglist<- list(lab=vl,iss=Mat.Issue) return(agglist) #list includes labels and issue matrix } #end aggregate function ########################################################################################## esmooth<- function(mood, fb, alpha){ ########################################################################################## smooth<- function(alpha) { #for time series "series" and alpha "alpha[1]" compute sum of squared forecast error ferror<- numeric(1) T<- length(series) xvect<- numeric(T) xvect[1] <- series[1] for (t in 2:T) { xvect[t] <- 
alpha[1] * series[t] + (1 - alpha[1]) * xvect[t - 1] } sumsq <- 0 for (t in 3:T) { ferror <- series[t] - xvect[t - 1] sumsq <- sumsq + ferror ^ 2 } return(sumsq) #this is the value of the function for a particular parameter alpha[1] } # END OF FUNCTION SMOOTH ########################################################################################## series<- mood[fb,] #create series to be smoothed sm.out<- optim(c(.75),smooth,method="L-BFGS-B",lower=0.5,upper=1) #call smoother alpha<- sm.out$par #assign result to alpha #NOW SMOOTH USING ALPHA T<- length(series) for (t in 2:T) { mood[fb,t] <- alpha * series[t] + (1 - alpha) * mood[fb,t - 1] } return(alpha) } #END OF FUNCTION ESMOOTH ########################################################################################## residmi<- function(issue,v,mood) { #function regresses issue(v) on mood and then residualizes it o<- lm(issue[,v] ~ mood[3,]) #regress issue on mood to get a,b issue[,v]<- 100 + issue[,v] - (o$coef[1]+o$coef[2]*mood[3,]) #100 + Y - (a+bx) return(issue[,v]) } ########################################################################################## iscorr<- function(issue,mood) { #compute issue-scale correlations Nv<- length(issue[1,]) Np<- length(issue[,1]) Rvector<- numeric(Nv) for (v in 1:Nv) { N<- Np - sum(is.na(issue[,v])) if (N > 1) Rvector[v]<- cor(issue[,v],mood[3,],use="complete.obs",method="pearson") } return(Rvector) } #end function iscorr ########################################################################################## dominate<- function(fb,issue,nperiods,nvar,mood,valid,smoothing,alpha) { nitems<- numeric(nperiods) if (fb==2) alpha1<-alpha if (fb==1) { unexp<-numeric(1) everlap<- integer(1) alpha<- 1 alpha1<- 1 } if (fb == 1) { startper <- 1 mood[fb, startper] <- 100 firstj <- 2 lastj <- nperiods stepj <- 1 jprev <- 1 } else { startper <- nperiods mood[fb, startper] <- mood[1, nperiods] #reuse forward metric firstj <- nperiods - 1 lastj <- 1 stepj <- -1 jprev <- nperiods } # 
end if for (j in seq(firstj,lastj,by=stepj)) { mood[fb, j] <- 0 everlap <- 0 ## of years which have contributed sums to mood if (fb == 1) { firstj2 <- 1 lastj2 <- j - 1 } else { firstj2 <- j + 1 lastj2 <- nperiods } # end if for (j2 in firstj2:lastj2) { sum <- 0 #has already been estimated consum <- 0 #sum of communalities across issues overlap <- 0 for (v in 1:nvar) { xj <- issue[j, v] #xj is base year value sngx2 <- issue[j2, v] #sngx2 is comparison year value if (!is.na(xj) && !is.na(sngx2)) { overlap <- overlap + 1 #numb of issues contributing to sum ratio <- xj / sngx2 if (csign[v] < 0) ratio <- 1 / ratio sum <- sum + valid[v] * ratio * mood[fb, j2] consum <- consum + valid[v] } # end if } #next v if (overlap > 0) { everlap <- everlap + 1 mood[fb, j] <- mood[fb, j] + sum / consum } # end if } #next j2 nitems[j] <- everlap if (everlap > 0) mood[fb, j] <- mood[fb, j] / everlap else mood[fb, j] <- mood[fb, jprev] #if undefined, set to lag(mood) jprev <- j #last value of j, whether lead or lag } #next j if (smoothing == TRUE) { alpha<- esmooth(mood, fb, alpha) #NOW SMOOTH USING ALPHA mood.sm<- mood[fb,] #set up alternate vector mood.sm for (t in 2:nperiods) { mood.sm[t]<- alpha*mood[fb,t]+(1-alpha)*mood.sm[t-1] } #end for mood[fb,]<- mood.sm #now assign back smoothed version } else { alpha1 <- 1 alpha <- 1 } if (smoothing == TRUE && fb == 1) alpha1 <- alpha dominate.out<- list(alpha1=alpha1,alpha=alpha,latent=mood[fb,]) #output object return(dominate.out) # return(mood[fb,]) } #end dominate algorithm ########################################################################################## #begindt<-NA #ISOdate(2004,6,1) #enddt<-NA #ISOdate(2004,10,31) ########################################################################################## ## MAIN EXTRACT CODE BEGINS HERE ######################################################### extract<- function(varname,date,index,ncases=NULL,unit="A",mult=1,begindt=NA,enddt=NA,npass=1,smoothing=TRUE,endmonth=12) { 
formula<-match.call(extract) nrecords<- length(varname) if (is.null(ncases)) ncases<- rep(0,nrecords) moddate<- date #create temporary date vector, leaving original unmodified if ((unit=="A" || unit=="O") && endmonth<12) { for (i in 1:nrecords) { #first loop through raw data file month<- findmonth(moddate[i]) year<- findyear(moddate[i]) if (month>endmonth) moddate[i]<- ISOdate(year+1,1,1) #modified date become 1/1 of next year } #end loop through data } # end if if (is.na(begindt)) minper<-findmonth(min(moddate)) else minper<-findmonth(begindt) if (is.na(begindt)) miny<-findyear(min(moddate)) else miny<-findyear(begindt) if (is.na(begindt)) minday<-findday(min(moddate)) else minday<-findday(begindt) if (is.na(enddt)) maxper<-findmonth(max(moddate)) else maxper<-findmonth(enddt) if (is.na(enddt)) maxy<-findyear(max(moddate)) else maxy<-findyear(enddt) if (is.na(enddt)) maxday<-findday(max(moddate)) else maxday<-findday(enddt) if (unit=="Q") { minper<- as.integer((minper-1)/3)+1 maxper<- as.integer((maxper-1)/3)+1 } mindate<- ISOdate(miny,minper,minday,0,0,0,tz="GMT") maxdate<- ISOdate(maxy, maxper, maxday,0,0,0,tz="GMT") #86400=24*60*60 #SETCONS: latent<- numeric(1) aggratio<- 0 fb<- 1 #initialize auto<- "start" #meaningless value alpha<- 1 alpha1<- 1 pass<- 1 holdtola<- 0.001 tola<- holdtola iter<- 0 lastconv<- 99999 wtmean<- 0 #for it=1 wtstd<- 1 fract<- 1 if (unit=="A") { nperiods<- maxy-miny+1 aggratio<- 1 months<- 12 } if (unit=="O") { years<- mult months<- years*12 aggratio<- 2 odd<- (maxy-miny+1) %% mult #mod nperiods=as.integer((maxy-miny)/mult) + odd } if (unit=="M") { fract<- 100 nperiods<- (maxy-miny)*12 nperiods<- nperiods-12 + (12-minper+1) + maxper aggratio<- 1/12 months<- 1 } if (unit=="Q") { aggratio<- 1/4 months<- 3 nperiods<- as.integer((maxy-miny)/aggratio) nperiods<- nperiods-4 + (4-minper+1) + maxper fract<- 10 } if (unit=="D") { months=1 nperiods<- (as.integer(maxdate)-as.integer(mindate))/86400 + 1 #86400=24*60*60 } arinv<- 1/aggratio 
aggratio<- months/12 nrecords<- length(index) #HERE WE SET UP FUNDAMENTAL DIMENSIONS AND DECLARE VECTORS if (fb != 2) mood<- array(dim=c(3,nperiods)) vfac<- factor(varname) #make a factor vector vlev<- levels(vfac) #find unique categories nvar<- length(vlev) #how many are there?, includes unusable series valid<- numeric(nvar) csign<<- numeric(nvar) vl<- character(nvar) r<- numeric(nvar) oldr<- rep(1,nvar) # r=1 for all v initially issue<- array(dim=c(nperiods,nvar)) count<- numeric(nperiods) vl<- numeric(nvar) period<- numeric(nperiods) converge<- 0 evalue<- 0 # create numeric variable period, eg, yyyy.0m if (unit=="D") { period<-seq(1:nperiods) } else { if (months >= 12) { for (l in 1:nperiods) { p <- (l - 1) * aggratio period[l] <- miny + p } #next l } else { y <- 0 i <- 0 my <- miny if (minper == 1) my <- my - 1 for (l in 1:nperiods) { i<- 1 + ((l-1) %% arinv) mq <- minper + i - 1 mq<- 1 + ((mq-1) %% arinv) if (mq == 1) y <- y + 1 #first month or quarter, increment year period[l] <- my + y + mq / fract } # end for } #end else } # end if agglist<- aggregate(varname,moddate,index,ncases,mindate,maxdate,nperiods,nvar,aggratio,unit,miny,minper) # call aggregate to produce issue matrix vl<- agglist$lab #extract two elements of the list from aggregate call issue<- agglist$iss rm(agglist) #don't need this anymore #NOW REDUCE ISSUE MATRIX TO ELIMINATE UNUSABLE SERIES (WN<2) ndrop<- 0 nissue<- numeric(nperiods) std<- numeric(nperiods) for (v in 1:nvar) { std[v]<- 0 #default nissue[v]<- sum(!is.na(issue[,v])) #criterion is 2 cases for npass=1 or 3 for npass=2 if (nissue[v]>npass) std[v]<- sqrt(var(issue[,v],na.rm=TRUE)) #this is just a test for variance >0 if (std[v]<.001) { #case dropped if std uncomputable (NA) or actually zero (constant) ndrop<- ndrop+1 print(paste("Series",vl[v],"discarded. 
After aggregation cases =",nissue[v])) } } nvarold<- nvar nvar<- nvar-ndrop pointer<- 1 found<- FALSE for (v in 1:nvar) { #now reduced nvar while (found==FALSE && pointer<=nvarold) { #find first valid column and push down if (std[pointer]>.001) { #good case, transfer issue[,v]<- issue[,pointer] vl[v]<- vl[pointer] pointer<- pointer+1 found<- TRUE } else { pointer<- pointer+1 #bad case, increment pointer } #end if } #end while found<- FALSE } #for length(vl)<- nvar #reduce length(issue)<- nperiods*nvar #chop off unused columns attr(issue,"dim")<- c(nperiods,nvar) N<- numeric(nvar) #export<<-list(nperiods,nvar,issue) for (pass in 1:npass) { #newpass: RESTART FOR SECOND DIMENSION CASE if (pass == 2) { #reset iteration control parameters iter <- 0 tola = holdtola lastconv <- 99999 converge<- lastconv conv<- converge } else { av<- numeric(nvar) std<- numeric(nvar) # ngood<- 0 for (v in 1:nvar) { #compute av and std by issue nvar now reduced to good cases wn<- as.integer(nperiods-sum(is.na(issue[,v]))) av[v] <- mean(issue[,v],na.rm=TRUE) std[v]<- sqrt(var(issue[,v],na.rm=TRUE) * ((wn - 1)/wn)) #convert to population standard deviation issue[,v]<- 100 + 10 * (issue[,v] - av[v])/std[v] #standardize # ngood<- ngood+1 }#end for } #READY FOR ESTIMATION, SET UP AND PRINT OPTIONS INFO out<- as.character(10) #initial length only out[1]<- print(paste("Estimation report:")) if (pass == 1) { if (months >= 12) { out[2]<- print(paste("Period:", miny, " to", maxy," ", nperiods, " time points")) } else { out[2]<- print(paste("Period:", miny, minper, " to", maxy, maxper, nperiods, " time points")) } out[3]<- print(paste("Number of series: ", nvar+ndrop)) out[4]<- print(paste("Number of usable series: ", nvar)) out[5]<- print(paste("Exponential smoothing: ",smoothing)) } out[6]<- print(paste("Iteration history: Dimension ",pass)) print(" ") out[7]<- print("Iter Convergence Criterion Reliability Alphaf Alphab") outcount<- 7 for (p in 1:nperiods) { count[p]<- sum(!is.na(issue[p,])) } 
valid<- rep(1,times=nvar) csign<<- rep(1,times=nvar) auto <- "y" #iterative estimation on by default quit <- 0 #false implies go ahead and estimate while (iter == 0 || converge > tola) { #MASTER CONTROL LOOP WHICH ITERATES UNTIL SOLUTION REACHED for (fb in 1:2) { # MASTER fb LOOP fb=1 is forward, 2 backward dominate.out<- dominate(fb,issue,nperiods,nvar,mood,valid,smoothing,alpha) #master estimation routine alpha1<- dominate.out$alpha1 alpha<- dominate.out$alpha mood[fb,]<- dominate.out$latent } #next fb fb <- 3 #average mood from here on for (p in 1:nperiods) { # AVERAGE mood[fb, p] <- (mood[1, p] + mood[2, p]) / 2 } #next p moodmean<-mean(mood[3,]) sdmood<-sd(mood[3,]) for (p in 1:nperiods) { #PLACEMENT OF THIS LOOP MAY NOT BE RIGHT mood[fb,p] <- ((mood[fb,p] - moodmean) * wtstd / sdmood) + wtmean } #end for #plot commands t<- seq(1:nperiods) #time counter used for plot below lo<- 50 #force scale of iterative plot to large range hi<- 150 if (min(mood[3,]) < lo) lo=min(mood[3,]) #whichever larger, use if (max(mood[3,]) > hi) hi=max(mood[3,]) dummy<- rep(lo,nperiods) #dummy is fake variable used to set plot y axis to 50,150 dummy[nperiods]<- hi if (iter==0) { plot(t,dummy,type="l",lty=0,xlab="Time Period",ylab="Estimate by iteration",main="Estimated Latent Dimension") #create box, no visible lines } else { lines(t,mood[3,],col=iter) } iter <- iter + 1 if (auto == "y") r<- iscorr(issue,mood) else auto <- "y" #recompute correlations wtmean<- 0 wtstd<- 0 vsum<- 0 goodvar<- 0 converge<- 0 #start off default evalue<- 0 totalvar<- 0 for (v in 1:nvar) { wn<- nperiods-sum(is.na(issue[,v])) if (!is.na(sign(r[v]))) csign[v]<<- sign(r[v]) wn<- nperiods-sum(is.na(issue[,v])) if (wn>1) { #sum over variables actually used vratio <- wn / nperiods evalue <- evalue + vratio * r[v]^2 totalvar <- totalvar + vratio } #end if #convergence tests if (wn > 3) { conv <- abs(r[v] - oldr[v]) #conv is convergence test for item=v conv <- conv * (wn / nperiods) #weight criterion by number of 
available periods if (conv > converge) converge <- conv #converge is the global max of conv } #end if if (!is.na(r[v])) oldr[v] <- r[v] if (!is.na(r[v])) valid[v] <- r[v]^2 if (!is.na(av[v])) wtmean <- wtmean + av[v] * valid[v] if (!is.na(std[v])) wtstd <- wtstd + std[v] * valid[v] if (!is.na(r[v])) vsum <- vsum + valid[v] } #end v loop if (vsum > 0) wtmean <- wtmean / vsum if (vsum > 0) wtstd <- wtstd / vsum if (pass == 1) { mean1 <- wtmean std1 <- wtstd e1=evalue } else { wtmean <- mean1 wtstd <- std1 #*unexp } #end if fbcorr <- cor(mood[1,],mood[2,]) #fnfrontback if (quit != 1) { outcount<- outcount+1 cv<- format(round(converge,4),nsmall=4) itfmt<-format(round(iter),justify="right",length=4) out[outcount]<- print(paste(itfmt," ",cv," ",round(tola,4)," ",round(fbcorr,3),round(alpha1,4),round(alpha,4))) } if (converge > lastconv) tola <- tola * 2 lastconv <- converge auto = "y" #skip corr on iter=1, set auto on if (iter >= 50) break #get out of while loop } #END MASTER WHILE ITERATION CONTROL LOOP if (auto == "y" && converge<tola) { #IF WE REACH THIS CODE WE HAVE A FINAL SOLUTION TO BE REPORTED if (pass == 1) out1<- out #hold output for 2 dimensional solution auto <- "Q" quit <- 1 #flag solution reached, last time through r<- iscorr(issue,mood) #final iteration correlations if (pass == 1) r1<- r #hold correlations for 2 dimensional solution if (pass > 1) { unexp <- totalvar totalvar <- unexp * totalvar evalue <- evalue * unexp } # end if if (pass == 1) { expprop <- evalue / totalvar tot1 <- totalvar } else { erel <- evalue / totalvar #% exp relative totalvar <- (1 - expprop) * tot1 #true var=original var discounted by %exp evalue <- erel * totalvar #rescale to retain %exp relationship expprop <- evalue / tot1 #now reduce eral to expprop } # end if for (v in 1:nvar) { N[v]<- sum(!is.na(issue[,v])) } var.out<- list(varname=vl,loadings=r,means=av,std.deviations=std) print(" ") outcount<- outcount+1 out[outcount]<- print(paste("Eigen Estimate ", round(evalue,2), " of 
possible ",round(tot1,2))) outcount<- outcount+1 out[outcount]<- print(paste(" Percent Variance Explained: ",round(100 * expprop,2))) if (pass != 2 && npass>1) { for (v in 1:nvar) { valid[v] <- 0 #reset all, regmoodissue will set good=1 if (csign[v] != 0) issue[,v]<- residmi(issue,v,mood) #regmoodissue() } #v loop } # if #begin prn output routine # mood[fb,] is now our estimate, WHAT ABOUT A SECOND DIMENSION latent<- mood[fb,] #vector holds values for output if (pass == 1) latent1<- latent #hold first dimension print(" ") out[outcount+1]<- print(paste("Final Weighted Average Metric: Mean: ",round(wtmean,2)," St. Dev: ",round(wtstd,2))) #for Zelig output if (npass==1) { extract.out<- list(formula=formula,T=nperiods,nvar=nvar,unit=unit,dimensions=npass,period=period,varname=vl,N=N,means=av,std.deviations=std,setup1=out1,loadings1=r1,latent1=latent1) } else { for (i in 6:outcount) { out[i-5]=out[i] } length(out)<- outcount-5 extract.out<- list(formula=formula,T=nperiods,nvar=nvar,unit=unit,dimensions=npass,period=period,varname=vl,N=N,means=av,std.deviations=std,setup1=out1,loadings1=r1,latent1=latent1,setup2=out,loadings2=r,latent2=latent) } } #end if auto="y" } #end of for pass=1,2 loop par(col=1) #reset on termination class(extract.out)<- "Zextract" return(extract.out) } #end of extract ## Algorithm Implementation output <- extract(varname = pe$House, date = pe$date, index = pe$Appr, ncases = pe$N, unit = "M") # monthly output2 <- extract(varname = pe$House, date = pe$date, index = pe$Appr, ncases = pe$N, unit = "A") # annually length(unique(output$period)) ## Plot latent variable monthly plot(x = output$latent1, axes = F, # monthly representation ylab = "%", xlab = "Month", type = "l", main = "Approval of Presidential economic management") axis(1, at = c(1:447), labels = output$period, cex.axis = .7) axis(2) abline(v = 1, col = "red") # Reagan text(25, 70, "Reagan", col = "red") abline(v = which(output$period == 1989.01), col = "red") # Bush text(105, 70, "Bush", 
col = "red") abline(v = which(output$period == 1993.01), col = "blue") # Clinton text(153, 70, "Clinton", col = "blue") abline(v = which(output$period == 2001.01), col = "red") # Bush text(247, 70, "Bush", col = "red") abline(v = which(output$period == 2009.01), col = "blue") # Obama text(355, 70, "Obama", col = "blue") abline(v = which(output$period == 2017.01), col = "red") # Trump text(439, 70, "Trump", col = "red") box() ## Plot latent variable annually length(unique(output2$period)) # 38 plot(x = output2$latent1, axes = F, # annually representation ylab = "%", xlab = "Year", type = "l", main = "Approval of Presidential economic management") axis(1, at = c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38), labels = output2$period, cex.axis = .7) axis(2) abline(v = 1, col = "red") # Reagan text(3, 70, "Reagan", col = "red") abline(v = 9, col = "red") # Bush text(11, 70, "Bush", col = "red") abline(v = 13, col = "blue") # Clinton text(15, 70, "Clinton", col = "blue") abline(v = 21, col = "red") # Bush text(23, 70, "Bush", col = "red") abline(v = 29, col = "blue") # Obama text(31, 70, "Obama", col = "blue") abline(v = 37, col = "red") # Trump text(37.5, 70, "Trump", col = "red") box() ## Produce numerical output of estimation procedure summary(output) ## Reproduce Figure 3 in Kellstedt/de Boef (2004) kellstedt <- subset(pe, subset = pe$Year <= 2001) kellstedt_lat <- extract(varname = kellstedt$House, date = kellstedt$date, index = kellstedt$Appr, ncases = kellstedt$N, unit = "M") length(unique(kellstedt_lat$period)) plot(x = kellstedt_lat$latent1, axes = F, # monthly representation ylab = "%", xlab = "Month", type = "l", main = "Approval of Presidential economic management (Kellstedt 2004)") axis(1, at = c(1:249), labels = kellstedt_lat$period, cex.axis = .7) axis(2) abline(v = 1, col = "red") # Reagan text(25, 70, "Reagan", col = "red") abline(v = 
which(kellstedt_lat$period == 1989.01), col = "red") # Bush text(105, 70, "Bush", col = "red") abline(v = which(kellstedt_lat$period == 1993.01), col = "blue") # Clinton text(153, 70, "Clinton", col = "blue") abline(v = which(kellstedt_lat$period == 2001.01), col = "red") # Bush text(247, 70, "Bush", col = "red") box() ## Correlations of time series with Stimson metric # compute averages for each year for each survey house # for each survey house mu_abc_y <- tapply(pe$Appr[pe$House == "ABC"], pe$Year[pe$House == "ABC"], mean) mu_abcwp_y <- tapply(pe$Appr[pe$House == "ABCWP"], pe$Year[pe$House == "ABCWP"], mean) mu_cbs_y <- tapply(pe$Appr[pe$House == "CBS"], pe$Year[pe$House == "CBS"], mean) mu_cbsnyt_y <- tapply(pe$Appr[pe$House == "CBSNYT"], pe$Year[pe$House == "CBSNYT"], mean) mu_gal_y <- tapply(pe$Appr[pe$House == "Gallup"], pe$Year[pe$House == "Gallup"], mean) mu_lat_y <- tapply(pe$Appr[pe$House == "LATIMES"], pe$Year[pe$House == "LATIMES"], mean) latent_y <- output2$latent1 names(latent_y) <- output2$period mu_total_y <- tapply(pe$Appr, pe$Year, mean) # for reference only and total correlation # now fill with NA's at time-incongruent positions mu_abc_y <- c(mu_abc_y[1], mu_abc_y[2], NA, NA, NA, NA, NA, NA, NA, NA, mu_abc_y[3], mu_abc_y[4], mu_abc_y[5], mu_abc_y[6], mu_abc_y[7], mu_abc_y[8], NA, NA, NA, NA, mu_abc_y[9], mu_abc_y[10], mu_abc_y[11], NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA) names(mu_abc_y) <- output2$period #mu_abc_y mu_abcwp_y <- c(mu_abcwp_y[1:18], NA, NA, mu_abcwp_y[19:36]) names(mu_abcwp_y) <- output2$period #mu_abcwp_y mu_cbs_y <- c(NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, mu_cbs_y[1:28]) names(mu_cbs_y) <- output2$period #mu_cbs_y mu_cbsnyt_y <- c(mu_cbsnyt_y[1:36], NA, NA) names(mu_cbsnyt_y) <- output2$period #mu_cbsnyt_y mu_gal_y <- c(NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, mu_gal_y[1:5], NA, mu_gal_y[6], NA, mu_gal_y[7:24]) names(mu_gal_y) <- output2$period #mu_gal_y mu_lat_y <- c(mu_lat_y[1], NA, mu_lat_y[2:4], 
NA, mu_lat_y[5], NA, NA, NA, mu_lat_y[6:11], NA, mu_lat_y[12:13], NA, mu_lat_y[14:21], NA, NA, NA, NA, NA, NA, NA, NA, NA, NA) names(mu_lat_y) <- output2$period #mu_lat_y # now combine into single matrix matr <- data.frame(latent = output2$latent1, abc = mu_abc_y, abcwp = mu_abcwp_y, cbs = mu_cbs_y, cbsnyt = mu_cbsnyt_y, gallup = mu_gal_y, latimes = mu_lat_y, total = mu_total_y) cor.matrix <- cor(matr, use = "pairwise.complete.obs") cor.matrix #stargazer(cor.matrix, type = "latex", #title = "Correlation Matrix Time Series and Metric")
/President-s Handling of the Economy PHE/R script PHE.R
no_license
rayduch/US-Economic-Policy-Uncertainty-is-Presidential
R
false
false
32,687
r
###-------ECONOMIC MANAGEMENT APPROVAL--### ###-----------GENERAL SETUP-------------### ###-------------Mats Ahrenshop----------### setwd("C:\\Users\\User\\Downloads") pe <- read.csv2("PHE.csv") pe$date <- ISOdate(pe$Year, pe$Month, pe$Day) pe$date <- as.Date(pe$date) ## The Problem: Aggregating six time series plot(x = pe$Appr[pe$House == "Gallup"], axes = F, ylab = "%", ylim = c(15, 85), xlab = "Day", type = "l", lwd = 2, col = "black", main = "Approval of Presidential economic management") axis(1, at = c(1:894), labels = pe$date, cex.axis = .7) axis(2) lines(pe$Appr[pe$House == "ABC"], col = "red", lwd = 2) lines(pe$Appr[pe$House == "ABCWP"], col = "blue", lwd = 2) lines(pe$Appr[pe$House == "CBS"], col = "green", lwd = 2) lines(pe$Appr[pe$House == "CBSNYT"], col = "orange", lwd = 2) lines(pe$Appr[pe$House == "LATIMES"], col = "grey", lwd = 2) legend(x = "topright", legend = c("Gallup", "ABC", "ABCWP", "CBS", "CBSNYT", "LATIMES"), col = c("black", "red", "blue", "green", "orange", "grey"), lty = 1, cex = .6) box() ## The Solution: Dyad ratios algorithm using \texttt{extract} function ## obtain via http://stimson.web.unc.edu/software/ ##--------------------------## ### FUNCTIONS FROM STIMSON ### display<-function(out,filename=NULL) { if (is.null(filename)) filename="" d<-out$dimensions p<-out$period m<-out$latent1 if (d==2) m2<-out$latent2 T<-out$T mo=100*(p-as.integer(p)) for (t in 1:T) { yr<-format(as.integer(p[t]),nsmall=0) month<-format(mo[t],digits=2) lat1<-format(m[t],nsmall=3) if (d==1) { cat(c(yr,month,lat1),fill=TRUE,file=filename,append=TRUE) } else { lat2<-format(m2[t],nsmall=3) cat(c(yr,month,lat1,lat2),fill=TRUE,file=filename,append=TRUE) } } } ########################################################################################## plot.Zextract<-function(outobject) { dim<- outobject$dimensions T<- outobject$T vect1<-outobject$latent1 t<-seq(1:T) if (dim>1) { vect2<-outobject$latent2 miny<-min(vect1) if (miny>min(vect2)) miny<-min(vect2) 
maxy<-max(vect1) if (maxy<max(vect2)) maxy<-max(vect2) dummy<-rep(miny,T-1) #dummy is a fake variable used to reset axes to handle min/max of both series dummy[T]<-maxy leg.text<-c("","Dimension 1","Dimension 2") plot(t,dummy,type="l",lty=0,main="Final Estimation Results: Two Dimensions",xlab="Time Point",ylab="Latent Variables") lines(t,vect1,col=1) lines(t,vect2,col=2) legend(1,maxy,leg.text,col=c(0,1,2),lty=c(0,1,1)) } else { plot(t,vect1,type="l",main="Final Estimation Results",xlab="Time Point",ylab="Latent Variable") if (dim == 2) lines(t,vect2,col=2) } } ########################################################################################## summary.Zextract<- function(outobject) { T=outobject$T nvar=outobject$nvar dim<- outobject$dimensions vn<- c(outobject$varname,"Variable Name") vn<- format(vn,justify="right") nc<- format(outobject$N,justify="right") ld<- format(outobject$loadings1,digits=3,justify="right") mean<- format(outobject$means,digits=6,justify="right") sd<- format(outobject$std.deviations,digits=6,justify="right") cat("Variable Loadings and Descriptive Information: Dimension 1\n") cat(paste(vn[nvar+1],"Cases","Loading"," Mean ","Std Dev","\n")) for (v in 1:nvar) { cat(paste(vn[v]," ",nc[v]," ",ld[v],mean[v],sd[v],"\n")) } if (dim == 2) { ld<- format(outobject$loadings2,digits=3,justify="right") cat("\nVariable Loadings and Descriptive Information: Dimension 2\n") cat(paste(vn[nvar+1],"Cases","Loading"," Mean ","Std Dev","\n")) for (v in 1:nvar) { cat(paste(vn[v]," ",nc[v]," ",ld[v],mean[v],sd[v],"\n")) } } } ########################################################################################## findper<-function(unit,curdate,mind,miny,minper,aggratio) { #returns intFindPer datcurdate<-curdate class(datcurdate)<-"Date" mo <- findmonth(datcurdate) qu <- 1 + as.integer((mo - 1)/3) dy <- findday(datcurdate) yr <- findyear(datcurdate) arinv<- 1/aggratio if (unit == "D") intFindPer <- curdate - mind +1 #curdate - mindate + 1 if (unit == "A" || 
unit == "O") intFindPer <- as.integer((yr - miny) / aggratio) + 1 if (unit == "Q") part <- qu if (unit == "M") part <- mo if (unit == "Q" || unit == "M") intFindPer <- (yr - miny - 1) * arinv + part + (arinv - (minper - 1)) return(intFindPer) } #findper ########################################################################################## findday<-function(DateVar) { z<-as.POSIXlt(DateVar) v<-unlist(z) findday<-as.integer(v[4]) } #end findday ########################################################################################## findmonth<-function(DateVar) { z<-as.POSIXlt(DateVar) v<-unlist(z) findmonth<-as.integer(v[5])+1 } #end findmonth ########################################################################################## findyear<-function(DateVar) { z<-as.POSIXlt(DateVar) v<-unlist(z) findyear<-as.integer(v[6])+1900 } #end findyear ########################################################################################## aggregate<- function(varname,date,index,ncases,mindate,maxdate,nperiods,nvar,aggratio,unit,miny,minper) { # #READ A NEW RECORD, CALCULATE PERIOD, AND SET UP AGGREGATION INTO MAT.ISSUE[NPERIODS,NVAR] vl<- character(nvar) mind<- as.integer(mindate)/86400 maxd<- as.integer(maxdate)/86400 vfac<- factor(varname) #make a factor vector vlev<- levels(vfac) #find unique categories Mat.Issue<- array(dim=c(nperiods,nvar)) nrec<-length(varname) #added for R compatibility lp<- 0 per<- 0 x<- 0 c<- 0 nkeep<- 0 lv<- "0" for (record in 1:nrec) { # MASTER LOOP THROUGH INPUT DATA, 1 TO NREC if (ncases[record] == 0 || is.na(ncases[record])) ncases[record] <- 1000 mo <- findmonth(date[record]) qu <- 1 + as.integer((mo - 1)/3) dy <- findday(date[record]) yr <- findyear(date[record]) curdate<- as.integer(date[record]) if (curdate >= mind && curdate <= maxd) { #is date within range? 
nkeep <- nkeep + 1 if (nkeep==1) { #startup routine for first good case firstcase<- TRUE lp <- findper(unit,curdate,mind,miny,minper,aggratio) lv <- varname[record] x <- index[record] * ncases[record] #start new sums for case 1 c <- ncases[record] for (i in 1:nvar) { if (lv==vlev[i]) v=i #determine v by matching to position of labels vector } #end for } else { firstcase<- FALSE } #end if if (firstcase == FALSE) { #skip over the rest for first good case per<- findper(unit,curdate,mind,miny,minper,aggratio) #here we translate date into agg category if ((varname[record] != lv) || (per !=lp)) { #found a new period or variable name if (lp > 0 && lp <= nperiods) { Mat.Issue[lp, v] <- x / c #recompute for either period or var change x<- 0 c<- 0 } if (varname[record] != lv) { #new var only for (i in 1:nvar) { if (varname[record]==vlev[i]) v=i #determine v by matching to position of labels vector } #end for vl[v]<- varname[record] #this will only catch names that have good cases lv<-vl[v] #reassign new varname to lastvar } # new var lp <- findper(unit,curdate,mind,miny,minper,aggratio) x <- index[record] * ncases[record] #start new sums for current case c <- ncases[record] } else { x<- x + index[record] * ncases[record] #a continuing case, increment sums c<- c + ncases[record] } } # end of first case special loop } #end of date test loop } #newrec: next record vl<- vlev #overwrite previous assignment which had good names only agglist<- list(lab=vl,iss=Mat.Issue) return(agglist) #list includes labels and issue matrix } #end aggregate function ########################################################################################## esmooth<- function(mood, fb, alpha){ ########################################################################################## smooth<- function(alpha) { #for time series "series" and alpha "alpha[1]" compute sum of squared forecast error ferror<- numeric(1) T<- length(series) xvect<- numeric(T) xvect[1] <- series[1] for (t in 2:T) { xvect[t] <- 
alpha[1] * series[t] + (1 - alpha[1]) * xvect[t - 1] } sumsq <- 0 for (t in 3:T) { ferror <- series[t] - xvect[t - 1] sumsq <- sumsq + ferror ^ 2 } return(sumsq) #this is the value of the function for a particular parameter alpha[1] } # END OF FUNCTION SMOOTH ########################################################################################## series<- mood[fb,] #create series to be smoothed sm.out<- optim(c(.75),smooth,method="L-BFGS-B",lower=0.5,upper=1) #call smoother alpha<- sm.out$par #assign result to alpha #NOW SMOOTH USING ALPHA T<- length(series) for (t in 2:T) { mood[fb,t] <- alpha * series[t] + (1 - alpha) * mood[fb,t - 1] } return(alpha) } #END OF FUNCTION ESMOOTH ########################################################################################## residmi<- function(issue,v,mood) { #function regresses issue(v) on mood and then residualizes it o<- lm(issue[,v] ~ mood[3,]) #regress issue on mood to get a,b issue[,v]<- 100 + issue[,v] - (o$coef[1]+o$coef[2]*mood[3,]) #100 + Y - (a+bx) return(issue[,v]) } ########################################################################################## iscorr<- function(issue,mood) { #compute issue-scale correlations Nv<- length(issue[1,]) Np<- length(issue[,1]) Rvector<- numeric(Nv) for (v in 1:Nv) { N<- Np - sum(is.na(issue[,v])) if (N > 1) Rvector[v]<- cor(issue[,v],mood[3,],use="complete.obs",method="pearson") } return(Rvector) } #end function iscorr ########################################################################################## dominate<- function(fb,issue,nperiods,nvar,mood,valid,smoothing,alpha) { nitems<- numeric(nperiods) if (fb==2) alpha1<-alpha if (fb==1) { unexp<-numeric(1) everlap<- integer(1) alpha<- 1 alpha1<- 1 } if (fb == 1) { startper <- 1 mood[fb, startper] <- 100 firstj <- 2 lastj <- nperiods stepj <- 1 jprev <- 1 } else { startper <- nperiods mood[fb, startper] <- mood[1, nperiods] #reuse forward metric firstj <- nperiods - 1 lastj <- 1 stepj <- -1 jprev <- nperiods } # 
end if for (j in seq(firstj,lastj,by=stepj)) { mood[fb, j] <- 0 everlap <- 0 ## of years which have contributed sums to mood if (fb == 1) { firstj2 <- 1 lastj2 <- j - 1 } else { firstj2 <- j + 1 lastj2 <- nperiods } # end if for (j2 in firstj2:lastj2) { sum <- 0 #has already been estimated consum <- 0 #sum of communalities across issues overlap <- 0 for (v in 1:nvar) { xj <- issue[j, v] #xj is base year value sngx2 <- issue[j2, v] #sngx2 is comparison year value if (!is.na(xj) && !is.na(sngx2)) { overlap <- overlap + 1 #numb of issues contributing to sum ratio <- xj / sngx2 if (csign[v] < 0) ratio <- 1 / ratio sum <- sum + valid[v] * ratio * mood[fb, j2] consum <- consum + valid[v] } # end if } #next v if (overlap > 0) { everlap <- everlap + 1 mood[fb, j] <- mood[fb, j] + sum / consum } # end if } #next j2 nitems[j] <- everlap if (everlap > 0) mood[fb, j] <- mood[fb, j] / everlap else mood[fb, j] <- mood[fb, jprev] #if undefined, set to lag(mood) jprev <- j #last value of j, whether lead or lag } #next j if (smoothing == TRUE) { alpha<- esmooth(mood, fb, alpha) #NOW SMOOTH USING ALPHA mood.sm<- mood[fb,] #set up alternate vector mood.sm for (t in 2:nperiods) { mood.sm[t]<- alpha*mood[fb,t]+(1-alpha)*mood.sm[t-1] } #end for mood[fb,]<- mood.sm #now assign back smoothed version } else { alpha1 <- 1 alpha <- 1 } if (smoothing == TRUE && fb == 1) alpha1 <- alpha dominate.out<- list(alpha1=alpha1,alpha=alpha,latent=mood[fb,]) #output object return(dominate.out) # return(mood[fb,]) } #end dominate algorithm ########################################################################################## #begindt<-NA #ISOdate(2004,6,1) #enddt<-NA #ISOdate(2004,10,31) ########################################################################################## ## MAIN EXTRACT CODE BEGINS HERE ######################################################### extract<- function(varname,date,index,ncases=NULL,unit="A",mult=1,begindt=NA,enddt=NA,npass=1,smoothing=TRUE,endmonth=12) { 
formula<-match.call(extract) nrecords<- length(varname) if (is.null(ncases)) ncases<- rep(0,nrecords) moddate<- date #create temporary date vector, leaving original unmodified if ((unit=="A" || unit=="O") && endmonth<12) { for (i in 1:nrecords) { #first loop through raw data file month<- findmonth(moddate[i]) year<- findyear(moddate[i]) if (month>endmonth) moddate[i]<- ISOdate(year+1,1,1) #modified date become 1/1 of next year } #end loop through data } # end if if (is.na(begindt)) minper<-findmonth(min(moddate)) else minper<-findmonth(begindt) if (is.na(begindt)) miny<-findyear(min(moddate)) else miny<-findyear(begindt) if (is.na(begindt)) minday<-findday(min(moddate)) else minday<-findday(begindt) if (is.na(enddt)) maxper<-findmonth(max(moddate)) else maxper<-findmonth(enddt) if (is.na(enddt)) maxy<-findyear(max(moddate)) else maxy<-findyear(enddt) if (is.na(enddt)) maxday<-findday(max(moddate)) else maxday<-findday(enddt) if (unit=="Q") { minper<- as.integer((minper-1)/3)+1 maxper<- as.integer((maxper-1)/3)+1 } mindate<- ISOdate(miny,minper,minday,0,0,0,tz="GMT") maxdate<- ISOdate(maxy, maxper, maxday,0,0,0,tz="GMT") #86400=24*60*60 #SETCONS: latent<- numeric(1) aggratio<- 0 fb<- 1 #initialize auto<- "start" #meaningless value alpha<- 1 alpha1<- 1 pass<- 1 holdtola<- 0.001 tola<- holdtola iter<- 0 lastconv<- 99999 wtmean<- 0 #for it=1 wtstd<- 1 fract<- 1 if (unit=="A") { nperiods<- maxy-miny+1 aggratio<- 1 months<- 12 } if (unit=="O") { years<- mult months<- years*12 aggratio<- 2 odd<- (maxy-miny+1) %% mult #mod nperiods=as.integer((maxy-miny)/mult) + odd } if (unit=="M") { fract<- 100 nperiods<- (maxy-miny)*12 nperiods<- nperiods-12 + (12-minper+1) + maxper aggratio<- 1/12 months<- 1 } if (unit=="Q") { aggratio<- 1/4 months<- 3 nperiods<- as.integer((maxy-miny)/aggratio) nperiods<- nperiods-4 + (4-minper+1) + maxper fract<- 10 } if (unit=="D") { months=1 nperiods<- (as.integer(maxdate)-as.integer(mindate))/86400 + 1 #86400=24*60*60 } arinv<- 1/aggratio 
aggratio<- months/12 nrecords<- length(index) #HERE WE SET UP FUNDAMENTAL DIMENSIONS AND DECLARE VECTORS if (fb != 2) mood<- array(dim=c(3,nperiods)) vfac<- factor(varname) #make a factor vector vlev<- levels(vfac) #find unique categories nvar<- length(vlev) #how many are there?, includes unusable series valid<- numeric(nvar) csign<<- numeric(nvar) vl<- character(nvar) r<- numeric(nvar) oldr<- rep(1,nvar) # r=1 for all v initially issue<- array(dim=c(nperiods,nvar)) count<- numeric(nperiods) vl<- numeric(nvar) period<- numeric(nperiods) converge<- 0 evalue<- 0 # create numeric variable period, eg, yyyy.0m if (unit=="D") { period<-seq(1:nperiods) } else { if (months >= 12) { for (l in 1:nperiods) { p <- (l - 1) * aggratio period[l] <- miny + p } #next l } else { y <- 0 i <- 0 my <- miny if (minper == 1) my <- my - 1 for (l in 1:nperiods) { i<- 1 + ((l-1) %% arinv) mq <- minper + i - 1 mq<- 1 + ((mq-1) %% arinv) if (mq == 1) y <- y + 1 #first month or quarter, increment year period[l] <- my + y + mq / fract } # end for } #end else } # end if agglist<- aggregate(varname,moddate,index,ncases,mindate,maxdate,nperiods,nvar,aggratio,unit,miny,minper) # call aggregate to produce issue matrix vl<- agglist$lab #extract two elements of the list from aggregate call issue<- agglist$iss rm(agglist) #don't need this anymore #NOW REDUCE ISSUE MATRIX TO ELIMINATE UNUSABLE SERIES (WN<2) ndrop<- 0 nissue<- numeric(nperiods) std<- numeric(nperiods) for (v in 1:nvar) { std[v]<- 0 #default nissue[v]<- sum(!is.na(issue[,v])) #criterion is 2 cases for npass=1 or 3 for npass=2 if (nissue[v]>npass) std[v]<- sqrt(var(issue[,v],na.rm=TRUE)) #this is just a test for variance >0 if (std[v]<.001) { #case dropped if std uncomputable (NA) or actually zero (constant) ndrop<- ndrop+1 print(paste("Series",vl[v],"discarded. 
After aggregation cases =",nissue[v])) } } nvarold<- nvar nvar<- nvar-ndrop pointer<- 1 found<- FALSE for (v in 1:nvar) { #now reduced nvar while (found==FALSE && pointer<=nvarold) { #find first valid column and push down if (std[pointer]>.001) { #good case, transfer issue[,v]<- issue[,pointer] vl[v]<- vl[pointer] pointer<- pointer+1 found<- TRUE } else { pointer<- pointer+1 #bad case, increment pointer } #end if } #end while found<- FALSE } #for length(vl)<- nvar #reduce length(issue)<- nperiods*nvar #chop off unused columns attr(issue,"dim")<- c(nperiods,nvar) N<- numeric(nvar) #export<<-list(nperiods,nvar,issue) for (pass in 1:npass) { #newpass: RESTART FOR SECOND DIMENSION CASE if (pass == 2) { #reset iteration control parameters iter <- 0 tola = holdtola lastconv <- 99999 converge<- lastconv conv<- converge } else { av<- numeric(nvar) std<- numeric(nvar) # ngood<- 0 for (v in 1:nvar) { #compute av and std by issue nvar now reduced to good cases wn<- as.integer(nperiods-sum(is.na(issue[,v]))) av[v] <- mean(issue[,v],na.rm=TRUE) std[v]<- sqrt(var(issue[,v],na.rm=TRUE) * ((wn - 1)/wn)) #convert to population standard deviation issue[,v]<- 100 + 10 * (issue[,v] - av[v])/std[v] #standardize # ngood<- ngood+1 }#end for } #READY FOR ESTIMATION, SET UP AND PRINT OPTIONS INFO out<- as.character(10) #initial length only out[1]<- print(paste("Estimation report:")) if (pass == 1) { if (months >= 12) { out[2]<- print(paste("Period:", miny, " to", maxy," ", nperiods, " time points")) } else { out[2]<- print(paste("Period:", miny, minper, " to", maxy, maxper, nperiods, " time points")) } out[3]<- print(paste("Number of series: ", nvar+ndrop)) out[4]<- print(paste("Number of usable series: ", nvar)) out[5]<- print(paste("Exponential smoothing: ",smoothing)) } out[6]<- print(paste("Iteration history: Dimension ",pass)) print(" ") out[7]<- print("Iter Convergence Criterion Reliability Alphaf Alphab") outcount<- 7 for (p in 1:nperiods) { count[p]<- sum(!is.na(issue[p,])) } 
valid<- rep(1,times=nvar) csign<<- rep(1,times=nvar) auto <- "y" #iterative estimation on by default quit <- 0 #false implies go ahead and estimate while (iter == 0 || converge > tola) { #MASTER CONTROL LOOP WHICH ITERATES UNTIL SOLUTION REACHED for (fb in 1:2) { # MASTER fb LOOP fb=1 is forward, 2 backward dominate.out<- dominate(fb,issue,nperiods,nvar,mood,valid,smoothing,alpha) #master estimation routine alpha1<- dominate.out$alpha1 alpha<- dominate.out$alpha mood[fb,]<- dominate.out$latent } #next fb fb <- 3 #average mood from here on for (p in 1:nperiods) { # AVERAGE mood[fb, p] <- (mood[1, p] + mood[2, p]) / 2 } #next p moodmean<-mean(mood[3,]) sdmood<-sd(mood[3,]) for (p in 1:nperiods) { #PLACEMENT OF THIS LOOP MAY NOT BE RIGHT mood[fb,p] <- ((mood[fb,p] - moodmean) * wtstd / sdmood) + wtmean } #end for #plot commands t<- seq(1:nperiods) #time counter used for plot below lo<- 50 #force scale of iterative plot to large range hi<- 150 if (min(mood[3,]) < lo) lo=min(mood[3,]) #whichever larger, use if (max(mood[3,]) > hi) hi=max(mood[3,]) dummy<- rep(lo,nperiods) #dummy is fake variable used to set plot y axis to 50,150 dummy[nperiods]<- hi if (iter==0) { plot(t,dummy,type="l",lty=0,xlab="Time Period",ylab="Estimate by iteration",main="Estimated Latent Dimension") #create box, no visible lines } else { lines(t,mood[3,],col=iter) } iter <- iter + 1 if (auto == "y") r<- iscorr(issue,mood) else auto <- "y" #recompute correlations wtmean<- 0 wtstd<- 0 vsum<- 0 goodvar<- 0 converge<- 0 #start off default evalue<- 0 totalvar<- 0 for (v in 1:nvar) { wn<- nperiods-sum(is.na(issue[,v])) if (!is.na(sign(r[v]))) csign[v]<<- sign(r[v]) wn<- nperiods-sum(is.na(issue[,v])) if (wn>1) { #sum over variables actually used vratio <- wn / nperiods evalue <- evalue + vratio * r[v]^2 totalvar <- totalvar + vratio } #end if #convergence tests if (wn > 3) { conv <- abs(r[v] - oldr[v]) #conv is convergence test for item=v conv <- conv * (wn / nperiods) #weight criterion by number of 
available periods if (conv > converge) converge <- conv #converge is the global max of conv } #end if if (!is.na(r[v])) oldr[v] <- r[v] if (!is.na(r[v])) valid[v] <- r[v]^2 if (!is.na(av[v])) wtmean <- wtmean + av[v] * valid[v] if (!is.na(std[v])) wtstd <- wtstd + std[v] * valid[v] if (!is.na(r[v])) vsum <- vsum + valid[v] } #end v loop if (vsum > 0) wtmean <- wtmean / vsum if (vsum > 0) wtstd <- wtstd / vsum if (pass == 1) { mean1 <- wtmean std1 <- wtstd e1=evalue } else { wtmean <- mean1 wtstd <- std1 #*unexp } #end if fbcorr <- cor(mood[1,],mood[2,]) #fnfrontback if (quit != 1) { outcount<- outcount+1 cv<- format(round(converge,4),nsmall=4) itfmt<-format(round(iter),justify="right",length=4) out[outcount]<- print(paste(itfmt," ",cv," ",round(tola,4)," ",round(fbcorr,3),round(alpha1,4),round(alpha,4))) } if (converge > lastconv) tola <- tola * 2 lastconv <- converge auto = "y" #skip corr on iter=1, set auto on if (iter >= 50) break #get out of while loop } #END MASTER WHILE ITERATION CONTROL LOOP if (auto == "y" && converge<tola) { #IF WE REACH THIS CODE WE HAVE A FINAL SOLUTION TO BE REPORTED if (pass == 1) out1<- out #hold output for 2 dimensional solution auto <- "Q" quit <- 1 #flag solution reached, last time through r<- iscorr(issue,mood) #final iteration correlations if (pass == 1) r1<- r #hold correlations for 2 dimensional solution if (pass > 1) { unexp <- totalvar totalvar <- unexp * totalvar evalue <- evalue * unexp } # end if if (pass == 1) { expprop <- evalue / totalvar tot1 <- totalvar } else { erel <- evalue / totalvar #% exp relative totalvar <- (1 - expprop) * tot1 #true var=original var discounted by %exp evalue <- erel * totalvar #rescale to retain %exp relationship expprop <- evalue / tot1 #now reduce eral to expprop } # end if for (v in 1:nvar) { N[v]<- sum(!is.na(issue[,v])) } var.out<- list(varname=vl,loadings=r,means=av,std.deviations=std) print(" ") outcount<- outcount+1 out[outcount]<- print(paste("Eigen Estimate ", round(evalue,2), " of 
possible ",round(tot1,2))) outcount<- outcount+1 out[outcount]<- print(paste(" Percent Variance Explained: ",round(100 * expprop,2))) if (pass != 2 && npass>1) { for (v in 1:nvar) { valid[v] <- 0 #reset all, regmoodissue will set good=1 if (csign[v] != 0) issue[,v]<- residmi(issue,v,mood) #regmoodissue() } #v loop } # if #begin prn output routine # mood[fb,] is now our estimate, WHAT ABOUT A SECOND DIMENSION latent<- mood[fb,] #vector holds values for output if (pass == 1) latent1<- latent #hold first dimension print(" ") out[outcount+1]<- print(paste("Final Weighted Average Metric: Mean: ",round(wtmean,2)," St. Dev: ",round(wtstd,2))) #for Zelig output if (npass==1) { extract.out<- list(formula=formula,T=nperiods,nvar=nvar,unit=unit,dimensions=npass,period=period,varname=vl,N=N,means=av,std.deviations=std,setup1=out1,loadings1=r1,latent1=latent1) } else { for (i in 6:outcount) { out[i-5]=out[i] } length(out)<- outcount-5 extract.out<- list(formula=formula,T=nperiods,nvar=nvar,unit=unit,dimensions=npass,period=period,varname=vl,N=N,means=av,std.deviations=std,setup1=out1,loadings1=r1,latent1=latent1,setup2=out,loadings2=r,latent2=latent) } } #end if auto="y" } #end of for pass=1,2 loop par(col=1) #reset on termination class(extract.out)<- "Zextract" return(extract.out) } #end of extract ## Algorithm Implementation output <- extract(varname = pe$House, date = pe$date, index = pe$Appr, ncases = pe$N, unit = "M") # monthly output2 <- extract(varname = pe$House, date = pe$date, index = pe$Appr, ncases = pe$N, unit = "A") # annually length(unique(output$period)) ## Plot latent variable monthly plot(x = output$latent1, axes = F, # monthly representation ylab = "%", xlab = "Month", type = "l", main = "Approval of Presidential economic management") axis(1, at = c(1:447), labels = output$period, cex.axis = .7) axis(2) abline(v = 1, col = "red") # Reagan text(25, 70, "Reagan", col = "red") abline(v = which(output$period == 1989.01), col = "red") # Bush text(105, 70, "Bush", 
col = "red") abline(v = which(output$period == 1993.01), col = "blue") # Clinton text(153, 70, "Clinton", col = "blue") abline(v = which(output$period == 2001.01), col = "red") # Bush text(247, 70, "Bush", col = "red") abline(v = which(output$period == 2009.01), col = "blue") # Obama text(355, 70, "Obama", col = "blue") abline(v = which(output$period == 2017.01), col = "red") # Trump text(439, 70, "Trump", col = "red") box() ## Plot latent variable annually length(unique(output2$period)) # 38 plot(x = output2$latent1, axes = F, # annually representation ylab = "%", xlab = "Year", type = "l", main = "Approval of Presidential economic management") axis(1, at = c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38), labels = output2$period, cex.axis = .7) axis(2) abline(v = 1, col = "red") # Reagan text(3, 70, "Reagan", col = "red") abline(v = 9, col = "red") # Bush text(11, 70, "Bush", col = "red") abline(v = 13, col = "blue") # Clinton text(15, 70, "Clinton", col = "blue") abline(v = 21, col = "red") # Bush text(23, 70, "Bush", col = "red") abline(v = 29, col = "blue") # Obama text(31, 70, "Obama", col = "blue") abline(v = 37, col = "red") # Trump text(37.5, 70, "Trump", col = "red") box() ## Produce numerical output of estimation procedure summary(output) ## Reproduce Figure 3 in Kellstedt/de Boef (2004) kellstedt <- subset(pe, subset = pe$Year <= 2001) kellstedt_lat <- extract(varname = kellstedt$House, date = kellstedt$date, index = kellstedt$Appr, ncases = kellstedt$N, unit = "M") length(unique(kellstedt_lat$period)) plot(x = kellstedt_lat$latent1, axes = F, # monthly representation ylab = "%", xlab = "Month", type = "l", main = "Approval of Presidential economic management (Kellstedt 2004)") axis(1, at = c(1:249), labels = kellstedt_lat$period, cex.axis = .7) axis(2) abline(v = 1, col = "red") # Reagan text(25, 70, "Reagan", col = "red") abline(v = 
which(kellstedt_lat$period == 1989.01), col = "red") # Bush text(105, 70, "Bush", col = "red") abline(v = which(kellstedt_lat$period == 1993.01), col = "blue") # Clinton text(153, 70, "Clinton", col = "blue") abline(v = which(kellstedt_lat$period == 2001.01), col = "red") # Bush text(247, 70, "Bush", col = "red") box() ## Correlations of time series with Stimson metric # compute averages for each year for each survey house # for each survey house mu_abc_y <- tapply(pe$Appr[pe$House == "ABC"], pe$Year[pe$House == "ABC"], mean) mu_abcwp_y <- tapply(pe$Appr[pe$House == "ABCWP"], pe$Year[pe$House == "ABCWP"], mean) mu_cbs_y <- tapply(pe$Appr[pe$House == "CBS"], pe$Year[pe$House == "CBS"], mean) mu_cbsnyt_y <- tapply(pe$Appr[pe$House == "CBSNYT"], pe$Year[pe$House == "CBSNYT"], mean) mu_gal_y <- tapply(pe$Appr[pe$House == "Gallup"], pe$Year[pe$House == "Gallup"], mean) mu_lat_y <- tapply(pe$Appr[pe$House == "LATIMES"], pe$Year[pe$House == "LATIMES"], mean) latent_y <- output2$latent1 names(latent_y) <- output2$period mu_total_y <- tapply(pe$Appr, pe$Year, mean) # for reference only and total correlation # now fill with NA's at time-incongruent positions mu_abc_y <- c(mu_abc_y[1], mu_abc_y[2], NA, NA, NA, NA, NA, NA, NA, NA, mu_abc_y[3], mu_abc_y[4], mu_abc_y[5], mu_abc_y[6], mu_abc_y[7], mu_abc_y[8], NA, NA, NA, NA, mu_abc_y[9], mu_abc_y[10], mu_abc_y[11], NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA) names(mu_abc_y) <- output2$period #mu_abc_y mu_abcwp_y <- c(mu_abcwp_y[1:18], NA, NA, mu_abcwp_y[19:36]) names(mu_abcwp_y) <- output2$period #mu_abcwp_y mu_cbs_y <- c(NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, mu_cbs_y[1:28]) names(mu_cbs_y) <- output2$period #mu_cbs_y mu_cbsnyt_y <- c(mu_cbsnyt_y[1:36], NA, NA) names(mu_cbsnyt_y) <- output2$period #mu_cbsnyt_y mu_gal_y <- c(NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, mu_gal_y[1:5], NA, mu_gal_y[6], NA, mu_gal_y[7:24]) names(mu_gal_y) <- output2$period #mu_gal_y mu_lat_y <- c(mu_lat_y[1], NA, mu_lat_y[2:4], 
NA, mu_lat_y[5], NA, NA, NA, mu_lat_y[6:11], NA, mu_lat_y[12:13], NA, mu_lat_y[14:21], NA, NA, NA, NA, NA, NA, NA, NA, NA, NA) names(mu_lat_y) <- output2$period #mu_lat_y # now combine into single matrix matr <- data.frame(latent = output2$latent1, abc = mu_abc_y, abcwp = mu_abcwp_y, cbs = mu_cbs_y, cbsnyt = mu_cbsnyt_y, gallup = mu_gal_y, latimes = mu_lat_y, total = mu_total_y) cor.matrix <- cor(matr, use = "pairwise.complete.obs") cor.matrix #stargazer(cor.matrix, type = "latex", #title = "Correlation Matrix Time Series and Metric")
source('MAPMeasure.R') Precision <- function(retrieved, relevant){ #r-precision se considera soh os |relevantes| primeiros retrieved return (sum (retrieved %in% relevant) / length(retrieved) ) } Recall <- function(retrieved, relevant){ return (sum (retrieved %in% relevant) / length(relevant) ) } MIR = read.csv("../data/MIR_Dortmund.csv") auxGenre = MIR[,6:14] #from alternative to rock genreClass = colnames(auxGenre)[apply(auxGenre, 1, which.max)] auxSinger = MIR[,c("male","female")] singerClass = colnames(auxSinger)[apply(auxSinger, 1, which.max)] auxMood = MIR[,c("happy", "sad")] moodClass = colnames(auxMood)[apply(auxMood,1,which.max)] auxInst = MIR[,c("voice","instrumental")] instrumentClass = colnames(auxInst)[apply(auxInst,1,which.max)] auxTempo = MIR[,c("aggressive", "relaxed")] tempoClass = colnames(auxTempo)[apply(auxTempo, 1, which.max)] ret = list() #test MAP tudo = cbind(singerClass, moodClass,genreClass,instrumentClass,sep='.') mat = as.matrix(read.table('Dissimilarity.dat')) MAPmeasure(classes = tudo, dissMatrix = mat) #test Genre without sigmoid cat('test Genre without sigmoid\n') mat = as.matrix(read.table("OriginalRadvizGenre/Dissimilarity.dat")) ret$OriginalRadvizGenre = list() ret$OriginalRadvizGenre$precision = rep(0,ncol(auxGenre)) ret$OriginalRadvizGenre$recall = rep(0,ncol(auxGenre)) ret$OriginalRadvizGenre$MAP = MAPmeasure(classes = genreClass, dissMatrix = mat) names(ret$OriginalRadvizGenre$precision) = colnames(auxGenre) names(ret$OriginalRadvizGenre$recall) = colnames(auxGenre) for (i in 1:ncol(auxGenre)){ genreStr = colnames(auxGenre)[i] fc <- file(paste('OriginalRadvizGenre/', genreStr, '.dat',sep = '')) order <- as.integer(unlist(strsplit(readLines(fc), " "))) close(fc) queryGenre = which(genreClass == genreStr) ret$OriginalRadvizGenre$precision[i] = Precision(order[1:length(queryGenre)], queryGenre) ret$OriginalRadvizGenre$recall[i] = Recall(order[1:length(queryGenre)], queryGenre) } #test Genre with sigmoid t = -0.5, s = 10 cat('test 
Genre with sigmoid t = -0.5, s = 10\n') mat = as.matrix(read.table("SigmoidRadvizGenre1/Dissimilarity.dat")) ret$SigmoidRadvizGenre1 = list() ret$SigmoidRadvizGenre1$precision = rep(0,ncol(auxGenre)) ret$SigmoidRadvizGenre1$recall = rep(0,ncol(auxGenre)) ret$SigmoidRadvizGenre1$MAP = MAPmeasure(classes = genreClass, dissMatrix = mat) names(ret$SigmoidRadvizGenre1$precision) = colnames(auxGenre) names(ret$SigmoidRadvizGenre1$recall) = colnames(auxGenre) for (i in 1:ncol(auxGenre)){ genreStr = colnames(auxGenre)[i] fc <- file(paste('SigmoidRadvizGenre1/', genreStr, '.dat',sep = '')) order <- as.integer(unlist(strsplit(readLines(fc), " "))) close(fc) queryGenre = which(genreClass == genreStr) ret$SigmoidRadvizGenre1$precision[i] = Precision(order[1:length(queryGenre)], queryGenre) ret$SigmoidRadvizGenre1$recall[i] = Recall(order[1:length(queryGenre)], queryGenre) } #test Genre with sigmoid t = -0.8, s = 10 cat('test Genre with sigmoid t = -0.8, s = 10') mat = as.matrix(read.table("SigmoidRadvizGenre2/Dissimilarity2.dat")) ret$SigmoidRadvizGenre2 = list() ret$SigmoidRadvizGenre2$precision = rep(0,ncol(auxGenre)) ret$SigmoidRadvizGenre2$recall = rep(0,ncol(auxGenre)) ret$SigmoidRadvizGenre2$MAP = MAPmeasure(classes = genreClass, dissMatrix = mat) names(ret$SigmoidRadvizGenre2$precision) = colnames(auxGenre) names(ret$SigmoidRadvizGenre2$recall) = colnames(auxGenre) for (i in 1:ncol(auxGenre)){ genreStr = colnames(auxGenre)[i] fc <- file(paste('SigmoidRadvizGenre2/', genreStr, '.dat',sep = '')) order <- as.integer(unlist(strsplit(readLines(fc), " "))) close(fc) queryGenre = which(genreClass == genreStr) ret$SigmoidRadvizGenre2$precision[i] = Precision(order[1:length(queryGenre)], queryGenre) ret$SigmoidRadvizGenre2$recall[i] = Recall(order[1:length(queryGenre)], queryGenre) } #test Genre with sigmoid t = -1, s = 100 cat('test Genre with sigmoid t = -1, s = 100') mat = as.matrix(read.table("SigmoidRadvizGenre3/Dissimilarity3.dat")) ret$SigmoidRadvizGenre3 = list() 
ret$SigmoidRadvizGenre3$MAP = MAPmeasure(classes = genreClass, dissMatrix = mat) #test Genre Mood (happy sad) t = 0, s = 0 cat('test Genre Mood (happy sad) t = 0, s = 0') mat = as.matrix(read.table("GenreMood/Dissimilarity0_0.dat")) GenreMoodClass = paste(genreClass,moodClass,sep='.') genremood1 = MAPmeasure(classes = GenreMoodClass, dissMatrix = mat) #test Genre Mood (happy sad) t = -1, s = 100 cat('test Genre Mood (happy sad) t = -1, s = 100') mat = as.matrix(read.table("GenreMood/Dissimilarity-1_100.dat")) GenreMoodClass = paste(genreClass,moodClass,sep='.') genremood2 = MAPmeasure(classes = GenreMoodClass, dissMatrix = mat) #test Genre Mood (happy sad) t = -.5, s = 10 cat('test Genre Mood (happy sad) t = -.5, s = 10') mat = as.matrix(read.table("GenreMood/Dissimilarity-.5_10.dat")) GenreMoodClass = paste(genreClass,moodClass,sep='.') genremood3 = MAPmeasure(classes = GenreMoodClass, dissMatrix = mat) #test Genre Mood (happy sad) t = -.5, s = 10 cat('test Genre Mood (happy sad) t = -.5, s = 10') mat = as.matrix(read.table("GenreMood/Dissimilarity-.8_10.dat")) GenreMoodClass = paste(genreClass,moodClass,sep='.') genremood4 = MAPmeasure(classes = GenreMoodClass, dissMatrix = mat) #test Genre Mood Sex cat('test Genre Mood Sex 1') GenreMoodSexClass = paste(genreClass, moodClass, singerClass, sep='.') mat = as.matrix(read.table("GenreMoodSex/Dissimilarity0_0.dat")) genremoodsex1 = MAPmeasure(classes = GenreMoodSexClass, dissMatrix = mat) cat('test Genre Mood Sex 2') GenreMoodSexClass = paste(genreClass, moodClass, singerClass, sep='.') mat = as.matrix(read.table("GenreMoodSex/Dissimilarity-.5_10.dat")) genremoodsex2 = MAPmeasure(classes = GenreMoodSexClass, dissMatrix = mat) cat('test Genre Mood Sex 3') GenreMoodSexClass = paste(genreClass, moodClass, singerClass, sep='.') mat = as.matrix(read.table("GenreMoodSex/Dissimilarity-.8_10.dat")) genremoodsex3 = MAPmeasure(classes = GenreMoodSexClass, dissMatrix = mat) cat('test Genre Mood Sex 4') GenreMoodSexClass = 
paste(genreClass, moodClass, singerClass, sep='.') mat = as.matrix(read.table("GenreMoodSex/Dissimilarity-1_100.dat")) genremoodsex3 = MAPmeasure(classes = GenreMoodSexClass, dissMatrix = mat) #test Pop pop = which(genreClass == 'pop') fc <- file('Indices.dat') order <- as.integer(unlist(strsplit(readLines(fc), " "))) close(fc) Precision(order[1:length(pop)], pop) #test Genre, mood, gender, instrumental/voice GenreMoodSexInstrument = paste(genreClass, moodClass, singerClass, instrumentClass, sep='.') mat = as.matrix(read.table("Dissimilarity.dat")) MAPmeasure(classes = GenreMoodSexClass, dissMatrix = mat) ############################# cat('query 1 - rock male sad') rockMaleSad = which(genreClass == 'funksoulrnb' & singerClass == 'female' & moodClass == 'happy') fc <- file('Query2/Indices-1_100.dat') order <- as.integer(unlist(strsplit(readLines(fc), " "))) close(fc) Precision(order[1:length(rockMaleSad)], rockMaleSad) ############################# query = which(tempoClass == "relaxed" & genreClass == "alternative") fc <- file('Indices.dat') order <- as.integer(unlist(strsplit(readLines(fc), " "))) close(fc) Precision(order[1:length(query)], query)
/Validation/Validation.R
no_license
jorgehpo/ConcentricRadviz
R
false
false
7,241
r
# Retrieval-quality evaluation (precision / recall / MAP) for the MIR Dortmund
# collection under several Radviz projection configurations.
source('MAPMeasure.R')

# Fraction of the retrieved items that are relevant.
# For r-precision, pass only the first |relevant| retrieved items.
Precision <- function(retrieved, relevant){
  return (sum (retrieved %in% relevant) / length(retrieved) )
}

# Fraction of the relevant items that were retrieved.
Recall <- function(retrieved, relevant){
  return (sum (retrieved %in% relevant) / length(relevant) )
}

# Read a whitespace-separated list of retrieval indices from `path`.
readOrder <- function(path){
  fc <- file(path)
  on.exit(close(fc), add = TRUE)
  as.integer(unlist(strsplit(readLines(fc), " ")))
}

# Per-genre r-precision/recall plus overall MAP for one result directory.
# `dissFile` is the dissimilarity-matrix file inside `dir`; each genre is
# expected to have a "<genre>.dat" index file in the same directory.
evalGenreDir <- function(dir, dissFile, genreClass, genreNames){
  res <- list(
    precision = setNames(rep(0, length(genreNames)), genreNames),
    recall    = setNames(rep(0, length(genreNames)), genreNames)
  )
  mat <- as.matrix(read.table(file.path(dir, dissFile)))
  res$MAP <- MAPmeasure(classes = genreClass, dissMatrix = mat)
  for (i in seq_along(genreNames)){
    genreStr <- genreNames[i]
    ranking <- readOrder(file.path(dir, paste0(genreStr, '.dat')))
    queryGenre <- which(genreClass == genreStr)
    res$precision[i] <- Precision(ranking[1:length(queryGenre)], queryGenre)
    res$recall[i]    <- Recall(ranking[1:length(queryGenre)], queryGenre)
  }
  res
}

# MAP over the dissimilarity matrix stored in `path`, for the given labelling.
comboMAP <- function(path, classes){
  mat <- as.matrix(read.table(path))
  MAPmeasure(classes = classes, dissMatrix = mat)
}

MIR = read.csv("../data/MIR_Dortmund.csv")

# Collapse each group of one-hot columns to a single label per track by
# taking the column with the largest value.
auxGenre = MIR[,6:14] #from alternative to rock
genreClass = colnames(auxGenre)[apply(auxGenre, 1, which.max)]
auxSinger = MIR[,c("male","female")]
singerClass = colnames(auxSinger)[apply(auxSinger, 1, which.max)]
auxMood = MIR[,c("happy", "sad")]
moodClass = colnames(auxMood)[apply(auxMood,1,which.max)]
auxInst = MIR[,c("voice","instrumental")]
instrumentClass = colnames(auxInst)[apply(auxInst,1,which.max)]
auxTempo = MIR[,c("aggressive", "relaxed")]
tempoClass = colnames(auxTempo)[apply(auxTempo, 1, which.max)]

ret = list()

#test MAP over the combined 4-facet labelling.
# BUG FIX: the original built `tudo` with cbind(..., sep='.'), which yields a
# character matrix (plus a literal "sep" column) rather than one combined
# label per track; paste() is what every later combined test uses.
tudo = paste(singerClass, moodClass, genreClass, instrumentClass, sep='.')
mat = as.matrix(read.table('Dissimilarity.dat'))
MAPmeasure(classes = tudo, dissMatrix = mat)

#test Genre without sigmoid
cat('test Genre without sigmoid\n')
ret$OriginalRadvizGenre = evalGenreDir('OriginalRadvizGenre', 'Dissimilarity.dat',
                                       genreClass, colnames(auxGenre))

#test Genre with sigmoid t = -0.5, s = 10
cat('test Genre with sigmoid t = -0.5, s = 10\n')
ret$SigmoidRadvizGenre1 = evalGenreDir('SigmoidRadvizGenre1', 'Dissimilarity.dat',
                                       genreClass, colnames(auxGenre))

#test Genre with sigmoid t = -0.8, s = 10
cat('test Genre with sigmoid t = -0.8, s = 10')
ret$SigmoidRadvizGenre2 = evalGenreDir('SigmoidRadvizGenre2', 'Dissimilarity2.dat',
                                       genreClass, colnames(auxGenre))

#test Genre with sigmoid t = -1, s = 100
# No per-genre index files for this configuration; only the MAP is computed.
cat('test Genre with sigmoid t = -1, s = 100')
ret$SigmoidRadvizGenre3 = list()
ret$SigmoidRadvizGenre3$MAP = comboMAP("SigmoidRadvizGenre3/Dissimilarity3.dat", genreClass)

# Genre x mood (happy/sad) labelling under four (t, s) sigmoid settings.
GenreMoodClass = paste(genreClass,moodClass,sep='.')
#test Genre Mood (happy sad) t = 0, s = 0
cat('test Genre Mood (happy sad) t = 0, s = 0')
genremood1 = comboMAP("GenreMood/Dissimilarity0_0.dat", GenreMoodClass)
#test Genre Mood (happy sad) t = -1, s = 100
cat('test Genre Mood (happy sad) t = -1, s = 100')
genremood2 = comboMAP("GenreMood/Dissimilarity-1_100.dat", GenreMoodClass)
#test Genre Mood (happy sad) t = -.5, s = 10
cat('test Genre Mood (happy sad) t = -.5, s = 10')
genremood3 = comboMAP("GenreMood/Dissimilarity-.5_10.dat", GenreMoodClass)
#test Genre Mood (happy sad) t = -.8, s = 10
# BUG FIX: this label previously repeated "t = -.5, s = 10" although the file
# read below is the t = -.8 configuration.
cat('test Genre Mood (happy sad) t = -.8, s = 10')
genremood4 = comboMAP("GenreMood/Dissimilarity-.8_10.dat", GenreMoodClass)

# Genre x mood x singer-gender labelling under the same four settings.
GenreMoodSexClass = paste(genreClass, moodClass, singerClass, sep='.')
#test Genre Mood Sex
cat('test Genre Mood Sex 1')
genremoodsex1 = comboMAP("GenreMoodSex/Dissimilarity0_0.dat", GenreMoodSexClass)
cat('test Genre Mood Sex 2')
genremoodsex2 = comboMAP("GenreMoodSex/Dissimilarity-.5_10.dat", GenreMoodSexClass)
cat('test Genre Mood Sex 3')
genremoodsex3 = comboMAP("GenreMoodSex/Dissimilarity-.8_10.dat", GenreMoodSexClass)
cat('test Genre Mood Sex 4')
# BUG FIX: the fourth result previously overwrote genremoodsex3.
genremoodsex4 = comboMAP("GenreMoodSex/Dissimilarity-1_100.dat", GenreMoodSexClass)

#test Pop: r-precision of the global ordering for the 'pop' genre.
pop = which(genreClass == 'pop')
ranking = readOrder('Indices.dat')
Precision(ranking[1:length(pop)], pop)

#test Genre, mood, gender, instrumental/voice
GenreMoodSexInstrument = paste(genreClass, moodClass, singerClass, instrumentClass, sep='.')
mat = as.matrix(read.table("Dissimilarity.dat"))
# BUG FIX: previously passed GenreMoodSexClass here, silently ignoring the
# instrumental/voice facet that GenreMoodSexInstrument was built for.
MAPmeasure(classes = GenreMoodSexInstrument, dissMatrix = mat)

#############################
# NOTE(review): the label says "rock male sad", but the query below selects
# funksoulrnb / female / happy -- confirm which was intended.
cat('query 1 - rock male sad')
rockMaleSad = which(genreClass == 'funksoulrnb' & singerClass == 'female' & moodClass == 'happy')
ranking = readOrder('Query2/Indices-1_100.dat')
Precision(ranking[1:length(rockMaleSad)], rockMaleSad)

#############################
query = which(tempoClass == "relaxed" & genreClass == "alternative")
ranking = readOrder('Indices.dat')
Precision(ranking[1:length(query)], query)
\name{dummy.ppm} \alias{dummy.ppm} \title{Extract Dummy Points Used to Fit a Point Process Model} \description{ Given a fitted point process model, this function extracts the `dummy points' of the quadrature scheme used to fit the model. } \usage{ dummy.ppm(object, drop=FALSE) } \arguments{ \item{object}{ fitted point process model (an object of class \code{"ppm"}). } \item{drop}{ Logical value determining whether to delete dummy points that were not used to fit the model. } } \value{ A point pattern (object of class \code{"ppp"}). } \details{ An object of class \code{"ppm"} represents a point process model that has been fitted to data. It is typically produced by the model-fitting algorithm \code{\link{ppm}}. The maximum pseudolikelihood algorithm in \code{\link{ppm}} approximates the pseudolikelihood integral by a sum over a finite set of quadrature points, which is constructed by augmenting the original data point pattern by a set of ``dummy'' points. The fitted model object returned by \code{\link{ppm}} contains complete information about this quadrature scheme. See \code{\link{ppm}} or \code{\link{ppm.object}} for further information. This function \code{dummy.ppm} extracts the dummy points of the quadrature scheme. A typical use of this function would be to count the number of dummy points, to gauge the accuracy of the approximation to the exact pseudolikelihood. It may happen that some dummy points are not actually used in fitting the model (typically because the value of a covariate is \code{NA} at these points). The argument \code{drop} specifies whether these unused dummy points shall be deleted (\code{drop=TRUE}) or retained (\code{drop=FALSE}) in the return value. See \code{\link{ppm.object}} for a list of all operations that can be performed on objects of class \code{"ppm"}. 
} \seealso{ \code{\link{ppm.object}}, \code{\link{ppp.object}}, \code{\link{ppm}} } \examples{ data(cells) fit <- ppm(cells, ~1, Strauss(r=0.1)) X <- dummy.ppm(fit) npoints(X) # this is the number of dummy points in the quadrature scheme } \author{\adrian and \rolf } \keyword{spatial} \keyword{utilities} \keyword{models}
/man/dummy.ppm.Rd
no_license
spatstat/spatstat.core
R
false
false
2,251
rd
\name{dummy.ppm} \alias{dummy.ppm} \title{Extract Dummy Points Used to Fit a Point Process Model} \description{ Given a fitted point process model, this function extracts the `dummy points' of the quadrature scheme used to fit the model. } \usage{ dummy.ppm(object, drop=FALSE) } \arguments{ \item{object}{ fitted point process model (an object of class \code{"ppm"}). } \item{drop}{ Logical value determining whether to delete dummy points that were not used to fit the model. } } \value{ A point pattern (object of class \code{"ppp"}). } \details{ An object of class \code{"ppm"} represents a point process model that has been fitted to data. It is typically produced by the model-fitting algorithm \code{\link{ppm}}. The maximum pseudolikelihood algorithm in \code{\link{ppm}} approximates the pseudolikelihood integral by a sum over a finite set of quadrature points, which is constructed by augmenting the original data point pattern by a set of ``dummy'' points. The fitted model object returned by \code{\link{ppm}} contains complete information about this quadrature scheme. See \code{\link{ppm}} or \code{\link{ppm.object}} for further information. This function \code{dummy.ppm} extracts the dummy points of the quadrature scheme. A typical use of this function would be to count the number of dummy points, to gauge the accuracy of the approximation to the exact pseudolikelihood. It may happen that some dummy points are not actually used in fitting the model (typically because the value of a covariate is \code{NA} at these points). The argument \code{drop} specifies whether these unused dummy points shall be deleted (\code{drop=TRUE}) or retained (\code{drop=FALSE}) in the return value. See \code{\link{ppm.object}} for a list of all operations that can be performed on objects of class \code{"ppm"}. 
} \seealso{ \code{\link{ppm.object}}, \code{\link{ppp.object}}, \code{\link{ppm}} } \examples{ data(cells) fit <- ppm(cells, ~1, Strauss(r=0.1)) X <- dummy.ppm(fit) npoints(X) # this is the number of dummy points in the quadrature scheme } \author{\adrian and \rolf } \keyword{spatial} \keyword{utilities} \keyword{models}
# Join Brookings deregulation-tracker entries onto the regulations data by
# Federal Register document number, keeping comment-count information.
names(d)
names(regs)

regs <- regs %>%
  rename(rin = RIN) %>%
  left_join(d)

regtracker <- read_csv("data/Deregulation-tracker-data-Final.csv")

# Pull the FR document number (e.g. "2017-12345") out of the proposed-rule link.
regtracker <- regtracker %>%
  mutate(frNumber = str_extract(`Proposed-link`, "20[0-9][0-9]-[0-9]{5}"))

regtracker$frNumber

regtracker <- regtracker %>%
  distinct() %>%
  filter(!is.na(frNumber)) %>%
  left_join(regs) %>%
  select(rin, PUBLIC_COMMENT_URL, frNumber, title, numberOfCommentsReceived) %>%
  distinct()
/code/BrookingsRegTracker.R
no_license
zoeang/rulemaking
R
false
false
445
r
# Join Brookings deregulation-tracker entries onto the regulations data by
# Federal Register document number, keeping comment-count information.
names(d)
names(regs)

regs <- regs %>%
  rename(rin = RIN) %>%
  left_join(d)

regtracker <- read_csv("data/Deregulation-tracker-data-Final.csv")

# Pull the FR document number (e.g. "2017-12345") out of the proposed-rule link.
regtracker <- regtracker %>%
  mutate(frNumber = str_extract(`Proposed-link`, "20[0-9][0-9]-[0-9]{5}"))

regtracker$frNumber

regtracker <- regtracker %>%
  distinct() %>%
  filter(!is.na(frNumber)) %>%
  left_join(regs) %>%
  select(rin, PUBLIC_COMMENT_URL, frNumber, title, numberOfCommentsReceived) %>%
  distinct()
## Caching the Inverse of a Matrix:
## Matrix inversion can be expensive, so these two functions let the inverse
## of a matrix be computed once with solve() and served from a cache on
## every later request, until the stored matrix is replaced.

## Create a special "matrix" object: a list of accessors closing over the
## matrix `x` and a cached copy of its inverse (reset whenever `x` changes).
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  set <- function(value) {
    x <<- value
    cached <<- NULL  # stored inverse is stale once the matrix changes
  }
  get <- function() {
    x
  }
  setInverse <- function(value) {
    cached <<- value
  }
  getInverse <- function() {
    cached
  }
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}

## Return the inverse of the special "matrix" made by makeCacheMatrix,
## computing it only when no cached value is available.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
    return(cached)
  }
  message("getting cached data")
  cached
}
/cachematrix.R
no_license
pahal2007/Coursera-R-Programming-Exercises-Solved
R
false
false
1,140
r
## Caching the Inverse of a Matrix:
## Matrix inversion can be expensive, so these two functions let the inverse
## of a matrix be computed once with solve() and served from a cache on
## every later request, until the stored matrix is replaced.

## Create a special "matrix" object: a list of accessors closing over the
## matrix `x` and a cached copy of its inverse (reset whenever `x` changes).
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  set <- function(value) {
    x <<- value
    cached <<- NULL  # stored inverse is stale once the matrix changes
  }
  get <- function() {
    x
  }
  setInverse <- function(value) {
    cached <<- value
  }
  getInverse <- function() {
    cached
  }
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}

## Return the inverse of the special "matrix" made by makeCacheMatrix,
## computing it only when no cached value is available.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
    return(cached)
  }
  message("getting cached data")
  cached
}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/networks.R \name{rewire_connections_to_node.network} \alias{rewire_connections_to_node.network} \title{Rewire connections to a node} \usage{ \method{rewire_connections_to_node}{network}(x, node, prob_rewire = 1, weights = NULL, alpha = 100, beta = 1, epsilon = 10^-5, run_checks = TRUE, ...) } \arguments{ \item{x}{The 'network', 'network_module', or 'matrix' object to modify.} \item{node}{The node to rewire.} \item{prob_rewire}{A value between 0 and 1, inclusive. Each connection to \code{node} will be rewired with probability equal to \code{prob_rewire}. Note, the degree of \code{node} is unchanged after this operation.} \item{weights}{(Optional) A vector of weights for each node. These are used in addition to the degree of each node when sampling nodes to rewire.} \item{alpha}{A positive value used to parameterize the Beta distribution.} \item{beta}{A positive value used to parameterize the Beta distribution.} \item{epsilon}{A small constant added to the sampling probability of each node.} \item{run_checks}{If \code{TRUE} and 'x' is a matrix, then it is checked that 'x' is an adjacency matrix. This catches the case where 'x' is a weighted matrix, in which case the weights are removed and a warning is given.} \item{...}{Additional arguments.} } \value{ The modified object. } \description{ Rewire connections to a node } \examples{ # Create a random network with 10 nodes. nw <- random_network(10) # Rewire connections to the first node. nw_rewired <- rewire_connections_to_node(nw, 1) # Plot the two networks for comparison g <- plot(nw) plot(nw_rewired, g) # Pass in g to mirror the layout. # Or plot the differential network. plot_network_diff(nw, nw_rewired, g) }
/SeqNet/man/rewire_connections_to_node.network.Rd
no_license
akhikolla/TestedPackages-NoIssues
R
false
true
1,783
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/networks.R \name{rewire_connections_to_node.network} \alias{rewire_connections_to_node.network} \title{Rewire connections to a node} \usage{ \method{rewire_connections_to_node}{network}(x, node, prob_rewire = 1, weights = NULL, alpha = 100, beta = 1, epsilon = 10^-5, run_checks = TRUE, ...) } \arguments{ \item{x}{The 'network', 'network_module', or 'matrix' object to modify.} \item{node}{The node to rewire.} \item{prob_rewire}{A value between 0 and 1, inclusive. Each connection to \code{node} will be rewired with probability equal to \code{prob_rewire}. Note, the degree of \code{node} is unchanged after this operation.} \item{weights}{(Optional) A vector of weights for each node. These are used in addition to the degree of each node when sampling nodes to rewire.} \item{alpha}{A positive value used to parameterize the Beta distribution.} \item{beta}{A positive value used to parameterize the Beta distribution.} \item{epsilon}{A small constant added to the sampling probability of each node.} \item{run_checks}{If \code{TRUE} and 'x' is a matrix, then it is checked that 'x' is an adjacency matrix. This catches the case where 'x' is a weighted matrix, in which case the weights are removed and a warning is given.} \item{...}{Additional arguments.} } \value{ The modified object. } \description{ Rewire connections to a node } \examples{ # Create a random network with 10 nodes. nw <- random_network(10) # Rewire connections to the first node. nw_rewired <- rewire_connections_to_node(nw, 1) # Plot the two networks for comparison g <- plot(nw) plot(nw_rewired, g) # Pass in g to mirror the layout. # Or plot the differential network. plot_network_diff(nw, nw_rewired, g) }
mcp_d <- function(x, lambda, a = 3) {
  ## -------------------------------------------------------------------------
  ## mcp_d: derivative of the MCP (minimax concave penalty)
  ##
  ## Args:
  ##   x:      numeric scalar or vector; point(s) at which to evaluate.
  ##   lambda: tuning parameter of the MCP.
  ##   a:      regularization parameter of the MCP (default 3).
  ##
  ## Returns:
  ##   lambda * (1 - |x| / (a * lambda)) wherever |x| < a * lambda,
  ##   0 beyond that threshold, and 0 whenever lambda is 0.
  ## -------------------------------------------------------------------------
  if (lambda == 0) {
    return(0)
  }
  scaled <- abs(x) / (lambda * a)
  lambda * (scaled < 1) * (1 - scaled)
}
/R/mcp_d.R
no_license
Ren-Mingyang/HeteroGGM
R
false
false
1,353
r
mcp_d <- function(x, lambda, a = 3) {
  ## -------------------------------------------------------------------------
  ## mcp_d: derivative of the MCP (minimax concave penalty)
  ##
  ## Args:
  ##   x:      numeric scalar or vector; point(s) at which to evaluate.
  ##   lambda: tuning parameter of the MCP.
  ##   a:      regularization parameter of the MCP (default 3).
  ##
  ## Returns:
  ##   lambda * (1 - |x| / (a * lambda)) wherever |x| < a * lambda,
  ##   0 beyond that threshold, and 0 whenever lambda is 0.
  ## -------------------------------------------------------------------------
  if (lambda == 0) {
    return(0)
  }
  scaled <- abs(x) / (lambda * a)
  lambda * (scaled < 1) * (1 - scaled)
}