content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
#' merge expression and calls together #' #' @param exp a \code{matrix}-like object of expression levels #' @param calls a \code{matrix}-like object of detection calls (P/M/A) #' @return none. #' @author Mark Cowley #' @export affy.pivot.table <- function(exp, calls) { # if( !is.matrix.like(exp) ) exp <- exprs(exp) # if( !is.matrix.like(calls) ) calls <- exprs(calls) exp <- round(exp,4) res <- interleave.columns(exp, calls) colnames(res) <- sub(".CEL", "", colnames(res)) tmp2 <- rep(c("_Signal", "_Detection"), ncol(exp)) colnames(res) <- paste(colnames(res), tmp2, sep="") invisible(res) } # CHANGELOG: # 2013-01-16: # - dropped calls to exprs # - dropped file argument
/R/affy.pivot.table.R
no_license
drmjc/mjcaffy
R
false
false
704
r
#' merge expression and calls together #' #' @param exp a \code{matrix}-like object of expression levels #' @param calls a \code{matrix}-like object of detection calls (P/M/A) #' @return none. #' @author Mark Cowley #' @export affy.pivot.table <- function(exp, calls) { # if( !is.matrix.like(exp) ) exp <- exprs(exp) # if( !is.matrix.like(calls) ) calls <- exprs(calls) exp <- round(exp,4) res <- interleave.columns(exp, calls) colnames(res) <- sub(".CEL", "", colnames(res)) tmp2 <- rep(c("_Signal", "_Detection"), ncol(exp)) colnames(res) <- paste(colnames(res), tmp2, sep="") invisible(res) } # CHANGELOG: # 2013-01-16: # - dropped calls to exprs # - dropped file argument
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/youtube_objects.R \name{CaptionListResponse} \alias{CaptionListResponse} \title{CaptionListResponse Object} \usage{ CaptionListResponse(etag = NULL, eventId = NULL, items = NULL, visitorId = NULL) } \arguments{ \item{etag}{Etag of this resource} \item{eventId}{Serialized EventId of the request which produced this response} \item{items}{A list of captions that match the request criteria} \item{visitorId}{The visitorId identifies the visitor} } \value{ CaptionListResponse object } \description{ CaptionListResponse Object } \details{ Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}} No description }
/googleyoutubev3.auto/man/CaptionListResponse.Rd
permissive
GVersteeg/autoGoogleAPI
R
false
true
705
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/youtube_objects.R \name{CaptionListResponse} \alias{CaptionListResponse} \title{CaptionListResponse Object} \usage{ CaptionListResponse(etag = NULL, eventId = NULL, items = NULL, visitorId = NULL) } \arguments{ \item{etag}{Etag of this resource} \item{eventId}{Serialized EventId of the request which produced this response} \item{items}{A list of captions that match the request criteria} \item{visitorId}{The visitorId identifies the visitor} } \value{ CaptionListResponse object } \description{ CaptionListResponse Object } \details{ Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}} No description }
load("Beatles Song Album Year.Rdata") library(tidytext) library(dplyr) library(stringr) data <- Lyric.album.year.keep[order(Lyric.album.year.keep$year),] %>% select(album, year) %>% unique() Lyric.album.year.keep %>% subset(year==1963) unnest_tokens(input = songlyric, output = words, token = "words") %>% unique() data$album num_songs <- count(Lyric.album.year.keep, album = album) %>% arrange(sapply(album, function(y) which(y == data$album))) Lyric.album.year.keep %>% mutate(num_songs) data <- data %>% mutate(num_songs) nrow(filter(Lyric.album.year.keep, album == data$album[1]) %>% unnest_tokens(output = words, input = songlyric, token = 'words') %>% unique()) wordcount = Lyric.album.year.keep %>% unnest_tokens(output = word, input = songlyric) %>% #anti_join(stop_words) %>% group_by(track_title) %>% count(word,sort=TRUE)%>% ungroup() filter(Lyric.album.year.keep, album == data$album[1]) %>% unnest_tokens(output = words, input = songlyric, token = 'words') %>% count(words) unique_words <- sapply(data$album, function(x) nrow(filter(Lyric.album.year.keep, album == x) %>% unnest_tokens(output = words, input = songlyric, token = 'words') %>% count(words) )) data <- data %>% mutate(unique_words) data <- data %>% mutate(words_per_song = data$unique_words/data$n) gtools::permutations(12,12) data %>% select(album, words_per_song) filter(Lyric.album.year.keep, track_title == 'And I Love Her') %>% unnest_tokens(output = words, input = songlyric, token = 'words') %>% count(words) temp_song <- (filter(Lyric.album.year.keep, track_title == 'And I Love Her') %>% unnest_tokens(output = words, input = songlyric, token = 'words')) filter(Lyric.album.year.keep, track_title == 'A Taste of Honey')$songlyric[str_detect(filter(Lyric.album.year.keep, track_title == 'Twist and Shout')$songlyric, pattern = '\\bit\\b')] library(genius) genius_lyrics("The Beatles", "A Taste of Honey'") test_data <- Lyric.album.year.keep %>% mutate(words = sapply(Lyric.album.year.keep$track_title, function(x) 
nrow(filter(Lyric.album.year.keep, track_title == x) %>% unnest_tokens(output = words, input = songlyric, token = 'words') %>% count(words) ))) library(rcompanion) factored_album <- factor(data$album) pairwisePermutationTest(words ~ album, data = filter(test_data, album %in% c("Please Please Me", "Let It Be"))) filter(test_data, album == 'Please Please Me') %>% select(words) %>% sum() / nrow(filter(test_data, album == 'Please Please Me') %>% select(words)) library(broom) test_statistic <- function(data) { return (tidy(anova(lm(words ~ album, data = data)))$statistic[1]) } tidy(anova(lm(words ~ album, data = test_data)))$statistic[1] filtered_data <- filter(test_data, album %in% c("Please Please Me", "Let It Be")) TestStatFromData <- TestStat(filtered_data) NPermute = 10000 Samples <- numeric(NPermute) for(perms in 1:NPermute){ Index = sample(1:nrow(filtered_data),size = nrow(filtered_data), replace = FALSE) filtered_data[Index,] Samples[perms] = filtered_data %>% mutate(album = album[Index]) %>% TestStat() if(perms %% 1000 == 0) print(perms) } (Pvalue = sum(Samples>TestStatFromData)/NPermute) hist(Samples,100);abline(v=TestStatFromData ,lwd=3,col="red") ttt <- t.test(MASS::Boston$medv) ci <- ttt$conf.int qnorm(.975) qt(.975, length(MASS::Boston$medv))
/Assignment 2/chi zhang.R
no_license
chizhangzxc/STAT5702
R
false
false
3,333
r
load("Beatles Song Album Year.Rdata") library(tidytext) library(dplyr) library(stringr) data <- Lyric.album.year.keep[order(Lyric.album.year.keep$year),] %>% select(album, year) %>% unique() Lyric.album.year.keep %>% subset(year==1963) unnest_tokens(input = songlyric, output = words, token = "words") %>% unique() data$album num_songs <- count(Lyric.album.year.keep, album = album) %>% arrange(sapply(album, function(y) which(y == data$album))) Lyric.album.year.keep %>% mutate(num_songs) data <- data %>% mutate(num_songs) nrow(filter(Lyric.album.year.keep, album == data$album[1]) %>% unnest_tokens(output = words, input = songlyric, token = 'words') %>% unique()) wordcount = Lyric.album.year.keep %>% unnest_tokens(output = word, input = songlyric) %>% #anti_join(stop_words) %>% group_by(track_title) %>% count(word,sort=TRUE)%>% ungroup() filter(Lyric.album.year.keep, album == data$album[1]) %>% unnest_tokens(output = words, input = songlyric, token = 'words') %>% count(words) unique_words <- sapply(data$album, function(x) nrow(filter(Lyric.album.year.keep, album == x) %>% unnest_tokens(output = words, input = songlyric, token = 'words') %>% count(words) )) data <- data %>% mutate(unique_words) data <- data %>% mutate(words_per_song = data$unique_words/data$n) gtools::permutations(12,12) data %>% select(album, words_per_song) filter(Lyric.album.year.keep, track_title == 'And I Love Her') %>% unnest_tokens(output = words, input = songlyric, token = 'words') %>% count(words) temp_song <- (filter(Lyric.album.year.keep, track_title == 'And I Love Her') %>% unnest_tokens(output = words, input = songlyric, token = 'words')) filter(Lyric.album.year.keep, track_title == 'A Taste of Honey')$songlyric[str_detect(filter(Lyric.album.year.keep, track_title == 'Twist and Shout')$songlyric, pattern = '\\bit\\b')] library(genius) genius_lyrics("The Beatles", "A Taste of Honey'") test_data <- Lyric.album.year.keep %>% mutate(words = sapply(Lyric.album.year.keep$track_title, function(x) 
nrow(filter(Lyric.album.year.keep, track_title == x) %>% unnest_tokens(output = words, input = songlyric, token = 'words') %>% count(words) ))) library(rcompanion) factored_album <- factor(data$album) pairwisePermutationTest(words ~ album, data = filter(test_data, album %in% c("Please Please Me", "Let It Be"))) filter(test_data, album == 'Please Please Me') %>% select(words) %>% sum() / nrow(filter(test_data, album == 'Please Please Me') %>% select(words)) library(broom) test_statistic <- function(data) { return (tidy(anova(lm(words ~ album, data = data)))$statistic[1]) } tidy(anova(lm(words ~ album, data = test_data)))$statistic[1] filtered_data <- filter(test_data, album %in% c("Please Please Me", "Let It Be")) TestStatFromData <- TestStat(filtered_data) NPermute = 10000 Samples <- numeric(NPermute) for(perms in 1:NPermute){ Index = sample(1:nrow(filtered_data),size = nrow(filtered_data), replace = FALSE) filtered_data[Index,] Samples[perms] = filtered_data %>% mutate(album = album[Index]) %>% TestStat() if(perms %% 1000 == 0) print(perms) } (Pvalue = sum(Samples>TestStatFromData)/NPermute) hist(Samples,100);abline(v=TestStatFromData ,lwd=3,col="red") ttt <- t.test(MASS::Boston$medv) ci <- ttt$conf.int qnorm(.975) qt(.975, length(MASS::Boston$medv))
\name{compileMultiRes} \alias{compileMultiRes} \title{Compiles results from multiple runs and produces graph for choosing Post Refinement Threshold} \description{This function takes a list of objects returned by CNORwrapfuzzy (run using identical parameters, models, and data) and packages them together so they can be compared with plotMeanFuzzyFit and writeFuzzyNetwork. Because almost all training of cFL models are underdetermined problems, analyzing multiple runs together is essential.} \usage{ compileMultiRes(allRes, tag=NULL, show=TRUE) } \arguments{ \item{allRes}{list of objects returned by the CNORwrapFuzzy function. } \item{tag}{If provided, save the results in 3 files. Each file starts with the string "filename" that is provided. (<tag>_allRes.RData, <tag>_allFinalMSEs.RData and <tag>_allFinalNumParams.RData)} \item{show}{plot the MSE and mean number of parameters versus threshold. Can be switch off if \code{show=FALSE}} } \author{M.K. Morris, T. Cokelaer} \examples{ data(ToyModel, package="CellNOptR") data(CNOlistToy,package="CellNOptR") paramsList = defaultParametersFuzzy(CNOlistToy, ToyModel) N = 10 allRes = list() \dontrun{ for (i in 1:N){ Res = CNORwrapFuzzy(CNOlistToy, ToyModel, paramsList) allRes[[i]] = Res } summary = compileMultiRes(allRes) summary$allFinalMSEs summary$allFinalNumParams # You can save the resuls in files using the tag argument compileMultiRes(allRes, "output") } }
/man/compileMultiRes.Rd
no_license
saezlab/CNORfuzzy
R
false
false
1,555
rd
\name{compileMultiRes} \alias{compileMultiRes} \title{Compiles results from multiple runs and produces graph for choosing Post Refinement Threshold} \description{This function takes a list of objects returned by CNORwrapfuzzy (run using identical parameters, models, and data) and packages them together so they can be compared with plotMeanFuzzyFit and writeFuzzyNetwork. Because almost all training of cFL models are underdetermined problems, analyzing multiple runs together is essential.} \usage{ compileMultiRes(allRes, tag=NULL, show=TRUE) } \arguments{ \item{allRes}{list of objects returned by the CNORwrapFuzzy function. } \item{tag}{If provided, save the results in 3 files. Each file starts with the string "filename" that is provided. (<tag>_allRes.RData, <tag>_allFinalMSEs.RData and <tag>_allFinalNumParams.RData)} \item{show}{plot the MSE and mean number of parameters versus threshold. Can be switch off if \code{show=FALSE}} } \author{M.K. Morris, T. Cokelaer} \examples{ data(ToyModel, package="CellNOptR") data(CNOlistToy,package="CellNOptR") paramsList = defaultParametersFuzzy(CNOlistToy, ToyModel) N = 10 allRes = list() \dontrun{ for (i in 1:N){ Res = CNORwrapFuzzy(CNOlistToy, ToyModel, paramsList) allRes[[i]] = Res } summary = compileMultiRes(allRes) summary$allFinalMSEs summary$allFinalNumParams # You can save the resuls in files using the tag argument compileMultiRes(allRes, "output") } }
library(ggplot2) library(ggmap) library(maps) library(animation) library(plotly) library(dplyr) airdata1612 = read.csv("On_Time_On_Time_Performance_2016_12.csv") airports = read.csv("airport.csv") attach(airdata1612) #Subset the data with top airports in terms of frequencys freq_fight = aggregate(airdata1612$ArrDelay,list(airdata1612$Origin),length) freq_fight = freq_fight[order(-freq_fight$x),] rownames(freq_fight) = 1:nrow(freq_fight) topfreq_fight = head(freq_fight,100) airdata1612sub = airdata1612[airdata1612$Origin %in% topfreq_fight$Group.1,] attach(airdata1612sub) #Top and low average delay date meandelay = aggregate(airdata1612$ArrDelay,list(airdata1612$FlightDate),mean,na.rm = T) meandelayminute = aggregate(airdata1612$ArrDelayMinutes,list(airdata1612$FlightDate),mean,na.rm = T) meandelay = meandelay[order(-meandelay$x),] # Frequency of Flights by Departure Time flightbytime = aggregate(airdata1612$DepTimeBlk,list(airdata1612$DepTimeBlk),length) ggplot(flightbytime,aes(flightbytime$Group.1,flightbytime$x)) + theme(axis.text.x = element_text(angle=90)) + geom_bar(stat="identity") + ggtitle("Frequency of Flights by Departure Time") delaybytime = aggregate(airdata1612$ArrDel15,list(airdata1612$DepTimeBlk),mean,na.rm = T) ggplot(delaybytime,aes(delaybytime$Group.1,delaybytime$x,group = 1)) + theme(axis.text.x = element_text(angle=90)) + geom_line() + geom_point() + ggtitle("Delay Rate of Flights by Departure Time") #Locating the congested airports on DEC-17. 
airdatadec17 = subset(airdata1612,airdata1612$DayofMonth == "15") attach(airdatadec17) delaytime_airport = aggregate(airdatadec17$ArrDelayMinutes,list(airdatadec17$Origin),mean,na.rm = T) delaytime_airport$congested = sapply(1:nrow(delaytime_airport),function(i){ ifelse(delaytime_airport$x[i] >= 30,1,0) }) #Read the airport location airportloc = read.csv("https://raw.githubusercontent.com/jpatokal/openflights/master/data/airports.dat", header = F) airportloc = airportloc[,c(5,7,8)] delaytime_airport = merge(delaytime_airport,airportloc,by.x = c("Group.1"),by.y = c("V5")) #Draw the airports on US map ggplot() + geom_polygon( data=map_data("usa"), aes(x=long, y=lat, group = group),colour="black",fill = "white") + geom_point(data = delaytime_airport,aes(x = V8,y = V7,colour = factor(congested))) + coord_cartesian(xlim = c(-125,-70),ylim=c(20,50)) #Draw the animation plot based departure time block animatedplot1 = saveGIF({for(i in 1:19){ datasub = subset(airdatadec17,airdatadec17$DepTimeBlk == levels(DepTimeBlk)[i]) delaytime = aggregate(datasub$ArrDelayMinutes,list(datasub$Origin),mean,na.rm = T) delaytime$congested = sapply(1:nrow(delaytime),function(i){ ifelse(delaytime_airport$x[i] >= 30,1,0) }) delaytime = merge(delaytime,airportloc,by.x = c("Group.1"),by.y = c("V5")) print(ggplot() + geom_polygon( data=map_data("usa"), aes(x=long, y=lat, group = group),colour="black",fill = "white") + geom_point(data = delaytime,aes(x = V8,y = V7,colour = factor(congested))) + coord_cartesian(xlim = c(-125,-70),ylim=c(20,50)) + ggtitle(paste("Time: ", levels(DepTimeBlk)[i],sep = ""))) } }) #Let pick a day that is not that congest. 
airdatadec17 = subset(airdata1612,airdata1612$DayofMonth == "3") attach(airdatadec17) delaytime_airport = aggregate(airdatadec17$ArrDelayMinutes,list(airdatadec17$Origin),mean,na.rm = T) delaytime_airport$congested = sapply(1:nrow(delaytime_airport),function(i){ ifelse(delaytime_airport$x[i] >= 30,1,0) }) delaytime_airport = merge(delaytime_airport,airportloc,by.x = c("Group.1"),by.y = c("V5")) animatedplot1 = saveGIF({for(i in 1:19){ datasub = subset(airdatadec17,airdatadec17$DepTimeBlk == levels(DepTimeBlk)[i]) delaytime = aggregate(datasub$ArrDelayMinutes,list(datasub$Origin),mean,na.rm = T) delaytime$congested = sapply(1:nrow(delaytime),function(i){ ifelse(delaytime_airport$x[i] >= 30,1,0) }) delaytime = merge(delaytime,airportloc,by.x = c("Group.1"),by.y = c("V5")) print(ggplot() + geom_polygon( data=map_data("usa"), aes(x=long, y=lat, group = group),colour="black",fill = "white") + geom_point(data = delaytime,aes(x = V8,y = V7,colour = factor(congested))) + coord_cartesian(xlim = c(-125,-70),ylim=c(20,50)) + ggtitle(paste("Time: ", levels(DepTimeBlk)[i],sep = ""))) } }) #Locating the congested airports on DEC-17. datasub = subset(airdata1612,airdata1612$DayofMonth == "17") delaytime = aggregate(datasub$ArrDelayMinutes,list(datasub$Origin,datasub$DepTimeBlk),mean,na.rm = T) delaytime$congested = sapply(1:nrow(delaytime),function(i){ ifelse(delaytime$x[i] >= 30,1,0) }) #Determine clusters if congested airports have direct routes connecting together. 
#Animation of the Congested Routes delay_airport = lapply(1:19,function(i){ subset(delaytime,delaytime$Group.2 == levels(delaytime$Group.2)[i]) }) routes = unique(airdatadec17[,c("Origin","Dest","DepTimeBlk")]) rownames(routes) = 1:nrow(routes) routes = lapply(1:19,function(i){ subset(routes,routes$DepTimeBlk == levels(routes$DepTimeBlk)[i]) }) air <- read.csv('https://raw.githubusercontent.com/plotly/datasets/master/2011_february_us_airport_traffic.csv') airinfo = lapply(1:19,function(i){ merge(airports,delay_airport[[i]],by.x = c("iata_code"),by.y = c("Group.1")) }) route1 = lapply(1:19,function(i){ merge(routes[[i]],airinfo[[i]],by.x = c("Origin","DepTimeBlk"),by.y = c("iata_code","Group.2")) }) route2 = lapply(1:19,function(i){ merge(route1[[i]],airinfo[[i]],by.x = c("Dest","DepTimeBlk"),by.y = c("iata_code","Group.2")) }) airinfo = airinfo %>% bind_rows airinfo = merge(airinfo,freq_fight,by.x = c("iata_code"),by.y = c("Group.1")) route2 = route2 %>% bind_rows route2$congested = sapply(1:nrow(route2),function(i){ ifelse(route2$congested.x[i] == "1" && route2$congested.y[i] == "1",1,0) }) route2$id = seq_len(nrow(route2)) plot_geo(locationmode = "USA-states") %>% add_markers( data = airinfo, x = ~longitude_deg, y = ~latitude_deg, text = ~name, frame = ~airinfo$Group.2, size = ~airinfo$x.y, hoverinfo = "text", alpha = 0.5, color = ~congested, colors = c("green","red") ) %>% layout( title = paste("Congested Routes"), geo = geo, showlegend = FALSE ) %>% add_segments( data = group_by(route2,id), frame = ~DepTimeBlk, x = ~longitude_deg.x, xend = ~longitude_deg.y, y = ~latitude_deg.x, yend = ~latitude_deg.y, alpha = 0.3, size = I(1), color = factor(route2$congested) ) %>% animation_opts(1000,easing = "elastic",redraw = T ) %>% animation_slider( currentvalue = list(prefix = "Time Block ", font = list(color="Black")) ) #Finding clusters #First subset the data. 
datasub = subset(airdata1612,airdata1612$DayofMonth == "17") delaytime = aggregate(datasub$ArrDelayMinutes,list(datasub$Origin,datasub$DepTimeBlk),mean,na.rm = T) delaytime$congested = sapply(1:nrow(delaytime),function(i){ ifelse(delaytime$x[i] >= 30,1,0) }) congested_airport = subset(delaytime,delaytime$congested == 1) congestedcounted = aggregate(congested_airport$congested,list(congested_airport$Group.2),length) #Let me try igraph library(igraph) delay_airport = lapply(1:19,function(i){ subset(delaytime,delaytime$Group.2 == levels(delaytime$Group.2)[i]) }) delay_airport[[1]]$Group.1 = factor(delay_airport[[1]]$Group.1) routes = unique(airdatadec17[,c("Origin","Dest","DepTimeBlk")]) rownames(routes) = 1:nrow(routes) routes = lapply(1:19,function(i){ subset(routes,routes$DepTimeBlk == levels(routes$DepTimeBlk)[i]) }) cluster = sapply(1:19,function(i){ routes[[i]]$Origin = factor(routes[[i]]$Origin) routes[[i]]$Dest = factor(routes[[i]]$Dest) airports = data.frame(name = delay_airport[[i]]$Group.1,congested = delay_airport[[i]]$congested) paths = data.frame(from = routes[[i]]$Origin, to = routes[[i]]$Dest) paths = paths[which(paths[,2] %in% airports$name),] paths = paths[which(paths[,1] %in% airports$name),] g = graph_from_data_frame(paths,vertices = airports) V(g)$color = ifelse(airports$congested == 1,"red","green") g = induced.subgraph(g,V(g)[V(g)$color %in% c("red")]) plot(g) max(clusters(g)$csize) }) ####### routes[[1]]$Origin = factor(routes[[1]]$Origin) routes[[1]]$Dest = factor(routes[[1]]$Dest) airports = data.frame(name = delay_airport[[1]]$Group.1,congested = delay_airport[[1]]$congested) paths = data.frame(from = routes[[1]]$Origin, to = routes[[1]]$Dest) paths = paths[which(paths[,2] %in% airports$name),] paths = paths[which(paths[,1] %in% airports$name),] g = graph_from_data_frame(paths,vertices = airports) V(g)$color = ifelse(airports$congested == 1,"red","green") g = induced.subgraph(g,V(g)[V(g)$color %in% c("red")]) plot(g) max(clusters(g)$csize)
/Exploration/Delay Propagation Analysis (Yu).R
no_license
jiahtan/flight
R
false
false
8,741
r
library(ggplot2) library(ggmap) library(maps) library(animation) library(plotly) library(dplyr) airdata1612 = read.csv("On_Time_On_Time_Performance_2016_12.csv") airports = read.csv("airport.csv") attach(airdata1612) #Subset the data with top airports in terms of frequencys freq_fight = aggregate(airdata1612$ArrDelay,list(airdata1612$Origin),length) freq_fight = freq_fight[order(-freq_fight$x),] rownames(freq_fight) = 1:nrow(freq_fight) topfreq_fight = head(freq_fight,100) airdata1612sub = airdata1612[airdata1612$Origin %in% topfreq_fight$Group.1,] attach(airdata1612sub) #Top and low average delay date meandelay = aggregate(airdata1612$ArrDelay,list(airdata1612$FlightDate),mean,na.rm = T) meandelayminute = aggregate(airdata1612$ArrDelayMinutes,list(airdata1612$FlightDate),mean,na.rm = T) meandelay = meandelay[order(-meandelay$x),] # Frequency of Flights by Departure Time flightbytime = aggregate(airdata1612$DepTimeBlk,list(airdata1612$DepTimeBlk),length) ggplot(flightbytime,aes(flightbytime$Group.1,flightbytime$x)) + theme(axis.text.x = element_text(angle=90)) + geom_bar(stat="identity") + ggtitle("Frequency of Flights by Departure Time") delaybytime = aggregate(airdata1612$ArrDel15,list(airdata1612$DepTimeBlk),mean,na.rm = T) ggplot(delaybytime,aes(delaybytime$Group.1,delaybytime$x,group = 1)) + theme(axis.text.x = element_text(angle=90)) + geom_line() + geom_point() + ggtitle("Delay Rate of Flights by Departure Time") #Locating the congested airports on DEC-17. 
airdatadec17 = subset(airdata1612,airdata1612$DayofMonth == "15") attach(airdatadec17) delaytime_airport = aggregate(airdatadec17$ArrDelayMinutes,list(airdatadec17$Origin),mean,na.rm = T) delaytime_airport$congested = sapply(1:nrow(delaytime_airport),function(i){ ifelse(delaytime_airport$x[i] >= 30,1,0) }) #Read the airport location airportloc = read.csv("https://raw.githubusercontent.com/jpatokal/openflights/master/data/airports.dat", header = F) airportloc = airportloc[,c(5,7,8)] delaytime_airport = merge(delaytime_airport,airportloc,by.x = c("Group.1"),by.y = c("V5")) #Draw the airports on US map ggplot() + geom_polygon( data=map_data("usa"), aes(x=long, y=lat, group = group),colour="black",fill = "white") + geom_point(data = delaytime_airport,aes(x = V8,y = V7,colour = factor(congested))) + coord_cartesian(xlim = c(-125,-70),ylim=c(20,50)) #Draw the animation plot based departure time block animatedplot1 = saveGIF({for(i in 1:19){ datasub = subset(airdatadec17,airdatadec17$DepTimeBlk == levels(DepTimeBlk)[i]) delaytime = aggregate(datasub$ArrDelayMinutes,list(datasub$Origin),mean,na.rm = T) delaytime$congested = sapply(1:nrow(delaytime),function(i){ ifelse(delaytime_airport$x[i] >= 30,1,0) }) delaytime = merge(delaytime,airportloc,by.x = c("Group.1"),by.y = c("V5")) print(ggplot() + geom_polygon( data=map_data("usa"), aes(x=long, y=lat, group = group),colour="black",fill = "white") + geom_point(data = delaytime,aes(x = V8,y = V7,colour = factor(congested))) + coord_cartesian(xlim = c(-125,-70),ylim=c(20,50)) + ggtitle(paste("Time: ", levels(DepTimeBlk)[i],sep = ""))) } }) #Let pick a day that is not that congest. 
airdatadec17 = subset(airdata1612,airdata1612$DayofMonth == "3") attach(airdatadec17) delaytime_airport = aggregate(airdatadec17$ArrDelayMinutes,list(airdatadec17$Origin),mean,na.rm = T) delaytime_airport$congested = sapply(1:nrow(delaytime_airport),function(i){ ifelse(delaytime_airport$x[i] >= 30,1,0) }) delaytime_airport = merge(delaytime_airport,airportloc,by.x = c("Group.1"),by.y = c("V5")) animatedplot1 = saveGIF({for(i in 1:19){ datasub = subset(airdatadec17,airdatadec17$DepTimeBlk == levels(DepTimeBlk)[i]) delaytime = aggregate(datasub$ArrDelayMinutes,list(datasub$Origin),mean,na.rm = T) delaytime$congested = sapply(1:nrow(delaytime),function(i){ ifelse(delaytime_airport$x[i] >= 30,1,0) }) delaytime = merge(delaytime,airportloc,by.x = c("Group.1"),by.y = c("V5")) print(ggplot() + geom_polygon( data=map_data("usa"), aes(x=long, y=lat, group = group),colour="black",fill = "white") + geom_point(data = delaytime,aes(x = V8,y = V7,colour = factor(congested))) + coord_cartesian(xlim = c(-125,-70),ylim=c(20,50)) + ggtitle(paste("Time: ", levels(DepTimeBlk)[i],sep = ""))) } }) #Locating the congested airports on DEC-17. datasub = subset(airdata1612,airdata1612$DayofMonth == "17") delaytime = aggregate(datasub$ArrDelayMinutes,list(datasub$Origin,datasub$DepTimeBlk),mean,na.rm = T) delaytime$congested = sapply(1:nrow(delaytime),function(i){ ifelse(delaytime$x[i] >= 30,1,0) }) #Determine clusters if congested airports have direct routes connecting together. 
#Animation of the Congested Routes delay_airport = lapply(1:19,function(i){ subset(delaytime,delaytime$Group.2 == levels(delaytime$Group.2)[i]) }) routes = unique(airdatadec17[,c("Origin","Dest","DepTimeBlk")]) rownames(routes) = 1:nrow(routes) routes = lapply(1:19,function(i){ subset(routes,routes$DepTimeBlk == levels(routes$DepTimeBlk)[i]) }) air <- read.csv('https://raw.githubusercontent.com/plotly/datasets/master/2011_february_us_airport_traffic.csv') airinfo = lapply(1:19,function(i){ merge(airports,delay_airport[[i]],by.x = c("iata_code"),by.y = c("Group.1")) }) route1 = lapply(1:19,function(i){ merge(routes[[i]],airinfo[[i]],by.x = c("Origin","DepTimeBlk"),by.y = c("iata_code","Group.2")) }) route2 = lapply(1:19,function(i){ merge(route1[[i]],airinfo[[i]],by.x = c("Dest","DepTimeBlk"),by.y = c("iata_code","Group.2")) }) airinfo = airinfo %>% bind_rows airinfo = merge(airinfo,freq_fight,by.x = c("iata_code"),by.y = c("Group.1")) route2 = route2 %>% bind_rows route2$congested = sapply(1:nrow(route2),function(i){ ifelse(route2$congested.x[i] == "1" && route2$congested.y[i] == "1",1,0) }) route2$id = seq_len(nrow(route2)) plot_geo(locationmode = "USA-states") %>% add_markers( data = airinfo, x = ~longitude_deg, y = ~latitude_deg, text = ~name, frame = ~airinfo$Group.2, size = ~airinfo$x.y, hoverinfo = "text", alpha = 0.5, color = ~congested, colors = c("green","red") ) %>% layout( title = paste("Congested Routes"), geo = geo, showlegend = FALSE ) %>% add_segments( data = group_by(route2,id), frame = ~DepTimeBlk, x = ~longitude_deg.x, xend = ~longitude_deg.y, y = ~latitude_deg.x, yend = ~latitude_deg.y, alpha = 0.3, size = I(1), color = factor(route2$congested) ) %>% animation_opts(1000,easing = "elastic",redraw = T ) %>% animation_slider( currentvalue = list(prefix = "Time Block ", font = list(color="Black")) ) #Finding clusters #First subset the data. 
datasub = subset(airdata1612,airdata1612$DayofMonth == "17") delaytime = aggregate(datasub$ArrDelayMinutes,list(datasub$Origin,datasub$DepTimeBlk),mean,na.rm = T) delaytime$congested = sapply(1:nrow(delaytime),function(i){ ifelse(delaytime$x[i] >= 30,1,0) }) congested_airport = subset(delaytime,delaytime$congested == 1) congestedcounted = aggregate(congested_airport$congested,list(congested_airport$Group.2),length) #Let me try igraph library(igraph) delay_airport = lapply(1:19,function(i){ subset(delaytime,delaytime$Group.2 == levels(delaytime$Group.2)[i]) }) delay_airport[[1]]$Group.1 = factor(delay_airport[[1]]$Group.1) routes = unique(airdatadec17[,c("Origin","Dest","DepTimeBlk")]) rownames(routes) = 1:nrow(routes) routes = lapply(1:19,function(i){ subset(routes,routes$DepTimeBlk == levels(routes$DepTimeBlk)[i]) }) cluster = sapply(1:19,function(i){ routes[[i]]$Origin = factor(routes[[i]]$Origin) routes[[i]]$Dest = factor(routes[[i]]$Dest) airports = data.frame(name = delay_airport[[i]]$Group.1,congested = delay_airport[[i]]$congested) paths = data.frame(from = routes[[i]]$Origin, to = routes[[i]]$Dest) paths = paths[which(paths[,2] %in% airports$name),] paths = paths[which(paths[,1] %in% airports$name),] g = graph_from_data_frame(paths,vertices = airports) V(g)$color = ifelse(airports$congested == 1,"red","green") g = induced.subgraph(g,V(g)[V(g)$color %in% c("red")]) plot(g) max(clusters(g)$csize) }) ####### routes[[1]]$Origin = factor(routes[[1]]$Origin) routes[[1]]$Dest = factor(routes[[1]]$Dest) airports = data.frame(name = delay_airport[[1]]$Group.1,congested = delay_airport[[1]]$congested) paths = data.frame(from = routes[[1]]$Origin, to = routes[[1]]$Dest) paths = paths[which(paths[,2] %in% airports$name),] paths = paths[which(paths[,1] %in% airports$name),] g = graph_from_data_frame(paths,vertices = airports) V(g)$color = ifelse(airports$congested == 1,"red","green") g = induced.subgraph(g,V(g)[V(g)$color %in% c("red")]) plot(g) max(clusters(g)$csize)
#' MST Simulation #' @description \code{mst_sim} runs a MST simulation #' @param x an assembled mst #' @param theta.true the true value of theta parameter #' @param rdp a list of routing decision points #' @importFrom stats runif #' @export mst_sim <- function(x, theta.true, rdp=NULL){ if(class(x) != "mst") stop("not a 'mst' object: ", class(x)) if(is.null(x$items)) stop("the mst has not been assembled yet") # select a panel panel.items <- mst_get_items(x, panel=sample(1:x$npanel, 1)) # initials theta.est <- 0 used.items <- responses <- NULL route <- thetas <- ses <- rep(NA, x$nstage) # rdp if(!is.null(rdp)) { if(length(rdp) != x$nstage - 1) stop("invalid routing decision points.") rdp <- lapply(rdp, function(x) data.frame(lower=c(-Inf, x), upper=c(x, Inf))) rdp <- Reduce(rbind, rdp) rdp$index <- 2:x$nmodule } for(i in 1:x$nstage){ # select next module if(i == 1) { # initial stage: randomly select an module in Stage 1 next.module <- sample(unique(x$route[,i]), 1) } else { # later stage: use info or rdp to select a module # all connected modules next.modules <- sort(unique(x$route[x$route[, i-1] == route[i-1], i])) if(is.null(rdp)) { # maximum information items <- with(panel.items, subset(panel.items, stage == i & index %in% next.modules)) info <- irt_stats(model_3pl(theta=theta.est, items=items), "info")[1,] info <- aggregate(info, list(items$index), sum) colnames(info) <- c("index", "info") next.module <- info$index[which.max(info$info)] } else { # rdp next.module <- subset(rdp, rdp$index %in% next.modules & theta.est < rdp$upper) if(nrow(next.module) != 0) { next.module <- min(next.module$index) } else { next.module <- subset(rdp, rdp$index %in% next.modules & theta.est > rdp$lower) next.module <- max(next.module$index) } } } # generate responses items <- with(panel.items, subset(panel.items, stage == i & index == next.module)) p <- irt_stats(model_3pl(theta=theta.true, items=items), "prob")[1,] u <- (p >= runif(length(p))) * 1 # append module, items and 
responses route[i] <- next.module used.items <- rbind(used.items, items) responses <- c(responses, u) # estimate ability theta.est <- estimate_people(responses, used.items, model="3pl", method="mle")$people[1,] thetas[i] <- theta.est # information info <- irt_stats(model_3pl(theta=theta.est, items=used.items), "info") info <- sum(info) se <- 1 / sqrt(info) ses[i] <- se } used.items <- data.frame(rsp=responses, used.items) out <- list(panel=panel.items, items=used.items, true=theta.true, est=theta.est, se=se, thetas=thetas, ses=ses, route=route) class(out) <- "mst.sim" return(out) }
/R/module5_mst_simulation.R
no_license
xzhaopsy/xxIRT
R
false
false
2,884
r
#' MST Simulation #' @description \code{mst_sim} runs a MST simulation #' @param x an assembled mst #' @param theta.true the true value of theta parameter #' @param rdp a list of routing decision points #' @importFrom stats runif #' @export mst_sim <- function(x, theta.true, rdp=NULL){ if(class(x) != "mst") stop("not a 'mst' object: ", class(x)) if(is.null(x$items)) stop("the mst has not been assembled yet") # select a panel panel.items <- mst_get_items(x, panel=sample(1:x$npanel, 1)) # initials theta.est <- 0 used.items <- responses <- NULL route <- thetas <- ses <- rep(NA, x$nstage) # rdp if(!is.null(rdp)) { if(length(rdp) != x$nstage - 1) stop("invalid routing decision points.") rdp <- lapply(rdp, function(x) data.frame(lower=c(-Inf, x), upper=c(x, Inf))) rdp <- Reduce(rbind, rdp) rdp$index <- 2:x$nmodule } for(i in 1:x$nstage){ # select next module if(i == 1) { # initial stage: randomly select an module in Stage 1 next.module <- sample(unique(x$route[,i]), 1) } else { # later stage: use info or rdp to select a module # all connected modules next.modules <- sort(unique(x$route[x$route[, i-1] == route[i-1], i])) if(is.null(rdp)) { # maximum information items <- with(panel.items, subset(panel.items, stage == i & index %in% next.modules)) info <- irt_stats(model_3pl(theta=theta.est, items=items), "info")[1,] info <- aggregate(info, list(items$index), sum) colnames(info) <- c("index", "info") next.module <- info$index[which.max(info$info)] } else { # rdp next.module <- subset(rdp, rdp$index %in% next.modules & theta.est < rdp$upper) if(nrow(next.module) != 0) { next.module <- min(next.module$index) } else { next.module <- subset(rdp, rdp$index %in% next.modules & theta.est > rdp$lower) next.module <- max(next.module$index) } } } # generate responses items <- with(panel.items, subset(panel.items, stage == i & index == next.module)) p <- irt_stats(model_3pl(theta=theta.true, items=items), "prob")[1,] u <- (p >= runif(length(p))) * 1 # append module, items and 
responses route[i] <- next.module used.items <- rbind(used.items, items) responses <- c(responses, u) # estimate ability theta.est <- estimate_people(responses, used.items, model="3pl", method="mle")$people[1,] thetas[i] <- theta.est # information info <- irt_stats(model_3pl(theta=theta.est, items=used.items), "info") info <- sum(info) se <- 1 / sqrt(info) ses[i] <- se } used.items <- data.frame(rsp=responses, used.items) out <- list(panel=panel.items, items=used.items, true=theta.true, est=theta.est, se=se, thetas=thetas, ses=ses, route=route) class(out) <- "mst.sim" return(out) }
#' Age-based Catch Curve #' #' @param CatA vector of catch-at-age (in numbers) #' @param M natural mortality rate (assumed constant for age-classes) #' #' @return a data.frame with total mortality (Z), fishing mortality (F), and natural mortality (M) #' @export #' CC <- function(CatA, M) { ages <- 1:length(CatA) md <- which.max(CatA) logN <- log(CatA[md:length(CatA)]) LM <- lm(logN ~ ages[md:length(ages)]) Z <- as.numeric(-coef(LM)[2]) F <- Z-M data.frame(Z=Z, F=F, M=M) }
/R/CC.r
no_license
AdrianHordyk/FModel
R
false
false
491
r
#' Age-based Catch Curve #' #' @param CatA vector of catch-at-age (in numbers) #' @param M natural mortality rate (assumed constant for age-classes) #' #' @return a data.frame with total mortality (Z), fishing mortality (F), and natural mortality (M) #' @export #' CC <- function(CatA, M) { ages <- 1:length(CatA) md <- which.max(CatA) logN <- log(CatA[md:length(CatA)]) LM <- lm(logN ~ ages[md:length(ages)]) Z <- as.numeric(-coef(LM)[2]) F <- Z-M data.frame(Z=Z, F=F, M=M) }
data = read.csv("../datasets/raw/401K.csv") summary(data) lmobj = lm(prate~mrate, data=data) summ = summary(lmobj) summ predict(lmobj, data.frame(mrate=3.5))
/econometrics/notebooks/cx-2.1.r
permissive
kongscn/notes
R
false
false
158
r
data = read.csv("../datasets/raw/401K.csv") summary(data) lmobj = lm(prate~mrate, data=data) summ = summary(lmobj) summ predict(lmobj, data.frame(mrate=3.5))
#This script describes what biomass density would be predicted from a model that only knows canopy area and, specifically, does not know species #In development of a model to estimate biomass from Structure from Motion Point Clouds #Specifically, I am trying to answer the question: do we need to know the species of individual plants, or can we account for diversity with species-mixture parameters? #Load packages: packages = c('ggplot2', 'data.table', 'dplyr', 'tools', 'plotly', 'feather') lapply(packages, library, character.only = TRUE) #allometric models: #Cercidium microphyllum (paloverde canopy area allometry-biomass not published?) #natural log #log(Y) = (a + b(log(X)))*CF #Mesquite (Prosopis velutina) biomass - canopy area relationship: mesqAllom <- function(X){ #Function takes in Canopy Area (CA) in square meters or vector of CAs and returns Total biomass (kg) of mesquite #Equation from McClaran et al. 2013 a = -.59 b = 1.60 CF = 1.06 biomass <- exp(a + b*(log(X))*CF) return(biomass) } #hackberry (Celtis pallida) hackAllom <- function(X){ #Function takes in Canopy Area (CA) in square meters or vector of CAs and returns Total biomass (kg) #From HUANG et al. 
2007 #to return mass in kg, multiply by .001 biomass <- .001*exp(1.02*(6.78 + 1.41 * log(X))) return(biomass) } #Burrowweed #As scripted, this function returns funny values burrAllom <- function(X){ biomass<-.001*exp(-4.81 + 1.25 *log(X)) return(biomass) } # Prickly pear (Opuntia engelmannii) #r 1⁄4 ([center height/2] þ [longest diameter/2])/2, where center height and longest diameter are measured in METERS IN THIS IMPLEMENTATION prickAllom <- function(r){ #Convert input METERS to centimeters: r <- r * 100 biomass <-((4.189 * r^3)^0.965)/(10^5) return(biomass) } #Plot lines biomass over canopy area: x <- seq(from = 1, to = 25, length.out = 1000) y <- mesqAllom(x) yHack<- hackAllom(x) yBurr <- burrAllom(x) xPrick <- seq(from = 0 , to = 1., length.out = 1000) yPrick <- prickAllom(xPrick) plot(x,y, type = "l", col = "red") lines(x,yHack, col = "darkgreen") #lines(x,yBurr, col = "orange") lines(xPrick,yPrick, col = "grey") legend("topleft", c("Mesquite", "Hackberry", "Pricklypear"), col=c("red", "darkgreen", "grey"), title = NULL, lty = 1) ############################################################################################################################################################################################################################################################################### # SAMPLING CANOPY AREAS FOR EACH SPECIES # using stratified sampling/uniform distribution # To sample Canopy Areas from a Gaussian distribution (the assumption should be verified with data): # use a truncated Guassian, to represent that we don't find infinitely large plants or plants that have negative dimensions: #library(msm) # e.g.: rtnorm(n, 12.5, 3, lower = 0, upper = 25) #But for now, let's sample from a uniform distribution, by extracting a sequence: # in field sampling, such as in McClaren et al.'s 2013 paper, they sample a sequence to represent the possible growth forms, not ecosystem-state- # -dependent size distributions #Gaussian varation in mass from an 
idealized allometric state # standard deviation taken from McClaren et al. 2013, standard error or the regression of biomass over canopy area: se = 0.589227544 # Mitch McClaren's number of samples n = 31 #se = sd/sqrt(n) sd = se * sqrt(n) set.seed(1234) numPlantsEachSpecies <- 1000000 mesqCASamp <- seq(0, 60, length = numPlantsEachSpecies) hist(mesqCASamp, main = cat('Simulated Canopy Areas of', numPlantsEachSpecies, 'Individuals'), xlab = "Canopy Area (sq. meters)") #Starting with Mesquite: mesqMassSamp <- mesqAllom(mesqCASamp) + rnorm(numPlantsEachSpecies, 0, sd) hist(mesqMassSamp, main = cat('Allometrically Estimated Mesquite Biomass of', numPlantsEachSpecies, 'Individuals'), xlab = "Biomass (kg)") #And for hackberry: CASampHack <- seq(0, 30, length = numPlantsEachSpecies) massSampHack <- hackAllom(CASampHack) + rnorm(numPlantsEachSpecies, 0, sd) hist(massSampHack, main = cat('Allometrically Estimated Hackberry Biomass of', numPlantsEachSpecies, 'Individuals'), xlab = "Biomass (kg)") #And pricklypear: #Note the low upper bound of the parameter r at .6 meters rSampPrick <-seq(0, .6, length = numPlantsEachSpecies) massSampPrick <- prickAllom(rSampPrick) + rnorm(numPlantsEachSpecies, 0, sd) hist(massSampPrick, main = cat('Allometrically Estimated Prickly Pear Biomass of', numPlantsEachSpecies, 'Individuals'), xlab = "Biomass (kg)") #Visualize the relationship of Biomass to Canopy Area by Species: #plot(mesqCASamp, mesqMassSamp, xlab = "Canopy Area (sq m)", ylab = "Biomass (kg)", main = "Biomass of individaul mesquite plants over canopy area") #dev.new() #plot(x,y, type = "l", col = "red", xlab = "Canopy Area (sq m)", ylab = "Biomass (kg)", main = "Biomass of individaul plant over canopy area") #lines(x,yHack, col = "darkgreen") #lines(xPrick,yPrick, col = "grey") #points(mesqCASamp, mesqMassSamp, col = "red") #points(CASampHack, massSampHack, col = "darkgreen") #points(rSampPrick, massSampPrick, col = "grey") #legend("topleft", c("Mesquite", "Hackberry", 
"Pricklypear"), col=c("red", "darkgreen", "grey"), title = NULL, lty = 1) ############################################################################################################################################################################################################################################################################### # Synthesize observations: numPlants = 10000 area = 1 #Then, we assume some species mixing parameters: #PROPORTIONS ARE BY NUMBERS OF SPECIES over 1 M #Prosopis velutina: pMesquite <- 0.8271605 numMesq = numPlants * pMesquite #Celtis pallida: pHackberry <- 0.1728395 numHack = numPlants * pHackberry #Isocoma tenuisecta: (don't know if this is present at our site) #pBurroweed <- .025 #Opuntia engelmannii pPricklypear <- 0.0 numPrick = numPlants * pPricklypear #Cercidium microphyllum (don't know if this is present at our site nor do I have allometry-biomass relationship for it) #pPaloverde <- .025 # Sample canopy areas from this distribution: mesquites <- sample(mesqCASamp, numMesq) mesquites <- as.data.table(mesquites) mesquites[,mass := mesqAllom(mesquites)] colnames(mesquites) <- c("CA", "mass") mesquites[,species:="mesquite"] hackberries <- sample(CASampHack, numHack) hackberries <- as.data.table(hackberries) hackberries[,mass:= hackAllom(hackberries)] colnames(hackberries) <- c("CA", "mass") hackberries[,species:="hackberry"] pricklypears <- sample(rSampPrick, numPrick) pricklypears <- as.data.table(pricklypears) pricklypears[,mass:= prickAllom(pricklypears)] colnames(pricklypears) <- c("CA", "mass") pricklypears[,species:="pricklypear"] DT = rbindlist(list(mesquites, hackberries, pricklypears)) #Print the number of each species: count(DT, species) #plot(DT$CA, DT$mass) #title(main = "Deterministic Mass over Canopy Area") # Generate different mass estimates assuming ONE allometric equation assumeMesq<-mesqAllom(DT[,CA]) sum(assumeMesq) hist(assumeMesq) assumeHack<-hackAllom(DT[,CA]) sum(assumeHack) hist(assumeHack) 
assumePrick <- prickAllom(DT[,CA]) # %>% hist() #(won't sum if piped) sum(assumePrick) #write.csv(DT, "/Users/seanhendryx/DATA/ecosystemAllometry/1000Deterministic_Mass_CA.csv") # Sum biomasses #Now do the same thing, generating a large sample of CA and Mass values from the species distribution but add variance (make non-deterministic) to the mass: mesquites <- sample(mesqCASamp, numMesq) mesquites <- as.data.table(mesquites) mesquites[,mass := mesqAllom(mesquites) + rnorm(numMesq, 0, sd)] colnames(mesquites) <- c("CA", "mass") mesquites[,species:="mesquite"] hackberries <- sample(CASampHack, numHack) hackberries <- as.data.table(hackberries) hackberries[,mass:= hackAllom(hackberries) + rnorm(numHack, 0, sd)] colnames(hackberries) <- c("CA", "mass") hackberries[,species:="hackberry"] pricklypears <- sample(rSampPrick, numPrick) pricklypears <- as.data.table(pricklypears) pricklypears[,mass:= prickAllom(pricklypears) + rnorm(numPrick, 0, sd)] colnames(pricklypears) <- c("CA", "mass") pricklypears[,species:="pricklypear"] DT = rbindlist(list(mesquites, hackberries, pricklypears)) #Print the number of each species: count(DT, species) #p = ggplot(data = DT, mapping = aes(x = CA, y = mass)) + geom_point(mapping = aes(color = species), alpha = 1) + labs(x = expression(paste("Canopy Area (", {m^2}, ")")), y = "AGB (kg)") + theme_bw() write_feather(DT, "/Users/seanhendryx/DATA/ecosystemAllometry/measuredSpeciesDistribution/Generated_Mass_and_CAs.feather") #HERE # Here #Next: run cross validation to infer the polynomial order of a least squares model: f_eco() {the function of ecosystem allometry}
/biomassFromCanopyAreas.R
no_license
SMHendryx/ecosystemAllometry
R
false
false
8,868
r
#This script describes what biomass density would be predicted from a model that only knows canopy area and, specifically, does not know species #In development of a model to estimate biomass from Structure from Motion Point Clouds #Specifically, I am trying to answer the question: do we need to know the species of individual plants, or can we account for diversity with species-mixture parameters? #Load packages: packages = c('ggplot2', 'data.table', 'dplyr', 'tools', 'plotly', 'feather') lapply(packages, library, character.only = TRUE) #allometric models: #Cercidium microphyllum (paloverde canopy area allometry-biomass not published?) #natural log #log(Y) = (a + b(log(X)))*CF #Mesquite (Prosopis velutina) biomass - canopy area relationship: mesqAllom <- function(X){ #Function takes in Canopy Area (CA) in square meters or vector of CAs and returns Total biomass (kg) of mesquite #Equation from McClaran et al. 2013 a = -.59 b = 1.60 CF = 1.06 biomass <- exp(a + b*(log(X))*CF) return(biomass) } #hackberry (Celtis pallida) hackAllom <- function(X){ #Function takes in Canopy Area (CA) in square meters or vector of CAs and returns Total biomass (kg) #From HUANG et al. 
2007 #to return mass in kg, multiply by .001 biomass <- .001*exp(1.02*(6.78 + 1.41 * log(X))) return(biomass) } #Burrowweed #As scripted, this function returns funny values burrAllom <- function(X){ biomass<-.001*exp(-4.81 + 1.25 *log(X)) return(biomass) } # Prickly pear (Opuntia engelmannii) #r 1⁄4 ([center height/2] þ [longest diameter/2])/2, where center height and longest diameter are measured in METERS IN THIS IMPLEMENTATION prickAllom <- function(r){ #Convert input METERS to centimeters: r <- r * 100 biomass <-((4.189 * r^3)^0.965)/(10^5) return(biomass) } #Plot lines biomass over canopy area: x <- seq(from = 1, to = 25, length.out = 1000) y <- mesqAllom(x) yHack<- hackAllom(x) yBurr <- burrAllom(x) xPrick <- seq(from = 0 , to = 1., length.out = 1000) yPrick <- prickAllom(xPrick) plot(x,y, type = "l", col = "red") lines(x,yHack, col = "darkgreen") #lines(x,yBurr, col = "orange") lines(xPrick,yPrick, col = "grey") legend("topleft", c("Mesquite", "Hackberry", "Pricklypear"), col=c("red", "darkgreen", "grey"), title = NULL, lty = 1) ############################################################################################################################################################################################################################################################################### # SAMPLING CANOPY AREAS FOR EACH SPECIES # using stratified sampling/uniform distribution # To sample Canopy Areas from a Gaussian distribution (the assumption should be verified with data): # use a truncated Guassian, to represent that we don't find infinitely large plants or plants that have negative dimensions: #library(msm) # e.g.: rtnorm(n, 12.5, 3, lower = 0, upper = 25) #But for now, let's sample from a uniform distribution, by extracting a sequence: # in field sampling, such as in McClaren et al.'s 2013 paper, they sample a sequence to represent the possible growth forms, not ecosystem-state- # -dependent size distributions #Gaussian varation in mass from an 
idealized allometric state # standard deviation taken from McClaren et al. 2013, standard error or the regression of biomass over canopy area: se = 0.589227544 # Mitch McClaren's number of samples n = 31 #se = sd/sqrt(n) sd = se * sqrt(n) set.seed(1234) numPlantsEachSpecies <- 1000000 mesqCASamp <- seq(0, 60, length = numPlantsEachSpecies) hist(mesqCASamp, main = cat('Simulated Canopy Areas of', numPlantsEachSpecies, 'Individuals'), xlab = "Canopy Area (sq. meters)") #Starting with Mesquite: mesqMassSamp <- mesqAllom(mesqCASamp) + rnorm(numPlantsEachSpecies, 0, sd) hist(mesqMassSamp, main = cat('Allometrically Estimated Mesquite Biomass of', numPlantsEachSpecies, 'Individuals'), xlab = "Biomass (kg)") #And for hackberry: CASampHack <- seq(0, 30, length = numPlantsEachSpecies) massSampHack <- hackAllom(CASampHack) + rnorm(numPlantsEachSpecies, 0, sd) hist(massSampHack, main = cat('Allometrically Estimated Hackberry Biomass of', numPlantsEachSpecies, 'Individuals'), xlab = "Biomass (kg)") #And pricklypear: #Note the low upper bound of the parameter r at .6 meters rSampPrick <-seq(0, .6, length = numPlantsEachSpecies) massSampPrick <- prickAllom(rSampPrick) + rnorm(numPlantsEachSpecies, 0, sd) hist(massSampPrick, main = cat('Allometrically Estimated Prickly Pear Biomass of', numPlantsEachSpecies, 'Individuals'), xlab = "Biomass (kg)") #Visualize the relationship of Biomass to Canopy Area by Species: #plot(mesqCASamp, mesqMassSamp, xlab = "Canopy Area (sq m)", ylab = "Biomass (kg)", main = "Biomass of individaul mesquite plants over canopy area") #dev.new() #plot(x,y, type = "l", col = "red", xlab = "Canopy Area (sq m)", ylab = "Biomass (kg)", main = "Biomass of individaul plant over canopy area") #lines(x,yHack, col = "darkgreen") #lines(xPrick,yPrick, col = "grey") #points(mesqCASamp, mesqMassSamp, col = "red") #points(CASampHack, massSampHack, col = "darkgreen") #points(rSampPrick, massSampPrick, col = "grey") #legend("topleft", c("Mesquite", "Hackberry", 
"Pricklypear"), col=c("red", "darkgreen", "grey"), title = NULL, lty = 1) ############################################################################################################################################################################################################################################################################### # Synthesize observations: numPlants = 10000 area = 1 #Then, we assume some species mixing parameters: #PROPORTIONS ARE BY NUMBERS OF SPECIES over 1 M #Prosopis velutina: pMesquite <- 0.8271605 numMesq = numPlants * pMesquite #Celtis pallida: pHackberry <- 0.1728395 numHack = numPlants * pHackberry #Isocoma tenuisecta: (don't know if this is present at our site) #pBurroweed <- .025 #Opuntia engelmannii pPricklypear <- 0.0 numPrick = numPlants * pPricklypear #Cercidium microphyllum (don't know if this is present at our site nor do I have allometry-biomass relationship for it) #pPaloverde <- .025 # Sample canopy areas from this distribution: mesquites <- sample(mesqCASamp, numMesq) mesquites <- as.data.table(mesquites) mesquites[,mass := mesqAllom(mesquites)] colnames(mesquites) <- c("CA", "mass") mesquites[,species:="mesquite"] hackberries <- sample(CASampHack, numHack) hackberries <- as.data.table(hackberries) hackberries[,mass:= hackAllom(hackberries)] colnames(hackberries) <- c("CA", "mass") hackberries[,species:="hackberry"] pricklypears <- sample(rSampPrick, numPrick) pricklypears <- as.data.table(pricklypears) pricklypears[,mass:= prickAllom(pricklypears)] colnames(pricklypears) <- c("CA", "mass") pricklypears[,species:="pricklypear"] DT = rbindlist(list(mesquites, hackberries, pricklypears)) #Print the number of each species: count(DT, species) #plot(DT$CA, DT$mass) #title(main = "Deterministic Mass over Canopy Area") # Generate different mass estimates assuming ONE allometric equation assumeMesq<-mesqAllom(DT[,CA]) sum(assumeMesq) hist(assumeMesq) assumeHack<-hackAllom(DT[,CA]) sum(assumeHack) hist(assumeHack) 
assumePrick <- prickAllom(DT[,CA]) # %>% hist() #(won't sum if piped) sum(assumePrick) #write.csv(DT, "/Users/seanhendryx/DATA/ecosystemAllometry/1000Deterministic_Mass_CA.csv") # Sum biomasses #Now do the same thing, generating a large sample of CA and Mass values from the species distribution but add variance (make non-deterministic) to the mass: mesquites <- sample(mesqCASamp, numMesq) mesquites <- as.data.table(mesquites) mesquites[,mass := mesqAllom(mesquites) + rnorm(numMesq, 0, sd)] colnames(mesquites) <- c("CA", "mass") mesquites[,species:="mesquite"] hackberries <- sample(CASampHack, numHack) hackberries <- as.data.table(hackberries) hackberries[,mass:= hackAllom(hackberries) + rnorm(numHack, 0, sd)] colnames(hackberries) <- c("CA", "mass") hackberries[,species:="hackberry"] pricklypears <- sample(rSampPrick, numPrick) pricklypears <- as.data.table(pricklypears) pricklypears[,mass:= prickAllom(pricklypears) + rnorm(numPrick, 0, sd)] colnames(pricklypears) <- c("CA", "mass") pricklypears[,species:="pricklypear"] DT = rbindlist(list(mesquites, hackberries, pricklypears)) #Print the number of each species: count(DT, species) #p = ggplot(data = DT, mapping = aes(x = CA, y = mass)) + geom_point(mapping = aes(color = species), alpha = 1) + labs(x = expression(paste("Canopy Area (", {m^2}, ")")), y = "AGB (kg)") + theme_bw() write_feather(DT, "/Users/seanhendryx/DATA/ecosystemAllometry/measuredSpeciesDistribution/Generated_Mass_and_CAs.feather") #HERE # Here #Next: run cross validation to infer the polynomial order of a least squares model: f_eco() {the function of ecosystem allometry}
#' Plots segments on underlying point data #' #' @param segs segment data #' @param aes ggplot mapping (required: y) #' @param fml formula for mapping between pts and segs #' @param breaks dashed line breaks indicating secondary axis #' @param name name of the secondary axis #' @return ggplot2 object segs = function(segs, aes, fml, ..., breaks=waiver(), name=waiver()) { if ("GRanges" %in% class(segs)) segs = as.data.frame(segs) default_segs = aes(x=start, xend=end) default_segs[['yend']] = aes[['y']] aes_segs = utils::modifyList(default_segs, aes) args = list(...) defaults = list(size=1.5, color="green") defaults = defaults[!names(defaults) %in% names(aes_segs)] args = utils::modifyList(defaults, args) fscale = eval(fml[[2]][[3]], envir=environment(fml)) if (nrow(segs) > 0) seg_breaks = 1:(ceiling(max(segs[[as.character(aes[['y']][[2]])]])/fscale)) else seg_breaks = c() list(geom_hline(yintercept=seg_breaks*fscale, color="grey", linetype="dashed"), do.call(geom_segment, c(list(data=segs, mapping=aes_segs), args)), facet_grid(. ~ seqnames, scales="free_x"), scale_y_continuous(sec.axis=sec_axis(fml, breaks=breaks, name=name))) }
/plot/genome/segs.r
permissive
mschubert/ebits
R
false
false
1,254
r
#' Plots segments on underlying point data #' #' @param segs segment data #' @param aes ggplot mapping (required: y) #' @param fml formula for mapping between pts and segs #' @param breaks dashed line breaks indicating secondary axis #' @param name name of the secondary axis #' @return ggplot2 object segs = function(segs, aes, fml, ..., breaks=waiver(), name=waiver()) { if ("GRanges" %in% class(segs)) segs = as.data.frame(segs) default_segs = aes(x=start, xend=end) default_segs[['yend']] = aes[['y']] aes_segs = utils::modifyList(default_segs, aes) args = list(...) defaults = list(size=1.5, color="green") defaults = defaults[!names(defaults) %in% names(aes_segs)] args = utils::modifyList(defaults, args) fscale = eval(fml[[2]][[3]], envir=environment(fml)) if (nrow(segs) > 0) seg_breaks = 1:(ceiling(max(segs[[as.character(aes[['y']][[2]])]])/fscale)) else seg_breaks = c() list(geom_hline(yintercept=seg_breaks*fscale, color="grey", linetype="dashed"), do.call(geom_segment, c(list(data=segs, mapping=aes_segs), args)), facet_grid(. ~ seqnames, scales="free_x"), scale_y_continuous(sec.axis=sec_axis(fml, breaks=breaks, name=name))) }
testlist <- list(A = structure(c(2.3283870658308e-308, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L))) result <- do.call(multivariance:::match_rows,testlist) str(result)
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613108027-test.R
no_license
akhikolla/updatedatatype-list3
R
false
false
343
r
testlist <- list(A = structure(c(2.3283870658308e-308, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L))) result <- do.call(multivariance:::match_rows,testlist) str(result)
library(gtools) library(maxLik) # xd is a standardized vector of length N generateVector = function(randomGenerator, N) { xd = randomGenerator(N) N = length(xd) xd = (xd - mean(xd)) / sqrt(var(xd) * (N - 1) / N) xd } # The following functions are defined to make the code more simple to read getMean = function(b, f) { k = length(f) + 1 N = length(b) f1 = c(f, 1 - sum(f)) n = f1 * N means = numeric(k - 1) lastIndex = 0 for (i in 1 : (k - 1)) { nextIndex = lastIndex + n[i] means[i] = sum(b[(lastIndex + 1) : nextIndex]) / N; lastIndex = nextIndex } means } kcgf= function(tau, xd, f){ # This function computes an average cumulant generating function defined in the equation (4.2) and its derivatives # Inputs # tau 2*(k-1)-dimensional vector where tau = (t_0,t_1) # xd the population a=1,a=2, ..., a=N # f vector of ratios of group sizes (n_1,.. .,n_{k-1}) # Outputs # kcgf$cgf k(t), as defined in (4.2), where t is (k-1)-dimensional vector # kcgf$dcgf (k-1)-dimensional vector k'(t) # kcgf$ddcgf (k-1) by (k-1) matrix of k''(t) taum=matrix(tau,length(f),2) nn = length(xd) xsd = sqrt((var(xd) * (nn - 1))/nn) xdb = mean(xd) x = (xd - xdb)/xsd f1=1-sum(f) nn1=rep(1,nn) x2=rbind(nn1,x) # k(t), as defined in (4.2) kappa = mean(log(f1 + f%*%exp(taum%*%x2))) h=t(t(f * exp(taum%*%x2))/c(f1 + f %*% exp(taum%*%x2))) hx=t(x*t(f * exp(taum%*%x2))/c(f1 + f %*% exp(taum%*%x2))) hhx=rbind(h,hx) kappad = c(h%*%t(x2)/nn) kappad2= (cbind(rbind(diag(c(h%*%nn1)),diag(c(h%*%x))),rbind(diag(c(h%*%x)),diag(c(hx%*%x))))-hhx%*%t(hhx))/nn list(cgf = kappa, dcgf = kappad, ddcgf = kappad2) } kLam=function(x1, xd, f){ # This function computes the test statistic defined in the equation (4.3) # This function solves K'(t)=x for a given x=(f,x1) using Newton-Raphson method # Input # x1 (k-1)-dimension vector # xd the population a=1,a=2, ..., a=N # f vector of ratios of group sizes (n_1,.. 
.,n_{k-1}) # Outputs # kLam$lam value of the test statistic definned in (4.3) at x1 # kLam$t t(x), (k-1)-dimension vector, t(x)%*%x-K(t(x))=sup{t.x-K(t): t} # kLam$kdd 2*(k-1) by 2*(k-1) matrix K"(t(x)) # kLam$err calculate the accuracy of approximation using Newton-Raphson menthod # kLam$checkNA check of convergance fx=c(f,x1) checkNA=0 lf=length(f) tt=rep(0,2*lf) for (i in 1:10) { ktt=kcgf(tt,xd,f) if (!is.na(det(ktt$ddcgf)) && det(ktt$ddcgf)>.00000001) {tt=tt+solve(ktt$ddcgf)%*%(fx-ktt$dcgf)} else {{checkNA=1}&{break}}#t0=t0+solve(ktt$ddcgf)%*%(fx-ktt$dcgf) } ktt=kcgf(tt,xd,f) list(lam=-ktt$cgf+fx%*%tt,t=tt,kdd=ktt$ddcgf,err=fx-ktt$dcgf,checkNA=checkNA) } kHu=function(xd, f, v,M){ # this function calculates SP tail probs for k-sample perm test based on Lam(x) # Input # xd the population a=1,a=2, ..., a=N # f vector of ratios of group sizes (n_1,.. .,n_{k-1}) # v parameter in P(Lam(X)>v) # M number of MC replicates to approximate theintegral on the sphere # Outputs # kHu$tailpchi2 chi squared approximation # kHu$tailp saddlepoint approximations as in Theorem 2 of Kolassa and Robinson (2011) nn = length(xd) n = f * nn xsd = sqrt((var(xd) * (nn - 1))/nn) xdb = mean(xd) x = (xd - xdb)/xsd f1=1-sum(f) lf=length(f) tailp1=1-pchisq(nn*v^2,lf)#first order approximation VV=kcgf(rep(0,2*lf),x,f)$ddcgf[(lf+1):(2*lf),(lf+1):(2*lf)]#xd svdV=svd(VV) Vh=svdV$u%*%diag(svdV$d^(1/2))%*%t(svdV$v) Gu=0 sdelus=matrix(0,M,lf+4) Mm=M for (i in 1:M){ s=rnorm(lf) s=s/sqrt(sum(s^2)) s=c(Vh%*%s) r=v for (j in 1:6){ kl=kLam(r*s,x,f) if (kl$checkNA == 1) break; r=r+(v-sqrt(2*kl$lam))*sqrt(2*kl$lam)/sum(s*kl$t[(lf+1):(2*lf)]) } kl=kLam(r*s,x,f)#xd if (kl$checkNA==1){delus=NA}else {delus= f1*prod(f)*r^2/(v*sum(s*kl$t[(lf+1):(2*lf)])*(det(kl$kdd))^.5)} #Gu=ifelse(abs(v-sqrt(2*kl$lam))>.00001,Gu,Gu+delus) if (kl$checkNA==0) Gu=Gu+delus if (kl$checkNA==1) Mm=Mm-1 sdelus[i,]=c(delus,r*s,v-sqrt(2*kl$lam),sum(s*kl$t[(lf+1):(2*lf)]),det(kcgf(kl$t,xd,f)$ddcgf)) } 
cn=(nn^(lf/2))/((2)^(lf/2-1)*gamma(lf/2)) tailpLR=ifelse(Mm==0,tailp1,tailp1+nn^(-1)*cn*v^(lf-2)*exp(-nn*v^2/2)*(Gu/Mm-1)) tailpBN=ifelse(Mm==0,tailp1,1-pchisq(nn*(v-log(Gu/Mm)/(nn*v))^2,lf)) list(tailpchi2=tailp1,tailp=c(tailpLR,tailpBN)) } pvalcal=function(xd,f,M=30,MC,vv=0){ # this function computes MC number of replicates and is used in PowerForMu function nn=length(xd) n=f*nn xsd = sqrt((var(xd) * (nn - 1))/nn) xdb = mean(xd) x = (xd - xdb)/xsd f1=1-sum(f) lf=length(f) N=c(0,cumsum(n)) xb=orderedMeans=rep(0,lf) for(i in 1:lf){xb[i]=sum(x[(N[i]+1):N[i+1]])/nn} vl=kLam(xb,xd,f) v=ifelse(vv==0,sqrt(2*vl$lam),vv) out=ifelse(M==0,1-pchisq(nn*v^2,lf),kHu(xd,f,v,M)$tailp) MCpv=rep(0,MC) MCsqpv=rep(0,MC) MCm=MC for (k in 1:MC){ xs=sample(x,nn) xsb=rep(0,lf) for(i in 1:lf){xsb[i]=sum(xs[(N[i]+1):N[i+1]])/nn } MCpv[k]=kLam(xsb,xd,f)$lam xsbk=c(xsb,-sum(xsb)) MCsqpv[k]=nn*xsbk%*%diag(1/c(f,f1))%*%xsbk } list(SPpv=out,MCpv=MCpv,MCsqpv=MCsqpv,xb=xb) } PowerForMu = function(mu, distr, f, Z, M) { # PowerForMu gives the power of the F test and Lambda test, using approximations as in Theorem 1 #of K&R # mu k-dimensional vector parameter # distr distribution function, e.g. 
rnorm, rexp # Z integer number of random permutations to obtain one p-value # M integer number of p-values for each power k = length(f) + 1 N = length(mu) n = f*N pFValues = numeric(Z) pSPLRValues = numeric(Z) pSPBNValues = numeric(Z) for (i in 1 : Z) { a = generateVector(distr, N) a = a + mu a = (a - mean(a)) / sqrt(var(a) * (N - 1) / N) Xbar = getMean(a, f) Fbar = c(Xbar, -sum(Xbar)) LVal = kLam(Xbar, a, f) if (LVal$checkNA == 1) { pFValues[i] = NA pSPLRValues[i] = NA pSPBNValues[i] = NA next } u = sqrt(2*LVal$lam) FVal = c(N*Fbar%*%diag(1/c(n,N-sum(n)))%*%Fbar) FValues = pvalcal(a,f,MC=M)$MCsqpv pFValues[i] = mean(FValues > FVal) tt2 = kHu(a, f, u, 40)$tailp pSPLRValues[i] = tt2[1] pSPBNValues[i] = tt2[2] } PowerF = mean(pFValues[!is.na(pFValues)] < .05) PowerSPLR = mean(pSPLRValues[!is.na(pSPLRValues)] < .05) PowerSPBN = mean(pSPBNValues[!is.na(pSPBNValues)] < .05) list (PowerF = PowerF, PowerSPLR = PowerSPLR,PowerSPBN = PowerSPBN) } # the following are functions which generate different distributions and can be input in #PowerForMu function rexp2 = function(N) rexp(N) ^ 2 rgamma5 = function(N) rgamma(N, 5) rgamma05 = function(N) rgamma(N, 0.5)
/PhdThesisIngaSamonenko/PhdThesisIngaSamonenko/KD.R
no_license
mnaoumov/phd-thesis-inga-samonenko
R
false
false
6,714
r
library(gtools) library(maxLik) # xd is a standardized vector of length N generateVector = function(randomGenerator, N) { xd = randomGenerator(N) N = length(xd) xd = (xd - mean(xd)) / sqrt(var(xd) * (N - 1) / N) xd } # The following functions are defined to make the code more simple to read getMean = function(b, f) { k = length(f) + 1 N = length(b) f1 = c(f, 1 - sum(f)) n = f1 * N means = numeric(k - 1) lastIndex = 0 for (i in 1 : (k - 1)) { nextIndex = lastIndex + n[i] means[i] = sum(b[(lastIndex + 1) : nextIndex]) / N; lastIndex = nextIndex } means } kcgf= function(tau, xd, f){ # This function computes an average cumulant generating function defined in the equation (4.2) and its derivatives # Inputs # tau 2*(k-1)-dimensional vector where tau = (t_0,t_1) # xd the population a=1,a=2, ..., a=N # f vector of ratios of group sizes (n_1,.. .,n_{k-1}) # Outputs # kcgf$cgf k(t), as defined in (4.2), where t is (k-1)-dimensional vector # kcgf$dcgf (k-1)-dimensional vector k'(t) # kcgf$ddcgf (k-1) by (k-1) matrix of k''(t) taum=matrix(tau,length(f),2) nn = length(xd) xsd = sqrt((var(xd) * (nn - 1))/nn) xdb = mean(xd) x = (xd - xdb)/xsd f1=1-sum(f) nn1=rep(1,nn) x2=rbind(nn1,x) # k(t), as defined in (4.2) kappa = mean(log(f1 + f%*%exp(taum%*%x2))) h=t(t(f * exp(taum%*%x2))/c(f1 + f %*% exp(taum%*%x2))) hx=t(x*t(f * exp(taum%*%x2))/c(f1 + f %*% exp(taum%*%x2))) hhx=rbind(h,hx) kappad = c(h%*%t(x2)/nn) kappad2= (cbind(rbind(diag(c(h%*%nn1)),diag(c(h%*%x))),rbind(diag(c(h%*%x)),diag(c(hx%*%x))))-hhx%*%t(hhx))/nn list(cgf = kappa, dcgf = kappad, ddcgf = kappad2) } kLam=function(x1, xd, f){ # This function computes the test statistic defined in the equation (4.3) # This function solves K'(t)=x for a given x=(f,x1) using Newton-Raphson method # Input # x1 (k-1)-dimension vector # xd the population a=1,a=2, ..., a=N # f vector of ratios of group sizes (n_1,.. 
.,n_{k-1}) # Outputs # kLam$lam value of the test statistic definned in (4.3) at x1 # kLam$t t(x), (k-1)-dimension vector, t(x)%*%x-K(t(x))=sup{t.x-K(t): t} # kLam$kdd 2*(k-1) by 2*(k-1) matrix K"(t(x)) # kLam$err calculate the accuracy of approximation using Newton-Raphson menthod # kLam$checkNA check of convergance fx=c(f,x1) checkNA=0 lf=length(f) tt=rep(0,2*lf) for (i in 1:10) { ktt=kcgf(tt,xd,f) if (!is.na(det(ktt$ddcgf)) && det(ktt$ddcgf)>.00000001) {tt=tt+solve(ktt$ddcgf)%*%(fx-ktt$dcgf)} else {{checkNA=1}&{break}}#t0=t0+solve(ktt$ddcgf)%*%(fx-ktt$dcgf) } ktt=kcgf(tt,xd,f) list(lam=-ktt$cgf+fx%*%tt,t=tt,kdd=ktt$ddcgf,err=fx-ktt$dcgf,checkNA=checkNA) } kHu=function(xd, f, v,M){ # this function calculates SP tail probs for k-sample perm test based on Lam(x) # Input # xd the population a=1,a=2, ..., a=N # f vector of ratios of group sizes (n_1,.. .,n_{k-1}) # v parameter in P(Lam(X)>v) # M number of MC replicates to approximate theintegral on the sphere # Outputs # kHu$tailpchi2 chi squared approximation # kHu$tailp saddlepoint approximations as in Theorem 2 of Kolassa and Robinson (2011) nn = length(xd) n = f * nn xsd = sqrt((var(xd) * (nn - 1))/nn) xdb = mean(xd) x = (xd - xdb)/xsd f1=1-sum(f) lf=length(f) tailp1=1-pchisq(nn*v^2,lf)#first order approximation VV=kcgf(rep(0,2*lf),x,f)$ddcgf[(lf+1):(2*lf),(lf+1):(2*lf)]#xd svdV=svd(VV) Vh=svdV$u%*%diag(svdV$d^(1/2))%*%t(svdV$v) Gu=0 sdelus=matrix(0,M,lf+4) Mm=M for (i in 1:M){ s=rnorm(lf) s=s/sqrt(sum(s^2)) s=c(Vh%*%s) r=v for (j in 1:6){ kl=kLam(r*s,x,f) if (kl$checkNA == 1) break; r=r+(v-sqrt(2*kl$lam))*sqrt(2*kl$lam)/sum(s*kl$t[(lf+1):(2*lf)]) } kl=kLam(r*s,x,f)#xd if (kl$checkNA==1){delus=NA}else {delus= f1*prod(f)*r^2/(v*sum(s*kl$t[(lf+1):(2*lf)])*(det(kl$kdd))^.5)} #Gu=ifelse(abs(v-sqrt(2*kl$lam))>.00001,Gu,Gu+delus) if (kl$checkNA==0) Gu=Gu+delus if (kl$checkNA==1) Mm=Mm-1 sdelus[i,]=c(delus,r*s,v-sqrt(2*kl$lam),sum(s*kl$t[(lf+1):(2*lf)]),det(kcgf(kl$t,xd,f)$ddcgf)) } 
cn=(nn^(lf/2))/((2)^(lf/2-1)*gamma(lf/2)) tailpLR=ifelse(Mm==0,tailp1,tailp1+nn^(-1)*cn*v^(lf-2)*exp(-nn*v^2/2)*(Gu/Mm-1)) tailpBN=ifelse(Mm==0,tailp1,1-pchisq(nn*(v-log(Gu/Mm)/(nn*v))^2,lf)) list(tailpchi2=tailp1,tailp=c(tailpLR,tailpBN)) } pvalcal=function(xd,f,M=30,MC,vv=0){ # this function computes MC number of replicates and is used in PowerForMu function nn=length(xd) n=f*nn xsd = sqrt((var(xd) * (nn - 1))/nn) xdb = mean(xd) x = (xd - xdb)/xsd f1=1-sum(f) lf=length(f) N=c(0,cumsum(n)) xb=orderedMeans=rep(0,lf) for(i in 1:lf){xb[i]=sum(x[(N[i]+1):N[i+1]])/nn} vl=kLam(xb,xd,f) v=ifelse(vv==0,sqrt(2*vl$lam),vv) out=ifelse(M==0,1-pchisq(nn*v^2,lf),kHu(xd,f,v,M)$tailp) MCpv=rep(0,MC) MCsqpv=rep(0,MC) MCm=MC for (k in 1:MC){ xs=sample(x,nn) xsb=rep(0,lf) for(i in 1:lf){xsb[i]=sum(xs[(N[i]+1):N[i+1]])/nn } MCpv[k]=kLam(xsb,xd,f)$lam xsbk=c(xsb,-sum(xsb)) MCsqpv[k]=nn*xsbk%*%diag(1/c(f,f1))%*%xsbk } list(SPpv=out,MCpv=MCpv,MCsqpv=MCsqpv,xb=xb) } PowerForMu = function(mu, distr, f, Z, M) { # PowerForMu gives the power of the F test and Lambda test, using approximations as in Theorem 1 #of K&R # mu k-dimensional vector parameter # distr distribution function, e.g. 
rnorm, rexp # Z integer number of random permutations to obtain one p-value # M integer number of p-values for each power k = length(f) + 1 N = length(mu) n = f*N pFValues = numeric(Z) pSPLRValues = numeric(Z) pSPBNValues = numeric(Z) for (i in 1 : Z) { a = generateVector(distr, N) a = a + mu a = (a - mean(a)) / sqrt(var(a) * (N - 1) / N) Xbar = getMean(a, f) Fbar = c(Xbar, -sum(Xbar)) LVal = kLam(Xbar, a, f) if (LVal$checkNA == 1) { pFValues[i] = NA pSPLRValues[i] = NA pSPBNValues[i] = NA next } u = sqrt(2*LVal$lam) FVal = c(N*Fbar%*%diag(1/c(n,N-sum(n)))%*%Fbar) FValues = pvalcal(a,f,MC=M)$MCsqpv pFValues[i] = mean(FValues > FVal) tt2 = kHu(a, f, u, 40)$tailp pSPLRValues[i] = tt2[1] pSPBNValues[i] = tt2[2] } PowerF = mean(pFValues[!is.na(pFValues)] < .05) PowerSPLR = mean(pSPLRValues[!is.na(pSPLRValues)] < .05) PowerSPBN = mean(pSPBNValues[!is.na(pSPBNValues)] < .05) list (PowerF = PowerF, PowerSPLR = PowerSPLR,PowerSPBN = PowerSPBN) } # the following are functions which generate different distributions and can be input in #PowerForMu function rexp2 = function(N) rexp(N) ^ 2 rgamma5 = function(N) rgamma(N, 5) rgamma05 = function(N) rgamma(N, 0.5)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/DataDoc.R \docType{data} \name{CortesPreyVals} \alias{CortesPreyVals} \title{CortesPreyVals} \format{ A data frame of of 10 rows and 6 columns \itemize{ \item FoodI: Food category I. \item FoodII: Food category II. \item FoodIII: Food category III. \item Stage: Life history stage of the prey item. \item TL: Trophic level of the prey item. \item SE: Standard error around trophic level estimate of the prey item. } } \usage{ CortesPreyVals } \description{ A data frame containing prey items and their respective trophic levels for Chondrichthyes prey from Cortes, 1999 } \references{ { \itemize{ \item Cortes E. 1999. Standardized diet compositions and trophic levels of sharks. ICES Journal of marine science 56:707-717. } } } \keyword{datasets}
/man/CortesPreyVals.Rd
no_license
cran/dietr
R
false
true
873
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/DataDoc.R \docType{data} \name{CortesPreyVals} \alias{CortesPreyVals} \title{CortesPreyVals} \format{ A data frame of of 10 rows and 6 columns \itemize{ \item FoodI: Food category I. \item FoodII: Food category II. \item FoodIII: Food category III. \item Stage: Life history stage of the prey item. \item TL: Trophic level of the prey item. \item SE: Standard error around trophic level estimate of the prey item. } } \usage{ CortesPreyVals } \description{ A data frame containing prey items and their respective trophic levels for Chondrichthyes prey from Cortes, 1999 } \references{ { \itemize{ \item Cortes E. 1999. Standardized diet compositions and trophic levels of sharks. ICES Journal of marine science 56:707-717. } } } \keyword{datasets}
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/create_database.R \name{create_database} \alias{create_database} \title{Creating database} \usage{ create_database(dbname, user, password, host) } \arguments{ \item{dbname}{name of database} \item{user}{name of user} \item{password}{password of database} \item{host}{name of host} } \value{ invisible NULL } \description{ Function \code{create_database} creates a database with four empty tables: deputies, votings, votes, statements. } \details{ \preformatted{ Created tables: 1. deputies with columns: 1) id_deputy - deputy's id, 2) surname_name - deputy's names and surnames, 2. votings with columns: 1) id_voting - voting's id, 2) nr_meeting - meeting's number, 3) date_meeting - meeting's date, 4) nr_voting - voting's number, 5) topic_voting - voting's topic, 6) link_results - link with voting's results, 3. votes with columns: 1) id_vote - vote's id, 2) id_deputy - deputy's id, 3) id_voting - voting's id, 4) vote - deputy's vote, one of: 'Za','Przeciw', 'Wstrzymal sie','Nieobecny', 5) club - deputy's club, 4. statements with columns: 1) id_statement - statement's id, like: (meeting's number).(voting's number).(statement's number), 2) surname_name - author of statement, 3) date_statement - statement's date, 4) statement - content of statement.} } \note{ All information is stored in PostgreSQL database. } \examples{ \dontrun{ create_database(dbname, user, password, host)} } \author{ Piotr Smuda }
/sejmRP/man/create_database.Rd
no_license
ytmytm/sejmRP
R
false
false
1,590
rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/create_database.R \name{create_database} \alias{create_database} \title{Creating database} \usage{ create_database(dbname, user, password, host) } \arguments{ \item{dbname}{name of database} \item{user}{name of user} \item{password}{password of database} \item{host}{name of host} } \value{ invisible NULL } \description{ Function \code{create_database} creates a database with four empty tables: deputies, votings, votes, statements. } \details{ \preformatted{ Created tables: 1. deputies with columns: 1) id_deputy - deputy's id, 2) surname_name - deputy's names and surnames, 2. votings with columns: 1) id_voting - voting's id, 2) nr_meeting - meeting's number, 3) date_meeting - meeting's date, 4) nr_voting - voting's number, 5) topic_voting - voting's topic, 6) link_results - link with voting's results, 3. votes with columns: 1) id_vote - vote's id, 2) id_deputy - deputy's id, 3) id_voting - voting's id, 4) vote - deputy's vote, one of: 'Za','Przeciw', 'Wstrzymal sie','Nieobecny', 5) club - deputy's club, 4. statements with columns: 1) id_statement - statement's id, like: (meeting's number).(voting's number).(statement's number), 2) surname_name - author of statement, 3) date_statement - statement's date, 4) statement - content of statement.} } \note{ All information is stored in PostgreSQL database. } \examples{ \dontrun{ create_database(dbname, user, password, host)} } \author{ Piotr Smuda }
# -------------------------------------------------------------------
# IMDB score prediction: cleaning, exploration and modelling of the
# movie_metadata.csv dataset with a classification tree, k-NN and a
# random forest. Each model predicts the binned IMDB score:
# (0,4] Bad, (4,6] OK, (6,8] Good, (8,10] Excellent.
# -------------------------------------------------------------------

# Install only the packages that are missing, then attach everything.
# (The original called install.packages() unconditionally for every
# package, re-downloading them on each run.)
pkgs <- c("ggplot2", "ggrepel", "ggthemes", "scales", "dplyr", "VIM",
          "data.table", "formattable", "plotly", "corrplot", "GGally",
          "caret", "car", "stringr", "rpart", "rpart.plot", "FNN",
          "randomForest")
missing_pkgs <- pkgs[!vapply(pkgs, requireNamespace, logical(1),
                             quietly = TRUE)]
if (length(missing_pkgs) > 0) {
  install.packages(missing_pkgs)
}
invisible(lapply(pkgs, library, character.only = TRUE))

# Read data ----------------------------------------------------------
movie_data <- read.csv("~/Desktop/movie_metadata.csv", header = TRUE)
str(movie_data)

# DATA EXPLORATION ----------------------------------------------------

# Look for duplicates and delete them
sum(duplicated(movie_data))
movie_data <- movie_data[!duplicated(movie_data), ]

# Tidy the movie title: strip the stray "Â" characters and trailing
# whitespace. (Bug fix: the original called str_trim() without
# assigning the result, so the trimming had no effect.)
movie_data$movie_title <- gsub("Â", "",
                               as.character(factor(movie_data$movie_title)))
movie_data$movie_title <- str_trim(movie_data$movie_title, side = "right")

# Check all genres of the movies
head(movie_data$genres)

# Build one 0/1 indicator column per genre. (Replaces 23 near-identical
# sapply(1:length(...)) calls in the original — one of them duplicated —
# with a single vectorised grepl() loop over the genre names.)
genres.df <- as.data.frame(movie_data[, c("genres", "imdb_score")])
genre_names <- c("Action", "Adventure", "Animation", "Biography", "Comedy",
                 "Crime", "Documentary", "Drama", "Family", "Fantasy",
                 "Film-Noir", "History", "Horror", "Musical", "Mystery",
                 "News", "Romance", "Sci-Fi", "Short", "Sport",
                 "Thriller", "War", "Western")
for (g in genre_names) {
  genres.df[[g]] <- as.integer(grepl(g, genres.df$genres, fixed = TRUE))
}

# Mean IMDB score for each genre
means <- vapply(genre_names,
                function(g) mean(genres.df$imdb_score[genres.df[[g]] == 1]),
                numeric(1))

# Plot the means
barplot(means, main = "Mean of the imdb scores for different genres")

# All genre means lie in the 6-8 range, so genre is assumed to add
# little information for predicting the score; drop the column.
movie_data <- subset(movie_data, select = -c(genres))

# Count the NAs in every column
colSums(sapply(movie_data, is.na))

# Heat map of the missing values
missing.values <- aggr(movie_data, sortVars = TRUE, prop = TRUE,
                       sortCombs = TRUE, cex.lab = 1.5, cex.axis = 0.6,
                       cex.numbers = 5, combined = FALSE, gap = -0.2)

# gross and budget have the most NAs but both matter for the score,
# so drop only the rows in which they are missing.
movie_data <- movie_data[!is.na(movie_data$gross), ]
movie_data <- movie_data[!is.na(movie_data$budget), ]

# ~23% of the rows are removed; 3857 records remain for analysis
dim(movie_data)
sum(complete.cases(movie_data))
colSums(sapply(movie_data, is.na))

# aspect_ratio now has the most NAs; check whether it is informative.
table(movie_data$aspect_ratio)
movie_data$aspect_ratio[is.na(movie_data$aspect_ratio)] <- 0
mean(movie_data$imdb_score[movie_data$aspect_ratio == 1.85])
mean(movie_data$imdb_score[movie_data$aspect_ratio == 2.35])
mean(movie_data$imdb_score[movie_data$aspect_ratio != 1.85 &
                             movie_data$aspect_ratio != 2.35])
# The means barely differ between ratios, so drop the variable.
movie_data <- subset(movie_data, select = -c(aspect_ratio))

# Impute missing values ----------------------------------------------
movie_data$facenumber_in_poster[is.na(movie_data$facenumber_in_poster)] <-
  round(mean(movie_data$facenumber_in_poster, na.rm = TRUE))

# Treat 0 as missing in these count columns (indices are positions
# after the aspect_ratio drop, as in the original analysis)
movie_data[, c(5, 6, 8, 13, 24, 26)][movie_data[, c(5, 6, 8, 13, 24, 26)] == 0] <- NA

# Replace the NAs in each count column with its rounded column mean.
# (Replaces eight copy-pasted imputation blocks in the original.)
impute_cols <- c("num_critic_for_reviews", "duration",
                 "director_facebook_likes", "actor_3_facebook_likes",
                 "actor_1_facebook_likes", "cast_total_facebook_likes",
                 "actor_2_facebook_likes", "movie_facebook_likes")
for (col in impute_cols) {
  movie_data[[col]][is.na(movie_data[[col]])] <-
    round(mean(movie_data[[col]], na.rm = TRUE))
}

# Content rating ------------------------------------------------------
table(movie_data$content_rating)
# Blanks count as missing values
movie_data <- movie_data[!(movie_data$content_rating %in% ""), ]
# Historic ratings: M = GP = PG and X = NC-17
movie_data$content_rating[movie_data$content_rating == "M"]  <- "PG"
movie_data$content_rating[movie_data$content_rating == "GP"] <- "PG"
movie_data$content_rating[movie_data$content_rating == "X"]  <- "NC-17"
# Map the rarely used labels to the most common rating, "R"
movie_data$content_rating[movie_data$content_rating %in%
  c("Approved", "Not Rated", "Passed", "Unrated")] <- "R"
movie_data$content_rating <- factor(movie_data$content_rating)
table(movie_data$content_rating)

# Derive profit and percentage return on investment
movie_data <- movie_data %>%
  mutate(profit = gross - budget,
         return_on_investment_perc = (profit / budget) * 100)

# color is almost entirely "Color" and language almost entirely English,
# so neither can discriminate scores; drop both.
table(movie_data$color)
movie_data <- subset(movie_data, select = -c(color))
table(movie_data$language)
movie_data <- subset(movie_data, select = -c(language))

# ~79% of films are from the USA and ~8% from the UK; collapse the
# remaining countries into "Others".
table(movie_data$country)
levels(movie_data$country) <- c(levels(movie_data$country), "Others")
movie_data$country[(movie_data$country != "USA") &
                     (movie_data$country != "UK")] <- "Others"
movie_data$country <- factor(movie_data$country)
table(movie_data$country)

# DATA VISUALIZATION --------------------------------------------------

# Movies released per year
ggplot(movie_data, aes(title_year)) +
  geom_bar() +
  labs(x = "Year movie release", y = "No. of movies released",
       title = "Histogram of no. of movies released each year") +
  theme(plot.title = element_text(hjust = 0.5))

# The movie boom started around 1980; keep only those years.
movie_data <- movie_data[movie_data$title_year >= 1980, ]

# Top 20 movies by profit, in $ million
movie_data %>%
  filter(title_year %in% 2000:2016) %>%
  arrange(desc(profit)) %>%
  top_n(20, profit) %>%
  ggplot(aes(x = budget / 1e6, y = profit / 1e6)) +
  geom_point() +
  geom_smooth() +
  geom_text_repel(aes(label = movie_title)) +
  labs(x = "Budget in $million", y = "Profit in $million",
       title = "Top 10 Profitable Movies") +
  theme(plot.title = element_text(hjust = 0.5))

# 20 most profitable movies judged by their return on investment
movie_data %>%
  filter(budget > 100000) %>%
  mutate(profit = gross - budget,
         return_on_investment_perc = (profit / budget) * 100) %>%
  arrange(desc(profit)) %>%
  top_n(20, profit) %>%
  ggplot(aes(x = budget / 1e6, y = return_on_investment_perc)) +
  geom_point(size = 2) +
  geom_smooth(size = 1) +
  geom_text_repel(aes(label = movie_title), size = 3) +
  xlab("Budget in $million") +
  ylab("Percent of Return on Investment") +
  ggtitle("20 Most Profitable Movies based on their Return on Investment")

# Top 20 directors by average IMDB score
movie_data %>%
  group_by(director_name) %>%
  summarise(avg_imdb = mean(imdb_score)) %>%
  arrange(desc(avg_imdb)) %>%
  top_n(20, avg_imdb) %>%
  formattable(list(avg_imdb = color_bar("Red")), align = "l")

# Commercial success vs critical acclaim
movie_data %>%
  top_n(20, profit) %>%
  ggplot(aes(x = imdb_score, y = gross / 10^6, size = profit / 10^6,
             color = content_rating)) +
  geom_point() +
  geom_hline(aes(yintercept = 600)) +
  geom_vline(aes(xintercept = 7.75)) +
  geom_text_repel(aes(label = movie_title), size = 4) +
  xlab("IMDB Score") +
  ylab("Gross Money earned in million$") +
  ggtitle("Commercial Success Vs Critical Acclaim") +
  annotate("text", x = 8.5, y = 700, label = "High Ratings \n & High Gross") +
  theme(plot.title = element_text(hjust = 0.5))
# Observation: hardly any correlation between critical acclaim and a
# movie's commercial success.

# Facebook likes vs IMDB score
movie_data %>%
  plot_ly(x = ~movie_facebook_likes, y = ~imdb_score,
          color = ~content_rating, mode = "markers",
          text = ~content_rating, alpha = 0.7, type = "scatter")
# Movies with many facebook likes tend to have higher IMDB scores.

# DATA PRE-PROCESSING --------------------------------------------------

# Director and actor names are almost all distinct, so they cannot
# generalise as predictors; plot keywords are too diverse and the
# IMDB link is redundant.
sum(uniqueN(movie_data$director_name))
sum(uniqueN(movie_data[, c("actor_1_name", "actor_2_name", "actor_3_name")]))
movie_data <- subset(movie_data,
                     select = -c(director_name, actor_2_name, actor_1_name,
                                 movie_title, actor_3_name, plot_keywords,
                                 movie_imdb_link))

# Remove the two derived variables to avoid multicollinearity
movie_data <- subset(movie_data, select = -c(profit,
                                             return_on_investment_perc))

# Correlation heat map of all remaining variables
ggcorr(movie_data, label = TRUE, label_round = 2, label_size = 3.5,
       size = 2, hjust = 0.85) +
  ggtitle("Correlation Heatmap") +
  theme(plot.title = element_text(hjust = 0.5))

# actor_1_facebook_likes correlates ~0.95 with cast_total_facebook_likes
# (actors 2 and 3 also correlate with the total), so combine actors
# 2 and 3 into a single predictor.
movie_data$other_actors_facebook_likes <-
  movie_data$actor_2_facebook_likes + movie_data$actor_3_facebook_likes

# num_voted_users, num_user_for_reviews and num_critic_for_reviews are
# highly correlated: keep num_voted_users and the critic/user ratio.
movie_data$critic_review_ratio <-
  movie_data$num_critic_for_reviews / movie_data$num_user_for_reviews

movie_data <- subset(movie_data,
                     select = -c(cast_total_facebook_likes,
                                 actor_2_facebook_likes,
                                 actor_3_facebook_likes,
                                 num_critic_for_reviews,
                                 num_user_for_reviews))

# Re-check: no correlation stronger than 0.7 remains
ggcorr(movie_data, label = TRUE, label_round = 2, label_size = 4,
       size = 3, hjust = 0.85) +
  ggtitle("Correlation Heatmap") +
  theme(plot.title = element_text(hjust = 0.5))

# Bin the score into four buckets: less than 4 (Bad), 4-6 (OK),
# 6-8 (Good) and 8-10 (Excellent)
movie_data$binned_score <- cut(movie_data$imdb_score,
                               breaks = c(0, 4, 6, 8, 10))

# Rearrange and rename the columns for readability
movie_data <- movie_data[, c(9, 4, 5, 14, 12, 2, 3, 13, 1, 6, 10, 7, 8, 11, 15)]
colnames(movie_data) <- c("budget", "gross", "user_vote",
                          "critic_review_ratio", "movie_fb", "director_fb",
                          "actor1_fb", "other_actors_fb", "duration",
                          "face_number", "year", "country", "content",
                          "imdb_score", "binned_score")

# Split into training / validation / test sets with a 6:2:2 ratio
set.seed(45)
train.index <- sample(row.names(movie_data), dim(movie_data)[1] * 0.6)
valid.index <- sample(setdiff(row.names(movie_data), train.index),
                      dim(movie_data)[1] * 0.2)
test.index <- setdiff(row.names(movie_data), union(train.index, valid.index))
train <- movie_data[train.index, ]
valid <- movie_data[valid.index, ]
test <- movie_data[test.index, ]

# CLASSIFICATION TREE --------------------------------------------------

# Full grown tree
class.tree <- rpart(binned_score ~ . - imdb_score, data = train,
                    method = "class")
prp(class.tree, type = 1, extra = 1, under = TRUE, split.font = 2,
    varlen = 0)

# Best pruned tree, chosen by 5-fold cross-validated error
set.seed(51)
cv.ct <- rpart(binned_score ~ . - imdb_score, data = train,
               method = "class", cp = 0.00001, minsplit = 5, xval = 5)
printcp(cv.ct)
pruned.ct <- prune(cv.ct,
                   cp = cv.ct$cptable[which.min(cv.ct$cptable[, "xerror"]),
                                      "CP"])
length(pruned.ct$frame$var[pruned.ct$frame$var == "<leaf>"])
prp(pruned.ct, type = 1, extra = 1, split.font = 1, varlen = -10)

# Confusion matrices on the three partitions
tree.pred.train <- predict(pruned.ct, train, type = "class")
confusionMatrix(tree.pred.train, train$binned_score)
tree.pred.valid <- predict(pruned.ct, valid, type = "class")
confusionMatrix(tree.pred.valid, valid$binned_score)
tree.pred.test <- predict(pruned.ct, test, type = "class")
confusionMatrix(tree.pred.test, test$binned_score)

# K NEAREST NEIGHBOURS -------------------------------------------------

# Dummy-code country and content with model.matrix()
movie_data2 <- movie_data
movie_data2$country <- as.factor(movie_data2$country)
movie_data2$content <- as.factor(movie_data2$content)
movie_data2[, c("country_UK", "country_USA", "country_Others")] <-
  model.matrix(~ country - 1, data = movie_data2)
movie_data2[, c("content_G", "content_NC17", "content_PG", "content_PG13",
                "content_R")] <-
  model.matrix(~ content - 1, data = movie_data2)

# Keep the numeric predictors, the dummies and the binned score
movie_data2 <- movie_data2[, c(1:11, 16:23, 15)]

# Reuse the partition indices from above
set.seed(52)
train2 <- movie_data2[train.index, ]
valid2 <- movie_data2[valid.index, ]
test2 <- movie_data2[test.index, ]

# Normalise the predictors; column 20 is the binned score.
train2.norm <- train2
valid2.norm <- valid2
test2.norm <- test2
movie_data2.norm <- movie_data2
norm.values <- preProcess(train2[, -20], method = c("center", "scale"))
train2.norm[, -20] <- predict(norm.values, train2[, -20])
valid2.norm[, -20] <- predict(norm.values, valid2[, -20])
test2.norm[, -20] <- predict(norm.values, test2[, -20])
movie_data2.norm[, -20] <- predict(norm.values, movie_data2[, -20])

# Pick k (1..20) by validation-set accuracy
accuracy.df <- data.frame(k = seq_len(20), accuracy = rep(0, 20))
for (i in seq_len(20)) {
  knn.pred <- knn(train2.norm[, -20], valid2.norm[, -20],
                  cl = train2.norm[, 20], k = i)
  accuracy.df[i, 2] <- confusionMatrix(knn.pred,
                                       valid2.norm[, 20])$overall[1]
}
accuracy.df

# Evaluate the chosen k = 9 on the test set
knn.pred.test <- knn(train2.norm[, -20], test2.norm[, -20],
                     cl = train2.norm[, 20], k = 9)
accuracy <- confusionMatrix(knn.pred.test, test2.norm[, 20])$overall[1]
accuracy

# RANDOM FOREST --------------------------------------------------------
set.seed(53)
rf <- randomForest(binned_score ~ . - imdb_score, data = train, mtry = 5)

# Model error by number of trees
plot(rf)
legend("topright", colnames(rf$err.rate), col = 1:5, fill = 1:5)

# Variable importance, ranked by mean decrease in Gini impurity
importance <- importance(rf)
varImportance <- data.frame(Variables = row.names(importance),
                            Importance = round(importance[, "MeanDecreaseGini"], 2))
rankImportance <- varImportance %>%
  mutate(Rank = paste0("#", dense_rank(desc(Importance))))

ggplot(rankImportance,
       aes(x = reorder(Variables, Importance), y = Importance,
           fill = Importance)) +
  geom_bar(stat = "identity") +
  geom_text(aes(x = Variables, y = 0.5, label = Rank),
            hjust = 0, vjust = 0.55, size = 4, colour = "red") +
  labs(x = "Variables") +
  coord_flip() +
  theme_few()

# Validation-set performance
set.seed(632)
rf.pred.valid <- predict(rf, valid)
confusionMatrix(rf.pred.valid, valid$binned_score)
/data_mining_project.R
no_license
SrishtiBhandari/Data-Mining-with-ML-algorithms
R
false
false
21,906
r
#Load Packages install.packages("ggplot2") library(ggplot2) install.packages("ggrepel") library(ggrepel) install.packages("ggthemes") library(ggthemes) install.packages("scales") library(scales) install.packages("dplyr") library(dplyr) install.packages("VIM") library(VIM) install.packages("data.table") library(data.table) install.packages("formattable") library(formattable) install.packages("plotly") library(plotly) install.packages("corrplot") library(corrplot) install.packages("GGally") library(GGally) install.packages("caret") library(caret) install.packages("car") library(car) #Read Data movie_data <- read.csv("~/Desktop/movie_metadata.csv", header=TRUE) str(movie_data) # DATA EXPLORATION # Look for duplicates and delete them sum(duplicated(movie_data)) movie_data <- movie_data[!duplicated(movie_data),] # Tidy the movie title- garbage found before the actual name library(stringr) movie_data$movie_title <- gsub("Â", "", as.character(factor(movie_data$movie_title))) str_trim(movie_data$movie_title, side = "right") # Check all genres of the movies head(movie_data$genres) # Create a dataframe to store the substrings genres.df <- as.data.frame(movie_data[,c("genres", "imdb_score")]) # Separate different genres genres.df$Action <- sapply(1:length(genres.df$genres), function(x) if (genres.df[x,1] %like% "Action") 1 else 0) genres.df$Action <- sapply(1:length(genres.df$genres), function(x) if (genres.df[x,1] %like% "Action") 1 else 0) genres.df$Adventure <- sapply(1:length(genres.df$genres), function(x) if (genres.df[x,1] %like% "Adventure") 1 else 0) genres.df$Animation <- sapply(1:length(genres.df$genres), function(x) if (genres.df[x,1] %like% "Animation") 1 else 0) genres.df$Biography <- sapply(1:length(genres.df$genres), function(x) if (genres.df[x,1] %like% "Biography") 1 else 0) genres.df$Comedy <- sapply(1:length(genres.df$genres), function(x) if (genres.df[x,1] %like% "Comedy") 1 else 0) genres.df$Crime <- sapply(1:length(genres.df$genres), function(x) if 
(genres.df[x,1] %like% "Crime") 1 else 0) genres.df$Documentary <- sapply(1:length(genres.df$genres), function(x) if (genres.df[x,1] %like% "Documentary") 1 else 0) genres.df$Drama <- sapply(1:length(genres.df$genres), function(x) if (genres.df[x,1] %like% "Drama") 1 else 0) genres.df$Family <- sapply(1:length(genres.df$genres), function(x) if (genres.df[x,1] %like% "Family") 1 else 0) genres.df$Fantasy <- sapply(1:length(genres.df$genres), function(x) if (genres.df[x,1] %like% "Fantasy") 1 else 0) genres.df$`Film-Noir` <- sapply(1:length(genres.df$genres), function(x) if (genres.df[x,1] %like% "Film-Noir") 1 else 0) genres.df$History <- sapply(1:length(genres.df$genres), function(x) if (genres.df[x,1] %like% "History") 1 else 0) genres.df$Horror <- sapply(1:length(genres.df$genres), function(x) if (genres.df[x,1] %like% "Horror") 1 else 0) genres.df$Musical <- sapply(1:length(genres.df$genres), function(x) if (genres.df[x,1] %like% "Musical") 1 else 0) genres.df$Mystery <- sapply(1:length(genres.df$genres), function(x) if (genres.df[x,1] %like% "Mystery") 1 else 0) genres.df$News <- sapply(1:length(genres.df$genres), function(x) if (genres.df[x,1] %like% "News") 1 else 0) genres.df$Romance <- sapply(1:length(genres.df$genres), function(x) if (genres.df[x,1] %like% "Romance") 1 else 0) genres.df$`Sci-Fi` <- sapply(1:length(genres.df$genres), function(x) if (genres.df[x,1] %like% "Sci-Fi") 1 else 0) genres.df$Short <- sapply(1:length(genres.df$genres), function(x) if (genres.df[x,1] %like% "Short") 1 else 0) genres.df$Sport <- sapply(1:length(genres.df$genres), function(x) if (genres.df[x,1] %like% "Sport") 1 else 0) genres.df$Thriller <- sapply(1:length(genres.df$genres), function(x) if (genres.df[x,1] %like% "Thriller") 1 else 0) genres.df$War <- sapply(1:length(genres.df$genres), function(x) if (genres.df[x,1] %like% "War") 1 else 0) genres.df$Western <- sapply(1:length(genres.df$genres), function(x) if (genres.df[x,1] %like% "Western") 1 else 0) # Find the mean 
of imdb score for different genres means <- rep(0,23) for (i in 1:23) { means[i] <- mean(genres.df$imdb_score[genres.df[i+2]==1]) } # Plot the Means barplot(means, main = "Mean od the imdb scores for different genres") # All means are in the range of 6-8, it can be assumed that not a lot of difference will be made to the # IMDB score if genres were present movie_data <- subset(movie_data, select = -c(genres)) # Making sure Genres returns a NULL str(movie_data$genres) # Find Aggregate of NAs in all columns colSums(sapply(movie_data, is.na)) # Plotting a heat map to visualize the missing values missing.values <- aggr(movie_data, sortVars = T, prop = T, sortCombs = T, cex.lab = 1.5, cex.axis = .6, cex.numbers = 5, combined = F, gap = -.2) # Gross and Budget have the highest amount of missing values but both of them are important factors in determing # the IMDB score and thus we remove only the rows which have NA in them movie_data <- movie_data[!is.na(movie_data$gross),] movie_data <- movie_data[!is.na(movie_data$budget),] # Checking how much was our daat affected due to removing rows dim(movie_data) # 23% of data removed, still consists of 3857 records for analysis # Rechecking the number of NAs sum(complete.cases(movie_data)) colSums(sapply(movie_data, is.na)) # aspect ratio has the highest number of NAs, checking how important aspect ration in prediction is table(movie_data$aspect_ratio) # Replacing NAs in aspect ration with 0 movie_data$aspect_ratio[is.na (movie_data$aspect_ratio)] <- 0 # Checking mean where aspect ratio is 1.85 and 2.35 mean(movie_data$imdb_score[movie_data$aspect_ratio == 1.85]) mean(movie_data$imdb_score[movie_data$aspect_ratio == 2.35]) # Checking mean where aspect ratio is not 1.85 and 2.35 mean(movie_data$imdb_score[movie_data$aspect_ratio != 1.85 & movie_data$aspect_ratio != 2.35]) # Observed: The mean in either of the cases isn't deviating much and it can be assumed tht removing this variable # will not affect our analysis movie_data <- 
subset(movie_data, select = -c(aspect_ratio)) # Rechecking if the aspect ratio is still present or is NULL str(movie_data$aspect_ratio) # Replacing NAs and 0s in the Data # Replacing NA in facenumber_in_poster with the average of the column movie_data$facenumber_in_poster[is.na(movie_data$facenumber_in_poster)] <- round(mean(movie_data$facenumber_in_poster, na.rm = TRUE)) # Convert 0s in the data to NAs movie_data[,c(5,6,8,13,24,26)][movie_data[,c(5,6,8,13,24,26)] == 0] <- NA # Replacing NA in num_critic_for_reviews with the average of the column movie_data$num_critic_for_reviews[is.na(movie_data$num_critic_for_reviews)] <- round(mean(movie_data$num_critic_for_reviews, na.rm = TRUE)) # Replacing NA in duration with the average of the column movie_data$duration[is.na(movie_data$duration)] <- round(mean(movie_data$duration, na.rm = TRUE)) # Replacing NA in director_facebook_likes with the average of the column movie_data$director_facebook_likes[is.na(movie_data$director_facebook_likes)] <- round(mean(movie_data$director_facebook_likes, na.rm = TRUE)) # Replacing NA in actor_3_facebook_likes with the average of the column movie_data$actor_3_facebook_likes[is.na(movie_data$actor_3_facebook_likes)] <- round(mean(movie_data$actor_3_facebook_likes, na.rm = TRUE)) # Replacing NA in actor_1_facebook_likes with the average of the column movie_data$actor_1_facebook_likes[is.na(movie_data$actor_1_facebook_likes)] <- round(mean(movie_data$actor_1_facebook_likes, na.rm = TRUE)) # Replacing NA in cast_total_facebook_likes with the average of the column movie_data$cast_total_facebook_likes[is.na(movie_data$cast_total_facebook_likes)] <- round(mean(movie_data$cast_total_facebook_likes, na.rm = TRUE)) # Replacing NA in actor_2_facebook_likes with the average of the column movie_data$actor_2_facebook_likes[is.na(movie_data$actor_2_facebook_likes)] <- round(mean(movie_data$actor_2_facebook_likes, na.rm = TRUE)) # Replacing NA in movie_facebook_likes with the average of the column 
movie_data$movie_facebook_likes[is.na(movie_data$movie_facebook_likes)] <- round(mean(movie_data$movie_facebook_likes, na.rm = TRUE)) # Finding the missing values in content rating table(movie_data$content_rating) # Blanks are to be considered as missing values movie_data <- movie_data[!(movie_data$content_rating %in% ""),] # Re-evaluating content ratings # M = GP = PG, X = NC-17. Replace M and GP with PG and replace X with NC-17 movie_data$content_rating movie_data$content_rating[movie_data$content_rating == 'M'] <- 'PG' movie_data$content_rating[movie_data$content_rating == 'GP'] <- 'PG' movie_data$content_rating[movie_data$content_rating == 'X'] <- 'NC-17' # Replace “Approved”, “Not Rated”, “Passed”, “Unrated” with the most common rating “R” movie_data$content_rating[movie_data$content_rating == 'Approved'] <- 'R' movie_data$content_rating[movie_data$content_rating == 'Not Rated'] <- 'R' movie_data$content_rating[movie_data$content_rating == 'Passed'] <- 'R' movie_data$content_rating[movie_data$content_rating == 'Unrated'] <- 'R' movie_data$content_rating <- factor(movie_data$content_rating) table(movie_data$content_rating) # Creating 2 columns profit and percentage of return based on gross and budget movie_data<- movie_data %>% mutate(profit = gross - budget, return_on_investment_perc = (profit/budget)*100) # Checking if movie color is an influential factor towards it's score table(movie_data$color) # It can be observed that the data in color is completely partial towards colored movies # and thus it is not an influential factor and we can remove it movie_data <- subset(movie_data, select = -c(color)) # Checking if color is removed from the data and returns a NULL value movie_data$color # Checking if movie color is an influential factor towards it's score table(movie_data$language) # It can be observed that the data in langauges is completely partial towards english movies # and thus it is not an influential factor and we can remove it movie_data <- 
subset(movie_data, select = -c(language)) # Checking if language is removed from the data and returns a NULL value movie_data$language # Checking if the country the movie is produced in is an influential factor towards it's score table(movie_data$country) # Approximately 79% movies are form the US, 8% from UK and 13% from other countries # Thus we collectovely represent the movie locations as: US, UK, Others levels(movie_data$country) <- c(levels(movie_data$country), "Others") movie_data$country[(movie_data$country != 'USA')&(movie_data$country != 'UK')] <- 'Others' movie_data$country <- factor(movie_data$country) # Checking if only 3 locations are available table(movie_data$country) # DATA VISUALIZATION # Histogram of movies released each year ggplot(movie_data, aes(title_year)) + geom_bar() + labs(x = "Year movie release", y = "No. of movies released", title = "Histogram of no. of movies released each year") + theme(plot.title = element_text(hjust = 0.5)) # It can be seen that the movie boom came after 1980 and thus we represent the data only after 1980 movie_data <- movie_data[movie_data$title_year >= 1980,] # Visualizing top 20 movies based on profits in Million$ install.packages("ggrepel") library(ggrepel) movie_data %>% filter(title_year %in% c(2000:2016)) %>% arrange(desc(profit)) %>% top_n(20, profit) %>% ggplot(aes(x=budget/1000000, y=profit/1000000)) + geom_point() + geom_smooth() + geom_text_repel(aes(label=movie_title)) + labs(x = "Budget in $million", y = "Profit in $million", title = "Top 10 Profitable Movies") + theme(plot.title = element_text(hjust = 0.5)) # Using profits and return on investment variables are criteria to find 20 most profitable movies movie_data %>% filter(budget > 100000) %>% mutate(profit = gross - budget, return_on_investment_perc = (profit/budget)*100) %>% arrange(desc(profit)) %>% top_n(20, profit) %>% ggplot(aes(x=budget/1000000, y = return_on_investment_perc)) + geom_point(size = 2) + geom_smooth(size = 1) + 
geom_text_repel(aes(label = movie_title), size = 3) + xlab("Budget in $million") + ylab("Percent of Return on Investment") + ggtitle("20 Most Profitable Movies based on their Return on Investment") # Visualizing 20 top directors based on the highest IMDB scores install.packages("formattable") library(formattable) movie_data %>% group_by(director_name) %>% summarise(avg_imdb = mean(imdb_score)) %>% arrange(desc(avg_imdb)) %>% top_n(20, avg_imdb) %>% formattable(list(avg_imdb = color_bar("Red")), align = 'l') # Plotting commerical success vs critical acclaim movie_data %>% top_n(20, profit) %>% ggplot(aes(x = imdb_score, y = gross/10^6, size = profit/10^6, color = content_rating)) + geom_point() + geom_hline(aes(yintercept = 600)) + geom_vline(aes(xintercept = 7.75)) + geom_text_repel(aes(label = movie_title), size = 4) + xlab("IMDB Score") + ylab("Gross Money earned in million$") + ggtitle("Commercial Success Vs Critical Acclaim") + annotate("text", x = 8.5, y = 700, label = "High Ratings \n & High Gross") + theme(plot.title = element_text(hjust = 0.5)) # The above observation shows that there is hardly any correlation between critical acclaim and the movie's commercial success # Visualizing relation between facebook likes and IMDB scores library(plotly) movie_data %>% plot_ly(x = ~movie_facebook_likes, y = ~imdb_score, color = ~content_rating , mode = "markers", text = ~content_rating, alpha = 0.7, type = "scatter") # Movies with high facebook likes can be seen to have higher IMDB score # DATA PRE-PROCESSING install.packages("data.table") library(data.table) # Find number of directors sum(uniqueN(movie_data$director_name)) # Find number of actors sum(uniqueN(movie_data[, c("actor_1_name", "actor_2_name", "actor_3_name")])) # The names of the directors, actors 1 2 3 are so different that it will not contribute in predicting the score. 
# The plot keyword is too diverse to be used as a predictor # The movie IMDB link is redundant movie_data <- subset(movie_data, select = -c(director_name, actor_2_name, actor_1_name, movie_title, actor_3_name, plot_keywords, movie_imdb_link)) # To avoid multicollinearity we remove the 2 previously added variables movie_data <- subset(movie_data, select = -c(profit, return_on_investment_perc)) # Plot heatmap of the entire data as of now ggcorr(movie_data, label = TRUE, label_round = 2, label_size = 3.5, size = 2, hjust = .85) + ggtitle("Correlation Heatmap") + theme(plot.title = element_text(hjust = 0.5)) # Based on the heatmap, we can see some high correlations (>0.7) between predictors. # The highest correlation value observed is 0.95 and we can see that actor_1_facebook_likes is highly correlated with the cast_total_facebook_likes # and both actor2 and actor3 are also correlated to the total. # Thus we modify them into 2 variables: actor_1_facebook_likes and other_actors_facebook_likes. movie_data$other_actors_facebook_likes <- movie_data$actor_2_facebook_likes + movie_data$actor_3_facebook_likes # There is high correlations among num_voted_users, num_user_for_reviews and num_critic_for_reviews. # We want to keep num_voted_users and take the ratio of num_user_for_reviews and num_critic_for_reviews. movie_data$critic_review_ratio <- movie_data$num_critic_for_reviews / movie_data$num_user_for_reviews # Delete Columns movie_data <- subset(movie_data, select = -c(cast_total_facebook_likes, actor_2_facebook_likes, actor_3_facebook_likes, num_critic_for_reviews, num_user_for_reviews)) # Plotting heatmap to review post changes ggcorr(movie_data, label = TRUE, label_round = 2, label_size = 4, size = 3, hjust = .85) + ggtitle("Correlation Heatmap") + theme(plot.title = element_text(hjust = 0.5)) # No strong correlation of value greater than 0.7 observed # The aim is to build a project wherein the model predicts whether the movie is good or bad. 
So bin the scores in four buckets: less than 4(Bad), # 4-6(OK), 6-8(Good) and 8-10(Excellent) movie_data$binned_score <- cut(movie_data$imdb_score, breaks = c(0,4,6,8,10)) # Rearranging the data and renaming the column to make it readable movie_data <- movie_data[,c(9,4,5,14,12,2,3,13,1,6,10,7,8,11,15)] colnames(movie_data) <- c("budget", "gross", "user_vote", "critic_review_ratio", "movie_fb", "director_fb", "actor1_fb", "other_actors_fb", "duration", "face_number", "year", "country", "content", "imdb_score", "binned_score") # To apply models, spliting the data into training, validation and test sets with the ratio of 6:2:2 set.seed(45) train.index <- sample(row.names(movie_data), dim(movie_data)[1]*0.6) valid.index <- sample(setdiff(row.names(movie_data), train.index), dim(movie_data)[1]*0.2) test.index <- setdiff(row.names(movie_data), union(train.index, valid.index)) train <- movie_data[train.index, ] valid <- movie_data[valid.index, ] test <- movie_data[test.index, ] # IMPLEMNETATION OF ALGORITHMS # CLASSIFICATION TREE # Implementing a full grown tree library(rpart) library(rpart.plot) # Full grown tree class.tree <- rpart(binned_score ~ . -imdb_score, data = train, method = "class") ## plot tree prp(class.tree, type = 1, extra = 1, under = TRUE, split.font = 2, varlen = 0) # Implementing Best pruned tree set.seed(51) cv.ct <- rpart(binned_score ~ . 
-imdb_score, data = train, method = "class", cp = 0.00001, minsplit = 5, xval = 5) printcp(cv.ct) pruned.ct <- prune(cv.ct, cp = cv.ct$cptable[which.min(cv.ct$cptable[,"xerror"]),"CP"]) length(pruned.ct$frame$var[pruned.ct$frame$var == "<leaf>"]) prp(pruned.ct, type = 1, extra = 1, split.font = 1, varlen = -10) # Apply model on training set tree.pred.train <- predict(pruned.ct, train, type = "class") # Generate confusion matrix for training data confusionMatrix(tree.pred.train, train$binned_score) # Apply model on validation set tree.pred.valid <- predict(pruned.ct, valid, type = "class") # Generate confusion matrix for validation data confusionMatrix(tree.pred.valid, valid$binned_score) # Apply model on test set tree.pred.test <- predict(pruned.ct, test, type = "class") # Generate confusion matrix for test data confusionMatrix(tree.pred.test, test$binned_score) # K NEAREST NEIGHBOUR library(FNN) # Using model.matrix() to create dummy variables for country and content movie_data2 <- movie_data movie_data2$country <- as.factor(movie_data2$country) movie_data2$content <- as.factor(movie_data2$content) movie_data2[,c("country_UK", "country_USA", "country_Others")] <- model.matrix( ~ country - 1, data = movie_data2) movie_data2[,c("content_G", "content_NC17", "content_PG", "content_PG13", "content_R")] <- model.matrix( ~ content - 1, data = movie_data2) # Select useful variables for future prediction movie_data2 <- movie_data2[, c(1,2,3,4,5,6,7,8,9,10,11,16,17,18,19,20,21,22,23,15)] # Partition the data into training and validation sets set.seed(52) train2 <- movie_data2[train.index, ] valid2 <- movie_data2[valid.index, ] test2 <- movie_data2[test.index, ] # Initializing normalized training, validation, test data, complete data frames to originals train2.norm <- train2 valid2.norm <- valid2 test2.norm <- test2 movie_data2.norm <- movie_data2 # Using preProcess() from the caret package to normalize predictors norm.values <- preProcess(train2[, -20], method=c("center", 
"scale")) train2.norm[, -20] <- predict(norm.values, train2[, -20]) valid2.norm[, -20] <- predict(norm.values, valid2[, -20]) test2.norm[, -20] <- predict(norm.values, test2[, -20]) movie_data2.norm[, -20] <- predict(norm.values, movie_data2[, -20]) # Finding the best K # Initialize a data frame with two columns: k, and accuracy accuracy.df <- data.frame(k = seq(1, 20, 1), accuracy = rep(0, 20)) # Computing knn for different k on validation data for(i in 1:20) { knn.pred <- knn(train2.norm[, -20], valid2.norm[, -20], cl = train2.norm[, 20], k = i) accuracy.df[i, 2] <- confusionMatrix(knn.pred, valid2.norm[, 20])$overall[1] } accuracy.df # Applying model on test set knn.pred.test <- knn(train2.norm[, -20], test2.norm[, -20], cl = train2.norm[, 20], k = 9) # Generating confusion matrix for test data accuracy <- confusionMatrix(knn.pred.test, test2.norm[, 20])$overall[1] accuracy # RANDOM FOREST install.packages("randomForest") library(randomForest) set.seed(53) rf <- randomForest(binned_score ~ . 
-imdb_score, data = train, mtry = 5) # Show model error plot(rf) legend('topright', colnames(rf$err.rate), col=1:5, fill=1:5) # Get importance importance <- importance(rf) varImportance <- data.frame(Variables = row.names(importance), Importance = round(importance[ ,'MeanDecreaseGini'],2)) # Creating a rank variable based on importance rankImportance <- varImportance %>% mutate(Rank = paste0('#',dense_rank(desc(Importance)))) # Using ggplot2 to visualize the relative importance of variables ggplot(rankImportance, aes(x = reorder(Variables, Importance), y = Importance, fill = Importance)) + geom_bar(stat='identity') + geom_text(aes(x = Variables, y = 0.5, label = Rank), hjust=0, vjust=0.55, size = 4, colour = 'red') + labs(x = 'Variables') + coord_flip() + theme_few() install.packages("caret") library(caret) set.seed(632) # apply model on validation set rf.pred.valid <- predict(rf, valid) # generate confusion matrix for validation data confusionMatrix(rf.pred.valid, valid$binned_score)
# Shiny draft board viewer: renders board.csv as a single full-page table.
library(shiny)
library(DT)

# Draft board data, read once at app startup.
board <- read.csv("board.csv")

# Table display options: show every row at once, no search box.
table_opts <- list(paging = FALSE, searching = FALSE)

ui <- fluidPage(
  br(),
  br(),
  fluidRow(DT::dataTableOutput("draftboard"))
)

server <- function(input, output) {
  output$draftboard <- renderDataTable(
    board,
    rownames = FALSE,
    options = table_opts
  )
}

# Run the application
shinyApp(ui = ui, server = server)
/app.R
no_license
amazehayes/draftboard
R
false
false
388
r
# Shiny draft board viewer: renders board.csv as a single full-page table.
library(shiny)
library(DT)

# Draft board data, read once at app startup.
board <- read.csv("board.csv")

# Table display options: show every row at once, no search box.
table_opts <- list(paging = FALSE, searching = FALSE)

ui <- fluidPage(
  br(),
  br(),
  fluidRow(DT::dataTableOutput("draftboard"))
)

server <- function(input, output) {
  output$draftboard <- renderDataTable(
    board,
    rownames = FALSE,
    options = table_opts
  )
}

# Run the application
shinyApp(ui = ui, server = server)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/relative_position.R \name{relative_position} \alias{relative_position} \title{Get relative positions} \usage{ relative_position(vals) } \description{ Get relative positions }
/man/relative_position.Rd
no_license
rmatam/mltools
R
false
true
254
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/relative_position.R \name{relative_position} \alias{relative_position} \title{Get relative positions} \usage{ relative_position(vals) } \description{ Get relative positions }
# Return the hospital name at a given rank within one state's subset.
#
# state_subset: rows of the outcome-of-care data for a single state.
# col_num: column index holding the 30-day mortality rate for the outcome.
# num: requested rank -- "best", "worst", or a positive integer.
# Returns the hospital name (column 2), or NA when num exceeds the number
# of hospitals with a non-missing rate.
num_helper <- function(state_subset, col_num, num) {
  # 30-day mortality rates for the requested outcome
  outcome_arr <- as.numeric(state_subset[, col_num])
  # number of hospitals with a usable (non-NA) rate
  len <- sum(!is.na(outcome_arr))
  if (num == "best") {
    rank_helper(state_subset, outcome_arr, 1)
  } else if (num == "worst") {
    rank_helper(state_subset, outcome_arr, len)
  } else if (num > len) {
    NA
  } else {
    rank_helper(state_subset, outcome_arr, num)
  }
}

# Hospital name (column 2) ranked num-th by outcome rate, using the
# hospital name itself as tie-breaker.  order() places NA rates last,
# so ranks 1..len always land on hospitals with a non-missing rate.
rank_helper <- function(state_subset, outcome_arr, num) {
  state_subset[, 2][order(outcome_arr, state_subset[, 2])[num]]
}

# For every state, find the hospital at rank `num` for the given outcome.
#
# outcome: one of "heart attack", "heart failure", "pneumonia".
# num: "best", "worst", or a positive integer rank (default "best").
# Returns a data frame with columns `hospital` and `state`, one row per state.
rankall <- function(outcome, num = "best") {
  directory <- "./data/outcome-of-care-measures.csv"
  data <- read.csv(directory, colClasses = "character")
  valid_outcomes <- c("heart attack", "heart failure", "pneumonia")
  if (!outcome %in% valid_outcomes) {
    stop("invalid outcome")
  }
  # Map the outcome to its mortality-rate column once, instead of
  # re-testing the outcome string on every loop iteration.
  col_num <- switch(outcome,
                    "heart attack"  = 11,
                    "heart failure" = 17,
                    "pneumonia"     = 23)
  state_arr <- sort(unique(data$State))
  hospital <- rep(NA_character_, length(state_arr))
  for (i in seq_along(state_arr)) {
    # column 7 is the state abbreviation
    state_subset <- data[data[, 7] == state_arr[i], ]
    hospital[i] <- num_helper(state_subset, col_num, num)
  }
  # create the data frame to return
  data.frame(hospital = hospital, state = state_arr)
}

# tests
head(rankall("heart attack", 20), 10)
tail(rankall("pneumonia", "worst"), 3)
tail(rankall("heart failure"), 10)
/week4/rankall.R
no_license
banerjeeshayantan/Coursera-Rprog
R
false
false
1,864
r
# Return the hospital name at a given rank within one state's subset.
#
# state_subset: rows of the outcome-of-care data for a single state.
# col_num: column index holding the 30-day mortality rate for the outcome.
# num: requested rank -- "best", "worst", or a positive integer.
# Returns the hospital name (column 2), or NA when num exceeds the number
# of hospitals with a non-missing rate.
num_helper <- function(state_subset, col_num, num) {
  # 30-day mortality rates for the requested outcome
  outcome_arr <- as.numeric(state_subset[, col_num])
  # number of hospitals with a usable (non-NA) rate
  len <- sum(!is.na(outcome_arr))
  if (num == "best") {
    rank_helper(state_subset, outcome_arr, 1)
  } else if (num == "worst") {
    rank_helper(state_subset, outcome_arr, len)
  } else if (num > len) {
    NA
  } else {
    rank_helper(state_subset, outcome_arr, num)
  }
}

# Hospital name (column 2) ranked num-th by outcome rate, using the
# hospital name itself as tie-breaker.  order() places NA rates last,
# so ranks 1..len always land on hospitals with a non-missing rate.
rank_helper <- function(state_subset, outcome_arr, num) {
  state_subset[, 2][order(outcome_arr, state_subset[, 2])[num]]
}

# For every state, find the hospital at rank `num` for the given outcome.
#
# outcome: one of "heart attack", "heart failure", "pneumonia".
# num: "best", "worst", or a positive integer rank (default "best").
# Returns a data frame with columns `hospital` and `state`, one row per state.
rankall <- function(outcome, num = "best") {
  directory <- "./data/outcome-of-care-measures.csv"
  data <- read.csv(directory, colClasses = "character")
  valid_outcomes <- c("heart attack", "heart failure", "pneumonia")
  if (!outcome %in% valid_outcomes) {
    stop("invalid outcome")
  }
  # Map the outcome to its mortality-rate column once, instead of
  # re-testing the outcome string on every loop iteration.
  col_num <- switch(outcome,
                    "heart attack"  = 11,
                    "heart failure" = 17,
                    "pneumonia"     = 23)
  state_arr <- sort(unique(data$State))
  hospital <- rep(NA_character_, length(state_arr))
  for (i in seq_along(state_arr)) {
    # column 7 is the state abbreviation
    state_subset <- data[data[, 7] == state_arr[i], ]
    hospital[i] <- num_helper(state_subset, col_num, num)
  }
  # create the data frame to return
  data.frame(hospital = hospital, state = state_arr)
}

# tests
head(rankall("heart attack", 20), 10)
tail(rankall("pneumonia", "worst"), 3)
tail(rankall("heart failure"), 10)
#' getAllBoundsParam
#' Computes the possible bounds for all parameters of a polytope defined by
#' A.x<=b and C.x=v or by a CaNmod object
#' @param x either a CaNmod oject or a named list with at least a matrix A and
#' a vector b (A.x<=b) and optionnally a matrix C and a vector v (C.x=v)
#' @param progressBar should a progress bar be displayed (default TRUE)
#'
#' @importFrom utils setTxtProgressBar
#' @importFrom utils txtProgressBar
#' @importFrom ROI objective
#' @importFrom ROI L_objective
#'
#' @return a datafame with first column corresponding to colnames(A), and
#' corresponding lower bounds (column 2) and upper bounds (column 3)
#' @examples
#' n <- 20
#' A1 <- -diag(n)
#' b1 <- as.matrix(rep(0,n))
#' A2 <- diag(n)
#' b2 <- as.matrix(rep(1,n))
#' A <- rbind(A1,A2)
#' b <- rbind(b1,b2)
#' X0 <- getAllBoundsParam(list(A = A, b = b))
#' @export
getAllBoundsParam <- function(x, progressBar = TRUE) {
  # Normalise the input (CaNmod object or plain list) into A, b, C, v parts.
  x <- reformatX(x)
  A <- x$A
  b <- x$b
  C <- x$C
  v <- x$v
  nbparam <- ncol(A)
  # Ensure every column of A has a name; these names label the output rows.
  if (is.null(colnames(A))) {
    colnames(A) <- paste("col", seq_len(ncol(A)), sep = "")
  }
  # Equality-constraint matrix C (when present) shares A's column names.
  if (is.null(colnames(C)) & !is.null(C)) colnames(C) <- colnames(A)
  # Presolve the LP once per optimisation sense; presolving may fix some
  # parameters outright (reported in presolved$fixed).
  presolvedmin <- presolveLPMod(A, b, C, v, sense = "min")
  presolvedmax <- presolveLPMod(A, b, C, v, sense = "max")
  if (progressBar) pb <- txtProgressBar(min = 0, max = nbparam, style = 3)
  # For each parameter p, solve two LPs (minimise then maximise x[p])
  # over the presolved polytope; rows of `bounds` are min / max.
  bounds <- sapply(1:nbparam, function(p) {
    if (progressBar) setTxtProgressBar(pb, p)
    sapply(c("min", "max"), function(s){
      if (s == "min"){
        maximum <- FALSE
        presolved <- presolvedmin
      } else {
        maximum <- TRUE
        presolved <- presolvedmax
      }
      if (!colnames(A)[p] %in% names(presolved$fixed)){
        # Parameter survived presolve: build an objective vector that
        # selects only this parameter and optimise it.
        ip <- match(colnames(A)[p], colnames(presolved$A))
        ob <- rep(0, ncol(presolved$A))
        ob[ip] <- 1
        ROI::objective(presolved$OP) <- L_objective(ob)
        presolved$OP$lp_model <- defineLPSolveMod(presolved$A,
                                                  presolved$b,
                                                  presolved$C,
                                                  presolved$v,
                                                  presolved$lower,
                                                  presolved$upper,
                                                  maximum,
                                                  ob)
        set.objfn(presolved$OP$lp_model, ob)
        getParamMinMax(presolved$OP, ip)
      } else {
        # Parameter was fixed by presolve: both bounds equal its fixed value.
        presolved$fixed[colnames(A)[p]]
      }
    })
  })
  data.frame(
    param = colnames(A),
    lowerbound = bounds[1, ],
    upperbound = bounds[2, ]
  )
}
/RCaNmodel/R/getAllBoundsParam.R
permissive
inrae/RCaNmodel
R
false
false
2,703
r
#' getAllBoundsParam
#' Computes the possible bounds for all parameters of a polytope defined by
#' A.x<=b and C.x=v or by a CaNmod object
#' @param x either a CaNmod oject or a named list with at least a matrix A and
#' a vector b (A.x<=b) and optionnally a matrix C and a vector v (C.x=v)
#' @param progressBar should a progress bar be displayed (default TRUE)
#'
#' @importFrom utils setTxtProgressBar
#' @importFrom utils txtProgressBar
#' @importFrom ROI objective
#' @importFrom ROI L_objective
#'
#' @return a datafame with first column corresponding to colnames(A), and
#' corresponding lower bounds (column 2) and upper bounds (column 3)
#' @examples
#' n <- 20
#' A1 <- -diag(n)
#' b1 <- as.matrix(rep(0,n))
#' A2 <- diag(n)
#' b2 <- as.matrix(rep(1,n))
#' A <- rbind(A1,A2)
#' b <- rbind(b1,b2)
#' X0 <- getAllBoundsParam(list(A = A, b = b))
#' @export
getAllBoundsParam <- function(x, progressBar = TRUE) {
  # Normalise the input (CaNmod object or plain list) into A, b, C, v parts.
  x <- reformatX(x)
  A <- x$A
  b <- x$b
  C <- x$C
  v <- x$v
  nbparam <- ncol(A)
  # Ensure every column of A has a name; these names label the output rows.
  if (is.null(colnames(A))) {
    colnames(A) <- paste("col", seq_len(ncol(A)), sep = "")
  }
  # Equality-constraint matrix C (when present) shares A's column names.
  if (is.null(colnames(C)) & !is.null(C)) colnames(C) <- colnames(A)
  # Presolve the LP once per optimisation sense; presolving may fix some
  # parameters outright (reported in presolved$fixed).
  presolvedmin <- presolveLPMod(A, b, C, v, sense = "min")
  presolvedmax <- presolveLPMod(A, b, C, v, sense = "max")
  if (progressBar) pb <- txtProgressBar(min = 0, max = nbparam, style = 3)
  # For each parameter p, solve two LPs (minimise then maximise x[p])
  # over the presolved polytope; rows of `bounds` are min / max.
  bounds <- sapply(1:nbparam, function(p) {
    if (progressBar) setTxtProgressBar(pb, p)
    sapply(c("min", "max"), function(s){
      if (s == "min"){
        maximum <- FALSE
        presolved <- presolvedmin
      } else {
        maximum <- TRUE
        presolved <- presolvedmax
      }
      if (!colnames(A)[p] %in% names(presolved$fixed)){
        # Parameter survived presolve: build an objective vector that
        # selects only this parameter and optimise it.
        ip <- match(colnames(A)[p], colnames(presolved$A))
        ob <- rep(0, ncol(presolved$A))
        ob[ip] <- 1
        ROI::objective(presolved$OP) <- L_objective(ob)
        presolved$OP$lp_model <- defineLPSolveMod(presolved$A,
                                                  presolved$b,
                                                  presolved$C,
                                                  presolved$v,
                                                  presolved$lower,
                                                  presolved$upper,
                                                  maximum,
                                                  ob)
        set.objfn(presolved$OP$lp_model, ob)
        getParamMinMax(presolved$OP, ip)
      } else {
        # Parameter was fixed by presolve: both bounds equal its fixed value.
        presolved$fixed[colnames(A)[p]]
      }
    })
  })
  data.frame(
    param = colnames(A),
    lowerbound = bounds[1, ],
    upperbound = bounds[2, ]
  )
}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/peeling_trajectory.R \name{jump.prim} \alias{jump.prim} \title{Peeling trajectory jump} \usage{ jump.prim(object, rel.support = TRUE) } \arguments{ \item{object}{A \code{prim} object resulting from a call to \code{peeling}.} \item{rel.support}{Logical indicating if the trajectory difference should be relative to the support for finding the jump (default to TRUE).} } \value{ A list with elements: \item{trajectory.difference}{Numeric vector of the computed (relative) differences.} \item{npeel.opt}{Integer giving the npeel value of the highest difference.} \item{final.box}{The extracted box corresponding to \code{npeel.opt}. See \code{\link{extract.box}}.} } \description{ Identifies a jump in the peeling trajectory of \code{object}. } \details{ Computes the (relative) trajectory differences of \code{object}: \deqn{\frac{yfun[k] - yfun[k - 1]}{support[k - 1] - support[k]}}{(yfun[k] - yfun[k - 1])/(support[k - 1] - support[k])} and returns its maximum value. The rationale is that the biggest jump in peeling trajectory gives a good cut-off point for the peeling algorithm. If \code{rel.support = FALSE}, the denominator is not used in the differences calculation. } \examples{ # A simple bump set.seed(12345) x <- matrix(runif(2000), ncol = 2, dimnames = list(NULL, c("x1", "x2"))) y <- 2 * x[,1] + 5 * x[,2] + 10 * (x[,1] >= .8 & x[,2] >= .5) + rnorm(1000) # Peeling with alpha = 0.05 and beta.stop = 0.05 peel_res <- peeling(y, x, beta.stop = 0.05) # Automatically choose the best box chosen <- jump.prim(peel_res) } \references{ Masselot P., Chebana F., Campagna C., Lavigne E., Ouarda T.B.M.J., Gosselin P. On threshold identification for weather-health warning systems. \emph{Submitted}. }
/man/jump.prim.Rd
no_license
PierreMasselot/primr
R
false
true
1,874
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/peeling_trajectory.R \name{jump.prim} \alias{jump.prim} \title{Peeling trajectory jump} \usage{ jump.prim(object, rel.support = TRUE) } \arguments{ \item{object}{A \code{prim} object resulting from a call to \code{peeling}.} \item{rel.support}{Logical indicating if the trajectory difference should be relative to the support for finding the jump (default to TRUE).} } \value{ A list with elements: \item{trajectory.difference}{Numeric vector of the computed (relative) differences.} \item{npeel.opt}{Integer giving the npeel value of the highest difference.} \item{final.box}{The extracted box corresponding to \code{npeel.opt}. See \code{\link{extract.box}}.} } \description{ Identifies a jump in the peeling trajectory of \code{object}. } \details{ Computes the (relative) trajectory differences of \code{object}: \deqn{\frac{yfun[k] - yfun[k - 1]}{support[k - 1] - support[k]}}{(yfun[k] - yfun[k - 1])/(support[k - 1] - support[k])} and returns its maximum value. The rationale is that the biggest jump in peeling trajectory gives a good cut-off point for the peeling algorithm. If \code{rel.support = FALSE}, the denominator is not used in the differences calculation. } \examples{ # A simple bump set.seed(12345) x <- matrix(runif(2000), ncol = 2, dimnames = list(NULL, c("x1", "x2"))) y <- 2 * x[,1] + 5 * x[,2] + 10 * (x[,1] >= .8 & x[,2] >= .5) + rnorm(1000) # Peeling with alpha = 0.05 and beta.stop = 0.05 peel_res <- peeling(y, x, beta.stop = 0.05) # Automatically choose the best box chosen <- jump.prim(peel_res) } \references{ Masselot P., Chebana F., Campagna C., Lavigne E., Ouarda T.B.M.J., Gosselin P. On threshold identification for weather-health warning systems. \emph{Submitted}. }
### Copyright (c) 2019 | Asif Al Faisal | Data Analyst | email: faisal.iit.du@gmail.com | CIMMYT-Bangladesh ### library(raster) library(rasterVis) library(sp) library(rgdal) library(RStoolbox) library(ggplot2) crop_name = "Wheat" season = "13-14" idx = "EVI" directory <- paste0("E:\\CIMMYT\\BIG DATA 2 CSA\\01_CCAFS_BIG_DATA2CSA\\PCA\\Input\\",idx,"\\",crop_name,"\\",season,"\\") setwd(directory) getwd() files <- dir(directory,recursive=TRUE, full.names=TRUE, pattern="\\.tif$") write.table(files, file = "inputRasters.txt", row.names = F) st.idx <- stack(files) idx.scale <- scale(st.idx) rpc <- rasterPCA(idx.scale, nComp = 4) output <- summary(rpc$model) vars <- output$sdev^2 vars <- vars/sum(vars) write.csv(rbind("Standard deviation" = output$sdev, "Proportion of Variance" = vars, "Cumulative Proportion" = cumsum(vars)), file="Output.csv") PC<-c() for (i in 1:dim(rpc$map)[3]){ #### PC[i] = c(evalulation(expression)) *** parse() creates expression from string ### PC[i] <- c(eval(parse(text=paste0("rpc$map$PC",i)))) writeRaster(PC[[i]], filename = paste0(idx,"_PC",i,".tiff"), "GTiff", overwrite=T) }
/Code/150919_Raster PCA Script.R
no_license
AsifAlFaisal/extracPCAraster
R
false
false
1,240
r
### Copyright (c) 2019 | Asif Al Faisal | Data Analyst | email: faisal.iit.du@gmail.com | CIMMYT-Bangladesh ### library(raster) library(rasterVis) library(sp) library(rgdal) library(RStoolbox) library(ggplot2) crop_name = "Wheat" season = "13-14" idx = "EVI" directory <- paste0("E:\\CIMMYT\\BIG DATA 2 CSA\\01_CCAFS_BIG_DATA2CSA\\PCA\\Input\\",idx,"\\",crop_name,"\\",season,"\\") setwd(directory) getwd() files <- dir(directory,recursive=TRUE, full.names=TRUE, pattern="\\.tif$") write.table(files, file = "inputRasters.txt", row.names = F) st.idx <- stack(files) idx.scale <- scale(st.idx) rpc <- rasterPCA(idx.scale, nComp = 4) output <- summary(rpc$model) vars <- output$sdev^2 vars <- vars/sum(vars) write.csv(rbind("Standard deviation" = output$sdev, "Proportion of Variance" = vars, "Cumulative Proportion" = cumsum(vars)), file="Output.csv") PC<-c() for (i in 1:dim(rpc$map)[3]){ #### PC[i] = c(evalulation(expression)) *** parse() creates expression from string ### PC[i] <- c(eval(parse(text=paste0("rpc$map$PC",i)))) writeRaster(PC[[i]], filename = paste0(idx,"_PC",i,".tiff"), "GTiff", overwrite=T) }
#' Plots a numeric matrix using a color scale #' #' Plots a numeric matrix using a color scale. #' #' @param x Numeric matrix with column and row names. #' @param breaks Numeric vector defining breaks in the legend. This typically #' includes the lower limit (e.g. zero for percentage data) but not the #' upper limit. The default is useful for fraction data (range 0...1). #' @param colors A vector of color names to build the palette. #' @param useHatching Use hatching to improve the distinction between #' neighboring colors? (logical) #' @param translateRownames Function to translate row names. Must accept a vector #' and return a vector of character strings of the same length as the input #' @param translateColnames Function to translate column names. Must accept a vector #' and return a vector of character strings of the same length as the input #' @param mar Vector of length 4 passed to the \code{mar} element of #' \code{\link[graphics]{par}}. The last element (right margin) must be chosen #' large enough so as to not cut off the automatic legend. #' @param rowGroups A vector of e.g. factors splitting the rows of \code{x} #' into groups. Lines will be plotted at the transition between groups. #' #' @return \code{invisible(NULL)}. #' #' @note Note that the matrix is plotted in transposed form. #' #' @seealso No links. #' #' @references No references. 
#' #' @author David Kneis \email{david.kneis@@tu-dresden.de} #' #' @export #' #' @examples #'x <- matrix(runif(24), nrow=4, ncol=6, #' dimnames=list(paste("row", 1:4), paste("col", 1:6))) #'plotColorMatrix(x=x, breaks=c(50,20,10,5,2,0)/100, #' useHatching=TRUE, translateColnames=function(x){sapply(x, gsub, #' patter="col", replacement="column", fixed=TRUE)}, rowGroups= c(1,1,2,2)) #'plotColorMatrix(x=x, breaks=c(50,20,10,5,2,0)/100, #' useHatching=FALSE, translateColnames=function(x){sapply(x, gsub, #' patter="col", replacement="column", fixed=TRUE)}, rowGroups= c(1,1,2,2)) plotColorMatrix <- function( x, breaks=c(50,20,10,5,2,0)/100, colors=if (useHatching) c("black", "white") else c("orangered3","khaki","royalblue4"), useHatching=FALSE, translateRownames=function(x){x}, translateColnames=function(x){x}, mar=c(5,5,1,8), rowGroups= rep(1, nrow(x)) ) { breaks <- sort(breaks, decreasing=TRUE) # Define helper functions first ramp <- grDevices::colorRampPalette(colors)(length(breaks)) clr <- function(x, breaks) { cols <- grDevices::colorRampPalette(ramp)(length(breaks)) for (i in 1:length(breaks)) { if (x >= breaks[i]) return(cols[i]) } return("black") } if (useHatching) { dens <- function(x, breaks) { dens <- rep(c(0, 20), length(breaks)) for (i in 1:length(breaks)) { if (x >= breaks[i]) return(dens[i]) } return(0) } } else { dens <- function(x, breaks) { return(0) } } # Actual plotting starts here omar <- graphics::par("mar") graphics::par(mar=mar) graphics::plot(c(0.5,nrow(x)+0.5), c(0.5,ncol(x)+0.5), type="n", xlab="", ylab="", xaxt="n", yaxt="n", bty="n") graphics::axis(1, at=1:nrow(x), label=translateRownames(rownames(x)), las=2, lwd=0, line=-1) graphics::axis(2, at=1:ncol(x), label=translateColnames(colnames(x)), las=1, lwd=0, line=-1) delta <- 0.3 for (nc in 1:ncol(x)) { for (nr in 1:nrow(x)) { graphics::rect(xleft=nr-delta, xright=nr+delta, ybottom=nc-delta, ytop=nc+delta, col=clr(x[nr,nc], breaks)) graphics::rect(xleft=nr-delta, xright=nr+delta, 
ybottom=nc-delta, ytop=nc+delta, col="white", density=dens(x[nr,nc], breaks), border="darkgrey") if ((nr > 1) && (rowGroups[nr] != rowGroups[nr-1])) graphics::lines(x=rep(nr-0.5, 2), y=c(1-delta*1.8,ncol(x)+delta*1.5)) } } rev <- length(breaks):1 labs <- paste0(">",breaks[rev]*100,"%") labs[1] <- gsub(labs[2], pattern="^>(.+)", replacement="<\\1") pos <- c(x=nrow(x)+1.2, y=ncol(x)) graphics::legend(pos["x"], pos["y"], bty="n", xpd=TRUE, fill=sapply(breaks, clr, breaks)[rev], legend=labs, adj=c(0.25,0.3), text.col="white", border="darkgrey") graphics::legend(pos["x"], pos["y"], bty="n", xpd=TRUE, density=sapply(breaks, dens, breaks)[rev], fill="white", legend=labs, adj=c(0.25,0.3), border="darkgrey") graphics::par(mar=omar) }
/R/plotPercentageMatrix.r
no_license
dkneis/knut
R
false
false
4,324
r
#' Plots a numeric matrix using a color scale #' #' Plots a numeric matrix using a color scale. #' #' @param x Numeric matrix with column and row names. #' @param breaks Numeric vector defining breaks in the legend. This typically #' includes the lower limit (e.g. zero for percentage data) but not the #' upper limit. The default is useful for fraction data (range 0...1). #' @param colors A vector of color names to build the palette. #' @param useHatching Use hatching to improve the distinction between #' neighboring colors? (logical) #' @param translateRownames Function to translate row names. Must accept a vector #' and return a vector of character strings of the same length as the input #' @param translateColnames Function to translate column names. Must accept a vector #' and return a vector of character strings of the same length as the input #' @param mar Vector of length 4 passed to the \code{mar} element of #' \code{\link[graphics]{par}}. The last element (right margin) must be chosen #' large enough so as to not cut off the automatic legend. #' @param rowGroups A vector of e.g. factors splitting the rows of \code{x} #' into groups. Lines will be plotted at the transition between groups. #' #' @return \code{invisible(NULL)}. #' #' @note Note that the matrix is plotted in transposed form. #' #' @seealso No links. #' #' @references No references. 
#' #' @author David Kneis \email{david.kneis@@tu-dresden.de} #' #' @export #' #' @examples #'x <- matrix(runif(24), nrow=4, ncol=6, #' dimnames=list(paste("row", 1:4), paste("col", 1:6))) #'plotColorMatrix(x=x, breaks=c(50,20,10,5,2,0)/100, #' useHatching=TRUE, translateColnames=function(x){sapply(x, gsub, #' patter="col", replacement="column", fixed=TRUE)}, rowGroups= c(1,1,2,2)) #'plotColorMatrix(x=x, breaks=c(50,20,10,5,2,0)/100, #' useHatching=FALSE, translateColnames=function(x){sapply(x, gsub, #' patter="col", replacement="column", fixed=TRUE)}, rowGroups= c(1,1,2,2)) plotColorMatrix <- function( x, breaks=c(50,20,10,5,2,0)/100, colors=if (useHatching) c("black", "white") else c("orangered3","khaki","royalblue4"), useHatching=FALSE, translateRownames=function(x){x}, translateColnames=function(x){x}, mar=c(5,5,1,8), rowGroups= rep(1, nrow(x)) ) { breaks <- sort(breaks, decreasing=TRUE) # Define helper functions first ramp <- grDevices::colorRampPalette(colors)(length(breaks)) clr <- function(x, breaks) { cols <- grDevices::colorRampPalette(ramp)(length(breaks)) for (i in 1:length(breaks)) { if (x >= breaks[i]) return(cols[i]) } return("black") } if (useHatching) { dens <- function(x, breaks) { dens <- rep(c(0, 20), length(breaks)) for (i in 1:length(breaks)) { if (x >= breaks[i]) return(dens[i]) } return(0) } } else { dens <- function(x, breaks) { return(0) } } # Actual plotting starts here omar <- graphics::par("mar") graphics::par(mar=mar) graphics::plot(c(0.5,nrow(x)+0.5), c(0.5,ncol(x)+0.5), type="n", xlab="", ylab="", xaxt="n", yaxt="n", bty="n") graphics::axis(1, at=1:nrow(x), label=translateRownames(rownames(x)), las=2, lwd=0, line=-1) graphics::axis(2, at=1:ncol(x), label=translateColnames(colnames(x)), las=1, lwd=0, line=-1) delta <- 0.3 for (nc in 1:ncol(x)) { for (nr in 1:nrow(x)) { graphics::rect(xleft=nr-delta, xright=nr+delta, ybottom=nc-delta, ytop=nc+delta, col=clr(x[nr,nc], breaks)) graphics::rect(xleft=nr-delta, xright=nr+delta, 
ybottom=nc-delta, ytop=nc+delta, col="white", density=dens(x[nr,nc], breaks), border="darkgrey") if ((nr > 1) && (rowGroups[nr] != rowGroups[nr-1])) graphics::lines(x=rep(nr-0.5, 2), y=c(1-delta*1.8,ncol(x)+delta*1.5)) } } rev <- length(breaks):1 labs <- paste0(">",breaks[rev]*100,"%") labs[1] <- gsub(labs[2], pattern="^>(.+)", replacement="<\\1") pos <- c(x=nrow(x)+1.2, y=ncol(x)) graphics::legend(pos["x"], pos["y"], bty="n", xpd=TRUE, fill=sapply(breaks, clr, breaks)[rev], legend=labs, adj=c(0.25,0.3), text.col="white", border="darkgrey") graphics::legend(pos["x"], pos["y"], bty="n", xpd=TRUE, density=sapply(breaks, dens, breaks)[rev], fill="white", legend=labs, adj=c(0.25,0.3), border="darkgrey") graphics::par(mar=omar) }
library(reshape2) # 1. Merges the training and the test sets to create one data set. # read data into respective data frames test_subject <- read.table("UCI HAR Dataset/test/subject_test.txt", sep="", header=FALSE) test_x <- read.table("UCI HAR Dataset/test/X_test.txt", sep="", header=FALSE) test_y <- read.table("UCI HAR Dataset/test/Y_test.txt", sep=",", header=FALSE) # read data into respective data frames train_subject <- read.table("UCI HAR Dataset/train/subject_train.txt", sep="", header=FALSE) train_x <- read.table("UCI HAR Dataset/train/X_train.txt", sep="", header=FALSE) train_y <- read.table("UCI HAR Dataset/train/Y_train.txt", sep="", header=FALSE) # Add column name for label files names(train_y) <- "activity" names(test_y) <- "activity" # Add column name for subject files names(train_subject) <- "subjectID" names(test_subject) <- "subjectID" # Add column names for measurement files feature_names <- read.table("UCI HAR Dataset/features.txt") names(train_x) <- feature_names[[2]] names(test_x) <- feature_names[[2]] # merge files into one data frame train <- cbind( train_subject, train_y, train_x) test <- cbind( test_subject, test_y, test_x) mergedData <- rbind(train, test) # 2 Extract only the measurements on the mean and standard deviation for each measurement. 
# columns with "mean()" or "std()" logicalVector <- grepl("mean", names(mergedData)) | grepl("std", names(mergedData)) #enable subjectid and activity logicalVector[1:2] <- TRUE # remove unnecessary data columns mergedData <- mergedData[, logicalVector] #ncol(mergedData) #names(mergedData) #3 Uses descriptive activity names to name the activities in the data set mergedData$activity <- factor(mergedData$activity, labels=c("WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS", "SITTING", "STANDING", "LAYING")) #4 The data set is already set with descriptive label names in step 1 from feature.txt column 2 #5 tidy data set with the average of each variable for each activity and each subject long_format_melted <- melt( mergedData, id=c("subjectID","activity")) output <- dcast( long_format_melted, subjectID+activity ~ variable, mean) #tidy # output the result to a file write.table(output, "output.txt", row.names=FALSE)
/run_analysis.R
no_license
santhoshdaivajna/GettingAndCleaningData
R
false
false
2,233
r
library(reshape2) # 1. Merges the training and the test sets to create one data set. # read data into respective data frames test_subject <- read.table("UCI HAR Dataset/test/subject_test.txt", sep="", header=FALSE) test_x <- read.table("UCI HAR Dataset/test/X_test.txt", sep="", header=FALSE) test_y <- read.table("UCI HAR Dataset/test/Y_test.txt", sep=",", header=FALSE) # read data into respective data frames train_subject <- read.table("UCI HAR Dataset/train/subject_train.txt", sep="", header=FALSE) train_x <- read.table("UCI HAR Dataset/train/X_train.txt", sep="", header=FALSE) train_y <- read.table("UCI HAR Dataset/train/Y_train.txt", sep="", header=FALSE) # Add column name for label files names(train_y) <- "activity" names(test_y) <- "activity" # Add column name for subject files names(train_subject) <- "subjectID" names(test_subject) <- "subjectID" # Add column names for measurement files feature_names <- read.table("UCI HAR Dataset/features.txt") names(train_x) <- feature_names[[2]] names(test_x) <- feature_names[[2]] # merge files into one data frame train <- cbind( train_subject, train_y, train_x) test <- cbind( test_subject, test_y, test_x) mergedData <- rbind(train, test) # 2 Extract only the measurements on the mean and standard deviation for each measurement. 
# columns with "mean()" or "std()" logicalVector <- grepl("mean", names(mergedData)) | grepl("std", names(mergedData)) #enable subjectid and activity logicalVector[1:2] <- TRUE # remove unnecessary data columns mergedData <- mergedData[, logicalVector] #ncol(mergedData) #names(mergedData) #3 Uses descriptive activity names to name the activities in the data set mergedData$activity <- factor(mergedData$activity, labels=c("WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS", "SITTING", "STANDING", "LAYING")) #4 The data set is already set with descriptive label names in step 1 from feature.txt column 2 #5 tidy data set with the average of each variable for each activity and each subject long_format_melted <- melt( mergedData, id=c("subjectID","activity")) output <- dcast( long_format_melted, subjectID+activity ~ variable, mean) #tidy # output the result to a file write.table(output, "output.txt", row.names=FALSE)
setwd("D:/Rt/Gab/") require(wnl) dPK22 = read.csv("PK22.csv", skip=1) colnames(dPK22) = c("TIME", "DV") ; dPK22 dPK22dose = read.csv("PK22_dose.csv") # dPK22dose dPK22dose[,"DUR"] = dPK22dose[,"AMT"]/dPK22dose[,"RATE"] # dPK22dose dPK22dose[,"TIME2"] = dPK22dose["TIME"] + dPK22dose[,"DUR"] ; dPK22dose DoseHist = rbind(dPK22dose[,c("TIME", "RATE")], cbind(TIME=dPK22dose[,"TIME2"],RATE=0)) # DoseHist DoseHist = DoseHist[order(DoseHist$TIME),] ; DoseHist #sTIME = seq(0, 73, by=0.1) #iTIME = findInterval(sTIME, DoseHist[,"TIME"]) #cbind(sTIME, iTIME, DoseHist[iTIME,]) PKde = function(t, y, p) { RateIn = DoseHist[findInterval(t, DoseHist[,"TIME"]),"RATE"] CL = p["CLs"]*(1 + y[3]) # eq 22:3 dy1dt = (RateIn - CL*y[1] - p["CLd"]*y[1] + p["CLd"]*y[2])/p["Vc"] # eq 22:1 dy2dt = (p["CLd"]*y[1] - p["CLd"]*y[2])/p["Vt"] # eq 22:2 dy3dt = p["Kout"]*(p["E0"] + y[1]) - p["Kout"]*y[3] # eq 22:4 return(list(c(dy1dt, dy2dt, dy3dt))) } # Figure 22.1, p 581 plot(0, 0, type="n", xlim=c(0, 100), ylim=c(0, 700), xlab="Time (h)", ylab="Concentration (ug/L)") gTIME = seq(0, 100, by=0.1) y = lsoda(y=c(0, 0, 132.864), times=gTIME, func=PKde, parms=c(Vc=150, CLs=0.04, CLd=97.8, Vt=54, Kout=0.024, E0=132.864)) points(dPK22[,"TIME"], dPK22[,"DV"]) lines(y[,"time"], y[,"1"]) ## Times = c(0, dPK22[, "TIME"]) iTime = 2:length(Times) fPK22 = function(THETA) { Vc = THETA[1] CLs = THETA[2] CLd = THETA[3] Vt = THETA[4] Kout = THETA[5] E0 = THETA[6] y = lsoda(y=c(0, 0, 100), times=Times, func=PKde, parms=c(Vc=Vc, CLs=CLs, CLd=CLd, Vt=Vt, Kout=Kout, E0=E0)) return(y[iTime, "1"]) } fPK22(c(150, 0.04, 97.8, 54, 0.024, 132.864)) fPK22(c(155, 0.05, 120, 60, 0.03, 100)) nlr(fPK22, dPK22, pNames=c("Vc", "CLs", "CLd", "Vt", "Kout", "E0"), IE=c(155, 0.05, 120, 60, 0.03, 100), LB=c(100, 0.01, 60, 30, 0.01, 50), UB=c(200, 0.09, 200, 120, 0.05, 200)) # fitting failure nlr(fPK22, dPK22, pNames=c("Vc", "CLs", "CLd", "Vt", "Kout", "E0"), IE=c(155, 0.05, 120, 60, 0.03, 100), LB=c(100, 0.01, 60, 30, 0.01, 50), UB=c(200, 
0.09, 200, 120, 0.05, 200), Error="P") # fitting failure -> Use NONMEM ## microconstant model PKde2 = function(t, y, p) { RateIn = DoseHist[findInterval(t, DoseHist[,"TIME"]),"RATE"] CL = p["CLs"]*(1 + y[3]) # eq 22:3 dy1dt = (RateIn - CL*y[1])/p["Vc"] - p["k12"]*y[1] + p["k21"]*y[2] dy2dt = p["k12"]*y[1] - p["k21"]*y[2] dy3dt = p["Kout"]*(p["E0"] + y[1]) - p["Kout"]*y[3] # eq 22:4 return(list(c(dy1dt, dy2dt, dy3dt))) } # Figure 22.1, p 581 plot(0, 0, type="n", xlim=c(0, 100), ylim=c(0, 700), xlab="Time (h)", ylab="Concentration (ug/L)") gTIME = seq(0, 100, by=0.1) y = lsoda(y=c(0, 0, 137), times=gTIME, func=PKde2, parms=c(Vc=146, CLs=0.04, k12=0.8, k21=1.98, Kout=0.023, E0=137)) points(dPK22[,"TIME"], dPK22[,"DV"]) lines(y[,"time"], y[,"1"]) ## Times = c(0, dPK22[, "TIME"]) iTime = 2:length(Times) fPK22b = function(THETA) { Vc = THETA[1] CLs = THETA[2] k12 = THETA[3] k21 = THETA[4] Kout = THETA[5] E0 = THETA[6] y = lsoda(y=c(0, 0, 100), times=Times, func=PKde2, parms=c(Vc=Vc, CLs=CLs, k12=k12, k21=k21, Kout=Kout, E0=E0)) return(y[iTime, "1"]) } fPK22b(c(150, 0.04, 0.8, 1.98, 0.023, 132.864)) fPK22b(c(155, 0.05, 0.5, 1.25, 0.03, 100)) nlr(fPK22, dPK22, pNames=c("Vc", "CLs", "CLd", "Vt", "Kout", "E0"), IE=c(155, 0.05, 0.5, 1.25, 0.03, 100), LB=c(100, 0.01, 0.1, 0.5, 0.01, 50), UB=c(200, 0.09, 1.2, 3, 0.05, 200)) # fitting failure nlr(fPK22, dPK22, pNames=c("Vc", "CLs", "CLd", "Vt", "Kout", "E0"), IE=c(155, 0.05, 0.5, 1.25, 0.03, 100), LB=c(100, 0.01, 0.1, 0.5, 0.01, 50), UB=c(200, 0.09, 1.2, 3, 0.05, 200), Error="P") # fitting failure -> Use NONMEM
/R-old/PK22.R
no_license
pipetcpt/study-pkpd
R
false
false
3,850
r
setwd("D:/Rt/Gab/") require(wnl) dPK22 = read.csv("PK22.csv", skip=1) colnames(dPK22) = c("TIME", "DV") ; dPK22 dPK22dose = read.csv("PK22_dose.csv") # dPK22dose dPK22dose[,"DUR"] = dPK22dose[,"AMT"]/dPK22dose[,"RATE"] # dPK22dose dPK22dose[,"TIME2"] = dPK22dose["TIME"] + dPK22dose[,"DUR"] ; dPK22dose DoseHist = rbind(dPK22dose[,c("TIME", "RATE")], cbind(TIME=dPK22dose[,"TIME2"],RATE=0)) # DoseHist DoseHist = DoseHist[order(DoseHist$TIME),] ; DoseHist #sTIME = seq(0, 73, by=0.1) #iTIME = findInterval(sTIME, DoseHist[,"TIME"]) #cbind(sTIME, iTIME, DoseHist[iTIME,]) PKde = function(t, y, p) { RateIn = DoseHist[findInterval(t, DoseHist[,"TIME"]),"RATE"] CL = p["CLs"]*(1 + y[3]) # eq 22:3 dy1dt = (RateIn - CL*y[1] - p["CLd"]*y[1] + p["CLd"]*y[2])/p["Vc"] # eq 22:1 dy2dt = (p["CLd"]*y[1] - p["CLd"]*y[2])/p["Vt"] # eq 22:2 dy3dt = p["Kout"]*(p["E0"] + y[1]) - p["Kout"]*y[3] # eq 22:4 return(list(c(dy1dt, dy2dt, dy3dt))) } # Figure 22.1, p 581 plot(0, 0, type="n", xlim=c(0, 100), ylim=c(0, 700), xlab="Time (h)", ylab="Concentration (ug/L)") gTIME = seq(0, 100, by=0.1) y = lsoda(y=c(0, 0, 132.864), times=gTIME, func=PKde, parms=c(Vc=150, CLs=0.04, CLd=97.8, Vt=54, Kout=0.024, E0=132.864)) points(dPK22[,"TIME"], dPK22[,"DV"]) lines(y[,"time"], y[,"1"]) ## Times = c(0, dPK22[, "TIME"]) iTime = 2:length(Times) fPK22 = function(THETA) { Vc = THETA[1] CLs = THETA[2] CLd = THETA[3] Vt = THETA[4] Kout = THETA[5] E0 = THETA[6] y = lsoda(y=c(0, 0, 100), times=Times, func=PKde, parms=c(Vc=Vc, CLs=CLs, CLd=CLd, Vt=Vt, Kout=Kout, E0=E0)) return(y[iTime, "1"]) } fPK22(c(150, 0.04, 97.8, 54, 0.024, 132.864)) fPK22(c(155, 0.05, 120, 60, 0.03, 100)) nlr(fPK22, dPK22, pNames=c("Vc", "CLs", "CLd", "Vt", "Kout", "E0"), IE=c(155, 0.05, 120, 60, 0.03, 100), LB=c(100, 0.01, 60, 30, 0.01, 50), UB=c(200, 0.09, 200, 120, 0.05, 200)) # fitting failure nlr(fPK22, dPK22, pNames=c("Vc", "CLs", "CLd", "Vt", "Kout", "E0"), IE=c(155, 0.05, 120, 60, 0.03, 100), LB=c(100, 0.01, 60, 30, 0.01, 50), UB=c(200, 
0.09, 200, 120, 0.05, 200), Error="P") # fitting failure -> Use NONMEM ## microconstant model PKde2 = function(t, y, p) { RateIn = DoseHist[findInterval(t, DoseHist[,"TIME"]),"RATE"] CL = p["CLs"]*(1 + y[3]) # eq 22:3 dy1dt = (RateIn - CL*y[1])/p["Vc"] - p["k12"]*y[1] + p["k21"]*y[2] dy2dt = p["k12"]*y[1] - p["k21"]*y[2] dy3dt = p["Kout"]*(p["E0"] + y[1]) - p["Kout"]*y[3] # eq 22:4 return(list(c(dy1dt, dy2dt, dy3dt))) } # Figure 22.1, p 581 plot(0, 0, type="n", xlim=c(0, 100), ylim=c(0, 700), xlab="Time (h)", ylab="Concentration (ug/L)") gTIME = seq(0, 100, by=0.1) y = lsoda(y=c(0, 0, 137), times=gTIME, func=PKde2, parms=c(Vc=146, CLs=0.04, k12=0.8, k21=1.98, Kout=0.023, E0=137)) points(dPK22[,"TIME"], dPK22[,"DV"]) lines(y[,"time"], y[,"1"]) ## Times = c(0, dPK22[, "TIME"]) iTime = 2:length(Times) fPK22b = function(THETA) { Vc = THETA[1] CLs = THETA[2] k12 = THETA[3] k21 = THETA[4] Kout = THETA[5] E0 = THETA[6] y = lsoda(y=c(0, 0, 100), times=Times, func=PKde2, parms=c(Vc=Vc, CLs=CLs, k12=k12, k21=k21, Kout=Kout, E0=E0)) return(y[iTime, "1"]) } fPK22b(c(150, 0.04, 0.8, 1.98, 0.023, 132.864)) fPK22b(c(155, 0.05, 0.5, 1.25, 0.03, 100)) nlr(fPK22, dPK22, pNames=c("Vc", "CLs", "CLd", "Vt", "Kout", "E0"), IE=c(155, 0.05, 0.5, 1.25, 0.03, 100), LB=c(100, 0.01, 0.1, 0.5, 0.01, 50), UB=c(200, 0.09, 1.2, 3, 0.05, 200)) # fitting failure nlr(fPK22, dPK22, pNames=c("Vc", "CLs", "CLd", "Vt", "Kout", "E0"), IE=c(155, 0.05, 0.5, 1.25, 0.03, 100), LB=c(100, 0.01, 0.1, 0.5, 0.01, 50), UB=c(200, 0.09, 1.2, 3, 0.05, 200), Error="P") # fitting failure -> Use NONMEM
\name{NISTkgPerSecTOpoundPerMin} \alias{NISTkgPerSecTOpoundPerMin} \title{Convert kilogram per second to pound per minute } \usage{NISTkgPerSecTOpoundPerMin(kgPerSec)} \description{\code{NISTkgPerSecTOpoundPerMin} converts from kilogram per second (kg/s) to pound per minute (lb/min) } \arguments{ \item{kgPerSec}{kilogram per second (kg/s) } } \value{pound per minute (lb/min) } \source{ National Institute of Standards and Technology (NIST), 2014 NIST Guide to SI Units B.8 Factors for Units Listed Alphabetically \url{http://physics.nist.gov/Pubs/SP811/appenB8.html} } \references{ National Institute of Standards and Technology (NIST), 2014 NIST Guide to SI Units B.8 Factors for Units Listed Alphabetically \url{http://physics.nist.gov/Pubs/SP811/appenB8.html} } \author{Jose Gama} \examples{ NISTkgPerSecTOpoundPerMin(10) } \keyword{programming}
/man/NISTkgPerSecTOpoundPerMin.Rd
no_license
cran/NISTunits
R
false
false
854
rd
\name{NISTkgPerSecTOpoundPerMin} \alias{NISTkgPerSecTOpoundPerMin} \title{Convert kilogram per second to pound per minute } \usage{NISTkgPerSecTOpoundPerMin(kgPerSec)} \description{\code{NISTkgPerSecTOpoundPerMin} converts from kilogram per second (kg/s) to pound per minute (lb/min) } \arguments{ \item{kgPerSec}{kilogram per second (kg/s) } } \value{pound per minute (lb/min) } \source{ National Institute of Standards and Technology (NIST), 2014 NIST Guide to SI Units B.8 Factors for Units Listed Alphabetically \url{http://physics.nist.gov/Pubs/SP811/appenB8.html} } \references{ National Institute of Standards and Technology (NIST), 2014 NIST Guide to SI Units B.8 Factors for Units Listed Alphabetically \url{http://physics.nist.gov/Pubs/SP811/appenB8.html} } \author{Jose Gama} \examples{ NISTkgPerSecTOpoundPerMin(10) } \keyword{programming}
#' Cleanly trim truncate long strings #' #' @param x Input string to trim to desired length, appending an ellipsis to the #' end, and without splitting words #' @param l Desired length at which to trim strings #' #' @return Character #' #' @export #' #' @import stringr #' @importFrom purrr map_chr #' #' @description Trims the input string to the desired length, appending an #' ellipsis to the end, without splitting in the middle of a word. #' #' @references None. #' #' @seealso <https://www.github.com/travis-m-blimkie/tRavis> #' tr_trunc_neatly <- function(x, l = 60) { map_chr( x, ~if (is.na(.x)) { return(NA_character_) } else if (str_length(.x) <= l) { return(.x) } else { shortened <- .x %>% as.character() %>% str_sub(., start = 1, end = l) %>% str_replace(., pattern = "\\s([^\\s]*)$", replacement = "...") return(shortened) } ) }
/R/tr_trunc_neatly.R
permissive
travis-m-blimkie/tRavis
R
false
false
920
r
#' Cleanly trim truncate long strings #' #' @param x Input string to trim to desired length, appending an ellipsis to the #' end, and without splitting words #' @param l Desired length at which to trim strings #' #' @return Character #' #' @export #' #' @import stringr #' @importFrom purrr map_chr #' #' @description Trims the input string to the desired length, appending an #' ellipsis to the end, without splitting in the middle of a word. #' #' @references None. #' #' @seealso <https://www.github.com/travis-m-blimkie/tRavis> #' tr_trunc_neatly <- function(x, l = 60) { map_chr( x, ~if (is.na(.x)) { return(NA_character_) } else if (str_length(.x) <= l) { return(.x) } else { shortened <- .x %>% as.character() %>% str_sub(., start = 1, end = l) %>% str_replace(., pattern = "\\s([^\\s]*)$", replacement = "...") return(shortened) } ) }
## Put comments here that give an overall description of what your ## functions do ## Write a short comment describing this function makeCacheMatrix <- function(x = matrix()) { inv <- NULL set <- function(y) { ## set the value of the matrix x <<- y inv <<- NULL } get <- function() x ## get the value of the matrix setInverse <- function(inverse) inv <<- inverse ## set the value of the inverse getInverse <- function() inv ## get the value of the inverse list(set = set, get = get, setInverse = setInverse, getInverse = getInverse) } ## Write a short comment describing this function cacheSolve <- function(x, ...) { ## Return a matrix that is the inverse of 'x' inv <- x$getInverse() if (!is.null(inv)) { message("getting cached data") return(inv) } mat <- x$get() ## get the value of the inverse if already calculated inv <- solve(mat, ...) ## calculate the value of the inverse if not in the cache x$setInverse(inv) inv }
/cachematrix.R
no_license
EdoardoPennesi/ProgrammingAssignment2
R
false
false
1,193
r
## Put comments here that give an overall description of what your ## functions do ## Write a short comment describing this function makeCacheMatrix <- function(x = matrix()) { inv <- NULL set <- function(y) { ## set the value of the matrix x <<- y inv <<- NULL } get <- function() x ## get the value of the matrix setInverse <- function(inverse) inv <<- inverse ## set the value of the inverse getInverse <- function() inv ## get the value of the inverse list(set = set, get = get, setInverse = setInverse, getInverse = getInverse) } ## Write a short comment describing this function cacheSolve <- function(x, ...) { ## Return a matrix that is the inverse of 'x' inv <- x$getInverse() if (!is.null(inv)) { message("getting cached data") return(inv) } mat <- x$get() ## get the value of the inverse if already calculated inv <- solve(mat, ...) ## calculate the value of the inverse if not in the cache x$setInverse(inv) inv }
# Fit a multi-level quadratic growth model to time at school using lme4 and # write out the estimated model, its parameters, and the predictions and residuals library(readr) library(dplyr) library(tidyr) library(lubridate) library(lmerTest) at_school <- read_rds("data/processed/at_school.rds") twin_info <- read_rds("data/processed/Robin_paper-entry_2-22-17_cleaned.rds") %>% haven::zap_formats() %>% haven::zap_labels() id_mapping_long <- read_csv("data/processed/id_mapping_long.csv", col_types = "ccc") # We started getting more locations in November 2016. Let's restrict to there and # later to make the manual part of this tractable at_school <- filter(at_school, date(DateTime) >= ymd("2016-11-01")) # Make a list of lubridate intervals representing days off from school # Drawn from the Denver public schools calendar and figs/school_frac_calendar.pdf vacays <- list( interval(ymd("2016-11-21"), ymd("2016-11-25")), interval(ymd("2016-12-22"), ymd("2017-01-06")), interval(ymd("2017-01-16"), ymd("2017-01-16")), interval(ymd("2017-02-20"), ymd("2017-02-20")), interval(ymd("2017-03-27"), ymd("2017-03-31")), interval(ymd("2017-05-29"), ymd("2017-05-31")), interval(ymd("2017-08-01"), ymd("2017-08-18")), interval(ymd("2017-09-04"), ymd("2017-09-04")), interval(ymd("2017-11-20"), ymd("2017-11-24")), interval(ymd("2017-12-22"), ymd("2018-01-05")), interval(ymd("2018-01-15"), ymd("2018-01-15")), interval(ymd("2018-02-19"), ymd("2018-02-19")), interval(ymd("2018-03-26"), ymd("2018-03-30")) ) # Shift the locations to local time and restrict to school hours and days at_school <- mutate(at_school, DateTime = DateTime + minutes(sample_timezone)) %>% filter(hour(DateTime) %in% c(8, 9, 10, 11, 12, 13, 14)) %>% # No school in June or July filter(month(DateTime) != 6, month(DateTime) != 7) %>% # No school on Saturday and Sunday filter(wday(DateTime) != 7, wday(DateTime) != 1) %>% # a %within% b where a is a date and b is a list of intervals, will check # if a falls within any of the 
intervals in b filter(!(date(DateTime) %within% vacays)) # Get ages for at_school at_school <- left_join(at_school, id_mapping_long, by = c("Michigan_ID" = "alternate_id")) %>% left_join(twin_info, by = c("SVID" = "ID1")) %>% select(DateTime, user_id = Michigan_ID, orig_datetime, at_school, family, sex = Sex1, Birth_Date) %>% mutate(sex = sex - 1, test_age = as.numeric(as_date(DateTime) - Birth_Date) / 365, test_age = test_age - 17) # Restrict at_school to 18 or younger at_school <- filter(at_school, test_age <= 1) # Aggregate at the week level to match the checking in survey frequency at_school <- group_by(at_school, user_id, week = week(DateTime), year = year(DateTime)) %>% summarize( school_frac = sum(at_school) / n(), test_age = mean(test_age), sex = first(sex), family = first(family) ) # Missing data doesn't play nice with the lme4 bootstrapping function at_school <- na.omit(at_school) # Fit a quadratic growth model with age at assessment as the time metric # and age squared and sex as fixed effects ml <- lmer( school_frac ~ (test_age + I(test_age^2) + sex) + (test_age + I(test_age^2) | family/user_id), data = at_school ) # Get the random and fixed effects rand_effs <- ranef(ml) fix_effs <- fixef(ml) # Get the random effects estimates for each twin and combine them with the # fixed effects estimates to get the estimated parameters rand_effs_family <- rand_effs$family %>% tibble::rownames_to_column("family") rand_effs_twin <- rand_effs$`user_id:family` %>% tibble::rownames_to_column("both") %>% separate(both, c("user_id", "family"), ":") parameters <- left_join(rand_effs_twin, rand_effs_family, by = "family") %>% transmute( user_id = user_id, intercept = `(Intercept).x` + `(Intercept).y` + fix_effs["(Intercept)"], slope = `test_age.x` + `test_age.y` + fix_effs["test_age"], quadratic = `I(test_age^2).x` + `I(test_age^2).y` + fix_effs["I(test_age^2)"], sex_beta = fix_effs["sex"] ) # Get the predicted values and the residuals at_school_pred <- left_join(at_school, 
parameters, by = "user_id") %>% mutate( school_frac_pred = intercept + slope * test_age + quadratic * test_age^2 + sex_beta * sex, school_frac_resid = school_frac - school_frac_pred ) %>% select(user_id, family, test_age, school_frac, sex, school_frac_pred, school_frac_resid) write_rds(ml, "data/models/at_school_quadratic_model.rds") write_rds(at_school_pred, "data/models/at_school_quadratic_predictions.rds") write_rds(parameters, "data/models/at_school_quadratic_parameters.rds") write_rds(at_school, "data/models/at_school_data.rds")
/src/models/at_school_quadratic_model.R
permissive
AFialkowski/cotwins-analyses
R
false
false
4,717
r
# Fit a multi-level quadratic growth model to time at school using lme4 and
# write out the estimated model, its parameters, and the predictions and residuals

library(readr)
library(dplyr)
library(tidyr)
library(lubridate)
library(lmerTest)

# Inputs: per-ping school-location indicators, twin demographics, and the
# mapping between Michigan app IDs and SVIDs.
at_school <- read_rds("data/processed/at_school.rds")
twin_info <- read_rds("data/processed/Robin_paper-entry_2-22-17_cleaned.rds") %>%
  haven::zap_formats() %>% haven::zap_labels()
id_mapping_long <- read_csv("data/processed/id_mapping_long.csv", col_types = "ccc")

# We started getting more locations in November 2016. Let's restrict to there and
# later to make the manual part of this tractable
at_school <- filter(at_school, date(DateTime) >= ymd("2016-11-01"))

# Make a list of lubridate intervals representing days off from school
# Drawn from the Denver public schools calendar and figs/school_frac_calendar.pdf
vacays <- list(
  interval(ymd("2016-11-21"), ymd("2016-11-25")),
  interval(ymd("2016-12-22"), ymd("2017-01-06")),
  interval(ymd("2017-01-16"), ymd("2017-01-16")),
  interval(ymd("2017-02-20"), ymd("2017-02-20")),
  interval(ymd("2017-03-27"), ymd("2017-03-31")),
  interval(ymd("2017-05-29"), ymd("2017-05-31")),
  interval(ymd("2017-08-01"), ymd("2017-08-18")),
  interval(ymd("2017-09-04"), ymd("2017-09-04")),
  interval(ymd("2017-11-20"), ymd("2017-11-24")),
  interval(ymd("2017-12-22"), ymd("2018-01-05")),
  interval(ymd("2018-01-15"), ymd("2018-01-15")),
  interval(ymd("2018-02-19"), ymd("2018-02-19")),
  interval(ymd("2018-03-26"), ymd("2018-03-30"))
)

# Shift the locations to local time and restrict to school hours and days.
# sample_timezone is the offset in minutes -- TODO confirm sign convention
# matches the upstream location-processing step.
at_school <- mutate(at_school, DateTime = DateTime + minutes(sample_timezone)) %>%
  filter(hour(DateTime) %in% c(8, 9, 10, 11, 12, 13, 14)) %>%
  # No school in June or July
  filter(month(DateTime) != 6, month(DateTime) != 7) %>%
  # No school on Saturday and Sunday
  filter(wday(DateTime) != 7, wday(DateTime) != 1) %>%
  # a %within% b where a is a date and b is a list of intervals, will check
  # if a falls within any of the intervals in b
  filter(!(date(DateTime) %within% vacays))

# Get ages for at_school: map app IDs to SVIDs, then join demographics.
at_school <- left_join(at_school, id_mapping_long, by = c("Michigan_ID" = "alternate_id")) %>%
  left_join(twin_info, by = c("SVID" = "ID1")) %>%
  select(DateTime, user_id = Michigan_ID, orig_datetime, at_school, family, sex = Sex1, Birth_Date) %>%
  # sex is recoded by subtracting 1 (assumes Sex1 is coded 1/2 -- TODO confirm);
  # test_age is age in 365-day years (leap days ignored), centered at 17 so the
  # model intercept corresponds to age 17.
  mutate(sex = sex - 1,
         test_age = as.numeric(as_date(DateTime) - Birth_Date) / 365,
         test_age = test_age - 17)

# Restrict at_school to 18 or younger (test_age is centered at 17, so <= 1)
at_school <- filter(at_school, test_age <= 1)

# Aggregate at the week level to match the checking in survey frequency.
# Grouping by year as well keeps week numbers from colliding across years.
at_school <- group_by(at_school, user_id, week = week(DateTime), year = year(DateTime)) %>%
  summarize(
    # Fraction of in-window location pings that fall at school that week.
    school_frac = sum(at_school) / n(),
    test_age = mean(test_age),
    sex = first(sex),
    family = first(family)
  )

# Missing data doesn't play nice with the lme4 bootstrapping function
at_school <- na.omit(at_school)

# Fit a quadratic growth model with age at assessment as the time metric
# and age squared and sex as fixed effects; random intercepts and random
# linear/quadratic age slopes are nested as twins within families.
ml <- lmer(
  school_frac ~ (test_age + I(test_age^2) + sex) + (test_age + I(test_age^2) | family/user_id),
  data = at_school
)

# Get the random and fixed effects
rand_effs <- ranef(ml)
fix_effs <- fixef(ml)

# Get the random effects estimates for each twin and combine them with the
# fixed effects estimates to get the estimated parameters
rand_effs_family <- rand_effs$family %>% tibble::rownames_to_column("family")
rand_effs_twin <- rand_effs$`user_id:family` %>%
  tibble::rownames_to_column("both") %>%
  # lme4 labels nested levels as "user_id:family"; split back into the two IDs
  separate(both, c("user_id", "family"), ":")

# Per-twin curve parameters = family BLUP + twin BLUP + fixed effect.
# (".x" columns come from the twin level, ".y" from the family level.)
parameters <- left_join(rand_effs_twin, rand_effs_family, by = "family") %>%
  transmute(
    user_id = user_id,
    intercept = `(Intercept).x` + `(Intercept).y` + fix_effs["(Intercept)"],
    slope = `test_age.x` + `test_age.y` + fix_effs["test_age"],
    quadratic = `I(test_age^2).x` + `I(test_age^2).y` + fix_effs["I(test_age^2)"],
    # sex has no random component, so only the fixed effect applies
    sex_beta = fix_effs["sex"]
  )

# Get the predicted values and the residuals
at_school_pred <- left_join(at_school, parameters, by = "user_id") %>%
  mutate(
    school_frac_pred = intercept + slope * test_age + quadratic * test_age^2 + sex_beta * sex,
    school_frac_resid = school_frac - school_frac_pred
  ) %>%
  select(user_id, family, test_age, school_frac, sex, school_frac_pred, school_frac_resid)

# Persist the fitted model, predictions, parameters, and the modeling data
write_rds(ml, "data/models/at_school_quadratic_model.rds")
write_rds(at_school_pred, "data/models/at_school_quadratic_predictions.rds")
write_rds(parameters, "data/models/at_school_quadratic_parameters.rds")
write_rds(at_school, "data/models/at_school_data.rds")
#' Differential abundance analysis
#'
#' @param MAE A multi-assay experiment object
#' @param tax_level The taxon level used for organisms
#' @param input_da_condition Which condition is the target condition
#' @param input_da_condition_covariate Covariates added to linear function
#' @param min_num_filter Minimum number reads mapped to this microbe
#' @param input_da_padj_cutoff adjusted pValue cutoff
#' @param method choose between DESeq2 and limma
#'
#' @return A output dataframe
#'
#' @examples
#' data_dir = system.file("extdata/MAE.rds", package = "animalcules")
#' toy_data <- readRDS(data_dir)
#' differential_abundance(toy_data,
#' tax_level="phylum",
#' input_da_condition=c("DISEASE"),
#' min_num_filter = 2,
#' input_da_padj_cutoff = 0.5,
#' method = "DESeq2")
#'
#'
#' @importFrom limma lmFit eBayes topTable
#' @import DESeq2
#' @import MultiAssayExperiment
#'
#' @export
differential_abundance <- function(MAE,
                                   tax_level,
                                   input_da_condition = c(),
                                   input_da_condition_covariate = NULL,
                                   min_num_filter = 5,
                                   input_da_padj_cutoff = 0.05,
                                   method = "DESeq2") {
  ## tables from MAE
  microbe <- MAE[['MicrobeGenetics']] #double bracket subsetting is easier
  # organism x taxlev
  tax_table <- as.data.frame(SummarizedExperiment::rowData(microbe))
  # sample x condition
  sam_table <- as.data.frame(SummarizedExperiment::colData(microbe))
  # organism x sample
  counts_table <-
    as.data.frame(SummarizedExperiment::assays(microbe))[, rownames(sam_table)]

  if (method == "DESeq2"){
    # Sum counts by taxon level; DESeq2 needs an integer count matrix, so
    # coerce (the transpose/re-transpose keeps the organism x sample shape)
    count_table_tax <- counts_table %>%
      upsample_counts(tax_table, tax_level)
    colnames_tmp <- colnames(count_table_tax)
    count_table_tax <- t(apply(count_table_tax, 1, as.integer))
    colnames(count_table_tax) <- colnames_tmp
    # sam table: character columns must be factors for the design formula
    sam_table %<>% df_char_to_factor()

    # build the deseq2 formula: covariates (if any) first, condition last
    if (is.null(input_da_condition_covariate)){
      dds_formula = stats::as.formula(paste("~",input_da_condition, sep = " "))
    } else {
      dds_formula = stats::as.formula(paste("~",
          paste(
              paste(input_da_condition_covariate, collapse = " + "),
              input_da_condition, sep = " + "),
          sep = " "))
    }

    # run DEseq2
    dds <- DESeq2::DESeqDataSetFromMatrix(countData = count_table_tax,
                                          colData = sam_table,
                                          design = dds_formula)
    dds <- DESeq2::DESeq(dds)

    # filter microbes with less than min_num_filter total reads
    keep <- base::rowSums(DESeq2::counts(dds)) >= min_num_filter
    dds <- dds[keep,]

    # check if the condition has multiple levels: with a two-level condition
    # (plus covariates) resultsNames has exactly intercept + one coefficient
    # per covariate + one condition coefficient.
    # NOTE: use scalar || in an if-condition, not elementwise |.
    if (length(resultsNames(dds)) == 2 ||
        length(resultsNames(dds)) - length(input_da_condition_covariate) == 2){
      res <- DESeq2::results(dds)
      # reorder the result by adjusted p-value, dropping NA padj rows
      res = res[base::order(res$padj, na.last=NA), ]
      # reformat for reporting
      if (nrow(res) != 0){
        sigtab = res[(res$padj < input_da_padj_cutoff), ]
        if (nrow(sigtab) == 0){
          # Explicit return for consistency with the sibling branches
          # (the original relied on the implicit value of the if-expression).
          return(as.matrix("No differentially abundant items found!"))
        } else {
          sigtab = as(sigtab, "data.frame")
          # round p-values and fold changes to 2 significant digits
          sigtab$padj <-
            as.numeric(formatC(sigtab$padj, format = "e", digits = 2))
          sigtab$pValue <-
            as.numeric(formatC(sigtab$pvalue, format = "e", digits = 2))
          sigtab$log2FoldChange <-
            as.numeric(formatC(sigtab$log2FoldChange, format = "e", digits = 2))
          sigtab$microbe <- rownames(sigtab)
          rownames(sigtab) <- seq_len(nrow(sigtab))
          sigtab %<>% select(microbe, padj, pValue, log2FoldChange)

          num.1 <- c()
          num.2 <- c()
          # transform label into 1 and 0 (first observed level becomes 1)
          label.vec.num =
            as.character((sam_table %>% select(input_da_condition))[,1])
          label.vec.save <- unique(label.vec.num)
          label.vec.num[label.vec.num == unique(label.vec.num)[1]] <- 1
          label.vec.num[label.vec.num != 1] <- 0
          label.vec.num <- as.numeric(label.vec.num)
          # per microbe: number of samples in each group with nonzero counts
          for (i in seq_len(nrow(sigtab))){
            species.index <- which(rownames(count_table_tax) == sigtab[i,1])
            num.1 <- c(num.1,
              sum((count_table_tax[species.index,
                                   which(label.vec.num == 1)] > 0)))
            num.2 <- c(num.2,
              sum((count_table_tax[species.index,
                                   which(label.vec.num == 0)] > 0)))
          }
          sigtab <- cbind(sigtab, num.1)
          sigtab <- cbind(sigtab, num.2)
          df.output.prevalence <-
            percent(round((num.1 + num.2)/ncol(count_table_tax),4))
          sigtab <- cbind(sigtab, df.output.prevalence)
          colnames(sigtab)[ncol(sigtab)-2] <- label.vec.save[1]
          colnames(sigtab)[ncol(sigtab)-1] <- label.vec.save[2]
          colnames(sigtab)[ncol(sigtab)] <- "prevalence"

          # fold change of per-group detection rates (columns 5 and 6),
          # oriented so it is always >= 1
          foldChange <- c()
          for (i in seq_len(nrow(sigtab))){
            foldChange[i] <- round(
              (max(as.numeric(c((sigtab[i,6] / sum(label.vec.num == 0)),
                                (sigtab[i,5] / sum(label.vec.num == 1))))) /
               min(as.numeric(c((sigtab[i,6] / sum(label.vec.num == 0)),
                                (sigtab[i,5] / sum(label.vec.num == 1)))))),
              digits = 2)
          }
          sigtab <- cbind(sigtab, foldChange)
          colnames(sigtab)[ncol(sigtab)] <- "Group Size adjusted fold change"

          # report counts as "detected/group size"
          sigtab[,5] <- paste0(sigtab[,5], "/", sum(label.vec.num == 1))
          sigtab[,6] <- paste0(sigtab[,6], "/", sum(label.vec.num == 0))

          # if y is numeric, make the output table easier
          if (is.numeric((sam_table %>% select(input_da_condition))[,1])){
            sigtab <- sigtab[,c(1,2,3,4,7)]
          }
          return(sigtab)
        }
      } else {
        return(as.matrix("No differentially abundant items found!"))
      }
    } else {
      # for multiple levels,
      # we need to combine results for each pairwise comparison
      sigtab <- NULL
      label.vec = as.character((sam_table %>% select(input_da_condition))[,1])
      combination_mat <- utils::combn(sort(unique(label.vec)), 2)
      for (j in seq_len(ncol(combination_mat))){
        res <- DESeq2::results(dds,
                               contrast=c(input_da_condition,
                                          combination_mat[1,j],
                                          combination_mat[2,j]))
        if (nrow(res) > 0){
          res = res[base::order(res$padj, na.last=NA), ]
          sigtab_tmp = res[(res$padj < input_da_padj_cutoff), ]
          if (nrow(sigtab_tmp) > 0){
            sigtab_tmp = as(sigtab_tmp, "data.frame")
            sigtab_tmp$padj <-
              as.numeric(formatC(sigtab_tmp$padj, format = "e", digits = 2))
            sigtab_tmp$pValue <-
              as.numeric(formatC(sigtab_tmp$pvalue, format = "e", digits = 2))
            sigtab_tmp$log2FoldChange <-
              as.numeric(formatC(sigtab_tmp$log2FoldChange,
                                 format = "e", digits = 2))
            sigtab_tmp$microbe <- rownames(sigtab_tmp)
            rownames(sigtab_tmp) <- seq_len(nrow(sigtab_tmp))
            sigtab_tmp %<>% select(microbe, padj, pValue, log2FoldChange)

            num.1 <- c()
            num.2 <- c()
            # transform label into 1 and 0 for this pairwise contrast;
            # samples from other levels become NA via as.numeric()
            label.vec.num =
              as.character((sam_table %>% select(input_da_condition))[,1])
            label.vec.num[label.vec.num == combination_mat[1,j]] <- 1
            label.vec.num[label.vec.num == combination_mat[2,j]] <- 0
            label.vec.num <- as.numeric(label.vec.num)
            for (i in seq_len(nrow(sigtab_tmp))){
              species.index <-
                which(rownames(count_table_tax) == sigtab_tmp[i,1])
              num.1 <- c(num.1,
                sum((count_table_tax[species.index,
                                     which(label.vec.num == 1)] > 0)))
              num.2 <- c(num.2,
                sum((count_table_tax[species.index,
                                     which(label.vec.num == 0)] > 0)))
            }
            sigtab_tmp <- cbind(sigtab_tmp, num.1)
            sigtab_tmp <- cbind(sigtab_tmp, num.2)
            df.output.prevalence <-
              percent(round((num.1 + num.2)/ ncol(count_table_tax),4))
            sigtab_tmp <- cbind(sigtab_tmp, df.output.prevalence)
            colnames(sigtab_tmp)[ncol(sigtab_tmp)-2] <- "experiment"
            colnames(sigtab_tmp)[ncol(sigtab_tmp)-1] <- "control"
            colnames(sigtab_tmp)[ncol(sigtab_tmp)] <- "prevalence"

            foldChange <- c()
            for (i in seq_len(nrow(sigtab_tmp))){
              foldChange[i] <- round(
                (max(as.numeric(c(
                   (sigtab_tmp[i,6] / sum(label.vec == combination_mat[2,j])),
                   (sigtab_tmp[i,5] / sum(label.vec == combination_mat[1,j]))))) /
                 min(as.numeric(c(
                   (sigtab_tmp[i,6] / sum(label.vec == combination_mat[2,j])),
                   (sigtab_tmp[i,5] / sum(label.vec == combination_mat[1,j])))))),
                digits = 2)
            }
            sigtab_tmp <- cbind(sigtab_tmp, foldChange)
            colnames(sigtab_tmp)[ncol(sigtab_tmp)] <-
              "Group Size adjusted fold change"

            # report counts as "level: detected/group size"
            sigtab_tmp[,5] <- paste0(combination_mat[1,j], ": ",
                                     sigtab_tmp[,5], "/",
                                     sum(label.vec == combination_mat[1,j]))
            sigtab_tmp[,6] <- paste0(combination_mat[2,j], ": ",
                                     sigtab_tmp[,6], "/",
                                     sum(label.vec == combination_mat[2,j]))
            # record which pairwise contrast this block of rows came from
            sigtab_tmp[,9] <- paste0(combination_mat[1,j], " vs. ",
                                     combination_mat[2,j])
            colnames(sigtab_tmp)[9] <- "Contrast"
            # combine
            sigtab <- rbind(sigtab, sigtab_tmp)
          }
        }
      }
      return(sigtab)
    }
  } else if (method == "limma"){
    # Sum counts by taxon level and convert to log-CPM for limma.
    # NOTE(review): as.integer() truncates the log-CPM values, and the
    # filter compares log-CPM row sums against log10(min_num_filter) --
    # both look intentional upstream but are worth confirming.
    count_table_tax <- counts_table %>%
      upsample_counts(tax_table, tax_level) %>%
      counts_to_logcpm()
    colnames_tmp <- colnames(count_table_tax)
    count_table_tax <- t(apply(count_table_tax, 1, as.integer))
    colnames(count_table_tax) <- colnames_tmp
    # sam table
    sam_table %<>% df_char_to_factor()
    # filter low count microbes
    count_table_tax <-
      count_table_tax[rowSums(count_table_tax) >= log10(min_num_filter),]

    # build the design matrix: covariates (if any) first, condition last
    if (is.null(input_da_condition_covariate)){
      dds_formula <- stats::as.formula(paste("~",input_da_condition, sep = " "))
      design <- model.matrix(dds_formula, sam_table)
    } else {
      dds_formula <- stats::as.formula(paste("~",
          paste(
              paste(input_da_condition_covariate, collapse = " + "),
              input_da_condition, sep = " + "),
          sep = " "))
      design <- model.matrix(dds_formula, sam_table)
    }

    fit <- limma::lmFit(count_table_tax, design)
    ebayes <- limma::eBayes(fit)
    sigtab <- limma::topTable(ebayes, adjust = "BH",
                              number = nrow(count_table_tax),
                              p.value=input_da_padj_cutoff)
    if (nrow(sigtab) == 0){
      # BUGFIX: the original assigned into the 0-row topTable frame and then
      # set a single column name on a 6-column data.frame, which errors.
      # Return a clean 1x1 message table instead.
      return(data.frame(result = "No differentially abundant items found!",
                        stringsAsFactors = FALSE))
    }
    colnames(sigtab)[which(colnames(sigtab) == "adj.P.Val")] <- "padj"
    colnames(sigtab)[which(colnames(sigtab) == "P.Value")] <- "pValue"
    sigtab <- sigtab[,which(colnames(sigtab) %in% c("padj", "pValue"))]
    sigtab$microbe <- rownames(sigtab)
    rownames(sigtab) <- seq_len(nrow(sigtab))
    sigtab %<>% select(microbe, padj, pValue)
    return(sigtab)
  }
}
/R/differential_abundance.R
permissive
Zinthrow/animalcules
R
false
false
14,841
r
#' Differential abundance analysis #' #' @param MAE A multi-assay experiment object #' @param tax_level The taxon level used for organisms #' @param input_da_condition Which condition is the target condition #' @param input_da_condition_covariate Covariates added to linear function #' @param min_num_filter Minimum number reads mapped to this microbe #' @param input_da_padj_cutoff adjusted pValue cutoff #' @param method choose between DESeq2 and limma #' #' @return A output dataframe #' #' @examples #' data_dir = system.file("extdata/MAE.rds", package = "animalcules") #' toy_data <- readRDS(data_dir) #' differential_abundance(toy_data, #' tax_level="phylum", #' input_da_condition=c("DISEASE"), #' min_num_filter = 2, #' input_da_padj_cutoff = 0.5, #' method = "DESeq2") #' #' #' @importFrom limma lmFit eBayes topTable #' @import DESeq2 #' @import MultiAssayExperiment #' #' @export differential_abundance <- function(MAE, tax_level, input_da_condition = c(), input_da_condition_covariate = NULL, min_num_filter = 5, input_da_padj_cutoff = 0.05, method = "DESeq2") { ## tables from MAE microbe <- MAE[['MicrobeGenetics']] #double bracket subsetting is easier # organism x taxlev tax_table <- as.data.frame(SummarizedExperiment::rowData(microbe)) # sample x condition sam_table <- as.data.frame(SummarizedExperiment::colData(microbe)) # organism x sample counts_table <- as.data.frame(SummarizedExperiment::assays(microbe))[, rownames(sam_table)] if (method == "DESeq2"){ # Sum counts by taxon level count_table_tax <- counts_table %>% upsample_counts(tax_table, tax_level) colnames_tmp <- colnames(count_table_tax) count_table_tax <- t(apply(count_table_tax, 1, as.integer)) colnames(count_table_tax) <- colnames_tmp # sam table sam_table %<>% df_char_to_factor() # build the deseq2 formula if (is.null(input_da_condition_covariate)){ dds_formula = stats::as.formula(paste("~",input_da_condition, sep = " ")) } else{ dds_formula = stats::as.formula(paste("~", paste( 
paste(input_da_condition_covariate, collapse = " + "), input_da_condition, sep = " + "), sep = " ")) } # run DEseq2 dds <- DESeq2::DESeqDataSetFromMatrix(countData = count_table_tax, colData = sam_table, design = dds_formula) dds <- DESeq2::DESeq(dds) # filter microbes with less than min_num_filter keep <- base::rowSums(DESeq2::counts(dds)) >= min_num_filter dds <- dds[keep,] #print(resultsNames(dds)) # check if the condition has multiple levels if (length(resultsNames(dds)) == 2 | length(resultsNames(dds)) - length(input_da_condition_covariate) == 2){ res <- DESeq2::results(dds) # reorder the result res = res[base::order(res$padj, na.last=NA), ] # reformat for reporting if (nrow(res) != 0){ sigtab = res[(res$padj < input_da_padj_cutoff), ] if (nrow(sigtab) == 0){ as.matrix("No differentially abundant items found!") } else{ sigtab = as(sigtab, "data.frame") sigtab$padj <- as.numeric(formatC(sigtab$padj, format = "e", digits = 2)) sigtab$pValue <- as.numeric(formatC(sigtab$pvalue, format = "e", digits = 2)) sigtab$log2FoldChange <- as.numeric(formatC(sigtab$log2FoldChange, format = "e", digits = 2)) sigtab$microbe <- rownames(sigtab) rownames(sigtab) <- seq_len(nrow(sigtab)) sigtab %<>% select(microbe, padj, pValue, log2FoldChange) num.1 <- c() num.2 <- c() # transform label into 1 and 0 label.vec.num = as.character((sam_table %>% select(input_da_condition))[,1]) label.vec.save <- unique(label.vec.num) label.vec.num[label.vec.num == unique(label.vec.num)[1]] <- 1 label.vec.num[label.vec.num != 1] <- 0 label.vec.num <- as.numeric(label.vec.num) for (i in seq_len(nrow(sigtab))){ species.index <- which(rownames(count_table_tax) == sigtab[i,1]) num.1 <- c(num.1, sum((count_table_tax[species.index, which(label.vec.num == 1)] > 0))) num.2 <- c(num.2, sum((count_table_tax[species.index, which(label.vec.num == 0)] > 0))) } sigtab <- cbind(sigtab, num.1) sigtab <- cbind(sigtab, num.2) df.output.prevalence <- percent(round((num.1 + num.2)/ncol(count_table_tax),4)) sigtab <- 
cbind(sigtab, df.output.prevalence) colnames(sigtab)[ncol(sigtab)-2] <- label.vec.save[1] colnames(sigtab)[ncol(sigtab)-1] <- label.vec.save[2] colnames(sigtab)[ncol(sigtab)] <- "prevalence" foldChange <- c() for (i in seq_len(nrow(sigtab))){ foldChange[i] <- round((max(as.numeric(c((sigtab[i,6] / sum(label.vec.num == 0)), (sigtab[i,5] / sum(label.vec.num == 1))))) / min(as.numeric(c((sigtab[i,6] / sum(label.vec.num == 0)), (sigtab[i,5] / sum(label.vec.num == 1)))))), digits = 2) } sigtab <- cbind(sigtab, foldChange) colnames(sigtab)[ncol(sigtab)] <- "Group Size adjusted fold change" # total num num_total <- length(label.vec.num) sigtab[,5] <- paste0(sigtab[,5], "/", sum(label.vec.num == 1)) sigtab[,6] <- paste0(sigtab[,6], "/", sum(label.vec.num == 0)) # if y is numeric, make the output table easier if (is.numeric((sam_table %>% select(input_da_condition))[,1])){ sigtab <- sigtab[,c(1,2,3,4,7)] } return(sigtab) } }else{ return(as.matrix("No differentially abundant items found!")) } }else{ # for multiple levels, # we need to combine results for each comparison sigtab <- NULL label.vec = as.character((sam_table %>% select(input_da_condition))[,1]) combination_mat <- utils::combn(sort(unique(label.vec)), 2) #print(combination_mat) for (j in seq(ncol(combination_mat))){ res <- DESeq2::results(dds, contrast=c(input_da_condition, combination_mat[1,j], combination_mat[2,j])) if (nrow(res) > 0){ res = res[base::order(res$padj, na.last=NA), ] sigtab_tmp = res[(res$padj < input_da_padj_cutoff), ] if (nrow(sigtab_tmp) > 0){ sigtab_tmp = as(sigtab_tmp, "data.frame") sigtab_tmp$padj <- as.numeric(formatC(sigtab_tmp$padj, format = "e", digits = 2)) sigtab_tmp$pValue <- as.numeric(formatC(sigtab_tmp$pvalue, format = "e", digits = 2)) sigtab_tmp$log2FoldChange <- as.numeric(formatC(sigtab_tmp$log2FoldChange, format = "e", digits = 2)) sigtab_tmp$microbe <- rownames(sigtab_tmp) rownames(sigtab_tmp) <- seq_len(nrow(sigtab_tmp)) sigtab_tmp %<>% select(microbe, padj, pValue, 
log2FoldChange) num.1 <- c() num.2 <- c() # transform label into 1 and 0 label.vec.num = as.character((sam_table %>% select(input_da_condition))[,1]) label.vec.num[label.vec.num == combination_mat[1,j]] <- 1 label.vec.num[label.vec.num == combination_mat[2,j]] <- 0 label.vec.num <- as.numeric(label.vec.num) for (i in seq_len(nrow(sigtab_tmp))){ species.index <- which(rownames(count_table_tax) == sigtab_tmp[i,1]) num.1 <- c(num.1, sum((count_table_tax[species.index, which(label.vec.num == 1)] > 0))) num.2 <- c(num.2, sum((count_table_tax[species.index, which(label.vec.num == 0)] > 0))) } sigtab_tmp <- cbind(sigtab_tmp, num.1) sigtab_tmp <- cbind(sigtab_tmp, num.2) df.output.prevalence <- percent(round((num.1 + num.2)/ ncol(count_table_tax),4)) sigtab_tmp <- cbind(sigtab_tmp, df.output.prevalence) colnames(sigtab_tmp)[ncol(sigtab_tmp)-2] <- "experiment" colnames(sigtab_tmp)[ncol(sigtab_tmp)-1] <- "control" colnames(sigtab_tmp)[ncol(sigtab_tmp)] <- "prevalence" foldChange <- c() for (i in seq_len(nrow(sigtab_tmp))){ foldChange[i] <- round((max(as.numeric(c((sigtab_tmp[i,6] / sum(label.vec == combination_mat[2,j])), (sigtab_tmp[i,5] / sum(label.vec == combination_mat[1,j]))))) / min(as.numeric(c((sigtab_tmp[i,6] / sum(label.vec == combination_mat[2,j])), (sigtab_tmp[i,5] / sum(label.vec == combination_mat[1,j])))))), digits = 2) } sigtab_tmp <- cbind(sigtab_tmp, foldChange) colnames(sigtab_tmp)[ncol(sigtab_tmp)] <- "Group Size adjusted fold change" # total num num_total <- length(label.vec.num) sigtab_tmp[,5] <- paste0(combination_mat[1,j], ": ", sigtab_tmp[,5], "/", sum(label.vec == combination_mat[1,j])) sigtab_tmp[,6] <- paste0(combination_mat[2,j], ": ", sigtab_tmp[,6], "/", sum(label.vec == combination_mat[2,j])) # group sigtab_tmp[,9] <- paste0(combination_mat[1,j], " vs. 
", combination_mat[2,j]) colnames(sigtab_tmp)[9] <- "Contrast" # combine sigtab <- rbind(sigtab, sigtab_tmp) } } } return(sigtab) } }else if (method == "limma"){ # Sum counts by taxon level count_table_tax <- counts_table %>% upsample_counts(tax_table, tax_level) %>% counts_to_logcpm() colnames_tmp <- colnames(count_table_tax) count_table_tax <- t(apply(count_table_tax, 1, as.integer)) colnames(count_table_tax) <- colnames_tmp # sam table sam_table %<>% df_char_to_factor() # filter low count microbes count_table_tax <- count_table_tax[rowSums(count_table_tax) >= log10(min_num_filter),] #print(rowSums(count_table_tax)) #print(min_num_filter) #print(rowSums(count_table_tax) >= min_num_filter) #print(count_table_tax) if (is.null(input_da_condition_covariate)){ dds_formula <- stats::as.formula(paste("~",input_da_condition, sep = " ")) design <- model.matrix(dds_formula, sam_table) } else{ dds_formula <- stats::as.formula(paste("~", paste( paste(input_da_condition_covariate, collapse = " + "), input_da_condition, sep = " + "), sep = " ")) design <- model.matrix(dds_formula, sam_table) } #print(design) #print(str(count_table_tax)) fit <- limma::lmFit(count_table_tax, design) ebayes <- limma::eBayes(fit) sigtab <- limma::topTable(ebayes, adjust = "BH", number = nrow(count_table_tax), p.value=input_da_padj_cutoff) if (nrow(sigtab) == 0){ sigtab[1,1] <- "No differentially abundant items found!" colnames(sigtab) <- "result" return(sigtab) } colnames(sigtab)[which(colnames(sigtab) == "adj.P.Val")] <- "padj" colnames(sigtab)[which(colnames(sigtab) == "P.Value")] <- "pValue" sigtab <- sigtab[,which(colnames(sigtab) %in% c("padj", "pValue"))] sigtab$microbe <- rownames(sigtab) rownames(sigtab) <- seq_len(nrow(sigtab)) sigtab %<>% select(microbe, padj, pValue) return(sigtab) } }
# A function to calculate the Biological Homogeneity Index
# (from clusteval package)
#
# statClust:    cluster assignment for each gene (vector, one entry per row
#               of 'annotation' when 'annotation' is a matrix)
# annotation:   either a logical gene x functional-class matrix, or the name
#               of an annotation package to query for GO terms
# names:        gene identifiers, used only in the GO-lookup branch
# category:     GO category filter passed to clValid::matchGO
# dropEvidence: GO evidence codes to drop before matching
#
# Returns the mean per-cluster BHI (fraction of annotated gene pairs within
# a cluster that share at least one functional class).
BHI <- function(statClust, annotation, names = NULL, category = "all",
                dropEvidence = NULL) {
  # Direct computation when a binary annotation matrix is supplied.
  if (is.matrix(annotation)) {
    bhi <- numeric(length(unique(statClust)))
    names(bhi) <- unique(statClust)
    for (k in unique(statClust)) {
      Ck.bhi <- 0
      Ck.idx <- which(statClust == k)
      # Singleton clusters have no pairs and contribute 0.
      if (length(Ck.idx) < 2) {
        next
      }
      for (i in Ck.idx) {
        # Functional classes of gene i (dropped redundant '== TRUE').
        B <- which(annotation[i, ])
        if (length(B) == 0) {
          next
        }
        # Annotations of the other cluster members restricted to classes B.
        annot <- annotation[Ck.idx[Ck.idx != i], B]
        if (length(B) == 1) {
          # One class: each sharing member counts once.
          Ck.bhi <- Ck.bhi + sum(annot)
        } else if (length(B) > 1) {
          if (is.null(dim(annot))) {
            # Single other member: counts once if it shares any class.
            Ck.bhi <- Ck.bhi + sum(sum(annot) > 0)
          } else {
            # Each other member counts once if it shares any class.
            Ck.bhi <- Ck.bhi + sum(rowSums(annot) > 0)
          }
        }
      }
      # nk = number of annotated genes in the cluster; normalize by the
      # number of ordered pairs among them.
      nk <- sum(rowSums(annotation[Ck.idx, ]) > 0)
      if (nk > 1) {
        bhi[k] <- Ck.bhi / (nk * (nk - 1))
      }
    }
    # (Removed dead code: a standard error 'bhi.se' was computed here but
    # never returned or used.)
    return(mean(bhi, na.rm = TRUE))
  }
  # Otherwise treat 'annotation' as an annotation package and fetch GO terms.
  goTerms <- annotate::getGO(names, annotation)
  if (!is.null(dropEvidence)) {
    goTerms <- lapply(goTerms, annotate::dropECode, dropEvidence)
  }
  bhi <- tapply(goTerms, statClust, function(x) clValid::matchGO(x, category))
  mean(bhi, na.rm = TRUE)
}
/R/BHI.R
no_license
minghao2016/MOSS
R
false
false
1,493
r
# A function to calculate the Biological Homogeneity Index # (from clusteval package) BHI <- function(statClust, annotation, names = NULL, category = "all", dropEvidence = NULL) { if (is.matrix(annotation)) { bhi <- numeric(length(unique(statClust))) names(bhi) <- unique(statClust) for (k in unique(statClust)) { Ck.bhi <- 0 Ck.idx <- which(statClust == k) if (length(Ck.idx) < 2) { next } for (i in Ck.idx) { B <- which(annotation[i, ] == TRUE) if (length(B) == 0) { next } annot <- annotation[Ck.idx[Ck.idx != i], B] if (length(B) == 1) { Ck.bhi <- Ck.bhi + sum(annot) } else if (length(B) > 1) { if (is.null(dim(annot))) { Ck.bhi <- Ck.bhi + sum(sum(annot) > 0) } else { Ck.bhi <- Ck.bhi + sum(rowSums(annot) > 0) } } } nk <- sum(rowSums(annotation[Ck.idx, ]) > 0) if (nk > 1) { bhi[k] <- Ck.bhi / (nk * (nk - 1)) } } return(mean(bhi, na.rm = TRUE)) } goTerms <- annotate::getGO(names, annotation) if (!is.null(dropEvidence)) { goTerms <- lapply(goTerms, annotate::dropECode, dropEvidence) } bhi <- tapply(goTerms, statClust, function(x) clValid::matchGO(x, category)) bhi.mean <- mean(bhi, na.rm = TRUE) bhi.se <- stats::sd(bhi, na.rm = TRUE) / sqrt(length(bhi)) return(bhi.mean) }
#' Loads a pkg into the current R environment.
#'
#' @param name Package name.
#' @param webservice From where to get the package name.
#' @return The package's eponymous data object if a package could be loaded,
#'   otherwise NULL.
load_eupath_pkg <- function(name, webservice = "eupathdb") {
  # library() needs character.only=TRUE for a character name; the do.call()
  # indirection achieves the same thing.
  first_try <- try(do.call("library", as.list(name)), silent = TRUE)
  # Use inherits() rather than comparing class() with ==.
  if (inherits(first_try, "try-error")) {
    # 'name' is not an installed package: look up the orgdb package name
    # derived from the EuPathDB metadata and try that instead.
    # NOTE(review): 'metadata' is never used below -- presumably
    # download_eupath_metadata() populates a cache that get_eupath_entry()
    # reads; confirm before removing this call.
    metadata <- download_eupath_metadata(webservice = webservice)
    entry <- get_eupath_entry(name)
    pkg_names <- get_eupath_pkgnames(entry)
    first_pkg <- pkg_names[["orgdb"]]
    tt <- try(do.call("library", as.list(first_pkg)), silent = TRUE)
    if (inherits(tt, "try-error")) {
      message("Did not find the package: ", first_pkg,
              ". Will not be able to do reciprocal hits.")
      message("Perhaps try invoking make_eupath_organismdbi().")
      pkg <- NULL
    } else {
      message("Loaded: ", first_pkg)
      # Annotation packages export a data object named after the package.
      pkg <- get(first_pkg)
    }
    return(pkg)
  } else {
    # 'name' loaded directly; return its eponymous object.
    pkg <- get(name)
    return(pkg)
  }
}  ## End internal function 'load_pkg()'
/R/load_eupath_pkg.R
no_license
khughitt/EuPathDB
R
false
false
1,000
r
#' Loads a pkg into the current R environment. #' #' @param name Package name. #' @param webservice From where to get the package name. load_eupath_pkg <- function(name, webservice = "eupathdb") { first_try <- try(do.call("library", as.list(name)), silent = TRUE) if (class(first_try) == "try-error") { metadata <- download_eupath_metadata(webservice = webservice) entry <- get_eupath_entry(name) pkg_names <- get_eupath_pkgnames(entry) first_pkg <- pkg_names[["orgdb"]] tt <- try(do.call("library", as.list(first_pkg)), silent = TRUE) if (class(tt) == "try-error") { message("Did not find the package: ", first_pkg, ". Will not be able to do reciprocal hits.") message("Perhaps try invoking make_eupath_organismdbi().") pkg <- NULL } else { message("Loaded: ", first_pkg) pkg <- get(first_pkg) } return(pkg) } else { pkg <- get(name) return(pkg) } } ## End internal function 'load_pkg()'
\encoding{utf8} \name{isCOP.PQD} \alias{isCOP.PQD} \title{The Positively Quadrant Dependency State of a Copula} \description{ Numerically determine the global property of the \emph{positively quadrant dependency} (PQD) characteristic of a copula as described by Nelsen (2006, p. 188). The random variables \eqn{X} and \eqn{Y} are PQD if for all \eqn{(x,y)} in \eqn{\mathcal{R}^2} when \eqn{H(x,y) \ge F(x)G(x)} for all \eqn{(x,y)} in \eqn{\mathcal{R}^2} and thus by the copula \eqn{\mathbf{C}(u,v) \ge uv} for all \eqn{(u,v)} in \eqn{\mathcal{I}^2}. Alternatively, this means that \eqn{\mathbf{C}(u,v) \ge \mathbf{\Pi}}, and thus it can be said that it is globally \dQuote{greater} than independence (\eqn{uv = \Pi}; \code{\link{P}}). Nelsen (2006) shows that a copula is PQD when \deqn{0 \le \beta_\mathbf{C} \mbox{,\ } 0 \le \gamma_\mathbf{C}\mbox{,\ and\ } 0 \le \rho_\mathbf{C} \le 3\tau_\mathbf{C}\mbox{,}} where \eqn{\beta_\mathbf{C}}, \eqn{\gamma_\mathbf{C}}, \eqn{\rho_\mathbf{C}}, and \eqn{\tau_\mathbf{C}} are various copula measures of association or concordance that are respectively described in \code{\link{blomCOP}}, \code{\link{giniCOP}}, \code{\link{rhoCOP}}, and \code{\link{tauCOP}}. The concept of negatively quadrant dependency (NQD) is the reverse: \eqn{\mathbf{C}(u,v) \le \mathbf{\Pi}} for all \eqn{(u,v)} in \eqn{\mathcal{I}^2}; so NQD is globally \dQuote{smaller} than independence. Conceptually, PQD is related to the probability that two random variables are simultaneously small (or simultaneously large) is at least as great as it would be if they were \emph{independent}. The graph of a PQD copula lies on or above the copulatic surface of the \emph{independence copula} \eqn{\mathbf{\Pi}}, and conversely a NQD copula lies on or below \eqn{\mathbf{\Pi}}. Albeit a \dQuote{global} property of a copula, there can be \dQuote{local} variations in the PQD/NQD state. 
Points in \eqn{\mathcal{I}^2} where \eqn{\mathbf{C}(u,v) - \mathbf{\Pi} \ge 0} are locally PQD, whereas points in \eqn{\mathcal{I}^2} where \eqn{\mathbf{C}(u,v) - \mathbf{\Pi} \le 0} are locally NQD.

Lastly, readers are directed to the last examples in \code{\link{wolfCOP}} because those examples involve the copulatic difference from independence \eqn{\mathbf{C}(u,v) - \mathbf{\Pi}} with 3-D renderings.
}
\usage{
isCOP.PQD(cop=NULL, para=NULL, uv=NULL, empirical=FALSE, verbose=TRUE, ...)
}
\arguments{
  \item{cop}{A copula function;}
  \item{para}{Vector of parameters or other data structure, if needed, to pass to the copula;}
  \item{uv}{An optional \R \code{data.frame} of \eqn{U} and \eqn{V} nonexceedance probabilities \eqn{u} and \eqn{v} for the random variables \eqn{X} and \eqn{Y}. This argument triggers different value return behavior (see \bold{Value});}
  \item{empirical}{A logical that will use sample versions for \emph{Gini Gamma}, \emph{Spearman Rho}, and \emph{Kendall Tau}. This feature is \emph{only} applicable if the copula is empirical and therefore the \code{para} argument is the \code{data.frame} of \eqn{u} and \eqn{v}, which will be passed along to sample version functions instead of copula (see \bold{Note});}
  \item{verbose}{A logical that will report the four concordance measures; and}
  \item{...}{Additional arguments to pass, which are then passed to subordinate functions.}
}
\value{
  If \code{uv=NULL} then a logical for the global property of PQD is returned but if argument \code{uv} is a \code{data.frame}, then an \R \code{list} is returned, and that list holds the global condition in \code{global.PQD} and local condition assessments in \code{local.PQD} and \code{local.NQD}.
}
The user can use \code{...} to set the \code{delta} argument for \code{\link{giniCOP}}, \code{\link{rhoCOP}}, and (or) \code{\link{tauCOP}}. This function is not guaranteed to work using a \emph{bivariate empirical copula} such as the following operation: \code{copPQD(cop=EMPIRcop, para=the.data)}. An evidently open problem for \pkg{copBasic} is how to support PQD assessment (either globally or locally) for empirical copulas. The \eqn{\tau_\mathbf{C}} for the bivariate empirical copula example \code{brute=TRUE|FALSE} to unity and \eqn{\gamma_\mathbf{C}} and \eqn{\rho_\mathbf{C}} reach maximum number of subdivisions on the numerical integration and thus fail. If an empirical bivariate copula is \dQuote{Tau'd} to itself, is \eqn{\tau_\mathbf{C} \equiv 1} guaranteed? The \eqn{\tau_\mathbf{C}} computation relies on numerical partial derivatives of the copula, whereas the \eqn{\gamma_\mathbf{C}} and \eqn{\rho_\mathbf{C}} use the copula itself. It seems in the end that use of sample versions of \eqn{\gamma_\mathbf{C}}, \eqn{\rho_\mathbf{C}}, and \eqn{\tau_\mathbf{C}} would be appropriate and leave the \eqn{\beta_\mathbf{C}} as either copula or direct sample computation (see \bold{Examples}). 
\emph{SPECIAL DEMONSTRATION 1}---Given the following,
\preformatted{
  para <- list(cop1=PLACKETTcop, cop2=PLACKETTcop,
               para1=c(14.5),para2=c(1.45), alpha=0.51, beta=0.15,
               kappa=0.45, gamma=0.78)
  D <- simCOP(n=500, cop=composite3COP, para=para, cex=0.5, col=1, pch=16)
}
the two different call types to \code{isCOP.PQD} for an empirical copula are illustrative:
\preformatted{
  global.only <- isCOP.PQD(cop=EMPIRcop, para=D, empirical=TRUE)
}
and
\preformatted{
  PQD.list <- isCOP.PQD(cop=EMPIRcop, para=D, empirical=TRUE, uv=D)
  points(D, col=PQD.list$local.PQD+2, lwd=2) # red (if present) is local NQD
}
which in the former only returns the global PQD and the latter returns an \R \code{list} with global (\code{global.PQD}), local (\code{local.PQD} as well as \code{local.NQD}), and the four statistics (\code{beta} \eqn{\beta_\mathbf{C}}, \code{gamma} \eqn{\gamma_\mathbf{C}}, \code{rho} \eqn{\rho_\mathbf{C}}, \code{tau} \eqn{\tau_\mathbf{C}}) used to determine global PQD.

\emph{SPECIAL DEMONSTRATION 2}---Lastly, the \code{ctype=}\code{"bernstein"} argument to the empirical copula can be used. Repeated iterations of the following will show that local quadrant dependency can appear slightly different when the \code{bernstein} argument is present. The simulation sample size is reduced considerably for this second example because of the CPU effort triggered by the \emph{Bernstein extension} (see \code{\link{EMPIRcop}}) having been turned on.
\preformatted{ para <- list(cop1=PLACKETTcop, cop2=PLACKETTcop, para1=14.5, para2=1.45, alpha=0.51, beta=0.15, kappa=0.45, gamma=0.78) D <- simCOP(n=50, cop=composite3COP, para=para, cex=0.5, col=1, pch=16) PQD.A<- isCOP.PQD(cop=EMPIRcop, para=D, empirical=TRUE, uv=D) points(D, col=PQD.A$local.PQD+2, lwd=2) # red (if present) is local NQD PQD.B<- isCOP.PQD(cop=EMPIRcop,para=D,empirical=TRUE,uv=D,ctype="bernstein") points(D, col=PQD.B$local.PQD+2, lwd=1, pch=3, cex=1.5) } } \references{ Nelsen, R.B., 2006, An introduction to copulas: New York, Springer, 269 p. } \author{W.H. Asquith} \seealso{\code{\link{blomCOP}}, \code{\link{giniCOP}}, \code{\link{rhoCOP}}, \code{\link{tauCOP}}, \code{\link{isCOP.LTD}}, \code{\link{isCOP.RTI}}} \examples{ \dontrun{ isCOP.PQD(cop=PSP) # TRUE} \dontrun{ # Example concerning Empirical Bivariate Copula and sample versions for comparison. set.seed(10); n <- 1000 para <- list(cop1=PLACKETTcop, cop2=PLACKETTcop, para1=0.145, para2=1.45, alpha=0.81, beta=0.8) D <- simCOP(n=n, cop=composite2COP, para=para, cex=0.5, col=rgb(0,0,0,0.2), pch=16) #tauCOP(cop=EMPIRcop, para=D) # ??? but == 1 cor(D$U, D$V, method="kendall") # -0.3224705 blomCOP(cop=EMPIRcop, para=D) # -0.332 giniCOP(cop=EMPIRcop, para=D) # -0.3692037 GINI <- sum(abs(rank(D$U)+rank(D$V)-n-1)) - sum(abs(rank(D$U)-rank(D$V))) print(GINI/as.integer(n^2/2)) # -0.369996 rhoCOP(cop=EMPIRcop, para=D) # ??? but fails cor(D$U, D$V, method="spearman") # -0.456694 lmomco::lcomoms2(D)$T2 # 1.0000000 -0.4568357 # -0.4567859 1.0000000} } \keyword{copula (characteristics)} \keyword{copula (properties)}
/man/isCOP.PQD.Rd
no_license
cran/copBasic
R
false
false
8,173
rd
\encoding{utf8}
\name{isCOP.PQD}
\alias{isCOP.PQD}
\title{The Positively Quadrant Dependency State of a Copula}
\description{
Numerically determine the global property of the \emph{positively quadrant dependency} (PQD) characteristic of a copula as described by Nelsen (2006, p. 188). The random variables \eqn{X} and \eqn{Y} are PQD if \eqn{H(x,y) \ge F(x)G(y)} for all \eqn{(x,y)} in \eqn{\mathcal{R}^2} and thus by the copula \eqn{\mathbf{C}(u,v) \ge uv} for all \eqn{(u,v)} in \eqn{\mathcal{I}^2}. Alternatively, this means that \eqn{\mathbf{C}(u,v) \ge \mathbf{\Pi}}, and thus it can be said that it is globally \dQuote{greater} than independence (\eqn{uv = \Pi}; \code{\link{P}}).

Nelsen (2006) shows that a copula is PQD when
\deqn{0 \le \beta_\mathbf{C} \mbox{,\ } 0 \le \gamma_\mathbf{C}\mbox{,\ and\ } 0 \le \rho_\mathbf{C} \le 3\tau_\mathbf{C}\mbox{,}}
where \eqn{\beta_\mathbf{C}}, \eqn{\gamma_\mathbf{C}}, \eqn{\rho_\mathbf{C}}, and \eqn{\tau_\mathbf{C}} are various copula measures of association or concordance that are respectively described in \code{\link{blomCOP}}, \code{\link{giniCOP}}, \code{\link{rhoCOP}}, and \code{\link{tauCOP}}.

The concept of negatively quadrant dependency (NQD) is the reverse: \eqn{\mathbf{C}(u,v) \le \mathbf{\Pi}} for all \eqn{(u,v)} in \eqn{\mathcal{I}^2}; so NQD is globally \dQuote{smaller} than independence.

Conceptually, PQD means that the probability that two random variables are simultaneously small (or simultaneously large) is at least as great as it would be if they were \emph{independent}. The graph of a PQD copula lies on or above the copulatic surface of the \emph{independence copula} \eqn{\mathbf{\Pi}}, and conversely a NQD copula lies on or below \eqn{\mathbf{\Pi}}.

Albeit a \dQuote{global} property of a copula, there can be \dQuote{local} variations in the PQD/NQD state.
Points in \eqn{\mathcal{I}^2} where \eqn{\mathbf{C}(u,v) - \mathbf{\Pi} \ge 0} are locally PQD, whereas points in \eqn{\mathcal{I}^2} where \eqn{\mathbf{C}(u,v) - \mathbf{\Pi} \le 0} are locally NQD. Lastly, readers are directed to the last examples in \code{\link{wolfCOP}} because those examples involve the copulatic difference from independence \eqn{\mathbf{C}(u,v) - \mathbf{\Pi}} with 3-D renderings.
}
\usage{
isCOP.PQD(cop=NULL, para=NULL, uv=NULL, empirical=FALSE, verbose=TRUE, ...)
}
\arguments{
  \item{cop}{A copula function;}
  \item{para}{Vector of parameters or other data structure, if needed, to pass to the copula;}
  \item{uv}{An optional \R \code{data.frame} of \eqn{U} and \eqn{V} nonexceedance probabilities \eqn{u} and \eqn{v} for the random variables \eqn{X} and \eqn{Y}. This argument triggers different value return behavior (see \bold{Value});}
  \item{empirical}{A logical that will use sample versions for \emph{Gini Gamma}, \emph{Spearman Rho}, and \emph{Kendall Tau}. This feature is \emph{only} applicable if the copula is empirical and therefore the \code{para} argument is the \code{data.frame} of \eqn{u} and \eqn{v}, which will be passed along to sample version functions instead of copula (see \bold{Note});}
  \item{verbose}{A logical that will report the four concordance measures; and}
  \item{...}{Additional arguments to pass, which are then passed to subordinate functions.}
}
\value{
  If \code{uv=NULL} then a logical for the global property of PQD is returned but if argument \code{uv} is a \code{data.frame}, then an \R \code{list} is returned, and that list holds the global condition in \code{global.PQD} and local condition assessments in \code{local.PQD} and \code{local.NQD}.
}
\note{
The function \code{isCOP.PQD} will try \code{brute} force computations if subordinate calls to one or more functions fails.
The user can use \code{...} to set the \code{delta} argument for \code{\link{giniCOP}}, \code{\link{rhoCOP}}, and (or) \code{\link{tauCOP}}. This function is not guaranteed to work using a \emph{bivariate empirical copula} such as the following operation: \code{copPQD(cop=EMPIRcop, para=the.data)}. An evidently open problem for \pkg{copBasic} is how to support PQD assessment (either globally or locally) for empirical copulas. The \eqn{\tau_\mathbf{C}} for the bivariate empirical copula example \code{brute=TRUE|FALSE} to unity and \eqn{\gamma_\mathbf{C}} and \eqn{\rho_\mathbf{C}} reach maximum number of subdivisions on the numerical integration and thus fail. If an empirical bivariate copula is \dQuote{Tau'd} to itself, is \eqn{\tau_\mathbf{C} \equiv 1} guaranteed? The \eqn{\tau_\mathbf{C}} computation relies on numerical partial derivatives of the copula, whereas the \eqn{\gamma_\mathbf{C}} and \eqn{\rho_\mathbf{C}} use the copula itself. It seems in the end that use of sample versions of \eqn{\gamma_\mathbf{C}}, \eqn{\rho_\mathbf{C}}, and \eqn{\tau_\mathbf{C}} would be appropriate and leave the \eqn{\beta_\mathbf{C}} as either copula or direct sample computation (see \bold{Examples}). 
\emph{SPECIAL DEMONSTRATION 1}---Given the following,
\preformatted{
  para <- list(cop1=PLACKETTcop, cop2=PLACKETTcop,
               para1=c(14.5),para2=c(1.45), alpha=0.51, beta=0.15,
               kappa=0.45, gamma=0.78)
  D <- simCOP(n=500, cop=composite3COP, para=para, cex=0.5, col=1, pch=16)
}
the two different call types to \code{isCOP.PQD} for an empirical copula are illustrative:
\preformatted{
  global.only <- isCOP.PQD(cop=EMPIRcop, para=D, empirical=TRUE)
}
and
\preformatted{
  PQD.list <- isCOP.PQD(cop=EMPIRcop, para=D, empirical=TRUE, uv=D)
  points(D, col=PQD.list$local.PQD+2, lwd=2) # red (if present) is local NQD
}
which in the former only returns the global PQD and the latter returns an \R \code{list} with global (\code{global.PQD}), local (\code{local.PQD} as well as \code{local.NQD}), and the four statistics (\code{beta} \eqn{\beta_\mathbf{C}}, \code{gamma} \eqn{\gamma_\mathbf{C}}, \code{rho} \eqn{\rho_\mathbf{C}}, \code{tau} \eqn{\tau_\mathbf{C}}) used to determine global PQD.

\emph{SPECIAL DEMONSTRATION 2}---Lastly, the \code{ctype=}\code{"bernstein"} argument to the empirical copula can be used. Repeated iterations of the following will show that local quadrant dependency can appear slightly different when the \code{bernstein} argument is present. The simulation sample size is reduced considerably for this second example because of the CPU effort triggered by the \emph{Bernstein extension} (see \code{\link{EMPIRcop}}) having been turned on.
\preformatted{ para <- list(cop1=PLACKETTcop, cop2=PLACKETTcop, para1=14.5, para2=1.45, alpha=0.51, beta=0.15, kappa=0.45, gamma=0.78) D <- simCOP(n=50, cop=composite3COP, para=para, cex=0.5, col=1, pch=16) PQD.A<- isCOP.PQD(cop=EMPIRcop, para=D, empirical=TRUE, uv=D) points(D, col=PQD.A$local.PQD+2, lwd=2) # red (if present) is local NQD PQD.B<- isCOP.PQD(cop=EMPIRcop,para=D,empirical=TRUE,uv=D,ctype="bernstein") points(D, col=PQD.B$local.PQD+2, lwd=1, pch=3, cex=1.5) } } \references{ Nelsen, R.B., 2006, An introduction to copulas: New York, Springer, 269 p. } \author{W.H. Asquith} \seealso{\code{\link{blomCOP}}, \code{\link{giniCOP}}, \code{\link{rhoCOP}}, \code{\link{tauCOP}}, \code{\link{isCOP.LTD}}, \code{\link{isCOP.RTI}}} \examples{ \dontrun{ isCOP.PQD(cop=PSP) # TRUE} \dontrun{ # Example concerning Empirical Bivariate Copula and sample versions for comparison. set.seed(10); n <- 1000 para <- list(cop1=PLACKETTcop, cop2=PLACKETTcop, para1=0.145, para2=1.45, alpha=0.81, beta=0.8) D <- simCOP(n=n, cop=composite2COP, para=para, cex=0.5, col=rgb(0,0,0,0.2), pch=16) #tauCOP(cop=EMPIRcop, para=D) # ??? but == 1 cor(D$U, D$V, method="kendall") # -0.3224705 blomCOP(cop=EMPIRcop, para=D) # -0.332 giniCOP(cop=EMPIRcop, para=D) # -0.3692037 GINI <- sum(abs(rank(D$U)+rank(D$V)-n-1)) - sum(abs(rank(D$U)-rank(D$V))) print(GINI/as.integer(n^2/2)) # -0.369996 rhoCOP(cop=EMPIRcop, para=D) # ??? but fails cor(D$U, D$V, method="spearman") # -0.456694 lmomco::lcomoms2(D)$T2 # 1.0000000 -0.4568357 # -0.4567859 1.0000000} } \keyword{copula (characteristics)} \keyword{copula (properties)}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/allMethods.R
\docType{methods}
\name{nrow-Rules}
\alias{nrow-Rules}
\alias{nrow,Rules-method}
\title{Return the number of rows of the underlying matrix in a Rules object.}
\usage{
\S4method{nrow}{Rules}(x)
}
\arguments{
\item{x}{Object of class Rules}
}
\value{
number of rows / total number of possible items in the Rules object.
}
\description{
Although a Rules object does have a left-hand side and a right-hand side, the number
of rows for both represents the number of items and therefore should be the same
for both sides. This function simply uses the left-hand sides as a proxy.
}
/man/nrow-Rules.Rd
no_license
abuchmueller/Rpriori
R
false
true
661
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/allMethods.R
\docType{methods}
\name{nrow-Rules}
\alias{nrow-Rules}
\alias{nrow,Rules-method}
\title{Return the number of rows of the underlying matrix in a Rules object.}
\usage{
\S4method{nrow}{Rules}(x)
}
\arguments{
\item{x}{Object of class Rules}
}
\value{
number of rows / total number of possible items in the Rules object.
}
\description{
Although a Rules object does have a left-hand side and a right-hand side, the number
of rows for both represents the number of items and therefore should be the same
for both sides. This function simply uses the left-hand sides as a proxy.
}
##-- Shiny UI for the Fisheries Trade app ----
## Assembles the page from tab objects (`build_data`, `mapping`, ...) that
## must be defined before this file is evaluated (e.g. sourced in global.R).
shinyUI(
  fluidPage(

    ##-- Favicon ----
    tags$head(
      #-- biblio js ----
      tags$link(rel="stylesheet", type = "text/css",
                href = "https://use.fontawesome.com/releases/v5.8.2/css/all.css"),
      tags$link(rel="stylesheet", type = "text/css",
                href = "https://fonts.googleapis.com/css?family=Open+Sans|Source+Sans+Pro")
    ),
    # tags$head(tags$style(
    #   HTML("input[type='search']:disabled {visibility:hidden}")
    # )),
    ##-- Logo ----
    list(tags$head(HTML('<link rel="icon", href="img/logo.png", type="image/png" />'))),
    ##-- Header ----
    navbarPage(title = div(img(src = "img/logo.png", height = "75px"),
                           style = "padding-left:100px;"),
               windowTitle = "Fisheries Trade",
               id = "navbar",
               selected = "build_data",
               theme = "styles.css",
               ## use TRUE rather than the shorthand T, which is reassignable
               fluid = TRUE,
               ##-- Tabs ----
               build_data,
               mapping,
               imputation,
               outlier_detection,
               mirroring,
               raw_data,
               about
    ),
    ##-- Footer ----
    div(class = "footer",
        style = "z-index: 200;",
        includeHTML("html/footer.html")
    )
  )
)
/shinyFisheriesTrade_prod/ui.R
permissive
SWS-Methodology/faoFisheriesTrade
R
false
false
1,359
r
##-- Shiny UI for the Fisheries Trade app ----
## Assembles the page from tab objects (`build_data`, `mapping`, ...) that
## must be defined before this file is evaluated (e.g. sourced in global.R).
shinyUI(
  fluidPage(

    ##-- Favicon ----
    tags$head(
      #-- biblio js ----
      tags$link(rel="stylesheet", type = "text/css",
                href = "https://use.fontawesome.com/releases/v5.8.2/css/all.css"),
      tags$link(rel="stylesheet", type = "text/css",
                href = "https://fonts.googleapis.com/css?family=Open+Sans|Source+Sans+Pro")
    ),
    # tags$head(tags$style(
    #   HTML("input[type='search']:disabled {visibility:hidden}")
    # )),
    ##-- Logo ----
    list(tags$head(HTML('<link rel="icon", href="img/logo.png", type="image/png" />'))),
    ##-- Header ----
    navbarPage(title = div(img(src = "img/logo.png", height = "75px"),
                           style = "padding-left:100px;"),
               windowTitle = "Fisheries Trade",
               id = "navbar",
               selected = "build_data",
               theme = "styles.css",
               ## use TRUE rather than the shorthand T, which is reassignable
               fluid = TRUE,
               ##-- Tabs ----
               build_data,
               mapping,
               imputation,
               outlier_detection,
               mirroring,
               raw_data,
               about
    ),
    ##-- Footer ----
    div(class = "footer",
        style = "z-index: 200;",
        includeHTML("html/footer.html")
    )
  )
)
# Resolve the column and row selections of a `cells_data` location against
# the actual data. Column expressions become column indices into `data_df`;
# row expressions become row indices matched against the stub's rownames.
# The returned location is tagged with class "resolved".
resolve_location.cells_data <- function(loc, data_attr) {

  data_df <- data_attr[["data_df"]]
  stub_df <- data_attr[["stub_df"]]

  # `!!` injects the expression captured when the location was created so
  # it can be evaluated against the data
  loc$columns <-
    resolve_vars_idx(
      var_expr = !!loc[["columns"]],
      data = data_df
    )

  loc$rows <-
    resolve_data_vals_idx(
      var_expr = !!loc[["rows"]],
      data = data_df,
      vals = stub_df$rowname
    )

  class(loc) <- c("resolved", class(loc))
  loc
}

# Translate a `cells_data` location into coordinates of the final output
# table: column/row indices are remapped through the reordering data frames
# so they address displayed positions rather than original data positions.
# The returned location is tagged with class "output_relative".
to_output_location.cells_data <- function(loc, data_attr) {

  loc <- resolve_location(loc, data_attr)

  columns_df <-
    get_column_reorder_df(
      cols_df = data_attr$cols_df,
      boxh_df = data_attr$boxh_df
    )

  rows_df <-
    get_row_reorder_df(
      arrange_groups = data_attr$arrange_groups,
      stub_df = data_attr$stub_df
    )

  # We shouldn't need to do this, but output_df doesn't match up exactly to
  # the colnum_final values due to groupnames/rownames
  loc$colnames <- colnames(data_attr[["data_df"]])[loc$columns]

  loc$columns <- columns_df$colnum_final[loc$columns]
  loc$rows <- rows_df$rownum_final[loc$rows]

  class(loc) <- c("output_relative", class(loc))
  loc
}

# Apply `fn` to the body cells addressed by a `cells_data` location,
# rewriting the formatted values in `data_attr$output_df` in place and
# returning the updated `data_attr`.
text_transform_at_location.cells_data <- function(loc, data_attr,
                                                  fn = identity) {

  loc <- to_output_location(loc, data_attr)

  output_df <- data_attr[["output_df"]]

  # Do one vectorized operation per column; skip columns not present in
  # the output (e.g. columns that were dropped during rendering)
  for (col in loc$colnames) {
    if (col %in% colnames(output_df)) {
      output_df[[col]][loc$rows] <- fn(output_df[[col]][loc$rows])
    }
  }

  data_attr$output_df <- output_df
  data_attr
}
/R/cells_data.R
permissive
gracelawley/gt
R
false
false
1,636
r
# Resolve the column and row selections of a `cells_data` location against
# the actual data. Column expressions become column indices into `data_df`;
# row expressions become row indices matched against the stub's rownames.
# The returned location is tagged with class "resolved".
resolve_location.cells_data <- function(loc, data_attr) {

  data_df <- data_attr[["data_df"]]
  stub_df <- data_attr[["stub_df"]]

  # `!!` injects the expression captured when the location was created so
  # it can be evaluated against the data
  loc$columns <-
    resolve_vars_idx(
      var_expr = !!loc[["columns"]],
      data = data_df
    )

  loc$rows <-
    resolve_data_vals_idx(
      var_expr = !!loc[["rows"]],
      data = data_df,
      vals = stub_df$rowname
    )

  class(loc) <- c("resolved", class(loc))
  loc
}

# Translate a `cells_data` location into coordinates of the final output
# table: column/row indices are remapped through the reordering data frames
# so they address displayed positions rather than original data positions.
# The returned location is tagged with class "output_relative".
to_output_location.cells_data <- function(loc, data_attr) {

  loc <- resolve_location(loc, data_attr)

  columns_df <-
    get_column_reorder_df(
      cols_df = data_attr$cols_df,
      boxh_df = data_attr$boxh_df
    )

  rows_df <-
    get_row_reorder_df(
      arrange_groups = data_attr$arrange_groups,
      stub_df = data_attr$stub_df
    )

  # We shouldn't need to do this, but output_df doesn't match up exactly to
  # the colnum_final values due to groupnames/rownames
  loc$colnames <- colnames(data_attr[["data_df"]])[loc$columns]

  loc$columns <- columns_df$colnum_final[loc$columns]
  loc$rows <- rows_df$rownum_final[loc$rows]

  class(loc) <- c("output_relative", class(loc))
  loc
}

# Apply `fn` to the body cells addressed by a `cells_data` location,
# rewriting the formatted values in `data_attr$output_df` in place and
# returning the updated `data_attr`.
text_transform_at_location.cells_data <- function(loc, data_attr,
                                                  fn = identity) {

  loc <- to_output_location(loc, data_attr)

  output_df <- data_attr[["output_df"]]

  # Do one vectorized operation per column; skip columns not present in
  # the output (e.g. columns that were dropped during rendering)
  for (col in loc$colnames) {
    if (col %in% colnames(output_df)) {
      output_df[[col]][loc$rows] <- fn(output_df[[col]][loc$rows])
    }
  }

  data_attr$output_df <- output_df
  data_attr
}
# Helpers for duckdb R-package regression benchmarks: install package
# versions from GitHub, generate test data, and run/plot bench::mark
# comparisons across those versions.

# Fail fast if any package these helpers depend on is missing.
local({
  pkgs <- c("gert", "remotes", "callr", "rlang", "bench", "ggplot2", "tidyr")
  avail <- pkgs %in% installed.packages()
  if (!all(avail)) {
    stop("Package(s) ", paste(pkgs[!avail], collapse = ", "), " are required. ",
         "Please install.")
  }
})

# Clone a GitHub repo and install the R package found in `subdir` into the
# library `lib`. `branch` selects a branch at clone time; `ref` checks out an
# arbitrary commit/tag on a throwaway local branch that is cleaned up on exit.
install_gh <- function(lib, repo = "duckdb/duckdb", branch = NULL, ref = NULL,
                       subdir = "tools/rpkg", update_deps = FALSE) {
  dir <- gert::git_clone(paste0("https://github.com/", repo), tempfile(),
                         branch = branch)
  # remove the clone when done
  on.exit(unlink(dir, recursive = TRUE))
  if (!is.null(ref)) {
    current <- gert::git_branch(repo = dir)
    branch <- paste0("bench_", rand_string(alphabet = c(letters, 0:9)))
    gert::git_branch_create(branch, ref = ref, checkout = TRUE, repo = dir)
    # restore the original branch and delete the temporary one BEFORE the
    # clone is unlinked (after = FALSE prepends this handler)
    on.exit({
      gert::git_branch_checkout(current, repo = dir)
      gert::git_branch_delete(branch, repo = dir)
    }, add = TRUE, after = FALSE)
  }
  pkg <- file.path(dir, subdir)
  arg <- c(pkg, paste0("--", c(paste0("library=", lib), "no-multiarch")))
  if (isTRUE(update_deps)) {
    remotes::install_deps(pkg, upgrade = "always")
  }
  # install via R CMD INSTALL in a subprocess
  res <- callr::rcmd_safe("INSTALL", arg, show = TRUE, fail_on_status = TRUE)
  invisible(NULL)
}

# Install a single version spec (a list of install_gh() arguments).
install_one <- function(what, ...) do.call(install_gh, c(what, list(...)))

# Install every version spec in a list.
install_all <- function(lst, ...) invisible(lapply(lst, install_one, ...))

# Create the given directories (joined from path components) if missing;
# returns the paths.
dir_create <- function(paths = tempfile(), ...) {
  paths <- file.path(paths, ...)
  for (path in paths) {
    if (!dir.exists(path)) {
      dir.create(path)
    }
  }
  paths
}

# Random identifier string of the given length drawn from `alphabet`.
rand_string <- function(length = 8, alphabet = c(letters, LETTERS, 0:9)) {
  paste0(sample(alphabet, length, replace = TRUE), collapse = "")
}

# Build an `nrow`-row benchmark data frame with integer, double, character,
# and factor columns.
setup_data <- function(nrow = 1e3) {
  rand_strings <- function(n, ...) {
    vapply(integer(n), function(i, ...) rand_string(...), character(1L), ...)
  }
  rand_fact <- function(n, levels = rand_strings(5)) {
    # hand-built factor: integer codes plus levels/class attributes
    structure(
      sample(length(levels), n, TRUE),
      levels = levels,
      class = "factor"
    )
  }
  data.frame(
    v1 = sample.int(5, nrow, TRUE),
    v2 = sample.int(nrow, nrow),
    v3 = runif(nrow, max = 100),
    v4 = rand_strings(nrow),
    v5 = rand_fact(nrow)
  )
}

# Benchmark payload: write a data frame to a table, then drop it.
write_df <- function(con, dat, tbl = rand_string()) {
  dbWriteTable(con, tbl, dat)
  dbRemoveTable(con, tbl)
}

# Benchmark payload: register a data frame as a virtual table, then
# unregister it.
register_df <- function(con, dat, tbl = rand_string()) {
  duckdb_register(con, tbl, dat)
  duckdb_unregister(con, tbl)
}

# Benchmark payload: register an Arrow object, then unregister it.
register_arrow <- function(con, dat, tbl = rand_string()) {
  duckdb_register_arrow(con, tbl, dat)
  duckdb_unregister_arrow(con, tbl)
}

# Benchmark payload: filtered SELECT on column v3.
select_some <- function(con, tbl) {
  dbGetQuery(con,
    paste("SELECT * FROM", dbQuoteIdentifier(con, tbl),
          "WHERE", dbQuoteIdentifier(con, "v3"), "> 50")
  )
}

# Run the expressions in `...` (bench::mark style) against every installed
# version in `versions`, for every parameter combination in `grid`, each in
# a fresh callr subprocess using that version's library path. `setup` and
# `teardown` are quoted and evaluated around each run; `helpers` are files
# sourced and `pkgs` packages attached in the subprocess. Returns a combined
# bench_mark object with `version` and grid columns attached.
bench_mark <- function(versions, ..., grid = NULL, setup = NULL,
                       teardown = NULL, seed = NULL, helpers = NULL,
                       pkgs = NULL, reps = 1L) {
  # Runs all grid combinations for one version inside a subprocess whose
  # .libPaths() points at that version's library.
  eval_versions <- function(lib, vers, args) {
    eval_grid <- function(args, ...) {
      # One benchmark run: bind grid parameters into a data mask, evaluate
      # setup/teardown around bench::mark, and attach identifying columns.
      eval_one <- function(args, setup, teardown, exprs, nreps, seed, helpers,
                           libs, vers) {
        if (!is.null(seed)) set.seed(seed)
        for (helper in helpers) source(helper)
        for (lib in libs) library(lib, character.only = TRUE)
        env <- rlang::new_data_mask(new.env(parent = emptyenv()))
        for (nme in names(args)) {
          assign(nme, args[[nme]], envir = env)
        }
        # progress line: version plus the current parameter combination
        message(vers, appendLF = !length(args))
        if (length(args)) {
          message("; ", paste0(names(args), ": ", args, collapse = ", "))
        }
        rlang::eval_tidy(setup, data = env)
        on.exit(rlang::eval_tidy(teardown, data = env))
        res <- bench::mark(iterations = nreps, check = FALSE, exprs = exprs,
                           env = env, time_unit = "s")
        # replicate the version/parameter labels across the result rows and
        # replace the `expression` column with the expression names
        arg <- lapply(c(list(version = vers), args), rep, nrow(res))
        ind <- colnames(res) == "expression"
        arg <- c(list(expression = names(res[[which(ind)]])), arg)
        cbind(arg, res[!ind])
      }
      if (length(args)) {
        do.call(rbind, lapply(args, eval_one, ...))
      } else {
        eval_one(args, ...)
      }
    }
    callr::r(eval_grid, c(args, vers), libpath = lib, show = TRUE)
  }
  # capture expressions and setup/teardown unevaluated; expand the grid into
  # one parameter list per combination
  exprs <- rlang::exprs(...)
  setup <- rlang::enquo(setup)
  teardown <- rlang::enquo(teardown)
  params <- expand.grid(grid, stringsAsFactors = FALSE)
  params <- split(params, seq_len(nrow(params)))
  res <- Map(
    eval_versions,
    vapply(versions, `[[`, character(1L), "lib"),
    names(versions),
    MoreArgs = list(
      list(params, setup, teardown, exprs, reps, seed, helpers, pkgs)
    )
  )
  bench::as_bench_mark(do.call(rbind, res))
}

# Plot a bench_mark result produced by bench_mark(): one panel per
# expression/parameter combination, timings by version. When `check` is not
# FALSE, each panel is shaded by whether version `new`'s median time falls
# within `threshold` quantiles of version `ref`'s timings (faster/slower/same).
# NOTE(review): type = "ridge" uses ggridges, which is not in the pkgs list
# checked at the top of this file -- verify it is installed when used.
bench_plot <- function(object, type = c("beeswarm", "jitter", "ridge",
                                        "boxplot", "violin"),
                       check = FALSE, ref, new, threshold, ...) {
  # classify the `new` version's median against `ref`'s quantile band
  within_thresh <- function(x, ref, new, thresh) {
    bounds <- quantile(x[[ref]], c(0.5 - thresh, 0.5 + thresh))
    val <- median(x[[new]])
    ifelse(
      val > bounds[2L], "slower",
      ifelse(val < bounds[1L], "faster", "same")
    )
  }
  # facet labeller that strips the "expression: " prefix
  labeller <- function(...) {
    sub_fun <- function(x) sub("^expression: ", "", x)
    lapply(ggplot2::label_both(...), sub_fun)
  }
  # standard bench::mark result columns (not benchmark parameters)
  bench_cols <- function() {
    c("min", "median", "itr/sec", "mem_alloc", "gc/sec", "n_itr", "n_gc",
      "total_time", "result", "memory", "time", "gc")
  }
  # the remaining columns are the user-supplied grid parameters
  extra_cols <- function(x) {
    setdiff(colnames(x), c("version", bench_cols(),
                           c("level0", "level1", "level2"), "expression"))
  }
  type <- match.arg(type)
  if (type == "beeswarm" && !requireNamespace("ggbeeswarm", quietly = TRUE)) {
    stop("`ggbeeswarm` must be installed to use `type = \"beeswarm\"` option.")
  }
  # one row per individual timing (unnest the time/gc list columns)
  res <- tidyr::unnest(object, c(time, gc))
  plt <- ggplot2::ggplot()
  params <- extra_cols(object)
  if (!isFALSE(check)) {
    # compute the faster/slower/same verdict per panel and shade it
    grid <- object[, c("expression", params)]
    temp <- split(object, grid)
    temp <- Map(setNames, lapply(temp, `[[`, "time"),
                lapply(temp, `[[`, "version"), USE.NAMES = FALSE)
    grid <- cbind(
      do.call(rbind, lapply(split(grid, grid), unique)),
      `Median runtime` = vapply(temp, within_thresh, character(1L), ref, new,
                                threshold)
    )
    plt <- plt +
      ggplot2::geom_rect(data = grid,
        ggplot2::aes(xmin = -Inf, xmax = Inf, ymin = -Inf, ymax = Inf,
                     fill = `Median runtime`),
        alpha = 0.05
      ) +
      ggplot2::scale_fill_manual(
        values = c(faster = "green", slower = "red", same = NA)
      )
  }
  # main geometry, selected by `type`
  plt <- switch(
    type,
    beeswarm = plt +
      ggbeeswarm::geom_quasirandom(
        data = res, ggplot2::aes_string("version", "time", color = "gc"), ...
      ) +
      ggplot2::coord_flip(),
    jitter = plt +
      ggplot2::geom_jitter(
        data = res, ggplot2::aes_string("version", "time", color = "gc"), ...
      ) +
      ggplot2::coord_flip(),
    ridge = plt +
      ggridges::geom_density_ridges(
        data = res, ggplot2::aes_string("time", "version"), ...
      ),
    boxplot = plt +
      ggplot2::geom_boxplot(
        data = res, ggplot2::aes_string("version", "time"), ...
      ) +
      ggplot2::coord_flip(),
    violin = plt +
      ggplot2::geom_violin(
        data = res, ggplot2::aes_string("version", "time"), ...
      ) +
      ggplot2::coord_flip()
  )
  # facet by expression (rows) and any grid parameters (columns)
  if (length(params) == 0) {
    plt <- plt +
      ggplot2::facet_grid(
        rows = ggplot2::vars(expression), labeller = labeller
      )
  } else {
    plt <- plt +
      ggplot2::facet_grid(
        as.formula(paste("expression", "~", paste(params, collapse = " + "))),
        labeller = labeller, scales = "free_x"
      )
  }
  plt +
    ggplot2::labs(y = "Time [s]", color = "GC level") +
    ggplot2::theme_bw() +
    ggplot2::theme(axis.title.y = ggplot2::element_blank(),
                   legend.position = "bottom")
}
/tools/rpkg/tests/regression/helpers.R
permissive
tiagokepe/duckdb
R
false
false
8,036
r
# Helpers for duckdb R-package regression benchmarks: install package
# versions from GitHub, generate test data, and run/plot bench::mark
# comparisons across those versions.

# Fail fast if any package these helpers depend on is missing.
local({
  pkgs <- c("gert", "remotes", "callr", "rlang", "bench", "ggplot2", "tidyr")
  avail <- pkgs %in% installed.packages()
  if (!all(avail)) {
    stop("Package(s) ", paste(pkgs[!avail], collapse = ", "), " are required. ",
         "Please install.")
  }
})

# Clone a GitHub repo and install the R package found in `subdir` into the
# library `lib`. `branch` selects a branch at clone time; `ref` checks out an
# arbitrary commit/tag on a throwaway local branch that is cleaned up on exit.
install_gh <- function(lib, repo = "duckdb/duckdb", branch = NULL, ref = NULL,
                       subdir = "tools/rpkg", update_deps = FALSE) {
  dir <- gert::git_clone(paste0("https://github.com/", repo), tempfile(),
                         branch = branch)
  # remove the clone when done
  on.exit(unlink(dir, recursive = TRUE))
  if (!is.null(ref)) {
    current <- gert::git_branch(repo = dir)
    branch <- paste0("bench_", rand_string(alphabet = c(letters, 0:9)))
    gert::git_branch_create(branch, ref = ref, checkout = TRUE, repo = dir)
    # restore the original branch and delete the temporary one BEFORE the
    # clone is unlinked (after = FALSE prepends this handler)
    on.exit({
      gert::git_branch_checkout(current, repo = dir)
      gert::git_branch_delete(branch, repo = dir)
    }, add = TRUE, after = FALSE)
  }
  pkg <- file.path(dir, subdir)
  arg <- c(pkg, paste0("--", c(paste0("library=", lib), "no-multiarch")))
  if (isTRUE(update_deps)) {
    remotes::install_deps(pkg, upgrade = "always")
  }
  # install via R CMD INSTALL in a subprocess
  res <- callr::rcmd_safe("INSTALL", arg, show = TRUE, fail_on_status = TRUE)
  invisible(NULL)
}

# Install a single version spec (a list of install_gh() arguments).
install_one <- function(what, ...) do.call(install_gh, c(what, list(...)))

# Install every version spec in a list.
install_all <- function(lst, ...) invisible(lapply(lst, install_one, ...))

# Create the given directories (joined from path components) if missing;
# returns the paths.
dir_create <- function(paths = tempfile(), ...) {
  paths <- file.path(paths, ...)
  for (path in paths) {
    if (!dir.exists(path)) {
      dir.create(path)
    }
  }
  paths
}

# Random identifier string of the given length drawn from `alphabet`.
rand_string <- function(length = 8, alphabet = c(letters, LETTERS, 0:9)) {
  paste0(sample(alphabet, length, replace = TRUE), collapse = "")
}

# Build an `nrow`-row benchmark data frame with integer, double, character,
# and factor columns.
setup_data <- function(nrow = 1e3) {
  rand_strings <- function(n, ...) {
    vapply(integer(n), function(i, ...) rand_string(...), character(1L), ...)
  }
  rand_fact <- function(n, levels = rand_strings(5)) {
    # hand-built factor: integer codes plus levels/class attributes
    structure(
      sample(length(levels), n, TRUE),
      levels = levels,
      class = "factor"
    )
  }
  data.frame(
    v1 = sample.int(5, nrow, TRUE),
    v2 = sample.int(nrow, nrow),
    v3 = runif(nrow, max = 100),
    v4 = rand_strings(nrow),
    v5 = rand_fact(nrow)
  )
}

# Benchmark payload: write a data frame to a table, then drop it.
write_df <- function(con, dat, tbl = rand_string()) {
  dbWriteTable(con, tbl, dat)
  dbRemoveTable(con, tbl)
}

# Benchmark payload: register a data frame as a virtual table, then
# unregister it.
register_df <- function(con, dat, tbl = rand_string()) {
  duckdb_register(con, tbl, dat)
  duckdb_unregister(con, tbl)
}

# Benchmark payload: register an Arrow object, then unregister it.
register_arrow <- function(con, dat, tbl = rand_string()) {
  duckdb_register_arrow(con, tbl, dat)
  duckdb_unregister_arrow(con, tbl)
}

# Benchmark payload: filtered SELECT on column v3.
select_some <- function(con, tbl) {
  dbGetQuery(con,
    paste("SELECT * FROM", dbQuoteIdentifier(con, tbl),
          "WHERE", dbQuoteIdentifier(con, "v3"), "> 50")
  )
}

# Run the expressions in `...` (bench::mark style) against every installed
# version in `versions`, for every parameter combination in `grid`, each in
# a fresh callr subprocess using that version's library path. `setup` and
# `teardown` are quoted and evaluated around each run; `helpers` are files
# sourced and `pkgs` packages attached in the subprocess. Returns a combined
# bench_mark object with `version` and grid columns attached.
bench_mark <- function(versions, ..., grid = NULL, setup = NULL,
                       teardown = NULL, seed = NULL, helpers = NULL,
                       pkgs = NULL, reps = 1L) {
  # Runs all grid combinations for one version inside a subprocess whose
  # .libPaths() points at that version's library.
  eval_versions <- function(lib, vers, args) {
    eval_grid <- function(args, ...) {
      # One benchmark run: bind grid parameters into a data mask, evaluate
      # setup/teardown around bench::mark, and attach identifying columns.
      eval_one <- function(args, setup, teardown, exprs, nreps, seed, helpers,
                           libs, vers) {
        if (!is.null(seed)) set.seed(seed)
        for (helper in helpers) source(helper)
        for (lib in libs) library(lib, character.only = TRUE)
        env <- rlang::new_data_mask(new.env(parent = emptyenv()))
        for (nme in names(args)) {
          assign(nme, args[[nme]], envir = env)
        }
        # progress line: version plus the current parameter combination
        message(vers, appendLF = !length(args))
        if (length(args)) {
          message("; ", paste0(names(args), ": ", args, collapse = ", "))
        }
        rlang::eval_tidy(setup, data = env)
        on.exit(rlang::eval_tidy(teardown, data = env))
        res <- bench::mark(iterations = nreps, check = FALSE, exprs = exprs,
                           env = env, time_unit = "s")
        # replicate the version/parameter labels across the result rows and
        # replace the `expression` column with the expression names
        arg <- lapply(c(list(version = vers), args), rep, nrow(res))
        ind <- colnames(res) == "expression"
        arg <- c(list(expression = names(res[[which(ind)]])), arg)
        cbind(arg, res[!ind])
      }
      if (length(args)) {
        do.call(rbind, lapply(args, eval_one, ...))
      } else {
        eval_one(args, ...)
      }
    }
    callr::r(eval_grid, c(args, vers), libpath = lib, show = TRUE)
  }
  # capture expressions and setup/teardown unevaluated; expand the grid into
  # one parameter list per combination
  exprs <- rlang::exprs(...)
  setup <- rlang::enquo(setup)
  teardown <- rlang::enquo(teardown)
  params <- expand.grid(grid, stringsAsFactors = FALSE)
  params <- split(params, seq_len(nrow(params)))
  res <- Map(
    eval_versions,
    vapply(versions, `[[`, character(1L), "lib"),
    names(versions),
    MoreArgs = list(
      list(params, setup, teardown, exprs, reps, seed, helpers, pkgs)
    )
  )
  bench::as_bench_mark(do.call(rbind, res))
}

# Plot a bench_mark result produced by bench_mark(): one panel per
# expression/parameter combination, timings by version. When `check` is not
# FALSE, each panel is shaded by whether version `new`'s median time falls
# within `threshold` quantiles of version `ref`'s timings (faster/slower/same).
# NOTE(review): type = "ridge" uses ggridges, which is not in the pkgs list
# checked at the top of this file -- verify it is installed when used.
bench_plot <- function(object, type = c("beeswarm", "jitter", "ridge",
                                        "boxplot", "violin"),
                       check = FALSE, ref, new, threshold, ...) {
  # classify the `new` version's median against `ref`'s quantile band
  within_thresh <- function(x, ref, new, thresh) {
    bounds <- quantile(x[[ref]], c(0.5 - thresh, 0.5 + thresh))
    val <- median(x[[new]])
    ifelse(
      val > bounds[2L], "slower",
      ifelse(val < bounds[1L], "faster", "same")
    )
  }
  # facet labeller that strips the "expression: " prefix
  labeller <- function(...) {
    sub_fun <- function(x) sub("^expression: ", "", x)
    lapply(ggplot2::label_both(...), sub_fun)
  }
  # standard bench::mark result columns (not benchmark parameters)
  bench_cols <- function() {
    c("min", "median", "itr/sec", "mem_alloc", "gc/sec", "n_itr", "n_gc",
      "total_time", "result", "memory", "time", "gc")
  }
  # the remaining columns are the user-supplied grid parameters
  extra_cols <- function(x) {
    setdiff(colnames(x), c("version", bench_cols(),
                           c("level0", "level1", "level2"), "expression"))
  }
  type <- match.arg(type)
  if (type == "beeswarm" && !requireNamespace("ggbeeswarm", quietly = TRUE)) {
    stop("`ggbeeswarm` must be installed to use `type = \"beeswarm\"` option.")
  }
  # one row per individual timing (unnest the time/gc list columns)
  res <- tidyr::unnest(object, c(time, gc))
  plt <- ggplot2::ggplot()
  params <- extra_cols(object)
  if (!isFALSE(check)) {
    # compute the faster/slower/same verdict per panel and shade it
    grid <- object[, c("expression", params)]
    temp <- split(object, grid)
    temp <- Map(setNames, lapply(temp, `[[`, "time"),
                lapply(temp, `[[`, "version"), USE.NAMES = FALSE)
    grid <- cbind(
      do.call(rbind, lapply(split(grid, grid), unique)),
      `Median runtime` = vapply(temp, within_thresh, character(1L), ref, new,
                                threshold)
    )
    plt <- plt +
      ggplot2::geom_rect(data = grid,
        ggplot2::aes(xmin = -Inf, xmax = Inf, ymin = -Inf, ymax = Inf,
                     fill = `Median runtime`),
        alpha = 0.05
      ) +
      ggplot2::scale_fill_manual(
        values = c(faster = "green", slower = "red", same = NA)
      )
  }
  # main geometry, selected by `type`
  plt <- switch(
    type,
    beeswarm = plt +
      ggbeeswarm::geom_quasirandom(
        data = res, ggplot2::aes_string("version", "time", color = "gc"), ...
      ) +
      ggplot2::coord_flip(),
    jitter = plt +
      ggplot2::geom_jitter(
        data = res, ggplot2::aes_string("version", "time", color = "gc"), ...
      ) +
      ggplot2::coord_flip(),
    ridge = plt +
      ggridges::geom_density_ridges(
        data = res, ggplot2::aes_string("time", "version"), ...
      ),
    boxplot = plt +
      ggplot2::geom_boxplot(
        data = res, ggplot2::aes_string("version", "time"), ...
      ) +
      ggplot2::coord_flip(),
    violin = plt +
      ggplot2::geom_violin(
        data = res, ggplot2::aes_string("version", "time"), ...
      ) +
      ggplot2::coord_flip()
  )
  # facet by expression (rows) and any grid parameters (columns)
  if (length(params) == 0) {
    plt <- plt +
      ggplot2::facet_grid(
        rows = ggplot2::vars(expression), labeller = labeller
      )
  } else {
    plt <- plt +
      ggplot2::facet_grid(
        as.formula(paste("expression", "~", paste(params, collapse = " + "))),
        labeller = labeller, scales = "free_x"
      )
  }
  plt +
    ggplot2::labs(y = "Time [s]", color = "GC level") +
    ggplot2::theme_bw() +
    ggplot2::theme(axis.title.y = ggplot2::element_blank(),
                   legend.position = "bottom")
}
## S4 methods for the BeadStudioSet class: paired B-allele frequency (BAF)
## and log R ratio (LRR) assay data.

## Update a serialized BeadStudioSet to the current class definition.
## If the inherited updater (callNextMethod) errors on an old object
## layout, rebuild the object slot by slot instead.
setMethod("updateObject", signature(object="BeadStudioSet"),
          function(object, ..., verbose=FALSE) {
                  if (verbose) message("updateObject(object = 'BeadStudioSet')")
                  obj <- tryCatch(callNextMethod(object), error=function(e) NULL)
                  if(is.null(obj)){
                          obj <- new("BeadStudioSet",
                                     assayData = updateObject(assayData(object), ..., verbose=verbose),
                                     phenoData = phenoData(object),
                                     experimentData = updateObject(experimentData(object), ..., verbose=verbose),
                                     annotation = updateObject(annotation(object), ..., verbose=verbose),
                                     featureData=updateObject(featureData(object), ..., verbose=FALSE),
                                     ...)
                  }
                  if (all(isCurrent(obj))) return(obj)
                  obj
          })

## Accessor for the log R ratio (LRR) assay element.
setMethod("lrr", "BeadStudioSet", function(object){
        return(assayDataElement(object, "lrr"))
})

## copyNumber() is an alias for lrr() on this class.
setMethod("copyNumber", "BeadStudioSet", function(object) lrr(object))

## Replacement methods for the LRR element.
setReplaceMethod("lrr", c("BeadStudioSet", "ANY"), function(object, value) {
        assayDataElementReplace(object, "lrr", value)
})

setReplaceMethod("lrr", c("BafLrrSet", "ANY"), function(object, value) {
        assayDataElementReplace(object, "lrr", value)
})

setReplaceMethod("copyNumber", c("BeadStudioSet", "ANY"), function(object, value) {
        lrr(object) <- value
        object
})

## Accessor for the B-allele frequency (BAF) assay element.
setMethod("baf", "BeadStudioSet", function(object) {
        return(assayDataElement(object, "baf"))
})

## Replacement method for the BAF element.
## Fix: previously wrote to element "BAF" while the accessor reads "baf";
## assayData element names are case sensitive, so a set followed by a get
## would not see the new values. The setter now targets "baf", matching the
## accessor (and the lrr getter/setter pair, which both use "lrr").
setReplaceMethod("baf", c("BeadStudioSet", "ANY"), function(object, value) {
        assayDataElementReplace(object, "baf", value)
})

## Flatten a BeadStudioSet into a long-format data.frame with one row per
## (marker, sample) pair: position in Mb, rescaled LRR/BAF, sample id, and
## SNP indicator. `id` becomes an ordered factor in sample order.
setAs("BeadStudioSet", "data.frame",
      function(from, to){
              ## LRR/BAF appear to be stored as scaled integers (x100 and
              ## x1000 respectively) and are rescaled to natural units here
              ## -- TODO confirm against isInteger()
              cn <- as.numeric(lrr(from))/100
              bf <- as.numeric(baf(from))/1000
              x <- rep(position(from)/1e6, ncol(from))
              is.snp <- rep(isSnp(from), ncol(from))
              id <- rep(sampleNames(from), each=nrow(from))
              df <- data.frame(x=x, lrr=cn, baf=bf, id=id, is.snp=is.snp,
                               stringsAsFactors=FALSE)
              df$id <- factor(df$id, ordered=TRUE, levels=unique(df$id))
              return(df)
      })

## Display method: delegate to the inherited eSet show().
setMethod("show", signature(object="BeadStudioSet"),
          function(object){
                  callNextMethod(object)
                  ##cat("Genome Build: ", genomeBuild(object), "\n")
                  ##cat("Integer representation of BAF/LRR: ", isInteger(object), "\n")
          })
/R/methods-BeadStudioSet.R
no_license
benilton/oligoClasses
R
false
false
2,350
r
setMethod("updateObject", signature(object="BeadStudioSet"), function(object, ..., verbose=FALSE) { if (verbose) message("updateObject(object = 'BeadStudioSet')") obj <- tryCatch(callNextMethod(object), error=function(e) NULL) if(is.null(obj)){ obj <- new("BeadStudioSet", assayData = updateObject(assayData(object), ..., verbose=verbose), phenoData = phenoData(object), experimentData = updateObject(experimentData(object), ..., verbose=verbose), annotation = updateObject(annotation(object), ..., verbose=verbose), featureData=updateObject(featureData(object), ..., verbose=FALSE), ...) } if (all(isCurrent(obj))) return(obj) obj }) setMethod("lrr", "BeadStudioSet", function(object){ return(assayDataElement(object, "lrr")) }) setMethod("copyNumber", "BeadStudioSet", function(object) lrr(object)) setReplaceMethod("lrr", c("BeadStudioSet", "ANY"), function(object, value) { assayDataElementReplace(object, "lrr", value) }) setReplaceMethod("lrr", c("BafLrrSet", "ANY"), function(object, value) { assayDataElementReplace(object, "lrr", value) }) setReplaceMethod("copyNumber", c("BeadStudioSet", "ANY"), function(object, value) { lrr(object) <- value object }) setMethod("baf", "BeadStudioSet", function(object) { return(assayDataElement(object, "baf")) }) setReplaceMethod("baf", c("BeadStudioSet", "ANY"), function(object, value) { assayDataElementReplace(object, "BAF", value) }) setAs("BeadStudioSet", "data.frame", function(from, to){ cn <- as.numeric(lrr(from))/100 bf <- as.numeric(baf(from))/1000 x <- rep(position(from)/1e6, ncol(from)) ##x <- rep(position(object)[marker.index], 4)/1e6 is.snp <- rep(isSnp(from), ncol(from)) id <- rep(sampleNames(from), each=nrow(from)) df <- data.frame(x=x, lrr=cn, baf=bf, id=id, is.snp=is.snp, stringsAsFactors=FALSE) df$id <- factor(df$id, ordered=TRUE, levels=unique(df$id)) return(df) }) setMethod("show", signature(object="BeadStudioSet"), function(object){ callNextMethod(object) ##cat("Genome Build: ", genomeBuild(object), "\n") 
##cat("Integer representation of BAF/LRR: ", isInteger(object), "\n") })
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ggPerQCWrapper.R \name{plotScDblFinderResults} \alias{plotScDblFinderResults} \title{Plots for runScDblFinder outputs.} \usage{ plotScDblFinderResults( inSCE, sample = NULL, shape = NULL, groupBy = NULL, combinePlot = "all", violin = TRUE, boxplot = FALSE, dots = TRUE, reducedDimName = "UMAP", xlab = NULL, ylab = NULL, dim1 = NULL, dim2 = NULL, bin = NULL, binLabel = NULL, defaultTheme = TRUE, dotSize = 0.5, summary = "median", summaryTextSize = 3, transparency = 1, baseSize = 15, titleSize = NULL, axisLabelSize = NULL, axisSize = NULL, legendSize = NULL, legendTitleSize = NULL, relHeights = 1, relWidths = c(1, 1, 1), plotNCols = NULL, plotNRows = NULL, labelSamples = TRUE, samplePerColumn = TRUE, sampleRelHeights = 1, sampleRelWidths = 1 ) } \arguments{ \item{inSCE}{Input \linkS4class{SingleCellExperiment} object with saved dimension reduction components or a variable with saved results from \link{runScDblFinder}. Required.} \item{sample}{Character vector. Indicates which sample each cell belongs to. Default NULL.} \item{shape}{If provided, add shapes based on the value.} \item{groupBy}{Groupings for each numeric value. A user may input a vector equal length to the number of the samples in the SingleCellExperiment object, or can be retrieved from the colData slot. Default NULL.} \item{combinePlot}{Must be either "all", "sample", or "none". "all" will combine all plots into a single .ggplot object, while "sample" will output a list of plots separated by sample. Default "all".} \item{violin}{Boolean. If TRUE, will plot the violin plot. Default TRUE.} \item{boxplot}{Boolean. If TRUE, will plot boxplots for each violin plot. Default TRUE.} \item{dots}{Boolean. If TRUE, will plot dots for each violin plot. Default TRUE.} \item{reducedDimName}{Saved dimension reduction name in the \linkS4class{SingleCellExperiment} object. Required.} \item{xlab}{Character vector. Label for x-axis. 
Default NULL.} \item{ylab}{Character vector. Label for y-axis. Default NULL.} \item{dim1}{1st dimension to be used for plotting. Can either be a string which specifies the name of the dimension to be plotted from reducedDims, or a numeric value which specifies the index of the dimension to be plotted. Default is NULL.} \item{dim2}{2nd dimension to be used for plotting. Can either be a string which specifies the name of the dimension to be plotted from reducedDims, or a numeric value which specifies the index of the dimension to be plotted. Default is NULL.} \item{bin}{Numeric vector. If single value, will divide the numeric values into the `bin` groups. If more than one value, will bin numeric values using values as a cut point.} \item{binLabel}{Character vector. Labels for the bins created by the `bin` parameter. Default NULL.} \item{defaultTheme}{Removes grid in plot and sets axis title size to 10 when TRUE. Default TRUE.} \item{dotSize}{Size of dots. Default 0.5.} \item{summary}{Adds a summary statistic, as well as a crossbar to the violin plot. Options are "mean" or "median". Default NULL.} \item{summaryTextSize}{The text size of the summary statistic displayed above the violin plot. Default 3.} \item{transparency}{Transparency of the dots, values will be 0-1. Default 1.} \item{baseSize}{The base font size for all text. Default 12. Can be overwritten by titleSize, axisSize, and axisLabelSize, legendSize, legendTitleSize.} \item{titleSize}{Size of title of plot. Default NULL.} \item{axisLabelSize}{Size of x/y-axis labels. Default NULL.} \item{axisSize}{Size of x/y-axis ticks. Default NULL.} \item{legendSize}{size of legend. Default NULL.} \item{legendTitleSize}{size of legend title. 
Default NULL.} \item{relHeights}{Relative heights of plots when combine is set.} \item{relWidths}{Relative widths of plots when combine is set.} \item{plotNCols}{Number of columns when plots are combined in a grid.} \item{plotNRows}{Number of rows when plots are combined in a grid.} \item{labelSamples}{Will label sample name in title of plot if TRUE. Default TRUE.} \item{samplePerColumn}{If TRUE, when there are multiple samples and combining by "all", the output .ggplot will have plots from each sample on a single column. Default TRUE.} \item{sampleRelHeights}{If there are multiple samples and combining by "all", the relative heights for each plot.} \item{sampleRelWidths}{If there are multiple samples and combining by "all", the relative widths for each plot.} } \value{ list of .ggplot objects } \description{ A wrapper function which visualizes outputs from the runScDblFinder function stored in the colData slot of the SingleCellExperiment object via various plots. } \examples{ data(scExample, package="singleCellTK") sce <- subsetSCECols(sce, colData = "type != 'EmptyDroplet'") sce <- getUMAP(inSCE=sce, useAssay="counts", reducedDimName="UMAP") sce <- runScDblFinder(sce) plotScDblFinderResults(inSCE=sce, reducedDimName="UMAP") }
/man/plotScDblFinderResults.Rd
permissive
rz2333/singleCellTK
R
false
true
5,053
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ggPerQCWrapper.R \name{plotScDblFinderResults} \alias{plotScDblFinderResults} \title{Plots for runScDblFinder outputs.} \usage{ plotScDblFinderResults( inSCE, sample = NULL, shape = NULL, groupBy = NULL, combinePlot = "all", violin = TRUE, boxplot = FALSE, dots = TRUE, reducedDimName = "UMAP", xlab = NULL, ylab = NULL, dim1 = NULL, dim2 = NULL, bin = NULL, binLabel = NULL, defaultTheme = TRUE, dotSize = 0.5, summary = "median", summaryTextSize = 3, transparency = 1, baseSize = 15, titleSize = NULL, axisLabelSize = NULL, axisSize = NULL, legendSize = NULL, legendTitleSize = NULL, relHeights = 1, relWidths = c(1, 1, 1), plotNCols = NULL, plotNRows = NULL, labelSamples = TRUE, samplePerColumn = TRUE, sampleRelHeights = 1, sampleRelWidths = 1 ) } \arguments{ \item{inSCE}{Input \linkS4class{SingleCellExperiment} object with saved dimension reduction components or a variable with saved results from \link{runScDblFinder}. Required.} \item{sample}{Character vector. Indicates which sample each cell belongs to. Default NULL.} \item{shape}{If provided, add shapes based on the value.} \item{groupBy}{Groupings for each numeric value. A user may input a vector equal length to the number of the samples in the SingleCellExperiment object, or can be retrieved from the colData slot. Default NULL.} \item{combinePlot}{Must be either "all", "sample", or "none". "all" will combine all plots into a single .ggplot object, while "sample" will output a list of plots separated by sample. Default "all".} \item{violin}{Boolean. If TRUE, will plot the violin plot. Default TRUE.} \item{boxplot}{Boolean. If TRUE, will plot boxplots for each violin plot. Default TRUE.} \item{dots}{Boolean. If TRUE, will plot dots for each violin plot. Default TRUE.} \item{reducedDimName}{Saved dimension reduction name in the \linkS4class{SingleCellExperiment} object. Required.} \item{xlab}{Character vector. Label for x-axis. 
Default NULL.} \item{ylab}{Character vector. Label for y-axis. Default NULL.} \item{dim1}{1st dimension to be used for plotting. Can either be a string which specifies the name of the dimension to be plotted from reducedDims, or a numeric value which specifies the index of the dimension to be plotted. Default is NULL.} \item{dim2}{2nd dimension to be used for plotting. Can either be a string which specifies the name of the dimension to be plotted from reducedDims, or a numeric value which specifies the index of the dimension to be plotted. Default is NULL.} \item{bin}{Numeric vector. If single value, will divide the numeric values into the `bin` groups. If more than one value, will bin numeric values using values as a cut point.} \item{binLabel}{Character vector. Labels for the bins created by the `bin` parameter. Default NULL.} \item{defaultTheme}{Removes grid in plot and sets axis title size to 10 when TRUE. Default TRUE.} \item{dotSize}{Size of dots. Default 0.5.} \item{summary}{Adds a summary statistic, as well as a crossbar to the violin plot. Options are "mean" or "median". Default NULL.} \item{summaryTextSize}{The text size of the summary statistic displayed above the violin plot. Default 3.} \item{transparency}{Transparency of the dots, values will be 0-1. Default 1.} \item{baseSize}{The base font size for all text. Default 12. Can be overwritten by titleSize, axisSize, and axisLabelSize, legendSize, legendTitleSize.} \item{titleSize}{Size of title of plot. Default NULL.} \item{axisLabelSize}{Size of x/y-axis labels. Default NULL.} \item{axisSize}{Size of x/y-axis ticks. Default NULL.} \item{legendSize}{size of legend. Default NULL.} \item{legendTitleSize}{size of legend title. 
Default NULL.} \item{relHeights}{Relative heights of plots when combine is set.} \item{relWidths}{Relative widths of plots when combine is set.} \item{plotNCols}{Number of columns when plots are combined in a grid.} \item{plotNRows}{Number of rows when plots are combined in a grid.} \item{labelSamples}{Will label sample name in title of plot if TRUE. Default TRUE.} \item{samplePerColumn}{If TRUE, when there are multiple samples and combining by "all", the output .ggplot will have plots from each sample on a single column. Default TRUE.} \item{sampleRelHeights}{If there are multiple samples and combining by "all", the relative heights for each plot.} \item{sampleRelWidths}{If there are multiple samples and combining by "all", the relative widths for each plot.} } \value{ list of .ggplot objects } \description{ A wrapper function which visualizes outputs from the runScDblFinder function stored in the colData slot of the SingleCellExperiment object via various plots. } \examples{ data(scExample, package="singleCellTK") sce <- subsetSCECols(sce, colData = "type != 'EmptyDroplet'") sce <- getUMAP(inSCE=sce, useAssay="counts", reducedDimName="UMAP") sce <- runScDblFinder(sce) plotScDblFinderResults(inSCE=sce, reducedDimName="UMAP") }
/plot3.R
no_license
vinoth61/exdata-data-NEI_data
R
false
false
865
r
# The MIT License (MIT) # Copyright (c) 2017 Louise AC Millard, MRC Integrative Epidemiology Unit, University of Bristol # # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated # documentation files (the "Software"), to deal in the Software without restriction, including without # limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of # the Software, and to permit persons to whom the Software is furnished to do so, subject to the following # conditions: # # The above copyright notice and this permission notice shall be included in all copies or substantial portions # of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED # TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF # CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # looks up categorical multiple field in the variable info file, return # whether field has YES in TRAIT_OF_INTEREST column (i.e. all values in # this field denote the exposure), or whether varName has varValue stated # as a trait of interest in the TRAIT_OF_INTEREST column (multiple values are # separated by "|" in this field getIsCatMultExposure <- function(varName, varValue) { # get row index of field in variable information file idx=which(vl$phenoInfo$FieldID==varName) # may be empty of may contain VALUE1|VALUE2 etc .. 
to denote those # cat mult values denoting exposure variable isExposure = vl$phenoInfo$TRAIT_OF_INTEREST[idx] if (!is.na(isExposure) & isExposure!="") { isExposure = as.character(isExposure) ## first check if value is YES, then all values are exposure traits if (isExposure == "YES") { cat("IS_CM_ALL_EXPOSURE || ") return(TRUE) } ## try to split by |, to set particular values as exposure # split into variable Values exposureValues = unlist(strsplit(isExposure,"\\|")) # for each value stated, check whether it is varValue for (thisVal in exposureValues) { if (thisVal == varValue) { cat("IS_CM_EXPOSURE || ") return(TRUE) } } } # varValue is not in list of exposure values return(FALSE) }
/WAS/getIsCatMultExposure.r
permissive
MRCIEU/PHESANT
R
false
false
2,476
r
# The MIT License (MIT) # Copyright (c) 2017 Louise AC Millard, MRC Integrative Epidemiology Unit, University of Bristol # # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated # documentation files (the "Software"), to deal in the Software without restriction, including without # limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of # the Software, and to permit persons to whom the Software is furnished to do so, subject to the following # conditions: # # The above copyright notice and this permission notice shall be included in all copies or substantial portions # of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED # TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF # CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # looks up categorical multiple field in the variable info file, return # whether field has YES in TRAIT_OF_INTEREST column (i.e. all values in # this field denote the exposure), or whether varName has varValue stated # as a trait of interest in the TRAIT_OF_INTEREST column (multiple values are # separated by "|" in this field getIsCatMultExposure <- function(varName, varValue) { # get row index of field in variable information file idx=which(vl$phenoInfo$FieldID==varName) # may be empty of may contain VALUE1|VALUE2 etc .. 
to denote those # cat mult values denoting exposure variable isExposure = vl$phenoInfo$TRAIT_OF_INTEREST[idx] if (!is.na(isExposure) & isExposure!="") { isExposure = as.character(isExposure) ## first check if value is YES, then all values are exposure traits if (isExposure == "YES") { cat("IS_CM_ALL_EXPOSURE || ") return(TRUE) } ## try to split by |, to set particular values as exposure # split into variable Values exposureValues = unlist(strsplit(isExposure,"\\|")) # for each value stated, check whether it is varValue for (thisVal in exposureValues) { if (thisVal == varValue) { cat("IS_CM_EXPOSURE || ") return(TRUE) } } } # varValue is not in list of exposure values return(FALSE) }
FunctionReliabilityParameter<-function(x,vmuX,vmuY,vgammaX,vgammaY,valphaX,valphaY) { vmu1<-vmuX vmu2<-vmuY vgamma1<-vgammaX^valphaX vgamma2<-vgammaY^valphaY valpha1<-valphaX valpha2<-valphaY zX<-(1/vgamma1)*(x-vmu1)^valpha1 zY<-(1/vgamma2)*(x-vmu2)^valpha2 fx<-(valpha1/vgamma1)*((x-vmu1)^(valpha1-1))*exp(-zX) Fy<-1-exp(-zY) fobjective<-Fy*fx return(fobjective) }
/R_Programs_Discretized_Likelihood/03BootstrapApproach/FunctionReliabilityParameter.R
no_license
gudeliafp/Programs-Discretized-Likelihood
R
false
false
389
r
FunctionReliabilityParameter<-function(x,vmuX,vmuY,vgammaX,vgammaY,valphaX,valphaY) { vmu1<-vmuX vmu2<-vmuY vgamma1<-vgammaX^valphaX vgamma2<-vgammaY^valphaY valpha1<-valphaX valpha2<-valphaY zX<-(1/vgamma1)*(x-vmu1)^valpha1 zY<-(1/vgamma2)*(x-vmu2)^valpha2 fx<-(valpha1/vgamma1)*((x-vmu1)^(valpha1-1))*exp(-zX) Fy<-1-exp(-zY) fobjective<-Fy*fx return(fobjective) }
yahooWebMA <- function(appid, sentence, results="ma", response="surface,reading,pos", filter="", ma_response="", ma_filter="", uniq_response="", uniq_filter="", uniq_by_baseform=""){ search.url <- "http://jlp.yahooapis.jp/MAService/V1/parse" request.url <- paste(search.url, "?appid=", appid, "&sentence=", URLencode(sentence), "&results=", results, "&response", response, "&filter=", filter, "&ma_response=", ma_response, "&ma_filter=", ma_filter, "&uniq_response=", uniq_response, "&uniq_filter=", uniq_filter, "&uniq_by_baseform=", uniq_by_baseform, sep="") r <- try( xmlToList(xmlTreeParse(request.url)) , TRUE) r }
/R/yahooWebMA.R
no_license
cran/RWebMA
R
false
false
699
r
yahooWebMA <- function(appid, sentence, results="ma", response="surface,reading,pos", filter="", ma_response="", ma_filter="", uniq_response="", uniq_filter="", uniq_by_baseform=""){ search.url <- "http://jlp.yahooapis.jp/MAService/V1/parse" request.url <- paste(search.url, "?appid=", appid, "&sentence=", URLencode(sentence), "&results=", results, "&response", response, "&filter=", filter, "&ma_response=", ma_response, "&ma_filter=", ma_filter, "&uniq_response=", uniq_response, "&uniq_filter=", uniq_filter, "&uniq_by_baseform=", uniq_by_baseform, sep="") r <- try( xmlToList(xmlTreeParse(request.url)) , TRUE) r }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/PlotFunctions.R \name{plotMap} \alias{plotMap} \title{Plot map} \usage{ plotMap(data, PCx, PCy) } \description{ Plot map }
/popgen-R/popgen-0.1.0/man/plotMap.Rd
no_license
ZSI-Bio/popgen
R
false
true
201
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/PlotFunctions.R \name{plotMap} \alias{plotMap} \title{Plot map} \usage{ plotMap(data, PCx, PCy) } \description{ Plot map }
library(ape) testtree <- read.tree("3873_0.txt") unrooted_tr <- unroot(testtree) write.tree(unrooted_tr, file="3873_0_unrooted.txt")
/codeml_files/newick_trees_processed/3873_0/rinput.R
no_license
DaniBoo/cyanobacteria_project
R
false
false
135
r
library(ape) testtree <- read.tree("3873_0.txt") unrooted_tr <- unroot(testtree) write.tree(unrooted_tr, file="3873_0_unrooted.txt")
\name{cpath} \alias{cpath} %- Also NEED an '\alias' for EACH other topic documented here. \title{ internal function } \description{ internal function } %\usage{ %cpath(x, values = FALSE, collapse = TRUE, directed, bonds, sel, sep, lb2lb) %} % \keyword{internal}
/man/cpath.Rd
no_license
mplex/multiplex
R
false
false
276
rd
\name{cpath} \alias{cpath} %- Also NEED an '\alias' for EACH other topic documented here. \title{ internal function } \description{ internal function } %\usage{ %cpath(x, values = FALSE, collapse = TRUE, directed, bonds, sel, sep, lb2lb) %} % \keyword{internal}
#' @title Sparse canonical correlations for symptom mapping #' #' @description #' Multivariate SCCAN adapted for lesion to symptom mapping purposes. #' By default an optimization routine is used to find the best #' \code{sparseness} value. If you specify sparseness manually, it #' will be validated to find the cross-validated correlation that #' can be obtained with that sparseness. You can skip the entire #' optimization/validation by choosing \code{optimizeSparseness=FALSE}. #' To understand SCCAN arguments, see \code{\link[ANTsR]{sparseDecom2}}. #' #' @param lesmat matrix of voxels (columns) and subjects (rows). #' @param behavior vector of behavioral scores. #' @param mask antsImage binary mask to put back voxels in image. #' @param optimizeSparseness logical (default=TRUE) whether to #' run the sparseness optimization routine. If FALSE, the default #' sparseness value will be used. If sparseness is manually defined #' this flag decides if cross validated correlations will be #' computed for the defined sparseness. #' @param validateSparseness logical (conditional default=TRUE) If #' sparseness is manually defined, this flag decides if cross #' validated correlations will be computed for the defined sparseness. #' @param pThreshold (default=0.05) If cross validated #' correlations show significance below this value #' the results are considered null and an empty #' map is returned. #' @param showInfo logical (default=TRUE) display messages #' @param sparseness (default=1) SCCAN parameter. Decides the proportion #' of voxels that will receive a non-zero weight. A positive sparseness #' will force the solution of each component to be one sided, i.e., #' voxels cannot have both positive and negative weights. A negative #' sparseness allows dual sided solution, where some voxels can have #' positive weights and other voxels can have negative weights. Setting #' sparseness manually without running the optimization routing is not #' recommended. 
For more, see \code{\link[ANTsR]{sparseDecom2}}. #' @param sparseness.behav SCCAN parameter, what sparsness to use for #' behavioral scores. Useful only if multiple behavioral scores are #' passed. This argument is not optimized, you should not change it #' if you are not familiar with SCCAN. #' @param mycoption (default=1) SCCAN parameter, see \code{\link[ANTsR]{sparseDecom2}} #' @param robust (ddefault=1) SCCAN parameter, see \code{\link[ANTsR]{sparseDecom2}} #' @param nvecs (default=1) SCCAN parameter. Normally only #' one eigenvector of weights is obtained in LESYMAP. Multiple #' maps/eigenvectors can be retrieved for mapping full #' deficit profiles in the future. For more, see #' \code{\link[ANTsR]{sparseDecom2}} #' @param cthresh (default=150) SCCAN parameter, see \code{\link[ANTsR]{sparseDecom2}} #' @param its (default=20) SCCAN parameter, see \code{\link[ANTsR]{sparseDecom2}} #' @param smooth (default=0.4) SCCAN parameter. Determines the #' amount of smoothing of weights in image space performed by #' \code{\link[ANTsR]{sparseDecom2}}. The current default value #' is somewhat arbitrary, it was not determined through #' systematic simulations. #' @param npermsSCCAN (default=0) SCCAN permutations. In theory can be #' used to determine if the cross-correlation between the two sides #' (behavior and lesions) is not random. However, LESYMAP uses #' k-fold validations, which are faster; this option has not been #' tested. For more, see \code{\link[ANTsR]{sparseDecom2}}. #' @param maxBased (default=FALSE) SCCAN parameter. Removes voxels with #' weights smaller than 10\% of the peak weight during internal SCCAN #' iterations. Although similar to what is done in LESYMAP with standard #' SCCAN results, this strategy follows a different route, and produces #' different weights. The overall final result is, however, quite similar. #' This method is faster then the standard SCCAN call in LESYMAP, but #' has not been tested thoroughly. 
Note that the optimal sparseness #' obtained with \code{maxBased=TRUE} is not optimal when switching to #' \code{maxBased=FALSE}. #' @param directionalSCCAN (default=TRUE) If TRUE, the upper and lower #' bounds of sparseness search will be negative. A negative sparseness #' permits positive and negative voxel weights, thus finding the #' direction of the relationship with behavior. #' @param ... other arguments received from \code{\link{lesymap}}. #' #' @return #' List of objects returned: #' \itemize{ #' \item\code{statistic} - vector of statistical values #' \item\code{pvalue} - vector of pvalues #' \item\code{rawWeights.img} - image with raw SCCAN voxel weights #' \item\code{sccan.eig2} - SCCAN weight(s) for behavior #' column(s). #' \item\code{sccan.ccasummary} - SCCAN summary of #' projection correlations and permutation-derived pvalues #' \item\code{optimalSparseness} - (if optimizeSparseness=TRUE) optimal #' value found for sparseness #' \item\code{CVcorrelation.stat} - (if optimizeSparseness=TRUE) #' Correlation between true and predicted score with k-fold validation #' using the optimal sparseness value #' \item\code{CVcorrelation.pval} - (if optimizeSparseness=TRUE) p-value #' of the above correlation #' \item\code{sccan.behavior.scaleval} - scaling value for behavior #' \item\code{sccan.behavior.centerval} - center value for behavior #' \item\code{sccan.lesmat.scaleval} - scaling value for lesion matrix #' \item\code{sccan.lesmat.centerval} - center value for lesion matrix #' } #' #' @examples{ #' \dontrun{ #' lesydata = file.path(find.package('LESYMAP'),'extdata') #' filenames = Sys.glob(file.path(lesydata, 'lesions', '*.nii.gz')) #' behavior = Sys.glob(file.path(lesydata, 'behavior', 'behavior.txt')) #' behavior = read.table(behavior,header=FALSE)[,1] #' avg = antsAverageImages(filenames) #' mask = thresholdImage(avg, 0.1, Inf) #' lesmat = imagesToMatrix(filenames,mask) #' result = lsm_sccan(lesmat, behavior, #' optimizeSparseness=F, sparseness=0.8, 
mask = mask) #' } #' } #' #' @author Dorian Pustina #' #' @export lsm_sccan <- function(lesmat, behavior, mask, showInfo=TRUE, optimizeSparseness = TRUE, validateSparseness=FALSE, pThreshold=0.05, mycoption=1, robust=1, sparseness=0.045, sparseness.behav = -0.99, nvecs=1, cthresh=150, its=20, npermsSCCAN=0, smooth=0.4, maxBased=FALSE, directionalSCCAN=TRUE, ...) { sparseness = c( sparseness, sparseness.behav ) cthresh = c(cthresh,0) # scale and center data behavior.orig = behavior behavior = scale(behavior, scale=T, center=T) lesmat = scale(lesmat, scale=T, center=T) # prepare data inmats=list(lesmat,as.matrix(behavior)) sccan.masks=c(mask,NA) # check if user specified sparseness if ('sparseness' %in% names(match.call())) { optimizeSparseness = FALSE if (!('validateSparseness' %in% names(match.call()))) { validateSparseness = TRUE } } if (optimizeSparseness | validateSparseness) { sparse.optim = optimize_SCCANsparseness(lesmat = lesmat, behavior = behavior, mask=mask, cthresh=cthresh, mycoption=mycoption, robust=robust, nvecs=nvecs, its=its, npermsSCCAN=npermsSCCAN, smooth=smooth, sparseness.behav=sparseness.behav, showInfo=showInfo, maxBased=maxBased, sparseness=sparseness[1], justValidate=validateSparseness, directionalSCCAN=directionalSCCAN, ...) 
sparseness = c(sparse.optim$minimum, sparseness.behav) CVcorrelation.stat = sparse.optim$CVcorrelation.stat r = abs(CVcorrelation.stat) n = length(behavior) tstat = (r*sqrt(n-2))/(sqrt(1 - r^2)) CVcorrelation.pval = pt(-abs(tstat), n-2)*2 CVcorrelation.pval = ifelse(CVcorrelation.pval<1, CVcorrelation.pval, 1) # to fix p > 1 if (showInfo & !validateSparseness) { msg = paste0('\n Found optimal sparsenes ', round(sparseness[1],3), ' (CV corr=', round(CVcorrelation.stat,3), ' p=', format(CVcorrelation.pval, digits=3), ')') printInfo(msg, type='middle') } if (showInfo & validateSparseness) { msg = paste0('\n Validated sparseness ', round(sparseness[1],3), ' (CV corr=', round(CVcorrelation.stat,3), ' p=', format(CVcorrelation.pval, digits=3), ')') printInfo(msg, type='middle') } # if poor result, end it here if (CVcorrelation.pval > pThreshold) { if (showInfo) printInfo('\n WARNING: Poor cross-validated accuracy, returning NULL result.', type='middle') return(list(statistic=rep(0,ncol(lesmat)), pvalue=rep(1,ncol(lesmat)), optimalSparseness = sparse.optim$minimum, CVcorrelation.stat= CVcorrelation.stat, CVcorrelation.pval= CVcorrelation.pval)) } } if (showInfo) { printInfo(paste('\n Calling SCCAN with:')) printInfo(paste('\n Components:\t\t', nvecs), type='middle') printInfo(paste('\n Use ranks:\t\t', robust), type='middle') printInfo(paste('\n Sparseness:\t\t', round(sparseness[1], 3)), type='middle') printInfo(paste('\n Cluster threshold:\t', cthresh[1]), type='middle') printInfo(paste('\n Smooth sigma:\t', smooth), type='middle') printInfo(paste('\n Iterations:\t\t', its), type='middle') printInfo(paste('\n maxBased:\t\t', maxBased), type='middle') printInfo(paste('\n directionalSCCAN:\t', directionalSCCAN), type='middle') printInfo(paste('\n optimizeSparseness:\t', optimizeSparseness), type='middle') printInfo(paste('\n validateSparseness:\t', validateSparseness), type='middle') } sccan = sparseDecom2( inmats,inmask=sccan.masks, mycoption=mycoption, robust=robust, 
sparseness=sparseness, nvecs=nvecs, cthresh=cthresh,its=its, perms=npermsSCCAN, smooth=smooth, maxBased=maxBased) # normalize values to 1 or -1 statistic = sccan$eig1 / max(abs(sccan$eig1)) # flip weights if necessary if (directionalSCCAN) { posbehav = ifelse(sccan$eig2[1,1] < 0, -1, 1) poscor = ifelse(sccan$ccasummary$corrs[1] < 0, -1, 1) flipval = posbehav * poscor statistic = statistic * flipval } else { statistic = abs(statistic) } # shave away weights < 0.1, not needed for maxBased because they are removed in sparseDecom2 if (!maxBased) statistic[statistic < 0.1 & statistic > -0.1] = 0 # eleminate small clusters # placed on purpose after 0.1 thresholding to remove # remaining small leftover clusters temp = makeImage(mask,statistic) # put stat in image tempclust = labelClusters(abs(temp), minClusterSize = cthresh, minThresh = .Machine$double.eps, maxThresh=Inf) temp = temp * thresholdImage(tempclust, .Machine$double.eps, Inf) statistic = imageListToMatrix(list(temp), mask)[1,] if (showInfo & sum(statistic!=0) == 0) printInfo('\n WARNING: Post-sccan cluster thresholding removed all voxels.', type='middle') output = list(statistic=statistic) output$rawWeights.img = makeImage(mask,sccan$eig1) output$sccan.eig2 = sccan$eig2 output$sccan.ccasummary = sccan$ccasummary # needed later to rescale for prediction purposes output$sccan.behavior.scaleval = attr(behavior, 'scaled:scale') output$sccan.behavior.centerval = attr(behavior, 'scaled:center') output$sccan.lesmat.scaleval = attr(lesmat, 'scaled:scale') output$sccan.lesmat.centerval = attr(lesmat, 'scaled:center') # regression model to backproject to behavior original predbehav = lesmat %*% t(sccan$eig1) %*% sccan$eig2 predbehav.raw = predbehav * output$sccan.behavior.scaleval + output$sccan.behavior.centerval output$sccan.predictlm = lm(behavior.orig ~ predbehav.raw, data = data.frame(behavior.orig=behavior.orig, predbehav.raw=predbehav.raw)) if (optimizeSparseness) { output$optimalSparseness = sparse.optim$minimum 
output$CVcorrelation.stat = CVcorrelation.stat output$CVcorrelation.pval = CVcorrelation.pval } return(output) }
/R/lsm_sccan.R
permissive
mbowren/LESYMAP
R
false
false
12,723
r
#' @title Sparse canonical correlations for symptom mapping #' #' @description #' Multivariate SCCAN adapted for lesion to symptom mapping purposes. #' By default an optimization routine is used to find the best #' \code{sparseness} value. If you specify sparseness manually, it #' will be validated to find the cross-validated correlation that #' can be obtained with that sparseness. You can skip the entire #' optimization/validation by choosing \code{optimizeSparseness=FALSE}. #' To understand SCCAN arguments, see \code{\link[ANTsR]{sparseDecom2}}. #' #' @param lesmat matrix of voxels (columns) and subjects (rows). #' @param behavior vector of behavioral scores. #' @param mask antsImage binary mask to put back voxels in image. #' @param optimizeSparseness logical (default=TRUE) whether to #' run the sparseness optimization routine. If FALSE, the default #' sparseness value will be used. If sparseness is manually defined #' this flag decides if cross validated correlations will be #' computed for the defined sparseness. #' @param validateSparseness logical (conditional default=TRUE) If #' sparseness is manually defined, this flag decides if cross #' validated correlations will be computed for the defined sparseness. #' @param pThreshold (default=0.05) If cross validated #' correlations show significance below this value #' the results are considered null and an empty #' map is returned. #' @param showInfo logical (default=TRUE) display messages #' @param sparseness (default=1) SCCAN parameter. Decides the proportion #' of voxels that will receive a non-zero weight. A positive sparseness #' will force the solution of each component to be one sided, i.e., #' voxels cannot have both positive and negative weights. A negative #' sparseness allows dual sided solution, where some voxels can have #' positive weights and other voxels can have negative weights. Setting #' sparseness manually without running the optimization routing is not #' recommended. 
For more, see \code{\link[ANTsR]{sparseDecom2}}.
#' @param sparseness.behav SCCAN parameter, what sparseness to use for
#'  behavioral scores. Useful only if multiple behavioral scores are
#'  passed. This argument is not optimized, you should not change it
#'  if you are not familiar with SCCAN.
#' @param mycoption (default=1) SCCAN parameter, see \code{\link[ANTsR]{sparseDecom2}}
#' @param robust (default=1) SCCAN parameter, see \code{\link[ANTsR]{sparseDecom2}}
#' @param nvecs (default=1) SCCAN parameter. Normally only
#'  one eigenvector of weights is obtained in LESYMAP. Multiple
#'  maps/eigenvectors can be retrieved for mapping full
#'  deficit profiles in the future. For more, see
#'  \code{\link[ANTsR]{sparseDecom2}}
#' @param cthresh (default=150) SCCAN parameter, see \code{\link[ANTsR]{sparseDecom2}}
#' @param its (default=20) SCCAN parameter, see \code{\link[ANTsR]{sparseDecom2}}
#' @param smooth (default=0.4) SCCAN parameter. Determines the
#'  amount of smoothing of weights in image space performed by
#'  \code{\link[ANTsR]{sparseDecom2}}. The current default value
#'  is somewhat arbitrary, it was not determined through
#'  systematic simulations.
#' @param npermsSCCAN (default=0) SCCAN permutations. In theory can be
#'  used to determine if the cross-correlation between the two sides
#'  (behavior and lesions) is not random. However, LESYMAP uses
#'  k-fold validations, which are faster; this option has not been
#'  tested. For more, see \code{\link[ANTsR]{sparseDecom2}}.
#' @param maxBased (default=FALSE) SCCAN parameter. Removes voxels with
#'  weights smaller than 10\% of the peak weight during internal SCCAN
#'  iterations. Although similar to what is done in LESYMAP with standard
#'  SCCAN results, this strategy follows a different route, and produces
#'  different weights. The overall final result is, however, quite similar.
#'  This method is faster than the standard SCCAN call in LESYMAP, but
#'  has not been tested thoroughly.
Note that the optimal sparseness #' obtained with \code{maxBased=TRUE} is not optimal when switching to #' \code{maxBased=FALSE}. #' @param directionalSCCAN (default=TRUE) If TRUE, the upper and lower #' bounds of sparseness search will be negative. A negative sparseness #' permits positive and negative voxel weights, thus finding the #' direction of the relationship with behavior. #' @param ... other arguments received from \code{\link{lesymap}}. #' #' @return #' List of objects returned: #' \itemize{ #' \item\code{statistic} - vector of statistical values #' \item\code{pvalue} - vector of pvalues #' \item\code{rawWeights.img} - image with raw SCCAN voxel weights #' \item\code{sccan.eig2} - SCCAN weight(s) for behavior #' column(s). #' \item\code{sccan.ccasummary} - SCCAN summary of #' projection correlations and permutation-derived pvalues #' \item\code{optimalSparseness} - (if optimizeSparseness=TRUE) optimal #' value found for sparseness #' \item\code{CVcorrelation.stat} - (if optimizeSparseness=TRUE) #' Correlation between true and predicted score with k-fold validation #' using the optimal sparseness value #' \item\code{CVcorrelation.pval} - (if optimizeSparseness=TRUE) p-value #' of the above correlation #' \item\code{sccan.behavior.scaleval} - scaling value for behavior #' \item\code{sccan.behavior.centerval} - center value for behavior #' \item\code{sccan.lesmat.scaleval} - scaling value for lesion matrix #' \item\code{sccan.lesmat.centerval} - center value for lesion matrix #' } #' #' @examples{ #' \dontrun{ #' lesydata = file.path(find.package('LESYMAP'),'extdata') #' filenames = Sys.glob(file.path(lesydata, 'lesions', '*.nii.gz')) #' behavior = Sys.glob(file.path(lesydata, 'behavior', 'behavior.txt')) #' behavior = read.table(behavior,header=FALSE)[,1] #' avg = antsAverageImages(filenames) #' mask = thresholdImage(avg, 0.1, Inf) #' lesmat = imagesToMatrix(filenames,mask) #' result = lsm_sccan(lesmat, behavior, #' optimizeSparseness=F, sparseness=0.8, 
mask = mask)
#' }
#' }
#'
#' @author Dorian Pustina
#'
#' @export
lsm_sccan <- function(lesmat, behavior, mask, showInfo=TRUE,
                      optimizeSparseness = TRUE, validateSparseness=FALSE,
                      pThreshold=0.05, mycoption=1,
                      robust=1, sparseness=0.045, sparseness.behav = -0.99,
                      nvecs=1, cthresh=150, its=20, npermsSCCAN=0,
                      smooth=0.4, maxBased=FALSE, directionalSCCAN=TRUE,
                      ...) {

  # Both SCCAN sides get a sparseness/cthresh value: c(lesion side, behavior side).
  # The behavior side uses sparseness.behav and no cluster threshold.
  sparseness = c( sparseness, sparseness.behav )
  cthresh = c(cthresh,0)

  # scale and center data; keep the raw scores for the back-projection model below
  behavior.orig = behavior
  behavior = scale(behavior, scale=TRUE, center=TRUE)
  lesmat = scale(lesmat, scale=TRUE, center=TRUE)

  # prepare data for sparseDecom2
  inmats=list(lesmat,as.matrix(behavior))
  sccan.masks=c(mask,NA)

  # check if user specified sparseness; if so, skip the optimization and,
  # unless the user explicitly disabled it, validate the chosen value instead
  if ('sparseness' %in% names(match.call())) {
    optimizeSparseness = FALSE
    if (!('validateSparseness' %in% names(match.call()))) {
      validateSparseness = TRUE
    }
  }

  if (optimizeSparseness || validateSparseness) {
    sparse.optim = optimize_SCCANsparseness(lesmat = lesmat, behavior = behavior,
                          mask=mask, cthresh=cthresh,
                          mycoption=mycoption, robust=robust,
                          nvecs=nvecs, its=its,
                          npermsSCCAN=npermsSCCAN, smooth=smooth,
                          sparseness.behav=sparseness.behav,
                          showInfo=showInfo, maxBased=maxBased,
                          sparseness=sparseness[1],
                          justValidate=validateSparseness,
                          directionalSCCAN=directionalSCCAN, ...)
    sparseness = c(sparse.optim$minimum, sparseness.behav)
    CVcorrelation.stat = sparse.optim$CVcorrelation.stat

    # p-value of the cross-validated correlation via a t statistic with n-2 df
    r = abs(CVcorrelation.stat)
    n = length(behavior)
    tstat = (r*sqrt(n-2))/(sqrt(1 - r^2))
    CVcorrelation.pval = pt(-abs(tstat), n-2)*2
    CVcorrelation.pval = min(CVcorrelation.pval, 1) # to fix p > 1

    if (showInfo && !validateSparseness) {
      # FIX: message previously read 'sparsenes'
      msg = paste0('\n Found optimal sparseness ', round(sparseness[1],3),
                   ' (CV corr=', round(CVcorrelation.stat,3),
                   ' p=', format(CVcorrelation.pval, digits=3), ')')
      printInfo(msg, type='middle')
    }
    if (showInfo && validateSparseness) {
      msg = paste0('\n Validated sparseness ', round(sparseness[1],3),
                   ' (CV corr=', round(CVcorrelation.stat,3),
                   ' p=', format(CVcorrelation.pval, digits=3), ')')
      printInfo(msg, type='middle')
    }

    # if poor cross-validated result, end it here with an empty map
    if (CVcorrelation.pval > pThreshold) {
      if (showInfo) printInfo('\n WARNING: Poor cross-validated accuracy, returning NULL result.', type='middle')
      return(list(statistic=rep(0,ncol(lesmat)),
                  pvalue=rep(1,ncol(lesmat)),
                  optimalSparseness = sparse.optim$minimum,
                  CVcorrelation.stat= CVcorrelation.stat,
                  CVcorrelation.pval= CVcorrelation.pval))
    }
  }

  if (showInfo) {
    printInfo(paste('\n Calling SCCAN with:'))
    printInfo(paste('\n Components:\t\t', nvecs), type='middle')
    printInfo(paste('\n Use ranks:\t\t', robust), type='middle')
    printInfo(paste('\n Sparseness:\t\t', round(sparseness[1], 3)), type='middle')
    printInfo(paste('\n Cluster threshold:\t', cthresh[1]), type='middle')
    printInfo(paste('\n Smooth sigma:\t', smooth), type='middle')
    printInfo(paste('\n Iterations:\t\t', its), type='middle')
    printInfo(paste('\n maxBased:\t\t', maxBased), type='middle')
    printInfo(paste('\n directionalSCCAN:\t', directionalSCCAN), type='middle')
    printInfo(paste('\n optimizeSparseness:\t', optimizeSparseness), type='middle')
    printInfo(paste('\n validateSparseness:\t', validateSparseness), type='middle')
  }

  sccan = sparseDecom2( inmats,inmask=sccan.masks,
                        mycoption=mycoption,
                        robust=robust,
                        sparseness=sparseness, nvecs=nvecs,
                        cthresh=cthresh,its=its,
                        perms=npermsSCCAN, smooth=smooth,
                        maxBased=maxBased)

  # normalize values to 1 or -1
  statistic = sccan$eig1 / max(abs(sccan$eig1))

  # flip weights if necessary, so that positive weights correspond to a
  # positive relationship with behavior
  if (directionalSCCAN) {
    posbehav = ifelse(sccan$eig2[1,1] < 0, -1, 1)
    poscor = ifelse(sccan$ccasummary$corrs[1] < 0, -1, 1)
    flipval = posbehav * poscor
    statistic = statistic * flipval
  } else {
    statistic = abs(statistic)
  }

  # shave away weights < 0.1, not needed for maxBased because they are removed in sparseDecom2
  if (!maxBased) statistic[statistic < 0.1 & statistic > -0.1] = 0

  # eliminate small clusters
  # placed on purpose after 0.1 thresholding to remove
  # remaining small leftover clusters
  temp = makeImage(mask,statistic) # put stat in image
  # FIX: cthresh is c(lesion, behavior) at this point; labelClusters expects
  # a single scalar, so pass only the lesion-side threshold cthresh[1]
  tempclust = labelClusters(abs(temp), minClusterSize = cthresh[1],
                            minThresh = .Machine$double.eps, maxThresh=Inf)
  temp = temp * thresholdImage(tempclust, .Machine$double.eps, Inf)
  statistic = imageListToMatrix(list(temp), mask)[1,]

  if (showInfo && sum(statistic!=0) == 0) printInfo('\n WARNING: Post-sccan cluster thresholding removed all voxels.', type='middle')

  output = list(statistic=statistic)
  output$rawWeights.img = makeImage(mask,sccan$eig1)
  output$sccan.eig2 = sccan$eig2
  output$sccan.ccasummary = sccan$ccasummary

  # needed later to rescale for prediction purposes
  output$sccan.behavior.scaleval = attr(behavior, 'scaled:scale')
  output$sccan.behavior.centerval = attr(behavior, 'scaled:center')
  output$sccan.lesmat.scaleval = attr(lesmat, 'scaled:scale')
  output$sccan.lesmat.centerval = attr(lesmat, 'scaled:center')

  # regression model to backproject to behavior original
  predbehav = lesmat %*% t(sccan$eig1) %*% sccan$eig2
  predbehav.raw = predbehav * output$sccan.behavior.scaleval + output$sccan.behavior.centerval
  output$sccan.predictlm = lm(behavior.orig ~ predbehav.raw,
                              data = data.frame(behavior.orig=behavior.orig,
                                                predbehav.raw=predbehav.raw))

  # FIX: CV statistics are computed when either optimizing OR validating;
  # previously they were stored only when optimizing, so a validation-only
  # run silently dropped them from the output
  if (optimizeSparseness || validateSparseness) {
    output$optimalSparseness = sparse.optim$minimum
    output$CVcorrelation.stat = CVcorrelation.stat
    output$CVcorrelation.pval = CVcorrelation.pval
  }

  return(output)
}
# Introductory R practice script: basic commands, vectors, matrices,
# factors, lists, data frames, subsetting, and reading a data file.
# Several lines intentionally error (learner exploration); see NOTE comments.

#My first R command
print("Good Morning")
#variable
names <- "jane"   # NOTE(review): shadows base::names()
price <- 3.99
print(price)
ls    # without parentheses: prints the function definition
ls()  # lists objects in the workspace
rm(price)
ls()
mass <- 47.5
age<- 122
mass<-mass*2.3
age<-age-20
height<-height+20  # NOTE(review): errors — `height` was never defined
mass<- 64
sqrt(64)
res <- sqrt(mass)
print(res)
getwd()
hepl(print)  # NOTE(review): typo for help(); errors, corrected on next line
help(print)
?print
#install a package
installed.packages("knitr")  # NOTE(review): installed.packages() lists installed
                             # packages; install.packages("knitr") is what installs
library(knitr)
score<- 79
typeof(score)   # "double"
score<-79L
typeof(score)   # "integer" (L suffix makes an integer literal)
score<-79
is.integer(score)
typeof(is.integer(score))
v<- c(4,5,6)
v<-c(1:3,45)
length(v)
str(v)
head(v,n=2)
tail(v,n=2)
#manipulate the vector
v<-c(v,56)
str(v)
m<-matrix(c(1:18),3,6)   # 3 rows x 6 columns, filled column-wise
m
dim(m)
cont<-factor(c("asia","europe","america","africa","oceania"))
str(cont)
l<-list("Afghanistan",1952,88787)
print(l)
typeof(L)  # NOTE(review): errors — object `L` does not exist (lowercase `l` below)
typeof(l)
str(l)
menuitems<-c("chicken", "salad","soup","drink")
menutypes<-c("solid","liquid","solid","liquid")
menucost<-c(4.99,2.99,3.29,1.89)
myorder<-list(menuitems,menutypes,menucost)
print(myorder)
myorder_df<-data.frame(menutypes,menutypes,menucost)  # NOTE(review): `menutypes`
# appears twice; the first was presumably meant to be `menuitems` — R dedupes the
# second column name to `menutypes.1`, which is relied upon below
print(myorder_df)
v<- c(2:10)
v[3]
v[c(3:6)]
v[-c(3:6)]   # negative indices drop elements 3..6
myorder[1]
myorder_df[1:3,]
myorder_df
x<-myorder_df$menutypes.1   # the auto-renamed duplicated column
x
v<-c(1,5,3,4,5)
v1<-v[v==5]   # logical-mask subsetting
v1
v==5
df1<-myorder_df[myorder_df$menutypes=="solid", ]
df1
df2<-myorder_df[myorder_df$menucost>3, ]
df2
data<-read.table("gapminder.txt", header = TRUE)  # requires gapminder.txt in getwd()
head(data)
str(data)
typeof(data)  # "list" — data frames are lists of columns
x1<-data[,c(3,5)]
head(x1)
data[data$country=="Sweden",]
unique(data[,1])
head(data)
x2<-data[data$lifeExp<70,]
head(x2)
/Lesson2_IntroToR/R_commands.R
no_license
Nandezsendo/scw
R
false
false
1,431
r
# Introductory R practice script: basic commands, vectors, matrices,
# factors, lists, data frames, subsetting, and reading a data file.
# Several lines intentionally error (learner exploration); see NOTE comments.

#My first R command
print("Good Morning")
#variable
names <- "jane"   # NOTE(review): shadows base::names()
price <- 3.99
print(price)
ls    # without parentheses: prints the function definition
ls()  # lists objects in the workspace
rm(price)
ls()
mass <- 47.5
age<- 122
mass<-mass*2.3
age<-age-20
height<-height+20  # NOTE(review): errors — `height` was never defined
mass<- 64
sqrt(64)
res <- sqrt(mass)
print(res)
getwd()
hepl(print)  # NOTE(review): typo for help(); errors, corrected on next line
help(print)
?print
#install a package
installed.packages("knitr")  # NOTE(review): installed.packages() lists installed
                             # packages; install.packages("knitr") is what installs
library(knitr)
score<- 79
typeof(score)   # "double"
score<-79L
typeof(score)   # "integer" (L suffix makes an integer literal)
score<-79
is.integer(score)
typeof(is.integer(score))
v<- c(4,5,6)
v<-c(1:3,45)
length(v)
str(v)
head(v,n=2)
tail(v,n=2)
#manipulate the vector
v<-c(v,56)
str(v)
m<-matrix(c(1:18),3,6)   # 3 rows x 6 columns, filled column-wise
m
dim(m)
cont<-factor(c("asia","europe","america","africa","oceania"))
str(cont)
l<-list("Afghanistan",1952,88787)
print(l)
typeof(L)  # NOTE(review): errors — object `L` does not exist (lowercase `l` below)
typeof(l)
str(l)
menuitems<-c("chicken", "salad","soup","drink")
menutypes<-c("solid","liquid","solid","liquid")
menucost<-c(4.99,2.99,3.29,1.89)
myorder<-list(menuitems,menutypes,menucost)
print(myorder)
myorder_df<-data.frame(menutypes,menutypes,menucost)  # NOTE(review): `menutypes`
# appears twice; the first was presumably meant to be `menuitems` — R dedupes the
# second column name to `menutypes.1`, which is relied upon below
print(myorder_df)
v<- c(2:10)
v[3]
v[c(3:6)]
v[-c(3:6)]   # negative indices drop elements 3..6
myorder[1]
myorder_df[1:3,]
myorder_df
x<-myorder_df$menutypes.1   # the auto-renamed duplicated column
x
v<-c(1,5,3,4,5)
v1<-v[v==5]   # logical-mask subsetting
v1
v==5
df1<-myorder_df[myorder_df$menutypes=="solid", ]
df1
df2<-myorder_df[myorder_df$menucost>3, ]
df2
data<-read.table("gapminder.txt", header = TRUE)  # requires gapminder.txt in getwd()
head(data)
str(data)
typeof(data)  # "list" — data frames are lists of columns
x1<-data[,c(3,5)]
head(x1)
data[data$country=="Sweden",]
unique(data[,1])
head(data)
x2<-data[data$lifeExp<70,]
head(x2)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.R
\name{fars_read}
\alias{fars_read}
\title{Reads a data file and creates a dataframe with dplyr.}
\usage{
fars_read(filename)
}
\arguments{
\item{filename}{A character string giving the path of the file to read.}
}
\value{
A \code{dplyr::tbl_df} created by reading the whole file.
}
\description{
Reads a data file and creates a dataframe with dplyr.
}
/man/fars_read.Rd
no_license
leofranco/fars
R
false
true
396
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.R
\name{fars_read}
\alias{fars_read}
\title{Reads a data file and creates a dataframe with dplyr.}
\usage{
fars_read(filename)
}
\arguments{
\item{filename}{A character string giving the path of the file to read.}
}
\value{
A \code{dplyr::tbl_df} created by reading the whole file.
}
\description{
Reads a data file and creates a dataframe with dplyr.
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/function_prepare.R
\name{prep_segm}
\alias{prep_segm}
\title{Find segment and states for a Picard model}
\usage{
prep_segm(data, param, seg.type = NULL, nseg = NULL)
}
\arguments{
\item{data}{the data.frame with the different variables}
\item{param}{the param output of the segmentation}
\item{seg.type}{either 'hybrid' or 'dynprog'}
\item{nseg}{number of segments chosen}
}
\value{
a data.frame with states of the different segments
}
\description{
\code{prep_segm} finds the different segments and states of a given HMM model
}
\examples{
\dontrun{prep_segm(data, picard.param, seg.type = 'hybrid', nseg = NULL)}
}
/man/prep_segm.Rd
no_license
cynsky/segtools
R
false
true
716
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/function_prepare.R
\name{prep_segm}
\alias{prep_segm}
\title{Find segment and states for a Picard model}
\usage{
prep_segm(data, param, seg.type = NULL, nseg = NULL)
}
\arguments{
\item{data}{the data.frame with the different variables}
\item{param}{the param output of the segmentation}
\item{seg.type}{either 'hybrid' or 'dynprog'}
\item{nseg}{number of segments chosen}
}
\value{
a data.frame with states of the different segments
}
\description{
\code{prep_segm} finds the different segments and states of a given HMM model
}
\examples{
\dontrun{prep_segm(data, picard.param, seg.type = 'hybrid', nseg = NULL)}
}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/compareModels.R \name{compareModels} \alias{compareModels} \title{Compare the output of two Mplus models} \usage{ compareModels(m1, m2, show = "all", equalityMargin = c(param = 1e-04, pvalue = 1e-04), compare = "unstandardized", sort = "none", showFixed = FALSE, showNS = TRUE, diffTest = FALSE) } \arguments{ \item{m1}{The first Mplus model to be compared. Generated by \code{readModels}, \code{extractModelSummaries}, or \code{extractModelParameters}.} \item{m2}{The second Mplus model to be compared.} \item{show}{What aspects of the models should be compared. Options are "all", "summaries", "equal", "diff", "pdiff", and "unique". See below for details.} \item{equalityMargin}{Defines the discrepancy between models that is considered equal. Different margins can be specified for p-value equality versus parameter equality. Defaults to .0001 for both.} \item{compare}{Which parameter estimates should be compared. Options are "unstandardized", "stdyx.standardized" "stdy.standardized", and "std.standardized".} \item{sort}{How to sort the output of parameter comparisons. Options are "none", "type", "alphabetical", and "maxDiff". See below for details.} \item{showFixed}{Whether to display fixed parameters in the output (identified where the est/se = 999.000, per Mplus convention). Default to \code{FALSE}.} \item{showNS}{Whether to display non-significant parameter estimates. Can be \code{TRUE} or \code{FALSE}, or a numeric value (e.g., .10) that defines what p-value is filtered as non-significant.} \item{diffTest}{Whether to compute a chi-square difference test between the models. Assumes that the models are nested. Not available for MLMV, WLSMV, and ULSMV estimators. Use DIFFTEST in Mplus instead.} } \value{ No value is returned by this function. It is used to print model differences to the R console. 
} \description{ The \code{compareModels} function compares the output of two Mplus files and prints similarities and differences in the model summary statistics and parameter estimates. Options are provided for filtering out fixed parameters and nonsignificant parameters. When requested, \code{compareModels} will compute the chi-square difference test for nested models (does not apply to MLMV, WLSM, and WLSMV estimators, where DIFFTEST in Mplus is needed). Model outputs to be compared can be full summaries and parameters (generated by \code{readModels}), summary statistics only (\code{extractModelSummaries}), or parameters only (\code{extractModelParameters}). } \details{ The \code{show} parameter can be one or more of the following, which can be passed as a vector, such as c("equal", "pdiff"). \describe{ \item{show}{ \describe{ \item{"all"}{Display all available model comparison. Equivalent to c("summaries", "equal", "diff", "pdiff", "unique").} \item{"summaries"}{Print a comparison of model summary statistics. Compares the following summary statistics (where available): c("Title", "Observations", "Estimator", "Parameters", "LL", "AIC", "BIC", "ChiSqM_Value", "ChiSqM_DF", "CFI", "TLI", "RMSEA", "SRMR", "WRMR")} \item{"allsummaries"}{Prints a comparison of all summary statistics available in each model. May generate a lot of output.} \item{"equal"}{Print parameter estimates that are equal between models (i.e., \code{<= equalityMargin["param"]})}. \item{"diff"}{Print parameter estimates that are different between models (i.e., \code{> equalityMargin["param"]})}. \item{"pdiff"}{Print parameter estimates where the p-values differ between models (i.e., \code{> equalityMargin["pvalue"]})}. \item{"unique"}{Print parameter estimates that are unique to each model.} } } } The \code{sort} parameter determines the order in which parameter estimates are displayed. 
The following options are available: \describe{ \item{sort}{ \describe{ \item{"none"}{No sorting is performed, so parameters are output in the order presented in Mplus. (Default)} \item{"type"}{Sort parameters by their role in the model. This groups output by regression coefficient (ON), factor loadings (BY), covariances (WITH), and so on. Within each type, output is alphabetical.} \item{"alphabetical"}{Sort parameters in alphabetical order.} \item{"maxDiff"}{Sort parameter output by the largest differences between models (high to low).} } } } } \examples{ # make me!!! } \author{ Michael Hallquist } \keyword{interface}
/man/compareModels.Rd
no_license
zhanglj37/MplusAutomation
R
false
true
4,581
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/compareModels.R \name{compareModels} \alias{compareModels} \title{Compare the output of two Mplus models} \usage{ compareModels(m1, m2, show = "all", equalityMargin = c(param = 1e-04, pvalue = 1e-04), compare = "unstandardized", sort = "none", showFixed = FALSE, showNS = TRUE, diffTest = FALSE) } \arguments{ \item{m1}{The first Mplus model to be compared. Generated by \code{readModels}, \code{extractModelSummaries}, or \code{extractModelParameters}.} \item{m2}{The second Mplus model to be compared.} \item{show}{What aspects of the models should be compared. Options are "all", "summaries", "equal", "diff", "pdiff", and "unique". See below for details.} \item{equalityMargin}{Defines the discrepancy between models that is considered equal. Different margins can be specified for p-value equality versus parameter equality. Defaults to .0001 for both.} \item{compare}{Which parameter estimates should be compared. Options are "unstandardized", "stdyx.standardized" "stdy.standardized", and "std.standardized".} \item{sort}{How to sort the output of parameter comparisons. Options are "none", "type", "alphabetical", and "maxDiff". See below for details.} \item{showFixed}{Whether to display fixed parameters in the output (identified where the est/se = 999.000, per Mplus convention). Default to \code{FALSE}.} \item{showNS}{Whether to display non-significant parameter estimates. Can be \code{TRUE} or \code{FALSE}, or a numeric value (e.g., .10) that defines what p-value is filtered as non-significant.} \item{diffTest}{Whether to compute a chi-square difference test between the models. Assumes that the models are nested. Not available for MLMV, WLSMV, and ULSMV estimators. Use DIFFTEST in Mplus instead.} } \value{ No value is returned by this function. It is used to print model differences to the R console. 
} \description{ The \code{compareModels} function compares the output of two Mplus files and prints similarities and differences in the model summary statistics and parameter estimates. Options are provided for filtering out fixed parameters and nonsignificant parameters. When requested, \code{compareModels} will compute the chi-square difference test for nested models (does not apply to MLMV, WLSM, and WLSMV estimators, where DIFFTEST in Mplus is needed). Model outputs to be compared can be full summaries and parameters (generated by \code{readModels}), summary statistics only (\code{extractModelSummaries}), or parameters only (\code{extractModelParameters}). } \details{ The \code{show} parameter can be one or more of the following, which can be passed as a vector, such as c("equal", "pdiff"). \describe{ \item{show}{ \describe{ \item{"all"}{Display all available model comparison. Equivalent to c("summaries", "equal", "diff", "pdiff", "unique").} \item{"summaries"}{Print a comparison of model summary statistics. Compares the following summary statistics (where available): c("Title", "Observations", "Estimator", "Parameters", "LL", "AIC", "BIC", "ChiSqM_Value", "ChiSqM_DF", "CFI", "TLI", "RMSEA", "SRMR", "WRMR")} \item{"allsummaries"}{Prints a comparison of all summary statistics available in each model. May generate a lot of output.} \item{"equal"}{Print parameter estimates that are equal between models (i.e., \code{<= equalityMargin["param"]})}. \item{"diff"}{Print parameter estimates that are different between models (i.e., \code{> equalityMargin["param"]})}. \item{"pdiff"}{Print parameter estimates where the p-values differ between models (i.e., \code{> equalityMargin["pvalue"]})}. \item{"unique"}{Print parameter estimates that are unique to each model.} } } } The \code{sort} parameter determines the order in which parameter estimates are displayed. 
The following options are available: \describe{ \item{sort}{ \describe{ \item{"none"}{No sorting is performed, so parameters are output in the order presented in Mplus. (Default)} \item{"type"}{Sort parameters by their role in the model. This groups output by regression coefficient (ON), factor loadings (BY), covariances (WITH), and so on. Within each type, output is alphabetical.} \item{"alphabetical"}{Sort parameters in alphabetical order.} \item{"maxDiff"}{Sort parameter output by the largest differences between models (high to low).} } } } } \examples{ # make me!!! } \author{ Michael Hallquist } \keyword{interface}
# Auto-generated fuzz-regression case (libFuzzer/valgrind harness):
# replays one captured input against the unexported
# diceR:::connectivity_matrix and prints the structure of the result.
# NOTE(review): uses `:::` to reach an internal function; requires diceR.
testlist <- list(x = c(1.42404726944461e-306, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(diceR:::connectivity_matrix,testlist)
str(result)
/diceR/inst/testfiles/connectivity_matrix/libFuzzer_connectivity_matrix/connectivity_matrix_valgrind_files/1609958487-test.R
no_license
akhikolla/updated-only-Issues
R
false
false
312
r
# Auto-generated fuzz-regression case (libFuzzer/valgrind harness):
# replays one captured input against the unexported
# diceR:::connectivity_matrix and prints the structure of the result.
# NOTE(review): uses `:::` to reach an internal function; requires diceR.
testlist <- list(x = c(1.42404726944461e-306, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(diceR:::connectivity_matrix,testlist)
str(result)
#*****************************************************************
#-----------------------------------------------------------------
#' Change the print function for the data set: 'str_colorsOfMusic'
#'
#' Change the print function for the data set: 'str_colorsOfMusic'
#'
#' @param x a list: the data set: 'str_colorsOfMusic'
#' @param ... the rest
#' @author Herve Abdi
#' @export
print.str_colorsOfMusic <- function (x, ...) {
  # S3 print method: writes a short human-readable description of the
  # list components to the console, then returns `x` invisibly
  # (the conventional contract for print methods).
  ndash = 78 # How many dashes for separation lines
  cat(rep("-", ndash), sep = "")
  cat("\n 12 children and 10 Adults pick one color (out of 10) to describe 9 pieces of music \n")
  # cat("\n List name: ",deparse(eval(substitute(substitute(x)))),"\n")
  cat(rep("-", ndash), sep = "")
  # one line per component of the list, with a brief description
  cat("\n$participantsChoice ", "A df. Rows: participants, Columns: music pieces")
  cat("\n$participantsDescription ", "Age (Child/Adult) and Gender (F/M) of participants")
  cat("\n$colorInformation ", "Name and code of the colors")
  cat("\n$cubeOfData ", "The 10-Color*9-Music*22-Participant cube of 0/1 data")
  cat("\n$contingencyTable ", "The 10-Color*9-Music (pseudo) contingency table")
  cat("\n",rep("-", ndash), sep = "")
  cat("\n")
  invisible(x)
} # end of function print.str_colorsOfMusic
#--------------------------------------------------------------------
# Description for colorOfMusic
#' \code{colorOfMusic}: 12 Children and 10 Adults picked up
#' one color to describe each of 9 pieces of music.
#'
#' \code{colorOfMusic}. A data set:
#' 12 Children and 10 Adults picked up
#' one color to describe 9 pieces of music (one at a time).
#' The participants are described by their
#' age (children vs adults) and by their gender
#' (F vs M). This data set is used to illustrate
#' correspondence analysis in Abdi and Bera (2018).
#' @usage data("colorOfMusic") #' @format A list with 5 data frames or arrays #' describing respectively #' 1) the check mark data, 2) the description of the participants, #' 3) the names and color codes of the 10 colors used, #' 4) the binary cube of data (color by music by participants), #' and 5) #' the (pseudo) contingency table of data (color by music). #' \describe{ #' \item{\code{participantsChoice}}{A 22 rows (Participants) by 9 #' columns (Pieces of Music) data frame. The number #' at the intersection of #' of a row and a column gives the number-code #' (from 1 to 10) of the chosen color #' for the music (column) by the participant (row). #' The name of the colors are given in the data frame stored in #' \code{$colorInformation}.} #'\item{\code{participantsDescription}}{A 22 by 2 #'data frame describing the #'participants according to \code{Age} (Child vs Adult) and #' \code{Gender} (F vs M).} #' \item{\code{colorInformation}}{The name #' of the colors and their color code (useful when plotting the #' data).} #' \item{\code{cubeOfData}}{The 10-Colors by 9-Music pieces #' by 22-Participants #' cube of 0/1 data. A value of 1 (resp. 0) means that the participant #' chose (resp. did not choose) #' the color to match the piece of music} #' \item{\code{contingencyTable}}{The 10-Colors #' by 9-Music Pieces (pseudo) contingency table. #' The value at the intersection of a row (color) and a column #' (piece of music) is the number of participants who chose this #' color to match this piece of music. This contingency #' table is typically analyzed with correspondence analysis. #' (e.g., see Abdi and Bera, 2018). } #' } #' @source Abdi, H. and Bera, M. (2018). Correspondence Analysis. #' \emph{Encyclopedia of Social Network Analysis and Mining} (2nd Edition). #' New York: Springer Verlag. #' \url{www.utdallas.edu/~herve}. #' @author Herve Abdi "colorOfMusic"
/R/colorOfMusic.R
no_license
HerveAbdi/PTCA4CATA
R
false
false
3,703
r
#*****************************************************************
#-----------------------------------------------------------------
#' Change the print function for the data set: 'str_colorsOfMusic'
#'
#' Change the print function for the data set: 'str_colorsOfMusic'
#'
#' @param x a list: the data set: 'str_colorsOfMusic'
#' @param ... the rest
#' @author Herve Abdi
#' @export
print.str_colorsOfMusic <- function (x, ...) {
  # S3 print method: writes a short human-readable description of the
  # list components to the console, then returns `x` invisibly
  # (the conventional contract for print methods).
  ndash = 78 # How many dashes for separation lines
  cat(rep("-", ndash), sep = "")
  cat("\n 12 children and 10 Adults pick one color (out of 10) to describe 9 pieces of music \n")
  # cat("\n List name: ",deparse(eval(substitute(substitute(x)))),"\n")
  cat(rep("-", ndash), sep = "")
  # one line per component of the list, with a brief description
  cat("\n$participantsChoice ", "A df. Rows: participants, Columns: music pieces")
  cat("\n$participantsDescription ", "Age (Child/Adult) and Gender (F/M) of participants")
  cat("\n$colorInformation ", "Name and code of the colors")
  cat("\n$cubeOfData ", "The 10-Color*9-Music*22-Participant cube of 0/1 data")
  cat("\n$contingencyTable ", "The 10-Color*9-Music (pseudo) contingency table")
  cat("\n",rep("-", ndash), sep = "")
  cat("\n")
  invisible(x)
} # end of function print.str_colorsOfMusic
#--------------------------------------------------------------------
# Description for colorOfMusic
#' \code{colorOfMusic}: 12 Children and 10 Adults picked up
#' one color to describe each of 9 pieces of music.
#'
#' \code{colorOfMusic}. A data set:
#' 12 Children and 10 Adults picked up
#' one color to describe 9 pieces of music (one at a time).
#' The participants are described by their
#' age (children vs adults) and by their gender
#' (F vs M). This data set is used to illustrate
#' correspondence analysis in Abdi and Bera (2018).
#' @usage data("colorOfMusic") #' @format A list with 5 data frames or arrays #' describing respectively #' 1) the check mark data, 2) the description of the participants, #' 3) the names and color codes of the 10 colors used, #' 4) the binary cube of data (color by music by participants), #' and 5) #' the (pseudo) contingency table of data (color by music). #' \describe{ #' \item{\code{participantsChoice}}{A 22 rows (Participants) by 9 #' columns (Pieces of Music) data frame. The number #' at the intersection of #' of a row and a column gives the number-code #' (from 1 to 10) of the chosen color #' for the music (column) by the participant (row). #' The name of the colors are given in the data frame stored in #' \code{$colorInformation}.} #'\item{\code{participantsDescription}}{A 22 by 2 #'data frame describing the #'participants according to \code{Age} (Child vs Adult) and #' \code{Gender} (F vs M).} #' \item{\code{colorInformation}}{The name #' of the colors and their color code (useful when plotting the #' data).} #' \item{\code{cubeOfData}}{The 10-Colors by 9-Music pieces #' by 22-Participants #' cube of 0/1 data. A value of 1 (resp. 0) means that the participant #' chose (resp. did not choose) #' the color to match the piece of music} #' \item{\code{contingencyTable}}{The 10-Colors #' by 9-Music Pieces (pseudo) contingency table. #' The value at the intersection of a row (color) and a column #' (piece of music) is the number of participants who chose this #' color to match this piece of music. This contingency #' table is typically analyzed with correspondence analysis. #' (e.g., see Abdi and Bera, 2018). } #' } #' @source Abdi, H. and Bera, M. (2018). Correspondence Analysis. #' \emph{Encyclopedia of Social Network Analysis and Mining} (2nd Edition). #' New York: Springer Verlag. #' \url{www.utdallas.edu/~herve}. #' @author Herve Abdi "colorOfMusic"
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/extras.R \name{icon} \alias{icon} \title{Create an icon} \usage{ icon(x, ...) } \arguments{ \item{x}{name of glyphicon} \item{...}{other attributes to add to the span element} } \description{ Create an icon }
/man/icon.Rd
no_license
alteryx/AlteryxRviz
R
false
false
297
rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/extras.R \name{icon} \alias{icon} \title{Create an icon} \usage{ icon(x, ...) } \arguments{ \item{x}{name of glyphicon} \item{...}{other attributes to add to the span element} } \description{ Create an icon }
# S3 generics for swirl's interactive user commands (nxt(), reset(),
# submit(), play(), main(), restart()). Each dispatches on the
# environment `e` that carries the current lesson state.
do_nxt <- function(e)UseMethod("do_nxt")
# FIX: this generic previously dispatched with UseMethod("do_rst"), but
# the method defined below is named do_reset.default, so dispatch could
# never find it ("no applicable method for 'do_rst'"). Dispatch on
# "do_reset" so the default method is reachable.
do_reset <- function(e)UseMethod("do_reset")
do_submit <- function(e)UseMethod("do_submit")
do_play <- function(e)UseMethod("do_play")
do_main <- function(e)UseMethod("do_main")
do_restart <- function(e)UseMethod("do_restart")

# Resume the lesson after play mode: restore swirl's "official" variable
# values to the global environment and rewind the instruction pointer.
do_nxt.default <- function(e) {
  ## Using the stored list of "official" swirl variables and values,
  # assign variables of the same names in the global environment
  # their "official" values, in case the user has changed them
  # while playing.
  if (length(e$snapshot) > 0) xfer(as.environment(e$snapshot), globalenv())
  swirl_out("Resuming lesson...")
  e$playing <- FALSE
  e$iptr <- 1
}

# Reset the user's script to its original state and point the
# instruction pointer back at the script question.
do_reset.default <- function(e) {
  e$playing <- FALSE
  e$reset <- TRUE
  e$iptr <- 2
  swirl_out("I just reset the script to its original state. If it doesn't refresh immediately, you may need to click on it.",
            skip_after = TRUE)
}

# Read, parse, and source the script the user submitted.
do_submit.default <- function(e) {
  e$playing <- FALSE
  # Get contents from user's submitted script
  e$script_contents <- readLines(e$script_temp_path, warn = FALSE)
  # Save expr to e; parsing may fail, so keep the try() result
  e$expr <- try(parse(text = e$script_contents), silent = TRUE)
  swirl_out("Sourcing your script...", skip_after = TRUE)
  try(source(e$script_temp_path))
}

# Enter play mode: the user experiments freely until nxt() is called.
do_play.default <- function(e) {
  swirl_out("Entering play mode. Experiment as you please, then type nxt() when you are ready to resume the lesson.",
            skip_after = TRUE)
  e$playing <- TRUE
}

# Return to the main menu, discarding the current lesson object.
do_main.default <- function(e) {
  swirl_out("Returning to the main menu...")
  # Remove the current lesson. Progress has been saved already.
  if (exists("les", e, inherits = FALSE)) {
    rm("les", envir = e, inherits = FALSE)
  }
}

# Placeholder: restarting a lesson is not implemented yet.
do_restart.default <- function(e) {
  swirl_out("This feature is not implemented yet for Swirl.")
}
/R/actions.R
no_license
sunsure/swirl
R
false
false
1,777
r
# S3 generics for swirl's interactive user commands (nxt(), reset(),
# submit(), play(), main(), restart()). Each dispatches on the
# environment `e` that carries the current lesson state.
do_nxt <- function(e)UseMethod("do_nxt")
# FIX: this generic previously dispatched with UseMethod("do_rst"), but
# the method defined below is named do_reset.default, so dispatch could
# never find it ("no applicable method for 'do_rst'"). Dispatch on
# "do_reset" so the default method is reachable.
do_reset <- function(e)UseMethod("do_reset")
do_submit <- function(e)UseMethod("do_submit")
do_play <- function(e)UseMethod("do_play")
do_main <- function(e)UseMethod("do_main")
do_restart <- function(e)UseMethod("do_restart")

# Resume the lesson after play mode: restore swirl's "official" variable
# values to the global environment and rewind the instruction pointer.
do_nxt.default <- function(e) {
  ## Using the stored list of "official" swirl variables and values,
  # assign variables of the same names in the global environment
  # their "official" values, in case the user has changed them
  # while playing.
  if (length(e$snapshot) > 0) xfer(as.environment(e$snapshot), globalenv())
  swirl_out("Resuming lesson...")
  e$playing <- FALSE
  e$iptr <- 1
}

# Reset the user's script to its original state and point the
# instruction pointer back at the script question.
do_reset.default <- function(e) {
  e$playing <- FALSE
  e$reset <- TRUE
  e$iptr <- 2
  swirl_out("I just reset the script to its original state. If it doesn't refresh immediately, you may need to click on it.",
            skip_after = TRUE)
}

# Read, parse, and source the script the user submitted.
do_submit.default <- function(e) {
  e$playing <- FALSE
  # Get contents from user's submitted script
  e$script_contents <- readLines(e$script_temp_path, warn = FALSE)
  # Save expr to e; parsing may fail, so keep the try() result
  e$expr <- try(parse(text = e$script_contents), silent = TRUE)
  swirl_out("Sourcing your script...", skip_after = TRUE)
  try(source(e$script_temp_path))
}

# Enter play mode: the user experiments freely until nxt() is called.
do_play.default <- function(e) {
  swirl_out("Entering play mode. Experiment as you please, then type nxt() when you are ready to resume the lesson.",
            skip_after = TRUE)
  e$playing <- TRUE
}

# Return to the main menu, discarding the current lesson object.
do_main.default <- function(e) {
  swirl_out("Returning to the main menu...")
  # Remove the current lesson. Progress has been saved already.
  if (exists("les", e, inherits = FALSE)) {
    rm("les", envir = e, inherits = FALSE)
  }
}

# Placeholder: restarting a lesson is not implemented yet.
do_restart.default <- function(e) {
  swirl_out("This feature is not implemented yet for Swirl.")
}
#' Calculate D-Prime Sub-Scores from D-Prime Labels
#'
#' Note: Must run dprime_lab.r first.
#' Calculates d-prime category scores from d-prime labels: total number of hits, misses, false alarms, and correct rejections for each participant.
#' These columns must already be in the data frame that is passed in as an argument.
#'
#' @param df is the data frame with columns corresponding to d-prime labels
#' @param ... names of cols corresponding to IVs to group by (supports n > 1 cols). IVs must be factors.
#'
#' @return A grouped tibble with one row per group and columns
#'   \code{Hits}, \code{Misses}, \code{FalseAlarms}, \code{CorrectRejs},
#'   \code{TotalTarg}, \code{TotalDis}, and \code{NumRes}.
#' @export
#'
#' @examples
#'
#' # make data frame
#' df <- data.frame(participant = c(rep("Participant 1", 24), rep("Participant 2", 24),
#'                                  rep("Participant 3", 24), rep("Participant 4", 24)),
#'   stim1 = rep(c(rep(c("Apple", "Orange", "Orange", "Apple"), 6),
#'                 rep(c("Orange", "Apple", "Apple", "Orange"), 6)), 2),
#'   stim2 = rep(c(rep(c("Apple", "Orange", "Apple", "Orange"), 6),
#'                 rep(c("Orange", "Apple", "Apple", "Orange"), 6)), 2),
#'   correct = c(rep(c(1,1,0,0),1), rep(c(1,0,1,0),1),
#'               rep(c(0,0,1,1),2), rep(c(0,1,0,1),2),
#'               rep(c(0,1,0,1),2), rep(c(0,0,1,1),2),
#'               rep(c(1,0,1,0),1), c(rep(c(1,1,0,0),1))))
#'
#' # reorder by participant
#' df <- df[order(df[,"participant"]),]
#'
#' # calculate dprime labels
#' df.dprime <- dprime_lab("stim1", "stim2", "correct", data = df)
#'
#' # calculate dprime sub-scores
#' dprime_cat(df.dprime, participant)
dprime_cat <- function(df, ...){
  # Parker Tichko, 2020

  # Every call below is namespace-qualified, so the packages only need to
  # be installed, not attached. Fail fast with a clear error if missing
  # (require() would only warn and return FALSE, deferring the failure).
  if (!requireNamespace("dplyr", quietly = TRUE))
    stop("Package 'dplyr' is required for dprime_cat().", call. = FALSE)
  if (!requireNamespace("tibble", quietly = TRUE))
    stop("Package 'tibble' is required for dprime_cat().", call. = FALSE)

  # drop NAs when summing the 0/1 d-prime label columns
  na.rm <- TRUE

  # quote input args to use with dplyr functions (tidy evaluation)
  group_vars <- dplyr::quos(...)

  # convert to tibble, if not already a tibble
  if (!tibble::is_tibble(df)) {
    df <- tibble::as_tibble(df)
  }

  # group by the supplied IVs, then count each d-prime label per group
  df <- dplyr::group_by(df, !!!group_vars)
  dplyr::summarise(df,
                   Hits        = sum(Hit,        na.rm = na.rm),
                   Misses      = sum(Miss,       na.rm = na.rm),
                   FalseAlarms = sum(FalseAlarm, na.rm = na.rm),
                   CorrectRejs = sum(CorrectRej, na.rm = na.rm),
                   TotalTarg   = Hits + Misses,
                   TotalDis    = FalseAlarms + CorrectRejs,
                   NumRes      = TotalTarg + TotalDis)
  # NOTE(review): hit/false-alarm *rates* and d-prime itself were left as
  # commented-out code in the original (qnorm(0)/qnorm(1) are infinite
  # without a correction); compute them downstream once a correction,
  # e.g. the 0.00001 adjustment sketched there, is settled on.
}
/r/dprime_cat.R
permissive
ptichko/ptichko.github.io
R
false
false
3,313
r
#' Calculate D-Prime Sub-Scores from D-Prime Labels
#'
#' Note: Must run dprime_lab.r first.
#' Calculates d-prime category scores from d-prime labels: total number of hits, misses, false alarms, and correct rejections for each participant.
#' These columns must already be in the data frame that is passed in as an argument.
#'
#' @param df is the data frame with columns corresponding to d-prime labels
#' @param ... names of cols corresponding to IVs to group by (supports n > 1 cols). IVs must be factors.
#'
#' @return A grouped tibble with one row per group and columns
#'   \code{Hits}, \code{Misses}, \code{FalseAlarms}, \code{CorrectRejs},
#'   \code{TotalTarg}, \code{TotalDis}, and \code{NumRes}.
#' @export
#'
#' @examples
#'
#' # make data frame
#' df <- data.frame(participant = c(rep("Participant 1", 24), rep("Participant 2", 24),
#'                                  rep("Participant 3", 24), rep("Participant 4", 24)),
#'   stim1 = rep(c(rep(c("Apple", "Orange", "Orange", "Apple"), 6),
#'                 rep(c("Orange", "Apple", "Apple", "Orange"), 6)), 2),
#'   stim2 = rep(c(rep(c("Apple", "Orange", "Apple", "Orange"), 6),
#'                 rep(c("Orange", "Apple", "Apple", "Orange"), 6)), 2),
#'   correct = c(rep(c(1,1,0,0),1), rep(c(1,0,1,0),1),
#'               rep(c(0,0,1,1),2), rep(c(0,1,0,1),2),
#'               rep(c(0,1,0,1),2), rep(c(0,0,1,1),2),
#'               rep(c(1,0,1,0),1), c(rep(c(1,1,0,0),1))))
#'
#' # reorder by participant
#' df <- df[order(df[,"participant"]),]
#'
#' # calculate dprime labels
#' df.dprime <- dprime_lab("stim1", "stim2", "correct", data = df)
#'
#' # calculate dprime sub-scores
#' dprime_cat(df.dprime, participant)
dprime_cat <- function(df, ...){
  # Parker Tichko, 2020

  # Every call below is namespace-qualified, so the packages only need to
  # be installed, not attached. Fail fast with a clear error if missing
  # (require() would only warn and return FALSE, deferring the failure).
  if (!requireNamespace("dplyr", quietly = TRUE))
    stop("Package 'dplyr' is required for dprime_cat().", call. = FALSE)
  if (!requireNamespace("tibble", quietly = TRUE))
    stop("Package 'tibble' is required for dprime_cat().", call. = FALSE)

  # drop NAs when summing the 0/1 d-prime label columns
  na.rm <- TRUE

  # quote input args to use with dplyr functions (tidy evaluation)
  group_vars <- dplyr::quos(...)

  # convert to tibble, if not already a tibble
  if (!tibble::is_tibble(df)) {
    df <- tibble::as_tibble(df)
  }

  # group by the supplied IVs, then count each d-prime label per group
  df <- dplyr::group_by(df, !!!group_vars)
  dplyr::summarise(df,
                   Hits        = sum(Hit,        na.rm = na.rm),
                   Misses      = sum(Miss,       na.rm = na.rm),
                   FalseAlarms = sum(FalseAlarm, na.rm = na.rm),
                   CorrectRejs = sum(CorrectRej, na.rm = na.rm),
                   TotalTarg   = Hits + Misses,
                   TotalDis    = FalseAlarms + CorrectRejs,
                   NumRes      = TotalTarg + TotalDis)
  # NOTE(review): hit/false-alarm *rates* and d-prime itself were left as
  # commented-out code in the original (qnorm(0)/qnorm(1) are infinite
  # without a correction); compute them downstream once a correction,
  # e.g. the 0.00001 adjustment sketched there, is settled on.
}
# This code is not exactly correct, but is useful to present the idea:
# it only checks whether a turbine location lies INSIDE a lightning-strike
# ellipse. It does not check for overlap between the ellipse and the
# turbine circle (there can be overlap even when the circle's center is
# outside the ellipse). The correct computation is implemented in
# turbineStrikeOverLapArea.R and is what the actual classification uses;
# the strikeCounter value produced here is for presentation only.
#
# Plots lightning strike counts for each turbine.
plotTurbineStrikeCount <- function(LightningData,         # lightning strike data with columns: x, y, majorRad, minorRad, angle, and stormIdx (old data) / peakAmpAbs (new data)
                                   TurbineData,           # turbine data with columns: x, y
                                   plotEllipseFlag = TRUE, # plot the lightning strike ellipses?
                                   newDataFlag = TRUE      # TRUE if LightningData is the new data set
                                   ) {
  lightStrikeCenters <- cbind(LightningData$x, LightningData$y)
  lightStrikeRadius  <- cbind(LightningData$majorRad, LightningData$minorRad)
  turbineCenters     <- cbind(TurbineData$x, TurbineData$y)

  indxPoints <- 1:10 # only plot the first 10 lightning strikes

  if (!newDataFlag) {
    lightStrikeIndx <- which(LightningData$stormIdx == 3)[indxPoints]    # for old data set
  } else {
    lightStrikeIndx <- which(LightningData$peakAmpAbs >= 25)[indxPoints] # for new data set
  }

  # FIX: when fewer than 10 strikes match, `[indxPoints]` pads the result
  # with NA; previously those NA indices were fed to the subsetting in the
  # loop below (crashing it) and the loop length disagreed with the matrix
  # row count. Drop NAs up front so both agree.
  lightStrikeIndx <- lightStrikeIndx[!is.na(lightStrikeIndx)]

  numLightStrikes <- length(lightStrikeIndx)
  numTurbines <- nrow(turbineCenters)
  TurbineStrikeIdx <- matrix(0, numLightStrikes, numTurbines)

  iLightStrikeCount <- 0
  for (iLightStrike in lightStrikeIndx) {
    iLightStrikeCount <- iLightStrikeCount + 1
    # indicator for which ellipse contains each turbine
    TurbineStrikeIdx[iLightStrikeCount, ] <-
      as.numeric(isPointInEllipse(turbineCenters,
                                  lightStrikeCenters[iLightStrike, ],
                                  lightStrikeRadius[iLightStrike, ],
                                  LightningData$angle[iLightStrike]))
  }

  # count the number of ellipses each turbine belongs to
  strikeCounter <- colSums(TurbineStrikeIdx)

  # plot the strikeCounter values for each turbine
  hfigTurbineStrikeCount <-
    plotTurbineAttributes(TurbineData, strikeCounter,
                          'Plot of lightning strike counts for all turbine locations',
                          'Lightning Strike Count',
                          showGndTruth = FALSE, nLevels = max(strikeCounter) * 2)

  if (plotEllipseFlag) {
    # overlay the strike ellipses on the turbine plot
    ellipseData <- getEllipseData(cbind(LightningData$x[lightStrikeIndx], LightningData$y[lightStrikeIndx]),
                                  cbind(LightningData$majorRad[lightStrikeIndx], LightningData$minorRad[lightStrikeIndx]),
                                  LightningData$angle[lightStrikeIndx])
    # input to cartesian2LatLon has to be in meters (presumably x/y are in
    # km, hence the * 1000 — TODO confirm against the data set's units)
    tempPos <- cartesian2LatLon(ellipseData$x * 1000, ellipseData$y * 1000)
    ellipseData$lon <- tempPos[, 1]
    ellipseData$lat <- tempPos[, 2]
    hfigTurbineStrikeCount <- hfigTurbineStrikeCount +
      geom_path(data = ellipseData, mapping = aes(lon, lat, group = grp),
                color = 'blue', inherit.aes = FALSE)
  }

  print(hfigTurbineStrikeCount)
  # ggsave(file = "Figures/TurbineStrikeCount.png", width = 16, height = 9, type = "cairo-png");
}
/WorkingCodes/plorTurbineStrikeCount.R
permissive
sahas3/turbine-damage-probability
R
false
false
3,646
r
# This code is not exactly correct, but is useful to present the idea:
# it only checks whether a turbine location lies INSIDE a lightning-strike
# ellipse. It does not check for overlap between the ellipse and the
# turbine circle (there can be overlap even when the circle's center is
# outside the ellipse). The correct computation is implemented in
# turbineStrikeOverLapArea.R and is what the actual classification uses;
# the strikeCounter value produced here is for presentation only.
#
# Plots lightning strike counts for each turbine.
plotTurbineStrikeCount <- function(LightningData,         # lightning strike data with columns: x, y, majorRad, minorRad, angle, and stormIdx (old data) / peakAmpAbs (new data)
                                   TurbineData,           # turbine data with columns: x, y
                                   plotEllipseFlag = TRUE, # plot the lightning strike ellipses?
                                   newDataFlag = TRUE      # TRUE if LightningData is the new data set
                                   ) {
  lightStrikeCenters <- cbind(LightningData$x, LightningData$y)
  lightStrikeRadius  <- cbind(LightningData$majorRad, LightningData$minorRad)
  turbineCenters     <- cbind(TurbineData$x, TurbineData$y)

  indxPoints <- 1:10 # only plot the first 10 lightning strikes

  if (!newDataFlag) {
    lightStrikeIndx <- which(LightningData$stormIdx == 3)[indxPoints]    # for old data set
  } else {
    lightStrikeIndx <- which(LightningData$peakAmpAbs >= 25)[indxPoints] # for new data set
  }

  # FIX: when fewer than 10 strikes match, `[indxPoints]` pads the result
  # with NA; previously those NA indices were fed to the subsetting in the
  # loop below (crashing it) and the loop length disagreed with the matrix
  # row count. Drop NAs up front so both agree.
  lightStrikeIndx <- lightStrikeIndx[!is.na(lightStrikeIndx)]

  numLightStrikes <- length(lightStrikeIndx)
  numTurbines <- nrow(turbineCenters)
  TurbineStrikeIdx <- matrix(0, numLightStrikes, numTurbines)

  iLightStrikeCount <- 0
  for (iLightStrike in lightStrikeIndx) {
    iLightStrikeCount <- iLightStrikeCount + 1
    # indicator for which ellipse contains each turbine
    TurbineStrikeIdx[iLightStrikeCount, ] <-
      as.numeric(isPointInEllipse(turbineCenters,
                                  lightStrikeCenters[iLightStrike, ],
                                  lightStrikeRadius[iLightStrike, ],
                                  LightningData$angle[iLightStrike]))
  }

  # count the number of ellipses each turbine belongs to
  strikeCounter <- colSums(TurbineStrikeIdx)

  # plot the strikeCounter values for each turbine
  hfigTurbineStrikeCount <-
    plotTurbineAttributes(TurbineData, strikeCounter,
                          'Plot of lightning strike counts for all turbine locations',
                          'Lightning Strike Count',
                          showGndTruth = FALSE, nLevels = max(strikeCounter) * 2)

  if (plotEllipseFlag) {
    # overlay the strike ellipses on the turbine plot
    ellipseData <- getEllipseData(cbind(LightningData$x[lightStrikeIndx], LightningData$y[lightStrikeIndx]),
                                  cbind(LightningData$majorRad[lightStrikeIndx], LightningData$minorRad[lightStrikeIndx]),
                                  LightningData$angle[lightStrikeIndx])
    # input to cartesian2LatLon has to be in meters (presumably x/y are in
    # km, hence the * 1000 — TODO confirm against the data set's units)
    tempPos <- cartesian2LatLon(ellipseData$x * 1000, ellipseData$y * 1000)
    ellipseData$lon <- tempPos[, 1]
    ellipseData$lat <- tempPos[, 2]
    hfigTurbineStrikeCount <- hfigTurbineStrikeCount +
      geom_path(data = ellipseData, mapping = aes(lon, lat, group = grp),
                color = 'blue', inherit.aes = FALSE)
  }

  print(hfigTurbineStrikeCount)
  # ggsave(file = "Figures/TurbineStrikeCount.png", width = 16, height = 9, type = "cairo-png");
}
## makeCacheMatrix: wrap a matrix in an object that can memoise its
## inverse. It returns a list of four accessor closures (set, get,
## setinverse, getinverse) that all share the enclosing environment,
## where the matrix `x` and its cached inverse live. `<<-` assigns into
## that shared enclosing environment rather than creating a local copy,
## which is how the closures communicate.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      # Replace the stored matrix and invalidate any stale cached inverse.
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}

## cacheSolve: return the inverse of the special "matrix" produced by
## makeCacheMatrix. The inverse is computed with solve() only on the
## first call; subsequent calls return the cached copy (announced via a
## message), avoiding the repeated computation.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  result <- x$getinverse()
  if (is.null(result)) {
    result <- solve(x$get(), ...)
    x$setinverse(result)
  } else {
    message("getting cached data")
  }
  result
}
/cachematrix.R
no_license
goldenmean1618/ProgrammingAssignment2
R
false
false
2,091
r
## makeCacheMatrix: wrap a matrix in an object that can memoise its
## inverse. It returns a list of four accessor closures (set, get,
## setinverse, getinverse) that all share the enclosing environment,
## where the matrix `x` and its cached inverse live. `<<-` assigns into
## that shared enclosing environment rather than creating a local copy,
## which is how the closures communicate.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      # Replace the stored matrix and invalidate any stale cached inverse.
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}

## cacheSolve: return the inverse of the special "matrix" produced by
## makeCacheMatrix. The inverse is computed with solve() only on the
## first call; subsequent calls return the cached copy (announced via a
## message), avoiding the repeated computation.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  result <- x$getinverse()
  if (is.null(result)) {
    result <- solve(x$get(), ...)
    x$setinverse(result)
  } else {
    message("getting cached data")
  }
  result
}
utils::globalVariables("density") #' @title Plot frequencies of variables #' @name sjp.frq #' #' @description Plot frequencies of a variable as bar graph, histogram, #' box plot etc. #' #' @note This function only works with variables with integer values (or numeric #' factor levels), i.e. scales / centred variables #' with decimals may result in unexpected behaviour. #' #' @param sort.frq Determines whether categories should be sorted #' according to their frequencies or not. Default is \code{"none"}, so #' categories are not sorted by frequency. Use \code{"asc"} or #' \code{"desc"} for sorting categories ascending or descending order. #' @param geom.colors User defined color for geoms, e.g. \code{geom.colors = "#0080ff"}. #' @param errorbar.color Color of confidence interval bars (error bars). #' Only applies to \code{type = "bar"}. In case of dot plots, error bars #' will have same colors as dots (see \code{geom.colors}). #' @param show.mean Logical, if \code{TRUE}, a vertical line in histograms #' is drawn to indicate the mean value of the variables. Only #' applies to histogram-charts. #' @param show.mean.val Logical, if \code{TRUE} (default), the mean value #' is printed to the vertical line that indicates the variable's #' mean. Only applies to histogram-charts. #' @param show.sd Logical, if \code{TRUE}, the standard deviation #' is annotated as shaded rectangle around the mean intercept #' line. Only applies to histogram-charts. #' @param mean.line.type Numeric value, indicating the linetype of the mean #' intercept line. Only applies to histogram-charts and #' when \code{show.mean = TRUE}. #' @param mean.line.size Numeric, size of the mean intercept line. Only #' applies to histogram-charts and when \code{show.mean = TRUE}. #' @param normal.curve Logical, if \code{TRUE}, a normal curve, which is adjusted to the data, #' is plotted over the histogram or density plot. Default is #' \code{FALSE}. 
Only applies when histograms or density plots are plotted (see \code{type}). #' @param normal.curve.color Color of the normal curve line. Only #' applies if \code{normal.curve = TRUE}. #' @param normal.curve.size Numeric, size of the normal curve line. Only #' applies if \code{normal.curve = TRUE}. #' @param normal.curve.alpha Transparancy level (alpha value) of the normal curve. Only #' applies if \code{normal.curve = TRUE}. #' @param xlim Numeric vector of length two, defining lower and upper axis limits #' of the x scale. By default, this argument is set to \code{NULL}, i.e. the #' x-axis fits to the required range of the data. #' @param axis.title Character vector of length one or two (depending on #' the plot function and type), used as title(s) for the x and y axis. #' If not specified, a default labelling is chosen. #' \strong{Note:} Some plot types do not support this argument. In such #' cases, use the return value and add axis titles manually with #' \code{\link[ggplot2]{labs}}, e.g.: \code{$plot.list[[1]] + labs(x = ...)} #' #' @inheritParams sjp.grpfrq #' #' @return A ggplot-object. #' #' @examples #' library(sjlabelled) #' data(efc) #' #' # boxplot #' sjp.frq(efc$e17age, type = "box") #' #' # histogram #' sjp.frq(efc$e17age, type = "hist", show.mean = TRUE) #' #' # violin plot #' sjp.frq(efc$e17age, type = "v") #' #' # bar plot #' sjp.frq(efc$e42dep) #' #' library(sjmisc) #' # grouped variable #' ageGrp <- group_var(efc$e17age) #' ageGrpLab <- group_labels(efc$e17age) #' sjp.frq(ageGrp, title = get_label(efc$e17age), axis.labels = ageGrpLab) #' #' # plotting confidence intervals. 
expand grid and v/hjust for text labels #' sjp.frq( #' efc$e15relat, type = "dot", show.ci = TRUE, sort.frq = "desc", #' coord.flip = TRUE, expand.grid = TRUE, vjust = "bottom", hjust = "left" #' ) #' #' # Simulate ggplot-default histogram #' sjp.frq(efc$c160age, type = "h", geom.size = 3) #' #' # histogram with overlayed normal curve #' sjp.frq(efc$c160age, type = "h", show.mean = TRUE, show.mean.val = TRUE, #' normal.curve = TRUE, show.sd = TRUE, normal.curve.color = "blue", #' normal.curve.size = 3, ylim = c(0,50)) #' #' @import ggplot2 #' @importFrom sjstats wtd_sd #' @importFrom sjmisc group_labels group_var to_value #' @importFrom sjlabelled set_labels #' @importFrom stats na.omit sd weighted.mean dnorm #' @importFrom rlang .data #' @export sjp.frq <- function(var.cnt, title = "", weight.by = NULL, title.wtd.suffix = NULL, sort.frq = c("none", "asc", "desc"), type = c("bar", "dot", "histogram", "line", "density", "boxplot", "violin"), geom.size = NULL, geom.colors = "#336699", errorbar.color = "darkred", axis.title = NULL, axis.labels = NULL, xlim = NULL, ylim = NULL, wrap.title = 50, wrap.labels = 20, grid.breaks = NULL, expand.grid = FALSE, show.values = TRUE, show.n = TRUE, show.prc = TRUE, show.axis.values = TRUE, show.ci = FALSE, show.na = FALSE, show.mean = FALSE, show.mean.val = TRUE, show.sd = TRUE, mean.line.type = 2, mean.line.size = 0.5, inner.box.width = 0.15, inner.box.dotsize = 3, normal.curve = FALSE, normal.curve.color = "red", normal.curve.size = 0.8, normal.curve.alpha = 0.4, auto.group = NULL, coord.flip = FALSE, vjust = "bottom", hjust = "center", y.offset = NULL) { # get variable name, used as default label if variable # has no label attributes var.name <- get_var_name(deparse(substitute(var.cnt))) # try to find some useful default offsets for textlabels, # depending on plot range and flipped coordinates if (is.null(y.offset)) { # get maximum y-pos y.offset <- ceiling(max(table(var.cnt)) / 100) if (coord.flip) { if (missing(vjust)) vjust 
<- "center" if (missing(hjust)) hjust <- "bottom" if (hjust == "bottom") y_offset <- y.offset else if (hjust == "top") y_offset <- -y.offset else y_offset <- 0 } else { if (vjust == "bottom") y_offset <- y.offset else if (vjust == "top") y_offset <- -y.offset else y_offset <- 0 } } else { y_offset <- y.offset } # try to automatically set labels, if not passed as argument ----- # to make plot annotations more beautiful, supporting labelled data if (is.null(axis.labels)) { axis.labels <- sjlabelled::get_labels( var.cnt, attr.only = is.character(var.cnt), values = NULL, non.labelled = TRUE ) } if (is.null(axis.title)) axis.title <- sjlabelled::get_label(var.cnt, def.value = var.name) if (is.null(title)) title <- sjlabelled::get_label(var.cnt, def.value = var.name) # remove titles if empty if (!is.null(axis.title) && axis.title == "") axis.title <- NULL if (!is.null(title) && title == "") title <- NULL # check color argument if (length(geom.colors) > 1) geom.colors <- geom.colors[1] # Match arguments ----- type <- match.arg(type) sort.frq <- match.arg(sort.frq) # default grid-expansion if (isTRUE(expand.grid) || (missing(expand.grid) && type == "histogram")) { expand.grid <- waiver() } else { expand.grid <- c(0, 0) } # for histograms or density plots... xv <- sjmisc::to_value(stats::na.omit(var.cnt)) # check for nice bin-width defaults if (type %in% c("histogram", "density") && !is.null(geom.size) && geom.size < round(diff(range(xv)) / 40)) message("Using very small binwidth. 
Consider adjusting `geom.size` argument.") # create second data frame hist.dat <- data.frame(xv) # check default geom.size ----- if (is.null(geom.size)) { geom.size <- dplyr::case_when( type == "bar" ~ .7, type == "dot" ~ 2.5, type == "density" ~ ceiling(diff(range(xv)) / 40), type == "histogram" ~ ceiling(diff(range(xv)) / 40), type == "line" ~ .8, type == "boxplot" ~ .3, type == "violin" ~ .3, TRUE ~ .7 ) } # check whether variable should be auto-grouped ----- if (!is.null(auto.group) && length(unique(var.cnt)) >= auto.group) { message(sprintf( "`%s` has %i unique values and was grouped...", var.name, length(unique(var.cnt)) )) # group axis labels axis.labels <- sjmisc::group_labels( sjmisc::to_value(var.cnt, keep.labels = F), size = "auto", n = auto.group ) # group variable var.cnt <- sjmisc::group_var( sjmisc::to_value(var.cnt, keep.labels = F), size = "auto", as.num = TRUE, n = auto.group, append = FALSE ) # set label attributes var.cnt <- sjlabelled::set_labels(var.cnt, labels = axis.labels) } # create frequency data frame ----- df.frq <- create.frq.df( var.cnt, wrap.labels = wrap.labels, order.frq = sort.frq, round.prz = 2, na.rm = !show.na, weight.by = weight.by ) mydat <- df.frq$mydat # any labels detected? 
if (!is.null(df.frq$labels) && is.null(axis.labels)) axis.labels <- df.frq$labels else if (!is.null(axis.labels) && sort.frq != "none") # sort labels in required order axis.labels <- axis.labels[mydat$order] # define text label position if (show.ci) mydat$label.pos <- mydat$upper.ci else mydat$label.pos <- mydat$frq # Trim labels and title to appropriate size ----- # check length of diagram title and split longer string into new lines # every 50 chars if (!is.null(title)) { # if we have weighted values, say that in diagram's title if (!is.null(title.wtd.suffix)) title <- paste(title, title.wtd.suffix, sep = "") title <- sjmisc::word_wrap(title, wrap.title) } # check length of x-axis title and split longer string into new lines # every 50 chars if (!is.null(axis.title)) axis.title <- sjmisc::word_wrap(axis.title, wrap.title) # count variable may not be a factor! if (is.factor(var.cnt) || is.character(var.cnt)) { var.cnt <- sjmisc::to_value(var.cnt, keep.labels = F) } # If we have a histogram, caluclate means of groups if (is.null(weight.by)) { mittelwert <- mean(var.cnt, na.rm = TRUE) stddev <- stats::sd(var.cnt, na.rm = TRUE) } else { mittelwert <- stats::weighted.mean(var.cnt, weight.by, na.rm = TRUE) stddev <- sjstats::wtd_sd(var.cnt, weights = weight.by) } # If we have boxplots, use different data frame structure if (type == "boxplot" || type == "violin") { mydat <- stats::na.omit(data.frame(cbind( grp = 1, frq = var.cnt, val = var.cnt ))) mydat$grp <- as.factor(mydat$grp) } # Prepare bar charts trimViolin <- FALSE lower_lim <- 0 # calculate upper y-axis-range # if we have a fixed value, use this one here if (!is.null(ylim) && length(ylim) == 2) { lower_lim <- ylim[1] upper_lim <- ylim[2] } else { # if we have boxplots, we have different ranges, so we can adjust # the y axis if (type == "boxplot" || type == "violin") { # use an extra standard-deviation as limits for the y-axis when we have boxplots lower_lim <- min(var.cnt, na.rm = TRUE) - 
floor(stats::sd(var.cnt, na.rm = TRUE)) upper_lim <- max(var.cnt, na.rm = TRUE) + ceiling(stats::sd(var.cnt, na.rm = TRUE)) # make sure that the y-axis is not below zero if (lower_lim < 0) { lower_lim <- 0 trimViolin <- TRUE } } else if (type == "histogram") { # what is the maximum values after binning for histograms? hist.grp.cnt <- ceiling(diff(range(var.cnt, na.rm = T)) / geom.size) # ... or the amount of max. answers per category # add 10% margin to upper limit upper_lim <- max(pretty(table( sjmisc::group_var( var.cnt, size = "auto", n = hist.grp.cnt, append = FALSE ) ) * 1.1)) } else { if (show.ci) upper_lim <- max(pretty(mydat$upper.ci * 1.1)) else upper_lim <- max(pretty(table(var.cnt) * 1.1)) } } # If we want to include NA, use raw percentages as valid percentages if (show.na) mydat$valid.prc <- mydat$raw.prc # don't display value labels when we have boxplots or violin plots if (type == "boxplot" || type == "violin") show.values <- FALSE if (show.values) { # here we have counts and percentages if (show.prc && show.n) { if (coord.flip) { ggvaluelabels <- geom_text( label = sprintf("%i (%.01f%%)", mydat$frq, mydat$valid.prc), hjust = hjust, vjust = vjust, aes(y = .data$label.pos + y_offset) ) } else { ggvaluelabels <- geom_text( label = sprintf("%i\n(%.01f%%)", mydat$frq, mydat$valid.prc), hjust = hjust, vjust = vjust, aes(y = .data$label.pos + y_offset) ) } } else if (show.n) { # here we have counts, without percentages ggvaluelabels <- geom_text( label = sprintf("%i", mydat$frq), hjust = hjust, vjust = vjust, aes(y = .data$label.pos + y_offset) ) } else if (show.prc) { # here we have counts, without percentages ggvaluelabels <- geom_text( label = sprintf("%.01f%%", mydat$valid.prc), hjust = hjust, vjust = vjust, aes(y = .data$label.pos + y_offset) ) } else { # no labels ggvaluelabels <- geom_text(aes(y = .data$frq), label = "") } } else { # no labels ggvaluelabels <- geom_text(aes(y = .data$frq), label = "") } # Set up grid breaks maxx <- max(mydat$val) + 1 
if (is.null(grid.breaks)) { gridbreaks <- waiver() histgridbreaks <- waiver() } else { gridbreaks <- c(seq(lower_lim, upper_lim, by = grid.breaks)) histgridbreaks <- c(seq(lower_lim, maxx, by = grid.breaks)) } # set Y-axis, depending on the calculated upper y-range. # It either corresponds to the maximum amount of cases in the data set # (length of var) or to the highest count of var's categories. if (show.axis.values) { yscale <- scale_y_continuous( limits = c(lower_lim, upper_lim), expand = expand.grid, breaks = gridbreaks ) } else { yscale <- scale_y_continuous( limits = c(lower_lim, upper_lim), expand = expand.grid, breaks = gridbreaks, labels = NULL ) } # bar and dot plot start here! ----- if (type == "bar" || type == "dot") { # define geom if (type == "bar") { geob <- geom_bar(stat = "identity", width = geom.size, fill = geom.colors) } else if (type == "dot") { geob <- geom_point(size = geom.size, colour = geom.colors) } # mydat is a data frame that only contains one variable (var). # Must be declared as factor, so the bars are central aligned to # each x-axis-break. baseplot <- ggplot(mydat, aes(x = factor(.data$val), y = .data$frq)) + geob + yscale + # remove guide / legend guides(fill = FALSE) + # show absolute and percentage value of each bar. ggvaluelabels + # print value labels to the x-axis. # If argument "axis.labels" is NULL, the category numbers (1 to ...) # appear on the x-axis scale_x_discrete(labels = axis.labels) # add error bars if (show.ci) { ebcol <- ifelse(type == "dot", geom.colors, errorbar.color) # print confidence intervalls (error bars) baseplot <- baseplot + geom_errorbar(aes_string(ymin = "lower.ci", ymax = "upper.ci"), colour = ebcol, width = 0) } # check whether coordinates should be flipped, i.e. 
# swap x and y axis if (coord.flip) baseplot <- baseplot + coord_flip() # Start box plot here ----- } else if (type == "boxplot" || type == "violin") { # setup base plot baseplot <- ggplot(mydat, aes_string(x = "grp", y = "frq")) # and x-axis scalex <- scale_x_discrete(labels = "") if (type == "boxplot") { baseplot <- baseplot + geom_boxplot(width = geom.size, fill = geom.colors, notch = show.ci) } else { baseplot <- baseplot + geom_violin(trim = trimViolin, width = geom.size, fill = geom.colors) # if we have a violin plot, add an additional boxplot inside to show # more information if (show.ci) { baseplot <- baseplot + geom_boxplot(width = inner.box.width, fill = "white", notch = TRUE) } else { baseplot <- baseplot + geom_boxplot(width = inner.box.width, fill = "white") } } # if we have boxplots or violon plots, also add a point that indicates # the mean value # different fill colours, because violin boxplots have white background fcsp <- ifelse(type == "boxplot", "white", "black") baseplot <- baseplot + stat_summary(fun.y = "mean", geom = "point", shape = 21, size = inner.box.dotsize, fill = fcsp) # no additional labels for the x- and y-axis, only diagram title baseplot <- baseplot + yscale + scalex # Start density plot here ----- } else if (type == "density") { # First, plot histogram with density curve baseplot <- ggplot(hist.dat, aes(x = .data$xv)) + geom_histogram(aes(y = stat(density)), binwidth = geom.size, fill = geom.colors) + # transparent density curve above bars geom_density(aes(y = stat(density)), fill = "cornsilk", alpha = 0.3) + # remove margins from left and right diagram side scale_x_continuous(expand = expand.grid, breaks = histgridbreaks, limits = xlim) # check whether user wants to overlay the histogram # with a normal curve if (normal.curve) { baseplot <- baseplot + stat_function( fun = dnorm, args = list( mean = mean(hist.dat$xv), sd = stats::sd(hist.dat$xv) ), colour = normal.curve.color, size = normal.curve.size, alpha = normal.curve.alpha 
) } } else { # Since the density curve shows no absolute numbers (counts) on the # y-axis, have also the opportunity to plot "real" histrograms with # counts on the y-axis if (type == "histogram") { # original data needed for normal curve baseplot <- ggplot(mydat) + # second data frame mapped to the histogram geom geom_histogram(data = hist.dat, aes(x = .data$xv), binwidth = geom.size, fill = geom.colors) } else { baseplot <- ggplot(mydat, aes(x = .data$val, y = .data$frq)) + geom_area(alpha = 0.3) + geom_line(size = geom.size, colour = geom.colors) + ggvaluelabels } # check whether user wants to overlay the histogram # with a normal curve if (normal.curve) { baseplot <- baseplot + stat_function( fun = function(xx, mean, sd, n) { n * stats::dnorm(x = xx, mean = mean, sd = sd) }, args = with(mydat, c( mean = mittelwert, sd = stddev, n = length(var.cnt) )), colour = normal.curve.color, size = normal.curve.size, alpha = normal.curve.alpha ) } # if we have a histogram, add mean-lines if (show.mean) { baseplot <- baseplot + # vertical lines indicating the mean geom_vline(xintercept = mittelwert, linetype = mean.line.type, size = mean.line.size) # check whether meanvalue should be shown. if (show.mean.val) { baseplot <- baseplot + # use annotation instead of geomtext, because we need mean value only printed once annotate( "text", x = mittelwert, y = upper_lim, parse = TRUE, label = paste( "italic(bar(x)) == ", round(mittelwert, 1), "~~italic(s) == ", round(stddev, 1) ), vjust = "top", hjust = "top" ) } # check whether the user wants to plot standard deviation area if (show.sd) { baseplot <- baseplot + # first draw shaded rectangle. 
these are by default in grey colour with very high transparancy annotate("rect", xmin = mittelwert - stddev, xmax = mittelwert + stddev, ymin = 0, ymax = c(upper_lim), fill = "grey70", alpha = 0.2) + # draw border-lines for shaded rectangle geom_vline(xintercept = mittelwert - stddev, linetype = 3, size = mean.line.size, alpha = 0.7) + geom_vline(xintercept = mittelwert + stddev, linetype = 3, size = mean.line.size, alpha = 0.7) } } # show absolute and percentage value of each bar. baseplot <- baseplot + yscale + # continuous x-scale for histograms scale_x_continuous(limits = xlim, expand = expand.grid, breaks = histgridbreaks) } # set axes text and baseplot <- baseplot + labs(title = title, x = axis.title, y = NULL) # Check whether ggplot object should be returned or plotted baseplot }
/R/sjPlotFrequencies.R
no_license
Mishkail/sjPlot
R
false
false
22,134
r
utils::globalVariables("density") #' @title Plot frequencies of variables #' @name sjp.frq #' #' @description Plot frequencies of a variable as bar graph, histogram, #' box plot etc. #' #' @note This function only works with variables with integer values (or numeric #' factor levels), i.e. scales / centred variables #' with decimals may result in unexpected behaviour. #' #' @param sort.frq Determines whether categories should be sorted #' according to their frequencies or not. Default is \code{"none"}, so #' categories are not sorted by frequency. Use \code{"asc"} or #' \code{"desc"} for sorting categories ascending or descending order. #' @param geom.colors User defined color for geoms, e.g. \code{geom.colors = "#0080ff"}. #' @param errorbar.color Color of confidence interval bars (error bars). #' Only applies to \code{type = "bar"}. In case of dot plots, error bars #' will have same colors as dots (see \code{geom.colors}). #' @param show.mean Logical, if \code{TRUE}, a vertical line in histograms #' is drawn to indicate the mean value of the variables. Only #' applies to histogram-charts. #' @param show.mean.val Logical, if \code{TRUE} (default), the mean value #' is printed to the vertical line that indicates the variable's #' mean. Only applies to histogram-charts. #' @param show.sd Logical, if \code{TRUE}, the standard deviation #' is annotated as shaded rectangle around the mean intercept #' line. Only applies to histogram-charts. #' @param mean.line.type Numeric value, indicating the linetype of the mean #' intercept line. Only applies to histogram-charts and #' when \code{show.mean = TRUE}. #' @param mean.line.size Numeric, size of the mean intercept line. Only #' applies to histogram-charts and when \code{show.mean = TRUE}. #' @param normal.curve Logical, if \code{TRUE}, a normal curve, which is adjusted to the data, #' is plotted over the histogram or density plot. Default is #' \code{FALSE}. 
Only applies when histograms or density plots are plotted (see \code{type}). #' @param normal.curve.color Color of the normal curve line. Only #' applies if \code{normal.curve = TRUE}. #' @param normal.curve.size Numeric, size of the normal curve line. Only #' applies if \code{normal.curve = TRUE}. #' @param normal.curve.alpha Transparancy level (alpha value) of the normal curve. Only #' applies if \code{normal.curve = TRUE}. #' @param xlim Numeric vector of length two, defining lower and upper axis limits #' of the x scale. By default, this argument is set to \code{NULL}, i.e. the #' x-axis fits to the required range of the data. #' @param axis.title Character vector of length one or two (depending on #' the plot function and type), used as title(s) for the x and y axis. #' If not specified, a default labelling is chosen. #' \strong{Note:} Some plot types do not support this argument. In such #' cases, use the return value and add axis titles manually with #' \code{\link[ggplot2]{labs}}, e.g.: \code{$plot.list[[1]] + labs(x = ...)} #' #' @inheritParams sjp.grpfrq #' #' @return A ggplot-object. #' #' @examples #' library(sjlabelled) #' data(efc) #' #' # boxplot #' sjp.frq(efc$e17age, type = "box") #' #' # histogram #' sjp.frq(efc$e17age, type = "hist", show.mean = TRUE) #' #' # violin plot #' sjp.frq(efc$e17age, type = "v") #' #' # bar plot #' sjp.frq(efc$e42dep) #' #' library(sjmisc) #' # grouped variable #' ageGrp <- group_var(efc$e17age) #' ageGrpLab <- group_labels(efc$e17age) #' sjp.frq(ageGrp, title = get_label(efc$e17age), axis.labels = ageGrpLab) #' #' # plotting confidence intervals. 
expand grid and v/hjust for text labels #' sjp.frq( #' efc$e15relat, type = "dot", show.ci = TRUE, sort.frq = "desc", #' coord.flip = TRUE, expand.grid = TRUE, vjust = "bottom", hjust = "left" #' ) #' #' # Simulate ggplot-default histogram #' sjp.frq(efc$c160age, type = "h", geom.size = 3) #' #' # histogram with overlayed normal curve #' sjp.frq(efc$c160age, type = "h", show.mean = TRUE, show.mean.val = TRUE, #' normal.curve = TRUE, show.sd = TRUE, normal.curve.color = "blue", #' normal.curve.size = 3, ylim = c(0,50)) #' #' @import ggplot2 #' @importFrom sjstats wtd_sd #' @importFrom sjmisc group_labels group_var to_value #' @importFrom sjlabelled set_labels #' @importFrom stats na.omit sd weighted.mean dnorm #' @importFrom rlang .data #' @export sjp.frq <- function(var.cnt, title = "", weight.by = NULL, title.wtd.suffix = NULL, sort.frq = c("none", "asc", "desc"), type = c("bar", "dot", "histogram", "line", "density", "boxplot", "violin"), geom.size = NULL, geom.colors = "#336699", errorbar.color = "darkred", axis.title = NULL, axis.labels = NULL, xlim = NULL, ylim = NULL, wrap.title = 50, wrap.labels = 20, grid.breaks = NULL, expand.grid = FALSE, show.values = TRUE, show.n = TRUE, show.prc = TRUE, show.axis.values = TRUE, show.ci = FALSE, show.na = FALSE, show.mean = FALSE, show.mean.val = TRUE, show.sd = TRUE, mean.line.type = 2, mean.line.size = 0.5, inner.box.width = 0.15, inner.box.dotsize = 3, normal.curve = FALSE, normal.curve.color = "red", normal.curve.size = 0.8, normal.curve.alpha = 0.4, auto.group = NULL, coord.flip = FALSE, vjust = "bottom", hjust = "center", y.offset = NULL) { # get variable name, used as default label if variable # has no label attributes var.name <- get_var_name(deparse(substitute(var.cnt))) # try to find some useful default offsets for textlabels, # depending on plot range and flipped coordinates if (is.null(y.offset)) { # get maximum y-pos y.offset <- ceiling(max(table(var.cnt)) / 100) if (coord.flip) { if (missing(vjust)) vjust 
<- "center" if (missing(hjust)) hjust <- "bottom" if (hjust == "bottom") y_offset <- y.offset else if (hjust == "top") y_offset <- -y.offset else y_offset <- 0 } else { if (vjust == "bottom") y_offset <- y.offset else if (vjust == "top") y_offset <- -y.offset else y_offset <- 0 } } else { y_offset <- y.offset } # try to automatically set labels, if not passed as argument ----- # to make plot annotations more beautiful, supporting labelled data if (is.null(axis.labels)) { axis.labels <- sjlabelled::get_labels( var.cnt, attr.only = is.character(var.cnt), values = NULL, non.labelled = TRUE ) } if (is.null(axis.title)) axis.title <- sjlabelled::get_label(var.cnt, def.value = var.name) if (is.null(title)) title <- sjlabelled::get_label(var.cnt, def.value = var.name) # remove titles if empty if (!is.null(axis.title) && axis.title == "") axis.title <- NULL if (!is.null(title) && title == "") title <- NULL # check color argument if (length(geom.colors) > 1) geom.colors <- geom.colors[1] # Match arguments ----- type <- match.arg(type) sort.frq <- match.arg(sort.frq) # default grid-expansion if (isTRUE(expand.grid) || (missing(expand.grid) && type == "histogram")) { expand.grid <- waiver() } else { expand.grid <- c(0, 0) } # for histograms or density plots... xv <- sjmisc::to_value(stats::na.omit(var.cnt)) # check for nice bin-width defaults if (type %in% c("histogram", "density") && !is.null(geom.size) && geom.size < round(diff(range(xv)) / 40)) message("Using very small binwidth. 
Consider adjusting `geom.size` argument.") # create second data frame hist.dat <- data.frame(xv) # check default geom.size ----- if (is.null(geom.size)) { geom.size <- dplyr::case_when( type == "bar" ~ .7, type == "dot" ~ 2.5, type == "density" ~ ceiling(diff(range(xv)) / 40), type == "histogram" ~ ceiling(diff(range(xv)) / 40), type == "line" ~ .8, type == "boxplot" ~ .3, type == "violin" ~ .3, TRUE ~ .7 ) } # check whether variable should be auto-grouped ----- if (!is.null(auto.group) && length(unique(var.cnt)) >= auto.group) { message(sprintf( "`%s` has %i unique values and was grouped...", var.name, length(unique(var.cnt)) )) # group axis labels axis.labels <- sjmisc::group_labels( sjmisc::to_value(var.cnt, keep.labels = F), size = "auto", n = auto.group ) # group variable var.cnt <- sjmisc::group_var( sjmisc::to_value(var.cnt, keep.labels = F), size = "auto", as.num = TRUE, n = auto.group, append = FALSE ) # set label attributes var.cnt <- sjlabelled::set_labels(var.cnt, labels = axis.labels) } # create frequency data frame ----- df.frq <- create.frq.df( var.cnt, wrap.labels = wrap.labels, order.frq = sort.frq, round.prz = 2, na.rm = !show.na, weight.by = weight.by ) mydat <- df.frq$mydat # any labels detected? 
if (!is.null(df.frq$labels) && is.null(axis.labels)) axis.labels <- df.frq$labels else if (!is.null(axis.labels) && sort.frq != "none") # sort labels in required order axis.labels <- axis.labels[mydat$order] # define text label position if (show.ci) mydat$label.pos <- mydat$upper.ci else mydat$label.pos <- mydat$frq # Trim labels and title to appropriate size ----- # check length of diagram title and split longer string into new lines # every 50 chars if (!is.null(title)) { # if we have weighted values, say that in diagram's title if (!is.null(title.wtd.suffix)) title <- paste(title, title.wtd.suffix, sep = "") title <- sjmisc::word_wrap(title, wrap.title) } # check length of x-axis title and split longer string into new lines # every 50 chars if (!is.null(axis.title)) axis.title <- sjmisc::word_wrap(axis.title, wrap.title) # count variable may not be a factor! if (is.factor(var.cnt) || is.character(var.cnt)) { var.cnt <- sjmisc::to_value(var.cnt, keep.labels = F) } # If we have a histogram, caluclate means of groups if (is.null(weight.by)) { mittelwert <- mean(var.cnt, na.rm = TRUE) stddev <- stats::sd(var.cnt, na.rm = TRUE) } else { mittelwert <- stats::weighted.mean(var.cnt, weight.by, na.rm = TRUE) stddev <- sjstats::wtd_sd(var.cnt, weights = weight.by) } # If we have boxplots, use different data frame structure if (type == "boxplot" || type == "violin") { mydat <- stats::na.omit(data.frame(cbind( grp = 1, frq = var.cnt, val = var.cnt ))) mydat$grp <- as.factor(mydat$grp) } # Prepare bar charts trimViolin <- FALSE lower_lim <- 0 # calculate upper y-axis-range # if we have a fixed value, use this one here if (!is.null(ylim) && length(ylim) == 2) { lower_lim <- ylim[1] upper_lim <- ylim[2] } else { # if we have boxplots, we have different ranges, so we can adjust # the y axis if (type == "boxplot" || type == "violin") { # use an extra standard-deviation as limits for the y-axis when we have boxplots lower_lim <- min(var.cnt, na.rm = TRUE) - 
floor(stats::sd(var.cnt, na.rm = TRUE)) upper_lim <- max(var.cnt, na.rm = TRUE) + ceiling(stats::sd(var.cnt, na.rm = TRUE)) # make sure that the y-axis is not below zero if (lower_lim < 0) { lower_lim <- 0 trimViolin <- TRUE } } else if (type == "histogram") { # what is the maximum values after binning for histograms? hist.grp.cnt <- ceiling(diff(range(var.cnt, na.rm = T)) / geom.size) # ... or the amount of max. answers per category # add 10% margin to upper limit upper_lim <- max(pretty(table( sjmisc::group_var( var.cnt, size = "auto", n = hist.grp.cnt, append = FALSE ) ) * 1.1)) } else { if (show.ci) upper_lim <- max(pretty(mydat$upper.ci * 1.1)) else upper_lim <- max(pretty(table(var.cnt) * 1.1)) } } # If we want to include NA, use raw percentages as valid percentages if (show.na) mydat$valid.prc <- mydat$raw.prc # don't display value labels when we have boxplots or violin plots if (type == "boxplot" || type == "violin") show.values <- FALSE if (show.values) { # here we have counts and percentages if (show.prc && show.n) { if (coord.flip) { ggvaluelabels <- geom_text( label = sprintf("%i (%.01f%%)", mydat$frq, mydat$valid.prc), hjust = hjust, vjust = vjust, aes(y = .data$label.pos + y_offset) ) } else { ggvaluelabels <- geom_text( label = sprintf("%i\n(%.01f%%)", mydat$frq, mydat$valid.prc), hjust = hjust, vjust = vjust, aes(y = .data$label.pos + y_offset) ) } } else if (show.n) { # here we have counts, without percentages ggvaluelabels <- geom_text( label = sprintf("%i", mydat$frq), hjust = hjust, vjust = vjust, aes(y = .data$label.pos + y_offset) ) } else if (show.prc) { # here we have counts, without percentages ggvaluelabels <- geom_text( label = sprintf("%.01f%%", mydat$valid.prc), hjust = hjust, vjust = vjust, aes(y = .data$label.pos + y_offset) ) } else { # no labels ggvaluelabels <- geom_text(aes(y = .data$frq), label = "") } } else { # no labels ggvaluelabels <- geom_text(aes(y = .data$frq), label = "") } # Set up grid breaks maxx <- max(mydat$val) + 1 
if (is.null(grid.breaks)) { gridbreaks <- waiver() histgridbreaks <- waiver() } else { gridbreaks <- c(seq(lower_lim, upper_lim, by = grid.breaks)) histgridbreaks <- c(seq(lower_lim, maxx, by = grid.breaks)) } # set Y-axis, depending on the calculated upper y-range. # It either corresponds to the maximum amount of cases in the data set # (length of var) or to the highest count of var's categories. if (show.axis.values) { yscale <- scale_y_continuous( limits = c(lower_lim, upper_lim), expand = expand.grid, breaks = gridbreaks ) } else { yscale <- scale_y_continuous( limits = c(lower_lim, upper_lim), expand = expand.grid, breaks = gridbreaks, labels = NULL ) } # bar and dot plot start here! ----- if (type == "bar" || type == "dot") { # define geom if (type == "bar") { geob <- geom_bar(stat = "identity", width = geom.size, fill = geom.colors) } else if (type == "dot") { geob <- geom_point(size = geom.size, colour = geom.colors) } # mydat is a data frame that only contains one variable (var). # Must be declared as factor, so the bars are central aligned to # each x-axis-break. baseplot <- ggplot(mydat, aes(x = factor(.data$val), y = .data$frq)) + geob + yscale + # remove guide / legend guides(fill = FALSE) + # show absolute and percentage value of each bar. ggvaluelabels + # print value labels to the x-axis. # If argument "axis.labels" is NULL, the category numbers (1 to ...) # appear on the x-axis scale_x_discrete(labels = axis.labels) # add error bars if (show.ci) { ebcol <- ifelse(type == "dot", geom.colors, errorbar.color) # print confidence intervalls (error bars) baseplot <- baseplot + geom_errorbar(aes_string(ymin = "lower.ci", ymax = "upper.ci"), colour = ebcol, width = 0) } # check whether coordinates should be flipped, i.e. 
# swap x and y axis if (coord.flip) baseplot <- baseplot + coord_flip() # Start box plot here ----- } else if (type == "boxplot" || type == "violin") { # setup base plot baseplot <- ggplot(mydat, aes_string(x = "grp", y = "frq")) # and x-axis scalex <- scale_x_discrete(labels = "") if (type == "boxplot") { baseplot <- baseplot + geom_boxplot(width = geom.size, fill = geom.colors, notch = show.ci) } else { baseplot <- baseplot + geom_violin(trim = trimViolin, width = geom.size, fill = geom.colors) # if we have a violin plot, add an additional boxplot inside to show # more information if (show.ci) { baseplot <- baseplot + geom_boxplot(width = inner.box.width, fill = "white", notch = TRUE) } else { baseplot <- baseplot + geom_boxplot(width = inner.box.width, fill = "white") } } # if we have boxplots or violon plots, also add a point that indicates # the mean value # different fill colours, because violin boxplots have white background fcsp <- ifelse(type == "boxplot", "white", "black") baseplot <- baseplot + stat_summary(fun.y = "mean", geom = "point", shape = 21, size = inner.box.dotsize, fill = fcsp) # no additional labels for the x- and y-axis, only diagram title baseplot <- baseplot + yscale + scalex # Start density plot here ----- } else if (type == "density") { # First, plot histogram with density curve baseplot <- ggplot(hist.dat, aes(x = .data$xv)) + geom_histogram(aes(y = stat(density)), binwidth = geom.size, fill = geom.colors) + # transparent density curve above bars geom_density(aes(y = stat(density)), fill = "cornsilk", alpha = 0.3) + # remove margins from left and right diagram side scale_x_continuous(expand = expand.grid, breaks = histgridbreaks, limits = xlim) # check whether user wants to overlay the histogram # with a normal curve if (normal.curve) { baseplot <- baseplot + stat_function( fun = dnorm, args = list( mean = mean(hist.dat$xv), sd = stats::sd(hist.dat$xv) ), colour = normal.curve.color, size = normal.curve.size, alpha = normal.curve.alpha 
) } } else { # Since the density curve shows no absolute numbers (counts) on the # y-axis, have also the opportunity to plot "real" histrograms with # counts on the y-axis if (type == "histogram") { # original data needed for normal curve baseplot <- ggplot(mydat) + # second data frame mapped to the histogram geom geom_histogram(data = hist.dat, aes(x = .data$xv), binwidth = geom.size, fill = geom.colors) } else { baseplot <- ggplot(mydat, aes(x = .data$val, y = .data$frq)) + geom_area(alpha = 0.3) + geom_line(size = geom.size, colour = geom.colors) + ggvaluelabels } # check whether user wants to overlay the histogram # with a normal curve if (normal.curve) { baseplot <- baseplot + stat_function( fun = function(xx, mean, sd, n) { n * stats::dnorm(x = xx, mean = mean, sd = sd) }, args = with(mydat, c( mean = mittelwert, sd = stddev, n = length(var.cnt) )), colour = normal.curve.color, size = normal.curve.size, alpha = normal.curve.alpha ) } # if we have a histogram, add mean-lines if (show.mean) { baseplot <- baseplot + # vertical lines indicating the mean geom_vline(xintercept = mittelwert, linetype = mean.line.type, size = mean.line.size) # check whether meanvalue should be shown. if (show.mean.val) { baseplot <- baseplot + # use annotation instead of geomtext, because we need mean value only printed once annotate( "text", x = mittelwert, y = upper_lim, parse = TRUE, label = paste( "italic(bar(x)) == ", round(mittelwert, 1), "~~italic(s) == ", round(stddev, 1) ), vjust = "top", hjust = "top" ) } # check whether the user wants to plot standard deviation area if (show.sd) { baseplot <- baseplot + # first draw shaded rectangle. 
these are by default in grey colour with very high transparancy annotate("rect", xmin = mittelwert - stddev, xmax = mittelwert + stddev, ymin = 0, ymax = c(upper_lim), fill = "grey70", alpha = 0.2) + # draw border-lines for shaded rectangle geom_vline(xintercept = mittelwert - stddev, linetype = 3, size = mean.line.size, alpha = 0.7) + geom_vline(xintercept = mittelwert + stddev, linetype = 3, size = mean.line.size, alpha = 0.7) } } # show absolute and percentage value of each bar. baseplot <- baseplot + yscale + # continuous x-scale for histograms scale_x_continuous(limits = xlim, expand = expand.grid, breaks = histgridbreaks) } # set axes text and baseplot <- baseplot + labs(title = title, x = axis.title, y = NULL) # Check whether ggplot object should be returned or plotted baseplot }
#' Violin RSV Distribution Plot #' #' Violin plot for displaying the number of RSVs per a sample variable #' #' @import ggplot2 #' @import phyloseq #' @export #' @return A \code{\link[ggplot2]{ggplot}} object. #' @examples #' require(phyloseq) #' data("soilrep") #' head( sample_data(soilrep) ) #' plot_violin(physeq = soilrep, x = "warmed") #' #' plot_violin(physeq, x) plot_violin <- function(physeq, x, y, title = NULL){ ss <- sample_sums(physeq) # sample sums sd <- as.data.frame(sample_data(physeq)) # sample data df <- merge(sd, data.frame("RSVs" = ss), by = "row.names") # merge #Violin + Jitter plots p <- ggplot(df, aes_string(x, y="RSVs", fill = x)) + geom_violin() + scale_y_log10() + geom_hline(yintercept = y, lty = 2) + geom_jitter(alpha=0.5, width = 0.15) + geom_text(data = subset(df, RSVs <= y), aes_string(x,y="RSVs", label="Description"), size=2) if (!is.null(title)) { p <- p + ggtitle(title) } return(p) }
/scripts/plot_violin.R
no_license
bhykes1/16S_analysis_framework
R
false
false
969
r
#' Violin RSV Distribution Plot #' #' Violin plot for displaying the number of RSVs per a sample variable #' #' @import ggplot2 #' @import phyloseq #' @export #' @return A \code{\link[ggplot2]{ggplot}} object. #' @examples #' require(phyloseq) #' data("soilrep") #' head( sample_data(soilrep) ) #' plot_violin(physeq = soilrep, x = "warmed") #' #' plot_violin(physeq, x) plot_violin <- function(physeq, x, y, title = NULL){ ss <- sample_sums(physeq) # sample sums sd <- as.data.frame(sample_data(physeq)) # sample data df <- merge(sd, data.frame("RSVs" = ss), by = "row.names") # merge #Violin + Jitter plots p <- ggplot(df, aes_string(x, y="RSVs", fill = x)) + geom_violin() + scale_y_log10() + geom_hline(yintercept = y, lty = 2) + geom_jitter(alpha=0.5, width = 0.15) + geom_text(data = subset(df, RSVs <= y), aes_string(x,y="RSVs", label="Description"), size=2) if (!is.null(title)) { p <- p + ggtitle(title) } return(p) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-zzz.R \name{find_reporter} \alias{find_reporter} \title{Find reporter object given name or object.} \usage{ find_reporter(reporter) } \arguments{ \item{reporter}{name of reporter(s), or reporter object(s)} } \description{ If not found, will return informative error message. Pass a character vector to create a \code{\link{MultiReporter}} composed of individual reporters. Will return null if given NULL. } \keyword{internal}
/man/find_reporter.Rd
no_license
LluisRamon/testthat
R
false
true
514
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-zzz.R \name{find_reporter} \alias{find_reporter} \title{Find reporter object given name or object.} \usage{ find_reporter(reporter) } \arguments{ \item{reporter}{name of reporter(s), or reporter object(s)} } \description{ If not found, will return informative error message. Pass a character vector to create a \code{\link{MultiReporter}} composed of individual reporters. Will return null if given NULL. } \keyword{internal}
library(tidyverse) library(lubridate) fileUrl<-'https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip' #reading the data temp <- tempfile() download.file(fileUrl,temp) filList <- unzip(temp) data <- read.csv('./household_power_consumption.txt', header=TRUE, sep = ";", na.strings="?") #subsetting data3<-data %>% mutate(Date=dmy(Date), Time=hms(Time)) %>% filter(Date == ymd('2007-02-01') | Date == ymd('2007-02-02')) %>% arrange(Date, Time) %>% mutate(date_time=ymd_hms(paste(Date, Time))) ind<-wday(data3$Date, label=TRUE)==c('Thu','Fri','Sat') dev.set() png(filename = "./plot2.png", width = 480, height = 480, units = "px", pointsize = 12, bg = "white") plot(data3$date_time[ind], as.numeric(data3$Global_active_power[ind]),type="l",ylab='Global Active Power (kilowatts)', xlab='') dev.off()
/plot2.R
no_license
ElianaGC/ExData_Plotting1-1
R
false
false
865
r
library(tidyverse) library(lubridate) fileUrl<-'https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip' #reading the data temp <- tempfile() download.file(fileUrl,temp) filList <- unzip(temp) data <- read.csv('./household_power_consumption.txt', header=TRUE, sep = ";", na.strings="?") #subsetting data3<-data %>% mutate(Date=dmy(Date), Time=hms(Time)) %>% filter(Date == ymd('2007-02-01') | Date == ymd('2007-02-02')) %>% arrange(Date, Time) %>% mutate(date_time=ymd_hms(paste(Date, Time))) ind<-wday(data3$Date, label=TRUE)==c('Thu','Fri','Sat') dev.set() png(filename = "./plot2.png", width = 480, height = 480, units = "px", pointsize = 12, bg = "white") plot(data3$date_time[ind], as.numeric(data3$Global_active_power[ind]),type="l",ylab='Global Active Power (kilowatts)', xlab='') dev.off()
# Random Forest Classification # Importing the dataset dataset = read.csv('LogisticRegression.csv') dataset = dataset[3:5] # Encoding the target feature as factor dataset$Purchased = factor(dataset$Purchased, levels = c(0, 1)) # Splitting the dataset into the Training set and Test set # install.packages('caTools') library(caTools) set.seed(123) split = sample.split(dataset$Purchased, SplitRatio = 0.75) training_set = subset(dataset, split == TRUE) test_set = subset(dataset, split == FALSE) # Feature Scaling training_set[-3] = scale(training_set[-3]) test_set[-3] = scale(test_set[-3]) # Fitting Random Forest Classification to the Training set # install.packages('randomForest') library(randomForest) set.seed(123) classifier = randomForest(x = training_set[-3], y = training_set$Purchased, ntree = 500) # Predicting the Test set results y_pred = predict(classifier, newdata = test_set[-3]) # Making the Confusion Matrix cm = table(test_set[, 3], y_pred) # Visualising the Training set results library(ElemStatLearn) set = training_set X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01) X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01) grid_set = expand.grid(X1, X2) colnames(grid_set) = c('Age', 'EstimatedSalary') y_grid = predict(classifier, grid_set) plot(set[, -3], main = 'Random Forest Classification (Training set)', xlab = 'Age', ylab = 'Estimated Salary', xlim = range(X1), ylim = range(X2)) contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE) points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato')) points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3')) # Visualising the Test set results library(ElemStatLearn) set = test_set X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01) X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01) grid_set = expand.grid(X1, X2) colnames(grid_set) = c('Age', 'EstimatedSalary') y_grid = predict(classifier, grid_set) plot(set[, -3], main = 'Random Forest 
Classification (Test set)', xlab = 'Age', ylab = 'Estimated Salary', xlim = range(X1), ylim = range(X2)) contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE) points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato')) points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3')) # Choosing the number of trees plot(classifier)
/random_forest_classification.R
no_license
karansharma357/Data-Science
R
false
false
2,467
r
# Random Forest Classification # Importing the dataset dataset = read.csv('LogisticRegression.csv') dataset = dataset[3:5] # Encoding the target feature as factor dataset$Purchased = factor(dataset$Purchased, levels = c(0, 1)) # Splitting the dataset into the Training set and Test set # install.packages('caTools') library(caTools) set.seed(123) split = sample.split(dataset$Purchased, SplitRatio = 0.75) training_set = subset(dataset, split == TRUE) test_set = subset(dataset, split == FALSE) # Feature Scaling training_set[-3] = scale(training_set[-3]) test_set[-3] = scale(test_set[-3]) # Fitting Random Forest Classification to the Training set # install.packages('randomForest') library(randomForest) set.seed(123) classifier = randomForest(x = training_set[-3], y = training_set$Purchased, ntree = 500) # Predicting the Test set results y_pred = predict(classifier, newdata = test_set[-3]) # Making the Confusion Matrix cm = table(test_set[, 3], y_pred) # Visualising the Training set results library(ElemStatLearn) set = training_set X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01) X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01) grid_set = expand.grid(X1, X2) colnames(grid_set) = c('Age', 'EstimatedSalary') y_grid = predict(classifier, grid_set) plot(set[, -3], main = 'Random Forest Classification (Training set)', xlab = 'Age', ylab = 'Estimated Salary', xlim = range(X1), ylim = range(X2)) contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE) points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato')) points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3')) # Visualising the Test set results library(ElemStatLearn) set = test_set X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01) X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01) grid_set = expand.grid(X1, X2) colnames(grid_set) = c('Age', 'EstimatedSalary') y_grid = predict(classifier, grid_set) plot(set[, -3], main = 'Random Forest 
Classification (Test set)', xlab = 'Age', ylab = 'Estimated Salary', xlim = range(X1), ylim = range(X2)) contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE) points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato')) points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3')) # Choosing the number of trees plot(classifier)
#' Access files in the current app #' #' @param ... Character vector specifying directory and or file to #' point to inside the current package. #' #' @noRd app_sys <- function(...){ system.file(..., package = "rightwatching") } #' Read App Config #' #' @param value Value to retrieve from the config file. #' @param config R_CONFIG_ACTIVE value. #' @param use_parent Logical, scan the parent directory for config file. #' #' @importFrom config get #' #' @noRd get_golem_config <- function( value, config = Sys.getenv("R_CONFIG_ACTIVE", "default"), use_parent = TRUE ){ config::get( value = value, config = config, # Modify this if your config file is somewhere else: file = app_sys("golem-config.yml"), use_parent = use_parent ) }
/R/app_config.R
permissive
prototypefund/rightwatching-shiny-app
R
false
false
787
r
#' Access files in the current app #' #' @param ... Character vector specifying directory and or file to #' point to inside the current package. #' #' @noRd app_sys <- function(...){ system.file(..., package = "rightwatching") } #' Read App Config #' #' @param value Value to retrieve from the config file. #' @param config R_CONFIG_ACTIVE value. #' @param use_parent Logical, scan the parent directory for config file. #' #' @importFrom config get #' #' @noRd get_golem_config <- function( value, config = Sys.getenv("R_CONFIG_ACTIVE", "default"), use_parent = TRUE ){ config::get( value = value, config = config, # Modify this if your config file is somewhere else: file = app_sys("golem-config.yml"), use_parent = use_parent ) }
library(tibble) library(dplyr) library(sf) ######### Airport Reference Point # Zurich ( from https://skyvector.com/airport/LSZH/Zurich-Airport ) # # Location Information for LSZH # Coordinates: N47°27.48' / E8°32.88' # View all Airports in Zürich, Switzerland. # Elevation is 1417.0 feet MSL. # Magnetic Variation is 2° East ########## Runways # Runway 16/34 # Dimensions: 12139 x 197 feet / 3700 x 60 meters # Surface: Hard # Runway 16 Runway 34 # Coordinates: N47°28.54' / E8°32.16' N47°26.96' / E8°33.25' # Elevation: 1390 1388 # Runway Heading: 154° 334° # Displaced Threshold: 1542 Feet # Runway 14/32 # Dimensions: 10827 x 197 feet / 3300 x 60 meters # Surface: Unknown # Runway 14 Runway 32 # Coordinates: N47°28.93' / E8°32.16' N47°27.68' / E8°33.87' # Elevation: 1402 1402 # Runway Heading: 136° 316° # Displaced Threshold: 492 Feet # Runway 10/28 # Dimensions: 8202 x 197 feet / 2500 x 60 meters # Surface: Unknown # Runway 10 Runway 28 # Coordinates: N47°27.54' / E8°32.25' N47°27.40' / E8°34.23' # Elevation: 1391 1416 # Runway Heading: 095° 275° lszh_apt <- tribble( ~latitude, ~longitude, ~elevation, ~heading, ~icao, ~id, ~type, ~name, "N47°27.48'", "E8°32.88'", 1417, NA, "LSZH", "LSZH", "ARP", "LSZH ARP", "N47°28.54'", "E8°32.16'", 1390, 154, "LSZH", "RWY16/34", "RWY", "Runway 16", "N47°26.96'", "E8°33.25'", 1388, 334, "LSZH", "RWY16/34", "RWY", "Runway 34", "N47°28.93'", "E8°32.16'", 1402, 136, "LSZH", "RWY14/32", "RWY", "Runway 14", "N47°27.68'", "E8°33.87'", 1402, 316, "LSZH", "RWY14/32", "RWY", "Runway 32", "N47°27.54'", "E8°32.25'", 1391, 95, "LSZH", "RWY10/28", "RWY", "Runway 10", "N47°27.40'", "E8°34.23'", 1416, 275, "LSZH", "RWY10/38", "RWY", "Runway 28" ) %>% dplyr::mutate(latitude = trrrj::ddm2dd(latitude), longitude = trrrj::ddm2dd(longitude)) # lon/lat!!!!!!!!!!!!!!! lszh_arp <- lszh_apt %>% dplyr::filter(type == "ARP")
/R/lszh-data.R
no_license
euctrl-pru/reproducible-ans-performance-paper
R
false
false
2,060
r
library(tibble) library(dplyr) library(sf) ######### Airport Reference Point # Zurich ( from https://skyvector.com/airport/LSZH/Zurich-Airport ) # # Location Information for LSZH # Coordinates: N47°27.48' / E8°32.88' # View all Airports in Zürich, Switzerland. # Elevation is 1417.0 feet MSL. # Magnetic Variation is 2° East ########## Runways # Runway 16/34 # Dimensions: 12139 x 197 feet / 3700 x 60 meters # Surface: Hard # Runway 16 Runway 34 # Coordinates: N47°28.54' / E8°32.16' N47°26.96' / E8°33.25' # Elevation: 1390 1388 # Runway Heading: 154° 334° # Displaced Threshold: 1542 Feet # Runway 14/32 # Dimensions: 10827 x 197 feet / 3300 x 60 meters # Surface: Unknown # Runway 14 Runway 32 # Coordinates: N47°28.93' / E8°32.16' N47°27.68' / E8°33.87' # Elevation: 1402 1402 # Runway Heading: 136° 316° # Displaced Threshold: 492 Feet # Runway 10/28 # Dimensions: 8202 x 197 feet / 2500 x 60 meters # Surface: Unknown # Runway 10 Runway 28 # Coordinates: N47°27.54' / E8°32.25' N47°27.40' / E8°34.23' # Elevation: 1391 1416 # Runway Heading: 095° 275° lszh_apt <- tribble( ~latitude, ~longitude, ~elevation, ~heading, ~icao, ~id, ~type, ~name, "N47°27.48'", "E8°32.88'", 1417, NA, "LSZH", "LSZH", "ARP", "LSZH ARP", "N47°28.54'", "E8°32.16'", 1390, 154, "LSZH", "RWY16/34", "RWY", "Runway 16", "N47°26.96'", "E8°33.25'", 1388, 334, "LSZH", "RWY16/34", "RWY", "Runway 34", "N47°28.93'", "E8°32.16'", 1402, 136, "LSZH", "RWY14/32", "RWY", "Runway 14", "N47°27.68'", "E8°33.87'", 1402, 316, "LSZH", "RWY14/32", "RWY", "Runway 32", "N47°27.54'", "E8°32.25'", 1391, 95, "LSZH", "RWY10/28", "RWY", "Runway 10", "N47°27.40'", "E8°34.23'", 1416, 275, "LSZH", "RWY10/38", "RWY", "Runway 28" ) %>% dplyr::mutate(latitude = trrrj::ddm2dd(latitude), longitude = trrrj::ddm2dd(longitude)) # lon/lat!!!!!!!!!!!!!!! lszh_arp <- lszh_apt %>% dplyr::filter(type == "ARP")
# This script takes cleaned and scored SPI data and runs a two-sample t test # to compare T1 vs. Healthy, T2 vs. Healthy, and T1 vs. T2 on each # load libraries library(here) library(tidyverse) library(broom) library(effsize) library(rsample) library(janitor) library(psych) # load data that has been filtered based on exclusion criteria data_filtered = readRDS(here("output/t_tests/data_filtered.RDS")) # Score SPI data ---------------------------------------------------------- source(here("scripts/preprocessing/score_spi.R")) spi_names = readRDS(here("output/spi_names.RDS")) IRT_path = here("data/IRTinfoSPI27.rdata") keys = read.csv(here("data/superKey.csv"), header = TRUE, row.names = 1) data_scored = score(data = data_filtered, keys = keys, IRT_path = IRT_path) # Wrangle data for iteration ------------------------------------------------ # convert to long format and nest data_nested = data_scored %>% select(diabetes, spi_names$spi_5, spi_names$spi_27) %>% # remove demographic vars and raw items gather(-diabetes, key = trait, value = score) %>% # convert to long format group_by(trait) %>% nest() # organize dataframe by group comparison and trait data_nested = expand.grid( comparison = c("t1.v.healthy", "t2.v.healthy", "t1.v.t2"), # create all possible group comparions trait = data_nested$trait, stringsAsFactors = FALSE) %>% left_join(data_nested) %>% # join with nested dataframe # filter nested data frames according to group comparison: mutate(data = case_when(comparison == "t1.v.t2" ~ map(data, ~filter(.x, !diabetes == "healthy")), comparison == "t1.v.healthy" ~ map(data, ~filter(.x, !diabetes == "t2d")), comparison == "t2.v.healthy" ~ map(data, ~filter(.x, !diabetes == "t1d")))) # Iterate t-tests ---------------------------------------------------- # run t-test for each personality trait variable t_test_output = data_nested %>% mutate(t_test = map(data, ~broom::tidy(t.test(score ~ diabetes, data = .))), # iterate t-tests cohens_d = map(data, 
~effsize::cohen.d(score ~ diabetes, data = .)) %>% # iterate cohen's d map_dbl("estimate")) %>% # extract just Cohen's d estimate from list output select(-data) %>% unnest() %>% mutate(p.adj = p.adjust(p.value, method = "holm")) %>% # Holm correction for multiple comparisons select(comparison, trait, statistic, p.value, p.adj, cohens_d) # select relevant vars # Bootstrap Cohen's D ---------------------------------------------------- # number of bootstraps #boot.n = 100 boot.n = 10000 #helper function d_boot = function(split){ effsize::cohen.d(score ~ diabetes, data = analysis(split)) } # iterate cohen's d confidence intervals d_confidence = data_nested %>% mutate(boots = map(data, rsample::bootstraps, times = boot.n)) %>% mutate(boots = map(boots, .f = function(x) mutate(x, d = map(splits, d_boot)))) %>% #maps in maps! mutate(boots = map(boots, .f = function(x) mutate(x, d = map_dbl(d, "estimate")))) %>% mutate(boots = map(boots, "d")) %>% unnest(boots) %>% group_by(comparison, trait) %>% dplyr::summarise(d_conf_low = quantile(boots, probs = c(.025)), d_conf_high = quantile(boots, probs = c(.975))) # add to t-test output t_test_output = full_join(t_test_output, d_confidence) # Save t-test output ------------------------------------------------------ saveRDS(t_test_output, file = here("output/t_tests/t_test_output.RDS"))
/scripts/3_run_t_tests.R
permissive
brendanhcullen/personality-diabetes
R
false
false
3,479
r
# This script takes cleaned and scored SPI data and runs a two-sample t test # to compare T1 vs. Healthy, T2 vs. Healthy, and T1 vs. T2 on each # load libraries library(here) library(tidyverse) library(broom) library(effsize) library(rsample) library(janitor) library(psych) # load data that has been filtered based on exclusion criteria data_filtered = readRDS(here("output/t_tests/data_filtered.RDS")) # Score SPI data ---------------------------------------------------------- source(here("scripts/preprocessing/score_spi.R")) spi_names = readRDS(here("output/spi_names.RDS")) IRT_path = here("data/IRTinfoSPI27.rdata") keys = read.csv(here("data/superKey.csv"), header = TRUE, row.names = 1) data_scored = score(data = data_filtered, keys = keys, IRT_path = IRT_path) # Wrangle data for iteration ------------------------------------------------ # convert to long format and nest data_nested = data_scored %>% select(diabetes, spi_names$spi_5, spi_names$spi_27) %>% # remove demographic vars and raw items gather(-diabetes, key = trait, value = score) %>% # convert to long format group_by(trait) %>% nest() # organize dataframe by group comparison and trait data_nested = expand.grid( comparison = c("t1.v.healthy", "t2.v.healthy", "t1.v.t2"), # create all possible group comparions trait = data_nested$trait, stringsAsFactors = FALSE) %>% left_join(data_nested) %>% # join with nested dataframe # filter nested data frames according to group comparison: mutate(data = case_when(comparison == "t1.v.t2" ~ map(data, ~filter(.x, !diabetes == "healthy")), comparison == "t1.v.healthy" ~ map(data, ~filter(.x, !diabetes == "t2d")), comparison == "t2.v.healthy" ~ map(data, ~filter(.x, !diabetes == "t1d")))) # Iterate t-tests ---------------------------------------------------- # run t-test for each personality trait variable t_test_output = data_nested %>% mutate(t_test = map(data, ~broom::tidy(t.test(score ~ diabetes, data = .))), # iterate t-tests cohens_d = map(data, 
~effsize::cohen.d(score ~ diabetes, data = .)) %>% # iterate cohen's d map_dbl("estimate")) %>% # extract just Cohen's d estimate from list output select(-data) %>% unnest() %>% mutate(p.adj = p.adjust(p.value, method = "holm")) %>% # Holm correction for multiple comparisons select(comparison, trait, statistic, p.value, p.adj, cohens_d) # select relevant vars # Bootstrap Cohen's D ---------------------------------------------------- # number of bootstraps #boot.n = 100 boot.n = 10000 #helper function d_boot = function(split){ effsize::cohen.d(score ~ diabetes, data = analysis(split)) } # iterate cohen's d confidence intervals d_confidence = data_nested %>% mutate(boots = map(data, rsample::bootstraps, times = boot.n)) %>% mutate(boots = map(boots, .f = function(x) mutate(x, d = map(splits, d_boot)))) %>% #maps in maps! mutate(boots = map(boots, .f = function(x) mutate(x, d = map_dbl(d, "estimate")))) %>% mutate(boots = map(boots, "d")) %>% unnest(boots) %>% group_by(comparison, trait) %>% dplyr::summarise(d_conf_low = quantile(boots, probs = c(.025)), d_conf_high = quantile(boots, probs = c(.975))) # add to t-test output t_test_output = full_join(t_test_output, d_confidence) # Save t-test output ------------------------------------------------------ saveRDS(t_test_output, file = here("output/t_tests/t_test_output.RDS"))
## Copyright (c) 2016 Windlogics, Inc. ## See the DESCRIPTION file for licensing information. ##' @importFrom R6 R6Class ##' @export GroveR <- R6Class( portable = FALSE, private = list( fileRoot = ".", artDefs = list(), memCache = list() ) ) ArtifactDef <- R6Class( portable = FALSE, public = list( initialize = function(deps, create, retrieve, checkTime, store) { deps <<- deps create <<- create retrieve <<- retrieve checkTime <<- checkTime store <<- store }, deps = character(), create = NULL, retrieve = NULL, checkTime = NULL, store = NULL, show = function() { list( deps = deps, create = create, retrieve = retrieve, checkTime = checkTime, store = store ) } ) ) ## Some little helper functions .public <- function(...) GroveR$set("public", ...) .private <- function(...) GroveR$set("private", ...) .noop <- function(...){} ## For R 3.1.x compatibility if (!exists("dir.exists")) { dir.exists <- function(paths) file.exists(paths) & file.info(paths)$isdir } if (!exists("file.mtime")) { file.mtime <- function(...) file.info(..., extra_cols = FALSE)$mtime }
/R/objects.R
no_license
nexteraanalytics/GroveR
R
false
false
1,225
r
## Copyright (c) 2016 Windlogics, Inc. ## See the DESCRIPTION file for licensing information. ##' @importFrom R6 R6Class ##' @export GroveR <- R6Class( portable = FALSE, private = list( fileRoot = ".", artDefs = list(), memCache = list() ) ) ArtifactDef <- R6Class( portable = FALSE, public = list( initialize = function(deps, create, retrieve, checkTime, store) { deps <<- deps create <<- create retrieve <<- retrieve checkTime <<- checkTime store <<- store }, deps = character(), create = NULL, retrieve = NULL, checkTime = NULL, store = NULL, show = function() { list( deps = deps, create = create, retrieve = retrieve, checkTime = checkTime, store = store ) } ) ) ## Some little helper functions .public <- function(...) GroveR$set("public", ...) .private <- function(...) GroveR$set("private", ...) .noop <- function(...){} ## For R 3.1.x compatibility if (!exists("dir.exists")) { dir.exists <- function(paths) file.exists(paths) & file.info(paths)$isdir } if (!exists("file.mtime")) { file.mtime <- function(...) file.info(..., extra_cols = FALSE)$mtime }
% Generated by roxygen2 (4.1.0): do not edit by hand % Please edit documentation in R/games.r \name{summary.game} \alias{summary.game} \title{Summarize a strategic model object} \usage{ \method{summary}{game}(object, useboot = TRUE, ...) } \arguments{ \item{object}{a fitted model of class \code{game}} \item{useboot}{logical: use bootstrap estimates (if present) to construct standard error estimates?} \item{...}{other arguments, currently ignored} } \value{ an object of class \code{summary.game}, containing the coefficient matrix and other information needed for printing } \description{ The default method for summarizing a \code{game} object. } \details{ Forms the standard regression results table from a fitted strategic model. Normally used interactively, in conjunction with \code{\link{print.summary.game}}. } \author{ Brenton Kenkel (\email{brenton.kenkel@gmail.com}) } \seealso{ \code{\link{print.summary.game}} }
/games/man/summary.game.Rd
no_license
brentonk/games
R
false
false
931
rd
% Generated by roxygen2 (4.1.0): do not edit by hand % Please edit documentation in R/games.r \name{summary.game} \alias{summary.game} \title{Summarize a strategic model object} \usage{ \method{summary}{game}(object, useboot = TRUE, ...) } \arguments{ \item{object}{a fitted model of class \code{game}} \item{useboot}{logical: use bootstrap estimates (if present) to construct standard error estimates?} \item{...}{other arguments, currently ignored} } \value{ an object of class \code{summary.game}, containing the coefficient matrix and other information needed for printing } \description{ The default method for summarizing a \code{game} object. } \details{ Forms the standard regression results table from a fitted strategic model. Normally used interactively, in conjunction with \code{\link{print.summary.game}}. } \author{ Brenton Kenkel (\email{brenton.kenkel@gmail.com}) } \seealso{ \code{\link{print.summary.game}} }
#' @import chk lifecycle NULL
/R/namespace.R
permissive
GinKampen/bcspecies
R
false
false
30
r
#' @import chk lifecycle NULL
\name{intcrcd.mae} \alias{intcrcd.mae} \title{ Generates initial connected row-column design } \description{ Generates a random initial connected row-column design for a given number of arrays \code{b} of size \code{k = 2} and the number of treatments \code{v}.} \usage{ intcrcd.mae(trt.N, col.N) } \arguments{ \item{trt.N}{ integer, specifying number of treatments, \code{v}. } \item{col.N}{ integer, specifying number of arrays, \code{b}. } } \value{ Returns a \code{2 x b} connected row-column design with \code{b} arrays of size \code{k = 2} and number of treatments \code{v}. } \references{ Debusho, L. K., Gemechu, D. B., and Haines, L. M. (2016). Algorithmic construction of optimal block designs for two-colour cDNA microarray experiments using the linear mixed model. Under review. Gemechu, D. B., Debusho, L. K., and Haines, L. M. (2014). A-optimal designs for two-colour cDNA microarray experiments using the linear mixed effects model. \emph{Peer-reviewed Proceedings of the Annual Conference of the South African Statistical Association for 2014 (SASA 2014), Rhodes University, Grahamstown, South Africa}. pp 33-40, ISBN: 978-1-86822-659-7. Gemechu, D. B., Debusho, L. K., and Haines, L. M. (2015). A-and D-optional row-column designs for two-colour cDNA microarray experiments using linear mixed effects models. \emph{South African Statistical Journal}, 49, 153-168. } \author{ Legesse Kassa Debusho, Dibaba Bayisa Gemechu, and Linda Haines } \seealso{ \code{\link{optrcdmaeAT}}, \code{\link{cmatrcd.mae}} } \examples{ #Initial connected row-column design for trt.N <- 4 #Number of treatments col.N <- 4 #Number of arrays intcrcd.mae(trt.N = 4, col.N = 4) } \keyword{Initial row-column design} \keyword{Connected row-column design}% __ONLY ONE__ keyword per line
/man/intcrcd.mae.Rd
no_license
cran/optrcdmaeAT
R
false
false
1,848
rd
\name{intcrcd.mae} \alias{intcrcd.mae} \title{ Generates initial connected row-column design } \description{ Generates a random initial connected row-column design for a given number of arrays \code{b} of size \code{k = 2} and the number of treatments \code{v}.} \usage{ intcrcd.mae(trt.N, col.N) } \arguments{ \item{trt.N}{ integer, specifying number of treatments, \code{v}. } \item{col.N}{ integer, specifying number of arrays, \code{b}. } } \value{ Returns a \code{2 x b} connected row-column design with \code{b} arrays of size \code{k = 2} and number of treatments \code{v}. } \references{ Debusho, L. K., Gemechu, D. B., and Haines, L. M. (2016). Algorithmic construction of optimal block designs for two-colour cDNA microarray experiments using the linear mixed model. Under review. Gemechu, D. B., Debusho, L. K., and Haines, L. M. (2014). A-optimal designs for two-colour cDNA microarray experiments using the linear mixed effects model. \emph{Peer-reviewed Proceedings of the Annual Conference of the South African Statistical Association for 2014 (SASA 2014), Rhodes University, Grahamstown, South Africa}. pp 33-40, ISBN: 978-1-86822-659-7. Gemechu, D. B., Debusho, L. K., and Haines, L. M. (2015). A-and D-optional row-column designs for two-colour cDNA microarray experiments using linear mixed effects models. \emph{South African Statistical Journal}, 49, 153-168. } \author{ Legesse Kassa Debusho, Dibaba Bayisa Gemechu, and Linda Haines } \seealso{ \code{\link{optrcdmaeAT}}, \code{\link{cmatrcd.mae}} } \examples{ #Initial connected row-column design for trt.N <- 4 #Number of treatments col.N <- 4 #Number of arrays intcrcd.mae(trt.N = 4, col.N = 4) } \keyword{Initial row-column design} \keyword{Connected row-column design}% __ONLY ONE__ keyword per line
#----------------------------------------------------------------------------- #plot distance distributions #----------------------------------------------------------------------------- simdist_a <- function(x, y){ rexp(n=x*100000, rate=y) } # DEPRECATED simulated marginal based on overall median # sim_marginal <- exp_fits2_anc %>% # group_by(pop) %>% # summarise(lambda=median(tot)/100000, rate=median(tot)/3e9) %>% # mutate(D2=map2(lambda, rate, simdist_a)) %>% # unnest() %>% # mutate(group="exp_marginal") # simulation based on mixture of 4 fitted components (medians) sim_mixture <- exp_fits2_anc %>% group_by(pop, param) %>% summarise(lambda=median(lambda), rate=1/median(rate)) %>% mutate(D2=map2(lambda, rate, simdist_a)) %>% unnest() %>% mutate(group="exp_mixture") %>% dplyr::select(pop, D2, group) # simulation based on mixture of 4k fitted components (4 per sample) # sim_mixture <- exp_fits2_anc %>% # group_by(pop, param) %>% # summarise(lambda=median(lambda), rate=1/median(rate)) %>% # mutate(D2=map2(lambda, rate, simdist_a)) %>% # unnest() %>% # mutate(group="exp_mixture") %>% # dplyr::select(pop, D2, group) # EMPIRICAL SIMULATION 1a: # simulation based on mixture of 1k components (1 per sample) id_anc <- exp_fits2_anc %>% dplyr::filter(param=="p1") %>% dplyr::select(ID, pop) sim_marginal2 <- left_join(id_counts, id_anc, by="ID") %>% group_by(pop) %>% mutate(D2=map2(tot/100000, tot/3e9, simdist_a)) %>% unnest() %>% mutate(group="exp_marginal_1a") test_sites %>% group_by(pop) %>% dplyr::filter(D2<3e6) %>% sample_n(1e5) %>% mutate(group=".observed") %>% bind_cols(data.frame(panel=rep(c("4-component", "1-component"), 1e5))) %>% #head dplyr::select(pop, Dmin, group, panel) %>% bind_rows(sim_mixture %>% mutate(Dmin=D2, panel="4-component")) %>% #head() bind_rows(sim_marginal2 %>% sample_n(1e5) %>% mutate(Dmin=D2, panel="1-component")) %>% mutate(D2=log(Dmin)) %>% ggplot(.)+ geom_line(aes(x=D2, colour=pop, linetype=group, alpha=group), size=1, stat="density")+ 
scale_x_continuous(limits=c(-1,15), breaks=log(c(1,150,20000,300000)), labels=c(1,150,20000,300000))+ scale_colour_manual("Ancestry", values=c("#FF7F00", "#33A02C"))+ scale_alpha_manual(values=c(1,0.7,0.7))+ # scale_linetype("Data")+ # scale_linetype_manual(guide = 'none', values=c("solid", "twodash", "dotted"))+ facet_wrap(~panel)+ xlab("Inter-singleton distance")+ ylab("density")+ theme_bw()+ theme(legend.position="bottom")+ guides(linetype = FALSE, alpha=FALSE) ggsave(paste0(scriptdir, "/figs/distance_distributions.png"), width=8, height=4) # EMPIRICAL SIMULATION 1b: # based on mixture of 1k components (1 per sample) with variable mutation rate simdist_mix <- function(x){ c(rexp(n=0.9*x, rate=x/3e9), rexp(n=0.1*x, rate=2*x/3e9)) } sim_marginal2_var_mu <- left_join(id_counts, id_anc, by="ID") %>% group_by(pop) %>% mutate(D2=map(tot, simdist_mix)) %>% unnest() %>% mutate(group="exp_marginal_1b") # COALESCENT SIMULATIONS # load data output from msprime Jupyter notebook sim_sites <- read_tsv(paste0(scriptdir, "/data/sim_dists_dip.txt")) sim_sites_rc <- read_tsv(paste0(scriptdir, "/data/sim_dists_dip_rc.txt")) # calculate proportion of inter-singleton distances <20kb attributable to # external branch length heterogeneity in empirical simulation # empirical sim, constant mu sim_marginal2 %>% group_by(pop) %>% dplyr::summarise(n=n(), count20kb=sum(D2<8700), prop=count20kb/n) sim_marginal2 %>% group_by(pop) %>% dplyr::summarise(n=n(), count20kb=sum(D2<120000), prop=count20kb/n) # empirical sim, variable mu (10% of genome subject to 2x higher mu) sim_marginal2_var_mu %>% group_by(pop) %>% dplyr::summarise(n=n(), count20kb=sum(D2<20000), prop=count20kb/n) # coalescent sim (EUR only!) sim_sites %>% dplyr::summarise(n=n(), count20kb=sum(dist<20000), prop=count20kb/n)
/scripts/simulated_distributions.R
no_license
carjed/topmed_singleton_clusters
R
false
false
3,994
r
#----------------------------------------------------------------------------- #plot distance distributions #----------------------------------------------------------------------------- simdist_a <- function(x, y){ rexp(n=x*100000, rate=y) } # DEPRECATED simulated marginal based on overall median # sim_marginal <- exp_fits2_anc %>% # group_by(pop) %>% # summarise(lambda=median(tot)/100000, rate=median(tot)/3e9) %>% # mutate(D2=map2(lambda, rate, simdist_a)) %>% # unnest() %>% # mutate(group="exp_marginal") # simulation based on mixture of 4 fitted components (medians) sim_mixture <- exp_fits2_anc %>% group_by(pop, param) %>% summarise(lambda=median(lambda), rate=1/median(rate)) %>% mutate(D2=map2(lambda, rate, simdist_a)) %>% unnest() %>% mutate(group="exp_mixture") %>% dplyr::select(pop, D2, group) # simulation based on mixture of 4k fitted components (4 per sample) # sim_mixture <- exp_fits2_anc %>% # group_by(pop, param) %>% # summarise(lambda=median(lambda), rate=1/median(rate)) %>% # mutate(D2=map2(lambda, rate, simdist_a)) %>% # unnest() %>% # mutate(group="exp_mixture") %>% # dplyr::select(pop, D2, group) # EMPIRICAL SIMULATION 1a: # simulation based on mixture of 1k components (1 per sample) id_anc <- exp_fits2_anc %>% dplyr::filter(param=="p1") %>% dplyr::select(ID, pop) sim_marginal2 <- left_join(id_counts, id_anc, by="ID") %>% group_by(pop) %>% mutate(D2=map2(tot/100000, tot/3e9, simdist_a)) %>% unnest() %>% mutate(group="exp_marginal_1a") test_sites %>% group_by(pop) %>% dplyr::filter(D2<3e6) %>% sample_n(1e5) %>% mutate(group=".observed") %>% bind_cols(data.frame(panel=rep(c("4-component", "1-component"), 1e5))) %>% #head dplyr::select(pop, Dmin, group, panel) %>% bind_rows(sim_mixture %>% mutate(Dmin=D2, panel="4-component")) %>% #head() bind_rows(sim_marginal2 %>% sample_n(1e5) %>% mutate(Dmin=D2, panel="1-component")) %>% mutate(D2=log(Dmin)) %>% ggplot(.)+ geom_line(aes(x=D2, colour=pop, linetype=group, alpha=group), size=1, stat="density")+ 
scale_x_continuous(limits=c(-1,15), breaks=log(c(1,150,20000,300000)), labels=c(1,150,20000,300000))+ scale_colour_manual("Ancestry", values=c("#FF7F00", "#33A02C"))+ scale_alpha_manual(values=c(1,0.7,0.7))+ # scale_linetype("Data")+ # scale_linetype_manual(guide = 'none', values=c("solid", "twodash", "dotted"))+ facet_wrap(~panel)+ xlab("Inter-singleton distance")+ ylab("density")+ theme_bw()+ theme(legend.position="bottom")+ guides(linetype = FALSE, alpha=FALSE) ggsave(paste0(scriptdir, "/figs/distance_distributions.png"), width=8, height=4) # EMPIRICAL SIMULATION 1b: # based on mixture of 1k components (1 per sample) with variable mutation rate simdist_mix <- function(x){ c(rexp(n=0.9*x, rate=x/3e9), rexp(n=0.1*x, rate=2*x/3e9)) } sim_marginal2_var_mu <- left_join(id_counts, id_anc, by="ID") %>% group_by(pop) %>% mutate(D2=map(tot, simdist_mix)) %>% unnest() %>% mutate(group="exp_marginal_1b") # COALESCENT SIMULATIONS # load data output from msprime Jupyter notebook sim_sites <- read_tsv(paste0(scriptdir, "/data/sim_dists_dip.txt")) sim_sites_rc <- read_tsv(paste0(scriptdir, "/data/sim_dists_dip_rc.txt")) # calculate proportion of inter-singleton distances <20kb attributable to # external branch length heterogeneity in empirical simulation # empirical sim, constant mu sim_marginal2 %>% group_by(pop) %>% dplyr::summarise(n=n(), count20kb=sum(D2<8700), prop=count20kb/n) sim_marginal2 %>% group_by(pop) %>% dplyr::summarise(n=n(), count20kb=sum(D2<120000), prop=count20kb/n) # empirical sim, variable mu (10% of genome subject to 2x higher mu) sim_marginal2_var_mu %>% group_by(pop) %>% dplyr::summarise(n=n(), count20kb=sum(D2<20000), prop=count20kb/n) # coalescent sim (EUR only!) sim_sites %>% dplyr::summarise(n=n(), count20kb=sum(dist<20000), prop=count20kb/n)
#Machine Learning Final Project - Sentiment Analysis and Corn Price Movement #Name: Yu-Chu Alice Chen(yc3178), Hsiao Hsien Tsao(ht2435) #Date: 8/13/2017 #Document includes data preprocessing and machine learning models explored #Set directory and read files setwd("~/Desktop/R/Machine learning") tweets<-read.csv('tweets_data.csv',header = TRUE) head(tweets) corn<-read.csv('corn_price_data.csv',header = TRUE) head(corn) summary(corn) summary(tweets) #Calculate percentage price for settle library(quantmod) #calculate standard deviation of corn price and price percentage change of price corn$pctchange <- Delt(corn$settle) #set var column to numeric corn$pctchange<-as.numeric(corn$pctchange) #calculate difference in price library(dplyr) corn$var<-c(NA,diff(corn$settle)) #corn%>% #mutate(var=diff(log(corn$settle))) #corn$diff<-percent_change(corn$settle) #corn$stdvarprice<-sd(corn$settle) #Change date format to the same as corn price date <- format(as.POSIXct(strptime(tweets$date,"%m/%d/%Y %H:%M",tz="")) ,format = "%m/%d/%Y") tweets$date<-date #create the wordcloud graph r_stats_text_corpus <- Corpus(VectorSource(tweets$text)) r_stats_text_corpus <- tm_map(r_stats_text_corpus, content_transformer(function(x) iconv(x, to='UTF-8-MAC', sub='byte'))) require("twitteR") require("wordcloud") require("tm") require("dplyr") r_stats_text_corpus <- tm_map(r_stats_text_corpus, content_transformer(tolower)) r_stats_text_corpus <- tm_map(r_stats_text_corpus, removePunctuation) #remove all punctuation r_stats_text_corpus <- tm_map(r_stats_text_corpus, function(x)removeWords(x,stopwords())) wordcloud(r_stats_text_corpus, min.freq = 10, max.words = 150, colors=brewer.pal(8, "Dark2")) #connect all libraries library(twitteR) install.packages("ROAuth") library(ROAuth) library(plyr) library(dplyr) library(stringr) library(ggplot2) install.packages("NLP") install.packages("stringi") library(tm) library(NLP) library(stringi) install.packages("qdap") library(qdap) #change tweets name to 
tweetpro twepro<-tweets #data preprocessing twepro$text <- tolower(twepro$text) twepro$text <- removePunctuation(twepro$text) twepro$text <- removeNumbers(twepro$text) twepro$text <- stripWhitespace(twepro$text) #data.frame manipulation for analysis twepro$location <- NULL twepro$language <- NULL twepro$favorites <- NULL #tweets evaluation function score.sentiment <- function(sentences, pos.words, neg.words, .progress='none') { require(plyr) require(stringr) scores <- laply(sentences, function(sentence, pos.words, neg.words){ sentence <- gsub('[[:punct:]]', "", sentence) sentence <- gsub('[[:cntrl:]]', "", sentence) sentence <- gsub('\\d+', "", sentence) sentence <- tolower(sentence) word.list <- str_split(sentence, '\\s+') words <- unlist(word.list) pos.matches <- match(words, pos.words) neg.matches <- match(words, neg.words) pos.matches <- !is.na(pos.matches) neg.matches <- !is.na(neg.matches) score <- sum(pos.matches) - sum(neg.matches) return(score) }, pos.words, neg.words, .progress=.progress) scores.df <- data.frame(score=scores, text=sentences) return(scores.df) } #lexicon neg = scan("negative-words.txt", what="character", comment.char=";") pos = scan("positive-words.txt", what="character", comment.char=";") twepro$text <- as.factor(twepro$text) score <- score.sentiment(twepro$text, pos.words, neg.words, .progress='text') combinescore <- cbind(tweets,score) write.csv(score, file = "scores.csv") write.csv(combinescore, file = "combinescore.csv") #drop unimportant columns combinescore<-combinescore[,-3:-6] #Join corn and score together library(plyr) stockscore<-join(combinescore, corn[c('date','commodity', 'settle','pctchange','var')], by='date', type='full',match='first') #create pos, neg and neutral stockscore$pos <- as.numeric(stockscore$score >= 1) stockscore$neg <- as.numeric(stockscore$score <= -1) stockscore$neu <- as.numeric(stockscore$score == 0) #Make one row per day tweet_stock <- ddply(stockscore, c('date','settle','pctchange','var'), 
plyr::summarise, pos.count = sum(pos), neg.count = sum(neg), neu.count = sum(neu),mean.score=mean(score),sum.retweets=sum(retweets),sum.score=sum(score)) tweet_stock$all.count <- tweet_stock$pos.count + tweet_stock$neg.count + tweet_stock$neu.count tweet_stock$percent.pos <- round((tweet_stock$pos.count / tweet_stock$all.count) * 100) cor(tweet_stock$percent.pos, tweet_stock$pctchange, use = "complete") #tweet_stock_df<- subset(tweet_stock_df, !is.na(var)) #Remove rows with no settle price tweet_stock<- subset(tweet_stock, !is.na(settle)) #change to matrix tweet_stock_t<-as.matrix(tweet_stock) #create features using lag function library(zoo) tweet_stock_t = tweet_stock %>% mutate(var.lag5 = rollapply(data = var, width = 5, FUN = mean, align = "right", fill = NA, na.rm = T))%>% mutate(var.lag10 = rollapply(data = var, width = 10, FUN = mean, align = "right", fill = NA, na.rm = T))%>% mutate(mean.score.lag5 = rollapply(data = mean.score, width = 5, FUN = mean, align = "right", fill = NA, na.rm = T))%>% mutate(mean.score.lag10 = rollapply(data = mean.score, width = 10, FUN = mean, align = "right", fill = NA, na.rm = T)) #drop mean.score with NA value tweet_stock_t<- subset(tweet_stock_t, !is.na(mean.score)) #set dummy variable for price change up down and neutral tweet_stock_t$change <- ifelse(tweet_stock_t$var > 5, 1,-1) #Split train and test set n = nrow(tweet_stock_t) trainIndex = sample(1:n, size = round(0.7*n), replace=FALSE) tweetstocktrain = tweet_stock_t[trainIndex ,] tweetstocktest = tweet_stock_t[-trainIndex ,] write.csv(tweetstocktrain,"train5-10.csv") write.csv(tweetstocktest,"test5-10.csv") #delete date column train<-tweetstocktrain[,-1] test<-tweetstocktest[,-1] #correlation graph install.packages("corrplot") library(corrplot) str(train) train<-sapply(train, as.numeric) corr_mat=cor(train, method="s") corrplot(corr_mat) ##Machine Learning Models: #load library library("e1071") library(caret) #train set train<-read.csv("train5-10.csv",header = T) 
train<-train[,-1] #remove x col train$date<-as.Date(train$date, format="%m/%d/%Y") train$change<-as.factor(train$change) train<-na.omit(train) #test set test<-read.csv("test5-10.csv",header = T) test<-test[,-1] #remove x col test$date<-as.Date(test$date, format="%m/%d/%Y") test$change<-as.factor(test$change) test<-na.omit(test) #Multi Linear Regression linermodel = lm(pctchange ~mean.score+var+pos.count, data = train) summary(linermodel) #annual return lin.pred<-predict(linermodel,train) newdata<-data.frame(pos.count=10, mean.score=1, var=50) lin.pred2<-predict(linermodel, test) #change y column to factor tweetstocktrain$change<-as.factor(tweetstocktrain$change) tweetstocktest$change<-as.factor(tweetstocktest$change) #SVM Model svm.model<-svm(change~.,data=train) svm.pred<-predict(svm.model, train) tab<-table(pred=svm.pred, true=train$change) confusionMatrix(svm.pred,train$change) confusionMatrix(tab) svm.testpred<-predict(svm.model,newdata=test) confusionMatrix(svm.testpred,test$change) #SVM ROC and AURPC #train SVM svm.pred<-as.numeric(svm.pred) y2<-as.numeric(train$change) pr.train<-pr.curve(scores.class0 = svm.pred, scores.class1 = y2, curve = T) plot(pr.train) roc.train<-roc(response=svm.pred, predictor=y2) plot(roc.train) #test SVM svm.testpred<-as.numeric(svm.testpred) #first change to numeric y<-as.numeric(test$change) roc<-roc(response=svm.testpred,predictor = y) pr<-pr.curve(scores.class0 = svm.testpred, scores.class1 = y, curve=T) plot(roc) #Kernel SVM - Radial kr.svm.model<-svm(change~., data=train, kernel="radial", cost=10, scale=FALSE) kr.svm.pred<-predict(kr.svm.model, train) kr.test.tab<-table(pred=kr.svm.pred, true=train$change) confusionMatrix(kr.test.tab) kr.svm.testpred<-predict(kr.svm.model,newdata=test) confusionMatrix(kr.svm.testpred,test$change) svm.error.rate <- sum(test$change != kr.svm.pred)/nrow(test) print(paste0("Accuracy (Precision): ", 1 - svm.error.rate)) #SVM ROC and AURPC #train SVM kr.svm.pred<-as.numeric(kr.svm.pred) 
y2<-as.numeric(train$change) pr.train<-pr.curve(scores.class0 = kr.svm.pred, scores.class1 = y2, curve = T) plot(pr.train) roc.train<-roc(response=svm.pred, predictor=y2) plot(roc.train) #test SVM kr.svm.testpred<-as.numeric(kr.svm.testpred) #first change to numeric y<-as.numeric(test$change) roc<-roc(response=kr.svm.testpred,predictor = y) pr<-pr.curve(scores.class0 = svm.testpred, scores.class1 = y, curve=T) plot(roc) #Naive Bayes train<-train[,-1] nbmodel<-naiveBayes(change~., data=train) pred<-predict(nbmodel, train) nbtab<-table(pred,train$change) confusionMatrix(nbtab) test<-test[,-1] newdata<-test[,-17] nb.testpred<-predict(nbmodel,test,type="class") confusionMatrix(nb.testpred,test$change) #Naive Bayes ROC and AURPC nb.testpred<-as.numeric(nb.testpred) #first change to numeric y<-as.numeric(test$change) roc<-roc(response=nb.testpred,predictor = y) pr<-pr.curve(scores.class0 = nb.testpred, scores.class1 = y, curve=T) plot(roc) pred<-as.numeric(pred) y2<-as.numeric(train$change) pr.train<-pr.curve(scores.class0 = pred, scores.class1 = y2, curve = T) plot(pr.train) roc.train<-roc(response=pred, predictor=y2) plot(roc.train) #plots library(ggplot2) pctgraph<-ggplot(train,aes(date,pctchange))+geom_point()+geom_smooth() poscount.graph<-ggplot(train,aes(pctchange,pos.count))+geom_point()+geom_smooth() var.graph<-ggplot(train,aes(date,var))+geom_line()+geom_smooth() #ggplot ggplot(train, aes(mean.score,pctchange))+geom_point()+geom_smooth()
/ML Project Code - Sentiment Analysis and Stock Price Movement.R
no_license
alicechenn/sentiment-analysis-stockprice-movement
R
false
false
10,322
r
#Machine Learning Final Project - Sentiment Analysis and Corn Price Movement #Name: Yu-Chu Alice Chen(yc3178), Hsiao Hsien Tsao(ht2435) #Date: 8/13/2017 #Document includes data preprocessing and machine learning models explored #Set directory and read files setwd("~/Desktop/R/Machine learning") tweets<-read.csv('tweets_data.csv',header = TRUE) head(tweets) corn<-read.csv('corn_price_data.csv',header = TRUE) head(corn) summary(corn) summary(tweets) #Calculate percentage price for settle library(quantmod) #calculate standard deviation of corn price and price percentage change of price corn$pctchange <- Delt(corn$settle) #set var column to numeric corn$pctchange<-as.numeric(corn$pctchange) #calculate difference in price library(dplyr) corn$var<-c(NA,diff(corn$settle)) #corn%>% #mutate(var=diff(log(corn$settle))) #corn$diff<-percent_change(corn$settle) #corn$stdvarprice<-sd(corn$settle) #Change date format to the same as corn price date <- format(as.POSIXct(strptime(tweets$date,"%m/%d/%Y %H:%M",tz="")) ,format = "%m/%d/%Y") tweets$date<-date #create the wordcloud graph r_stats_text_corpus <- Corpus(VectorSource(tweets$text)) r_stats_text_corpus <- tm_map(r_stats_text_corpus, content_transformer(function(x) iconv(x, to='UTF-8-MAC', sub='byte'))) require("twitteR") require("wordcloud") require("tm") require("dplyr") r_stats_text_corpus <- tm_map(r_stats_text_corpus, content_transformer(tolower)) r_stats_text_corpus <- tm_map(r_stats_text_corpus, removePunctuation) #remove all punctuation r_stats_text_corpus <- tm_map(r_stats_text_corpus, function(x)removeWords(x,stopwords())) wordcloud(r_stats_text_corpus, min.freq = 10, max.words = 150, colors=brewer.pal(8, "Dark2")) #connect all libraries library(twitteR) install.packages("ROAuth") library(ROAuth) library(plyr) library(dplyr) library(stringr) library(ggplot2) install.packages("NLP") install.packages("stringi") library(tm) library(NLP) library(stringi) install.packages("qdap") library(qdap) #change tweets name to 
tweetpro twepro<-tweets #data preprocessing twepro$text <- tolower(twepro$text) twepro$text <- removePunctuation(twepro$text) twepro$text <- removeNumbers(twepro$text) twepro$text <- stripWhitespace(twepro$text) #data.frame manipulation for analysis twepro$location <- NULL twepro$language <- NULL twepro$favorites <- NULL #tweets evaluation function score.sentiment <- function(sentences, pos.words, neg.words, .progress='none') { require(plyr) require(stringr) scores <- laply(sentences, function(sentence, pos.words, neg.words){ sentence <- gsub('[[:punct:]]', "", sentence) sentence <- gsub('[[:cntrl:]]', "", sentence) sentence <- gsub('\\d+', "", sentence) sentence <- tolower(sentence) word.list <- str_split(sentence, '\\s+') words <- unlist(word.list) pos.matches <- match(words, pos.words) neg.matches <- match(words, neg.words) pos.matches <- !is.na(pos.matches) neg.matches <- !is.na(neg.matches) score <- sum(pos.matches) - sum(neg.matches) return(score) }, pos.words, neg.words, .progress=.progress) scores.df <- data.frame(score=scores, text=sentences) return(scores.df) } #lexicon neg = scan("negative-words.txt", what="character", comment.char=";") pos = scan("positive-words.txt", what="character", comment.char=";") twepro$text <- as.factor(twepro$text) score <- score.sentiment(twepro$text, pos.words, neg.words, .progress='text') combinescore <- cbind(tweets,score) write.csv(score, file = "scores.csv") write.csv(combinescore, file = "combinescore.csv") #drop unimportant columns combinescore<-combinescore[,-3:-6] #Join corn and score together library(plyr) stockscore<-join(combinescore, corn[c('date','commodity', 'settle','pctchange','var')], by='date', type='full',match='first') #create pos, neg and neutral stockscore$pos <- as.numeric(stockscore$score >= 1) stockscore$neg <- as.numeric(stockscore$score <= -1) stockscore$neu <- as.numeric(stockscore$score == 0) #Make one row per day tweet_stock <- ddply(stockscore, c('date','settle','pctchange','var'), 
plyr::summarise, pos.count = sum(pos), neg.count = sum(neg), neu.count = sum(neu),mean.score=mean(score),sum.retweets=sum(retweets),sum.score=sum(score)) tweet_stock$all.count <- tweet_stock$pos.count + tweet_stock$neg.count + tweet_stock$neu.count tweet_stock$percent.pos <- round((tweet_stock$pos.count / tweet_stock$all.count) * 100) cor(tweet_stock$percent.pos, tweet_stock$pctchange, use = "complete") #tweet_stock_df<- subset(tweet_stock_df, !is.na(var)) #Remove rows with no settle price tweet_stock<- subset(tweet_stock, !is.na(settle)) #change to matrix tweet_stock_t<-as.matrix(tweet_stock) #create features using lag function library(zoo) tweet_stock_t = tweet_stock %>% mutate(var.lag5 = rollapply(data = var, width = 5, FUN = mean, align = "right", fill = NA, na.rm = T))%>% mutate(var.lag10 = rollapply(data = var, width = 10, FUN = mean, align = "right", fill = NA, na.rm = T))%>% mutate(mean.score.lag5 = rollapply(data = mean.score, width = 5, FUN = mean, align = "right", fill = NA, na.rm = T))%>% mutate(mean.score.lag10 = rollapply(data = mean.score, width = 10, FUN = mean, align = "right", fill = NA, na.rm = T)) #drop mean.score with NA value tweet_stock_t<- subset(tweet_stock_t, !is.na(mean.score)) #set dummy variable for price change up down and neutral tweet_stock_t$change <- ifelse(tweet_stock_t$var > 5, 1,-1) #Split train and test set n = nrow(tweet_stock_t) trainIndex = sample(1:n, size = round(0.7*n), replace=FALSE) tweetstocktrain = tweet_stock_t[trainIndex ,] tweetstocktest = tweet_stock_t[-trainIndex ,] write.csv(tweetstocktrain,"train5-10.csv") write.csv(tweetstocktest,"test5-10.csv") #delete date column train<-tweetstocktrain[,-1] test<-tweetstocktest[,-1] #correlation graph install.packages("corrplot") library(corrplot) str(train) train<-sapply(train, as.numeric) corr_mat=cor(train, method="s") corrplot(corr_mat) ##Machine Learning Models: #load library library("e1071") library(caret) #train set train<-read.csv("train5-10.csv",header = T) 
train<-train[,-1] #remove x col train$date<-as.Date(train$date, format="%m/%d/%Y") train$change<-as.factor(train$change) train<-na.omit(train) #test set test<-read.csv("test5-10.csv",header = T) test<-test[,-1] #remove x col test$date<-as.Date(test$date, format="%m/%d/%Y") test$change<-as.factor(test$change) test<-na.omit(test) #Multi Linear Regression linermodel = lm(pctchange ~mean.score+var+pos.count, data = train) summary(linermodel) #annual return lin.pred<-predict(linermodel,train) newdata<-data.frame(pos.count=10, mean.score=1, var=50) lin.pred2<-predict(linermodel, test) #change y column to factor tweetstocktrain$change<-as.factor(tweetstocktrain$change) tweetstocktest$change<-as.factor(tweetstocktest$change) #SVM Model svm.model<-svm(change~.,data=train) svm.pred<-predict(svm.model, train) tab<-table(pred=svm.pred, true=train$change) confusionMatrix(svm.pred,train$change) confusionMatrix(tab) svm.testpred<-predict(svm.model,newdata=test) confusionMatrix(svm.testpred,test$change) #SVM ROC and AURPC #train SVM svm.pred<-as.numeric(svm.pred) y2<-as.numeric(train$change) pr.train<-pr.curve(scores.class0 = svm.pred, scores.class1 = y2, curve = T) plot(pr.train) roc.train<-roc(response=svm.pred, predictor=y2) plot(roc.train) #test SVM svm.testpred<-as.numeric(svm.testpred) #first change to numeric y<-as.numeric(test$change) roc<-roc(response=svm.testpred,predictor = y) pr<-pr.curve(scores.class0 = svm.testpred, scores.class1 = y, curve=T) plot(roc) #Kernel SVM - Radial kr.svm.model<-svm(change~., data=train, kernel="radial", cost=10, scale=FALSE) kr.svm.pred<-predict(kr.svm.model, train) kr.test.tab<-table(pred=kr.svm.pred, true=train$change) confusionMatrix(kr.test.tab) kr.svm.testpred<-predict(kr.svm.model,newdata=test) confusionMatrix(kr.svm.testpred,test$change) svm.error.rate <- sum(test$change != kr.svm.pred)/nrow(test) print(paste0("Accuracy (Precision): ", 1 - svm.error.rate)) #SVM ROC and AURPC #train SVM kr.svm.pred<-as.numeric(kr.svm.pred) 
y2<-as.numeric(train$change) pr.train<-pr.curve(scores.class0 = kr.svm.pred, scores.class1 = y2, curve = T) plot(pr.train) roc.train<-roc(response=svm.pred, predictor=y2) plot(roc.train) #test SVM kr.svm.testpred<-as.numeric(kr.svm.testpred) #first change to numeric y<-as.numeric(test$change) roc<-roc(response=kr.svm.testpred,predictor = y) pr<-pr.curve(scores.class0 = svm.testpred, scores.class1 = y, curve=T) plot(roc) #Naive Bayes train<-train[,-1] nbmodel<-naiveBayes(change~., data=train) pred<-predict(nbmodel, train) nbtab<-table(pred,train$change) confusionMatrix(nbtab) test<-test[,-1] newdata<-test[,-17] nb.testpred<-predict(nbmodel,test,type="class") confusionMatrix(nb.testpred,test$change) #Naive Bayes ROC and AURPC nb.testpred<-as.numeric(nb.testpred) #first change to numeric y<-as.numeric(test$change) roc<-roc(response=nb.testpred,predictor = y) pr<-pr.curve(scores.class0 = nb.testpred, scores.class1 = y, curve=T) plot(roc) pred<-as.numeric(pred) y2<-as.numeric(train$change) pr.train<-pr.curve(scores.class0 = pred, scores.class1 = y2, curve = T) plot(pr.train) roc.train<-roc(response=pred, predictor=y2) plot(roc.train) #plots library(ggplot2) pctgraph<-ggplot(train,aes(date,pctchange))+geom_point()+geom_smooth() poscount.graph<-ggplot(train,aes(pctchange,pos.count))+geom_point()+geom_smooth() var.graph<-ggplot(train,aes(date,var))+geom_line()+geom_smooth() #ggplot ggplot(train, aes(mean.score,pctchange))+geom_point()+geom_smooth()
testlist <- list(end = NULL, start = NULL, x = structure(c(4.65774448417693e-10, 6.95356800386775e-310, 2.32903286132618e+96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), segment_end = structure(0, .Dim = c(1L, 1L)), segment_start = structure(0, .Dim = c(1L, 1L))) result <- do.call(dynutils::project_to_segments,testlist) str(result)
/dynutils/inst/testfiles/project_to_segments/AFL_project_to_segments/project_to_segments_valgrind_files/1609870296-test.R
no_license
akhikolla/updated-only-Issues
R
false
false
409
r
testlist <- list(end = NULL, start = NULL, x = structure(c(4.65774448417693e-10, 6.95356800386775e-310, 2.32903286132618e+96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), segment_end = structure(0, .Dim = c(1L, 1L)), segment_start = structure(0, .Dim = c(1L, 1L))) result <- do.call(dynutils::project_to_segments,testlist) str(result)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data-documentation.R \docType{data} \name{data_char_encodedtexts} \alias{data_char_encodedtexts} \alias{encodedTexts} \title{encoded texts for testing} \format{An object of class \code{character} of length 10.} \usage{ data_char_encodedtexts } \description{ \code{data_char_encodedtexts} is a 10-element character vector with 10 different encodings } \examples{ Encoding(data_char_encodedtexts) data.frame(labelled = names(data_char_encodedtexts), detected = encoding(data_char_encodedtexts)$all) } \keyword{datasets}
/man/data_char_encodedtexts.Rd
no_license
tbs08/quanteda
R
false
true
609
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data-documentation.R \docType{data} \name{data_char_encodedtexts} \alias{data_char_encodedtexts} \alias{encodedTexts} \title{encoded texts for testing} \format{An object of class \code{character} of length 10.} \usage{ data_char_encodedtexts } \description{ \code{data_char_encodedtexts} is a 10-element character vector with 10 different encodings } \examples{ Encoding(data_char_encodedtexts) data.frame(labelled = names(data_char_encodedtexts), detected = encoding(data_char_encodedtexts)$all) } \keyword{datasets}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/vectOrientation.R \name{vectOrientation} \alias{vectOrientation} \alias{vectOrientation.Atoms} \title{Vectors Orientation} \usage{ vectOrientation(x, ...) \method{vectOrientation}{Atoms}(x, V1, V2, pairwise = TRUE, ...) } \arguments{ \item{V1}{a data.frame containing pairs/triplets of atom indexes defining a set of distance/normal vectors.} \item{V2}{Same as V1 or a numeric vector of length 3 indicating a single reference vector for the calculation of the angles. See details.} \item{pairwise}{a logical value. When both V1 and V2 are data.frame, indicates wheither to compute angles pairwise or not.} \item{\dots}{further arguments passed to or from other methods.} } \value{ A 2D or 3D numeric array. The last dimension span over the frames of the trajectory. } \description{ Calculate the angles between two sets of vectors (Distance vectors or plan normals). } \details{ When \code{V1} and \code{V2} are data.frame, they must contain columns named \code{atm1}, \code{atm2} and optionally \code{atm3}. They must all be integer vectors containing atom indexes defining the vectors used for the analysis. When only \code{atm1} and \code{atm2} are specified, distances vectors are used. When \code{atm3} is also specified, plan normals are used instead. In this latter case, plan normals are calculated as the cross product between two normalized distance vectors d12 (defined by \code{atm1} and \code{atm2}) and d13 (defined by \code{atm1} and \code{atm3}). When \code{V2} is missing, the angles between V1 and itself are calculated (lower-triangle of the (V1, V1) angles matrix). When \code{V2} is a numeric vector, the angles between V1 and this single reference vector are calculated. When both \code{V1} and \code{V2} are data.frame, the angles between V1 and V2 are calculated (a (N1,N2) matrix for each frame is calculated). }
/man/vectOrientation.Rd
no_license
julienide/Atoms
R
false
true
1,937
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/vectOrientation.R \name{vectOrientation} \alias{vectOrientation} \alias{vectOrientation.Atoms} \title{Vectors Orientation} \usage{ vectOrientation(x, ...) \method{vectOrientation}{Atoms}(x, V1, V2, pairwise = TRUE, ...) } \arguments{ \item{V1}{a data.frame containing pairs/triplets of atom indexes defining a set of distance/normal vectors.} \item{V2}{Same as V1 or a numeric vector of length 3 indicating a single reference vector for the calculation of the angles. See details.} \item{pairwise}{a logical value. When both V1 and V2 are data.frame, indicates wheither to compute angles pairwise or not.} \item{\dots}{further arguments passed to or from other methods.} } \value{ A 2D or 3D numeric array. The last dimension span over the frames of the trajectory. } \description{ Calculate the angles between two sets of vectors (Distance vectors or plan normals). } \details{ When \code{V1} and \code{V2} are data.frame, they must contain columns named \code{atm1}, \code{atm2} and optionally \code{atm3}. They must all be integer vectors containing atom indexes defining the vectors used for the analysis. When only \code{atm1} and \code{atm2} are specified, distances vectors are used. When \code{atm3} is also specified, plan normals are used instead. In this latter case, plan normals are calculated as the cross product between two normalized distance vectors d12 (defined by \code{atm1} and \code{atm2}) and d13 (defined by \code{atm1} and \code{atm3}). When \code{V2} is missing, the angles between V1 and itself are calculated (lower-triangle of the (V1, V1) angles matrix). When \code{V2} is a numeric vector, the angles between V1 and this single reference vector are calculated. When both \code{V1} and \code{V2} are data.frame, the angles between V1 and V2 are calculated (a (N1,N2) matrix for each frame is calculated). }
library(SWMPr) ### Name: qaqc ### Title: QAQC filtering for SWMP data ### Aliases: qaqc qaqc.swmpr ### ** Examples ## Not run: ##D ## get data ##D data(apadbwq) ##D dat <- apadbwq ##D ##D ## retain only '0' and '-1' flags ##D qaqc(dat, qaqc_keep = c('0', '-1')) ##D ##D ## retain observations with the 'CSM' error code ##D qaqc(dat, qaqc_keep = 'CSM') ## End(Not run)
/data/genthat_extracted_code/SWMPr/examples/qaqc.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
378
r
library(SWMPr) ### Name: qaqc ### Title: QAQC filtering for SWMP data ### Aliases: qaqc qaqc.swmpr ### ** Examples ## Not run: ##D ## get data ##D data(apadbwq) ##D dat <- apadbwq ##D ##D ## retain only '0' and '-1' flags ##D qaqc(dat, qaqc_keep = c('0', '-1')) ##D ##D ## retain observations with the 'CSM' error code ##D qaqc(dat, qaqc_keep = 'CSM') ## End(Not run)
library(mvLSW) ### Name: plot.mvLSW ### Title: Plot mvLSW Object ### Aliases: plot.mvLSW ### Keywords: plot.mvLSW ### ** Examples ## Define evolutionary wavelet spectrum, structure only on level 2 Spec <- array(0, dim=c(3, 3, 8, 256)) Spec[1, 1, 2, ] <- 10 Spec[2, 2, 2, ] <- c(rep(5, 64), rep(0.6, 64), rep(5, 128)) Spec[3, 3, 2, ] <- c(rep(2, 128), rep(8, 128)) Spec[2, 1, 2, ] <- Spec[1, 2, 2, ] <- punif(1:256, 65, 192) Spec[3, 1, 2, ] <- Spec[1, 3, 2, ] <- c(rep(-1, 128), rep(5, 128)) Spec[3, 2, 2, ] <- Spec[2, 3, 2, ] <- -0.5 EWS <- as.mvLSW(x = Spec, filter.number = 1, family = "DaubExPhase", min.eig.val = NA) ## Sample time series and estimate the EWS and coherence. set.seed(10) X <- rmvLSW(Spectrum = EWS) EWS_X <- mvEWS(X, kernel.name = "daniell", kernel.param = 20) RHO_X <- coherence(EWS_X, partial = FALSE) ## Evaluate asymptotic spectral variance SpecVar <- varEWS(EWS_X) ## Evaluate 95% approximate confidence interval CI <- ApxCI(object = EWS_X, var = SpecVar, alpha=0.05) ## Plot mvEWS between channels 1 & 3 at level 2 plot(x = EWS_X, style = 1, info = c(1, 3, 2), Interval = CI) ## Plot coherence between channels 1 & 3 at level 2 plot(x = RHO_X, style = 1, info = c(1, 3, 2), ylab = "Coherence") ## mvEWS panel plot for level 2 plot(x = EWS_X, style = 2, info = 2, Interval = CI) ## Panel plot of coherence for level 2 plot(x = RHO_X, style = 2, info = 2, diag = FALSE, ylab = "Coherence") ## Plot mvEWS for channel pair 1 & 3 at all levels plot(x = EWS_X, style = 3, info = c(1, 3), Interval = CI) ## Plot coherence for channel pair 1 & 3 at all levels plot(x = RHO_X, style = 3, info = c(1, 3), ylab = "Coherence") ## Image plot for coherence between channels 1 & 3 plot(x = RHO_X, style = 4, info = c(1, 3), sub = "Coherence")
/data/genthat_extracted_code/mvLSW/examples/plot.mvLSW.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
1,775
r
library(mvLSW) ### Name: plot.mvLSW ### Title: Plot mvLSW Object ### Aliases: plot.mvLSW ### Keywords: plot.mvLSW ### ** Examples ## Define evolutionary wavelet spectrum, structure only on level 2 Spec <- array(0, dim=c(3, 3, 8, 256)) Spec[1, 1, 2, ] <- 10 Spec[2, 2, 2, ] <- c(rep(5, 64), rep(0.6, 64), rep(5, 128)) Spec[3, 3, 2, ] <- c(rep(2, 128), rep(8, 128)) Spec[2, 1, 2, ] <- Spec[1, 2, 2, ] <- punif(1:256, 65, 192) Spec[3, 1, 2, ] <- Spec[1, 3, 2, ] <- c(rep(-1, 128), rep(5, 128)) Spec[3, 2, 2, ] <- Spec[2, 3, 2, ] <- -0.5 EWS <- as.mvLSW(x = Spec, filter.number = 1, family = "DaubExPhase", min.eig.val = NA) ## Sample time series and estimate the EWS and coherence. set.seed(10) X <- rmvLSW(Spectrum = EWS) EWS_X <- mvEWS(X, kernel.name = "daniell", kernel.param = 20) RHO_X <- coherence(EWS_X, partial = FALSE) ## Evaluate asymptotic spectral variance SpecVar <- varEWS(EWS_X) ## Evaluate 95% approximate confidence interval CI <- ApxCI(object = EWS_X, var = SpecVar, alpha=0.05) ## Plot mvEWS between channels 1 & 3 at level 2 plot(x = EWS_X, style = 1, info = c(1, 3, 2), Interval = CI) ## Plot coherence between channels 1 & 3 at level 2 plot(x = RHO_X, style = 1, info = c(1, 3, 2), ylab = "Coherence") ## mvEWS panel plot for level 2 plot(x = EWS_X, style = 2, info = 2, Interval = CI) ## Panel plot of coherence for level 2 plot(x = RHO_X, style = 2, info = 2, diag = FALSE, ylab = "Coherence") ## Plot mvEWS for channel pair 1 & 3 at all levels plot(x = EWS_X, style = 3, info = c(1, 3), Interval = CI) ## Plot coherence for channel pair 1 & 3 at all levels plot(x = RHO_X, style = 3, info = c(1, 3), ylab = "Coherence") ## Image plot for coherence between channels 1 & 3 plot(x = RHO_X, style = 4, info = c(1, 3), sub = "Coherence")
#!/home/bio-longyk/apps/R-3.5.0/bin/Rscript ##Load Library suppressPackageStartupMessages(library("DESeq2")) library("pheatmap") library("RColorBrewer") args<-commandArgs(T) #args = c('high','low','DESeq2/FC_counts.txt','DESeq2/DESeq2_out.txt','DESeq2/DESeq2_summary.pdf') pdf(args[5]) ctrl=args[1] test=args[2] cts = read.table(args[3],header=TRUE,sep="\t",row.names=1) ###Rewirte Colum Names header = names(cts) header=sub("STAR.","",header) header=sub(".Aligned.sortedByCoord.out.bam","",header) names(cts) = header coldata <- data.frame(row.names = colnames( cts ),condition = c(rep(ctrl,2),rep(test,2))) dds <- DESeqDataSetFromMatrix(countData = cts,colData = coldata, design = ~ condition) ###Alternative Step; dds <- dds[ rowSums(counts(dds)) > 10, ] ##Sample distances rld <- rlog(dds, blind = FALSE) sampleDists <- dist(t(assay(rld))) sampleDistMatrix <- as.matrix( sampleDists ) #rownames(sampleDistMatrix) <- paste(vsd$condition, vsd$type, sep="-") colnames(sampleDistMatrix) <- NULL colors <- colorRampPalette( rev(brewer.pal(9, "Blues")) )(255) pheatmap(sampleDistMatrix, clustering_distance_rows = sampleDists, clustering_distance_cols = sampleDists, col = colors) plotPCA(rld) ##Running the differential expression pipeline dds$condition <- factor(dds$condition, levels=c(ctrl,test)) dds <- DESeq(dds) res <- results(dds,alpha=0.05,lfcThreshold=0.5, altHypothesis="greaterAbs") summary(res) #resLFC <- lfcShrink(dds, coef=2, res=res) #res <- resLFC resOrdered <- res[order(res$padj),] ####Alternative Step; write.table(resOrdered, args[4],quote=FALSE,sep="\t",col.names=NA,row.names=TRUE ) plotDispEsts(dds) hist(res$padj[res$baseMean>5], breaks=100,col="skyblue", border="slateblue") plotMA(res)
/workspace/shRNA/runx2_tead1/deseq2.r.2019010616
no_license
ijayden-lung/hpc
R
false
false
1,732
2019010616
#!/home/bio-longyk/apps/R-3.5.0/bin/Rscript ##Load Library suppressPackageStartupMessages(library("DESeq2")) library("pheatmap") library("RColorBrewer") args<-commandArgs(T) #args = c('high','low','DESeq2/FC_counts.txt','DESeq2/DESeq2_out.txt','DESeq2/DESeq2_summary.pdf') pdf(args[5]) ctrl=args[1] test=args[2] cts = read.table(args[3],header=TRUE,sep="\t",row.names=1) ###Rewirte Colum Names header = names(cts) header=sub("STAR.","",header) header=sub(".Aligned.sortedByCoord.out.bam","",header) names(cts) = header coldata <- data.frame(row.names = colnames( cts ),condition = c(rep(ctrl,2),rep(test,2))) dds <- DESeqDataSetFromMatrix(countData = cts,colData = coldata, design = ~ condition) ###Alternative Step; dds <- dds[ rowSums(counts(dds)) > 10, ] ##Sample distances rld <- rlog(dds, blind = FALSE) sampleDists <- dist(t(assay(rld))) sampleDistMatrix <- as.matrix( sampleDists ) #rownames(sampleDistMatrix) <- paste(vsd$condition, vsd$type, sep="-") colnames(sampleDistMatrix) <- NULL colors <- colorRampPalette( rev(brewer.pal(9, "Blues")) )(255) pheatmap(sampleDistMatrix, clustering_distance_rows = sampleDists, clustering_distance_cols = sampleDists, col = colors) plotPCA(rld) ##Running the differential expression pipeline dds$condition <- factor(dds$condition, levels=c(ctrl,test)) dds <- DESeq(dds) res <- results(dds,alpha=0.05,lfcThreshold=0.5, altHypothesis="greaterAbs") summary(res) #resLFC <- lfcShrink(dds, coef=2, res=res) #res <- resLFC resOrdered <- res[order(res$padj),] ####Alternative Step; write.table(resOrdered, args[4],quote=FALSE,sep="\t",col.names=NA,row.names=TRUE ) plotDispEsts(dds) hist(res$padj[res$baseMean>5], breaks=100,col="skyblue", border="slateblue") plotMA(res)
#' Generate parameter draws from a squire pmcmc run #' @param out Output of [[squire::pmcmc]] #' @param draws Number of draws from mcmc chain. Default = 10 generate_parameters <- function(out, draws = 10, burnin = 1000, ll = TRUE){ #set up parameters pmcmc_results <- out$pmcmc_results n_trajectories <- draws if("chains" %in% names(out$pmcmc_results)) { n_chains <- length(out$pmcmc_results$chains) } else { n_chains <- 1 } n_particles <- 2 forecast_days <- 0 #code from squire: Will need updating if squire undergoes changes squire:::assert_pos_int(n_chains) if (n_chains == 1) { squire:::assert_custom_class(pmcmc_results, "squire_pmcmc") } else { squire:::assert_custom_class(pmcmc_results, "squire_pmcmc_list") } squire:::assert_pos_int(burnin) squire:::assert_pos_int(n_trajectories) squire:::assert_pos_int(n_particles) squire:::assert_pos_int(forecast_days) if (n_chains > 1) { res <- squire::create_master_chain(x = pmcmc_results, burn_in = burnin) } else if (n_chains == 1 & burnin > 0) { res <- pmcmc_results$results[-seq_len(burnin), ] } else { res <- pmcmc_results$results } # are we drawing based on ll if (ll) { squire:::assert_neg(res$log_posterior, zero_allowed = FALSE) res <- unique(res) probs <- exp(res$log_posterior) probs <- probs/sum(probs) drop <- 0.9 while (any(is.na(probs))) { probs <- exp(res$log_posterior * drop) probs <- probs/sum(probs) drop <- drop^2 } params_smpl <- sample(x = length(probs), size = n_trajectories, replace = TRUE, prob = probs) } else { params_smpl <- sample(x = nrow(res), size = n_trajectories, replace = FALSE) } params_smpl <- res[params_smpl, !grepl("log", colnames(res))] params_smpl$start_date <- squire:::offset_to_start_date(pmcmc_results$inputs$data$date[1], round(params_smpl$start_date)) pars_list <- split(params_smpl, 1:nrow(params_smpl)) names(pars_list) <- rep("pars", length(pars_list)) #return the parameters return(pars_list) } #' Generate draws using parameters drawn from posterior #' @param out Output of [[squire::pmcmc]] 
#' @param pars_list Output of [[generate_parameters]] #' @param parallel Are we simulating in parallel. Default = FALSE #' @param draws How many draws are being used from pars_list. Default = NULL, #' which will use all the pars. #' @param interventions Are new interventions being used or default. Default = NULL generate_draws <- function(out, pars_list, parallel = FALSE, draws = NULL, interventions = NULL, ...){ # handle for no death days if(!("pmcmc_results" %in% names(out))) { message("`out` was not generated by pmcmc as no deaths for this country. \n", "Returning the original object, which assumes epidemic seeded on date ", "fits were run") return(out) } # grab information from the pmcmc run pmcmc <- out$pmcmc_results squire_model <- out$pmcmc_results$inputs$squire_model country <- out$parameters$country population <- out$parameters$population data <- out$pmcmc_results$inputs$data # are we drawing in parallel if (parallel) { suppressWarnings(future::plan(future::multisession())) } if(!is.null(interventions)){ #if making a change add that intervention here pmcmc$inputs$interventions <- interventions }else{ #else this is the interventions that come with the object interventions <- out$interventions } if (is.null(draws)) { draws <- length(pars_list) } #-------------------------------------------------------- # Section 3 of pMCMC Wrapper: Sample PMCMC Results #-------------------------------------------------------- #rename objects to their sample_pmcmc equivalent (so that it is simple to update #this code) pmcmc_results <- pmcmc n_particles <- 2 forecast_days <- 0 log_likelihood <- squire:::calc_loglikelihood replicates <- draws #recreate params_smpl object params_smpl <- do.call(rbind, pars_list) #instead of using squire:::sample_pmcmc we use the pars_list values provided #the following code is taken from squire:::sample_pmcmc and will need updating #if squire undergoes major changes message("Sampling from pMCMC Posterior...") if 
(Sys.getenv("SQUIRE_PARALLEL_DEBUG") == "TRUE") { traces <- purrr::map(.x = pars_list, .f = iran_log_likelihood, data = pmcmc_results$inputs$data, squire_model = pmcmc_results$inputs$squire_model, model_params = pmcmc_results$inputs$model_params, pars_obs = pmcmc_results$inputs$pars_obs, n_particles = n_particles, forecast_days = forecast_days, interventions = pmcmc_results$inputs$interventions, Rt_args = pmcmc_results$inputs$Rt_args, return = "full", ...) } else{ traces <- furrr::future_map(.x = pars_list, .f = iran_log_likelihood, data = pmcmc_results$inputs$data, squire_model = pmcmc_results$inputs$squire_model, model_params = pmcmc_results$inputs$model_params, pars_obs = pmcmc_results$inputs$pars_obs, n_particles = n_particles, forecast_days = forecast_days, interventions = pmcmc_results$inputs$interventions, Rt_args = pmcmc_results$inputs$Rt_args, return = "full", ..., .progress = TRUE, .options = furrr::furrr_options(seed = NULL)) } num_rows <- unlist(lapply(traces, nrow)) max_rows <- max(num_rows) seq_max <- seq_len(max_rows) max_date_names <- rownames(traces[[which.max(unlist(lapply(traces, nrow)))]]) trajectories <- array(NA, dim = c(max_rows, ncol(traces[[1]]), length(traces)), dimnames = list(max_date_names, colnames(traces[[1]]), NULL)) for (i in seq_len(length(traces))) { trajectories[utils::tail(seq_max, nrow(traces[[i]])), , i] <- traces[[i]] } pmcmc_samples <- list(trajectories = trajectories, sampled_PMCMC_Results = params_smpl, inputs = list(squire_model = pmcmc_results$inputs$squire_model, model_params = pmcmc_results$inputs$model_params, interventions = pmcmc_results$inputs$interventions, data = pmcmc_results$inputs$data, pars_obs = pmcmc_results$inputs$pars_obs)) class(pmcmc_samples) <- "squire_sample_PMCMC" #-------------------------------------------------------- # Section 4 of pMCMC Wrapper: Tidy Output #-------------------------------------------------------- # create a fake run object and fill in the required elements r <- 
squire_model$run_func(country = country, contact_matrix_set = pmcmc$inputs$model_params$contact_matrix_set, tt_contact_matrix = pmcmc$inputs$model_params$tt_matrix, hosp_bed_capacity = pmcmc$inputs$model_params$hosp_bed_capacity, tt_hosp_beds = pmcmc$inputs$model_params$tt_hosp_beds, ICU_bed_capacity = pmcmc$inputs$model_params$ICU_bed_capacity, tt_ICU_beds = pmcmc$inputs$model_params$tt_ICU_beds, population = population, day_return = TRUE, replicates = 1, time_period = nrow(pmcmc_samples$trajectories)) # and add the parameters that changed between each simulation, i.e. posterior draws r$replicate_parameters <- pmcmc_samples$sampled_PMCMC_Results # as well as adding the pmcmc chains so it's easy to draw from the chains again in the future r$pmcmc_results <- pmcmc # then let's create the output that we are going to use names(pmcmc_samples)[names(pmcmc_samples) == "trajectories"] <- "output" dimnames(pmcmc_samples$output) <- list(dimnames(pmcmc_samples$output)[[1]], dimnames(r$output)[[2]], NULL) r$output <- pmcmc_samples$output # and adjust the time as before full_row <- match(0, apply(r$output[,"time",],2,function(x) { sum(is.na(x)) })) saved_full <- r$output[,"time",full_row] for(i in seq_len(replicates)) { na_pos <- which(is.na(r$output[,"time",i])) full_to_place <- saved_full - which(rownames(r$output) == as.Date(max(data$date))) + 1L if(length(na_pos) > 0) { full_to_place[na_pos] <- NA } r$output[,"time",i] <- full_to_place } # second let's recreate the output r$model <- pmcmc_samples$inputs$squire_model$odin_model( user = pmcmc_samples$inputs$model_params, unused_user_action = "ignore" ) # we will add the interventions here so that we know what times are needed for projection r$interventions <- interventions # and fix the replicates r$parameters$replicates <- replicates r$parameters$time_period <- as.numeric(diff(as.Date(range(rownames(r$output))))) r$parameters$dt <- pmcmc$inputs$model_params$dt if ("province" %in% names(out$parameters)) { 
r$parameters$province <- out$parameters$province } return(r) } #' Specific log_likelihood wrapper for Iran simulations #' @noRd iran_log_likelihood <- function(pars, data, squire_model, model_params, pars_obs, n_particles, forecast_days = 0, return = "ll", Rt_args, interventions, rt_mult = 1) { switch(return, full = { save_particles <- TRUE full_output <- TRUE pf_return <- "sample" }, ll = { save_particles <- FALSE forecast_days <- 0 full_output <- FALSE pf_return <- "single" }, { stop("Unknown return type to calc_loglikelihood") }) squire:::assert_in(c("R0", "start_date"), names(pars), message = "Must specify R0, start date to infer") R0 <- pars[["R0"]] start_date <- pars[["start_date"]] squire:::assert_pos(R0) squire:::assert_date(start_date) R0_change <- interventions$R0_change date_R0_change <- interventions$date_R0_change date_contact_matrix_set_change <- interventions$date_contact_matrix_set_change date_ICU_bed_capacity_change <- interventions$date_ICU_bed_capacity_change date_hosp_bed_capacity_change <- interventions$date_hosp_bed_capacity_change date_vaccine_change <- interventions$date_vaccine_change date_vaccine_efficacy_infection_change <- interventions$date_vaccine_efficacy_infection_change date_vaccine_efficacy_disease_change <- interventions$date_vaccine_efficacy_disease_change if (is.null(date_R0_change)) { tt_beta <- 0 } else { tt_list <- squire:::intervention_dates_for_odin(dates = date_R0_change, change = R0_change, start_date = start_date, steps_per_day = round(1/model_params$dt), starting_change = 1) model_params$tt_beta <- tt_list$tt R0_change <- tt_list$change date_R0_change <- tt_list$dates } if (is.null(date_contact_matrix_set_change)) { tt_contact_matrix <- 0 } else { tt_list <- squire:::intervention_dates_for_odin(dates = date_contact_matrix_set_change, change = seq_along(interventions$contact_matrix_set)[-1], start_date = start_date, steps_per_day = round(1/model_params$dt), starting_change = 1) model_params$tt_matrix <- tt_list$tt 
model_params$mix_mat_set <- model_params$mix_mat_set[tt_list$change, , ] } if (is.null(date_ICU_bed_capacity_change)) { tt_ICU_beds <- 0 } else { tt_list <- squire:::intervention_dates_for_odin(dates = date_ICU_bed_capacity_change, change = interventions$ICU_bed_capacity[-1], start_date = start_date, steps_per_day = round(1/model_params$dt), starting_change = interventions$ICU_bed_capacity[1]) model_params$tt_ICU_beds <- tt_list$tt model_params$ICU_beds <- tt_list$change } if (is.null(date_hosp_bed_capacity_change)) { tt_hosp_beds <- 0 } else { tt_list <- squire:::intervention_dates_for_odin(dates = date_hosp_bed_capacity_change, change = interventions$hosp_bed_capacity[-1], start_date = start_date, steps_per_day = round(1/model_params$dt), starting_change = interventions$hosp_bed_capacity[1]) model_params$tt_hosp_beds <- tt_list$tt model_params$hosp_beds <- tt_list$change } if (is.null(date_vaccine_change)) { tt_vaccine <- 0 } else { tt_list <- squire:::intervention_dates_for_odin(dates = date_vaccine_change, change = interventions$max_vaccine[-1], start_date = start_date, steps_per_day = round(1/model_params$dt), starting_change = interventions$max_vaccine[1]) model_params$tt_vaccine <- tt_list$tt model_params$max_vaccine <- tt_list$change } if (is.null(date_vaccine_efficacy_infection_change)) { tt_vaccine_efficacy_infection <- 0 } else { tt_list <- squire:::intervention_dates_for_odin(dates = date_vaccine_efficacy_infection_change, change = seq_along(interventions$vaccine_efficacy_infection)[-1], start_date = start_date, steps_per_day = round(1/model_params$dt), starting_change = 1) model_params$tt_vaccine_efficacy_infection <- tt_list$tt model_params$vaccine_efficacy_infection <- model_params$vaccine_efficacy_infection[tt_list$change, , ] } if (is.null(date_vaccine_efficacy_disease_change)) { tt_vaccine_efficacy_disease <- 0 } else { tt_list <- squire:::intervention_dates_for_odin(dates = date_vaccine_efficacy_disease_change, change = 
seq_along(interventions$vaccine_efficacy_disease)[-1], start_date = start_date, steps_per_day = round(1/model_params$dt), starting_change = 1) model_params$tt_vaccine_efficacy_disease <- tt_list$tt model_params$prob_hosp <- model_params$prob_hosp[tt_list$change, , ] } R0 <- squire:::evaluate_Rt_pmcmc(R0_change = R0_change, R0 = R0, date_R0_change = date_R0_change, pars = pars, Rt_args = Rt_args) R0 <- R0*rt_mult beta_set <- squire:::beta_est(squire_model = squire_model, model_params = model_params, R0 = R0) model_params$beta_set <- beta_set if (inherits(squire_model, "stochastic")) { pf_result <- squire:::run_particle_filter(data = data, squire_model = squire_model, model_params = model_params, model_start_date = start_date, obs_params = pars_obs, n_particles = n_particles, forecast_days = forecast_days, save_particles = save_particles, full_output = full_output, return = pf_return) } else if (inherits(squire_model, "deterministic")) { pf_result <- run_deterministic_comparison_iran(data = data, squire_model = squire_model, model_params = model_params, model_start_date = start_date, obs_params = pars_obs, forecast_days = forecast_days, save_history = save_particles, return = pf_return) } pf_result } ll_pois <- function (data, model, phi, k, exp_noise) { mu <- phi * model + rexp(length(model), rate = exp_noise) dpois(data, lambda = mu, log = TRUE) } #' Specific deterministic model run for Iran with timing of Delta included #' @inheritParams squire:::run_deterministic_comparison run_deterministic_comparison_iran <- function(data, squire_model, model_params, model_start_date = "2020-02-02", obs_params = list( phi_cases = 0.1, k_cases = 2, phi_death = 1, k_death = 2, exp_noise = 1e+06 ), forecast_days = 0, save_history = FALSE, return = "ll") { if (!(return %in% c("full", "ll", "sample", "single"))) { stop("return argument must be full, ll, sample", "single") } if (as.Date(data$date[data$deaths > 0][1], "%Y-%m-%d") < as.Date(model_start_date, "%Y-%m-%d")) { stop("Model 
start date is later than data start date") } # set up as normal data <- squire:::particle_filter_data(data = data, start_date = model_start_date, steps_per_day = round(1/model_params$dt)) # correct for weekly deaths data$day_end[nrow(data)] <- data$day_start[nrow(data)] + 7 data$step_end[nrow(data)] <- data$step_start[nrow(data)] + 7 # back to normal model_params$tt_beta <- round(model_params$tt_beta * model_params$dt) model_params$tt_contact_matrix <- round(model_params$tt_contact_matrix * model_params$dt) model_params$tt_hosp_beds <- round(model_params$tt_hosp_beds * model_params$dt) model_params$tt_ICU_beds <- round(model_params$tt_ICU_beds * model_params$dt) # steps as normal steps <- c(0, data$day_end) fore_steps <- seq(data$day_end[nrow(data)], length.out = forecast_days + 1L) steps <- unique(c(steps, fore_steps)) if("dur_R" %in% names(obs_params)) { if(obs_params$dur_R != 365) { ch_dur_R <- as.integer(as.Date("2021-05-01") - model_start_date) model_params$tt_dur_R <- c(0, ch_dur_R, ch_dur_R+60) model_params$gamma_R <- c(model_params$gamma_R, 2/obs_params$dur_R, model_params$gamma_R) } } if("prob_hosp_multiplier" %in% names(obs_params)) { if(obs_params$prob_hosp_multiplier != 1) { ch_dur_R <- as.integer(as.Date("2021-05-01") - model_start_date) model_params$tt_prob_hosp_multiplier <- c(0, ch_dur_R) model_params$prob_hosp_multiplier <- c(model_params$prob_hosp_multiplier, obs_params$prob_hosp_multiplier) } } # run model model_func <- squire_model$odin_model(user = model_params, unused_user_action = "ignore") out <- model_func$run(t = seq(0, tail(steps, 1), 1), atol = 1e-6, rtol = 1e-6) index <- squire:::odin_index(model_func) # get deaths for comparison Ds <- diff(rowSums(out[c(data$day_end[2]-7, data$day_end[-1]), index$D])) Ds[Ds < 0] <- 0 deaths <- data$deaths[-1] # what type of ll for deaths if (obs_params$treated_deaths_only) { Ds_heathcare <- diff(rowSums(out[, index$D_get])) Ds_heathcare <- Ds_heathcare[data$day_end[-1]] ll <- ll_pois(deaths, 
Ds_heathcare, obs_params$phi_death, obs_params$k_death, obs_params$exp_noise) } else { ll <- ll_pois(deaths, Ds, obs_params$phi_death, obs_params$k_death, obs_params$exp_noise) } # now the ll for the seroprevalence sero_df <- obs_params$sero_df lls <- 0 if(!is.null(sero_df)) { if(nrow(sero_df) > 0) { sero_at_date <- function(date, symptoms, det, dates, N) { di <- which(dates == date) to_sum <- tail(symptoms[seq_len(di)], length(det)) min(sum(rev(to_sum)*head(det, length(to_sum)), na.rm=TRUE)/N, 0.99) } # get symptom incidence symptoms <- rowSums(out[,index$E2]) * model_params$gamma_E # dates of incidence, pop size and dates of sero surveys dates <- data$date[[1]] + seq_len(nrow(out)) - 1L N <- sum(model_params$population) sero_dates <- list(sero_df$date_end, sero_df$date_start, sero_df$date_start + as.integer((sero_df$date_end - sero_df$date_start)/2)) unq_sero_dates <- unique(c(sero_df$date_end, sero_df$date_start, sero_df$date_start + as.integer((sero_df$date_end - sero_df$date_start)/2))) det <- obs_params$sero_det # estimate model seroprev sero_model <- vapply(unq_sero_dates, sero_at_date, numeric(1), symptoms, det, dates, N) sero_model_mat <- do.call(cbind,lapply(sero_dates, function(x) {sero_model[match(x, unq_sero_dates)]})) # likelihood of model obvs lls <- rowMeans(dbinom(sero_df$sero_pos, sero_df$samples, sero_model_mat, log = TRUE)) } } # and wrap up as normal date <- data$date[[1]] + seq_len(nrow(out)) - 1L rownames(out) <- as.character(date) attr(out, "date") <- date pf_results <- list() pf_results$log_likelihood <- sum(ll) + sum(lls) if (save_history) { pf_results$states <- out } else if (return == "single") { pf_results$sample_state <- out[nrow(out), ] } if (return == "ll") { ret <- pf_results$log_likelihood } else if (return == "sample") { ret <- pf_results$states } else if (return == "single" || return == "full") { ret <- pf_results } ret }
/R/mcmc_utils.R
permissive
OJWatson/iran-ascertainment
R
false
false
22,036
r
#' Generate parameter draws from a squire pmcmc run #' @param out Output of [[squire::pmcmc]] #' @param draws Number of draws from mcmc chain. Default = 10 generate_parameters <- function(out, draws = 10, burnin = 1000, ll = TRUE){ #set up parameters pmcmc_results <- out$pmcmc_results n_trajectories <- draws if("chains" %in% names(out$pmcmc_results)) { n_chains <- length(out$pmcmc_results$chains) } else { n_chains <- 1 } n_particles <- 2 forecast_days <- 0 #code from squire: Will need updating if squire undergoes changes squire:::assert_pos_int(n_chains) if (n_chains == 1) { squire:::assert_custom_class(pmcmc_results, "squire_pmcmc") } else { squire:::assert_custom_class(pmcmc_results, "squire_pmcmc_list") } squire:::assert_pos_int(burnin) squire:::assert_pos_int(n_trajectories) squire:::assert_pos_int(n_particles) squire:::assert_pos_int(forecast_days) if (n_chains > 1) { res <- squire::create_master_chain(x = pmcmc_results, burn_in = burnin) } else if (n_chains == 1 & burnin > 0) { res <- pmcmc_results$results[-seq_len(burnin), ] } else { res <- pmcmc_results$results } # are we drawing based on ll if (ll) { squire:::assert_neg(res$log_posterior, zero_allowed = FALSE) res <- unique(res) probs <- exp(res$log_posterior) probs <- probs/sum(probs) drop <- 0.9 while (any(is.na(probs))) { probs <- exp(res$log_posterior * drop) probs <- probs/sum(probs) drop <- drop^2 } params_smpl <- sample(x = length(probs), size = n_trajectories, replace = TRUE, prob = probs) } else { params_smpl <- sample(x = nrow(res), size = n_trajectories, replace = FALSE) } params_smpl <- res[params_smpl, !grepl("log", colnames(res))] params_smpl$start_date <- squire:::offset_to_start_date(pmcmc_results$inputs$data$date[1], round(params_smpl$start_date)) pars_list <- split(params_smpl, 1:nrow(params_smpl)) names(pars_list) <- rep("pars", length(pars_list)) #return the parameters return(pars_list) } #' Generate draws using parameters drawn from posterior #' @param out Output of [[squire::pmcmc]] 
#' @param pars_list Output of [[generate_parameters]] #' @param parallel Are we simulating in parallel. Default = FALSE #' @param draws How many draws are being used from pars_list. Default = NULL, #' which will use all the pars. #' @param interventions Are new interventions being used or default. Default = NULL generate_draws <- function(out, pars_list, parallel = FALSE, draws = NULL, interventions = NULL, ...){ # handle for no death days if(!("pmcmc_results" %in% names(out))) { message("`out` was not generated by pmcmc as no deaths for this country. \n", "Returning the original object, which assumes epidemic seeded on date ", "fits were run") return(out) } # grab information from the pmcmc run pmcmc <- out$pmcmc_results squire_model <- out$pmcmc_results$inputs$squire_model country <- out$parameters$country population <- out$parameters$population data <- out$pmcmc_results$inputs$data # are we drawing in parallel if (parallel) { suppressWarnings(future::plan(future::multisession())) } if(!is.null(interventions)){ #if making a change add that intervention here pmcmc$inputs$interventions <- interventions }else{ #else this is the interventions that come with the object interventions <- out$interventions } if (is.null(draws)) { draws <- length(pars_list) } #-------------------------------------------------------- # Section 3 of pMCMC Wrapper: Sample PMCMC Results #-------------------------------------------------------- #rename objects to their sample_pmcmc equivalent (so that it is simple to update #this code) pmcmc_results <- pmcmc n_particles <- 2 forecast_days <- 0 log_likelihood <- squire:::calc_loglikelihood replicates <- draws #recreate params_smpl object params_smpl <- do.call(rbind, pars_list) #instead of using squire:::sample_pmcmc we use the pars_list values provided #the following code is taken from squire:::sample_pmcmc and will need updating #if squire undergoes major changes message("Sampling from pMCMC Posterior...") if 
(Sys.getenv("SQUIRE_PARALLEL_DEBUG") == "TRUE") { traces <- purrr::map(.x = pars_list, .f = iran_log_likelihood, data = pmcmc_results$inputs$data, squire_model = pmcmc_results$inputs$squire_model, model_params = pmcmc_results$inputs$model_params, pars_obs = pmcmc_results$inputs$pars_obs, n_particles = n_particles, forecast_days = forecast_days, interventions = pmcmc_results$inputs$interventions, Rt_args = pmcmc_results$inputs$Rt_args, return = "full", ...) } else{ traces <- furrr::future_map(.x = pars_list, .f = iran_log_likelihood, data = pmcmc_results$inputs$data, squire_model = pmcmc_results$inputs$squire_model, model_params = pmcmc_results$inputs$model_params, pars_obs = pmcmc_results$inputs$pars_obs, n_particles = n_particles, forecast_days = forecast_days, interventions = pmcmc_results$inputs$interventions, Rt_args = pmcmc_results$inputs$Rt_args, return = "full", ..., .progress = TRUE, .options = furrr::furrr_options(seed = NULL)) } num_rows <- unlist(lapply(traces, nrow)) max_rows <- max(num_rows) seq_max <- seq_len(max_rows) max_date_names <- rownames(traces[[which.max(unlist(lapply(traces, nrow)))]]) trajectories <- array(NA, dim = c(max_rows, ncol(traces[[1]]), length(traces)), dimnames = list(max_date_names, colnames(traces[[1]]), NULL)) for (i in seq_len(length(traces))) { trajectories[utils::tail(seq_max, nrow(traces[[i]])), , i] <- traces[[i]] } pmcmc_samples <- list(trajectories = trajectories, sampled_PMCMC_Results = params_smpl, inputs = list(squire_model = pmcmc_results$inputs$squire_model, model_params = pmcmc_results$inputs$model_params, interventions = pmcmc_results$inputs$interventions, data = pmcmc_results$inputs$data, pars_obs = pmcmc_results$inputs$pars_obs)) class(pmcmc_samples) <- "squire_sample_PMCMC" #-------------------------------------------------------- # Section 4 of pMCMC Wrapper: Tidy Output #-------------------------------------------------------- # create a fake run object and fill in the required elements r <- 
squire_model$run_func(country = country, contact_matrix_set = pmcmc$inputs$model_params$contact_matrix_set, tt_contact_matrix = pmcmc$inputs$model_params$tt_matrix, hosp_bed_capacity = pmcmc$inputs$model_params$hosp_bed_capacity, tt_hosp_beds = pmcmc$inputs$model_params$tt_hosp_beds, ICU_bed_capacity = pmcmc$inputs$model_params$ICU_bed_capacity, tt_ICU_beds = pmcmc$inputs$model_params$tt_ICU_beds, population = population, day_return = TRUE, replicates = 1, time_period = nrow(pmcmc_samples$trajectories)) # and add the parameters that changed between each simulation, i.e. posterior draws r$replicate_parameters <- pmcmc_samples$sampled_PMCMC_Results # as well as adding the pmcmc chains so it's easy to draw from the chains again in the future r$pmcmc_results <- pmcmc # then let's create the output that we are going to use names(pmcmc_samples)[names(pmcmc_samples) == "trajectories"] <- "output" dimnames(pmcmc_samples$output) <- list(dimnames(pmcmc_samples$output)[[1]], dimnames(r$output)[[2]], NULL) r$output <- pmcmc_samples$output # and adjust the time as before full_row <- match(0, apply(r$output[,"time",],2,function(x) { sum(is.na(x)) })) saved_full <- r$output[,"time",full_row] for(i in seq_len(replicates)) { na_pos <- which(is.na(r$output[,"time",i])) full_to_place <- saved_full - which(rownames(r$output) == as.Date(max(data$date))) + 1L if(length(na_pos) > 0) { full_to_place[na_pos] <- NA } r$output[,"time",i] <- full_to_place } # second let's recreate the output r$model <- pmcmc_samples$inputs$squire_model$odin_model( user = pmcmc_samples$inputs$model_params, unused_user_action = "ignore" ) # we will add the interventions here so that we know what times are needed for projection r$interventions <- interventions # and fix the replicates r$parameters$replicates <- replicates r$parameters$time_period <- as.numeric(diff(as.Date(range(rownames(r$output))))) r$parameters$dt <- pmcmc$inputs$model_params$dt if ("province" %in% names(out$parameters)) { 
r$parameters$province <- out$parameters$province } return(r) } #' Specific log_likelihood wrapper for Iran simulations #' @noRd iran_log_likelihood <- function(pars, data, squire_model, model_params, pars_obs, n_particles, forecast_days = 0, return = "ll", Rt_args, interventions, rt_mult = 1) { switch(return, full = { save_particles <- TRUE full_output <- TRUE pf_return <- "sample" }, ll = { save_particles <- FALSE forecast_days <- 0 full_output <- FALSE pf_return <- "single" }, { stop("Unknown return type to calc_loglikelihood") }) squire:::assert_in(c("R0", "start_date"), names(pars), message = "Must specify R0, start date to infer") R0 <- pars[["R0"]] start_date <- pars[["start_date"]] squire:::assert_pos(R0) squire:::assert_date(start_date) R0_change <- interventions$R0_change date_R0_change <- interventions$date_R0_change date_contact_matrix_set_change <- interventions$date_contact_matrix_set_change date_ICU_bed_capacity_change <- interventions$date_ICU_bed_capacity_change date_hosp_bed_capacity_change <- interventions$date_hosp_bed_capacity_change date_vaccine_change <- interventions$date_vaccine_change date_vaccine_efficacy_infection_change <- interventions$date_vaccine_efficacy_infection_change date_vaccine_efficacy_disease_change <- interventions$date_vaccine_efficacy_disease_change if (is.null(date_R0_change)) { tt_beta <- 0 } else { tt_list <- squire:::intervention_dates_for_odin(dates = date_R0_change, change = R0_change, start_date = start_date, steps_per_day = round(1/model_params$dt), starting_change = 1) model_params$tt_beta <- tt_list$tt R0_change <- tt_list$change date_R0_change <- tt_list$dates } if (is.null(date_contact_matrix_set_change)) { tt_contact_matrix <- 0 } else { tt_list <- squire:::intervention_dates_for_odin(dates = date_contact_matrix_set_change, change = seq_along(interventions$contact_matrix_set)[-1], start_date = start_date, steps_per_day = round(1/model_params$dt), starting_change = 1) model_params$tt_matrix <- tt_list$tt 
model_params$mix_mat_set <- model_params$mix_mat_set[tt_list$change, , ] } if (is.null(date_ICU_bed_capacity_change)) { tt_ICU_beds <- 0 } else { tt_list <- squire:::intervention_dates_for_odin(dates = date_ICU_bed_capacity_change, change = interventions$ICU_bed_capacity[-1], start_date = start_date, steps_per_day = round(1/model_params$dt), starting_change = interventions$ICU_bed_capacity[1]) model_params$tt_ICU_beds <- tt_list$tt model_params$ICU_beds <- tt_list$change } if (is.null(date_hosp_bed_capacity_change)) { tt_hosp_beds <- 0 } else { tt_list <- squire:::intervention_dates_for_odin(dates = date_hosp_bed_capacity_change, change = interventions$hosp_bed_capacity[-1], start_date = start_date, steps_per_day = round(1/model_params$dt), starting_change = interventions$hosp_bed_capacity[1]) model_params$tt_hosp_beds <- tt_list$tt model_params$hosp_beds <- tt_list$change } if (is.null(date_vaccine_change)) { tt_vaccine <- 0 } else { tt_list <- squire:::intervention_dates_for_odin(dates = date_vaccine_change, change = interventions$max_vaccine[-1], start_date = start_date, steps_per_day = round(1/model_params$dt), starting_change = interventions$max_vaccine[1]) model_params$tt_vaccine <- tt_list$tt model_params$max_vaccine <- tt_list$change } if (is.null(date_vaccine_efficacy_infection_change)) { tt_vaccine_efficacy_infection <- 0 } else { tt_list <- squire:::intervention_dates_for_odin(dates = date_vaccine_efficacy_infection_change, change = seq_along(interventions$vaccine_efficacy_infection)[-1], start_date = start_date, steps_per_day = round(1/model_params$dt), starting_change = 1) model_params$tt_vaccine_efficacy_infection <- tt_list$tt model_params$vaccine_efficacy_infection <- model_params$vaccine_efficacy_infection[tt_list$change, , ] } if (is.null(date_vaccine_efficacy_disease_change)) { tt_vaccine_efficacy_disease <- 0 } else { tt_list <- squire:::intervention_dates_for_odin(dates = date_vaccine_efficacy_disease_change, change = 
seq_along(interventions$vaccine_efficacy_disease)[-1], start_date = start_date, steps_per_day = round(1/model_params$dt), starting_change = 1) model_params$tt_vaccine_efficacy_disease <- tt_list$tt model_params$prob_hosp <- model_params$prob_hosp[tt_list$change, , ] } R0 <- squire:::evaluate_Rt_pmcmc(R0_change = R0_change, R0 = R0, date_R0_change = date_R0_change, pars = pars, Rt_args = Rt_args) R0 <- R0*rt_mult beta_set <- squire:::beta_est(squire_model = squire_model, model_params = model_params, R0 = R0) model_params$beta_set <- beta_set if (inherits(squire_model, "stochastic")) { pf_result <- squire:::run_particle_filter(data = data, squire_model = squire_model, model_params = model_params, model_start_date = start_date, obs_params = pars_obs, n_particles = n_particles, forecast_days = forecast_days, save_particles = save_particles, full_output = full_output, return = pf_return) } else if (inherits(squire_model, "deterministic")) { pf_result <- run_deterministic_comparison_iran(data = data, squire_model = squire_model, model_params = model_params, model_start_date = start_date, obs_params = pars_obs, forecast_days = forecast_days, save_history = save_particles, return = pf_return) } pf_result } ll_pois <- function (data, model, phi, k, exp_noise) { mu <- phi * model + rexp(length(model), rate = exp_noise) dpois(data, lambda = mu, log = TRUE) } #' Specific deterministic model run for Iran with timing of Delta included #' @inheritParams squire:::run_deterministic_comparison run_deterministic_comparison_iran <- function(data, squire_model, model_params, model_start_date = "2020-02-02", obs_params = list( phi_cases = 0.1, k_cases = 2, phi_death = 1, k_death = 2, exp_noise = 1e+06 ), forecast_days = 0, save_history = FALSE, return = "ll") { if (!(return %in% c("full", "ll", "sample", "single"))) { stop("return argument must be full, ll, sample", "single") } if (as.Date(data$date[data$deaths > 0][1], "%Y-%m-%d") < as.Date(model_start_date, "%Y-%m-%d")) { stop("Model 
start date is later than data start date") } # set up as normal data <- squire:::particle_filter_data(data = data, start_date = model_start_date, steps_per_day = round(1/model_params$dt)) # correct for weekly deaths data$day_end[nrow(data)] <- data$day_start[nrow(data)] + 7 data$step_end[nrow(data)] <- data$step_start[nrow(data)] + 7 # back to normal model_params$tt_beta <- round(model_params$tt_beta * model_params$dt) model_params$tt_contact_matrix <- round(model_params$tt_contact_matrix * model_params$dt) model_params$tt_hosp_beds <- round(model_params$tt_hosp_beds * model_params$dt) model_params$tt_ICU_beds <- round(model_params$tt_ICU_beds * model_params$dt) # steps as normal steps <- c(0, data$day_end) fore_steps <- seq(data$day_end[nrow(data)], length.out = forecast_days + 1L) steps <- unique(c(steps, fore_steps)) if("dur_R" %in% names(obs_params)) { if(obs_params$dur_R != 365) { ch_dur_R <- as.integer(as.Date("2021-05-01") - model_start_date) model_params$tt_dur_R <- c(0, ch_dur_R, ch_dur_R+60) model_params$gamma_R <- c(model_params$gamma_R, 2/obs_params$dur_R, model_params$gamma_R) } } if("prob_hosp_multiplier" %in% names(obs_params)) { if(obs_params$prob_hosp_multiplier != 1) { ch_dur_R <- as.integer(as.Date("2021-05-01") - model_start_date) model_params$tt_prob_hosp_multiplier <- c(0, ch_dur_R) model_params$prob_hosp_multiplier <- c(model_params$prob_hosp_multiplier, obs_params$prob_hosp_multiplier) } } # run model model_func <- squire_model$odin_model(user = model_params, unused_user_action = "ignore") out <- model_func$run(t = seq(0, tail(steps, 1), 1), atol = 1e-6, rtol = 1e-6) index <- squire:::odin_index(model_func) # get deaths for comparison Ds <- diff(rowSums(out[c(data$day_end[2]-7, data$day_end[-1]), index$D])) Ds[Ds < 0] <- 0 deaths <- data$deaths[-1] # what type of ll for deaths if (obs_params$treated_deaths_only) { Ds_heathcare <- diff(rowSums(out[, index$D_get])) Ds_heathcare <- Ds_heathcare[data$day_end[-1]] ll <- ll_pois(deaths, 
Ds_heathcare, obs_params$phi_death, obs_params$k_death, obs_params$exp_noise) } else { ll <- ll_pois(deaths, Ds, obs_params$phi_death, obs_params$k_death, obs_params$exp_noise) } # now the ll for the seroprevalence sero_df <- obs_params$sero_df lls <- 0 if(!is.null(sero_df)) { if(nrow(sero_df) > 0) { sero_at_date <- function(date, symptoms, det, dates, N) { di <- which(dates == date) to_sum <- tail(symptoms[seq_len(di)], length(det)) min(sum(rev(to_sum)*head(det, length(to_sum)), na.rm=TRUE)/N, 0.99) } # get symptom incidence symptoms <- rowSums(out[,index$E2]) * model_params$gamma_E # dates of incidence, pop size and dates of sero surveys dates <- data$date[[1]] + seq_len(nrow(out)) - 1L N <- sum(model_params$population) sero_dates <- list(sero_df$date_end, sero_df$date_start, sero_df$date_start + as.integer((sero_df$date_end - sero_df$date_start)/2)) unq_sero_dates <- unique(c(sero_df$date_end, sero_df$date_start, sero_df$date_start + as.integer((sero_df$date_end - sero_df$date_start)/2))) det <- obs_params$sero_det # estimate model seroprev sero_model <- vapply(unq_sero_dates, sero_at_date, numeric(1), symptoms, det, dates, N) sero_model_mat <- do.call(cbind,lapply(sero_dates, function(x) {sero_model[match(x, unq_sero_dates)]})) # likelihood of model obvs lls <- rowMeans(dbinom(sero_df$sero_pos, sero_df$samples, sero_model_mat, log = TRUE)) } } # and wrap up as normal date <- data$date[[1]] + seq_len(nrow(out)) - 1L rownames(out) <- as.character(date) attr(out, "date") <- date pf_results <- list() pf_results$log_likelihood <- sum(ll) + sum(lls) if (save_history) { pf_results$states <- out } else if (return == "single") { pf_results$sample_state <- out[nrow(out), ] } if (return == "ll") { ret <- pf_results$log_likelihood } else if (return == "sample") { ret <- pf_results$states } else if (return == "single" || return == "full") { ret <- pf_results } ret }
#run_analysis.R #Created by @AndrzejDzedzej #Collect and reshape data from Human Activity Recognition Using Smartphones Dataset #Reading test dataset, i assume that your working directory is UMI HAR Dataset #If it's not change it using setwd() function x_test <- read.table("test/X_test.txt") y_test <- read.table("test/y_test.txt") subject_test <- read.table("test/subject_test.txt") #Merge test dataset test_data <- cbind(x_test,y_test,subject_test) #Read train dataset x_train <- read.table("train/X_train.txt") y_train <- read.table("train/y_train.txt") subject_train <- read.table("train/subject_train.txt") #Merge train dataset train_data <- cbind(x_train, y_train, subject_train) #Merge test and train (end of assignment step 1) dataset <- rbind(test_data, train_data) #name variables in dataset using features.txt (Step 4) features <- read.table("features.txt") #reading from features.txt names(dataset) <- c(as.character(features$V2)) names(dataset)[length(dataset) - 1] <- "activity" names(dataset)[length(dataset)] <- "subject" #find positions of std and mean variable columns (step 2) column_indexes <- grep("-mean|-std", features[,2], ignore.case = TRUE) #we want activity and subject columns too, so we append last two columns of dataset column_indexes <- append(column_indexes, c(length(dataset) - 1,length(dataset))) #subset only needed columns dataset <- dataset[,column_indexes] #activity labels instead of numbers activity_labels <- read.table("activity_labels.txt") #We need dplyr package, installing it if not present if("dplyr" %in% rownames(installed.packages()) == FALSE) { install.packages("dplyr") } library("dplyr") #Join, key = activities, store in tmp table and substite activities in dataset tmp_join <- inner_join(dataset, activity_labels, by = c("activity" = "V1")) dataset$activity <- tmp_join$V2 #Here we have final tidy dataset (End of Step 4) #We need plyr package for ddply, installing if needed if("plyr" %in% rownames(installed.packages()) == FALSE) { 
install.packages("plyr") } library("plyr") #Step 5. We use ddply function to take averages of each variable for each activity and subject tidy_average <- ddply(dataset, .(subject,activity), .fun = numcolwise(mean)) #Exporting final dataset to txt file write.table(tidy_average,"tidy_average.txt", row.name = FALSE) #Clearing memory rm("x_train","y_train","subject_train","x_test","y_test","subject_test","features", "test_data", "tmp_join", "activity_labels", "train_data", "dataset")
/run_analysis.R
no_license
Jazzday/DataCleaning-CourseProject
R
false
false
2,579
r
#run_analysis.R #Created by @AndrzejDzedzej #Collect and reshape data from Human Activity Recognition Using Smartphones Dataset #Reading test dataset, i assume that your working directory is UMI HAR Dataset #If it's not change it using setwd() function x_test <- read.table("test/X_test.txt") y_test <- read.table("test/y_test.txt") subject_test <- read.table("test/subject_test.txt") #Merge test dataset test_data <- cbind(x_test,y_test,subject_test) #Read train dataset x_train <- read.table("train/X_train.txt") y_train <- read.table("train/y_train.txt") subject_train <- read.table("train/subject_train.txt") #Merge train dataset train_data <- cbind(x_train, y_train, subject_train) #Merge test and train (end of assignment step 1) dataset <- rbind(test_data, train_data) #name variables in dataset using features.txt (Step 4) features <- read.table("features.txt") #reading from features.txt names(dataset) <- c(as.character(features$V2)) names(dataset)[length(dataset) - 1] <- "activity" names(dataset)[length(dataset)] <- "subject" #find positions of std and mean variable columns (step 2) column_indexes <- grep("-mean|-std", features[,2], ignore.case = TRUE) #we want activity and subject columns too, so we append last two columns of dataset column_indexes <- append(column_indexes, c(length(dataset) - 1,length(dataset))) #subset only needed columns dataset <- dataset[,column_indexes] #activity labels instead of numbers activity_labels <- read.table("activity_labels.txt") #We need dplyr package, installing it if not present if("dplyr" %in% rownames(installed.packages()) == FALSE) { install.packages("dplyr") } library("dplyr") #Join, key = activities, store in tmp table and substite activities in dataset tmp_join <- inner_join(dataset, activity_labels, by = c("activity" = "V1")) dataset$activity <- tmp_join$V2 #Here we have final tidy dataset (End of Step 4) #We need plyr package for ddply, installing if needed if("plyr" %in% rownames(installed.packages()) == FALSE) { 
install.packages("plyr") } library("plyr") #Step 5. We use ddply function to take averages of each variable for each activity and subject tidy_average <- ddply(dataset, .(subject,activity), .fun = numcolwise(mean)) #Exporting final dataset to txt file write.table(tidy_average,"tidy_average.txt", row.name = FALSE) #Clearing memory rm("x_train","y_train","subject_train","x_test","y_test","subject_test","features", "test_data", "tmp_join", "activity_labels", "train_data", "dataset")
# load relevant libraries library("httr") library("jsonlite") # Be sure and check the README.md for complete instructions! # Use `source()` to load your API key variable from the `apikey.R` file you made. # Make sure you've set your working directory! source("apikey.R") key_param <- list("api-key" = nyt_key) # Create a variable `movie_name` that is the name of a movie of your choice. movie_name <- "Parasite" # Construct an HTTP request to search for reviews for the given movie. # The base URI is `https://api.nytimes.com/svc/movies/v2/` # The resource is `reviews/search.json` # See the interactive console for parameter details: # https://developer.nytimes.com/movie_reviews_v2.json # # You should use YOUR api key (as the `api-key` parameter) # and your `movie_name` variable as the search query! base_url <- "https://api.nytimes.com/svc/movies/v2/" endpoint <- "reviews/search.json" uri <- paste0(base_url, endpoint) query_params_list <- list("api-key" = nyt_key, "movie_name" = movie_name) # Send the HTTP Request to download the data # Extract the content and convert it from JSON response <- GET(uri, query = query_params_list) body <- content(response, "text", encoding = "UTF-8") data <- fromJSON(body) # What kind of data structure did this produce? A data frame? A list? is.data.frame(data) is.list(data) #It's a list # Manually inspect the returned data and identify the content of interest # (which are the movie reviews). # Use functions such as `names()`, `str()`, etc. # Flatten the movie reviews content into a data structure called `reviews` # From the most recent review, store the headline, short summary, and link to # the full article, each in their own variables # Create a list of the three pieces of information from above. # Print out the list.
/chapter-18-exercises/exercise-1/exercise.R
permissive
zpuiy/book-exercises
R
false
false
1,794
r
# load relevant libraries library("httr") library("jsonlite") # Be sure and check the README.md for complete instructions! # Use `source()` to load your API key variable from the `apikey.R` file you made. # Make sure you've set your working directory! source("apikey.R") key_param <- list("api-key" = nyt_key) # Create a variable `movie_name` that is the name of a movie of your choice. movie_name <- "Parasite" # Construct an HTTP request to search for reviews for the given movie. # The base URI is `https://api.nytimes.com/svc/movies/v2/` # The resource is `reviews/search.json` # See the interactive console for parameter details: # https://developer.nytimes.com/movie_reviews_v2.json # # You should use YOUR api key (as the `api-key` parameter) # and your `movie_name` variable as the search query! base_url <- "https://api.nytimes.com/svc/movies/v2/" endpoint <- "reviews/search.json" uri <- paste0(base_url, endpoint) query_params_list <- list("api-key" = nyt_key, "movie_name" = movie_name) # Send the HTTP Request to download the data # Extract the content and convert it from JSON response <- GET(uri, query = query_params_list) body <- content(response, "text", encoding = "UTF-8") data <- fromJSON(body) # What kind of data structure did this produce? A data frame? A list? is.data.frame(data) is.list(data) #It's a list # Manually inspect the returned data and identify the content of interest # (which are the movie reviews). # Use functions such as `names()`, `str()`, etc. # Flatten the movie reviews content into a data structure called `reviews` # From the most recent review, store the headline, short summary, and link to # the full article, each in their own variables # Create a list of the three pieces of information from above. # Print out the list.
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/smtsensorplott.R \name{smtsensorplott} \alias{smtsensorplott} \title{Soil moisture and temp sensor plot (Temperature)} \usage{ smtsensorplott(df, sensornames, startdate, enddate) } \arguments{ \item{df}{A csv file from soil moisture/temp sensors} \item{sensornames}{Vector of the names of the sensors used in the order they appear on the csv} \item{startdate}{Beginning of date range of interest (format: "y-m-d")} \item{enddate}{End of date range of interest (format: "y-m-d")} } \value{ Plot (a summary of the data for the entered range of dates) } \description{ Soil moisture and temp sensor plot (Temperature) } \details{ This function takes data from Decagon 5TM soil/moisture sensors and returns a plot of the soil temperature data for the time period requested } \examples{ smtsensorplott(dfsensor, sensornameg, "2015-08-10", "2015-08-20") }
/smtsensor/man/smtsensorplott.Rd
no_license
mengli20/590_final
R
false
true
930
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/smtsensorplott.R \name{smtsensorplott} \alias{smtsensorplott} \title{Soil moisture and temp sensor plot (Temperature)} \usage{ smtsensorplott(df, sensornames, startdate, enddate) } \arguments{ \item{df}{A csv file from soil moisture/temp sensors} \item{sensornames}{Vector of the names of the sensors used in the order they appear on the csv} \item{startdate}{Beginning of date range of interest (format: "y-m-d")} \item{enddate}{End of date range of interest (format: "y-m-d")} } \value{ Plot (a summary of the data for the entered range of dates) } \description{ Soil moisture and temp sensor plot (Temperature) } \details{ This function takes data from Decagon 5TM soil/moisture sensors and returns a plot of the soil temperature data for the time period requested } \examples{ smtsensorplott(dfsensor, sensornameg, "2015-08-10", "2015-08-20") }
#postscript("bernoulliLikelihood.eps", width=4, height=4, horizontal = FALSE) n <- 4 xvals <- seq(0, 1, length = 1000) plot(c(0, 1), c(0, 1), type = "n", frame = FALSE, xlab = "p", ylab = "likelihood") for (i in 0 : n){ ml <- (i / n) ^ i * (1 - i / n) ^ (n - i) likelihood <- xvals ^ i * (1 - xvals) ^ (n - i) / ml lines(xvals, likelihood, type = "l", lwd = 2) } #dev.off()
/B_analysts_sources_github/bcaffo/Caffo-Coursera/lecture07_slide05_bernoulliLikelihood.R
no_license
Irbis3/crantasticScrapper
R
false
false
381
r
#postscript("bernoulliLikelihood.eps", width=4, height=4, horizontal = FALSE) n <- 4 xvals <- seq(0, 1, length = 1000) plot(c(0, 1), c(0, 1), type = "n", frame = FALSE, xlab = "p", ylab = "likelihood") for (i in 0 : n){ ml <- (i / n) ^ i * (1 - i / n) ^ (n - i) likelihood <- xvals ^ i * (1 - xvals) ^ (n - i) / ml lines(xvals, likelihood, type = "l", lwd = 2) } #dev.off()
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ec2_operations.R \name{ec2_export_image} \alias{ec2_export_image} \title{Exports an Amazon Machine Image (AMI) to a VM file} \usage{ ec2_export_image( ClientToken = NULL, Description = NULL, DiskImageFormat, DryRun = NULL, ImageId, S3ExportLocation, RoleName = NULL, TagSpecifications = NULL ) } \arguments{ \item{ClientToken}{Token to enable idempotency for export image requests.} \item{Description}{A description of the image being exported. The maximum length is 255 characters.} \item{DiskImageFormat}{[required] The disk image format.} \item{DryRun}{Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is \code{DryRunOperation}. Otherwise, it is \code{UnauthorizedOperation}.} \item{ImageId}{[required] The ID of the image.} \item{S3ExportLocation}{[required] The Amazon S3 bucket for the destination image. The destination bucket must exist.} \item{RoleName}{The name of the role that grants VM Import/Export permission to export images to your Amazon S3 bucket. If this parameter is not specified, the default role is named 'vmimport'.} \item{TagSpecifications}{The tags to apply to the export image task during creation.} } \description{ Exports an Amazon Machine Image (AMI) to a VM file. For more information, see \href{https://docs.aws.amazon.com/vm-import/latest/userguide/vmexport_image.html}{Exporting a VM directly from an Amazon Machine Image (AMI)} in the \emph{VM Import/Export User Guide}. See \url{https://www.paws-r-sdk.com/docs/ec2_export_image/} for full documentation. } \keyword{internal}
/cran/paws.compute/man/ec2_export_image.Rd
permissive
paws-r/paws
R
false
true
1,756
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ec2_operations.R \name{ec2_export_image} \alias{ec2_export_image} \title{Exports an Amazon Machine Image (AMI) to a VM file} \usage{ ec2_export_image( ClientToken = NULL, Description = NULL, DiskImageFormat, DryRun = NULL, ImageId, S3ExportLocation, RoleName = NULL, TagSpecifications = NULL ) } \arguments{ \item{ClientToken}{Token to enable idempotency for export image requests.} \item{Description}{A description of the image being exported. The maximum length is 255 characters.} \item{DiskImageFormat}{[required] The disk image format.} \item{DryRun}{Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is \code{DryRunOperation}. Otherwise, it is \code{UnauthorizedOperation}.} \item{ImageId}{[required] The ID of the image.} \item{S3ExportLocation}{[required] The Amazon S3 bucket for the destination image. The destination bucket must exist.} \item{RoleName}{The name of the role that grants VM Import/Export permission to export images to your Amazon S3 bucket. If this parameter is not specified, the default role is named 'vmimport'.} \item{TagSpecifications}{The tags to apply to the export image task during creation.} } \description{ Exports an Amazon Machine Image (AMI) to a VM file. For more information, see \href{https://docs.aws.amazon.com/vm-import/latest/userguide/vmexport_image.html}{Exporting a VM directly from an Amazon Machine Image (AMI)} in the \emph{VM Import/Export User Guide}. See \url{https://www.paws-r-sdk.com/docs/ec2_export_image/} for full documentation. } \keyword{internal}
library(glmnet) mydata = read.table("./TrainingSet/RF/thyroid.csv",head=T,sep=",") x = as.matrix(mydata[,4:ncol(mydata)]) y = as.matrix(mydata[,1]) set.seed(123) glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.7,family="gaussian",standardize=FALSE) sink('./Model/EN/Classifier/thyroid/thyroid_074.txt',append=TRUE) print(glm$glmnet.fit) sink()
/Model/EN/Classifier/thyroid/thyroid_074.R
no_license
leon1003/QSMART
R
false
false
353
r
library(glmnet) mydata = read.table("./TrainingSet/RF/thyroid.csv",head=T,sep=",") x = as.matrix(mydata[,4:ncol(mydata)]) y = as.matrix(mydata[,1]) set.seed(123) glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.7,family="gaussian",standardize=FALSE) sink('./Model/EN/Classifier/thyroid/thyroid_074.txt',append=TRUE) print(glm$glmnet.fit) sink()
#Set the working directory: setwd("/Users/Kianamon/R/kaggle") rm(list=ls()) ##################################################################################### #libraries in use: library(knitr) library(httr) library(readr) library(dplyr) library(tidyr) library(XML) library(ggplot2) library(stringr) library(lubridate) library(grid) library(caret) library(glmnet) library(ranger) library(e1071) library(Metrics) library(rpart) library(rpart.plot) library(ModelMetrics) library(ipred) library(randomForest) library(gbm) library(ROCR) library(mlr) library(xgboost) library(tidyverse) library(magrittr) library(data.table) library(mosaic) library(Ckmeans.1d.dp) library(archdata) ##################################################################################### #check for missing packages and install them: list.of.packages <- c("knitr", "httr", "readr", "dplyr", "tidyr", "XML", "ggplot2", "stringr", "lubridate", "grid", "caret", "rpart", "Metrics", "e1071", "ranger", "glmnet", "randomForest", "ROCR", "gbm", "ipred", "ModelMetrics", "rpart.plot", "xgboost", "tidyverse", "magrittr", "mosaic", "Ckmeans.1d.dp", "archdata") new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])] if(length(new.packages)) install.packages(new.packages) ##################################################################################### #downloading the two main data sets: df_train <- read_csv("train.csv") df_test <- read_csv("test.csv") ##################################################################################### head(df_train) colSums(is.na(df_train)) unique(df_train$Activity) ##################################################################################### #ranger #modranger <- ranger(Activity ~ .-Id, df_train) #modranger$confusion.matrix #print(1-modranger$prediction.error) #acc_ranger <- 1-modranger$prediction.error #predranger <- predict(modranger, df_test) ##################################################################################### 
set.seed(1) assignment <- sample(1:3, size = nrow(df_train), prob =c(0.7, 0.15, 0.15), replace = TRUE) df_train1 <- df_train[assignment == 1, ] # subset valid df_valid <- df_train[assignment == 2, ] # subset valid df_test1 <- df_train[assignment == 3, ] # subset test ##################################################################################### set.seed(1234) grid <- expand.grid(mtry = c(3,4), splitrule = "gini", min.node.size = 10) df_train1 <- df_train %>% select(-Id) fitControl <- trainControl(method = "CV", number = 5, verboseIter = TRUE) modmod <- caret::train(y = df_train$Activity, x = df_train[, colnames(df_train) != "Activity"], trControl = fitControl, method = "ranger", num.trees = 200, tuneGrid = grid) print(modmod) pred2 <- predict(modmod, df_test) ##################################################################################### #submission submit <- data.frame(Id=df_test$Id, Activity=pred2, stringsAsFactors = TRUE) head(submit) write.csv(submit, file = "submission.csv", row.names=F)
/kaggleranger.R
no_license
kianamon/Assignments-MATET580
R
false
false
3,351
r
#Set the working directory: setwd("/Users/Kianamon/R/kaggle") rm(list=ls()) ##################################################################################### #libraries in use: library(knitr) library(httr) library(readr) library(dplyr) library(tidyr) library(XML) library(ggplot2) library(stringr) library(lubridate) library(grid) library(caret) library(glmnet) library(ranger) library(e1071) library(Metrics) library(rpart) library(rpart.plot) library(ModelMetrics) library(ipred) library(randomForest) library(gbm) library(ROCR) library(mlr) library(xgboost) library(tidyverse) library(magrittr) library(data.table) library(mosaic) library(Ckmeans.1d.dp) library(archdata) ##################################################################################### #check for missing packages and install them: list.of.packages <- c("knitr", "httr", "readr", "dplyr", "tidyr", "XML", "ggplot2", "stringr", "lubridate", "grid", "caret", "rpart", "Metrics", "e1071", "ranger", "glmnet", "randomForest", "ROCR", "gbm", "ipred", "ModelMetrics", "rpart.plot", "xgboost", "tidyverse", "magrittr", "mosaic", "Ckmeans.1d.dp", "archdata") new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])] if(length(new.packages)) install.packages(new.packages) ##################################################################################### #downloading the two main data sets: df_train <- read_csv("train.csv") df_test <- read_csv("test.csv") ##################################################################################### head(df_train) colSums(is.na(df_train)) unique(df_train$Activity) ##################################################################################### #ranger #modranger <- ranger(Activity ~ .-Id, df_train) #modranger$confusion.matrix #print(1-modranger$prediction.error) #acc_ranger <- 1-modranger$prediction.error #predranger <- predict(modranger, df_test) ##################################################################################### 
set.seed(1) assignment <- sample(1:3, size = nrow(df_train), prob =c(0.7, 0.15, 0.15), replace = TRUE) df_train1 <- df_train[assignment == 1, ] # subset valid df_valid <- df_train[assignment == 2, ] # subset valid df_test1 <- df_train[assignment == 3, ] # subset test ##################################################################################### set.seed(1234) grid <- expand.grid(mtry = c(3,4), splitrule = "gini", min.node.size = 10) df_train1 <- df_train %>% select(-Id) fitControl <- trainControl(method = "CV", number = 5, verboseIter = TRUE) modmod <- caret::train(y = df_train$Activity, x = df_train[, colnames(df_train) != "Activity"], trControl = fitControl, method = "ranger", num.trees = 200, tuneGrid = grid) print(modmod) pred2 <- predict(modmod, df_test) ##################################################################################### #submission submit <- data.frame(Id=df_test$Id, Activity=pred2, stringsAsFactors = TRUE) head(submit) write.csv(submit, file = "submission.csv", row.names=F)
#install.packages("caTools") library(caTools) #set working directory setwd("D:\\niit\\datascience and R\\new content slides\\day8") #Load the csv file into object cherry=read.csv("cherry.csv", header = T) #summary summary(cherry) #check no of rows and columns dim(cherry) # Randomly split the data into training and testing sets set.seed(1000) split = sample.split(cherry, SplitRatio = 0.8) # Split up the data using subset train = subset(cherry, split==TRUE) test = subset(cherry, split==FALSE) nrow(test) #check data structure str(train) #Train Model using the train data plot(train$Volume,train$Girth) model=lm(Volume~Girth, data = train) plot(train$Girth,train$Volume) abline(model) #View statistical information about the model summary(model) summary(model)$r.squared #Use the trained model to predict for test data pred=predict(model, test[,-3]) #Create data frame with Actual and Predicted results for checking the results resdf=data.frame("Actual"=test[,3], "Predicted"=pred) resdf
/advanced linear regression-demo2.R
no_license
sathishanm/R_workshop
R
false
false
1,046
r
#install.packages("caTools") library(caTools) #set working directory setwd("D:\\niit\\datascience and R\\new content slides\\day8") #Load the csv file into object cherry=read.csv("cherry.csv", header = T) #summary summary(cherry) #check no of rows and columns dim(cherry) # Randomly split the data into training and testing sets set.seed(1000) split = sample.split(cherry, SplitRatio = 0.8) # Split up the data using subset train = subset(cherry, split==TRUE) test = subset(cherry, split==FALSE) nrow(test) #check data structure str(train) #Train Model using the train data plot(train$Volume,train$Girth) model=lm(Volume~Girth, data = train) plot(train$Girth,train$Volume) abline(model) #View statistical information about the model summary(model) summary(model)$r.squared #Use the trained model to predict for test data pred=predict(model, test[,-3]) #Create data frame with Actual and Predicted results for checking the results resdf=data.frame("Actual"=test[,3], "Predicted"=pred) resdf
library(testthat) test_dir("testthat") # devtools::document(); devtools::load_all(); shinytest::recordTest("tests/testthat/app/")
/tests/testthat.R
permissive
earnaud/MetaShARK-v2
R
false
false
132
r
library(testthat) test_dir("testthat") # devtools::document(); devtools::load_all(); shinytest::recordTest("tests/testthat/app/")
rankall <- function(outcome, num = "best") { require(dplyr) ## read outcome data in downloaded csv file outcome.df <- read.csv("Assignment3-data/outcome-of-care-measures.csv", colClasses="character", na.strings = "Not Available") ## subset columns of interest: hospital, state and three disease outcomes mortality <- outcome.df[, c(2, 7, 11,17,23)] ## fix numeric values (now columns: 3:5) m.df <- sapply(3:5, function(x) as.numeric(mortality[,x])) mortality <- cbind(mortality[,1:2],m.df) colnames(mortality) <- c("hospital", "state", "heart_attack", "heart_failure","pneumonia") ## check the 'outcome' arguments is valid; stop() and ### return "invalid outcome" for bad input, ### "missing argment" if missing argument) if(missing(outcome)) stop("missing argument") outcome = tolower(outcome) diseases <- c("heart attack","heart failure","pneumonia") if(is.na(diseases[match(outcome,diseases)])) stop("invalid outcome") ## sub "_" for space in disease (outcome) name disease <- sub("\\s+","_",outcome) ## create a vector of valid state names states <- as.character(sort(unique(mortality$state))) mort_df <- tbl_df(mortality) states.list <- lapply(states, function(x) mort_df[mort_df$state == x,c(1,2,3)]) #hospital <- () #state <- () output.df <- data.frame(hospital=character(),state=character()) for(i in 1:length(states)){ mystate_df <- states.list[[i]] colnames(mystate_df) <- c("hospital","state","disease") if(disease == "heart_attack") { filter_s_df <- filter(mystate_df,!is.na(disease)) sorted_s_df = arrange(filter_s_df,disease,hospital) ifelse(num == "worst", num <<- length(filter_s_df$disease), num) out_df <-cbind(sorted_s_df[num,1:2]) #output.df <- do.call(cbind,sorted_s_df[num,1:2]) } output.df <- do.call(rbind,list(out_df)) } #return(output.df) }
/rankall_v2.R
no_license
thokeller/Taffy-master
R
false
false
2,300
r
rankall <- function(outcome, num = "best") { require(dplyr) ## read outcome data in downloaded csv file outcome.df <- read.csv("Assignment3-data/outcome-of-care-measures.csv", colClasses="character", na.strings = "Not Available") ## subset columns of interest: hospital, state and three disease outcomes mortality <- outcome.df[, c(2, 7, 11,17,23)] ## fix numeric values (now columns: 3:5) m.df <- sapply(3:5, function(x) as.numeric(mortality[,x])) mortality <- cbind(mortality[,1:2],m.df) colnames(mortality) <- c("hospital", "state", "heart_attack", "heart_failure","pneumonia") ## check the 'outcome' arguments is valid; stop() and ### return "invalid outcome" for bad input, ### "missing argment" if missing argument) if(missing(outcome)) stop("missing argument") outcome = tolower(outcome) diseases <- c("heart attack","heart failure","pneumonia") if(is.na(diseases[match(outcome,diseases)])) stop("invalid outcome") ## sub "_" for space in disease (outcome) name disease <- sub("\\s+","_",outcome) ## create a vector of valid state names states <- as.character(sort(unique(mortality$state))) mort_df <- tbl_df(mortality) states.list <- lapply(states, function(x) mort_df[mort_df$state == x,c(1,2,3)]) #hospital <- () #state <- () output.df <- data.frame(hospital=character(),state=character()) for(i in 1:length(states)){ mystate_df <- states.list[[i]] colnames(mystate_df) <- c("hospital","state","disease") if(disease == "heart_attack") { filter_s_df <- filter(mystate_df,!is.na(disease)) sorted_s_df = arrange(filter_s_df,disease,hospital) ifelse(num == "worst", num <<- length(filter_s_df$disease), num) out_df <-cbind(sorted_s_df[num,1:2]) #output.df <- do.call(cbind,sorted_s_df[num,1:2]) } output.df <- do.call(rbind,list(out_df)) } #return(output.df) }
# ============================================================================== # # ALGORITMO DESENVOLVIDO POR GUILHERME DINHANI # # ============================================================================== # # ============================================================================== # BIBLIOTECAS # ============================================================================== library(data.table) library(dplyr) library(stringr) library(rvest) # ============================================================================== # FUNÇÕES # ============================================================================== # ============================================================================== # ESTUDOS # ============================================================================== ParseCurriculoEstudos <- function(arquivo) { # HTML print(arquivo) curriculo_html <- read_html(arquivo) # NOME curriculo_nome <- curriculo_html %>% html_node(css = ".pv-top-card-v3--list li") %>% html_text() %>% str_trim() # NOME if (is.na(curriculo_nome)) { curriculo_nome <- curriculo_html %>% html_node(css = "h1") %>% html_text() %>% str_trim() } # ESTUDOS curriculo_estudos <- curriculo_html %>% html_nodes(css = ".pv-education-entity") # ESCOLA lapply(curriculo_estudos, function(curriculo_estudo) { curriculo_estudo_escola <- curriculo_estudo %>% html_nodes(css = "h3") %>% html_text() %>% str_trim() if (length(curriculo_estudo_escola) == 0) { curriculo_estudo_escola <- NA } # CURSO curriculo_estudo_curso <- curriculo_estudo %>% html_nodes(css = ".pv-entity__degree-name span:nth-child(2)") %>% html_text() %>% str_trim() if (length(curriculo_estudo_curso) == 0) { curriculo_estudo_curso <- NA } # AREA DE ESTUDO curriculo_estudo_area <- curriculo_estudo %>% html_nodes(css = ".pv-entity__fos span:nth-child(2)") %>% html_text() %>% str_trim() if (length(curriculo_estudo_area) == 0) { curriculo_estudo_area <- NA } # DATA curriculo_estudo_data <- curriculo_estudo %>% html_nodes(css = 
".pv-entity__dates span:nth-child(2)") %>% html_text() %>% str_trim() if (length(curriculo_estudo_data) == 0) { curriculo_estudo_data <- NA } data.frame( Nome = curriculo_nome, Escola = curriculo_estudo_escola, Curso = curriculo_estudo_curso, Area = curriculo_estudo_area, Data = curriculo_estudo_data, stringsAsFactors = FALSE ) }) %>% rbindlist() } # ============================================================================== # EMPREGOS # ============================================================================== ParseCurriculoEmpregos <- function(arquivo) { # HTML print(arquivo) curriculo_html <- read_html(arquivo) # NOME curriculo_nome <- curriculo_html %>% html_node(css = ".pv-top-card-v3--list li") %>% html_text() %>% str_trim() # NOME if (is.na(curriculo_nome)) { curriculo_nome <- curriculo_html %>% html_node(css = "h1") %>% html_text() %>% str_trim() } # EMPRESAS curriculo_empresas <- curriculo_html %>% html_nodes(css = ".pv-position-entity") # VERIFICA SE POSSUI MAIS DE UM CARGO NA EMPRESA lapply(curriculo_empresas, function(curriculo_empresa) { possui_varios_cargos <- curriculo_empresa %>% html_nodes(css = ".pv-entity__position-group") %>% length() > 0 if (possui_varios_cargos) { ParseCurriculoEmpregosVariosCargos(curriculo_nome, curriculo_empresa) } else { ParseCurriculoEmpregosUnicoCargo(curriculo_nome, curriculo_empresa) } }) %>% rbindlist() } # EMPREGOS ParseCurriculoEmpregosUnicoCargo <- function(curriculo_nome, curriculo_empresa) { # NOME DA EMPRESA curriculo_empresa_nome <- curriculo_empresa %>% html_nodes(css = ".pv-entity__secondary-title") %>% html_text() %>% str_trim() if (length(curriculo_empresa_nome) == 0) { curriculo_empresa_nome <- NA } # CARGO curriculo_empresa_cargo_nome <- curriculo_empresa %>% html_nodes(css = "h3") %>% html_text() %>% str_trim() if (length(curriculo_empresa_cargo_nome) == 0) { curriculo_empresa_cargo_nome <- NA } # CIDADE curriculo_empresa_cargo_cidade <- curriculo_empresa %>% html_nodes(css = ".pv-entity__location 
span:nth-child(2)") %>% html_text() %>% str_trim() if (length(curriculo_empresa_cargo_cidade) == 0) { curriculo_empresa_cargo_cidade <- NA } # DATA curriculo_empresa_cargo_data <- curriculo_empresa %>% html_nodes(css = ".pv-entity__date-range span:nth-child(2)") %>% html_text() %>% str_trim() if (length(curriculo_empresa_cargo_data) == 0) { curriculo_empresa_cargo_data <- NA } data.frame( Nome = curriculo_nome, Empresa = curriculo_empresa_nome, Cargo = curriculo_empresa_cargo_nome, Cidade = curriculo_empresa_cargo_cidade, Data = curriculo_empresa_cargo_data, stringsAsFactors = FALSE ) } # EMPREGOS ParseCurriculoEmpregosVariosCargos <- function(curriculo_nome, curriculo_empresa) { # MÚLTIPLOS CARGOS curriculo_empresa_cargos <- curriculo_empresa %>% html_nodes(css = ".pv-entity__position-group-role-item") if (length(curriculo_empresa_cargos) == 0) { curriculo_empresa_cargos <- curriculo_empresa %>% html_nodes(css = ".pv-entity__position-group-role-item-fading-timeline") } # NOME DA EMPRESA lapply(curriculo_empresa_cargos, function(curriculo_empresa_cargo) { curriculo_empresa_nome <- curriculo_empresa %>% html_node(css = "h3 span:nth-child(2)") %>% html_text() %>% str_trim() if (length(curriculo_empresa_nome) == 0) { curriculo_empresa_nome <- NA } # CARGO curriculo_empresa_cargo_nome <- curriculo_empresa_cargo %>% html_nodes(css = ".pv-entity__summary-info-v2 h3 span:nth-child(2)") %>% html_text() %>% str_trim() if (length(curriculo_empresa_cargo_nome) == 0) { curriculo_empresa_cargo_nome <- NA } # CIDADE curriculo_empresa_cargo_cidade <- curriculo_empresa_cargo %>% html_nodes(css = ".pv-entity__location span:nth-child(2)") %>% html_text() %>% str_trim() if (length(curriculo_empresa_cargo_cidade) == 0) { curriculo_empresa_cargo_cidade <- NA } # DATA curriculo_empresa_cargo_data <- curriculo_empresa_cargo %>% html_nodes(css = ".pv-entity__date-range span:nth-child(2)") %>% html_text() %>% str_trim() if (length(curriculo_empresa_cargo_data) == 0) { 
curriculo_empresa_cargo_data <- NA } data.frame( Nome = curriculo_nome, Empresa = curriculo_empresa_nome, Cargo = curriculo_empresa_cargo_nome, Cidade = curriculo_empresa_cargo_cidade, Data = curriculo_empresa_cargo_data, stringsAsFactors = FALSE ) }) %>% rbindlist() } # ============================================================================== # EXECUÇÃO # ============================================================================== tryCatch({ curriculos_arquivos <- list.files("curriculos", pattern = "*.html", full.names = TRUE) if (identical(curriculos_arquivos, character(0))) { stop("Nenhum arquivo com a extensão .html encontrado.") } curriculos_empregos <- lapply(curriculos_arquivos, ParseCurriculoEmpregos) %>% rbindlist() if (dim(curriculos_empregos)[1] == 0) { stop("Nenhum dado sobre empregos encontrado.") } curriculos_estudos <- lapply(curriculos_arquivos, ParseCurriculoEstudos) %>% rbindlist() if (dim(curriculos_estudos)[1] == 0) { stop("Nenhum dado sobre estudos encontrado.") } }) # ============================================================================== # GRAVAÇÃO # ============================================================================== curriculos_csvs <- list.files("../Pré-processamento/", pattern = "*.csv", full.names = TRUE) file.remove(curriculos_csvs) write.csv(curriculos_empregos, file = "../Pré-processamento/Empregos.csv", row.names = FALSE) write.csv(curriculos_estudos, file = "../Pré-processamento/Estudos.csv", row.names = FALSE)
/Extrator/Extrator.r
no_license
guidinhani/TCC
R
false
false
8,529
r
# ============================================================================== # # ALGORITMO DESENVOLVIDO POR GUILHERME DINHANI # # ============================================================================== # # ============================================================================== # BIBLIOTECAS # ============================================================================== library(data.table) library(dplyr) library(stringr) library(rvest) # ============================================================================== # FUNÇÕES # ============================================================================== # ============================================================================== # ESTUDOS # ============================================================================== ParseCurriculoEstudos <- function(arquivo) { # HTML print(arquivo) curriculo_html <- read_html(arquivo) # NOME curriculo_nome <- curriculo_html %>% html_node(css = ".pv-top-card-v3--list li") %>% html_text() %>% str_trim() # NOME if (is.na(curriculo_nome)) { curriculo_nome <- curriculo_html %>% html_node(css = "h1") %>% html_text() %>% str_trim() } # ESTUDOS curriculo_estudos <- curriculo_html %>% html_nodes(css = ".pv-education-entity") # ESCOLA lapply(curriculo_estudos, function(curriculo_estudo) { curriculo_estudo_escola <- curriculo_estudo %>% html_nodes(css = "h3") %>% html_text() %>% str_trim() if (length(curriculo_estudo_escola) == 0) { curriculo_estudo_escola <- NA } # CURSO curriculo_estudo_curso <- curriculo_estudo %>% html_nodes(css = ".pv-entity__degree-name span:nth-child(2)") %>% html_text() %>% str_trim() if (length(curriculo_estudo_curso) == 0) { curriculo_estudo_curso <- NA } # AREA DE ESTUDO curriculo_estudo_area <- curriculo_estudo %>% html_nodes(css = ".pv-entity__fos span:nth-child(2)") %>% html_text() %>% str_trim() if (length(curriculo_estudo_area) == 0) { curriculo_estudo_area <- NA } # DATA curriculo_estudo_data <- curriculo_estudo %>% html_nodes(css = 
".pv-entity__dates span:nth-child(2)") %>% html_text() %>% str_trim() if (length(curriculo_estudo_data) == 0) { curriculo_estudo_data <- NA } data.frame( Nome = curriculo_nome, Escola = curriculo_estudo_escola, Curso = curriculo_estudo_curso, Area = curriculo_estudo_area, Data = curriculo_estudo_data, stringsAsFactors = FALSE ) }) %>% rbindlist() } # ============================================================================== # EMPREGOS # ============================================================================== ParseCurriculoEmpregos <- function(arquivo) { # HTML print(arquivo) curriculo_html <- read_html(arquivo) # NOME curriculo_nome <- curriculo_html %>% html_node(css = ".pv-top-card-v3--list li") %>% html_text() %>% str_trim() # NOME if (is.na(curriculo_nome)) { curriculo_nome <- curriculo_html %>% html_node(css = "h1") %>% html_text() %>% str_trim() } # EMPRESAS curriculo_empresas <- curriculo_html %>% html_nodes(css = ".pv-position-entity") # VERIFICA SE POSSUI MAIS DE UM CARGO NA EMPRESA lapply(curriculo_empresas, function(curriculo_empresa) { possui_varios_cargos <- curriculo_empresa %>% html_nodes(css = ".pv-entity__position-group") %>% length() > 0 if (possui_varios_cargos) { ParseCurriculoEmpregosVariosCargos(curriculo_nome, curriculo_empresa) } else { ParseCurriculoEmpregosUnicoCargo(curriculo_nome, curriculo_empresa) } }) %>% rbindlist() } # EMPREGOS ParseCurriculoEmpregosUnicoCargo <- function(curriculo_nome, curriculo_empresa) { # NOME DA EMPRESA curriculo_empresa_nome <- curriculo_empresa %>% html_nodes(css = ".pv-entity__secondary-title") %>% html_text() %>% str_trim() if (length(curriculo_empresa_nome) == 0) { curriculo_empresa_nome <- NA } # CARGO curriculo_empresa_cargo_nome <- curriculo_empresa %>% html_nodes(css = "h3") %>% html_text() %>% str_trim() if (length(curriculo_empresa_cargo_nome) == 0) { curriculo_empresa_cargo_nome <- NA } # CIDADE curriculo_empresa_cargo_cidade <- curriculo_empresa %>% html_nodes(css = ".pv-entity__location 
span:nth-child(2)") %>% html_text() %>% str_trim() if (length(curriculo_empresa_cargo_cidade) == 0) { curriculo_empresa_cargo_cidade <- NA } # DATA curriculo_empresa_cargo_data <- curriculo_empresa %>% html_nodes(css = ".pv-entity__date-range span:nth-child(2)") %>% html_text() %>% str_trim() if (length(curriculo_empresa_cargo_data) == 0) { curriculo_empresa_cargo_data <- NA } data.frame( Nome = curriculo_nome, Empresa = curriculo_empresa_nome, Cargo = curriculo_empresa_cargo_nome, Cidade = curriculo_empresa_cargo_cidade, Data = curriculo_empresa_cargo_data, stringsAsFactors = FALSE ) } # EMPREGOS ParseCurriculoEmpregosVariosCargos <- function(curriculo_nome, curriculo_empresa) { # MÚLTIPLOS CARGOS curriculo_empresa_cargos <- curriculo_empresa %>% html_nodes(css = ".pv-entity__position-group-role-item") if (length(curriculo_empresa_cargos) == 0) { curriculo_empresa_cargos <- curriculo_empresa %>% html_nodes(css = ".pv-entity__position-group-role-item-fading-timeline") } # NOME DA EMPRESA lapply(curriculo_empresa_cargos, function(curriculo_empresa_cargo) { curriculo_empresa_nome <- curriculo_empresa %>% html_node(css = "h3 span:nth-child(2)") %>% html_text() %>% str_trim() if (length(curriculo_empresa_nome) == 0) { curriculo_empresa_nome <- NA } # CARGO curriculo_empresa_cargo_nome <- curriculo_empresa_cargo %>% html_nodes(css = ".pv-entity__summary-info-v2 h3 span:nth-child(2)") %>% html_text() %>% str_trim() if (length(curriculo_empresa_cargo_nome) == 0) { curriculo_empresa_cargo_nome <- NA } # CIDADE curriculo_empresa_cargo_cidade <- curriculo_empresa_cargo %>% html_nodes(css = ".pv-entity__location span:nth-child(2)") %>% html_text() %>% str_trim() if (length(curriculo_empresa_cargo_cidade) == 0) { curriculo_empresa_cargo_cidade <- NA } # DATA curriculo_empresa_cargo_data <- curriculo_empresa_cargo %>% html_nodes(css = ".pv-entity__date-range span:nth-child(2)") %>% html_text() %>% str_trim() if (length(curriculo_empresa_cargo_data) == 0) { 
curriculo_empresa_cargo_data <- NA } data.frame( Nome = curriculo_nome, Empresa = curriculo_empresa_nome, Cargo = curriculo_empresa_cargo_nome, Cidade = curriculo_empresa_cargo_cidade, Data = curriculo_empresa_cargo_data, stringsAsFactors = FALSE ) }) %>% rbindlist() } # ============================================================================== # EXECUÇÃO # ============================================================================== tryCatch({ curriculos_arquivos <- list.files("curriculos", pattern = "*.html", full.names = TRUE) if (identical(curriculos_arquivos, character(0))) { stop("Nenhum arquivo com a extensão .html encontrado.") } curriculos_empregos <- lapply(curriculos_arquivos, ParseCurriculoEmpregos) %>% rbindlist() if (dim(curriculos_empregos)[1] == 0) { stop("Nenhum dado sobre empregos encontrado.") } curriculos_estudos <- lapply(curriculos_arquivos, ParseCurriculoEstudos) %>% rbindlist() if (dim(curriculos_estudos)[1] == 0) { stop("Nenhum dado sobre estudos encontrado.") } }) # ============================================================================== # GRAVAÇÃO # ============================================================================== curriculos_csvs <- list.files("../Pré-processamento/", pattern = "*.csv", full.names = TRUE) file.remove(curriculos_csvs) write.csv(curriculos_empregos, file = "../Pré-processamento/Empregos.csv", row.names = FALSE) write.csv(curriculos_estudos, file = "../Pré-processamento/Estudos.csv", row.names = FALSE)
#Exercise age<-c(18:29) age hieght<-c(76.1,77,78.1,78.2,78.8,79.7,79.9,81.1,81.2,81.8,82.8,83.5) hieght df<-data.frame(age,hieght) df plot(age~hieght) result<-lm(age~hieght) summary(result) #Eq: Age= -100.84 + 1.55*Hieght #Accuracy: #Since R2 is close to 1, hence model is highly significant. data_fitted<-data.frame(df, fitted.value=fitted(result), residual) #Exercise 2: names(mtcars) View(mtcars) plot(mpg~hp, data=mtcars) plot(mpg~wt, data = mtcars) result<-lm(mpg~hp+wt, data=mtcars) summary(result) result<-lm(mpg~hp+wt, data=mtcars) summary(result) #Example 3: #Create Training and Test data --- trainingRowIndex <- sample(1:nrow(mtcars), 0.8*nrow(mtcars)) #row indices for training data trainingData <- mtcars[trainingRowIndex, ] # model training data testData <- mtcars[-trainingRowIndex, ] # test data #Bi-variate Analysis:--- #Checking relationships between different variables pairs(mtcars) #Correlation cr = cor(mtcars) cr library(corrplot) corrplot(cr, type="lower", method="circle") corrplot(cr, type="lower", method="number") #Build the model on training data--- lmMod <- lm(mpg ~ cyl+disp+hp+wt, data=trainingData) # build the model # Review diagnostic measures summary(lmMod) #model summary #Accuracy: #Since R2 is close to 1, i.e., 0.84, hence model is significant. #Prediction--- #Predicting values for test dataset testData$mpgPred <- predict(lmMod, testData) #Accuracy:--- #Determining Prediction accuracy on test dataset using MAPE #MAPE(MeansAbsolutePercentageError): #Lower its value better is the accuracy of the model. #MAPE Calculation; mape <- mean(abs((testData$mpgPred - testData$mpg))/testData$mpg) mape
/10Nov2019.R
no_license
abhijeetkp/analytics
R
false
false
1,661
r
#Exercise age<-c(18:29) age hieght<-c(76.1,77,78.1,78.2,78.8,79.7,79.9,81.1,81.2,81.8,82.8,83.5) hieght df<-data.frame(age,hieght) df plot(age~hieght) result<-lm(age~hieght) summary(result) #Eq: Age= -100.84 + 1.55*Hieght #Accuracy: #Since R2 is close to 1, hence model is highly significant. data_fitted<-data.frame(df, fitted.value=fitted(result), residual) #Exercise 2: names(mtcars) View(mtcars) plot(mpg~hp, data=mtcars) plot(mpg~wt, data = mtcars) result<-lm(mpg~hp+wt, data=mtcars) summary(result) result<-lm(mpg~hp+wt, data=mtcars) summary(result) #Example 3: #Create Training and Test data --- trainingRowIndex <- sample(1:nrow(mtcars), 0.8*nrow(mtcars)) #row indices for training data trainingData <- mtcars[trainingRowIndex, ] # model training data testData <- mtcars[-trainingRowIndex, ] # test data #Bi-variate Analysis:--- #Checking relationships between different variables pairs(mtcars) #Correlation cr = cor(mtcars) cr library(corrplot) corrplot(cr, type="lower", method="circle") corrplot(cr, type="lower", method="number") #Build the model on training data--- lmMod <- lm(mpg ~ cyl+disp+hp+wt, data=trainingData) # build the model # Review diagnostic measures summary(lmMod) #model summary #Accuracy: #Since R2 is close to 1, i.e., 0.84, hence model is significant. #Prediction--- #Predicting values for test dataset testData$mpgPred <- predict(lmMod, testData) #Accuracy:--- #Determining Prediction accuracy on test dataset using MAPE #MAPE(MeansAbsolutePercentageError): #Lower its value better is the accuracy of the model. #MAPE Calculation; mape <- mean(abs((testData$mpgPred - testData$mpg))/testData$mpg) mape
# This file is part of the standard setup for testthat. # It is recommended that you do not modify it. # # Where should you do additional test configuration? # Learn more about the roles of various files in: # * https://r-pkgs.org/tests.html # * https://testthat.r-lib.org/reference/test_package.html#special-files library(testthat) library(seqHMM) test_check("seqHMM")
/tests/testthat.R
no_license
cran/seqHMM
R
false
false
372
r
# This file is part of the standard setup for testthat. # It is recommended that you do not modify it. # # Where should you do additional test configuration? # Learn more about the roles of various files in: # * https://r-pkgs.org/tests.html # * https://testthat.r-lib.org/reference/test_package.html#special-files library(testthat) library(seqHMM) test_check("seqHMM")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/additionalFunctions.R \name{searchIsotopes} \alias{searchIsotopes} \title{Targeted isotopes search} \usage{ searchIsotopes( msobject, label, adductsTable = LipidMS::adductsTable, ppm = 10, coelCutoff = 0.7, results, dbs ) } \arguments{ \item{msobject}{msobject.} \item{label}{isotope employed for the experiment. It can be "13C" or "D".} \item{adductsTable}{adducts table employed for lipids annotation.} \item{ppm}{mass error tolerance.} \item{coelCutoff}{coelution score threshold between isotopes. By default, 0.7.} \item{results}{target list to search isotopes. If missing, all results from the msobject are searched. It is used by \link{searchIsotopesmsbatch}.} \item{dbs}{list of data bases required for annotation. By default, dbs contains the required data frames based on the default fragmentation rules. If these rules are modified, dbs may need to be supplied. See \link{createLipidDB} and \link{assignDB}.} } \value{ List with the isotopes for each compound in the results data frame. } \description{ This function uses annotation results of deisotoped data to search for isotopes in raw data. } \author{ M Isabel Alcoriza-Balaguer <maribel_alcoriza@iislafe.es> }
/man/searchIsotopes.Rd
no_license
cran/LipidMS
R
false
true
1,319
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/additionalFunctions.R \name{searchIsotopes} \alias{searchIsotopes} \title{Targeted isotopes search} \usage{ searchIsotopes( msobject, label, adductsTable = LipidMS::adductsTable, ppm = 10, coelCutoff = 0.7, results, dbs ) } \arguments{ \item{msobject}{msobject.} \item{label}{isotope employed for the experiment. It can be "13C" or "D".} \item{adductsTable}{adducts table employed for lipids annotation.} \item{ppm}{mass error tolerance.} \item{coelCutoff}{coelution score threshold between isotopes. By default, 0.7.} \item{results}{target list to search isotopes. If missing, all results from the msobject are searched. It is used by \link{searchIsotopesmsbatch}.} \item{dbs}{list of data bases required for annotation. By default, dbs contains the required data frames based on the default fragmentation rules. If these rules are modified, dbs may need to be supplied. See \link{createLipidDB} and \link{assignDB}.} } \value{ List with the isotopes for each compound in the results data frame. } \description{ This function uses annotation results of deisotoped data to search for isotopes in raw data. } \author{ M Isabel Alcoriza-Balaguer <maribel_alcoriza@iislafe.es> }
#' @title Optimal Test Sample Size for Multi-State RDT with Multiple Periods and Criteria for Separate Periods #' #' @description Define the optimal function to find the optimal test plan with minimum test sample size given an acceptable level of consumer's risk. #' The maximum allowable failures for each separate period need to be satisfied to pass the test (for Multi-state RDT, Multiple Periods, Scenario I) #' #' @param cvec Maximum allowable failures for each separate period #' @param pivec Failure probability for each seperate period #' @param Rvec Lower level reliability requirements for each cumulative period from the begining of the test. #' @param thres_CR Threshold (acceptable level) of consumer's risk #' @return Minimum test sample size #' @examples #' pi <- pi_MCSim_dirichlet(M = 5000, seed = 10, par = c(1, 1, 1)) #' MPSep_optimal_n(cvec = c(1,1), pivec = pi, Rvec = c(0.8, 0.7), thres_CR = 0.05) #' @export MPSep_optimal_n <- function(cvec, pivec, Rvec, thres_CR){ n <- sum(cvec) + 1 CR <- MPSep_consumerrisk(n, cvec, pivec, Rvec) while (CR > thres_CR){ n <- n + 1 CR <- MPSep_consumerrisk(n, cvec, pivec, Rvec) } return(n) }
/OptimalRDTinR/R/MPSep_optimal_n.R
permissive
ericchen12377/OptimalRDT_R
R
false
false
1,170
r
#' @title Optimal Test Sample Size for Multi-State RDT with Multiple Periods and Criteria for Separate Periods #' #' @description Define the optimal function to find the optimal test plan with minimum test sample size given an acceptable level of consumer's risk. #' The maximum allowable failures for each separate period need to be satisfied to pass the test (for Multi-state RDT, Multiple Periods, Scenario I) #' #' @param cvec Maximum allowable failures for each separate period #' @param pivec Failure probability for each seperate period #' @param Rvec Lower level reliability requirements for each cumulative period from the begining of the test. #' @param thres_CR Threshold (acceptable level) of consumer's risk #' @return Minimum test sample size #' @examples #' pi <- pi_MCSim_dirichlet(M = 5000, seed = 10, par = c(1, 1, 1)) #' MPSep_optimal_n(cvec = c(1,1), pivec = pi, Rvec = c(0.8, 0.7), thres_CR = 0.05) #' @export MPSep_optimal_n <- function(cvec, pivec, Rvec, thres_CR){ n <- sum(cvec) + 1 CR <- MPSep_consumerrisk(n, cvec, pivec, Rvec) while (CR > thres_CR){ n <- n + 1 CR <- MPSep_consumerrisk(n, cvec, pivec, Rvec) } return(n) }
\encoding{UTF-8} \name{RepNCAanalyze} \alias{RepNCAanalyze} \title{NCA analyze function for replicated study} \description{ We provide noncompartmental analysis (NCA) approach to compute AUCs and terminal elimination rate constant (kel) for plasma concentration. Here we provide six methods, exact 3 data points, ARS, TTT, AIC, TTT and ARS, and TTT and AIC. } \keyword{misc}
/man/RepNCAanalyze.rd
no_license
cran/bear
R
false
false
394
rd
\encoding{UTF-8} \name{RepNCAanalyze} \alias{RepNCAanalyze} \title{NCA analyze function for replicated study} \description{ We provide noncompartmental analysis (NCA) approach to compute AUCs and terminal elimination rate constant (kel) for plasma concentration. Here we provide six methods, exact 3 data points, ARS, TTT, AIC, TTT and ARS, and TTT and AIC. } \keyword{misc}
ui <- fluidPage(navlistPanel( tabPanel("New cases analysis", verticalLayout( titlePanel("Covid-19 Data Analysis"), selectInput(inputId = "countries", label = strong("Select the countries or territories to analyze"), choices = sort(unique(data$Country.Region)), multiple = TRUE, width = "100%", selected = 'France'), plotOutput(outputId = "exp_plot", height = "600px", width = "100%") ) ), tabPanel("Temporal plot", verticalLayout( titlePanel("Covid-19 Data Analysis"), plotOutput(outputId = "time_plot",height = "500px",width = "100%") ) ), widths = c(2,10) ))
/ui.R
no_license
gonzaq94/Covid-19-data-analysis
R
false
false
787
r
ui <- fluidPage(navlistPanel( tabPanel("New cases analysis", verticalLayout( titlePanel("Covid-19 Data Analysis"), selectInput(inputId = "countries", label = strong("Select the countries or territories to analyze"), choices = sort(unique(data$Country.Region)), multiple = TRUE, width = "100%", selected = 'France'), plotOutput(outputId = "exp_plot", height = "600px", width = "100%") ) ), tabPanel("Temporal plot", verticalLayout( titlePanel("Covid-19 Data Analysis"), plotOutput(outputId = "time_plot",height = "500px",width = "100%") ) ), widths = c(2,10) ))
author <- c("*") pkgs <- c("tidyverse", "mrgsolve", "knitr", "rmarkdown", "data.table", "caTools", "bitops", "formatR", "git2r") pkgRoot <- "/data/page-packages" pkgDir <- file.path(pkgRoot, "src", "contrib") pkgDir <- normalizePath(pkgDir) libDir <- "/data/page-Rlibs" if(!dir.exists(pkgDir)) dir.create(pkgDir, recursive = TRUE) if(!dir.exists(libDir)) dir.create(libDir) .libPaths(libDir) user <- Sys.info()["user"] fromCRAN <- user %in% author | "*" %in% author local_repos <- paste0("file://",pkgRoot) metrum_repos <- "https://metrumresearchgroup.github.io/r_validated/" cran_repos <- "https://cran.rstudio.com/" repos <- c(mrg = metrum_repos, cran = cran_repos, local = local_repos) deps <- tools::package_dependencies( packages = pkgs, which = c("Depends", "Imports", "LinkingTo"), recursive = TRUE, db = available.packages(repos=repos[c("mrg", "cran")]) ) deps <- unlist(deps, use.names=FALSE) pkgs <- unique(c(pkgs,deps)) base <- rownames(installed.packages(priority=c("base", "recommended"))) pkgs <- setdiff(pkgs,base) tools::write_PACKAGES(pkgDir) if(file.exists(file.path(pkgDir,"PACKAGES"))){ available <- available.packages(repos = repos["local"])[,"Package"] } else{ available <- NULL file.create(file.path(pkgDir,"PACKAGES")) tools::write_PACKAGES(pkgDir) } if(fromCRAN){ newpkgs <- setdiff(pkgs, available) if(length(newpkgs) > 0){ ## These packages are installed either from mrg or cran install.packages(newpkgs, lib=libDir, repos = repos[c("mrg", "cran")], destdir=pkgDir, type="source", INSTALL_opts="--no-multiarch") tools::write_PACKAGES(pkgDir) } ## If multiple authors qcing each other, a package could be available ## but uninstalled. Install from local. 
uninstalled <- setdiff(pkgs, installed.packages(libDir)) if(length(uninstalled)>0){ install.packages(uninstalled, lib = libDir, repos = repos["local"], type = "source", INSTALL_opts="--no-multiarch") } } if(!fromCRAN){ installed <- row.names(installed.packages(libDir)) newpkgs <- setdiff(pkgs, installed) if(length(newpkgs)>0){ install.packages(newpkgs, lib = libDir, repos = repos["local"], type = "source", INSTALL_opts="--no-multiarch") } } .ignore_libs <- function(root=getwd(),lib="lib", ci=FALSE) { if(!missing(root) & file.exists(root)) { lib <- file.path(root,"lib") } if(!file.exists(lib)) stop("Could not find lib directory") libs <- list.files(lib, full.names=FALSE) libs <- c(libs, "ignore.txt", "PACKAGES", "PACKAGES.gz") writeLines(con=file.path(lib,"ignore.txt"), libs) setwd(lib) system("svn propset svn:ignore -F ignore.txt .") setwd("..") if(ci) system("svn ci -m \"ignoring libs\" .") }
/src/pkgSetup.R
no_license
amitpatil21/page-2018-mrgsolve
R
false
false
3,006
r
author <- c("*") pkgs <- c("tidyverse", "mrgsolve", "knitr", "rmarkdown", "data.table", "caTools", "bitops", "formatR", "git2r") pkgRoot <- "/data/page-packages" pkgDir <- file.path(pkgRoot, "src", "contrib") pkgDir <- normalizePath(pkgDir) libDir <- "/data/page-Rlibs" if(!dir.exists(pkgDir)) dir.create(pkgDir, recursive = TRUE) if(!dir.exists(libDir)) dir.create(libDir) .libPaths(libDir) user <- Sys.info()["user"] fromCRAN <- user %in% author | "*" %in% author local_repos <- paste0("file://",pkgRoot) metrum_repos <- "https://metrumresearchgroup.github.io/r_validated/" cran_repos <- "https://cran.rstudio.com/" repos <- c(mrg = metrum_repos, cran = cran_repos, local = local_repos) deps <- tools::package_dependencies( packages = pkgs, which = c("Depends", "Imports", "LinkingTo"), recursive = TRUE, db = available.packages(repos=repos[c("mrg", "cran")]) ) deps <- unlist(deps, use.names=FALSE) pkgs <- unique(c(pkgs,deps)) base <- rownames(installed.packages(priority=c("base", "recommended"))) pkgs <- setdiff(pkgs,base) tools::write_PACKAGES(pkgDir) if(file.exists(file.path(pkgDir,"PACKAGES"))){ available <- available.packages(repos = repos["local"])[,"Package"] } else{ available <- NULL file.create(file.path(pkgDir,"PACKAGES")) tools::write_PACKAGES(pkgDir) } if(fromCRAN){ newpkgs <- setdiff(pkgs, available) if(length(newpkgs) > 0){ ## These packages are installed either from mrg or cran install.packages(newpkgs, lib=libDir, repos = repos[c("mrg", "cran")], destdir=pkgDir, type="source", INSTALL_opts="--no-multiarch") tools::write_PACKAGES(pkgDir) } ## If multiple authors qcing each other, a package could be available ## but uninstalled. Install from local. 
uninstalled <- setdiff(pkgs, installed.packages(libDir)) if(length(uninstalled)>0){ install.packages(uninstalled, lib = libDir, repos = repos["local"], type = "source", INSTALL_opts="--no-multiarch") } } if(!fromCRAN){ installed <- row.names(installed.packages(libDir)) newpkgs <- setdiff(pkgs, installed) if(length(newpkgs)>0){ install.packages(newpkgs, lib = libDir, repos = repos["local"], type = "source", INSTALL_opts="--no-multiarch") } } .ignore_libs <- function(root=getwd(),lib="lib", ci=FALSE) { if(!missing(root) & file.exists(root)) { lib <- file.path(root,"lib") } if(!file.exists(lib)) stop("Could not find lib directory") libs <- list.files(lib, full.names=FALSE) libs <- c(libs, "ignore.txt", "PACKAGES", "PACKAGES.gz") writeLines(con=file.path(lib,"ignore.txt"), libs) setwd(lib) system("svn propset svn:ignore -F ignore.txt .") setwd("..") if(ci) system("svn ci -m \"ignoring libs\" .") }
library(devtools) library(roxygen2) setwd("~/Desktop/integrateItRevised") #package.skeleton("integrateItRevised") rm(list=ls()) current.code <- as.package("integrateItRevised") load_all(current.code) document(current.code) #build(current.code, path="~/Desktop/integrateItRevised") #install.packages("~/Desktop/integrateItRevised/integrateItRevised_1.0.tar.gz", repos = NULL, type="source") #library(integrateItRevised) # Example 1 example.x <- 1:25 example.y <- (example.x - 10)^2 + 5 example.trap <- integrateIt("Trap", example.x, example.y, 4, 22) example.simp <- integrateIt("Simp", example.x, example.y, 4, 22) print(example.trap) print(example.simp) plot(example.trap) plot(example.simp) # Example 2 tryx<-seq(-5, 5, by = .95) tryy<-dnorm(tryx) exampTrap<-integrateIt("Trap", tryx, tryy, -5, 4.5) exampSimp<-integrateIt("Simp", tryx, tryy, -5, 4.5) print(exampTrap) print(exampSimp) plot(exampTrap) plot(exampSimp) # Validity Checks: The following statements should produce errors x <- c(2, 4, 6, 8, 10, 12) y <- c(3, 5, 7, 9, 11, 13) print(integrateIt("Simp", x, y, 2, 12)) x <- c(2, 4, 6, 8, 10) y <- c(3, 5, 7, 9, 11, 13) print(integrateIt("Trap", x, y, 4, 8)) print(integrateIt("Simp", x, y, 4, 8)) x <- c(2, 4, 6, 8, 10, 12) y <- c(3, 5, NA, 9, 11, 13) print(integrateIt("Trap", x, y, 4, 8)) print(integrateIt("Simp", x, y, 4, 8)) x <- c(2, 4, 6, 8, 10, 12) y <- c(3, 5, 7, 9, 11, 13) print(integrateIt("Trap", x, y, 8, 4)) #These two actually crash before the validity test, print(integrateIt("Simp", x, y, 8, 4)) #but would not pass the "boundsOrder" test regardless x <- c(2, 4, 6, 10, 8, 12) y <- c(3, 5, 7, 9, 11, 13) print(integrateIt("Trap", x, y, 4, 8)) print(integrateIt("Simp", x, y, 4, 8)) x <- c(2, 4, 6, 8, 10, 12) y <- c(3, 5, 7, 9, 11, 13) print(integrateIt("Trap", x, y, 4, 16)) print(integrateIt("Simp", x, y, 4, 16))
/DevelopmentFile.R
no_license
jacobhample/integrateIt_revised
R
false
false
1,865
r
library(devtools) library(roxygen2) setwd("~/Desktop/integrateItRevised") #package.skeleton("integrateItRevised") rm(list=ls()) current.code <- as.package("integrateItRevised") load_all(current.code) document(current.code) #build(current.code, path="~/Desktop/integrateItRevised") #install.packages("~/Desktop/integrateItRevised/integrateItRevised_1.0.tar.gz", repos = NULL, type="source") #library(integrateItRevised) # Example 1 example.x <- 1:25 example.y <- (example.x - 10)^2 + 5 example.trap <- integrateIt("Trap", example.x, example.y, 4, 22) example.simp <- integrateIt("Simp", example.x, example.y, 4, 22) print(example.trap) print(example.simp) plot(example.trap) plot(example.simp) # Example 2 tryx<-seq(-5, 5, by = .95) tryy<-dnorm(tryx) exampTrap<-integrateIt("Trap", tryx, tryy, -5, 4.5) exampSimp<-integrateIt("Simp", tryx, tryy, -5, 4.5) print(exampTrap) print(exampSimp) plot(exampTrap) plot(exampSimp) # Validity Checks: The following statements should produce errors x <- c(2, 4, 6, 8, 10, 12) y <- c(3, 5, 7, 9, 11, 13) print(integrateIt("Simp", x, y, 2, 12)) x <- c(2, 4, 6, 8, 10) y <- c(3, 5, 7, 9, 11, 13) print(integrateIt("Trap", x, y, 4, 8)) print(integrateIt("Simp", x, y, 4, 8)) x <- c(2, 4, 6, 8, 10, 12) y <- c(3, 5, NA, 9, 11, 13) print(integrateIt("Trap", x, y, 4, 8)) print(integrateIt("Simp", x, y, 4, 8)) x <- c(2, 4, 6, 8, 10, 12) y <- c(3, 5, 7, 9, 11, 13) print(integrateIt("Trap", x, y, 8, 4)) #These two actually crash before the validity test, print(integrateIt("Simp", x, y, 8, 4)) #but would not pass the "boundsOrder" test regardless x <- c(2, 4, 6, 10, 8, 12) y <- c(3, 5, 7, 9, 11, 13) print(integrateIt("Trap", x, y, 4, 8)) print(integrateIt("Simp", x, y, 4, 8)) x <- c(2, 4, 6, 8, 10, 12) y <- c(3, 5, 7, 9, 11, 13) print(integrateIt("Trap", x, y, 4, 16)) print(integrateIt("Simp", x, y, 4, 16))
#+ presets, echo = FALSE, warning = FALSE, message = FALSE library(winference) library(ggplot2) library(ggthemes) library(dplyr) library(foreach) library(doMC) library(doRNG) library(reshape2) library(tidyr) registerDoMC(cores = 6) rm(list = ls()) setmytheme() set.seed(11) target <- get_toggleswitch() # number of observations nobservations <- 2000 load(file = "~/Dropbox/ABCD/Results/data/toggleswitchdata.RData") obs <- obs[1:nobservations] obs_sorted <- sort(obs) # function to compute distance between observed data and data generated given theta compute_d <- function(theta, metric = metricL2){ fake_rand <- target$generate_randomness(nobservations) fake_obs <- target$robservation(nobservations, theta, target$parameters, fake_rand) fake_obs_sorted <- sort(fake_obs) return(metric(obs_sorted, fake_obs_sorted)) } # compute_d(true_theta) proposal <- mixture_proposal() param_algo <- list(nthetas = 2048, nmoves = 1, proposal = proposal, nsteps = 50, minimum_diversity = 0.5, R = 2, maxtrials = 1000) filename <- paste0("~/Dropbox/ABCD/Results/toggleswitch/toggleswitchdata.n", nobservations, ".wsmc_rhit.RData") # results <- wsmc_rhit(compute_d, target, param_algo, savefile = filename) # wsmc.df <- wsmc_to_dataframe(results, target$parameter_names) # nsteps <- max(wsmc.df$step) # save(wsmc.df, results, nsteps, file = filename) # load(filename) wsmc.df <- wsmc_to_dataframe(results, target$parameter_names) nsteps <- max(wsmc.df$step) target$parameter_names qplot(x = 1:nsteps, y = results$threshold_history, geom ="line") + scale_y_log10() g <- ggplot(wsmc.df, aes(x = alpha_1, y = alpha_2, colour = step, group = step)) g <- g + geom_point(alpha = 0.5) g <- g + scale_colour_gradient2(midpoint = floor(nsteps/2)) + theme(legend.position = "none") g <- g + xlab(expression(alpha[1])) + ylab(expression(alpha[2])) g + geom_point(data=NULL, aes(x = true_theta[1], y = true_theta[2], colour = NULL, group = NULL), size = 5) g <- ggplot(wsmc.df, aes(x = alpha_1, group = step)) g <- g + 
geom_density(aes(y = ..density..)) g g <- ggplot(wsmc.df, aes(x = alpha_2, group = step)) g <- g + geom_density(aes(y = ..density..)) g g <- ggplot(wsmc.df, aes(x = beta_1, y = beta_2, colour = step, group = step)) g <- g + geom_point(alpha = 0.5) g <- g + scale_colour_gradient2(midpoint = floor(nsteps/2)) + theme(legend.position = "none") g <- g + xlab(expression(beta[1])) + ylab(expression(beta[2])) g + geom_point(data=NULL, aes(x = true_theta[3], y = true_theta[4], colour = NULL, group = NULL), size = 5) g <- ggplot(wsmc.df, aes(x = mu, y = sigma, colour = step, group = step)) g <- g + geom_point(alpha = 0.5) g <- g + scale_colour_gradient2(midpoint = floor(nsteps/2)) + theme(legend.position = "none") g <- g + xlab(expression(mu)) + ylab(expression(sigma)) g + geom_point(data=NULL, aes(x = true_theta[5], y = true_theta[6], colour = NULL, group = NULL), size = 5) g <- ggplot(wsmc.df, aes(x = sigma, y = gamma, colour = step, group = step)) g <- g + geom_point(alpha = 0.5) g <- g + scale_colour_gradient2(midpoint = floor(nsteps/2)) + theme(legend.position = "none") g <- g + xlab(expression(sigma)) + ylab(expression(gamma)) g + geom_point(data=NULL, aes(x = true_theta[6], y = true_theta[7], colour = NULL, group = NULL), size = 5) g <- ggplot(wsmc.df %>% filter(step > 20), aes(x = gamma, group = step)) + geom_density(aes(y = ..density..)) g <- g + theme(legend.position = "none") g <- g + xlab(expression(gamma)) g
/inst/reproduce/toggleswitch/toggleswitch_w-smcrhit-n2000.R
no_license
alexanderwhatley/winference
R
false
false
3,546
r
#+ presets, echo = FALSE, warning = FALSE, message = FALSE
# Wasserstein-SMC (r-hit kernel) inference for the toggle switch model,
# using n = 2000 observations; produces posterior scatter/density figures.
library(winference)
library(ggplot2)
library(ggthemes)
library(dplyr)
library(foreach)
library(doMC)
library(doRNG)
library(reshape2)
library(tidyr)
registerDoMC(cores = 6)
rm(list = ls())
setmytheme()
set.seed(11)

target <- get_toggleswitch()
# number of observations
nobservations <- 2000
load(file = "~/Dropbox/ABCD/Results/data/toggleswitchdata.RData")
obs <- obs[1:nobservations]
obs_sorted <- sort(obs)

# function to compute distance between observed data and data generated given theta
# (sorted-sample metric, i.e. a 1-d Wasserstein-type distance)
compute_d <- function(theta, metric = metricL2){
  fake_rand <- target$generate_randomness(nobservations)
  fake_obs <- target$robservation(nobservations, theta, target$parameters, fake_rand)
  fake_obs_sorted <- sort(fake_obs)
  return(metric(obs_sorted, fake_obs_sorted))
}
# compute_d(true_theta)

# SMC sampler settings: 2048 particles, 50 steps, r-hit kernel with R = 2
proposal <- mixture_proposal()
param_algo <- list(nthetas = 2048, nmoves = 1, proposal = proposal, nsteps = 50,
                   minimum_diversity = 0.5, R = 2, maxtrials = 1000)
filename <- paste0("~/Dropbox/ABCD/Results/toggleswitch/toggleswitchdata.n", nobservations, ".wsmc_rhit.RData")
# results <- wsmc_rhit(compute_d, target, param_algo, savefile = filename)
# wsmc.df <- wsmc_to_dataframe(results, target$parameter_names)
# nsteps <- max(wsmc.df$step)
# save(wsmc.df, results, nsteps, file = filename)
# load(filename)

# NOTE(review): `results` is assumed to be in the workspace (from the
# commented-out run or a previous load) — confirm before sourcing.
wsmc.df <- wsmc_to_dataframe(results, target$parameter_names)
nsteps <- max(wsmc.df$step)
target$parameter_names

# Threshold schedule across SMC steps (log scale)
qplot(x = 1:nsteps, y = results$threshold_history, geom ="line") + scale_y_log10()

# (alpha_1, alpha_2) particles, coloured by step, with the true value overlaid
g <- ggplot(wsmc.df, aes(x = alpha_1, y = alpha_2, colour = step, group = step))
g <- g + geom_point(alpha = 0.5)
g <- g + scale_colour_gradient2(midpoint = floor(nsteps/2)) + theme(legend.position = "none")
g <- g + xlab(expression(alpha[1])) + ylab(expression(alpha[2]))
g + geom_point(data=NULL, aes(x = true_theta[1], y = true_theta[2], colour = NULL, group = NULL), size = 5)

# Marginal densities of alpha_1 and alpha_2 over steps
g <- ggplot(wsmc.df, aes(x = alpha_1, group = step))
g <- g + geom_density(aes(y = ..density..))
g

g <- ggplot(wsmc.df, aes(x = alpha_2, group = step))
g <- g + geom_density(aes(y = ..density..))
g

# (beta_1, beta_2) particles with the true value overlaid
g <- ggplot(wsmc.df, aes(x = beta_1, y = beta_2, colour = step, group = step))
g <- g + geom_point(alpha = 0.5)
g <- g + scale_colour_gradient2(midpoint = floor(nsteps/2)) + theme(legend.position = "none")
g <- g + xlab(expression(beta[1])) + ylab(expression(beta[2]))
g + geom_point(data=NULL, aes(x = true_theta[3], y = true_theta[4], colour = NULL, group = NULL), size = 5)

# (mu, sigma) particles with the true value overlaid
g <- ggplot(wsmc.df, aes(x = mu, y = sigma, colour = step, group = step))
g <- g + geom_point(alpha = 0.5)
g <- g + scale_colour_gradient2(midpoint = floor(nsteps/2)) + theme(legend.position = "none")
g <- g + xlab(expression(mu)) + ylab(expression(sigma))
g + geom_point(data=NULL, aes(x = true_theta[5], y = true_theta[6], colour = NULL, group = NULL), size = 5)

# (sigma, gamma) particles with the true value overlaid
g <- ggplot(wsmc.df, aes(x = sigma, y = gamma, colour = step, group = step))
g <- g + geom_point(alpha = 0.5)
g <- g + scale_colour_gradient2(midpoint = floor(nsteps/2)) + theme(legend.position = "none")
g <- g + xlab(expression(sigma)) + ylab(expression(gamma))
g + geom_point(data=NULL, aes(x = true_theta[6], y = true_theta[7], colour = NULL, group = NULL), size = 5)

# Marginal density of gamma, later steps only
g <- ggplot(wsmc.df %>% filter(step > 20), aes(x = gamma, group = step)) + geom_density(aes(y = ..density..))
g <- g + theme(legend.position = "none")
g <- g + xlab(expression(gamma))
g
## makeCacheMatrix creates a special "matrix" object: a list of four
## closures sharing the matrix `x` and its cached inverse. It contains:
## 1. set(y): replace the matrix (and invalidate the cached inverse).
## 2. get(): return the current matrix.
## 3. setinverse(inverse): store the computed inverse in the cache.
## 4. getinverse(): return the cached inverse, or NULL if not yet set.
makeCacheMatrix <- function(x = matrix()) {
  inversed <- NULL  # cached inverse; NULL until cacheSolve() fills it
  set <- function(y) {
    x <<- y
    inversed <<- NULL  # new matrix -> drop the stale cached inverse
  }
  get <- function() x
  setinverse <- function(inverse) inversed <<- inverse
  getinverse <- function() inversed
  list(set = set, get = get,
       setinverse = setinverse, getinverse = getinverse)
}

## cacheSolve returns the inverse of the special "matrix" built by
## makeCacheMatrix. On the first call it computes the inverse with
## solve() and stores it; later calls return the cached value.
## Fix: extra arguments (...) are now forwarded to solve(), as the
## signature advertises (they were previously accepted but ignored).
## Note: the cache keys only on the matrix, not on `...`.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inversed <- x$getinverse()
  if (!is.null(inversed)) {
    message("getting cached data.")
    return(inversed)
  }
  data <- x$get()
  inversed <- solve(data, ...)
  x$setinverse(inversed)
  inversed
}
/Cachematrix.r
no_license
Mangalahegde/R-programming-assignment_Mangala
R
false
false
1,043
r
## makeCacheMatrix creates a special "matrix" object: a list of four
## closures sharing the matrix `x` and its cached inverse. It contains:
## 1. set(y): replace the matrix (and invalidate the cached inverse).
## 2. get(): return the current matrix.
## 3. setinverse(inverse): store the computed inverse in the cache.
## 4. getinverse(): return the cached inverse, or NULL if not yet set.
makeCacheMatrix <- function(x = matrix()) {
  inversed <- NULL  # cached inverse; NULL until cacheSolve() fills it
  set <- function(y) {
    x <<- y
    inversed <<- NULL  # new matrix -> drop the stale cached inverse
  }
  get <- function() x
  setinverse <- function(inverse) inversed <<- inverse
  getinverse <- function() inversed
  list(set = set, get = get,
       setinverse = setinverse, getinverse = getinverse)
}

## cacheSolve returns the inverse of the special "matrix" built by
## makeCacheMatrix. On the first call it computes the inverse with
## solve() and stores it; later calls return the cached value.
## Fix: extra arguments (...) are now forwarded to solve(), as the
## signature advertises (they were previously accepted but ignored).
## Note: the cache keys only on the matrix, not on `...`.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inversed <- x$getinverse()
  if (!is.null(inversed)) {
    message("getting cached data.")
    return(inversed)
  }
  data <- x$get()
  inversed <- solve(data, ...)
  x$setinverse(inversed)
  inversed
}
# Minimal shinyfio demo: a file-selection widget whose chosen value is
# echoed back in a verbatim text panel for inspection.
library(shiny)
library(shinyfio)

# UI: the selector widget plus a debug output showing the selection.
ui <- fluidPage(
  wellPanel(
    fileSelectWidget("fileSelect"),
    verbatimTextOutput("debug")
  )
)

# Server: bind the widget to a reactive file selection. Files may come
# from either the client or the server (C_FILE_LOCATION_BOTH); the
# working directory is exposed as the server-side root "wd".
server <- function(input, output) {
  selectedFile <- selectFile(
    "fileSelect",
    fileLocation = C_FILE_LOCATION_BOTH,
    serverRootDirectories = c("wd" = ".")
  )
  output$debug <- renderPrint({
    selectedFile()
  })
}

shinyApp(ui = ui, server = server)
/inst/examples/file_select/file_select.R
permissive
keqiang/shinyfio
R
false
false
389
r
# Minimal shinyfio demo: a file-selection widget whose chosen value is
# echoed back in a verbatim text panel for inspection.
library(shiny)
library(shinyfio)

# UI: the selector widget plus a debug output showing the selection.
ui <- fluidPage(
  wellPanel(
    fileSelectWidget("fileSelect"),
    verbatimTextOutput("debug")
  )
)

# Server: bind the widget to a reactive file selection. Files may come
# from either the client or the server (C_FILE_LOCATION_BOTH); the
# working directory is exposed as the server-side root "wd".
server <- function(input, output) {
  selectedFile <- selectFile(
    "fileSelect",
    fileLocation = C_FILE_LOCATION_BOTH,
    serverRootDirectories = c("wd" = ".")
  )
  output$debug <- renderPrint({
    selectedFile()
  })
}

shinyApp(ui = ui, server = server)